]> git.saurik.com Git - apple/xnu.git/blame - osfmk/arm64/status.c
xnu-6153.141.1.tar.gz
[apple/xnu.git] / osfmk / arm64 / status.c
CommitLineData
5ba3f43e
A
1/*
2 * Copyright (c) 2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28#include <debug.h>
29#include <mach/mach_types.h>
30#include <mach/kern_return.h>
31#include <mach/thread_status.h>
32#include <kern/thread.h>
33#include <kern/kalloc.h>
34#include <arm/vmparam.h>
35#include <arm/cpu_data_internal.h>
36#include <arm64/proc_reg.h>
cb323159
A
37#if __has_feature(ptrauth_calls)
38#include <ptrauth.h>
39#endif
5ba3f43e 40
/*
 * Legacy VFPv2 register-file layout: 32 x 32-bit registers plus the
 * FPSCR status/control word.  Used only to honor old clients that pass
 * a short ARM_VFP_STATE buffer (see the ARM_VFP_STATE case in
 * machine_thread_get_state).
 */
struct arm_vfpv2_state {
	__uint32_t __r[32];
	__uint32_t __fpscr;
};

typedef struct arm_vfpv2_state arm_vfpv2_state_t;

/* Size of arm_vfpv2_state_t expressed in 32-bit words, as Mach counts are. */
#define ARM_VFPV2_STATE_COUNT \
	((mach_msg_type_number_t)(sizeof (arm_vfpv2_state_t)/sizeof(uint32_t)))
5ba3f43e
A
50
/*
 * Forward definitions
 */
/* Set the register state a forked child/parent resumes with (pid is the
 * value the respective side of fork() should observe).  Defined later in
 * this file. */
void thread_set_child(thread_t child, int pid);
void thread_set_parent(thread_t parent, int pid);
56
/*
 * Maps state flavor to number of words in the state:
 * indexed by the thread_flavor_t constants; each entry is the
 * mach_msg_type_number_t count callers must supply for that flavor.
 */
/* __private_extern__ */
unsigned int _MachineStateCount[] = {
	[ARM_UNIFIED_THREAD_STATE] = ARM_UNIFIED_THREAD_STATE_COUNT,
	[ARM_VFP_STATE] = ARM_VFP_STATE_COUNT,
	[ARM_EXCEPTION_STATE] = ARM_EXCEPTION_STATE_COUNT,
	[ARM_DEBUG_STATE] = ARM_DEBUG_STATE_COUNT,
	[ARM_THREAD_STATE64] = ARM_THREAD_STATE64_COUNT,
	[ARM_EXCEPTION_STATE64] = ARM_EXCEPTION_STATE64_COUNT,
	[ARM_THREAD_STATE32] = ARM_THREAD_STATE32_COUNT,
	[ARM_DEBUG_STATE32] = ARM_DEBUG_STATE32_COUNT,
	[ARM_DEBUG_STATE64] = ARM_DEBUG_STATE64_COUNT,
	[ARM_NEON_STATE] = ARM_NEON_STATE_COUNT,
	[ARM_NEON_STATE64] = ARM_NEON_STATE64_COUNT,
	[ARM_PAGEIN_STATE] = ARM_PAGEIN_STATE_COUNT,
};
75
/* Zone backing per-thread debug state allocations (thread->machine.DebugData). */
extern zone_t ads_zone;
77
78#if __arm64__
/*
 * Copy values from saved_state to ts64.
 *
 * Exports fp, lr, sp, pc, cpsr and x0..x28 of a 64-bit saved exception
 * state into the user-visible arm_thread_state64_t layout.
 */
void
saved_state_to_thread_state64(const arm_saved_state_t * saved_state,
    arm_thread_state64_t * ts64)
{
	uint32_t i;

	assert(is_saved_state64(saved_state));

	ts64->fp = get_saved_state_fp(saved_state);
	ts64->lr = get_saved_state_lr(saved_state);
	ts64->sp = get_saved_state_sp(saved_state);
	ts64->pc = get_saved_state_pc(saved_state);
	ts64->cpsr = get_saved_state_cpsr(saved_state);
	/* x0..x28; fp/lr/sp were exported via the dedicated fields above. */
	for (i = 0; i < 29; i++) {
		ts64->x[i] = get_saved_state_reg(saved_state, i);
	}
}
99
/*
 * Copy values from ts64 to saved_state.
 *
 * With ptrauth enabled, interrupts are masked for the duration of the
 * update so the partially-written state cannot be observed, and the
 * cpsr write is made visible (DMB) before the remaining registers.
 */
void
thread_state64_to_saved_state(const arm_thread_state64_t * ts64,
    arm_saved_state_t * saved_state)
{
	uint32_t i;
#if __has_feature(ptrauth_calls)
	boolean_t intr = ml_set_interrupts_enabled(FALSE);
#endif /* __has_feature(ptrauth_calls) */

	assert(is_saved_state64(saved_state));

	/* Force 64-bit EL0 mode bits; callers cannot select their own mode. */
	set_saved_state_cpsr(saved_state, (ts64->cpsr & ~PSR64_MODE_MASK) | PSR64_MODE_RW_64);
#if __has_feature(ptrauth_calls)
	/*
	 * Make writes to ts64->cpsr visible first, since it's useful as a
	 * canary to detect thread-state corruption.
	 */
	__builtin_arm_dmb(DMB_ST);
#endif
	set_saved_state_fp(saved_state, ts64->fp);
	set_saved_state_lr(saved_state, ts64->lr);
	set_saved_state_sp(saved_state, ts64->sp);
	set_saved_state_pc(saved_state, ts64->pc);
	for (i = 0; i < 29; i++) {
		set_saved_state_reg(saved_state, i, ts64->x[i]);
	}

#if __has_feature(ptrauth_calls)
	ml_set_interrupts_enabled(intr);
#endif /* __has_feature(ptrauth_calls) */
}
5ba3f43e 134
cb323159
A
135#endif /* __arm64__ */
136
137static kern_return_t
138handle_get_arm32_thread_state(thread_state_t tstate,
139 mach_msg_type_number_t * count,
140 const arm_saved_state_t * saved_state)
5ba3f43e 141{
0a7de745
A
142 if (*count < ARM_THREAD_STATE32_COUNT) {
143 return KERN_INVALID_ARGUMENT;
144 }
145 if (!is_saved_state32(saved_state)) {
146 return KERN_INVALID_ARGUMENT;
147 }
5ba3f43e
A
148
149 (void)saved_state_to_thread_state32(saved_state, (arm_thread_state32_t *)tstate);
150 *count = ARM_THREAD_STATE32_COUNT;
151 return KERN_SUCCESS;
152}
153
cb323159
A
154static kern_return_t
155handle_get_arm64_thread_state(thread_state_t tstate,
156 mach_msg_type_number_t * count,
157 const arm_saved_state_t * saved_state)
5ba3f43e 158{
0a7de745
A
159 if (*count < ARM_THREAD_STATE64_COUNT) {
160 return KERN_INVALID_ARGUMENT;
161 }
162 if (!is_saved_state64(saved_state)) {
163 return KERN_INVALID_ARGUMENT;
164 }
5ba3f43e
A
165
166 (void)saved_state_to_thread_state64(saved_state, (arm_thread_state64_t *)tstate);
167 *count = ARM_THREAD_STATE64_COUNT;
168 return KERN_SUCCESS;
169}
170
171
cb323159
A
172static kern_return_t
173handle_get_arm_thread_state(thread_state_t tstate,
174 mach_msg_type_number_t * count,
175 const arm_saved_state_t * saved_state)
5ba3f43e
A
176{
177 /* In an arm64 world, this flavor can be used to retrieve the thread
178 * state of a 32-bit or 64-bit thread into a unified structure, but we
179 * need to support legacy clients who are only aware of 32-bit, so
180 * check the count to see what the client is expecting.
181 */
182 if (*count < ARM_UNIFIED_THREAD_STATE_COUNT) {
183 return handle_get_arm32_thread_state(tstate, count, saved_state);
184 }
185
186 arm_unified_thread_state_t *unified_state = (arm_unified_thread_state_t *) tstate;
187 bzero(unified_state, sizeof(*unified_state));
188#if __arm64__
189 if (is_saved_state64(saved_state)) {
190 unified_state->ash.flavor = ARM_THREAD_STATE64;
191 unified_state->ash.count = ARM_THREAD_STATE64_COUNT;
192 (void)saved_state_to_thread_state64(saved_state, thread_state64(unified_state));
193 } else
194#endif
195 {
196 unified_state->ash.flavor = ARM_THREAD_STATE32;
197 unified_state->ash.count = ARM_THREAD_STATE32_COUNT;
198 (void)saved_state_to_thread_state32(saved_state, thread_state32(unified_state));
199 }
200 *count = ARM_UNIFIED_THREAD_STATE_COUNT;
0a7de745 201 return KERN_SUCCESS;
5ba3f43e
A
202}
203
cb323159
A
204
205static kern_return_t
206handle_set_arm32_thread_state(const thread_state_t tstate,
207 mach_msg_type_number_t count,
208 arm_saved_state_t * saved_state)
5ba3f43e 209{
0a7de745
A
210 if (count != ARM_THREAD_STATE32_COUNT) {
211 return KERN_INVALID_ARGUMENT;
212 }
5ba3f43e
A
213
214 (void)thread_state32_to_saved_state((const arm_thread_state32_t *)tstate, saved_state);
215 return KERN_SUCCESS;
216}
217
cb323159
A
218static kern_return_t
219handle_set_arm64_thread_state(const thread_state_t tstate,
220 mach_msg_type_number_t count,
221 arm_saved_state_t * saved_state)
5ba3f43e 222{
0a7de745
A
223 if (count != ARM_THREAD_STATE64_COUNT) {
224 return KERN_INVALID_ARGUMENT;
225 }
5ba3f43e
A
226
227 (void)thread_state64_to_saved_state((const arm_thread_state64_t *)tstate, saved_state);
228 return KERN_SUCCESS;
229}
230
231
cb323159
A
232static kern_return_t
233handle_set_arm_thread_state(const thread_state_t tstate,
234 mach_msg_type_number_t count,
235 arm_saved_state_t * saved_state)
5ba3f43e
A
236{
237 /* In an arm64 world, this flavor can be used to set the thread state of a
238 * 32-bit or 64-bit thread from a unified structure, but we need to support
239 * legacy clients who are only aware of 32-bit, so check the count to see
240 * what the client is expecting.
241 */
242 if (count < ARM_UNIFIED_THREAD_STATE_COUNT) {
d9a64523 243 if (!is_saved_state32(saved_state)) {
0a7de745 244 return KERN_INVALID_ARGUMENT;
d9a64523 245 }
5ba3f43e
A
246 return handle_set_arm32_thread_state(tstate, count, saved_state);
247 }
248
249 const arm_unified_thread_state_t *unified_state = (const arm_unified_thread_state_t *) tstate;
250#if __arm64__
251 if (is_thread_state64(unified_state)) {
d9a64523 252 if (!is_saved_state64(saved_state)) {
0a7de745 253 return KERN_INVALID_ARGUMENT;
d9a64523 254 }
5ba3f43e
A
255 (void)thread_state64_to_saved_state(const_thread_state64(unified_state), saved_state);
256 } else
257#endif
258 {
d9a64523 259 if (!is_saved_state32(saved_state)) {
0a7de745 260 return KERN_INVALID_ARGUMENT;
d9a64523 261 }
5ba3f43e
A
262 (void)thread_state32_to_saved_state(const_thread_state32(unified_state), saved_state);
263 }
264
0a7de745 265 return KERN_SUCCESS;
5ba3f43e
A
266}
267
cb323159 268
d9a64523
A
/*
 * Translate thread state arguments to userspace representation
 *
 * With ptrauth: signs pc/lr/sp/fp of an outgoing 64-bit thread state
 * with user-space discriminators so userspace can consume them, unless
 * JOP is disabled for either side, in which case the state is flagged
 * NO_PTRAUTH and left unsigned.  Other flavors pass through untouched.
 */

kern_return_t
machine_thread_state_convert_to_user(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t *count)
{
#if __has_feature(ptrauth_calls)
	arm_thread_state64_t *ts64;

	/* Only 64-bit thread state carries pointers that need signing. */
	switch (flavor) {
	case ARM_THREAD_STATE:
	{
		arm_unified_thread_state_t *unified_state = (arm_unified_thread_state_t *)tstate;

		if (*count < ARM_UNIFIED_THREAD_STATE_COUNT || !is_thread_state64(unified_state)) {
			return KERN_SUCCESS;
		}
		ts64 = thread_state64(unified_state);
		break;
	}
	case ARM_THREAD_STATE64:
	{
		if (*count < ARM_THREAD_STATE64_COUNT) {
			return KERN_SUCCESS;
		}
		ts64 = (arm_thread_state64_t *)tstate;
		break;
	}
	default:
		return KERN_SUCCESS;
	}

	// Note that kernel threads never have disable_user_jop set
	if (current_thread()->machine.disable_user_jop || !thread_is_64bit_addr(current_thread()) ||
	    thread->machine.disable_user_jop || !thread_is_64bit_addr(thread) ||
	    (BootArgs->bootFlags & kBootFlagsDisableUserThreadStateJOP)) {
		ts64->flags = __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH;
		return KERN_SUCCESS;
	}

	ts64->flags = 0;
	if (ts64->lr) {
		// lr might contain an IB-signed return address (strip is a no-op on unsigned addresses)
		uintptr_t stripped_lr = (uintptr_t)ptrauth_strip((void *)ts64->lr,
		    ptrauth_key_return_address);
		if (ts64->lr != stripped_lr) {
			// Need to allow already-signed lr value to round-trip as is
			ts64->flags |= __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR;
		}
		// Note that an IB-signed return address that happens to have a 0 signature value
		// will round-trip correctly even if IA-signed again below (and IA-authd later)
	}

	if (BootArgs->bootFlags & kBootFlagsDisableUserJOP) {
		return KERN_SUCCESS;
	}

	/* Sign code pointers with the code key, stack pointers with the data key. */
	if (ts64->pc) {
		ts64->pc = (uintptr_t)pmap_sign_user_ptr((void*)ts64->pc,
		    ptrauth_key_process_independent_code, ptrauth_string_discriminator("pc"));
	}
	if (ts64->lr && !(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR)) {
		ts64->lr = (uintptr_t)pmap_sign_user_ptr((void*)ts64->lr,
		    ptrauth_key_process_independent_code, ptrauth_string_discriminator("lr"));
	}
	if (ts64->sp) {
		ts64->sp = (uintptr_t)pmap_sign_user_ptr((void*)ts64->sp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("sp"));
	}
	if (ts64->fp) {
		ts64->fp = (uintptr_t)pmap_sign_user_ptr((void*)ts64->fp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("fp"));
	}

	return KERN_SUCCESS;
#else
	// No conversion to userspace representation on this platform
	(void)thread; (void)flavor; (void)tstate; (void)count;
	return KERN_SUCCESS;
#endif /* __has_feature(ptrauth_calls) */
}
355
/*
 * Translate thread state arguments from userspace representation
 *
 * With ptrauth: authenticates the signed pc/lr/sp/fp of an incoming
 * 64-bit thread state (auth failure poisons the value so the thread
 * cannot run with a forged pointer).  A JOP-disabled caller may not set
 * state on a JOP-enabled target (KERN_PROTECTION_FAILURE).  Other
 * flavors pass through untouched.
 */

kern_return_t
machine_thread_state_convert_from_user(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t count)
{
#if __has_feature(ptrauth_calls)
	arm_thread_state64_t *ts64;

	/* Only 64-bit thread state carries pointers that need authentication. */
	switch (flavor) {
	case ARM_THREAD_STATE:
	{
		arm_unified_thread_state_t *unified_state = (arm_unified_thread_state_t *)tstate;

		if (count < ARM_UNIFIED_THREAD_STATE_COUNT || !is_thread_state64(unified_state)) {
			return KERN_SUCCESS;
		}
		ts64 = thread_state64(unified_state);
		break;
	}
	case ARM_THREAD_STATE64:
	{
		if (count != ARM_THREAD_STATE64_COUNT) {
			return KERN_SUCCESS;
		}
		ts64 = (arm_thread_state64_t *)tstate;
		break;
	}
	default:
		return KERN_SUCCESS;
	}

	// Note that kernel threads never have disable_user_jop set
	if (current_thread()->machine.disable_user_jop || !thread_is_64bit_addr(current_thread())) {
		if (thread->machine.disable_user_jop || !thread_is_64bit_addr(thread)) {
			ts64->flags = __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH;
			return KERN_SUCCESS;
		}
		// A JOP-disabled process must not set thread state on a JOP-enabled process
		return KERN_PROTECTION_FAILURE;
	}

	if (ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH) {
		if (thread->machine.disable_user_jop || !thread_is_64bit_addr(thread) ||
		    (BootArgs->bootFlags & kBootFlagsDisableUserThreadStateJOP)) {
			return KERN_SUCCESS;
		}
		// Disallow setting unsigned thread state on JOP-enabled processes.
		// Ignore flag and treat thread state arguments as signed, ptrauth
		// poisoning will cause resulting thread state to be invalid
		ts64->flags &= ~__DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH;
	}

	if (ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR) {
		// lr might contain an IB-signed return address (strip is a no-op on unsigned addresses)
		uintptr_t stripped_lr = (uintptr_t)ptrauth_strip((void *)ts64->lr,
		    ptrauth_key_return_address);
		if (ts64->lr == stripped_lr) {
			// Don't allow unsigned pointer to be passed through as is. Ignore flag and
			// treat as IA-signed below (where auth failure may poison the value).
			ts64->flags &= ~__DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR;
		}
		// Note that an IB-signed return address that happens to have a 0 signature value
		// will also have been IA-signed (without this flag being set) and so will IA-auth
		// correctly below.
	}

	if (BootArgs->bootFlags & kBootFlagsDisableUserJOP) {
		return KERN_SUCCESS;
	}

	/* Authenticate code pointers with the code key, stack pointers with the data key. */
	if (ts64->pc) {
		ts64->pc = (uintptr_t)pmap_auth_user_ptr((void*)ts64->pc,
		    ptrauth_key_process_independent_code, ptrauth_string_discriminator("pc"));
	}
	if (ts64->lr && !(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR)) {
		ts64->lr = (uintptr_t)pmap_auth_user_ptr((void*)ts64->lr,
		    ptrauth_key_process_independent_code, ptrauth_string_discriminator("lr"));
	}
	if (ts64->sp) {
		ts64->sp = (uintptr_t)pmap_auth_user_ptr((void*)ts64->sp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("sp"));
	}
	if (ts64->fp) {
		ts64->fp = (uintptr_t)pmap_auth_user_ptr((void*)ts64->fp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("fp"));
	}

	return KERN_SUCCESS;
#else
	// No conversion from userspace representation on this platform
	(void)thread; (void)flavor; (void)tstate; (void)count;
	return KERN_SUCCESS;
#endif /* __has_feature(ptrauth_calls) */
}
456
457/*
458 * Translate signal context data pointer to userspace representation
459 */
460
461kern_return_t
462machine_thread_siguctx_pointer_convert_to_user(
0a7de745
A
463 __assert_only thread_t thread,
464 user_addr_t *uctxp)
d9a64523 465{
cb323159
A
466#if __has_feature(ptrauth_calls)
467 if (current_thread()->machine.disable_user_jop || !thread_is_64bit_addr(current_thread())) {
468 assert(thread->machine.disable_user_jop || !thread_is_64bit_addr(thread));
469 return KERN_SUCCESS;
470 }
471
472 if (BootArgs->bootFlags & kBootFlagsDisableUserJOP) {
473 return KERN_SUCCESS;
474 }
475
476 if (*uctxp) {
477 *uctxp = (uintptr_t)pmap_sign_user_ptr((void*)*uctxp,
478 ptrauth_key_process_independent_data, ptrauth_string_discriminator("uctx"));
479 }
480
481 return KERN_SUCCESS;
482#else
d9a64523
A
483 // No conversion to userspace representation on this platform
484 (void)thread; (void)uctxp;
485 return KERN_SUCCESS;
cb323159 486#endif /* __has_feature(ptrauth_calls) */
d9a64523
A
487}
488
489/*
490 * Translate array of function pointer syscall arguments from userspace representation
491 */
492
493kern_return_t
494machine_thread_function_pointers_convert_from_user(
0a7de745
A
495 __assert_only thread_t thread,
496 user_addr_t *fptrs,
497 uint32_t count)
d9a64523 498{
cb323159
A
499#if __has_feature(ptrauth_calls)
500 if (current_thread()->machine.disable_user_jop || !thread_is_64bit_addr(current_thread())) {
501 assert(thread->machine.disable_user_jop || !thread_is_64bit_addr(thread));
502 return KERN_SUCCESS;
503 }
504
505 if (BootArgs->bootFlags & kBootFlagsDisableUserJOP) {
506 return KERN_SUCCESS;
507 }
508
509 while (count--) {
510 if (*fptrs) {
511 *fptrs = (uintptr_t)pmap_auth_user_ptr((void*)*fptrs,
512 ptrauth_key_function_pointer, 0);
513 }
514 fptrs++;
515 }
516
517 return KERN_SUCCESS;
518#else
d9a64523
A
519 // No conversion from userspace representation on this platform
520 (void)thread; (void)fptrs; (void)count;
521 return KERN_SUCCESS;
cb323159 522#endif /* __has_feature(ptrauth_calls) */
d9a64523
A
523}
524
5ba3f43e 525/*
cb323159 526 * Routine: machine_thread_get_state
5ba3f43e
A
527 *
528 */
529kern_return_t
cb323159
A
530machine_thread_get_state(thread_t thread,
531 thread_flavor_t flavor,
532 thread_state_t tstate,
533 mach_msg_type_number_t * count)
5ba3f43e
A
534{
535 switch (flavor) {
536 case THREAD_STATE_FLAVOR_LIST:
0a7de745
A
537 if (*count < 4) {
538 return KERN_INVALID_ARGUMENT;
539 }
5ba3f43e
A
540
541 tstate[0] = ARM_THREAD_STATE;
542 tstate[1] = ARM_VFP_STATE;
543 tstate[2] = ARM_EXCEPTION_STATE;
544 tstate[3] = ARM_DEBUG_STATE;
545 *count = 4;
546 break;
547
548 case THREAD_STATE_FLAVOR_LIST_NEW:
0a7de745
A
549 if (*count < 4) {
550 return KERN_INVALID_ARGUMENT;
551 }
5ba3f43e
A
552
553 tstate[0] = ARM_THREAD_STATE;
554 tstate[1] = ARM_VFP_STATE;
d9a64523
A
555 tstate[2] = thread_is_64bit_data(thread) ? ARM_EXCEPTION_STATE64 : ARM_EXCEPTION_STATE;
556 tstate[3] = thread_is_64bit_data(thread) ? ARM_DEBUG_STATE64 : ARM_DEBUG_STATE32;
5ba3f43e
A
557 *count = 4;
558 break;
559
cb323159
A
560 case THREAD_STATE_FLAVOR_LIST_10_15:
561 if (*count < 5) {
562 return KERN_INVALID_ARGUMENT;
563 }
564
565 tstate[0] = ARM_THREAD_STATE;
566 tstate[1] = ARM_VFP_STATE;
567 tstate[2] = thread_is_64bit_data(thread) ? ARM_EXCEPTION_STATE64 : ARM_EXCEPTION_STATE;
568 tstate[3] = thread_is_64bit_data(thread) ? ARM_DEBUG_STATE64 : ARM_DEBUG_STATE32;
569 tstate[4] = ARM_PAGEIN_STATE;
570 *count = 5;
571 break;
572
5ba3f43e
A
573 case ARM_THREAD_STATE:
574 {
575 kern_return_t rn = handle_get_arm_thread_state(tstate, count, thread->machine.upcb);
0a7de745
A
576 if (rn) {
577 return rn;
578 }
5ba3f43e
A
579 break;
580 }
581 case ARM_THREAD_STATE32:
582 {
0a7de745 583 if (thread_is_64bit_data(thread)) {
5ba3f43e 584 return KERN_INVALID_ARGUMENT;
0a7de745 585 }
5ba3f43e
A
586
587 kern_return_t rn = handle_get_arm32_thread_state(tstate, count, thread->machine.upcb);
0a7de745
A
588 if (rn) {
589 return rn;
590 }
5ba3f43e
A
591 break;
592 }
593#if __arm64__
594 case ARM_THREAD_STATE64:
595 {
0a7de745 596 if (!thread_is_64bit_data(thread)) {
5ba3f43e 597 return KERN_INVALID_ARGUMENT;
0a7de745 598 }
5ba3f43e
A
599
600 kern_return_t rn = handle_get_arm64_thread_state(tstate, count, thread->machine.upcb);
0a7de745
A
601 if (rn) {
602 return rn;
603 }
5ba3f43e
A
604 break;
605 }
606#endif
607 case ARM_EXCEPTION_STATE:{
0a7de745
A
608 struct arm_exception_state *state;
609 struct arm_saved_state32 *saved_state;
5ba3f43e 610
0a7de745
A
611 if (*count < ARM_EXCEPTION_STATE_COUNT) {
612 return KERN_INVALID_ARGUMENT;
613 }
614 if (thread_is_64bit_data(thread)) {
615 return KERN_INVALID_ARGUMENT;
616 }
5ba3f43e 617
0a7de745
A
618 state = (struct arm_exception_state *) tstate;
619 saved_state = saved_state32(thread->machine.upcb);
5ba3f43e 620
0a7de745
A
621 state->exception = saved_state->exception;
622 state->fsr = saved_state->esr;
623 state->far = saved_state->far;
5ba3f43e 624
0a7de745
A
625 *count = ARM_EXCEPTION_STATE_COUNT;
626 break;
627 }
5ba3f43e 628 case ARM_EXCEPTION_STATE64:{
0a7de745
A
629 struct arm_exception_state64 *state;
630 struct arm_saved_state64 *saved_state;
631
632 if (*count < ARM_EXCEPTION_STATE64_COUNT) {
633 return KERN_INVALID_ARGUMENT;
634 }
635 if (!thread_is_64bit_data(thread)) {
636 return KERN_INVALID_ARGUMENT;
637 }
5ba3f43e 638
0a7de745
A
639 state = (struct arm_exception_state64 *) tstate;
640 saved_state = saved_state64(thread->machine.upcb);
5ba3f43e 641
0a7de745
A
642 state->exception = saved_state->exception;
643 state->far = saved_state->far;
644 state->esr = saved_state->esr;
5ba3f43e 645
0a7de745
A
646 *count = ARM_EXCEPTION_STATE64_COUNT;
647 break;
648 }
649 case ARM_DEBUG_STATE:{
650 arm_legacy_debug_state_t *state;
651 arm_debug_state32_t *thread_state;
5ba3f43e 652
0a7de745
A
653 if (*count < ARM_LEGACY_DEBUG_STATE_COUNT) {
654 return KERN_INVALID_ARGUMENT;
5ba3f43e 655 }
0a7de745
A
656
657 if (thread_is_64bit_data(thread)) {
658 return KERN_INVALID_ARGUMENT;
659 }
660
661 state = (arm_legacy_debug_state_t *) tstate;
662 thread_state = find_debug_state32(thread);
663
664 if (thread_state == NULL) {
665 bzero(state, sizeof(arm_legacy_debug_state_t));
666 } else {
667 bcopy(thread_state, state, sizeof(arm_legacy_debug_state_t));
5ba3f43e 668 }
0a7de745
A
669
670 *count = ARM_LEGACY_DEBUG_STATE_COUNT;
671 break;
672 }
5ba3f43e 673 case ARM_DEBUG_STATE32:{
0a7de745
A
674 arm_debug_state32_t *state;
675 arm_debug_state32_t *thread_state;
676
677 if (*count < ARM_DEBUG_STATE32_COUNT) {
678 return KERN_INVALID_ARGUMENT;
5ba3f43e
A
679 }
680
0a7de745
A
681 if (thread_is_64bit_data(thread)) {
682 return KERN_INVALID_ARGUMENT;
683 }
684
685 state = (arm_debug_state32_t *) tstate;
686 thread_state = find_debug_state32(thread);
687
688 if (thread_state == NULL) {
689 bzero(state, sizeof(arm_debug_state32_t));
690 } else {
691 bcopy(thread_state, state, sizeof(arm_debug_state32_t));
692 }
693
694 *count = ARM_DEBUG_STATE32_COUNT;
695 break;
696 }
697
5ba3f43e 698 case ARM_DEBUG_STATE64:{
0a7de745
A
699 arm_debug_state64_t *state;
700 arm_debug_state64_t *thread_state;
701
702 if (*count < ARM_DEBUG_STATE64_COUNT) {
703 return KERN_INVALID_ARGUMENT;
5ba3f43e
A
704 }
705
0a7de745
A
706 if (!thread_is_64bit_data(thread)) {
707 return KERN_INVALID_ARGUMENT;
708 }
709
710 state = (arm_debug_state64_t *) tstate;
711 thread_state = find_debug_state64(thread);
712
713 if (thread_state == NULL) {
714 bzero(state, sizeof(arm_debug_state64_t));
715 } else {
716 bcopy(thread_state, state, sizeof(arm_debug_state64_t));
717 }
718
719 *count = ARM_DEBUG_STATE64_COUNT;
720 break;
721 }
722
5ba3f43e 723 case ARM_VFP_STATE:{
0a7de745
A
724 struct arm_vfp_state *state;
725 arm_neon_saved_state32_t *thread_state;
cb323159 726 unsigned int max;
0a7de745
A
727
728 if (*count < ARM_VFP_STATE_COUNT) {
729 if (*count < ARM_VFPV2_STATE_COUNT) {
730 return KERN_INVALID_ARGUMENT;
731 } else {
732 *count = ARM_VFPV2_STATE_COUNT;
5ba3f43e 733 }
0a7de745 734 }
5ba3f43e 735
0a7de745
A
736 if (*count == ARM_VFPV2_STATE_COUNT) {
737 max = 32;
738 } else {
739 max = 64;
740 }
5ba3f43e 741
0a7de745
A
742 state = (struct arm_vfp_state *) tstate;
743 thread_state = neon_state32(thread->machine.uNeon);
744 /* ARM64 TODO: set fpsr and fpcr from state->fpscr */
5ba3f43e 745
0a7de745
A
746 bcopy(thread_state, state, (max + 1) * sizeof(uint32_t));
747 *count = (max + 1);
748 break;
749 }
5ba3f43e
A
750 case ARM_NEON_STATE:{
751 arm_neon_state_t *state;
752 arm_neon_saved_state32_t *thread_state;
753
0a7de745
A
754 if (*count < ARM_NEON_STATE_COUNT) {
755 return KERN_INVALID_ARGUMENT;
756 }
5ba3f43e 757
0a7de745
A
758 if (thread_is_64bit_data(thread)) {
759 return KERN_INVALID_ARGUMENT;
760 }
5ba3f43e
A
761
762 state = (arm_neon_state_t *)tstate;
763 thread_state = neon_state32(thread->machine.uNeon);
764
765 assert(sizeof(*thread_state) == sizeof(*state));
766 bcopy(thread_state, state, sizeof(arm_neon_state_t));
767
768 *count = ARM_NEON_STATE_COUNT;
769 break;
0a7de745 770 }
5ba3f43e
A
771
772 case ARM_NEON_STATE64:{
773 arm_neon_state64_t *state;
774 arm_neon_saved_state64_t *thread_state;
775
0a7de745
A
776 if (*count < ARM_NEON_STATE64_COUNT) {
777 return KERN_INVALID_ARGUMENT;
778 }
5ba3f43e 779
0a7de745
A
780 if (!thread_is_64bit_data(thread)) {
781 return KERN_INVALID_ARGUMENT;
782 }
5ba3f43e
A
783
784 state = (arm_neon_state64_t *)tstate;
785 thread_state = neon_state64(thread->machine.uNeon);
786
787 /* For now, these are identical */
788 assert(sizeof(*state) == sizeof(*thread_state));
789 bcopy(thread_state, state, sizeof(arm_neon_state64_t));
790
791 *count = ARM_NEON_STATE64_COUNT;
792 break;
0a7de745 793 }
5ba3f43e 794
cb323159
A
795
796 case ARM_PAGEIN_STATE: {
797 arm_pagein_state_t *state;
798
799 if (*count < ARM_PAGEIN_STATE_COUNT) {
800 return KERN_INVALID_ARGUMENT;
801 }
802
803 state = (arm_pagein_state_t *)tstate;
804 state->__pagein_error = thread->t_pagein_error;
805
806 *count = ARM_PAGEIN_STATE_COUNT;
807 break;
808 }
809
810
5ba3f43e 811 default:
0a7de745 812 return KERN_INVALID_ARGUMENT;
5ba3f43e 813 }
0a7de745 814 return KERN_SUCCESS;
5ba3f43e
A
815}
816
817
/*
 * Routine: machine_thread_get_kern_state
 *
 * Export the register state captured at interrupt time for the current
 * kernel thread.  Only the thread-state flavors are supported, and only
 * while an interrupt frame is actually saved on this CPU.
 */
kern_return_t
machine_thread_get_kern_state(thread_t thread,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t * count)
{
	/*
	 * This works only for an interrupted kernel thread
	 */
	if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL) {
		return KERN_FAILURE;
	}

	switch (flavor) {
	case ARM_THREAD_STATE:
	{
		kern_return_t rn = handle_get_arm_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
		if (rn) {
			return rn;
		}
		break;
	}
	case ARM_THREAD_STATE32:
	{
		kern_return_t rn = handle_get_arm32_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
		if (rn) {
			return rn;
		}
		break;
	}
#if __arm64__
	case ARM_THREAD_STATE64:
	{
		kern_return_t rn = handle_get_arm64_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
		if (rn) {
			return rn;
		}
		break;
	}
#endif
	default:
		return KERN_INVALID_ARGUMENT;
	}
	return KERN_SUCCESS;
}
867
868void
869machine_thread_switch_addrmode(thread_t thread)
870{
d9a64523 871 if (task_has_64Bit_data(thread->task)) {
5ba3f43e
A
872 thread->machine.upcb->ash.flavor = ARM_SAVED_STATE64;
873 thread->machine.upcb->ash.count = ARM_SAVED_STATE64_COUNT;
874 thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
875 thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;
876
877 /*
878 * Reinitialize the NEON state.
879 */
880 bzero(&thread->machine.uNeon->uns, sizeof(thread->machine.uNeon->uns));
881 thread->machine.uNeon->ns_64.fpcr = FPCR_DEFAULT;
882 } else {
883 thread->machine.upcb->ash.flavor = ARM_SAVED_STATE32;
884 thread->machine.upcb->ash.count = ARM_SAVED_STATE32_COUNT;
885 thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
886 thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
887
888 /*
889 * Reinitialize the NEON state.
890 */
891 bzero(&thread->machine.uNeon->uns, sizeof(thread->machine.uNeon->uns));
892 thread->machine.uNeon->ns_32.fpcr = FPCR_DEFAULT_32;
893 }
894}
895
/* NOTE(review): no caller visible in this chunk; presumably reads hardware debug status — confirm at definition site. */
extern long long arm_debug_get(void);
897
898/*
cb323159 899 * Routine: machine_thread_set_state
5ba3f43e
A
900 *
901 */
902kern_return_t
cb323159
A
903machine_thread_set_state(thread_t thread,
904 thread_flavor_t flavor,
905 thread_state_t tstate,
906 mach_msg_type_number_t count)
5ba3f43e
A
907{
908 kern_return_t rn;
909
910 switch (flavor) {
911 case ARM_THREAD_STATE:
912 rn = handle_set_arm_thread_state(tstate, count, thread->machine.upcb);
0a7de745
A
913 if (rn) {
914 return rn;
915 }
5ba3f43e
A
916 break;
917
918 case ARM_THREAD_STATE32:
0a7de745
A
919 if (thread_is_64bit_data(thread)) {
920 return KERN_INVALID_ARGUMENT;
921 }
5ba3f43e
A
922
923 rn = handle_set_arm32_thread_state(tstate, count, thread->machine.upcb);
0a7de745
A
924 if (rn) {
925 return rn;
926 }
5ba3f43e
A
927 break;
928
929#if __arm64__
930 case ARM_THREAD_STATE64:
0a7de745
A
931 if (!thread_is_64bit_data(thread)) {
932 return KERN_INVALID_ARGUMENT;
933 }
5ba3f43e
A
934
935 rn = handle_set_arm64_thread_state(tstate, count, thread->machine.upcb);
0a7de745
A
936 if (rn) {
937 return rn;
938 }
5ba3f43e
A
939 break;
940#endif
941 case ARM_EXCEPTION_STATE:{
0a7de745
A
942 if (count != ARM_EXCEPTION_STATE_COUNT) {
943 return KERN_INVALID_ARGUMENT;
5ba3f43e 944 }
0a7de745
A
945 if (thread_is_64bit_data(thread)) {
946 return KERN_INVALID_ARGUMENT;
947 }
948
949 break;
950 }
5ba3f43e 951 case ARM_EXCEPTION_STATE64:{
0a7de745
A
952 if (count != ARM_EXCEPTION_STATE64_COUNT) {
953 return KERN_INVALID_ARGUMENT;
954 }
955 if (!thread_is_64bit_data(thread)) {
956 return KERN_INVALID_ARGUMENT;
957 }
5ba3f43e 958
0a7de745
A
959 break;
960 }
961 case ARM_DEBUG_STATE:
962 {
963 arm_legacy_debug_state_t *state;
964 boolean_t enabled = FALSE;
965 unsigned int i;
5ba3f43e 966
0a7de745
A
967 if (count != ARM_LEGACY_DEBUG_STATE_COUNT) {
968 return KERN_INVALID_ARGUMENT;
969 }
970 if (thread_is_64bit_data(thread)) {
971 return KERN_INVALID_ARGUMENT;
5ba3f43e 972 }
5ba3f43e 973
0a7de745 974 state = (arm_legacy_debug_state_t *) tstate;
5ba3f43e 975
0a7de745
A
976 for (i = 0; i < 16; i++) {
977 /* do not allow context IDs to be set */
978 if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
979 || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
980 || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
981 || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
982 return KERN_PROTECTION_FAILURE;
983 }
984 if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
985 || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
986 enabled = TRUE;
987 }
988 }
5ba3f43e 989
0a7de745
A
990 if (!enabled) {
991 arm_debug_state32_t *thread_state = find_debug_state32(thread);
992 if (thread_state != NULL) {
993 void *pTmp = thread->machine.DebugData;
994 thread->machine.DebugData = NULL;
995 zfree(ads_zone, pTmp);
5ba3f43e 996 }
0a7de745
A
997 } else {
998 arm_debug_state32_t *thread_state = find_debug_state32(thread);
999 if (thread_state == NULL) {
1000 thread->machine.DebugData = zalloc(ads_zone);
1001 bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData));
1002 thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE32;
1003 thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE32_COUNT;
1004 thread_state = find_debug_state32(thread);
5ba3f43e 1005 }
0a7de745
A
1006 assert(NULL != thread_state);
1007
1008 for (i = 0; i < 16; i++) {
1009 /* set appropriate privilege; mask out unknown bits */
1010 thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
1011 | ARM_DBGBCR_MATCH_MASK
1012 | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
1013 | ARM_DBG_CR_ENABLE_MASK))
1014 | ARM_DBGBCR_TYPE_IVA
1015 | ARM_DBG_CR_LINKED_UNLINKED
1016 | ARM_DBG_CR_SECURITY_STATE_BOTH
1017 | ARM_DBG_CR_MODE_CONTROL_USER;
1018 thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK;
1019 thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
1020 | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
1021 | ARM_DBGWCR_ACCESS_CONTROL_MASK
1022 | ARM_DBG_CR_ENABLE_MASK))
1023 | ARM_DBG_CR_LINKED_UNLINKED
1024 | ARM_DBG_CR_SECURITY_STATE_BOTH
1025 | ARM_DBG_CR_MODE_CONTROL_USER;
1026 thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK;
5ba3f43e 1027 }
0a7de745
A
1028
1029 thread_state->mdscr_el1 = 0ULL; // Legacy customers issuing ARM_DEBUG_STATE dont drive single stepping.
5ba3f43e 1030 }
0a7de745
A
1031
1032 if (thread == current_thread()) {
1033 arm_debug_set32(thread->machine.DebugData);
1034 }
1035
1036 break;
1037 }
5ba3f43e
A
1038 case ARM_DEBUG_STATE32:
1039 /* ARM64_TODO subtle bcr/wcr semantic differences e.g. wcr and ARM_DBGBCR_TYPE_IVA */
0a7de745
A
1040 {
1041 arm_debug_state32_t *state;
1042 boolean_t enabled = FALSE;
1043 unsigned int i;
5ba3f43e 1044
0a7de745
A
1045 if (count != ARM_DEBUG_STATE32_COUNT) {
1046 return KERN_INVALID_ARGUMENT;
1047 }
1048 if (thread_is_64bit_data(thread)) {
1049 return KERN_INVALID_ARGUMENT;
1050 }
1051
1052 state = (arm_debug_state32_t *) tstate;
5ba3f43e 1053
0a7de745
A
1054 if (state->mdscr_el1 & 0x1) {
1055 enabled = TRUE;
1056 }
5ba3f43e 1057
0a7de745
A
1058 for (i = 0; i < 16; i++) {
1059 /* do not allow context IDs to be set */
1060 if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
1061 || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
1062 || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
1063 || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
1064 return KERN_PROTECTION_FAILURE;
1065 }
1066 if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
1067 || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
5ba3f43e 1068 enabled = TRUE;
0a7de745
A
1069 }
1070 }
5ba3f43e 1071
0a7de745
A
1072 if (!enabled) {
1073 arm_debug_state32_t *thread_state = find_debug_state32(thread);
1074 if (thread_state != NULL) {
1075 void *pTmp = thread->machine.DebugData;
1076 thread->machine.DebugData = NULL;
1077 zfree(ads_zone, pTmp);
1078 }
1079 } else {
1080 arm_debug_state32_t *thread_state = find_debug_state32(thread);
1081 if (thread_state == NULL) {
1082 thread->machine.DebugData = zalloc(ads_zone);
1083 bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData));
1084 thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE32;
1085 thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE32_COUNT;
1086 thread_state = find_debug_state32(thread);
5ba3f43e 1087 }
0a7de745
A
1088 assert(NULL != thread_state);
1089
1090 if (state->mdscr_el1 & 0x1) {
1091 thread_state->mdscr_el1 |= 0x1;
5ba3f43e 1092 } else {
0a7de745 1093 thread_state->mdscr_el1 &= ~0x1;
5ba3f43e 1094 }
0a7de745
A
1095
1096 for (i = 0; i < 16; i++) {
1097 /* set appropriate privilege; mask out unknown bits */
1098 thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
1099 | ARM_DBGBCR_MATCH_MASK
1100 | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
1101 | ARM_DBG_CR_ENABLE_MASK))
1102 | ARM_DBGBCR_TYPE_IVA
1103 | ARM_DBG_CR_LINKED_UNLINKED
1104 | ARM_DBG_CR_SECURITY_STATE_BOTH
1105 | ARM_DBG_CR_MODE_CONTROL_USER;
1106 thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK;
1107 thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
1108 | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
1109 | ARM_DBGWCR_ACCESS_CONTROL_MASK
1110 | ARM_DBG_CR_ENABLE_MASK))
1111 | ARM_DBG_CR_LINKED_UNLINKED
1112 | ARM_DBG_CR_SECURITY_STATE_BOTH
1113 | ARM_DBG_CR_MODE_CONTROL_USER;
1114 thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK;
5ba3f43e 1115 }
5ba3f43e
A
1116 }
1117
0a7de745
A
1118 if (thread == current_thread()) {
1119 arm_debug_set32(thread->machine.DebugData);
1120 }
1121
1122 break;
1123 }
1124
5ba3f43e 1125 case ARM_DEBUG_STATE64:
0a7de745
A
1126 {
1127 arm_debug_state64_t *state;
1128 boolean_t enabled = FALSE;
cb323159 1129 unsigned int i;
5ba3f43e 1130
0a7de745
A
1131 if (count != ARM_DEBUG_STATE64_COUNT) {
1132 return KERN_INVALID_ARGUMENT;
1133 }
1134 if (!thread_is_64bit_data(thread)) {
1135 return KERN_INVALID_ARGUMENT;
1136 }
5ba3f43e 1137
0a7de745
A
1138 state = (arm_debug_state64_t *) tstate;
1139
1140 if (state->mdscr_el1 & 0x1) {
1141 enabled = TRUE;
1142 }
5ba3f43e 1143
0a7de745
A
1144 for (i = 0; i < 16; i++) {
1145 /* do not allow context IDs to be set */
1146 if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
1147 || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
1148 || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
1149 return KERN_PROTECTION_FAILURE;
1150 }
1151 if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
1152 || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
5ba3f43e 1153 enabled = TRUE;
0a7de745
A
1154 }
1155 }
5ba3f43e 1156
0a7de745
A
1157 if (!enabled) {
1158 arm_debug_state64_t *thread_state = find_debug_state64(thread);
1159 if (thread_state != NULL) {
1160 void *pTmp = thread->machine.DebugData;
1161 thread->machine.DebugData = NULL;
1162 zfree(ads_zone, pTmp);
5ba3f43e 1163 }
0a7de745
A
1164 } else {
1165 arm_debug_state64_t *thread_state = find_debug_state64(thread);
1166 if (thread_state == NULL) {
1167 thread->machine.DebugData = zalloc(ads_zone);
1168 bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData));
1169 thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE64;
1170 thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE64_COUNT;
1171 thread_state = find_debug_state64(thread);
1172 }
1173 assert(NULL != thread_state);
5ba3f43e 1174
0a7de745
A
1175 if (state->mdscr_el1 & 0x1) {
1176 thread_state->mdscr_el1 |= 0x1;
5ba3f43e 1177 } else {
0a7de745 1178 thread_state->mdscr_el1 &= ~0x1;
5ba3f43e 1179 }
0a7de745
A
1180
1181 for (i = 0; i < 16; i++) {
1182 /* set appropriate privilege; mask out unknown bits */
1183 thread_state->bcr[i] = (state->bcr[i] & (0 /* Was ARM_DBG_CR_ADDRESS_MASK_MASK deprecated in v8 */
cb323159 1184 | 0 /* Was ARM_DBGBCR_MATCH_MASK, ignored in AArch64 state */
0a7de745
A
1185 | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
1186 | ARM_DBG_CR_ENABLE_MASK))
1187 | ARM_DBGBCR_TYPE_IVA
1188 | ARM_DBG_CR_LINKED_UNLINKED
1189 | ARM_DBG_CR_SECURITY_STATE_BOTH
1190 | ARM_DBG_CR_MODE_CONTROL_USER;
1191 thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK64;
1192 thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
1193 | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
1194 | ARM_DBGWCR_ACCESS_CONTROL_MASK
1195 | ARM_DBG_CR_ENABLE_MASK))
1196 | ARM_DBG_CR_LINKED_UNLINKED
1197 | ARM_DBG_CR_SECURITY_STATE_BOTH
1198 | ARM_DBG_CR_MODE_CONTROL_USER;
1199 thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK64;
5ba3f43e 1200 }
5ba3f43e
A
1201 }
1202
0a7de745
A
1203 if (thread == current_thread()) {
1204 arm_debug_set64(thread->machine.DebugData);
1205 }
1206
1207 break;
1208 }
1209
5ba3f43e 1210 case ARM_VFP_STATE:{
0a7de745
A
1211 struct arm_vfp_state *state;
1212 arm_neon_saved_state32_t *thread_state;
1213 unsigned int max;
5ba3f43e 1214
0a7de745
A
1215 if (count != ARM_VFP_STATE_COUNT && count != ARM_VFPV2_STATE_COUNT) {
1216 return KERN_INVALID_ARGUMENT;
1217 }
5ba3f43e 1218
0a7de745
A
1219 if (count == ARM_VFPV2_STATE_COUNT) {
1220 max = 32;
1221 } else {
1222 max = 64;
1223 }
5ba3f43e 1224
0a7de745
A
1225 state = (struct arm_vfp_state *) tstate;
1226 thread_state = neon_state32(thread->machine.uNeon);
1227 /* ARM64 TODO: combine fpsr and fpcr into state->fpscr */
5ba3f43e 1228
0a7de745 1229 bcopy(state, thread_state, (max + 1) * sizeof(uint32_t));
5ba3f43e 1230
0a7de745
A
1231 thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
1232 thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
1233 break;
1234 }
5ba3f43e
A
1235
1236 case ARM_NEON_STATE:{
1237 arm_neon_state_t *state;
1238 arm_neon_saved_state32_t *thread_state;
1239
0a7de745
A
1240 if (count != ARM_NEON_STATE_COUNT) {
1241 return KERN_INVALID_ARGUMENT;
1242 }
5ba3f43e 1243
0a7de745
A
1244 if (thread_is_64bit_data(thread)) {
1245 return KERN_INVALID_ARGUMENT;
1246 }
5ba3f43e
A
1247
1248 state = (arm_neon_state_t *)tstate;
1249 thread_state = neon_state32(thread->machine.uNeon);
1250
1251 assert(sizeof(*state) == sizeof(*thread_state));
1252 bcopy(state, thread_state, sizeof(arm_neon_state_t));
1253
1254 thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
1255 thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
1256 break;
0a7de745 1257 }
5ba3f43e
A
1258
1259 case ARM_NEON_STATE64:{
1260 arm_neon_state64_t *state;
1261 arm_neon_saved_state64_t *thread_state;
1262
0a7de745
A
1263 if (count != ARM_NEON_STATE64_COUNT) {
1264 return KERN_INVALID_ARGUMENT;
1265 }
5ba3f43e 1266
0a7de745
A
1267 if (!thread_is_64bit_data(thread)) {
1268 return KERN_INVALID_ARGUMENT;
1269 }
5ba3f43e
A
1270
1271 state = (arm_neon_state64_t *)tstate;
1272 thread_state = neon_state64(thread->machine.uNeon);
1273
1274 assert(sizeof(*state) == sizeof(*thread_state));
1275 bcopy(state, thread_state, sizeof(arm_neon_state64_t));
1276
1277 thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
1278 thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;
1279 break;
0a7de745 1280 }
5ba3f43e 1281
cb323159 1282
5ba3f43e 1283 default:
0a7de745 1284 return KERN_INVALID_ARGUMENT;
5ba3f43e 1285 }
0a7de745 1286 return KERN_SUCCESS;
5ba3f43e
A
1287}
1288
cb323159
A
1289mach_vm_address_t
1290machine_thread_pc(thread_t thread)
1291{
1292 struct arm_saved_state *ss = get_user_regs(thread);
1293 return (mach_vm_address_t)get_saved_state_pc(ss);
1294}
1295
1296void
1297machine_thread_reset_pc(thread_t thread, mach_vm_address_t pc)
1298{
1299 set_saved_state_pc(get_user_regs(thread), (register_t)pc);
1300}
1301
5ba3f43e 1302/*
cb323159 1303 * Routine: machine_thread_state_initialize
5ba3f43e
A
1304 *
1305 */
1306kern_return_t
cb323159 1307machine_thread_state_initialize(thread_t thread)
5ba3f43e
A
1308{
1309 arm_context_t *context = thread->machine.contextData;
1310
0a7de745 1311 /*
5ba3f43e 1312 * Should always be set up later. For a kernel thread, we don't care
0a7de745 1313 * about this state. For a user thread, we'll set the state up in
5ba3f43e
A
1314 * setup_wqthread, bsdthread_create, load_main(), or load_unixthread().
1315 */
1316
1317 if (context != NULL) {
1318 bzero(&context->ss.uss, sizeof(context->ss.uss));
1319 bzero(&context->ns.uns, sizeof(context->ns.uns));
1320
1321 if (context->ns.nsh.flavor == ARM_NEON_SAVED_STATE64) {
1322 context->ns.ns_64.fpcr = FPCR_DEFAULT;
1323 } else {
1324 context->ns.ns_32.fpcr = FPCR_DEFAULT_32;
1325 }
1326 }
1327
1328 thread->machine.DebugData = NULL;
1329
cb323159
A
1330#if defined(HAS_APPLE_PAC)
1331 /* Sign the initial user-space thread state */
1332 if (thread->machine.upcb != NULL) {
bca245ac 1333 boolean_t intr = ml_set_interrupts_enabled(FALSE);
cb323159 1334 ml_sign_thread_state(thread->machine.upcb, 0, 0, 0, 0, 0);
bca245ac 1335 ml_set_interrupts_enabled(intr);
cb323159
A
1336 }
1337#endif /* defined(HAS_APPLE_PAC) */
d9a64523 1338
5ba3f43e
A
1339 return KERN_SUCCESS;
1340}
1341
1342/*
cb323159 1343 * Routine: machine_thread_dup
5ba3f43e
A
1344 *
1345 */
1346kern_return_t
cb323159
A
1347machine_thread_dup(thread_t self,
1348 thread_t target,
1349 __unused boolean_t is_corpse)
5ba3f43e
A
1350{
1351 struct arm_saved_state *self_saved_state;
1352 struct arm_saved_state *target_saved_state;
1353
1354 target->machine.cthread_self = self->machine.cthread_self;
5ba3f43e
A
1355
1356 self_saved_state = self->machine.upcb;
1357 target_saved_state = target->machine.upcb;
1358 bcopy(self_saved_state, target_saved_state, sizeof(struct arm_saved_state));
cb323159
A
1359#if defined(HAS_APPLE_PAC)
1360 if (!is_corpse && is_saved_state64(self_saved_state)) {
1361 check_and_sign_copied_thread_state(target_saved_state, self_saved_state);
1362 }
1363#endif /* defined(HAS_APPLE_PAC) */
5ba3f43e 1364
0a7de745 1365 return KERN_SUCCESS;
5ba3f43e
A
1366}
1367
1368/*
cb323159 1369 * Routine: get_user_regs
5ba3f43e
A
1370 *
1371 */
1372struct arm_saved_state *
cb323159 1373get_user_regs(thread_t thread)
5ba3f43e 1374{
0a7de745 1375 return thread->machine.upcb;
5ba3f43e
A
1376}
1377
1378arm_neon_saved_state_t *
cb323159 1379get_user_neon_regs(thread_t thread)
5ba3f43e 1380{
0a7de745 1381 return thread->machine.uNeon;
5ba3f43e
A
1382}
1383
1384/*
cb323159 1385 * Routine: find_user_regs
5ba3f43e
A
1386 *
1387 */
1388struct arm_saved_state *
cb323159 1389find_user_regs(thread_t thread)
5ba3f43e 1390{
0a7de745 1391 return thread->machine.upcb;
5ba3f43e
A
1392}
1393
1394/*
cb323159 1395 * Routine: find_kern_regs
5ba3f43e
A
1396 *
1397 */
1398struct arm_saved_state *
cb323159 1399find_kern_regs(thread_t thread)
5ba3f43e
A
1400{
1401 /*
0a7de745
A
1402 * This works only for an interrupted kernel thread
1403 */
1404 if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL) {
1405 return (struct arm_saved_state *) NULL;
1406 } else {
1407 return getCpuDatap()->cpu_int_state;
1408 }
5ba3f43e
A
1409}
1410
1411arm_debug_state32_t *
cb323159 1412find_debug_state32(thread_t thread)
5ba3f43e 1413{
0a7de745 1414 if (thread && thread->machine.DebugData) {
5ba3f43e 1415 return &(thread->machine.DebugData->uds.ds32);
0a7de745 1416 } else {
5ba3f43e 1417 return NULL;
0a7de745 1418 }
5ba3f43e
A
1419}
1420
1421arm_debug_state64_t *
cb323159 1422find_debug_state64(thread_t thread)
5ba3f43e 1423{
0a7de745 1424 if (thread && thread->machine.DebugData) {
5ba3f43e 1425 return &(thread->machine.DebugData->uds.ds64);
0a7de745 1426 } else {
5ba3f43e 1427 return NULL;
0a7de745 1428 }
5ba3f43e
A
1429}
1430
1431/*
cb323159 1432 * Routine: thread_userstack
5ba3f43e
A
1433 *
1434 */
1435kern_return_t
cb323159
A
1436thread_userstack(__unused thread_t thread,
1437 int flavor,
1438 thread_state_t tstate,
1439 unsigned int count,
1440 mach_vm_offset_t * user_stack,
1441 int * customstack,
1442 boolean_t is_64bit_data
1443 )
5ba3f43e
A
1444{
1445 register_t sp;
1446
1447 switch (flavor) {
1448 case ARM_THREAD_STATE:
1449 if (count == ARM_UNIFIED_THREAD_STATE_COUNT) {
1450#if __arm64__
d9a64523 1451 if (is_64bit_data) {
5ba3f43e
A
1452 sp = ((arm_unified_thread_state_t *)tstate)->ts_64.sp;
1453 } else
1454#endif
1455 {
1456 sp = ((arm_unified_thread_state_t *)tstate)->ts_32.sp;
1457 }
1458
1459 break;
1460 }
1461
1462 /* INTENTIONAL FALL THROUGH (see machine_thread_set_state) */
1463 case ARM_THREAD_STATE32:
0a7de745
A
1464 if (count != ARM_THREAD_STATE32_COUNT) {
1465 return KERN_INVALID_ARGUMENT;
1466 }
1467 if (is_64bit_data) {
1468 return KERN_INVALID_ARGUMENT;
1469 }
5ba3f43e
A
1470
1471 sp = ((arm_thread_state32_t *)tstate)->sp;
1472 break;
1473#if __arm64__
1474 case ARM_THREAD_STATE64:
0a7de745
A
1475 if (count != ARM_THREAD_STATE64_COUNT) {
1476 return KERN_INVALID_ARGUMENT;
1477 }
1478 if (!is_64bit_data) {
1479 return KERN_INVALID_ARGUMENT;
1480 }
5ba3f43e
A
1481
1482 sp = ((arm_thread_state32_t *)tstate)->sp;
1483 break;
1484#endif
1485 default:
0a7de745 1486 return KERN_INVALID_ARGUMENT;
5ba3f43e
A
1487 }
1488
1489 if (sp) {
1490 *user_stack = CAST_USER_ADDR_T(sp);
0a7de745 1491 if (customstack) {
5ba3f43e 1492 *customstack = 1;
0a7de745 1493 }
5ba3f43e
A
1494 } else {
1495 *user_stack = CAST_USER_ADDR_T(USRSTACK64);
0a7de745 1496 if (customstack) {
5ba3f43e 1497 *customstack = 0;
0a7de745 1498 }
5ba3f43e
A
1499 }
1500
0a7de745 1501 return KERN_SUCCESS;
5ba3f43e
A
1502}
1503
1504/*
1505 * thread_userstackdefault:
1506 *
1507 * Return the default stack location for the
1508 * thread, if otherwise unknown.
1509 */
1510kern_return_t
cb323159
A
1511thread_userstackdefault(mach_vm_offset_t * default_user_stack,
1512 boolean_t is64bit)
5ba3f43e
A
1513{
1514 if (is64bit) {
1515 *default_user_stack = USRSTACK64;
1516 } else {
1517 *default_user_stack = USRSTACK;
1518 }
1519
0a7de745 1520 return KERN_SUCCESS;
5ba3f43e
A
1521}
1522
1523/*
cb323159 1524 * Routine: thread_setuserstack
5ba3f43e
A
1525 *
1526 */
1527void
cb323159
A
1528thread_setuserstack(thread_t thread,
1529 mach_vm_address_t user_stack)
5ba3f43e
A
1530{
1531 struct arm_saved_state *sv;
1532
1533 sv = get_user_regs(thread);
1534
1535 set_saved_state_sp(sv, user_stack);
1536
1537 return;
1538}
1539
1540/*
cb323159 1541 * Routine: thread_adjuserstack
5ba3f43e
A
1542 *
1543 */
1544uint64_t
cb323159
A
1545thread_adjuserstack(thread_t thread,
1546 int adjust)
5ba3f43e
A
1547{
1548 struct arm_saved_state *sv;
1549 uint64_t sp;
1550
1551 sv = get_user_regs(thread);
1552
1553 sp = get_saved_state_sp(sv);
1554 sp += adjust;
1555 set_saved_state_sp(sv, sp);;
1556
1557 return sp;
1558}
1559
1560/*
cb323159 1561 * Routine: thread_setentrypoint
5ba3f43e
A
1562 *
1563 */
1564void
cb323159
A
1565thread_setentrypoint(thread_t thread,
1566 mach_vm_offset_t entry)
5ba3f43e
A
1567{
1568 struct arm_saved_state *sv;
1569
1570 sv = get_user_regs(thread);
1571
1572 set_saved_state_pc(sv, entry);
1573
1574 return;
1575}
1576
1577/*
cb323159 1578 * Routine: thread_entrypoint
5ba3f43e
A
1579 *
1580 */
1581kern_return_t
cb323159
A
1582thread_entrypoint(__unused thread_t thread,
1583 int flavor,
1584 thread_state_t tstate,
eb6b6ca3 1585 unsigned int count,
cb323159
A
1586 mach_vm_offset_t * entry_point
1587 )
5ba3f43e
A
1588{
1589 switch (flavor) {
1590 case ARM_THREAD_STATE:
0a7de745
A
1591 {
1592 struct arm_thread_state *state;
5ba3f43e 1593
eb6b6ca3
A
1594 if (count != ARM_THREAD_STATE_COUNT) {
1595 return KERN_INVALID_ARGUMENT;
1596 }
1597
0a7de745 1598 state = (struct arm_thread_state *) tstate;
5ba3f43e 1599
0a7de745
A
1600 /*
1601 * If a valid entry point is specified, use it.
1602 */
1603 if (state->pc) {
1604 *entry_point = CAST_USER_ADDR_T(state->pc);
1605 } else {
1606 *entry_point = CAST_USER_ADDR_T(VM_MIN_ADDRESS);
5ba3f43e 1607 }
0a7de745
A
1608 }
1609 break;
5ba3f43e
A
1610
1611 case ARM_THREAD_STATE64:
0a7de745
A
1612 {
1613 struct arm_thread_state64 *state;
5ba3f43e 1614
eb6b6ca3
A
1615 if (count != ARM_THREAD_STATE64_COUNT) {
1616 return KERN_INVALID_ARGUMENT;
1617 }
1618
0a7de745 1619 state = (struct arm_thread_state64*) tstate;
5ba3f43e 1620
0a7de745
A
1621 /*
1622 * If a valid entry point is specified, use it.
1623 */
1624 if (state->pc) {
1625 *entry_point = CAST_USER_ADDR_T(state->pc);
1626 } else {
1627 *entry_point = CAST_USER_ADDR_T(VM_MIN_ADDRESS);
5ba3f43e 1628 }
0a7de745
A
1629
1630 break;
1631 }
5ba3f43e 1632 default:
0a7de745 1633 return KERN_INVALID_ARGUMENT;
5ba3f43e
A
1634 }
1635
0a7de745 1636 return KERN_SUCCESS;
5ba3f43e
A
1637}
1638
1639
1640/*
cb323159 1641 * Routine: thread_set_child
5ba3f43e
A
1642 *
1643 */
1644void
cb323159
A
1645thread_set_child(thread_t child,
1646 int pid)
5ba3f43e
A
1647{
1648 struct arm_saved_state *child_state;
1649
1650 child_state = get_user_regs(child);
1651
1652 set_saved_state_reg(child_state, 0, pid);
1653 set_saved_state_reg(child_state, 1, 1ULL);
1654}
1655
1656
1657/*
cb323159 1658 * Routine: thread_set_parent
5ba3f43e
A
1659 *
1660 */
1661void
cb323159
A
1662thread_set_parent(thread_t parent,
1663 int pid)
5ba3f43e
A
1664{
1665 struct arm_saved_state *parent_state;
1666
1667 parent_state = get_user_regs(parent);
1668
1669 set_saved_state_reg(parent_state, 0, pid);
1670 set_saved_state_reg(parent_state, 1, 0);
1671}
1672
1673
/* Snapshot of a thread's user state, saved by act_thread_csave() and
 * restored by act_thread_catt(). */
struct arm_act_context {
	struct arm_unified_thread_state ss;  /* integer thread state */
#if __ARM_VFP__
	struct arm_neon_saved_state ns;      /* NEON/VFP state */
#endif
};
1680
1681/*
cb323159 1682 * Routine: act_thread_csave
5ba3f43e
A
1683 *
1684 */
cb323159 1685void *
5ba3f43e
A
1686act_thread_csave(void)
1687{
1688 struct arm_act_context *ic;
1689 kern_return_t kret;
1690 unsigned int val;
1691 thread_t thread = current_thread();
1692
1693 ic = (struct arm_act_context *) kalloc(sizeof(struct arm_act_context));
0a7de745
A
1694 if (ic == (struct arm_act_context *) NULL) {
1695 return (void *) 0;
1696 }
5ba3f43e
A
1697
1698 val = ARM_UNIFIED_THREAD_STATE_COUNT;
1699 kret = machine_thread_get_state(thread, ARM_THREAD_STATE, (thread_state_t)&ic->ss, &val);
1700 if (kret != KERN_SUCCESS) {
1701 kfree(ic, sizeof(struct arm_act_context));
0a7de745 1702 return (void *) 0;
5ba3f43e
A
1703 }
1704
1705#if __ARM_VFP__
d9a64523 1706 if (thread_is_64bit_data(thread)) {
5ba3f43e
A
1707 val = ARM_NEON_STATE64_COUNT;
1708 kret = machine_thread_get_state(thread,
0a7de745 1709 ARM_NEON_STATE64,
cb323159 1710 (thread_state_t)&ic->ns,
0a7de745 1711 &val);
5ba3f43e
A
1712 } else {
1713 val = ARM_NEON_STATE_COUNT;
1714 kret = machine_thread_get_state(thread,
0a7de745 1715 ARM_NEON_STATE,
cb323159 1716 (thread_state_t)&ic->ns,
0a7de745 1717 &val);
5ba3f43e
A
1718 }
1719 if (kret != KERN_SUCCESS) {
1720 kfree(ic, sizeof(struct arm_act_context));
0a7de745 1721 return (void *) 0;
5ba3f43e
A
1722 }
1723#endif
0a7de745 1724 return ic;
5ba3f43e
A
1725}
1726
1727/*
cb323159 1728 * Routine: act_thread_catt
5ba3f43e
A
1729 *
1730 */
1731void
cb323159 1732act_thread_catt(void * ctx)
5ba3f43e
A
1733{
1734 struct arm_act_context *ic;
1735 kern_return_t kret;
1736 thread_t thread = current_thread();
1737
1738 ic = (struct arm_act_context *) ctx;
0a7de745 1739 if (ic == (struct arm_act_context *) NULL) {
5ba3f43e 1740 return;
0a7de745 1741 }
5ba3f43e
A
1742
1743 kret = machine_thread_set_state(thread, ARM_THREAD_STATE, (thread_state_t)&ic->ss, ARM_UNIFIED_THREAD_STATE_COUNT);
0a7de745 1744 if (kret != KERN_SUCCESS) {
5ba3f43e 1745 goto out;
0a7de745 1746 }
5ba3f43e
A
1747
1748#if __ARM_VFP__
d9a64523 1749 if (thread_is_64bit_data(thread)) {
5ba3f43e 1750 kret = machine_thread_set_state(thread,
0a7de745 1751 ARM_NEON_STATE64,
cb323159 1752 (thread_state_t)&ic->ns,
0a7de745 1753 ARM_NEON_STATE64_COUNT);
5ba3f43e
A
1754 } else {
1755 kret = machine_thread_set_state(thread,
0a7de745 1756 ARM_NEON_STATE,
cb323159 1757 (thread_state_t)&ic->ns,
0a7de745 1758 ARM_NEON_STATE_COUNT);
5ba3f43e 1759 }
0a7de745 1760 if (kret != KERN_SUCCESS) {
5ba3f43e 1761 goto out;
0a7de745 1762 }
5ba3f43e
A
1763#endif
1764out:
1765 kfree(ic, sizeof(struct arm_act_context));
1766}
1767
/*
 * Routine: act_thread_cfree
 *
 */
0a7de745 1772void
5ba3f43e
A
1773act_thread_cfree(void *ctx)
1774{
1775 kfree(ctx, sizeof(struct arm_act_context));
1776}
1777
1778kern_return_t
cb323159
A
1779thread_set_wq_state32(thread_t thread,
1780 thread_state_t tstate)
5ba3f43e
A
1781{
1782 arm_thread_state_t *state;
1783 struct arm_saved_state *saved_state;
1784 struct arm_saved_state32 *saved_state_32;
1785 thread_t curth = current_thread();
0a7de745 1786 spl_t s = 0;
5ba3f43e 1787
d9a64523 1788 assert(!thread_is_64bit_data(thread));
5ba3f43e
A
1789
1790 saved_state = thread->machine.upcb;
1791 saved_state_32 = saved_state32(saved_state);
1792
1793 state = (arm_thread_state_t *)tstate;
1794
1795 if (curth != thread) {
1796 s = splsched();
1797 thread_lock(thread);
1798 }
1799
1800 /*
1801 * do not zero saved_state, it can be concurrently accessed
1802 * and zero is not a valid state for some of the registers,
1803 * like sp.
1804 */
1805 thread_state32_to_saved_state(state, saved_state);
1806 saved_state_32->cpsr = PSR64_USER32_DEFAULT;
1807
1808 if (curth != thread) {
1809 thread_unlock(thread);
1810 splx(s);
1811 }
1812
1813 return KERN_SUCCESS;
1814}
1815
1816kern_return_t
cb323159
A
1817thread_set_wq_state64(thread_t thread,
1818 thread_state_t tstate)
5ba3f43e
A
1819{
1820 arm_thread_state64_t *state;
1821 struct arm_saved_state *saved_state;
1822 struct arm_saved_state64 *saved_state_64;
1823 thread_t curth = current_thread();
0a7de745 1824 spl_t s = 0;
5ba3f43e 1825
d9a64523 1826 assert(thread_is_64bit_data(thread));
5ba3f43e
A
1827
1828 saved_state = thread->machine.upcb;
1829 saved_state_64 = saved_state64(saved_state);
1830 state = (arm_thread_state64_t *)tstate;
1831
1832 if (curth != thread) {
1833 s = splsched();
1834 thread_lock(thread);
1835 }
1836
1837 /*
1838 * do not zero saved_state, it can be concurrently accessed
1839 * and zero is not a valid state for some of the registers,
1840 * like sp.
1841 */
1842 thread_state64_to_saved_state(state, saved_state);
d9a64523 1843 set_saved_state_cpsr(saved_state, PSR64_USER64_DEFAULT);
5ba3f43e
A
1844
1845 if (curth != thread) {
1846 thread_unlock(thread);
1847 splx(s);
1848 }
1849
1850 return KERN_SUCCESS;
1851}