]> git.saurik.com Git - apple/xnu.git/blame - osfmk/arm64/status.c
xnu-6153.101.6.tar.gz
[apple/xnu.git] / osfmk / arm64 / status.c
CommitLineData
5ba3f43e
A
1/*
2 * Copyright (c) 2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28#include <debug.h>
29#include <mach/mach_types.h>
30#include <mach/kern_return.h>
31#include <mach/thread_status.h>
32#include <kern/thread.h>
33#include <kern/kalloc.h>
34#include <arm/vmparam.h>
35#include <arm/cpu_data_internal.h>
36#include <arm64/proc_reg.h>
cb323159
A
37#if __has_feature(ptrauth_calls)
38#include <ptrauth.h>
39#endif
5ba3f43e 40
/*
 * Legacy VFPv2 state export layout: 32 32-bit registers followed by the
 * FPSCR word.  Used by the ARM_VFP_STATE getter when the caller supplies
 * only ARM_VFPV2_STATE_COUNT words instead of the full ARM_VFP_STATE_COUNT.
 */
struct arm_vfpv2_state {
	__uint32_t __r[32];
	__uint32_t __fpscr;
};

typedef struct arm_vfpv2_state arm_vfpv2_state_t;

/* Size of arm_vfpv2_state_t in 32-bit words (Mach thread-state count units). */
#define ARM_VFPV2_STATE_COUNT \
	((mach_msg_type_number_t)(sizeof (arm_vfpv2_state_t)/sizeof(uint32_t)))
5ba3f43e
A
50
51/*
52 * Forward definitions
53 */
54void thread_set_child(thread_t child, int pid);
55void thread_set_parent(thread_t parent, int pid);
56
/*
 * Maps state flavor to number of words in the state:
 */
/* __private_extern__ */
/*
 * Indexed by thread-state flavor constant; each entry is the flavor's size
 * expressed in 32-bit words (mach_msg_type_number_t units).  Flavors not
 * listed here get an implicit zero entry.
 */
unsigned int _MachineStateCount[] = {
	[ARM_UNIFIED_THREAD_STATE] = ARM_UNIFIED_THREAD_STATE_COUNT,
	[ARM_VFP_STATE] = ARM_VFP_STATE_COUNT,
	[ARM_EXCEPTION_STATE] = ARM_EXCEPTION_STATE_COUNT,
	[ARM_DEBUG_STATE] = ARM_DEBUG_STATE_COUNT,
	[ARM_THREAD_STATE64] = ARM_THREAD_STATE64_COUNT,
	[ARM_EXCEPTION_STATE64] = ARM_EXCEPTION_STATE64_COUNT,
	[ARM_THREAD_STATE32] = ARM_THREAD_STATE32_COUNT,
	[ARM_DEBUG_STATE32] = ARM_DEBUG_STATE32_COUNT,
	[ARM_DEBUG_STATE64] = ARM_DEBUG_STATE64_COUNT,
	[ARM_NEON_STATE] = ARM_NEON_STATE_COUNT,
	[ARM_NEON_STATE64] = ARM_NEON_STATE64_COUNT,
	[ARM_PAGEIN_STATE] = ARM_PAGEIN_STATE_COUNT,
};
75
76extern zone_t ads_zone;
77
78#if __arm64__
79/*
80 * Copy values from saved_state to ts64.
81 */
82void
cb323159
A
83saved_state_to_thread_state64(const arm_saved_state_t * saved_state,
84 arm_thread_state64_t * ts64)
5ba3f43e
A
85{
86 uint32_t i;
87
88 assert(is_saved_state64(saved_state));
89
90 ts64->fp = get_saved_state_fp(saved_state);
91 ts64->lr = get_saved_state_lr(saved_state);
92 ts64->sp = get_saved_state_sp(saved_state);
93 ts64->pc = get_saved_state_pc(saved_state);
94 ts64->cpsr = get_saved_state_cpsr(saved_state);
0a7de745 95 for (i = 0; i < 29; i++) {
5ba3f43e 96 ts64->x[i] = get_saved_state_reg(saved_state, i);
0a7de745 97 }
5ba3f43e
A
98}
99
100/*
101 * Copy values from ts64 to saved_state
102 */
103void
cb323159
A
104thread_state64_to_saved_state(const arm_thread_state64_t * ts64,
105 arm_saved_state_t * saved_state)
5ba3f43e
A
106{
107 uint32_t i;
108
109 assert(is_saved_state64(saved_state));
110
111 set_saved_state_fp(saved_state, ts64->fp);
112 set_saved_state_lr(saved_state, ts64->lr);
113 set_saved_state_sp(saved_state, ts64->sp);
114 set_saved_state_pc(saved_state, ts64->pc);
115 set_saved_state_cpsr(saved_state, (ts64->cpsr & ~PSR64_MODE_MASK) | PSR64_MODE_RW_64);
0a7de745 116 for (i = 0; i < 29; i++) {
5ba3f43e 117 set_saved_state_reg(saved_state, i, ts64->x[i]);
0a7de745 118 }
5ba3f43e 119}
5ba3f43e 120
cb323159
A
121#endif /* __arm64__ */
122
123static kern_return_t
124handle_get_arm32_thread_state(thread_state_t tstate,
125 mach_msg_type_number_t * count,
126 const arm_saved_state_t * saved_state)
5ba3f43e 127{
0a7de745
A
128 if (*count < ARM_THREAD_STATE32_COUNT) {
129 return KERN_INVALID_ARGUMENT;
130 }
131 if (!is_saved_state32(saved_state)) {
132 return KERN_INVALID_ARGUMENT;
133 }
5ba3f43e
A
134
135 (void)saved_state_to_thread_state32(saved_state, (arm_thread_state32_t *)tstate);
136 *count = ARM_THREAD_STATE32_COUNT;
137 return KERN_SUCCESS;
138}
139
cb323159
A
140static kern_return_t
141handle_get_arm64_thread_state(thread_state_t tstate,
142 mach_msg_type_number_t * count,
143 const arm_saved_state_t * saved_state)
5ba3f43e 144{
0a7de745
A
145 if (*count < ARM_THREAD_STATE64_COUNT) {
146 return KERN_INVALID_ARGUMENT;
147 }
148 if (!is_saved_state64(saved_state)) {
149 return KERN_INVALID_ARGUMENT;
150 }
5ba3f43e
A
151
152 (void)saved_state_to_thread_state64(saved_state, (arm_thread_state64_t *)tstate);
153 *count = ARM_THREAD_STATE64_COUNT;
154 return KERN_SUCCESS;
155}
156
157
cb323159
A
/*
 * Export a thread's state via the unified ARM_THREAD_STATE flavor.
 *
 * Modern callers receive an arm_unified_thread_state_t whose header
 * (ash.flavor/ash.count) records whether the 32- or 64-bit half of the
 * union is populated.  Legacy callers that only know the 32-bit layout
 * pass a smaller count and are routed to the 32-bit-only path.
 * Returns KERN_INVALID_ARGUMENT on count/width mismatch.
 */
static kern_return_t
handle_get_arm_thread_state(thread_state_t tstate,
    mach_msg_type_number_t * count,
    const arm_saved_state_t * saved_state)
{
	/* In an arm64 world, this flavor can be used to retrieve the thread
	 * state of a 32-bit or 64-bit thread into a unified structure, but we
	 * need to support legacy clients who are only aware of 32-bit, so
	 * check the count to see what the client is expecting.
	 */
	if (*count < ARM_UNIFIED_THREAD_STATE_COUNT) {
		return handle_get_arm32_thread_state(tstate, count, saved_state);
	}

	arm_unified_thread_state_t *unified_state = (arm_unified_thread_state_t *) tstate;
	/* Zero the whole record so the unused half of the union does not leak
	 * stale kernel bytes to userspace. */
	bzero(unified_state, sizeof(*unified_state));
#if __arm64__
	if (is_saved_state64(saved_state)) {
		unified_state->ash.flavor = ARM_THREAD_STATE64;
		unified_state->ash.count = ARM_THREAD_STATE64_COUNT;
		(void)saved_state_to_thread_state64(saved_state, thread_state64(unified_state));
	} else
#endif
	{
		unified_state->ash.flavor = ARM_THREAD_STATE32;
		unified_state->ash.count = ARM_THREAD_STATE32_COUNT;
		(void)saved_state_to_thread_state32(saved_state, thread_state32(unified_state));
	}
	*count = ARM_UNIFIED_THREAD_STATE_COUNT;
	return KERN_SUCCESS;
}
189
cb323159
A
190
191static kern_return_t
192handle_set_arm32_thread_state(const thread_state_t tstate,
193 mach_msg_type_number_t count,
194 arm_saved_state_t * saved_state)
5ba3f43e 195{
0a7de745
A
196 if (count != ARM_THREAD_STATE32_COUNT) {
197 return KERN_INVALID_ARGUMENT;
198 }
5ba3f43e
A
199
200 (void)thread_state32_to_saved_state((const arm_thread_state32_t *)tstate, saved_state);
201 return KERN_SUCCESS;
202}
203
cb323159
A
204static kern_return_t
205handle_set_arm64_thread_state(const thread_state_t tstate,
206 mach_msg_type_number_t count,
207 arm_saved_state_t * saved_state)
5ba3f43e 208{
0a7de745
A
209 if (count != ARM_THREAD_STATE64_COUNT) {
210 return KERN_INVALID_ARGUMENT;
211 }
5ba3f43e
A
212
213 (void)thread_state64_to_saved_state((const arm_thread_state64_t *)tstate, saved_state);
214 return KERN_SUCCESS;
215}
216
217
cb323159
A
/*
 * Set a thread's state via the unified ARM_THREAD_STATE flavor.
 *
 * Accepts either a legacy 32-bit-only layout (smaller count) or an
 * arm_unified_thread_state_t; in the unified case the record's own
 * flavor header selects which half of the union to apply.  The chosen
 * width must match the width of the target's saved state, otherwise
 * KERN_INVALID_ARGUMENT is returned.
 */
static kern_return_t
handle_set_arm_thread_state(const thread_state_t tstate,
    mach_msg_type_number_t count,
    arm_saved_state_t * saved_state)
{
	/* In an arm64 world, this flavor can be used to set the thread state of a
	 * 32-bit or 64-bit thread from a unified structure, but we need to support
	 * legacy clients who are only aware of 32-bit, so check the count to see
	 * what the client is expecting.
	 */
	if (count < ARM_UNIFIED_THREAD_STATE_COUNT) {
		/* Legacy layout only fits a 32-bit target. */
		if (!is_saved_state32(saved_state)) {
			return KERN_INVALID_ARGUMENT;
		}
		return handle_set_arm32_thread_state(tstate, count, saved_state);
	}

	const arm_unified_thread_state_t *unified_state = (const arm_unified_thread_state_t *) tstate;
#if __arm64__
	if (is_thread_state64(unified_state)) {
		if (!is_saved_state64(saved_state)) {
			return KERN_INVALID_ARGUMENT;
		}
		(void)thread_state64_to_saved_state(const_thread_state64(unified_state), saved_state);
	} else
#endif
	{
		if (!is_saved_state32(saved_state)) {
			return KERN_INVALID_ARGUMENT;
		}
		(void)thread_state32_to_saved_state(const_thread_state32(unified_state), saved_state);
	}

	return KERN_SUCCESS;
}
253
cb323159 254
d9a64523
A
/*
 * Translate thread state arguments to userspace representation
 *
 * On ptrauth (arm64e) kernels, pc/lr/sp/fp in an exported
 * arm_thread_state64_t are handed to userspace as signed pointers so a
 * later thread_set_state round-trips through the same auth checks.
 * Flavors other than ARM_THREAD_STATE/ARM_THREAD_STATE64, undersized
 * buffers, and non-64-bit targets are passed through untouched.
 * Always returns KERN_SUCCESS.
 */

kern_return_t
machine_thread_state_convert_to_user(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t *count)
{
#if __has_feature(ptrauth_calls)
	arm_thread_state64_t *ts64;

	/* Locate the 64-bit state inside the buffer, or bail out (no-op). */
	switch (flavor) {
	case ARM_THREAD_STATE:
	{
		arm_unified_thread_state_t *unified_state = (arm_unified_thread_state_t *)tstate;

		if (*count < ARM_UNIFIED_THREAD_STATE_COUNT || !is_thread_state64(unified_state)) {
			return KERN_SUCCESS;
		}
		ts64 = thread_state64(unified_state);
		break;
	}
	case ARM_THREAD_STATE64:
	{
		if (*count < ARM_THREAD_STATE64_COUNT) {
			return KERN_SUCCESS;
		}
		ts64 = (arm_thread_state64_t *)tstate;
		break;
	}
	default:
		return KERN_SUCCESS;
	}

	// Note that kernel threads never have disable_user_jop set
	if (current_thread()->machine.disable_user_jop || !thread_is_64bit_addr(current_thread()) ||
	    thread->machine.disable_user_jop || !thread_is_64bit_addr(thread) ||
	    (BootArgs->bootFlags & kBootFlagsDisableUserThreadStateJOP)) {
		/* Either side is JOP-disabled: export unsigned state and mark it so. */
		ts64->flags = __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH;
		return KERN_SUCCESS;
	}

	ts64->flags = 0;
	if (ts64->lr) {
		// lr might contain an IB-signed return address (strip is a no-op on unsigned addresses)
		uintptr_t stripped_lr = (uintptr_t)ptrauth_strip((void *)ts64->lr,
		    ptrauth_key_return_address);
		if (ts64->lr != stripped_lr) {
			// Need to allow already-signed lr value to round-trip as is
			ts64->flags |= __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR;
		}
		// Note that an IB-signed return address that happens to have a 0 signature value
		// will round-trip correctly even if IA-signed again below (and IA-authd later)
	}

	if (BootArgs->bootFlags & kBootFlagsDisableUserJOP) {
		return KERN_SUCCESS;
	}

	/* Sign each non-NULL register with its per-register discriminator. */
	if (ts64->pc) {
		ts64->pc = (uintptr_t)pmap_sign_user_ptr((void*)ts64->pc,
		    ptrauth_key_process_independent_code, ptrauth_string_discriminator("pc"));
	}
	if (ts64->lr && !(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR)) {
		ts64->lr = (uintptr_t)pmap_sign_user_ptr((void*)ts64->lr,
		    ptrauth_key_process_independent_code, ptrauth_string_discriminator("lr"));
	}
	if (ts64->sp) {
		ts64->sp = (uintptr_t)pmap_sign_user_ptr((void*)ts64->sp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("sp"));
	}
	if (ts64->fp) {
		ts64->fp = (uintptr_t)pmap_sign_user_ptr((void*)ts64->fp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("fp"));
	}

	return KERN_SUCCESS;
#else
	// No conversion to userspace representation on this platform
	(void)thread; (void)flavor; (void)tstate; (void)count;
	return KERN_SUCCESS;
#endif /* __has_feature(ptrauth_calls) */
}
341
/*
 * Translate thread state arguments from userspace representation
 *
 * Inverse of machine_thread_state_convert_to_user: on ptrauth kernels,
 * authenticate the signed pc/lr/sp/fp values supplied by userspace
 * before they are installed into the target thread's saved state.
 * Auth failure poisons the pointer rather than returning an error, so
 * a forged value cannot become a valid control-flow target.  Returns
 * KERN_PROTECTION_FAILURE only when a JOP-disabled caller attempts to
 * set state on a JOP-enabled target; otherwise KERN_SUCCESS.
 */

kern_return_t
machine_thread_state_convert_from_user(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t count)
{
#if __has_feature(ptrauth_calls)
	arm_thread_state64_t *ts64;

	/* Locate the 64-bit state inside the buffer, or bail out (no-op). */
	switch (flavor) {
	case ARM_THREAD_STATE:
	{
		arm_unified_thread_state_t *unified_state = (arm_unified_thread_state_t *)tstate;

		if (count < ARM_UNIFIED_THREAD_STATE_COUNT || !is_thread_state64(unified_state)) {
			return KERN_SUCCESS;
		}
		ts64 = thread_state64(unified_state);
		break;
	}
	case ARM_THREAD_STATE64:
	{
		if (count != ARM_THREAD_STATE64_COUNT) {
			return KERN_SUCCESS;
		}
		ts64 = (arm_thread_state64_t *)tstate;
		break;
	}
	default:
		return KERN_SUCCESS;
	}

	// Note that kernel threads never have disable_user_jop set
	if (current_thread()->machine.disable_user_jop || !thread_is_64bit_addr(current_thread())) {
		if (thread->machine.disable_user_jop || !thread_is_64bit_addr(thread)) {
			/* Both sides unsigned: accept the state as-is. */
			ts64->flags = __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH;
			return KERN_SUCCESS;
		}
		// A JOP-disabled process must not set thread state on a JOP-enabled process
		return KERN_PROTECTION_FAILURE;
	}

	if (ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH) {
		if (thread->machine.disable_user_jop || !thread_is_64bit_addr(thread) ||
		    (BootArgs->bootFlags & kBootFlagsDisableUserThreadStateJOP)) {
			return KERN_SUCCESS;
		}
		// Disallow setting unsigned thread state on JOP-enabled processes.
		// Ignore flag and treat thread state arguments as signed, ptrauth
		// poisoning will cause resulting thread state to be invalid
		ts64->flags &= ~__DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH;
	}

	if (ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR) {
		// lr might contain an IB-signed return address (strip is a no-op on unsigned addresses)
		uintptr_t stripped_lr = (uintptr_t)ptrauth_strip((void *)ts64->lr,
		    ptrauth_key_return_address);
		if (ts64->lr == stripped_lr) {
			// Don't allow unsigned pointer to be passed through as is. Ignore flag and
			// treat as IA-signed below (where auth failure may poison the value).
			ts64->flags &= ~__DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR;
		}
		// Note that an IB-signed return address that happens to have a 0 signature value
		// will also have been IA-signed (without this flag being set) and so will IA-auth
		// correctly below.
	}

	if (BootArgs->bootFlags & kBootFlagsDisableUserJOP) {
		return KERN_SUCCESS;
	}

	/* Authenticate each non-NULL register against its per-register discriminator. */
	if (ts64->pc) {
		ts64->pc = (uintptr_t)pmap_auth_user_ptr((void*)ts64->pc,
		    ptrauth_key_process_independent_code, ptrauth_string_discriminator("pc"));
	}
	if (ts64->lr && !(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR)) {
		ts64->lr = (uintptr_t)pmap_auth_user_ptr((void*)ts64->lr,
		    ptrauth_key_process_independent_code, ptrauth_string_discriminator("lr"));
	}
	if (ts64->sp) {
		ts64->sp = (uintptr_t)pmap_auth_user_ptr((void*)ts64->sp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("sp"));
	}
	if (ts64->fp) {
		ts64->fp = (uintptr_t)pmap_auth_user_ptr((void*)ts64->fp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("fp"));
	}

	return KERN_SUCCESS;
#else
	// No conversion from userspace representation on this platform
	(void)thread; (void)flavor; (void)tstate; (void)count;
	return KERN_SUCCESS;
#endif /* __has_feature(ptrauth_calls) */
}
442
/*
 * Translate signal context data pointer to userspace representation
 *
 * On ptrauth kernels, sign the ucontext pointer with a data key before
 * it is delivered to a JOP-enabled 64-bit process; JOP-disabled callers
 * pass through untouched.  Always returns KERN_SUCCESS.
 */

kern_return_t
machine_thread_siguctx_pointer_convert_to_user(
	__assert_only thread_t thread,
	user_addr_t *uctxp)
{
#if __has_feature(ptrauth_calls)
	if (current_thread()->machine.disable_user_jop || !thread_is_64bit_addr(current_thread())) {
		/* Caller is JOP-disabled; the target must be too (no signing needed). */
		assert(thread->machine.disable_user_jop || !thread_is_64bit_addr(thread));
		return KERN_SUCCESS;
	}

	if (BootArgs->bootFlags & kBootFlagsDisableUserJOP) {
		return KERN_SUCCESS;
	}

	if (*uctxp) {
		*uctxp = (uintptr_t)pmap_sign_user_ptr((void*)*uctxp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("uctx"));
	}

	return KERN_SUCCESS;
#else
	// No conversion to userspace representation on this platform
	(void)thread; (void)uctxp;
	return KERN_SUCCESS;
#endif /* __has_feature(ptrauth_calls) */
}
474
/*
 * Translate array of function pointer syscall arguments from userspace representation
 *
 * On ptrauth kernels, authenticate each of the `count` function pointers
 * in `fptrs` (signed by userspace with the function-pointer key, zero
 * discriminator); auth failure poisons the pointer.  NULL entries are
 * left untouched.  Always returns KERN_SUCCESS.
 */

kern_return_t
machine_thread_function_pointers_convert_from_user(
	__assert_only thread_t thread,
	user_addr_t *fptrs,
	uint32_t count)
{
#if __has_feature(ptrauth_calls)
	if (current_thread()->machine.disable_user_jop || !thread_is_64bit_addr(current_thread())) {
		/* Caller is JOP-disabled; the target must be too (pointers are unsigned). */
		assert(thread->machine.disable_user_jop || !thread_is_64bit_addr(thread));
		return KERN_SUCCESS;
	}

	if (BootArgs->bootFlags & kBootFlagsDisableUserJOP) {
		return KERN_SUCCESS;
	}

	while (count--) {
		if (*fptrs) {
			*fptrs = (uintptr_t)pmap_auth_user_ptr((void*)*fptrs,
			    ptrauth_key_function_pointer, 0);
		}
		fptrs++;
	}

	return KERN_SUCCESS;
#else
	// No conversion from userspace representation on this platform
	(void)thread; (void)fptrs; (void)count;
	return KERN_SUCCESS;
#endif /* __has_feature(ptrauth_calls) */
}
510
/*
 * Routine: machine_thread_get_state
 *
 * Export one flavor of the target thread's user-mode machine state into
 * the caller-supplied buffer.  On entry *count is the buffer capacity in
 * 32-bit words; on success it is updated to the number of words written.
 * Returns KERN_INVALID_ARGUMENT when the buffer is too small or the
 * flavor does not match the thread's data width.
 */
kern_return_t
machine_thread_get_state(thread_t thread,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t * count)
{
	switch (flavor) {
	case THREAD_STATE_FLAVOR_LIST:
		/* Legacy flavor list: always reports the 32-bit flavors. */
		if (*count < 4) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = ARM_EXCEPTION_STATE;
		tstate[3] = ARM_DEBUG_STATE;
		*count = 4;
		break;

	case THREAD_STATE_FLAVOR_LIST_NEW:
		/* Width-aware flavor list. */
		if (*count < 4) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = thread_is_64bit_data(thread) ? ARM_EXCEPTION_STATE64 : ARM_EXCEPTION_STATE;
		tstate[3] = thread_is_64bit_data(thread) ? ARM_DEBUG_STATE64 : ARM_DEBUG_STATE32;
		*count = 4;
		break;

	case THREAD_STATE_FLAVOR_LIST_10_15:
		/* As FLAVOR_LIST_NEW, plus the pagein-error flavor (macOS 10.15+). */
		if (*count < 5) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = thread_is_64bit_data(thread) ? ARM_EXCEPTION_STATE64 : ARM_EXCEPTION_STATE;
		tstate[3] = thread_is_64bit_data(thread) ? ARM_DEBUG_STATE64 : ARM_DEBUG_STATE32;
		tstate[4] = ARM_PAGEIN_STATE;
		*count = 5;
		break;

	case ARM_THREAD_STATE:
	{
		kern_return_t rn = handle_get_arm_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;
	}
	case ARM_THREAD_STATE32:
	{
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		kern_return_t rn = handle_get_arm32_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;
	}
#if __arm64__
	case ARM_THREAD_STATE64:
	{
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		kern_return_t rn = handle_get_arm64_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;
	}
#endif
	case ARM_EXCEPTION_STATE:{
		struct arm_exception_state *state;
		struct arm_saved_state32 *saved_state;

		if (*count < ARM_EXCEPTION_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (struct arm_exception_state *) tstate;
		saved_state = saved_state32(thread->machine.upcb);

		state->exception = saved_state->exception;
		/* 32-bit export names the syndrome register "fsr". */
		state->fsr = saved_state->esr;
		state->far = saved_state->far;

		*count = ARM_EXCEPTION_STATE_COUNT;
		break;
	}
	case ARM_EXCEPTION_STATE64:{
		struct arm_exception_state64 *state;
		struct arm_saved_state64 *saved_state;

		if (*count < ARM_EXCEPTION_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (struct arm_exception_state64 *) tstate;
		saved_state = saved_state64(thread->machine.upcb);

		state->exception = saved_state->exception;
		state->far = saved_state->far;
		state->esr = saved_state->esr;

		*count = ARM_EXCEPTION_STATE64_COUNT;
		break;
	}
	case ARM_DEBUG_STATE:{
		/* Legacy 32-bit debug state (no mdscr_el1 field). */
		arm_legacy_debug_state_t *state;
		arm_debug_state32_t *thread_state;

		if (*count < ARM_LEGACY_DEBUG_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_legacy_debug_state_t *) tstate;
		thread_state = find_debug_state32(thread);

		/* No per-thread debug state allocated yet means "all zero". */
		if (thread_state == NULL) {
			bzero(state, sizeof(arm_legacy_debug_state_t));
		} else {
			bcopy(thread_state, state, sizeof(arm_legacy_debug_state_t));
		}

		*count = ARM_LEGACY_DEBUG_STATE_COUNT;
		break;
	}
	case ARM_DEBUG_STATE32:{
		arm_debug_state32_t *state;
		arm_debug_state32_t *thread_state;

		if (*count < ARM_DEBUG_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_debug_state32_t *) tstate;
		thread_state = find_debug_state32(thread);

		if (thread_state == NULL) {
			bzero(state, sizeof(arm_debug_state32_t));
		} else {
			bcopy(thread_state, state, sizeof(arm_debug_state32_t));
		}

		*count = ARM_DEBUG_STATE32_COUNT;
		break;
	}

	case ARM_DEBUG_STATE64:{
		arm_debug_state64_t *state;
		arm_debug_state64_t *thread_state;

		if (*count < ARM_DEBUG_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_debug_state64_t *) tstate;
		thread_state = find_debug_state64(thread);

		if (thread_state == NULL) {
			bzero(state, sizeof(arm_debug_state64_t));
		} else {
			bcopy(thread_state, state, sizeof(arm_debug_state64_t));
		}

		*count = ARM_DEBUG_STATE64_COUNT;
		break;
	}

	case ARM_VFP_STATE:{
		struct arm_vfp_state *state;
		arm_neon_saved_state32_t *thread_state;
		unsigned int max;

		/* Accept either the full VFP layout or the smaller VFPv2 layout. */
		if (*count < ARM_VFP_STATE_COUNT) {
			if (*count < ARM_VFPV2_STATE_COUNT) {
				return KERN_INVALID_ARGUMENT;
			} else {
				*count = ARM_VFPV2_STATE_COUNT;
			}
		}

		/* max = number of 32-bit registers to copy (VFPv2: 32, VFPv3: 64). */
		if (*count == ARM_VFPV2_STATE_COUNT) {
			max = 32;
		} else {
			max = 64;
		}

		state = (struct arm_vfp_state *) tstate;
		thread_state = neon_state32(thread->machine.uNeon);
		/* ARM64 TODO: set fpsr and fpcr from state->fpscr */

		/* +1 word for the trailing fpscr. */
		bcopy(thread_state, state, (max + 1) * sizeof(uint32_t));
		*count = (max + 1);
		break;
	}
	case ARM_NEON_STATE:{
		arm_neon_state_t *state;
		arm_neon_saved_state32_t *thread_state;

		if (*count < ARM_NEON_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_neon_state_t *)tstate;
		thread_state = neon_state32(thread->machine.uNeon);

		assert(sizeof(*thread_state) == sizeof(*state));
		bcopy(thread_state, state, sizeof(arm_neon_state_t));

		*count = ARM_NEON_STATE_COUNT;
		break;
	}

	case ARM_NEON_STATE64:{
		arm_neon_state64_t *state;
		arm_neon_saved_state64_t *thread_state;

		if (*count < ARM_NEON_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_neon_state64_t *)tstate;
		thread_state = neon_state64(thread->machine.uNeon);

		/* For now, these are identical */
		assert(sizeof(*state) == sizeof(*thread_state));
		bcopy(thread_state, state, sizeof(arm_neon_state64_t));

		*count = ARM_NEON_STATE64_COUNT;
		break;
	}


	case ARM_PAGEIN_STATE: {
		arm_pagein_state_t *state;

		if (*count < ARM_PAGEIN_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_pagein_state_t *)tstate;
		state->__pagein_error = thread->t_pagein_error;

		*count = ARM_PAGEIN_STATE_COUNT;
		break;
	}


	default:
		return KERN_INVALID_ARGUMENT;
	}
	return KERN_SUCCESS;
}
802
803
/*
 * Routine: machine_thread_get_kern_state
 *
 * Export the register state captured at interrupt entry for the current
 * kernel thread.  Only valid when called on the interrupted thread while
 * a CPU interrupt saved state exists; otherwise returns KERN_FAILURE.
 */
kern_return_t
machine_thread_get_kern_state(thread_t thread,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t * count)
{
	/*
	 * This works only for an interrupted kernel thread
	 */
	if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL) {
		return KERN_FAILURE;
	}

	switch (flavor) {
	case ARM_THREAD_STATE:
	{
		kern_return_t rn = handle_get_arm_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
		if (rn) {
			return rn;
		}
		break;
	}
	case ARM_THREAD_STATE32:
	{
		kern_return_t rn = handle_get_arm32_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
		if (rn) {
			return rn;
		}
		break;
	}
#if __arm64__
	case ARM_THREAD_STATE64:
	{
		kern_return_t rn = handle_get_arm64_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
		if (rn) {
			return rn;
		}
		break;
	}
#endif
	default:
		return KERN_INVALID_ARGUMENT;
	}
	return KERN_SUCCESS;
}
853
854void
855machine_thread_switch_addrmode(thread_t thread)
856{
d9a64523 857 if (task_has_64Bit_data(thread->task)) {
5ba3f43e
A
858 thread->machine.upcb->ash.flavor = ARM_SAVED_STATE64;
859 thread->machine.upcb->ash.count = ARM_SAVED_STATE64_COUNT;
860 thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
861 thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;
862
863 /*
864 * Reinitialize the NEON state.
865 */
866 bzero(&thread->machine.uNeon->uns, sizeof(thread->machine.uNeon->uns));
867 thread->machine.uNeon->ns_64.fpcr = FPCR_DEFAULT;
868 } else {
869 thread->machine.upcb->ash.flavor = ARM_SAVED_STATE32;
870 thread->machine.upcb->ash.count = ARM_SAVED_STATE32_COUNT;
871 thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
872 thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
873
874 /*
875 * Reinitialize the NEON state.
876 */
877 bzero(&thread->machine.uNeon->uns, sizeof(thread->machine.uNeon->uns));
878 thread->machine.uNeon->ns_32.fpcr = FPCR_DEFAULT_32;
879 }
880}
881
882extern long long arm_debug_get(void);
883
884/*
cb323159 885 * Routine: machine_thread_set_state
5ba3f43e
A
886 *
887 */
888kern_return_t
cb323159
A
889machine_thread_set_state(thread_t thread,
890 thread_flavor_t flavor,
891 thread_state_t tstate,
892 mach_msg_type_number_t count)
5ba3f43e
A
893{
894 kern_return_t rn;
895
896 switch (flavor) {
897 case ARM_THREAD_STATE:
898 rn = handle_set_arm_thread_state(tstate, count, thread->machine.upcb);
0a7de745
A
899 if (rn) {
900 return rn;
901 }
5ba3f43e
A
902 break;
903
904 case ARM_THREAD_STATE32:
0a7de745
A
905 if (thread_is_64bit_data(thread)) {
906 return KERN_INVALID_ARGUMENT;
907 }
5ba3f43e
A
908
909 rn = handle_set_arm32_thread_state(tstate, count, thread->machine.upcb);
0a7de745
A
910 if (rn) {
911 return rn;
912 }
5ba3f43e
A
913 break;
914
915#if __arm64__
916 case ARM_THREAD_STATE64:
0a7de745
A
917 if (!thread_is_64bit_data(thread)) {
918 return KERN_INVALID_ARGUMENT;
919 }
5ba3f43e
A
920
921 rn = handle_set_arm64_thread_state(tstate, count, thread->machine.upcb);
0a7de745
A
922 if (rn) {
923 return rn;
924 }
5ba3f43e
A
925 break;
926#endif
927 case ARM_EXCEPTION_STATE:{
0a7de745
A
928 if (count != ARM_EXCEPTION_STATE_COUNT) {
929 return KERN_INVALID_ARGUMENT;
5ba3f43e 930 }
0a7de745
A
931 if (thread_is_64bit_data(thread)) {
932 return KERN_INVALID_ARGUMENT;
933 }
934
935 break;
936 }
5ba3f43e 937 case ARM_EXCEPTION_STATE64:{
0a7de745
A
938 if (count != ARM_EXCEPTION_STATE64_COUNT) {
939 return KERN_INVALID_ARGUMENT;
940 }
941 if (!thread_is_64bit_data(thread)) {
942 return KERN_INVALID_ARGUMENT;
943 }
5ba3f43e 944
0a7de745
A
945 break;
946 }
947 case ARM_DEBUG_STATE:
948 {
949 arm_legacy_debug_state_t *state;
950 boolean_t enabled = FALSE;
951 unsigned int i;
5ba3f43e 952
0a7de745
A
953 if (count != ARM_LEGACY_DEBUG_STATE_COUNT) {
954 return KERN_INVALID_ARGUMENT;
955 }
956 if (thread_is_64bit_data(thread)) {
957 return KERN_INVALID_ARGUMENT;
5ba3f43e 958 }
5ba3f43e 959
0a7de745 960 state = (arm_legacy_debug_state_t *) tstate;
5ba3f43e 961
0a7de745
A
962 for (i = 0; i < 16; i++) {
963 /* do not allow context IDs to be set */
964 if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
965 || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
966 || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
967 || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
968 return KERN_PROTECTION_FAILURE;
969 }
970 if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
971 || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
972 enabled = TRUE;
973 }
974 }
5ba3f43e 975
0a7de745
A
976 if (!enabled) {
977 arm_debug_state32_t *thread_state = find_debug_state32(thread);
978 if (thread_state != NULL) {
979 void *pTmp = thread->machine.DebugData;
980 thread->machine.DebugData = NULL;
981 zfree(ads_zone, pTmp);
5ba3f43e 982 }
0a7de745
A
983 } else {
984 arm_debug_state32_t *thread_state = find_debug_state32(thread);
985 if (thread_state == NULL) {
986 thread->machine.DebugData = zalloc(ads_zone);
987 bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData));
988 thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE32;
989 thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE32_COUNT;
990 thread_state = find_debug_state32(thread);
5ba3f43e 991 }
0a7de745
A
992 assert(NULL != thread_state);
993
994 for (i = 0; i < 16; i++) {
995 /* set appropriate privilege; mask out unknown bits */
996 thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
997 | ARM_DBGBCR_MATCH_MASK
998 | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
999 | ARM_DBG_CR_ENABLE_MASK))
1000 | ARM_DBGBCR_TYPE_IVA
1001 | ARM_DBG_CR_LINKED_UNLINKED
1002 | ARM_DBG_CR_SECURITY_STATE_BOTH
1003 | ARM_DBG_CR_MODE_CONTROL_USER;
1004 thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK;
1005 thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
1006 | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
1007 | ARM_DBGWCR_ACCESS_CONTROL_MASK
1008 | ARM_DBG_CR_ENABLE_MASK))
1009 | ARM_DBG_CR_LINKED_UNLINKED
1010 | ARM_DBG_CR_SECURITY_STATE_BOTH
1011 | ARM_DBG_CR_MODE_CONTROL_USER;
1012 thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK;
5ba3f43e 1013 }
0a7de745
A
1014
1015 thread_state->mdscr_el1 = 0ULL; // Legacy customers issuing ARM_DEBUG_STATE dont drive single stepping.
5ba3f43e 1016 }
0a7de745
A
1017
1018 if (thread == current_thread()) {
1019 arm_debug_set32(thread->machine.DebugData);
1020 }
1021
1022 break;
1023 }
5ba3f43e
A
1024 case ARM_DEBUG_STATE32:
1025 /* ARM64_TODO subtle bcr/wcr semantic differences e.g. wcr and ARM_DBGBCR_TYPE_IVA */
0a7de745
A
1026 {
1027 arm_debug_state32_t *state;
1028 boolean_t enabled = FALSE;
1029 unsigned int i;
5ba3f43e 1030
0a7de745
A
1031 if (count != ARM_DEBUG_STATE32_COUNT) {
1032 return KERN_INVALID_ARGUMENT;
1033 }
1034 if (thread_is_64bit_data(thread)) {
1035 return KERN_INVALID_ARGUMENT;
1036 }
1037
1038 state = (arm_debug_state32_t *) tstate;
5ba3f43e 1039
0a7de745
A
1040 if (state->mdscr_el1 & 0x1) {
1041 enabled = TRUE;
1042 }
5ba3f43e 1043
0a7de745
A
1044 for (i = 0; i < 16; i++) {
1045 /* do not allow context IDs to be set */
1046 if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
1047 || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
1048 || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
1049 || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
1050 return KERN_PROTECTION_FAILURE;
1051 }
1052 if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
1053 || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
5ba3f43e 1054 enabled = TRUE;
0a7de745
A
1055 }
1056 }
5ba3f43e 1057
0a7de745
A
1058 if (!enabled) {
1059 arm_debug_state32_t *thread_state = find_debug_state32(thread);
1060 if (thread_state != NULL) {
1061 void *pTmp = thread->machine.DebugData;
1062 thread->machine.DebugData = NULL;
1063 zfree(ads_zone, pTmp);
1064 }
1065 } else {
1066 arm_debug_state32_t *thread_state = find_debug_state32(thread);
1067 if (thread_state == NULL) {
1068 thread->machine.DebugData = zalloc(ads_zone);
1069 bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData));
1070 thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE32;
1071 thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE32_COUNT;
1072 thread_state = find_debug_state32(thread);
5ba3f43e 1073 }
0a7de745
A
1074 assert(NULL != thread_state);
1075
1076 if (state->mdscr_el1 & 0x1) {
1077 thread_state->mdscr_el1 |= 0x1;
5ba3f43e 1078 } else {
0a7de745 1079 thread_state->mdscr_el1 &= ~0x1;
5ba3f43e 1080 }
0a7de745
A
1081
1082 for (i = 0; i < 16; i++) {
1083 /* set appropriate privilege; mask out unknown bits */
1084 thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
1085 | ARM_DBGBCR_MATCH_MASK
1086 | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
1087 | ARM_DBG_CR_ENABLE_MASK))
1088 | ARM_DBGBCR_TYPE_IVA
1089 | ARM_DBG_CR_LINKED_UNLINKED
1090 | ARM_DBG_CR_SECURITY_STATE_BOTH
1091 | ARM_DBG_CR_MODE_CONTROL_USER;
1092 thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK;
1093 thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
1094 | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
1095 | ARM_DBGWCR_ACCESS_CONTROL_MASK
1096 | ARM_DBG_CR_ENABLE_MASK))
1097 | ARM_DBG_CR_LINKED_UNLINKED
1098 | ARM_DBG_CR_SECURITY_STATE_BOTH
1099 | ARM_DBG_CR_MODE_CONTROL_USER;
1100 thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK;
5ba3f43e 1101 }
5ba3f43e
A
1102 }
1103
0a7de745
A
1104 if (thread == current_thread()) {
1105 arm_debug_set32(thread->machine.DebugData);
1106 }
1107
1108 break;
1109 }
1110
5ba3f43e 1111 case ARM_DEBUG_STATE64:
0a7de745
A
1112 {
1113 arm_debug_state64_t *state;
1114 boolean_t enabled = FALSE;
cb323159 1115 unsigned int i;
5ba3f43e 1116
0a7de745
A
1117 if (count != ARM_DEBUG_STATE64_COUNT) {
1118 return KERN_INVALID_ARGUMENT;
1119 }
1120 if (!thread_is_64bit_data(thread)) {
1121 return KERN_INVALID_ARGUMENT;
1122 }
5ba3f43e 1123
0a7de745
A
1124 state = (arm_debug_state64_t *) tstate;
1125
1126 if (state->mdscr_el1 & 0x1) {
1127 enabled = TRUE;
1128 }
5ba3f43e 1129
0a7de745
A
1130 for (i = 0; i < 16; i++) {
1131 /* do not allow context IDs to be set */
1132 if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
1133 || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
1134 || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
1135 return KERN_PROTECTION_FAILURE;
1136 }
1137 if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
1138 || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
5ba3f43e 1139 enabled = TRUE;
0a7de745
A
1140 }
1141 }
5ba3f43e 1142
0a7de745
A
1143 if (!enabled) {
1144 arm_debug_state64_t *thread_state = find_debug_state64(thread);
1145 if (thread_state != NULL) {
1146 void *pTmp = thread->machine.DebugData;
1147 thread->machine.DebugData = NULL;
1148 zfree(ads_zone, pTmp);
5ba3f43e 1149 }
0a7de745
A
1150 } else {
1151 arm_debug_state64_t *thread_state = find_debug_state64(thread);
1152 if (thread_state == NULL) {
1153 thread->machine.DebugData = zalloc(ads_zone);
1154 bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData));
1155 thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE64;
1156 thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE64_COUNT;
1157 thread_state = find_debug_state64(thread);
1158 }
1159 assert(NULL != thread_state);
5ba3f43e 1160
0a7de745
A
1161 if (state->mdscr_el1 & 0x1) {
1162 thread_state->mdscr_el1 |= 0x1;
5ba3f43e 1163 } else {
0a7de745 1164 thread_state->mdscr_el1 &= ~0x1;
5ba3f43e 1165 }
0a7de745
A
1166
1167 for (i = 0; i < 16; i++) {
1168 /* set appropriate privilege; mask out unknown bits */
1169 thread_state->bcr[i] = (state->bcr[i] & (0 /* Was ARM_DBG_CR_ADDRESS_MASK_MASK deprecated in v8 */
cb323159 1170 | 0 /* Was ARM_DBGBCR_MATCH_MASK, ignored in AArch64 state */
0a7de745
A
1171 | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
1172 | ARM_DBG_CR_ENABLE_MASK))
1173 | ARM_DBGBCR_TYPE_IVA
1174 | ARM_DBG_CR_LINKED_UNLINKED
1175 | ARM_DBG_CR_SECURITY_STATE_BOTH
1176 | ARM_DBG_CR_MODE_CONTROL_USER;
1177 thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK64;
1178 thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
1179 | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
1180 | ARM_DBGWCR_ACCESS_CONTROL_MASK
1181 | ARM_DBG_CR_ENABLE_MASK))
1182 | ARM_DBG_CR_LINKED_UNLINKED
1183 | ARM_DBG_CR_SECURITY_STATE_BOTH
1184 | ARM_DBG_CR_MODE_CONTROL_USER;
1185 thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK64;
5ba3f43e 1186 }
5ba3f43e
A
1187 }
1188
0a7de745
A
1189 if (thread == current_thread()) {
1190 arm_debug_set64(thread->machine.DebugData);
1191 }
1192
1193 break;
1194 }
1195
5ba3f43e 1196 case ARM_VFP_STATE:{
0a7de745
A
1197 struct arm_vfp_state *state;
1198 arm_neon_saved_state32_t *thread_state;
1199 unsigned int max;
5ba3f43e 1200
0a7de745
A
1201 if (count != ARM_VFP_STATE_COUNT && count != ARM_VFPV2_STATE_COUNT) {
1202 return KERN_INVALID_ARGUMENT;
1203 }
5ba3f43e 1204
0a7de745
A
1205 if (count == ARM_VFPV2_STATE_COUNT) {
1206 max = 32;
1207 } else {
1208 max = 64;
1209 }
5ba3f43e 1210
0a7de745
A
1211 state = (struct arm_vfp_state *) tstate;
1212 thread_state = neon_state32(thread->machine.uNeon);
1213 /* ARM64 TODO: combine fpsr and fpcr into state->fpscr */
5ba3f43e 1214
0a7de745 1215 bcopy(state, thread_state, (max + 1) * sizeof(uint32_t));
5ba3f43e 1216
0a7de745
A
1217 thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
1218 thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
1219 break;
1220 }
5ba3f43e
A
1221
1222 case ARM_NEON_STATE:{
1223 arm_neon_state_t *state;
1224 arm_neon_saved_state32_t *thread_state;
1225
0a7de745
A
1226 if (count != ARM_NEON_STATE_COUNT) {
1227 return KERN_INVALID_ARGUMENT;
1228 }
5ba3f43e 1229
0a7de745
A
1230 if (thread_is_64bit_data(thread)) {
1231 return KERN_INVALID_ARGUMENT;
1232 }
5ba3f43e
A
1233
1234 state = (arm_neon_state_t *)tstate;
1235 thread_state = neon_state32(thread->machine.uNeon);
1236
1237 assert(sizeof(*state) == sizeof(*thread_state));
1238 bcopy(state, thread_state, sizeof(arm_neon_state_t));
1239
1240 thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
1241 thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
1242 break;
0a7de745 1243 }
5ba3f43e
A
1244
1245 case ARM_NEON_STATE64:{
1246 arm_neon_state64_t *state;
1247 arm_neon_saved_state64_t *thread_state;
1248
0a7de745
A
1249 if (count != ARM_NEON_STATE64_COUNT) {
1250 return KERN_INVALID_ARGUMENT;
1251 }
5ba3f43e 1252
0a7de745
A
1253 if (!thread_is_64bit_data(thread)) {
1254 return KERN_INVALID_ARGUMENT;
1255 }
5ba3f43e
A
1256
1257 state = (arm_neon_state64_t *)tstate;
1258 thread_state = neon_state64(thread->machine.uNeon);
1259
1260 assert(sizeof(*state) == sizeof(*thread_state));
1261 bcopy(state, thread_state, sizeof(arm_neon_state64_t));
1262
1263 thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
1264 thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;
1265 break;
0a7de745 1266 }
5ba3f43e 1267
cb323159 1268
5ba3f43e 1269 default:
0a7de745 1270 return KERN_INVALID_ARGUMENT;
5ba3f43e 1271 }
0a7de745 1272 return KERN_SUCCESS;
5ba3f43e
A
1273}
1274
cb323159
A
1275mach_vm_address_t
1276machine_thread_pc(thread_t thread)
1277{
1278 struct arm_saved_state *ss = get_user_regs(thread);
1279 return (mach_vm_address_t)get_saved_state_pc(ss);
1280}
1281
1282void
1283machine_thread_reset_pc(thread_t thread, mach_vm_address_t pc)
1284{
1285 set_saved_state_pc(get_user_regs(thread), (register_t)pc);
1286}
1287
/*
 * Routine: machine_thread_state_initialize
 *
 * Reset a thread's saved integer and NEON state to a clean baseline.
 * Returns KERN_SUCCESS unconditionally.
 */
kern_return_t
machine_thread_state_initialize(thread_t thread)
{
	arm_context_t *context = thread->machine.contextData;

	/*
	 * Should always be set up later. For a kernel thread, we don't care
	 * about this state. For a user thread, we'll set the state up in
	 * setup_wqthread, bsdthread_create, load_main(), or load_unixthread().
	 */

	if (context != NULL) {
		/* Zero the integer and NEON save areas ... */
		bzero(&context->ss.uss, sizeof(context->ss.uss));
		bzero(&context->ns.uns, sizeof(context->ns.uns));

		/* ... but restore a valid FPCR; an all-zero FPCR is not the default. */
		if (context->ns.nsh.flavor == ARM_NEON_SAVED_STATE64) {
			context->ns.ns_64.fpcr = FPCR_DEFAULT;
		} else {
			context->ns.ns_32.fpcr = FPCR_DEFAULT_32;
		}
	}

	/* No hardware debug (breakpoint/watchpoint) state until explicitly set. */
	thread->machine.DebugData = NULL;

#if defined(HAS_APPLE_PAC)
	/* Sign the initial user-space thread state */
	if (thread->machine.upcb != NULL) {
		ml_sign_thread_state(thread->machine.upcb, 0, 0, 0, 0, 0);
	}
#endif /* defined(HAS_APPLE_PAC) */

	return KERN_SUCCESS;
}
1325
/*
 * Routine: machine_thread_dup
 *
 * Copy the machine-dependent user state (TSD base and saved registers)
 * from `self` to `target`, e.g. during fork/corpse creation.
 * Returns KERN_SUCCESS unconditionally.
 */
kern_return_t
machine_thread_dup(thread_t self,
    thread_t target,
    __unused boolean_t is_corpse)
{
	struct arm_saved_state *self_saved_state;
	struct arm_saved_state *target_saved_state;

	/* Duplicate the cthread (TLS) pointer along with the register state. */
	target->machine.cthread_self = self->machine.cthread_self;

	self_saved_state = self->machine.upcb;
	target_saved_state = target->machine.upcb;
	bcopy(self_saved_state, target_saved_state, sizeof(struct arm_saved_state));
#if defined(HAS_APPLE_PAC)
	/*
	 * The copied state carries the source thread's PAC signature; re-sign
	 * it for the target. Corpses are exempt (not runnable).
	 */
	if (!is_corpse && is_saved_state64(self_saved_state)) {
		check_and_sign_copied_thread_state(target_saved_state, self_saved_state);
	}
#endif /* defined(HAS_APPLE_PAC) */

	return KERN_SUCCESS;
}
1351
1352/*
cb323159 1353 * Routine: get_user_regs
5ba3f43e
A
1354 *
1355 */
1356struct arm_saved_state *
cb323159 1357get_user_regs(thread_t thread)
5ba3f43e 1358{
0a7de745 1359 return thread->machine.upcb;
5ba3f43e
A
1360}
1361
1362arm_neon_saved_state_t *
cb323159 1363get_user_neon_regs(thread_t thread)
5ba3f43e 1364{
0a7de745 1365 return thread->machine.uNeon;
5ba3f43e
A
1366}
1367
1368/*
cb323159 1369 * Routine: find_user_regs
5ba3f43e
A
1370 *
1371 */
1372struct arm_saved_state *
cb323159 1373find_user_regs(thread_t thread)
5ba3f43e 1374{
0a7de745 1375 return thread->machine.upcb;
5ba3f43e
A
1376}
1377
1378/*
cb323159 1379 * Routine: find_kern_regs
5ba3f43e
A
1380 *
1381 */
1382struct arm_saved_state *
cb323159 1383find_kern_regs(thread_t thread)
5ba3f43e
A
1384{
1385 /*
0a7de745
A
1386 * This works only for an interrupted kernel thread
1387 */
1388 if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL) {
1389 return (struct arm_saved_state *) NULL;
1390 } else {
1391 return getCpuDatap()->cpu_int_state;
1392 }
5ba3f43e
A
1393}
1394
1395arm_debug_state32_t *
cb323159 1396find_debug_state32(thread_t thread)
5ba3f43e 1397{
0a7de745 1398 if (thread && thread->machine.DebugData) {
5ba3f43e 1399 return &(thread->machine.DebugData->uds.ds32);
0a7de745 1400 } else {
5ba3f43e 1401 return NULL;
0a7de745 1402 }
5ba3f43e
A
1403}
1404
1405arm_debug_state64_t *
cb323159 1406find_debug_state64(thread_t thread)
5ba3f43e 1407{
0a7de745 1408 if (thread && thread->machine.DebugData) {
5ba3f43e 1409 return &(thread->machine.DebugData->uds.ds64);
0a7de745 1410 } else {
5ba3f43e 1411 return NULL;
0a7de745 1412 }
5ba3f43e
A
1413}
1414
1415/*
cb323159 1416 * Routine: thread_userstack
5ba3f43e
A
1417 *
1418 */
1419kern_return_t
cb323159
A
1420thread_userstack(__unused thread_t thread,
1421 int flavor,
1422 thread_state_t tstate,
1423 unsigned int count,
1424 mach_vm_offset_t * user_stack,
1425 int * customstack,
1426 boolean_t is_64bit_data
1427 )
5ba3f43e
A
1428{
1429 register_t sp;
1430
1431 switch (flavor) {
1432 case ARM_THREAD_STATE:
1433 if (count == ARM_UNIFIED_THREAD_STATE_COUNT) {
1434#if __arm64__
d9a64523 1435 if (is_64bit_data) {
5ba3f43e
A
1436 sp = ((arm_unified_thread_state_t *)tstate)->ts_64.sp;
1437 } else
1438#endif
1439 {
1440 sp = ((arm_unified_thread_state_t *)tstate)->ts_32.sp;
1441 }
1442
1443 break;
1444 }
1445
1446 /* INTENTIONAL FALL THROUGH (see machine_thread_set_state) */
1447 case ARM_THREAD_STATE32:
0a7de745
A
1448 if (count != ARM_THREAD_STATE32_COUNT) {
1449 return KERN_INVALID_ARGUMENT;
1450 }
1451 if (is_64bit_data) {
1452 return KERN_INVALID_ARGUMENT;
1453 }
5ba3f43e
A
1454
1455 sp = ((arm_thread_state32_t *)tstate)->sp;
1456 break;
1457#if __arm64__
1458 case ARM_THREAD_STATE64:
0a7de745
A
1459 if (count != ARM_THREAD_STATE64_COUNT) {
1460 return KERN_INVALID_ARGUMENT;
1461 }
1462 if (!is_64bit_data) {
1463 return KERN_INVALID_ARGUMENT;
1464 }
5ba3f43e
A
1465
1466 sp = ((arm_thread_state32_t *)tstate)->sp;
1467 break;
1468#endif
1469 default:
0a7de745 1470 return KERN_INVALID_ARGUMENT;
5ba3f43e
A
1471 }
1472
1473 if (sp) {
1474 *user_stack = CAST_USER_ADDR_T(sp);
0a7de745 1475 if (customstack) {
5ba3f43e 1476 *customstack = 1;
0a7de745 1477 }
5ba3f43e
A
1478 } else {
1479 *user_stack = CAST_USER_ADDR_T(USRSTACK64);
0a7de745 1480 if (customstack) {
5ba3f43e 1481 *customstack = 0;
0a7de745 1482 }
5ba3f43e
A
1483 }
1484
0a7de745 1485 return KERN_SUCCESS;
5ba3f43e
A
1486}
1487
1488/*
1489 * thread_userstackdefault:
1490 *
1491 * Return the default stack location for the
1492 * thread, if otherwise unknown.
1493 */
1494kern_return_t
cb323159
A
1495thread_userstackdefault(mach_vm_offset_t * default_user_stack,
1496 boolean_t is64bit)
5ba3f43e
A
1497{
1498 if (is64bit) {
1499 *default_user_stack = USRSTACK64;
1500 } else {
1501 *default_user_stack = USRSTACK;
1502 }
1503
0a7de745 1504 return KERN_SUCCESS;
5ba3f43e
A
1505}
1506
1507/*
cb323159 1508 * Routine: thread_setuserstack
5ba3f43e
A
1509 *
1510 */
1511void
cb323159
A
1512thread_setuserstack(thread_t thread,
1513 mach_vm_address_t user_stack)
5ba3f43e
A
1514{
1515 struct arm_saved_state *sv;
1516
1517 sv = get_user_regs(thread);
1518
1519 set_saved_state_sp(sv, user_stack);
1520
1521 return;
1522}
1523
1524/*
cb323159 1525 * Routine: thread_adjuserstack
5ba3f43e
A
1526 *
1527 */
1528uint64_t
cb323159
A
1529thread_adjuserstack(thread_t thread,
1530 int adjust)
5ba3f43e
A
1531{
1532 struct arm_saved_state *sv;
1533 uint64_t sp;
1534
1535 sv = get_user_regs(thread);
1536
1537 sp = get_saved_state_sp(sv);
1538 sp += adjust;
1539 set_saved_state_sp(sv, sp);;
1540
1541 return sp;
1542}
1543
1544/*
cb323159 1545 * Routine: thread_setentrypoint
5ba3f43e
A
1546 *
1547 */
1548void
cb323159
A
1549thread_setentrypoint(thread_t thread,
1550 mach_vm_offset_t entry)
5ba3f43e
A
1551{
1552 struct arm_saved_state *sv;
1553
1554 sv = get_user_regs(thread);
1555
1556 set_saved_state_pc(sv, entry);
1557
1558 return;
1559}
1560
1561/*
cb323159 1562 * Routine: thread_entrypoint
5ba3f43e
A
1563 *
1564 */
1565kern_return_t
cb323159
A
1566thread_entrypoint(__unused thread_t thread,
1567 int flavor,
1568 thread_state_t tstate,
1569 unsigned int count __unused,
1570 mach_vm_offset_t * entry_point
1571 )
5ba3f43e
A
1572{
1573 switch (flavor) {
1574 case ARM_THREAD_STATE:
0a7de745
A
1575 {
1576 struct arm_thread_state *state;
5ba3f43e 1577
0a7de745 1578 state = (struct arm_thread_state *) tstate;
5ba3f43e 1579
0a7de745
A
1580 /*
1581 * If a valid entry point is specified, use it.
1582 */
1583 if (state->pc) {
1584 *entry_point = CAST_USER_ADDR_T(state->pc);
1585 } else {
1586 *entry_point = CAST_USER_ADDR_T(VM_MIN_ADDRESS);
5ba3f43e 1587 }
0a7de745
A
1588 }
1589 break;
5ba3f43e
A
1590
1591 case ARM_THREAD_STATE64:
0a7de745
A
1592 {
1593 struct arm_thread_state64 *state;
5ba3f43e 1594
0a7de745 1595 state = (struct arm_thread_state64*) tstate;
5ba3f43e 1596
0a7de745
A
1597 /*
1598 * If a valid entry point is specified, use it.
1599 */
1600 if (state->pc) {
1601 *entry_point = CAST_USER_ADDR_T(state->pc);
1602 } else {
1603 *entry_point = CAST_USER_ADDR_T(VM_MIN_ADDRESS);
5ba3f43e 1604 }
0a7de745
A
1605
1606 break;
1607 }
5ba3f43e 1608 default:
0a7de745 1609 return KERN_INVALID_ARGUMENT;
5ba3f43e
A
1610 }
1611
0a7de745 1612 return KERN_SUCCESS;
5ba3f43e
A
1613}
1614
1615
1616/*
cb323159 1617 * Routine: thread_set_child
5ba3f43e
A
1618 *
1619 */
1620void
cb323159
A
1621thread_set_child(thread_t child,
1622 int pid)
5ba3f43e
A
1623{
1624 struct arm_saved_state *child_state;
1625
1626 child_state = get_user_regs(child);
1627
1628 set_saved_state_reg(child_state, 0, pid);
1629 set_saved_state_reg(child_state, 1, 1ULL);
1630}
1631
1632
1633/*
cb323159 1634 * Routine: thread_set_parent
5ba3f43e
A
1635 *
1636 */
1637void
cb323159
A
1638thread_set_parent(thread_t parent,
1639 int pid)
5ba3f43e
A
1640{
1641 struct arm_saved_state *parent_state;
1642
1643 parent_state = get_user_regs(parent);
1644
1645 set_saved_state_reg(parent_state, 0, pid);
1646 set_saved_state_reg(parent_state, 1, 0);
1647}
1648
1649
/*
 * Snapshot of a thread's user register (and, with VFP, NEON) state, as
 * captured by act_thread_csave() and reapplied by act_thread_catt().
 */
struct arm_act_context {
	struct arm_unified_thread_state ss;     /* integer/control register state */
#if __ARM_VFP__
	struct arm_neon_saved_state ns;         /* floating-point/SIMD state */
#endif
};
1656
1657/*
cb323159 1658 * Routine: act_thread_csave
5ba3f43e
A
1659 *
1660 */
cb323159 1661void *
5ba3f43e
A
1662act_thread_csave(void)
1663{
1664 struct arm_act_context *ic;
1665 kern_return_t kret;
1666 unsigned int val;
1667 thread_t thread = current_thread();
1668
1669 ic = (struct arm_act_context *) kalloc(sizeof(struct arm_act_context));
0a7de745
A
1670 if (ic == (struct arm_act_context *) NULL) {
1671 return (void *) 0;
1672 }
5ba3f43e
A
1673
1674 val = ARM_UNIFIED_THREAD_STATE_COUNT;
1675 kret = machine_thread_get_state(thread, ARM_THREAD_STATE, (thread_state_t)&ic->ss, &val);
1676 if (kret != KERN_SUCCESS) {
1677 kfree(ic, sizeof(struct arm_act_context));
0a7de745 1678 return (void *) 0;
5ba3f43e
A
1679 }
1680
1681#if __ARM_VFP__
d9a64523 1682 if (thread_is_64bit_data(thread)) {
5ba3f43e
A
1683 val = ARM_NEON_STATE64_COUNT;
1684 kret = machine_thread_get_state(thread,
0a7de745 1685 ARM_NEON_STATE64,
cb323159 1686 (thread_state_t)&ic->ns,
0a7de745 1687 &val);
5ba3f43e
A
1688 } else {
1689 val = ARM_NEON_STATE_COUNT;
1690 kret = machine_thread_get_state(thread,
0a7de745 1691 ARM_NEON_STATE,
cb323159 1692 (thread_state_t)&ic->ns,
0a7de745 1693 &val);
5ba3f43e
A
1694 }
1695 if (kret != KERN_SUCCESS) {
1696 kfree(ic, sizeof(struct arm_act_context));
0a7de745 1697 return (void *) 0;
5ba3f43e
A
1698 }
1699#endif
0a7de745 1700 return ic;
5ba3f43e
A
1701}
1702
/*
 * Routine: act_thread_catt
 *
 * Reapply a context captured by act_thread_csave() to the current
 * thread, then free it. A NULL context is a no-op. The context is
 * freed even if restoring any portion of the state fails.
 */
void
act_thread_catt(void * ctx)
{
	struct arm_act_context *ic;
	kern_return_t kret;
	thread_t thread = current_thread();

	ic = (struct arm_act_context *) ctx;
	if (ic == (struct arm_act_context *) NULL) {
		return;
	}

	kret = machine_thread_set_state(thread, ARM_THREAD_STATE, (thread_state_t)&ic->ss, ARM_UNIFIED_THREAD_STATE_COUNT);
	if (kret != KERN_SUCCESS) {
		goto out;
	}

#if __ARM_VFP__
	/* Restore the NEON state matching the thread's data bitness. */
	if (thread_is_64bit_data(thread)) {
		kret = machine_thread_set_state(thread,
		    ARM_NEON_STATE64,
		    (thread_state_t)&ic->ns,
		    ARM_NEON_STATE64_COUNT);
	} else {
		kret = machine_thread_set_state(thread,
		    ARM_NEON_STATE,
		    (thread_state_t)&ic->ns,
		    ARM_NEON_STATE_COUNT);
	}
	if (kret != KERN_SUCCESS) {
		goto out;
	}
#endif
out:
	kfree(ic, sizeof(struct arm_act_context));
}
1743
/*
 * Routine: act_thread_cfree
 *
 * Free a context captured by act_thread_csave() without applying it.
 * (Header previously mislabeled this routine as act_thread_catt.)
 */
void
act_thread_cfree(void *ctx)
{
	kfree(ctx, sizeof(struct arm_act_context));
}
1753
/*
 * Install a 32-bit workqueue thread state into `thread`, forcing the
 * default user32 CPSR. Locks the thread only when it is not the caller.
 * Always returns KERN_SUCCESS.
 */
kern_return_t
thread_set_wq_state32(thread_t thread,
    thread_state_t tstate)
{
	arm_thread_state_t *state;
	struct arm_saved_state *saved_state;
	struct arm_saved_state32 *saved_state_32;
	thread_t curth = current_thread();
	spl_t s = 0;

	assert(!thread_is_64bit_data(thread));

	saved_state = thread->machine.upcb;
	saved_state_32 = saved_state32(saved_state);

	state = (arm_thread_state_t *)tstate;

	/* Remote threads must be locked (at splsched) while their state changes. */
	if (curth != thread) {
		s = splsched();
		thread_lock(thread);
	}

	/*
	 * do not zero saved_state, it can be concurrently accessed
	 * and zero is not a valid state for some of the registers,
	 * like sp.
	 */
	thread_state32_to_saved_state(state, saved_state);
	saved_state_32->cpsr = PSR64_USER32_DEFAULT;

	if (curth != thread) {
		thread_unlock(thread);
		splx(s);
	}

	return KERN_SUCCESS;
}
1791
/*
 * Install a 64-bit workqueue thread state into `thread`, forcing the
 * default user64 CPSR. Locks the thread only when it is not the caller.
 * Always returns KERN_SUCCESS.
 */
kern_return_t
thread_set_wq_state64(thread_t thread,
    thread_state_t tstate)
{
	arm_thread_state64_t *state;
	struct arm_saved_state *saved_state;
	struct arm_saved_state64 *saved_state_64;
	thread_t curth = current_thread();
	spl_t s = 0;

	assert(thread_is_64bit_data(thread));

	saved_state = thread->machine.upcb;
	saved_state_64 = saved_state64(saved_state);
	state = (arm_thread_state64_t *)tstate;

	/* Remote threads must be locked (at splsched) while their state changes. */
	if (curth != thread) {
		s = splsched();
		thread_lock(thread);
	}

	/*
	 * do not zero saved_state, it can be concurrently accessed
	 * and zero is not a valid state for some of the registers,
	 * like sp.
	 */
	thread_state64_to_saved_state(state, saved_state);
	/* Setter (not a raw field store) so any PAC re-signing is applied. */
	set_saved_state_cpsr(saved_state, PSR64_USER64_DEFAULT);

	if (curth != thread) {
		thread_unlock(thread);
		splx(s);
	}

	return KERN_SUCCESS;
}