/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <debug.h>
#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/thread_status.h>
#include <kern/thread.h>
#include <kern/kalloc.h>
#include <arm/vmparam.h>
#include <arm/cpu_data_internal.h>
#include <arm64/proc_reg.h>
#if __has_feature(ptrauth_calls)
#include <ptrauth.h>
#endif

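/*
 * Legacy VFPv2 subset of the VFP state: 32 single-precision registers plus
 * FPSCR. The ARM_VFP_STATE handlers below accept the shorter
 * ARM_VFPV2_STATE_COUNT from older clients and copy only this prefix.
 */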
struct arm_vfpv2_state {
	__uint32_t __r[32];
	__uint32_t __fpscr;
};

typedef struct arm_vfpv2_state arm_vfpv2_state_t;

#define ARM_VFPV2_STATE_COUNT \
	((mach_msg_type_number_t)(sizeof (arm_vfpv2_state_t)/sizeof(uint32_t)))

/*
 * Forward declarations
 */
void thread_set_child(thread_t child, int pid);
void thread_set_parent(thread_t parent, int pid);

/*
 * Maps state flavor to number of words in the state:
 */
/* __private_extern__ */
unsigned int _MachineStateCount[] = {
	[ARM_UNIFIED_THREAD_STATE] = ARM_UNIFIED_THREAD_STATE_COUNT,
	[ARM_VFP_STATE] = ARM_VFP_STATE_COUNT,
	[ARM_EXCEPTION_STATE] = ARM_EXCEPTION_STATE_COUNT,
	[ARM_DEBUG_STATE] = ARM_DEBUG_STATE_COUNT,
	[ARM_THREAD_STATE64] = ARM_THREAD_STATE64_COUNT,
	[ARM_EXCEPTION_STATE64] = ARM_EXCEPTION_STATE64_COUNT,
	[ARM_THREAD_STATE32] = ARM_THREAD_STATE32_COUNT,
	[ARM_DEBUG_STATE32] = ARM_DEBUG_STATE32_COUNT,
	[ARM_DEBUG_STATE64] = ARM_DEBUG_STATE64_COUNT,
	[ARM_NEON_STATE] = ARM_NEON_STATE_COUNT,
	[ARM_NEON_STATE64] = ARM_NEON_STATE64_COUNT,
	[ARM_PAGEIN_STATE] = ARM_PAGEIN_STATE_COUNT,
};

extern zone_t ads_zone;

#if __arm64__
/*
 * Copy values from saved_state to ts64.
 */
void
saved_state_to_thread_state64(const arm_saved_state_t * saved_state,
    arm_thread_state64_t * ts64)
{
	uint32_t i;

	assert(is_saved_state64(saved_state));

	ts64->fp = get_saved_state_fp(saved_state);
	ts64->lr = get_saved_state_lr(saved_state);
	ts64->sp = get_saved_state_sp(saved_state);
	ts64->pc = get_saved_state_pc(saved_state);
	ts64->cpsr = get_saved_state_cpsr(saved_state);
	for (i = 0; i < 29; i++) {
		ts64->x[i] = get_saved_state_reg(saved_state, i);
	}
}

/*
 * Copy values from ts64 to saved_state.
 */
void
thread_state64_to_saved_state(const arm_thread_state64_t * ts64,
    arm_saved_state_t * saved_state)
{
	uint32_t i;
#if __has_feature(ptrauth_calls)
	boolean_t intr = ml_set_interrupts_enabled(FALSE);
#endif /* __has_feature(ptrauth_calls) */

	assert(is_saved_state64(saved_state));

	set_saved_state_cpsr(saved_state, (ts64->cpsr & ~PSR64_MODE_MASK) | PSR64_MODE_RW_64);
#if __has_feature(ptrauth_calls)
	/*
	 * Make writes to ts64->cpsr visible first, since it's useful as a
	 * canary to detect thread-state corruption.
	 */
	__builtin_arm_dmb(DMB_ST);
#endif
	set_saved_state_fp(saved_state, ts64->fp);
	set_saved_state_lr(saved_state, ts64->lr);
	set_saved_state_sp(saved_state, ts64->sp);
	set_saved_state_pc(saved_state, ts64->pc);
	for (i = 0; i < 29; i++) {
		set_saved_state_reg(saved_state, i, ts64->x[i]);
	}

#if __has_feature(ptrauth_calls)
	ml_set_interrupts_enabled(intr);
#endif /* __has_feature(ptrauth_calls) */
}

#endif /* __arm64__ */

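/*
 * Static helpers shared by machine_thread_get_state() and
 * machine_thread_set_state(): each validates the caller-supplied count and
 * the width of the saved state before copying registers.
 */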
static kern_return_t
handle_get_arm32_thread_state(thread_state_t tstate,
    mach_msg_type_number_t * count,
    const arm_saved_state_t * saved_state)
{
	if (*count < ARM_THREAD_STATE32_COUNT) {
		return KERN_INVALID_ARGUMENT;
	}
	if (!is_saved_state32(saved_state)) {
		return KERN_INVALID_ARGUMENT;
	}

	(void)saved_state_to_thread_state32(saved_state, (arm_thread_state32_t *)tstate);
	*count = ARM_THREAD_STATE32_COUNT;
	return KERN_SUCCESS;
}

static kern_return_t
handle_get_arm64_thread_state(thread_state_t tstate,
    mach_msg_type_number_t * count,
    const arm_saved_state_t * saved_state)
{
	if (*count < ARM_THREAD_STATE64_COUNT) {
		return KERN_INVALID_ARGUMENT;
	}
	if (!is_saved_state64(saved_state)) {
		return KERN_INVALID_ARGUMENT;
	}

	(void)saved_state_to_thread_state64(saved_state, (arm_thread_state64_t *)tstate);
	*count = ARM_THREAD_STATE64_COUNT;
	return KERN_SUCCESS;
}


static kern_return_t
handle_get_arm_thread_state(thread_state_t tstate,
    mach_msg_type_number_t * count,
    const arm_saved_state_t * saved_state)
{
	/* In an arm64 world, this flavor can be used to retrieve the thread
	 * state of a 32-bit or 64-bit thread into a unified structure, but we
	 * need to support legacy clients who are only aware of 32-bit, so
	 * check the count to see what the client is expecting.
	 */
	if (*count < ARM_UNIFIED_THREAD_STATE_COUNT) {
		return handle_get_arm32_thread_state(tstate, count, saved_state);
	}

	arm_unified_thread_state_t *unified_state = (arm_unified_thread_state_t *) tstate;
	bzero(unified_state, sizeof(*unified_state));
#if __arm64__
	if (is_saved_state64(saved_state)) {
		unified_state->ash.flavor = ARM_THREAD_STATE64;
		unified_state->ash.count = ARM_THREAD_STATE64_COUNT;
		(void)saved_state_to_thread_state64(saved_state, thread_state64(unified_state));
	} else
#endif
	{
		unified_state->ash.flavor = ARM_THREAD_STATE32;
		unified_state->ash.count = ARM_THREAD_STATE32_COUNT;
		(void)saved_state_to_thread_state32(saved_state, thread_state32(unified_state));
	}
	*count = ARM_UNIFIED_THREAD_STATE_COUNT;
	return KERN_SUCCESS;
}


static kern_return_t
handle_set_arm32_thread_state(const thread_state_t tstate,
    mach_msg_type_number_t count,
    arm_saved_state_t * saved_state)
{
	if (count != ARM_THREAD_STATE32_COUNT) {
		return KERN_INVALID_ARGUMENT;
	}

	(void)thread_state32_to_saved_state((const arm_thread_state32_t *)tstate, saved_state);
	return KERN_SUCCESS;
}

static kern_return_t
handle_set_arm64_thread_state(const thread_state_t tstate,
    mach_msg_type_number_t count,
    arm_saved_state_t * saved_state)
{
	if (count != ARM_THREAD_STATE64_COUNT) {
		return KERN_INVALID_ARGUMENT;
	}

	(void)thread_state64_to_saved_state((const arm_thread_state64_t *)tstate, saved_state);
	return KERN_SUCCESS;
}


static kern_return_t
handle_set_arm_thread_state(const thread_state_t tstate,
    mach_msg_type_number_t count,
    arm_saved_state_t * saved_state)
{
	/* In an arm64 world, this flavor can be used to set the thread state of a
	 * 32-bit or 64-bit thread from a unified structure, but we need to support
	 * legacy clients who are only aware of 32-bit, so check the count to see
	 * what the client is expecting.
	 */
	if (count < ARM_UNIFIED_THREAD_STATE_COUNT) {
		if (!is_saved_state32(saved_state)) {
			return KERN_INVALID_ARGUMENT;
		}
		return handle_set_arm32_thread_state(tstate, count, saved_state);
	}

	const arm_unified_thread_state_t *unified_state = (const arm_unified_thread_state_t *) tstate;
#if __arm64__
	if (is_thread_state64(unified_state)) {
		if (!is_saved_state64(saved_state)) {
			return KERN_INVALID_ARGUMENT;
		}
		(void)thread_state64_to_saved_state(const_thread_state64(unified_state), saved_state);
	} else
#endif
	{
		if (!is_saved_state32(saved_state)) {
			return KERN_INVALID_ARGUMENT;
		}
		(void)thread_state32_to_saved_state(const_thread_state32(unified_state), saved_state);
	}

	return KERN_SUCCESS;
}


/*
 * Translate thread state arguments to userspace representation
 */
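/*
 * On ptrauth-enabled hardware, the control-flow registers handed back to
 * userspace are signed rather than returned raw: pc and lr with
 * ptrauth_key_process_independent_code (discriminators "pc" and "lr"),
 * sp and fp with ptrauth_key_process_independent_data ("sp" and "fp").
 * ts64->flags records whether lr was already IB-signed, or whether ptrauth
 * is disabled for the thread entirely.
 */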

kern_return_t
machine_thread_state_convert_to_user(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t *count)
{
#if __has_feature(ptrauth_calls)
	arm_thread_state64_t *ts64;

	switch (flavor) {
	case ARM_THREAD_STATE:
	{
		arm_unified_thread_state_t *unified_state = (arm_unified_thread_state_t *)tstate;

		if (*count < ARM_UNIFIED_THREAD_STATE_COUNT || !is_thread_state64(unified_state)) {
			return KERN_SUCCESS;
		}
		ts64 = thread_state64(unified_state);
		break;
	}
	case ARM_THREAD_STATE64:
	{
		if (*count < ARM_THREAD_STATE64_COUNT) {
			return KERN_SUCCESS;
		}
		ts64 = (arm_thread_state64_t *)tstate;
		break;
	}
	default:
		return KERN_SUCCESS;
	}

	// Note that kernel threads never have disable_user_jop set
	if (current_thread()->machine.disable_user_jop || !thread_is_64bit_addr(current_thread()) ||
	    thread->machine.disable_user_jop || !thread_is_64bit_addr(thread) ||
	    (BootArgs->bootFlags & kBootFlagsDisableUserThreadStateJOP)) {
		ts64->flags = __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH;
		return KERN_SUCCESS;
	}

	ts64->flags = 0;
	if (ts64->lr) {
		// lr might contain an IB-signed return address (strip is a no-op on unsigned addresses)
		uintptr_t stripped_lr = (uintptr_t)ptrauth_strip((void *)ts64->lr,
		    ptrauth_key_return_address);
		if (ts64->lr != stripped_lr) {
			// Need to allow already-signed lr value to round-trip as is
			ts64->flags |= __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR;
		}
		// Note that an IB-signed return address that happens to have a 0 signature value
		// will round-trip correctly even if IA-signed again below (and IA-authd later)
	}

	if (BootArgs->bootFlags & kBootFlagsDisableUserJOP) {
		return KERN_SUCCESS;
	}

	if (ts64->pc) {
		ts64->pc = (uintptr_t)pmap_sign_user_ptr((void*)ts64->pc,
		    ptrauth_key_process_independent_code, ptrauth_string_discriminator("pc"));
	}
	if (ts64->lr && !(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR)) {
		ts64->lr = (uintptr_t)pmap_sign_user_ptr((void*)ts64->lr,
		    ptrauth_key_process_independent_code, ptrauth_string_discriminator("lr"));
	}
	if (ts64->sp) {
		ts64->sp = (uintptr_t)pmap_sign_user_ptr((void*)ts64->sp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("sp"));
	}
	if (ts64->fp) {
		ts64->fp = (uintptr_t)pmap_sign_user_ptr((void*)ts64->fp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("fp"));
	}

	return KERN_SUCCESS;
#else
	// No conversion to userspace representation on this platform
	(void)thread; (void)flavor; (void)tstate; (void)count;
	return KERN_SUCCESS;
#endif /* __has_feature(ptrauth_calls) */
}

/*
 * Translate thread state arguments from userspace representation
 */
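/*
 * Inverse of machine_thread_state_convert_to_user(): the signed pc/lr/sp/fp
 * values are authenticated (pmap_auth_user_ptr) rather than re-signed. A
 * forged signature does not come back as a usable raw pointer -- a failed
 * auth poisons the value, leaving the resulting thread state invalid.
 */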

kern_return_t
machine_thread_state_convert_from_user(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t count)
{
#if __has_feature(ptrauth_calls)
	arm_thread_state64_t *ts64;

	switch (flavor) {
	case ARM_THREAD_STATE:
	{
		arm_unified_thread_state_t *unified_state = (arm_unified_thread_state_t *)tstate;

		if (count < ARM_UNIFIED_THREAD_STATE_COUNT || !is_thread_state64(unified_state)) {
			return KERN_SUCCESS;
		}
		ts64 = thread_state64(unified_state);
		break;
	}
	case ARM_THREAD_STATE64:
	{
		if (count != ARM_THREAD_STATE64_COUNT) {
			return KERN_SUCCESS;
		}
		ts64 = (arm_thread_state64_t *)tstate;
		break;
	}
	default:
		return KERN_SUCCESS;
	}

	// Note that kernel threads never have disable_user_jop set
	if (current_thread()->machine.disable_user_jop || !thread_is_64bit_addr(current_thread())) {
		if (thread->machine.disable_user_jop || !thread_is_64bit_addr(thread)) {
			ts64->flags = __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH;
			return KERN_SUCCESS;
		}
		// A JOP-disabled process must not set thread state on a JOP-enabled process
		return KERN_PROTECTION_FAILURE;
	}

	if (ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH) {
		if (thread->machine.disable_user_jop || !thread_is_64bit_addr(thread) ||
		    (BootArgs->bootFlags & kBootFlagsDisableUserThreadStateJOP)) {
			return KERN_SUCCESS;
		}
		// Disallow setting unsigned thread state on JOP-enabled processes.
		// Ignore the flag and treat the thread state arguments as signed;
		// ptrauth poisoning will cause the resulting thread state to be invalid.
		ts64->flags &= ~__DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH;
	}

	if (ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR) {
		// lr might contain an IB-signed return address (strip is a no-op on unsigned addresses)
		uintptr_t stripped_lr = (uintptr_t)ptrauth_strip((void *)ts64->lr,
		    ptrauth_key_return_address);
		if (ts64->lr == stripped_lr) {
			// Don't allow an unsigned pointer to be passed through as is. Ignore the flag
			// and treat it as IA-signed below (where auth failure may poison the value).
			ts64->flags &= ~__DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR;
		}
		// Note that an IB-signed return address that happens to have a 0 signature value
		// will also have been IA-signed (without this flag being set) and so will IA-auth
		// correctly below.
	}

	if (BootArgs->bootFlags & kBootFlagsDisableUserJOP) {
		return KERN_SUCCESS;
	}

	if (ts64->pc) {
		ts64->pc = (uintptr_t)pmap_auth_user_ptr((void*)ts64->pc,
		    ptrauth_key_process_independent_code, ptrauth_string_discriminator("pc"));
	}
	if (ts64->lr && !(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR)) {
		ts64->lr = (uintptr_t)pmap_auth_user_ptr((void*)ts64->lr,
		    ptrauth_key_process_independent_code, ptrauth_string_discriminator("lr"));
	}
	if (ts64->sp) {
		ts64->sp = (uintptr_t)pmap_auth_user_ptr((void*)ts64->sp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("sp"));
	}
	if (ts64->fp) {
		ts64->fp = (uintptr_t)pmap_auth_user_ptr((void*)ts64->fp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("fp"));
	}

	return KERN_SUCCESS;
#else
	// No conversion from userspace representation on this platform
	(void)thread; (void)flavor; (void)tstate; (void)count;
	return KERN_SUCCESS;
#endif /* __has_feature(ptrauth_calls) */
}

/*
 * Translate signal context data pointer to userspace representation
 */

kern_return_t
machine_thread_siguctx_pointer_convert_to_user(
	__assert_only thread_t thread,
	user_addr_t *uctxp)
{
#if __has_feature(ptrauth_calls)
	if (current_thread()->machine.disable_user_jop || !thread_is_64bit_addr(current_thread())) {
		assert(thread->machine.disable_user_jop || !thread_is_64bit_addr(thread));
		return KERN_SUCCESS;
	}

	if (BootArgs->bootFlags & kBootFlagsDisableUserJOP) {
		return KERN_SUCCESS;
	}

	if (*uctxp) {
		*uctxp = (uintptr_t)pmap_sign_user_ptr((void*)*uctxp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("uctx"));
	}

	return KERN_SUCCESS;
#else
	// No conversion to userspace representation on this platform
	(void)thread; (void)uctxp;
	return KERN_SUCCESS;
#endif /* __has_feature(ptrauth_calls) */
}

/*
 * Translate array of function pointer syscall arguments from userspace representation
 */

kern_return_t
machine_thread_function_pointers_convert_from_user(
	__assert_only thread_t thread,
	user_addr_t *fptrs,
	uint32_t count)
{
#if __has_feature(ptrauth_calls)
	if (current_thread()->machine.disable_user_jop || !thread_is_64bit_addr(current_thread())) {
		assert(thread->machine.disable_user_jop || !thread_is_64bit_addr(thread));
		return KERN_SUCCESS;
	}

	if (BootArgs->bootFlags & kBootFlagsDisableUserJOP) {
		return KERN_SUCCESS;
	}

	while (count--) {
		if (*fptrs) {
			*fptrs = (uintptr_t)pmap_auth_user_ptr((void*)*fptrs,
			    ptrauth_key_function_pointer, 0);
		}
		fptrs++;
	}

	return KERN_SUCCESS;
#else
	// No conversion from userspace representation on this platform
	(void)thread; (void)fptrs; (void)count;
	return KERN_SUCCESS;
#endif /* __has_feature(ptrauth_calls) */
}

/*
 * Routine: machine_thread_get_state
 *
 */
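/*
 * Usage sketch (hypothetical userspace caller, not part of this file):
 *
 *	arm_thread_state64_t ts;
 *	mach_msg_type_number_t cnt = ARM_THREAD_STATE64_COUNT;
 *	kern_return_t kr = thread_get_state(port, ARM_THREAD_STATE64,
 *	    (thread_state_t)&ts, &cnt);
 *
 * The MIG-generated thread_get_state() lands here with *count set to the
 * caller's buffer size in 32-bit words.
 */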
kern_return_t
machine_thread_get_state(thread_t thread,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t * count)
{
	switch (flavor) {
	case THREAD_STATE_FLAVOR_LIST:
		if (*count < 4) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = ARM_EXCEPTION_STATE;
		tstate[3] = ARM_DEBUG_STATE;
		*count = 4;
		break;

	case THREAD_STATE_FLAVOR_LIST_NEW:
		if (*count < 4) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = thread_is_64bit_data(thread) ? ARM_EXCEPTION_STATE64 : ARM_EXCEPTION_STATE;
		tstate[3] = thread_is_64bit_data(thread) ? ARM_DEBUG_STATE64 : ARM_DEBUG_STATE32;
		*count = 4;
		break;

	case THREAD_STATE_FLAVOR_LIST_10_15:
		if (*count < 5) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = thread_is_64bit_data(thread) ? ARM_EXCEPTION_STATE64 : ARM_EXCEPTION_STATE;
		tstate[3] = thread_is_64bit_data(thread) ? ARM_DEBUG_STATE64 : ARM_DEBUG_STATE32;
		tstate[4] = ARM_PAGEIN_STATE;
		*count = 5;
		break;

	case ARM_THREAD_STATE:
	{
		kern_return_t rn = handle_get_arm_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;
	}
	case ARM_THREAD_STATE32:
	{
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		kern_return_t rn = handle_get_arm32_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;
	}
#if __arm64__
	case ARM_THREAD_STATE64:
	{
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		kern_return_t rn = handle_get_arm64_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;
	}
#endif
	case ARM_EXCEPTION_STATE:{
		struct arm_exception_state *state;
		struct arm_saved_state32 *saved_state;

		if (*count < ARM_EXCEPTION_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (struct arm_exception_state *) tstate;
		saved_state = saved_state32(thread->machine.upcb);

		state->exception = saved_state->exception;
		state->fsr = saved_state->esr;
		state->far = saved_state->far;

		*count = ARM_EXCEPTION_STATE_COUNT;
		break;
	}
	case ARM_EXCEPTION_STATE64:{
		struct arm_exception_state64 *state;
		struct arm_saved_state64 *saved_state;

		if (*count < ARM_EXCEPTION_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (struct arm_exception_state64 *) tstate;
		saved_state = saved_state64(thread->machine.upcb);

		state->exception = saved_state->exception;
		state->far = saved_state->far;
		state->esr = saved_state->esr;

		*count = ARM_EXCEPTION_STATE64_COUNT;
		break;
	}
	case ARM_DEBUG_STATE:{
		arm_legacy_debug_state_t *state;
		arm_debug_state32_t *thread_state;

		if (*count < ARM_LEGACY_DEBUG_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_legacy_debug_state_t *) tstate;
		thread_state = find_debug_state32(thread);

		if (thread_state == NULL) {
			bzero(state, sizeof(arm_legacy_debug_state_t));
		} else {
			bcopy(thread_state, state, sizeof(arm_legacy_debug_state_t));
		}

		*count = ARM_LEGACY_DEBUG_STATE_COUNT;
		break;
	}
	case ARM_DEBUG_STATE32:{
		arm_debug_state32_t *state;
		arm_debug_state32_t *thread_state;

		if (*count < ARM_DEBUG_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_debug_state32_t *) tstate;
		thread_state = find_debug_state32(thread);

		if (thread_state == NULL) {
			bzero(state, sizeof(arm_debug_state32_t));
		} else {
			bcopy(thread_state, state, sizeof(arm_debug_state32_t));
		}

		*count = ARM_DEBUG_STATE32_COUNT;
		break;
	}

	case ARM_DEBUG_STATE64:{
		arm_debug_state64_t *state;
		arm_debug_state64_t *thread_state;

		if (*count < ARM_DEBUG_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_debug_state64_t *) tstate;
		thread_state = find_debug_state64(thread);

		if (thread_state == NULL) {
			bzero(state, sizeof(arm_debug_state64_t));
		} else {
			bcopy(thread_state, state, sizeof(arm_debug_state64_t));
		}

		*count = ARM_DEBUG_STATE64_COUNT;
		break;
	}

	case ARM_VFP_STATE:{
		struct arm_vfp_state *state;
		arm_neon_saved_state32_t *thread_state;
		unsigned int max;

		if (*count < ARM_VFP_STATE_COUNT) {
			if (*count < ARM_VFPV2_STATE_COUNT) {
				return KERN_INVALID_ARGUMENT;
			} else {
				*count = ARM_VFPV2_STATE_COUNT;
			}
		}

		if (*count == ARM_VFPV2_STATE_COUNT) {
			max = 32;
		} else {
			max = 64;
		}

		state = (struct arm_vfp_state *) tstate;
		thread_state = neon_state32(thread->machine.uNeon);
		/* ARM64 TODO: set fpsr and fpcr from state->fpscr */

		bcopy(thread_state, state, (max + 1) * sizeof(uint32_t));
		*count = (max + 1);
		break;
	}
	case ARM_NEON_STATE:{
		arm_neon_state_t *state;
		arm_neon_saved_state32_t *thread_state;

		if (*count < ARM_NEON_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_neon_state_t *)tstate;
		thread_state = neon_state32(thread->machine.uNeon);

		assert(sizeof(*thread_state) == sizeof(*state));
		bcopy(thread_state, state, sizeof(arm_neon_state_t));

		*count = ARM_NEON_STATE_COUNT;
		break;
	}

	case ARM_NEON_STATE64:{
		arm_neon_state64_t *state;
		arm_neon_saved_state64_t *thread_state;

		if (*count < ARM_NEON_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_neon_state64_t *)tstate;
		thread_state = neon_state64(thread->machine.uNeon);

		/* For now, these are identical */
		assert(sizeof(*state) == sizeof(*thread_state));
		bcopy(thread_state, state, sizeof(arm_neon_state64_t));

		*count = ARM_NEON_STATE64_COUNT;
		break;
	}


	case ARM_PAGEIN_STATE: {
		arm_pagein_state_t *state;

		if (*count < ARM_PAGEIN_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_pagein_state_t *)tstate;
		state->__pagein_error = thread->t_pagein_error;

		*count = ARM_PAGEIN_STATE_COUNT;
		break;
	}


	default:
		return KERN_INVALID_ARGUMENT;
	}
	return KERN_SUCCESS;
}


/*
 * Routine: machine_thread_get_kern_state
 *
 */
kern_return_t
machine_thread_get_kern_state(thread_t thread,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t * count)
{
	/*
	 * This works only for an interrupted kernel thread
	 */
	if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL) {
		return KERN_FAILURE;
	}

	switch (flavor) {
	case ARM_THREAD_STATE:
	{
		kern_return_t rn = handle_get_arm_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
		if (rn) {
			return rn;
		}
		break;
	}
	case ARM_THREAD_STATE32:
	{
		kern_return_t rn = handle_get_arm32_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
		if (rn) {
			return rn;
		}
		break;
	}
#if __arm64__
	case ARM_THREAD_STATE64:
	{
		kern_return_t rn = handle_get_arm64_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
		if (rn) {
			return rn;
		}
		break;
	}
#endif
	default:
		return KERN_INVALID_ARGUMENT;
	}
	return KERN_SUCCESS;
}

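/*
 * Routine: machine_thread_switch_addrmode
 *
 * Point the saved-state and NEON headers at the flavor matching the task's
 * current address width, and reinitialize the NEON state.
 */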
void
machine_thread_switch_addrmode(thread_t thread)
{
	if (task_has_64Bit_data(thread->task)) {
		thread->machine.upcb->ash.flavor = ARM_SAVED_STATE64;
		thread->machine.upcb->ash.count = ARM_SAVED_STATE64_COUNT;
		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;

		/*
		 * Reinitialize the NEON state.
		 */
		bzero(&thread->machine.uNeon->uns, sizeof(thread->machine.uNeon->uns));
		thread->machine.uNeon->ns_64.fpcr = FPCR_DEFAULT;
	} else {
		thread->machine.upcb->ash.flavor = ARM_SAVED_STATE32;
		thread->machine.upcb->ash.count = ARM_SAVED_STATE32_COUNT;
		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;

		/*
		 * Reinitialize the NEON state.
		 */
		bzero(&thread->machine.uNeon->uns, sizeof(thread->machine.uNeon->uns));
		thread->machine.uNeon->ns_32.fpcr = FPCR_DEFAULT_32;
	}
}

extern long long arm_debug_get(void);

/*
 * Routine: machine_thread_set_state
 *
 */
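/*
 * Example (hypothetical debugger-side sketch, not part of this file):
 * hardware single-step for a 64-bit thread comes down to bit 0 of
 * mdscr_el1 (MDSCR_EL1.SS) in the debug state handled below:
 *
 *	arm_debug_state64_t ds;
 *	bzero(&ds, sizeof(ds));
 *	ds.mdscr_el1 |= 0x1;
 *	thread_set_state(port, ARM_DEBUG_STATE64,
 *	    (thread_state_t)&ds, ARM_DEBUG_STATE64_COUNT);
 */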
kern_return_t
machine_thread_set_state(thread_t thread,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t count)
{
	kern_return_t rn;

	switch (flavor) {
	case ARM_THREAD_STATE:
		rn = handle_set_arm_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;

	case ARM_THREAD_STATE32:
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		rn = handle_set_arm32_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;

#if __arm64__
	case ARM_THREAD_STATE64:
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		rn = handle_set_arm64_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;
#endif
	case ARM_EXCEPTION_STATE:{
		if (count != ARM_EXCEPTION_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		break;
	}
	case ARM_EXCEPTION_STATE64:{
		if (count != ARM_EXCEPTION_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		break;
	}
	case ARM_DEBUG_STATE:
	{
		arm_legacy_debug_state_t *state;
		boolean_t enabled = FALSE;
		unsigned int i;

		if (count != ARM_LEGACY_DEBUG_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_legacy_debug_state_t *) tstate;

		for (i = 0; i < 16; i++) {
			/* do not allow context IDs to be set */
			if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
			    || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
				return KERN_PROTECTION_FAILURE;
			}
			if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
			    || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
				enabled = TRUE;
			}
		}

		if (!enabled) {
			arm_debug_state32_t *thread_state = find_debug_state32(thread);
			if (thread_state != NULL) {
				void *pTmp = thread->machine.DebugData;
				thread->machine.DebugData = NULL;
				zfree(ads_zone, pTmp);
			}
		} else {
			arm_debug_state32_t *thread_state = find_debug_state32(thread);
			if (thread_state == NULL) {
				thread->machine.DebugData = zalloc(ads_zone);
				bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData));
				thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE32;
				thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE32_COUNT;
				thread_state = find_debug_state32(thread);
			}
			assert(NULL != thread_state);

			for (i = 0; i < 16; i++) {
				/* set appropriate privilege; mask out unknown bits */
				thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGBCR_MATCH_MASK
				    | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBGBCR_TYPE_IVA
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK;
				thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBGWCR_ACCESS_CONTROL_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK;
			}

			thread_state->mdscr_el1 = 0ULL; // Legacy customers issuing ARM_DEBUG_STATE don't drive single stepping.
		}

		if (thread == current_thread()) {
			arm_debug_set32(thread->machine.DebugData);
		}

		break;
	}
	case ARM_DEBUG_STATE32:
		/* ARM64_TODO subtle bcr/wcr semantic differences e.g. wcr and ARM_DBGBCR_TYPE_IVA */
	{
		arm_debug_state32_t *state;
		boolean_t enabled = FALSE;
		unsigned int i;

		if (count != ARM_DEBUG_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_debug_state32_t *) tstate;

		if (state->mdscr_el1 & 0x1) {
			enabled = TRUE;
		}

		for (i = 0; i < 16; i++) {
			/* do not allow context IDs to be set */
			if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
			    || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
				return KERN_PROTECTION_FAILURE;
			}
			if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
			    || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
				enabled = TRUE;
			}
		}

		if (!enabled) {
			arm_debug_state32_t *thread_state = find_debug_state32(thread);
			if (thread_state != NULL) {
				void *pTmp = thread->machine.DebugData;
				thread->machine.DebugData = NULL;
				zfree(ads_zone, pTmp);
			}
		} else {
			arm_debug_state32_t *thread_state = find_debug_state32(thread);
			if (thread_state == NULL) {
				thread->machine.DebugData = zalloc(ads_zone);
				bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData));
				thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE32;
				thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE32_COUNT;
				thread_state = find_debug_state32(thread);
			}
			assert(NULL != thread_state);

			if (state->mdscr_el1 & 0x1) {
				thread_state->mdscr_el1 |= 0x1;
			} else {
				thread_state->mdscr_el1 &= ~0x1;
			}

			for (i = 0; i < 16; i++) {
				/* set appropriate privilege; mask out unknown bits */
				thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGBCR_MATCH_MASK
				    | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBGBCR_TYPE_IVA
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK;
				thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBGWCR_ACCESS_CONTROL_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK;
			}
		}

		if (thread == current_thread()) {
			arm_debug_set32(thread->machine.DebugData);
		}

		break;
	}

	case ARM_DEBUG_STATE64:
	{
		arm_debug_state64_t *state;
		boolean_t enabled = FALSE;
		unsigned int i;

		if (count != ARM_DEBUG_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_debug_state64_t *) tstate;

		if (state->mdscr_el1 & 0x1) {
			enabled = TRUE;
		}

		for (i = 0; i < 16; i++) {
			/* do not allow context IDs to be set */
			if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
			    || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
				return KERN_PROTECTION_FAILURE;
			}
			if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
			    || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
				enabled = TRUE;
			}
		}

		if (!enabled) {
			arm_debug_state64_t *thread_state = find_debug_state64(thread);
			if (thread_state != NULL) {
				void *pTmp = thread->machine.DebugData;
				thread->machine.DebugData = NULL;
				zfree(ads_zone, pTmp);
			}
		} else {
			arm_debug_state64_t *thread_state = find_debug_state64(thread);
			if (thread_state == NULL) {
				thread->machine.DebugData = zalloc(ads_zone);
				bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData));
				thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE64;
				thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE64_COUNT;
				thread_state = find_debug_state64(thread);
			}
			assert(NULL != thread_state);

			if (state->mdscr_el1 & 0x1) {
				thread_state->mdscr_el1 |= 0x1;
			} else {
				thread_state->mdscr_el1 &= ~0x1;
			}

			for (i = 0; i < 16; i++) {
				/* set appropriate privilege; mask out unknown bits */
				thread_state->bcr[i] = (state->bcr[i] & (0 /* Was ARM_DBG_CR_ADDRESS_MASK_MASK, deprecated in v8 */
				    | 0 /* Was ARM_DBGBCR_MATCH_MASK, ignored in AArch64 state */
				    | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBGBCR_TYPE_IVA
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK64;
				thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBGWCR_ACCESS_CONTROL_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK64;
			}
		}

		if (thread == current_thread()) {
			arm_debug_set64(thread->machine.DebugData);
		}

		break;
	}

	case ARM_VFP_STATE:{
		struct arm_vfp_state *state;
		arm_neon_saved_state32_t *thread_state;
		unsigned int max;

		if (count != ARM_VFP_STATE_COUNT && count != ARM_VFPV2_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (count == ARM_VFPV2_STATE_COUNT) {
			max = 32;
		} else {
			max = 64;
		}

		state = (struct arm_vfp_state *) tstate;
		thread_state = neon_state32(thread->machine.uNeon);
		/* ARM64 TODO: combine fpsr and fpcr into state->fpscr */

		bcopy(state, thread_state, (max + 1) * sizeof(uint32_t));

		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
		break;
	}

	case ARM_NEON_STATE:{
		arm_neon_state_t *state;
		arm_neon_saved_state32_t *thread_state;

		if (count != ARM_NEON_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_neon_state_t *)tstate;
		thread_state = neon_state32(thread->machine.uNeon);

		assert(sizeof(*state) == sizeof(*thread_state));
		bcopy(state, thread_state, sizeof(arm_neon_state_t));

		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
		break;
	}

	case ARM_NEON_STATE64:{
		arm_neon_state64_t *state;
		arm_neon_saved_state64_t *thread_state;

		if (count != ARM_NEON_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_neon_state64_t *)tstate;
		thread_state = neon_state64(thread->machine.uNeon);

		assert(sizeof(*state) == sizeof(*thread_state));
		bcopy(state, thread_state, sizeof(arm_neon_state64_t));

		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;
		break;
	}


	default:
		return KERN_INVALID_ARGUMENT;
	}
	return KERN_SUCCESS;
}

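/*
 * Routine: machine_thread_pc
 *
 * Return the user-mode pc of the given thread.
 */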
mach_vm_address_t
machine_thread_pc(thread_t thread)
{
	struct arm_saved_state *ss = get_user_regs(thread);
	return (mach_vm_address_t)get_saved_state_pc(ss);
}

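/*
 * Routine: machine_thread_reset_pc
 *
 * Point the thread's user-mode pc at a new address.
 */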
void
machine_thread_reset_pc(thread_t thread, mach_vm_address_t pc)
{
	set_saved_state_pc(get_user_regs(thread), (register_t)pc);
}

/*
 * Routine: machine_thread_state_initialize
 *
 */
kern_return_t
machine_thread_state_initialize(thread_t thread)
{
	arm_context_t *context = thread->machine.contextData;

	/*
	 * Should always be set up later. For a kernel thread, we don't care
	 * about this state. For a user thread, we'll set the state up in
	 * setup_wqthread, bsdthread_create, load_main(), or load_unixthread().
	 */

	if (context != NULL) {
		bzero(&context->ss.uss, sizeof(context->ss.uss));
		bzero(&context->ns.uns, sizeof(context->ns.uns));

		if (context->ns.nsh.flavor == ARM_NEON_SAVED_STATE64) {
			context->ns.ns_64.fpcr = FPCR_DEFAULT;
		} else {
			context->ns.ns_32.fpcr = FPCR_DEFAULT_32;
		}
	}

	thread->machine.DebugData = NULL;

#if defined(HAS_APPLE_PAC)
	/* Sign the initial user-space thread state */
	if (thread->machine.upcb != NULL) {
		boolean_t intr = ml_set_interrupts_enabled(FALSE);
		ml_sign_thread_state(thread->machine.upcb, 0, 0, 0, 0, 0);
		ml_set_interrupts_enabled(intr);
	}
#endif /* defined(HAS_APPLE_PAC) */

	return KERN_SUCCESS;
}

/*
 * Routine: machine_thread_dup
 *
 */
kern_return_t
machine_thread_dup(thread_t self,
    thread_t target,
    __unused boolean_t is_corpse)
{
	struct arm_saved_state *self_saved_state;
	struct arm_saved_state *target_saved_state;

	target->machine.cthread_self = self->machine.cthread_self;

	self_saved_state = self->machine.upcb;
	target_saved_state = target->machine.upcb;
	bcopy(self_saved_state, target_saved_state, sizeof(struct arm_saved_state));
#if defined(HAS_APPLE_PAC)
	if (!is_corpse && is_saved_state64(self_saved_state)) {
		check_and_sign_copied_thread_state(target_saved_state, self_saved_state);
	}
#endif /* defined(HAS_APPLE_PAC) */

	return KERN_SUCCESS;
}

/*
 * Routine: get_user_regs
 *
 */
struct arm_saved_state *
get_user_regs(thread_t thread)
{
	return thread->machine.upcb;
}

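/*
 * Routine: get_user_neon_regs
 *
 * Return the thread's saved user-mode NEON state.
 */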
arm_neon_saved_state_t *
get_user_neon_regs(thread_t thread)
{
	return thread->machine.uNeon;
}

/*
 * Routine: find_user_regs
 *
 */
struct arm_saved_state *
find_user_regs(thread_t thread)
{
	return thread->machine.upcb;
}

/*
 * Routine: find_kern_regs
 *
 */
struct arm_saved_state *
find_kern_regs(thread_t thread)
{
	/*
	 * This works only for an interrupted kernel thread
	 */
	if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL) {
		return (struct arm_saved_state *) NULL;
	} else {
		return getCpuDatap()->cpu_int_state;
	}
}

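/*
 * find_debug_state32 / find_debug_state64
 *
 * Return a pointer to the thread's lazily allocated debug state, or NULL
 * if none has been set (see machine_thread_set_state above).
 */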
arm_debug_state32_t *
find_debug_state32(thread_t thread)
{
	if (thread && thread->machine.DebugData) {
		return &(thread->machine.DebugData->uds.ds32);
	} else {
		return NULL;
	}
}

arm_debug_state64_t *
find_debug_state64(thread_t thread)
{
	if (thread && thread->machine.DebugData) {
		return &(thread->machine.DebugData->uds.ds64);
	} else {
		return NULL;
	}
}

/*
 * Routine: thread_userstack
 *
 */
kern_return_t
thread_userstack(__unused thread_t thread,
    int flavor,
    thread_state_t tstate,
    unsigned int count,
    mach_vm_offset_t * user_stack,
    int * customstack,
    boolean_t is_64bit_data
    )
{
	register_t sp;

	switch (flavor) {
	case ARM_THREAD_STATE:
		if (count == ARM_UNIFIED_THREAD_STATE_COUNT) {
#if __arm64__
			if (is_64bit_data) {
				sp = ((arm_unified_thread_state_t *)tstate)->ts_64.sp;
			} else
#endif
			{
				sp = ((arm_unified_thread_state_t *)tstate)->ts_32.sp;
			}

			break;
		}

	/* INTENTIONAL FALL THROUGH (see machine_thread_set_state) */
	case ARM_THREAD_STATE32:
		if (count != ARM_THREAD_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (is_64bit_data) {
			return KERN_INVALID_ARGUMENT;
		}

		sp = ((arm_thread_state32_t *)tstate)->sp;
		break;
#if __arm64__
	case ARM_THREAD_STATE64:
		if (count != ARM_THREAD_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (!is_64bit_data) {
			return KERN_INVALID_ARGUMENT;
		}

		sp = ((arm_thread_state64_t *)tstate)->sp;
		break;
#endif
	default:
		return KERN_INVALID_ARGUMENT;
	}

	if (sp) {
		*user_stack = CAST_USER_ADDR_T(sp);
		if (customstack) {
			*customstack = 1;
		}
	} else {
		*user_stack = CAST_USER_ADDR_T(USRSTACK64);
		if (customstack) {
			*customstack = 0;
		}
	}

	return KERN_SUCCESS;
}

/*
 * thread_userstackdefault:
 *
 * Return the default stack location for the
 * thread, if otherwise unknown.
 */
kern_return_t
thread_userstackdefault(mach_vm_offset_t * default_user_stack,
    boolean_t is64bit)
{
	if (is64bit) {
		*default_user_stack = USRSTACK64;
	} else {
		*default_user_stack = USRSTACK;
	}

	return KERN_SUCCESS;
}

/*
 * Routine: thread_setuserstack
 *
 */
void
thread_setuserstack(thread_t thread,
    mach_vm_address_t user_stack)
{
	struct arm_saved_state *sv;

	sv = get_user_regs(thread);

	set_saved_state_sp(sv, user_stack);

	return;
}

/*
 * Routine: thread_adjuserstack
 *
 */
uint64_t
thread_adjuserstack(thread_t thread,
    int adjust)
{
	struct arm_saved_state *sv;
	uint64_t sp;

	sv = get_user_regs(thread);

	sp = get_saved_state_sp(sv);
	sp += adjust;
	set_saved_state_sp(sv, sp);

	return sp;
}

/*
 * Routine: thread_setentrypoint
 *
 */
void
thread_setentrypoint(thread_t thread,
    mach_vm_offset_t entry)
{
	struct arm_saved_state *sv;

	sv = get_user_regs(thread);

	set_saved_state_pc(sv, entry);

	return;
}

/*
 * Routine: thread_entrypoint
 *
 */
kern_return_t
thread_entrypoint(__unused thread_t thread,
    int flavor,
    thread_state_t tstate,
    unsigned int count,
    mach_vm_offset_t * entry_point
    )
{
	switch (flavor) {
	case ARM_THREAD_STATE:
	{
		struct arm_thread_state *state;

		if (count != ARM_THREAD_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (struct arm_thread_state *) tstate;

		/*
		 * If a valid entry point is specified, use it.
		 */
		if (state->pc) {
			*entry_point = CAST_USER_ADDR_T(state->pc);
		} else {
			*entry_point = CAST_USER_ADDR_T(VM_MIN_ADDRESS);
		}
	}
	break;

	case ARM_THREAD_STATE64:
	{
		struct arm_thread_state64 *state;

		if (count != ARM_THREAD_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (struct arm_thread_state64 *) tstate;

		/*
		 * If a valid entry point is specified, use it.
		 */
		if (state->pc) {
			*entry_point = CAST_USER_ADDR_T(state->pc);
		} else {
			*entry_point = CAST_USER_ADDR_T(VM_MIN_ADDRESS);
		}

		break;
	}
	default:
		return KERN_INVALID_ARGUMENT;
	}

	return KERN_SUCCESS;
}


/*
 * Routine: thread_set_child
 *
 */
void
thread_set_child(thread_t child,
    int pid)
{
	struct arm_saved_state *child_state;

	child_state = get_user_regs(child);

	set_saved_state_reg(child_state, 0, pid);
	set_saved_state_reg(child_state, 1, 1ULL);
}


/*
 * Routine: thread_set_parent
 *
 */
void
thread_set_parent(thread_t parent,
    int pid)
{
	struct arm_saved_state *parent_state;

	parent_state = get_user_regs(parent);

	set_saved_state_reg(parent_state, 0, pid);
	set_saved_state_reg(parent_state, 1, 0);
}


struct arm_act_context {
	struct arm_unified_thread_state ss;
#if __ARM_VFP__
	struct arm_neon_saved_state ns;
#endif
};

/*
 * Routine: act_thread_csave
 *
 */
void *
act_thread_csave(void)
{
	struct arm_act_context *ic;
	kern_return_t kret;
	unsigned int val;
	thread_t thread = current_thread();

	ic = (struct arm_act_context *) kalloc(sizeof(struct arm_act_context));
	if (ic == (struct arm_act_context *) NULL) {
		return (void *) 0;
	}

	val = ARM_UNIFIED_THREAD_STATE_COUNT;
	kret = machine_thread_get_state(thread, ARM_THREAD_STATE, (thread_state_t)&ic->ss, &val);
	if (kret != KERN_SUCCESS) {
		kfree(ic, sizeof(struct arm_act_context));
		return (void *) 0;
	}

#if __ARM_VFP__
	if (thread_is_64bit_data(thread)) {
		val = ARM_NEON_STATE64_COUNT;
		kret = machine_thread_get_state(thread,
		    ARM_NEON_STATE64,
		    (thread_state_t)&ic->ns,
		    &val);
	} else {
		val = ARM_NEON_STATE_COUNT;
		kret = machine_thread_get_state(thread,
		    ARM_NEON_STATE,
		    (thread_state_t)&ic->ns,
		    &val);
	}
	if (kret != KERN_SUCCESS) {
		kfree(ic, sizeof(struct arm_act_context));
		return (void *) 0;
	}
#endif
	return ic;
}

/*
 * Routine: act_thread_catt
 *
 */
void
act_thread_catt(void * ctx)
{
	struct arm_act_context *ic;
	kern_return_t kret;
	thread_t thread = current_thread();

	ic = (struct arm_act_context *) ctx;
	if (ic == (struct arm_act_context *) NULL) {
		return;
	}

	kret = machine_thread_set_state(thread, ARM_THREAD_STATE, (thread_state_t)&ic->ss, ARM_UNIFIED_THREAD_STATE_COUNT);
	if (kret != KERN_SUCCESS) {
		goto out;
	}

#if __ARM_VFP__
	if (thread_is_64bit_data(thread)) {
		kret = machine_thread_set_state(thread,
		    ARM_NEON_STATE64,
		    (thread_state_t)&ic->ns,
		    ARM_NEON_STATE64_COUNT);
	} else {
		kret = machine_thread_set_state(thread,
		    ARM_NEON_STATE,
		    (thread_state_t)&ic->ns,
		    ARM_NEON_STATE_COUNT);
	}
	if (kret != KERN_SUCCESS) {
		goto out;
	}
#endif
out:
	kfree(ic, sizeof(struct arm_act_context));
}

/*
 * Routine: act_thread_cfree
 *
 */
void
act_thread_cfree(void *ctx)
{
	kfree(ctx, sizeof(struct arm_act_context));
}

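/*
 * thread_set_wq_state32 / thread_set_wq_state64
 *
 * Initialize a workqueue thread's register state from tstate. The saved
 * state is deliberately not zeroed first: it can be accessed concurrently,
 * and zero is not a valid value for some registers (e.g. sp).
 */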
kern_return_t
thread_set_wq_state32(thread_t thread,
    thread_state_t tstate)
{
	arm_thread_state_t *state;
	struct arm_saved_state *saved_state;
	struct arm_saved_state32 *saved_state_32;
	thread_t curth = current_thread();
	spl_t s = 0;

	assert(!thread_is_64bit_data(thread));

	saved_state = thread->machine.upcb;
	saved_state_32 = saved_state32(saved_state);

	state = (arm_thread_state_t *)tstate;

	if (curth != thread) {
		s = splsched();
		thread_lock(thread);
	}

	/*
	 * Do not zero saved_state: it can be accessed concurrently, and zero
	 * is not a valid state for some of the registers, like sp.
	 */
	thread_state32_to_saved_state(state, saved_state);
	saved_state_32->cpsr = PSR64_USER32_DEFAULT;

	if (curth != thread) {
		thread_unlock(thread);
		splx(s);
	}

	return KERN_SUCCESS;
}

kern_return_t
thread_set_wq_state64(thread_t thread,
    thread_state_t tstate)
{
	arm_thread_state64_t *state;
	struct arm_saved_state *saved_state;
	struct arm_saved_state64 *saved_state_64;
	thread_t curth = current_thread();
	spl_t s = 0;

	assert(thread_is_64bit_data(thread));

	saved_state = thread->machine.upcb;
	saved_state_64 = saved_state64(saved_state);
	state = (arm_thread_state64_t *)tstate;

	if (curth != thread) {
		s = splsched();
		thread_lock(thread);
	}

	/*
	 * Do not zero saved_state: it can be accessed concurrently, and zero
	 * is not a valid state for some of the registers, like sp.
	 */
	thread_state64_to_saved_state(state, saved_state);
	set_saved_state_cpsr(saved_state, PSR64_USER64_DEFAULT);

	if (curth != thread) {
		thread_unlock(thread);
		splx(s);
	}

	return KERN_SUCCESS;
}