]> git.saurik.com Git - apple/xnu.git/blob - osfmk/arm64/status.c
41d213e69033432226072226d9032b67eedab1ad
[apple/xnu.git] / osfmk / arm64 / status.c
1 /*
2 * Copyright (c) 2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #include <debug.h>
29 #include <mach/mach_types.h>
30 #include <mach/kern_return.h>
31 #include <mach/thread_status.h>
32 #include <kern/thread.h>
33 #include <kern/kalloc.h>
34 #include <arm/vmparam.h>
35 #include <arm/cpu_data_internal.h>
36 #include <arm64/proc_reg.h>
37 #if __has_feature(ptrauth_calls)
38 #include <ptrauth.h>
39 #endif
40
/*
 * Legacy VFPv2 floating-point state layout: 32 single-precision registers
 * followed by the FPSCR status/control word.  Retained for clients that
 * pass the smaller ARM_VFPV2_STATE_COUNT to the ARM_VFP_STATE flavor.
 */
struct arm_vfpv2_state {
	__uint32_t __r[32];
	__uint32_t __fpscr;
};

typedef struct arm_vfpv2_state arm_vfpv2_state_t;

/* Size of arm_vfpv2_state_t expressed in 32-bit words (Mach state count). */
#define ARM_VFPV2_STATE_COUNT \
	((mach_msg_type_number_t)(sizeof (arm_vfpv2_state_t)/sizeof(uint32_t)))
50
/*
 * Forward declarations
 */
void thread_set_child(thread_t child, int pid);
void thread_set_parent(thread_t parent, int pid);

/*
 * Maps state flavor to number of 32-bit words in the state.
 * Indexed by the ARM_*_STATE flavor constants; flavors not listed
 * here implicitly map to 0.
 */
/* __private_extern__ */
unsigned int _MachineStateCount[] = {
	[ARM_UNIFIED_THREAD_STATE] = ARM_UNIFIED_THREAD_STATE_COUNT,
	[ARM_VFP_STATE] = ARM_VFP_STATE_COUNT,
	[ARM_EXCEPTION_STATE] = ARM_EXCEPTION_STATE_COUNT,
	[ARM_DEBUG_STATE] = ARM_DEBUG_STATE_COUNT,
	[ARM_THREAD_STATE64] = ARM_THREAD_STATE64_COUNT,
	[ARM_EXCEPTION_STATE64] = ARM_EXCEPTION_STATE64_COUNT,
	[ARM_THREAD_STATE32] = ARM_THREAD_STATE32_COUNT,
	[ARM_DEBUG_STATE32] = ARM_DEBUG_STATE32_COUNT,
	[ARM_DEBUG_STATE64] = ARM_DEBUG_STATE64_COUNT,
	[ARM_NEON_STATE] = ARM_NEON_STATE_COUNT,
	[ARM_NEON_STATE64] = ARM_NEON_STATE64_COUNT,
	[ARM_PAGEIN_STATE] = ARM_PAGEIN_STATE_COUNT,
};
75
76 extern zone_t ads_zone;
77
78 #if __arm64__
79 /*
80 * Copy values from saved_state to ts64.
81 */
82 void
83 saved_state_to_thread_state64(const arm_saved_state_t * saved_state,
84 arm_thread_state64_t * ts64)
85 {
86 uint32_t i;
87
88 assert(is_saved_state64(saved_state));
89
90 ts64->fp = get_saved_state_fp(saved_state);
91 ts64->lr = get_saved_state_lr(saved_state);
92 ts64->sp = get_saved_state_sp(saved_state);
93 ts64->pc = get_saved_state_pc(saved_state);
94 ts64->cpsr = get_saved_state_cpsr(saved_state);
95 for (i = 0; i < 29; i++) {
96 ts64->x[i] = get_saved_state_reg(saved_state, i);
97 }
98 }
99
100 /*
101 * Copy values from ts64 to saved_state
102 */
103 void
104 thread_state64_to_saved_state(const arm_thread_state64_t * ts64,
105 arm_saved_state_t * saved_state)
106 {
107 uint32_t i;
108
109 assert(is_saved_state64(saved_state));
110
111 set_saved_state_fp(saved_state, ts64->fp);
112 set_saved_state_lr(saved_state, ts64->lr);
113 set_saved_state_sp(saved_state, ts64->sp);
114 set_saved_state_pc(saved_state, ts64->pc);
115 set_saved_state_cpsr(saved_state, (ts64->cpsr & ~PSR64_MODE_MASK) | PSR64_MODE_RW_64);
116 for (i = 0; i < 29; i++) {
117 set_saved_state_reg(saved_state, i, ts64->x[i]);
118 }
119 }
120
121 #endif /* __arm64__ */
122
123 static kern_return_t
124 handle_get_arm32_thread_state(thread_state_t tstate,
125 mach_msg_type_number_t * count,
126 const arm_saved_state_t * saved_state)
127 {
128 if (*count < ARM_THREAD_STATE32_COUNT) {
129 return KERN_INVALID_ARGUMENT;
130 }
131 if (!is_saved_state32(saved_state)) {
132 return KERN_INVALID_ARGUMENT;
133 }
134
135 (void)saved_state_to_thread_state32(saved_state, (arm_thread_state32_t *)tstate);
136 *count = ARM_THREAD_STATE32_COUNT;
137 return KERN_SUCCESS;
138 }
139
140 static kern_return_t
141 handle_get_arm64_thread_state(thread_state_t tstate,
142 mach_msg_type_number_t * count,
143 const arm_saved_state_t * saved_state)
144 {
145 if (*count < ARM_THREAD_STATE64_COUNT) {
146 return KERN_INVALID_ARGUMENT;
147 }
148 if (!is_saved_state64(saved_state)) {
149 return KERN_INVALID_ARGUMENT;
150 }
151
152 (void)saved_state_to_thread_state64(saved_state, (arm_thread_state64_t *)tstate);
153 *count = ARM_THREAD_STATE64_COUNT;
154 return KERN_SUCCESS;
155 }
156
157
/*
 * Service the generic ARM_THREAD_STATE flavor.  Modern callers receive an
 * arm_unified_thread_state_t describing either a 32- or 64-bit thread;
 * legacy callers passing a count smaller than the unified size get the
 * plain 32-bit layout instead.
 */
static kern_return_t
handle_get_arm_thread_state(thread_state_t tstate,
    mach_msg_type_number_t * count,
    const arm_saved_state_t * saved_state)
{
	/* In an arm64 world, this flavor can be used to retrieve the thread
	 * state of a 32-bit or 64-bit thread into a unified structure, but we
	 * need to support legacy clients who are only aware of 32-bit, so
	 * check the count to see what the client is expecting.
	 */
	if (*count < ARM_UNIFIED_THREAD_STATE_COUNT) {
		/* Legacy client: fall back to the plain 32-bit representation. */
		return handle_get_arm32_thread_state(tstate, count, saved_state);
	}

	arm_unified_thread_state_t *unified_state = (arm_unified_thread_state_t *) tstate;
	/* Clear the whole unified structure before filling in one arm of the union. */
	bzero(unified_state, sizeof(*unified_state));
#if __arm64__
	if (is_saved_state64(saved_state)) {
		unified_state->ash.flavor = ARM_THREAD_STATE64;
		unified_state->ash.count = ARM_THREAD_STATE64_COUNT;
		(void)saved_state_to_thread_state64(saved_state, thread_state64(unified_state));
	} else
#endif
	{
		unified_state->ash.flavor = ARM_THREAD_STATE32;
		unified_state->ash.count = ARM_THREAD_STATE32_COUNT;
		(void)saved_state_to_thread_state32(saved_state, thread_state32(unified_state));
	}
	*count = ARM_UNIFIED_THREAD_STATE_COUNT;
	return KERN_SUCCESS;
}
189
190
191 static kern_return_t
192 handle_set_arm32_thread_state(const thread_state_t tstate,
193 mach_msg_type_number_t count,
194 arm_saved_state_t * saved_state)
195 {
196 if (count != ARM_THREAD_STATE32_COUNT) {
197 return KERN_INVALID_ARGUMENT;
198 }
199
200 (void)thread_state32_to_saved_state((const arm_thread_state32_t *)tstate, saved_state);
201 return KERN_SUCCESS;
202 }
203
204 static kern_return_t
205 handle_set_arm64_thread_state(const thread_state_t tstate,
206 mach_msg_type_number_t count,
207 arm_saved_state_t * saved_state)
208 {
209 if (count != ARM_THREAD_STATE64_COUNT) {
210 return KERN_INVALID_ARGUMENT;
211 }
212
213 (void)thread_state64_to_saved_state((const arm_thread_state64_t *)tstate, saved_state);
214 return KERN_SUCCESS;
215 }
216
217
/*
 * Service the generic ARM_THREAD_STATE set-state flavor.  Modern callers
 * provide an arm_unified_thread_state_t; legacy callers passing a count
 * smaller than the unified size are treated as supplying the plain 32-bit
 * layout.  The flavor carried inside the unified state must match the
 * width of the destination saved state.
 */
static kern_return_t
handle_set_arm_thread_state(const thread_state_t tstate,
    mach_msg_type_number_t count,
    arm_saved_state_t * saved_state)
{
	/* In an arm64 world, this flavor can be used to set the thread state of a
	 * 32-bit or 64-bit thread from a unified structure, but we need to support
	 * legacy clients who are only aware of 32-bit, so check the count to see
	 * what the client is expecting.
	 */
	if (count < ARM_UNIFIED_THREAD_STATE_COUNT) {
		/* Legacy client: only valid against a 32-bit saved state. */
		if (!is_saved_state32(saved_state)) {
			return KERN_INVALID_ARGUMENT;
		}
		return handle_set_arm32_thread_state(tstate, count, saved_state);
	}

	const arm_unified_thread_state_t *unified_state = (const arm_unified_thread_state_t *) tstate;
#if __arm64__
	if (is_thread_state64(unified_state)) {
		/* 64-bit payload requires a 64-bit saved state. */
		if (!is_saved_state64(saved_state)) {
			return KERN_INVALID_ARGUMENT;
		}
		(void)thread_state64_to_saved_state(const_thread_state64(unified_state), saved_state);
	} else
#endif
	{
		/* 32-bit payload requires a 32-bit saved state. */
		if (!is_saved_state32(saved_state)) {
			return KERN_INVALID_ARGUMENT;
		}
		(void)thread_state32_to_saved_state(const_thread_state32(unified_state), saved_state);
	}

	return KERN_SUCCESS;
}
253
254
/*
 * Translate thread state arguments to userspace representation
 *
 * On ptrauth (arm64e) configurations this signs the pc/lr/sp/fp values in
 * a 64-bit thread state with the user pointer-authentication keys (via
 * pmap_sign_user_ptr) before they are returned to userspace, recording in
 * ts64->flags whether the state is unsigned or lr was already IB-signed.
 * On all other configurations it is a no-op.  Always returns KERN_SUCCESS;
 * unsupported flavors/counts are passed through unmodified.
 */
kern_return_t
machine_thread_state_convert_to_user(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t *count)
{
#if __has_feature(ptrauth_calls)
	arm_thread_state64_t *ts64;

	/* Only 64-bit thread-state flavors carry signable pointers. */
	switch (flavor) {
	case ARM_THREAD_STATE:
	{
		arm_unified_thread_state_t *unified_state = (arm_unified_thread_state_t *)tstate;

		if (*count < ARM_UNIFIED_THREAD_STATE_COUNT || !is_thread_state64(unified_state)) {
			return KERN_SUCCESS;
		}
		ts64 = thread_state64(unified_state);
		break;
	}
	case ARM_THREAD_STATE64:
	{
		if (*count < ARM_THREAD_STATE64_COUNT) {
			return KERN_SUCCESS;
		}
		ts64 = (arm_thread_state64_t *)tstate;
		break;
	}
	default:
		return KERN_SUCCESS;
	}

	// Note that kernel threads never have disable_user_jop set
	if (current_thread()->machine.disable_user_jop || !thread_is_64bit_addr(current_thread()) ||
	    thread->machine.disable_user_jop || !thread_is_64bit_addr(thread) ||
	    (BootArgs->bootFlags & kBootFlagsDisableUserThreadStateJOP)) {
		/* Either side lacks ptrauth: hand out unsigned state, marked as such. */
		ts64->flags = __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH;
		return KERN_SUCCESS;
	}

	ts64->flags = 0;
	if (ts64->lr) {
		// lr might contain an IB-signed return address (strip is a no-op on unsigned addresses)
		uintptr_t stripped_lr = (uintptr_t)ptrauth_strip((void *)ts64->lr,
		    ptrauth_key_return_address);
		if (ts64->lr != stripped_lr) {
			// Need to allow already-signed lr value to round-trip as is
			ts64->flags |= __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR;
		}
		// Note that an IB-signed return address that happens to have a 0 signature value
		// will round-trip correctly even if IA-signed again below (and IA-authd later)
	}

	if (BootArgs->bootFlags & kBootFlagsDisableUserJOP) {
		return KERN_SUCCESS;
	}

	/*
	 * Sign each non-zero pointer with the appropriate user key and a
	 * per-field string discriminator so the fields cannot be swapped.
	 */
	if (ts64->pc) {
		ts64->pc = (uintptr_t)pmap_sign_user_ptr((void*)ts64->pc,
		    ptrauth_key_process_independent_code, ptrauth_string_discriminator("pc"));
	}
	if (ts64->lr && !(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR)) {
		ts64->lr = (uintptr_t)pmap_sign_user_ptr((void*)ts64->lr,
		    ptrauth_key_process_independent_code, ptrauth_string_discriminator("lr"));
	}
	if (ts64->sp) {
		ts64->sp = (uintptr_t)pmap_sign_user_ptr((void*)ts64->sp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("sp"));
	}
	if (ts64->fp) {
		ts64->fp = (uintptr_t)pmap_sign_user_ptr((void*)ts64->fp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("fp"));
	}

	return KERN_SUCCESS;
#else
	// No conversion to userspace representation on this platform
	(void)thread; (void)flavor; (void)tstate; (void)count;
	return KERN_SUCCESS;
#endif /* __has_feature(ptrauth_calls) */
}
341
/*
 * Translate thread state arguments from userspace representation
 *
 * Inverse of machine_thread_state_convert_to_user: on ptrauth (arm64e)
 * configurations this authenticates the signed pc/lr/sp/fp values received
 * from userspace (via pmap_auth_user_ptr) before they can be installed in
 * the target thread.  Auth failure is expected to poison the value rather
 * than return an error here.  On all other configurations it is a no-op.
 */
kern_return_t
machine_thread_state_convert_from_user(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t count)
{
#if __has_feature(ptrauth_calls)
	arm_thread_state64_t *ts64;

	/* Only 64-bit thread-state flavors carry signed pointers. */
	switch (flavor) {
	case ARM_THREAD_STATE:
	{
		arm_unified_thread_state_t *unified_state = (arm_unified_thread_state_t *)tstate;

		if (count < ARM_UNIFIED_THREAD_STATE_COUNT || !is_thread_state64(unified_state)) {
			return KERN_SUCCESS;
		}
		ts64 = thread_state64(unified_state);
		break;
	}
	case ARM_THREAD_STATE64:
	{
		if (count != ARM_THREAD_STATE64_COUNT) {
			return KERN_SUCCESS;
		}
		ts64 = (arm_thread_state64_t *)tstate;
		break;
	}
	default:
		return KERN_SUCCESS;
	}

	// Note that kernel threads never have disable_user_jop set
	if (current_thread()->machine.disable_user_jop || !thread_is_64bit_addr(current_thread())) {
		if (thread->machine.disable_user_jop || !thread_is_64bit_addr(thread)) {
			/* Neither side uses ptrauth: accept the state unsigned. */
			ts64->flags = __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH;
			return KERN_SUCCESS;
		}
		// A JOP-disabled process must not set thread state on a JOP-enabled process
		return KERN_PROTECTION_FAILURE;
	}

	if (ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH) {
		if (thread->machine.disable_user_jop || !thread_is_64bit_addr(thread) ||
		    (BootArgs->bootFlags & kBootFlagsDisableUserThreadStateJOP)) {
			return KERN_SUCCESS;
		}
		// Disallow setting unsigned thread state on JOP-enabled processes.
		// Ignore flag and treat thread state arguments as signed, ptrauth
		// poisoning will cause resulting thread state to be invalid
		ts64->flags &= ~__DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH;
	}

	if (ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR) {
		// lr might contain an IB-signed return address (strip is a no-op on unsigned addresses)
		uintptr_t stripped_lr = (uintptr_t)ptrauth_strip((void *)ts64->lr,
		    ptrauth_key_return_address);
		if (ts64->lr == stripped_lr) {
			// Don't allow unsigned pointer to be passed through as is. Ignore flag and
			// treat as IA-signed below (where auth failure may poison the value).
			ts64->flags &= ~__DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR;
		}
		// Note that an IB-signed return address that happens to have a 0 signature value
		// will also have been IA-signed (without this flag being set) and so will IA-auth
		// correctly below.
	}

	if (BootArgs->bootFlags & kBootFlagsDisableUserJOP) {
		return KERN_SUCCESS;
	}

	/*
	 * Authenticate each non-zero pointer with the key/discriminator it was
	 * signed with in machine_thread_state_convert_to_user.
	 */
	if (ts64->pc) {
		ts64->pc = (uintptr_t)pmap_auth_user_ptr((void*)ts64->pc,
		    ptrauth_key_process_independent_code, ptrauth_string_discriminator("pc"));
	}
	if (ts64->lr && !(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR)) {
		ts64->lr = (uintptr_t)pmap_auth_user_ptr((void*)ts64->lr,
		    ptrauth_key_process_independent_code, ptrauth_string_discriminator("lr"));
	}
	if (ts64->sp) {
		ts64->sp = (uintptr_t)pmap_auth_user_ptr((void*)ts64->sp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("sp"));
	}
	if (ts64->fp) {
		ts64->fp = (uintptr_t)pmap_auth_user_ptr((void*)ts64->fp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("fp"));
	}

	return KERN_SUCCESS;
#else
	// No conversion from userspace representation on this platform
	(void)thread; (void)flavor; (void)tstate; (void)count;
	return KERN_SUCCESS;
#endif /* __has_feature(ptrauth_calls) */
}
442
/*
 * Translate signal context data pointer to userspace representation
 *
 * On ptrauth (arm64e) configurations, signs the signal ucontext pointer
 * with the user data key before it is delivered to userspace; otherwise a
 * no-op.  Always returns KERN_SUCCESS.
 */
kern_return_t
machine_thread_siguctx_pointer_convert_to_user(
	__assert_only thread_t thread,
	user_addr_t *uctxp)
{
#if __has_feature(ptrauth_calls)
	if (current_thread()->machine.disable_user_jop || !thread_is_64bit_addr(current_thread())) {
		/* Caller has no ptrauth; the target should not either. */
		assert(thread->machine.disable_user_jop || !thread_is_64bit_addr(thread));
		return KERN_SUCCESS;
	}

	if (BootArgs->bootFlags & kBootFlagsDisableUserJOP) {
		return KERN_SUCCESS;
	}

	if (*uctxp) {
		*uctxp = (uintptr_t)pmap_sign_user_ptr((void*)*uctxp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("uctx"));
	}

	return KERN_SUCCESS;
#else
	// No conversion to userspace representation on this platform
	(void)thread; (void)uctxp;
	return KERN_SUCCESS;
#endif /* __has_feature(ptrauth_calls) */
}
474
/*
 * Translate array of function pointer syscall arguments from userspace representation
 *
 * On ptrauth (arm64e) configurations, authenticates each non-zero entry of
 * the fptrs array (signed by userspace with the function-pointer key,
 * discriminator 0); otherwise a no-op.  Always returns KERN_SUCCESS.
 */
kern_return_t
machine_thread_function_pointers_convert_from_user(
	__assert_only thread_t thread,
	user_addr_t *fptrs,
	uint32_t count)
{
#if __has_feature(ptrauth_calls)
	if (current_thread()->machine.disable_user_jop || !thread_is_64bit_addr(current_thread())) {
		/* Caller has no ptrauth; the target should not either. */
		assert(thread->machine.disable_user_jop || !thread_is_64bit_addr(thread));
		return KERN_SUCCESS;
	}

	if (BootArgs->bootFlags & kBootFlagsDisableUserJOP) {
		return KERN_SUCCESS;
	}

	/* Authenticate each entry in place; zero entries are left untouched. */
	while (count--) {
		if (*fptrs) {
			*fptrs = (uintptr_t)pmap_auth_user_ptr((void*)*fptrs,
			    ptrauth_key_function_pointer, 0);
		}
		fptrs++;
	}

	return KERN_SUCCESS;
#else
	// No conversion from userspace representation on this platform
	(void)thread; (void)fptrs; (void)count;
	return KERN_SUCCESS;
#endif /* __has_feature(ptrauth_calls) */
}
510
/*
 * Routine: machine_thread_get_state
 *
 * Retrieve the machine state of the given thread for the requested flavor
 * into the caller-provided buffer.  On entry *count is the buffer size in
 * 32-bit words; on success it is updated to the number of words written.
 * Returns KERN_INVALID_ARGUMENT when the buffer is too small, the flavor
 * is unknown, or the flavor does not match the thread's data width.
 */
kern_return_t
machine_thread_get_state(thread_t thread,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t * count)
{
	switch (flavor) {
	/* Legacy flavor-list query: always reports the 32-bit flavor names. */
	case THREAD_STATE_FLAVOR_LIST:
		if (*count < 4) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = ARM_EXCEPTION_STATE;
		tstate[3] = ARM_DEBUG_STATE;
		*count = 4;
		break;

	/* Newer flavor-list query: exception/debug flavors depend on data width. */
	case THREAD_STATE_FLAVOR_LIST_NEW:
		if (*count < 4) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = thread_is_64bit_data(thread) ? ARM_EXCEPTION_STATE64 : ARM_EXCEPTION_STATE;
		tstate[3] = thread_is_64bit_data(thread) ? ARM_DEBUG_STATE64 : ARM_DEBUG_STATE32;
		*count = 4;
		break;

	/* macOS 10.15 flavor-list query: adds ARM_PAGEIN_STATE. */
	case THREAD_STATE_FLAVOR_LIST_10_15:
		if (*count < 5) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = thread_is_64bit_data(thread) ? ARM_EXCEPTION_STATE64 : ARM_EXCEPTION_STATE;
		tstate[3] = thread_is_64bit_data(thread) ? ARM_DEBUG_STATE64 : ARM_DEBUG_STATE32;
		tstate[4] = ARM_PAGEIN_STATE;
		*count = 5;
		break;

	/* Unified (32- or 64-bit) general-purpose register state. */
	case ARM_THREAD_STATE:
	{
		kern_return_t rn = handle_get_arm_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;
	}
	case ARM_THREAD_STATE32:
	{
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		kern_return_t rn = handle_get_arm32_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;
	}
#if __arm64__
	case ARM_THREAD_STATE64:
	{
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		kern_return_t rn = handle_get_arm64_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;
	}
#endif
	/* 32-bit exception state; the saved ESR is exported as the legacy FSR field. */
	case ARM_EXCEPTION_STATE:{
		struct arm_exception_state *state;
		struct arm_saved_state32 *saved_state;

		if (*count < ARM_EXCEPTION_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (struct arm_exception_state *) tstate;
		saved_state = saved_state32(thread->machine.upcb);

		state->exception = saved_state->exception;
		state->fsr = saved_state->esr;
		state->far = saved_state->far;

		*count = ARM_EXCEPTION_STATE_COUNT;
		break;
	}
	case ARM_EXCEPTION_STATE64:{
		struct arm_exception_state64 *state;
		struct arm_saved_state64 *saved_state;

		if (*count < ARM_EXCEPTION_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (struct arm_exception_state64 *) tstate;
		saved_state = saved_state64(thread->machine.upcb);

		state->exception = saved_state->exception;
		state->far = saved_state->far;
		state->esr = saved_state->esr;

		*count = ARM_EXCEPTION_STATE64_COUNT;
		break;
	}
	/* Legacy 32-bit debug state; zero-filled when none has been set. */
	case ARM_DEBUG_STATE:{
		arm_legacy_debug_state_t *state;
		arm_debug_state32_t *thread_state;

		if (*count < ARM_LEGACY_DEBUG_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_legacy_debug_state_t *) tstate;
		thread_state = find_debug_state32(thread);

		if (thread_state == NULL) {
			bzero(state, sizeof(arm_legacy_debug_state_t));
		} else {
			bcopy(thread_state, state, sizeof(arm_legacy_debug_state_t));
		}

		*count = ARM_LEGACY_DEBUG_STATE_COUNT;
		break;
	}
	case ARM_DEBUG_STATE32:{
		arm_debug_state32_t *state;
		arm_debug_state32_t *thread_state;

		if (*count < ARM_DEBUG_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_debug_state32_t *) tstate;
		thread_state = find_debug_state32(thread);

		if (thread_state == NULL) {
			bzero(state, sizeof(arm_debug_state32_t));
		} else {
			bcopy(thread_state, state, sizeof(arm_debug_state32_t));
		}

		*count = ARM_DEBUG_STATE32_COUNT;
		break;
	}

	case ARM_DEBUG_STATE64:{
		arm_debug_state64_t *state;
		arm_debug_state64_t *thread_state;

		if (*count < ARM_DEBUG_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_debug_state64_t *) tstate;
		thread_state = find_debug_state64(thread);

		if (thread_state == NULL) {
			bzero(state, sizeof(arm_debug_state64_t));
		} else {
			bcopy(thread_state, state, sizeof(arm_debug_state64_t));
		}

		*count = ARM_DEBUG_STATE64_COUNT;
		break;
	}

	/*
	 * VFP state for 32-bit clients; also honors the smaller legacy
	 * VFPv2 count (32 registers + fpscr) by copying fewer words.
	 */
	case ARM_VFP_STATE:{
		struct arm_vfp_state *state;
		arm_neon_saved_state32_t *thread_state;
		unsigned int max;

		if (*count < ARM_VFP_STATE_COUNT) {
			if (*count < ARM_VFPV2_STATE_COUNT) {
				return KERN_INVALID_ARGUMENT;
			} else {
				*count = ARM_VFPV2_STATE_COUNT;
			}
		}

		/* max is the register-word count; +1 below accounts for fpscr. */
		if (*count == ARM_VFPV2_STATE_COUNT) {
			max = 32;
		} else {
			max = 64;
		}

		state = (struct arm_vfp_state *) tstate;
		thread_state = neon_state32(thread->machine.uNeon);
		/* ARM64 TODO: set fpsr and fpcr from state->fpscr */

		bcopy(thread_state, state, (max + 1) * sizeof(uint32_t));
		*count = (max + 1);
		break;
	}
	case ARM_NEON_STATE:{
		arm_neon_state_t *state;
		arm_neon_saved_state32_t *thread_state;

		if (*count < ARM_NEON_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_neon_state_t *)tstate;
		thread_state = neon_state32(thread->machine.uNeon);

		/* The exported and saved layouts are expected to match exactly. */
		assert(sizeof(*thread_state) == sizeof(*state));
		bcopy(thread_state, state, sizeof(arm_neon_state_t));

		*count = ARM_NEON_STATE_COUNT;
		break;
	}

	case ARM_NEON_STATE64:{
		arm_neon_state64_t *state;
		arm_neon_saved_state64_t *thread_state;

		if (*count < ARM_NEON_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_neon_state64_t *)tstate;
		thread_state = neon_state64(thread->machine.uNeon);

		/* For now, these are identical */
		assert(sizeof(*state) == sizeof(*thread_state));
		bcopy(thread_state, state, sizeof(arm_neon_state64_t));

		*count = ARM_NEON_STATE64_COUNT;
		break;
	}


	/* Reports the error code of the thread's last page-in failure, if any. */
	case ARM_PAGEIN_STATE: {
		arm_pagein_state_t *state;

		if (*count < ARM_PAGEIN_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_pagein_state_t *)tstate;
		state->__pagein_error = thread->t_pagein_error;

		*count = ARM_PAGEIN_STATE_COUNT;
		break;
	}


	default:
		return KERN_INVALID_ARGUMENT;
	}
	return KERN_SUCCESS;
}
802
803
/*
 * Routine: machine_thread_get_kern_state
 *
 * Retrieve the interrupted kernel register state of the current thread
 * (taken from the CPU's saved interrupt state).  Only valid when called
 * on the current thread while an interrupt state exists; otherwise
 * returns KERN_FAILURE.
 */
kern_return_t
machine_thread_get_kern_state(thread_t thread,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t * count)
{
	/*
	 * This works only for an interrupted kernel thread
	 */
	if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL) {
		return KERN_FAILURE;
	}

	/* Same flavor handlers as machine_thread_get_state, but sourced from
	 * the CPU's saved interrupt state rather than the thread's user pcb. */
	switch (flavor) {
	case ARM_THREAD_STATE:
	{
		kern_return_t rn = handle_get_arm_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
		if (rn) {
			return rn;
		}
		break;
	}
	case ARM_THREAD_STATE32:
	{
		kern_return_t rn = handle_get_arm32_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
		if (rn) {
			return rn;
		}
		break;
	}
#if __arm64__
	case ARM_THREAD_STATE64:
	{
		kern_return_t rn = handle_get_arm64_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
		if (rn) {
			return rn;
		}
		break;
	}
#endif
	default:
		return KERN_INVALID_ARGUMENT;
	}
	return KERN_SUCCESS;
}
853
854 void
855 machine_thread_switch_addrmode(thread_t thread)
856 {
857 if (task_has_64Bit_data(thread->task)) {
858 thread->machine.upcb->ash.flavor = ARM_SAVED_STATE64;
859 thread->machine.upcb->ash.count = ARM_SAVED_STATE64_COUNT;
860 thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
861 thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;
862
863 /*
864 * Reinitialize the NEON state.
865 */
866 bzero(&thread->machine.uNeon->uns, sizeof(thread->machine.uNeon->uns));
867 thread->machine.uNeon->ns_64.fpcr = FPCR_DEFAULT;
868 } else {
869 thread->machine.upcb->ash.flavor = ARM_SAVED_STATE32;
870 thread->machine.upcb->ash.count = ARM_SAVED_STATE32_COUNT;
871 thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
872 thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
873
874 /*
875 * Reinitialize the NEON state.
876 */
877 bzero(&thread->machine.uNeon->uns, sizeof(thread->machine.uNeon->uns));
878 thread->machine.uNeon->ns_32.fpcr = FPCR_DEFAULT_32;
879 }
880 }
881
/* Declared here for local use; defined elsewhere. */
extern long long arm_debug_get(void);
883
884 /*
885 * Routine: machine_thread_set_state
886 *
887 */
888 kern_return_t
889 machine_thread_set_state(thread_t thread,
890 thread_flavor_t flavor,
891 thread_state_t tstate,
892 mach_msg_type_number_t count)
893 {
894 kern_return_t rn;
895
896 switch (flavor) {
897 case ARM_THREAD_STATE:
898 rn = handle_set_arm_thread_state(tstate, count, thread->machine.upcb);
899 if (rn) {
900 return rn;
901 }
902 break;
903
904 case ARM_THREAD_STATE32:
905 if (thread_is_64bit_data(thread)) {
906 return KERN_INVALID_ARGUMENT;
907 }
908
909 rn = handle_set_arm32_thread_state(tstate, count, thread->machine.upcb);
910 if (rn) {
911 return rn;
912 }
913 break;
914
915 #if __arm64__
916 case ARM_THREAD_STATE64:
917 if (!thread_is_64bit_data(thread)) {
918 return KERN_INVALID_ARGUMENT;
919 }
920
921 rn = handle_set_arm64_thread_state(tstate, count, thread->machine.upcb);
922 if (rn) {
923 return rn;
924 }
925 break;
926 #endif
927 case ARM_EXCEPTION_STATE:{
928 if (count != ARM_EXCEPTION_STATE_COUNT) {
929 return KERN_INVALID_ARGUMENT;
930 }
931 if (thread_is_64bit_data(thread)) {
932 return KERN_INVALID_ARGUMENT;
933 }
934
935 break;
936 }
937 case ARM_EXCEPTION_STATE64:{
938 if (count != ARM_EXCEPTION_STATE64_COUNT) {
939 return KERN_INVALID_ARGUMENT;
940 }
941 if (!thread_is_64bit_data(thread)) {
942 return KERN_INVALID_ARGUMENT;
943 }
944
945 break;
946 }
947 case ARM_DEBUG_STATE:
948 {
949 arm_legacy_debug_state_t *state;
950 boolean_t enabled = FALSE;
951 unsigned int i;
952
953 if (count != ARM_LEGACY_DEBUG_STATE_COUNT) {
954 return KERN_INVALID_ARGUMENT;
955 }
956 if (thread_is_64bit_data(thread)) {
957 return KERN_INVALID_ARGUMENT;
958 }
959
960 state = (arm_legacy_debug_state_t *) tstate;
961
962 for (i = 0; i < 16; i++) {
963 /* do not allow context IDs to be set */
964 if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
965 || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
966 || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
967 || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
968 return KERN_PROTECTION_FAILURE;
969 }
970 if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
971 || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
972 enabled = TRUE;
973 }
974 }
975
976 if (!enabled) {
977 arm_debug_state32_t *thread_state = find_debug_state32(thread);
978 if (thread_state != NULL) {
979 void *pTmp = thread->machine.DebugData;
980 thread->machine.DebugData = NULL;
981 zfree(ads_zone, pTmp);
982 }
983 } else {
984 arm_debug_state32_t *thread_state = find_debug_state32(thread);
985 if (thread_state == NULL) {
986 thread->machine.DebugData = zalloc(ads_zone);
987 bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData));
988 thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE32;
989 thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE32_COUNT;
990 thread_state = find_debug_state32(thread);
991 }
992 assert(NULL != thread_state);
993
994 for (i = 0; i < 16; i++) {
995 /* set appropriate privilege; mask out unknown bits */
996 thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
997 | ARM_DBGBCR_MATCH_MASK
998 | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
999 | ARM_DBG_CR_ENABLE_MASK))
1000 | ARM_DBGBCR_TYPE_IVA
1001 | ARM_DBG_CR_LINKED_UNLINKED
1002 | ARM_DBG_CR_SECURITY_STATE_BOTH
1003 | ARM_DBG_CR_MODE_CONTROL_USER;
1004 thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK;
1005 thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
1006 | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
1007 | ARM_DBGWCR_ACCESS_CONTROL_MASK
1008 | ARM_DBG_CR_ENABLE_MASK))
1009 | ARM_DBG_CR_LINKED_UNLINKED
1010 | ARM_DBG_CR_SECURITY_STATE_BOTH
1011 | ARM_DBG_CR_MODE_CONTROL_USER;
1012 thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK;
1013 }
1014
1015 thread_state->mdscr_el1 = 0ULL; // Legacy customers issuing ARM_DEBUG_STATE dont drive single stepping.
1016 }
1017
1018 if (thread == current_thread()) {
1019 arm_debug_set32(thread->machine.DebugData);
1020 }
1021
1022 break;
1023 }
1024 case ARM_DEBUG_STATE32:
1025 /* ARM64_TODO subtle bcr/wcr semantic differences e.g. wcr and ARM_DBGBCR_TYPE_IVA */
1026 {
1027 arm_debug_state32_t *state;
1028 boolean_t enabled = FALSE;
1029 unsigned int i;
1030
1031 if (count != ARM_DEBUG_STATE32_COUNT) {
1032 return KERN_INVALID_ARGUMENT;
1033 }
1034 if (thread_is_64bit_data(thread)) {
1035 return KERN_INVALID_ARGUMENT;
1036 }
1037
1038 state = (arm_debug_state32_t *) tstate;
1039
1040 if (state->mdscr_el1 & 0x1) {
1041 enabled = TRUE;
1042 }
1043
1044 for (i = 0; i < 16; i++) {
1045 /* do not allow context IDs to be set */
1046 if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
1047 || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
1048 || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
1049 || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
1050 return KERN_PROTECTION_FAILURE;
1051 }
1052 if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
1053 || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
1054 enabled = TRUE;
1055 }
1056 }
1057
1058 if (!enabled) {
1059 arm_debug_state32_t *thread_state = find_debug_state32(thread);
1060 if (thread_state != NULL) {
1061 void *pTmp = thread->machine.DebugData;
1062 thread->machine.DebugData = NULL;
1063 zfree(ads_zone, pTmp);
1064 }
1065 } else {
1066 arm_debug_state32_t *thread_state = find_debug_state32(thread);
1067 if (thread_state == NULL) {
1068 thread->machine.DebugData = zalloc(ads_zone);
1069 bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData));
1070 thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE32;
1071 thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE32_COUNT;
1072 thread_state = find_debug_state32(thread);
1073 }
1074 assert(NULL != thread_state);
1075
1076 if (state->mdscr_el1 & 0x1) {
1077 thread_state->mdscr_el1 |= 0x1;
1078 } else {
1079 thread_state->mdscr_el1 &= ~0x1;
1080 }
1081
1082 for (i = 0; i < 16; i++) {
1083 /* set appropriate privilege; mask out unknown bits */
1084 thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
1085 | ARM_DBGBCR_MATCH_MASK
1086 | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
1087 | ARM_DBG_CR_ENABLE_MASK))
1088 | ARM_DBGBCR_TYPE_IVA
1089 | ARM_DBG_CR_LINKED_UNLINKED
1090 | ARM_DBG_CR_SECURITY_STATE_BOTH
1091 | ARM_DBG_CR_MODE_CONTROL_USER;
1092 thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK;
1093 thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
1094 | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
1095 | ARM_DBGWCR_ACCESS_CONTROL_MASK
1096 | ARM_DBG_CR_ENABLE_MASK))
1097 | ARM_DBG_CR_LINKED_UNLINKED
1098 | ARM_DBG_CR_SECURITY_STATE_BOTH
1099 | ARM_DBG_CR_MODE_CONTROL_USER;
1100 thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK;
1101 }
1102 }
1103
1104 if (thread == current_thread()) {
1105 arm_debug_set32(thread->machine.DebugData);
1106 }
1107
1108 break;
1109 }
1110
1111 case ARM_DEBUG_STATE64:
1112 {
1113 arm_debug_state64_t *state;
1114 boolean_t enabled = FALSE;
1115 unsigned int i;
1116
1117 if (count != ARM_DEBUG_STATE64_COUNT) {
1118 return KERN_INVALID_ARGUMENT;
1119 }
1120 if (!thread_is_64bit_data(thread)) {
1121 return KERN_INVALID_ARGUMENT;
1122 }
1123
1124 state = (arm_debug_state64_t *) tstate;
1125
1126 if (state->mdscr_el1 & 0x1) {
1127 enabled = TRUE;
1128 }
1129
1130 for (i = 0; i < 16; i++) {
1131 /* do not allow context IDs to be set */
1132 if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
1133 || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
1134 || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
1135 return KERN_PROTECTION_FAILURE;
1136 }
1137 if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
1138 || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
1139 enabled = TRUE;
1140 }
1141 }
1142
1143 if (!enabled) {
1144 arm_debug_state64_t *thread_state = find_debug_state64(thread);
1145 if (thread_state != NULL) {
1146 void *pTmp = thread->machine.DebugData;
1147 thread->machine.DebugData = NULL;
1148 zfree(ads_zone, pTmp);
1149 }
1150 } else {
1151 arm_debug_state64_t *thread_state = find_debug_state64(thread);
1152 if (thread_state == NULL) {
1153 thread->machine.DebugData = zalloc(ads_zone);
1154 bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData));
1155 thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE64;
1156 thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE64_COUNT;
1157 thread_state = find_debug_state64(thread);
1158 }
1159 assert(NULL != thread_state);
1160
1161 if (state->mdscr_el1 & 0x1) {
1162 thread_state->mdscr_el1 |= 0x1;
1163 } else {
1164 thread_state->mdscr_el1 &= ~0x1;
1165 }
1166
1167 for (i = 0; i < 16; i++) {
1168 /* set appropriate privilege; mask out unknown bits */
1169 thread_state->bcr[i] = (state->bcr[i] & (0 /* Was ARM_DBG_CR_ADDRESS_MASK_MASK deprecated in v8 */
1170 | 0 /* Was ARM_DBGBCR_MATCH_MASK, ignored in AArch64 state */
1171 | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
1172 | ARM_DBG_CR_ENABLE_MASK))
1173 | ARM_DBGBCR_TYPE_IVA
1174 | ARM_DBG_CR_LINKED_UNLINKED
1175 | ARM_DBG_CR_SECURITY_STATE_BOTH
1176 | ARM_DBG_CR_MODE_CONTROL_USER;
1177 thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK64;
1178 thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
1179 | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
1180 | ARM_DBGWCR_ACCESS_CONTROL_MASK
1181 | ARM_DBG_CR_ENABLE_MASK))
1182 | ARM_DBG_CR_LINKED_UNLINKED
1183 | ARM_DBG_CR_SECURITY_STATE_BOTH
1184 | ARM_DBG_CR_MODE_CONTROL_USER;
1185 thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK64;
1186 }
1187 }
1188
1189 if (thread == current_thread()) {
1190 arm_debug_set64(thread->machine.DebugData);
1191 }
1192
1193 break;
1194 }
1195
1196 case ARM_VFP_STATE:{
1197 struct arm_vfp_state *state;
1198 arm_neon_saved_state32_t *thread_state;
1199 unsigned int max;
1200
1201 if (count != ARM_VFP_STATE_COUNT && count != ARM_VFPV2_STATE_COUNT) {
1202 return KERN_INVALID_ARGUMENT;
1203 }
1204
1205 if (count == ARM_VFPV2_STATE_COUNT) {
1206 max = 32;
1207 } else {
1208 max = 64;
1209 }
1210
1211 state = (struct arm_vfp_state *) tstate;
1212 thread_state = neon_state32(thread->machine.uNeon);
1213 /* ARM64 TODO: combine fpsr and fpcr into state->fpscr */
1214
1215 bcopy(state, thread_state, (max + 1) * sizeof(uint32_t));
1216
1217 thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
1218 thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
1219 break;
1220 }
1221
1222 case ARM_NEON_STATE:{
1223 arm_neon_state_t *state;
1224 arm_neon_saved_state32_t *thread_state;
1225
1226 if (count != ARM_NEON_STATE_COUNT) {
1227 return KERN_INVALID_ARGUMENT;
1228 }
1229
1230 if (thread_is_64bit_data(thread)) {
1231 return KERN_INVALID_ARGUMENT;
1232 }
1233
1234 state = (arm_neon_state_t *)tstate;
1235 thread_state = neon_state32(thread->machine.uNeon);
1236
1237 assert(sizeof(*state) == sizeof(*thread_state));
1238 bcopy(state, thread_state, sizeof(arm_neon_state_t));
1239
1240 thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
1241 thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
1242 break;
1243 }
1244
1245 case ARM_NEON_STATE64:{
1246 arm_neon_state64_t *state;
1247 arm_neon_saved_state64_t *thread_state;
1248
1249 if (count != ARM_NEON_STATE64_COUNT) {
1250 return KERN_INVALID_ARGUMENT;
1251 }
1252
1253 if (!thread_is_64bit_data(thread)) {
1254 return KERN_INVALID_ARGUMENT;
1255 }
1256
1257 state = (arm_neon_state64_t *)tstate;
1258 thread_state = neon_state64(thread->machine.uNeon);
1259
1260 assert(sizeof(*state) == sizeof(*thread_state));
1261 bcopy(state, thread_state, sizeof(arm_neon_state64_t));
1262
1263 thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
1264 thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;
1265 break;
1266 }
1267
1268
1269 default:
1270 return KERN_INVALID_ARGUMENT;
1271 }
1272 return KERN_SUCCESS;
1273 }
1274
1275 mach_vm_address_t
1276 machine_thread_pc(thread_t thread)
1277 {
1278 struct arm_saved_state *ss = get_user_regs(thread);
1279 return (mach_vm_address_t)get_saved_state_pc(ss);
1280 }
1281
1282 void
1283 machine_thread_reset_pc(thread_t thread, mach_vm_address_t pc)
1284 {
1285 set_saved_state_pc(get_user_regs(thread), (register_t)pc);
1286 }
1287
1288 /*
1289 * Routine: machine_thread_state_initialize
1290 *
1291 */
1292 kern_return_t
1293 machine_thread_state_initialize(thread_t thread)
1294 {
1295 arm_context_t *context = thread->machine.contextData;
1296
1297 /*
1298 * Should always be set up later. For a kernel thread, we don't care
1299 * about this state. For a user thread, we'll set the state up in
1300 * setup_wqthread, bsdthread_create, load_main(), or load_unixthread().
1301 */
1302
1303 if (context != NULL) {
1304 bzero(&context->ss.uss, sizeof(context->ss.uss));
1305 bzero(&context->ns.uns, sizeof(context->ns.uns));
1306
1307 if (context->ns.nsh.flavor == ARM_NEON_SAVED_STATE64) {
1308 context->ns.ns_64.fpcr = FPCR_DEFAULT;
1309 } else {
1310 context->ns.ns_32.fpcr = FPCR_DEFAULT_32;
1311 }
1312 }
1313
1314 thread->machine.DebugData = NULL;
1315
1316 #if defined(HAS_APPLE_PAC)
1317 /* Sign the initial user-space thread state */
1318 if (thread->machine.upcb != NULL) {
1319 ml_sign_thread_state(thread->machine.upcb, 0, 0, 0, 0, 0);
1320 }
1321 #endif /* defined(HAS_APPLE_PAC) */
1322
1323 return KERN_SUCCESS;
1324 }
1325
1326 /*
1327 * Routine: machine_thread_dup
1328 *
1329 */
1330 kern_return_t
1331 machine_thread_dup(thread_t self,
1332 thread_t target,
1333 __unused boolean_t is_corpse)
1334 {
1335 struct arm_saved_state *self_saved_state;
1336 struct arm_saved_state *target_saved_state;
1337
1338 target->machine.cthread_self = self->machine.cthread_self;
1339 target->machine.cthread_data = self->machine.cthread_data;
1340
1341 self_saved_state = self->machine.upcb;
1342 target_saved_state = target->machine.upcb;
1343 bcopy(self_saved_state, target_saved_state, sizeof(struct arm_saved_state));
1344 #if defined(HAS_APPLE_PAC)
1345 if (!is_corpse && is_saved_state64(self_saved_state)) {
1346 check_and_sign_copied_thread_state(target_saved_state, self_saved_state);
1347 }
1348 #endif /* defined(HAS_APPLE_PAC) */
1349
1350 return KERN_SUCCESS;
1351 }
1352
/*
 * Routine: get_user_regs
 *
 * Return the thread's saved user-mode register state (upcb).
 */
struct arm_saved_state *
get_user_regs(thread_t thread)
{
	return thread->machine.upcb;
}
1362
/* Return the thread's saved user-mode NEON/FP register state (uNeon). */
arm_neon_saved_state_t *
get_user_neon_regs(thread_t thread)
{
	return thread->machine.uNeon;
}
1368
/*
 * Routine: find_user_regs
 *
 * Return the thread's saved user-mode register state.
 * Identical to get_user_regs(); kept as a separate entry point.
 */
struct arm_saved_state *
find_user_regs(thread_t thread)
{
	return thread->machine.upcb;
}
1378
1379 /*
1380 * Routine: find_kern_regs
1381 *
1382 */
1383 struct arm_saved_state *
1384 find_kern_regs(thread_t thread)
1385 {
1386 /*
1387 * This works only for an interrupted kernel thread
1388 */
1389 if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL) {
1390 return (struct arm_saved_state *) NULL;
1391 } else {
1392 return getCpuDatap()->cpu_int_state;
1393 }
1394 }
1395
1396 arm_debug_state32_t *
1397 find_debug_state32(thread_t thread)
1398 {
1399 if (thread && thread->machine.DebugData) {
1400 return &(thread->machine.DebugData->uds.ds32);
1401 } else {
1402 return NULL;
1403 }
1404 }
1405
1406 arm_debug_state64_t *
1407 find_debug_state64(thread_t thread)
1408 {
1409 if (thread && thread->machine.DebugData) {
1410 return &(thread->machine.DebugData->uds.ds64);
1411 } else {
1412 return NULL;
1413 }
1414 }
1415
1416 /*
1417 * Routine: thread_userstack
1418 *
1419 */
1420 kern_return_t
1421 thread_userstack(__unused thread_t thread,
1422 int flavor,
1423 thread_state_t tstate,
1424 unsigned int count,
1425 mach_vm_offset_t * user_stack,
1426 int * customstack,
1427 boolean_t is_64bit_data
1428 )
1429 {
1430 register_t sp;
1431
1432 switch (flavor) {
1433 case ARM_THREAD_STATE:
1434 if (count == ARM_UNIFIED_THREAD_STATE_COUNT) {
1435 #if __arm64__
1436 if (is_64bit_data) {
1437 sp = ((arm_unified_thread_state_t *)tstate)->ts_64.sp;
1438 } else
1439 #endif
1440 {
1441 sp = ((arm_unified_thread_state_t *)tstate)->ts_32.sp;
1442 }
1443
1444 break;
1445 }
1446
1447 /* INTENTIONAL FALL THROUGH (see machine_thread_set_state) */
1448 case ARM_THREAD_STATE32:
1449 if (count != ARM_THREAD_STATE32_COUNT) {
1450 return KERN_INVALID_ARGUMENT;
1451 }
1452 if (is_64bit_data) {
1453 return KERN_INVALID_ARGUMENT;
1454 }
1455
1456 sp = ((arm_thread_state32_t *)tstate)->sp;
1457 break;
1458 #if __arm64__
1459 case ARM_THREAD_STATE64:
1460 if (count != ARM_THREAD_STATE64_COUNT) {
1461 return KERN_INVALID_ARGUMENT;
1462 }
1463 if (!is_64bit_data) {
1464 return KERN_INVALID_ARGUMENT;
1465 }
1466
1467 sp = ((arm_thread_state32_t *)tstate)->sp;
1468 break;
1469 #endif
1470 default:
1471 return KERN_INVALID_ARGUMENT;
1472 }
1473
1474 if (sp) {
1475 *user_stack = CAST_USER_ADDR_T(sp);
1476 if (customstack) {
1477 *customstack = 1;
1478 }
1479 } else {
1480 *user_stack = CAST_USER_ADDR_T(USRSTACK64);
1481 if (customstack) {
1482 *customstack = 0;
1483 }
1484 }
1485
1486 return KERN_SUCCESS;
1487 }
1488
1489 /*
1490 * thread_userstackdefault:
1491 *
1492 * Return the default stack location for the
1493 * thread, if otherwise unknown.
1494 */
1495 kern_return_t
1496 thread_userstackdefault(mach_vm_offset_t * default_user_stack,
1497 boolean_t is64bit)
1498 {
1499 if (is64bit) {
1500 *default_user_stack = USRSTACK64;
1501 } else {
1502 *default_user_stack = USRSTACK;
1503 }
1504
1505 return KERN_SUCCESS;
1506 }
1507
1508 /*
1509 * Routine: thread_setuserstack
1510 *
1511 */
1512 void
1513 thread_setuserstack(thread_t thread,
1514 mach_vm_address_t user_stack)
1515 {
1516 struct arm_saved_state *sv;
1517
1518 sv = get_user_regs(thread);
1519
1520 set_saved_state_sp(sv, user_stack);
1521
1522 return;
1523 }
1524
1525 /*
1526 * Routine: thread_adjuserstack
1527 *
1528 */
1529 uint64_t
1530 thread_adjuserstack(thread_t thread,
1531 int adjust)
1532 {
1533 struct arm_saved_state *sv;
1534 uint64_t sp;
1535
1536 sv = get_user_regs(thread);
1537
1538 sp = get_saved_state_sp(sv);
1539 sp += adjust;
1540 set_saved_state_sp(sv, sp);;
1541
1542 return sp;
1543 }
1544
1545 /*
1546 * Routine: thread_setentrypoint
1547 *
1548 */
1549 void
1550 thread_setentrypoint(thread_t thread,
1551 mach_vm_offset_t entry)
1552 {
1553 struct arm_saved_state *sv;
1554
1555 sv = get_user_regs(thread);
1556
1557 set_saved_state_pc(sv, entry);
1558
1559 return;
1560 }
1561
1562 /*
1563 * Routine: thread_entrypoint
1564 *
1565 */
1566 kern_return_t
1567 thread_entrypoint(__unused thread_t thread,
1568 int flavor,
1569 thread_state_t tstate,
1570 unsigned int count __unused,
1571 mach_vm_offset_t * entry_point
1572 )
1573 {
1574 switch (flavor) {
1575 case ARM_THREAD_STATE:
1576 {
1577 struct arm_thread_state *state;
1578
1579 state = (struct arm_thread_state *) tstate;
1580
1581 /*
1582 * If a valid entry point is specified, use it.
1583 */
1584 if (state->pc) {
1585 *entry_point = CAST_USER_ADDR_T(state->pc);
1586 } else {
1587 *entry_point = CAST_USER_ADDR_T(VM_MIN_ADDRESS);
1588 }
1589 }
1590 break;
1591
1592 case ARM_THREAD_STATE64:
1593 {
1594 struct arm_thread_state64 *state;
1595
1596 state = (struct arm_thread_state64*) tstate;
1597
1598 /*
1599 * If a valid entry point is specified, use it.
1600 */
1601 if (state->pc) {
1602 *entry_point = CAST_USER_ADDR_T(state->pc);
1603 } else {
1604 *entry_point = CAST_USER_ADDR_T(VM_MIN_ADDRESS);
1605 }
1606
1607 break;
1608 }
1609 default:
1610 return KERN_INVALID_ARGUMENT;
1611 }
1612
1613 return KERN_SUCCESS;
1614 }
1615
1616
1617 /*
1618 * Routine: thread_set_child
1619 *
1620 */
1621 void
1622 thread_set_child(thread_t child,
1623 int pid)
1624 {
1625 struct arm_saved_state *child_state;
1626
1627 child_state = get_user_regs(child);
1628
1629 set_saved_state_reg(child_state, 0, pid);
1630 set_saved_state_reg(child_state, 1, 1ULL);
1631 }
1632
1633
1634 /*
1635 * Routine: thread_set_parent
1636 *
1637 */
1638 void
1639 thread_set_parent(thread_t parent,
1640 int pid)
1641 {
1642 struct arm_saved_state *parent_state;
1643
1644 parent_state = get_user_regs(parent);
1645
1646 set_saved_state_reg(parent_state, 0, pid);
1647 set_saved_state_reg(parent_state, 1, 0);
1648 }
1649
1650
/*
 * Heap-allocated snapshot of a thread's user register state (and NEON
 * state when VFP is configured).  Produced by act_thread_csave() and
 * consumed by act_thread_catt() or released by act_thread_cfree().
 */
struct arm_act_context {
	struct arm_unified_thread_state ss;     /* integer register state */
#if __ARM_VFP__
	struct arm_neon_saved_state ns;         /* NEON/FP register state */
#endif
};
1657
1658 /*
1659 * Routine: act_thread_csave
1660 *
1661 */
1662 void *
1663 act_thread_csave(void)
1664 {
1665 struct arm_act_context *ic;
1666 kern_return_t kret;
1667 unsigned int val;
1668 thread_t thread = current_thread();
1669
1670 ic = (struct arm_act_context *) kalloc(sizeof(struct arm_act_context));
1671 if (ic == (struct arm_act_context *) NULL) {
1672 return (void *) 0;
1673 }
1674
1675 val = ARM_UNIFIED_THREAD_STATE_COUNT;
1676 kret = machine_thread_get_state(thread, ARM_THREAD_STATE, (thread_state_t)&ic->ss, &val);
1677 if (kret != KERN_SUCCESS) {
1678 kfree(ic, sizeof(struct arm_act_context));
1679 return (void *) 0;
1680 }
1681
1682 #if __ARM_VFP__
1683 if (thread_is_64bit_data(thread)) {
1684 val = ARM_NEON_STATE64_COUNT;
1685 kret = machine_thread_get_state(thread,
1686 ARM_NEON_STATE64,
1687 (thread_state_t)&ic->ns,
1688 &val);
1689 } else {
1690 val = ARM_NEON_STATE_COUNT;
1691 kret = machine_thread_get_state(thread,
1692 ARM_NEON_STATE,
1693 (thread_state_t)&ic->ns,
1694 &val);
1695 }
1696 if (kret != KERN_SUCCESS) {
1697 kfree(ic, sizeof(struct arm_act_context));
1698 return (void *) 0;
1699 }
1700 #endif
1701 return ic;
1702 }
1703
1704 /*
1705 * Routine: act_thread_catt
1706 *
1707 */
1708 void
1709 act_thread_catt(void * ctx)
1710 {
1711 struct arm_act_context *ic;
1712 kern_return_t kret;
1713 thread_t thread = current_thread();
1714
1715 ic = (struct arm_act_context *) ctx;
1716 if (ic == (struct arm_act_context *) NULL) {
1717 return;
1718 }
1719
1720 kret = machine_thread_set_state(thread, ARM_THREAD_STATE, (thread_state_t)&ic->ss, ARM_UNIFIED_THREAD_STATE_COUNT);
1721 if (kret != KERN_SUCCESS) {
1722 goto out;
1723 }
1724
1725 #if __ARM_VFP__
1726 if (thread_is_64bit_data(thread)) {
1727 kret = machine_thread_set_state(thread,
1728 ARM_NEON_STATE64,
1729 (thread_state_t)&ic->ns,
1730 ARM_NEON_STATE64_COUNT);
1731 } else {
1732 kret = machine_thread_set_state(thread,
1733 ARM_NEON_STATE,
1734 (thread_state_t)&ic->ns,
1735 ARM_NEON_STATE_COUNT);
1736 }
1737 if (kret != KERN_SUCCESS) {
1738 goto out;
1739 }
1740 #endif
1741 out:
1742 kfree(ic, sizeof(struct arm_act_context));
1743 }
1744
/*
 * Routine: act_thread_cfree
 *
 * Discard a state snapshot taken by act_thread_csave() without
 * restoring it.  (Header previously misnamed this act_thread_catt.)
 */
void
act_thread_cfree(void *ctx)
{
	kfree(ctx, sizeof(struct arm_act_context));
}
1754
1755 kern_return_t
1756 thread_set_wq_state32(thread_t thread,
1757 thread_state_t tstate)
1758 {
1759 arm_thread_state_t *state;
1760 struct arm_saved_state *saved_state;
1761 struct arm_saved_state32 *saved_state_32;
1762 thread_t curth = current_thread();
1763 spl_t s = 0;
1764
1765 assert(!thread_is_64bit_data(thread));
1766
1767 saved_state = thread->machine.upcb;
1768 saved_state_32 = saved_state32(saved_state);
1769
1770 state = (arm_thread_state_t *)tstate;
1771
1772 if (curth != thread) {
1773 s = splsched();
1774 thread_lock(thread);
1775 }
1776
1777 /*
1778 * do not zero saved_state, it can be concurrently accessed
1779 * and zero is not a valid state for some of the registers,
1780 * like sp.
1781 */
1782 thread_state32_to_saved_state(state, saved_state);
1783 saved_state_32->cpsr = PSR64_USER32_DEFAULT;
1784
1785 if (curth != thread) {
1786 thread_unlock(thread);
1787 splx(s);
1788 }
1789
1790 return KERN_SUCCESS;
1791 }
1792
1793 kern_return_t
1794 thread_set_wq_state64(thread_t thread,
1795 thread_state_t tstate)
1796 {
1797 arm_thread_state64_t *state;
1798 struct arm_saved_state *saved_state;
1799 struct arm_saved_state64 *saved_state_64;
1800 thread_t curth = current_thread();
1801 spl_t s = 0;
1802
1803 assert(thread_is_64bit_data(thread));
1804
1805 saved_state = thread->machine.upcb;
1806 saved_state_64 = saved_state64(saved_state);
1807 state = (arm_thread_state64_t *)tstate;
1808
1809 if (curth != thread) {
1810 s = splsched();
1811 thread_lock(thread);
1812 }
1813
1814 /*
1815 * do not zero saved_state, it can be concurrently accessed
1816 * and zero is not a valid state for some of the registers,
1817 * like sp.
1818 */
1819 thread_state64_to_saved_state(state, saved_state);
1820 set_saved_state_cpsr(saved_state, PSR64_USER64_DEFAULT);
1821
1822 if (curth != thread) {
1823 thread_unlock(thread);
1824 splx(s);
1825 }
1826
1827 return KERN_SUCCESS;
1828 }