/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <debug.h>
#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/thread_status.h>
#include <kern/thread.h>
#include <kern/kalloc.h>
#include <arm/vmparam.h>
#include <arm/cpu_data_internal.h>
#include <arm64/proc_reg.h>
#if __has_feature(ptrauth_calls)
#include <ptrauth.h>
#endif

struct arm_vfpv2_state {
	__uint32_t __r[32];
	__uint32_t __fpscr;
};

typedef struct arm_vfpv2_state arm_vfpv2_state_t;

#define ARM_VFPV2_STATE_COUNT \
	((mach_msg_type_number_t)(sizeof (arm_vfpv2_state_t)/sizeof(uint32_t)))
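
/*
 * Legacy VFPv2 layout: 32 single-precision registers plus FPSCR, apparently
 * kept so that older 32-bit clients can pass a shorter ARM_VFP_STATE buffer.
 * The ARM_VFP_STATE cases in machine_thread_get_state() and
 * machine_thread_set_state() below accept either ARM_VFPV2_STATE_COUNT or
 * the full ARM_VFP_STATE_COUNT.
 */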

/*
 * Forward declarations
 */
void thread_set_child(thread_t child, int pid);
void thread_set_parent(thread_t parent, int pid);

/*
 * Maps state flavor to number of words in the state:
 */
/* __private_extern__ */
unsigned int _MachineStateCount[] = {
	[ARM_UNIFIED_THREAD_STATE] = ARM_UNIFIED_THREAD_STATE_COUNT,
	[ARM_VFP_STATE] = ARM_VFP_STATE_COUNT,
	[ARM_EXCEPTION_STATE] = ARM_EXCEPTION_STATE_COUNT,
	[ARM_DEBUG_STATE] = ARM_DEBUG_STATE_COUNT,
	[ARM_THREAD_STATE64] = ARM_THREAD_STATE64_COUNT,
	[ARM_EXCEPTION_STATE64] = ARM_EXCEPTION_STATE64_COUNT,
	[ARM_THREAD_STATE32] = ARM_THREAD_STATE32_COUNT,
	[ARM_DEBUG_STATE32] = ARM_DEBUG_STATE32_COUNT,
	[ARM_DEBUG_STATE64] = ARM_DEBUG_STATE64_COUNT,
	[ARM_NEON_STATE] = ARM_NEON_STATE_COUNT,
	[ARM_NEON_STATE64] = ARM_NEON_STATE64_COUNT,
	[ARM_PAGEIN_STATE] = ARM_PAGEIN_STATE_COUNT,
};
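
/*
 * _MachineStateCount is consumed by machine-independent code (an assumption;
 * its users live outside this file) to validate a flavor/count pair before
 * dispatching into the machine_thread_{get,set}_state() handlers below,
 * roughly along the lines of:
 *
 *	if (flavor >= sizeof(_MachineStateCount) / sizeof(_MachineStateCount[0]) ||
 *	    count < _MachineStateCount[flavor]) {
 *		return KERN_INVALID_ARGUMENT;
 *	}
 */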

extern zone_t ads_zone;

#if __arm64__
/*
 * Copy values from saved_state to ts64.
 */
void
saved_state_to_thread_state64(const arm_saved_state_t * saved_state,
    arm_thread_state64_t * ts64)
{
	uint32_t i;

	assert(is_saved_state64(saved_state));

	ts64->fp = get_saved_state_fp(saved_state);
	ts64->lr = get_saved_state_lr(saved_state);
	ts64->sp = get_saved_state_sp(saved_state);
	ts64->pc = get_saved_state_pc(saved_state);
	ts64->cpsr = get_saved_state_cpsr(saved_state);
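	/*
	 * Only x0..x28 are stored in the x[] array; fp (x29), lr (x30), sp,
	 * and pc travel in the dedicated fields copied above.
	 */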
	for (i = 0; i < 29; i++) {
		ts64->x[i] = get_saved_state_reg(saved_state, i);
	}
}

/*
 * Copy values from ts64 to saved_state.
 */
void
thread_state64_to_saved_state(const arm_thread_state64_t * ts64,
    arm_saved_state_t * saved_state)
{
	uint32_t i;

	assert(is_saved_state64(saved_state));

	set_saved_state_fp(saved_state, ts64->fp);
	set_saved_state_lr(saved_state, ts64->lr);
	set_saved_state_sp(saved_state, ts64->sp);
	set_saved_state_pc(saved_state, ts64->pc);
	set_saved_state_cpsr(saved_state, (ts64->cpsr & ~PSR64_MODE_MASK) | PSR64_MODE_RW_64);
	for (i = 0; i < 29; i++) {
		set_saved_state_reg(saved_state, i, ts64->x[i]);
	}
}

#endif /* __arm64__ */

static kern_return_t
handle_get_arm32_thread_state(thread_state_t tstate,
    mach_msg_type_number_t * count,
    const arm_saved_state_t * saved_state)
{
	if (*count < ARM_THREAD_STATE32_COUNT) {
		return KERN_INVALID_ARGUMENT;
	}
	if (!is_saved_state32(saved_state)) {
		return KERN_INVALID_ARGUMENT;
	}

	(void)saved_state_to_thread_state32(saved_state, (arm_thread_state32_t *)tstate);
	*count = ARM_THREAD_STATE32_COUNT;
	return KERN_SUCCESS;
}

static kern_return_t
handle_get_arm64_thread_state(thread_state_t tstate,
    mach_msg_type_number_t * count,
    const arm_saved_state_t * saved_state)
{
	if (*count < ARM_THREAD_STATE64_COUNT) {
		return KERN_INVALID_ARGUMENT;
	}
	if (!is_saved_state64(saved_state)) {
		return KERN_INVALID_ARGUMENT;
	}

	(void)saved_state_to_thread_state64(saved_state, (arm_thread_state64_t *)tstate);
	*count = ARM_THREAD_STATE64_COUNT;
	return KERN_SUCCESS;
}


static kern_return_t
handle_get_arm_thread_state(thread_state_t tstate,
    mach_msg_type_number_t * count,
    const arm_saved_state_t * saved_state)
{
	/* In an arm64 world, this flavor can be used to retrieve the thread
	 * state of a 32-bit or 64-bit thread into a unified structure, but we
	 * need to support legacy clients who are only aware of 32-bit, so
	 * check the count to see what the client is expecting.
	 */
	if (*count < ARM_UNIFIED_THREAD_STATE_COUNT) {
		return handle_get_arm32_thread_state(tstate, count, saved_state);
	}

	arm_unified_thread_state_t *unified_state = (arm_unified_thread_state_t *) tstate;
	bzero(unified_state, sizeof(*unified_state));
#if __arm64__
	if (is_saved_state64(saved_state)) {
		unified_state->ash.flavor = ARM_THREAD_STATE64;
		unified_state->ash.count = ARM_THREAD_STATE64_COUNT;
		(void)saved_state_to_thread_state64(saved_state, thread_state64(unified_state));
	} else
#endif
	{
		unified_state->ash.flavor = ARM_THREAD_STATE32;
		unified_state->ash.count = ARM_THREAD_STATE32_COUNT;
		(void)saved_state_to_thread_state32(saved_state, thread_state32(unified_state));
	}
	*count = ARM_UNIFIED_THREAD_STATE_COUNT;
	return KERN_SUCCESS;
}


static kern_return_t
handle_set_arm32_thread_state(const thread_state_t tstate,
    mach_msg_type_number_t count,
    arm_saved_state_t * saved_state)
{
	if (count != ARM_THREAD_STATE32_COUNT) {
		return KERN_INVALID_ARGUMENT;
	}

	(void)thread_state32_to_saved_state((const arm_thread_state32_t *)tstate, saved_state);
	return KERN_SUCCESS;
}

static kern_return_t
handle_set_arm64_thread_state(const thread_state_t tstate,
    mach_msg_type_number_t count,
    arm_saved_state_t * saved_state)
{
	if (count != ARM_THREAD_STATE64_COUNT) {
		return KERN_INVALID_ARGUMENT;
	}

	(void)thread_state64_to_saved_state((const arm_thread_state64_t *)tstate, saved_state);
	return KERN_SUCCESS;
}


static kern_return_t
handle_set_arm_thread_state(const thread_state_t tstate,
    mach_msg_type_number_t count,
    arm_saved_state_t * saved_state)
{
	/* In an arm64 world, this flavor can be used to set the thread state of a
	 * 32-bit or 64-bit thread from a unified structure, but we need to support
	 * legacy clients who are only aware of 32-bit, so check the count to see
	 * what the client is expecting.
	 */
	if (count < ARM_UNIFIED_THREAD_STATE_COUNT) {
		if (!is_saved_state32(saved_state)) {
			return KERN_INVALID_ARGUMENT;
		}
		return handle_set_arm32_thread_state(tstate, count, saved_state);
	}

	const arm_unified_thread_state_t *unified_state = (const arm_unified_thread_state_t *) tstate;
#if __arm64__
	if (is_thread_state64(unified_state)) {
		if (!is_saved_state64(saved_state)) {
			return KERN_INVALID_ARGUMENT;
		}
		(void)thread_state64_to_saved_state(const_thread_state64(unified_state), saved_state);
	} else
#endif
	{
		if (!is_saved_state32(saved_state)) {
			return KERN_INVALID_ARGUMENT;
		}
		(void)thread_state32_to_saved_state(const_thread_state32(unified_state), saved_state);
	}

	return KERN_SUCCESS;
}


/*
 * Translate thread state arguments to userspace representation
 */

kern_return_t
machine_thread_state_convert_to_user(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t *count)
{
#if __has_feature(ptrauth_calls)
	arm_thread_state64_t *ts64;

	switch (flavor) {
	case ARM_THREAD_STATE:
	{
		arm_unified_thread_state_t *unified_state = (arm_unified_thread_state_t *)tstate;

		if (*count < ARM_UNIFIED_THREAD_STATE_COUNT || !is_thread_state64(unified_state)) {
			return KERN_SUCCESS;
		}
		ts64 = thread_state64(unified_state);
		break;
	}
	case ARM_THREAD_STATE64:
	{
		if (*count < ARM_THREAD_STATE64_COUNT) {
			return KERN_SUCCESS;
		}
		ts64 = (arm_thread_state64_t *)tstate;
		break;
	}
	default:
		return KERN_SUCCESS;
	}

	// Note that kernel threads never have disable_user_jop set
	if (current_thread()->machine.disable_user_jop || !thread_is_64bit_addr(current_thread()) ||
	    thread->machine.disable_user_jop || !thread_is_64bit_addr(thread) ||
	    (BootArgs->bootFlags & kBootFlagsDisableUserThreadStateJOP)) {
		ts64->flags = __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH;
		return KERN_SUCCESS;
	}

	ts64->flags = 0;
	if (ts64->lr) {
		// lr might contain an IB-signed return address (strip is a no-op on unsigned addresses)
		uintptr_t stripped_lr = (uintptr_t)ptrauth_strip((void *)ts64->lr,
		    ptrauth_key_return_address);
		if (ts64->lr != stripped_lr) {
			// Need to allow already-signed lr value to round-trip as is
			ts64->flags |= __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR;
		}
		// Note that an IB-signed return address that happens to have a 0 signature value
		// will round-trip correctly even if IA-signed again below (and IA-authd later)
	}

	if (BootArgs->bootFlags & kBootFlagsDisableUserJOP) {
		return KERN_SUCCESS;
	}

	if (ts64->pc) {
		ts64->pc = (uintptr_t)pmap_sign_user_ptr((void*)ts64->pc,
		    ptrauth_key_process_independent_code, ptrauth_string_discriminator("pc"));
	}
	if (ts64->lr && !(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR)) {
		ts64->lr = (uintptr_t)pmap_sign_user_ptr((void*)ts64->lr,
		    ptrauth_key_process_independent_code, ptrauth_string_discriminator("lr"));
	}
	if (ts64->sp) {
		ts64->sp = (uintptr_t)pmap_sign_user_ptr((void*)ts64->sp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("sp"));
	}
	if (ts64->fp) {
		ts64->fp = (uintptr_t)pmap_sign_user_ptr((void*)ts64->fp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("fp"));
	}

	return KERN_SUCCESS;
#else
	// No conversion to userspace representation on this platform
	(void)thread; (void)flavor; (void)tstate; (void)count;
	return KERN_SUCCESS;
#endif /* __has_feature(ptrauth_calls) */
}

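/*
 * Round-trip note: the signing above pairs with the authentication in
 * machine_thread_state_convert_from_user() below. pc/lr are signed with the
 * process-independent code key and sp/fp with the process-independent data
 * key, each diversified by a field-name string discriminator, so state
 * obtained via thread_get_state() can be handed back to thread_set_state()
 * unchanged, while a forged pointer fails authentication and comes back
 * poisoned rather than being accepted.
 */
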
/*
 * Translate thread state arguments from userspace representation
 */

kern_return_t
machine_thread_state_convert_from_user(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t count)
{
#if __has_feature(ptrauth_calls)
	arm_thread_state64_t *ts64;

	switch (flavor) {
	case ARM_THREAD_STATE:
	{
		arm_unified_thread_state_t *unified_state = (arm_unified_thread_state_t *)tstate;

		if (count < ARM_UNIFIED_THREAD_STATE_COUNT || !is_thread_state64(unified_state)) {
			return KERN_SUCCESS;
		}
		ts64 = thread_state64(unified_state);
		break;
	}
	case ARM_THREAD_STATE64:
	{
		if (count != ARM_THREAD_STATE64_COUNT) {
			return KERN_SUCCESS;
		}
		ts64 = (arm_thread_state64_t *)tstate;
		break;
	}
	default:
		return KERN_SUCCESS;
	}

	// Note that kernel threads never have disable_user_jop set
	if (current_thread()->machine.disable_user_jop || !thread_is_64bit_addr(current_thread())) {
		if (thread->machine.disable_user_jop || !thread_is_64bit_addr(thread)) {
			ts64->flags = __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH;
			return KERN_SUCCESS;
		}
		// A JOP-disabled process must not set thread state on a JOP-enabled process
		return KERN_PROTECTION_FAILURE;
	}

	if (ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH) {
		if (thread->machine.disable_user_jop || !thread_is_64bit_addr(thread) ||
		    (BootArgs->bootFlags & kBootFlagsDisableUserThreadStateJOP)) {
			return KERN_SUCCESS;
		}
		// Disallow setting unsigned thread state on JOP-enabled processes.
		// Ignore the flag and treat the thread state arguments as signed;
		// ptrauth poisoning will cause the resulting thread state to be invalid.
		ts64->flags &= ~__DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH;
	}

	if (ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR) {
		// lr might contain an IB-signed return address (strip is a no-op on unsigned addresses)
		uintptr_t stripped_lr = (uintptr_t)ptrauth_strip((void *)ts64->lr,
		    ptrauth_key_return_address);
		if (ts64->lr == stripped_lr) {
			// Don't allow an unsigned pointer to be passed through as is. Ignore the
			// flag and treat it as IA-signed below (where auth failure may poison the value).
			ts64->flags &= ~__DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR;
		}
		// Note that an IB-signed return address that happens to have a 0 signature value
		// will also have been IA-signed (without this flag being set) and so will IA-auth
		// correctly below.
	}

	if (BootArgs->bootFlags & kBootFlagsDisableUserJOP) {
		return KERN_SUCCESS;
	}

	if (ts64->pc) {
		ts64->pc = (uintptr_t)pmap_auth_user_ptr((void*)ts64->pc,
		    ptrauth_key_process_independent_code, ptrauth_string_discriminator("pc"));
	}
	if (ts64->lr && !(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR)) {
		ts64->lr = (uintptr_t)pmap_auth_user_ptr((void*)ts64->lr,
		    ptrauth_key_process_independent_code, ptrauth_string_discriminator("lr"));
	}
	if (ts64->sp) {
		ts64->sp = (uintptr_t)pmap_auth_user_ptr((void*)ts64->sp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("sp"));
	}
	if (ts64->fp) {
		ts64->fp = (uintptr_t)pmap_auth_user_ptr((void*)ts64->fp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("fp"));
	}

	return KERN_SUCCESS;
#else
	// No conversion from userspace representation on this platform
	(void)thread; (void)flavor; (void)tstate; (void)count;
	return KERN_SUCCESS;
#endif /* __has_feature(ptrauth_calls) */
}

/*
 * Translate signal context data pointer to userspace representation
 */

kern_return_t
machine_thread_siguctx_pointer_convert_to_user(
	__assert_only thread_t thread,
	user_addr_t *uctxp)
{
#if __has_feature(ptrauth_calls)
	if (current_thread()->machine.disable_user_jop || !thread_is_64bit_addr(current_thread())) {
		assert(thread->machine.disable_user_jop || !thread_is_64bit_addr(thread));
		return KERN_SUCCESS;
	}

	if (BootArgs->bootFlags & kBootFlagsDisableUserJOP) {
		return KERN_SUCCESS;
	}

	if (*uctxp) {
		*uctxp = (uintptr_t)pmap_sign_user_ptr((void*)*uctxp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("uctx"));
	}

	return KERN_SUCCESS;
#else
	// No conversion to userspace representation on this platform
	(void)thread; (void)uctxp;
	return KERN_SUCCESS;
#endif /* __has_feature(ptrauth_calls) */
}

/*
 * Translate array of function pointer syscall arguments from userspace representation
 */

kern_return_t
machine_thread_function_pointers_convert_from_user(
	__assert_only thread_t thread,
	user_addr_t *fptrs,
	uint32_t count)
{
#if __has_feature(ptrauth_calls)
	if (current_thread()->machine.disable_user_jop || !thread_is_64bit_addr(current_thread())) {
		assert(thread->machine.disable_user_jop || !thread_is_64bit_addr(thread));
		return KERN_SUCCESS;
	}

	if (BootArgs->bootFlags & kBootFlagsDisableUserJOP) {
		return KERN_SUCCESS;
	}

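	/* ptrauth_key_function_pointer with a 0 discriminator matches the
	 * arm64e ABI convention for plain C function pointers (stated here as
	 * background; the convention comes from the ptrauth ABI, not from
	 * this file). */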
	while (count--) {
		if (*fptrs) {
			*fptrs = (uintptr_t)pmap_auth_user_ptr((void*)*fptrs,
			    ptrauth_key_function_pointer, 0);
		}
		fptrs++;
	}

	return KERN_SUCCESS;
#else
	// No conversion from userspace representation on this platform
	(void)thread; (void)fptrs; (void)count;
	return KERN_SUCCESS;
#endif /* __has_feature(ptrauth_calls) */
}

/*
 * Routine: machine_thread_get_state
 *
 */
kern_return_t
machine_thread_get_state(thread_t thread,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t * count)
{
	switch (flavor) {
	case THREAD_STATE_FLAVOR_LIST:
		if (*count < 4) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = ARM_EXCEPTION_STATE;
		tstate[3] = ARM_DEBUG_STATE;
		*count = 4;
		break;

	case THREAD_STATE_FLAVOR_LIST_NEW:
		if (*count < 4) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = thread_is_64bit_data(thread) ? ARM_EXCEPTION_STATE64 : ARM_EXCEPTION_STATE;
		tstate[3] = thread_is_64bit_data(thread) ? ARM_DEBUG_STATE64 : ARM_DEBUG_STATE32;
		*count = 4;
		break;

	case THREAD_STATE_FLAVOR_LIST_10_15:
		if (*count < 5) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = thread_is_64bit_data(thread) ? ARM_EXCEPTION_STATE64 : ARM_EXCEPTION_STATE;
		tstate[3] = thread_is_64bit_data(thread) ? ARM_DEBUG_STATE64 : ARM_DEBUG_STATE32;
		tstate[4] = ARM_PAGEIN_STATE;
		*count = 5;
		break;

	case ARM_THREAD_STATE:
	{
		kern_return_t rn = handle_get_arm_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;
	}
	case ARM_THREAD_STATE32:
	{
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		kern_return_t rn = handle_get_arm32_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;
	}
#if __arm64__
	case ARM_THREAD_STATE64:
	{
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		kern_return_t rn = handle_get_arm64_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;
	}
#endif
	case ARM_EXCEPTION_STATE:{
		struct arm_exception_state *state;
		struct arm_saved_state32 *saved_state;

		if (*count < ARM_EXCEPTION_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (struct arm_exception_state *) tstate;
		saved_state = saved_state32(thread->machine.upcb);

		state->exception = saved_state->exception;
		state->fsr = saved_state->esr;
		state->far = saved_state->far;

		*count = ARM_EXCEPTION_STATE_COUNT;
		break;
	}
	case ARM_EXCEPTION_STATE64:{
		struct arm_exception_state64 *state;
		struct arm_saved_state64 *saved_state;

		if (*count < ARM_EXCEPTION_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (struct arm_exception_state64 *) tstate;
		saved_state = saved_state64(thread->machine.upcb);

		state->exception = saved_state->exception;
		state->far = saved_state->far;
		state->esr = saved_state->esr;

		*count = ARM_EXCEPTION_STATE64_COUNT;
		break;
	}
	case ARM_DEBUG_STATE:{
		arm_legacy_debug_state_t *state;
		arm_debug_state32_t *thread_state;

		if (*count < ARM_LEGACY_DEBUG_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_legacy_debug_state_t *) tstate;
		thread_state = find_debug_state32(thread);

		if (thread_state == NULL) {
			bzero(state, sizeof(arm_legacy_debug_state_t));
		} else {
			bcopy(thread_state, state, sizeof(arm_legacy_debug_state_t));
		}

		*count = ARM_LEGACY_DEBUG_STATE_COUNT;
		break;
	}
	case ARM_DEBUG_STATE32:{
		arm_debug_state32_t *state;
		arm_debug_state32_t *thread_state;

		if (*count < ARM_DEBUG_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_debug_state32_t *) tstate;
		thread_state = find_debug_state32(thread);

		if (thread_state == NULL) {
			bzero(state, sizeof(arm_debug_state32_t));
		} else {
			bcopy(thread_state, state, sizeof(arm_debug_state32_t));
		}

		*count = ARM_DEBUG_STATE32_COUNT;
		break;
	}

	case ARM_DEBUG_STATE64:{
		arm_debug_state64_t *state;
		arm_debug_state64_t *thread_state;

		if (*count < ARM_DEBUG_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_debug_state64_t *) tstate;
		thread_state = find_debug_state64(thread);

		if (thread_state == NULL) {
			bzero(state, sizeof(arm_debug_state64_t));
		} else {
			bcopy(thread_state, state, sizeof(arm_debug_state64_t));
		}

		*count = ARM_DEBUG_STATE64_COUNT;
		break;
	}

	case ARM_VFP_STATE:{
		struct arm_vfp_state *state;
		arm_neon_saved_state32_t *thread_state;
		unsigned int max;

		if (*count < ARM_VFP_STATE_COUNT) {
			if (*count < ARM_VFPV2_STATE_COUNT) {
				return KERN_INVALID_ARGUMENT;
			} else {
				*count = ARM_VFPV2_STATE_COUNT;
			}
		}

		if (*count == ARM_VFPV2_STATE_COUNT) {
			max = 32;
		} else {
			max = 64;
		}

		state = (struct arm_vfp_state *) tstate;
		thread_state = neon_state32(thread->machine.uNeon);
		/* ARM64 TODO: set fpsr and fpcr from state->fpscr */

		bcopy(thread_state, state, (max + 1) * sizeof(uint32_t));
		*count = (max + 1);
		break;
	}
	case ARM_NEON_STATE:{
		arm_neon_state_t *state;
		arm_neon_saved_state32_t *thread_state;

		if (*count < ARM_NEON_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_neon_state_t *)tstate;
		thread_state = neon_state32(thread->machine.uNeon);

		assert(sizeof(*thread_state) == sizeof(*state));
		bcopy(thread_state, state, sizeof(arm_neon_state_t));

		*count = ARM_NEON_STATE_COUNT;
		break;
	}

	case ARM_NEON_STATE64:{
		arm_neon_state64_t *state;
		arm_neon_saved_state64_t *thread_state;

		if (*count < ARM_NEON_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_neon_state64_t *)tstate;
		thread_state = neon_state64(thread->machine.uNeon);

		/* For now, these are identical */
		assert(sizeof(*state) == sizeof(*thread_state));
		bcopy(thread_state, state, sizeof(arm_neon_state64_t));

		*count = ARM_NEON_STATE64_COUNT;
		break;
	}


	case ARM_PAGEIN_STATE: {
		arm_pagein_state_t *state;

		if (*count < ARM_PAGEIN_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_pagein_state_t *)tstate;
		state->__pagein_error = thread->t_pagein_error;

		*count = ARM_PAGEIN_STATE_COUNT;
		break;
	}


	default:
		return KERN_INVALID_ARGUMENT;
	}
	return KERN_SUCCESS;
}
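
/*
 * Illustrative userspace counterpart (a sketch, not part of this file;
 * assumes a valid thread port and the standard Mach thread_get_state() API):
 *
 *	arm_thread_state64_t ts;
 *	mach_msg_type_number_t cnt = ARM_THREAD_STATE64_COUNT;
 *	kern_return_t kr = thread_get_state(thread_port, ARM_THREAD_STATE64,
 *	    (thread_state_t)&ts, &cnt);
 *
 * On arm64e the returned pc/lr/sp/fp carry the signatures applied by
 * machine_thread_state_convert_to_user() above.
 */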


/*
 * Routine: machine_thread_get_kern_state
 *
 */
kern_return_t
machine_thread_get_kern_state(thread_t thread,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t * count)
{
	/*
	 * This works only for an interrupted kernel thread
	 */
	if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL) {
		return KERN_FAILURE;
	}

	switch (flavor) {
	case ARM_THREAD_STATE:
	{
		kern_return_t rn = handle_get_arm_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
		if (rn) {
			return rn;
		}
		break;
	}
	case ARM_THREAD_STATE32:
	{
		kern_return_t rn = handle_get_arm32_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
		if (rn) {
			return rn;
		}
		break;
	}
#if __arm64__
	case ARM_THREAD_STATE64:
	{
		kern_return_t rn = handle_get_arm64_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
		if (rn) {
			return rn;
		}
		break;
	}
#endif
	default:
		return KERN_INVALID_ARGUMENT;
	}
	return KERN_SUCCESS;
}

void
machine_thread_switch_addrmode(thread_t thread)
{
	if (task_has_64Bit_data(thread->task)) {
		thread->machine.upcb->ash.flavor = ARM_SAVED_STATE64;
		thread->machine.upcb->ash.count = ARM_SAVED_STATE64_COUNT;
		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;

		/*
		 * Reinitialize the NEON state.
		 */
		bzero(&thread->machine.uNeon->uns, sizeof(thread->machine.uNeon->uns));
		thread->machine.uNeon->ns_64.fpcr = FPCR_DEFAULT;
	} else {
		thread->machine.upcb->ash.flavor = ARM_SAVED_STATE32;
		thread->machine.upcb->ash.count = ARM_SAVED_STATE32_COUNT;
		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;

		/*
		 * Reinitialize the NEON state.
		 */
		bzero(&thread->machine.uNeon->uns, sizeof(thread->machine.uNeon->uns));
		thread->machine.uNeon->ns_32.fpcr = FPCR_DEFAULT_32;
	}
}

extern long long arm_debug_get(void);

/*
 * Routine: machine_thread_set_state
 *
 */
kern_return_t
machine_thread_set_state(thread_t thread,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t count)
{
	kern_return_t rn;

	switch (flavor) {
	case ARM_THREAD_STATE:
		rn = handle_set_arm_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;

	case ARM_THREAD_STATE32:
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		rn = handle_set_arm32_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;

#if __arm64__
	case ARM_THREAD_STATE64:
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		rn = handle_set_arm64_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;
#endif
	case ARM_EXCEPTION_STATE:{
		if (count != ARM_EXCEPTION_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		break;
	}
	case ARM_EXCEPTION_STATE64:{
		if (count != ARM_EXCEPTION_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		break;
	}
	case ARM_DEBUG_STATE:
	{
		arm_legacy_debug_state_t *state;
		boolean_t enabled = FALSE;
		unsigned int i;

		if (count != ARM_LEGACY_DEBUG_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_legacy_debug_state_t *) tstate;

		for (i = 0; i < 16; i++) {
			/* do not allow context IDs to be set */
			if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
			    || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
				return KERN_PROTECTION_FAILURE;
			}
			if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
			    || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
				enabled = TRUE;
			}
		}

		if (!enabled) {
			arm_debug_state32_t *thread_state = find_debug_state32(thread);
			if (thread_state != NULL) {
				void *pTmp = thread->machine.DebugData;
				thread->machine.DebugData = NULL;
				zfree(ads_zone, pTmp);
			}
		} else {
			arm_debug_state32_t *thread_state = find_debug_state32(thread);
			if (thread_state == NULL) {
				thread->machine.DebugData = zalloc(ads_zone);
				bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData));
				thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE32;
				thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE32_COUNT;
				thread_state = find_debug_state32(thread);
			}
			assert(NULL != thread_state);

			for (i = 0; i < 16; i++) {
				/* set appropriate privilege; mask out unknown bits */
				thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGBCR_MATCH_MASK
				    | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBGBCR_TYPE_IVA
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK;
				thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBGWCR_ACCESS_CONTROL_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK;
			}

			thread_state->mdscr_el1 = 0ULL; // Legacy clients issuing ARM_DEBUG_STATE don't drive single stepping.
		}

		if (thread == current_thread()) {
			arm_debug_set32(thread->machine.DebugData);
		}

		break;
	}
	case ARM_DEBUG_STATE32:
		/* ARM64_TODO: subtle bcr/wcr semantic differences, e.g. wcr and ARM_DBGBCR_TYPE_IVA */
	{
		arm_debug_state32_t *state;
		boolean_t enabled = FALSE;
		unsigned int i;

		if (count != ARM_DEBUG_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_debug_state32_t *) tstate;

		if (state->mdscr_el1 & 0x1) {
			enabled = TRUE;
		}

		for (i = 0; i < 16; i++) {
			/* do not allow context IDs to be set */
			if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
			    || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
				return KERN_PROTECTION_FAILURE;
			}
			if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
			    || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
				enabled = TRUE;
			}
		}

		if (!enabled) {
			arm_debug_state32_t *thread_state = find_debug_state32(thread);
			if (thread_state != NULL) {
				void *pTmp = thread->machine.DebugData;
				thread->machine.DebugData = NULL;
				zfree(ads_zone, pTmp);
			}
		} else {
			arm_debug_state32_t *thread_state = find_debug_state32(thread);
			if (thread_state == NULL) {
				thread->machine.DebugData = zalloc(ads_zone);
				bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData));
				thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE32;
				thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE32_COUNT;
				thread_state = find_debug_state32(thread);
			}
			assert(NULL != thread_state);

			if (state->mdscr_el1 & 0x1) {
				thread_state->mdscr_el1 |= 0x1;
			} else {
				thread_state->mdscr_el1 &= ~0x1;
			}

			for (i = 0; i < 16; i++) {
				/* set appropriate privilege; mask out unknown bits */
				thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGBCR_MATCH_MASK
				    | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBGBCR_TYPE_IVA
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK;
				thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBGWCR_ACCESS_CONTROL_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK;
			}
		}

		if (thread == current_thread()) {
			arm_debug_set32(thread->machine.DebugData);
		}

		break;
	}

	case ARM_DEBUG_STATE64:
	{
		arm_debug_state64_t *state;
		boolean_t enabled = FALSE;
		unsigned int i;

		if (count != ARM_DEBUG_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_debug_state64_t *) tstate;

		if (state->mdscr_el1 & 0x1) {
			enabled = TRUE;
		}

		for (i = 0; i < 16; i++) {
			/* do not allow context IDs to be set */
			if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
			    || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
				return KERN_PROTECTION_FAILURE;
			}
			if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
			    || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
				enabled = TRUE;
			}
		}

		if (!enabled) {
			arm_debug_state64_t *thread_state = find_debug_state64(thread);
			if (thread_state != NULL) {
				void *pTmp = thread->machine.DebugData;
				thread->machine.DebugData = NULL;
				zfree(ads_zone, pTmp);
			}
		} else {
			arm_debug_state64_t *thread_state = find_debug_state64(thread);
			if (thread_state == NULL) {
				thread->machine.DebugData = zalloc(ads_zone);
				bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData));
				thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE64;
				thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE64_COUNT;
				thread_state = find_debug_state64(thread);
			}
			assert(NULL != thread_state);

			if (state->mdscr_el1 & 0x1) {
				thread_state->mdscr_el1 |= 0x1;
			} else {
				thread_state->mdscr_el1 &= ~0x1;
			}

			for (i = 0; i < 16; i++) {
				/* set appropriate privilege; mask out unknown bits */
				thread_state->bcr[i] = (state->bcr[i] & (0 /* Was ARM_DBG_CR_ADDRESS_MASK_MASK, deprecated in v8 */
				    | 0 /* Was ARM_DBGBCR_MATCH_MASK, ignored in AArch64 state */
				    | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBGBCR_TYPE_IVA
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK64;
				thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBGWCR_ACCESS_CONTROL_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK64;
			}
		}

		if (thread == current_thread()) {
			arm_debug_set64(thread->machine.DebugData);
		}

		break;
	}

	case ARM_VFP_STATE:{
		struct arm_vfp_state *state;
		arm_neon_saved_state32_t *thread_state;
		unsigned int max;

		if (count != ARM_VFP_STATE_COUNT && count != ARM_VFPV2_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (count == ARM_VFPV2_STATE_COUNT) {
			max = 32;
		} else {
			max = 64;
		}

		state = (struct arm_vfp_state *) tstate;
		thread_state = neon_state32(thread->machine.uNeon);
		/* ARM64 TODO: combine fpsr and fpcr into state->fpscr */

		bcopy(state, thread_state, (max + 1) * sizeof(uint32_t));

		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
		break;
	}

	case ARM_NEON_STATE:{
		arm_neon_state_t *state;
		arm_neon_saved_state32_t *thread_state;

		if (count != ARM_NEON_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_neon_state_t *)tstate;
		thread_state = neon_state32(thread->machine.uNeon);

		assert(sizeof(*state) == sizeof(*thread_state));
		bcopy(state, thread_state, sizeof(arm_neon_state_t));

		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
		break;
	}

	case ARM_NEON_STATE64:{
		arm_neon_state64_t *state;
		arm_neon_saved_state64_t *thread_state;

		if (count != ARM_NEON_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_neon_state64_t *)tstate;
		thread_state = neon_state64(thread->machine.uNeon);

		assert(sizeof(*state) == sizeof(*thread_state));
		bcopy(state, thread_state, sizeof(arm_neon_state64_t));

		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;
		break;
	}


	default:
		return KERN_INVALID_ARGUMENT;
	}
	return KERN_SUCCESS;
}

mach_vm_address_t
machine_thread_pc(thread_t thread)
{
	struct arm_saved_state *ss = get_user_regs(thread);
	return (mach_vm_address_t)get_saved_state_pc(ss);
}

void
machine_thread_reset_pc(thread_t thread, mach_vm_address_t pc)
{
	set_saved_state_pc(get_user_regs(thread), (register_t)pc);
}

/*
 * Routine: machine_thread_state_initialize
 *
 */
kern_return_t
machine_thread_state_initialize(thread_t thread)
{
	arm_context_t *context = thread->machine.contextData;

	/*
	 * Should always be set up later. For a kernel thread, we don't care
	 * about this state. For a user thread, we'll set the state up in
	 * setup_wqthread, bsdthread_create, load_main(), or load_unixthread().
	 */

	if (context != NULL) {
		bzero(&context->ss.uss, sizeof(context->ss.uss));
		bzero(&context->ns.uns, sizeof(context->ns.uns));

		if (context->ns.nsh.flavor == ARM_NEON_SAVED_STATE64) {
			context->ns.ns_64.fpcr = FPCR_DEFAULT;
		} else {
			context->ns.ns_32.fpcr = FPCR_DEFAULT_32;
		}
	}

	thread->machine.DebugData = NULL;

#if defined(HAS_APPLE_PAC)
	/* Sign the initial user-space thread state */
	if (thread->machine.upcb != NULL) {
		ml_sign_thread_state(thread->machine.upcb, 0, 0, 0, 0, 0);
	}
#endif /* defined(HAS_APPLE_PAC) */

	return KERN_SUCCESS;
}

/*
 * Routine: machine_thread_dup
 *
 */
kern_return_t
machine_thread_dup(thread_t self,
    thread_t target,
    __unused boolean_t is_corpse)
{
	struct arm_saved_state *self_saved_state;
	struct arm_saved_state *target_saved_state;

	target->machine.cthread_self = self->machine.cthread_self;

	self_saved_state = self->machine.upcb;
	target_saved_state = target->machine.upcb;
	bcopy(self_saved_state, target_saved_state, sizeof(struct arm_saved_state));
#if defined(HAS_APPLE_PAC)
	if (!is_corpse && is_saved_state64(self_saved_state)) {
		check_and_sign_copied_thread_state(target_saved_state, self_saved_state);
	}
#endif /* defined(HAS_APPLE_PAC) */

	return KERN_SUCCESS;
}

/*
 * Routine: get_user_regs
 *
 */
struct arm_saved_state *
get_user_regs(thread_t thread)
{
	return thread->machine.upcb;
}

arm_neon_saved_state_t *
get_user_neon_regs(thread_t thread)
{
	return thread->machine.uNeon;
}

/*
 * Routine: find_user_regs
 *
 */
struct arm_saved_state *
find_user_regs(thread_t thread)
{
	return thread->machine.upcb;
}

/*
 * Routine: find_kern_regs
 *
 */
struct arm_saved_state *
find_kern_regs(thread_t thread)
{
	/*
	 * This works only for an interrupted kernel thread
	 */
	if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL) {
		return (struct arm_saved_state *) NULL;
	} else {
		return getCpuDatap()->cpu_int_state;
	}
}

arm_debug_state32_t *
find_debug_state32(thread_t thread)
{
	if (thread && thread->machine.DebugData) {
		return &(thread->machine.DebugData->uds.ds32);
	} else {
		return NULL;
	}
}

arm_debug_state64_t *
find_debug_state64(thread_t thread)
{
	if (thread && thread->machine.DebugData) {
		return &(thread->machine.DebugData->uds.ds64);
	} else {
		return NULL;
	}
}
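
/*
 * Note: thread->machine.DebugData is allocated lazily from ads_zone the first
 * time hardware debug state is enabled (see the ARM_DEBUG_STATE* cases in
 * machine_thread_set_state() above) and freed again once every breakpoint,
 * watchpoint, and the MDSCR_EL1 single-step bit are disabled, so a NULL
 * return here simply means the thread has no debug state set.
 */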

/*
 * Routine: thread_userstack
 *
 */
kern_return_t
thread_userstack(__unused thread_t thread,
    int flavor,
    thread_state_t tstate,
    unsigned int count,
    mach_vm_offset_t * user_stack,
    int * customstack,
    boolean_t is_64bit_data
    )
{
	register_t sp;

	switch (flavor) {
	case ARM_THREAD_STATE:
		if (count == ARM_UNIFIED_THREAD_STATE_COUNT) {
#if __arm64__
			if (is_64bit_data) {
				sp = ((arm_unified_thread_state_t *)tstate)->ts_64.sp;
			} else
#endif
			{
				sp = ((arm_unified_thread_state_t *)tstate)->ts_32.sp;
			}

			break;
		}

	/* INTENTIONAL FALL THROUGH (see machine_thread_set_state) */
	case ARM_THREAD_STATE32:
		if (count != ARM_THREAD_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (is_64bit_data) {
			return KERN_INVALID_ARGUMENT;
		}

		sp = ((arm_thread_state32_t *)tstate)->sp;
		break;
#if __arm64__
	case ARM_THREAD_STATE64:
		if (count != ARM_THREAD_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (!is_64bit_data) {
			return KERN_INVALID_ARGUMENT;
		}

		sp = ((arm_thread_state64_t *)tstate)->sp;
		break;
#endif
	default:
		return KERN_INVALID_ARGUMENT;
	}

	if (sp) {
		*user_stack = CAST_USER_ADDR_T(sp);
		if (customstack) {
			*customstack = 1;
		}
	} else {
		*user_stack = CAST_USER_ADDR_T(USRSTACK64);
		if (customstack) {
			*customstack = 0;
		}
	}

	return KERN_SUCCESS;
}

/*
 * thread_userstackdefault:
 *
 * Return the default stack location for the
 * thread, if otherwise unknown.
 */
kern_return_t
thread_userstackdefault(mach_vm_offset_t * default_user_stack,
    boolean_t is64bit)
{
	if (is64bit) {
		*default_user_stack = USRSTACK64;
	} else {
		*default_user_stack = USRSTACK;
	}

	return KERN_SUCCESS;
}

/*
 * Routine: thread_setuserstack
 *
 */
void
thread_setuserstack(thread_t thread,
    mach_vm_address_t user_stack)
{
	struct arm_saved_state *sv;

	sv = get_user_regs(thread);

	set_saved_state_sp(sv, user_stack);

	return;
}

/*
 * Routine: thread_adjuserstack
 *
 */
uint64_t
thread_adjuserstack(thread_t thread,
    int adjust)
{
	struct arm_saved_state *sv;
	uint64_t sp;

	sv = get_user_regs(thread);

	sp = get_saved_state_sp(sv);
	sp += adjust;
	set_saved_state_sp(sv, sp);

	return sp;
}

/*
 * Routine: thread_setentrypoint
 *
 */
void
thread_setentrypoint(thread_t thread,
    mach_vm_offset_t entry)
{
	struct arm_saved_state *sv;

	sv = get_user_regs(thread);

	set_saved_state_pc(sv, entry);

	return;
}

/*
 * Routine: thread_entrypoint
 *
 */
kern_return_t
thread_entrypoint(__unused thread_t thread,
    int flavor,
    thread_state_t tstate,
    unsigned int count __unused,
    mach_vm_offset_t * entry_point
    )
{
	switch (flavor) {
	case ARM_THREAD_STATE:
	{
		struct arm_thread_state *state;

		state = (struct arm_thread_state *) tstate;

		/*
		 * If a valid entry point is specified, use it.
		 */
		if (state->pc) {
			*entry_point = CAST_USER_ADDR_T(state->pc);
		} else {
			*entry_point = CAST_USER_ADDR_T(VM_MIN_ADDRESS);
		}
	}
	break;

	case ARM_THREAD_STATE64:
	{
		struct arm_thread_state64 *state;

		state = (struct arm_thread_state64 *) tstate;

		/*
		 * If a valid entry point is specified, use it.
		 */
		if (state->pc) {
			*entry_point = CAST_USER_ADDR_T(state->pc);
		} else {
			*entry_point = CAST_USER_ADDR_T(VM_MIN_ADDRESS);
		}

		break;
	}
	default:
		return KERN_INVALID_ARGUMENT;
	}

	return KERN_SUCCESS;
}


/*
 * Routine: thread_set_child
 *
 */
void
thread_set_child(thread_t child,
    int pid)
{
	struct arm_saved_state *child_state;

	child_state = get_user_regs(child);

	set_saved_state_reg(child_state, 0, pid);
	set_saved_state_reg(child_state, 1, 1ULL);
}
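
/*
 * thread_set_child()/thread_set_parent() implement the fork()-style return
 * convention: both sides receive the pid in x0 (or r0), while x1/r1
 * distinguishes the child (1) from the parent (0). The userspace fork wrapper
 * is presumably the consumer of this register pair (an inference; its users
 * live outside this file).
 */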


/*
 * Routine: thread_set_parent
 *
 */
void
thread_set_parent(thread_t parent,
    int pid)
{
	struct arm_saved_state *parent_state;

	parent_state = get_user_regs(parent);

	set_saved_state_reg(parent_state, 0, pid);
	set_saved_state_reg(parent_state, 1, 0);
}


struct arm_act_context {
	struct arm_unified_thread_state ss;
#if __ARM_VFP__
	struct arm_neon_saved_state ns;
#endif
};

/*
 * Routine: act_thread_csave
 *
 */
void *
act_thread_csave(void)
{
	struct arm_act_context *ic;
	kern_return_t kret;
	unsigned int val;
	thread_t thread = current_thread();

	ic = (struct arm_act_context *) kalloc(sizeof(struct arm_act_context));
	if (ic == (struct arm_act_context *) NULL) {
		return (void *) 0;
	}

	val = ARM_UNIFIED_THREAD_STATE_COUNT;
	kret = machine_thread_get_state(thread, ARM_THREAD_STATE, (thread_state_t)&ic->ss, &val);
	if (kret != KERN_SUCCESS) {
		kfree(ic, sizeof(struct arm_act_context));
		return (void *) 0;
	}

#if __ARM_VFP__
	if (thread_is_64bit_data(thread)) {
		val = ARM_NEON_STATE64_COUNT;
		kret = machine_thread_get_state(thread,
		    ARM_NEON_STATE64,
		    (thread_state_t)&ic->ns,
		    &val);
	} else {
		val = ARM_NEON_STATE_COUNT;
		kret = machine_thread_get_state(thread,
		    ARM_NEON_STATE,
		    (thread_state_t)&ic->ns,
		    &val);
	}
	if (kret != KERN_SUCCESS) {
		kfree(ic, sizeof(struct arm_act_context));
		return (void *) 0;
	}
#endif
	return ic;
}

/*
 * Routine: act_thread_catt
 *
 */
void
act_thread_catt(void * ctx)
{
	struct arm_act_context *ic;
	kern_return_t kret;
	thread_t thread = current_thread();

	ic = (struct arm_act_context *) ctx;
	if (ic == (struct arm_act_context *) NULL) {
		return;
	}

	kret = machine_thread_set_state(thread, ARM_THREAD_STATE, (thread_state_t)&ic->ss, ARM_UNIFIED_THREAD_STATE_COUNT);
	if (kret != KERN_SUCCESS) {
		goto out;
	}

#if __ARM_VFP__
	if (thread_is_64bit_data(thread)) {
		kret = machine_thread_set_state(thread,
		    ARM_NEON_STATE64,
		    (thread_state_t)&ic->ns,
		    ARM_NEON_STATE64_COUNT);
	} else {
		kret = machine_thread_set_state(thread,
		    ARM_NEON_STATE,
		    (thread_state_t)&ic->ns,
		    ARM_NEON_STATE_COUNT);
	}
	if (kret != KERN_SUCCESS) {
		goto out;
	}
#endif
out:
	kfree(ic, sizeof(struct arm_act_context));
}

/*
 * Routine: act_thread_cfree
 *
 */
void
act_thread_cfree(void *ctx)
{
	kfree(ctx, sizeof(struct arm_act_context));
}

kern_return_t
thread_set_wq_state32(thread_t thread,
    thread_state_t tstate)
{
	arm_thread_state_t *state;
	struct arm_saved_state *saved_state;
	struct arm_saved_state32 *saved_state_32;
	thread_t curth = current_thread();
	spl_t s = 0;

	assert(!thread_is_64bit_data(thread));

	saved_state = thread->machine.upcb;
	saved_state_32 = saved_state32(saved_state);

	state = (arm_thread_state_t *)tstate;

	if (curth != thread) {
		s = splsched();
		thread_lock(thread);
	}

	/*
	 * Do not zero saved_state: it can be concurrently accessed, and zero
	 * is not a valid state for some of the registers, like sp.
	 */
	thread_state32_to_saved_state(state, saved_state);
	saved_state_32->cpsr = PSR64_USER32_DEFAULT;

	if (curth != thread) {
		thread_unlock(thread);
		splx(s);
	}

	return KERN_SUCCESS;
}

kern_return_t
thread_set_wq_state64(thread_t thread,
    thread_state_t tstate)
{
	arm_thread_state64_t *state;
	struct arm_saved_state *saved_state;
	struct arm_saved_state64 *saved_state_64;
	thread_t curth = current_thread();
	spl_t s = 0;

	assert(thread_is_64bit_data(thread));

	saved_state = thread->machine.upcb;
	saved_state_64 = saved_state64(saved_state);
	state = (arm_thread_state64_t *)tstate;

	if (curth != thread) {
		s = splsched();
		thread_lock(thread);
	}

	/*
	 * Do not zero saved_state: it can be concurrently accessed, and zero
	 * is not a valid state for some of the registers, like sp.
	 */
	thread_state64_to_saved_state(state, saved_state);
	set_saved_state_cpsr(saved_state, PSR64_USER64_DEFAULT);

	if (curth != thread) {
		thread_unlock(thread);
		splx(s);
	}

	return KERN_SUCCESS;
}