/*
 * Copyright (c) 2007-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <debug.h>
#include <mach_kdp.h>

#include <kern/thread.h>
#include <machine/pmap.h>
#include <device/device_types.h>

#include <mach/vm_param.h>
#include <mach/clock_types.h>
#include <mach/machine.h>
#include <mach/kmod.h>
#include <pexpert/boot.h>
#include <pexpert/pexpert.h>

#include <ptrauth.h>

#include <kern/misc_protos.h>
#include <kern/startup.h>
#include <kern/clock.h>
#include <kern/debug.h>
#include <kern/processor.h>
#include <kdp/kdp_core.h>
#if ALTERNATE_DEBUGGER
#include <arm64/alternate_debugger.h>
#endif
#include <machine/atomic.h>
#include <machine/trap.h>
#include <kern/spl.h>
#include <kdp/kdp_callout.h>
#include <kdp/kdp_dyld.h>
#include <kdp/kdp_internal.h>
#include <uuid/uuid.h>
#include <sys/codesign.h>
#include <sys/time.h>

#include <IOKit/IOPlatformExpert.h>

#include <mach/vm_prot.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
#include <vm/vm_shared_region.h>
#include <mach/time_value.h>
#include <machine/machparam.h>  /* for btop */

#include <console/video_console.h>
#include <console/serial_protos.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <arm/misc_protos.h>
#include <libkern/OSKextLibPrivate.h>
#include <vm/vm_kern.h>
#include <kern/kern_cdata.h>

#if MACH_KDP
void kdp_trap(unsigned int, struct arm_saved_state *);
#endif

extern kern_return_t do_stackshot(void *);
extern void kdp_snapshot_preflight(int pid, void * tracebuf,
    uint32_t tracebuf_size, uint64_t flags,
    kcdata_descriptor_t data_p,
    uint64_t since_timestamp, uint32_t pagetable_mask);
extern int kdp_stack_snapshot_bytes_traced(void);
extern int kdp_stack_snapshot_bytes_uncompressed(void);

#if INTERRUPT_MASKED_DEBUG
extern boolean_t interrupt_masked_debug;
#endif

/*
 * Increment the PANICLOG_VERSION if you change the format of the panic
 * log in any way.
 */
#define PANICLOG_VERSION 13
static struct kcdata_descriptor kc_panic_data;

extern char firmware_version[];
extern volatile uint32_t debug_enabled;
extern unsigned int not_in_kdp;

extern int copyinframe(vm_address_t fp, uint32_t * frame);
extern void kdp_callouts(kdp_event_t event);

/* #include <sys/proc.h> */
#define MAXCOMLEN 16
struct proc;
extern int proc_pid(struct proc *p);
extern void proc_name_kdp(task_t, char *, int);

/*
 * Make sure the relevant bits fit, in the required format, within the space
 * allocated for the panic version string in the panic header. The format
 * required by OSAnalytics/DumpPanic is 'Product Version (OS Version)';
 * %.14s truncates each component to 14 characters.
 */
#define PANIC_HEADER_VERSION_FMT_STR "%.14s (%.14s)"

extern const char version[];
extern char osversion[];
extern char osproductversion[];
extern char osreleasetype[];

#if defined(XNU_TARGET_OS_BRIDGE)
extern char macosproductversion[];
extern char macosversion[];
#endif

extern uint8_t gPlatformECID[8];
extern uint32_t gPlatformMemoryID;

extern uint64_t last_hwaccess_thread;

/*
 * gTargetTypeBuffer is sized at 16 and gModelTypeBuffer at 32, since the
 * target and model names typically do not exceed these sizes.
 */
extern char gTargetTypeBuffer[16];
extern char gModelTypeBuffer[32];

decl_simple_lock_data(extern, clock_lock);
extern struct timeval gIOLastSleepTime;
extern struct timeval gIOLastWakeTime;
extern boolean_t is_clock_configured;
extern boolean_t kernelcache_uuid_valid;
extern uuid_t kernelcache_uuid;

extern void stackshot_memcpy(void *dst, const void *src, size_t len);

/* Definitions for frame pointers */
#define FP_ALIGNMENT_MASK ((uint32_t)(0x3))
#define FP_LR_OFFSET ((uint32_t)4)
#define FP_LR_OFFSET64 ((uint32_t)8)
#define FP_MAX_NUM_TO_EVALUATE (50)

/* Timeout (in nanoseconds, i.e. 10 ms) for all processors responding to debug crosscall */
#define DEBUG_ACK_TIMEOUT ((uint64_t) 10000000)

/* Forward function declarations */
void panic_display_times(void);
void panic_print_symbol_name(vm_address_t search);

/* Global variables */
static uint32_t panic_bt_depth;
boolean_t PanicInfoSaved = FALSE;
boolean_t force_immediate_debug_halt = FALSE;
unsigned int debug_ack_timeout_count = 0;
volatile unsigned int debugger_sync = 0;
volatile unsigned int mp_kdp_trap = 0; /* CPUs signalled by the debug CPU will spin on this */
volatile unsigned int debug_cpus_spinning = 0; /* Number of signalled CPUs still spinning on mp_kdp_trap (in DebuggerXCall). */
unsigned int DebugContextCount = 0;

#if defined(__arm64__)
uint8_t PE_smc_stashed_x86_system_state = 0xFF;
uint8_t PE_smc_stashed_x86_power_state = 0xFF;
uint8_t PE_smc_stashed_x86_efi_boot_state = 0xFF;
uint8_t PE_smc_stashed_x86_shutdown_cause = 0xFF;
uint64_t PE_smc_stashed_x86_prev_power_transitions = UINT64_MAX;
uint32_t PE_pcie_stashed_link_state = UINT32_MAX;
#endif

// Convenience macros to validate one or more pointers, provided
// they have defined types
#define VALIDATE_PTR(ptr) \
	validate_ptr((vm_offset_t)(ptr), sizeof(*(ptr)), #ptr)

#define VALIDATE_PTR_2(ptr0, ptr1) \
	VALIDATE_PTR(ptr0) && VALIDATE_PTR(ptr1)

#define VALIDATE_PTR_3(ptr0, ptr1, ptr2) \
	VALIDATE_PTR_2(ptr0, ptr1) && VALIDATE_PTR(ptr2)

#define VALIDATE_PTR_4(ptr0, ptr1, ptr2, ptr3) \
	VALIDATE_PTR_2(ptr0, ptr1) && VALIDATE_PTR_2(ptr2, ptr3)

#define GET_MACRO(_1, _2, _3, _4, NAME, ...) NAME

#define VALIDATE_PTR_LIST(...) GET_MACRO(__VA_ARGS__, VALIDATE_PTR_4, VALIDATE_PTR_3, VALIDATE_PTR_2, VALIDATE_PTR)(__VA_ARGS__)
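
/*
 * For example, VALIDATE_PTR_LIST(a, b, c) selects VALIDATE_PTR_3 via
 * GET_MACRO (arity dispatch on the argument count) and expands to:
 *     VALIDATE_PTR(a) && VALIDATE_PTR(b) && VALIDATE_PTR(c)
 */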

/*
 * Evaluate if a pointer is valid
 * Print a message if pointer is invalid
 */
static boolean_t
validate_ptr(
	vm_offset_t ptr, vm_size_t size, const char * ptr_name)
{
	if (ptr) {
		if (ml_validate_nofault(ptr, size)) {
			return TRUE;
		} else {
			paniclog_append_noflush("Invalid %s pointer: %p size: %d\n",
			    ptr_name, (void *)ptr, (int)size);
			return FALSE;
		}
	} else {
		paniclog_append_noflush("NULL %s pointer\n", ptr_name);
		return FALSE;
	}
}

/*
 * Print a single backtrace by walking a thread's frame-pointer chain.
 */
static void
print_one_backtrace(pmap_t pmap, vm_offset_t topfp, const char *cur_marker,
    boolean_t is_64_bit, boolean_t print_kexts_in_backtrace)
{
	int i = 0;
	addr64_t lr;
	addr64_t fp;
	addr64_t fp_for_ppn;
	ppnum_t ppn;
	boolean_t dump_kernel_stack;
	vm_offset_t raddrs[FP_MAX_NUM_TO_EVALUATE];

	fp = topfp;
	fp_for_ppn = 0;
	ppn = (ppnum_t)NULL;

	if (fp >= VM_MIN_KERNEL_ADDRESS) {
		dump_kernel_stack = TRUE;
	} else {
		dump_kernel_stack = FALSE;
	}

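	/*
	 * Walk the frame-pointer chain. Each frame stores the pair { fp, lr },
	 * so the saved LR lives at fp + FP_LR_OFFSET (32-bit) or
	 * fp + FP_LR_OFFSET64 (64-bit), and the caller's FP at fp itself.
	 * Reads go through the physical page (pmap_find_phys + ml_phys_read_*)
	 * so that an unmapped frame cannot fault.
	 */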
	do {
		if ((fp == 0) || ((fp & FP_ALIGNMENT_MASK) != 0)) {
			break;
		}
		if (dump_kernel_stack && ((fp < VM_MIN_KERNEL_ADDRESS) || (fp > VM_MAX_KERNEL_ADDRESS))) {
			break;
		}
		if ((!dump_kernel_stack) && (fp >= VM_MIN_KERNEL_ADDRESS)) {
			break;
		}

		/*
		 * Check to see if the current address will result in a different
		 * ppn than previously computed (to avoid recomputation) via
		 * ((addr ^ fp_for_ppn) >> PAGE_SHIFT)
		 */
		if ((((fp + FP_LR_OFFSET) ^ fp_for_ppn) >> PAGE_SHIFT) != 0x0U) {
			ppn = pmap_find_phys(pmap, fp + FP_LR_OFFSET);
			fp_for_ppn = fp + (is_64_bit ? FP_LR_OFFSET64 : FP_LR_OFFSET);
		}
		if (ppn != (ppnum_t)NULL) {
			if (is_64_bit) {
				lr = ml_phys_read_double_64(((((vm_offset_t)ppn) << PAGE_SHIFT)) | ((fp + FP_LR_OFFSET64) & PAGE_MASK));
#if defined(HAS_APPLE_PAC)
				/* return addresses on stack will be signed by arm64e ABI */
				lr = (addr64_t) ptrauth_strip((void *)lr, ptrauth_key_return_address);
#endif
			} else {
				lr = ml_phys_read_word(((((vm_offset_t)ppn) << PAGE_SHIFT)) | ((fp + FP_LR_OFFSET) & PAGE_MASK));
			}
		} else {
			if (is_64_bit) {
				paniclog_append_noflush("%s\t Could not read LR from frame at 0x%016llx\n", cur_marker, fp + FP_LR_OFFSET64);
			} else {
				paniclog_append_noflush("%s\t Could not read LR from frame at 0x%08x\n", cur_marker, (uint32_t)(fp + FP_LR_OFFSET));
			}
			break;
		}
		if (((fp ^ fp_for_ppn) >> PAGE_SHIFT) != 0x0U) {
			ppn = pmap_find_phys(pmap, fp);
			fp_for_ppn = fp;
		}
		if (ppn != (ppnum_t)NULL) {
			if (is_64_bit) {
				fp = ml_phys_read_double_64(((((vm_offset_t)ppn) << PAGE_SHIFT)) | (fp & PAGE_MASK));
			} else {
				fp = ml_phys_read_word(((((vm_offset_t)ppn) << PAGE_SHIFT)) | (fp & PAGE_MASK));
			}
		} else {
			if (is_64_bit) {
				paniclog_append_noflush("%s\t Could not read FP from frame at 0x%016llx\n", cur_marker, fp);
			} else {
				paniclog_append_noflush("%s\t Could not read FP from frame at 0x%08x\n", cur_marker, (uint32_t)fp);
			}
			break;
		}

		if (lr) {
			if (is_64_bit) {
				paniclog_append_noflush("%s\t lr: 0x%016llx fp: 0x%016llx\n", cur_marker, lr, fp);
			} else {
				paniclog_append_noflush("%s\t lr: 0x%08x fp: 0x%08x\n", cur_marker, (uint32_t)lr, (uint32_t)fp);
			}
			raddrs[i] = lr;
		}
	} while ((++i < FP_MAX_NUM_TO_EVALUATE) && (fp != topfp));

	if (print_kexts_in_backtrace && i != 0) {
		kmod_panic_dump(&raddrs[0], i);
	}
}

#define SANE_TASK_LIMIT 256
#define TOP_RUNNABLE_LIMIT 5
#define PANICLOG_UUID_BUF_SIZE 256

extern void panic_print_vnodes(void);

static void
panic_display_hung_cpus_help(void)
{
#if defined(__arm64__)
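	/* Offset of the PC-sample register (the pcsr below) within a CPU's UTTDBG register block. */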
	const uint32_t pcsr_offset = 0x90;

	/*
	 * Print some info that might help in cases where nothing
	 * else does
	 */
	const ml_topology_info_t *info = ml_get_topology_info();
	if (info) {
		unsigned i, retry;

		for (i = 0; i < info->num_cpus; i++) {
			if (info->cpus[i].cpu_UTTDBG_regs) {
				volatile uint64_t *pcsr = (volatile uint64_t*)(info->cpus[i].cpu_UTTDBG_regs + pcsr_offset);
				volatile uint32_t *pcsrTrigger = (volatile uint32_t*)pcsr;
				uint64_t pc = 0;

				// a number of retries are typically needed before this works
				for (retry = 1024; retry && !pc; retry--) {
					// a 32-bit read is required to make a PC sample be produced; otherwise we'll only get a zero
					(void)*pcsrTrigger;
					pc = *pcsr;
				}

				// post-process (same as astris does): sign-extend into the canonical kernel address range
				if (pc >> 48) {
					pc |= 0xffff000000000000ull;
				}
				paniclog_append_noflush("CORE %u recently retired instr at 0x%016llx\n", i, pc);
			}
		}
	}
#endif // defined(__arm64__)
}

static void
do_print_all_backtraces(const char *message, uint64_t panic_options)
{
	int logversion = PANICLOG_VERSION;
	thread_t cur_thread = current_thread();
	uintptr_t cur_fp;
	task_t task;
	int print_vnodes = 0;
	const char *nohilite_thread_marker = "\t";

	/* end_marker_bytes is set to 200 so there is always room to print the END marker plus stackshot summary info */
	int bytes_traced = 0, bytes_remaining = 0, end_marker_bytes = 200;
	int bytes_uncompressed = 0;
	uint64_t bytes_used = 0ULL;
	int err = 0;
	char *stackshot_begin_loc = NULL;
	kc_format_t kc_format;
	bool filesetKC = false;

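	/* Capture this function's frame pointer (r7 on arm, fp/x29 on arm64) so we can backtrace ourselves. */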
#if defined(__arm__)
	__asm__ volatile ("mov %0, r7":"=r"(cur_fp));
#elif defined(__arm64__)
	__asm__ volatile ("add %0, xzr, fp":"=r"(cur_fp));
#else
#error Unknown architecture.
#endif
	if (panic_bt_depth != 0) {
		return;
	}
	panic_bt_depth++;

	__unused bool result = PE_get_primary_kc_format(&kc_format);
	assert(result == true);
	filesetKC = kc_format == KCFormatFileset;

	/* Truncate panic string to 1200 bytes */
	paniclog_append_noflush("Debugger message: %.1200s\n", message);
	if (debug_enabled) {
		paniclog_append_noflush("Device: %s\n",
		    ('\0' != gTargetTypeBuffer[0]) ? gTargetTypeBuffer : "Not set yet");
		paniclog_append_noflush("Hardware Model: %s\n",
		    ('\0' != gModelTypeBuffer[0]) ? gModelTypeBuffer : "Not set yet");
		paniclog_append_noflush("ECID: %02X%02X%02X%02X%02X%02X%02X%02X\n", gPlatformECID[7],
		    gPlatformECID[6], gPlatformECID[5], gPlatformECID[4], gPlatformECID[3],
		    gPlatformECID[2], gPlatformECID[1], gPlatformECID[0]);
		if (last_hwaccess_thread) {
			paniclog_append_noflush("AppleHWAccess Thread: 0x%llx\n", last_hwaccess_thread);
		}
		paniclog_append_noflush("Boot args: %s\n", PE_boot_args());
	}
	paniclog_append_noflush("Memory ID: 0x%x\n", gPlatformMemoryID);
	paniclog_append_noflush("OS release type: %.256s\n",
	    ('\0' != osreleasetype[0]) ? osreleasetype : "Not set yet");
	paniclog_append_noflush("OS version: %.256s\n",
	    ('\0' != osversion[0]) ? osversion : "Not set yet");
#if defined(XNU_TARGET_OS_BRIDGE)
	paniclog_append_noflush("macOS version: %.256s\n",
	    ('\0' != macosversion[0]) ? macosversion : "Not set");
#endif
	paniclog_append_noflush("Kernel version: %.512s\n", version);

	if (kernelcache_uuid_valid) {
		if (filesetKC) {
			paniclog_append_noflush("Fileset Kernelcache UUID: ");
		} else {
			paniclog_append_noflush("KernelCache UUID: ");
		}
		for (size_t index = 0; index < sizeof(uuid_t); index++) {
			paniclog_append_noflush("%02X", kernelcache_uuid[index]);
		}
		paniclog_append_noflush("\n");
	}
	panic_display_kernel_uuid();

	paniclog_append_noflush("iBoot version: %.128s\n", firmware_version);
	paniclog_append_noflush("secure boot?: %s\n", debug_enabled ? "NO" : "YES");
#if defined(XNU_TARGET_OS_BRIDGE)
	paniclog_append_noflush("x86 EFI Boot State: ");
	if (PE_smc_stashed_x86_efi_boot_state != 0xFF) {
		paniclog_append_noflush("0x%x\n", PE_smc_stashed_x86_efi_boot_state);
	} else {
		paniclog_append_noflush("not available\n");
	}
	paniclog_append_noflush("x86 System State: ");
	if (PE_smc_stashed_x86_system_state != 0xFF) {
		paniclog_append_noflush("0x%x\n", PE_smc_stashed_x86_system_state);
	} else {
		paniclog_append_noflush("not available\n");
	}
	paniclog_append_noflush("x86 Power State: ");
	if (PE_smc_stashed_x86_power_state != 0xFF) {
		paniclog_append_noflush("0x%x\n", PE_smc_stashed_x86_power_state);
	} else {
		paniclog_append_noflush("not available\n");
	}
	paniclog_append_noflush("x86 Shutdown Cause: ");
	if (PE_smc_stashed_x86_shutdown_cause != 0xFF) {
		paniclog_append_noflush("0x%x\n", PE_smc_stashed_x86_shutdown_cause);
	} else {
		paniclog_append_noflush("not available\n");
	}
	paniclog_append_noflush("x86 Previous Power Transitions: ");
	if (PE_smc_stashed_x86_prev_power_transitions != UINT64_MAX) {
		paniclog_append_noflush("0x%llx\n", PE_smc_stashed_x86_prev_power_transitions);
	} else {
		paniclog_append_noflush("not available\n");
	}
	paniclog_append_noflush("PCIeUp link state: ");
	if (PE_pcie_stashed_link_state != UINT32_MAX) {
		paniclog_append_noflush("0x%x\n", PE_pcie_stashed_link_state);
	} else {
		paniclog_append_noflush("not available\n");
	}
#endif
	if (panic_data_buffers != NULL) {
		paniclog_append_noflush("%s data: ", panic_data_buffers->producer_name);
		uint8_t *panic_buffer_data = (uint8_t *) panic_data_buffers->buf;
		for (int i = 0; i < panic_data_buffers->len; i++) {
			paniclog_append_noflush("%02X", panic_buffer_data[i]);
		}
		paniclog_append_noflush("\n");
	}
	paniclog_append_noflush("Paniclog version: %d\n", logversion);

	panic_display_kernel_aslr();
	panic_display_times();
	panic_display_zprint();
	panic_display_hung_cpus_help();
#if CONFIG_ZLEAKS
	panic_display_ztrace();
#endif /* CONFIG_ZLEAKS */
#if CONFIG_ECC_LOGGING
	panic_display_ecc_errors();
#endif /* CONFIG_ECC_LOGGING */

#if DEVELOPMENT || DEBUG
	if (cs_debug_unsigned_exec_failures != 0 || cs_debug_unsigned_mmap_failures != 0) {
		paniclog_append_noflush("Unsigned code exec failures: %u\n", cs_debug_unsigned_exec_failures);
		paniclog_append_noflush("Unsigned code mmap failures: %u\n", cs_debug_unsigned_mmap_failures);
	}
#endif

	// Highlight threads that used high amounts of CPU in the panic log if requested (historically requested for watchdog panics)
	if (panic_options & DEBUGGER_OPTION_PRINT_CPU_USAGE_PANICLOG) {
		thread_t top_runnable[5] = {0};
		thread_t thread;
		int total_cpu_usage = 0;

		print_vnodes = 1;

		for (thread = (thread_t)queue_first(&threads);
		    VALIDATE_PTR(thread) && !queue_end(&threads, (queue_entry_t)thread);
		    thread = (thread_t)queue_next(&thread->threads)) {
			total_cpu_usage += thread->cpu_usage;

			// Look for the 5 runnable threads with highest priority
			if (thread->state & TH_RUN) {
				int k;
				thread_t comparison_thread = thread;

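				// Insertion pass: a higher-priority thread claims the
				// current slot, and the displaced thread keeps competing
				// for the remaining (lower) slots.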
				for (k = 0; k < TOP_RUNNABLE_LIMIT; k++) {
					if (top_runnable[k] == 0) {
						top_runnable[k] = comparison_thread;
						break;
					} else if (comparison_thread->sched_pri > top_runnable[k]->sched_pri) {
						thread_t temp = top_runnable[k];
						top_runnable[k] = comparison_thread;
						comparison_thread = temp;
					} // if comparison thread has higher priority than previously saved thread
				} // loop through highest priority runnable threads
			} // Check if thread is runnable
		} // Loop through all threads

		// Print the relevant info for each thread identified
		paniclog_append_noflush("Total cpu_usage: %d\n", total_cpu_usage);
		paniclog_append_noflush("Thread task pri cpu_usage\n");

		for (int i = 0; i < TOP_RUNNABLE_LIMIT; i++) {
			if (top_runnable[i] && VALIDATE_PTR(top_runnable[i]->task) &&
			    validate_ptr((vm_offset_t)top_runnable[i]->task->bsd_info, 1, "bsd_info")) {
				char name[MAXCOMLEN + 1];
				proc_name_kdp(top_runnable[i]->task, name, sizeof(name));
				paniclog_append_noflush("%p %s %d %d\n",
				    top_runnable[i], name, top_runnable[i]->sched_pri, top_runnable[i]->cpu_usage);
			}
		} // Loop through highest priority runnable threads
		paniclog_append_noflush("\n");
	}

	// print current task info
	if (VALIDATE_PTR_LIST(cur_thread, cur_thread->task)) {
		task = cur_thread->task;

		if (VALIDATE_PTR_LIST(task->map, task->map->pmap)) {
			paniclog_append_noflush("Panicked task %p: %d pages, %d threads: ",
			    task, task->map->pmap->stats.resident_count, task->thread_count);
		} else {
			paniclog_append_noflush("Panicked task %p: %d threads: ",
			    task, task->thread_count);
		}

		if (validate_ptr((vm_offset_t)task->bsd_info, 1, "bsd_info")) {
			char name[MAXCOMLEN + 1];
			int pid = proc_pid(task->bsd_info);
			proc_name_kdp(task, name, sizeof(name));
			paniclog_append_noflush("pid %d: %s", pid, name);
		} else {
			paniclog_append_noflush("unknown task");
		}

		paniclog_append_noflush("\n");
	}

	if (cur_fp < VM_MAX_KERNEL_ADDRESS) {
		paniclog_append_noflush("Panicked thread: %p, backtrace: 0x%llx, tid: %llu\n",
		    cur_thread, (addr64_t)cur_fp, thread_tid(cur_thread));
#if __LP64__
		print_one_backtrace(kernel_pmap, cur_fp, nohilite_thread_marker, TRUE, filesetKC);
#else
		print_one_backtrace(kernel_pmap, cur_fp, nohilite_thread_marker, FALSE, filesetKC);
#endif
	} else {
		paniclog_append_noflush("Could not print panicked thread backtrace: "
		    "frame pointer outside kernel vm.\n");
	}

	paniclog_append_noflush("\n");
	if (filesetKC) {
		kext_dump_panic_lists(&paniclog_append_noflush);
		paniclog_append_noflush("\n");
	}
	panic_info->eph_panic_log_len = PE_get_offset_into_panic_region(debug_buf_ptr) - panic_info->eph_panic_log_offset;
	/* set the os version data in the panic header in the format 'Product Version (OS Version)' (only if they have been set) */
	if ((osversion[0] != '\0') && (osproductversion[0] != '\0')) {
		snprintf((char *)&panic_info->eph_os_version, sizeof(panic_info->eph_os_version), PANIC_HEADER_VERSION_FMT_STR,
		    osproductversion, osversion);
	}
#if defined(XNU_TARGET_OS_BRIDGE)
	if ((macosversion[0] != '\0') && (macosproductversion[0] != '\0')) {
		snprintf((char *)&panic_info->eph_macos_version, sizeof(panic_info->eph_macos_version), PANIC_HEADER_VERSION_FMT_STR,
		    macosproductversion, macosversion);
	}
#endif

	if (debug_ack_timeout_count) {
		panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_DEBUGGERSYNC;
		panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
		paniclog_append_noflush("!! debugger synchronization failed, no stackshot !!\n");
	} else if (stackshot_active()) {
		panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_NESTED;
		panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
		paniclog_append_noflush("!! panicked during stackshot, skipping panic stackshot !!\n");
	} else {
		/* Align the stackshot buffer to an 8-byte address (especially important for armv7k devices) */
		debug_buf_ptr += (8 - ((uintptr_t)debug_buf_ptr % 8));
		stackshot_begin_loc = debug_buf_ptr;

		bytes_remaining = debug_buf_size - (unsigned int)((uintptr_t)stackshot_begin_loc - (uintptr_t)debug_buf_base);
		err = kcdata_memory_static_init(&kc_panic_data, (mach_vm_address_t)debug_buf_ptr,
		    KCDATA_BUFFER_BEGIN_COMPRESSED, bytes_remaining - end_marker_bytes,
		    KCFLAG_USE_MEMCOPY);
		if (err == KERN_SUCCESS) {
			uint64_t stackshot_flags = (STACKSHOT_GET_GLOBAL_MEM_STATS | STACKSHOT_SAVE_LOADINFO | STACKSHOT_KCDATA_FORMAT |
			    STACKSHOT_ENABLE_BT_FAULTING | STACKSHOT_ENABLE_UUID_FAULTING | STACKSHOT_FROM_PANIC | STACKSHOT_DO_COMPRESS |
			    STACKSHOT_DISABLE_LATENCY_INFO | STACKSHOT_NO_IO_STATS | STACKSHOT_THREAD_WAITINFO | STACKSHOT_GET_DQ |
			    STACKSHOT_COLLECT_SHAREDCACHE_LAYOUT);

			err = kcdata_init_compress(&kc_panic_data, KCDATA_BUFFER_BEGIN_STACKSHOT, stackshot_memcpy, KCDCT_ZLIB);
			if (err != KERN_SUCCESS) {
				panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COMPRESS_FAILED;
				stackshot_flags &= ~STACKSHOT_DO_COMPRESS;
			}
			if (filesetKC) {
				stackshot_flags |= STACKSHOT_SAVE_KEXT_LOADINFO;
			}

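			/* pid -1 requests a stackshot of the whole system rather than a single process */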
			kdp_snapshot_preflight(-1, stackshot_begin_loc, bytes_remaining - end_marker_bytes,
			    stackshot_flags, &kc_panic_data, 0, 0);
			err = do_stackshot(NULL);
			bytes_traced = kdp_stack_snapshot_bytes_traced();
			if (bytes_traced > 0 && !err) {
				debug_buf_ptr += bytes_traced;
				panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_SUCCEEDED;
				panic_info->eph_stackshot_offset = PE_get_offset_into_panic_region(stackshot_begin_loc);
				panic_info->eph_stackshot_len = bytes_traced;

				panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
				if (stackshot_flags & STACKSHOT_DO_COMPRESS) {
					panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_DATA_COMPRESSED;
					bytes_uncompressed = kdp_stack_snapshot_bytes_uncompressed();
					paniclog_append_noflush("\n** Stackshot Succeeded ** Bytes Traced %d (Uncompressed %d) **\n", bytes_traced, bytes_uncompressed);
				} else {
					paniclog_append_noflush("\n** Stackshot Succeeded ** Bytes Traced %d **\n", bytes_traced);
				}
			} else {
				bytes_used = kcdata_memory_get_used_bytes(&kc_panic_data);
				if (bytes_used > 0) {
					/* Zero out the stackshot data */
					bzero(stackshot_begin_loc, bytes_used);
					panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_INCOMPLETE;

					panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
					paniclog_append_noflush("\n** Stackshot Incomplete ** Bytes Filled %llu **\n", bytes_used);
				} else {
					bzero(stackshot_begin_loc, bytes_used);
					panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_ERROR;

					panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
					paniclog_append_noflush("\n!! Stackshot Failed !! Bytes Traced %d, err %d\n", bytes_traced, err);
				}
			}
		} else {
			panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_ERROR;
			panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
			paniclog_append_noflush("\n!! Stackshot Failed !!\nkcdata_memory_static_init returned %d", err);
		}
	}

	assert(panic_info->eph_other_log_offset != 0);

	if (print_vnodes != 0) {
		panic_print_vnodes();
	}

	panic_bt_depth--;
}

/*
 * Entry to print_all_backtraces is serialized by the debugger lock
 */
static void
print_all_backtraces(const char *message, uint64_t panic_options)
{
	unsigned int initial_not_in_kdp = not_in_kdp;

	cpu_data_t * cpu_data_ptr = getCpuDatap();

	assert(cpu_data_ptr->PAB_active == FALSE);
	cpu_data_ptr->PAB_active = TRUE;

	/*
	 * Because print_all_backtraces uses the pmap routines, it needs to
	 * avoid taking pmap locks. Right now, this is conditionalized on
	 * not_in_kdp.
	 */
	not_in_kdp = 0;
	do_print_all_backtraces(message, panic_options);

	not_in_kdp = initial_not_in_kdp;

	cpu_data_ptr->PAB_active = FALSE;
}

void
panic_display_times()
{
	if (kdp_clock_is_locked()) {
		paniclog_append_noflush("Warning: clock is locked. Can't get time\n");
		return;
	}

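	/*
	 * Probe clock_lock with a try-lock and drop it immediately: if the
	 * clock is mid-update we skip reading the time entirely rather than
	 * block on the lock while panicking.
	 */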
	if ((is_clock_configured) && (simple_lock_try(&clock_lock, LCK_GRP_NULL))) {
		clock_sec_t secs, boot_secs;
		clock_usec_t usecs, boot_usecs;

		simple_unlock(&clock_lock);

		clock_get_calendar_microtime(&secs, &usecs);
		clock_get_boottime_microtime(&boot_secs, &boot_usecs);

		paniclog_append_noflush("mach_absolute_time: 0x%llx\n", mach_absolute_time());
		paniclog_append_noflush("Epoch Time: sec usec\n");
		paniclog_append_noflush(" Boot : 0x%08x 0x%08x\n", (unsigned int)boot_secs, (unsigned int)boot_usecs);
		paniclog_append_noflush(" Sleep : 0x%08x 0x%08x\n", (unsigned int)gIOLastSleepTime.tv_sec, (unsigned int)gIOLastSleepTime.tv_usec);
		paniclog_append_noflush(" Wake : 0x%08x 0x%08x\n", (unsigned int)gIOLastWakeTime.tv_sec, (unsigned int)gIOLastWakeTime.tv_usec);
		paniclog_append_noflush(" Calendar: 0x%08x 0x%08x\n\n", (unsigned int)secs, (unsigned int)usecs);
	}
}

void
panic_print_symbol_name(vm_address_t search)
{
#pragma unused(search)
	// empty stub. Really only used on x86_64.
	return;
}

void
SavePanicInfo(
	const char *message, __unused void *panic_data, uint64_t panic_options)
{
	/*
	 * This should be initialized by the time we get here, but
	 * if it is not, asserting about it will be of no use (it will
	 * come right back to here), so just loop right here and now.
	 * This prevents early-boot panics from becoming recursive and
	 * thus makes them easier to debug. If you attached to a device
	 * and see your PC here, look down a few frames to see your
	 * early-boot panic there.
	 */
	while (!panic_info || panic_info->eph_panic_log_offset == 0) {
		;
	}

	if (panic_options & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
		panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_BUTTON_RESET_PANIC;
	}

	if (panic_options & DEBUGGER_OPTION_COPROC_INITIATED_PANIC) {
		panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COPROC_INITIATED_PANIC;
	}

#if defined(XNU_TARGET_OS_BRIDGE)
	panic_info->eph_x86_power_state = PE_smc_stashed_x86_power_state;
	panic_info->eph_x86_efi_boot_state = PE_smc_stashed_x86_efi_boot_state;
	panic_info->eph_x86_system_state = PE_smc_stashed_x86_system_state;
#endif

	/*
	 * On newer targets, panic data is stored directly into the iBoot panic region.
	 * If we re-enter SavePanicInfo (e.g. on a double panic) on such a target, update the
	 * panic CRC so that iBoot can hopefully find *something* useful in the panic region.
	 */
	if (PanicInfoSaved && (debug_buf_base >= (char*)gPanicBase) && (debug_buf_base < (char*)gPanicBase + gPanicSize)) {
		unsigned int pi_size = (unsigned int)(debug_buf_ptr - gPanicBase);
		PE_save_buffer_to_vram((unsigned char*)gPanicBase, &pi_size);
		PE_sync_panic_buffers(); // extra precaution; panic path likely isn't reliable if we're here
	}

	if (PanicInfoSaved || (debug_buf_size == 0)) {
		return;
	}

	PanicInfoSaved = TRUE;

	print_all_backtraces(message, panic_options);

	assert(panic_info->eph_panic_log_len != 0);
	panic_info->eph_other_log_len = PE_get_offset_into_panic_region(debug_buf_ptr) - panic_info->eph_other_log_offset;

	PEHaltRestart(kPEPanicSync);

	/*
	 * Notifies registered IOPlatformPanicAction callbacks
	 * (which includes one to disable the memcache) and flushes
	 * the buffer contents from the cache
	 */
	paniclog_flush();
}

void
paniclog_flush()
{
	unsigned int panicbuf_length = 0;

	panicbuf_length = (unsigned int)(debug_buf_ptr - gPanicBase);
	if (!panicbuf_length) {
		return;
	}

	/*
	 * Updates the log length of the last part of the panic log.
	 */
	panic_info->eph_other_log_len = PE_get_offset_into_panic_region(debug_buf_ptr) - panic_info->eph_other_log_offset;

	/*
	 * Updates the metadata at the beginning of the panic buffer,
	 * updates the CRC.
	 */
	PE_save_buffer_to_vram((unsigned char *)gPanicBase, &panicbuf_length);

	/*
	 * This is currently unused by platform KEXTs on embedded but is
	 * kept for compatibility with the published IOKit interfaces.
	 */
	PESavePanicInfo((unsigned char *)gPanicBase, panicbuf_length);

	PE_sync_panic_buffers();
}

/*
 * @function _was_in_userspace
 *
 * @abstract Unused function used to indicate that a CPU was in userspace
 * before it was IPI'd to enter the Debugger context.
 *
 * @discussion This function should never actually be called.
 */
static void __attribute__((__noreturn__))
_was_in_userspace(void)
{
	panic("%s: should not have been invoked.", __FUNCTION__);
}

/*
 * @function DebuggerXCallEnter
 *
 * @abstract IPI other cores so this core can run in a single-threaded context.
 *
 * @discussion This function should be called with the debugger lock held. It
 * signals the other cores to go into a busy loop so this core can run in a
 * single-threaded context and inspect kernel memory.
 *
 * @param proceed_on_sync_failure If true, then go ahead and try to debug even
 * if we can't synch with the other cores. This is inherently unsafe and should
 * only be used if the kernel is going down in flames anyway.
 *
 * @result returns KERN_OPERATION_TIMED_OUT if synchronization times out and
 * proceed_on_sync_failure is false.
 */
kern_return_t
DebuggerXCallEnter(
	boolean_t proceed_on_sync_failure)
{
	uint64_t max_mabs_time, current_mabs_time;
	int cpu;
	int max_cpu;
	cpu_data_t *target_cpu_datap;
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	/* Check for nested debugger entry. */
	cpu_data_ptr->debugger_active++;
	if (cpu_data_ptr->debugger_active != 1) {
		return KERN_SUCCESS;
	}

	/*
	 * If debugger_sync is not 0, someone responded excessively late to the last
	 * debug request (we zero the sync variable in the return function). Zero it
	 * again here. This should prevent us from getting out of sync (heh) and
	 * timing out on every entry to the debugger if we timeout once.
	 */

	debugger_sync = 0;
	mp_kdp_trap = 1;
	debug_cpus_spinning = 0;

	/*
	 * We need a barrier here to ensure CPUs see mp_kdp_trap and spin when responding
	 * to the signal.
	 */
	__builtin_arm_dmb(DMB_ISH);

	/*
	 * Try to signal all CPUs (except ourselves, of course). Use debugger_sync to
	 * synchronize with every CPU that we appeared to signal successfully (cpu_signal
	 * is not synchronous).
	 */
	bool cpu_signal_failed = false;
	max_cpu = ml_get_max_cpu_number();

	boolean_t immediate_halt = FALSE;
	if (proceed_on_sync_failure && force_immediate_debug_halt) {
		immediate_halt = TRUE;
	}

	if (!immediate_halt) {
		for (cpu = 0; cpu <= max_cpu; cpu++) {
			target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

			if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr)) {
				continue;
			}

			if (KERN_SUCCESS == cpu_signal(target_cpu_datap, SIGPdebug, (void *)NULL, NULL)) {
				os_atomic_inc(&debugger_sync, relaxed);
				os_atomic_inc(&debug_cpus_spinning, relaxed);
			} else {
				cpu_signal_failed = true;
				kprintf("cpu_signal failed in DebuggerXCallEnter\n");
			}
		}

		nanoseconds_to_absolutetime(DEBUG_ACK_TIMEOUT, &max_mabs_time);
		current_mabs_time = mach_absolute_time();
		max_mabs_time += current_mabs_time;
		assert(max_mabs_time > current_mabs_time);

		/*
		 * Wait for DEBUG_ACK_TIMEOUT ns for a response from everyone we IPI'd. If we
		 * timeout, that is simply too bad; we don't have a true NMI, and one CPU may be
		 * uninterruptibly spinning on someone else. The best we can hope for is that
		 * all other CPUs have either responded or are spinning in a context that is
		 * debugger safe.
		 */
		while ((debugger_sync != 0) && (current_mabs_time < max_mabs_time)) {
			current_mabs_time = mach_absolute_time();
		}
	}

	if (cpu_signal_failed && !proceed_on_sync_failure) {
		DebuggerXCallReturn();
		return KERN_FAILURE;
	} else if (immediate_halt || (current_mabs_time >= max_mabs_time)) {
		/*
		 * For the moment, we're aiming for a timeout that the user shouldn't notice,
		 * but will be sufficient to let the other core respond.
		 */
		__builtin_arm_dmb(DMB_ISH);
		for (cpu = 0; cpu <= max_cpu; cpu++) {
			target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

			if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr)) {
				continue;
			}
			if (!(target_cpu_datap->cpu_signal & SIGPdebug) && !immediate_halt) {
				continue;
			}
			if (proceed_on_sync_failure) {
				paniclog_append_noflush("Attempting to forcibly halt cpu %d\n", cpu);
				dbgwrap_status_t halt_status = ml_dbgwrap_halt_cpu(cpu, 0);
				if (halt_status < 0) {
					paniclog_append_noflush("cpu %d failed to halt with error %d: %s\n", cpu, halt_status, ml_dbgwrap_strerror(halt_status));
				} else {
					if (halt_status > 0) {
						paniclog_append_noflush("cpu %d halted with warning %d: %s\n", cpu, halt_status, ml_dbgwrap_strerror(halt_status));
					}
					target_cpu_datap->halt_status = CPU_HALTED;
				}
			} else {
				kprintf("Debugger synch pending on cpu %d\n", cpu);
			}
		}
		if (proceed_on_sync_failure) {
			for (cpu = 0; cpu <= max_cpu; cpu++) {
				target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

				if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr) ||
				    (target_cpu_datap->halt_status == CPU_NOT_HALTED)) {
					continue;
				}
				dbgwrap_status_t halt_status = ml_dbgwrap_halt_cpu_with_state(cpu,
				    NSEC_PER_SEC, &target_cpu_datap->halt_state);
				if ((halt_status < 0) || (halt_status == DBGWRAP_WARN_CPU_OFFLINE)) {
					paniclog_append_noflush("Unable to obtain state for cpu %d with status %d: %s\n", cpu, halt_status, ml_dbgwrap_strerror(halt_status));
				} else {
					paniclog_append_noflush("cpu %d successfully halted\n", cpu);
					target_cpu_datap->halt_status = CPU_HALTED_WITH_STATE;
				}
			}
			if (immediate_halt) {
				paniclog_append_noflush("Immediate halt requested on all cores\n");
			} else {
				paniclog_append_noflush("Debugger synchronization timed out; waited %llu nanoseconds\n", DEBUG_ACK_TIMEOUT);
			}
			debug_ack_timeout_count++;
			return KERN_SUCCESS;
		} else {
			DebuggerXCallReturn();
			return KERN_OPERATION_TIMED_OUT;
		}
	} else {
		return KERN_SUCCESS;
	}
}

/*
 * @function DebuggerXCallReturn
 *
 * @abstract Resume normal multicore operation after DebuggerXCallEnter()
 *
 * @discussion This function should be called with the debugger lock held.
 */
void
DebuggerXCallReturn(
	void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();
	uint64_t max_mabs_time, current_mabs_time;

	cpu_data_ptr->debugger_active--;
	if (cpu_data_ptr->debugger_active != 0) {
		return;
	}

	mp_kdp_trap = 0;
	debugger_sync = 0;

	nanoseconds_to_absolutetime(DEBUG_ACK_TIMEOUT, &max_mabs_time);
	current_mabs_time = mach_absolute_time();
	max_mabs_time += current_mabs_time;
	assert(max_mabs_time > current_mabs_time);

	/*
	 * Wait for other CPUs to stop spinning on mp_kdp_trap (see DebuggerXCall).
	 * It's possible for one or more CPUs to not decrement debug_cpus_spinning,
	 * since they may be stuck somewhere else with interrupts disabled.
	 * Wait for DEBUG_ACK_TIMEOUT ns for a response and move on if we don't get it.
	 *
	 * Note that the same is done in DebuggerXCallEnter, when we wait for other
	 * CPUs to update debugger_sync. If we time out, we hope that all CPUs are
	 * spinning in a debugger-safe context.
	 */
	while ((debug_cpus_spinning != 0) && (current_mabs_time < max_mabs_time)) {
		current_mabs_time = mach_absolute_time();
	}

	/* Do we need a barrier here? */
	__builtin_arm_dmb(DMB_ISH);
}

void
DebuggerXCall(
	void *ctx)
{
	boolean_t save_context = FALSE;
	vm_offset_t kstackptr = 0;
	arm_saved_state_t *regs = (arm_saved_state_t *) ctx;

	if (regs != NULL) {
#if defined(__arm64__)
		save_context = PSR64_IS_KERNEL(get_saved_state_cpsr(regs));
#else
		save_context = PSR_IS_KERNEL(regs->cpsr);
#endif
	}

	kstackptr = current_thread()->machine.kstackptr;

#if defined(__arm64__)
	arm_kernel_saved_state_t *state = (arm_kernel_saved_state_t *)kstackptr;

	if (save_context) {
		/* Save the interrupted context before acknowledging the signal */
		current_thread()->machine.kpcb = regs;
	} else if (regs) {
		/* zero old state so machine_trace_thread knows not to backtrace it */
		register_t pc = (register_t)ptrauth_strip((void *)&_was_in_userspace, ptrauth_key_function_pointer);
		state->fp = 0;
		state->pc = pc;
		state->lr = 0;
		state->sp = 0;
	}
#else
	arm_saved_state_t *state = (arm_saved_state_t *)kstackptr;

	if (save_context) {
		/* Save the interrupted context before acknowledging the signal */
		copy_signed_thread_state(state, regs);
	} else if (regs) {
		/* zero old state so machine_trace_thread knows not to backtrace it */
		register_t pc = (register_t)ptrauth_strip((void *)&_was_in_userspace, ptrauth_key_function_pointer);
		set_saved_state_fp(state, 0);
		set_saved_state_pc(state, pc);
		set_saved_state_lr(state, 0);
		set_saved_state_sp(state, 0);
	}
#endif

	/*
	 * When running in serial mode, the core capturing the dump may hold interrupts disabled
	 * for a time longer than the timeout. That path includes logic to reset the timestamp
	 * so that we do not eventually trigger the interrupt timeout assert().
	 *
	 * Here we check whether other cores have already gone over the timeout at this point
	 * before spinning, so we at least cover the IPI reception path. After spinning, however,
	 * we reset the timestamp so as to avoid hitting the interrupt timeout assert().
	 */
	if ((serialmode & SERIALMODE_OUTPUT) || stackshot_active()) {
		INTERRUPT_MASKED_DEBUG_END();
	}

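	/*
	 * Acknowledge the cross-call, then spin until the debugger CPU drops
	 * mp_kdp_trap in DebuggerXCallReturn().
	 */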
	os_atomic_dec(&debugger_sync, relaxed);
	__builtin_arm_dmb(DMB_ISH);
	while (mp_kdp_trap) {
		;
	}

	/**
	 * Alert the triggering CPU that this CPU is done spinning. The CPU that
	 * signalled all of the other CPUs will wait (in DebuggerXCallReturn) for
	 * all of the CPUs to exit the above loop before continuing.
	 */
	os_atomic_dec(&debug_cpus_spinning, relaxed);

	if ((serialmode & SERIALMODE_OUTPUT) || stackshot_active()) {
		INTERRUPT_MASKED_DEBUG_START(current_thread()->machine.int_handler_addr, current_thread()->machine.int_type);
	}

#if defined(__arm64__)
	current_thread()->machine.kpcb = NULL;
#endif /* defined(__arm64__) */

	/* Any cleanup for our pushed context should go here */
}

void
DebuggerCall(
	unsigned int reason,
	void *ctx)
{
#if !MACH_KDP
#pragma unused(reason,ctx)
#endif /* !MACH_KDP */

#if ALTERNATE_DEBUGGER
	alternate_debugger_enter();
#endif

#if MACH_KDP
	kdp_trap(reason, (struct arm_saved_state *)ctx);
#else
	/* TODO: decide what to do if no debugger config */
#endif
}

boolean_t
bootloader_valid_page(ppnum_t ppn)
{
	return pmap_bootloader_page(ppn);
}