/*
 * Copyright (c) 2007-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <debug.h>
#include <mach_kdp.h>

#include <kern/thread.h>
#include <machine/pmap.h>
#include <device/device_types.h>

#include <mach/vm_param.h>
#include <mach/clock_types.h>
#include <mach/machine.h>
#include <mach/kmod.h>
#include <pexpert/boot.h>
#include <pexpert/pexpert.h>

#if defined(HAS_APPLE_PAC)
#include <ptrauth.h>
#endif

#include <kern/misc_protos.h>
#include <kern/startup.h>
#include <kern/clock.h>
#include <kern/debug.h>
#include <kern/processor.h>
#include <kdp/kdp_core.h>
#if ALTERNATE_DEBUGGER
#include <arm64/alternate_debugger.h>
#endif
#include <machine/atomic.h>
#include <machine/trap.h>
#include <kern/spl.h>
#include <pexpert/pexpert.h>
#include <kdp/kdp_callout.h>
#include <kdp/kdp_dyld.h>
#include <kdp/kdp_internal.h>
#include <uuid/uuid.h>
#include <sys/codesign.h>
#include <sys/time.h>

#include <IOKit/IOPlatformExpert.h>

#include <mach/vm_prot.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
#include <vm/vm_shared_region.h>
#include <mach/time_value.h>
#include <machine/machparam.h> /* for btop */

#include <console/video_console.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <arm/misc_protos.h>
#include <libkern/OSKextLibPrivate.h>
#include <vm/vm_kern.h>
#include <kern/kern_cdata.h>

#if MACH_KDP
void kdp_trap(unsigned int, struct arm_saved_state *);
#endif

extern kern_return_t do_stackshot(void *);
extern void kdp_snapshot_preflight(int pid, void *tracebuf,
    uint32_t tracebuf_size, uint32_t flags,
    kcdata_descriptor_t data_p,
    boolean_t enable_faulting);
extern int kdp_stack_snapshot_bytes_traced(void);

/*
 * Increment the PANICLOG_VERSION if you change the format of the panic
 * log in any way.
 */
#define PANICLOG_VERSION 13
static struct kcdata_descriptor kc_panic_data;

extern char firmware_version[];
extern volatile uint32_t debug_enabled;
extern unsigned int not_in_kdp;

extern int copyinframe(vm_address_t fp, uint32_t * frame);
extern void kdp_callouts(kdp_event_t event);

/* #include <sys/proc.h> */
#define MAXCOMLEN 16
extern int proc_pid(void *p);
extern void proc_name_kdp(task_t, char *, int);

/*
 * Make sure there's enough space to include the relevant bits in the format required
 * within the space allocated for the panic version string in the panic header.
 * The format required by OSAnalytics/DumpPanic is 'Product Version (OS Version)'
 */
#define PANIC_HEADER_VERSION_FMT_STR "%.14s (%.14s)"

extern const char version[];
extern char osversion[];
extern char osproductversion[];

#if defined(XNU_TARGET_OS_BRIDGE)
extern char macosproductversion[];
extern char macosversion[];
#endif

extern uint8_t gPlatformECID[8];
extern uint32_t gPlatformMemoryID;

extern uint64_t last_hwaccess_thread;

/*
 * Choosing the size of gTargetTypeBuffer as 8 and the size of gModelTypeBuffer as 32,
 * since the target name and model name typically don't exceed these sizes.
 */
extern char gTargetTypeBuffer[8];
extern char gModelTypeBuffer[32];

decl_simple_lock_data(extern, clock_lock);
extern struct timeval gIOLastSleepTime;
extern struct timeval gIOLastWakeTime;
extern boolean_t is_clock_configured;
extern boolean_t kernelcache_uuid_valid;
extern uuid_t kernelcache_uuid;

/* Definitions for frame pointers */
#define FP_ALIGNMENT_MASK ((uint32_t)(0x3))
#define FP_LR_OFFSET ((uint32_t)4)
#define FP_LR_OFFSET64 ((uint32_t)8)
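/* Maximum number of frames that print_one_backtrace() will walk before giving up */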
#define FP_MAX_NUM_TO_EVALUATE (50)

/* Timeout (in nanoseconds) for all processors responding to debug crosscall */
#define DEBUG_ACK_TIMEOUT ((uint64_t) 10000000)

/* Forward function declarations */
void panic_display_times(void);
void panic_print_symbol_name(vm_address_t search);


/* Global variables */
static uint32_t panic_bt_depth;
boolean_t PanicInfoSaved = FALSE;
boolean_t force_immediate_debug_halt = FALSE;
unsigned int debug_ack_timeout_count = 0;
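/* Count of signalled CPUs that have not yet acknowledged the debugger cross-call */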
volatile unsigned int debugger_sync = 0;
volatile unsigned int mp_kdp_trap = 0; /* CPUs signalled by the debug CPU will spin on this */
unsigned int DebugContextCount = 0;

#if defined(__arm64__)
uint8_t PE_smc_stashed_x86_system_state = 0xFF;
uint8_t PE_smc_stashed_x86_power_state = 0xFF;
uint8_t PE_smc_stashed_x86_efi_boot_state = 0xFF;
uint8_t PE_smc_stashed_x86_shutdown_cause = 0xFF;
uint64_t PE_smc_stashed_x86_prev_power_transitions = UINT64_MAX;
uint32_t PE_pcie_stashed_link_state = UINT32_MAX;
#endif


// Convenient macros to easily validate one or more pointers if
// they have defined types
#define VALIDATE_PTR(ptr) \
    validate_ptr((vm_offset_t)(ptr), sizeof(*(ptr)), #ptr)

#define VALIDATE_PTR_2(ptr0, ptr1) \
    VALIDATE_PTR(ptr0) && VALIDATE_PTR(ptr1)

#define VALIDATE_PTR_3(ptr0, ptr1, ptr2) \
    VALIDATE_PTR_2(ptr0, ptr1) && VALIDATE_PTR(ptr2)

#define VALIDATE_PTR_4(ptr0, ptr1, ptr2, ptr3) \
    VALIDATE_PTR_2(ptr0, ptr1) && VALIDATE_PTR_2(ptr2, ptr3)

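/* Argument-counting helper: selects the VALIDATE_PTR_* variant that matches the number of arguments passed to VALIDATE_PTR_LIST() */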
#define GET_MACRO(_1, _2, _3, _4, NAME, ...) NAME

#define VALIDATE_PTR_LIST(...) GET_MACRO(__VA_ARGS__, VALIDATE_PTR_4, VALIDATE_PTR_3, VALIDATE_PTR_2, VALIDATE_PTR)(__VA_ARGS__)

/*
 * Evaluate if a pointer is valid
 * Print a message if pointer is invalid
 */
static boolean_t
validate_ptr(
    vm_offset_t ptr, vm_size_t size, const char * ptr_name)
{
    if (ptr) {
        if (ml_validate_nofault(ptr, size)) {
            return TRUE;
        } else {
            paniclog_append_noflush("Invalid %s pointer: %p size: %d\n",
                ptr_name, (void *)ptr, (int)size);
            return FALSE;
        }
    } else {
        paniclog_append_noflush("NULL %s pointer\n", ptr_name);
        return FALSE;
    }
}

/*
 * Walk and print one backtrace (a chain of frame pointers starting at topfp).
 */
static void
print_one_backtrace(pmap_t pmap, vm_offset_t topfp, const char *cur_marker,
    boolean_t is_64_bit)
{
    int i = 0;
    addr64_t lr;
    addr64_t fp;
    addr64_t fp_for_ppn;
    ppnum_t ppn;
    boolean_t dump_kernel_stack;

    fp = topfp;
    fp_for_ppn = 0;
    ppn = (ppnum_t)NULL;

    if (fp >= VM_MIN_KERNEL_ADDRESS) {
        dump_kernel_stack = TRUE;
    } else {
        dump_kernel_stack = FALSE;
    }

    do {
        if ((fp == 0) || ((fp & FP_ALIGNMENT_MASK) != 0)) {
            break;
        }
        if (dump_kernel_stack && ((fp < VM_MIN_KERNEL_ADDRESS) || (fp > VM_MAX_KERNEL_ADDRESS))) {
            break;
        }
        if ((!dump_kernel_stack) && (fp >= VM_MIN_KERNEL_ADDRESS)) {
            break;
        }

        /*
         * Check to see if current address will result in a different
         * ppn than previously computed (to avoid recomputation) via
         * (addr) ^ fp_for_ppn) >> PAGE_SHIFT)
         */
        if ((((fp + FP_LR_OFFSET) ^ fp_for_ppn) >> PAGE_SHIFT) != 0x0U) {
            ppn = pmap_find_phys(pmap, fp + FP_LR_OFFSET);
            fp_for_ppn = fp + (is_64_bit ? FP_LR_OFFSET64 : FP_LR_OFFSET);
        }
        if (ppn != (ppnum_t)NULL) {
            if (is_64_bit) {
                lr = ml_phys_read_double_64(((((vm_offset_t)ppn) << PAGE_SHIFT)) | ((fp + FP_LR_OFFSET64) & PAGE_MASK));
#if defined(HAS_APPLE_PAC)
                /* return addresses on stack will be signed by arm64e ABI */
                lr = (addr64_t) ptrauth_strip((void *)lr, ptrauth_key_return_address);
#endif
            } else {
                lr = ml_phys_read_word(((((vm_offset_t)ppn) << PAGE_SHIFT)) | ((fp + FP_LR_OFFSET) & PAGE_MASK));
            }
        } else {
            if (is_64_bit) {
                paniclog_append_noflush("%s\t Could not read LR from frame at 0x%016llx\n", cur_marker, fp + FP_LR_OFFSET64);
            } else {
                paniclog_append_noflush("%s\t Could not read LR from frame at 0x%08x\n", cur_marker, (uint32_t)(fp + FP_LR_OFFSET));
            }
            break;
        }
        if (((fp ^ fp_for_ppn) >> PAGE_SHIFT) != 0x0U) {
            ppn = pmap_find_phys(pmap, fp);
            fp_for_ppn = fp;
        }
        if (ppn != (ppnum_t)NULL) {
            if (is_64_bit) {
                fp = ml_phys_read_double_64(((((vm_offset_t)ppn) << PAGE_SHIFT)) | (fp & PAGE_MASK));
            } else {
                fp = ml_phys_read_word(((((vm_offset_t)ppn) << PAGE_SHIFT)) | (fp & PAGE_MASK));
            }
        } else {
            if (is_64_bit) {
                paniclog_append_noflush("%s\t Could not read FP from frame at 0x%016llx\n", cur_marker, fp);
            } else {
                paniclog_append_noflush("%s\t Could not read FP from frame at 0x%08x\n", cur_marker, (uint32_t)fp);
            }
            break;
        }

        if (lr) {
            if (is_64_bit) {
                paniclog_append_noflush("%s\t lr: 0x%016llx fp: 0x%016llx\n", cur_marker, lr, fp);
            } else {
                paniclog_append_noflush("%s\t lr: 0x%08x fp: 0x%08x\n", cur_marker, (uint32_t)lr, (uint32_t)fp);
            }
        }
    } while ((++i < FP_MAX_NUM_TO_EVALUATE) && (fp != topfp));
}

#define SANE_TASK_LIMIT 256
#define TOP_RUNNABLE_LIMIT 5
#define PANICLOG_UUID_BUF_SIZE 256

extern void panic_print_vnodes(void);

static void
do_print_all_backtraces(const char *message, uint64_t panic_options)
{
    int logversion = PANICLOG_VERSION;
    thread_t cur_thread = current_thread();
    uintptr_t cur_fp;
    task_t task;
    int print_vnodes = 0;
    const char *nohilite_thread_marker = "\t";

    /* end_marker_bytes is set to 200 so there is always room to print the END marker plus the stackshot summary info */
    int bytes_traced = 0, bytes_remaining = 0, end_marker_bytes = 200;
    uint64_t bytes_used = 0ULL;
    int err = 0;
    char *stackshot_begin_loc = NULL;

#if defined(__arm__)
    __asm__ volatile ("mov %0, r7":"=r"(cur_fp));
#elif defined(__arm64__)
    __asm__ volatile ("add %0, xzr, fp":"=r"(cur_fp));
#else
#error Unknown architecture.
#endif
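    /* cur_fp now holds this function's frame pointer (r7 on arm, fp/x29 on arm64); the panicked thread's backtrace below starts from it */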
    if (panic_bt_depth != 0) {
        return;
    }
    panic_bt_depth++;

    /* Truncate panic string to 1200 bytes */
    paniclog_append_noflush("Debugger message: %.1200s\n", message);
    if (debug_enabled) {
        paniclog_append_noflush("Device: %s\n",
            ('\0' != gTargetTypeBuffer[0]) ? gTargetTypeBuffer : "Not set yet");
        paniclog_append_noflush("Hardware Model: %s\n",
            ('\0' != gModelTypeBuffer[0]) ? gModelTypeBuffer : "Not set yet");
        paniclog_append_noflush("ECID: %02X%02X%02X%02X%02X%02X%02X%02X\n", gPlatformECID[7],
            gPlatformECID[6], gPlatformECID[5], gPlatformECID[4], gPlatformECID[3],
            gPlatformECID[2], gPlatformECID[1], gPlatformECID[0]);
        if (last_hwaccess_thread) {
            paniclog_append_noflush("AppleHWAccess Thread: 0x%llx\n", last_hwaccess_thread);
        }
        paniclog_append_noflush("Boot args: %s\n", PE_boot_args());
    }
    paniclog_append_noflush("Memory ID: 0x%x\n", gPlatformMemoryID);
    paniclog_append_noflush("OS version: %.256s\n",
        ('\0' != osversion[0]) ? osversion : "Not set yet");
#if defined(XNU_TARGET_OS_BRIDGE)
    paniclog_append_noflush("macOS version: %.256s\n",
        ('\0' != macosversion[0]) ? macosversion : "Not set");
#endif
    paniclog_append_noflush("Kernel version: %.512s\n", version);

    if (kernelcache_uuid_valid) {
        paniclog_append_noflush("KernelCache UUID: ");
        for (size_t index = 0; index < sizeof(uuid_t); index++) {
            paniclog_append_noflush("%02X", kernelcache_uuid[index]);
        }
        paniclog_append_noflush("\n");
    }
    panic_display_kernel_uuid();

    paniclog_append_noflush("iBoot version: %.128s\n", firmware_version);
    paniclog_append_noflush("secure boot?: %s\n", debug_enabled ? "NO" : "YES");
#if defined(XNU_TARGET_OS_BRIDGE)
    paniclog_append_noflush("x86 EFI Boot State: ");
    if (PE_smc_stashed_x86_efi_boot_state != 0xFF) {
        paniclog_append_noflush("0x%x\n", PE_smc_stashed_x86_efi_boot_state);
    } else {
        paniclog_append_noflush("not available\n");
    }
    paniclog_append_noflush("x86 System State: ");
    if (PE_smc_stashed_x86_system_state != 0xFF) {
        paniclog_append_noflush("0x%x\n", PE_smc_stashed_x86_system_state);
    } else {
        paniclog_append_noflush("not available\n");
    }
    paniclog_append_noflush("x86 Power State: ");
    if (PE_smc_stashed_x86_power_state != 0xFF) {
        paniclog_append_noflush("0x%x\n", PE_smc_stashed_x86_power_state);
    } else {
        paniclog_append_noflush("not available\n");
    }
    paniclog_append_noflush("x86 Shutdown Cause: ");
    if (PE_smc_stashed_x86_shutdown_cause != 0xFF) {
        paniclog_append_noflush("0x%x\n", PE_smc_stashed_x86_shutdown_cause);
    } else {
        paniclog_append_noflush("not available\n");
    }
    paniclog_append_noflush("x86 Previous Power Transitions: ");
    if (PE_smc_stashed_x86_prev_power_transitions != UINT64_MAX) {
        paniclog_append_noflush("0x%llx\n", PE_smc_stashed_x86_prev_power_transitions);
    } else {
        paniclog_append_noflush("not available\n");
    }
    paniclog_append_noflush("PCIeUp link state: ");
    if (PE_pcie_stashed_link_state != UINT32_MAX) {
        paniclog_append_noflush("0x%x\n", PE_pcie_stashed_link_state);
    } else {
        paniclog_append_noflush("not available\n");
    }
#endif
    if (panic_data_buffers != NULL) {
        paniclog_append_noflush("%s data: ", panic_data_buffers->producer_name);
        uint8_t *panic_buffer_data = (uint8_t *) panic_data_buffers->buf;
        for (int i = 0; i < panic_data_buffers->len; i++) {
            paniclog_append_noflush("%02X", panic_buffer_data[i]);
        }
        paniclog_append_noflush("\n");
    }
    paniclog_append_noflush("Paniclog version: %d\n", logversion);

    panic_display_kernel_aslr();
    panic_display_times();
    panic_display_zprint();
#if CONFIG_ZLEAKS
    panic_display_ztrace();
#endif /* CONFIG_ZLEAKS */
#if CONFIG_ECC_LOGGING
    panic_display_ecc_errors();
#endif /* CONFIG_ECC_LOGGING */

#if DEVELOPMENT || DEBUG
    if (cs_debug_unsigned_exec_failures != 0 || cs_debug_unsigned_mmap_failures != 0) {
        paniclog_append_noflush("Unsigned code exec failures: %u\n", cs_debug_unsigned_exec_failures);
        paniclog_append_noflush("Unsigned code mmap failures: %u\n", cs_debug_unsigned_mmap_failures);
    }
#endif

    // Highlight threads that used high amounts of CPU in the panic log if requested (historically requested for watchdog panics)
    if (panic_options & DEBUGGER_OPTION_PRINT_CPU_USAGE_PANICLOG) {
        thread_t top_runnable[5] = {0};
        thread_t thread;
        int total_cpu_usage = 0;

        print_vnodes = 1;


        for (thread = (thread_t)queue_first(&threads);
            VALIDATE_PTR(thread) && !queue_end(&threads, (queue_entry_t)thread);
            thread = (thread_t)queue_next(&thread->threads)) {
            total_cpu_usage += thread->cpu_usage;

            // Look for the 5 runnable threads with highest priority
            if (thread->state & TH_RUN) {
                int k;
                thread_t comparison_thread = thread;

                for (k = 0; k < TOP_RUNNABLE_LIMIT; k++) {
                    if (top_runnable[k] == 0) {
                        top_runnable[k] = comparison_thread;
                        break;
                    } else if (comparison_thread->sched_pri > top_runnable[k]->sched_pri) {
                        thread_t temp = top_runnable[k];
                        top_runnable[k] = comparison_thread;
                        comparison_thread = temp;
                    } // if comparison thread has higher priority than previously saved thread
                } // loop through highest priority runnable threads
            } // Check if thread is runnable
        } // Loop through all threads

        // Print the relevant info for each thread identified
        paniclog_append_noflush("Total cpu_usage: %d\n", total_cpu_usage);
        paniclog_append_noflush("Thread task pri cpu_usage\n");

        for (int i = 0; i < TOP_RUNNABLE_LIMIT; i++) {
            if (top_runnable[i] && VALIDATE_PTR(top_runnable[i]->task) &&
                validate_ptr((vm_offset_t)top_runnable[i]->task->bsd_info, 1, "bsd_info")) {
                char name[MAXCOMLEN + 1];
                proc_name_kdp(top_runnable[i]->task, name, sizeof(name));
                paniclog_append_noflush("%p %s %d %d\n",
                    top_runnable[i], name, top_runnable[i]->sched_pri, top_runnable[i]->cpu_usage);
            }
        } // Loop through highest priority runnable threads
        paniclog_append_noflush("\n");
    }

    // print current task info
    if (VALIDATE_PTR_LIST(cur_thread, cur_thread->task)) {
        task = cur_thread->task;

        if (VALIDATE_PTR_LIST(task->map, task->map->pmap)) {
            paniclog_append_noflush("Panicked task %p: %d pages, %d threads: ",
                task, task->map->pmap->stats.resident_count, task->thread_count);
        } else {
            paniclog_append_noflush("Panicked task %p: %d threads: ",
                task, task->thread_count);
        }

        if (validate_ptr((vm_offset_t)task->bsd_info, 1, "bsd_info")) {
            char name[MAXCOMLEN + 1];
            int pid = proc_pid(task->bsd_info);
            proc_name_kdp(task, name, sizeof(name));
            paniclog_append_noflush("pid %d: %s", pid, name);
        } else {
            paniclog_append_noflush("unknown task");
        }

        paniclog_append_noflush("\n");
    }

    if (cur_fp < VM_MAX_KERNEL_ADDRESS) {
        paniclog_append_noflush("Panicked thread: %p, backtrace: 0x%llx, tid: %llu\n",
            cur_thread, (addr64_t)cur_fp, thread_tid(cur_thread));
#if __LP64__
        print_one_backtrace(kernel_pmap, cur_fp, nohilite_thread_marker, TRUE);
#else
        print_one_backtrace(kernel_pmap, cur_fp, nohilite_thread_marker, FALSE);
#endif
    } else {
        paniclog_append_noflush("Could not print panicked thread backtrace:"
            "frame pointer outside kernel vm.\n");
    }

    paniclog_append_noflush("\n");
    panic_info->eph_panic_log_len = PE_get_offset_into_panic_region(debug_buf_ptr) - panic_info->eph_panic_log_offset;
    /* set the os version data in the panic header in the format 'Product Version (OS Version)' (only if they have been set) */
    if ((osversion[0] != '\0') && (osproductversion[0] != '\0')) {
        snprintf((char *)&panic_info->eph_os_version, sizeof(panic_info->eph_os_version), PANIC_HEADER_VERSION_FMT_STR,
            osproductversion, osversion);
    }
#if defined(XNU_TARGET_OS_BRIDGE)
    if ((macosversion[0] != '\0') && (macosproductversion[0] != '\0')) {
        snprintf((char *)&panic_info->eph_macos_version, sizeof(panic_info->eph_macos_version), PANIC_HEADER_VERSION_FMT_STR,
            macosproductversion, macosversion);
    }
#endif

    if (debug_ack_timeout_count) {
        panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_DEBUGGERSYNC;
        panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
        paniclog_append_noflush("!! debugger synchronization failed, no stackshot !!\n");
    } else if (stackshot_active()) {
        panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_NESTED;
        panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
        paniclog_append_noflush("!! panicked during stackshot, skipping panic stackshot !!\n");
    } else {
        /* Align the stackshot buffer to an 8-byte address (especially important for armv7k devices) */
        debug_buf_ptr += (8 - ((uintptr_t)debug_buf_ptr % 8));
        stackshot_begin_loc = debug_buf_ptr;

        bytes_remaining = debug_buf_size - (unsigned int)((uintptr_t)stackshot_begin_loc - (uintptr_t)debug_buf_base);
        err = kcdata_memory_static_init(&kc_panic_data, (mach_vm_address_t)debug_buf_ptr,
            KCDATA_BUFFER_BEGIN_STACKSHOT, bytes_remaining - end_marker_bytes,
            KCFLAG_USE_MEMCOPY);
        if (err == KERN_SUCCESS) {
            kdp_snapshot_preflight(-1, stackshot_begin_loc, bytes_remaining - end_marker_bytes,
                (STACKSHOT_GET_GLOBAL_MEM_STATS | STACKSHOT_SAVE_LOADINFO | STACKSHOT_KCDATA_FORMAT |
                STACKSHOT_ENABLE_BT_FAULTING | STACKSHOT_ENABLE_UUID_FAULTING | STACKSHOT_FROM_PANIC |
                STACKSHOT_NO_IO_STATS | STACKSHOT_THREAD_WAITINFO | STACKSHOT_COLLECT_SHAREDCACHE_LAYOUT), &kc_panic_data, 0);
            err = do_stackshot(NULL);
            bytes_traced = kdp_stack_snapshot_bytes_traced();
            if (bytes_traced > 0 && !err) {
                debug_buf_ptr += bytes_traced;
                panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_SUCCEEDED;
                panic_info->eph_stackshot_offset = PE_get_offset_into_panic_region(stackshot_begin_loc);
                panic_info->eph_stackshot_len = bytes_traced;

                panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
                paniclog_append_noflush("\n** Stackshot Succeeded ** Bytes Traced %d **\n", bytes_traced);
            } else {
                bytes_used = kcdata_memory_get_used_bytes(&kc_panic_data);
                if (bytes_used > 0) {
                    /* Zero out the stackshot data */
                    bzero(stackshot_begin_loc, bytes_used);
                    panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_INCOMPLETE;

                    panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
                    paniclog_append_noflush("\n** Stackshot Incomplete ** Bytes Filled %llu **\n", bytes_used);
                } else {
                    bzero(stackshot_begin_loc, bytes_used);
                    panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_ERROR;

                    panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
                    paniclog_append_noflush("\n!! Stackshot Failed !! Bytes Traced %d, err %d\n", bytes_traced, err);
                }
            }
        } else {
            panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_ERROR;
            panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
            paniclog_append_noflush("\n!! Stackshot Failed !!\nkcdata_memory_static_init returned %d", err);
        }
    }

    assert(panic_info->eph_other_log_offset != 0);

    if (print_vnodes != 0) {
        panic_print_vnodes();
    }

    panic_bt_depth--;
}

/*
 * Entry to print_all_backtraces is serialized by the debugger lock
 */
static void
print_all_backtraces(const char *message, uint64_t panic_options)
{
    unsigned int initial_not_in_kdp = not_in_kdp;

    cpu_data_t * cpu_data_ptr = getCpuDatap();

    assert(cpu_data_ptr->PAB_active == FALSE);
    cpu_data_ptr->PAB_active = TRUE;

    /*
     * Because print all backtraces uses the pmap routines, it needs to
     * avoid taking pmap locks. Right now, this is conditionalized on
     * not_in_kdp.
     */
    not_in_kdp = 0;
    do_print_all_backtraces(message, panic_options);

    not_in_kdp = initial_not_in_kdp;

    cpu_data_ptr->PAB_active = FALSE;
}

void
panic_display_times()
{
    if (kdp_clock_is_locked()) {
        paniclog_append_noflush("Warning: clock is locked. Can't get time\n");
        return;
    }

    if ((is_clock_configured) && (simple_lock_try(&clock_lock, LCK_GRP_NULL))) {
        clock_sec_t secs, boot_secs;
        clock_usec_t usecs, boot_usecs;

        simple_unlock(&clock_lock);

        clock_get_calendar_microtime(&secs, &usecs);
        clock_get_boottime_microtime(&boot_secs, &boot_usecs);

        paniclog_append_noflush("mach_absolute_time: 0x%llx\n", mach_absolute_time());
        paniclog_append_noflush("Epoch Time: sec usec\n");
        paniclog_append_noflush(" Boot : 0x%08x 0x%08x\n", (unsigned int)boot_secs, (unsigned int)boot_usecs);
        paniclog_append_noflush(" Sleep : 0x%08x 0x%08x\n", (unsigned int)gIOLastSleepTime.tv_sec, (unsigned int)gIOLastSleepTime.tv_usec);
        paniclog_append_noflush(" Wake : 0x%08x 0x%08x\n", (unsigned int)gIOLastWakeTime.tv_sec, (unsigned int)gIOLastWakeTime.tv_usec);
        paniclog_append_noflush(" Calendar: 0x%08x 0x%08x\n\n", (unsigned int)secs, (unsigned int)usecs);
    }
}

void
panic_print_symbol_name(vm_address_t search)
{
#pragma unused(search)
    // empty stub. Really only used on x86_64.
    return;
}

void
SavePanicInfo(
    const char *message, __unused void *panic_data, uint64_t panic_options)
{
    /*
     * This should be initialized by the time we get here, but
     * if it is not, asserting about it will be of no use (it will
     * come right back to here), so just loop right here and now.
     * This prevents early-boot panics from becoming recursive and
     * thus makes them easier to debug. If you attached to a device
     * and see your PC here, look down a few frames to see your
     * early-boot panic there.
     */
    while (!panic_info || panic_info->eph_panic_log_offset == 0) {
        ;
    }

    if (panic_options & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
        panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_BUTTON_RESET_PANIC;
    }

    if (panic_options & DEBUGGER_OPTION_COPROC_INITIATED_PANIC) {
        panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COPROC_INITIATED_PANIC;
    }

#if defined(XNU_TARGET_OS_BRIDGE)
    panic_info->eph_x86_power_state = PE_smc_stashed_x86_power_state;
    panic_info->eph_x86_efi_boot_state = PE_smc_stashed_x86_efi_boot_state;
    panic_info->eph_x86_system_state = PE_smc_stashed_x86_system_state;
#endif

    /*
     * On newer targets, panic data is stored directly into the iBoot panic region.
     * If we re-enter SavePanicInfo (e.g. on a double panic) on such a target, update the
     * panic CRC so that iBoot can hopefully find *something* useful in the panic region.
     */
    if (PanicInfoSaved && (debug_buf_base >= (char*)gPanicBase) && (debug_buf_base < (char*)gPanicBase + gPanicSize)) {
        unsigned int pi_size = (unsigned int)(debug_buf_ptr - gPanicBase);
        PE_save_buffer_to_vram((unsigned char*)gPanicBase, &pi_size);
        PE_sync_panic_buffers(); // extra precaution; panic path likely isn't reliable if we're here
    }

    if (PanicInfoSaved || (debug_buf_size == 0)) {
        return;
    }

    PanicInfoSaved = TRUE;

    print_all_backtraces(message, panic_options);

    assert(panic_info->eph_panic_log_len != 0);
    panic_info->eph_other_log_len = PE_get_offset_into_panic_region(debug_buf_ptr) - panic_info->eph_other_log_offset;

    PEHaltRestart(kPEPanicSync);

    /*
     * Notifies registered IOPlatformPanicAction callbacks
     * (which includes one to disable the memcache) and flushes
     * the buffer contents from the cache
     */
    paniclog_flush();
}

void
paniclog_flush()
{
    unsigned int panicbuf_length = 0;

    panicbuf_length = (unsigned int)(debug_buf_ptr - gPanicBase);
    if (!panicbuf_length) {
        return;
    }

    /*
     * Updates the log length of the last part of the panic log.
     */
    panic_info->eph_other_log_len = PE_get_offset_into_panic_region(debug_buf_ptr) - panic_info->eph_other_log_offset;

    /*
     * Updates the metadata at the beginning of the panic buffer,
     * updates the CRC.
     */
    PE_save_buffer_to_vram((unsigned char *)gPanicBase, &panicbuf_length);

    /*
     * This is currently unused by platform KEXTs on embedded but is
     * kept for compatibility with the published IOKit interfaces.
     */
    PESavePanicInfo((unsigned char *)gPanicBase, panicbuf_length);

    PE_sync_panic_buffers();
}

/*
 * @function _was_in_userspace
 *
 * @abstract Placeholder function whose address is used to indicate that a CPU
 * was in userspace before it was IPI'd to enter the Debugger context.
 *
 * @discussion This function should never actually be called.
 */
static void __attribute__((__noreturn__))
_was_in_userspace(void)
{
    panic("%s: should not have been invoked.", __FUNCTION__);
}

/*
 * @function DebuggerXCallEnter
 *
 * @abstract IPI other cores so this core can run in a single-threaded context.
 *
 * @discussion This function should be called with the debugger lock held. It
 * signals the other cores to go into a busy loop so this core can run in a
 * single-threaded context and inspect kernel memory.
 *
 * @param proceed_on_sync_failure If true, then go ahead and try to debug even
 * if we can't synch with the other cores. This is inherently unsafe and should
 * only be used if the kernel is going down in flames anyway.
 *
 * @result returns KERN_OPERATION_TIMED_OUT if synchronization times out and
 * proceed_on_sync_failure is false.
 */
kern_return_t
DebuggerXCallEnter(
    boolean_t proceed_on_sync_failure)
{
    uint64_t max_mabs_time, current_mabs_time;
    int cpu;
    int max_cpu;
    cpu_data_t *target_cpu_datap;
    cpu_data_t *cpu_data_ptr = getCpuDatap();

    /* Check for nested debugger entry. */
    cpu_data_ptr->debugger_active++;
    if (cpu_data_ptr->debugger_active != 1) {
        return KERN_SUCCESS;
    }

    /*
     * If debugger_sync is not 0, someone responded excessively late to the last
     * debug request (we zero the sync variable in the return function). Zero it
     * again here. This should prevent us from getting out of sync (heh) and
     * timing out on every entry to the debugger if we timeout once.
     */

    debugger_sync = 0;
    mp_kdp_trap = 1;

    /*
     * We need a barrier here to ensure CPUs see mp_kdp_trap and spin when responding
     * to the signal.
     */
    __builtin_arm_dmb(DMB_ISH);

    /*
     * Try to signal all CPUs (except ourselves, of course). Use debugger_sync to
     * synchronize with every CPU that we appeared to signal successfully (cpu_signal
     * is not synchronous).
     */
    bool cpu_signal_failed = false;
    max_cpu = ml_get_max_cpu_number();

    boolean_t immediate_halt = FALSE;
    if (proceed_on_sync_failure && force_immediate_debug_halt) {
        immediate_halt = TRUE;
    }

    if (!immediate_halt) {
        for (cpu = 0; cpu <= max_cpu; cpu++) {
            target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

            if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr)) {
                continue;
            }

            if (KERN_SUCCESS == cpu_signal(target_cpu_datap, SIGPdebug, (void *)NULL, NULL)) {
                os_atomic_inc(&debugger_sync, relaxed);
            } else {
                cpu_signal_failed = true;
                kprintf("cpu_signal failed in DebuggerXCallEnter\n");
            }
        }

        nanoseconds_to_absolutetime(DEBUG_ACK_TIMEOUT, &max_mabs_time);
        current_mabs_time = mach_absolute_time();
        max_mabs_time += current_mabs_time;
        assert(max_mabs_time > current_mabs_time);

        /*
         * Wait for DEBUG_ACK_TIMEOUT ns for a response from everyone we IPI'd. If we
         * timeout, that is simply too bad; we don't have a true NMI, and one CPU may be
         * uninterruptibly spinning on someone else. The best we can hope for is that
         * all other CPUs have either responded or are spinning in a context that is
         * debugger safe.
         */
        while ((debugger_sync != 0) && (current_mabs_time < max_mabs_time)) {
            current_mabs_time = mach_absolute_time();
        }
    }

    if (cpu_signal_failed && !proceed_on_sync_failure) {
        DebuggerXCallReturn();
        return KERN_FAILURE;
    } else if (immediate_halt || (current_mabs_time >= max_mabs_time)) {
        /*
         * For the moment, we're aiming for a timeout that the user shouldn't notice,
         * but will be sufficient to let the other core respond.
         */
        __builtin_arm_dmb(DMB_ISH);
        for (cpu = 0; cpu <= max_cpu; cpu++) {
            target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

            if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr)) {
                continue;
            }
            if (!(target_cpu_datap->cpu_signal & SIGPdebug) && !immediate_halt) {
                continue;
            }
            if (proceed_on_sync_failure) {
                paniclog_append_noflush("Attempting to forcibly halt cpu %d\n", cpu);
                dbgwrap_status_t halt_status = ml_dbgwrap_halt_cpu(cpu, 0);
                if (halt_status < 0) {
                    paniclog_append_noflush("cpu %d failed to halt with error %d: %s\n", cpu, halt_status, ml_dbgwrap_strerror(halt_status));
                } else {
                    if (halt_status > 0) {
                        paniclog_append_noflush("cpu %d halted with warning %d: %s\n", cpu, halt_status, ml_dbgwrap_strerror(halt_status));
                    } else {
                        paniclog_append_noflush("cpu %d successfully halted\n", cpu);
                    }
                    target_cpu_datap->halt_status = CPU_HALTED;
                }
            } else {
                kprintf("Debugger synch pending on cpu %d\n", cpu);
            }
        }
        if (proceed_on_sync_failure) {
            for (cpu = 0; cpu <= max_cpu; cpu++) {
                target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

                if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr) ||
                    (target_cpu_datap->halt_status == CPU_NOT_HALTED)) {
                    continue;
                }
                dbgwrap_status_t halt_status = ml_dbgwrap_halt_cpu_with_state(cpu,
                    NSEC_PER_SEC, &target_cpu_datap->halt_state);
                if ((halt_status < 0) || (halt_status == DBGWRAP_WARN_CPU_OFFLINE)) {
                    paniclog_append_noflush("Unable to obtain state for cpu %d with status %d: %s\n", cpu, halt_status, ml_dbgwrap_strerror(halt_status));
                } else {
                    target_cpu_datap->halt_status = CPU_HALTED_WITH_STATE;
                }
            }
            if (immediate_halt) {
                paniclog_append_noflush("Immediate halt requested on all cores\n");
            } else {
                paniclog_append_noflush("Debugger synchronization timed out; waited %llu nanoseconds\n", DEBUG_ACK_TIMEOUT);
            }
            debug_ack_timeout_count++;
            return KERN_SUCCESS;
        } else {
            DebuggerXCallReturn();
            return KERN_OPERATION_TIMED_OUT;
        }
    } else {
        return KERN_SUCCESS;
    }
}

/*
 * @function DebuggerXCallReturn
 *
 * @abstract Resume normal multicore operation after DebuggerXCallEnter()
 *
 * @discussion This function should be called with debugger lock held.
 */
void
DebuggerXCallReturn(
    void)
{
    cpu_data_t *cpu_data_ptr = getCpuDatap();

    cpu_data_ptr->debugger_active--;
    if (cpu_data_ptr->debugger_active != 0) {
        return;
    }

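    /* Clearing mp_kdp_trap releases any CPUs still spinning in DebuggerXCall() */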
    mp_kdp_trap = 0;
    debugger_sync = 0;

    /* Do we need a barrier here? */
    __builtin_arm_dmb(DMB_ISH);
}

void
DebuggerXCall(
    void *ctx)
{
    boolean_t save_context = FALSE;
    vm_offset_t kstackptr = 0;
    arm_saved_state_t *regs = (arm_saved_state_t *) ctx;

    if (regs != NULL) {
#if defined(__arm64__)
        save_context = PSR64_IS_KERNEL(get_saved_state_cpsr(regs));
#else
        save_context = PSR_IS_KERNEL(regs->cpsr);
#endif
    }

    kstackptr = current_thread()->machine.kstackptr;
    arm_saved_state_t *state = (arm_saved_state_t *)kstackptr;

    if (save_context) {
        /* Save the interrupted context before acknowledging the signal */
        copy_signed_thread_state(state, regs);
    } else if (regs) {
        /* zero old state so machine_trace_thread knows not to backtrace it */
        set_saved_state_fp(state, 0);
        set_saved_state_pc(state, (register_t)&_was_in_userspace);
        set_saved_state_lr(state, 0);
        set_saved_state_sp(state, 0);
    }

    os_atomic_dec(&debugger_sync, relaxed);
    __builtin_arm_dmb(DMB_ISH);
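    /* Acknowledgement sent above; spin here until the debugger CPU clears mp_kdp_trap in DebuggerXCallReturn() */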
    while (mp_kdp_trap) {
        ;
    }

    /* Any cleanup for our pushed context should go here */
}

void
DebuggerCall(
    unsigned int reason,
    void *ctx)
{
#if !MACH_KDP
#pragma unused(reason,ctx)
#endif /* !MACH_KDP */

#if ALTERNATE_DEBUGGER
    alternate_debugger_enter();
#endif

#if MACH_KDP
    kdp_trap(reason, (struct arm_saved_state *)ctx);
#else
    /* TODO: decide what to do if no debugger config */
#endif
}

boolean_t
bootloader_valid_page(ppnum_t ppn)
{
    return pmap_bootloader_page(ppn);
}