/*
 * Copyright (c) 2007-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <debug.h>
#include <mach_kdp.h>

#include <kern/thread.h>
#include <machine/pmap.h>
#include <device/device_types.h>

#include <mach/vm_param.h>
#include <mach/clock_types.h>
#include <mach/machine.h>
#include <mach/kmod.h>
#include <pexpert/boot.h>
#include <pexpert/pexpert.h>

#include <kern/misc_protos.h>
#include <kern/startup.h>
#include <kern/clock.h>
#include <kern/debug.h>
#include <kern/processor.h>
#include <kdp/kdp_core.h>
#if ALTERNATE_DEBUGGER
#include <arm64/alternate_debugger.h>
#endif
#include <machine/atomic.h>
#include <machine/trap.h>
#include <kern/spl.h>
#include <pexpert/pexpert.h>
#include <kdp/kdp_callout.h>
#include <kdp/kdp_dyld.h>
#include <kdp/kdp_internal.h>
#include <uuid/uuid.h>
#include <sys/codesign.h>
#include <sys/time.h>

#include <IOKit/IOPlatformExpert.h>

#include <mach/vm_prot.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
#include <vm/vm_shared_region.h>
#include <mach/time_value.h>
#include <machine/machparam.h> /* for btop */

#include <console/video_console.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <arm/misc_protos.h>
#include <libkern/OSKextLibPrivate.h>
#include <vm/vm_kern.h>
#include <kern/kern_cdata.h>

#if MACH_KDP
void kdp_trap(unsigned int, struct arm_saved_state *);
#endif

extern kern_return_t do_stackshot(void *);
extern void kdp_snapshot_preflight(int pid, void *tracebuf,
    uint32_t tracebuf_size, uint32_t flags,
    kcdata_descriptor_t data_p,
    boolean_t enable_faulting);
extern int kdp_stack_snapshot_bytes_traced(void);

/*
 * Increment the PANICLOG_VERSION if you change the format of the panic
 * log in any way.
 */
#define PANICLOG_VERSION 13
static struct kcdata_descriptor kc_panic_data;

extern char firmware_version[];
extern volatile uint32_t debug_enabled;
extern unsigned int not_in_kdp;

extern int copyinframe(vm_address_t fp, uint32_t * frame);
extern void kdp_callouts(kdp_event_t event);

/* #include <sys/proc.h> */
#define MAXCOMLEN 16
extern int proc_pid(void *p);
extern void proc_name_kdp(task_t, char *, int);

/*
 * Make sure there's enough space to include the relevant bits in the format required
 * within the space allocated for the panic version string in the panic header.
 * The format required by OSAnalytics/DumpPanic is 'Product Version (OS Version)'
 */
#define PANIC_HEADER_VERSION_FMT_STR "%.14s (%.14s)"
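/*
 * Illustrative note (hypothetical values): the %.14s precision caps each
 * component at 14 characters, so e.g. an osproductversion of "12.1" and an
 * osversion of "16B92" would render as "12.1 (16B92)", while oversized
 * strings are truncated rather than overflowing the header field.
 */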

extern const char version[];
extern char osversion[];
extern char osproductversion[];

#if defined(XNU_TARGET_OS_BRIDGE)
extern char macosproductversion[];
extern char macosversion[];
#endif

extern uint8_t gPlatformECID[8];
extern uint32_t gPlatformMemoryID;

extern uint64_t last_hwaccess_thread;

/*
 * gTargetTypeBuffer is sized at 8 and gModelTypeBuffer at 32 because the
 * target name and model name typically don't exceed these sizes.
 */
extern char gTargetTypeBuffer[8];
extern char gModelTypeBuffer[32];

decl_simple_lock_data(extern, clock_lock)
extern struct timeval gIOLastSleepTime;
extern struct timeval gIOLastWakeTime;
extern boolean_t is_clock_configured;
extern boolean_t kernelcache_uuid_valid;
extern uuid_t kernelcache_uuid;

/* Definitions for frame pointers */
#define FP_ALIGNMENT_MASK ((uint32_t)(0x3))
#define FP_LR_OFFSET ((uint32_t)4)
#define FP_LR_OFFSET64 ((uint32_t)8)
#define FP_MAX_NUM_TO_EVALUATE (50)
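/*
 * Illustrative note (sketch of the AAPCS frame-record layout the walker
 * below assumes): each frame begins with a two-slot record
 * {saved fp, saved lr}, so the saved LR is read from fp + FP_LR_OFFSET
 * (arm) or fp + FP_LR_OFFSET64 (arm64), and the caller's fp is loaded
 * from fp itself. FP_MAX_NUM_TO_EVALUATE bounds the walk so a corrupt or
 * cyclic frame chain cannot hang the panic path.
 */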

/* Timeout (in nanoseconds) for all processors responding to debug crosscall */
#define DEBUG_ACK_TIMEOUT ((uint64_t) 10000000)

/* Forward function definitions */
void panic_display_times(void);
void panic_print_symbol_name(vm_address_t search);


/* Global variables */
static uint32_t panic_bt_depth;
boolean_t PanicInfoSaved = FALSE;
boolean_t force_immediate_debug_halt = FALSE;
unsigned int debug_ack_timeout_count = 0;
volatile unsigned int debugger_sync = 0;
volatile unsigned int mp_kdp_trap = 0; /* CPUs signalled by the debug CPU will spin on this */
unsigned int DebugContextCount = 0;

#if defined(__arm64__)
uint8_t PE_smc_stashed_x86_system_state = 0xFF;
uint8_t PE_smc_stashed_x86_power_state = 0xFF;
uint8_t PE_smc_stashed_x86_efi_boot_state = 0xFF;
uint8_t PE_smc_stashed_x86_shutdown_cause = 0xFF;
uint64_t PE_smc_stashed_x86_prev_power_transitions = UINT64_MAX;
uint32_t PE_pcie_stashed_link_state = UINT32_MAX;
#endif

// Convenient macros to easily validate one or more pointers if
// they have defined types
#define VALIDATE_PTR(ptr) \
	validate_ptr((vm_offset_t)(ptr), sizeof(*(ptr)), #ptr)

#define VALIDATE_PTR_2(ptr0, ptr1) \
	VALIDATE_PTR(ptr0) && VALIDATE_PTR(ptr1)

#define VALIDATE_PTR_3(ptr0, ptr1, ptr2) \
	VALIDATE_PTR_2(ptr0, ptr1) && VALIDATE_PTR(ptr2)

#define VALIDATE_PTR_4(ptr0, ptr1, ptr2, ptr3) \
	VALIDATE_PTR_2(ptr0, ptr1) && VALIDATE_PTR_2(ptr2, ptr3)

#define GET_MACRO(_1, _2, _3, _4, NAME, ...) NAME

#define VALIDATE_PTR_LIST(...) GET_MACRO(__VA_ARGS__, VALIDATE_PTR_4, VALIDATE_PTR_3, VALIDATE_PTR_2, VALIDATE_PTR)(__VA_ARGS__)
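/*
 * Illustrative expansion (not part of the original source): GET_MACRO
 * selects the N-argument variant by arity, so
 *	VALIDATE_PTR_LIST(a, b, c)
 * expands to
 *	VALIDATE_PTR_2(a, b) && VALIDATE_PTR(c)
 * i.e. validate_ptr() runs once per pointer, left to right, and the &&
 * chain stops at the first pointer that fails validation.
 */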

/*
 * Evaluate if a pointer is valid
 * Print a message if pointer is invalid
 */
static boolean_t
validate_ptr(
	vm_offset_t ptr, vm_size_t size, const char * ptr_name)
{
	if (ptr) {
		if (ml_validate_nofault(ptr, size)) {
			return TRUE;
		} else {
			paniclog_append_noflush("Invalid %s pointer: %p size: %d\n",
			    ptr_name, (void *)ptr, (int)size);
			return FALSE;
		}
	} else {
		paniclog_append_noflush("NULL %s pointer\n", ptr_name);
		return FALSE;
	}
}

/*
 * Backtrace a single frame.
 */
static void
print_one_backtrace(pmap_t pmap, vm_offset_t topfp, const char *cur_marker,
    boolean_t is_64_bit)
{
	int i = 0;
	addr64_t lr;
	addr64_t fp;
	addr64_t fp_for_ppn;
	ppnum_t ppn;
	boolean_t dump_kernel_stack;

	fp = topfp;
	fp_for_ppn = 0;
	ppn = (ppnum_t)NULL;

	if (fp >= VM_MIN_KERNEL_ADDRESS) {
		dump_kernel_stack = TRUE;
	} else {
		dump_kernel_stack = FALSE;
	}

	do {
		if ((fp == 0) || ((fp & FP_ALIGNMENT_MASK) != 0)) {
			break;
		}
		if (dump_kernel_stack && ((fp < VM_MIN_KERNEL_ADDRESS) || (fp > VM_MAX_KERNEL_ADDRESS))) {
			break;
		}
		if ((!dump_kernel_stack) && (fp >= VM_MIN_KERNEL_ADDRESS)) {
			break;
		}

		/*
		 * Check whether the current address falls on a different page
		 * than the previously translated one, via
		 * ((addr ^ fp_for_ppn) >> PAGE_SHIFT): the shifted XOR is
		 * nonzero iff the two addresses lie on different pages, in
		 * which case the ppn must be recomputed; otherwise the cached
		 * translation is reused.
		 */
		if ((((fp + FP_LR_OFFSET) ^ fp_for_ppn) >> PAGE_SHIFT) != 0x0U) {
			ppn = pmap_find_phys(pmap, fp + FP_LR_OFFSET);
			fp_for_ppn = fp + (is_64_bit ? FP_LR_OFFSET64 : FP_LR_OFFSET);
		}
		if (ppn != (ppnum_t)NULL) {
			if (is_64_bit) {
				lr = ml_phys_read_double_64(((((vm_offset_t)ppn) << PAGE_SHIFT)) | ((fp + FP_LR_OFFSET64) & PAGE_MASK));
			} else {
				lr = ml_phys_read_word(((((vm_offset_t)ppn) << PAGE_SHIFT)) | ((fp + FP_LR_OFFSET) & PAGE_MASK));
			}
		} else {
			if (is_64_bit) {
				paniclog_append_noflush("%s\t Could not read LR from frame at 0x%016llx\n", cur_marker, fp + FP_LR_OFFSET64);
			} else {
				paniclog_append_noflush("%s\t Could not read LR from frame at 0x%08x\n", cur_marker, (uint32_t)(fp + FP_LR_OFFSET));
			}
			break;
		}
		if (((fp ^ fp_for_ppn) >> PAGE_SHIFT) != 0x0U) {
			ppn = pmap_find_phys(pmap, fp);
			fp_for_ppn = fp;
		}
		if (ppn != (ppnum_t)NULL) {
			if (is_64_bit) {
				fp = ml_phys_read_double_64(((((vm_offset_t)ppn) << PAGE_SHIFT)) | (fp & PAGE_MASK));
			} else {
				fp = ml_phys_read_word(((((vm_offset_t)ppn) << PAGE_SHIFT)) | (fp & PAGE_MASK));
			}
		} else {
			if (is_64_bit) {
				paniclog_append_noflush("%s\t Could not read FP from frame at 0x%016llx\n", cur_marker, fp);
			} else {
				paniclog_append_noflush("%s\t Could not read FP from frame at 0x%08x\n", cur_marker, (uint32_t)fp);
			}
			break;
		}

		if (lr) {
			if (is_64_bit) {
				paniclog_append_noflush("%s\t lr: 0x%016llx fp: 0x%016llx\n", cur_marker, lr, fp);
			} else {
				paniclog_append_noflush("%s\t lr: 0x%08x fp: 0x%08x\n", cur_marker, (uint32_t)lr, (uint32_t)fp);
			}
		}
	} while ((++i < FP_MAX_NUM_TO_EVALUATE) && (fp != topfp));
}

#define SANE_TASK_LIMIT 256
#define TOP_RUNNABLE_LIMIT 5
#define PANICLOG_UUID_BUF_SIZE 256

extern void panic_print_vnodes(void);

static void
do_print_all_backtraces(
	const char *message)
{
	int logversion = PANICLOG_VERSION;
	thread_t cur_thread = current_thread();
	uintptr_t cur_fp;
	task_t task;
	int print_vnodes = 0;
	const char *nohilite_thread_marker = "\t";

	/* end_marker_bytes set to 200 for printing END marker + stackshot summary info always */
	int bytes_traced = 0, bytes_remaining = 0, end_marker_bytes = 200;
	uint64_t bytes_used = 0ULL;
	int err = 0;
	char *stackshot_begin_loc = NULL;

#if defined(__arm__)
	__asm__ volatile ("mov %0, r7":"=r"(cur_fp));
#elif defined(__arm64__)
	__asm__ volatile ("add %0, xzr, fp":"=r"(cur_fp));
#else
#error Unknown architecture.
#endif
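	/*
	 * Illustrative note: the inline asm above captures this function's
	 * own frame pointer (r7 on arm, x29/fp on arm64; "add %0, xzr, fp"
	 * is just a register move) so the panicked thread's backtrace can
	 * start from the current frame.
	 */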
	if (panic_bt_depth != 0) {
		return;
	}
	panic_bt_depth++;

	/* Truncate panic string to 1200 bytes -- WDT log can be ~1100 bytes */
	paniclog_append_noflush("Debugger message: %.1200s\n", message);
	if (debug_enabled) {
		paniclog_append_noflush("Device: %s\n",
		    ('\0' != gTargetTypeBuffer[0]) ? gTargetTypeBuffer : "Not set yet");
		paniclog_append_noflush("Hardware Model: %s\n",
		    ('\0' != gModelTypeBuffer[0]) ? gModelTypeBuffer : "Not set yet");
		paniclog_append_noflush("ECID: %02X%02X%02X%02X%02X%02X%02X%02X\n", gPlatformECID[7],
		    gPlatformECID[6], gPlatformECID[5], gPlatformECID[4], gPlatformECID[3],
		    gPlatformECID[2], gPlatformECID[1], gPlatformECID[0]);
		if (last_hwaccess_thread) {
			paniclog_append_noflush("AppleHWAccess Thread: 0x%llx\n", last_hwaccess_thread);
		}
		paniclog_append_noflush("Boot args: %s\n", PE_boot_args());
	}
	paniclog_append_noflush("Memory ID: 0x%x\n", gPlatformMemoryID);
	paniclog_append_noflush("OS version: %.256s\n",
	    ('\0' != osversion[0]) ? osversion : "Not set yet");
#if defined(XNU_TARGET_OS_BRIDGE)
	paniclog_append_noflush("macOS version: %.256s\n",
	    ('\0' != macosversion[0]) ? macosversion : "Not set");
#endif
	paniclog_append_noflush("Kernel version: %.512s\n", version);

	if (kernelcache_uuid_valid) {
		paniclog_append_noflush("KernelCache UUID: ");
		for (size_t index = 0; index < sizeof(uuid_t); index++) {
			paniclog_append_noflush("%02X", kernelcache_uuid[index]);
		}
		paniclog_append_noflush("\n");
	}
	panic_display_kernel_uuid();

	paniclog_append_noflush("iBoot version: %.128s\n", firmware_version);
	paniclog_append_noflush("secure boot?: %s\n", debug_enabled ? "NO" : "YES");
#if defined(XNU_TARGET_OS_BRIDGE)
	paniclog_append_noflush("x86 EFI Boot State: ");
	if (PE_smc_stashed_x86_efi_boot_state != 0xFF) {
		paniclog_append_noflush("0x%x\n", PE_smc_stashed_x86_efi_boot_state);
	} else {
		paniclog_append_noflush("not available\n");
	}
	paniclog_append_noflush("x86 System State: ");
	if (PE_smc_stashed_x86_system_state != 0xFF) {
		paniclog_append_noflush("0x%x\n", PE_smc_stashed_x86_system_state);
	} else {
		paniclog_append_noflush("not available\n");
	}
	paniclog_append_noflush("x86 Power State: ");
	if (PE_smc_stashed_x86_power_state != 0xFF) {
		paniclog_append_noflush("0x%x\n", PE_smc_stashed_x86_power_state);
	} else {
		paniclog_append_noflush("not available\n");
	}
	paniclog_append_noflush("x86 Shutdown Cause: ");
	if (PE_smc_stashed_x86_shutdown_cause != 0xFF) {
		paniclog_append_noflush("0x%x\n", PE_smc_stashed_x86_shutdown_cause);
	} else {
		paniclog_append_noflush("not available\n");
	}
	paniclog_append_noflush("x86 Previous Power Transitions: ");
	if (PE_smc_stashed_x86_prev_power_transitions != UINT64_MAX) {
		paniclog_append_noflush("0x%llx\n", PE_smc_stashed_x86_prev_power_transitions);
	} else {
		paniclog_append_noflush("not available\n");
	}
	paniclog_append_noflush("PCIeUp link state: ");
	if (PE_pcie_stashed_link_state != UINT32_MAX) {
		paniclog_append_noflush("0x%x\n", PE_pcie_stashed_link_state);
	} else {
		paniclog_append_noflush("not available\n");
	}
#endif
	if (panic_data_buffers != NULL) {
		paniclog_append_noflush("%s data: ", panic_data_buffers->producer_name);
		uint8_t *panic_buffer_data = (uint8_t *) panic_data_buffers->buf;
		for (int i = 0; i < panic_data_buffers->len; i++) {
			paniclog_append_noflush("%02X", panic_buffer_data[i]);
		}
		paniclog_append_noflush("\n");
	}
	paniclog_append_noflush("Paniclog version: %d\n", logversion);

	panic_display_kernel_aslr();
	panic_display_times();
	panic_display_zprint();
#if CONFIG_ZLEAKS
	panic_display_ztrace();
#endif /* CONFIG_ZLEAKS */
#if CONFIG_ECC_LOGGING
	panic_display_ecc_errors();
#endif /* CONFIG_ECC_LOGGING */

#if DEVELOPMENT || DEBUG
	if (cs_debug_unsigned_exec_failures != 0 || cs_debug_unsigned_mmap_failures != 0) {
		paniclog_append_noflush("Unsigned code exec failures: %u\n", cs_debug_unsigned_exec_failures);
		paniclog_append_noflush("Unsigned code mmap failures: %u\n", cs_debug_unsigned_mmap_failures);
	}
#endif

	// Just print threads with high CPU usage for WDT timeouts
	if (strncmp(message, "WDT timeout", 11) == 0) {
		thread_t top_runnable[5] = {0};
		thread_t thread;
		int total_cpu_usage = 0;

		print_vnodes = 1;

		for (thread = (thread_t)queue_first(&threads);
		    VALIDATE_PTR(thread) && !queue_end(&threads, (queue_entry_t)thread);
		    thread = (thread_t)queue_next(&thread->threads)) {
			total_cpu_usage += thread->cpu_usage;

			// Look for the 5 runnable threads with highest priority
			if (thread->state & TH_RUN) {
				int k;
				thread_t comparison_thread = thread;

				for (k = 0; k < TOP_RUNNABLE_LIMIT; k++) {
					if (top_runnable[k] == 0) {
						top_runnable[k] = comparison_thread;
						break;
					} else if (comparison_thread->sched_pri > top_runnable[k]->sched_pri) {
						thread_t temp = top_runnable[k];
						top_runnable[k] = comparison_thread;
						comparison_thread = temp;
					} // if comparison thread has higher priority than previously saved thread
				} // loop through highest priority runnable threads
			} // Check if thread is runnable
		} // Loop through all threads
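		/*
		 * Illustrative note: the pass above is a bounded insertion
		 * sort. A runnable thread sifts down the fixed-size array,
		 * displacing lower-priority entries, so top_runnable ends up
		 * ordered by descending sched_pri and a thread that beats no
		 * entry in a full array is dropped.
		 */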

		// Print the relevant info for each thread identified
		paniclog_append_noflush("Total cpu_usage: %d\n", total_cpu_usage);
		paniclog_append_noflush("Thread task pri cpu_usage\n");

		for (int i = 0; i < TOP_RUNNABLE_LIMIT; i++) {
			if (top_runnable[i] && VALIDATE_PTR(top_runnable[i]->task) &&
			    validate_ptr((vm_offset_t)top_runnable[i]->task->bsd_info, 1, "bsd_info")) {
				char name[MAXCOMLEN + 1];
				proc_name_kdp(top_runnable[i]->task, name, sizeof(name));
				paniclog_append_noflush("%p %s %d %d\n",
				    top_runnable[i], name, top_runnable[i]->sched_pri, top_runnable[i]->cpu_usage);
			}
		} // Loop through highest priority runnable threads
		paniclog_append_noflush("\n");
	} // Check if message is "WDT timeout"

	// print current task info
	if (VALIDATE_PTR_LIST(cur_thread, cur_thread->task)) {
		task = cur_thread->task;

		if (VALIDATE_PTR_LIST(task->map, task->map->pmap)) {
			paniclog_append_noflush("Panicked task %p: %d pages, %d threads: ",
			    task, task->map->pmap->stats.resident_count, task->thread_count);
		} else {
			paniclog_append_noflush("Panicked task %p: %d threads: ",
			    task, task->thread_count);
		}

		if (validate_ptr((vm_offset_t)task->bsd_info, 1, "bsd_info")) {
			char name[MAXCOMLEN + 1];
			int pid = proc_pid(task->bsd_info);
			proc_name_kdp(task, name, sizeof(name));
			paniclog_append_noflush("pid %d: %s", pid, name);
		} else {
			paniclog_append_noflush("unknown task");
		}

		paniclog_append_noflush("\n");
	}

	if (cur_fp < VM_MAX_KERNEL_ADDRESS) {
		paniclog_append_noflush("Panicked thread: %p, backtrace: 0x%llx, tid: %llu\n",
		    cur_thread, (addr64_t)cur_fp, thread_tid(cur_thread));
#if __LP64__
		print_one_backtrace(kernel_pmap, cur_fp, nohilite_thread_marker, TRUE);
#else
		print_one_backtrace(kernel_pmap, cur_fp, nohilite_thread_marker, FALSE);
#endif
	} else {
		paniclog_append_noflush("Could not print panicked thread backtrace: "
		    "frame pointer outside kernel vm.\n");
	}

	paniclog_append_noflush("\n");
	panic_info->eph_panic_log_len = PE_get_offset_into_panic_region(debug_buf_ptr) - panic_info->eph_panic_log_offset;
	/* set the os version data in the panic header in the format 'Product Version (OS Version)' (only if they have been set) */
	if ((osversion[0] != '\0') && (osproductversion[0] != '\0')) {
		snprintf((char *)&panic_info->eph_os_version, sizeof(panic_info->eph_os_version), PANIC_HEADER_VERSION_FMT_STR,
		    osproductversion, osversion);
	}
#if defined(XNU_TARGET_OS_BRIDGE)
	if ((macosversion[0] != '\0') && (macosproductversion[0] != '\0')) {
		snprintf((char *)&panic_info->eph_macos_version, sizeof(panic_info->eph_macos_version), PANIC_HEADER_VERSION_FMT_STR,
		    macosproductversion, macosversion);
	}
#endif

	if (debug_ack_timeout_count) {
		panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_DEBUGGERSYNC;
		panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
		paniclog_append_noflush("!! debugger synchronization failed, no stackshot !!\n");
	} else if (stackshot_active()) {
		panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_NESTED;
		panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
		paniclog_append_noflush("!! panicked during stackshot, skipping panic stackshot !!\n");
	} else {
		/* Align the stackshot buffer to an 8-byte address (especially important for armv7k devices) */
		debug_buf_ptr += (8 - ((uintptr_t)debug_buf_ptr % 8));
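		/*
		 * Illustrative note: the bump above always advances the
		 * pointer by 1..8 bytes (a full 8 when it is already
		 * aligned), which still yields an 8-byte-aligned kcdata
		 * buffer at the cost of a few slack bytes.
		 */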
		stackshot_begin_loc = debug_buf_ptr;

		bytes_remaining = debug_buf_size - (unsigned int)((uintptr_t)stackshot_begin_loc - (uintptr_t)debug_buf_base);
		err = kcdata_memory_static_init(&kc_panic_data, (mach_vm_address_t)debug_buf_ptr,
		    KCDATA_BUFFER_BEGIN_STACKSHOT, bytes_remaining - end_marker_bytes,
		    KCFLAG_USE_MEMCOPY);
		if (err == KERN_SUCCESS) {
			kdp_snapshot_preflight(-1, stackshot_begin_loc, bytes_remaining - end_marker_bytes,
			    (STACKSHOT_GET_GLOBAL_MEM_STATS | STACKSHOT_SAVE_LOADINFO | STACKSHOT_KCDATA_FORMAT |
			    STACKSHOT_ENABLE_BT_FAULTING | STACKSHOT_ENABLE_UUID_FAULTING | STACKSHOT_FROM_PANIC |
			    STACKSHOT_NO_IO_STATS | STACKSHOT_THREAD_WAITINFO), &kc_panic_data, 0);
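			/*
			 * Illustrative note: kdp_snapshot_preflight() only
			 * records the parameters (pid -1 = all processes,
			 * buffer, size, flags); the stackshot itself is taken
			 * by the do_stackshot() call below, and its size is
			 * queried via kdp_stack_snapshot_bytes_traced().
			 */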
			err = do_stackshot(NULL);
			bytes_traced = kdp_stack_snapshot_bytes_traced();
			if (bytes_traced > 0 && !err) {
				debug_buf_ptr += bytes_traced;
				panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_SUCCEEDED;
				panic_info->eph_stackshot_offset = PE_get_offset_into_panic_region(stackshot_begin_loc);
				panic_info->eph_stackshot_len = bytes_traced;

				panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
				paniclog_append_noflush("\n** Stackshot Succeeded ** Bytes Traced %d **\n", bytes_traced);
			} else {
				bytes_used = kcdata_memory_get_used_bytes(&kc_panic_data);
				if (bytes_used > 0) {
					/* Zero out the stackshot data */
					bzero(stackshot_begin_loc, bytes_used);
					panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_INCOMPLETE;

					panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
					paniclog_append_noflush("\n** Stackshot Incomplete ** Bytes Filled %llu **\n", bytes_used);
				} else {
					bzero(stackshot_begin_loc, bytes_used);
					panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_ERROR;

					panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
					paniclog_append_noflush("\n!! Stackshot Failed !! Bytes Traced %d, err %d\n", bytes_traced, err);
				}
			}
		} else {
			panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_ERROR;
			panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
			paniclog_append_noflush("\n!! Stackshot Failed !!\nkcdata_memory_static_init returned %d", err);
		}
	}

	assert(panic_info->eph_other_log_offset != 0);

	if (print_vnodes != 0) {
		panic_print_vnodes();
	}

	panic_bt_depth--;
}

/*
 * Entry to print_all_backtraces is serialized by the debugger lock
 */
static void
print_all_backtraces(const char *message)
{
	unsigned int initial_not_in_kdp = not_in_kdp;

	cpu_data_t * cpu_data_ptr = getCpuDatap();

	assert(cpu_data_ptr->PAB_active == FALSE);
	cpu_data_ptr->PAB_active = TRUE;

	/*
	 * Because print all backtraces uses the pmap routines, it needs to
	 * avoid taking pmap locks. Right now, this is conditionalized on
	 * not_in_kdp.
	 */
	not_in_kdp = 0;
	do_print_all_backtraces(message);

	not_in_kdp = initial_not_in_kdp;

	cpu_data_ptr->PAB_active = FALSE;
}

void
panic_display_times()
{
	if (kdp_clock_is_locked()) {
		paniclog_append_noflush("Warning: clock is locked. Can't get time\n");
		return;
	}

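	/*
	 * Illustrative note: the simple_lock_try()/simple_unlock() pair below
	 * only probes that clock_lock is not held (e.g. by a CPU that
	 * panicked mid-update); if the try fails, the times are skipped
	 * rather than risking a hang on the panic path.
	 */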
	if ((is_clock_configured) && (simple_lock_try(&clock_lock, LCK_GRP_NULL))) {
		clock_sec_t secs, boot_secs;
		clock_usec_t usecs, boot_usecs;

		simple_unlock(&clock_lock);

		clock_get_calendar_microtime(&secs, &usecs);
		clock_get_boottime_microtime(&boot_secs, &boot_usecs);

		paniclog_append_noflush("mach_absolute_time: 0x%llx\n", mach_absolute_time());
		paniclog_append_noflush("Epoch Time:        sec       usec\n");
		paniclog_append_noflush("  Boot    : 0x%08x 0x%08x\n", (unsigned int)boot_secs, (unsigned int)boot_usecs);
		paniclog_append_noflush("  Sleep   : 0x%08x 0x%08x\n", (unsigned int)gIOLastSleepTime.tv_sec, (unsigned int)gIOLastSleepTime.tv_usec);
		paniclog_append_noflush("  Wake    : 0x%08x 0x%08x\n", (unsigned int)gIOLastWakeTime.tv_sec, (unsigned int)gIOLastWakeTime.tv_usec);
		paniclog_append_noflush("  Calendar: 0x%08x 0x%08x\n\n", (unsigned int)secs, (unsigned int)usecs);
	}
}

void
panic_print_symbol_name(vm_address_t search)
{
#pragma unused(search)
	// empty stub. Really only used on x86_64.
	return;
}

void
SavePanicInfo(
	const char *message, __unused void *panic_data, __unused uint64_t panic_options)
{
	/* This should be initialized by the time we get here */
	assert(panic_info->eph_panic_log_offset != 0);

	if (panic_options & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
		panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_BUTTON_RESET_PANIC;
	}

	if (panic_options & DEBUGGER_OPTION_COPROC_INITIATED_PANIC) {
		panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COPROC_INITIATED_PANIC;
	}

#if defined(XNU_TARGET_OS_BRIDGE)
	panic_info->eph_x86_power_state = PE_smc_stashed_x86_power_state;
	panic_info->eph_x86_efi_boot_state = PE_smc_stashed_x86_efi_boot_state;
	panic_info->eph_x86_system_state = PE_smc_stashed_x86_system_state;
#endif

	/*
	 * On newer targets, panic data is stored directly into the iBoot panic region.
	 * If we re-enter SavePanicInfo (e.g. on a double panic) on such a target, update the
	 * panic CRC so that iBoot can hopefully find *something* useful in the panic region.
	 */
	if (PanicInfoSaved && (debug_buf_base >= (char*)gPanicBase) && (debug_buf_base < (char*)gPanicBase + gPanicSize)) {
		unsigned int pi_size = (unsigned int)(debug_buf_ptr - gPanicBase);
		PE_save_buffer_to_vram((unsigned char*)gPanicBase, &pi_size);
		PE_sync_panic_buffers(); // extra precaution; panic path likely isn't reliable if we're here
	}

	if (PanicInfoSaved || (debug_buf_size == 0)) {
		return;
	}

	PanicInfoSaved = TRUE;

	print_all_backtraces(message);

	assert(panic_info->eph_panic_log_len != 0);
	panic_info->eph_other_log_len = PE_get_offset_into_panic_region(debug_buf_ptr) - panic_info->eph_other_log_offset;

	PEHaltRestart(kPEPanicSync);

	/*
	 * Notifies registered IOPlatformPanicAction callbacks
	 * (which includes one to disable the memcache) and flushes
	 * the buffer contents from the cache
	 */
	paniclog_flush();
}

void
paniclog_flush()
{
	unsigned int panicbuf_length = 0;

	panicbuf_length = (unsigned int)(debug_buf_ptr - gPanicBase);
	if (!panicbuf_length) {
		return;
	}

	/*
	 * Updates the log length of the last part of the panic log.
	 */
	panic_info->eph_other_log_len = PE_get_offset_into_panic_region(debug_buf_ptr) - panic_info->eph_other_log_offset;

	/*
	 * Updates the metadata at the beginning of the panic buffer,
	 * updates the CRC.
	 */
	PE_save_buffer_to_vram((unsigned char *)gPanicBase, &panicbuf_length);

	/*
	 * This is currently unused by platform KEXTs on embedded but is
	 * kept for compatibility with the published IOKit interfaces.
	 */
	PESavePanicInfo((unsigned char *)gPanicBase, panicbuf_length);

	PE_sync_panic_buffers();
}

/*
 * @function DebuggerXCallEnter
 *
 * @abstract IPI other cores so this core can run in a single-threaded context.
 *
 * @discussion This function should be called with the debugger lock held. It
 * signals the other cores to go into a busy loop so this core can run in a
 * single-threaded context and inspect kernel memory.
 *
 * @param proceed_on_sync_failure If true, then go ahead and try to debug even
 * if we can't synch with the other cores. This is inherently unsafe and should
 * only be used if the kernel is going down in flames anyway.
 *
 * @result returns KERN_OPERATION_TIMED_OUT if synchronization times out and
 * proceed_on_sync_failure is false.
 */
kern_return_t
DebuggerXCallEnter(
	boolean_t proceed_on_sync_failure)
{
	uint64_t max_mabs_time, current_mabs_time;
	int cpu;
	int max_cpu;
	cpu_data_t *target_cpu_datap;
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	/* Check for nested debugger entry. */
	cpu_data_ptr->debugger_active++;
	if (cpu_data_ptr->debugger_active != 1) {
		return KERN_SUCCESS;
	}

	/*
	 * If debugger_sync is not 0, someone responded excessively late to the last
	 * debug request (we zero the sync variable in the return function). Zero it
	 * again here. This should prevent us from getting out of sync (heh) and
	 * timing out on every entry to the debugger if we timeout once.
	 */

	debugger_sync = 0;
	mp_kdp_trap = 1;

	/*
	 * We need a barrier here to ensure CPUs see mp_kdp_trap and spin when responding
	 * to the signal.
	 */
	__builtin_arm_dmb(DMB_ISH);

	/*
	 * Try to signal all CPUs (except ourselves, of course). Use debugger_sync to
	 * synchronize with every CPU that we appeared to signal successfully (cpu_signal
	 * is not synchronous).
	 */
	bool cpu_signal_failed = false;
	max_cpu = ml_get_max_cpu_number();

	boolean_t immediate_halt = FALSE;
	if (proceed_on_sync_failure && force_immediate_debug_halt) {
		immediate_halt = TRUE;
	}

	if (!immediate_halt) {
		for (cpu = 0; cpu <= max_cpu; cpu++) {
			target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

			if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr)) {
				continue;
			}

			if (KERN_SUCCESS == cpu_signal(target_cpu_datap, SIGPdebug, (void *)NULL, NULL)) {
				(void)hw_atomic_add(&debugger_sync, 1);
			} else {
				cpu_signal_failed = true;
				kprintf("cpu_signal failed in DebuggerXCallEnter\n");
			}
		}

		nanoseconds_to_absolutetime(DEBUG_ACK_TIMEOUT, &max_mabs_time);
		current_mabs_time = mach_absolute_time();
		max_mabs_time += current_mabs_time;
		assert(max_mabs_time > current_mabs_time);

		/*
		 * Wait for DEBUG_ACK_TIMEOUT ns for a response from everyone we IPI'd. If we
		 * timeout, that is simply too bad; we don't have a true NMI, and one CPU may be
		 * uninterruptibly spinning on someone else. The best we can hope for is that
		 * all other CPUs have either responded or are spinning in a context that is
		 * debugger safe.
		 */
		while ((debugger_sync != 0) && (current_mabs_time < max_mabs_time)) {
			current_mabs_time = mach_absolute_time();
		}
	}

	if (cpu_signal_failed && !proceed_on_sync_failure) {
		DebuggerXCallReturn();
		return KERN_FAILURE;
	} else if (immediate_halt || (current_mabs_time >= max_mabs_time)) {
		/*
		 * For the moment, we're aiming for a timeout that the user shouldn't notice,
		 * but will be sufficient to let the other core respond.
		 */
		__builtin_arm_dmb(DMB_ISH);
		for (cpu = 0; cpu <= max_cpu; cpu++) {
			target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

			if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr)) {
				continue;
			}
			if (!(target_cpu_datap->cpu_signal & SIGPdebug) && !immediate_halt) {
				continue;
			}
			if (proceed_on_sync_failure) {
				paniclog_append_noflush("Attempting to forcibly halt cpu %d\n", cpu);
				dbgwrap_status_t halt_status = ml_dbgwrap_halt_cpu(cpu, 0);
				if (halt_status < 0) {
					paniclog_append_noflush("cpu %d failed to halt with error %d: %s\n", cpu, halt_status, ml_dbgwrap_strerror(halt_status));
				} else {
					if (halt_status > 0) {
						paniclog_append_noflush("cpu %d halted with warning %d: %s\n", cpu, halt_status, ml_dbgwrap_strerror(halt_status));
					} else {
						paniclog_append_noflush("cpu %d successfully halted\n", cpu);
					}
					target_cpu_datap->halt_status = CPU_HALTED;
				}
			} else {
				kprintf("Debugger synch pending on cpu %d\n", cpu);
			}
		}
		if (proceed_on_sync_failure) {
			for (cpu = 0; cpu <= max_cpu; cpu++) {
				target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

				if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr) ||
				    (target_cpu_datap->halt_status == CPU_NOT_HALTED)) {
					continue;
				}
				dbgwrap_status_t halt_status = ml_dbgwrap_halt_cpu_with_state(cpu,
				    NSEC_PER_SEC, &target_cpu_datap->halt_state);
				if ((halt_status < 0) || (halt_status == DBGWRAP_WARN_CPU_OFFLINE)) {
					paniclog_append_noflush("Unable to obtain state for cpu %d with status %d: %s\n", cpu, halt_status, ml_dbgwrap_strerror(halt_status));
				} else {
					target_cpu_datap->halt_status = CPU_HALTED_WITH_STATE;
				}
			}
			if (immediate_halt) {
				paniclog_append_noflush("Immediate halt requested on all cores\n");
			} else {
				paniclog_append_noflush("Debugger synchronization timed out; waited %llu nanoseconds\n", DEBUG_ACK_TIMEOUT);
			}
			debug_ack_timeout_count++;
			return KERN_SUCCESS;
		} else {
			DebuggerXCallReturn();
			return KERN_OPERATION_TIMED_OUT;
		}
	} else {
		return KERN_SUCCESS;
	}
}

/*
 * @function DebuggerXCallReturn
 *
 * @abstract Resume normal multicore operation after DebuggerXCallEnter()
 *
 * @discussion This function should be called with debugger lock held.
 */
void
DebuggerXCallReturn(
	void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	cpu_data_ptr->debugger_active--;
	if (cpu_data_ptr->debugger_active != 0) {
		return;
	}

	mp_kdp_trap = 0;
	debugger_sync = 0;

	/* Do we need a barrier here? */
	__builtin_arm_dmb(DMB_ISH);
}
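
/*
 * Illustrative usage (a sketch, not from this file): the debugger path
 * brackets single-threaded inspection with the XCall pair, e.g.
 *
 *	if (DebuggerXCallEnter(FALSE) == KERN_SUCCESS) {
 *		// inspect kernel state while other cores spin on mp_kdp_trap
 *		DebuggerXCallReturn();
 *	}
 *	// On KERN_FAILURE / KERN_OPERATION_TIMED_OUT, DebuggerXCallEnter
 *	// has already called DebuggerXCallReturn() internally.
 */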

void
DebuggerXCall(
	void *ctx)
{
	boolean_t save_context = FALSE;
	vm_offset_t kstackptr = 0;
	arm_saved_state_t *regs = (arm_saved_state_t *) ctx;

	if (regs != NULL) {
#if defined(__arm64__)
		save_context = PSR64_IS_KERNEL(get_saved_state_cpsr(regs));
#else
		save_context = PSR_IS_KERNEL(regs->cpsr);
#endif
	}

	kstackptr = current_thread()->machine.kstackptr;
	arm_saved_state_t *state = (arm_saved_state_t *)kstackptr;

	if (save_context) {
		/* Save the interrupted context before acknowledging the signal */
		*state = *regs;
	} else if (regs) {
		/* zero old state so machine_trace_thread knows not to backtrace it */
		set_saved_state_fp(state, 0);
		set_saved_state_pc(state, 0);
		set_saved_state_lr(state, 0);
		set_saved_state_sp(state, 0);
	}

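	/*
	 * Acknowledge the cross-call: the debugger CPU waits for
	 * debugger_sync to drain to zero before it starts inspecting state,
	 * after which this core spins on mp_kdp_trap until released by
	 * DebuggerXCallReturn().
	 */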
	(void)hw_atomic_sub(&debugger_sync, 1);
	__builtin_arm_dmb(DMB_ISH);
	while (mp_kdp_trap) {
		;
	}

	/* Any cleanup for our pushed context should go here */
}


void
DebuggerCall(
	unsigned int reason,
	void *ctx)
{
#if !MACH_KDP
#pragma unused(reason,ctx)
#endif /* !MACH_KDP */

#if ALTERNATE_DEBUGGER
	alternate_debugger_enter();
#endif

#if MACH_KDP
	kdp_trap(reason, (struct arm_saved_state *)ctx);
#else
	/* TODO: decide what to do if no debugger config */
#endif
}