/*
 * Copyright (c) 2007-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <kern/thread.h>
#include <machine/pmap.h>
#include <device/device_types.h>

#include <mach/vm_param.h>
#include <mach/clock_types.h>
#include <mach/machine.h>
#include <mach/kmod.h>
#include <pexpert/boot.h>
#include <pexpert/pexpert.h>

#include <kern/misc_protos.h>
#include <kern/startup.h>
#include <kern/clock.h>
#include <kern/debug.h>
#include <kern/processor.h>
#include <kdp/kdp_core.h>
#if ALTERNATE_DEBUGGER
#include <arm64/alternate_debugger.h>
#endif
#include <machine/atomic.h>
#include <machine/trap.h>
#include <pexpert/pexpert.h>
#include <kdp/kdp_callout.h>
#include <kdp/kdp_dyld.h>
#include <kdp/kdp_internal.h>
#include <uuid/uuid.h>

#include <IOKit/IOPlatformExpert.h>

#include <mach/vm_prot.h>
#include <vm/vm_map.h>
#include <vm/vm_shared_region.h>
#include <mach/time_value.h>
#include <machine/machparam.h> /* for btop */

#include <console/video_console.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <arm/misc_protos.h>
#include <libkern/OSKextLibPrivate.h>
#include <vm/vm_kern.h>
#include <kern/kern_cdata.h>
void kdp_trap(unsigned int, struct arm_saved_state *);

extern kern_return_t do_stackshot(void *);
extern void kdp_snapshot_preflight(int pid, void *tracebuf,
    uint32_t tracebuf_size, uint32_t flags,
    kcdata_descriptor_t data_p,
    boolean_t enable_faulting);
extern int kdp_stack_snapshot_bytes_traced(void);
/*
 * Increment the PANICLOG_VERSION if you change the format of the panic
 * log in any way.
 */
#define PANICLOG_VERSION 8
static struct kcdata_descriptor kc_panic_data;
extern char firmware_version[];
extern volatile uint32_t debug_enabled;
extern unsigned int not_in_kdp;

extern int copyinframe(vm_address_t fp, uint32_t *frame);
extern void kdp_callouts(kdp_event_t event);

/* #include <sys/proc.h> */
extern int proc_pid(void *p);
extern void proc_name_kdp(task_t, char *, int);

extern const char version[];
extern char osversion[];
extern uint8_t gPlatformECID[8];
extern uint32_t gPlatformMemoryID;

extern uint64_t last_hwaccess_thread;
/*
 * gTargetTypeBuffer is sized at 8 bytes and gModelTypeBuffer at 32 bytes
 * because the target name and model name typically don't exceed these sizes.
 */
extern char gTargetTypeBuffer[8];
extern char gModelTypeBuffer[32];
decl_simple_lock_data(extern, clock_lock)
extern struct timeval gIOLastSleepTime;
extern struct timeval gIOLastWakeTime;
extern boolean_t is_clock_configured;
extern uuid_t kernelcache_uuid;
/* Definitions for frame pointers */
#define FP_ALIGNMENT_MASK ((uint32_t)(0x3))
#define FP_LR_OFFSET ((uint32_t)4)
#define FP_LR_OFFSET64 ((uint32_t)8)
#define FP_MAX_NUM_TO_EVALUATE (50)

/* Timeout (in nanoseconds) for all processors responding to debug crosscall */
#define DEBUG_ACK_TIMEOUT ((uint64_t) 10000000)
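/* For reference: 10,000,000 ns is a 10 ms window for the crosscall acks. */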
/* Forward function definitions */
void panic_display_times(void);
void panic_print_symbol_name(vm_address_t search);
/* Global variables */
static uint32_t panic_bt_depth;
boolean_t PanicInfoSaved = FALSE;
boolean_t force_immediate_debug_halt = FALSE;
unsigned int debug_ack_timeout_count = 0;
volatile unsigned int debugger_sync = 0;
volatile unsigned int mp_kdp_trap = 0; /* CPUs signalled by the debug CPU will spin on this */
unsigned int DebugContextCount = 0;
// Convenient macros to easily validate one or more pointers if
// they have defined types
#define VALIDATE_PTR(ptr) \
    validate_ptr((vm_offset_t)(ptr), sizeof(*(ptr)), #ptr)

#define VALIDATE_PTR_2(ptr0, ptr1) \
    VALIDATE_PTR(ptr0) && VALIDATE_PTR(ptr1)

#define VALIDATE_PTR_3(ptr0, ptr1, ptr2) \
    VALIDATE_PTR_2(ptr0, ptr1) && VALIDATE_PTR(ptr2)

#define VALIDATE_PTR_4(ptr0, ptr1, ptr2, ptr3) \
    VALIDATE_PTR_2(ptr0, ptr1) && VALIDATE_PTR_2(ptr2, ptr3)

#define GET_MACRO(_1,_2,_3,_4,NAME,...) NAME

#define VALIDATE_PTR_LIST(...) GET_MACRO(__VA_ARGS__, VALIDATE_PTR_4, VALIDATE_PTR_3, VALIDATE_PTR_2, VALIDATE_PTR)(__VA_ARGS__)
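/*
 * Illustrative expansion: GET_MACRO always yields its fifth argument, so
 * padding the caller's arguments with the VALIDATE_PTR_* names makes the
 * argument count select the right arity. For example:
 *
 *   VALIDATE_PTR_LIST(a, b)
 *     -> GET_MACRO(a, b, VALIDATE_PTR_4, VALIDATE_PTR_3, VALIDATE_PTR_2, VALIDATE_PTR)(a, b)
 *     -> VALIDATE_PTR_2(a, b)
 *     -> VALIDATE_PTR(a) && VALIDATE_PTR(b)
 */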
/*
 * Evaluate if a pointer is valid
 * Print a message if pointer is invalid
 */
static boolean_t
validate_ptr(
    vm_offset_t ptr, vm_size_t size, const char *ptr_name)
{
    if (ptr) {
        if (ml_validate_nofault(ptr, size)) {
            return TRUE;
        } else {
            paniclog_append_noflush("Invalid %s pointer: %p size: %d\n",
                ptr_name, (void *)ptr, (int)size);
            return FALSE;
        }
    } else {
        paniclog_append_noflush("NULL %s pointer\n", ptr_name);
        return FALSE;
    }
}
/*
 * Backtrace a single frame.
 */
static void
print_one_backtrace(pmap_t pmap, vm_offset_t topfp, const char *cur_marker,
    boolean_t is_64_bit)
{
    int         i = 0;
    addr64_t    lr;
    addr64_t    fp;
    addr64_t    fp_for_ppn;
    ppnum_t     ppn;
    boolean_t   dump_kernel_stack;

    fp = topfp;
    fp_for_ppn = 0;
    ppn = (ppnum_t)NULL;

    if (fp >= VM_MIN_KERNEL_ADDRESS)
        dump_kernel_stack = TRUE;
    else
        dump_kernel_stack = FALSE;

    do {
        if ((fp == 0) || ((fp & FP_ALIGNMENT_MASK) != 0))
            break;
        if (dump_kernel_stack && ((fp < VM_MIN_KERNEL_ADDRESS) || (fp > VM_MAX_KERNEL_ADDRESS)))
            break;
        if ((!dump_kernel_stack) && (fp >= VM_MIN_KERNEL_ADDRESS))
            break;

        /*
         * Check to see if current address will result in a different
         * ppn than previously computed (to avoid recomputation) via
         * (addr) ^ fp_for_ppn) >> PAGE_SHIFT)
         */
        if ((((fp + FP_LR_OFFSET) ^ fp_for_ppn) >> PAGE_SHIFT) != 0x0U) {
            ppn = pmap_find_phys(pmap, fp + FP_LR_OFFSET);
            fp_for_ppn = fp + (is_64_bit ? FP_LR_OFFSET64 : FP_LR_OFFSET);
        }
        if (ppn != (ppnum_t)NULL) {
            if (is_64_bit) {
                lr = ml_phys_read_double_64(((((vm_offset_t)ppn) << PAGE_SHIFT)) | ((fp + FP_LR_OFFSET64) & PAGE_MASK));
            } else {
                lr = ml_phys_read_word(((((vm_offset_t)ppn) << PAGE_SHIFT)) | ((fp + FP_LR_OFFSET) & PAGE_MASK));
            }
        } else {
            if (is_64_bit) {
                paniclog_append_noflush("%s\t Could not read LR from frame at 0x%016llx\n", cur_marker, fp + FP_LR_OFFSET64);
            } else {
                paniclog_append_noflush("%s\t Could not read LR from frame at 0x%08x\n", cur_marker, (uint32_t)(fp + FP_LR_OFFSET));
            }
            break;
        }
        if (((fp ^ fp_for_ppn) >> PAGE_SHIFT) != 0x0U) {
            ppn = pmap_find_phys(pmap, fp);
            fp_for_ppn = fp;
        }
        if (ppn != (ppnum_t)NULL) {
            if (is_64_bit) {
                fp = ml_phys_read_double_64(((((vm_offset_t)ppn) << PAGE_SHIFT)) | (fp & PAGE_MASK));
            } else {
                fp = ml_phys_read_word(((((vm_offset_t)ppn) << PAGE_SHIFT)) | (fp & PAGE_MASK));
            }
        } else {
            if (is_64_bit) {
                paniclog_append_noflush("%s\t Could not read FP from frame at 0x%016llx\n", cur_marker, fp);
            } else {
                paniclog_append_noflush("%s\t Could not read FP from frame at 0x%08x\n", cur_marker, (uint32_t)fp);
            }
            break;
        }

        if (lr) {
            if (is_64_bit) {
                paniclog_append_noflush("%s\t lr: 0x%016llx fp: 0x%016llx\n", cur_marker, lr, fp);
            } else {
                paniclog_append_noflush("%s\t lr: 0x%08x fp: 0x%08x\n", cur_marker, (uint32_t)lr, (uint32_t)fp);
            }
        }
    } while ((++i < FP_MAX_NUM_TO_EVALUATE) && (fp != topfp));
}
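/*
 * For reference, the frame-record layout assumed by the walk above (standard
 * ARM/arm64 calling conventions):
 *
 *   fp + 0                 -> saved frame pointer (next frame up the stack)
 *   fp + FP_LR_OFFSET(64)  -> saved link register (return address)
 *
 * The walk stops on a NULL or misaligned fp, an fp that crosses out of the
 * address range implied by the first frame, or after FP_MAX_NUM_TO_EVALUATE
 * frames.
 */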
#define SANE_TASK_LIMIT 256
#define TOP_RUNNABLE_LIMIT 5
#define PANICLOG_UUID_BUF_SIZE 256

extern void panic_print_vnodes(void);
static void
do_print_all_backtraces(
    const char *message)
{
    int logversion = PANICLOG_VERSION;
    thread_t cur_thread = current_thread();
    uintptr_t cur_fp;
    task_t task;
    int i;
    size_t index;
    int print_vnodes = 0;
    const char *nohilite_thread_marker = "\t";

    /* end_marker_bytes set to 200 for printing END marker + stackshot summary info always */
    int bytes_traced = 0, bytes_remaining = 0, end_marker_bytes = 200;
    uint64_t bytes_used = 0ULL;
    int err = 0;
    char *stackshot_begin_loc = NULL;

#if defined(__arm__)
    __asm__ volatile("mov %0, r7" : "=r"(cur_fp));
#elif defined(__arm64__)
    __asm__ volatile("add %0, xzr, fp" : "=r"(cur_fp));
#else
#error Unknown architecture.
#endif
    if (panic_bt_depth != 0)
        return;
    panic_bt_depth++;
    /* Truncate panic string to 1200 bytes -- WDT log can be ~1100 bytes */
    paniclog_append_noflush("Debugger message: %.1200s\n", message);
    if (debug_enabled) {
        paniclog_append_noflush("Device: %s\n",
            ('\0' != gTargetTypeBuffer[0]) ? gTargetTypeBuffer : "Not set yet");
        paniclog_append_noflush("Hardware Model: %s\n",
            ('\0' != gModelTypeBuffer[0]) ? gModelTypeBuffer : "Not set yet");
        paniclog_append_noflush("ECID: %02X%02X%02X%02X%02X%02X%02X%02X\n", gPlatformECID[7],
            gPlatformECID[6], gPlatformECID[5], gPlatformECID[4], gPlatformECID[3],
            gPlatformECID[2], gPlatformECID[1], gPlatformECID[0]);
        if (last_hwaccess_thread) {
            paniclog_append_noflush("AppleHWAccess Thread: 0x%llx\n", last_hwaccess_thread);
        }
    }
    paniclog_append_noflush("Memory ID: 0x%x\n", gPlatformMemoryID);
    paniclog_append_noflush("OS version: %.256s\n",
        ('\0' != osversion[0]) ? osversion : "Not set yet");
    paniclog_append_noflush("Kernel version: %.512s\n", version);
    paniclog_append_noflush("KernelCache UUID: ");
    for (index = 0; index < sizeof(uuid_t); index++) {
        paniclog_append_noflush("%02X", kernelcache_uuid[index]);
    }
    paniclog_append_noflush("\n");

    paniclog_append_noflush("iBoot version: %.128s\n", firmware_version);
    paniclog_append_noflush("secure boot?: %s\n", debug_enabled ? "NO" : "YES");
    paniclog_append_noflush("Paniclog version: %d\n", logversion);

    panic_display_kernel_aslr();
    panic_display_times();
    panic_display_zprint();
#if CONFIG_ZLEAKS
    panic_display_ztrace();
#endif /* CONFIG_ZLEAKS */
#if CONFIG_ECC_LOGGING
    panic_display_ecc_errors();
#endif /* CONFIG_ECC_LOGGING */
    // Just print threads with high CPU usage for WDT timeouts
    if (strncmp(message, "WDT timeout", 11) == 0) {
        thread_t top_runnable[5] = {0};
        thread_t thread;
        int total_cpu_usage = 0;

        print_vnodes = 1;

        for (thread = (thread_t)queue_first(&threads);
            VALIDATE_PTR(thread) && !queue_end(&threads, (queue_entry_t)thread);
            thread = (thread_t)queue_next(&thread->threads)) {

            total_cpu_usage += thread->cpu_usage;

            // Look for the 5 runnable threads with highest priority
            if (thread->state & TH_RUN) {
                int k;
                thread_t comparison_thread = thread;

                for (k = 0; k < TOP_RUNNABLE_LIMIT; k++) {
                    if (top_runnable[k] == 0) {
                        top_runnable[k] = comparison_thread;
                        break;
                    } else if (comparison_thread->sched_pri > top_runnable[k]->sched_pri) {
                        thread_t temp = top_runnable[k];
                        top_runnable[k] = comparison_thread;
                        comparison_thread = temp;
                    } // if comparison thread has higher priority than previously saved thread
                } // loop through highest priority runnable threads
            } // Check if thread is runnable
        } // Loop through all threads

        // Print the relevant info for each thread identified
        paniclog_append_noflush("Total cpu_usage: %d\n", total_cpu_usage);
        paniclog_append_noflush("Thread          task            pri cpu_usage\n");

        for (i = 0; i < TOP_RUNNABLE_LIMIT; i++) {
            if (top_runnable[i] && VALIDATE_PTR(top_runnable[i]->task) &&
                validate_ptr((vm_offset_t)top_runnable[i]->task->bsd_info, 1, "bsd_info")) {

                char name[MAXCOMLEN + 1];
                proc_name_kdp(top_runnable[i]->task, name, sizeof(name));
                paniclog_append_noflush("%p %s %d %d\n",
                    top_runnable[i], name, top_runnable[i]->sched_pri, top_runnable[i]->cpu_usage);
            }
        } // Loop through highest priority runnable threads
        paniclog_append_noflush("\n");
    } // Check if message is "WDT timeout"
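    /*
     * Note (summary of the loop above): top_runnable is kept as a small,
     * priority-ordered list. A runnable thread is swapped into the first
     * slot whose occupant has a lower sched_pri, and the displaced thread
     * continues down the list, so once all TOP_RUNNABLE_LIMIT slots are
     * full the lowest-priority candidate simply falls off the end.
     */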
    // print current task info
    if (VALIDATE_PTR_LIST(cur_thread, cur_thread->task)) {

        task = cur_thread->task;

        if (VALIDATE_PTR_LIST(task->map, task->map->pmap)) {
            paniclog_append_noflush("Panicked task %p: %d pages, %d threads: ",
                task, task->map->pmap->stats.resident_count, task->thread_count);
        } else {
            paniclog_append_noflush("Panicked task %p: %d threads: ",
                task, task->thread_count);
        }

        if (validate_ptr((vm_offset_t)task->bsd_info, 1, "bsd_info")) {
            char name[MAXCOMLEN + 1];
            int pid = proc_pid(task->bsd_info);
            proc_name_kdp(task, name, sizeof(name));
            paniclog_append_noflush("pid %d: %s", pid, name);
        } else {
            paniclog_append_noflush("unknown task");
        }

        paniclog_append_noflush("\n");
    }

    if (cur_fp < VM_MAX_KERNEL_ADDRESS) {
        paniclog_append_noflush("Panicked thread: %p, backtrace: 0x%llx, tid: %llu\n",
            cur_thread, (addr64_t)cur_fp, thread_tid(cur_thread));
#if __LP64__
        print_one_backtrace(kernel_pmap, cur_fp, nohilite_thread_marker, TRUE);
#else
        print_one_backtrace(kernel_pmap, cur_fp, nohilite_thread_marker, FALSE);
#endif
    } else {
        paniclog_append_noflush("Could not print panicked thread backtrace:"
            "frame pointer outside kernel vm.\n");
    }
    paniclog_append_noflush("\n");
    panic_info->eph_panic_log_len = PE_get_offset_into_panic_region(debug_buf_ptr) - panic_info->eph_panic_log_offset;

    if (debug_ack_timeout_count) {
        panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_DEBUGGERSYNC;
        panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
        paniclog_append_noflush("!! debugger synchronization failed, no stackshot !!\n");
    } else if (stackshot_active()) {
        panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_NESTED;
        panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
        paniclog_append_noflush("!! panicked during stackshot, skipping panic stackshot !!\n");
    } else {
        /* Align the stackshot buffer to an 8-byte address (especially important for armv7k devices) */
        debug_buf_ptr += (8 - ((uintptr_t)debug_buf_ptr % 8));
        stackshot_begin_loc = debug_buf_ptr;

        bytes_remaining = debug_buf_size - (unsigned int)((uintptr_t)stackshot_begin_loc - (uintptr_t)debug_buf_base);
        err = kcdata_memory_static_init(&kc_panic_data, (mach_vm_address_t)debug_buf_ptr,
                KCDATA_BUFFER_BEGIN_STACKSHOT, bytes_remaining - end_marker_bytes,
                KCFLAG_USE_MEMCOPY);
        if (err == KERN_SUCCESS) {
            kdp_snapshot_preflight(-1, stackshot_begin_loc, bytes_remaining - end_marker_bytes,
                    (STACKSHOT_GET_GLOBAL_MEM_STATS | STACKSHOT_SAVE_LOADINFO | STACKSHOT_KCDATA_FORMAT |
                     STACKSHOT_ENABLE_BT_FAULTING | STACKSHOT_ENABLE_UUID_FAULTING | STACKSHOT_FROM_PANIC |
                     STACKSHOT_NO_IO_STATS | STACKSHOT_THREAD_WAITINFO), &kc_panic_data, 0);
            err = do_stackshot(NULL);
            bytes_traced = kdp_stack_snapshot_bytes_traced();
            if (bytes_traced > 0 && !err) {
                debug_buf_ptr += bytes_traced;
                panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_SUCCEEDED;
                panic_info->eph_stackshot_offset = PE_get_offset_into_panic_region(stackshot_begin_loc);
                panic_info->eph_stackshot_len = bytes_traced;

                panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
                paniclog_append_noflush("\n** Stackshot Succeeded ** Bytes Traced %d **\n", bytes_traced);
            } else {
                bytes_used = kcdata_memory_get_used_bytes(&kc_panic_data);
                if (bytes_used > 0) {
                    /* Zero out the stackshot data */
                    bzero(stackshot_begin_loc, bytes_used);
                    panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_INCOMPLETE;

                    panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
                    paniclog_append_noflush("\n** Stackshot Incomplete ** Bytes Filled %llu **\n", bytes_used);
                } else {
                    bzero(stackshot_begin_loc, bytes_used);
                    panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_ERROR;

                    panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
                    paniclog_append_noflush("\n!! Stackshot Failed !! Bytes Traced %d, err %d\n", bytes_traced, err);
                }
            }
        } else {
            panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_ERROR;
            panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
            paniclog_append_noflush("\n!! Stackshot Failed !!\nkcdata_memory_static_init returned %d", err);
        }
    }
    assert(panic_info->eph_other_log_offset != 0);

    if (print_vnodes != 0)
        panic_print_vnodes();

    panic_bt_depth--;
}
/*
 * Entry to print_all_backtraces is serialized by the debugger lock
 */
static void
print_all_backtraces(const char *message)
{
    unsigned int initial_not_in_kdp = not_in_kdp;

    cpu_data_t *cpu_data_ptr = getCpuDatap();

    assert(cpu_data_ptr->PAB_active == FALSE);
    cpu_data_ptr->PAB_active = TRUE;

    /*
     * Because print all backtraces uses the pmap routines, it needs to
     * avoid taking pmap locks. Right now, this is conditionalized on
     * not_in_kdp.
     */
    not_in_kdp = 0;
    do_print_all_backtraces(message);

    not_in_kdp = initial_not_in_kdp;

    cpu_data_ptr->PAB_active = FALSE;
}
void
panic_display_times()
{
    if (kdp_clock_is_locked()) {
        paniclog_append_noflush("Warning: clock is locked. Can't get time\n");
        return;
    }

    if ((is_clock_configured) && (simple_lock_try(&clock_lock))) {
        clock_sec_t secs, boot_secs;
        clock_usec_t usecs, boot_usecs;

        simple_unlock(&clock_lock);

        clock_get_calendar_microtime(&secs, &usecs);
        clock_get_boottime_microtime(&boot_secs, &boot_usecs);

        paniclog_append_noflush("Epoch Time:        sec       usec\n");
        paniclog_append_noflush("  Boot    : 0x%08x 0x%08x\n", (unsigned int)boot_secs, (unsigned int)boot_usecs);
        paniclog_append_noflush("  Sleep   : 0x%08x 0x%08x\n", (unsigned int)gIOLastSleepTime.tv_sec, (unsigned int)gIOLastSleepTime.tv_usec);
        paniclog_append_noflush("  Wake    : 0x%08x 0x%08x\n", (unsigned int)gIOLastWakeTime.tv_sec, (unsigned int)gIOLastWakeTime.tv_usec);
        paniclog_append_noflush("  Calendar: 0x%08x 0x%08x\n\n", (unsigned int)secs, (unsigned int)usecs);
    }
}
void
panic_print_symbol_name(vm_address_t search)
{
#pragma unused(search)
    // empty stub. Really only used on x86_64.
}
void
SavePanicInfo(
    const char *message, __unused uint64_t panic_options)
{
    /* This should be initialized by the time we get here */
    assert(panic_info->eph_panic_log_offset != 0);

    if (panic_options & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
        panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_BUTTON_RESET_PANIC;
    }

    if (panic_options & DEBUGGER_OPTION_COPROC_INITIATED_PANIC) {
        panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COPROC_INITIATED_PANIC;
    }

    /*
     * On newer targets, panic data is stored directly into the iBoot panic region.
     * If we re-enter SavePanicInfo (e.g. on a double panic) on such a target, update the
     * panic CRC so that iBoot can hopefully find *something* useful in the panic region.
     */
    if (PanicInfoSaved && (debug_buf_base >= (char*)gPanicBase) && (debug_buf_base < (char*)gPanicBase + gPanicSize)) {
        unsigned int pi_size = (unsigned int)(debug_buf_ptr - gPanicBase);
        PE_save_buffer_to_vram((unsigned char*)gPanicBase, &pi_size);
        PE_sync_panic_buffers(); // extra precaution; panic path likely isn't reliable if we're here
    }

    if (PanicInfoSaved || (debug_buf_size == 0))
        return;

    PanicInfoSaved = TRUE;

    print_all_backtraces(message);

    assert(panic_info->eph_panic_log_len != 0);
    panic_info->eph_other_log_len = PE_get_offset_into_panic_region(debug_buf_ptr) - panic_info->eph_other_log_offset;

    PEHaltRestart(kPEPanicSync);
}
/*
 * Notifies registered IOPlatformPanicAction callbacks
 * (which includes one to disable the memcache) and flushes
 * the buffer contents from the cache
 */
void
paniclog_flush()
{
    unsigned int panicbuf_length = 0;

    panicbuf_length = (unsigned int)(debug_buf_ptr - gPanicBase);
    if (!panicbuf_length)
        return;

    /*
     * Updates the log length of the last part of the panic log.
     */
    panic_info->eph_other_log_len = PE_get_offset_into_panic_region(debug_buf_ptr) - panic_info->eph_other_log_offset;

    /*
     * Updates the metadata at the beginning of the panic buffer,
     * updates the CRC.
     */
    PE_save_buffer_to_vram((unsigned char *)gPanicBase, &panicbuf_length);

    /*
     * This is currently unused by platform KEXTs on embedded but is
     * kept for compatibility with the published IOKit interfaces.
     */
    PESavePanicInfo((unsigned char *)gPanicBase, panicbuf_length);

    PE_sync_panic_buffers();
}
/*
 * @function DebuggerXCallEnter
 *
 * @abstract IPI other cores so this core can run in a single-threaded context.
 *
 * @discussion This function should be called with the debugger lock held. It
 * signals the other cores to go into a busy loop so this core can run in a
 * single-threaded context and inspect kernel memory.
 *
 * @param proceed_on_sync_failure If true, then go ahead and try to debug even
 * if we can't synch with the other cores. This is inherently unsafe and should
 * only be used if the kernel is going down in flames anyway.
 *
 * @result returns KERN_OPERATION_TIMED_OUT if synchronization times out and
 * proceed_on_sync_failure is false.
 */
kern_return_t
DebuggerXCallEnter(
    boolean_t proceed_on_sync_failure)
{
    uint64_t max_mabs_time, current_mabs_time;
    int cpu;
    int max_cpu;
    cpu_data_t *target_cpu_datap;
    cpu_data_t *cpu_data_ptr = getCpuDatap();

    /* Check for nested debugger entry. */
    cpu_data_ptr->debugger_active++;
    if (cpu_data_ptr->debugger_active != 1)
        return KERN_SUCCESS;

    /*
     * If debugger_sync is not 0, someone responded excessively late to the last
     * debug request (we zero the sync variable in the return function). Zero it
     * again here. This should prevent us from getting out of sync (heh) and
     * timing out on every entry to the debugger if we timeout once.
     */
    debugger_sync = 0;
    mp_kdp_trap = 1;

    /*
     * We need a barrier here to ensure CPUs see mp_kdp_trap and spin when responding
     * to the signal.
     */
    __builtin_arm_dmb(DMB_ISH);

    /*
     * Try to signal all CPUs (except ourselves, of course). Use debugger_sync to
     * synchronize with every CPU that we appeared to signal successfully (cpu_signal
     * is not synchronous).
     */
    bool cpu_signal_failed = false;
    max_cpu = ml_get_max_cpu_number();

    boolean_t immediate_halt = FALSE;
    if (proceed_on_sync_failure && force_immediate_debug_halt)
        immediate_halt = TRUE;

    if (!immediate_halt) {
        for (cpu = 0; cpu <= max_cpu; cpu++) {
            target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

            if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr))
                continue;

            if (KERN_SUCCESS == cpu_signal(target_cpu_datap, SIGPdebug, (void *)NULL, NULL)) {
                (void)hw_atomic_add(&debugger_sync, 1);
            } else {
                cpu_signal_failed = true;
                kprintf("cpu_signal failed in DebuggerXCallEnter\n");
            }
        }

        nanoseconds_to_absolutetime(DEBUG_ACK_TIMEOUT, &max_mabs_time);
        current_mabs_time = mach_absolute_time();
        max_mabs_time += current_mabs_time;
        assert(max_mabs_time > current_mabs_time);

        /*
         * Wait for DEBUG_ACK_TIMEOUT ns for a response from everyone we IPI'd. If we
         * timeout, that is simply too bad; we don't have a true NMI, and one CPU may be
         * uninterruptibly spinning on someone else. The best we can hope for is that
         * all other CPUs have either responded or are spinning in a context that is
         * debugger safe.
         */
        while ((debugger_sync != 0) && (current_mabs_time < max_mabs_time))
            current_mabs_time = mach_absolute_time();
    }

    if (cpu_signal_failed && !proceed_on_sync_failure) {
        DebuggerXCallReturn();
        return KERN_FAILURE;
    } else if (immediate_halt || (current_mabs_time >= max_mabs_time)) {
        /*
         * For the moment, we're aiming for a timeout that the user shouldn't notice,
         * but will be sufficient to let the other core respond.
         */
        __builtin_arm_dmb(DMB_ISH);
        for (cpu = 0; cpu <= max_cpu; cpu++) {
            target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

            if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr))
                continue;
            if (!(target_cpu_datap->cpu_signal & SIGPdebug) && !immediate_halt)
                continue;
            if (proceed_on_sync_failure) {
                paniclog_append_noflush("Attempting to forcibly halt cpu %d\n", cpu);
                dbgwrap_status_t halt_status = ml_dbgwrap_halt_cpu(cpu, 0);
                if (halt_status < 0)
                    paniclog_append_noflush("Unable to halt cpu %d: %d\n", cpu, halt_status);
                else {
                    if (halt_status > 0)
                        paniclog_append_noflush("cpu %d halted with warning %d\n", cpu, halt_status);
                    target_cpu_datap->halt_status = CPU_HALTED;
                }
            } else
                kprintf("Debugger synch pending on cpu %d\n", cpu);
        }
        if (proceed_on_sync_failure) {
            for (cpu = 0; cpu <= max_cpu; cpu++) {
                target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

                if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr) ||
                    (target_cpu_datap->halt_status == CPU_NOT_HALTED))
                    continue;
                dbgwrap_status_t halt_status = ml_dbgwrap_halt_cpu_with_state(cpu,
                    NSEC_PER_SEC, &target_cpu_datap->halt_state);
                if ((halt_status < 0) || (halt_status == DBGWRAP_WARN_CPU_OFFLINE))
                    paniclog_append_noflush("Unable to obtain state for cpu %d: %d\n", cpu, halt_status);
                else
                    target_cpu_datap->halt_status = CPU_HALTED_WITH_STATE;
            }
            if (immediate_halt)
                paniclog_append_noflush("Immediate halt requested on all cores\n");
            else
                paniclog_append_noflush("Debugger synchronization timed out; waited %llu nanoseconds\n", DEBUG_ACK_TIMEOUT);
            debug_ack_timeout_count++;
            return KERN_SUCCESS;
        } else {
            DebuggerXCallReturn();
            return KERN_OPERATION_TIMED_OUT;
        }
    } else {
        return KERN_SUCCESS;
    }
}
/*
 * @function DebuggerXCallReturn
 *
 * @abstract Resume normal multicore operation after DebuggerXCallEnter()
 *
 * @discussion This function should be called with debugger lock held.
 */
void
DebuggerXCallReturn(
    void)
{
    cpu_data_t *cpu_data_ptr = getCpuDatap();

    cpu_data_ptr->debugger_active--;
    if (cpu_data_ptr->debugger_active != 0)
        return;

    mp_kdp_trap = 0;
    debugger_sync = 0;

    /* Do we need a barrier here? */
    __builtin_arm_dmb(DMB_ISH);
}
= FALSE
;
815 vm_offset_t kstackptr
= 0;
816 arm_saved_state_t
*regs
= (arm_saved_state_t
*) ctx
;
819 #if defined(__arm64__)
820 save_context
= PSR64_IS_KERNEL(get_saved_state_cpsr(regs
));
822 save_context
= PSR_IS_KERNEL(regs
->cpsr
);
826 kstackptr
= current_thread()->machine
.kstackptr
;
827 arm_saved_state_t
*state
= (arm_saved_state_t
*)kstackptr
;
830 /* Save the interrupted context before acknowledging the signal */
833 /* zero old state so machine_trace_thread knows not to backtrace it */
834 set_saved_state_fp(state
, 0);
835 set_saved_state_pc(state
, 0);
836 set_saved_state_lr(state
, 0);
837 set_saved_state_sp(state
, 0);
840 (void)hw_atomic_sub(&debugger_sync
, 1);
841 __builtin_arm_dmb(DMB_ISH
);
844 /* Any cleanup for our pushed context should go here */
void
DebuggerCall(
    unsigned int reason,
    void *ctx)
{
#if !MACH_KDP
#pragma unused(reason,ctx)
#endif /* !MACH_KDP */

#if ALTERNATE_DEBUGGER
    alternate_debugger_enter();
#endif

#if MACH_KDP
    kdp_trap(reason, (struct arm_saved_state *)ctx);
#else
    /* TODO: decide what to do if no debugger config */
#endif
}