/*
 * Copyright (c) 2007-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <debug.h>
#include <mach_kdp.h>

#include <kern/thread.h>
#include <machine/pmap.h>
#include <device/device_types.h>

#include <mach/vm_param.h>
#include <mach/clock_types.h>
#include <mach/machine.h>
#include <mach/kmod.h>
#include <pexpert/boot.h>
#include <pexpert/pexpert.h>

#include <kern/misc_protos.h>
#include <kern/startup.h>
#include <kern/clock.h>
#include <kern/debug.h>
#include <kern/processor.h>
#include <kdp/kdp_core.h>
#if ALTERNATE_DEBUGGER
#include <arm64/alternate_debugger.h>
#endif
#include <machine/atomic.h>
#include <machine/trap.h>
#include <kern/spl.h>
#include <pexpert/pexpert.h>
#include <kdp/kdp_callout.h>
#include <kdp/kdp_dyld.h>
#include <kdp/kdp_internal.h>
#include <uuid/uuid.h>
#include <sys/time.h>

#include <IOKit/IOPlatformExpert.h>

#include <mach/vm_prot.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
#include <vm/vm_shared_region.h>
#include <mach/time_value.h>
#include <machine/machparam.h> /* for btop */

#include <console/video_console.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <arm/misc_protos.h>
#include <libkern/OSKextLibPrivate.h>
#include <vm/vm_kern.h>
#include <kern/kern_cdata.h>

#if MACH_KDP
void kdp_trap(unsigned int, struct arm_saved_state *);
#endif

extern kern_return_t do_stackshot(void *);
extern void kdp_snapshot_preflight(int pid, void *tracebuf,
    uint32_t tracebuf_size, uint32_t flags,
    kcdata_descriptor_t data_p,
    boolean_t enable_faulting);
extern int kdp_stack_snapshot_bytes_traced(void);

/*
 * Increment the PANICLOG_VERSION if you change the format of the panic
 * log in any way.
 */
#define PANICLOG_VERSION 9
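/* kcdata descriptor used to lay the panic-time stackshot down inside the panic buffer */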
static struct kcdata_descriptor kc_panic_data;

extern char firmware_version[];
extern volatile uint32_t debug_enabled;
extern unsigned int not_in_kdp;

extern int copyinframe(vm_address_t fp, uint32_t * frame);
extern void kdp_callouts(kdp_event_t event);

/* #include <sys/proc.h> */
#define MAXCOMLEN 16
extern int proc_pid(void *p);
extern void proc_name_kdp(task_t, char *, int);

extern const char version[];
extern char osversion[];
extern uint8_t gPlatformECID[8];
extern uint32_t gPlatformMemoryID;

extern uint64_t last_hwaccess_thread;

/* gTargetTypeBuffer is sized at 8 bytes and gModelTypeBuffer at 32 bytes
   because the target name and model name typically do not exceed those sizes. */
extern char gTargetTypeBuffer[8];
extern char gModelTypeBuffer[32];

decl_simple_lock_data(extern, clock_lock)
extern struct timeval gIOLastSleepTime;
extern struct timeval gIOLastWakeTime;
extern boolean_t is_clock_configured;
extern uuid_t kernelcache_uuid;

/* Definitions for frame pointers */
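/*
 * FP_ALIGNMENT_MASK is used to reject unaligned frame pointers.
 * FP_LR_OFFSET / FP_LR_OFFSET64 are the offsets of the saved LR within a
 * 32-bit and 64-bit stack frame respectively, and FP_MAX_NUM_TO_EVALUATE
 * bounds how many frames the panic backtrace will walk.
 */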
#define FP_ALIGNMENT_MASK ((uint32_t)(0x3))
#define FP_LR_OFFSET ((uint32_t)4)
#define FP_LR_OFFSET64 ((uint32_t)8)
#define FP_MAX_NUM_TO_EVALUATE (50)

/* Timeout (in nanoseconds) for all processors responding to debug crosscall */
#define DEBUG_ACK_TIMEOUT ((uint64_t) 10000000)

/* Forward function declarations */
void panic_display_times(void);
void panic_print_symbol_name(vm_address_t search);


/* Global variables */
static uint32_t panic_bt_depth;
boolean_t PanicInfoSaved = FALSE;
boolean_t force_immediate_debug_halt = FALSE;
unsigned int debug_ack_timeout_count = 0;
volatile unsigned int debugger_sync = 0;
volatile unsigned int mp_kdp_trap = 0; /* CPUs signalled by the debug CPU will spin on this */
unsigned int DebugContextCount = 0;

#if defined(__arm64__)
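/*
 * x86 coprocessor and PCIe link state stashed for inclusion in the panic log
 * on bridgeOS targets; the 0xFF / UINT32_MAX initializers mean "not available".
 */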
uint8_t PE_smc_stashed_x86_system_state = 0xFF;
uint8_t PE_smc_stashed_x86_power_state = 0xFF;
uint8_t PE_smc_stashed_x86_efi_boot_state = 0xFF;
uint32_t PE_pcie_stashed_link_state = UINT32_MAX;
#endif


// Convenience macros to validate one or more pointers, provided they point
// to complete types (sizeof(*(ptr)) must be known)
#define VALIDATE_PTR(ptr) \
    validate_ptr((vm_offset_t)(ptr), sizeof(*(ptr)), #ptr)

#define VALIDATE_PTR_2(ptr0, ptr1) \
    VALIDATE_PTR(ptr0) && VALIDATE_PTR(ptr1)

#define VALIDATE_PTR_3(ptr0, ptr1, ptr2) \
    VALIDATE_PTR_2(ptr0, ptr1) && VALIDATE_PTR(ptr2)

#define VALIDATE_PTR_4(ptr0, ptr1, ptr2, ptr3) \
    VALIDATE_PTR_2(ptr0, ptr1) && VALIDATE_PTR_2(ptr2, ptr3)

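/*
 * GET_MACRO peels off the first four arguments and expands to the fifth, so
 * VALIDATE_PTR_LIST(...) dispatches to VALIDATE_PTR, _2, _3 or _4 depending
 * on how many pointers it is given (argument-counting macro trick).
 */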
#define GET_MACRO(_1,_2,_3,_4,NAME,...) NAME

#define VALIDATE_PTR_LIST(...) GET_MACRO(__VA_ARGS__, VALIDATE_PTR_4, VALIDATE_PTR_3, VALIDATE_PTR_2, VALIDATE_PTR)(__VA_ARGS__)

/*
 * Check whether a pointer is valid; if it is not, append a message to the
 * panic log saying why (NULL or unmapped).
 */
static boolean_t validate_ptr(
    vm_offset_t ptr, vm_size_t size, const char * ptr_name)
{
    if (ptr) {
        if (ml_validate_nofault(ptr, size)) {
            return TRUE;
        } else {
            paniclog_append_noflush("Invalid %s pointer: %p size: %d\n",
                ptr_name, (void *)ptr, (int)size);
            return FALSE;
        }
    } else {
        paniclog_append_noflush("NULL %s pointer\n", ptr_name);
        return FALSE;
    }
}

/*
 * Print a backtrace by walking the frame-pointer chain starting at topfp,
 * reading each frame through the given pmap.
 */
static void
print_one_backtrace(pmap_t pmap, vm_offset_t topfp, const char *cur_marker,
    boolean_t is_64_bit)
{
    int i = 0;
    addr64_t lr;
    addr64_t fp;
    addr64_t fp_for_ppn;
    ppnum_t ppn;
    boolean_t dump_kernel_stack;

    fp = topfp;
    fp_for_ppn = 0;
    ppn = (ppnum_t)NULL;

    if (fp >= VM_MIN_KERNEL_ADDRESS)
        dump_kernel_stack = TRUE;
    else
        dump_kernel_stack = FALSE;

    do {
        if ((fp == 0) || ((fp & FP_ALIGNMENT_MASK) != 0))
            break;
        if (dump_kernel_stack && ((fp < VM_MIN_KERNEL_ADDRESS) || (fp > VM_MAX_KERNEL_ADDRESS)))
            break;
        if ((!dump_kernel_stack) && (fp >= VM_MIN_KERNEL_ADDRESS))
            break;

        /*
         * Check to see if current address will result in a different
         * ppn than previously computed (to avoid recomputation) via
         * ((addr ^ fp_for_ppn) >> PAGE_SHIFT)
         */
        if ((((fp + FP_LR_OFFSET) ^ fp_for_ppn) >> PAGE_SHIFT) != 0x0U) {
            ppn = pmap_find_phys(pmap, fp + FP_LR_OFFSET);
            fp_for_ppn = fp + (is_64_bit ? FP_LR_OFFSET64 : FP_LR_OFFSET);
        }
        if (ppn != (ppnum_t)NULL) {
            if (is_64_bit) {
                lr = ml_phys_read_double_64(((((vm_offset_t)ppn) << PAGE_SHIFT)) | ((fp + FP_LR_OFFSET64) & PAGE_MASK));
            } else {
                lr = ml_phys_read_word(((((vm_offset_t)ppn) << PAGE_SHIFT)) | ((fp + FP_LR_OFFSET) & PAGE_MASK));
            }
        } else {
            if (is_64_bit) {
                paniclog_append_noflush("%s\t Could not read LR from frame at 0x%016llx\n", cur_marker, fp + FP_LR_OFFSET64);
            } else {
                paniclog_append_noflush("%s\t Could not read LR from frame at 0x%08x\n", cur_marker, (uint32_t)(fp + FP_LR_OFFSET));
            }
            break;
        }
        if (((fp ^ fp_for_ppn) >> PAGE_SHIFT) != 0x0U) {
            ppn = pmap_find_phys(pmap, fp);
            fp_for_ppn = fp;
        }
        if (ppn != (ppnum_t)NULL) {
            if (is_64_bit) {
                fp = ml_phys_read_double_64(((((vm_offset_t)ppn) << PAGE_SHIFT)) | (fp & PAGE_MASK));
            } else {
                fp = ml_phys_read_word(((((vm_offset_t)ppn) << PAGE_SHIFT)) | (fp & PAGE_MASK));
            }
        } else {
            if (is_64_bit) {
                paniclog_append_noflush("%s\t Could not read FP from frame at 0x%016llx\n", cur_marker, fp);
            } else {
                paniclog_append_noflush("%s\t Could not read FP from frame at 0x%08x\n", cur_marker, (uint32_t)fp);
            }
            break;
        }

        if (lr) {
            if (is_64_bit) {
                paniclog_append_noflush("%s\t lr: 0x%016llx fp: 0x%016llx\n", cur_marker, lr, fp);
            } else {
                paniclog_append_noflush("%s\t lr: 0x%08x fp: 0x%08x\n", cur_marker, (uint32_t)lr, (uint32_t)fp);
            }
        }
    } while ((++i < FP_MAX_NUM_TO_EVALUATE) && (fp != topfp));
}

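/* TOP_RUNNABLE_LIMIT bounds how many of the highest-priority runnable threads
 * do_print_all_backtraces() reports when handling a WDT timeout panic. */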
#define SANE_TASK_LIMIT 256
#define TOP_RUNNABLE_LIMIT 5
#define PANICLOG_UUID_BUF_SIZE 256

extern void panic_print_vnodes(void);

static void
do_print_all_backtraces(
    const char *message)
{
    int logversion = PANICLOG_VERSION;
    thread_t cur_thread = current_thread();
    uintptr_t cur_fp;
    task_t task;
    int i;
    size_t index;
    int print_vnodes = 0;
    const char *nohilite_thread_marker = "\t";

    /* end_marker_bytes (200) is reserved so the END marker and stackshot summary info can always be printed */
    int bytes_traced = 0, bytes_remaining = 0, end_marker_bytes = 200;
    uint64_t bytes_used = 0ULL;
    int err = 0;
    char *stackshot_begin_loc = NULL;

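    /*
     * Grab the current frame pointer register (r7 on arm, fp/x29 on arm64)
     * so the panicking thread's own stack can be backtraced below.
     */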
#if defined(__arm__)
    __asm__ volatile("mov %0, r7":"=r"(cur_fp));
#elif defined(__arm64__)
    __asm__ volatile("add %0, xzr, fp":"=r"(cur_fp));
#else
#error Unknown architecture.
#endif
    if (panic_bt_depth != 0)
        return;
    panic_bt_depth++;

    /* Truncate panic string to 1200 bytes -- WDT log can be ~1100 bytes */
    paniclog_append_noflush("Debugger message: %.1200s\n", message);
    if (debug_enabled) {
        paniclog_append_noflush("Device: %s\n",
            ('\0' != gTargetTypeBuffer[0]) ? gTargetTypeBuffer : "Not set yet");
        paniclog_append_noflush("Hardware Model: %s\n",
            ('\0' != gModelTypeBuffer[0]) ? gModelTypeBuffer : "Not set yet");
        paniclog_append_noflush("ECID: %02X%02X%02X%02X%02X%02X%02X%02X\n", gPlatformECID[7],
            gPlatformECID[6], gPlatformECID[5], gPlatformECID[4], gPlatformECID[3],
            gPlatformECID[2], gPlatformECID[1], gPlatformECID[0]);
        if (last_hwaccess_thread) {
            paniclog_append_noflush("AppleHWAccess Thread: 0x%llx\n", last_hwaccess_thread);
        }
#if defined(XNU_TARGET_OS_BRIDGE)
        paniclog_append_noflush("PCIeUp link state: ");
        if (PE_pcie_stashed_link_state != UINT32_MAX) {
            paniclog_append_noflush("0x%x\n", PE_pcie_stashed_link_state);
        } else {
            paniclog_append_noflush("not available\n");
        }
#endif
    }
    paniclog_append_noflush("Memory ID: 0x%x\n", gPlatformMemoryID);
    paniclog_append_noflush("OS version: %.256s\n",
        ('\0' != osversion[0]) ? osversion : "Not set yet");
    paniclog_append_noflush("Kernel version: %.512s\n", version);
    paniclog_append_noflush("KernelCache UUID: ");
    for (index = 0; index < sizeof(uuid_t); index++) {
        paniclog_append_noflush("%02X", kernelcache_uuid[index]);
    }
    paniclog_append_noflush("\n");

    paniclog_append_noflush("iBoot version: %.128s\n", firmware_version);
    paniclog_append_noflush("secure boot?: %s\n", debug_enabled ? "NO" : "YES");
#if defined(XNU_TARGET_OS_BRIDGE)
    paniclog_append_noflush("x86 EFI Boot State: ");
    if (PE_smc_stashed_x86_efi_boot_state != 0xFF) {
        paniclog_append_noflush("0x%x\n", PE_smc_stashed_x86_efi_boot_state);
    } else {
        paniclog_append_noflush("not available\n");
    }
    paniclog_append_noflush("x86 System State: ");
    if (PE_smc_stashed_x86_system_state != 0xFF) {
        paniclog_append_noflush("0x%x\n", PE_smc_stashed_x86_system_state);
    } else {
        paniclog_append_noflush("not available\n");
    }
    paniclog_append_noflush("x86 Power State: ");
    if (PE_smc_stashed_x86_power_state != 0xFF) {
        paniclog_append_noflush("0x%x\n", PE_smc_stashed_x86_power_state);
    } else {
        paniclog_append_noflush("not available\n");
    }
#endif
    paniclog_append_noflush("Paniclog version: %d\n", logversion);

    panic_display_kernel_aslr();
    panic_display_times();
    panic_display_zprint();
#if CONFIG_ZLEAKS
    panic_display_ztrace();
#endif /* CONFIG_ZLEAKS */
#if CONFIG_ECC_LOGGING
    panic_display_ecc_errors();
#endif /* CONFIG_ECC_LOGGING */

    // Just print threads with high CPU usage for WDT timeouts
    if (strncmp(message, "WDT timeout", 11) == 0) {
        thread_t top_runnable[5] = {0};
        thread_t thread;
        int total_cpu_usage = 0;

        print_vnodes = 1;


        for (thread = (thread_t)queue_first(&threads);
            VALIDATE_PTR(thread) && !queue_end(&threads, (queue_entry_t)thread);
            thread = (thread_t)queue_next(&thread->threads)) {

            total_cpu_usage += thread->cpu_usage;

            // Look for the 5 runnable threads with highest priority
            if (thread->state & TH_RUN) {
                int k;
                thread_t comparison_thread = thread;

                for (k = 0; k < TOP_RUNNABLE_LIMIT; k++) {
                    if (top_runnable[k] == 0) {
                        top_runnable[k] = comparison_thread;
                        break;
                    } else if (comparison_thread->sched_pri > top_runnable[k]->sched_pri) {
                        thread_t temp = top_runnable[k];
                        top_runnable[k] = comparison_thread;
                        comparison_thread = temp;
                    } // if comparison thread has higher priority than previously saved thread
                } // loop through highest priority runnable threads
            } // Check if thread is runnable
        } // Loop through all threads

        // Print the relevant info for each thread identified
        paniclog_append_noflush("Total cpu_usage: %d\n", total_cpu_usage);
        paniclog_append_noflush("Thread task pri cpu_usage\n");

        for (i = 0; i < TOP_RUNNABLE_LIMIT; i++) {

            if (top_runnable[i] && VALIDATE_PTR(top_runnable[i]->task) &&
                validate_ptr((vm_offset_t)top_runnable[i]->task->bsd_info, 1, "bsd_info")) {

                char name[MAXCOMLEN + 1];
                proc_name_kdp(top_runnable[i]->task, name, sizeof(name));
                paniclog_append_noflush("%p %s %d %d\n",
                    top_runnable[i], name, top_runnable[i]->sched_pri, top_runnable[i]->cpu_usage);
            }
        } // Loop through highest priority runnable threads
        paniclog_append_noflush("\n");
    } // Check if message is "WDT timeout"

    // print current task info
    if (VALIDATE_PTR_LIST(cur_thread, cur_thread->task)) {

        task = cur_thread->task;

        if (VALIDATE_PTR_LIST(task->map, task->map->pmap)) {
            paniclog_append_noflush("Panicked task %p: %d pages, %d threads: ",
                task, task->map->pmap->stats.resident_count, task->thread_count);
        } else {
            paniclog_append_noflush("Panicked task %p: %d threads: ",
                task, task->thread_count);
        }

        if (validate_ptr((vm_offset_t)task->bsd_info, 1, "bsd_info")) {
            char name[MAXCOMLEN + 1];
            int pid = proc_pid(task->bsd_info);
            proc_name_kdp(task, name, sizeof(name));
            paniclog_append_noflush("pid %d: %s", pid, name);
        } else {
            paniclog_append_noflush("unknown task");
        }

        paniclog_append_noflush("\n");
    }

    if (cur_fp < VM_MAX_KERNEL_ADDRESS) {
        paniclog_append_noflush("Panicked thread: %p, backtrace: 0x%llx, tid: %llu\n",
            cur_thread, (addr64_t)cur_fp, thread_tid(cur_thread));
#if __LP64__
        print_one_backtrace(kernel_pmap, cur_fp, nohilite_thread_marker, TRUE);
#else
        print_one_backtrace(kernel_pmap, cur_fp, nohilite_thread_marker, FALSE);
#endif
    } else {
        paniclog_append_noflush("Could not print panicked thread backtrace:"
            "frame pointer outside kernel vm.\n");
    }

    paniclog_append_noflush("\n");
    panic_info->eph_panic_log_len = PE_get_offset_into_panic_region(debug_buf_ptr) - panic_info->eph_panic_log_offset;

    if (debug_ack_timeout_count) {
        panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_DEBUGGERSYNC;
        panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
        paniclog_append_noflush("!! debugger synchronization failed, no stackshot !!\n");
    } else if (stackshot_active()) {
        panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_NESTED;
        panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
        paniclog_append_noflush("!! panicked during stackshot, skipping panic stackshot !!\n");
    } else {
        /* Align the stackshot buffer to an 8-byte address (especially important for armv7k devices) */
        debug_buf_ptr += (8 - ((uintptr_t)debug_buf_ptr % 8));
        stackshot_begin_loc = debug_buf_ptr;

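        /*
         * Lay a kcdata descriptor over the rest of the panic buffer (reserving
         * end_marker_bytes for the trailing END marker and summary) and take
         * the panic stackshot directly into that region.
         */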
        bytes_remaining = debug_buf_size - (unsigned int)((uintptr_t)stackshot_begin_loc - (uintptr_t)debug_buf_base);
        err = kcdata_memory_static_init(&kc_panic_data, (mach_vm_address_t)debug_buf_ptr,
            KCDATA_BUFFER_BEGIN_STACKSHOT, bytes_remaining - end_marker_bytes,
            KCFLAG_USE_MEMCOPY);
        if (err == KERN_SUCCESS) {
            kdp_snapshot_preflight(-1, stackshot_begin_loc, bytes_remaining - end_marker_bytes,
                (STACKSHOT_GET_GLOBAL_MEM_STATS | STACKSHOT_SAVE_LOADINFO | STACKSHOT_KCDATA_FORMAT |
                STACKSHOT_ENABLE_BT_FAULTING | STACKSHOT_ENABLE_UUID_FAULTING | STACKSHOT_FROM_PANIC |
                STACKSHOT_NO_IO_STATS | STACKSHOT_THREAD_WAITINFO), &kc_panic_data, 0);
            err = do_stackshot(NULL);
            bytes_traced = kdp_stack_snapshot_bytes_traced();
            if (bytes_traced > 0 && !err) {
                debug_buf_ptr += bytes_traced;
                panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_SUCCEEDED;
                panic_info->eph_stackshot_offset = PE_get_offset_into_panic_region(stackshot_begin_loc);
                panic_info->eph_stackshot_len = bytes_traced;

                panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
                paniclog_append_noflush("\n** Stackshot Succeeded ** Bytes Traced %d **\n", bytes_traced);
            } else {
                bytes_used = kcdata_memory_get_used_bytes(&kc_panic_data);
                if (bytes_used > 0) {
                    /* Zero out the stackshot data */
                    bzero(stackshot_begin_loc, bytes_used);
                    panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_INCOMPLETE;

                    panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
                    paniclog_append_noflush("\n** Stackshot Incomplete ** Bytes Filled %llu **\n", bytes_used);
                } else {
                    bzero(stackshot_begin_loc, bytes_used);
                    panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_ERROR;

                    panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
                    paniclog_append_noflush("\n!! Stackshot Failed !! Bytes Traced %d, err %d\n", bytes_traced, err);
                }
            }
        } else {
            panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_ERROR;
            panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
            paniclog_append_noflush("\n!! Stackshot Failed !!\nkcdata_memory_static_init returned %d", err);
        }
    }

    assert(panic_info->eph_other_log_offset != 0);

    if (print_vnodes != 0)
        panic_print_vnodes();

    panic_bt_depth--;
}

/*
 * Entry to print_all_backtraces is serialized by the debugger lock
 */
static void
print_all_backtraces(const char *message)
{
    unsigned int initial_not_in_kdp = not_in_kdp;

    cpu_data_t * cpu_data_ptr = getCpuDatap();

    assert(cpu_data_ptr->PAB_active == FALSE);
    cpu_data_ptr->PAB_active = TRUE;

    /*
     * Because print_all_backtraces uses the pmap routines, it needs to
     * avoid taking pmap locks. Right now, this is conditionalized on
     * not_in_kdp.
     */
    not_in_kdp = 0;
    do_print_all_backtraces(message);

    not_in_kdp = initial_not_in_kdp;

    cpu_data_ptr->PAB_active = FALSE;
}

void
panic_display_times()
{
    if (kdp_clock_is_locked()) {
        paniclog_append_noflush("Warning: clock is locked. Can't get time\n");
        return;
    }

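    /*
     * Probe clock_lock with simple_lock_try and drop it right away: if the
     * lock can be taken, nobody is mid-update, so the calendar and boot time
     * reads below should be consistent.
     */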
    if ((is_clock_configured) && (simple_lock_try(&clock_lock))) {
        clock_sec_t secs, boot_secs;
        clock_usec_t usecs, boot_usecs;

        simple_unlock(&clock_lock);

        clock_get_calendar_microtime(&secs, &usecs);
        clock_get_boottime_microtime(&boot_secs, &boot_usecs);

        paniclog_append_noflush("Epoch Time: sec usec\n");
        paniclog_append_noflush(" Boot : 0x%08x 0x%08x\n", (unsigned int)boot_secs, (unsigned int)boot_usecs);
        paniclog_append_noflush(" Sleep : 0x%08x 0x%08x\n", (unsigned int)gIOLastSleepTime.tv_sec, (unsigned int)gIOLastSleepTime.tv_usec);
        paniclog_append_noflush(" Wake : 0x%08x 0x%08x\n", (unsigned int)gIOLastWakeTime.tv_sec, (unsigned int)gIOLastWakeTime.tv_usec);
        paniclog_append_noflush(" Calendar: 0x%08x 0x%08x\n\n", (unsigned int)secs, (unsigned int)usecs);
    }
}

void panic_print_symbol_name(vm_address_t search)
{
#pragma unused(search)
    // empty stub. Really only used on x86_64.
    return;
}

void
SavePanicInfo(
    const char *message, __unused uint64_t panic_options)
{

    /* This should be initialized by the time we get here */
    assert(panic_info->eph_panic_log_offset != 0);

    if (panic_options & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
        panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_BUTTON_RESET_PANIC;
    }

    if (panic_options & DEBUGGER_OPTION_COPROC_INITIATED_PANIC) {
        panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COPROC_INITIATED_PANIC;
    }

#if defined(XNU_TARGET_OS_BRIDGE)
    panic_info->eph_x86_power_state = PE_smc_stashed_x86_power_state;
    panic_info->eph_x86_efi_boot_state = PE_smc_stashed_x86_efi_boot_state;
    panic_info->eph_x86_system_state = PE_smc_stashed_x86_system_state;
#endif

    /*
     * On newer targets, panic data is stored directly into the iBoot panic region.
     * If we re-enter SavePanicInfo (e.g. on a double panic) on such a target, update the
     * panic CRC so that iBoot can hopefully find *something* useful in the panic region.
     */
    if (PanicInfoSaved && (debug_buf_base >= (char*)gPanicBase) && (debug_buf_base < (char*)gPanicBase + gPanicSize)) {
        unsigned int pi_size = (unsigned int)(debug_buf_ptr - gPanicBase);
        PE_save_buffer_to_vram((unsigned char*)gPanicBase, &pi_size);
        PE_sync_panic_buffers(); // extra precaution; panic path likely isn't reliable if we're here
    }

    if (PanicInfoSaved || (debug_buf_size == 0))
        return;

    PanicInfoSaved = TRUE;

    print_all_backtraces(message);

    assert(panic_info->eph_panic_log_len != 0);
    panic_info->eph_other_log_len = PE_get_offset_into_panic_region(debug_buf_ptr) - panic_info->eph_other_log_offset;

    PEHaltRestart(kPEPanicSync);

    /*
     * Notifies registered IOPlatformPanicAction callbacks
     * (which includes one to disable the memcache) and flushes
     * the buffer contents from the cache
     */
    paniclog_flush();
}

void
paniclog_flush()
{
    unsigned int panicbuf_length = 0;

    panicbuf_length = (unsigned int)(debug_buf_ptr - gPanicBase);
    if (!panicbuf_length)
        return;

    /*
     * Updates the log length of the last part of the panic log.
     */
    panic_info->eph_other_log_len = PE_get_offset_into_panic_region(debug_buf_ptr) - panic_info->eph_other_log_offset;

    /*
     * Updates the metadata at the beginning of the panic buffer,
     * updates the CRC.
     */
    PE_save_buffer_to_vram((unsigned char *)gPanicBase, &panicbuf_length);

    /*
     * This is currently unused by platform KEXTs on embedded but is
     * kept for compatibility with the published IOKit interfaces.
     */
    PESavePanicInfo((unsigned char *)gPanicBase, panicbuf_length);

    PE_sync_panic_buffers();
}

/*
 * @function DebuggerXCallEnter
 *
 * @abstract IPI other cores so this core can run in a single-threaded context.
 *
 * @discussion This function should be called with the debugger lock held. It
 * signals the other cores to go into a busy loop so this core can run in a
 * single-threaded context and inspect kernel memory.
 *
 * @param proceed_on_sync_failure If true, then go ahead and try to debug even
 * if we can't synch with the other cores. This is inherently unsafe and should
 * only be used if the kernel is going down in flames anyway.
 *
 * @result returns KERN_OPERATION_TIMED_OUT if synchronization times out and
 * proceed_on_sync_failure is false.
 */
kern_return_t
DebuggerXCallEnter(
    boolean_t proceed_on_sync_failure)
{
    uint64_t max_mabs_time, current_mabs_time;
    int cpu;
    int max_cpu;
    cpu_data_t *target_cpu_datap;
    cpu_data_t *cpu_data_ptr = getCpuDatap();

    /* Check for nested debugger entry. */
    cpu_data_ptr->debugger_active++;
    if (cpu_data_ptr->debugger_active != 1)
        return KERN_SUCCESS;

    /*
     * If debugger_sync is not 0, someone responded excessively late to the last
     * debug request (we zero the sync variable in the return function). Zero it
     * again here. This should prevent us from getting out of sync (heh) and
     * timing out on every entry to the debugger if we timeout once.
     */

    debugger_sync = 0;
    mp_kdp_trap = 1;

    /*
     * We need a barrier here to ensure CPUs see mp_kdp_trap and spin when responding
     * to the signal.
     */
    __builtin_arm_dmb(DMB_ISH);

    /*
     * Try to signal all CPUs (except ourselves, of course). Use debugger_sync to
     * synchronize with every CPU that we appeared to signal successfully (cpu_signal
     * is not synchronous).
     */
    bool cpu_signal_failed = false;
    max_cpu = ml_get_max_cpu_number();

    boolean_t immediate_halt = FALSE;
    if (proceed_on_sync_failure && force_immediate_debug_halt)
        immediate_halt = TRUE;

    if (!immediate_halt) {
        for (cpu = 0; cpu <= max_cpu; cpu++) {
            target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

            if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr))
                continue;

            if (KERN_SUCCESS == cpu_signal(target_cpu_datap, SIGPdebug, (void *)NULL, NULL)) {
                (void)hw_atomic_add(&debugger_sync, 1);
            } else {
                cpu_signal_failed = true;
                kprintf("cpu_signal failed in DebuggerXCallEnter\n");
            }
        }

        nanoseconds_to_absolutetime(DEBUG_ACK_TIMEOUT, &max_mabs_time);
        current_mabs_time = mach_absolute_time();
        max_mabs_time += current_mabs_time;
        assert(max_mabs_time > current_mabs_time);

        /*
         * Wait for DEBUG_ACK_TIMEOUT ns for a response from everyone we IPI'd. If we
         * timeout, that is simply too bad; we don't have a true NMI, and one CPU may be
         * uninterruptibly spinning on someone else. The best we can hope for is that
         * all other CPUs have either responded or are spinning in a context that is
         * debugger safe.
         */
        while ((debugger_sync != 0) && (current_mabs_time < max_mabs_time))
            current_mabs_time = mach_absolute_time();

    }

    if (cpu_signal_failed && !proceed_on_sync_failure) {
        DebuggerXCallReturn();
        return KERN_FAILURE;
    } else if (immediate_halt || (current_mabs_time >= max_mabs_time)) {
        /*
         * For the moment, we're aiming for a timeout that the user shouldn't notice,
         * but will be sufficient to let the other core respond.
         */
        __builtin_arm_dmb(DMB_ISH);
        for (cpu = 0; cpu <= max_cpu; cpu++) {
            target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

            if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr))
                continue;
            if (!(target_cpu_datap->cpu_signal & SIGPdebug) && !immediate_halt)
                continue;
            if (proceed_on_sync_failure) {
                paniclog_append_noflush("Attempting to forcibly halt cpu %d\n", cpu);
                dbgwrap_status_t halt_status = ml_dbgwrap_halt_cpu(cpu, 0);
                if (halt_status < 0)
                    paniclog_append_noflush("Unable to halt cpu %d: %d\n", cpu, halt_status);
                else {
                    if (halt_status > 0)
                        paniclog_append_noflush("cpu %d halted with warning %d\n", cpu, halt_status);
                    target_cpu_datap->halt_status = CPU_HALTED;
                }
            } else
                kprintf("Debugger synch pending on cpu %d\n", cpu);
        }
        if (proceed_on_sync_failure) {
            for (cpu = 0; cpu <= max_cpu; cpu++) {
                target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

                if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr) ||
                    (target_cpu_datap->halt_status == CPU_NOT_HALTED))
                    continue;
                dbgwrap_status_t halt_status = ml_dbgwrap_halt_cpu_with_state(cpu,
                    NSEC_PER_SEC, &target_cpu_datap->halt_state);
                if ((halt_status < 0) || (halt_status == DBGWRAP_WARN_CPU_OFFLINE))
                    paniclog_append_noflush("Unable to obtain state for cpu %d: %d\n", cpu, halt_status);
                else
                    target_cpu_datap->halt_status = CPU_HALTED_WITH_STATE;
            }
            if (immediate_halt)
                paniclog_append_noflush("Immediate halt requested on all cores\n");
            else
                paniclog_append_noflush("Debugger synchronization timed out; waited %llu nanoseconds\n", DEBUG_ACK_TIMEOUT);
            debug_ack_timeout_count++;
            return KERN_SUCCESS;
        } else {
            DebuggerXCallReturn();
            return KERN_OPERATION_TIMED_OUT;
        }
    } else {
        return KERN_SUCCESS;
    }
}

/*
 * @function DebuggerXCallReturn
 *
 * @abstract Resume normal multicore operation after DebuggerXCallEnter()
 *
 * @discussion This function should be called with debugger lock held.
 */
void
DebuggerXCallReturn(
    void)
{
    cpu_data_t *cpu_data_ptr = getCpuDatap();

    cpu_data_ptr->debugger_active--;
    if (cpu_data_ptr->debugger_active != 0)
        return;

    mp_kdp_trap = 0;
    debugger_sync = 0;

    /* Do we need a barrier here? */
    __builtin_arm_dmb(DMB_ISH);
}

void
DebuggerXCall(
    void *ctx)
{
    boolean_t save_context = FALSE;
    vm_offset_t kstackptr = 0;
    arm_saved_state_t *regs = (arm_saved_state_t *) ctx;

    if (regs != NULL) {
#if defined(__arm64__)
        save_context = PSR64_IS_KERNEL(get_saved_state_cpsr(regs));
#else
        save_context = PSR_IS_KERNEL(regs->cpsr);
#endif
    }

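    /*
     * kstackptr points at the arm_saved_state_t kept with this thread's kernel
     * stack; machine_trace_thread backtraces from it, so it is either filled in
     * from the interrupted context or zeroed below.
     */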
    kstackptr = current_thread()->machine.kstackptr;
    arm_saved_state_t *state = (arm_saved_state_t *)kstackptr;

    if (save_context) {
        /* Save the interrupted context before acknowledging the signal */
        *state = *regs;
    } else if (regs) {
        /* zero old state so machine_trace_thread knows not to backtrace it */
        set_saved_state_fp(state, 0);
        set_saved_state_pc(state, 0);
        set_saved_state_lr(state, 0);
        set_saved_state_sp(state, 0);
    }

    (void)hw_atomic_sub(&debugger_sync, 1);
    __builtin_arm_dmb(DMB_ISH);
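    /* Spin here until the debugger CPU clears mp_kdp_trap in DebuggerXCallReturn() */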
    while (mp_kdp_trap);

    /* Any cleanup for our pushed context should go here */
}


void
DebuggerCall(
    unsigned int reason,
    void *ctx)
{
#if !MACH_KDP
#pragma unused(reason,ctx)
#endif /* !MACH_KDP */

#if ALTERNATE_DEBUGGER
    alternate_debugger_enter();
#endif

#if MACH_KDP
    kdp_trap(reason, (struct arm_saved_state *)ctx);
#else
    /* TODO: decide what to do if no debugger config */
#endif
}
