/*
 * Copyright (c) 2007-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <debug.h>
#include <mach_kdp.h>

#include <kern/thread.h>
#include <machine/pmap.h>
#include <device/device_types.h>

#include <mach/vm_param.h>
#include <mach/clock_types.h>
#include <mach/machine.h>
#include <mach/kmod.h>
#include <pexpert/boot.h>
#include <pexpert/pexpert.h>

#include <kern/misc_protos.h>
#include <kern/startup.h>
#include <kern/clock.h>
#include <kern/debug.h>
#include <kern/processor.h>
#include <kdp/kdp_core.h>
#if ALTERNATE_DEBUGGER
#include <arm64/alternate_debugger.h>
#endif
#include <machine/atomic.h>
#include <machine/trap.h>
#include <kern/spl.h>
#include <pexpert/pexpert.h>
#include <kdp/kdp_callout.h>
#include <kdp/kdp_dyld.h>
#include <kdp/kdp_internal.h>
#include <uuid/uuid.h>
#include <sys/time.h>

#include <IOKit/IOPlatformExpert.h>

#include <mach/vm_prot.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
#include <vm/vm_shared_region.h>
#include <mach/time_value.h>
#include <machine/machparam.h>  /* for btop */

#include <console/video_console.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <arm/misc_protos.h>
#include <libkern/OSKextLibPrivate.h>
#include <vm/vm_kern.h>
#include <kern/kern_cdata.h>

#if MACH_KDP
void kdp_trap(unsigned int, struct arm_saved_state *);
#endif

extern kern_return_t do_stackshot(void *);
extern void kdp_snapshot_preflight(int pid, void *tracebuf,
    uint32_t tracebuf_size, uint32_t flags,
    kcdata_descriptor_t data_p,
    boolean_t enable_faulting);
extern int kdp_stack_snapshot_bytes_traced(void);

/*
 * Increment the PANICLOG_VERSION if you change the format of the panic
 * log in any way.
 */
#define PANICLOG_VERSION 8
static struct kcdata_descriptor kc_panic_data;

extern char firmware_version[];
extern volatile uint32_t debug_enabled;
extern unsigned int not_in_kdp;

extern int copyinframe(vm_address_t fp, uint32_t * frame);
extern void kdp_callouts(kdp_event_t event);

/* #include <sys/proc.h> */
#define MAXCOMLEN 16
extern int proc_pid(void *p);
extern void proc_name_kdp(task_t, char *, int);

extern const char version[];
extern char osversion[];
extern uint8_t gPlatformECID[8];
extern uint32_t gPlatformMemoryID;

extern uint64_t last_hwaccess_thread;

/* gTargetTypeBuffer is sized at 8 bytes and gModelTypeBuffer at 32 bytes,
   since the target name and model name typically do not exceed these sizes. */
extern char gTargetTypeBuffer[8];
extern char gModelTypeBuffer[32];

decl_simple_lock_data(extern, clock_lock)
extern struct timeval gIOLastSleepTime;
extern struct timeval gIOLastWakeTime;
extern boolean_t is_clock_configured;
extern uuid_t kernelcache_uuid;

/* Definitions for frame pointers */
#define FP_ALIGNMENT_MASK ((uint32_t)(0x3))
#define FP_LR_OFFSET ((uint32_t)4)
#define FP_LR_OFFSET64 ((uint32_t)8)
#define FP_MAX_NUM_TO_EVALUATE (50)
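/*
 * These offsets assume the standard ARM frame record layout of
 * { previous fp, saved lr }: the saved lr sits 4 bytes past fp on 32-bit ARM
 * and 8 bytes past fp on arm64, and the chain is walked for at most
 * FP_MAX_NUM_TO_EVALUATE frames.
 */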

/* Timeout (in nanoseconds) for all processors responding to debug crosscall */
#define DEBUG_ACK_TIMEOUT ((uint64_t) 10000000)

/* Forward function declarations */
void panic_display_times(void);
void panic_print_symbol_name(vm_address_t search);


/* Global variables */
static uint32_t panic_bt_depth;
boolean_t PanicInfoSaved = FALSE;
boolean_t force_immediate_debug_halt = FALSE;
unsigned int debug_ack_timeout_count = 0;
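/* Count of signalled CPUs that have not yet acknowledged the debug cross-call */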
volatile unsigned int debugger_sync = 0;
volatile unsigned int mp_kdp_trap = 0; /* CPUs signalled by the debug CPU will spin on this */
unsigned int DebugContextCount = 0;

// Convenience macros for validating one or more typed pointers
// (the pointee type must be complete so that sizeof(*(ptr)) is known)
#define VALIDATE_PTR(ptr) \
    validate_ptr((vm_offset_t)(ptr), sizeof(*(ptr)), #ptr)

#define VALIDATE_PTR_2(ptr0, ptr1) \
    VALIDATE_PTR(ptr0) && VALIDATE_PTR(ptr1)

#define VALIDATE_PTR_3(ptr0, ptr1, ptr2) \
    VALIDATE_PTR_2(ptr0, ptr1) && VALIDATE_PTR(ptr2)

#define VALIDATE_PTR_4(ptr0, ptr1, ptr2, ptr3) \
    VALIDATE_PTR_2(ptr0, ptr1) && VALIDATE_PTR_2(ptr2, ptr3)

#define GET_MACRO(_1,_2,_3,_4,NAME,...) NAME

#define VALIDATE_PTR_LIST(...) GET_MACRO(__VA_ARGS__, VALIDATE_PTR_4, VALIDATE_PTR_3, VALIDATE_PTR_2, VALIDATE_PTR)(__VA_ARGS__)
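/*
 * GET_MACRO selects the right helper by arity: the caller's arguments shift
 * the VALIDATE_PTR_N names so that NAME lands on the one matching the count.
 * Illustrative expansion:
 *
 *   VALIDATE_PTR_LIST(a, b)
 *     -> GET_MACRO(a, b, VALIDATE_PTR_4, VALIDATE_PTR_3, VALIDATE_PTR_2, VALIDATE_PTR)(a, b)
 *     -> VALIDATE_PTR_2(a, b)
 *     -> VALIDATE_PTR(a) && VALIDATE_PTR(b)
 */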

/*
 * Evaluate if a pointer is valid
 * Print a message if pointer is invalid
 */
static boolean_t validate_ptr(
    vm_offset_t ptr, vm_size_t size, const char * ptr_name)
{
    if (ptr) {
        if (ml_validate_nofault(ptr, size)) {
            return TRUE;
        } else {
            paniclog_append_noflush("Invalid %s pointer: %p size: %d\n",
                ptr_name, (void *)ptr, (int)size);
            return FALSE;
        }
    } else {
        paniclog_append_noflush("NULL %s pointer\n", ptr_name);
        return FALSE;
    }
}

/*
 * Print a single backtrace by walking the chain of saved frame pointers,
 * starting at topfp.
 */
static void
print_one_backtrace(pmap_t pmap, vm_offset_t topfp, const char *cur_marker,
    boolean_t is_64_bit)
{
    int i = 0;
    addr64_t lr;
    addr64_t fp;
    addr64_t fp_for_ppn;
    ppnum_t ppn;
    boolean_t dump_kernel_stack;

    fp = topfp;
    fp_for_ppn = 0;
    ppn = (ppnum_t)NULL;

    if (fp >= VM_MIN_KERNEL_ADDRESS)
        dump_kernel_stack = TRUE;
    else
        dump_kernel_stack = FALSE;

    do {
        if ((fp == 0) || ((fp & FP_ALIGNMENT_MASK) != 0))
            break;
        if (dump_kernel_stack && ((fp < VM_MIN_KERNEL_ADDRESS) || (fp > VM_MAX_KERNEL_ADDRESS)))
            break;
        if ((!dump_kernel_stack) && (fp >= VM_MIN_KERNEL_ADDRESS))
            break;

        /*
         * Check to see if current address will result in a different
         * ppn than previously computed (to avoid recomputation) via
         * ((addr ^ fp_for_ppn) >> PAGE_SHIFT)
         */
        if ((((fp + FP_LR_OFFSET) ^ fp_for_ppn) >> PAGE_SHIFT) != 0x0U) {
            ppn = pmap_find_phys(pmap, fp + FP_LR_OFFSET);
            fp_for_ppn = fp + (is_64_bit ? FP_LR_OFFSET64 : FP_LR_OFFSET);
        }
        if (ppn != (ppnum_t)NULL) {
            if (is_64_bit) {
                lr = ml_phys_read_double_64(((((vm_offset_t)ppn) << PAGE_SHIFT)) | ((fp + FP_LR_OFFSET64) & PAGE_MASK));
            } else {
                lr = ml_phys_read_word(((((vm_offset_t)ppn) << PAGE_SHIFT)) | ((fp + FP_LR_OFFSET) & PAGE_MASK));
            }
        } else {
            if (is_64_bit) {
                paniclog_append_noflush("%s\t Could not read LR from frame at 0x%016llx\n", cur_marker, fp + FP_LR_OFFSET64);
            } else {
                paniclog_append_noflush("%s\t Could not read LR from frame at 0x%08x\n", cur_marker, (uint32_t)(fp + FP_LR_OFFSET));
            }
            break;
        }
        if (((fp ^ fp_for_ppn) >> PAGE_SHIFT) != 0x0U) {
            ppn = pmap_find_phys(pmap, fp);
            fp_for_ppn = fp;
        }
        if (ppn != (ppnum_t)NULL) {
            if (is_64_bit) {
                fp = ml_phys_read_double_64(((((vm_offset_t)ppn) << PAGE_SHIFT)) | (fp & PAGE_MASK));
            } else {
                fp = ml_phys_read_word(((((vm_offset_t)ppn) << PAGE_SHIFT)) | (fp & PAGE_MASK));
            }
        } else {
            if (is_64_bit) {
                paniclog_append_noflush("%s\t Could not read FP from frame at 0x%016llx\n", cur_marker, fp);
            } else {
                paniclog_append_noflush("%s\t Could not read FP from frame at 0x%08x\n", cur_marker, (uint32_t)fp);
            }
            break;
        }

        if (lr) {
            if (is_64_bit) {
                paniclog_append_noflush("%s\t lr: 0x%016llx  fp: 0x%016llx\n", cur_marker, lr, fp);
            } else {
                paniclog_append_noflush("%s\t lr: 0x%08x  fp: 0x%08x\n", cur_marker, (uint32_t)lr, (uint32_t)fp);
            }
        }
    } while ((++i < FP_MAX_NUM_TO_EVALUATE) && (fp != topfp));
}

#define SANE_TASK_LIMIT 256
#define TOP_RUNNABLE_LIMIT 5
#define PANICLOG_UUID_BUF_SIZE 256

extern void panic_print_vnodes(void);

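/*
 * Write the body of the panic log: device/OS identification, the panic
 * message, zone and time diagnostics, the panicked task and thread with a
 * kernel backtrace, and, when possible, a stackshot appended to the panic
 * buffer. For "WDT timeout" panics the highest-priority runnable threads
 * and their CPU usage are printed as well.
 */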
static void
do_print_all_backtraces(
    const char *message)
{
    int logversion = PANICLOG_VERSION;
    thread_t cur_thread = current_thread();
    uintptr_t cur_fp;
    task_t task;
    int i;
    size_t index;
    int print_vnodes = 0;
    const char *nohilite_thread_marker = "\t";

    /* end_marker_bytes set to 200 for printing END marker + stackshot summary info always */
    int bytes_traced = 0, bytes_remaining = 0, end_marker_bytes = 200;
    uint64_t bytes_used = 0ULL;
    int err = 0;
    char *stackshot_begin_loc = NULL;

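    /* Capture the current frame pointer register: r7 on 32-bit ARM, fp (x29) on arm64. */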
#if defined(__arm__)
    __asm__ volatile("mov %0, r7":"=r"(cur_fp));
#elif defined(__arm64__)
    __asm__ volatile("add %0, xzr, fp":"=r"(cur_fp));
#else
#error Unknown architecture.
#endif
    if (panic_bt_depth != 0)
        return;
    panic_bt_depth++;

    /* Truncate panic string to 1200 bytes -- WDT log can be ~1100 bytes */
    paniclog_append_noflush("Debugger message: %.1200s\n", message);
    if (debug_enabled) {
        paniclog_append_noflush("Device: %s\n",
            ('\0' != gTargetTypeBuffer[0]) ? gTargetTypeBuffer : "Not set yet");
        paniclog_append_noflush("Hardware Model: %s\n",
            ('\0' != gModelTypeBuffer[0]) ? gModelTypeBuffer : "Not set yet");
        paniclog_append_noflush("ECID: %02X%02X%02X%02X%02X%02X%02X%02X\n", gPlatformECID[7],
            gPlatformECID[6], gPlatformECID[5], gPlatformECID[4], gPlatformECID[3],
            gPlatformECID[2], gPlatformECID[1], gPlatformECID[0]);
        if (last_hwaccess_thread) {
            paniclog_append_noflush("AppleHWAccess Thread: 0x%llx\n", last_hwaccess_thread);
        }
    }
    paniclog_append_noflush("Memory ID: 0x%x\n", gPlatformMemoryID);
    paniclog_append_noflush("OS version: %.256s\n",
        ('\0' != osversion[0]) ? osversion : "Not set yet");
    paniclog_append_noflush("Kernel version: %.512s\n", version);
    paniclog_append_noflush("KernelCache UUID: ");
    for (index = 0; index < sizeof(uuid_t); index++) {
        paniclog_append_noflush("%02X", kernelcache_uuid[index]);
    }
    paniclog_append_noflush("\n");

    paniclog_append_noflush("iBoot version: %.128s\n", firmware_version);
    paniclog_append_noflush("secure boot?: %s\n", debug_enabled ? "NO" : "YES");
    paniclog_append_noflush("Paniclog version: %d\n", logversion);

    panic_display_kernel_aslr();
    panic_display_times();
    panic_display_zprint();
#if CONFIG_ZLEAKS
    panic_display_ztrace();
#endif /* CONFIG_ZLEAKS */
#if CONFIG_ECC_LOGGING
    panic_display_ecc_errors();
#endif /* CONFIG_ECC_LOGGING */

    // Just print threads with high CPU usage for WDT timeouts
    if (strncmp(message, "WDT timeout", 11) == 0) {
        thread_t top_runnable[5] = {0};
        thread_t thread;
        int total_cpu_usage = 0;

        print_vnodes = 1;


        for (thread = (thread_t)queue_first(&threads);
            VALIDATE_PTR(thread) && !queue_end(&threads, (queue_entry_t)thread);
            thread = (thread_t)queue_next(&thread->threads)) {

            total_cpu_usage += thread->cpu_usage;

            // Look for the 5 runnable threads with highest priority
            if (thread->state & TH_RUN) {
                int k;
                thread_t comparison_thread = thread;

                for (k = 0; k < TOP_RUNNABLE_LIMIT; k++) {
                    if (top_runnable[k] == 0) {
                        top_runnable[k] = comparison_thread;
                        break;
                    } else if (comparison_thread->sched_pri > top_runnable[k]->sched_pri) {
                        thread_t temp = top_runnable[k];
                        top_runnable[k] = comparison_thread;
                        comparison_thread = temp;
                    } // if comparison thread has higher priority than previously saved thread
                } // loop through highest priority runnable threads
            } // Check if thread is runnable
        } // Loop through all threads

        // Print the relevant info for each thread identified
        paniclog_append_noflush("Total cpu_usage: %d\n", total_cpu_usage);
        paniclog_append_noflush("Thread task pri cpu_usage\n");

        for (i = 0; i < TOP_RUNNABLE_LIMIT; i++) {

            if (top_runnable[i] && VALIDATE_PTR(top_runnable[i]->task) &&
                validate_ptr((vm_offset_t)top_runnable[i]->task->bsd_info, 1, "bsd_info")) {

                char name[MAXCOMLEN + 1];
                proc_name_kdp(top_runnable[i]->task, name, sizeof(name));
                paniclog_append_noflush("%p %s %d %d\n",
                    top_runnable[i], name, top_runnable[i]->sched_pri, top_runnable[i]->cpu_usage);
            }
        } // Loop through highest priority runnable threads
        paniclog_append_noflush("\n");
    } // Check if message is "WDT timeout"

    // print current task info
    if (VALIDATE_PTR_LIST(cur_thread, cur_thread->task)) {

        task = cur_thread->task;

        if (VALIDATE_PTR_LIST(task->map, task->map->pmap)) {
            paniclog_append_noflush("Panicked task %p: %d pages, %d threads: ",
                task, task->map->pmap->stats.resident_count, task->thread_count);
        } else {
            paniclog_append_noflush("Panicked task %p: %d threads: ",
                task, task->thread_count);
        }

        if (validate_ptr((vm_offset_t)task->bsd_info, 1, "bsd_info")) {
            char name[MAXCOMLEN + 1];
            int pid = proc_pid(task->bsd_info);
            proc_name_kdp(task, name, sizeof(name));
            paniclog_append_noflush("pid %d: %s", pid, name);
        } else {
            paniclog_append_noflush("unknown task");
        }

        paniclog_append_noflush("\n");
    }

    if (cur_fp < VM_MAX_KERNEL_ADDRESS) {
        paniclog_append_noflush("Panicked thread: %p, backtrace: 0x%llx, tid: %llu\n",
            cur_thread, (addr64_t)cur_fp, thread_tid(cur_thread));
#if __LP64__
        print_one_backtrace(kernel_pmap, cur_fp, nohilite_thread_marker, TRUE);
#else
        print_one_backtrace(kernel_pmap, cur_fp, nohilite_thread_marker, FALSE);
#endif
    } else {
        paniclog_append_noflush("Could not print panicked thread backtrace: "
            "frame pointer outside kernel vm.\n");
    }

    paniclog_append_noflush("\n");
    panic_info->eph_panic_log_len = PE_get_offset_into_panic_region(debug_buf_ptr) - panic_info->eph_panic_log_offset;

    if (debug_ack_timeout_count) {
        panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_DEBUGGERSYNC;
        panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
        paniclog_append_noflush("!! debugger synchronization failed, no stackshot !!\n");
    } else if (stackshot_active()) {
        panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_NESTED;
        panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
        paniclog_append_noflush("!! panicked during stackshot, skipping panic stackshot !!\n");
    } else {
        /* Align the stackshot buffer to an 8-byte address (especially important for armv7k devices) */
        debug_buf_ptr += (8 - ((uintptr_t)debug_buf_ptr % 8));
        stackshot_begin_loc = debug_buf_ptr;

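        /*
         * The stackshot is written straight into what remains of the panic
         * buffer; end_marker_bytes stays reserved so the trailing summary
         * line and END marker always fit after the stackshot data.
         */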
        bytes_remaining = debug_buf_size - (unsigned int)((uintptr_t)stackshot_begin_loc - (uintptr_t)debug_buf_base);
        err = kcdata_memory_static_init(&kc_panic_data, (mach_vm_address_t)debug_buf_ptr,
            KCDATA_BUFFER_BEGIN_STACKSHOT, bytes_remaining - end_marker_bytes,
            KCFLAG_USE_MEMCOPY);
        if (err == KERN_SUCCESS) {
            kdp_snapshot_preflight(-1, stackshot_begin_loc, bytes_remaining - end_marker_bytes,
                (STACKSHOT_GET_GLOBAL_MEM_STATS | STACKSHOT_SAVE_LOADINFO | STACKSHOT_KCDATA_FORMAT |
                STACKSHOT_ENABLE_BT_FAULTING | STACKSHOT_ENABLE_UUID_FAULTING | STACKSHOT_FROM_PANIC |
                STACKSHOT_NO_IO_STATS | STACKSHOT_THREAD_WAITINFO), &kc_panic_data, 0);
            err = do_stackshot(NULL);
            bytes_traced = kdp_stack_snapshot_bytes_traced();
            if (bytes_traced > 0 && !err) {
                debug_buf_ptr += bytes_traced;
                panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_SUCCEEDED;
                panic_info->eph_stackshot_offset = PE_get_offset_into_panic_region(stackshot_begin_loc);
                panic_info->eph_stackshot_len = bytes_traced;

                panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
                paniclog_append_noflush("\n** Stackshot Succeeded ** Bytes Traced %d **\n", bytes_traced);
            } else {
                bytes_used = kcdata_memory_get_used_bytes(&kc_panic_data);
                if (bytes_used > 0) {
                    /* Zero out the stackshot data */
                    bzero(stackshot_begin_loc, bytes_used);
                    panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_INCOMPLETE;

                    panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
                    paniclog_append_noflush("\n** Stackshot Incomplete ** Bytes Filled %llu **\n", bytes_used);
                } else {
                    bzero(stackshot_begin_loc, bytes_used);
                    panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_ERROR;

                    panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
                    paniclog_append_noflush("\n!! Stackshot Failed !! Bytes Traced %d, err %d\n", bytes_traced, err);
                }
            }
        } else {
            panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_ERROR;
            panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
            paniclog_append_noflush("\n!! Stackshot Failed !!\nkcdata_memory_static_init returned %d", err);
        }
    }

    assert(panic_info->eph_other_log_offset != 0);

    if (print_vnodes != 0)
        panic_print_vnodes();

    panic_bt_depth--;
}

/*
 * Entry to print_all_backtraces is serialized by the debugger lock
 */
static void
print_all_backtraces(const char *message)
{
    unsigned int initial_not_in_kdp = not_in_kdp;

    cpu_data_t * cpu_data_ptr = getCpuDatap();

    assert(cpu_data_ptr->PAB_active == FALSE);
    cpu_data_ptr->PAB_active = TRUE;

    /*
     * Because print all backtraces uses the pmap routines, it needs to
     * avoid taking pmap locks. Right now, this is conditionalized on
     * not_in_kdp.
     */
    not_in_kdp = 0;
    do_print_all_backtraces(message);

    not_in_kdp = initial_not_in_kdp;

    cpu_data_ptr->PAB_active = FALSE;
}

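/*
 * Append boot, sleep, wake, and calendar timestamps to the panic log.
 * simple_lock_try() is used so that the panic path never blocks on
 * clock_lock if it happens to be held at the time of the panic.
 */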
void
panic_display_times()
{
    if (kdp_clock_is_locked()) {
        paniclog_append_noflush("Warning: clock is locked. Can't get time\n");
        return;
    }

    if ((is_clock_configured) && (simple_lock_try(&clock_lock))) {
        clock_sec_t secs, boot_secs;
        clock_usec_t usecs, boot_usecs;

        simple_unlock(&clock_lock);

        clock_get_calendar_microtime(&secs, &usecs);
        clock_get_boottime_microtime(&boot_secs, &boot_usecs);

        paniclog_append_noflush("Epoch Time:        sec        usec\n");
        paniclog_append_noflush("  Boot    : 0x%08x 0x%08x\n", (unsigned int)boot_secs, (unsigned int)boot_usecs);
        paniclog_append_noflush("  Sleep   : 0x%08x 0x%08x\n", (unsigned int)gIOLastSleepTime.tv_sec, (unsigned int)gIOLastSleepTime.tv_usec);
        paniclog_append_noflush("  Wake    : 0x%08x 0x%08x\n", (unsigned int)gIOLastWakeTime.tv_sec, (unsigned int)gIOLastWakeTime.tv_usec);
        paniclog_append_noflush("  Calendar: 0x%08x 0x%08x\n\n", (unsigned int)secs, (unsigned int)usecs);
    }
}

void panic_print_symbol_name(vm_address_t search)
{
#pragma unused(search)
    // empty stub. Really only used on x86_64.
    return;
}

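/*
 * Top-level entry point for saving the panic log. Records the panic flags,
 * refreshes the panic-region CRC if we re-enter on a nested panic, prints
 * the backtraces/stackshot via print_all_backtraces(), and finally flushes
 * the log out through paniclog_flush().
 */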
void
SavePanicInfo(
    const char *message, __unused uint64_t panic_options)
{

    /* This should be initialized by the time we get here */
    assert(panic_info->eph_panic_log_offset != 0);

    if (panic_options & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
        panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_BUTTON_RESET_PANIC;
    }

    if (panic_options & DEBUGGER_OPTION_COPROC_INITIATED_PANIC) {
        panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COPROC_INITIATED_PANIC;
    }

    /*
     * On newer targets, panic data is stored directly into the iBoot panic region.
     * If we re-enter SavePanicInfo (e.g. on a double panic) on such a target, update the
     * panic CRC so that iBoot can hopefully find *something* useful in the panic region.
     */
    if (PanicInfoSaved && (debug_buf_base >= (char*)gPanicBase) && (debug_buf_base < (char*)gPanicBase + gPanicSize)) {
        unsigned int pi_size = (unsigned int)(debug_buf_ptr - gPanicBase);
        PE_save_buffer_to_vram((unsigned char*)gPanicBase, &pi_size);
        PE_sync_panic_buffers(); // extra precaution; panic path likely isn't reliable if we're here
    }

    if (PanicInfoSaved || (debug_buf_size == 0))
        return;

    PanicInfoSaved = TRUE;

    print_all_backtraces(message);

    assert(panic_info->eph_panic_log_len != 0);
    panic_info->eph_other_log_len = PE_get_offset_into_panic_region(debug_buf_ptr) - panic_info->eph_other_log_offset;

    PEHaltRestart(kPEPanicSync);

    /*
     * Notifies registered IOPlatformPanicAction callbacks
     * (which includes one to disable the memcache) and flushes
     * the buffer contents from the cache
     */
    paniclog_flush();
}

void
paniclog_flush()
{
    unsigned int panicbuf_length = 0;

    panicbuf_length = (unsigned int)(debug_buf_ptr - gPanicBase);
    if (!panicbuf_length)
        return;

    /*
     * Updates the log length of the last part of the panic log.
     */
    panic_info->eph_other_log_len = PE_get_offset_into_panic_region(debug_buf_ptr) - panic_info->eph_other_log_offset;

    /*
     * Updates the metadata at the beginning of the panic buffer,
     * updates the CRC.
     */
    PE_save_buffer_to_vram((unsigned char *)gPanicBase, &panicbuf_length);

    /*
     * This is currently unused by platform KEXTs on embedded but is
     * kept for compatibility with the published IOKit interfaces.
     */
    PESavePanicInfo((unsigned char *)gPanicBase, panicbuf_length);

    PE_sync_panic_buffers();
}

/*
 * @function DebuggerXCallEnter
 *
 * @abstract IPI other cores so this core can run in a single-threaded context.
 *
 * @discussion This function should be called with the debugger lock held. It
 * signals the other cores to go into a busy loop so this core can run in a
 * single-threaded context and inspect kernel memory.
 *
 * @param proceed_on_sync_failure If true, then go ahead and try to debug even
 * if we can't synch with the other cores. This is inherently unsafe and should
 * only be used if the kernel is going down in flames anyway.
 *
 * @result returns KERN_OPERATION_TIMED_OUT if synchronization times out and
 * proceed_on_sync_failure is false.
 */
kern_return_t
DebuggerXCallEnter(
    boolean_t proceed_on_sync_failure)
{
    uint64_t max_mabs_time, current_mabs_time;
    int cpu;
    int max_cpu;
    cpu_data_t *target_cpu_datap;
    cpu_data_t *cpu_data_ptr = getCpuDatap();

    /* Check for nested debugger entry. */
    cpu_data_ptr->debugger_active++;
    if (cpu_data_ptr->debugger_active != 1)
        return KERN_SUCCESS;

    /*
     * If debugger_sync is not 0, someone responded excessively late to the last
     * debug request (we zero the sync variable in the return function). Zero it
     * again here. This should prevent us from getting out of sync (heh) and
     * timing out on every entry to the debugger if we timeout once.
     */

    debugger_sync = 0;
    mp_kdp_trap = 1;

    /*
     * We need a barrier here to ensure CPUs see mp_kdp_trap and spin when responding
     * to the signal.
     */
    __builtin_arm_dmb(DMB_ISH);

    /*
     * Try to signal all CPUs (except ourselves, of course). Use debugger_sync to
     * synchronize with every CPU that we appeared to signal successfully (cpu_signal
     * is not synchronous).
     */
    bool cpu_signal_failed = false;
    max_cpu = ml_get_max_cpu_number();

    boolean_t immediate_halt = FALSE;
    if (proceed_on_sync_failure && force_immediate_debug_halt)
        immediate_halt = TRUE;

    if (!immediate_halt) {
        for (cpu = 0; cpu <= max_cpu; cpu++) {
            target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

            if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr))
                continue;

            if (KERN_SUCCESS == cpu_signal(target_cpu_datap, SIGPdebug, (void *)NULL, NULL)) {
                (void)hw_atomic_add(&debugger_sync, 1);
            } else {
                cpu_signal_failed = true;
                kprintf("cpu_signal failed in DebuggerXCallEnter\n");
            }
        }

        nanoseconds_to_absolutetime(DEBUG_ACK_TIMEOUT, &max_mabs_time);
        current_mabs_time = mach_absolute_time();
        max_mabs_time += current_mabs_time;
        assert(max_mabs_time > current_mabs_time);

        /*
         * Wait for DEBUG_ACK_TIMEOUT ns for a response from everyone we IPI'd. If we
         * timeout, that is simply too bad; we don't have a true NMI, and one CPU may be
         * uninterruptibly spinning on someone else. The best we can hope for is that
         * all other CPUs have either responded or are spinning in a context that is
         * debugger safe.
         */
        while ((debugger_sync != 0) && (current_mabs_time < max_mabs_time))
            current_mabs_time = mach_absolute_time();

    }

    if (cpu_signal_failed && !proceed_on_sync_failure) {
        DebuggerXCallReturn();
        return KERN_FAILURE;
    } else if (immediate_halt || (current_mabs_time >= max_mabs_time)) {
        /*
         * For the moment, we're aiming for a timeout that the user shouldn't notice,
         * but will be sufficient to let the other core respond.
         */
        __builtin_arm_dmb(DMB_ISH);
        for (cpu = 0; cpu <= max_cpu; cpu++) {
            target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

            if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr))
                continue;
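            /* Skip CPUs that no longer have a SIGPdebug signal pending (they already acked
               or were never signalled), unless an immediate halt was forced. */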
            if (!(target_cpu_datap->cpu_signal & SIGPdebug) && !immediate_halt)
                continue;
            if (proceed_on_sync_failure) {
                paniclog_append_noflush("Attempting to forcibly halt cpu %d\n", cpu);
                dbgwrap_status_t halt_status = ml_dbgwrap_halt_cpu(cpu, 0);
                if (halt_status < 0)
                    paniclog_append_noflush("Unable to halt cpu %d: %d\n", cpu, halt_status);
                else {
                    if (halt_status > 0)
                        paniclog_append_noflush("cpu %d halted with warning %d\n", cpu, halt_status);
                    target_cpu_datap->halt_status = CPU_HALTED;
                }
            } else
                kprintf("Debugger synch pending on cpu %d\n", cpu);
        }
        if (proceed_on_sync_failure) {
            for (cpu = 0; cpu <= max_cpu; cpu++) {
                target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

                if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr) ||
                    (target_cpu_datap->halt_status == CPU_NOT_HALTED))
                    continue;
                dbgwrap_status_t halt_status = ml_dbgwrap_halt_cpu_with_state(cpu,
                    NSEC_PER_SEC, &target_cpu_datap->halt_state);
                if ((halt_status < 0) || (halt_status == DBGWRAP_WARN_CPU_OFFLINE))
                    paniclog_append_noflush("Unable to obtain state for cpu %d: %d\n", cpu, halt_status);
                else
                    target_cpu_datap->halt_status = CPU_HALTED_WITH_STATE;
            }
            if (immediate_halt)
                paniclog_append_noflush("Immediate halt requested on all cores\n");
            else
                paniclog_append_noflush("Debugger synchronization timed out; waited %llu nanoseconds\n", DEBUG_ACK_TIMEOUT);
            debug_ack_timeout_count++;
            return KERN_SUCCESS;
        } else {
            DebuggerXCallReturn();
            return KERN_OPERATION_TIMED_OUT;
        }
    } else {
        return KERN_SUCCESS;
    }
}

/*
 * @function DebuggerXCallReturn
 *
 * @abstract Resume normal multicore operation after DebuggerXCallEnter()
 *
 * @discussion This function should be called with debugger lock held.
 */
void
DebuggerXCallReturn(
    void)
{
    cpu_data_t *cpu_data_ptr = getCpuDatap();

    cpu_data_ptr->debugger_active--;
    if (cpu_data_ptr->debugger_active != 0)
        return;

    mp_kdp_trap = 0;
    debugger_sync = 0;

    /* Do we need a barrier here? */
    __builtin_arm_dmb(DMB_ISH);
}

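/*
 * IPI handler run on each CPU that receives the SIGPdebug cross-call. If the
 * interrupted context was in kernel mode, it is saved to the thread's kernel
 * stack save area so it can be backtraced; otherwise the save area is zeroed
 * so machine_trace_thread knows to skip it. The CPU then acknowledges the
 * cross-call by decrementing debugger_sync and spins until mp_kdp_trap clears.
 */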
void
DebuggerXCall(
    void *ctx)
{
    boolean_t save_context = FALSE;
    vm_offset_t kstackptr = 0;
    arm_saved_state_t *regs = (arm_saved_state_t *) ctx;

    if (regs != NULL) {
#if defined(__arm64__)
        save_context = PSR64_IS_KERNEL(get_saved_state_cpsr(regs));
#else
        save_context = PSR_IS_KERNEL(regs->cpsr);
#endif
    }

    kstackptr = current_thread()->machine.kstackptr;
    arm_saved_state_t *state = (arm_saved_state_t *)kstackptr;

    if (save_context) {
        /* Save the interrupted context before acknowledging the signal */
        *state = *regs;
    } else if (regs) {
        /* zero old state so machine_trace_thread knows not to backtrace it */
        set_saved_state_fp(state, 0);
        set_saved_state_pc(state, 0);
        set_saved_state_lr(state, 0);
        set_saved_state_sp(state, 0);
    }

    (void)hw_atomic_sub(&debugger_sync, 1);
    __builtin_arm_dmb(DMB_ISH);
    while (mp_kdp_trap);

    /* Any cleanup for our pushed context should go here */
}

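/*
 * Debugger entry point for this CPU once the machine has been quiesced:
 * hands control to KDP when MACH_KDP is configured (and to the alternate
 * debugger first, when ALTERNATE_DEBUGGER is enabled).
 */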
void
DebuggerCall(
    unsigned int reason,
    void *ctx)
{
#if !MACH_KDP
#pragma unused(reason,ctx)
#endif /* !MACH_KDP */

#if ALTERNATE_DEBUGGER
    alternate_debugger_enter();
#endif

#if MACH_KDP
    kdp_trap(reason, (struct arm_saved_state *)ctx);
#else
    /* TODO: decide what to do if no debugger config */
#endif
}