+
+ va_end(listp);
+
+ os_log_with_args(OS_LOG_DEFAULT, OS_LOG_TYPE_DEFAULT, fmt, listp2, caller);
+ va_end(listp2);
+#endif
+}
+
+/*
+ * Skip appending log messages to the new logging infrastructure in contexts
+ * where safety is uncertain. These contexts include:
+ * - When we're in the debugger
+ * - We're in a panic
+ * - Interrupts are disabled
+ * - Or Pre-emption is disabled
+ * In all the above cases, it is potentially unsafe to log messages.
+ */
+
+/*
+ * Return TRUE only when it is currently safe to append messages to the
+ * new logging infrastructure: not in the debugger, not in a panic, and
+ * with both preemption and interrupts enabled (see comment above).
+ */
+boolean_t oslog_is_safe(void) {
+	if (debug_mode != 0)
+		return FALSE;
+	if (not_in_kdp != 1)
+		return FALSE;
+	if (get_preemption_level() != 0)
+		return FALSE;
+	return (ml_get_interrupts_enabled() == TRUE);
+}
+
+/*
+ * Append a single character to the kernel debug buffer.  The character
+ * is silently dropped when no buffer is configured or when the buffer
+ * is already full.
+ */
+void
+debug_putc(char c)
+{
+	if (debug_buf_size == 0)
+		return;
+	if ((debug_buf_ptr - debug_buf_addr) >= (int)debug_buf_size)
+		return;
+	*debug_buf_ptr++ = c;
+}
+
+/* In-place packing routines -- inefficient, but they're called at most once.
+ * Assumes "buflen" is a multiple of 8.
+ */
+
+/*
+ * Pack 8-bit characters into 7-bit form in place, 8 input bytes at a
+ * time via the pasc_t overlay.  Returns the number of packed output
+ * bytes written to inbuf.
+ */
+int packA(char *inbuf, uint32_t length, uint32_t buflen)
+{
+	pasc_t pack;
+	unsigned int src, dst;
+
+	/* Round length up to a multiple of 8, capped at buflen. */
+	length = MIN(((length + 7) & ~7), buflen);
+
+	for (src = 0, dst = 0; src < length; src += 8, dst += 7) {
+		pack.a = inbuf[src];
+		pack.b = inbuf[src + 1];
+		pack.c = inbuf[src + 2];
+		pack.d = inbuf[src + 3];
+		pack.e = inbuf[src + 4];
+		pack.f = inbuf[src + 5];
+		pack.g = inbuf[src + 6];
+		pack.h = inbuf[src + 7];
+		bcopy((char *)&pack, inbuf + dst, 7);
+	}
+	return dst;
+}
+
+/*
+ * Reverse of packA: expand 7-bit packed data back to 8-bit characters,
+ * in place.  "length" on entry is the packed byte count; it is scaled
+ * back up to the unpacked length.  Assumes the caller's buffer is large
+ * enough to hold the expanded data (the inverse of packA's contract).
+ */
+void unpackA(char *inbuf, uint32_t length)
+{
+	pasc_t packs;
+	unsigned i = 0;
+	length = (length * 8)/7;
+
+	while (i < length) {
+		/* Snapshot the next 7-byte packed group before shifting. */
+		packs = *(pasc_t *)&inbuf[i];
+		/* Slide the remaining packed data right by one byte to make
+		 * room for this group's eighth output character; bcopy must
+		 * tolerate overlap here (memmove semantics). */
+		bcopy(&inbuf[i+7], &inbuf[i+8], MAX(0, (int) (length - i - 8)));
+		inbuf[i++] = packs.a;
+		inbuf[i++] = packs.b;
+		inbuf[i++] = packs.c;
+		inbuf[i++] = packs.d;
+		inbuf[i++] = packs.e;
+		inbuf[i++] = packs.f;
+		inbuf[i++] = packs.g;
+		inbuf[i++] = packs.h;
+	}
+}
+
+extern void *proc_name_address(void *p);
+
+static void
+panic_display_process_name(void) {
+	/*
+	 * Best-effort recovery of the current BSD process name for the
+	 * panic log.  Every dereference goes through ml_nofault_copy()
+	 * because the thread/task/proc structures may be corrupt at panic
+	 * time.  Because of scoping issues, len(p_comm) from proc_t is
+	 * hard coded here (16 chars + NUL).
+	 *
+	 * BUG FIX: the address-of expression had been mangled to
+	 * "¤t_thread()" (an "&curren" -> U+00A4 HTML-entity corruption);
+	 * restored to "&current_thread()->task".
+	 */
+	char proc_name[17] = "Unknown";
+	task_t ctask = 0;
+	void *cbsd_info = 0;
+
+	if (ml_nofault_copy((vm_offset_t)&current_thread()->task, (vm_offset_t)&ctask, sizeof(task_t)) == sizeof(task_t)) {
+		if (ml_nofault_copy((vm_offset_t)&ctask->bsd_info, (vm_offset_t)&cbsd_info, sizeof(cbsd_info)) == sizeof(cbsd_info)) {
+			if (cbsd_info && (ml_nofault_copy((vm_offset_t)proc_name_address(cbsd_info), (vm_offset_t)&proc_name, sizeof(proc_name)) > 0)) {
+				proc_name[sizeof(proc_name) - 1] = '\0';
+			}
+		}
+	}
+	kdb_printf("\nBSD process name corresponding to current thread: %s\n", proc_name);
+}
+
+/* Nonzero once a panic string has been set, i.e. a panic is underway. */
+unsigned panic_active(void) {
+	return (panicstr != NULL);
+}
+
+/* Record the machine model string for later display at panic time. */
+void
+populate_model_name(char *model_string)
+{
+	strlcpy(model_name, model_string, sizeof(model_name));
+}
+
+/* Print the system model name (if populated) to the panic log. */
+void panic_display_model_name(void) {
+	char local_model[sizeof(model_name)];
+
+	/* Copy under fault protection; bail out silently on failure. */
+	if (ml_nofault_copy((vm_offset_t)&model_name, (vm_offset_t)&local_model, sizeof(model_name)) != sizeof(model_name)) {
+		return;
+	}
+
+	local_model[sizeof(local_model) - 1] = '\0';
+	if (local_model[0] == 0) {
+		return;
+	}
+	kdb_printf("System model name: %s\n", local_model);
+}
+
+/* Print the running kernel's UUID string, when one is available. */
+void panic_display_kernel_uuid(void) {
+	char uuid_copy[sizeof(kernel_uuid_string)];
+
+	/* Copy under fault protection; skip output if the copy fails. */
+	if (ml_nofault_copy((vm_offset_t)&kernel_uuid_string, (vm_offset_t)&uuid_copy, sizeof(kernel_uuid_string)) != sizeof(kernel_uuid_string)) {
+		return;
+	}
+	if (uuid_copy[0] != '\0') {
+		kdb_printf("Kernel UUID: %s\n", uuid_copy);
+	}
+}
+
+/* Print the KASLR slide and kernel text base when a slide is applied. */
+void panic_display_kernel_aslr(void) {
+	if (vm_kernel_slide == 0) {
+		return;
+	}
+	kdb_printf("Kernel slide: 0x%016lx\n", (unsigned long) vm_kernel_slide);
+	kdb_printf("Kernel text base: %p\n", (void *) vm_kernel_stext);
+}
+
+/* Print the base address of the __HIB segment; x86 variants only. */
+void panic_display_hibb(void) {
+#if defined(__i386__) || defined (__x86_64__)
+	kdb_printf("__HIB text base: %p\n", (void *) vm_hib_base);
+#endif
+}
+
+/* Print system uptime (nanoseconds since boot) to the panic log. */
+static void panic_display_uptime(void) {
+	uint64_t uptime_ns;
+
+	absolutetime_to_nanoseconds(mach_absolute_time(), &uptime_ns);
+	kdb_printf("\nSystem uptime in nanoseconds: %llu\n", uptime_ns);
+}
+
+extern const char version[];
+extern char osversion[];
+
+static volatile uint32_t config_displayed = 0;
+
+/*
+ * Dump a summary of the system configuration (boot-args, OS and kernel
+ * versions, kernel UUID, ASLR slide, model, uptime, zone usage, kext
+ * lists) into the panic log.  The process name is printed on every
+ * call; everything else is guarded by an atomic one-shot flag so it is
+ * emitted at most once even if multiple CPUs panic concurrently.
+ */
+__private_extern__ void panic_display_system_configuration(void) {
+	char boot_args_copy[256];
+
+	panic_display_process_name();
+	if (!OSCompareAndSwap(0, 1, &config_displayed)) {
+		return;
+	}
+
+	if (strlcpy(boot_args_copy, PE_boot_args(), sizeof(boot_args_copy))) {
+		kdb_printf("Boot args: %s\n", boot_args_copy);
+	}
+	kdb_printf("\nMac OS version:\n%s\n",
+	    (osversion[0] != 0) ? osversion : "Not yet set");
+	kdb_printf("\nKernel version:\n%s\n", version);
+	panic_display_kernel_uuid();
+	panic_display_kernel_aslr();
+	panic_display_hibb();
+	panic_display_pal_info();
+	panic_display_model_name();
+	panic_display_uptime();
+	panic_display_zprint();
+#if CONFIG_ZLEAKS
+	panic_display_ztrace();
+#endif /* CONFIG_ZLEAKS */
+	kext_dump_panic_lists(&kdb_log);
+}
+
+extern unsigned int stack_total;
+extern unsigned long long stack_allocs;
+
+#if defined(__i386__) || defined (__x86_64__)
+extern unsigned int inuse_ptepages_count;
+extern long long alloc_ptepages_count;
+#endif
+
+extern boolean_t panic_include_zprint;
+extern vm_offset_t panic_kext_memory_info;
+extern vm_size_t panic_kext_memory_size;
+
+__private_extern__ void panic_display_zprint()
+{
+ if(panic_include_zprint == TRUE) {
+
+ unsigned int i;
+ struct zone zone_copy;
+
+ kdb_printf("%-20s %10s %10s\n", "Zone Name", "Cur Size", "Free Size");
+ for (i = 0; i < num_zones; i++) {
+ if(ml_nofault_copy((vm_offset_t)(&zone_array[i]), (vm_offset_t)&zone_copy, sizeof(struct zone)) == sizeof(struct zone)) {
+ if(zone_copy.cur_size > (1024*1024)) {
+ kdb_printf("%-20s %10lu %10lu\n",zone_copy.zone_name, (uintptr_t)zone_copy.cur_size,(uintptr_t)(zone_copy.countfree * zone_copy.elem_size));
+ }
+ }
+ }
+
+ kdb_printf("%-20s %10lu\n", "Kernel Stacks", (uintptr_t)(kernel_stack_size * stack_total));
+
+#if defined(__i386__) || defined (__x86_64__)
+ kdb_printf("%-20s %10lu\n", "PageTables",(uintptr_t)(PAGE_SIZE * inuse_ptepages_count));