/*
- * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
*
* @Apple_LICENSE_HEADER_START@
- *
+ *
* The contents of this file constitute Original Code as defined in and
* are subject to the Apple Public Source License Version 1.1 (the
* "License"). You may not use this file except in compliance with the
* License. Please obtain a copy of the License at
* http://www.apple.com/publicsource and read it before using this file.
- *
+ *
* This Original Code and all software distributed under the License are
* distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
* License for the specific language governing rights and limitations
* under the License.
- *
+ *
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
-
-#include <machine/spl.h>
-
#include <sys/errno.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/vm.h>
#include <sys/sysctl.h>
#include <sys/kdebug.h>
+#include <sys/kauth.h>
+#include <sys/ktrace.h>
#include <sys/sysproto.h>
#include <sys/bsdtask_info.h>
#include <sys/random.h>
-#include <sys/stackshot.h>
-#define HZ 100
#include <mach/clock_types.h>
#include <mach/mach_types.h>
#include <mach/mach_time.h>
+#include <mach/mach_vm.h>
+#include <machine/atomic.h>
#include <machine/machine_routines.h>
+#include <mach/machine.h>
+#include <mach/vm_map.h>
+
#if defined(__i386__) || defined(__x86_64__)
#include <i386/rtclock_protos.h>
#include <i386/mp.h>
#include <i386/machine_routines.h>
+#include <i386/tsc.h>
#endif
#include <kern/clock.h>
#include <kern/task.h>
#include <kern/debug.h>
#include <kern/kalloc.h>
+#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/assert.h>
#include <kern/telemetry.h>
#include <kern/sched_prim.h>
#include <vm/vm_kern.h>
#include <sys/lock.h>
+#include <kperf/kperf.h>
+#include <pexpert/device_tree.h>
#include <sys/malloc.h>
#include <sys/mcache.h>
-#include <sys/kauth.h>
#include <sys/vnode.h>
#include <sys/vnode_internal.h>
#include <sys/fcntl.h>
#include <sys/file_internal.h>
#include <sys/ubc.h>
-#include <sys/param.h> /* for isset() */
+#include <sys/param.h> /* for isset() */
-#include <mach/mach_host.h> /* for host_info() */
+#include <mach/mach_host.h> /* for host_info() */
#include <libkern/OSAtomic.h>
#include <machine/pal_routines.h>
-extern boolean_t kdebug_serial;
-#if KDEBUG_MOJO_TRACE
-#include <sys/kdebugevents.h>
-static void kdebug_serial_print( /* forward */
- uint32_t, uint32_t, uint64_t,
- uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
-#endif
+extern unsigned int wake_nkdbufs;
+extern unsigned int trace_wrap;
/*
* IOP(s)
*
- * https://coreoswiki.apple.com/wiki/pages/U6z3i0q9/Consistent_Logging_Implementers_Guide.html
- *
* IOP(s) are auxiliary cores that want to participate in kdebug event logging.
* They are registered dynamically. Each is assigned a cpu_id at registration.
*
*/
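+/*
+ * Illustrative sketch (not code from this file): an IOP participates by
+ * registering a callback and then feeding events in with its assigned
+ * cpu_id. This assumes the registration API declared in <sys/kdebug.h>,
+ * and my_iop_callback is a hypothetical handler:
+ *
+ *     kd_callback_t cb = { .func = my_iop_callback, .iop_name = "MyIOP" };
+ *     int cpu_id = kernel_debug_register_callback(cb);
+ *     // ... for each event produced by the auxiliary core:
+ *     kernel_debug_enter(cpu_id, debugid, timestamp,
+ *         arg1, arg2, arg3, arg4, threadid);
+ */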
typedef struct kd_iop {
- kd_callback_t callback;
- uint32_t cpu_id;
- uint64_t last_timestamp; /* Prevent timer rollback */
- struct kd_iop* next;
+ kd_callback_t callback;
+ uint32_t cpu_id;
+ uint64_t last_timestamp; /* Prevent timer rollback */
+ struct kd_iop* next;
} kd_iop_t;
static kd_iop_t* kd_iops = NULL;
-/* XXX should have prototypes, but Mach does not provide one */
-void task_act_iterate_wth_args(task_t, void(*)(thread_t, void *), void *);
-int cpu_number(void); /* XXX <machine/...> include path broken */
-void commpage_update_kdebug_enable(void); /* XXX sign */
+/*
+ * Typefilter(s)
+ *
+ * A typefilter is an 8KB bitmap used to selectively filter events
+ * being recorded. It is able to individually address every class & subclass.
+ *
+ * There is a shared typefilter in the kernel which is lazily allocated. Once
+ * allocated, the shared typefilter is never deallocated. The shared typefilter
+ * is also mapped on demand into userspace processes that invoke kdebug_trace
+ * API from Libsyscall. When mapped into a userspace process, the memory is
+ * read only, and does not have a fixed address.
+ *
+ * It is a requirement that the kernel's shared typefilter always pass DBG_TRACE
+ * events. This is enforced automatically, by having the needed bits set any
+ * time the shared typefilter is mutated.
+ */
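+/*
+ * Illustration (a sketch, not a kernel interface): a debugid passes the
+ * typefilter when the bit indexed by its class/subclass (CSC) pair is
+ * set. With 256 classes x 256 subclasses at 1 bit each, the bitmap is
+ * 64Kbit, exactly 8KB:
+ *
+ *     uint16_t csc = KDBG_EXTRACT_CSC(debugid); // (class << 8) | subclass
+ *     bool allowed = isset(typefilter, csc);
+ */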
+
+typedef uint8_t* typefilter_t;
+
+static typefilter_t kdbg_typefilter;
+static mach_port_t kdbg_typefilter_memory_entry;
+
+/*
+ * There are 3 combinations of page sizes:
+ *
+ * 4KB / 4KB
+ * 4KB / 16KB
+ * 16KB / 16KB
+ *
+ * The typefilter is exactly 8KB. In the first two scenarios, we would like
+ * to use 2 pages exactly; in the third scenario we must make certain that
+ * a full page is allocated so we do not inadvertently share 8KB of random
+ * data to userspace. The round_page_32 macro rounds to kernel page size.
+ */
+#define TYPEFILTER_ALLOC_SIZE MAX(round_page_32(KDBG_TYPEFILTER_BITMAP_SIZE), KDBG_TYPEFILTER_BITMAP_SIZE)
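+/*
+ * Worked example for the macro above: with 4KB kernel pages,
+ * round_page_32(8KB) == 8KB, so the typefilter occupies exactly two
+ * pages; with 16KB kernel pages, round_page_32(8KB) == 16KB, so the
+ * MAX() keeps the allocation at one full page and typefilter_create()
+ * zeroes the trailing 8KB that would otherwise be shared to userspace.
+ */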
+
+static typefilter_t
+typefilter_create(void)
+{
+ typefilter_t tf;
+ if (KERN_SUCCESS == kmem_alloc(kernel_map, (vm_offset_t*)&tf, TYPEFILTER_ALLOC_SIZE, VM_KERN_MEMORY_DIAG)) {
+ memset(&tf[KDBG_TYPEFILTER_BITMAP_SIZE], 0, TYPEFILTER_ALLOC_SIZE - KDBG_TYPEFILTER_BITMAP_SIZE);
+ return tf;
+ }
+ return NULL;
+}
+
+static void
+typefilter_deallocate(typefilter_t tf)
+{
+ assert(tf != NULL);
+ assert(tf != kdbg_typefilter);
+ kmem_free(kernel_map, (vm_offset_t)tf, TYPEFILTER_ALLOC_SIZE);
+}
+
+static void
+typefilter_copy(typefilter_t dst, typefilter_t src)
+{
+ assert(src != NULL);
+ assert(dst != NULL);
+ memcpy(dst, src, KDBG_TYPEFILTER_BITMAP_SIZE);
+}
+
+static void
+typefilter_reject_all(typefilter_t tf)
+{
+ assert(tf != NULL);
+ memset(tf, 0, KDBG_TYPEFILTER_BITMAP_SIZE);
+}
+
+static void
+typefilter_allow_all(typefilter_t tf)
+{
+ assert(tf != NULL);
+ memset(tf, ~0, KDBG_TYPEFILTER_BITMAP_SIZE);
+}
+
+static void
+typefilter_allow_class(typefilter_t tf, uint8_t class)
+{
+ assert(tf != NULL);
+ const uint32_t BYTES_PER_CLASS = 256 / 8; // 256 subclasses, 1 bit each
+ memset(&tf[class * BYTES_PER_CLASS], 0xFF, BYTES_PER_CLASS);
+}
+
+static void
+typefilter_allow_csc(typefilter_t tf, uint16_t csc)
+{
+ assert(tf != NULL);
+ setbit(tf, csc);
+}
+
+static bool
+typefilter_is_debugid_allowed(typefilter_t tf, uint32_t id)
+{
+ assert(tf != NULL);
+ return isset(tf, KDBG_EXTRACT_CSC(id));
+}
+
+static mach_port_t
+typefilter_create_memory_entry(typefilter_t tf)
+{
+ assert(tf != NULL);
+
+ mach_port_t memory_entry = MACH_PORT_NULL;
+ memory_object_size_t size = TYPEFILTER_ALLOC_SIZE;
+
+ mach_make_memory_entry_64(kernel_map,
+ &size,
+ (memory_object_offset_t)tf,
+ VM_PROT_READ,
+ &memory_entry,
+ MACH_PORT_NULL);
+
+ return memory_entry;
+}
+
+static int kdbg_copyin_typefilter(user_addr_t addr, size_t size);
+static void kdbg_enable_typefilter(void);
+static void kdbg_disable_typefilter(void);
+
+/*
+ * External prototypes
+ */
+
+void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *);
+void commpage_update_kdebug_state(void); /* XXX sign */
+
+extern int log_leaks;
+
+/*
+ * This flag is for testing purposes only -- it's highly experimental and tools
+ * have not been updated to support it.
+ */
+static bool kdbg_continuous_time = false;
+
+static inline uint64_t
+kdbg_timestamp(void)
+{
+ if (kdbg_continuous_time) {
+ return mach_continuous_time();
+ } else {
+ return mach_absolute_time();
+ }
+}
+
+static int kdbg_debug = 0;
-/* XXX should probably be static, but it's debugging code... */
-int kdbg_read(user_addr_t, size_t *, vnode_t, vfs_context_t, uint32_t);
-void kdbg_control_chud(int, void *);
int kdbg_control(int *, u_int, user_addr_t, size_t *);
-int kdbg_readcpumap(user_addr_t, size_t *);
-int kdbg_readcurcpumap(user_addr_t, size_t *);
-int kdbg_readthrmap(user_addr_t, size_t *, vnode_t, vfs_context_t);
-int kdbg_readthrmap_v3(user_addr_t, size_t *, int);
-int kdbg_readcurthrmap(user_addr_t, size_t *);
-int kdbg_getreg(kd_regtype *);
-int kdbg_setreg(kd_regtype *);
-int kdbg_setrtcdec(kd_regtype *);
-int kdbg_setpidex(kd_regtype *);
-int kdbg_setpid(kd_regtype *);
-void kdbg_thrmap_init(void);
-int kdbg_reinit(boolean_t);
-int kdbg_bootstrap(boolean_t);
+
+static int kdbg_read(user_addr_t, size_t *, vnode_t, vfs_context_t, uint32_t);
+static int kdbg_readcpumap(user_addr_t, size_t *);
+static int kdbg_readthrmap_v3(user_addr_t, size_t, int);
+static int kdbg_readcurthrmap(user_addr_t, size_t *);
+static int kdbg_setreg(kd_regtype *);
+static int kdbg_setpidex(kd_regtype *);
+static int kdbg_setpid(kd_regtype *);
+static void kdbg_thrmap_init(void);
+static int kdbg_reinit(bool);
+static int kdbg_bootstrap(bool);
+static int kdbg_test(size_t flavor);
+
+static int kdbg_write_v1_header(bool write_thread_map, vnode_t vp, vfs_context_t ctx);
+static int kdbg_write_thread_map(vnode_t vp, vfs_context_t ctx);
+static int kdbg_copyout_thread_map(user_addr_t buffer, size_t *buffer_size);
+static void kdbg_clear_thread_map(void);
+
+static bool kdbg_wait(uint64_t timeout_ms, bool locked_wait);
+static void kdbg_wakeup(void);
int kdbg_cpumap_init_internal(kd_iop_t* iops, uint32_t cpu_count,
- uint8_t** cpumap, uint32_t* cpumap_size);
+ uint8_t** cpumap, uint32_t* cpumap_size);
-kd_threadmap* kdbg_thrmap_init_internal(unsigned int count,
- unsigned int *mapsize,
- unsigned int *mapcount);
+static kd_threadmap *kdbg_thrmap_init_internal(size_t max_count,
+ vm_size_t *map_size, vm_size_t *map_count);
-static boolean_t kdebug_current_proc_enabled(uint32_t debugid);
-static boolean_t kdebug_debugid_enabled(uint32_t debugid);
+static bool kdebug_current_proc_enabled(uint32_t debugid);
static errno_t kdebug_check_trace_string(uint32_t debugid, uint64_t str_id);
int kdbg_write_v3_header(user_addr_t, size_t *, int);
int kdbg_write_v3_chunk_header(user_addr_t buffer, uint32_t tag,
- uint32_t sub_tag, uint64_t length,
- vnode_t vp, vfs_context_t ctx);
+ uint32_t sub_tag, uint64_t length,
+ vnode_t vp, vfs_context_t ctx);
user_addr_t kdbg_write_v3_event_chunk_header(user_addr_t buffer, uint32_t tag,
- uint64_t length, vnode_t vp,
- vfs_context_t ctx);
+ uint64_t length, vnode_t vp,
+ vfs_context_t ctx);
-static int kdbg_enable_typefilter(void);
-static int kdbg_disable_typefilter(void);
-static int kdbg_allocate_typefilter(void);
-static int kdbg_deallocate_typefilter(void);
+// Helper functions
-static int create_buffers(boolean_t);
+static int create_buffers(bool);
static void delete_buffers(void);
+extern int tasks_count;
+extern int threads_count;
extern void IOSleep(int);
/* trace enable status */
unsigned int kdebug_enable = 0;
/* A static buffer to record events prior to the start of regular logging */
-#define KD_EARLY_BUFFER_MAX 64
-static kd_buf kd_early_buffer[KD_EARLY_BUFFER_MAX];
-static int kd_early_index = 0;
-static boolean_t kd_early_overflow = FALSE;
-#define SLOW_NOLOG 0x01
-#define SLOW_CHECKS 0x02
-#define SLOW_CHUD 0x08
+#define KD_EARLY_BUFFER_SIZE (16 * 1024)
+#define KD_EARLY_BUFFER_NBUFS (KD_EARLY_BUFFER_SIZE / sizeof(kd_buf))
+#if defined(__x86_64__)
+__attribute__((aligned(KD_EARLY_BUFFER_SIZE)))
+static kd_buf kd_early_buffer[KD_EARLY_BUFFER_NBUFS];
+#else /* defined(__x86_64__) */
+/*
+ * On ARM, the space for this is carved out by osfmk/arm/data.s -- clang
+ * has problems aligning to greater than 4K.
+ */
+extern kd_buf kd_early_buffer[KD_EARLY_BUFFER_NBUFS];
+#endif /* !defined(__x86_64__) */
+
+static unsigned int kd_early_index = 0;
+static bool kd_early_overflow = false;
+static bool kd_early_done = false;
-#define EVENTS_PER_STORAGE_UNIT 2048
-#define MIN_STORAGE_UNITS_PER_CPU 4
+#define SLOW_NOLOG 0x01
+#define SLOW_CHECKS 0x02
+
+#define EVENTS_PER_STORAGE_UNIT 2048
+#define MIN_STORAGE_UNITS_PER_CPU 4
#define POINTER_FROM_KDS_PTR(x) (&kd_bufs[x.buffer_index].kdsb_addr[x.offset])
};
struct kd_storage {
- union kds_ptr kds_next;
+ union kds_ptr kds_next;
uint32_t kds_bufindx;
uint32_t kds_bufcnt;
uint32_t kds_readlast;
- boolean_t kds_lostevents;
+ bool kds_lostevents;
uint64_t kds_timestamp;
- kd_buf kds_records[EVENTS_PER_STORAGE_UNIT];
+ kd_buf kds_records[EVENTS_PER_STORAGE_UNIT];
};
-#define MAX_BUFFER_SIZE (1024 * 1024 * 128)
-#define N_STORAGE_UNITS_PER_BUFFER (MAX_BUFFER_SIZE / sizeof(struct kd_storage))
+#define MAX_BUFFER_SIZE (1024 * 1024 * 128)
+#define N_STORAGE_UNITS_PER_BUFFER (MAX_BUFFER_SIZE / sizeof(struct kd_storage))
+static_assert(N_STORAGE_UNITS_PER_BUFFER <= 0x7ff,
+ "shoudn't overflow kds_ptr.offset");
struct kd_storage_buffers {
- struct kd_storage *kdsb_addr;
- uint32_t kdsb_size;
+ struct kd_storage *kdsb_addr;
+ uint32_t kdsb_size;
};
#define KDS_PTR_NULL 0xffffffff
struct kd_storage_buffers *kd_bufs = NULL;
-int n_storage_units = 0;
-int n_storage_buffers = 0;
-int n_storage_threshold = 0;
-int kds_waiter = 0;
+int n_storage_units = 0;
+unsigned int n_storage_buffers = 0;
+int n_storage_threshold = 0;
+int kds_waiter = 0;
#pragma pack(0)
struct kd_bufinfo {
union kds_ptr kd_list_head;
union kds_ptr kd_list_tail;
- boolean_t kd_lostevents;
+ bool kd_lostevents;
uint32_t _pad;
uint64_t kd_prev_timebase;
uint32_t num_bufs;
-} __attribute__(( aligned(MAX_CPU_CACHE_LINE_SIZE) ));
+} __attribute__((aligned(MAX_CPU_CACHE_LINE_SIZE)));
/*
*/
struct kd_ctrl_page_t {
union kds_ptr kds_free_list;
- uint32_t enabled :1;
- uint32_t _pad0 :31;
- int kds_inuse_count;
+ uint32_t enabled :1;
+ uint32_t _pad0 :31;
+ int kds_inuse_count;
uint32_t kdebug_flags;
uint32_t kdebug_slowcheck;
+ uint64_t oldest_time;
/*
* The number of kd_bufinfo structs allocated may not match the current
* number of active cpus. We capture the iops list head at initialization
*/
kd_iop_t* kdebug_iops;
uint32_t kdebug_cpus;
-} kd_ctrl_page = { .kds_free_list = {.raw = KDS_PTR_NULL}, .kdebug_slowcheck = SLOW_NOLOG };
+} kd_ctrl_page = {
+ .kds_free_list = {.raw = KDS_PTR_NULL},
+ .kdebug_slowcheck = SLOW_NOLOG,
+ .oldest_time = 0
+};
#pragma pack()
struct kd_bufinfo *kdbip = NULL;
-#define KDCOPYBUF_COUNT 8192
-#define KDCOPYBUF_SIZE (KDCOPYBUF_COUNT * sizeof(kd_buf))
+#define KDCOPYBUF_COUNT 8192
+#define KDCOPYBUF_SIZE (KDCOPYBUF_COUNT * sizeof(kd_buf))
-#define PAGE_4KB 4096
-#define PAGE_16KB 16384
+#define PAGE_4KB 4096
+#define PAGE_16KB 16384
kd_buf *kdcopybuf = NULL;
-boolean_t kdlog_bg_trace = FALSE;
-boolean_t kdlog_bg_trace_running = FALSE;
-unsigned int bg_nkdbufs = 0;
-
unsigned int nkdbufs = 0;
-unsigned int kdlog_beg=0;
-unsigned int kdlog_end=0;
-unsigned int kdlog_value1=0;
-unsigned int kdlog_value2=0;
-unsigned int kdlog_value3=0;
-unsigned int kdlog_value4=0;
+unsigned int kdlog_beg = 0;
+unsigned int kdlog_end = 0;
+unsigned int kdlog_value1 = 0;
+unsigned int kdlog_value2 = 0;
+unsigned int kdlog_value3 = 0;
+unsigned int kdlog_value4 = 0;
static lck_spin_t * kdw_spin_lock;
static lck_spin_t * kds_spin_lock;
-static lck_mtx_t * kd_trace_mtx_sysctl;
-static lck_grp_t * kd_trace_mtx_sysctl_grp;
-static lck_attr_t * kd_trace_mtx_sysctl_attr;
-static lck_grp_attr_t *kd_trace_mtx_sysctl_grp_attr;
-
-extern kern_return_t stack_snapshot2(int pid, user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t flags, int32_t *retval);
-
-#if CONFIG_TELEMETRY
-extern kern_return_t stack_microstackshot(user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t flags, int32_t *retval);
-#endif /* CONFIG_TELEMETRY */
-
-extern kern_return_t kern_stack_snapshot_with_reason(char* reason);
-
-extern kern_return_t kern_stack_snapshot_internal(int stackshot_config_version, void *stackshot_config, size_t stackshot_config_size, boolean_t stackshot_from_user);
-
-extern kern_return_t stack_snapshot_from_kernel_internal(int pid, void *buf, uint32_t size, uint32_t flags, unsigned *bytes_traced);
-
-int stack_snapshot_from_kernel(pid_t pid, void *buf, uint32_t size, uint32_t flags, unsigned *bytes_traced);
kd_threadmap *kd_mapptr = 0;
-unsigned int kd_mapsize = 0;
-unsigned int kd_mapcount = 0;
-
-off_t RAW_file_offset = 0;
-int RAW_file_written = 0;
+vm_size_t kd_mapsize = 0;
+vm_size_t kd_mapcount = 0;
-#define RAW_FLUSH_SIZE (2 * 1024 * 1024)
+off_t RAW_file_offset = 0;
+int RAW_file_written = 0;
-pid_t global_state_pid = -1; /* Used to control exclusive use of kd_buffer */
+#define RAW_FLUSH_SIZE (2 * 1024 * 1024)
/*
* A globally increasing counter for identifying strings in trace. Starts at
*/
static uint64_t g_str_id_signature = (0x70acULL << STR_ID_SIG_OFFSET);
-#define INTERRUPT 0x01050000
-#define MACH_vmfault 0x01300008
-#define BSC_SysCall 0x040c0000
-#define MACH_SysCall 0x010c0000
+#define INTERRUPT 0x01050000
+#define MACH_vmfault 0x01300008
+#define BSC_SysCall 0x040c0000
+#define MACH_SysCall 0x010c0000
-/* task to string structure */
-struct tts
-{
- task_t task; /* from procs task */
- pid_t pid; /* from procs p_pid */
- char task_comm[20]; /* from procs p_comm */
+struct kd_task_name {
+ task_t ktn_task;
+ pid_t ktn_pid;
+ char ktn_name[20];
};
-typedef struct tts tts_t;
-
-struct krt
-{
- kd_threadmap *map; /* pointer to the map buffer */
- int count;
- int maxcount;
- struct tts *atts;
+struct kd_resolver {
+ kd_threadmap *krs_map;
+ vm_size_t krs_count;
+ vm_size_t krs_maxcount;
+ struct kd_task_name *krs_task;
};
-typedef struct krt krt_t;
-
-/* This is for the CHUD toolkit call */
-typedef void (*kd_chudhook_fn) (uint32_t debugid, uintptr_t arg1,
- uintptr_t arg2, uintptr_t arg3,
- uintptr_t arg4, uintptr_t arg5);
-
-volatile kd_chudhook_fn kdebug_chudhook = 0; /* pointer to CHUD toolkit function */
-
-static uint8_t *type_filter_bitmap;
-
/*
- * This allows kperf to swap out the global state pid when kperf ownership is
- * passed from one process to another. It checks the old global state pid so
- * that kperf can't accidentally steal control of trace when a non-kperf trace user has
- * control of trace.
+ * TRACE file formats...
+ *
+ * RAW_VERSION0
+ *
+ * uint32_t #threadmaps
+ * kd_threadmap[]
+ * kd_buf[]
+ *
+ * RAW_VERSION1
+ *
+ * RAW_header, with version_no set to RAW_VERSION1
+ * kd_threadmap[]
+ * Empty space to pad alignment to the nearest page boundary.
+ * kd_buf[]
+ *
+ * RAW_VERSION1+
+ *
+ * RAW_header, with version_no set to RAW_VERSION1
+ * kd_threadmap[]
+ * kd_cpumap_header, with version_no set to RAW_VERSION1
+ * kd_cpumap[]
+ * Empty space to pad alignment to the nearest page boundary.
+ * kd_buf[]
+ *
+ * V1+ implementation details...
+ *
+ * It would have been nice to add the cpumap data "correctly", but there were
+ * several obstacles. Existing code attempts to parse both V1 and V0 files.
+ * Because V0 has no versioning or header, the test looks like this:
+ *
+ * // Read header
+ * if (header.version_no != RAW_VERSION1) { // Assume V0 }
+ *
+ * If we add a VERSION2 file format, all existing code is going to treat that
+ * as a VERSION0 file when reading it, and crash terribly when trying to read
+ * RAW_VERSION2 threadmap entries.
+ *
+ * To differentiate between a V1 and V1+ file, read as V1 until you reach
+ * the padding bytes. Then:
+ *
+ * boolean_t is_v1plus = FALSE;
+ * if (padding_bytes >= sizeof(kd_cpumap_header)) {
+ * kd_cpumap_header header = // read header;
+ * if (header.version_no == RAW_VERSION1) {
+ * is_v1plus = TRUE;
+ * }
+ * }
+ *
*/
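+/*
+ * Reader sketch for the V1/V1+ layout above (round_up, page_size, and the
+ * RAW_header fields are assumed names): after the RAW_header and the
+ * thread map, everything up to the next page boundary is padding, and a
+ * cpumap may be embedded there:
+ *
+ *     off_t here = sizeof(RAW_header) +
+ *         header.thread_count * sizeof(kd_threadmap);
+ *     size_t padding_bytes = round_up(here, page_size) - here;
+ *     // then apply the version_no test shown above to detect V1+
+ */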
-void
-kdbg_swap_global_state_pid(pid_t old_pid, pid_t new_pid);
-
-void
-kdbg_swap_global_state_pid(pid_t old_pid, pid_t new_pid)
-{
- if (!(kd_ctrl_page.kdebug_flags & KDBG_LOCKINIT))
- return;
- lck_mtx_lock(kd_trace_mtx_sysctl);
+#define RAW_VERSION3 0x00001000
- if (old_pid == global_state_pid)
- global_state_pid = new_pid;
+// Version 3 header
+// The header chunk has the tag 0x00001000 which also serves as a magic word
+// that identifies the file as a version 3 trace file. The header payload is
+// a set of fixed fields followed by a variable number of sub-chunks:
+/*
+ * ____________________________________________________________________________
+ | Offset | Size | Field |
+ | ----------------------------------------------------------------------------
+ | 0 | 4 | Tag (0x00001000) |
+ | 4 | 4 | Sub-tag. Represents the version of the header. |
+ | 8 | 8 | Length of header payload (40+8x) |
+ | 16 | 8 | Time base info. Two 32-bit numbers, numer/denom, |
+ | | | for converting timestamps to nanoseconds. |
+ | 24 | 8 | Timestamp of trace start. |
+ | 32 | 8 | Wall time seconds since Unix epoch. |
+ | | | As returned by gettimeofday(). |
+ | 40 | 4 | Wall time microseconds. As returned by gettimeofday(). |
+ | 44 | 4 | Local time zone offset in minutes. ( " ) |
+ | 48 | 4 | Type of daylight savings time correction to apply. ( " ) |
+ | 52 | 4 | Flags. 1 = 64-bit. Remaining bits should be written |
+ | | | as 0 and ignored when reading. |
+ | 56 | 8x | Variable number of sub-chunks. None are required. |
+ | | | Ignore unknown chunks. |
+ | ----------------------------------------------------------------------------
+ */
+// NOTE: The header sub-chunks are considered part of the header chunk,
+// so they must be included in the header chunk's length field.
+// The CPU map is an optional sub-chunk of the header chunk. It provides
+// information about the CPUs that are referenced from the trace events.
+typedef struct {
+ uint32_t tag;
+ uint32_t sub_tag;
+ uint64_t length;
+ uint32_t timebase_numer;
+ uint32_t timebase_denom;
+ uint64_t timestamp;
+ uint64_t walltime_secs;
+ uint32_t walltime_usecs;
+ uint32_t timezone_minuteswest;
+ uint32_t timezone_dst;
+ uint32_t flags;
+} __attribute__((packed)) kd_header_v3;
+
+typedef struct {
+ uint32_t tag;
+ uint32_t sub_tag;
+ uint64_t length;
+} __attribute__((packed)) kd_chunk_header_v3;
+
+#define V3_CONFIG 0x00001b00
+#define V3_CPU_MAP 0x00001c00
+#define V3_THREAD_MAP 0x00001d00
+#define V3_RAW_EVENTS 0x00001e00
+#define V3_NULL_CHUNK 0x00002000
+
+// The current version of all kernel-managed chunks is 1.
+// V3_CURRENT_CHUNK_VERSION is provided to simplify the common case where
+// most or all kernel-managed chunks share the same version.
+
+#define V3_CURRENT_CHUNK_VERSION 1
+#define V3_HEADER_VERSION V3_CURRENT_CHUNK_VERSION
+#define V3_CPUMAP_VERSION V3_CURRENT_CHUNK_VERSION
+#define V3_THRMAP_VERSION V3_CURRENT_CHUNK_VERSION
+#define V3_EVENT_DATA_VERSION V3_CURRENT_CHUNK_VERSION
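+/*
+ * Minimal userspace sketch (assumes a file descriptor positioned just past
+ * the kd_header_v3 payload) of walking version 3 chunks; unknown tags are
+ * skipped using their length field, as the format requires:
+ *
+ *     kd_chunk_header_v3 chunk;
+ *     while (read(fd, &chunk, sizeof(chunk)) == (ssize_t)sizeof(chunk)) {
+ *         switch (chunk.tag) {
+ *         case V3_THREAD_MAP:
+ *         case V3_CPU_MAP:
+ *         case V3_RAW_EVENTS:
+ *             // consume chunk.length bytes of payload
+ *             break;
+ *         default:
+ *             lseek(fd, (off_t)chunk.length, SEEK_CUR);
+ *         }
+ *     }
+ */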
- lck_mtx_unlock(kd_trace_mtx_sysctl);
-}
+typedef struct krt krt_t;
static uint32_t
-kdbg_cpu_count(boolean_t early_trace)
+kdbg_cpu_count(bool early_trace)
{
if (early_trace) {
- /*
- * we've started tracing before the IOKit has even
- * started running... just use the static max value
- */
+#if defined(__x86_64__)
return max_ncpus;
+#else /* defined(__x86_64__) */
+ return ml_get_cpu_count();
+#endif /* !defined(__x86_64__) */
}
+#if defined(__x86_64__)
host_basic_info_data_t hinfo;
mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
host_info((host_t)1 /* BSD_HOST */, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
assert(hinfo.logical_cpu_max > 0);
return hinfo.logical_cpu_max;
+#else /* defined(__x86_64__) */
+ return ml_get_topology_info()->max_cpu_id + 1;
+#endif /* !defined(__x86_64__) */
}
#if MACH_ASSERT
+
+static bool
+kdbg_iop_list_is_valid(kd_iop_t* iop)
+{
+ if (iop) {
+ /* Is list sorted by cpu_id? */
+ kd_iop_t* temp = iop;
+ do {
+ assert(!temp->next || temp->next->cpu_id == temp->cpu_id - 1);
+ assert(temp->next || (temp->cpu_id == kdbg_cpu_count(false) || temp->cpu_id == kdbg_cpu_count(true)));
+ } while ((temp = temp->next));
+
+ /* Does each entry have a function and a name? */
+ temp = iop;
+ do {
+ assert(temp->callback.func);
+ assert(strlen(temp->callback.iop_name) < sizeof(temp->callback.iop_name));
+ } while ((temp = temp->next));
+ }
+
+ return true;
+}
+
#endif /* MACH_ASSERT */
static void
}
}
+static lck_grp_t *kdebug_lck_grp = NULL;
+
static void
-kdbg_set_tracing_enabled(boolean_t enabled, uint32_t trace_type)
+kdbg_set_tracing_enabled(bool enabled, uint32_t trace_type)
{
- int s = ml_set_interrupts_enabled(FALSE);
- lck_spin_lock(kds_spin_lock);
+ /*
+ * Drain any events from IOPs before making the state change. On
+ * enabling, this removes any stale events from before tracing. On
+ * disabling, this saves any events up to the point tracing is disabled.
+ */
+ kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_SYNC_FLUSH,
+ NULL);
+
+ int s = ml_set_interrupts_enabled(false);
+ lck_spin_lock_grp(kds_spin_lock, kdebug_lck_grp);
+
if (enabled) {
+ /*
+ * The oldest valid time is now; reject past events from IOPs.
+ */
+ kd_ctrl_page.oldest_time = kdbg_timestamp();
kdebug_enable |= trace_type;
kd_ctrl_page.kdebug_slowcheck &= ~SLOW_NOLOG;
kd_ctrl_page.enabled = 1;
- commpage_update_kdebug_enable();
+ commpage_update_kdebug_state();
} else {
- kdebug_enable &= ~(KDEBUG_ENABLE_TRACE|KDEBUG_ENABLE_PPT);
+ kdebug_enable &= ~(KDEBUG_ENABLE_TRACE | KDEBUG_ENABLE_PPT);
kd_ctrl_page.kdebug_slowcheck |= SLOW_NOLOG;
kd_ctrl_page.enabled = 0;
- commpage_update_kdebug_enable();
+ commpage_update_kdebug_state();
}
lck_spin_unlock(kds_spin_lock);
ml_set_interrupts_enabled(s);
if (enabled) {
- kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_KDEBUG_ENABLED, NULL);
+ kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops,
+ KD_CALLBACK_KDEBUG_ENABLED, NULL);
} else {
- /*
- * If you do not flush the IOP trace buffers, they can linger
- * for a considerable period; consider code which disables and
- * deallocates without a final sync flush.
- */
- kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_KDEBUG_DISABLED, NULL);
- kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_SYNC_FLUSH, NULL);
+ kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops,
+ KD_CALLBACK_KDEBUG_DISABLED, NULL);
}
}
static void
-kdbg_set_flags(int slowflag, int enableflag, boolean_t enabled)
+kdbg_set_flags(int slowflag, int enableflag, bool enabled)
{
- int s = ml_set_interrupts_enabled(FALSE);
- lck_spin_lock(kds_spin_lock);
+ int s = ml_set_interrupts_enabled(false);
+ lck_spin_lock_grp(kds_spin_lock, kdebug_lck_grp);
if (enabled) {
		kd_ctrl_page.kdebug_slowcheck |= slowflag;
		kdebug_enable |= enableflag;
	} else {
		kd_ctrl_page.kdebug_slowcheck &= ~slowflag;
		kdebug_enable &= ~enableflag;
}
-
+
lck_spin_unlock(kds_spin_lock);
ml_set_interrupts_enabled(s);
}
-void
+/*
+ * Disable wrapping and return true if trace wrapped, false otherwise.
+ */
+static bool
disable_wrap(uint32_t *old_slowcheck, uint32_t *old_flags)
{
- int s = ml_set_interrupts_enabled(FALSE);
- lck_spin_lock(kds_spin_lock);
+ bool wrapped;
+ int s = ml_set_interrupts_enabled(false);
+ lck_spin_lock_grp(kds_spin_lock, kdebug_lck_grp);
*old_slowcheck = kd_ctrl_page.kdebug_slowcheck;
*old_flags = kd_ctrl_page.kdebug_flags;
+ wrapped = kd_ctrl_page.kdebug_flags & KDBG_WRAPPED;
kd_ctrl_page.kdebug_flags &= ~KDBG_WRAPPED;
kd_ctrl_page.kdebug_flags |= KDBG_NOWRAP;
lck_spin_unlock(kds_spin_lock);
ml_set_interrupts_enabled(s);
+
+ return wrapped;
}
-void
-enable_wrap(uint32_t old_slowcheck, boolean_t lostevents)
+static void
+enable_wrap(uint32_t old_slowcheck)
{
- int s = ml_set_interrupts_enabled(FALSE);
- lck_spin_lock(kds_spin_lock);
+ int s = ml_set_interrupts_enabled(false);
+ lck_spin_lock_grp(kds_spin_lock, kdebug_lck_grp);
kd_ctrl_page.kdebug_flags &= ~KDBG_NOWRAP;
- if ( !(old_slowcheck & SLOW_NOLOG))
+ if (!(old_slowcheck & SLOW_NOLOG)) {
kd_ctrl_page.kdebug_slowcheck &= ~SLOW_NOLOG;
-
- if (lostevents == TRUE)
- kd_ctrl_page.kdebug_flags |= KDBG_WRAPPED;
+ }
lck_spin_unlock(kds_spin_lock);
ml_set_interrupts_enabled(s);
}
static int
-create_buffers(boolean_t early_trace)
+create_buffers(bool early_trace)
{
- int i;
- int p_buffer_size;
- int f_buffer_size;
- int f_buffers;
- int error = 0;
+ unsigned int i;
+ unsigned int p_buffer_size;
+ unsigned int f_buffer_size;
+ unsigned int f_buffers;
+ int error = 0;
/*
* For the duration of this allocation, trace code will only reference
*/
kd_ctrl_page.kdebug_iops = kd_iops;
+ assert(kdbg_iop_list_is_valid(kd_ctrl_page.kdebug_iops));
/*
* If the list is valid, it is sorted, newest -> oldest. Each iop entry
goto out;
}
- if (nkdbufs < (kd_ctrl_page.kdebug_cpus * EVENTS_PER_STORAGE_UNIT * MIN_STORAGE_UNITS_PER_CPU))
+ if (nkdbufs < (kd_ctrl_page.kdebug_cpus * EVENTS_PER_STORAGE_UNIT * MIN_STORAGE_UNITS_PER_CPU)) {
n_storage_units = kd_ctrl_page.kdebug_cpus * MIN_STORAGE_UNITS_PER_CPU;
- else
+ } else {
n_storage_units = nkdbufs / EVENTS_PER_STORAGE_UNIT;
+ }
nkdbufs = n_storage_units * EVENTS_PER_STORAGE_UNIT;
f_buffer_size = N_STORAGE_UNITS_PER_BUFFER * sizeof(struct kd_storage);
p_buffer_size = (n_storage_units % N_STORAGE_UNITS_PER_BUFFER) * sizeof(struct kd_storage);
- if (p_buffer_size)
+ if (p_buffer_size) {
n_storage_buffers++;
+ }
kd_bufs = NULL;
if (kdcopybuf == 0) {
- if (kmem_alloc(kernel_map, (vm_offset_t *)&kdcopybuf, (vm_size_t)KDCOPYBUF_SIZE, VM_KERN_MEMORY_DIAG) != KERN_SUCCESS) {
+ if (kmem_alloc(kernel_map, (vm_offset_t *)&kdcopybuf, (vm_size_t)KDCOPYBUF_SIZE, VM_KERN_MEMORY_DIAG) != KERN_SUCCESS) {
error = ENOSPC;
goto out;
}
for (i = 0; i < n_storage_buffers; i++) {
struct kd_storage *kds;
- int n_elements;
- int n;
+ uint16_t n_elements;
+ static_assert(N_STORAGE_UNITS_PER_BUFFER <= UINT16_MAX);
+ assert(kd_bufs[i].kdsb_size <= N_STORAGE_UNITS_PER_BUFFER *
+ sizeof(struct kd_storage));
n_elements = kd_bufs[i].kdsb_size / sizeof(struct kd_storage);
kds = kd_bufs[i].kdsb_addr;
- for (n = 0; n < n_elements; n++) {
+ for (uint16_t n = 0; n < n_elements; n++) {
kds[n].kds_next.buffer_index = kd_ctrl_page.kds_free_list.buffer_index;
kds[n].kds_next.offset = kd_ctrl_page.kds_free_list.offset;
bzero((char *)kdbip, sizeof(struct kd_bufinfo) * kd_ctrl_page.kdebug_cpus);
- for (i = 0; i < (int)kd_ctrl_page.kdebug_cpus; i++) {
+ for (i = 0; i < kd_ctrl_page.kdebug_cpus; i++) {
kdbip[i].kd_list_head.raw = KDS_PTR_NULL;
kdbip[i].kd_list_tail.raw = KDS_PTR_NULL;
- kdbip[i].kd_lostevents = FALSE;
+ kdbip[i].kd_lostevents = false;
kdbip[i].num_bufs = 0;
}
-
+
kd_ctrl_page.kdebug_flags |= KDBG_BUFINIT;
kd_ctrl_page.kds_inuse_count = 0;
n_storage_threshold = n_storage_units / 2;
out:
- if (error)
+ if (error) {
delete_buffers();
+ }
- return(error);
+ return error;
}
static void
delete_buffers(void)
{
- int i;
-
+ unsigned int i;
+
if (kd_bufs) {
for (i = 0; i < n_storage_buffers; i++) {
if (kd_bufs[i].kdsb_addr) {
if (kdbip) {
kmem_free(kernel_map, (vm_offset_t)kdbip, sizeof(struct kd_bufinfo) * kd_ctrl_page.kdebug_cpus);
-
+
kdbip = NULL;
}
- kd_ctrl_page.kdebug_iops = NULL;
+ kd_ctrl_page.kdebug_iops = NULL;
kd_ctrl_page.kdebug_cpus = 0;
kd_ctrl_page.kdebug_flags &= ~KDBG_BUFINIT;
}
release_storage_unit(int cpu, uint32_t kdsp_raw)
{
int s = 0;
- struct kd_storage *kdsp_actual;
+ struct kd_storage *kdsp_actual;
struct kd_bufinfo *kdbp;
union kds_ptr kdsp;
kdsp.raw = kdsp_raw;
- s = ml_set_interrupts_enabled(FALSE);
- lck_spin_lock(kds_spin_lock);
+ s = ml_set_interrupts_enabled(false);
+ lck_spin_lock_grp(kds_spin_lock, kdebug_lck_grp);
kdbp = &kdbip[cpu];
* it's possible for the storage unit pointed to
* by kdsp to have already been stolen... so
* check to see if it's still the head of the list
- * now that we're behind the lock that protects
+ * now that we're behind the lock that protects
* adding and removing from the queue...
* since we only ever release and steal units from
* that position, if it's no longer the head
ml_set_interrupts_enabled(s);
}
-
-boolean_t
+bool
allocate_storage_unit(int cpu)
{
- union kds_ptr kdsp;
- struct kd_storage *kdsp_actual, *kdsp_next_actual;
- struct kd_bufinfo *kdbp, *kdbp_vict, *kdbp_try;
- uint64_t oldest_ts, ts;
- boolean_t retval = TRUE;
- int s = 0;
-
- s = ml_set_interrupts_enabled(FALSE);
- lck_spin_lock(kds_spin_lock);
+ union kds_ptr kdsp;
+ struct kd_storage *kdsp_actual, *kdsp_next_actual;
+ struct kd_bufinfo *kdbp, *kdbp_vict, *kdbp_try;
+ uint64_t oldest_ts, ts;
+ bool retval = true;
+ int s = 0;
+
+ s = ml_set_interrupts_enabled(false);
+ lck_spin_lock_grp(kds_spin_lock, kdebug_lck_grp);
kdbp = &kdbip[cpu];
if (kdbp->kd_list_tail.raw != KDS_PTR_NULL) {
kdsp_actual = POINTER_FROM_KDS_PTR(kdbp->kd_list_tail);
- if (kdsp_actual->kds_bufindx < EVENTS_PER_STORAGE_UNIT)
+ if (kdsp_actual->kds_bufindx < EVENTS_PER_STORAGE_UNIT) {
goto out;
+ }
}
-
+
if ((kdsp = kd_ctrl_page.kds_free_list).raw != KDS_PTR_NULL) {
+ /*
+ * If there's a free page, grab it from the free list.
+ */
kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
kd_ctrl_page.kds_free_list = kdsp_actual->kds_next;
kd_ctrl_page.kds_inuse_count++;
} else {
+ /*
+ * Otherwise, we're going to lose events and repurpose the oldest
+ * storage unit we can find.
+ */
if (kd_ctrl_page.kdebug_flags & KDBG_NOWRAP) {
kd_ctrl_page.kdebug_slowcheck |= SLOW_NOLOG;
- kdbp->kd_lostevents = TRUE;
- retval = FALSE;
+ kdbp->kd_lostevents = true;
+ retval = false;
goto out;
}
kdbp_vict = NULL;
- oldest_ts = (uint64_t)-1;
+ oldest_ts = UINT64_MAX;
for (kdbp_try = &kdbip[0]; kdbp_try < &kdbip[kd_ctrl_page.kdebug_cpus]; kdbp_try++) {
-
if (kdbp_try->kd_list_head.raw == KDS_PTR_NULL) {
/*
* no storage unit to steal
*/
continue;
}
- ts = kdbg_get_timestamp(&kdsp_actual->kds_records[0]);
+ /*
+ * When wrapping, steal the storage unit with the
+ * earliest timestamp on its last event, instead of the
+ * earliest timestamp on the first event. This allows a
+ * storage unit with more recent events to be preserved,
+ * even if the storage unit contains events that are
+ * older than those found in other CPUs.
+ */
+ ts = kdbg_get_timestamp(&kdsp_actual->kds_records[EVENTS_PER_STORAGE_UNIT - 1]);
if (ts < oldest_ts) {
- /*
- * when 'wrapping', we want to steal the
- * storage unit that has the 'earliest' time
- * associated with it (first event time)
- */
oldest_ts = ts;
kdbp_vict = kdbp_try;
}
if (kdbp_vict == NULL) {
kdebug_enable = 0;
kd_ctrl_page.enabled = 0;
- commpage_update_kdebug_enable();
- retval = FALSE;
+ commpage_update_kdebug_state();
+ retval = false;
goto out;
}
kdsp = kdbp_vict->kd_list_head;
if (kdbp_vict->kd_list_head.raw != KDS_PTR_NULL) {
kdsp_next_actual = POINTER_FROM_KDS_PTR(kdbp_vict->kd_list_head);
- kdsp_next_actual->kds_lostevents = TRUE;
- } else
- kdbp_vict->kd_lostevents = TRUE;
+ kdsp_next_actual->kds_lostevents = true;
+ } else {
+ kdbp_vict->kd_lostevents = true;
+ }
+ if (kd_ctrl_page.oldest_time < oldest_ts) {
+ kd_ctrl_page.oldest_time = oldest_ts;
+ }
kd_ctrl_page.kdebug_flags |= KDBG_WRAPPED;
}
- kdsp_actual->kds_timestamp = mach_absolute_time();
+ kdsp_actual->kds_timestamp = kdbg_timestamp();
kdsp_actual->kds_next.raw = KDS_PTR_NULL;
- kdsp_actual->kds_bufcnt = 0;
+ kdsp_actual->kds_bufcnt = 0;
kdsp_actual->kds_readlast = 0;
kdsp_actual->kds_lostevents = kdbp->kd_lostevents;
- kdbp->kd_lostevents = FALSE;
- kdsp_actual->kds_bufindx = 0;
+ kdbp->kd_lostevents = false;
+ kdsp_actual->kds_bufindx = 0;
- if (kdbp->kd_list_head.raw == KDS_PTR_NULL)
+ if (kdbp->kd_list_head.raw == KDS_PTR_NULL) {
kdbp->kd_list_head = kdsp;
- else
+ } else {
POINTER_FROM_KDS_PTR(kdbp->kd_list_tail)->kds_next = kdsp;
+ }
kdbp->kd_list_tail = kdsp;
out:
lck_spin_unlock(kds_spin_lock);
ml_set_interrupts_enabled(s);
- return (retval);
+ return retval;
}
int
kd_iop_t* iop;
if (kmem_alloc(kernel_map, (vm_offset_t *)&iop, sizeof(kd_iop_t), VM_KERN_MEMORY_DIAG) == KERN_SUCCESS) {
memcpy(&iop->callback, &callback, sizeof(kd_callback_t));
-
+
/*
* <rdar://problem/13351477> Some IOP clients are not providing a name.
*
* Remove when fixed.
*/
{
- boolean_t is_valid_name = FALSE;
- for (uint32_t length=0; length<sizeof(callback.iop_name); ++length) {
+ bool is_valid_name = false;
+ for (uint32_t length = 0; length < sizeof(callback.iop_name); ++length) {
/* This is roughly isprintable(c) */
- if (callback.iop_name[length] > 0x20 && callback.iop_name[length] < 0x7F)
+ if (callback.iop_name[length] > 0x20 && callback.iop_name[length] < 0x7F) {
continue;
+ }
if (callback.iop_name[length] == 0) {
- if (length)
- is_valid_name = TRUE;
+ if (length) {
+ is_valid_name = true;
+ }
break;
}
}
-
+
if (!is_valid_name) {
strlcpy(iop->callback.iop_name, "IOP-???", sizeof(iop->callback.iop_name));
}
}
iop->last_timestamp = 0;
-
+
do {
/*
* We use two pieces of state, the old list head
* TLDR; Must not read kd_iops more than once per loop.
*/
iop->next = kd_iops;
- iop->cpu_id = iop->next ? (iop->next->cpu_id+1) : kdbg_cpu_count(FALSE);
+ iop->cpu_id = iop->next ? (iop->next->cpu_id + 1) : kdbg_cpu_count(false);
/*
* Header says OSCompareAndSwapPtr has a memory barrier
void
kernel_debug_enter(
- uint32_t coreid,
- uint32_t debugid,
- uint64_t timestamp,
- uintptr_t arg1,
- uintptr_t arg2,
- uintptr_t arg3,
- uintptr_t arg4,
- uintptr_t threadid
+ uint32_t coreid,
+ uint32_t debugid,
+ uint64_t timestamp,
+ uintptr_t arg1,
+ uintptr_t arg2,
+ uintptr_t arg3,
+ uintptr_t arg4,
+ uintptr_t threadid
)
{
- uint32_t bindx;
- kd_buf *kd;
+ uint32_t bindx;
+ kd_buf *kd;
struct kd_bufinfo *kdbp;
struct kd_storage *kdsp_actual;
union kds_ptr kds_raw;
if (kd_ctrl_page.kdebug_slowcheck) {
-
- if ( (kd_ctrl_page.kdebug_slowcheck & SLOW_NOLOG) || !(kdebug_enable & (KDEBUG_ENABLE_TRACE|KDEBUG_ENABLE_PPT)))
+ if ((kd_ctrl_page.kdebug_slowcheck & SLOW_NOLOG) || !(kdebug_enable & (KDEBUG_ENABLE_TRACE | KDEBUG_ENABLE_PPT))) {
goto out1;
-
+ }
+
if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) {
- /*
- * Recheck if TYPEFILTER is being used, and if so,
- * dereference bitmap. If the trace facility is being
- * disabled, we have ~100ms of preemption-free CPU
- * usage to access the bitmap.
- */
- disable_preemption();
- if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) {
- if (isset(type_filter_bitmap, KDBG_EXTRACT_CSC(debugid)))
- goto record_event_preempt_disabled;
+ if (typefilter_is_debugid_allowed(kdbg_typefilter, debugid)) {
+ goto record_event;
}
- enable_preemption();
goto out1;
- }
- else if (kd_ctrl_page.kdebug_flags & KDBG_RANGECHECK) {
- if (debugid >= kdlog_beg && debugid <= kdlog_end)
+ } else if (kd_ctrl_page.kdebug_flags & KDBG_RANGECHECK) {
+ if (debugid >= kdlog_beg && debugid <= kdlog_end) {
goto record_event;
+ }
goto out1;
- }
- else if (kd_ctrl_page.kdebug_flags & KDBG_VALCHECK) {
+ } else if (kd_ctrl_page.kdebug_flags & KDBG_VALCHECK) {
if ((debugid & KDBG_EVENTID_MASK) != kdlog_value1 &&
- (debugid & KDBG_EVENTID_MASK) != kdlog_value2 &&
- (debugid & KDBG_EVENTID_MASK) != kdlog_value3 &&
- (debugid & KDBG_EVENTID_MASK) != kdlog_value4)
+ (debugid & KDBG_EVENTID_MASK) != kdlog_value2 &&
+ (debugid & KDBG_EVENTID_MASK) != kdlog_value3 &&
+ (debugid & KDBG_EVENTID_MASK) != kdlog_value4) {
goto out1;
+ }
}
}
-
+
record_event:
+ if (timestamp < kd_ctrl_page.oldest_time) {
+ goto out1;
+ }
disable_preemption();
-record_event_preempt_disabled:
- if (kd_ctrl_page.enabled == 0)
+ if (kd_ctrl_page.enabled == 0) {
goto out;
+ }
kdbp = &kdbip[coreid];
timestamp &= KDBG_TIMESTAMP_MASK;
-#if KDEBUG_MOJO_TRACE
- if (kdebug_enable & KDEBUG_ENABLE_SERIAL)
- kdebug_serial_print(coreid, debugid, timestamp,
- arg1, arg2, arg3, arg4, threadid);
-#endif
-
retry_q:
kds_raw = kdbp->kd_list_tail;
if (kds_raw.raw != KDS_PTR_NULL) {
kdsp_actual = POINTER_FROM_KDS_PTR(kds_raw);
bindx = kdsp_actual->kds_bufindx;
- } else
+ } else {
kdsp_actual = NULL;
-
+ bindx = EVENTS_PER_STORAGE_UNIT;
+ }
+
if (kdsp_actual == NULL || bindx >= EVENTS_PER_STORAGE_UNIT) {
- if (allocate_storage_unit(coreid) == FALSE) {
+ if (allocate_storage_unit(coreid) == false) {
/*
* this can only happen if wrapping
* has been disabled
}
goto retry_q;
}
- if ( !OSCompareAndSwap(bindx, bindx + 1, &kdsp_actual->kds_bufindx))
+ if (!OSCompareAndSwap(bindx, bindx + 1, &kdsp_actual->kds_bufindx)) {
goto retry_q;
+ }
// IOP entries can be allocated before xnu allocates and inits the buffer
- if (timestamp < kdsp_actual->kds_timestamp)
+ if (timestamp < kdsp_actual->kds_timestamp) {
kdsp_actual->kds_timestamp = timestamp;
+ }
kd = &kdsp_actual->kds_records[bindx];
kd->arg3 = arg3;
kd->arg4 = arg4;
kd->arg5 = threadid;
-
+
kdbg_set_timestamp_and_cpu(kd, timestamp, coreid);
OSAddAtomic(1, &kdsp_actual->kds_bufcnt);
enable_preemption();
out1:
if ((kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold)) {
- boolean_t need_kds_wakeup = FALSE;
- int s;
-
- /*
- * try to take the lock here to synchronize with the
- * waiter entering the blocked state... use the try
- * mode to prevent deadlocks caused by re-entering this
- * routine due to various trace points triggered in the
- * lck_spin_sleep_xxxx routines used to actually enter
- * our wait condition... no problem if we fail,
- * there will be lots of additional events coming in that
- * will eventually succeed in grabbing this lock
- */
- s = ml_set_interrupts_enabled(FALSE);
+ kdbg_wakeup();
+ }
+}
- if (lck_spin_try_lock(kdw_spin_lock)) {
+/*
+ * Check if the given debug ID is allowed to be traced on the current process.
+ *
+ * Returns true if allowed and false otherwise.
+ */
+static inline bool
+kdebug_debugid_procfilt_allowed(uint32_t debugid)
+{
+ uint32_t procfilt_flags = kd_ctrl_page.kdebug_flags &
+ (KDBG_PIDCHECK | KDBG_PIDEXCLUDE);
- if (kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold) {
- kds_waiter = 0;
- need_kds_wakeup = TRUE;
- }
- lck_spin_unlock(kdw_spin_lock);
+ if (!procfilt_flags) {
+ return true;
+ }
- ml_set_interrupts_enabled(s);
-
- if (need_kds_wakeup == TRUE)
- wakeup(&kds_waiter);
- }
+ /*
+ * DBG_TRACE and MACH_SCHED tracepoints ignore the process filter.
+ */
+ if ((debugid & 0xffff0000) == MACHDBG_CODE(DBG_MACH_SCHED, 0) ||
+ (debugid >> 24 == DBG_TRACE)) {
+ return true;
}
-}
+ struct proc *curproc = current_proc();
+ /*
+ * If the process is missing (early in boot), allow it.
+ */
+ if (!curproc) {
+ return true;
+ }
+ if (procfilt_flags & KDBG_PIDCHECK) {
+ /*
+ * Allow only processes marked with the kdebug bit.
+ */
+ return curproc->p_kdebug;
+ } else if (procfilt_flags & KDBG_PIDEXCLUDE) {
+ /*
+ * Exclude any process marked with the kdebug bit.
+ */
+ return !curproc->p_kdebug;
+ } else {
+ panic("kdebug: invalid procfilt flags %x", kd_ctrl_page.kdebug_flags);
+ __builtin_unreachable();
+ }
+}
static void
kernel_debug_internal(
- uint32_t debugid,
- uintptr_t arg1,
- uintptr_t arg2,
- uintptr_t arg3,
- uintptr_t arg4,
- uintptr_t arg5)
+ uint32_t debugid,
+ uintptr_t arg1,
+ uintptr_t arg2,
+ uintptr_t arg3,
+ uintptr_t arg4,
+ uintptr_t arg5,
+ uint64_t flags)
{
- struct proc *curproc;
- uint64_t now;
- uint32_t bindx;
- boolean_t s;
- kd_buf *kd;
- int cpu;
+ uint64_t now;
+ uint32_t bindx;
+ kd_buf *kd;
+ int cpu;
struct kd_bufinfo *kdbp;
struct kd_storage *kdsp_actual;
- union kds_ptr kds_raw;
-
-
+ union kds_ptr kds_raw;
+ bool only_filter = flags & KDBG_FLAG_FILTERED;
+ bool observe_procfilt = !(flags & KDBG_FLAG_NOPROCFILT);
if (kd_ctrl_page.kdebug_slowcheck) {
-
- if (kdebug_enable & KDEBUG_ENABLE_CHUD) {
- kd_chudhook_fn chudhook;
- /*
- * Mask interrupts to minimize the interval across
- * which the driver providing the hook could be
- * unloaded.
- */
- s = ml_set_interrupts_enabled(FALSE);
- chudhook = kdebug_chudhook;
- if (chudhook)
- chudhook(debugid, arg1, arg2, arg3, arg4, arg5);
- ml_set_interrupts_enabled(s);
- }
- if ( (kd_ctrl_page.kdebug_slowcheck & SLOW_NOLOG) || !(kdebug_enable & (KDEBUG_ENABLE_TRACE|KDEBUG_ENABLE_PPT)))
+ if ((kd_ctrl_page.kdebug_slowcheck & SLOW_NOLOG) ||
+ !(kdebug_enable & (KDEBUG_ENABLE_TRACE | KDEBUG_ENABLE_PPT))) {
goto out1;
-
- if ( !ml_at_interrupt_context()) {
- if (kd_ctrl_page.kdebug_flags & KDBG_PIDCHECK) {
- /*
- * If kdebug flag is not set for current proc, return
- */
- curproc = current_proc();
-
- if ((curproc && !(curproc->p_kdebug)) &&
- ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)) &&
- (debugid >> 24 != DBG_TRACE))
- goto out1;
- }
- else if (kd_ctrl_page.kdebug_flags & KDBG_PIDEXCLUDE) {
- /*
- * If kdebug flag is set for current proc, return
- */
- curproc = current_proc();
+ }
- if ((curproc && curproc->p_kdebug) &&
- ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)) &&
- (debugid >> 24 != DBG_TRACE))
- goto out1;
- }
+ if (!ml_at_interrupt_context() && observe_procfilt &&
+ !kdebug_debugid_procfilt_allowed(debugid)) {
+ goto out1;
}
if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) {
- /* Always record trace system info */
- if (KDBG_EXTRACT_CLASS(debugid) == DBG_TRACE)
+ if (typefilter_is_debugid_allowed(kdbg_typefilter, debugid)) {
goto record_event;
-
- /*
- * Recheck if TYPEFILTER is being used, and if so,
- * dereference bitmap. If the trace facility is being
- * disabled, we have ~100ms of preemption-free CPU
- * usage to access the bitmap.
- */
- disable_preemption();
- if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) {
- if (isset(type_filter_bitmap, KDBG_EXTRACT_CSC(debugid)))
- goto record_event_preempt_disabled;
}
- enable_preemption();
+
goto out1;
- }
- else if (kd_ctrl_page.kdebug_flags & KDBG_RANGECHECK) {
+ } else if (only_filter) {
+ goto out1;
+ } else if (kd_ctrl_page.kdebug_flags & KDBG_RANGECHECK) {
/* Always record trace system info */
- if (KDBG_EXTRACT_CLASS(debugid) == DBG_TRACE)
+ if (KDBG_EXTRACT_CLASS(debugid) == DBG_TRACE) {
goto record_event;
-
- if (debugid < kdlog_beg || debugid > kdlog_end)
+ }
+
+ if (debugid < kdlog_beg || debugid > kdlog_end) {
goto out1;
- }
- else if (kd_ctrl_page.kdebug_flags & KDBG_VALCHECK) {
+ }
+ } else if (kd_ctrl_page.kdebug_flags & KDBG_VALCHECK) {
/* Always record trace system info */
- if (KDBG_EXTRACT_CLASS(debugid) == DBG_TRACE)
+ if (KDBG_EXTRACT_CLASS(debugid) == DBG_TRACE) {
goto record_event;
-
+ }
+
if ((debugid & KDBG_EVENTID_MASK) != kdlog_value1 &&
(debugid & KDBG_EVENTID_MASK) != kdlog_value2 &&
(debugid & KDBG_EVENTID_MASK) != kdlog_value3 &&
- (debugid & KDBG_EVENTID_MASK) != kdlog_value4)
+ (debugid & KDBG_EVENTID_MASK) != kdlog_value4) {
goto out1;
+ }
}
+ } else if (only_filter) {
+ goto out1;
}
+
record_event:
disable_preemption();
-record_event_preempt_disabled:
- if (kd_ctrl_page.enabled == 0)
+ if (kd_ctrl_page.enabled == 0) {
goto out;
+ }
cpu = cpu_number();
kdbp = &kdbip[cpu];
-#if KDEBUG_MOJO_TRACE
- if (kdebug_enable & KDEBUG_ENABLE_SERIAL)
- kdebug_serial_print(cpu, debugid,
- mach_absolute_time() & KDBG_TIMESTAMP_MASK,
- arg1, arg2, arg3, arg4, arg5);
-#endif
-
retry_q:
kds_raw = kdbp->kd_list_tail;
if (kds_raw.raw != KDS_PTR_NULL) {
kdsp_actual = POINTER_FROM_KDS_PTR(kds_raw);
bindx = kdsp_actual->kds_bufindx;
- } else
+ } else {
kdsp_actual = NULL;
-
+ bindx = EVENTS_PER_STORAGE_UNIT;
+ }
+
if (kdsp_actual == NULL || bindx >= EVENTS_PER_STORAGE_UNIT) {
- if (allocate_storage_unit(cpu) == FALSE) {
+ if (allocate_storage_unit(cpu) == false) {
/*
* this can only happen if wrapping
* has been disabled
}
goto retry_q;
}
- now = mach_absolute_time() & KDBG_TIMESTAMP_MASK;
- if ( !OSCompareAndSwap(bindx, bindx + 1, &kdsp_actual->kds_bufindx))
+ now = kdbg_timestamp() & KDBG_TIMESTAMP_MASK;
+
+ if (!OSCompareAndSwap(bindx, bindx + 1, &kdsp_actual->kds_bufindx)) {
goto retry_q;
+ }
kd = &kdsp_actual->kds_records[bindx];
kd->arg3 = arg3;
kd->arg4 = arg4;
kd->arg5 = arg5;
-
+
kdbg_set_timestamp_and_cpu(kd, now, cpu);
OSAddAtomic(1, &kdsp_actual->kds_bufcnt);
+
+#if KPERF
+ kperf_kdebug_callback(debugid, __builtin_frame_address(0));
+#endif
out:
enable_preemption();
out1:
if (kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold) {
- uint32_t etype;
- uint32_t stype;
-
+ uint32_t etype;
+ uint32_t stype;
+
etype = debugid & KDBG_EVENTID_MASK;
stype = debugid & KDBG_CSC_MASK;
if (etype == INTERRUPT || etype == MACH_vmfault ||
stype == BSC_SysCall || stype == MACH_SysCall) {
-
- boolean_t need_kds_wakeup = FALSE;
-
- /*
- * try to take the lock here to synchronize with the
- * waiter entering the blocked state... use the try
- * mode to prevent deadlocks caused by re-entering this
- * routine due to various trace points triggered in the
- * lck_spin_sleep_xxxx routines used to actually enter
- * one of our 2 wait conditions... no problem if we fail,
- * there will be lots of additional events coming in that
- * will eventually succeed in grabbing this lock
- */
- s = ml_set_interrupts_enabled(FALSE);
-
- if (lck_spin_try_lock(kdw_spin_lock)) {
-
- if (kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold) {
- kds_waiter = 0;
- need_kds_wakeup = TRUE;
- }
- lck_spin_unlock(kdw_spin_lock);
- }
- ml_set_interrupts_enabled(s);
-
- if (need_kds_wakeup == TRUE)
- wakeup(&kds_waiter);
+ kdbg_wakeup();
}
}
}
+__attribute__((noinline))
void
kernel_debug(
- uint32_t debugid,
- uintptr_t arg1,
- uintptr_t arg2,
- uintptr_t arg3,
- uintptr_t arg4,
+ uint32_t debugid,
+ uintptr_t arg1,
+ uintptr_t arg2,
+ uintptr_t arg3,
+ uintptr_t arg4,
__unused uintptr_t arg5)
{
- kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, (uintptr_t)thread_tid(current_thread()));
+ kernel_debug_internal(debugid, arg1, arg2, arg3, arg4,
+ (uintptr_t)thread_tid(current_thread()), 0);
}
+__attribute__((noinline))
void
kernel_debug1(
- uint32_t debugid,
- uintptr_t arg1,
- uintptr_t arg2,
- uintptr_t arg3,
- uintptr_t arg4,
- uintptr_t arg5)
+ uint32_t debugid,
+ uintptr_t arg1,
+ uintptr_t arg2,
+ uintptr_t arg3,
+ uintptr_t arg4,
+ uintptr_t arg5)
+{
+ kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, arg5, 0);
+}
+
+__attribute__((noinline))
+void
+kernel_debug_flags(
+ uint32_t debugid,
+ uintptr_t arg1,
+ uintptr_t arg2,
+ uintptr_t arg3,
+ uintptr_t arg4,
+ uint64_t flags)
+{
+ kernel_debug_internal(debugid, arg1, arg2, arg3, arg4,
+ (uintptr_t)thread_tid(current_thread()), flags);
+}
+
+__attribute__((noinline))
+void
+kernel_debug_filtered(
+ uint32_t debugid,
+ uintptr_t arg1,
+ uintptr_t arg2,
+ uintptr_t arg3,
+ uintptr_t arg4)
{
- kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, arg5);
+ kernel_debug_flags(debugid, arg1, arg2, arg3, arg4, KDBG_FLAG_FILTERED);
}
void
-kernel_debug_string_simple(const char *message)
+kernel_debug_string_early(const char *message)
{
uintptr_t arg[4] = {0, 0, 0, 0};
/* Stuff the message string in the args and log it. */
- strncpy((char *)arg, message, MIN(sizeof(arg), strlen(message)));
+ strncpy((char *)arg, message, MIN(sizeof(arg), strlen(message)));
KERNEL_DEBUG_EARLY(
TRACE_INFO_STRING,
arg[0], arg[1], arg[2], arg[3]);
}
-extern int master_cpu; /* MACH_KERNEL_PRIVATE */
+#define SIMPLE_STR_LEN (64)
+static_assert(SIMPLE_STR_LEN % sizeof(uintptr_t) == 0);
+
+void
+kernel_debug_string_simple(uint32_t eventid, const char *str)
+{
+ if (!kdebug_enable) {
+ return;
+ }
+
+ /* array of uintptr_ts simplifies emitting the string as arguments */
+ uintptr_t str_buf[(SIMPLE_STR_LEN / sizeof(uintptr_t)) + 1] = { 0 };
+ size_t len = strlcpy((char *)str_buf, str, SIMPLE_STR_LEN + 1);
+
+ uintptr_t thread_id = (uintptr_t)thread_tid(current_thread());
+ uint32_t debugid = eventid | DBG_FUNC_START;
+
+ /* string can fit in a single tracepoint */
+ if (len <= (4 * sizeof(uintptr_t))) {
+ debugid |= DBG_FUNC_END;
+ }
+
+ kernel_debug_internal(debugid, str_buf[0],
+ str_buf[1],
+ str_buf[2],
+ str_buf[3], thread_id, 0);
+
+ debugid &= KDBG_EVENTID_MASK;
+ int i = 4;
+ size_t written = 4 * sizeof(uintptr_t);
+
+ for (; written < len; i += 4, written += 4 * sizeof(uintptr_t)) {
+ /* if this is the last tracepoint to be emitted */
+ if ((written + (4 * sizeof(uintptr_t))) >= len) {
+ debugid |= DBG_FUNC_END;
+ }
+ kernel_debug_internal(debugid, str_buf[i],
+ str_buf[i + 1],
+ str_buf[i + 2],
+ str_buf[i + 3], thread_id, 0);
+ }
+}
+
+extern int master_cpu; /* MACH_KERNEL_PRIVATE */
/*
* Used prior to start_kern_tracing() being called.
* Log temporarily into a static buffer.
*/
void
kernel_debug_early(
- uint32_t debugid,
- uintptr_t arg1,
- uintptr_t arg2,
- uintptr_t arg3,
- uintptr_t arg4)
+ uint32_t debugid,
+ uintptr_t arg1,
+ uintptr_t arg2,
+ uintptr_t arg3,
+ uintptr_t arg4)
{
- /* If tracing is already initialized, use it */
- if (nkdbufs) {
- KERNEL_DEBUG_CONSTANT(debugid, arg1, arg2, arg3, arg4, 0);
+#if defined(__x86_64__)
+ extern int early_boot;
+ /*
+ * Note that "early" isn't early enough in some cases where
+ * we're invoked before gsbase is set on x86, hence the
+ * check of "early_boot".
+ */
+ if (early_boot) {
+ return;
+ }
+#endif
+
+ /* If early tracing is over, use the normal path. */
+ if (kd_early_done) {
+ KDBG_RELEASE(debugid, arg1, arg2, arg3, arg4);
return;
}
- /* Do nothing if the buffer is full or we're not on the boot cpu */
- kd_early_overflow = kd_early_index >= KD_EARLY_BUFFER_MAX;
- if (kd_early_overflow ||
- cpu_number() != master_cpu)
+ /* Do nothing if the buffer is full or we're not on the boot cpu. */
+ kd_early_overflow = kd_early_index >= KD_EARLY_BUFFER_NBUFS;
+ if (kd_early_overflow || cpu_number() != master_cpu) {
return;
+ }
kd_early_buffer[kd_early_index].debugid = debugid;
kd_early_buffer[kd_early_index].timestamp = mach_absolute_time();
}
/*
- * Transfen the contents of the temporary buffer into the trace buffers.
+ * Transfer the contents of the temporary buffer into the trace buffers.
* Precede that by logging the rebase time (offset) - the TSC-based time (in ns)
* when mach_absolute_time is set to 0.
*/
static void
kernel_debug_early_end(void)
{
- int i;
-
- if (cpu_number() != master_cpu)
+ if (cpu_number() != master_cpu) {
panic("kernel_debug_early_end() not call on boot processor");
+ }
+ /* reset the current oldest time to allow early events */
+ kd_ctrl_page.oldest_time = 0;
+
+#if defined(__x86_64__)
/* Fake sentinel marking the start of kernel time relative to TSC */
- kernel_debug_enter(
- 0,
- TRACE_TIMESTAMPS,
- 0,
- (uint32_t)(tsc_rebase_abs_time >> 32),
- (uint32_t)tsc_rebase_abs_time,
- 0,
- 0,
- 0);
- for (i = 0; i < kd_early_index; i++) {
- kernel_debug_enter(
- 0,
- kd_early_buffer[i].debugid,
- kd_early_buffer[i].timestamp,
- kd_early_buffer[i].arg1,
- kd_early_buffer[i].arg2,
- kd_early_buffer[i].arg3,
- kd_early_buffer[i].arg4,
- 0);
+ kernel_debug_enter(0, TRACE_TIMESTAMPS, 0,
+ (uint32_t)(tsc_rebase_abs_time >> 32), (uint32_t)tsc_rebase_abs_time,
+ tsc_at_boot, 0, 0);
+#endif /* defined(__x86_64__) */
+ for (unsigned int i = 0; i < kd_early_index; i++) {
+ kernel_debug_enter(0,
+ kd_early_buffer[i].debugid,
+ kd_early_buffer[i].timestamp,
+ kd_early_buffer[i].arg1,
+ kd_early_buffer[i].arg2,
+ kd_early_buffer[i].arg3,
+ kd_early_buffer[i].arg4,
+ 0);
}
/* Cut events-lost event on overflow */
- if (kd_early_overflow)
- KERNEL_DEBUG_CONSTANT(
- TRACE_LOST_EVENTS, 0, 0, 0, 0, 0);
+ if (kd_early_overflow) {
+ KDBG_RELEASE(TRACE_LOST_EVENTS, 1);
+ }
+
+ kd_early_done = true;
/* This trace marks the start of kernel tracing */
- kernel_debug_string_simple("early trace done");
+ kernel_debug_string_early("early trace done");
+}
+
+void
+kernel_debug_disable(void)
+{
+ if (kdebug_enable) {
+ kdbg_set_tracing_enabled(false, 0);
+ }
}
/*
debugid_class = KDBG_EXTRACT_CLASS(debugid);
switch (debugid_class) {
- case DBG_TRACE:
- return EPERM;
+ case DBG_TRACE:
+ return EPERM;
}
return 0;
}
+/*
+ * Support syscall SYS_kdebug_typefilter.
+ */
+int
+kdebug_typefilter(__unused struct proc* p,
+ struct kdebug_typefilter_args* uap,
+ __unused int *retval)
+{
+ int ret = KERN_SUCCESS;
+
+ if (uap->addr == USER_ADDR_NULL ||
+ uap->size == USER_ADDR_NULL) {
+ return EINVAL;
+ }
+
+ /*
+ * The atomic load is to close a race window with setting the typefilter
+ * and memory entry values. A description follows:
+ *
+ * Thread 1 (writer)
+ *
+ * Allocate Typefilter
+ * Allocate MemoryEntry
+ * Write Global MemoryEntry Ptr
+ * Atomic Store (Release) Global Typefilter Ptr
+ *
+ * Thread 2 (reader, AKA us)
+ *
+ * if ((Atomic Load (Acquire) Global Typefilter Ptr) == NULL)
+ * return;
+ *
+ * Without the atomic store, it isn't guaranteed that the write of
+ * Global MemoryEntry Ptr is visible before we can see the write of
+ * Global Typefilter Ptr.
+ *
+ * Without the atomic load, it isn't guaranteed that the loads of
+ * Global MemoryEntry Ptr aren't speculated.
+ *
+ * The global pointers transition from NULL -> valid once and only once,
+ * and never change after becoming valid. This means that having passed
+ * the first atomic load test of Global Typefilter Ptr, this function
+ * can then safely use the remaining global state without atomic checks.
+ */
+ if (!os_atomic_load(&kdbg_typefilter, acquire)) {
+ return EINVAL;
+ }
+
+ assert(kdbg_typefilter_memory_entry);
+
+ mach_vm_offset_t user_addr = 0;
+ vm_map_t user_map = current_map();
+
+ ret = mach_to_bsd_errno(
+ mach_vm_map_kernel(user_map, // target map
+ &user_addr, // [in, out] target address
+ TYPEFILTER_ALLOC_SIZE, // initial size
+ 0, // mask (alignment?)
+ VM_FLAGS_ANYWHERE, // flags
+ VM_MAP_KERNEL_FLAGS_NONE,
+ VM_KERN_MEMORY_NONE,
+ kdbg_typefilter_memory_entry, // port (memory entry!)
+ 0, // offset (in memory entry)
+ false, // should copy
+ VM_PROT_READ, // cur_prot
+ VM_PROT_READ, // max_prot
+ VM_INHERIT_SHARE)); // inherit behavior on fork
+
+ if (ret == KERN_SUCCESS) {
+ vm_size_t user_ptr_size = vm_map_is_64bit(user_map) ? 8 : 4;
+		ret = copyout(CAST_DOWN(void *, &user_addr), uap->addr, user_ptr_size);
+
+ if (ret != KERN_SUCCESS) {
+ mach_vm_deallocate(user_map, user_addr, TYPEFILTER_ALLOC_SIZE);
+ }
+ }
+
+ return ret;
+}
+
/*
* Support syscall SYS_kdebug_trace. U64->K32 args may get truncated in kdebug_trace64
*/
}
/*
- * Support syscall SYS_kdebug_trace64. 64-bit args on K32 will get truncated to fit in 32-bit record format.
+ * Support syscall SYS_kdebug_trace64. 64-bit args on K32 will get truncated
+ * to fit in 32-bit record format.
+ *
+ * It is intentional that error conditions are not checked until kdebug is
+ * enabled. This is to match the userspace wrapper behavior, which is optimizing
+ * for non-error case performance.
*/
-int kdebug_trace64(__unused struct proc *p, struct kdebug_trace64_args *uap, __unused int32_t *retval)
+int
+kdebug_trace64(__unused struct proc *p, struct kdebug_trace64_args *uap, __unused int32_t *retval)
{
int err;
+ if (__probable(kdebug_enable == 0)) {
+ return 0;
+ }
+
if ((err = kdebug_validate_debugid(uap->code)) != 0) {
return err;
}
- if ( __probable(kdebug_enable == 0) )
- return(0);
+ kernel_debug_internal(uap->code, (uintptr_t)uap->arg1,
+ (uintptr_t)uap->arg2, (uintptr_t)uap->arg3, (uintptr_t)uap->arg4,
+ (uintptr_t)thread_tid(current_thread()), 0);
- kernel_debug_internal(uap->code, (uintptr_t)uap->arg1, (uintptr_t)uap->arg2, (uintptr_t)uap->arg3, (uintptr_t)uap->arg4, (uintptr_t)thread_tid(current_thread()));
-
- return(0);
+ return 0;
}
/*
*/
static uint64_t
kernel_debug_string_internal(uint32_t debugid, uint64_t str_id, void *vstr,
- size_t str_len)
+ size_t str_len)
{
/* str must be word-aligned */
uintptr_t *str = vstr;
uintptr_t thread_id;
int i;
uint32_t trace_debugid = TRACEDBG_CODE(DBG_TRACE_STRING,
- TRACE_STRING_GLOBAL);
+ TRACE_STRING_GLOBAL);
thread_id = (uintptr_t)thread_tid(current_thread());
/* if the ID is being invalidated, just emit that */
if (str_id != 0 && str_len == 0) {
kernel_debug_internal(trace_debugid | DBG_FUNC_START | DBG_FUNC_END,
- (uintptr_t)debugid, (uintptr_t)str_id, 0, 0,
- thread_id);
+ (uintptr_t)debugid, (uintptr_t)str_id, 0, 0, thread_id, 0);
return str_id;
}
trace_debugid |= DBG_FUNC_END;
}
- kernel_debug_internal(trace_debugid, (uintptr_t)debugid,
- (uintptr_t)str_id, str[0],
- str[1], thread_id);
+ kernel_debug_internal(trace_debugid, (uintptr_t)debugid, (uintptr_t)str_id,
+ str[0], str[1], thread_id, 0);
trace_debugid &= KDBG_EVENTID_MASK;
i = 2;
trace_debugid |= DBG_FUNC_END;
}
kernel_debug_internal(trace_debugid, str[i],
- str[i + 1],
- str[i + 2],
- str[i + 3], thread_id);
+ str[i + 1],
+ str[i + 2],
+ str[i + 3], thread_id, 0);
}
return str_id;
* Trace system and scheduling events circumvent this check, as do events
* emitted in interrupt context.
*/
-static boolean_t
+static bool
kdebug_current_proc_enabled(uint32_t debugid)
{
/* can't determine current process in interrupt context */
if (ml_at_interrupt_context()) {
- return TRUE;
+ return true;
}
/* always emit trace system and scheduling events */
if ((KDBG_EXTRACT_CLASS(debugid) == DBG_TRACE ||
- (debugid & KDBG_CSC_MASK) == MACHDBG_CODE(DBG_MACH_SCHED, 0)))
- {
- return TRUE;
+ (debugid & KDBG_CSC_MASK) == MACHDBG_CODE(DBG_MACH_SCHED, 0))) {
+ return true;
}
if (kd_ctrl_page.kdebug_flags & KDBG_PIDCHECK) {
/* only the process with the kdebug bit set is allowed */
if (cur_proc && !(cur_proc->p_kdebug)) {
- return FALSE;
+ return false;
}
} else if (kd_ctrl_page.kdebug_flags & KDBG_PIDEXCLUDE) {
proc_t cur_proc = current_proc();
/* every process except the one with the kdebug bit set is allowed */
if (cur_proc && cur_proc->p_kdebug) {
- return FALSE;
+ return false;
}
}
- return TRUE;
+ return true;
}
-/*
- * Returns true if the debugid is disabled by filters, and false if the
- * debugid is allowed to be traced. A debugid may not be traced if the
- * typefilter disables its class and subclass, it's outside a range
- * check, or if it's not an allowed debugid in a value check. Trace
- * system events bypass this check.
- */
-static boolean_t
+bool
kdebug_debugid_enabled(uint32_t debugid)
{
- boolean_t is_enabled = TRUE;
-
/* if no filtering is enabled */
if (!kd_ctrl_page.kdebug_slowcheck) {
- return TRUE;
+ return true;
}
- if (KDBG_EXTRACT_CLASS(debugid) == DBG_TRACE) {
- return TRUE;
- }
+ return kdebug_debugid_explicitly_enabled(debugid);
+}
+bool
+kdebug_debugid_explicitly_enabled(uint32_t debugid)
+{
if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) {
- disable_preemption();
-
- /*
- * Recheck if typefilter is still being used. If tracing is being
- * disabled, there's a 100ms sleep on the other end to keep the
- * bitmap around for this check.
- */
- if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) {
- if (!(isset(type_filter_bitmap, KDBG_EXTRACT_CSC(debugid)))) {
- is_enabled = FALSE;
- }
- }
-
- enable_preemption();
+ return typefilter_is_debugid_allowed(kdbg_typefilter, debugid);
+ } else if (KDBG_EXTRACT_CLASS(debugid) == DBG_TRACE) {
+ return true;
} else if (kd_ctrl_page.kdebug_flags & KDBG_RANGECHECK) {
if (debugid < kdlog_beg || debugid > kdlog_end) {
- is_enabled = FALSE;
+ return false;
}
} else if (kd_ctrl_page.kdebug_flags & KDBG_VALCHECK) {
if ((debugid & KDBG_EVENTID_MASK) != kdlog_value1 &&
- (debugid & KDBG_EVENTID_MASK) != kdlog_value2 &&
- (debugid & KDBG_EVENTID_MASK) != kdlog_value3 &&
- (debugid & KDBG_EVENTID_MASK) != kdlog_value4)
- {
- is_enabled = FALSE;
+ (debugid & KDBG_EVENTID_MASK) != kdlog_value2 &&
+ (debugid & KDBG_EVENTID_MASK) != kdlog_value3 &&
+ (debugid & KDBG_EVENTID_MASK) != kdlog_value4) {
+ return false;
}
}
- return is_enabled;
+ return true;
+}
+
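+/*
+ * Report whether events are being timestamped with the continuous timebase
+ * (KDEBUG_ENABLE_CONT_TIME) rather than the default absolute timebase.
+ */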
+bool
+kdebug_using_continuous_time(void)
+{
+ return kdebug_enable & KDEBUG_ENABLE_CONT_TIME;
}
/*
{
/* arguments to tracepoints must be word-aligned */
__attribute__((aligned(sizeof(uintptr_t)))) char str_buf[STR_BUF_SIZE];
- assert_static(sizeof(str_buf) > MAX_STR_LEN);
+ static_assert(sizeof(str_buf) > MAX_STR_LEN);
vm_size_t len_copied;
int err;
memset(str_buf, 0, sizeof(str_buf));
len_copied = strlcpy(str_buf, str, MAX_STR_LEN + 1);
*str_id = kernel_debug_string_internal(debugid, *str_id, str_buf,
- len_copied);
+ len_copied);
return 0;
}
*/
int
kdebug_trace_string(__unused struct proc *p,
- struct kdebug_trace_string_args *uap,
- uint64_t *retval)
+ struct kdebug_trace_string_args *uap,
+ uint64_t *retval)
{
__attribute__((aligned(sizeof(uintptr_t)))) char str_buf[STR_BUF_SIZE];
- assert_static(sizeof(str_buf) > MAX_STR_LEN);
+ static_assert(sizeof(str_buf) > MAX_STR_LEN);
size_t len_copied;
int err;
}
*retval = kernel_debug_string_internal(uap->debugid, uap->str_id,
- NULL, 0);
+ NULL, 0);
return 0;
}
len_copied--;
*retval = kernel_debug_string_internal(uap->debugid, uap->str_id, str_buf,
- len_copied);
+ len_copied);
return 0;
}
static void
kdbg_lock_init(void)
{
- if (kd_ctrl_page.kdebug_flags & KDBG_LOCKINIT)
+ static lck_grp_attr_t *kdebug_lck_grp_attr = NULL;
+ static lck_attr_t *kdebug_lck_attr = NULL;
+
+ if (kd_ctrl_page.kdebug_flags & KDBG_LOCKINIT) {
return;
-
- /*
- * allocate lock group attribute and group
- */
- kd_trace_mtx_sysctl_grp_attr = lck_grp_attr_alloc_init();
- kd_trace_mtx_sysctl_grp = lck_grp_alloc_init("kdebug", kd_trace_mtx_sysctl_grp_attr);
-
- /*
- * allocate the lock attribute
- */
- kd_trace_mtx_sysctl_attr = lck_attr_alloc_init();
+ }
+ assert(kdebug_lck_grp_attr == NULL);
+ kdebug_lck_grp_attr = lck_grp_attr_alloc_init();
+ kdebug_lck_grp = lck_grp_alloc_init("kdebug", kdebug_lck_grp_attr);
+ kdebug_lck_attr = lck_attr_alloc_init();
- /*
- * allocate and initialize mutex's
- */
- kd_trace_mtx_sysctl = lck_mtx_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);
- kds_spin_lock = lck_spin_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);
- kdw_spin_lock = lck_spin_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);
+ kds_spin_lock = lck_spin_alloc_init(kdebug_lck_grp, kdebug_lck_attr);
+ kdw_spin_lock = lck_spin_alloc_init(kdebug_lck_grp, kdebug_lck_attr);
kd_ctrl_page.kdebug_flags |= KDBG_LOCKINIT;
}
-
int
-kdbg_bootstrap(boolean_t early_trace)
+kdbg_bootstrap(bool early_trace)
{
- kd_ctrl_page.kdebug_flags &= ~KDBG_WRAPPED;
+ kd_ctrl_page.kdebug_flags &= ~KDBG_WRAPPED;
- return (create_buffers(early_trace));
+ return create_buffers(early_trace);
}
int
-kdbg_reinit(boolean_t early_trace)
+kdbg_reinit(bool early_trace)
{
int ret = 0;
* First make sure we're not in
* the middle of cutting a trace
*/
- kdbg_set_tracing_enabled(FALSE, KDEBUG_ENABLE_TRACE);
+ kernel_debug_disable();
/*
* make sure the SLOW_NOLOG is seen
delete_buffers();
- if ((kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr) {
- kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
- kd_ctrl_page.kdebug_flags &= ~KDBG_MAPINIT;
- kd_mapsize = 0;
- kd_mapptr = NULL;
- kd_mapcount = 0;
- }
+ kdbg_clear_thread_map();
ret = kdbg_bootstrap(early_trace);
RAW_file_offset = 0;
RAW_file_written = 0;
- return(ret);
+ return ret;
}
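+/*
+ * Fill in the pid and unique ID of the given process for a trace point,
+ * zeroing both when no process is given or the unique ID does not fit in
+ * a long.
+ */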
void
-kdbg_trace_data(struct proc *proc, long *arg_pid)
+kdbg_trace_data(struct proc *proc, long *arg_pid, long *arg_uniqueid)
{
- if (!proc)
+ if (!proc) {
*arg_pid = 0;
- else
+ *arg_uniqueid = 0;
+ } else {
*arg_pid = proc->p_pid;
+ /* Fit in a trace point */
+ *arg_uniqueid = (long)proc->p_uniqueid;
+ if ((uint64_t) *arg_uniqueid != proc->p_uniqueid) {
+ *arg_uniqueid = 0;
+ }
+ }
}
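+/*
+ * Pack the bytes of the given process's name into the four long-sized
+ * trace point arguments, zero-filling any remainder.
+ */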
void
-kdbg_trace_string(struct proc *proc, long *arg1, long *arg2, long *arg3, long *arg4)
+kdbg_trace_string(struct proc *proc, long *arg1, long *arg2, long *arg3,
+ long *arg4)
{
- char *dbg_nameptr;
- int dbg_namelen;
- long dbg_parms[4];
-
if (!proc) {
*arg1 = 0;
*arg2 = 0;
*arg4 = 0;
return;
}
- /*
- * Collect the pathname for tracing
- */
- dbg_nameptr = proc->p_comm;
- dbg_namelen = (int)strlen(proc->p_comm);
- dbg_parms[0]=0L;
- dbg_parms[1]=0L;
- dbg_parms[2]=0L;
- dbg_parms[3]=0L;
-
- if(dbg_namelen > (int)sizeof(dbg_parms))
- dbg_namelen = (int)sizeof(dbg_parms);
-
- strncpy((char *)dbg_parms, dbg_nameptr, dbg_namelen);
-
- *arg1=dbg_parms[0];
- *arg2=dbg_parms[1];
- *arg3=dbg_parms[2];
- *arg4=dbg_parms[3];
-}
-
-static void
-kdbg_resolve_map(thread_t th_act, void *opaque)
-{
- kd_threadmap *mapptr;
- krt_t *t = (krt_t *)opaque;
- if (t->count < t->maxcount) {
- mapptr = &t->map[t->count];
- mapptr->thread = (uintptr_t)thread_tid(th_act);
+ const char *procname = proc_best_name(proc);
+ size_t namelen = strlen(procname);
- (void) strlcpy (mapptr->command, t->atts->task_comm,
- sizeof(t->atts->task_comm));
- /*
- * Some kernel threads have no associated pid.
- * We still need to mark the entry as valid.
- */
- if (t->atts->pid)
- mapptr->valid = t->atts->pid;
- else
- mapptr->valid = 1;
+ long args[4] = { 0 };
- t->count++;
+ if (namelen > sizeof(args)) {
+ namelen = sizeof(args);
}
+
+ strncpy((char *)args, procname, namelen);
+
+ *arg1 = args[0];
+ *arg2 = args[1];
+ *arg3 = args[2];
+ *arg4 = args[3];
}
/*
*
* We may be reporting data from "now", or from the "past".
*
- * The "now" data would be for something like kdbg_readcurcpumap().
* The "past" data would be for kdbg_readcpumap().
*
* If we do not pass both iops and cpu_count, and iops is NULL, this function
uint32_t bytes_needed = sizeof(kd_cpumap_header) + cpu_count * sizeof(kd_cpumap);
uint32_t bytes_available = *cpumap_size;
*cpumap_size = bytes_needed;
-
+
if (*cpumap == NULL) {
if (kmem_alloc(kernel_map, (vm_offset_t*)cpumap, (vm_size_t)*cpumap_size, VM_KERN_MEMORY_DIAG) != KERN_SUCCESS) {
return ENOMEM;
}
+ bzero(*cpumap, *cpumap_size);
} else if (bytes_available < bytes_needed) {
return EINVAL;
}
while (iops) {
cpus[index].cpu_id = iops->cpu_id;
cpus[index].flags = KDBG_CPUMAP_IS_IOP;
- bzero(cpus[index].name, sizeof(cpus->name));
strlcpy(cpus[index].name, iops->callback.iop_name, sizeof(cpus->name));
-
+
iops = iops->next;
index--;
}
-
+
while (index >= 0) {
cpus[index].cpu_id = index;
cpus[index].flags = 0;
- bzero(cpus[index].name, sizeof(cpus->name));
strlcpy(cpus[index].name, "AP", sizeof(cpus->name));
index--;
}
-
+
return KERN_SUCCESS;
}
void
kdbg_thrmap_init(void)
{
- if (kd_ctrl_page.kdebug_flags & KDBG_MAPINIT)
+ ktrace_assert_lock_held();
+
+ if (kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) {
return;
+ }
kd_mapptr = kdbg_thrmap_init_internal(0, &kd_mapsize, &kd_mapcount);
- if (kd_mapptr)
+ if (kd_mapptr) {
kd_ctrl_page.kdebug_flags |= KDBG_MAPINIT;
+ }
}
+static void
+kd_resolve_map(thread_t thread, void *opaque)
+{
+ struct kd_resolver *resolve = opaque;
+
+ if (resolve->krs_count < resolve->krs_maxcount) {
+ kd_threadmap *map = &resolve->krs_map[resolve->krs_count];
+ struct kd_task_name *task_name = resolve->krs_task;
+ map->thread = (uintptr_t)thread_tid(thread);
+
+ (void)strlcpy(map->command, task_name->ktn_name, sizeof(map->command));
+ /*
+		 * Kernel threads should still be marked with a non-zero valid bit.
+ */
+ pid_t pid = resolve->krs_task->ktn_pid;
+ map->valid = pid == 0 ? 1 : pid;
+ resolve->krs_count++;
+ }
+}
-kd_threadmap* kdbg_thrmap_init_internal(unsigned int count, unsigned int *mapsize, unsigned int *mapcount)
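+/*
+ * Snapshot the pid and name of up to `ntasks` live, non-exiting processes
+ * under the proc_list_lock, taking a task reference on each so their threads
+ * can be iterated later without the lock held. Returns the number of tasks
+ * recorded.
+ */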
+static vm_size_t
+kd_resolve_tasks(struct kd_task_name *task_names, vm_size_t ntasks)
{
- kd_threadmap *mapptr;
- struct proc *p;
- struct krt akrt;
- int tts_count; /* number of task-to-string structures */
- struct tts *tts_mapptr;
- unsigned int tts_mapsize = 0;
- int i;
- vm_offset_t kaddr;
+ vm_size_t i = 0;
+ proc_t p = PROC_NULL;
- /*
- * need to use PROC_SCANPROCLIST with proc_iterate
- */
proc_list_lock();
-
- /*
- * Calculate the sizes of map buffers
- */
- for (p = allproc.lh_first, *mapcount=0, tts_count=0; p; p = p->p_list.le_next) {
- *mapcount += get_task_numacts((task_t)p->task);
- tts_count++;
+ ALLPROC_FOREACH(p) {
+ if (i >= ntasks) {
+ break;
+ }
+ /*
+ * Only record processes that can be referenced and are not exiting.
+ */
+ if (p->task && (p->p_lflag & P_LEXIT) == 0) {
+ task_reference(p->task);
+ task_names[i].ktn_task = p->task;
+ task_names[i].ktn_pid = p->p_pid;
+ (void)strlcpy(task_names[i].ktn_name, proc_best_name(p),
+ sizeof(task_names[i].ktn_name));
+ i++;
+ }
}
proc_list_unlock();
- /*
- * The proc count could change during buffer allocation,
- * so introduce a small fudge factor to bump up the
- * buffer sizes. This gives new tasks some chance of
- * making into the tables. Bump up by 25%.
- */
- *mapcount += *mapcount/4;
- tts_count += tts_count/4;
-
- *mapsize = *mapcount * sizeof(kd_threadmap);
+ return i;
+}
- if (count && count < *mapcount)
- return (0);
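+/*
+ * Iterate the threads of each referenced task, filling in one kd_threadmap
+ * entry per thread and dropping the task references taken by
+ * kd_resolve_tasks(). Returns the number of entries filled in.
+ */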
+static vm_size_t
+kd_resolve_threads(kd_threadmap *map, struct kd_task_name *task_names,
+ vm_size_t ntasks, vm_size_t nthreads)
+{
+ struct kd_resolver resolver = {
+ .krs_map = map, .krs_count = 0, .krs_maxcount = nthreads,
+ };
- if ((kmem_alloc(kernel_map, &kaddr, (vm_size_t)*mapsize, VM_KERN_MEMORY_DIAG) == KERN_SUCCESS)) {
- bzero((void *)kaddr, *mapsize);
- mapptr = (kd_threadmap *)kaddr;
- } else
- return (0);
+	for (vm_size_t i = 0; i < ntasks; i++) {
+ struct kd_task_name *cur_task = &task_names[i];
+ resolver.krs_task = cur_task;
+ task_act_iterate_wth_args(cur_task->ktn_task, kd_resolve_map,
+ &resolver);
+ task_deallocate(cur_task->ktn_task);
+ }
- tts_mapsize = tts_count * sizeof(struct tts);
+ return resolver.krs_count;
+}
- if ((kmem_alloc(kernel_map, &kaddr, (vm_size_t)tts_mapsize, VM_KERN_MEMORY_DIAG) == KERN_SUCCESS)) {
- bzero((void *)kaddr, tts_mapsize);
- tts_mapptr = (struct tts *)kaddr;
- } else {
- kmem_free(kernel_map, (vm_offset_t)mapptr, *mapsize);
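+/*
+ * Allocate and populate a map from thread ID to process name for all live
+ * threads, sized with 25% headroom for threads created while it is built.
+ * A `maxthreads` of 0 means no caller-imposed limit; the resulting size and
+ * entry count are returned through `mapsize` and `mapcount`.
+ */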
+static kd_threadmap *
+kdbg_thrmap_init_internal(size_t maxthreads, vm_size_t *mapsize,
+ vm_size_t *mapcount)
+{
+ kd_threadmap *thread_map = NULL;
+ struct kd_task_name *task_names;
+ vm_size_t names_size = 0;
- return (0);
- }
- /*
- * We need to save the procs command string
- * and take a reference for each task associated
- * with a valid process
- */
+ assert(mapsize != NULL);
+ assert(mapcount != NULL);
- proc_list_lock();
+ vm_size_t nthreads = threads_count;
+ vm_size_t ntasks = tasks_count;
/*
- * should use proc_iterate
+ * Allow 25% more threads and tasks to be created between now and taking the
+ * proc_list_lock.
*/
- for (p = allproc.lh_first, i=0; p && i < tts_count; p = p->p_list.le_next) {
- if (p->p_lflag & P_LEXIT)
- continue;
-
- if (p->task) {
- task_reference(p->task);
- tts_mapptr[i].task = p->task;
- tts_mapptr[i].pid = p->p_pid;
- (void)strlcpy(tts_mapptr[i].task_comm, p->p_comm, sizeof(tts_mapptr[i].task_comm));
- i++;
- }
+ if (os_add_overflow(nthreads, nthreads / 4, &nthreads) ||
+ os_add_overflow(ntasks, ntasks / 4, &ntasks)) {
+ return NULL;
}
- tts_count = i;
- proc_list_unlock();
+ *mapcount = nthreads;
+ if (os_mul_overflow(nthreads, sizeof(kd_threadmap), mapsize)) {
+ return NULL;
+ }
+ if (os_mul_overflow(ntasks, sizeof(task_names[0]), &names_size)) {
+ return NULL;
+ }
/*
- * Initialize thread map data
+ * Wait until the out-parameters have been filled with the needed size to
+ * do the bounds checking on the provided maximum.
*/
- akrt.map = mapptr;
- akrt.count = 0;
- akrt.maxcount = *mapcount;
-
- for (i = 0; i < tts_count; i++) {
- akrt.atts = &tts_mapptr[i];
- task_act_iterate_wth_args(tts_mapptr[i].task, kdbg_resolve_map, &akrt);
- task_deallocate((task_t) tts_mapptr[i].task);
+ if (maxthreads != 0 && maxthreads < nthreads) {
+ return NULL;
}
- kmem_free(kernel_map, (vm_offset_t)tts_mapptr, tts_mapsize);
-
- *mapcount = akrt.count;
- return (mapptr);
+	thread_map = kalloc_tag(*mapsize, VM_KERN_MEMORY_DIAG);
+	if (thread_map == NULL) {
+		return NULL;
+	}
+	bzero(thread_map, *mapsize);
+	task_names = kheap_alloc(KHEAP_TEMP, names_size, Z_WAITOK | Z_ZERO);
+ ntasks = kd_resolve_tasks(task_names, ntasks);
+ *mapcount = kd_resolve_threads(thread_map, task_names, ntasks, nthreads);
+ kheap_free(KHEAP_TEMP, task_names, names_size);
+ return thread_map;
}
static void
* First make sure we're not in
* the middle of cutting a trace
*/
- kdbg_set_tracing_enabled(FALSE, KDEBUG_ENABLE_TRACE);
+ kernel_debug_disable();
kdbg_disable_typefilter();
/*
*/
IOSleep(100);
- global_state_pid = -1;
+ /* reset kdebug state for each process */
+ if (kd_ctrl_page.kdebug_flags & (KDBG_PIDCHECK | KDBG_PIDEXCLUDE)) {
+ proc_list_lock();
+ proc_t p;
+ ALLPROC_FOREACH(p) {
+ p->p_kdebug = 0;
+ }
+ proc_list_unlock();
+ }
+
kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
kd_ctrl_page.kdebug_flags &= ~(KDBG_NOWRAP | KDBG_RANGECHECK | KDBG_VALCHECK);
kd_ctrl_page.kdebug_flags &= ~(KDBG_PIDCHECK | KDBG_PIDEXCLUDE);
-
- kdbg_deallocate_typefilter();
+
+ kd_ctrl_page.oldest_time = 0;
+
delete_buffers();
- nkdbufs = 0;
+ nkdbufs = 0;
/* Clean up the thread map buffer */
- kd_ctrl_page.kdebug_flags &= ~KDBG_MAPINIT;
- if (kd_mapptr) {
- kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
- kd_mapptr = (kd_threadmap *) 0;
- }
- kd_mapsize = 0;
- kd_mapcount = 0;
+ kdbg_clear_thread_map();
RAW_file_offset = 0;
RAW_file_written = 0;
}
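+/*
+ * Restore kdebug to its default, disabled state: clear the buffers and
+ * per-process filters, and reset the typefilter to allow only DBG_TRACE.
+ */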
+void
+kdebug_reset(void)
+{
+ ktrace_assert_lock_held();
+
+ kdbg_lock_init();
+
+ kdbg_clear();
+ if (kdbg_typefilter) {
+ typefilter_reject_all(kdbg_typefilter);
+ typefilter_allow_class(kdbg_typefilter, DBG_TRACE);
+ }
+}
+
+void
+kdebug_free_early_buf(void)
+{
+#if defined(__x86_64__)
+ /*
+ * Make Intel aware that the early buffer is no longer being used. ARM
+ * handles this as part of the BOOTDATA segment.
+ */
+ ml_static_mfree((vm_offset_t)&kd_early_buffer, sizeof(kd_early_buffer));
+#endif /* defined(__x86_64__) */
+}
+
int
kdbg_setpid(kd_regtype *kdr)
{
pid_t pid;
- int flag, ret=0;
+ int flag, ret = 0;
struct proc *p;
pid = (pid_t)kdr->value1;
flag = (int)kdr->value2;
- if (pid > 0) {
- if ((p = proc_find(pid)) == NULL)
+ if (pid >= 0) {
+ if ((p = proc_find(pid)) == NULL) {
ret = ESRCH;
- else {
+ } else {
if (flag == 1) {
/*
* turn on pid check for this and all pids
*/
kd_ctrl_page.kdebug_flags |= KDBG_PIDCHECK;
kd_ctrl_page.kdebug_flags &= ~KDBG_PIDEXCLUDE;
- kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
+ kdbg_set_flags(SLOW_CHECKS, 0, true);
p->p_kdebug = 1;
} else {
* Don't turn off all pid checking though
*
* kd_ctrl_page.kdebug_flags &= ~KDBG_PIDCHECK;
- */
+ */
p->p_kdebug = 0;
}
proc_rele(p);
}
- }
- else
+ } else {
ret = EINVAL;
+ }
- return(ret);
+ return ret;
}
/* This is for pid exclusion in the trace buffer */
kdbg_setpidex(kd_regtype *kdr)
{
pid_t pid;
- int flag, ret=0;
+ int flag, ret = 0;
struct proc *p;
pid = (pid_t)kdr->value1;
flag = (int)kdr->value2;
- if (pid > 0) {
- if ((p = proc_find(pid)) == NULL)
+ if (pid >= 0) {
+ if ((p = proc_find(pid)) == NULL) {
ret = ESRCH;
- else {
+ } else {
if (flag == 1) {
/*
* turn on pid exclusion
*/
kd_ctrl_page.kdebug_flags |= KDBG_PIDEXCLUDE;
kd_ctrl_page.kdebug_flags &= ~KDBG_PIDCHECK;
- kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
+ kdbg_set_flags(SLOW_CHECKS, 0, true);
p->p_kdebug = 1;
- }
- else {
+ } else {
/*
* turn off pid exclusion for this pid value
* Don't turn off all pid exclusion though
*
* kd_ctrl_page.kdebug_flags &= ~KDBG_PIDEXCLUDE;
- */
+ */
p->p_kdebug = 0;
}
proc_rele(p);
}
- } else
+ } else {
ret = EINVAL;
+ }
- return(ret);
+ return ret;
}
+/*
+ * The following functions all operate on the "global" typefilter singleton.
+ */
/*
- * This is for setting a maximum decrementer value
+ * The tf param is optional; you may pass either a valid typefilter or NULL.
+ * If you pass a valid typefilter, you release ownership of that typefilter.
*/
-int
-kdbg_setrtcdec(kd_regtype *kdr)
+static int
+kdbg_initialize_typefilter(typefilter_t tf)
{
- int ret = 0;
- natural_t decval;
+ ktrace_assert_lock_held();
+ assert(!kdbg_typefilter);
+ assert(!kdbg_typefilter_memory_entry);
+ typefilter_t deallocate_tf = NULL;
+
+ if (!tf && ((tf = deallocate_tf = typefilter_create()) == NULL)) {
+ return ENOMEM;
+ }
- decval = (natural_t)kdr->value1;
+ if ((kdbg_typefilter_memory_entry = typefilter_create_memory_entry(tf)) == MACH_PORT_NULL) {
+ if (deallocate_tf) {
+ typefilter_deallocate(deallocate_tf);
+ }
+ return ENOMEM;
+ }
- if (decval && decval < KDBG_MINRTCDEC)
- ret = EINVAL;
- else
- ret = ENOTSUP;
+ /*
+ * The atomic store closes a race window with
+ * the kdebug_typefilter syscall, which assumes
+ * that any non-null kdbg_typefilter means a
+ * valid memory_entry is available.
+ */
+ os_atomic_store(&kdbg_typefilter, tf, release);
- return(ret);
+ return KERN_SUCCESS;
}
-int
-kdbg_enable_typefilter(void)
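+/*
+ * Copy a typefilter bitmap in from user space, force DBG_TRACE to remain
+ * allowed, and either install it as the global typefilter or copy it over
+ * the existing one, notifying any IOPs of the change.
+ */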
+static int
+kdbg_copyin_typefilter(user_addr_t addr, size_t size)
{
- int ret;
+ int ret = ENOMEM;
+ typefilter_t tf;
- /* Allocate memory for bitmap if not already allocated */
- ret = kdbg_allocate_typefilter();
- if (ret) {
- return ret;
+ ktrace_assert_lock_held();
+
+ if (size != KDBG_TYPEFILTER_BITMAP_SIZE) {
+ return EINVAL;
}
- /* Turn off range and value checks */
- kd_ctrl_page.kdebug_flags &= ~(KDBG_RANGECHECK | KDBG_VALCHECK);
-
- /* Enable filter checking */
- kd_ctrl_page.kdebug_flags |= KDBG_TYPEFILTER_CHECK;
- kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
- return 0;
-}
+ if ((tf = typefilter_create())) {
+ if ((ret = copyin(addr, tf, KDBG_TYPEFILTER_BITMAP_SIZE)) == 0) {
+ /* The kernel typefilter must always allow DBG_TRACE */
+ typefilter_allow_class(tf, DBG_TRACE);
-int
-kdbg_disable_typefilter(void)
-{
- /* Disable filter checking */
- kd_ctrl_page.kdebug_flags &= ~KDBG_TYPEFILTER_CHECK;
+ /*
+			 * If this is the first typefilter, claim it.
+ * Otherwise copy and deallocate.
+ *
+ * Allocating a typefilter for the copyin allows
+ * the kernel to hold the invariant that DBG_TRACE
+ * must always be allowed.
+ */
+ if (!kdbg_typefilter) {
+ if ((ret = kdbg_initialize_typefilter(tf))) {
+ return ret;
+ }
+ tf = NULL;
+ } else {
+ typefilter_copy(kdbg_typefilter, tf);
+ }
- /* Turn off slow checks unless pid checks are using them */
- if ( (kd_ctrl_page.kdebug_flags & (KDBG_PIDCHECK | KDBG_PIDEXCLUDE)) )
- kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
- else
- kdbg_set_flags(SLOW_CHECKS, 0, FALSE);
+ kdbg_enable_typefilter();
+ kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_TYPEFILTER_CHANGED, kdbg_typefilter);
+ }
- /* typefilter bitmap will be deallocated later */
+ if (tf) {
+ typefilter_deallocate(tf);
+ }
+ }
- return 0;
+ return ret;
}
-static int
-kdbg_allocate_typefilter(void)
+/*
+ * Enable the flags in the control page for the typefilter. Assumes that
+ * kdbg_typefilter has already been allocated, so events being written
+ * don't see a bad typefilter.
+ */
+static void
+kdbg_enable_typefilter(void)
{
- if (type_filter_bitmap == NULL) {
- vm_offset_t bitmap = 0;
-
- if (kmem_alloc(kernel_map, &bitmap, KDBG_TYPEFILTER_BITMAP_SIZE, VM_KERN_MEMORY_DIAG) != KERN_SUCCESS) {
- return ENOSPC;
- }
+ assert(kdbg_typefilter);
+ kd_ctrl_page.kdebug_flags &= ~(KDBG_RANGECHECK | KDBG_VALCHECK);
+ kd_ctrl_page.kdebug_flags |= KDBG_TYPEFILTER_CHECK;
+ kdbg_set_flags(SLOW_CHECKS, 0, true);
+ commpage_update_kdebug_state();
+}
- bzero((void *)bitmap, KDBG_TYPEFILTER_BITMAP_SIZE);
+/*
+ * Disable the flags in the control page for the typefilter. The typefilter
+ * may be safely deallocated shortly after this function returns.
+ */
+static void
+kdbg_disable_typefilter(void)
+{
+ bool notify_iops = kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK;
+ kd_ctrl_page.kdebug_flags &= ~KDBG_TYPEFILTER_CHECK;
- if (!OSCompareAndSwapPtr(NULL, (void *)bitmap, &type_filter_bitmap)) {
- kmem_free(kernel_map, bitmap, KDBG_TYPEFILTER_BITMAP_SIZE);
- return 0; /* someone assigned a buffer */
- }
+ if ((kd_ctrl_page.kdebug_flags & (KDBG_PIDCHECK | KDBG_PIDEXCLUDE))) {
+ kdbg_set_flags(SLOW_CHECKS, 0, true);
} else {
- bzero(type_filter_bitmap, KDBG_TYPEFILTER_BITMAP_SIZE);
+ kdbg_set_flags(SLOW_CHECKS, 0, false);
}
+ commpage_update_kdebug_state();
- return 0;
+ if (notify_iops) {
+ /*
+ * Notify IOPs that the typefilter will now allow everything.
+ * Otherwise, they won't know a typefilter is no longer in
+ * effect.
+ */
+ typefilter_allow_all(kdbg_typefilter);
+ kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops,
+ KD_CALLBACK_TYPEFILTER_CHANGED, kdbg_typefilter);
+ }
}
-static int
-kdbg_deallocate_typefilter(void)
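+/*
+ * Compute the commpage flags word advertising whether tracing and the
+ * typefilter are enabled, so user space can check cheaply before making
+ * a trace syscall.
+ */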
+uint32_t
+kdebug_commpage_state(void)
{
- if(type_filter_bitmap) {
- vm_offset_t bitmap = (vm_offset_t)type_filter_bitmap;
-
- if (OSCompareAndSwapPtr((void *)bitmap, NULL, &type_filter_bitmap)) {
- kmem_free(kernel_map, bitmap, KDBG_TYPEFILTER_BITMAP_SIZE);
- return 0;
- } else {
- /* already swapped */
+ if (kdebug_enable) {
+ if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) {
+ return KDEBUG_COMMPAGE_ENABLE_TYPEFILTER | KDEBUG_COMMPAGE_ENABLE_TRACE;
}
+
+ return KDEBUG_COMMPAGE_ENABLE_TRACE;
}
return 0;
int
kdbg_setreg(kd_regtype * kdr)
{
- int ret=0;
+ int ret = 0;
unsigned int val_1, val_2, val;
switch (kdr->type) {
-
- case KDBG_CLASSTYPE :
+ case KDBG_CLASSTYPE:
val_1 = (kdr->value1 & 0xff);
val_2 = (kdr->value2 & 0xff);
- kdlog_beg = (val_1<<24);
- kdlog_end = (val_2<<24);
+ kdlog_beg = (val_1 << 24);
+ kdlog_end = (val_2 << 24);
kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
kd_ctrl_page.kdebug_flags &= ~KDBG_VALCHECK; /* Turn off specific value check */
kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
- kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
+ kdbg_set_flags(SLOW_CHECKS, 0, true);
break;
- case KDBG_SUBCLSTYPE :
+ case KDBG_SUBCLSTYPE:
val_1 = (kdr->value1 & 0xff);
val_2 = (kdr->value2 & 0xff);
val = val_2 + 1;
- kdlog_beg = ((val_1<<24) | (val_2 << 16));
- kdlog_end = ((val_1<<24) | (val << 16));
+ kdlog_beg = ((val_1 << 24) | (val_2 << 16));
+ kdlog_end = ((val_1 << 24) | (val << 16));
kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
kd_ctrl_page.kdebug_flags &= ~KDBG_VALCHECK; /* Turn off specific value check */
kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
- kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
+ kdbg_set_flags(SLOW_CHECKS, 0, true);
break;
- case KDBG_RANGETYPE :
+ case KDBG_RANGETYPE:
kdlog_beg = (kdr->value1);
kdlog_end = (kdr->value2);
kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
kd_ctrl_page.kdebug_flags &= ~KDBG_VALCHECK; /* Turn off specific value check */
kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
- kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
+ kdbg_set_flags(SLOW_CHECKS, 0, true);
break;
case KDBG_VALCHECK:
kdlog_value1 = (kdr->value1);
kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
kd_ctrl_page.kdebug_flags &= ~KDBG_RANGECHECK; /* Turn off range check */
kd_ctrl_page.kdebug_flags |= KDBG_VALCHECK; /* Turn on specific value check */
- kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
+ kdbg_set_flags(SLOW_CHECKS, 0, true);
break;
- case KDBG_TYPENONE :
+ case KDBG_TYPENONE:
kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
- if ( (kd_ctrl_page.kdebug_flags & (KDBG_RANGECHECK | KDBG_VALCHECK |
- KDBG_PIDCHECK | KDBG_PIDEXCLUDE |
- KDBG_TYPEFILTER_CHECK)) )
- kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
- else
- kdbg_set_flags(SLOW_CHECKS, 0, FALSE);
-
- kdlog_beg = 0;
- kdlog_end = 0;
- break;
- default :
- ret = EINVAL;
- break;
- }
- return(ret);
-}
-
-int
-kdbg_getreg(__unused kd_regtype * kdr)
-{
-#if 0
- int i,j, ret=0;
- unsigned int val_1, val_2, val;
+ if ((kd_ctrl_page.kdebug_flags & (KDBG_RANGECHECK | KDBG_VALCHECK |
+ KDBG_PIDCHECK | KDBG_PIDEXCLUDE |
+ KDBG_TYPEFILTER_CHECK))) {
+ kdbg_set_flags(SLOW_CHECKS, 0, true);
+ } else {
+ kdbg_set_flags(SLOW_CHECKS, 0, false);
+ }
- switch (kdr->type) {
- case KDBG_CLASSTYPE :
- val_1 = (kdr->value1 & 0xff);
- val_2 = val_1 + 1;
- kdlog_beg = (val_1<<24);
- kdlog_end = (val_2<<24);
- kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
- kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
- break;
- case KDBG_SUBCLSTYPE :
- val_1 = (kdr->value1 & 0xff);
- val_2 = (kdr->value2 & 0xff);
- val = val_2 + 1;
- kdlog_beg = ((val_1<<24) | (val_2 << 16));
- kdlog_end = ((val_1<<24) | (val << 16));
- kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
- kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
- break;
- case KDBG_RANGETYPE :
- kdlog_beg = (kdr->value1);
- kdlog_end = (kdr->value2);
- kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
- kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
- break;
- case KDBG_TYPENONE :
- kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
kdlog_beg = 0;
kdlog_end = 0;
break;
- default :
+ default:
ret = EINVAL;
break;
}
-#endif /* 0 */
- return(EINVAL);
+ return ret;
}
static int
kdbg_write_to_vnode(caddr_t buffer, size_t size, vnode_t vp, vfs_context_t ctx, off_t file_offset)
{
- return vn_rdwr(UIO_WRITE, vp, buffer, size, file_offset, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT,
- vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
+ assert(size < INT_MAX);
+ return vn_rdwr(UIO_WRITE, vp, buffer, (int)size, file_offset, UIO_SYSSPACE, IO_NODELOCKED | IO_UNIT,
+ vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
}
int
kdbg_write_v3_chunk_header(user_addr_t buffer, uint32_t tag, uint32_t sub_tag, uint64_t length, vnode_t vp, vfs_context_t ctx)
{
int ret = KERN_SUCCESS;
- kd_chunk_header_v3 header;
-
- header.tag = tag;
- header.sub_tag = sub_tag;
- header.length = length;
+ kd_chunk_header_v3 header = {
+ .tag = tag,
+ .sub_tag = sub_tag,
+ .length = length,
+ };
// Check that only one of them is valid
assert(!buffer ^ !vp);
goto write_error;
}
RAW_file_offset += (sizeof(kd_chunk_header_v3));
- }
- else {
+ } else {
ret = copyout(&header, buffer, sizeof(kd_chunk_header_v3));
if (ret) {
goto write_error;
return ret;
}
-int
-kdbg_write_v3_chunk_header_to_buffer(void * buffer, uint32_t tag, uint32_t sub_tag, uint64_t length)
-{
- kd_chunk_header_v3 header;
-
- header.tag = tag;
- header.sub_tag = sub_tag;
- header.length = length;
-
- if (!buffer) {
- return 0;
- }
-
- memcpy(buffer, &header, sizeof(kd_chunk_header_v3));
-
- return (sizeof(kd_chunk_header_v3));
-}
-
-int
+static int
kdbg_write_v3_chunk_to_fd(uint32_t tag, uint32_t sub_tag, uint64_t length, void *payload, uint64_t payload_size, int fd)
{
proc_t p;
vnode_t vp;
p = current_proc();
- proc_fdlock(p);
- if ( (fp_lookup(p, fd, &fp, 1)) ) {
- proc_fdunlock(p);
- return EFAULT;
+ if (fp_get_ftype(p, fd, DTYPE_VNODE, EBADF, &fp)) {
+ return EBADF;
}
+ vp = fp->fp_glob->fg_data;
context.vc_thread = current_thread();
- context.vc_ucred = fp->f_fglob->fg_cred;
-
- if (FILEGLOB_DTYPE(fp->f_fglob) != DTYPE_VNODE) {
- fp_drop(p, fd, fp, 1);
- proc_fdunlock(p);
- return EBADF;
- }
- vp = (struct vnode *) fp->f_fglob->fg_data;
- proc_fdunlock(p);
+ context.vc_ucred = fp->fp_glob->fg_cred;
- if ( (vnode_getwithref(vp)) == 0 ) {
- RAW_file_offset = fp->f_fglob->fg_offset;
+ if ((vnode_getwithref(vp)) == 0) {
+ RAW_file_offset = fp->fp_glob->fg_offset;
- kd_chunk_header_v3 chunk_header = { .tag = tag, .sub_tag = sub_tag, .length = length };
+ kd_chunk_header_v3 chunk_header = {
+ .tag = tag,
+ .sub_tag = sub_tag,
+ .length = length,
+ };
int ret = kdbg_write_to_vnode((caddr_t) &chunk_header, sizeof(kd_chunk_header_v3), vp, &context, RAW_file_offset);
if (!ret) {
RAW_file_offset += payload_size;
}
- fp->f_fglob->fg_offset = RAW_file_offset;
+ fp->fp_glob->fg_offset = RAW_file_offset;
vnode_put(vp);
}
user_addr_t
kdbg_write_v3_event_chunk_header(user_addr_t buffer, uint32_t tag, uint64_t length, vnode_t vp, vfs_context_t ctx)
{
- uint64_t future_chunk_timestamp = 0;
- length += sizeof(uint64_t);
-
- if (kdbg_write_v3_chunk_header(buffer, tag, V3_EVENT_DATA_VERSION, length, vp, ctx)) {
- return 0;
- }
- if (buffer) {
- buffer += sizeof(kd_chunk_header_v3);
- }
-
- // Check that only one of them is valid
- assert(!buffer ^ !vp);
- assert((vp == NULL) || (ctx != NULL));
-
- // Write the 8-byte future_chunk_timestamp field in the payload
- if (buffer || vp) {
- if (vp) {
- int ret = kdbg_write_to_vnode((caddr_t)&future_chunk_timestamp, sizeof(uint64_t), vp, ctx, RAW_file_offset);
- if (!ret) {
- RAW_file_offset += (sizeof(uint64_t));
- }
- }
- else {
- if (copyout(&future_chunk_timestamp, buffer, sizeof(uint64_t))) {
- return 0;
- }
- }
- }
-
- return (buffer + sizeof(uint64_t));
+ uint64_t future_chunk_timestamp = 0;
+ length += sizeof(uint64_t);
+
+ if (kdbg_write_v3_chunk_header(buffer, tag, V3_EVENT_DATA_VERSION, length, vp, ctx)) {
+ return 0;
+ }
+ if (buffer) {
+ buffer += sizeof(kd_chunk_header_v3);
+ }
+
+ // Check that only one of them is valid
+ assert(!buffer ^ !vp);
+ assert((vp == NULL) || (ctx != NULL));
+
+ // Write the 8-byte future_chunk_timestamp field in the payload
+ if (buffer || vp) {
+ if (vp) {
+ int ret = kdbg_write_to_vnode((caddr_t)&future_chunk_timestamp, sizeof(uint64_t), vp, ctx, RAW_file_offset);
+ if (!ret) {
+ RAW_file_offset += (sizeof(uint64_t));
+ }
+ } else {
+ if (copyout(&future_chunk_timestamp, buffer, sizeof(uint64_t))) {
+ return 0;
+ }
+ }
+ }
+
+ return buffer + sizeof(uint64_t);
}
int
kdbg_write_v3_header(user_addr_t user_header, size_t *user_header_size, int fd)
{
- int ret = KERN_SUCCESS;
- kd_header_v3 header;
-
- uint8_t* cpumap = 0;
- uint32_t cpumap_size = 0;
- uint32_t thrmap_size = 0;
-
- size_t bytes_needed = 0;
-
- // Check that only one of them is valid
- assert(!user_header ^ !fd);
- assert(user_header_size);
-
- if ( !(kd_ctrl_page.kdebug_flags & KDBG_BUFINIT) ) {
- ret = EINVAL;
- goto bail;
- }
-
- if ( !(user_header || fd) ) {
- ret = EINVAL;
- goto bail;
- }
-
- // Initialize the cpu map
- ret = kdbg_cpumap_init_internal(kd_ctrl_page.kdebug_iops, kd_ctrl_page.kdebug_cpus, &cpumap, &cpumap_size);
- if (ret != KERN_SUCCESS) {
- goto bail;
- }
-
- // Check if a thread map is initialized
- if ( !kd_mapptr ) {
- ret = EINVAL;
- goto bail;
- }
- thrmap_size = kd_mapcount * sizeof(kd_threadmap);
-
- // Setup the header.
- // See v3 header description in sys/kdebug.h for more inforamtion.
-
- header.tag = RAW_VERSION3;
- header.sub_tag = V3_HEADER_VERSION;
- header.length = ( sizeof(kd_header_v3) + cpumap_size - sizeof(kd_cpumap_header));
-
- mach_timebase_info_data_t timebase = {0, 0};
- clock_timebase_info(&timebase);
- header.timebase_numer = timebase.numer;
- header.timebase_denom = timebase.denom;
- header.timestamp = 0;
- header.walltime_secs = 0;
- header.walltime_usecs = 0;
- header.timezone_minuteswest = 0;
- header.timezone_dst = 0;
-
-#if defined __LP64__
- header.flags = 1;
+ int ret = KERN_SUCCESS;
+
+ uint8_t* cpumap = 0;
+ uint32_t cpumap_size = 0;
+ uint32_t thrmap_size = 0;
+
+ size_t bytes_needed = 0;
+
+ // Check that only one of them is valid
+ assert(!user_header ^ !fd);
+ assert(user_header_size);
+
+ if (!(kd_ctrl_page.kdebug_flags & KDBG_BUFINIT)) {
+ ret = EINVAL;
+ goto bail;
+ }
+
+ if (!(user_header || fd)) {
+ ret = EINVAL;
+ goto bail;
+ }
+
+ // Initialize the cpu map
+ ret = kdbg_cpumap_init_internal(kd_ctrl_page.kdebug_iops, kd_ctrl_page.kdebug_cpus, &cpumap, &cpumap_size);
+ if (ret != KERN_SUCCESS) {
+ goto bail;
+ }
+
+ // Check if a thread map is initialized
+ if (!kd_mapptr) {
+ ret = EINVAL;
+ goto bail;
+ }
+ if (os_mul_overflow(kd_mapcount, sizeof(kd_threadmap), &thrmap_size)) {
+ ret = ERANGE;
+ goto bail;
+ }
+
+ mach_timebase_info_data_t timebase = {0, 0};
+ clock_timebase_info(&timebase);
+
+ // Setup the header.
+	// See v3 header description in sys/kdebug.h for more information.
+ kd_header_v3 header = {
+ .tag = RAW_VERSION3,
+ .sub_tag = V3_HEADER_VERSION,
+ .length = (sizeof(kd_header_v3) + cpumap_size - sizeof(kd_cpumap_header)),
+ .timebase_numer = timebase.numer,
+ .timebase_denom = timebase.denom,
+ .timestamp = 0, /* FIXME rdar://problem/22053009 */
+ .walltime_secs = 0,
+ .walltime_usecs = 0,
+ .timezone_minuteswest = 0,
+ .timezone_dst = 0,
+#if defined(__LP64__)
+ .flags = 1,
#else
- header.flags = 0;
+ .flags = 0,
#endif
+ };
+
+	// If it's a buffer, check if we have enough space to copy the header and the maps.
+ if (user_header) {
+ bytes_needed = (size_t)header.length + thrmap_size + (2 * sizeof(kd_chunk_header_v3));
+ if (*user_header_size < bytes_needed) {
+ ret = EINVAL;
+ goto bail;
+ }
+ }
+
+ // Start writing the header
+ if (fd) {
+ void *hdr_ptr = (void *)(((uintptr_t) &header) + sizeof(kd_chunk_header_v3));
+ size_t payload_size = (sizeof(kd_header_v3) - sizeof(kd_chunk_header_v3));
+
+ ret = kdbg_write_v3_chunk_to_fd(RAW_VERSION3, V3_HEADER_VERSION, header.length, hdr_ptr, payload_size, fd);
+ if (ret) {
+ goto bail;
+ }
+ } else {
+ if (copyout(&header, user_header, sizeof(kd_header_v3))) {
+ ret = EFAULT;
+ goto bail;
+ }
+ // Update the user pointer
+ user_header += sizeof(kd_header_v3);
+ }
+
+ // Write a cpu map. This is a sub chunk of the header
+ cpumap = (uint8_t*)((uintptr_t) cpumap + sizeof(kd_cpumap_header));
+ size_t payload_size = (size_t)(cpumap_size - sizeof(kd_cpumap_header));
+ if (fd) {
+ ret = kdbg_write_v3_chunk_to_fd(V3_CPU_MAP, V3_CPUMAP_VERSION, payload_size, (void *)cpumap, payload_size, fd);
+ if (ret) {
+ goto bail;
+ }
+ } else {
+ ret = kdbg_write_v3_chunk_header(user_header, V3_CPU_MAP, V3_CPUMAP_VERSION, payload_size, NULL, NULL);
+ if (ret) {
+ goto bail;
+ }
+ user_header += sizeof(kd_chunk_header_v3);
+ if (copyout(cpumap, user_header, payload_size)) {
+ ret = EFAULT;
+ goto bail;
+ }
+ // Update the user pointer
+ user_header += payload_size;
+ }
+
+ // Write a thread map
+ if (fd) {
+ ret = kdbg_write_v3_chunk_to_fd(V3_THREAD_MAP, V3_THRMAP_VERSION, thrmap_size, (void *)kd_mapptr, thrmap_size, fd);
+ if (ret) {
+ goto bail;
+ }
+ } else {
+ ret = kdbg_write_v3_chunk_header(user_header, V3_THREAD_MAP, V3_THRMAP_VERSION, thrmap_size, NULL, NULL);
+ if (ret) {
+ goto bail;
+ }
+ user_header += sizeof(kd_chunk_header_v3);
+ if (copyout(kd_mapptr, user_header, thrmap_size)) {
+ ret = EFAULT;
+ goto bail;
+ }
+ user_header += thrmap_size;
+ }
+
+ if (fd) {
+ RAW_file_written += bytes_needed;
+ }
- // If its a buffer, check if we have enough space to copy the header and the maps.
- if (user_header) {
- bytes_needed = header.length + thrmap_size + (2 * sizeof(kd_chunk_header_v3));
- if ( !user_header_size ) {
- ret = EINVAL;
- goto bail;
- }
- if (*user_header_size < bytes_needed) {
- ret = EINVAL;
- goto bail;
- }
- }
-
- // Start writing the header
- if (fd) {
- void *hdr_ptr = (void *)(((uintptr_t) &header) + sizeof(kd_chunk_header_v3));
- size_t payload_size = (sizeof(kd_header_v3) - sizeof(kd_chunk_header_v3));
-
- ret = kdbg_write_v3_chunk_to_fd(RAW_VERSION3, V3_HEADER_VERSION, header.length, hdr_ptr, payload_size, fd);
- if (ret) {
- goto bail;
- }
- }
- else {
- if (copyout(&header, user_header, sizeof(kd_header_v3))) {
- ret = EFAULT;
- goto bail;
- }
- // Update the user pointer
- user_header += sizeof(kd_header_v3);
- }
-
- // Write a cpu map. This is a sub chunk of the header
- cpumap = (uint8_t*)((uintptr_t) cpumap + sizeof(kd_cpumap_header));
- size_t payload_size = (size_t)(cpumap_size - sizeof(kd_cpumap_header));
- if (fd) {
- ret = kdbg_write_v3_chunk_to_fd(V3_CPU_MAP, V3_CPUMAP_VERSION, payload_size, (void *)cpumap, payload_size, fd);
- if (ret) {
- goto bail;
- }
- }
- else {
- ret = kdbg_write_v3_chunk_header(user_header, V3_CPU_MAP, V3_CPUMAP_VERSION, payload_size, NULL, NULL);
- if (ret) {
- goto bail;
- }
- user_header += sizeof(kd_chunk_header_v3);
- if (copyout(cpumap, user_header, payload_size)) {
- ret = EFAULT;
- goto bail;
- }
- // Update the user pointer
- user_header += payload_size;
- }
-
- // Write a thread map
- if (fd) {
- ret = kdbg_write_v3_chunk_to_fd(V3_THREAD_MAP, V3_THRMAP_VERSION, thrmap_size, (void *)kd_mapptr, thrmap_size, fd);
- if (ret) {
- goto bail;
- }
- }
- else {
- ret = kdbg_write_v3_chunk_header(user_header, V3_THREAD_MAP, V3_THRMAP_VERSION, thrmap_size, NULL, NULL);
- if (ret) {
- goto bail;
- }
- user_header += sizeof(kd_chunk_header_v3);
- if (copyout(kd_mapptr, user_header, thrmap_size)) {
- ret = EFAULT;
- goto bail;
- }
- user_header += thrmap_size;
- }
-
- if (fd) {
- RAW_file_written += bytes_needed;
- }
-
- *user_header_size = bytes_needed;
+ *user_header_size = bytes_needed;
bail:
- if (cpumap) {
- kmem_free(kernel_map, (vm_offset_t)cpumap, cpumap_size);
- }
- return (ret);
+ if (cpumap) {
+ kmem_free(kernel_map, (vm_offset_t)cpumap, cpumap_size);
+ }
+ return ret;
}
int
}
*user_cpumap_size = cpumap_size;
kmem_free(kernel_map, (vm_offset_t)cpumap, cpumap_size);
- } else
+ } else {
ret = EINVAL;
- } else
+ }
+ } else {
ret = EINVAL;
+ }
- return (ret);
+ return ret;
}
int
kdbg_readcurthrmap(user_addr_t buffer, size_t *bufsize)
{
kd_threadmap *mapptr;
- unsigned int mapsize;
- unsigned int mapcount;
- unsigned int count = 0;
+ vm_size_t mapsize;
+ vm_size_t mapcount;
int ret = 0;
+ size_t count = *bufsize / sizeof(kd_threadmap);
- count = *bufsize/sizeof(kd_threadmap);
*bufsize = 0;
- if ( (mapptr = kdbg_thrmap_init_internal(count, &mapsize, &mapcount)) ) {
- if (copyout(mapptr, buffer, mapcount * sizeof(kd_threadmap)))
+ if ((mapptr = kdbg_thrmap_init_internal(count, &mapsize, &mapcount))) {
+ if (copyout(mapptr, buffer, mapcount * sizeof(kd_threadmap))) {
ret = EFAULT;
- else
+ } else {
*bufsize = (mapcount * sizeof(kd_threadmap));
+ }
- kmem_free(kernel_map, (vm_offset_t)mapptr, mapsize);
- } else
+ kfree(mapptr, mapsize);
+ } else {
ret = EINVAL;
+ }
- return (ret);
+ return ret;
}
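+/*
+ * Write a RAW_VERSION1 header to the vnode, optionally followed by the
+ * thread map, padding the output so the event data starts page-aligned and
+ * embedding a cpumap in the final page of padding.
+ */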
static int
-kdbg_write_v1_plus_header(uint32_t count, vnode_t vp, vfs_context_t ctx)
+kdbg_write_v1_header(bool write_thread_map, vnode_t vp, vfs_context_t ctx)
{
int ret = 0;
- RAW_header header;
- clock_sec_t secs;
- clock_usec_t usecs;
- char *pad_buf;
+ RAW_header header;
+ clock_sec_t secs;
+ clock_usec_t usecs;
+ char *pad_buf;
uint32_t pad_size;
uint32_t extra_thread_count = 0;
uint32_t cpumap_size;
- unsigned int mapsize = kd_mapcount * sizeof(kd_threadmap);
+ size_t map_size = 0;
+ uint32_t map_count = 0;
+
+ if (write_thread_map) {
+ assert(kd_ctrl_page.kdebug_flags & KDBG_MAPINIT);
+ if (kd_mapcount > UINT32_MAX) {
+ return ERANGE;
+ }
+ map_count = (uint32_t)kd_mapcount;
+ if (os_mul_overflow(map_count, sizeof(kd_threadmap), &map_size)) {
+ return ERANGE;
+ }
+ if (map_size >= INT_MAX) {
+ return ERANGE;
+ }
+ }
+
+ /*
+ * Without the buffers initialized, we cannot construct a CPU map or a
+ * thread map, and cannot write a header.
+ */
+ if (!(kd_ctrl_page.kdebug_flags & KDBG_BUFINIT)) {
+ return EINVAL;
+ }
/*
- * To write a RAW_VERSION1+ file, we
- * must embed a cpumap in the "padding"
- * used to page align the events following
- * the threadmap. If the threadmap happens
- * to not require enough padding, we
- * artificially increase its footprint
- * until it needs enough padding.
+ * To write a RAW_VERSION1+ file, we must embed a cpumap in the
+ * "padding" used to page align the events following the threadmap. If
+ * the threadmap happens to not require enough padding, we artificially
+ * increase its footprint until it needs enough padding.
*/
- assert(vp);
- assert(ctx);
+ assert(vp);
+ assert(ctx);
- pad_size = PAGE_16KB - ((sizeof(RAW_header) + (count * sizeof(kd_threadmap))) & PAGE_MASK_64);
+ pad_size = PAGE_16KB - ((sizeof(RAW_header) + map_size) & PAGE_MASK);
cpumap_size = sizeof(kd_cpumap_header) + kd_ctrl_page.kdebug_cpus * sizeof(kd_cpumap);
if (cpumap_size > pad_size) {
* we increase the pad_size by 16K. We do this so that the event
* data is always available on a page aligned boundary for both
* 4k and 16k systems. We enforce this alignment for the event
- * data so that we can take advantage of optimized file/disk writes.*/
+ * data so that we can take advantage of optimized file/disk writes.
+ */
pad_size += PAGE_16KB;
}
* the cpumap is embedded in the last 4K page before when the event data is expected.
* This way the tools can read the data starting the next page boundary on both
* 4K and 16K systems preserving compatibility with older versions of the tools
- */
+ */
if (pad_size > PAGE_4KB) {
pad_size -= PAGE_4KB;
extra_thread_count = (pad_size / sizeof(kd_threadmap)) + 1;
}
+ memset(&header, 0, sizeof(header));
header.version_no = RAW_VERSION1;
- header.thread_count = count + extra_thread_count;
+ header.thread_count = map_count + extra_thread_count;
clock_get_calendar_microtime(&secs, &usecs);
header.TOD_secs = secs;
header.TOD_usecs = usecs;
- ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)&header, sizeof(RAW_header), RAW_file_offset,
- UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
- if (ret)
+ ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)&header, (int)sizeof(RAW_header), RAW_file_offset,
+ UIO_SYSSPACE, IO_NODELOCKED | IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
+ if (ret) {
goto write_error;
+ }
RAW_file_offset += sizeof(RAW_header);
+ RAW_file_written += sizeof(RAW_header);
- ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)kd_mapptr, mapsize, RAW_file_offset,
- UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
- if (ret)
- goto write_error;
- RAW_file_offset += mapsize;
+ if (write_thread_map) {
+ assert(map_size < INT_MAX);
+ ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)kd_mapptr, (int)map_size, RAW_file_offset,
+ UIO_SYSSPACE, IO_NODELOCKED | IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
+ if (ret) {
+ goto write_error;
+ }
+
+ RAW_file_offset += map_size;
+ RAW_file_written += map_size;
+ }
if (extra_thread_count) {
pad_size = extra_thread_count * sizeof(kd_threadmap);
- pad_buf = (char *)kalloc(pad_size);
+ pad_buf = kheap_alloc(KHEAP_TEMP, pad_size, Z_WAITOK | Z_ZERO);
if (!pad_buf) {
ret = ENOMEM;
goto write_error;
}
- memset(pad_buf, 0, pad_size);
- ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)pad_buf, pad_size, RAW_file_offset,
- UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
- kfree(pad_buf, pad_size);
-
- if (ret)
+ assert(pad_size < INT_MAX);
+ ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)pad_buf, (int)pad_size, RAW_file_offset,
+ UIO_SYSSPACE, IO_NODELOCKED | IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
+ kheap_free(KHEAP_TEMP, pad_buf, pad_size);
+ if (ret) {
goto write_error;
- RAW_file_offset += pad_size;
+ }
+ RAW_file_offset += pad_size;
+ RAW_file_written += pad_size;
}
- pad_size = PAGE_SIZE - (RAW_file_offset & PAGE_MASK_64);
+ pad_size = PAGE_SIZE - (RAW_file_offset & PAGE_MASK);
if (pad_size) {
- pad_buf = (char *)kalloc(pad_size);
+ pad_buf = (char *)kheap_alloc(KHEAP_TEMP, pad_size, Z_WAITOK | Z_ZERO);
if (!pad_buf) {
ret = ENOMEM;
goto write_error;
}
- memset(pad_buf, 0, pad_size);
/*
* embed a cpumap in the padding bytes.
memset(pad_buf, 0, pad_size);
}
- ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)pad_buf, pad_size, RAW_file_offset,
- UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
- kfree(pad_buf, pad_size);
-
- if (ret)
+ assert(pad_size < INT_MAX);
+ ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)pad_buf, (int)pad_size, RAW_file_offset,
+ UIO_SYSSPACE, IO_NODELOCKED | IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
+ kheap_free(KHEAP_TEMP, pad_buf, pad_size);
+ if (ret) {
goto write_error;
+ }
+
RAW_file_offset += pad_size;
+ RAW_file_written += pad_size;
}
- RAW_file_written += sizeof(RAW_header) + mapsize + pad_size;
write_error:
return ret;
}
-int
-kdbg_readthrmap(user_addr_t buffer, size_t *number, vnode_t vp, vfs_context_t ctx)
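+/*
+ * Free the thread map and reset its bookkeeping so a fresh map can be
+ * built later.
+ */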
+static void
+kdbg_clear_thread_map(void)
{
+ ktrace_assert_lock_held();
+
+ if (kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) {
+ assert(kd_mapptr != NULL);
+ kfree(kd_mapptr, kd_mapsize);
+ kd_mapptr = NULL;
+ kd_mapsize = 0;
+ kd_mapcount = 0;
+ kd_ctrl_page.kdebug_flags &= ~KDBG_MAPINIT;
+ }
+}
- int avail = 0;
+/*
+ * Write out a version 1 header and the thread map, if it is initialized, to a
+ * vnode. Used by KDWRITEMAP and kdbg_dump_trace_to_file.
+ *
+ * Returns write errors from vn_rdwr if a write fails. Returns ENODATA if the
+ * thread map has not been initialized, but the header will still be written.
+ * Returns ENOMEM if padding could not be allocated. Returns 0 otherwise.
+ */
+static int
+kdbg_write_thread_map(vnode_t vp, vfs_context_t ctx)
+{
int ret = 0;
- uint32_t count = 0;
- unsigned int mapsize;
+ bool map_initialized;
- if ((!vp && !buffer) || (vp && buffer)) {
- return EINVAL;
+ ktrace_assert_lock_held();
+ assert(ctx != NULL);
+
+ map_initialized = (kd_ctrl_page.kdebug_flags & KDBG_MAPINIT);
+
+ ret = kdbg_write_v1_header(map_initialized, vp, ctx);
+ if (ret == 0) {
+ if (map_initialized) {
+ kdbg_clear_thread_map();
+ } else {
+ ret = ENODATA;
+ }
}
- assert(number);
- assert((vp == NULL) || (ctx != NULL));
+ return ret;
+}
+
+/*
+ * Copy out the thread map to a user space buffer. Used by KDTHRMAP.
+ *
+ * Returns copyout errors if the copyout fails. Returns ENODATA if the thread
+ * map has not been initialized. Returns EINVAL if the buffer provided is not
+ * large enough for the entire thread map. Returns 0 otherwise.
+ */
+static int
+kdbg_copyout_thread_map(user_addr_t buffer, size_t *buffer_size)
+{
+ bool map_initialized;
+ size_t map_size;
+ int ret = 0;
- avail = *number;
- count = avail/sizeof (kd_threadmap);
- mapsize = kd_mapcount * sizeof(kd_threadmap);
+ ktrace_assert_lock_held();
+ assert(buffer_size != NULL);
- if (count && (count <= kd_mapcount)) {
- if ((kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr) {
- if (*number < mapsize)
- ret = EINVAL;
- else {
- if (vp) {
- ret = kdbg_write_v1_plus_header(count, vp, ctx);
- if (ret)
- goto write_error;
- }
- else {
- if (copyout(kd_mapptr, buffer, mapsize))
- ret = EINVAL;
- }
- }
- }
- else
- ret = EINVAL;
+ map_initialized = (kd_ctrl_page.kdebug_flags & KDBG_MAPINIT);
+ if (!map_initialized) {
+ return ENODATA;
}
- else
- ret = EINVAL;
- if (ret && vp)
- {
- count = 0;
+ map_size = kd_mapcount * sizeof(kd_threadmap);
+ if (*buffer_size < map_size) {
+ return EINVAL;
+ }
- ret = kdbg_write_to_vnode((caddr_t)&count, sizeof(uint32_t), vp, ctx, RAW_file_offset);
- if (!ret) {
- RAW_file_offset += sizeof(uint32_t);
- RAW_file_written += sizeof(uint32_t);
- }
+ ret = copyout(kd_mapptr, buffer, map_size);
+ if (ret == 0) {
+ kdbg_clear_thread_map();
}
-write_error:
- if ((kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
- {
- kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
- kd_ctrl_page.kdebug_flags &= ~KDBG_MAPINIT;
- kd_mapsize = 0;
- kd_mapptr = (kd_threadmap *) 0;
- kd_mapcount = 0;
- }
- return(ret);
+
+ return ret;
}
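+/*
+ * Write a RAW v3 header, including the CPU and thread maps, to a user
+ * buffer or file descriptor; the thread map is cleared after a successful
+ * write.
+ */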
int
-kdbg_readthrmap_v3(user_addr_t buffer, size_t *number, int fd)
+kdbg_readthrmap_v3(user_addr_t buffer, size_t buffer_size, int fd)
{
- int avail = 0;
int ret = 0;
- uint32_t count = 0;
- unsigned int mapsize;
+ bool map_initialized;
+ size_t map_size;
+
+ ktrace_assert_lock_held();
if ((!fd && !buffer) || (fd && buffer)) {
return EINVAL;
}
- assert(number);
+ map_initialized = (kd_ctrl_page.kdebug_flags & KDBG_MAPINIT);
+ map_size = kd_mapcount * sizeof(kd_threadmap);
- avail = *number;
- count = avail/sizeof (kd_threadmap);
- mapsize = kd_mapcount * sizeof(kd_threadmap);
+ if (map_initialized && (buffer_size >= map_size)) {
+ ret = kdbg_write_v3_header(buffer, &buffer_size, fd);
- if (count && (count <= kd_mapcount)) {
- if ((kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr) {
- if (*number < mapsize) {
- ret = EINVAL;
- }
- else {
- ret = kdbg_write_v3_header(buffer, number, fd);
- if (ret) {
- goto write_error;
- }
- }
- }
- else {
- ret = EINVAL;
+ if (ret == 0) {
+ kdbg_clear_thread_map();
}
- }
- else {
+ } else {
ret = EINVAL;
}
-write_error:
- if ((kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr) {
- kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
- kd_ctrl_page.kdebug_flags &= ~KDBG_MAPINIT;
- kd_mapsize = 0;
- kd_mapptr = (kd_threadmap *) 0;
- kd_mapcount = 0;
- }
- return(ret);
-}
+ return ret;
+}
-static int
-kdbg_set_nkdbufs(unsigned int value)
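+/*
+ * Clamp the requested number of trace buffer entries so the buffers consume
+ * at most half of available memory, and store the result in the global
+ * `nkdbufs`.
+ */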
+static void
+kdbg_set_nkdbufs(unsigned int req_nkdbufs)
{
- /*
- * We allow a maximum buffer size of 50% of either ram or max mapped address, whichever is smaller
- * 'value' is the desired number of trace entries
+ /*
+ * Only allow allocation up to half the available memory (sane_size).
*/
- unsigned int max_entries = (sane_size/2) / sizeof(kd_buf);
-
- if (value <= max_entries)
- return (value);
- else
- return (max_entries);
+ uint64_t max_nkdbufs = (sane_size / 2) / sizeof(kd_buf);
+ nkdbufs = (req_nkdbufs > max_nkdbufs) ? (unsigned int)max_nkdbufs :
+ req_nkdbufs;
}
-
-static int
-kdbg_enable_bg_trace(void)
+/*
+ * Block until there are `n_storage_threshold` storage units filled with
+ * events or `timeout_ms` milliseconds have passed. If `locked_wait` is true,
+ * `ktrace_lock` is held while waiting. This is necessary while waiting to
+ * write events out of the buffers.
+ *
+ * Returns true if the threshold was reached and false otherwise.
+ *
+ * Called with `ktrace_lock` locked and interrupts enabled.
+ */
+static bool
+kdbg_wait(uint64_t timeout_ms, bool locked_wait)
{
- int ret = 0;
+ int wait_result = THREAD_AWAKENED;
+ uint64_t abstime = 0;
- if (kdlog_bg_trace == TRUE && kdlog_bg_trace_running == FALSE && n_storage_buffers == 0) {
- nkdbufs = bg_nkdbufs;
- ret = kdbg_reinit(FALSE);
- if (0 == ret) {
- kdbg_set_tracing_enabled(TRUE, KDEBUG_ENABLE_TRACE);
- kdlog_bg_trace_running = TRUE;
+ ktrace_assert_lock_held();
+
+ if (timeout_ms != 0) {
+ uint64_t ns = timeout_ms * NSEC_PER_MSEC;
+ nanoseconds_to_absolutetime(ns, &abstime);
+ clock_absolutetime_interval_to_deadline(abstime, &abstime);
+ }
+
+ bool s = ml_set_interrupts_enabled(false);
+ if (!s) {
+ panic("kdbg_wait() called with interrupts disabled");
+ }
+ lck_spin_lock_grp(kdw_spin_lock, kdebug_lck_grp);
+
+ if (!locked_wait) {
+ /* drop the mutex to allow others to access trace */
+ ktrace_unlock();
+ }
+
+ while (wait_result == THREAD_AWAKENED &&
+ kd_ctrl_page.kds_inuse_count < n_storage_threshold) {
+ kds_waiter = 1;
+
+ if (abstime) {
+ wait_result = lck_spin_sleep_deadline(kdw_spin_lock, 0, &kds_waiter, THREAD_ABORTSAFE, abstime);
+ } else {
+ wait_result = lck_spin_sleep(kdw_spin_lock, 0, &kds_waiter, THREAD_ABORTSAFE);
}
- wakeup(&kdlog_bg_trace);
+
+ kds_waiter = 0;
}
- return ret;
+
+ /* check the count under the spinlock */
+ bool threshold_exceeded = (kd_ctrl_page.kds_inuse_count >= n_storage_threshold);
+
+ lck_spin_unlock(kdw_spin_lock);
+ ml_set_interrupts_enabled(s);
+
+ if (!locked_wait) {
+ /* pick the mutex back up again */
+ ktrace_lock();
+ }
+
+ /* write out whether we've exceeded the threshold */
+ return threshold_exceeded;
}
+/*
+ * Wakeup a thread waiting using `kdbg_wait` if there are at least
+ * `n_storage_threshold` storage units in use.
+ */
static void
-kdbg_disable_bg_trace(void)
+kdbg_wakeup(void)
{
- if (kdlog_bg_trace_running == TRUE) {
- kdlog_bg_trace_running = FALSE;
- kdbg_clear();
- }
-}
+ bool need_kds_wakeup = false;
+ /*
+ * Try to take the lock here to synchronize with the waiter entering
+ * the blocked state. Use the try mode to prevent deadlocks caused by
+ * re-entering this routine due to various trace points triggered in the
+	 * lck_spin_sleep_xxxx routines used to actually enter one of our two wait
+	 * conditions. No problem if we fail; there will be lots of additional
+	 * events coming in that will eventually succeed in grabbing this lock.
+ */
+ bool s = ml_set_interrupts_enabled(false);
+ if (lck_spin_try_lock(kdw_spin_lock)) {
+ if (kds_waiter &&
+ (kd_ctrl_page.kds_inuse_count >= n_storage_threshold)) {
+ kds_waiter = 0;
+ need_kds_wakeup = true;
+ }
+ lck_spin_unlock(kdw_spin_lock);
+ }
-/*
- * This function is provided for the CHUD toolkit only.
- * int val:
- * zero disables kdebug_chudhook function call
- * non-zero enables kdebug_chudhook function call
- * char *fn:
- * address of the enabled kdebug_chudhook function
-*/
+ ml_set_interrupts_enabled(s);
-void
-kdbg_control_chud(int val, void *fn)
-{
- kdbg_lock_init();
-
- if (val) {
- /* enable chudhook */
- kdebug_chudhook = fn;
- kdbg_set_flags(SLOW_CHUD, KDEBUG_ENABLE_CHUD, TRUE);
- }
- else {
- /* disable chudhook */
- kdbg_set_flags(SLOW_CHUD, KDEBUG_ENABLE_CHUD, FALSE);
- kdebug_chudhook = 0;
+	if (need_kds_wakeup) {
+ wakeup(&kds_waiter);
}
}
-
int
kdbg_control(int *name, u_int namelen, user_addr_t where, size_t *sizep)
{
	int ret = 0;
	size_t size = *sizep;
	unsigned int value = 0;
kd_regtype kd_Reg;
kbufinfo_t kd_bufinfo;
- pid_t curpid;
- proc_t p, curproc;
-
- if (name[0] == KERN_KDGETENTROPY ||
- name[0] == KERN_KDWRITETR ||
- name[0] == KERN_KDWRITETR_V3 ||
- name[0] == KERN_KDWRITEMAP ||
- name[0] == KERN_KDWRITEMAP_V3 ||
- name[0] == KERN_KDEFLAGS ||
- name[0] == KERN_KDDFLAGS ||
- name[0] == KERN_KDENABLE ||
- name[0] == KERN_KDENABLE_BG_TRACE ||
- name[0] == KERN_KDSETBUF) {
-
- if ( namelen < 2 )
- return(EINVAL);
+ proc_t p;
+
+ if (name[0] == KERN_KDWRITETR ||
+ name[0] == KERN_KDWRITETR_V3 ||
+ name[0] == KERN_KDWRITEMAP ||
+ name[0] == KERN_KDWRITEMAP_V3 ||
+ name[0] == KERN_KDEFLAGS ||
+ name[0] == KERN_KDDFLAGS ||
+ name[0] == KERN_KDENABLE ||
+ name[0] == KERN_KDSETBUF) {
+ if (namelen < 2) {
+ return EINVAL;
+ }
value = name[1];
}
-
- kdbg_lock_init();
- if ( !(kd_ctrl_page.kdebug_flags & KDBG_LOCKINIT))
- return(ENOSPC);
+ kdbg_lock_init();
+ assert(kd_ctrl_page.kdebug_flags & KDBG_LOCKINIT);
- lck_mtx_lock(kd_trace_mtx_sysctl);
+ ktrace_lock();
- switch(name[0]) {
- case KERN_KDGETBUF:
- /*
- * Does not alter the global_state_pid
- * This is a passive request.
- */
- if (size < sizeof(kd_bufinfo.nkdbufs)) {
- /*
- * There is not enough room to return even
- * the first element of the info structure.
- */
- ret = EINVAL;
- goto out;
- }
- kd_bufinfo.nkdbufs = nkdbufs;
- kd_bufinfo.nkdthreads = kd_mapcount;
-
- if ( (kd_ctrl_page.kdebug_slowcheck & SLOW_NOLOG) )
- kd_bufinfo.nolog = 1;
- else
- kd_bufinfo.nolog = 0;
-
- kd_bufinfo.flags = kd_ctrl_page.kdebug_flags;
-#if defined(__LP64__)
- kd_bufinfo.flags |= KDBG_LP64;
-#endif
- kd_bufinfo.bufid = global_state_pid;
-
- if (size >= sizeof(kd_bufinfo)) {
- /*
- * Provide all the info we have
- */
- if (copyout(&kd_bufinfo, where, sizeof(kd_bufinfo)))
- ret = EINVAL;
- } else {
- /*
- * For backwards compatibility, only provide
- * as much info as there is room for.
- */
- if (copyout(&kd_bufinfo, where, size))
- ret = EINVAL;
- }
- goto out;
- case KERN_KDGETENTROPY: {
- /* Obsolescent - just fake with a random buffer */
- char *buffer = (char *) kalloc(size);
- read_frandom((void *) buffer, size);
- ret = copyout(buffer, where, size);
- kfree(buffer, size);
+ /*
+ * Some requests only require "read" access to kdebug trace. Regardless,
+ * tell ktrace that a configuration or read is occurring (and see if it's
+ * allowed).
+ */
+ if (name[0] != KERN_KDGETBUF &&
+ name[0] != KERN_KDGETREG &&
+ name[0] != KERN_KDREADCURTHRMAP) {
+ if ((ret = ktrace_configure(KTRACE_KDEBUG))) {
goto out;
}
-
- case KERN_KDENABLE_BG_TRACE:
- bg_nkdbufs = kdbg_set_nkdbufs(value);
- kdlog_bg_trace = TRUE;
- ret = kdbg_enable_bg_trace();
- goto out;
-
- case KERN_KDDISABLE_BG_TRACE:
- kdlog_bg_trace = FALSE;
- kdbg_disable_bg_trace();
+ } else {
+ if ((ret = ktrace_read_check())) {
goto out;
+ }
+ }
- case KERN_KDWAIT_BG_TRACE_RESET:
- if (!kdlog_bg_trace){
- ret = EINVAL;
- goto out;
- }
- wait_result_t wait_result = assert_wait(&kdlog_bg_trace, THREAD_ABORTSAFE);
- lck_mtx_unlock(kd_trace_mtx_sysctl);
- if (wait_result == THREAD_WAITING)
- wait_result = thread_block(THREAD_CONTINUE_NULL);
- if (wait_result == THREAD_INTERRUPTED)
- ret = EINTR;
- lck_mtx_lock(kd_trace_mtx_sysctl);
- goto out;
+ switch (name[0]) {
+ case KERN_KDGETBUF:
+ if (size < sizeof(kd_bufinfo.nkdbufs)) {
+ /*
+ * There is not enough room to return even
+ * the first element of the info structure.
+ */
+ ret = EINVAL;
+ break;
+ }
- case KERN_KDSET_BG_TYPEFILTER:
- if (!kdlog_bg_trace || !kdlog_bg_trace_running){
- ret = EINVAL;
- goto out;
- }
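+		/*
+		 * Zero the structure so fields we don't fill in, and any padding,
+		 * read as zero when copied out below.
+		 */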
+ memset(&kd_bufinfo, 0, sizeof(kd_bufinfo));
- if (size != KDBG_TYPEFILTER_BITMAP_SIZE) {
- ret = EINVAL;
- goto out;
- }
+ kd_bufinfo.nkdbufs = nkdbufs;
+ kd_bufinfo.nkdthreads = kd_mapcount < INT_MAX ? (int)kd_mapcount :
+ INT_MAX;
+ if ((kd_ctrl_page.kdebug_slowcheck & SLOW_NOLOG)) {
+ kd_bufinfo.nolog = 1;
+ } else {
+ kd_bufinfo.nolog = 0;
+ }
- if ((kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) == 0){
- if ((ret = kdbg_enable_typefilter()))
- goto out;
- }
+ kd_bufinfo.flags = kd_ctrl_page.kdebug_flags;
+#if defined(__LP64__)
+ kd_bufinfo.flags |= KDBG_LP64;
+#endif
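+		/*
+		 * The owner used to be tracked in global_state_pid, where -1 meant
+		 * "unowned"; preserve that convention for bufid.
+		 */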
+ {
+ int pid = ktrace_get_owning_pid();
+ kd_bufinfo.bufid = (pid == 0 ? -1 : pid);
+ }
- if (copyin(where, type_filter_bitmap, KDBG_TYPEFILTER_BITMAP_SIZE)) {
- ret = EINVAL;
- goto out;
- }
- kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_TYPEFILTER_CHANGED, type_filter_bitmap);
- goto out;
- }
-
- if ((curproc = current_proc()) != NULL)
- curpid = curproc->p_pid;
- else {
- ret = ESRCH;
- goto out;
- }
- if (global_state_pid == -1)
- global_state_pid = curpid;
- else if (global_state_pid != curpid) {
- if ((p = proc_find(global_state_pid)) == NULL) {
+ if (size >= sizeof(kd_bufinfo)) {
/*
- * The global pid no longer exists
+ * Provide all the info we have
*/
- global_state_pid = curpid;
+ if (copyout(&kd_bufinfo, where, sizeof(kd_bufinfo))) {
+ ret = EINVAL;
+ }
} else {
/*
- * The global pid exists, deny this request
+ * For backwards compatibility, only provide
+ * as much info as there is room for.
*/
- proc_rele(p);
-
- ret = EBUSY;
- goto out;
+ if (copyout(&kd_bufinfo, where, size)) {
+ ret = EINVAL;
+ }
}
- }
+ break;
- switch(name[0]) {
- case KERN_KDEFLAGS:
- kdbg_disable_bg_trace();
+ case KERN_KDREADCURTHRMAP:
+ ret = kdbg_readcurthrmap(where, sizep);
+ break;
- value &= KDBG_USERFLAGS;
- kd_ctrl_page.kdebug_flags |= value;
- break;
- case KERN_KDDFLAGS:
- kdbg_disable_bg_trace();
+ case KERN_KDEFLAGS:
+ value &= KDBG_USERFLAGS;
+ kd_ctrl_page.kdebug_flags |= value;
+ break;
- value &= KDBG_USERFLAGS;
- kd_ctrl_page.kdebug_flags &= ~value;
- break;
- case KERN_KDENABLE:
+ case KERN_KDDFLAGS:
+ value &= KDBG_USERFLAGS;
+ kd_ctrl_page.kdebug_flags &= ~value;
+ break;
+
+ case KERN_KDENABLE:
+ /*
+	 * Enable the tracing mechanism. Two types:
+	 * KDEBUG_ENABLE_TRACE is the standard one,
+	 * and KDEBUG_ENABLE_PPT is a carefully
+	 * chosen subset that avoids performance impact.
+ */
+ if (value) {
/*
- * Enable tracing mechanism. Two types:
- * KDEBUG_TRACE is the standard one,
- * and KDEBUG_PPT which is a carefully
- * chosen subset to avoid performance impact.
+ * enable only if buffer is initialized
*/
- if (value) {
- /*
- * enable only if buffer is initialized
- */
- if (!(kd_ctrl_page.kdebug_flags & KDBG_BUFINIT) ||
- !(value == KDEBUG_ENABLE_TRACE || value == KDEBUG_ENABLE_PPT)) {
- ret = EINVAL;
- break;
- }
- kdbg_thrmap_init();
-
- kdbg_set_tracing_enabled(TRUE, value);
- }
- else
- {
- kdbg_set_tracing_enabled(FALSE, 0);
- }
- break;
- case KERN_KDSETBUF:
- kdbg_disable_bg_trace();
-
- nkdbufs = kdbg_set_nkdbufs(value);
- break;
- case KERN_KDSETUP:
- kdbg_disable_bg_trace();
-
- ret = kdbg_reinit(FALSE);
- break;
- case KERN_KDREMOVE:
- kdbg_clear();
- ret = kdbg_enable_bg_trace();
- break;
- case KERN_KDSETREG:
- if(size < sizeof(kd_regtype)) {
- ret = EINVAL;
- break;
- }
- if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
+ if (!(kd_ctrl_page.kdebug_flags & KDBG_BUFINIT) ||
+ !(value == KDEBUG_ENABLE_TRACE || value == KDEBUG_ENABLE_PPT)) {
ret = EINVAL;
break;
}
- kdbg_disable_bg_trace();
+ kdbg_thrmap_init();
- ret = kdbg_setreg(&kd_Reg);
- break;
- case KERN_KDGETREG:
- if (size < sizeof(kd_regtype)) {
- ret = EINVAL;
+ kdbg_set_tracing_enabled(true, value);
+ } else {
+ if (!kdebug_enable) {
break;
}
- ret = kdbg_getreg(&kd_Reg);
- if (copyout(&kd_Reg, where, sizeof(kd_regtype))) {
- ret = EINVAL;
- }
- kdbg_disable_bg_trace();
-
- break;
- case KERN_KDREADTR:
- ret = kdbg_read(where, sizep, NULL, NULL, RAW_VERSION1);
- break;
- case KERN_KDWRITETR:
- case KERN_KDWRITETR_V3:
- case KERN_KDWRITEMAP:
- case KERN_KDWRITEMAP_V3:
- {
- struct vfs_context context;
- struct fileproc *fp;
- size_t number;
- vnode_t vp;
- int fd;
- if (name[0] == KERN_KDWRITETR || name[0] == KERN_KDWRITETR_V3) {
- int s;
- int wait_result = THREAD_AWAKENED;
- u_int64_t abstime;
- u_int64_t ns;
-
- if (*sizep) {
- ns = ((u_int64_t)*sizep) * (u_int64_t)(1000 * 1000);
- nanoseconds_to_absolutetime(ns, &abstime );
- clock_absolutetime_interval_to_deadline( abstime, &abstime );
- } else
- abstime = 0;
-
- s = ml_set_interrupts_enabled(FALSE);
- lck_spin_lock(kdw_spin_lock);
-
- while (wait_result == THREAD_AWAKENED && kd_ctrl_page.kds_inuse_count < n_storage_threshold) {
-
- kds_waiter = 1;
-
- if (abstime)
- wait_result = lck_spin_sleep_deadline(kdw_spin_lock, 0, &kds_waiter, THREAD_ABORTSAFE, abstime);
- else
- wait_result = lck_spin_sleep(kdw_spin_lock, 0, &kds_waiter, THREAD_ABORTSAFE);
-
- kds_waiter = 0;
- }
- lck_spin_unlock(kdw_spin_lock);
- ml_set_interrupts_enabled(s);
- }
- p = current_proc();
- fd = value;
+ kernel_debug_disable();
+ }
+ break;
- proc_fdlock(p);
- if ( (ret = fp_lookup(p, fd, &fp, 1)) ) {
- proc_fdunlock(p);
- break;
- }
- context.vc_thread = current_thread();
- context.vc_ucred = fp->f_fglob->fg_cred;
+ case KERN_KDSETBUF:
+ kdbg_set_nkdbufs(value);
+ break;
- if (FILEGLOB_DTYPE(fp->f_fglob) != DTYPE_VNODE) {
- fp_drop(p, fd, fp, 1);
- proc_fdunlock(p);
+ case KERN_KDSETUP:
+ ret = kdbg_reinit(false);
+ break;
- ret = EBADF;
- break;
- }
- vp = (struct vnode *)fp->f_fglob->fg_data;
- proc_fdunlock(p);
-
- if ((ret = vnode_getwithref(vp)) == 0) {
- RAW_file_offset = fp->f_fglob->fg_offset;
- if (name[0] == KERN_KDWRITETR || name[0] == KERN_KDWRITETR_V3) {
- number = nkdbufs * sizeof(kd_buf);
-
- KERNEL_DEBUG_CONSTANT(TRACE_WRITING_EVENTS | DBG_FUNC_START, 0, 0, 0, 0, 0);
- if (name[0] == KERN_KDWRITETR_V3)
- ret = kdbg_read(0, &number, vp, &context, RAW_VERSION3);
- else
- ret = kdbg_read(0, &number, vp, &context, RAW_VERSION1);
- KERNEL_DEBUG_CONSTANT(TRACE_WRITING_EVENTS | DBG_FUNC_END, number, 0, 0, 0, 0);
-
- *sizep = number;
- } else {
- number = kd_mapcount * sizeof(kd_threadmap);
- if (name[0] == KERN_KDWRITEMAP_V3)
- kdbg_readthrmap_v3(0, &number, fd);
- else
- kdbg_readthrmap(0, &number, vp, &context);
- }
- fp->f_fglob->fg_offset = RAW_file_offset;
- vnode_put(vp);
- }
- fp_drop(p, fd, fp, 0);
+ case KERN_KDREMOVE:
+ ktrace_reset(KTRACE_KDEBUG);
+ break;
+ case KERN_KDSETREG:
+ if (size < sizeof(kd_regtype)) {
+ ret = EINVAL;
+ break;
+ }
+ if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
+ ret = EINVAL;
break;
}
- case KERN_KDBUFWAIT:
- {
- /* WRITETR lite -- just block until there's data */
- int s;
- int wait_result = THREAD_AWAKENED;
- u_int64_t abstime;
- u_int64_t ns;
- size_t number = 0;
-
- kdbg_disable_bg_trace();
-
-
- if (*sizep) {
- ns = ((u_int64_t)*sizep) * (u_int64_t)(1000 * 1000);
- nanoseconds_to_absolutetime(ns, &abstime );
- clock_absolutetime_interval_to_deadline( abstime, &abstime );
- } else
- abstime = 0;
-
- s = ml_set_interrupts_enabled(FALSE);
- if( !s )
- panic("trying to wait with interrupts off");
- lck_spin_lock(kdw_spin_lock);
-
- /* drop the mutex so don't exclude others from
- * accessing trace
- */
- lck_mtx_unlock(kd_trace_mtx_sysctl);
-
- while (wait_result == THREAD_AWAKENED &&
- kd_ctrl_page.kds_inuse_count < n_storage_threshold) {
- kds_waiter = 1;
+ ret = kdbg_setreg(&kd_Reg);
+ break;
- if (abstime)
- wait_result = lck_spin_sleep_deadline(kdw_spin_lock, 0, &kds_waiter, THREAD_ABORTSAFE, abstime);
- else
- wait_result = lck_spin_sleep(kdw_spin_lock, 0, &kds_waiter, THREAD_ABORTSAFE);
-
- kds_waiter = 0;
- }
+ case KERN_KDGETREG:
+ ret = EINVAL;
+ break;
- /* check the count under the spinlock */
- number = (kd_ctrl_page.kds_inuse_count >= n_storage_threshold);
+ case KERN_KDREADTR:
+ ret = kdbg_read(where, sizep, NULL, NULL, RAW_VERSION1);
+ break;
- lck_spin_unlock(kdw_spin_lock);
- ml_set_interrupts_enabled(s);
+ case KERN_KDWRITETR:
+ case KERN_KDWRITETR_V3:
+ case KERN_KDWRITEMAP:
+ case KERN_KDWRITEMAP_V3:
+ {
+ struct vfs_context context;
+ struct fileproc *fp;
+ size_t number;
+ vnode_t vp;
+ int fd;
+
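+		/*
+		 * For KERN_KDWRITETR{,_V3}, the user-supplied size doubles as a
+		 * timeout in milliseconds for kdbg_wait().
+		 */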
+ if (name[0] == KERN_KDWRITETR || name[0] == KERN_KDWRITETR_V3) {
+ (void)kdbg_wait(size, true);
+ }
+ p = current_proc();
+ fd = value;
- /* pick the mutex back up again */
- lck_mtx_lock(kd_trace_mtx_sysctl);
- /* write out whether we've exceeded the threshold */
- *sizep = number;
+ if (fp_get_ftype(p, fd, DTYPE_VNODE, EBADF, &fp)) {
+ ret = EBADF;
break;
}
- case KERN_KDPIDTR:
- if (size < sizeof(kd_regtype)) {
- ret = EINVAL;
- break;
- }
- if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
- ret = EINVAL;
- break;
- }
- kdbg_disable_bg_trace();
- ret = kdbg_setpid(&kd_Reg);
- break;
- case KERN_KDPIDEX:
- if (size < sizeof(kd_regtype)) {
- ret = EINVAL;
- break;
- }
- if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
- ret = EINVAL;
- break;
+ vp = fp->fp_glob->fg_data;
+ context.vc_thread = current_thread();
+ context.vc_ucred = fp->fp_glob->fg_cred;
+
+ if ((ret = vnode_getwithref(vp)) == 0) {
+ RAW_file_offset = fp->fp_glob->fg_offset;
+ if (name[0] == KERN_KDWRITETR || name[0] == KERN_KDWRITETR_V3) {
+ number = nkdbufs * sizeof(kd_buf);
+
+ KDBG_RELEASE(TRACE_WRITING_EVENTS | DBG_FUNC_START);
+ if (name[0] == KERN_KDWRITETR_V3) {
+ ret = kdbg_read(0, &number, vp, &context, RAW_VERSION3);
+ } else {
+ ret = kdbg_read(0, &number, vp, &context, RAW_VERSION1);
+ }
+ KDBG_RELEASE(TRACE_WRITING_EVENTS | DBG_FUNC_END, number);
+
+ *sizep = number;
+ } else {
+ number = kd_mapcount * sizeof(kd_threadmap);
+ if (name[0] == KERN_KDWRITEMAP_V3) {
+ ret = kdbg_readthrmap_v3(0, number, fd);
+ } else {
+ ret = kdbg_write_thread_map(vp, &context);
+ }
}
- kdbg_disable_bg_trace();
+ fp->fp_glob->fg_offset = RAW_file_offset;
+ vnode_put(vp);
+ }
+ fp_drop(p, fd, fp, 0);
+
+ break;
+ }
+ case KERN_KDBUFWAIT:
+ *sizep = kdbg_wait(size, false);
+ break;
- ret = kdbg_setpidex(&kd_Reg);
+ case KERN_KDPIDTR:
+ if (size < sizeof(kd_regtype)) {
+ ret = EINVAL;
break;
- case KERN_KDCPUMAP:
- ret = kdbg_readcpumap(where, sizep);
+ }
+ if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
+ ret = EINVAL;
break;
- case KERN_KDTHRMAP:
- ret = kdbg_readthrmap(where, sizep, NULL, NULL);
+ }
+
+ ret = kdbg_setpid(&kd_Reg);
+ break;
+
+ case KERN_KDPIDEX:
+ if (size < sizeof(kd_regtype)) {
+ ret = EINVAL;
break;
- case KERN_KDREADCURTHRMAP:
- ret = kdbg_readcurthrmap(where, sizep);
+ }
+ if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
+ ret = EINVAL;
break;
- case KERN_KDSETRTCDEC:
- if (size < sizeof(kd_regtype)) {
- ret = EINVAL;
- break;
- }
- if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
- ret = EINVAL;
- break;
- }
- kdbg_disable_bg_trace();
+ }
- ret = kdbg_setrtcdec(&kd_Reg);
- break;
- case KERN_KDSET_TYPEFILTER:
- kdbg_disable_bg_trace();
+ ret = kdbg_setpidex(&kd_Reg);
+ break;
- if (size != KDBG_TYPEFILTER_BITMAP_SIZE) {
- ret = EINVAL;
- break;
- }
+ case KERN_KDCPUMAP:
+ ret = kdbg_readcpumap(where, sizep);
+ break;
- if ((kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) == 0){
- if ((ret = kdbg_enable_typefilter()))
- break;
- }
+ case KERN_KDTHRMAP:
+ ret = kdbg_copyout_thread_map(where, sizep);
+ break;
- if (copyin(where, type_filter_bitmap, KDBG_TYPEFILTER_BITMAP_SIZE)) {
- ret = EINVAL;
- break;
- }
- kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_TYPEFILTER_CHANGED, type_filter_bitmap);
- break;
- default:
- ret = EINVAL;
+ case KERN_KDSET_TYPEFILTER: {
+ ret = kdbg_copyin_typefilter(where, size);
+ break;
+ }
+
+ case KERN_KDTEST:
+ ret = kdbg_test(size);
+ break;
+
+ default:
+ ret = EINVAL;
+ break;
}
out:
- lck_mtx_unlock(kd_trace_mtx_sysctl);
+ ktrace_unlock();
- return(ret);
+ return ret;
}
int
kdbg_read(user_addr_t buffer, size_t *number, vnode_t vp, vfs_context_t ctx, uint32_t file_version)
{
- unsigned int count;
+ size_t count;
unsigned int cpu, min_cpu;
- uint64_t mintime, t, barrier = 0;
+ uint64_t barrier_min = 0, barrier_max = 0, t, earliest_time;
int error = 0;
kd_buf *tempbuf;
uint32_t rcursor;
kd_buf lostevent;
union kds_ptr kdsp;
+ bool traced_retrograde = false;
struct kd_storage *kdsp_actual;
struct kd_bufinfo *kdbp;
struct kd_bufinfo *min_kdbp;
- uint32_t tempbuf_count;
+ size_t tempbuf_count;
uint32_t tempbuf_number;
uint32_t old_kdebug_flags;
uint32_t old_kdebug_slowcheck;
- boolean_t lostevents = FALSE;
- boolean_t out_of_events = FALSE;
+ bool out_of_events = false;
+ bool wrapped = false;
- assert(number);
- count = *number/sizeof(kd_buf);
+ assert(number != NULL);
+ count = *number / sizeof(kd_buf);
*number = 0;
- if (count == 0 || !(kd_ctrl_page.kdebug_flags & KDBG_BUFINIT) || kdcopybuf == 0)
+ ktrace_assert_lock_held();
+
+ if (count == 0 || !(kd_ctrl_page.kdebug_flags & KDBG_BUFINIT) || kdcopybuf == 0) {
return EINVAL;
+ }
+
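+	/*
+	 * Merging events from every CPU below can take a while; eager
+	 * preemption keeps this thread from monopolizing a core (assumed
+	 * intent of this call).
+	 */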
+ thread_set_eager_preempt(current_thread());
memset(&lostevent, 0, sizeof(lostevent));
lostevent.debugid = TRACE_LOST_EVENTS;
- /* Capture timestamp. Only sort events that have occured before the timestamp.
- * Since the iop is being flushed here, its possible that events occur on the AP
- * while running live tracing. If we are disabled, no new events should
- * occur on the AP.
- */
-
- if (kd_ctrl_page.enabled)
- {
- // timestamp is non-zero value
- barrier = mach_absolute_time() & KDBG_TIMESTAMP_MASK;
- }
-
- // Request each IOP to provide us with up to date entries before merging buffers together.
+ /*
+ * Request each IOP to provide us with up to date entries before merging
+ * buffers together.
+ */
kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_SYNC_FLUSH, NULL);
/*
- * because we hold kd_trace_mtx_sysctl, no other control threads can
- * be playing with kdebug_flags... the code that cuts new events could
- * be running, but it grabs kds_spin_lock if it needs to acquire a new
- * storage chunk which is where it examines kdebug_flags... it its adding
- * to the same chunk we're reading from, no problem...
+	 * Capture the current time. Only sort events that have occurred
+ * before now. Since the IOPs are being flushed here, it is possible
+ * that events occur on the AP while running live tracing.
*/
+ barrier_max = kdbg_timestamp() & KDBG_TIMESTAMP_MASK;
- disable_wrap(&old_kdebug_slowcheck, &old_kdebug_flags);
+ /*
+ * Disable wrap so storage units cannot be stolen out from underneath us
+ * while merging events.
+ *
+ * Because we hold ktrace_lock, no other control threads can be playing
+ * with kdebug_flags. The code that emits new events could be running,
+ * but it grabs kds_spin_lock if it needs to acquire a new storage
+ * chunk, which is where it examines kdebug_flags. If it is adding to
+ * the same chunk we're reading from, check for that below.
+ */
+ wrapped = disable_wrap(&old_kdebug_slowcheck, &old_kdebug_flags);
- if (count > nkdbufs)
+ if (count > nkdbufs) {
count = nkdbufs;
+ }
+
+ if ((tempbuf_count = count) > KDCOPYBUF_COUNT) {
+ tempbuf_count = KDCOPYBUF_COUNT;
+ }
- if ((tempbuf_count = count) > KDCOPYBUF_COUNT)
- tempbuf_count = KDCOPYBUF_COUNT;
+ /*
+ * If the buffers have wrapped, do not emit additional lost events for the
+ * oldest storage units.
+ */
+ if (wrapped) {
+ kd_ctrl_page.kdebug_flags &= ~KDBG_WRAPPED;
+
+ for (cpu = 0, kdbp = &kdbip[0]; cpu < kd_ctrl_page.kdebug_cpus; cpu++, kdbp++) {
+ if ((kdsp = kdbp->kd_list_head).raw == KDS_PTR_NULL) {
+ continue;
+ }
+ kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
+ kdsp_actual->kds_lostevents = false;
+ }
+ }
+ /*
+	 * Capture the earliest time at which there are events for all CPUs and
+	 * don't emit any events with earlier timestamps.
+ */
+ barrier_min = kd_ctrl_page.oldest_time;
while (count) {
tempbuf = kdcopybuf;
tempbuf_number = 0;
- // While space
+ if (wrapped) {
+ /*
+ * Emit a lost events tracepoint to indicate that previous events
+ * were lost -- the thread map cannot be trusted. A new one must
+ * be taken so tools can analyze the trace in a backwards-facing
+ * fashion.
+ */
+ kdbg_set_timestamp_and_cpu(&lostevent, barrier_min, 0);
+ *tempbuf = lostevent;
+ wrapped = false;
+ goto nextevent;
+ }
+
+ /* While space left in merged events scratch buffer. */
while (tempbuf_count) {
- mintime = 0xffffffffffffffffULL;
+ bool lostevents = false;
+ int lostcpu = 0;
+ earliest_time = UINT64_MAX;
min_kdbp = NULL;
min_cpu = 0;
- // Check all CPUs
+ /* Check each CPU's buffers for the earliest event. */
for (cpu = 0, kdbp = &kdbip[0]; cpu < kd_ctrl_page.kdebug_cpus; cpu++, kdbp++) {
-
- // Find one with raw data
- if ((kdsp = kdbp->kd_list_head).raw == KDS_PTR_NULL)
- continue;
- /* Debugging aid: maintain a copy of the "kdsp"
- * index.
- */
- volatile union kds_ptr kdsp_shadow;
-
- kdsp_shadow = kdsp;
-
- // Get from cpu data to buffer header to buffer
+ /* Skip CPUs without data in their oldest storage unit. */
+ if ((kdsp = kdbp->kd_list_head).raw == KDS_PTR_NULL) {
+next_cpu:
+ continue;
+ }
+ /* From CPU data to buffer header to buffer. */
kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
- volatile struct kd_storage *kdsp_actual_shadow;
-
- kdsp_actual_shadow = kdsp_actual;
-
- // See if there are actual data left in this buffer
+next_event:
+ /* The next event to be read from this buffer. */
rcursor = kdsp_actual->kds_readlast;
- if (rcursor == kdsp_actual->kds_bufindx)
+ /* Skip this buffer if there are no events left. */
+ if (rcursor == kdsp_actual->kds_bufindx) {
continue;
+ }
+
+ /*
+ * Check that this storage unit wasn't stolen and events were
+ * lost. This must have happened while wrapping was disabled
+ * in this function.
+ */
+ if (kdsp_actual->kds_lostevents) {
+ lostevents = true;
+ kdsp_actual->kds_lostevents = false;
+
+ /*
+ * The earliest event we can trust is the first one in this
+ * stolen storage unit.
+ */
+ uint64_t lost_time =
+ kdbg_get_timestamp(&kdsp_actual->kds_records[0]);
+ if (kd_ctrl_page.oldest_time < lost_time) {
+ /*
+ * If this is the first time we've seen lost events for
+ * this gap, record its timestamp as the oldest
+ * timestamp we're willing to merge for the lost events
+ * tracepoint.
+ */
+ kd_ctrl_page.oldest_time = barrier_min = lost_time;
+ lostcpu = cpu;
+ }
+ }
t = kdbg_get_timestamp(&kdsp_actual->kds_records[rcursor]);
- if ((t > barrier) && (barrier > 0)) {
- /*
- * Need to wait to flush iop again before we
- * sort any more data from the buffers
- */
- out_of_events = TRUE;
- break;
- }
+ if (t > barrier_max) {
+ if (kdbg_debug) {
+ printf("kdebug: FUTURE EVENT: debugid %#8x: "
+ "time %lld from CPU %u "
+ "(barrier at time %lld, read %lu events)\n",
+ kdsp_actual->kds_records[rcursor].debugid,
+ t, cpu, barrier_max, *number + tempbuf_number);
+ }
+ goto next_cpu;
+ }
if (t < kdsp_actual->kds_timestamp) {
/*
- * indicates we've not yet completed filling
- * in this event...
- * this should only occur when we're looking
- * at the buf that the record head is utilizing
- * we'll pick these events up on the next
- * call to kdbg_read
- * we bail at this point so that we don't
- * get an out-of-order timestream by continuing
- * to read events from the other CPUs' timestream(s)
+					 * This indicates the event emitter hasn't completed
+					 * filling in the event (because we're looking at the
+					 * buffer that the record head is using). The max barrier
+					 * timestamp should have saved us from seeing these kinds
+					 * of things, but other CPUs might be slow on the uptake.
+					 *
+					 * Bail out so we don't get out-of-order events by
+					 * continuing to read events from other CPUs' buffers.
*/
- out_of_events = TRUE;
+ out_of_events = true;
break;
}
- if (t < mintime) {
- mintime = t;
+
+ /*
+ * Ignore events that have aged out due to wrapping or storage
+ * unit exhaustion while merging events.
+ */
+ if (t < barrier_min) {
+ kdsp_actual->kds_readlast++;
+ if (kdbg_debug) {
+ printf("kdebug: PAST EVENT: debugid %#8x: "
+ "time %lld from CPU %u "
+ "(barrier at time %lld)\n",
+ kdsp_actual->kds_records[rcursor].debugid,
+ t, cpu, barrier_min);
+ }
+
+ if (kdsp_actual->kds_readlast >= EVENTS_PER_STORAGE_UNIT) {
+ release_storage_unit(cpu, kdsp.raw);
+
+ if ((kdsp = kdbp->kd_list_head).raw == KDS_PTR_NULL) {
+ goto next_cpu;
+ }
+ kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
+ }
+
+ goto next_event;
+ }
+
+ /*
+ * Don't worry about merging any events -- just walk through
+ * the CPUs and find the latest timestamp of lost events.
+ */
+ if (lostevents) {
+ continue;
+ }
+
+ if (t < earliest_time) {
+ earliest_time = t;
min_kdbp = kdbp;
min_cpu = cpu;
}
}
- if (min_kdbp == NULL || out_of_events == TRUE) {
+ if (lostevents) {
/*
- * all buffers ran empty
+ * If any lost events were hit in the buffers, emit an event
+ * with the latest timestamp.
*/
- out_of_events = TRUE;
+ kdbg_set_timestamp_and_cpu(&lostevent, barrier_min, lostcpu);
+ *tempbuf = lostevent;
+ tempbuf->arg1 = 1;
+ goto nextevent;
+ }
+ if (min_kdbp == NULL) {
+ /* All buffers ran empty. */
+ out_of_events = true;
+ }
+ if (out_of_events) {
break;
}
- // Get data
kdsp = min_kdbp->kd_list_head;
kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
- if (kdsp_actual->kds_lostevents == TRUE) {
- kdbg_set_timestamp_and_cpu(&lostevent, kdsp_actual->kds_records[kdsp_actual->kds_readlast].timestamp, min_cpu);
- *tempbuf = lostevent;
-
- kdsp_actual->kds_lostevents = FALSE;
- lostevents = TRUE;
-
- goto nextevent;
- }
-
- // Copy into buffer
+ /* Copy earliest event into merged events scratch buffer. */
*tempbuf = kdsp_actual->kds_records[kdsp_actual->kds_readlast++];
- if (kdsp_actual->kds_readlast == EVENTS_PER_STORAGE_UNIT)
+ if (kdsp_actual->kds_readlast == EVENTS_PER_STORAGE_UNIT) {
release_storage_unit(min_cpu, kdsp.raw);
+ }
/*
- * Watch for out of order timestamps
- */
- if (mintime < min_kdbp->kd_prev_timebase) {
+ * Watch for out of order timestamps (from IOPs).
+ */
+ if (earliest_time < min_kdbp->kd_prev_timebase) {
/*
- * if so, use the previous timestamp + 1 cycle
+			 * If we haven't already, emit a TRACE_RETROGRADE_EVENTS event.
+			 * Otherwise, ignore this event.
*/
- min_kdbp->kd_prev_timebase++;
+ if (traced_retrograde) {
+ continue;
+ }
+ if (kdbg_debug) {
+ printf("kdebug: RETRO EVENT: debugid %#8x: "
+ "time %lld from CPU %u "
+ "(barrier at time %lld)\n",
+ kdsp_actual->kds_records[rcursor].debugid,
+ t, cpu, barrier_min);
+ }
+
kdbg_set_timestamp_and_cpu(tempbuf, min_kdbp->kd_prev_timebase, kdbg_get_cpu(tempbuf));
- } else
- min_kdbp->kd_prev_timebase = mintime;
+ tempbuf->arg1 = tempbuf->debugid;
+ tempbuf->arg2 = (kd_buf_argtype)earliest_time;
+ tempbuf->arg3 = 0;
+ tempbuf->arg4 = 0;
+ tempbuf->debugid = TRACE_RETROGRADE_EVENTS;
+ traced_retrograde = true;
+ } else {
+ min_kdbp->kd_prev_timebase = earliest_time;
+ }
nextevent:
tempbuf_count--;
tempbuf_number++;
tempbuf++;
- if ((RAW_file_written += sizeof(kd_buf)) >= RAW_FLUSH_SIZE)
+ if ((RAW_file_written += sizeof(kd_buf)) >= RAW_FLUSH_SIZE) {
break;
+ }
}
if (tempbuf_number) {
+ /*
+ * Remember the latest timestamp of events that we've merged so we
+ * don't think we've lost events later.
+ */
+ uint64_t latest_time = kdbg_get_timestamp(tempbuf - 1);
+ if (kd_ctrl_page.oldest_time < latest_time) {
+ kd_ctrl_page.oldest_time = latest_time;
+ }
if (file_version == RAW_VERSION3) {
- if ( !(kdbg_write_v3_event_chunk_header(buffer, V3_RAW_EVENTS, (tempbuf_number * sizeof(kd_buf)), vp, ctx))) {
+ if (!(kdbg_write_v3_event_chunk_header(buffer, V3_RAW_EVENTS, (tempbuf_number * sizeof(kd_buf)), vp, ctx))) {
error = EFAULT;
goto check_error;
}
- if (buffer)
+ if (buffer) {
buffer += (sizeof(kd_chunk_header_v3) + sizeof(uint64_t));
+ }
assert(count >= (sizeof(kd_chunk_header_v3) + sizeof(uint64_t)));
count -= (sizeof(kd_chunk_header_v3) + sizeof(uint64_t));
if (vp) {
size_t write_size = tempbuf_number * sizeof(kd_buf);
error = kdbg_write_to_vnode((caddr_t)kdcopybuf, write_size, vp, ctx, RAW_file_offset);
- if (!error)
+ if (!error) {
RAW_file_offset += write_size;
-
+ }
+
if (RAW_file_written >= RAW_FLUSH_SIZE) {
- cluster_push(vp, 0);
+ error = VNOP_FSYNC(vp, MNT_NOWAIT, ctx);
RAW_file_written = 0;
}
count -= tempbuf_number;
*number += tempbuf_number;
}
- if (out_of_events == TRUE)
- /*
- * all trace buffers are empty
- */
- break;
+		if (out_of_events) {
+			/*
+			 * All trace buffers are empty.
+			 */
+ break;
+ }
- if ((tempbuf_count = count) > KDCOPYBUF_COUNT)
- tempbuf_count = KDCOPYBUF_COUNT;
+ if ((tempbuf_count = count) > KDCOPYBUF_COUNT) {
+ tempbuf_count = KDCOPYBUF_COUNT;
+ }
}
- if ( !(old_kdebug_flags & KDBG_NOWRAP)) {
- enable_wrap(old_kdebug_slowcheck, lostevents);
+ if (!(old_kdebug_flags & KDBG_NOWRAP)) {
+ enable_wrap(old_kdebug_slowcheck);
}
- return (error);
+ thread_clear_eager_preempt(current_thread());
+ return error;
}
+#define KDEBUG_TEST_CODE(code) BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, (code))
-unsigned char *getProcName(struct proc *proc);
-unsigned char *getProcName(struct proc *proc) {
+/*
+ * A test IOP for the SYNC_FLUSH callback.
+ */
- return (unsigned char *) &proc->p_comm; /* Return pointer to the proc name */
+static int sync_flush_iop = 0;
+static void
+sync_flush_callback(void * __unused context, kd_callback_type reason,
+ void * __unused arg)
+{
+ assert(sync_flush_iop > 0);
+
+ if (reason == KD_CALLBACK_SYNC_FLUSH) {
+ kernel_debug_enter(sync_flush_iop, KDEBUG_TEST_CODE(0xff),
+ kdbg_timestamp(), 0, 0, 0, 0, 0);
+ }
}
+static struct kd_callback sync_flush_kdcb = {
+ .func = sync_flush_callback,
+ .iop_name = "test_sf",
+};
+
static int
-stackshot_kern_return_to_bsd_error(kern_return_t kr)
+kdbg_test(size_t flavor)
{
- switch (kr) {
- case KERN_SUCCESS:
- return 0;
- case KERN_RESOURCE_SHORTAGE:
- return ENOMEM;
- case KERN_NO_SPACE:
- return ENOSPC;
- case KERN_NO_ACCESS:
- return EPERM;
- case KERN_MEMORY_PRESENT:
- return EEXIST;
- case KERN_NOT_SUPPORTED:
- return ENOTSUP;
- case KERN_NOT_IN_SET:
- return ENOENT;
- default:
- return EINVAL;
+ int code = 0;
+ int dummy_iop = 0;
+
+ switch (flavor) {
+ case 1:
+ /* try each macro */
+ KDBG(KDEBUG_TEST_CODE(code)); code++;
+ KDBG(KDEBUG_TEST_CODE(code), 1); code++;
+ KDBG(KDEBUG_TEST_CODE(code), 1, 2); code++;
+ KDBG(KDEBUG_TEST_CODE(code), 1, 2, 3); code++;
+ KDBG(KDEBUG_TEST_CODE(code), 1, 2, 3, 4); code++;
+
+ KDBG_RELEASE(KDEBUG_TEST_CODE(code)); code++;
+ KDBG_RELEASE(KDEBUG_TEST_CODE(code), 1); code++;
+ KDBG_RELEASE(KDEBUG_TEST_CODE(code), 1, 2); code++;
+ KDBG_RELEASE(KDEBUG_TEST_CODE(code), 1, 2, 3); code++;
+ KDBG_RELEASE(KDEBUG_TEST_CODE(code), 1, 2, 3, 4); code++;
+
+ KDBG_FILTERED(KDEBUG_TEST_CODE(code)); code++;
+ KDBG_FILTERED(KDEBUG_TEST_CODE(code), 1); code++;
+ KDBG_FILTERED(KDEBUG_TEST_CODE(code), 1, 2); code++;
+ KDBG_FILTERED(KDEBUG_TEST_CODE(code), 1, 2, 3); code++;
+ KDBG_FILTERED(KDEBUG_TEST_CODE(code), 1, 2, 3, 4); code++;
+
+ KDBG_RELEASE_NOPROCFILT(KDEBUG_TEST_CODE(code)); code++;
+ KDBG_RELEASE_NOPROCFILT(KDEBUG_TEST_CODE(code), 1); code++;
+ KDBG_RELEASE_NOPROCFILT(KDEBUG_TEST_CODE(code), 1, 2); code++;
+ KDBG_RELEASE_NOPROCFILT(KDEBUG_TEST_CODE(code), 1, 2, 3); code++;
+ KDBG_RELEASE_NOPROCFILT(KDEBUG_TEST_CODE(code), 1, 2, 3, 4); code++;
+
+ KDBG_DEBUG(KDEBUG_TEST_CODE(code)); code++;
+ KDBG_DEBUG(KDEBUG_TEST_CODE(code), 1); code++;
+ KDBG_DEBUG(KDEBUG_TEST_CODE(code), 1, 2); code++;
+ KDBG_DEBUG(KDEBUG_TEST_CODE(code), 1, 2, 3); code++;
+ KDBG_DEBUG(KDEBUG_TEST_CODE(code), 1, 2, 3, 4); code++;
+ break;
+
+ case 2:
+ if (kd_ctrl_page.kdebug_iops) {
+ /* avoid the assertion in kernel_debug_enter for a valid IOP */
+ dummy_iop = kd_ctrl_page.kdebug_iops[0].cpu_id;
+ }
+
+ /* ensure old timestamps are not emitted from kernel_debug_enter */
+ kernel_debug_enter(dummy_iop, KDEBUG_TEST_CODE(code),
+ 100 /* very old timestamp */, 0, 0, 0, 0, 0);
+ code++;
+ kernel_debug_enter(dummy_iop, KDEBUG_TEST_CODE(code),
+ kdbg_timestamp(), 0, 0, 0, 0, 0);
+ code++;
+ break;
+
+ case 3:
+ if (kd_ctrl_page.kdebug_iops) {
+ dummy_iop = kd_ctrl_page.kdebug_iops[0].cpu_id;
+ }
+ kernel_debug_enter(dummy_iop, KDEBUG_TEST_CODE(code),
+	    kdbg_timestamp() * 2 /* deliberately far in the future */, 0, 0, 0, 0, 0);
+ break;
+
+ case 4:
+ if (!sync_flush_iop) {
+ sync_flush_iop = kernel_debug_register_callback(
+ sync_flush_kdcb);
+ assert(sync_flush_iop > 0);
+ }
+ break;
+
+ default:
+ return ENOTSUP;
}
+
+ return 0;
}
+#undef KDEBUG_TEST_CODE
-/*
- * DEPRECATION WARNING: THIS SYSCALL IS BEING REPLACED WITH SYS_stack_snapshot_with_config and SYS_microstackshot.
- *
- * stack_snapshot: Obtains a coherent set of stack traces for all threads
- * on the system, tracing both kernel and user stacks
- * where available. Uses machine specific trace routines
- * for ppc, ppc64 and x86.
- * Inputs: uap->pid - process id of process to be traced, or -1
- * for the entire system
- * uap->tracebuf - address of the user space destination
- * buffer
- * uap->tracebuf_size - size of the user space trace buffer
- * uap->options - various options, including the maximum
- * number of frames to trace.
- * Outputs: EPERM if the caller is not privileged
- * EINVAL if the supplied trace buffer isn't sanely sized
- * ENOMEM if we don't have enough memory to satisfy the
- * request
- * ENOENT if the target pid isn't found
- * ENOSPC if the supplied buffer is insufficient
- * *retval contains the number of bytes traced, if successful
- * and -1 otherwise. If the request failed due to
- * tracebuffer exhaustion, we copyout as much as possible.
- */
-int
-stack_snapshot(struct proc *p, register struct stack_snapshot_args *uap, int32_t *retval) {
- int error = 0;
- kern_return_t kr;
+void
+kdebug_init(unsigned int n_events, char *filter_desc, enum kdebug_opts opts)
+{
+ assert(filter_desc != NULL);
- if ((error = suser(kauth_cred_get(), &p->p_acflag)))
- return(error);
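+	/* Leak logging needs a buffer even if none was requested by boot-args. */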
+ if (log_leaks && n_events == 0) {
+ n_events = 200000;
+ }
- kr = stack_snapshot2(uap->pid, uap->tracebuf, uap->tracebuf_size, uap->flags, retval);
- return stackshot_kern_return_to_bsd_error(kr);
+ kdebug_trace_start(n_events, filter_desc, opts);
}
-/*
- * stack_snapshot_with_config: Obtains a coherent set of stack traces for specified threads on the sysem,
- * tracing both kernel and user stacks where available. Allocates a buffer from the
- * kernel and maps the buffer into the calling task's address space.
- *
- * Inputs: uap->stackshot_config_version - version of the stackshot config that is being passed
- * uap->stackshot_config - pointer to the stackshot config
- * uap->stackshot_config_size- size of the stackshot config being passed
- * Outputs: EINVAL if there is a problem with the arguments
- * EFAULT if we failed to copy in the arguments succesfully
- * EPERM if the caller is not privileged
- * ENOTSUP if the caller is passing a version of arguments that is not supported by the kernel
- * (indicates libsyscall:kernel mismatch) or if the caller is requesting unsupported flags
- * ENOENT if the caller is requesting an existing buffer that doesn't exist or if the
- * requested PID isn't found
- * ENOMEM if the kernel is unable to allocate enough memory to serve the request
- * ENOSPC if there isn't enough space in the caller's address space to remap the buffer
- * ESRCH if the target PID isn't found
- * returns KERN_SUCCESS on success
- */
-int
-stack_snapshot_with_config(struct proc *p, struct stack_snapshot_with_config_args *uap, __unused int *retval)
+static void
+kdbg_set_typefilter_string(const char *filter_desc)
{
- int error = 0;
- kern_return_t kr;
+ char *end = NULL;
- if ((error = suser(kauth_cred_get(), &p->p_acflag)))
- return(error);
+ ktrace_assert_lock_held();
- if((void*)uap->stackshot_config == NULL) {
- return EINVAL;
+ assert(filter_desc != NULL);
+
+ typefilter_reject_all(kdbg_typefilter);
+ typefilter_allow_class(kdbg_typefilter, DBG_TRACE);
+
+ /* if the filter description starts with a number, assume it's a csc */
+ if (filter_desc[0] >= '0' && filter_desc[0] <= '9') {
+		unsigned long csc = strtoul(filter_desc, &end, 0);
+ if (filter_desc != end && csc <= KDBG_CSC_MAX) {
+ typefilter_allow_csc(kdbg_typefilter, (uint16_t)csc);
+ }
+ return;
}
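+
+	/*
+	 * Otherwise, parse a comma-separated list of entries, each a class
+	 * ("C") or class-subclass ("S") prefix followed by a number; e.g. the
+	 * (illustrative) string "C1,S0x0a00" allows class 0x1 and csc 0x0a00.
+	 */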
- switch (uap->stackshot_config_version) {
- case STACKSHOT_CONFIG_TYPE:
- if (uap->stackshot_config_size != sizeof(stackshot_config_t)) {
- return EINVAL;
+ while (filter_desc[0] != '\0') {
+ unsigned long allow_value;
+
+ char filter_type = filter_desc[0];
+ if (filter_type != 'C' && filter_type != 'S') {
+ printf("kdebug: unexpected filter type `%c'\n", filter_type);
+ return;
+ }
+ filter_desc++;
+
+ allow_value = strtoul(filter_desc, &end, 0);
+ if (filter_desc == end) {
+ printf("kdebug: cannot parse `%s' as integer\n", filter_desc);
+ return;
+ }
+
+ switch (filter_type) {
+ case 'C':
+ if (allow_value > KDBG_CLASS_MAX) {
+ printf("kdebug: class 0x%lx is invalid\n", allow_value);
+ return;
}
- stackshot_config_t config;
- error = copyin(uap->stackshot_config, &config, sizeof(stackshot_config_t));
- if (error != KERN_SUCCESS)
- {
- return EFAULT;
+ printf("kdebug: C 0x%lx\n", allow_value);
+ typefilter_allow_class(kdbg_typefilter, (uint8_t)allow_value);
+ break;
+ case 'S':
+ if (allow_value > KDBG_CSC_MAX) {
+ printf("kdebug: class-subclass 0x%lx is invalid\n", allow_value);
+ return;
}
- kr = kern_stack_snapshot_internal(uap->stackshot_config_version, &config, sizeof(stackshot_config_t), TRUE);
- return stackshot_kern_return_to_bsd_error(kr);
+ printf("kdebug: S 0x%lx\n", allow_value);
+ typefilter_allow_csc(kdbg_typefilter, (uint16_t)allow_value);
+ break;
default:
- return ENOTSUP;
+ __builtin_unreachable();
+ }
+
+ /* advance to next filter entry */
+ filter_desc = end;
+ if (filter_desc[0] == ',') {
+ filter_desc++;
+ }
}
}
-#if CONFIG_TELEMETRY
-/*
- * microstackshot: Catch all system call for microstackshot related operations, including
- * enabling/disabling both global and windowed microstackshots as well
- * as retrieving windowed or global stackshots and the boot profile.
- * Inputs: uap->tracebuf - address of the user space destination
- * buffer
- * uap->tracebuf_size - size of the user space trace buffer
- * uap->flags - various flags
- * Outputs: EPERM if the caller is not privileged
- * EINVAL if the supplied mss_args is NULL, mss_args.tracebuf is NULL or mss_args.tracebuf_size is not sane
- * ENOMEM if we don't have enough memory to satisfy the request
- * *retval contains the number of bytes traced, if successful
- * and -1 otherwise.
- */
-int
-microstackshot(struct proc *p, struct microstackshot_args *uap, int32_t *retval)
+uint64_t
+kdebug_wake(void)
{
- int error = 0;
- kern_return_t kr;
-
- if ((error = suser(kauth_cred_get(), &p->p_acflag)))
- return(error);
-
- kr = stack_microstackshot(uap->tracebuf, uap->tracebuf_size, uap->flags, retval);
- return stackshot_kern_return_to_bsd_error(kr);
+ if (!wake_nkdbufs) {
+ return 0;
+ }
+ uint64_t start = mach_absolute_time();
+ kdebug_trace_start(wake_nkdbufs, NULL, trace_wrap ? KDOPT_WRAPPING : 0);
+ return mach_absolute_time() - start;
}
-#endif /* CONFIG_TELEMETRY */
/*
- * kern_stack_snapshot_with_reason: Obtains a coherent set of stack traces for specified threads on the sysem,
- * tracing both kernel and user stacks where available. Allocates a buffer from the
- * kernel and stores the address of this buffer.
- *
- * Inputs: reason - the reason for triggering a stackshot (unused at the moment, but in the
- * future will be saved in the stackshot)
- * Outputs: EINVAL/ENOTSUP if there is a problem with the arguments
- * EPERM if the caller doesn't pass at least one KERNEL stackshot flag
- * ENOMEM if the kernel is unable to allocate enough memory to serve the request
- * ESRCH if the target PID isn't found
- * returns KERN_SUCCESS on success
+ * This function is meant to be called from the bootstrap thread or kdebug_wake.
*/
-int
-kern_stack_snapshot_with_reason(__unused char *reason)
-{
- stackshot_config_t config;
- kern_return_t kr;
-
- config.sc_pid = -1;
- config.sc_flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_GET_GLOBAL_MEM_STATS | STACKSHOT_SAVE_IN_KERNEL_BUFFER |
- STACKSHOT_KCDATA_FORMAT);
- config.sc_since_timestamp = 0;
- config.sc_out_buffer_addr = 0;
- config.sc_out_size_addr = 0;
-
- kr = kern_stack_snapshot_internal(STACKSHOT_CONFIG_TYPE, &config, sizeof(stackshot_config_t), FALSE);
- return stackshot_kern_return_to_bsd_error(kr);
-}
-
-/*
- * stack_snapshot_from_kernel: Stackshot function for kernel consumers who have their own buffer.
- *
- * Inputs: pid - the PID to be traced or -1 for the whole system
- * buf - a pointer to the buffer where the stackshot should be written
- * size - the size of the buffer
- * flags - flags to be passed to the stackshot
- * *bytes_traced - a pointer to be filled with the length of the stackshot
- * Outputs: -1 if there is a problem with the arguments
- * the error returned by the stackshot code otherwise
- */
-int
-stack_snapshot_from_kernel(pid_t pid, void *buf, uint32_t size, uint32_t flags, unsigned *bytes_traced)
+void
+kdebug_trace_start(unsigned int n_events, const char *filter_desc,
+ enum kdebug_opts opts)
{
- kern_return_t kr;
-
- kr = stack_snapshot_from_kernel_internal(pid, buf, size, flags, bytes_traced);
- if (kr == KERN_FAILURE) {
- return -1;
+ if (!n_events) {
+ kd_early_done = true;
+ return;
}
- return kr;
-}
+ ktrace_start_single_threaded();
-void
-start_kern_tracing(unsigned int new_nkdbufs, boolean_t need_map)
-{
-
- if (!new_nkdbufs)
- return;
- nkdbufs = kdbg_set_nkdbufs(new_nkdbufs);
kdbg_lock_init();
- kernel_debug_string_simple("start_kern_tracing");
+ ktrace_kernel_configure(KTRACE_KDEBUG);
- if (0 == kdbg_reinit(TRUE)) {
+ kdbg_set_nkdbufs(n_events);
- if (need_map == TRUE) {
- uint32_t old1, old2;
+ kernel_debug_string_early("start_kern_tracing");
- kdbg_thrmap_init();
+ if (kdbg_reinit((opts & KDOPT_ATBOOT))) {
+ printf("error from kdbg_reinit, kernel tracing not started\n");
+ goto out;
+ }
+
+ /*
+ * Wrapping is disabled because boot and wake tracing is interested in
+ * the earliest events, at the expense of later ones.
+ */
+ if (!(opts & KDOPT_WRAPPING)) {
+ uint32_t old1, old2;
+ (void)disable_wrap(&old1, &old2);
+ }
- disable_wrap(&old1, &old2);
+ if (filter_desc && filter_desc[0] != '\0') {
+ if (kdbg_initialize_typefilter(NULL) == KERN_SUCCESS) {
+ kdbg_set_typefilter_string(filter_desc);
+ kdbg_enable_typefilter();
}
+ }
- /* Hold off interrupts until the early traces are cut */
- boolean_t s = ml_set_interrupts_enabled(FALSE);
+ /*
+ * Hold off interrupts between getting a thread map and enabling trace
+ * and until the early traces are recorded.
+ */
+ bool s = ml_set_interrupts_enabled(false);
+
+ if (!(opts & KDOPT_ATBOOT)) {
+ kdbg_thrmap_init();
+ }
- kdbg_set_tracing_enabled(
- TRUE,
- kdebug_serial ?
- (KDEBUG_ENABLE_TRACE | KDEBUG_ENABLE_SERIAL) :
- KDEBUG_ENABLE_TRACE);
+ kdbg_set_tracing_enabled(true, KDEBUG_ENABLE_TRACE);
+ if ((opts & KDOPT_ATBOOT)) {
/*
- * Transfer all very early events from the static buffer
- * into the real buffers.
+ * Transfer all very early events from the static buffer into the real
+ * buffers.
*/
kernel_debug_early_end();
-
- ml_set_interrupts_enabled(s);
-
- printf("kernel tracing started\n");
-#if KDEBUG_MOJO_TRACE
- if (kdebug_serial) {
- printf("serial output enabled with %lu named events\n",
- sizeof(kd_events)/sizeof(kd_event_t));
- }
-#endif
- } else {
- printf("error from kdbg_reinit, kernel tracing not started\n");
}
-}
-void
-start_kern_tracing_with_typefilter(unsigned int new_nkdbufs,
- boolean_t need_map,
- unsigned int typefilter)
-{
- /* startup tracing */
- start_kern_tracing(new_nkdbufs, need_map);
+ ml_set_interrupts_enabled(s);
- /* check that tracing was actually enabled */
- if (!(kdebug_enable & KDEBUG_ENABLE_TRACE))
- return;
+ printf("kernel tracing started with %u events, filter = %s\n", n_events,
+ filter_desc ?: "none");
- /* setup the typefiltering */
- if (0 == kdbg_enable_typefilter())
- setbit(type_filter_bitmap,
- typefilter & (KDBG_CSC_MASK >> KDBG_CSC_OFFSET));
+out:
+ ktrace_end_single_threaded();
}
void
kdbg_dump_trace_to_file(const char *filename)
{
- vfs_context_t ctx;
- vnode_t vp;
- int error;
- size_t number;
+ vfs_context_t ctx;
+ vnode_t vp;
+ size_t write_size;
+ int ret;
+ ktrace_lock();
- if ( !(kdebug_enable & KDEBUG_ENABLE_TRACE))
- return;
+ if (!(kdebug_enable & KDEBUG_ENABLE_TRACE)) {
+ goto out;
+ }
- if (global_state_pid != -1) {
- if ((proc_find(global_state_pid)) != NULL) {
- /*
- * The global pid exists, we're running
- * due to fs_usage, latency, etc...
- * don't cut the panic/shutdown trace file
- * Disable tracing from this point to avoid
- * perturbing state.
- */
- kdebug_enable = 0;
- kd_ctrl_page.enabled = 0;
- commpage_update_kdebug_enable();
- return;
- }
+ if (ktrace_get_owning_pid() != 0) {
+ /*
+ * Another process owns ktrace and is still active, disable tracing to
+ * prevent wrapping.
+ */
+ kdebug_enable = 0;
+ kd_ctrl_page.enabled = 0;
+ commpage_update_kdebug_state();
+ goto out;
}
- KERNEL_DEBUG_CONSTANT(TRACE_PANIC | DBG_FUNC_NONE, 0, 0, 0, 0, 0);
+
+ KDBG_RELEASE(TRACE_WRITING_EVENTS | DBG_FUNC_START);
kdebug_enable = 0;
kd_ctrl_page.enabled = 0;
- commpage_update_kdebug_enable();
+ commpage_update_kdebug_state();
ctx = vfs_context_kernel();
- if ((error = vnode_open(filename, (O_CREAT | FWRITE | O_NOFOLLOW), 0600, 0, &vp, ctx)))
- return;
+ if (vnode_open(filename, (O_CREAT | FWRITE | O_NOFOLLOW), 0600, 0, &vp, ctx)) {
+ goto out;
+ }
- number = kd_mapcount * sizeof(kd_threadmap);
- kdbg_readthrmap(0, &number, vp, ctx);
+ kdbg_write_thread_map(vp, ctx);
- number = nkdbufs*sizeof(kd_buf);
- kdbg_read(0, &number, vp, ctx, RAW_VERSION1);
-
- vnode_close(vp, FWRITE, ctx);
+ write_size = nkdbufs * sizeof(kd_buf);
+ ret = kdbg_read(0, &write_size, vp, ctx, RAW_VERSION1);
+ if (ret) {
+ goto out_close;
+ }
- sync(current_proc(), (void *)NULL, (int *)NULL);
-}
+ /*
+ * Wait to synchronize the file to capture the I/O in the
+ * TRACE_WRITING_EVENTS interval.
+ */
+ ret = VNOP_FSYNC(vp, MNT_WAIT, ctx);
-/* Helper function for filling in the BSD name for an address space
- * Defined here because the machine bindings know only Mach threads
- * and nothing about BSD processes.
- *
- * FIXME: need to grab a lock during this?
- */
-void kdbg_get_task_name(char* name_buf, int len, task_t task)
-{
- proc_t proc;
-
- /* Note: we can't use thread->task (and functions that rely on it) here
- * because it hasn't been initialized yet when this function is called.
- * We use the explicitly-passed task parameter instead.
+ /*
+ * Balance the starting TRACE_WRITING_EVENTS tracepoint manually.
*/
- proc = get_bsdtask_info(task);
- if (proc != PROC_NULL)
- snprintf(name_buf, len, "%s/%d", proc->p_comm, proc->p_pid);
- else
- snprintf(name_buf, len, "%p [!bsd]", task);
-}
+ kd_buf end_event = {
+ .debugid = TRACE_WRITING_EVENTS | DBG_FUNC_END,
+ .arg1 = write_size,
+ .arg2 = ret,
+ .arg5 = (kd_buf_argtype)thread_tid(current_thread()),
+ };
+ kdbg_set_timestamp_and_cpu(&end_event, kdbg_timestamp(),
+ cpu_number());
-#if KDEBUG_MOJO_TRACE
-static kd_event_t *
-binary_search(uint32_t id)
-{
- int low, high, mid;
+ /* this is best effort -- ignore any errors */
+ (void)kdbg_write_to_vnode((caddr_t)&end_event, sizeof(kd_buf), vp, ctx,
+ RAW_file_offset);
- low = 0;
- high = sizeof(kd_events)/sizeof(kd_event_t) - 1;
+out_close:
+ vnode_close(vp, FWRITE, ctx);
+ sync(current_proc(), (void *)NULL, (int *)NULL);
- while (TRUE)
- {
- mid = (low + high) / 2;
-
- if (low > high)
- return NULL; /* failed */
- else if ( low + 1 >= high) {
- /* We have a match */
- if (kd_events[high].id == id)
- return &kd_events[high];
- else if (kd_events[low].id == id)
- return &kd_events[low];
- else
- return NULL; /* search failed */
- }
- else if (id < kd_events[mid].id)
- high = mid;
- else
- low = mid;
- }
+out:
+ ktrace_unlock();
}
-/*
- * Look up event id to get name string.
- * Using a per-cpu cache of a single entry
- * before resorting to a binary search of the full table.
- */
-#define NCACHE 1
-static kd_event_t *last_hit[MAX_CPUS];
-static kd_event_t *
-event_lookup_cache(uint32_t cpu, uint32_t id)
+static int
+kdbg_sysctl_continuous SYSCTL_HANDLER_ARGS
{
- if (last_hit[cpu] == NULL || last_hit[cpu]->id != id)
- last_hit[cpu] = binary_search(id);
- return last_hit[cpu];
-}
+#pragma unused(oidp, arg1, arg2)
+ int value = kdbg_continuous_time;
+ int ret = sysctl_io_number(req, value, sizeof(value), &value, NULL);
-static uint64_t kd_last_timstamp;
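+	/* No new value was supplied, so this was only a read. */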
+ if (ret || !req->newptr) {
+ return ret;
+ }
-static void
-kdebug_serial_print(
- uint32_t cpunum,
- uint32_t debugid,
- uint64_t timestamp,
- uintptr_t arg1,
- uintptr_t arg2,
- uintptr_t arg3,
- uintptr_t arg4,
- uintptr_t threadid
- )
-{
- char kprintf_line[192];
- char event[40];
- uint64_t us = timestamp / NSEC_PER_USEC;
- uint64_t us_tenth = (timestamp % NSEC_PER_USEC) / 100;
- uint64_t delta = timestamp - kd_last_timstamp;
- uint64_t delta_us = delta / NSEC_PER_USEC;
- uint64_t delta_us_tenth = (delta % NSEC_PER_USEC) / 100;
- uint32_t event_id = debugid & KDBG_EVENTID_MASK;
- const char *command;
- const char *bra;
- const char *ket;
- kd_event_t *ep;
-
- /* event time and delta from last */
- snprintf(kprintf_line, sizeof(kprintf_line),
- "%11llu.%1llu %8llu.%1llu ",
- us, us_tenth, delta_us, delta_us_tenth);
-
-
- /* event (id or name) - start prefixed by "[", end postfixed by "]" */
- bra = (debugid & DBG_FUNC_START) ? "[" : " ";
- ket = (debugid & DBG_FUNC_END) ? "]" : " ";
- ep = event_lookup_cache(cpunum, event_id);
- if (ep) {
- if (strlen(ep->name) < sizeof(event) - 3)
- snprintf(event, sizeof(event), "%s%s%s",
- bra, ep->name, ket);
- else
- snprintf(event, sizeof(event), "%s%x(name too long)%s",
- bra, event_id, ket);
- } else {
- snprintf(event, sizeof(event), "%s%x%s",
- bra, event_id, ket);
- }
- snprintf(kprintf_line + strlen(kprintf_line),
- sizeof(kprintf_line) - strlen(kprintf_line),
- "%-40s ", event);
-
- /* arg1 .. arg4 with special cases for strings */
- switch (event_id) {
- case VFS_LOOKUP:
- case VFS_LOOKUP_DONE:
- if (debugid & DBG_FUNC_START) {
- /* arg1 hex then arg2..arg4 chars */
- snprintf(kprintf_line + strlen(kprintf_line),
- sizeof(kprintf_line) - strlen(kprintf_line),
- "%-16lx %-8s%-8s%-8s ",
- arg1, (char*)&arg2, (char*)&arg3, (char*)&arg4);
- break;
- }
- /* else fall through for arg1..arg4 chars */
- case TRACE_STRING_EXEC:
- case TRACE_STRING_NEWTHREAD:
- case TRACE_INFO_STRING:
- snprintf(kprintf_line + strlen(kprintf_line),
- sizeof(kprintf_line) - strlen(kprintf_line),
- "%-8s%-8s%-8s%-8s ",
- (char*)&arg1, (char*)&arg2, (char*)&arg3, (char*)&arg4);
- break;
- default:
- snprintf(kprintf_line + strlen(kprintf_line),
- sizeof(kprintf_line) - strlen(kprintf_line),
- "%-16lx %-16lx %-16lx %-16lx",
- arg1, arg2, arg3, arg4);
- }
-
- /* threadid, cpu and command name */
- if (threadid == (uintptr_t)thread_tid(current_thread()) &&
- current_proc() &&
- current_proc()->p_comm[0])
- command = current_proc()->p_comm;
- else
- command = "-";
- snprintf(kprintf_line + strlen(kprintf_line),
- sizeof(kprintf_line) - strlen(kprintf_line),
- " %-16lx %-2d %s\n",
- threadid, cpunum, command);
-
- kprintf("%s", kprintf_line);
- kd_last_timstamp = timestamp;
+ kdbg_continuous_time = value;
+ return 0;
}
-#endif
+
+SYSCTL_NODE(_kern, OID_AUTO, kdbg, CTLFLAG_RD | CTLFLAG_LOCKED, 0,
+ "kdbg");
+
+SYSCTL_PROC(_kern_kdbg, OID_AUTO, experimental_continuous,
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, 0,
+ sizeof(int), kdbg_sysctl_continuous, "I",
+ "Set kdebug to use mach_continuous_time");
+
+SYSCTL_INT(_kern_kdbg, OID_AUTO, debug,
+ CTLFLAG_RW | CTLFLAG_LOCKED,
+ &kdbg_debug, 0, "Set kdebug debug mode");
+
+SYSCTL_QUAD(_kern_kdbg, OID_AUTO, oldest_time,
+ CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
+ &kd_ctrl_page.oldest_time,
+ "Find the oldest timestamp still in trace");