+/*
+ * Adding enough padding to contain a full tracepoint for the last
+ * portion of the string greatly simplifies the logic of splitting the
+ * string between tracepoints. Full tracepoints can be generated using
+ * the buffer itself, without having to manually add zeros to pad the
+ * arguments.
+ */
+
+/* 2 string args in first tracepoint and 9 string data tracepoints */
+#define STR_BUF_ARGS (2 + (9 * 4))
+/* times the size of each arg on K64 */
+#define MAX_STR_LEN (STR_BUF_ARGS * sizeof(uint64_t))
+/* on K32, ending straddles a tracepoint, so reserve blanks */
+#define STR_BUF_SIZE (MAX_STR_LEN + (2 * sizeof(uint32_t)))
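+
+/*
+ * Worked out, on K64: the first tracepoint carries 2 string args (16
+ * bytes) and each of the 9 data tracepoints carries 4 args (32 bytes),
+ * so STR_BUF_ARGS is 38 and MAX_STR_LEN is 38 * 8 = 304 bytes. On K32,
+ * args are 4 bytes, so the first tracepoint carries only 8 bytes and
+ * the remaining 296 bytes need 18.5 data tracepoints; the final one
+ * reads 8 bytes past MAX_STR_LEN, which is what the extra
+ * 2 * sizeof(uint32_t) of padding in STR_BUF_SIZE accounts for.
+ */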
+
+/*
+ * This function does no error checking and assumes that it is called with
+ * the correct arguments: the buffer pointed to by str must be at least
+ * STR_BUF_SIZE bytes, word-aligned, and NUL-terminated. In cases where a
+ * string can fit evenly into a final
+ * tracepoint without its NUL-terminator, this function will not end those
+ * strings with a NUL in trace. It's up to clients to look at the function
+ * qualifier for DBG_FUNC_END in this case, to end the string.
+ */
+static uint64_t
+kernel_debug_string_internal(uint32_t debugid, uint64_t str_id, void *vstr,
+ size_t str_len)
+{
+ /* str must be word-aligned */
+ uintptr_t *str = vstr;
+ size_t written = 0;
+ uintptr_t thread_id;
+ int i;
+ uint32_t trace_debugid = TRACEDBG_CODE(DBG_TRACE_STRING,
+ TRACE_STRING_GLOBAL);
+
+ thread_id = (uintptr_t)thread_tid(current_thread());
+
+ /* if the ID is being invalidated, just emit that */
+ if (str_id != 0 && str_len == 0) {
+ kernel_debug_internal(trace_debugid | DBG_FUNC_START | DBG_FUNC_END,
+ (uintptr_t)debugid, (uintptr_t)str_id, 0, 0,
+ thread_id);
+ return str_id;
+ }
+
+ /* generate an ID, if necessary */
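+ /*
+  * (The new ID keeps its counter bits under STR_ID_MASK and is tagged
+  * with g_str_id_signature, which kdebug_check_trace_string() later
+  * verifies against STR_ID_SIG_MASK before an ID is accepted back.)
+  */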
+ if (str_id == 0) {
+ str_id = OSIncrementAtomic64((SInt64 *)&g_curr_str_id);
+ str_id = (str_id & STR_ID_MASK) | g_str_id_signature;
+ }
+
+ trace_debugid |= DBG_FUNC_START;
+ /* string can fit in a single tracepoint */
+ if (str_len <= (2 * sizeof(uintptr_t))) {
+ trace_debugid |= DBG_FUNC_END;
+ }
+
+ kernel_debug_internal(trace_debugid, (uintptr_t)debugid,
+ (uintptr_t)str_id, str[0],
+ str[1], thread_id);
+
+ trace_debugid &= KDBG_EVENTID_MASK;
+ i = 2;
+ written += 2 * sizeof(uintptr_t);
+
+ for (; written < str_len; i += 4, written += 4 * sizeof(uintptr_t)) {
+ if ((written + (4 * sizeof(uintptr_t))) >= str_len) {
+ trace_debugid |= DBG_FUNC_END;
+ }
+ kernel_debug_internal(trace_debugid, str[i],
+ str[i + 1],
+ str[i + 2],
+ str[i + 3], thread_id);
+ }
+
+ return str_id;
+}
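+
+/*
+ * For example, on K64 a 20-byte string is emitted as two tracepoints:
+ * the first carries debugid, str_id, and str[0..1] (bytes 0-15) with
+ * only DBG_FUNC_START set, since 20 > 2 * sizeof(uintptr_t); the loop
+ * then emits str[2..5] (bytes 16-47, the tail of the string plus the
+ * buffer's zero padding) and sets DBG_FUNC_END because 16 + 32 >= 20.
+ * A string of 16 bytes or fewer fits entirely in the first tracepoint,
+ * which then carries both DBG_FUNC_START and DBG_FUNC_END.
+ */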
+
+/*
+ * Returns true if the current process can emit events, and false otherwise.
+ * Trace system and scheduling events circumvent this check, as do events
+ * emitted in interrupt context.
+ */
+static boolean_t
+kdebug_current_proc_enabled(uint32_t debugid)
+{
+ /* can't determine current process in interrupt context */
+ if (ml_at_interrupt_context()) {
+ return TRUE;
+ }
+
+ /* always emit trace system and scheduling events */
+ if ((KDBG_EXTRACT_CLASS(debugid) == DBG_TRACE ||
+ (debugid & KDBG_CSC_MASK) == MACHDBG_CODE(DBG_MACH_SCHED, 0)))
+ {
+ return TRUE;
+ }
+
+ if (kd_ctrl_page.kdebug_flags & KDBG_PIDCHECK) {
+ proc_t cur_proc = current_proc();
+
+ /* only the process with the kdebug bit set is allowed */
+ if (cur_proc && !(cur_proc->p_kdebug)) {
+ return FALSE;
+ }
+ } else if (kd_ctrl_page.kdebug_flags & KDBG_PIDEXCLUDE) {
+ proc_t cur_proc = current_proc();
+
+ /* every process except the one with the kdebug bit set is allowed */
+ if (cur_proc && cur_proc->p_kdebug) {
+ return FALSE;
+ }
+ }
+
+ return TRUE;
+}
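+
+/*
+ * For example, marking a single process (setting its p_kdebug bit) and
+ * enabling KDBG_PIDCHECK lets only that process's events through this
+ * check; the same marking under KDBG_PIDEXCLUDE drops that process's
+ * events and admits everyone else's. Events logged from interrupt
+ * context always pass, since current_proc() can't be consulted there.
+ */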
+
+/*
+ * Returns false if the debugid is disabled by filters, and true if the
+ * debugid is allowed to be traced. A debugid may not be traced if the
+ * typefilter disables its class and subclass, it's outside a range
+ * check, or if it's not an allowed debugid in a value check. Trace
+ * system events bypass this check.
+ */
+static boolean_t
+kdebug_debugid_enabled(uint32_t debugid)
+{
+ boolean_t is_enabled = TRUE;
+
+ /* if no filtering is enabled */
+ if (!kd_ctrl_page.kdebug_slowcheck) {
+ return TRUE;
+ }
+
+ if (KDBG_EXTRACT_CLASS(debugid) == DBG_TRACE) {
+ return TRUE;
+ }
+
+ if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) {
+ disable_preemption();
+
+ /*
+ * Recheck if typefilter is still being used. If tracing is being
+ * disabled, there's a 100ms sleep on the other end to keep the
+ * bitmap around for this check.
+ */
+ if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) {
+ if (!(isset(type_filter_bitmap, KDBG_EXTRACT_CSC(debugid)))) {
+ is_enabled = FALSE;
+ }
+ }
+
+ enable_preemption();
+ } else if (kd_ctrl_page.kdebug_flags & KDBG_RANGECHECK) {
+ if (debugid < kdlog_beg || debugid > kdlog_end) {
+ is_enabled = FALSE;
+ }
+ } else if (kd_ctrl_page.kdebug_flags & KDBG_VALCHECK) {
+ if ((debugid & KDBG_EVENTID_MASK) != kdlog_value1 &&
+ (debugid & KDBG_EVENTID_MASK) != kdlog_value2 &&
+ (debugid & KDBG_EVENTID_MASK) != kdlog_value3 &&
+ (debugid & KDBG_EVENTID_MASK) != kdlog_value4)
+ {
+ is_enabled = FALSE;
+ }
+ }
+
+ return is_enabled;
+}
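+
+/*
+ * As an illustration of the three filter modes (only one applies to any
+ * given event): a typefilter is consulted by class and subclass only
+ * (KDBG_EXTRACT_CSC), so allowing the subclass of
+ * MACHDBG_CODE(DBG_MACH_SCHED, 0) allows every code within it; a range
+ * check compares the full debugid against [kdlog_beg, kdlog_end]; and a
+ * value check strips the function qualifiers (KDBG_EVENTID_MASK) before
+ * comparing against the four kdlog_value slots.
+ */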
+
+/*
+ * Returns 0 if a string can be traced with these arguments, or an errno
+ * value if an error occurred.
+ */
+static errno_t
+kdebug_check_trace_string(uint32_t debugid, uint64_t str_id)
+{
+ /* if there are function qualifiers on the debugid */
+ if (debugid & ~KDBG_EVENTID_MASK) {
+ return EINVAL;
+ }
+
+ if (kdebug_validate_debugid(debugid)) {
+ return EPERM;
+ }
+
+ if (str_id != 0 && (str_id & STR_ID_SIG_MASK) != g_str_id_signature) {
+ return EINVAL;
+ }
+
+ return 0;
+}
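+
+/*
+ * For instance: a debugid with DBG_FUNC_START or DBG_FUNC_END set fails
+ * the first check with EINVAL, since the function qualifiers are owned
+ * by the string tracepoints themselves; anything that
+ * kdebug_validate_debugid() refuses maps to EPERM; and a nonzero str_id
+ * whose STR_ID_SIG_MASK bits don't carry g_str_id_signature (an ID that
+ * was never handed out by kernel_debug_string_internal()) fails with
+ * EINVAL.
+ */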
+
+/*
+ * Implementation of the kernel_debug_string KPI.
+ */
+int
+kernel_debug_string(uint32_t debugid, uint64_t *str_id, const char *str)
+{
+ /* arguments to tracepoints must be word-aligned */
+ __attribute__((aligned(sizeof(uintptr_t)))) char str_buf[STR_BUF_SIZE];
+ assert_static(sizeof(str_buf) > MAX_STR_LEN);
+ vm_size_t len_copied;
+ int err;
+
+ assert(str_id);
+
+ if (__probable(kdebug_enable == 0)) {
+ return 0;
+ }
+
+ if (!kdebug_current_proc_enabled(debugid)) {
+ return 0;
+ }
+
+ if (!kdebug_debugid_enabled(debugid)) {
+ return 0;
+ }
+
+ if ((err = kdebug_check_trace_string(debugid, *str_id)) != 0) {
+ return err;
+ }
+
+ if (str == NULL) {
+ if (*str_id == 0) {
+ return EINVAL;
+ }
+
+ *str_id = kernel_debug_string_internal(debugid, *str_id, NULL, 0);
+ return 0;
+ }
+
+ memset(str_buf, 0, sizeof(str_buf));
+ len_copied = strlcpy(str_buf, str, MAX_STR_LEN + 1);
+ /* strlcpy returns the full source length; clamp to what fits the buffer */
+ if (len_copied > MAX_STR_LEN) {
+ len_copied = MAX_STR_LEN;
+ }
+ *str_id = kernel_debug_string_internal(debugid, *str_id, str_buf,
+ len_copied);
+ return 0;
+}
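+
+/*
+ * A minimal usage sketch for the KPI (MY_EVENTID is a hypothetical event
+ * ID with no DBG_FUNC_* qualifiers set, as kdebug_check_trace_string()
+ * requires; it is not part of this change):
+ *
+ *	uint64_t str_id = 0;
+ *
+ *	// emit the string and receive a global ID for it
+ *	kernel_debug_string(MY_EVENTID, &str_id, "example string");
+ *	...
+ *	// passing NULL with the returned ID emits the invalidation event
+ *	kernel_debug_string(MY_EVENTID, &str_id, NULL);
+ */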
+
+/*
+ * Support the kdebug_trace_string syscall.
+ */
+int
+kdebug_trace_string(__unused struct proc *p,
+ struct kdebug_trace_string_args *uap,
+ uint64_t *retval)
+{
+ __attribute__((aligned(sizeof(uintptr_t)))) char str_buf[STR_BUF_SIZE];
+ assert_static(sizeof(str_buf) > MAX_STR_LEN);
+ size_t len_copied;
+ int err;
+
+ if (__probable(kdebug_enable == 0)) {
+ return 0;
+ }
+
+ if (!kdebug_current_proc_enabled(uap->debugid)) {
+ return 0;
+ }
+
+ if (!kdebug_debugid_enabled(uap->debugid)) {
+ return 0;
+ }
+
+ if ((err = kdebug_check_trace_string(uap->debugid, uap->str_id)) != 0) {
+ return err;
+ }
+
+ if (uap->str == USER_ADDR_NULL) {
+ if (uap->str_id == 0) {
+ return EINVAL;
+ }
+
+ *retval = kernel_debug_string_internal(uap->debugid, uap->str_id,
+ NULL, 0);
+ return 0;
+ }
+
+ memset(str_buf, 0, sizeof(str_buf));
+ err = copyinstr(uap->str, str_buf, MAX_STR_LEN + 1, &len_copied);
+
+ /* it's alright to truncate the string, so allow ENAMETOOLONG */
+ if (err == ENAMETOOLONG) {
+ str_buf[MAX_STR_LEN] = '\0';
+ } else if (err) {
+ return err;
+ }
+
+ if (len_copied <= 1) {
+ return EINVAL;
+ }
+
+ /* convert back to a length */
+ len_copied--;
+
+ *retval = kernel_debug_string_internal(uap->debugid, uap->str_id, str_buf,
+ len_copied);
+ return 0;
+}
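+
+/*
+ * From userspace this syscall is reached through a wrapper of the same
+ * name; the sketch below assumes the usual libsyscall convention of
+ * returning the 64-bit retval and reporting failures through errno, and
+ * MY_EVENTID is again a hypothetical qualifier-free event ID:
+ *
+ *	uint64_t str_id;
+ *
+ *	// emit a string and get back the ID that names it in the trace
+ *	str_id = kdebug_trace_string(MY_EVENTID, 0, "interesting string");
+ *	...
+ *	// invalidate the ID once the string is no longer relevant
+ *	kdebug_trace_string(MY_EVENTID, str_id, NULL);
+ *
+ * Note that, unlike the in-kernel KPI above, an empty string is rejected
+ * here with EINVAL (len_copied <= 1).
+ */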