* individual preprocessor macros in this header that declare new behavior as
* required.
*/
-#define IMG4_API_VERSION (20190125u)
+#define IMG4_API_VERSION (20191001u)
#if !defined(KERNEL) && !IMG4_PROJECT_BUILD
#define IMG4_API_AVAILABLE_20180112 \
API_AVAILABLE(ios(12.2), tvos(12.2), watchos(5.2))
#define IMG4_API_AVAILABLE_20190125 \
API_AVAILABLE(macos(10.15), ios(13.0), tvos(13.0), watchos(6.0))
+#define IMG4_API_AVAILABLE_20191001 \
+ API_AVAILABLE(macos(10.15.2), ios(13.3), tvos(13.3), watchos(6.1.1))
#else
#define IMG4_API_AVAILABLE_20180112
#define IMG4_API_AVAILABLE_20181004
#define IMG4_API_AVAILABLE_20181106
#define IMG4_API_AVAILABLE_20190125
+#define IMG4_API_AVAILABLE_20191001
#endif // !defined(KERNEL) && !IMG4_PROJECT_BUILD
#if !defined(OS_CLOSED_ENUM)
#include "tapi.h"
#endif
+/*!
+ * @const IMG4_IDENTITY_VERSION
+ * The version of the {@link img4_identity_t} supported by the implementation.
+ */
+#define IMG4_IDENTITY_VERSION (0u)
+
+/*!
+ * @const IMG4_ENVIRONMENT_LENGTH
+ * The minimum length for an allocation that can accommodate an
+ * img4_environment_t structure. Storage of at least this length must be
+ * given to {@link img4_environment_init_identity}.
+ */
+#define IMG4_ENVIRONMENT_LENGTH (160ul)
+
+/*!
+ * @const IMG4_IDENTITY_CRYPTO_SHA1
+ * The device-tree string indicating that the identity requires SHA1.
+ */
+#define IMG4_IDENTITY_CRYPTO_SHA1 "sha1"
+
+/*!
+ * @const IMG4_IDENTITY_CRYPTO_SHA2_384
+ * The device-tree string indicating that the identity requires SHA2-384.
+ */
+#define IMG4_IDENTITY_CRYPTO_SHA2_384 "sha2-384"
+
/*!
* @typedef img4_environment_t
* An opaque type describing an Image4 environment.
*/
typedef struct _img4_environment img4_environment_t;
+/*!
+ * @typedef img4_identity_t
+ * A structure describing a specific Image4 identity composed of user-supplied
+ * identifiers.
+ *
+ * @field i4id_version
+ * The version of the identity structure; initialize to
+ * {@link IMG4_IDENTITY_VERSION}
+ *
+ * @field i4id_algo
+ * A string identifying the chosen crypto algorithm as represented in the device
+ * tree. Currently valid values are:
+ *
+ * - {@link IMG4_IDENTITY_CRYPTO_SHA1}
+ * - {@link IMG4_IDENTITY_CRYPTO_SHA2_384}
+ *
+ * @field i4id_cepo
+ * The minimum certificate epoch required.
+ *
+ * @field i4id_bord
+ * The board identifier.
+ *
+ * @field i4id_chip
+ * The chip identifier.
+ *
+ * @field i4id_ecid
+ * The unique chip identifier.
+ *
+ * @field i4id_sdom
+ * The security domain.
+ *
+ * @field i4id_cpro
+ * The certificate production status.
+ *
+ * @field i4id_csec
+ * The certificate security mode.
+ *
+ * @field i4id_epro
+ * The effective production status.
+ *
+ * @field i4id_esec
+ * The effective security mode.
+ */
+IMG4_API_AVAILABLE_20191001
+typedef struct _img4_identity {
+ img4_struct_version_t i4id_version;
+ char i4id_algo[12];
+ uint32_t i4id_cepo;
+ uint32_t i4id_bord;
+ uint32_t i4id_chip;
+ uint64_t i4id_ecid;
+ uint32_t i4id_sdom;
+ bool i4id_cpro;
+ bool i4id_csec;
+ bool i4id_epro;
+ bool i4id_esec;
+} img4_identity_t;
+
/*!
* @const IMG4_ENVIRONMENT_PLATFORM
* The environment for the host that uses the default platform implementation to
#define IMG4_ENVIRONMENT_TRUST_CACHE (img4if->i4if_environment_trust_cache)
#endif
+/*!
+ * @function img4_environment_init_identity
+ * Initializes a caller-supplied environment with custom identity information.
+ * This may be used for performing test evaluations or evaluations against
+ * environments not yet supported by the implementation.
+ *
+ * @param i4e
+ * A pointer to the storage which will hold the custom environment.
+ *
+ * @param len
+ * The length of the storage referenced by {@link i4e}. This must be at least
+ * {@link IMG4_ENVIRONMENT_LENGTH} bytes.
+ *
+ * @param i4id
+ * The identity with which to initialize the environment. The resulting
+ * environment object will provide these identifiers to the evaluator.
+ *
+ * @result
+ * Upon success, zero is returned. The implementation may also return one of the
+ * following error codes directly:
+ *
+ * [EOVERFLOW] The length provided is insufficient to initialize an
+ * environment structure.
+ *
+ * @discussion
+ * When the resulting environment is given to {@link img4_get_trusted_payload}
+ * or {@link img4_get_trusted_external_payload}, the trust evaluation proceeds
+ * as though it were creating a new chain of trust and therefore acts as though
+ * {@link I4F_FIRST_STAGE} was given to {@link img4_init}. No prior stage of
+ * secure boot will be consulted for evaluation, and mix-n-match will be
+ * presumed to be permitted.
+ */
+#if !XNU_KERNEL_PRIVATE
+IMG4_API_AVAILABLE_20191001
+OS_EXPORT OS_WARN_RESULT OS_NONNULL1 OS_NONNULL3
+errno_t
+img4_environment_init_identity(img4_environment_t *i4e, size_t len,
+ const img4_identity_t *i4id);
+#else
+#define img4_environment_init_identity(...) \
+ (img4if->i4if_v4.environment_init_identity(__VA_ARGS__))
+#endif
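
A minimal usage sketch (editorial, not part of the header): the identifier values below are placeholders rather than real device identifiers, and the caller is assumed to be a userspace client of this SPI.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

static errno_t
make_custom_environment(uint8_t storage[IMG4_ENVIRONMENT_LENGTH])
{
	img4_identity_t i4id = {
		.i4id_version = IMG4_IDENTITY_VERSION,
		.i4id_cepo = 1,      /* placeholder certificate epoch */
		.i4id_bord = 0x00,   /* placeholder board identifier */
		.i4id_chip = 0x8020, /* placeholder chip identifier */
		.i4id_ecid = 0,      /* placeholder unique chip identifier */
		.i4id_sdom = 0x1,    /* placeholder security domain */
		.i4id_cpro = true,
		.i4id_csec = true,
		.i4id_epro = true,
		.i4id_esec = true,
	};
	strlcpy(i4id.i4id_algo, IMG4_IDENTITY_CRYPTO_SHA2_384,
	    sizeof(i4id.i4id_algo));

	/* Returns EOVERFLOW if the storage is smaller than required. */
	return img4_environment_init_identity(
	    (img4_environment_t *)storage, IMG4_ENVIRONMENT_LENGTH, &i4id);
}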
+
#endif // __IMG4_ENVIRONMENT_H
* in the manifest
* [EIO] The payload could not be fetched
*
- * Additionally, errors from the routines specified in the
- * {@link img4_environment_t} may be returned.
- *
* @discussion
* This routine will perform the following validation:
*
*
* @discussion
* This routine performs the same validation steps as
- * {@link img4_get_trusted_payload}.
+ * {@link img4_get_trusted_payload} and has the same caveats.
*/
#if !XNU_KERNEL_PRIVATE
IMG4_API_AVAILABLE_20180112
/*
* DTrace Format Functions
*/
+
+static dtrace_format_t*
+dtrace_format_new(char *str)
+{
+ dtrace_format_t *fmt = NULL;
+ size_t bufsize = strlen(str) + 1;
+
+ fmt = kmem_zalloc(sizeof(*fmt) + bufsize, KM_SLEEP);
+
+ fmt->dtf_refcount = 1;
+ (void) strlcpy(fmt->dtf_str, str, bufsize);
+
+ return fmt;
+}
+
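The refcounted record and the DTRACE_FORMAT_SIZE() macro used below are defined in the dtrace headers; a sketch consistent with how this code uses them (one allocation holding the header plus the string), not the authoritative definition:

typedef struct dtrace_format {
	uint64_t dtf_refcount;  /* live references to this format string */
	char     dtf_str[];     /* NUL-terminated format string */
} dtrace_format_t;

/* Total allocation size, recovered from the stored string. */
#define DTRACE_FORMAT_SIZE(fmt) \
	(sizeof(dtrace_format_t) + strlen((fmt)->dtf_str) + 1)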
static uint16_t
dtrace_format_add(dtrace_state_t *state, char *str)
{
- char *fmt, **new;
- uint16_t ndx, len = strlen(str) + 1;
-
- fmt = kmem_zalloc(len, KM_SLEEP);
- bcopy(str, fmt, len);
+ dtrace_format_t **new;
+ uint16_t ndx;
for (ndx = 0; ndx < state->dts_nformats; ndx++) {
if (state->dts_formats[ndx] == NULL) {
- state->dts_formats[ndx] = fmt;
+ state->dts_formats[ndx] = dtrace_format_new(str);
+ return (ndx + 1);
+ } else if (strcmp(state->dts_formats[ndx]->dtf_str, str) == 0) {
+ VERIFY(state->dts_formats[ndx]->dtf_refcount < UINT64_MAX);
+ state->dts_formats[ndx]->dtf_refcount++;
return (ndx + 1);
}
}
* This is only likely if a denial-of-service attack is being
* attempted. As such, it's okay to fail silently here.
*/
- kmem_free(fmt, len);
return (0);
}
* number of formats.
*/
ndx = state->dts_nformats++;
- new = kmem_alloc((ndx + 1) * sizeof (char *), KM_SLEEP);
+ new = kmem_alloc((ndx + 1) * sizeof (*state->dts_formats), KM_SLEEP);
if (state->dts_formats != NULL) {
ASSERT(ndx != 0);
- bcopy(state->dts_formats, new, ndx * sizeof (char *));
- kmem_free(state->dts_formats, ndx * sizeof (char *));
+ bcopy(state->dts_formats, new, ndx * sizeof (*state->dts_formats));
+ kmem_free(state->dts_formats, ndx * sizeof (*state->dts_formats));
}
state->dts_formats = new;
- state->dts_formats[ndx] = fmt;
+ state->dts_formats[ndx] = dtrace_format_new(str);
return (ndx + 1);
}
static void
dtrace_format_remove(dtrace_state_t *state, uint16_t format)
{
- char *fmt;
+ dtrace_format_t *fmt;
ASSERT(state->dts_formats != NULL);
ASSERT(format <= state->dts_nformats);
- ASSERT(state->dts_formats[format - 1] != NULL);
fmt = state->dts_formats[format - 1];
- kmem_free(fmt, strlen(fmt) + 1);
- state->dts_formats[format - 1] = NULL;
+
+ ASSERT(fmt != NULL);
+ VERIFY(fmt->dtf_refcount > 0);
+
+ fmt->dtf_refcount--;
+
+ if (fmt->dtf_refcount == 0) {
+ kmem_free(fmt, DTRACE_FORMAT_SIZE(fmt));
+ state->dts_formats[format - 1] = NULL;
+ }
}
static void
ASSERT(state->dts_formats != NULL);
for (i = 0; i < state->dts_nformats; i++) {
- char *fmt = state->dts_formats[i];
+ dtrace_format_t *fmt = state->dts_formats[i];
if (fmt == NULL)
continue;
- kmem_free(fmt, strlen(fmt) + 1);
+ kmem_free(fmt, DTRACE_FORMAT_SIZE(fmt));
}
- kmem_free(state->dts_formats, state->dts_nformats * sizeof (char *));
+ kmem_free(state->dts_formats, state->dts_nformats * sizeof (*state->dts_formats));
state->dts_nformats = 0;
state->dts_formats = NULL;
}
* and that the format for the specified index is non-NULL.
*/
ASSERT(state->dts_formats != NULL);
- str = state->dts_formats[fmt.dtfd_format - 1];
+ str = state->dts_formats[fmt.dtfd_format - 1]->dtf_str;
ASSERT(str != NULL);
len = strlen(str) + 1;
u_int32_t lastunit;
};
+#if DEVELOPMENT || DEBUG
+enum ctl_status {
+ KCTL_DISCONNECTED = 0,
+ KCTL_CONNECTING = 1,
+ KCTL_CONNECTED = 2
+};
+#endif /* DEVELOPMENT || DEBUG */
+
struct ctl_cb {
TAILQ_ENTRY(ctl_cb) next; /* controller chain */
lck_mtx_t *mtx;
struct sockaddr_ctl sac;
u_int32_t usecount;
u_int32_t kcb_usecount;
+#if DEVELOPMENT || DEBUG
+ enum ctl_status status;
+#endif /* DEVELOPMENT || DEBUG */
};
#ifndef ROUNDUP64
SYSCTL_INT(_net_systm_kctl, OID_AUTO, debug,
CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_debug, 0, "");
+#if DEVELOPMENT || DEBUG
+u_int32_t ctl_panic_debug = 0;
+SYSCTL_INT(_net_systm_kctl, OID_AUTO, panicdebug,
+ CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_panic_debug, 0, "");
+#endif /* DEVELOPMENT || DEBUG */
+
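On DEVELOPMENT/DEBUG kernels the new state-machine check can be armed from userspace via sysctlbyname(3); a minimal sketch:

#include <sys/sysctl.h>

/* Arm the kctl connect-state panic (net.systm.kctl.panicdebug). */
static int
enable_kctl_panic_debug(void)
{
	u_int32_t on = 1;
	return sysctlbyname("net.systm.kctl.panicdebug", NULL, NULL,
	    &on, sizeof(on));
}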
#define KCTL_TBL_INC 16
static uintptr_t kctl_tbl_size = 0;
}
soisdisconnected(so);
+#if DEVELOPMENT || DEBUG
+ kcb->status = KCTL_DISCONNECTED;
+#endif /* DEVELOPMENT || DEBUG */
so->so_flags |= SOF_PCBCLEARING;
clt_kcb_decrement_use_count(kcb);
return 0;
done:
if (error) {
soisdisconnected(so);
+#if DEVELOPMENT || DEBUG
+ kcb->status = KCTL_DISCONNECTED;
+#endif /* DEVELOPMENT || DEBUG */
lck_mtx_lock(ctl_mtx);
TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
kcb->kctl = NULL;
lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
ctl_kcb_increment_use_count(kcb, mtx_held);
+#if DEVELOPMENT || DEBUG
+ if (kcb->status != KCTL_DISCONNECTED && ctl_panic_debug) {
+ panic("kctl already connecting/connected");
+ }
+ kcb->status = KCTL_CONNECTING;
+#endif /* DEVELOPMENT || DEBUG */
+
error = ctl_setup_kctl(so, nam, p);
if (error) {
goto out;
goto end;
}
soisconnected(so);
+#if DEVELOPMENT || DEBUG
+ kcb->status = KCTL_CONNECTED;
+#endif /* DEVELOPMENT || DEBUG */
end:
if (error && kcb->kctl->disconnect) {
}
if (error) {
soisdisconnected(so);
+#if DEVELOPMENT || DEBUG
+ kcb->status = KCTL_DISCONNECTED;
+#endif /* DEVELOPMENT || DEBUG */
lck_mtx_lock(ctl_mtx);
TAILQ_REMOVE(&kcb->kctl->kcb_head, kcb, next);
kcb->kctl = NULL;
}
soisdisconnected(so);
+#if DEVELOPMENT || DEBUG
+ kcb->status = KCTL_DISCONNECTED;
+#endif /* DEVELOPMENT || DEBUG */
socket_unlock(so, 0);
lck_mtx_lock(ctl_mtx);
entropy[0] &= ~(0xffull << 8);
}
- int len = snprintf(str, sizeof(str), "%s0x%llx", key, entropy[0]);
+ int len = scnprintf(str, sizeof(str), "%s0x%llx", key, entropy[0]);
int remaining = sizeof(str) - len;
for (int i = 1; i < values && remaining > 0; ++i) {
int start = sizeof(str) - remaining;
- len = snprintf(&str[start], remaining, ",0x%llx", entropy[i]);
+ len = scnprintf(&str[start], remaining, ",0x%llx", entropy[i]);
remaining -= len;
}
#include <sys/persona.h>
#include <sys/sysent.h>
#include <sys/reason.h>
+#include <IOKit/IOBSD.h> /* IOTaskHasEntitlement() */
#ifdef CONFIG_32BIT_TELEMETRY
#include <sys/kasl.h>
case CS_OPS_IDENTITY:
case CS_OPS_BLOB:
case CS_OPS_TEAMID:
+ case CS_OPS_CLEAR_LV:
break; /* not restricted to root */
default:
if (forself == 0 && kauth_cred_issuser(kauth_cred_get()) != TRUE) {
case CS_OPS_SET_STATUS:
case CS_OPS_CLEARINSTALLER:
case CS_OPS_CLEARPLATFORM:
+ case CS_OPS_CLEAR_LV:
if ((error = mac_proc_check_set_cs_info(current_proc(), pt, ops))) {
goto out;
}
break;
}
+ case CS_OPS_CLEAR_LV: {
+ /*
+ * This option is used to remove library validation from
+ * a running process. This is used in plugin architectures
+ * when a program needs to load untrusted libraries. This
+ * allows the process to maintain library validation as
+ * long as possible, then drop it only when required.
+ * Once a process has loaded the untrusted library,
+ * relying on library validation in the future will
+ * not be effective. An alternative is to re-exec
+ * your application without library validation, or
+ * fork an untrusted child.
+ */
+#ifdef CONFIG_EMBEDDED
+ // On embedded platforms, we don't support dropping LV
+ error = ENOTSUP;
+#else
+ /*
+ * if we have the flag set, and the caller wants
+ * to remove it, and they're entitled to, then
+ * we remove it from the csflags
+ *
+ * NOTE: We are fine to poke into the task because
+ * we get a ref to pt when we do the proc_find
+ * at the beginning of this function.
+ *
+ * We also only allow altering ourselves.
+ */
+ if (forself == 1 && IOTaskHasEntitlement(pt->task, CLEAR_LV_ENTITLEMENT)) {
+ proc_lock(pt);
+ pt->p_csflags &= ~(CS_REQUIRE_LV | CS_FORCED_LV);
+ proc_unlock(pt);
+ error = 0;
+ } else {
+ error = EPERM;
+ }
+#endif
+ break;
+ }
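From userspace, the new operation would be issued against the calling process via csops(2); a hedged sketch (the exact entitlement string behind CLEAR_LV_ENTITLEMENT is not shown in this diff):

#include <sys/codesign.h>
#include <unistd.h>
#include <errno.h>

/*
 * Drop library validation for the current process. Fails with EPERM
 * without the required entitlement, or ENOTSUP on embedded platforms.
 */
static int
drop_library_validation(void)
{
	if (csops(getpid(), CS_OPS_CLEAR_LV, NULL, 0) != 0) {
		return errno;
	}
	return 0;
}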
case CS_OPS_BLOB: {
void *start;
size_t length;
* Get proc name and parent proc name; if the parent execs, we'll get a
* garbled name.
*/
- bytes_printed = snprintf(signature_cur_end,
+ bytes_printed = scnprintf(signature_cur_end,
signature_buf_end - signature_cur_end,
"%s,%s,", p->p_name,
(p->p_pptr ? p->p_pptr->p_name : ""));
identity = "";
}
- bytes_printed = snprintf(signature_cur_end,
+ bytes_printed = scnprintf(signature_cur_end,
signature_buf_end - signature_cur_end,
"%s,%s", teamid, identity);
*
* vm_global_user_wire_limit - system wide limit on wired memory from all processes combined.
*
- * vm_user_wire_limit - per address space limit on wired memory. This puts a cap on the process's rlimit value.
+ * vm_per_task_user_wire_limit - per address space limit on wired memory. This puts a cap on the process's rlimit value.
*
* These values are initialized to reasonable defaults at boot time based on the available physical memory in
* kmem_init().
* All values are in bytes.
*/
-vm_map_size_t vm_global_no_user_wire_amount;
vm_map_size_t vm_global_user_wire_limit;
-vm_map_size_t vm_user_wire_limit;
+vm_map_size_t vm_per_task_user_wire_limit;
+extern uint64_t max_mem;
+/*
+ * We used to have a global in the kernel called vm_global_no_user_wire_amount, which was the inverse
+ * of vm_global_user_wire_limit. Maintaining both is redundant, and vm_global_user_wire_limit is the
+ * real limit.
+ * This function exists for backwards compatibility with userspace,
+ * since we exposed the old global via a sysctl.
+ */
+STATIC int
+sysctl_global_no_user_wire_amount(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+ vm_map_size_t old_value;
+ vm_map_size_t new_value;
+ int changed;
+ int error;
+
+ old_value = max_mem - vm_global_user_wire_limit;
+ error = sysctl_io_number(req, old_value, sizeof(vm_map_size_t), &new_value, &changed);
+ if (changed) {
+ if ((uint64_t)new_value > max_mem) {
+ error = EINVAL;
+ } else {
+ vm_global_user_wire_limit = max_mem - new_value;
+ }
+ }
+ return error;
+}
/*
* There needs to be a more automatic/elegant way to do this
*/
#if defined(__ARM__)
-SYSCTL_INT(_vm, OID_AUTO, global_no_user_wire_amount, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_no_user_wire_amount, 0, "");
SYSCTL_INT(_vm, OID_AUTO, global_user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_user_wire_limit, 0, "");
-SYSCTL_INT(_vm, OID_AUTO, user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_user_wire_limit, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_per_task_user_wire_limit, 0, "");
+SYSCTL_PROC(_vm, OID_AUTO, global_no_user_wire_amount, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, &sysctl_global_no_user_wire_amount, "I", "");
#else
-SYSCTL_QUAD(_vm, OID_AUTO, global_no_user_wire_amount, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_no_user_wire_amount, "");
SYSCTL_QUAD(_vm, OID_AUTO, global_user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_user_wire_limit, "");
-SYSCTL_QUAD(_vm, OID_AUTO, user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_user_wire_limit, "");
+SYSCTL_QUAD(_vm, OID_AUTO, user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_per_task_user_wire_limit, "");
+SYSCTL_PROC(_vm, OID_AUTO, global_no_user_wire_amount, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, &sysctl_global_no_user_wire_amount, "Q", "");
#endif
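The handler keeps the legacy vm.global_no_user_wire_amount name working by converting on the fly: a read reports max_mem - vm_global_user_wire_limit, and a write of N sets the limit to max_mem - N (rejecting N > max_mem with EINVAL). A small userspace sketch (note the value is int-width on ARM and quad-width elsewhere, per the SYSCTL declarations above):

#include <sys/sysctl.h>
#include <stdint.h>

/* Read the derived "no user wire" amount via the legacy name. */
static int
get_no_user_wire_amount(uint64_t *out)
{
	size_t len = sizeof(*out);
	return sysctlbyname("vm.global_no_user_wire_amount", out, &len,
	    NULL, 0);
}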
+#if DEVELOPMENT || DEBUG
+/* These sysctls are used to test the wired limit. */
+extern unsigned int vm_page_wire_count;
+extern uint32_t vm_lopage_free_count;
+SYSCTL_INT(_vm, OID_AUTO, page_wire_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_wire_count, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, lopage_free_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_lopage_free_count, 0, "");
+#endif /* DEVELOPMENT || DEBUG */
+
extern int vm_map_copy_overwrite_aligned_src_not_internal;
extern int vm_map_copy_overwrite_aligned_src_not_symmetric;
extern int vm_map_copy_overwrite_aligned_src_large;
SYSCTL_PROC(_kern, OID_AUTO, wedge_thread, CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED, 0, 0, wedge_thread, "I", "wedge this thread so it cannot be cleaned up");
+extern unsigned long
+total_corpses_count(void);
+
+static int
+sysctl_total_corpses_count SYSCTL_HANDLER_ARGS;
+
+static int
+sysctl_total_corpses_count SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, arg1, arg2)
+ int corpse_count = (int)total_corpses_count();
+ return sysctl_io_opaque(req, &corpse_count, sizeof(int), NULL);
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, total_corpses_count, CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_LOCKED, 0, 0, sysctl_total_corpses_count, "I", "total corpses on the system");
+
static int
sysctl_turnstile_test_prim_lock SYSCTL_HANDLER_ARGS;
static int
printf("%s starting uncontended mutex test with %d iterations\n", __func__, iter);
- offset = snprintf(buffer, buffer_size, "STATS INNER LOOP");
+ offset = scnprintf(buffer, buffer_size, "STATS INNER LOOP");
offset += lck_mtx_test_mtx_uncontended(iter, &buffer[offset], buffer_size - offset);
- offset += snprintf(&buffer[offset], buffer_size - offset, "\nSTATS OUTER LOOP");
+ offset += scnprintf(&buffer[offset], buffer_size - offset, "\nSTATS OUTER LOOP");
offset += lck_mtx_test_mtx_uncontended_loop_time(iter, &buffer[offset], buffer_size - offset);
error = SYSCTL_OUT(req, buffer, offset);
printf("%s starting contended mutex test with %d iterations FULL_CONTENDED\n", __func__, iter);
- offset = snprintf(buffer, buffer_size, "STATS INNER LOOP");
+ offset = scnprintf(buffer, buffer_size, "STATS INNER LOOP");
offset += lck_mtx_test_mtx_contended(iter, &buffer[offset], buffer_size - offset, FULL_CONTENDED);
printf("%s starting contended mutex loop test with %d iterations FULL_CONTENDED\n", __func__, iter);
- offset += snprintf(&buffer[offset], buffer_size - offset, "\nSTATS OUTER LOOP");
+ offset += scnprintf(&buffer[offset], buffer_size - offset, "\nSTATS OUTER LOOP");
offset += lck_mtx_test_mtx_contended_loop_time(iter, &buffer[offset], buffer_size - offset, FULL_CONTENDED);
printf("%s starting contended mutex test with %d iterations HALF_CONTENDED\n", __func__, iter);
- offset += snprintf(&buffer[offset], buffer_size - offset, "STATS INNER LOOP");
+ offset += scnprintf(&buffer[offset], buffer_size - offset, "STATS INNER LOOP");
offset += lck_mtx_test_mtx_contended(iter, &buffer[offset], buffer_size - offset, HALF_CONTENDED);
printf("%s starting contended mutex loop test with %d iterations HALF_CONTENDED\n", __func__, iter);
- offset += snprintf(&buffer[offset], buffer_size - offset, "\nSTATS OUTER LOOP");
+ offset += scnprintf(&buffer[offset], buffer_size - offset, "\nSTATS OUTER LOOP");
offset += lck_mtx_test_mtx_contended_loop_time(iter, &buffer[offset], buffer_size - offset, HALF_CONTENDED);
error = SYSCTL_OUT(req, buffer, offset);
return retval;
}
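+/*
+ * Like vsnprintf(), but the return value is clamped to the number of bytes
+ * actually written (excluding the trailing NUL) rather than the length the
+ * fully formatted string would have required.
+ */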
+int
+vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
+{
+ ssize_t ssize = size;
+ int i;
+
+ i = vsnprintf(buf, size, fmt, args);
+
+ return (i >= ssize) ? (ssize - 1) : i;
+}
+
+int
+scnprintf(char *buf, size_t size, const char *fmt, ...)
+{
+ va_list args;
+ int i;
+
+ va_start(args, fmt);
+ i = vscnprintf(buf, size, fmt, args);
+ va_end(args);
+
+ return i;
+}
+
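The snprintf-to-scnprintf conversions throughout this change all protect the same accumulation pattern, where an unclamped return value could push the offset past the buffer and make the remaining length wrap; a minimal sketch:

/* Safe only because scnprintf() never reports more than it wrote. */
static int
append_stats(char *buf, size_t buflen)
{
	int offset = 0;

	offset += scnprintf(buf + offset, buflen - offset, "STATS INNER LOOP");
	offset += scnprintf(buf + offset, buflen - offset, "\nSTATS OUTER LOOP");
	return offset; /* total bytes written, at most buflen - 1 */
}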
static void
snprintf_func(int ch, void *arg)
{
if (totmbufs > m_mbufs) {
totmbufs = m_mbufs;
}
- k = snprintf(c, clen, "%lu/%u mbufs in use:\n", totmbufs, m_mbufs);
+ k = scnprintf(c, clen, "%lu/%u mbufs in use:\n", totmbufs, m_mbufs);
MBUF_DUMP_BUF_CHK();
bzero(&seen, sizeof(seen));
for (mp = mbtypes; mp->mt_name != NULL; mp++) {
if (mbstat.m_mtypes[mp->mt_type] != 0) {
seen[mp->mt_type] = 1;
- k = snprintf(c, clen, "\t%u mbufs allocated to %s\n",
+ k = scnprintf(c, clen, "\t%u mbufs allocated to %s\n",
mbstat.m_mtypes[mp->mt_type], mp->mt_name);
MBUF_DUMP_BUF_CHK();
}
seen[MT_FREE] = 1;
for (i = 0; i < nmbtypes; i++) {
if (!seen[i] && mbstat.m_mtypes[i] != 0) {
- k = snprintf(c, clen, "\t%u mbufs allocated to "
+ k = scnprintf(c, clen, "\t%u mbufs allocated to "
"<mbuf type %d>\n", mbstat.m_mtypes[i], i);
MBUF_DUMP_BUF_CHK();
}
}
if ((m_mbufs - totmbufs) > 0) {
- k = snprintf(c, clen, "\t%lu mbufs allocated to caches\n",
+ k = scnprintf(c, clen, "\t%lu mbufs allocated to caches\n",
m_mbufs - totmbufs);
MBUF_DUMP_BUF_CHK();
}
- k = snprintf(c, clen, "%u/%u mbuf 2KB clusters in use\n"
+ k = scnprintf(c, clen, "%u/%u mbuf 2KB clusters in use\n"
"%u/%u mbuf 4KB clusters in use\n",
(unsigned int)(mbstat.m_clusters - m_clfree),
(unsigned int)mbstat.m_clusters,
MBUF_DUMP_BUF_CHK();
if (njcl > 0) {
- k = snprintf(c, clen, "%u/%u mbuf %uKB clusters in use\n",
+ k = scnprintf(c, clen, "%u/%u mbuf %uKB clusters in use\n",
m_16kclusters - m_16kclfree, m_16kclusters,
njclbytes / 1024);
MBUF_DUMP_BUF_CHK();
u_long totused1 = totused / 100;
totpct = (totused1 * 100) / totmem1;
}
- k = snprintf(c, clen, "%lu KB allocated to network (approx. %lu%% "
+ k = scnprintf(c, clen, "%lu KB allocated to network (approx. %lu%% "
"in use)\n", totmem / 1024, totpct);
MBUF_DUMP_BUF_CHK();
- k = snprintf(c, clen, "%lu KB returned to the system\n",
+ k = scnprintf(c, clen, "%lu KB returned to the system\n",
totreturned / 1024);
MBUF_DUMP_BUF_CHK();
net_update_uptime();
- k = snprintf(c, clen,
+ k = scnprintf(c, clen,
"VM allocation failures: contiguous %u, normal %u, one page %u\n",
mb_kmem_contig_failed, mb_kmem_failed, mb_kmem_one_failed);
MBUF_DUMP_BUF_CHK();
if (mb_kmem_contig_failed_ts || mb_kmem_failed_ts ||
mb_kmem_one_failed_ts) {
- k = snprintf(c, clen,
+ k = scnprintf(c, clen,
"VM allocation failure timestamps: contiguous %llu "
"(size %llu), normal %llu (size %llu), one page %llu "
"(now %llu)\n",
mb_kmem_failed_ts, mb_kmem_failed_size,
mb_kmem_one_failed_ts, net_uptime());
MBUF_DUMP_BUF_CHK();
- k = snprintf(c, clen,
+ k = scnprintf(c, clen,
"VM return codes: ");
MBUF_DUMP_BUF_CHK();
for (i = 0;
i < sizeof(mb_kmem_stats) / sizeof(mb_kmem_stats[0]);
i++) {
- k = snprintf(c, clen, "%s: %u ", mb_kmem_stats_labels[i],
+ k = scnprintf(c, clen, "%s: %u ", mb_kmem_stats_labels[i],
mb_kmem_stats[i]);
MBUF_DUMP_BUF_CHK();
}
- k = snprintf(c, clen, "\n");
+ k = scnprintf(c, clen, "\n");
MBUF_DUMP_BUF_CHK();
}
- k = snprintf(c, clen,
+ k = scnprintf(c, clen,
"worker thread runs: %u, expansions: %llu, cl %llu/%llu, "
"bigcl %llu/%llu, 16k %llu/%llu\n", mbuf_worker_run_cnt,
mb_expand_cnt, mb_expand_cl_cnt, mb_expand_cl_total,
mb_expand_16kcl_total);
MBUF_DUMP_BUF_CHK();
if (mbuf_worker_last_runtime != 0) {
- k = snprintf(c, clen, "worker thread last run time: "
+ k = scnprintf(c, clen, "worker thread last run time: "
"%llu (%llu seconds ago)\n",
mbuf_worker_last_runtime,
net_uptime() - mbuf_worker_last_runtime);
MBUF_DUMP_BUF_CHK();
}
if (mbuf_drain_last_runtime != 0) {
- k = snprintf(c, clen, "drain routine last run time: "
+ k = scnprintf(c, clen, "drain routine last run time: "
"%llu (%llu seconds ago)\n",
mbuf_drain_last_runtime,
net_uptime() - mbuf_drain_last_runtime);
}
#if DEBUG || DEVELOPMENT
- k = snprintf(c, clen, "\nworker thread log:\n%s\n", mbwdog_logging);
+ k = scnprintf(c, clen, "\nworker thread log:\n%s\n", mbwdog_logging);
MBUF_DUMP_BUF_CHK();
#endif
continue;
}
if (printed_banner == false) {
- k = snprintf(c, clen,
+ k = scnprintf(c, clen,
"\nlargest allocation failure backtraces:\n");
MBUF_DUMP_BUF_CHK();
printed_banner = true;
}
- k = snprintf(c, clen, "size %llu: < ", trace->size);
+ k = scnprintf(c, clen, "size %llu: < ", trace->size);
MBUF_DUMP_BUF_CHK();
for (i = 0; i < trace->depth; i++) {
if (mleak_stat->ml_isaddr64) {
- k = snprintf(c, clen, "0x%0llx ",
+ k = scnprintf(c, clen, "0x%0llx ",
(uint64_t)VM_KERNEL_UNSLIDE(
trace->addr[i]));
} else {
- k = snprintf(c, clen,
+ k = scnprintf(c, clen,
"0x%08x ",
(uint32_t)VM_KERNEL_UNSLIDE(
trace->addr[i]));
}
MBUF_DUMP_BUF_CHK();
}
- k = snprintf(c, clen, ">\n");
+ k = scnprintf(c, clen, ">\n");
MBUF_DUMP_BUF_CHK();
}
/* mbuf leak detection statistics */
mleak_update_stats();
- k = snprintf(c, clen, "\nmbuf leak detection table:\n");
+ k = scnprintf(c, clen, "\nmbuf leak detection table:\n");
MBUF_DUMP_BUF_CHK();
- k = snprintf(c, clen, "\ttotal captured: %u (one per %u)\n",
+ k = scnprintf(c, clen, "\ttotal captured: %u (one per %u)\n",
mleak_table.mleak_capture / mleak_table.mleak_sample_factor,
mleak_table.mleak_sample_factor);
MBUF_DUMP_BUF_CHK();
- k = snprintf(c, clen, "\ttotal allocs outstanding: %llu\n",
+ k = scnprintf(c, clen, "\ttotal allocs outstanding: %llu\n",
mleak_table.outstanding_allocs);
MBUF_DUMP_BUF_CHK();
- k = snprintf(c, clen, "\tnew hash recorded: %llu allocs, %llu traces\n",
+ k = scnprintf(c, clen, "\tnew hash recorded: %llu allocs, %llu traces\n",
mleak_table.alloc_recorded, mleak_table.trace_recorded);
MBUF_DUMP_BUF_CHK();
- k = snprintf(c, clen, "\thash collisions: %llu allocs, %llu traces\n",
+ k = scnprintf(c, clen, "\thash collisions: %llu allocs, %llu traces\n",
mleak_table.alloc_collisions, mleak_table.trace_collisions);
MBUF_DUMP_BUF_CHK();
- k = snprintf(c, clen, "\toverwrites: %llu allocs, %llu traces\n",
+ k = scnprintf(c, clen, "\toverwrites: %llu allocs, %llu traces\n",
mleak_table.alloc_overwrites, mleak_table.trace_overwrites);
MBUF_DUMP_BUF_CHK();
- k = snprintf(c, clen, "\tlock conflicts: %llu\n\n",
+ k = scnprintf(c, clen, "\tlock conflicts: %llu\n\n",
mleak_table.total_conflicts);
MBUF_DUMP_BUF_CHK();
- k = snprintf(c, clen, "top %d outstanding traces:\n",
+ k = scnprintf(c, clen, "top %d outstanding traces:\n",
mleak_stat->ml_cnt);
MBUF_DUMP_BUF_CHK();
for (i = 0; i < mleak_stat->ml_cnt; i++) {
mltr = &mleak_stat->ml_trace[i];
- k = snprintf(c, clen, "[%d] %llu outstanding alloc(s), "
+ k = scnprintf(c, clen, "[%d] %llu outstanding alloc(s), "
"%llu hit(s), %llu collision(s)\n", (i + 1),
mltr->mltr_allocs, mltr->mltr_hitcount,
mltr->mltr_collisions);
}
if (mleak_stat->ml_isaddr64) {
- k = snprintf(c, clen, MB_LEAK_HDR_64);
+ k = scnprintf(c, clen, MB_LEAK_HDR_64);
} else {
- k = snprintf(c, clen, MB_LEAK_HDR_32);
+ k = scnprintf(c, clen, MB_LEAK_HDR_32);
}
MBUF_DUMP_BUF_CHK();
for (i = 0; i < MLEAK_STACK_DEPTH; i++) {
- k = snprintf(c, clen, "%2d: ", (i + 1));
+ k = scnprintf(c, clen, "%2d: ", (i + 1));
MBUF_DUMP_BUF_CHK();
for (j = 0; j < mleak_stat->ml_cnt; j++) {
mltr = &mleak_stat->ml_trace[j];
if (i < mltr->mltr_depth) {
if (mleak_stat->ml_isaddr64) {
- k = snprintf(c, clen, "0x%0llx ",
+ k = scnprintf(c, clen, "0x%0llx ",
(uint64_t)VM_KERNEL_UNSLIDE(
mltr->mltr_addr[i]));
} else {
- k = snprintf(c, clen,
+ k = scnprintf(c, clen,
"0x%08x ",
(uint32_t)VM_KERNEL_UNSLIDE(
mltr->mltr_addr[i]));
}
} else {
if (mleak_stat->ml_isaddr64) {
- k = snprintf(c, clen,
+ k = scnprintf(c, clen,
MB_LEAK_SPACING_64);
} else {
- k = snprintf(c, clen,
+ k = scnprintf(c, clen,
MB_LEAK_SPACING_32);
}
}
MBUF_DUMP_BUF_CHK();
}
- k = snprintf(c, clen, "\n");
+ k = scnprintf(c, clen, "\n");
MBUF_DUMP_BUF_CHK();
}
done:
vsnprintf(p, sizeof(p), fmt, ap);
va_end(ap);
microuptime(&now);
- len = snprintf(str, sizeof(str),
+ len = scnprintf(str, sizeof(str),
"\n%ld.%d (%d/%llx) %s:%d %s",
now.tv_sec, now.tv_usec,
current_proc()->p_pid,
} else {
error = (*so->so_proto->pr_usrreqs->pru_connect)
(so, nam, p);
+ if (error != 0) {
+ so->so_state &= ~SS_ISCONNECTING;
+ }
}
}
if (dolock) {
error = (*so->so_proto->pr_usrreqs->pru_connectx)
(so, src, dst, p, ifscope, aid, pcid,
flags, arg, arglen, auio, bytes_written);
+ if (error != 0) {
+ so->so_state &= ~SS_ISCONNECTING;
+ }
}
}
bzero(lock_history_str, sizeof(lock_history_str));
for (i = SO_LCKDBG_MAX - 1; i >= 0; i--) {
- n += snprintf(lock_history_str + n,
+ n += scnprintf(lock_history_str + n,
SO_LOCK_HISTORY_STR_LEN - n, "%p:%p ",
so->lock_lr[(so->next_lock_lr + i) % SO_LCKDBG_MAX],
so->unlock_lr[(so->next_unlock_lr + i) % SO_LCKDBG_MAX]);
}
error = soconnectlock(so, sa, 0);
if (error != 0) {
- so->so_state &= ~SS_ISCONNECTING;
goto out;
}
if ((so->so_state & SS_NBIO) && (so->so_state & SS_ISCONNECTING)) {
error = soconnectxlocked(so, src, dst, p, ifscope,
aid, pcid, 0, NULL, 0, auio, bytes_written);
if (error != 0) {
- so->so_state &= ~SS_ISCONNECTING;
goto out;
}
/*
extern void *memchr(const void *, int, size_t);
extern void url_decode(char *str);
+/*
+ * NOTE: snprintf() returns the full length of the formatted string even if it
+ * could not fit in the supplied buffer.
+ * Use scnprintf() if you need the number of bytes actually written
+ * (excluding the trailing '\0').
+ */
int snprintf(char *, size_t, const char *, ...) __printflike(3, 4);
+int scnprintf(char *, size_t, const char *, ...) __printflike(3, 4);
/* sprintf() is being deprecated. Please use snprintf() instead. */
int sprintf(char *bufp, const char *, ...) __deprecated __printflike(2, 3);
extern int vprintf(const char *, va_list) __printflike(1, 0);
extern int vsnprintf(char *, size_t, const char *, va_list) __printflike(3, 0);
+extern int vscnprintf(char *, size_t, const char *, va_list) __printflike(3, 0);
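A concrete illustration of the difference (an 8-byte buffer is assumed):

char buf[8];
int full  = snprintf(buf, sizeof(buf), "%s", "hello world");  /* 11 */
int wrote = scnprintf(buf, sizeof(buf), "%s", "hello world"); /*  7 */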
#if XNU_KERNEL_PRIVATE
extern int vprintf_log_locked(const char *, va_list, bool addcr) __printflike(1, 0);
bzero((caddr_t) dp, UIO_MX);
- dp->d_namlen = snprintf(dp->d_name, sizeof(dp->d_name),
+ dp->d_namlen = scnprintf(dp->d_name, sizeof(dp->d_name),
"%d", i);
dp->d_reclen = UIO_MX;
dp->d_type = DT_UNKNOWN;
ifnet_lock_assert(ifp, IFNET_LCK_ASSERT_EXCLUSIVE);
VERIFY(ll_addr == NULL || ll_addr->sdl_alen == ifp->if_addrlen);
- namelen = snprintf(workbuf, sizeof(workbuf), "%s",
+ namelen = scnprintf(workbuf, sizeof(workbuf), "%s",
if_name(ifp));
masklen = offsetof(struct sockaddr_dl, sdl_data[0])
+ ((namelen > 0) ? namelen : 0);
result = EINVAL;
break;
}
- *len = snprintf(data, *len, "%s", pcb->ipsec_if_xname) + 1;
+ *len = scnprintf(data, *len, "%s", pcb->ipsec_if_xname) + 1;
}
break;
}
result = EINVAL;
break;
}
- *len = snprintf(data, *len, "%s", pcb->utun_if_xname) + 1;
+ *len = scnprintf(data, *len, "%s", pcb->utun_if_xname) + 1;
}
break;
* NECP - Network Extension Control Policy database
* ------------------------------------------------
* The goal of this module is to allow clients connecting via a
- * kernel control socket to create high-level policy sessions, which
+ * policy file descriptor to create high-level policy sessions, which
* are ingested into low-level kernel policies that control and tag
* traffic at the application, socket, and IP layers.
*
* can specify the sub-order for each policy it creates which will be
* used to further sort the kernel policies.
*
- * Kernel Control Socket --> 1 necp_session --> list of necp_session_policy structs
+ * Policy fd --> 1 necp_session --> list of necp_session_policy structs
*
* ------------------------------------------------
* Kernel Policies
unsigned __pad_bits : 6;
};
-static kern_ctl_ref necp_kctlref;
-static u_int32_t necp_family;
-static OSMallocTag necp_malloc_tag;
static lck_grp_attr_t *necp_kernel_policy_grp_attr = NULL;
static lck_attr_t *necp_kernel_policy_mtx_attr = NULL;
static lck_grp_t *necp_kernel_policy_mtx_grp = NULL;
static struct necp_session *necp_create_session(void);
static void necp_delete_session(struct necp_session *session);
-static necp_policy_id necp_handle_policy_add(struct necp_session *session, u_int32_t message_id, mbuf_t packet,
+static necp_policy_id necp_handle_policy_add(struct necp_session *session,
u_int8_t *tlv_buffer, size_t tlv_buffer_length, int offset, int *error);
-static void necp_handle_policy_get(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
-static void necp_handle_policy_delete(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
-static void necp_handle_policy_apply_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
-static void necp_handle_policy_list_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
-static void necp_handle_policy_delete_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
-static int necp_handle_policy_dump_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet,
- user_addr_t out_buffer, size_t out_buffer_length, int offset);
-static void necp_handle_set_session_priority(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
-static void necp_handle_lock_session_to_proc(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
-static void necp_handle_register_service(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
-static void necp_handle_unregister_service(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
+static int necp_handle_policy_dump_all(user_addr_t out_buffer, size_t out_buffer_length);
#define MAX_RESULT_STRING_LEN 64
static inline const char * necp_get_result_description(char *result_string, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter);
goto done;
}
- necp_policy_id new_policy_id = necp_handle_policy_add(session, 0, NULL, tlv_buffer, uap->in_buffer_length, 0, &error);
+ necp_policy_id new_policy_id = necp_handle_policy_add(session, tlv_buffer, uap->in_buffer_length, 0, &error);
if (error != 0) {
NECPLOG(LOG_ERR, "necp_session_add_policy failed to add policy (%d)", error);
goto done;
static int
necp_session_dump_all(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
{
+#pragma unused(session)
int error = 0;
if (uap->out_buffer_length == 0 || uap->out_buffer == 0) {
goto done;
}
- error = necp_handle_policy_dump_all(session, 0, NULL, uap->out_buffer, uap->out_buffer_length, 0);
+ error = necp_handle_policy_dump_all(uap->out_buffer, uap->out_buffer_length);
done:
*retval = error;
return error;
return return_value;
}
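With the kernel-control plumbing removed, sessions are created and driven through the necp file-descriptor syscalls instead; a hedged userspace sketch of a policy dump (necp_open(2) and necp_session_action(2) are private SPI, and the action constant is assumed from net/necp.h):

#include <stdint.h>
#include <stddef.h>
#include <unistd.h>

/* Private SPI prototypes, normally provided by the system libraries. */
extern int necp_open(int flags);
extern int necp_session_action(int necp_fd, uint32_t action,
    uint8_t *in_buffer, size_t in_buffer_length,
    uint8_t *out_buffer, size_t out_buffer_length);

static int
dump_policies(uint8_t *out, size_t out_len)
{
	int fd = necp_open(0);
	if (fd < 0) {
		return -1;
	}
	/* NECP_SESSION_ACTION_POLICY_DUMP_ALL, per net/necp.h */
	int error = necp_session_action(fd,
	    NECP_SESSION_ACTION_POLICY_DUMP_ALL, NULL, 0, out, out_len);
	close(fd);
	return error;
}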
-// Kernel Control functions
-static errno_t necp_register_control(void);
-static errno_t necp_ctl_connect(kern_ctl_ref kctlref, struct sockaddr_ctl *sac, void **unitinfo);
-static errno_t necp_ctl_disconnect(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo);
-static errno_t necp_ctl_send(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, mbuf_t m, int flags);
-static void necp_ctl_rcvd(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int flags);
-static errno_t necp_ctl_getopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int opt, void *data, size_t *len);
-static errno_t necp_ctl_setopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int opt, void *data, size_t len);
-
-static bool necp_send_ctl_data(struct necp_session *session, u_int8_t *buffer, size_t buffer_size);
-
struct necp_resolver_key_state {
const struct ccdigest_info *digest_info;
uint8_t key[CCSHA256_OUTPUT_SIZE];
{
errno_t result = 0;
- result = necp_register_control();
- if (result != 0) {
- goto done;
- }
-
necp_kernel_policy_grp_attr = lck_grp_attr_alloc_init();
if (necp_kernel_policy_grp_attr == NULL) {
NECPLOG0(LOG_ERR, "lck_grp_attr_alloc_init failed");
lck_grp_attr_free(necp_route_rule_grp_attr);
necp_route_rule_grp_attr = NULL;
}
- if (necp_kctlref != NULL) {
- ctl_deregister(necp_kctlref);
- necp_kctlref = NULL;
- }
}
return result;
}
-static errno_t
-necp_register_control(void)
-{
- struct kern_ctl_reg kern_ctl;
- errno_t result = 0;
-
- // Create a tag to allocate memory
- necp_malloc_tag = OSMalloc_Tagalloc(NECP_CONTROL_NAME, OSMT_DEFAULT);
-
- // Find a unique value for our interface family
- result = mbuf_tag_id_find(NECP_CONTROL_NAME, &necp_family);
- if (result != 0) {
- NECPLOG(LOG_ERR, "mbuf_tag_id_find_internal failed: %d", result);
- return result;
- }
-
- bzero(&kern_ctl, sizeof(kern_ctl));
- strlcpy(kern_ctl.ctl_name, NECP_CONTROL_NAME, sizeof(kern_ctl.ctl_name));
- kern_ctl.ctl_name[sizeof(kern_ctl.ctl_name) - 1] = 0;
- kern_ctl.ctl_flags = CTL_FLAG_PRIVILEGED; // Require root
- kern_ctl.ctl_sendsize = 64 * 1024;
- kern_ctl.ctl_recvsize = 64 * 1024;
- kern_ctl.ctl_connect = necp_ctl_connect;
- kern_ctl.ctl_disconnect = necp_ctl_disconnect;
- kern_ctl.ctl_send = necp_ctl_send;
- kern_ctl.ctl_rcvd = necp_ctl_rcvd;
- kern_ctl.ctl_setopt = necp_ctl_setopt;
- kern_ctl.ctl_getopt = necp_ctl_getopt;
-
- result = ctl_register(&kern_ctl, &necp_kctlref);
- if (result != 0) {
- NECPLOG(LOG_ERR, "ctl_register failed: %d", result);
- return result;
- }
-
- return 0;
-}
-
static void
necp_post_change_event(struct kev_necp_policies_changed_data *necp_event_data)
{
kev_post_msg(&ev_msg);
}
-static errno_t
-necp_ctl_connect(kern_ctl_ref kctlref, struct sockaddr_ctl *sac, void **unitinfo)
-{
-#pragma unused(kctlref, sac)
- *unitinfo = necp_create_session();
- if (*unitinfo == NULL) {
- // Could not allocate session
- return ENOBUFS;
- }
-
- return 0;
-}
-
-static errno_t
-necp_ctl_disconnect(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo)
-{
-#pragma unused(kctlref, unit)
- struct necp_session *session = (struct necp_session *)unitinfo;
- if (session != NULL) {
- necp_policy_mark_all_for_deletion(session);
- necp_policy_apply_all(session);
- necp_delete_session((struct necp_session *)unitinfo);
- }
-
- return 0;
-}
-
-
-// Message handling
-static int
-necp_packet_find_tlv(mbuf_t packet, int offset, u_int8_t type, int *err, int next)
-{
- size_t cursor = offset;
- int error = 0;
- u_int32_t curr_length;
- u_int8_t curr_type;
-
- *err = 0;
-
- do {
- if (!next) {
- error = mbuf_copydata(packet, cursor, sizeof(curr_type), &curr_type);
- if (error) {
- *err = ENOENT;
- return -1;
- }
- } else {
- next = 0;
- curr_type = NECP_TLV_NIL;
- }
-
- if (curr_type != type) {
- cursor += sizeof(curr_type);
- error = mbuf_copydata(packet, cursor, sizeof(curr_length), &curr_length);
- if (error) {
- *err = error;
- return -1;
- }
- cursor += (sizeof(curr_length) + curr_length);
- }
- } while (curr_type != type);
-
- return cursor;
-}
-
-static int
-necp_packet_get_tlv_at_offset(mbuf_t packet, int tlv_offset, u_int32_t buff_len, void *buff, u_int32_t *value_size)
-{
- int error = 0;
- u_int32_t length;
-
- if (tlv_offset < 0) {
- return EINVAL;
- }
-
- error = mbuf_copydata(packet, tlv_offset + sizeof(u_int8_t), sizeof(length), &length);
- if (error) {
- return error;
- }
-
- u_int32_t total_len = m_length2(packet, NULL);
- if (total_len < (tlv_offset + sizeof(u_int8_t) + sizeof(length) + length)) {
- NECPLOG(LOG_ERR, "Got a bad TLV, length (%u) + offset (%d) < total length (%u)",
- length, (tlv_offset + sizeof(u_int8_t) + sizeof(length)), total_len);
- return EINVAL;
- }
-
- if (value_size != NULL) {
- *value_size = length;
- }
-
- if (buff != NULL && buff_len > 0) {
- u_int32_t to_copy = (length < buff_len) ? length : buff_len;
- error = mbuf_copydata(packet, tlv_offset + sizeof(u_int8_t) + sizeof(length), to_copy, buff);
- if (error) {
- return error;
- }
- }
-
- return 0;
-}
-
-static u_int8_t *
-necp_buffer_write_packet_header(u_int8_t *buffer, u_int8_t packet_type, u_int8_t flags, u_int32_t message_id)
-{
- ((struct necp_packet_header *)(void *)buffer)->packet_type = packet_type;
- ((struct necp_packet_header *)(void *)buffer)->flags = flags;
- ((struct necp_packet_header *)(void *)buffer)->message_id = message_id;
- return buffer + sizeof(struct necp_packet_header);
-}
-
static inline bool
necp_buffer_write_tlv_validate(u_int8_t *cursor, u_int8_t type, u_int32_t length,
u_int8_t *buffer, u_int32_t buffer_length)
}
static int
-necp_find_tlv(mbuf_t packet, u_int8_t *buffer, u_int32_t buffer_length, int offset, u_int8_t type, int *err, int next)
+necp_find_tlv(u_int8_t *buffer, u_int32_t buffer_length, int offset, u_int8_t type, int *err, int next)
{
int cursor = -1;
- if (packet != NULL) {
- cursor = necp_packet_find_tlv(packet, offset, type, err, next);
- } else if (buffer != NULL) {
+ if (buffer != NULL) {
cursor = necp_buffer_find_tlv(buffer, buffer_length, offset, type, err, next);
}
return cursor;
}
static int
-necp_get_tlv_at_offset(mbuf_t packet, u_int8_t *buffer, u_int32_t buffer_length,
+necp_get_tlv_at_offset(u_int8_t *buffer, u_int32_t buffer_length,
int tlv_offset, u_int32_t out_buffer_length, void *out_buffer, u_int32_t *value_size)
{
- if (packet != NULL) {
- // Handle mbuf parsing
- return necp_packet_get_tlv_at_offset(packet, tlv_offset, out_buffer_length, out_buffer, value_size);
- }
-
if (buffer == NULL) {
NECPLOG0(LOG_ERR, "necp_get_tlv_at_offset buffer is NULL");
return EINVAL;
}
static int
-necp_get_tlv(mbuf_t packet, u_int8_t *buffer, u_int32_t buffer_length,
+necp_get_tlv(u_int8_t *buffer, u_int32_t buffer_length,
int offset, u_int8_t type, u_int32_t buff_len, void *buff, u_int32_t *value_size)
{
int error = 0;
- int tlv_offset = necp_find_tlv(packet, buffer, buffer_length, offset, type, &error, 0);
+ int tlv_offset = necp_find_tlv(buffer, buffer_length, offset, type, &error, 0);
if (tlv_offset < 0) {
return error;
}
- return necp_get_tlv_at_offset(packet, buffer, buffer_length, tlv_offset, buff_len, buff, value_size);
-}
-
-static bool
-necp_send_ctl_data(struct necp_session *session, u_int8_t *buffer, size_t buffer_size)
-{
- int error;
-
- if (necp_kctlref == NULL || session == NULL || buffer == NULL || buffer_size == 0) {
- return FALSE;
- }
-
- error = ctl_enqueuedata(necp_kctlref, session->control_unit, buffer, buffer_size, CTL_DATA_EOR);
-
- return error == 0;
-}
-
-static bool
-necp_send_success_response(struct necp_session *session, u_int8_t packet_type, u_int32_t message_id)
-{
- bool success = TRUE;
- u_int8_t *response = NULL;
- u_int8_t *cursor = NULL;
- size_t response_size = sizeof(struct necp_packet_header) + sizeof(u_int8_t) + sizeof(u_int32_t);
- MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK);
- if (response == NULL) {
- return FALSE;
- }
- cursor = response;
- cursor = necp_buffer_write_packet_header(cursor, packet_type, NECP_PACKET_FLAGS_RESPONSE, message_id);
- cursor = necp_buffer_write_tlv(cursor, NECP_TLV_NIL, 0, NULL, response, response_size);
-
- if (!(success = necp_send_ctl_data(session, (u_int8_t *)response, response_size))) {
- NECPLOG0(LOG_ERR, "Failed to send response");
- }
-
- FREE(response, M_NECP);
- return success;
-}
-
-static bool
-necp_send_error_response(struct necp_session *session, u_int8_t packet_type, u_int32_t message_id, u_int32_t error)
-{
- bool success = TRUE;
- u_int8_t *response = NULL;
- u_int8_t *cursor = NULL;
- size_t response_size = sizeof(struct necp_packet_header) + sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(u_int32_t);
- MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK);
- if (response == NULL) {
- return FALSE;
- }
- cursor = response;
- cursor = necp_buffer_write_packet_header(cursor, packet_type, NECP_PACKET_FLAGS_RESPONSE, message_id);
- cursor = necp_buffer_write_tlv(cursor, NECP_TLV_ERROR, sizeof(error), &error, response, response_size);
-
- if (!(success = necp_send_ctl_data(session, (u_int8_t *)response, response_size))) {
- NECPLOG0(LOG_ERR, "Failed to send response");
- }
-
- FREE(response, M_NECP);
- return success;
-}
-
-static bool
-necp_send_policy_id_response(struct necp_session *session, u_int8_t packet_type, u_int32_t message_id, necp_policy_id policy_id)
-{
- bool success = TRUE;
- u_int8_t *response = NULL;
- u_int8_t *cursor = NULL;
- size_t response_size = sizeof(struct necp_packet_header) + sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(u_int32_t);
- MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK);
- if (response == NULL) {
- return FALSE;
- }
- cursor = response;
- cursor = necp_buffer_write_packet_header(cursor, packet_type, NECP_PACKET_FLAGS_RESPONSE, message_id);
- cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ID, sizeof(policy_id), &policy_id, response, response_size);
-
- if (!(success = necp_send_ctl_data(session, (u_int8_t *)response, response_size))) {
- NECPLOG0(LOG_ERR, "Failed to send response");
- }
-
- FREE(response, M_NECP);
- return success;
-}
-
-static errno_t
-necp_ctl_send(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, mbuf_t packet, int flags)
-{
-#pragma unused(kctlref, unit, flags)
- struct necp_session *session = (struct necp_session *)unitinfo;
- struct necp_packet_header header;
- int error = 0;
-
- if (session == NULL) {
- NECPLOG0(LOG_ERR, "Got a NULL session");
- error = EINVAL;
- goto done;
- }
-
- if (mbuf_pkthdr_len(packet) < sizeof(header)) {
- NECPLOG(LOG_ERR, "Got a bad packet, length (%lu) < sizeof header (%lu)", mbuf_pkthdr_len(packet), sizeof(header));
- error = EINVAL;
- goto done;
- }
-
- error = mbuf_copydata(packet, 0, sizeof(header), &header);
- if (error) {
- NECPLOG(LOG_ERR, "mbuf_copydata failed for the header: %d", error);
- error = ENOBUFS;
- goto done;
- }
-
- if (session->proc_locked) {
- // Verify that the calling process is allowed to send messages
- uuid_t proc_uuid;
- proc_getexecutableuuid(current_proc(), proc_uuid, sizeof(proc_uuid));
- if (uuid_compare(proc_uuid, session->proc_uuid) != 0) {
- necp_send_error_response(session, header.packet_type, header.message_id, NECP_ERROR_INVALID_PROCESS);
- goto done;
- }
- } else {
- // If not locked, update the proc_uuid and proc_pid of the session
- proc_getexecutableuuid(current_proc(), session->proc_uuid, sizeof(session->proc_uuid));
- session->proc_pid = proc_pid(current_proc());
- }
-
- switch (header.packet_type) {
- case NECP_PACKET_TYPE_POLICY_ADD: {
- necp_handle_policy_add(session, header.message_id, packet, NULL, 0, sizeof(header), NULL);
- break;
- }
- case NECP_PACKET_TYPE_POLICY_GET: {
- necp_handle_policy_get(session, header.message_id, packet, sizeof(header));
- break;
- }
- case NECP_PACKET_TYPE_POLICY_DELETE: {
- necp_handle_policy_delete(session, header.message_id, packet, sizeof(header));
- break;
- }
- case NECP_PACKET_TYPE_POLICY_APPLY_ALL: {
- necp_handle_policy_apply_all(session, header.message_id, packet, sizeof(header));
- break;
- }
- case NECP_PACKET_TYPE_POLICY_LIST_ALL: {
- necp_handle_policy_list_all(session, header.message_id, packet, sizeof(header));
- break;
- }
- case NECP_PACKET_TYPE_POLICY_DELETE_ALL: {
- necp_handle_policy_delete_all(session, header.message_id, packet, sizeof(header));
- break;
- }
- case NECP_PACKET_TYPE_POLICY_DUMP_ALL: {
- necp_handle_policy_dump_all(session, header.message_id, packet, 0, 0, sizeof(header));
- break;
- }
- case NECP_PACKET_TYPE_SET_SESSION_PRIORITY: {
- necp_handle_set_session_priority(session, header.message_id, packet, sizeof(header));
- break;
- }
- case NECP_PACKET_TYPE_LOCK_SESSION_TO_PROC: {
- necp_handle_lock_session_to_proc(session, header.message_id, packet, sizeof(header));
- break;
- }
- case NECP_PACKET_TYPE_REGISTER_SERVICE: {
- necp_handle_register_service(session, header.message_id, packet, sizeof(header));
- break;
- }
- case NECP_PACKET_TYPE_UNREGISTER_SERVICE: {
- necp_handle_unregister_service(session, header.message_id, packet, sizeof(header));
- break;
- }
- default: {
- NECPLOG(LOG_ERR, "Received unknown message type %d", header.packet_type);
- necp_send_error_response(session, header.packet_type, header.message_id, NECP_ERROR_UNKNOWN_PACKET_TYPE);
- break;
- }
- }
-
-done:
- mbuf_freem(packet);
- return error;
-}
-
-static void
-necp_ctl_rcvd(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int flags)
-{
-#pragma unused(kctlref, unit, unitinfo, flags)
- return;
-}
-
-static errno_t
-necp_ctl_getopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int opt, void *data, size_t *len)
-{
-#pragma unused(kctlref, unit, unitinfo, opt, data, len)
- return 0;
-}
-
-static errno_t
-necp_ctl_setopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int opt, void *data, size_t len)
-{
-#pragma unused(kctlref, unit, unitinfo, opt, data, len)
- return 0;
+ return necp_get_tlv_at_offset(buffer, buffer_length, tlv_offset, buff_len, buff, value_size);
}
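For reference, the buffer format these helpers walk is a packed type/length/value stream: one byte of type, a four-byte length, then the value. A minimal reader consistent with the bounds checks above:

/* Returns a pointer to the value, or NULL if the record does not fit. */
static const u_int8_t *
tlv_value(const u_int8_t *buffer, u_int32_t buffer_length, u_int32_t offset,
    u_int8_t *type, u_int32_t *length)
{
	if (offset + sizeof(u_int8_t) + sizeof(u_int32_t) > buffer_length) {
		return NULL;
	}
	*type = buffer[offset];
	memcpy(length, buffer + offset + sizeof(u_int8_t), sizeof(u_int32_t));
	if (*length > buffer_length - offset - sizeof(u_int8_t) - sizeof(u_int32_t)) {
		return NULL;
	}
	return buffer + offset + sizeof(u_int8_t) + sizeof(u_int32_t);
}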
// Session Management
}
}
-static void
-necp_handle_set_session_priority(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
-{
- int error;
- struct necp_session_policy *policy = NULL;
- struct necp_session_policy *temp_policy = NULL;
- u_int32_t response_error = NECP_ERROR_INTERNAL;
- u_int32_t requested_session_priority = NECP_SESSION_PRIORITY_UNKNOWN;
-
- // Read policy id
- error = necp_get_tlv(packet, NULL, 0, offset, NECP_TLV_SESSION_PRIORITY, sizeof(requested_session_priority), &requested_session_priority, NULL);
- if (error) {
- NECPLOG(LOG_ERR, "Failed to get session priority: %d", error);
- response_error = NECP_ERROR_INVALID_TLV;
- goto fail;
- }
-
- if (session == NULL) {
- NECPLOG0(LOG_ERR, "Failed to find session");
- response_error = NECP_ERROR_INTERNAL;
- goto fail;
- }
-
- // Enforce special session priorities with entitlements
- if (requested_session_priority == NECP_SESSION_PRIORITY_CONTROL ||
- requested_session_priority == NECP_SESSION_PRIORITY_PRIVILEGED_TUNNEL) {
- errno_t cred_result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES, 0);
- if (cred_result != 0) {
- NECPLOG(LOG_ERR, "Session does not hold necessary entitlement to claim priority level %d", requested_session_priority);
- goto fail;
- }
- }
-
- if (session->session_priority != requested_session_priority) {
- session->session_priority = requested_session_priority;
- session->session_order = necp_allocate_new_session_order(session->session_priority, session->control_unit);
- session->dirty = TRUE;
-
- // Mark all policies as needing updates
- LIST_FOREACH_SAFE(policy, &session->policies, chain, temp_policy) {
- policy->pending_update = TRUE;
- }
- }
-
- necp_send_success_response(session, NECP_PACKET_TYPE_SET_SESSION_PRIORITY, message_id);
- return;
-
-fail:
- necp_send_error_response(session, NECP_PACKET_TYPE_SET_SESSION_PRIORITY, message_id, response_error);
-}
-
-static void
-necp_handle_lock_session_to_proc(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
-{
-#pragma unused(packet, offset)
- // proc_uuid already filled out
- session->proc_locked = TRUE;
- necp_send_success_response(session, NECP_PACKET_TYPE_LOCK_SESSION_TO_PROC, message_id);
-}
-
-static void
-necp_handle_register_service(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
-{
- int error;
- struct necp_service_registration *new_service = NULL;
- u_int32_t response_error = NECP_ERROR_INTERNAL;
- uuid_t service_uuid;
- uuid_clear(service_uuid);
-
- if (session == NULL) {
- NECPLOG0(LOG_ERR, "Failed to find session");
- response_error = NECP_ERROR_INTERNAL;
- goto fail;
- }
-
- // Enforce entitlements
- errno_t cred_result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES, 0);
- if (cred_result != 0) {
- NECPLOG0(LOG_ERR, "Session does not hold necessary entitlement to register service");
- goto fail;
- }
-
- // Read service uuid
- error = necp_get_tlv(packet, NULL, 0, offset, NECP_TLV_SERVICE_UUID, sizeof(uuid_t), service_uuid, NULL);
- if (error) {
- NECPLOG(LOG_ERR, "Failed to get service UUID: %d", error);
- response_error = NECP_ERROR_INVALID_TLV;
- goto fail;
- }
-
- MALLOC(new_service, struct necp_service_registration *, sizeof(*new_service), M_NECP, M_WAITOK);
- if (new_service == NULL) {
- NECPLOG0(LOG_ERR, "Failed to allocate service registration");
- response_error = NECP_ERROR_INTERNAL;
- goto fail;
- }
-
- lck_rw_lock_exclusive(&necp_kernel_policy_lock);
- memset(new_service, 0, sizeof(*new_service));
- new_service->service_id = necp_create_uuid_service_id_mapping(service_uuid);
- LIST_INSERT_HEAD(&session->services, new_service, session_chain);
- LIST_INSERT_HEAD(&necp_registered_service_list, new_service, kernel_chain);
- lck_rw_done(&necp_kernel_policy_lock);
-
- necp_send_success_response(session, NECP_PACKET_TYPE_REGISTER_SERVICE, message_id);
- return;
-fail:
- necp_send_error_response(session, NECP_PACKET_TYPE_REGISTER_SERVICE, message_id, response_error);
-}
-
-static void
-necp_handle_unregister_service(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
-{
- int error;
- struct necp_service_registration *service = NULL;
- struct necp_service_registration *temp_service = NULL;
- u_int32_t response_error = NECP_ERROR_INTERNAL;
- struct necp_uuid_id_mapping *mapping = NULL;
- uuid_t service_uuid;
- uuid_clear(service_uuid);
-
- if (session == NULL) {
- NECPLOG0(LOG_ERR, "Failed to find session");
- response_error = NECP_ERROR_INTERNAL;
- goto fail;
- }
-
- // Read service uuid
- error = necp_get_tlv(packet, NULL, 0, offset, NECP_TLV_SERVICE_UUID, sizeof(uuid_t), service_uuid, NULL);
- if (error) {
- NECPLOG(LOG_ERR, "Failed to get service UUID: %d", error);
- response_error = NECP_ERROR_INVALID_TLV;
- goto fail;
- }
-
- // Mark remove all matching services for this session
- lck_rw_lock_exclusive(&necp_kernel_policy_lock);
- mapping = necp_uuid_lookup_service_id_locked(service_uuid);
- if (mapping != NULL) {
- LIST_FOREACH_SAFE(service, &session->services, session_chain, temp_service) {
- if (service->service_id == mapping->id) {
- LIST_REMOVE(service, session_chain);
- LIST_REMOVE(service, kernel_chain);
- FREE(service, M_NECP);
- }
- }
- necp_remove_uuid_service_id_mapping(service_uuid);
- }
- lck_rw_done(&necp_kernel_policy_lock);
-
- necp_send_success_response(session, NECP_PACKET_TYPE_UNREGISTER_SERVICE, message_id);
- return;
-fail:
- necp_send_error_response(session, NECP_PACKET_TYPE_UNREGISTER_SERVICE, message_id, response_error);
-}
-
static necp_policy_id
-necp_handle_policy_add(struct necp_session *session, u_int32_t message_id, mbuf_t packet,
+necp_handle_policy_add(struct necp_session *session,
u_int8_t *tlv_buffer, size_t tlv_buffer_length, int offset, int *return_error)
{
bool has_default_condition = FALSE;
u_int32_t policy_result_size = 0;
// Read policy order
- error = necp_get_tlv(packet, tlv_buffer, tlv_buffer_length, offset, NECP_TLV_POLICY_ORDER, sizeof(order), &order, NULL);
+ error = necp_get_tlv(tlv_buffer, tlv_buffer_length, offset, NECP_TLV_POLICY_ORDER, sizeof(order), &order, NULL);
if (error) {
NECPLOG(LOG_ERR, "Failed to get policy order: %d", error);
response_error = NECP_ERROR_INVALID_TLV;
}
// Read policy result
- cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, offset, NECP_TLV_POLICY_RESULT, &error, 0);
+ cursor = necp_find_tlv(tlv_buffer, tlv_buffer_length, offset, NECP_TLV_POLICY_RESULT, &error, 0);
if (error || cursor < 0) {
NECPLOG(LOG_ERR, "Failed to find policy result TLV: %d", error);
response_error = NECP_ERROR_INVALID_TLV;
goto fail;
}
- error = necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &policy_result_size);
+ error = necp_get_tlv_at_offset(tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &policy_result_size);
if (error || policy_result_size == 0) {
NECPLOG(LOG_ERR, "Failed to get policy result length: %d", error);
response_error = NECP_ERROR_INVALID_TLV;
response_error = NECP_ERROR_INTERNAL;
goto fail;
}
- error = necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, policy_result_size, policy_result, NULL);
+ error = necp_get_tlv_at_offset(tlv_buffer, tlv_buffer_length, cursor, policy_result_size, policy_result, NULL);
if (error) {
NECPLOG(LOG_ERR, "Failed to get policy result: %d", error);
response_error = NECP_ERROR_POLICY_RESULT_INVALID;
if (necp_policy_result_requires_route_rules(policy_result, policy_result_size)) {
// Read route rules conditions
- for (cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, offset, NECP_TLV_ROUTE_RULE, &error, 0);
+ for (cursor = necp_find_tlv(tlv_buffer, tlv_buffer_length, offset, NECP_TLV_ROUTE_RULE, &error, 0);
cursor >= 0;
- cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, cursor, NECP_TLV_ROUTE_RULE, &error, 1)) {
+ cursor = necp_find_tlv(tlv_buffer, tlv_buffer_length, cursor, NECP_TLV_ROUTE_RULE, &error, 1)) {
u_int32_t route_rule_size = 0;
- necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &route_rule_size);
+ necp_get_tlv_at_offset(tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &route_rule_size);
if (os_add_overflow(route_rules_array_size,
(sizeof(u_int8_t) + sizeof(u_int32_t) + route_rule_size),
&route_rules_array_size)) {
}
route_rules_array_cursor = 0;
- for (cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, offset, NECP_TLV_ROUTE_RULE, &error, 0);
+ for (cursor = necp_find_tlv(tlv_buffer, tlv_buffer_length, offset, NECP_TLV_ROUTE_RULE, &error, 0);
cursor >= 0;
- cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, cursor, NECP_TLV_ROUTE_RULE, &error, 1)) {
+ cursor = necp_find_tlv(tlv_buffer, tlv_buffer_length, cursor, NECP_TLV_ROUTE_RULE, &error, 1)) {
u_int8_t route_rule_type = NECP_TLV_ROUTE_RULE;
u_int32_t route_rule_size = 0;
- necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &route_rule_size);
+ necp_get_tlv_at_offset(tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &route_rule_size);
if (route_rule_size > 0 &&
(sizeof(route_rule_type) + sizeof(route_rule_size) + route_rule_size) <= (route_rules_array_size - route_rules_array_cursor)) {
// Add type
route_rules_array_cursor += sizeof(route_rule_size);
// Add value
- necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, route_rule_size, (route_rules_array + route_rules_array_cursor), NULL);
+ necp_get_tlv_at_offset(tlv_buffer, tlv_buffer_length, cursor, route_rule_size, (route_rules_array + route_rules_array_cursor), NULL);
if (!necp_policy_route_rule_is_valid((route_rules_array + route_rules_array_cursor), route_rule_size)) {
NECPLOG0(LOG_ERR, "Failed to validate policy route rule");
}
// Read policy conditions
- for (cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, offset, NECP_TLV_POLICY_CONDITION, &error, 0);
+ for (cursor = necp_find_tlv(tlv_buffer, tlv_buffer_length, offset, NECP_TLV_POLICY_CONDITION, &error, 0);
cursor >= 0;
- cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, cursor, NECP_TLV_POLICY_CONDITION, &error, 1)) {
+ cursor = necp_find_tlv(tlv_buffer, tlv_buffer_length, cursor, NECP_TLV_POLICY_CONDITION, &error, 1)) {
u_int32_t condition_size = 0;
- necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &condition_size);
+ necp_get_tlv_at_offset(tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &condition_size);
if (condition_size > 0) {
if (os_add_overflow(conditions_array_size,
}
conditions_array_cursor = 0;
- for (cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, offset, NECP_TLV_POLICY_CONDITION, &error, 0);
+ for (cursor = necp_find_tlv(tlv_buffer, tlv_buffer_length, offset, NECP_TLV_POLICY_CONDITION, &error, 0);
cursor >= 0;
- cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, cursor, NECP_TLV_POLICY_CONDITION, &error, 1)) {
+ cursor = necp_find_tlv(tlv_buffer, tlv_buffer_length, cursor, NECP_TLV_POLICY_CONDITION, &error, 1)) {
u_int8_t condition_type = NECP_TLV_POLICY_CONDITION;
u_int32_t condition_size = 0;
- necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &condition_size);
+ necp_get_tlv_at_offset(tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &condition_size);
if (condition_size > 0 &&
(sizeof(condition_type) + sizeof(condition_size) + condition_size) <= (conditions_array_size - conditions_array_cursor)) {
// Add type
conditions_array_cursor += sizeof(condition_size);
// Add value
- necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, condition_size, (conditions_array + conditions_array_cursor), NULL);
+ necp_get_tlv_at_offset(tlv_buffer, tlv_buffer_length, cursor, condition_size, (conditions_array + conditions_array_cursor), NULL);
if (!necp_policy_condition_is_valid((conditions_array + conditions_array_cursor), condition_size, necp_policy_result_get_type_from_buffer(policy_result, policy_result_size))) {
NECPLOG0(LOG_ERR, "Failed to validate policy condition");
response_error = NECP_ERROR_POLICY_CONDITIONS_INVALID;
goto fail;
}
- if (packet != NULL) {
- necp_send_policy_id_response(session, NECP_PACKET_TYPE_POLICY_ADD, message_id, policy->local_id);
- }
return policy->local_id;
fail:
FREE(route_rules_array, M_NECP);
}
- if (packet != NULL) {
- necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_ADD, message_id, response_error);
- }
if (return_error != NULL) {
*return_error = necp_get_posix_error_for_necp_error(response_error);
}
return 0;
}
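
With the mbuf `packet` parameter gone, the handler parses a flat, caller-supplied TLV buffer in two passes: measure every payload with overflow checks, then allocate once and copy. Below is a minimal sketch of the measuring pass, reusing the accessors called above; sketch_route_rules_total() itself is illustrative, not part of the change.

/* Pass 1 of the two-pass walk above: accumulate the packed size of every
 * route-rule TLV, rejecting arithmetic wrap before anything is allocated.
 * Returns 0 on overflow so the caller can bail out early. */
static size_t
sketch_route_rules_total(u_int8_t *tlv_buffer, size_t tlv_buffer_length, int offset)
{
	int error = 0;
	size_t total = 0;

	for (int cursor = necp_find_tlv(tlv_buffer, tlv_buffer_length, offset,
	    NECP_TLV_ROUTE_RULE, &error, 0);
	    cursor >= 0;
	    cursor = necp_find_tlv(tlv_buffer, tlv_buffer_length, cursor,
	    NECP_TLV_ROUTE_RULE, &error, 1)) {
		u_int32_t rule_size = 0;
		necp_get_tlv_at_offset(tlv_buffer, tlv_buffer_length, cursor,
		    0, NULL, &rule_size);
		/* type byte + length word + payload, as in the loops above */
		if (os_add_overflow(total,
		    sizeof(u_int8_t) + sizeof(u_int32_t) + rule_size, &total)) {
			return 0;
		}
	}
	return total;
}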
-static void
-necp_handle_policy_get(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
-{
-#pragma unused(offset)
- int error;
- u_int8_t *response = NULL;
- u_int8_t *cursor = NULL;
- u_int32_t response_error = NECP_ERROR_INTERNAL;
- necp_policy_id policy_id = 0;
- u_int32_t order_tlv_size = 0;
- u_int32_t result_tlv_size = 0;
- u_int32_t response_size = 0;
-
- struct necp_session_policy *policy = NULL;
-
- // Read policy id
- error = necp_get_tlv(packet, NULL, 0, offset, NECP_TLV_POLICY_ID, sizeof(policy_id), &policy_id, NULL);
- if (error) {
- NECPLOG(LOG_ERR, "Failed to get policy id: %d", error);
- response_error = NECP_ERROR_INVALID_TLV;
- goto fail;
- }
-
- policy = necp_policy_find(session, policy_id);
- if (policy == NULL || policy->pending_deletion) {
- NECPLOG(LOG_ERR, "Failed to find policy with id %d", policy_id);
- response_error = NECP_ERROR_POLICY_ID_NOT_FOUND;
- goto fail;
- }
-
- order_tlv_size = sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(necp_policy_order);
- result_tlv_size = (policy->result_size ? (sizeof(u_int8_t) + sizeof(u_int32_t) + policy->result_size) : 0);
- response_size = sizeof(struct necp_packet_header) + order_tlv_size + result_tlv_size + policy->conditions_size;
- MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK);
- if (response == NULL) {
- necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_GET, message_id, NECP_ERROR_INTERNAL);
- return;
- }
-
- cursor = response;
- cursor = necp_buffer_write_packet_header(cursor, NECP_PACKET_TYPE_POLICY_GET, NECP_PACKET_FLAGS_RESPONSE, message_id);
- cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ORDER, sizeof(necp_policy_order), &policy->order, response, response_size);
-
- if (result_tlv_size) {
- cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_RESULT, policy->result_size, &policy->result, response, response_size);
- }
- if (policy->conditions_size) {
- memcpy(((u_int8_t *)(void *)(cursor)), policy->conditions, policy->conditions_size);
- }
-
- if (!necp_send_ctl_data(session, (u_int8_t *)response, response_size)) {
- NECPLOG0(LOG_ERR, "Failed to send response");
- }
-
- FREE(response, M_NECP);
- return;
-
-fail:
- necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_GET, message_id, response_error);
-}
-
-static void
-necp_handle_policy_delete(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
-{
- int error;
- u_int32_t response_error = NECP_ERROR_INTERNAL;
- necp_policy_id policy_id = 0;
-
- struct necp_session_policy *policy = NULL;
-
- // Read policy id
- error = necp_get_tlv(packet, NULL, 0, offset, NECP_TLV_POLICY_ID, sizeof(policy_id), &policy_id, NULL);
- if (error) {
- NECPLOG(LOG_ERR, "Failed to get policy id: %d", error);
- response_error = NECP_ERROR_INVALID_TLV;
- goto fail;
- }
-
- policy = necp_policy_find(session, policy_id);
- if (policy == NULL || policy->pending_deletion) {
- NECPLOG(LOG_ERR, "Failed to find policy with id %d", policy_id);
- response_error = NECP_ERROR_POLICY_ID_NOT_FOUND;
- goto fail;
- }
-
- necp_policy_mark_for_deletion(session, policy);
-
- necp_send_success_response(session, NECP_PACKET_TYPE_POLICY_DELETE, message_id);
- return;
-
-fail:
- necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_DELETE, message_id, response_error);
-}
-
-static void
-necp_handle_policy_apply_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
-{
-#pragma unused(packet, offset)
- necp_policy_apply_all(session);
- necp_send_success_response(session, NECP_PACKET_TYPE_POLICY_APPLY_ALL, message_id);
-}
-
-static void
-necp_handle_policy_list_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
-{
-#pragma unused(packet, offset)
- u_int32_t tlv_size = (sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(u_int32_t));
- u_int32_t response_size = 0;
- u_int8_t *response = NULL;
- u_int8_t *cursor = NULL;
- int num_policies = 0;
- int cur_policy_index = 0;
- struct necp_session_policy *policy;
-
- LIST_FOREACH(policy, &session->policies, chain) {
- if (!policy->pending_deletion) {
- num_policies++;
- }
- }
-
- // Create a response with one Policy ID TLV for each policy
- response_size = sizeof(struct necp_packet_header) + num_policies * tlv_size;
- MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK);
- if (response == NULL) {
- necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_LIST_ALL, message_id, NECP_ERROR_INTERNAL);
- return;
- }
-
- cursor = response;
- cursor = necp_buffer_write_packet_header(cursor, NECP_PACKET_TYPE_POLICY_LIST_ALL, NECP_PACKET_FLAGS_RESPONSE, message_id);
-
- LIST_FOREACH(policy, &session->policies, chain) {
- if (!policy->pending_deletion && cur_policy_index < num_policies) {
- cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ID, sizeof(u_int32_t), &policy->local_id, response, response_size);
- cur_policy_index++;
- }
- }
-
- if (!necp_send_ctl_data(session, (u_int8_t *)response, response_size)) {
- NECPLOG0(LOG_ERR, "Failed to send response");
- }
-
- FREE(response, M_NECP);
-}
-
-static void
-necp_handle_policy_delete_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
-{
-#pragma unused(packet, offset)
- necp_policy_mark_all_for_deletion(session);
- necp_send_success_response(session, NECP_PACKET_TYPE_POLICY_DELETE_ALL, message_id);
-}
-
static necp_policy_id
necp_policy_get_new_id(struct necp_session *session)
{
* ...
*/
static int
-necp_handle_policy_dump_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet,
- user_addr_t out_buffer, size_t out_buffer_length, int offset)
+necp_handle_policy_dump_all(user_addr_t out_buffer, size_t out_buffer_length)
{
-#pragma unused(offset)
struct necp_kernel_socket_policy *policy = NULL;
int policy_i;
int policy_count = 0;
// UNLOCK
lck_rw_done(&necp_kernel_policy_lock);
- // Send packet
- if (packet != NULL) {
- u_int32_t total_result_length = sizeof(struct necp_packet_header) + total_tlv_len;
-
- // Allow malloc to wait, since the total buffer may be large and we are not holding any locks
- MALLOC(result_buf, u_int8_t *, total_result_length, M_NECP, M_WAITOK | M_ZERO);
- if (result_buf == NULL) {
- NECPLOG(LOG_DEBUG, "Failed to allocate result_buffer (%u bytes)", total_result_length);
- REPORT_ERROR(NECP_ERROR_INTERNAL);
- }
-
- result_buf_cursor = result_buf;
- result_buf_cursor = necp_buffer_write_packet_header(result_buf_cursor, NECP_PACKET_TYPE_POLICY_DUMP_ALL, NECP_PACKET_FLAGS_RESPONSE, message_id);
-
- for (int i = 0; i < policy_count; i++) {
- if (tlv_buffer_pointers[i] != NULL) {
- result_buf_cursor = necp_buffer_write_tlv(result_buf_cursor, NECP_TLV_POLICY_DUMP, tlv_buffer_lengths[i], tlv_buffer_pointers[i], result_buf, total_result_length);
- }
- }
-
- if (!necp_send_ctl_data(session, result_buf, result_buf_cursor - result_buf)) {
- NECPLOG(LOG_ERR, "Failed to send response (%u bytes)", result_buf_cursor - result_buf);
- } else {
- NECPLOG(LOG_ERR, "Sent data worth %u bytes. Total result buffer length was %u bytes", result_buf_cursor - result_buf, total_result_length);
- }
- }
-
// Copy out
if (out_buffer != 0) {
if (out_buffer_length < total_tlv_len + sizeof(u_int32_t)) {
done:
if (error_occured) {
- if (packet != NULL) {
- if (!necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_DUMP_ALL, message_id, response_error)) {
- NECPLOG0(LOG_ERR, "Failed to send error response");
- } else {
- NECPLOG0(LOG_ERR, "Sent error response");
- }
- }
error_code = necp_get_posix_error_for_necp_error(response_error);
}
#include <net/if.h>
/*
- * Name registered by the ipsec kernel control
+ * Name registered by the NECP kernel control
*/
#define NECP_CONTROL_NAME "com.apple.net.necp_control"
#include <net/pktsched/pktsched.h>
#include <net/pktsched/pktsched_netem.h>
+/* <rdar://problem/55953523> M8 Perf: Remove norm_dist_table on armv7k (16K wired win) */
+/* compile out netem on platforms where skywalk is not enabled by default */
+#if __LP64__
+#define CONFIG_NETEM 1
+#else
+#define CONFIG_NETEM 0
+#endif
+
+#if CONFIG_NETEM
+
enum {
NETEM_LOG_ERROR = 0,
NETEM_LOG_INFO = 1,
netem_log(NETEM_LOG_INFO, "netem config ret %d", ret);
return ret;
}
+
+#else /* !CONFIG_NETEM */
+
+int
+netem_init(void)
+{
+ return 0;
+}
+
+int
+netem_config(struct netem **ne, const char *name,
+ const struct if_netem_params *p, void *output_handle,
+ int (*output_func)(void *handle, pktsched_pkt_t *pkts, uint32_t n_pkts),
+ uint32_t output_max_batch_size)
+{
+#pragma unused(ne, name, p, output_handle, output_func, output_max_batch_size)
+ printf("%s error %d: unavailable on this platform\n", __func__, ENOTSUP);
+ return ENOTSUP;
+}
+
+void
+__attribute__((noreturn))
+netem_get_params(struct netem *ne, struct if_netem_params *p)
+{
+#pragma unused(ne, p)
+ panic("unexpected netem call");
+}
+
+void
+__attribute__((noreturn))
+netem_destroy(struct netem *ne)
+{
+#pragma unused(ne)
+ panic("unexpected netem call");
+}
+
+int
+netem_enqueue(struct netem *ne, classq_pkt_t *p, boolean_t *pdrop)
+{
+#pragma unused(ne, p, pdrop)
+ panic("unexpected netem call");
+ return 0;
+}
+
+int
+netem_dequeue(struct netem *ne, pktsched_pkt_t *p, boolean_t *ppending)
+{
+#pragma unused(ne, p, ppending)
+ panic("unexpected netem call");
+ return 0;
+}
+#endif /* !CONFIG_NETEM */
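
Because only netem_config() reports ENOTSUP on !CONFIG_NETEM builds (netem_init() still succeeds), callers can treat netem as an optional feature. A hedged caller sketch follows; everything other than netem_config() and its parameter types is illustrative.

/* Probe for netem at attach time and degrade gracefully on 32-bit builds,
 * where the stub above compiles in and returns ENOTSUP. */
static int
sketch_attach_netem(struct netem **ne, const struct if_netem_params *p,
    void *handle,
    int (*out_func)(void *handle, pktsched_pkt_t *pkts, uint32_t n_pkts))
{
	int err = netem_config(ne, "sketch", p, handle, out_func,
	    32 /* illustrative batch size */);
	if (err == ENOTSUP) {
		printf("netem unavailable on this platform; continuing without it\n");
		return 0;   /* treat the feature as optional */
	}
	return err;
}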
if (af == AF_INET) {
bcopy(src, dst, sizeof (struct sockaddr_in));
+ dst->ss_len = sizeof(struct sockaddr_in);
if (pifscope == NULL || ifscope != IFSCOPE_NONE)
sin_set_ifscope(SA(dst), ifscope);
} else {
bcopy(src, dst, sizeof (struct sockaddr_in6));
+ dst->ss_len = sizeof(struct sockaddr_in6);
if (pifscope != NULL &&
IN6_IS_SCOPE_EMBED(&SIN6(dst)->sin6_addr)) {
unsigned int eifscope;
if ((cp + sa->sa_len) > cplim) {
return EINVAL;
}
+ if (sa->sa_len > sizeof(struct sockaddr_storage)) {
+ return EINVAL;
+ }
/*
* there are no more.. quit now
* If there are more bits, they are in error.
FDLOG(LOG_INFO, &nil_pcb, "Nodes count = %lu, child maps count = %lu, bytes_count = %lu",
new_trie.nodes_count, new_trie.child_maps_count, new_trie.bytes_count);
- nodes_mem_size = (sizeof(*new_trie.nodes) * new_trie.nodes_count);
- child_maps_mem_size = (sizeof(*new_trie.child_maps) * CHILD_MAP_SIZE * new_trie.child_maps_count);
- bytes_mem_size = (sizeof(*new_trie.bytes) * new_trie.bytes_count);
+ if (os_mul_overflow(sizeof(*new_trie.nodes), new_trie.nodes_count, &nodes_mem_size) ||
+ os_mul3_overflow(sizeof(*new_trie.child_maps), CHILD_MAP_SIZE, new_trie.child_maps_count, &child_maps_mem_size) ||
+ os_mul_overflow(sizeof(*new_trie.bytes), new_trie.bytes_count, &bytes_mem_size) ||
+ os_add3_overflow(nodes_mem_size, child_maps_mem_size, bytes_mem_size, &trie_memory_size)) {
+ FDLOG0(LOG_ERR, &nil_pcb, "Overflow while computing trie memory sizes");
+ lck_rw_done(&group->lck);
+ return;
+ }
- trie_memory_size = nodes_mem_size + child_maps_mem_size + bytes_mem_size;
if (trie_memory_size > FLOW_DIVERT_MAX_TRIE_MEMORY) {
FDLOG(LOG_ERR, &nil_pcb, "Trie memory size (%lu) is too big (maximum is %u)", trie_memory_size, FLOW_DIVERT_MAX_TRIE_MEMORY);
lck_rw_done(&group->lck);
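
The os_*_overflow() builtins from <os/overflow.h> write the full result through their out-parameter and return true on wrap, so the formerly unchecked size arithmetic becomes a short-circuit chain that rejects hostile counts before MALLOC. A standalone sketch of the pattern, with illustrative names:

/* Returns true and fills *total only when no intermediate product or sum
 * wrapped; mirrors the os_mul_overflow/os_add3_overflow chain above. */
static bool
sketch_trie_size(size_t node_size, size_t nodes_count,
    size_t map_size, size_t maps_count, size_t *total)
{
	size_t nodes_bytes, maps_bytes;

	return !(os_mul_overflow(node_size, nodes_count, &nodes_bytes) ||
	    os_mul_overflow(map_size, maps_count, &maps_bytes) ||
	    os_add_overflow(nodes_bytes, maps_bytes, total));
}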
bzero(msgBuf, msgsize);
bzero(&ev_msg, sizeof(struct kev_msg));
va_start( ap, format );
- loglen = vsnprintf(msgBuf, msgsize, format, ap);
+ loglen = vscnprintf(msgBuf, msgsize, format, ap);
va_end( ap );
ev_msg.vendor_code = KEV_VENDOR_APPLE;
if (f->reserved_1 == IPFW_RULE_INACTIVE) {
break;
}
- len = snprintf(SNPARGS(action2, 0), "Forward to %s",
+ len = scnprintf(SNPARGS(action2, 0), "Forward to %s",
inet_ntop(AF_INET, &sa->sa.sin_addr, ipv4str, sizeof(ipv4str)));
if (sa->sa.sin_port) {
snprintf(SNPARGS(action2, len), ":%d",
offset = ip_off & IP_OFFMASK;
switch (ip->ip_p) {
case IPPROTO_TCP:
- len = snprintf(SNPARGS(proto, 0), "TCP %s",
+ len = scnprintf(SNPARGS(proto, 0), "TCP %s",
inet_ntop(AF_INET, &ip->ip_src, ipv4str, sizeof(ipv4str)));
if (offset == 0) {
snprintf(SNPARGS(proto, len), ":%d %s:%d",
break;
case IPPROTO_UDP:
- len = snprintf(SNPARGS(proto, 0), "UDP %s",
+ len = scnprintf(SNPARGS(proto, 0), "UDP %s",
inet_ntop(AF_INET, &ip->ip_src, ipv4str, sizeof(ipv4str)));
if (offset == 0) {
snprintf(SNPARGS(proto, len), ":%d %s:%d",
case IPPROTO_ICMP:
if (offset == 0) {
- len = snprintf(SNPARGS(proto, 0),
+ len = scnprintf(SNPARGS(proto, 0),
"ICMP:%u.%u ",
icmp->icmp_type, icmp->icmp_code);
} else {
- len = snprintf(SNPARGS(proto, 0), "ICMP ");
+ len = scnprintf(SNPARGS(proto, 0), "ICMP ");
}
- len += snprintf(SNPARGS(proto, len), "%s",
+ len += scnprintf(SNPARGS(proto, len), "%s",
inet_ntop(AF_INET, &ip->ip_src, ipv4str, sizeof(ipv4str)));
snprintf(SNPARGS(proto, len), " %s",
inet_ntop(AF_INET, &ip->ip_dst, ipv4str, sizeof(ipv4str)));
break;
default:
- len = snprintf(SNPARGS(proto, 0), "P:%d %s", ip->ip_p,
+ len = scnprintf(SNPARGS(proto, 0), "P:%d %s", ip->ip_p,
inet_ntop(AF_INET, &ip->ip_src, ipv4str, sizeof(ipv4str)));
snprintf(SNPARGS(proto, len), " %s",
inet_ntop(AF_INET, &ip->ip_dst, ipv4str, sizeof(ipv4str)));
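
The recurring snprintf()-to-scnprintf() swap matters for these chained appends: snprintf() returns the length the formatted string would have needed, so on truncation "len += snprintf(...)" advances the cursor past the end of the buffer and the next SNPARGS() append writes out of bounds. scnprintf() (exported later in this diff) returns only what was actually written. A small illustration:

/* With an 8-byte buffer, "TCP 192.0.2.1" (13 chars) truncates to "TCP 192". */
static void
sketch_append(void)
{
	char buf[8];
	int len;

	len = snprintf(buf, sizeof(buf), "TCP 192.0.2.1");   /* returns 13 */
	/* buf + len now points past the buffer; chaining here corrupts memory */

	len = scnprintf(buf, sizeof(buf), "TCP 192.0.2.1");  /* returns 7 */
	scnprintf(buf + len, sizeof(buf) - len, ":%d", 80);  /* stays in bounds */
}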
int error = 0;
mp_so = mptetoso(mpte);
- socket_lock_assert_owned(mp_so);
mp_tp = mpte->mpte_mptcb;
+ socket_lock_assert_owned(mp_so);
+
+ if (mp_so->so_flags & SOF_DEFUNCT) {
+ return 0;
+ }
+
VERIFY(!(mpte->mpte_mppcb->mpp_flags & MPP_WUPCALL));
mpte->mpte_mppcb->mpp_flags |= MPP_WUPCALL;
VERIFY(off >= 0);
+ if (m == NULL && (so->so_flags & SOF_DEFUNCT)) {
+ *dsn = 0;
+ *relseq = 0;
+ *data_len = 0;
+ *dss_csum = 0;
+ return;
+ }
+
/*
* In the subflow socket, the DSN sequencing can be discontiguous,
* but the subflow sequence mapping is contiguous. Use the subflow
}
}
- VERIFY(m);
VERIFY(off >= 0);
VERIFY(m->m_pkthdr.mp_rlen <= UINT16_MAX);
uint16_t mdss_data_len;
uint16_t dss_csum;
+ if (so->so_snd.sb_mb == NULL && (so->so_flags & SOF_DEFUNCT)) {
+ return 0;
+ }
+
mptcp_output_getm_dsnmap64(so, off, &mdss_dsn, &mdss_subflow_seq,
&mdss_data_len, &dss_csum);
mdss_subflow_off--;
}
- if (off < mdss_subflow_off) {
- printf("%s off %d mdss_subflow_off %d mdss_subflow_seq %u iss %u suna %u\n", __func__,
- off, mdss_subflow_off, mdss_subflow_seq, mpts->mpts_iss, tp->snd_una);
- }
VERIFY(off >= mdss_subflow_off);
- mptcplog((LOG_DEBUG, "%s dlen %u off %d sub_off %d sub_seq %u iss %u suna %u\n",
- __func__, mdss_data_len, off, mdss_subflow_off, mdss_subflow_seq,
- mpts->mpts_iss, tp->snd_una), MPTCP_SENDER_DBG, MPTCP_LOGLVL_VERBOSE);
return mdss_data_len - (off - mdss_subflow_off);
}
TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, " in_pcbinshash failed");
goto drop;
}
+ socket_lock(oso, 0);
#if INET6
if (isipv6) {
/*
inp->inp_options = ip_srcroute();
inp->inp_ip_tos = oinp->inp_ip_tos;
}
- socket_lock(oso, 0);
#if IPSEC
/* copy old policy into new socket's */
if (sotoinpcb(oso)->inp_sp) {
tp->t_rexmtthresh = tcprexmtthresh;
}
- m_freem(m);
-
/*
* If all outstanding data are acked, stop
* retransmit timer, otherwise restart timer
tcp_tfo_rcv_ack(tp, th);
+ m_freem(m);
+
tcp_check_timer_state(tp);
tcp_handle_wakeup(so, read_wakeup, write_wakeup);
sysctl_msec_to_ticks SYSCTL_HANDLER_ARGS
{
#pragma unused(arg2)
- int error, s, tt;
+ int error, temp;
+ long s, tt;
tt = *(int *)arg1;
- if (tt < 0 || tt >= INT_MAX / 1000) {
+ s = tt * 1000 / TCP_RETRANSHZ;
+ if (tt < 0 || s > INT_MAX) {
return EINVAL;
}
- s = tt * 1000 / TCP_RETRANSHZ;
+ temp = (int)s;
- error = sysctl_handle_int(oidp, &s, 0, req);
+ error = sysctl_handle_int(oidp, &temp, 0, req);
if (error || !req->newptr) {
return error;
}
tt = s * TCP_RETRANSHZ / 1000;
- if (tt < 1) {
+ if (tt < 1 || tt > INT_MAX) {
return EINVAL;
}
- *(int *)arg1 = tt;
+ *(int *)arg1 = (int)tt;
SYSCTL_SKMEM_UPDATE_AT_OFFSET(arg2, *(int*)arg1);
return 0;
}
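
The rewritten conversion widens to long before multiplying and range-checks the result once, rather than trying to predict int overflow up front. A reduced sketch of that shape (on xnu's LP64 kernels a 64-bit long cannot wrap for any int input; the helper name is illustrative):

static int
sketch_msec_from_ticks(int tt, long hz, int *out)
{
	long s = (long)tt * 1000 / hz;   /* widen first: the product is 64-bit */
	if (tt < 0 || s > INT_MAX) {
		return EINVAL;           /* one range check on the wide value */
	}
	*out = (int)s;
	return 0;
}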
switch (nxt) {
case IPPROTO_TCP:
- len = snprintf(SNPARGS(proto, 0), "TCP [%s]",
+ len = scnprintf(SNPARGS(proto, 0), "TCP [%s]",
ip6_sprintf(&ip6->ip6_src));
if (off > 0) {
- len += snprintf(SNPARGS(proto, len), ":%d ",
+ len += scnprintf(SNPARGS(proto, len), ":%d ",
ntohs(tcp6->th_sport));
} else {
- len += snprintf(SNPARGS(proto, len), " ");
+ len += scnprintf(SNPARGS(proto, len), " ");
}
- len += snprintf(SNPARGS(proto, len), "[%s]",
+ len += scnprintf(SNPARGS(proto, len), "[%s]",
ip6_sprintf(&ip6->ip6_dst));
if (off > 0) {
- snprintf(SNPARGS(proto, len), ":%d",
+ scnprintf(SNPARGS(proto, len), ":%d",
ntohs(tcp6->th_dport));
}
break;
case IPPROTO_UDP:
- len = snprintf(SNPARGS(proto, 0), "UDP [%s]",
+ len = scnprintf(SNPARGS(proto, 0), "UDP [%s]",
ip6_sprintf(&ip6->ip6_src));
if (off > 0) {
- len += snprintf(SNPARGS(proto, len), ":%d ",
+ len += scnprintf(SNPARGS(proto, len), ":%d ",
ntohs(udp->uh_sport));
} else {
- len += snprintf(SNPARGS(proto, len), " ");
+ len += scnprintf(SNPARGS(proto, len), " ");
}
- len += snprintf(SNPARGS(proto, len), "[%s]",
+ len += scnprintf(SNPARGS(proto, len), "[%s]",
ip6_sprintf(&ip6->ip6_dst));
if (off > 0) {
- snprintf(SNPARGS(proto, len), ":%d",
+ scnprintf(SNPARGS(proto, len), ":%d",
ntohs(udp->uh_dport));
}
break;
case IPPROTO_ICMPV6:
if (off > 0) {
- len = snprintf(SNPARGS(proto, 0), "IPV6-ICMP:%u.%u ",
+ len = scnprintf(SNPARGS(proto, 0), "IPV6-ICMP:%u.%u ",
icmp6->icmp6_type, icmp6->icmp6_code);
} else {
- len = snprintf(SNPARGS(proto, 0), "IPV6-ICMP ");
+ len = scnprintf(SNPARGS(proto, 0), "IPV6-ICMP ");
}
- len += snprintf(SNPARGS(proto, len), "[%s]",
+ len += scnprintf(SNPARGS(proto, len), "[%s]",
ip6_sprintf(&ip6->ip6_src));
- snprintf(SNPARGS(proto, len), " [%s]",
+ scnprintf(SNPARGS(proto, len), " [%s]",
ip6_sprintf(&ip6->ip6_dst));
break;
default:
- len = snprintf(SNPARGS(proto, 0), "P:%d [%s]", nxt,
+ len = scnprintf(SNPARGS(proto, 0), "P:%d [%s]", nxt,
ip6_sprintf(&ip6->ip6_src));
- snprintf(SNPARGS(proto, len), " [%s]",
+ scnprintf(SNPARGS(proto, len), " [%s]",
ip6_sprintf(&ip6->ip6_dst));
break;
}
if (*fsl->nl_servers[idx.nli_serv]->ns_addresses[idx.nli_addr]) {
name = fsl->nl_servers[idx.nli_serv]->ns_addresses[idx.nli_addr];
}
- cnt = snprintf(p, size, "<%s>:", name);
+ cnt = scnprintf(p, size, "<%s>:", name);
} else {
- cnt = snprintf(p, size, "%s:", name);
+ cnt = scnprintf(p, size, "%s:", name);
}
p += cnt;
size -= cnt;
}
/* append each server path component */
for (i = 0; (size > 0) && (i < (int)fsl->nl_path.np_compcount); i++) {
- cnt = snprintf(p, size, "/%s", fsl->nl_path.np_components[i]);
+ cnt = scnprintf(p, size, "/%s", fsl->nl_path.np_components[i]);
p += cnt;
size -= cnt;
}
return true;
}
- if ((req_flags & TRP_POLICY) && cur_trp.trp_pol != cur_trp.trp_pol) {
+ if ((req_flags & TRP_POLICY) && req_trp.trp_pol != cur_trp.trp_pol) {
return true;
}
}
}
-
done:
if (qos_rv && voucher_rv) {
/* Both failed, give that a unique error. */
*/
wq->wq_creator = uth = workq_pop_idle_thread(wq, UT_WORKQ_OVERCOMMIT,
&needs_wakeup);
- if (workq_thread_needs_priority_change(req, uth)) {
- workq_thread_reset_pri(wq, uth, req, /*unpark*/ true);
- }
+ /* Always reset the priorities on the newly chosen creator */
+ workq_thread_reset_pri(wq, uth, req, /*unpark*/ true);
workq_turnstile_update_inheritor(wq, uth->uu_thread,
TURNSTILE_INHERITOR_THREAD);
WQ_TRACE_WQ(TRACE_wq_creator_select | DBG_FUNC_NONE,
WORKQ_SET_SELF_FIXEDPRIORITY_FLAG = 0x04,
WORKQ_SET_SELF_TIMESHARE_FLAG = 0x08,
WORKQ_SET_SELF_WQ_KEVENT_UNBIND = 0x10,
- WORKQ_SET_SELF_ALTERNATE_AMX = 0x20,
};
void workq_proc_suspended(struct proc *p);
/* MAC flags used by F_ADDFILESIGS_* */
#define MAC_VNODE_CHECK_DYLD_SIM 0x1 /* tells the MAC framework that dyld-sim is being loaded */
+#define CLEAR_LV_ENTITLEMENT "com.apple.private.security.clear-library-validation"
+
/* csops operations */
#define CS_OPS_STATUS 0 /* return status */
#define CS_OPS_MARKINVALID 1 /* invalidate process */
#define CS_OPS_CLEARINSTALLER 12 /* clear INSTALLER flag */
#define CS_OPS_CLEARPLATFORM 13 /* clear platform binary status (DEVELOPMENT-only) */
#define CS_OPS_TEAMID 14 /* get team id */
+#define CS_OPS_CLEAR_LV 15 /* clear the library validation flag */
#define CS_MAX_TEAMID_LEN 64
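
CS_OPS_CLEAR_LV is a new csops(2) operation; the existing syscall shape is int csops(pid_t, unsigned int, void *, size_t). A user-space sketch, assuming (based on the CLEAR_LV_ENTITLEMENT define above) that the kernel gates the operation on that entitlement and fails without it:

#include <sys/types.h>
#include <stdio.h>
#include <unistd.h>

/* Not every SDK ships a prototype; declare the syscall wrapper's shape. */
extern int csops(pid_t pid, unsigned int ops, void *useraddr, size_t usersize);
#define CS_OPS_CLEAR_LV 15   /* from the header change above */

int
main(void)
{
	/* Expect failure (likely EPERM) unless the caller holds
	 * com.apple.private.security.clear-library-validation. */
	if (csops(getpid(), CS_OPS_CLEAR_LV, NULL, 0) != 0) {
		perror("csops(CS_OPS_CLEAR_LV)");
		return 1;
	}
	return 0;
}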
uint16_t dcr_action;
} dtrace_cred_t;
+typedef struct dtrace_format {
+ uint64_t dtf_refcount;
+ char dtf_str[];
+} dtrace_format_t;
+
+#define DTRACE_FORMAT_SIZE(fmt) (strlen(fmt->dtf_str) + 1 + sizeof(dtrace_format_t))
+
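dts_formats thus becomes an array of refcounted records whose string lives in a flexible array member; DTRACE_FORMAT_SIZE() recovers the allocation size from an existing record (for the free path). An allocation sketch in the idiom dtrace.c uses elsewhere (kmem_zalloc/KM_SLEEP); the helper itself is illustrative:

static dtrace_format_t *
sketch_format_create(const char *str)
{
	/* Header + string + NUL: the same quantity DTRACE_FORMAT_SIZE()
	 * later computes from the populated record when freeing it. */
	size_t size = sizeof(dtrace_format_t) + strlen(str) + 1;
	dtrace_format_t *fmt = kmem_zalloc(size, KM_SLEEP);

	fmt->dtf_refcount = 1;
	(void) strlcpy(fmt->dtf_str, str, strlen(str) + 1);
	return fmt;
}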
/*
* DTrace Consumer State
*
char dts_speculates; /* boolean: has speculations */
char dts_destructive; /* boolean: has dest. actions */
int dts_nformats; /* number of formats */
- char **dts_formats; /* format string array */
+ dtrace_format_t **dts_formats; /* format string array */
dtrace_optval_t dts_options[DTRACEOPT_MAX]; /* options */
dtrace_cred_t dts_cred; /* credentials */
size_t dts_nretained; /* number of retained enabs */
#endif /* DTRACE_ERRDEBUG */
-
typedef struct dtrace_string dtrace_string_t;
typedef struct dtrace_string {
char *end = p + sizeof(persona->pna_desc) - 1;
*end = 0;
- p += snprintf(p, end - p, "%s/%d:%d",
+ p += scnprintf(p, end - p, "%s/%d:%d",
persona->pna_login,
kauth_cred_getuid(persona->pna_cred),
kauth_cred_getgid(persona->pna_cred));
kret = copyout(&version[0], user_addr, length_to_copy);
RET_IF_OP_FAIL;
- length_to_copy = MIN((uint32_t)(strlen(PE_boot_args()) + 1), OSVERSIZE);
+ length_to_copy = MIN((uint32_t)(strlen(PE_boot_args()) + 1), BOOT_LINE_LENGTH);
kret = kcdata_get_memory_addr(&kcd, STACKSHOT_KCTYPE_BOOTARGS, length_to_copy, &user_addr);
RET_IF_OP_FAIL;
kret = copyout(PE_boot_args(), user_addr, length_to_copy);
--- /dev/null
+/*
+ * Copyright (c) 2018 Apple Inc. All rights reserved.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
+ */
+
+#include <sys/sysctl.h>
+
* int Index of special port
* ipc_port_t New special port
*
- * Returns: kern_return_t see task_set_special_port()
+ * Returns: kern_return_t see task_set_special_port_internal()
*/
kern_return_t
vfs_context_set_special_port(vfs_context_t ctx, int which, ipc_port_t port)
task = get_threadtask(ctx->vc_thread);
}
- return task_set_special_port(task, which, port);
+ return task_set_special_port_internal(task, which, port);
}
/*
* we can release the icoount which we used to get our usecount.
*/
proc_fdlock(p);
+ if (!(fdp->fd_flags & FD_CHROOT)) {
+ ndp->ni_rootdir = rootvnode;
+ } else {
+ ndp->ni_rootdir = fdp->fd_rdir;
+ }
- if ((ndp->ni_rootdir = fdp->fd_rdir) == NULLVP) {
+ if (!ndp->ni_rootdir) {
if (!(fdp->fd_flags & FD_CHROOT)) {
- ndp->ni_rootdir = rootvnode;
+ proc_fdunlock(p);
+ printf("rootvnode is not set\n");
} else {
proc_fdunlock(p);
/* This should be a panic */
- printf("proc is chrooted but does not have a root directory set\n");
- error = ENOENT;
- goto error_out;
+ printf("fdp->fd_rdir is not set\n");
}
+ error = ENOENT;
+ goto error_out;
}
/*
VATTR_INIT(&va);
cmode = ((uap->mode & ~fdp->fd_cmask) & ALLPERMS) & ~S_ISTXT;
- VATTR_SET(&va, va_mode, cmode);
+ VATTR_SET(&va, va_mode, cmode & ACCESSPERMS);
if (uap->uid != KAUTH_UID_NONE) {
VATTR_SET(&va, va_uid, uap->uid);
}
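
Masking with ACCESSPERMS (0777) instead of ALLPERMS (07777) strips the setuid, setgid, and sticky bits from the caller-requested creation mode. A worked example with the sys/stat.h constants (S_ISTXT is the BSD name for the sticky bit, 01000):

#include <sys/stat.h>
#include <assert.h>

int
main(void)
{
	mode_t requested = 04755;   /* setuid + rwxr-xr-x */
	mode_t cmask = 022;
	mode_t cmode = ((requested & ~cmask) & ALLPERMS) & ~S_ISTXT;

	assert(cmode == 04755);                 /* old path kept setuid */
	assert((cmode & ACCESSPERMS) == 0755);  /* new path drops it */
	return 0;
}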
/* We found the Finder Info entry. */
ainfop->finderinfo = &filehdr->entries[i];
- /*
- * Is the Finder Info "empty" (all zeroes)? If so,
- * we'll pretend like the Finder Info extended attribute
- * does not exist.
+ /* At this point the check_and_swap_apple_double_header() call above
+ * has verified that all AppleDouble entries are valid:
+ * they point somewhere within the file.
*
- * Note: we have to make sure the Finder Info is
- * contained within the buffer we have already read,
- * to avoid accidentally accessing a bogus address.
- * If it is outside the buffer, we just assume the
- * Finder Info is non-empty.
+ * Now, for the Finder Info, make sure that the fixed portion
+ * is within the buffer we read in.
*/
- if (ainfop->finderinfo->offset + FINDERINFOSIZE <= ainfop->rawsize &&
- bcmp((u_int8_t*)ainfop->filehdr + ainfop->finderinfo->offset, emptyfinfo, sizeof(emptyfinfo)) == 0) {
- ainfop->emptyfinderinfo = 1;
+ if (((ainfop->finderinfo->offset + FINDERINFOSIZE) > ainfop->finderinfo->offset) &&
+ ((ainfop->finderinfo->offset + FINDERINFOSIZE) <= ainfop->rawsize)) {
+ /*
+ * Is the Finder Info "empty" (all zeroes)? If so,
+ * we'll pretend like the Finder Info extended attribute
+ * does not exist.
+ */
+ if (bcmp((u_int8_t*)ainfop->filehdr + ainfop->finderinfo->offset, emptyfinfo, sizeof(emptyfinfo)) == 0) {
+ ainfop->emptyfinderinfo = 1;
+ }
+ } else {
+ error = ENOATTR;
+ goto bail;
}
}
if (filehdr->entries[i].type == AD_RESOURCE) {
__ZN24IOBufferMemoryDescriptor15initWithOptionsEmjjP4task
__ZN24IOBufferMemoryDescriptor17getVirtualSegmentEmPm
__ZN24IOBufferMemoryDescriptor17inTaskWithOptionsEP4taskmjj
+__ZN24IOBufferMemoryDescriptor17inTaskWithOptionsEP4taskmjjjj
__ZN24IOBufferMemoryDescriptor20initWithPhysicalMaskEP4taskmyyy
__ZN24IOBufferMemoryDescriptor22inTaskWithPhysicalMaskEP4taskmyy
__ZN24IOBufferMemoryDescriptor9setLengthEj
__ZN24IOBufferMemoryDescriptor12withCapacityEmjb
__ZN24IOBufferMemoryDescriptor14getBytesNoCopyEmm
__ZN24IOBufferMemoryDescriptor17inTaskWithOptionsEP4taskjmm
+__ZN24IOBufferMemoryDescriptor17inTaskWithOptionsEP4taskjmmjj
__ZN24IOBufferMemoryDescriptor20initWithPhysicalMaskEP4taskjyyy
__ZN24IOBufferMemoryDescriptor22inTaskWithPhysicalMaskEP4taskjyy
__ZN24IOBufferMemoryDescriptor9setLengthEm
__ZN24IOBufferMemoryDescriptor12withCapacityEmjb
__ZN24IOBufferMemoryDescriptor14getBytesNoCopyEmm
__ZN24IOBufferMemoryDescriptor17inTaskWithOptionsEP4taskjmm
+__ZN24IOBufferMemoryDescriptor17inTaskWithOptionsEP4taskjmmjj
__ZN24IOBufferMemoryDescriptor20initWithPhysicalMaskEP4taskjyyy
__ZN24IOBufferMemoryDescriptor22inTaskWithPhysicalMaskEP4taskjyy
__ZN24IOBufferMemoryDescriptor34_RESERVEDIOBufferMemoryDescriptor0Ev
_sha1_loop:_SHA1Update
_sha1_result:_SHA1Final_r
_snprintf
+_scnprintf
_sscanf
_strcasecmp
_strchr
_version_variant
_vprintf
_vsnprintf
+_vscnprintf
_vsscanf
_zError
_zlibVersion
-19.0.0
+19.2.0
# The first line of this file contains the master version number for the kernel.
# All other instances of the kernel version in xnu are derived from this file.
#define _IOBUFFERMEMORYDESCRIPTOR_INTASKWITHOPTIONS_ 1
#define _IOBUFFERMEMORYDESCRIPTOR_HOSTPHYSICALLYCONTIGUOUS_ 1
+#define IOBUFFERMEMORYDESCRIPTOR_SUPPORTS_INTASKWITHOPTIONS_TAGS 1
/*!
* @class IOBufferMemoryDescriptor
* @abstract Provides a simple memory descriptor that allocates its own buffer memory.
vm_size_t capacity,
vm_offset_t alignment = 1);
+/*! @function inTaskWithOptions
+ * @abstract Creates a memory buffer with memory descriptor for that buffer.
+ * @discussion Added in Mac OS X 10.2, this method allocates a memory buffer with a given size and alignment in the specified task's address space, and returns a memory descriptor instance representing the memory. It is recommended that memory allocated for I/O or sharing via mapping be created via IOBufferMemoryDescriptor. Options passed with the request specify the kind of memory to be allocated - pageability and sharing are specified with option bits. This function may block and so should not be called from interrupt level or while a simple lock is held.
+ * @param inTask The task the buffer will be allocated in.
+ * @param options Options for the allocation:<br>
+ * kIODirectionOut, kIODirectionIn - set the direction of the I/O transfer.<br>
+ * kIOMemoryPhysicallyContiguous - pass to request memory be physically contiguous. This option is heavily discouraged. The request may fail if memory is fragmented, may cause large amounts of paging activity, and may take a very long time to execute.<br>
+ * kIOMemoryPageable - pass to request memory be non-wired - the default for kernel allocated memory is wired.<br>
+ * kIOMemoryPurgeable - pass to request memory that may later have its purgeable state set with IOMemoryDescriptor::setPurgeable. Only supported for kIOMemoryPageable allocations.<br>
+ * kIOMemoryKernelUserShared - pass to request memory that will be mapped into both the kernel and client applications.<br>
+ * kIOMapInhibitCache - allocate memory with inhibited cache setting. <br>
+ * kIOMapWriteThruCache - allocate memory with writethru cache setting. <br>
+ * kIOMapCopybackCache - allocate memory with copyback cache setting. <br>
+ * kIOMapWriteCombineCache - allocate memory with writecombined cache setting.
+ * @param capacity The number of bytes to allocate.
+ * @param alignment The minimum required alignment of the buffer in bytes - 1 is the default for no required alignment. For example, pass 256 to get memory allocated at an address with bits 0-7 zero.
+ * @param kernTag The kernel memory tag
+ * @param userTag The user memory tag
+ * @result Returns an instance of class IOBufferMemoryDescriptor to be released by the caller, which will free the memory descriptor and associated buffer. */
+
+ static IOBufferMemoryDescriptor * inTaskWithOptions(
+ task_t inTask,
+ IOOptionBits options,
+ vm_size_t capacity,
+ vm_offset_t alignment,
+ uint32_t kernTag,
+ uint32_t userTag);
+
/*! @function inTaskWithPhysicalMask
* @abstract Creates a memory buffer with memory descriptor for that buffer.
* @discussion Added in Mac OS X 10.5, this method allocates a memory buffer with a given size and alignment in the specified task's address space, and returns a memory descriptor instance representing the memory. It is recommended that memory allocated for I/O or sharing via mapping be created via IOBufferMemoryDescriptor. Options passed with the request specify the kind of memory to be allocated - pageability and sharing are specified with option bits. This function may block and so should not be called from interrupt level or while a simple lock is held.
return me;
}
+IOBufferMemoryDescriptor *
+IOBufferMemoryDescriptor::inTaskWithOptions(
+ task_t inTask,
+ IOOptionBits options,
+ vm_size_t capacity,
+ vm_offset_t alignment,
+ uint32_t kernTag,
+ uint32_t userTag)
+{
+ IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;
+
+ if (me) {
+ me->setVMTags(kernTag, userTag);
+
+ if (!me->initWithPhysicalMask(inTask, options, capacity, alignment, 0)) {
+ me->release();
+ me = NULL;
+ }
+ }
+ return me;
+}
+
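A kernel-side usage sketch for the tagged overload (C++, matching this file). VM_KERN_MEMORY_IOKIT comes from <mach/vm_statistics.h>; treating VM_MEMORY_IOKIT as the user-side bucket is an assumption for illustration only:

IOBufferMemoryDescriptor *bmd = IOBufferMemoryDescriptor::inTaskWithOptions(
	kernel_task,
	kIODirectionInOut | kIOMemoryKernelUserShared,
	4096,                  // capacity
	4096,                  // page alignment
	VM_KERN_MEMORY_IOKIT,  // kernTag: kernel-side accounting bucket
	VM_MEMORY_IOKIT);      // userTag (assumed appropriate user bucket)
if (bmd != NULL) {
	// ... map or fill the buffer, hand it to hardware ...
	bmd->release();        // caller owns the single reference
}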
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
task_t inTask,
}
_acceptSystemWakeEvents = (_systemWakeEventsArray != NULL);
#if !(defined(RC_HIDE_N144) || defined(RC_HIDE_N146))
- if (!(kIOPMWakeEventAOTExitFlags & _aotPendingFlags))
+ if (!(_aotNow && (kIOPMWakeEventAOTExitFlags & _aotPendingFlags)))
#endif /* !(defined(RC_HIDE_N144) || defined(RC_HIDE_N146)) */
{
gWakeReasonString[0] = '\0';
#endif /* CONFIG_MACF */
-/* Routine io_registry_entry_get_properties */
+/* Routine io_registry_entry_get_properties_bin_buf */
kern_return_t
-is_io_registry_entry_get_properties_bin(
+is_io_registry_entry_get_properties_bin_buf(
io_object_t registry_entry,
+ mach_vm_address_t buf,
+ mach_vm_size_t *bufsize,
io_buf_ptr_t *properties,
mach_msg_type_number_t *propertiesCnt)
{
if (kIOReturnSuccess == err) {
len = s->getLength();
- *propertiesCnt = len;
- err = copyoutkdata(s->text(), len, properties);
+ if (buf && bufsize && len <= *bufsize) {
+ *bufsize = len;
+ *propertiesCnt = 0;
+ *properties = nullptr;
+ if (copyout(s->text(), buf, len)) {
+ err = kIOReturnVMError;
+ } else {
+ err = kIOReturnSuccess;
+ }
+ } else {
+ if (bufsize) {
+ *bufsize = 0;
+ }
+ *propertiesCnt = len;
+ err = copyoutkdata( s->text(), len, properties );
+ }
}
s->release();
return err;
}
-/* Routine io_registry_entry_get_property_bin */
+/* Routine io_registry_entry_get_properties_bin */
kern_return_t
-is_io_registry_entry_get_property_bin(
+is_io_registry_entry_get_properties_bin(
+ io_object_t registry_entry,
+ io_buf_ptr_t *properties,
+ mach_msg_type_number_t *propertiesCnt)
+{
+ return is_io_registry_entry_get_properties_bin_buf(registry_entry,
+ 0, NULL, properties, propertiesCnt);
+}
+
+/* Routine io_registry_entry_get_property_bin_buf */
+kern_return_t
+is_io_registry_entry_get_property_bin_buf(
io_object_t registry_entry,
io_name_t plane,
io_name_t property_name,
uint32_t options,
+ mach_vm_address_t buf,
+ mach_vm_size_t *bufsize,
io_buf_ptr_t *properties,
mach_msg_type_number_t *propertiesCnt )
{
if (obj->serialize( s )) {
len = s->getLength();
- *propertiesCnt = len;
- err = copyoutkdata( s->text(), len, properties );
+ if (buf && bufsize && len <= *bufsize) {
+ *bufsize = len;
+ *propertiesCnt = 0;
+ *properties = nullptr;
+ if (copyout(s->text(), buf, len)) {
+ err = kIOReturnVMError;
+ } else {
+ err = kIOReturnSuccess;
+ }
+ } else {
+ if (bufsize) {
+ *bufsize = 0;
+ }
+ *propertiesCnt = len;
+ err = copyoutkdata( s->text(), len, properties );
+ }
} else {
err = kIOReturnUnsupported;
}
return err;
}
+/* Routine io_registry_entry_get_property_bin */
+kern_return_t
+is_io_registry_entry_get_property_bin(
+ io_object_t registry_entry,
+ io_name_t plane,
+ io_name_t property_name,
+ uint32_t options,
+ io_buf_ptr_t *properties,
+ mach_msg_type_number_t *propertiesCnt )
+{
+ return is_io_registry_entry_get_property_bin_buf(registry_entry, plane,
+ property_name, options, 0, NULL, properties, propertiesCnt);
+}
+
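The new _buf variants let a caller supply a preallocated buffer: when the serialized data fits, it is copied out there and the out-of-line reply is suppressed (*propertiesCnt = 0); when it does not, *bufsize is zeroed and the old copyoutkdata() path returns OOL memory. Below is a user-space sketch against the raw MIG stub generated from the device.defs routine added later in this diff (IOKitLib would normally wrap this; calling the stub directly is purely illustrative):

static kern_return_t
sketch_get_props(io_object_t entry, void *prealloc, mach_vm_size_t prealloc_len)
{
	mach_vm_size_t bufsize = prealloc_len;
	io_buf_ptr_t ool = NULL;
	mach_msg_type_number_t ool_cnt = 0;

	kern_return_t kr = io_registry_entry_get_properties_bin_buf(entry,
	    (mach_vm_address_t)(uintptr_t)prealloc, &bufsize, &ool, &ool_cnt);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	if (ool_cnt != 0) {
		/* Did not fit: data arrived out-of-line instead. */
		/* ... consume ool[0 .. ool_cnt) ... */
		vm_deallocate(mach_task_self(), (vm_address_t)ool, ool_cnt);
	} else {
		/* bufsize bytes landed directly in prealloc. */
	}
	return KERN_SUCCESS;
}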
/* Routine io_registry_entry_set_properties */
kern_return_t
return TRUE;
}
- cpos = snprintf(buffer, bufferLength, "%u", (uint32_t)vers_major);
+ cpos = scnprintf(buffer, bufferLength, "%u", (uint32_t)vers_major);
/* Always include the minor version; it just looks weird without.
*/
buffer[cpos] = '.';
cpos++;
- cpos += snprintf(buffer + cpos, bufferLength - cpos, "%u", (uint32_t)vers_minor);
+ cpos += scnprintf(buffer + cpos, bufferLength - cpos, "%u", (uint32_t)vers_minor);
/* The revision is displayed only if nonzero.
*/
if (vers_revision) {
buffer[cpos] = '.';
cpos++;
- cpos += snprintf(buffer + cpos, bufferLength - cpos, "%u",
+ cpos += scnprintf(buffer + cpos, bufferLength - cpos, "%u",
(uint32_t)vers_revision);
}
* it can be tested at build-time and not require rev-locked submissions of xnu
* and AppleImage4.
*/
-#define IMG4_INTERFACE_VERSION (3u)
+#define IMG4_INTERFACE_VERSION (4u)
/*!
* @typedef img4_init_t
* @typedef img4_payload_init_with_vnode_4xnu_t
* A type describing the {@link img4_payload_init_with_vnode_4xnu} function.
*/
-typedef errno_t (*img4_payload_init_with_vnode_4xnu_t)(
+typedef errno_t (*const img4_payload_init_with_vnode_4xnu_t)(
img4_payload_t *i4p,
img4_tag_t tag,
vnode_t vn,
img4_payload_flags_t flags
);
+/*!
+ * @typedef img4_environment_init_identity_t
+ * A type describing the {@link img4_environment_init_identity} function.
+ */
+typedef errno_t (*const img4_environment_init_identity_t)(
+ img4_environment_t *i4e,
+ size_t len,
+ const img4_identity_t *i4id
+ );
+
/*!
* @typedef img4_interface_t
* A structure describing the interface to the AppleImage4 kext.
*
* @field i4if_v3.nonce_domain_cryptex
* The {@link IMG4_NONCE_DOMAIN_CRYPTEX} global.
+ *
+ * @field i4if_v4.environment_init_identity
+ * A pointer to the {@link img4_environment_init_identity} function.
*/
typedef struct _img4_interface {
const img4_nonce_domain_t *nonce_domain_pdi;
const img4_nonce_domain_t *nonce_domain_cryptex;
} i4if_v3;
- void *__reserved[15];
+ struct {
+ const img4_environment_init_identity_t environment_init_identity;
+ } i4if_v4;
+ void *__reserved[14];
} img4_interface_t;
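A sketch of how a v4 consumer in xnu might reach the new entry point through the interface structure. The img4if pointer and the leading i4if_version field are assumed from this header's versioned-struct conventions; only the i4if_v4 member is defined by this change:

static errno_t
sketch_init_custom_env(img4_environment_t *env, size_t env_len,
    const img4_identity_t *ident)
{
	if (img4if == NULL || img4if->i4if_version < 4) {
		return ENOSYS;   /* loaded AppleImage4 predates v4 */
	}
	return img4if->i4if_v4.environment_init_identity(env, env_len, ident);
}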
__BEGIN_DECLS
tp += strlen(tp);
break;
}
- tp += snprintf(tp, sizeof(tmp), "%x", words[i]);
+ tp += scnprintf(tp, sizeof(tmp), "%x", words[i]);
}
/* Was it a trailing run of 0x00's? */
if (best.base != -1 && (best.base + best.len) ==
T_LOG("Doing os_log of %llu TESTLOG msgs for fn " ident, count); \
for (uint64_t i = 0; i < count; i++) \
{ \
- datalen = snprintf(databuffer, sizeof(databuffer), TESTOSLOGFMT(ident), uniqid, i + 1, count); \
+ datalen = scnprintf(databuffer, sizeof(databuffer), TESTOSLOGFMT(ident), uniqid, i + 1, count); \
checksum = crc32(0, databuffer, datalen); \
callout_f(OS_LOG_DEFAULT, TESTOSLOG(ident), checksum, uniqid, i + 1, count); \
/*T_LOG(TESTOSLOG(ident), checksum, uniqid, i + 1, count);*/ \
T_ASSERT_NE_UINT(0, uniqid, "random number should not be zero");
T_ASSERT_NE_ULLONG(0, a, "absolute time should not be zero");
- datalen = snprintf(databuffer, sizeof(databuffer), TESTOSLOGFMT("printf_only"), uniqid, seqno, total_seqno);
+ datalen = scnprintf(databuffer, sizeof(databuffer), TESTOSLOGFMT("printf_only"), uniqid, seqno, total_seqno);
checksum = crc32(0, databuffer, datalen);
printf(TESTOSLOG("printf_only") "mat%llu\n", checksum, uniqid, seqno, total_seqno, a);
seqno += 1;
- datalen = snprintf(databuffer, sizeof(databuffer), TESTOSLOGFMT("printf_only"), uniqid, seqno, total_seqno);
+ datalen = scnprintf(databuffer, sizeof(databuffer), TESTOSLOGFMT("printf_only"), uniqid, seqno, total_seqno);
checksum = crc32(0, databuffer, datalen);
printf(TESTOSLOG("printf_only") "mat%llu\n", checksum, uniqid, seqno, total_seqno, a);
- datalen = snprintf(databuffer, sizeof(databuffer), "kernel^0^test^printf_only#mat%llu", a);
+ datalen = scnprintf(databuffer, sizeof(databuffer), "kernel^0^test^printf_only#mat%llu", a);
match_count = find_pattern_in_buffer(databuffer, datalen, total_seqno);
T_EXPECT_EQ_UINT(match_count, 2, "verify printf_only goes to systemlog buffer");
total_msg = oslog_p_total_msgcount;
saved_msg = oslog_p_saved_msgcount;
dropped_msg = oslog_p_dropped_msgcount;
- datalen = snprintf(databuffer, sizeof(databuffer), TESTOSLOGFMT("oslog_info"), uniqid, seqno, total_seqno);
+ datalen = scnprintf(databuffer, sizeof(databuffer), TESTOSLOGFMT("oslog_info"), uniqid, seqno, total_seqno);
checksum = crc32(0, databuffer, datalen);
os_log_info(log_handle, TESTOSLOG("oslog_info") "mat%llu", checksum, uniqid, seqno, total_seqno, a);
T_EXPECT_GE_UINT((oslog_p_total_msgcount - total_msg), 1, "total message count in buffer");
- datalen = snprintf(databuffer, sizeof(databuffer), "kernel^0^test^oslog_info#mat%llu", a);
+ datalen = scnprintf(databuffer, sizeof(databuffer), "kernel^0^test^oslog_info#mat%llu", a);
match_count = find_pattern_in_buffer(databuffer, datalen, total_seqno);
T_EXPECT_EQ_UINT(match_count, 1, "verify oslog_info does not go to systemlog buffer");
/* Any cleanup for our pushed context should go here */
}
-
void
DebuggerCall(
unsigned int reason,
/* TODO: decide what to do if no debugger config */
#endif
}
+
+boolean_t
+bootloader_valid_page(ppnum_t ppn)
+{
+ return pmap_bootloader_page(ppn);
+}
uint64_t addr;
uint64_t len;
#define PMAP_IO_RANGE_STRONG_SYNC (1UL << 31) // Strong DSB required for pages in this range
+ #define PMAP_IO_RANGE_CARVEOUT (1UL << 30) // Corresponds to memory carved out by bootloader
uint32_t wimg; // lower 16 bits treated as pp_attr_t, upper 16 bits contain additional mapping flags
uint32_t signature; // 4CC
} __attribute__((packed)) pmap_io_range_t;
PMAP_SUPPORT_PROTOTYPES(
kern_return_t,
- mapping_replenish, (void), MAPPING_REPLENISH_INDEX);
+ mapping_replenish, (uint32_t kern_target_count, uint32_t user_target_count), MAPPING_REPLENISH_INDEX);
PMAP_SUPPORT_PROTOTYPES(
boolean_t,
thread_t mapping_replenish_thread;
event_t mapping_replenish_event;
-event_t pmap_user_pv_throttle_event;
volatile uint32_t mappingrecurse = 0;
-uint64_t pmap_pv_throttle_stat;
-uint64_t pmap_pv_throttled_waiters;
-
unsigned pmap_mapping_thread_wakeups;
-unsigned pmap_kernel_reserve_replenish_stat MARK_AS_PMAP_DATA;
-unsigned pmap_user_reserve_replenish_stat MARK_AS_PMAP_DATA;
+unsigned pmap_reserve_replenish_stat MARK_AS_PMAP_DATA;
unsigned pmap_kern_reserve_alloc_stat MARK_AS_PMAP_DATA;
static inline void PV_ALLOC(pv_entry_t **pv_ep);
static inline void PV_KERN_ALLOC(pv_entry_t **pv_e);
-static inline void PV_FREE_LIST(pv_entry_t *pv_eh, pv_entry_t *pv_et, int pv_cnt);
-static inline void PV_KERN_FREE_LIST(pv_entry_t *pv_eh, pv_entry_t *pv_et, int pv_cnt);
-
-static inline void pmap_pv_throttle(pmap_t p);
+static inline void PV_FREE_LIST(pv_entry_t *pv_eh, pv_entry_t *pv_et, int pv_cnt, uint32_t kern_target);
static boolean_t
pv_alloc(
pv_cnt++;
pv_e++;
}
- PV_KERN_FREE_LIST(pv_eh, pv_et, pv_cnt);
+ PV_FREE_LIST(pv_eh, pv_et, pv_cnt, pv_kern_low_water_mark);
if (pmap != NULL) {
PMAP_LOCK(pmap);
}
} else {
UNLOCK_PVH(pai);
PMAP_UNLOCK(pmap);
- pmap_pv_throttle(pmap);
- {
- pv_entry_t *pv_e;
- pv_entry_t *pv_eh;
- pv_entry_t *pv_et;
- int pv_cnt;
- unsigned j;
- pmap_paddr_t pa;
- kern_return_t ret;
- ret = pmap_pages_alloc(&pa, PAGE_SIZE, 0);
+ pv_entry_t *pv_e;
+ pv_entry_t *pv_eh;
+ pv_entry_t *pv_et;
+ int pv_cnt;
+ unsigned j;
+ pmap_paddr_t pa;
+ kern_return_t ret;
- if (ret != KERN_SUCCESS) {
- panic("%s: failed to alloc page, ret=%d, "
- "pmap=%p, pai=%u, pvepp=%p",
- __FUNCTION__, ret,
- pmap, pai, pvepp);
- }
+ ret = pmap_pages_alloc(&pa, PAGE_SIZE, 0);
- pv_page_count++;
+ if (ret != KERN_SUCCESS) {
+ panic("%s: failed to alloc page, ret=%d, "
+ "pmap=%p, pai=%u, pvepp=%p",
+ __FUNCTION__, ret,
+ pmap, pai, pvepp);
+ }
- pv_e = (pv_entry_t *)phystokv(pa);
- pv_cnt = 0;
- pv_eh = pv_et = PV_ENTRY_NULL;
- *pvepp = pv_e;
- pv_e++;
+ pv_page_count++;
- for (j = 1; j < (PAGE_SIZE / sizeof(pv_entry_t)); j++) {
- pv_e->pve_next = pv_eh;
- pv_eh = pv_e;
+ pv_e = (pv_entry_t *)phystokv(pa);
+ pv_cnt = 0;
+ pv_eh = pv_et = PV_ENTRY_NULL;
+ *pvepp = pv_e;
+ pv_e++;
- if (pv_et == PV_ENTRY_NULL) {
- pv_et = pv_e;
- }
- pv_cnt++;
- pv_e++;
+ for (j = 1; j < (PAGE_SIZE / sizeof(pv_entry_t)); j++) {
+ pv_e->pve_next = pv_eh;
+ pv_eh = pv_e;
+
+ if (pv_et == PV_ENTRY_NULL) {
+ pv_et = pv_e;
}
- PV_FREE_LIST(pv_eh, pv_et, pv_cnt);
+ pv_cnt++;
+ pv_e++;
}
+
+ PV_FREE_LIST(pv_eh, pv_et, pv_cnt, pv_kern_low_water_mark);
+
PMAP_LOCK(pmap);
LOCK_PVH(pai);
return FALSE;
pv_free(
pv_entry_t *pvep)
{
- PV_FREE_LIST(pvep, pvep, 1);
+ PV_FREE_LIST(pvep, pvep, 1, pv_kern_low_water_mark);
}
static void
pv_entry_t *pvetp,
unsigned int cnt)
{
- PV_FREE_LIST(pvehp, pvetp, cnt);
+ PV_FREE_LIST(pvehp, pvetp, cnt, pv_kern_low_water_mark);
}
static inline void
PV_ALLOC(pv_entry_t **pv_ep)
{
assert(*pv_ep == PV_ENTRY_NULL);
+ if (pv_kern_free_count < pv_kern_low_water_mark) {
+ /*
+ * If the kernel reserved pool is low, let non-kernel mappings wait for a page
+ * from the VM.
+ */
+ return;
+ }
pmap_simple_lock(&pv_free_list_lock);
- /*
- * If the kernel reserved pool is low, let non-kernel mappings allocate
- * synchronously, possibly subject to a throttle.
- */
- if ((pv_kern_free_count >= pv_kern_low_water_mark) && ((*pv_ep = pv_free_list) != 0)) {
+
+ if ((*pv_ep = pv_free_list) != 0) {
pv_free_list = (pv_entry_t *)(*pv_ep)->pve_next;
(*pv_ep)->pve_next = PV_ENTRY_NULL;
pv_free_count--;
}
static inline void
-PV_FREE_LIST(pv_entry_t *pv_eh, pv_entry_t *pv_et, int pv_cnt)
+PV_FREE_LIST(pv_entry_t *pv_eh, pv_entry_t *pv_et, int pv_cnt, uint32_t kern_target)
{
- pmap_simple_lock(&pv_free_list_lock);
- pv_et->pve_next = (pv_entry_t *)pv_free_list;
- pv_free_list = pv_eh;
- pv_free_count += pv_cnt;
- pmap_simple_unlock(&pv_free_list_lock);
+ bool use_kernel_list = false;
+ pmap_simple_lock(&pv_kern_free_list_lock);
+ if (pv_kern_free_count < kern_target) {
+ pv_et->pve_next = pv_kern_free_list;
+ pv_kern_free_list = pv_eh;
+ pv_kern_free_count += pv_cnt;
+ use_kernel_list = true;
+ }
+ pmap_simple_unlock(&pv_kern_free_list_lock);
+
+ if (!use_kernel_list) {
+ pmap_simple_lock(&pv_free_list_lock);
+ pv_et->pve_next = (pv_entry_t *)pv_free_list;
+ pv_free_list = pv_eh;
+ pv_free_count += pv_cnt;
+ pmap_simple_unlock(&pv_free_list_lock);
+ }
}
static inline void
pmap_simple_unlock(&pv_kern_free_list_lock);
}
-static inline void
-PV_KERN_FREE_LIST(pv_entry_t *pv_eh, pv_entry_t *pv_et, int pv_cnt)
-{
- pmap_simple_lock(&pv_kern_free_list_lock);
- pv_et->pve_next = pv_kern_free_list;
- pv_kern_free_list = pv_eh;
- pv_kern_free_count += pv_cnt;
- pmap_simple_unlock(&pv_kern_free_list_lock);
-}
-
-static inline void
-pmap_pv_throttle(__unused pmap_t p)
-{
- assert(p != kernel_pmap);
- /* Apply throttle on non-kernel mappings */
- if (pv_kern_free_count < (pv_kern_low_water_mark / 2)) {
- pmap_pv_throttle_stat++;
- /* This doesn't need to be strictly accurate, merely a hint
- * to eliminate the timeout when the reserve is replenished.
- */
- pmap_pv_throttled_waiters++;
- assert_wait_timeout(&pmap_user_pv_throttle_event, THREAD_UNINT, 1, 1000 * NSEC_PER_USEC);
- thread_block(THREAD_CONTINUE_NULL);
- }
-}
-
/*
* Creates a target number of free pv_entry_t objects for the kernel free list
* and the general free list.
MARK_AS_PMAP_TEXT static kern_return_t
mapping_free_prime_internal(void)
{
- unsigned j;
- pmap_paddr_t pa;
- kern_return_t ret;
- pv_entry_t *pv_e;
- pv_entry_t *pv_eh;
- pv_entry_t *pv_et;
- int pv_cnt;
- int alloc_options = 0;
- int needed_pv_cnt = 0;
- int target_pv_free_cnt = 0;
-
SECURITY_READ_ONLY_LATE(static boolean_t) mapping_free_prime_internal_called = FALSE;
SECURITY_READ_ONLY_LATE(static boolean_t) mapping_free_prime_internal_done = FALSE;
pv_alloc_chunk = PV_ALLOC_CHUNK_INITIAL;
}
- pv_cnt = 0;
- pv_eh = pv_et = PV_ENTRY_NULL;
- target_pv_free_cnt = PV_ALLOC_INITIAL_TARGET;
-
- /*
- * We don't take the lock to read pv_free_count, as we should not be
- * invoking this from a multithreaded context.
- */
- needed_pv_cnt = target_pv_free_cnt - pv_free_count;
-
- if (needed_pv_cnt > target_pv_free_cnt) {
- needed_pv_cnt = 0;
- }
-
- while (pv_cnt < needed_pv_cnt) {
- ret = pmap_pages_alloc(&pa, PAGE_SIZE, alloc_options);
-
- assert(ret == KERN_SUCCESS);
-
- pv_page_count++;
-
- pv_e = (pv_entry_t *)phystokv(pa);
-
- for (j = 0; j < (PAGE_SIZE / sizeof(pv_entry_t)); j++) {
- pv_e->pve_next = pv_eh;
- pv_eh = pv_e;
-
- if (pv_et == PV_ENTRY_NULL) {
- pv_et = pv_e;
- }
- pv_cnt++;
- pv_e++;
- }
- }
-
- if (pv_cnt) {
- PV_FREE_LIST(pv_eh, pv_et, pv_cnt);
- }
-
- pv_cnt = 0;
- pv_eh = pv_et = PV_ENTRY_NULL;
- target_pv_free_cnt = PV_KERN_ALLOC_INITIAL_TARGET;
-
- /*
- * We don't take the lock to read pv_kern_free_count, as we should not
- * be invoking this from a multithreaded context.
- */
- needed_pv_cnt = target_pv_free_cnt - pv_kern_free_count;
-
- if (needed_pv_cnt > target_pv_free_cnt) {
- needed_pv_cnt = 0;
- }
-
- while (pv_cnt < needed_pv_cnt) {
- ret = pmap_pages_alloc(&pa, PAGE_SIZE, alloc_options);
-
- assert(ret == KERN_SUCCESS);
- pv_page_count++;
-
- pv_e = (pv_entry_t *)phystokv(pa);
-
- for (j = 0; j < (PAGE_SIZE / sizeof(pv_entry_t)); j++) {
- pv_e->pve_next = pv_eh;
- pv_eh = pv_e;
-
- if (pv_et == PV_ENTRY_NULL) {
- pv_et = pv_e;
- }
- pv_cnt++;
- pv_e++;
- }
- }
-
- if (pv_cnt) {
- PV_KERN_FREE_LIST(pv_eh, pv_et, pv_cnt);
- }
-
- mapping_free_prime_internal_done = TRUE;
- return KERN_SUCCESS;
+ return mapping_replenish_internal(PV_KERN_ALLOC_INITIAL_TARGET, PV_ALLOC_INITIAL_TARGET);
}
void
* Fills the kernel and general PV free lists back up to their low watermarks.
*/
MARK_AS_PMAP_TEXT static kern_return_t
-mapping_replenish_internal(void)
+mapping_replenish_internal(uint32_t kern_target_count, uint32_t user_target_count)
{
pv_entry_t *pv_e;
pv_entry_t *pv_eh;
pmap_paddr_t pa;
kern_return_t ret = KERN_SUCCESS;
- while (pv_kern_free_count < pv_kern_low_water_mark) {
+ while ((pv_free_count < user_target_count) || (pv_kern_free_count < kern_target_count)) {
pv_cnt = 0;
pv_eh = pv_et = PV_ENTRY_NULL;
pv_cnt++;
pv_e++;
}
- pmap_kernel_reserve_replenish_stat += pv_cnt;
- PV_KERN_FREE_LIST(pv_eh, pv_et, pv_cnt);
- }
-
- while (pv_free_count < pv_low_water_mark) {
- pv_cnt = 0;
- pv_eh = pv_et = PV_ENTRY_NULL;
-
- ret = pmap_pages_alloc(&pa, PAGE_SIZE, 0);
- assert(ret == KERN_SUCCESS);
-
- pv_page_count++;
-
- pv_e = (pv_entry_t *)phystokv(pa);
-
- for (j = 0; j < (PAGE_SIZE / sizeof(pv_entry_t)); j++) {
- pv_e->pve_next = pv_eh;
- pv_eh = pv_e;
-
- if (pv_et == PV_ENTRY_NULL) {
- pv_et = pv_e;
- }
- pv_cnt++;
- pv_e++;
- }
- pmap_user_reserve_replenish_stat += pv_cnt;
- PV_FREE_LIST(pv_eh, pv_et, pv_cnt);
+ pmap_reserve_replenish_stat += pv_cnt;
+ PV_FREE_LIST(pv_eh, pv_et, pv_cnt, kern_target_count);
}
return ret;
current_thread()->options |= TH_OPT_VMPRIV;
for (;;) {
- kr = mapping_replenish_internal();
+ kr = mapping_replenish_internal(pv_kern_low_water_mark, pv_low_water_mark);
if (kr != KERN_SUCCESS) {
panic("%s: failed, kr=%d", __FUNCTION__, kr);
}
- /*
- * Wake threads throttled while the kernel reserve was being replenished.
- */
- if (pmap_pv_throttled_waiters) {
- pmap_pv_throttled_waiters = 0;
- thread_wakeup(&pmap_user_pv_throttle_event);
- }
-
/* Check if the kernel pool has been depleted since the
* first pass, to reduce refill latency.
*/
return pa_valid(ptoa(pn));
}
+boolean_t
+pmap_bootloader_page(
+ ppnum_t pn)
+{
+ pmap_paddr_t paddr = ptoa(pn);
+
+ if (pa_valid(paddr)) {
+ return FALSE;
+ }
+ pmap_io_range_t *io_rgn = pmap_find_io_attr(paddr);
+ return (io_rgn != NULL) && (io_rgn->wimg & PMAP_IO_RANGE_CARVEOUT);
+}
+
MARK_AS_PMAP_TEXT static boolean_t
pmap_is_empty_internal(
pmap_t pmap,
uint64_t *, int *, int *, int *);
extern boolean_t pmap_valid_page(ppnum_t pn);
+extern boolean_t pmap_bootloader_page(ppnum_t pn);
#define MACHINE_PMAP_IS_EMPTY 1
extern boolean_t pmap_is_empty(pmap_t pmap, vm_map_offset_t start, vm_map_offset_t end);
#ifdef __x86_64__
uint32_t lock_timeout_ticks = UINT32_MAX;
#else
- uint32_t lock_timeout_ticks = LockTimeOut;
+ uint32_t lock_timeout_ticks = LockTimeOut * 2; // 250ms is not enough, 500 is just right
#endif
mp_disable_preemption();
out exists : boolean_t
);
+routine io_registry_entry_get_properties_bin_buf(
+ registry_entry : io_object_t;
+ in buf : mach_vm_address_t;
+ inout bufsize : mach_vm_size_t;
+ out properties : io_buf_ptr_t, physicalcopy
+ );
+
+routine io_registry_entry_get_property_bin_buf(
+ registry_entry : io_object_t;
+ in plane : io_name_t;
+ in property_name : io_name_t;
+ in options : uint32_t;
+ in buf : mach_vm_address_t;
+ inout bufsize : mach_vm_size_t;
+ out properties : io_buf_ptr_t, physicalcopy
+ );
+
#endif /* IOKIT */
/* vim: set ft=c : */
#include <mach/port.h>
#if PRIVATE
-#define IOKIT_SERVER_VERSION 20190423
+#define IOKIT_SERVER_VERSION 20190926
#endif
/* Returns TRUE if a page belongs to the EFI Runtime Services (code or data) */
boolean_t
-efi_valid_page(ppnum_t ppn)
+bootloader_valid_page(ppnum_t ppn)
{
boot_args *args = (boot_args *)PE_state.bootArgs;
ppnum_t pstart = args->efiRuntimeServicesPageStart;
}
static void
-physmap_init(uint8_t phys_random_L3)
+physmap_init(uint8_t phys_random_L3, uint64_t *new_physmap_base, uint64_t *new_physmap_max)
{
pt_entry_t *l3pte;
int pml4_index, i;
| INTEL_PTE_WRITE;
}
- physmap_base = KVADDR(kernPhysPML4Index, phys_random_L3, 0, 0);
+ *new_physmap_base = KVADDR(kernPhysPML4Index, phys_random_L3, 0, 0);
/*
* physAddr contains the last-mapped physical address, so that's what we
* add to physmap_base to derive the ending VA for the physmap.
*/
- physmap_max = physmap_base + physAddr;
+ *new_physmap_max = *new_physmap_base + physAddr;
- DBG("Physical address map base: 0x%qx\n", physmap_base);
+ DBG("Physical address map base: 0x%qx\n", *new_physmap_base);
for (i = kernPhysPML4Index; i < (kernPhysPML4Index + kernPhysPML4EntryCount); i++) {
DBG("Physical map idlepml4[%d]: 0x%llx\n", i, IdlePML4[i]);
}
Idle_PTs_init(void)
{
uint64_t rand64;
+ uint64_t new_physmap_base, new_physmap_max;
/* Allocate the "idle" kernel page tables: */
KPTphys = ALLOCPAGES(NKPT); /* level 1 */
* two 8-bit entropy values needed for address randomization.
*/
rand64 = early_random();
- physmap_init(rand64 & 0xFF);
+ physmap_init(rand64 & 0xFF, &new_physmap_base, &new_physmap_max);
doublemap_init((rand64 >> 8) & 0xFF);
idt64_remap();
postcode(VSTART_SET_CR3);
- // Switch to the page tables..
+ /*
+ * Switch to the page tables. We set physmap_base and physmap_max just
+ * before switching to the new page tables to avoid someone calling
+ * kprintf() or otherwise using physical memory in between.
+ * This is needed because kprintf() accesses physical memory through
+ * ml_phys_read_data() and PHYSMAP_PTOV, which require physmap_base to
+ * be set correctly.
+ */
+ physmap_base = new_physmap_base;
+ physmap_max = new_physmap_max;
set_cr3_raw((uintptr_t)ID_MAP_VTOP(IdlePML4));
}
boolean_t is_boot_cpu = !(boot_args_start == 0);
int cpu = 0;
uint32_t lphysfree;
+#if DEBUG
+ uint64_t gsbase;
+#endif
+
postcode(VSTART_ENTRY);
set_cr3_raw((uintptr_t)ID_MAP_VTOP(IdlePML4));
/* Find our logical cpu number */
cpu = lapic_to_cpu[(LAPIC_READ(ID) >> LAPIC_ID_SHIFT) & LAPIC_ID_MASK];
- DBG("CPU: %d, GSBASE initial value: 0x%llx\n", cpu, rdmsr64(MSR_IA32_GS_BASE));
+#if DEBUG
+ gsbase = rdmsr64(MSR_IA32_GS_BASE);
+#endif
cpu_desc_load(cpu_datap(cpu));
+ DBG("CPU: %d, GSBASE initial value: 0x%llx\n", cpu, gsbase);
}
early_boot = 0;
lck_mod_init();
+ printf_init(); /* Init this in case we need debugger */
+
/*
* Initialize the timer callout world
*/
postcode(CPU_INIT_D);
- printf_init(); /* Init this in case we need debugger */
panic_init(); /* Init this in case we need debugger */
/* setup debugging output if one has been chosen */
if (ppn != 0)
{
if (((vcur < debug_start) || (vcur >= debug_end))
- && !(EFI_VALID_PAGE(ppn) || pmap_valid_page(ppn))
+ && !(pmap_valid_page(ppn) || bootloader_valid_page(ppn))
#if defined(XNU_TARGET_OS_BRIDGE)
// include the macOS panic region if it's mapped
&& ((vcur < macos_panic_start) || (vcur >= macos_panic_end))
int kern_dump_seek_to_next_file(void *kdp_core_out_varss, uint64_t next_file_offset);
-extern boolean_t efi_valid_page(ppnum_t ppn);
-#if defined(__x86_64__)
-#define EFI_VALID_PAGE(x) efi_valid_page(x)
-#elif defined(__arm__) || defined(__arm64__)
-#define EFI_VALID_PAGE(x) (FALSE)
-#endif /* defined (__x86_64__) */
+extern boolean_t bootloader_valid_page(ppnum_t ppn);
#endif /* PRIVATE */
CS_HASH_MAX_SIZE = 48, /* max size of the hash we'll support */
/*
- * Currently only to support Legacy VPN plugins,
+ * Currently only to support Legacy VPN plugins and the Mac App Store,
* but intended to replace all the various platform code, dev code etc. bits.
*/
CS_SIGNER_TYPE_UNKNOWN = 0,
CS_SIGNER_TYPE_LEGACYVPN = 5,
+ CS_SIGNER_TYPE_MAC_APP_STORE = 6,
};
#define KERNEL_HAVE_CS_CODEDIRECTORY 1
#include <security/mac_mach_internal.h>
+#if CONFIG_CSR
+#include <sys/csr.h>
+#endif
+
#if CONFIG_EMBEDDED && !SECURE_KERNEL
extern int cs_relax_platform_task_ports;
#endif
* KERN_INVALID_ARGUMENT The task is null.
* KERN_FAILURE The task/space is dead.
* KERN_INVALID_ARGUMENT Invalid special port.
- * KERN_NO_ACCESS Attempted overwrite of seatbelt port.
+ * KERN_NO_ACCESS Restricted access to set port.
*/
kern_return_t
int which,
ipc_port_t port)
{
- ipc_port_t *whichp;
- ipc_port_t old;
-
if (task == TASK_NULL) {
return KERN_INVALID_ARGUMENT;
}
return KERN_NO_ACCESS;
}
+ switch (which) {
+ case TASK_KERNEL_PORT:
+ case TASK_HOST_PORT:
+#if CONFIG_CSR
+ if (csr_check(CSR_ALLOW_KERNEL_DEBUGGER) == 0) {
+ /*
+ * Only allow setting of task-self / task-host
+ * special ports from user-space when SIP is
+ * disabled (for Mach-on-Mach emulation).
+ */
+ break;
+ }
+#endif
+ return KERN_NO_ACCESS;
+ default:
+ break;
+ }
+
+ return task_set_special_port_internal(task, which, port);
+}
+
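From user space, the observable effect of this split is that task_set_special_port() now refuses TASK_KERNEL_PORT and TASK_HOST_PORT with KERN_NO_ACCESS unless SIP's kernel-debugger protection is disabled, while in-kernel callers use task_set_special_port_internal() and bypass the check. A small hedged caller to illustrate; the expected result is inferred from the logic above:

	#include <mach/mach.h>
	#include <mach/mach_error.h>
	#include <stdio.h>

	int
	main(void)
	{
		mach_port_t port;
		kern_return_t kr;

		kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port);
		if (kr != KERN_SUCCESS) {
			return 1;
		}
		kr = mach_port_insert_right(mach_task_self(), port, port,
		    MACH_MSG_TYPE_MAKE_SEND);
		if (kr != KERN_SUCCESS) {
			return 1;
		}

		/* With SIP enabled, this should now fail with KERN_NO_ACCESS. */
		kr = task_set_special_port(mach_task_self(), TASK_KERNEL_PORT, port);
		printf("task_set_special_port(TASK_KERNEL_PORT) -> %s\n",
		    mach_error_string(kr));
		return 0;
	}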
+/*
+ * Routine: task_set_special_port_internal
+ * Purpose:
+ * Changes one of the task's special ports,
+ * setting it to the supplied send right.
+ * Conditions:
+ * Nothing locked. If successful, consumes
+ * the supplied send right.
+ * Returns:
+ * KERN_SUCCESS Changed the special port.
+ * KERN_INVALID_ARGUMENT The task is null.
+ * KERN_FAILURE The task/space is dead.
+ * KERN_INVALID_ARGUMENT Invalid special port.
+ * KERN_NO_ACCESS Restricted access to overwrite port.
+ */
+
+kern_return_t
+task_set_special_port_internal(
+ task_t task,
+ int which,
+ ipc_port_t port)
+{
+ ipc_port_t *whichp;
+ ipc_port_t old;
+
+ if (task == TASK_NULL) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
switch (which) {
case TASK_KERNEL_PORT:
whichp = &task->itk_sself;
return KERN_FAILURE;
}
- /* do not allow overwrite of seatbelt or task access ports */
- if ((TASK_SEATBELT_PORT == which || TASK_ACCESS_PORT == which)
- && IP_VALID(*whichp)) {
- itk_unlock(task);
- return KERN_NO_ACCESS;
+ /* Never allow overwrite of the seatbelt or task access ports */
+ switch (which) {
+ case TASK_SEATBELT_PORT:
+ case TASK_ACCESS_PORT:
+ if (IP_VALID(*whichp)) {
+ itk_unlock(task);
+ return KERN_NO_ACCESS;
+ }
+ break;
+ default:
+ break;
}
old = *whichp;
return KERN_SUCCESS;
}
-
/*
* Routine: mach_ports_register [kernel call]
* Purpose:
kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, STACKSHOT_KCTYPE_OSVERSION, length_to_copy, &out_addr));
stackshot_strlcpy((char*)out_addr, &version[0], length_to_copy);
- length_to_copy = MIN((uint32_t)(strlen(PE_boot_args()) + 1), OSVERSIZE);
+ length_to_copy = MIN((uint32_t)(strlen(PE_boot_args()) + 1), BOOT_LINE_LENGTH);
kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, STACKSHOT_KCTYPE_BOOTARGS, length_to_copy, &out_addr));
stackshot_strlcpy((char*)out_addr, PE_boot_args(), length_to_copy);
task_t leader = TASK_NULL;
jcs->jcs_id = coalition_id(coal);
jcs->jcs_flags = 0;
+ jcs->jcs_thread_group = 0;
if (coalition_term_requested(coal)) {
jcs->jcs_flags |= kCoalitionTermRequested;
extern void printf_init(void);
extern int snprintf(char *, size_t, const char *, ...) __printflike(3, 4);
+extern int scnprintf(char *, size_t, const char *, ...) __printflike(3, 4);
extern void log(int level, char *fmt, ...);
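The reason for introducing scnprintf(): snprintf() returns the length the formatted string would have had, which can exceed the buffer, so the `size -= ret; string_off += ret;` pattern used throughout the call sites below walks past the end of the buffer once truncation starts. scnprintf() returns the number of characters actually stored. A hedged sketch of the contract (the real implementation lives elsewhere in the kernel; this just pins down the semantics):

	#include <stdarg.h>
	#include <stdio.h>

	/* Sketch: like snprintf(), but the return value never exceeds what fit. */
	int
	scnprintf_sketch(char *buf, size_t size, const char *fmt, ...)
	{
		va_list ap;
		int would_be;

		if (size == 0) {
			return 0;
		}
		va_start(ap, fmt);
		would_be = vsnprintf(buf, size, fmt, ap);
		va_end(ap);

		if (would_be < 0) {
			return 0;
		}
		if ((size_t)would_be >= size) {
			return (int)(size - 1);   /* truncated: size - 1 chars stored */
		}
		return would_be;
	}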
/* initialize exceptions */
exception_init();
+#if CONFIG_SCHED_SFI
+ kernel_bootstrap_log("sfi_init");
+ sfi_init();
+#endif
+
/*
* Create a kernel thread to execute the kernel bootstrap.
*/
arm_vm_prot_finalize(PE_state.bootArgs);
#endif
-#if CONFIG_SCHED_SFI
- kernel_bootstrap_log("sfi_init");
- sfi_init();
-#endif
-
/*
* Initialize the globals used for permuting kernel
* addresses that may be exported to userland as tokens
ipc_task_reset(task);
/* Remove the naked send right for task port, needed to arm no sender notification */
- task_set_special_port(task, TASK_KERNEL_PORT, IPC_PORT_NULL);
+ task_set_special_port_internal(task, TASK_KERNEL_PORT, IPC_PORT_NULL);
ipc_task_enable(task);
task_unlock(task);
task_lock(task);
task->sec_token = sec_token;
task->audit_token = audit_token;
-
task_unlock(task);
if (host_priv != HOST_PRIV_NULL) {
kr = host_get_host_port(host_priv_self(), &host_port);
}
assert(kr == KERN_SUCCESS);
- kr = task_set_special_port(task, TASK_HOST_PORT, host_port);
+
+ kr = task_set_special_port_internal(task, TASK_HOST_PORT, host_port);
return kr;
}
uint8_t t_returnwaitflags,
task_t *child_task); /* OUT */
+extern kern_return_t task_set_special_port_internal(
+ task_t task,
+ int which,
+ ipc_port_t port);
+
extern kern_return_t task_info(
task_t task,
task_flavor_t flavor,
break;
}
- return snprintf(buffer, size, "%s ", type);
+ return scnprintf(buffer, size, "%s ", type);
}
int
int string_off = 0;
int ret = 0;
- ret = snprintf(&buffer[string_off], size, "\n");
+ ret = scnprintf(&buffer[string_off], size, "\n");
size -= ret;
string_off += ret;
for (i = 0; i < TEST_MTX_MAX_STATS; i++) {
struct lck_mtx_test_stats_elem* stat = &lck_mtx_test_stats[i];
- ret = snprintf(&buffer[string_off], size, "{ ");
+ ret = scnprintf(&buffer[string_off], size, "{ ");
size -= ret;
string_off += ret;
lck_spin_lock(&stat->lock);
uint64_t time;
- ret = snprintf(&buffer[string_off], size, "samples %llu, ", stat->samples);
+ ret = scnprintf(&buffer[string_off], size, "samples %llu, ", stat->samples);
size -= ret;
string_off += ret;
absolutetime_to_nanoseconds(stat->tot, &time);
- ret = snprintf(&buffer[string_off], size, "tot %llu ns, ", time);
+ ret = scnprintf(&buffer[string_off], size, "tot %llu ns, ", time);
size -= ret;
string_off += ret;
absolutetime_to_nanoseconds(stat->avg, &time);
- ret = snprintf(&buffer[string_off], size, "avg %llu ns, ", time);
+ ret = scnprintf(&buffer[string_off], size, "avg %llu ns, ", time);
size -= ret;
string_off += ret;
absolutetime_to_nanoseconds(stat->max, &time);
- ret = snprintf(&buffer[string_off], size, "max %llu ns, ", time);
+ ret = scnprintf(&buffer[string_off], size, "max %llu ns, ", time);
size -= ret;
string_off += ret;
absolutetime_to_nanoseconds(stat->min, &time);
- ret = snprintf(&buffer[string_off], size, "min %llu ns", time);
+ ret = scnprintf(&buffer[string_off], size, "min %llu ns", time);
size -= ret;
string_off += ret;
lck_spin_unlock(&stat->lock);
- ret = snprintf(&buffer[string_off], size, " } ");
+ ret = scnprintf(&buffer[string_off], size, " } ");
size -= ret;
string_off += ret;
size -= ret;
string_off += ret;
- ret = snprintf(&buffer[string_off], size, "\n");
+ ret = scnprintf(&buffer[string_off], size, "\n");
size -= ret;
string_off += ret;
}
int string_off = 0;
int ret = 0;
- ret = snprintf(&buffer[string_off], size, "\n");
+ ret = scnprintf(&buffer[string_off], size, "\n");
size -= ret;
string_off += ret;
for (i = 0; i < TEST_MTX_MAX_STATS - 2; i++) {
- ret = snprintf(&buffer[string_off], size, "total time %llu ns total run time %llu ns ", tot_time[i], run_time[i]);
+ ret = scnprintf(&buffer[string_off], size, "total time %llu ns total run time %llu ns ", tot_time[i], run_time[i]);
size -= ret;
string_off += ret;
size -= ret;
string_off += ret;
- ret = snprintf(&buffer[string_off], size, "\n");
+ ret = scnprintf(&buffer[string_off], size, "\n");
size -= ret;
string_off += ret;
}
absolutetime_to_nanoseconds(end_loop_time - start_loop_time, &time);
absolutetime_to_nanoseconds(end_loop_time_run - start_loop_time_run, &time_run);
- ret = snprintf(buffer, buffer_size, "\n");
- ret += snprintf(&buffer[ret], buffer_size - ret, "total time %llu ns total run time %llu ns ", time, time_run);
+ ret = scnprintf(buffer, buffer_size, "\n");
+ ret += scnprintf(&buffer[ret], buffer_size - ret, "total time %llu ns total run time %llu ns ", time, time_run);
ret += print_test_mtx_stats_string_name(TEST_MTX_LOCK_STATS, &buffer[ret], buffer_size - ret);
- ret += snprintf(&buffer[ret], buffer_size - ret, "\n");
+ ret += scnprintf(&buffer[ret], buffer_size - ret, "\n");
return ret;
}
mask_saved_state_cpsr(arm_saved_state_t *iss, uint32_t set_bits, uint32_t clear_bits)
{
iss->cpsr |= set_bits;
- iss->cpsr &= clear_bits;
+ iss->cpsr &= ~clear_bits;
}
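The one-character fix above is the classic clear-bits bug: `x &= bits` keeps only the named bits, whereas `x &= ~bits` clears them. With concrete (arbitrary, illustrative) values:

	#include <stdint.h>

	void
	demo(void)
	{
		uint32_t cpsr  = 0x600003C5;    /* example saved-state value */
		uint32_t clear = 0x000000C0;    /* bits we intend to clear   */

		uint32_t wrong = cpsr & clear;  /* 0x000000C0: keeps ONLY the mask  */
		uint32_t right = cpsr & ~clear; /* 0x60000305: clears just the mask */
		(void)wrong; (void)right;
	}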
static inline void
#include <kern/misc_protos.h>
#include <vm/cpm.h>
#include <kern/ledger.h>
+#include <kern/bits.h>
#include <string.h>
*new_map = map;
return KERN_SUCCESS;
}
+/*
+ * The default percentage of memory that can be mlocked is scaled based on the total
+ * amount of memory in the system. These percentages are calculated
+ * offline and stored in this table. We index this table by
+ * log2(max_mem) - VM_USER_WIREABLE_MIN_CONFIG. We clamp this index in the range
+ * [0, sizeof(wire_limit_percents) / sizeof(vm_map_size_t))
+ *
+ * Note that these values were picked for macOS.
+ * If we ever have ARM devices with very large memory configurations, we may
+ * want to revisit them, since the kernel overhead is smaller there due to
+ * the larger page size.
+ */
+
+/* Start scaling iff we're managing > 2^32 = 4GB of RAM. */
+#define VM_USER_WIREABLE_MIN_CONFIG 32
+static vm_map_size_t wire_limit_percents[] =
+{ 70, 73, 76, 79, 82, 85, 88, 91, 94, 97};
+
+/*
+ * Sets the default global user wire limit which limits the amount of
+ * memory that can be locked via mlock(), based on the algorithm above.
+ * This can be overridden via a sysctl.
+ */
+static void
+kmem_set_user_wire_limits(void)
+{
+ uint64_t available_mem_log;
+ uint64_t max_wire_percent;
+ size_t wire_limit_percents_length = sizeof(wire_limit_percents) /
+ sizeof(vm_map_size_t);
+ vm_map_size_t limit;
+ available_mem_log = bit_floor(max_mem);
+
+ if (available_mem_log < VM_USER_WIREABLE_MIN_CONFIG) {
+ available_mem_log = 0;
+ } else {
+ available_mem_log -= VM_USER_WIREABLE_MIN_CONFIG;
+ }
+ if (available_mem_log >= wire_limit_percents_length) {
+ available_mem_log = wire_limit_percents_length - 1;
+ }
+ max_wire_percent = wire_limit_percents[available_mem_log];
+
+ limit = max_mem * max_wire_percent / 100;
+ /* Cap the number of non-lockable bytes at VM_NOT_USER_WIREABLE_MAX */
+ if (max_mem - limit > VM_NOT_USER_WIREABLE_MAX) {
+ limit = max_mem - VM_NOT_USER_WIREABLE_MAX;
+ }
+
+ vm_global_user_wire_limit = limit;
+ /* the default per-task limit is the same as the global limit */
+ vm_per_task_user_wire_limit = limit;
+}
+
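A worked pass through kmem_set_user_wire_limits(), assuming bit_floor() returns the index of the most significant set bit: for a hypothetical 16 GB machine, max_mem = 2^34, the table index is 34 - 32 = 2, and the chosen percentage is 76.

	#include <stdint.h>

	void
	wire_limit_walkthrough(void)
	{
		uint64_t max_mem = 16ULL << 30;         /* 2^34 bytes                     */
		uint64_t idx     = 34 - 32;             /* log2(max_mem) - MIN_CONFIG = 2 */
		uint64_t pct     = 76;                  /* wire_limit_percents[2]         */
		uint64_t limit   = max_mem * pct / 100; /* ~12.16 GB wireable             */

		/*
		 * The unwireable remainder is ~3.84 GB, far below the 32 GB
		 * VM_NOT_USER_WIREABLE_MAX cap, so the clamp leaves the limit
		 * unchanged at this memory size.
		 */
		(void)idx; (void)limit;
	}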
/*
* kmem_init:
}
#endif
- /*
- * Set the default global user wire limit which limits the amount of
- * memory that can be locked via mlock(). We set this to the total
- * amount of memory that are potentially usable by a user app (max_mem)
- * minus a certain amount. This can be overridden via a sysctl.
- */
- vm_global_no_user_wire_amount = MIN(max_mem * 20 / 100,
- VM_NOT_USER_WIREABLE);
- vm_global_user_wire_limit = max_mem - vm_global_no_user_wire_amount;
-
- /* the default per user limit is the same as the global limit */
- vm_user_wire_limit = vm_global_user_wire_limit;
+ kmem_set_user_wire_limits();
}
-
/*
* Routine: copyinmap
* Purpose:
vm_offset_t start,
vm_offset_t end);
-
extern kern_return_t copyinmap(
vm_map_t map,
vm_map_offset_t fromaddr,
#define VM_PROTECT_WX_FAIL 1
#endif /* CONFIG_EMBEDDED */
uint64_t vm_memory_malloc_no_cow_mask = 0ULL;
+#if DEBUG
+int vm_check_map_sanity = 0;
+#endif
/*
* vm_map_init:
&vm_memory_malloc_no_cow_mask,
sizeof(vm_memory_malloc_no_cow_mask));
}
+
+#if DEBUG
+ PE_parse_boot_argn("vm_check_map_sanity", &vm_check_map_sanity, sizeof(vm_check_map_sanity));
+ if (vm_check_map_sanity) {
+ kprintf("VM sanity checking enabled\n");
+ } else {
+ kprintf("VM sanity checking disabled. Set bootarg vm_check_map_sanity=1 to enable\n");
+ }
+#endif /* DEBUG */
}
void
/*
* Since this is the first time the user is wiring this map entry, check to see if we're
* exceeding the user wire limits. There is a per map limit which is the smaller of either
- * the process's rlimit or the global vm_user_wire_limit which caps this value. There is also
+ * the process's rlimit or the global vm_per_task_user_wire_limit which caps this value. There is also
* a system-wide limit on the amount of memory all users can wire. If the user is over either
* limit, then we fail.
*/
- if (size + map->user_wire_size > MIN(map->user_wire_limit, vm_user_wire_limit) ||
- size + ptoa_64(total_wire_count) > vm_global_user_wire_limit ||
- size + ptoa_64(total_wire_count) > max_mem - vm_global_no_user_wire_amount) {
+ if (size + map->user_wire_size > MIN(map->user_wire_limit, vm_per_task_user_wire_limit) ||
+ size + ptoa_64(total_wire_count) > vm_global_user_wire_limit) {
return KERN_RESOURCE_SHORTAGE;
}
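To make the rewritten gate concrete, continuing the hypothetical 16 GB walkthrough above: with a global limit near 12.16 GB, a 256 MB request fails once the system-wide wired total has reached 12 GB. A sketch with example numbers:

	#include <stdint.h>
	#include <mach/kern_return.h>

	kern_return_t
	wire_gate_example(void)
	{
		uint64_t global_limit  = 12160ULL << 20; /* ~12.16 GB, from the sketch above */
		uint64_t already_wired = 12000ULL << 20; /* system-wide wired bytes          */
		uint64_t request       = 256ULL << 20;   /* new mlock() request              */

		if (already_wired + request > global_limit) {
			return KERN_RESOURCE_SHORTAGE;   /* surfaces as ENOMEM from mlock() */
		}
		return KERN_SUCCESS;
	}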
* Attempt non-blocking copy-on-write optimizations.
*/
- if (src_destroy &&
- (src_object == VM_OBJECT_NULL ||
- (src_object->internal &&
- src_object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC &&
- src_entry->vme_start <= src_addr &&
- src_entry->vme_end >= src_end &&
- !map_share))) {
- /*
- * If we are destroying the source, and the object
- * is internal, we can move the object reference
- * from the source to the copy. The copy is
- * copy-on-write only if the source is.
- * We make another reference to the object, because
- * destroying the source entry will deallocate it.
- *
- * This memory transfer has to be atomic (to prevent
- * the VM object from being shared or copied while
- * it's being moved here), so we can only do this
- * if we won't have to unlock the VM map, i.e. the
- * entire range must be covered by this map entry.
- */
- vm_object_reference(src_object);
-
- /*
- * Copy is always unwired. vm_map_copy_entry
- * set its wired count to zero.
- */
-
- goto CopySuccessful;
- }
-
+ /*
+ * If we are destroying the source, and the object
+ * is internal, we could move the object reference
+ * from the source to the copy. The copy is
+ * copy-on-write only if the source is.
+ * We make another reference to the object, because
+ * destroying the source entry will deallocate it.
+ *
+ * This memory transfer has to be atomic (to prevent
+ * the VM object from being shared or copied while
+ * it's being moved here), so we could only do this
+ * if we won't have to unlock the VM map until the
+ * original mapping has been fully removed.
+ */
RestartCopy:
if ((src_object == VM_OBJECT_NULL ||
*/
#if DEBUG
+extern int vm_check_map_sanity;
+
static void
check_map_sanity(vm_map_t map, vm_map_entry_t old_hole_entry)
{
}
create_new_hole = FALSE;
#if DEBUG
- check_map_sanity(map, &old_hole_entry);
+ if (vm_check_map_sanity) {
+ check_map_sanity(map, &old_hole_entry);
+ }
#endif /* DEBUG */
break;
}
create_new_hole = FALSE;
#if DEBUG
- check_map_sanity(map, &old_hole_entry);
+ if (vm_check_map_sanity) {
+ check_map_sanity(map, &old_hole_entry);
+ }
#endif /* DEBUG */
break;
}
}
#if DEBUG
- check_map_sanity(map, &old_hole_entry);
+ if (vm_check_map_sanity) {
+ check_map_sanity(map, &old_hole_entry);
+ }
#endif /* DEBUG */
SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry);
vm_map_delete_hole(map, hole_entry);
#if DEBUG
- if (check_map_with_hole_sanity) {
+ if (vm_check_map_sanity && check_map_with_hole_sanity) {
check_map_sanity(map, &old_hole_entry);
}
#endif /* DEBUG */
assert(new_hole_entry->start < new_hole_entry->end);
#if DEBUG
- if (check_map_with_hole_sanity) {
+ if (vm_check_map_sanity && check_map_with_hole_sanity) {
check_map_sanity(map, &old_hole_entry);
}
#endif /* DEBUG */
}
#if DEBUG
- if (check_map_with_hole_sanity) {
+ if (vm_check_map_sanity && check_map_with_hole_sanity) {
check_map_sanity(map, &old_hole_entry);
}
#endif /* DEBUG */
}
#if DEBUG
- if (check_map_with_hole_sanity) {
+ if (vm_check_map_sanity && check_map_with_hole_sanity) {
check_map_sanity(map, &old_hole_entry);
}
#endif /* DEBUG */
* Wired memory is a very limited resource and we can't let users exhaust it
* and deadlock the entire system. We enforce the following limits:
*
- * vm_user_wire_limit (default: all memory minus vm_global_no_user_wire_amount)
+ * vm_per_task_user_wire_limit
* how much memory can be user-wired in one user task
*
- * vm_global_user_wire_limit (default: same as vm_user_wire_limit)
+ * vm_global_user_wire_limit (default: same as vm_per_task_user_wire_limit)
* how much memory can be user-wired in all user tasks
*
- * vm_global_no_user_wire_amount (default: VM_NOT_USER_WIREABLE)
- * how much memory must remain user-unwired at any time
+ * These values are set to defaults based on the number of pages managed
+ * by the VM system. They can be overridden via sysctls.
+ * See kmem_set_user_wire_limits for details on the default values.
+ *
+ * Regardless of the amount of memory in the system, we never reserve
+ * more than VM_NOT_USER_WIREABLE_MAX bytes as unlockable.
*/
-#define VM_NOT_USER_WIREABLE (64*1024*1024) /* 64MB */
+#if defined(__LP64__)
+#define VM_NOT_USER_WIREABLE_MAX (32ULL*1024*1024*1024) /* 32GB */
+#else
+#define VM_NOT_USER_WIREABLE_MAX (1UL*1024*1024*1024) /* 1GB */
+#endif /* __LP64__ */
extern
-vm_map_size_t vm_user_wire_limit;
+vm_map_size_t vm_per_task_user_wire_limit;
extern
vm_map_size_t vm_global_user_wire_limit;
-extern
-vm_map_size_t vm_global_no_user_wire_amount;
/*
* Each pageable resident page falls into one of three lists:
size_t l = 0;
num_frames = kasan_alloc_retrieve_bt(base, alloc_bt);
for (vm_size_t i = 0; i < num_frames; i++) {
- l += snprintf(string_rep + l, sizeof(string_rep) - l, " %lx", alloc_bt[i]);
+ l += scnprintf(string_rep + l, sizeof(string_rep) - l, " %lx", alloc_bt[i]);
}
}
shadow &= ~((uptr)0xf);
shadow -= 16 * before;
- n += snprintf(buf+n, len-n,
+ n += scnprintf(buf+n, len-n,
" Shadow 0 1 2 3 4 5 6 7 8 9 a b c d e f\n");
for (i = 0; i < 1 + before + after; i++, shadow += 16) {
continue;
}
- n += snprintf(buf+n, len-n, " %16lx:", shadow);
+ n += scnprintf(buf+n, len-n, " %16lx:", shadow);
char *left = " ";
char *right;
right = "";
}
- n += snprintf(buf+n, len-n, "%s%02x%s", left, (unsigned)*x, right);
+ n += scnprintf(buf+n, len-n, "%s%02x%s", left, (unsigned)*x, right);
left = "";
}
- n += snprintf(buf+n, len-n, "\n");
+ n += scnprintf(buf+n, len-n, "\n");
}
- n += snprintf(buf+n, len-n, "\n");
+ n += scnprintf(buf+n, len-n, "\n");
return n;
}
buf[0] = '\0';
if (reason == REASON_MOD_OOB || reason == REASON_BAD_METADATA) {
- n += snprintf(buf+n, len-n, "KASan: free of corrupted/invalid object %#lx\n", p);
+ n += scnprintf(buf+n, len-n, "KASan: free of corrupted/invalid object %#lx\n", p);
} else if (reason == REASON_MOD_AFTER_FREE) {
- n += snprintf(buf+n, len-n, "KASan: UaF of quarantined object %#lx\n", p);
+ n += scnprintf(buf+n, len-n, "KASan: UaF of quarantined object %#lx\n", p);
} else {
- n += snprintf(buf+n, len-n, "KASan: invalid %lu-byte %s %#lx [%s]\n",
+ n += scnprintf(buf+n, len-n, "KASan: invalid %lu-byte %s %#lx [%s]\n",
width, access_str(access), p, shadow_str);
}
n += kasan_shadow_crashlog(p, buf+n, len-n);
NULL); /* ignore current frame */
buf[0] = '\0';
- l += snprintf(buf+l, len-l, "Backtrace: ");
+ l += scnprintf(buf+l, len-l, "Backtrace: ");
for (uint32_t i = 0; i < nframes; i++) {
- l += snprintf(buf+l, len-l, "%lx,", VM_KERNEL_UNSLIDE(bt[i]));
+ l += scnprintf(buf+l, len-l, "%lx,", VM_KERNEL_UNSLIDE(bt[i]));
}
- l += snprintf(buf+l, len-l, "\n");
+ l += scnprintf(buf+l, len-l, "\n");
printf("%s", buf);
}
static size_t
format_loc(struct san_src_loc *loc, char *dst, size_t sz)
{
- return snprintf(dst, sz, " loc: %s:%d:%d\n",
+ return scnprintf(dst, sz, " loc: %s:%d:%d\n",
loc->filename,
loc->line & ~line_acquired,
loc->col
format_overflow(struct ubsan_violation *v, char *buf, size_t sz)
{
struct san_type_desc *ty = v->overflow->ty;
- return snprintf(buf, sz,
+ return scnprintf(buf, sz,
"%s overflow, op = %s, ty = %s, width = %d, lhs = 0x%llx, rhs = 0x%llx\n",
ty->issigned ? "signed" : "unsigned",
overflow_str[v->ubsan_type],
struct san_type_desc *l = v->shift->lhs_t;
struct san_type_desc *r = v->shift->rhs_t;
- n += snprintf(buf + n, sz - n, "bad shift\n");
- n += snprintf(buf + n, sz - n, " lhs: 0x%llx, ty = %s, signed = %d, width = %d\n", v->lhs, l->name, l->issigned, 1 << l->width);
- n += snprintf(buf + n, sz - n, " rhs: 0x%llx, ty = %s, signed = %d, width = %d\n", v->rhs, r->name, r->issigned, 1 << r->width);
+ n += scnprintf(buf + n, sz - n, "bad shift\n");
+ n += scnprintf(buf + n, sz - n, " lhs: 0x%llx, ty = %s, signed = %d, width = %d\n", v->lhs, l->name, l->issigned, 1 << l->width);
+ n += scnprintf(buf + n, sz - n, " rhs: 0x%llx, ty = %s, signed = %d, width = %d\n", v->rhs, r->name, r->issigned, 1 << r->width);
return n;
}
const char * kind = get_type_check_kind(v->align->kind);
if (NULL == ptr) {
//null pointer use
- n += snprintf(buf + n, sz - n, "%s NULL pointer of type %s\n", kind, v->align->ty->name);
+ n += scnprintf(buf + n, sz - n, "%s NULL pointer of type %s\n", kind, v->align->ty->name);
} else if (alignment && ((uintptr_t)ptr & (alignment - 1))) {
//misaligned pointer use
- n += snprintf(buf + n, sz - n, "%s mis-aligned address %p for type %s ", kind, (void*)v->lhs, v->align->ty->name);
- n += snprintf(buf + n, sz - n, "which requires %d byte alignment\n", 1 << v->align->align);
+ n += scnprintf(buf + n, sz - n, "%s mis-aligned address %p for type %s ", kind, (void*)v->lhs, v->align->ty->name);
+ n += scnprintf(buf + n, sz - n, "which requires %d byte alignment\n", 1 << v->align->align);
} else {
//insufficient object size
- n += snprintf(buf + n, sz - n, "%s address %p with insufficient space for an object of type %s\n",
+ n += scnprintf(buf + n, sz - n, "%s address %p with insufficient space for an object of type %s\n",
kind, ptr, v->align->ty->name);
}
struct san_type_desc *ity = v->oob->index_ty;
uintptr_t idx = v->lhs;
- n += snprintf(buf + n, sz - n, "OOB array access\n");
- n += snprintf(buf + n, sz - n, " idx %ld\n", idx);
- n += snprintf(buf + n, sz - n, " aty: ty = %s, signed = %d, width = %d\n", aty->name, aty->issigned, 1 << aty->width);
- n += snprintf(buf + n, sz - n, " ity: ty = %s, signed = %d, width = %d\n", ity->name, ity->issigned, 1 << ity->width);
+ n += scnprintf(buf + n, sz - n, "OOB array access\n");
+ n += scnprintf(buf + n, sz - n, " idx %ld\n", idx);
+ n += scnprintf(buf + n, sz - n, " aty: ty = %s, signed = %d, width = %d\n", aty->name, aty->issigned, 1 << aty->width);
+ n += scnprintf(buf + n, sz - n, " ity: ty = %s, signed = %d, width = %d\n", ity->name, ity->issigned, 1 << ity->width);
return n;
}
n += format_overflow(v, buf + n, sz - n);
break;
case UBSAN_UNREACHABLE:
- n += snprintf(buf + n, sz - n, "unreachable\n");
+ n += scnprintf(buf + n, sz - n, "unreachable\n");
break;
case UBSAN_SHIFT:
n += format_shift(v, buf + n, sz - n);
n += format_type_mismatch(v, buf + n, sz - n);
break;
case UBSAN_POINTER_OVERFLOW:
- n += snprintf(buf + n, sz - n, "pointer overflow, before = 0x%llx, after = 0x%llx\n", v->lhs, v->rhs);
+ n += scnprintf(buf + n, sz - n, "pointer overflow, before = 0x%llx, after = 0x%llx\n", v->lhs, v->rhs);
break;
case UBSAN_OOB:
n += format_oob(v, buf + n, sz - n);
break;
case UBSAN_GENERIC:
- n += snprintf(buf + n, sz - n, "%s\n", v->func);
+ n += scnprintf(buf + n, sz - n, "%s\n", v->func);
break;
default:
panic("unknown violation");
stackshot_tests: OTHER_LDFLAGS += -lkdd -ldarwintest_utils -framework Foundation
stackshot_tests: INVALID_ARCHS = i386
+stackshot_accuracy: OTHER_CFLAGS += -ldarwintest_utils -Wno-objc-messaging-id
+stackshot_accuracy: OTHER_LDFLAGS += -lkdd -ldarwintest_utils -framework Foundation
+stackshot_accuracy: INVALID_ARCHS = i386
+
telemetry: OTHER_LDFLAGS = -framework ktrace -framework CoreFoundation
telemetry: INVALID_ARCHS = i386
--- /dev/null
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <strings.h>
+
+#include <net/route.h>
+#include <sys/socket.h>
+#include <unistd.h>
+
+#include <darwintest.h>
+
+#define ROUNDUP32(n) (((n) + sizeof(uint32_t) - 1) & ~(sizeof(uint32_t) - 1))
+
+T_DECL(route_output_stack_oflow_56033075, "Stack overflow via ma_copy through route_output")
+{
+ int s;
+ uint8_t buf[
+ sizeof(struct rt_msghdr) +
+ ROUNDUP32(sizeof(struct sockaddr_storage) + 1) + /* RTAX_DST */
+ ROUNDUP32(sizeof(struct sockaddr_storage) + 1) + /* RTAX_GATEWAY */
+ ROUNDUP32(sizeof(struct sockaddr_storage) + 1) /* RTAX_NETMASK */
+ ];
+ struct rt_msghdr *rtm = (struct rt_msghdr *)buf;
+ struct sockaddr *sa;
+ size_t len;
+
+ bzero(buf, sizeof(buf));
+ rtm->rtm_type = RTM_GET;
+ rtm->rtm_version = RTM_VERSION;
+ rtm->rtm_addrs = RTA_DST | RTA_GATEWAY | RTA_NETMASK;
+ len = sizeof(struct rt_msghdr);
+
+ /* RTAX_DST: */
+ sa = (struct sockaddr *)(rtm + 1);
+ sa->sa_family = AF_INET6;
+ sa->sa_len = sizeof(struct sockaddr_storage) + 1;
+ memset(&sa->sa_data[0], 0xff, sa->sa_len);
+ len += ROUNDUP32(sa->sa_len);
+
+ /* RTAX_GATEWAY: */
+ sa = (struct sockaddr *)((void *)buf + len);
+ sa->sa_family = AF_INET6;
+ sa->sa_len = sizeof(struct sockaddr_storage) + 1;
+ memset(&sa->sa_data[0], 0xff, sa->sa_len);
+ len += ROUNDUP32(sa->sa_len);
+
+ /* RTAX_NETMASK: */
+ sa = (struct sockaddr *)((void *)buf + len);
+ sa->sa_family = AF_INET6;
+ sa->sa_len = sizeof(struct sockaddr_storage) + 1;
+ memset(&sa->sa_data[0], 0x41, sa->sa_len);
+ len += ROUNDUP32(sa->sa_len);
+
+ T_SETUPBEGIN;
+ T_ASSERT_POSIX_SUCCESS(s = socket(PF_ROUTE, SOCK_RAW, PF_ROUTE), NULL);
+ T_SETUPEND;
+
+ /* check we get EINVAL for > sizeof(struct sockaddr_storage): */
+ rtm->rtm_msglen = len;
+ T_ASSERT_EQ(-1, send(s, buf, len, 0), NULL);
+ T_ASSERT_EQ(EINVAL, errno, NULL);
+
+ /* now check the ok case: */
+ len = sizeof(struct rt_msghdr);
+
+ /* RTAX_DST: */
+ sa = (struct sockaddr *)(rtm + 1);
+ sa->sa_family = AF_INET6;
+ sa->sa_len = sizeof(struct sockaddr_storage);
+ len += ROUNDUP32(sa->sa_len);
+
+ /* RTAX_GATEWAY: */
+ sa = (struct sockaddr *)((void *)buf + len);
+ sa->sa_family = AF_INET6;
+ sa->sa_len = sizeof(struct sockaddr_storage);
+ len += ROUNDUP32(sa->sa_len);
+
+ /* RTAX_NETMASK: */
+ sa = (struct sockaddr *)((void *)buf + len);
+ sa->sa_family = AF_INET6;
+ sa->sa_len = sizeof(struct sockaddr_storage);
+ len += ROUNDUP32(sa->sa_len);
+
+ rtm->rtm_msglen = len;
+ T_ASSERT_EQ(-1, send(s, buf, len, 0), NULL);
+ T_ASSERT_EQ(ESRCH, errno, NULL);
+}
--- /dev/null
+#include <darwintest.h>
+#include <darwintest_utils.h>
+#include <sys/kern_memorystatus.h>
+#include <kern/debug.h>
+#include <mach-o/dyld.h>
+#include <sys/stackshot.h>
+#include <kdd.h>
+#include <signal.h>
+
+#define RECURSIONS 25
+#define FIRST_RECURSIVE_FRAME 3
+
+T_GLOBAL_META(
+ T_META_NAMESPACE("xnu.stackshot.accuracy"),
+ T_META_CHECK_LEAKS(false),
+ T_META_ASROOT(true)
+ );
+
+
+void child_init(void);
+void parent_helper_singleproc(int);
+
+#define CHECK_FOR_FAULT_STATS (1 << 0)
+#define WRITE_STACKSHOT_BUFFER_TO_TMP (1 << 1)
+#define CHECK_FOR_KERNEL_THREADS (1 << 2)
+int check_stackshot(void *, int);
+
+/* used for WRITE_STACKSHOT_BUFFER_TO_TMP */
+static char const *current_scenario_name;
+static pid_t child_pid;
+
+/* helpers */
+
+static void __attribute__((noinline))
+child_recurse(int r, int spin, void (^cb)(void))
+{
+ if (r > 0) {
+ child_recurse(r - 1, spin, cb);
+ }
+
+ cb();
+
+ /* wait forever */
+ if (spin == 0) {
+ sleep(100000);
+ } else if (spin == 2) {
+ int v = 1;
+ /* ssh won't let the session die if we still have file handles open to its output. */
+ close(STDERR_FILENO);
+ close(STDOUT_FILENO);
+ T_ASSERT_POSIX_SUCCESS(sysctlbyname("kern.wedge_thread", NULL, NULL, &v, sizeof(v)),
+ "wedged thread in the kernel");
+ } else {
+ while (1) {
+ __asm__ volatile("" : : : "memory");
+ }
+ }
+}
+
+T_HELPER_DECL(simple_child_process, "child process that will be frozen and others")
+{
+ child_init();
+}
+
+T_HELPER_DECL(sid_child_process, "child process that setsid()s")
+{
+ pid_t ppid = getppid();
+
+ T_ASSERT_POSIX_SUCCESS(setsid(), "session id set");
+
+ child_recurse(RECURSIONS, 2, ^{
+ kill(ppid, SIGUSR1);
+ });
+
+ T_ASSERT_FAIL("child_init returned!");
+}
+
+static void
+kill_children(void)
+{
+ kill(child_pid, SIGKILL);
+}
+
+static void *
+take_stackshot(pid_t target_pid, uint32_t extra_flags, uint64_t since_timestamp)
+{
+ void *stackshot_config;
+ int err, retries = 5;
+ uint32_t stackshot_flags = STACKSHOT_KCDATA_FORMAT |
+ STACKSHOT_THREAD_WAITINFO |
+ STACKSHOT_GET_DQ;
+
+ /* we should be able to verify delta stackshots */
+ if (since_timestamp != 0) {
+ stackshot_flags |= STACKSHOT_COLLECT_DELTA_SNAPSHOT;
+ }
+
+ stackshot_flags |= extra_flags;
+
+ stackshot_config = stackshot_config_create();
+ T_ASSERT_NOTNULL(stackshot_config, "allocate stackshot config");
+
+ err = stackshot_config_set_flags(stackshot_config, stackshot_flags);
+ T_ASSERT_EQ(err, 0, "set flags on stackshot config");
+
+ err = stackshot_config_set_pid(stackshot_config, target_pid);
+ T_ASSERT_EQ(err, 0, "set target pid on stackshot config");
+
+ if (since_timestamp != 0) {
+ err = stackshot_config_set_delta_timestamp(stackshot_config, since_timestamp);
+ T_ASSERT_EQ(err, 0, "set prev snapshot time on stackshot config");
+ }
+
+ while (retries > 0) {
+ err = stackshot_capture_with_config(stackshot_config);
+ if (err == 0) {
+ break;
+ } else if (err == EBUSY || err == ETIMEDOUT) {
+ T_LOG("stackshot capture returned %d (%s)\n", err, strerror(err));
+ retries--;
+ if (retries == 0) {
+ T_ASSERT_FAIL("failed to take stackshot with error after retries: %d: %s\n", err, strerror(err));
+ }
+ continue;
+ } else {
+ T_ASSERT_FAIL("failed to take stackshot with error: %d: %s\n", err, strerror(err));
+ }
+ }
+
+ return stackshot_config;
+}
+
+int
+check_stackshot(void *stackshot_config, int flags)
+{
+ void *buf;
+ uint32_t buflen, kcdata_type;
+ kcdata_iter_t iter;
+ NSError *nserror = nil;
+ pid_t target_pid;
+ int ret = 0;
+ uint64_t expected_return_addr = 0;
+ bool found_fault_stats = false;
+ struct stackshot_fault_stats fault_stats = {0};
+
+ buf = stackshot_config_get_stackshot_buffer(stackshot_config);
+ T_ASSERT_NOTNULL(buf, "stackshot buffer is not null");
+ buflen = stackshot_config_get_stackshot_size(stackshot_config);
+ T_ASSERT_GT(buflen, 0, "valid stackshot buffer length");
+ target_pid = ((struct stackshot_config*)stackshot_config)->sc_pid;
+ T_ASSERT_GT(target_pid, 0, "valid target_pid");
+
+ /* if need to write it to fs, do it now */
+ if (flags & WRITE_STACKSHOT_BUFFER_TO_TMP) {
+ char sspath[MAXPATHLEN];
+ strlcpy(sspath, current_scenario_name, sizeof(sspath));
+ strlcat(sspath, ".kcdata", sizeof(sspath));
+ T_QUIET; T_ASSERT_POSIX_ZERO(dt_resultfile(sspath, sizeof(sspath)),
+ "create result file path");
+
+ FILE *f = fopen(sspath, "w");
+ T_WITH_ERRNO; T_QUIET; T_ASSERT_NOTNULL(f,
+ "open stackshot output file");
+
+ size_t written = fwrite(buf, buflen, 1, f);
+ T_QUIET; T_ASSERT_EQ(written, (size_t)1, "wrote stackshot to file");
+
+ fclose(f);
+ }
+
+ /* begin iterating */
+ iter = kcdata_iter(buf, buflen);
+ T_ASSERT_EQ(kcdata_iter_type(iter), KCDATA_BUFFER_BEGIN_STACKSHOT, "buffer is a stackshot");
+
+ /* time to iterate */
+ iter = kcdata_iter_next(iter);
+ KCDATA_ITER_FOREACH(iter) {
+ kcdata_type = kcdata_iter_type(iter);
+ NSNumber *parsedPid;
+ NSMutableDictionary *parsedContainer, *parsedThreads;
+
+ if ((flags & CHECK_FOR_FAULT_STATS) != 0 &&
+ kcdata_type == STACKSHOT_KCTYPE_STACKSHOT_FAULT_STATS) {
+ memcpy(&fault_stats, kcdata_iter_payload(iter), sizeof(fault_stats));
+ found_fault_stats = true;
+ }
+
+ if (kcdata_type != KCDATA_TYPE_CONTAINER_BEGIN) {
+ continue;
+ }
+
+ if (kcdata_iter_container_type(iter) != STACKSHOT_KCCONTAINER_TASK) {
+ continue;
+ }
+
+ parsedContainer = parseKCDataContainer(&iter, &nserror);
+ T_ASSERT_NOTNULL(parsedContainer, "parsedContainer is not null");
+ T_ASSERT_NULL(nserror, "no NSError occured while parsing the kcdata container");
+
+ /*
+ * given that we've targeted the pid, we can be sure that this
+ * ts_pid will be the pid we expect
+ */
+ parsedPid = parsedContainer[@"task_snapshots"][@"task_snapshot"][@"ts_pid"];
+ T_ASSERT_EQ([parsedPid intValue], target_pid, "found correct pid");
+
+ /* start parsing the threads */
+ parsedThreads = parsedContainer[@"task_snapshots"][@"thread_snapshots"];
+ for (id th_key in parsedThreads) {
+ uint32_t frame_index = 0;
+
+ if ((flags & CHECK_FOR_KERNEL_THREADS) == 0) {
+ /* skip threads that don't have enough frames */
+ if ([parsedThreads[th_key][@"user_stack_frames"] count] < RECURSIONS) {
+ continue;
+ }
+
+ for (id frame in parsedThreads[th_key][@"user_stack_frames"]) {
+ if ((frame_index >= FIRST_RECURSIVE_FRAME) && (frame_index < (RECURSIONS - FIRST_RECURSIVE_FRAME))) {
+ if (expected_return_addr == 0ull) {
+ expected_return_addr = [frame[@"lr"] unsignedLongLongValue];
+ } else {
+ T_QUIET;
+ T_ASSERT_EQ(expected_return_addr, [frame[@"lr"] unsignedLongLongValue], "expected return address found");
+ }
+ }
+ frame_index++;
+ }
+ } else {
+ T_ASSERT_NOTNULL(parsedThreads[th_key][@"kernel_stack_frames"],
+ "found kernel stack frames");
+ }
+
+ }
+ }
+
+ if (found_fault_stats) {
+ T_LOG("number of pages faulted in: %d", fault_stats.sfs_pages_faulted_in);
+ T_LOG("MATUs spent faulting: %lld", fault_stats.sfs_time_spent_faulting);
+ T_LOG("MATUS fault time limit: %lld", fault_stats.sfs_system_max_fault_time);
+ T_LOG("did we stop because of the limit?: %s", fault_stats.sfs_stopped_faulting ? "yes" : "no");
+ if (expected_return_addr != 0ull) {
+ T_ASSERT_GT(fault_stats.sfs_pages_faulted_in, 0, "faulted at least one page in");
+ T_LOG("NOTE: successfully faulted in the pages");
+ } else {
+ T_LOG("NOTE: We were not able to fault the stack's pages back in");
+
+ /* if we couldn't fault the pages back in, then at least verify that we tried */
+ T_ASSERT_GT(fault_stats.sfs_time_spent_faulting, 0ull, "spent time trying to fault");
+ }
+ } else if ((flags & CHECK_FOR_KERNEL_THREADS) == 0) {
+ T_ASSERT_NE(expected_return_addr, 0ull, "found child thread with recursions");
+ }
+
+ if (flags & CHECK_FOR_FAULT_STATS) {
+ T_ASSERT_EQ(found_fault_stats, true, "found fault stats");
+ }
+
+ return ret;
+}
+
+void
+child_init(void)
+{
+#if !TARGET_OS_OSX
+ int freeze_state;
+#endif /* !TARGET_OS_OSX */
+ pid_t pid = getpid();
+ char padding[16 * 1024];
+ __asm__ volatile(""::"r"(padding));
+
+ T_LOG("child pid: %d\n", pid);
+
+#if !TARGET_OS_OSX
+ /* allow us to be frozen */
+ freeze_state = memorystatus_control(MEMORYSTATUS_CMD_GET_PROCESS_IS_FREEZABLE, pid, 0, NULL, 0);
+ if (freeze_state == -1) {
+ T_SKIP("This device doesn't have CONFIG_FREEZE enabled.");
+ } else if (freeze_state == 0) {
+ T_LOG("CHILD was found to be UNFREEZABLE, enabling freezing.");
+ memorystatus_control(MEMORYSTATUS_CMD_SET_PROCESS_IS_FREEZABLE, pid, 1, NULL, 0);
+ freeze_state = memorystatus_control(MEMORYSTATUS_CMD_GET_PROCESS_IS_FREEZABLE, pid, 0, NULL, 0);
+ T_ASSERT_EQ(freeze_state, 1, "successfully set freezeability");
+ }
+#else
+ T_LOG("Cannot change freezeability as freezing is only available on embedded devices");
+#endif /* !TARGET_OS_OSX */
+
+ /*
+ * recurse a bunch of times to generate predictable data in the stackshot,
+ * then send SIGUSR1 to the parent to let it know that we are done.
+ */
+ child_recurse(RECURSIONS, 0, ^{
+ kill(getppid(), SIGUSR1);
+ });
+
+ T_ASSERT_FAIL("child_recurse returned, but it must not?");
+}
+
+void
+parent_helper_singleproc(int spin)
+{
+ dispatch_semaphore_t child_done_sema = dispatch_semaphore_create(0);
+ dispatch_queue_t dq = dispatch_queue_create("com.apple.stackshot_accuracy.basic_sp", NULL);
+ void *stackshot_config;
+
+ dispatch_async(dq, ^{
+ char padding[16 * 1024];
+ __asm__ volatile(""::"r"(padding));
+
+ child_recurse(RECURSIONS, spin, ^{
+ dispatch_semaphore_signal(child_done_sema);
+ });
+ });
+
+ dispatch_semaphore_wait(child_done_sema, DISPATCH_TIME_FOREVER);
+ T_LOG("done waiting for child");
+
+ /* take the stackshot and parse it */
+ stackshot_config = take_stackshot(getpid(), 0, 0);
+
+ /* check that the stackshot has the stack frames */
+ check_stackshot(stackshot_config, 0);
+
+ T_LOG("done!");
+}
+
+T_DECL(basic, "test that no-fault stackshot works correctly")
+{
+ char path[PATH_MAX];
+ uint32_t path_size = sizeof(path);
+ char *args[] = { path, "-n", "simple_child_process", NULL };
+ dispatch_queue_t dq = dispatch_queue_create("com.apple.stackshot_accuracy.basic", NULL);
+ dispatch_semaphore_t child_done_sema = dispatch_semaphore_create(0);
+ dispatch_source_t child_sig_src;
+ void *stackshot_config;
+
+ current_scenario_name = __func__;
+
+ T_LOG("parent pid: %d\n", getpid());
+ T_QUIET; T_ASSERT_POSIX_ZERO(_NSGetExecutablePath(path, &path_size), "_NSGetExecutablePath");
+
+ /* setup signal handling */
+ signal(SIGUSR1, SIG_IGN);
+ child_sig_src = dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, SIGUSR1, 0, dq);
+ dispatch_source_set_event_handler(child_sig_src, ^{
+ dispatch_semaphore_signal(child_done_sema);
+ });
+ dispatch_activate(child_sig_src);
+
+ /* create the child process */
+ T_ASSERT_POSIX_SUCCESS(dt_launch_tool(&child_pid, args, false, NULL, NULL), "child launched");
+ T_ATEND(kill_children);
+
+ /* wait until the child has recursed enough */
+ dispatch_semaphore_wait(child_done_sema, DISPATCH_TIME_FOREVER);
+
+ T_LOG("child finished, parent executing");
+
+ /* take the stackshot and parse it */
+ stackshot_config = take_stackshot(child_pid, 0, 0);
+
+ /* check that the stackshot has the stack frames */
+ check_stackshot(stackshot_config, 0);
+
+ T_LOG("all done, killing child");
+
+ /* tell the child to quit */
+ T_ASSERT_POSIX_SUCCESS(kill(child_pid, SIGTERM), "killed child");
+}
+
+T_DECL(basic_singleproc, "test that no-fault stackshot works correctly in single process setting")
+{
+ current_scenario_name = __func__;
+ parent_helper_singleproc(0);
+}
+
+T_DECL(basic_singleproc_spin, "test that no-fault stackshot works correctly in single process setting with spinning")
+{
+ current_scenario_name = __func__;
+ parent_helper_singleproc(1);
+}
+
+T_DECL(fault, "test that faulting stackshots work correctly")
+{
+ dispatch_queue_t dq = dispatch_queue_create("com.apple.stackshot_fault_accuracy", NULL);
+ dispatch_source_t child_sig_src;
+ dispatch_semaphore_t child_done_sema = dispatch_semaphore_create(0);
+ void *stackshot_config;
+ int oldftm, newval = 1, freeze_enabled, oldratio, newratio = 0;
+ size_t oldlen = sizeof(oldftm), fe_len = sizeof(freeze_enabled), ratiolen = sizeof(oldratio);
+ char path[PATH_MAX];
+ uint32_t path_size = sizeof(path);
+ char *args[] = { path, "-n", "simple_child_process", NULL };
+
+ current_scenario_name = __func__;
+ T_QUIET; T_ASSERT_POSIX_ZERO(_NSGetExecutablePath(path, &path_size), "_NSGetExecutablePath");
+
+#if TARGET_OS_OSX
+ T_SKIP("freezing is not available on macOS");
+#endif /* TARGET_OS_OSX */
+
+ /* Try checking if freezing is enabled at all */
+ if (sysctlbyname("vm.freeze_enabled", &freeze_enabled, &fe_len, NULL, 0) == -1) {
+ if (errno == ENOENT) {
+ T_SKIP("This device doesn't have CONFIG_FREEZE enabled.");
+ } else {
+ T_FAIL("failed to query vm.freeze_enabled, errno: %d", errno);
+ }
+ }
+
+ if (!freeze_enabled) {
+ T_SKIP("Freeze is not enabled, skipping test.");
+ }
+
+ /* signal handling */
+ signal(SIGUSR1, SIG_IGN);
+ child_sig_src = dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, SIGUSR1, 0, dq);
+ dispatch_source_set_event_handler(child_sig_src, ^{
+ dispatch_semaphore_signal(child_done_sema);
+ });
+ dispatch_activate(child_sig_src);
+
+ T_ASSERT_POSIX_SUCCESS(dt_launch_tool(&child_pid, args, false, NULL, NULL), "child launched");
+ T_ATEND(kill_children);
+
+ dispatch_semaphore_wait(child_done_sema, DISPATCH_TIME_FOREVER);
+
+ /* keep processes in memory */
+ T_ASSERT_POSIX_SUCCESS(sysctlbyname("kern.memorystatus_freeze_to_memory", &oldftm, &oldlen, &newval, sizeof(newval)),
+ "disabled freezing to disk");
+
+ /* set the ratio to zero */
+ T_ASSERT_POSIX_SUCCESS(sysctlbyname("kern.memorystatus_freeze_private_shared_pages_ratio", &oldratio, &ratiolen, &newratio, sizeof(newratio)), "disabled private:shared ratio checking");
+
+ /* freeze the child */
+ T_ASSERT_POSIX_SUCCESS(sysctlbyname("kern.memorystatus_freeze", NULL, 0, &child_pid, sizeof(child_pid)),
+ "froze child");
+
+ /* Sleep to allow the compressor to finish compressing the child */
+ sleep(5);
+
+ /* take the stackshot and parse it */
+ stackshot_config = take_stackshot(child_pid, STACKSHOT_ENABLE_BT_FAULTING | STACKSHOT_ENABLE_UUID_FAULTING, 0);
+
+ /* check that the stackshot has the stack frames */
+ check_stackshot(stackshot_config, CHECK_FOR_FAULT_STATS);
+
+ T_ASSERT_POSIX_SUCCESS(sysctlbyname("kern.memorystatus_freeze_to_memory", NULL, 0, &oldftm, sizeof(oldftm)),
+ "reset freezing to disk");
+
+ /* reset the private:shared ratio */
+ T_ASSERT_POSIX_SUCCESS(sysctlbyname("kern.memorystatus_freeze_private_shared_pages_ratio", NULL, 0, &oldratio, sizeof(oldratio)), "reset private:shared ratio");
+
+ T_LOG("all done, killing child");
+
+ /* tell the child to quit */
+ T_ASSERT_POSIX_SUCCESS(kill(child_pid, SIGTERM), "killed child");
+}
+
+T_DECL(fault_singleproc, "test that faulting stackshots work correctly in a single process setting")
+{
+ dispatch_semaphore_t child_done_sema = dispatch_semaphore_create(0);
+ dispatch_queue_t dq = dispatch_queue_create("com.apple.stackshot_accuracy.fault_sp", NULL);
+ void *stackshot_config;
+ __block pthread_t child_thread;
+ char *child_stack;
+ size_t child_stacklen;
+
+#if !TARGET_OS_OSX
+ T_SKIP("madvise(..., ..., MADV_PAGEOUT) is not available on embedded platforms");
+#endif /* !TARGET_OS_OSX */
+
+ dispatch_async(dq, ^{
+ char padding[16 * 1024];
+ __asm__ volatile(""::"r"(padding));
+
+ child_recurse(RECURSIONS, 0, ^{
+ child_thread = pthread_self();
+ dispatch_semaphore_signal(child_done_sema);
+ });
+ });
+
+ dispatch_semaphore_wait(child_done_sema, DISPATCH_TIME_FOREVER);
+ T_LOG("done waiting for child");
+
+ child_stack = pthread_get_stackaddr_np(child_thread);
+ child_stacklen = pthread_get_stacksize_np(child_thread);
+ child_stack -= child_stacklen;
+ T_LOG("child stack: [0x%p - 0x%p]: 0x%zu bytes", (void *)child_stack,
+ (void *)(child_stack + child_stacklen), child_stacklen);
+
+ /* paging out the child */
+ T_ASSERT_POSIX_SUCCESS(madvise(child_stack, child_stacklen, MADV_PAGEOUT), "paged out via madvise(2) the child stack");
+
+ /* take the stackshot and parse it */
+ stackshot_config = take_stackshot(getpid(), STACKSHOT_ENABLE_BT_FAULTING | STACKSHOT_ENABLE_UUID_FAULTING, 0);
+
+ /* check that the stackshot has the stack frames */
+ check_stackshot(stackshot_config, CHECK_FOR_FAULT_STATS);
+
+ T_LOG("done!");
+}
+
+T_DECL(zombie, "test that threads wedged in the kernel can be stackshot'd")
+{
+ dispatch_queue_t dq = dispatch_queue_create("com.apple.stackshot_accuracy.zombie", NULL);
+ dispatch_semaphore_t child_done_sema = dispatch_semaphore_create(0);
+ dispatch_source_t child_sig_src;
+ void *stackshot_config;
+ char path[PATH_MAX];
+ uint32_t path_size = sizeof(path);
+ char *args[] = { path, "-n", "sid_child_process", NULL };
+
+ current_scenario_name = __func__;
+ T_QUIET; T_ASSERT_POSIX_ZERO(_NSGetExecutablePath(path, &path_size), "_NSGetExecutablePath");
+
+ T_LOG("parent pid: %d\n", getpid());
+
+ /* setup signal handling */
+ signal(SIGUSR1, SIG_IGN);
+ child_sig_src = dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, SIGUSR1, 0, dq);
+ dispatch_source_set_event_handler(child_sig_src, ^{
+ dispatch_semaphore_signal(child_done_sema);
+ });
+ dispatch_activate(child_sig_src);
+
+ /* create the child process */
+ T_ASSERT_POSIX_SUCCESS(dt_launch_tool(&child_pid, args, false, NULL, NULL), "child launched");
+ T_ATEND(kill_children);
+
+ /* wait until the child has recursed enough */
+ dispatch_semaphore_wait(child_done_sema, DISPATCH_TIME_FOREVER);
+
+ T_LOG("child finished, parent executing. invoking jetsam");
+
+ T_ASSERT_POSIX_SUCCESS(memorystatus_control(MEMORYSTATUS_CMD_TEST_JETSAM, child_pid, 0, 0, 0),
+ "jetsam'd the child");
+
+ /* Sleep to allow the target process to become zombified */
+ sleep(1);
+
+ /* take the stackshot and parse it */
+ stackshot_config = take_stackshot(child_pid, 0, 0);
+
+ /* check that the stackshot has the stack frames */
+ check_stackshot(stackshot_config, CHECK_FOR_KERNEL_THREADS);
+
+ T_LOG("all done, unwedging and killing child");
+
+ int v = 1;
+ T_ASSERT_POSIX_SUCCESS(sysctlbyname("kern.unwedge_thread", NULL, NULL, &v, sizeof(v)),
+ "unwedged child");
+
+ /* tell the child to quit */
+ T_ASSERT_POSIX_SUCCESS(kill(child_pid, SIGTERM), "killed child");
+}
kr = vm_allocate(__self, &tmp_buf, tmp_size, VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
T_QUIET;
T_EXPECT_EQ(kr, KERN_SUCCESS, "vm_allocate(%zu) error 0x%x (%s)",
- tmp_size, kr, mach_error_string(kr));
+ (size_t) tmp_size, kr, mach_error_string(kr));
T_QUIET;
- T_EXPECT_NE(tmp_buf, 0UL, "failed to allocate temporary purgable buffer\n");
+ T_EXPECT_NE(tmp_buf, (vm_address_t) 0, "failed to allocate temporary purgable buffer\n");
kr = vm_allocate(__self, &tmp_buf2, tmp_size, VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
T_QUIET;
T_EXPECT_EQ(kr, KERN_SUCCESS, "vm_allocate(%zu) error 0x%x (%s)",
- tmp_size, kr, mach_error_string(kr));
+ (size_t) tmp_size, kr, mach_error_string(kr));
T_QUIET;
- T_EXPECT_NE(tmp_buf2, 0UL, "failed to allocate temporary purgable buffer\n");
+ T_EXPECT_NE(tmp_buf2, (vm_address_t) 0, "failed to allocate temporary purgable buffer\n");
/* expected failures */
out_size = tmp_size;
--- /dev/null
+#include <time.h>
+#include <errno.h>
+
+#include <mach/mach.h>
+#include <sys/kern_sysctl.h>
+#include <sys/mman.h>
+
+#include <darwintest.h>
+#include <darwintest_utils.h>
+
+
+static const char *g_sysctl_no_wire_name = "vm.global_no_user_wire_amount";
+static const char *g_sysctl_wire_name = "vm.global_user_wire_limit";
+static const char *g_sysctl_per_task_wire_name = "vm.user_wire_limit";
+static const char *g_sysctl_current_wired_count_name = "vm.page_wire_count";
+static const char *g_sysctl_current_free_count_name = "vm.lopage_free_count";
+static const char *g_sysctl_vm_page_size_name = "vm.pagesize";
+static const char *g_sysctl_memsize_name = "hw.memsize";
+
+static size_t
+ptoa(size_t num_pages)
+{
+ static size_t page_size = 0;
+ int ret;
+ size_t page_size_size = sizeof(page_size);
+ if (page_size == 0) {
+ ret = sysctlbyname(g_sysctl_vm_page_size_name, &page_size, &page_size_size, NULL, 0);
+ T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "Unable to get page size");
+ }
+ return num_pages * (size_t) page_size;
+}
+
+
+T_DECL(global_no_user_wire_amount, "no_user_wire_amount <= 32G") {
+ int ret;
+ vm_map_size_t no_wire;
+ size_t no_wire_size = sizeof(no_wire);
+ ret = sysctlbyname(g_sysctl_no_wire_name, &no_wire, &no_wire_size, NULL, 0);
+ T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "no_user_wire sysctl failed");
+ T_QUIET; T_EXPECT_LE(no_wire, 32ULL << 30, "no_user_wire_amount is too big.");
+}
+
+T_DECL(user_wire_amount, "max_mem > user_wire_amount >= 0.7 * max_mem") {
+ int ret;
+ vm_map_size_t wire;
+ uint64_t max_mem;
+ size_t max_mem_size = sizeof(max_mem);
+ size_t wire_size = sizeof(wire);
+ ret = sysctlbyname(g_sysctl_memsize_name, &max_mem, &max_mem_size, NULL, 0);
+ T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "memsize sysctl failed");
+ ret = sysctlbyname(g_sysctl_wire_name, &wire, &wire_size, NULL, 0);
+ T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "user_wire sysctl failed");
+ T_QUIET; T_ASSERT_LT(wire, max_mem, "wire limit is too big");
+ T_QUIET; T_ASSERT_GE(wire, max_mem * 70 / 100, "wire limit is too small.");
+}
+
+/*
+ * Sets the no wire limit, and ensures that the wire_limit
+ * changes correctly.
+ */
+static void
+set_no_wire_limit(vm_map_size_t value, uint64_t max_mem)
+{
+ vm_map_size_t wire;
+ size_t wire_size = sizeof(wire);
+ int ret;
+ ret = sysctlbyname(g_sysctl_no_wire_name, NULL, 0, &value, sizeof(value));
+ T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "no_user_wire sysctl set failed");
+ ret = sysctlbyname(g_sysctl_wire_name, &wire, &wire_size, NULL, 0);
+ T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "user_wire sysctl failed");
+ T_QUIET; T_ASSERT_EQ(max_mem - wire, value, "no wire size is incorrect");
+}
+
+/*
+ * Sets the wire limit, and ensures that the no_wire_limit
+ * changes correctly.
+ */
+static void
+set_wire_limit(vm_map_size_t value, uint64_t max_mem)
+{
+ vm_map_size_t no_wire;
+ size_t no_wire_size = sizeof(no_wire);
+ int ret;
+ ret = sysctlbyname(g_sysctl_wire_name, NULL, 0, &value, sizeof(value));
+ T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "user_wire sysctl set failed");
+ ret = sysctlbyname(g_sysctl_no_wire_name, &no_wire, &no_wire_size, NULL, 0);
+ T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "no_user_wire sysctl failed");
+ T_QUIET; T_ASSERT_EQ(max_mem - value, no_wire, "no wire size is incorrect");
+}
+
+T_DECL(set_global_no_user_wire_amount, "Setting no_user_wire_amount changes global_user_wire_amount", T_META_ASROOT(true)) {
+ int ret;
+ vm_map_size_t no_wire, wire;
+ vm_map_size_t no_wire_delta = 16 * (1 << 10);
+ uint64_t max_mem;
+ size_t no_wire_size = sizeof(no_wire);
+ size_t wire_size = sizeof(wire);
+ size_t max_mem_size = sizeof(max_mem);
+ ret = sysctlbyname(g_sysctl_memsize_name, &max_mem, &max_mem_size, NULL, 0);
+ T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "max_mem sysctl failed");
+ ret = sysctlbyname(g_sysctl_no_wire_name, &no_wire, &no_wire_size, NULL, 0);
+ T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "no_user_wire sysctl failed");
+ ret = sysctlbyname(g_sysctl_wire_name, &wire, &wire_size, NULL, 0);
+ T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "user_wire sysctl failed");
+ T_QUIET; T_ASSERT_EQ(max_mem - wire, no_wire, "no wire size is incorrect");
+
+ // Set the no_wire limit and ensure that the wire_size changed.
+ set_no_wire_limit(no_wire + no_wire_delta, max_mem);
+ set_no_wire_limit(no_wire, max_mem);
+ // Set the wire limit and ensure that the no_wire_limit has changed
+ set_wire_limit(wire - no_wire_delta, max_mem);
+ set_wire_limit(wire, max_mem);
+}
+
+T_DECL(set_user_wire_limit, "Set user_wire_limit", T_META_ASROOT(true)) {
+ vm_map_size_t wire, original_wire;
+ size_t wire_size = sizeof(wire);
+ int ret;
+ vm_map_size_t wire_delta = 48 * (1 << 10);
+ ret = sysctlbyname(g_sysctl_per_task_wire_name, &original_wire, &wire_size, NULL, 0);
+ T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "user_wire sysctl get failed");
+ wire = original_wire + wire_delta;
+ ret = sysctlbyname(g_sysctl_per_task_wire_name, NULL, 0, &wire, wire_size);
+ T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "user_wire sysctl set failed");
+ ret = sysctlbyname(g_sysctl_per_task_wire_name, &wire, &wire_size, NULL, 0);
+ T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "user_wire sysctl get failed");
+ T_QUIET; T_ASSERT_EQ(wire, original_wire + wire_delta, "user_wire sysctl didn't set the correct value.");
+
+ // Cleanup
+ ret = sysctlbyname(g_sysctl_per_task_wire_name, NULL, 0, &original_wire, wire_size);
+ T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "user_wire sysctl set failed");
+}
+
+#if TARGET_OS_OSX
+/*
+ * Test that wiring up to the limit doesn't hang the system.
+ * We only test this on macOS. On all other platforms, we'd expect
+ * to get jetsam'd for doing this.
+ */
+static void *
+wire_to_limit(size_t limit, size_t *size)
+{
+ // Trying to wire directly to the limit is likely to fail
+ // repeatedly since other wired pages are probably coming and going
+ // so we just try to get close.
+ const unsigned int wiggle_room_pages = 1000;
+ int ret;
+ unsigned int current_wired, current_free;
+ size_t buffer_size, offset_from_limit;
+ void *buffer;
+ size_t current_wired_size = sizeof(current_wired);
+ size_t current_free_size = sizeof(current_free);
+ while (true) {
+ ret = sysctlbyname(g_sysctl_current_wired_count_name, &current_wired, &current_wired_size, NULL, 0);
+ T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "get current wired count failed");
+ ret = sysctlbyname(g_sysctl_current_free_count_name, &current_free, &current_free_size, NULL, 0);
+ T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "get current free count failed");
+ offset_from_limit = ptoa(current_wired + current_free + wiggle_room_pages);
+ T_QUIET; T_ASSERT_GE(limit, offset_from_limit, "more pages are wired than the limit.");
+ buffer_size = limit - offset_from_limit;
+ buffer = malloc(buffer_size);
+ T_QUIET; T_ASSERT_NOTNULL(buffer, "Unable to allocate buffer");
+ ret = mlock(buffer, buffer_size);
+ if (ret == 0) {
+ break;
+ }
+ free(buffer);
+ }
+ *size = buffer_size;
+ return buffer;
+}
+
+T_DECL(wire_stress_test, "wire up to global_user_wire_limit and spin for 120 seconds.") {
+ static const int kNumSecondsToSpin = 120;
+ int ret;
+ struct timespec start, now;
+ size_t buffer_size;
+ size_t wire_limit;
+ size_t wire_limit_size = sizeof(wire_limit);
+ void *buffer;
+
+ ret = sysctlbyname(g_sysctl_wire_name, &wire_limit, &wire_limit_size, NULL, 0);
+ T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "user_wire sysctl failed");
+ buffer = wire_to_limit(wire_limit, &buffer_size);
+ ret = clock_gettime(CLOCK_MONOTONIC, &start);
+ T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "Unable to get current time.");
+ while (true) {
+ ret = clock_gettime(CLOCK_MONOTONIC, &now);
+ T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "Unable to get current time.");
+ if (now.tv_sec - start.tv_sec >= kNumSecondsToSpin) {
+ break;
+ }
+ }
+ ret = munlock(buffer, buffer_size);
+ T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "Unable to unlock memory.");
+ free(buffer);
+}
+#endif /* TARGET_OS_OSX */
--- /dev/null
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <arpa/inet.h>
+#include <fcntl.h>
+#include <pthread.h>
+#include <netinet/in.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <unistd.h>
+
+#include <darwintest.h>
+
+/* sizeof(struct ip6_pktopts) */
+#define SIZEOF_STRUCT_IP6_PKTOPTS 192
+
+static int finished = 0;
+
+static void *
+setopt_thread(void *data)
+{
+ int s = *(int *)data;
+ uint8_t optbuf[CMSG_LEN(0)];
+ uint8_t spraybuf[SIZEOF_STRUCT_IP6_PKTOPTS];
+
+ memset(optbuf, 0, sizeof(optbuf));
+ memset(spraybuf, 0x41, sizeof(spraybuf));
+
+ while (!finished) {
+ T_ASSERT_POSIX_SUCCESS(setsockopt(s, IPPROTO_IPV6, IPV6_2292PKTOPTIONS, optbuf, sizeof(optbuf)), NULL);
+
+ /* force an error to free: */
+ T_ASSERT_EQ(setsockopt(s, IPPROTO_IPV6, IPV6_2292PKTOPTIONS, optbuf, 1), -1, NULL);
+
+ /* realloc: */
+ T_ASSERT_EQ(ioctl(-1, _IOW('x', 0, spraybuf), spraybuf), -1, NULL);
+ }
+
+ return NULL;
+}
+
+static void *
+connect_thread(void *data)
+{
+ struct sockaddr_in6 *dst = data;
+ int s;
+
+ while (!finished) {
+ T_ASSERT_POSIX_SUCCESS(s = socket(AF_INET6, SOCK_STREAM, IPPROTO_TCP), NULL);
+ connect(s, (const struct sockaddr *)dst, sizeof(*dst));
+ close(s);
+ }
+
+ return NULL;
+}
+
+T_DECL(tcp_input_outputopts_uaf_56155583, "Use-after-free when accepting TCP6 connections.")
+{
+ int s;
+ struct sockaddr_in6 sin6 = {
+ .sin6_family = AF_INET6,
+ .sin6_port = htons(1337)
+ };
+ struct sockaddr_in6 addr;
+ socklen_t addr_len;
+ pthread_t threads[20];
+ int nthreads = 0;
+ int n;
+
+ T_SETUPBEGIN;
+ T_ASSERT_EQ(inet_pton(AF_INET6, "::1", &sin6.sin6_addr), 1, NULL);
+ T_ASSERT_POSIX_SUCCESS(s = socket(AF_INET6, SOCK_STREAM, IPPROTO_TCP), NULL);
+ T_ASSERT_POSIX_SUCCESS(bind(s, (const struct sockaddr *)&sin6, sizeof(sin6)), NULL);
+ T_ASSERT_POSIX_SUCCESS(listen(s, 32), NULL);
+ T_ASSERT_POSIX_SUCCESS(fcntl(s, F_SETFL, fcntl(s, F_GETFL) | O_NONBLOCK), NULL);
+ T_SETUPEND;
+
+ for (n = 0; n < 16; ++n) {
+ if (pthread_create(&threads[nthreads++], NULL, setopt_thread, &s)) {
+ T_ASSERT_FAIL("pthread_create failed");
+ }
+ }
+
+ for (n = 0; n < 4; ++n) {
+ if (pthread_create(&threads[nthreads++], NULL, connect_thread, &sin6)) {
+ T_ASSERT_FAIL("pthread_create failed");
+ }
+ }
+
+ for (n = 0; n < 200000; ++n) {
+ addr_len = sizeof(addr);
+ close(accept(s, (struct sockaddr *)&addr, &addr_len));
+ }
+
+ finished = 1;
+
+ for (n = 0; n < nthreads; ++n) {
+ pthread_join(threads[n], NULL);
+ }
+}
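+
+/*
+ * No explicit postcondition is asserted at the end; as with most race
+ * reproducers, the test passes if the kernel survives all 200000 accept()
+ * iterations without panicking.
+ */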
from xnu import *
from utils import *
from kdp import *
+from core import caching
import sys
+from collections import deque
######################################
# Globals
print out_string
+
+@lldb_command("showinterruptvectors")
+def ShowInterruptVectorInfo(cmd_args=None):
+ """
+ Show the interrupt vectors for each IOKit registry entry that registers interrupts, along with the interrupt controller that owns each vector.
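+ Usage: (lldb) showinterruptvectors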
+ """
+
+ # Constants
+ kInterruptTriggerModeMask = 0x01
+ kInterruptTriggerModeEdge = 0x00
+ kInterruptTriggerModeLevel = kInterruptTriggerModeMask
+ kInterruptPolarityMask = 0x02
+ kInterruptPolarityHigh = 0x00
+ kInterruptPolarityLow = kInterruptPolarityMask
+ kInterruptShareableMask = 0x04
+ kInterruptNotShareable = 0x00
+ kInterruptIsShareable = kInterruptShareableMask
+ kIOInterruptTypePCIMessaged = 0x00010000
+
+ # Get all interrupt controllers
+ interrupt_controllers = list(SearchInterruptControllerDrivers())
+
+ print("Interrupt controllers: ")
+ for ic in interrupt_controllers:
+ print(" {}".format(ic))
+ print("")
+
+ # Iterate over all entries in the registry
+ for entry in GetMatchingEntries(lambda _: True):
+ # Get the name of the entry
+ entry_name = GetRegistryEntryName(entry)
+
+ # Get the location of the entry
+ entry_location = GetRegistryEntryLocationInPlane(entry, kern.globals.gIOServicePlane)
+ if entry_location is None:
+ entry_location = ""
+ else:
+ entry_location = "@" + entry_location
+
+ # Get the interrupt properties
+ (msi_mode, vectorDataList, vectorContList) = GetRegistryEntryInterruptProperties(entry)
+ should_print = False
+ out_str = ""
+ for (vector_data, vector_cont) in zip(vectorDataList, vectorContList):
+ # vector_cont is the name of the interrupt controller. Find the matching controller from
+ # the list of controllers obtained earlier
+ matching_ics = [ic for ic in interrupt_controllers if ic.name == vector_cont]
+
+ if len(matching_ics) > 0:
+ should_print = True
+ # Take the first match
+ matchingIC = matching_ics[0]
+
+ # Use the vector_data to determine the vector and any flags
+ data_ptr = vector_data.data
+ data_length = vector_data.length
+
+ # Dereference vector_data as a uint32_t * and add the base vector number
+ gsi = unsigned(dereference(Cast(data_ptr, 'uint32_t *')))
+ gsi += matchingIC.base_vector_number
+
+ # If data_length is >= 8 then vector_data contains interrupt flags
+ if data_length >= 8:
+ # Add sizeof(uint32_t) to data_ptr to get the flags pointer
+ flags_ptr = kern.GetValueFromAddress(unsigned(data_ptr) + sizeof("uint32_t"))
+ flags = unsigned(dereference(Cast(flags_ptr, 'uint32_t *')))
+ out_str += " +----- [Interrupt Controller {ic}] vector {gsi}, {trigger_level}, {active}, {shareable}{messaged}\n" \
+ .format(ic=matchingIC.name, gsi=hex(gsi),
+ trigger_level="level trigger" if flags & kInterruptTriggerModeLevel else "edge trigger",
+ active="active low" if flags & kInterruptPolarityLow else "active high",
+ shareable="shareable" if flags & kInterruptIsShareable else "exclusive",
+ messaged=", messaged" if flags & kIOInterruptTypePCIMessaged else "")
+ else:
+ out_str += " +----- [Interrupt Controller {ic}] vector {gsi}\n".format(ic=matchingIC.name, gsi=hex(gsi))
+ if should_print:
+ print("[ {entry_name}{entry_location} ]{msi_mode}\n{out_str}" \
+ .format(entry_name=entry_name,
+ entry_location=entry_location,
+ msi_mode=" - MSIs enabled" if msi_mode else "",
+ out_str=out_str))
+
+@lldb_command("showiokitclasshierarchy")
+def ShowIOKitClassHierarchy(cmd_args=None):
+ """
+ Show the class hierarchy for an IOKit class
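+ Usage: (lldb) showiokitclasshierarchy <class name>
+ ex: (lldb) showiokitclasshierarchy IOService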
+ """
+ if not cmd_args:
+ print("Usage: showiokitclasshierarchy <IOKit class name>")
+ return
+
+ class_name = cmd_args[0]
+ metaclasses = GetMetaClasses()
+ if class_name not in metaclasses:
+ print("Class {} does not exist".format(class_name))
+ return
+ metaclass = metaclasses[class_name]
+
+ # loop over superclasses
+ hierarchy = []
+ current_metaclass = metaclass
+ while current_metaclass is not None:
+ hierarchy.insert(0, current_metaclass)
+ current_metaclass = current_metaclass.superclass()
+
+ for (index, mc) in enumerate(hierarchy):
+ indent = (" " * index) + "+---"
+ print("{}[ {} ] {}".format(indent, str(mc.className()), str(mc.data())))
+
+
+
+
######################################
# Helper routines
######################################
return registry_object
return None
+
+class IOKitMetaClass(object):
+ """
+ A class that represents an IOKit metaclass. This is used to model the
+ IOKit inheritance hierarchy.
+ """
+
+ def __init__(self, meta):
+ """
+ Initialize an IOKitMetaClass object.
+
+ Args:
+ meta (core.cvalue.value): An LLDB value representing an
+ OSMetaClass *.
+ """
+ self._meta = meta
+ self._superclass = None
+
+ def data(self):
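+ """
+ Get the underlying LLDB value (an OSMetaClass *) for this metaclass.
+ """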
+ return self._meta
+
+ def setSuperclass(self, superclass):
+ """
+ Set the superclass for this metaclass.
+
+ Args:
+ superclass (IOKitMetaClass): The IOKitMetaClass object to record as
+ this metaclass's superclass.
+ """
+ self._superclass = superclass
+
+ def superclass(self):
+ """
+ Get the superclass for this metaclass (set by the setSuperclass method).
+
+ Returns:
+ IOKitMetaClass: The superclass metaclass set via setSuperclass, or
+ None if none has been set.
+ """
+ return self._superclass
+
+ def className(self):
+ """
+ Get the name of the class this metaclass represents.
+
+ Returns:
+ str: The class name
+ """
+ return self._meta.className.string
+
+ def inheritsFrom(self, other):
+ """
+ Check if the class represented by this metaclass inherits from a class
+ represented by another metaclass.
+
+ Args:
+ other (IOKitMetaClass): The other metaclass
+
+ Returns:
+ bool: Returns True if this class inherits from the other class and
+ False otherwise.
+ """
+ current = self
+ while current is not None:
+ if current == other:
+ return True
+ else:
+ current = current.superclass()
+ return False
+
+
+def GetRegistryEntryClassName(entry):
+ """
+ Get the class name of a registry entry.
+
+ Args:
+ entry (core.cvalue.value): An LLDB value representing an
+ IORegistryEntry *.
+
+ Returns:
+ str: The class name of the entry or None if a class name could not be
+ found.
+ """
+ # Check using IOClass key
+ result = LookupKeyInOSDict(entry.fPropertyTable, kern.globals.gIOClassKey)
+ if result is not None:
+ return GetString(result).replace("\"", "")
+ else:
+ # Use the vtable of the entry to determine the concrete type
+ vt = dereference(Cast(entry, 'uintptr_t *')) - 2 * sizeof('uintptr_t')
+ vt = kern.StripKernelPAC(vt)
+ vtype = kern.SymbolicateFromAddress(vt)
+ if len(vtype) > 0:
+ vtableName = vtype[0].GetName()
+ return vtableName[11:] # strip off "vtable for "
+ else:
+ return None
+
+
+def GetRegistryEntryName(entry):
+ """
+ Get the name of a registry entry.
+
+ Args:
+ entry (core.cvalue.value): An LLDB value representing an
+ IORegistryEntry *.
+
+ Returns:
+ str: The name of the entry or None if a name could not be found.
+ """
+ name = None
+
+ # First check the IOService plane nameKey
+ result = LookupKeyInOSDict(entry.fRegistryTable, kern.globals.gIOServicePlane.nameKey)
+ if result is not None:
+ name = GetString(result)
+
+ # Check the global IOName key
+ if name is None:
+ result = LookupKeyInOSDict(entry.fRegistryTable, kern.globals.gIONameKey)
+ if result is not None:
+ name = GetString(result)
+
+ # Check the IOClass key
+ if name is None:
+ result = LookupKeyInOSDict(entry.fPropertyTable, kern.globals.gIOClassKey)
+ if result is not None:
+ name = GetString(result)
+
+ # Remove extra quotes
+ if name is not None:
+ return name.replace("\"", "")
+ else:
+ return GetRegistryEntryClassName(entry)
+
+
+def GetRegistryEntryLocationInPlane(entry, plane):
+ """
+ Get the location of a registry entry in an IOKit plane.
+
+ Args:
+ entry (core.cvalue.value): An LLDB value representing an
+ IORegistryEntry *.
+ plane: An IOKit plane such as kern.globals.gIOServicePlane.
+
+ Returns:
+ str: The location of the entry or None if a location could not be
+ found.
+ """
+ # Check the plane's pathLocationKey
+ sym = LookupKeyInOSDict(entry.fRegistryTable, plane.pathLocationKey)
+
+ # Check the global IOLocation key
+ if sym is None:
+ sym = LookupKeyInOSDict(entry.fRegistryTable, kern.globals.gIOLocationKey)
+ if sym is not None:
+ return GetString(sym).replace("\"", "")
+ else:
+ return None
+
+
+def GetMetaClasses():
+ """
+ Enumerate all IOKit metaclasses. Uses dynamic caching.
+
+ Returns:
+ Dict[str, IOKitMetaClass]: A dictionary mapping each class name to
+ an IOKitMetaClass object representing the metaclass.
+ """
+ METACLASS_CACHE_KEY = "iokit_metaclasses"
+ cached_data = caching.GetDynamicCacheData(METACLASS_CACHE_KEY)
+
+ # If we have cached data, return immediately
+ if cached_data is not None:
+ return cached_data
+
+ # This method takes a while, so it prints a progress indicator
+ print("Enumerating IOKit metaclasses: ")
+
+ # Iterate over all classes present in sAllClassesDict
+ idx = 0
+ count = unsigned(kern.globals.sAllClassesDict.count)
+ metaclasses_by_address = {}
+ while idx < count:
+ # Print progress after every 10 items
+ if idx % 10 == 0:
+ print(" {} metaclass structures parsed...".format(idx))
+
+ # Address of metaclass
+ address = kern.globals.sAllClassesDict.dictionary[idx].value
+
+ # Create IOKitMetaClass and store in dict
+ metaclasses_by_address[int(address)] = IOKitMetaClass(CastIOKitClass(address, 'OSMetaClass *'))
+ idx += 1
+
+ print(" Enumerated {} metaclasses.".format(count))
+
+ # At this point the metaclass objects are not linked to one another; the superclass links are set up next.
+
+ for (address, metaclass) in metaclasses_by_address.items():
+ # Get the address of the superclass using the superClassLink in OSMetaClass
+ superclass_address = int(metaclass.data().superClassLink)
+
+ # Skip null superclass
+ if superclass_address == 0:
+ continue
+
+ # Find the superclass object in the dict
+ if superclass_address in metaclasses_by_address:
+ metaclass.setSuperclass(metaclasses_by_address[superclass_address])
+ else:
+ print("warning: could not find superclass for {}".format(str(metaclass.data())))
+
+ # This method returns a dictionary mapping each class name to the associated metaclass object
+ metaclasses_by_name = {}
+ for (_, metaclass) in metaclasses_by_address.items():
+ metaclasses_by_name[str(metaclass.className())] = metaclass
+
+ # Save the result in the cache
+ caching.SaveDynamicCacheData(METACLASS_CACHE_KEY, metaclasses_by_name)
+
+ return metaclasses_by_name
+
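+# A minimal usage sketch (illustrative only): look up one class in the map
+# returned by GetMetaClasses.
+#
+#   metaclasses = GetMetaClasses()
+#   if "IOService" in metaclasses:
+#       print(metaclasses["IOService"].className())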
+
+def GetMatchingEntries(matcher):
+ """
+ Iterate over the IOKit registry and find entries that match specific
+ criteria.
+
+ Args:
+ matcher (function): A matching function that returns True for a match
+ and False otherwise.
+
+ Yields:
+ core.cvalue.value: LLDB values that represent IORegistryEntry * for
+ each registry entry found.
+ """
+
+ # Perform a BFS over the IOKit registry tree
+ bfs_queue = deque()
+ bfs_queue.append(kern.globals.gRegistryRoot)
+ while len(bfs_queue) > 0:
+ # Dequeue an entry
+ entry = bfs_queue.popleft()
+
+ # Check if entry matches
+ if matcher(entry):
+ yield entry
+
+ # Find children of this entry and enqueue them
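+ # keys[1] is the plane's child-set key (keys[0] is the parent set used by GetRegistryEntryParent)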
+ child_array = LookupKeyInOSDict(entry.fRegistryTable, kern.globals.gIOServicePlane.keys[1])
+ if child_array is not None:
+ idx = 0
+ ca = CastIOKitClass(child_array, 'OSArray *')
+ count = unsigned(ca.count)
+ while idx < count:
+ bfs_queue.append(CastIOKitClass(ca.array[idx], 'IORegistryEntry *'))
+ idx += 1
+
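+# Example (a sketch): ShowInterruptVectorInfo above passes a match-all lambda;
+# counting every registry entry would look like:
+#
+#   total = sum(1 for _ in GetMatchingEntries(lambda entry: True))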
+
+def FindMatchingServices(matching_name):
+ """
+ Finds registry entries that match the given string. Works similarly to:
+
+ io_iterator_t iter;
+ IOServiceGetMatchingServices(..., IOServiceMatching(matching_name), &iter);
+ while (( io_object_t next = IOIteratorNext(iter))) { ... }
+
+ Args:
+ matching_name (str): The class name to search for.
+
+ Yields:
+ core.cvalue.value: LLDB values that represent IORegistryEntry * for
+ each registry entry found.
+ """
+
+ # Check if the argument is valid
+ metaclasses = GetMetaClasses()
+ if matching_name not in metaclasses:
+ return
+ matching_metaclass = metaclasses[matching_name]
+
+ # An entry matches if it inherits from matching_metaclass
+ def matcher(entry):
+ # Get the class name of the entry and the associated metaclass
+ entry_name = GetRegistryEntryClassName(entry)
+ if entry_name in metaclasses:
+ entry_metaclass = metaclasses[entry_name]
+ return entry_metaclass.inheritsFrom(matching_metaclass)
+ else:
+ return False
+
+ # Search for entries
+ for entry in GetMatchingEntries(matcher):
+ yield entry
+
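+# Example (a sketch, mirroring SearchInterruptControllerDrivers below):
+#
+#   for entry in FindMatchingServices("IOInterruptController"):
+#       print(GetRegistryEntryName(entry))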
+
+def GetRegistryEntryParent(entry, iokit_plane=None):
+ """
+ Gets the parent entry of a registry entry.
+
+ Args:
+ entry (core.cvalue.value): An LLDB value representing an
+ IORegistryEntry *.
+ iokit_plane (core.cvalue.value, optional): An LLDB value representing an
+ IORegistryPlane *. By default, this method uses the IOService
+ plane.
+
+ Returns:
+ core.cvalue.value: An LLDB value representing an IORegistryEntry * that
+ is the parent entry of the entry argument in the specified plane.
+ Returns None if no entry could be found.
+ """
+ kParentSetIndex = 0
+ parent_key = None
+ if iokit_plane is None:
+ parent_key = kern.globals.gIOServicePlane.keys[kParentSetIndex]
+ else:
+ parent_key = iokit_plane.keys[kParentSetIndex]
+ parent_array = LookupKeyInOSDict(entry.fRegistryTable, parent_key)
+ parent_entry = None
+ if parent_array is not None:
+ idx = 0
+ ca = CastIOKitClass(parent_array, 'OSArray *')
+ count = unsigned(ca.count)
+ if count > 0:
+ parent_entry = CastIOKitClass(ca.array[0], 'IORegistryEntry *')
+ return parent_entry
+
+
+def GetRegistryEntryInterruptProperties(entry):
+ """
+ Get the interrupt properties of a registry entry.
+
+ Args:
+ entry (core.cvalue.value): An LLDB value representing an IORegistryEntry *.
+
+ Returns:
+ (bool, List[core.cvalue.value], List[str]): A tuple with the following
+ fields:
+ - First field (bool): Whether this entry has a non-null
+ IOPCIMSIMode.
+ - Second field (List[core.cvalue.value]): A list of LLDB values
+ representing OSData *. The OSData* pointer points to
+ interrupt vector data.
+ - Third field (List[str]): A list of strings representing the
+ interrupt controller names from the
+ IOInterruptControllers property.
+ """
+ INTERRUPT_SPECIFIERS_PROPERTY = "IOInterruptSpecifiers"
+ INTERRUPT_CONTROLLERS_PROPERTY = "IOInterruptControllers"
+ MSI_MODE_PROPERTY = "IOPCIMSIMode"
+
+ # Check IOInterruptSpecifiers
+ interrupt_specifiers = LookupKeyInPropTable(entry.fPropertyTable, INTERRUPT_SPECIFIERS_PROPERTY)
+ if interrupt_specifiers is not None:
+ interrupt_specifiers = CastIOKitClass(interrupt_specifiers, 'OSArray *')
+
+ # Check IOInterruptControllers
+ interrupt_controllers = LookupKeyInPropTable(entry.fPropertyTable, INTERRUPT_CONTROLLERS_PROPERTY)
+ if interrupt_controllers is not None:
+ interrupt_controllers = CastIOKitClass(interrupt_controllers, 'OSArray *')
+
+ # Check MSI mode
+ msi_mode = LookupKeyInPropTable(entry.fPropertyTable, MSI_MODE_PROPERTY)
+
+ result_vector_data = []
+ result_vector_cont = []
+ if interrupt_specifiers is not None and interrupt_controllers is not None:
+ interrupt_specifiers_array_count = unsigned(interrupt_specifiers.count)
+ interrupt_controllers_array_count = unsigned(interrupt_controllers.count)
+ # The array lengths should be the same
+ if interrupt_specifiers_array_count == interrupt_controllers_array_count and interrupt_specifiers_array_count > 0:
+ idx = 0
+ while idx < interrupt_specifiers_array_count:
+ # IOInterruptSpecifiers is an array of OSData *
+ vector_data = CastIOKitClass(interrupt_specifiers.array[idx], "OSData *")
+
+ # IOInterruptControllers is an array of OSString *
+ vector_cont = GetString(interrupt_controllers.array[idx])
+
+ result_vector_data.append(vector_data)
+ result_vector_cont.append(vector_cont)
+ idx += 1
+
+ return (msi_mode is not None, result_vector_data, result_vector_cont)
+
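+# Example (a sketch; ShowInterruptVectorInfo consumes the tuple this way):
+#
+#   (msi_mode, vector_data_list, controller_names) = \
+#       GetRegistryEntryInterruptProperties(entry)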
+
+class InterruptControllerDevice(object):
+ """Represents a IOInterruptController"""
+
+ def __init__(self, device, driver, base_vector_number, name):
+ """
+ Initialize an InterruptControllerDevice.
+
+ Args:
+ device (core.cvalue.value): The device object.
+ driver (core.cvalue.value): The driver object.
+ base_vector_number (int): The base interrupt vector.
+ name (str): The name of this interrupt controller.
+
+ Note:
+ Use the factory method makeInterruptControllerDevice to validate
+ properties.
+ """
+ self.device = device
+ self.driver = driver
+ self.name = name
+ self.base_vector_number = base_vector_number
+
+
+ def __str__(self):
+ """
+ String representation of this InterruptControllerDevice.
+ """
+ return " Name {}, base vector = {}, device = {}, driver = {}".format(
+ self.name, hex(self.base_vector_number), str(self.device), str(self.driver))
+
+ @staticmethod
+ def makeInterruptControllerDevice(device, driver):
+ """
+ Factory method to create an InterruptControllerDevice.
+
+ Args:
+ device (core.cvalue.value): The device object.
+ driver (core.cvalue.value): The driver object.
+
+ Returns:
+ InterruptControllerDevice: Returns an instance of
+ InterruptControllerDevice or None if the arguments do not have
+ the required properties.
+ """
+ BASE_VECTOR_PROPERTY = "Base Vector Number"
+ INTERRUPT_CONTROLLER_NAME_PROPERTY = "InterruptControllerName"
+ base_vector = LookupKeyInPropTable(device.fPropertyTable, BASE_VECTOR_PROPERTY)
+ if base_vector is None:
+ base_vector = LookupKeyInPropTable(driver.fPropertyTable, BASE_VECTOR_PROPERTY)
+ device_name = LookupKeyInPropTable(device.fPropertyTable, INTERRUPT_CONTROLLER_NAME_PROPERTY)
+ if device_name is None:
+ device_name = LookupKeyInPropTable(driver.fPropertyTable, INTERRUPT_CONTROLLER_NAME_PROPERTY)
+
+ if device_name is not None:
+ # Some interrupt controllers do not have a base vector number. Assume it is 0.
+ base_vector_number = 0
+ if base_vector is not None:
+ base_vector_number = unsigned(GetNumber(base_vector))
+ device_name = GetString(device_name)
+ # Construct object and return
+ return InterruptControllerDevice(device, driver, base_vector_number, device_name)
+ else:
+ # error case
+ return None
+
+
+def SearchInterruptControllerDrivers():
+ """
+ Search the IOKit registry for entries that match IOInterruptController.
+
+ Yields:
+ InterruptControllerDevice: An InterruptControllerDevice object for each
+ IOInterruptController found in the registry.
+ """
+ for entry in FindMatchingServices("IOInterruptController"):
+ # Get parent
+ parent = GetRegistryEntryParent(entry)
+
+ # Make the interrupt controller object
+ ic = InterruptControllerDevice.makeInterruptControllerDevice(parent, entry)
+
+ # Yield object
+ if ic is not None:
+ yield ic
+
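+# Example (a sketch; ShowInterruptVectorInfo prints the same objects):
+#
+#   for ic in SearchInterruptControllerDrivers():
+#       print(ic)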
+
def LookupKeyInOSDict(osdict, key):
""" Returns the value corresponding to a given key in a OSDictionary
Returns None if the key was not found
-@lldb_command('walklist_entry', 'S')
+@lldb_command('walklist_entry', 'SE')
def WalkList(cmd_args=[], cmd_options={}):
""" iterate over a list as defined with LIST_ENTRY in bsd/sys/queue.h
params:
element_type - str : Type of the next element
field_name - str : Name of the field in next element's structure
- Option: -S - suppress summary output.
+ Options: -S - suppress summary output.
+ -E - iterate using SLIST_ENTRY links (sle_next) instead of LIST_ENTRY links (le_next)
+
Usage: (lldb) walklist_entry <obj with list_entry *> <struct type> <fieldname>
ex: (lldb) walklist_entry 0x7fffff80 "struct proc *" "p_sibling"
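+ ex (SLIST): (lldb) walklist_entry -E <obj with slist_entry *> <struct type> <fieldname>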
el_type = cmd_args[1]
queue_head = kern.GetValueFromAddress(cmd_args[0], el_type)
field_name = cmd_args[2]
-
showsummary = False
if el_type in lldb_summary_definitions:
showsummary = True
if '-S' in cmd_options:
showsummary = False
+ if '-E' in cmd_options:
+ prefix = 's'
+ else:
+ prefix = ''
elt = queue_head
while unsigned(elt) != 0:
i = elt
- elt = elt.__getattr__(field_name).le_next
+ elt = elt.__getattr__(field_name).__getattr__(prefix + 'le_next')
if showsummary:
print lldb_summary_definitions[el_type](i)
else:
kern.globals.traptrace_entries_per_cpu, MAX_TRAPTRACE_BACKTRACES)
+@lldb_command('showsysctls', 'P:')
+def ShowSysctls(cmd_args=[], cmd_options={}):
+ """ Walks the list of sysctl data structures, printing out each during traversal.
+ Arguments:
+ -P <string> : Limit output to sysctls starting with the specified prefix.
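+ Usage: (lldb) showsysctls [-P <prefix>]
+ ex: (lldb) showsysctls -P kern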
+ """
+ if '-P' in cmd_options:
+ _ShowSysctl_prefix = cmd_options['-P']
+ allowed_prefixes = _ShowSysctl_prefix.split('.')
+ if allowed_prefixes:
+ for x in xrange(1, len(allowed_prefixes)):
+ allowed_prefixes[x] = allowed_prefixes[x - 1] + "." + allowed_prefixes[x]
+ else:
+ _ShowSysctl_prefix = ''
+ allowed_prefixes = []
+ def IterateSysctls(oid, parent_str, i):
+ headp = oid
+ parentstr = "<none>" if parent_str is None else parent_str
+ for pp in IterateListEntry(headp, 'struct sysctl_oid *', 'oid_link', 's'):
+ oid_type = pp.oid_kind & 0xf # low nibble of oid_kind is CTLTYPE_*; 1 == CTLTYPE_NODE
+ next_parent = str(pp.oid_name)
+ if parent_str is not None:
+ next_parent = parent_str + "." + next_parent
+ st = (" " * i) + str(pp.GetSBValue().Dereference()).replace("\n", "\n" + (" " * i))
+ if oid_type == 1 and pp.oid_arg1 != 0:
+ # Recurse into a node only if it lies on the path from the root to the
+ # allowed prefix, or if the next parent itself starts with the
+ # user-specified prefix.
+ if next_parent not in allowed_prefixes and not next_parent.startswith(_ShowSysctl_prefix):
+ continue
+ print 'parent = "%s"' % parentstr, st[st.find("{"):]
+ IterateSysctls(Cast(pp.oid_arg1, "struct sysctl_oid_list *"), next_parent, i + 2)
+ elif _ShowSysctl_prefix == '' or next_parent.startswith(_ShowSysctl_prefix):
+ print ('parent = "%s"' % parentstr), st[st.find("{"):]
+ IterateSysctls(kern.globals.sysctl__children, None, 0)
+
from memory import *
if len(cmd_out) != 0:
cmd_out1 = cmd_out.split('\n')
if len(cmd_out1) != 0:
- print OutputAddress([unsigned(link_register)]) + ": " + cmd_out1[0].split(':')[1]
+ address = OutputAddress([unsigned(link_register)])
+ if address is None:
+ address = '0x%x <???>' % unsigned(link_register)
+ print address + ": " + cmd_out1[1].split(':', 1)[1]
a = dereference(kern.GetValueFromAddress(unsigned(a), 'uintptr_t *'))
# EndMacro: newbt