#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
+#if CONFIG_MACF
+#include <security/mac_mach_internal.h>
+#endif
+
/*
* Forward declarations
*/
/* the rest of the special ports will be set up later */
+ bzero(&realhost.exc_actions[0], sizeof(realhost.exc_actions[0]) * EXC_TYPES_COUNT);
for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
realhost.exc_actions[i].port = IP_NULL;
- }/* for */
+ /* The mac framework is not yet initialized, so we defer
+ * initializing the labels to later, when they are set
+ * for the first time. */
+ realhost.exc_actions[i].label = NULL;
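+ /* The label is attached on first use; see host_set_exception_ports()
+  * and host_swap_exception_ports() below, which call
+  * mac_exc_associate_action_label(). */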
+ /* explicitly reset the remaining fields of each exception action */
+ realhost.exc_actions[i].behavior = 0;
+ realhost.exc_actions[i].flavor = 0;
+ realhost.exc_actions[i].privileged = FALSE;
+ }/* for */
/*
* Set up ipc for default processor set.
host_self_trap(
__unused struct host_self_trap_args *args)
{
+ task_t self = current_task();
ipc_port_t sright;
mach_port_name_t name;
- sright = ipc_port_copy_send(current_task()->itk_host);
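+ /* Hold the task's IPC lock while copying the send right so itk_host
+  * cannot be changed out from under us (e.g. by task_set_special_port()). */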
+ itk_lock(self);
+ sright = ipc_port_copy_send(self->itk_host);
+ itk_unlock(self);
name = ipc_port_copyout_send(sright, current_space());
return name;
}
host_t host = HOST_NULL;
if (IP_VALID(port)) {
- ip_lock(port);
- if (ip_active(port) &&
- ((ip_kotype(port) == IKOT_HOST) ||
- (ip_kotype(port) == IKOT_HOST_PRIV)
- ))
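+ /* The host and host_priv ports are created at boot and never
+  * deallocated, and a port's kobject type does not change once set,
+  * so an unlocked kotype check is sufficient here. */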
+ if (ip_kotype(port) == IKOT_HOST ||
+ ip_kotype(port) == IKOT_HOST_PRIV) {
host = (host_t) port->ip_kobject;
- ip_unlock(port);
+ assert(ip_active(port));
+ }
}
-
return host;
}
*/
kern_return_t
host_set_exception_ports(
- host_priv_t host_priv,
+ host_priv_t host_priv,
exception_mask_t exception_mask,
ipc_port_t new_port,
exception_behavior_t new_behavior,
thread_state_flavor_t new_flavor)
{
- register int i;
+ int i;
ipc_port_t old_port[EXC_TYPES_COUNT];
+#if CONFIG_MACF
+ struct label *deferred_labels[EXC_TYPES_COUNT];
+ struct label *new_label;
+#endif
+
if (host_priv == HOST_PRIV_NULL) {
return KERN_INVALID_ARGUMENT;
}
- assert(host_priv == &realhost);
-
if (exception_mask & ~EXC_MASK_VALID) {
return KERN_INVALID_ARGUMENT;
}
if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor))
return (KERN_INVALID_ARGUMENT);
+#if CONFIG_MACF
+ if (mac_task_check_set_host_exception_ports(current_task(), exception_mask) != 0)
+ return KERN_NO_ACCESS;
+
+ new_label = mac_exc_create_label_for_current_proc();
+
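+ /* Pre-allocate any labels we may need before taking host_lock(),
+  * since label allocation may block; under the lock we only attach
+  * or discard them. */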
+ for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
+ if (host_priv->exc_actions[i].label == NULL) {
+ deferred_labels[i] = mac_exc_create_label();
+ } else {
+ deferred_labels[i] = NULL;
+ }
+ }
+#endif
+
+ assert(host_priv == &realhost);
+
host_lock(host_priv);
for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
- if (exception_mask & (1 << i)) {
+#if CONFIG_MACF
+ if (host_priv->exc_actions[i].label == NULL) {
+ // Lazy initialization (see ipc_host_init).
+ mac_exc_associate_action_label(&host_priv->exc_actions[i], deferred_labels[i]);
+ deferred_labels[i] = NULL; // Label is used, do not free.
+ }
+#endif
+
+ if ((exception_mask & (1 << i))
+#if CONFIG_MACF
+ && mac_exc_update_action_label(&host_priv->exc_actions[i], new_label) == 0
+#endif
+ ) {
old_port[i] = host_priv->exc_actions[i].port;
+
host_priv->exc_actions[i].port =
ipc_port_copy_send(new_port);
host_priv->exc_actions[i].behavior = new_behavior;
host_priv->exc_actions[i].flavor = new_flavor;
- } else
+ } else {
old_port[i] = IP_NULL;
+ }
}/* for */
/*
* Consume send rights without any lock held.
*/
host_unlock(host_priv);
- for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
+
+#if CONFIG_MACF
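+ /* new_label was only passed to mac_exc_update_action_label() above;
+  * the exception actions keep their own labels, so release it now. */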
+ mac_exc_free_label(new_label);
+#endif
+
+ for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
if (IP_VALID(old_port[i]))
ipc_port_release_send(old_port[i]);
+#if CONFIG_MACF
+ if (deferred_labels[i] != NULL) {
+ /* Deferred label went unused: Another thread has completed the lazy initialization. */
+ mac_exc_free_label(deferred_labels[i]);
+ }
+#endif
+ }
if (IP_VALID(new_port)) /* consume send right */
ipc_port_release_send(new_port);
kern_return_t
host_swap_exception_ports(
- host_priv_t host_priv,
+ host_priv_t host_priv,
exception_mask_t exception_mask,
ipc_port_t new_port,
exception_behavior_t new_behavior,
count;
ipc_port_t old_port[EXC_TYPES_COUNT];
+#if CONFIG_MACF
+ struct label *deferred_labels[EXC_TYPES_COUNT];
+ struct label *new_label;
+#endif
+
if (host_priv == HOST_PRIV_NULL)
return KERN_INVALID_ARGUMENT;
if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor))
return (KERN_INVALID_ARGUMENT);
+#if CONFIG_MACF
+ if (mac_task_check_set_host_exception_ports(current_task(), exception_mask) != 0)
+ return KERN_NO_ACCESS;
+
+ new_label = mac_exc_create_label_for_current_proc();
+
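+ /* Pre-allocate labels before taking host_lock(), mirroring
+  * host_set_exception_ports() above. */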
+ for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
+ if (host_priv->exc_actions[i].label == NULL) {
+ deferred_labels[i] = mac_exc_create_label();
+ } else {
+ deferred_labels[i] = NULL;
+ }
+ }
+#endif /* CONFIG_MACF */
+
host_lock(host_priv);
assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
for (count=0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; i++) {
- if (exception_mask & (1 << i)) {
+#if CONFIG_MACF
+ if (host_priv->exc_actions[i].label == NULL) {
+ // Lazy initialization (see ipc_host_init).
+ mac_exc_associate_action_label(&host_priv->exc_actions[i], deferred_labels[i]);
+ deferred_labels[i] = NULL; // Label is used, do not free.
+ }
+#endif
+
+ if ((exception_mask & (1 << i))
+#if CONFIG_MACF
+ && mac_exc_update_action_label(&host_priv->exc_actions[i], new_label) == 0
+#endif
+ ) {
for (j = 0; j < count; j++) {
/*
* search for an identical entry, if found
ipc_port_copy_send(new_port);
host_priv->exc_actions[i].behavior = new_behavior;
host_priv->exc_actions[i].flavor = new_flavor;
- } else
+ } else {
old_port[i] = IP_NULL;
+ }
}/* for */
host_unlock(host_priv);
+#if CONFIG_MACF
+ mac_exc_free_label(new_label);
+#endif
+
/*
* Consume send rights without any lock held.
*/
while (--i >= FIRST_EXCEPTION) {
if (IP_VALID(old_port[i]))
ipc_port_release_send(old_port[i]);
+#if CONFIG_MACF
+ if (deferred_labels[i] != NULL) {
+ mac_exc_free_label(deferred_labels[i]); // Label unused.
+ }
+#endif
}
if (IP_VALID(new_port)) /* consume send right */