*/
#include <mach/mach_types.h>
+#include <mach/mach_traps.h>
#include <mach/notify.h>
#include <ipc/ipc_types.h>
#include <ipc/ipc_port.h>
#include <mach/mach_voucher_server.h>
#include <mach/mach_voucher_attr_control_server.h>
#include <mach/mach_host_server.h>
+#include <voucher/ipc_pthread_priority_types.h>
/*
* Sysctl variable; enable and disable tracing of voucher contents
ipc_voucher_t iv_alloc(iv_index_t entries);
void iv_dealloc(ipc_voucher_t iv, boolean_t unhash);
-static inline iv_refs_t
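+/*
+ * Named reference-count groups: when refcount debugging is enabled,
+ * os_refcnt attributes retain/release activity to these groups.
+ */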
+os_refgrp_decl(static, iv_refgrp, "voucher", NULL);
+os_refgrp_decl(static, ivac_refgrp, "voucher attribute control", NULL);
+
+static inline void
iv_reference(ipc_voucher_t iv)
{
- iv_refs_t refs;
-
- refs = hw_atomic_add(&iv->iv_refs, 1);
- return refs;
+ os_ref_retain(&iv->iv_refs);
}
static inline void
iv_release(ipc_voucher_t iv)
{
- iv_refs_t refs;
-
- assert(0 < iv->iv_refs);
- refs = hw_atomic_sub(&iv->iv_refs, 1);
- if (0 == refs)
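+ /* os_ref_release() returns the remaining count; zero means we dropped the last reference */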
+ if (os_ref_release(&iv->iv_refs) == 0) {
iv_dealloc(iv, TRUE);
+ }
}
/*
if (IV_NULL == iv)
return IV_NULL;
- iv->iv_refs = 1;
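+ /* os_ref_init() starts the count at 1, preserving the old iv_refs = 1 semantics */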
+ os_ref_init(&iv->iv_refs, &iv_refgrp);
iv->iv_sum = 0;
iv->iv_hash = 0;
iv->iv_port = IP_NULL;
*/
if (unhash) {
ivht_lock();
- assert(0 == iv->iv_refs);
+ assert(os_ref_get_count(&iv->iv_refs) == 0);
assert(IV_HASH_BUCKETS > iv->iv_hash);
queue_remove(&ivht_bucket[iv->iv_hash], iv, ipc_voucher_t, iv_hash_link);
ivht_count--;
KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_VOUCHER_DESTROY) | DBG_FUNC_NONE,
VM_KERNEL_ADDRPERM((uintptr_t)iv), 0, ivht_count, 0, 0);
- } else
- assert(0 == --iv->iv_refs);
+ } else {
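+ /* __assert_only: cnt is otherwise unused when assertions are compiled out */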
+ os_ref_count_t cnt __assert_only = os_ref_release(&iv->iv_refs);
+ assert(cnt == 0);
+ }
/*
* if a port was allocated for this voucher,
void
ipc_voucher_reference(ipc_voucher_t voucher)
{
- iv_refs_t refs;
-
if (IPC_VOUCHER_NULL == voucher)
return;
- refs = iv_reference(voucher);
- assert(1 < refs);
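+ /* os_ref_retain() validates the count internally (panics if it was already zero), subsuming the old assert */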
+ iv_reference(voucher);
}
void
if (IV_NULL == voucher)
return (IP_NULL);
- assert(0 < voucher->iv_refs);
+ assert(os_ref_get_count(&voucher->iv_refs) > 0);
/* create a port if needed */
port = voucher->iv_port;
if (IVAC_NULL == ivac)
return IVAC_NULL;
- ivac->ivac_refs = 1;
+ os_ref_init(&ivac->ivac_refs, &ivac_refgrp);
ivac->ivac_is_growing = FALSE;
ivac->ivac_port = IP_NULL;
* that the reference count is still zero.
*/
ivgt_lock();
- if (ivac->ivac_refs > 0) {
+ if (os_ref_get_count(&ivac->ivac_refs) > 0) {
ivgt_unlock();
return;
}
ip_unlock(port);
ivac_release(ivac);
+ } else {
+ ip_unlock(port);
}
- ip_unlock(port);
}
/*
assert(IP_NULL == port->ip_nsrequest);
ipc_port_nsrequest(port, port->ip_mscount, ipc_port_make_sonce_locked(port), &old_notify);
assert(IP_NULL == old_notify);
- ip_unlock(port);
+ /* ipc_port_nsrequest unlocks the port */
} else {
/* piggyback on the existing port reference, so consume ours */
ip_unlock(port);
* re-drive the release.
*/
if (ivace->ivace_made != made) {
- assert(made < ivace->ivace_made);
-
if (KERN_SUCCESS == kr)
ivace->ivace_made -= made;
assert(iv->iv_hash == hash);
/* if not already deallocating and sums match... */
- if (0 < iv->iv_refs && iv->iv_sum == sum) {
- iv_refs_t refs;
+ if ((os_ref_get_count(&iv->iv_refs) > 0) && (iv->iv_sum == sum)) {
iv_index_t i;
assert(iv->iv_table_size <= new_iv->iv_table_size);
/* can we get a ref before it hits 0
*
- * This is thread safe. The reference is just an atomic
- * add. If the reference count is zero when we adjust it,
- * no other thread can have a reference to the voucher.
+ * This is thread safe. If the reference count is zero before we
+ * adjust it, no other thread can have a reference to the voucher.
* The dealloc code requires holding the ivht_lock, so
* the voucher cannot be yanked out from under us.
*/
- refs = iv_reference(iv);
- if (1 == refs) {
- /* drats! going away. Put back to zero */
- iv->iv_refs = 0;
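+ /* os_ref_retain_try() fails only once the count has hit zero; skip this dying voucher and keep scanning */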
+ if (!os_ref_retain_try(&iv->iv_refs)) {
continue;
}
#define PAYLOAD_PER_TRACEPOINT (4 * sizeof(uintptr_t))
#define PAYLOAD_SIZE 1024
- _Static_assert(PAYLOAD_SIZE % PAYLOAD_PER_TRACEPOINT == 0, "size invariant violated");
+ static_assert(PAYLOAD_SIZE % PAYLOAD_PER_TRACEPOINT == 0, "size invariant violated");
mach_voucher_attr_raw_recipe_array_size_t payload_size = PAYLOAD_SIZE;
uintptr_t payload[PAYLOAD_SIZE / sizeof(uintptr_t)];
}
}
- KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_VOUCHER_CREATE) | DBG_FUNC_NONE,
- voucher_addr,
- new_iv->iv_table_size, ivht_count, payload_size, 0);
+ KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_VOUCHER_CREATE),
+ voucher_addr, new_iv->iv_table_size, ivht_count,
+ payload_size);
uintptr_t index = 0;
while (attr_tracepoints_needed--) {
- KERNEL_DEBUG_CONSTANT1(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_VOUCHER_CREATE_ATTR_DATA) | DBG_FUNC_NONE,
- payload[index],
- payload[index+1],
- payload[index+2],
- payload[index+3],
- voucher_addr);
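+ /*
+ * KDBG() carries at most four payload arguments, so the voucher_addr
+ * tag that KERNEL_DEBUG_CONSTANT1 passed in arg5 is dropped here.
+ */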
+ KDBG(MACHDBG_CODE(DBG_MACH_IPC,
+ MACH_IPC_VOUCHER_CREATE_ATTR_DATA), payload[index],
+ payload[index + 1], payload[index + 2],
+ payload[index + 3]);
index += 4;
}
} else {
- KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_VOUCHER_CREATE) | DBG_FUNC_NONE,
- voucher_addr,
- new_iv->iv_table_size, ivht_count, 0, 0);
+ KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_VOUCHER_CREATE),
+ voucher_addr, new_iv->iv_table_size, ivht_count);
}
}
#endif /* KDEBUG_LEVEL >= KDEBUG_LEVEL_STANDARD */
key_index = control->ivac_key_index;
- assert(0 < voucher->iv_refs);
+ assert(os_ref_get_count(&voucher->iv_refs) > 0);
value_index = iv_lookup(voucher, key_index);
ivace_lookup_values(key_index, value_index,
out_values, in_out_size);
return KERN_NOT_SUPPORTED;
}
+/*
+ * Routine: ipc_get_pthpriority_from_kmsg_voucher
+ * Purpose:
+ * Get the canonicalized pthread priority from the voucher attached to the kmsg.
+ */
+kern_return_t
+ipc_get_pthpriority_from_kmsg_voucher(
+ ipc_kmsg_t kmsg,
+ ipc_pthread_priority_value_t *canonicalize_priority_value)
+{
+ ipc_voucher_t pthread_priority_voucher;
+ mach_voucher_attr_raw_recipe_size_t content_size =
+ sizeof(mach_voucher_attr_recipe_data_t) + sizeof(ipc_pthread_priority_value_t);
+ uint8_t content_data[content_size];
+ mach_voucher_attr_recipe_t cur_content;
+ kern_return_t kr = KERN_SUCCESS;
+
+ if (!IP_VALID(kmsg->ikm_voucher)) {
+ return KERN_FAILURE;
+ }
+
+ pthread_priority_voucher = (ipc_voucher_t)kmsg->ikm_voucher->ip_kobject;
+ kr = mach_voucher_extract_attr_recipe(pthread_priority_voucher,
+ MACH_VOUCHER_ATTR_KEY_PTHPRIORITY,
+ content_data,
+ &content_size);
+ if (kr != KERN_SUCCESS) {
+ return kr;
+ }
+
+ /* return KERN_INVALID_VALUE if only the default value is present (no full recipe) */
+ if (content_size < sizeof(mach_voucher_attr_recipe_data_t)) {
+ return KERN_INVALID_VALUE;
+ }
+
+ cur_content = (mach_voucher_attr_recipe_t) (void *) &content_data[0];
+ assert(cur_content->content_size == sizeof(ipc_pthread_priority_value_t));
+ memcpy(canonicalize_priority_value, cur_content->content, sizeof(ipc_pthread_priority_value_t));
+
+ return KERN_SUCCESS;
+}
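+
+/*
+ * Hypothetical caller (illustration only, not part of this change): a
+ * receive-side path holding a kmsg could read the canonicalized pthread
+ * priority carried by its voucher as follows:
+ *
+ *     ipc_pthread_priority_value_t pth_value;
+ *
+ *     if (ipc_get_pthpriority_from_kmsg_voucher(kmsg, &pth_value) ==
+ *         KERN_SUCCESS) {
+ *         ... apply pth_value to the receiving thread ...
+ *     }
+ */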
+
+
/*
* Routine: ipc_voucher_send_preprocessing
* Purpose:
return KERN_SUCCESS;
}
+/*
+ * Activity ID generation
+ */
+uint64_t voucher_activity_id;
+
+#define generate_activity_id(x) \
+ ((uint64_t)OSAddAtomic64((x), (int64_t *)&voucher_activity_id))
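+/*
+ * OSAddAtomic64() returns the value prior to the addition, so
+ * generate_activity_id(x) hands the caller the contiguous id range
+ * [ret, ret + x). The counter starts at 1 (see mach_init_activity_id),
+ * so id 0 is never generated.
+ */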
+
+/*
+ * Routine: mach_init_activity_id
+ * Purpose:
+ * Initialize voucher activity id.
+ */
+void
+mach_init_activity_id(void)
+{
+ voucher_activity_id = 1;
+}
+
+/*
+ * Routine: mach_generate_activity_id
+ * Purpose:
+ * Generate a system wide voucher activity id.
+ */
+kern_return_t
+mach_generate_activity_id(
+ struct mach_generate_activity_id_args *args)
+{
+ uint64_t activity_id;
+ kern_return_t kr = KERN_SUCCESS;
+
+ if (args->count <= 0 || args->count > MACH_ACTIVITY_ID_COUNT_MAX) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ activity_id = generate_activity_id(args->count);
+ kr = copyout(&activity_id, args->activity_id, sizeof(activity_id));
+
+ return kr;
+}
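+
+/*
+ * Sketch of the matching user-space call (wrapper signature assumed
+ * from mach_traps.h; shown for illustration only):
+ *
+ *     uint64_t first_id;
+ *     kern_return_t kr;
+ *
+ *     kr = mach_generate_activity_id(mach_task_self(), 1, &first_id);
+ *     if (kr == KERN_SUCCESS) {
+ *         ... first_id is the first reserved activity id ...
+ *     }
+ */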
+
#if defined(MACH_VOUCHER_ATTR_KEY_USER_DATA) || defined(MACH_VOUCHER_ATTR_KEY_TEST)
/*