int opcode = uap->opcode;
user_addr_t message = uap->message;
struct kauth_resolver_work *workp;
- struct kauth_cache_sizes sz_arg;
+ struct kauth_cache_sizes sz_arg = {};
int error;
pid_t new_id;
return 0;
}
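
A recurring pattern in this patch: stack and heap buffers that later reach copyout()/sooptcopyout() are now zero-filled at allocation (`= {}` on locals, M_ZERO on MALLOC/_MALLOC), so stale kernel bytes, including struct padding, cannot leak to userspace. A minimal userspace illustration of the padding issue (the struct and dump() helper are made up for this demo; note that xnu's `= {}` empty-brace form is a compiler extension, standard C spells it `= {0}`):

	#include <stdio.h>
	#include <stdint.h>

	struct sample {
		char     c;	/* on LP64, 7 padding bytes typically follow */
		uint64_t v;
	};

	static void
	dump(const void *p, size_t n)
	{
		const unsigned char *b = p;
		for (size_t i = 0; i < n; i++)
			printf("%02x ", b[i]);
		printf("\n");
	}

	int
	main(void)
	{
		struct sample leaky;		/* padding may hold stale stack bytes */
		struct sample clean = {0};	/* members zeroed; clang zeroes padding too */

		leaky.c = 'A';
		leaky.v = 1;			/* members set, padding left indeterminate */
		dump(&leaky, sizeof(leaky));	/* may show garbage between c and v */
		dump(&clean, sizeof(clean));	/* all zeros in practice */
		return 0;
	}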
- MALLOC( cred_listp, debug_ucred *, req->oldlen, M_TEMP, M_WAITOK );
+ MALLOC( cred_listp, debug_ucred *, req->oldlen, M_TEMP, M_WAITOK | M_ZERO);
if ( cred_listp == NULL ) {
return (ENOMEM);
}
return (ENOMEM);
}
- MALLOC( bt_bufp, cred_debug_buffer *, req->oldlen, M_TEMP, M_WAITOK );
+ MALLOC( bt_bufp, cred_debug_buffer *, req->oldlen, M_TEMP, M_WAITOK | M_ZERO);
if ( bt_bufp == NULL ) {
return (ENOMEM);
}
/*
* Function: csblob_get_base_offset
*
- * Description: This function returns the base offset into the Mach-O binary
+ * Description: This function returns the base offset into the (possibly universal) binary
* for a given blob.
*/
if (NULL == p->p_textvp)
return NULL;
+ if ((p->p_csflags & CS_SIGNED) == 0) {
+ return NULL;
+ }
+
return ubc_cs_blob_get(p->p_textvp, -1, p->p_textoff);
}
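
The cs_* accessors above now bail out early when the kernel never marked the process as signed (p->p_csflags & CS_SIGNED) instead of probing for a blob. The same flag word is observable from userspace through the csops() status call; a small sketch (constants copied from xnu's <sys/codesign.h> and redeclared here, since that header is not always present in public SDKs):

	#include <stdio.h>
	#include <stdlib.h>
	#include <stdint.h>
	#include <unistd.h>

	/* From xnu's <sys/codesign.h>, redeclared for the sketch. */
	#define CS_OPS_STATUS	0
	#define CS_VALID	0x00000001
	#define CS_SIGNED	0x20000000
	extern int csops(pid_t pid, unsigned int ops, void *useraddr, size_t usersize);

	int
	main(int argc, char *argv[])
	{
		pid_t pid = (argc > 1) ? (pid_t)atoi(argv[1]) : getpid();
		uint32_t flags = 0;

		if (csops(pid, CS_OPS_STATUS, &flags, sizeof(flags)) != 0) {
			perror("csops");
			return 1;
		}
		printf("pid %d csflags 0x%08x%s%s\n", (int)pid, flags,
		    (flags & CS_SIGNED) ? " CS_SIGNED" : "",
		    (flags & CS_VALID) ? " CS_VALID" : "");
		return 0;
	}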
/*
- * Function: csproc_get_blob
+ * Function: csvnode_get_blob
*
* Description: This function returns the cs_blob
* for the vnode vp
int
csproc_get_platform_path(struct proc *p)
{
- struct cs_blob *csblob = csproc_get_blob(p);
+ struct cs_blob *csblob;
+
+ csblob = csproc_get_blob(p);
return (csblob == NULL) ? 0 : csblob->csb_platform_path;
}
*out_start = NULL;
*out_length = 0;
+ if ((p->p_csflags & CS_SIGNED) == 0) {
+ return 0;
+ }
+
if (NULL == p->p_textvp)
return EINVAL;
{
struct cs_blob *csblob;
+ if ((p->p_csflags & CS_SIGNED) == 0) {
+ return NULL;
+ }
+
if (NULL == p->p_textvp)
return NULL;
return csblob_get_identity(csblob);
}
-
-/* Retrieve the codesign blob for a process.
- * Returns:
- * EINVAL no text vnode associated with the process
- * 0 no error occurred
+/*
+ * DO NOT USE THIS FUNCTION!
+ * Use the properly guarded csproc_get_blob instead.
*
- * On success, out_start and out_length will point to the
- * cms blob if found; or will be set to NULL/zero
- * if there were no blob.
+ * This is currently here to allow detached signatures to work
+ * properly. The only user of this function is also checking
+ * for CS_VALID.
*/
int
{
struct cs_blob *csblob;
+ if ((p->p_csflags & CS_SIGNED) == 0) {
+ return NULL;
+ }
+
if (NULL == p->p_textvp)
return NULL;
}
}
if (px_persona->pspi_flags & POSIX_SPAWN_PERSONA_GROUPS) {
- int ngroups = 0;
+ unsigned ngroups = 0;
gid_t groups[NGROUPS_MAX];
if (persona_get_groups(persona, &ngroups, groups,
error = EINVAL;
goto out;
}
- if (ngroups != (int)px_persona->pspi_ngroups) {
+ if (ngroups != px_persona->pspi_ngroups) {
error = EINVAL;
goto out;
}
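
Here and in the persona functions later in the patch, ngroups switches from int to unsigned, so the count is compared against the user-supplied pspi_ngroups without a mixed-sign conversion. The pitfall the old cast papered over, in miniature (illustrative only):

	#include <stdio.h>

	int
	main(void)
	{
		int n = -1;
		unsigned int u = 0xffffffffu;

		/* The usual arithmetic conversions promote n to unsigned int,
		 * so -1 becomes 0xffffffff and the comparison succeeds. */
		printf("%s\n", (n == u) ? "equal" : "not equal");	/* prints "equal" */
		return 0;
	}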
str[CS_CDHASH_STRING_SIZE - 1] = 0;
}
-/*
- * If the process is not signed or if it contains entitlements, we
- * need to communicate through the task_access_port to taskgated.
- *
- * taskgated will provide a detached code signature if present, and
- * will enforce any restrictions on entitlements.
- */
-
-static boolean_t
-taskgated_required(proc_t p, boolean_t *require_success)
-{
- size_t length;
- void *blob;
- int error;
-
- if (cs_debug > 2)
- csvnode_print_debug(p->p_textvp);
-
-#if !CONFIG_EMBEDDED
- const int can_skip_taskgated = csproc_get_platform_binary(p) && !csproc_get_platform_path(p);
-#else
- const int can_skip_taskgated = csproc_get_platform_binary(p);
-#endif
- if (can_skip_taskgated) {
- if (cs_debug) printf("taskgated not required for: %s\n", p->p_name);
- *require_success = FALSE;
- return FALSE;
- }
-
- if ((p->p_csflags & CS_VALID) == 0) {
- *require_success = FALSE;
- return TRUE;
- }
-
- error = cs_entitlements_blob_get(p, &blob, &length);
- if (error == 0 && blob != NULL) {
-#if !CONFIG_EMBEDDED
- /*
- * fatal on the desktop when entitlements are present,
- * unless we started in single-user mode
- */
- if ((boothowto & RB_SINGLE) == 0)
- *require_success = TRUE;
- /*
- * Allow initproc to run without causing taskgated to launch
- */
- if (p == initproc) {
- *require_success = FALSE;
- return FALSE;
- }
-
-#endif
- if (cs_debug) printf("taskgated required for: %s\n", p->p_name);
-
- return TRUE;
- }
-
- *require_success = FALSE;
- return FALSE;
-}
-
/*
* __EXEC_WAITING_ON_TASKGATED_CODE_SIGNATURE_UPCALL__
*
kern_return_t kr = KERN_FAILURE;
int error = EACCES;
boolean_t unexpected_failure = FALSE;
- unsigned char hash[CS_CDHASH_LEN];
+ struct cs_blob *csb;
boolean_t require_success = FALSE;
int spawn = (imgp->ip_flags & IMGPF_SPAWN);
int vfexec = (imgp->ip_flags & IMGPF_VFORK_EXEC);
goto done;
}
- /* check if callout to taskgated is needed */
- if (!taskgated_required(p, &require_success)) {
+ /* If the code signature came through the image activation path, we skip the
+ * taskgated / externally attached path. */
+ if (imgp->ip_csflags & CS_SIGNED) {
error = 0;
goto done;
}
+ /* The rest of the code is for signatures that either already have been externally
+ * attached (likely, but not necessarily by a previous run through the taskgated
+ * path), or that will now be attached by taskgated. */
+
kr = task_get_task_access_port(p->task, &port);
if (KERN_SUCCESS != kr || !IPC_PORT_VALID(port)) {
error = 0;
/* Only do this if exec_resettextvp() did not fail */
if (p->p_textvp != NULLVP) {
- /*
- * If there's a new code directory, mark this process
- * as signed.
- */
- if (0 == ubc_cs_getcdhash(p->p_textvp, p->p_textoff, hash)) {
- proc_lock(p);
- p->p_csflags |= CS_VALID;
- proc_unlock(p);
+ csb = ubc_cs_blob_get(p->p_textvp, -1, p->p_textoff);
+
+ if (csb != NULL) {
+ /* As the enforcement we can do here is very limited, we only allow things that
+ * are the only reason why this code path still exists:
+ * Adhoc signed non-platform binaries without special cs_flags and without any
+ * entitlements (unrestricted ones still pass AMFI). */
+ if (
+ /* Revalidate the blob if necessary through bumped generation count. */
+ (ubc_cs_generation_check(p->p_textvp) == 0 ||
+ ubc_cs_blob_revalidate(p->p_textvp, csb, imgp, 0) == 0) &&
+ /* Only CS_ADHOC, no CS_KILL, CS_HARD etc. */
+ (csb->csb_flags & CS_ALLOWED_MACHO) == CS_ADHOC &&
+ /* If it has a CMS blob, it's not adhoc. The CS_ADHOC flag can lie. */
+ csblob_find_blob_bytes((const uint8_t *)csb->csb_mem_kaddr, csb->csb_mem_size,
+ CSSLOT_SIGNATURESLOT,
+ CSMAGIC_BLOBWRAPPER) == NULL &&
+ /* It could still be in a trust cache (unlikely with CS_ADHOC), or a magic path. */
+ csb->csb_platform_binary == 0 &&
+ /* No entitlements, not even unrestricted ones. */
+ csb->csb_entitlements_blob == NULL) {
+
+ proc_lock(p);
+ p->p_csflags |= CS_SIGNED | CS_VALID;
+ proc_unlock(p);
+
+ } else {
+ uint8_t cdhash[CS_CDHASH_LEN];
+ char cdhash_string[CS_CDHASH_STRING_SIZE];
+ proc_getcdhash(p, cdhash);
+ cdhash_to_string(cdhash_string, cdhash);
+ printf("ignoring detached code signature on '%s' with cdhash '%s' "
+ "because it is invalid, or not a simple adhoc signature.\n",
+ p->p_name, cdhash_string);
+ }
+
}
}
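
For auditing, the acceptance test in the block above reads as a single predicate: the detached blob must (re)validate, carry CS_ADHOC and no other Mach-O-settable flag, contain no CMS wrapper, not be a platform binary, and have no entitlements. A sketch of that predicate in isolation (xnu-internal types and functions assumed; this mirrors the hunk and adds no new policy):

	static bool
	detached_blob_is_plain_adhoc(struct vnode *vp, struct cs_blob *csb,
	    struct image_params *imgp)
	{
		return
		    /* blob (re)validates for the current generation */
		    (ubc_cs_generation_check(vp) == 0 ||
		    ubc_cs_blob_revalidate(vp, csb, imgp, 0) == 0) &&
		    /* CS_ADHOC and nothing else from the Mach-O-settable set */
		    (csb->csb_flags & CS_ALLOWED_MACHO) == CS_ADHOC &&
		    /* no CMS wrapper: a real certificate chain means not adhoc */
		    csblob_find_blob_bytes((const uint8_t *)csb->csb_mem_kaddr,
		    csb->csb_mem_size, CSSLOT_SIGNATURESLOT,
		    CSMAGIC_BLOBWRAPPER) == NULL &&
		    /* not a platform binary via trust cache or magic path */
		    csb->csb_platform_binary == 0 &&
		    /* no entitlements blob at all */
		    csb->csb_entitlements_blob == NULL;
	}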
error = ENOMEM;
} else {
if (IS_64BIT_PROCESS(q)) {
- struct user64_rusage my_rusage;
+ struct user64_rusage my_rusage = {};
munge_user64_rusage(&p->p_ru->ru, &my_rusage);
error = copyout((caddr_t)&my_rusage,
uap->rusage,
sizeof (my_rusage));
}
else {
- struct user32_rusage my_rusage;
+ struct user32_rusage my_rusage = {};
munge_user32_rusage(&p->p_ru->ru, &my_rusage);
error = copyout((caddr_t)&my_rusage,
uap->rusage,
req_vec_size_pages = (end - addr) >> PAGE_SHIFT;
cur_vec_size_pages = MIN(req_vec_size_pages, (int)(MAX_PAGE_RANGE_QUERY >> PAGE_SHIFT));
- kernel_vec = (void*) _MALLOC(cur_vec_size_pages * sizeof(char), M_TEMP, M_WAITOK);
+ kernel_vec = (void*) _MALLOC(cur_vec_size_pages * sizeof(char), M_TEMP, M_WAITOK | M_ZERO);
if (kernel_vec == NULL) {
return (ENOMEM);
NTP_UNLOCK(enable);
if (IS_64BIT_PROCESS(p)) {
- struct user64_ntptimeval user_ntv;
+ struct user64_ntptimeval user_ntv = {};
user_ntv.time.tv_sec = ntv.time.tv_sec;
user_ntv.time.tv_nsec = ntv.time.tv_nsec;
user_ntv.maxerror = ntv.maxerror;
user_ntv.time_state = ntv.time_state;
error = copyout(&user_ntv, uap->ntvp, sizeof(user_ntv));
} else {
- struct user32_ntptimeval user_ntv;
+ struct user32_ntptimeval user_ntv = {};
user_ntv.time.tv_sec = ntv.time.tv_sec;
user_ntv.time.tv_nsec = ntv.time.tv_nsec;
user_ntv.maxerror = ntv.maxerror;
* returned only by ntp_gettime();
*/
if (IS_64BIT_PROCESS(p)) {
- struct user64_timex user_ntv;
+ struct user64_timex user_ntv = {};
if (time_status & STA_NANO)
user_ntv.offset = L_GINT(time_offset);
}
else{
- struct user32_timex user_ntv;
+ struct user32_timex user_ntv = {};
if (time_status & STA_NANO)
user_ntv.offset = L_GINT(time_offset);
if (uap->olddelta) {
if (IS_64BIT_PROCESS(p)) {
- struct user64_timeval user_atv;
+ struct user64_timeval user_atv = {};
user_atv.tv_sec = atv.tv_sec;
user_atv.tv_usec = atv.tv_usec;
error = copyout(&user_atv, uap->olddelta, sizeof(user_atv));
} else {
- struct user32_timeval user_atv;
+ struct user32_timeval user_atv = {};
user_atv.tv_sec = atv.tv_sec;
user_atv.tv_usec = atv.tv_usec;
error = copyout(&user_atv, uap->olddelta, sizeof(user_atv));
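
These ntp_gettime()/ntp_adjtime() handlers now zero their copyout staging structs for both 32-bit and 64-bit callers. For reference, the syscalls are reachable from userspace via <sys/timex.h>, which ships alongside these handlers; a minimal caller:

	#include <stdio.h>
	#include <sys/timex.h>

	int
	main(void)
	{
		struct ntptimeval ntv;
		int state = ntp_gettime(&ntv);	/* clock state (TIME_OK, ...) or -1 */

		if (state < 0) {
			perror("ntp_gettime");
			return 1;
		}
		printf("state=%d time=%ld.%09ld maxerror=%ld esterror=%ld\n",
		    state, (long)ntv.time.tv_sec, ntv.time.tv_nsec,
		    ntv.maxerror, ntv.esterror);
		return 0;
	}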
return gid;
}
-int persona_set_groups(struct persona *persona, gid_t *groups, int ngroups, uid_t gmuid)
+int persona_set_groups(struct persona *persona, gid_t *groups, unsigned ngroups, uid_t gmuid)
{
int ret = 0;
kauth_cred_t my_cred, new_cred;
my_cred = persona->pna_cred;
kauth_cred_ref(my_cred);
- new_cred = kauth_cred_setgroups(my_cred, groups, ngroups, gmuid);
+ new_cred = kauth_cred_setgroups(my_cred, groups, (int)ngroups, gmuid);
if (new_cred != my_cred)
persona->pna_cred = new_cred;
kauth_cred_unref(&my_cred);
return ret;
}
-int persona_get_groups(struct persona *persona, int *ngroups, gid_t *groups, int groups_sz)
+int persona_get_groups(struct persona *persona, unsigned *ngroups, gid_t *groups, unsigned groups_sz)
{
int ret = EINVAL;
- if (!persona || !persona->pna_cred || !groups || !ngroups)
+ if (!persona || !persona->pna_cred || !groups || !ngroups || groups_sz > NGROUPS)
return EINVAL;
*ngroups = groups_sz;
persona_lock(persona);
if (persona_valid(persona)) {
- kauth_cred_getgroups(persona->pna_cred, groups, ngroups);
+ int kauth_ngroups = (int)groups_sz;
+ kauth_cred_getgroups(persona->pna_cred, groups, &kauth_ngroups);
+ *ngroups = (unsigned)kauth_ngroups;
ret = 0;
}
persona_unlock(persona);
#include <sys/bsdtask_info.h>
#include <sys/persona.h>
+#ifdef CONFIG_32BIT_TELEMETRY
+#include <sys/kasl.h>
+#endif /* CONFIG_32BIT_TELEMETRY */
+
#if CONFIG_CSR
#include <sys/csr.h>
#endif
#include <libkern/crypto/sha1.h>
+#ifdef CONFIG_32BIT_TELEMETRY
+#define MAX_32BIT_EXEC_SIG_SIZE 160
+#endif /* CONFIG_32BIT_TELEMETRY */
+
/*
* Structure associated with user cacheing.
*/
if (vnode_getwithref(tvp) == 0) {
return tvp;
}
- }
+ }
return NULLVP;
}
uthread_t uth = (uthread_t)uthread_v;
return (uth != NULL) ? uth->uu_threadlist : NULL;
}
+
+#ifdef CONFIG_32BIT_TELEMETRY
+void
+proc_log_32bit_telemetry(proc_t p)
+{
+ /* Gather info */
+ char signature_buf[MAX_32BIT_EXEC_SIG_SIZE] = { 0 };
+ char * signature_cur_end = &signature_buf[0];
+ char * signature_buf_end = &signature_buf[MAX_32BIT_EXEC_SIG_SIZE - 1];
+ int bytes_printed = 0;
+
+ const char * teamid = NULL;
+ const char * identity = NULL;
+ struct cs_blob * csblob = NULL;
+
+ proc_list_lock();
+
+ /*
+ * Get proc name and parent proc name; if the parent execs, we'll get a
+ * garbled name.
+ */
+ bytes_printed = snprintf(signature_cur_end,
+ signature_buf_end - signature_cur_end,
+ "%s,%s,", p->p_name,
+ (p->p_pptr ? p->p_pptr->p_name : ""));
+
+ if (bytes_printed > 0) {
+ signature_cur_end += bytes_printed;
+ }
+
+ proc_list_unlock();
+
+ /* Get developer info. */
+ vnode_t v = proc_getexecutablevnode(p);
+
+ if (v) {
+ csblob = csvnode_get_blob(v, 0);
+
+ if (csblob) {
+ teamid = csblob_get_teamid(csblob);
+ identity = csblob_get_identity(csblob);
+ }
+ }
+
+ if (teamid == NULL) {
+ teamid = "";
+ }
+
+ if (identity == NULL) {
+ identity = "";
+ }
+
+ bytes_printed = snprintf(signature_cur_end,
+ signature_buf_end - signature_cur_end,
+ "%s,%s", teamid, identity);
+
+ if (bytes_printed > 0) {
+ signature_cur_end += bytes_printed;
+ }
+
+ if (v) {
+ vnode_put(v);
+ }
+
+ /*
+ * We may want to rate limit here, although the SUMMARIZE key should
+ * help us aggregate events in userspace.
+ */
+
+ /* Emit log */
+ kern_asl_msg(LOG_DEBUG, "messagetracer", 3,
+ /* 0 */ "com.apple.message.domain", "com.apple.kernel.32bit_exec",
+ /* 1 */ "com.apple.message.signature", signature_buf,
+ /* 2 */ "com.apple.message.summarize", "YES",
+ NULL);
+}
+#endif /* CONFIG_32BIT_TELEMETRY */
int
getrlimit(struct proc *p, struct getrlimit_args *uap, __unused int32_t *retval)
{
- struct rlimit lim;
+ struct rlimit lim = {};
/*
* Take out flag now in case we need to use it to trigger variant
}
proc_unlock(p);
+#ifdef CONFIG_32BIT_TELEMETRY
+ if (task_consume_32bit_log_flag(p->task)) {
+ proc_log_32bit_telemetry(p);
+ }
+#endif /* CONFIG_32BIT_TELEMETRY */
+
if (!bsd_init_done) {
bsd_init_done = 1;
bsdinit_task();
}
-
}
/* ptrace set runnable */
#include <machine/exec.h>
#include <machine/pal_routines.h>
+#include <kern/ast.h>
#include <kern/kern_types.h>
#include <kern/cpu_number.h>
#include <kern/mach_loader.h>
task_rollup_accounting_info(get_threadtask(thread), task);
}
*mapp = map;
+
+#ifdef CONFIG_32BIT_TELEMETRY
+ if (!result->is64bit) {
+ /*
+ * This may not need to be an AST; we merely need to ensure that
+ * we gather telemetry at the point where all of the information
+ * that we want has been added to the process.
+ */
+ task_set_32bit_log_flag(get_threadtask(thread));
+ act_set_astbsd(thread);
+ }
+#endif /* CONFIG_32BIT_TELEMETRY */
+
return(LOAD_SUCCESS);
}
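
Taken together, the telemetry hunks above form one flow: load_machfile() flags non-64-bit tasks and requests a BSD AST, and the AST handler consumes the flag exactly once and logs. Condensed from the hunks above (xnu-internal symbols; sketch only, not additional code):

	/* exec path (load_machfile, above): flag the task, request a BSD AST */
	task_set_32bit_log_flag(get_threadtask(thread));
	act_set_astbsd(thread);

	/* AST delivery (bsd_ast, above): consume the flag once, then log */
	if (task_consume_32bit_log_flag(p->task)) {
		proc_log_32bit_telemetry(p);
	}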
break;
}
- if (ret == LOAD_SUCCESS) {
- if (! got_code_signatures) {
- if (cs_enforcement(NULL)) {
- ret = LOAD_FAILURE;
- } else {
-#if !CONFIG_EMBEDDED
- /*
- * No embedded signatures: look for detached by taskgated,
- * this is only done on OSX, on embedded platforms we expect everything
- * to be have embedded signatures.
- */
- struct cs_blob *blob;
-
- blob = ubc_cs_blob_get(vp, -1, file_offset);
- if (blob != NULL) {
- unsigned int cs_flag_data = blob->csb_flags;
- if(0 != ubc_cs_generation_check(vp)) {
- if (0 != ubc_cs_blob_revalidate(vp, blob, imgp, 0)) {
- /* clear out the flag data if revalidation fails */
- cs_flag_data = 0;
- result->csflags &= ~CS_VALID;
- }
- }
- /* get flags to be applied to the process */
- result->csflags |= cs_flag_data;
- }
-#endif
- }
+ if (ret == LOAD_SUCCESS) {
+ if (!got_code_signatures && cs_enforcement(NULL)) {

+ ret = LOAD_FAILURE;
}
/* Make sure if we need dyld, we got it */
return rv;
}
-#if (MAC_POLICY_OPS_VERSION != 52)
+#if (MAC_POLICY_OPS_VERSION != 53)
# error "struct mac_policy_ops doesn't match definition in mac_policy.h"
#endif
/*
CHECK_SET_HOOK(exc_action_label_init)
CHECK_SET_HOOK(exc_action_label_update)
+ CHECK_SET_HOOK(vnode_check_trigger_resolve)
.mpo_reserved1 = (mpo_reserved_hook_t *)common_hook,
.mpo_reserved2 = (mpo_reserved_hook_t *)common_hook,
.mpo_reserved3 = (mpo_reserved_hook_t *)common_hook,
- .mpo_reserved4 = (mpo_reserved_hook_t *)common_hook,
CHECK_SET_HOOK(skywalk_flow_check_connect)
CHECK_SET_HOOK(skywalk_flow_check_listen)
coalition_info_resource_usage(coalition_t coal, user_addr_t buffer, user_size_t bufsize)
{
kern_return_t kr;
- struct coalition_resource_usage cru;
+ struct coalition_resource_usage cru = {};
kr = coalition_resource_usage_internal(coal, &cru);
kinfo.persona_id = persona->pna_id;
kinfo.persona_type = persona->pna_type;
kinfo.persona_gid = persona_get_gid(persona);
- int ngroups = 0;
+ unsigned ngroups = 0;
persona_get_groups(persona, &ngroups, kinfo.persona_groups, NGROUPS);
kinfo.persona_ngroups = ngroups;
kinfo.persona_gmuid = persona_get_gmuid(persona);
kinfo.persona_id = persona->pna_id;
kinfo.persona_type = persona->pna_type;
kinfo.persona_gid = persona_get_gid(persona);
- int ngroups = 0;
+ unsigned ngroups = 0;
persona_get_groups(persona, &ngroups, kinfo.persona_groups, NGROUPS);
kinfo.persona_ngroups = ngroups;
kinfo.persona_gmuid = persona_get_gmuid(persona);
goto integer;
case SO_NP_EXTENSIONS: {
- struct so_np_extensions sonpx;
+ struct so_np_extensions sonpx = {};
sonpx.npx_flags = (so->so_flags & SOF_NPX_SETOPTSHUT) ?
SONPX_SETOPTSHUT : 0;
* different size for 32 bits and 64 bits processes
*/
if (cp->cmsg_level == SOL_SOCKET && cp->cmsg_type == SCM_TIMESTAMP) {
- unsigned char tmp_buffer[CMSG_SPACE(sizeof(struct user64_timeval))];
+ unsigned char tmp_buffer[CMSG_SPACE(sizeof(struct user64_timeval))] = {};
struct cmsghdr *tmp_cp = (struct cmsghdr *)(void *)tmp_buffer;
int tmp_space;
struct timeval *tv = (struct timeval *)(void *)CMSG_DATA(cp);
case SIOCGIFORDER: { /* struct if_order */
struct if_order *ifo = (struct if_order *)(void *)data;
-
- u_int32_t ordered_count = if_ordered_count;
+ u_int32_t ordered_count = *((volatile u_int32_t *)&if_ordered_count);
if (ifo->ifo_count == 0 ||
ordered_count == 0) {
- ifo->ifo_count = ordered_count;
+ ifo->ifo_count = 0;
} else if (ifo->ifo_ordered_indices != USER_ADDR_NULL) {
u_int32_t count_to_copy =
MIN(ordered_count, ifo->ifo_count);
struct ifnet *ifp = NULL;
u_int32_t cursor = 0;
- ordered_indices = _MALLOC(length, M_NECP, M_WAITOK);
+ ordered_indices = _MALLOC(length, M_NECP, M_WAITOK | M_ZERO);
if (ordered_indices == NULL) {
error = ENOMEM;
break;
ifnet_head_lock_shared();
TAILQ_FOREACH(ifp, &ifnet_ordered_head, if_ordered_link) {
- if (cursor >= count_to_copy) {
+ if (cursor >= count_to_copy ||
+ cursor >= if_ordered_count) {
break;
}
ordered_indices[cursor] = ifp->if_index;
}
ifnet_head_done();
- ifo->ifo_count = count_to_copy;
+ /* We might have parsed less than the original length
+ * because the list could have changed.
+ */
+ length = cursor * sizeof(u_int32_t);
+ ifo->ifo_count = cursor;
error = copyout(ordered_indices,
ifo->ifo_ordered_indices, length);
} else {
unsigned int namelen;
uint32_t ifindex;
struct if_llreach *lr;
- struct if_llreach_info lri;
+ struct if_llreach_info lri = {};
struct ifnet *ifp;
name = (int *)arg1;
LIST_FOREACH(flow, &client->flow_list, flow_chain) {
if (flow->nexus || (flow->socket && flow->assigned)) {
// Write TLV headers
- struct necp_client_nexus_flow_header header;
+ struct necp_client_nexus_flow_header header = {};
u_int32_t length = 0;
u_int32_t flags = 0;
u_int8_t tfo_cookie_len = 0;
/* Read the protocol event and reset it */
if (flow->has_protoctl_event) {
- struct necp_client_flow_protoctl_event_header protoctl_event_header;
+ struct necp_client_flow_protoctl_event_header protoctl_event_header = {};
type = NECP_CLIENT_RESULT_PROTO_CTL_EVENT;
length = sizeof(protoctl_event_header.protoctl_event);
{
#pragma unused(retval)
u_int8_t *parameters = NULL;
- struct necp_aggregate_result returned_result;
+ struct necp_aggregate_result returned_result = {};
int error = 0;
if (uap == NULL) {
break;
}
- pstore = _MALLOC(sizeof (*pstore), M_TEMP, M_WAITOK);
+ pstore = _MALLOC(sizeof (*pstore), M_TEMP, M_WAITOK | M_ZERO);
if (pstore == NULL) {
error = ENOMEM;
break;
if (info.rti_info[RTAX_GATEWAY] != NULL &&
info.rti_info[RTAX_GATEWAY]->sa_family == AF_INET)
sin_set_ifscope(info.rti_info[RTAX_GATEWAY], IFSCOPE_NONE);
-
switch (rtm->rtm_type) {
case RTM_ADD:
if (info.rti_info[RTAX_GATEWAY] == NULL)
len = rt_msg2(rtm->rtm_type, &info, NULL, NULL, &cred);
if (ifa2 != NULL)
IFA_UNLOCK(ifa2);
- if (len > rtm->rtm_msglen) {
- struct rt_msghdr *new_rtm;
- R_Malloc(new_rtm, struct rt_msghdr *, len);
- if (new_rtm == NULL) {
- RT_UNLOCK(rt);
- if (ifa2 != NULL)
- IFA_REMREF(ifa2);
- senderr(ENOBUFS);
- }
- Bcopy(rtm, new_rtm, rtm->rtm_msglen);
- R_Free(rtm); rtm = new_rtm;
+ struct rt_msghdr *out_rtm;
+ R_Malloc(out_rtm, struct rt_msghdr *, len);
+ if (out_rtm == NULL) {
+ RT_UNLOCK(rt);
+ if (ifa2 != NULL)
+ IFA_REMREF(ifa2);
+ senderr(ENOBUFS);
}
+ Bcopy(rtm, out_rtm, sizeof(struct rt_msghdr));
if (ifa2 != NULL)
IFA_LOCK(ifa2);
- (void) rt_msg2(rtm->rtm_type, &info, (caddr_t)rtm,
+ (void) rt_msg2(out_rtm->rtm_type, &info, (caddr_t)out_rtm,
NULL, &cred);
if (ifa2 != NULL)
IFA_UNLOCK(ifa2);
+ R_Free(rtm);
+ rtm = out_rtm;
rtm->rtm_flags = rt->rt_flags;
rt_getmetrics(rt, &rtm->rtm_rmx);
rtm->rtm_addrs = info.rti_addrs;
static int
inp_get_source_filters(struct inpcb *inp, struct sockopt *sopt)
{
- struct __msfilterreq64 msfr, msfr64;
+ struct __msfilterreq64 msfr = {}, msfr64;
struct __msfilterreq32 msfr32;
struct sockaddr_in *gsa;
struct ifnet *ifp;
static int
inp_set_source_filters(struct inpcb *inp, struct sockopt *sopt)
{
- struct __msfilterreq64 msfr, msfr64;
+ struct __msfilterreq64 msfr = {}, msfr64;
struct __msfilterreq32 msfr32;
struct sockaddr_in *gsa;
struct ifnet *ifp;
{
#pragma unused(oidp)
- struct in_addr src, group;
+ struct in_addr src = {}, group;
struct ifnet *ifp;
struct in_multi *inm;
struct in_multistep step;
const size_t max_netsvctype_to_dscp_map_len =
_NET_SERVICE_TYPE_COUNT * sizeof(struct netsvctype_dscp_map);
size_t len;
- struct netsvctype_dscp_map netsvctype_dscp_map[_NET_SERVICE_TYPE_COUNT];
+ struct netsvctype_dscp_map netsvctype_dscp_map[_NET_SERVICE_TYPE_COUNT] = {};
size_t count;
if (req->oldptr == USER_ADDR_NULL) {
#pragma unused(oidp, arg1, arg2)
int error = 0;
size_t len = DSCP_ARRAY_SIZE * sizeof(struct netsvctype_dscp_map);
- struct netsvctype_dscp_map netsvctype_dscp_map[DSCP_ARRAY_SIZE];
+ struct netsvctype_dscp_map netsvctype_dscp_map[DSCP_ARRAY_SIZE] = {};
struct dcsp_msc_map dcsp_msc_map[DSCP_ARRAY_SIZE];
size_t count;
u_int32_t i;
if (!error && sopt->sopt_dir == SOPT_GET) {
/* convert back if necessary and copyout */
if (api_version == IP_FW_VERSION_0) {
- struct ip_old_fw rule_vers0;
+ struct ip_old_fw rule_vers0 = {};
ipfw_convert_from_latest(rule, &rule_vers0, api_version, is64user);
sopt->sopt_valsize = sizeof(struct ip_old_fw);
error = sooptcopyout(sopt, &rule_vers0, sizeof(struct ip_old_fw));
} else if (api_version == IP_FW_VERSION_1) {
- struct ip_fw_compat rule_vers1;
+ struct ip_fw_compat rule_vers1 = {};
ipfw_convert_from_latest(rule, &rule_vers1, api_version, is64user);
sopt->sopt_valsize = sizeof(struct ip_fw_compat);
error = sooptcopyout(sopt, &rule_vers1, sizeof(struct ip_fw_compat));
} else {
char *userrule;
- userrule = _MALLOC(savedsopt_valsize, M_TEMP, M_WAITOK);
+ userrule = _MALLOC(savedsopt_valsize, M_TEMP, M_WAITOK | M_ZERO);
if ( userrule == NULL )
userrule = (char*)rule;
if (proc_is64bit(sopt->sopt_p)){
static int
mptcp_getopt(struct mptses *mpte, struct sockopt *sopt)
{
- int error = 0, optval;
+ int error = 0, optval = 0;
VERIFY(sopt->sopt_dir == SOPT_GET);
mpte_lock_assert_held(mpte); /* same as MP socket lock */
tcp_sysctl_info(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
int error;
- struct tcp_info ti;
+ struct tcp_info ti = {};
struct info_tuple itpl;
#if !CONFIG_EMBEDDED
proc_t caller = PROC_NULL;
int
tcp_ctloutput(struct socket *so, struct sockopt *sopt)
{
- int error, opt, optval;
+ int error = 0, opt = 0, optval = 0;
struct inpcb *inp;
struct tcpcb *tp;
- error = 0;
inp = sotoinpcb(so);
if (inp == NULL) {
return (ECONNRESET);
goto done;
}
case TCP_MEASURE_BW_BURST: {
- struct tcp_measure_bw_burst out;
+ struct tcp_measure_bw_burst out = {};
if ((tp->t_flagsext & TF_MEASURESNDBW) == 0 ||
tp->t_bwmeas == NULL) {
error = EINVAL;
int
udp_ctloutput(struct socket *so, struct sockopt *sopt)
{
- int error, optval;
+ int error = 0, optval = 0;
struct inpcb *inp;
/* Allow <SOL_SOCKET,SO_FLUSH> at this level */
!(sopt->sopt_level == SOL_SOCKET && sopt->sopt_name == SO_FLUSH))
return (ip_ctloutput(so, sopt));
- error = 0;
inp = sotoinpcb(so);
switch (sopt->sopt_dir) {
static int
in6p_get_source_filters(struct inpcb *inp, struct sockopt *sopt)
{
- struct __msfilterreq64 msfr, msfr64;
+ struct __msfilterreq64 msfr = {}, msfr64;
struct __msfilterreq32 msfr32;
struct sockaddr_in6 *gsa;
struct ifnet *ifp;
static int
in6p_set_source_filters(struct inpcb *inp, struct sockopt *sopt)
{
- struct __msfilterreq64 msfr, msfr64;
+ struct __msfilterreq64 msfr = {}, msfr64;
struct __msfilterreq32 msfr32;
struct sockaddr_in6 *gsa;
struct ifnet *ifp;
/* Lookup in the export list first. */
if (nam != NULL) {
saddr = mbuf_data(nam);
+ if (saddr->sa_family > AF_MAX) {
+ /* Bogus sockaddr? Don't match anything. */
+ return (NULL);
+ }
rnh = nx->nx_rtable[saddr->sa_family];
if (rnh != NULL) {
no = (struct nfs_netopt *)
struct nfs_exportfs *nxfs;
struct nfs_export *nx;
struct nfs_active_user_list *ulist;
- struct nfs_export_stat_desc stat_desc;
+ struct nfs_export_stat_desc stat_desc = {};
struct nfs_export_stat_rec statrec;
struct nfs_user_stat_node *unode, *unode_next;
- struct nfs_user_stat_desc ustat_desc;
+ struct nfs_user_stat_desc ustat_desc = {};
struct nfs_user_stat_user_rec ustat_rec;
struct nfs_user_stat_path_rec upath_rec;
uint bytes_avail, bytes_total, recs_copied;
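
The guard added in the export-lookup hunk above bounds an attacker-influenced sa_family (an 8-bit value that can exceed AF_MAX) before it indexes the per-family radix table. The shape of the fix in isolation (hypothetical table; AF_MAX from <sys/socket.h>):

	#include <stddef.h>
	#include <sys/socket.h>

	struct radix_node_head;					/* opaque for the sketch */
	static struct radix_node_head *rtable[AF_MAX + 1];	/* hypothetical table */

	static struct radix_node_head *
	rtable_for(const struct sockaddr *sa)
	{
		/* sa_family arrives in an mbuf from the export request:
		 * bound it before using it as an index */
		if (sa->sa_family > AF_MAX)
			return NULL;
		return rtable[sa->sa_family];
	}

	int
	main(void)
	{
		struct sockaddr sa = { .sa_len = sizeof(sa), .sa_family = 255 };
		return (rtable_for(&sa) == NULL) ? 0 : 1;	/* 255 > AF_MAX: miss */
	}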
int persona_set_gid(struct persona *persona, gid_t gid);
gid_t persona_get_gid(struct persona *persona);
-int persona_set_groups(struct persona *persona, gid_t *groups, int ngroups, uid_t gmuid);
-int persona_get_groups(struct persona *persona, int *ngroups, gid_t *groups, int groups_sz);
+int persona_set_groups(struct persona *persona, gid_t *groups, unsigned ngroups, uid_t gmuid);
+int persona_get_groups(struct persona *persona, unsigned *ngroups, gid_t *groups, unsigned groups_sz);
uid_t persona_get_gmuid(struct persona *persona);
extern void proc_coalitionids(proc_t, uint64_t [COALITION_NUM_TYPES]);
+#ifdef CONFIG_32BIT_TELEMETRY
+extern void proc_log_32bit_telemetry(proc_t p);
+#endif /* CONFIG_32BIT_TELEMETRY */
+
#endif /* XNU_KERNEL_PRIVATE*/
#ifdef KERNEL_PRIVATE
io_size += start_offset;
- if ((upl_f_offset + io_size) >= newEOF && (u_int)io_size < upl_size) {
+ if (newEOF >= oldEOF && (upl_f_offset + io_size) >= newEOF && (u_int)io_size < upl_size) {
/*
* if we're extending the file with this write
* we'll zero fill the rest of the page so that
if (space < req->oldlen)
return (ENOMEM);
- MALLOC(fsidlst, fsid_t *, req->oldlen, M_TEMP, M_WAITOK);
+ MALLOC(fsidlst, fsid_t *, req->oldlen, M_TEMP, M_WAITOK | M_ZERO);
if (fsidlst == NULL) {
return (ENOMEM);
}
{
int *name, namelen;
struct vfstable *vfsp;
- struct vfsconf vfsc;
+ struct vfsconf vfsc = {};
(void)oidp;
name = arg1;
lck_mtx_unlock(&rp->vr_lock);
+#if CONFIG_MACF
+ int rv = mac_vnode_check_trigger_resolve(ctx, vp, &ndp->ni_cnd);
+ if (rv != 0)
+ return rv;
+#endif
+
/*
* XXX
* assumes that resolver will not access this trigger vnode (otherwise the kernel will deadlock)
union {
struct stat sb;
struct stat64 sb64;
- } source;
+ } source = {};
union {
struct user64_stat user64_sb;
struct user32_stat user32_sb;
struct user64_stat64 user64_sb64;
struct user32_stat64 user32_sb64;
- } dest;
+ } dest = {};
caddr_t sbp;
int error, my_size;
kauth_filesec_t fsec;
_mac_iokit_check_nvram_get
_mac_iokit_check_nvram_set
+_mac_vnode_check_trigger_resolve
+
_sbuf_cat
_sbuf_data
_sbuf_delete
#
options COPYOUT_SHIM # Shim for copyout memory analysis via kext #<copyout_shim>
+
+#
+# Telemetry for 32-bit process launch
+#
+options CONFIG_32BIT_TELEMETRY # # <config_32bit_telemetry>
# KERNEL_RELEASE = [ KERNEL_BASE ]
# KERNEL_DEV = [ KERNEL_BASE development mach_assert config_xnupost proc_ref_debug os_reason_debug ]
# KERNEL_DEBUG = [ KERNEL_BASE debug mach_assert config_xnupost config_ltable_stats config_ltable_debug config_waitq_stats config_waitq_debug ]
-# BSD_BASE = [ mach_bsd sysv_sem sysv_msg sysv_shm config_imageboot config_workqueue psynch config_proc_uuid_policy config_coredump pgo ]
+# BSD_BASE = [ mach_bsd sysv_sem sysv_msg sysv_shm config_imageboot config_workqueue psynch config_proc_uuid_policy config_coredump pgo config_32bit_telemetry ]
# BSD_RELEASE = [ BSD_BASE ]
# BSD_DEV = [ BSD_BASE config_vnguard ]
# BSD_DEBUG = [ BSD_BASE config_vnguard ]
-17.3.0
+17.4.0
# The first line of this file contains the master version number for the kernel.
# All other instances of the kernel version in xnu are derived from this file.
UInt8 __reservedA[1];
volatile SInt32 __ipc;
queue_head_t owners;
+ IOLock * lock;
#if __LP64__
- void * __reserved[5];
-#else
void * __reserved[4];
+#else
+ void * __reserved[3];
#endif
#else /* XNU_KERNEL_PRIVATE */
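
Both this IOUserClient change and the mac_policy_ops change later in the patch add a member by consuming one reserved pointer slot, keeping the structure size, and therefore kext/policy binary compatibility, unchanged. The pattern as a compile-time check (hypothetical structs):

	struct obj_before {
		void *impl;
		void *__reserved[5];
	};

	struct obj_after {
		void *impl;
		void *lock;		/* new member takes one reserved slot */
		void *__reserved[4];
	};

	/* the size is unchanged, so compiled clients keep working */
	_Static_assert(sizeof(struct obj_before) == sizeof(struct obj_after),
	    "reserved-slot substitution must not change the struct size");

	int main(void) { return 0; }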
if( (client = OSDynamicCast( IOUserClient, obj )))
{
IOStatisticsClientCall();
+ IOLockLock(client->lock);
client->clientDied();
+ IOLockUnlock(client->lock);
}
}
else if( IKOT_IOKIT_OBJECT == type)
void IOUserClient::free()
{
if( mappings) mappings->release();
+ if (lock) IOLockFree(lock);
IOStatisticsUnregisterCounter();
client->sharedInstance = (0 != client->getProperty(kIOUserClientSharedInstanceKey));
client->closed = false;
+ client->lock = IOLockAlloc();
disallowAccess = (crossEndian
&& (kOSBooleanTrue != service->getProperty(kIOUserClientCrossEndianCompatibleKey))
if (client->sharedInstance || OSCompareAndSwap8(0, 1, &client->closed))
{
+ IOLockLock(client->lock);
client->clientClose();
+ IOLockUnlock(client->lock);
}
else
{
mach_port_t port,
uint32_t reference)
{
+ kern_return_t ret;
CHECK( IOUserClient, connection, client );
IOStatisticsClientCall();
- return( client->registerNotificationPort( port, notification_type,
- (io_user_reference_t) reference ));
+ IOLockLock(client->lock);
+ ret = client->registerNotificationPort( port, notification_type,
+ (io_user_reference_t) reference );
+ IOLockUnlock(client->lock);
+ return (ret);
}
/* Routine io_connect_set_notification_port */
mach_port_t port,
io_user_reference_t reference)
{
+ kern_return_t ret;
CHECK( IOUserClient, connection, client );
IOStatisticsClientCall();
- return( client->registerNotificationPort( port, notification_type,
- reference ));
+ IOLockLock(client->lock);
+ ret = client->registerNotificationPort( port, notification_type,
+ reference );
+ IOLockUnlock(client->lock);
+ return (ret);
}
/* Routine io_connect_map_memory_into_task */
if (gidarray == NULL)
return EINVAL;
- if (ngroups > NGROUPS)
+ if (ngroups > NGROUPS || ngroups < 0)
return EINVAL;
psattr = *(_posix_spawnattr_t *)attr;
struct mach_timebase_info_trap_args *args)
{
mach_vm_address_t out_info_addr = args->info;
- mach_timebase_info_data_t info;
+ mach_timebase_info_data_t info = {};
clock_timebase_info(&info);
int sleep_nsec = args->sleep_nsec;
mach_vm_address_t wakeup_time_addr = args->wakeup_time;
clock_t clock;
- mach_timespec_t swtime;
+ mach_timespec_t swtime = {};
kern_return_t rvalue;
/*
thread_bootstrap_return();
}
+#ifdef CONFIG_32BIT_TELEMETRY
+boolean_t
+task_consume_32bit_log_flag(task_t task)
+{
+ if ((task->t_procflags & TPF_LOG_32BIT_TELEMETRY) != 0) {
+ task->t_procflags &= ~TPF_LOG_32BIT_TELEMETRY;
+ return TRUE;
+ } else {
+ return FALSE;
+ }
+}
+
+void
+task_set_32bit_log_flag(task_t task)
+{
+ task->t_procflags |= TPF_LOG_32BIT_TELEMETRY;
+}
+#endif /* CONFIG_32BIT_TELEMETRY */
+
boolean_t
task_is_exec_copy(task_t task)
{
#define TPF_NONE 0
#define TPF_DID_EXEC 0x00000001 /* task has been execed to a new task */
#define TPF_EXEC_COPY 0x00000002 /* task is the new copy of an exec */
+#ifdef CONFIG_32BIT_TELEMETRY
+#define TPF_LOG_32BIT_TELEMETRY 0x00000004 /* task should log identifying information */
+#endif
#define task_did_exec_internal(task) \
(((task)->t_procflags & TPF_DID_EXEC) != 0)
extern void task_clear_exec_copy_flag(task_t task);
extern boolean_t task_is_exec_copy(task_t);
extern boolean_t task_did_exec(task_t task);
+#ifdef CONFIG_32BIT_TELEMETRY
+extern boolean_t task_consume_32bit_log_flag(task_t task);
+extern void task_set_32bit_log_flag(task_t task);
+#endif /* CONFIG_32BIT_TELEMETRY */
extern boolean_t task_is_active(task_t task);
extern boolean_t task_is_halting(task_t task);
extern void task_clear_return_wait(task_t task);
return 0;
}
+int mac_vnode_check_trigger_resolve(vfs_context_t ctx __unused, struct vnode *dvp __unused, struct componentname *cnp __unused);
+int mac_vnode_check_trigger_resolve(vfs_context_t ctx __unused, struct vnode *dvp __unused, struct componentname *cnp __unused)
+{
+ return 0;
+}
+
#endif /* !MAC */
int mac_vnode_check_setutimes(vfs_context_t ctx, struct vnode *vp,
struct timespec atime, struct timespec mtime);
int mac_vnode_check_signature(struct vnode *vp,
- struct cs_blob *cs_blob, struct image_params *imgp,
- unsigned int *cs_flags, unsigned int *signer_type,
- int flags);
+ struct cs_blob *cs_blob, struct image_params *imgp,
+ unsigned int *cs_flags, unsigned int *signer_type,
+ int flags);
int mac_vnode_check_stat(vfs_context_t ctx,
kauth_cred_t file_cred, struct vnode *vp);
+int mac_vnode_check_trigger_resolve(vfs_context_t ctx, struct vnode *dvp,
+ struct componentname *cnp);
int mac_vnode_check_truncate(vfs_context_t ctx,
kauth_cred_t file_cred, struct vnode *vp);
int mac_vnode_check_uipc_bind(vfs_context_t ctx, struct vnode *dvp,
struct vnode *vp,
struct label *label
);
+/**
+ @brief Access control check for vnode trigger resolution
+ @param cred Subject credential
+ @param dvp Object vnode
+ @param dlabel Policy label for dvp
+ @param cnp Component name that triggered resolution
+
+ Determine whether the subject identified by the credential can trigger
+ resolution of the passed name (cnp) in the passed directory vnode
+ via an external trigger resolver.
+
+ @return Return 0 if access is granted, otherwise an appropriate value for
+ errno should be returned. Suggested failure: EACCES for label mismatch or
+ EPERM for lack of privilege.
+*/
+typedef int mpo_vnode_check_trigger_resolve_t(
+ kauth_cred_t cred,
+ struct vnode *dvp,
+ struct label *dlabel,
+ struct componentname *cnp
+);
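
A third-party policy adopts the new hook by populating the slot that was previously reserved. A minimal sketch of a callback that only lets root trigger external resolution (hypothetical policy; registration boilerplate and xnu-internal headers assumed):

	static int
	example_vnode_check_trigger_resolve(kauth_cred_t cred, struct vnode *dvp,
	    struct label *dlabel, struct componentname *cnp)
	{
	#pragma unused(dvp, dlabel, cnp)
		/* e.g. only root may fire an external trigger resolver */
		return (kauth_cred_getuid(cred) == 0) ? 0 : EACCES;
	}

	static struct mac_policy_ops example_ops = {
		.mpo_vnode_check_trigger_resolve = example_vnode_check_trigger_resolve,
	};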
/**
@brief Access control check for truncate/ftruncate
@param active_cred Subject credential
* Please note that this should be kept in sync with the check assumptions
* policy in bsd/kern/policy_check.c (policy_ops struct).
*/
-#define MAC_POLICY_OPS_VERSION 52 /* inc when new reserved slots are taken */
+#define MAC_POLICY_OPS_VERSION 53 /* inc when new reserved slots are taken */
struct mac_policy_ops {
mpo_audit_check_postselect_t *mpo_audit_check_postselect;
mpo_audit_check_preselect_t *mpo_audit_check_preselect;
mpo_exc_action_label_init_t *mpo_exc_action_label_init;
mpo_exc_action_label_update_t *mpo_exc_action_label_update;
+ mpo_vnode_check_trigger_resolve_t *mpo_vnode_check_trigger_resolve;
mpo_reserved_hook_t *mpo_reserved1;
mpo_reserved_hook_t *mpo_reserved2;
mpo_reserved_hook_t *mpo_reserved3;
- mpo_reserved_hook_t *mpo_reserved4;
mpo_skywalk_flow_check_connect_t *mpo_skywalk_flow_check_connect;
mpo_skywalk_flow_check_listen_t *mpo_skywalk_flow_check_listen;
return (error);
}
+int
+mac_vnode_check_trigger_resolve(vfs_context_t ctx, struct vnode *dvp,
+ struct componentname *cnp)
+{
+ kauth_cred_t cred;
+ int error;
+
+#if SECURITY_MAC_CHECK_ENFORCE
+ /* 21167099 - only check if we allow write */
+ if (!mac_vnode_enforce)
+ return 0;
+#endif
+ cred = vfs_context_ucred(ctx);
+ if (!mac_cred_check_enforce(cred))
+ return (0);
+ MAC_CHECK(vnode_check_trigger_resolve, cred, dvp, dvp->v_label, cnp);
+ return (error);
+}
+
int
mac_vnode_check_truncate(vfs_context_t ctx, struct ucred *file_cred,
struct vnode *vp)
--- /dev/null
+/* -*- Mode: c; tab-width: 8; indent-tabs-mode: 1; c-basic-offset: 8; -*- */
+
+#include <darwintest.h>
+#include <poll.h>
+#include <sys/socket.h>
+#include <unistd.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <errno.h>
+#include <TargetConditionals.h>
+
+static int
+sockv6_open(void)
+{
+ int s;
+
+ s = socket(AF_INET6, SOCK_DGRAM, 0);
+ T_QUIET;
+ T_ASSERT_POSIX_SUCCESS(s, "socket(AF_INET6, SOCK_DGRAM, 0)");
+ return (s);
+}
+
+static int
+sockv6_bind(int s, in_port_t port)
+{
+ struct sockaddr_in6 sin6;
+
+ bzero(&sin6, sizeof(sin6));
+ sin6.sin6_len = sizeof(sin6);
+ sin6.sin6_family = AF_INET6;
+ sin6.sin6_port = port;
+ return (bind(s, (const struct sockaddr *)&sin6, sizeof(sin6)));
+}
+
+static void
+sockv6_set_v6only(int s)
+{
+ int on = 1;
+ int ret;
+
+ ret = setsockopt(s, IPPROTO_IPV6, IPV6_V6ONLY, &on, sizeof(on));
+ T_QUIET;
+ T_ASSERT_POSIX_SUCCESS(ret, "setsockopt(%d, IPV6_ONLY)", s);
+}
+
+static bool
+alloc_and_bind_ports(in_port_t port_start, in_port_t port_end,
+ int bind_attempts)
+{
+ int bound_count = 0;
+ bool success = true;
+
+ for (in_port_t i = port_start; success && i <= port_end; i++) {
+ int s6 = -1;
+ int s6_other = -1;
+ int ret;
+
+ s6 = sockv6_open();
+ sockv6_set_v6only(s6);
+ if (sockv6_bind(s6, i) != 0) {
+ /* find the next available port */
+ goto loop_done;
+ }
+ s6_other = sockv6_open();
+ ret = sockv6_bind(s6_other, i);
+ T_WITH_ERRNO;
+ T_QUIET;
+ T_ASSERT_TRUE(ret != 0, "socket %d bind %d", s6_other, i);
+ /*
+ * After bind fails, try binding to a different port.
+ * For non-root user, this will panic without the fix for
+ * <rdar://problem/35243417>.
+ */
+ if (sockv6_bind(s6_other, i + 1) == 0) {
+ bound_count++;
+ if (bound_count >= bind_attempts) {
+ break;
+ }
+ }
+ loop_done:
+ if (s6 >= 0) {
+ close(s6);
+ }
+ if (s6_other >= 0) {
+ close(s6_other);
+ }
+ }
+ T_ASSERT_TRUE(bound_count == bind_attempts,
+ "number of successful binds %d (out of %d)",
+ bound_count, bind_attempts);
+ return (success);
+}
+
+
+T_DECL(socket_bind_35243417,
+ "bind IPv6 only UDP socket, then bind IPv6 socket.",
+ T_META_ASROOT(false),
+ T_META_CHECK_LEAKS(false))
+{
+#if TARGET_OS_WATCH
+ T_SKIP("socket_bind_35243417 can't run on watch.");
+#else
+ alloc_and_bind_ports(1, 65534, 10);
+#endif
+}
+
+T_DECL(socket_bind_35243417_root,
+ "bind IPv6 only UDP socket, then bind IPv6 socket.",
+ T_META_ASROOT(true))
+{
+#if TARGET_OS_WATCH
+ T_SKIP("socket_bind_35243417_root can't run on watch.");
+#else
+ alloc_and_bind_ports(1, 65534, 10);
+#endif
+}
break;
case 'u':
ret = atoi(optarg);
- if (ret <= 0)
- err("Invalid UID: %s", optarg);
+ /* allow invalid / -1 as a wildcard for lookup */
+ if (ret < 0 && persona_op != PERSONA_OP_LOOKUP) {
+ err("Invalid UID:%s (%d)", optarg, ret);
+ }
uid = (uid_t)ret;
break;
case 'g':
}
}
- if (uid == (uid_t)-1)
+ if (uid == (uid_t)-1 && persona_op != PERSONA_OP_LOOKUP)
uid = kinfo.persona_id;
if (kinfo.persona_gmuid && kinfo.persona_ngroups == 0) {