/*
- * Copyright (c) 2007-2009 Apple Inc. All rights reserved.
+ * Copyright (c) 2007-2015 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <sys/ubc.h>
#include <sys/malloc.h>
#include <sys/kpi_mbuf.h>
+#include <sys/ucred.h>
#include <kern/host.h>
+#include <kern/task.h>
#include <libkern/libkern.h>
#include <mach/task.h>
-#include <mach/task_special_ports.h>
+#include <mach/host_special_ports.h>
#include <mach/host_priv.h>
#include <mach/thread_act.h>
#include <mach/mig_errors.h>
#include <nfs/xdr_subs.h>
#include <nfs/nfsm_subs.h>
#include <nfs/nfs_gss.h>
-
#include "nfs_gss_crypto.h"
+#include <mach_assert.h>
+#include <kern/assert.h>
+
+#define ASSERT(EX) assert(EX)
#define NFS_GSS_MACH_MAX_RETRIES 3
+#define NFS_GSS_DBG(...) NFS_DBG(NFS_FAC_GSS, 7, ## __VA_ARGS__)
+#define NFS_GSS_ISDBG (NFS_DEBUG_FACILITY & NFS_FAC_GSS)
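+/*
+ * GSS debug output: NFS_GSS_DBG routes messages through NFS_DBG at level 7
+ * for the GSS facility; NFS_GSS_ISDBG tests whether that facility is enabled.
+ */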
+
typedef struct {
int type;
union {
* These octet strings are used to encode/decode ASN.1 tokens
* in the RPCSEC_GSS verifiers.
*/
-static u_char krb5_tokhead[] = { 0x60, 0x23 };
-static u_char krb5_mech[] = { 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x12, 0x01, 0x02, 0x02 };
+static u_char krb5_tokhead[] __attribute__((unused)) = { 0x60, 0x23 };
+ u_char krb5_mech[11] = { 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x12, 0x01, 0x02, 0x02 };
static u_char krb5_mic[] = { 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff };
static u_char krb5_mic3[] = { 0x01, 0x01, 0x04, 0x00, 0xff, 0xff, 0xff, 0xff };
static u_char krb5_wrap[] = { 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff };
#if NFSCLIENT
static int nfs_gss_clnt_ctx_find(struct nfsreq *);
-static int nfs_gss_clnt_ctx_failover(struct nfsreq *);
static int nfs_gss_clnt_ctx_init(struct nfsreq *, struct nfs_gss_clnt_ctx *);
+static int nfs_gss_clnt_ctx_init_retry(struct nfsreq *, struct nfs_gss_clnt_ctx *);
static int nfs_gss_clnt_ctx_callserver(struct nfsreq *, struct nfs_gss_clnt_ctx *);
-static char *nfs_gss_clnt_svcname(struct nfsmount *);
+static uint8_t *nfs_gss_clnt_svcname(struct nfsmount *, gssd_nametype *, uint32_t *);
static int nfs_gss_clnt_gssd_upcall(struct nfsreq *, struct nfs_gss_clnt_ctx *);
-static void nfs_gss_clnt_ctx_remove(struct nfsmount *, struct nfs_gss_clnt_ctx *);
-static int nfs_gss_clnt_ctx_delay(struct nfsreq *, int *);
+void nfs_gss_clnt_ctx_neg_cache_reap(struct nfsmount *);
+static void nfs_gss_clnt_ctx_clean(struct nfs_gss_clnt_ctx *);
+static int nfs_gss_clnt_ctx_copy(struct nfs_gss_clnt_ctx *, struct nfs_gss_clnt_ctx **, gss_key_info *);
+static void nfs_gss_clnt_ctx_destroy(struct nfs_gss_clnt_ctx *);
+static void nfs_gss_clnt_log_error(struct nfsreq *, struct nfs_gss_clnt_ctx *, uint32_t, uint32_t);
#endif /* NFSCLIENT */
#if NFSSERVER
static int nfs_gss_svc_seqnum_valid(struct nfs_gss_svc_ctx *, uint32_t);
#endif /* NFSSERVER */
-static void task_release_special_port(mach_port_t);
-static mach_port_t task_copy_special_port(mach_port_t);
+static void host_release_special_port(mach_port_t);
+static mach_port_t host_copy_special_port(mach_port_t);
static void nfs_gss_mach_alloc_buffer(u_char *, uint32_t, vm_map_copy_t *);
static int nfs_gss_mach_vmcopyout(vm_map_copy_t, uint32_t, u_char *);
static int nfs_gss_token_get(gss_key_info *ki, u_char *, u_char *, int, uint32_t *, u_char *);
*
* Note that the code allows superuser (uid == 0)
* to adopt the context of another user.
+ *
+ * We'll match on the audit session ids, since those
+ * processes will have access to the same credential cache.
*/
+
+#define kauth_cred_getasid(cred) ((cred)->cr_audit.as_aia_p->ai_asid)
+#define kauth_cred_getauid(cred) ((cred)->cr_audit.as_aia_p->ai_auid)
+
+#define SAFE_CAST_INTTYPE( type, intval ) \
+ ( (type)(intval)/(sizeof(type) < sizeof(intval) ? 0 : 1) )
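+/*
+ * SAFE_CAST_INTTYPE deliberately divides by zero when the destination type
+ * is narrower than the value being cast, so a truncating conversion of the
+ * audit session id below cannot pass silently.
+ */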
+
+uid_t
+nfs_cred_getasid2uid(kauth_cred_t cred)
+{
+ uid_t result = SAFE_CAST_INTTYPE(uid_t, kauth_cred_getasid(cred));
+ return (result);
+}
+
+/*
+ * Debugging
+ */
+static void
+nfs_gss_clnt_ctx_dump(struct nfsmount *nmp)
+{
+ struct nfs_gss_clnt_ctx *cp;
+
+ lck_mtx_lock(&nmp->nm_lock);
+ NFS_GSS_DBG("Enter\n");
+ TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) {
+ lck_mtx_lock(cp->gss_clnt_mtx);
+ printf("context %d/%d: refcnt = %d, flags = %x\n",
+ kauth_cred_getasid(cp->gss_clnt_cred),
+ kauth_cred_getauid(cp->gss_clnt_cred),
+ cp->gss_clnt_refcnt, cp->gss_clnt_flags);
+ lck_mtx_unlock(cp->gss_clnt_mtx);
+ }
+ NFS_GSS_DBG("Exit\n");
+ lck_mtx_unlock(&nmp->nm_lock);
+}
+
+static char *
+nfs_gss_clnt_ctx_name(struct nfsmount *nmp, struct nfs_gss_clnt_ctx *cp, char *buf, int len)
+{
+ char *np;
+ int nlen;
+ const char *server = "";
+
+ if (nmp && nmp->nm_mountp)
+ server = vfs_statfs(nmp->nm_mountp)->f_mntfromname;
+
+ if (cp == NULL) {
+ snprintf(buf, len, "[%s] NULL context", server);
+ return (buf);
+ }
+
+ if (cp->gss_clnt_principal && !cp->gss_clnt_display) {
+ np = (char *)cp->gss_clnt_principal;
+ nlen = cp->gss_clnt_prinlen;
+ } else {
+ np = cp->gss_clnt_display;
+ nlen = np ? strlen(cp->gss_clnt_display) : 0;
+ }
+ if (nlen)
+ snprintf(buf, len, "[%s] %.*s %d/%d %s", server, nlen, np,
+ kauth_cred_getasid(cp->gss_clnt_cred),
+ kauth_cred_getuid(cp->gss_clnt_cred),
+ cp->gss_clnt_principal ? "" : "[from default cred] ");
+ else
+ snprintf(buf, len, "[%s] using default %d/%d ", server,
+ kauth_cred_getasid(cp->gss_clnt_cred),
+ kauth_cred_getuid(cp->gss_clnt_cred));
+ return (buf);
+}
+
+#define NFS_CTXBUFSZ 80
+#define NFS_GSS_CTX(req, cp) nfs_gss_clnt_ctx_name((req)->r_nmp, cp ? cp : (req)->r_gss_ctx, CTXBUF, sizeof(CTXBUF))
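+/*
+ * NFS_GSS_CTX expects the calling function to declare a local
+ * char CTXBUF[NFS_CTXBUFSZ] scratch buffer for the formatted context name.
+ */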
+
+#define NFS_GSS_CLNT_CTX_DUMP(nmp) \
+ do { \
+ if (NFS_GSS_ISDBG && (NFS_DEBUG_FLAGS & 0x2)) \
+ nfs_gss_clnt_ctx_dump((nmp)); \
+ } while (0)
+
static int
-nfs_gss_clnt_ctx_find(struct nfsreq *req)
+nfs_gss_clnt_ctx_cred_match(kauth_cred_t cred1, kauth_cred_t cred2)
+{
+ if (kauth_cred_getasid(cred1) == kauth_cred_getasid(cred2))
+ return (1);
+ return (0);
+}
+
+/*
+ * Busy the mount for each principal set on the mount
+ * so that the automounter will not unmount the file
+ * system underneath us. Without this, if an unmount
+ * occurs, the principal that is set for an audit session
+ * will be lost and we may end up with a different identity.
+ *
+ * Note setting principals on the mount is a bad idea. This
+ * really should be handle by KIM (Kerberos Identity Management)
+ * so that defaults can be set by service identities.
+ */
+
+static void
+nfs_gss_clnt_mnt_ref(struct nfsmount *nmp)
+{
+ int error;
+ vnode_t rvp;
+
+ if (nmp == NULL ||
+ !(vfs_flags(nmp->nm_mountp) & MNT_AUTOMOUNTED))
+ return;
+
+ error = VFS_ROOT(nmp->nm_mountp, &rvp, NULL);
+ if (!error) {
+ vnode_ref(rvp);
+ vnode_put(rvp);
+ }
+}
+
+/*
+ * Unbusy the mount. See the comment above.
+ */
+
+static void
+nfs_gss_clnt_mnt_rele(struct nfsmount *nmp)
+{
+ int error;
+ vnode_t rvp;
+
+ if (nmp == NULL ||
+ !(vfs_flags(nmp->nm_mountp) & MNT_AUTOMOUNTED))
+ return;
+
+ error = VFS_ROOT(nmp->nm_mountp, &rvp, NULL);
+ if (!error) {
+ vnode_rele(rvp);
+ vnode_put(rvp);
+ }
+}
+
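+/*
+ * When nfs_root_steals_ctx is non-zero, the superuser may co-opt the first
+ * valid context on the mount's list when no explicit principal was
+ * requested; see the uid == 0 check in nfs_gss_clnt_ctx_find_principal.
+ */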
+int nfs_root_steals_ctx = 1;
+
+static int
+nfs_gss_clnt_ctx_find_principal(struct nfsreq *req, uint8_t *principal, uint32_t plen, uint32_t nt)
{
struct nfsmount *nmp = req->r_nmp;
struct nfs_gss_clnt_ctx *cp;
- uid_t uid = kauth_cred_getuid(req->r_cred);
+ struct nfsreq treq;
int error = 0;
- int retrycnt = 0;
+ struct timeval now;
+ gss_key_info *ki;
+ char CTXBUF[NFS_CTXBUFSZ];
+ bzero(&treq, sizeof (struct nfsreq));
+ treq.r_nmp = nmp;
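+ /*
+ * treq is a scratch request used only to hold a reference to any stale
+ * context (one whose principal no longer matches) so that it can be
+ * released with nfs_gss_clnt_ctx_unref() after nm_lock is dropped.
+ */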
+
+ microuptime(&now);
lck_mtx_lock(&nmp->nm_lock);
TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) {
- if (cp->gss_clnt_uid == uid) {
- if (cp->gss_clnt_flags & GSS_CTX_INVAL)
- continue;
+ lck_mtx_lock(cp->gss_clnt_mtx);
+ if (cp->gss_clnt_flags & GSS_CTX_DESTROY) {
+ NFS_GSS_DBG("Found destroyed context %s refcnt = %d continuing\n",
+ NFS_GSS_CTX(req, cp),
+ cp->gss_clnt_refcnt);
+ lck_mtx_unlock(cp->gss_clnt_mtx);
+ continue;
+ }
+ if (nfs_gss_clnt_ctx_cred_match(cp->gss_clnt_cred, req->r_cred)) {
+ if (nmp->nm_gsscl.tqh_first != cp) {
+ TAILQ_REMOVE(&nmp->nm_gsscl, cp, gss_clnt_entries);
+ TAILQ_INSERT_HEAD(&nmp->nm_gsscl, cp, gss_clnt_entries);
+ }
+ if (principal) {
+ /*
+ * If we have a principal, but it does not match the current cred,
+ * mark the context for removal.
+ */
+ if (cp->gss_clnt_prinlen != plen || cp->gss_clnt_prinnt != nt ||
+ bcmp(cp->gss_clnt_principal, principal, plen) != 0) {
+ cp->gss_clnt_flags |= (GSS_CTX_INVAL | GSS_CTX_DESTROY);
+ cp->gss_clnt_refcnt++;
+ lck_mtx_unlock(cp->gss_clnt_mtx);
+ NFS_GSS_DBG("Marking %s for deletion because %s does not match\n",
+ NFS_GSS_CTX(req, cp), principal);
+ NFS_GSS_DBG("len = (%d,%d), nt = (%d,%d)\n", cp->gss_clnt_prinlen, plen,
+ cp->gss_clnt_prinnt, nt);
+ treq.r_gss_ctx = cp;
+ cp = NULL;
+ break;
+ }
+ }
+ if (cp->gss_clnt_flags & GSS_CTX_INVAL) {
+ /*
+ * If we're still being used and we're not expired
+ * just return and don't bother gssd again. Note if
+ * gss_clnt_nctime is zero it is about to be set to now.
+ */
+ if (cp->gss_clnt_nctime + GSS_NEG_CACHE_TO >= now.tv_sec || cp->gss_clnt_nctime == 0) {
+ NFS_GSS_DBG("Context %s (refcnt = %d) not expired returning EAUTH nctime = %ld now = %ld\n",
+ NFS_GSS_CTX(req, cp), cp->gss_clnt_refcnt, cp->gss_clnt_nctime, now.tv_sec);
+ lck_mtx_unlock(cp->gss_clnt_mtx);
+ lck_mtx_unlock(&nmp->nm_lock);
+ return (NFSERR_EAUTH);
+ }
+ if (cp->gss_clnt_refcnt) {
+ struct nfs_gss_clnt_ctx *ncp;
+ /*
+ * If this context has references, we can't use it, so we mark it for
+ * destruction and create a new context based on this one in the
+ * same manner as renewing one.
+ */
+ cp->gss_clnt_flags |= GSS_CTX_DESTROY;
+ NFS_GSS_DBG("Context %s has expired but we still have %d references\n",
+ NFS_GSS_CTX(req, cp), cp->gss_clnt_refcnt);
+ error = nfs_gss_clnt_ctx_copy(cp, &ncp, NULL);
+ lck_mtx_unlock(cp->gss_clnt_mtx);
+ if (error) {
+ lck_mtx_unlock(&nmp->nm_lock);
+ return (error);
+ }
+ cp = ncp;
+ break;
+ } else {
+ /* cp->gss_clnt_kinfo should be NULL here */
+ if (cp->gss_clnt_kinfo) {
+ FREE(cp->gss_clnt_kinfo, M_TEMP);
+ cp->gss_clnt_kinfo = NULL;
+ }
+ if (cp->gss_clnt_nctime)
+ nmp->nm_ncentries--;
+ lck_mtx_unlock(cp->gss_clnt_mtx);
+ TAILQ_REMOVE(&nmp->nm_gsscl, cp, gss_clnt_entries);
+ break;
+ }
+ }
+ /* Found a valid context to return */
+ cp->gss_clnt_refcnt++;
+ req->r_gss_ctx = cp;
+ lck_mtx_unlock(cp->gss_clnt_mtx);
lck_mtx_unlock(&nmp->nm_lock);
- nfs_gss_clnt_ctx_ref(req, cp);
return (0);
}
+ lck_mtx_unlock(cp->gss_clnt_mtx);
}
- if (uid == 0) {
+ if (!cp && nfs_root_steals_ctx && principal == NULL && kauth_cred_getuid(req->r_cred) == 0) {
/*
* If superuser is trying to get access, then co-opt
* the first valid context in the list.
* in case one is set up for it.
*/
TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) {
- if (!(cp->gss_clnt_flags & GSS_CTX_INVAL)) {
- lck_mtx_unlock(&nmp->nm_lock);
+ if (!(cp->gss_clnt_flags & (GSS_CTX_INVAL|GSS_CTX_DESTROY))) {
nfs_gss_clnt_ctx_ref(req, cp);
+ lck_mtx_unlock(&nmp->nm_lock);
+ NFS_GSS_DBG("Root stole context %s\n", NFS_GSS_CTX(req, NULL));
return (0);
}
}
}
- /*
- * Not found - create a new context
- */
+ MALLOC(ki, gss_key_info *, sizeof (gss_key_info), M_TEMP, M_WAITOK|M_ZERO);
+ if (ki == NULL) {
+ lck_mtx_unlock(&nmp->nm_lock);
+ return (ENOMEM);
+ }
+
+ NFS_GSS_DBG("Context %s%sfound in Neg Cache @ %ld\n",
+ NFS_GSS_CTX(req, cp),
+ cp == NULL ? " not " : "",
+ cp == NULL ? 0L : cp->gss_clnt_nctime);
/*
- * If the thread is async, then it cannot get
- * kerberos creds and set up a proper context.
- * If no sec= mount option is given, attempt
- * to failover to sec=sys.
+ * Not found - create a new context
*/
- if (req->r_thread == NULL) {
- if (nmp->nm_flag & NFSMNT_SECSYSOK) {
- error = nfs_gss_clnt_ctx_failover(req);
- } else {
- printf("nfs_gss_clnt_ctx_find: no context for async\n");
- error = NFSERR_EAUTH;
- }
- lck_mtx_unlock(&nmp->nm_lock);
- return (error);
- }
-
- MALLOC(cp, struct nfs_gss_clnt_ctx *, sizeof(*cp), M_TEMP, M_WAITOK|M_ZERO);
if (cp == NULL) {
- lck_mtx_unlock(&nmp->nm_lock);
- return (ENOMEM);
+ MALLOC(cp, struct nfs_gss_clnt_ctx *, sizeof(*cp), M_TEMP, M_WAITOK|M_ZERO);
+ if (cp == NULL) {
+ lck_mtx_unlock(&nmp->nm_lock);
+ return (ENOMEM);
+ }
+ cp->gss_clnt_kinfo = ki;
+ cp->gss_clnt_cred = req->r_cred;
+ kauth_cred_ref(cp->gss_clnt_cred);
+ cp->gss_clnt_mtx = lck_mtx_alloc_init(nfs_gss_clnt_grp, LCK_ATTR_NULL);
+ cp->gss_clnt_ptime = now.tv_sec - GSS_PRINT_DELAY;
+ if (principal) {
+ MALLOC(cp->gss_clnt_principal, uint8_t *, plen+1, M_TEMP, M_WAITOK|M_ZERO);
+ memcpy(cp->gss_clnt_principal, principal, plen);
+ cp->gss_clnt_prinlen = plen;
+ cp->gss_clnt_prinnt = nt;
+ cp->gss_clnt_flags |= GSS_CTX_STICKY;
+ nfs_gss_clnt_mnt_ref(nmp);
+ }
+ } else {
+ cp->gss_clnt_kinfo = ki;
+ nfs_gss_clnt_ctx_clean(cp);
+ if (principal) {
+ /*
+ * If we have a principal and we found a matching audit
+ * session, then to get here the principal had to match.
+ * While walking the context list, if a context has a
+ * different principal (or no principal set) we mark it
+ * for destruction, set cp to NULL, and fall into the
+ * allocation clause above. If the context still has
+ * references we instead copy it, which preserves the
+ * principal, and we end up here with the correct
+ * principal set. If there are no references, the
+ * principal must have matched and we fall through here.
+ */
+ cp->gss_clnt_flags |= GSS_CTX_STICKY;
+ }
}
- cp->gss_clnt_uid = uid;
- cp->gss_clnt_mtx = lck_mtx_alloc_init(nfs_gss_clnt_grp, LCK_ATTR_NULL);
cp->gss_clnt_thread = current_thread();
nfs_gss_clnt_ctx_ref(req, cp);
- TAILQ_INSERT_TAIL(&nmp->nm_gsscl, cp, gss_clnt_entries);
+ TAILQ_INSERT_HEAD(&nmp->nm_gsscl, cp, gss_clnt_entries);
lck_mtx_unlock(&nmp->nm_lock);
-retry:
- error = nfs_gss_clnt_ctx_init(req, cp);
- if (error == ENEEDAUTH) {
- error = nfs_gss_clnt_ctx_delay(req, &retrycnt);
- if (!error)
- goto retry;
-
- /* Giving up on this context */
- cp->gss_clnt_flags |= GSS_CTX_INVAL;
-
- /*
- * Wake any threads waiting to use the context
- */
- lck_mtx_lock(cp->gss_clnt_mtx);
- cp->gss_clnt_thread = NULL;
- if (cp->gss_clnt_flags & GSS_NEEDCTX) {
- cp->gss_clnt_flags &= ~GSS_NEEDCTX;
- wakeup(cp);
- }
- lck_mtx_unlock(cp->gss_clnt_mtx);
-
- }
-
- if (error)
+ error = nfs_gss_clnt_ctx_init_retry(req, cp); // Initialize new context
+ if (error) {
+ NFS_GSS_DBG("nfs_gss_clnt_ctx_init_retry returned %d for %s\n", error, NFS_GSS_CTX(req, cp));
nfs_gss_clnt_ctx_unref(req);
-
- /*
- * If we failed to set up a Kerberos context for this
- * user and no sec= mount option was given, but the
- * server indicated that it could support AUTH_SYS, then set
- * up a dummy context that allows this user to attempt
- * sec=sys calls.
- */
- if (error && (nmp->nm_flag & NFSMNT_SECSYSOK) &&
- (error != ENXIO) && (error != ETIMEDOUT)) {
- lck_mtx_lock(&nmp->nm_lock);
- error = nfs_gss_clnt_ctx_failover(req);
- lck_mtx_unlock(&nmp->nm_lock);
}
+ /* Remove any old matching context that had a different principal */
+ nfs_gss_clnt_ctx_unref(&treq);
+
return (error);
}
-/*
- * Set up a dummy context to allow the use of sec=sys
- * for this user, if the server allows sec=sys.
- * The context is valid for GSS_CLNT_SYS_VALID seconds,
- * so that the user will periodically attempt to fail back
- * and get a real credential.
- *
- * Assumes context list (nm_lock) is locked
- */
static int
-nfs_gss_clnt_ctx_failover(struct nfsreq *req)
+nfs_gss_clnt_ctx_find(struct nfsreq *req)
{
- struct nfsmount *nmp = req->r_nmp;
- struct nfs_gss_clnt_ctx *cp;
- uid_t uid = kauth_cred_getuid(req->r_cred);
- struct timeval now;
-
- MALLOC(cp, struct nfs_gss_clnt_ctx *, sizeof(*cp), M_TEMP, M_WAITOK|M_ZERO);
- if (cp == NULL)
- return (ENOMEM);
-
- cp->gss_clnt_service = RPCSEC_GSS_SVC_SYS;
- cp->gss_clnt_uid = uid;
- cp->gss_clnt_mtx = lck_mtx_alloc_init(nfs_gss_clnt_grp, LCK_ATTR_NULL);
- microuptime(&now);
- cp->gss_clnt_ctime = now.tv_sec; // time stamp
- nfs_gss_clnt_ctx_ref(req, cp);
- TAILQ_INSERT_TAIL(&nmp->nm_gsscl, cp, gss_clnt_entries);
-
- return (0);
+ return (nfs_gss_clnt_ctx_find_principal(req, NULL, 0, 0));
}
/*
struct gss_seq *gsp;
u_char tokbuf[KRB5_SZ_TOKMAX(MAX_DIGEST)];
u_char cksum[MAX_DIGEST];
- struct timeval now;
gss_key_info *ki;
-
+
slpflag = (PZERO-1);
if (req->r_nmp) {
- slpflag |= ((req->r_nmp->nm_flag & NFSMNT_INT) && req->r_thread) ? PCATCH : 0;
+ slpflag |= (NMFLAG(req->r_nmp, INTR) && req->r_thread && !(req->r_flags & R_NOINTR)) ? PCATCH : 0;
recordmark = (req->r_nmp->nm_sotype == SOCK_STREAM);
}
+
retry:
if (req->r_gss_ctx == NULL) {
/*
}
cp = req->r_gss_ctx;
- /*
- * If it's a dummy context for a user that's using
- * a fallback to sec=sys, then just return an error
- * so rpchead can encode an RPCAUTH_UNIX cred.
- */
- if (cp->gss_clnt_service == RPCSEC_GSS_SVC_SYS) {
- /*
- * The dummy context is valid for just
- * GSS_CLNT_SYS_VALID seconds. If the context
- * is older than this, mark it invalid and try
- * again to get a real one.
- */
- lck_mtx_lock(cp->gss_clnt_mtx);
- microuptime(&now);
- if (now.tv_sec > cp->gss_clnt_ctime + GSS_CLNT_SYS_VALID) {
- cp->gss_clnt_flags |= GSS_CTX_INVAL;
- lck_mtx_unlock(cp->gss_clnt_mtx);
- nfs_gss_clnt_ctx_unref(req);
- goto retry;
- }
- lck_mtx_unlock(cp->gss_clnt_mtx);
- return (ENEEDAUTH);
- }
-
/*
* If the context thread isn't null, then the context isn't
* yet complete and is for the exclusive use of the thread
if (cp->gss_clnt_thread && cp->gss_clnt_thread != current_thread()) {
cp->gss_clnt_flags |= GSS_NEEDCTX;
msleep(cp, cp->gss_clnt_mtx, slpflag | PDROP, "ctxwait", NULL);
+ slpflag &= ~PCATCH;
if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0)))
return (error);
nfs_gss_clnt_ctx_unref(req);
}
lck_mtx_unlock(cp->gss_clnt_mtx);
- ki = &cp->gss_clnt_kinfo;
+ ki = cp->gss_clnt_kinfo;
if (cp->gss_clnt_flags & GSS_CTX_COMPLETE) {
/*
* Get a sequence number for this request.
while (win_getbit(cp->gss_clnt_seqbits,
((cp->gss_clnt_seqnum - cp->gss_clnt_seqwin) + 1) % cp->gss_clnt_seqwin)) {
cp->gss_clnt_flags |= GSS_NEEDSEQ;
- msleep(cp, cp->gss_clnt_mtx, slpflag, "seqwin", NULL);
+ msleep(cp, cp->gss_clnt_mtx, slpflag | PDROP, "seqwin", NULL);
+ slpflag &= ~PCATCH;
if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0))) {
- lck_mtx_unlock(cp->gss_clnt_mtx);
return (error);
}
+ lck_mtx_lock(cp->gss_clnt_mtx);
if (cp->gss_clnt_flags & GSS_CTX_INVAL) {
/* Renewed while we were waiting */
lck_mtx_unlock(cp->gss_clnt_mtx);
struct gss_seq *gsp;
uint32_t reslen, start, cksumlen, toklen;
int error = 0;
- gss_key_info *ki = &cp->gss_clnt_kinfo;
+ gss_key_info *ki = cp->gss_clnt_kinfo;
reslen = cksumlen = 0;
*accepted_statusp = 0;
if (verftype != RPCSEC_GSS) {
if (verftype != RPCAUTH_NULL)
return (NFSERR_EAUTH);
- if (cp->gss_clnt_flags & GSS_CTX_COMPLETE &&
- cp->gss_clnt_service != RPCSEC_GSS_SVC_SYS)
+ if (cp->gss_clnt_flags & GSS_CTX_COMPLETE)
return (NFSERR_EAUTH);
if (verflen > 0)
nfsm_chain_adv(error, nmc, nfsm_rndup(verflen));
* The location and length of the args is marked by two fields
* in the request structure: r_gss_argoff and r_gss_arglen,
* which are stashed when the NFS request is built.
- */
+ */
int
nfs_gss_clnt_args_restore(struct nfsreq *req)
{
struct nfsm_chain mchain, *nmc = &mchain;
int len, error = 0;
- if (cp == NULL)
+ if (cp == NULL)
return (NFSERR_EAUTH);
if ((cp->gss_clnt_flags & GSS_CTX_COMPLETE) == 0)
*/
len = req->r_gss_arglen;
len += len % 8 > 0 ? 4 : 8; // add DES padding length
- nfs_gss_encrypt_chain(&cp->gss_clnt_kinfo, nmc,
+ nfs_gss_encrypt_chain(cp->gss_clnt_kinfo, nmc,
req->r_gss_argoff, len, DES_DECRYPT);
nfsm_chain_adv(error, nmc, req->r_gss_arglen);
if (error)
int server_complete = 0;
u_char cksum1[MAX_DIGEST], cksum2[MAX_DIGEST];
int error = 0;
- struct timeval now;
- gss_key_info *ki = &cp->gss_clnt_kinfo;
+ gss_key_info *ki = cp->gss_clnt_kinfo;
/* Initialize a new client context */
- cp->gss_clnt_svcname = nfs_gss_clnt_svcname(nmp);
if (cp->gss_clnt_svcname == NULL) {
- error = NFSERR_EAUTH;
- goto nfsmout;
+ cp->gss_clnt_svcname = nfs_gss_clnt_svcname(nmp, &cp->gss_clnt_svcnt, &cp->gss_clnt_svcnamlen);
+ if (cp->gss_clnt_svcname == NULL) {
+ error = NFSERR_EAUTH;
+ goto nfsmout;
+ }
}
cp->gss_clnt_proc = RPCSEC_GSS_INIT;
cp->gss_clnt_service =
- nmp->nm_auth == RPCAUTH_KRB5 ? RPCSEC_GSS_SVC_NONE :
- nmp->nm_auth == RPCAUTH_KRB5I ? RPCSEC_GSS_SVC_INTEGRITY :
- nmp->nm_auth == RPCAUTH_KRB5P ? RPCSEC_GSS_SVC_PRIVACY : 0;
+ req->r_auth == RPCAUTH_KRB5 ? RPCSEC_GSS_SVC_NONE :
+ req->r_auth == RPCAUTH_KRB5I ? RPCSEC_GSS_SVC_INTEGRITY :
+ req->r_auth == RPCAUTH_KRB5P ? RPCSEC_GSS_SVC_PRIVACY : 0;
cp->gss_clnt_gssd_flags = (nfs_single_des ? GSSD_NFS_1DES : 0);
/*
*/
error = nfs_gss_clnt_ctx_callserver(req, cp);
if (error) {
- if (cp->gss_clnt_proc == RPCSEC_GSS_INIT &&
+ if (error == ENEEDAUTH && cp->gss_clnt_proc == RPCSEC_GSS_INIT &&
(cp->gss_clnt_gssd_flags & (GSSD_RESTART | GSSD_NFS_1DES)) == 0) {
+ NFS_GSS_DBG("Retrying with single DES for req %p\n", req);
cp->gss_clnt_gssd_flags = (GSSD_RESTART | GSSD_NFS_1DES);
if (cp->gss_clnt_token)
FREE(cp->gss_clnt_token, M_TEMP);
server_complete = 1;
if (client_complete)
break;
- } else if (cp->gss_clnt_major != GSS_S_CONTINUE_NEEDED) {
- error = NFSERR_EAUTH;
- goto nfsmout;
}
-
cp->gss_clnt_proc = RPCSEC_GSS_CONTINUE_INIT;
}
/*
* The context is apparently established successfully
*/
+ lck_mtx_lock(cp->gss_clnt_mtx);
cp->gss_clnt_flags |= GSS_CTX_COMPLETE;
+ lck_mtx_unlock(cp->gss_clnt_mtx);
cp->gss_clnt_proc = RPCSEC_GSS_DATA;
- microuptime(&now);
- cp->gss_clnt_ctime = now.tv_sec; // time stamp
-
/*
* Compute checksum of the server's window
if (cp->gss_clnt_seqbits == NULL)
error = NFSERR_EAUTH;
nfsmout:
- /*
+ /*
* If the error is ENEEDAUTH we're not done, so no need
* to wake up other threads again. This thread will retry in
* the find or renew routines.
*/
- if (error == ENEEDAUTH)
+ if (error == ENEEDAUTH)
return (error);
/*
* It will be removed when the reference count
* drops to zero.
*/
+ lck_mtx_lock(cp->gss_clnt_mtx);
if (error)
cp->gss_clnt_flags |= GSS_CTX_INVAL;
/*
* Wake any threads waiting to use the context
*/
- lck_mtx_lock(cp->gss_clnt_mtx);
cp->gss_clnt_thread = NULL;
if (cp->gss_clnt_flags & GSS_NEEDCTX) {
cp->gss_clnt_flags &= ~GSS_NEEDCTX;
return (error);
}
+/*
+ * This function calls nfs_gss_clnt_ctx_init() to set up a new context.
+ * But if there's a failure in trying to establish the context it keeps
+ * retrying at progressively longer intervals in case the failure is
+ * due to some transient condition. For instance, the server might be
+ * failing the context setup because directory services is not coming
+ * up in a timely fashion.
+ */
+static int
+nfs_gss_clnt_ctx_init_retry(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp)
+{
+ struct nfsmount *nmp = req->r_nmp;
+ struct timeval now;
+ time_t waituntil;
+ int error, slpflag;
+ int retries = 0;
+ int timeo = NFS_TRYLATERDEL;
+
+ if (nfs_mount_gone(nmp)) {
+ error = ENXIO;
+ goto bad;
+ }
+
+ /* For an "intr" mount allow a signal to interrupt the retries */
+ slpflag = (NMFLAG(nmp, INTR) && !(req->r_flags & R_NOINTR)) ? PCATCH : 0;
+
+ while ((error = nfs_gss_clnt_ctx_init(req, cp)) == ENEEDAUTH) {
+ microuptime(&now);
+ waituntil = now.tv_sec + timeo;
+ while (now.tv_sec < waituntil) {
+ tsleep(NULL, PSOCK | slpflag, "nfs_gss_clnt_ctx_init_retry", hz);
+ slpflag = 0;
+ error = nfs_sigintr(req->r_nmp, req, current_thread(), 0);
+ if (error)
+ goto bad;
+ microuptime(&now);
+ }
+
+ retries++;
+ /* If it's a soft mount just give up after a while */
+ if ((NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) && (retries > nmp->nm_retry)) {
+ error = ETIMEDOUT;
+ goto bad;
+ }
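+ /* Back off exponentially between attempts, capping the wait at 60 seconds */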
+ timeo *= 2;
+ if (timeo > 60)
+ timeo = 60;
+ }
+
+ if (error == 0)
+ return 0; // success
+bad:
+ /*
+ * Give up on this context
+ */
+ lck_mtx_lock(cp->gss_clnt_mtx);
+ cp->gss_clnt_flags |= GSS_CTX_INVAL;
+
+ /*
+ * Wake any threads waiting to use the context
+ */
+ cp->gss_clnt_thread = NULL;
+ if (cp->gss_clnt_flags & GSS_NEEDCTX) {
+ cp->gss_clnt_flags &= ~GSS_NEEDCTX;
+ wakeup(cp);
+ }
+ lck_mtx_unlock(cp->gss_clnt_mtx);
+
+ return error;
+}
+
/*
* Call the NFS server using a null procedure for context setup.
* Even though it's a null procedure and nominally has no arguments
{
struct nfsm_chain nmreq, nmrep;
int error = 0, status;
+ uint32_t major = cp->gss_clnt_major, minor = cp->gss_clnt_minor;
int sz;
- if (!req->r_nmp)
+ if (nfs_mount_gone(req->r_nmp))
return (ENXIO);
nfsm_chain_null(&nmreq);
nfsm_chain_null(&nmrep);
*/
if (cp->gss_clnt_major != GSS_S_COMPLETE &&
cp->gss_clnt_major != GSS_S_CONTINUE_NEEDED) {
- char who[] = "server";
- char unknown[] = "<unknown>";
- (void) mach_gss_log_error(
- cp->gss_clnt_mport,
- !req->r_nmp ? unknown :
- vfs_statfs(req->r_nmp->nm_mountp)->f_mntfromname,
- cp->gss_clnt_uid,
- who,
- cp->gss_clnt_major,
- cp->gss_clnt_minor);
+ printf("nfs_gss_clnt_ctx_callserver: gss_clnt_major = %d\n", cp->gss_clnt_major);
+ nfs_gss_clnt_log_error(req, cp, major, minor);
+
}
nfsmout:
}
/*
- * Ugly hack to get the service principal from the f_mntfromname field in
- * the statfs struct. We assume a format of server:path. We don't currently
- * support url's or other bizarre formats like path@server. A better solution
- * here might be to allow passing the service principal down in the mount args.
- * For kerberos we just use the default realm.
+ * We construct the service principal as a gss hostbased service principal of
+ * the form nfs@<server>, unless the server's principal was passed down in the
+ * mount arguments. If the arguments don't specify the service principal, the
+ * server name is extracted from the location passed in the mount arguments if
+ * available. Otherwise assume a format of <server>:<path> in the
+ * mntfromname. We don't currently support url's or other bizarre formats like
+ * path@server. Mount_url will convert the nfs url into <server>:<path> when
+ * calling mount, so this works out well in practice.
+ *
*/
-static char *
-nfs_gss_clnt_svcname(struct nfsmount *nmp)
+
+static uint8_t *
+nfs_gss_clnt_svcname(struct nfsmount *nmp, gssd_nametype *nt, uint32_t *len)
{
- char *svcname, *d, *mntfromhere;
- int len;
+ char *svcname, *d, *server;
+ int lindx, sindx;
- if (!nmp)
- return (NULL);
- mntfromhere = &vfs_statfs(nmp->nm_mountp)->f_mntfromname[0];
- len = strlen(mntfromhere) + 5; /* "nfs/" plus null */
- MALLOC(svcname, char *, len, M_TEMP, M_NOWAIT);
- if (svcname == NULL)
+ if (nfs_mount_gone(nmp))
return (NULL);
- strlcpy(svcname, "nfs/", len);
- strlcat(svcname, mntfromhere, len);
- d = strchr(svcname, ':');
- if (d)
- *d = '\0';
- return (svcname);
+ if (nmp->nm_sprinc) {
+ *len = strlen(nmp->nm_sprinc) + 1;
+ MALLOC(svcname, char *, *len, M_TEMP, M_WAITOK);
+ *nt = GSSD_HOSTBASED;
+ if (svcname == NULL)
+ return (NULL);
+ strlcpy(svcname, nmp->nm_sprinc, *len);
+
+ return ((uint8_t *)svcname);
+ }
+
+ *nt = GSSD_HOSTBASED;
+ if (nmp->nm_locations.nl_numlocs && !(NFS_GSS_ISDBG && (NFS_DEBUG_FLAGS & 0x1))) {
+ lindx = nmp->nm_locations.nl_current.nli_loc;
+ sindx = nmp->nm_locations.nl_current.nli_serv;
+ server = nmp->nm_locations.nl_locations[lindx]->nl_servers[sindx]->ns_name;
+ *len = (uint32_t)strlen(server);
+ } else {
+ /* Older binaries using older mount args end up here */
+ server = vfs_statfs(nmp->nm_mountp)->f_mntfromname;
+ NFS_GSS_DBG("nfs getting gss svcname from %s\n", server);
+ d = strchr(server, ':');
+ *len = (uint32_t)(d ? (d - server) : strlen(server));
+ }
+
+ *len += 5; /* "nfs@" plus null */
+ MALLOC(svcname, char *, *len, M_TEMP, M_WAITOK);
+ strlcpy(svcname, "nfs", *len);
+ strlcat(svcname, "@", *len);
+ strlcat(svcname, server, *len);
+ NFS_GSS_DBG("nfs svcname = %s\n", svcname);
+
+ return ((uint8_t *)svcname);
+}
+
+/*
+ * Get a mach port to talk to gssd.
+ * gssd lives in the root bootstrap, so we call gssd's lookup routine
+ * to get a send right to talk to a new gssd instance that launchd has launched
+ * based on the cred's uid and audit session id.
+ */
+
+static mach_port_t
+nfs_gss_clnt_get_upcall_port(kauth_cred_t credp)
+{
+ mach_port_t gssd_host_port, uc_port = IPC_PORT_NULL;
+ kern_return_t kr;
+ au_asid_t asid;
+ uid_t uid;
+
+ kr = host_get_gssd_port(host_priv_self(), &gssd_host_port);
+ if (kr != KERN_SUCCESS) {
+ printf("nfs_gss_get_upcall_port: can't get gssd port, status %x (%d)\n", kr, kr);
+ return (IPC_PORT_NULL);
+ }
+ if (!IPC_PORT_VALID(gssd_host_port)) {
+ printf("nfs_gss_get_upcall_port: gssd port not valid\n");
+ return (IPC_PORT_NULL);
+ }
+
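+ /*
+ * Ask gssd's lookup service for the instance bound to this audit
+ * session; use the audit uid, falling back to the effective uid
+ * when no audit uid has been set.
+ */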
+ asid = kauth_cred_getasid(credp);
+ uid = kauth_cred_getauid(credp);
+ if (uid == AU_DEFAUDITID)
+ uid = kauth_cred_getuid(credp);
+ kr = mach_gss_lookup(gssd_host_port, uid, asid, &uc_port);
+ if (kr != KERN_SUCCESS)
+ printf("nfs_gss_clnt_get_upcall_port: mach_gssd_lookup failed: status %x (%d)\n", kr, kr);
+ host_release_special_port(gssd_host_port);
+
+ return (uc_port);
+}
+
+
+static void
+nfs_gss_clnt_log_error(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp, uint32_t major, uint32_t minor)
+{
+#define GETMAJERROR(x) (((x) >> GSS_C_ROUTINE_ERROR_OFFSET) & GSS_C_ROUTINE_ERROR_MASK)
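+ /* GETMAJERROR pulls the routine-error field out of a GSS major status code. */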
+ struct nfsmount *nmp = req->r_nmp;
+ char who[] = "client";
+ uint32_t gss_error = GETMAJERROR(cp->gss_clnt_major);
+ const char *procn = "unknown";
+ proc_t proc;
+ pid_t pid = -1;
+ struct timeval now;
+
+ if (req->r_thread) {
+ proc = (proc_t)get_bsdthreadtask_info(req->r_thread);
+ if (proc != NULL && (proc->p_fd == NULL || (proc->p_lflag & P_LVFORK)))
+ proc = NULL;
+ if (proc) {
+ if (*proc->p_comm)
+ procn = proc->p_comm;
+ pid = proc->p_pid;
+ }
+ } else {
+ procn = "kernproc";
+ pid = 0;
+ }
+
+ microuptime(&now);
+ if ((cp->gss_clnt_major != major || cp->gss_clnt_minor != minor ||
+ cp->gss_clnt_ptime + GSS_PRINT_DELAY < now.tv_sec) &&
+ (nmp->nm_state & NFSSTA_MOUNTED)) {
+ /*
+ * Will let gssd do some logging in hopes that it can translate
+ * the minor code.
+ */
+ if (cp->gss_clnt_minor && cp->gss_clnt_minor != minor) {
+ (void) mach_gss_log_error(
+ cp->gss_clnt_mport,
+ vfs_statfs(nmp->nm_mountp)->f_mntfromname,
+ kauth_cred_getuid(cp->gss_clnt_cred),
+ who,
+ cp->gss_clnt_major,
+ cp->gss_clnt_minor);
+ }
+ gss_error = gss_error ? gss_error : cp->gss_clnt_major;
+
+ /*
+ *%%% It would be really nice to get the terminal from the proc or auditinfo_addr struct and print that here.
+ */
+ printf("NFS: gssd auth failure by %s on audit session %d uid %d proc %s/%d for mount %s. Error: major = %d minor = %d\n",
+ cp->gss_clnt_display ? cp->gss_clnt_display : who, kauth_cred_getasid(req->r_cred), kauth_cred_getuid(req->r_cred),
+ procn, pid, vfs_statfs(nmp->nm_mountp)->f_mntfromname, gss_error, (int32_t)cp->gss_clnt_minor);
+ cp->gss_clnt_ptime = now.tv_sec;
+ switch (gss_error) {
+ case 7: printf("NFS: gssd does not have credentials for session %d/%d, (kinit)?\n",
+ kauth_cred_getasid(req->r_cred), kauth_cred_getauid(req->r_cred));
+ break;
+ case 11: printf("NFS: gssd has expired credentials for session %d/%d, (kinit)?\n",
+ kauth_cred_getasid(req->r_cred), kauth_cred_getauid(req->r_cred));
+ break;
+ }
+ } else {
+ NFS_GSS_DBG("NFS: gssd auth failure by %s on audit session %d uid %d proc %s/%d for mount %s. Error: major = %d minor = %d\n",
+ cp->gss_clnt_display ? cp->gss_clnt_display : who, kauth_cred_getasid(req->r_cred), kauth_cred_getuid(req->r_cred),
+ procn, pid, vfs_statfs(nmp->nm_mountp)->f_mntfromname, gss_error, (int32_t)cp->gss_clnt_minor);
+ }
}
/*
* Make an upcall to the gssd using Mach RPC
- * The upcall is made using a task special port.
+ * The upcall is made using a host special port.
* This allows launchd to fire up the gssd in the
* user's session. This is important, since gssd
* must have access to the user's credential cache.
nfs_gss_clnt_gssd_upcall(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp)
{
kern_return_t kr;
- byte_buffer okey = NULL;
+ gssd_byte_buffer okey = NULL;
uint32_t skeylen = 0;
int retry_cnt = 0;
vm_map_copy_t itoken = NULL;
- byte_buffer otoken = NULL;
+ gssd_byte_buffer otoken = NULL;
mach_msg_type_number_t otokenlen;
int error = 0;
- char uprinc[1];
+ uint8_t *principal = NULL;
+ uint32_t plen = 0;
+ int32_t nt = GSSD_STRING_NAME;
+ vm_map_copy_t pname = NULL;
+ vm_map_copy_t svcname = NULL;
+ char display_name[MAX_DISPLAY_STR] = "";
uint32_t ret_flags;
-
+ uint32_t nfs_1des = (cp->gss_clnt_gssd_flags & GSSD_NFS_1DES);
+ struct nfsmount *nmp;
+ uint32_t major = cp->gss_clnt_major, minor = cp->gss_clnt_minor;
+
/*
* NFS currently only supports default principals or
- * principals based on the uid of the caller.
- *
- * N.B. Note we define a one character array for the principal
- * so that we can hold an empty string required by mach, since
- * the kernel is being compiled with -Wwrite-strings.
+ * principals based on the uid of the caller, unless
+ * the principal to use for the mounting cred was specified
+ * in the mount arguments. If the realm to use was specified
+ * then we will send that up as the principal; since the realm is
+ * preceded by an "@", gssd will try to select the default
+ * principal for that realm.
*/
- uprinc[0] = '\0';
- if (cp->gss_clnt_mport == NULL) {
- kr = task_get_gssd_port(get_threadtask(req->r_thread), &cp->gss_clnt_mport);
- if (kr != KERN_SUCCESS) {
- printf("nfs_gss_clnt_gssd_upcall: can't get gssd port, status %x (%d)\n", kr, kr);
- goto out;
- }
- if (!IPC_PORT_VALID(cp->gss_clnt_mport)) {
- printf("nfs_gss_clnt_gssd_upcall: gssd port not valid\n");
- cp->gss_clnt_mport = NULL;
+
+ nmp = req->r_nmp;
+ if (nmp == NULL || vfs_isforce(nmp->nm_mountp) || (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)))
+ return (ENXIO);
+
+ if (cp->gss_clnt_principal && cp->gss_clnt_prinlen) {
+ principal = cp->gss_clnt_principal;
+ plen = cp->gss_clnt_prinlen;
+ nt = cp->gss_clnt_prinnt;
+ } else if (nmp->nm_principal && IS_VALID_CRED(nmp->nm_mcred) && req->r_cred == nmp->nm_mcred) {
+ plen = (uint32_t)strlen(nmp->nm_principal);
+ MALLOC(principal, uint8_t *, plen, M_TEMP, M_WAITOK | M_ZERO);
+ if (principal == NULL)
+ return (ENOMEM);
+ bcopy(nmp->nm_principal, principal, plen);
+ cp->gss_clnt_prinnt = nt = GSSD_USER;
+ }
+ else if (nmp->nm_realm) {
+ plen = (uint32_t)strlen(nmp->nm_realm);
+ principal = (uint8_t *)nmp->nm_realm;
+ nt = GSSD_USER;
+ }
+
+ if (!IPC_PORT_VALID(cp->gss_clnt_mport)) {
+ cp->gss_clnt_mport = nfs_gss_clnt_get_upcall_port(req->r_cred);
+ if (cp->gss_clnt_mport == IPC_PORT_NULL)
goto out;
- }
}
- if (cp->gss_clnt_tokenlen > 0)
+ if (plen)
+ nfs_gss_mach_alloc_buffer(principal, plen, &pname);
+ if (cp->gss_clnt_svcnamlen)
+ nfs_gss_mach_alloc_buffer(cp->gss_clnt_svcname, cp->gss_clnt_svcnamlen, &svcname);
+ if (cp->gss_clnt_tokenlen)
nfs_gss_mach_alloc_buffer(cp->gss_clnt_token, cp->gss_clnt_tokenlen, &itoken);
retry:
- kr = mach_gss_init_sec_context(
+ kr = mach_gss_init_sec_context_v2(
cp->gss_clnt_mport,
- KRB5_MECH,
- (byte_buffer) itoken, (mach_msg_type_number_t) cp->gss_clnt_tokenlen,
- cp->gss_clnt_uid,
- uprinc,
- cp->gss_clnt_svcname,
+ GSSD_KRB5_MECH,
+ (gssd_byte_buffer) itoken, (mach_msg_type_number_t) cp->gss_clnt_tokenlen,
+ kauth_cred_getuid(cp->gss_clnt_cred),
+ nt,
+ (gssd_byte_buffer)pname, (mach_msg_type_number_t) plen,
+ cp->gss_clnt_svcnt,
+ (gssd_byte_buffer)svcname, (mach_msg_type_number_t) cp->gss_clnt_svcnamlen,
GSSD_MUTUAL_FLAG,
- cp->gss_clnt_gssd_flags,
+ &cp->gss_clnt_gssd_flags,
&cp->gss_clnt_context,
&cp->gss_clnt_cred_handle,
&ret_flags,
&okey, (mach_msg_type_number_t *) &skeylen,
&otoken, &otokenlen,
+ cp->gss_clnt_display ? NULL : display_name,
&cp->gss_clnt_major,
&cp->gss_clnt_minor);
+ /* Should be cleared and set in gssd ? */
cp->gss_clnt_gssd_flags &= ~GSSD_RESTART;
-
+ cp->gss_clnt_gssd_flags |= nfs_1des;
+
if (kr != KERN_SUCCESS) {
printf("nfs_gss_clnt_gssd_upcall: mach_gss_init_sec_context failed: %x (%d)\n", kr, kr);
if (kr == MIG_SERVER_DIED && cp->gss_clnt_cred_handle == 0 &&
- retry_cnt++ < NFS_GSS_MACH_MAX_RETRIES) {
+ retry_cnt++ < NFS_GSS_MACH_MAX_RETRIES &&
+ !vfs_isforce(nmp->nm_mountp) && (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) == 0) {
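+ /*
+ * The out-of-line buffers were presumably consumed by the failed
+ * upcall, so rebuild them before retrying.
+ */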
+ if (plen)
+ nfs_gss_mach_alloc_buffer(principal, plen, &pname);
+ if (cp->gss_clnt_svcnamlen)
+ nfs_gss_mach_alloc_buffer(cp->gss_clnt_svcname, cp->gss_clnt_svcnamlen, &svcname);
if (cp->gss_clnt_tokenlen > 0)
nfs_gss_mach_alloc_buffer(cp->gss_clnt_token, cp->gss_clnt_tokenlen, &itoken);
goto retry;
}
- task_release_special_port(cp->gss_clnt_mport);
- cp->gss_clnt_mport = NULL;
- goto out;
+
+ host_release_special_port(cp->gss_clnt_mport);
+ cp->gss_clnt_mport = IPC_PORT_NULL;
+ goto out;
+ }
+
+ if (cp->gss_clnt_display == NULL && *display_name != '\0') {
+ int dlen = strnlen(display_name, MAX_DISPLAY_STR) + 1; /* Add extra byte to include '\0' */
+
+ if (dlen < MAX_DISPLAY_STR) {
+ MALLOC(cp->gss_clnt_display, char *, dlen, M_TEMP, M_WAITOK);
+ if (cp->gss_clnt_display == NULL)
+ goto skip;
+ bcopy(display_name, cp->gss_clnt_display, dlen);
+ } else {
+ goto skip;
+ }
}
-
+skip:
/*
* Make sure any unusual errors are expanded and logged by gssd
+ *
+ * XXXX, we need to rethink this and just have gssd return a string for the major and minor codes.
*/
if (cp->gss_clnt_major != GSS_S_COMPLETE &&
cp->gss_clnt_major != GSS_S_CONTINUE_NEEDED) {
- char who[] = "client";
- char unknown[] = "<unknown>";
-
- (void) mach_gss_log_error(
- cp->gss_clnt_mport,
- !req->r_nmp ? unknown :
- vfs_statfs(req->r_nmp->nm_mountp)->f_mntfromname,
- cp->gss_clnt_uid,
- who,
- cp->gss_clnt_major,
- cp->gss_clnt_minor);
+ nfs_gss_clnt_log_error(req, cp, major, minor);
}
if (skeylen > 0) {
goto out;
}
error = nfs_gss_mach_vmcopyout((vm_map_copy_t) okey, skeylen,
- cp->gss_clnt_kinfo.skey);
+ cp->gss_clnt_kinfo->skey);
if (error) {
vm_map_copy_discard((vm_map_copy_t) otoken);
goto out;
}
- error = gss_key_init(&cp->gss_clnt_kinfo, skeylen);
+ error = gss_key_init(cp->gss_clnt_kinfo, skeylen);
if (error)
goto out;
}
{
struct nfsmount *nmp = req->r_nmp;
struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx;
+ int on_neg_cache = 0;
+ int neg_cache = 0;
+ int destroy = 0;
+ struct timeval now;
+ char CTXBUF[NFS_CTXBUFSZ];
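+
+ /*
+ * Drop our reference. If it was the last one and the context is
+ * marked for destruction, remove it from the mount's list and free
+ * it; otherwise, if the context is merely invalid and not yet
+ * timestamped, park it in the negative cache.
+ */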
if (cp == NULL)
return;
req->r_gss_ctx = NULL;
lck_mtx_lock(cp->gss_clnt_mtx);
- if (--cp->gss_clnt_refcnt == 0
- && cp->gss_clnt_flags & GSS_CTX_INVAL) {
- lck_mtx_unlock(cp->gss_clnt_mtx);
-
- if (nmp)
+ if (--cp->gss_clnt_refcnt < 0)
+ panic("Over release of gss context!\n");
+
+ if (cp->gss_clnt_refcnt == 0) {
+ if ((cp->gss_clnt_flags & GSS_CTX_INVAL) &&
+ cp->gss_clnt_kinfo) {
+ FREE(cp->gss_clnt_kinfo, M_TEMP);
+ cp->gss_clnt_kinfo = NULL;
+ }
+ if (cp->gss_clnt_flags & GSS_CTX_DESTROY) {
+ destroy = 1;
+ if (cp->gss_clnt_flags & GSS_CTX_STICKY)
+ nfs_gss_clnt_mnt_rele(nmp);
+ if (cp->gss_clnt_nctime)
+ on_neg_cache = 1;
+ }
+ }
+ if (!destroy && cp->gss_clnt_nctime == 0 &&
+ (cp->gss_clnt_flags & GSS_CTX_INVAL)) {
+ microuptime(&now);
+ cp->gss_clnt_nctime = now.tv_sec;
+ neg_cache = 1;
+ }
+ lck_mtx_unlock(cp->gss_clnt_mtx);
+ if (destroy) {
+ NFS_GSS_DBG("Destroying context %s\n", NFS_GSS_CTX(req, cp));
+ if (nmp) {
+ lck_mtx_lock(&nmp->nm_lock);
+ if (cp->gss_clnt_entries.tqe_next != NFSNOLIST) {
+ TAILQ_REMOVE(&nmp->nm_gsscl, cp, gss_clnt_entries);
+ }
+ if (on_neg_cache) {
+ nmp->nm_ncentries--;
+ }
+ lck_mtx_unlock(&nmp->nm_lock);
+ }
+ nfs_gss_clnt_ctx_destroy(cp);
+ } else if (neg_cache) {
+ NFS_GSS_DBG("Entering context %s into negative cache\n", NFS_GSS_CTX(req, cp));
+ if (nmp) {
lck_mtx_lock(&nmp->nm_lock);
- nfs_gss_clnt_ctx_remove(nmp, cp);
- if (nmp)
+ nmp->nm_ncentries++;
+ nfs_gss_clnt_ctx_neg_cache_reap(nmp);
lck_mtx_unlock(&nmp->nm_lock);
+ }
+ }
+ NFS_GSS_CLNT_CTX_DUMP(nmp);
+}
- return;
+/*
+ * Try and reap any old entries from the negative
+ * cache queue.
+ */
+void
+nfs_gss_clnt_ctx_neg_cache_reap(struct nfsmount *nmp)
+{
+ struct nfs_gss_clnt_ctx *cp, *tcp;
+ struct timeval now;
+ int reaped = 0;
+
+ NFS_GSS_DBG("Reaping contexts ncentries = %d\n", nmp->nm_ncentries);
+ microuptime(&now);
+ /* Try and reap old, unreferenced, expired contexts */
+
+ TAILQ_FOREACH_SAFE(cp, &nmp->nm_gsscl, gss_clnt_entries, tcp) {
+ int destroy = 0;
+
+ /* Don't reap STICKY contexts */
+ if ((cp->gss_clnt_flags & GSS_CTX_STICKY) ||
+ !(cp->gss_clnt_flags & GSS_CTX_INVAL))
+ continue;
+ /* Keep up to GSS_MAX_NEG_CACHE_ENTRIES */
+ if (nmp->nm_ncentries <= GSS_MAX_NEG_CACHE_ENTRIES)
+ break;
+ /* Contexts too young */
+ if (cp->gss_clnt_nctime + GSS_NEG_CACHE_TO >= now.tv_sec)
+ continue;
+ /* Not referenced, remove it. */
+ lck_mtx_lock(cp->gss_clnt_mtx);
+ if (cp->gss_clnt_refcnt == 0) {
+ cp->gss_clnt_flags |= GSS_CTX_DESTROY;
+ destroy = 1;
+ }
+ lck_mtx_unlock(cp->gss_clnt_mtx);
+ if (destroy) {
+ TAILQ_REMOVE(&nmp->nm_gsscl, cp, gss_clnt_entries);
+ nmp->nm_ncentries++;
+ reaped++;
+ nfs_gss_clnt_ctx_destroy(cp);
+ }
}
- lck_mtx_unlock(cp->gss_clnt_mtx);
+ NFS_GSS_DBG("Reaped %d contexts ncentries = %d\n", reaped, nmp->nm_ncentries);
}
/*
- * Remove a context
+ * Clean a context to be cached
*/
static void
-nfs_gss_clnt_ctx_remove(struct nfsmount *nmp, struct nfs_gss_clnt_ctx *cp)
+nfs_gss_clnt_ctx_clean(struct nfs_gss_clnt_ctx *cp)
{
+ /* Preserve gss_clnt_mtx */
+ assert(cp->gss_clnt_thread == NULL); /* Will be set to this thread */
+ /* gss_clnt_entries: we should not be on any list at this point */
+ cp->gss_clnt_flags = 0;
+ /* gss_clnt_refcnt should be zero */
+ assert(cp->gss_clnt_refcnt == 0);
/*
- * If dequeueing, assume nmp->nm_lock is held
+ * We are who we are, so preserve:
+ * gss_clnt_cred
+ * gss_clnt_principal
+ * gss_clnt_prinlen
+ * gss_clnt_prinnt
+ * gss_clnt_display
*/
- if (nmp != NULL)
- TAILQ_REMOVE(&nmp->nm_gsscl, cp, gss_clnt_entries);
-
- if (cp->gss_clnt_mport)
- task_release_special_port(cp->gss_clnt_mport);
- if (cp->gss_clnt_mtx)
- lck_mtx_destroy(cp->gss_clnt_mtx, nfs_gss_clnt_grp);
- if (cp->gss_clnt_handle)
+ /* gss_clnt_proc will be set in nfs_gss_clnt_ctx_init */
+ cp->gss_clnt_seqnum = 0;
+ /* Preserve gss_clnt_service, we're not changing flavors */
+ if (cp->gss_clnt_handle) {
FREE(cp->gss_clnt_handle, M_TEMP);
- if (cp->gss_clnt_seqbits)
+ cp->gss_clnt_handle = NULL;
+ }
+ cp->gss_clnt_handle_len = 0;
+ cp->gss_clnt_nctime = 0;
+ cp->gss_clnt_seqwin = 0;
+ if (cp->gss_clnt_seqbits) {
FREE(cp->gss_clnt_seqbits, M_TEMP);
- if (cp->gss_clnt_token)
- FREE(cp->gss_clnt_token, M_TEMP);
- if (cp->gss_clnt_svcname)
+ cp->gss_clnt_seqbits = NULL;
+ }
+ /* Preserve gss_clnt_mport. Still talking to the same gssd */
+ if (cp->gss_clnt_verf) {
+ FREE(cp->gss_clnt_verf, M_TEMP);
+ cp->gss_clnt_verf = NULL;
+ }
+ /* Service name might change on failover, so reset it */
+ if (cp->gss_clnt_svcname) {
FREE(cp->gss_clnt_svcname, M_TEMP);
+ cp->gss_clnt_svcname = NULL;
+ cp->gss_clnt_svcnt = 0;
+ }
+ cp->gss_clnt_svcnamlen = 0;
+ cp->gss_clnt_cred_handle = 0;
+ cp->gss_clnt_context = 0;
+ if (cp->gss_clnt_token) {
+ FREE(cp->gss_clnt_token, M_TEMP);
+ cp->gss_clnt_token = NULL;
+ }
+ cp->gss_clnt_tokenlen = 0;
+ if (cp->gss_clnt_kinfo)
+ bzero(cp->gss_clnt_kinfo, sizeof(gss_key_info));
+ /*
+ * Preserve:
+ * gss_clnt_gssd_flags
+ * gss_clnt_major
+ * gss_clnt_minor
+ * gss_clnt_ptime
+ */
+}
+
+/*
+ * Copy a source context to a new context. This is used to create a new context
+ * with the identity of the old context for renewal. The old context is invalid
+ * at this point but may still have references to it, so it is not safe to use that
+ * context.
+ */
+static int
+nfs_gss_clnt_ctx_copy(struct nfs_gss_clnt_ctx *scp, struct nfs_gss_clnt_ctx **dcpp, gss_key_info *ki)
+{
+ struct nfs_gss_clnt_ctx *dcp;
+
+ *dcpp = (struct nfs_gss_clnt_ctx *)NULL;
+ MALLOC(dcp, struct nfs_gss_clnt_ctx *, sizeof (struct nfs_gss_clnt_ctx), M_TEMP, M_WAITOK);
+ if (dcp == NULL)
+ return (ENOMEM);
+ bzero(dcp, sizeof (struct nfs_gss_clnt_ctx));
+ if (ki == NULL) {
+ MALLOC(dcp->gss_clnt_kinfo, gss_key_info *, sizeof (gss_key_info), M_TEMP, M_WAITOK);
+ if (dcp->gss_clnt_kinfo == NULL) {
+ FREE(dcp, M_TEMP);
+ return (ENOMEM);
+ }
+ } else {
+ dcp->gss_clnt_kinfo = ki;
+ }
+ bzero(dcp->gss_clnt_kinfo, sizeof (gss_key_info));
+ dcp->gss_clnt_mtx = lck_mtx_alloc_init(nfs_gss_clnt_grp, LCK_ATTR_NULL);
+ dcp->gss_clnt_cred = scp->gss_clnt_cred;
+ kauth_cred_ref(dcp->gss_clnt_cred);
+ dcp->gss_clnt_prinlen = scp->gss_clnt_prinlen;
+ dcp->gss_clnt_prinnt = scp->gss_clnt_prinnt;
+ if (scp->gss_clnt_principal) {
+ MALLOC(dcp->gss_clnt_principal, uint8_t *, dcp->gss_clnt_prinlen, M_TEMP, M_WAITOK | M_ZERO);
+ if (dcp->gss_clnt_principal == NULL) {
+ FREE(dcp->gss_clnt_kinfo, M_TEMP);
+ FREE(dcp, M_TEMP);
+ return (ENOMEM);
+ }
+ bcopy(scp->gss_clnt_principal, dcp->gss_clnt_principal, dcp->gss_clnt_prinlen);
+ }
+ /* Note we don't preserve the display name, that will be set by a successful up call */
+ dcp->gss_clnt_service = scp->gss_clnt_service;
+ dcp->gss_clnt_mport = host_copy_special_port(scp->gss_clnt_mport);
+ /* gss_clnt_kinfo allocated above */
+ dcp->gss_clnt_gssd_flags = scp->gss_clnt_gssd_flags;
+ dcp->gss_clnt_major = scp->gss_clnt_major;
+ dcp->gss_clnt_minor = scp->gss_clnt_minor;
+ dcp->gss_clnt_ptime = scp->gss_clnt_ptime;
+
+ *dcpp = dcp;
+
+ return (0);
+}
+
+/*
+ * Remove a context
+ */
+static void
+nfs_gss_clnt_ctx_destroy(struct nfs_gss_clnt_ctx *cp)
+{
+ NFS_GSS_DBG("Destroying context %d/%d\n",
+ kauth_cred_getasid(cp->gss_clnt_cred),
+ kauth_cred_getauid(cp->gss_clnt_cred));
+
+ host_release_special_port(cp->gss_clnt_mport);
+ cp->gss_clnt_mport = IPC_PORT_NULL;
+
+ if (cp->gss_clnt_mtx) {
+ lck_mtx_destroy(cp->gss_clnt_mtx, nfs_gss_clnt_grp);
+ cp->gss_clnt_mtx = (lck_mtx_t *)NULL;
+ }
+ if (IS_VALID_CRED(cp->gss_clnt_cred))
+ kauth_cred_unref(&cp->gss_clnt_cred);
+ cp->gss_clnt_entries.tqe_next = NFSNOLIST;
+ cp->gss_clnt_entries.tqe_prev = NFSNOLIST;
+ if (cp->gss_clnt_principal) {
+ FREE(cp->gss_clnt_principal, M_TEMP);
+ cp->gss_clnt_principal = NULL;
+ }
+ if (cp->gss_clnt_display) {
+ FREE(cp->gss_clnt_display, M_TEMP);
+ cp->gss_clnt_display = NULL;
+ }
+ if (cp->gss_clnt_kinfo) {
+ FREE(cp->gss_clnt_kinfo, M_TEMP);
+ cp->gss_clnt_kinfo = NULL;
+ }
+
+ nfs_gss_clnt_ctx_clean(cp);
+
FREE(cp, M_TEMP);
}
nfs_gss_clnt_ctx_renew(struct nfsreq *req)
{
struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx;
- struct nfsmount *nmp = req->r_nmp;
struct nfs_gss_clnt_ctx *ncp;
+ struct nfsmount *nmp;
int error = 0;
- uid_t saved_uid;
- mach_port_t saved_mport;
- int retrycnt = 0;
+ char CTXBUF[NFS_CTXBUFSZ];
if (cp == NULL)
return (0);
+ if (req->r_nmp == NULL)
+ return (ENXIO);
+ nmp = req->r_nmp;
+
lck_mtx_lock(cp->gss_clnt_mtx);
if (cp->gss_clnt_flags & GSS_CTX_INVAL) {
lck_mtx_unlock(cp->gss_clnt_mtx);
nfs_gss_clnt_ctx_unref(req);
return (0); // already being renewed
}
- saved_uid = cp->gss_clnt_uid;
- saved_mport = task_copy_special_port(cp->gss_clnt_mport);
- /* Remove the old context */
- cp->gss_clnt_flags |= GSS_CTX_INVAL;
+ cp->gss_clnt_flags |= (GSS_CTX_INVAL | GSS_CTX_DESTROY);
- /*
- * If there's a thread waiting
- * in the old context, wake it up.
- */
if (cp->gss_clnt_flags & (GSS_NEEDCTX | GSS_NEEDSEQ)) {
cp->gss_clnt_flags &= ~GSS_NEEDSEQ;
wakeup(cp);
}
lck_mtx_unlock(cp->gss_clnt_mtx);
+ error = nfs_gss_clnt_ctx_copy(cp, &ncp, NULL);
+ NFS_GSS_DBG("Renewing context %s\n", NFS_GSS_CTX(req, ncp));
+ nfs_gss_clnt_ctx_unref(req);
+ if (error)
+ return (error);
+
+ lck_mtx_lock(&nmp->nm_lock);
/*
- * Create a new context
+ * Note we don't bother taking the new context mutex as we're
+ * not findable at the moment.
*/
- MALLOC(ncp, struct nfs_gss_clnt_ctx *, sizeof(*ncp),
- M_TEMP, M_WAITOK|M_ZERO);
- if (ncp == NULL) {
- error = ENOMEM;
- goto out;
- }
-
- ncp->gss_clnt_uid = saved_uid;
- ncp->gss_clnt_mport = task_copy_special_port(saved_mport); // re-use the gssd port
- ncp->gss_clnt_mtx = lck_mtx_alloc_init(nfs_gss_clnt_grp, LCK_ATTR_NULL);
ncp->gss_clnt_thread = current_thread();
- lck_mtx_lock(&nmp->nm_lock);
- TAILQ_INSERT_TAIL(&nmp->nm_gsscl, ncp, gss_clnt_entries);
- lck_mtx_unlock(&nmp->nm_lock);
-
- /* Adjust reference counts to new and old context */
- nfs_gss_clnt_ctx_unref(req);
nfs_gss_clnt_ctx_ref(req, ncp);
+ TAILQ_INSERT_HEAD(&nmp->nm_gsscl, ncp, gss_clnt_entries);
+ lck_mtx_unlock(&nmp->nm_lock);
-retry:
- error = nfs_gss_clnt_ctx_init(req, ncp); // Initialize new context
- if (error == ENEEDAUTH) {
- error = nfs_gss_clnt_ctx_delay(req, &retrycnt);
- if (!error)
- goto retry;
- }
-out:
- task_release_special_port(saved_mport);
+ error = nfs_gss_clnt_ctx_init_retry(req, ncp); // Initialize new context
if (error)
nfs_gss_clnt_ctx_unref(req);
return (error);
}
+
/*
* Destroy all the contexts associated with a mount.
* The contexts are also destroyed by the server.
*/
void
-nfs_gss_clnt_ctx_unmount(struct nfsmount *nmp, int mntflags)
+nfs_gss_clnt_ctx_unmount(struct nfsmount *nmp)
{
struct nfs_gss_clnt_ctx *cp;
- struct ucred temp_cred;
- kauth_cred_t cred;
struct nfsm_chain nmreq, nmrep;
int error, status;
struct nfsreq req;
-
- bzero((caddr_t) &temp_cred, sizeof(temp_cred));
- temp_cred.cr_ngroups = 1;
req.r_nmp = nmp;
- for (;;) {
- lck_mtx_lock(&nmp->nm_lock);
- cp = TAILQ_FIRST(&nmp->nm_gsscl);
- lck_mtx_unlock(&nmp->nm_lock);
- if (cp == NULL)
- break;
+ if (!nmp)
+ return;
- nfs_gss_clnt_ctx_ref(&req, cp);
+ lck_mtx_lock(&nmp->nm_lock);
+ while((cp = TAILQ_FIRST(&nmp->nm_gsscl))) {
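+ /*
+ * Pull each context off the list with a reference held, then drop
+ * nm_lock so the server can be told to destroy the context without
+ * holding the mount lock across the RPC.
+ */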
+ TAILQ_REMOVE(&nmp->nm_gsscl, cp, gss_clnt_entries);
+ cp->gss_clnt_entries.tqe_next = NFSNOLIST;
+ lck_mtx_lock(cp->gss_clnt_mtx);
+ if (cp->gss_clnt_flags & GSS_CTX_DESTROY) {
+ lck_mtx_unlock(cp->gss_clnt_mtx);
+ continue;
+ }
+ cp->gss_clnt_refcnt++;
+ lck_mtx_unlock(cp->gss_clnt_mtx);
+ req.r_gss_ctx = cp;
+
+ lck_mtx_unlock(&nmp->nm_lock);
/*
* Tell the server to destroy its context.
- * But don't bother if it's a forced unmount
- * or if it's a dummy sec=sys context.
+ * But don't bother if it's a forced unmount.
*/
- if (!(mntflags & MNT_FORCE) && cp->gss_clnt_service != RPCSEC_GSS_SVC_SYS) {
- temp_cred.cr_uid = cp->gss_clnt_uid;
- cred = kauth_cred_create(&temp_cred);
+ if (!nfs_mount_gone(nmp) &&
+ (cp->gss_clnt_flags & (GSS_CTX_INVAL | GSS_CTX_DESTROY | GSS_CTX_COMPLETE)) == GSS_CTX_COMPLETE) {
cp->gss_clnt_proc = RPCSEC_GSS_DESTROY;
error = 0;
nfsm_chain_build_done(error, &nmreq);
if (!error)
nfs_request_gss(nmp->nm_mountp, &nmreq,
- current_thread(), cred, 0, cp, &nmrep, &status);
+ current_thread(), cp->gss_clnt_cred, 0, cp, &nmrep, &status);
nfsm_chain_cleanup(&nmreq);
nfsm_chain_cleanup(&nmrep);
- kauth_cred_unref(&cred);
}
/*
* the reference to remove it if its
* refcount is zero.
*/
- cp->gss_clnt_flags |= GSS_CTX_INVAL;
+ lck_mtx_lock(cp->gss_clnt_mtx);
+ cp->gss_clnt_flags |= (GSS_CTX_INVAL | GSS_CTX_DESTROY);
+ lck_mtx_unlock(cp->gss_clnt_mtx);
nfs_gss_clnt_ctx_unref(&req);
+ lck_mtx_lock(&nmp->nm_lock);
}
+ lck_mtx_unlock(&nmp->nm_lock);
+ assert(TAILQ_EMPTY(&nmp->nm_gsscl));
}
+
/*
- * If we get a failure in trying to establish a context we need to wait a
- * little while to see if the server is feeling better. In our case this is
- * probably a failure in directory services not coming up in a timely fashion.
- * This routine sort of mimics receiving a jukebox error.
+ * Removes a mount's context for a credential.
*/
-static int
-nfs_gss_clnt_ctx_delay(struct nfsreq *req, int *retry)
+int
+nfs_gss_clnt_ctx_remove(struct nfsmount *nmp, kauth_cred_t cred)
{
- int timeo = (1 << *retry) * NFS_TRYLATERDEL;
- int error = 0;
- struct nfsmount *nmp = req->r_nmp;
- struct timeval now;
- time_t waituntil;
+ struct nfs_gss_clnt_ctx *cp;
+ struct nfsreq req;
- if (!nmp)
- return (ENXIO);
- if ((nmp->nm_flag & NFSMNT_SOFT) && *retry > nmp->nm_retry)
- return (ETIMEDOUT);
- if (timeo > 60)
- timeo = 60;
+ req.r_nmp = nmp;
- microuptime(&now);
- waituntil = now.tv_sec + timeo;
- while (now.tv_sec < waituntil) {
- tsleep(&lbolt, PSOCK, "nfs_gss_clnt_ctx_delay", 0);
- error = nfs_sigintr(req->r_nmp, req, current_thread(), 0);
- if (error)
- break;
- microuptime(&now);
+ NFS_GSS_DBG("Enter\n");
+ NFS_GSS_CLNT_CTX_DUMP(nmp);
+ lck_mtx_lock(&nmp->nm_lock);
+ TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) {
+ lck_mtx_lock(cp->gss_clnt_mtx);
+ if (nfs_gss_clnt_ctx_cred_match(cp->gss_clnt_cred, cred)) {
+ if (cp->gss_clnt_flags & GSS_CTX_DESTROY) {
+ NFS_GSS_DBG("Found destroyed context %d/%d. refcnt = %d continuing\n",
+ kauth_cred_getasid(cp->gss_clnt_cred),
+ kauth_cred_getauid(cp->gss_clnt_cred),
+ cp->gss_clnt_refcnt);
+ lck_mtx_unlock(cp->gss_clnt_mtx);
+ continue;
+ }
+ cp->gss_clnt_refcnt++;
+ cp->gss_clnt_flags |= (GSS_CTX_INVAL | GSS_CTX_DESTROY);
+ lck_mtx_unlock(cp->gss_clnt_mtx);
+ req.r_gss_ctx = cp;
+ lck_mtx_unlock(&nmp->nm_lock);
+ /*
+ * Drop the reference to remove it if its
+ * refcount is zero.
+ */
+ NFS_GSS_DBG("Removed context %d/%d refcnt = %d\n",
+ kauth_cred_getasid(cp->gss_clnt_cred),
+ kauth_cred_getuid(cp->gss_clnt_cred),
+ cp->gss_clnt_refcnt);
+ nfs_gss_clnt_ctx_unref(&req);
+ return (0);
+ }
+ lck_mtx_unlock(cp->gss_clnt_mtx);
}
- *retry += 1;
+
+ lck_mtx_unlock(&nmp->nm_lock);
+
+ NFS_GSS_DBG("Returning ENOENT\n");
+ return (ENOENT);
+}
+
+/*
+ * Sets a mount's principal for a session associated with cred.
+ */
+int
+nfs_gss_clnt_ctx_set_principal(struct nfsmount *nmp, vfs_context_t ctx,
+ uint8_t *principal, uint32_t princlen, uint32_t nametype)
+
+{
+ struct nfsreq req;
+ int error;
+
+ NFS_GSS_DBG("Enter:\n");
+
+ bzero(&req, sizeof(struct nfsreq));
+ req.r_nmp = nmp;
+ req.r_gss_ctx = NULL;
+ req.r_auth = nmp->nm_auth;
+ req.r_thread = vfs_context_thread(ctx);
+ req.r_cred = vfs_context_ucred(ctx);
+
+ error = nfs_gss_clnt_ctx_find_principal(&req, principal, princlen, nametype);
+ NFS_GSS_DBG("nfs_gss_clnt_ctx_find_principal returned %d\n", error);
+ /*
+ * We don't care about auth errors. Those would indicate that the context is in the
+ * negative cache and if and when the user has credentials for the principal
+ * we should be good to go in that we will select those credentials for this principal.
+ */
+ if (error == EACCES || error == EAUTH || error == ENEEDAUTH)
+ error = 0;
+
+ /* We're done with this request */
+ nfs_gss_clnt_ctx_unref(&req);
return (error);
}
+/*
+ * Gets a mount's principal from a session associated with cred.
+ */
+int
+nfs_gss_clnt_ctx_get_principal(struct nfsmount *nmp, vfs_context_t ctx,
+ struct user_nfs_gss_principal *p)
+{
+ struct nfsreq req;
+ int error = 0;
+ struct nfs_gss_clnt_ctx *cp;
+ kauth_cred_t cred = vfs_context_ucred(ctx);
+ const char *princ;
+ char CTXBUF[NFS_CTXBUFSZ];
+
+ req.r_nmp = nmp;
+ lck_mtx_lock(&nmp->nm_lock);
+ TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) {
+ lck_mtx_lock(cp->gss_clnt_mtx);
+ if (cp->gss_clnt_flags & GSS_CTX_DESTROY) {
+ NFS_GSS_DBG("Found destroyed context %s refcnt = %d continuing\n",
+ NFS_GSS_CTX(&req, cp),
+ cp->gss_clnt_refcnt);
+ lck_mtx_unlock(cp->gss_clnt_mtx);
+ continue;
+ }
+ if (nfs_gss_clnt_ctx_cred_match(cp->gss_clnt_cred, cred)) {
+ cp->gss_clnt_refcnt++;
+ lck_mtx_unlock(cp->gss_clnt_mtx);
+ goto out;
+ }
+ lck_mtx_unlock(cp->gss_clnt_mtx);
+ }
+
+out:
+ if (cp == NULL) {
+ lck_mtx_unlock(&nmp->nm_lock);
+ p->princlen = 0;
+ p->principal = USER_ADDR_NULL;
+ p->nametype = GSSD_STRING_NAME;
+ p->flags |= NFS_IOC_NO_CRED_FLAG;
+ NFS_GSS_DBG("No context found for session %d by uid %d\n",
+ kauth_cred_getasid(cred), kauth_cred_getuid(cred));
+ return (0);
+ }
+
+ princ = cp->gss_clnt_principal ? (char *)cp->gss_clnt_principal : cp->gss_clnt_display;
+ p->princlen = cp->gss_clnt_principal ? cp->gss_clnt_prinlen :
+ (cp->gss_clnt_display ? strlen(cp->gss_clnt_display) : 0);
+ p->nametype = cp->gss_clnt_prinnt;
+ if (princ) {
+ char *pp;
+
+ MALLOC(pp, char *, p->princlen, M_TEMP, M_WAITOK);
+ if (pp) {
+ bcopy(princ, pp, p->princlen);
+ p->principal = CAST_USER_ADDR_T(pp);
+ }
+ else
+ error = ENOMEM;
+ }
+ lck_mtx_unlock(&nmp->nm_lock);
+ req.r_gss_ctx = cp;
+ NFS_GSS_DBG("Found context %s\n", NFS_GSS_CTX(&req, NULL));
+ nfs_gss_clnt_ctx_unref(&req);
+ return (error);
+}
#endif /* NFSCLIENT */
/*************
lck_mtx_lock(nfs_gss_svc_ctx_mutex);
- LIST_FOREACH(cp, head, gss_svc_entries)
+ LIST_FOREACH(cp, head, gss_svc_entries) {
if (cp->gss_svc_handle == handle) {
if (timenow > cp->gss_svc_incarnation + GSS_SVC_CTX_TTL) {
/*
*/
cp->gss_svc_handle = 0;
/*
- * Make sure though that we stay around for GSS_CTC_PEND seconds
+ * Make sure though that we stay around for GSS_CTX_PEND seconds
* for other threads that might be using the context.
*/
cp->gss_svc_incarnation = timenow;
+
cp = NULL;
+ break;
}
+ lck_mtx_lock(cp->gss_svc_mtx);
+ cp->gss_svc_refcnt++;
+ lck_mtx_unlock(cp->gss_svc_mtx);
break;
}
+ }
lck_mtx_unlock(nfs_gss_svc_ctx_mutex);
nfs_gss_svc_ctx_insert(struct nfs_gss_svc_ctx *cp)
{
struct nfs_gss_svc_ctx_hashhead *head;
+ struct nfs_gss_svc_ctx *p;
+ lck_mtx_lock(nfs_gss_svc_ctx_mutex);
+
+ /*
+ * Give the client a random handle so that if we reboot
+ * it's unlikely the client will get a bad context match.
+ * Make sure it's not zero or already assigned.
+ */
+retry:
+ cp->gss_svc_handle = random();
+ if (cp->gss_svc_handle == 0)
+ goto retry;
head = &nfs_gss_svc_ctx_hashtbl[SVC_CTX_HASH(cp->gss_svc_handle)];
+ LIST_FOREACH(p, head, gss_svc_entries)
+ if (p->gss_svc_handle == cp->gss_svc_handle)
+ goto retry;
- lck_mtx_lock(nfs_gss_svc_ctx_mutex);
+ clock_interval_to_deadline(GSS_CTX_PEND, NSEC_PER_SEC,
+ &cp->gss_svc_incarnation);
LIST_INSERT_HEAD(head, cp, gss_svc_entries);
nfs_gss_ctx_count++;
nfs_gss_timer_on = 1;
nfs_interval_timer_start(nfs_gss_svc_ctx_timer_call,
- min(GSS_TIMER_PERIOD, max(GSS_CTX_TTL_MIN, GSS_SVC_CTX_TTL)) * MSECS_PER_SEC);
+ min(GSS_TIMER_PERIOD, max(GSS_CTX_TTL_MIN, nfsrv_gss_context_ttl)) * MSECS_PER_SEC);
}
lck_mtx_unlock(nfs_gss_svc_ctx_mutex);
void
nfs_gss_svc_ctx_timer(__unused void *param1, __unused void *param2)
{
- struct nfs_gss_svc_ctx_hashhead *head;
struct nfs_gss_svc_ctx *cp, *next;
uint64_t timenow;
int contexts = 0;
lck_mtx_lock(nfs_gss_svc_ctx_mutex);
clock_get_uptime(&timenow);
+ NFS_GSS_DBG("is running\n");
+
/*
* Scan all the hash chains
- * Assume nfs_gss_svc_ctx_mutex is held
*/
for (i = 0; i < SVC_CTX_HASHSZ; i++) {
/*
* For each hash chain, look for entries
* that haven't been used in a while.
*/
- head = &nfs_gss_svc_ctx_hashtbl[i];
- for (cp = LIST_FIRST(head); cp; cp = next) {
+ LIST_FOREACH_SAFE(cp, &nfs_gss_svc_ctx_hashtbl[i], gss_svc_entries, next) {
contexts++;
- next = LIST_NEXT(cp, gss_svc_entries);
- if (timenow > cp->gss_svc_incarnation +
- (cp->gss_svc_handle ? GSS_SVC_CTX_TTL : 0)) {
+ if (timenow > cp->gss_svc_incarnation +
+ (cp->gss_svc_handle ? GSS_SVC_CTX_TTL : 0)
+ && cp->gss_svc_refcnt == 0) {
/*
* A stale context - remove it
*/
LIST_REMOVE(cp, gss_svc_entries);
+ NFS_GSS_DBG("Removing contex for %d\n", cp->gss_svc_uid);
if (cp->gss_svc_seqbits)
FREE(cp->gss_svc_seqbits, M_TEMP);
lck_mtx_destroy(cp->gss_svc_mtx, nfs_gss_svc_grp);
nfs_gss_timer_on = nfs_gss_ctx_count > 0;
if (nfs_gss_timer_on)
nfs_interval_timer_start(nfs_gss_svc_ctx_timer_call,
- min(GSS_TIMER_PERIOD, max(GSS_CTX_TTL_MIN, GSS_SVC_CTX_TTL)) * MSECS_PER_SEC);
+ min(GSS_TIMER_PERIOD, max(GSS_CTX_TTL_MIN, nfsrv_gss_context_ttl)) * MSECS_PER_SEC);
lck_mtx_unlock(nfs_gss_svc_ctx_mutex);
}
error = ENOMEM;
goto nfsmout;
}
+ cp->gss_svc_mtx = lck_mtx_alloc_init(nfs_gss_svc_grp, LCK_ATTR_NULL);
+ cp->gss_svc_refcnt = 1;
} else {
/*
ki = &cp->gss_svc_kinfo;
if (proc == RPCSEC_GSS_DATA || proc == RPCSEC_GSS_DESTROY) {
- struct ucred temp_cred;
+ struct posix_cred temp_pcred;
if (cp->gss_svc_seqwin == 0) {
/*
*/
nfsm_chain_get_32(error, nmc, flavor);
nfsm_chain_get_32(error, nmc, verflen);
+ if (error)
+ goto nfsmout;
if (flavor != RPCSEC_GSS || verflen != KRB5_SZ_TOKEN(ki->hash_len))
error = NFSERR_AUTHERR | AUTH_BADVERF;
nfsm_chain_get_opaque(error, nmc, verflen, tokbuf);
/*
* Set up the user's cred
*/
- bzero(&temp_cred, sizeof(temp_cred));
- temp_cred.cr_uid = cp->gss_svc_uid;
- bcopy(cp->gss_svc_gids, temp_cred.cr_groups,
+ bzero(&temp_pcred, sizeof(temp_pcred));
+ temp_pcred.cr_uid = cp->gss_svc_uid;
+ bcopy(cp->gss_svc_gids, temp_pcred.cr_groups,
sizeof(gid_t) * cp->gss_svc_ngroups);
- temp_cred.cr_ngroups = cp->gss_svc_ngroups;
+ temp_pcred.cr_ngroups = cp->gss_svc_ngroups;
- nd->nd_cr = kauth_cred_create(&temp_cred);
+ nd->nd_cr = posix_cred_create(&temp_pcred);
if (nd->nd_cr == NULL) {
error = ENOMEM;
goto nfsmout;
nfsm_chain_get_32(error, nmc, verflen);
if (error || flavor != RPCAUTH_NULL || verflen > 0)
error = NFSERR_AUTHERR | RPCSEC_GSS_CREDPROBLEM;
- if (error)
+ if (error) {
+ if (proc == RPCSEC_GSS_INIT) {
+ lck_mtx_destroy(cp->gss_svc_mtx, nfs_gss_svc_grp);
+ FREE(cp, M_TEMP);
+ cp = NULL;
+ }
goto nfsmout;
+ }
}
nd->nd_gss_context = cp;
+ return 0;
nfsmout:
+ if (cp)
+ nfs_gss_svc_ctx_deref(cp);
return (error);
}
nfs_gss_svc_ctx_init(struct nfsrv_descript *nd, struct nfsrv_sock *slp, mbuf_t *mrepp)
{
struct nfs_gss_svc_ctx *cp = NULL;
- uint32_t handle = 0;
int error = 0;
int autherr = 0;
struct nfsm_chain *nmreq, nmrep;
switch (cp->gss_svc_proc) {
case RPCSEC_GSS_INIT:
- /*
- * Give the client a random handle so that
- * if we reboot it's unlikely the client
- * will get a bad context match.
- * Make sure it's not zero, or already assigned.
- */
- do {
- handle = random();
- } while (nfs_gss_svc_ctx_find(handle) != NULL || handle == 0);
- cp->gss_svc_handle = handle;
- cp->gss_svc_mtx = lck_mtx_alloc_init(nfs_gss_svc_grp, LCK_ATTR_NULL);
- clock_interval_to_deadline(GSS_CTX_PEND, NSEC_PER_SEC,
- &cp->gss_svc_incarnation);
-
nfs_gss_svc_ctx_insert(cp);
-
/* FALLTHRU */
case RPCSEC_GSS_CONTINUE_INIT:
kern_return_t kr;
mach_port_t mp;
int retry_cnt = 0;
- byte_buffer okey = NULL;
+ gssd_byte_buffer okey = NULL;
uint32_t skeylen = 0;
uint32_t ret_flags;
vm_map_copy_t itoken = NULL;
- byte_buffer otoken = NULL;
+ gssd_byte_buffer otoken = NULL;
mach_msg_type_number_t otokenlen;
int error = 0;
char svcname[] = "nfs";
- kr = task_get_gssd_port(get_threadtask(current_thread()), &mp);
+ kr = host_get_gssd_port(host_priv_self(), &mp);
if (kr != KERN_SUCCESS) {
printf("nfs_gss_svc_gssd_upcall: can't get gssd port, status %x (%d)\n", kr, kr);
goto out;
retry:
kr = mach_gss_accept_sec_context(
mp,
- (byte_buffer) itoken, (mach_msg_type_number_t) cp->gss_svc_tokenlen,
+ (gssd_byte_buffer) itoken, (mach_msg_type_number_t) cp->gss_svc_tokenlen,
svcname,
0,
&cp->gss_svc_context,
nfs_gss_mach_alloc_buffer(cp->gss_svc_token, cp->gss_svc_tokenlen, &itoken);
goto retry;
}
- task_release_special_port(mp);
+ host_release_special_port(mp);
goto out;
}
- task_release_special_port(mp);
+ host_release_special_port(mp);
if (skeylen > 0) {
if (skeylen != SKEYLEN && skeylen != SKEYLEN3) {
return (1);
}
+/*
+ * Drop a reference to a context
+ *
+ * Note that it's OK for the context to exist
+ * with a refcount of zero. The refcount isn't
+ * checked until we're about to reap an expired one.
+ */
+void
+nfs_gss_svc_ctx_deref(struct nfs_gss_svc_ctx *cp)
+{
+ lck_mtx_lock(cp->gss_svc_mtx);
+ if (cp->gss_svc_refcnt > 0)
+ cp->gss_svc_refcnt--;
+ else
+ printf("nfs_gss_ctx_deref: zero refcount\n");
+ lck_mtx_unlock(cp->gss_svc_mtx);
+}
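/*
 * A minimal usage sketch (not part of the patch; the function name is
 * hypothetical): how the reference taken in nfs_gss_svc_ctx_find() pairs with
 * nfs_gss_svc_ctx_deref() above, which is what keeps the timer from reaping a
 * context that is still in use.
 */
static void
nfs_gss_svc_ctx_use_example(uint32_t handle)
{
	struct nfs_gss_svc_ctx *cp;

	cp = nfs_gss_svc_ctx_find(handle);	/* takes a reference if found */
	if (cp == NULL)
		return;
	/* ... service the RPC using cp ... */
	nfs_gss_svc_ctx_deref(cp);		/* drop the reference; context stays cached */
}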
+
/*
* Called at NFS server shutdown - destroy all contexts
*/
*/
/*
- * Release a task special port that was obtained by task_get_special_port
- * or one of its macros (task_get_gssd_port in this case).
+ * Release a host special port that was obtained by host_get_special_port
+ * or one of its macros (host_get_gssd_port in this case).
* This really should be in a public kpi.
*/
extern ipc_port_t ipc_port_copy_send(ipc_port_t);
static void
-task_release_special_port(mach_port_t mp)
+host_release_special_port(mach_port_t mp)
{
-
- ipc_port_release_send(mp);
+ if (IPC_PORT_VALID(mp))
+ ipc_port_release_send(mp);
}
static mach_port_t
-task_copy_special_port(mach_port_t mp)
+host_copy_special_port(mach_port_t mp)
{
- return ipc_port_copy_send(mp);
+ return (ipc_port_copy_send(mp));
}
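/*
 * A minimal sketch of the special-port lifecycle the two helpers above
 * support (the function name below is hypothetical): the port obtained from
 * host_get_gssd_port() carries a send right that must be dropped with
 * host_release_special_port() once the MIG upcall completes, whether or not
 * the upcall succeeded.
 */
static void
nfs_gss_port_lifecycle_example(void)
{
	mach_port_t mp;
	kern_return_t kr;

	kr = host_get_gssd_port(host_priv_self(), &mp);
	if (kr != KERN_SUCCESS)
		return;
	/* ... perform mach_gss_* MIG upcalls with mp ... */
	host_release_special_port(mp);	/* drop the send right obtained above */
}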
/*
if (buf == NULL || buflen == 0)
return;
- tbuflen = round_page(buflen);
- kr = vm_allocate(ipc_kernel_map, &kmem_buf, tbuflen, VM_FLAGS_ANYWHERE);
+ tbuflen = vm_map_round_page(buflen,
+ vm_map_page_mask(ipc_kernel_map));
+ kr = vm_allocate(ipc_kernel_map, &kmem_buf, tbuflen, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_FILE));
if (kr != 0) {
printf("nfs_gss_mach_alloc_buffer: vm_allocate failed\n");
return;
}
- kr = vm_map_wire(ipc_kernel_map, vm_map_trunc_page(kmem_buf),
- vm_map_round_page(kmem_buf + tbuflen),
- VM_PROT_READ|VM_PROT_WRITE, FALSE);
+ kr = vm_map_wire(ipc_kernel_map,
+ vm_map_trunc_page(kmem_buf,
+ vm_map_page_mask(ipc_kernel_map)),
+ vm_map_round_page(kmem_buf + tbuflen,
+ vm_map_page_mask(ipc_kernel_map)),
+ VM_PROT_READ|VM_PROT_WRITE|VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_FILE), FALSE);
if (kr != 0) {
printf("nfs_gss_mach_alloc_buffer: vm_map_wire failed\n");
return;
// Shouldn't need to bzero below since vm_allocate returns zeroed pages
// bzero(kmem_buf + buflen, tbuflen - buflen);
- kr = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(kmem_buf),
- vm_map_round_page(kmem_buf + tbuflen), FALSE);
+ kr = vm_map_unwire(ipc_kernel_map,
+ vm_map_trunc_page(kmem_buf,
+ vm_map_page_mask(ipc_kernel_map)),
+ vm_map_round_page(kmem_buf + tbuflen,
+ vm_map_page_mask(ipc_kernel_map)),
+ FALSE);
if (kr != 0) {
printf("nfs_gss_mach_alloc_buffer: vm_map_unwire failed\n");
return;
switch (ki->type) {
case NFS_GSS_1DES:
{
- des_key_schedule *sched = ((usage == KG_USAGE_SEAL) ?
+ des_cbc_key_schedule *sched = ((usage == KG_USAGE_SEAL) ?
&ki->ks_u.des.gss_sched_Ke :
&ki->ks_u.des.gss_sched);
- des_cbc_encrypt(in, out, len, *sched, iv, retiv, encrypt);
+ des_cbc_encrypt(in, out, len, sched, iv, retiv, encrypt);
}
break;
case NFS_GSS_3DES:
- des3_cbc_encrypt(in, out, len, ki->ks_u.des3.gss_sched, iv, retiv, encrypt);
+ des3_cbc_encrypt(in, out, len, &ki->ks_u.des3.gss_sched, iv, retiv, encrypt);
break;
}
}
ki->type = NFS_GSS_1DES;
ki->hash_len = MD5_DESCBC_DIGEST_LENGTH;
ki->ks_u.des.key = (des_cblock *)ki->skey;
- rc = des_key_sched(ki->ks_u.des.key, ki->ks_u.des.gss_sched);
+ rc = des_cbc_key_sched(ki->ks_u.des.key, &ki->ks_u.des.gss_sched);
if (rc)
return (rc);
for (i = 0; i < ki->keybytes; i++)
k[0][i] = 0xf0 ^ (*ki->ks_u.des.key)[i];
- rc = des_key_sched(&k[0], ki->ks_u.des.gss_sched_Ke);
+ rc = des_cbc_key_sched(&k[0], &ki->ks_u.des.gss_sched_Ke);
break;
case 3*sizeof(des_cblock):
ki->type = NFS_GSS_3DES;
ki->ks_u.des3.key = (des_cblock (*)[3])ki->skey;
des3_derive_key(*ki->ks_u.des3.key, ki->ks_u.des3.ckey,
KEY_USAGE_DES3_SIGN, KEY_USAGE_LEN);
- rc = des3_key_sched(*ki->ks_u.des3.key, ki->ks_u.des3.gss_sched);
+ rc = des3_cbc_key_sched(*ki->ks_u.des3.key, &ki->ks_u.des3.gss_sched);
if (rc)
return (rc);
break;