/*
- * Copyright (c) 2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2007-2015 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <sys/ubc.h>
#include <sys/malloc.h>
#include <sys/kpi_mbuf.h>
+#include <sys/ucred.h>
#include <kern/host.h>
+#include <kern/task.h>
#include <libkern/libkern.h>
#include <mach/task.h>
-#include <mach/task_special_ports.h>
+#include <mach/host_special_ports.h>
#include <mach/host_priv.h>
#include <mach/thread_act.h>
#include <mach/mig_errors.h>
#include <nfs/xdr_subs.h>
#include <nfs/nfsm_subs.h>
#include <nfs/nfs_gss.h>
+#include "nfs_gss_crypto.h"
+#include <mach_assert.h>
+#include <kern/assert.h>
+
+#define ASSERT(EX) assert(EX)
#define NFS_GSS_MACH_MAX_RETRIES 3
+#define NFS_GSS_DBG(...) NFS_DBG(NFS_FAC_GSS, 7, ## __VA_ARGS__)
+#define NFS_GSS_ISDBG (NFS_DEBUG_FACILITY & NFS_FAC_GSS)
+
+typedef struct {
+ int type;
+ union {
+ MD5_DESCBC_CTX m_ctx;
+ HMAC_SHA1_DES3KD_CTX h_ctx;
+ };
+} GSS_DIGEST_CTX;
+
+#define MAX_DIGEST SHA_DIGEST_LENGTH
+#ifdef NFS_KERNEL_DEBUG
+#define HASHLEN(ki) (((ki)->hash_len > MAX_DIGEST) ? \
+ (panic("nfs_gss.c:%d ki->hash_len is invalid = %d\n", __LINE__, (ki)->hash_len), MAX_DIGEST) : (ki)->hash_len)
+#else
+#define HASHLEN(ki) (((ki)->hash_len > MAX_DIGEST) ? \
+ (printf("nfs_gss.c:%d ki->hash_len is invalid = %d\n", __LINE__, (ki)->hash_len), MAX_DIGEST) : (ki)->hash_len)
+#endif
+
#if NFSSERVER
u_long nfs_gss_svc_ctx_hash;
struct nfs_gss_svc_ctx_hashhead *nfs_gss_svc_ctx_hashtbl;
lck_mtx_t *nfs_gss_svc_ctx_mutex;
lck_grp_t *nfs_gss_svc_grp;
+uint32_t nfsrv_gss_context_ttl = GSS_CTX_EXPIRE;
+#define GSS_SVC_CTX_TTL ((uint64_t)max(2*GSS_CTX_PEND, nfsrv_gss_context_ttl) * NSEC_PER_SEC)
#endif /* NFSSERVER */
#if NFSCLIENT
lck_grp_t *nfs_gss_clnt_grp;
+int nfs_single_des;
#endif /* NFSCLIENT */
/*
* These octet strings are used to encode/decode ASN.1 tokens
* in the RPCSEC_GSS verifiers.
*/
-static u_char krb5_tokhead[] = { 0x60, 0x23 };
-static u_char krb5_mech[] = { 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x12, 0x01, 0x02, 0x02 };
+static u_char krb5_tokhead[] __attribute__((unused)) = { 0x60, 0x23 };
+ u_char krb5_mech[11] = { 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x12, 0x01, 0x02, 0x02 };
static u_char krb5_mic[] = { 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff };
+static u_char krb5_mic3[] = { 0x01, 0x01, 0x04, 0x00, 0xff, 0xff, 0xff, 0xff };
static u_char krb5_wrap[] = { 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff };
+static u_char krb5_wrap3[] = { 0x02, 0x01, 0x04, 0x00, 0x02, 0x00, 0xff, 0xff };
static u_char iv0[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; // DES MAC Initialization Vector
+#define ALG_MIC(ki) (((ki)->type == NFS_GSS_1DES) ? krb5_mic : krb5_mic3)
+#define ALG_WRAP(ki) (((ki)->type == NFS_GSS_1DES) ? krb5_wrap : krb5_wrap3)
+
/*
* The size of the Kerberos v5 ASN.1 token
* in the verifier.
#define KRB5_SZ_MECH sizeof(krb5_mech)
#define KRB5_SZ_ALG sizeof(krb5_mic) // 8 - same as krb5_wrap
#define KRB5_SZ_SEQ 8
-#define KRB5_SZ_CKSUM 8
#define KRB5_SZ_EXTRA 3 // a wrap token may be longer by up to this many octets
-#define KRB5_SZ_TOKEN (KRB5_SZ_TOKHEAD + KRB5_SZ_MECH + KRB5_SZ_ALG + KRB5_SZ_SEQ + KRB5_SZ_CKSUM)
-#define KRB5_SZ_TOKMAX (KRB5_SZ_TOKEN + KRB5_SZ_EXTRA)
+#define KRB5_SZ_TOKEN_NOSUM (KRB5_SZ_TOKHEAD + KRB5_SZ_MECH + KRB5_SZ_ALG + KRB5_SZ_SEQ)
+#define KRB5_SZ_TOKEN(cksumlen) ((cksumlen) + KRB5_SZ_TOKEN_NOSUM)
+#define KRB5_SZ_TOKMAX(cksumlen) (KRB5_SZ_TOKEN(cksumlen) + KRB5_SZ_EXTRA)
#if NFSCLIENT
static int nfs_gss_clnt_ctx_find(struct nfsreq *);
-static int nfs_gss_clnt_ctx_failover(struct nfsreq *);
static int nfs_gss_clnt_ctx_init(struct nfsreq *, struct nfs_gss_clnt_ctx *);
+static int nfs_gss_clnt_ctx_init_retry(struct nfsreq *, struct nfs_gss_clnt_ctx *);
static int nfs_gss_clnt_ctx_callserver(struct nfsreq *, struct nfs_gss_clnt_ctx *);
-static char *nfs_gss_clnt_svcname(struct nfsmount *);
+static uint8_t *nfs_gss_clnt_svcname(struct nfsmount *, gssd_nametype *, uint32_t *);
static int nfs_gss_clnt_gssd_upcall(struct nfsreq *, struct nfs_gss_clnt_ctx *);
-static void nfs_gss_clnt_ctx_remove(struct nfsmount *, struct nfs_gss_clnt_ctx *);
-static int nfs_gss_clnt_ctx_delay(struct nfsreq *, int *);
+void nfs_gss_clnt_ctx_neg_cache_reap(struct nfsmount *);
+static void nfs_gss_clnt_ctx_clean(struct nfs_gss_clnt_ctx *);
+static int nfs_gss_clnt_ctx_copy(struct nfs_gss_clnt_ctx *, struct nfs_gss_clnt_ctx **, gss_key_info *);
+static void nfs_gss_clnt_ctx_destroy(struct nfs_gss_clnt_ctx *);
+static void nfs_gss_clnt_log_error(struct nfsreq *, struct nfs_gss_clnt_ctx *, uint32_t, uint32_t);
#endif /* NFSCLIENT */
#if NFSSERVER
static int nfs_gss_svc_seqnum_valid(struct nfs_gss_svc_ctx *, uint32_t);
#endif /* NFSSERVER */
-static void task_release_special_port(mach_port_t);
-static mach_port_t task_copy_special_port(mach_port_t);
+static void host_release_special_port(mach_port_t);
+static mach_port_t host_copy_special_port(mach_port_t);
static void nfs_gss_mach_alloc_buffer(u_char *, uint32_t, vm_map_copy_t *);
static int nfs_gss_mach_vmcopyout(vm_map_copy_t, uint32_t, u_char *);
-static int nfs_gss_token_get(des_key_schedule, u_char *, u_char *, int, uint32_t *, u_char *);
-static int nfs_gss_token_put(des_key_schedule, u_char *, u_char *, int, int, u_char *);
+static int nfs_gss_token_get(gss_key_info *ki, u_char *, u_char *, int, uint32_t *, u_char *);
+static int nfs_gss_token_put(gss_key_info *ki, u_char *, u_char *, int, int, u_char *);
static int nfs_gss_der_length_size(int);
static void nfs_gss_der_length_put(u_char **, int);
static int nfs_gss_der_length_get(u_char **);
static int nfs_gss_mchain_length(mbuf_t);
static int nfs_gss_append_chain(struct nfsm_chain *, mbuf_t);
static void nfs_gss_nfsm_chain(struct nfsm_chain *, mbuf_t);
-static void nfs_gss_cksum_mchain(des_key_schedule, mbuf_t, u_char *, int, int, u_char *);
-static void nfs_gss_cksum_chain(des_key_schedule, struct nfsm_chain *, u_char *, int, int, u_char *);
-static void nfs_gss_cksum_rep(des_key_schedule, uint32_t, u_char *);
-static void nfs_gss_encrypt_mchain(u_char *, mbuf_t, int, int, int);
-static void nfs_gss_encrypt_chain(u_char *, struct nfsm_chain *, int, int, int);
-static DES_LONG des_cbc_cksum(des_cblock *, des_cblock *, long, des_key_schedule, des_cblock *);
-static void des_cbc_encrypt(des_cblock *, des_cblock *, long, des_key_schedule,
- des_cblock *, des_cblock *, int);
+static void nfs_gss_cksum_mchain(gss_key_info *, mbuf_t, u_char *, int, int, u_char *);
+static void nfs_gss_cksum_chain(gss_key_info *, struct nfsm_chain *, u_char *, int, int, u_char *);
+static void nfs_gss_cksum_rep(gss_key_info *, uint32_t, u_char *);
+static void nfs_gss_encrypt_mchain(gss_key_info *, mbuf_t, int, int, int);
+static void nfs_gss_encrypt_chain(gss_key_info *, struct nfsm_chain *, int, int, int);
+
+static void gss_digest_Init(GSS_DIGEST_CTX *, gss_key_info *);
+static void gss_digest_Update(GSS_DIGEST_CTX *, void *, size_t);
+static void gss_digest_Final(GSS_DIGEST_CTX *, void *);
+static void gss_des_crypt(gss_key_info *, des_cblock *, des_cblock *,
+ int32_t, des_cblock *, des_cblock *, int, int);
+static int gss_key_init(gss_key_info *, uint32_t);
#if NFSSERVER
thread_call_t nfs_gss_svc_ctx_timer_call;
*
* Note that the code allows superuser (uid == 0)
* to adopt the context of another user.
+ *
+ * We'll match on the audit session ids, since those
+ * processes will have access to the same credential cache.
+ */
+
+#define kauth_cred_getasid(cred) ((cred)->cr_audit.as_aia_p->ai_asid)
+#define kauth_cred_getauid(cred) ((cred)->cr_audit.as_aia_p->ai_auid)
+
+#define SAFE_CAST_INTTYPE( type, intval ) \
+ ( (type)(intval)/(sizeof(type) < sizeof(intval) ? 0 : 1) )
+
+/*
+ * Map a credential's audit session id (asid) to a uid_t so the asid can
+ * be used where a uid is expected (e.g. as a cache/hash key).  The cast
+ * goes through SAFE_CAST_INTTYPE, which forces a compile-time
+ * divide-by-zero if uid_t is ever narrower than the asid type.
+ */
+uid_t
+nfs_cred_getasid2uid(kauth_cred_t cred)
+{
+	uid_t result = SAFE_CAST_INTTYPE(uid_t, kauth_cred_getasid(cred));
+	return (result);
+}
+
+/*
+ * Debugging
*/
+/*
+ * Debug helper: walk the mount's list of client GSS contexts and print
+ * each context's audit session id, audit uid, reference count and flags.
+ * The mount lock (nm_lock) is held across the walk, and each context's
+ * own mutex is taken while its fields are read.
+ */
+static void
+nfs_gss_clnt_ctx_dump(struct nfsmount *nmp)
+{
+	struct nfs_gss_clnt_ctx *cp;
+
+	lck_mtx_lock(&nmp->nm_lock);
+	NFS_GSS_DBG("Enter\n");
+	TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) {
+		lck_mtx_lock(cp->gss_clnt_mtx);
+		printf("context %d/%d: refcnt = %d, flags = %x\n",
+		    kauth_cred_getasid(cp->gss_clnt_cred),
+		    kauth_cred_getauid(cp->gss_clnt_cred),
+		    cp->gss_clnt_refcnt, cp->gss_clnt_flags);
+		lck_mtx_unlock(cp->gss_clnt_mtx);
+	}
+	NFS_GSS_DBG("Exit\n");
+	lck_mtx_unlock(&nmp->nm_lock);
+}
+
+/*
+ * Format a human-readable name for a client GSS context into the
+ * caller-supplied buffer, for debug/log messages.
+ *
+ * The string includes the mount's "from" name (when available), the
+ * context's principal or display name (when set), and the credential's
+ * asid/uid pair.  Always returns buf, NUL-terminated by snprintf.
+ *
+ *   nmp  - mount the context belongs to (may be NULL)
+ *   cp   - context to describe (may be NULL -> "NULL context")
+ *   buf  - output buffer
+ *   len  - size of buf in bytes
+ */
+static char *
+nfs_gss_clnt_ctx_name(struct nfsmount *nmp, struct nfs_gss_clnt_ctx *cp, char *buf, int len)
+{
+	char *np;
+	int nlen;
+	const char *server = "";
+
+	if (nmp && nmp->nm_mountp)
+		server = vfs_statfs(nmp->nm_mountp)->f_mntfromname;
+
+	if (cp == NULL) {
+		snprintf(buf, len, "[%s] NULL context", server);
+		return (buf);
+	}
+
+	/* Prefer the raw principal until a display name has been set. */
+	if (cp->gss_clnt_principal && !cp->gss_clnt_display) {
+		np = (char *)cp->gss_clnt_principal;
+		nlen = cp->gss_clnt_prinlen;
+	} else {
+		np = cp->gss_clnt_display;
+		nlen = np ? strlen(cp->gss_clnt_display) : 0;
+	}
+	if (nlen)
+		snprintf(buf, len, "[%s] %.*s %d/%d %s", server, nlen, np,
+		    kauth_cred_getasid(cp->gss_clnt_cred),
+		    kauth_cred_getuid(cp->gss_clnt_cred),
+		    cp->gss_clnt_principal ? "" : "[from default cred] ");
+	else
+		snprintf(buf, len, "[%s] using default %d/%d ", server,
+		    kauth_cred_getasid(cp->gss_clnt_cred),
+		    kauth_cred_getuid(cp->gss_clnt_cred));
+	return (buf);
+}
+
+#define NFS_CTXBUFSZ 80
+#define NFS_GSS_CTX(req, cp) nfs_gss_clnt_ctx_name((req)->r_nmp, cp ? cp : (req)->r_gss_ctx, CTXBUF, sizeof(CTXBUF))
+
+#define NFS_GSS_CLNT_CTX_DUMP(nmp) \
+ do { \
+ if (NFS_GSS_ISDBG && (NFS_DEBUG_FLAGS & 0x2)) \
+ nfs_gss_clnt_ctx_dump((nmp)); \
+ } while (0)
+
+/*
+ * Return 1 if the two credentials belong to the same audit session
+ * (and so share a credential cache), 0 otherwise.  Note that uids are
+ * deliberately not compared here.
+ */
static int
-nfs_gss_clnt_ctx_find(struct nfsreq *req)
+nfs_gss_clnt_ctx_cred_match(kauth_cred_t cred1, kauth_cred_t cred2)
+{
+	if (kauth_cred_getasid(cred1) == kauth_cred_getasid(cred2))
+		return (1);
+	return (0);
+}
+
+/*
+ * Busy the mount for each principal set on the mount
+ * so that the automounter will not unmount the file
+ * system underneath us. Without this, if an unmount
+ * occurs the principal that is set for an audit session
+ * will be lost and we may end up with a different identity.
+ *
+ * Note setting principals on the mount is a bad idea. This
+ * really should be handled by KIM (Kerberos Identity Management)
+ * so that defaults can be set by service identities.
+ */
+
+/*
+ * Take a usecount reference on the root vnode of an automounted file
+ * system so the automounter will not unmount it out from under us.
+ * No-op for NULL mounts and for mounts that are not automounted.
+ * The vnode_put() drops only the iocount from VFS_ROOT(); the
+ * vnode_ref() usecount persists until nfs_gss_clnt_mnt_rele().
+ */
+static void
+nfs_gss_clnt_mnt_ref(struct nfsmount *nmp)
+{
+	int error;
+	vnode_t rvp;
+
+	if (nmp == NULL ||
+	    !(vfs_flags(nmp->nm_mountp) & MNT_AUTOMOUNTED))
+		return;
+
+	error = VFS_ROOT(nmp->nm_mountp, &rvp, NULL);
+	if (!error) {
+		vnode_ref(rvp);
+		vnode_put(rvp);
+	}
+}
+
+/*
+ * Unbusy the mount. See the above comment.
+ */
+
+/*
+ * Drop the root-vnode usecount taken by nfs_gss_clnt_mnt_ref(), letting
+ * the automounter unmount the file system again.  No-op for NULL mounts
+ * and for mounts that are not automounted.
+ */
+static void
+nfs_gss_clnt_mnt_rele(struct nfsmount *nmp)
+{
+	int error;
+	vnode_t rvp;
+
+	if (nmp == NULL ||
+	    !(vfs_flags(nmp->nm_mountp) & MNT_AUTOMOUNTED))
+		return;
+
+	error = VFS_ROOT(nmp->nm_mountp, &rvp, NULL);
+	if (!error) {
+		vnode_rele(rvp);
+		vnode_put(rvp);
+	}
+}
+
+int nfs_root_steals_ctx = 1;
+
+static int
+nfs_gss_clnt_ctx_find_principal(struct nfsreq *req, uint8_t *principal, uint32_t plen, uint32_t nt)
{
struct nfsmount *nmp = req->r_nmp;
struct nfs_gss_clnt_ctx *cp;
- uid_t uid = kauth_cred_getuid(req->r_cred);
+ struct nfsreq treq;
int error = 0;
- int retrycnt = 0;
+ struct timeval now;
+ gss_key_info *ki;
+ char CTXBUF[NFS_CTXBUFSZ];
-retry:
+ bzero(&treq, sizeof (struct nfsreq));
+ treq.r_nmp = nmp;
+
+ microuptime(&now);
lck_mtx_lock(&nmp->nm_lock);
TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) {
- if (cp->gss_clnt_uid == uid) {
- if (cp->gss_clnt_flags & GSS_CTX_INVAL)
- continue;
+ lck_mtx_lock(cp->gss_clnt_mtx);
+ if (cp->gss_clnt_flags & GSS_CTX_DESTROY) {
+ NFS_GSS_DBG("Found destroyed context %s refcnt = %d continuing\n",
+ NFS_GSS_CTX(req, cp),
+ cp->gss_clnt_refcnt);
+ lck_mtx_unlock(cp->gss_clnt_mtx);
+ continue;
+ }
+ if (nfs_gss_clnt_ctx_cred_match(cp->gss_clnt_cred, req->r_cred)) {
+ if (nmp->nm_gsscl.tqh_first != cp) {
+ TAILQ_REMOVE(&nmp->nm_gsscl, cp, gss_clnt_entries);
+ TAILQ_INSERT_HEAD(&nmp->nm_gsscl, cp, gss_clnt_entries);
+ }
+ if (principal) {
+ /*
+ * If we have a principal, but it does not match the current cred
+ * mark it for removal
+ */
+ if (cp->gss_clnt_prinlen != plen || cp->gss_clnt_prinnt != nt ||
+ bcmp(cp->gss_clnt_principal, principal, plen) != 0) {
+ cp->gss_clnt_flags |= (GSS_CTX_INVAL | GSS_CTX_DESTROY);
+ cp->gss_clnt_refcnt++;
+ lck_mtx_unlock(cp->gss_clnt_mtx);
+ NFS_GSS_DBG("Marking %s for deletion because %s does not match\n",
+ NFS_GSS_CTX(req, cp), principal);
+ NFS_GSS_DBG("len = (%d,%d), nt = (%d,%d)\n", cp->gss_clnt_prinlen, plen,
+ cp->gss_clnt_prinnt, nt);
+ treq.r_gss_ctx = cp;
+ cp = NULL;
+ break;
+ }
+ }
+ if (cp->gss_clnt_flags & GSS_CTX_INVAL) {
+ /*
+ * If we're still being used and we're not expired
+ * just return and don't bother gssd again. Note if
+ * gss_clnt_nctime is zero it is about to be set to now.
+ */
+ if (cp->gss_clnt_nctime + GSS_NEG_CACHE_TO >= now.tv_sec || cp->gss_clnt_nctime == 0) {
+ NFS_GSS_DBG("Context %s (refcnt = %d) not expired returning EAUTH nctime = %ld now = %ld\n",
+ NFS_GSS_CTX(req, cp), cp->gss_clnt_refcnt, cp->gss_clnt_nctime, now.tv_sec);
+ lck_mtx_unlock(cp->gss_clnt_mtx);
+ lck_mtx_unlock(&nmp->nm_lock);
+ return (NFSERR_EAUTH);
+ }
+ if (cp->gss_clnt_refcnt) {
+ struct nfs_gss_clnt_ctx *ncp;
+ /*
+					 * If this context has references, we can't use it so we mark it for
+ * destruction and create a new context based on this one in the
+ * same manner as renewing one.
+ */
+ cp->gss_clnt_flags |= GSS_CTX_DESTROY;
+ NFS_GSS_DBG("Context %s has expired but we still have %d references\n",
+ NFS_GSS_CTX(req, cp), cp->gss_clnt_refcnt);
+ error = nfs_gss_clnt_ctx_copy(cp, &ncp, NULL);
+ lck_mtx_unlock(cp->gss_clnt_mtx);
+ if (error) {
+ lck_mtx_unlock(&nmp->nm_lock);
+ return (error);
+ }
+ cp = ncp;
+ break;
+ } else {
+ /* cp->gss_clnt_kinfo should be NULL here */
+ if (cp->gss_clnt_kinfo) {
+ FREE(cp->gss_clnt_kinfo, M_TEMP);
+ cp->gss_clnt_kinfo = NULL;
+ }
+ if (cp->gss_clnt_nctime)
+ nmp->nm_ncentries--;
+ lck_mtx_unlock(cp->gss_clnt_mtx);
+ TAILQ_REMOVE(&nmp->nm_gsscl, cp, gss_clnt_entries);
+ break;
+ }
+ }
+ /* Found a valid context to return */
+ cp->gss_clnt_refcnt++;
+ req->r_gss_ctx = cp;
+ lck_mtx_unlock(cp->gss_clnt_mtx);
lck_mtx_unlock(&nmp->nm_lock);
- nfs_gss_clnt_ctx_ref(req, cp);
return (0);
}
+ lck_mtx_unlock(cp->gss_clnt_mtx);
}
- if (uid == 0) {
+ if (!cp && nfs_root_steals_ctx && principal == NULL && kauth_cred_getuid(req->r_cred) == 0) {
/*
* If superuser is trying to get access, then co-opt
* the first valid context in the list.
* in case one is set up for it.
*/
TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) {
- if (!(cp->gss_clnt_flags & GSS_CTX_INVAL)) {
- lck_mtx_unlock(&nmp->nm_lock);
+ if (!(cp->gss_clnt_flags & (GSS_CTX_INVAL|GSS_CTX_DESTROY))) {
nfs_gss_clnt_ctx_ref(req, cp);
+ lck_mtx_unlock(&nmp->nm_lock);
+ NFS_GSS_DBG("Root stole context %s\n", NFS_GSS_CTX(req, NULL));
return (0);
}
}
}
- /*
- * Not found - create a new context
- */
+ MALLOC(ki, gss_key_info *, sizeof (gss_key_info), M_TEMP, M_WAITOK|M_ZERO);
+ if (ki == NULL) {
+ lck_mtx_unlock(&nmp->nm_lock);
+ return (ENOMEM);
+ }
+
+ NFS_GSS_DBG("Context %s%sfound in Neg Cache @ %ld\n",
+ NFS_GSS_CTX(req, cp),
+ cp == NULL ? " not " : "",
+ cp == NULL ? 0L : cp->gss_clnt_nctime);
/*
- * If the thread is async, then it cannot get
- * kerberos creds and set up a proper context.
- * If no sec= mount option is given, attempt
- * to failover to sec=sys.
+ * Not found - create a new context
*/
- if (req->r_thread == NULL) {
- if ((nmp->nm_flag & NFSMNT_SECGIVEN) == 0) {
- error = nfs_gss_clnt_ctx_failover(req);
- } else {
- printf("nfs_gss_clnt_ctx_find: no context for async\n");
- error = EAUTH;
- }
- lck_mtx_unlock(&nmp->nm_lock);
- return (error);
- }
-
-
- MALLOC(cp, struct nfs_gss_clnt_ctx *, sizeof(*cp), M_TEMP, M_WAITOK|M_ZERO);
if (cp == NULL) {
- lck_mtx_unlock(&nmp->nm_lock);
- return (ENOMEM);
+ MALLOC(cp, struct nfs_gss_clnt_ctx *, sizeof(*cp), M_TEMP, M_WAITOK|M_ZERO);
+ if (cp == NULL) {
+ lck_mtx_unlock(&nmp->nm_lock);
+ return (ENOMEM);
+ }
+ cp->gss_clnt_kinfo = ki;
+ cp->gss_clnt_cred = req->r_cred;
+ kauth_cred_ref(cp->gss_clnt_cred);
+ cp->gss_clnt_mtx = lck_mtx_alloc_init(nfs_gss_clnt_grp, LCK_ATTR_NULL);
+ cp->gss_clnt_ptime = now.tv_sec - GSS_PRINT_DELAY;
+ if (principal) {
+ MALLOC(cp->gss_clnt_principal, uint8_t *, plen+1, M_TEMP, M_WAITOK|M_ZERO);
+ memcpy(cp->gss_clnt_principal, principal, plen);
+ cp->gss_clnt_prinlen = plen;
+ cp->gss_clnt_prinnt = nt;
+ cp->gss_clnt_flags |= GSS_CTX_STICKY;
+ nfs_gss_clnt_mnt_ref(nmp);
+ }
+ } else {
+ cp->gss_clnt_kinfo = ki;
+ nfs_gss_clnt_ctx_clean(cp);
+ if (principal) {
+ /*
+ * If we have a principal and we found a matching audit
+ * session, then to get here, the principal had to match.
+ * In walking the context list if it has a principal
+ * or the principal is not set then we mark the context
+ * for destruction and set cp to NULL and we fall to the
+ * if clause above. If the context still has references
+ * again we copy the context which will preserve the principal
+ * and we end up here with the correct principal set.
+			 * If we don't have references then the principal must have
+			 * matched and we will fall through here.
+ */
+ cp->gss_clnt_flags |= GSS_CTX_STICKY;
+ }
}
- cp->gss_clnt_uid = uid;
- cp->gss_clnt_mtx = lck_mtx_alloc_init(nfs_gss_clnt_grp, LCK_ATTR_NULL);
cp->gss_clnt_thread = current_thread();
nfs_gss_clnt_ctx_ref(req, cp);
- TAILQ_INSERT_TAIL(&nmp->nm_gsscl, cp, gss_clnt_entries);
+ TAILQ_INSERT_HEAD(&nmp->nm_gsscl, cp, gss_clnt_entries);
lck_mtx_unlock(&nmp->nm_lock);
- error = nfs_gss_clnt_ctx_init(req, cp);
- if (error)
+ error = nfs_gss_clnt_ctx_init_retry(req, cp); // Initialize new context
+ if (error) {
+ NFS_GSS_DBG("nfs_gss_clnt_ctx_init_retry returned %d for %s\n", error, NFS_GSS_CTX(req, cp));
nfs_gss_clnt_ctx_unref(req);
-
- if (error == ENEEDAUTH) {
- error = nfs_gss_clnt_ctx_delay(req, &retrycnt);
- if (!error)
- goto retry;
}
- /*
- * If we failed to set up a Kerberos context for this
- * user and no sec= mount option was given then set
- * up a dummy context that allows this user to attempt
- * sec=sys calls.
- */
- if (error && (nmp->nm_flag & NFSMNT_SECGIVEN) == 0) {
- lck_mtx_lock(&nmp->nm_lock);
- error = nfs_gss_clnt_ctx_failover(req);
- lck_mtx_unlock(&nmp->nm_lock);
- }
+	/* Remove any old matching context that had a different principal */
+ nfs_gss_clnt_ctx_unref(&treq);
return (error);
}
-/*
- * Set up a dummy context to allow the use of sec=sys
- * for this user, if the server allows sec=sys.
- * The context is valid for GSS_CLNT_SYS_VALID seconds,
- * so that the user will periodically attempt to fail back
- * and get a real credential.
- *
- * Assumes context list (nm_lock) is locked
- */
+/*
+ * Find (or create) a GSS context for req->r_cred with no explicit
+ * principal; thin wrapper around nfs_gss_clnt_ctx_find_principal().
+ */
static int
-nfs_gss_clnt_ctx_failover(struct nfsreq *req)
+nfs_gss_clnt_ctx_find(struct nfsreq *req)
{
-	struct nfsmount *nmp = req->r_nmp;
-	struct nfs_gss_clnt_ctx *cp;
-	uid_t uid = kauth_cred_getuid(req->r_cred);
-	struct timeval now;
-
-	MALLOC(cp, struct nfs_gss_clnt_ctx *, sizeof(*cp), M_TEMP, M_WAITOK|M_ZERO);
-	if (cp == NULL)
-		return (ENOMEM);
-
-	cp->gss_clnt_service = RPCSEC_GSS_SVC_SYS;
-	cp->gss_clnt_uid = uid;
-	cp->gss_clnt_mtx = lck_mtx_alloc_init(nfs_gss_clnt_grp, LCK_ATTR_NULL);
-	microuptime(&now);
-	cp->gss_clnt_ctime = now.tv_sec;	// time stamp
-	nfs_gss_clnt_ctx_ref(req, cp);
-	TAILQ_INSERT_TAIL(&nmp->nm_gsscl, cp, gss_clnt_entries);
-
-	return (0);
+	return (nfs_gss_clnt_ctx_find_principal(req, NULL, 0, 0));
}
/*
int
nfs_gss_clnt_cred_put(struct nfsreq *req, struct nfsm_chain *nmc, mbuf_t args)
{
- struct nfsmount *nmp = req->r_nmp;
struct nfs_gss_clnt_ctx *cp;
uint32_t seqnum = 0;
int error = 0;
- int slpflag = 0;
+ int slpflag, recordmark = 0;
int start, len, offset = 0;
int pad, toklen;
struct nfsm_chain nmc_tmp;
struct gss_seq *gsp;
- u_char tokbuf[KRB5_SZ_TOKMAX];
- u_char cksum[8];
- struct timeval now;
+ u_char tokbuf[KRB5_SZ_TOKMAX(MAX_DIGEST)];
+ u_char cksum[MAX_DIGEST];
+ gss_key_info *ki;
+
+ slpflag = (PZERO-1);
+ if (req->r_nmp) {
+ slpflag |= (NMFLAG(req->r_nmp, INTR) && req->r_thread && !(req->r_flags & R_NOINTR)) ? PCATCH : 0;
+ recordmark = (req->r_nmp->nm_sotype == SOCK_STREAM);
+ }
retry:
if (req->r_gss_ctx == NULL) {
}
cp = req->r_gss_ctx;
- /*
- * If it's a dummy context for a user that's using
- * a fallback to sec=sys, then just return an error
- * so rpchead can encode an RPCAUTH_UNIX cred.
- */
- if (cp->gss_clnt_service == RPCSEC_GSS_SVC_SYS) {
- /*
- * The dummy context is valid for just
- * GSS_CLNT_SYS_VALID seconds. If the context
- * is older than this, mark it invalid and try
- * again to get a real one.
- */
- lck_mtx_lock(cp->gss_clnt_mtx);
- microuptime(&now);
- if (now.tv_sec > cp->gss_clnt_ctime + GSS_CLNT_SYS_VALID) {
- cp->gss_clnt_flags |= GSS_CTX_INVAL;
- lck_mtx_unlock(cp->gss_clnt_mtx);
- nfs_gss_clnt_ctx_unref(req);
- goto retry;
- }
- lck_mtx_unlock(cp->gss_clnt_mtx);
- return (ENEEDAUTH);
- }
-
/*
* If the context thread isn't null, then the context isn't
* yet complete and is for the exclusive use of the thread
lck_mtx_lock(cp->gss_clnt_mtx);
if (cp->gss_clnt_thread && cp->gss_clnt_thread != current_thread()) {
cp->gss_clnt_flags |= GSS_NEEDCTX;
- slpflag = (PZERO-1) | PDROP | (((nmp->nm_flag & NFSMNT_INT) && req->r_thread) ? PCATCH : 0);
- msleep(cp, cp->gss_clnt_mtx, slpflag, "ctxwait", NULL);
- if ((error = nfs_sigintr(nmp, req, req->r_thread, 0)))
+ msleep(cp, cp->gss_clnt_mtx, slpflag | PDROP, "ctxwait", NULL);
+ slpflag &= ~PCATCH;
+ if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0)))
return (error);
nfs_gss_clnt_ctx_unref(req);
goto retry;
}
lck_mtx_unlock(cp->gss_clnt_mtx);
+ ki = cp->gss_clnt_kinfo;
if (cp->gss_clnt_flags & GSS_CTX_COMPLETE) {
/*
* Get a sequence number for this request.
while (win_getbit(cp->gss_clnt_seqbits,
((cp->gss_clnt_seqnum - cp->gss_clnt_seqwin) + 1) % cp->gss_clnt_seqwin)) {
cp->gss_clnt_flags |= GSS_NEEDSEQ;
- slpflag = (PZERO-1) | (((nmp->nm_flag & NFSMNT_INT) && req->r_thread) ? PCATCH : 0);
- msleep(cp, cp->gss_clnt_mtx, slpflag, "seqwin", NULL);
- if ((error = nfs_sigintr(nmp, req, req->r_thread, 0))) {
- lck_mtx_unlock(cp->gss_clnt_mtx);
+ msleep(cp, cp->gss_clnt_mtx, slpflag | PDROP, "seqwin", NULL);
+ slpflag &= ~PCATCH;
+ if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0))) {
return (error);
}
+ lck_mtx_lock(cp->gss_clnt_mtx);
if (cp->gss_clnt_flags & GSS_CTX_INVAL) {
			/* Renewed while we were waiting */
lck_mtx_unlock(cp->gss_clnt_mtx);
nfsm_chain_add_32(error, nmc, seqnum);
nfsm_chain_add_32(error, nmc, cp->gss_clnt_service);
nfsm_chain_add_32(error, nmc, cp->gss_clnt_handle_len);
- nfsm_chain_add_opaque(error, nmc, cp->gss_clnt_handle, cp->gss_clnt_handle_len);
-
+ if (cp->gss_clnt_handle_len > 0) {
+ if (cp->gss_clnt_handle == NULL)
+ return (EBADRPC);
+ nfsm_chain_add_opaque(error, nmc, cp->gss_clnt_handle, cp->gss_clnt_handle_len);
+ }
+ if (error)
+ return(error);
/*
* Now add the verifier
*/
return (error);
}
- offset = nmp->nm_sotype == SOCK_STREAM ? NFSX_UNSIGNED : 0; // record mark
+ offset = recordmark ? NFSX_UNSIGNED : 0; // record mark
nfsm_chain_build_done(error, nmc);
- nfs_gss_cksum_chain(cp->gss_clnt_sched, nmc, krb5_mic, offset, 0, cksum);
+ nfs_gss_cksum_chain(ki, nmc, ALG_MIC(ki), offset, 0, cksum);
- toklen = nfs_gss_token_put(cp->gss_clnt_sched, krb5_mic, tokbuf, 1, 0, cksum);
+ toklen = nfs_gss_token_put(ki, ALG_MIC(ki), tokbuf, 1, 0, cksum);
nfsm_chain_add_32(error, nmc, RPCSEC_GSS); // flavor
nfsm_chain_add_32(error, nmc, toklen); // length
nfsm_chain_add_opaque(error, nmc, tokbuf, toklen);
nfs_gss_append_chain(nmc, args); // Append the args mbufs
/* Now compute a checksum over the seqnum + args */
- nfs_gss_cksum_chain(cp->gss_clnt_sched, nmc, krb5_mic, start, len, cksum);
+ nfs_gss_cksum_chain(ki, nmc, ALG_MIC(ki), start, len, cksum);
/* Insert it into a token and append to the request */
- toklen = nfs_gss_token_put(cp->gss_clnt_sched, krb5_mic, tokbuf, 1, 0, cksum);
+ toklen = nfs_gss_token_put(ki, ALG_MIC(ki), tokbuf, 1, 0, cksum);
nfsm_chain_finish_mbuf(error, nmc); // force checksum into new mbuf
nfsm_chain_add_32(error, nmc, toklen);
nfsm_chain_add_opaque(error, nmc, tokbuf, toklen);
nfsm_chain_build_done(error, &nmc_tmp);
/* Now compute a checksum over the confounder + seqnum + args */
- nfs_gss_cksum_chain(cp->gss_clnt_sched, &nmc_tmp, krb5_wrap, 0, len, cksum);
+ nfs_gss_cksum_chain(ki, &nmc_tmp, ALG_WRAP(ki), 0, len, cksum);
/* Insert it into a token */
- toklen = nfs_gss_token_put(cp->gss_clnt_sched, krb5_wrap, tokbuf, 1, len, cksum);
+ toklen = nfs_gss_token_put(ki, ALG_WRAP(ki), tokbuf, 1, len, cksum);
nfsm_chain_add_32(error, nmc, toklen + len); // token + args length
nfsm_chain_add_opaque_nopad(error, nmc, tokbuf, toklen);
req->r_gss_argoff = nfsm_chain_offset(nmc); // Stash offset
nfs_gss_append_chain(nmc, nmc_tmp.nmc_mhead); // Append the args mbufs
/* Finally, encrypt the args */
- nfs_gss_encrypt_chain(cp->gss_clnt_skey, &nmc_tmp, 0, len, DES_ENCRYPT);
+ nfs_gss_encrypt_chain(ki, &nmc_tmp, 0, len, DES_ENCRYPT);
/* Add null XDR pad if the ASN.1 token misaligned the data */
pad = nfsm_pad(toklen + len);
uint32_t verflen,
uint32_t *accepted_statusp)
{
- u_char tokbuf[KRB5_SZ_TOKMAX];
- u_char cksum1[8], cksum2[8];
+ u_char tokbuf[KRB5_SZ_TOKMAX(MAX_DIGEST)];
+ u_char cksum1[MAX_DIGEST], cksum2[MAX_DIGEST];
uint32_t seqnum = 0;
struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx;
struct nfsm_chain nmc_tmp;
struct gss_seq *gsp;
uint32_t reslen, start, cksumlen, toklen;
int error = 0;
+ gss_key_info *ki = cp->gss_clnt_kinfo;
reslen = cksumlen = 0;
*accepted_statusp = 0;
if (cp == NULL)
- return (EAUTH);
+ return (NFSERR_EAUTH);
/*
* If it's not an RPCSEC_GSS verifier, then it has to
* be a null verifier that resulted from either
*/
if (verftype != RPCSEC_GSS) {
if (verftype != RPCAUTH_NULL)
- return (EAUTH);
- if (cp->gss_clnt_flags & GSS_CTX_COMPLETE &&
- cp->gss_clnt_service != RPCSEC_GSS_SVC_SYS)
- return (EAUTH);
+ return (NFSERR_EAUTH);
+ if (cp->gss_clnt_flags & GSS_CTX_COMPLETE)
+ return (NFSERR_EAUTH);
if (verflen > 0)
nfsm_chain_adv(error, nmc, nfsm_rndup(verflen));
nfsm_chain_get_32(error, nmc, *accepted_statusp);
return (error);
}
- if (verflen != KRB5_SZ_TOKEN)
- return (EAUTH);
-
/*
* If we received an RPCSEC_GSS verifier but the
* context isn't yet complete, then it must be
return (error);
}
+ if (verflen != KRB5_SZ_TOKEN(ki->hash_len))
+ return (NFSERR_EAUTH);
+
/*
* Get the 8 octet sequence number
* checksum out of the verifier token.
nfsm_chain_get_opaque(error, nmc, verflen, tokbuf);
if (error)
goto nfsmout;
- error = nfs_gss_token_get(cp->gss_clnt_sched, krb5_mic, tokbuf, 0, NULL, cksum1);
+ error = nfs_gss_token_get(ki, ALG_MIC(ki), tokbuf, 0, NULL, cksum1);
if (error)
goto nfsmout;
* the one in the verifier returned by the server.
*/
SLIST_FOREACH(gsp, &req->r_gss_seqlist, gss_seqnext) {
- nfs_gss_cksum_rep(cp->gss_clnt_sched, gsp->gss_seqnum, cksum2);
- if (bcmp(cksum1, cksum2, 8) == 0)
+ nfs_gss_cksum_rep(ki, gsp->gss_seqnum, cksum2);
+ if (bcmp(cksum1, cksum2, HASHLEN(ki)) == 0)
break;
}
if (gsp == NULL)
- return (EAUTH);
+ return (NFSERR_EAUTH);
/*
* Get the RPC accepted status
/* Compute a checksum over the sequence number + results */
start = nfsm_chain_offset(nmc);
- nfs_gss_cksum_chain(cp->gss_clnt_sched, nmc, krb5_mic, start, reslen, cksum1);
+ nfs_gss_cksum_chain(ki, nmc, ALG_MIC(ki), start, reslen, cksum1);
/*
* Get the sequence number prepended to the results
reslen -= NFSX_UNSIGNED; // already skipped seqnum
nfsm_chain_adv(error, &nmc_tmp, reslen); // skip over the results
nfsm_chain_get_32(error, &nmc_tmp, cksumlen); // length of checksum
- if (cksumlen != KRB5_SZ_TOKEN) {
+ if (cksumlen != KRB5_SZ_TOKEN(ki->hash_len)) {
error = EBADRPC;
goto nfsmout;
}
nfsm_chain_get_opaque(error, &nmc_tmp, cksumlen, tokbuf);
if (error)
goto nfsmout;
- error = nfs_gss_token_get(cp->gss_clnt_sched, krb5_mic, tokbuf, 0,
- NULL, cksum2);
+ error = nfs_gss_token_get(ki, ALG_MIC(ki), tokbuf, 0, NULL, cksum2);
if (error)
goto nfsmout;
/* Verify that the checksums are the same */
- if (bcmp(cksum1, cksum2, 8) != 0) {
+ if (bcmp(cksum1, cksum2, HASHLEN(ki)) != 0) {
error = EBADRPC;
goto nfsmout;
}
}
/* Get the token that prepends the encrypted results */
- nfsm_chain_get_opaque(error, nmc, KRB5_SZ_TOKMAX, tokbuf);
+ nfsm_chain_get_opaque(error, nmc, KRB5_SZ_TOKMAX(ki->hash_len), tokbuf);
if (error)
goto nfsmout;
- error = nfs_gss_token_get(cp->gss_clnt_sched, krb5_wrap, tokbuf, 0,
+ error = nfs_gss_token_get(ki, ALG_WRAP(ki), tokbuf, 0,
&toklen, cksum1);
if (error)
goto nfsmout;
/* decrypt the confounder + sequence number + results */
start = nfsm_chain_offset(nmc);
- nfs_gss_encrypt_chain(cp->gss_clnt_skey, nmc, start, reslen, DES_DECRYPT);
+ nfs_gss_encrypt_chain(ki, nmc, start, reslen, DES_DECRYPT);
/* Compute a checksum over the confounder + sequence number + results */
- nfs_gss_cksum_chain(cp->gss_clnt_sched, nmc, krb5_wrap, start, reslen, cksum2);
+ nfs_gss_cksum_chain(ki, nmc, ALG_WRAP(ki), start, reslen, cksum2);
/* Verify that the checksums are the same */
- if (bcmp(cksum1, cksum2, 8) != 0) {
+ if (bcmp(cksum1, cksum2, HASHLEN(ki)) != 0) {
error = EBADRPC;
goto nfsmout;
}
* The location and length of the args is marked by two fields
* in the request structure: r_gss_argoff and r_gss_arglen,
* which are stashed when the NFS request is built.
- */
+ */
int
nfs_gss_clnt_args_restore(struct nfsreq *req)
{
struct nfsm_chain mchain, *nmc = &mchain;
int len, error = 0;
- if (cp == NULL)
- return (EAUTH);
+ if (cp == NULL)
+ return (NFSERR_EAUTH);
if ((cp->gss_clnt_flags & GSS_CTX_COMPLETE) == 0)
return (ENEEDAUTH);
*/
len = req->r_gss_arglen;
len += len % 8 > 0 ? 4 : 8; // add DES padding length
- nfs_gss_encrypt_chain(cp->gss_clnt_skey, nmc,
- req->r_gss_argoff, len, DES_DECRYPT);
+ nfs_gss_encrypt_chain(cp->gss_clnt_kinfo, nmc,
+ req->r_gss_argoff, len, DES_DECRYPT);
nfsm_chain_adv(error, nmc, req->r_gss_arglen);
if (error)
return (error);
struct nfsmount *nmp = req->r_nmp;
int client_complete = 0;
int server_complete = 0;
- u_char cksum1[8], cksum2[8];
+ u_char cksum1[MAX_DIGEST], cksum2[MAX_DIGEST];
int error = 0;
- struct timeval now;
+ gss_key_info *ki = cp->gss_clnt_kinfo;
/* Initialize a new client context */
- cp->gss_clnt_svcname = nfs_gss_clnt_svcname(nmp);
if (cp->gss_clnt_svcname == NULL) {
- error = EAUTH;
- goto nfsmout;
+ cp->gss_clnt_svcname = nfs_gss_clnt_svcname(nmp, &cp->gss_clnt_svcnt, &cp->gss_clnt_svcnamlen);
+ if (cp->gss_clnt_svcname == NULL) {
+ error = NFSERR_EAUTH;
+ goto nfsmout;
+ }
}
+
cp->gss_clnt_proc = RPCSEC_GSS_INIT;
cp->gss_clnt_service =
- nmp->nm_auth == RPCAUTH_KRB5 ? RPCSEC_GSS_SVC_NONE :
- nmp->nm_auth == RPCAUTH_KRB5I ? RPCSEC_GSS_SVC_INTEGRITY :
- nmp->nm_auth == RPCAUTH_KRB5P ? RPCSEC_GSS_SVC_PRIVACY : 0;
+ req->r_auth == RPCAUTH_KRB5 ? RPCSEC_GSS_SVC_NONE :
+ req->r_auth == RPCAUTH_KRB5I ? RPCSEC_GSS_SVC_INTEGRITY :
+ req->r_auth == RPCAUTH_KRB5P ? RPCSEC_GSS_SVC_PRIVACY : 0;
+ cp->gss_clnt_gssd_flags = (nfs_single_des ? GSSD_NFS_1DES : 0);
/*
* Now loop around alternating gss_init_sec_context and
* gss_accept_sec_context upcalls to the gssd on the client
*/
for (;;) {
+retry:
/* Upcall to the gss_init_sec_context in the gssd */
error = nfs_gss_clnt_gssd_upcall(req, cp);
if (error)
if (server_complete)
break;
} else if (cp->gss_clnt_major != GSS_S_CONTINUE_NEEDED) {
- error = EAUTH;
+ error = NFSERR_EAUTH;
goto nfsmout;
}
* Pass the token to the server.
*/
error = nfs_gss_clnt_ctx_callserver(req, cp);
- if (error)
+ if (error) {
+ if (error == ENEEDAUTH && cp->gss_clnt_proc == RPCSEC_GSS_INIT &&
+ (cp->gss_clnt_gssd_flags & (GSSD_RESTART | GSSD_NFS_1DES)) == 0) {
+ NFS_GSS_DBG("Retrying with single DES for req %p\n", req);
+ cp->gss_clnt_gssd_flags = (GSSD_RESTART | GSSD_NFS_1DES);
+ if (cp->gss_clnt_token)
+ FREE(cp->gss_clnt_token, M_TEMP);
+ cp->gss_clnt_token = NULL;
+ cp->gss_clnt_tokenlen = 0;
+ goto retry;
+ }
+ // Reset flags, if error = ENEEDAUTH we will try 3des again
+ cp->gss_clnt_gssd_flags = 0;
goto nfsmout;
-
+ }
if (cp->gss_clnt_major == GSS_S_COMPLETE) {
server_complete = 1;
if (client_complete)
break;
- } else if (cp->gss_clnt_major != GSS_S_CONTINUE_NEEDED) {
- error = EAUTH;
- goto nfsmout;
}
-
cp->gss_clnt_proc = RPCSEC_GSS_CONTINUE_INIT;
}
/*
* The context is apparently established successfully
*/
+ lck_mtx_lock(cp->gss_clnt_mtx);
cp->gss_clnt_flags |= GSS_CTX_COMPLETE;
+ lck_mtx_unlock(cp->gss_clnt_mtx);
cp->gss_clnt_proc = RPCSEC_GSS_DATA;
- microuptime(&now);
- cp->gss_clnt_ctime = now.tv_sec; // time stamp
-
- /*
- * Construct a key schedule from our shiny new session key
- */
- error = des_key_sched((des_cblock *) cp->gss_clnt_skey, cp->gss_clnt_sched);
- if (error) {
- error = EAUTH;
- goto nfsmout;
- }
/*
* Compute checksum of the server's window
*/
- nfs_gss_cksum_rep(cp->gss_clnt_sched, cp->gss_clnt_seqwin, cksum1);
+ nfs_gss_cksum_rep(ki, cp->gss_clnt_seqwin, cksum1);
/*
* and see if it matches the one in the
* verifier the server returned.
*/
- error = nfs_gss_token_get(cp->gss_clnt_sched, krb5_mic, cp->gss_clnt_verf, 0,
+ error = nfs_gss_token_get(ki, ALG_MIC(ki), cp->gss_clnt_verf, 0,
NULL, cksum2);
FREE(cp->gss_clnt_verf, M_TEMP);
cp->gss_clnt_verf = NULL;
- if (error || bcmp(cksum1, cksum2, 8) != 0) {
- error = EAUTH;
+ if (error || bcmp(cksum1, cksum2, HASHLEN(ki)) != 0) {
+ error = NFSERR_EAUTH;
goto nfsmout;
}
MALLOC(cp->gss_clnt_seqbits, uint32_t *,
nfsm_rndup((cp->gss_clnt_seqwin + 7) / 8), M_TEMP, M_WAITOK|M_ZERO);
if (cp->gss_clnt_seqbits == NULL)
- error = EAUTH;
+ error = NFSERR_EAUTH;
nfsmout:
+ /*
+ * If the error is ENEEDAUTH we're not done, so no need
+ * to wake up other threads again. This thread will retry in
+ * the find or renew routines.
+ */
+ if (error == ENEEDAUTH)
+ return (error);
+
/*
* If there's an error, just mark it as invalid.
* It will be removed when the reference count
* drops to zero.
*/
+ lck_mtx_lock(cp->gss_clnt_mtx);
if (error)
cp->gss_clnt_flags |= GSS_CTX_INVAL;
/*
* Wake any threads waiting to use the context
*/
- lck_mtx_lock(cp->gss_clnt_mtx);
cp->gss_clnt_thread = NULL;
if (cp->gss_clnt_flags & GSS_NEEDCTX) {
cp->gss_clnt_flags &= ~GSS_NEEDCTX;
return (error);
}
+/*
+ * This function calls nfs_gss_clnt_ctx_init() to set up a new context.
+ * But if there's a failure in trying to establish the context it keeps
+ * retrying at progressively longer intervals in case the failure is
+ * due to some transient condition. For instance, the server might be
+ * failing the context setup because directory services is not coming
+ * up in a timely fashion.
+ *
+ * Returns 0 on success; ENXIO if the mount is gone; an nfs_sigintr()
+ * error if an "intr" mount is interrupted; or ETIMEDOUT when a soft
+ * mount has exhausted nm_retry attempts.
+ */
+static int
+nfs_gss_clnt_ctx_init_retry(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp)
+{
+	struct nfsmount *nmp = req->r_nmp;
+	struct timeval now;
+	time_t waituntil;
+	int error, slpflag;
+	int retries = 0;
+	int timeo = NFS_TRYLATERDEL;	/* backoff interval; doubled each pass, capped at 60s below */
+
+	if (nfs_mount_gone(nmp)) {
+		error = ENXIO;
+		goto bad;
+	}
+
+	/* For an "intr" mount allow a signal to interrupt the retries */
+	slpflag = (NMFLAG(nmp, INTR) && !(req->r_flags & R_NOINTR)) ? PCATCH : 0;
+
+	while ((error = nfs_gss_clnt_ctx_init(req, cp)) == ENEEDAUTH) {
+		microuptime(&now);
+		waituntil = now.tv_sec + timeo;
+		/* Sleep in one-second ticks so interruption is checked each second */
+		while (now.tv_sec < waituntil) {
+			tsleep(NULL, PSOCK | slpflag, "nfs_gss_clnt_ctx_init_retry", hz);
+			slpflag = 0;	/* PCATCH only applies to the first tick */
+			error = nfs_sigintr(req->r_nmp, req, current_thread(), 0);
+			if (error)
+				goto bad;
+			microuptime(&now);
+		}
+
+		retries++;
+		/* If it's a soft mount just give up after a while */
+		if ((NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) && (retries > nmp->nm_retry)) {
+			error = ETIMEDOUT;
+			goto bad;
+		}
+		timeo *= 2;
+		if (timeo > 60)
+			timeo = 60;
+	}
+
+	if (error == 0)
+		return 0;	// success
+bad:
+	/*
+	 * Give up on this context
+	 */
+	lck_mtx_lock(cp->gss_clnt_mtx);
+	cp->gss_clnt_flags |= GSS_CTX_INVAL;
+
+	/*
+	 * Wake any threads waiting to use the context
+	 */
+	cp->gss_clnt_thread = NULL;
+	if (cp->gss_clnt_flags & GSS_NEEDCTX) {
+		cp->gss_clnt_flags &= ~GSS_NEEDCTX;
+		wakeup(cp);
+	}
+	lck_mtx_unlock(cp->gss_clnt_mtx);
+
+	return error;
+}
+
/*
* Call the NFS server using a null procedure for context setup.
* Even though it's a null procedure and nominally has no arguments
static int
nfs_gss_clnt_ctx_callserver(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp)
{
- struct nfsmount *nmp = req->r_nmp;
struct nfsm_chain nmreq, nmrep;
int error = 0, status;
- u_int64_t xid;
+ uint32_t major = cp->gss_clnt_major, minor = cp->gss_clnt_minor;
int sz;
+ if (nfs_mount_gone(req->r_nmp))
+ return (ENXIO);
nfsm_chain_null(&nmreq);
nfsm_chain_null(&nmrep);
sz = NFSX_UNSIGNED + nfsm_rndup(cp->gss_clnt_tokenlen);
nfsm_chain_build_alloc_init(error, &nmreq, sz);
nfsm_chain_add_32(error, &nmreq, cp->gss_clnt_tokenlen);
- nfsm_chain_add_opaque(error, &nmreq, cp->gss_clnt_token, cp->gss_clnt_tokenlen);
+ if (cp->gss_clnt_tokenlen > 0)
+ nfsm_chain_add_opaque(error, &nmreq, cp->gss_clnt_token, cp->gss_clnt_tokenlen);
nfsm_chain_build_done(error, &nmreq);
if (error)
goto nfsmout;
/* Call the server */
- error = nfs_request2(NULL, nmp->nm_mountp, &nmreq, NFSPROC_NULL,
- req->r_thread, req->r_cred, 0, &nmrep, &xid, &status);
+ error = nfs_request_gss(req->r_nmp->nm_mountp, &nmreq, req->r_thread, req->r_cred,
+ (req->r_flags & R_OPTMASK), cp, &nmrep, &status);
if (cp->gss_clnt_token != NULL) {
FREE(cp->gss_clnt_token, M_TEMP);
cp->gss_clnt_token = NULL;
/* Get the server's reply */
nfsm_chain_get_32(error, &nmrep, cp->gss_clnt_handle_len);
- if (cp->gss_clnt_handle != NULL)
+ if (cp->gss_clnt_handle != NULL) {
FREE(cp->gss_clnt_handle, M_TEMP);
+ cp->gss_clnt_handle = NULL;
+ }
if (cp->gss_clnt_handle_len > 0) {
MALLOC(cp->gss_clnt_handle, u_char *, cp->gss_clnt_handle_len, M_TEMP, M_WAITOK);
if (cp->gss_clnt_handle == NULL) {
*/
if (cp->gss_clnt_major != GSS_S_COMPLETE &&
cp->gss_clnt_major != GSS_S_CONTINUE_NEEDED) {
- char who[] = "server";
- (void) mach_gss_log_error(
- cp->gss_clnt_mport,
- vfs_statfs(nmp->nm_mountp)->f_mntfromname,
- cp->gss_clnt_uid,
- who,
- cp->gss_clnt_major,
- cp->gss_clnt_minor);
+ printf("nfs_gss_clnt_ctx_callserver: gss_clnt_major = %d\n", cp->gss_clnt_major);
+ nfs_gss_clnt_log_error(req, cp, major, minor);
+
}
nfsmout:
}
/*
- * Ugly hack to get the service principal from the f_mntfromname field in
- * the statfs struct. We assume a format of server:path. We don't currently
- * support url's or other bizarre formats like path@server. A better solution
- * here might be to allow passing the service principal down in the mount args.
- * For kerberos we just use the default realm.
+ * We construct the service principal as a gss hostbased service principal of
+ * the form nfs@<server>, unless the servers principal was passed down in the
+ * mount arguments. If the arguments don't specify the service principal, the
+ * server name is extracted the location passed in the mount argument if
+ * available. Otherwise assume a format of <server>:<path> in the
+ * mntfromname. We don't currently support url's or other bizarre formats like
+ * path@server. Mount_url will convert the nfs url into <server>:<path> when
+ * calling mount, so this works out well in practice.
+ *
*/
-static char *
-nfs_gss_clnt_svcname(struct nfsmount *nmp)
+
+static uint8_t *
+nfs_gss_clnt_svcname(struct nfsmount *nmp, gssd_nametype *nt, uint32_t *len)
{
-	char *svcname, *d;
-	char* mntfromhere = &vfs_statfs(nmp->nm_mountp)->f_mntfromname[0];
-	int len;
+	char *svcname, *d, *server;
+	int lindx, sindx;
-	len = strlen(mntfromhere) + 5; /* "nfs/" plus null */
-	MALLOC(svcname, char *, len, M_TEMP, M_NOWAIT);
-	if (svcname == NULL)
+	if (nfs_mount_gone(nmp))
		return (NULL);
-	strlcpy(svcname, "nfs/", len);
-	strlcat(svcname, mntfromhere, len);
-	d = strchr(svcname, ':');
-	if (d)
-		*d = '\0';
-	return (svcname);
+	/* An explicit server principal from the mount args wins outright */
+	if (nmp->nm_sprinc) {
+		*len = strlen(nmp->nm_sprinc) + 1;
+		MALLOC(svcname, char *, *len, M_TEMP, M_WAITOK);
+		*nt = GSSD_HOSTBASED;
+		if (svcname == NULL)
+			return (NULL);
+		strlcpy(svcname, nmp->nm_sprinc, *len);
+
+		return ((uint8_t *)svcname);
+	}
+
+	*nt = GSSD_HOSTBASED;
+	if (nmp->nm_locations.nl_numlocs && !(NFS_GSS_ISDBG && (NFS_DEBUG_FLAGS & 0x1))) {
+		lindx = nmp->nm_locations.nl_current.nli_loc;
+		sindx = nmp->nm_locations.nl_current.nli_serv;
+		server = nmp->nm_locations.nl_locations[lindx]->nl_servers[sindx]->ns_name;
+		*len = (uint32_t)strlen(server);
+	} else {
+		/* Older binaries using older mount args end up here */
+		server = vfs_statfs(nmp->nm_mountp)->f_mntfromname;
+		NFS_GSS_DBG("nfs getting gss svcname from %s\n", server);
+		d = strchr(server, ':');
+		*len = (uint32_t)(d ? (d - server) : strlen(server));
+	}
+
+	*len += 5; /* "nfs@" plus null */
+	MALLOC(svcname, char *, *len, M_TEMP, M_WAITOK);
+	if (svcname == NULL)		/* check added for consistency with the nm_sprinc branch above */
+		return (NULL);
+	strlcpy(svcname, "nfs", *len);
+	strlcat(svcname, "@", *len);
+	strlcat(svcname, server, *len);
+	NFS_GSS_DBG("nfs svcname = %s\n", svcname);
+
+	return ((uint8_t *)svcname);
+}
+
+/*
+ * Get a mach port to talk to gssd.
+ * gssd lives in the root bootstrap, so we call gssd's lookup routine
+ * to get a send right to talk to a new gssd instance that launchd has launched
+ * based on the cred's uid and audit session id.
+ *
+ * Returns the gssd instance port, or IPC_PORT_NULL on failure.
+ */
+
+static mach_port_t
+nfs_gss_clnt_get_upcall_port(kauth_cred_t credp)
+{
+	mach_port_t gssd_host_port, uc_port = IPC_PORT_NULL;
+	kern_return_t kr;
+	au_asid_t asid;
+	uid_t uid;
+
+	kr = host_get_gssd_port(host_priv_self(), &gssd_host_port);
+	if (kr != KERN_SUCCESS) {
+		printf("nfs_gss_get_upcall_port: can't get gssd port, status %x (%d)\n", kr, kr);
+		return (IPC_PORT_NULL);
+	}
+	if (!IPC_PORT_VALID(gssd_host_port)) {
+		/* NOTE(review): gssd_host_port is not released on this early return --
+		 * confirm whether an invalid port can still carry a right needing release. */
+		printf("nfs_gss_get_upcall_port: gssd port not valid\n");
+		return (IPC_PORT_NULL);
+	}
+
+	/* Look up by audit uid; fall back to the cred's uid when unaudited */
+	asid = kauth_cred_getasid(credp);
+	uid = kauth_cred_getauid(credp);
+	if (uid == AU_DEFAUDITID)
+		uid = kauth_cred_getuid(credp);
+	kr = mach_gss_lookup(gssd_host_port, uid, asid, &uc_port);
+	if (kr != KERN_SUCCESS)
+		printf("nfs_gss_clnt_get_upcall_port: mach_gssd_lookup failed: status %x (%d)\n", kr, kr);
+	host_release_special_port(gssd_host_port);
+
+	return (uc_port);
+}
+
+
+/*
+ * Log a GSS authentication failure, identifying the process, audit session
+ * and mount involved. Printing is rate-limited: a message goes to the system
+ * log only when the major/minor status changed or GSS_PRINT_DELAY elapsed;
+ * otherwise it only goes to the NFS GSS debug facility.
+ */
+static void
+nfs_gss_clnt_log_error(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp, uint32_t major, uint32_t minor)
+{
+#define GETMAJERROR(x) (((x) >> GSS_C_ROUTINE_ERROR_OFFSET) & GSS_C_ROUTINE_ERROR_MASK)
+	struct nfsmount *nmp = req->r_nmp;
+	char who[] = "client";
+	uint32_t gss_error = GETMAJERROR(cp->gss_clnt_major);
+	const char *procn = "unknown";	/* fixed typo: was "unkown" */
+	proc_t proc;
+	pid_t pid = -1;
+	struct timeval now;
+
+	if (req->r_thread) {
+		proc = (proc_t)get_bsdthreadtask_info(req->r_thread);
+		/* Skip procs with no file table or mid-vfork: their fields aren't stable */
+		if (proc != NULL && (proc->p_fd == NULL || (proc->p_lflag & P_LVFORK)))
+			proc = NULL;
+		if (proc) {
+			if (*proc->p_comm)
+				procn = proc->p_comm;
+			pid = proc->p_pid;
+		}
+	} else {
+		procn = "kernproc";
+		pid = 0;
+	}
+
+	microuptime(&now);
+	/* Print only on a status change or after the rate-limit delay has passed */
+	if ((cp->gss_clnt_major != major || cp->gss_clnt_minor != minor ||
+	     cp->gss_clnt_ptime + GSS_PRINT_DELAY < now.tv_sec) &&
+	    (nmp->nm_state & NFSSTA_MOUNTED)) {
+		/*
+		 * Will let gssd do some logging in hopes that it can translate
+		 * the minor code.
+		 */
+		if (cp->gss_clnt_minor && cp->gss_clnt_minor != minor) {
+			(void) mach_gss_log_error(
+				cp->gss_clnt_mport,
+				vfs_statfs(nmp->nm_mountp)->f_mntfromname,
+				kauth_cred_getuid(cp->gss_clnt_cred),
+				who,
+				cp->gss_clnt_major,
+				cp->gss_clnt_minor);
+		}
+		gss_error = gss_error ? gss_error : cp->gss_clnt_major;
+
+		/*
+		 *%%% It would be really nice to get the terminal from the proc or auditinfo_addr struct and print that here.
+		 */
+		printf("NFS: gssd auth failure by %s on audit session %d uid %d proc %s/%d for mount %s. Error: major = %d minor = %d\n",
+			cp->gss_clnt_display ? cp->gss_clnt_display : who, kauth_cred_getasid(req->r_cred), kauth_cred_getuid(req->r_cred),
+			procn, pid, vfs_statfs(nmp->nm_mountp)->f_mntfromname, gss_error, (int32_t)cp->gss_clnt_minor);
+		cp->gss_clnt_ptime = now.tv_sec;
+		switch (gss_error) {
+		case 7: printf("NFS: gssd does not have credentials for session %d/%d, (kinit)?\n",
+				kauth_cred_getasid(req->r_cred), kauth_cred_getauid(req->r_cred));
+			break;
+		case 11: printf("NFS: gssd has expired credentials for session %d/%d, (kinit)?\n",
+				kauth_cred_getasid(req->r_cred), kauth_cred_getauid(req->r_cred));
+			break;
+		}
+	} else {
+		NFS_GSS_DBG("NFS: gssd auth failure by %s on audit session %d uid %d proc %s/%d for mount %s. Error: major = %d minor = %d\n",
+			cp->gss_clnt_display ? cp->gss_clnt_display : who, kauth_cred_getasid(req->r_cred), kauth_cred_getuid(req->r_cred),
+			procn, pid, vfs_statfs(nmp->nm_mountp)->f_mntfromname, gss_error, (int32_t)cp->gss_clnt_minor);
+	}
+}
/*
* Make an upcall to the gssd using Mach RPC
- * The upcall is made using a task special port.
+ * The upcall is made using a host special port.
* This allows launchd to fire up the gssd in the
* user's session. This is important, since gssd
* must have access to the user's credential cache.
nfs_gss_clnt_gssd_upcall(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp)
{
kern_return_t kr;
- byte_buffer okey = NULL;
+ gssd_byte_buffer okey = NULL;
uint32_t skeylen = 0;
int retry_cnt = 0;
vm_map_copy_t itoken = NULL;
- byte_buffer otoken = NULL;
+ gssd_byte_buffer otoken = NULL;
+ mach_msg_type_number_t otokenlen;
int error = 0;
- char uprinc[1];
-
+ uint8_t *principal = NULL;
+ uint32_t plen = 0;
+ int32_t nt = GSSD_STRING_NAME;
+ vm_map_copy_t pname = NULL;
+ vm_map_copy_t svcname = NULL;
+ char display_name[MAX_DISPLAY_STR] = "";
+ uint32_t ret_flags;
+ uint32_t nfs_1des = (cp->gss_clnt_gssd_flags & GSSD_NFS_1DES);
+ struct nfsmount *nmp;
+ uint32_t major = cp->gss_clnt_major, minor = cp->gss_clnt_minor;
+
/*
* NFS currently only supports default principals or
- * principals based on the uid of the caller.
- *
- * N.B. Note we define a one character array for the principal
- * so that we can hold an empty string required by mach, since
- * the kernel is being compiled with -Wwrite-strings.
+ * principals based on the uid of the caller, unless
+ * the principal to use for the mounting cred was specified
+ * in the mount argmuments. If the realm to use was specified
+ * then will send that up as the principal since the realm is
+ * preceed by an "@" gssd that will try and select the default
+ * principal for that realm.
*/
- uprinc[0] = '\0';
- if (cp->gss_clnt_mport == NULL) {
- kr = task_get_gssd_port(get_threadtask(req->r_thread), &cp->gss_clnt_mport);
- if (kr != KERN_SUCCESS) {
- printf("nfs_gss_clnt_gssd_upcall: can't get gssd port, status %d\n", kr);
- return (EAUTH);
- }
- if (!IPC_PORT_VALID(cp->gss_clnt_mport)) {
- printf("nfs_gss_clnt_gssd_upcall: gssd port not valid\n");
- cp->gss_clnt_mport = NULL;
- return (EAUTH);
- }
+
+ nmp = req->r_nmp;
+ if (nmp == NULL || vfs_isforce(nmp->nm_mountp) || (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)))
+ return (ENXIO);
+
+ if (cp->gss_clnt_principal && cp->gss_clnt_prinlen) {
+ principal = cp->gss_clnt_principal;
+ plen = cp->gss_clnt_prinlen;
+ nt = cp->gss_clnt_prinnt;
+ } else if (nmp->nm_principal && IS_VALID_CRED(nmp->nm_mcred) && req->r_cred == nmp->nm_mcred) {
+ plen = (uint32_t)strlen(nmp->nm_principal);
+ MALLOC(principal, uint8_t *, plen, M_TEMP, M_WAITOK | M_ZERO);
+ if (principal == NULL)
+ return (ENOMEM);
+ bcopy(nmp->nm_principal, principal, plen);
+ cp->gss_clnt_prinnt = nt = GSSD_USER;
+ }
+ else if (nmp->nm_realm) {
+ plen = (uint32_t)strlen(nmp->nm_realm);
+ principal = (uint8_t *)nmp->nm_realm;
+ nt = GSSD_USER;
}
- if (cp->gss_clnt_tokenlen > 0)
+ if (!IPC_PORT_VALID(cp->gss_clnt_mport)) {
+ cp->gss_clnt_mport = nfs_gss_clnt_get_upcall_port(req->r_cred);
+ if (cp->gss_clnt_mport == IPC_PORT_NULL)
+ goto out;
+ }
+
+ if (plen)
+ nfs_gss_mach_alloc_buffer(principal, plen, &pname);
+ if (cp->gss_clnt_svcnamlen)
+ nfs_gss_mach_alloc_buffer(cp->gss_clnt_svcname, cp->gss_clnt_svcnamlen, &svcname);
+ if (cp->gss_clnt_tokenlen)
nfs_gss_mach_alloc_buffer(cp->gss_clnt_token, cp->gss_clnt_tokenlen, &itoken);
retry:
- kr = mach_gss_init_sec_context(
+ kr = mach_gss_init_sec_context_v2(
cp->gss_clnt_mport,
- KRB5_MECH,
- (byte_buffer) itoken, (mach_msg_type_number_t) cp->gss_clnt_tokenlen,
- cp->gss_clnt_uid,
- uprinc,
- cp->gss_clnt_svcname,
- GSSD_MUTUAL_FLAG | GSSD_NO_UI,
- &cp->gss_clnt_gssd_verf,
+ GSSD_KRB5_MECH,
+ (gssd_byte_buffer) itoken, (mach_msg_type_number_t) cp->gss_clnt_tokenlen,
+ kauth_cred_getuid(cp->gss_clnt_cred),
+ nt,
+ (gssd_byte_buffer)pname, (mach_msg_type_number_t) plen,
+ cp->gss_clnt_svcnt,
+ (gssd_byte_buffer)svcname, (mach_msg_type_number_t) cp->gss_clnt_svcnamlen,
+ GSSD_MUTUAL_FLAG,
+ &cp->gss_clnt_gssd_flags,
&cp->gss_clnt_context,
&cp->gss_clnt_cred_handle,
+ &ret_flags,
&okey, (mach_msg_type_number_t *) &skeylen,
- &otoken, (mach_msg_type_number_t *) &cp->gss_clnt_tokenlen,
+ &otoken, &otokenlen,
+ cp->gss_clnt_display ? NULL : display_name,
&cp->gss_clnt_major,
&cp->gss_clnt_minor);
- if (kr != 0) {
- printf("nfs_gss_clnt_gssd_upcall: mach_gss_init_sec_context failed: %x\n", kr);
+ /* Should be cleared and set in gssd ? */
+ cp->gss_clnt_gssd_flags &= ~GSSD_RESTART;
+ cp->gss_clnt_gssd_flags |= nfs_1des;
+
+ if (kr != KERN_SUCCESS) {
+ printf("nfs_gss_clnt_gssd_upcall: mach_gss_init_sec_context failed: %x (%d)\n", kr, kr);
if (kr == MIG_SERVER_DIED && cp->gss_clnt_cred_handle == 0 &&
- retry_cnt++ < NFS_GSS_MACH_MAX_RETRIES)
+ retry_cnt++ < NFS_GSS_MACH_MAX_RETRIES &&
+ !vfs_isforce(nmp->nm_mountp) && (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) == 0) {
+ if (plen)
+ nfs_gss_mach_alloc_buffer(principal, plen, &pname);
+ if (cp->gss_clnt_svcnamlen)
+ nfs_gss_mach_alloc_buffer(cp->gss_clnt_svcname, cp->gss_clnt_svcnamlen, &svcname);
+ if (cp->gss_clnt_tokenlen > 0)
+ nfs_gss_mach_alloc_buffer(cp->gss_clnt_token, cp->gss_clnt_tokenlen, &itoken);
goto retry;
- task_release_special_port(cp->gss_clnt_mport);
- cp->gss_clnt_mport = NULL;
- return (EAUTH);
+ }
+
+ host_release_special_port(cp->gss_clnt_mport);
+ cp->gss_clnt_mport = IPC_PORT_NULL;
+ goto out;
}
+ if (cp->gss_clnt_display == NULL && *display_name != '\0') {
+ int dlen = strnlen(display_name, MAX_DISPLAY_STR) + 1; /* Add extra byte to include '\0' */
+
+ if (dlen < MAX_DISPLAY_STR) {
+ MALLOC(cp->gss_clnt_display, char *, dlen, M_TEMP, M_WAITOK);
+ if (cp->gss_clnt_display == NULL)
+ goto skip;
+ bcopy(display_name, cp->gss_clnt_display, dlen);
+ } else {
+ goto skip;
+ }
+ }
+skip:
/*
* Make sure any unusual errors are expanded and logged by gssd
+ *
+ * XXXX, we need to rethink this and just have gssd return a string for the major and minor codes.
*/
if (cp->gss_clnt_major != GSS_S_COMPLETE &&
cp->gss_clnt_major != GSS_S_CONTINUE_NEEDED) {
- char who[] = "client";
-
- (void) mach_gss_log_error(
- cp->gss_clnt_mport,
- vfs_statfs(req->r_nmp->nm_mountp)->f_mntfromname,
- cp->gss_clnt_uid,
- who,
- cp->gss_clnt_major,
- cp->gss_clnt_minor);
+ nfs_gss_clnt_log_error(req, cp, major, minor);
}
if (skeylen > 0) {
- if (skeylen != SKEYLEN) {
+ if (skeylen != SKEYLEN && skeylen != SKEYLEN3) {
printf("nfs_gss_clnt_gssd_upcall: bad key length (%d)\n", skeylen);
- return (EAUTH);
+ vm_map_copy_discard((vm_map_copy_t) okey);
+ vm_map_copy_discard((vm_map_copy_t) otoken);
+ goto out;
}
- error = nfs_gss_mach_vmcopyout((vm_map_copy_t) okey, skeylen, cp->gss_clnt_skey);
+ error = nfs_gss_mach_vmcopyout((vm_map_copy_t) okey, skeylen,
+ cp->gss_clnt_kinfo->skey);
+ if (error) {
+ vm_map_copy_discard((vm_map_copy_t) otoken);
+ goto out;
+ }
+
+ error = gss_key_init(cp->gss_clnt_kinfo, skeylen);
if (error)
- return (EAUTH);
+ goto out;
}
- if (cp->gss_clnt_tokenlen > 0) {
- MALLOC(cp->gss_clnt_token, u_char *, cp->gss_clnt_tokenlen, M_TEMP, M_WAITOK);
- if (cp->gss_clnt_token == NULL)
+ /* Free context token used as input */
+ if (cp->gss_clnt_token)
+ FREE(cp->gss_clnt_token, M_TEMP);
+ cp->gss_clnt_token = NULL;
+ cp->gss_clnt_tokenlen = 0;
+
+ if (otokenlen > 0) {
+ /* Set context token to gss output token */
+ MALLOC(cp->gss_clnt_token, u_char *, otokenlen, M_TEMP, M_WAITOK);
+ if (cp->gss_clnt_token == NULL) {
+ printf("nfs_gss_clnt_gssd_upcall: could not allocate %d bytes\n", otokenlen);
+ vm_map_copy_discard((vm_map_copy_t) otoken);
return (ENOMEM);
- error = nfs_gss_mach_vmcopyout((vm_map_copy_t) otoken, cp->gss_clnt_tokenlen,
- cp->gss_clnt_token);
- if (error)
- return (EAUTH);
+ }
+ error = nfs_gss_mach_vmcopyout((vm_map_copy_t) otoken, otokenlen, cp->gss_clnt_token);
+ if (error) {
+ FREE(cp->gss_clnt_token, M_TEMP);
+ cp->gss_clnt_token = NULL;
+ return (NFSERR_EAUTH);
+ }
+ cp->gss_clnt_tokenlen = otokenlen;
}
return (0);
+
+out:
+ if (cp->gss_clnt_token)
+ FREE(cp->gss_clnt_token, M_TEMP);
+ cp->gss_clnt_token = NULL;
+ cp->gss_clnt_tokenlen = 0;
+
+ return (NFSERR_EAUTH);
}
/*
{
struct nfsmount *nmp = req->r_nmp;
struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx;
+ int on_neg_cache = 0;
+ int neg_cache = 0;
+ int destroy = 0;
+ struct timeval now;
+ char CTXBUF[NFS_CTXBUFSZ];
if (cp == NULL)
return;
req->r_gss_ctx = NULL;
lck_mtx_lock(cp->gss_clnt_mtx);
- if (--cp->gss_clnt_refcnt == 0
- && cp->gss_clnt_flags & GSS_CTX_INVAL) {
- lck_mtx_unlock(cp->gss_clnt_mtx);
-
- if (nmp)
+ if (--cp->gss_clnt_refcnt < 0)
+ panic("Over release of gss context!\n");
+
+ if (cp->gss_clnt_refcnt == 0) {
+ if ((cp->gss_clnt_flags & GSS_CTX_INVAL) &&
+ cp->gss_clnt_kinfo) {
+ FREE(cp->gss_clnt_kinfo, M_TEMP);
+ cp->gss_clnt_kinfo = NULL;
+ }
+ if (cp->gss_clnt_flags & GSS_CTX_DESTROY) {
+ destroy = 1;
+ if (cp->gss_clnt_flags & GSS_CTX_STICKY)
+ nfs_gss_clnt_mnt_rele(nmp);
+ if (cp->gss_clnt_nctime)
+ on_neg_cache = 1;
+ }
+ }
+ if (!destroy && cp->gss_clnt_nctime == 0 &&
+ (cp->gss_clnt_flags & GSS_CTX_INVAL)) {
+ microuptime(&now);
+ cp->gss_clnt_nctime = now.tv_sec;
+ neg_cache = 1;
+ }
+ lck_mtx_unlock(cp->gss_clnt_mtx);
+ if (destroy) {
+ NFS_GSS_DBG("Destroying context %s\n", NFS_GSS_CTX(req, cp));
+ if (nmp) {
lck_mtx_lock(&nmp->nm_lock);
- nfs_gss_clnt_ctx_remove(nmp, cp);
- if (nmp)
+ if (cp->gss_clnt_entries.tqe_next != NFSNOLIST) {
+ TAILQ_REMOVE(&nmp->nm_gsscl, cp, gss_clnt_entries);
+ }
+ if (on_neg_cache) {
+ nmp->nm_ncentries--;
+ }
lck_mtx_unlock(&nmp->nm_lock);
+ }
+ nfs_gss_clnt_ctx_destroy(cp);
+ } else if (neg_cache) {
+ NFS_GSS_DBG("Entering context %s into negative cache\n", NFS_GSS_CTX(req, cp));
+ if (nmp) {
+ lck_mtx_lock(&nmp->nm_lock);
+ nmp->nm_ncentries++;
+ nfs_gss_clnt_ctx_neg_cache_reap(nmp);
+ lck_mtx_unlock(&nmp->nm_lock);
+ }
+ }
+ NFS_GSS_CLNT_CTX_DUMP(nmp);
+}
- return;
+/*
+ * Try and reap any old, unreferenced, expired entries from the
+ * negative cache queue, keeping at most GSS_MAX_NEG_CACHE_ENTRIES.
+ */
+void
+nfs_gss_clnt_ctx_neg_cache_reap(struct nfsmount *nmp)
+{
+	struct nfs_gss_clnt_ctx *cp, *tcp;
+	struct timeval now;
+	int reaped = 0;
+
+	NFS_GSS_DBG("Reaping contexts ncentries = %d\n", nmp->nm_ncentries);
+	/* Try and reap old, unreferenced, expired contexts */
+	microuptime(&now);	/* fix: 'now' was read uninitialized in the age check below */
+
+	TAILQ_FOREACH_SAFE(cp, &nmp->nm_gsscl, gss_clnt_entries, tcp) {
+		int destroy = 0;
+
+		/* Don't reap STICKY contexts */
+		if ((cp->gss_clnt_flags & GSS_CTX_STICKY) ||
+		    !(cp->gss_clnt_flags & GSS_CTX_INVAL))
+			continue;
+		/* Keep up to GSS_MAX_NEG_CACHE_ENTRIES */
+		if (nmp->nm_ncentries <= GSS_MAX_NEG_CACHE_ENTRIES)
+			break;
+		/* Contexts too young */
+		if (cp->gss_clnt_nctime + GSS_NEG_CACHE_TO >= now.tv_sec)
+			continue;
+		/* Not referenced, remove it. */
+		lck_mtx_lock(cp->gss_clnt_mtx);
+		if (cp->gss_clnt_refcnt == 0) {
+			cp->gss_clnt_flags |= GSS_CTX_DESTROY;
+			destroy = 1;
+		}
+		lck_mtx_unlock(cp->gss_clnt_mtx);
+		if (destroy) {
+			TAILQ_REMOVE(&nmp->nm_gsscl, cp, gss_clnt_entries);
+			nmp->nm_ncentries--;	/* fix: was ++; removal must shrink the cache count */
+			reaped++;
+			nfs_gss_clnt_ctx_destroy(cp);
+		}
	}
-	lck_mtx_unlock(cp->gss_clnt_mtx);
+	NFS_GSS_DBG("Reaped %d contexts ncentries = %d\n", reaped, nmp->nm_ncentries);
}
/*
- * Remove a context
+ * Clean a context to be cached
*/
static void
-nfs_gss_clnt_ctx_remove(struct nfsmount *nmp, struct nfs_gss_clnt_ctx *cp)
+nfs_gss_clnt_ctx_clean(struct nfs_gss_clnt_ctx *cp)
{
+	/* Preserve gss_clnt_mtx */
+	assert(cp->gss_clnt_thread == NULL); /* Will be set to this thread */
+	/* gss_clnt_entries we should not be on any list at this point */
+	cp->gss_clnt_flags = 0;
+	/* gss_clnt_refcnt should be zero */
+	assert(cp->gss_clnt_refcnt == 0);
/*
- * If dequeueing, assume nmp->nm_lock is held
+ * We are who we are preserve:
+ * gss_clnt_cred
+ * gss_clnt_principal
+ * gss_clnt_prinlen
+ * gss_clnt_prinnt
+ * gss_clnt_display
*/
-	if (nmp != NULL)
-		TAILQ_REMOVE(&nmp->nm_gsscl, cp, gss_clnt_entries);
-
-	if (cp->gss_clnt_mport)
-		task_release_special_port(cp->gss_clnt_mport);
-	if (cp->gss_clnt_mtx)
-		lck_mtx_destroy(cp->gss_clnt_mtx, nfs_gss_clnt_grp);
-	if (cp->gss_clnt_handle)
+	/* gss_clnt_proc will be set in nfs_gss_clnt_ctx_init */
+	cp->gss_clnt_seqnum = 0;
+	/* Preserve gss_clnt_service, we're not changing flavors */
+	if (cp->gss_clnt_handle) {
		FREE(cp->gss_clnt_handle, M_TEMP);
-	if (cp->gss_clnt_seqbits)
+		cp->gss_clnt_handle = NULL;
+	}
+	cp->gss_clnt_handle_len = 0;
+	cp->gss_clnt_nctime = 0;	/* no longer on the negative cache */
+	cp->gss_clnt_seqwin = 0;
+	if (cp->gss_clnt_seqbits) {
		FREE(cp->gss_clnt_seqbits, M_TEMP);
-	if (cp->gss_clnt_token)
-		FREE(cp->gss_clnt_token, M_TEMP);
-	if (cp->gss_clnt_svcname)
+		cp->gss_clnt_seqbits = NULL;
+	}
+	/* Preserve gss_clnt_mport. Still talking to the same gssd */
+	if (cp->gss_clnt_verf) {
+		FREE(cp->gss_clnt_verf, M_TEMP);
+		cp->gss_clnt_verf = NULL;
+	}
+	/* Service name might change on failover, so reset it */
+	if (cp->gss_clnt_svcname) {
		FREE(cp->gss_clnt_svcname, M_TEMP);
+		cp->gss_clnt_svcname = NULL;
+		cp->gss_clnt_svcnt = 0;
+	}
+	cp->gss_clnt_svcnamlen = 0;
+	cp->gss_clnt_cred_handle = 0;
+	cp->gss_clnt_context = 0;
+	if (cp->gss_clnt_token) {
+		FREE(cp->gss_clnt_token, M_TEMP);
+		cp->gss_clnt_token = NULL;
+	}
+	cp->gss_clnt_tokenlen = 0;
+	/* Scrub old session-key material; guarded since destroy may have freed it */
+	if (cp->gss_clnt_kinfo)
+		bzero(cp->gss_clnt_kinfo, sizeof(gss_key_info));
+	/*
+	 * Preserve:
+	 * gss_clnt_gssd_flags
+	 * gss_clnt_major
+	 * gss_clnt_minor
+	 * gss_clnt_ptime
+	 */
+}
+
+/*
+ * Copy a source context to a new context. This is used to create a new context
+ * with the identity of the old context for renewal. The old context is invalid
+ * at this point but may have reference still to it, so it is not safe to use that
+ * context.
+ *
+ * If ki is NULL a fresh gss_key_info is allocated for the copy; otherwise the
+ * caller-supplied ki is adopted (and, on failure, remains owned by the caller).
+ * Returns 0 with *dcpp set, or ENOMEM.
+ */
+static int
+nfs_gss_clnt_ctx_copy(struct nfs_gss_clnt_ctx *scp, struct nfs_gss_clnt_ctx **dcpp, gss_key_info *ki)
+{
+	struct nfs_gss_clnt_ctx *dcp;
+
+	*dcpp = (struct nfs_gss_clnt_ctx *)NULL;
+	MALLOC(dcp, struct nfs_gss_clnt_ctx *, sizeof (struct nfs_gss_clnt_ctx), M_TEMP, M_WAITOK);
+	if (dcp == NULL)
+		return (ENOMEM);
+	bzero(dcp, sizeof (struct nfs_gss_clnt_ctx));
+	if (ki == NULL) {
+		MALLOC(dcp->gss_clnt_kinfo, gss_key_info *, sizeof (gss_key_info), M_TEMP, M_WAITOK);
+		if (dcp->gss_clnt_kinfo == NULL) {
+			FREE(dcp, M_TEMP);
+			return (ENOMEM);
+		}
+	} else {
+		dcp->gss_clnt_kinfo = ki;
+	}
+	bzero(dcp->gss_clnt_kinfo, sizeof (gss_key_info));
+	dcp->gss_clnt_mtx = lck_mtx_alloc_init(nfs_gss_clnt_grp, LCK_ATTR_NULL);
+	dcp->gss_clnt_cred = scp->gss_clnt_cred;
+	kauth_cred_ref(dcp->gss_clnt_cred);
+	dcp->gss_clnt_prinlen = scp->gss_clnt_prinlen;
+	dcp->gss_clnt_prinnt = scp->gss_clnt_prinnt;
+	if (scp->gss_clnt_principal) {
+		MALLOC(dcp->gss_clnt_principal, uint8_t *, dcp->gss_clnt_prinlen, M_TEMP, M_WAITOK | M_ZERO);
+		if (dcp->gss_clnt_principal == NULL) {
+			/*
+			 * Unwind everything acquired above: drop the cred ref and
+			 * the mutex, and free the key info only if we allocated it
+			 * here -- a caller-supplied ki is not ours to free.
+			 */
+			kauth_cred_unref(&dcp->gss_clnt_cred);
+			lck_mtx_free(dcp->gss_clnt_mtx, nfs_gss_clnt_grp);
+			if (ki == NULL)
+				FREE(dcp->gss_clnt_kinfo, M_TEMP);
+			FREE(dcp, M_TEMP);
+			return (ENOMEM);
+		}
+		bcopy(scp->gss_clnt_principal, dcp->gss_clnt_principal, dcp->gss_clnt_prinlen);
+	}
+	/* Note we don't preserve the display name, that will be set by a successful up call */
+	dcp->gss_clnt_service = scp->gss_clnt_service;
+	dcp->gss_clnt_mport = host_copy_special_port(scp->gss_clnt_mport);
+	/* gss_clnt_kinfo allocated above */
+	dcp->gss_clnt_gssd_flags = scp->gss_clnt_gssd_flags;
+	dcp->gss_clnt_major = scp->gss_clnt_major;
+	dcp->gss_clnt_minor = scp->gss_clnt_minor;
+	dcp->gss_clnt_ptime = scp->gss_clnt_ptime;
+
+	*dcpp = dcp;
+
+	return (0);
+}
+
+/*
+ * Remove a context: release the gssd port, the mutex, the cred reference
+ * and all allocated fields, then free the context itself.
+ */
+static void
+nfs_gss_clnt_ctx_destroy(struct nfs_gss_clnt_ctx *cp)
+{
+	NFS_GSS_DBG("Destroying context %d/%d\n",
+	    kauth_cred_getasid(cp->gss_clnt_cred),
+	    kauth_cred_getauid(cp->gss_clnt_cred));
+
+	host_release_special_port(cp->gss_clnt_mport);
+	cp->gss_clnt_mport = IPC_PORT_NULL;
+
+	if (cp->gss_clnt_mtx) {
+		lck_mtx_destroy(cp->gss_clnt_mtx, nfs_gss_clnt_grp);
+		cp->gss_clnt_mtx = (lck_mtx_t *)NULL;
+	}
+	if (IS_VALID_CRED(cp->gss_clnt_cred))
+		kauth_cred_unref(&cp->gss_clnt_cred);
+	/* Mark the entry as off any mount list */
+	cp->gss_clnt_entries.tqe_next = NFSNOLIST;
+	cp->gss_clnt_entries.tqe_prev = NFSNOLIST;
+	if (cp->gss_clnt_principal) {
+		FREE(cp->gss_clnt_principal, M_TEMP);
+		cp->gss_clnt_principal = NULL;
+	}
+	if (cp->gss_clnt_display) {
+		FREE(cp->gss_clnt_display, M_TEMP);
+		cp->gss_clnt_display = NULL;
+	}
+	if (cp->gss_clnt_kinfo) {
+		FREE(cp->gss_clnt_kinfo, M_TEMP);
+		cp->gss_clnt_kinfo = NULL;
+	}
+
+	/*
+	 * Frees the remaining per-context allocations; safe after the
+	 * frees above because every freed pointer was NULLed first.
+	 */
+	nfs_gss_clnt_ctx_clean(cp);
+
	FREE(cp, M_TEMP);
}
nfs_gss_clnt_ctx_renew(struct nfsreq *req)
{
struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx;
- struct nfsmount *nmp = req->r_nmp;
struct nfs_gss_clnt_ctx *ncp;
+ struct nfsmount *nmp;
int error = 0;
- uid_t saved_uid;
- mach_port_t saved_mport;
- int retrycnt = 0;
+ char CTXBUF[NFS_CTXBUFSZ];
- if (cp == NULL || !(cp->gss_clnt_flags & GSS_CTX_COMPLETE))
+ if (cp == NULL)
return (0);
+ if (req->r_nmp == NULL)
+ return (ENXIO);
+ nmp = req->r_nmp;
+
lck_mtx_lock(cp->gss_clnt_mtx);
if (cp->gss_clnt_flags & GSS_CTX_INVAL) {
lck_mtx_unlock(cp->gss_clnt_mtx);
nfs_gss_clnt_ctx_unref(req);
return (0); // already being renewed
}
- saved_uid = cp->gss_clnt_uid;
- saved_mport = task_copy_special_port(cp->gss_clnt_mport);
- /* Remove the old context */
- lck_mtx_lock(&nmp->nm_lock);
- cp->gss_clnt_flags |= GSS_CTX_INVAL;
- lck_mtx_unlock(&nmp->nm_lock);
+ cp->gss_clnt_flags |= (GSS_CTX_INVAL | GSS_CTX_DESTROY);
- /*
- * If there's a thread waiting
- * in the old context, wake it up.
- */
if (cp->gss_clnt_flags & (GSS_NEEDCTX | GSS_NEEDSEQ)) {
cp->gss_clnt_flags &= ~GSS_NEEDSEQ;
wakeup(cp);
}
lck_mtx_unlock(cp->gss_clnt_mtx);
-retry:
- /*
- * Create a new context
- */
- MALLOC(ncp, struct nfs_gss_clnt_ctx *, sizeof(*ncp),
- M_TEMP, M_WAITOK|M_ZERO);
- if (ncp == NULL) {
- return (ENOMEM);
- }
+ error = nfs_gss_clnt_ctx_copy(cp, &ncp, NULL);
+ NFS_GSS_DBG("Renewing context %s\n", NFS_GSS_CTX(req, ncp));
+ nfs_gss_clnt_ctx_unref(req);
+ if (error)
+ return (error);
- ncp->gss_clnt_uid = saved_uid;
- ncp->gss_clnt_mport = task_copy_special_port(saved_mport); // re-use the gssd port
- ncp->gss_clnt_mtx = lck_mtx_alloc_init(nfs_gss_clnt_grp, LCK_ATTR_NULL);
- ncp->gss_clnt_thread = current_thread();
lck_mtx_lock(&nmp->nm_lock);
- TAILQ_INSERT_TAIL(&nmp->nm_gsscl, ncp, gss_clnt_entries);
- lck_mtx_unlock(&nmp->nm_lock);
-
- /* Adjust reference counts to new and old context */
- nfs_gss_clnt_ctx_unref(req);
+ /*
+ * Note we don't bother taking the new context mutex as we're
+ * not findable at the moment.
+ */
+ ncp->gss_clnt_thread = current_thread();
nfs_gss_clnt_ctx_ref(req, ncp);
+ TAILQ_INSERT_HEAD(&nmp->nm_gsscl, ncp, gss_clnt_entries);
+ lck_mtx_unlock(&nmp->nm_lock);
- error = nfs_gss_clnt_ctx_init(req, ncp); // Initialize new context
- if (error == ENEEDAUTH) {
- error = nfs_gss_clnt_ctx_delay(req, &retrycnt);
- if (!error)
- goto retry;
- }
-
- task_release_special_port(saved_mport);
+ error = nfs_gss_clnt_ctx_init_retry(req, ncp); // Initialize new context
if (error)
nfs_gss_clnt_ctx_unref(req);
return (error);
}
+
/*
* Destroy all the contexts associated with a mount.
* The contexts are also destroyed by the server.
*/
void
-nfs_gss_clnt_ctx_unmount(struct nfsmount *nmp, int mntflags)
+nfs_gss_clnt_ctx_unmount(struct nfsmount *nmp)
{
struct nfs_gss_clnt_ctx *cp;
- struct ucred temp_cred;
- kauth_cred_t cred;
struct nfsm_chain nmreq, nmrep;
- u_int64_t xid;
int error, status;
struct nfsreq req;
-
- bzero((caddr_t) &temp_cred, sizeof(temp_cred));
- temp_cred.cr_ngroups = 1;
req.r_nmp = nmp;
- for (;;) {
- lck_mtx_lock(&nmp->nm_lock);
- cp = TAILQ_FIRST(&nmp->nm_gsscl);
- lck_mtx_unlock(&nmp->nm_lock);
- if (cp == NULL)
- break;
+ if (!nmp)
+ return;
- nfs_gss_clnt_ctx_ref(&req, cp);
+ lck_mtx_lock(&nmp->nm_lock);
+ while((cp = TAILQ_FIRST(&nmp->nm_gsscl))) {
+ TAILQ_REMOVE(&nmp->nm_gsscl, cp, gss_clnt_entries);
+ cp->gss_clnt_entries.tqe_next = NFSNOLIST;
+ lck_mtx_lock(cp->gss_clnt_mtx);
+ if (cp->gss_clnt_flags & GSS_CTX_DESTROY) {
+ lck_mtx_unlock(cp->gss_clnt_mtx);
+ continue;
+ }
+ cp->gss_clnt_refcnt++;
+ lck_mtx_unlock(cp->gss_clnt_mtx);
+ req.r_gss_ctx = cp;
+
+ lck_mtx_unlock(&nmp->nm_lock);
/*
* Tell the server to destroy its context.
- * But don't bother if it's a forced unmount
- * or if it's a dummy sec=sys context.
+ * But don't bother if it's a forced unmount.
*/
- if (!(mntflags & MNT_FORCE) && cp->gss_clnt_service != RPCSEC_GSS_SVC_SYS) {
- temp_cred.cr_uid = cp->gss_clnt_uid;
- cred = kauth_cred_create(&temp_cred);
+ if (!nfs_mount_gone(nmp) &&
+ (cp->gss_clnt_flags & (GSS_CTX_INVAL | GSS_CTX_DESTROY | GSS_CTX_COMPLETE)) == GSS_CTX_COMPLETE) {
cp->gss_clnt_proc = RPCSEC_GSS_DESTROY;
error = 0;
nfsm_chain_build_alloc_init(error, &nmreq, 0);
nfsm_chain_build_done(error, &nmreq);
if (!error)
- nfs_request2(NULL, nmp->nm_mountp, &nmreq, NFSPROC_NULL,
- current_thread(), cred, 0, &nmrep, &xid, &status);
+ nfs_request_gss(nmp->nm_mountp, &nmreq,
+ current_thread(), cp->gss_clnt_cred, 0, cp, &nmrep, &status);
nfsm_chain_cleanup(&nmreq);
nfsm_chain_cleanup(&nmrep);
- kauth_cred_unref(&cred);
}
/*
* the reference to remove it if its
* refcount is zero.
*/
- cp->gss_clnt_flags |= GSS_CTX_INVAL;
+ lck_mtx_lock(cp->gss_clnt_mtx);
+ cp->gss_clnt_flags |= (GSS_CTX_INVAL | GSS_CTX_DESTROY);
+ lck_mtx_unlock(cp->gss_clnt_mtx);
nfs_gss_clnt_ctx_unref(&req);
+ lck_mtx_lock(&nmp->nm_lock);
}
+ lck_mtx_unlock(&nmp->nm_lock);
+ assert(TAILQ_EMPTY(&nmp->nm_gsscl));
}
+
/*
- * If we get a failure in trying to establish a context we need to wait a
- * little while to see if the server is feeling better. In our case this is
- * probably a failure in directory services not coming up in a timely fashion.
- * This routine sort of mimics receiving a jukebox error.
+ * Removes a mounts context for a credential
*/
-static int
-nfs_gss_clnt_ctx_delay(struct nfsreq *req, int *retry)
+int
+nfs_gss_clnt_ctx_remove(struct nfsmount *nmp, kauth_cred_t cred)
{
- int timeo = (1 << *retry) * NFS_TRYLATERDEL;
- int error = 0;
- struct nfsmount *nmp = req->r_nmp;
- struct timeval now;
- time_t waituntil;
+ struct nfs_gss_clnt_ctx *cp;
+ struct nfsreq req;
- if ((nmp->nm_flag & NFSMNT_SOFT) && *retry > nmp->nm_retry)
- return (ETIMEDOUT);
- if (timeo > 60)
- timeo = 60;
+ /* Build a minimal request so nfs_gss_clnt_ctx_unref can find the mount */
+ req.r_nmp = nmp;
- microuptime(&now);
- waituntil = now.tv_sec + timeo;
- while (now.tv_sec < waituntil) {
- tsleep(&lbolt, PSOCK, "nfs_gss_clnt_ctx_delay", 0);
- error = nfs_sigintr(nmp, req, current_thread(), 0);
- if (error)
- break;
- microuptime(&now);
+ NFS_GSS_DBG("Enter\n");
+ NFS_GSS_CLNT_CTX_DUMP(nmp);
+ /* Scan the mount's context list for the first live context whose
+ * credential matches; contexts already marked for destruction are skipped. */
+ lck_mtx_lock(&nmp->nm_lock);
+ TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) {
+ lck_mtx_lock(cp->gss_clnt_mtx);
+ if (nfs_gss_clnt_ctx_cred_match(cp->gss_clnt_cred, cred)) {
+ if (cp->gss_clnt_flags & GSS_CTX_DESTROY) {
+ NFS_GSS_DBG("Found destroyed context %d/%d. refcnt = %d continuing\n",
+ kauth_cred_getasid(cp->gss_clnt_cred),
+ kauth_cred_getauid(cp->gss_clnt_cred),
+ cp->gss_clnt_refcnt);
+ lck_mtx_unlock(cp->gss_clnt_mtx);
+ continue;
+ }
+ /* Take a reference and mark it invalid/destroyed so no new user finds it */
+ cp->gss_clnt_refcnt++;
+ cp->gss_clnt_flags |= (GSS_CTX_INVAL | GSS_CTX_DESTROY);
+ lck_mtx_unlock(cp->gss_clnt_mtx);
+ req.r_gss_ctx = cp;
+ lck_mtx_unlock(&nmp->nm_lock);
+ /*
+ * Drop the reference to remove it if its
+ * refcount is zero.
+ */
+ NFS_GSS_DBG("Removed context %d/%d refcnt = %d\n",
+ kauth_cred_getasid(cp->gss_clnt_cred),
+ kauth_cred_getuid(cp->gss_clnt_cred),
+ cp->gss_clnt_refcnt);
+ nfs_gss_clnt_ctx_unref(&req);
+ return (0);
+ }
+ lck_mtx_unlock(cp->gss_clnt_mtx);
}
- *retry += 1;
+
+ lck_mtx_unlock(&nmp->nm_lock);
+
+ /* No matching live context on this mount */
+ NFS_GSS_DBG("Returning ENOENT\n");
+ return (ENOENT);
+}
+
+/*
+ * Sets a mounts principal for a session associated with cred.
+ */
+int
+nfs_gss_clnt_ctx_set_principal(struct nfsmount *nmp, vfs_context_t ctx,
+ uint8_t *principal, uint32_t princlen, uint32_t nametype)
+
+{
+ struct nfsreq req;
+ int error;
+
+ NFS_GSS_DBG("Enter:\n");
+
+ /* Fabricate a request carrying the caller's thread and credential so the
+ * context lookup/creation path runs on behalf of this caller. */
+ bzero(&req, sizeof(struct nfsreq));
+ req.r_nmp = nmp;
+ req.r_gss_ctx = NULL;
+ req.r_auth = nmp->nm_auth;
+ req.r_thread = vfs_context_thread(ctx);
+ req.r_cred = vfs_context_ucred(ctx);
+
+ error = nfs_gss_clnt_ctx_find_principal(&req, principal, princlen, nametype);
+ NFS_GSS_DBG("nfs_gss_clnt_ctx_find_principal returned %d\n", error);
+ /*
+ * We don't care about auth errors. Those would indicate that the context is in the
+ * negative cache and if and when the user has credentials for the principal
+ * we should be good to go in that we will select those credentials for this principal.
+ */
+ if (error == EACCES || error == EAUTH || error == ENEEDAUTH)
+ error = 0;
+
+ /* We're done with this request */
+ nfs_gss_clnt_ctx_unref(&req);
return (error);
}
+/*
+ * Gets a mounts principal from a session associated with cred
+ */
+int
+nfs_gss_clnt_ctx_get_principal(struct nfsmount *nmp, vfs_context_t ctx,
+ struct user_nfs_gss_principal *p)
+{
+ struct nfsreq req;
+ int error = 0;
+ struct nfs_gss_clnt_ctx *cp;
+ kauth_cred_t cred = vfs_context_ucred(ctx);
+ const char *princ;
+ char CTXBUF[NFS_CTXBUFSZ];
+
+ req.r_nmp = nmp;
+ /* Find the first live context matching the caller's credential.
+ * If the loop runs to completion without a match, TAILQ_FOREACH
+ * leaves cp == NULL, which the code after "out:" relies on. */
+ lck_mtx_lock(&nmp->nm_lock);
+ TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) {
+ lck_mtx_lock(cp->gss_clnt_mtx);
+ if (cp->gss_clnt_flags & GSS_CTX_DESTROY) {
+ NFS_GSS_DBG("Found destroyed context %s refcnt = %d continuing\n",
+ NFS_GSS_CTX(&req, cp),
+ cp->gss_clnt_refcnt);
+ lck_mtx_unlock(cp->gss_clnt_mtx);
+ continue;
+ }
+ if (nfs_gss_clnt_ctx_cred_match(cp->gss_clnt_cred, cred)) {
+ /* Hold a reference while we copy fields out; dropped below */
+ cp->gss_clnt_refcnt++;
+ lck_mtx_unlock(cp->gss_clnt_mtx);
+ goto out;
+ }
+ lck_mtx_unlock(cp->gss_clnt_mtx);
+ }
+
+out:
+ if (cp == NULL) {
+ lck_mtx_unlock(&nmp->nm_lock);
+ p->princlen = 0;
+ p->principal = USER_ADDR_NULL;
+ p->nametype = GSSD_STRING_NAME;
+ /* NOTE(review): p->flags is only modified here, never in the
+ * found-context path — assumes the caller zeroed *p; confirm. */
+ p->flags |= NFS_IOC_NO_CRED_FLAG;
+ NFS_GSS_DBG("No context found for session %d by uid %d\n",
+ kauth_cred_getasid(cred), kauth_cred_getuid(cred));
+ return (0);
+ }
+
+ /* Prefer the raw principal if set, else fall back to the display name */
+ princ = cp->gss_clnt_principal ? (char *)cp->gss_clnt_principal : cp->gss_clnt_display;
+ p->princlen = cp->gss_clnt_principal ? cp->gss_clnt_prinlen :
+ (cp->gss_clnt_display ? strlen(cp->gss_clnt_display) : 0);
+ p->nametype = cp->gss_clnt_prinnt;
+ if (princ) {
+ char *pp;
+
+ /* NOTE(review): M_WAITOK allocation while still holding nm_lock —
+ * confirm blocking here is acceptable. Ownership of pp passes to
+ * the caller via p->principal; presumably freed after copyout. */
+ MALLOC(pp, char *, p->princlen, M_TEMP, M_WAITOK);
+ if (pp) {
+ bcopy(princ, pp, p->princlen);
+ p->principal = CAST_USER_ADDR_T(pp);
+ }
+ else
+ error = ENOMEM;
+ }
+ lck_mtx_unlock(&nmp->nm_lock);
+ /* Release the reference taken in the search loop */
+ req.r_gss_ctx = cp;
+ NFS_GSS_DBG("Found context %s\n", NFS_GSS_CTX(&req, NULL));
+ nfs_gss_clnt_ctx_unref(&req);
+ return (error);
+}
#endif /* NFSCLIENT */
/*************
{
struct nfs_gss_svc_ctx_hashhead *head;
struct nfs_gss_svc_ctx *cp;
-
+ uint64_t timenow;
+
+ if (handle == 0)
+ return (NULL);
+
head = &nfs_gss_svc_ctx_hashtbl[SVC_CTX_HASH(handle)];
+ /*
+ * Don't return a context that is going to expire in GSS_CTX_PEND seconds
+ */
+ clock_interval_to_deadline(GSS_CTX_PEND, NSEC_PER_SEC, &timenow);
lck_mtx_lock(nfs_gss_svc_ctx_mutex);
- LIST_FOREACH(cp, head, gss_svc_entries)
- if (cp->gss_svc_handle == handle)
+
+ LIST_FOREACH(cp, head, gss_svc_entries) {
+ if (cp->gss_svc_handle == handle) {
+ if (timenow > cp->gss_svc_incarnation + GSS_SVC_CTX_TTL) {
+ /*
+ * Context has or is about to expire. Don't use.
+ * We'll return null and the client will have to create
+ * a new context.
+ */
+ cp->gss_svc_handle = 0;
+ /*
+ * Make sure though that we stay around for GSS_CTX_PEND seconds
+ * for other threads that might be using the context.
+ */
+ cp->gss_svc_incarnation = timenow;
+
+ cp = NULL;
+ break;
+ }
+ lck_mtx_lock(cp->gss_svc_mtx);
+ cp->gss_svc_refcnt++;
+ lck_mtx_unlock(cp->gss_svc_mtx);
break;
+ }
+ }
+
lck_mtx_unlock(nfs_gss_svc_ctx_mutex);
return (cp);
nfs_gss_svc_ctx_insert(struct nfs_gss_svc_ctx *cp)
{
struct nfs_gss_svc_ctx_hashhead *head;
+ struct nfs_gss_svc_ctx *p;
+ lck_mtx_lock(nfs_gss_svc_ctx_mutex);
+
+ /*
+ * Give the client a random handle so that if we reboot
+ * it's unlikely the client will get a bad context match.
+ * Make sure it's not zero or already assigned.
+ */
+retry:
+ cp->gss_svc_handle = random();
+ if (cp->gss_svc_handle == 0)
+ goto retry;
head = &nfs_gss_svc_ctx_hashtbl[SVC_CTX_HASH(cp->gss_svc_handle)];
+ LIST_FOREACH(p, head, gss_svc_entries)
+ if (p->gss_svc_handle == cp->gss_svc_handle)
+ goto retry;
- lck_mtx_lock(nfs_gss_svc_ctx_mutex);
+ clock_interval_to_deadline(GSS_CTX_PEND, NSEC_PER_SEC,
+ &cp->gss_svc_incarnation);
LIST_INSERT_HEAD(head, cp, gss_svc_entries);
nfs_gss_ctx_count++;
if (!nfs_gss_timer_on) {
nfs_gss_timer_on = 1;
+
nfs_interval_timer_start(nfs_gss_svc_ctx_timer_call,
- GSS_TIMER_PERIOD * MSECS_PER_SEC);
+ min(GSS_TIMER_PERIOD, max(GSS_CTX_TTL_MIN, nfsrv_gss_context_ttl)) * MSECS_PER_SEC);
}
+
lck_mtx_unlock(nfs_gss_svc_ctx_mutex);
}
void
nfs_gss_svc_ctx_timer(__unused void *param1, __unused void *param2)
{
- struct nfs_gss_svc_ctx_hashhead *head;
struct nfs_gss_svc_ctx *cp, *next;
uint64_t timenow;
int contexts = 0;
lck_mtx_lock(nfs_gss_svc_ctx_mutex);
clock_get_uptime(&timenow);
+ NFS_GSS_DBG("is running\n");
+
/*
* Scan all the hash chains
- * Assume nfs_gss_svc_ctx_mutex is held
*/
for (i = 0; i < SVC_CTX_HASHSZ; i++) {
/*
* For each hash chain, look for entries
* that haven't been used in a while.
*/
- head = &nfs_gss_svc_ctx_hashtbl[i];
- for (cp = LIST_FIRST(head); cp; cp = next) {
+ LIST_FOREACH_SAFE(cp, &nfs_gss_svc_ctx_hashtbl[i], gss_svc_entries, next) {
contexts++;
- next = LIST_NEXT(cp, gss_svc_entries);
- if (timenow > cp->gss_svc_expiretime) {
+ if (timenow > cp->gss_svc_incarnation +
+ (cp->gss_svc_handle ? GSS_SVC_CTX_TTL : 0)
+ && cp->gss_svc_refcnt == 0) {
/*
* A stale context - remove it
*/
LIST_REMOVE(cp, gss_svc_entries);
+ NFS_GSS_DBG("Removing contex for %d\n", cp->gss_svc_uid);
if (cp->gss_svc_seqbits)
FREE(cp->gss_svc_seqbits, M_TEMP);
lck_mtx_destroy(cp->gss_svc_mtx, nfs_gss_svc_grp);
nfs_gss_timer_on = nfs_gss_ctx_count > 0;
if (nfs_gss_timer_on)
nfs_interval_timer_start(nfs_gss_svc_ctx_timer_call,
- GSS_TIMER_PERIOD * MSECS_PER_SEC);
+ min(GSS_TIMER_PERIOD, max(GSS_CTX_TTL_MIN, nfsrv_gss_context_ttl)) * MSECS_PER_SEC);
lck_mtx_unlock(nfs_gss_svc_ctx_mutex);
}
uint32_t flavor = 0, verflen = 0;
int error = 0;
uint32_t arglen, start, toklen, cksumlen;
- u_char tokbuf[KRB5_SZ_TOKMAX];
- u_char cksum1[8], cksum2[8];
+ u_char tokbuf[KRB5_SZ_TOKMAX(MAX_DIGEST)];
+ u_char cksum1[MAX_DIGEST], cksum2[MAX_DIGEST];
struct nfsm_chain nmc_tmp;
-
+ gss_key_info *ki;
+
vers = proc = seqnum = service = handle_len = 0;
arglen = cksumlen = 0;
error = ENOMEM;
goto nfsmout;
}
+ cp->gss_svc_mtx = lck_mtx_alloc_init(nfs_gss_svc_grp, LCK_ATTR_NULL);
+ cp->gss_svc_refcnt = 1;
} else {
/*
}
cp->gss_svc_proc = proc;
+ ki = &cp->gss_svc_kinfo;
if (proc == RPCSEC_GSS_DATA || proc == RPCSEC_GSS_DESTROY) {
- struct ucred temp_cred;
+ struct posix_cred temp_pcred;
if (cp->gss_svc_seqwin == 0) {
/*
}
/* Now compute the client's call header checksum */
- nfs_gss_cksum_chain(cp->gss_svc_sched, nmc, krb5_mic, 0, 0, cksum1);
+ nfs_gss_cksum_chain(ki, nmc, ALG_MIC(ki), 0, 0, cksum1);
/*
* Validate the verifier.
*/
nfsm_chain_get_32(error, nmc, flavor);
nfsm_chain_get_32(error, nmc, verflen);
- if (flavor != RPCSEC_GSS || verflen != KRB5_SZ_TOKEN)
+ if (error)
+ goto nfsmout;
+ if (flavor != RPCSEC_GSS || verflen != KRB5_SZ_TOKEN(ki->hash_len))
error = NFSERR_AUTHERR | AUTH_BADVERF;
nfsm_chain_get_opaque(error, nmc, verflen, tokbuf);
if (error)
goto nfsmout;
/* Get the checksum from the token inside the verifier */
- error = nfs_gss_token_get(cp->gss_svc_sched, krb5_mic, tokbuf, 1,
+ error = nfs_gss_token_get(ki, ALG_MIC(ki), tokbuf, 1,
NULL, cksum2);
if (error)
goto nfsmout;
- if (bcmp(cksum1, cksum2, 8) != 0) {
+ if (bcmp(cksum1, cksum2, HASHLEN(ki)) != 0) {
error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;
goto nfsmout;
}
/*
* Set up the user's cred
*/
- bzero(&temp_cred, sizeof(temp_cred));
- temp_cred.cr_uid = cp->gss_svc_uid;
- bcopy(cp->gss_svc_gids, temp_cred.cr_groups,
+ bzero(&temp_pcred, sizeof(temp_pcred));
+ temp_pcred.cr_uid = cp->gss_svc_uid;
+ bcopy(cp->gss_svc_gids, temp_pcred.cr_groups,
sizeof(gid_t) * cp->gss_svc_ngroups);
- temp_cred.cr_ngroups = cp->gss_svc_ngroups;
+ temp_pcred.cr_ngroups = cp->gss_svc_ngroups;
- nd->nd_cr = kauth_cred_create(&temp_cred);
+ nd->nd_cr = posix_cred_create(&temp_pcred);
if (nd->nd_cr == NULL) {
error = ENOMEM;
goto nfsmout;
}
- clock_interval_to_deadline(GSS_CTX_EXPIRE, NSEC_PER_SEC,
- &cp->gss_svc_expiretime);
+ clock_get_uptime(&cp->gss_svc_incarnation);
/*
* If the call arguments are integrity or privacy protected
/* Compute the checksum over the call args */
start = nfsm_chain_offset(nmc);
- nfs_gss_cksum_chain(cp->gss_svc_sched, nmc, krb5_mic, start, arglen, cksum1);
+ nfs_gss_cksum_chain(ki, nmc, ALG_MIC(ki), start, arglen, cksum1);
/*
* Get the sequence number prepended to the args
arglen -= NFSX_UNSIGNED; // skipped seqnum
nfsm_chain_adv(error, &nmc_tmp, arglen); // skip args
nfsm_chain_get_32(error, &nmc_tmp, cksumlen); // length of checksum
- if (cksumlen != KRB5_SZ_TOKEN) {
+ if (cksumlen != KRB5_SZ_TOKEN(ki->hash_len)) {
error = EBADRPC;
goto nfsmout;
}
nfsm_chain_get_opaque(error, &nmc_tmp, cksumlen, tokbuf);
if (error)
goto nfsmout;
- error = nfs_gss_token_get(cp->gss_svc_sched, krb5_mic, tokbuf, 1,
+ error = nfs_gss_token_get(ki, ALG_MIC(ki), tokbuf, 1,
NULL, cksum2);
/* Verify that the checksums are the same */
- if (error || bcmp(cksum1, cksum2, 8) != 0) {
+ if (error || bcmp(cksum1, cksum2, HASHLEN(ki)) != 0) {
error = EBADRPC;
goto nfsmout;
}
}
/* Get the token that prepends the encrypted args */
- nfsm_chain_get_opaque(error, nmc, KRB5_SZ_TOKMAX, tokbuf);
+ nfsm_chain_get_opaque(error, nmc, KRB5_SZ_TOKMAX(ki->hash_len), tokbuf);
if (error)
goto nfsmout;
- error = nfs_gss_token_get(cp->gss_svc_sched, krb5_wrap, tokbuf, 1,
- &toklen, cksum1);
+ error = nfs_gss_token_get(ki, ALG_WRAP(ki), tokbuf, 1,
+ &toklen, cksum1);
if (error)
goto nfsmout;
nfsm_chain_reverse(nmc, nfsm_pad(toklen));
/* decrypt the 8 byte confounder + seqnum + args */
start = nfsm_chain_offset(nmc);
arglen -= toklen;
- nfs_gss_encrypt_chain(cp->gss_svc_skey, nmc, start, arglen, DES_DECRYPT);
+ nfs_gss_encrypt_chain(ki, nmc, start, arglen, DES_DECRYPT);
/* Compute a checksum over the sequence number + results */
- nfs_gss_cksum_chain(cp->gss_svc_sched, nmc, krb5_wrap, start, arglen, cksum2);
+ nfs_gss_cksum_chain(ki, nmc, ALG_WRAP(ki), start, arglen, cksum2);
/* Verify that the checksums are the same */
- if (bcmp(cksum1, cksum2, 8) != 0) {
+ if (bcmp(cksum1, cksum2, HASHLEN(ki)) != 0) {
error = EBADRPC;
goto nfsmout;
}
nfsm_chain_get_32(error, nmc, verflen);
if (error || flavor != RPCAUTH_NULL || verflen > 0)
error = NFSERR_AUTHERR | RPCSEC_GSS_CREDPROBLEM;
- if (error)
+ if (error) {
+ if (proc == RPCSEC_GSS_INIT) {
+ lck_mtx_destroy(cp->gss_svc_mtx, nfs_gss_svc_grp);
+ FREE(cp, M_TEMP);
+ cp = NULL;
+ }
goto nfsmout;
+ }
}
nd->nd_gss_context = cp;
+ return 0;
nfsmout:
+ if (cp)
+ nfs_gss_svc_ctx_deref(cp);
return (error);
}
{
struct nfs_gss_svc_ctx *cp;
int error = 0;
- u_char tokbuf[KRB5_SZ_TOKEN];
+ u_char tokbuf[KRB5_SZ_TOKEN(MAX_DIGEST)];
int toklen;
- u_char cksum[8];
+ u_char cksum[MAX_DIGEST];
+ gss_key_info *ki;
cp = nd->nd_gss_context;
-
+ ki = &cp->gss_svc_kinfo;
+
if (cp->gss_svc_major != GSS_S_COMPLETE) {
/*
* If the context isn't yet complete
*/
if (cp->gss_svc_proc == RPCSEC_GSS_INIT ||
cp->gss_svc_proc == RPCSEC_GSS_CONTINUE_INIT)
- nfs_gss_cksum_rep(cp->gss_svc_sched, cp->gss_svc_seqwin, cksum);
+ nfs_gss_cksum_rep(ki, cp->gss_svc_seqwin, cksum);
else
- nfs_gss_cksum_rep(cp->gss_svc_sched, nd->nd_gss_seqnum, cksum);
+ nfs_gss_cksum_rep(ki, nd->nd_gss_seqnum, cksum);
/*
* Now wrap it in a token and add
* the verifier to the reply.
*/
- toklen = nfs_gss_token_put(cp->gss_svc_sched, krb5_mic, tokbuf, 0, 0, cksum);
+ toklen = nfs_gss_token_put(ki, ALG_MIC(ki), tokbuf, 0, 0, cksum);
nfsm_chain_add_32(error, nmc, RPCSEC_GSS);
nfsm_chain_add_32(error, nmc, toklen);
nfsm_chain_add_opaque(error, nmc, tokbuf, toklen);
struct nfsm_chain nmrep_pre, *nmc_pre = &nmrep_pre;
mbuf_t mb, results;
uint32_t reslen;
- u_char tokbuf[KRB5_SZ_TOKMAX];
+ u_char tokbuf[KRB5_SZ_TOKMAX(MAX_DIGEST)];
int pad, toklen;
- u_char cksum[8];
+ u_char cksum[MAX_DIGEST];
int error = 0;
+ gss_key_info *ki = &cp->gss_svc_kinfo;
/*
* Using a reference to the mbuf where we previously split the reply
nfs_gss_append_chain(nmc_pre, results); // Append the results mbufs
/* Now compute the checksum over the results data */
- nfs_gss_cksum_mchain(cp->gss_svc_sched, results, krb5_mic, 0, reslen, cksum);
+ nfs_gss_cksum_mchain(ki, results, ALG_MIC(ki), 0, reslen, cksum);
/* Put it into a token and append to the request */
- toklen = nfs_gss_token_put(cp->gss_svc_sched, krb5_mic, tokbuf, 0, 0, cksum);
+ toklen = nfs_gss_token_put(ki, ALG_MIC(ki), tokbuf, 0, 0, cksum);
nfsm_chain_add_32(error, nmc_res, toklen);
nfsm_chain_add_opaque(error, nmc_res, tokbuf, toklen);
nfsm_chain_build_done(error, nmc_res);
nfsm_chain_build_done(error, nmc_res);
/* Now compute the checksum over the results data */
- nfs_gss_cksum_mchain(cp->gss_svc_sched, results, krb5_wrap, 0, reslen, cksum);
+ nfs_gss_cksum_mchain(ki, results, ALG_WRAP(ki), 0, reslen, cksum);
/* Put it into a token and insert in the reply */
- toklen = nfs_gss_token_put(cp->gss_svc_sched, krb5_wrap, tokbuf, 0, reslen, cksum);
+ toklen = nfs_gss_token_put(ki, ALG_WRAP(ki), tokbuf, 0, reslen, cksum);
nfsm_chain_add_32(error, nmc_pre, toklen + reslen);
nfsm_chain_add_opaque_nopad(error, nmc_pre, tokbuf, toklen);
nfsm_chain_build_done(error, nmc_pre);
nfs_gss_append_chain(nmc_pre, results); // Append the results mbufs
/* Encrypt the confounder + seqnum + results */
- nfs_gss_encrypt_mchain(cp->gss_svc_skey, results, 0, reslen, DES_ENCRYPT);
+ nfs_gss_encrypt_mchain(ki, results, 0, reslen, DES_ENCRYPT);
/* Add null XDR pad if the ASN.1 token misaligned the data */
pad = nfsm_pad(toklen + reslen);
nfs_gss_svc_ctx_init(struct nfsrv_descript *nd, struct nfsrv_sock *slp, mbuf_t *mrepp)
{
struct nfs_gss_svc_ctx *cp = NULL;
- uint32_t handle = 0;
int error = 0;
int autherr = 0;
struct nfsm_chain *nmreq, nmrep;
switch (cp->gss_svc_proc) {
case RPCSEC_GSS_INIT:
- /*
- * Give the client a random handle so that
- * if we reboot it's unlikely the client
- * will get a bad context match.
- * Make sure it's not zero, or already assigned.
- */
- do {
- handle = random();
- } while (nfs_gss_svc_ctx_find(handle) != NULL || handle == 0);
- cp->gss_svc_handle = handle;
- cp->gss_svc_mtx = lck_mtx_alloc_init(nfs_gss_svc_grp, LCK_ATTR_NULL);
- clock_interval_to_deadline(GSS_CTX_PEND, NSEC_PER_SEC,
- &cp->gss_svc_expiretime);
-
nfs_gss_svc_ctx_insert(cp);
-
/* FALLTHRU */
case RPCSEC_GSS_CONTINUE_INIT:
error = nfs_gss_svc_gssd_upcall(cp);
if (error) {
autherr = RPCSEC_GSS_CREDPROBLEM;
- if (error == EAUTH)
+ if (error == NFSERR_EAUTH)
error = 0;
break;
}
* Now the server context is complete.
* Finish setup.
*/
- clock_interval_to_deadline(GSS_CTX_EXPIRE, NSEC_PER_SEC,
- &cp->gss_svc_expiretime);
+ clock_get_uptime(&cp->gss_svc_incarnation);
+
cp->gss_svc_seqwin = GSS_SVC_SEQWINDOW;
MALLOC(cp->gss_svc_seqbits, uint32_t *,
nfsm_rndup((cp->gss_svc_seqwin + 7) / 8), M_TEMP, M_WAITOK|M_ZERO);
autherr = RPCSEC_GSS_CREDPROBLEM;
break;
}
-
- /*
- * Generate a key schedule from our shiny new DES key
- */
- error = des_key_sched((des_cblock *) cp->gss_svc_skey, cp->gss_svc_sched);
- if (error) {
- autherr = RPCSEC_GSS_CREDPROBLEM;
- error = 0;
- break;
- }
break;
case RPCSEC_GSS_DATA:
cp->gss_svc_handle = 0; // so it can't be found
lck_mtx_lock(cp->gss_svc_mtx);
clock_interval_to_deadline(GSS_CTX_PEND, NSEC_PER_SEC,
- &cp->gss_svc_expiretime);
+ &cp->gss_svc_incarnation);
lck_mtx_unlock(cp->gss_svc_mtx);
}
break;
nfsm_chain_add_32(error, &nmrep, cp->gss_svc_seqwin);
nfsm_chain_add_32(error, &nmrep, cp->gss_svc_tokenlen);
- nfsm_chain_add_opaque(error, &nmrep, cp->gss_svc_token, cp->gss_svc_tokenlen);
if (cp->gss_svc_token != NULL) {
+ nfsm_chain_add_opaque(error, &nmrep, cp->gss_svc_token, cp->gss_svc_tokenlen);
FREE(cp->gss_svc_token, M_TEMP);
cp->gss_svc_token = NULL;
}
nfsmout:
if (autherr != 0) {
+ nd->nd_gss_context = NULL;
LIST_REMOVE(cp, gss_svc_entries);
if (cp->gss_svc_seqbits != NULL)
FREE(cp->gss_svc_seqbits, M_TEMP);
kern_return_t kr;
mach_port_t mp;
int retry_cnt = 0;
- byte_buffer okey = NULL;
+ gssd_byte_buffer okey = NULL;
uint32_t skeylen = 0;
+ uint32_t ret_flags;
vm_map_copy_t itoken = NULL;
- byte_buffer otoken = NULL;
+ gssd_byte_buffer otoken = NULL;
+ mach_msg_type_number_t otokenlen;
int error = 0;
char svcname[] = "nfs";
- kr = task_get_gssd_port(get_threadtask(current_thread()), &mp);
+ kr = host_get_gssd_port(host_priv_self(), &mp);
if (kr != KERN_SUCCESS) {
- printf("nfs_gss_svc_gssd_upcall: can't get gssd port, status 0x%08x\n", kr);
- return (EAUTH);
+ printf("nfs_gss_svc_gssd_upcall: can't get gssd port, status %x (%d)\n", kr, kr);
+ goto out;
}
if (!IPC_PORT_VALID(mp)) {
printf("nfs_gss_svc_gssd_upcall: gssd port not valid\n");
- return (EAUTH);
+ goto out;
}
if (cp->gss_svc_tokenlen > 0)
retry:
kr = mach_gss_accept_sec_context(
mp,
- (byte_buffer) itoken, (mach_msg_type_number_t) cp->gss_svc_tokenlen,
+ (gssd_byte_buffer) itoken, (mach_msg_type_number_t) cp->gss_svc_tokenlen,
svcname,
0,
- &cp->gss_svc_gssd_verf,
&cp->gss_svc_context,
&cp->gss_svc_cred_handle,
+ &ret_flags,
&cp->gss_svc_uid,
cp->gss_svc_gids,
&cp->gss_svc_ngroups,
&okey, (mach_msg_type_number_t *) &skeylen,
- &otoken, (mach_msg_type_number_t *) &cp->gss_svc_tokenlen,
+ &otoken, &otokenlen,
&cp->gss_svc_major,
&cp->gss_svc_minor);
if (kr != KERN_SUCCESS) {
- printf("nfs_gss_svc_gssd_upcall failed: %d\n", kr);
+ printf("nfs_gss_svc_gssd_upcall failed: %x (%d)\n", kr, kr);
if (kr == MIG_SERVER_DIED && cp->gss_svc_context == 0 &&
- retry_cnt++ < NFS_GSS_MACH_MAX_RETRIES)
+ retry_cnt++ < NFS_GSS_MACH_MAX_RETRIES) {
+ if (cp->gss_svc_tokenlen > 0)
+ nfs_gss_mach_alloc_buffer(cp->gss_svc_token, cp->gss_svc_tokenlen, &itoken);
goto retry;
- task_release_special_port(mp);
- return (EAUTH);
+ }
+ host_release_special_port(mp);
+ goto out;
}
- task_release_special_port(mp);
+ host_release_special_port(mp);
+
if (skeylen > 0) {
- if (skeylen != SKEYLEN) {
+ if (skeylen != SKEYLEN && skeylen != SKEYLEN3) {
printf("nfs_gss_svc_gssd_upcall: bad key length (%d)\n", skeylen);
- return (EAUTH);
+ vm_map_copy_discard((vm_map_copy_t) okey);
+ vm_map_copy_discard((vm_map_copy_t) otoken);
+ goto out;
+ }
+ error = nfs_gss_mach_vmcopyout((vm_map_copy_t) okey, skeylen, cp->gss_svc_kinfo.skey);
+ if (error) {
+ vm_map_copy_discard((vm_map_copy_t) otoken);
+ goto out;
}
- error = nfs_gss_mach_vmcopyout((vm_map_copy_t) okey, skeylen, cp->gss_svc_skey);
+ error = gss_key_init(&cp->gss_svc_kinfo, skeylen);
if (error)
- return (EAUTH);
+ goto out;
+
}
- if (cp->gss_svc_tokenlen > 0) {
- MALLOC(cp->gss_svc_token, u_char *, cp->gss_svc_tokenlen, M_TEMP, M_WAITOK);
- if (cp->gss_svc_token == NULL)
+ /* Free context token used as input */
+ if (cp->gss_svc_token)
+ FREE(cp->gss_svc_token, M_TEMP);
+ cp->gss_svc_token = NULL;
+ cp->gss_svc_tokenlen = 0;
+
+ if (otokenlen > 0) {
+ /* Set context token to gss output token */
+ MALLOC(cp->gss_svc_token, u_char *, otokenlen, M_TEMP, M_WAITOK);
+ if (cp->gss_svc_token == NULL) {
+ printf("nfs_gss_svc_gssd_upcall: could not allocate %d bytes\n", otokenlen);
+ vm_map_copy_discard((vm_map_copy_t) otoken);
return (ENOMEM);
- error = nfs_gss_mach_vmcopyout((vm_map_copy_t) otoken, cp->gss_svc_tokenlen,
- cp->gss_svc_token);
- if (error)
- return (EAUTH);
+ }
+ error = nfs_gss_mach_vmcopyout((vm_map_copy_t) otoken, otokenlen, cp->gss_svc_token);
+ if (error) {
+ FREE(cp->gss_svc_token, M_TEMP);
+ cp->gss_svc_token = NULL;
+ return (NFSERR_EAUTH);
+ }
+ cp->gss_svc_tokenlen = otokenlen;
}
- return (kr);
+ return (0);
+
+out:
+ FREE(cp->gss_svc_token, M_TEMP);
+ cp->gss_svc_tokenlen = 0;
+ cp->gss_svc_token = NULL;
+
+ return (NFSERR_EAUTH);
}
/*
return (1);
}
+/*
+ * Drop a reference to a context
+ *
+ * Note that it's OK for the context to exist
+ * with a refcount of zero. The refcount isn't
+ * checked until we're about to reap an expired one.
+ */
+void
+nfs_gss_svc_ctx_deref(struct nfs_gss_svc_ctx *cp)
+{
+ lck_mtx_lock(cp->gss_svc_mtx);
+ /* Underflow guard: log instead of wrapping the count below zero */
+ if (cp->gss_svc_refcnt > 0)
+ cp->gss_svc_refcnt--;
+ else
+ printf("nfs_gss_ctx_deref: zero refcount\n");
+ lck_mtx_unlock(cp->gss_svc_mtx);
+}
+
/*
* Called at NFS server shutdown - destroy all contexts
*/
*/
/*
- * Release a task special port that was obtained by task_get_special_port
- * or one of its macros (task_get_gssd_port in this case).
+ * Release a host special port that was obtained by host_get_special_port
+ * or one of its macros (host_get_gssd_port in this case).
* This really should be in a public kpi.
*/
extern ipc_port_t ipc_port_copy_send(ipc_port_t);
static void
-task_release_special_port(mach_port_t mp)
+host_release_special_port(mach_port_t mp)
{
-
- ipc_port_release_send(mp);
+ /* Only release a genuine send right; IPC_PORT_NULL/IPC_PORT_DEAD are no-ops */
+ if (IPC_PORT_VALID(mp))
+ ipc_port_release_send(mp);
}
static mach_port_t
-task_copy_special_port(mach_port_t mp)
+host_copy_special_port(mach_port_t mp)
{
- return ipc_port_copy_send(mp);
+ /* Duplicate the send right so caller and original holder each own one */
+ return (ipc_port_copy_send(mp));
}
/*
if (buf == NULL || buflen == 0)
return;
- tbuflen = round_page(buflen);
- kr = vm_allocate(ipc_kernel_map, &kmem_buf, tbuflen, VM_FLAGS_ANYWHERE);
+ tbuflen = vm_map_round_page(buflen,
+ vm_map_page_mask(ipc_kernel_map));
+ kr = vm_allocate(ipc_kernel_map, &kmem_buf, tbuflen, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_FILE));
if (kr != 0) {
printf("nfs_gss_mach_alloc_buffer: vm_allocate failed\n");
return;
}
- kr = vm_map_wire(ipc_kernel_map, vm_map_trunc_page(kmem_buf),
- vm_map_round_page(kmem_buf + tbuflen),
- VM_PROT_READ|VM_PROT_WRITE, FALSE);
-
+ kr = vm_map_wire(ipc_kernel_map,
+ vm_map_trunc_page(kmem_buf,
+ vm_map_page_mask(ipc_kernel_map)),
+ vm_map_round_page(kmem_buf + tbuflen,
+ vm_map_page_mask(ipc_kernel_map)),
+ VM_PROT_READ|VM_PROT_WRITE|VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_FILE), FALSE);
+ if (kr != 0) {
+ printf("nfs_gss_mach_alloc_buffer: vm_map_wire failed\n");
+ return;
+ }
+
bcopy(buf, (void *) kmem_buf, buflen);
-
- kr = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(kmem_buf),
- vm_map_round_page(kmem_buf + tbuflen), FALSE);
+ // Shouldn't need to bzero below since vm_allocate returns zeroed pages
+ // bzero(kmem_buf + buflen, tbuflen - buflen);
+
+ kr = vm_map_unwire(ipc_kernel_map,
+ vm_map_trunc_page(kmem_buf,
+ vm_map_page_mask(ipc_kernel_map)),
+ vm_map_round_page(kmem_buf + tbuflen,
+ vm_map_page_mask(ipc_kernel_map)),
+ FALSE);
if (kr != 0) {
printf("nfs_gss_mach_alloc_buffer: vm_map_unwire failed\n");
return;
printf("nfs_gss_mach_alloc_buffer: vm_map_copyin failed\n");
return;
}
-
- if (buflen != tbuflen)
- kmem_free(ipc_kernel_map, kmem_buf + buflen, tbuflen - buflen);
}
/*
*/
static int
nfs_gss_token_put(
- des_key_schedule sched,
+ gss_key_info *ki,
u_char *alg,
u_char *p,
int initiator,
* MIC token, or 35 + encrypted octets for a wrap token;
*/
*p++ = 0x060;
- toklen = KRB5_SZ_MECH + KRB5_SZ_ALG + KRB5_SZ_SEQ + KRB5_SZ_CKSUM;
+ toklen = KRB5_SZ_MECH + KRB5_SZ_ALG + KRB5_SZ_SEQ + HASHLEN(ki);
nfs_gss_der_length_put(&p, toklen + datalen);
/*
plain[i] = (u_char) ((seqnum >> (i * 8)) & 0xff);
for (i = 4; i < 8; i++)
plain[i] = initiator ? 0x00 : 0xff;
- des_cbc_encrypt((des_cblock *) plain, (des_cblock *) p, 8,
- sched, (des_cblock *) cksum, NULL, DES_ENCRYPT);
+ gss_des_crypt(ki, (des_cblock *) plain, (des_cblock *) p, 8,
+ (des_cblock *) cksum, NULL, DES_ENCRYPT, KG_USAGE_SEQ);
p += 8;
/*
- * Finally, append 8 octets of DES MAC MD5
+ * Finally, append the octets of the
* checksum of the alg + plaintext data.
* The plaintext could be an RPC call header,
* the window value, or a sequence number.
*/
- bcopy(cksum, p, 8);
- p += 8;
+ bcopy(cksum, p, HASHLEN(ki));
+ p += HASHLEN(ki);
return (p - psave);
}
*/
static int
nfs_gss_token_get(
- des_key_schedule sched,
+ gss_key_info *ki,
u_char *alg,
u_char *p,
int initiator,
/*
* Now decrypt the sequence number.
- * Note that the DES CBC decryption uses the first 8 octets
+ * Note that the gss decryption uses the first 8 octets
* of the checksum field as an initialization vector (p + 8).
* Per RFC 2203 section 5.2.2 we don't check the sequence number
* in the ASN.1 token because the RPCSEC_GSS protocol has its
* own sequence number described in section 5.3.3.1
*/
seqnum = 0;
- des_cbc_encrypt((des_cblock *) p, (des_cblock *) plain, 8,
- sched, (des_cblock *) (p + 8), NULL, DES_DECRYPT);
+ gss_des_crypt(ki, (des_cblock *)p, (des_cblock *) plain, 8,
+ (des_cblock *) (p + 8), NULL, DES_DECRYPT, KG_USAGE_SEQ);
p += 8;
for (i = 0; i < 4; i++)
seqnum |= plain[i] << (i * 8);
/*
* Finally, get the checksum
*/
- bcopy(p, cksum, 8);
- p += 8;
+ bcopy(p, cksum, HASHLEN(ki));
+ p += HASHLEN(ki);
if (len != NULL)
*len = p - psave;
*/
static void
nfs_gss_cksum_mchain(
- des_key_schedule sched,
+ gss_key_info *ki,
mbuf_t mhead,
u_char *alg,
int offset,
int len,
- u_char *cksum)
+ u_char *digest)
{
mbuf_t mb;
u_char *ptr;
int left, bytes;
- MD5_CTX context;
- u_char digest[16];
+ GSS_DIGEST_CTX context;
- MD5Init(&context);
+ gss_digest_Init(&context, ki);
/*
* Logically prepend the first 8 bytes of the algorithm
* field as required by RFC 1964, section 1.2.1.1
*/
- MD5Update(&context, alg, KRB5_SZ_ALG);
+ gss_digest_Update(&context, alg, KRB5_SZ_ALG);
/*
* Move down the mbuf chain until we reach the given
bytes = left < len ? left : len;
if (bytes > 0)
- MD5Update(&context, ptr, bytes);
+ gss_digest_Update(&context, ptr, bytes);
len -= bytes;
}
- MD5Final(digest, &context);
-
- /*
- * Now get the DES CBC checksum for the digest.
- */
- (void) des_cbc_cksum((des_cblock *) digest, (des_cblock *) cksum,
- sizeof(digest), sched, (des_cblock *) iv0);
+ gss_digest_Final(&context, digest);
}
/*
*/
static void
nfs_gss_cksum_chain(
- des_key_schedule sched,
+ gss_key_info *ki,
struct nfsm_chain *nmc,
u_char *alg,
int offset,
if (len == 0)
len = nfsm_chain_offset(nmc) - offset;
- return (nfs_gss_cksum_mchain(sched, nmc->nmc_mhead, alg, offset, len, cksum));
+ return (nfs_gss_cksum_mchain(ki, nmc->nmc_mhead, alg, offset, len, cksum));
}
/*
* of an RPCSEC_GSS reply.
*/
static void
-nfs_gss_cksum_rep(des_key_schedule sched, uint32_t seqnum, u_char *cksum)
+nfs_gss_cksum_rep(gss_key_info *ki, uint32_t seqnum, u_char *cksum)
{
- MD5_CTX context;
- u_char digest[16];
+ GSS_DIGEST_CTX context;
uint32_t val = htonl(seqnum);
- MD5Init(&context);
+ gss_digest_Init(&context, ki);
/*
* Logically prepend the first 8 bytes of the MIC
* token as required by RFC 1964, section 1.2.1.1
*/
- MD5Update(&context, krb5_mic, KRB5_SZ_ALG);
+ gss_digest_Update(&context, ALG_MIC(ki), KRB5_SZ_ALG);
/*
* Compute the digest of the seqnum in network order
*/
- MD5Update(&context, (u_char *) &val, 4);
- MD5Final(digest, &context);
-
- /*
- * Now get the DES CBC checksum for the digest.
- */
- (void) des_cbc_cksum((des_cblock *) digest, (des_cblock *) cksum,
- sizeof(digest), sched, (des_cblock *) iv0);
+ gss_digest_Update(&context, &val, 4);
+ gss_digest_Final(&context, cksum);
}
/*
*/
static void
nfs_gss_encrypt_mchain(
- u_char *key,
+ gss_key_info *ki,
mbuf_t mhead,
int offset,
int len,
int encrypt)
{
- des_key_schedule sched;
mbuf_t mb, mbn;
u_char *ptr, *nptr;
u_char tmp[8], ivec[8];
- int i, left, left8, remain;
+ int left, left8, remain;
- /*
- * Make the key schedule per RFC 1964 section 1.2.2.3
- */
- for (i = 0; i < 8; i++)
- tmp[i] = key[i] ^ 0xf0;
- bzero(ivec, 8);
- (void) des_key_sched((des_cblock *) tmp, sched);
+ bzero(ivec, 8);
/*
* Move down the mbuf chain until we reach the given
offset = 0;
/*
- * DES CBC has to encrypt 8 bytes at a time.
+ * DES or DES3 CBC has to encrypt 8 bytes at a time.
* If the number of bytes to be encrypted in this
* mbuf isn't some multiple of 8 bytes, encrypt all
* the 8 byte blocks, then combine the remaining
left8 = left - remain;
left = left8 < len ? left8 : len;
if (left > 0) {
- des_cbc_encrypt((des_cblock *) ptr, (des_cblock *) ptr, left, sched,
- (des_cblock *) ivec, (des_cblock *) ivec, encrypt);
+ gss_des_crypt(ki, (des_cblock *) ptr, (des_cblock *) ptr,
+ left, &ivec, &ivec, encrypt, KG_USAGE_SEAL);
len -= left;
}
offset = 8 - remain;
bcopy(ptr + left, tmp, remain); // grab from this mbuf
bcopy(nptr, tmp + remain, offset); // grab from next mbuf
- des_cbc_encrypt((des_cblock *) tmp, (des_cblock *) tmp, 8, sched,
- (des_cblock *) ivec, (des_cblock *) ivec, encrypt);
+ gss_des_crypt(ki, (des_cblock *) tmp, (des_cblock *) tmp, 8,
+ &ivec, &ivec, encrypt, KG_USAGE_SEAL);
bcopy(tmp, ptr + left, remain); // return to this mbuf
bcopy(tmp + remain, nptr, offset); // return to next mbuf
len -= 8;
*/
static void
nfs_gss_encrypt_chain(
- u_char *key,
+ gss_key_info *ki,
struct nfsm_chain *nmc,
int offset,
int len,
if (len == 0)
len = nfsm_chain_offset(nmc) - offset;
- return (nfs_gss_encrypt_mchain(key, nmc->nmc_mhead, offset, len, encrypt));
+ return (nfs_gss_encrypt_mchain(ki, nmc->nmc_mhead, offset, len, encrypt));
}
/*
- * XXX This function borrowed from OpenBSD.
- * It will likely be moved into kernel crypto.
+ * The routines that follow provide abstractions for doing digests and crypto.
*/
-static DES_LONG
-des_cbc_cksum(input, output, length, schedule, ivec)
- des_cblock (*input);
- des_cblock (*output);
- long length;
- des_key_schedule schedule;
- des_cblock (*ivec);
-{
- register unsigned long tout0,tout1,tin0,tin1;
- register long l=length;
- unsigned long tin[2];
- unsigned char *in,*out,*iv;
-
- in=(unsigned char *)input;
- out=(unsigned char *)output;
- iv=(unsigned char *)ivec;
-
- c2l(iv,tout0);
- c2l(iv,tout1);
- for (; l>0; l-=8) {
- if (l >= 8) {
- c2l(in,tin0);
- c2l(in,tin1);
- } else
- c2ln(in,tin0,tin1,l);
-
- tin0^=tout0; tin[0]=tin0;
- tin1^=tout1; tin[1]=tin1;
- des_encrypt1((DES_LONG *)tin,schedule,DES_ENCRYPT);
- /* fix 15/10/91 eay - thanks to keithr@sco.COM */
- tout0=tin[0];
- tout1=tin[1];
+
+static void
+gss_digest_Init(GSS_DIGEST_CTX *ctx, gss_key_info *ki)
+{
+ ctx->type = ki->type;
+ switch (ki->type) {
+ case NFS_GSS_1DES: MD5_DESCBC_Init(&ctx->m_ctx, &ki->ks_u.des.gss_sched);
+ break;
+ case NFS_GSS_3DES: HMAC_SHA1_DES3KD_Init(&ctx->h_ctx, ki->ks_u.des3.ckey, 0);
+ break;
+ default:
+ printf("gss_digest_Init: Unknown key info type %d\n", ki->type);
}
- if (out != NULL) {
- l2c(tout0,out);
- l2c(tout1,out);
+}
+
+static void
+gss_digest_Update(GSS_DIGEST_CTX *ctx, void *data, size_t len)
+{
+ switch (ctx->type) {
+ case NFS_GSS_1DES: MD5_DESCBC_Update(&ctx->m_ctx, data, len);
+ break;
+ case NFS_GSS_3DES: HMAC_SHA1_DES3KD_Update(&ctx->h_ctx, data, len);
+ break;
}
- tout0=tin0=tin1=tin[0]=tin[1]=0;
- return(tout1);
}
-/*
- * XXX This function borrowed from OpenBSD.
- * It will likely be moved into kernel crypto.
- */
static void
-des_cbc_encrypt(input, output, length, schedule, ivec, retvec, encrypt)
- des_cblock (*input);
- des_cblock (*output);
- long length;
- des_key_schedule schedule;
- des_cblock (*ivec);
- des_cblock (*retvec);
- int encrypt;
-{
- register unsigned long tin0,tin1;
- register unsigned long tout0,tout1,xor0,xor1;
- register unsigned char *in,*out,*retval;
- register long l=length;
- unsigned long tin[2];
- unsigned char *iv;
- tin0 = tin1 = 0;
-
- in=(unsigned char *)input;
- out=(unsigned char *)output;
- retval=(unsigned char *)retvec;
- iv=(unsigned char *)ivec;
-
- if (encrypt) {
- c2l(iv,tout0);
- c2l(iv,tout1);
- for (l-=8; l>=0; l-=8) {
- c2l(in,tin0);
- c2l(in,tin1);
- tin0^=tout0; tin[0]=tin0;
- tin1^=tout1; tin[1]=tin1;
- des_encrypt1((DES_LONG *)tin,schedule,DES_ENCRYPT);
- tout0=tin[0]; l2c(tout0,out);
- tout1=tin[1]; l2c(tout1,out);
- }
- if (l != -8) {
- c2ln(in,tin0,tin1,l+8);
- tin0^=tout0; tin[0]=tin0;
- tin1^=tout1; tin[1]=tin1;
- des_encrypt1((DES_LONG *)tin,schedule,DES_ENCRYPT);
- tout0=tin[0]; l2c(tout0,out);
- tout1=tin[1]; l2c(tout1,out);
- }
- if (retval) {
- l2c(tout0,retval);
- l2c(tout1,retval);
- }
- } else {
- c2l(iv,xor0);
- c2l(iv,xor1);
- for (l-=8; l>=0; l-=8) {
- c2l(in,tin0); tin[0]=tin0;
- c2l(in,tin1); tin[1]=tin1;
- des_encrypt1((DES_LONG *)tin,schedule,DES_DECRYPT);
- tout0=tin[0]^xor0;
- tout1=tin[1]^xor1;
- l2c(tout0,out);
- l2c(tout1,out);
- xor0=tin0;
- xor1=tin1;
- }
- if (l != -8) {
- c2l(in,tin0); tin[0]=tin0;
- c2l(in,tin1); tin[1]=tin1;
- des_encrypt1((DES_LONG *)tin,schedule,DES_DECRYPT);
- tout0=tin[0]^xor0;
- tout1=tin[1]^xor1;
- l2cn(tout0,tout1,out,l+8);
- /* xor0=tin0;
- xor1=tin1; */
- }
- if (retval) {
- l2c(tin0,retval);
- l2c(tin1,retval);
- }
- }
- tin0=tin1=tout0=tout1=xor0=xor1=0;
- tin[0]=tin[1]=0;
+gss_digest_Final(GSS_DIGEST_CTX *ctx, void *digest)
+{
+ switch (ctx->type) {
+ case NFS_GSS_1DES: MD5_DESCBC_Final(digest, &ctx->m_ctx);
+ break;
+ case NFS_GSS_3DES: HMAC_SHA1_DES3KD_Final(digest, &ctx->h_ctx);
+ break;
+ }
+}
+
+/*
+ * Encrypt or decrypt "len" bytes using the session key,
+ * dispatching on the key type (single DES vs. triple DES).
+ * For single DES the "seal" usage selects the Ke schedule
+ * (key XOR 0xf0 per RFC 1964 section 1.2.2.3) while other
+ * usages (e.g. sequence numbers) use the base schedule.
+ */
+static void
+gss_des_crypt(gss_key_info *ki, des_cblock *in, des_cblock *out,
+	int32_t len, des_cblock *iv, des_cblock *retiv, int encrypt, int usage)
+{
+	switch (ki->type) {
+	case NFS_GSS_1DES:
+			{
+				des_cbc_key_schedule *sched = ((usage == KG_USAGE_SEAL) ?
+						&ki->ks_u.des.gss_sched_Ke :
+						&ki->ks_u.des.gss_sched);
+				des_cbc_encrypt(in, out, len, sched, iv, retiv, encrypt);
+			}
+			break;
+	case NFS_GSS_3DES:
+
+			des3_cbc_encrypt(in, out, len, &ki->ks_u.des3.gss_sched, iv, retiv, encrypt);
+			break;
+	default:
+			/*
+			 * Unknown key type: diagnose it (matching gss_digest_Init)
+			 * rather than silently leaving the data untransformed.
+			 */
+			printf("gss_des_crypt: Unknown key info type %d\n", ki->type);
+			break;
+	}
+}
+
+/*
+ * Initialize the key info structure from a raw session key.
+ * The key length selects the mechanism flavor:
+ *   8 bytes  -> single DES with DES-MAC-MD5 checksums
+ *   24 bytes -> triple DES with HMAC-SHA1-DES3-KD checksums
+ * Returns 0 on success or an error from key scheduling,
+ * EINVAL for an unsupported key length.
+ */
+static int
+gss_key_init(gss_key_info *ki, uint32_t skeylen)
+{
+	size_t i;
+	int rc;
+	des_cblock k[3];
+
+	ki->keybytes = skeylen;
+	switch (skeylen) {
+	case sizeof(des_cblock):
+		ki->type = NFS_GSS_1DES;
+		ki->hash_len = MD5_DESCBC_DIGEST_LENGTH;
+		ki->ks_u.des.key = (des_cblock *)ki->skey;
+		rc = des_cbc_key_sched(ki->ks_u.des.key, &ki->ks_u.des.gss_sched);
+		if (rc)
+			return (rc);
+		/* Derive Ke = key XOR 0xf0 per RFC 1964 section 1.2.2.3 */
+		for (i = 0; i < ki->keybytes; i++)
+			k[0][i] = 0xf0 ^ (*ki->ks_u.des.key)[i];
+		rc = des_cbc_key_sched(&k[0], &ki->ks_u.des.gss_sched_Ke);
+		/* Don't leave derived key material on the stack */
+		bzero(&k[0], sizeof(k[0]));
+		break;
+	case 3*sizeof(des_cblock):
+		ki->type = NFS_GSS_3DES;
+		ki->hash_len = SHA_DIGEST_LENGTH;
+		ki->ks_u.des3.key = (des_cblock (*)[3])ki->skey;
+		des3_derive_key(*ki->ks_u.des3.key, ki->ks_u.des3.ckey,
+				KEY_USAGE_DES3_SIGN, KEY_USAGE_LEN);
+		rc = des3_cbc_key_sched(*ki->ks_u.des3.key, &ki->ks_u.des3.gss_sched);
+		if (rc)
+			return (rc);
+		break;
+	default:
+		/* %u: skeylen is uint32_t */
+		printf("gss_key_init: Invalid key length %u\n", skeylen);
+		rc = EINVAL;
+		break;
+	}
+
+	return (rc);
+}
+
+#if 0
+#define DISPLAYLEN 16
+#define MAXDISPLAYLEN 256
+
+/*
+ * Debug helper: print up to MAXDISPLAYLEN bytes of "data" as hex,
+ * DISPLAYLEN bytes per output line. Compiled out by default.
+ */
+static void
+hexdump(const char *msg, void *data, size_t len)
+{
+	size_t i, j;
+	u_char *d = data;
+	char *p, disbuf[3*DISPLAYLEN+1];
+
+	/* %u matches the uint32_t cast of len */
+	printf("NFS DEBUG %s len=%u:\n", msg, (uint32_t)len);
+	if (len > MAXDISPLAYLEN)
+		len = MAXDISPLAYLEN;
+
+	for (i = 0; i < len; i += DISPLAYLEN) {
+		for (p = disbuf, j = 0; (j + i) < len && j < DISPLAYLEN; j++, p += 3)
+			snprintf(p, 4, "%02x ", d[i + j]);
+		printf("\t%s\n", disbuf);
+	}
}
+#endif