/*
- * Copyright (c) 2007-2009 Apple Inc. All rights reserved.
+ * Copyright (c) 2007-2010 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <libkern/libkern.h>
#include <mach/task.h>
-#include <mach/task_special_ports.h>
+#include <mach/host_special_ports.h>
#include <mach/host_priv.h>
#include <mach/thread_act.h>
#include <mach/mig_errors.h>
* These octet strings are used to encode/decode ASN.1 tokens
* in the RPCSEC_GSS verifiers.
*/
-static u_char krb5_tokhead[] = { 0x60, 0x23 };
-static u_char krb5_mech[] = { 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x12, 0x01, 0x02, 0x02 };
+static u_char krb5_tokhead[] __attribute__((unused)) = { 0x60, 0x23 };
+ u_char krb5_mech[11] = { 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x12, 0x01, 0x02, 0x02 };
static u_char krb5_mic[] = { 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff };
static u_char krb5_mic3[] = { 0x01, 0x01, 0x04, 0x00, 0xff, 0xff, 0xff, 0xff };
static u_char krb5_wrap[] = { 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff };
static int nfs_gss_clnt_ctx_find(struct nfsreq *);
static int nfs_gss_clnt_ctx_failover(struct nfsreq *);
static int nfs_gss_clnt_ctx_init(struct nfsreq *, struct nfs_gss_clnt_ctx *);
+static int nfs_gss_clnt_ctx_init_retry(struct nfsreq *, struct nfs_gss_clnt_ctx *);
static int nfs_gss_clnt_ctx_callserver(struct nfsreq *, struct nfs_gss_clnt_ctx *);
static char *nfs_gss_clnt_svcname(struct nfsmount *);
static int nfs_gss_clnt_gssd_upcall(struct nfsreq *, struct nfs_gss_clnt_ctx *);
static void nfs_gss_clnt_ctx_remove(struct nfsmount *, struct nfs_gss_clnt_ctx *);
-static int nfs_gss_clnt_ctx_delay(struct nfsreq *, int *);
#endif /* NFSCLIENT */
#if NFSSERVER
static int nfs_gss_svc_seqnum_valid(struct nfs_gss_svc_ctx *, uint32_t);
#endif /* NFSSERVER */
-static void task_release_special_port(mach_port_t);
-static mach_port_t task_copy_special_port(mach_port_t);
+static void host_release_special_port(mach_port_t);
+static mach_port_t host_copy_special_port(mach_port_t);
static void nfs_gss_mach_alloc_buffer(u_char *, uint32_t, vm_map_copy_t *);
static int nfs_gss_mach_vmcopyout(vm_map_copy_t, uint32_t, u_char *);
static int nfs_gss_token_get(gss_key_info *ki, u_char *, u_char *, int, uint32_t *, u_char *);
#if NFSCLIENT
+/*
+ * Is it OK to fall back to using AUTH_SYS?
+ */
+static int
+nfs_gss_sysok(struct nfsreq *req)
+{
+ struct nfsmount *nmp = req->r_nmp;
+ int i;
+
+ if (req->r_wrongsec) /* Not OK if we're trying to handle a wrongsec error */
+ return (0);
+ if (!nmp->nm_sec.count) /* assume it's OK if we don't have a set of flavors */
+ return (1);
+ for (i=0; i < nmp->nm_sec.count; i++)
+ if (nmp->nm_sec.flavors[i] == RPCAUTH_SYS)
+ return (1);
+ return (0);
+}
+
/*
* Find the context for a particular user.
*
struct nfs_gss_clnt_ctx *cp;
uid_t uid = kauth_cred_getuid(req->r_cred);
int error = 0;
- int retrycnt = 0;
lck_mtx_lock(&nmp->nm_lock);
TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) {
if (cp->gss_clnt_uid == uid) {
if (cp->gss_clnt_flags & GSS_CTX_INVAL)
continue;
- lck_mtx_unlock(&nmp->nm_lock);
nfs_gss_clnt_ctx_ref(req, cp);
+ lck_mtx_unlock(&nmp->nm_lock);
return (0);
}
}
*/
TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) {
if (!(cp->gss_clnt_flags & GSS_CTX_INVAL)) {
- lck_mtx_unlock(&nmp->nm_lock);
nfs_gss_clnt_ctx_ref(req, cp);
+ lck_mtx_unlock(&nmp->nm_lock);
return (0);
}
}
* to failover to sec=sys.
*/
if (req->r_thread == NULL) {
- if (nmp->nm_flag & NFSMNT_SECSYSOK) {
+ if (nfs_gss_sysok(req)) {
error = nfs_gss_clnt_ctx_failover(req);
} else {
printf("nfs_gss_clnt_ctx_find: no context for async\n");
TAILQ_INSERT_TAIL(&nmp->nm_gsscl, cp, gss_clnt_entries);
lck_mtx_unlock(&nmp->nm_lock);
-retry:
- error = nfs_gss_clnt_ctx_init(req, cp);
- if (error == ENEEDAUTH) {
- error = nfs_gss_clnt_ctx_delay(req, &retrycnt);
- if (!error)
- goto retry;
-
- /* Giving up on this context */
- cp->gss_clnt_flags |= GSS_CTX_INVAL;
-
- /*
- * Wake any threads waiting to use the context
- */
- lck_mtx_lock(cp->gss_clnt_mtx);
- cp->gss_clnt_thread = NULL;
- if (cp->gss_clnt_flags & GSS_NEEDCTX) {
- cp->gss_clnt_flags &= ~GSS_NEEDCTX;
- wakeup(cp);
- }
- lck_mtx_unlock(cp->gss_clnt_mtx);
-
- }
-
+ error = nfs_gss_clnt_ctx_init_retry(req, cp); // Initialize new context
if (error)
nfs_gss_clnt_ctx_unref(req);
* up a dummy context that allows this user to attempt
* sec=sys calls.
*/
- if (error && (nmp->nm_flag & NFSMNT_SECSYSOK) &&
+ if (error && nfs_gss_sysok(req) &&
(error != ENXIO) && (error != ETIMEDOUT)) {
lck_mtx_lock(&nmp->nm_lock);
error = nfs_gss_clnt_ctx_failover(req);
slpflag = (PZERO-1);
if (req->r_nmp) {
- slpflag |= ((req->r_nmp->nm_flag & NFSMNT_INT) && req->r_thread) ? PCATCH : 0;
+ slpflag |= (NMFLAG(req->r_nmp, INTR) && req->r_thread && !(req->r_flags & R_NOINTR)) ? PCATCH : 0;
recordmark = (req->r_nmp->nm_sotype == SOCK_STREAM);
}
retry:
if (cp->gss_clnt_thread && cp->gss_clnt_thread != current_thread()) {
cp->gss_clnt_flags |= GSS_NEEDCTX;
msleep(cp, cp->gss_clnt_mtx, slpflag | PDROP, "ctxwait", NULL);
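+		/* Once awakened, stop catching signals in the sleep itself; nfs_sigintr() below still notices any that are pending */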
+ slpflag &= ~PCATCH;
if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0)))
return (error);
nfs_gss_clnt_ctx_unref(req);
while (win_getbit(cp->gss_clnt_seqbits,
((cp->gss_clnt_seqnum - cp->gss_clnt_seqwin) + 1) % cp->gss_clnt_seqwin)) {
cp->gss_clnt_flags |= GSS_NEEDSEQ;
- msleep(cp, cp->gss_clnt_mtx, slpflag, "seqwin", NULL);
+ msleep(cp, cp->gss_clnt_mtx, slpflag | PDROP, "seqwin", NULL);
+ slpflag &= ~PCATCH;
if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0))) {
- lck_mtx_unlock(cp->gss_clnt_mtx);
return (error);
}
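+		/* msleep() dropped the mutex (PDROP), so retake it before rechecking the context flags */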
+ lck_mtx_lock(cp->gss_clnt_mtx);
if (cp->gss_clnt_flags & GSS_CTX_INVAL) {
	/* Renewed while we were waiting */
lck_mtx_unlock(cp->gss_clnt_mtx);
cp->gss_clnt_proc = RPCSEC_GSS_INIT;
cp->gss_clnt_service =
- nmp->nm_auth == RPCAUTH_KRB5 ? RPCSEC_GSS_SVC_NONE :
- nmp->nm_auth == RPCAUTH_KRB5I ? RPCSEC_GSS_SVC_INTEGRITY :
- nmp->nm_auth == RPCAUTH_KRB5P ? RPCSEC_GSS_SVC_PRIVACY : 0;
+ req->r_auth == RPCAUTH_KRB5 ? RPCSEC_GSS_SVC_NONE :
+ req->r_auth == RPCAUTH_KRB5I ? RPCSEC_GSS_SVC_INTEGRITY :
+ req->r_auth == RPCAUTH_KRB5P ? RPCSEC_GSS_SVC_PRIVACY : 0;
cp->gss_clnt_gssd_flags = (nfs_single_des ? GSSD_NFS_1DES : 0);
/*
/*
* The context is apparently established successfully
*/
+ lck_mtx_lock(cp->gss_clnt_mtx);
cp->gss_clnt_flags |= GSS_CTX_COMPLETE;
+ lck_mtx_unlock(cp->gss_clnt_mtx);
cp->gss_clnt_proc = RPCSEC_GSS_DATA;
microuptime(&now);
cp->gss_clnt_ctime = now.tv_sec; // time stamp
* It will be removed when the reference count
* drops to zero.
*/
+ lck_mtx_lock(cp->gss_clnt_mtx);
if (error)
cp->gss_clnt_flags |= GSS_CTX_INVAL;
/*
* Wake any threads waiting to use the context
*/
- lck_mtx_lock(cp->gss_clnt_mtx);
cp->gss_clnt_thread = NULL;
if (cp->gss_clnt_flags & GSS_NEEDCTX) {
cp->gss_clnt_flags &= ~GSS_NEEDCTX;
return (error);
}
+/*
+ * This function calls nfs_gss_clnt_ctx_init() to set up a new context.
+ * But if establishing the context fails, it keeps retrying at
+ * progressively longer intervals in case the failure is due to some
+ * transient condition; for instance, the server might be failing the
+ * context setup because directory services has not come up in a
+ * timely fashion.
+ */
+static int
+nfs_gss_clnt_ctx_init_retry(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp)
+{
+ struct nfsmount *nmp = req->r_nmp;
+ struct timeval now;
+ time_t waituntil;
+ int error, slpflag;
+ int retries = 0;
+ int timeo = NFS_TRYLATERDEL;
+
+ if (nmp == NULL) {
+ error = ENXIO;
+ goto bad;
+ }
+
+ /* For an "intr" mount allow a signal to interrupt the retries */
+ slpflag = (NMFLAG(nmp, INTR) && !(req->r_flags & R_NOINTR)) ? PCATCH : 0;
+
+ while ((error = nfs_gss_clnt_ctx_init(req, cp)) == ENEEDAUTH) {
+ microuptime(&now);
+ waituntil = now.tv_sec + timeo;
+ while (now.tv_sec < waituntil) {
+ tsleep(&lbolt, PSOCK | slpflag, "nfs_gss_clnt_ctx_init_retry", 0);
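+			/* A signal can interrupt only the first sleep; nfs_sigintr() below still sees it */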
+ slpflag = 0;
+ error = nfs_sigintr(req->r_nmp, req, current_thread(), 0);
+ if (error)
+ goto bad;
+ microuptime(&now);
+ }
+
+ retries++;
+ /* If it's a soft mount just give up after a while */
+ if (NMFLAG(nmp, SOFT) && (retries > nmp->nm_retry)) {
+ error = ETIMEDOUT;
+ goto bad;
+ }
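+		/* Back off exponentially, capping the delay at one minute */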
+ timeo *= 2;
+ if (timeo > 60)
+ timeo = 60;
+ }
+
+ if (error == 0)
+ return 0; // success
+bad:
+ /*
+ * Give up on this context
+ */
+ lck_mtx_lock(cp->gss_clnt_mtx);
+ cp->gss_clnt_flags |= GSS_CTX_INVAL;
+
+ /*
+ * Wake any threads waiting to use the context
+ */
+ cp->gss_clnt_thread = NULL;
+ if (cp->gss_clnt_flags & GSS_NEEDCTX) {
+ cp->gss_clnt_flags &= ~GSS_NEEDCTX;
+ wakeup(cp);
+ }
+ lck_mtx_unlock(cp->gss_clnt_mtx);
+
+ return error;
+}
+
/*
* Call the NFS server using a null procedure for context setup.
* Even though it's a null procedure and nominally has no arguments
return (svcname);
}
+/*
+ * Get a mach port to talk to gssd.
+ * gssd lives in the root bootstrap, so we call gssd's lookup routine
+ * to get a send right to talk to a new gssd instance that launchd has launched
+ * based on the cred's uid and audit session id.
+ */
+#define kauth_cred_getasid(cred) ((cred)->cr_audit.as_aia_p->ai_asid)
+#define kauth_cred_getauid(cred) ((cred)->cr_audit.as_aia_p->ai_auid)
+
+static mach_port_t
+nfs_gss_clnt_get_upcall_port(kauth_cred_t credp)
+{
+ mach_port_t gssd_host_port, uc_port = IPC_PORT_NULL;
+ kern_return_t kr;
+ au_asid_t asid;
+ uid_t uid;
+
+ kr = host_get_gssd_port(host_priv_self(), &gssd_host_port);
+ if (kr != KERN_SUCCESS) {
+		printf("nfs_gss_clnt_get_upcall_port: can't get gssd port, status %x (%d)\n", kr, kr);
+ return (IPC_PORT_NULL);
+ }
+ if (!IPC_PORT_VALID(gssd_host_port)) {
+		printf("nfs_gss_clnt_get_upcall_port: gssd port not valid\n");
+ return (IPC_PORT_NULL);
+ }
+
+ asid = kauth_cred_getasid(credp);
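+	/* Look up gssd by the audit uid; fall back to the cred's uid if the session has no audit identity */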
+ uid = kauth_cred_getauid(credp);
+ if (uid == AU_DEFAUDITID)
+ uid = kauth_cred_getuid(credp);
+ kr = mach_gss_lookup(gssd_host_port, uid, asid, &uc_port);
+ if (kr != KERN_SUCCESS)
+		printf("nfs_gss_clnt_get_upcall_port: mach_gss_lookup failed: status %x (%d)\n", kr, kr);
+
+ return (uc_port);
+}
+
/*
* Make an upcall to the gssd using Mach RPC
- * The upcall is made using a task special port.
+ * The upcall is made using a host special port.
* This allows launchd to fire up the gssd in the
* user's session. This is important, since gssd
* must have access to the user's credential cache.
nfs_gss_clnt_gssd_upcall(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp)
{
kern_return_t kr;
- byte_buffer okey = NULL;
+ gssd_byte_buffer okey = NULL;
uint32_t skeylen = 0;
int retry_cnt = 0;
vm_map_copy_t itoken = NULL;
- byte_buffer otoken = NULL;
+ gssd_byte_buffer otoken = NULL;
mach_msg_type_number_t otokenlen;
int error = 0;
char uprinc[1];
* the kernel is being compiled with -Wwrite-strings.
*/
uprinc[0] = '\0';
- if (cp->gss_clnt_mport == NULL) {
- kr = task_get_gssd_port(get_threadtask(req->r_thread), &cp->gss_clnt_mport);
- if (kr != KERN_SUCCESS) {
- printf("nfs_gss_clnt_gssd_upcall: can't get gssd port, status %x (%d)\n", kr, kr);
- goto out;
- }
- if (!IPC_PORT_VALID(cp->gss_clnt_mport)) {
- printf("nfs_gss_clnt_gssd_upcall: gssd port not valid\n");
- cp->gss_clnt_mport = NULL;
+ if (!IPC_PORT_VALID(cp->gss_clnt_mport)) {
+ cp->gss_clnt_mport = nfs_gss_clnt_get_upcall_port(req->r_cred);
+ if (cp->gss_clnt_mport == IPC_PORT_NULL)
goto out;
- }
}
if (cp->gss_clnt_tokenlen > 0)
retry:
kr = mach_gss_init_sec_context(
cp->gss_clnt_mport,
- KRB5_MECH,
- (byte_buffer) itoken, (mach_msg_type_number_t) cp->gss_clnt_tokenlen,
+ GSSD_KRB5_MECH,
+ (gssd_byte_buffer) itoken, (mach_msg_type_number_t) cp->gss_clnt_tokenlen,
cp->gss_clnt_uid,
uprinc,
cp->gss_clnt_svcname,
nfs_gss_mach_alloc_buffer(cp->gss_clnt_token, cp->gss_clnt_tokenlen, &itoken);
goto retry;
}
- task_release_special_port(cp->gss_clnt_mport);
- cp->gss_clnt_mport = NULL;
+
+ host_release_special_port(cp->gss_clnt_mport);
+ cp->gss_clnt_mport = IPC_PORT_NULL;
goto out;
}
if (nmp != NULL)
TAILQ_REMOVE(&nmp->nm_gsscl, cp, gss_clnt_entries);
- if (cp->gss_clnt_mport)
- task_release_special_port(cp->gss_clnt_mport);
+ host_release_special_port(cp->gss_clnt_mport);
+
if (cp->gss_clnt_mtx)
lck_mtx_destroy(cp->gss_clnt_mtx, nfs_gss_clnt_grp);
if (cp->gss_clnt_handle)
int error = 0;
uid_t saved_uid;
mach_port_t saved_mport;
- int retrycnt = 0;
if (cp == NULL)
return (0);
return (0); // already being renewed
}
saved_uid = cp->gss_clnt_uid;
- saved_mport = task_copy_special_port(cp->gss_clnt_mport);
+ saved_mport = host_copy_special_port(cp->gss_clnt_mport);
/* Remove the old context */
cp->gss_clnt_flags |= GSS_CTX_INVAL;
}
ncp->gss_clnt_uid = saved_uid;
- ncp->gss_clnt_mport = task_copy_special_port(saved_mport); // re-use the gssd port
+ ncp->gss_clnt_mport = host_copy_special_port(saved_mport); // re-use the gssd port
ncp->gss_clnt_mtx = lck_mtx_alloc_init(nfs_gss_clnt_grp, LCK_ATTR_NULL);
ncp->gss_clnt_thread = current_thread();
lck_mtx_lock(&nmp->nm_lock);
nfs_gss_clnt_ctx_unref(req);
nfs_gss_clnt_ctx_ref(req, ncp);
-retry:
- error = nfs_gss_clnt_ctx_init(req, ncp); // Initialize new context
- if (error == ENEEDAUTH) {
- error = nfs_gss_clnt_ctx_delay(req, &retrycnt);
- if (!error)
- goto retry;
- }
+ error = nfs_gss_clnt_ctx_init_retry(req, ncp); // Initialize new context
out:
- task_release_special_port(saved_mport);
+ host_release_special_port(saved_mport);
if (error)
nfs_gss_clnt_ctx_unref(req);
* The contexts are also destroyed by the server.
*/
void
-nfs_gss_clnt_ctx_unmount(struct nfsmount *nmp, int mntflags)
+nfs_gss_clnt_ctx_unmount(struct nfsmount *nmp)
{
struct nfs_gss_clnt_ctx *cp;
- struct ucred temp_cred;
- kauth_cred_t cred;
struct nfsm_chain nmreq, nmrep;
int error, status;
struct nfsreq req;
- bzero((caddr_t) &temp_cred, sizeof(temp_cred));
- temp_cred.cr_ngroups = 1;
req.r_nmp = nmp;
for (;;) {
* But don't bother if it's a forced unmount
* or if it's a dummy sec=sys context.
*/
- if (!(mntflags & MNT_FORCE) && cp->gss_clnt_service != RPCSEC_GSS_SVC_SYS) {
- temp_cred.cr_uid = cp->gss_clnt_uid;
- cred = kauth_cred_create(&temp_cred);
+ if (!(nmp->nm_state & NFSSTA_FORCE) && (cp->gss_clnt_service != RPCSEC_GSS_SVC_SYS)) {
+ kauth_cred_t cred;
+ struct posix_cred temp_pcred;
+
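+			/* Build a temporary credential for the context's uid so the DESTROY request is sent as that user */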
+ bzero((caddr_t) &temp_pcred, sizeof(temp_pcred));
+ temp_pcred.cr_ngroups = 1;
+ temp_pcred.cr_uid = cp->gss_clnt_uid;
+ cred = posix_cred_create(&temp_pcred);
cp->gss_clnt_proc = RPCSEC_GSS_DESTROY;
error = 0;
* the reference to remove it if its
* refcount is zero.
*/
+ lck_mtx_lock(cp->gss_clnt_mtx);
cp->gss_clnt_flags |= GSS_CTX_INVAL;
+ lck_mtx_unlock(cp->gss_clnt_mtx);
nfs_gss_clnt_ctx_unref(&req);
}
}
-/*
- * If we get a failure in trying to establish a context we need to wait a
- * little while to see if the server is feeling better. In our case this is
- * probably a failure in directory services not coming up in a timely fashion.
- * This routine sort of mimics receiving a jukebox error.
- */
-static int
-nfs_gss_clnt_ctx_delay(struct nfsreq *req, int *retry)
-{
- int timeo = (1 << *retry) * NFS_TRYLATERDEL;
- int error = 0;
- struct nfsmount *nmp = req->r_nmp;
- struct timeval now;
- time_t waituntil;
-
- if (!nmp)
- return (ENXIO);
- if ((nmp->nm_flag & NFSMNT_SOFT) && *retry > nmp->nm_retry)
- return (ETIMEDOUT);
- if (timeo > 60)
- timeo = 60;
-
- microuptime(&now);
- waituntil = now.tv_sec + timeo;
- while (now.tv_sec < waituntil) {
- tsleep(&lbolt, PSOCK, "nfs_gss_clnt_ctx_delay", 0);
- error = nfs_sigintr(req->r_nmp, req, current_thread(), 0);
- if (error)
- break;
- microuptime(&now);
- }
- *retry += 1;
-
- return (error);
-}
-
-
#endif /* NFSCLIENT */
/*************
lck_mtx_lock(nfs_gss_svc_ctx_mutex);
- LIST_FOREACH(cp, head, gss_svc_entries)
+ LIST_FOREACH(cp, head, gss_svc_entries) {
if (cp->gss_svc_handle == handle) {
if (timenow > cp->gss_svc_incarnation + GSS_SVC_CTX_TTL) {
/*
*/
cp->gss_svc_handle = 0;
/*
- * Make sure though that we stay around for GSS_CTC_PEND seconds
+ * Make sure though that we stay around for GSS_CTX_PEND seconds
* for other threads that might be using the context.
*/
cp->gss_svc_incarnation = timenow;
+
cp = NULL;
+ break;
}
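+		/* Found a live context; take a reference so the reaper can't free it while it's in use */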
+ lck_mtx_lock(cp->gss_svc_mtx);
+ cp->gss_svc_refcnt++;
+ lck_mtx_unlock(cp->gss_svc_mtx);
break;
}
+ }
lck_mtx_unlock(nfs_gss_svc_ctx_mutex);
nfs_gss_svc_ctx_insert(struct nfs_gss_svc_ctx *cp)
{
struct nfs_gss_svc_ctx_hashhead *head;
+ struct nfs_gss_svc_ctx *p;
+ lck_mtx_lock(nfs_gss_svc_ctx_mutex);
+
+ /*
+ * Give the client a random handle so that if we reboot
+ * it's unlikely the client will get a bad context match.
+ * Make sure it's not zero or already assigned.
+ */
+retry:
+ cp->gss_svc_handle = random();
+ if (cp->gss_svc_handle == 0)
+ goto retry;
head = &nfs_gss_svc_ctx_hashtbl[SVC_CTX_HASH(cp->gss_svc_handle)];
+ LIST_FOREACH(p, head, gss_svc_entries)
+ if (p->gss_svc_handle == cp->gss_svc_handle)
+ goto retry;
- lck_mtx_lock(nfs_gss_svc_ctx_mutex);
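+	/* Stamp the new context GSS_CTX_PEND seconds out so it isn't reaped while setup is still in progress */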
+ clock_interval_to_deadline(GSS_CTX_PEND, NSEC_PER_SEC,
+ &cp->gss_svc_incarnation);
LIST_INSERT_HEAD(head, cp, gss_svc_entries);
nfs_gss_ctx_count++;
nfs_gss_timer_on = 1;
nfs_interval_timer_start(nfs_gss_svc_ctx_timer_call,
- min(GSS_TIMER_PERIOD, max(GSS_CTX_TTL_MIN, GSS_SVC_CTX_TTL)) * MSECS_PER_SEC);
+ min(GSS_TIMER_PERIOD, max(GSS_CTX_TTL_MIN, nfsrv_gss_context_ttl)) * MSECS_PER_SEC);
}
lck_mtx_unlock(nfs_gss_svc_ctx_mutex);
void
nfs_gss_svc_ctx_timer(__unused void *param1, __unused void *param2)
{
- struct nfs_gss_svc_ctx_hashhead *head;
struct nfs_gss_svc_ctx *cp, *next;
uint64_t timenow;
int contexts = 0;
/*
* Scan all the hash chains
- * Assume nfs_gss_svc_ctx_mutex is held
*/
for (i = 0; i < SVC_CTX_HASHSZ; i++) {
/*
* For each hash chain, look for entries
* that haven't been used in a while.
*/
- head = &nfs_gss_svc_ctx_hashtbl[i];
- for (cp = LIST_FIRST(head); cp; cp = next) {
+ LIST_FOREACH_SAFE(cp, &nfs_gss_svc_ctx_hashtbl[i], gss_svc_entries, next) {
contexts++;
- next = LIST_NEXT(cp, gss_svc_entries);
- if (timenow > cp->gss_svc_incarnation +
- (cp->gss_svc_handle ? GSS_SVC_CTX_TTL : 0)) {
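+			/* Reap a context only when it has expired and holds no references */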
+ if (timenow > cp->gss_svc_incarnation +
+ (cp->gss_svc_handle ? GSS_SVC_CTX_TTL : 0)
+ && cp->gss_svc_refcnt == 0) {
/*
* A stale context - remove it
*/
nfs_gss_timer_on = nfs_gss_ctx_count > 0;
if (nfs_gss_timer_on)
nfs_interval_timer_start(nfs_gss_svc_ctx_timer_call,
- min(GSS_TIMER_PERIOD, max(GSS_CTX_TTL_MIN, GSS_SVC_CTX_TTL)) * MSECS_PER_SEC);
+ min(GSS_TIMER_PERIOD, max(GSS_CTX_TTL_MIN, nfsrv_gss_context_ttl)) * MSECS_PER_SEC);
lck_mtx_unlock(nfs_gss_svc_ctx_mutex);
}
error = ENOMEM;
goto nfsmout;
}
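+		/* A new context starts with one reference, held for this request */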
+ cp->gss_svc_mtx = lck_mtx_alloc_init(nfs_gss_svc_grp, LCK_ATTR_NULL);
+ cp->gss_svc_refcnt = 1;
} else {
/*
ki = &cp->gss_svc_kinfo;
if (proc == RPCSEC_GSS_DATA || proc == RPCSEC_GSS_DESTROY) {
- struct ucred temp_cred;
+ struct posix_cred temp_pcred;
if (cp->gss_svc_seqwin == 0) {
/*
*/
nfsm_chain_get_32(error, nmc, flavor);
nfsm_chain_get_32(error, nmc, verflen);
+ if (error)
+ goto nfsmout;
if (flavor != RPCSEC_GSS || verflen != KRB5_SZ_TOKEN(ki->hash_len))
error = NFSERR_AUTHERR | AUTH_BADVERF;
nfsm_chain_get_opaque(error, nmc, verflen, tokbuf);
/*
* Set up the user's cred
*/
- bzero(&temp_cred, sizeof(temp_cred));
- temp_cred.cr_uid = cp->gss_svc_uid;
- bcopy(cp->gss_svc_gids, temp_cred.cr_groups,
+ bzero(&temp_pcred, sizeof(temp_pcred));
+ temp_pcred.cr_uid = cp->gss_svc_uid;
+ bcopy(cp->gss_svc_gids, temp_pcred.cr_groups,
sizeof(gid_t) * cp->gss_svc_ngroups);
- temp_cred.cr_ngroups = cp->gss_svc_ngroups;
+ temp_pcred.cr_ngroups = cp->gss_svc_ngroups;
- nd->nd_cr = kauth_cred_create(&temp_cred);
+ nd->nd_cr = posix_cred_create(&temp_pcred);
if (nd->nd_cr == NULL) {
error = ENOMEM;
goto nfsmout;
nfsm_chain_get_32(error, nmc, verflen);
if (error || flavor != RPCAUTH_NULL || verflen > 0)
error = NFSERR_AUTHERR | RPCSEC_GSS_CREDPROBLEM;
- if (error)
+ if (error) {
+ if (proc == RPCSEC_GSS_INIT) {
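+				/* An INIT context isn't in the hash table yet, so destroy it here rather than deref it */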
+ lck_mtx_destroy(cp->gss_svc_mtx, nfs_gss_svc_grp);
+ FREE(cp, M_TEMP);
+ cp = NULL;
+ }
goto nfsmout;
+ }
}
nd->nd_gss_context = cp;
+ return 0;
nfsmout:
+ if (cp)
+ nfs_gss_svc_ctx_deref(cp);
return (error);
}
nfs_gss_svc_ctx_init(struct nfsrv_descript *nd, struct nfsrv_sock *slp, mbuf_t *mrepp)
{
struct nfs_gss_svc_ctx *cp = NULL;
- uint32_t handle = 0;
int error = 0;
int autherr = 0;
struct nfsm_chain *nmreq, nmrep;
switch (cp->gss_svc_proc) {
case RPCSEC_GSS_INIT:
- /*
- * Give the client a random handle so that
- * if we reboot it's unlikely the client
- * will get a bad context match.
- * Make sure it's not zero, or already assigned.
- */
- do {
- handle = random();
- } while (nfs_gss_svc_ctx_find(handle) != NULL || handle == 0);
- cp->gss_svc_handle = handle;
- cp->gss_svc_mtx = lck_mtx_alloc_init(nfs_gss_svc_grp, LCK_ATTR_NULL);
- clock_interval_to_deadline(GSS_CTX_PEND, NSEC_PER_SEC,
- &cp->gss_svc_incarnation);
-
nfs_gss_svc_ctx_insert(cp);
-
/* FALLTHRU */
case RPCSEC_GSS_CONTINUE_INIT:
kern_return_t kr;
mach_port_t mp;
int retry_cnt = 0;
- byte_buffer okey = NULL;
+ gssd_byte_buffer okey = NULL;
uint32_t skeylen = 0;
uint32_t ret_flags;
vm_map_copy_t itoken = NULL;
- byte_buffer otoken = NULL;
+ gssd_byte_buffer otoken = NULL;
mach_msg_type_number_t otokenlen;
int error = 0;
char svcname[] = "nfs";
- kr = task_get_gssd_port(get_threadtask(current_thread()), &mp);
+ kr = host_get_gssd_port(host_priv_self(), &mp);
if (kr != KERN_SUCCESS) {
printf("nfs_gss_svc_gssd_upcall: can't get gssd port, status %x (%d)\n", kr, kr);
goto out;
retry:
kr = mach_gss_accept_sec_context(
mp,
- (byte_buffer) itoken, (mach_msg_type_number_t) cp->gss_svc_tokenlen,
+ (gssd_byte_buffer) itoken, (mach_msg_type_number_t) cp->gss_svc_tokenlen,
svcname,
0,
&cp->gss_svc_context,
nfs_gss_mach_alloc_buffer(cp->gss_svc_token, cp->gss_svc_tokenlen, &itoken);
goto retry;
}
- task_release_special_port(mp);
+ host_release_special_port(mp);
goto out;
}
- task_release_special_port(mp);
+ host_release_special_port(mp);
if (skeylen > 0) {
if (skeylen != SKEYLEN && skeylen != SKEYLEN3) {
return (1);
}
+/*
+ * Drop a reference to a context
+ *
+ * Note that it's OK for the context to exist
+ * with a refcount of zero. The refcount isn't
+ * checked until we're about to reap an expired one.
+ */
+void
+nfs_gss_svc_ctx_deref(struct nfs_gss_svc_ctx *cp)
+{
+ lck_mtx_lock(cp->gss_svc_mtx);
+ if (cp->gss_svc_refcnt > 0)
+ cp->gss_svc_refcnt--;
+ else
+		printf("nfs_gss_svc_ctx_deref: zero refcount\n");
+ lck_mtx_unlock(cp->gss_svc_mtx);
+}
+
/*
* Called at NFS server shutdown - destroy all contexts
*/
*/
/*
- * Release a task special port that was obtained by task_get_special_port
- * or one of its macros (task_get_gssd_port in this case).
+ * Release a host special port that was obtained by host_get_special_port
+ * or one of its macros (host_get_gssd_port in this case).
* This really should be in a public kpi.
*/
extern ipc_port_t ipc_port_copy_send(ipc_port_t);
static void
-task_release_special_port(mach_port_t mp)
+host_release_special_port(mach_port_t mp)
{
-
- ipc_port_release_send(mp);
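+	/* Tolerate a null or dead port so callers can release unconditionally */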
+ if (IPC_PORT_VALID(mp))
+ ipc_port_release_send(mp);
}
static mach_port_t
-task_copy_special_port(mach_port_t mp)
+host_copy_special_port(mach_port_t mp)
{
- return ipc_port_copy_send(mp);
+ return (ipc_port_copy_send(mp));
}
/*
switch (ki->type) {
case NFS_GSS_1DES:
{
- des_key_schedule *sched = ((usage == KG_USAGE_SEAL) ?
+ des_cbc_key_schedule *sched = ((usage == KG_USAGE_SEAL) ?
&ki->ks_u.des.gss_sched_Ke :
&ki->ks_u.des.gss_sched);
- des_cbc_encrypt(in, out, len, *sched, iv, retiv, encrypt);
+ des_cbc_encrypt(in, out, len, sched, iv, retiv, encrypt);
}
break;
case NFS_GSS_3DES:
- des3_cbc_encrypt(in, out, len, ki->ks_u.des3.gss_sched, iv, retiv, encrypt);
+ des3_cbc_encrypt(in, out, len, &ki->ks_u.des3.gss_sched, iv, retiv, encrypt);
break;
}
}
ki->type = NFS_GSS_1DES;
ki->hash_len = MD5_DESCBC_DIGEST_LENGTH;
ki->ks_u.des.key = (des_cblock *)ki->skey;
- rc = des_key_sched(ki->ks_u.des.key, ki->ks_u.des.gss_sched);
+ rc = des_cbc_key_sched(ki->ks_u.des.key, &ki->ks_u.des.gss_sched);
if (rc)
return (rc);
for (i = 0; i < ki->keybytes; i++)
k[0][i] = 0xf0 ^ (*ki->ks_u.des.key)[i];
- rc = des_key_sched(&k[0], ki->ks_u.des.gss_sched_Ke);
+ rc = des_cbc_key_sched(&k[0], &ki->ks_u.des.gss_sched_Ke);
break;
case 3*sizeof(des_cblock):
ki->type = NFS_GSS_3DES;
ki->ks_u.des3.key = (des_cblock (*)[3])ki->skey;
des3_derive_key(*ki->ks_u.des3.key, ki->ks_u.des3.ckey,
KEY_USAGE_DES3_SIGN, KEY_USAGE_LEN);
- rc = des3_key_sched(*ki->ks_u.des3.key, ki->ks_u.des3.gss_sched);
+ rc = des3_cbc_key_sched(*ki->ks_u.des3.key, &ki->ks_u.des3.gss_sched);
if (rc)
return (rc);
break;