/*
- * Copyright (c) 2007-2015 Apple Inc. All rights reserved.
+ * Copyright (c) 2007-2020 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#if CONFIG_NFS_SERVER
u_long nfs_gss_svc_ctx_hash;
struct nfs_gss_svc_ctx_hashhead *nfs_gss_svc_ctx_hashtbl;
-lck_mtx_t *nfs_gss_svc_ctx_mutex;
-lck_grp_t *nfs_gss_svc_grp;
+static LCK_GRP_DECLARE(nfs_gss_svc_grp, "rpcsec_gss_svc");
+static LCK_MTX_DECLARE(nfs_gss_svc_ctx_mutex, &nfs_gss_svc_grp);
uint32_t nfsrv_gss_context_ttl = GSS_CTX_EXPIRE;
#define GSS_SVC_CTX_TTL ((uint64_t)max(2*GSS_CTX_PEND, nfsrv_gss_context_ttl) * NSEC_PER_SEC)
#endif /* CONFIG_NFS_SERVER */
#if CONFIG_NFS_CLIENT
-lck_grp_t *nfs_gss_clnt_grp;
+LCK_GRP_DECLARE(nfs_gss_clnt_grp, "rpcsec_gss_clnt");
#endif /* CONFIG_NFS_CLIENT */
#define KRB5_MAX_MIC_SIZE 128
static int nfs_gss_clnt_ctx_init(struct nfsreq *, struct nfs_gss_clnt_ctx *);
static int nfs_gss_clnt_ctx_init_retry(struct nfsreq *, struct nfs_gss_clnt_ctx *);
static int nfs_gss_clnt_ctx_callserver(struct nfsreq *, struct nfs_gss_clnt_ctx *);
-static uint8_t *nfs_gss_clnt_svcname(struct nfsmount *, gssd_nametype *, uint32_t *);
+static uint8_t *nfs_gss_clnt_svcname(struct nfsmount *, gssd_nametype *, size_t *);
static int nfs_gss_clnt_gssd_upcall(struct nfsreq *, struct nfs_gss_clnt_ctx *, uint32_t);
void nfs_gss_clnt_ctx_neg_cache_reap(struct nfsmount *);
static void nfs_gss_clnt_ctx_clean(struct nfs_gss_clnt_ctx *);
static void host_release_special_port(mach_port_t);
static mach_port_t host_copy_special_port(mach_port_t);
-static void nfs_gss_mach_alloc_buffer(u_char *, uint32_t, vm_map_copy_t *);
+static void nfs_gss_mach_alloc_buffer(u_char *, size_t, vm_map_copy_t *);
static int nfs_gss_mach_vmcopyout(vm_map_copy_t, uint32_t, u_char *);
static int nfs_gss_mchain_length(mbuf_t);
void
nfs_gss_init(void)
{
-#if CONFIG_NFS_CLIENT
- nfs_gss_clnt_grp = lck_grp_alloc_init("rpcsec_gss_clnt", LCK_GRP_ATTR_NULL);
-#endif /* CONFIG_NFS_CLIENT */
-
#if CONFIG_NFS_SERVER
- nfs_gss_svc_grp = lck_grp_alloc_init("rpcsec_gss_svc", LCK_GRP_ATTR_NULL);
-
nfs_gss_svc_ctx_hashtbl = hashinit(SVC_CTX_HASHSZ, M_TEMP, &nfs_gss_svc_ctx_hash);
- nfs_gss_svc_ctx_mutex = lck_mtx_alloc_init(nfs_gss_svc_grp, LCK_ATTR_NULL);
nfs_gss_svc_ctx_timer_call = thread_call_allocate(nfs_gss_svc_ctx_timer, NULL);
#endif /* CONFIG_NFS_SERVER */
{
uint32_t major, error;
mbuf_t mb = *mb_head, next;
- uint32_t plen;
- size_t length;
+ size_t plen, length;
gss_qop_t qop = GSS_C_QOP_REVERSE;
/* Chop off the opaque length */
lck_mtx_lock(&nmp->nm_lock);
NFS_GSS_DBG("Enter\n");
TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) {
- lck_mtx_lock(cp->gss_clnt_mtx);
+ lck_mtx_lock(&cp->gss_clnt_mtx);
printf("context %d/%d: refcnt = %d, flags = %x\n",
kauth_cred_getasid(cp->gss_clnt_cred),
kauth_cred_getauid(cp->gss_clnt_cred),
cp->gss_clnt_refcnt, cp->gss_clnt_flags);
- lck_mtx_unlock(cp->gss_clnt_mtx);
+ lck_mtx_unlock(&cp->gss_clnt_mtx);
}
NFS_GSS_DBG("Exit\n");
lck_mtx_unlock(&nmp->nm_lock);
nfs_gss_clnt_ctx_name(struct nfsmount *nmp, struct nfs_gss_clnt_ctx *cp, char *buf, int len)
{
char *np;
- int nlen;
+ size_t nlen;
const char *server = "";
if (nmp && nmp->nm_mountp) {
nlen = np ? strlen(cp->gss_clnt_display) : 0;
}
if (nlen) {
- snprintf(buf, len, "[%s] %.*s %d/%d %s", server, nlen, np,
+ snprintf(buf, len, "[%s] %.*s %d/%d %s", server, nlen > INT_MAX ? INT_MAX : (int)nlen, np,
kauth_cred_getasid(cp->gss_clnt_cred),
kauth_cred_getuid(cp->gss_clnt_cred),
cp->gss_clnt_principal ? "" : "[from default cred] ");
* so that defaults can be set by service identities.
*/
-static void
+static int
nfs_gss_clnt_mnt_ref(struct nfsmount *nmp)
{
int error;
if (nmp == NULL ||
!(vfs_flags(nmp->nm_mountp) & MNT_AUTOMOUNTED)) {
- return;
+ return EINVAL;
}
error = VFS_ROOT(nmp->nm_mountp, &rvp, NULL);
if (!error) {
- vnode_ref(rvp);
+ error = vnode_ref(rvp);
vnode_put(rvp);
}
+
+ return error;
}
/*
- * Unbusy the mout. See above comment,
+ * Unbusy the mount. See above comment,
*/
-static void
+static int
nfs_gss_clnt_mnt_rele(struct nfsmount *nmp)
{
int error;
if (nmp == NULL ||
!(vfs_flags(nmp->nm_mountp) & MNT_AUTOMOUNTED)) {
- return;
+ return EINVAL;
}
error = VFS_ROOT(nmp->nm_mountp, &rvp, NULL);
vnode_rele(rvp);
vnode_put(rvp);
}
+
+ return error;
}
int nfs_root_steals_ctx = 0;
static int
-nfs_gss_clnt_ctx_find_principal(struct nfsreq *req, uint8_t *principal, uint32_t plen, uint32_t nt)
+nfs_gss_clnt_ctx_find_principal(struct nfsreq *req, uint8_t *principal, size_t plen, uint32_t nt)
{
struct nfsmount *nmp = req->r_nmp;
- struct nfs_gss_clnt_ctx *cp;
- struct nfsreq treq;
+ struct nfs_gss_clnt_ctx *cp, *tcp;
+ struct nfsreq *treq;
int error = 0;
struct timeval now;
char CTXBUF[NFS_CTXBUFSZ];
- bzero(&treq, sizeof(struct nfsreq));
- treq.r_nmp = nmp;
+ treq = zalloc_flags(nfs_req_zone, Z_WAITOK | Z_ZERO);
+ treq->r_nmp = nmp;
microuptime(&now);
lck_mtx_lock(&nmp->nm_lock);
- TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) {
- lck_mtx_lock(cp->gss_clnt_mtx);
+ TAILQ_FOREACH_SAFE(cp, &nmp->nm_gsscl, gss_clnt_entries, tcp) {
+ lck_mtx_lock(&cp->gss_clnt_mtx);
if (cp->gss_clnt_flags & GSS_CTX_DESTROY) {
NFS_GSS_DBG("Found destroyed context %s refcnt = %d continuing\n",
NFS_GSS_CTX(req, cp),
cp->gss_clnt_refcnt);
- lck_mtx_unlock(cp->gss_clnt_mtx);
+ lck_mtx_unlock(&cp->gss_clnt_mtx);
continue;
}
if (nfs_gss_clnt_ctx_cred_match(cp->gss_clnt_cred, req->r_cred)) {
bcmp(cp->gss_clnt_principal, principal, plen) != 0) {
cp->gss_clnt_flags |= (GSS_CTX_INVAL | GSS_CTX_DESTROY);
cp->gss_clnt_refcnt++;
- lck_mtx_unlock(cp->gss_clnt_mtx);
+ lck_mtx_unlock(&cp->gss_clnt_mtx);
NFS_GSS_DBG("Marking %s for deletion because %s does not match\n",
NFS_GSS_CTX(req, cp), principal);
- NFS_GSS_DBG("len = (%d,%d), nt = (%d,%d)\n", cp->gss_clnt_prinlen, plen,
+ NFS_GSS_DBG("len = (%zu,%zu), nt = (%d,%d)\n", cp->gss_clnt_prinlen, plen,
cp->gss_clnt_prinnt, nt);
- treq.r_gss_ctx = cp;
+ treq->r_gss_ctx = cp;
cp = NULL;
break;
}
if (cp->gss_clnt_nctime + GSS_NEG_CACHE_TO >= now.tv_sec || cp->gss_clnt_nctime == 0) {
NFS_GSS_DBG("Context %s (refcnt = %d) not expired returning EAUTH nctime = %ld now = %ld\n",
NFS_GSS_CTX(req, cp), cp->gss_clnt_refcnt, cp->gss_clnt_nctime, now.tv_sec);
- lck_mtx_unlock(cp->gss_clnt_mtx);
+ lck_mtx_unlock(&cp->gss_clnt_mtx);
lck_mtx_unlock(&nmp->nm_lock);
+ NFS_ZFREE(nfs_req_zone, treq);
return NFSERR_EAUTH;
}
if (cp->gss_clnt_refcnt) {
NFS_GSS_DBG("Context %s has expired but we still have %d references\n",
NFS_GSS_CTX(req, cp), cp->gss_clnt_refcnt);
error = nfs_gss_clnt_ctx_copy(cp, &ncp);
- lck_mtx_unlock(cp->gss_clnt_mtx);
+ lck_mtx_unlock(&cp->gss_clnt_mtx);
if (error) {
lck_mtx_unlock(&nmp->nm_lock);
+ NFS_ZFREE(nfs_req_zone, treq);
return error;
}
cp = ncp;
if (cp->gss_clnt_nctime) {
nmp->nm_ncentries--;
}
- lck_mtx_unlock(cp->gss_clnt_mtx);
+ lck_mtx_unlock(&cp->gss_clnt_mtx);
TAILQ_REMOVE(&nmp->nm_gsscl, cp, gss_clnt_entries);
break;
}
/* Found a valid context to return */
cp->gss_clnt_refcnt++;
req->r_gss_ctx = cp;
- lck_mtx_unlock(cp->gss_clnt_mtx);
+ lck_mtx_unlock(&cp->gss_clnt_mtx);
lck_mtx_unlock(&nmp->nm_lock);
+ NFS_ZFREE(nfs_req_zone, treq);
return 0;
}
- lck_mtx_unlock(cp->gss_clnt_mtx);
+ lck_mtx_unlock(&cp->gss_clnt_mtx);
}
if (!cp && nfs_root_steals_ctx && principal == NULL && kauth_cred_getuid(req->r_cred) == 0) {
nfs_gss_clnt_ctx_ref(req, cp);
lck_mtx_unlock(&nmp->nm_lock);
NFS_GSS_DBG("Root stole context %s\n", NFS_GSS_CTX(req, NULL));
+ NFS_ZFREE(nfs_req_zone, treq);
return 0;
}
}
MALLOC(cp, struct nfs_gss_clnt_ctx *, sizeof(*cp), M_TEMP, M_WAITOK | M_ZERO);
if (cp == NULL) {
lck_mtx_unlock(&nmp->nm_lock);
+ NFS_ZFREE(nfs_req_zone, treq);
return ENOMEM;
}
cp->gss_clnt_cred = req->r_cred;
kauth_cred_ref(cp->gss_clnt_cred);
- cp->gss_clnt_mtx = lck_mtx_alloc_init(nfs_gss_clnt_grp, LCK_ATTR_NULL);
+ lck_mtx_init(&cp->gss_clnt_mtx, &nfs_gss_clnt_grp, LCK_ATTR_NULL);
cp->gss_clnt_ptime = now.tv_sec - GSS_PRINT_DELAY;
if (principal) {
MALLOC(cp->gss_clnt_principal, uint8_t *, plen + 1, M_TEMP, M_WAITOK | M_ZERO);
cp->gss_clnt_prinlen = plen;
cp->gss_clnt_prinnt = nt;
cp->gss_clnt_flags |= GSS_CTX_STICKY;
- nfs_gss_clnt_mnt_ref(nmp);
+ if (!nfs_gss_clnt_mnt_ref(nmp)) {
+ cp->gss_clnt_flags |= GSS_CTX_USECOUNT;
+ }
}
} else {
+ uint32_t oldflags = cp->gss_clnt_flags;
nfs_gss_clnt_ctx_clean(cp);
if (principal) {
/*
* match and we will fall through here.
*/
cp->gss_clnt_flags |= GSS_CTX_STICKY;
+
+ /*
+ * We are preserving old flags if it set, and we take a ref if not set.
+ * Also, because of the short circuit we will not take extra refs here.
+ */
+ if ((oldflags & GSS_CTX_USECOUNT) || !nfs_gss_clnt_mnt_ref(nmp)) {
+ cp->gss_clnt_flags |= GSS_CTX_USECOUNT;
+ }
}
}
}
/* Remove any old matching context that had a different principal */
- nfs_gss_clnt_ctx_unref(&treq);
-
+ nfs_gss_clnt_ctx_unref(treq);
+ NFS_ZFREE(nfs_req_zone, treq);
return error;
}
* doing the context setup. Wait until the context thread
* is null.
*/
- lck_mtx_lock(cp->gss_clnt_mtx);
+ lck_mtx_lock(&cp->gss_clnt_mtx);
if (cp->gss_clnt_thread && cp->gss_clnt_thread != current_thread()) {
cp->gss_clnt_flags |= GSS_NEEDCTX;
- msleep(cp, cp->gss_clnt_mtx, slpflag | PDROP, "ctxwait", NULL);
+ msleep(cp, &cp->gss_clnt_mtx, slpflag | PDROP, "ctxwait", NULL);
slpflag &= ~PCATCH;
if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0))) {
return error;
nfs_gss_clnt_ctx_unref(req);
goto retry;
}
- lck_mtx_unlock(cp->gss_clnt_mtx);
+ lck_mtx_unlock(&cp->gss_clnt_mtx);
if (cp->gss_clnt_flags & GSS_CTX_COMPLETE) {
/*
* we allocate a new sequence number and allow this request
* to proceed.
*/
- lck_mtx_lock(cp->gss_clnt_mtx);
+ lck_mtx_lock(&cp->gss_clnt_mtx);
while (win_getbit(cp->gss_clnt_seqbits,
((cp->gss_clnt_seqnum - cp->gss_clnt_seqwin) + 1) % cp->gss_clnt_seqwin)) {
cp->gss_clnt_flags |= GSS_NEEDSEQ;
- msleep(cp, cp->gss_clnt_mtx, slpflag | PDROP, "seqwin", NULL);
+ msleep(cp, &cp->gss_clnt_mtx, slpflag | PDROP, "seqwin", NULL);
slpflag &= ~PCATCH;
if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0))) {
return error;
}
- lck_mtx_lock(cp->gss_clnt_mtx);
+ lck_mtx_lock(&cp->gss_clnt_mtx);
if (cp->gss_clnt_flags & GSS_CTX_INVAL) {
		/* Renewed while we were waiting */
- lck_mtx_unlock(cp->gss_clnt_mtx);
+ lck_mtx_unlock(&cp->gss_clnt_mtx);
nfs_gss_clnt_ctx_unref(req);
goto retry;
}
}
seqnum = ++cp->gss_clnt_seqnum;
win_setbit(cp->gss_clnt_seqbits, seqnum % cp->gss_clnt_seqwin);
- lck_mtx_unlock(cp->gss_clnt_mtx);
+ lck_mtx_unlock(&cp->gss_clnt_mtx);
MALLOC(gsp, struct gss_seq *, sizeof(*gsp), M_TEMP, M_WAITOK | M_ZERO);
if (gsp == NULL) {
struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx;
struct nfsm_chain nmc_tmp;
struct gss_seq *gsp;
- uint32_t reslen, offset;
+ uint32_t reslen;
int error = 0;
mbuf_t results_mbuf, prev_mbuf, pad_mbuf;
- size_t ressize;
+ size_t ressize, offset;
reslen = 0;
*accepted_statusp = 0;
/*
* The context is apparently established successfully
*/
- lck_mtx_lock(cp->gss_clnt_mtx);
+ lck_mtx_lock(&cp->gss_clnt_mtx);
cp->gss_clnt_flags |= GSS_CTX_COMPLETE;
- lck_mtx_unlock(cp->gss_clnt_mtx);
+ lck_mtx_unlock(&cp->gss_clnt_mtx);
cp->gss_clnt_proc = RPCSEC_GSS_DATA;
network_seqnum = htonl(cp->gss_clnt_seqwin);
* It will be removed when the reference count
* drops to zero.
*/
- lck_mtx_lock(cp->gss_clnt_mtx);
+ lck_mtx_lock(&cp->gss_clnt_mtx);
if (error) {
cp->gss_clnt_flags |= GSS_CTX_INVAL;
}
cp->gss_clnt_flags &= ~GSS_NEEDCTX;
wakeup(cp);
}
- lck_mtx_unlock(cp->gss_clnt_mtx);
+ lck_mtx_unlock(&cp->gss_clnt_mtx);
NFS_GSS_DBG("Returning error = %d\n", error);
return error;
/*
* Give up on this context
*/
- lck_mtx_lock(cp->gss_clnt_mtx);
+ lck_mtx_lock(&cp->gss_clnt_mtx);
cp->gss_clnt_flags |= GSS_CTX_INVAL;
/*
cp->gss_clnt_flags &= ~GSS_NEEDCTX;
wakeup(cp);
}
- lck_mtx_unlock(cp->gss_clnt_mtx);
+ lck_mtx_unlock(&cp->gss_clnt_mtx);
return error;
}
*/
static uint8_t *
-nfs_gss_clnt_svcname(struct nfsmount *nmp, gssd_nametype *nt, uint32_t *len)
+nfs_gss_clnt_svcname(struct nfsmount *nmp, gssd_nametype *nt, size_t *len)
{
char *svcname, *d, *server;
int lindx, sindx;
mach_msg_type_number_t otokenlen;
int error = 0;
uint8_t *principal = NULL;
- uint32_t plen = 0;
+ size_t plen = 0;
int32_t nt = GSSD_STRING_NAME;
vm_map_copy_t pname = NULL;
vm_map_copy_t svcname = NULL;
}
if (cp->gss_clnt_display == NULL && *display_name != '\0') {
- int dlen = strnlen(display_name, MAX_DISPLAY_STR) + 1; /* Add extra byte to include '\0' */
+ size_t dlen = strnlen(display_name, MAX_DISPLAY_STR) + 1; /* Add extra byte to include '\0' */
if (dlen < MAX_DISPLAY_STR) {
MALLOC(cp->gss_clnt_display, char *, dlen, M_TEMP, M_WAITOK);
* sequence number window to indicate it's done.
* We do this even if the request timed out.
*/
- lck_mtx_lock(cp->gss_clnt_mtx);
+ lck_mtx_lock(&cp->gss_clnt_mtx);
gsp = SLIST_FIRST(&req->r_gss_seqlist);
if (gsp && gsp->gss_seqnum > (cp->gss_clnt_seqnum - cp->gss_clnt_seqwin)) {
win_resetbit(cp->gss_clnt_seqbits,
cp->gss_clnt_flags &= ~GSS_NEEDSEQ;
wakeup(cp);
}
- lck_mtx_unlock(cp->gss_clnt_mtx);
+ lck_mtx_unlock(&cp->gss_clnt_mtx);
}
/*
{
req->r_gss_ctx = cp;
- lck_mtx_lock(cp->gss_clnt_mtx);
+ lck_mtx_lock(&cp->gss_clnt_mtx);
cp->gss_clnt_refcnt++;
- lck_mtx_unlock(cp->gss_clnt_mtx);
+ lck_mtx_unlock(&cp->gss_clnt_mtx);
}
/*
req->r_gss_ctx = NULL;
- lck_mtx_lock(cp->gss_clnt_mtx);
+ lck_mtx_lock(&cp->gss_clnt_mtx);
if (--cp->gss_clnt_refcnt < 0) {
panic("Over release of gss context!\n");
}
}
if (cp->gss_clnt_flags & GSS_CTX_DESTROY) {
destroy = 1;
- if (cp->gss_clnt_flags & GSS_CTX_STICKY) {
- nfs_gss_clnt_mnt_rele(nmp);
+ if ((cp->gss_clnt_flags & GSS_CTX_USECOUNT) && !nfs_gss_clnt_mnt_rele(nmp)) {
+ cp->gss_clnt_flags &= ~GSS_CTX_USECOUNT;
}
if (cp->gss_clnt_nctime) {
on_neg_cache = 1;
cp->gss_clnt_nctime = now.tv_sec;
neg_cache = 1;
}
- lck_mtx_unlock(cp->gss_clnt_mtx);
+ lck_mtx_unlock(&cp->gss_clnt_mtx);
if (destroy) {
NFS_GSS_DBG("Destroying context %s\n", NFS_GSS_CTX(req, cp));
if (nmp) {
continue;
}
/* Not referenced, remove it. */
- lck_mtx_lock(cp->gss_clnt_mtx);
+ lck_mtx_lock(&cp->gss_clnt_mtx);
if (cp->gss_clnt_refcnt == 0) {
cp->gss_clnt_flags |= GSS_CTX_DESTROY;
destroy = 1;
}
- lck_mtx_unlock(cp->gss_clnt_mtx);
+ lck_mtx_unlock(&cp->gss_clnt_mtx);
if (destroy) {
TAILQ_REMOVE(&nmp->nm_gsscl, cp, gss_clnt_entries);
nmp->nm_ncentries++;
return ENOMEM;
}
bzero(dcp, sizeof(struct nfs_gss_clnt_ctx));
- dcp->gss_clnt_mtx = lck_mtx_alloc_init(nfs_gss_clnt_grp, LCK_ATTR_NULL);
+ lck_mtx_init(&dcp->gss_clnt_mtx, &nfs_gss_clnt_grp, LCK_ATTR_NULL);
dcp->gss_clnt_cred = scp->gss_clnt_cred;
kauth_cred_ref(dcp->gss_clnt_cred);
dcp->gss_clnt_prinlen = scp->gss_clnt_prinlen;
host_release_special_port(cp->gss_clnt_mport);
cp->gss_clnt_mport = IPC_PORT_NULL;
- if (cp->gss_clnt_mtx) {
- lck_mtx_destroy(cp->gss_clnt_mtx, nfs_gss_clnt_grp);
- cp->gss_clnt_mtx = (lck_mtx_t *)NULL;
- }
+ lck_mtx_destroy(&cp->gss_clnt_mtx, &nfs_gss_clnt_grp);
+
if (IS_VALID_CRED(cp->gss_clnt_cred)) {
kauth_cred_unref(&cp->gss_clnt_cred);
}
}
nmp = req->r_nmp;
- lck_mtx_lock(cp->gss_clnt_mtx);
+ lck_mtx_lock(&cp->gss_clnt_mtx);
if (cp->gss_clnt_flags & GSS_CTX_INVAL) {
- lck_mtx_unlock(cp->gss_clnt_mtx);
+ lck_mtx_unlock(&cp->gss_clnt_mtx);
nfs_gss_clnt_ctx_unref(req);
return 0; // already being renewed
}
cp->gss_clnt_flags &= ~GSS_NEEDSEQ;
wakeup(cp);
}
- lck_mtx_unlock(cp->gss_clnt_mtx);
+ lck_mtx_unlock(&cp->gss_clnt_mtx);
if (cp->gss_clnt_proc == RPCSEC_GSS_DESTROY) {
return EACCES; /* Destroying a context is best effort. Don't renew. */
struct nfs_gss_clnt_ctx *cp;
struct nfsm_chain nmreq, nmrep;
int error, status;
- struct nfsreq req;
- req.r_nmp = nmp;
+ struct nfsreq *req;
if (!nmp) {
return;
}
-
+ req = zalloc(nfs_req_zone);
+ req->r_nmp = nmp;
lck_mtx_lock(&nmp->nm_lock);
while ((cp = TAILQ_FIRST(&nmp->nm_gsscl))) {
TAILQ_REMOVE(&nmp->nm_gsscl, cp, gss_clnt_entries);
cp->gss_clnt_entries.tqe_next = NFSNOLIST;
- lck_mtx_lock(cp->gss_clnt_mtx);
+ lck_mtx_lock(&cp->gss_clnt_mtx);
if (cp->gss_clnt_flags & GSS_CTX_DESTROY) {
- lck_mtx_unlock(cp->gss_clnt_mtx);
+ lck_mtx_unlock(&cp->gss_clnt_mtx);
continue;
}
cp->gss_clnt_refcnt++;
- lck_mtx_unlock(cp->gss_clnt_mtx);
- req.r_gss_ctx = cp;
+ lck_mtx_unlock(&cp->gss_clnt_mtx);
+ req->r_gss_ctx = cp;
lck_mtx_unlock(&nmp->nm_lock);
/*
* the reference to remove it if its
* refcount is zero.
*/
- lck_mtx_lock(cp->gss_clnt_mtx);
+ lck_mtx_lock(&cp->gss_clnt_mtx);
cp->gss_clnt_flags |= (GSS_CTX_INVAL | GSS_CTX_DESTROY);
- lck_mtx_unlock(cp->gss_clnt_mtx);
- nfs_gss_clnt_ctx_unref(&req);
+ lck_mtx_unlock(&cp->gss_clnt_mtx);
+ nfs_gss_clnt_ctx_unref(req);
lck_mtx_lock(&nmp->nm_lock);
}
lck_mtx_unlock(&nmp->nm_lock);
assert(TAILQ_EMPTY(&nmp->nm_gsscl));
+ NFS_ZFREE(nfs_req_zone, req);
}
int
nfs_gss_clnt_ctx_remove(struct nfsmount *nmp, kauth_cred_t cred)
{
- struct nfs_gss_clnt_ctx *cp;
- struct nfsreq req;
+ struct nfs_gss_clnt_ctx *cp, *tcp;
+ struct nfsreq *req;
- req.r_nmp = nmp;
+ req = zalloc(nfs_req_zone);
+ req->r_nmp = nmp;
NFS_GSS_DBG("Enter\n");
NFS_GSS_CLNT_CTX_DUMP(nmp);
lck_mtx_lock(&nmp->nm_lock);
- TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) {
- lck_mtx_lock(cp->gss_clnt_mtx);
+ TAILQ_FOREACH_SAFE(cp, &nmp->nm_gsscl, gss_clnt_entries, tcp) {
+ lck_mtx_lock(&cp->gss_clnt_mtx);
if (nfs_gss_clnt_ctx_cred_match(cp->gss_clnt_cred, cred)) {
if (cp->gss_clnt_flags & GSS_CTX_DESTROY) {
NFS_GSS_DBG("Found destroyed context %d/%d. refcnt = %d continuing\n",
kauth_cred_getasid(cp->gss_clnt_cred),
kauth_cred_getauid(cp->gss_clnt_cred),
cp->gss_clnt_refcnt);
- lck_mtx_unlock(cp->gss_clnt_mtx);
+ lck_mtx_unlock(&cp->gss_clnt_mtx);
continue;
}
cp->gss_clnt_refcnt++;
cp->gss_clnt_flags |= (GSS_CTX_INVAL | GSS_CTX_DESTROY);
- lck_mtx_unlock(cp->gss_clnt_mtx);
- req.r_gss_ctx = cp;
+ lck_mtx_unlock(&cp->gss_clnt_mtx);
+ req->r_gss_ctx = cp;
lck_mtx_unlock(&nmp->nm_lock);
/*
* Drop the reference to remove it if its
kauth_cred_getasid(cp->gss_clnt_cred),
kauth_cred_getuid(cp->gss_clnt_cred),
cp->gss_clnt_refcnt);
- nfs_gss_clnt_ctx_unref(&req);
+ nfs_gss_clnt_ctx_unref(req);
+ NFS_ZFREE(nfs_req_zone, req);
return 0;
}
- lck_mtx_unlock(cp->gss_clnt_mtx);
+ lck_mtx_unlock(&cp->gss_clnt_mtx);
}
lck_mtx_unlock(&nmp->nm_lock);
+ NFS_ZFREE(nfs_req_zone, req);
NFS_GSS_DBG("Returning ENOENT\n");
return ENOENT;
}
*/
int
nfs_gss_clnt_ctx_set_principal(struct nfsmount *nmp, vfs_context_t ctx,
- uint8_t *principal, uint32_t princlen, uint32_t nametype)
+ uint8_t *principal, size_t princlen, uint32_t nametype)
{
- struct nfsreq req;
+ struct nfsreq *req;
int error;
NFS_GSS_DBG("Enter:\n");
- bzero(&req, sizeof(struct nfsreq));
- req.r_nmp = nmp;
- req.r_gss_ctx = NULL;
- req.r_auth = nmp->nm_auth;
- req.r_thread = vfs_context_thread(ctx);
- req.r_cred = vfs_context_ucred(ctx);
+ req = zalloc_flags(nfs_req_zone, Z_WAITOK | Z_ZERO);
+ req->r_nmp = nmp;
+ req->r_auth = nmp->nm_auth;
+ req->r_thread = vfs_context_thread(ctx);
+ req->r_cred = vfs_context_ucred(ctx);
- error = nfs_gss_clnt_ctx_find_principal(&req, principal, princlen, nametype);
+ error = nfs_gss_clnt_ctx_find_principal(req, principal, princlen, nametype);
NFS_GSS_DBG("nfs_gss_clnt_ctx_find_principal returned %d\n", error);
/*
* We don't care about auth errors. Those would indicate that the context is in the
}
/* We're done with this request */
- nfs_gss_clnt_ctx_unref(&req);
-
+ nfs_gss_clnt_ctx_unref(req);
+ NFS_ZFREE(nfs_req_zone, req);
return error;
}
nfs_gss_clnt_ctx_get_principal(struct nfsmount *nmp, vfs_context_t ctx,
struct user_nfs_gss_principal *p)
{
- struct nfsreq req;
+ struct nfsreq *req;
int error = 0;
struct nfs_gss_clnt_ctx *cp;
kauth_cred_t cred = vfs_context_ucred(ctx);
p->princlen = 0;
p->flags = 0;
- req.r_nmp = nmp;
+ req = zalloc_flags(nfs_req_zone, Z_WAITOK);
+ req->r_nmp = nmp;
lck_mtx_lock(&nmp->nm_lock);
TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) {
- lck_mtx_lock(cp->gss_clnt_mtx);
+ lck_mtx_lock(&cp->gss_clnt_mtx);
if (cp->gss_clnt_flags & GSS_CTX_DESTROY) {
NFS_GSS_DBG("Found destroyed context %s refcnt = %d continuing\n",
- NFS_GSS_CTX(&req, cp),
+ NFS_GSS_CTX(req, cp),
cp->gss_clnt_refcnt);
- lck_mtx_unlock(cp->gss_clnt_mtx);
+ lck_mtx_unlock(&cp->gss_clnt_mtx);
continue;
}
if (nfs_gss_clnt_ctx_cred_match(cp->gss_clnt_cred, cred)) {
cp->gss_clnt_refcnt++;
- lck_mtx_unlock(cp->gss_clnt_mtx);
+ lck_mtx_unlock(&cp->gss_clnt_mtx);
goto out;
}
- lck_mtx_unlock(cp->gss_clnt_mtx);
+ lck_mtx_unlock(&cp->gss_clnt_mtx);
}
out:
p->flags |= NFS_IOC_NO_CRED_FLAG; /* No credentials, valid or invalid on this mount */
NFS_GSS_DBG("No context found for session %d by uid %d\n",
kauth_cred_getasid(cred), kauth_cred_getuid(cred));
+ NFS_ZFREE(nfs_req_zone, req);
return 0;
}
lck_mtx_unlock(&nmp->nm_lock);
- req.r_gss_ctx = cp;
- NFS_GSS_DBG("Found context %s\n", NFS_GSS_CTX(&req, NULL));
- nfs_gss_clnt_ctx_unref(&req);
+ req->r_gss_ctx = cp;
+ NFS_GSS_DBG("Found context %s\n", NFS_GSS_CTX(req, NULL));
+ nfs_gss_clnt_ctx_unref(req);
+ NFS_ZFREE(nfs_req_zone, req);
return error;
}
#endif /* CONFIG_NFS_CLIENT */
*/
clock_interval_to_deadline(GSS_CTX_PEND, NSEC_PER_SEC, &timenow);
- lck_mtx_lock(nfs_gss_svc_ctx_mutex);
+ lck_mtx_lock(&nfs_gss_svc_ctx_mutex);
LIST_FOREACH(cp, head, gss_svc_entries) {
if (cp->gss_svc_handle == handle) {
cp = NULL;
break;
}
- lck_mtx_lock(cp->gss_svc_mtx);
+ lck_mtx_lock(&cp->gss_svc_mtx);
cp->gss_svc_refcnt++;
- lck_mtx_unlock(cp->gss_svc_mtx);
+ lck_mtx_unlock(&cp->gss_svc_mtx);
break;
}
}
- lck_mtx_unlock(nfs_gss_svc_ctx_mutex);
+ lck_mtx_unlock(&nfs_gss_svc_ctx_mutex);
return cp;
}
struct nfs_gss_svc_ctx_hashhead *head;
struct nfs_gss_svc_ctx *p;
- lck_mtx_lock(nfs_gss_svc_ctx_mutex);
+ lck_mtx_lock(&nfs_gss_svc_ctx_mutex);
/*
* Give the client a random handle so that if we reboot
min(GSS_TIMER_PERIOD, max(GSS_CTX_TTL_MIN, nfsrv_gss_context_ttl)) * MSECS_PER_SEC);
}
- lck_mtx_unlock(nfs_gss_svc_ctx_mutex);
+ lck_mtx_unlock(&nfs_gss_svc_ctx_mutex);
}
/*
int contexts = 0;
int i;
- lck_mtx_lock(nfs_gss_svc_ctx_mutex);
+ lck_mtx_lock(&nfs_gss_svc_ctx_mutex);
clock_get_uptime(&timenow);
NFS_GSS_DBG("is running\n");
if (cp->gss_svc_seqbits) {
FREE(cp->gss_svc_seqbits, M_TEMP);
}
- lck_mtx_destroy(cp->gss_svc_mtx, nfs_gss_svc_grp);
+ lck_mtx_destroy(&cp->gss_svc_mtx, &nfs_gss_svc_grp);
FREE(cp, M_TEMP);
contexts--;
}
min(GSS_TIMER_PERIOD, max(GSS_CTX_TTL_MIN, nfsrv_gss_context_ttl)) * MSECS_PER_SEC);
}
- lck_mtx_unlock(nfs_gss_svc_ctx_mutex);
+ lck_mtx_unlock(&nfs_gss_svc_ctx_mutex);
}
/*
uint32_t handle, handle_len;
uint32_t major;
struct nfs_gss_svc_ctx *cp = NULL;
- uint32_t flavor = 0, header_len;
+ uint32_t flavor = 0;
int error = 0;
- uint32_t arglen, start;
- size_t argsize;
+ uint32_t arglen;
+ size_t argsize, start, header_len;
gss_buffer_desc cksum;
struct nfsm_chain nmc_tmp;
mbuf_t reply_mbuf, prev_mbuf, pad_mbuf;
error = ENOMEM;
goto nfsmout;
}
- cp->gss_svc_mtx = lck_mtx_alloc_init(nfs_gss_svc_grp, LCK_ATTR_NULL);
+ lck_mtx_init(&cp->gss_svc_mtx, &nfs_gss_svc_grp, LCK_ATTR_NULL);
cp->gss_svc_refcnt = 1;
} else {
/*
temp_pcred.cr_uid = cp->gss_svc_uid;
bcopy(cp->gss_svc_gids, temp_pcred.cr_groups,
sizeof(gid_t) * cp->gss_svc_ngroups);
- temp_pcred.cr_ngroups = cp->gss_svc_ngroups;
+ temp_pcred.cr_ngroups = (short)cp->gss_svc_ngroups;
nd->nd_cr = posix_cred_create(&temp_pcred);
if (nd->nd_cr == NULL) {
}
if (error) {
if (proc == RPCSEC_GSS_INIT) {
- lck_mtx_destroy(cp->gss_svc_mtx, nfs_gss_svc_grp);
+ lck_mtx_destroy(&cp->gss_svc_mtx, &nfs_gss_svc_grp);
FREE(cp, M_TEMP);
cp = NULL;
}
switch (cp->gss_svc_proc) {
case RPCSEC_GSS_INIT:
nfs_gss_svc_ctx_insert(cp);
- /* FALLTHRU */
+ OS_FALLTHROUGH;
case RPCSEC_GSS_CONTINUE_INIT:
/* Get the token from the request */
cp = nfs_gss_svc_ctx_find(cp->gss_svc_handle);
if (cp != NULL) {
cp->gss_svc_handle = 0; // so it can't be found
- lck_mtx_lock(cp->gss_svc_mtx);
+ lck_mtx_lock(&cp->gss_svc_mtx);
clock_interval_to_deadline(GSS_CTX_PEND, NSEC_PER_SEC,
&cp->gss_svc_incarnation);
- lck_mtx_unlock(cp->gss_svc_mtx);
+ lck_mtx_unlock(&cp->gss_svc_mtx);
}
break;
default:
if (cp->gss_svc_token != NULL) {
FREE(cp->gss_svc_token, M_TEMP);
}
- lck_mtx_destroy(cp->gss_svc_mtx, nfs_gss_svc_grp);
+ lck_mtx_destroy(&cp->gss_svc_mtx, &nfs_gss_svc_grp);
FREE(cp, M_TEMP);
}
uint32_t win = cp->gss_svc_seqwin;
uint32_t i;
- lck_mtx_lock(cp->gss_svc_mtx);
+ lck_mtx_lock(&cp->gss_svc_mtx);
/*
* If greater than the window upper bound,
}
win_setbit(bits, seq % win);
cp->gss_svc_seqmax = seq;
- lck_mtx_unlock(cp->gss_svc_mtx);
+ lck_mtx_unlock(&cp->gss_svc_mtx);
return 1;
}
* Invalid if below the lower bound of the window
*/
if (seq <= cp->gss_svc_seqmax - win) {
- lck_mtx_unlock(cp->gss_svc_mtx);
+ lck_mtx_unlock(&cp->gss_svc_mtx);
return 0;
}
* In the window, invalid if the bit is already set
*/
if (win_getbit(bits, seq % win)) {
- lck_mtx_unlock(cp->gss_svc_mtx);
+ lck_mtx_unlock(&cp->gss_svc_mtx);
return 0;
}
win_setbit(bits, seq % win);
- lck_mtx_unlock(cp->gss_svc_mtx);
+ lck_mtx_unlock(&cp->gss_svc_mtx);
return 1;
}
void
nfs_gss_svc_ctx_deref(struct nfs_gss_svc_ctx *cp)
{
- lck_mtx_lock(cp->gss_svc_mtx);
+ lck_mtx_lock(&cp->gss_svc_mtx);
if (cp->gss_svc_refcnt > 0) {
cp->gss_svc_refcnt--;
} else {
printf("nfs_gss_ctx_deref: zero refcount\n");
}
- lck_mtx_unlock(cp->gss_svc_mtx);
+ lck_mtx_unlock(&cp->gss_svc_mtx);
}
/*
struct nfs_gss_svc_ctx *cp, *ncp;
int i;
- lck_mtx_lock(nfs_gss_svc_ctx_mutex);
+ lck_mtx_lock(&nfs_gss_svc_ctx_mutex);
/*
* Run through all the buckets
if (cp->gss_svc_seqbits) {
FREE(cp->gss_svc_seqbits, M_TEMP);
}
- lck_mtx_destroy(cp->gss_svc_mtx, nfs_gss_svc_grp);
+ lck_mtx_destroy(&cp->gss_svc_mtx, &nfs_gss_svc_grp);
FREE(cp, M_TEMP);
}
}
- lck_mtx_unlock(nfs_gss_svc_ctx_mutex);
+ lck_mtx_unlock(&nfs_gss_svc_ctx_mutex);
}
#endif /* CONFIG_NFS_SERVER */
* complete.
*/
static void
-nfs_gss_mach_alloc_buffer(u_char *buf, uint32_t buflen, vm_map_copy_t *addr)
+nfs_gss_mach_alloc_buffer(u_char *buf, size_t buflen, vm_map_copy_t *addr)
{
kern_return_t kr;
vm_offset_t kmem_buf;