/*
- * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
*
- * @APPLE_LICENSE_HEADER_START@
- *
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
- *
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
- *
- * @APPLE_LICENSE_HEADER_END@
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
* FreeBSD-Id: nfs_srvcache.c,v 1.15 1997/10/12 20:25:46 phk Exp $
*/
-#ifndef NFS_NOSERVER
+#if NFSSERVER
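+/*
+ * NFS server request (duplicate request) cache.
+ *
+ * Recently completed requests are remembered so that, when a client
+ * retransmits a request over an unreliable transport, the server can
+ * drop it (if the original is still in progress) or resend the cached
+ * reply for non-idempotent operations instead of re-executing them.
+ */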
/*
* Reference: Chet Juszczak, "Improving the Performance and Correctness
* of an NFS Server", in Proc. Winter 1989 USENIX Conference,
#include <sys/kpi_mbuf.h>
#include <sys/malloc.h>
#include <sys/socket.h>
-#include <sys/socketvar.h> /* for dup_sockaddr */
#include <libkern/OSAtomic.h>
#include <netinet/in.h>
-#if ISO
-#include <netiso/iso.h>
-#endif
#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsrvcache.h>
-extern struct nfsstats nfsstats;
extern int nfsv2_procid[NFS_NPROCS];
-long numnfsrvcache;
-static long desirednfsrvcache = NFSRVCACHESIZ;
+static int nfsrv_reqcache_count;
+int nfsrv_reqcache_size = NFSRVCACHESIZ;
-#define NFSRCHASH(xid) \
- (&nfsrvhashtbl[((xid) + ((xid) >> 24)) & nfsrvhash])
-LIST_HEAD(nfsrvhash, nfsrvcache) *nfsrvhashtbl;
-TAILQ_HEAD(nfsrvlru, nfsrvcache) nfsrvlruhead;
-u_long nfsrvhash;
+#define NFSRCHASH(xid) \
+ (&nfsrv_reqcache_hashtbl[((xid) + ((xid) >> 24)) & nfsrv_reqcache_hash])
+LIST_HEAD(nfsrv_reqcache_hash, nfsrvcache) *nfsrv_reqcache_hashtbl;
+TAILQ_HEAD(nfsrv_reqcache_lru, nfsrvcache) nfsrv_reqcache_lruhead;
+u_long nfsrv_reqcache_hash;
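+/*
+ * NFSRCHASH() folds the high byte of the XID into the low bits before
+ * masking with nfsrv_reqcache_hash (the bucket mask set up by hashinit()
+ * in nfsrv_initcache()), e.g. xid 0x12345678 indexes bucket
+ * (0x12345678 + 0x12) & nfsrv_reqcache_hash.
+ */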
lck_grp_t *nfsrv_reqcache_lck_grp;
-lck_grp_attr_t *nfsrv_reqcache_lck_grp_attr;
-lck_attr_t *nfsrv_reqcache_lck_attr;
lck_mtx_t *nfsrv_reqcache_mutex;
-#define NETFAMILY(rp) \
- (((rp)->rc_flag & RC_INETADDR) ? AF_INET : AF_ISO)
-
/*
* Static array that defines which nfs rpc's are nonidempotent
*/
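+/*
+ * Replies are cached only for non-idempotent procedures: re-executing a
+ * retransmitted CREATE, REMOVE, or RENAME could return a spurious error
+ * (e.g. EEXIST or ENOENT) even though the original request succeeded, so
+ * the saved reply is returned instead.
+ */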
* Initialize the server request cache list
*/
void
-nfsrv_initcache()
+nfsrv_initcache(void)
{
- /* init nfs server request cache mutex */
- nfsrv_reqcache_lck_grp_attr = lck_grp_attr_alloc_init();
- lck_grp_attr_setstat(nfsrv_reqcache_lck_grp_attr);
- nfsrv_reqcache_lck_grp = lck_grp_alloc_init("nfsrv_reqcache", nfsrv_reqcache_lck_grp_attr);
- nfsrv_reqcache_lck_attr = lck_attr_alloc_init();
- nfsrv_reqcache_mutex = lck_mtx_alloc_init(nfsrv_reqcache_lck_grp, nfsrv_reqcache_lck_attr);
+ if (nfsrv_reqcache_size <= 0) {
+ return;
+ }
- nfsrvhashtbl = hashinit(desirednfsrvcache, M_NFSD, &nfsrvhash);
- TAILQ_INIT(&nfsrvlruhead);
+ lck_mtx_lock(nfsrv_reqcache_mutex);
+ /* init nfs server request cache hash table */
+ nfsrv_reqcache_hashtbl = hashinit(nfsrv_reqcache_size, M_NFSD, &nfsrv_reqcache_hash);
+ TAILQ_INIT(&nfsrv_reqcache_lruhead);
+ lck_mtx_unlock(nfsrv_reqcache_mutex);
+}
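+/*
+ * Note: nfsrv_reqcache_mutex and nfsrv_reqcache_lck_grp are now expected
+ * to be set up elsewhere before this is called, since this function takes
+ * the mutex around the hash table initialization.
+ */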
+
+/*
+ * This function compares a socket address (taken from an mbuf) against a
+ * stored host address of the given family and returns TRUE if they refer
+ * to the same host.
+ * If there is any doubt, return FALSE.
+ * The AF_INET family is handled as a special case so that address mbufs
+ * don't need to be saved just to store a "struct in_addr", which is only
+ * 4 bytes. Ditto for AF_INET6, which is only 16 bytes.
+ */
+static int
+netaddr_match(
+ int family,
+ union nethostaddr *haddr,
+ mbuf_t nam)
+{
+ struct sockaddr_in *inetaddr;
+ struct sockaddr_in6 *inet6addr;
+
+ switch (family) {
+ case AF_INET:
+ inetaddr = mbuf_data(nam);
+ if ((inetaddr->sin_family == AF_INET) &&
+ (inetaddr->sin_addr.s_addr == haddr->had_inetaddr)) {
+ return 1;
+ }
+ break;
+ case AF_INET6:
+ inet6addr = mbuf_data(nam);
+ if ((inet6addr->sin6_family == AF_INET6) &&
+ !bcmp(&inet6addr->sin6_addr, &haddr->had_inet6addr, sizeof(inet6addr->sin6_addr))) {
+ return 1;
+ }
+ break;
+ }
+ return 0;
}
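+/*
+ * netaddr_match() is used below by nfsrv_getcache() and nfsrv_updatecache()
+ * to check whether a cache entry's saved host address (rc_haddr) matches
+ * the sender of the current request (nd_nam).
+ */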
/*
* Update/add new request at end of lru list
*/
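+/*
+ * Return values:
+ *   RC_DOIT   - execute the request normally (a cache entry, if one could
+ *               be obtained, now marks it as in progress)
+ *   RC_REPLY  - a previously cached reply was copied into *mrepp; resend it
+ *   RC_DROPIT - drop the request (e.g. the original is still in progress,
+ *               or building the cached reply failed)
+ */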
int
-nfsrv_getcache(nd, slp, repp)
- struct nfsrv_descript *nd;
- struct nfssvc_sock *slp;
- mbuf_t *repp;
+nfsrv_getcache(
+ struct nfsrv_descript *nd,
+ struct nfsrv_sock *slp,
+ mbuf_t *mrepp)
{
struct nfsrvcache *rp;
- mbuf_t mb;
- struct sockaddr_in *saddr;
- caddr_t bpos;
+ struct nfsm_chain nmrep;
+ struct sockaddr *saddr;
int ret, error;
/*
* Don't cache recent requests for reliable transport protocols.
* (Maybe we should for the case of a reconnect, but..)
*/
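+ /*
+ * nd_nam2 is only set for requests received on a connectionless (datagram)
+ * socket, so requests arriving over a stream (TCP) socket skip the cache.
+ */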
- if (!nd->nd_nam2)
- return (RC_DOIT);
+ if (!nd->nd_nam2) {
+ return RC_DOIT;
+ }
lck_mtx_lock(nfsrv_reqcache_mutex);
loop:
for (rp = NFSRCHASH(nd->nd_retxid)->lh_first; rp != 0;
rp = rp->rc_hash.le_next) {
- if (nd->nd_retxid == rp->rc_xid && nd->nd_procnum == rp->rc_proc &&
- netaddr_match(NETFAMILY(rp), &rp->rc_haddr, nd->nd_nam)) {
+ if (nd->nd_retxid == rp->rc_xid && nd->nd_procnum == rp->rc_proc &&
+ netaddr_match(rp->rc_family, &rp->rc_haddr, nd->nd_nam)) {
if ((rp->rc_flag & RC_LOCKED) != 0) {
rp->rc_flag |= RC_WANTED;
- (void) tsleep((caddr_t)rp, PZERO-1, "nfsrc", 0);
+ msleep(rp, nfsrv_reqcache_mutex, PZERO - 1, "nfsrc", NULL);
goto loop;
}
rp->rc_flag |= RC_LOCKED;
/* If not at end of LRU chain, move it there */
if (rp->rc_lru.tqe_next) {
- TAILQ_REMOVE(&nfsrvlruhead, rp, rc_lru);
- TAILQ_INSERT_TAIL(&nfsrvlruhead, rp, rc_lru);
+ TAILQ_REMOVE(&nfsrv_reqcache_lruhead, rp, rc_lru);
+ TAILQ_INSERT_TAIL(&nfsrv_reqcache_lruhead, rp, rc_lru);
}
- if (rp->rc_state == RC_UNUSED)
+ if (rp->rc_state == RC_UNUSED) {
panic("nfsrv cache");
+ }
if (rp->rc_state == RC_INPROG) {
- OSAddAtomic(1, (SInt32*)&nfsstats.srvcache_inproghits);
+ OSAddAtomic64(1, &nfsstats.srvcache_inproghits);
ret = RC_DROPIT;
} else if (rp->rc_flag & RC_REPSTATUS) {
- OSAddAtomic(1, (SInt32*)&nfsstats.srvcache_nonidemdonehits);
- nfs_rephead(0, nd, slp, rp->rc_status, repp, &mb, &bpos);
- ret = RC_REPLY;
+ OSAddAtomic64(1, &nfsstats.srvcache_nonidemdonehits);
+ nd->nd_repstat = rp->rc_status;
+ error = nfsrv_rephead(nd, slp, &nmrep, 0);
+ if (error) {
+ printf("nfsrv cache: reply alloc failed for nonidem request hit\n");
+ ret = RC_DROPIT;
+ *mrepp = NULL;
+ } else {
+ ret = RC_REPLY;
+ *mrepp = nmrep.nmc_mhead;
+ }
} else if (rp->rc_flag & RC_REPMBUF) {
- OSAddAtomic(1, (SInt32*)&nfsstats.srvcache_nonidemdonehits);
- error = mbuf_copym(rp->rc_reply, 0, MBUF_COPYALL, MBUF_WAITOK, repp);
+ OSAddAtomic64(1, &nfsstats.srvcache_nonidemdonehits);
+ error = mbuf_copym(rp->rc_reply, 0, MBUF_COPYALL, MBUF_WAITOK, mrepp);
if (error) {
printf("nfsrv cache: reply copym failed for nonidem request hit\n");
ret = RC_DROPIT;
ret = RC_REPLY;
}
} else {
- OSAddAtomic(1, (SInt32*)&nfsstats.srvcache_idemdonehits);
+ OSAddAtomic64(1, &nfsstats.srvcache_idemdonehits);
rp->rc_state = RC_INPROG;
ret = RC_DOIT;
}
rp->rc_flag &= ~RC_LOCKED;
if (rp->rc_flag & RC_WANTED) {
rp->rc_flag &= ~RC_WANTED;
- wakeup((caddr_t)rp);
+ wakeup(rp);
}
lck_mtx_unlock(nfsrv_reqcache_mutex);
- return (ret);
+ return ret;
}
}
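+ /*
+ * Cache miss: allocate a fresh entry if we're under nfsrv_reqcache_size,
+ * otherwise recycle the entry at the head of the LRU list, sleeping if
+ * that entry is currently locked by another thread.
+ */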
- OSAddAtomic(1, (SInt32*)&nfsstats.srvcache_misses);
- if (numnfsrvcache < desirednfsrvcache) {
+ OSAddAtomic64(1, &nfsstats.srvcache_misses);
+ if (nfsrv_reqcache_count < nfsrv_reqcache_size) {
/* try to allocate a new entry */
MALLOC(rp, struct nfsrvcache *, sizeof *rp, M_NFSD, M_WAITOK);
if (rp) {
bzero((char *)rp, sizeof *rp);
- numnfsrvcache++;
+ nfsrv_reqcache_count++;
rp->rc_flag = RC_LOCKED;
}
} else {
}
if (!rp) {
/* try to reuse the least recently used entry */
- rp = nfsrvlruhead.tqh_first;
+ rp = nfsrv_reqcache_lruhead.tqh_first;
if (!rp) {
/* no entry to reuse? */
/* OK, we just won't be able to cache this request */
lck_mtx_unlock(nfsrv_reqcache_mutex);
- return (RC_DOIT);
+ return RC_DOIT;
}
while ((rp->rc_flag & RC_LOCKED) != 0) {
rp->rc_flag |= RC_WANTED;
- (void) tsleep((caddr_t)rp, PZERO-1, "nfsrc", 0);
- rp = nfsrvlruhead.tqh_first;
+ msleep(rp, nfsrv_reqcache_mutex, PZERO - 1, "nfsrc", NULL);
+ rp = nfsrv_reqcache_lruhead.tqh_first;
}
rp->rc_flag |= RC_LOCKED;
LIST_REMOVE(rp, rc_hash);
- TAILQ_REMOVE(&nfsrvlruhead, rp, rc_lru);
- if (rp->rc_flag & RC_REPMBUF)
+ TAILQ_REMOVE(&nfsrv_reqcache_lruhead, rp, rc_lru);
+ if (rp->rc_flag & RC_REPMBUF) {
mbuf_freem(rp->rc_reply);
- if (rp->rc_flag & RC_NAM)
+ }
+ if (rp->rc_flag & RC_NAM) {
mbuf_freem(rp->rc_nam);
+ }
rp->rc_flag &= (RC_LOCKED | RC_WANTED);
}
- TAILQ_INSERT_TAIL(&nfsrvlruhead, rp, rc_lru);
+ TAILQ_INSERT_TAIL(&nfsrv_reqcache_lruhead, rp, rc_lru);
rp->rc_state = RC_INPROG;
rp->rc_xid = nd->nd_retxid;
saddr = mbuf_data(nd->nd_nam);
- switch (saddr->sin_family) {
+ rp->rc_family = saddr->sa_family;
+ switch (saddr->sa_family) {
case AF_INET:
rp->rc_flag |= RC_INETADDR;
- rp->rc_inetaddr = saddr->sin_addr.s_addr;
+ rp->rc_inetaddr = ((struct sockaddr_in*)saddr)->sin_addr.s_addr;
+ break;
+ case AF_INET6:
+ rp->rc_flag |= RC_INETADDR;
+ rp->rc_inet6addr = ((struct sockaddr_in6*)saddr)->sin6_addr;
break;
- case AF_ISO:
default:
error = mbuf_copym(nd->nd_nam, 0, MBUF_COPYALL, MBUF_WAITOK, &rp->rc_nam);
- if (error)
+ if (error) {
printf("nfsrv cache: nam copym failed\n");
- else
+ } else {
rp->rc_flag |= RC_NAM;
+ }
break;
- };
+ }
rp->rc_proc = nd->nd_procnum;
LIST_INSERT_HEAD(NFSRCHASH(nd->nd_retxid), rp, rc_hash);
rp->rc_flag &= ~RC_LOCKED;
if (rp->rc_flag & RC_WANTED) {
rp->rc_flag &= ~RC_WANTED;
- wakeup((caddr_t)rp);
+ wakeup(rp);
}
lck_mtx_unlock(nfsrv_reqcache_mutex);
- return (RC_DOIT);
+ return RC_DOIT;
}
/*
* Update a request cache entry after the rpc has been done
*/
void
-nfsrv_updatecache(nd, repvalid, repmbuf)
- struct nfsrv_descript *nd;
- int repvalid;
- mbuf_t repmbuf;
+nfsrv_updatecache(
+ struct nfsrv_descript *nd,
+ int repvalid,
+ mbuf_t repmbuf)
{
struct nfsrvcache *rp;
int error;
- if (!nd->nd_nam2)
+ if (!nd->nd_nam2) {
return;
+ }
lck_mtx_lock(nfsrv_reqcache_mutex);
loop:
for (rp = NFSRCHASH(nd->nd_retxid)->lh_first; rp != 0;
rp = rp->rc_hash.le_next) {
- if (nd->nd_retxid == rp->rc_xid && nd->nd_procnum == rp->rc_proc &&
- netaddr_match(NETFAMILY(rp), &rp->rc_haddr, nd->nd_nam)) {
+ if (nd->nd_retxid == rp->rc_xid && nd->nd_procnum == rp->rc_proc &&
+ netaddr_match(rp->rc_family, &rp->rc_haddr, nd->nd_nam)) {
if ((rp->rc_flag & RC_LOCKED) != 0) {
rp->rc_flag |= RC_WANTED;
- (void) tsleep((caddr_t)rp, PZERO-1, "nfsrc", 0);
+ msleep(rp, nfsrv_reqcache_mutex, PZERO - 1, "nfsrc", NULL);
goto loop;
}
rp->rc_flag |= RC_LOCKED;
+ if (rp->rc_state == RC_DONE) {
+ /*
+ * This can occur if the cache is too small.
+ * Retransmits of the same request aren't
+ * dropped so we may see the operation
+ * complete more than once.
+ */
+ if (rp->rc_flag & RC_REPMBUF) {
+ mbuf_freem(rp->rc_reply);
+ rp->rc_flag &= ~RC_REPMBUF;
+ }
+ }
rp->rc_state = RC_DONE;
/*
* If we have a valid reply update status and save
* the reply for non-idempotent rpc's.
*/
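+ /*
+ * For NFSv2 procedures marked in nfsv2_repstat[] (replies that consist of
+ * just a status value), only rc_status is saved (RC_REPSTATUS); otherwise
+ * a copy of the full reply mbuf chain is kept (RC_REPMBUF).
+ */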
if (repvalid && nonidempotent[nd->nd_procnum]) {
- if ((nd->nd_flag & ND_NFSV3) == 0 &&
- nfsv2_repstat[nfsv2_procid[nd->nd_procnum]]) {
+ if ((nd->nd_vers == NFS_VER2) &&
+ nfsv2_repstat[nfsv2_procid[nd->nd_procnum]]) {
rp->rc_status = nd->nd_repstat;
rp->rc_flag |= RC_REPSTATUS;
} else {
error = mbuf_copym(repmbuf, 0, MBUF_COPYALL, MBUF_WAITOK, &rp->rc_reply);
- if (!error)
+ if (!error) {
rp->rc_flag |= RC_REPMBUF;
+ }
}
}
rp->rc_flag &= ~RC_LOCKED;
if (rp->rc_flag & RC_WANTED) {
rp->rc_flag &= ~RC_WANTED;
- wakeup((caddr_t)rp);
+ wakeup(rp);
}
lck_mtx_unlock(nfsrv_reqcache_mutex);
return;
* Clean out the cache. Called when the last nfsd terminates.
*/
void
-nfsrv_cleancache()
+nfsrv_cleancache(void)
{
struct nfsrvcache *rp, *nextrp;
lck_mtx_lock(nfsrv_reqcache_mutex);
- for (rp = nfsrvlruhead.tqh_first; rp != 0; rp = nextrp) {
+ for (rp = nfsrv_reqcache_lruhead.tqh_first; rp != 0; rp = nextrp) {
nextrp = rp->rc_lru.tqe_next;
LIST_REMOVE(rp, rc_hash);
- TAILQ_REMOVE(&nfsrvlruhead, rp, rc_lru);
+ TAILQ_REMOVE(&nfsrv_reqcache_lruhead, rp, rc_lru);
_FREE(rp, M_NFSD);
}
- numnfsrvcache = 0;
+ nfsrv_reqcache_count = 0;
+ FREE(nfsrv_reqcache_hashtbl, M_TEMP);
lck_mtx_unlock(nfsrv_reqcache_mutex);
}
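+/*
+ * Since the hash table is freed here, nfsrv_initcache() has to run again
+ * before the request cache can be used by a restarted server.
+ */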
-#endif /* NFS_NOSERVER */
+#endif /* NFSSERVER */