+SYSCTL_INT(_vfs_generic_nfs_server, OID_AUTO, nfsd_thread_max, CTLFLAG_RW | CTLFLAG_LOCKED, &nfsd_thread_max, 0, "");
+SYSCTL_INT(_vfs_generic_nfs_server, OID_AUTO, nfsd_thread_count, CTLFLAG_RD | CTLFLAG_LOCKED, &nfsd_thread_count, 0, "");
+#ifdef NFS_UC_Q_DEBUG
+SYSCTL_INT(_vfs_generic_nfs_server, OID_AUTO, use_upcall_svc, CTLFLAG_RW | CTLFLAG_LOCKED, &nfsrv_uc_use_proxy, 0, "");
+SYSCTL_INT(_vfs_generic_nfs_server, OID_AUTO, upcall_queue_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &nfsrv_uc_queue_limit, 0, "");
+SYSCTL_INT(_vfs_generic_nfs_server, OID_AUTO, upcall_queue_max_seen, CTLFLAG_RW | CTLFLAG_LOCKED, &nfsrv_uc_queue_max_seen, 0, "");
+SYSCTL_INT(_vfs_generic_nfs_server, OID_AUTO, upcall_queue_count, CTLFLAG_RD | CTLFLAG_LOCKED, (int *)&nfsrv_uc_queue_count, 0, "");
+#endif
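+/*
+ * Illustrative only: these knobs surface under the vfs.generic.nfs.server
+ * MIB. Assuming the standard sysctlbyname(3) interface (and that the MIB
+ * string is derived from the declarations above), user space could read
+ * one like so:
+ *
+ *	int max = 0;
+ *	size_t len = sizeof(max);
+ *	if (sysctlbyname("vfs.generic.nfs.server.nfsd_thread_max",
+ *	    &max, &len, NULL, 0) == 0)
+ *		printf("nfsd_thread_max = %d\n", max);
+ */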
+#endif /* NFSSERVER */
+
+
+#if NFSCLIENT
+
+int
+nfsclnt(proc_t p, struct nfsclnt_args *uap, __unused int *retval)
+{
+ struct lockd_ans la;
+ int error;
+
+ switch (uap->flag) {
+ case NFSCLNT_LOCKDANS:
+ error = copyin(uap->argp, &la, sizeof(la));
+ if (!error)
+ error = nfslockdans(p, &la);
+ break;
+ case NFSCLNT_LOCKDNOTIFY:
+ error = nfslockdnotify(p, uap->argp);
+ break;
+ default:
+ error = EINVAL;
+ }
+ return (error);
+}
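+
+/*
+ * Illustrative only: a minimal sketch of how a user-space lock daemon
+ * might answer a lock request through this syscall. The SYS_nfsclnt
+ * syscall number and the lockd_ans fields shown are assumptions here,
+ * not definitions from this file.
+ *
+ *	struct lockd_ans la = {0};
+ *	la.la_version = LOCKD_ANS_VERSION;	// protocol version (assumed field)
+ *	la.la_errno = 0;			// lock granted; nonzero would deny
+ *	if (syscall(SYS_nfsclnt, NFSCLNT_LOCKDANS, &la) != 0)
+ *		perror("nfsclnt");
+ */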
+
+/*
+ * Asynchronous I/O threads for client NFS.
+ * They do read-ahead and write-behind operations on the block I/O cache.
+ *
+ * Up to nfsiod_thread_max threads are launched on demand; each exits after
+ * being idle for a while. There are as many nfsiod structs as there are
+ * nfsiod threads; however, there's no strict tie between a thread and a struct.
+ * Each thread puts an nfsiod on the free list and sleeps on it. When it wakes
+ * up, it removes the next struct nfsiod from the work queue and services it.
+ * Then it puts the struct back at the head of the free list and sleeps on it.
+ * Async requests will pull the next struct nfsiod from the head of the free list,
+ * put it on the work queue, and wake whatever thread is waiting on that struct.
+ */
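+
+/*
+ * For context, the producer side of that handoff lives outside this file.
+ * A simplified sketch of its shape (assumed, not the verbatim code) that
+ * matches the consumer logic below:
+ *
+ *	lck_mtx_lock(nfsiod_mutex);
+ *	TAILQ_INSERT_TAIL(&nmp->nm_iodq, req, r_achain);  // queue the request
+ *	if (!nmp->nm_niod) {                     // mount has no nfsiod bound
+ *		niod = TAILQ_FIRST(&nfsiodfree); // take an idle struct
+ *		if (niod) {
+ *			TAILQ_REMOVE(&nfsiodfree, niod, niod_link);
+ *			TAILQ_INSERT_TAIL(&nfsiodwork, niod, niod_link);
+ *			niod->niod_nmp = nmp;    // bind it to this mount
+ *			nmp->nm_niod = niod;
+ *			wakeup(niod);            // rouse the sleeping thread
+ *		} else if (nfsiod_thread_count < NFSIOD_MAX) {
+ *			nfsiod_start();          // no idle thread: spawn one
+ *		}
+ *	}
+ *	lck_mtx_unlock(nfsiod_mutex);
+ */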
+
+/*
+ * nfsiod thread exit routine
+ *
+ * Must be called with nfsiod_mutex held so that the
+ * decision to terminate is atomic with the termination.
+ */
+void
+nfsiod_terminate(struct nfsiod *niod)
+{
+ nfsiod_thread_count--;
+ lck_mtx_unlock(nfsiod_mutex);
+ if (niod)
+ FREE(niod, M_TEMP);
+ else
+ printf("nfsiod: terminating without niod\n");
+ thread_terminate(current_thread());
+ /*NOTREACHED*/
+}
+
+/* nfsiod thread startup routine */
+void
+nfsiod_thread(void)
+{
+ struct nfsiod *niod;
+ int error;
+
+ MALLOC(niod, struct nfsiod *, sizeof(struct nfsiod), M_TEMP, M_WAITOK);
+ if (!niod) {
+ lck_mtx_lock(nfsiod_mutex);
+ nfsiod_thread_count--;
+ wakeup(current_thread());
+ lck_mtx_unlock(nfsiod_mutex);
+ thread_terminate(current_thread());
+ /*NOTREACHED*/
+ }
+ bzero(niod, sizeof(*niod));
+ lck_mtx_lock(nfsiod_mutex);
+ TAILQ_INSERT_HEAD(&nfsiodfree, niod, niod_link);
+ wakeup(current_thread());
+ error = msleep0(niod, nfsiod_mutex, PWAIT | PDROP, "nfsiod", NFS_ASYNCTHREADMAXIDLE*hz, nfsiod_continue);
+ /* msleep0() shouldn't return (wakeups restart in nfsiod_continue), */
+ /* so if we get here the sleep failed: remove an old nfsiod struct and terminate */
+ lck_mtx_lock(nfsiod_mutex);
+ if ((niod = TAILQ_LAST(&nfsiodfree, nfsiodlist)))
+ TAILQ_REMOVE(&nfsiodfree, niod, niod_link);
+ nfsiod_terminate(niod);
+ /*NOTREACHED*/
+}
+
+/*
+ * Start up another nfsiod thread.
+ * (unless we're already maxed out and there are nfsiods running)
+ */
+int
+nfsiod_start(void)
+{
+ thread_t thd = THREAD_NULL;
+
+ lck_mtx_lock(nfsiod_mutex);
+ if ((nfsiod_thread_count >= NFSIOD_MAX) && (nfsiod_thread_count > 0)) {
+ lck_mtx_unlock(nfsiod_mutex);
+ return (EBUSY);
+ }
+ nfsiod_thread_count++;
+ if (kernel_thread_start((thread_continue_t)nfsiod_thread, NULL, &thd) != KERN_SUCCESS) {
+ /* spawn failed: release the slot reserved above so it isn't leaked */
+ nfsiod_thread_count--;
+ lck_mtx_unlock(nfsiod_mutex);
+ return (EBUSY);
+ }
+ /* wait for the thread to complete startup */
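+ /*
+ * The new thread signals readiness via wakeup(current_thread()) in
+ * nfsiod_thread() above; its current_thread() is the thd that this
+ * msleep() sleeps on, and PDROP drops nfsiod_mutex for us on wake.
+ */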
+ msleep(thd, nfsiod_mutex, PWAIT | PDROP, "nfsiodw", NULL);
+ thread_deallocate(thd);
+ return (0);
+}
+
+/*
+ * Continuation for Asynchronous I/O threads for NFS client.
+ *
+ * Grab an nfsiod struct to work on, do some work, then drop it
+ */
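+/*
+ * A note on the continuation pattern: msleep0() with a continuation does
+ * not return on a normal wakeup; the thread's kernel stack is discarded
+ * and it resumes by calling nfsiod_continue() with the wait result. Both
+ * wakeups from the async request path and the idle timeout arrive here;
+ * an actual return from msleep0() therefore means the sleep itself
+ * failed, which the callers treat as a cue to terminate.
+ */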
+int
+nfsiod_continue(int error)
+{
+ struct nfsiod *niod;
+ struct nfsmount *nmp;
+ struct nfsreq *req, *treq;
+ struct nfs_reqqhead iodq;
+ int morework;
+
+ lck_mtx_lock(nfsiod_mutex);
+ niod = TAILQ_FIRST(&nfsiodwork);
+ if (!niod) {
+ /* there's no work queued up */
+ /* remove an old nfsiod struct and terminate */
+ if ((niod = TAILQ_LAST(&nfsiodfree, nfsiodlist)))
+ TAILQ_REMOVE(&nfsiodfree, niod, niod_link);
+ nfsiod_terminate(niod);
+ /*NOTREACHED*/
+ }
+ TAILQ_REMOVE(&nfsiodwork, niod, niod_link);
+
+worktodo:
+ while ((nmp = niod->niod_nmp)) {
+ /*
+ * Service this mount's async I/O queue.
+ *
+ * In order to ensure some level of fairness between mounts,
+ * we grab all the work up front before processing it so any
+ * new work that arrives will be serviced on a subsequent
+ * iteration - and we have a chance to see if other work needs
+ * to be done (e.g. the delayed write queue needs to be pushed
+ * or other mounts are waiting for an nfsiod).
+ */
+ /* grab the current contents of the queue */
+ TAILQ_INIT(&iodq);
+ TAILQ_CONCAT(&iodq, &nmp->nm_iodq, r_achain);
+ lck_mtx_unlock(nfsiod_mutex);
+
+ /* process the grabbed queue, invoking each request's callback off-lock */
+ TAILQ_FOREACH_SAFE(req, &iodq, r_achain, treq) {
+ TAILQ_REMOVE(&iodq, req, r_achain);
+ req->r_achain.tqe_next = NFSREQNOLIST; /* mark request as off all queues */
+ req->r_callback.rcb_func(req);
+ }
+
+ /* now check if there's more/other work to be done */
+ lck_mtx_lock(nfsiod_mutex);
+ morework = !TAILQ_EMPTY(&nmp->nm_iodq);
+ if (!morework || !TAILQ_EMPTY(&nfsiodmounts)) {
+ /* we're going to stop working on this mount */
+ if (morework) /* mount still needs more work so queue it up */
+ TAILQ_INSERT_TAIL(&nfsiodmounts, nmp, nm_iodlink);
+ nmp->nm_niod = NULL;
+ niod->niod_nmp = NULL;
+ }
+ }
+
+ /* loop if there's still a mount to work on */
+ if (!niod->niod_nmp && !TAILQ_EMPTY(&nfsiodmounts)) {
+ niod->niod_nmp = TAILQ_FIRST(&nfsiodmounts);
+ TAILQ_REMOVE(&nfsiodmounts, niod->niod_nmp, nm_iodlink);
+ }
+ if (niod->niod_nmp)
+ goto worktodo;
+
+ /* queue ourselves back up - if there aren't too many threads running */
+ if (nfsiod_thread_count <= NFSIOD_MAX) {
+ TAILQ_INSERT_HEAD(&nfsiodfree, niod, niod_link);
+ error = msleep0(niod, nfsiod_mutex, PWAIT | PDROP, "nfsiod", NFS_ASYNCTHREADMAXIDLE*hz, nfsiod_continue);
+ /* msleep0() shouldn't return (wakeups restart this continuation), */
+ /* so if we get here the sleep failed: remove an old nfsiod struct and terminate */
+ lck_mtx_lock(nfsiod_mutex);
+ if ((niod = TAILQ_LAST(&nfsiodfree, nfsiodlist)))
+ TAILQ_REMOVE(&nfsiodfree, niod, niod_link);
+ }
+ nfsiod_terminate(niod);
+ /*NOTREACHED*/
+ return (0);
+}
+
+#endif /* NFSCLIENT */
+
+
+#if NFSSERVER