+ if (error) {
+ nfsm_chain_cleanup(&nmrep);
+ *mrepp = NULL;
+ }
+ return error;
+}
+
+#if CONFIG_FSE
+/*
+ * NFS File modification reporting
+ *
+ * When the contents of a file are changed, a "content modified"
+ * fsevent needs to be issued. Normally this would be done at
+ * file close time. This is difficult for NFS because the protocol
+ * has no "close" operation. The client simply sends a stream of
+ * write requests that eventually stops, so we keep a hash table of
+ * vnodes that have been written to recently, and issue a
+ * "content modified" fsevent only if there are no writes to
+ * a vnode for nfsrv_fmod_pendtime milliseconds.
+ */
+int nfsrv_fmod_pending; /* count of vnodes being written to */
+int nfsrv_fmod_pendtime = 1000; /* msec of write inactivity before the fsevent fires */
+int nfsrv_fmod_min_interval = 100; /* msec min interval between callbacks */
+
+/*
+ * This function is called via the kernel's callout
+ * mechanism. Calls are made only when there are
+ * vnodes with an fsevent creation pending, and no more
+ * frequently than once every nfsrv_fmod_min_interval ms.
+ */
+void
+nfsrv_fmod_timer(__unused void *param0, __unused void *param1)
+{
+ struct nfsrv_fmod_hashhead *headp, firehead;
+ struct nfsrv_fmod *fp, *nfp, *pfp;
+ uint64_t timenow, next_deadline;
+ time_t interval = 0;
+ int i, fmod_fire;
+
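+ /* firehead collects expired entries so they can be handled after the mutex is dropped */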
+ LIST_INIT(&firehead);
+ lck_mtx_lock(&nfsrv_fmod_mutex);
+again:
+ clock_get_uptime(&timenow);
+ clock_interval_to_deadline(nfsrv_fmod_pendtime, 1000 * 1000,
+ &next_deadline);
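+ /*
+  * next_deadline starts a full pendtime from now; the scan below
+  * pulls it back to the earliest deadline still pending.
+  */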
+
+ /*
+ * Scan all the hash chains
+ */
+ fmod_fire = 0;
+ for (i = 0; i < NFSRVFMODHASHSZ; i++) {
+ /*
+ * For each hash chain, look for an entry
+ * that has exceeded the deadline.
+ */
+ headp = &nfsrv_fmod_hashtbl[i];
+ LIST_FOREACH(fp, headp, fm_link) {
+ if (timenow >= fp->fm_deadline) {
+ break;
+ }
+ if (fp->fm_deadline < next_deadline) {
+ next_deadline = fp->fm_deadline;
+ }
+ }
+
+ /*
+ * If we have an entry that's exceeded the
+ * deadline, then the same is true for all
+ * following entries in the chain, since they're
+ * sorted in time order.
+ */
+ pfp = NULL;
+ while (fp) {
+ /* move each entry to the fire list */
+ nfp = LIST_NEXT(fp, fm_link);
+ LIST_REMOVE(fp, fm_link);
+ fmod_fire++;
+ if (pfp) {
+ LIST_INSERT_AFTER(pfp, fp, fm_link);
+ } else {
+ LIST_INSERT_HEAD(&firehead, fp, fm_link);
+ }
+ pfp = fp;
+ fp = nfp;
+ }
+ }
+
+ if (fmod_fire) {
+ lck_mtx_unlock(&nfsrv_fmod_mutex);
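+ /*
+  * The expired entries were unlinked from the hash table above and
+  * now live only on the local firehead list, so they can be walked
+  * without holding the mutex.
+  */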
+ /*
+ * Fire off the content modified fsevent for each
+ * entry and free it.
+ */
+ LIST_FOREACH_SAFE(fp, &firehead, fm_link, nfp) {
+ if (nfsrv_fsevents_enabled) {
+ fp->fm_context.vc_thread = current_thread();
+ add_fsevent(FSE_CONTENT_MODIFIED, &fp->fm_context,
+ FSE_ARG_VNODE, fp->fm_vp,
+ FSE_ARG_DONE);
+ }
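+ /* release the vnode and credential references held for this entry */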
+ vnode_put(fp->fm_vp);
+ kauth_cred_unref(&fp->fm_context.vc_ucred);
+ LIST_REMOVE(fp, fm_link);
+ FREE(fp, M_TEMP);
+ }
+ lck_mtx_lock(&nfsrv_fmod_mutex);
+ nfsrv_fmod_pending -= fmod_fire;
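+ /* rescan in case more entries reached their deadlines while the mutex was dropped */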
+ goto again;
+ }
+
+ /*
+ * If there are still pending entries, set up another
+ * callout to handle them later. Set the timeout deadline
+ * so that the callout happens when the oldest pending
+ * entry is ready to send its fsevent.
+ */
+ if (nfsrv_fmod_pending > 0) {
+ interval = ((time_t)(next_deadline - timenow)) / (1000 * 1000);
+ if (interval < nfsrv_fmod_min_interval) {
+ interval = nfsrv_fmod_min_interval;
+ }
+ }
+
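+ /* interval stays 0 when nothing is pending, which leaves the timer off */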
+ nfsrv_fmod_timer_on = interval > 0;
+ if (nfsrv_fmod_timer_on) {
+ nfs_interval_timer_start(nfsrv_fmod_timer_call, interval);
+ }
+
+ lck_mtx_unlock(&nfsrv_fmod_mutex);