+/*
+ * NFS callback channel state
+ *
+ * One listening socket for accepting socket connections from servers and
+ * a list of connected sockets to handle callback requests on.
+ * Mounts registered with the callback channel are assigned IDs and
+ * put on a list so that the callback request handling code can match
+ * the requests up with mounts.
+ */
+socket_t nfs4_cb_so = NULL;
+in_port_t nfs4_cb_port = 0;
+uint32_t nfs4_cb_id = 0;
+uint32_t nfs4_cb_so_usecount = 0;
+TAILQ_HEAD(nfs4_cb_sock_list,nfs_callback_socket) nfs4_cb_socks;
+TAILQ_HEAD(nfs4_cb_mount_list,nfsmount) nfs4_cb_mounts;
+
+int nfs4_cb_handler(struct nfs_callback_socket *, mbuf_t);
+
+/*
+ * Set up the callback channel for the NFS mount.
+ *
+ * Initializes the callback channel socket state and
+ * assigns a callback ID to the mount.
+ */
+void
+nfs4_mount_callback_setup(struct nfsmount *nmp)
+{
+ struct sockaddr_in sin;
+ socket_t so = NULL;
+ struct timeval timeo;
+ int error, on = 1;
+
+ lck_mtx_lock(nfs_global_mutex);
+ if (nfs4_cb_id == 0) {
+ TAILQ_INIT(&nfs4_cb_mounts);
+ TAILQ_INIT(&nfs4_cb_socks);
+ nfs4_cb_id++;
+ }
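+ /* assign this mount the next callback ID, skipping zero (a callback ID of zero is treated as unassigned) */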
+ nmp->nm_cbid = nfs4_cb_id++;
+ if (nmp->nm_cbid == 0)
+ nmp->nm_cbid = nfs4_cb_id++;
+ nfs4_cb_so_usecount++;
+ TAILQ_INSERT_HEAD(&nfs4_cb_mounts, nmp, nm_cblink);
+
+ if (nfs4_cb_so) {
+ lck_mtx_unlock(nfs_global_mutex);
+ return;
+ }
+
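+ /* no shared listening socket yet; create and configure one */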
+ error = sock_socket(AF_INET, SOCK_STREAM, IPPROTO_TCP, nfs4_cb_accept, NULL, &nfs4_cb_so);
+ if (error) {
+ log(LOG_INFO, "nfs callback setup: error %d creating listening socket\n", error);
+ goto fail;
+ }
+ so = nfs4_cb_so;
+
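+ /* bind to the wildcard address with port 0 so the system picks an ephemeral port */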
+ sin.sin_len = sizeof(struct sockaddr_in);
+ sin.sin_family = AF_INET;
+ sin.sin_addr.s_addr = htonl(INADDR_ANY);
+ sin.sin_port = 0;
+ error = sock_bind(so, (struct sockaddr *)&sin);
+ if (error) {
+ log(LOG_INFO, "nfs callback setup: error %d binding listening socket\n", error);
+ goto fail;
+ }
+ error = sock_getsockname(so, (struct sockaddr *)&sin, sin.sin_len);
+ if (error) {
+ log(LOG_INFO, "nfs callback setup: error %d getting listening socket port\n", error);
+ goto fail;
+ }
+ nfs4_cb_port = ntohs(sin.sin_port);
+
+ error = sock_listen(so, 32);
+ if (error) {
+ log(LOG_INFO, "nfs callback setup: error %d on listen\n", error);
+ goto fail;
+ }
+
+ /* receive timeout shouldn't matter; if a send times out, we'll want to drop the socket */
+ timeo.tv_usec = 0;
+ timeo.tv_sec = 60;
+ error = sock_setsockopt(so, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
+ if (error)
+ log(LOG_INFO, "nfs callback setup: error %d setting socket rx timeout\n", error);
+ error = sock_setsockopt(so, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
+ if (error)
+ log(LOG_INFO, "nfs callback setup: error %d setting socket tx timeout\n", error);
+ sock_setsockopt(so, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
+ sock_setsockopt(so, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on));
+ sock_setsockopt(so, SOL_SOCKET, SO_UPCALLCLOSEWAIT, &on, sizeof(on));
+ error = 0;
+
+fail:
+ if (error) {
+ nfs4_cb_so = NULL;
+ lck_mtx_unlock(nfs_global_mutex);
+ if (so) {
+ sock_shutdown(so, SHUT_RDWR);
+ sock_close(so);
+ }
+ } else {
+ lck_mtx_unlock(nfs_global_mutex);
+ }
+}
+
+/*
+ * Shut down the callback channel for the NFS mount.
+ *
+ * Clears the mount's callback ID and releases the mount's
+ * reference on the callback socket.  Dropping the last
+ * reference also shuts down the callback socket(s).
+ */
+void
+nfs4_mount_callback_shutdown(struct nfsmount *nmp)
+{
+ struct nfs_callback_socket *ncbsp;
+ socket_t so;
+ struct nfs4_cb_sock_list cb_socks;
+ struct timespec ts = {1,0};
+
+ lck_mtx_lock(nfs_global_mutex);
+ TAILQ_REMOVE(&nfs4_cb_mounts, nmp, nm_cblink);
+ /* wait for any callbacks in progress to complete */
+ while (nmp->nm_cbrefs)
+ msleep(&nmp->nm_cbrefs, nfs_global_mutex, PSOCK, "cbshutwait", &ts);
+ nmp->nm_cbid = 0;
+ if (--nfs4_cb_so_usecount) {
+ lck_mtx_unlock(nfs_global_mutex);
+ return;
+ }
+ so = nfs4_cb_so;
+ nfs4_cb_so = NULL;
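+ /* move any connected callback sockets onto a local list so they can be shut down after dropping the global lock */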
+ TAILQ_INIT(&cb_socks);
+ TAILQ_CONCAT(&cb_socks, &nfs4_cb_socks, ncbs_link);
+ lck_mtx_unlock(nfs_global_mutex);
+ if (so) {
+ sock_shutdown(so, SHUT_RDWR);
+ sock_close(so);
+ }
+ while ((ncbsp = TAILQ_FIRST(&cb_socks))) {
+ TAILQ_REMOVE(&cb_socks, ncbsp, ncbs_link);
+ sock_shutdown(ncbsp->ncbs_so, SHUT_RDWR);
+ sock_close(ncbsp->ncbs_so);
+ FREE(ncbsp, M_TEMP);
+ }
+}
+
+/*
+ * Periodically check for and clean up stale or unused NFS callback sockets.
+ */
+#define NFS4_CB_TIMER_PERIOD 30
+#define NFS4_CB_IDLE_MAX 300
+void
+nfs4_callback_timer(__unused void *param0, __unused void *param1)
+{
+ struct nfs_callback_socket *ncbsp, *nextncbsp;
+ struct timeval now;
+
+loop:
+ lck_mtx_lock(nfs_global_mutex);
+ if (TAILQ_EMPTY(&nfs4_cb_socks)) {
+ nfs4_callback_timer_on = 0;
+ lck_mtx_unlock(nfs_global_mutex);
+ return;
+ }
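+ /* reap sockets that are marked dead or have been idle longer than NFS4_CB_IDLE_MAX; drop the global lock while closing each one, then rescan the list */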
+ microuptime(&now);
+ TAILQ_FOREACH_SAFE(ncbsp, &nfs4_cb_socks, ncbs_link, nextncbsp) {
+ if (!(ncbsp->ncbs_flags & NCBSOCK_DEAD) &&
+ (now.tv_sec < (ncbsp->ncbs_stamp + NFS4_CB_IDLE_MAX)))
+ continue;
+ TAILQ_REMOVE(&nfs4_cb_socks, ncbsp, ncbs_link);
+ lck_mtx_unlock(nfs_global_mutex);
+ sock_shutdown(ncbsp->ncbs_so, SHUT_RDWR);
+ sock_close(ncbsp->ncbs_so);
+ FREE(ncbsp, M_TEMP);
+ goto loop;
+ }
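+ /* sockets remain; reschedule the cleanup timer */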
+ nfs4_callback_timer_on = 1;
+ nfs_interval_timer_start(nfs4_callback_timer_call,
+ NFS4_CB_TIMER_PERIOD * 1000);
+ lck_mtx_unlock(nfs_global_mutex);
+}
+
+/*
+ * Accept a new connection on the callback listening socket.
+ */
+void
+nfs4_cb_accept(socket_t so, __unused void *arg, __unused int waitflag)
+{
+ socket_t newso = NULL;
+ struct nfs_callback_socket *ncbsp;
+ struct nfsmount *nmp;
+ struct timeval timeo, now;
+ struct sockaddr_in *saddr;
+ int error, on = 1;
+
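+ /* only handle accept upcalls for the current callback listening socket */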
+ if (so != nfs4_cb_so)
+ return;
+
+ /* allocate/initialize a new nfs_callback_socket */
+ MALLOC(ncbsp, struct nfs_callback_socket *, sizeof(struct nfs_callback_socket), M_TEMP, M_WAITOK);
+ if (!ncbsp) {
+ log(LOG_ERR, "nfs callback accept: no memory for new socket\n");
+ return;
+ }
+ bzero(ncbsp, sizeof(*ncbsp));
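+ /* set the expected peer address length and prime the RPC record-marking state so the first read expects a full record marker */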
+ ncbsp->ncbs_sin.sin_len = sizeof(struct sockaddr_in);
+ ncbsp->ncbs_rrs.nrrs_markerleft = sizeof(ncbsp->ncbs_rrs.nrrs_fragleft);
+
+ /* accept a new socket */
+ error = sock_accept(so, (struct sockaddr*)&ncbsp->ncbs_sin,
+ ncbsp->ncbs_sin.sin_len, MSG_DONTWAIT,
+ nfs4_cb_rcv, ncbsp, &newso);
+ if (error) {
+ log(LOG_INFO, "nfs callback accept: error %d accepting socket\n", error);
+ FREE(ncbsp, M_TEMP);
+ return;
+ }
+
+ /* set up the new socket */
+ /* receive timeout shouldn't matter; if a send times out, we'll want to drop the socket */
+ timeo.tv_usec = 0;
+ timeo.tv_sec = 60;
+ error = sock_setsockopt(newso, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
+ if (error)
+ log(LOG_INFO, "nfs callback socket: error %d setting socket rx timeout\n", error);
+ error = sock_setsockopt(newso, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
+ if (error)
+ log(LOG_INFO, "nfs callback socket: error %d setting socket tx timeout\n", error);
+ sock_setsockopt(newso, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
+ sock_setsockopt(newso, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on));
+ sock_setsockopt(newso, SOL_SOCKET, SO_UPCALLCLOSEWAIT, &on, sizeof(on));
+
+ ncbsp->ncbs_so = newso;
+ microuptime(&now);
+ ncbsp->ncbs_stamp = now.tv_sec;
+
+ lck_mtx_lock(nfs_global_mutex);
+
+ /* add it to the list */
+ TAILQ_INSERT_HEAD(&nfs4_cb_socks, ncbsp, ncbs_link);
+
+ /* verify it's from a host we have mounted */
+ TAILQ_FOREACH(nmp, &nfs4_cb_mounts, nm_cblink) {
+ /* check socket's source address matches this mount's server address */
+ saddr = mbuf_data(nmp->nm_nam);
+ if ((ncbsp->ncbs_sin.sin_len == saddr->sin_len) &&
+ (ncbsp->ncbs_sin.sin_family == saddr->sin_family) &&
+ (ncbsp->ncbs_sin.sin_addr.s_addr == saddr->sin_addr.s_addr))
+ break;
+ }
+ if (!nmp) /* we don't want this socket, mark it dead */
+ ncbsp->ncbs_flags |= NCBSOCK_DEAD;
+
+ /* make sure the callback socket cleanup timer is running */
+ /* (shorten the timer if we've got a socket we don't want) */
+ if (!nfs4_callback_timer_on) {
+ nfs4_callback_timer_on = 1;
+ nfs_interval_timer_start(nfs4_callback_timer_call,
+ !nmp ? 500 : (NFS4_CB_TIMER_PERIOD * 1000));
+ } else if (!nmp && (nfs4_callback_timer_on < 2)) {
+ nfs4_callback_timer_on = 2;
+ thread_call_cancel(nfs4_callback_timer_call);
+ nfs_interval_timer_start(nfs4_callback_timer_call, 500);
+ }
+
+ lck_mtx_unlock(nfs_global_mutex);
+}
+
+/*
+ * Receive mbufs from callback sockets into RPC records and process each record.
+ * If the connection has been closed, mark the socket dead so the cleanup timer can reap it.
+ */
+void
+nfs4_cb_rcv(socket_t so, void *arg, __unused int waitflag)
+{
+ struct nfs_callback_socket *ncbsp = arg;
+ struct timespec ts = {1,0};
+ struct timeval now;
+ mbuf_t m;
+ int error = 0, recv = 1;
+
+ lck_mtx_lock(nfs_global_mutex);
+ while (ncbsp->ncbs_flags & NCBSOCK_UPCALL) {
+ /* wait if upcall is already in progress */
+ ncbsp->ncbs_flags |= NCBSOCK_UPCALLWANT;
+ msleep(ncbsp, nfs_global_mutex, PSOCK, "cbupcall", &ts);
+ }
+ ncbsp->ncbs_flags |= NCBSOCK_UPCALL;
+ lck_mtx_unlock(nfs_global_mutex);
+
+ /* loop while we make error-free progress */
+ while (!error && recv) {
+ error = nfs_rpc_record_read(so, &ncbsp->ncbs_rrs, &recv, &m);
+ if (m) /* handle the request */
+ error = nfs4_cb_handler(ncbsp, m);
+ }
+
+ /* note: no error and no data indicates server closed its end */
+ if ((error != EWOULDBLOCK) && (error || !recv)) {
+ /*
+ * Socket is either being closed or should be.
+ * We can't close the socket in the context of the upcall.
+ * So we mark it as dead and leave it for the cleanup timer to reap.
+ */
+ ncbsp->ncbs_stamp = 0;
+ ncbsp->ncbs_flags |= NCBSOCK_DEAD;
+ } else {
+ microuptime(&now);
+ ncbsp->ncbs_stamp = now.tv_sec;
+ }
+
+ lck_mtx_lock(nfs_global_mutex);
+ ncbsp->ncbs_flags &= ~NCBSOCK_UPCALL;
+ lck_mtx_unlock(nfs_global_mutex);
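+ /* wake anyone waiting in nfs4_cb_rcv for this upcall to finish */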
+ wakeup(ncbsp);
+}
+
+/*
+ * Handle an NFS callback channel request.
+ */
+int
+nfs4_cb_handler(struct nfs_callback_socket *ncbsp, mbuf_t mreq)
+{
+ socket_t so = ncbsp->ncbs_so;
+ struct nfsm_chain nmreq, nmrep;
+ mbuf_t mhead = NULL, mrest = NULL, m;
+ struct sockaddr_in *saddr;
+ struct msghdr msg;
+ struct nfsmount *nmp;
+ fhandle_t fh;
+ nfsnode_t np;
+ nfs_stateid stateid;
+ uint32_t bitmap[NFS_ATTR_BITMAP_LEN], rbitmap[NFS_ATTR_BITMAP_LEN], bmlen, truncate, attrbytes;
+ uint32_t val, xid, procnum, taglen, cbid, numops, op, status;
+ uint32_t auth_type, auth_len;
+ uint32_t numres, *pnumres;
+ int error = 0, replen, len;
+ size_t sentlen = 0;
+
+ xid = numops = op = status = procnum = taglen = cbid = 0;
+
+ nfsm_chain_dissect_init(error, &nmreq, mreq);
+ nfsm_chain_get_32(error, &nmreq, xid); // RPC XID
+ nfsm_chain_get_32(error, &nmreq, val); // RPC Call
+ nfsm_assert(error, (val == RPC_CALL), EBADRPC);
+ nfsm_chain_get_32(error, &nmreq, val); // RPC Version
+ nfsm_assert(error, (val == RPC_VER2), ERPCMISMATCH);
+ nfsm_chain_get_32(error, &nmreq, val); // RPC Program Number
+ nfsm_assert(error, (val == NFS4_CALLBACK_PROG), EPROGUNAVAIL);
+ nfsm_chain_get_32(error, &nmreq, val); // NFS Callback Program Version Number
+ nfsm_assert(error, (val == NFS4_CALLBACK_PROG_VERSION), EPROGMISMATCH);
+ nfsm_chain_get_32(error, &nmreq, procnum); // NFS Callback Procedure Number
+ nfsm_assert(error, (procnum <= NFSPROC4_CB_COMPOUND), EPROCUNAVAIL);
+
+ /* Handle authentication */
+ /* XXX just ignore auth for now - handling kerberos may be tricky */
+ nfsm_chain_get_32(error, &nmreq, auth_type); // RPC Auth Flavor
+ nfsm_chain_get_32(error, &nmreq, auth_len); // RPC Auth Length
+ nfsm_assert(error, (auth_len <= RPCAUTH_MAXSIZ), EBADRPC);
+ if (!error && (auth_len > 0))
+ nfsm_chain_adv(error, &nmreq, nfsm_rndup(auth_len));
+ nfsm_chain_adv(error, &nmreq, NFSX_UNSIGNED); // verifier flavor (should be AUTH_NONE)
+ nfsm_chain_get_32(error, &nmreq, auth_len); // verifier length
+ nfsm_assert(error, (auth_len <= RPCAUTH_MAXSIZ), EBADRPC);
+ if (!error && (auth_len > 0))
+ nfsm_chain_adv(error, &nmreq, nfsm_rndup(auth_len));
+ if (error) {
+ status = error;
+ error = 0;
+ goto nfsmout;
+ }
+
+ switch (procnum) {
+ case NFSPROC4_CB_NULL:
+ status = NFSERR_RETVOID;
+ break;
+ case NFSPROC4_CB_COMPOUND:
+ /* tag, minorversion, cb ident, numops, op array */
+ nfsm_chain_get_32(error, &nmreq, taglen); /* tag length */
+ nfsm_assert(error, (taglen <= NFS4_OPAQUE_LIMIT), EBADRPC);
+
+ /* start building the body of the response */
+ nfsm_mbuf_get(error, &mrest, nfsm_rndup(taglen) + 5*NFSX_UNSIGNED);
+ nfsm_chain_init(&nmrep, mrest);
+
+ /* copy tag from request to response */
+ nfsm_chain_add_32(error, &nmrep, taglen); /* tag length */
+ for (len = (int)taglen; !error && (len > 0); len -= NFSX_UNSIGNED) {
+ nfsm_chain_get_32(error, &nmreq, val);
+ nfsm_chain_add_32(error, &nmrep, val);
+ }
+
+ /* insert number of results placeholder */
+ numres = 0;
+ nfsm_chain_add_32(error, &nmrep, numres);
+ pnumres = (uint32_t*)(nmrep.nmc_ptr - NFSX_UNSIGNED);
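+ /* pnumres points at the placeholder so the actual result count can be patched in once the ops have been processed */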
+
+ nfsm_chain_get_32(error, &nmreq, val); /* minorversion */
+ nfsm_assert(error, (val == 0), NFSERR_MINOR_VERS_MISMATCH);
+ nfsm_chain_get_32(error, &nmreq, cbid); /* callback ID */
+ nfsm_chain_get_32(error, &nmreq, numops); /* number of operations */
+ if (error) {
+ if ((error == EBADRPC) || (error == NFSERR_MINOR_VERS_MISMATCH))
+ status = error;
+ else if ((error == ENOBUFS) || (error == ENOMEM))
+ status = NFSERR_RESOURCE;
+ else
+ status = NFSERR_SERVERFAULT;
+ error = 0;
+ nfsm_chain_null(&nmrep);
+ goto nfsmout;
+ }
+ /* match the callback ID to a registered mount */
+ lck_mtx_lock(nfs_global_mutex);
+ TAILQ_FOREACH(nmp, &nfs4_cb_mounts, nm_cblink) {
+ if (nmp->nm_cbid != cbid)
+ continue;
+ /* verify socket's source address matches this mount's server address */
+ saddr = mbuf_data(nmp->nm_nam);
+ if ((ncbsp->ncbs_sin.sin_len != saddr->sin_len) ||
+ (ncbsp->ncbs_sin.sin_family != saddr->sin_family) ||
+ (ncbsp->ncbs_sin.sin_addr.s_addr != saddr->sin_addr.s_addr))
+ continue;
+ break;
+ }
+ /* mark the NFS mount as busy */
+ if (nmp)
+ nmp->nm_cbrefs++;
+ lck_mtx_unlock(nfs_global_mutex);
+ if (!nmp) {
+ /* if no mount match, just drop socket. */
+ error = EPERM;
+ nfsm_chain_null(&nmrep);
+ goto out;
+ }
+
+ /* process ops, adding results to mrest */
+ while (numops > 0) {
+ numops--;
+ nfsm_chain_get_32(error, &nmreq, op);
+ if (error)
+ break;
+ switch (op) {
+ case NFS_OP_CB_GETATTR:
+ // (FH, BITMAP) -> (STATUS, BITMAP, ATTRS)
+ np = NULL;
+ nfsm_chain_get_fh(error, &nmreq, NFS_VER4, &fh);
+ bmlen = NFS_ATTR_BITMAP_LEN;
+ nfsm_chain_get_bitmap(error, &nmreq, bitmap, bmlen);
+ if (error) {
+ status = error;
+ error = 0;
+ numops = 0; /* don't process any more ops */
+ } else {
+ /* find the node for the file handle */
+ error = nfs_nget(nmp->nm_mountp, NULL, NULL, fh.fh_data, fh.fh_len, NULL, NULL, NG_NOCREATE, &np);
+ if (error || !np) {
+ status = NFSERR_BADHANDLE;
+ error = 0;
+ np = NULL;
+ numops = 0; /* don't process any more ops */
+ }
+ }
+ nfsm_chain_add_32(error, &nmrep, op);
+ nfsm_chain_add_32(error, &nmrep, status);
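+ /* an EBADRPC status means the request itself was unparseable; promote it to an error */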
+ if (!error && (status == EBADRPC))
+ error = status;
+ if (np) {
+ /* only allow returning size, change, and mtime attrs */
+ NFS_CLEAR_ATTRIBUTES(&rbitmap);
+ attrbytes = 0;
+ if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_CHANGE)) {
+ NFS_BITMAP_SET(&rbitmap, NFS_FATTR_CHANGE);
+ attrbytes += 2 * NFSX_UNSIGNED;
+ }
+ if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_SIZE)) {
+ NFS_BITMAP_SET(&rbitmap, NFS_FATTR_SIZE);
+ attrbytes += 2 * NFSX_UNSIGNED;
+ }
+ if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_TIME_MODIFY)) {
+ NFS_BITMAP_SET(&rbitmap, NFS_FATTR_TIME_MODIFY);
+ attrbytes += 3 * NFSX_UNSIGNED;
+ }
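+ /* encode the response bitmap, the attribute byte count, then the attribute values in bitmap order */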
+ nfsm_chain_add_bitmap(error, &nmrep, rbitmap, NFS_ATTR_BITMAP_LEN);
+ nfsm_chain_add_32(error, &nmrep, attrbytes);
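+ /* report a provisionally bumped change attribute if the node has locally modified data (NMODIFIED) */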
+ if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_CHANGE))
+ nfsm_chain_add_64(error, &nmrep,
+ np->n_vattr.nva_change + ((np->n_flag & NMODIFIED) ? 1 : 0));
+ if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_SIZE))
+ nfsm_chain_add_64(error, &nmrep, np->n_size);
+ if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_TIME_MODIFY)) {
+ nfsm_chain_add_64(error, &nmrep, np->n_vattr.nva_timesec[NFSTIME_MODIFY]);
+ nfsm_chain_add_32(error, &nmrep, np->n_vattr.nva_timensec[NFSTIME_MODIFY]);
+ }
+ nfs_node_unlock(np);
+ vnode_put(NFSTOV(np));
+ np = NULL;