+
+/*
+ * attempt to bind a socket to a reserved port
+ * (i.e. a privileged port below IPPORT_RESERVED)
+ */
+static int
+nfs_bind_resv(struct nfsmount *nmp)
+{
+ struct socket *so = nmp->nm_so;
+ struct sockaddr_in sin;
+ int error;
+ u_short tport;
+
+ if (!so)
+ return (EINVAL);
+
+ sin.sin_len = sizeof (struct sockaddr_in);
+ sin.sin_family = AF_INET;
+ sin.sin_addr.s_addr = INADDR_ANY;
+ tport = IPPORT_RESERVED - 1;
+ sin.sin_port = htons(tport);
+
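+ /* retry on EADDRINUSE, working down from IPPORT_RESERVED - 1 to IPPORT_RESERVED / 2 */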
+ while (((error = sobind(so, (struct sockaddr *) &sin)) == EADDRINUSE) &&
+ (--tport > IPPORT_RESERVED / 2))
+ sin.sin_port = htons(tport);
+ return (error);
+}
+
+/*
+ * globals for managing the nfs_bind_resv_thread and its request queue
+ */
+int nfs_resv_mounts = 0;
+static int nfs_bind_resv_thread_state = 0;
+#define NFS_BIND_RESV_THREAD_STATE_INITTED 1
+#define NFS_BIND_RESV_THREAD_STATE_RUNNING 2
+static struct slock nfs_bind_resv_slock;
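+/*
+ * each pending request is queued on nfs_bind_resv_request_queue;
+ * the requester sleeps on its request until the thread has filled
+ * in brr_error and issued a wakeup
+ */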
+struct nfs_bind_resv_request {
+ TAILQ_ENTRY(nfs_bind_resv_request) brr_chain;
+ struct nfsmount *brr_nmp;
+ int brr_error;
+};
+static TAILQ_HEAD(, nfs_bind_resv_request) nfs_bind_resv_request_queue;
+
+/*
+ * kernel thread to handle reserved port bind requests
+ * on behalf of unprivileged processes
+ */
+static void
+nfs_bind_resv_thread(void)
+{
+ struct nfs_bind_resv_request *brreq;
+ boolean_t funnel_state;
+
+ funnel_state = thread_funnel_set(network_flock, TRUE);
+ nfs_bind_resv_thread_state = NFS_BIND_RESV_THREAD_STATE_RUNNING;
+
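+ /* service requests for as long as any mounts still need a reserved port */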
+ while (nfs_resv_mounts > 0) {
+ simple_lock(&nfs_bind_resv_slock);
+ while ((brreq = TAILQ_FIRST(&nfs_bind_resv_request_queue))) {
+ TAILQ_REMOVE(&nfs_bind_resv_request_queue, brreq, brr_chain);
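+ /* drop the queue lock across the bind, which may block */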
+ simple_unlock(&nfs_bind_resv_slock);
+ brreq->brr_error = nfs_bind_resv(brreq->brr_nmp);
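+ /* wake the requester sleeping on this request */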
+ wakeup(brreq);
+ simple_lock(&nfs_bind_resv_slock);
+ }
+ simple_unlock(&nfs_bind_resv_slock);
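+ /* wait for more requests (or for the last reserved-port mount to go away) */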
+ (void)tsleep((caddr_t)&nfs_bind_resv_request_queue, PSOCK,
+ "nfs_bind_resv_request_queue", 0);
+ }
+
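+ /* no reserved-port mounts remain, so mark the thread stopped and exit */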
+ nfs_bind_resv_thread_state = NFS_BIND_RESV_THREAD_STATE_INITTED;
+ (void) thread_funnel_set(network_flock, funnel_state);
+ (void) thread_terminate(current_act());
+}
+
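+/*
+ * wake the nfs_bind_resv_thread to process any queued requests;
+ * returns EIO if the thread is not running
+ */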
+int
+nfs_bind_resv_thread_wake(void)
+{
+ if (nfs_bind_resv_thread_state < NFS_BIND_RESV_THREAD_STATE_RUNNING)
+ return (EIO);
+ wakeup(&nfs_bind_resv_request_queue);
+ return (0);
+}
+
+/*
+ * unprivileged processes call this to request that nfs_bind_resv_thread
+ * perform the reserved port binding for them.
+ */
+static int
+nfs_bind_resv_nopriv(struct nfsmount *nmp)
+{
+ struct nfs_bind_resv_request brreq;
+ int error;
+
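+ /*
+ * if the bind thread isn't running, initialize the lock and
+ * queue (on first use) and start the thread
+ */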
+ if (nfs_bind_resv_thread_state < NFS_BIND_RESV_THREAD_STATE_RUNNING) {
+ if (nfs_bind_resv_thread_state < NFS_BIND_RESV_THREAD_STATE_INITTED) {
+ simple_lock_init(&nfs_bind_resv_slock);
+ TAILQ_INIT(&nfs_bind_resv_request_queue);
+ nfs_bind_resv_thread_state = NFS_BIND_RESV_THREAD_STATE_INITTED;
+ }
+ kernel_thread(kernel_task, nfs_bind_resv_thread);
+ nfs_bind_resv_thread_state = NFS_BIND_RESV_THREAD_STATE_RUNNING;
+ }
+
+ brreq.brr_nmp = nmp;
+ brreq.brr_error = 0;
+
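+ /* queue the request for the bind thread */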
+ simple_lock(&nfs_bind_resv_slock);
+ TAILQ_INSERT_TAIL(&nfs_bind_resv_request_queue, &brreq, brr_chain);
+ simple_unlock(&nfs_bind_resv_slock);
+
+ error = nfs_bind_resv_thread_wake();
+ if (error) {
+ /* pull our unserviced request back off the queue */
+ simple_lock(&nfs_bind_resv_slock);
+ TAILQ_REMOVE(&nfs_bind_resv_request_queue, &brreq, brr_chain);
+ simple_unlock(&nfs_bind_resv_slock);
+ /* Note: we might be able to simply restart the thread */
+ return (error);
+ }
+
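+ /* wait for the bind thread to service the request */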
+ (void) tsleep((caddr_t)&brreq, PSOCK, "nfsbindresv", 0);
+
+ return (brreq.brr_error);
+}
+