+
+/*
+ * sync_internal - kernel-internal wrapper around the sync() syscall,
+ * provided so power management code can flush filesystems without
+ * going through the user syscall path.
+ *
+ * Returns the error code from sync() (0 on success).
+ */
+__private_extern__ int
+sync_internal(void)
+{
+	struct sync_args args;
+	int rv[2];
+	int err;
+	boolean_t prev_funnel_state;
+
+	/* sync() must run under the kernel funnel; restore the caller's
+	 * funnel state on the way out. */
+	prev_funnel_state = thread_funnel_set(kernel_flock, TRUE);
+	err = sync(current_proc(), &args, &rv);
+	thread_funnel_set(kernel_flock, prev_funnel_state);
+
+	return err;
+} /* end of sync_internal call */
+
+
+
+// XXXdbg fmod watching calls
+//
+// Shared state for the file-modification watcher: a fixed-size ring
+// buffer of referenced vnodes (plus the pid that modified each one),
+// produced by notify_filemod_watchers() and consumed by fmod_watch().
+#define NUM_CHANGE_NODES 256
+static int changed_init=0;                              // one-shot flag: lock below initialized lazily in fmod_watch()
+static volatile int fmod_watch_enabled = 0;             // non-zero while a watcher is registered (currently at most one)
+static pid_t fmod_watch_owner;                          // pid of the process that enabled watching
+static simple_lock_data_t changed_nodes_lock; // guard access
+static volatile struct vnode *changed_nodes[NUM_CHANGE_NODES];      // ring buffer slots; NULL == empty
+static volatile pid_t changed_nodes_pid[NUM_CHANGE_NODES];          // pid of modifier, parallel to changed_nodes[]
+static volatile int changed_rd_index=0, changed_wr_index=0;         // ring read/write cursors; equal == empty
+static volatile int notifier_sleeping=0;                // count of producers blocked on a full ring
+
+
+/*
+ * notify_filemod_watchers - queue a "file modified" event for vp.
+ *
+ * Takes a reference on vp (vget) and inserts it, together with the
+ * modifying process's pid, into the changed_nodes ring; the watcher
+ * side (fmod_watch) consumes the entry and drops the reference.
+ * If the ring is full, blocks up to 10 seconds waiting for drain.
+ * No-op when watching is disabled or vp is not a file/directory.
+ */
+void
+notify_filemod_watchers(struct vnode *vp, struct proc *p)
+{
+	int ret;
+
+	// only want notification on regular files.
+	if (fmod_watch_enabled == 0 || (vp->v_type != VREG && vp->v_type != VDIR)) {
+		return;
+	}
+
+	// grab a reference so it doesn't go away
+	if (vget(vp, 0, p) != 0) {
+		return;
+	}
+
+  retry:
+	simple_lock(&changed_nodes_lock);
+
+	// If the table is full, block until it clears up.
+	// NOTE(review): notifier_sleeping++ happens after the unlock, so the
+	// reader's wakeup can slip in before we reach tsleep; the 10-second
+	// timeout bounds the resulting stall -- confirm that is acceptable.
+	if (((changed_wr_index+1) % NUM_CHANGE_NODES) == changed_rd_index) {
+		simple_unlock(&changed_nodes_lock);
+
+		notifier_sleeping++;
+		// wait up to 10 seconds for the queue to drain
+		ret = tsleep((caddr_t)&changed_wr_index, PINOD, "changed_nodes_full", 10*hz);
+		if (ret != 0 || fmod_watch_enabled == 0) {
+			notifier_sleeping--;
+			// %p, not %x: %x mis-formats/truncates a pointer argument
+			printf("notify_filemod: err %d from tsleep/enabled %d. bailing out (vp %p).\n",
+			       ret, fmod_watch_enabled, vp);
+			vrele(vp);
+			return;
+		}
+
+		notifier_sleeping--;
+		goto retry;
+	}
+
+	// insert our new guy; a non-NULL slot means the ring indices are corrupt
+	if (changed_nodes[changed_wr_index] != NULL) {
+		// %p, not %x: %x mis-formats/truncates a pointer argument
+		panic("notify_fmod_watchers: index %d is %p, not null!\n",
+		      changed_wr_index, changed_nodes[changed_wr_index]);
+	}
+	changed_nodes[changed_wr_index] = vp;
+	// NOTE(review): records current_proc(), not the passed-in p --
+	// presumably always the same process here; confirm against callers.
+	changed_nodes_pid[changed_wr_index] = current_proc()->p_pid;
+	changed_wr_index = (changed_wr_index + 1) % NUM_CHANGE_NODES;
+
+	simple_unlock(&changed_nodes_lock);
+
+	// wake a watcher blocked in fmod_watch() waiting for entries
+	wakeup((caddr_t)&changed_rd_index);
+}
+
+
+// User-visible argument block for the fmod_watch() syscall.
+// All pointer fields are user-space addresses (copyout targets).
+struct fmod_watch_args {
+	int *new_fd;    // out: fd opened (read-only) on the changed vnode
+	char *pathbuf;  // out, optional: buffer for the vnode's path
+	int len;        // in: size of pathbuf
+	pid_t pid;      // out, optional: pid that modified the file
+};
+
+/*
+ * fmod_watch - block until a file-modification event is queued, then
+ * hand the event to the caller: copies out the changed file's path
+ * (if pathbuf given), the modifying pid (if pid given), and opens a
+ * read-only fd on the vnode (copied out via new_fd).
+ *
+ * Returns 0 on success; on failure sets *retval = -1 and returns errno.
+ * The vnode reference taken by notify_filemod_watchers() is either
+ * transferred to the new fd (released on close) or dropped here on error.
+ */
+int
+fmod_watch(struct proc *p, struct fmod_watch_args *uap, register_t *retval)
+{
+	int fd, didhold = 0;
+	struct filedesc *fdp;
+	struct file *fp;
+	struct vnode *vp;
+	int flags;
+	int type, indx, error, need_wakeup=0;
+	struct flock lf;
+	struct nameidata nd;
+	extern struct fileops vnops;
+	pid_t pid;
+
+	if (fmod_watch_enabled == 0) {
+		*retval = -1;
+		return EINVAL;
+	}
+
+	p = current_proc();
+
+	// lazy one-shot init of the ring lock (single watcher assumed)
+	if (changed_init == 0) {
+		changed_init = 1;
+		simple_lock_init(&changed_nodes_lock);
+	}
+
+	if (changed_rd_index == changed_wr_index) {
+		// there's nothing to do, go to sleep
+		error = tsleep((caddr_t)&changed_rd_index, PUSER|PCATCH, "changed_nodes_empty", 0);
+		if (error != 0) {
+			// XXXdbg - what if after we unblock the changed_nodes
+			// table is full? We should wakeup() the writer.
+			*retval = -1;
+			return error;
+		}
+	}
+
+	simple_lock(&changed_nodes_lock);
+
+	// pop the oldest entry off the ring
+	vp = (struct vnode *)changed_nodes[changed_rd_index];
+	pid = changed_nodes_pid[changed_rd_index];
+
+	changed_nodes[changed_rd_index] = NULL;
+	changed_rd_index = (changed_rd_index + 1) % NUM_CHANGE_NODES;
+
+	if (vp == NULL) {
+		printf("watch_file_changes: Someone put a null vnode in my table! (%d %d)\n",
+		       changed_rd_index, changed_wr_index);
+		error = EINVAL;
+		// must drop the simple lock before bailing; the original code
+		// jumped to err0 with changed_nodes_lock still held
+		simple_unlock(&changed_nodes_lock);
+		goto err0;
+	}
+
+	simple_unlock(&changed_nodes_lock);
+
+	// if the writers are blocked, wake them up as we just freed up
+	// some space for them.
+	if (notifier_sleeping > 0) {
+		wakeup((caddr_t)&changed_wr_index);
+	}
+
+	if (vp->v_type != VREG && vp->v_type != VDIR) {
+		error = EBADF;
+		goto err1;
+	}
+
+	if ((error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p)) != 0) {
+		printf("fmod_watch: vn_lock returned %d\n", error);
+		goto err1;
+	}
+
+	// first copy out the name
+	if (uap->pathbuf) {
+		char *buff;
+		int len=MAXPATHLEN;
+
+		MALLOC(buff, char *, len, M_TEMP, M_WAITOK);
+		error = vn_getpath(vp, buff, &len);
+		if (error == 0) {
+			if (len < uap->len)
+				error = copyout(buff, (void *)uap->pathbuf, len);
+			else
+				error = ENOSPC;   // user buffer too small for the path
+		}
+		FREE(buff, M_TEMP);
+		if (error) {
+			goto err1;
+		}
+	}
+
+	// now copy out the pid of the person that changed the file
+	if (uap->pid) {
+		if ((error = copyout((caddr_t)&pid, (void *)uap->pid, sizeof(pid_t))) != 0) {
+			printf("fmod_watch: failed to copy out the pid (%d)\n", pid);
+			goto err1;
+		}
+	}
+
+	// now create a file descriptor for this vnode
+	fdp = p->p_fd;
+	flags = FREAD;
+	if ((error = falloc(p, &fp, &indx)) != 0) {
+		printf("fmod_watch: failed to allocate an fd...\n");
+		goto err2;
+	}
+
+	if ((error = copyout((caddr_t)&indx, (void *)uap->new_fd, sizeof(int))) != 0) {
+		printf("fmod_watch: failed to copy out the new fd (%d)\n", indx);
+		goto err3;
+	}
+
+	fp->f_flag = flags & FMASK;
+	fp->f_type = DTYPE_VNODE;
+	fp->f_ops = &vnops;
+	fp->f_data = (caddr_t)vp;
+
+	if (UBCINFOEXISTS(vp) && ((didhold = ubc_hold(vp)) == 0)) {
+		// set a real error: the original fell through with error == 0,
+		// returning "success" while *retval was -1
+		error = ENOMEM;
+		goto err3;
+	}
+
+	error = VOP_OPEN(vp, flags, p->p_ucred, p);
+	if (error) {
+		goto err4;
+	}
+
+	VOP_UNLOCK(vp, 0, p);
+
+	// fd is fully set up; make it visible to the process
+	*fdflags(p, indx) &= ~UF_RESERVED;
+
+	// note: we explicitly don't vrele() here because it
+	// happens when the fd is closed.
+
+	return error;
+
+  err4:
+	if (didhold) {
+		ubc_rele(vp);
+	}
+  err3:
+	ffree(fp);
+	fdrelse(p, indx);
+  err2:
+	VOP_UNLOCK(vp, 0, p);
+  err1:
+	vrele(vp); // undoes the vref() in notify_filemod_watchers()
+
+  err0:
+	*retval = -1;
+	return error;
+}
+
+/*
+ * enable_fmod_watching - register the calling process as the fmod
+ * watcher.  Superuser only; at most one watcher at a time (XXXdbg).
+ * Sets *retval to 0 on success, -1 on failure; returns errno.
+ */
+static int
+enable_fmod_watching(register_t *retval)
+{
+	int err = 0;
+
+	if (!is_suser()) {
+		// watching is a privileged operation
+		err = EPERM;
+	} else if (fmod_watch_enabled) {
+		// XXXdbg for now we only allow one watcher at a time.
+		err = EBUSY;
+	} else {
+		fmod_watch_enabled++;
+		fmod_watch_owner = current_proc()->p_pid;
+	}
+
+	*retval = err ? -1 : 0;
+	return err;
+}
+
+/*
+ * disable_fmod_watching - unregister the current watcher.  When the
+ * last watcher goes away, drains the changed_nodes ring (dropping the
+ * vnode references queued by notify_filemod_watchers) and wakes any
+ * producers blocked on a full ring.  Superuser only.
+ * Sets *retval to 0 on success; returns errno.
+ */
+static int
+disable_fmod_watching(register_t *retval)
+{
+	if (!is_suser()) {
+		return EPERM;
+	}
+
+	if (fmod_watch_enabled < 1) {
+		printf("fmod_watching: too many disables! (%d)\n", fmod_watch_enabled);
+		return EINVAL;
+	}
+
+	// cleared before the drain so notifiers that wake up see
+	// "disabled" and bail out instead of re-inserting
+	fmod_watch_enabled--;
+
+	// if we're the last guy, clear out any remaining vnodes
+	// in the table so they don't remain referenced.
+	//
+	// NOTE(review): the ring is walked without taking
+	// changed_nodes_lock -- presumably safe because producers check
+	// fmod_watch_enabled first, but a notifier already past that
+	// check could still be mid-insert; confirm.
+	if (fmod_watch_enabled == 0) {
+		int i;
+		for(i=changed_rd_index; i != changed_wr_index; ) {
+			if (changed_nodes[i] == NULL) {
+				panic("disable_fmod_watch: index %d is NULL!\n", i);
+			}
+			vrele((struct vnode *)changed_nodes[i]);
+			changed_nodes[i] = NULL;
+			i = (i + 1) % NUM_CHANGE_NODES;
+		}
+		changed_wr_index = changed_rd_index = 0;
+
+		fmod_watch_owner = 0;
+	}
+
+	// wake up anyone that may be waiting for the
+	// queue to clear out.
+	//
+	while(notifier_sleeping) {
+		wakeup((caddr_t)&changed_wr_index);
+
+		// yield the cpu so the notifiers can run
+		tsleep((caddr_t)&fmod_watch_enabled, PINOD, "disable_fmod_watch", 1);
+	}
+
+	*retval = 0;
+	return 0;
+}
+
+
+// Argument block for the fmod_watch_enable() syscall.
+struct fmod_watch_enable_args {
+	int on_or_off;  // non-zero: enable watching; zero: disable
+};
+
+/*
+ * fmod_watch_enable - syscall entry point that toggles fmod watching
+ * for the caller.  Dispatches on the single flag argument.
+ */
+int
+fmod_watch_enable(struct proc *p, struct fmod_watch_enable_args *uap, register_t *retval)
+{
+	return (uap->on_or_off != 0)
+	    ? enable_fmod_watching(retval)
+	    : disable_fmod_watching(retval);
+}
+
+/*
+ * clean_up_fmod_watch - process-teardown hook: if the exiting process
+ * is the registered fmod watcher, disable watching so the queued
+ * vnode references get released.
+ */
+void
+clean_up_fmod_watch(struct proc *p)
+{
+	if (fmod_watch_enabled && fmod_watch_owner == p->p_pid) {
+		// was "register_t *retval": an uninitialized POINTER, so
+		// &retval had type register_t ** and the callee wrote through
+		// the wrong level of indirection.  A plain register_t is what
+		// disable_fmod_watching() expects.
+		register_t retval;
+
+		disable_fmod_watching(&retval);
+	}
+}