/*
- * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
static void vnode_list_add(vnode_t);
static void vnode_list_remove(vnode_t);
+static void vnode_list_remove_locked(vnode_t);
static errno_t vnode_drain(vnode_t);
static void vgone(vnode_t, int flags);
* Alias, but not in use, so flush it out.
*/
if ((vp->v_iocount == 1) && (vp->v_usecount == 0)) {
- vnode_reclaim_internal(vp, 1, 0, 0);
+ vnode_reclaim_internal(vp, 1, 1, 0);
+ vnode_put_locked(vp);
vnode_unlock(vp);
- vnode_put(vp);
goto loop;
}
}
if (vp != NULLVP) {
nvp->v_flag |= VALIASED;
vp->v_flag |= VALIASED;
+ vnode_put_locked(vp);
vnode_unlock(vp);
- vnode_put(vp);
}
return (NULLVP);
}
vnode_list_unlock();
}
+
+/*
+ * remove the vnode from the appropriate free list.
+ * called with vnode LOCKED and
+ * the list lock held
+ */
+static void
+vnode_list_remove_locked(vnode_t vp)
+{
+ if (VONLIST(vp)) {
+ /*
+ * the v_listflag field is
+ * protected by the vnode_list_lock
+ */
+ if (vp->v_listflag & VLIST_RAGE)
+ VREMRAGE("vnode_list_remove", vp);
+ else if (vp->v_listflag & VLIST_DEAD)
+ VREMDEAD("vnode_list_remove", vp);
+ else
+ VREMFREE("vnode_list_remove", vp);
+ }
+}
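
A minimal sketch of the calling convention above (hypothetical caller, using
the locking helpers from this file):

	vnode_lock_spin(vp);		/* vnode lock first... */
	vnode_list_lock();		/* ...then the list lock protecting v_listflag */

	vnode_list_remove_locked(vp);	/* contract satisfied: both locks held */

	vnode_list_unlock();
	vnode_unlock(vp);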
+
+
/*
* remove the vnode from the appropriate free list.
+ * called with vnode LOCKED
*/
static void
vnode_list_remove(vnode_t vp)
/*
* however, we're not guaranteed that
* we won't go from the on-list state
- * to the non-on-list state until we
+ * to the not-on-list state until we
* hold the vnode_list_lock... this
- * is due to new_vnode removing vnodes
+ * is due to "new_vnode" removing vnodes
* from the free list under the list_lock
* w/o the vnode lock... so we need to
* check again whether we're currently
* on the free list
*/
- if (VONLIST(vp)) {
- if (vp->v_listflag & VLIST_RAGE)
- VREMRAGE("vnode_list_remove", vp);
- else if (vp->v_listflag & VLIST_DEAD)
- VREMDEAD("vnode_list_remove", vp);
- else
- VREMFREE("vnode_list_remove", vp);
+ vnode_list_remove_locked(vp);
- VLISTNONE(vp);
- }
vnode_list_unlock();
}
}
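
Condensed, vnode_list_remove is the usual unlocked-check / lock / recheck
pattern; a sketch using the helpers from this file:

	if (VONLIST(vp)) {		/* unlocked hint -- may already be stale */
		vnode_list_lock();
		/*
		 * vnode_list_remove_locked rechecks VONLIST under the
		 * list lock, since new_vnode may have raced us here
		 */
		vnode_list_remove_locked(vp);
		vnode_list_unlock();
	}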
goto defer_reclaim;
}
vnode_lock_convert(vp);
- vnode_reclaim_internal(vp, 1, 0, 0);
+ vnode_reclaim_internal(vp, 1, 1, 0);
}
vnode_dropiocount(vp);
vnode_list_add(vp);
#ifdef JOE_DEBUG
record_vp(vp, 1);
#endif
- vnode_reclaim_internal(vp, 1, 0, 0);
+ vnode_reclaim_internal(vp, 1, 1, 0);
vnode_dropiocount(vp);
vnode_list_add(vp);
-
vnode_unlock(vp);
+
reclaimed++;
mount_lock(mp);
continue;
#ifdef JOE_DEBUG
record_vp(vp, 1);
#endif
- vnode_reclaim_internal(vp, 1, 0, 0);
+ vnode_reclaim_internal(vp, 1, 1, 0);
vnode_dropiocount(vp);
vnode_list_add(vp);
vnode_unlock(vp);
int already_terminating;
int clflags = 0;
+#if NAMEDSTREAMS
+ int is_namedstream;
+#endif
+
/*
* Check to see if the vnode is in use.
* If so we have to reference it before we clean it out
*/
insmntque(vp, (struct mount *)0);
+#if NAMEDSTREAMS
+ is_namedstream = vnode_isnamedstream(vp);
+#endif
+
vnode_unlock(vp);
OSAddAtomic(1, &num_recycledvnodes);
if (active || need_inactive)
VNOP_INACTIVE(vp, ctx);
+#if NAMEDSTREAMS
+ /* Delete the shadow stream file before we reclaim its vnode */
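+ /* (shadow files back named streams only on volumes without native named-stream support) */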
+ if ((is_namedstream != 0) &&
+ (vp->v_parent != NULLVP) &&
+ ((vp->v_parent->v_mount->mnt_kern_flag & MNTK_NAMED_STREAMS) == 0)) {
+ vnode_relenamedstream(vp->v_parent, vp, ctx);
+ }
+#endif
+
/*
* Destroy ubc named reference
* cluster_release is done on this path
SPECHASH_LOCK();
break;
}
- vnode_reclaim_internal(vq, 0, 0, 0);
+ vnode_reclaim_internal(vq, 0, 1, 0);
vnode_put(vq);
SPECHASH_LOCK();
break;
return(0);
}
vnode_reclaim_internal(vp, 1, 0, 0);
+
vnode_unlock(vp);
return (1);
/*
* Alias, but not in use, so flush it out.
*/
- vnode_reclaim_internal(vq, 1, 0, 0);
+ vnode_reclaim_internal(vq, 1, 1, 0);
+ vnode_put_locked(vq);
vnode_unlock(vq);
- vnode_put(vq);
goto loop;
}
count += (vq->v_usecount - vq->v_kusecount);
struct timeval current_tv;
struct unsafe_fsnode *l_unsafefs = 0;
proc_t curproc = current_proc();
- pid_t current_pid = proc_pid(curproc);
retry:
microuptime(&current_tv);
mac_vnode_label_init(vp);
#endif /* MAC */
+ vp->v_iocount = 1;
goto done;
}
if ( !(vp->v_listflag & VLIST_RAGE) || !(vp->v_flag & VRAGE))
panic("new_vnode: vp on RAGE list not marked both VLIST_RAGE and VRAGE");
- // skip vnodes which have a dependency on this process
- // (i.e. they're vnodes in a disk image and this process
- // is diskimages-helper)
+ // if we're a dependency-capable process, skip vnodes that can
+ // cause recycling deadlocks. (i.e. this process is diskimages
+ // helper and the vnode is in a disk image).
//
- if (vp->v_mount && vp->v_mount->mnt_dependent_pid != current_pid && vp->v_mount->mnt_dependent_process != curproc) {
+ if ((curproc->p_flag & P_DEPENDENCY_CAPABLE) == 0 || vp->v_mount == NULL || vp->v_mount->mnt_dependent_process == NULL) {
break;
}
*/
walk_count = 0;
TAILQ_FOREACH(vp, &vnode_free_list, v_freelist) {
- // skip vnodes which have a dependency on this process
- // (i.e. they're vnodes in a disk image and this process
- // is diskimages-helper)
+ // if we're a dependency-capable process, skip vnodes that can
+ // cause recycling deadlocks. (i.e. this process is diskimages
+ // helper and the vnode is in a disk image)
//
- if (vp->v_mount && vp->v_mount->mnt_dependent_pid != current_pid && vp->v_mount->mnt_dependent_process != curproc) {
+ if ((curproc->p_flag & P_DEPENDENCY_CAPABLE) == 0 || vp->v_mount == NULL || vp->v_mount->mnt_dependent_process == NULL) {
break;
}
log(LOG_EMERG, "%d desired, %d numvnodes, "
"%d free, %d dead, %d rage\n",
desiredvnodes, numvnodes, freevnodes, deadvnodes, ragevnodes);
+#if CONFIG_EMBEDDED
+ /*
+ * Running out of vnodes tends to make a system unusable. On an
+ * embedded system, it's unlikely that the user can do anything
+ * about it (or would know what to do, if they could). So panic
+ * the system so it will automatically restart (and hopefully we
+ * can get a panic log that tells us why we ran out).
+ */
+ panic("vnode table is full\n");
+#endif
*vpp = NULL;
return (ENFILE);
}
steal_this_vp:
vpid = vp->v_id;
- /*
- * the v_listflag field is
- * protected by the vnode_list_lock
- */
- if (vp->v_listflag & VLIST_DEAD)
- VREMDEAD("new_vnode", vp);
- else if (vp->v_listflag & VLIST_RAGE)
- VREMRAGE("new_vnode", vp);
- else
- VREMFREE("new_vnode", vp);
- VLISTNONE(vp);
+ vnode_list_remove_locked(vp);
vnode_list_unlock();
vnode_lock_spin(vp);
if (vp->v_lflag & VL_DEAD)
panic("new_vnode: the vnode is VL_DEAD but not VBAD");
vnode_lock_convert(vp);
-
(void)vnode_reclaim_internal(vp, 1, 1, 0);
if ((VONLIST(vp)))
}
#endif /* MAC */
+ vp->v_iocount = 1;
vp->v_lflag = 0;
vp->v_writecount = 0;
vp->v_references = 0;
}
+__private_extern__ int
+vnode_getalways(vnode_t vp)
+{
+ return(vget_internal(vp, 0, VNODE_ALWAYS));
+}
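
A usage sketch (hypothetical caller): unlike vnode_get, vnode_getalways
acquires the iocount even when the vnode is draining, suspended, or being
terminated, so it is effectively non-failing:

	if (vnode_getalways(vp) == 0) {
		/* ... use vp with the iocount held ... */
		vnode_put(vp);
	}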
+
int
vnode_put(vnode_t vp)
{
if ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD)) == VL_MARKTERM) {
vnode_lock_convert(vp);
- vnode_reclaim_internal(vp, 1, 0, 0);
+ vnode_reclaim_internal(vp, 1, 1, 0);
}
vnode_dropiocount(vp);
vnode_list_add(vp);
{
int nodead = vflags & VNODE_NODEAD;
int nosusp = vflags & VNODE_NOSUSPEND;
+ int always = vflags & VNODE_ALWAYS;
for (;;) {
/*
(vp->v_owner == current_thread())) {
break;
}
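+ /* VNODE_ALWAYS: take the iocount even while the vnode is draining, suspended, or terminating */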
+ if (always != 0)
+ break;
vnode_lock_convert(vp);
if (vp->v_lflag & VL_TERMINATE) {
* once new_vnode drops the list_lock, it will block trying to take
* the vnode lock until we release it... at that point it will evaluate
* whether the v_id has changed
+ * also need to make sure that the vnode isn't on a list where "new_vnode"
+ * can find it after the v_id has been bumped until we are completely done
+ * with the vnode (i.e. putting it back on a list has to be the very last
+ * thing we do to this vnode... many of the callers of vnode_reclaim_internal
+ * are holding an io_count on the vnode... they need to drop the io_count
+ * BEFORE doing a vnode_list_add or make sure to hold the vnode lock until
+ * they are completely done with the vnode
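+ *
+ * a conforming caller (cf. the call sites above) does:
+ *
+ *	vnode_reclaim_internal(vp, 1, 1, 0);
+ *	vnode_dropiocount(vp);
+ *	vnode_list_add(vp);
+ *	vnode_unlock(vp);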
*/
vnode_list_lock();
+
+ vnode_list_remove_locked(vp);
vp->v_id++;
+
vnode_list_unlock();
if (isfifo) {
if (vp->v_data)
panic("vnode_reclaim_internal: cleaned vnode isn't");
if (vp->v_numoutput)
- panic("vnode_reclaim_internal: Clean vnode has pending I/O's");
+ panic("vnode_reclaim_internal: clean vnode has pending I/O's");
if (UBCINFOEXISTS(vp))
panic("vnode_reclaim_internal: ubcinfo not cleaned");
if (vp->v_parent)
vp->v_lflag &= ~VL_TERMWANT;
wakeup(&vp->v_lflag);
}
- if (!reuse && vp->v_usecount == 0) {
+ if (!reuse) {
/*
* make sure we get on the
- * dead list
+ * dead list if appropriate
*/
- vnode_list_remove(vp);
vnode_list_add(vp);
}
if (!locked)
}
/* USAGE:
- * The following api creates a vnode and associates all the parameter specified in vnode_fsparam
- * structure and returns a vnode handle with a reference. device aliasing is handled here so checkalias
- * is obsoleted by this.
* vnode_create(int flavor, size_t size, void * param, vnode_t *vp)
*/
int
vp->v_op = param->vnfs_vops;
vp->v_type = param->vnfs_vtype;
vp->v_data = param->vnfs_fsnode;
- vp->v_iocount = 1;
if (param->vnfs_markroot)
vp->v_flag |= VROOT;
cpos += dp->d_reclen;
dp = (struct dirent*)cpos;
}
+
+ /*
+ * workaround for HFS/NFS setting eofflag before end of file
+ */
+ if (vp->v_tag == VT_HFS && nentries > 2)
+ eofflag = 0;
+
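+ /*
+ * NFS: the first time eofflag comes back set, rewind the uio and make
+ * one more full pass (tracked via full_erase_flag) before trusting it
+ */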
+ if (vp->v_tag == VT_NFS) {
+ if (eofflag && !full_erase_flag) {
+ full_erase_flag = 1;
+ eofflag = 0;
+ uio_reset(auio, 0, UIO_SYSSPACE, UIO_READ);
+ }
+ else if (!eofflag && full_erase_flag)
+ full_erase_flag = 0;
+ }
} while (!eofflag);
/*
uio_reset(auio, 0, UIO_SYSSPACE, UIO_READ);
eofflag = 0;
+ full_erase_flag = 0;
do {
siz = UIO_BUFF_SIZE;