/*
- * Copyright (c) 1999-2001 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 1999-2002 Apple Computer, Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
simple_unlock(&vp->v_interlock);
}
+/*
+ * Serialize the requests to the VM
+ * Returns:
+ * 0 - Failure
+ * 1 - Successful in acquiring the lock
+ * 2 - Successful in acquiring the lock recursively;
+ *     the caller must not call ubc_unbusy()
+ *     [This is strange, but saves 4 bytes in struct ubc_info]
+ */
+static int
+ubc_busy(struct vnode *vp)
+{
+ register struct ubc_info *uip;
+
+ if (!UBCINFOEXISTS(vp))
+ return (0);
+
+ uip = vp->v_ubcinfo;
+
+ while (ISSET(uip->ui_flags, UI_BUSY)) {
+
+ if (uip->ui_owner == (void *)current_thread())
+ return (2);
+
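+ /*
+ * Another thread holds the busy lock; record that we are
+ * waiting and sleep until the holder, or the teardown path
+ * in ubc_info_deallocate(), issues a wakeup.
+ */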
+ SET(uip->ui_flags, UI_WANTED);
+ (void) tsleep((caddr_t)&vp->v_ubcinfo, PINOD, "ubcbusy", 0);
+
+ if (!UBCINFOEXISTS(vp))
+ return (0);
+ }
+ uip->ui_owner = (void *)current_thread();
+
+ SET(uip->ui_flags, UI_BUSY);
+
+ return (1);
+}
+
+static void
+ubc_unbusy(struct vnode *vp)
+{
+ register struct ubc_info *uip;
+
+ if (!UBCINFOEXISTS(vp)) {
+ wakeup((caddr_t)&vp->v_ubcinfo);
+ return;
+ }
+ uip = vp->v_ubcinfo;
+ CLR(uip->ui_flags, UI_BUSY);
+ uip->ui_owner = (void *)NULL;
+
+ if (ISSET(uip->ui_flags, UI_WANTED)) {
+ CLR(uip->ui_flags, UI_WANTED);
+ wakeup((caddr_t)&vp->v_ubcinfo);
+ }
+}
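+
+/*
+ * Illustrative usage sketch (editorial example, not part of the
+ * patch): callers bracket their UBC operation with ubc_busy() and
+ * ubc_unbusy(), skipping the unbusy on a recursive acquisition
+ * (return value 2) so that only the outermost holder releases.
+ * example_ubc_caller() is a hypothetical name.
+ */
+#if 0
+static int
+example_ubc_caller(struct vnode *vp)
+{
+ int recursed;
+
+ if ((recursed = ubc_busy(vp)) == 0)
+ return (0); /* no ubc_info; vnode invalid or dying */
+
+ /* ... operate on vp->v_ubcinfo here ... */
+
+ if (recursed == 1) /* recursive holders must not unbusy */
+ ubc_unbusy(vp);
+ return (1);
+}
+#endif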
+
/*
* Initialization of the zone for Unified Buffer Cache.
*/
uip->ui_refcount = 1;
uip->ui_size = 0;
uip->ui_mapped = 0;
+ uip->ui_owner = (void *)NULL; /* no thread holds the busy lock yet */
ubc_lock(vp);
}
#if DIAGNOSTIC
void
ubc_info_deallocate(struct ubc_info *uip)
{
+
assert(uip->ui_refcount > 0);
- if (uip->ui_refcount-- == 1)
+ if (uip->ui_refcount-- == 1) {
+ struct vnode *vp;
+
+ vp = uip->ui_vnode;
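+
+ /*
+ * The info block is going away; wake any thread sleeping
+ * in ubc_busy() so it can recheck UBCINFOEXISTS() and
+ * back out.
+ */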
+ if (ISSET(uip->ui_flags, UI_WANTED)) {
+ CLR(uip->ui_flags, UI_WANTED);
+ wakeup((caddr_t)&vp->v_ubcinfo);
+ }
+
ubc_info_free(uip);
+ }
}
/*
{
kern_return_t kret;
struct ubc_info *uip;
+ int recursed;
memory_object_control_t control;
memory_object_perf_info_data_t perf;
if (!UBCINFOEXISTS(vp))
return (0);
+ if ((recursed = ubc_busy(vp)) == 0)
+ return (0);
+
uip = vp->v_ubcinfo;
assert(uip != UBC_INFO_NULL);
if (kret != KERN_SUCCESS) {
printf("ubc_uncache: memory_object_change_attributes_named "
"kret = %d", kret);
+ if (recursed == 1)
+ ubc_unbusy(vp);
return (0);
}
ubc_release_named(vp);
+ if (recursed == 1)
+ ubc_unbusy(vp);
return (1);
}
ubc_getobject(struct vnode *vp, int flags)
{
struct ubc_info *uip;
+ int recursed;
memory_object_control_t control;
- uip = vp->v_ubcinfo;
-
if (UBCINVALID(vp))
return (0);
- ubc_lock(vp);
+ if ((recursed = ubc_busy(vp)) == 0)
+ return (0);
+ uip = vp->v_ubcinfo;
control = uip->ui_control;
if ((flags & UBC_HOLDOBJECT) && (!ISSET(uip->ui_flags, UI_HASOBJREF))) {
* Take a temporary reference on the ubc info so that it won't go
* away during our recovery attempt.
*/
+ ubc_lock(vp);
uip->ui_refcount++;
ubc_unlock(vp);
if (memory_object_recover_named(control, TRUE) == KERN_SUCCESS) {
- ubc_lock(vp);
SET(uip->ui_flags, UI_HASOBJREF);
- ubc_unlock(vp);
} else {
control = MEMORY_OBJECT_CONTROL_NULL;
}
+ if (recursed == 1)
+ ubc_unbusy(vp);
ubc_info_deallocate(uip);
} else {
- ubc_unlock(vp);
+ if (recursed == 1)
+ ubc_unbusy(vp);
}
return (control);
ubc_hold(struct vnode *vp)
{
struct ubc_info *uip;
+ int recursed;
memory_object_control_t object;
if (UBCINVALID(vp))
return (0);
- if (!UBCINFOEXISTS(vp)) {
+ if ((recursed = ubc_busy(vp)) == 0) {
/* must be invalid or dying vnode */
assert(UBCINVALID(vp) ||
- ((vp->v_flag & VXLOCK) || (vp->v_flag & VTERMINATE)));
+ ((vp->v_flag & VXLOCK) || (vp->v_flag & VTERMINATE)));
return (0);
}

uip = vp->v_ubcinfo;
ubc_lock(vp);
uip->ui_refcount++;
+ ubc_unlock(vp);
if (!ISSET(uip->ui_flags, UI_HASOBJREF)) {
- ubc_unlock(vp);
- if (memory_object_recover_named(uip->ui_control, TRUE) != KERN_SUCCESS) {
+ if (memory_object_recover_named(uip->ui_control, TRUE)
+ != KERN_SUCCESS) {
+ if (recursed == 1)
+ ubc_unbusy(vp);
ubc_info_deallocate(uip);
return (0);
}
- ubc_lock(vp);
SET(uip->ui_flags, UI_HASOBJREF);
- ubc_unlock(vp);
- } else {
- ubc_unlock(vp);
}
+ if (recursed == 1)
+ ubc_unbusy(vp);
assert(uip->ui_refcount > 0);
+
return (1);
}
ubc_release_named(struct vnode *vp)
{
struct ubc_info *uip;
+ int recursed;
memory_object_control_t control;
- kern_return_t kret;
+ kern_return_t kret = KERN_FAILURE;
if (UBCINVALID(vp))
return (0);
- if (!UBCINFOEXISTS(vp))
+ if ((recursed = ubc_busy(vp)) == 0)
return (0);
-
uip = vp->v_ubcinfo;
/* can not release held or mapped vnodes */
if (ISSET(uip->ui_flags, UI_HASOBJREF) &&
- (uip->ui_refcount == 1) && !uip->ui_mapped) {
+ (uip->ui_refcount == 1) && !uip->ui_mapped) {
control = uip->ui_control;
assert(control);
CLR(uip->ui_flags, UI_HASOBJREF);
kret = memory_object_release_name(control,
MEMORY_OBJECT_RESPECT_CACHE);
- return ((kret != KERN_SUCCESS) ? 0 : 1);
- } else
- return (0);
+ }
+
+ if (recursed == 1)
+ ubc_unbusy(vp);
+ return ((kret != KERN_SUCCESS) ? 0 : 1);
}
/*