diff --git a/bsd/kern/ubc_subr.c b/bsd/kern/ubc_subr.c
index 955b6b638e5702d987244782c36e872475bcfb14..47cb041ab19db3152d4c426a69cd2d0c41532813 100644
--- a/bsd/kern/ubc_subr.c
+++ b/bsd/kern/ubc_subr.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999-2001 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 1999-2002 Apple Computer, Inc. All rights reserved.
  *
  * @APPLE_LICENSE_HEADER_START@
  * 
@@ -78,6 +78,62 @@ ubc_unlock(struct vnode *vp)
        simple_unlock(&vp->v_interlock);
 }
 
+/*
+ * Serialize the requests to the VM
+ * Returns:
+ *             0       -       Failure
+ *             1       -       Successful in acquiring the lock
+ *             2       -       Successful in acquiring the lock recursively;
+ *                             the caller must not call ubc_unbusy()
+ *                             [This is strange, but saves 4 bytes in struct ubc_info]
+ */
+static int
+ubc_busy(struct vnode *vp)
+{
+       register struct ubc_info        *uip;
+
+       if (!UBCINFOEXISTS(vp))
+               return (0);
+
+       uip = vp->v_ubcinfo;
+
+       while (ISSET(uip->ui_flags, UI_BUSY)) {
+
+               if (uip->ui_owner == (void *)current_thread())
+                       return (2);
+
+               SET(uip->ui_flags, UI_WANTED);
+               (void) tsleep((caddr_t)&vp->v_ubcinfo, PINOD, "ubcbusy", 0);
+
+               if (!UBCINFOEXISTS(vp))
+                       return (0);
+       }
+       uip->ui_owner = (void *)current_thread();
+
+       SET(uip->ui_flags, UI_BUSY);
+
+       return (1);
+}
+
+static void
+ubc_unbusy(struct vnode *vp)
+{
+       register struct ubc_info        *uip;
+
+       if (!UBCINFOEXISTS(vp)) {
+               wakeup((caddr_t)&vp->v_ubcinfo);
+               return;
+       }
+       uip = vp->v_ubcinfo;
+       CLR(uip->ui_flags, UI_BUSY);
+       uip->ui_owner = (void *)NULL;
+
+       if (ISSET(uip->ui_flags, UI_WANTED)) {
+               CLR(uip->ui_flags, UI_WANTED);
+               wakeup((caddr_t)&vp->v_ubcinfo);
+       }
+}
+
 /*
  *     Initialization of the zone for Unified Buffer Cache.
  */
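
For reference, every caller touched below (ubc_uncache, ubc_getobject, ubc_hold, ubc_release_named) brackets its work with the same busy/unbusy pattern. A minimal sketch of that pattern, with a hypothetical caller name and an illustrative return value:

/*
 * example_vm_op() is illustrative only; it is not part of this change.
 */
static int
example_vm_op(struct vnode *vp)
{
	int recursed;

	/* 0: no ubc_info (invalid or dying vnode), nothing to serialize */
	if ((recursed = ubc_busy(vp)) == 0)
		return (0);

	/* ... interact with the VM object while UI_BUSY is held ... */

	/*
	 * 2 means this thread already owned UI_BUSY when it re-entered;
	 * only the outermost caller (return value 1) drops the flag.
	 */
	if (recursed == 1)
		ubc_unbusy(vp);
	return (1);
}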
@@ -139,6 +195,7 @@ ubc_info_init(struct vnode *vp)
                uip->ui_refcount = 1;
                uip->ui_size = 0;
                uip->ui_mapped = 0;
+               uip->ui_owner = (void *)NULL;
                ubc_lock(vp);
        }
 #if DIAGNOSTIC
@@ -232,10 +289,20 @@ ubc_info_free(struct ubc_info *uip)
 void
 ubc_info_deallocate(struct ubc_info *uip)
 {
+
        assert(uip->ui_refcount > 0);
 
-    if (uip->ui_refcount-- == 1)
+    if (uip->ui_refcount-- == 1) {
+               struct vnode *vp;
+
+               vp = uip->ui_vnode;
+               if (ISSET(uip->ui_flags, UI_WANTED)) {
+                       CLR(uip->ui_flags, UI_WANTED);
+                       wakeup((caddr_t)&vp->v_ubcinfo);
+               }
+
                ubc_info_free(uip);
+       }
 }
 
 /*
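
The wakeup added to ubc_info_deallocate() pairs with the UBCINFOEXISTS() re-check that ubc_busy() performs after tsleep(): a waiter can be woken because the ubc_info was freed, not because UI_BUSY was cleared. An illustrative interleaving, not taken from the source, of the two threads:

/*
 *   thread A                          thread B
 *   --------                          --------
 *   ubc_busy(vp) returns 1
 *   (owns UI_BUSY)
 *                                     ubc_busy(vp) sees UI_BUSY,
 *                                     sets UI_WANTED, tsleep()s on
 *                                     &vp->v_ubcinfo
 *   last reference dropped:
 *   ubc_info_deallocate() clears
 *   UI_WANTED, wakeup()s, frees uip
 *                                     wakes, UBCINFOEXISTS(vp) is now
 *                                     false, returns 0 to its caller
 */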
@@ -339,12 +406,16 @@ ubc_uncache(struct vnode *vp)
 {
        kern_return_t kret;
        struct ubc_info *uip;
+       int    recursed;
        memory_object_control_t control;
        memory_object_perf_info_data_t   perf;
 
        if (!UBCINFOEXISTS(vp))
                return (0);
 
+       if ((recursed = ubc_busy(vp)) == 0)
+               return (0);
+
        uip = vp->v_ubcinfo;
 
        assert(uip != UBC_INFO_NULL);
@@ -372,11 +443,15 @@ ubc_uncache(struct vnode *vp)
        if (kret != KERN_SUCCESS) {
                printf("ubc_uncache: memory_object_change_attributes_named "
                        "kret = %d", kret);
+               if (recursed == 1)
+                       ubc_unbusy(vp);
                return (0);
        }
 
        ubc_release_named(vp);
 
+       if (recursed == 1)
+               ubc_unbusy(vp);
        return (1);
 }
 
@@ -506,15 +581,16 @@ memory_object_control_t
 ubc_getobject(struct vnode *vp, int flags)
 {
        struct ubc_info *uip;
+       int    recursed;
        memory_object_control_t control;
 
-       uip = vp->v_ubcinfo;
-
        if (UBCINVALID(vp))
                return (0);
 
-       ubc_lock(vp);
+       if ((recursed = ubc_busy(vp)) == 0)
+               return (0);
 
+       uip = vp->v_ubcinfo;
        control = uip->ui_control;
 
        if ((flags & UBC_HOLDOBJECT) && (!ISSET(uip->ui_flags, UI_HASOBJREF))) {
@@ -523,19 +599,21 @@ ubc_getobject(struct vnode *vp, int flags)
                 * Take a temporary reference on the ubc info so that it won't go
                 * away during our recovery attempt.
                 */
+               ubc_lock(vp);
                uip->ui_refcount++;
                ubc_unlock(vp);
                if (memory_object_recover_named(control, TRUE) == KERN_SUCCESS) {
-                       ubc_lock(vp);
                        SET(uip->ui_flags, UI_HASOBJREF);
-                       ubc_unlock(vp);
                } else {
                        control = MEMORY_OBJECT_CONTROL_NULL;
                }
+               if (recursed == 1)
+                       ubc_unbusy(vp);
                ubc_info_deallocate(uip);
 
        } else {
-               ubc_unlock(vp);
+               if (recursed == 1)
+                       ubc_unbusy(vp);
        }
 
        return (control);
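
The reworked ubc_getobject() separates the two serialization mechanisms: ubc_busy() serializes the call into the pager, while ubc_lock() only protects the ui_refcount bump that keeps the ubc_info alive across that call. A minimal sketch of the temporary-reference pattern, with a hypothetical function name:

/*
 * example_recover() is illustrative only; the error handling and the
 * busy/unbusy bracket of the real ubc_getobject() are omitted.
 */
static void
example_recover(struct vnode *vp)
{
	struct ubc_info *uip = vp->v_ubcinfo;

	ubc_lock(vp);
	uip->ui_refcount++;		/* pin the ubc_info */
	ubc_unlock(vp);

	/* safe to call out to the pager: uip cannot be freed under us */
	if (memory_object_recover_named(uip->ui_control, TRUE) == KERN_SUCCESS)
		SET(uip->ui_flags, UI_HASOBJREF);

	ubc_info_deallocate(uip);	/* drop the temporary reference */
}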
@@ -770,15 +848,16 @@ int
 ubc_hold(struct vnode *vp)
 {
        struct ubc_info *uip;
+       int    recursed;
        memory_object_control_t object;
 
        if (UBCINVALID(vp))
                return (0);
 
-       if (!UBCINFOEXISTS(vp)) {
+       if ((recursed = ubc_busy(vp)) == 0) {
                /* must be invalid or dying vnode */
                assert(UBCINVALID(vp) ||
-                          ((vp->v_flag & VXLOCK) || (vp->v_flag & VTERMINATE)));
+                       ((vp->v_flag & VXLOCK) || (vp->v_flag & VTERMINATE)));
                return (0);
        }
 
@@ -787,21 +866,23 @@ ubc_hold(struct vnode *vp)
 
        ubc_lock(vp);
        uip->ui_refcount++;
+       ubc_unlock(vp);
 
        if (!ISSET(uip->ui_flags, UI_HASOBJREF)) {
-               ubc_unlock(vp);
-               if (memory_object_recover_named(uip->ui_control, TRUE) != KERN_SUCCESS) {
+               if (memory_object_recover_named(uip->ui_control, TRUE)
+                       != KERN_SUCCESS) {
+                       if (recursed == 1)
+                               ubc_unbusy(vp);
                        ubc_info_deallocate(uip);
                        return (0);
                }
-               ubc_lock(vp);
                SET(uip->ui_flags, UI_HASOBJREF);
-               ubc_unlock(vp);
-       } else {
-               ubc_unlock(vp);
        }
+       if (recursed == 1)
+               ubc_unbusy(vp);
 
        assert(uip->ui_refcount > 0);
+
        return (1);
 }
 
@@ -872,28 +953,30 @@ int
 ubc_release_named(struct vnode *vp)
 {
        struct ubc_info *uip;
+       int    recursed;
        memory_object_control_t control;
-       kern_return_t kret;
+       kern_return_t kret = KERN_FAILURE;
 
        if (UBCINVALID(vp))
                return (0);
 
-       if (!UBCINFOEXISTS(vp))
+       if ((recursed = ubc_busy(vp)) == 0)
                return (0);
-
        uip = vp->v_ubcinfo;
 
        /* can not release held or mapped vnodes */
        if (ISSET(uip->ui_flags, UI_HASOBJREF) && 
-           (uip->ui_refcount == 1) && !uip->ui_mapped) {
+               (uip->ui_refcount == 1) && !uip->ui_mapped) {
                control = uip->ui_control;
                assert(control);
                CLR(uip->ui_flags, UI_HASOBJREF);
                kret = memory_object_release_name(control,
                                MEMORY_OBJECT_RESPECT_CACHE);
-               return ((kret != KERN_SUCCESS) ? 0 : 1);
-       } else 
-               return (0);
+       }
+
+       if (recursed == 1)
+               ubc_unbusy(vp);
+       return ((kret != KERN_SUCCESS) ? 0 : 1);
 }
 
 /*