/*
 * Copyright (c) 1999-2002 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/*
 *	Author:	Umesh Vaishampayan [umeshv@apple.com]
 *		05-Aug-1999	umeshv	Created.
 *
 *	Functions related to Unified Buffer cache.
 *
 *	Caller of UBC functions MUST have a valid reference on the vnode.
 */
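
/*
 * Illustrative caller-side sketch (hypothetical code, not part of this
 * file): the rule above means a caller takes a usecount reference, e.g.
 * via vget()/vrele(), around any UBC call:
 *
 *	struct proc *p = current_proc();
 *
 *	if (vget(vp, 0, p) == 0) {
 *		(void) ubc_pushdirty(vp);
 *		vrele(vp);
 *	}
 */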

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/ubc.h>
#include <sys/ucred.h>
#include <sys/proc.h>

#include <mach/mach_types.h>
#include <mach/memory_object_types.h>

#include <kern/zalloc.h>

#if DIAGNOSTIC
#define assert(cond)    \
    ((void) ((cond) ? 0 : panic("%s:%d (%s)", __FILE__, __LINE__, # cond)))
#else
#include <kern/assert.h>
#endif /* DIAGNOSTIC */

struct zone	*ubc_info_zone;

/* lock for changes to struct UBC */
static __inline__ void
ubc_lock(struct vnode *vp)
{
	/* For now, just use the v_interlock */
	simple_lock(&vp->v_interlock);
}

static __inline__ void
ubc_unlock(struct vnode *vp)
{
	/* For now, just use the v_interlock */
	simple_unlock(&vp->v_interlock);
}

/*
 * Serialize the requests to the VM
 * Returns:
 *		0	-	Failure
 *		1	-	Successful in acquiring the lock
 *		2	-	Successful in acquiring the lock recursively;
 *				do not call ubc_unbusy()
 *				[This is strange, but saves 4 bytes in struct ubc_info]
 */
static int
ubc_busy(struct vnode *vp)
{
	register struct ubc_info	*uip;

	if (!UBCINFOEXISTS(vp))
		return (0);

	uip = vp->v_ubcinfo;

	while (ISSET(uip->ui_flags, UI_BUSY)) {

		if (uip->ui_owner == (void *)current_thread())
			return (2);

		SET(uip->ui_flags, UI_WANTED);
		(void) tsleep((caddr_t)&vp->v_ubcinfo, PINOD, "ubcbusy", 0);

		if (!UBCINFOEXISTS(vp))
			return (0);
	}
	uip->ui_owner = (void *)current_thread();

	SET(uip->ui_flags, UI_BUSY);

	return (1);
}

static void
ubc_unbusy(struct vnode *vp)
{
	register struct ubc_info	*uip;

	if (!UBCINFOEXISTS(vp)) {
		wakeup((caddr_t)&vp->v_ubcinfo);
		return;
	}
	uip = vp->v_ubcinfo;
	CLR(uip->ui_flags, UI_BUSY);
	uip->ui_owner = (void *)NULL;

	if (ISSET(uip->ui_flags, UI_WANTED)) {
		CLR(uip->ui_flags, UI_WANTED);
		wakeup((caddr_t)&vp->v_ubcinfo);
	}
}
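
/*
 * Sketch of the ubc_busy()/ubc_unbusy() protocol used throughout this
 * file (illustrative).  A return of 2 means this thread already held the
 * lock, so the matching ubc_unbusy() must be skipped:
 *
 *	int recursed;
 *
 *	if ((recursed = ubc_busy(vp)) == 0)
 *		return (0);
 *	... operate on vp->v_ubcinfo ...
 *	if (recursed == 1)
 *		ubc_unbusy(vp);
 */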

/*
 *	Initialization of the zone for Unified Buffer Cache.
 */
__private_extern__ void
ubc_init()
{
	int	i;

	i = (vm_size_t) sizeof (struct ubc_info);
	/* XXX	the number of elements should be tied in to maxvnodes */
	ubc_info_zone = zinit (i, 10000*i, 8192, "ubc_info zone");
}

/*
 *	Initialize a ubc_info structure for a vnode.
 */
int
ubc_info_init(struct vnode *vp)
{
	register struct ubc_info	*uip;
	void *	pager;
	struct vattr	vattr;
	struct proc *p = current_proc();
	int error = 0;
	kern_return_t kret;
	memory_object_control_t control;

	ubc_lock(vp);
	if (ISSET(vp->v_flag,  VUINIT)) {
		/*
		 * other thread is already doing this
		 * wait till done
		 */
		while (ISSET(vp->v_flag,  VUINIT)) {
			SET(vp->v_flag, VUWANT); /* XXX overloaded! */
			ubc_unlock(vp);
			(void) tsleep((caddr_t)vp, PINOD, "ubcinfo", 0);
			ubc_lock(vp);
		}
		ubc_unlock(vp);
		return (0);
	} else {
		SET(vp->v_flag, VUINIT);
	}

	uip = vp->v_ubcinfo;
	if ((uip == UBC_INFO_NULL) || (uip == UBC_NOINFO)) {
		ubc_unlock(vp);
		uip = (struct ubc_info *) zalloc(ubc_info_zone);
		uip->ui_pager = MEMORY_OBJECT_NULL;
		uip->ui_control = MEMORY_OBJECT_CONTROL_NULL;
		uip->ui_flags = UI_INITED;
		uip->ui_vnode = vp;
		uip->ui_ucred = NOCRED;
		uip->ui_refcount = 1;
		uip->ui_size = 0;
		uip->ui_mapped = 0;
		uip->ui_owner = (void *)NULL;
		ubc_lock(vp);
	}
#if DIAGNOSTIC
	else
		Debugger("ubc_info_init: already");
#endif /* DIAGNOSTIC */

	assert(uip->ui_flags != UI_NONE);
	assert(uip->ui_vnode == vp);

	if (ISSET(uip->ui_flags, UI_HASPAGER))
		goto done;

	/* now set this ubc_info in the vnode */
	vp->v_ubcinfo = uip;
	SET(uip->ui_flags, UI_HASPAGER);
	ubc_unlock(vp);
	pager = (void *)vnode_pager_setup(vp, uip->ui_pager);
	assert(pager);
	ubc_setpager(vp, pager);

	/*
	 * Note: We can not use VOP_GETATTR() to get accurate
	 * value of ui_size. Thanks to NFS.
	 * nfs_getattr() can call vinvalbuf() and in this case
	 * ubc_info is not set up to deal with that.
	 */

	/*
	 * create a vnode - vm_object association
	 * memory_object_create_named() creates a "named" reference on the
	 * memory object we hold this reference as long as the vnode is
	 * "alive."  Since memory_object_create_named() took its own reference
	 * on the vnode pager we passed it, we can drop the reference
	 * vnode_pager_setup() returned here.
	 */
	kret = memory_object_create_named(pager,
		(memory_object_size_t)uip->ui_size, &control);
	vnode_pager_deallocate(pager);
	if (kret != KERN_SUCCESS)
		panic("ubc_info_init: memory_object_create_named returned %d", kret);

	assert(control);
	uip->ui_control = control;	/* cache the value of the mo control */
	SET(uip->ui_flags, UI_HASOBJREF);	/* with a named reference */
	/* create a pager reference on the vnode */
	error = vnode_pager_vget(vp);
	if (error)
		panic("ubc_info_init: vnode_pager_vget error = %d", error);

	/* initialize the size */
	error = VOP_GETATTR(vp, &vattr, p->p_ucred, p);

	ubc_lock(vp);
	uip->ui_size = (error ? 0 : vattr.va_size);

done:
	CLR(vp->v_flag, VUINIT);
	if (ISSET(vp->v_flag, VUWANT)) {
		CLR(vp->v_flag, VUWANT);
		ubc_unlock(vp);
		wakeup((caddr_t)vp);
	} else
		ubc_unlock(vp);

	return (error);
}

/* Free the ubc_info */
static void
ubc_info_free(struct ubc_info *uip)
{
	struct ucred *credp;

	credp = uip->ui_ucred;
	if (credp != NOCRED) {
		uip->ui_ucred = NOCRED;
		crfree(credp);
	}

	if (uip->ui_control != MEMORY_OBJECT_CONTROL_NULL)
		memory_object_control_deallocate(uip->ui_control);

	zfree(ubc_info_zone, (vm_offset_t)uip);
}

void
ubc_info_deallocate(struct ubc_info *uip)
{
	assert(uip->ui_refcount > 0);

	if (uip->ui_refcount-- == 1) {
		struct vnode *vp;

		vp = uip->ui_vnode;
		if (ISSET(uip->ui_flags, UI_WANTED)) {
			CLR(uip->ui_flags, UI_WANTED);
			wakeup((caddr_t)&vp->v_ubcinfo);
		}

		ubc_info_free(uip);
	}
}

/*
 * Communicate with VM the size change of the file
 * returns 1 on success, 0 on failure
 */
int
ubc_setsize(struct vnode *vp, off_t nsize)
{
	off_t osize;	/* ui_size before change */
	off_t lastpg, olastpgend, lastoff;
	struct ubc_info *uip;
	memory_object_control_t control;
	kern_return_t kret;

	assert(nsize >= (off_t)0);

	if (UBCINVALID(vp))
		return (0);

	if (!UBCINFOEXISTS(vp))
		return (0);

	uip = vp->v_ubcinfo;
	osize = uip->ui_size;	/* call ubc_getsize() ??? */
	/* Update the size before flushing the VM */
	uip->ui_size = nsize;

	if (nsize >= osize)	/* Nothing more to do */
		return (1);	/* return success */

	/*
	 * When the file shrinks, invalidate the pages beyond the
	 * new size. Also get rid of garbage beyond nsize on the
	 * last page. The ui_size already has the nsize. This
	 * ensures that the pageout would not write beyond the new
	 * end of the file.
	 */

	lastpg = trunc_page_64(nsize);
	olastpgend = round_page_64(osize);
	control = uip->ui_control;
	assert(control);
	lastoff = (nsize & PAGE_MASK_64);

	/*
	 * If the length is a multiple of the page size, we need not flush;
	 * invalidating is sufficient
	 */
	if (!lastoff) {
		/* invalidate last page and old contents beyond nsize */
		kret = memory_object_lock_request(control,
		    (memory_object_offset_t)lastpg,
		    (memory_object_size_t)(olastpgend - lastpg),
		    MEMORY_OBJECT_RETURN_NONE, MEMORY_OBJECT_DATA_FLUSH,
		    VM_PROT_NO_CHANGE);
		if (kret != KERN_SUCCESS)
			printf("ubc_setsize: invalidate failed (error = %d)\n", kret);

		return ((kret == KERN_SUCCESS) ? 1 : 0);
	}

	/* flush the last page */
	kret = memory_object_lock_request(control,
				(memory_object_offset_t)lastpg,
				PAGE_SIZE_64,
				MEMORY_OBJECT_RETURN_DIRTY, FALSE,
				VM_PROT_NO_CHANGE);

	if (kret == KERN_SUCCESS) {
		/* invalidate last page and old contents beyond nsize */
		kret = memory_object_lock_request(control,
					(memory_object_offset_t)lastpg,
					(memory_object_size_t)(olastpgend - lastpg),
					MEMORY_OBJECT_RETURN_NONE, MEMORY_OBJECT_DATA_FLUSH,
					VM_PROT_NO_CHANGE);
		if (kret != KERN_SUCCESS)
			printf("ubc_setsize: invalidate failed (error = %d)\n", kret);
	} else
		printf("ubc_setsize: flush failed (error = %d)\n", kret);

	return ((kret == KERN_SUCCESS) ? 1 : 0);
}
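
/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * filesystem truncate path updates its on-disk length first, then tells
 * the VM about the new size:
 *
 *	if (ubc_setsize(vp, (off_t)new_length) == 0)
 *		printf("truncate: ubc_setsize failed\n");
 *
 * Growing a file returns success immediately; only a shrink triggers the
 * flush/invalidate of the pages beyond the new end of file.
 */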

/*
 * Get the size of the file
 */
off_t
ubc_getsize(struct vnode *vp)
{
	return (vp->v_ubcinfo->ui_size);
}

/*
 * Caller indicates that the object corresponding to the vnode
 * can not be cached in object cache. Make it so.
 * returns 1 on success, 0 on failure
 */
int
ubc_uncache(struct vnode *vp)
{
	kern_return_t kret;
	struct ubc_info *uip;
	int    recursed;
	memory_object_control_t control;
	memory_object_perf_info_data_t   perf;

	if (!UBCINFOEXISTS(vp))
		return (0);

	if ((recursed = ubc_busy(vp)) == 0)
		return (0);

	uip = vp->v_ubcinfo;

	assert(uip != UBC_INFO_NULL);

	/*
	 * AGE it so that vfree() can make sure that it
	 * would get recycled soon after the last reference is gone
	 * This will ensure that .nfs turds would not linger
	 */
	vagevp(vp);

	/* set the "do not cache" bit */
	SET(uip->ui_flags, UI_DONTCACHE);

	control = uip->ui_control;
	assert(control);

	perf.cluster_size = PAGE_SIZE; /* XXX use real cluster_size. */
	perf.may_cache = FALSE;
	kret = memory_object_change_attributes(control,
				MEMORY_OBJECT_PERFORMANCE_INFO,
				(memory_object_info_t) &perf,
				MEMORY_OBJECT_PERF_INFO_COUNT);
	if (kret != KERN_SUCCESS) {
		printf("ubc_uncache: memory_object_change_attributes_named "
			"kret = %d", kret);
		if (recursed == 1)
			ubc_unbusy(vp);
		return (0);
	}

	ubc_release_named(vp);

	if (recursed == 1)
		ubc_unbusy(vp);
	return (1);
}
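
/*
 * Illustrative note: the ".nfs turds" mentioned above are NFS
 * silly-renamed files; marking them UI_DONTCACHE lets the vnode be
 * recycled promptly once the last reference is gone.  A hypothetical
 * caller-side sketch:
 *
 *	if (UBCINFOEXISTS(vp))
 *		(void) ubc_uncache(vp);
 */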

/*
 * call ubc_clean() and ubc_uncache() on all the vnodes
 * for this mount point.
 * returns 1 on success, 0 on failure
 */
__private_extern__ int
ubc_umount(struct mount *mp)
{
	struct proc *p = current_proc();
	struct vnode *vp, *nvp;
	int ret = 1;

loop:
	simple_lock(&mntvnode_slock);
	for (vp = mp->mnt_vnodelist.lh_first; vp; vp = nvp) {
		if (vp->v_mount != mp) {
			simple_unlock(&mntvnode_slock);
			goto loop;
		}
		nvp = vp->v_mntvnodes.le_next;
		simple_unlock(&mntvnode_slock);
		if (UBCINFOEXISTS(vp)) {

			/*
			 * Must get a valid reference on the vnode
			 * before calling UBC functions
			 */
			if (vget(vp, 0, p)) {
				simple_lock(&mntvnode_slock);
				continue; /* move on to the next vnode */
			}
			ret &= ubc_clean(vp, 0); /* do not invalidate */
			ret &= ubc_uncache(vp);
			vrele(vp);
		}
		simple_lock(&mntvnode_slock);
	}
	simple_unlock(&mntvnode_slock);
	return (ret);
}

/*
 * Call ubc_umount() for all filesystems.
 * The list is traversed in reverse order
 * of mounting to avoid dependencies.
 */
__private_extern__ void
ubc_unmountall()
{
	struct mount *mp, *nmp;

	/*
	 * Since this only runs when rebooting, it is not interlocked.
	 */
	for (mp = mountlist.cqh_last; mp != (void *)&mountlist; mp = nmp) {
		nmp = mp->mnt_list.cqe_prev;
		(void) ubc_umount(mp);
	}
}

/* Get the credentials */
struct ucred *
ubc_getcred(struct vnode *vp)
{
	struct ubc_info *uip;

	uip = vp->v_ubcinfo;

	if (UBCINVALID(vp))
		return (NOCRED);

	return (uip->ui_ucred);
}

/*
 * Set the credentials
 * existing credentials are not changed
 * returns 1 on success and 0 on failure
 */
int
ubc_setcred(struct vnode *vp, struct proc *p)
{
	struct ubc_info *uip;
	struct ucred *credp;

	uip = vp->v_ubcinfo;

	if (UBCINVALID(vp))
		return (0);

	credp = uip->ui_ucred;
	if (credp == NOCRED) {
		crhold(p->p_ucred);
		uip->ui_ucred = p->p_ucred;
	}

	return (1);
}
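
/*
 * Illustrative sketch (hypothetical caller): a filesystem's read/write
 * path can capture the requesting process's credentials on first I/O so
 * that later pageouts, which run without a user context, can reuse them:
 *
 *	(void) ubc_setcred(vp, current_proc());
 *	credp = ubc_getcred(vp);
 */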

__private_extern__ memory_object_t
ubc_getpager(struct vnode *vp)
{
	struct ubc_info *uip;

	uip = vp->v_ubcinfo;

	if (UBCINVALID(vp))
		return (0);

	return (uip->ui_pager);
}

/*
 * Get the memory object associated with this vnode.
 * If the vnode was reactivated, the memory object would not exist.
 * Unless "do not reactivate" was specified, look it up using the pager.
 * If hold was requested, create an object reference if one does not
 * exist already.
 */
memory_object_control_t
ubc_getobject(struct vnode *vp, int flags)
{
	struct ubc_info *uip;
	int    recursed;
	memory_object_control_t control;

	if (UBCINVALID(vp))
		return (0);

	if (flags & UBC_FOR_PAGEOUT)
		return (vp->v_ubcinfo->ui_control);

	if ((recursed = ubc_busy(vp)) == 0)
		return (0);

	uip = vp->v_ubcinfo;
	control = uip->ui_control;

	if ((flags & UBC_HOLDOBJECT) && (!ISSET(uip->ui_flags, UI_HASOBJREF))) {

		/*
		 * Take a temporary reference on the ubc info so that it won't go
		 * away during our recovery attempt.
		 */
		ubc_lock(vp);
		uip->ui_refcount++;
		ubc_unlock(vp);
		if (memory_object_recover_named(control, TRUE) == KERN_SUCCESS) {
			SET(uip->ui_flags, UI_HASOBJREF);
		} else {
			control = MEMORY_OBJECT_CONTROL_NULL;
		}
		if (recursed == 1)
			ubc_unbusy(vp);
		ubc_info_deallocate(uip);

	} else {
		if (recursed == 1)
			ubc_unbusy(vp);
	}

	return (control);
}

__private_extern__ void
ubc_setpager(struct vnode *vp, memory_object_t pager)
{
	struct ubc_info *uip;

	uip = vp->v_ubcinfo;

	if (UBCINVALID(vp))
		return;

	uip->ui_pager = pager;
}

int
ubc_setflags(struct vnode * vp, int  flags)
{
	struct ubc_info *uip;

	if (UBCINVALID(vp))
		return (0);

	uip = vp->v_ubcinfo;

	SET(uip->ui_flags, flags);

	return (1);
}

int
ubc_clearflags(struct vnode * vp, int  flags)
{
	struct ubc_info *uip;

	if (UBCINVALID(vp))
		return (0);

	uip = vp->v_ubcinfo;

	CLR(uip->ui_flags, flags);

	return (1);
}

int
ubc_issetflags(struct vnode * vp, int  flags)
{
	struct ubc_info *uip;

	if (UBCINVALID(vp))
		return (0);

	uip = vp->v_ubcinfo;

	return (ISSET(uip->ui_flags, flags));
}

off_t
ubc_blktooff(struct vnode *vp, daddr_t blkno)
{
	off_t file_offset;
	int error;

	if (UBCINVALID(vp))
		return ((off_t)-1);

	error = VOP_BLKTOOFF(vp, blkno, &file_offset);
	if (error)
		file_offset = -1;

	return (file_offset);
}

daddr_t
ubc_offtoblk(struct vnode *vp, off_t offset)
{
	daddr_t blkno;
	int error = 0;

	if (UBCINVALID(vp)) {
		return ((daddr_t)-1);
	}

	error = VOP_OFFTOBLK(vp, offset, &blkno);
	if (error)
		blkno = -1;

	return (blkno);
}
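
/*
 * Illustrative round trip (hypothetical filesystem with 4KB logical
 * blocks): these are thin wrappers over VOP_BLKTOOFF()/VOP_OFFTOBLK(),
 * so one would expect
 *
 *	off_t   off = ubc_blktooff(vp, (daddr_t)3);	yields 12288
 *	daddr_t bn  = ubc_offtoblk(vp, (off_t)12288);	yields 3
 *
 * with (off_t)-1 or (daddr_t)-1 returned for invalid vnodes or VOP
 * errors.
 */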

/*
 * Cause the file data in VM to be pushed out to the storage
 * it also causes all currently valid pages to be released
 * returns 1 on success, 0 on failure
 */
int
ubc_clean(struct vnode *vp, int invalidate)
{
	off_t size;
	struct ubc_info *uip;
	memory_object_control_t control;
	kern_return_t kret;
	int flags = 0;

	if (UBCINVALID(vp))
		return (0);

	if (!UBCINFOEXISTS(vp))
		return (0);

	/*
	 * if invalidate was requested, write dirty data and then discard
	 * the resident pages
	 */
	if (invalidate)
		flags = (MEMORY_OBJECT_DATA_FLUSH | MEMORY_OBJECT_DATA_NO_CHANGE);

	uip = vp->v_ubcinfo;
	size = uip->ui_size;	/* call ubc_getsize() ??? */

	control = uip->ui_control;
	assert(control);

	vp->v_flag &= ~VHASDIRTY;

	/* Write the dirty data in the file and discard cached pages */
	kret = memory_object_lock_request(control,
				(memory_object_offset_t)0,
				(memory_object_size_t)round_page_64(size),
				MEMORY_OBJECT_RETURN_ALL, flags,
				VM_PROT_NO_CHANGE);

	if (kret != KERN_SUCCESS)
		printf("ubc_clean: clean failed (error = %d)\n", kret);

	return ((kret == KERN_SUCCESS) ? 1 : 0);
}

/*
 * Cause the file data in VM to be pushed out to the storage
 * currently valid pages are NOT invalidated
 * returns 1 on success, 0 on failure
 */
int
ubc_pushdirty(struct vnode *vp)
{
	off_t size;
	struct ubc_info *uip;
	memory_object_control_t control;
	kern_return_t kret;

	if (UBCINVALID(vp))
		return (0);

	if (!UBCINFOEXISTS(vp))
		return (0);

	uip = vp->v_ubcinfo;
	size = uip->ui_size;	/* call ubc_getsize() ??? */

	control = uip->ui_control;
	assert(control);

	vp->v_flag &= ~VHASDIRTY;

	/* Write the dirty data in the file */
	kret = memory_object_lock_request(control,
				(memory_object_offset_t)0,
				(memory_object_size_t)round_page_64(size),
				MEMORY_OBJECT_RETURN_DIRTY, FALSE,
				VM_PROT_NO_CHANGE);

	if (kret != KERN_SUCCESS)
		printf("ubc_pushdirty: flush failed (error = %d)\n", kret);

	return ((kret == KERN_SUCCESS) ? 1 : 0);
}

/*
 * Cause the file data in VM within the given range to be pushed out
 * to the storage; currently valid pages are NOT invalidated
 * returns 1 on success, 0 on failure
 */
int
ubc_pushdirty_range(struct vnode *vp, off_t offset, off_t size)
{
	struct ubc_info *uip;
	memory_object_control_t control;
	kern_return_t kret;

	if (UBCINVALID(vp))
		return (0);

	if (!UBCINFOEXISTS(vp))
		return (0);

	uip = vp->v_ubcinfo;

	control = uip->ui_control;
	assert(control);

	/* Write any dirty pages in the requested range of the file: */
	kret = memory_object_lock_request(control,
				(memory_object_offset_t)offset,
				(memory_object_size_t)round_page_64(size),
				MEMORY_OBJECT_RETURN_DIRTY, FALSE,
				VM_PROT_NO_CHANGE);

	if (kret != KERN_SUCCESS)
		printf("ubc_pushdirty_range: flush failed (error = %d)\n", kret);

	return ((kret == KERN_SUCCESS) ? 1 : 0);
}
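
/*
 * Illustrative usage (hypothetical caller): the offset is passed through
 * as given and the size is rounded up to whole pages, so flushing bytes
 * [8192, 8292) on a 4KB-page system pushes the single page at offset
 * 8192:
 *
 *	(void) ubc_pushdirty_range(vp, (off_t)8192, (off_t)100);
 */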

/*
 * Make sure the vm object does not vanish
 * returns 1 if the hold count was incremented
 * returns 0 if the hold count was not incremented
 * This return value should be used to balance
 * ubc_hold() and ubc_rele().
 */
int
ubc_hold(struct vnode *vp)
{
	struct ubc_info *uip;
	int    recursed;
	memory_object_control_t object;

	if (UBCINVALID(vp))
		return (0);

	if ((recursed = ubc_busy(vp)) == 0) {
		/* must be invalid or dying vnode */
		assert(UBCINVALID(vp) ||
			((vp->v_flag & VXLOCK) || (vp->v_flag & VTERMINATE)));
		return (0);
	}

	uip = vp->v_ubcinfo;
	assert(uip->ui_control != MEMORY_OBJECT_CONTROL_NULL);

	ubc_lock(vp);
	uip->ui_refcount++;
	ubc_unlock(vp);

	if (!ISSET(uip->ui_flags, UI_HASOBJREF)) {
		if (memory_object_recover_named(uip->ui_control, TRUE)
			!= KERN_SUCCESS) {
			if (recursed == 1)
				ubc_unbusy(vp);
			ubc_info_deallocate(uip);
			return (0);
		}
		SET(uip->ui_flags, UI_HASOBJREF);
	}
	if (recursed == 1)
		ubc_unbusy(vp);

	assert(uip->ui_refcount > 0);

	return (1);
}

/*
 * Drop the holdcount.
 * release the reference on the vm object if this is an "uncached"
 * ubc_info.
 */
void
ubc_rele(struct vnode *vp)
{
	struct ubc_info *uip;

	if (UBCINVALID(vp))
		return;

	if (!UBCINFOEXISTS(vp)) {
		/* nothing more to do for a dying vnode */
		if ((vp->v_flag & VXLOCK) || (vp->v_flag & VTERMINATE))
			return;
		panic("ubc_rele: can not");
	}

	uip = vp->v_ubcinfo;

	if (uip->ui_refcount == 1)
		panic("ubc_rele: ui_refcount");

	--uip->ui_refcount;

	if ((uip->ui_refcount == 1)
		&& ISSET(uip->ui_flags, UI_DONTCACHE))
		(void) ubc_release_named(vp);
}
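
/*
 * Illustrative balancing sketch (hypothetical caller): per the comment
 * above ubc_hold(), only a successful hold may be paired with a
 * ubc_rele():
 *
 *	int held = ubc_hold(vp);
 *	... use the memory object ...
 *	if (held)
 *		ubc_rele(vp);
 */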

/*
 * The vnode is mapped explicitly, mark it so.
 */
__private_extern__ void
ubc_map(struct vnode *vp)
{
	struct ubc_info *uip;

	if (UBCINVALID(vp))
		return;

	if (!UBCINFOEXISTS(vp))
		return;

	ubc_lock(vp);
	uip = vp->v_ubcinfo;

	SET(uip->ui_flags, UI_WASMAPPED);
	uip->ui_mapped = 1;
	ubc_unlock(vp);
}

/*
 * Release the memory object reference on the vnode
 * only if it is not in use
 * Return 1 if the reference was released, 0 otherwise.
 */
int
ubc_release_named(struct vnode *vp)
{
	struct ubc_info *uip;
	int    recursed;
	memory_object_control_t control;
	kern_return_t kret = KERN_FAILURE;

	if (UBCINVALID(vp))
		return (0);

	if ((recursed = ubc_busy(vp)) == 0)
		return (0);
	uip = vp->v_ubcinfo;

	/* can not release held or mapped vnodes */
	if (ISSET(uip->ui_flags, UI_HASOBJREF) &&
		(uip->ui_refcount == 1) && !uip->ui_mapped) {
		control = uip->ui_control;
		assert(control);
		CLR(uip->ui_flags, UI_HASOBJREF);
		kret = memory_object_release_name(control,
				MEMORY_OBJECT_RESPECT_CACHE);
	}

	if (recursed == 1)
		ubc_unbusy(vp);
	return ((kret != KERN_SUCCESS) ? 0 : 1);
}

/*
 * This function used to be called by extensions directly.  Some may
 * still exist with this behavior.  In those cases, we will do the
 * release as part of reclaiming or cleaning the vnode.  We don't
 * need anything explicit - so just stub this out until those callers
 * get cleaned up.
 */
void
ubc_release(struct vnode *vp)
{
	return;
}

/*
 * destroy the named reference for a given vnode
 */
__private_extern__ int
ubc_destroy_named(struct vnode *vp)
{
	memory_object_control_t control;
	struct ubc_info *uip;
	kern_return_t kret;

	/*
	 * We may already have had the object terminated
	 * and the ubcinfo released as a side effect of
	 * some earlier processing.  If so, pretend we did
	 * it, because it probably was a result of our
	 * efforts.
	 */
	if (!UBCINFOEXISTS(vp))
		return (1);

	uip = vp->v_ubcinfo;

	/* can not destroy held vnodes */
	if (uip->ui_refcount > 1)
		return (0);

	/*
	 * Terminate the memory object.
	 * memory_object_destroy() will result in
	 * vnode_pager_no_senders().
	 * That will release the pager reference
	 * and the vnode will move to the free list.
	 */
	control = ubc_getobject(vp, UBC_HOLDOBJECT);
	if (control != MEMORY_OBJECT_CONTROL_NULL) {

		if (ISSET(vp->v_flag, VTERMINATE))
			panic("ubc_destroy_named: already terminating");
		SET(vp->v_flag, VTERMINATE);

		kret = memory_object_destroy(control, 0);
		if (kret != KERN_SUCCESS)
			return (0);

		/*
		 * memory_object_destroy() is asynchronous
		 * with respect to vnode_pager_no_senders().
		 * wait for vnode_pager_no_senders() to clear
		 * VTERMINATE
		 */
		while (ISSET(vp->v_flag, VTERMINATE)) {
			SET(vp->v_flag, VTERMWANT);
			(void)tsleep((caddr_t)&vp->v_ubcinfo,
						 PINOD, "ubc_destroy_named", 0);
		}
	}
	return (1);
}

/*
 * Invalidate a range in the memory object that backs this
 * vnode. The offset is truncated to the page boundary and the
 * size is adjusted to include the last page in the range.
 */
int
ubc_invalidate(struct vnode *vp, off_t offset, size_t size)
{
	struct ubc_info *uip;
	memory_object_control_t control;
	kern_return_t kret;
	off_t toff;
	size_t tsize;

	if (UBCINVALID(vp))
		return (0);

	if (!UBCINFOEXISTS(vp))
		return (0);

	toff = trunc_page_64(offset);
	tsize = (size_t)(round_page_64(offset+size) - toff);
	uip = vp->v_ubcinfo;
	control = uip->ui_control;
	assert(control);

	/* invalidate pages in the range requested */
	kret = memory_object_lock_request(control,
				(memory_object_offset_t)toff,
				(memory_object_size_t)tsize,
				MEMORY_OBJECT_RETURN_NONE,
				(MEMORY_OBJECT_DATA_NO_CHANGE | MEMORY_OBJECT_DATA_FLUSH),
				VM_PROT_NO_CHANGE);
	if (kret != KERN_SUCCESS)
		printf("ubc_invalidate: invalidate failed (error = %d)\n", kret);

	return ((kret == KERN_SUCCESS) ? 1 : 0);
}

/*
 * Find out whether a vnode is in use by UBC
 * Returns 1 if file is in use by UBC, 0 if not
 */
int
ubc_isinuse(struct vnode *vp, int tookref)
{
	int busycount = tookref ? 2 : 1;

	if (!UBCINFOEXISTS(vp))
		return (0);

	if (tookref == 0) {
		printf("ubc_isinuse: called without a valid reference"
		    ": v_tag = %d\n", vp->v_tag);
		vprint("ubc_isinuse", vp);
		return (0);
	}

	if (vp->v_usecount > busycount)
		return (1);

	if ((vp->v_usecount == busycount)
		&& (vp->v_ubcinfo->ui_mapped == 1))
		return (1);
	else
		return (0);
}
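
/*
 * Illustrative usecount arithmetic: with tookref == 1 the caller's own
 * vnode reference accounts for one unit of v_usecount (busycount == 2,
 * counting the reference UBC expects to be there).  A v_usecount above
 * busycount, or equal to it with the file mapped, reads as "in use":
 *
 *	if (ubc_isinuse(vp, 1))
 *		return (EBUSY);		(hypothetical exclusive-open check)
 */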

/*
 * The backdoor routine to clear the ui_mapped.
 * MUST only be called by the VM
 *
 * Note that this routine is not called under funnel. There are numerous
 * things about the calling sequence that make this work on SMP.
 * Any code change in those paths can break this.
 */
__private_extern__ void
ubc_unmap(struct vnode *vp)
{
	struct ubc_info *uip;
	boolean_t	funnel_state;

	if (UBCINVALID(vp))
		return;

	if (!UBCINFOEXISTS(vp))
		return;

	ubc_lock(vp);
	uip = vp->v_ubcinfo;
	uip->ui_mapped = 0;
	if ((uip->ui_refcount > 1) || !ISSET(uip->ui_flags, UI_DONTCACHE)) {
		ubc_unlock(vp);
		return;
	}
	ubc_unlock(vp);

	funnel_state = thread_funnel_set(kernel_flock, TRUE);
	(void) ubc_release_named(vp);
	(void) thread_funnel_set(kernel_flock, funnel_state);
}

kern_return_t
ubc_page_op(
	struct vnode	*vp,
	off_t		f_offset,
	int		ops,
	ppnum_t		*phys_entryp,
	int		*flagsp)
{
	memory_object_control_t		control;

	control = ubc_getobject(vp, UBC_FLAGS_NONE);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	return (memory_object_page_op(control,
				      (memory_object_offset_t)f_offset,
				      ops,
				      phys_entryp,
				      flagsp));
}

kern_return_t
ubc_create_upl(
	struct vnode	*vp,
	off_t		f_offset,
	long		bufsize,
	upl_t		*uplp,
	upl_page_info_t	**plp,
	int		uplflags)
{
	memory_object_control_t		control;
	int				count;
	int				ubcflags;
	kern_return_t			kr;

	if (bufsize & 0xfff)
		return KERN_INVALID_ARGUMENT;

	if (uplflags & UPL_FOR_PAGEOUT) {
		uplflags &= ~UPL_FOR_PAGEOUT;
		ubcflags  =  UBC_FOR_PAGEOUT;
	} else
		ubcflags = UBC_FLAGS_NONE;

	control = ubc_getobject(vp, ubcflags);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	uplflags |= (UPL_NO_SYNC|UPL_CLEAN_IN_PLACE|UPL_SET_INTERNAL);
	count = 0;
	kr = memory_object_upl_request(control, f_offset, bufsize,
								   uplp, NULL, &count, uplflags);
	if (plp != NULL)
		*plp = UPL_GET_INTERNAL_PAGE_LIST(*uplp);
	return kr;
}

kern_return_t
ubc_upl_map(
	upl_t		upl,
	vm_offset_t	*dst_addr)
{
	return (vm_upl_map(kernel_map, upl, dst_addr));
}

kern_return_t
ubc_upl_unmap(
	upl_t	upl)
{
	return (vm_upl_unmap(kernel_map, upl));
}

kern_return_t
ubc_upl_commit(
	upl_t		upl)
{
	upl_page_info_t	*pl;
	kern_return_t	kr;

	pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
	kr = upl_commit(upl, pl, MAX_UPL_TRANSFER);
	upl_deallocate(upl);
	return kr;
}
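
/*
 * Illustrative end-to-end UPL sketch (hypothetical caller; error handling
 * omitted, and passing 0 for uplflags is an assumption for brevity):
 * create a one-page UPL over the start of the file, map it into the
 * kernel, touch the data, then unmap and commit:
 *
 *	upl_t upl;
 *	upl_page_info_t *pl;
 *	vm_offset_t addr;
 *
 *	if (ubc_create_upl(vp, (off_t)0, PAGE_SIZE, &upl, &pl, 0)
 *	    == KERN_SUCCESS) {
 *		(void) ubc_upl_map(upl, &addr);
 *		... read or modify the page at addr ...
 *		(void) ubc_upl_unmap(upl);
 *		(void) ubc_upl_commit(upl);
 *	}
 */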

kern_return_t
ubc_upl_commit_range(
	upl_t		upl,
	vm_offset_t	offset,
	vm_size_t	size,
	int		flags)
{
	upl_page_info_t	*pl;
	boolean_t	empty;
	kern_return_t	kr;

	if (flags & UPL_COMMIT_FREE_ON_EMPTY)
		flags |= UPL_COMMIT_NOTIFY_EMPTY;

	pl = UPL_GET_INTERNAL_PAGE_LIST(upl);

	kr = upl_commit_range(upl, offset, size, flags,
						  pl, MAX_UPL_TRANSFER, &empty);

	if ((flags & UPL_COMMIT_FREE_ON_EMPTY) && empty)
		upl_deallocate(upl);

	return kr;
}

kern_return_t
ubc_upl_abort_range(
	upl_t		upl,
	vm_offset_t	offset,
	vm_size_t	size,
	int		abort_flags)
{
	kern_return_t	kr;
	boolean_t	empty = FALSE;

	if (abort_flags & UPL_ABORT_FREE_ON_EMPTY)
		abort_flags |= UPL_ABORT_NOTIFY_EMPTY;

	kr = upl_abort_range(upl, offset, size, abort_flags, &empty);

	if ((abort_flags & UPL_ABORT_FREE_ON_EMPTY) && empty)
		upl_deallocate(upl);

	return kr;
}

kern_return_t
ubc_upl_abort(
	upl_t		upl,
	int		abort_type)
{
	kern_return_t	kr;

	kr = upl_abort(upl, abort_type);
	upl_deallocate(upl);
	return kr;
}

upl_page_info_t *
ubc_upl_pageinfo(
	upl_t		upl)
{
	return (UPL_GET_INTERNAL_PAGE_LIST(upl));
}