/*
 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Author: Umesh Vaishampayan [umeshv@apple.com]
 *	05-Aug-1999	umeshv	Created.
 *
 * Functions related to Unified Buffer cache.
 */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/ucred.h>

#include <mach/mach_types.h>
#include <mach/memory_object_types.h>

#include <kern/zalloc.h>
#if DIAGNOSTIC
#define assert(cond)	\
	if (!(cond)) panic("%s:%d (%s)", __FILE__, __LINE__, # cond)
#else
#include <kern/assert.h>
#endif /* DIAGNOSTIC */
struct zone	*ubc_info_zone;
#if DIAGNOSTIC
#define USHOULDNOT(fun)	panic("%s: should not", (fun));
#else
#define USHOULDNOT(fun)
#endif /* DIAGNOSTIC */
static void *_ubc_getobject(struct vnode *, int);
static void ubc_lock(struct vnode *);
static void ubc_unlock(struct vnode *);
ubc_getobjref(struct vnode *vp)
{
	register struct ubc_info *uip;

	if (pager_cport = (void *)vnode_pager_lookup(vp, uip->ui_pager))
		object = (void *)vm_object_lookup(pager_cport);

	if (object != uip->ui_object) {
		Debugger("ubc_getobjref: object changed");
		uip->ui_object = object;
	}

	if (uip->ui_object == NULL)
		panic("ubc_getobjref: lost object");
}
/*
 * Initialization of the zone for Unified Buffer Cache.
 */
	i = (vm_size_t) sizeof (struct ubc_info);
	/* XXX  the number of elements should be tied in to maxvnodes */
	ubc_info_zone = zinit (i, 10000*i, 8192, "ubc_info zone");
/*
 * Initialize a ubc_info structure for a vnode.
 */
ubc_info_init(struct vnode *vp)
{
	register struct ubc_info *uip;
	struct proc *p = current_proc();

	assert(UBCISVALID(vp));

	if (ISSET(vp->v_flag, VUINIT)) {
		/*
		 * another thread is already doing this
		 */
		while (ISSET(vp->v_flag, VUINIT)) {
			SET(vp->v_flag, VUWANT); /* XXX overloaded! */
			(void) tsleep((caddr_t)vp, PINOD, "ubcinfo", 0);
		}
	}
	SET(vp->v_flag, VUINIT);

	if ((uip == UBC_INFO_NULL) || (uip == UBC_NOINFO)) {
		uip = (struct ubc_info *) zalloc(ubc_info_zone);
		bzero(uip, sizeof(struct ubc_info));
		SET(uip->ui_flags, UI_INITED);
		uip->ui_ucred = NOCRED;
	}

	assert(uip->ui_flags != UI_NONE);
	assert(uip->ui_vnode == vp);

	if (ISSET(uip->ui_flags, UI_HASPAGER))

	/* now set this ubc_info in the vnode */

	SET(uip->ui_flags, UI_HASPAGER);
	pager = (void *)vnode_pager_setup(vp, uip->ui_pager);

	/*
	 * Cannot use VOP_GETATTR() to get an accurate value of ui_size,
	 * thanks to NFS: nfs_getattr() can call vinvalbuf(), and at that
	 * point ubc_info is not set up to deal with it.
	 */

	/* create a vm_object association */
	kret = vm_object_create_nomap(pager, (vm_object_offset_t)uip->ui_size);
	if (kret != KERN_SUCCESS)
		panic("ubc_info_init: vm_object_create_nomap returned %d", kret);

	/* _ubc_getobject() gets a reference on the memory object */
	if (_ubc_getobject(vp, 0) == NULL)
		panic("ubc_info_init: lost vmobject : uip = 0X%08x", uip);

	/*
	 * vm_object_allocate() called from vm_object_create_nomap()
	 * created the object with a refcount of 1.
	 * Need to drop the reference gained by vm_object_lookup().
	 */
	vm_object_deallocate(uip->ui_object);

	/* create a pager reference on the vnode */
	error = vget(vp, LK_INTERLOCK, p);
		panic("ubc_info_init: vget error = %d", error);

	/* initialize the size */
	error = VOP_GETATTR(vp, &vattr, p->p_ucred, p);
	uip->ui_size = (error ? 0 : vattr.va_size);

	CLR(vp->v_flag, VUINIT);
	if (ISSET(vp->v_flag, VUWANT)) {
		CLR(vp->v_flag, VUWANT);
/* Free the ubc_info */
ubc_info_free(struct vnode *vp)
{
	register struct ubc_info *uip;

	vp->v_ubcinfo = UBC_INFO_NULL;
	credp = uip->ui_ucred;
	if (credp != NOCRED) {
		uip->ui_ucred = NOCRED;
	}
	zfree(ubc_info_zone, (vm_offset_t)uip);
}
/*
 * Communicate to the VM the size change of the file.
 * returns 1 on success, 0 on failure
 */
ubc_setsize(struct vnode *vp, off_t nsize)
{
	off_t osize;	/* ui_size before change */
	off_t lastpg, olastpgend, lastoff;
	struct ubc_info *uip;

	assert(nsize >= (off_t)0);

	if (!UBCINFOEXISTS(vp))

	osize = uip->ui_size;	/* call ubc_getsize() ??? */
	/* Update the size before flushing the VM */
	uip->ui_size = nsize;

	if (nsize >= osize)	/* Nothing more to do */

	/*
	 * When the file shrinks, invalidate the pages beyond the
	 * new size.  Also get rid of garbage beyond nsize on the
	 * last page.  The ui_size already has the nsize, which
	 * ensures that the pageout would not write beyond the new
	 * size.
	 */
	didhold = ubc_hold(vp);
	lastpg = trunc_page_64(nsize);
	olastpgend = round_page_64(osize);
	object = _ubc_getobject(vp, UBC_NOREACTIVATE);
	lastoff = (nsize & PAGE_MASK_64);

	/*
	 * If the new size is a multiple of the page size, there is
	 * nothing to flush; invalidating is sufficient.
	 */

	/*
	 * memory_object_lock_request() drops an object
	 * reference; gain a reference before calling it.
	 */

	/* invalidate last page and old contents beyond nsize */
	kret = memory_object_lock_request(object,
		(vm_object_offset_t)lastpg,
		(memory_object_size_t)(olastpgend - lastpg),
		MEMORY_OBJECT_RETURN_NONE, TRUE,
		VM_PROT_NO_CHANGE, MACH_PORT_NULL);
	if (kret != KERN_SUCCESS)
		printf("ubc_setsize: invalidate failed (error = %d)\n", kret);

	return ((kret == KERN_SUCCESS) ? 1 : 0);

	/*
	 * memory_object_lock_request() drops an object
	 * reference; gain a reference before calling it.
	 */

	/* flush the last page */
	kret = memory_object_lock_request(object,
		(vm_object_offset_t)lastpg,
		MEMORY_OBJECT_RETURN_DIRTY, FALSE,
		VM_PROT_NO_CHANGE, MACH_PORT_NULL);

	if (kret == KERN_SUCCESS) {
		/*
		 * memory_object_lock_request() drops an object
		 * reference; gain a reference before calling it.
		 */

		/* invalidate last page and old contents beyond nsize */
		kret = memory_object_lock_request(object,
			(vm_object_offset_t)lastpg,
			(memory_object_size_t)(olastpgend - lastpg),
			MEMORY_OBJECT_RETURN_NONE, TRUE,
			VM_PROT_NO_CHANGE, MACH_PORT_NULL);
		if (kret != KERN_SUCCESS)
			printf("ubc_setsize: invalidate failed (error = %d)\n", kret);
		printf("ubc_setsize: flush failed (error = %d)\n", kret);

	return ((kret == KERN_SUCCESS) ? 1 : 0);
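
/*
 * Example (editorial sketch, not part of the original source): a file
 * system truncate path is expected to update its own notion of the file
 * size and then call ubc_setsize() so the VM can flush/invalidate pages
 * beyond the new EOF.  myfs_truncate(), VTOMYI() and i_size are
 * hypothetical names.
 */
#if 0	/* illustrative only */
int
myfs_truncate(struct vnode *vp, off_t length)
{
	struct myfs_inode *ip = VTOMYI(vp);	/* hypothetical */

	ip->i_size = length;			/* hypothetical on-disk size */
	if (ubc_setsize(vp, length) == 0)	/* 1 on success, 0 on failure */
		printf("myfs_truncate: ubc_setsize failed\n");
	return (0);
}
#endif
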
/*
 * Get the size of the file.
 * For local file systems the size is locally cached.  For NFS
 * there might be a network transaction for this.
 */
ubc_getsize(struct vnode *vp)
{
	/* XXX deal with NFS */
	return (vp->v_ubcinfo->ui_size);
}
/* lock for changes to struct UBC */
static void
ubc_lock(struct vnode *vp)
{
	/* For now, just use the v_interlock */
	simple_lock(&vp->v_interlock);
}

static void
ubc_unlock(struct vnode *vp)
{
	/* For now, just use the v_interlock */
	simple_unlock(&vp->v_interlock);
}
/*
 * The caller indicates that the object corresponding to the vnode
 * cannot be cached in the object cache.  Make it so.
 * returns 1 on success, 0 on failure
 *
 * Caller of ubc_uncache() MUST have a valid reference on the vnode.
 */
ubc_uncache(struct vnode *vp)
{
	struct ubc_info *uip;
	memory_object_perf_info_data_t perf;

	if (!UBCINFOEXISTS(vp))

	assert(uip != UBC_INFO_NULL);

	/*
	 * AGE it so that vfree() can make sure that it
	 * gets recycled soon after the last reference is gone.
	 * This ensures that .nfs turds do not linger.
	 */

	/* set the "do not cache" bit */
	SET(uip->ui_flags, UI_DONTCACHE);

	didhold = ubc_hold(vp);

	object = _ubc_getobject(vp, UBC_NOREACTIVATE);

	/*
	 * memory_object_change_attributes() drops an object
	 * reference; gain a reference before calling it.
	 */

	perf.cluster_size = PAGE_SIZE; /* XXX use real cluster_size. */
	perf.may_cache = FALSE;
	kret = memory_object_change_attributes(object,
			MEMORY_OBJECT_PERFORMANCE_INFO,
			(memory_object_info_t) &perf,
			MEMORY_OBJECT_PERF_INFO_COUNT,

	if (kret != KERN_SUCCESS) {
		panic("ubc_uncache: memory_object_change_attributes "
#endif /* DIAGNOSTIC */
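
/*
 * Example (editorial sketch): per the comment above, a caller of
 * ubc_uncache() must already hold a valid vnode reference; a pattern
 * like the one below is assumed, and the error handling shown is
 * hypothetical.
 */
#if 0	/* illustrative only */
	if (vget(vp, 0, p) == 0) {
		if (ubc_uncache(vp) == 0)	/* 1 on success, 0 on failure */
			printf("could not mark vnode uncached\n");
		vrele(vp);
	}
#endif
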
/*
 * Call ubc_clean() and ubc_uncache() on all the vnodes
 * for this mount point.
 * returns 1 on success, 0 on failure
 */
ubc_umount(struct mount *mp)
{
	struct proc *p = current_proc();
	struct vnode *vp, *nvp;

	simple_lock(&mntvnode_slock);
	for (vp = mp->mnt_vnodelist.lh_first; vp; vp = nvp) {
		if (vp->v_mount != mp) {
			simple_unlock(&mntvnode_slock);
		}
		nvp = vp->v_mntvnodes.le_next;
		simple_unlock(&mntvnode_slock);
		if (UBCINFOEXISTS(vp)) {
			ret &= ubc_clean(vp, 0);	/* do not invalidate */
			ret &= ubc_uncache(vp);
		}
		simple_lock(&mntvnode_slock);
	}
	simple_unlock(&mntvnode_slock);
/*
 * Call ubc_umount() for all filesystems.
 * The list is traversed in reverse order
 * of mounting to avoid dependencies.
 */
	struct mount *mp, *nmp;

	/*
	 * Since this only runs when rebooting, it is not interlocked.
	 */
	for (mp = mountlist.cqh_last; mp != (void *)&mountlist; mp = nmp) {
		nmp = mp->mnt_list.cqe_prev;
		(void) ubc_umount(mp);
	}
/* Get the credentials */
ubc_getcred(struct vnode *vp)
{
	struct ubc_info *uip;

	if (UBCINVALID(vp)) {

	return (uip->ui_ucred);
/*
 * Set the credentials.
 * Existing credentials are not changed.
 * returns 1 on success and 0 on failure
 */
ubc_setcred(struct vnode *vp, struct proc *p)
{
	struct ubc_info *uip;

	if (UBCINVALID(vp)) {
		USHOULDNOT("ubc_setcred");

	credp = uip->ui_ucred;
	if (credp == NOCRED) {
		uip->ui_ucred = p->p_ucred;
ubc_getpager(struct vnode *vp)
{
	struct ubc_info *uip;

	if (UBCINVALID(vp)) {
		USHOULDNOT("ubc_getpager");

	return (uip->ui_pager);
/*
 * Get the memory object associated with this vnode.
 * If the vnode was reactivated, the memory object would not exist.
 * Unless "do not reactivate" was specified, look it up using the pager.
 * vm_object_lookup() creates a reference on the memory object.
 * If hold was requested, create an object reference if one does not
 * exist already.
 */
static void *
_ubc_getobject(struct vnode *vp, int flags)
{
	struct ubc_info *uip;

	object = uip->ui_object;

	if ((object == NULL) && ISSET(uip->ui_flags, UI_HASPAGER)
		&& !(flags & UBC_NOREACTIVATE)) {

		if (ISSET(uip->ui_flags, UI_HASOBJREF))
			panic("ubc_getobject: lost object");

		if (pager_cport = (void *)vnode_pager_lookup(vp, uip->ui_pager)) {
			object = (void *)vm_object_lookup(pager_cport);

			if ((uip->ui_object) && (uip->ui_object != object))
				Debugger("_ubc_getobject: object changed");

			uip->ui_object = object;

		SET(uip->ui_flags, UI_HASOBJREF);

	if ((flags & UBC_HOLDOBJECT)
		&& (object != NULL)) {
		if (!ISSET(uip->ui_flags, UI_HASOBJREF)) {

			SET(uip->ui_flags, UI_HASOBJREF);

	return (uip->ui_object);
ubc_getobject(struct vnode *vp, int flags)
{
	struct ubc_info *uip;

	if (UBCINVALID(vp)) {

	object = _ubc_getobject(vp, flags);

	if (!ISSET(uip->ui_flags, (UI_HASOBJREF|UI_WASMAPPED))
		&& !(uip->ui_holdcnt)) {
		if (!(flags & UBC_PAGINGOP))
			panic("ubc_getobject: lost reference");
ubc_setpager(struct vnode *vp, void *pager)
{
	struct ubc_info *uip;

	if (UBCINVALID(vp)) {
		USHOULDNOT("ubc_setpager");

	uip->ui_pager = pager;
ubc_setflags(struct vnode * vp, int flags)
{
	struct ubc_info *uip;

	if (UBCINVALID(vp)) {
		USHOULDNOT("ubc_setflags");

	SET(uip->ui_flags, flags);
ubc_clearflags(struct vnode * vp, int flags)
{
	struct ubc_info *uip;

	if (UBCINVALID(vp)) {
		USHOULDNOT("ubc_clearflags");

	CLR(uip->ui_flags, flags);
ubc_issetflags(struct vnode * vp, int flags)
{
	struct ubc_info *uip;

	if (UBCINVALID(vp)) {
		USHOULDNOT("ubc_issetflags");

	return (ISSET(uip->ui_flags, flags));
ubc_blktooff(struct vnode *vp, daddr_t blkno)
{
	if (UBCINVALID(vp)) {
		USHOULDNOT("ubc_blktooff");

	error = VOP_BLKTOOFF(vp, blkno, &file_offset);

	return (file_offset);
ubc_offtoblk(struct vnode *vp, off_t offset)
{
	if (UBCINVALID(vp)) {
		return ((daddr_t)-1);
	}

	error = VOP_OFFTOBLK(vp, offset, &blkno);
/*
 * Cause the file data in VM to be pushed out to the storage.
 * It also causes all currently valid pages to be released.
 * returns 1 on success, 0 on failure
 */
ubc_clean(struct vnode *vp, int invalidate)
{
	struct ubc_info *uip;

	if (!UBCINFOEXISTS(vp))

	/*
	 * if invalidate was requested, write dirty data and then discard
	 * the cached pages
	 */
		flags = (MEMORY_OBJECT_DATA_FLUSH | MEMORY_OBJECT_DATA_NO_CHANGE);

	didhold = ubc_hold(vp);

	size = uip->ui_size;	/* call ubc_getsize() ??? */

	object = _ubc_getobject(vp, UBC_NOREACTIVATE);

	/*
	 * memory_object_lock_request() drops an object
	 * reference; gain a reference before calling it.
	 */

	vp->v_flag &= ~VHASDIRTY;

	/* Write the dirty data in the file and discard cached pages */
	kret = memory_object_lock_request(object,
		(vm_object_offset_t)0,
		(memory_object_size_t)round_page_64(size),
		MEMORY_OBJECT_RETURN_ALL, flags,
		VM_PROT_NO_CHANGE, MACH_PORT_NULL);

	if (kret != KERN_SUCCESS) {
		printf("ubc_clean: clean failed (error = %d)\n", kret);
	}

	return ((kret == KERN_SUCCESS) ? 1 : 0);
}
/*
 * Cause the file data in VM to be pushed out to the storage.
 * Currently valid pages are NOT invalidated.
 * returns 1 on success, 0 on failure
 */
ubc_pushdirty(struct vnode *vp)
{
	struct ubc_info *uip;

	if (!UBCINFOEXISTS(vp))

	didhold = ubc_hold(vp);

	size = uip->ui_size;	/* call ubc_getsize() ??? */

	object = _ubc_getobject(vp, UBC_NOREACTIVATE);

	/*
	 * memory_object_lock_request() drops an object
	 * reference; gain a reference before calling it.
	 */

	vp->v_flag &= ~VHASDIRTY;

	/* Write the dirty data in the file; cached pages remain valid */
	kret = memory_object_lock_request(object,
		(vm_object_offset_t)0,
		(memory_object_size_t)round_page_64(size),
		MEMORY_OBJECT_RETURN_DIRTY, FALSE,
		VM_PROT_NO_CHANGE, MACH_PORT_NULL);

	if (kret != KERN_SUCCESS) {
		printf("ubc_pushdirty: flush failed (error = %d)\n", kret);
	}

	return ((kret == KERN_SUCCESS) ? 1 : 0);
}
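
/*
 * Example (editorial sketch): a VOP_FSYNC implementation could use
 * ubc_pushdirty() to write UBC-cached dirty pages while leaving them
 * valid, before dealing with its buffer-cache blocks.  The surrounding
 * fsync routine is assumed, not shown here.
 */
#if 0	/* illustrative only */
	if (UBCINFOEXISTS(vp) && (ubc_pushdirty(vp) == 0))
		printf("fsync: ubc_pushdirty failed\n");
#endif
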
/*
 * Make sure the vm object does not vanish.
 * returns 1 if the hold count was incremented
 * returns 0 if the hold count was not incremented
 * This return value should be used to balance
 * ubc_hold() and ubc_rele().
 */
ubc_hold(struct vnode *vp)
{
	struct ubc_info *uip;

	if (!UBCINFOEXISTS(vp)) {
		/* nothing more to do for a dying vnode */
		if ((vp->v_flag & VXLOCK) || (vp->v_flag & VTERMINATE))

		vp->v_ubcinfo = UBC_INFO_NULL;

	object = _ubc_getobject(vp, UBC_NOREACTIVATE);

	if (uip->ui_holdcnt++ == 0)

	if (uip->ui_holdcnt < 0)
		panic("ubc_hold: ui_holdcnt");
/* release the reference on the vm object */
ubc_rele(struct vnode *vp)
{
	struct ubc_info *uip;

	if (!UBCINFOEXISTS(vp)) {
		/* nothing more to do for a dying vnode */
		if ((vp->v_flag & VXLOCK) || (vp->v_flag & VTERMINATE))

		panic("ubc_rele: can not");

	/* get the object before losing the hold count */
	object = _ubc_getobject(vp, UBC_NOREACTIVATE);

	if (uip->ui_holdcnt == 0)
		panic("ubc_rele: ui_holdcnt");

	if (--uip->ui_holdcnt == 0) {
		/* If the object is already dead do nothing */
		vm_object_deallocate(object);

		printf("ubc_rele: null object for %x", vp);
#endif /* DIAGNOSTIC */
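
/*
 * Example (usage pattern also seen elsewhere in this file): the return
 * value of ubc_hold() records whether the hold count was actually
 * incremented, and only in that case is ubc_rele() called, keeping the
 * pair balanced.
 */
#if 0	/* illustrative only */
	int didhold;

	didhold = ubc_hold(vp);
	/* ... operate on the vnode's memory object ... */
	if (didhold)
		ubc_rele(vp);
#endif
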
/*
 * The vnode is mapped explicitly.
 * Mark it so, and release the vm object reference gained earlier.
 */
ubc_map(struct vnode *vp)
{
	struct ubc_info *uip;

	if (UBCINVALID(vp)) {

	if (!UBCINFOEXISTS(vp))
		panic("ubc_map: can not");

	SET(uip->ui_flags, UI_WASMAPPED);

	/*
	 * Do not release the ubc reference on the
	 * memory object right away.  Let vnreclaim
	 * deal with it.
	 */

	/*
	 * Release the ubc reference.  The memory object cache
	 * is responsible for caching this object now.
	 */
	if (ISSET(uip->ui_flags, UI_HASOBJREF)) {
		object = _ubc_getobject(vp, UBC_NOREACTIVATE);
		CLR(uip->ui_flags, UI_HASOBJREF);
		vm_object_deallocate(object);
/*
 * Release the memory object reference on the vnode
 * only if it is not in use.
 * Return 1 if the reference was released, 0 otherwise.
 */
ubc_release(struct vnode *vp)
{
	struct ubc_info *uip;

	if (!UBCINFOEXISTS(vp))
		panic("ubc_release: can not");

	uip = vp->v_ubcinfo;

	/* can not release held vnodes */
	if (uip->ui_holdcnt)

	if (ISSET(uip->ui_flags, UI_HASOBJREF)) {
		object = _ubc_getobject(vp, UBC_NOREACTIVATE);
		CLR(uip->ui_flags, UI_HASOBJREF);
		vm_object_deallocate(object);
/*
 * Invalidate a range in the memory object that backs this
 * vnode.  The offset is truncated to the page boundary and the
 * size is adjusted to include the last page in the range.
 */
ubc_invalidate(struct vnode *vp, off_t offset, size_t size)
{
	struct ubc_info *uip;

	if (!UBCINFOEXISTS(vp))
		panic("ubc_invalidate: can not");

	didhold = ubc_hold(vp);
	toff = trunc_page_64(offset);
	tsize = (size_t)(round_page_64(offset+size) - toff);
	uip = vp->v_ubcinfo;
	object = _ubc_getobject(vp, UBC_NOREACTIVATE);

	/*
	 * memory_object_lock_request() drops an object
	 * reference; gain a reference before calling it.
	 */

	/* invalidate pages in the range requested */
	kret = memory_object_lock_request(object,
		(vm_object_offset_t)toff,
		(memory_object_size_t)tsize,
		MEMORY_OBJECT_RETURN_NONE,
		(MEMORY_OBJECT_DATA_NO_CHANGE | MEMORY_OBJECT_DATA_FLUSH),
		VM_PROT_NO_CHANGE, MACH_PORT_NULL);
	if (kret != KERN_SUCCESS)
		printf("ubc_invalidate: invalidate failed (error = %d)\n", kret);

	return ((kret == KERN_SUCCESS) ? 1 : 0);
}
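
/*
 * Example (editorial sketch): because the offset is truncated down and
 * the end is rounded up to page boundaries, a call like the one below
 * invalidates every whole page overlapping the byte range.  With 4K
 * pages, offset 5000 and size 5000 become toff = 4096 and tsize = 8192,
 * i.e. the two pages covering [4096, 12288).
 */
#if 0	/* illustrative only */
	(void) ubc_invalidate(vp, (off_t)5000, (size_t)5000);
#endif
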
/*
 * Find out whether a vnode is in use by UBC.
 * Returns 1 if file is in use by UBC, 0 if not
 */
ubc_isinuse(struct vnode *vp, int tookref)
{
	int busycount = tookref ? 2 : 1;

	if (!UBCINFOEXISTS(vp))

	if (vp->v_usecount > busycount)

	if ((vp->v_usecount == busycount)
		&& (vp->v_ubcinfo->ui_mapped == 1))
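
/*
 * Example (editorial sketch): a file system can consult ubc_isinuse()
 * to see whether a file is busy through UBC; tookref is 1 when the
 * caller has already taken its own use count on the vnode.  The EBUSY
 * policy shown is hypothetical.
 */
#if 0	/* illustrative only */
	if (ubc_isinuse(vp, 1))
		return (EBUSY);
#endif
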
/* -- UGLY HACK ALERT -- */
/*
 * The backdoor routine to clear the UI_WASMAPPED bit.
 * MUST only be called by the VM.
 *
 * Note that this routine is not under the funnel.  There are numerous
 * things about the calling sequence that make this work on SMP.
 * Any code change in those paths can break this.
 *
 * This will be replaced soon.
 */
ubc_unmap(struct vnode *vp)
{
	struct ubc_info *uip;

	if (UBCINVALID(vp)) {

	if (!UBCINFOEXISTS(vp))
		panic("ubc_unmap: can not");

	uip = vp->v_ubcinfo;