/*
 * Copyright (c) 1999-2002 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Author: Umesh Vaishampayan [umeshv@apple.com]
 *	05-Aug-1999	umeshv	Created.
 *
 * Functions related to Unified Buffer cache.
 *
 * Caller of UBC functions MUST have a valid reference on the vnode.
 */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/ucred.h>

#include <mach/mach_types.h>
#include <mach/memory_object_types.h>

#include <kern/zalloc.h>
#if DIAGNOSTIC
#define assert(cond)    \
    ((void) ((cond) ? 0 : panic("%s:%d (%s)", __FILE__, __LINE__, # cond)))
#else
#include <kern/assert.h>
#endif /* DIAGNOSTIC */
struct zone	*ubc_info_zone;
/* lock for changes to struct UBC */
static __inline__ void
ubc_lock(struct vnode *vp)
{
        /* For now, just use the v_interlock */
        simple_lock(&vp->v_interlock);
}
static __inline__ void
ubc_unlock(struct vnode *vp)
{
        /* For now, just use the v_interlock */
        simple_unlock(&vp->v_interlock);
}
/*
 * Serialize the requests to the VM
 *	1 - Successful in acquiring the lock
 *	2 - Successful in acquiring the lock recursively;
 *	    do not call ubc_unbusy()
 *	    [This is strange, but saves 4 bytes in struct ubc_info]
 */
ubc_busy(struct vnode *vp)
{
        register struct ubc_info	*uip;

        if (!UBCINFOEXISTS(vp))
                return (0);

        uip = vp->v_ubcinfo;

        while (ISSET(uip->ui_flags, UI_BUSY)) {

                if (uip->ui_owner == (void *)current_act())
                        return (2);

                SET(uip->ui_flags, UI_WANTED);
                (void) tsleep((caddr_t)&vp->v_ubcinfo, PINOD, "ubcbusy", 0);

                if (!UBCINFOEXISTS(vp))
                        return (0);
        }
        uip->ui_owner = (void *)current_act();

        SET(uip->ui_flags, UI_BUSY);

        return (1);
}
ubc_unbusy(struct vnode *vp)
{
        register struct ubc_info	*uip;

        if (!UBCINFOEXISTS(vp)) {
                wakeup((caddr_t)&vp->v_ubcinfo);
                return;
        }
        uip = vp->v_ubcinfo;
        CLR(uip->ui_flags, UI_BUSY);
        uip->ui_owner = (void *)NULL;

        if (ISSET(uip->ui_flags, UI_WANTED)) {
                CLR(uip->ui_flags, UI_WANTED);
                wakeup((caddr_t)&vp->v_ubcinfo);
        }
}
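/*
 * Illustrative sketch (not part of the original source): how a caller is
 * expected to pair ubc_busy() with ubc_unbusy(), honoring the recursive
 * return value of 2, which must NOT be balanced with ubc_unbusy().  The
 * helper name do_something_busy() is hypothetical.
 */
#if 0	/* example only */
static int
do_something_busy(struct vnode *vp)
{
        int recursed;

        if ((recursed = ubc_busy(vp)) == 0)
                return (0);		/* no ubc_info; nothing to do */

        /* ... work that needs the ubc_info to stay stable ... */

        if (recursed == 1)
                ubc_unbusy(vp);		/* only the non-recursive acquirer unbusies */
        return (1);
}
#endif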
/*
 * Initialization of the zone for Unified Buffer Cache.
 */
__private_extern__ void

        i = (vm_size_t) sizeof (struct ubc_info);
        /* XXX  the number of elements should be tied in to maxvnodes */
        ubc_info_zone = zinit (i, 10000*i, 8192, "ubc_info zone");
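/*
 * Possible refinement of the XXX above (sketch only, not the original
 * code): size the zone from the system vnode limit instead of the
 * hard-coded 10000 elements.  "desiredvnodes" stands in for whatever
 * global actually carries that limit and is an assumption here.
 */
#if 0	/* example only */
        ubc_info_zone = zinit(i, (vm_size_t)(desiredvnodes * i), 8192,
            "ubc_info zone");
#endif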
/*
 * Initialize a ubc_info structure for a vnode.
 */
ubc_info_init(struct vnode *vp)
        register struct ubc_info	*uip;

        struct proc *p = current_proc();

        memory_object_control_t control;

        if (ISSET(vp->v_flag, VUINIT)) {
                /* other thread is already doing this */
                while (ISSET(vp->v_flag, VUINIT)) {
                        SET(vp->v_flag, VUWANT);	/* XXX overloaded! */

                        (void) tsleep((caddr_t)vp, PINOD, "ubcinfo", 0);

        SET(vp->v_flag, VUINIT);
        if ((uip == UBC_INFO_NULL) || (uip == UBC_NOINFO)) {
                uip = (struct ubc_info *) zalloc(ubc_info_zone);
                uip->ui_pager = MEMORY_OBJECT_NULL;
                uip->ui_control = MEMORY_OBJECT_CONTROL_NULL;
                uip->ui_flags = UI_INITED;

                uip->ui_ucred = NOCRED;
                uip->ui_refcount = 1;

                uip->ui_owner = (void *)NULL;

                Debugger("ubc_info_init: already");
#endif	/* DIAGNOSTIC */

        assert(uip->ui_flags != UI_NONE);
        assert(uip->ui_vnode == vp);

        if (ISSET(uip->ui_flags, UI_HASPAGER))

        /* now set this ubc_info in the vnode */

        SET(uip->ui_flags, UI_HASPAGER);

        pager = (void *)vnode_pager_setup(vp, uip->ui_pager);

        ubc_setpager(vp, pager);
        /*
         * Note: We cannot use VOP_GETATTR() to get an accurate
         * value of ui_size.  Thanks to NFS.
         * nfs_getattr() can call vinvalbuf(), and in this case
         * ubc_info is not set up to deal with that.
         */

        /*
         * create a vnode - vm_object association
         * memory_object_create_named() creates a "named" reference on the
         * memory object; we hold this reference as long as the vnode is
         * "alive."  Since memory_object_create_named() took its own reference
         * on the vnode pager we passed it, we can drop the reference
         * vnode_pager_setup() returned here.
         */
        kret = memory_object_create_named(pager,
                (memory_object_size_t)uip->ui_size, &control);
        vnode_pager_deallocate(pager);
        if (kret != KERN_SUCCESS)
                panic("ubc_info_init: memory_object_create_named returned %d", kret);

        uip->ui_control = control;	/* cache the value of the mo control */
        SET(uip->ui_flags, UI_HASOBJREF);	/* with a named reference */
        /* create a pager reference on the vnode */
        error = vnode_pager_vget(vp);
        if (error)
                panic("ubc_info_init: vnode_pager_vget error = %d", error);

        /* initialize the size */
        error = VOP_GETATTR(vp, &vattr, p->p_ucred, p);

        uip->ui_size = (error ? 0 : vattr.va_size);

        CLR(vp->v_flag, VUINIT);
        if (ISSET(vp->v_flag, VUWANT)) {
                CLR(vp->v_flag, VUWANT);
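/*
 * Illustrative note (not part of the original source): clearing VUINIT and
 * waking any VUWANT waiters completes the handshake entered at the top of
 * this function (and also relied upon by ubc_hold() below): exactly one
 * thread initializes the ubc_info while any concurrent threads sleep on the
 * vnode until VUINIT is cleared.
 */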
/* Free the ubc_info */
ubc_info_free(struct ubc_info *uip)
        credp = uip->ui_ucred;
        if (credp != NOCRED) {
                uip->ui_ucred = NOCRED;

        if (uip->ui_control != MEMORY_OBJECT_CONTROL_NULL)
                memory_object_control_deallocate(uip->ui_control);

        zfree(ubc_info_zone, (vm_offset_t)uip);
ubc_info_deallocate(struct ubc_info *uip)
        assert(uip->ui_refcount > 0);

        if (uip->ui_refcount-- == 1) {

                if (ISSET(uip->ui_flags, UI_WANTED)) {
                        CLR(uip->ui_flags, UI_WANTED);
                        wakeup((caddr_t)&vp->v_ubcinfo);
/*
 * Communicate with VM the size change of the file
 * returns 1 on success, 0 on failure
 */
ubc_setsize(struct vnode *vp, off_t nsize)
        off_t osize;	/* ui_size before change */
        off_t lastpg, olastpgend, lastoff;
        struct ubc_info *uip;
        memory_object_control_t control;

        if (nsize < (off_t)0)

        if (!UBCINFOEXISTS(vp))

        osize = uip->ui_size;	/* call ubc_getsize() ??? */
        /* Update the size before flushing the VM */
        uip->ui_size = nsize;

        if (nsize >= osize)	/* Nothing more to do */
                return (1);	/* return success */

        /*
         * When the file shrinks, invalidate the pages beyond the
         * new size.  Also get rid of garbage beyond nsize on the
         * last page.  The ui_size already has the nsize.  This
         * ensures that the pageout would not write beyond the new
         * size.
         */

        lastpg = trunc_page_64(nsize);
        olastpgend = round_page_64(osize);
        control = uip->ui_control;

        lastoff = (nsize & PAGE_MASK_64);

        /*
         * If length is a multiple of page size, we should not flush;
         * invalidating is sufficient.
         */
        /* invalidate last page and old contents beyond nsize */
        kret = memory_object_lock_request(control,
                (memory_object_offset_t)lastpg,
                (memory_object_size_t)(olastpgend - lastpg),
                MEMORY_OBJECT_RETURN_NONE, MEMORY_OBJECT_DATA_FLUSH,

        if (kret != KERN_SUCCESS)
                printf("ubc_setsize: invalidate failed (error = %d)\n", kret);

        return ((kret == KERN_SUCCESS) ? 1 : 0);

        /* flush the last page */
        kret = memory_object_lock_request(control,
                (memory_object_offset_t)lastpg,

                MEMORY_OBJECT_RETURN_DIRTY, FALSE,

        if (kret == KERN_SUCCESS) {
                /* invalidate last page and old contents beyond nsize */
                kret = memory_object_lock_request(control,
                        (memory_object_offset_t)lastpg,
                        (memory_object_size_t)(olastpgend - lastpg),
                        MEMORY_OBJECT_RETURN_NONE, MEMORY_OBJECT_DATA_FLUSH,

                if (kret != KERN_SUCCESS)
                        printf("ubc_setsize: invalidate failed (error = %d)\n", kret);

                printf("ubc_setsize: flush failed (error = %d)\n", kret);

        return ((kret == KERN_SUCCESS) ? 1 : 0);
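/*
 * Worked example of the page arithmetic above (not part of the original
 * source; assumes a 4K page size): shrinking a file from osize = 9000 to
 * nsize = 5000 gives
 *	lastpg     = trunc_page_64(5000)  = 4096
 *	olastpgend = round_page_64(9000)  = 12288
 *	lastoff    = 5000 & PAGE_MASK_64  = 904
 * so the partially valid last page is pushed out if dirty and then the
 * whole span [4096, 12288) is invalidated, removing any stale data beyond
 * the new EOF.
 */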
/*
 * Get the size of the file
 */
ubc_getsize(struct vnode *vp)
        return (vp->v_ubcinfo->ui_size);
/*
 * Caller indicates that the object corresponding to the vnode
 * cannot be cached in the object cache.  Make it so.
 * returns 1 on success, 0 on failure
 */
ubc_uncache(struct vnode *vp)
        struct ubc_info *uip;

        memory_object_control_t control;
        memory_object_perf_info_data_t perf;

        if (!UBCINFOEXISTS(vp))

        if ((recursed = ubc_busy(vp)) == 0)

        assert(uip != UBC_INFO_NULL);

        /*
         * AGE it so that vfree() can make sure that it
         * would get recycled soon after the last reference is gone.
         * This will ensure that .nfs turds would not linger
         */

        /* set the "do not cache" bit */
        SET(uip->ui_flags, UI_DONTCACHE);

        control = uip->ui_control;

        perf.cluster_size = PAGE_SIZE;	/* XXX use real cluster_size. */
        perf.may_cache = FALSE;
        kret = memory_object_change_attributes(control,
                MEMORY_OBJECT_PERFORMANCE_INFO,
                (memory_object_info_t) &perf,
                MEMORY_OBJECT_PERF_INFO_COUNT);

        if (kret != KERN_SUCCESS) {
                printf("ubc_uncache: memory_object_change_attributes_named "

        ubc_release_named(vp);
/*
 * call ubc_clean() and ubc_uncache() on all the vnodes
 * for this mount point.
 * returns 1 on success, 0 on failure
 */
__private_extern__ int
ubc_umount(struct mount *mp)
        struct proc *p = current_proc();
        struct vnode *vp, *nvp;

        simple_lock(&mntvnode_slock);
        for (vp = mp->mnt_vnodelist.lh_first; vp; vp = nvp) {
                if (vp->v_mount != mp) {
                        simple_unlock(&mntvnode_slock);

                nvp = vp->v_mntvnodes.le_next;
                simple_unlock(&mntvnode_slock);
                if (UBCINFOEXISTS(vp)) {

                        /*
                         * Must get a valid reference on the vnode
                         * before calling UBC functions
                         */
                        if (vget(vp, 0, p)) {

                                simple_lock(&mntvnode_slock);
                                continue;	/* move on to the next vnode */

                        ret &= ubc_clean(vp, 0);	/* do not invalidate */
                        ret &= ubc_uncache(vp);

                simple_lock(&mntvnode_slock);

        simple_unlock(&mntvnode_slock);
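/*
 * Illustrative note (not part of the original source): nvp is captured
 * while mntvnode_slock is still held so that the walk survives vp being
 * taken off the list, and the lock is dropped around vget(), ubc_clean()
 * and ubc_uncache() because those calls may sleep.
 */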
/*
 * Call ubc_umount() for all filesystems.
 * The list is traversed in reverse order
 * of mounting to avoid dependencies.
 */
__private_extern__ void

        struct mount *mp, *nmp;

        /*
         * Since this only runs when rebooting, it is not interlocked.
         */
        for (mp = mountlist.cqh_last; mp != (void *)&mountlist; mp = nmp) {
                nmp = mp->mnt_list.cqe_prev;
                (void) ubc_umount(mp);
/* Get the credentials */
ubc_getcred(struct vnode *vp)
        struct ubc_info *uip;

        return (uip->ui_ucred);
/*
 * Set the credentials
 * existing credentials are not changed
 * returns 1 on success and 0 on failure
 */
ubc_setcred(struct vnode *vp, struct proc *p)
        struct ubc_info *uip;

        credp = uip->ui_ucred;
        if (credp == NOCRED) {
                uip->ui_ucred = p->p_ucred;
__private_extern__ memory_object_t
ubc_getpager(struct vnode *vp)
        struct ubc_info *uip;

        return (uip->ui_pager);
/*
 * Get the memory object associated with this vnode.
 * If the vnode was reactivated, the memory object would not exist.
 * Unless "do not reactivate" was specified, look it up using the pager.
 * If hold was requested, create an object reference if one does not
 * exist.
 */
memory_object_control_t
ubc_getobject(struct vnode *vp, int flags)
        struct ubc_info *uip;

        memory_object_control_t control;

        if (flags & UBC_FOR_PAGEOUT)
                return (vp->v_ubcinfo->ui_control);

        if ((recursed = ubc_busy(vp)) == 0)

        control = uip->ui_control;

        if ((flags & UBC_HOLDOBJECT) && (!ISSET(uip->ui_flags, UI_HASOBJREF))) {

                /*
                 * Take a temporary reference on the ubc info so that it won't go
                 * away during our recovery attempt.
                 */

                if (memory_object_recover_named(control, TRUE) == KERN_SUCCESS) {
                        SET(uip->ui_flags, UI_HASOBJREF);

                        control = MEMORY_OBJECT_CONTROL_NULL;

                ubc_info_deallocate(uip);
ubc_setpager(struct vnode *vp, memory_object_t pager)
        struct ubc_info *uip;

        uip->ui_pager = pager;
ubc_setflags(struct vnode *vp, int flags)
        struct ubc_info *uip;

        SET(uip->ui_flags, flags);
ubc_clearflags(struct vnode *vp, int flags)
        struct ubc_info *uip;

        CLR(uip->ui_flags, flags);
ubc_issetflags(struct vnode *vp, int flags)
        struct ubc_info *uip;

        return (ISSET(uip->ui_flags, flags));
ubc_blktooff(struct vnode *vp, daddr_t blkno)
        error = VOP_BLKTOOFF(vp, blkno, &file_offset);

        return (file_offset);

ubc_offtoblk(struct vnode *vp, off_t offset)
        if (UBCINVALID(vp)) {
                return ((daddr_t)-1);

        error = VOP_OFFTOBLK(vp, offset, &blkno);
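/*
 * Illustrative sketch (not part of the original source): the two
 * conversions above are inverses supplied by the filesystem through
 * VOP_BLKTOOFF()/VOP_OFFTOBLK().  For a hypothetical filesystem using a
 * 4096-byte logical block, block 3 maps to file offset 12288, and any
 * offset in [12288, 16384) maps back to block 3 (vp is assumed to be a
 * referenced vnode in scope).
 */
#if 0	/* example only */
        off_t   foff = ubc_blktooff(vp, (daddr_t)3);	/* 12288 under the 4K assumption */
        daddr_t blk  = ubc_offtoblk(vp, (off_t)13000);	/* 3 under the 4K assumption */
#endif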
/*
 * Cause the file data in VM to be pushed out to the storage;
 * it also causes all currently valid pages to be released.
 * returns 1 on success, 0 on failure
 */
ubc_clean(struct vnode *vp, int invalidate)
        struct ubc_info *uip;
        memory_object_control_t control;

        if (!UBCINFOEXISTS(vp))

        /*
         * if invalidate was requested, write dirty data and then discard
         */
                flags = (MEMORY_OBJECT_DATA_FLUSH | MEMORY_OBJECT_DATA_NO_CHANGE);

        size = uip->ui_size;	/* call ubc_getsize() ??? */

        control = uip->ui_control;

        /* Write the dirty data in the file and discard cached pages */
        kret = memory_object_lock_request(control,
                (memory_object_offset_t)0,
                (memory_object_size_t)round_page_64(size),
                MEMORY_OBJECT_RETURN_ALL, flags,

        if (kret != KERN_SUCCESS)
                printf("ubc_clean: clean failed (error = %d)\n", kret);

        return ((kret == KERN_SUCCESS) ? 1 : 0);
/*
 * Cause the file data in VM to be pushed out to the storage;
 * currently valid pages are NOT invalidated.
 * returns 1 on success, 0 on failure
 */
ubc_pushdirty(struct vnode *vp)
        struct ubc_info *uip;
        memory_object_control_t control;

        if (!UBCINFOEXISTS(vp))

        size = uip->ui_size;	/* call ubc_getsize() ??? */

        control = uip->ui_control;

        vp->v_flag &= ~VHASDIRTY;

        /* Write the dirty data in the file (cached pages are not discarded) */
        kret = memory_object_lock_request(control,
                (memory_object_offset_t)0,
                (memory_object_size_t)round_page_64(size),
                MEMORY_OBJECT_RETURN_DIRTY, FALSE,

        if (kret != KERN_SUCCESS)
                printf("ubc_pushdirty: flush failed (error = %d)\n", kret);

        return ((kret == KERN_SUCCESS) ? 1 : 0);
/*
 * Cause the file data in VM to be pushed out to the storage;
 * currently valid pages are NOT invalidated.
 * returns 1 on success, 0 on failure
 */
ubc_pushdirty_range(struct vnode *vp, off_t offset, off_t size)
        struct ubc_info *uip;
        memory_object_control_t control;

        if (!UBCINFOEXISTS(vp))

        control = uip->ui_control;

        /* Write any dirty pages in the requested range of the file: */
        kret = memory_object_lock_request(control,
                (memory_object_offset_t)offset,
                (memory_object_size_t)round_page_64(size),
                MEMORY_OBJECT_RETURN_DIRTY, FALSE,

        if (kret != KERN_SUCCESS)
                printf("ubc_pushdirty_range: flush failed (error = %d)\n", kret);

        return ((kret == KERN_SUCCESS) ? 1 : 0);
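/*
 * Illustrative sketch (not part of the original source): flushing a single
 * dirty region of a file with ubc_pushdirty_range().  The size is rounded
 * up to a page boundary by the routine itself, so a byte-precise length may
 * be passed; the offsets are arbitrary and vp is assumed to be a referenced
 * vnode in scope.
 */
#if 0	/* example only */
        if (!ubc_pushdirty_range(vp, (off_t)8192, (off_t)100))
                printf("flush of [8192, 8292) failed\n");
#endif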
/*
 * Make sure the vm object does not vanish
 * returns 1 if the hold count was incremented
 * returns 0 if the hold count was not incremented
 * This return value should be used to balance
 * ubc_hold() and ubc_rele().
 */
ubc_hold(struct vnode *vp)
        struct ubc_info *uip;

        memory_object_control_t object;

        if (ISSET(vp->v_flag, VUINIT)) {
                /*
                 * other thread is not done initializing this
                 * yet, wait till it's done and try again
                 */
                while (ISSET(vp->v_flag, VUINIT)) {
                        SET(vp->v_flag, VUWANT);	/* XXX overloaded! */

                        (void) tsleep((caddr_t)vp, PINOD, "ubchold", 0);

        if ((recursed = ubc_busy(vp)) == 0) {
                /* must be invalid or dying vnode */
                assert(UBCINVALID(vp) ||
                        ((vp->v_flag & VXLOCK) || (vp->v_flag & VTERMINATE)));

        assert(uip->ui_control != MEMORY_OBJECT_CONTROL_NULL);

        if (!ISSET(uip->ui_flags, UI_HASOBJREF)) {
                if (memory_object_recover_named(uip->ui_control, TRUE)

                        ubc_info_deallocate(uip);

                SET(uip->ui_flags, UI_HASOBJREF);

        assert(uip->ui_refcount > 0);
/*
 * Drop the holdcount.
 * release the reference on the vm object if this is an "uncached" ubc_info.
 */
ubc_rele(struct vnode *vp)
        struct ubc_info *uip;

        if (!UBCINFOEXISTS(vp)) {
                /* nothing more to do for a dying vnode */
                if ((vp->v_flag & VXLOCK) || (vp->v_flag & VTERMINATE))

                panic("ubc_rele: can not");

        if (uip->ui_refcount == 1)
                panic("ubc_rele: ui_refcount");

        if ((uip->ui_refcount == 1)
            && ISSET(uip->ui_flags, UI_DONTCACHE))
                (void) ubc_release_named(vp);
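/*
 * Illustrative sketch (not part of the original source): ubc_hold() and
 * ubc_rele() must be balanced using the return value of ubc_hold(), as the
 * comment above ubc_hold() requires.  The helper name touch_object() is
 * hypothetical.
 */
#if 0	/* example only */
static void
touch_object(struct vnode *vp)
{
        int held = ubc_hold(vp);

        /* ... use the memory object knowing it cannot vanish ... */

        if (held)
                ubc_rele(vp);
}
#endif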
/*
 * The vnode is mapped explicitly, mark it so.
 */
__private_extern__ void
ubc_map(struct vnode *vp)
        struct ubc_info *uip;

        if (!UBCINFOEXISTS(vp))

        SET(uip->ui_flags, UI_WASMAPPED);
/*
 * Release the memory object reference on the vnode
 * only if it is not in use.
 * Return 1 if the reference was released, 0 otherwise.
 */
ubc_release_named(struct vnode *vp)
        struct ubc_info *uip;

        memory_object_control_t control;
        kern_return_t kret = KERN_FAILURE;

        if ((recursed = ubc_busy(vp)) == 0)

        /* can not release held or mapped vnodes */
        if (ISSET(uip->ui_flags, UI_HASOBJREF) &&
            (uip->ui_refcount == 1) && !uip->ui_mapped) {
                control = uip->ui_control;

                if (vp->v_flag & VDELETED) {
                        ubc_setsize(vp, (off_t)0);

                CLR(uip->ui_flags, UI_HASOBJREF);
                kret = memory_object_release_name(control,
                        MEMORY_OBJECT_RESPECT_CACHE);

        return ((kret != KERN_SUCCESS) ? 0 : 1);
/*
 * This function used to be called by extensions directly.  Some may
 * still exist with this behavior.  In those cases, we will do the
 * release as part of reclaiming or cleaning the vnode.  We don't
 * need anything explicit - so just stub this out until those callers
 */
/*
 * destroy the named reference for a given vnode
 */
__private_extern__ int

        memory_object_control_t control;
        struct ubc_info *uip;

        /*
         * We may already have had the object terminated
         * and the ubcinfo released as a side effect of
         * some earlier processing.  If so, pretend we did
         * it, because it probably was a result of our
         */
        if (!UBCINFOEXISTS(vp))

        uip = vp->v_ubcinfo;

        /* can not destroy held vnodes */
        if (uip->ui_refcount > 1)

        /*
         * Terminate the memory object.
         * memory_object_destroy() will result in
         * vnode_pager_no_senders().
         * That will release the pager reference
         * and the vnode will move to the free list.
         */
        control = ubc_getobject(vp, UBC_HOLDOBJECT);
        if (control != MEMORY_OBJECT_CONTROL_NULL) {

                if (ISSET(vp->v_flag, VTERMINATE))
                        panic("ubc_destroy_named: already terminating");
                SET(vp->v_flag, VTERMINATE);

                kret = memory_object_destroy(control, 0);
                if (kret != KERN_SUCCESS)

                /*
                 * memory_object_destroy() is asynchronous
                 * with respect to vnode_pager_no_senders().
                 * wait for vnode_pager_no_senders() to clear
                 */
                while (ISSET(vp->v_flag, VTERMINATE)) {
                        SET(vp->v_flag, VTERMWANT);
                        (void) tsleep((caddr_t)&vp->v_ubcinfo,
                                PINOD, "ubc_destroy_named", 0);
/*
 * Invalidate a range in the memory object that backs this
 * vnode.  The offset is truncated to the page boundary and the
 * size is adjusted to include the last page in the range.
 */
ubc_invalidate(struct vnode *vp, off_t offset, size_t size)
        struct ubc_info *uip;
        memory_object_control_t control;

        if (!UBCINFOEXISTS(vp))

        toff = trunc_page_64(offset);
        tsize = (size_t)(round_page_64(offset+size) - toff);
        uip = vp->v_ubcinfo;
        control = uip->ui_control;

        /* invalidate pages in the range requested */
        kret = memory_object_lock_request(control,
                (memory_object_offset_t)toff,
                (memory_object_size_t)tsize,
                MEMORY_OBJECT_RETURN_NONE,
                (MEMORY_OBJECT_DATA_NO_CHANGE | MEMORY_OBJECT_DATA_FLUSH),

        if (kret != KERN_SUCCESS)
                printf("ubc_invalidate: invalidate failed (error = %d)\n", kret);

        return ((kret == KERN_SUCCESS) ? 1 : 0);
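/*
 * Worked example of the rounding above (not part of the original source;
 * assumes a 4K page size): invalidating offset = 5000, size = 3000 gives
 *	toff  = trunc_page_64(5000)               = 4096
 *	tsize = round_page_64(5000 + 3000) - 4096 = 4096
 * so exactly the one page [4096, 8192) containing the requested bytes is
 * invalidated.
 */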
/*
 * Find out whether a vnode is in use by UBC
 * Returns 1 if file is in use by UBC, 0 if not
 */
ubc_isinuse(struct vnode *vp, int busycount)
        if (!UBCINFOEXISTS(vp))

        if (busycount == 0) {
                printf("ubc_isinuse: called without a valid reference"
                    ": v_tag = %d\n", vp->v_tag);
                vprint("ubc_isinuse", vp);

        if (vp->v_usecount > busycount+1)

        if ((vp->v_usecount == busycount+1)
            && (vp->v_ubcinfo->ui_mapped == 1))
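/*
 * Illustrative note (not part of the original source): the "+1" accounts
 * for the reference the caller itself holds on the vnode.  With
 * busycount == 0, a v_usecount of 2 or more (the caller plus at least one
 * other user) reports the file as in use, and a v_usecount of exactly 1
 * with ui_mapped set also counts, since a mapping keeps the data live.
 */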
/*
 * The backdoor routine to clear the ui_mapped.
 * MUST only be called by the VM
 *
 * Note that this routine is not called under funnel.  There are numerous
 * things about the calling sequence that make this work on SMP.
 * Any code change in those paths can break this.
 */
__private_extern__ void
ubc_unmap(struct vnode *vp)
        struct ubc_info *uip;
        boolean_t funnel_state;

        if (!UBCINFOEXISTS(vp))

        uip = vp->v_ubcinfo;

        if ((uip->ui_refcount > 1) || !ISSET(uip->ui_flags, UI_DONTCACHE)) {

        funnel_state = thread_funnel_set(kernel_flock, TRUE);
        (void) ubc_release_named(vp);
        (void) thread_funnel_set(kernel_flock, funnel_state);
        ppnum_t	*phys_entryp,

        memory_object_control_t control;

        control = ubc_getobject(vp, UBC_FLAGS_NONE);
        if (control == MEMORY_OBJECT_CONTROL_NULL)
                return KERN_INVALID_ARGUMENT;

        return (memory_object_page_op(control,
                (memory_object_offset_t)f_offset,
__private_extern__ kern_return_t
ubc_page_op_with_control(
        memory_object_control_t	control,

        ppnum_t	*phys_entryp,

        return (memory_object_page_op(control,
                (memory_object_offset_t)f_offset,
        memory_object_control_t control;

        control = ubc_getobject(vp, UBC_FLAGS_NONE);
        if (control == MEMORY_OBJECT_CONTROL_NULL)
                return KERN_INVALID_ARGUMENT;

        return (memory_object_range_op(control,
                (memory_object_offset_t)f_offset_beg,
                (memory_object_offset_t)f_offset_end,
        upl_page_info_t	**plp,

        memory_object_control_t control;

        if (bufsize & 0xfff)
                return KERN_INVALID_ARGUMENT;

        if (uplflags & UPL_FOR_PAGEOUT) {
                uplflags &= ~UPL_FOR_PAGEOUT;
                ubcflags = UBC_FOR_PAGEOUT;

                ubcflags = UBC_FLAGS_NONE;

        control = ubc_getobject(vp, ubcflags);
        if (control == MEMORY_OBJECT_CONTROL_NULL)
                return KERN_INVALID_ARGUMENT;

        if (uplflags & UPL_WILL_BE_DUMPED) {
                uplflags &= ~UPL_WILL_BE_DUMPED;
                uplflags |= (UPL_NO_SYNC|UPL_SET_INTERNAL);

                uplflags |= (UPL_NO_SYNC|UPL_CLEAN_IN_PLACE|UPL_SET_INTERNAL);

        kr = memory_object_upl_request(control, f_offset, bufsize,
                uplp, NULL, &count, uplflags);

        *plp = UPL_GET_INTERNAL_PAGE_LIST(*uplp);
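/*
 * Illustrative sketch (not part of the original source): a typical UPL
 * life cycle using the helpers in this file -- create a UPL covering a
 * page-aligned range of the file, map it into the kernel map, operate on
 * the data, then unmap and commit.  Exact parameter types are abbreviated
 * here, the flag values passed are assumptions, and the helper name
 * fill_one_range() is hypothetical.
 */
#if 0	/* example only */
static kern_return_t
fill_one_range(struct vnode *vp, off_t f_offset, long bufsize)
{
        upl_t upl;
        upl_page_info_t *pl;
        vm_offset_t kaddr;
        kern_return_t kr;

        /* bufsize must be a multiple of the page size (see the 0xfff check above) */
        kr = ubc_create_upl(vp, f_offset, bufsize, &upl, &pl, 0);
        if (kr != KERN_SUCCESS)
                return (kr);

        kr = ubc_upl_map(upl, &kaddr);
        if (kr != KERN_SUCCESS) {
                (void) ubc_upl_abort(upl, UPL_ABORT_ERROR);	/* flag value is an assumption */
                return (kr);
        }

        /* ... read or modify the bufsize bytes at kaddr ... */

        (void) ubc_upl_unmap(upl);
        return (ubc_upl_commit(upl));
}
#endif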
        vm_offset_t	*dst_addr)

        return (vm_upl_map(kernel_map, upl, dst_addr));

        return (vm_upl_unmap(kernel_map, upl));
        upl_page_info_t	*pl;

        pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
        kr = upl_commit(upl, pl, MAX_UPL_TRANSFER);
        upl_deallocate(upl);
ubc_upl_commit_range(
        upl_page_info_t	*pl;

        if (flags & UPL_COMMIT_FREE_ON_EMPTY)
                flags |= UPL_COMMIT_NOTIFY_EMPTY;

        pl = UPL_GET_INTERNAL_PAGE_LIST(upl);

        kr = upl_commit_range(upl, offset, size, flags,
                pl, MAX_UPL_TRANSFER, &empty);

        if ((flags & UPL_COMMIT_FREE_ON_EMPTY) && empty)
                upl_deallocate(upl);
ubc_upl_abort_range(
        boolean_t empty = FALSE;

        if (abort_flags & UPL_ABORT_FREE_ON_EMPTY)
                abort_flags |= UPL_ABORT_NOTIFY_EMPTY;

        kr = upl_abort_range(upl, offset, size, abort_flags, &empty);

        if ((abort_flags & UPL_ABORT_FREE_ON_EMPTY) && empty)
                upl_deallocate(upl);
        kr = upl_abort(upl, abort_type);
        upl_deallocate(upl);

        return (UPL_GET_INTERNAL_PAGE_LIST(upl));