/*
 * Copyright (c) 1999-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 *	Author:	Umesh Vaishampayan [umeshv@apple.com]
 *		05-Aug-1999	umeshv	Created.
 *
 *	Functions related to Unified Buffer cache.
 *
 *	Caller of UBC functions MUST have a valid reference on the vnode.
 */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mount_internal.h>
#include <sys/vnode_internal.h>
#include <sys/ubc_internal.h>
#include <sys/ucred.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>

#include <mach/mach_types.h>
#include <mach/memory_object_types.h>
#include <mach/memory_object_control.h>
#include <mach/vm_map.h>

#include <kern/kern_types.h>
#include <kern/zalloc.h>
#include <vm/vm_kern.h>
#include <vm/vm_protos.h>	/* last */
#if DIAGNOSTIC
#define assert(cond)	\
    ((void) ((cond) ? 0 : panic("%s:%d (%s)", __FILE__, __LINE__, # cond)))
#else
#include <kern/assert.h>
#endif /* DIAGNOSTIC */
int ubc_info_init_internal(struct vnode *vp, int withfsize, off_t filesize);
int ubc_umcallback(vnode_t, void *);
int ubc_isinuse_locked(vnode_t, int, int);
int ubc_msync_internal(vnode_t, off_t, off_t, off_t *, int, int *);

struct zone	*ubc_info_zone;
/*
 *  Initialization of the zone for Unified Buffer Cache.
 */
__private_extern__ void
ubc_init(void)
{
	int	i;

	i = (vm_size_t) sizeof (struct ubc_info);
	/* XXX  the number of elements should be tied in to maxvnodes */
	ubc_info_zone = zinit(i, 10000*i, 8192, "ubc_info zone");
}
/*
 *  Initialize a ubc_info structure for a vnode.
 */
int
ubc_info_init(struct vnode *vp)
{
	return (ubc_info_init_internal(vp, 0, 0));
}
int
ubc_info_init_withsize(struct vnode *vp, off_t filesize)
{
	return (ubc_info_init_internal(vp, 1, filesize));
}
int
ubc_info_init_internal(struct vnode *vp, int withfsize, off_t filesize)
{
	register struct ubc_info	*uip;
	void *pager;
	struct proc *p = current_proc();
	int error = 0;
	kern_return_t kret;
	memory_object_control_t control;

	uip = vp->v_ubcinfo;

	if (uip == UBC_INFO_NULL) {
		uip = (struct ubc_info *) zalloc(ubc_info_zone);
		bzero((char *)uip, sizeof(struct ubc_info));

		uip->ui_vnode = vp;
		uip->ui_flags = UI_INITED;
		uip->ui_ucred = NOCRED;
	}
#if DIAGNOSTIC
	else
		Debugger("ubc_info_init: already");
#endif /* DIAGNOSTIC */

	assert(uip->ui_flags != UI_NONE);
	assert(uip->ui_vnode == vp);

	/* now set this ubc_info in the vnode */
	vp->v_ubcinfo = uip;

	pager = (void *)vnode_pager_setup(vp, uip->ui_pager);

	SET(uip->ui_flags, UI_HASPAGER);
	uip->ui_pager = pager;

	/*
	 * Note: We can not use VNOP_GETATTR() to get an accurate
	 * value for ui_size.  Thanks to NFS: nfs_getattr() can call
	 * vinvalbuf(), and at that point the ubc_info is not set up
	 * to deal with it.
	 */

	/*
	 * Create a vnode - vm_object association.
	 * memory_object_create_named() creates a "named" reference on the
	 * memory object; we hold this reference as long as the vnode is
	 * "alive."  Since memory_object_create_named() took its own reference
	 * on the vnode pager we passed it, we can drop the reference
	 * vnode_pager_setup() returned here.
	 */
	kret = memory_object_create_named(pager,
		(memory_object_size_t)uip->ui_size, &control);
	vnode_pager_deallocate(pager);
	if (kret != KERN_SUCCESS)
		panic("ubc_info_init: memory_object_create_named returned %d", kret);

	uip->ui_control = control;	/* cache the value of the mo control */
	SET(uip->ui_flags, UI_HASOBJREF);	/* with a named reference */

	/* create a pager reference on the vnode */
	error = vnode_pager_vget(vp);
	if (error)
		panic("ubc_info_init: vnode_pager_vget error = %d", error);

	if (withfsize == 0) {
		struct vfs_context context;
		/* initialize the size */
		context.vc_proc = p;
		context.vc_ucred = kauth_cred_get();
		error = vnode_size(vp, &uip->ui_size, &context);
		if (error)
			uip->ui_size = 0;
	} else {
		uip->ui_size = filesize;
	}
	vp->v_lflag |= VNAMED_UBC;

	return (error);
}
/* Free the ubc_info */
static void
ubc_info_free(struct ubc_info *uip)
{
	kauth_cred_t credp;

	credp = uip->ui_ucred;
	if (credp != NOCRED) {
		uip->ui_ucred = NOCRED;
		kauth_cred_rele(credp);
	}

	if (uip->ui_control != MEMORY_OBJECT_CONTROL_NULL)
		memory_object_control_deallocate(uip->ui_control);

	cluster_release(uip);

	zfree(ubc_info_zone, (vm_offset_t)uip);
}
void
ubc_info_deallocate(struct ubc_info *uip)
{
	ubc_info_free(uip);
}
/*
 * Communicate with VM the size change of the file
 * returns 1 on success, 0 on failure
 */
int
ubc_setsize(struct vnode *vp, off_t nsize)
{
	off_t osize;	/* ui_size before change */
	off_t lastpg, olastpgend, lastoff;
	struct ubc_info *uip;
	memory_object_control_t control;
	kern_return_t kret;

	if (nsize < (off_t)0)
		return (0);

	if (!UBCINFOEXISTS(vp))
		return (0);

	uip = vp->v_ubcinfo;
	osize = uip->ui_size;	/* call ubc_getsize() ??? */
	/* Update the size before flushing the VM */
	uip->ui_size = nsize;

	if (nsize >= osize)	/* Nothing more to do */
		return (1);	/* return success */

	/*
	 * When the file shrinks, invalidate the pages beyond the
	 * new size.  Also get rid of garbage beyond nsize on the
	 * last page.  The ui_size already has the nsize; this
	 * ensures that the pageout will not write beyond the new
	 * end of the file.
	 */
	lastpg = trunc_page_64(nsize);
	olastpgend = round_page_64(osize);
	control = uip->ui_control;
	lastoff = (nsize & PAGE_MASK_64);

	/*
	 * If the new length is a multiple of the page size, there is
	 * no partial last page to flush; invalidating is sufficient.
	 */
	if (!lastoff) {
		/* invalidate last page and old contents beyond nsize */
		kret = memory_object_lock_request(control,
			    (memory_object_offset_t)lastpg,
			    (memory_object_size_t)(olastpgend - lastpg), NULL, NULL,
			    MEMORY_OBJECT_RETURN_NONE, MEMORY_OBJECT_DATA_FLUSH,
			    VM_PROT_NO_CHANGE);
		if (kret != KERN_SUCCESS)
			printf("ubc_setsize: invalidate failed (error = %d)\n", kret);

		return ((kret == KERN_SUCCESS) ? 1 : 0);
	}

	/* flush the last page */
	kret = memory_object_lock_request(control,
		    (memory_object_offset_t)lastpg,
		    PAGE_SIZE_64, NULL, NULL,
		    MEMORY_OBJECT_RETURN_DIRTY, FALSE,
		    VM_PROT_NO_CHANGE);

	if (kret == KERN_SUCCESS) {
		/* invalidate last page and old contents beyond nsize */
		kret = memory_object_lock_request(control,
			    (memory_object_offset_t)lastpg,
			    (memory_object_size_t)(olastpgend - lastpg), NULL, NULL,
			    MEMORY_OBJECT_RETURN_NONE, MEMORY_OBJECT_DATA_FLUSH,
			    VM_PROT_NO_CHANGE);
		if (kret != KERN_SUCCESS)
			printf("ubc_setsize: invalidate failed (error = %d)\n", kret);
	} else
		printf("ubc_setsize: flush failed (error = %d)\n", kret);

	return ((kret == KERN_SUCCESS) ? 1 : 0);
}
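/*
 * Illustrative sketch (not part of the original file): how a filesystem's
 * truncate path might notify the UBC of a size change.  "my_fs_truncate"
 * is a hypothetical helper; error handling is intentionally minimal.
 */
#if 0
static int
my_fs_truncate(vnode_t vp, off_t new_size)
{
	/* ... update the filesystem's own notion of the file size first ... */

	/* then tell the VM/UBC so cached pages beyond the new EOF are invalidated */
	if (ubc_setsize(vp, new_size) == 0)
		return (EIO);		/* ubc_setsize() returns 0 on failure */

	return (0);
}
#endif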
/*
 * Get the size of the file
 */
off_t
ubc_getsize(struct vnode *vp)
{
	/*
	 * Callers depend on the side effect of this returning 0 when
	 * there is no ubc_info, as they call it for directories too.
	 */
	if (!UBCINFOEXISTS(vp))
		return ((off_t)0);
	return (vp->v_ubcinfo->ui_size);
}
/*
 * call ubc_msync(vp, 0, EOF, NULL, UBC_PUSHALL) on all the vnodes
 * for this mount point.
 * returns 1 on success, 0 on failure
 */
__private_extern__ int
ubc_umount(struct mount *mp)
{
	vnode_iterate(mp, 0, ubc_umcallback, 0);
	return (0);
}
int
ubc_umcallback(vnode_t vp, __unused void * args)
{
	if (UBCINFOEXISTS(vp)) {
		(void) ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL, UBC_PUSHALL);
	}
	return (VNODE_RETURNED);
}
/* Get the credentials */
kauth_cred_t
ubc_getcred(struct vnode *vp)
{
	if (UBCINFOEXISTS(vp))
		return (vp->v_ubcinfo->ui_ucred);

	return (NOCRED);
}
/*
 * Set the credentials
 * existing credentials are not changed
 * returns 1 on success and 0 on failure
 */
int
ubc_setcred(struct vnode *vp, struct proc *p)
{
	struct ubc_info *uip;
	kauth_cred_t credp;

	if ( !UBCINFOEXISTS(vp))
		return (0);

	vnode_lock(vp);

	uip = vp->v_ubcinfo;
	credp = uip->ui_ucred;

	if (credp == NOCRED) {
		uip->ui_ucred = kauth_cred_proc_ref(p);
	}
	vnode_unlock(vp);

	return (1);
}
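/*
 * Illustrative sketch (not part of the original file): ubc_setcred() stashes
 * the calling process' credential in the ubc_info the first time through, and
 * ubc_getcred() later hands it back, e.g. for paging I/O issued without a
 * user context.  "my_open_hook" is a hypothetical caller.
 */
#if 0
static void
my_open_hook(vnode_t vp, struct proc *p)
{
	kauth_cred_t cred;

	/* record a credential for later paging I/O; a no-op if one is already set */
	(void) ubc_setcred(vp, p);

	/* a pager path can then retrieve it */
	cred = ubc_getcred(vp);
	(void) cred;
}
#endif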
__private_extern__ memory_object_t
ubc_getpager(struct vnode *vp)
{
	if (UBCINFOEXISTS(vp))
		return (vp->v_ubcinfo->ui_pager);

	return (0);
}
/*
 * Get the memory object associated with this vnode.
 * If the vnode was reactivated, the memory object would not exist.
 * Unless "do not reactivate" was specified, look it up using the pager.
 * If hold was requested, create an object reference if one does not
 * exist already.
 */
memory_object_control_t
ubc_getobject(struct vnode *vp, __unused int flags)
{
	if (UBCINFOEXISTS(vp))
		return ((vp->v_ubcinfo->ui_control));

	return (MEMORY_OBJECT_CONTROL_NULL);
}
off_t
ubc_blktooff(vnode_t vp, daddr64_t blkno)
{
	off_t file_offset;
	int error;

	error = VNOP_BLKTOOFF(vp, blkno, &file_offset);
	if (error)
		file_offset = -1;

	return (file_offset);
}
daddr64_t
ubc_offtoblk(vnode_t vp, off_t offset)
{
	daddr64_t blkno;
	int error;

	if (UBCINVALID(vp))
		return ((daddr64_t)-1);

	error = VNOP_OFFTOBLK(vp, offset, &blkno);
	if (error)
		blkno = -1;

	return (blkno);
}
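/*
 * Illustrative sketch (not part of the original file): the two conversions
 * above are inverses supplied by the filesystem via VNOP_OFFTOBLK/VNOP_BLKTOOFF,
 * so a caller can map a file offset to its logical block and back, checking
 * for the -1 sentinel on error.  "my_offset_roundtrip" is hypothetical.
 */
#if 0
static int
my_offset_roundtrip(vnode_t vp, off_t offset, off_t *blk_start)
{
	daddr64_t blkno;

	blkno = ubc_offtoblk(vp, offset);
	if (blkno == (daddr64_t)-1)
		return (EINVAL);

	*blk_start = ubc_blktooff(vp, blkno);
	if (*blk_start == (off_t)-1)
		return (EINVAL);

	return (0);
}
#endif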
int
ubc_pages_resident(vnode_t vp)
{
	kern_return_t	kret;
	boolean_t	has_pages_resident;

	if ( !UBCINFOEXISTS(vp))
		return (0);

	kret = memory_object_pages_resident(vp->v_ubcinfo->ui_control, &has_pages_resident);

	if (kret != KERN_SUCCESS)
		return (0);

	if (has_pages_resident == TRUE)
		return (1);

	return (0);
}
/*
 * This interface will eventually be deprecated
 *
 * clean and/or invalidate a range in the memory object that backs this
 * vnode.  The start offset is truncated to the page boundary and the
 * size is adjusted to include the last page in the range.
 *
 * returns 1 for success, 0 for failure
 */
int
ubc_sync_range(vnode_t vp, off_t beg_off, off_t end_off, int flags)
{
	return (ubc_msync_internal(vp, beg_off, end_off, NULL, flags, NULL));
}
/*
 * clean and/or invalidate a range in the memory object that backs this
 * vnode.  The start offset is truncated to the page boundary and the
 * size is adjusted to include the last page in the range.
 */
errno_t
ubc_msync(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags)
{
	int retval;
	int io_errno = 0;

	if (resid_off)
		*resid_off = beg_off;

	retval = ubc_msync_internal(vp, beg_off, end_off, resid_off, flags, &io_errno);

	if (retval == 0 && io_errno == 0)
		return (EINVAL);
	return (io_errno);
}
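/*
 * Illustrative sketch (not part of the original file): an fsync-style caller
 * could use ubc_msync() to push every dirty page backed by the vnode, wait
 * for the I/O, and then drop the now-clean pages.  "my_flush_and_invalidate"
 * is a hypothetical helper.
 */
#if 0
static errno_t
my_flush_and_invalidate(vnode_t vp)
{
	errno_t error;

	/* write back all dirty pages in [0, EOF) and wait for completion */
	error = ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL,
			  UBC_PUSHDIRTY | UBC_SYNC);
	if (error)
		return (error);

	/* then discard the resident pages */
	return (ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL, UBC_INVALIDATE));
}
#endif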
/*
 * clean and/or invalidate a range in the memory object that backs this
 * vnode.  The start offset is truncated to the page boundary and the
 * size is adjusted to include the last page in the range.
 */
int
ubc_msync_internal(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags, int *io_errno)
{
	memory_object_size_t	tsize;
	kern_return_t		kret;
	int request_flags = 0;
	int flush_flags   = MEMORY_OBJECT_RETURN_NONE;

	if ( !UBCINFOEXISTS(vp))
		return (0);
	if (end_off <= beg_off)
		return (0);
	if ((flags & (UBC_INVALIDATE | UBC_PUSHDIRTY | UBC_PUSHALL)) == 0)
		return (0);

	if (flags & UBC_INVALIDATE)
		/*
		 * discard the resident pages
		 */
		request_flags = (MEMORY_OBJECT_DATA_FLUSH | MEMORY_OBJECT_DATA_NO_CHANGE);

	if (flags & UBC_SYNC)
		/*
		 * wait for all the I/O to complete before returning
		 */
		request_flags |= MEMORY_OBJECT_IO_SYNC;

	if (flags & UBC_PUSHDIRTY)
		/*
		 * we only return the dirty pages in the range
		 */
		flush_flags = MEMORY_OBJECT_RETURN_DIRTY;

	if (flags & UBC_PUSHALL)
		/*
		 * then return all the interesting pages in the range
		 * (both dirty and precious) to the pager
		 */
		flush_flags = MEMORY_OBJECT_RETURN_ALL;

	beg_off = trunc_page_64(beg_off);
	end_off = round_page_64(end_off);
	tsize	= (memory_object_size_t)end_off - beg_off;

	/* flush and/or invalidate pages in the range requested */
	kret = memory_object_lock_request(vp->v_ubcinfo->ui_control,
					  beg_off, tsize, resid_off, io_errno,
					  flush_flags, request_flags, VM_PROT_NO_CHANGE);

	return ((kret == KERN_SUCCESS) ? 1 : 0);
}
/*
 * The vnode is mapped explicitly, mark it so.
 */
__private_extern__ int
ubc_map(vnode_t vp, int flags)
{
	struct ubc_info *uip;
	int error = 0;
	int need_ref = 0;
	struct vfs_context context;

	if (vnode_getwithref(vp))
		return (0);

	if (UBCINFOEXISTS(vp)) {
		context.vc_proc = current_proc();
		context.vc_ucred = kauth_cred_get();

		error = VNOP_MMAP(vp, flags, &context);

		if (error == 0) {
			uip = vp->v_ubcinfo;

			if ( !ISSET(uip->ui_flags, UI_ISMAPPED))
				need_ref = 1;
			SET(uip->ui_flags, (UI_WASMAPPED | UI_ISMAPPED));

			if (need_ref)
				vnode_ref(vp);
		}
	}
	vnode_put(vp);

	return (error);
}
/*
 * destroy the named reference for a given vnode
 */
__private_extern__ int
ubc_destroy_named(struct vnode *vp)
{
	memory_object_control_t control;
	struct ubc_info *uip;
	kern_return_t kret;

	/*
	 * We may already have had the object terminated
	 * and the ubcinfo released as a side effect of
	 * some earlier processing.  If so, pretend we did
	 * it, because it probably was a result of our
	 * efforts.
	 */
	if (!UBCINFOEXISTS(vp))
		return (1);

	uip = vp->v_ubcinfo;

	/*
	 * Terminate the memory object.
	 * memory_object_destroy() will result in
	 * vnode_pager_no_senders().
	 * That will release the pager reference
	 * and the vnode will move to the free list.
	 */
	control = ubc_getobject(vp, UBC_HOLDOBJECT);
	if (control != MEMORY_OBJECT_CONTROL_NULL) {

		/*
		 * XXXXX - should we hold the vnode lock here?
		 */
		if (ISSET(vp->v_flag, VTERMINATE))
			panic("ubc_destroy_named: already terminating");
		SET(vp->v_flag, VTERMINATE);

		kret = memory_object_destroy(control, 0);
		if (kret != KERN_SUCCESS)
			return (0);

		/*
		 * memory_object_destroy() is asynchronous
		 * with respect to vnode_pager_no_senders().
		 * wait for vnode_pager_no_senders() to clear
		 * VNAMED_UBC
		 */
		vnode_lock(vp);
		while (ISSET(vp->v_lflag, VNAMED_UBC)) {
			(void)msleep((caddr_t)&vp->v_lflag, &vp->v_lock,
				     PINOD, "ubc_destroy_named", 0);
		}
		vnode_unlock(vp);
	}
	return (1);
}
/*
 * Find out whether a vnode is in use by UBC
 * Returns 1 if file is in use by UBC, 0 if not
 */
int
ubc_isinuse(struct vnode *vp, int busycount)
{
	if ( !UBCINFOEXISTS(vp))
		return (0);
	return (ubc_isinuse_locked(vp, busycount, 0));
}


int
ubc_isinuse_locked(struct vnode *vp, int busycount, int locked)
{
	int retval = 0;

	if (!locked)
		vnode_lock(vp);

	if ((vp->v_usecount - vp->v_kusecount) > busycount)
		retval = 1;

	if (!locked)
		vnode_unlock(vp);

	return (retval);
}
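/*
 * Illustrative sketch (not part of the original file): a filesystem or exec
 * path might use ubc_isinuse() to refuse an operation while references other
 * than the caller's own are still outstanding.  "my_busy_check" and the
 * busycount of 1 (standing in for the caller's own reference) are assumptions.
 */
#if 0
static int
my_busy_check(vnode_t vp)
{
	/* nonzero if someone beyond our own reference is using the file */
	if (ubc_isinuse(vp, 1))
		return (EBUSY);

	return (0);
}
#endif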
/*
 * MUST only be called by the VM
 */
__private_extern__ void
ubc_unmap(struct vnode *vp)
{
	struct vfs_context context;
	struct ubc_info *uip;
	int need_rele = 0;

	if (vnode_getwithref(vp))
		return;

	if (UBCINFOEXISTS(vp)) {
		uip = vp->v_ubcinfo;

		if (ISSET(uip->ui_flags, UI_ISMAPPED)) {
			CLR(uip->ui_flags, UI_ISMAPPED);
			need_rele = 1;
		}
		if (need_rele) {
			context.vc_proc = current_proc();
			context.vc_ucred = kauth_cred_get();
			(void)VNOP_MNOMAP(vp, &context);

			vnode_rele(vp);
		}
	}
	/*
	 * the drop of the vnode ref will cleanup
	 */
	vnode_put(vp);
}
kern_return_t
ubc_page_op(
	struct vnode	*vp,
	off_t		f_offset,
	int		ops,
	ppnum_t		*phys_entryp,
	int		*flagsp)
{
	memory_object_control_t	control;

	control = ubc_getobject(vp, UBC_FLAGS_NONE);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	return (memory_object_page_op(control,
		      (memory_object_offset_t)f_offset,
		      ops,
		      phys_entryp,
		      flagsp));
}
__private_extern__ kern_return_t
ubc_page_op_with_control(
	memory_object_control_t	control,
	off_t			f_offset,
	int			ops,
	ppnum_t			*phys_entryp,
	int			*flagsp)
{
	return (memory_object_page_op(control,
		      (memory_object_offset_t)f_offset,
		      ops,
		      phys_entryp,
		      flagsp));
}
kern_return_t
ubc_range_op(
	struct vnode	*vp,
	off_t		f_offset_beg,
	off_t		f_offset_end,
	int		ops,
	int		*range)
{
	memory_object_control_t	control;

	control = ubc_getobject(vp, UBC_FLAGS_NONE);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	return (memory_object_range_op(control,
		      (memory_object_offset_t)f_offset_beg,
		      (memory_object_offset_t)f_offset_end,
		      ops,
		      range));
}
kern_return_t
ubc_create_upl(
	struct vnode	*vp,
	off_t		f_offset,
	long		bufsize,
	upl_t		*uplp,
	upl_page_info_t	**plp,
	int		uplflags)
{
	memory_object_control_t	control;
	int			count;
	int			ubcflags;
	kern_return_t		kr;

	if (bufsize & 0xfff)
		return KERN_INVALID_ARGUMENT;

	if (uplflags & UPL_FOR_PAGEOUT) {
		uplflags &= ~UPL_FOR_PAGEOUT;
		ubcflags  =  UBC_FOR_PAGEOUT;
	} else
		ubcflags = UBC_FLAGS_NONE;

	control = ubc_getobject(vp, ubcflags);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	if (uplflags & UPL_WILL_BE_DUMPED) {
		uplflags &= ~UPL_WILL_BE_DUMPED;
		uplflags |= (UPL_NO_SYNC|UPL_SET_INTERNAL);
	} else
		uplflags |= (UPL_NO_SYNC|UPL_CLEAN_IN_PLACE|UPL_SET_INTERNAL);
	count = 0;

	kr = memory_object_upl_request(control, f_offset, bufsize,
				       uplp, NULL, &count, uplflags);
	if (plp != NULL)
		*plp = UPL_GET_INTERNAL_PAGE_LIST(*uplp);
	return kr;
}
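/*
 * Illustrative sketch (not part of the original file): the typical lifecycle
 * of a UPL created through the routines in this file -- create it against a
 * vnode, map it into the kernel map, do the copy or inspection, unmap, then
 * commit (or abort) the range.  "my_read_through_upl", the zero uplflags, and
 * the choice of commit/abort flags are assumptions for the sketch.
 */
#if 0
static kern_return_t
my_read_through_upl(vnode_t vp, off_t f_offset, long size)
{
	upl_t		upl;
	upl_page_info_t	*pl;
	vm_offset_t	addr;
	kern_return_t	kr;

	/* size must be page aligned for ubc_create_upl() */
	kr = ubc_create_upl(vp, f_offset, size, &upl, &pl, 0);
	if (kr != KERN_SUCCESS)
		return (kr);

	kr = ubc_upl_map(upl, &addr);
	if (kr != KERN_SUCCESS) {
		(void) ubc_upl_abort(upl, UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY);
		return (kr);
	}

	/* ... fill or inspect the pages at "addr" here ... */

	(void) ubc_upl_unmap(upl);
	return (ubc_upl_commit_range(upl, 0, (vm_size_t)size, UPL_COMMIT_FREE_ON_EMPTY));
}
#endif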
kern_return_t
ubc_upl_map(
	upl_t		upl,
	vm_offset_t	*dst_addr)
{
	return (vm_upl_map(kernel_map, upl, dst_addr));
}
kern_return_t
ubc_upl_unmap(
	upl_t	upl)
{
	return (vm_upl_unmap(kernel_map, upl));
}
kern_return_t
ubc_upl_commit(
	upl_t	upl)
{
	upl_page_info_t	*pl;
	kern_return_t	kr;

	pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
	kr = upl_commit(upl, pl, MAX_UPL_TRANSFER);
	upl_deallocate(upl);
	return kr;
}
kern_return_t
ubc_upl_commit_range(
	upl_t		upl,
	vm_offset_t	offset,
	vm_size_t	size,
	int		flags)
{
	upl_page_info_t	*pl;
	boolean_t	empty;
	kern_return_t	kr;

	if (flags & UPL_COMMIT_FREE_ON_EMPTY)
		flags |= UPL_COMMIT_NOTIFY_EMPTY;

	pl = UPL_GET_INTERNAL_PAGE_LIST(upl);

	kr = upl_commit_range(upl, offset, size, flags,
			      pl, MAX_UPL_TRANSFER, &empty);

	if ((flags & UPL_COMMIT_FREE_ON_EMPTY) && empty)
		upl_deallocate(upl);

	return kr;
}
kern_return_t
ubc_upl_abort_range(
	upl_t		upl,
	vm_offset_t	offset,
	vm_size_t	size,
	int		abort_flags)
{
	kern_return_t	kr;
	boolean_t	empty = FALSE;

	if (abort_flags & UPL_ABORT_FREE_ON_EMPTY)
		abort_flags |= UPL_ABORT_NOTIFY_EMPTY;

	kr = upl_abort_range(upl, offset, size, abort_flags, &empty);

	if ((abort_flags & UPL_ABORT_FREE_ON_EMPTY) && empty)
		upl_deallocate(upl);

	return kr;
}
kern_return_t
ubc_upl_abort(
	upl_t	upl,
	int	abort_type)
{
	kern_return_t	kr;

	kr = upl_abort(upl, abort_type);
	upl_deallocate(upl);
	return kr;
}
upl_page_info_t *
ubc_upl_pageinfo(
	upl_t	upl)
{
	return (UPL_GET_INTERNAL_PAGE_LIST(upl));
}
/************* UBC APIS **************/

int
UBCINFOMISSING(struct vnode * vp)
{
	return ((vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo == UBC_INFO_NULL));
}

int
UBCINFORECLAIMED(struct vnode * vp)
{
	return ((vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo == UBC_INFO_NULL));
}

int
UBCINFOEXISTS(struct vnode * vp)
{
	return ((vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo != UBC_INFO_NULL));
}

int
UBCISVALID(struct vnode * vp)
{
	return ((vp) && ((vp)->v_type == VREG) && !((vp)->v_flag & VSYSTEM));
}

int
UBCINVALID(struct vnode * vp)
{
	return (((vp) == NULL) || ((vp) && ((vp)->v_type != VREG))
		|| ((vp) && ((vp)->v_flag & VSYSTEM)));
}
int
UBCINFOCHECK(const char * fun, struct vnode * vp)
{
	if ((vp) && ((vp)->v_type == VREG) &&
	    ((vp)->v_ubcinfo == UBC_INFO_NULL)) {
		panic("%s: lost ubc_info", (fun));
		return (1);
	}
	return (0);
}