/*
 * Copyright (c) 1999-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 *	Author:	Umesh Vaishampayan [umeshv@apple.com]
 *		05-Aug-1999	umeshv	Created.
 *
 *	Functions related to Unified Buffer Cache.
 *
 *	Caller of UBC functions MUST have a valid reference on the vnode.
 */
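/*
 * Illustrative sketch (added commentary, not part of the original code):
 * a caller typically takes an iocount on the vnode before using any of
 * the routines below and drops it afterwards, e.g.
 *
 *	if (vnode_getwithref(vp) == 0) {
 *		(void) ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL, UBC_PUSHDIRTY);
 *		vnode_put(vp);
 *	}
 *
 * vnode_getwithref()/vnode_put() are the same KPI used by ubc_map() and
 * ubc_unmap() in this file; the UBC_PUSHDIRTY flush is just an example.
 */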
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mount_internal.h>
#include <sys/vnode_internal.h>
#include <sys/ubc_internal.h>
#include <sys/ucred.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>

#include <mach/mach_types.h>
#include <mach/memory_object_types.h>
#include <mach/memory_object_control.h>
#include <mach/vm_map.h>

#include <kern/kern_types.h>
#include <kern/zalloc.h>
#include <vm/vm_kern.h>
#include <vm/vm_protos.h> /* last */
#if DIAGNOSTIC
#define assert(cond)    \
    ((void) ((cond) ? 0 : panic("%s:%d (%s)", __FILE__, __LINE__, # cond)))
#else
#include <kern/assert.h>
#endif /* DIAGNOSTIC */
int	ubc_info_init_internal(struct vnode *vp, int withfsize, off_t filesize);
int	ubc_umcallback(vnode_t, void *);
int	ubc_isinuse_locked(vnode_t, int, int);
int	ubc_msync_internal(vnode_t, off_t, off_t, off_t *, int, int *);

struct zone	*ubc_info_zone;
/*
 *	Initialization of the zone for Unified Buffer Cache.
 */
__private_extern__ void
ubc_init()
{
	int	i;

	i = (vm_size_t) sizeof (struct ubc_info);
	/* XXX  the number of elements should be tied in to maxvnodes */
	ubc_info_zone = zinit (i, 10000*i, 8192, "ubc_info zone");
}
/*
 *	Initialize a ubc_info structure for a vnode.
 */
int
ubc_info_init(struct vnode *vp)
{
	return (ubc_info_init_internal(vp, 0, 0));
}

int
ubc_info_init_withsize(struct vnode *vp, off_t filesize)
{
	return (ubc_info_init_internal(vp, 1, filesize));
}
int
ubc_info_init_internal(struct vnode *vp, int withfsize, off_t filesize)
{
	register struct ubc_info	*uip;
	void				*pager;
	struct proc			*p = current_proc();
	int				error = 0;
	kern_return_t			kret;
	memory_object_control_t		control;

	uip = vp->v_ubcinfo;

	if (uip == UBC_INFO_NULL) {
		uip = (struct ubc_info *) zalloc(ubc_info_zone);
		bzero((char *)uip, sizeof(struct ubc_info));

		uip->ui_vnode = vp;
		uip->ui_flags = UI_INITED;
		uip->ui_ucred = NOCRED;
	}
#if DIAGNOSTIC
	else
		Debugger("ubc_info_init: already");
#endif /* DIAGNOSTIC */

	assert(uip->ui_flags != UI_NONE);
	assert(uip->ui_vnode == vp);

	/* now set this ubc_info in the vnode */
	vp->v_ubcinfo = uip;

	pager = (void *)vnode_pager_setup(vp, uip->ui_pager);
	assert(pager);

	SET(uip->ui_flags, UI_HASPAGER);
	uip->ui_pager = pager;

	/*
	 * Note: We can not use VNOP_GETATTR() to get an accurate
	 * value of ui_size.  Thanks to NFS: nfs_getattr() can call
	 * vinvalbuf(), and at that point the ubc_info is not set up
	 * to deal with it.
	 */

	/*
	 * create a vnode - vm_object association.
	 * memory_object_create_named() creates a "named" reference on the
	 * memory object; we hold this reference as long as the vnode is
	 * "alive."  Since memory_object_create_named() took its own reference
	 * on the vnode pager we passed it, we can drop the reference
	 * vnode_pager_setup() returned here.
	 */
	kret = memory_object_create_named(pager,
		(memory_object_size_t)uip->ui_size, &control);
	vnode_pager_deallocate(pager);
	if (kret != KERN_SUCCESS)
		panic("ubc_info_init: memory_object_create_named returned %d", kret);

	assert(control);
	uip->ui_control = control;	/* cache the value of the mo control */
	SET(uip->ui_flags, UI_HASOBJREF);	/* with a named reference */

	/* create a pager reference on the vnode */
	error = vnode_pager_vget(vp);
	if (error)
		panic("ubc_info_init: vnode_pager_vget error = %d", error);

	if (withfsize == 0) {
		struct vfs_context context;
		/* initialize the size */
		context.vc_proc = p;
		context.vc_ucred = kauth_cred_get();
		error = vnode_size(vp, &uip->ui_size, &context);
		if (error)
			uip->ui_size = 0;
	} else {
		uip->ui_size = filesize;
	}
	vp->v_lflag |= VNAMED_UBC;

	return (error);
}
/* Free the ubc_info */
static void
ubc_info_free(struct ubc_info *uip)
{
	kauth_cred_t credp;

	credp = uip->ui_ucred;
	if (credp != NOCRED) {
		uip->ui_ucred = NOCRED;
		kauth_cred_rele(credp);
	}

	if (uip->ui_control != MEMORY_OBJECT_CONTROL_NULL)
		memory_object_control_deallocate(uip->ui_control);

	cluster_release(uip);

	zfree(ubc_info_zone, (vm_offset_t)uip);
}

void
ubc_info_deallocate(struct ubc_info *uip)
{
	ubc_info_free(uip);
}
/*
 * Communicate to the VM the size change of the file
 * returns 1 on success, 0 on failure
 */
int
ubc_setsize(struct vnode *vp, off_t nsize)
{
	off_t osize;	/* ui_size before change */
	off_t lastpg, olastpgend, lastoff;
	struct ubc_info *uip;
	memory_object_control_t control;
	kern_return_t kret;

	if (nsize < (off_t)0)
		return (0);

	if (!UBCINFOEXISTS(vp))
		return (0);

	uip = vp->v_ubcinfo;
	osize = uip->ui_size;	/* call ubc_getsize() ??? */
	/* Update the size before flushing the VM */
	uip->ui_size = nsize;

	if (nsize >= osize)	/* Nothing more to do */
		return (1);	/* return success */

	/*
	 * When the file shrinks, invalidate the pages beyond the
	 * new size.  Also get rid of garbage beyond nsize on the
	 * last page.  The ui_size already has the nsize.  This
	 * ensures that the pageout would not write beyond the new
	 * end of the file.
	 */
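	/*
	 * Worked example (added commentary, assuming 4K pages): shrinking
	 * from osize = 0x5000 to nsize = 0x1800 gives
	 * lastpg = trunc_page_64(0x1800) = 0x1000,
	 * olastpgend = round_page_64(0x5000) = 0x5000 and lastoff = 0x800,
	 * so the partially valid last page is flushed and the range
	 * [0x1000, 0x5000) is then invalidated below.
	 */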
	lastpg = trunc_page_64(nsize);
	olastpgend = round_page_64(osize);
	control = uip->ui_control;
	assert(control);
	lastoff = (nsize & PAGE_MASK_64);

	if (lastoff == 0) {
		/*
		 * If the new length is a multiple of the page size, there is
		 * nothing to flush on the last page; invalidating is sufficient.
		 */
		/* invalidate last page and old contents beyond nsize */
		kret = memory_object_lock_request(control,
			    (memory_object_offset_t)lastpg,
			    (memory_object_size_t)(olastpgend - lastpg), NULL, NULL,
			    MEMORY_OBJECT_RETURN_NONE, MEMORY_OBJECT_DATA_FLUSH,
			    VM_PROT_NO_CHANGE);
		if (kret != KERN_SUCCESS)
			printf("ubc_setsize: invalidate failed (error = %d)\n", kret);

		return ((kret == KERN_SUCCESS) ? 1 : 0);
	}

	/* flush the last page */
	kret = memory_object_lock_request(control,
		    (memory_object_offset_t)lastpg,
		    PAGE_SIZE_64, NULL, NULL,
		    MEMORY_OBJECT_RETURN_DIRTY, FALSE,
		    VM_PROT_NO_CHANGE);

	if (kret == KERN_SUCCESS) {
		/* invalidate last page and old contents beyond nsize */
		kret = memory_object_lock_request(control,
			    (memory_object_offset_t)lastpg,
			    (memory_object_size_t)(olastpgend - lastpg), NULL, NULL,
			    MEMORY_OBJECT_RETURN_NONE, MEMORY_OBJECT_DATA_FLUSH,
			    VM_PROT_NO_CHANGE);
		if (kret != KERN_SUCCESS)
			printf("ubc_setsize: invalidate failed (error = %d)\n", kret);
	} else
		printf("ubc_setsize: flush failed (error = %d)\n", kret);

	return ((kret == KERN_SUCCESS) ? 1 : 0);
}
/*
 * Get the size of the file
 */
off_t
ubc_getsize(struct vnode *vp)
{
	/*
	 * people depend on the side effect of this working this way,
	 * as they call this for directories as well
	 */
	if (!UBCINFOEXISTS(vp))
		return ((off_t)0);
	return (vp->v_ubcinfo->ui_size);
}
/*
 * call ubc_sync_range(vp, 0, EOF, UBC_PUSHALL) on all the vnodes
 * for this mount point.
 * returns 1 on success, 0 on failure
 */
__private_extern__ int
ubc_umount(struct mount *mp)
{
	vnode_iterate(mp, 0, ubc_umcallback, 0);
	return (0);
}

int
ubc_umcallback(vnode_t vp, __unused void * args)
{
	if (UBCINFOEXISTS(vp)) {
		(void) ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL, UBC_PUSHALL);
	}
	return (VNODE_RETURNED);
}
/* Get the credentials */
kauth_cred_t
ubc_getcred(struct vnode *vp)
{
	if (UBCINFOEXISTS(vp))
		return (vp->v_ubcinfo->ui_ucred);

	return (NOCRED);
}
/*
 * Set the credentials
 * existing credentials are not changed
 * returns 1 on success and 0 on failure
 */
int
ubc_setcred(struct vnode *vp, struct proc *p)
{
	struct ubc_info *uip;
	kauth_cred_t credp;

	if ( !UBCINFOEXISTS(vp))
		return (0);

	uip = vp->v_ubcinfo;
	credp = uip->ui_ucred;

	if (credp == NOCRED) {
		uip->ui_ucred = kauth_cred_proc_ref(p);
	}
	return (1);
}
/* Get the pager */
__private_extern__ memory_object_t
ubc_getpager(struct vnode *vp)
{
	if (UBCINFOEXISTS(vp))
		return (vp->v_ubcinfo->ui_pager);

	return (0);
}
/*
 * Get the memory object associated with this vnode.
 * If the vnode was reactivated, the memory object would not exist.
 * Unless "do not reactivate" was specified, look it up using the pager.
 * If hold was requested, create an object reference if one does not
 * exist already.
 */
memory_object_control_t
ubc_getobject(struct vnode *vp, __unused int flags)
{
	if (UBCINFOEXISTS(vp))
		return ((vp->v_ubcinfo->ui_control));

	return (MEMORY_OBJECT_CONTROL_NULL);
}
off_t
ubc_blktooff(vnode_t vp, daddr64_t blkno)
{
	off_t file_offset;
	int error;

	if (UBCINVALID(vp))
		return ((off_t)-1);

	error = VNOP_BLKTOOFF(vp, blkno, &file_offset);
	if (error)
		file_offset = -1;

	return (file_offset);
}

daddr64_t
ubc_offtoblk(vnode_t vp, off_t offset)
{
	daddr64_t blkno;
	int error = 0;

	if (UBCINVALID(vp))
		return ((daddr64_t)-1);

	error = VNOP_OFFTOBLK(vp, offset, &blkno);
	if (error)
		blkno = -1;

	return (blkno);
}
int
ubc_pages_resident(vnode_t vp)
{
	kern_return_t	kret;
	boolean_t	has_pages_resident;

	if ( !UBCINFOEXISTS(vp))
		return (0);

	kret = memory_object_pages_resident(vp->v_ubcinfo->ui_control, &has_pages_resident);

	if (kret != KERN_SUCCESS)
		return (0);

	if (has_pages_resident == TRUE)
		return (1);

	return (0);
}
/*
 * This interface will eventually be deprecated
 *
 * clean and/or invalidate a range in the memory object that backs this
 * vnode. The start offset is truncated to the page boundary and the
 * size is adjusted to include the last page in the range.
 *
 * returns 1 for success, 0 for failure
 */
int
ubc_sync_range(vnode_t vp, off_t beg_off, off_t end_off, int flags)
{
	return (ubc_msync_internal(vp, beg_off, end_off, NULL, flags, NULL));
}
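/*
 * Added commentary: new callers should prefer ubc_msync(), which reports an
 * errno and can return a residual offset; for example
 * ubc_msync(vp, beg_off, end_off, NULL, UBC_PUSHDIRTY) is roughly the
 * errno-returning equivalent of ubc_sync_range(vp, beg_off, end_off,
 * UBC_PUSHDIRTY).
 */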
/*
 * clean and/or invalidate a range in the memory object that backs this
 * vnode. The start offset is truncated to the page boundary and the
 * size is adjusted to include the last page in the range.
 *
 * returns an errno, 0 on success
 */
errno_t
ubc_msync(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags)
{
	int retval;
	int io_errno = 0;

	if (resid_off)
		*resid_off = beg_off;

	retval = ubc_msync_internal(vp, beg_off, end_off, resid_off, flags, &io_errno);

	if (retval == 0 && io_errno == 0)
		return (EINVAL);
	return (io_errno);
}
/*
 * clean and/or invalidate a range in the memory object that backs this
 * vnode. The start offset is truncated to the page boundary and the
 * size is adjusted to include the last page in the range.
 */
int
ubc_msync_internal(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags, int *io_errno)
{
	memory_object_size_t	tsize;
	kern_return_t		kret;
	int request_flags = 0;
	int flush_flags   = MEMORY_OBJECT_RETURN_NONE;

	if ( !UBCINFOEXISTS(vp))
		return (0);
	if (end_off <= beg_off)
		return (0);
	if ((flags & (UBC_INVALIDATE | UBC_PUSHDIRTY | UBC_PUSHALL)) == 0)
		return (0);

	if (flags & UBC_INVALIDATE)
		/*
		 * discard the resident pages
		 */
		request_flags = (MEMORY_OBJECT_DATA_FLUSH | MEMORY_OBJECT_DATA_NO_CHANGE);

	if (flags & UBC_SYNC)
		/*
		 * wait for all the I/O to complete before returning
		 */
		request_flags |= MEMORY_OBJECT_IO_SYNC;

	if (flags & UBC_PUSHDIRTY)
		/*
		 * we only return the dirty pages in the range
		 */
		flush_flags = MEMORY_OBJECT_RETURN_DIRTY;

	if (flags & UBC_PUSHALL)
		/*
		 * return all the interesting pages in the range
		 * (both dirty and precious)
		 */
		flush_flags = MEMORY_OBJECT_RETURN_ALL;

	beg_off = trunc_page_64(beg_off);
	end_off = round_page_64(end_off);
	tsize   = (memory_object_size_t)end_off - beg_off;

	/* flush and/or invalidate pages in the range requested */
	kret = memory_object_lock_request(vp->v_ubcinfo->ui_control,
					  beg_off, tsize, resid_off, io_errno,
					  flush_flags, request_flags, VM_PROT_NO_CHANGE);

	return ((kret == KERN_SUCCESS) ? 1 : 0);
}
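/*
 * Illustrative sketch (added commentary, not part of the original code):
 * a filesystem that wants the dirty pages of a range written back, waited
 * for, and then dropped from the cache could combine the flags handled
 * above, e.g.
 *
 *	(void) ubc_msync(vp, foffset, foffset + length, NULL,
 *	    UBC_PUSHDIRTY | UBC_INVALIDATE | UBC_SYNC);
 *
 * UBC_SYNC maps to MEMORY_OBJECT_IO_SYNC, so the request does not return
 * until the pageouts have completed.
 */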
/*
 * The vnode is mapped explicitly, mark it so.
 */
__private_extern__ int
ubc_map(vnode_t vp, int flags)
{
	struct ubc_info *uip;
	int error = 0;
	int need_ref = 0;
	struct vfs_context context;

	if (vnode_getwithref(vp))
		return (0);

	if (UBCINFOEXISTS(vp)) {
		context.vc_proc = current_proc();
		context.vc_ucred = kauth_cred_get();

		error = VNOP_MMAP(vp, flags, &context);

		if (error != EPERM)
			error = 0;

		if (error == 0) {
			uip = vp->v_ubcinfo;

			if ( !ISSET(uip->ui_flags, UI_ISMAPPED))
				need_ref = 1;
			SET(uip->ui_flags, (UI_WASMAPPED | UI_ISMAPPED));

			if (need_ref)
				vnode_ref(vp);
		}
	}
	vnode_put(vp);

	return (error);
}
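/*
 * Added commentary: UI_ISMAPPED is cleared again by ubc_unmap() below,
 * while UI_WASMAPPED is never cleared in this file, so later code can tell
 * that the file has been memory mapped at some point in its life.
 */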
/*
 * destroy the named reference for a given vnode
 */
__private_extern__ int
ubc_destroy_named(struct vnode *vp)
{
	memory_object_control_t control;
	struct ubc_info *uip;
	kern_return_t kret;

	/*
	 * We may already have had the object terminated
	 * and the ubcinfo released as a side effect of
	 * some earlier processing.  If so, pretend we did
	 * it, because it probably was a result of our
	 * efforts.
	 */
	if (!UBCINFOEXISTS(vp))
		return (1);

	uip = vp->v_ubcinfo;

	/*
	 * Terminate the memory object.
	 * memory_object_destroy() will result in
	 * vnode_pager_no_senders().
	 * That will release the pager reference
	 * and the vnode will move to the free list.
	 */
	control = ubc_getobject(vp, UBC_HOLDOBJECT);
	if (control != MEMORY_OBJECT_CONTROL_NULL) {

		/*
		 * XXXXX - should we hold the vnode lock here?
		 */
		if (ISSET(vp->v_flag, VTERMINATE))
			panic("ubc_destroy_named: already terminating");
		SET(vp->v_flag, VTERMINATE);

		kret = memory_object_destroy(control, 0);
		if (kret != KERN_SUCCESS)
			return (0);

		/*
		 * memory_object_destroy() is asynchronous
		 * with respect to vnode_pager_no_senders().
		 * wait for vnode_pager_no_senders() to clear
		 * VTERMINATE
		 */
		vnode_lock(vp);
		while (ISSET(vp->v_lflag, VNAMED_UBC)) {
			(void)msleep((caddr_t)&vp->v_lflag, &vp->v_lock,
				     PINOD, "ubc_destroy_named", 0);
		}
		vnode_unlock(vp);
	}
	return (1);
}
/*
 * Find out whether a vnode is in use by UBC
 * Returns 1 if file is in use by UBC, 0 if not
 */
int
ubc_isinuse(struct vnode *vp, int busycount)
{
	if ( !UBCINFOEXISTS(vp))
		return (0);
	return (ubc_isinuse_locked(vp, busycount, 0));
}

int
ubc_isinuse_locked(struct vnode *vp, int busycount, int locked)
{
	int retval = 0;

	if (!locked)
		vnode_lock(vp);

	if ((vp->v_usecount - vp->v_kusecount) > busycount)
		retval = 1;

	if (!locked)
		vnode_unlock(vp);

	return (retval);
}
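/*
 * Added commentary: the test above treats the vnode as "in use by UBC" when
 * its use count, minus the kernel-internal references tracked in
 * v_kusecount, exceeds the number of references the caller already accounts
 * for via busycount.
 */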
/*
 * MUST only be called by the VM
 */
__private_extern__ void
ubc_unmap(struct vnode *vp)
{
	struct vfs_context context;
	struct ubc_info *uip;
	int need_rele = 0;

	if (vnode_getwithref(vp))
		return;

	if (UBCINFOEXISTS(vp)) {
		uip = vp->v_ubcinfo;

		if (ISSET(uip->ui_flags, UI_ISMAPPED)) {
			CLR(uip->ui_flags, UI_ISMAPPED);
			need_rele = 1;
		}
		if (need_rele) {
			context.vc_proc = current_proc();
			context.vc_ucred = kauth_cred_get();
			(void)VNOP_MNOMAP(vp, &context);

			vnode_rele(vp);
		}
	}
	/*
	 * the drop of the vnode ref will clean up
	 */
	vnode_put(vp);
}
kern_return_t
ubc_page_op(
	struct vnode	*vp,
	off_t		f_offset,
	int		ops,
	ppnum_t		*phys_entryp,
	int		*flagsp)
{
	memory_object_control_t		control;

	control = ubc_getobject(vp, UBC_FLAGS_NONE);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	return (memory_object_page_op(control,
				      (memory_object_offset_t)f_offset,
				      ops,
				      phys_entryp,
				      flagsp));
}
__private_extern__ kern_return_t
ubc_page_op_with_control(
	memory_object_control_t	control,
	off_t			f_offset,
	int			ops,
	ppnum_t			*phys_entryp,
	int			*flagsp)
{
	return (memory_object_page_op(control,
				      (memory_object_offset_t)f_offset,
				      ops,
				      phys_entryp,
				      flagsp));
}
kern_return_t
ubc_range_op(
	struct vnode	*vp,
	off_t		f_offset_beg,
	off_t		f_offset_end,
	int		ops,
	int		*range)
{
	memory_object_control_t		control;

	control = ubc_getobject(vp, UBC_FLAGS_NONE);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	return (memory_object_range_op(control,
				       (memory_object_offset_t)f_offset_beg,
				       (memory_object_offset_t)f_offset_end,
				       ops,
				       range));
}
kern_return_t
ubc_create_upl(
	struct vnode	*vp,
	off_t		f_offset,
	long		bufsize,
	upl_t		*uplp,
	upl_page_info_t	**plp,
	int		uplflags)
{
	memory_object_control_t		control;
	int				count;
	int				ubcflags;
	kern_return_t			kr;

	if (bufsize & 0xfff)
		return KERN_INVALID_ARGUMENT;

	if (uplflags & UPL_FOR_PAGEOUT) {
		uplflags &= ~UPL_FOR_PAGEOUT;
		ubcflags  =  UBC_FOR_PAGEOUT;
	} else
		ubcflags = UBC_FLAGS_NONE;

	control = ubc_getobject(vp, ubcflags);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	if (uplflags & UPL_WILL_BE_DUMPED) {
		uplflags &= ~UPL_WILL_BE_DUMPED;
		uplflags |= (UPL_NO_SYNC|UPL_SET_INTERNAL);
	} else
		uplflags |= (UPL_NO_SYNC|UPL_CLEAN_IN_PLACE|UPL_SET_INTERNAL);
	count = 0;

	kr = memory_object_upl_request(control, f_offset, bufsize,
				       uplp, NULL, &count, uplflags);
	if (plp != NULL)
		*plp = UPL_GET_INTERNAL_PAGE_LIST(*uplp);
	return kr;
}
kern_return_t
ubc_upl_map(
	upl_t		upl,
	vm_offset_t	*dst_addr)
{
	return (vm_upl_map(kernel_map, upl, dst_addr));
}

kern_return_t
ubc_upl_unmap(
	upl_t		upl)
{
	return (vm_upl_unmap(kernel_map, upl));
}
kern_return_t
ubc_upl_commit(
	upl_t		upl)
{
	upl_page_info_t	*pl;
	kern_return_t	kr;

	pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
	kr = upl_commit(upl, pl, MAX_UPL_TRANSFER);
	upl_deallocate(upl);
	return kr;
}
kern_return_t
ubc_upl_commit_range(
	upl_t		upl,
	vm_offset_t	offset,
	vm_size_t	size,
	int		flags)
{
	upl_page_info_t	*pl;
	boolean_t	empty;
	kern_return_t	kr;

	if (flags & UPL_COMMIT_FREE_ON_EMPTY)
		flags |= UPL_COMMIT_NOTIFY_EMPTY;

	pl = UPL_GET_INTERNAL_PAGE_LIST(upl);

	kr = upl_commit_range(upl, offset, size, flags,
			      pl, MAX_UPL_TRANSFER, &empty);

	if ((flags & UPL_COMMIT_FREE_ON_EMPTY) && empty)
		upl_deallocate(upl);

	return kr;
}
kern_return_t
ubc_upl_abort_range(
	upl_t		upl,
	vm_offset_t	offset,
	vm_size_t	size,
	int		abort_flags)
{
	kern_return_t	kr;
	boolean_t	empty = FALSE;

	if (abort_flags & UPL_ABORT_FREE_ON_EMPTY)
		abort_flags |= UPL_ABORT_NOTIFY_EMPTY;

	kr = upl_abort_range(upl, offset, size, abort_flags, &empty);

	if ((abort_flags & UPL_ABORT_FREE_ON_EMPTY) && empty)
		upl_deallocate(upl);

	return kr;
}

kern_return_t
ubc_upl_abort(
	upl_t		upl,
	int		abort_type)
{
	kern_return_t	kr;

	kr = upl_abort(upl, abort_type);
	upl_deallocate(upl);
	return kr;
}

upl_page_info_t *
ubc_upl_pageinfo(
	upl_t		upl)
{
	return (UPL_GET_INTERNAL_PAGE_LIST(upl));
}
/************* UBC APIS **************/

int
UBCINFOMISSING(struct vnode * vp)
{
	return ((vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo == UBC_INFO_NULL));
}

int
UBCINFORECLAIMED(struct vnode * vp)
{
	return ((vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo == UBC_INFO_NULL));
}

int
UBCINFOEXISTS(struct vnode * vp)
{
	return ((vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo != UBC_INFO_NULL));
}

int
UBCISVALID(struct vnode * vp)
{
	return ((vp) && ((vp)->v_type == VREG) && !((vp)->v_flag & VSYSTEM));
}

int
UBCINVALID(struct vnode * vp)
{
	return (((vp) == NULL) || ((vp) && ((vp)->v_type != VREG))
		|| ((vp) && ((vp)->v_flag & VSYSTEM)));
}

int
UBCINFOCHECK(const char * fun, struct vnode * vp)
{
	if ((vp) && ((vp)->v_type == VREG) &&
	    ((vp)->v_ubcinfo == UBC_INFO_NULL)) {
		panic("%s: lost ubc_info", (fun));
		return (1);
	} else
		return (0);
}