/*
 * Copyright (c) 1999-2001 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 *	Author:	Umesh Vaishampayan [umeshv@apple.com]
 *		05-Aug-1999	umeshv	Created.
 *
 *	Functions related to Unified Buffer cache.
 *
 * Caller of UBC functions MUST have a valid reference on the vnode.
 *
 */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/ubc.h>
#include <sys/ucred.h>

#include <mach/mach_types.h>
#include <mach/memory_object_types.h>

#include <kern/zalloc.h>
#if DIAGNOSTIC
#define assert(cond)	\
	if (!(cond)) panic("%s:%d (%s)", __FILE__, __LINE__, # cond)
#else
#include <kern/assert.h>
#endif /* DIAGNOSTIC */
struct zone	*ubc_info_zone;
/* lock for changes to struct UBC */
static __inline__ void
ubc_lock(struct vnode *vp)
{
	/* For now, just use the v_interlock */
	simple_lock(&vp->v_interlock);
}
static __inline__ void
ubc_unlock(struct vnode *vp)
{
	/* For now, just use the v_interlock */
	simple_unlock(&vp->v_interlock);
}
/*
 * Initialization of the zone for Unified Buffer Cache.
 */
__private_extern__ void
ubc_init()
{
	int	i;

	i = (vm_size_t) sizeof (struct ubc_info);
	/* XXX  the number of elements should be tied in to maxvnodes */
	ubc_info_zone = zinit(i, 10000*i, 8192, "ubc_info zone");
	return;
}
/*
 * Initialize a ubc_info structure for a vnode.
 */
__private_extern__ int
ubc_info_init(struct vnode *vp)
{
	register struct ubc_info	*uip;
	void				*pager;
	struct vattr			vattr;
	struct proc			*p = current_proc();
	int				error = 0;
	kern_return_t			kret;
	memory_object_control_t		control;

	ubc_lock(vp);
	if (ISSET(vp->v_flag, VUINIT)) {
		/*
		 * other thread is already doing this
		 * wait till done
		 */
		while (ISSET(vp->v_flag, VUINIT)) {
			SET(vp->v_flag, VUWANT); /* XXX overloaded! */
			ubc_unlock(vp);
			(void) tsleep((caddr_t)vp, PINOD, "ubcinfo", 0);
			ubc_lock(vp);
		}
		ubc_unlock(vp);
		return (0);
	} else {
		SET(vp->v_flag, VUINIT);
	}

	uip = vp->v_ubcinfo;
	if ((uip == UBC_INFO_NULL) || (uip == UBC_NOINFO)) {
		ubc_unlock(vp);
		uip = (struct ubc_info *) zalloc(ubc_info_zone);
		uip->ui_pager = MEMORY_OBJECT_NULL;
		uip->ui_control = MEMORY_OBJECT_CONTROL_NULL;
		uip->ui_flags = UI_INITED;
		uip->ui_vnode = vp;
		uip->ui_ucred = NOCRED;
		uip->ui_refcount = 1;
		ubc_lock(vp);
	}
#if DIAGNOSTIC
	else
		Debugger("ubc_info_init: already");
#endif /* DIAGNOSTIC */

	assert(uip->ui_flags != UI_NONE);
	assert(uip->ui_vnode == vp);

	if (ISSET(uip->ui_flags, UI_HASPAGER))
		goto done;

	/* now set this ubc_info in the vnode */
	vp->v_ubcinfo = uip;
	SET(uip->ui_flags, UI_HASPAGER);
	ubc_unlock(vp);
	pager = (void *)vnode_pager_setup(vp, uip->ui_pager);
	assert(pager);
	ubc_setpager(vp, pager);

	/*
	 * Note: We can not use VOP_GETATTR() to get accurate
	 * value of ui_size. Thanks to NFS.
	 * nfs_getattr() can call vinvalbuf() and in this case
	 * ubc_info is not set up to deal with that.
	 */

	/*
	 * create a vnode - vm_object association
	 * memory_object_create_named() creates a "named" reference on the
	 * memory object we hold this reference as long as the vnode is
	 * "alive."  Since memory_object_create_named() took its own reference
	 * on the vnode pager we passed it, we can drop the reference
	 * vnode_pager_setup() returned here.
	 */
	kret = memory_object_create_named(pager,
		(memory_object_size_t)uip->ui_size, &control);
	vnode_pager_deallocate(pager);
	if (kret != KERN_SUCCESS)
		panic("ubc_info_init: memory_object_create_named returned %d", kret);

	assert(control);
	uip->ui_control = control;	/* cache the value of the mo control */
	SET(uip->ui_flags, UI_HASOBJREF);	/* with a named reference */
	/* create a pager reference on the vnode */
	error = vnode_pager_vget(vp);
	if (error)
		panic("ubc_info_init: vnode_pager_vget error = %d", error);

	/* initialize the size */
	error = VOP_GETATTR(vp, &vattr, p->p_ucred, p);

	ubc_lock(vp);
	uip->ui_size = (error ? 0 : vattr.va_size);

done:
	CLR(vp->v_flag, VUINIT);
	if (ISSET(vp->v_flag, VUWANT)) {
		CLR(vp->v_flag, VUWANT);
		ubc_unlock(vp);
		wakeup((caddr_t)vp);
	} else
		ubc_unlock(vp);

	return (error);
}
/* Free the ubc_info */
static void
ubc_info_free(struct ubc_info *uip)
{
	struct ucred *credp;

	credp = uip->ui_ucred;
	if (credp != NOCRED) {
		uip->ui_ucred = NOCRED;
		crfree(credp);
	}

	if (uip->ui_control != MEMORY_OBJECT_CONTROL_NULL)
		memory_object_control_deallocate(uip->ui_control);

	zfree(ubc_info_zone, (vm_offset_t)uip);
	return;
}
__private_extern__ void
ubc_info_deallocate(struct ubc_info *uip)
{
	assert(uip->ui_refcount > 0);

	if (uip->ui_refcount-- == 1)
		ubc_info_free(uip);
}
/*
 * Communicate with VM the size change of the file
 * returns 1 on success, 0 on failure
 */
int
ubc_setsize(struct vnode *vp, off_t nsize)
{
	off_t osize;	/* ui_size before change */
	off_t lastpg, olastpgend, lastoff;
	struct ubc_info *uip;
	memory_object_control_t control;
	kern_return_t kret;

	assert(nsize >= (off_t)0);

	if (UBCINVALID(vp))
		return (0);

	if (!UBCINFOEXISTS(vp))
		return (0);

	uip = vp->v_ubcinfo;
	osize = uip->ui_size;	/* call ubc_getsize() ??? */
	/* Update the size before flushing the VM */
	uip->ui_size = nsize;

	if (nsize >= osize)	/* Nothing more to do */
		return (1);	/* return success */

	/*
	 * When the file shrinks, invalidate the pages beyond the
	 * new size. Also get rid of garbage beyond nsize on the
	 * last page. The ui_size already has the nsize. This
	 * ensures that the pageout would not write beyond the new
	 * end of the file.
	 */

	lastpg = trunc_page_64(nsize);
	olastpgend = round_page_64(osize);
	control = uip->ui_control;
	assert(control);
	lastoff = (nsize & PAGE_MASK_64);

	/*
	 * If length is multiple of page size, we should not flush
	 * invalidating is sufficient
	 */
	if (!lastoff) {
		/* invalidate last page and old contents beyond nsize */
		kret = memory_object_lock_request(control,
			(memory_object_offset_t)lastpg,
			(memory_object_size_t)(olastpgend - lastpg),
			MEMORY_OBJECT_RETURN_NONE, MEMORY_OBJECT_DATA_FLUSH,
			VM_PROT_NO_CHANGE);
		if (kret != KERN_SUCCESS)
			printf("ubc_setsize: invalidate failed (error = %d)\n", kret);

		return ((kret == KERN_SUCCESS) ? 1 : 0);
	}

	/* flush the last page */
	kret = memory_object_lock_request(control,
		(memory_object_offset_t)lastpg,
		PAGE_SIZE_64,
		MEMORY_OBJECT_RETURN_DIRTY, FALSE,
		VM_PROT_NO_CHANGE);

	if (kret == KERN_SUCCESS) {
		/* invalidate last page and old contents beyond nsize */
		kret = memory_object_lock_request(control,
			(memory_object_offset_t)lastpg,
			(memory_object_size_t)(olastpgend - lastpg),
			MEMORY_OBJECT_RETURN_NONE, MEMORY_OBJECT_DATA_FLUSH,
			VM_PROT_NO_CHANGE);
		if (kret != KERN_SUCCESS)
			printf("ubc_setsize: invalidate failed (error = %d)\n", kret);
	} else
		printf("ubc_setsize: flush failed (error = %d)\n", kret);

	return ((kret == KERN_SUCCESS) ? 1 : 0);
}
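
/*
 * Illustrative sketch (not part of the original file): a filesystem's
 * truncate path is the typical caller of ubc_setsize().  Assuming the
 * on-disk size has already been updated to "new_size" and "vp" carries a
 * valid reference, the call would look like:
 *
 *	if (ubc_setsize(vp, (off_t)new_size) == 0)
 *		printf("truncate: ubc_setsize failed\n");
 *
 * "new_size" is a hypothetical local.  Growing a file only updates ui_size,
 * while shrinking also flushes and invalidates pages beyond the new EOF.
 */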
/*
 * Get the size of the file
 * For local file systems the size is locally cached. For NFS
 * there might be a network transaction for this.
 */
off_t
ubc_getsize(struct vnode *vp)
{
	/* XXX deal with NFS */
	return (vp->v_ubcinfo->ui_size);
}
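
/*
 * Illustrative sketch (not part of the original file): ubc_getsize() is
 * handy for bounding I/O against the cached size, for example in a
 * hypothetical read path:
 *
 *	off_t filesize = ubc_getsize(vp);
 *	if (uio->uio_offset >= filesize)
 *		return (0);
 *
 * The caller must already hold a valid vnode reference and a ubc_info must
 * exist, since this dereferences vp->v_ubcinfo directly.
 */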
/*
 * Caller indicates that the object corresponding to the vnode
 * can not be cached in object cache. Make it so.
 * returns 1 on success, 0 on failure
 */
int
ubc_uncache(struct vnode *vp)
{
	kern_return_t kret;
	struct ubc_info *uip;
	memory_object_control_t control;
	memory_object_perf_info_data_t perf;

	if (!UBCINFOEXISTS(vp))
		return (0);

	uip = vp->v_ubcinfo;

	assert(uip != UBC_INFO_NULL);

	/*
	 * AGE it so that vfree() can make sure that it
	 * would get recycled soon after the last reference is gone
	 * This will ensure that .nfs turds would not linger
	 */
	vagevp(vp);

	/* set the "do not cache" bit */
	SET(uip->ui_flags, UI_DONTCACHE);

	control = uip->ui_control;
	assert(control);

	perf.cluster_size = PAGE_SIZE; /* XXX use real cluster_size. */
	perf.may_cache = FALSE;
	kret = memory_object_change_attributes(control,
				MEMORY_OBJECT_PERFORMANCE_INFO,
				(memory_object_info_t) &perf,
				MEMORY_OBJECT_PERF_INFO_COUNT);

	if (kret != KERN_SUCCESS) {
		printf("ubc_uncache: memory_object_change_attributes_named "
			"kret = %d", kret);
		return (0);
	}

	ubc_release_named(vp);

	return (1);
}
/*
 * call ubc_clean() and ubc_uncache() on all the vnodes
 * for this mount point.
 * returns 1 on success, 0 on failure
 */
__private_extern__ int
ubc_umount(struct mount *mp)
{
	struct proc *p = current_proc();
	struct vnode *vp, *nvp;
	int ret = 1;

loop:
	simple_lock(&mntvnode_slock);
	for (vp = mp->mnt_vnodelist.lh_first; vp; vp = nvp) {
		if (vp->v_mount != mp) {
			simple_unlock(&mntvnode_slock);
			goto loop;
		}
		nvp = vp->v_mntvnodes.le_next;
		simple_unlock(&mntvnode_slock);
		if (UBCINFOEXISTS(vp)) {

			/*
			 * Must get a valid reference on the vnode
			 * before calling UBC functions
			 */
			if (vget(vp, 0, p)) {
				ret = 0;
				simple_lock(&mntvnode_slock);
				continue; /* move on to the next vnode */
			}
			ret &= ubc_clean(vp, 0); /* do not invalidate */
			ret &= ubc_uncache(vp);
			vrele(vp);
		}
		simple_lock(&mntvnode_slock);
	}
	simple_unlock(&mntvnode_slock);
	return (ret);
}
/*
 * Call ubc_umount() for all filesystems.
 * The list is traversed in reverse order
 * of mounting to avoid dependencies.
 */
__private_extern__ void
ubc_unmountall()
{
	struct mount *mp, *nmp;

	/*
	 * Since this only runs when rebooting, it is not interlocked.
	 */
	for (mp = mountlist.cqh_last; mp != (void *)&mountlist; mp = nmp) {
		nmp = mp->mnt_list.cqe_prev;
		(void) ubc_umount(mp);
	}
}
/* Get the credentials */
struct ucred *
ubc_getcred(struct vnode *vp)
{
	struct ubc_info *uip;

	uip = vp->v_ubcinfo;

	if (UBCINVALID(vp))
		return (NOCRED);

	return (uip->ui_ucred);
}
/*
 * Set the credentials
 * existing credentials are not changed
 * returns 1 on success and 0 on failure
 */
int
ubc_setcred(struct vnode *vp, struct proc *p)
{
	struct ubc_info *uip;
	struct ucred *credp;

	uip = vp->v_ubcinfo;

	if (UBCINVALID(vp))
		return (0);

	credp = uip->ui_ucred;
	if (credp == NOCRED) {
		crhold(p->p_ucred);
		uip->ui_ucred = p->p_ucred;
	}

	return (1);
}
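
/*
 * Illustrative sketch (not part of the original file): file systems that
 * issue delayed writes typically capture the opener's credentials once and
 * reuse them for later pageouts.  Assuming "vp" and "p" are the vnode and
 * process from a hypothetical open path:
 *
 *	(void) ubc_setcred(vp, p);
 *	...
 *	struct ucred *cred = ubc_getcred(vp);
 *
 * ubc_setcred() only takes effect the first time; existing credentials are
 * left untouched, and ubc_getcred() may return NOCRED if none were set.
 */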
/* Get the pager */
__private_extern__ memory_object_t
ubc_getpager(struct vnode *vp)
{
	struct ubc_info *uip;

	uip = vp->v_ubcinfo;

	if (UBCINVALID(vp))
		return (0);

	return (uip->ui_pager);
}
/*
 * Get the memory object associated with this vnode.
 * If the vnode was reactivated, memory object would not exist.
 * Unless "do not reactivate" was specified, look it up using the pager.
 * If hold was requested, create an object reference if one does not
 * already exist.
 */
memory_object_control_t
ubc_getobject(struct vnode *vp, int flags)
{
	struct ubc_info *uip;
	memory_object_control_t control;

	uip = vp->v_ubcinfo;

	if (UBCINVALID(vp))
		return (0);

	ubc_lock(vp);

	control = uip->ui_control;

	if ((flags & UBC_HOLDOBJECT) && (!ISSET(uip->ui_flags, UI_HASOBJREF))) {

		/*
		 * Take a temporary reference on the ubc info so that it won't go
		 * away during our recovery attempt.
		 */
		uip->ui_refcount++;
		ubc_unlock(vp);
		if (memory_object_recover_named(control, TRUE) == KERN_SUCCESS) {
			ubc_lock(vp);
			SET(uip->ui_flags, UI_HASOBJREF);
			ubc_unlock(vp);
		} else {
			control = MEMORY_OBJECT_CONTROL_NULL;
		}
		ubc_info_deallocate(uip);
	} else {
		ubc_unlock(vp);
	}

	return (control);
}
/* Set the pager */
int
ubc_setpager(struct vnode *vp, memory_object_t pager)
{
	struct ubc_info *uip;

	uip = vp->v_ubcinfo;

	if (UBCINVALID(vp))
		return (0);

	uip->ui_pager = pager;
	return (1);
}
int
ubc_setflags(struct vnode *vp, int flags)
{
	struct ubc_info *uip;

	if (UBCINVALID(vp))
		return (0);

	uip = vp->v_ubcinfo;

	SET(uip->ui_flags, flags);

	return (1);
}
int
ubc_clearflags(struct vnode *vp, int flags)
{
	struct ubc_info *uip;

	if (UBCINVALID(vp))
		return (0);

	uip = vp->v_ubcinfo;

	CLR(uip->ui_flags, flags);

	return (1);
}
int
ubc_issetflags(struct vnode *vp, int flags)
{
	struct ubc_info *uip;

	if (UBCINVALID(vp))
		return (0);

	uip = vp->v_ubcinfo;

	return (ISSET(uip->ui_flags, flags));
}
off_t
ubc_blktooff(struct vnode *vp, daddr_t blkno)
{
	off_t file_offset;
	int error;

	if (UBCINVALID(vp))
		return ((off_t)-1);

	error = VOP_BLKTOOFF(vp, blkno, &file_offset);
	if (error)
		file_offset = -1;

	return (file_offset);
}
daddr_t
ubc_offtoblk(struct vnode *vp, off_t offset)
{
	daddr_t blkno;
	int error = 0;

	if (UBCINVALID(vp)) {
		return ((daddr_t)-1);
	}

	error = VOP_OFFTOBLK(vp, offset, &blkno);
	if (error)
		blkno = -1;

	return (blkno);
}
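
/*
 * Illustrative sketch (not part of the original file): these two wrappers
 * simply round-trip through the filesystem's VOP_BLKTOOFF/VOP_OFFTOBLK.
 * For a hypothetical filesystem with 4K logical blocks one would expect:
 *
 *	daddr_t blk  = ubc_offtoblk(vp, (off_t)8192);	block 2
 *	off_t   boff = ubc_blktooff(vp, blk);		8192
 *
 * Both return -1 when the vnode is invalid or the underlying VOP fails.
 */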
/*
 * Cause the file data in VM to be pushed out to the storage
 * it also causes all currently valid pages to be released
 * returns 1 on success, 0 on failure
 */
int
ubc_clean(struct vnode *vp, int invalidate)
{
	off_t size;
	struct ubc_info *uip;
	memory_object_control_t control;
	kern_return_t kret;
	int flags = 0;

	if (UBCINVALID(vp))
		return (0);

	if (!UBCINFOEXISTS(vp))
		return (0);

	/*
	 * if invalidate was requested, write dirty data and then discard
	 * the resident pages
	 */
	if (invalidate)
		flags = (MEMORY_OBJECT_DATA_FLUSH | MEMORY_OBJECT_DATA_NO_CHANGE);

	uip = vp->v_ubcinfo;
	size = uip->ui_size;	/* call ubc_getsize() ??? */

	control = uip->ui_control;
	assert(control);

	vp->v_flag &= ~VHASDIRTY;

	/* Write the dirty data in the file and discard cached pages */
	kret = memory_object_lock_request(control,
			(memory_object_offset_t)0,
			(memory_object_size_t)round_page_64(size),
			MEMORY_OBJECT_RETURN_ALL, flags,
			VM_PROT_NO_CHANGE);

	if (kret != KERN_SUCCESS)
		printf("ubc_clean: clean failed (error = %d)\n", kret);

	return ((kret == KERN_SUCCESS) ? 1 : 0);
}
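
/*
 * Illustrative sketch (not part of the original file): a vnode reclaim or
 * unmount path would typically push dirty pages and drop the cached ones in
 * one call, assuming a valid reference on "vp":
 *
 *	if (ubc_clean(vp, 1) == 0)
 *		printf("reclaim: ubc_clean failed\n");
 *
 * With invalidate == 0 the dirty data is written back but resident pages are
 * kept; with invalidate != 0 the DATA_FLUSH flag discards them as well.
 */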
/*
 * Cause the file data in VM to be pushed out to the storage
 * currently valid pages are NOT invalidated
 * returns 1 on success, 0 on failure
 */
int
ubc_pushdirty(struct vnode *vp)
{
	off_t size;
	struct ubc_info *uip;
	memory_object_control_t control;
	kern_return_t kret;

	if (UBCINVALID(vp))
		return (0);

	if (!UBCINFOEXISTS(vp))
		return (0);

	uip = vp->v_ubcinfo;
	size = uip->ui_size;	/* call ubc_getsize() ??? */

	control = uip->ui_control;
	assert(control);

	vp->v_flag &= ~VHASDIRTY;

	/* Write the dirty data in the file */
	kret = memory_object_lock_request(control,
			(memory_object_offset_t)0,
			(memory_object_size_t)round_page_64(size),
			MEMORY_OBJECT_RETURN_DIRTY, FALSE,
			VM_PROT_NO_CHANGE);

	if (kret != KERN_SUCCESS)
		printf("ubc_pushdirty: flush failed (error = %d)\n", kret);

	return ((kret == KERN_SUCCESS) ? 1 : 0);
}
/*
 * Cause the file data in VM to be pushed out to the storage
 * currently valid pages are NOT invalidated
 * returns 1 on success, 0 on failure
 */
int
ubc_pushdirty_range(struct vnode *vp, off_t offset, off_t size)
{
	struct ubc_info *uip;
	memory_object_control_t control;
	kern_return_t kret;

	if (UBCINVALID(vp))
		return (0);

	if (!UBCINFOEXISTS(vp))
		return (0);

	uip = vp->v_ubcinfo;

	control = uip->ui_control;
	assert(control);

	/* Write any dirty pages in the requested range of the file: */
	kret = memory_object_lock_request(control,
			(memory_object_offset_t)offset,
			(memory_object_size_t)round_page_64(size),
			MEMORY_OBJECT_RETURN_DIRTY, FALSE,
			VM_PROT_NO_CHANGE);

	if (kret != KERN_SUCCESS)
		printf("ubc_pushdirty_range: flush failed (error = %d)\n", kret);

	return ((kret == KERN_SUCCESS) ? 1 : 0);
}
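
/*
 * Illustrative sketch (not part of the original file): an fsync-style path
 * could push either the whole file or just a byte range; both sizes are
 * rounded up to page boundaries internally.  Assuming "vp" is referenced and
 * "foff"/"len" are hypothetical byte offset/length values:
 *
 *	(void) ubc_pushdirty(vp);			whole file
 *	(void) ubc_pushdirty_range(vp, foff, len);	one range
 *
 * Resident pages are written back but remain valid in the cache.
 */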
/*
 * Make sure the vm object does not vanish
 * returns 1 if the hold count was incremented
 * returns 0 if the hold count was not incremented
 * This return value should be used to balance
 * ubc_hold() and ubc_rele().
 */
int
ubc_hold(struct vnode *vp)
{
	struct ubc_info *uip;
	memory_object_control_t object;

	if (UBCINVALID(vp))
		return (0);

	if (!UBCINFOEXISTS(vp)) {
		/* must be invalid or dying vnode */
		assert(UBCINVALID(vp) ||
			((vp->v_flag & VXLOCK) || (vp->v_flag & VTERMINATE)));
		return (0);
	}

	uip = vp->v_ubcinfo;
	assert(uip->ui_control != MEMORY_OBJECT_CONTROL_NULL);

	ubc_lock(vp);
	uip->ui_refcount++;

	if (!ISSET(uip->ui_flags, UI_HASOBJREF)) {
		ubc_unlock(vp);
		if (memory_object_recover_named(uip->ui_control, TRUE) != KERN_SUCCESS) {
			ubc_info_deallocate(uip);
			return (0);
		}
		ubc_lock(vp);
		SET(uip->ui_flags, UI_HASOBJREF);
		ubc_unlock(vp);
	} else
		ubc_unlock(vp);

	assert(uip->ui_refcount > 0);
	return (1);
}
/*
 * Drop the holdcount.
 * release the reference on the vm object if this is an "uncached"
 * ubc_info.
 */
void
ubc_rele(struct vnode *vp)
{
	struct ubc_info *uip;

	if (UBCINVALID(vp))
		return;

	if (!UBCINFOEXISTS(vp)) {
		/* nothing more to do for a dying vnode */
		if ((vp->v_flag & VXLOCK) || (vp->v_flag & VTERMINATE))
			return;
		panic("ubc_rele: can not");
	}

	uip = vp->v_ubcinfo;

	if (uip->ui_refcount == 1)
		panic("ubc_rele: ui_refcount");

	--uip->ui_refcount;

	if ((uip->ui_refcount == 1)
		&& ISSET(uip->ui_flags, UI_DONTCACHE))
		(void) ubc_release_named(vp);

	return;
}
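
/*
 * Illustrative sketch (not part of the original file): ubc_hold() and
 * ubc_rele() must balance.  A caller that needs the VM object pinned across
 * an operation would write, with a hypothetical do_pagework() helper:
 *
 *	if (ubc_hold(vp)) {
 *		do_pagework(vp);
 *		ubc_rele(vp);
 *	}
 *
 * Only call ubc_rele() when the matching ubc_hold() returned 1, since a
 * failed hold does not bump the reference count.
 */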
/*
 * The vnode is mapped explicitly, mark it so.
 */
__private_extern__ void
ubc_map(struct vnode *vp)
{
	struct ubc_info *uip;

	if (UBCINVALID(vp))
		return;

	if (!UBCINFOEXISTS(vp))
		return;

	ubc_lock(vp);
	uip = vp->v_ubcinfo;

	SET(uip->ui_flags, UI_WASMAPPED);
	uip->ui_mapped = 1;
	ubc_unlock(vp);

	return;
}
/*
 * Release the memory object reference on the vnode
 * only if it is not in use
 * Return 1 if the reference was released, 0 otherwise.
 */
int
ubc_release_named(struct vnode *vp)
{
	struct ubc_info *uip;
	memory_object_control_t control;
	kern_return_t kret;

	if (UBCINVALID(vp))
		return (0);

	if (!UBCINFOEXISTS(vp))
		return (0);

	uip = vp->v_ubcinfo;

	/* can not release held or mapped vnodes */
	if (ISSET(uip->ui_flags, UI_HASOBJREF) &&
		(uip->ui_refcount == 1) && !uip->ui_mapped) {
		control = uip->ui_control;
		assert(control);
		CLR(uip->ui_flags, UI_HASOBJREF);
		kret = memory_object_release_name(control,
				MEMORY_OBJECT_RESPECT_CACHE);
		return ((kret != KERN_SUCCESS) ? 0 : 1);
	} else
		return (0);
}
/*
 * This function used to be called by extensions directly.  Some may
 * still exist with this behavior.  In those cases, we will do the
 * release as part of reclaiming or cleaning the vnode.  We don't
 * need anything explicit - so just stub this out until those callers
 * get cleaned up.
 */
void
ubc_release(struct vnode *vp)
{
	return;
}
/*
 * destroy the named reference for a given vnode
 */
__private_extern__ int
ubc_destroy_named(struct vnode *vp)
{
	memory_object_control_t control;
	struct ubc_info *uip;
	kern_return_t kret;

	/*
	 * We may already have had the object terminated
	 * and the ubcinfo released as a side effect of
	 * some earlier processing.  If so, pretend we did
	 * it, because it probably was a result of our
	 * efforts.
	 */
	if (!UBCINFOEXISTS(vp))
		return (1);

	uip = vp->v_ubcinfo;

	/* can not destroy held vnodes */
	if (uip->ui_refcount > 1)
		return (0);

	/*
	 * Terminate the memory object.
	 * memory_object_destroy() will result in
	 * vnode_pager_no_senders().
	 * That will release the pager reference
	 * and the vnode will move to the free list.
	 */
	control = ubc_getobject(vp, UBC_HOLDOBJECT);
	if (control != MEMORY_OBJECT_CONTROL_NULL) {

		if (ISSET(vp->v_flag, VTERMINATE))
			panic("ubc_destroy_named: already terminating");
		SET(vp->v_flag, VTERMINATE);

		kret = memory_object_destroy(control, 0);
		if (kret != KERN_SUCCESS)
			return (0);

		/*
		 * memory_object_destroy() is asynchronous
		 * with respect to vnode_pager_no_senders().
		 * wait for vnode_pager_no_senders() to clear
		 * VTERMINATE
		 */
		while (ISSET(vp->v_flag, VTERMINATE)) {
			SET(vp->v_flag, VTERMWANT);
			(void)tsleep((caddr_t)&vp->v_ubcinfo,
					PINOD, "ubc_destroy_named", 0);
		}
	}
	return (1);
}
/*
 * Invalidate a range in the memory object that backs this
 * vnode. The offset is truncated to the page boundary and the
 * size is adjusted to include the last page in the range.
 */
int
ubc_invalidate(struct vnode *vp, off_t offset, size_t size)
{
	struct ubc_info *uip;
	memory_object_control_t control;
	kern_return_t kret;
	off_t toff;
	size_t tsize;

	if (UBCINVALID(vp))
		return (0);

	if (!UBCINFOEXISTS(vp))
		return (0);

	toff = trunc_page_64(offset);
	tsize = (size_t)(round_page_64(offset+size) - toff);
	uip = vp->v_ubcinfo;
	control = uip->ui_control;
	assert(control);

	/* invalidate pages in the range requested */
	kret = memory_object_lock_request(control,
			(memory_object_offset_t)toff,
			(memory_object_size_t)tsize,
			MEMORY_OBJECT_RETURN_NONE,
			(MEMORY_OBJECT_DATA_NO_CHANGE | MEMORY_OBJECT_DATA_FLUSH),
			VM_PROT_NO_CHANGE);
	if (kret != KERN_SUCCESS)
		printf("ubc_invalidate: invalidate failed (error = %d)\n", kret);

	return ((kret == KERN_SUCCESS) ? 1 : 0);
}
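
/*
 * Illustrative sketch (not part of the original file): because the range is
 * widened to page boundaries, invalidating a small span can discard whole
 * neighbouring pages.  For example, on a 4K-page system:
 *
 *	ubc_invalidate(vp, (off_t)100, (size_t)200);
 *
 * flushes and discards the entire first page (offsets 0-4095), so callers
 * must be prepared to lose clean cached data around the requested bytes.
 */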
/*
 * Find out whether a vnode is in use by UBC
 * Returns 1 if file is in use by UBC, 0 if not
 */
int
ubc_isinuse(struct vnode *vp, int tookref)
{
	int busycount = tookref ? 2 : 1;

	if (!UBCINFOEXISTS(vp))
		return (0);

	if (vp->v_usecount > busycount)
		return (1);

	if ((vp->v_usecount == busycount)
		&& (vp->v_ubcinfo->ui_mapped == 1))
		return (1);

	return (0);
}
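
/*
 * Illustrative sketch (not part of the original file): a caller deciding
 * whether a file is still mapped or open elsewhere (for example before
 * allowing an exclusive operation) would pass tookref == 1 if it already
 * holds its own usecount on the vnode:
 *
 *	if (ubc_isinuse(vp, 1))
 *		return (EBUSY);
 *
 * The tookref argument just raises the "busy" threshold so the caller's own
 * reference is not counted against the file.
 */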
/*
 * The backdoor routine to clear the ui_mapped.
 * MUST only be called by the VM
 *
 * Note that this routine is not called under funnel. There are numerous
 * things about the calling sequence that make this work on SMP.
 * Any code change in those paths can break this.
 */
__private_extern__ void
ubc_unmap(struct vnode *vp)
{
	struct ubc_info *uip;
	boolean_t funnel_state;

	if (UBCINVALID(vp))
		return;

	if (!UBCINFOEXISTS(vp))
		return;

	ubc_lock(vp);
	uip = vp->v_ubcinfo;
	uip->ui_mapped = 0;
	if ((uip->ui_refcount > 1) || !ISSET(uip->ui_flags, UI_DONTCACHE)) {
		ubc_unlock(vp);
		return;
	}
	ubc_unlock(vp);

	funnel_state = thread_funnel_set(kernel_flock, TRUE);
	(void) ubc_release_named(vp);
	(void) thread_funnel_set(kernel_flock, funnel_state);
}
kern_return_t
ubc_page_op(
	struct vnode	*vp,
	off_t		f_offset,
	int		ops,
	vm_offset_t	*phys_entryp,
	int		*flagsp)
{
	memory_object_control_t	control;

	control = ubc_getobject(vp, UBC_FLAGS_NONE);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	return (memory_object_page_op(control,
				(memory_object_offset_t)f_offset,
				ops,
				phys_entryp,
				flagsp));
}
kern_return_t
ubc_create_upl(
	struct vnode	*vp,
	off_t		f_offset,
	long		bufsize,
	upl_t		*uplp,
	upl_page_info_t	**plp,
	int		uplflags)
{
	memory_object_control_t	control;
	int			count;
	kern_return_t		kr;

	if (bufsize & 0xfff)
		return KERN_INVALID_ARGUMENT;

	control = ubc_getobject(vp, UBC_FLAGS_NONE);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	uplflags |= (UPL_NO_SYNC|UPL_CLEAN_IN_PLACE|UPL_SET_INTERNAL);
	count = 0;
	kr = memory_object_upl_request(control, f_offset, bufsize,
					uplp, NULL, &count, uplflags);
	if (plp != NULL)
		*plp = UPL_GET_INTERNAL_PAGE_LIST(*uplp);
	return kr;
}
kern_return_t
ubc_upl_map(
	upl_t		upl,
	vm_offset_t	*dst_addr)
{
	return (vm_upl_map(kernel_map, upl, dst_addr));
}
kern_return_t
ubc_upl_unmap(
	upl_t	upl)
{
	return (vm_upl_unmap(kernel_map, upl));
}
kern_return_t
ubc_upl_commit(
	upl_t		upl)
{
	upl_page_info_t	*pl;
	kern_return_t	kr;

	pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
	kr = upl_commit(upl, pl, MAX_UPL_TRANSFER);
	upl_deallocate(upl);
	return kr;
}
kern_return_t
ubc_upl_commit_range(
	upl_t		upl,
	vm_offset_t	offset,
	vm_size_t	size,
	int		flags)
{
	upl_page_info_t	*pl;
	boolean_t	empty;
	kern_return_t	kr;

	if (flags & UPL_COMMIT_FREE_ON_EMPTY)
		flags |= UPL_COMMIT_NOTIFY_EMPTY;

	pl = UPL_GET_INTERNAL_PAGE_LIST(upl);

	kr = upl_commit_range(upl, offset, size, flags,
				pl, MAX_UPL_TRANSFER, &empty);

	if ((flags & UPL_COMMIT_FREE_ON_EMPTY) && empty)
		upl_deallocate(upl);

	return kr;
}
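
/*
 * Illustrative sketch (not part of the original file): the usual UPL life
 * cycle pairs ubc_create_upl() with a map/unmap and a commit or abort.
 * Assuming "vp" is referenced and "foff"/"len" are hypothetical page-aligned
 * offset/length values:
 *
 *	upl_t upl;
 *	upl_page_info_t *pl;
 *	vm_offset_t kva;
 *
 *	if (ubc_create_upl(vp, foff, len, &upl, &pl, 0) == KERN_SUCCESS) {
 *		ubc_upl_map(upl, &kva);
 *		... touch the pages through kva ...
 *		ubc_upl_unmap(upl);
 *		ubc_upl_commit_range(upl, 0, len, UPL_COMMIT_FREE_ON_EMPTY);
 *	}
 *
 * UPL_COMMIT_FREE_ON_EMPTY lets the commit also deallocate the UPL once all
 * pages have been handled, so no separate abort or commit call is needed.
 */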
kern_return_t
ubc_upl_abort_range(
	upl_t		upl,
	vm_offset_t	offset,
	vm_size_t	size,
	int		abort_flags)
{
	kern_return_t	kr;
	boolean_t	empty = FALSE;

	if (abort_flags & UPL_ABORT_FREE_ON_EMPTY)
		abort_flags |= UPL_ABORT_NOTIFY_EMPTY;

	kr = upl_abort_range(upl, offset, size, abort_flags, &empty);

	if ((abort_flags & UPL_ABORT_FREE_ON_EMPTY) && empty)
		upl_deallocate(upl);

	return kr;
}
kern_return_t
ubc_upl_abort(
	upl_t		upl,
	int		abort_type)
{
	kern_return_t	kr;

	kr = upl_abort(upl, abort_type);
	upl_deallocate(upl);
	return kr;
}
upl_page_info_t *
ubc_upl_pageinfo(
	upl_t		upl)
{
	return (UPL_GET_INTERNAL_PAGE_LIST(upl));
}