]> git.saurik.com Git - apple/xnu.git/blame_incremental - bsd/kern/ubc_subr.c
xnu-792.2.4.tar.gz
[apple/xnu.git] / bsd / kern / ubc_subr.c
... / ...
CommitLineData
1/*
2 * Copyright (c) 1999-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22/*
23 * File: ubc_subr.c
24 * Author: Umesh Vaishampayan [umeshv@apple.com]
25 * 05-Aug-1999 umeshv Created.
26 *
27 * Functions related to Unified Buffer cache.
28 *
29 * Caller of UBC functions MUST have a valid reference on the vnode.
30 *
31 */
32
33#undef DIAGNOSTIC
34#define DIAGNOSTIC 1
35
36#include <sys/types.h>
37#include <sys/param.h>
38#include <sys/systm.h>
39#include <sys/lock.h>
40#include <sys/mman.h>
41#include <sys/mount_internal.h>
42#include <sys/vnode_internal.h>
43#include <sys/ubc_internal.h>
44#include <sys/ucred.h>
45#include <sys/proc_internal.h>
46#include <sys/kauth.h>
47#include <sys/buf.h>
48
49#include <mach/mach_types.h>
50#include <mach/memory_object_types.h>
51#include <mach/memory_object_control.h>
52#include <mach/vm_map.h>
53#include <mach/upl.h>
54
55#include <kern/kern_types.h>
56#include <kern/zalloc.h>
57#include <vm/vm_kern.h>
58#include <vm/vm_protos.h> /* last */
59
60#if DIAGNOSTIC
61#if defined(assert)
62#undef assert()
63#endif
64#define assert(cond) \
65 ((void) ((cond) ? 0 : panic("%s:%d (%s)", __FILE__, __LINE__, # cond)))
66#else
67#include <kern/assert.h>
68#endif /* DIAGNOSTIC */
69
70int ubc_info_init_internal(struct vnode *vp, int withfsize, off_t filesize);
71int ubc_umcallback(vnode_t, void *);
72int ubc_isinuse_locked(vnode_t, int, int);
73int ubc_msync_internal(vnode_t, off_t, off_t, off_t *, int, int *);
74
75struct zone *ubc_info_zone;
76
77/*
78 * Initialization of the zone for Unified Buffer Cache.
79 */
80__private_extern__ void
81ubc_init()
82{
83 int i;
84
85 i = (vm_size_t) sizeof (struct ubc_info);
86 /* XXX the number of elements should be tied in to maxvnodes */
87 ubc_info_zone = zinit (i, 10000*i, 8192, "ubc_info zone");
88 return;
89}
90
/*
 * Initialize a ubc_info structure for a vnode, querying the file size
 * from the filesystem.  Returns 0 on success or an errno.
 */
int
ubc_info_init(struct vnode *vp)
{
	int result;

	result = ubc_info_init_internal(vp, 0, 0);
	return (result);
}
99int
100ubc_info_init_withsize(struct vnode *vp, off_t filesize)
101{
102 return(ubc_info_init_internal(vp, 1, filesize));
103}
104
/*
 * Common implementation for ubc_info_init() / ubc_info_init_withsize().
 *
 * Allocates a zeroed ubc_info for 'vp' if it has none (on DIAGNOSTIC
 * builds, drops into the debugger if one already exists), creates the
 * vnode pager, establishes the vnode <-> VM object association via
 * memory_object_create_named(), and records the file size:
 *   withfsize == 0: size queried with vnode_size(); on error ui_size
 *                   falls back to 0 and the errno is returned;
 *   withfsize != 0: 'filesize' is used as-is.
 *
 * Returns 0 on success, or the vnode_size() errno.
 * Panics if the named memory object cannot be created.
 */
int
ubc_info_init_internal(struct vnode *vp, int withfsize, off_t filesize)
{
	register struct ubc_info *uip;
	void *pager;
	struct proc *p = current_proc();
	int error = 0;
	kern_return_t kret;
	memory_object_control_t control;

	uip = vp->v_ubcinfo;

	if (uip == UBC_INFO_NULL) {

		uip = (struct ubc_info *) zalloc(ubc_info_zone);
		bzero((char *)uip, sizeof(struct ubc_info));

		uip->ui_vnode = vp;
		uip->ui_flags = UI_INITED;
		uip->ui_ucred = NOCRED;
	}
#if DIAGNOSTIC
	else
		Debugger("ubc_info_init: already");
#endif /* DIAGNOSTIC */

	assert(uip->ui_flags != UI_NONE);
	assert(uip->ui_vnode == vp);

	/* now set this ubc_info in the vnode */
	vp->v_ubcinfo = uip;

	pager = (void *)vnode_pager_setup(vp, uip->ui_pager);
	assert(pager);

	SET(uip->ui_flags, UI_HASPAGER);
	uip->ui_pager = pager;

	/*
	 * Note: We can not use VNOP_GETATTR() to get accurate
	 * value of ui_size. Thanks to NFS.
	 * nfs_getattr() can call vinvalbuf() and in this case
	 * ubc_info is not set up to deal with that.
	 * So use bogus size.
	 */

	/*
	 * create a vnode - vm_object association
	 * memory_object_create_named() creates a "named" reference on the
	 * memory object we hold this reference as long as the vnode is
	 * "alive."  Since memory_object_create_named() took its own reference
	 * on the vnode pager we passed it, we can drop the reference
	 * vnode_pager_setup() returned here.
	 */
	kret = memory_object_create_named(pager,
		(memory_object_size_t)uip->ui_size, &control);
	vnode_pager_deallocate(pager);
	if (kret != KERN_SUCCESS)
		panic("ubc_info_init: memory_object_create_named returned %d", kret);

	assert(control);
	uip->ui_control = control;	/* cache the value of the mo control */
	SET(uip->ui_flags, UI_HASOBJREF);	/* with a named reference */
#if 0
	/* create a pager reference on the vnode */
	error = vnode_pager_vget(vp);
	if (error)
		panic("ubc_info_init: vnode_pager_vget error = %d", error);
#endif
	if (withfsize == 0) {
		struct vfs_context context;
		/* initialize the size from the filesystem */
		context.vc_proc = p;
		context.vc_ucred = kauth_cred_get();
		error = vnode_size(vp, &uip->ui_size, &context);
		if (error)
			uip->ui_size = 0;
	} else {
		uip->ui_size = filesize;
	}
	/* mark the vnode as holding a named reference on the object */
	vp->v_lflag |= VNAMED_UBC;

	return (error);
}
189
190/* Free the ubc_info */
191static void
192ubc_info_free(struct ubc_info *uip)
193{
194 kauth_cred_t credp;
195
196 credp = uip->ui_ucred;
197 if (credp != NOCRED) {
198 uip->ui_ucred = NOCRED;
199 kauth_cred_rele(credp);
200 }
201
202 if (uip->ui_control != MEMORY_OBJECT_CONTROL_NULL)
203 memory_object_control_deallocate(uip->ui_control);
204
205 cluster_release(uip);
206
207 zfree(ubc_info_zone, (vm_offset_t)uip);
208 return;
209}
210
/* Public wrapper: dispose of a ubc_info. */
void
ubc_info_deallocate(struct ubc_info *uip)
{
	ubc_info_free(uip);
}
216
/*
 * Communicate with VM the size change of the file.
 * returns 1 on success, 0 on failure
 *
 * ui_size is updated BEFORE any flushing so that a concurrent pageout
 * cannot write past the new EOF.  Growing the file needs no VM work.
 * Shrinking invalidates all pages wholly beyond the new size and, when
 * the new EOF falls mid-page, first pushes the last page's dirty data
 * so stale bytes beyond nsize are not left in the cache.
 */
int
ubc_setsize(struct vnode *vp, off_t nsize)
{
	off_t osize;	/* ui_size before change */
	off_t lastpg, olastpgend, lastoff;
	struct ubc_info *uip;
	memory_object_control_t control;
	kern_return_t kret;

	if (nsize < (off_t)0)
		return (0);

	if (!UBCINFOEXISTS(vp))
		return (0);

	uip = vp->v_ubcinfo;
	osize = uip->ui_size;	/* call ubc_getsize() ??? */
	/* Update the size before flushing the VM */
	uip->ui_size = nsize;

	if (nsize >= osize)	/* Nothing more to do */
		return (1);	/* return success */

	/*
	 * When the file shrinks, invalidate the pages beyond the
	 * new size. Also get rid of garbage beyond nsize on the
	 * last page. The ui_size already has the nsize. This
	 * insures that the pageout would not write beyond the new
	 * end of the file.
	 */

	lastpg = trunc_page_64(nsize);		/* page containing the new EOF */
	olastpgend = round_page_64(osize);	/* old size, rounded up to a page */
	control = uip->ui_control;
	assert(control);
	lastoff = (nsize & PAGE_MASK_64);	/* new EOF's offset within its page */

	/*
	 * If length is multiple of page size, we should not flush
	 * invalidating is sufficient
	 */
	if (!lastoff) {
		/* invalidate last page and old contents beyond nsize */
		kret = memory_object_lock_request(control,
			(memory_object_offset_t)lastpg,
			(memory_object_size_t)(olastpgend - lastpg), NULL, NULL,
			MEMORY_OBJECT_RETURN_NONE, MEMORY_OBJECT_DATA_FLUSH,
			VM_PROT_NO_CHANGE);
		if (kret != KERN_SUCCESS)
			printf("ubc_setsize: invalidate failed (error = %d)\n", kret);

		return ((kret == KERN_SUCCESS) ? 1 : 0);
	}

	/* flush the last page (new EOF is mid-page: push its dirty data first) */
	kret = memory_object_lock_request(control,
		(memory_object_offset_t)lastpg,
		PAGE_SIZE_64, NULL, NULL,
		MEMORY_OBJECT_RETURN_DIRTY, FALSE,
		VM_PROT_NO_CHANGE);

	if (kret == KERN_SUCCESS) {
		/* invalidate last page and old contents beyond nsize */
		kret = memory_object_lock_request(control,
			(memory_object_offset_t)lastpg,
			(memory_object_size_t)(olastpgend - lastpg), NULL, NULL,
			MEMORY_OBJECT_RETURN_NONE, MEMORY_OBJECT_DATA_FLUSH,
			VM_PROT_NO_CHANGE);
		if (kret != KERN_SUCCESS)
			printf("ubc_setsize: invalidate failed (error = %d)\n", kret);
	} else
		printf("ubc_setsize: flush failed (error = %d)\n", kret);

	return ((kret == KERN_SUCCESS) ? 1 : 0);
}
296
297/*
298 * Get the size of the file
299 */
300off_t
301ubc_getsize(struct vnode *vp)
302{
303 /* people depend on the side effect of this working this way
304 * as they call this for directory
305 */
306 if (!UBCINFOEXISTS(vp))
307 return ((off_t)0);
308 return (vp->v_ubcinfo->ui_size);
309}
310
311/*
312 * call ubc_sync_range(vp, 0, EOF, UBC_PUSHALL) on all the vnodes
313 * for this mount point.
314 * returns 1 on success, 0 on failure
315 */
316
317__private_extern__ int
318ubc_umount(struct mount *mp)
319{
320 vnode_iterate(mp, 0, ubc_umcallback, 0);
321 return(0);
322}
323
324static int
325ubc_umcallback(vnode_t vp, __unused void * args)
326{
327
328 if (UBCINFOEXISTS(vp)) {
329
330 cluster_push(vp, 0);
331
332 (void) ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL, UBC_PUSHALL);
333 }
334 return (VNODE_RETURNED);
335}
336
337
338
339/* Get the credentials */
340kauth_cred_t
341ubc_getcred(struct vnode *vp)
342{
343 if (UBCINFOEXISTS(vp))
344 return (vp->v_ubcinfo->ui_ucred);
345
346 return (NOCRED);
347}
348
349/*
350 * Set the credentials
351 * existing credentials are not changed
352 * returns 1 on success and 0 on failure
353 */
354int
355ubc_setcred(struct vnode *vp, struct proc *p)
356{
357 struct ubc_info *uip;
358 kauth_cred_t credp;
359
360 if ( !UBCINFOEXISTS(vp))
361 return (0);
362
363 vnode_lock(vp);
364
365 uip = vp->v_ubcinfo;
366 credp = uip->ui_ucred;
367
368 if (credp == NOCRED) {
369 uip->ui_ucred = kauth_cred_proc_ref(p);
370 }
371 vnode_unlock(vp);
372
373 return (1);
374}
375
376/* Get the pager */
377__private_extern__ memory_object_t
378ubc_getpager(struct vnode *vp)
379{
380 if (UBCINFOEXISTS(vp))
381 return (vp->v_ubcinfo->ui_pager);
382
383 return (0);
384}
385
386/*
387 * Get the memory object associated with this vnode
388 * If the vnode was reactivated, memory object would not exist.
389 * Unless "do not rectivate" was specified, look it up using the pager.
390 * If hold was requested create an object reference of one does not
391 * exist already.
392 */
393
394memory_object_control_t
395ubc_getobject(struct vnode *vp, __unused int flags)
396{
397 if (UBCINFOEXISTS(vp))
398 return((vp->v_ubcinfo->ui_control));
399
400 return (0);
401}
402
403
404off_t
405ubc_blktooff(vnode_t vp, daddr64_t blkno)
406{
407 off_t file_offset;
408 int error;
409
410 if (UBCINVALID(vp))
411 return ((off_t)-1);
412
413 error = VNOP_BLKTOOFF(vp, blkno, &file_offset);
414 if (error)
415 file_offset = -1;
416
417 return (file_offset);
418}
419
420daddr64_t
421ubc_offtoblk(vnode_t vp, off_t offset)
422{
423 daddr64_t blkno;
424 int error = 0;
425
426 if (UBCINVALID(vp))
427 return ((daddr64_t)-1);
428
429 error = VNOP_OFFTOBLK(vp, offset, &blkno);
430 if (error)
431 blkno = -1;
432
433 return (blkno);
434}
435
436int
437ubc_pages_resident(vnode_t vp)
438{
439 kern_return_t kret;
440 boolean_t has_pages_resident;
441
442 if ( !UBCINFOEXISTS(vp))
443 return (0);
444
445 kret = memory_object_pages_resident(vp->v_ubcinfo->ui_control, &has_pages_resident);
446
447 if (kret != KERN_SUCCESS)
448 return (0);
449
450 if (has_pages_resident == TRUE)
451 return (1);
452
453 return (0);
454}
455
456
457
458/*
459 * This interface will eventually be deprecated
460 *
461 * clean and/or invalidate a range in the memory object that backs this
462 * vnode. The start offset is truncated to the page boundary and the
463 * size is adjusted to include the last page in the range.
464 *
465 * returns 1 for success, 0 for failure
466 */
467int
468ubc_sync_range(vnode_t vp, off_t beg_off, off_t end_off, int flags)
469{
470 return (ubc_msync_internal(vp, beg_off, end_off, NULL, flags, NULL));
471}
472
473
474/*
475 * clean and/or invalidate a range in the memory object that backs this
476 * vnode. The start offset is truncated to the page boundary and the
477 * size is adjusted to include the last page in the range.
478 * if a
479 */
480errno_t
481ubc_msync(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags)
482{
483 int retval;
484 int io_errno = 0;
485
486 if (resid_off)
487 *resid_off = beg_off;
488
489 retval = ubc_msync_internal(vp, beg_off, end_off, resid_off, flags, &io_errno);
490
491 if (retval == 0 && io_errno == 0)
492 return (EINVAL);
493 return (io_errno);
494}
495
496
497
/*
 * Clean and/or invalidate a range in the memory object that backs this
 * vnode.  The start offset is truncated to the page boundary and the
 * size is adjusted to include the last page in the range.
 *
 * Translates UBC_* flags into memory_object_lock_request() flush and
 * request flags, then issues one lock request over the page-aligned
 * range.  Returns 1 when the request succeeded, 0 otherwise (including
 * trivially-empty or flag-less requests and missing ubc_info).
 * On failure, *io_errno (if supplied) is filled in by VM and
 * *resid_off marks how far the operation got.
 */
static int
ubc_msync_internal(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags, int *io_errno)
{
	memory_object_size_t tsize;
	kern_return_t kret;
	int request_flags = 0;
	int flush_flags = MEMORY_OBJECT_RETURN_NONE;

	if ( !UBCINFOEXISTS(vp))
		return (0);
	if (end_off <= beg_off)
		return (0);
	/* at least one action flag must be requested */
	if ((flags & (UBC_INVALIDATE | UBC_PUSHDIRTY | UBC_PUSHALL)) == 0)
		return (0);

	if (flags & UBC_INVALIDATE)
		/*
		 * discard the resident pages
		 */
		request_flags = (MEMORY_OBJECT_DATA_FLUSH | MEMORY_OBJECT_DATA_NO_CHANGE);

	if (flags & UBC_SYNC)
		/*
		 * wait for all the I/O to complete before returning
		 */
		request_flags |= MEMORY_OBJECT_IO_SYNC;

	if (flags & UBC_PUSHDIRTY)
		/*
		 * we only return the dirty pages in the range
		 */
		flush_flags = MEMORY_OBJECT_RETURN_DIRTY;

	if (flags & UBC_PUSHALL)
		/*
		 * then return all the interesting pages in the range (both dirty and precious)
		 * to the pager
		 * (note: overrides UBC_PUSHDIRTY if both are set)
		 */
		flush_flags = MEMORY_OBJECT_RETURN_ALL;

	/* page-align the range before handing it to VM */
	beg_off = trunc_page_64(beg_off);
	end_off = round_page_64(end_off);
	tsize = (memory_object_size_t)end_off - beg_off;

	/* flush and/or invalidate pages in the range requested */
	kret = memory_object_lock_request(vp->v_ubcinfo->ui_control,
		beg_off, tsize, resid_off, io_errno,
		flush_flags, request_flags, VM_PROT_NO_CHANGE);

	return ((kret == KERN_SUCCESS) ? 1 : 0);
}
554
555
/*
 * The vnode is mapped explicitly, mark it so.
 *
 * Notifies the filesystem via VNOP_MMAP, then records the mapping in
 * ui_flags.  The first transition to UI_ISMAPPED takes a vnode_ref()
 * that ubc_unmap() later releases.  Only EPERM from VNOP_MMAP is
 * propagated to the caller; all other VNOP errors are deliberately
 * ignored and the mapping proceeds.
 *
 * Returns 0 on success (or when the vnode iocount cannot be taken),
 * EPERM when the filesystem refuses the mapping.
 */
__private_extern__ int
ubc_map(vnode_t vp, int flags)
{
	struct ubc_info *uip;
	int error = 0;
	int need_ref = 0;
	struct vfs_context context;

	if (vnode_getwithref(vp))
		return (0);

	if (UBCINFOEXISTS(vp)) {
		context.vc_proc = current_proc();
		context.vc_ucred = kauth_cred_get();

		error = VNOP_MMAP(vp, flags, &context);

		/* only EPERM is honored; other errors are ignored */
		if (error != EPERM)
			error = 0;

		if (error == 0) {
			vnode_lock(vp);

			uip = vp->v_ubcinfo;

			/* first mapping: take a vnode ref (dropped in ubc_unmap) */
			if ( !ISSET(uip->ui_flags, UI_ISMAPPED))
				need_ref = 1;
			SET(uip->ui_flags, (UI_WASMAPPED | UI_ISMAPPED));

			vnode_unlock(vp);

			/* vnode_ref() outside the vnode lock */
			if (need_ref)
				vnode_ref(vp);
		}
	}
	vnode_put(vp);

	return (error);
}
598
/*
 * destroy the named reference for a given vnode
 *
 * Returns 1 on success (or when the object is already gone), 0 when
 * memory_object_destroy() fails.
 */
__private_extern__ int
ubc_destroy_named(struct vnode *vp)
{
	memory_object_control_t control;
	struct ubc_info *uip;
	kern_return_t kret;

	/*
	 * We may already have had the object terminated
	 * and the ubcinfo released as a side effect of
	 * some earlier processing.  If so, pretend we did
	 * it, because it probably was a result of our
	 * efforts.
	 */
	if (!UBCINFOEXISTS(vp))
		return (1);

	uip = vp->v_ubcinfo;

	/*
	 * Terminate the memory object.
	 * memory_object_destroy() will result in
	 * vnode_pager_no_senders().
	 * That will release the pager reference
	 * and the vnode will move to the free list.
	 */
	control = ubc_getobject(vp, UBC_HOLDOBJECT);
	if (control != MEMORY_OBJECT_CONTROL_NULL) {

		/*
		 * XXXXX - should we hold the vnode lock here?
		 */
		if (ISSET(vp->v_flag, VTERMINATE))
			panic("ubc_destroy_named: already teminating");
		SET(vp->v_flag, VTERMINATE);

		kret = memory_object_destroy(control, 0);
		if (kret != KERN_SUCCESS)
			return (0);

		/*
		 * memory_object_destroy() is asynchronous
		 * with respect to vnode_pager_no_senders().
		 * wait for the termination to complete.
		 * NOTE(review): the original comment said "wait for ... to
		 * clear VTERMINATE", but the loop below actually waits for
		 * VNAMED_UBC to be cleared — confirm which flag the
		 * no-senders path clears and wakes on.
		 */
		vnode_lock(vp);
		while (ISSET(vp->v_lflag, VNAMED_UBC)) {
			(void)msleep((caddr_t)&vp->v_lflag, &vp->v_lock,
				PINOD, "ubc_destroy_named", 0);
		}
		vnode_unlock(vp);
	}
	return (1);
}
657
658
/*
 * Find out whether a vnode is in use by UBC.
 * Returns 1 if file is in use by UBC, 0 if not.
 */
int
ubc_isinuse(struct vnode *vp, int busycount)
{
	if (!UBCINFOEXISTS(vp))
		return (0);

	return (ubc_isinuse_locked(vp, busycount, 0));
}
670
671
672int
673ubc_isinuse_locked(struct vnode *vp, int busycount, int locked)
674{
675 int retval = 0;
676
677
678 if (!locked)
679 vnode_lock(vp);
680
681 if ((vp->v_usecount - vp->v_kusecount) > busycount)
682 retval = 1;
683
684 if (!locked)
685 vnode_unlock(vp);
686 return (retval);
687}
688
689
/*
 * MUST only be called by the VM
 *
 * Undo ubc_map(): clear UI_ISMAPPED under the vnode lock and, if this
 * call performed the transition, notify the filesystem (VNOP_MNOMAP)
 * and drop the vnode_ref() that ubc_map() took.  The trailing
 * vnode_put() drops the iocount taken here.
 */
__private_extern__ void
ubc_unmap(struct vnode *vp)
{
	struct vfs_context context;
	struct ubc_info *uip;
	int need_rele = 0;

	if (vnode_getwithref(vp))
		return;

	if (UBCINFOEXISTS(vp)) {
		vnode_lock(vp);

		uip = vp->v_ubcinfo;
		/* only the thread that clears UI_ISMAPPED drops the ref */
		if (ISSET(uip->ui_flags, UI_ISMAPPED)) {
			CLR(uip->ui_flags, UI_ISMAPPED);
			need_rele = 1;
		}
		vnode_unlock(vp);

		if (need_rele) {
			context.vc_proc = current_proc();
			context.vc_ucred = kauth_cred_get();
			(void)VNOP_MNOMAP(vp, &context);

			vnode_rele(vp);
		}
	}
	/*
	 * the drop of the vnode ref will cleanup
	 */
	vnode_put(vp);
}
726
727kern_return_t
728ubc_page_op(
729 struct vnode *vp,
730 off_t f_offset,
731 int ops,
732 ppnum_t *phys_entryp,
733 int *flagsp)
734{
735 memory_object_control_t control;
736
737 control = ubc_getobject(vp, UBC_FLAGS_NONE);
738 if (control == MEMORY_OBJECT_CONTROL_NULL)
739 return KERN_INVALID_ARGUMENT;
740
741 return (memory_object_page_op(control,
742 (memory_object_offset_t)f_offset,
743 ops,
744 phys_entryp,
745 flagsp));
746}
747
748__private_extern__ kern_return_t
749ubc_page_op_with_control(
750 memory_object_control_t control,
751 off_t f_offset,
752 int ops,
753 ppnum_t *phys_entryp,
754 int *flagsp)
755{
756 return (memory_object_page_op(control,
757 (memory_object_offset_t)f_offset,
758 ops,
759 phys_entryp,
760 flagsp));
761}
762
763kern_return_t
764ubc_range_op(
765 struct vnode *vp,
766 off_t f_offset_beg,
767 off_t f_offset_end,
768 int ops,
769 int *range)
770{
771 memory_object_control_t control;
772
773 control = ubc_getobject(vp, UBC_FLAGS_NONE);
774 if (control == MEMORY_OBJECT_CONTROL_NULL)
775 return KERN_INVALID_ARGUMENT;
776
777 return (memory_object_range_op(control,
778 (memory_object_offset_t)f_offset_beg,
779 (memory_object_offset_t)f_offset_end,
780 ops,
781 range));
782}
783
/*
 * Create a UPL (universal page list) over [f_offset, f_offset+bufsize)
 * of the memory object backing 'vp'.
 *
 * bufsize must be page-aligned (rejected otherwise with
 * KERN_INVALID_ARGUMENT, as is a vnode without a memory object).
 * UPL_FOR_PAGEOUT and UPL_WILL_BE_DUMPED are consumed here and
 * translated; all requests get UPL_NO_SYNC|UPL_SET_INTERNAL, and
 * non-dumped requests additionally get UPL_CLEAN_IN_PLACE.
 * On success *uplp is the new UPL and, if plp is non-NULL, *plp points
 * at its internal page list.
 */
kern_return_t
ubc_create_upl(
	struct vnode	*vp,
	off_t 		f_offset,
	long		bufsize,
	upl_t		*uplp,
	upl_page_info_t	**plp,
	int		uplflags)
{
	memory_object_control_t		control;
	int				count;
	int                             ubcflags;
	kern_return_t			kr;

	/* bufsize must be a multiple of the page size */
	if (bufsize & 0xfff)
		return KERN_INVALID_ARGUMENT;

	if (uplflags & UPL_FOR_PAGEOUT) {
		uplflags &= ~UPL_FOR_PAGEOUT;
		ubcflags  =  UBC_FOR_PAGEOUT;
	} else
		ubcflags = UBC_FLAGS_NONE;

	control = ubc_getobject(vp, ubcflags);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	if (uplflags & UPL_WILL_BE_DUMPED) {
		uplflags &= ~UPL_WILL_BE_DUMPED;
		uplflags |= (UPL_NO_SYNC|UPL_SET_INTERNAL);
	} else
		uplflags |= (UPL_NO_SYNC|UPL_CLEAN_IN_PLACE|UPL_SET_INTERNAL);
	count = 0;
	kr = memory_object_upl_request(control, f_offset, bufsize,
	    uplp, NULL, &count, uplflags);
	if (plp != NULL)
		*plp = UPL_GET_INTERNAL_PAGE_LIST(*uplp);
	return kr;
}
823
824
825kern_return_t
826ubc_upl_map(
827 upl_t upl,
828 vm_offset_t *dst_addr)
829{
830 return (vm_upl_map(kernel_map, upl, dst_addr));
831}
832
833
834kern_return_t
835ubc_upl_unmap(
836 upl_t upl)
837{
838 return(vm_upl_unmap(kernel_map, upl));
839}
840
841kern_return_t
842ubc_upl_commit(
843 upl_t upl)
844{
845 upl_page_info_t *pl;
846 kern_return_t kr;
847
848 pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
849 kr = upl_commit(upl, pl, MAX_UPL_TRANSFER);
850 upl_deallocate(upl);
851 return kr;
852}
853
854
855kern_return_t
856ubc_upl_commit_range(
857 upl_t upl,
858 vm_offset_t offset,
859 vm_size_t size,
860 int flags)
861{
862 upl_page_info_t *pl;
863 boolean_t empty;
864 kern_return_t kr;
865
866 if (flags & UPL_COMMIT_FREE_ON_EMPTY)
867 flags |= UPL_COMMIT_NOTIFY_EMPTY;
868
869 pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
870
871 kr = upl_commit_range(upl, offset, size, flags,
872 pl, MAX_UPL_TRANSFER, &empty);
873
874 if((flags & UPL_COMMIT_FREE_ON_EMPTY) && empty)
875 upl_deallocate(upl);
876
877 return kr;
878}
879
880kern_return_t
881ubc_upl_abort_range(
882 upl_t upl,
883 vm_offset_t offset,
884 vm_size_t size,
885 int abort_flags)
886{
887 kern_return_t kr;
888 boolean_t empty = FALSE;
889
890 if (abort_flags & UPL_ABORT_FREE_ON_EMPTY)
891 abort_flags |= UPL_ABORT_NOTIFY_EMPTY;
892
893 kr = upl_abort_range(upl, offset, size, abort_flags, &empty);
894
895 if((abort_flags & UPL_ABORT_FREE_ON_EMPTY) && empty)
896 upl_deallocate(upl);
897
898 return kr;
899}
900
901kern_return_t
902ubc_upl_abort(
903 upl_t upl,
904 int abort_type)
905{
906 kern_return_t kr;
907
908 kr = upl_abort(upl, abort_type);
909 upl_deallocate(upl);
910 return kr;
911}
912
913upl_page_info_t *
914ubc_upl_pageinfo(
915 upl_t upl)
916{
917 return (UPL_GET_INTERNAL_PAGE_LIST(upl));
918}
919
920/************* UBC APIS **************/
921
922int
923UBCINFOMISSING(struct vnode * vp)
924{
925 return((vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo == UBC_INFO_NULL));
926}
927
928int
929UBCINFORECLAIMED(struct vnode * vp)
930{
931 return((vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo == UBC_INFO_NULL));
932}
933
934
935int
936UBCINFOEXISTS(struct vnode * vp)
937{
938 return((vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo != UBC_INFO_NULL));
939}
940int
941UBCISVALID(struct vnode * vp)
942{
943 return((vp) && ((vp)->v_type == VREG) && !((vp)->v_flag & VSYSTEM));
944}
945int
946UBCINVALID(struct vnode * vp)
947{
948 return(((vp) == NULL) || ((vp) && ((vp)->v_type != VREG))
949 || ((vp) && ((vp)->v_flag & VSYSTEM)));
950}
951int
952UBCINFOCHECK(const char * fun, struct vnode * vp)
953{
954 if ((vp) && ((vp)->v_type == VREG) &&
955 ((vp)->v_ubcinfo == UBC_INFO_NULL)) {
956 panic("%s: lost ubc_info", (fun));
957 return(1);
958 } else
959 return(0);
960}
961