/*
 * Copyright (c) 1999-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 *	File:	ubc_subr.c
 *	Author:	Umesh Vaishampayan [umeshv@apple.com]
 *		05-Aug-1999	umeshv	Created.
 *
 *	Functions related to Unified Buffer cache.
 *
 *	Caller of UBC functions MUST have a valid reference on the vnode.
 *
 */

#undef DIAGNOSTIC
#define DIAGNOSTIC 1

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mount_internal.h>
#include <sys/vnode_internal.h>
#include <sys/ubc_internal.h>
#include <sys/ucred.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/buf.h>

#include <mach/mach_types.h>
#include <mach/memory_object_types.h>
#include <mach/memory_object_control.h>
#include <mach/vm_map.h>
#include <mach/upl.h>

#include <kern/kern_types.h>
#include <kern/zalloc.h>
#include <vm/vm_kern.h>
#include <vm/vm_protos.h> /* last */

#if DIAGNOSTIC
#if defined(assert)
#undef assert
#endif
#define assert(cond)    \
	((void) ((cond) ? 0 : panic("%s:%d (%s)", __FILE__, __LINE__, # cond)))
#else
#include <kern/assert.h>
#endif /* DIAGNOSTIC */

int ubc_info_init_internal(struct vnode *vp, int withfsize, off_t filesize);
static int ubc_umcallback(vnode_t, void *);
int ubc_isinuse_locked(vnode_t, int, int);
static int ubc_msync_internal(vnode_t, off_t, off_t, off_t *, int, int *);

struct zone	*ubc_info_zone;

/*
 * Initialization of the zone for Unified Buffer Cache.
 */
__private_extern__ void
ubc_init()
{
	int	i;

	i = (vm_size_t) sizeof (struct ubc_info);
	/* XXX  the number of elements should be tied in to maxvnodes */
	ubc_info_zone = zinit (i, 10000*i, 8192, "ubc_info zone");
	return;
}

/*
 * Initialize a ubc_info structure for a vnode.
 */
int
ubc_info_init(struct vnode *vp)
{
	return(ubc_info_init_internal(vp, 0, 0));
}

int
ubc_info_init_withsize(struct vnode *vp, off_t filesize)
{
	return(ubc_info_init_internal(vp, 1, filesize));
}

int
ubc_info_init_internal(struct vnode *vp, int withfsize, off_t filesize)
{
	register struct ubc_info	*uip;
	void *				pager;
	struct proc			*p = current_proc();
	int				error = 0;
	kern_return_t			kret;
	memory_object_control_t		control;

	uip = vp->v_ubcinfo;

	if (uip == UBC_INFO_NULL) {

		uip = (struct ubc_info *) zalloc(ubc_info_zone);
		bzero((char *)uip, sizeof(struct ubc_info));

		uip->ui_vnode = vp;
		uip->ui_flags = UI_INITED;
		uip->ui_ucred = NOCRED;
	}
#if DIAGNOSTIC
	else
		Debugger("ubc_info_init: already");
#endif /* DIAGNOSTIC */

	assert(uip->ui_flags != UI_NONE);
	assert(uip->ui_vnode == vp);

	/* now set this ubc_info in the vnode */
	vp->v_ubcinfo = uip;

	pager = (void *)vnode_pager_setup(vp, uip->ui_pager);
	assert(pager);

	SET(uip->ui_flags, UI_HASPAGER);
	uip->ui_pager = pager;

	/*
	 * Note: We can not use VNOP_GETATTR() to get an accurate
	 * value of ui_size here, because of NFS: nfs_getattr() can
	 * call vinvalbuf(), and at this point the ubc_info is not
	 * yet set up to deal with that.  So start with a bogus size.
	 */

	/*
	 * Create a vnode - vm_object association.
	 * memory_object_create_named() creates a "named" reference on
	 * the memory object; we hold this reference as long as the
	 * vnode is "alive."  Since memory_object_create_named() took
	 * its own reference on the vnode pager we passed it, we can
	 * drop the reference vnode_pager_setup() returned here.
	 */
	kret = memory_object_create_named(pager,
		(memory_object_size_t)uip->ui_size, &control);
	vnode_pager_deallocate(pager);
	if (kret != KERN_SUCCESS)
		panic("ubc_info_init: memory_object_create_named returned %d", kret);

	assert(control);
	uip->ui_control = control;	/* cache the value of the mo control */
	SET(uip->ui_flags, UI_HASOBJREF);	/* with a named reference */
#if 0
	/* create a pager reference on the vnode */
	error = vnode_pager_vget(vp);
	if (error)
		panic("ubc_info_init: vnode_pager_vget error = %d", error);
#endif
	if (withfsize == 0) {
		struct vfs_context context;
		/* initialize the size */
		context.vc_proc = p;
		context.vc_ucred = kauth_cred_get();
		error = vnode_size(vp, &uip->ui_size, &context);
		if (error)
			uip->ui_size = 0;
	} else {
		uip->ui_size = filesize;
	}
	vp->v_lflag |= VNAMED_UBC;

	return (error);
}

/* Free the ubc_info */
static void
ubc_info_free(struct ubc_info *uip)
{
	kauth_cred_t credp;

	credp = uip->ui_ucred;
	if (credp != NOCRED) {
		uip->ui_ucred = NOCRED;
		kauth_cred_rele(credp);
	}

	if (uip->ui_control != MEMORY_OBJECT_CONTROL_NULL)
		memory_object_control_deallocate(uip->ui_control);

	cluster_release(uip);

	zfree(ubc_info_zone, (vm_offset_t)uip);
	return;
}

void
ubc_info_deallocate(struct ubc_info *uip)
{
	ubc_info_free(uip);
}

/*
 * Communicate with VM the size change of the file
 * returns 1 on success, 0 on failure
 */
int
ubc_setsize(struct vnode *vp, off_t nsize)
{
	off_t osize;	/* ui_size before change */
	off_t lastpg, olastpgend, lastoff;
	struct ubc_info *uip;
	memory_object_control_t control;
	kern_return_t kret;

	if (nsize < (off_t)0)
		return (0);

	if (!UBCINFOEXISTS(vp))
		return (0);

	uip = vp->v_ubcinfo;
	osize = uip->ui_size;	/* call ubc_getsize() ??? */
	/* Update the size before flushing the VM */
	uip->ui_size = nsize;

	if (nsize >= osize)	/* Nothing more to do */
		return (1);	/* return success */

	/*
	 * When the file shrinks, invalidate the pages beyond the
	 * new size.  Also get rid of garbage beyond nsize on the
	 * last page.  The ui_size already has the nsize, which
	 * ensures that the pageout will not write beyond the new
	 * end of the file.
	 */

	lastpg = trunc_page_64(nsize);
	olastpgend = round_page_64(osize);
	control = uip->ui_control;
	assert(control);
	lastoff = (nsize & PAGE_MASK_64);

	/*
	 * If the new size is a multiple of the page size, we need not
	 * flush the last page; invalidating is sufficient.
	 */
	if (!lastoff) {
		/* invalidate last page and old contents beyond nsize */
		kret = memory_object_lock_request(control,
			(memory_object_offset_t)lastpg,
			(memory_object_size_t)(olastpgend - lastpg), NULL, NULL,
			MEMORY_OBJECT_RETURN_NONE, MEMORY_OBJECT_DATA_FLUSH,
			VM_PROT_NO_CHANGE);
		if (kret != KERN_SUCCESS)
			printf("ubc_setsize: invalidate failed (error = %d)\n", kret);

		return ((kret == KERN_SUCCESS) ? 1 : 0);
	}

	/* flush the last page */
	kret = memory_object_lock_request(control,
		(memory_object_offset_t)lastpg,
		PAGE_SIZE_64, NULL, NULL,
		MEMORY_OBJECT_RETURN_DIRTY, FALSE,
		VM_PROT_NO_CHANGE);

	if (kret == KERN_SUCCESS) {
		/* invalidate last page and old contents beyond nsize */
		kret = memory_object_lock_request(control,
			(memory_object_offset_t)lastpg,
			(memory_object_size_t)(olastpgend - lastpg), NULL, NULL,
			MEMORY_OBJECT_RETURN_NONE, MEMORY_OBJECT_DATA_FLUSH,
			VM_PROT_NO_CHANGE);
		if (kret != KERN_SUCCESS)
			printf("ubc_setsize: invalidate failed (error = %d)\n", kret);
	} else
		printf("ubc_setsize: flush failed (error = %d)\n", kret);

	return ((kret == KERN_SUCCESS) ? 1 : 0);
}

/*
 * Get the size of the file
 */
off_t
ubc_getsize(struct vnode *vp)
{
	/* people depend on the side effect of this working this way,
	 * as they call this for directories as well
	 */
	if (!UBCINFOEXISTS(vp))
		return ((off_t)0);
	return (vp->v_ubcinfo->ui_size);
}

/*
 * Push dirty data for all the vnodes belonging to this mount point,
 * via ubc_msync(vp, 0, EOF, NULL, UBC_PUSHALL) on each vnode.
 * Always returns 0.
 */

__private_extern__ int
ubc_umount(struct mount *mp)
{
	vnode_iterate(mp, 0, ubc_umcallback, 0);
	return(0);
}

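/*
 * Per-vnode callback used by ubc_umount(): push any dirty clusters,
 * then push all dirty pages for the vnode.
 */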
static int
ubc_umcallback(vnode_t vp, __unused void * args)
{

	if (UBCINFOEXISTS(vp)) {

		cluster_push(vp, 0);

		(void) ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL, UBC_PUSHALL);
	}
	return (VNODE_RETURNED);
}

/* Get the credentials */
kauth_cred_t
ubc_getcred(struct vnode *vp)
{
	if (UBCINFOEXISTS(vp))
		return (vp->v_ubcinfo->ui_ucred);

	return (NOCRED);
}

/*
 * Set the credentials
 * existing credentials are not changed
 * returns 1 on success and 0 on failure
 */
int
ubc_setcred(struct vnode *vp, struct proc *p)
{
	struct ubc_info *uip;
	kauth_cred_t credp;

	if ( !UBCINFOEXISTS(vp))
		return (0);

	vnode_lock(vp);

	uip = vp->v_ubcinfo;
	credp = uip->ui_ucred;

	if (credp == NOCRED) {
		uip->ui_ucred = kauth_cred_proc_ref(p);
	}
	vnode_unlock(vp);

	return (1);
}

/* Get the pager */
__private_extern__ memory_object_t
ubc_getpager(struct vnode *vp)
{
	if (UBCINFOEXISTS(vp))
		return (vp->v_ubcinfo->ui_pager);

	return (0);
}

/*
 * Get the memory object associated with this vnode.
 * If the vnode was reactivated, the memory object would not exist.
 * Unless "do not reactivate" was specified, look it up using the pager.
 * If hold was requested, create an object reference if one does not
 * exist already.
 */

memory_object_control_t
ubc_getobject(struct vnode *vp, __unused int flags)
{
	if (UBCINFOEXISTS(vp))
		return((vp->v_ubcinfo->ui_control));

	return (0);
}

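/*
 * Convert a logical block number to a file offset by way of the
 * file system's VNOP_BLKTOOFF; returns -1 if the vnode is not
 * eligible for UBC or on error.
 */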
off_t
ubc_blktooff(vnode_t vp, daddr64_t blkno)
{
	off_t file_offset;
	int error;

	if (UBCINVALID(vp))
		return ((off_t)-1);

	error = VNOP_BLKTOOFF(vp, blkno, &file_offset);
	if (error)
		file_offset = -1;

	return (file_offset);
}

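/*
 * Convert a file offset to a logical block number by way of the
 * file system's VNOP_OFFTOBLK; returns -1 if the vnode is not
 * eligible for UBC or on error.
 */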
daddr64_t
ubc_offtoblk(vnode_t vp, off_t offset)
{
	daddr64_t blkno;
	int error = 0;

	if (UBCINVALID(vp))
		return ((daddr64_t)-1);

	error = VNOP_OFFTOBLK(vp, offset, &blkno);
	if (error)
		blkno = -1;

	return (blkno);
}

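/*
 * Return 1 if the memory object backing this vnode has any resident
 * pages, 0 otherwise (or if the vnode has no ubc_info).
 */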
int
ubc_pages_resident(vnode_t vp)
{
	kern_return_t	kret;
	boolean_t	has_pages_resident;

	if ( !UBCINFOEXISTS(vp))
		return (0);

	kret = memory_object_pages_resident(vp->v_ubcinfo->ui_control, &has_pages_resident);

	if (kret != KERN_SUCCESS)
		return (0);

	if (has_pages_resident == TRUE)
		return (1);

	return (0);
}

/*
 * This interface will eventually be deprecated
 *
 * clean and/or invalidate a range in the memory object that backs this
 * vnode. The start offset is truncated to the page boundary and the
 * size is adjusted to include the last page in the range.
 *
 * returns 1 for success, 0 for failure
 */
int
ubc_sync_range(vnode_t vp, off_t beg_off, off_t end_off, int flags)
{
	return (ubc_msync_internal(vp, beg_off, end_off, NULL, flags, NULL));
}

/*
 * clean and/or invalidate a range in the memory object that backs this
 * vnode. The start offset is truncated to the page boundary and the
 * size is adjusted to include the last page in the range.
 * Returns an errno: 0 on success, the I/O error if one occurred, or
 * EINVAL if the request failed for any other reason.
 */
errno_t
ubc_msync(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags)
{
	int retval;
	int io_errno = 0;

	if (resid_off)
		*resid_off = beg_off;

	retval = ubc_msync_internal(vp, beg_off, end_off, resid_off, flags, &io_errno);

	if (retval == 0 && io_errno == 0)
		return (EINVAL);
	return (io_errno);
}

/*
 * clean and/or invalidate a range in the memory object that backs this
 * vnode. The start offset is truncated to the page boundary and the
 * size is adjusted to include the last page in the range.
 */
static int
ubc_msync_internal(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags, int *io_errno)
{
	memory_object_size_t	tsize;
	kern_return_t		kret;
	int request_flags = 0;
	int flush_flags   = MEMORY_OBJECT_RETURN_NONE;

	if ( !UBCINFOEXISTS(vp))
		return (0);
	if (end_off <= beg_off)
		return (0);
	if ((flags & (UBC_INVALIDATE | UBC_PUSHDIRTY | UBC_PUSHALL)) == 0)
		return (0);

	if (flags & UBC_INVALIDATE)
		/*
		 * discard the resident pages
		 */
		request_flags = (MEMORY_OBJECT_DATA_FLUSH | MEMORY_OBJECT_DATA_NO_CHANGE);

	if (flags & UBC_SYNC)
		/*
		 * wait for all the I/O to complete before returning
		 */
		request_flags |= MEMORY_OBJECT_IO_SYNC;

	if (flags & UBC_PUSHDIRTY)
		/*
		 * we only return the dirty pages in the range
		 */
		flush_flags = MEMORY_OBJECT_RETURN_DIRTY;

	if (flags & UBC_PUSHALL)
		/*
		 * then return all the interesting pages in the range (both dirty and precious)
		 * to the pager
		 */
		flush_flags = MEMORY_OBJECT_RETURN_ALL;

	beg_off = trunc_page_64(beg_off);
	end_off = round_page_64(end_off);
	tsize   = (memory_object_size_t)end_off - beg_off;

	/* flush and/or invalidate pages in the range requested */
	kret = memory_object_lock_request(vp->v_ubcinfo->ui_control,
					  beg_off, tsize, resid_off, io_errno,
					  flush_flags, request_flags, VM_PROT_NO_CHANGE);

	return ((kret == KERN_SUCCESS) ? 1 : 0);
}

/*
 * The vnode is mapped explicitly, mark it so.
 */
__private_extern__ int
ubc_map(vnode_t vp, int flags)
{
	struct ubc_info *uip;
	int error = 0;
	int need_ref = 0;
	struct vfs_context context;

	if (vnode_getwithref(vp))
		return (0);

	if (UBCINFOEXISTS(vp)) {
		context.vc_proc = current_proc();
		context.vc_ucred = kauth_cred_get();

		error = VNOP_MMAP(vp, flags, &context);

		if (error != EPERM)
			error = 0;

		if (error == 0) {
			vnode_lock(vp);

			uip = vp->v_ubcinfo;

			if ( !ISSET(uip->ui_flags, UI_ISMAPPED))
				need_ref = 1;
			SET(uip->ui_flags, (UI_WASMAPPED | UI_ISMAPPED));

			vnode_unlock(vp);

			if (need_ref)
				vnode_ref(vp);
		}
	}
	vnode_put(vp);

	return (error);
}

/*
 * destroy the named reference for a given vnode
 */
__private_extern__ int
ubc_destroy_named(struct vnode *vp)
{
	memory_object_control_t control;
	struct ubc_info *uip;
	kern_return_t kret;

	/*
	 * We may already have had the object terminated
	 * and the ubcinfo released as a side effect of
	 * some earlier processing.  If so, pretend we did
	 * it, because it probably was a result of our
	 * efforts.
	 */
	if (!UBCINFOEXISTS(vp))
		return (1);

	uip = vp->v_ubcinfo;

	/*
	 * Terminate the memory object.
	 * memory_object_destroy() will result in
	 * vnode_pager_no_senders().
	 * That will release the pager reference
	 * and the vnode will move to the free list.
	 */
	control = ubc_getobject(vp, UBC_HOLDOBJECT);
	if (control != MEMORY_OBJECT_CONTROL_NULL) {

		/*
		 * XXXXX - should we hold the vnode lock here?
		 */
		if (ISSET(vp->v_flag, VTERMINATE))
			panic("ubc_destroy_named: already terminating");
		SET(vp->v_flag, VTERMINATE);

		kret = memory_object_destroy(control, 0);
		if (kret != KERN_SUCCESS)
			return (0);

		/*
		 * memory_object_destroy() is asynchronous
		 * with respect to vnode_pager_no_senders().
		 * wait for vnode_pager_no_senders() to clear
		 * VTERMINATE
		 */
		vnode_lock(vp);
		while (ISSET(vp->v_lflag, VNAMED_UBC)) {
			(void)msleep((caddr_t)&vp->v_lflag, &vp->v_lock,
				     PINOD, "ubc_destroy_named", 0);
		}
		vnode_unlock(vp);
	}
	return (1);
}

/*
 * Find out whether a vnode is in use by UBC
 * Returns 1 if file is in use by UBC, 0 if not
 */
int
ubc_isinuse(struct vnode *vp, int busycount)
{
	if ( !UBCINFOEXISTS(vp))
		return (0);
	return(ubc_isinuse_locked(vp, busycount, 0));
}

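/*
 * Same as ubc_isinuse(), but the caller states whether it already
 * holds the vnode lock.  A vnode is considered in use by UBC when
 * its use count, less the kernel-internal uses (v_kusecount),
 * exceeds busycount.
 */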
int
ubc_isinuse_locked(struct vnode *vp, int busycount, int locked)
{
	int retval = 0;

	if (!locked)
		vnode_lock(vp);

	if ((vp->v_usecount - vp->v_kusecount) > busycount)
		retval = 1;

	if (!locked)
		vnode_unlock(vp);
	return (retval);
}

/*
 * MUST only be called by the VM
 */
__private_extern__ void
ubc_unmap(struct vnode *vp)
{
	struct vfs_context context;
	struct ubc_info *uip;
	int	need_rele = 0;

	if (vnode_getwithref(vp))
		return;

	if (UBCINFOEXISTS(vp)) {
		vnode_lock(vp);

		uip = vp->v_ubcinfo;
		if (ISSET(uip->ui_flags, UI_ISMAPPED)) {
			CLR(uip->ui_flags, UI_ISMAPPED);
			need_rele = 1;
		}
		vnode_unlock(vp);

		if (need_rele) {
			context.vc_proc = current_proc();
			context.vc_ucred = kauth_cred_get();
			(void)VNOP_MNOMAP(vp, &context);

			vnode_rele(vp);
		}
	}
	/*
	 * the drop of the vnode ref will cleanup
	 */
	vnode_put(vp);
}

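/*
 * Perform a single-page operation on the page at f_offset of this
 * vnode's memory object, by way of memory_object_page_op().
 */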
kern_return_t
ubc_page_op(
	struct vnode	*vp,
	off_t		f_offset,
	int		ops,
	ppnum_t		*phys_entryp,
	int		*flagsp)
{
	memory_object_control_t	control;

	control = ubc_getobject(vp, UBC_FLAGS_NONE);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	return (memory_object_page_op(control,
				      (memory_object_offset_t)f_offset,
				      ops,
				      phys_entryp,
				      flagsp));
}

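/*
 * Same as ubc_page_op(), but operates directly on a memory object
 * control rather than looking it up from a vnode.
 */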
__private_extern__ kern_return_t
ubc_page_op_with_control(
	memory_object_control_t	control,
	off_t			f_offset,
	int			ops,
	ppnum_t			*phys_entryp,
	int			*flagsp)
{
	return (memory_object_page_op(control,
				      (memory_object_offset_t)f_offset,
				      ops,
				      phys_entryp,
				      flagsp));
}

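/*
 * Perform an operation on the range of pages covering
 * [f_offset_beg, f_offset_end) of this vnode's memory object,
 * by way of memory_object_range_op().
 */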
kern_return_t
ubc_range_op(
	struct vnode	*vp,
	off_t		f_offset_beg,
	off_t		f_offset_end,
	int		ops,
	int		*range)
{
	memory_object_control_t	control;

	control = ubc_getobject(vp, UBC_FLAGS_NONE);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	return (memory_object_range_op(control,
				       (memory_object_offset_t)f_offset_beg,
				       (memory_object_offset_t)f_offset_end,
				       ops,
				       range));
}

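/*
 * Create a universal page list (UPL) describing bufsize bytes of this
 * vnode's memory object, starting at f_offset.  bufsize must be a
 * multiple of the page size.  If plp is non-NULL, the UPL's internal
 * page-info list is returned through it.
 */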
kern_return_t
ubc_create_upl(
	struct vnode	*vp,
	off_t		f_offset,
	long		bufsize,
	upl_t		*uplp,
	upl_page_info_t	**plp,
	int		uplflags)
{
	memory_object_control_t	control;
	int			count;
	int			ubcflags;
	kern_return_t		kr;

	if (bufsize & 0xfff)
		return KERN_INVALID_ARGUMENT;

	if (uplflags & UPL_FOR_PAGEOUT) {
		uplflags &= ~UPL_FOR_PAGEOUT;
		ubcflags  =  UBC_FOR_PAGEOUT;
	} else
		ubcflags = UBC_FLAGS_NONE;

	control = ubc_getobject(vp, ubcflags);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	if (uplflags & UPL_WILL_BE_DUMPED) {
		uplflags &= ~UPL_WILL_BE_DUMPED;
		uplflags |= (UPL_NO_SYNC|UPL_SET_INTERNAL);
	} else
		uplflags |= (UPL_NO_SYNC|UPL_CLEAN_IN_PLACE|UPL_SET_INTERNAL);
	count = 0;
	kr = memory_object_upl_request(control, f_offset, bufsize,
				       uplp, NULL, &count, uplflags);
	if (plp != NULL)
		*plp = UPL_GET_INTERNAL_PAGE_LIST(*uplp);
	return kr;
}

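/*
 * Map a UPL into the kernel map; the kernel virtual address of the
 * mapping is returned through dst_addr.
 */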
kern_return_t
ubc_upl_map(
	upl_t		upl,
	vm_offset_t	*dst_addr)
{
	return (vm_upl_map(kernel_map, upl, dst_addr));
}

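/*
 * Remove a kernel mapping previously established by ubc_upl_map().
 */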
kern_return_t
ubc_upl_unmap(
	upl_t	upl)
{
	return(vm_upl_unmap(kernel_map, upl));
}

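/*
 * Commit all pages in the UPL and deallocate it.
 */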
kern_return_t
ubc_upl_commit(
	upl_t		upl)
{
	upl_page_info_t	*pl;
	kern_return_t	kr;

	pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
	kr = upl_commit(upl, pl, MAX_UPL_TRANSFER);
	upl_deallocate(upl);
	return kr;
}

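/*
 * Commit a sub-range of the UPL; if UPL_COMMIT_FREE_ON_EMPTY is set
 * and the commit leaves the UPL empty, the UPL is also deallocated.
 */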
kern_return_t
ubc_upl_commit_range(
	upl_t		upl,
	vm_offset_t	offset,
	vm_size_t	size,
	int		flags)
{
	upl_page_info_t	*pl;
	boolean_t	empty;
	kern_return_t	kr;

	if (flags & UPL_COMMIT_FREE_ON_EMPTY)
		flags |= UPL_COMMIT_NOTIFY_EMPTY;

	pl = UPL_GET_INTERNAL_PAGE_LIST(upl);

	kr = upl_commit_range(upl, offset, size, flags,
			      pl, MAX_UPL_TRANSFER, &empty);

	if((flags & UPL_COMMIT_FREE_ON_EMPTY) && empty)
		upl_deallocate(upl);

	return kr;
}

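/*
 * Abort a sub-range of the UPL; if UPL_ABORT_FREE_ON_EMPTY is set
 * and the abort leaves the UPL empty, the UPL is also deallocated.
 */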
kern_return_t
ubc_upl_abort_range(
	upl_t		upl,
	vm_offset_t	offset,
	vm_size_t	size,
	int		abort_flags)
{
	kern_return_t	kr;
	boolean_t	empty = FALSE;

	if (abort_flags & UPL_ABORT_FREE_ON_EMPTY)
		abort_flags |= UPL_ABORT_NOTIFY_EMPTY;

	kr = upl_abort_range(upl, offset, size, abort_flags, &empty);

	if((abort_flags & UPL_ABORT_FREE_ON_EMPTY) && empty)
		upl_deallocate(upl);

	return kr;
}

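/*
 * Abort the entire UPL and deallocate it.
 */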
kern_return_t
ubc_upl_abort(
	upl_t	upl,
	int	abort_type)
{
	kern_return_t	kr;

	kr = upl_abort(upl, abort_type);
	upl_deallocate(upl);
	return kr;
}

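/*
 * Return the internal page-info list associated with the UPL.
 */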
upl_page_info_t *
ubc_upl_pageinfo(
	upl_t	upl)
{
	return (UPL_GET_INTERNAL_PAGE_LIST(upl));
}

/************* UBC APIS **************/

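/*
 * Predicates on the UBC state of a vnode.  A vnode participates in
 * UBC only if it is a regular file (VREG) and is not marked VSYSTEM.
 */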
int
UBCINFOMISSING(struct vnode * vp)
{
	return((vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo == UBC_INFO_NULL));
}

int
UBCINFORECLAIMED(struct vnode * vp)
{
	return((vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo == UBC_INFO_NULL));
}

int
UBCINFOEXISTS(struct vnode * vp)
{
	return((vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo != UBC_INFO_NULL));
}

int
UBCISVALID(struct vnode * vp)
{
	return((vp) && ((vp)->v_type == VREG) && !((vp)->v_flag & VSYSTEM));
}

int
UBCINVALID(struct vnode * vp)
{
	return(((vp) == NULL) || ((vp) && ((vp)->v_type != VREG))
		|| ((vp) && ((vp)->v_flag & VSYSTEM)));
}

int
UBCINFOCHECK(const char * fun, struct vnode * vp)
{
	if ((vp) && ((vp)->v_type == VREG) &&
	    ((vp)->v_ubcinfo == UBC_INFO_NULL)) {
		panic("%s: lost ubc_info", (fun));
		return(1);
	} else
		return(0);
}
961