/*
 * Copyright (c) 1999-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 *	File:	ubc_subr.c
 *	Author:	Umesh Vaishampayan [umeshv@apple.com]
 *		05-Aug-1999	umeshv	Created.
 *
 *	Functions related to Unified Buffer cache.
 *
 *	Caller of UBC functions MUST have a valid reference on the vnode.
 *
 */

#undef DIAGNOSTIC
#define DIAGNOSTIC 1

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mount_internal.h>
#include <sys/vnode_internal.h>
#include <sys/ubc_internal.h>
#include <sys/ucred.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/buf.h>

#include <mach/mach_types.h>
#include <mach/memory_object_types.h>
#include <mach/memory_object_control.h>
#include <mach/vm_map.h>
#include <mach/upl.h>

#include <kern/kern_types.h>
#include <kern/zalloc.h>
#include <vm/vm_kern.h>
#include <vm/vm_protos.h>	/* last */
#if DIAGNOSTIC
#if defined(assert)
#undef assert
#endif
#define assert(cond)	\
	((void) ((cond) ? 0 : panic("%s:%d (%s)", __FILE__, __LINE__, # cond)))
#else
#include <kern/assert.h>
#endif /* DIAGNOSTIC */
int	ubc_info_init_internal(struct vnode *vp, int withfsize, off_t filesize);
static int	ubc_umcallback(vnode_t, void *);
int	ubc_isinuse_locked(vnode_t, int, int);
static int	ubc_msync_internal(vnode_t, off_t, off_t, off_t *, int, int *);

struct zone	*ubc_info_zone;

/*
 * Initialization of the zone for Unified Buffer Cache.
 */
__private_extern__ void
ubc_init()
{
	int	i;

	i = (vm_size_t) sizeof (struct ubc_info);
	/* XXX the number of elements should be tied in to maxvnodes */
	ubc_info_zone = zinit(i, 10000 * i, 8192, "ubc_info zone");
	return;
}

/*
 * Initialize a ubc_info structure for a vnode.
 */
int
ubc_info_init(struct vnode *vp)
{
	return (ubc_info_init_internal(vp, 0, 0));
}

int
ubc_info_init_withsize(struct vnode *vp, off_t filesize)
{
	return (ubc_info_init_internal(vp, 1, filesize));
}

int
ubc_info_init_internal(struct vnode *vp, int withfsize, off_t filesize)
{
	register struct ubc_info	*uip;
	void *	pager;
	struct proc *p = current_proc();
	int error = 0;
	kern_return_t kret;
	memory_object_control_t control;

	uip = vp->v_ubcinfo;

	if (uip == UBC_INFO_NULL) {

		uip = (struct ubc_info *) zalloc(ubc_info_zone);
		bzero((char *)uip, sizeof(struct ubc_info));

		uip->ui_vnode = vp;
		uip->ui_flags = UI_INITED;
		uip->ui_ucred = NOCRED;
	}
#if DIAGNOSTIC
	else
		Debugger("ubc_info_init: already");
#endif /* DIAGNOSTIC */

	assert(uip->ui_flags != UI_NONE);
	assert(uip->ui_vnode == vp);

	/* now set this ubc_info in the vnode */
	vp->v_ubcinfo = uip;

	pager = (void *)vnode_pager_setup(vp, uip->ui_pager);
	assert(pager);

	SET(uip->ui_flags, UI_HASPAGER);
	uip->ui_pager = pager;

	/*
	 * Note: We cannot use VNOP_GETATTR() to get an accurate
	 * value for ui_size, thanks to NFS: nfs_getattr() can call
	 * vinvalbuf(), and at this point the ubc_info is not set up
	 * to deal with that.  So use a bogus size for now.
	 */

	/*
	 * Create a vnode - vm_object association.
	 * memory_object_create_named() creates a "named" reference on the
	 * memory object; we hold this reference as long as the vnode is
	 * "alive."  Since memory_object_create_named() took its own reference
	 * on the vnode pager we passed it, we can drop the reference
	 * vnode_pager_setup() returned here.
	 */
	kret = memory_object_create_named(pager,
		(memory_object_size_t)uip->ui_size, &control);
	vnode_pager_deallocate(pager);
	if (kret != KERN_SUCCESS)
		panic("ubc_info_init: memory_object_create_named returned %d", kret);

	assert(control);
	uip->ui_control = control;	/* cache the value of the mo control */
	SET(uip->ui_flags, UI_HASOBJREF);	/* with a named reference */
#if 0
	/* create a pager reference on the vnode */
	error = vnode_pager_vget(vp);
	if (error)
		panic("ubc_info_init: vnode_pager_vget error = %d", error);
#endif
	if (withfsize == 0) {
		struct vfs_context context;
		/* initialize the size */
		context.vc_proc = p;
		context.vc_ucred = kauth_cred_get();
		error = vnode_size(vp, &uip->ui_size, &context);
		if (error)
			uip->ui_size = 0;
	} else {
		uip->ui_size = filesize;
	}
	vp->v_lflag |= VNAMED_UBC;

	return (error);
}

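/*
 * Example: a filesystem that already knows the file's size at vnode
 * creation time can use the _withsize variant and skip the vnode_size()
 * lookup above (illustrative sketch only; "vp", "have_size" and
 * "known_size" are assumed to come from the caller's own
 * vnode-creation path):
 *
 *	if (have_size)
 *		error = ubc_info_init_withsize(vp, known_size);
 *	else
 *		error = ubc_info_init(vp);
 */
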
/* Free the ubc_info */
static void
ubc_info_free(struct ubc_info *uip)
{
	kauth_cred_t credp;

	credp = uip->ui_ucred;
	if (credp != NOCRED) {
		uip->ui_ucred = NOCRED;
		kauth_cred_rele(credp);
	}

	if (uip->ui_control != MEMORY_OBJECT_CONTROL_NULL)
		memory_object_control_deallocate(uip->ui_control);

	cluster_release(uip);

	zfree(ubc_info_zone, (vm_offset_t)uip);
	return;
}

void
ubc_info_deallocate(struct ubc_info *uip)
{
	ubc_info_free(uip);
}

/*
 * Communicate to the VM the size change of the file.
 * Returns 1 on success, 0 on failure.
 */
int
ubc_setsize(struct vnode *vp, off_t nsize)
{
	off_t osize;	/* ui_size before change */
	off_t lastpg, olastpgend, lastoff;
	struct ubc_info *uip;
	memory_object_control_t control;
	kern_return_t kret;

	if (nsize < (off_t)0)
		return (0);

	if (!UBCINFOEXISTS(vp))
		return (0);

	uip = vp->v_ubcinfo;
	osize = uip->ui_size;	/* call ubc_getsize() ??? */
	/* Update the size before flushing the VM */
	uip->ui_size = nsize;

	if (nsize >= osize)	/* Nothing more to do */
		return (1);	/* return success */

	/*
	 * When the file shrinks, invalidate the pages beyond the
	 * new size.  Also get rid of garbage beyond nsize on the
	 * last page.  The ui_size already has the nsize, which
	 * ensures that the pageout would not write beyond the new
	 * end of the file.
	 */

	lastpg = trunc_page_64(nsize);
	olastpgend = round_page_64(osize);
	control = uip->ui_control;
	assert(control);
	lastoff = (nsize & PAGE_MASK_64);

	/*
	 * If the new size is a multiple of the page size, flushing is
	 * unnecessary; invalidating is sufficient.
	 */
	if (!lastoff) {
		/* invalidate last page and old contents beyond nsize */
		kret = memory_object_lock_request(control,
			(memory_object_offset_t)lastpg,
			(memory_object_size_t)(olastpgend - lastpg), NULL, NULL,
			MEMORY_OBJECT_RETURN_NONE, MEMORY_OBJECT_DATA_FLUSH,
			VM_PROT_NO_CHANGE);
		if (kret != KERN_SUCCESS)
			printf("ubc_setsize: invalidate failed (error = %d)\n", kret);

		return ((kret == KERN_SUCCESS) ? 1 : 0);
	}

	/* flush the last page */
	kret = memory_object_lock_request(control,
		(memory_object_offset_t)lastpg,
		PAGE_SIZE_64, NULL, NULL,
		MEMORY_OBJECT_RETURN_DIRTY, FALSE,
		VM_PROT_NO_CHANGE);

	if (kret == KERN_SUCCESS) {
		/* invalidate last page and old contents beyond nsize */
		kret = memory_object_lock_request(control,
			(memory_object_offset_t)lastpg,
			(memory_object_size_t)(olastpgend - lastpg), NULL, NULL,
			MEMORY_OBJECT_RETURN_NONE, MEMORY_OBJECT_DATA_FLUSH,
			VM_PROT_NO_CHANGE);
		if (kret != KERN_SUCCESS)
			printf("ubc_setsize: invalidate failed (error = %d)\n", kret);
	} else
		printf("ubc_setsize: flush failed (error = %d)\n", kret);

	return ((kret == KERN_SUCCESS) ? 1 : 0);
}

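/*
 * Example: a filesystem truncate path would typically update its own
 * notion of EOF and then call ubc_setsize() so the VM drops pages past
 * the new end of file (illustrative sketch; assumes "vp" carries a
 * valid reference and "new_size" is the post-truncate EOF):
 *
 *	if (ubc_setsize(vp, new_size) == 0)
 *		printf("truncate: ubc_setsize failed, stale pages may remain\n");
 */
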
/*
 * Get the size of the file
 */
off_t
ubc_getsize(struct vnode *vp)
{
	/* people depend on the side effect of this working this way
	 * as they call this for directories
	 */
	if (!UBCINFOEXISTS(vp))
		return ((off_t)0);
	return (vp->v_ubcinfo->ui_size);
}

/*
 * Call cluster_push() and ubc_msync(vp, 0, EOF, NULL, UBC_PUSHALL) on
 * all the vnodes for this mount point.
 * Always returns 0.
 */
__private_extern__ int
ubc_umount(struct mount *mp)
{
	vnode_iterate(mp, 0, ubc_umcallback, 0);
	return (0);
}

static int
ubc_umcallback(vnode_t vp, __unused void * args)
{

	if (UBCINFOEXISTS(vp)) {

		cluster_push(vp, 0);

		(void) ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL, UBC_PUSHALL);
	}
	return (VNODE_RETURNED);
}


/* Get the credentials */
kauth_cred_t
ubc_getcred(struct vnode *vp)
{
	if (UBCINFOEXISTS(vp))
		return (vp->v_ubcinfo->ui_ucred);

	return (NOCRED);
}

/*
 * Set the credentials.
 * Existing credentials are not changed.
 * Returns 1 on success, 0 on failure.
 */
int
ubc_setcred(struct vnode *vp, struct proc *p)
{
	struct ubc_info *uip;
	kauth_cred_t credp;

	if ( !UBCINFOEXISTS(vp))
		return (0);

	vnode_lock(vp);

	uip = vp->v_ubcinfo;
	credp = uip->ui_ucred;

	if (credp == NOCRED) {
		uip->ui_ucred = kauth_cred_proc_ref(p);
	}
	vnode_unlock(vp);

	return (1);
}

/* Get the pager */
__private_extern__ memory_object_t
ubc_getpager(struct vnode *vp)
{
	if (UBCINFOEXISTS(vp))
		return (vp->v_ubcinfo->ui_pager);

	return (0);
}

/*
 * Get the memory object associated with this vnode.
 * If the vnode was reactivated, the memory object would not exist.
 * Unless "do not reactivate" was specified, look it up using the pager.
 * If a hold was requested, create an object reference if one does not
 * exist already.
 */

memory_object_control_t
ubc_getobject(struct vnode *vp, __unused int flags)
{
	if (UBCINFOEXISTS(vp))
		return ((vp->v_ubcinfo->ui_control));

	return (0);
}


off_t
ubc_blktooff(vnode_t vp, daddr64_t blkno)
{
	off_t file_offset;
	int error;

	if (UBCINVALID(vp))
		return ((off_t)-1);

	error = VNOP_BLKTOOFF(vp, blkno, &file_offset);
	if (error)
		file_offset = -1;

	return (file_offset);
}

daddr64_t
ubc_offtoblk(vnode_t vp, off_t offset)
{
	daddr64_t blkno;
	int error = 0;

	if (UBCINVALID(vp))
		return ((daddr64_t)-1);

	error = VNOP_OFFTOBLK(vp, offset, &blkno);
	if (error)
		blkno = -1;

	return (blkno);
}

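/*
 * Example round trip between a file offset and its logical block number
 * (illustrative sketch; both helpers return -1 on failure, so check
 * before relying on the result):
 *
 *	daddr64_t blk = ubc_offtoblk(vp, f_offset);
 *	off_t blkbase = (blk == (daddr64_t)-1) ?
 *	    (off_t)-1 : ubc_blktooff(vp, blk);
 *
 * "blkbase" is then the file offset at which f_offset's block begins.
 */
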
int
ubc_pages_resident(vnode_t vp)
{
	kern_return_t kret;
	boolean_t has_pages_resident;

	if ( !UBCINFOEXISTS(vp))
		return (0);

	kret = memory_object_pages_resident(vp->v_ubcinfo->ui_control, &has_pages_resident);

	if (kret != KERN_SUCCESS)
		return (0);

	if (has_pages_resident == TRUE)
		return (1);

	return (0);
}


/*
 * This interface will eventually be deprecated
 *
 * clean and/or invalidate a range in the memory object that backs this
 * vnode. The start offset is truncated to the page boundary and the
 * size is adjusted to include the last page in the range.
 *
 * returns 1 for success, 0 for failure
 */
int
ubc_sync_range(vnode_t vp, off_t beg_off, off_t end_off, int flags)
{
	return (ubc_msync_internal(vp, beg_off, end_off, NULL, flags, NULL));
}

/*
 * clean and/or invalidate a range in the memory object that backs this
 * vnode. The start offset is truncated to the page boundary and the
 * size is adjusted to include the last page in the range.
 * Returns 0 on success; if the request fails without a specific I/O
 * error, EINVAL is returned, otherwise the I/O error is returned.
 */
errno_t
ubc_msync(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags)
{
	int retval;
	int io_errno = 0;

	if (resid_off)
		*resid_off = beg_off;

	retval = ubc_msync_internal(vp, beg_off, end_off, resid_off, flags, &io_errno);

	if (retval == 0 && io_errno == 0)
		return (EINVAL);
	return (io_errno);
}

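/*
 * Example: push all dirty pages in a range to the backing store and
 * wait for the I/O to complete (illustrative sketch; "foff" and "len"
 * are assumed to describe the byte range of interest):
 *
 *	off_t resid;
 *	int error = ubc_msync(vp, foff, foff + len, &resid,
 *	    UBC_PUSHDIRTY | UBC_SYNC);
 *	if (error)
 *		printf("msync stopped at %lld, error %d\n", resid, error);
 */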


/*
 * clean and/or invalidate a range in the memory object that backs this
 * vnode. The start offset is truncated to the page boundary and the
 * size is adjusted to include the last page in the range.
 */
static int
ubc_msync_internal(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags, int *io_errno)
{
	memory_object_size_t	tsize;
	kern_return_t		kret;
	int request_flags = 0;
	int flush_flags   = MEMORY_OBJECT_RETURN_NONE;

	if ( !UBCINFOEXISTS(vp))
		return (0);
	if (end_off <= beg_off)
		return (0);
	if ((flags & (UBC_INVALIDATE | UBC_PUSHDIRTY | UBC_PUSHALL)) == 0)
		return (0);

	if (flags & UBC_INVALIDATE)
		/*
		 * discard the resident pages
		 */
		request_flags = (MEMORY_OBJECT_DATA_FLUSH | MEMORY_OBJECT_DATA_NO_CHANGE);

	if (flags & UBC_SYNC)
		/*
		 * wait for all the I/O to complete before returning
		 */
		request_flags |= MEMORY_OBJECT_IO_SYNC;

	if (flags & UBC_PUSHDIRTY)
		/*
		 * we only return the dirty pages in the range
		 */
		flush_flags = MEMORY_OBJECT_RETURN_DIRTY;

	if (flags & UBC_PUSHALL)
		/*
		 * then return all the interesting pages in the range (both dirty and precious)
		 * to the pager
		 */
		flush_flags = MEMORY_OBJECT_RETURN_ALL;

	beg_off = trunc_page_64(beg_off);
	end_off = round_page_64(end_off);
	tsize   = (memory_object_size_t)(end_off - beg_off);

	/* flush and/or invalidate pages in the range requested */
	kret = memory_object_lock_request(vp->v_ubcinfo->ui_control,
					  beg_off, tsize, resid_off, io_errno,
					  flush_flags, request_flags, VM_PROT_NO_CHANGE);

	return ((kret == KERN_SUCCESS) ? 1 : 0);
}


/*
 * The vnode is mapped explicitly, mark it so.
 */
__private_extern__ int
ubc_map(vnode_t vp, int flags)
{
	struct ubc_info *uip;
	int error = 0;
	int need_ref = 0;
	struct vfs_context context;

	if (vnode_getwithref(vp))
		return (0);

	if (UBCINFOEXISTS(vp)) {
		context.vc_proc = current_proc();
		context.vc_ucred = kauth_cred_get();

		error = VNOP_MMAP(vp, flags, &context);

		/* anything other than EPERM is ignored; the mapping proceeds */
		if (error != EPERM)
			error = 0;

		if (error == 0) {
			vnode_lock(vp);

			uip = vp->v_ubcinfo;

			if ( !ISSET(uip->ui_flags, UI_ISMAPPED))
				need_ref = 1;
			SET(uip->ui_flags, (UI_WASMAPPED | UI_ISMAPPED));

			vnode_unlock(vp);

			if (need_ref)
				vnode_ref(vp);
		}
	}
	vnode_put(vp);

	return (error);
}

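/*
 * Example: the VM's mmap() path is the expected caller (illustrative
 * sketch; "prot" stands in for whatever protections the mapping was
 * requested with, which are handed through to VNOP_MMAP() above):
 *
 *	if (ubc_map(vp, prot) == EPERM)
 *		... refuse the mapping ...
 */
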
/*
 * destroy the named reference for a given vnode
 */
__private_extern__ int
ubc_destroy_named(struct vnode *vp)
{
	memory_object_control_t control;
	struct ubc_info *uip;
	kern_return_t kret;

	/*
	 * We may already have had the object terminated
	 * and the ubcinfo released as a side effect of
	 * some earlier processing.  If so, pretend we did
	 * it, because it probably was a result of our
	 * efforts.
	 */
	if (!UBCINFOEXISTS(vp))
		return (1);

	uip = vp->v_ubcinfo;

	/*
	 * Terminate the memory object.
	 * memory_object_destroy() will result in
	 * vnode_pager_no_senders().
	 * That will release the pager reference
	 * and the vnode will move to the free list.
	 */
	control = ubc_getobject(vp, UBC_HOLDOBJECT);
	if (control != MEMORY_OBJECT_CONTROL_NULL) {

		/*
		 * XXXXX - should we hold the vnode lock here?
		 */
		if (ISSET(vp->v_flag, VTERMINATE))
			panic("ubc_destroy_named: already terminating");
		SET(vp->v_flag, VTERMINATE);

		kret = memory_object_destroy(control, 0);
		if (kret != KERN_SUCCESS)
			return (0);

		/*
		 * memory_object_destroy() is asynchronous
		 * with respect to vnode_pager_no_senders().
		 * wait for vnode_pager_no_senders() to clear
		 * VNAMED_UBC
		 */
		vnode_lock(vp);
		while (ISSET(vp->v_lflag, VNAMED_UBC)) {
			(void)msleep((caddr_t)&vp->v_lflag, &vp->v_lock,
				     PINOD, "ubc_destroy_named", 0);
		}
		vnode_unlock(vp);
	}
	return (1);
}


/*
 * Find out whether a vnode is in use by UBC
 * Returns 1 if file is in use by UBC, 0 if not
 */
int
ubc_isinuse(struct vnode *vp, int busycount)
{
	if ( !UBCINFOEXISTS(vp))
		return (0);
	return (ubc_isinuse_locked(vp, busycount, 0));
}


int
ubc_isinuse_locked(struct vnode *vp, int busycount, int locked)
{
	int retval = 0;

	if (!locked)
		vnode_lock(vp);

	if ((vp->v_usecount - vp->v_kusecount) > busycount)
		retval = 1;

	if (!locked)
		vnode_unlock(vp);
	return (retval);
}

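/*
 * Example: check whether anyone beyond the kernel's own uses still has
 * the file open (illustrative sketch; a busycount of 1 tolerates one
 * additional known use, such as the caller's own reference):
 *
 *	if (ubc_isinuse(vp, 1))
 *		... file is still open elsewhere; defer cleanup ...
 */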

/*
 * MUST only be called by the VM
 */
__private_extern__ void
ubc_unmap(struct vnode *vp)
{
	struct vfs_context context;
	struct ubc_info *uip;
	int	need_rele = 0;

	if (vnode_getwithref(vp))
		return;

	if (UBCINFOEXISTS(vp)) {
		vnode_lock(vp);

		uip = vp->v_ubcinfo;
		if (ISSET(uip->ui_flags, UI_ISMAPPED)) {
			CLR(uip->ui_flags, UI_ISMAPPED);
			need_rele = 1;
		}
		vnode_unlock(vp);

		if (need_rele) {
			context.vc_proc = current_proc();
			context.vc_ucred = kauth_cred_get();
			(void)VNOP_MNOMAP(vp, &context);

			vnode_rele(vp);
		}
	}
	/*
	 * the drop of the vnode ref will cleanup
	 */
	vnode_put(vp);
}

kern_return_t
ubc_page_op(
	struct vnode	*vp,
	off_t		 f_offset,
	int		 ops,
	ppnum_t		*phys_entryp,
	int		*flagsp)
{
	memory_object_control_t	control;

	control = ubc_getobject(vp, UBC_FLAGS_NONE);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	return (memory_object_page_op(control,
				      (memory_object_offset_t)f_offset,
				      ops,
				      phys_entryp,
				      flagsp));
}

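/*
 * Example: query whether the page at "f_offset" is resident and dirty
 * (illustrative sketch; UPL_POP_PRESENT and UPL_POP_DIRTY are the
 * page-op flag bits from <mach/memory_object_types.h>):
 *
 *	int pg_flags = 0;
 *	if (ubc_page_op(vp, f_offset, 0, NULL, &pg_flags) == KERN_SUCCESS &&
 *	    (pg_flags & UPL_POP_DIRTY))
 *		printf("page at %lld is resident and dirty\n", f_offset);
 */
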
__private_extern__ kern_return_t
ubc_page_op_with_control(
	memory_object_control_t	 control,
	off_t			 f_offset,
	int			 ops,
	ppnum_t			*phys_entryp,
	int			*flagsp)
{
	return (memory_object_page_op(control,
				      (memory_object_offset_t)f_offset,
				      ops,
				      phys_entryp,
				      flagsp));
}

kern_return_t
ubc_range_op(
	struct vnode	*vp,
	off_t		 f_offset_beg,
	off_t		 f_offset_end,
	int		 ops,
	int		*range)
{
	memory_object_control_t	control;

	control = ubc_getobject(vp, UBC_FLAGS_NONE);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	return (memory_object_range_op(control,
				       (memory_object_offset_t)f_offset_beg,
				       (memory_object_offset_t)f_offset_end,
				       ops,
				       range));
}

kern_return_t
ubc_create_upl(
	struct vnode	*vp,
	off_t		 f_offset,
	long		 bufsize,
	upl_t		*uplp,
	upl_page_info_t	**plp,
	int		 uplflags)
{
	memory_object_control_t	control;
	int			count;
	int			ubcflags;
	kern_return_t		kr;

	if (bufsize & 0xfff)
		return KERN_INVALID_ARGUMENT;

	if (uplflags & UPL_FOR_PAGEOUT) {
		uplflags &= ~UPL_FOR_PAGEOUT;
		ubcflags  =  UBC_FOR_PAGEOUT;
	} else
		ubcflags = UBC_FLAGS_NONE;

	control = ubc_getobject(vp, ubcflags);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	if (uplflags & UPL_WILL_BE_DUMPED) {
		uplflags &= ~UPL_WILL_BE_DUMPED;
		uplflags |= (UPL_NO_SYNC | UPL_SET_INTERNAL);
	} else
		uplflags |= (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL);
	count = 0;
	kr = memory_object_upl_request(control, f_offset, bufsize,
				       uplp, NULL, &count, uplflags);
	if (plp != NULL)
		*plp = UPL_GET_INTERNAL_PAGE_LIST(*uplp);
	return kr;
}

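/*
 * Example life cycle of a UPL built on a vnode's pages (illustrative
 * sketch; "foff" must be page aligned and "bufsize" a multiple of the
 * page size, per the check above):
 *
 *	upl_t upl;
 *	upl_page_info_t *pl;
 *	vm_offset_t kaddr;
 *
 *	if (ubc_create_upl(vp, foff, bufsize, &upl, &pl, 0) == KERN_SUCCESS) {
 *		if (ubc_upl_map(upl, &kaddr) == KERN_SUCCESS) {
 *			... access the pages through kaddr ...
 *			(void) ubc_upl_unmap(upl);
 *			(void) ubc_upl_commit_range(upl, 0, bufsize,
 *			    UPL_COMMIT_FREE_ON_EMPTY);
 *		} else
 *			(void) ubc_upl_abort(upl, UPL_ABORT_ERROR);
 *	}
 */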

kern_return_t
ubc_upl_map(
	upl_t		 upl,
	vm_offset_t	*dst_addr)
{
	return (vm_upl_map(kernel_map, upl, dst_addr));
}


kern_return_t
ubc_upl_unmap(
	upl_t	upl)
{
	return (vm_upl_unmap(kernel_map, upl));
}

kern_return_t
ubc_upl_commit(
	upl_t	upl)
{
	upl_page_info_t	*pl;
	kern_return_t	kr;

	pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
	kr = upl_commit(upl, pl, MAX_UPL_TRANSFER);
	upl_deallocate(upl);
	return kr;
}


kern_return_t
ubc_upl_commit_range(
	upl_t		upl,
	vm_offset_t	offset,
	vm_size_t	size,
	int		flags)
{
	upl_page_info_t	*pl;
	boolean_t	empty;
	kern_return_t	kr;

	if (flags & UPL_COMMIT_FREE_ON_EMPTY)
		flags |= UPL_COMMIT_NOTIFY_EMPTY;

	pl = UPL_GET_INTERNAL_PAGE_LIST(upl);

	kr = upl_commit_range(upl, offset, size, flags,
			      pl, MAX_UPL_TRANSFER, &empty);

	if ((flags & UPL_COMMIT_FREE_ON_EMPTY) && empty)
		upl_deallocate(upl);

	return kr;
}

kern_return_t
ubc_upl_abort_range(
	upl_t		upl,
	vm_offset_t	offset,
	vm_size_t	size,
	int		abort_flags)
{
	kern_return_t	kr;
	boolean_t	empty = FALSE;

	if (abort_flags & UPL_ABORT_FREE_ON_EMPTY)
		abort_flags |= UPL_ABORT_NOTIFY_EMPTY;

	kr = upl_abort_range(upl, offset, size, abort_flags, &empty);

	if ((abort_flags & UPL_ABORT_FREE_ON_EMPTY) && empty)
		upl_deallocate(upl);

	return kr;
}

kern_return_t
ubc_upl_abort(
	upl_t	upl,
	int	abort_type)
{
	kern_return_t	kr;

	kr = upl_abort(upl, abort_type);
	upl_deallocate(upl);
	return kr;
}

upl_page_info_t *
ubc_upl_pageinfo(
	upl_t	upl)
{
	return (UPL_GET_INTERNAL_PAGE_LIST(upl));
}

/************* UBC APIS **************/

int
UBCINFOMISSING(struct vnode * vp)
{
	return ((vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo == UBC_INFO_NULL));
}

int
UBCINFORECLAIMED(struct vnode * vp)
{
	return ((vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo == UBC_INFO_NULL));
}


int
UBCINFOEXISTS(struct vnode * vp)
{
	return ((vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo != UBC_INFO_NULL));
}
int
UBCISVALID(struct vnode * vp)
{
	return ((vp) && ((vp)->v_type == VREG) && !((vp)->v_flag & VSYSTEM));
}
int
UBCINVALID(struct vnode * vp)
{
	return (((vp) == NULL) || ((vp) && ((vp)->v_type != VREG))
		|| ((vp) && ((vp)->v_flag & VSYSTEM)));
}
int
UBCINFOCHECK(const char * fun, struct vnode * vp)
{
	if ((vp) && ((vp)->v_type == VREG) &&
	    ((vp)->v_ubcinfo == UBC_INFO_NULL)) {
		panic("%s: lost ubc_info", (fun));
		return (1);
	} else
		return (0);
}