/* bsd/kern/ubc_subr.c, from xnu-792.25.20 (apple/xnu.git) */
/*
 * Copyright (c) 1999-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 *	File:	ubc_subr.c
 *	Author:	Umesh Vaishampayan [umeshv@apple.com]
 *		05-Aug-1999	umeshv	Created.
 *
 *	Functions related to the Unified Buffer Cache.
 *
 *	Callers of UBC functions MUST have a valid reference on the vnode.
 *
 */

#undef DIAGNOSTIC
#define DIAGNOSTIC 1

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mount_internal.h>
#include <sys/vnode_internal.h>
#include <sys/ubc_internal.h>
#include <sys/ucred.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/buf.h>
#include <sys/user.h>

#include <mach/mach_types.h>
#include <mach/memory_object_types.h>
#include <mach/memory_object_control.h>
#include <mach/vm_map.h>
#include <mach/upl.h>

#include <kern/kern_types.h>
#include <kern/zalloc.h>
#include <kern/thread.h>
#include <vm/vm_kern.h>
#include <vm/vm_protos.h>	/* last */

#if DIAGNOSTIC
#if defined(assert)
#undef assert
#endif
#define assert(cond)	\
	((void) ((cond) ? 0 : panic("%s:%d (%s)", __FILE__, __LINE__, # cond)))
#else
#include <kern/assert.h>
#endif /* DIAGNOSTIC */

int ubc_info_init_internal(struct vnode *vp, int withfsize, off_t filesize);
static int ubc_umcallback(vnode_t, void *);
int ubc_isinuse_locked(vnode_t, int, int);
static int ubc_msync_internal(vnode_t, off_t, off_t, off_t *, int, int *);

struct zone *ubc_info_zone;

/*
 * Initialization of the zone for Unified Buffer Cache.
 */
__private_extern__ void
ubc_init()
{
	int i;

	i = (vm_size_t) sizeof (struct ubc_info);
	/* XXX the number of elements should be tied in to maxvnodes */
	ubc_info_zone = zinit(i, 10000 * i, 8192, "ubc_info zone");
	return;
}

/*
 * Initialize a ubc_info structure for a vnode.
 */
int
ubc_info_init(struct vnode *vp)
{
	return (ubc_info_init_internal(vp, 0, 0));
}

int
ubc_info_init_withsize(struct vnode *vp, off_t filesize)
{
	return (ubc_info_init_internal(vp, 1, filesize));
}

int
ubc_info_init_internal(struct vnode *vp, int withfsize, off_t filesize)
{
	register struct ubc_info *uip;
	void *pager;
	struct proc *p = current_proc();
	int error = 0;
	kern_return_t kret;
	memory_object_control_t control;

	uip = vp->v_ubcinfo;

	if (uip == UBC_INFO_NULL) {

		uip = (struct ubc_info *) zalloc(ubc_info_zone);
		bzero((char *)uip, sizeof(struct ubc_info));

		uip->ui_vnode = vp;
		uip->ui_flags = UI_INITED;
		uip->ui_ucred = NOCRED;
	}
#if DIAGNOSTIC
	else
		Debugger("ubc_info_init: already");
#endif /* DIAGNOSTIC */

	assert(uip->ui_flags != UI_NONE);
	assert(uip->ui_vnode == vp);

	/* now set this ubc_info in the vnode */
	vp->v_ubcinfo = uip;

	pager = (void *)vnode_pager_setup(vp, uip->ui_pager);
	assert(pager);

	SET(uip->ui_flags, UI_HASPAGER);
	uip->ui_pager = pager;

	/*
	 * Note: we cannot use VNOP_GETATTR() to get an accurate
	 * value for ui_size here.  With NFS, nfs_getattr() can call
	 * vinvalbuf(), and at this point the ubc_info is not set up
	 * to deal with that.  So use a bogus size for now.
	 */

	/*
	 * create a vnode - vm_object association
	 * memory_object_create_named() creates a "named" reference on the
	 * memory object; we hold this reference as long as the vnode is
	 * "alive."  Since memory_object_create_named() took its own reference
	 * on the vnode pager we passed it, we can drop the reference
	 * vnode_pager_setup() returned here.
	 */
	kret = memory_object_create_named(pager,
	    (memory_object_size_t)uip->ui_size, &control);
	vnode_pager_deallocate(pager);
	if (kret != KERN_SUCCESS)
		panic("ubc_info_init: memory_object_create_named returned %d", kret);

	assert(control);
	uip->ui_control = control;		/* cache the value of the mo control */
	SET(uip->ui_flags, UI_HASOBJREF);	/* with a named reference */
#if 0
	/* create a pager reference on the vnode */
	error = vnode_pager_vget(vp);
	if (error)
		panic("ubc_info_init: vnode_pager_vget error = %d", error);
#endif
	if (withfsize == 0) {
		struct vfs_context context;
		/* initialize the size */
		context.vc_proc = p;
		context.vc_ucred = kauth_cred_get();
		error = vnode_size(vp, &uip->ui_size, &context);
		if (error)
			uip->ui_size = 0;
	} else {
		uip->ui_size = filesize;
	}
	vp->v_lflag |= VNAMED_UBC;

	return (error);
}

/* Free the ubc_info */
static void
ubc_info_free(struct ubc_info *uip)
{
	if (IS_VALID_CRED(uip->ui_ucred)) {
		kauth_cred_unref(&uip->ui_ucred);
	}

	if (uip->ui_control != MEMORY_OBJECT_CONTROL_NULL)
		memory_object_control_deallocate(uip->ui_control);

	cluster_release(uip);

	zfree(ubc_info_zone, (vm_offset_t)uip);
	return;
}

void
ubc_info_deallocate(struct ubc_info *uip)
{
	ubc_info_free(uip);
}

/*
 * Communicate the file's size change to the VM.
 * Returns 1 on success, 0 on failure.
 */
int
ubc_setsize(struct vnode *vp, off_t nsize)
{
	off_t osize;	/* ui_size before change */
	off_t lastpg, olastpgend, lastoff;
	struct ubc_info *uip;
	memory_object_control_t control;
	kern_return_t kret;

	if (nsize < (off_t)0)
		return (0);

	if (!UBCINFOEXISTS(vp))
		return (0);

	uip = vp->v_ubcinfo;
	osize = uip->ui_size;	/* call ubc_getsize() ??? */
	/* Update the size before flushing the VM */
	uip->ui_size = nsize;

	if (nsize >= osize)	/* Nothing more to do */
		return (1);	/* return success */

	/*
	 * When the file shrinks, invalidate the pages beyond the
	 * new size.  Also get rid of garbage beyond nsize on the
	 * last page.  ui_size has already been set to nsize above;
	 * this ensures that a pageout will not write beyond the new
	 * end of the file.
	 */

	lastpg = trunc_page_64(nsize);
	olastpgend = round_page_64(osize);
	control = uip->ui_control;
	assert(control);
	lastoff = (nsize & PAGE_MASK_64);

	/*
	 * If the new size is a multiple of the page size, we need not
	 * flush the last page; invalidating is sufficient.
	 */
	if (!lastoff) {
		/* invalidate last page and old contents beyond nsize */
		kret = memory_object_lock_request(control,
		    (memory_object_offset_t)lastpg,
		    (memory_object_size_t)(olastpgend - lastpg), NULL, NULL,
		    MEMORY_OBJECT_RETURN_NONE, MEMORY_OBJECT_DATA_FLUSH,
		    VM_PROT_NO_CHANGE);
		if (kret != KERN_SUCCESS)
			printf("ubc_setsize: invalidate failed (error = %d)\n", kret);

		return ((kret == KERN_SUCCESS) ? 1 : 0);
	}

	/* flush the last page */
	kret = memory_object_lock_request(control,
	    (memory_object_offset_t)lastpg,
	    PAGE_SIZE_64, NULL, NULL,
	    MEMORY_OBJECT_RETURN_DIRTY, FALSE,
	    VM_PROT_NO_CHANGE);

	if (kret == KERN_SUCCESS) {
		/* invalidate last page and old contents beyond nsize */
		kret = memory_object_lock_request(control,
		    (memory_object_offset_t)lastpg,
		    (memory_object_size_t)(olastpgend - lastpg), NULL, NULL,
		    MEMORY_OBJECT_RETURN_NONE, MEMORY_OBJECT_DATA_FLUSH,
		    VM_PROT_NO_CHANGE);
		if (kret != KERN_SUCCESS)
			printf("ubc_setsize: invalidate failed (error = %d)\n", kret);
	} else
		printf("ubc_setsize: flush failed (error = %d)\n", kret);

	return ((kret == KERN_SUCCESS) ? 1 : 0);
}
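
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * a filesystem's truncate path might update its on-disk metadata first
 * and then call ubc_setsize() so the VM discards cached data past the
 * new EOF.  my_fs_truncate() and my_fs_set_disk_size() are hypothetical
 * names, and the errno mapping is an assumption.
 */
#if 0
static int
my_fs_truncate(vnode_t vp, off_t new_size)
{
	int error;

	/* hypothetical: adjust the on-disk representation first */
	error = my_fs_set_disk_size(vp, new_size);
	if (error)
		return (error);

	/*
	 * tell the VM; on a shrink this flushes/invalidates pages
	 * beyond new_size so pageouts never write past the new EOF
	 */
	if (ubc_setsize(vp, new_size) == 0)
		return (EIO);	/* sketch: map the VM failure to an errno */

	return (0);
}
#endif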

/*
 * Get the size of the file
 */
off_t
ubc_getsize(struct vnode *vp)
{
	/*
	 * callers depend on the side effect of this working this way,
	 * as they call this for directories
	 */
	if (!UBCINFOEXISTS(vp))
		return ((off_t)0);
	return (vp->v_ubcinfo->ui_size);
}

/*
 * Call ubc_msync(vp, 0, EOF, NULL, UBC_PUSHALL) on all the vnodes
 * for this mount point.
 * Always returns 0.
 */

__private_extern__ int
ubc_umount(struct mount *mp)
{
	vnode_iterate(mp, 0, ubc_umcallback, 0);
	return (0);
}

static int
ubc_umcallback(vnode_t vp, __unused void * args)
{

	if (UBCINFOEXISTS(vp)) {

		cluster_push(vp, 0);

		(void) ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL, UBC_PUSHALL);
	}
	return (VNODE_RETURNED);
}

/* Get the credentials */
kauth_cred_t
ubc_getcred(struct vnode *vp)
{
	if (UBCINFOEXISTS(vp))
		return (vp->v_ubcinfo->ui_ucred);

	return (NOCRED);
}

int
ubc_setthreadcred(struct vnode *vp, struct proc *p, thread_t thread)
{
	struct ubc_info *uip;
	kauth_cred_t credp;
	struct uthread *uthread = get_bsdthread_info(thread);

	if (!UBCINFOEXISTS(vp))
		return (1);

	vnode_lock(vp);

	uip = vp->v_ubcinfo;
	credp = uip->ui_ucred;

	if (!IS_VALID_CRED(credp)) {
		/* use per-thread cred, if assumed identity, else proc cred */
		if (uthread == NULL || (uthread->uu_flag & UT_SETUID) == 0) {
			uip->ui_ucred = kauth_cred_proc_ref(p);
		} else {
			uip->ui_ucred = uthread->uu_ucred;
			kauth_cred_ref(uip->ui_ucred);
		}
	}
	vnode_unlock(vp);

	return (0);
}

/*
 * Set the credentials.
 * Existing credentials are not changed.
 * Returns 1 on success and 0 on failure.
 */
int
ubc_setcred(struct vnode *vp, struct proc *p)
{
	struct ubc_info *uip;
	kauth_cred_t credp;

	if (!UBCINFOEXISTS(vp))
		return (0);

	vnode_lock(vp);

	uip = vp->v_ubcinfo;
	credp = uip->ui_ucred;

	if (!IS_VALID_CRED(credp)) {
		uip->ui_ucred = kauth_cred_proc_ref(p);
	}
	vnode_unlock(vp);

	return (1);
}

/* Get the pager */
__private_extern__ memory_object_t
ubc_getpager(struct vnode *vp)
{
	if (UBCINFOEXISTS(vp))
		return (vp->v_ubcinfo->ui_pager);

	return (0);
}

/*
 * Get the memory object associated with this vnode.
 * If the vnode was reactivated, the memory object may no longer exist.
 * Unless "do not reactivate" was specified, look it up using the pager.
 * If a hold was requested, create an object reference if one does not
 * already exist.
 */

memory_object_control_t
ubc_getobject(struct vnode *vp, __unused int flags)
{
	if (UBCINFOEXISTS(vp))
		return ((vp->v_ubcinfo->ui_control));

	return (0);
}

off_t
ubc_blktooff(vnode_t vp, daddr64_t blkno)
{
	off_t file_offset;
	int error;

	if (UBCINVALID(vp))
		return ((off_t)-1);

	error = VNOP_BLKTOOFF(vp, blkno, &file_offset);
	if (error)
		file_offset = -1;

	return (file_offset);
}

daddr64_t
ubc_offtoblk(vnode_t vp, off_t offset)
{
	daddr64_t blkno;
	int error = 0;

	if (UBCINVALID(vp))
		return ((daddr64_t)-1);

	error = VNOP_OFFTOBLK(vp, offset, &blkno);
	if (error)
		blkno = -1;

	return (blkno);
}
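
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * converting between file offsets and logical block numbers.  Both
 * helpers return -1 on failure, so results must be checked before use.
 * example_offset_to_block() is a hypothetical name.
 */
#if 0
static void
example_offset_to_block(vnode_t vp, off_t offset)
{
	daddr64_t blkno;
	off_t blk_start;

	blkno = ubc_offtoblk(vp, offset);	/* block containing offset */
	if (blkno == (daddr64_t)-1)
		return;

	blk_start = ubc_blktooff(vp, blkno);	/* offset of the block start */
	if (blk_start == (off_t)-1)
		return;

	/* (offset - blk_start) is the byte offset within that block */
}
#endif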

int
ubc_pages_resident(vnode_t vp)
{
	kern_return_t kret;
	boolean_t has_pages_resident;

	if (!UBCINFOEXISTS(vp))
		return (0);

	kret = memory_object_pages_resident(vp->v_ubcinfo->ui_control, &has_pages_resident);

	if (kret != KERN_SUCCESS)
		return (0);

	if (has_pages_resident == TRUE)
		return (1);

	return (0);
}

/*
 * This interface will eventually be deprecated
 *
 * clean and/or invalidate a range in the memory object that backs this
 * vnode.  The start offset is truncated to the page boundary and the
 * size is adjusted to include the last page in the range.
 *
 * returns 1 for success, 0 for failure
 */
int
ubc_sync_range(vnode_t vp, off_t beg_off, off_t end_off, int flags)
{
	return (ubc_msync_internal(vp, beg_off, end_off, NULL, flags, NULL));
}

/*
 * clean and/or invalidate a range in the memory object that backs this
 * vnode.  The start offset is truncated to the page boundary and the
 * size is adjusted to include the last page in the range.
 *
 * returns 0 on success, an errno on failure (EINVAL if nothing was done)
 */
errno_t
ubc_msync(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags)
{
	int retval;
	int io_errno = 0;

	if (resid_off)
		*resid_off = beg_off;

	retval = ubc_msync_internal(vp, beg_off, end_off, resid_off, flags, &io_errno);

	if (retval == 0 && io_errno == 0)
		return (EINVAL);
	return (io_errno);
}
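
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * push all dirty pages of a file to disk, waiting for the I/O, then
 * discard the now-clean resident pages.  A zero return means success.
 * example_flush_and_invalidate() is a hypothetical name.
 */
#if 0
static int
example_flush_and_invalidate(vnode_t vp)
{
	int error;

	/* write back dirty pages synchronously */
	error = ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL,
	    UBC_PUSHDIRTY | UBC_SYNC);
	if (error)
		return (error);

	/* now drop the resident pages */
	return (ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL,
	    UBC_INVALIDATE));
}
#endif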

/*
 * clean and/or invalidate a range in the memory object that backs this
 * vnode.  The start offset is truncated to the page boundary and the
 * size is adjusted to include the last page in the range.
 */
static int
ubc_msync_internal(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags, int *io_errno)
{
	memory_object_size_t tsize;
	kern_return_t kret;
	int request_flags = 0;
	int flush_flags = MEMORY_OBJECT_RETURN_NONE;

	if (!UBCINFOEXISTS(vp))
		return (0);
	if (end_off <= beg_off)
		return (0);
	if ((flags & (UBC_INVALIDATE | UBC_PUSHDIRTY | UBC_PUSHALL)) == 0)
		return (0);

	if (flags & UBC_INVALIDATE)
		/*
		 * discard the resident pages
		 */
		request_flags = (MEMORY_OBJECT_DATA_FLUSH | MEMORY_OBJECT_DATA_NO_CHANGE);

	if (flags & UBC_SYNC)
		/*
		 * wait for all the I/O to complete before returning
		 */
		request_flags |= MEMORY_OBJECT_IO_SYNC;

	if (flags & UBC_PUSHDIRTY)
		/*
		 * we only return the dirty pages in the range
		 */
		flush_flags = MEMORY_OBJECT_RETURN_DIRTY;

	if (flags & UBC_PUSHALL)
		/*
		 * then return all the interesting pages in the range (both dirty
		 * and precious) to the pager
		 */
		flush_flags = MEMORY_OBJECT_RETURN_ALL;

	beg_off = trunc_page_64(beg_off);
	end_off = round_page_64(end_off);
	tsize = (memory_object_size_t)end_off - beg_off;

	/* flush and/or invalidate pages in the range requested */
	kret = memory_object_lock_request(vp->v_ubcinfo->ui_control,
	    beg_off, tsize, resid_off, io_errno,
	    flush_flags, request_flags, VM_PROT_NO_CHANGE);

	return ((kret == KERN_SUCCESS) ? 1 : 0);
}

/*
 * The vnode is mapped explicitly, mark it so.
 */
__private_extern__ int
ubc_map(vnode_t vp, int flags)
{
	struct ubc_info *uip;
	int error = 0;
	int need_ref = 0;
	struct vfs_context context;

	if (vnode_getwithref(vp))
		return (0);

	if (UBCINFOEXISTS(vp)) {
		context.vc_proc = current_proc();
		context.vc_ucred = kauth_cred_get();

		error = VNOP_MMAP(vp, flags, &context);

		if (error != EPERM)
			error = 0;

		if (error == 0) {
			vnode_lock(vp);

			uip = vp->v_ubcinfo;

			if (!ISSET(uip->ui_flags, UI_ISMAPPED))
				need_ref = 1;
			SET(uip->ui_flags, (UI_WASMAPPED | UI_ISMAPPED));

			vnode_unlock(vp);

			if (need_ref)
				vnode_ref(vp);
		}
	}
	vnode_put(vp);

	return (error);
}

/*
 * destroy the named reference for a given vnode
 */
__private_extern__ int
ubc_destroy_named(struct vnode *vp)
{
	memory_object_control_t control;
	struct ubc_info *uip;
	kern_return_t kret;

	/*
	 * We may already have had the object terminated
	 * and the ubcinfo released as a side effect of
	 * some earlier processing.  If so, pretend we did
	 * it, because it probably was a result of our
	 * efforts.
	 */
	if (!UBCINFOEXISTS(vp))
		return (1);

	uip = vp->v_ubcinfo;

	/*
	 * Terminate the memory object.
	 * memory_object_destroy() will result in
	 * vnode_pager_no_senders().
	 * That will release the pager reference
	 * and the vnode will move to the free list.
	 */
	control = ubc_getobject(vp, UBC_HOLDOBJECT);
	if (control != MEMORY_OBJECT_CONTROL_NULL) {

		/*
		 * XXXXX - should we hold the vnode lock here?
		 */
		if (ISSET(vp->v_flag, VTERMINATE))
			panic("ubc_destroy_named: already terminating");
		SET(vp->v_flag, VTERMINATE);

		kret = memory_object_destroy(control, 0);
		if (kret != KERN_SUCCESS)
			return (0);

		/*
		 * memory_object_destroy() is asynchronous
		 * with respect to vnode_pager_no_senders();
		 * wait for vnode_pager_no_senders() to clear
		 * VNAMED_UBC
		 */
		vnode_lock(vp);
		while (ISSET(vp->v_lflag, VNAMED_UBC)) {
			(void)msleep((caddr_t)&vp->v_lflag, &vp->v_lock,
			    PINOD, "ubc_destroy_named", 0);
		}
		vnode_unlock(vp);
	}
	return (1);
}

/*
 * Find out whether a vnode is in use by UBC
 * Returns 1 if file is in use by UBC, 0 if not
 */
int
ubc_isinuse(struct vnode *vp, int busycount)
{
	if (!UBCINFOEXISTS(vp))
		return (0);
	return (ubc_isinuse_locked(vp, busycount, 0));
}

int
ubc_isinuse_locked(struct vnode *vp, int busycount, int locked)
{
	int retval = 0;

	if (!locked)
		vnode_lock(vp);

	if ((vp->v_usecount - vp->v_kusecount) > busycount)
		retval = 1;

	if (!locked)
		vnode_unlock(vp);
	return (retval);
}


/*
 * MUST only be called by the VM
 */
__private_extern__ void
ubc_unmap(struct vnode *vp)
{
	struct vfs_context context;
	struct ubc_info *uip;
	int need_rele = 0;

	if (vnode_getwithref(vp))
		return;

	if (UBCINFOEXISTS(vp)) {
		vnode_lock(vp);

		uip = vp->v_ubcinfo;
		if (ISSET(uip->ui_flags, UI_ISMAPPED)) {
			CLR(uip->ui_flags, UI_ISMAPPED);
			need_rele = 1;
		}
		vnode_unlock(vp);

		if (need_rele) {
			context.vc_proc = current_proc();
			context.vc_ucred = kauth_cred_get();
			(void)VNOP_MNOMAP(vp, &context);

			vnode_rele(vp);
		}
	}
	/*
	 * dropping the vnode reference will clean up
	 */
	vnode_put(vp);
}
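
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * ubc_map() and ubc_unmap() are driven by the mmap()/unmap paths, not
 * called by filesystems directly.  The pairing below only illustrates
 * the reference protocol: the first map takes a vnode_ref() that the
 * last unmap releases.  example_map_lifecycle() is a hypothetical name.
 */
#if 0
static void
example_map_lifecycle(vnode_t vp)
{
	/* first explicit mapping: sets UI_ISMAPPED and takes a vnode ref */
	(void) ubc_map(vp, PROT_READ | PROT_WRITE);

	/* ... the file's pages are accessed through the mapping ... */

	/* last unmap: clears UI_ISMAPPED, calls VNOP_MNOMAP, drops the ref */
	ubc_unmap(vp);
}
#endif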

kern_return_t
ubc_page_op(
	struct vnode	*vp,
	off_t		f_offset,
	int		ops,
	ppnum_t		*phys_entryp,
	int		*flagsp)
{
	memory_object_control_t control;

	control = ubc_getobject(vp, UBC_FLAGS_NONE);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	return (memory_object_page_op(control,
	    (memory_object_offset_t)f_offset,
	    ops,
	    phys_entryp,
	    flagsp));
}

__private_extern__ kern_return_t
ubc_page_op_with_control(
	memory_object_control_t	control,
	off_t			f_offset,
	int			ops,
	ppnum_t			*phys_entryp,
	int			*flagsp)
{
	return (memory_object_page_op(control,
	    (memory_object_offset_t)f_offset,
	    ops,
	    phys_entryp,
	    flagsp));
}

kern_return_t
ubc_range_op(
	struct vnode	*vp,
	off_t		f_offset_beg,
	off_t		f_offset_end,
	int		ops,
	int		*range)
{
	memory_object_control_t control;

	control = ubc_getobject(vp, UBC_FLAGS_NONE);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	return (memory_object_range_op(control,
	    (memory_object_offset_t)f_offset_beg,
	    (memory_object_offset_t)f_offset_end,
	    ops,
	    range));
}

kern_return_t
ubc_create_upl(
	struct vnode	*vp,
	off_t		f_offset,
	long		bufsize,
	upl_t		*uplp,
	upl_page_info_t	**plp,
	int		uplflags)
{
	memory_object_control_t control;
	int count;
	int ubcflags;
	kern_return_t kr;

	if (bufsize & 0xfff)
		return KERN_INVALID_ARGUMENT;

	if (uplflags & UPL_FOR_PAGEOUT) {
		uplflags &= ~UPL_FOR_PAGEOUT;
		ubcflags = UBC_FOR_PAGEOUT;
	} else
		ubcflags = UBC_FLAGS_NONE;

	control = ubc_getobject(vp, ubcflags);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	if (uplflags & UPL_WILL_BE_DUMPED) {
		uplflags &= ~UPL_WILL_BE_DUMPED;
		uplflags |= (UPL_NO_SYNC | UPL_SET_INTERNAL);
	} else
		uplflags |= (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL);
	count = 0;
	kr = memory_object_upl_request(control, f_offset, bufsize,
	    uplp, NULL, &count, uplflags);
	if (plp != NULL)
		*plp = UPL_GET_INTERNAL_PAGE_LIST(*uplp);
	return kr;
}
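
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * create a UPL covering one page, map it into the kernel, operate on
 * the data, then unmap and commit.  Passing 0 for uplflags (no special
 * flags) is a sketch assumption; real callers pass UPL_* flags as
 * appropriate.  example_upl_io() is a hypothetical name.
 */
#if 0
static kern_return_t
example_upl_io(vnode_t vp, off_t f_offset)
{
	upl_t upl;
	upl_page_info_t *pl;
	vm_offset_t addr;
	kern_return_t kr;

	/* f_offset is assumed page aligned; bufsize must be a page multiple */
	kr = ubc_create_upl(vp, f_offset, PAGE_SIZE, &upl, &pl, 0);
	if (kr != KERN_SUCCESS)
		return (kr);

	kr = ubc_upl_map(upl, &addr);		/* kernel-visible mapping */
	if (kr != KERN_SUCCESS) {
		(void) ubc_upl_abort(upl, UPL_ABORT_ERROR);
		return (kr);
	}

	/* ... read or modify the page contents at "addr" ... */

	(void) ubc_upl_unmap(upl);
	return (ubc_upl_commit(upl));		/* commit and free the UPL */
}
#endif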

kern_return_t
ubc_upl_map(
	upl_t		upl,
	vm_offset_t	*dst_addr)
{
	return (vm_upl_map(kernel_map, upl, dst_addr));
}

kern_return_t
ubc_upl_unmap(
	upl_t	upl)
{
	return (vm_upl_unmap(kernel_map, upl));
}

kern_return_t
ubc_upl_commit(
	upl_t	upl)
{
	upl_page_info_t *pl;
	kern_return_t kr;

	pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
	kr = upl_commit(upl, pl, MAX_UPL_TRANSFER);
	upl_deallocate(upl);
	return kr;
}

kern_return_t
ubc_upl_commit_range(
	upl_t		upl,
	vm_offset_t	offset,
	vm_size_t	size,
	int		flags)
{
	upl_page_info_t *pl;
	boolean_t empty;
	kern_return_t kr;

	if (flags & UPL_COMMIT_FREE_ON_EMPTY)
		flags |= UPL_COMMIT_NOTIFY_EMPTY;

	pl = UPL_GET_INTERNAL_PAGE_LIST(upl);

	kr = upl_commit_range(upl, offset, size, flags,
	    pl, MAX_UPL_TRANSFER, &empty);

	if ((flags & UPL_COMMIT_FREE_ON_EMPTY) && empty)
		upl_deallocate(upl);

	return kr;
}

kern_return_t
ubc_upl_abort_range(
	upl_t		upl,
	vm_offset_t	offset,
	vm_size_t	size,
	int		abort_flags)
{
	kern_return_t kr;
	boolean_t empty = FALSE;

	if (abort_flags & UPL_ABORT_FREE_ON_EMPTY)
		abort_flags |= UPL_ABORT_NOTIFY_EMPTY;

	kr = upl_abort_range(upl, offset, size, abort_flags, &empty);

	if ((abort_flags & UPL_ABORT_FREE_ON_EMPTY) && empty)
		upl_deallocate(upl);

	return kr;
}

kern_return_t
ubc_upl_abort(
	upl_t	upl,
	int	abort_type)
{
	kern_return_t kr;

	kr = upl_abort(upl, abort_type);
	upl_deallocate(upl);
	return kr;
}

upl_page_info_t *
ubc_upl_pageinfo(
	upl_t	upl)
{
	return (UPL_GET_INTERNAL_PAGE_LIST(upl));
}

/************* UBC APIS **************/

int
UBCINFOMISSING(struct vnode * vp)
{
	return ((vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo == UBC_INFO_NULL));
}

int
UBCINFORECLAIMED(struct vnode * vp)
{
	return ((vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo == UBC_INFO_NULL));
}

int
UBCINFOEXISTS(struct vnode * vp)
{
	return ((vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo != UBC_INFO_NULL));
}

int
UBCISVALID(struct vnode * vp)
{
	return ((vp) && ((vp)->v_type == VREG) && !((vp)->v_flag & VSYSTEM));
}

int
UBCINVALID(struct vnode * vp)
{
	return (((vp) == NULL) || ((vp) && ((vp)->v_type != VREG))
	    || ((vp) && ((vp)->v_flag & VSYSTEM)));
}

int
UBCINFOCHECK(const char * fun, struct vnode * vp)
{
	if ((vp) && ((vp)->v_type == VREG) &&
	    ((vp)->v_ubcinfo == UBC_INFO_NULL)) {
		panic("%s: lost ubc_info", (fun));
		return (1);
	} else
		return (0);
}