/*
 * Copyright (c) 1999-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * File:    ubc_subr.c
 * Author:  Umesh Vaishampayan [umeshv@apple.com]
 *          05-Aug-1999 umeshv Created.
 *
 * Functions related to Unified Buffer cache.
 *
 * Caller of UBC functions MUST have a valid reference on the vnode.
 *
 */

#undef DIAGNOSTIC
#define DIAGNOSTIC 1

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mount_internal.h>
#include <sys/vnode_internal.h>
#include <sys/ubc_internal.h>
#include <sys/ucred.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/buf.h>
#include <sys/user.h>

#include <mach/mach_types.h>
#include <mach/memory_object_types.h>
#include <mach/memory_object_control.h>
#include <mach/vm_map.h>
#include <mach/upl.h>

#include <kern/kern_types.h>
#include <kern/zalloc.h>
#include <kern/thread.h>
#include <vm/vm_kern.h>
#include <vm/vm_protos.h> /* last */

#if DIAGNOSTIC
#if defined(assert)
#undef assert
#endif
#define assert(cond)    \
    ((void) ((cond) ? 0 : panic("%s:%d (%s)", __FILE__, __LINE__, # cond)))
#else
#include <kern/assert.h>
#endif /* DIAGNOSTIC */

int ubc_info_init_internal(struct vnode *vp, int withfsize, off_t filesize);
static int ubc_umcallback(vnode_t, void *);
int ubc_isinuse_locked(vnode_t, int, int);
static int ubc_msync_internal(vnode_t, off_t, off_t, off_t *, int, int *);

struct zone *ubc_info_zone;

/*
 * Initialization of the zone for Unified Buffer Cache.
 */
__private_extern__ void
ubc_init()
{
    int i;

    i = (vm_size_t) sizeof (struct ubc_info);
    /* XXX the number of elements should be tied in to maxvnodes */
    ubc_info_zone = zinit (i, 10000*i, 8192, "ubc_info zone");
    return;
}

/*
 * Initialize a ubc_info structure for a vnode.
 */
int
ubc_info_init(struct vnode *vp)
{
    return(ubc_info_init_internal(vp, 0, 0));
}
int
ubc_info_init_withsize(struct vnode *vp, off_t filesize)
{
    return(ubc_info_init_internal(vp, 1, filesize));
}

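/*
 * Common initialization path: allocate a ubc_info for the vnode if it does
 * not already have one, set up the vnode pager and the associated memory
 * object control, and record the file size (from 'filesize' when
 * 'withfsize' is set, otherwise via vnode_size()).
 */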
int
ubc_info_init_internal(struct vnode *vp, int withfsize, off_t filesize)
{
    register struct ubc_info *uip;
    void *pager;
    struct proc *p = current_proc();
    int error = 0;
    kern_return_t kret;
    memory_object_control_t control;

    uip = vp->v_ubcinfo;

    if (uip == UBC_INFO_NULL) {

        uip = (struct ubc_info *) zalloc(ubc_info_zone);
        bzero((char *)uip, sizeof(struct ubc_info));

        uip->ui_vnode = vp;
        uip->ui_flags = UI_INITED;
        uip->ui_ucred = NOCRED;
    }
#if DIAGNOSTIC
    else
        Debugger("ubc_info_init: already");
#endif /* DIAGNOSTIC */

    assert(uip->ui_flags != UI_NONE);
    assert(uip->ui_vnode == vp);

    /* now set this ubc_info in the vnode */
    vp->v_ubcinfo = uip;

    pager = (void *)vnode_pager_setup(vp, uip->ui_pager);
    assert(pager);

    SET(uip->ui_flags, UI_HASPAGER);
    uip->ui_pager = pager;

    /*
     * Note: we cannot use VNOP_GETATTR() to get an accurate value of
     * ui_size because of NFS: nfs_getattr() can call vinvalbuf(), and
     * at this point the ubc_info is not set up to deal with that.
     * So use a bogus size.
     */

    /*
     * Create a vnode - vm_object association.
     * memory_object_create_named() creates a "named" reference on the
     * memory object; we hold this reference as long as the vnode is
     * "alive."  Since memory_object_create_named() took its own reference
     * on the vnode pager we passed it, we can drop the reference
     * vnode_pager_setup() returned here.
     */
    kret = memory_object_create_named(pager,
        (memory_object_size_t)uip->ui_size, &control);
    vnode_pager_deallocate(pager);
    if (kret != KERN_SUCCESS)
        panic("ubc_info_init: memory_object_create_named returned %d", kret);

    assert(control);
    uip->ui_control = control;          /* cache the value of the mo control */
    SET(uip->ui_flags, UI_HASOBJREF);   /* with a named reference */
#if 0
    /* create a pager reference on the vnode */
    error = vnode_pager_vget(vp);
    if (error)
        panic("ubc_info_init: vnode_pager_vget error = %d", error);
#endif
    if (withfsize == 0) {
        struct vfs_context context;
        /* initialize the size */
        context.vc_proc = p;
        context.vc_ucred = kauth_cred_get();
        error = vnode_size(vp, &uip->ui_size, &context);
        if (error)
            uip->ui_size = 0;
    } else {
        uip->ui_size = filesize;
    }
    vp->v_lflag |= VNAMED_UBC;

    return (error);
}

/* Free the ubc_info */
static void
ubc_info_free(struct ubc_info *uip)
{
    kauth_cred_t credp;

    credp = uip->ui_ucred;
    if (credp != NOCRED) {
        uip->ui_ucred = NOCRED;
        kauth_cred_rele(credp);
    }

    if (uip->ui_control != MEMORY_OBJECT_CONTROL_NULL)
        memory_object_control_deallocate(uip->ui_control);

    cluster_release(uip);

    zfree(ubc_info_zone, (vm_offset_t)uip);
    return;
}

void
ubc_info_deallocate(struct ubc_info *uip)
{
    ubc_info_free(uip);
}

/*
 * Tell the VM that the size of the file has changed.
 * Returns 1 on success, 0 on failure.
 */
int
ubc_setsize(struct vnode *vp, off_t nsize)
{
    off_t osize;    /* ui_size before change */
    off_t lastpg, olastpgend, lastoff;
    struct ubc_info *uip;
    memory_object_control_t control;
    kern_return_t kret;

    if (nsize < (off_t)0)
        return (0);

    if (!UBCINFOEXISTS(vp))
        return (0);

    uip = vp->v_ubcinfo;
    osize = uip->ui_size;   /* call ubc_getsize() ??? */
    /* Update the size before flushing the VM */
    uip->ui_size = nsize;

    if (nsize >= osize)     /* Nothing more to do */
        return (1);         /* return success */

    /*
     * When the file shrinks, invalidate the pages beyond the
     * new size.  Also get rid of garbage beyond nsize on the
     * last page.  ui_size already holds nsize, which ensures
     * that a pageout will not write beyond the new end of the
     * file.
     */

    lastpg = trunc_page_64(nsize);
    olastpgend = round_page_64(osize);
    control = uip->ui_control;
    assert(control);
    lastoff = (nsize & PAGE_MASK_64);

    /*
     * If the new length is a multiple of the page size, there is no
     * partial last page to flush; invalidating is sufficient.
     */
    if (!lastoff) {
        /* invalidate last page and old contents beyond nsize */
        kret = memory_object_lock_request(control,
                    (memory_object_offset_t)lastpg,
                    (memory_object_size_t)(olastpgend - lastpg), NULL, NULL,
                    MEMORY_OBJECT_RETURN_NONE, MEMORY_OBJECT_DATA_FLUSH,
                    VM_PROT_NO_CHANGE);
        if (kret != KERN_SUCCESS)
            printf("ubc_setsize: invalidate failed (error = %d)\n", kret);

        return ((kret == KERN_SUCCESS) ? 1 : 0);
    }

    /* flush the last page */
    kret = memory_object_lock_request(control,
                (memory_object_offset_t)lastpg,
                PAGE_SIZE_64, NULL, NULL,
                MEMORY_OBJECT_RETURN_DIRTY, FALSE,
                VM_PROT_NO_CHANGE);

    if (kret == KERN_SUCCESS) {
        /* invalidate last page and old contents beyond nsize */
        kret = memory_object_lock_request(control,
                    (memory_object_offset_t)lastpg,
                    (memory_object_size_t)(olastpgend - lastpg), NULL, NULL,
                    MEMORY_OBJECT_RETURN_NONE, MEMORY_OBJECT_DATA_FLUSH,
                    VM_PROT_NO_CHANGE);
        if (kret != KERN_SUCCESS)
            printf("ubc_setsize: invalidate failed (error = %d)\n", kret);
    } else
        printf("ubc_setsize: flush failed (error = %d)\n", kret);

    return ((kret == KERN_SUCCESS) ? 1 : 0);
}

/*
 * Get the size of the file
 */
off_t
ubc_getsize(struct vnode *vp)
{
    /*
     * Callers depend on the side effect of this returning 0 when no
     * ubc_info exists, since they call it for directories as well.
     */
    if (!UBCINFOEXISTS(vp))
        return ((off_t)0);
    return (vp->v_ubcinfo->ui_size);
}

/*
 * Push out (via ubc_msync(vp, 0, EOF, NULL, UBC_PUSHALL)) all the
 * vnodes for this mount point.
 * Always returns 0.
 */

__private_extern__ int
ubc_umount(struct mount *mp)
{
    vnode_iterate(mp, 0, ubc_umcallback, 0);
    return(0);
}

static int
ubc_umcallback(vnode_t vp, __unused void * args)
{

    if (UBCINFOEXISTS(vp)) {

        cluster_push(vp, 0);

        (void) ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL, UBC_PUSHALL);
    }
    return (VNODE_RETURNED);
}

/* Get the credentials */
kauth_cred_t
ubc_getcred(struct vnode *vp)
{
    if (UBCINFOEXISTS(vp))
        return (vp->v_ubcinfo->ui_ucred);

    return (NOCRED);
}

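/*
 * Set the credential on the vnode's ubc_info if none is set yet: use the
 * thread's credential when the thread has assumed an identity (UT_SETUID),
 * otherwise take a reference on the process credential.  Existing
 * credentials are left unchanged.  Returns 1 if the vnode has no ubc_info,
 * 0 otherwise.
 */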
int
ubc_setthreadcred(struct vnode *vp, struct proc *p, thread_t thread)
{
    struct ubc_info *uip;
    kauth_cred_t credp;
    struct uthread *uthread = get_bsdthread_info(thread);

    if (!UBCINFOEXISTS(vp))
        return (1);

    vnode_lock(vp);

    uip = vp->v_ubcinfo;
    credp = uip->ui_ucred;

    if (credp == NOCRED) {
        /* use per-thread cred, if assumed identity, else proc cred */
        if (uthread == NULL || (uthread->uu_flag & UT_SETUID) == 0) {
            uip->ui_ucred = kauth_cred_proc_ref(p);
        } else {
            uip->ui_ucred = uthread->uu_ucred;
            kauth_cred_ref(uip->ui_ucred);
        }
    }
    vnode_unlock(vp);

    return (0);
}

/*
 * Set the credentials on the vnode's ubc_info from the process.
 * Existing credentials are not changed.
 * Returns 1 on success and 0 on failure.
 */
int
ubc_setcred(struct vnode *vp, struct proc *p)
{
    struct ubc_info *uip;
    kauth_cred_t credp;

    if ( !UBCINFOEXISTS(vp))
        return (0);

    vnode_lock(vp);

    uip = vp->v_ubcinfo;
    credp = uip->ui_ucred;

    if (credp == NOCRED) {
        uip->ui_ucred = kauth_cred_proc_ref(p);
    }
    vnode_unlock(vp);

    return (1);
}

/* Get the pager */
__private_extern__ memory_object_t
ubc_getpager(struct vnode *vp)
{
    if (UBCINFOEXISTS(vp))
        return (vp->v_ubcinfo->ui_pager);

    return (0);
}

/*
 * Get the memory object associated with this vnode.
 * If the vnode was reactivated, the memory object would not exist.
 * Unless "do not reactivate" was specified, look it up using the pager.
 * If a hold was requested, create an object reference if one does not
 * exist already.
 */

memory_object_control_t
ubc_getobject(struct vnode *vp, __unused int flags)
{
    if (UBCINFOEXISTS(vp))
        return((vp->v_ubcinfo->ui_control));

    return (0);
}

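/*
 * Convert a logical block number to a file offset for this vnode, via
 * VNOP_BLKTOOFF().  Returns -1 if the vnode is not UBC-valid or the
 * conversion fails.
 */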
off_t
ubc_blktooff(vnode_t vp, daddr64_t blkno)
{
    off_t file_offset;
    int error;

    if (UBCINVALID(vp))
        return ((off_t)-1);

    error = VNOP_BLKTOOFF(vp, blkno, &file_offset);
    if (error)
        file_offset = -1;

    return (file_offset);
}

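/*
 * Convert a file offset to a logical block number for this vnode, via
 * VNOP_OFFTOBLK().  Returns -1 if the vnode is not UBC-valid or the
 * conversion fails.
 */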
daddr64_t
ubc_offtoblk(vnode_t vp, off_t offset)
{
    daddr64_t blkno;
    int error = 0;

    if (UBCINVALID(vp))
        return ((daddr64_t)-1);

    error = VNOP_OFFTOBLK(vp, offset, &blkno);
    if (error)
        blkno = -1;

    return (blkno);
}

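/*
 * Ask the VM whether the memory object backing this vnode has any pages
 * resident.  Returns 1 if so, 0 otherwise (or if the vnode has no ubc_info).
 */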
int
ubc_pages_resident(vnode_t vp)
{
    kern_return_t kret;
    boolean_t has_pages_resident;

    if ( !UBCINFOEXISTS(vp))
        return (0);

    kret = memory_object_pages_resident(vp->v_ubcinfo->ui_control, &has_pages_resident);

    if (kret != KERN_SUCCESS)
        return (0);

    if (has_pages_resident == TRUE)
        return (1);

    return (0);
}

/*
 * This interface will eventually be deprecated
 *
 * clean and/or invalidate a range in the memory object that backs this
 * vnode.  The start offset is truncated to the page boundary and the
 * size is adjusted to include the last page in the range.
 *
 * returns 1 for success, 0 for failure
 */
int
ubc_sync_range(vnode_t vp, off_t beg_off, off_t end_off, int flags)
{
    return (ubc_msync_internal(vp, beg_off, end_off, NULL, flags, NULL));
}

/*
 * clean and/or invalidate a range in the memory object that backs this
 * vnode.  The start offset is truncated to the page boundary and the
 * size is adjusted to include the last page in the range.
 *
 * Returns 0 on success, EINVAL if the operation failed without an I/O
 * error, otherwise the I/O error reported by the pager.
 */
errno_t
ubc_msync(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags)
{
    int retval;
    int io_errno = 0;

    if (resid_off)
        *resid_off = beg_off;

    retval = ubc_msync_internal(vp, beg_off, end_off, resid_off, flags, &io_errno);

    if (retval == 0 && io_errno == 0)
        return (EINVAL);
    return (io_errno);
}

/*
 * clean and/or invalidate a range in the memory object that backs this
 * vnode.  The start offset is truncated to the page boundary and the
 * size is adjusted to include the last page in the range.
 */
static int
ubc_msync_internal(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags, int *io_errno)
{
    memory_object_size_t tsize;
    kern_return_t kret;
    int request_flags = 0;
    int flush_flags = MEMORY_OBJECT_RETURN_NONE;

    if ( !UBCINFOEXISTS(vp))
        return (0);
    if (end_off <= beg_off)
        return (0);
    if ((flags & (UBC_INVALIDATE | UBC_PUSHDIRTY | UBC_PUSHALL)) == 0)
        return (0);

    if (flags & UBC_INVALIDATE)
        /*
         * discard the resident pages
         */
        request_flags = (MEMORY_OBJECT_DATA_FLUSH | MEMORY_OBJECT_DATA_NO_CHANGE);

    if (flags & UBC_SYNC)
        /*
         * wait for all the I/O to complete before returning
         */
        request_flags |= MEMORY_OBJECT_IO_SYNC;

    if (flags & UBC_PUSHDIRTY)
        /*
         * we only return the dirty pages in the range
         */
        flush_flags = MEMORY_OBJECT_RETURN_DIRTY;

    if (flags & UBC_PUSHALL)
        /*
         * return all the interesting pages in the range (both dirty
         * and precious) to the pager
         */
        flush_flags = MEMORY_OBJECT_RETURN_ALL;

    beg_off = trunc_page_64(beg_off);
    end_off = round_page_64(end_off);
    tsize = (memory_object_size_t)end_off - beg_off;

    /* flush and/or invalidate pages in the range requested */
    kret = memory_object_lock_request(vp->v_ubcinfo->ui_control,
                beg_off, tsize, resid_off, io_errno,
                flush_flags, request_flags, VM_PROT_NO_CHANGE);

    return ((kret == KERN_SUCCESS) ? 1 : 0);
}

/*
 * The vnode is mapped explicitly, mark it so.
 */
__private_extern__ int
ubc_map(vnode_t vp, int flags)
{
    struct ubc_info *uip;
    int error = 0;
    int need_ref = 0;
    struct vfs_context context;

    if (vnode_getwithref(vp))
        return (0);

    if (UBCINFOEXISTS(vp)) {
        context.vc_proc = current_proc();
        context.vc_ucred = kauth_cred_get();

        error = VNOP_MMAP(vp, flags, &context);

        if (error != EPERM)
            error = 0;

        if (error == 0) {
            vnode_lock(vp);

            uip = vp->v_ubcinfo;

            if ( !ISSET(uip->ui_flags, UI_ISMAPPED))
                need_ref = 1;
            SET(uip->ui_flags, (UI_WASMAPPED | UI_ISMAPPED));

            vnode_unlock(vp);

            if (need_ref)
                vnode_ref(vp);
        }
    }
    vnode_put(vp);

    return (error);
}

/*
 * destroy the named reference for a given vnode
 */
__private_extern__ int
ubc_destroy_named(struct vnode *vp)
{
    memory_object_control_t control;
    struct ubc_info *uip;
    kern_return_t kret;

    /*
     * We may already have had the object terminated
     * and the ubcinfo released as a side effect of
     * some earlier processing.  If so, pretend we did
     * it, because it probably was a result of our
     * efforts.
     */
    if (!UBCINFOEXISTS(vp))
        return (1);

    uip = vp->v_ubcinfo;

    /*
     * Terminate the memory object.
     * memory_object_destroy() will result in
     * vnode_pager_no_senders().
     * That will release the pager reference
     * and the vnode will move to the free list.
     */
    control = ubc_getobject(vp, UBC_HOLDOBJECT);
    if (control != MEMORY_OBJECT_CONTROL_NULL) {

        /*
         * XXXXX - should we hold the vnode lock here?
         */
        if (ISSET(vp->v_flag, VTERMINATE))
            panic("ubc_destroy_named: already terminating");
        SET(vp->v_flag, VTERMINATE);

        kret = memory_object_destroy(control, 0);
        if (kret != KERN_SUCCESS)
            return (0);

        /*
         * memory_object_destroy() is asynchronous
         * with respect to vnode_pager_no_senders().
         * wait for vnode_pager_no_senders() to clear
         * VTERMINATE
         */
        vnode_lock(vp);
        while (ISSET(vp->v_lflag, VNAMED_UBC)) {
            (void)msleep((caddr_t)&vp->v_lflag, &vp->v_lock,
                    PINOD, "ubc_destroy_named", 0);
        }
        vnode_unlock(vp);
    }
    return (1);
}

/*
 * Find out whether a vnode is in use by UBC
 * Returns 1 if file is in use by UBC, 0 if not
 */
int
ubc_isinuse(struct vnode *vp, int busycount)
{
    if ( !UBCINFOEXISTS(vp))
        return (0);
    return(ubc_isinuse_locked(vp, busycount, 0));
}

int
ubc_isinuse_locked(struct vnode *vp, int busycount, int locked)
{
    int retval = 0;

    if (!locked)
        vnode_lock(vp);

    if ((vp->v_usecount - vp->v_kusecount) > busycount)
        retval = 1;

    if (!locked)
        vnode_unlock(vp);
    return (retval);
}

/*
 * MUST only be called by the VM
 */
__private_extern__ void
ubc_unmap(struct vnode *vp)
{
    struct vfs_context context;
    struct ubc_info *uip;
    int need_rele = 0;

    if (vnode_getwithref(vp))
        return;

    if (UBCINFOEXISTS(vp)) {
        vnode_lock(vp);

        uip = vp->v_ubcinfo;
        if (ISSET(uip->ui_flags, UI_ISMAPPED)) {
            CLR(uip->ui_flags, UI_ISMAPPED);
            need_rele = 1;
        }
        vnode_unlock(vp);

        if (need_rele) {
            context.vc_proc = current_proc();
            context.vc_ucred = kauth_cred_get();
            (void)VNOP_MNOMAP(vp, &context);

            vnode_rele(vp);
        }
    }
    /*
     * the drop of the vnode ref will cleanup
     */
    vnode_put(vp);
}

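/*
 * Perform a single-page operation (see memory_object_page_op()) on the
 * memory object backing this vnode at the given file offset, optionally
 * returning the physical page number and page flags.
 */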
kern_return_t
ubc_page_op(
    struct vnode *vp,
    off_t f_offset,
    int ops,
    ppnum_t *phys_entryp,
    int *flagsp)
{
    memory_object_control_t control;

    control = ubc_getobject(vp, UBC_FLAGS_NONE);
    if (control == MEMORY_OBJECT_CONTROL_NULL)
        return KERN_INVALID_ARGUMENT;

    return (memory_object_page_op(control,
                (memory_object_offset_t)f_offset,
                ops,
                phys_entryp,
                flagsp));
}

__private_extern__ kern_return_t
ubc_page_op_with_control(
    memory_object_control_t control,
    off_t f_offset,
    int ops,
    ppnum_t *phys_entryp,
    int *flagsp)
{
    return (memory_object_page_op(control,
                (memory_object_offset_t)f_offset,
                ops,
                phys_entryp,
                flagsp));
}

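/*
 * Perform a range operation (see memory_object_range_op()) over
 * [f_offset_beg, f_offset_end) on the memory object backing this vnode.
 */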
kern_return_t
ubc_range_op(
    struct vnode *vp,
    off_t f_offset_beg,
    off_t f_offset_end,
    int ops,
    int *range)
{
    memory_object_control_t control;

    control = ubc_getobject(vp, UBC_FLAGS_NONE);
    if (control == MEMORY_OBJECT_CONTROL_NULL)
        return KERN_INVALID_ARGUMENT;

    return (memory_object_range_op(control,
                (memory_object_offset_t)f_offset_beg,
                (memory_object_offset_t)f_offset_end,
                ops,
                range));
}

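/*
 * Create a UPL (universal page list) covering bufsize bytes of this
 * vnode's memory object starting at f_offset.  bufsize must be a multiple
 * of 4 KB (the code rejects sizes with any of the low 12 bits set).  If
 * plp is non-NULL, the UPL's internal page list is returned through it.
 */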
kern_return_t
ubc_create_upl(
    struct vnode *vp,
    off_t f_offset,
    long bufsize,
    upl_t *uplp,
    upl_page_info_t **plp,
    int uplflags)
{
    memory_object_control_t control;
    int count;
    int ubcflags;
    kern_return_t kr;

    if (bufsize & 0xfff)
        return KERN_INVALID_ARGUMENT;

    if (uplflags & UPL_FOR_PAGEOUT) {
        uplflags &= ~UPL_FOR_PAGEOUT;
        ubcflags = UBC_FOR_PAGEOUT;
    } else
        ubcflags = UBC_FLAGS_NONE;

    control = ubc_getobject(vp, ubcflags);
    if (control == MEMORY_OBJECT_CONTROL_NULL)
        return KERN_INVALID_ARGUMENT;

    if (uplflags & UPL_WILL_BE_DUMPED) {
        uplflags &= ~UPL_WILL_BE_DUMPED;
        uplflags |= (UPL_NO_SYNC|UPL_SET_INTERNAL);
    } else
        uplflags |= (UPL_NO_SYNC|UPL_CLEAN_IN_PLACE|UPL_SET_INTERNAL);
    count = 0;
    kr = memory_object_upl_request(control, f_offset, bufsize,
            uplp, NULL, &count, uplflags);
    if (plp != NULL)
        *plp = UPL_GET_INTERNAL_PAGE_LIST(*uplp);
    return kr;
}

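/* Map a UPL into the kernel map, returning the kernel virtual address. */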
kern_return_t
ubc_upl_map(
    upl_t upl,
    vm_offset_t *dst_addr)
{
    return (vm_upl_map(kernel_map, upl, dst_addr));
}

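/* Unmap a UPL previously mapped into the kernel map by ubc_upl_map(). */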
kern_return_t
ubc_upl_unmap(
    upl_t upl)
{
    return(vm_upl_unmap(kernel_map, upl));
}

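/* Commit all pages of a UPL and deallocate it. */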
kern_return_t
ubc_upl_commit(
    upl_t upl)
{
    upl_page_info_t *pl;
    kern_return_t kr;

    pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
    kr = upl_commit(upl, pl, MAX_UPL_TRANSFER);
    upl_deallocate(upl);
    return kr;
}

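/*
 * Commit a byte range of a UPL.  If UPL_COMMIT_FREE_ON_EMPTY is set and
 * the commit leaves the UPL empty, the UPL is deallocated as well.
 */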
kern_return_t
ubc_upl_commit_range(
    upl_t upl,
    vm_offset_t offset,
    vm_size_t size,
    int flags)
{
    upl_page_info_t *pl;
    boolean_t empty;
    kern_return_t kr;

    if (flags & UPL_COMMIT_FREE_ON_EMPTY)
        flags |= UPL_COMMIT_NOTIFY_EMPTY;

    pl = UPL_GET_INTERNAL_PAGE_LIST(upl);

    kr = upl_commit_range(upl, offset, size, flags,
            pl, MAX_UPL_TRANSFER, &empty);

    if ((flags & UPL_COMMIT_FREE_ON_EMPTY) && empty)
        upl_deallocate(upl);

    return kr;
}

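/*
 * Abort a byte range of a UPL.  If UPL_ABORT_FREE_ON_EMPTY is set and the
 * abort leaves the UPL empty, the UPL is deallocated as well.
 */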
kern_return_t
ubc_upl_abort_range(
    upl_t upl,
    vm_offset_t offset,
    vm_size_t size,
    int abort_flags)
{
    kern_return_t kr;
    boolean_t empty = FALSE;

    if (abort_flags & UPL_ABORT_FREE_ON_EMPTY)
        abort_flags |= UPL_ABORT_NOTIFY_EMPTY;

    kr = upl_abort_range(upl, offset, size, abort_flags, &empty);

    if ((abort_flags & UPL_ABORT_FREE_ON_EMPTY) && empty)
        upl_deallocate(upl);

    return kr;
}

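/* Abort an entire UPL and deallocate it. */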
kern_return_t
ubc_upl_abort(
    upl_t upl,
    int abort_type)
{
    kern_return_t kr;

    kr = upl_abort(upl, abort_type);
    upl_deallocate(upl);
    return kr;
}

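/* Return the internal page list of a UPL. */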
upl_page_info_t *
ubc_upl_pageinfo(
    upl_t upl)
{
    return (UPL_GET_INTERNAL_PAGE_LIST(upl));
}

/************* UBC APIS **************/

int
UBCINFOMISSING(struct vnode * vp)
{
    return((vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo == UBC_INFO_NULL));
}

int
UBCINFORECLAIMED(struct vnode * vp)
{
    return((vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo == UBC_INFO_NULL));
}

int
UBCINFOEXISTS(struct vnode * vp)
{
    return((vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo != UBC_INFO_NULL));
}
int
UBCISVALID(struct vnode * vp)
{
    return((vp) && ((vp)->v_type == VREG) && !((vp)->v_flag & VSYSTEM));
}
int
UBCINVALID(struct vnode * vp)
{
    return(((vp) == NULL) || ((vp) && ((vp)->v_type != VREG))
        || ((vp) && ((vp)->v_flag & VSYSTEM)));
}
int
UBCINFOCHECK(const char * fun, struct vnode * vp)
{
    if ((vp) && ((vp)->v_type == VREG) &&
        ((vp)->v_ubcinfo == UBC_INFO_NULL)) {
        panic("%s: lost ubc_info", (fun));
        return(1);
    } else
        return(0);
}