1 /*
2 * Copyright (c) 1999-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30 /*
31 * File: ubc_subr.c
32 * Author: Umesh Vaishampayan [umeshv@apple.com]
33 * 05-Aug-1999 umeshv Created.
34 *
35 * Functions related to Unified Buffer cache.
36 *
37 * Caller of UBC functions MUST have a valid reference on the vnode.
38 *
39 */
40
41 #undef DIAGNOSTIC
42 #define DIAGNOSTIC 1
43
44 #include <sys/types.h>
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/lock.h>
48 #include <sys/mman.h>
49 #include <sys/mount_internal.h>
50 #include <sys/vnode_internal.h>
51 #include <sys/ubc_internal.h>
52 #include <sys/ucred.h>
53 #include <sys/proc_internal.h>
54 #include <sys/kauth.h>
55 #include <sys/buf.h>
56 #include <sys/user.h>
57
58 #include <mach/mach_types.h>
59 #include <mach/memory_object_types.h>
60 #include <mach/memory_object_control.h>
61 #include <mach/vm_map.h>
62 #include <mach/upl.h>
63
64 #include <kern/kern_types.h>
65 #include <kern/zalloc.h>
66 #include <kern/thread.h>
67 #include <vm/vm_kern.h>
68 #include <vm/vm_protos.h> /* last */
69
70 #if DIAGNOSTIC
71 #if defined(assert)
#undef assert
73 #endif
74 #define assert(cond) \
75 ((void) ((cond) ? 0 : panic("%s:%d (%s)", __FILE__, __LINE__, # cond)))
76 #else
77 #include <kern/assert.h>
78 #endif /* DIAGNOSTIC */
79
80 int ubc_info_init_internal(struct vnode *vp, int withfsize, off_t filesize);
static int ubc_umcallback(vnode_t, void *);
82 int ubc_isinuse_locked(vnode_t, int, int);
static int ubc_msync_internal(vnode_t, off_t, off_t, off_t *, int, int *);
84
85 struct zone *ubc_info_zone;
86
87 /*
88 * Initialization of the zone for Unified Buffer Cache.
89 */
90 __private_extern__ void
91 ubc_init()
92 {
93 int i;
94
95 i = (vm_size_t) sizeof (struct ubc_info);
96 /* XXX the number of elements should be tied in to maxvnodes */
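	/* zinit arguments: element size, max zone memory (room for 10000 ubc_infos), allocation chunk size, zone name */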
97 ubc_info_zone = zinit (i, 10000*i, 8192, "ubc_info zone");
98 return;
99 }
100
101 /*
102 * Initialize a ubc_info structure for a vnode.
103 */
104 int
105 ubc_info_init(struct vnode *vp)
106 {
107 return(ubc_info_init_internal(vp, 0, 0));
108 }
109 int
110 ubc_info_init_withsize(struct vnode *vp, off_t filesize)
111 {
112 return(ubc_info_init_internal(vp, 1, filesize));
113 }
114
115 int
116 ubc_info_init_internal(struct vnode *vp, int withfsize, off_t filesize)
117 {
118 register struct ubc_info *uip;
119 void * pager;
120 struct proc *p = current_proc();
121 int error = 0;
122 kern_return_t kret;
123 memory_object_control_t control;
124
125 uip = vp->v_ubcinfo;
126
127 if (uip == UBC_INFO_NULL) {
128
129 uip = (struct ubc_info *) zalloc(ubc_info_zone);
130 bzero((char *)uip, sizeof(struct ubc_info));
131
132 uip->ui_vnode = vp;
133 uip->ui_flags = UI_INITED;
134 uip->ui_ucred = NOCRED;
135 }
136 #if DIAGNOSTIC
137 else
138 Debugger("ubc_info_init: already");
139 #endif /* DIAGNOSTIC */
140
141 assert(uip->ui_flags != UI_NONE);
142 assert(uip->ui_vnode == vp);
143
144 /* now set this ubc_info in the vnode */
145 vp->v_ubcinfo = uip;
146
147 pager = (void *)vnode_pager_setup(vp, uip->ui_pager);
148 assert(pager);
149
150 SET(uip->ui_flags, UI_HASPAGER);
151 uip->ui_pager = pager;
152
	/*
	 * Note: we cannot use VNOP_GETATTR() to get an accurate
	 * value for ui_size here because of NFS: nfs_getattr() can
	 * call vinvalbuf(), and at this point the ubc_info is not
	 * yet set up to deal with that.  So start with a bogus size.
	 */
160
	/*
	 * Create a vnode - vm_object association.
	 * memory_object_create_named() creates a "named" reference on the
	 * memory object; we hold this reference as long as the vnode is
	 * "alive."  Since memory_object_create_named() took its own reference
	 * on the vnode pager we passed it, we can drop the reference
	 * vnode_pager_setup() returned here.
	 */
169 kret = memory_object_create_named(pager,
170 (memory_object_size_t)uip->ui_size, &control);
171 vnode_pager_deallocate(pager);
172 if (kret != KERN_SUCCESS)
173 panic("ubc_info_init: memory_object_create_named returned %d", kret);
174
175 assert(control);
176 uip->ui_control = control; /* cache the value of the mo control */
177 SET(uip->ui_flags, UI_HASOBJREF); /* with a named reference */
178 #if 0
179 /* create a pager reference on the vnode */
180 error = vnode_pager_vget(vp);
181 if (error)
182 panic("ubc_info_init: vnode_pager_vget error = %d", error);
183 #endif
184 if (withfsize == 0) {
185 struct vfs_context context;
186 /* initialize the size */
187 context.vc_proc = p;
188 context.vc_ucred = kauth_cred_get();
189 error = vnode_size(vp, &uip->ui_size, &context);
190 if (error)
191 uip->ui_size = 0;
192 } else {
193 uip->ui_size = filesize;
194 }
195 vp->v_lflag |= VNAMED_UBC;
196
197 return (error);
198 }
199
200 /* Free the ubc_info */
201 static void
202 ubc_info_free(struct ubc_info *uip)
203 {
204 kauth_cred_t credp;
205
206 credp = uip->ui_ucred;
207 if (credp != NOCRED) {
208 uip->ui_ucred = NOCRED;
209 kauth_cred_rele(credp);
210 }
211
212 if (uip->ui_control != MEMORY_OBJECT_CONTROL_NULL)
213 memory_object_control_deallocate(uip->ui_control);
214
215 cluster_release(uip);
216
217 zfree(ubc_info_zone, (vm_offset_t)uip);
218 return;
219 }
220
221 void
222 ubc_info_deallocate(struct ubc_info *uip)
223 {
224 ubc_info_free(uip);
225 }
226
/*
 * Communicate the file's size change to the VM.
 * Returns 1 on success, 0 on failure.
 */
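/*
 * Illustrative sketch (not part of this file): a filesystem's truncate
 * path typically updates its on-disk size and then informs the VM, e.g.
 *
 *	if (ubc_setsize(vp, new_size) == 0)
 *		(handle the failure; the error code is the caller's choice)
 */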
231 int
232 ubc_setsize(struct vnode *vp, off_t nsize)
233 {
234 off_t osize; /* ui_size before change */
235 off_t lastpg, olastpgend, lastoff;
236 struct ubc_info *uip;
237 memory_object_control_t control;
238 kern_return_t kret;
239
240 if (nsize < (off_t)0)
241 return (0);
242
243 if (!UBCINFOEXISTS(vp))
244 return (0);
245
246 uip = vp->v_ubcinfo;
247 osize = uip->ui_size; /* call ubc_getsize() ??? */
248 /* Update the size before flushing the VM */
249 uip->ui_size = nsize;
250
251 if (nsize >= osize) /* Nothing more to do */
252 return (1); /* return success */
253
	/*
	 * When the file shrinks, invalidate the pages beyond the
	 * new size.  Also get rid of garbage beyond nsize on the
	 * last page.  ui_size has already been set to nsize above,
	 * which ensures that a pageout will not write beyond the new
	 * end of the file.
	 */
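	/*
	 * For example (assuming 4 KB pages): shrinking from osize 0x5000 to
	 * nsize 0x1800 gives lastpg = 0x1000, lastoff = 0x800 and
	 * olastpgend = 0x5000, so the requests below cover [0x1000, 0x5000).
	 */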
261
262 lastpg = trunc_page_64(nsize);
263 olastpgend = round_page_64(osize);
264 control = uip->ui_control;
265 assert(control);
266 lastoff = (nsize & PAGE_MASK_64);
267
	/*
	 * If the new size is a multiple of the page size there is
	 * nothing to flush on the last page; invalidating is sufficient.
	 */
272 if (!lastoff) {
273 /* invalidate last page and old contents beyond nsize */
274 kret = memory_object_lock_request(control,
275 (memory_object_offset_t)lastpg,
276 (memory_object_size_t)(olastpgend - lastpg), NULL, NULL,
277 MEMORY_OBJECT_RETURN_NONE, MEMORY_OBJECT_DATA_FLUSH,
278 VM_PROT_NO_CHANGE);
279 if (kret != KERN_SUCCESS)
280 printf("ubc_setsize: invalidate failed (error = %d)\n", kret);
281
282 return ((kret == KERN_SUCCESS) ? 1 : 0);
283 }
284
285 /* flush the last page */
286 kret = memory_object_lock_request(control,
287 (memory_object_offset_t)lastpg,
288 PAGE_SIZE_64, NULL, NULL,
289 MEMORY_OBJECT_RETURN_DIRTY, FALSE,
290 VM_PROT_NO_CHANGE);
291
292 if (kret == KERN_SUCCESS) {
293 /* invalidate last page and old contents beyond nsize */
294 kret = memory_object_lock_request(control,
295 (memory_object_offset_t)lastpg,
296 (memory_object_size_t)(olastpgend - lastpg), NULL, NULL,
297 MEMORY_OBJECT_RETURN_NONE, MEMORY_OBJECT_DATA_FLUSH,
298 VM_PROT_NO_CHANGE);
299 if (kret != KERN_SUCCESS)
300 printf("ubc_setsize: invalidate failed (error = %d)\n", kret);
301 } else
302 printf("ubc_setsize: flush failed (error = %d)\n", kret);
303
304 return ((kret == KERN_SUCCESS) ? 1 : 0);
305 }
306
307 /*
308 * Get the size of the file
309 */
310 off_t
311 ubc_getsize(struct vnode *vp)
312 {
	/*
	 * Callers depend on this returning 0 when there is no ubc_info,
	 * since they also call it for directories.
	 */
316 if (!UBCINFOEXISTS(vp))
317 return ((off_t)0);
318 return (vp->v_ubcinfo->ui_size);
319 }
320
/*
 * For every vnode on this mount point, push its pages to disk
 * (cluster_push() plus ubc_msync() with UBC_PUSHALL).
 * Always returns 0.
 */
326
327 __private_extern__ int
328 ubc_umount(struct mount *mp)
329 {
330 vnode_iterate(mp, 0, ubc_umcallback, 0);
331 return(0);
332 }
333
334 static int
335 ubc_umcallback(vnode_t vp, __unused void * args)
336 {
337
338 if (UBCINFOEXISTS(vp)) {
339
340 cluster_push(vp, 0);
341
342 (void) ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL, UBC_PUSHALL);
343 }
344 return (VNODE_RETURNED);
345 }
346
347
348
349 /* Get the credentials */
350 kauth_cred_t
351 ubc_getcred(struct vnode *vp)
352 {
353 if (UBCINFOEXISTS(vp))
354 return (vp->v_ubcinfo->ui_ucred);
355
356 return (NOCRED);
357 }
358
359 int
360 ubc_setthreadcred(struct vnode *vp, struct proc *p, thread_t thread)
361 {
362 struct ubc_info *uip;
363 kauth_cred_t credp;
364 struct uthread *uthread = get_bsdthread_info(thread);
365
366 if (!UBCINFOEXISTS(vp))
367 return (1);
368
369 vnode_lock(vp);
370
371 uip = vp->v_ubcinfo;
372 credp = uip->ui_ucred;
373
374 if (credp == NOCRED) {
375 /* use per-thread cred, if assumed identity, else proc cred */
376 if (uthread == NULL || (uthread->uu_flag & UT_SETUID) == 0) {
377 uip->ui_ucred = kauth_cred_proc_ref(p);
378 } else {
379 uip->ui_ucred = uthread->uu_ucred;
380 kauth_cred_ref(uip->ui_ucred);
381 }
382 }
383 vnode_unlock(vp);
384
385 return (0);
386 }
387
/*
 * Set the credentials; existing credentials are not changed.
 * Returns 1 on success and 0 on failure.
 */
393 int
394 ubc_setcred(struct vnode *vp, struct proc *p)
395 {
396 struct ubc_info *uip;
397 kauth_cred_t credp;
398
399 if ( !UBCINFOEXISTS(vp))
400 return (0);
401
402 vnode_lock(vp);
403
404 uip = vp->v_ubcinfo;
405 credp = uip->ui_ucred;
406
407 if (credp == NOCRED) {
408 uip->ui_ucred = kauth_cred_proc_ref(p);
409 }
410 vnode_unlock(vp);
411
412 return (1);
413 }
414
415 /* Get the pager */
416 __private_extern__ memory_object_t
417 ubc_getpager(struct vnode *vp)
418 {
419 if (UBCINFOEXISTS(vp))
420 return (vp->v_ubcinfo->ui_pager);
421
422 return (0);
423 }
424
/*
 * Get the memory object associated with this vnode.
 * If the vnode was reactivated, the memory object may not exist.
 * Unless "do not reactivate" was specified, look it up using the pager.
 * If a hold was requested, create an object reference if one does not
 * exist already.
 */
432
433 memory_object_control_t
434 ubc_getobject(struct vnode *vp, __unused int flags)
435 {
436 if (UBCINFOEXISTS(vp))
437 return((vp->v_ubcinfo->ui_control));
438
439 return (0);
440 }
441
442
443 off_t
444 ubc_blktooff(vnode_t vp, daddr64_t blkno)
445 {
446 off_t file_offset;
447 int error;
448
449 if (UBCINVALID(vp))
450 return ((off_t)-1);
451
452 error = VNOP_BLKTOOFF(vp, blkno, &file_offset);
453 if (error)
454 file_offset = -1;
455
456 return (file_offset);
457 }
458
459 daddr64_t
460 ubc_offtoblk(vnode_t vp, off_t offset)
461 {
462 daddr64_t blkno;
463 int error = 0;
464
465 if (UBCINVALID(vp))
466 return ((daddr64_t)-1);
467
468 error = VNOP_OFFTOBLK(vp, offset, &blkno);
469 if (error)
470 blkno = -1;
471
472 return (blkno);
473 }
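
/*
 * Illustrative example (assuming a filesystem whose VNOP_BLKTOOFF and
 * VNOP_OFFTOBLK use a 4 KB logical block size): ubc_blktooff(vp, 3)
 * would yield file offset 12288, and ubc_offtoblk(vp, 12288) would
 * yield block 3.
 */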
474
475 int
476 ubc_pages_resident(vnode_t vp)
477 {
478 kern_return_t kret;
479 boolean_t has_pages_resident;
480
481 if ( !UBCINFOEXISTS(vp))
482 return (0);
483
484 kret = memory_object_pages_resident(vp->v_ubcinfo->ui_control, &has_pages_resident);
485
486 if (kret != KERN_SUCCESS)
487 return (0);
488
489 if (has_pages_resident == TRUE)
490 return (1);
491
492 return (0);
493 }
494
495
496
497 /*
498 * This interface will eventually be deprecated
499 *
500 * clean and/or invalidate a range in the memory object that backs this
501 * vnode. The start offset is truncated to the page boundary and the
502 * size is adjusted to include the last page in the range.
503 *
504 * returns 1 for success, 0 for failure
505 */
506 int
507 ubc_sync_range(vnode_t vp, off_t beg_off, off_t end_off, int flags)
508 {
509 return (ubc_msync_internal(vp, beg_off, end_off, NULL, flags, NULL));
510 }
511
512
/*
 * Clean and/or invalidate a range in the memory object that backs this
 * vnode.  The start offset is truncated to the page boundary and the
 * size is adjusted to include the last page in the range.
 * Returns 0 on success, or an errno on failure (EINVAL if no more
 * specific I/O error is available).
 */
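/*
 * Illustrative sketch (not from this file): to synchronously push all
 * dirty pages of a file, a caller might use
 *
 *	error = ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL,
 *	                  UBC_PUSHDIRTY | UBC_SYNC);
 */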
519 errno_t
520 ubc_msync(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags)
521 {
522 int retval;
523 int io_errno = 0;
524
525 if (resid_off)
526 *resid_off = beg_off;
527
528 retval = ubc_msync_internal(vp, beg_off, end_off, resid_off, flags, &io_errno);
529
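	/* the internal routine returns 1 on success; report a failure with no specific I/O error as EINVAL */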
530 if (retval == 0 && io_errno == 0)
531 return (EINVAL);
532 return (io_errno);
533 }
534
535
536
537 /*
538 * clean and/or invalidate a range in the memory object that backs this
539 * vnode. The start offset is truncated to the page boundary and the
540 * size is adjusted to include the last page in the range.
541 */
542 static int
543 ubc_msync_internal(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags, int *io_errno)
544 {
545 memory_object_size_t tsize;
546 kern_return_t kret;
547 int request_flags = 0;
548 int flush_flags = MEMORY_OBJECT_RETURN_NONE;
549
550 if ( !UBCINFOEXISTS(vp))
551 return (0);
552 if (end_off <= beg_off)
553 return (0);
554 if ((flags & (UBC_INVALIDATE | UBC_PUSHDIRTY | UBC_PUSHALL)) == 0)
555 return (0);
556
557 if (flags & UBC_INVALIDATE)
558 /*
559 * discard the resident pages
560 */
561 request_flags = (MEMORY_OBJECT_DATA_FLUSH | MEMORY_OBJECT_DATA_NO_CHANGE);
562
563 if (flags & UBC_SYNC)
564 /*
565 * wait for all the I/O to complete before returning
566 */
567 request_flags |= MEMORY_OBJECT_IO_SYNC;
568
569 if (flags & UBC_PUSHDIRTY)
570 /*
571 * we only return the dirty pages in the range
572 */
573 flush_flags = MEMORY_OBJECT_RETURN_DIRTY;
574
575 if (flags & UBC_PUSHALL)
576 /*
577 * then return all the interesting pages in the range (both dirty and precious)
578 * to the pager
579 */
580 flush_flags = MEMORY_OBJECT_RETURN_ALL;
581
582 beg_off = trunc_page_64(beg_off);
583 end_off = round_page_64(end_off);
584 tsize = (memory_object_size_t)end_off - beg_off;
585
586 /* flush and/or invalidate pages in the range requested */
587 kret = memory_object_lock_request(vp->v_ubcinfo->ui_control,
588 beg_off, tsize, resid_off, io_errno,
589 flush_flags, request_flags, VM_PROT_NO_CHANGE);
590
591 return ((kret == KERN_SUCCESS) ? 1 : 0);
592 }
593
594
595 /*
596 * The vnode is mapped explicitly, mark it so.
597 */
598 __private_extern__ int
599 ubc_map(vnode_t vp, int flags)
600 {
601 struct ubc_info *uip;
602 int error = 0;
603 int need_ref = 0;
604 struct vfs_context context;
605
606 if (vnode_getwithref(vp))
607 return (0);
608
609 if (UBCINFOEXISTS(vp)) {
610 context.vc_proc = current_proc();
611 context.vc_ucred = kauth_cred_get();
612
613 error = VNOP_MMAP(vp, flags, &context);
614
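		/* only EPERM from VNOP_MMAP is honored; any other error is ignored and the mapping proceeds */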
615 if (error != EPERM)
616 error = 0;
617
618 if (error == 0) {
619 vnode_lock(vp);
620
621 uip = vp->v_ubcinfo;
622
623 if ( !ISSET(uip->ui_flags, UI_ISMAPPED))
624 need_ref = 1;
625 SET(uip->ui_flags, (UI_WASMAPPED | UI_ISMAPPED));
626
627 vnode_unlock(vp);
628
629 if (need_ref)
630 vnode_ref(vp);
631 }
632 }
633 vnode_put(vp);
634
635 return (error);
636 }
637
638 /*
639 * destroy the named reference for a given vnode
640 */
641 __private_extern__ int
642 ubc_destroy_named(struct vnode *vp)
643 {
644 memory_object_control_t control;
645 struct ubc_info *uip;
646 kern_return_t kret;
647
648 /*
649 * We may already have had the object terminated
650 * and the ubcinfo released as a side effect of
651 * some earlier processing. If so, pretend we did
652 * it, because it probably was a result of our
653 * efforts.
654 */
655 if (!UBCINFOEXISTS(vp))
656 return (1);
657
658 uip = vp->v_ubcinfo;
659
660 /*
661 * Terminate the memory object.
662 * memory_object_destroy() will result in
663 * vnode_pager_no_senders().
664 * That will release the pager reference
665 * and the vnode will move to the free list.
666 */
667 control = ubc_getobject(vp, UBC_HOLDOBJECT);
668 if (control != MEMORY_OBJECT_CONTROL_NULL) {
669
670 /*
671 * XXXXX - should we hold the vnode lock here?
672 */
673 if (ISSET(vp->v_flag, VTERMINATE))
			panic("ubc_destroy_named: already terminating");
675 SET(vp->v_flag, VTERMINATE);
676
677 kret = memory_object_destroy(control, 0);
678 if (kret != KERN_SUCCESS)
679 return (0);
680
		/*
		 * memory_object_destroy() is asynchronous with
		 * respect to vnode_pager_no_senders(); wait here
		 * until the named reference has been released
		 * (VNAMED_UBC cleared).
		 */
687 vnode_lock(vp);
688 while (ISSET(vp->v_lflag, VNAMED_UBC)) {
689 (void)msleep((caddr_t)&vp->v_lflag, &vp->v_lock,
690 PINOD, "ubc_destroy_named", 0);
691 }
692 vnode_unlock(vp);
693 }
694 return (1);
695 }
696
697
698 /*
699 * Find out whether a vnode is in use by UBC
700 * Returns 1 if file is in use by UBC, 0 if not
701 */
702 int
703 ubc_isinuse(struct vnode *vp, int busycount)
704 {
705 if ( !UBCINFOEXISTS(vp))
706 return (0);
707 return(ubc_isinuse_locked(vp, busycount, 0));
708 }
709
710
711 int
712 ubc_isinuse_locked(struct vnode *vp, int busycount, int locked)
713 {
714 int retval = 0;
715
716
717 if (!locked)
718 vnode_lock(vp);
719
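	/* compare user-visible references (total usecount minus kernel-internal v_kusecount) against the caller's busycount */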
720 if ((vp->v_usecount - vp->v_kusecount) > busycount)
721 retval = 1;
722
723 if (!locked)
724 vnode_unlock(vp);
725 return (retval);
726 }
727
728
729 /*
730 * MUST only be called by the VM
731 */
732 __private_extern__ void
733 ubc_unmap(struct vnode *vp)
734 {
735 struct vfs_context context;
736 struct ubc_info *uip;
737 int need_rele = 0;
738
739 if (vnode_getwithref(vp))
740 return;
741
742 if (UBCINFOEXISTS(vp)) {
743 vnode_lock(vp);
744
745 uip = vp->v_ubcinfo;
746 if (ISSET(uip->ui_flags, UI_ISMAPPED)) {
747 CLR(uip->ui_flags, UI_ISMAPPED);
748 need_rele = 1;
749 }
750 vnode_unlock(vp);
751
752 if (need_rele) {
753 context.vc_proc = current_proc();
754 context.vc_ucred = kauth_cred_get();
755 (void)VNOP_MNOMAP(vp, &context);
756
757 vnode_rele(vp);
758 }
759 }
	/*
	 * dropping the vnode reference will do the cleanup
	 */
763 vnode_put(vp);
764 }
765
766 kern_return_t
767 ubc_page_op(
768 struct vnode *vp,
769 off_t f_offset,
770 int ops,
771 ppnum_t *phys_entryp,
772 int *flagsp)
773 {
774 memory_object_control_t control;
775
776 control = ubc_getobject(vp, UBC_FLAGS_NONE);
777 if (control == MEMORY_OBJECT_CONTROL_NULL)
778 return KERN_INVALID_ARGUMENT;
779
780 return (memory_object_page_op(control,
781 (memory_object_offset_t)f_offset,
782 ops,
783 phys_entryp,
784 flagsp));
785 }
786
787 __private_extern__ kern_return_t
788 ubc_page_op_with_control(
789 memory_object_control_t control,
790 off_t f_offset,
791 int ops,
792 ppnum_t *phys_entryp,
793 int *flagsp)
794 {
795 return (memory_object_page_op(control,
796 (memory_object_offset_t)f_offset,
797 ops,
798 phys_entryp,
799 flagsp));
800 }
801
802 kern_return_t
803 ubc_range_op(
804 struct vnode *vp,
805 off_t f_offset_beg,
806 off_t f_offset_end,
807 int ops,
808 int *range)
809 {
810 memory_object_control_t control;
811
812 control = ubc_getobject(vp, UBC_FLAGS_NONE);
813 if (control == MEMORY_OBJECT_CONTROL_NULL)
814 return KERN_INVALID_ARGUMENT;
815
816 return (memory_object_range_op(control,
817 (memory_object_offset_t)f_offset_beg,
818 (memory_object_offset_t)f_offset_end,
819 ops,
820 range));
821 }
822
823 kern_return_t
824 ubc_create_upl(
825 struct vnode *vp,
826 off_t f_offset,
827 long bufsize,
828 upl_t *uplp,
829 upl_page_info_t **plp,
830 int uplflags)
831 {
832 memory_object_control_t control;
833 int count;
834 int ubcflags;
835 kern_return_t kr;
836
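	/* reject a buffer size that is not a multiple of the page size (4 KB pages assumed by the 0xfff mask) */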
837 if (bufsize & 0xfff)
838 return KERN_INVALID_ARGUMENT;
839
840 if (uplflags & UPL_FOR_PAGEOUT) {
841 uplflags &= ~UPL_FOR_PAGEOUT;
842 ubcflags = UBC_FOR_PAGEOUT;
843 } else
844 ubcflags = UBC_FLAGS_NONE;
845
846 control = ubc_getobject(vp, ubcflags);
847 if (control == MEMORY_OBJECT_CONTROL_NULL)
848 return KERN_INVALID_ARGUMENT;
849
850 if (uplflags & UPL_WILL_BE_DUMPED) {
851 uplflags &= ~UPL_WILL_BE_DUMPED;
852 uplflags |= (UPL_NO_SYNC|UPL_SET_INTERNAL);
853 } else
854 uplflags |= (UPL_NO_SYNC|UPL_CLEAN_IN_PLACE|UPL_SET_INTERNAL);
855 count = 0;
856 kr = memory_object_upl_request(control, f_offset, bufsize,
857 uplp, NULL, &count, uplflags);
858 if (plp != NULL)
859 *plp = UPL_GET_INTERNAL_PAGE_LIST(*uplp);
860 return kr;
861 }
862
863
864 kern_return_t
865 ubc_upl_map(
866 upl_t upl,
867 vm_offset_t *dst_addr)
868 {
869 return (vm_upl_map(kernel_map, upl, dst_addr));
870 }
871
872
873 kern_return_t
874 ubc_upl_unmap(
875 upl_t upl)
876 {
877 return(vm_upl_unmap(kernel_map, upl));
878 }
879
880 kern_return_t
881 ubc_upl_commit(
882 upl_t upl)
883 {
884 upl_page_info_t *pl;
885 kern_return_t kr;
886
887 pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
888 kr = upl_commit(upl, pl, MAX_UPL_TRANSFER);
889 upl_deallocate(upl);
890 return kr;
891 }
892
893
894 kern_return_t
895 ubc_upl_commit_range(
896 upl_t upl,
897 vm_offset_t offset,
898 vm_size_t size,
899 int flags)
900 {
901 upl_page_info_t *pl;
902 boolean_t empty;
903 kern_return_t kr;
904
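	/* if the caller wants the UPL freed when it empties, ask upl_commit_range() to report the empty state */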
905 if (flags & UPL_COMMIT_FREE_ON_EMPTY)
906 flags |= UPL_COMMIT_NOTIFY_EMPTY;
907
908 pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
909
910 kr = upl_commit_range(upl, offset, size, flags,
911 pl, MAX_UPL_TRANSFER, &empty);
912
913 if((flags & UPL_COMMIT_FREE_ON_EMPTY) && empty)
914 upl_deallocate(upl);
915
916 return kr;
917 }
918
919 kern_return_t
920 ubc_upl_abort_range(
921 upl_t upl,
922 vm_offset_t offset,
923 vm_size_t size,
924 int abort_flags)
925 {
926 kern_return_t kr;
927 boolean_t empty = FALSE;
928
929 if (abort_flags & UPL_ABORT_FREE_ON_EMPTY)
930 abort_flags |= UPL_ABORT_NOTIFY_EMPTY;
931
932 kr = upl_abort_range(upl, offset, size, abort_flags, &empty);
933
934 if((abort_flags & UPL_ABORT_FREE_ON_EMPTY) && empty)
935 upl_deallocate(upl);
936
937 return kr;
938 }
939
940 kern_return_t
941 ubc_upl_abort(
942 upl_t upl,
943 int abort_type)
944 {
945 kern_return_t kr;
946
947 kr = upl_abort(upl, abort_type);
948 upl_deallocate(upl);
949 return kr;
950 }
951
952 upl_page_info_t *
953 ubc_upl_pageinfo(
954 upl_t upl)
955 {
956 return (UPL_GET_INTERNAL_PAGE_LIST(upl));
957 }
958
959 /************* UBC APIS **************/
960
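/*
 * Simple predicates on a vnode's UBC state: only regular files (VREG)
 * carry UBC info, and UBCISVALID/UBCINVALID additionally exclude
 * VSYSTEM vnodes.
 */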
961 int
962 UBCINFOMISSING(struct vnode * vp)
963 {
964 return((vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo == UBC_INFO_NULL));
965 }
966
967 int
968 UBCINFORECLAIMED(struct vnode * vp)
969 {
970 return((vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo == UBC_INFO_NULL));
971 }
972
973
974 int
975 UBCINFOEXISTS(struct vnode * vp)
976 {
977 return((vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo != UBC_INFO_NULL));
978 }
979 int
980 UBCISVALID(struct vnode * vp)
981 {
982 return((vp) && ((vp)->v_type == VREG) && !((vp)->v_flag & VSYSTEM));
983 }
984 int
985 UBCINVALID(struct vnode * vp)
986 {
987 return(((vp) == NULL) || ((vp) && ((vp)->v_type != VREG))
988 || ((vp) && ((vp)->v_flag & VSYSTEM)));
989 }
990 int
991 UBCINFOCHECK(const char * fun, struct vnode * vp)
992 {
993 if ((vp) && ((vp)->v_type == VREG) &&
994 ((vp)->v_ubcinfo == UBC_INFO_NULL)) {
995 panic("%s: lost ubc_info", (fun));
996 return(1);
997 } else
998 return(0);
999 }
1000