/*
 * Copyright (c) 1999-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 *	File:	ubc_subr.c
 *	Author:	Umesh Vaishampayan [umeshv@apple.com]
 *		05-Aug-1999	umeshv	Created.
 *
 *	Functions related to Unified Buffer cache.
 *
 *	Caller of UBC functions MUST have a valid reference on the vnode.
 *
 */

#undef DIAGNOSTIC
#define DIAGNOSTIC 1

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mount_internal.h>
#include <sys/vnode_internal.h>
#include <sys/ubc_internal.h>
#include <sys/ucred.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/buf.h>
#include <sys/user.h>

#include <mach/mach_types.h>
#include <mach/memory_object_types.h>
#include <mach/memory_object_control.h>
#include <mach/vm_map.h>
#include <mach/upl.h>

#include <kern/kern_types.h>
#include <kern/zalloc.h>
#include <kern/thread.h>
#include <vm/vm_kern.h>
#include <vm/vm_protos.h> /* last */

#if DIAGNOSTIC
#if defined(assert)
#undef assert
#endif
#define assert(cond)    \
	((void) ((cond) ? 0 : panic("%s:%d (%s)", __FILE__, __LINE__, # cond)))
#else
#include <kern/assert.h>
#endif /* DIAGNOSTIC */

int ubc_info_init_internal(struct vnode *vp, int withfsize, off_t filesize);
static int ubc_umcallback(vnode_t, void *);
int ubc_isinuse_locked(vnode_t, int, int);
static int ubc_msync_internal(vnode_t, off_t, off_t, off_t *, int, int *);

struct zone *ubc_info_zone;

/*
 * Initialization of the zone for Unified Buffer Cache.
 */
__private_extern__ void
ubc_init()
{
	int i;

	i = (vm_size_t) sizeof (struct ubc_info);
	/* XXX the number of elements should be tied in to maxvnodes */
	ubc_info_zone = zinit (i, 10000*i, 8192, "ubc_info zone");
	return;
}

/*
 * Initialize a ubc_info structure for a vnode.
 */
int
ubc_info_init(struct vnode *vp)
{
	return(ubc_info_init_internal(vp, 0, 0));
}
int
ubc_info_init_withsize(struct vnode *vp, off_t filesize)
{
	return(ubc_info_init_internal(vp, 1, filesize));
}

int
ubc_info_init_internal(struct vnode *vp, int withfsize, off_t filesize)
{
	register struct ubc_info *uip;
	void *  pager;
	struct proc *p = current_proc();
	int error = 0;
	kern_return_t kret;
	memory_object_control_t control;

	uip = vp->v_ubcinfo;

	if (uip == UBC_INFO_NULL) {

		uip = (struct ubc_info *) zalloc(ubc_info_zone);
		bzero((char *)uip, sizeof(struct ubc_info));

		uip->ui_vnode = vp;
		uip->ui_flags = UI_INITED;
		uip->ui_ucred = NOCRED;
	}
#if DIAGNOSTIC
	else
		Debugger("ubc_info_init: already");
#endif /* DIAGNOSTIC */

	assert(uip->ui_flags != UI_NONE);
	assert(uip->ui_vnode == vp);

	/* now set this ubc_info in the vnode */
	vp->v_ubcinfo = uip;

	pager = (void *)vnode_pager_setup(vp, uip->ui_pager);
	assert(pager);

	SET(uip->ui_flags, UI_HASPAGER);
	uip->ui_pager = pager;
	/*
	 * Note: We cannot use VNOP_GETATTR() to get an accurate
	 * value for ui_size, thanks to NFS: nfs_getattr() can call
	 * vinvalbuf(), and at this point the ubc_info is not set up
	 * to deal with that.  So use a bogus size.
	 */

	/*
	 * Create a vnode - vm_object association.
	 * memory_object_create_named() creates a "named" reference on the
	 * memory object; we hold this reference as long as the vnode is
	 * "alive."  Since memory_object_create_named() took its own reference
	 * on the vnode pager we passed it, we can drop the reference
	 * vnode_pager_setup() returned here.
	 */
	kret = memory_object_create_named(pager,
		(memory_object_size_t)uip->ui_size, &control);
	vnode_pager_deallocate(pager);
	if (kret != KERN_SUCCESS)
		panic("ubc_info_init: memory_object_create_named returned %d", kret);

	assert(control);
	uip->ui_control = control;	/* cache the value of the mo control */
	SET(uip->ui_flags, UI_HASOBJREF);	/* with a named reference */
#if 0
	/* create a pager reference on the vnode */
	error = vnode_pager_vget(vp);
	if (error)
		panic("ubc_info_init: vnode_pager_vget error = %d", error);
#endif
	if (withfsize == 0) {
		struct vfs_context context;
		/* initialize the size */
		context.vc_proc = p;
		context.vc_ucred = kauth_cred_get();
		error = vnode_size(vp, &uip->ui_size, &context);
		if (error)
			uip->ui_size = 0;
	} else {
		uip->ui_size = filesize;
	}
	vp->v_lflag |= VNAMED_UBC;

	return (error);
}
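
/*
 * Example (illustrative sketch only, not part of the original file):
 * a filesystem that already knows a file's size at vnode set-up time
 * can seed ui_size directly with ubc_info_init_withsize() and avoid
 * the vnode_size() call above.  The function name and caller are
 * hypothetical.
 */
#if 0
static int
example_attach_ubc(vnode_t vp, off_t ondisk_size)
{
	if (UBCINFOEXISTS(vp))
		return (0);	/* ubc_info already set up for this vnode */

	return (ubc_info_init_withsize(vp, ondisk_size));
}
#endif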

/* Free the ubc_info */
static void
ubc_info_free(struct ubc_info *uip)
{
	if (IS_VALID_CRED(uip->ui_ucred)) {
		kauth_cred_unref(&uip->ui_ucred);
	}

	if (uip->ui_control != MEMORY_OBJECT_CONTROL_NULL)
		memory_object_control_deallocate(uip->ui_control);

	cluster_release(uip);

	zfree(ubc_info_zone, (vm_offset_t)uip);
	return;
}

void
ubc_info_deallocate(struct ubc_info *uip)
{
	ubc_info_free(uip);
}

/*
 * Communicate a size change of the file to the VM.
 * Returns 1 on success, 0 on failure.
 */
int
ubc_setsize(struct vnode *vp, off_t nsize)
{
	off_t osize;	/* ui_size before change */
	off_t lastpg, olastpgend, lastoff;
	struct ubc_info *uip;
	memory_object_control_t control;
	kern_return_t kret;

	if (nsize < (off_t)0)
		return (0);

	if (!UBCINFOEXISTS(vp))
		return (0);

	uip = vp->v_ubcinfo;
	osize = uip->ui_size;	/* call ubc_getsize() ??? */
	/* Update the size before flushing the VM */
	uip->ui_size = nsize;

	if (nsize >= osize)	/* Nothing more to do */
		return (1);	/* return success */

	/*
	 * When the file shrinks, invalidate the pages beyond the
	 * new size.  Also get rid of garbage beyond nsize on the
	 * last page.  ui_size already holds nsize, which ensures
	 * that a pageout will not write beyond the new end of the
	 * file.
	 */

	lastpg = trunc_page_64(nsize);
	olastpgend = round_page_64(osize);
	control = uip->ui_control;
	assert(control);
	lastoff = (nsize & PAGE_MASK_64);

	/*
	 * If the new length is a multiple of the page size, we need
	 * not flush the last page; invalidating is sufficient.
	 */
	if (!lastoff) {
		/* invalidate last page and old contents beyond nsize */
		kret = memory_object_lock_request(control,
			(memory_object_offset_t)lastpg,
			(memory_object_size_t)(olastpgend - lastpg), NULL, NULL,
			MEMORY_OBJECT_RETURN_NONE, MEMORY_OBJECT_DATA_FLUSH,
			VM_PROT_NO_CHANGE);
		if (kret != KERN_SUCCESS)
			printf("ubc_setsize: invalidate failed (error = %d)\n", kret);

		return ((kret == KERN_SUCCESS) ? 1 : 0);
	}

	/* flush the last page */
	kret = memory_object_lock_request(control,
		(memory_object_offset_t)lastpg,
		PAGE_SIZE_64, NULL, NULL,
		MEMORY_OBJECT_RETURN_DIRTY, FALSE,
		VM_PROT_NO_CHANGE);

	if (kret == KERN_SUCCESS) {
		/* invalidate last page and old contents beyond nsize */
		kret = memory_object_lock_request(control,
			(memory_object_offset_t)lastpg,
			(memory_object_size_t)(olastpgend - lastpg), NULL, NULL,
			MEMORY_OBJECT_RETURN_NONE, MEMORY_OBJECT_DATA_FLUSH,
			VM_PROT_NO_CHANGE);
		if (kret != KERN_SUCCESS)
			printf("ubc_setsize: invalidate failed (error = %d)\n", kret);
	} else
		printf("ubc_setsize: flush failed (error = %d)\n", kret);

	return ((kret == KERN_SUCCESS) ? 1 : 0);
}
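
/*
 * Worked example for the shrink path above (a sketch, assuming 4 KB
 * pages): shrinking a file from osize = 10000 to nsize = 5000 gives
 *
 *	lastpg     = trunc_page_64(5000)  = 4096
 *	olastpgend = round_page_64(10000) = 12288
 *	lastoff    = 5000 & PAGE_MASK_64  = 904
 *
 * Since lastoff != 0, the last page is first flushed so that bytes
 * 4096..4999 reach backing store, and then pages [4096, 12288) are
 * invalidated, discarding the stale data beyond the new EOF.
 */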

/*
 * Get the size of the file
 */
off_t
ubc_getsize(struct vnode *vp)
{
	/*
	 * People depend on the side effect of this working this way,
	 * as they call this for directories.
	 */
	if (!UBCINFOEXISTS(vp))
		return ((off_t)0);
	return (vp->v_ubcinfo->ui_size);
}

/*
 * Call ubc_msync(vp, 0, EOF, NULL, UBC_PUSHALL) on all the vnodes
 * for this mount point.
 * Always returns 0.
 */
__private_extern__ int
ubc_umount(struct mount *mp)
{
	vnode_iterate(mp, 0, ubc_umcallback, 0);
	return(0);
}

static int
ubc_umcallback(vnode_t vp, __unused void * args)
{

	if (UBCINFOEXISTS(vp)) {

		cluster_push(vp, 0);

		(void) ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL, UBC_PUSHALL);
	}
	return (VNODE_RETURNED);
}

/* Get the credentials */
kauth_cred_t
ubc_getcred(struct vnode *vp)
{
	if (UBCINFOEXISTS(vp))
		return (vp->v_ubcinfo->ui_ucred);

	return (NOCRED);
}

int
ubc_setthreadcred(struct vnode *vp, struct proc *p, thread_t thread)
{
	struct ubc_info *uip;
	kauth_cred_t credp;
	struct uthread *uthread = get_bsdthread_info(thread);

	if (!UBCINFOEXISTS(vp))
		return (1);

	vnode_lock(vp);

	uip = vp->v_ubcinfo;
	credp = uip->ui_ucred;

	if (!IS_VALID_CRED(credp)) {
		/* use per-thread cred, if assumed identity, else proc cred */
		if (uthread == NULL || (uthread->uu_flag & UT_SETUID) == 0) {
			uip->ui_ucred = kauth_cred_proc_ref(p);
		} else {
			uip->ui_ucred = uthread->uu_ucred;
			kauth_cred_ref(uip->ui_ucred);
		}
	}
	vnode_unlock(vp);

	return (0);
}

/*
 * Set the credentials; existing credentials are not changed.
 * Returns 1 on success and 0 on failure.
 */
int
ubc_setcred(struct vnode *vp, struct proc *p)
{
	struct ubc_info *uip;
	kauth_cred_t credp;

	if ( !UBCINFOEXISTS(vp))
		return (0);

	vnode_lock(vp);

	uip = vp->v_ubcinfo;
	credp = uip->ui_ucred;

	if (!IS_VALID_CRED(credp)) {
		uip->ui_ucred = kauth_cred_proc_ref(p);
	}
	vnode_unlock(vp);

	return (1);
}

/* Get the pager */
__private_extern__ memory_object_t
ubc_getpager(struct vnode *vp)
{
	if (UBCINFOEXISTS(vp))
		return (vp->v_ubcinfo->ui_pager);

	return (0);
}

/*
 * Get the memory object associated with this vnode.
 * If the vnode was reactivated, the memory object would not exist.
 * Unless "do not reactivate" was specified, look it up using the pager.
 * If a hold was requested, create an object reference if one does not
 * exist already.
 */
memory_object_control_t
ubc_getobject(struct vnode *vp, __unused int flags)
{
	if (UBCINFOEXISTS(vp))
		return((vp->v_ubcinfo->ui_control));

	return (0);
}


off_t
ubc_blktooff(vnode_t vp, daddr64_t blkno)
{
	off_t file_offset;
	int error;

	if (UBCINVALID(vp))
		return ((off_t)-1);

	error = VNOP_BLKTOOFF(vp, blkno, &file_offset);
	if (error)
		file_offset = -1;

	return (file_offset);
}

daddr64_t
ubc_offtoblk(vnode_t vp, off_t offset)
{
	daddr64_t blkno;
	int error = 0;

	if (UBCINVALID(vp))
		return ((daddr64_t)-1);

	error = VNOP_OFFTOBLK(vp, offset, &blkno);
	if (error)
		blkno = -1;

	return (blkno);
}
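
/*
 * Example (a sketch, assuming a hypothetical filesystem whose
 * VNOP_BLKTOOFF/VNOP_OFFTOBLK use a fixed 4096-byte block size):
 * block 3 maps to file offset 12288, and any offset in [12288, 16384)
 * maps back to block 3, so round-tripping an offset truncates it to a
 * block boundary.
 */
#if 0
static off_t
example_block_align(vnode_t vp, off_t offset)
{
	/* 12345 -> block 3 -> 12288 for the hypothetical 4 KB blocks */
	return (ubc_blktooff(vp, ubc_offtoblk(vp, offset)));
}
#endif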

int
ubc_pages_resident(vnode_t vp)
{
	kern_return_t kret;
	boolean_t has_pages_resident;

	if ( !UBCINFOEXISTS(vp))
		return (0);

	kret = memory_object_pages_resident(vp->v_ubcinfo->ui_control, &has_pages_resident);

	if (kret != KERN_SUCCESS)
		return (0);

	if (has_pages_resident == TRUE)
		return (1);

	return (0);
}


/*
 * This interface will eventually be deprecated
 *
 * clean and/or invalidate a range in the memory object that backs this
 * vnode.  The start offset is truncated to the page boundary and the
 * size is adjusted to include the last page in the range.
 *
 * returns 1 for success, 0 for failure
 */
int
ubc_sync_range(vnode_t vp, off_t beg_off, off_t end_off, int flags)
{
	return (ubc_msync_internal(vp, beg_off, end_off, NULL, flags, NULL));
}


/*
 * clean and/or invalidate a range in the memory object that backs this
 * vnode.  The start offset is truncated to the page boundary and the
 * size is adjusted to include the last page in the range.
 * Returns 0 on success; otherwise an errno (EINVAL if the request
 * did nothing, or the error reported by the pager).
 */
errno_t
ubc_msync(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags)
{
	int retval;
	int io_errno = 0;

	if (resid_off)
		*resid_off = beg_off;

	retval = ubc_msync_internal(vp, beg_off, end_off, resid_off, flags, &io_errno);

	if (retval == 0 && io_errno == 0)
		return (EINVAL);
	return (io_errno);
}
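
/*
 * Example (illustrative sketch only): how a filesystem's fsync-like
 * path might push every dirty page for a vnode and wait for the I/O
 * to complete.  The function name is hypothetical.
 */
#if 0
static int
example_push_dirty(vnode_t vp)
{
	/* push dirty pages in [0, EOF) and wait for them to hit disk */
	return (ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL,
		UBC_PUSHDIRTY | UBC_SYNC));
}
#endif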


/*
 * clean and/or invalidate a range in the memory object that backs this
 * vnode.  The start offset is truncated to the page boundary and the
 * size is adjusted to include the last page in the range.
 */
static int
ubc_msync_internal(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags, int *io_errno)
{
	memory_object_size_t tsize;
	kern_return_t kret;
	int request_flags = 0;
	int flush_flags = MEMORY_OBJECT_RETURN_NONE;

	if ( !UBCINFOEXISTS(vp))
		return (0);
	if (end_off <= beg_off)
		return (0);
	if ((flags & (UBC_INVALIDATE | UBC_PUSHDIRTY | UBC_PUSHALL)) == 0)
		return (0);

	if (flags & UBC_INVALIDATE)
		/*
		 * discard the resident pages
		 */
		request_flags = (MEMORY_OBJECT_DATA_FLUSH | MEMORY_OBJECT_DATA_NO_CHANGE);

	if (flags & UBC_SYNC)
		/*
		 * wait for all the I/O to complete before returning
		 */
		request_flags |= MEMORY_OBJECT_IO_SYNC;

	if (flags & UBC_PUSHDIRTY)
		/*
		 * we only return the dirty pages in the range
		 */
		flush_flags = MEMORY_OBJECT_RETURN_DIRTY;

	if (flags & UBC_PUSHALL)
		/*
		 * then return all the interesting pages in the range (both dirty and precious)
		 * to the pager
		 */
		flush_flags = MEMORY_OBJECT_RETURN_ALL;

	beg_off = trunc_page_64(beg_off);
	end_off = round_page_64(end_off);
	tsize = (memory_object_size_t)end_off - beg_off;

	/* flush and/or invalidate pages in the range requested */
	kret = memory_object_lock_request(vp->v_ubcinfo->ui_control,
		beg_off, tsize, resid_off, io_errno,
		flush_flags, request_flags, VM_PROT_NO_CHANGE);

	return ((kret == KERN_SUCCESS) ? 1 : 0);
}


/*
 * The vnode is mapped explicitly, mark it so.
 */
__private_extern__ int
ubc_map(vnode_t vp, int flags)
{
	struct ubc_info *uip;
	int error = 0;
	int need_ref = 0;
	struct vfs_context context;

	if (vnode_getwithref(vp))
		return (0);

	if (UBCINFOEXISTS(vp)) {
		context.vc_proc = current_proc();
		context.vc_ucred = kauth_cred_get();

		error = VNOP_MMAP(vp, flags, &context);

		if (error != EPERM)
			error = 0;

		if (error == 0) {
			vnode_lock(vp);

			uip = vp->v_ubcinfo;

			if ( !ISSET(uip->ui_flags, UI_ISMAPPED))
				need_ref = 1;
			SET(uip->ui_flags, (UI_WASMAPPED | UI_ISMAPPED));

			vnode_unlock(vp);

			if (need_ref)
				vnode_ref(vp);
		}
	}
	vnode_put(vp);

	return (error);
}

/*
 * destroy the named reference for a given vnode
 */
__private_extern__ int
ubc_destroy_named(struct vnode *vp)
{
	memory_object_control_t control;
	struct ubc_info *uip;
	kern_return_t kret;

	/*
	 * We may already have had the object terminated
	 * and the ubcinfo released as a side effect of
	 * some earlier processing.  If so, pretend we did
	 * it, because it probably was a result of our
	 * efforts.
	 */
	if (!UBCINFOEXISTS(vp))
		return (1);

	uip = vp->v_ubcinfo;

	/*
	 * Terminate the memory object.
	 * memory_object_destroy() will result in
	 * vnode_pager_no_senders().
	 * That will release the pager reference
	 * and the vnode will move to the free list.
	 */
	control = ubc_getobject(vp, UBC_HOLDOBJECT);
	if (control != MEMORY_OBJECT_CONTROL_NULL) {

		/*
		 * XXXXX - should we hold the vnode lock here?
		 */
		if (ISSET(vp->v_flag, VTERMINATE))
			panic("ubc_destroy_named: already terminating");
		SET(vp->v_flag, VTERMINATE);

		kret = memory_object_destroy(control, 0);
		if (kret != KERN_SUCCESS)
			return (0);

		/*
		 * memory_object_destroy() is asynchronous
		 * with respect to vnode_pager_no_senders().
		 * wait for vnode_pager_no_senders() to clear
		 * VTERMINATE
		 */
		vnode_lock(vp);
		while (ISSET(vp->v_lflag, VNAMED_UBC)) {
			(void)msleep((caddr_t)&vp->v_lflag, &vp->v_lock,
				PINOD, "ubc_destroy_named", 0);
		}
		vnode_unlock(vp);
	}
	return (1);
}


/*
 * Find out whether a vnode is in use by UBC
 * Returns 1 if file is in use by UBC, 0 if not
 */
int
ubc_isinuse(struct vnode *vp, int busycount)
{
	if ( !UBCINFOEXISTS(vp))
		return (0);
	return(ubc_isinuse_locked(vp, busycount, 0));
}


int
ubc_isinuse_locked(struct vnode *vp, int busycount, int locked)
{
	int retval = 0;


	if (!locked)
		vnode_lock(vp);

	if ((vp->v_usecount - vp->v_kusecount) > busycount)
		retval = 1;

	if (!locked)
		vnode_unlock(vp);
	return (retval);
}
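
/*
 * Example (a sketch; the surrounding logic is hypothetical): a
 * filesystem might use ubc_isinuse() to decide whether anyone other
 * than the caller still has the file open.  busycount is the number
 * of uses the caller itself accounts for.
 */
#if 0
static int
example_last_closer(vnode_t vp)
{
	/* "1" accounts for the caller's own use of the vnode */
	return (!ubc_isinuse(vp, 1));
}
#endif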


/*
 * MUST only be called by the VM
 */
__private_extern__ void
ubc_unmap(struct vnode *vp)
{
	struct vfs_context context;
	struct ubc_info *uip;
	int need_rele = 0;

	if (vnode_getwithref(vp))
		return;

	if (UBCINFOEXISTS(vp)) {
		vnode_lock(vp);

		uip = vp->v_ubcinfo;
		if (ISSET(uip->ui_flags, UI_ISMAPPED)) {
			CLR(uip->ui_flags, UI_ISMAPPED);
			need_rele = 1;
		}
		vnode_unlock(vp);

		if (need_rele) {
			context.vc_proc = current_proc();
			context.vc_ucred = kauth_cred_get();
			(void)VNOP_MNOMAP(vp, &context);

			vnode_rele(vp);
		}
	}
	/*
	 * the drop of the vnode ref will cleanup
	 */
	vnode_put(vp);
}

kern_return_t
ubc_page_op(
	struct vnode	*vp,
	off_t		f_offset,
	int		ops,
	ppnum_t		*phys_entryp,
	int		*flagsp)
{
	memory_object_control_t	control;

	control = ubc_getobject(vp, UBC_FLAGS_NONE);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	return (memory_object_page_op(control,
		(memory_object_offset_t)f_offset,
		ops,
		phys_entryp,
		flagsp));
}

__private_extern__ kern_return_t
ubc_page_op_with_control(
	memory_object_control_t	control,
	off_t			f_offset,
	int			ops,
	ppnum_t			*phys_entryp,
	int			*flagsp)
{
	return (memory_object_page_op(control,
		(memory_object_offset_t)f_offset,
		ops,
		phys_entryp,
		flagsp));
}

kern_return_t
ubc_range_op(
	struct vnode	*vp,
	off_t		f_offset_beg,
	off_t		f_offset_end,
	int		ops,
	int		*range)
{
	memory_object_control_t	control;

	control = ubc_getobject(vp, UBC_FLAGS_NONE);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	return (memory_object_range_op(control,
		(memory_object_offset_t)f_offset_beg,
		(memory_object_offset_t)f_offset_end,
		ops,
		range));
}

kern_return_t
ubc_create_upl(
	struct vnode	*vp,
	off_t		f_offset,
	long		bufsize,
	upl_t		*uplp,
	upl_page_info_t	**plp,
	int		uplflags)
{
	memory_object_control_t	control;
	int	count;
	int	ubcflags;
	kern_return_t	kr;

	if (bufsize & 0xfff)
		return KERN_INVALID_ARGUMENT;

	if (uplflags & UPL_FOR_PAGEOUT) {
		uplflags &= ~UPL_FOR_PAGEOUT;
		ubcflags = UBC_FOR_PAGEOUT;
	} else
		ubcflags = UBC_FLAGS_NONE;

	control = ubc_getobject(vp, ubcflags);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	if (uplflags & UPL_WILL_BE_DUMPED) {
		uplflags &= ~UPL_WILL_BE_DUMPED;
		uplflags |= (UPL_NO_SYNC|UPL_SET_INTERNAL);
	} else
		uplflags |= (UPL_NO_SYNC|UPL_CLEAN_IN_PLACE|UPL_SET_INTERNAL);
	count = 0;
	kr = memory_object_upl_request(control, f_offset, bufsize,
		uplp, NULL, &count, uplflags);
	if (plp != NULL)
		*plp = UPL_GET_INTERNAL_PAGE_LIST(*uplp);
	return kr;
}


kern_return_t
ubc_upl_map(
	upl_t		upl,
	vm_offset_t	*dst_addr)
{
	return (vm_upl_map(kernel_map, upl, dst_addr));
}


kern_return_t
ubc_upl_unmap(
	upl_t	upl)
{
	return(vm_upl_unmap(kernel_map, upl));
}

kern_return_t
ubc_upl_commit(
	upl_t	upl)
{
	upl_page_info_t	*pl;
	kern_return_t	kr;

	pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
	kr = upl_commit(upl, pl, MAX_UPL_TRANSFER);
	upl_deallocate(upl);
	return kr;
}


kern_return_t
ubc_upl_commit_range(
	upl_t		upl,
	vm_offset_t	offset,
	vm_size_t	size,
	int		flags)
{
	upl_page_info_t	*pl;
	boolean_t	empty;
	kern_return_t	kr;

	if (flags & UPL_COMMIT_FREE_ON_EMPTY)
		flags |= UPL_COMMIT_NOTIFY_EMPTY;

	pl = UPL_GET_INTERNAL_PAGE_LIST(upl);

	kr = upl_commit_range(upl, offset, size, flags,
		pl, MAX_UPL_TRANSFER, &empty);

	if((flags & UPL_COMMIT_FREE_ON_EMPTY) && empty)
		upl_deallocate(upl);

	return kr;
}

kern_return_t
ubc_upl_abort_range(
	upl_t		upl,
	vm_offset_t	offset,
	vm_size_t	size,
	int		abort_flags)
{
	kern_return_t	kr;
	boolean_t	empty = FALSE;

	if (abort_flags & UPL_ABORT_FREE_ON_EMPTY)
		abort_flags |= UPL_ABORT_NOTIFY_EMPTY;

	kr = upl_abort_range(upl, offset, size, abort_flags, &empty);

	if((abort_flags & UPL_ABORT_FREE_ON_EMPTY) && empty)
		upl_deallocate(upl);

	return kr;
}

kern_return_t
ubc_upl_abort(
	upl_t	upl,
	int	abort_type)
{
	kern_return_t	kr;

	kr = upl_abort(upl, abort_type);
	upl_deallocate(upl);
	return kr;
}

upl_page_info_t *
ubc_upl_pageinfo(
	upl_t	upl)
{
	return (UPL_GET_INTERNAL_PAGE_LIST(upl));
}
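
/*
 * Example (illustrative sketch only, not part of the original file):
 * the typical UPL life cycle for a page-aligned range -- create the
 * UPL, map it into the kernel map, operate on the pages, unmap, then
 * commit (or abort on error).  The function name is hypothetical.
 */
#if 0
static kern_return_t
example_upl_cycle(vnode_t vp, off_t f_offset, long bufsize)
{
	upl_t		upl;
	upl_page_info_t	*pl;
	vm_offset_t	addr;
	kern_return_t	kr;

	/* bufsize must be page aligned or ubc_create_upl() rejects it */
	kr = ubc_create_upl(vp, f_offset, bufsize, &upl, &pl, 0);
	if (kr != KERN_SUCCESS)
		return (kr);

	if ((kr = ubc_upl_map(upl, &addr)) != KERN_SUCCESS) {
		(void) ubc_upl_abort(upl, UPL_ABORT_ERROR);
		return (kr);
	}
	/* ... read or modify the pages at 'addr' ... */
	(void) ubc_upl_unmap(upl);

	return (ubc_upl_commit_range(upl, 0, bufsize,
		UPL_COMMIT_FREE_ON_EMPTY));
}
#endif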

/************* UBC APIS **************/

int
UBCINFOMISSING(struct vnode * vp)
{
	return((vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo == UBC_INFO_NULL));
}

int
UBCINFORECLAIMED(struct vnode * vp)
{
	return((vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo == UBC_INFO_NULL));
}


int
UBCINFOEXISTS(struct vnode * vp)
{
	return((vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo != UBC_INFO_NULL));
}

int
UBCISVALID(struct vnode * vp)
{
	return((vp) && ((vp)->v_type == VREG) && !((vp)->v_flag & VSYSTEM));
}

int
UBCINVALID(struct vnode * vp)
{
	return(((vp) == NULL) || ((vp) && ((vp)->v_type != VREG))
		|| ((vp) && ((vp)->v_flag & VSYSTEM)));
}

int
UBCINFOCHECK(const char * fun, struct vnode * vp)
{
	if ((vp) && ((vp)->v_type == VREG) &&
	    ((vp)->v_ubcinfo == UBC_INFO_NULL)) {
		panic("%s: lost ubc_info", (fun));
		return(1);
	} else
		return(0);
}