/*
 * Copyright (c) 1999-2001 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 *	File:	ubc_subr.c
 *	Author:	Umesh Vaishampayan [umeshv@apple.com]
 *		05-Aug-1999	umeshv	Created.
 *
 *	Functions related to the Unified Buffer Cache.
 *
 *	Caller of UBC functions MUST have a valid reference on the vnode.
 *
 */
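
/*
 * Illustrative sketch (not part of the original file, not compiled):
 * how a caller honors the reference rule above. The vnode "vp" is
 * hypothetical; the point is only that vget()/vrele() must bracket
 * any use of the ubc_* interfaces.
 */
#if 0
        if (vget(vp, 0, current_proc()) == 0) { /* take a valid reference */
                (void) ubc_pushdirty(vp);       /* safe: reference is held */
                vrele(vp);                      /* drop it when done */
        }
#endif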

#undef	DIAGNOSTIC
#define	DIAGNOSTIC 1

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/ubc.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/ucred.h>
#include <sys/proc.h>
#include <sys/buf.h>

#include <mach/mach_types.h>
#include <mach/memory_object_types.h>

#include <kern/zalloc.h>

#if DIAGNOSTIC
#if defined(assert)
#undef assert
#endif
#define assert(cond)	\
        do { if (!(cond)) panic("%s:%d (%s)", __FILE__, __LINE__, # cond); } while (0)
#else
#include <kern/assert.h>
#endif /* DIAGNOSTIC */

struct zone	*ubc_info_zone;

/* lock for changes to struct UBC */
static __inline__ void
ubc_lock(struct vnode *vp)
{
        /* For now, just use the v_interlock */
        simple_lock(&vp->v_interlock);
}

/* unlock */
static __inline__ void
ubc_unlock(struct vnode *vp)
{
        /* For now, just use the v_interlock */
        simple_unlock(&vp->v_interlock);
}

/*
 * Initialization of the zone for Unified Buffer Cache.
 */
__private_extern__ void
ubc_init(void)
{
        int i;

        i = (vm_size_t) sizeof (struct ubc_info);
        /* XXX the number of elements should be tied in to maxvnodes */
        ubc_info_zone = zinit(i, 10000 * i, 8192, "ubc_info zone");
        return;
}

/*
 * Initialize a ubc_info structure for a vnode.
 */
int
ubc_info_init(struct vnode *vp)
{
        register struct ubc_info *uip;
        void *pager;
        struct vattr vattr;
        struct proc *p = current_proc();
        int error = 0;
        kern_return_t kret;
        memory_object_control_t control;

        if (!UBCISVALID(vp))
                return (EINVAL);

        ubc_lock(vp);
        if (ISSET(vp->v_flag, VUINIT)) {
                /*
                 * Another thread is already initializing this vnode;
                 * wait until it is done.
                 */
                while (ISSET(vp->v_flag, VUINIT)) {
                        SET(vp->v_flag, VUWANT); /* XXX overloaded! */
                        ubc_unlock(vp);
                        (void) tsleep((caddr_t)vp, PINOD, "ubcinfo", 0);
                        ubc_lock(vp);
                }
                ubc_unlock(vp);
                return (0);
        } else {
                SET(vp->v_flag, VUINIT);
        }

        uip = vp->v_ubcinfo;
        if ((uip == UBC_INFO_NULL) || (uip == UBC_NOINFO)) {
                ubc_unlock(vp);
                uip = (struct ubc_info *) zalloc(ubc_info_zone);
                uip->ui_pager = MEMORY_OBJECT_NULL;
                uip->ui_control = MEMORY_OBJECT_CONTROL_NULL;
                uip->ui_flags = UI_INITED;
                uip->ui_vnode = vp;
                uip->ui_ucred = NOCRED;
                uip->ui_refcount = 1;
                uip->ui_size = 0;
                uip->ui_mapped = 0;
                ubc_lock(vp);
        }
#if DIAGNOSTIC
        else
                Debugger("ubc_info_init: already");
#endif /* DIAGNOSTIC */

        assert(uip->ui_flags != UI_NONE);
        assert(uip->ui_vnode == vp);

#if 0
        if (ISSET(uip->ui_flags, UI_HASPAGER))
                goto done;
#endif /* 0 */

        /* now set this ubc_info in the vnode */
        vp->v_ubcinfo = uip;
        SET(uip->ui_flags, UI_HASPAGER);
        ubc_unlock(vp);
        pager = (void *)vnode_pager_setup(vp, uip->ui_pager);
        assert(pager);
        ubc_setpager(vp, pager);

        /*
         * Note: we cannot use VOP_GETATTR() here to get an accurate
         * value for ui_size, thanks to NFS: nfs_getattr() can call
         * vinvalbuf(), and at this point the ubc_info is not set up
         * to deal with that. So start with a bogus (zero) size and
         * fix it up below.
         */

        /*
         * Create a vnode - vm_object association.
         * memory_object_create_named() creates a "named" reference on
         * the memory object; we hold this reference as long as the
         * vnode is "alive." Since memory_object_create_named() took
         * its own reference on the vnode pager we passed it, we can
         * drop the reference vnode_pager_setup() returned here.
         */
        kret = memory_object_create_named(pager,
                (memory_object_size_t)uip->ui_size, &control);
        vnode_pager_deallocate(pager);
        if (kret != KERN_SUCCESS)
                panic("ubc_info_init: memory_object_create_named returned %d", kret);

        assert(control);
        uip->ui_control = control;	/* cache the value of the mo control */
        SET(uip->ui_flags, UI_HASOBJREF);	/* with a named reference */
        /* create a pager reference on the vnode */
        error = vnode_pager_vget(vp);
        if (error)
                panic("ubc_info_init: vnode_pager_vget error = %d", error);

        /* initialize the size */
        error = VOP_GETATTR(vp, &vattr, p->p_ucred, p);

        ubc_lock(vp);
        uip->ui_size = (error ? 0 : vattr.va_size);

done:
        CLR(vp->v_flag, VUINIT);
        if (ISSET(vp->v_flag, VUWANT)) {
                CLR(vp->v_flag, VUWANT);
                ubc_unlock(vp);
                wakeup((caddr_t)vp);
        } else
                ubc_unlock(vp);

        return (error);
}

/* Free the ubc_info */
static void
ubc_info_free(struct ubc_info *uip)
{
        struct ucred *credp;

        credp = uip->ui_ucred;
        if (credp != NOCRED) {
                uip->ui_ucred = NOCRED;
                crfree(credp);
        }

        if (uip->ui_control != MEMORY_OBJECT_CONTROL_NULL)
                memory_object_control_deallocate(uip->ui_control);

        zfree(ubc_info_zone, (vm_offset_t)uip);
        return;
}

void
ubc_info_deallocate(struct ubc_info *uip)
{
        assert(uip->ui_refcount > 0);

        if (uip->ui_refcount-- == 1)
                ubc_info_free(uip);
}

/*
 * Communicate to the VM that the size of the file has changed.
 * Returns 1 on success, 0 on failure.
 */
int
ubc_setsize(struct vnode *vp, off_t nsize)
{
        off_t osize;	/* ui_size before change */
        off_t lastpg, olastpgend, lastoff;
        struct ubc_info *uip;
        memory_object_control_t control;
        kern_return_t kret;

        assert(nsize >= (off_t)0);

        if (UBCINVALID(vp))
                return (0);

        if (!UBCINFOEXISTS(vp))
                return (0);

        uip = vp->v_ubcinfo;
        osize = uip->ui_size;	/* call ubc_getsize() ??? */
        /* Update the size before flushing the VM */
        uip->ui_size = nsize;

        if (nsize >= osize)	/* Nothing more to do */
                return (1);	/* return success */

        /*
         * When the file shrinks, invalidate the pages beyond the
         * new size. Also get rid of garbage beyond nsize on the
         * last page. ui_size already holds nsize, which ensures
         * that the pageout will not write beyond the new end of
         * the file.
         */
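        /*
         * Worked example (added note; assumes 4K pages): shrinking from
         * osize 0x5000 to nsize 0x2800 gives lastpg = 0x2000,
         * olastpgend = 0x5000 and lastoff = 0x800, so the partial last
         * page at 0x2000 is flushed and [0x2000, 0x5000) is invalidated.
         */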

        lastpg = trunc_page_64(nsize);
        olastpgend = round_page_64(osize);
        control = uip->ui_control;
        assert(control);
        lastoff = (nsize & PAGE_MASK_64);

        /*
         * If the new length is a multiple of the page size, there is
         * no partial last page to flush; invalidating is sufficient.
         */
        if (!lastoff) {
                /* invalidate last page and old contents beyond nsize */
                kret = memory_object_lock_request(control,
                        (memory_object_offset_t)lastpg,
                        (memory_object_size_t)(olastpgend - lastpg),
                        MEMORY_OBJECT_RETURN_NONE, MEMORY_OBJECT_DATA_FLUSH,
                        VM_PROT_NO_CHANGE);
                if (kret != KERN_SUCCESS)
                        printf("ubc_setsize: invalidate failed (error = %d)\n", kret);

                return ((kret == KERN_SUCCESS) ? 1 : 0);
        }

        /* flush the last page */
        kret = memory_object_lock_request(control,
                (memory_object_offset_t)lastpg,
                PAGE_SIZE_64,
                MEMORY_OBJECT_RETURN_DIRTY, FALSE,
                VM_PROT_NO_CHANGE);

        if (kret == KERN_SUCCESS) {
                /* invalidate last page and old contents beyond nsize */
                kret = memory_object_lock_request(control,
                        (memory_object_offset_t)lastpg,
                        (memory_object_size_t)(olastpgend - lastpg),
                        MEMORY_OBJECT_RETURN_NONE, MEMORY_OBJECT_DATA_FLUSH,
                        VM_PROT_NO_CHANGE);
                if (kret != KERN_SUCCESS)
                        printf("ubc_setsize: invalidate failed (error = %d)\n", kret);
        } else
                printf("ubc_setsize: flush failed (error = %d)\n", kret);

        return ((kret == KERN_SUCCESS) ? 1 : 0);
}
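
/*
 * Illustrative sketch (not part of the original file, not compiled):
 * a filesystem truncate path would typically inform the VM of the new
 * length right after updating the on-disk size. "vp" and "length" are
 * hypothetical.
 */
#if 0
        if (!ubc_setsize(vp, (off_t)length))
                printf("truncate: ubc_setsize failed\n");
#endif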

/*
 * Get the size of the file
 */
off_t
ubc_getsize(struct vnode *vp)
{
        return (vp->v_ubcinfo->ui_size);
}

/*
 * The caller indicates that the object corresponding to the vnode
 * can not be cached in the object cache. Make it so.
 * Returns 1 on success, 0 on failure.
 */
int
ubc_uncache(struct vnode *vp)
{
        kern_return_t kret;
        struct ubc_info *uip;
        memory_object_control_t control;
        memory_object_perf_info_data_t perf;

        if (!UBCINFOEXISTS(vp))
                return (0);

        uip = vp->v_ubcinfo;

        assert(uip != UBC_INFO_NULL);

        /*
         * AGE it so that vfree() can make sure that it
         * gets recycled soon after the last reference is gone.
         * This ensures that .nfs turds do not linger.
         */
        vagevp(vp);

        /* set the "do not cache" bit */
        SET(uip->ui_flags, UI_DONTCACHE);

        control = uip->ui_control;
        assert(control);

        perf.cluster_size = PAGE_SIZE; /* XXX use real cluster_size. */
        perf.may_cache = FALSE;
        kret = memory_object_change_attributes(control,
                MEMORY_OBJECT_PERFORMANCE_INFO,
                (memory_object_info_t) &perf,
                MEMORY_OBJECT_PERF_INFO_COUNT);

        if (kret != KERN_SUCCESS) {
                printf("ubc_uncache: memory_object_change_attributes "
                        "kret = %d\n", kret);
                return (0);
        }

        ubc_release_named(vp);

        return (1);
}

/*
 * Call ubc_clean() and ubc_uncache() on all the vnodes
 * for this mount point.
 * Returns 1 on success, 0 on failure.
 */
__private_extern__ int
ubc_umount(struct mount *mp)
{
        struct proc *p = current_proc();
        struct vnode *vp, *nvp;
        int ret = 1;

loop:
        simple_lock(&mntvnode_slock);
        for (vp = mp->mnt_vnodelist.lh_first; vp; vp = nvp) {
                if (vp->v_mount != mp) {
                        simple_unlock(&mntvnode_slock);
                        goto loop;
                }
                nvp = vp->v_mntvnodes.le_next;
                simple_unlock(&mntvnode_slock);
                if (UBCINFOEXISTS(vp)) {

                        /*
                         * Must get a valid reference on the vnode
                         * before calling UBC functions
                         */
                        if (vget(vp, 0, p)) {
                                ret = 0;
                                simple_lock(&mntvnode_slock);
                                continue; /* move on to the next vnode */
                        }
                        ret &= ubc_clean(vp, 0); /* do not invalidate */
                        ret &= ubc_uncache(vp);
                        vrele(vp);
                }
                simple_lock(&mntvnode_slock);
        }
        simple_unlock(&mntvnode_slock);
        return (ret);
}

/*
 * Call ubc_umount() for all filesystems.
 * The list is traversed in reverse order
 * of mounting to avoid dependencies.
 */
__private_extern__ void
ubc_unmountall(void)
{
        struct mount *mp, *nmp;

        /*
         * Since this only runs when rebooting, it is not interlocked.
         */
        for (mp = mountlist.cqh_last; mp != (void *)&mountlist; mp = nmp) {
                nmp = mp->mnt_list.cqe_prev;
                (void) ubc_umount(mp);
        }
}

/* Get the credentials */
struct ucred *
ubc_getcred(struct vnode *vp)
{
        struct ubc_info *uip;

        uip = vp->v_ubcinfo;

        if (UBCINVALID(vp))
                return (NOCRED);

        return (uip->ui_ucred);
}

/*
 * Set the credentials
 * existing credentials are not changed
 * returns 1 on success and 0 on failure
 */
int
ubc_setcred(struct vnode *vp, struct proc *p)
{
        struct ubc_info *uip;
        struct ucred *credp;

        uip = vp->v_ubcinfo;

        if (UBCINVALID(vp))
                return (0);

        credp = uip->ui_ucred;
        if (credp == NOCRED) {
                crhold(p->p_ucred);
                uip->ui_ucred = p->p_ucred;
        }

        return (1);
}

/* Get the pager */
__private_extern__ memory_object_t
ubc_getpager(struct vnode *vp)
{
        struct ubc_info *uip;

        uip = vp->v_ubcinfo;

        if (UBCINVALID(vp))
                return (0);

        return (uip->ui_pager);
}

/*
 * Get the memory object associated with this vnode.
 * If the vnode was reactivated, the memory object may no longer exist.
 * Unless "do not reactivate" was specified, look it up using the pager.
 * If a hold was requested, create an object reference if one does not
 * exist already.
 */
memory_object_control_t
ubc_getobject(struct vnode *vp, int flags)
{
        struct ubc_info *uip;
        memory_object_control_t control;

        uip = vp->v_ubcinfo;

        if (UBCINVALID(vp))
                return (0);

        ubc_lock(vp);

        control = uip->ui_control;

        if ((flags & UBC_HOLDOBJECT) && (!ISSET(uip->ui_flags, UI_HASOBJREF))) {

                /*
                 * Take a temporary reference on the ubc info so that it won't go
                 * away during our recovery attempt.
                 */
                uip->ui_refcount++;
                ubc_unlock(vp);
                if (memory_object_recover_named(control, TRUE) == KERN_SUCCESS) {
                        ubc_lock(vp);
                        SET(uip->ui_flags, UI_HASOBJREF);
                        ubc_unlock(vp);
                } else {
                        control = MEMORY_OBJECT_CONTROL_NULL;
                }
                ubc_info_deallocate(uip);

        } else {
                ubc_unlock(vp);
        }

        return (control);
}

/* Set the pager */
int
ubc_setpager(struct vnode *vp, memory_object_t pager)
{
        struct ubc_info *uip;

        uip = vp->v_ubcinfo;

        if (UBCINVALID(vp))
                return (0);

        uip->ui_pager = pager;
        return (1);
}

int
ubc_setflags(struct vnode *vp, int flags)
{
        struct ubc_info *uip;

        if (UBCINVALID(vp))
                return (0);

        uip = vp->v_ubcinfo;

        SET(uip->ui_flags, flags);

        return (1);
}

int
ubc_clearflags(struct vnode *vp, int flags)
{
        struct ubc_info *uip;

        if (UBCINVALID(vp))
                return (0);

        uip = vp->v_ubcinfo;

        CLR(uip->ui_flags, flags);

        return (1);
}

int
ubc_issetflags(struct vnode *vp, int flags)
{
        struct ubc_info *uip;

        if (UBCINVALID(vp))
                return (0);

        uip = vp->v_ubcinfo;

        return (ISSET(uip->ui_flags, flags));
}

off_t
ubc_blktooff(struct vnode *vp, daddr_t blkno)
{
        off_t file_offset;
        int error;

        if (UBCINVALID(vp))
                return ((off_t)-1);

        error = VOP_BLKTOOFF(vp, blkno, &file_offset);
        if (error)
                file_offset = -1;

        return (file_offset);
}

daddr_t
ubc_offtoblk(struct vnode *vp, off_t offset)
{
        daddr_t blkno;
        int error = 0;

        if (UBCINVALID(vp)) {
                return ((daddr_t)-1);
        }

        error = VOP_OFFTOBLK(vp, offset, &blkno);
        if (error)
                blkno = -1;

        return (blkno);
}
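
/*
 * Illustrative sketch (not part of the original file, not compiled):
 * the two conversions above are inverses at block granularity. For a
 * hypothetical vnode "vp" and block number "blkno":
 */
#if 0
        {
                off_t off = ubc_blktooff(vp, blkno); /* block -> byte offset */

                if (off != (off_t)-1)
                        assert(ubc_offtoblk(vp, off) == blkno);
        }
#endif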

/*
 * Cause the file data in VM to be pushed out to the backing store.
 * If "invalidate" is set, all currently valid pages are also released.
 * Returns 1 on success, 0 on failure.
 */
int
ubc_clean(struct vnode *vp, int invalidate)
{
        off_t size;
        struct ubc_info *uip;
        memory_object_control_t control;
        kern_return_t kret;
        int flags = 0;

        if (UBCINVALID(vp))
                return (0);

        if (!UBCINFOEXISTS(vp))
                return (0);

        /*
         * if invalidate was requested, write dirty data and then discard
         * the resident pages
         */
        if (invalidate)
                flags = (MEMORY_OBJECT_DATA_FLUSH | MEMORY_OBJECT_DATA_NO_CHANGE);

        uip = vp->v_ubcinfo;
        size = uip->ui_size;	/* call ubc_getsize() ??? */

        control = uip->ui_control;
        assert(control);

        vp->v_flag &= ~VHASDIRTY;
        vp->v_clen = 0;

        /* Write the dirty data in the file and discard cached pages */
        kret = memory_object_lock_request(control,
                (memory_object_offset_t)0,
                (memory_object_size_t)round_page_64(size),
                MEMORY_OBJECT_RETURN_ALL, flags,
                VM_PROT_NO_CHANGE);

        if (kret != KERN_SUCCESS)
                printf("ubc_clean: clean failed (error = %d)\n", kret);

        return ((kret == KERN_SUCCESS) ? 1 : 0);
}

/*
 * Cause the file data in VM to be pushed out to the backing store.
 * Currently valid pages are NOT invalidated.
 * Returns 1 on success, 0 on failure.
 */
int
ubc_pushdirty(struct vnode *vp)
{
        off_t size;
        struct ubc_info *uip;
        memory_object_control_t control;
        kern_return_t kret;

        if (UBCINVALID(vp))
                return (0);

        if (!UBCINFOEXISTS(vp))
                return (0);

        uip = vp->v_ubcinfo;
        size = uip->ui_size;	/* call ubc_getsize() ??? */

        control = uip->ui_control;
        assert(control);

        vp->v_flag &= ~VHASDIRTY;
        vp->v_clen = 0;

        /* Write back the dirty data in the file, keeping the cached pages */
        kret = memory_object_lock_request(control,
                (memory_object_offset_t)0,
                (memory_object_size_t)round_page_64(size),
                MEMORY_OBJECT_RETURN_DIRTY, FALSE,
                VM_PROT_NO_CHANGE);

        if (kret != KERN_SUCCESS)
                printf("ubc_pushdirty: flush failed (error = %d)\n", kret);

        return ((kret == KERN_SUCCESS) ? 1 : 0);
}

/*
 * Cause the file data in the given range to be pushed out to the
 * backing store. Currently valid pages are NOT invalidated.
 * Returns 1 on success, 0 on failure.
 */
int
ubc_pushdirty_range(struct vnode *vp, off_t offset, off_t size)
{
        struct ubc_info *uip;
        memory_object_control_t control;
        kern_return_t kret;

        if (UBCINVALID(vp))
                return (0);

        if (!UBCINFOEXISTS(vp))
                return (0);

        uip = vp->v_ubcinfo;

        control = uip->ui_control;
        assert(control);

        /* Write any dirty pages in the requested range of the file: */
        kret = memory_object_lock_request(control,
                (memory_object_offset_t)offset,
                (memory_object_size_t)round_page_64(size),
                MEMORY_OBJECT_RETURN_DIRTY, FALSE,
                VM_PROT_NO_CHANGE);

        if (kret != KERN_SUCCESS)
                printf("ubc_pushdirty_range: flush failed (error = %d)\n", kret);

        return ((kret == KERN_SUCCESS) ? 1 : 0);
}

/*
 * Make sure the vm object does not vanish.
 * Returns 1 if the hold count was incremented,
 * 0 if the hold count was not incremented.
 * This return value should be used to balance
 * ubc_hold() and ubc_rele().
 */
int
ubc_hold(struct vnode *vp)
{
        struct ubc_info *uip;

        if (UBCINVALID(vp))
                return (0);

        if (!UBCINFOEXISTS(vp)) {
                /* must be invalid or dying vnode */
                assert(UBCINVALID(vp) ||
                        ((vp->v_flag & VXLOCK) || (vp->v_flag & VTERMINATE)));
                return (0);
        }

        uip = vp->v_ubcinfo;
        assert(uip->ui_control != MEMORY_OBJECT_CONTROL_NULL);

        ubc_lock(vp);
        uip->ui_refcount++;

        if (!ISSET(uip->ui_flags, UI_HASOBJREF)) {
                ubc_unlock(vp);
                if (memory_object_recover_named(uip->ui_control, TRUE) != KERN_SUCCESS) {
                        ubc_info_deallocate(uip);
                        return (0);
                }
                ubc_lock(vp);
                SET(uip->ui_flags, UI_HASOBJREF);
                ubc_unlock(vp);
        } else {
                ubc_unlock(vp);
        }

        assert(uip->ui_refcount > 0);
        return (1);
}

/*
 * Drop the holdcount.
 * Release the reference on the vm object if this is an "uncached"
 * ubc_info.
 */
void
ubc_rele(struct vnode *vp)
{
        struct ubc_info *uip;

        if (UBCINVALID(vp))
                return;

        if (!UBCINFOEXISTS(vp)) {
                /* nothing more to do for a dying vnode */
                if ((vp->v_flag & VXLOCK) || (vp->v_flag & VTERMINATE))
                        return;
                panic("ubc_rele: missing ubc_info");
        }

        uip = vp->v_ubcinfo;

        if (uip->ui_refcount == 1)
                panic("ubc_rele: ui_refcount");

        --uip->ui_refcount;

        if ((uip->ui_refcount == 1)
                && ISSET(uip->ui_flags, UI_DONTCACHE))
                (void) ubc_release_named(vp);

        return;
}
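
/*
 * Illustrative sketch (not part of the original file, not compiled):
 * balancing ubc_hold() and ubc_rele(). Only a successful ubc_hold()
 * (return value 1) may be paired with a ubc_rele().
 */
#if 0
        if (ubc_hold(vp)) {
                /* ... operate on the memory object ... */
                ubc_rele(vp);
        }
#endif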

/*
 * The vnode is mapped explicitly, mark it so.
 */
__private_extern__ void
ubc_map(struct vnode *vp)
{
        struct ubc_info *uip;

        if (UBCINVALID(vp))
                return;

        if (!UBCINFOEXISTS(vp))
                return;

        ubc_lock(vp);
        uip = vp->v_ubcinfo;

        SET(uip->ui_flags, UI_WASMAPPED);
        uip->ui_mapped = 1;
        ubc_unlock(vp);

        return;
}

/*
 * Release the memory object reference on the vnode
 * only if it is not in use.
 * Return 1 if the reference was released, 0 otherwise.
 */
int
ubc_release_named(struct vnode *vp)
{
        struct ubc_info *uip;
        memory_object_control_t control;
        kern_return_t kret;

        if (UBCINVALID(vp))
                return (0);

        if (!UBCINFOEXISTS(vp))
                return (0);

        uip = vp->v_ubcinfo;

        /* can not release held or mapped vnodes */
        if (ISSET(uip->ui_flags, UI_HASOBJREF) &&
                (uip->ui_refcount == 1) && !uip->ui_mapped) {
                control = uip->ui_control;
                assert(control);
                CLR(uip->ui_flags, UI_HASOBJREF);
                kret = memory_object_release_name(control,
                        MEMORY_OBJECT_RESPECT_CACHE);
                return ((kret != KERN_SUCCESS) ? 0 : 1);
        } else
                return (0);
}

/*
 * This function used to be called by extensions directly. Some may
 * still exist with this behavior. In those cases, we will do the
 * release as part of reclaiming or cleaning the vnode. We don't
 * need anything explicit - so just stub this out until those callers
 * get cleaned up.
 */
int
ubc_release(struct vnode *vp)
{
        return 0;
}

/*
 * Destroy the named reference for a given vnode.
 */
__private_extern__ int
ubc_destroy_named(struct vnode *vp)
{
        memory_object_control_t control;
        struct ubc_info *uip;
        kern_return_t kret;

        /*
         * We may already have had the object terminated
         * and the ubcinfo released as a side effect of
         * some earlier processing. If so, pretend we did
         * it, because it probably was a result of our
         * efforts.
         */
        if (!UBCINFOEXISTS(vp))
                return (1);

        uip = vp->v_ubcinfo;

        /* can not destroy held vnodes */
        if (uip->ui_refcount > 1)
                return (0);

        /*
         * Terminate the memory object.
         * memory_object_destroy() will result in
         * vnode_pager_no_senders().
         * That will release the pager reference
         * and the vnode will move to the free list.
         */
        control = ubc_getobject(vp, UBC_HOLDOBJECT);
        if (control != MEMORY_OBJECT_CONTROL_NULL) {

                if (ISSET(vp->v_flag, VTERMINATE))
                        panic("ubc_destroy_named: already terminating");
                SET(vp->v_flag, VTERMINATE);

                kret = memory_object_destroy(control, 0);
                if (kret != KERN_SUCCESS)
                        return (0);

                /*
                 * memory_object_destroy() is asynchronous
                 * with respect to vnode_pager_no_senders().
                 * wait for vnode_pager_no_senders() to clear
                 * VTERMINATE
                 */
                while (ISSET(vp->v_flag, VTERMINATE)) {
                        SET(vp->v_flag, VTERMWANT);
                        (void)tsleep((caddr_t)&vp->v_ubcinfo,
                                PINOD, "ubc_destroy_named", 0);
                }
        }
        return (1);
}

/*
 * Invalidate a range in the memory object that backs this
 * vnode. The offset is truncated to the page boundary and the
 * size is adjusted to include the last page in the range.
 */
int
ubc_invalidate(struct vnode *vp, off_t offset, size_t size)
{
        struct ubc_info *uip;
        memory_object_control_t control;
        kern_return_t kret;
        off_t toff;
        size_t tsize;

        if (UBCINVALID(vp))
                return (0);

        if (!UBCINFOEXISTS(vp))
                return (0);

        toff = trunc_page_64(offset);
        tsize = (size_t)(round_page_64(offset + size) - toff);
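
        /*
         * Worked example (added note; assumes 4K pages): offset 0x1800
         * and size 0x1000 give toff = 0x1000 and tsize = 0x3000 - 0x1000
         * = 0x2000, i.e. both pages touched by the byte range go away.
         */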
        uip = vp->v_ubcinfo;
        control = uip->ui_control;
        assert(control);

        /* invalidate pages in the range requested */
        kret = memory_object_lock_request(control,
                (memory_object_offset_t)toff,
                (memory_object_size_t)tsize,
                MEMORY_OBJECT_RETURN_NONE,
                (MEMORY_OBJECT_DATA_NO_CHANGE | MEMORY_OBJECT_DATA_FLUSH),
                VM_PROT_NO_CHANGE);
        if (kret != KERN_SUCCESS)
                printf("ubc_invalidate: invalidate failed (error = %d)\n", kret);

        return ((kret == KERN_SUCCESS) ? 1 : 0);
}

/*
 * Find out whether a vnode is in use by UBC.
 * Returns 1 if the file is in use by UBC, 0 if not.
 */
int
ubc_isinuse(struct vnode *vp, int tookref)
{
        int busycount = tookref ? 2 : 1;

        if (!UBCINFOEXISTS(vp))
                return (0);

        if (vp->v_usecount > busycount)
                return (1);

        if ((vp->v_usecount == busycount)
                && (vp->v_ubcinfo->ui_mapped == 1))
                return (1);
        else
                return (0);
}
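
/*
 * Reading the check above (added note): busycount is apparently the
 * number of usecount references already accounted for -- the reference
 * the caller must hold per this file's contract, plus one more if
 * tookref says the caller took its own vget() reference. Any reference
 * beyond that, or a mapping at exactly that count, means the file is
 * in use by UBC.
 */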

/*
 * The backdoor routine to clear ui_mapped.
 * MUST only be called by the VM.
 *
 * Note that this routine is not called under the funnel. There are
 * numerous things about the calling sequence that make this work on
 * SMP; any code change in those paths can break this.
 */
__private_extern__ void
ubc_unmap(struct vnode *vp)
{
        struct ubc_info *uip;
        boolean_t funnel_state;

        if (UBCINVALID(vp))
                return;

        if (!UBCINFOEXISTS(vp))
                return;

        ubc_lock(vp);
        uip = vp->v_ubcinfo;
        uip->ui_mapped = 0;
        if ((uip->ui_refcount > 1) || !ISSET(uip->ui_flags, UI_DONTCACHE)) {
                ubc_unlock(vp);
                return;
        }
        ubc_unlock(vp);

        funnel_state = thread_funnel_set(kernel_flock, TRUE);
        (void) ubc_release_named(vp);
        (void) thread_funnel_set(kernel_flock, funnel_state);
}

kern_return_t
ubc_page_op(
        struct vnode	*vp,
        off_t		f_offset,
        int		ops,
        vm_offset_t	*phys_entryp,
        int		*flagsp)
{
        memory_object_control_t control;

        control = ubc_getobject(vp, UBC_FLAGS_NONE);
        if (control == MEMORY_OBJECT_CONTROL_NULL)
                return KERN_INVALID_ARGUMENT;

        return (memory_object_page_op(control,
                (memory_object_offset_t)f_offset,
                ops,
                phys_entryp,
                flagsp));
}

kern_return_t
ubc_create_upl(
        struct vnode	*vp,
        off_t		f_offset,
        long		bufsize,
        upl_t		*uplp,
        upl_page_info_t	**plp,
        int		uplflags)
{
        memory_object_control_t control;
        int count;
        kern_return_t kr;

        if (bufsize & 0xfff)
                return KERN_INVALID_ARGUMENT;

        control = ubc_getobject(vp, UBC_FLAGS_NONE);
        if (control == MEMORY_OBJECT_CONTROL_NULL)
                return KERN_INVALID_ARGUMENT;

        uplflags |= (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL);
        count = 0;
        kr = memory_object_upl_request(control, f_offset, bufsize,
                uplp, NULL, &count, uplflags);
        if (plp != NULL)
                *plp = UPL_GET_INTERNAL_PAGE_LIST(*uplp);
        return kr;
}

kern_return_t
ubc_upl_map(
        upl_t		upl,
        vm_offset_t	*dst_addr)
{
        return (vm_upl_map(kernel_map, upl, dst_addr));
}

kern_return_t
ubc_upl_unmap(
        upl_t	upl)
{
        return (vm_upl_unmap(kernel_map, upl));
}

kern_return_t
ubc_upl_commit(
        upl_t	upl)
{
        upl_page_info_t *pl;
        kern_return_t kr;

        pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
        kr = upl_commit(upl, pl, MAX_UPL_TRANSFER);
        upl_deallocate(upl);
        return kr;
}

kern_return_t
ubc_upl_commit_range(
        upl_t		upl,
        vm_offset_t	offset,
        vm_size_t	size,
        int		flags)
{
        upl_page_info_t *pl;
        boolean_t empty;
        kern_return_t kr;

        if (flags & UPL_COMMIT_FREE_ON_EMPTY)
                flags |= UPL_COMMIT_NOTIFY_EMPTY;

        pl = UPL_GET_INTERNAL_PAGE_LIST(upl);

        kr = upl_commit_range(upl, offset, size, flags,
                pl, MAX_UPL_TRANSFER, &empty);

        if ((flags & UPL_COMMIT_FREE_ON_EMPTY) && empty)
                upl_deallocate(upl);

        return kr;
}
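
/*
 * Illustrative sketch (not part of the original file, not compiled):
 * a full UPL round trip using the wrappers in this file -- create a
 * page list covering one page, map it into the kernel, unmap it, then
 * commit the range. "vp" and "f_offset" are hypothetical and error
 * handling is elided.
 */
#if 0
        {
                upl_t upl;
                upl_page_info_t *pl;
                vm_offset_t kaddr;

                if (ubc_create_upl(vp, f_offset, PAGE_SIZE, &upl, &pl,
                                0) == KERN_SUCCESS) {
                        (void) ubc_upl_map(upl, &kaddr); /* kaddr -> page data */
                        /* ... inspect or modify the page contents ... */
                        (void) ubc_upl_unmap(upl);
                        (void) ubc_upl_commit_range(upl, 0, PAGE_SIZE,
                                UPL_COMMIT_FREE_ON_EMPTY);
                }
        }
#endif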

kern_return_t
ubc_upl_abort_range(
        upl_t		upl,
        vm_offset_t	offset,
        vm_size_t	size,
        int		abort_flags)
{
        kern_return_t kr;
        boolean_t empty = FALSE;

        if (abort_flags & UPL_ABORT_FREE_ON_EMPTY)
                abort_flags |= UPL_ABORT_NOTIFY_EMPTY;

        kr = upl_abort_range(upl, offset, size, abort_flags, &empty);

        if ((abort_flags & UPL_ABORT_FREE_ON_EMPTY) && empty)
                upl_deallocate(upl);

        return kr;
}

kern_return_t
ubc_upl_abort(
        upl_t	upl,
        int	abort_type)
{
        kern_return_t kr;

        kr = upl_abort(upl, abort_type);
        upl_deallocate(upl);
        return kr;
}

upl_page_info_t *
ubc_upl_pageinfo(
        upl_t	upl)
{
        return (UPL_GET_INTERNAL_PAGE_LIST(upl));
}