apple/xnu (xnu-124.1): bsd/kern/ubc_subr.c
1 /*
2 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * File: ubc_subr.c
24 * Author: Umesh Vaishampayan [umeshv@apple.com]
25 * 05-Aug-1999 umeshv Created.
26 *
27 * Functions related to the Unified Buffer Cache.
28 *
29 */
30
31 #define DIAGNOSTIC 1
32
33 #include <sys/types.h>
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/lock.h>
37 #include <sys/ubc.h>
38 #include <sys/mount.h>
39 #include <sys/vnode.h>
41 #include <sys/ucred.h>
42 #include <sys/proc.h>
43 #include <sys/buf.h>
44
45 #include <mach/mach_types.h>
46 #include <mach/memory_object_types.h>
47
48 #include <kern/zalloc.h>
49
50 #if DIAGNOSTIC
51 #if defined(assert)
52 #undef assert
53 #endif
54 #define assert(cond) \
55 if (!(cond)) panic("%s:%d (%s)", __FILE__, __LINE__, # cond)
56 #else
57 #include <kern/assert.h>
58 #endif /* DIAGNOSTIC */
59
60 struct zone *ubc_info_zone;
61
62 #if DIAGNOSTIC
63 #define USHOULDNOT(fun) panic("%s: should not", (fun))
64 #else
65 #define USHOULDNOT(fun)
66 #endif /* DIAGNOSTIC */
67
68
69 static void *_ubc_getobject(struct vnode *, int);
70 static void ubc_lock(struct vnode *);
71 static void ubc_unlock(struct vnode *);
72
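/*
 * Re-acquire a reference on the VM object backing the vnode by looking
 * up the pager's control port and then the object itself; panics if the
 * object can no longer be found.
 */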
73 static void
74 ubc_getobjref(struct vnode *vp)
75 {
76 register struct ubc_info *uip;
77 void *pager_cport;
78 void *object = NULL;	/* left NULL if the lookups below fail */
79
80 uip = vp->v_ubcinfo;
81
82 if ((pager_cport = (void *)vnode_pager_lookup(vp, uip->ui_pager)) != NULL)
83 object = (void *)vm_object_lookup(pager_cport);
84
85 if (object != uip->ui_object) {
86 #if 0
87 Debugger("ubc_getobjref: object changed");
88 #endif /* 0 */
89 uip->ui_object = object;
90 }
91
92 if (uip->ui_object == NULL)
93 panic("ubc_getobjref: lost object");
94 }
95
96 /*
97 * Initialization of the zone for Unified Buffer Cache.
98 */
99 void
100 ubc_init()
101 {
102 int i;
103
104 i = (vm_size_t) sizeof (struct ubc_info);
105 /* XXX the number of elements should be tied in to maxvnodes */
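	/*
	 * zinit() arguments (assuming the standard Mach zinit() interface):
	 * element size, maximum zone size in bytes, allocation chunk size
	 * in bytes, and the zone's name.
	 */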
106 ubc_info_zone = zinit (i, 10000*i, 8192, "ubc_info zone");
107 return;
108 }
109
110 /*
111 * Initialize a ubc_info structure for a vnode.
112 */
113 int
114 ubc_info_init(struct vnode *vp)
115 {
116 register struct ubc_info *uip;
117 void * pager;
118 struct vattr vattr;
119 struct proc *p = current_proc();
120 int error = 0;
121 kern_return_t kret;
122 void * pager_cport;
123
124 assert(vp);
125 assert(UBCISVALID(vp));
126
127 ubc_lock(vp);
128 if (ISSET(vp->v_flag, VUINIT)) {
129 /*
130 * another thread is already initializing this ubc_info;
131 * wait until it is done
132 */
133 while (ISSET(vp->v_flag, VUINIT)) {
134 SET(vp->v_flag, VUWANT); /* XXX overloaded! */
135 ubc_unlock(vp);
136 (void) tsleep((caddr_t)vp, PINOD, "ubcinfo", 0);
137 ubc_lock(vp);
138 }
139 ubc_unlock(vp);
140 return (0);
141 } else {
142 SET(vp->v_flag, VUINIT);
143 }
144
145 uip = vp->v_ubcinfo;
146 if ((uip == UBC_INFO_NULL) || (uip == UBC_NOINFO)) {
147 ubc_unlock(vp);
148 uip = (struct ubc_info *) zalloc(ubc_info_zone);
149 bzero(uip, sizeof(struct ubc_info));
150 ubc_lock(vp);
151 SET(uip->ui_flags, UI_INITED);
152 uip->ui_vnode = vp;
153 uip->ui_ucred = NOCRED;
154 }
155
156 assert(uip->ui_flags != UI_NONE);
157 assert(uip->ui_vnode == vp);
158
159 #if 0
160 if(ISSET(uip->ui_flags, UI_HASPAGER))
161 goto done;
162 #endif /* 0 */
163
164 /* now set this ubc_info in the vnode */
165 vp->v_ubcinfo = uip;
166 SET(uip->ui_flags, UI_HASPAGER);
167 ubc_unlock(vp);
168 pager = (void *)vnode_pager_setup(vp, uip->ui_pager);
169 assert(pager);
170
171 /*
172 * We cannot use VOP_GETATTR() here to get an accurate value
173 * for ui_size; thanks to NFS, nfs_getattr() can call
174 * vinvalbuf(), and at this point the ubc_info is not set up
175 * to deal with that.
176 * So start with a bogus size and correct it below.
177 */
178
179 /* create a vm_object association */
180 kret = vm_object_create_nomap(pager, (vm_object_offset_t)uip->ui_size);
181 if (kret != KERN_SUCCESS)
182 panic("ubc_info_init: vm_object_create_nomap returned %d", kret);
183
184 /* _ubc_getobject() gets a reference on the memory object */
185 if (_ubc_getobject(vp, 0) == NULL)
186 panic("ubc_info_init: lost vmobject : uip = 0X%08x", uip);
187
188 /*
189 * vm_object_allocate() called from vm_object_create_nomap()
190 * created the object with a refcount of 1
191 * need to drop the reference gained by vm_object_lookup()
192 */
193 vm_object_deallocate(uip->ui_object);
194
195 /* create a pager reference on the vnode */
196 error = vget(vp, LK_INTERLOCK, p);
197 if (error)
198 panic("ubc_info_init: vget error = %d", error);
199
200 /* initialize the size */
201 error = VOP_GETATTR(vp, &vattr, p->p_ucred, p);
202
203 ubc_lock(vp);
204 uip->ui_size = (error ? 0: vattr.va_size);
205
206 done:
207 CLR(vp->v_flag, VUINIT);
208 if (ISSET(vp->v_flag, VUWANT)) {
209 CLR(vp->v_flag, VUWANT);
210 ubc_unlock(vp);
211 wakeup((caddr_t)vp);
212 } else
213 ubc_unlock(vp);
214
215 return(error);
216 }
217
218 /* Free the ubc_info */
219 void
220 ubc_info_free(struct vnode *vp)
221 {
222 register struct ubc_info *uip;
223 struct ucred *credp;
224
225 assert(vp);
226
227 uip = vp->v_ubcinfo;
228 vp->v_ubcinfo = UBC_INFO_NULL;
229 credp = uip->ui_ucred;
230 if (credp != NOCRED) {
231 uip->ui_ucred = NOCRED;
232 crfree(credp);
233 }
234 zfree(ubc_info_zone, (vm_offset_t)uip);
235 return;
236 }
237
238 /*
239 * Communicate the file's size change to the VM.
240 * Returns 1 on success, 0 on failure.
241 */
242 int
243 ubc_setsize(struct vnode *vp, off_t nsize)
244 {
245 off_t osize; /* ui_size before change */
246 off_t lastpg, olastpgend, lastoff;
247 struct ubc_info *uip;
248 void *object;
249 kern_return_t kret;
250 int didhold;
251
252 #if DIAGNOSTIC
253 assert(vp);
254 assert(nsize >= (off_t)0);
255 #endif
256
257 if (UBCINVALID(vp))
258 return(0);
259
260 if (!UBCINFOEXISTS(vp))
261 return(0);
262
263 uip = vp->v_ubcinfo;
264 osize = uip->ui_size; /* call ubc_getsize() ??? */
265 /* Update the size before flushing the VM */
266 uip->ui_size = nsize;
267
268 if (nsize >= osize) /* Nothing more to do */
269 return(0);
270
271 /*
272 * When the file shrinks, invalidate the pages beyond the
273 * new size. Also get rid of garbage beyond nsize on the
274 * last page.  ui_size already holds nsize, which ensures
275 * that pageout will not write beyond the new end of the
276 * file.
277 */
278
279 didhold = ubc_hold(vp);
280 lastpg = trunc_page_64(nsize);
281 olastpgend = round_page_64(osize);
282 object = _ubc_getobject(vp, UBC_NOREACTIVATE);
283 assert(object);
284 lastoff = (nsize & PAGE_MASK_64);
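	/*
	 * Worked example (assuming 4K pages): shrinking a 10000-byte file
	 * to 5000 bytes gives lastpg = 4096, olastpgend = 12288 and
	 * lastoff = 904, so the partial last page is flushed and
	 * [4096, 12288) is invalidated below.
	 */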
285
286 /*
287 * If the new size is a multiple of the page size, there is no
288 * partial last page to flush; invalidating is sufficient.
289 */
290 if (!lastoff) {
291 /*
292 * memory_object_lock_request() drops an object
293 * reference. gain a reference before calling it
294 */
295 ubc_getobjref(vp);
296
297 /* invalidate last page and old contents beyond nsize */
298 kret = memory_object_lock_request(object,
299 (vm_object_offset_t)lastpg,
300 (memory_object_size_t)(olastpgend - lastpg),
301 MEMORY_OBJECT_RETURN_NONE,TRUE,
302 VM_PROT_NO_CHANGE,MACH_PORT_NULL);
303 if (kret != KERN_SUCCESS)
304 printf("ubc_setsize: invalidate failed (error = %d)\n", kret);
305
306 if (didhold)
307 ubc_rele(vp);
308 return ((kret == KERN_SUCCESS) ? 1 : 0);
309 }
310
311 /*
312 * memory_object_lock_request() drops an object
313 * reference. gain a reference before calling it
314 */
315 ubc_getobjref(vp);
316
317 /* flush the last page */
318 kret = memory_object_lock_request(object,
319 (vm_object_offset_t)lastpg,
320 PAGE_SIZE_64,
321 MEMORY_OBJECT_RETURN_DIRTY,FALSE,
322 VM_PROT_NO_CHANGE,MACH_PORT_NULL);
323
324 if (kret == KERN_SUCCESS) {
325 /*
326 * memory_object_lock_request() drops an object
327 * reference. gain a reference before calling it
328 */
329 ubc_getobjref(vp);
330
331 /* invalidate last page and old contents beyond nsize */
332 kret = memory_object_lock_request(object,
333 (vm_object_offset_t)lastpg,
334 (memory_object_size_t)(olastpgend - lastpg),
335 MEMORY_OBJECT_RETURN_NONE,TRUE,
336 VM_PROT_NO_CHANGE,MACH_PORT_NULL);
337 if (kret != KERN_SUCCESS)
338 printf("ubc_setsize: invalidate failed (error = %d)\n", kret);
339 } else
340 printf("ubc_setsize: flush failed (error = %d)\n", kret);
341
342 if (didhold)
343 ubc_rele(vp);
344 return ((kret == KERN_SUCCESS) ? 1 : 0);
345 }
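
/*
 * Illustrative caller sketch (not part of this file): a filesystem
 * truncate path would typically inform the VM of the new size roughly
 * like this; "my_fs_truncate" and "new_size" are hypothetical names.
 *
 *	if (UBCINFOEXISTS(vp))
 *		(void) ubc_setsize(vp, (off_t)new_size);
 */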
346
347 /*
348 * Get the size of the file
349 * For local file systems the size is locally cached. For NFS
350 * there might be a network transaction for this.
351 */
352 off_t
353 ubc_getsize(struct vnode *vp)
354 {
355 /* XXX deal with NFS */
356 return (vp->v_ubcinfo->ui_size);
357 }
358
359 /* lock for changes to struct UBC */
360 static void
361 ubc_lock(struct vnode *vp)
362 {
363 /* For now, just use the v_interlock */
364 simple_lock(&vp->v_interlock);
365 }
366
367 /* unlock */
368 static void
369 ubc_unlock(struct vnode *vp)
370 {
371 /* For now, just use the v_interlock */
372 simple_unlock(&vp->v_interlock);
373 }
374
375 /*
376 * The caller indicates that the object corresponding to the vnode
377 * cannot be cached in the object cache.  Make it so.
378 * Returns 1 on success, 0 on failure.
379 *
380 * The caller of ubc_uncache() MUST have a valid reference on the vnode.
381 */
382 int
383 ubc_uncache(struct vnode *vp)
384 {
385 void *object;
386 kern_return_t kret;
387 struct ubc_info *uip;
388 memory_object_perf_info_data_t perf;
389 int didhold;
390
391 assert(vp);
392
393 if (!UBCINFOEXISTS(vp))
394 return (0);
395
396 uip = vp->v_ubcinfo;
397
398 assert(uip != UBC_INFO_NULL);
399
400 /*
401 * Age it so that vfree() can make sure that it
402 * gets recycled soon after the last reference is gone.
403 * This ensures that .nfs turds do not linger.
404 */
405 vagevp(vp);
406
407 /* set the "do not cache" bit */
408 SET(uip->ui_flags, UI_DONTCACHE);
409
410 didhold = ubc_hold(vp);
411
412 object = _ubc_getobject(vp, UBC_NOREACTIVATE);
413 assert(object);
414
415 /*
416 * memory_object_change_attributes() drops an object
417 * reference. gain a reference before calling it
418 */
419 ubc_getobjref(vp);
420
421 perf.cluster_size = PAGE_SIZE; /* XXX use real cluster_size. */
422 perf.may_cache = FALSE;
423 kret = memory_object_change_attributes(object,
424 MEMORY_OBJECT_PERFORMANCE_INFO,
425 (memory_object_info_t) &perf,
426 MEMORY_OBJECT_PERF_INFO_COUNT,
427 MACH_PORT_NULL, 0);
428
429 if (didhold)
430 ubc_rele(vp);
431
432 if (kret != KERN_SUCCESS) {
433 #if DIAGNOSTIC
434 panic("ubc_uncache: memory_object_change_attributes "
435 "kret = %d", kret);
436 #endif /* DIAGNOSTIC */
437 return (0);
438 }
439
440 return (1);
441 }
442
443
444 /*
445 * call ubc_clean() and ubc_uncache() on all the vnodes
446 * for this mount point.
447 * returns 1 on success, 0 on failure
448 */
449 int
450 ubc_umount(struct mount *mp)
451 {
452 struct proc *p = current_proc();
453 struct vnode *vp, *nvp;
454 int ret = 1;
455
456 loop:
457 simple_lock(&mntvnode_slock);
458 for (vp = mp->mnt_vnodelist.lh_first; vp; vp = nvp) {
459 if (vp->v_mount != mp) {
460 simple_unlock(&mntvnode_slock);
461 goto loop;
462 }
463 nvp = vp->v_mntvnodes.le_next;
464 simple_unlock(&mntvnode_slock);
465 if (UBCINFOEXISTS(vp)) {
466 ret &= ubc_clean(vp, 0); /* do not invalidate */
467 ret &= ubc_uncache(vp);
468 ubc_release(vp);
469 }
470 simple_lock(&mntvnode_slock);
471 }
472 simple_unlock(&mntvnode_slock);
473 return (ret);
474 }
475
476 /*
477 * Call ubc_umount() for all filesystems.
478 * The list is traversed in reverse order
479 * of mounting to avoid dependencies.
480 */
481 void
482 ubc_unmountall()
483 {
484 struct mount *mp, *nmp;
485
486 /*
487 * Since this only runs when rebooting, it is not interlocked.
488 */
489 for (mp = mountlist.cqh_last; mp != (void *)&mountlist; mp = nmp) {
490 nmp = mp->mnt_list.cqe_prev;
491 (void) ubc_umount(mp);
492 }
493 }
494
495 /* Get the credentials */
496 struct ucred *
497 ubc_getcred(struct vnode *vp)
498 {
499 struct ubc_info *uip;
500
501 assert(vp);
502
503 uip = vp->v_ubcinfo;
504
505 assert(uip);
506
507 if (UBCINVALID(vp)) {
508 return (NOCRED);
509 }
510
511 return (uip->ui_ucred);
512 }
513
514 /*
515 * Set the credentials.
516 * Existing credentials are not changed.
517 * Returns 1 on success and 0 on failure.
518 */
519
520 int
521 ubc_setcred(struct vnode *vp, struct proc *p)
522 {
523 struct ubc_info *uip;
524 struct ucred *credp;
525
526 assert(vp);
527 assert(p);
528
529 uip = vp->v_ubcinfo;
530
531 assert(uip);
532
533 if (UBCINVALID(vp)) {
534 USHOULDNOT("ubc_setcred");
535 return (0);
536 }
537
538 credp = uip->ui_ucred;
539 if (credp == NOCRED) {
540 crhold(p->p_ucred);
541 uip->ui_ucred = p->p_ucred;
542 }
543
544 return (1);
545 }
546
547 /* Get the pager */
548 void *
549 ubc_getpager(struct vnode *vp)
550 {
551 struct ubc_info *uip;
552
553 assert(vp);
554
555 uip = vp->v_ubcinfo;
556
557 assert(uip);
558
559 if (UBCINVALID(vp)) {
560 USHOULDNOT("ubc_getpager");
561 return (0);
562 }
563
564 return (uip->ui_pager);
565 }
566
567 /*
568 * Get the memory object associated with this vnode.
569 * If the vnode was reactivated, the memory object may not exist.
570 * Unless "do not reactivate" (UBC_NOREACTIVATE) was specified, look it
571 * up again through the pager; vm_object_lookup() creates a reference
572 * on the memory object.  If a hold (UBC_HOLDOBJECT) was requested,
573 * take an object reference if one does not exist already.
574 */
575
576 static void *
577 _ubc_getobject(struct vnode *vp, int flags)
578 {
579 struct ubc_info *uip;
580 void *object;
581
582 uip = vp->v_ubcinfo;
583 object = uip->ui_object;
584
585 if ((object == NULL) && ISSET(uip->ui_flags, UI_HASPAGER)
586 && !(flags & UBC_NOREACTIVATE)) {
587 void *pager_cport;
588
589 if (ISSET(uip->ui_flags, UI_HASOBJREF))
590 panic("ubc_getobject: lost object");
591
592 if ((pager_cport = (void *)vnode_pager_lookup(vp, uip->ui_pager)) != NULL) {
593 object = (void *)vm_object_lookup(pager_cport);
594 #if 0
595 if ((uip->ui_object) && (uip->ui_object != object))
596 Debugger("_ubc_getobject: object changed");
597 #endif /* 0 */
598
599 uip->ui_object = object;
600 }
601
602 if (object != NULL)
603 SET(uip->ui_flags, UI_HASOBJREF);
604 }
605
606 if ((flags & UBC_HOLDOBJECT)
607 && (object != NULL)) {
608 if (!ISSET(uip->ui_flags, UI_HASOBJREF)) {
609 ubc_getobjref(vp);
610 SET(uip->ui_flags, UI_HASOBJREF);
611 }
612 }
613 return (uip->ui_object);
614 }
615
616 void *
617 ubc_getobject(struct vnode *vp, int flags)
618 {
619 struct ubc_info *uip;
620 void *object;
621
622 assert(vp);
623 uip = vp->v_ubcinfo;
624 assert(uip);
625
626 if (UBCINVALID(vp)) {
627 return (0);
628 }
629
630 object = _ubc_getobject(vp, flags);
631 assert(object);
632
633 if (!ISSET(uip->ui_flags, (UI_HASOBJREF|UI_WASMAPPED))
634 && !(uip->ui_holdcnt)) {
635 if (!(flags & UBC_PAGINGOP))
636 panic("ubc_getobject: lost reference");
637 }
	return (object);
638 }
639
640 /* Set the pager */
641 int
642 ubc_setpager(struct vnode *vp, void *pager)
643 {
644 struct ubc_info *uip;
645
646 assert(vp);
647
648 uip = vp->v_ubcinfo;
649
650 assert(uip);
651
652 if (UBCINVALID(vp)) {
653 USHOULDNOT("ubc_setpager");
654 return (0);
655 }
656
657 uip->ui_pager = pager;
658 return (1);
659 }
660
661 int
662 ubc_setflags(struct vnode * vp, int flags)
663 {
664 struct ubc_info *uip;
665
666 if (UBCINVALID(vp)) {
667 USHOULDNOT("ubc_setflags");
668 return (EINVAL);
669 }
670
671 assert(vp);
672
673 uip = vp->v_ubcinfo;
674
675 assert(uip);
676
677 SET(uip->ui_flags, flags);
678
679 return(0);
680 }
681
682 int
683 ubc_clearflags(struct vnode * vp, int flags)
684 {
685 struct ubc_info *uip;
686
687 if (UBCINVALID(vp)) {
688 USHOULDNOT("ubc_clearflags");
689 return (EINVAL);
690 }
691
692 assert(vp);
693
694 uip = vp->v_ubcinfo;
695
696 assert(uip);
697
698 CLR(uip->ui_flags, flags);
699
700 return(0);
701 }
702
703
704 int
705 ubc_issetflags(struct vnode * vp, int flags)
706 {
707 struct ubc_info *uip;
708
709 if (UBCINVALID(vp)) {
710 USHOULDNOT("ubc_issetflags");
711 return (EINVAL);
712 }
713
714 assert(vp);
715
716 uip = vp->v_ubcinfo;
717
718 assert(uip);
719
720 return(ISSET(uip->ui_flags, flags));
721 }
722
723 off_t
724 ubc_blktooff(struct vnode *vp, daddr_t blkno)
725 {
726 off_t file_offset;
727 int error;
728
729 assert(vp);
730 if (UBCINVALID(vp)) {
731 USHOULDNOT("ubc_blktooff");
732 return ((off_t)-1);
733 }
734
735 error = VOP_BLKTOOFF(vp, blkno, &file_offset);
736 if (error)
737 file_offset = -1;
738
739 return (file_offset);
740 }
741 daddr_t
742 ubc_offtoblk(struct vnode *vp, off_t offset)
743 {
744 daddr_t blkno;
745 int error=0;
746
747 assert(vp);
748 if (UBCINVALID(vp)) {
749 return ((daddr_t)-1);
750 }
751
752 error = VOP_OFFTOBLK(vp, offset, &blkno);
753 if (error)
754 blkno = -1;
755
756 return (blkno);
757 }
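
/*
 * Illustrative sketch (the mapping is filesystem-dependent): for a
 * filesystem whose logical block size is 4096 bytes, VOP_BLKTOOFF(vp, 3,
 * &off) would typically yield off == 12288, and VOP_OFFTOBLK(vp, 12288,
 * &blkno) would yield blkno == 3.
 */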
758
759 /*
760 * Cause the file data in VM to be pushed out to storage;
761 * if "invalidate" is set, all currently resident pages are also released.
762 * Returns 1 on success, 0 on failure.
763 */
764 int
765 ubc_clean(struct vnode *vp, int invalidate)
766 {
767 off_t size;
768 struct ubc_info *uip;
769 void *object;
770 kern_return_t kret;
771 int flags = 0;
772 int didhold;
773
774 #if DIAGNOSTIC
775 assert(vp);
776 #endif
777
778 if (UBCINVALID(vp))
779 return(0);
780
781 if (!UBCINFOEXISTS(vp))
782 return(0);
783
784 /*
785 * if invalidate was requested, write dirty data and then discard
786 * the resident pages
787 */
788 if (invalidate)
789 flags = (MEMORY_OBJECT_DATA_FLUSH | MEMORY_OBJECT_DATA_NO_CHANGE);
790
791 didhold = ubc_hold(vp);
792 uip = vp->v_ubcinfo;
793 size = uip->ui_size; /* call ubc_getsize() ??? */
794
795 object = _ubc_getobject(vp, UBC_NOREACTIVATE);
796 assert(object);
797
798 /*
799 * memory_object_lock_request() drops an object
800 * reference. gain a reference before calling it
801 */
802 ubc_getobjref(vp);
803
804 vp->v_flag &= ~VHASDIRTY;
805 vp->v_clen = 0;
806
807 /* Write the dirty data to the file and, if requested, discard cached pages */
808 kret = memory_object_lock_request(object,
809 (vm_object_offset_t)0,
810 (memory_object_size_t)round_page_64(size),
811 MEMORY_OBJECT_RETURN_ALL, flags,
812 VM_PROT_NO_CHANGE,MACH_PORT_NULL);
813
814 if (kret != KERN_SUCCESS) {
815 printf("ubc_clean: clean failed (error = %d)\n", kret);
816 }
817
818 if (didhold)
819 ubc_rele(vp);
820
821 return ((kret == KERN_SUCCESS) ? 1 : 0);
822 }
823
824 /*
825 * Cause the dirty file data in VM to be pushed out to storage;
826 * currently resident pages are NOT invalidated.
827 * Returns 1 on success, 0 on failure.
828 */
829 int
830 ubc_pushdirty(struct vnode *vp)
831 {
832 off_t size;
833 struct ubc_info *uip;
834 void *object;
835 kern_return_t kret;
836 int didhold;
837
838 #if DIAGNOSTIC
839 assert(vp);
840 #endif
841
842 if (UBCINVALID(vp))
843 return(0);
844
845 if (!UBCINFOEXISTS(vp))
846 return(0);
847
848 didhold = ubc_hold(vp);
849 uip = vp->v_ubcinfo;
850 size = uip->ui_size; /* call ubc_getsize() ??? */
851
852 object = _ubc_getobject(vp, UBC_NOREACTIVATE);
853 assert(object);
854
855 /*
856 * memory_object_lock_request() drops an object
857 * reference. gain a reference before calling it
858 */
859 ubc_getobjref(vp);
860
861 vp->v_flag &= ~VHASDIRTY;
862 vp->v_clen = 0;
863
864 /* Write the dirty data to the file; cached pages are not discarded */
865 kret = memory_object_lock_request(object,
866 (vm_object_offset_t)0,
867 (memory_object_size_t)round_page_64(size),
868 MEMORY_OBJECT_RETURN_DIRTY,FALSE,
869 VM_PROT_NO_CHANGE,MACH_PORT_NULL);
870
871 if (kret != KERN_SUCCESS) {
872 printf("ubc_pushdirty: flush failed (error = %d)\n", kret);
873 }
874
875 if (didhold)
876 ubc_rele(vp);
877
878 return ((kret == KERN_SUCCESS) ? 1 : 0);
879 }
880
881 /*
882 * Make sure the vm object does not vanish.
883 * Returns 1 if the hold count was incremented,
884 * 0 if the hold count was not incremented.
885 * This return value should be used to balance
886 * ubc_hold() and ubc_rele(); see the usage note after this function.
887 */
888 int
889 ubc_hold(struct vnode *vp)
890 {
891 struct ubc_info *uip;
892 void *object;
893
894 if (UBCINVALID(vp))
895 return (0);
896
897 if (!UBCINFOEXISTS(vp)) {
898 /* nothing more to do for a dying vnode */
899 if ((vp->v_flag & VXLOCK) || (vp->v_flag & VTERMINATE))
900 return (0);
901 vp->v_ubcinfo = UBC_INFO_NULL;
902 ubc_info_init(vp);
903 }
904 uip = vp->v_ubcinfo;
905 object = _ubc_getobject(vp, UBC_NOREACTIVATE);
906 assert(object);
907
908 if (uip->ui_holdcnt++ == 0)
909 ubc_getobjref(vp);
910 if (uip->ui_holdcnt < 0)
911 panic("ubc_hold: ui_holdcnt");
912
913 return (1);
914 }
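
/*
 * Typical usage of the hold/release pair, as seen elsewhere in this file
 * (ubc_setsize, ubc_clean, ubc_invalidate, ...):
 *
 *	didhold = ubc_hold(vp);
 *	... operate on the memory object ...
 *	if (didhold)
 *		ubc_rele(vp);
 */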
915
916 /* release the reference on the vm object */
917 void
918 ubc_rele(struct vnode *vp)
919 {
920 struct ubc_info *uip;
921 void *object;
922
923 if (UBCINVALID(vp))
924 return;
925
926 if (!UBCINFOEXISTS(vp)) {
927 /* nothing more to do for a dying vnode */
928 if ((vp->v_flag & VXLOCK) || (vp->v_flag & VTERMINATE))
929 return;
930 panic("ubc_rele: can not");
931 }
932
933 uip = vp->v_ubcinfo;
934
935 /* get the object before dropping the hold count */
936 object = _ubc_getobject(vp, UBC_NOREACTIVATE);
937
938 if (uip->ui_holdcnt == 0)
939 panic("ubc_rele: ui_holdcnt");
940
941 if (--uip->ui_holdcnt == 0) {
942 /* If the object is already dead do nothing */
943 if (object)
944 vm_object_deallocate(object);
945 #if DIAGNOSTIC
946 else
947 printf("ubc_rele: null object for 0x%x\n", vp);
948 #endif /* DIAGNOSTIC */
949 }
950
951 return;
952 }
953
954 /*
955 * The vnode is being mapped explicitly.
956 * Mark it so.  (Releasing the vm object reference gained in
957 * ubc_info_init() is currently deferred; see the #if 1 block below.)
958 */
959 void
960 ubc_map(struct vnode *vp)
961 {
962 struct ubc_info *uip;
963 void *object;
964
965 ubc_lock(vp);
966 #if DIAGNOSTIC
967 assert(vp);
968 #endif
969
970 if (UBCINVALID(vp)) {
971 ubc_unlock(vp);
972 return;
973 }
974
975 if (!UBCINFOEXISTS(vp))
976 panic("ubc_map: can not");
977
978 uip = vp->v_ubcinfo;
979
980 SET(uip->ui_flags, UI_WASMAPPED);
981 uip->ui_mapped = 1;
982 ubc_unlock(vp);
983
984 #if 1
985 /*
986 * Do not release the ubc reference on the
987 * memory object right away. Let vnreclaim
988 * deal with that
989 */
990 #else
991 /*
992 * Release the ubc reference.  The memory object cache
993 * is responsible for caching this object now.
994 */
995 if (ISSET(uip->ui_flags, UI_HASOBJREF)) {
996 object = _ubc_getobject(vp, UBC_NOREACTIVATE);
997 assert(object);
998 CLR(uip->ui_flags, UI_HASOBJREF);
999 vm_object_deallocate(object);
1000 }
1001 #endif
1002
1003 return;
1004
1005 }
1006
1007 /*
1008 * Release the memory object reference on the vnode
1009 * only if it is not in use
1010 * Return 1 if the reference was released, 0 otherwise.
1011 */
1012 int
1013 ubc_release(struct vnode *vp)
1014 {
1015 struct ubc_info *uip;
1016 void *object;
1017 #if DIAGNOSTIC
1018 assert(vp);
1019 #endif
1020
1021 if (UBCINVALID(vp))
1022 return (0);
1023
1024 if (!UBCINFOEXISTS(vp))
1025 panic("ubc_release: can not");
1026
1027 uip = vp->v_ubcinfo;
1028
1029 /* can not release held vnodes */
1030 if (uip->ui_holdcnt)
1031 return (0);
1032
1033 if (ISSET(uip->ui_flags, UI_HASOBJREF)) {
1034 object = _ubc_getobject(vp, UBC_NOREACTIVATE);
1035 assert(object);
1036 CLR(uip->ui_flags, UI_HASOBJREF);
1037 vm_object_deallocate(object);
1038 return (1);
1039 } else
1040 return (0);
1041 }
1042
1043 /*
1044 * Invalidate a range in the memory object that backs this
1045 * vnode. The offset is truncated to the page boundary and the
1046 * size is adjusted to include the last page in the range.
1047 */
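/*
 * Worked example (assuming 4K pages): offset = 5000, size = 3000 gives
 * toff = 4096 and tsize = 8192 - 4096 = 4096, so exactly one page is
 * invalidated.
 */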
1048 int
1049 ubc_invalidate(struct vnode *vp, off_t offset, size_t size)
1050 {
1051 struct ubc_info *uip;
1052 void *object;
1053 kern_return_t kret;
1054 off_t toff;
1055 size_t tsize;
1056 int didhold;
1057
1058 #if DIAGNOSTIC
1059 assert(vp);
1060 #endif
1061
1062 if (UBCINVALID(vp))
1063 return (0);
1064
1065 if (!UBCINFOEXISTS(vp))
1066 panic("ubc_invalidate: can not");
1067
1068 didhold = ubc_hold(vp);
1069 toff = trunc_page_64(offset);
1070 tsize = (size_t)(round_page_64(offset+size) - toff);
1071 uip = vp->v_ubcinfo;
1072 object = _ubc_getobject(vp, UBC_NOREACTIVATE);
1073 assert(object);
1074
1075 /*
1076 * memory_object_lock_request() drops an object
1077 * reference. gain a reference before calling it
1078 */
1079 ubc_getobjref(vp);
1080
1081 /* invalidate pages in the range requested */
1082 kret = memory_object_lock_request(object,
1083 (vm_object_offset_t)toff,
1084 (memory_object_size_t)tsize,
1085 MEMORY_OBJECT_RETURN_NONE,
1086 (MEMORY_OBJECT_DATA_NO_CHANGE| MEMORY_OBJECT_DATA_FLUSH),
1087 VM_PROT_NO_CHANGE,MACH_PORT_NULL);
1088 if (kret != KERN_SUCCESS)
1089 printf("ubc_invalidate: invalidate failed (error = %d)\n", kret);
1090
1091 if (didhold)
1092 ubc_rele(vp);
1093
1094 return ((kret == KERN_SUCCESS) ? 1 : 0);
1095 }
1096
1097 /*
1098 * Find out whether a vnode is in use by UBC
1099 * Returns 1 if file is in use by UBC, 0 if not
1100 */
1101 int
1102 ubc_isinuse(struct vnode *vp, int tookref)
1103 {
1104 int busycount = tookref ? 2 : 1;
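	/*
	 * The pager reference taken via vget() in ubc_info_init() accounts
	 * for one use count; if the caller took its own reference, that
	 * accounts for a second.  A use count above "busycount", or an
	 * equal count with an active mapping, means the file is in use.
	 */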
1105
1106 if (!UBCINFOEXISTS(vp))
1107 return(0);
1108
1109 if (vp->v_usecount > busycount)
1110 return (1);
1111
1112 if ((vp->v_usecount == busycount)
1113 && (vp->v_ubcinfo->ui_mapped == 1))
1114 return(1);
1115 else
1116 return(0);
1117 }
1118
1119
1120 /* -- UGLY HACK ALERT -- */
1121 /*
1122 * The backdoor routine to clear the mapped state (ui_mapped).
1123 * MUST only be called by the VM.
1124 *
1125 * Note that this routine is not under the funnel.  There are numerous
1126 * things about the calling sequence that make this work on SMP.
1127 * Any code change in those paths can break this.
1128 *
1129 * This will be replaced soon.
1130 */
1131 void
1132 ubc_unmap(struct vnode *vp)
1133 {
1134 struct ubc_info *uip;
1135
1136 #if DIAGNOSTIC
1137 assert(vp);
1138 #endif
1139
1140 if (UBCINVALID(vp)) {
1141 return;
1142 }
1143
1144 if (!UBCINFOEXISTS(vp))
1145 panic("ubc_unmap: can not");
1146
1147 ubc_lock(vp);
1148 uip = vp->v_ubcinfo;
1149
1150 uip->ui_mapped = 0;
1151 ubc_unlock(vp);
1152
1153 return;
1154 }
1155