/*
 * Copyright (c) 1999-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * File:	ubc_subr.c
 * Author:	Umesh Vaishampayan [umeshv@apple.com]
 *		05-Aug-1999	umeshv	Created.
 *
 *	Functions related to Unified Buffer cache.
 *
 * Caller of UBC functions MUST have a valid reference on the vnode.
 *
 */

#undef DIAGNOSTIC
#define DIAGNOSTIC 1

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mount_internal.h>
#include <sys/vnode_internal.h>
#include <sys/ubc_internal.h>
#include <sys/ucred.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/buf.h>
#include <sys/user.h>

#include <mach/mach_types.h>
#include <mach/memory_object_types.h>
#include <mach/memory_object_control.h>
#include <mach/vm_map.h>
#include <mach/upl.h>

#include <kern/kern_types.h>
#include <kern/zalloc.h>
#include <kern/thread.h>
#include <vm/vm_kern.h>
#include <vm/vm_protos.h> /* last */

#if DIAGNOSTIC
#if defined(assert)
#undef assert
#endif
#define assert(cond)    \
    ((void) ((cond) ? 0 : panic("%s:%d (%s)", __FILE__, __LINE__, # cond)))
#else
#include <kern/assert.h>
#endif /* DIAGNOSTIC */

int	ubc_info_init_internal(struct vnode *vp, int withfsize, off_t filesize);
static int ubc_umcallback(vnode_t, void *);
int	ubc_isinuse_locked(vnode_t, int, int);
static int ubc_msync_internal(vnode_t, off_t, off_t, off_t *, int, int *);

struct zone	*ubc_info_zone;

/*
 * Initialization of the zone for Unified Buffer Cache.
 */
__private_extern__ void
ubc_init()
{
	int	i;

	i = (vm_size_t) sizeof (struct ubc_info);
	/* XXX  the number of elements should be tied in to maxvnodes */
	ubc_info_zone = zinit (i, 10000*i, 8192, "ubc_info zone");
	return;
}

/*
 * Initialize a ubc_info structure for a vnode.
 */
int
ubc_info_init(struct vnode *vp)
{
	return(ubc_info_init_internal(vp, 0, 0));
}
int
ubc_info_init_withsize(struct vnode *vp, off_t filesize)
{
	return(ubc_info_init_internal(vp, 1, filesize));
}

int
ubc_info_init_internal(struct vnode *vp, int withfsize, off_t filesize)
{
	register struct ubc_info	*uip;
	void *  pager;
	struct proc *p = current_proc();
	int error = 0;
	kern_return_t kret;
	memory_object_control_t control;

	uip = vp->v_ubcinfo;

	if (uip == UBC_INFO_NULL) {

		uip = (struct ubc_info *) zalloc(ubc_info_zone);
		bzero((char *)uip, sizeof(struct ubc_info));

		uip->ui_vnode = vp;
		uip->ui_flags = UI_INITED;
		uip->ui_ucred = NOCRED;
	}
#if DIAGNOSTIC
	else
		Debugger("ubc_info_init: already");
#endif /* DIAGNOSTIC */

	assert(uip->ui_flags != UI_NONE);
	assert(uip->ui_vnode == vp);

	/* now set this ubc_info in the vnode */
	vp->v_ubcinfo = uip;

	pager = (void *)vnode_pager_setup(vp, uip->ui_pager);
	assert(pager);

	SET(uip->ui_flags, UI_HASPAGER);
	uip->ui_pager = pager;

	/*
	 * Note: We cannot use VNOP_GETATTR() to get an accurate
	 * value of ui_size.  Thanks to NFS:
	 * nfs_getattr() can call vinvalbuf(), and at this point the
	 * ubc_info is not set up to deal with that.
	 * So use a bogus size.
	 */

	/*
	 * create a vnode - vm_object association:
	 * memory_object_create_named() creates a "named" reference on the
	 * memory object.  We hold this reference as long as the vnode is
	 * "alive."  Since memory_object_create_named() took its own reference
	 * on the vnode pager we passed it, we can drop the reference
	 * vnode_pager_setup() returned here.
	 */
	kret = memory_object_create_named(pager,
		(memory_object_size_t)uip->ui_size, &control);
	vnode_pager_deallocate(pager);
	if (kret != KERN_SUCCESS)
		panic("ubc_info_init: memory_object_create_named returned %d", kret);

	assert(control);
	uip->ui_control = control;	/* cache the value of the mo control */
	SET(uip->ui_flags, UI_HASOBJREF);	/* with a named reference */
#if 0
	/* create a pager reference on the vnode */
	error = vnode_pager_vget(vp);
	if (error)
		panic("ubc_info_init: vnode_pager_vget error = %d", error);
#endif
	if (withfsize == 0) {
		struct vfs_context context;
		/* initialize the size */
		context.vc_proc = p;
		context.vc_ucred = kauth_cred_get();
		error = vnode_size(vp, &uip->ui_size, &context);
		if (error)
			uip->ui_size = 0;
	} else {
		uip->ui_size = filesize;
	}
	vp->v_lflag |= VNAMED_UBC;

	return (error);
}

/* Free the ubc_info */
static void
ubc_info_free(struct ubc_info *uip)
{
	kauth_cred_t credp;

	credp = uip->ui_ucred;
	if (credp != NOCRED) {
		uip->ui_ucred = NOCRED;
		kauth_cred_rele(credp);
	}

	if (uip->ui_control != MEMORY_OBJECT_CONTROL_NULL)
		memory_object_control_deallocate(uip->ui_control);

	cluster_release(uip);

	zfree(ubc_info_zone, (vm_offset_t)uip);
	return;
}

void
ubc_info_deallocate(struct ubc_info *uip)
{
	ubc_info_free(uip);
}

/*
 * Communicate with VM the size change of the file
 * returns 1 on success, 0 on failure
 */
int
ubc_setsize(struct vnode *vp, off_t nsize)
{
	off_t osize;	/* ui_size before change */
	off_t lastpg, olastpgend, lastoff;
	struct ubc_info *uip;
	memory_object_control_t control;
	kern_return_t kret;

	if (nsize < (off_t)0)
		return (0);

	if (!UBCINFOEXISTS(vp))
		return (0);

	uip = vp->v_ubcinfo;
	osize = uip->ui_size;	/* call ubc_getsize() ??? */
	/* Update the size before flushing the VM */
	uip->ui_size = nsize;

	if (nsize >= osize)	/* Nothing more to do */
		return (1);		/* return success */

	/*
	 * When the file shrinks, invalidate the pages beyond the
	 * new size.  Also get rid of garbage beyond nsize on the
	 * last page.  The ui_size already has the nsize; this
	 * ensures that the pageout would not write beyond the new
	 * end of the file.
	 */

	lastpg = trunc_page_64(nsize);
	olastpgend = round_page_64(osize);
	control = uip->ui_control;
	assert(control);
	lastoff = (nsize & PAGE_MASK_64);

	/*
	 * If the new size is a multiple of the page size, we need not
	 * flush the last page; invalidating is sufficient.
	 */
	if (!lastoff) {
		/* invalidate last page and old contents beyond nsize */
		kret = memory_object_lock_request(control,
			(memory_object_offset_t)lastpg,
			(memory_object_size_t)(olastpgend - lastpg), NULL, NULL,
			MEMORY_OBJECT_RETURN_NONE, MEMORY_OBJECT_DATA_FLUSH,
			VM_PROT_NO_CHANGE);
		if (kret != KERN_SUCCESS)
			printf("ubc_setsize: invalidate failed (error = %d)\n", kret);

		return ((kret == KERN_SUCCESS) ? 1 : 0);
	}

	/* flush the last page */
	kret = memory_object_lock_request(control,
		(memory_object_offset_t)lastpg,
		PAGE_SIZE_64, NULL, NULL,
		MEMORY_OBJECT_RETURN_DIRTY, FALSE,
		VM_PROT_NO_CHANGE);

	if (kret == KERN_SUCCESS) {
		/* invalidate last page and old contents beyond nsize */
		kret = memory_object_lock_request(control,
			(memory_object_offset_t)lastpg,
			(memory_object_size_t)(olastpgend - lastpg), NULL, NULL,
			MEMORY_OBJECT_RETURN_NONE, MEMORY_OBJECT_DATA_FLUSH,
			VM_PROT_NO_CHANGE);
		if (kret != KERN_SUCCESS)
			printf("ubc_setsize: invalidate failed (error = %d)\n", kret);
	} else
		printf("ubc_setsize: flush failed (error = %d)\n", kret);

	return ((kret == KERN_SUCCESS) ? 1 : 0);
}
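
/*
 * Example (illustrative sketch, not part of the original sources): a
 * filesystem's truncate path would typically shrink its on-disk size
 * first and then call ubc_setsize() so the VM can toss pages beyond
 * the new EOF.  example_fs_truncate() and example_fs_set_disksize()
 * are hypothetical names.
 */
#if 0	/* example only */
static int
example_fs_truncate(vnode_t vp, off_t length)
{
	int error;

	/* hypothetical: update the filesystem's notion of the size */
	if ((error = example_fs_set_disksize(vp, length)))
		return (error);

	/* ubc_setsize() returns 1 on success, 0 on failure */
	if (ubc_setsize(vp, length) == 0)
		return (EIO);
	return (0);
}
#endif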

/*
 * Get the size of the file
 */
off_t
ubc_getsize(struct vnode *vp)
{
	/* people depend on the side effect of this working this way
	 * as they call this for directories
	 */
	if (!UBCINFOEXISTS(vp))
		return ((off_t)0);
	return (vp->v_ubcinfo->ui_size);
}

/*
 * Push all dirty pages -- cluster_push() plus
 * ubc_msync(vp, 0, EOF, NULL, UBC_PUSHALL) -- on all the vnodes
 * for this mount point.
 * Always returns 0.
 */

__private_extern__ int
ubc_umount(struct mount *mp)
{
	vnode_iterate(mp, 0, ubc_umcallback, 0);
	return(0);
}

static int
ubc_umcallback(vnode_t vp, __unused void * args)
{

	if (UBCINFOEXISTS(vp)) {

		cluster_push(vp, 0);

		(void) ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL, UBC_PUSHALL);
	}
	return (VNODE_RETURNED);
}

/* Get the credentials */
kauth_cred_t
ubc_getcred(struct vnode *vp)
{
	if (UBCINFOEXISTS(vp))
		return (vp->v_ubcinfo->ui_ucred);

	return (NOCRED);
}

int
ubc_setthreadcred(struct vnode *vp, struct proc *p, thread_t thread)
{
	struct ubc_info *uip;
	kauth_cred_t credp;
	struct uthread *uthread = get_bsdthread_info(thread);

	if (!UBCINFOEXISTS(vp))
		return (1);

	vnode_lock(vp);

	uip = vp->v_ubcinfo;
	credp = uip->ui_ucred;

	if (credp == NOCRED) {
		/* use per-thread cred, if assumed identity, else proc cred */
		if (uthread == NULL || (uthread->uu_flag & UT_SETUID) == 0) {
			uip->ui_ucred = kauth_cred_proc_ref(p);
		} else {
			uip->ui_ucred = uthread->uu_ucred;
			kauth_cred_ref(uip->ui_ucred);
		}
	}
	vnode_unlock(vp);

	return (0);
}

/*
 * Set the credentials
 * existing credentials are not changed
 * returns 1 on success and 0 on failure
 */
int
ubc_setcred(struct vnode *vp, struct proc *p)
{
	struct ubc_info *uip;
	kauth_cred_t credp;

	if ( !UBCINFOEXISTS(vp))
		return (0);

	vnode_lock(vp);

	uip = vp->v_ubcinfo;
	credp = uip->ui_ucred;

	if (credp == NOCRED) {
		uip->ui_ucred = kauth_cred_proc_ref(p);
	}
	vnode_unlock(vp);

	return (1);
}
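
/*
 * Example (illustrative sketch, not part of the original sources): a
 * filesystem read path might stash the caller's credential with
 * ubc_setcred() so that later asynchronous pageins have a credential
 * to use; only the first caller's credential is kept.
 * example_fs_read() is a hypothetical name.
 */
#if 0	/* example only */
static int
example_fs_read(vnode_t vp, struct uio *uio, __unused int ioflag, vfs_context_t ctx)
{
	/* existing credentials, if any, are left unchanged */
	(void) ubc_setcred(vp, vfs_context_proc(ctx));

	return (cluster_read(vp, uio, ubc_getsize(vp), 0));
}
#endif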

/* Get the pager */
__private_extern__ memory_object_t
ubc_getpager(struct vnode *vp)
{
	if (UBCINFOEXISTS(vp))
		return (vp->v_ubcinfo->ui_pager);

	return (0);
}

/*
 * Get the memory object associated with this vnode.
 * If the vnode was reactivated, the memory object would not exist.
 * Unless "do not reactivate" was specified, look it up using the pager.
 * If hold was requested, create an object reference if one does not
 * exist already.
 */

memory_object_control_t
ubc_getobject(struct vnode *vp, __unused int flags)
{
	if (UBCINFOEXISTS(vp))
		return((vp->v_ubcinfo->ui_control));

	return (0);
}

off_t
ubc_blktooff(vnode_t vp, daddr64_t blkno)
{
	off_t file_offset;
	int error;

	if (UBCINVALID(vp))
		return ((off_t)-1);

	error = VNOP_BLKTOOFF(vp, blkno, &file_offset);
	if (error)
		file_offset = -1;

	return (file_offset);
}

daddr64_t
ubc_offtoblk(vnode_t vp, off_t offset)
{
	daddr64_t blkno;
	int error = 0;

	if (UBCINVALID(vp))
		return ((daddr64_t)-1);

	error = VNOP_OFFTOBLK(vp, offset, &blkno);
	if (error)
		blkno = -1;

	return (blkno);
}
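
/*
 * Example (illustrative sketch, not part of the original sources):
 * round-tripping a file offset through its logical block number with
 * ubc_offtoblk()/ubc_blktooff().  Either routine returns -1 when the
 * vnode is not UBC-capable or the underlying VNOP fails.
 */
#if 0	/* example only */
static void
example_offset_roundtrip(vnode_t vp, off_t offset)
{
	daddr64_t blkno;
	off_t blkstart;

	blkno = ubc_offtoblk(vp, offset);	/* offset -> logical block */
	if (blkno == (daddr64_t)-1)
		return;
	blkstart = ubc_blktooff(vp, blkno);	/* block -> starting offset */
	if (blkstart != (off_t)-1)
		printf("offset %lld is %lld bytes into block %lld\n",
		       offset, offset - blkstart, blkno);
}
#endif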

int
ubc_pages_resident(vnode_t vp)
{
	kern_return_t kret;
	boolean_t has_pages_resident;

	if ( !UBCINFOEXISTS(vp))
		return (0);

	kret = memory_object_pages_resident(vp->v_ubcinfo->ui_control, &has_pages_resident);

	if (kret != KERN_SUCCESS)
		return (0);

	if (has_pages_resident == TRUE)
		return (1);

	return (0);
}


/*
 * This interface will eventually be deprecated
 *
 * clean and/or invalidate a range in the memory object that backs this
 * vnode.  The start offset is truncated to the page boundary and the
 * size is adjusted to include the last page in the range.
 *
 * returns 1 for success, 0 for failure
 */
int
ubc_sync_range(vnode_t vp, off_t beg_off, off_t end_off, int flags)
{
	return (ubc_msync_internal(vp, beg_off, end_off, NULL, flags, NULL));
}


/*
 * clean and/or invalidate a range in the memory object that backs this
 * vnode.  The start offset is truncated to the page boundary and the
 * size is adjusted to include the last page in the range.
 * Returns 0 on success, EINVAL if the request failed without an I/O
 * error, or the I/O error reported by the pager.
 */
errno_t
ubc_msync(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags)
{
	int retval;
	int io_errno = 0;

	if (resid_off)
		*resid_off = beg_off;

	retval = ubc_msync_internal(vp, beg_off, end_off, resid_off, flags, &io_errno);

	if (retval == 0 && io_errno == 0)
		return (EINVAL);
	return (io_errno);
}
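
/*
 * Example (illustrative sketch, not part of the original sources): an
 * fsync-like path can push all dirty pages of a file to the backing
 * store and wait for the I/O with UBC_PUSHDIRTY | UBC_SYNC; adding
 * UBC_INVALIDATE would also toss the resident pages afterwards.
 */
#if 0	/* example only */
static errno_t
example_flush_file(vnode_t vp)
{
	return (ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL,
			  UBC_PUSHDIRTY | UBC_SYNC));
}
#endif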


/*
 * clean and/or invalidate a range in the memory object that backs this
 * vnode.  The start offset is truncated to the page boundary and the
 * size is adjusted to include the last page in the range.
 */
static int
ubc_msync_internal(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags, int *io_errno)
{
	memory_object_size_t	tsize;
	kern_return_t		kret;
	int request_flags = 0;
	int flush_flags   = MEMORY_OBJECT_RETURN_NONE;

	if ( !UBCINFOEXISTS(vp))
		return (0);
	if (end_off <= beg_off)
		return (0);
	if ((flags & (UBC_INVALIDATE | UBC_PUSHDIRTY | UBC_PUSHALL)) == 0)
		return (0);

	if (flags & UBC_INVALIDATE)
		/*
		 * discard the resident pages
		 */
		request_flags = (MEMORY_OBJECT_DATA_FLUSH | MEMORY_OBJECT_DATA_NO_CHANGE);

	if (flags & UBC_SYNC)
		/*
		 * wait for all the I/O to complete before returning
		 */
		request_flags |= MEMORY_OBJECT_IO_SYNC;

	if (flags & UBC_PUSHDIRTY)
		/*
		 * we only return the dirty pages in the range
		 */
		flush_flags = MEMORY_OBJECT_RETURN_DIRTY;

	if (flags & UBC_PUSHALL)
		/*
		 * then return all the interesting pages in the range (both dirty and precious)
		 * to the pager
		 */
		flush_flags = MEMORY_OBJECT_RETURN_ALL;

	beg_off = trunc_page_64(beg_off);
	end_off = round_page_64(end_off);
	tsize   = (memory_object_size_t)end_off - beg_off;

	/* flush and/or invalidate pages in the range requested */
	kret = memory_object_lock_request(vp->v_ubcinfo->ui_control,
					  beg_off, tsize, resid_off, io_errno,
					  flush_flags, request_flags, VM_PROT_NO_CHANGE);

	return ((kret == KERN_SUCCESS) ? 1 : 0);
}


/*
 * The vnode is mapped explicitly, mark it so.
 */
__private_extern__ int
ubc_map(vnode_t vp, int flags)
{
	struct ubc_info *uip;
	int error = 0;
	int need_ref = 0;
	struct vfs_context context;

	if (vnode_getwithref(vp))
		return (0);

	if (UBCINFOEXISTS(vp)) {
		context.vc_proc = current_proc();
		context.vc_ucred = kauth_cred_get();

		error = VNOP_MMAP(vp, flags, &context);

		if (error != EPERM)
			error = 0;

		if (error == 0) {
			vnode_lock(vp);

			uip = vp->v_ubcinfo;

			if ( !ISSET(uip->ui_flags, UI_ISMAPPED))
				need_ref = 1;
			SET(uip->ui_flags, (UI_WASMAPPED | UI_ISMAPPED));

			vnode_unlock(vp);

			if (need_ref)
				vnode_ref(vp);
		}
	}
	vnode_put(vp);

	return (error);
}

/*
 * destroy the named reference for a given vnode
 */
__private_extern__ int
ubc_destroy_named(struct vnode *vp)
{
	memory_object_control_t control;
	struct ubc_info *uip;
	kern_return_t kret;

	/*
	 * We may already have had the object terminated
	 * and the ubcinfo released as a side effect of
	 * some earlier processing.  If so, pretend we did
	 * it, because it probably was a result of our
	 * efforts.
	 */
	if (!UBCINFOEXISTS(vp))
		return (1);

	uip = vp->v_ubcinfo;

	/*
	 * Terminate the memory object.
	 * memory_object_destroy() will result in
	 * vnode_pager_no_senders().
	 * That will release the pager reference
	 * and the vnode will move to the free list.
	 */
	control = ubc_getobject(vp, UBC_HOLDOBJECT);
	if (control != MEMORY_OBJECT_CONTROL_NULL) {

		/*
		 * XXXXX - should we hold the vnode lock here?
		 */
		if (ISSET(vp->v_flag, VTERMINATE))
			panic("ubc_destroy_named: already terminating");
		SET(vp->v_flag, VTERMINATE);

		kret = memory_object_destroy(control, 0);
		if (kret != KERN_SUCCESS)
			return (0);

		/*
		 * memory_object_destroy() is asynchronous
		 * with respect to vnode_pager_no_senders().
		 * wait for vnode_pager_no_senders() to clear
		 * VTERMINATE
		 */
		vnode_lock(vp);
		while (ISSET(vp->v_lflag, VNAMED_UBC)) {
			(void)msleep((caddr_t)&vp->v_lflag, &vp->v_lock,
				     PINOD, "ubc_destroy_named", 0);
		}
		vnode_unlock(vp);
	}
	return (1);
}


/*
 * Find out whether a vnode is in use by UBC
 * Returns 1 if file is in use by UBC, 0 if not
 */
int
ubc_isinuse(struct vnode *vp, int busycount)
{
	if ( !UBCINFOEXISTS(vp))
		return (0);
	return(ubc_isinuse_locked(vp, busycount, 0));
}


int
ubc_isinuse_locked(struct vnode *vp, int busycount, int locked)
{
	int retval = 0;


	if (!locked)
		vnode_lock(vp);

	if ((vp->v_usecount - vp->v_kusecount) > busycount)
		retval = 1;

	if (!locked)
		vnode_unlock(vp);
	return (retval);
}
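
/*
 * Example (illustrative sketch, not part of the original sources): a
 * filesystem deciding whether a file is busy beyond its own use
 * reference (e.g. still mapped by someone else) passes the number of
 * references it accounts for itself as busycount.
 */
#if 0	/* example only */
static int
example_file_is_busy(vnode_t vp)
{
	/* we hold one use reference ourselves; anything beyond it is busy */
	return (ubc_isinuse(vp, 1));
}
#endif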

/*
 * MUST only be called by the VM
 */
__private_extern__ void
ubc_unmap(struct vnode *vp)
{
	struct vfs_context context;
	struct ubc_info *uip;
	int	need_rele = 0;

	if (vnode_getwithref(vp))
		return;

	if (UBCINFOEXISTS(vp)) {
		vnode_lock(vp);

		uip = vp->v_ubcinfo;
		if (ISSET(uip->ui_flags, UI_ISMAPPED)) {
			CLR(uip->ui_flags, UI_ISMAPPED);
			need_rele = 1;
		}
		vnode_unlock(vp);

		if (need_rele) {
			context.vc_proc = current_proc();
			context.vc_ucred = kauth_cred_get();
			(void)VNOP_MNOMAP(vp, &context);

			vnode_rele(vp);
		}
	}
	/*
	 * the drop of the vnode ref will cleanup
	 */
	vnode_put(vp);
}

kern_return_t
ubc_page_op(
	struct vnode	*vp,
	off_t		f_offset,
	int		ops,
	ppnum_t		*phys_entryp,
	int		*flagsp)
{
	memory_object_control_t	control;

	control = ubc_getobject(vp, UBC_FLAGS_NONE);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	return (memory_object_page_op(control,
				      (memory_object_offset_t)f_offset,
				      ops,
				      phys_entryp,
				      flagsp));
}

__private_extern__ kern_return_t
ubc_page_op_with_control(
	memory_object_control_t	control,
	off_t			f_offset,
	int			ops,
	ppnum_t			*phys_entryp,
	int			*flagsp)
{
	return (memory_object_page_op(control,
				      (memory_object_offset_t)f_offset,
				      ops,
				      phys_entryp,
				      flagsp));
}

kern_return_t
ubc_range_op(
	struct vnode	*vp,
	off_t		f_offset_beg,
	off_t		f_offset_end,
	int		ops,
	int		*range)
{
	memory_object_control_t	control;

	control = ubc_getobject(vp, UBC_FLAGS_NONE);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	return (memory_object_range_op(control,
				       (memory_object_offset_t)f_offset_beg,
				       (memory_object_offset_t)f_offset_end,
				       ops,
				       range));
}

kern_return_t
ubc_create_upl(
	struct vnode	*vp,
	off_t		f_offset,
	long		bufsize,
	upl_t		*uplp,
	upl_page_info_t	**plp,
	int		uplflags)
{
	memory_object_control_t	control;
	int			count;
	int			ubcflags;
	kern_return_t		kr;

	if (bufsize & 0xfff)
		return KERN_INVALID_ARGUMENT;

	if (uplflags & UPL_FOR_PAGEOUT) {
		uplflags &= ~UPL_FOR_PAGEOUT;
		ubcflags  =  UBC_FOR_PAGEOUT;
	} else
		ubcflags = UBC_FLAGS_NONE;

	control = ubc_getobject(vp, ubcflags);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	if (uplflags & UPL_WILL_BE_DUMPED) {
		uplflags &= ~UPL_WILL_BE_DUMPED;
		uplflags |= (UPL_NO_SYNC|UPL_SET_INTERNAL);
	} else
		uplflags |= (UPL_NO_SYNC|UPL_CLEAN_IN_PLACE|UPL_SET_INTERNAL);
	count = 0;
	kr = memory_object_upl_request(control, f_offset, bufsize,
				       uplp, NULL, &count, uplflags);
	if (plp != NULL)
		*plp = UPL_GET_INTERNAL_PAGE_LIST(*uplp);
	return kr;
}
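
/*
 * Example (illustrative sketch, not part of the original sources): the
 * typical UPL life cycle against a vnode -- create a page list for a
 * page-aligned range, map it into the kernel, operate on the data,
 * unmap, then commit (or abort on error).  The bzero() stands in for
 * real work a pager or filesystem would do.
 */
#if 0	/* example only */
static kern_return_t
example_upl_cycle(vnode_t vp, off_t f_offset, long bufsize)
{
	upl_t		upl;
	vm_offset_t	kva;
	kern_return_t	kr;

	/* f_offset and bufsize must be page aligned; 0 asks for default flags */
	kr = ubc_create_upl(vp, f_offset, bufsize, &upl, NULL, 0);
	if (kr != KERN_SUCCESS)
		return (kr);

	if ((kr = ubc_upl_map(upl, &kva)) != KERN_SUCCESS) {
		(void) ubc_upl_abort(upl, UPL_ABORT_ERROR);
		return (kr);
	}
	bzero((void *)kva, bufsize);	/* operate on the mapped pages */
	(void) ubc_upl_unmap(upl);

	return (ubc_upl_commit_range(upl, 0, bufsize,
		    UPL_COMMIT_FREE_ON_EMPTY | UPL_COMMIT_CLEAR_DIRTY));
}
#endif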


kern_return_t
ubc_upl_map(
	upl_t		upl,
	vm_offset_t	*dst_addr)
{
	return (vm_upl_map(kernel_map, upl, dst_addr));
}


kern_return_t
ubc_upl_unmap(
	upl_t	upl)
{
	return(vm_upl_unmap(kernel_map, upl));
}

kern_return_t
ubc_upl_commit(
	upl_t	upl)
{
	upl_page_info_t	*pl;
	kern_return_t	kr;

	pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
	kr = upl_commit(upl, pl, MAX_UPL_TRANSFER);
	upl_deallocate(upl);
	return kr;
}


kern_return_t
ubc_upl_commit_range(
	upl_t		upl,
	vm_offset_t	offset,
	vm_size_t	size,
	int		flags)
{
	upl_page_info_t	*pl;
	boolean_t	empty;
	kern_return_t	kr;

	if (flags & UPL_COMMIT_FREE_ON_EMPTY)
		flags |= UPL_COMMIT_NOTIFY_EMPTY;

	pl = UPL_GET_INTERNAL_PAGE_LIST(upl);

	kr = upl_commit_range(upl, offset, size, flags,
			      pl, MAX_UPL_TRANSFER, &empty);

	if ((flags & UPL_COMMIT_FREE_ON_EMPTY) && empty)
		upl_deallocate(upl);

	return kr;
}

kern_return_t
ubc_upl_abort_range(
	upl_t		upl,
	vm_offset_t	offset,
	vm_size_t	size,
	int		abort_flags)
{
	kern_return_t	kr;
	boolean_t	empty = FALSE;

	if (abort_flags & UPL_ABORT_FREE_ON_EMPTY)
		abort_flags |= UPL_ABORT_NOTIFY_EMPTY;

	kr = upl_abort_range(upl, offset, size, abort_flags, &empty);

	if ((abort_flags & UPL_ABORT_FREE_ON_EMPTY) && empty)
		upl_deallocate(upl);

	return kr;
}

kern_return_t
ubc_upl_abort(
	upl_t	upl,
	int	abort_type)
{
	kern_return_t	kr;

	kr = upl_abort(upl, abort_type);
	upl_deallocate(upl);
	return kr;
}

upl_page_info_t *
ubc_upl_pageinfo(
	upl_t	upl)
{
	return (UPL_GET_INTERNAL_PAGE_LIST(upl));
}

/************* UBC APIS **************/

int
UBCINFOMISSING(struct vnode * vp)
{
	return((vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo == UBC_INFO_NULL));
}

int
UBCINFORECLAIMED(struct vnode * vp)
{
	return((vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo == UBC_INFO_NULL));
}


int
UBCINFOEXISTS(struct vnode * vp)
{
	return((vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo != UBC_INFO_NULL));
}
int
UBCISVALID(struct vnode * vp)
{
	return((vp) && ((vp)->v_type == VREG) && !((vp)->v_flag & VSYSTEM));
}
int
UBCINVALID(struct vnode * vp)
{
	return(((vp) == NULL) || ((vp) && ((vp)->v_type != VREG))
	       || ((vp) && ((vp)->v_flag & VSYSTEM)));
}
int
UBCINFOCHECK(const char * fun, struct vnode * vp)
{
	if ((vp) && ((vp)->v_type == VREG) &&
	    ((vp)->v_ubcinfo == UBC_INFO_NULL)) {
		panic("%s: lost ubc_info", (fun));
		return(1);
	} else
		return(0);
}