/* bsd/kern/ubc_subr.c — Apple xnu-792.22.5 */
/*
 * Copyright (c) 1999-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 *	File:	ubc_subr.c
 *	Author:	Umesh Vaishampayan [umeshv@apple.com]
 *		05-Aug-1999	umeshv	Created.
 *
 *	Functions related to Unified Buffer cache.
 *
 * Caller of UBC functions MUST have a valid reference on the vnode.
 *
 */
38
0b4e3aa0 39#undef DIAGNOSTIC
1c79356b
A
40#define DIAGNOSTIC 1
41
42#include <sys/types.h>
43#include <sys/param.h>
44#include <sys/systm.h>
45#include <sys/lock.h>
91447636
A
46#include <sys/mman.h>
47#include <sys/mount_internal.h>
48#include <sys/vnode_internal.h>
49#include <sys/ubc_internal.h>
1c79356b 50#include <sys/ucred.h>
91447636
A
51#include <sys/proc_internal.h>
52#include <sys/kauth.h>
1c79356b 53#include <sys/buf.h>
13fec989 54#include <sys/user.h>
1c79356b
A
55
56#include <mach/mach_types.h>
57#include <mach/memory_object_types.h>
91447636
A
58#include <mach/memory_object_control.h>
59#include <mach/vm_map.h>
60#include <mach/upl.h>
1c79356b 61
91447636 62#include <kern/kern_types.h>
1c79356b 63#include <kern/zalloc.h>
13fec989 64#include <kern/thread.h>
91447636
A
65#include <vm/vm_kern.h>
66#include <vm/vm_protos.h> /* last */
1c79356b
A
67
68#if DIAGNOSTIC
69#if defined(assert)
70#undef assert()
71#endif
72#define assert(cond) \
9bccf70c 73 ((void) ((cond) ? 0 : panic("%s:%d (%s)", __FILE__, __LINE__, # cond)))
1c79356b
A
74#else
75#include <kern/assert.h>
76#endif /* DIAGNOSTIC */
77
91447636 78int ubc_info_init_internal(struct vnode *vp, int withfsize, off_t filesize);
4452a7af 79static int ubc_umcallback(vnode_t, void *);
91447636 80int ubc_isinuse_locked(vnode_t, int, int);
4452a7af 81static int ubc_msync_internal(vnode_t, off_t, off_t, off_t *, int, int *);
b4c24cb9 82
91447636 83struct zone *ubc_info_zone;
b4c24cb9 84
1c79356b
A
85/*
86 * Initialization of the zone for Unified Buffer Cache.
87 */
0b4e3aa0 88__private_extern__ void
1c79356b
A
89ubc_init()
90{
91 int i;
92
93 i = (vm_size_t) sizeof (struct ubc_info);
94 /* XXX the number of elements should be tied in to maxvnodes */
95 ubc_info_zone = zinit (i, 10000*i, 8192, "ubc_info zone");
96 return;
97}
98
/*
 * Initialize a ubc_info structure for a vnode.
 * The file size is looked up from the filesystem (withfsize == 0).
 */
int
ubc_info_init(struct vnode *vp)
{
	return(ubc_info_init_internal(vp, 0, 0));
}
107int
108ubc_info_init_withsize(struct vnode *vp, off_t filesize)
109{
110 return(ubc_info_init_internal(vp, 1, filesize));
111}
112
113int
114ubc_info_init_internal(struct vnode *vp, int withfsize, off_t filesize)
1c79356b
A
115{
116 register struct ubc_info *uip;
117 void * pager;
1c79356b
A
118 struct proc *p = current_proc();
119 int error = 0;
120 kern_return_t kret;
0b4e3aa0 121 memory_object_control_t control;
1c79356b 122
91447636 123 uip = vp->v_ubcinfo;
1c79356b 124
91447636 125 if (uip == UBC_INFO_NULL) {
1c79356b 126
1c79356b 127 uip = (struct ubc_info *) zalloc(ubc_info_zone);
91447636
A
128 bzero((char *)uip, sizeof(struct ubc_info));
129
1c79356b 130 uip->ui_vnode = vp;
91447636 131 uip->ui_flags = UI_INITED;
1c79356b
A
132 uip->ui_ucred = NOCRED;
133 }
0b4e3aa0
A
134#if DIAGNOSTIC
135 else
136 Debugger("ubc_info_init: already");
137#endif /* DIAGNOSTIC */
1c79356b
A
138
139 assert(uip->ui_flags != UI_NONE);
140 assert(uip->ui_vnode == vp);
141
1c79356b
A
142 /* now set this ubc_info in the vnode */
143 vp->v_ubcinfo = uip;
91447636 144
1c79356b
A
145 pager = (void *)vnode_pager_setup(vp, uip->ui_pager);
146 assert(pager);
91447636
A
147
148 SET(uip->ui_flags, UI_HASPAGER);
149 uip->ui_pager = pager;
1c79356b
A
150
151 /*
91447636 152 * Note: We can not use VNOP_GETATTR() to get accurate
0b4e3aa0 153 * value of ui_size. Thanks to NFS.
1c79356b
A
154 * nfs_getattr() can call vinvalbuf() and in this case
155 * ubc_info is not set up to deal with that.
156 * So use bogus size.
157 */
158
1c79356b 159 /*
0b4e3aa0
A
160 * create a vnode - vm_object association
161 * memory_object_create_named() creates a "named" reference on the
162 * memory object we hold this reference as long as the vnode is
163 * "alive." Since memory_object_create_named() took its own reference
164 * on the vnode pager we passed it, we can drop the reference
165 * vnode_pager_setup() returned here.
1c79356b 166 */
0b4e3aa0
A
167 kret = memory_object_create_named(pager,
168 (memory_object_size_t)uip->ui_size, &control);
169 vnode_pager_deallocate(pager);
170 if (kret != KERN_SUCCESS)
171 panic("ubc_info_init: memory_object_create_named returned %d", kret);
1c79356b 172
0b4e3aa0
A
173 assert(control);
174 uip->ui_control = control; /* cache the value of the mo control */
175 SET(uip->ui_flags, UI_HASOBJREF); /* with a named reference */
91447636 176#if 0
1c79356b 177 /* create a pager reference on the vnode */
0b4e3aa0 178 error = vnode_pager_vget(vp);
1c79356b 179 if (error)
0b4e3aa0 180 panic("ubc_info_init: vnode_pager_vget error = %d", error);
91447636
A
181#endif
182 if (withfsize == 0) {
183 struct vfs_context context;
184 /* initialize the size */
185 context.vc_proc = p;
186 context.vc_ucred = kauth_cred_get();
187 error = vnode_size(vp, &uip->ui_size, &context);
188 if (error)
189 uip->ui_size = 0;
190 } else {
191 uip->ui_size = filesize;
192 }
193 vp->v_lflag |= VNAMED_UBC;
1c79356b 194
0b4e3aa0 195 return (error);
1c79356b
A
196}
197
198/* Free the ubc_info */
0b4e3aa0
A
199static void
200ubc_info_free(struct ubc_info *uip)
1c79356b 201{
4452a7af
A
202 if (IS_VALID_CRED(uip->ui_ucred)) {
203 kauth_cred_unref(&uip->ui_ucred);
1c79356b 204 }
0b4e3aa0
A
205
206 if (uip->ui_control != MEMORY_OBJECT_CONTROL_NULL)
207 memory_object_control_deallocate(uip->ui_control);
91447636
A
208
209 cluster_release(uip);
0b4e3aa0 210
1c79356b
A
211 zfree(ubc_info_zone, (vm_offset_t)uip);
212 return;
213}
214
0b4e3aa0
A
/*
 * Public entry point for releasing a ubc_info; currently a straight
 * pass-through to ubc_info_free().
 */
void
ubc_info_deallocate(struct ubc_info *uip)
{
	ubc_info_free(uip);
}
220
1c79356b
A
221/*
222 * Communicate with VM the size change of the file
223 * returns 1 on success, 0 on failure
224 */
225int
226ubc_setsize(struct vnode *vp, off_t nsize)
227{
228 off_t osize; /* ui_size before change */
229 off_t lastpg, olastpgend, lastoff;
230 struct ubc_info *uip;
0b4e3aa0 231 memory_object_control_t control;
1c79356b 232 kern_return_t kret;
1c79356b 233
55e303ae
A
234 if (nsize < (off_t)0)
235 return (0);
1c79356b 236
1c79356b 237 if (!UBCINFOEXISTS(vp))
0b4e3aa0 238 return (0);
1c79356b
A
239
240 uip = vp->v_ubcinfo;
241 osize = uip->ui_size; /* call ubc_getsize() ??? */
242 /* Update the size before flushing the VM */
243 uip->ui_size = nsize;
244
245 if (nsize >= osize) /* Nothing more to do */
0b4e3aa0 246 return (1); /* return success */
1c79356b
A
247
248 /*
249 * When the file shrinks, invalidate the pages beyond the
250 * new size. Also get rid of garbage beyond nsize on the
251 * last page. The ui_size already has the nsize. This
252 * insures that the pageout would not write beyond the new
253 * end of the file.
254 */
255
1c79356b
A
256 lastpg = trunc_page_64(nsize);
257 olastpgend = round_page_64(osize);
0b4e3aa0
A
258 control = uip->ui_control;
259 assert(control);
1c79356b
A
260 lastoff = (nsize & PAGE_MASK_64);
261
262 /*
263 * If length is multiple of page size, we should not flush
264 * invalidating is sufficient
265 */
266 if (!lastoff) {
1c79356b 267 /* invalidate last page and old contents beyond nsize */
0b4e3aa0
A
268 kret = memory_object_lock_request(control,
269 (memory_object_offset_t)lastpg,
91447636 270 (memory_object_size_t)(olastpgend - lastpg), NULL, NULL,
0b4e3aa0
A
271 MEMORY_OBJECT_RETURN_NONE, MEMORY_OBJECT_DATA_FLUSH,
272 VM_PROT_NO_CHANGE);
1c79356b
A
273 if (kret != KERN_SUCCESS)
274 printf("ubc_setsize: invalidate failed (error = %d)\n", kret);
275
1c79356b
A
276 return ((kret == KERN_SUCCESS) ? 1 : 0);
277 }
278
1c79356b 279 /* flush the last page */
0b4e3aa0
A
280 kret = memory_object_lock_request(control,
281 (memory_object_offset_t)lastpg,
91447636 282 PAGE_SIZE_64, NULL, NULL,
0b4e3aa0
A
283 MEMORY_OBJECT_RETURN_DIRTY, FALSE,
284 VM_PROT_NO_CHANGE);
1c79356b
A
285
286 if (kret == KERN_SUCCESS) {
1c79356b 287 /* invalidate last page and old contents beyond nsize */
0b4e3aa0
A
288 kret = memory_object_lock_request(control,
289 (memory_object_offset_t)lastpg,
91447636 290 (memory_object_size_t)(olastpgend - lastpg), NULL, NULL,
0b4e3aa0
A
291 MEMORY_OBJECT_RETURN_NONE, MEMORY_OBJECT_DATA_FLUSH,
292 VM_PROT_NO_CHANGE);
1c79356b
A
293 if (kret != KERN_SUCCESS)
294 printf("ubc_setsize: invalidate failed (error = %d)\n", kret);
295 } else
296 printf("ubc_setsize: flush failed (error = %d)\n", kret);
297
1c79356b
A
298 return ((kret == KERN_SUCCESS) ? 1 : 0);
299}
300
301/*
302 * Get the size of the file
1c79356b
A
303 */
304off_t
305ubc_getsize(struct vnode *vp)
306{
91447636
A
307 /* people depend on the side effect of this working this way
308 * as they call this for directory
1c79356b 309 */
91447636
A
310 if (!UBCINFOEXISTS(vp))
311 return ((off_t)0);
312 return (vp->v_ubcinfo->ui_size);
1c79356b
A
313}
314
1c79356b 315/*
91447636 316 * call ubc_sync_range(vp, 0, EOF, UBC_PUSHALL) on all the vnodes
1c79356b
A
317 * for this mount point.
318 * returns 1 on success, 0 on failure
319 */
91447636 320
0b4e3aa0 321__private_extern__ int
1c79356b
A
322ubc_umount(struct mount *mp)
323{
91447636
A
324 vnode_iterate(mp, 0, ubc_umcallback, 0);
325 return(0);
1c79356b
A
326}
327
91447636
A
328static int
329ubc_umcallback(vnode_t vp, __unused void * args)
1c79356b 330{
1c79356b 331
91447636
A
332 if (UBCINFOEXISTS(vp)) {
333
334 cluster_push(vp, 0);
335
336 (void) ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL, UBC_PUSHALL);
1c79356b 337 }
91447636 338 return (VNODE_RETURNED);
1c79356b
A
339}
340
91447636
A
341
342
1c79356b 343/* Get the credentials */
91447636 344kauth_cred_t
1c79356b
A
345ubc_getcred(struct vnode *vp)
346{
91447636
A
347 if (UBCINFOEXISTS(vp))
348 return (vp->v_ubcinfo->ui_ucred);
1c79356b 349
91447636 350 return (NOCRED);
1c79356b
A
351}
352
13fec989
A
353int
354ubc_setthreadcred(struct vnode *vp, struct proc *p, thread_t thread)
355{
356 struct ubc_info *uip;
357 kauth_cred_t credp;
358 struct uthread *uthread = get_bsdthread_info(thread);
359
360 if (!UBCINFOEXISTS(vp))
361 return (1);
362
363 vnode_lock(vp);
364
365 uip = vp->v_ubcinfo;
366 credp = uip->ui_ucred;
367
4452a7af 368 if (!IS_VALID_CRED(credp)) {
13fec989
A
369 /* use per-thread cred, if assumed identity, else proc cred */
370 if (uthread == NULL || (uthread->uu_flag & UT_SETUID) == 0) {
371 uip->ui_ucred = kauth_cred_proc_ref(p);
372 } else {
373 uip->ui_ucred = uthread->uu_ucred;
374 kauth_cred_ref(uip->ui_ucred);
375 }
376 }
377 vnode_unlock(vp);
378
379 return (0);
380}
381
1c79356b
A
382/*
383 * Set the credentials
384 * existing credentials are not changed
385 * returns 1 on success and 0 on failure
386 */
1c79356b
A
387int
388ubc_setcred(struct vnode *vp, struct proc *p)
389{
390 struct ubc_info *uip;
91447636 391 kauth_cred_t credp;
1c79356b 392
91447636 393 if ( !UBCINFOEXISTS(vp))
1c79356b 394 return (0);
1c79356b 395
91447636
A
396 vnode_lock(vp);
397
398 uip = vp->v_ubcinfo;
1c79356b 399 credp = uip->ui_ucred;
91447636 400
4452a7af 401 if (!IS_VALID_CRED(credp)) {
91447636 402 uip->ui_ucred = kauth_cred_proc_ref(p);
1c79356b 403 }
91447636 404 vnode_unlock(vp);
1c79356b
A
405
406 return (1);
407}
408
409/* Get the pager */
0b4e3aa0 410__private_extern__ memory_object_t
1c79356b
A
411ubc_getpager(struct vnode *vp)
412{
91447636
A
413 if (UBCINFOEXISTS(vp))
414 return (vp->v_ubcinfo->ui_pager);
1c79356b 415
91447636 416 return (0);
1c79356b
A
417}
418
419/*
420 * Get the memory object associated with this vnode
421 * If the vnode was reactivated, memory object would not exist.
422 * Unless "do not rectivate" was specified, look it up using the pager.
1c79356b
A
423 * If hold was requested create an object reference of one does not
424 * exist already.
425 */
426
0b4e3aa0 427memory_object_control_t
91447636 428ubc_getobject(struct vnode *vp, __unused int flags)
1c79356b 429{
91447636
A
430 if (UBCINFOEXISTS(vp))
431 return((vp->v_ubcinfo->ui_control));
1c79356b 432
91447636 433 return (0);
1c79356b
A
434}
435
1c79356b
A
436
437off_t
91447636 438ubc_blktooff(vnode_t vp, daddr64_t blkno)
1c79356b
A
439{
440 off_t file_offset;
441 int error;
442
91447636
A
443 if (UBCINVALID(vp))
444 return ((off_t)-1);
1c79356b 445
91447636 446 error = VNOP_BLKTOOFF(vp, blkno, &file_offset);
1c79356b
A
447 if (error)
448 file_offset = -1;
449
450 return (file_offset);
451}
0b4e3aa0 452
91447636
A
453daddr64_t
454ubc_offtoblk(vnode_t vp, off_t offset)
1c79356b 455{
91447636 456 daddr64_t blkno;
0b4e3aa0 457 int error = 0;
1c79356b 458
91447636
A
459 if (UBCINVALID(vp))
460 return ((daddr64_t)-1);
1c79356b 461
91447636 462 error = VNOP_OFFTOBLK(vp, offset, &blkno);
1c79356b
A
463 if (error)
464 blkno = -1;
465
466 return (blkno);
467}
468
1c79356b 469int
91447636 470ubc_pages_resident(vnode_t vp)
1c79356b 471{
91447636
A
472 kern_return_t kret;
473 boolean_t has_pages_resident;
474
475 if ( !UBCINFOEXISTS(vp))
0b4e3aa0 476 return (0);
91447636
A
477
478 kret = memory_object_pages_resident(vp->v_ubcinfo->ui_control, &has_pages_resident);
479
480 if (kret != KERN_SUCCESS)
0b4e3aa0 481 return (0);
91447636
A
482
483 if (has_pages_resident == TRUE)
484 return (1);
485
486 return (0);
487}
1c79356b 488
1c79356b 489
1c79356b
A
490
491/*
91447636
A
492 * This interface will eventually be deprecated
493 *
494 * clean and/or invalidate a range in the memory object that backs this
495 * vnode. The start offset is truncated to the page boundary and the
496 * size is adjusted to include the last page in the range.
497 *
498 * returns 1 for success, 0 for failure
1c79356b
A
499 */
500int
91447636 501ubc_sync_range(vnode_t vp, off_t beg_off, off_t end_off, int flags)
1c79356b 502{
91447636 503 return (ubc_msync_internal(vp, beg_off, end_off, NULL, flags, NULL));
0b4e3aa0
A
504}
505
91447636 506
0b4e3aa0 507/*
91447636
A
508 * clean and/or invalidate a range in the memory object that backs this
509 * vnode. The start offset is truncated to the page boundary and the
510 * size is adjusted to include the last page in the range.
511 * if a
0b4e3aa0 512 */
91447636
A
513errno_t
514ubc_msync(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags)
0b4e3aa0 515{
91447636
A
516 int retval;
517 int io_errno = 0;
518
519 if (resid_off)
520 *resid_off = beg_off;
0b4e3aa0 521
91447636 522 retval = ubc_msync_internal(vp, beg_off, end_off, resid_off, flags, &io_errno);
0b4e3aa0 523
91447636
A
524 if (retval == 0 && io_errno == 0)
525 return (EINVAL);
526 return (io_errno);
527}
0b4e3aa0 528
1c79356b 529
1c79356b
A
530
531/*
91447636
A
532 * clean and/or invalidate a range in the memory object that backs this
533 * vnode. The start offset is truncated to the page boundary and the
534 * size is adjusted to include the last page in the range.
1c79356b 535 */
91447636
A
536static int
537ubc_msync_internal(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags, int *io_errno)
1c79356b 538{
91447636
A
539 memory_object_size_t tsize;
540 kern_return_t kret;
541 int request_flags = 0;
542 int flush_flags = MEMORY_OBJECT_RETURN_NONE;
543
544 if ( !UBCINFOEXISTS(vp))
545 return (0);
546 if (end_off <= beg_off)
547 return (0);
548 if ((flags & (UBC_INVALIDATE | UBC_PUSHDIRTY | UBC_PUSHALL)) == 0)
549 return (0);
550
551 if (flags & UBC_INVALIDATE)
552 /*
553 * discard the resident pages
554 */
555 request_flags = (MEMORY_OBJECT_DATA_FLUSH | MEMORY_OBJECT_DATA_NO_CHANGE);
1c79356b 556
91447636
A
557 if (flags & UBC_SYNC)
558 /*
559 * wait for all the I/O to complete before returning
55e303ae 560 */
91447636 561 request_flags |= MEMORY_OBJECT_IO_SYNC;
55e303ae 562
91447636
A
563 if (flags & UBC_PUSHDIRTY)
564 /*
565 * we only return the dirty pages in the range
566 */
567 flush_flags = MEMORY_OBJECT_RETURN_DIRTY;
0b4e3aa0 568
91447636
A
569 if (flags & UBC_PUSHALL)
570 /*
571 * then return all the interesting pages in the range (both dirty and precious)
572 * to the pager
573 */
574 flush_flags = MEMORY_OBJECT_RETURN_ALL;
0b4e3aa0 575
91447636
A
576 beg_off = trunc_page_64(beg_off);
577 end_off = round_page_64(end_off);
578 tsize = (memory_object_size_t)end_off - beg_off;
b4c24cb9 579
91447636
A
580 /* flush and/or invalidate pages in the range requested */
581 kret = memory_object_lock_request(vp->v_ubcinfo->ui_control,
582 beg_off, tsize, resid_off, io_errno,
583 flush_flags, request_flags, VM_PROT_NO_CHANGE);
584
585 return ((kret == KERN_SUCCESS) ? 1 : 0);
1c79356b
A
586}
587
1c79356b
A
588
589/*
0b4e3aa0 590 * The vnode is mapped explicitly, mark it so.
1c79356b 591 */
91447636
A
592__private_extern__ int
593ubc_map(vnode_t vp, int flags)
1c79356b
A
594{
595 struct ubc_info *uip;
91447636
A
596 int error = 0;
597 int need_ref = 0;
598 struct vfs_context context;
1c79356b 599
91447636
A
600 if (vnode_getwithref(vp))
601 return (0);
1c79356b 602
91447636
A
603 if (UBCINFOEXISTS(vp)) {
604 context.vc_proc = current_proc();
605 context.vc_ucred = kauth_cred_get();
1c79356b 606
91447636 607 error = VNOP_MMAP(vp, flags, &context);
1c79356b 608
91447636
A
609 if (error != EPERM)
610 error = 0;
1c79356b 611
91447636
A
612 if (error == 0) {
613 vnode_lock(vp);
614
615 uip = vp->v_ubcinfo;
1c79356b 616
91447636
A
617 if ( !ISSET(uip->ui_flags, UI_ISMAPPED))
618 need_ref = 1;
619 SET(uip->ui_flags, (UI_WASMAPPED | UI_ISMAPPED));
55e303ae 620
91447636
A
621 vnode_unlock(vp);
622
623 if (need_ref)
624 vnode_ref(vp);
55e303ae 625 }
b4c24cb9 626 }
91447636 627 vnode_put(vp);
b4c24cb9 628
91447636 629 return (error);
0b4e3aa0
A
630}
631
632/*
633 * destroy the named reference for a given vnode
634 */
635__private_extern__ int
91447636 636ubc_destroy_named(struct vnode *vp)
0b4e3aa0
A
637{
638 memory_object_control_t control;
0b4e3aa0
A
639 struct ubc_info *uip;
640 kern_return_t kret;
641
642 /*
643 * We may already have had the object terminated
644 * and the ubcinfo released as a side effect of
645 * some earlier processing. If so, pretend we did
646 * it, because it probably was a result of our
647 * efforts.
648 */
649 if (!UBCINFOEXISTS(vp))
1c79356b 650 return (1);
0b4e3aa0
A
651
652 uip = vp->v_ubcinfo;
653
0b4e3aa0
A
654 /*
655 * Terminate the memory object.
656 * memory_object_destroy() will result in
657 * vnode_pager_no_senders().
658 * That will release the pager reference
659 * and the vnode will move to the free list.
660 */
661 control = ubc_getobject(vp, UBC_HOLDOBJECT);
662 if (control != MEMORY_OBJECT_CONTROL_NULL) {
663
91447636
A
664 /*
665 * XXXXX - should we hold the vnode lock here?
666 */
0b4e3aa0
A
667 if (ISSET(vp->v_flag, VTERMINATE))
668 panic("ubc_destroy_named: already teminating");
669 SET(vp->v_flag, VTERMINATE);
670
671 kret = memory_object_destroy(control, 0);
672 if (kret != KERN_SUCCESS)
673 return (0);
674
675 /*
676 * memory_object_destroy() is asynchronous
677 * with respect to vnode_pager_no_senders().
678 * wait for vnode_pager_no_senders() to clear
679 * VTERMINATE
680 */
91447636
A
681 vnode_lock(vp);
682 while (ISSET(vp->v_lflag, VNAMED_UBC)) {
683 (void)msleep((caddr_t)&vp->v_lflag, &vp->v_lock,
0b4e3aa0
A
684 PINOD, "ubc_destroy_named", 0);
685 }
91447636 686 vnode_unlock(vp);
0b4e3aa0
A
687 }
688 return (1);
1c79356b
A
689}
690
0b4e3aa0 691
/*
 * Find out whether a vnode is in use by UBC
 * Returns 1 if file is in use by UBC, 0 if not
 */
int
ubc_isinuse(struct vnode *vp, int busycount)
{
	if ( !UBCINFOEXISTS(vp))
		return (0);
	return(ubc_isinuse_locked(vp, busycount, 0));
}
703
91447636 704
1c79356b 705int
91447636 706ubc_isinuse_locked(struct vnode *vp, int busycount, int locked)
1c79356b 707{
91447636 708 int retval = 0;
1c79356b 709
9bccf70c 710
91447636
A
711 if (!locked)
712 vnode_lock(vp);
1c79356b 713
91447636
A
714 if ((vp->v_usecount - vp->v_kusecount) > busycount)
715 retval = 1;
716
717 if (!locked)
718 vnode_unlock(vp);
719 return (retval);
1c79356b
A
720}
721
91447636 722
1c79356b 723/*
1c79356b 724 * MUST only be called by the VM
1c79356b 725 */
0b4e3aa0 726__private_extern__ void
1c79356b
A
727ubc_unmap(struct vnode *vp)
728{
91447636 729 struct vfs_context context;
1c79356b 730 struct ubc_info *uip;
91447636 731 int need_rele = 0;
1c79356b 732
91447636
A
733 if (vnode_getwithref(vp))
734 return;
1c79356b 735
91447636
A
736 if (UBCINFOEXISTS(vp)) {
737 vnode_lock(vp);
1c79356b 738
91447636
A
739 uip = vp->v_ubcinfo;
740 if (ISSET(uip->ui_flags, UI_ISMAPPED)) {
741 CLR(uip->ui_flags, UI_ISMAPPED);
742 need_rele = 1;
743 }
744 vnode_unlock(vp);
745
746 if (need_rele) {
747 context.vc_proc = current_proc();
748 context.vc_ucred = kauth_cred_get();
749 (void)VNOP_MNOMAP(vp, &context);
750
751 vnode_rele(vp);
752 }
753 }
754 /*
755 * the drop of the vnode ref will cleanup
756 */
757 vnode_put(vp);
0b4e3aa0
A
758}
759
760kern_return_t
761ubc_page_op(
762 struct vnode *vp,
763 off_t f_offset,
764 int ops,
55e303ae 765 ppnum_t *phys_entryp,
0b4e3aa0
A
766 int *flagsp)
767{
768 memory_object_control_t control;
769
770 control = ubc_getobject(vp, UBC_FLAGS_NONE);
771 if (control == MEMORY_OBJECT_CONTROL_NULL)
772 return KERN_INVALID_ARGUMENT;
773
774 return (memory_object_page_op(control,
775 (memory_object_offset_t)f_offset,
776 ops,
777 phys_entryp,
778 flagsp));
779}
780
55e303ae
A
781__private_extern__ kern_return_t
782ubc_page_op_with_control(
783 memory_object_control_t control,
784 off_t f_offset,
785 int ops,
786 ppnum_t *phys_entryp,
787 int *flagsp)
788{
789 return (memory_object_page_op(control,
790 (memory_object_offset_t)f_offset,
791 ops,
792 phys_entryp,
793 flagsp));
794}
795
796kern_return_t
797ubc_range_op(
798 struct vnode *vp,
799 off_t f_offset_beg,
800 off_t f_offset_end,
801 int ops,
802 int *range)
803{
804 memory_object_control_t control;
805
806 control = ubc_getobject(vp, UBC_FLAGS_NONE);
807 if (control == MEMORY_OBJECT_CONTROL_NULL)
808 return KERN_INVALID_ARGUMENT;
809
810 return (memory_object_range_op(control,
811 (memory_object_offset_t)f_offset_beg,
812 (memory_object_offset_t)f_offset_end,
813 ops,
814 range));
815}
816
0b4e3aa0
A
817kern_return_t
818ubc_create_upl(
819 struct vnode *vp,
820 off_t f_offset,
821 long bufsize,
822 upl_t *uplp,
823 upl_page_info_t **plp,
824 int uplflags)
825{
826 memory_object_control_t control;
55e303ae
A
827 int count;
828 int ubcflags;
55e303ae 829 kern_return_t kr;
0b4e3aa0
A
830
831 if (bufsize & 0xfff)
832 return KERN_INVALID_ARGUMENT;
833
55e303ae
A
834 if (uplflags & UPL_FOR_PAGEOUT) {
835 uplflags &= ~UPL_FOR_PAGEOUT;
836 ubcflags = UBC_FOR_PAGEOUT;
837 } else
838 ubcflags = UBC_FLAGS_NONE;
839
840 control = ubc_getobject(vp, ubcflags);
0b4e3aa0
A
841 if (control == MEMORY_OBJECT_CONTROL_NULL)
842 return KERN_INVALID_ARGUMENT;
843
55e303ae
A
844 if (uplflags & UPL_WILL_BE_DUMPED) {
845 uplflags &= ~UPL_WILL_BE_DUMPED;
846 uplflags |= (UPL_NO_SYNC|UPL_SET_INTERNAL);
847 } else
848 uplflags |= (UPL_NO_SYNC|UPL_CLEAN_IN_PLACE|UPL_SET_INTERNAL);
0b4e3aa0
A
849 count = 0;
850 kr = memory_object_upl_request(control, f_offset, bufsize,
851 uplp, NULL, &count, uplflags);
852 if (plp != NULL)
853 *plp = UPL_GET_INTERNAL_PAGE_LIST(*uplp);
854 return kr;
855}
856
857
858kern_return_t
859ubc_upl_map(
860 upl_t upl,
861 vm_offset_t *dst_addr)
862{
863 return (vm_upl_map(kernel_map, upl, dst_addr));
864}
865
866
867kern_return_t
868ubc_upl_unmap(
869 upl_t upl)
870{
871 return(vm_upl_unmap(kernel_map, upl));
872}
873
874kern_return_t
875ubc_upl_commit(
876 upl_t upl)
877{
878 upl_page_info_t *pl;
879 kern_return_t kr;
880
881 pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
882 kr = upl_commit(upl, pl, MAX_UPL_TRANSFER);
883 upl_deallocate(upl);
884 return kr;
1c79356b
A
885}
886
0b4e3aa0
A
887
888kern_return_t
889ubc_upl_commit_range(
890 upl_t upl,
891 vm_offset_t offset,
892 vm_size_t size,
893 int flags)
894{
895 upl_page_info_t *pl;
896 boolean_t empty;
897 kern_return_t kr;
898
899 if (flags & UPL_COMMIT_FREE_ON_EMPTY)
900 flags |= UPL_COMMIT_NOTIFY_EMPTY;
901
902 pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
903
904 kr = upl_commit_range(upl, offset, size, flags,
905 pl, MAX_UPL_TRANSFER, &empty);
906
907 if((flags & UPL_COMMIT_FREE_ON_EMPTY) && empty)
908 upl_deallocate(upl);
909
910 return kr;
911}
912
913kern_return_t
914ubc_upl_abort_range(
915 upl_t upl,
916 vm_offset_t offset,
917 vm_size_t size,
918 int abort_flags)
919{
920 kern_return_t kr;
921 boolean_t empty = FALSE;
922
923 if (abort_flags & UPL_ABORT_FREE_ON_EMPTY)
924 abort_flags |= UPL_ABORT_NOTIFY_EMPTY;
925
926 kr = upl_abort_range(upl, offset, size, abort_flags, &empty);
927
928 if((abort_flags & UPL_ABORT_FREE_ON_EMPTY) && empty)
929 upl_deallocate(upl);
930
931 return kr;
932}
933
934kern_return_t
935ubc_upl_abort(
936 upl_t upl,
937 int abort_type)
938{
939 kern_return_t kr;
940
941 kr = upl_abort(upl, abort_type);
942 upl_deallocate(upl);
943 return kr;
944}
945
946upl_page_info_t *
947ubc_upl_pageinfo(
948 upl_t upl)
949{
950 return (UPL_GET_INTERNAL_PAGE_LIST(upl));
951}
91447636
A
952
953/************* UBC APIS **************/
954
955int
956UBCINFOMISSING(struct vnode * vp)
957{
958 return((vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo == UBC_INFO_NULL));
959}
960
961int
962UBCINFORECLAIMED(struct vnode * vp)
963{
964 return((vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo == UBC_INFO_NULL));
965}
966
967
968int
969UBCINFOEXISTS(struct vnode * vp)
970{
971 return((vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo != UBC_INFO_NULL));
972}
973int
974UBCISVALID(struct vnode * vp)
975{
976 return((vp) && ((vp)->v_type == VREG) && !((vp)->v_flag & VSYSTEM));
977}
978int
979UBCINVALID(struct vnode * vp)
980{
981 return(((vp) == NULL) || ((vp) && ((vp)->v_type != VREG))
982 || ((vp) && ((vp)->v_flag & VSYSTEM)));
983}
984int
985UBCINFOCHECK(const char * fun, struct vnode * vp)
986{
987 if ((vp) && ((vp)->v_type == VREG) &&
988 ((vp)->v_ubcinfo == UBC_INFO_NULL)) {
989 panic("%s: lost ubc_info", (fun));
990 return(1);
991 } else
992 return(0);
993}
994