/*
 * Copyright (c) 1999-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 *	File:	ubc_subr.c
 *	Author:	Umesh Vaishampayan [umeshv@apple.com]
 *		05-Aug-1999	umeshv	Created.
 *
 *	Functions related to Unified Buffer cache.
 *
 * Caller of UBC functions MUST have a valid reference on the vnode.
 *
 */

#undef DIAGNOSTIC
#define DIAGNOSTIC 1

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mount_internal.h>
#include <sys/vnode_internal.h>
#include <sys/ubc_internal.h>
#include <sys/ucred.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/buf.h>
#include <sys/user.h>

#include <mach/mach_types.h>
#include <mach/memory_object_types.h>
#include <mach/memory_object_control.h>
#include <mach/vm_map.h>
#include <mach/upl.h>

#include <kern/kern_types.h>
#include <kern/zalloc.h>
#include <kern/thread.h>
#include <vm/vm_kern.h>
#include <vm/vm_protos.h>	/* last */

#if DIAGNOSTIC
#if defined(assert)
#undef assert
#endif
#define assert(cond)	\
	((void) ((cond) ? 0 : panic("%s:%d (%s)", __FILE__, __LINE__, # cond)))
#else
#include <kern/assert.h>
#endif /* DIAGNOSTIC */

int	ubc_info_init_internal(struct vnode *vp, int withfsize, off_t filesize);
static int ubc_umcallback(vnode_t, void *);
int	ubc_isinuse_locked(vnode_t, int, int);
static int ubc_msync_internal(vnode_t, off_t, off_t, off_t *, int, int *);

struct zone	*ubc_info_zone;

/*
 * Initialization of the zone for Unified Buffer Cache.
 */
__private_extern__ void
ubc_init()
{
	int	i;

	i = (vm_size_t) sizeof (struct ubc_info);
	/* XXX the number of elements should be tied in to maxvnodes */
	ubc_info_zone = zinit(i, 10000*i, 8192, "ubc_info zone");
	return;
}

/*
 * Initialize a ubc_info structure for a vnode.
 */
int
ubc_info_init(struct vnode *vp)
{
	return(ubc_info_init_internal(vp, 0, 0));
}
int
ubc_info_init_withsize(struct vnode *vp, off_t filesize)
{
	return(ubc_info_init_internal(vp, 1, filesize));
}

int
ubc_info_init_internal(struct vnode *vp, int withfsize, off_t filesize)
{
	register struct ubc_info	*uip;
	void *	pager;
	struct proc *p = current_proc();
	int error = 0;
	kern_return_t kret;
	memory_object_control_t control;

	uip = vp->v_ubcinfo;

	if (uip == UBC_INFO_NULL) {

		uip = (struct ubc_info *) zalloc(ubc_info_zone);
		bzero((char *)uip, sizeof(struct ubc_info));

		uip->ui_vnode = vp;
		uip->ui_flags = UI_INITED;
		uip->ui_ucred = NOCRED;
	}
#if DIAGNOSTIC
	else
		Debugger("ubc_info_init: already");
#endif /* DIAGNOSTIC */

	assert(uip->ui_flags != UI_NONE);
	assert(uip->ui_vnode == vp);

	/* now set this ubc_info in the vnode */
	vp->v_ubcinfo = uip;

	pager = (void *)vnode_pager_setup(vp, uip->ui_pager);
	assert(pager);

	SET(uip->ui_flags, UI_HASPAGER);
	uip->ui_pager = pager;

	/*
	 * Note: We cannot use VNOP_GETATTR() to get an accurate
	 * value of ui_size. Thanks to NFS:
	 * nfs_getattr() can call vinvalbuf(), and at that point the
	 * ubc_info is not set up to deal with it.
	 * So use a bogus size.
	 */

	/*
	 * create a vnode - vm_object association:
	 * memory_object_create_named() creates a "named" reference on the
	 * memory object; we hold this reference as long as the vnode is
	 * "alive."  Since memory_object_create_named() took its own reference
	 * on the vnode pager we passed it, we can drop the reference
	 * vnode_pager_setup() returned here.
	 */
	kret = memory_object_create_named(pager,
		(memory_object_size_t)uip->ui_size, &control);
	vnode_pager_deallocate(pager);
	if (kret != KERN_SUCCESS)
		panic("ubc_info_init: memory_object_create_named returned %d", kret);

	assert(control);
	uip->ui_control = control;	/* cache the value of the mo control */
	SET(uip->ui_flags, UI_HASOBJREF);	/* with a named reference */
#if 0
	/* create a pager reference on the vnode */
	error = vnode_pager_vget(vp);
	if (error)
		panic("ubc_info_init: vnode_pager_vget error = %d", error);
#endif
	if (withfsize == 0) {
		struct vfs_context context;
		/* initialize the size */
		context.vc_proc = p;
		context.vc_ucred = kauth_cred_get();
		error = vnode_size(vp, &uip->ui_size, &context);
		if (error)
			uip->ui_size = 0;
	} else {
		uip->ui_size = filesize;
	}
	vp->v_lflag |= VNAMED_UBC;

	return (error);
}

/* Free the ubc_info */
static void
ubc_info_free(struct ubc_info *uip)
{
	kauth_cred_t credp;

	credp = uip->ui_ucred;
	if (credp != NOCRED) {
		uip->ui_ucred = NOCRED;
		kauth_cred_rele(credp);
	}

	if (uip->ui_control != MEMORY_OBJECT_CONTROL_NULL)
		memory_object_control_deallocate(uip->ui_control);

	cluster_release(uip);

	zfree(ubc_info_zone, (vm_offset_t)uip);
	return;
}

void
ubc_info_deallocate(struct ubc_info *uip)
{
	ubc_info_free(uip);
}

/*
 * Communicate with the VM the size change of the file
 * returns 1 on success, 0 on failure
 */
int
ubc_setsize(struct vnode *vp, off_t nsize)
{
	off_t osize;	/* ui_size before change */
	off_t lastpg, olastpgend, lastoff;
	struct ubc_info *uip;
	memory_object_control_t control;
	kern_return_t kret;

	if (nsize < (off_t)0)
		return (0);

	if (!UBCINFOEXISTS(vp))
		return (0);

	uip = vp->v_ubcinfo;
	osize = uip->ui_size;	/* call ubc_getsize() ??? */
	/* Update the size before flushing the VM */
	uip->ui_size = nsize;

	if (nsize >= osize)	/* Nothing more to do */
		return (1);	/* return success */

	/*
	 * When the file shrinks, invalidate the pages beyond the
	 * new size.  Also get rid of garbage beyond nsize on the
	 * last page.  The ui_size already has the nsize, which
	 * ensures that the pageout would not write beyond the new
	 * end of the file.
	 */

	lastpg = trunc_page_64(nsize);
	olastpgend = round_page_64(osize);
	control = uip->ui_control;
	assert(control);
	lastoff = (nsize & PAGE_MASK_64);

	/*
	 * If the new length is a multiple of the page size, we need
	 * not flush the last page; invalidating is sufficient.
	 */
	if (!lastoff) {
		/* invalidate last page and old contents beyond nsize */
		kret = memory_object_lock_request(control,
			(memory_object_offset_t)lastpg,
			(memory_object_size_t)(olastpgend - lastpg), NULL, NULL,
			MEMORY_OBJECT_RETURN_NONE, MEMORY_OBJECT_DATA_FLUSH,
			VM_PROT_NO_CHANGE);
		if (kret != KERN_SUCCESS)
			printf("ubc_setsize: invalidate failed (error = %d)\n", kret);

		return ((kret == KERN_SUCCESS) ? 1 : 0);
	}

	/* flush the last page */
	kret = memory_object_lock_request(control,
		(memory_object_offset_t)lastpg,
		PAGE_SIZE_64, NULL, NULL,
		MEMORY_OBJECT_RETURN_DIRTY, FALSE,
		VM_PROT_NO_CHANGE);

	if (kret == KERN_SUCCESS) {
		/* invalidate last page and old contents beyond nsize */
		kret = memory_object_lock_request(control,
			(memory_object_offset_t)lastpg,
			(memory_object_size_t)(olastpgend - lastpg), NULL, NULL,
			MEMORY_OBJECT_RETURN_NONE, MEMORY_OBJECT_DATA_FLUSH,
			VM_PROT_NO_CHANGE);
		if (kret != KERN_SUCCESS)
			printf("ubc_setsize: invalidate failed (error = %d)\n", kret);
	} else
		printf("ubc_setsize: flush failed (error = %d)\n", kret);

	return ((kret == KERN_SUCCESS) ? 1 : 0);
}

/*
 * Get the size of the file
 */
off_t
ubc_getsize(struct vnode *vp)
{
	/*
	 * Callers depend on the side effect that this returns 0
	 * when no ubc_info exists, as they call it for directories.
	 */
	if (!UBCINFOEXISTS(vp))
		return ((off_t)0);
	return (vp->v_ubcinfo->ui_size);
}

/*
 * Push out any dirty pages -- cluster_push() plus
 * ubc_msync(vp, 0, EOF, NULL, UBC_PUSHALL) -- on all the vnodes
 * for this mount point; called at unmount time.
 * Always returns 0.
 */
__private_extern__ int
ubc_umount(struct mount *mp)
{
	vnode_iterate(mp, 0, ubc_umcallback, 0);
	return(0);
}

static int
ubc_umcallback(vnode_t vp, __unused void * args)
{

	if (UBCINFOEXISTS(vp)) {

		cluster_push(vp, 0);

		(void) ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL, UBC_PUSHALL);
	}
	return (VNODE_RETURNED);
}


/* Get the credentials */
kauth_cred_t
ubc_getcred(struct vnode *vp)
{
	if (UBCINFOEXISTS(vp))
		return (vp->v_ubcinfo->ui_ucred);

	return (NOCRED);
}

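/*
 * If no credential is set on this vnode's ubc_info yet, set one:
 * the per-thread credential when the thread has assumed an identity
 * (UT_SETUID), otherwise a reference on the process credential.
 * An existing credential is left unchanged.
 * Returns 0 on success, 1 if the vnode has no ubc_info.
 */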
int
ubc_setthreadcred(struct vnode *vp, struct proc *p, thread_t thread)
{
	struct ubc_info *uip;
	kauth_cred_t credp;
	struct uthread *uthread = get_bsdthread_info(thread);

	if (!UBCINFOEXISTS(vp))
		return (1);

	vnode_lock(vp);

	uip = vp->v_ubcinfo;
	credp = uip->ui_ucred;

	if (credp == NOCRED) {
		/* use per-thread cred, if assumed identity, else proc cred */
		if (uthread == NULL || (uthread->uu_flag & UT_SETUID) == 0) {
			uip->ui_ucred = kauth_cred_proc_ref(p);
		} else {
			uip->ui_ucred = uthread->uu_ucred;
			kauth_cred_ref(uip->ui_ucred);
		}
	}
	vnode_unlock(vp);

	return (0);
}

/*
 * Set the credentials
 * existing credentials are not changed
 * returns 1 on success and 0 on failure
 */
int
ubc_setcred(struct vnode *vp, struct proc *p)
{
	struct ubc_info *uip;
	kauth_cred_t credp;

	if ( !UBCINFOEXISTS(vp))
		return (0);

	vnode_lock(vp);

	uip = vp->v_ubcinfo;
	credp = uip->ui_ucred;

	if (credp == NOCRED) {
		uip->ui_ucred = kauth_cred_proc_ref(p);
	}
	vnode_unlock(vp);

	return (1);
}

/* Get the pager */
__private_extern__ memory_object_t
ubc_getpager(struct vnode *vp)
{
	if (UBCINFOEXISTS(vp))
		return (vp->v_ubcinfo->ui_pager);

	return (0);
}

/*
 * Get the memory object associated with this vnode.
 * If the vnode was reactivated, the memory object would not exist.
 * Unless "do not reactivate" was specified, look it up using the pager.
 * If hold was requested, create an object reference if one does not
 * exist already.
 */
memory_object_control_t
ubc_getobject(struct vnode *vp, __unused int flags)
{
	if (UBCINFOEXISTS(vp))
		return((vp->v_ubcinfo->ui_control));

	return (0);
}


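/*
 * Map a logical block number to a byte offset in the file, via
 * VNOP_BLKTOOFF(); returns -1 for an invalid vnode or on error.
 */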
off_t
ubc_blktooff(vnode_t vp, daddr64_t blkno)
{
	off_t file_offset;
	int error;

	if (UBCINVALID(vp))
		return ((off_t)-1);

	error = VNOP_BLKTOOFF(vp, blkno, &file_offset);
	if (error)
		file_offset = -1;

	return (file_offset);
}
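/*
 * Map a byte offset in the file to a logical block number, via
 * VNOP_OFFTOBLK(); returns -1 for an invalid vnode or on error.
 */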
daddr64_t
ubc_offtoblk(vnode_t vp, off_t offset)
{
	daddr64_t blkno;
	int error = 0;

	if (UBCINVALID(vp))
		return ((daddr64_t)-1);

	error = VNOP_OFFTOBLK(vp, offset, &blkno);
	if (error)
		blkno = -1;

	return (blkno);
}

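/*
 * Ask the VM whether the memory object backing this vnode currently
 * has any pages resident; returns 1 if so, 0 otherwise or on failure.
 */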
int
ubc_pages_resident(vnode_t vp)
{
	kern_return_t kret;
	boolean_t has_pages_resident;

	if ( !UBCINFOEXISTS(vp))
		return (0);

	kret = memory_object_pages_resident(vp->v_ubcinfo->ui_control, &has_pages_resident);

	if (kret != KERN_SUCCESS)
		return (0);

	if (has_pages_resident == TRUE)
		return (1);

	return (0);
}


/*
 * This interface will eventually be deprecated
 *
 * clean and/or invalidate a range in the memory object that backs this
 * vnode.  The start offset is truncated to the page boundary and the
 * size is adjusted to include the last page in the range.
 *
 * returns 1 for success, 0 for failure
 */
int
ubc_sync_range(vnode_t vp, off_t beg_off, off_t end_off, int flags)
{
	return (ubc_msync_internal(vp, beg_off, end_off, NULL, flags, NULL));
}


/*
 * clean and/or invalidate a range in the memory object that backs this
 * vnode.  The start offset is truncated to the page boundary and the
 * size is adjusted to include the last page in the range.
 * Returns 0 on success, EINVAL if nothing could be done (bad range or
 * flags), otherwise the I/O error reported for the range.
 */
errno_t
ubc_msync(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags)
{
	int retval;
	int io_errno = 0;

	if (resid_off)
		*resid_off = beg_off;

	retval = ubc_msync_internal(vp, beg_off, end_off, resid_off, flags, &io_errno);

	if (retval == 0 && io_errno == 0)
		return (EINVAL);
	return (io_errno);
}


/*
 * clean and/or invalidate a range in the memory object that backs this
 * vnode.  The start offset is truncated to the page boundary and the
 * size is adjusted to include the last page in the range.
 */
static int
ubc_msync_internal(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags, int *io_errno)
{
	memory_object_size_t	tsize;
	kern_return_t		kret;
	int request_flags = 0;
	int flush_flags   = MEMORY_OBJECT_RETURN_NONE;

	if ( !UBCINFOEXISTS(vp))
		return (0);
	if (end_off <= beg_off)
		return (0);
	if ((flags & (UBC_INVALIDATE | UBC_PUSHDIRTY | UBC_PUSHALL)) == 0)
		return (0);

	if (flags & UBC_INVALIDATE)
		/*
		 * discard the resident pages
		 */
		request_flags = (MEMORY_OBJECT_DATA_FLUSH | MEMORY_OBJECT_DATA_NO_CHANGE);

	if (flags & UBC_SYNC)
		/*
		 * wait for all the I/O to complete before returning
		 */
		request_flags |= MEMORY_OBJECT_IO_SYNC;

	if (flags & UBC_PUSHDIRTY)
		/*
		 * we only return the dirty pages in the range
		 */
		flush_flags = MEMORY_OBJECT_RETURN_DIRTY;

	if (flags & UBC_PUSHALL)
		/*
		 * then return all the interesting pages in the range (both dirty and precious)
		 * to the pager
		 */
		flush_flags = MEMORY_OBJECT_RETURN_ALL;

	beg_off = trunc_page_64(beg_off);
	end_off = round_page_64(end_off);
	tsize   = (memory_object_size_t)end_off - beg_off;

	/* flush and/or invalidate pages in the range requested */
	kret = memory_object_lock_request(vp->v_ubcinfo->ui_control,
					  beg_off, tsize, resid_off, io_errno,
					  flush_flags, request_flags, VM_PROT_NO_CHANGE);

	return ((kret == KERN_SUCCESS) ? 1 : 0);
}


/*
 * The vnode is mapped explicitly, mark it so.
 */
__private_extern__ int
ubc_map(vnode_t vp, int flags)
{
	struct ubc_info *uip;
	int error = 0;
	int need_ref = 0;
	struct vfs_context context;

	if (vnode_getwithref(vp))
		return (0);

	if (UBCINFOEXISTS(vp)) {
		context.vc_proc = current_proc();
		context.vc_ucred = kauth_cred_get();

		error = VNOP_MMAP(vp, flags, &context);

		if (error != EPERM)
			error = 0;

		if (error == 0) {
			vnode_lock(vp);

			uip = vp->v_ubcinfo;

			if ( !ISSET(uip->ui_flags, UI_ISMAPPED))
				need_ref = 1;
			SET(uip->ui_flags, (UI_WASMAPPED | UI_ISMAPPED));

			vnode_unlock(vp);

			if (need_ref)
				vnode_ref(vp);
		}
	}
	vnode_put(vp);

	return (error);
}

/*
 * destroy the named reference for a given vnode
 */
__private_extern__ int
ubc_destroy_named(struct vnode *vp)
{
	memory_object_control_t control;
	struct ubc_info *uip;
	kern_return_t kret;

	/*
	 * We may already have had the object terminated
	 * and the ubcinfo released as a side effect of
	 * some earlier processing.  If so, pretend we did
	 * it, because it probably was a result of our
	 * efforts.
	 */
	if (!UBCINFOEXISTS(vp))
		return (1);

	uip = vp->v_ubcinfo;

	/*
	 * Terminate the memory object.
	 * memory_object_destroy() will result in
	 * vnode_pager_no_senders().
	 * That will release the pager reference
	 * and the vnode will move to the free list.
	 */
	control = ubc_getobject(vp, UBC_HOLDOBJECT);
	if (control != MEMORY_OBJECT_CONTROL_NULL) {

		/*
		 * XXXXX - should we hold the vnode lock here?
		 */
		if (ISSET(vp->v_flag, VTERMINATE))
			panic("ubc_destroy_named: already terminating");
		SET(vp->v_flag, VTERMINATE);

		kret = memory_object_destroy(control, 0);
		if (kret != KERN_SUCCESS)
			return (0);

		/*
		 * memory_object_destroy() is asynchronous
		 * with respect to vnode_pager_no_senders().
		 * wait for vnode_pager_no_senders() to clear
		 * VNAMED_UBC
		 */
		vnode_lock(vp);
		while (ISSET(vp->v_lflag, VNAMED_UBC)) {
			(void)msleep((caddr_t)&vp->v_lflag, &vp->v_lock,
				     PINOD, "ubc_destroy_named", 0);
		}
		vnode_unlock(vp);
	}
	return (1);
}


/*
 * Find out whether a vnode is in use by UBC
 * Returns 1 if file is in use by UBC, 0 if not
 */
int
ubc_isinuse(struct vnode *vp, int busycount)
{
	if ( !UBCINFOEXISTS(vp))
		return (0);
	return(ubc_isinuse_locked(vp, busycount, 0));
}


int
ubc_isinuse_locked(struct vnode *vp, int busycount, int locked)
{
	int retval = 0;


	if (!locked)
		vnode_lock(vp);

	if ((vp->v_usecount - vp->v_kusecount) > busycount)
		retval = 1;

	if (!locked)
		vnode_unlock(vp);
	return (retval);
}


/*
 * MUST only be called by the VM
 */
__private_extern__ void
ubc_unmap(struct vnode *vp)
{
	struct vfs_context context;
	struct ubc_info *uip;
	int	need_rele = 0;

	if (vnode_getwithref(vp))
		return;

	if (UBCINFOEXISTS(vp)) {
		vnode_lock(vp);

		uip = vp->v_ubcinfo;
		if (ISSET(uip->ui_flags, UI_ISMAPPED)) {
			CLR(uip->ui_flags, UI_ISMAPPED);
			need_rele = 1;
		}
		vnode_unlock(vp);

		if (need_rele) {
			context.vc_proc = current_proc();
			context.vc_ucred = kauth_cred_get();
			(void)VNOP_MNOMAP(vp, &context);

			vnode_rele(vp);
		}
	}
	/*
	 * the drop of the vnode ref will cleanup
	 */
	vnode_put(vp);
}

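/*
 * Perform the page operation encoded in "ops" (see
 * memory_object_page_op()) on the page at f_offset in the object
 * backing this vnode, optionally returning the physical page number
 * and page flags.
 */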
kern_return_t
ubc_page_op(
	struct vnode	*vp,
	off_t		f_offset,
	int		ops,
	ppnum_t		*phys_entryp,
	int		*flagsp)
{
	memory_object_control_t		control;

	control = ubc_getobject(vp, UBC_FLAGS_NONE);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	return (memory_object_page_op(control,
				      (memory_object_offset_t)f_offset,
				      ops,
				      phys_entryp,
				      flagsp));
}

__private_extern__ kern_return_t
ubc_page_op_with_control(
	memory_object_control_t	control,
	off_t			f_offset,
	int			ops,
	ppnum_t			*phys_entryp,
	int			*flagsp)
{
	return (memory_object_page_op(control,
				      (memory_object_offset_t)f_offset,
				      ops,
				      phys_entryp,
				      flagsp));
}

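/*
 * Perform the operation encoded in "ops" over the range of pages
 * [f_offset_beg, f_offset_end) in the object backing this vnode,
 * via memory_object_range_op().
 */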
kern_return_t
ubc_range_op(
	struct vnode	*vp,
	off_t		f_offset_beg,
	off_t		f_offset_end,
	int		ops,
	int		*range)
{
	memory_object_control_t		control;

	control = ubc_getobject(vp, UBC_FLAGS_NONE);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	return (memory_object_range_op(control,
				       (memory_object_offset_t)f_offset_beg,
				       (memory_object_offset_t)f_offset_end,
				       ops,
				       range));
}

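/*
 * Create a universal page list (UPL) covering bufsize bytes of the
 * object backing this vnode, starting at f_offset; bufsize must be
 * page-aligned.  If plp is non-NULL, the UPL's internal page list
 * is returned through it as well.
 */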
kern_return_t
ubc_create_upl(
	struct vnode	*vp,
	off_t		f_offset,
	long		bufsize,
	upl_t		*uplp,
	upl_page_info_t	**plp,
	int		uplflags)
{
	memory_object_control_t		control;
	int				count;
	int				ubcflags;
	kern_return_t			kr;

	if (bufsize & 0xfff)
		return KERN_INVALID_ARGUMENT;

	if (uplflags & UPL_FOR_PAGEOUT) {
		uplflags &= ~UPL_FOR_PAGEOUT;
		ubcflags  =  UBC_FOR_PAGEOUT;
	} else
		ubcflags = UBC_FLAGS_NONE;

	control = ubc_getobject(vp, ubcflags);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	if (uplflags & UPL_WILL_BE_DUMPED) {
		uplflags &= ~UPL_WILL_BE_DUMPED;
		uplflags |= (UPL_NO_SYNC|UPL_SET_INTERNAL);
	} else
		uplflags |= (UPL_NO_SYNC|UPL_CLEAN_IN_PLACE|UPL_SET_INTERNAL);
	count = 0;
	kr = memory_object_upl_request(control, f_offset, bufsize,
				       uplp, NULL, &count, uplflags);
	if (plp != NULL)
		*plp = UPL_GET_INTERNAL_PAGE_LIST(*uplp);
	return kr;
}


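/*
 * Map the given UPL into the kernel address space; the mapped
 * address is returned through dst_addr.
 */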
kern_return_t
ubc_upl_map(
	upl_t		upl,
	vm_offset_t	*dst_addr)
{
	return (vm_upl_map(kernel_map, upl, dst_addr));
}


kern_return_t
ubc_upl_unmap(
	upl_t	upl)
{
	return(vm_upl_unmap(kernel_map, upl));
}

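/*
 * Commit all pages in the UPL and deallocate it.
 */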
kern_return_t
ubc_upl_commit(
	upl_t		upl)
{
	upl_page_info_t	*pl;
	kern_return_t	kr;

	pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
	kr = upl_commit(upl, pl, MAX_UPL_TRANSFER);
	upl_deallocate(upl);
	return kr;
}

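/*
 * Commit the pages in the given subrange of the UPL; the UPL is
 * deallocated if UPL_COMMIT_FREE_ON_EMPTY is set and the commit
 * leaves it empty.
 */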
kern_return_t
ubc_upl_commit_range(
	upl_t		upl,
	vm_offset_t	offset,
	vm_size_t	size,
	int		flags)
{
	upl_page_info_t	*pl;
	boolean_t	empty;
	kern_return_t	kr;

	if (flags & UPL_COMMIT_FREE_ON_EMPTY)
		flags |= UPL_COMMIT_NOTIFY_EMPTY;

	pl = UPL_GET_INTERNAL_PAGE_LIST(upl);

	kr = upl_commit_range(upl, offset, size, flags,
			      pl, MAX_UPL_TRANSFER, &empty);

	if((flags & UPL_COMMIT_FREE_ON_EMPTY) && empty)
		upl_deallocate(upl);

	return kr;
}

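/*
 * Abort (release without committing) the pages in the given subrange
 * of the UPL; the UPL is deallocated if UPL_ABORT_FREE_ON_EMPTY is
 * set and the abort leaves it empty.
 */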
kern_return_t
ubc_upl_abort_range(
	upl_t		upl,
	vm_offset_t	offset,
	vm_size_t	size,
	int		abort_flags)
{
	kern_return_t	kr;
	boolean_t	empty = FALSE;

	if (abort_flags & UPL_ABORT_FREE_ON_EMPTY)
		abort_flags |= UPL_ABORT_NOTIFY_EMPTY;

	kr = upl_abort_range(upl, offset, size, abort_flags, &empty);

	if((abort_flags & UPL_ABORT_FREE_ON_EMPTY) && empty)
		upl_deallocate(upl);

	return kr;
}

kern_return_t
ubc_upl_abort(
	upl_t		upl,
	int		abort_type)
{
	kern_return_t	kr;

	kr = upl_abort(upl, abort_type);
	upl_deallocate(upl);
	return kr;
}

upl_page_info_t *
ubc_upl_pageinfo(
	upl_t		upl)
{
	return (UPL_GET_INTERNAL_PAGE_LIST(upl));
}

/************* UBC APIS **************/

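/*
 * Predicates on the state of a vnode's ubc_info; only regular files
 * (VREG) that are not system vnodes participate in the UBC.
 */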
int
UBCINFOMISSING(struct vnode * vp)
{
	return((vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo == UBC_INFO_NULL));
}

int
UBCINFORECLAIMED(struct vnode * vp)
{
	return((vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo == UBC_INFO_NULL));
}


int
UBCINFOEXISTS(struct vnode * vp)
{
	return((vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo != UBC_INFO_NULL));
}
int
UBCISVALID(struct vnode * vp)
{
	return((vp) && ((vp)->v_type == VREG) && !((vp)->v_flag & VSYSTEM));
}
int
UBCINVALID(struct vnode * vp)
{
	return(((vp) == NULL) || ((vp) && ((vp)->v_type != VREG))
		|| ((vp) && ((vp)->v_flag & VSYSTEM)));
}
int
UBCINFOCHECK(const char * fun, struct vnode * vp)
{
	if ((vp) && ((vp)->v_type == VREG) &&
	    ((vp)->v_ubcinfo == UBC_INFO_NULL)) {
		panic("%s: lost ubc_info", (fun));
		return(1);
	} else
		return(0);
}