/*
 * Copyright (c) 1999-2002 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * File:	ubc_subr.c
 * Author:	Umesh Vaishampayan [umeshv@apple.com]
 *		05-Aug-1999	umeshv	Created.
 *
 * Functions related to the Unified Buffer Cache.
 *
 * Callers of UBC functions MUST hold a valid reference on the vnode.
 *
 */

#undef DIAGNOSTIC
#define DIAGNOSTIC 1

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/ubc.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/ucred.h>
#include <sys/proc.h>
#include <sys/buf.h>

#include <mach/mach_types.h>
#include <mach/memory_object_types.h>

#include <kern/zalloc.h>

#if DIAGNOSTIC
#if defined(assert)
#undef assert
#endif
#define assert(cond)	\
	((void) ((cond) ? 0 : panic("%s:%d (%s)", __FILE__, __LINE__, # cond)))
#else
#include <kern/assert.h>
#endif /* DIAGNOSTIC */

struct zone	*ubc_info_zone;

/* lock for changes to struct UBC */
static __inline__ void
ubc_lock(struct vnode *vp)
{
	/* For now, just use the v_interlock */
	simple_lock(&vp->v_interlock);
}

/* unlock */
static __inline__ void
ubc_unlock(struct vnode *vp)
{
	/* For now, just use the v_interlock */
	simple_unlock(&vp->v_interlock);
}

/*
 * Serialize the requests to the VM
 * Returns:
 *		0	-	Failure
 *		1	-	Successful in acquiring the lock
 *		2	-	Successful in acquiring the lock recursively
 *				do not call ubc_unbusy()
 *				[This is strange, but saves 4 bytes in struct ubc_info]
 */
static int
ubc_busy(struct vnode *vp)
{
	register struct ubc_info	*uip;

	if (!UBCINFOEXISTS(vp))
		return (0);

	uip = vp->v_ubcinfo;

	while (ISSET(uip->ui_flags, UI_BUSY)) {

		if (uip->ui_owner == (void *)current_act())
			return (2);

		SET(uip->ui_flags, UI_WANTED);
		(void) tsleep((caddr_t)&vp->v_ubcinfo, PINOD, "ubcbusy", 0);

		if (!UBCINFOEXISTS(vp))
			return (0);
	}
	uip->ui_owner = (void *)current_act();

	SET(uip->ui_flags, UI_BUSY);

	return (1);
}

static void
ubc_unbusy(struct vnode *vp)
{
	register struct ubc_info	*uip;

	if (!UBCINFOEXISTS(vp)) {
		wakeup((caddr_t)&vp->v_ubcinfo);
		return;
	}
	uip = vp->v_ubcinfo;
	CLR(uip->ui_flags, UI_BUSY);
	uip->ui_owner = (void *)NULL;

	if (ISSET(uip->ui_flags, UI_WANTED)) {
		CLR(uip->ui_flags, UI_WANTED);
		wakeup((caddr_t)&vp->v_ubcinfo);
	}
}

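/*
 * Illustrative sketch, not part of the original file: the ubc_busy()/
 * ubc_unbusy() protocol as the routines below use it.  A return of 2
 * means this thread already holds the busy lock (recursive entry), so
 * ubc_unbusy() must NOT be called on that path.  The caller shown here
 * is hypothetical.
 */
#if 0
static int
example_serialized_op(struct vnode *vp)
{
	int recursed;

	if ((recursed = ubc_busy(vp)) == 0)
		return (0);	/* no ubc_info, or it was torn down */

	/* ... work that must be serialized with the VM goes here ... */

	if (recursed == 1)	/* only the outermost holder unbusies */
		ubc_unbusy(vp);
	return (1);
}
#endif
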
/*
 * Initialization of the zone for Unified Buffer Cache.
 */
__private_extern__ void
ubc_init()
{
	int	i;

	i = (vm_size_t) sizeof (struct ubc_info);
	/* XXX  the number of elements should be tied in to maxvnodes */
	ubc_info_zone = zinit (i, 10000*i, 8192, "ubc_info zone");
	return;
}

/*
 * Initialize a ubc_info structure for a vnode.
 */
int
ubc_info_init(struct vnode *vp)
{
	register struct ubc_info	*uip;
	void *  pager;
	struct vattr	vattr;
	struct proc *p = current_proc();
	int error = 0;
	kern_return_t kret;
	memory_object_control_t control;

	if (!UBCISVALID(vp))
		return (EINVAL);

	ubc_lock(vp);
	if (ISSET(vp->v_flag, VUINIT)) {
		/*
		 * other thread is already doing this;
		 * wait till it is done
		 */
		while (ISSET(vp->v_flag, VUINIT)) {
			SET(vp->v_flag, VUWANT); /* XXX overloaded! */
			ubc_unlock(vp);
			(void) tsleep((caddr_t)vp, PINOD, "ubcinfo", 0);
			ubc_lock(vp);
		}
		ubc_unlock(vp);
		return (0);
	} else {
		SET(vp->v_flag, VUINIT);
	}

	uip = vp->v_ubcinfo;
	if ((uip == UBC_INFO_NULL) || (uip == UBC_NOINFO)) {
		ubc_unlock(vp);
		uip = (struct ubc_info *) zalloc(ubc_info_zone);
		uip->ui_pager = MEMORY_OBJECT_NULL;
		uip->ui_control = MEMORY_OBJECT_CONTROL_NULL;
		uip->ui_flags = UI_INITED;
		uip->ui_vnode = vp;
		uip->ui_ucred = NOCRED;
		uip->ui_refcount = 1;
		uip->ui_size = 0;
		uip->ui_mapped = 0;
		uip->ui_owner = (void *)NULL;
		ubc_lock(vp);
	}
#if DIAGNOSTIC
	else
		Debugger("ubc_info_init: already");
#endif /* DIAGNOSTIC */

	assert(uip->ui_flags != UI_NONE);
	assert(uip->ui_vnode == vp);

#if 0
	if(ISSET(uip->ui_flags, UI_HASPAGER))
		goto done;
#endif /* 0 */

	/* now set this ubc_info in the vnode */
	vp->v_ubcinfo = uip;
	SET(uip->ui_flags, UI_HASPAGER);
	ubc_unlock(vp);
	pager = (void *)vnode_pager_setup(vp, uip->ui_pager);
	assert(pager);
	ubc_setpager(vp, pager);

	/*
	 * Note: We can not use VOP_GETATTR() here to get an accurate
	 * value of ui_size.  Thanks to NFS:
	 * nfs_getattr() can call vinvalbuf(), and at this point the
	 * ubc_info is not set up to deal with that.
	 * So use a bogus size for now.
	 */

	/*
	 * Create a vnode - vm_object association.
	 * memory_object_create_named() creates a "named" reference on the
	 * memory object; we hold this reference as long as the vnode is
	 * "alive."  Since memory_object_create_named() took its own reference
	 * on the vnode pager we passed it, we can drop the reference
	 * vnode_pager_setup() returned here.
	 */
	kret = memory_object_create_named(pager,
		(memory_object_size_t)uip->ui_size, &control);
	vnode_pager_deallocate(pager);
	if (kret != KERN_SUCCESS)
		panic("ubc_info_init: memory_object_create_named returned %d", kret);

	assert(control);
	uip->ui_control = control;	/* cache the value of the mo control */
	SET(uip->ui_flags, UI_HASOBJREF);	/* with a named reference */
	/* create a pager reference on the vnode */
	error = vnode_pager_vget(vp);
	if (error)
		panic("ubc_info_init: vnode_pager_vget error = %d", error);

	/* initialize the size */
	error = VOP_GETATTR(vp, &vattr, p->p_ucred, p);

	ubc_lock(vp);
	uip->ui_size = (error ? 0 : vattr.va_size);

done:
	CLR(vp->v_flag, VUINIT);
	if (ISSET(vp->v_flag, VUWANT)) {
		CLR(vp->v_flag, VUWANT);
		ubc_unlock(vp);
		wakeup((caddr_t)vp);
	} else
		ubc_unlock(vp);

	return (error);
}

/* Free the ubc_info */
static void
ubc_info_free(struct ubc_info *uip)
{
	struct ucred *credp;

	credp = uip->ui_ucred;
	if (credp != NOCRED) {
		uip->ui_ucred = NOCRED;
		crfree(credp);
	}

	if (uip->ui_control != MEMORY_OBJECT_CONTROL_NULL)
		memory_object_control_deallocate(uip->ui_control);

	zfree(ubc_info_zone, (vm_offset_t)uip);
	return;
}

void
ubc_info_deallocate(struct ubc_info *uip)
{

	assert(uip->ui_refcount > 0);

	if (uip->ui_refcount-- == 1) {
		struct vnode *vp;

		vp = uip->ui_vnode;
		if (ISSET(uip->ui_flags, UI_WANTED)) {
			CLR(uip->ui_flags, UI_WANTED);
			wakeup((caddr_t)&vp->v_ubcinfo);
		}

		ubc_info_free(uip);
	}
}

/*
 * Communicate with the VM the size change of the file
 * returns 1 on success, 0 on failure
 */
int
ubc_setsize(struct vnode *vp, off_t nsize)
{
	off_t osize;	/* ui_size before change */
	off_t lastpg, olastpgend, lastoff;
	struct ubc_info *uip;
	memory_object_control_t control;
	kern_return_t kret;

	if (nsize < (off_t)0)
		return (0);

	if (UBCINVALID(vp))
		return (0);

	if (!UBCINFOEXISTS(vp))
		return (0);

	uip = vp->v_ubcinfo;
	osize = uip->ui_size;	/* call ubc_getsize() ??? */
	/* Update the size before flushing the VM */
	uip->ui_size = nsize;

	if (nsize >= osize)	/* Nothing more to do */
		return (1);	/* return success */

	/*
	 * When the file shrinks, invalidate the pages beyond the
	 * new size. Also get rid of garbage beyond nsize on the
	 * last page. The ui_size already has the nsize. This
	 * ensures that the pageout would not write beyond the new
	 * end of the file.
	 */

	lastpg = trunc_page_64(nsize);
	olastpgend = round_page_64(osize);
	control = uip->ui_control;
	assert(control);
	lastoff = (nsize & PAGE_MASK_64);

	/*
	 * If the new size is a multiple of the page size, we need not
	 * flush the last page; invalidating is sufficient.
	 */
	if (!lastoff) {
		/* invalidate last page and old contents beyond nsize */
		kret = memory_object_lock_request(control,
			(memory_object_offset_t)lastpg,
			(memory_object_size_t)(olastpgend - lastpg),
			MEMORY_OBJECT_RETURN_NONE, MEMORY_OBJECT_DATA_FLUSH,
			VM_PROT_NO_CHANGE);
		if (kret != KERN_SUCCESS)
			printf("ubc_setsize: invalidate failed (error = %d)\n", kret);

		return ((kret == KERN_SUCCESS) ? 1 : 0);
	}

	/* flush the last page */
	kret = memory_object_lock_request(control,
		(memory_object_offset_t)lastpg,
		PAGE_SIZE_64,
		MEMORY_OBJECT_RETURN_DIRTY, FALSE,
		VM_PROT_NO_CHANGE);

	if (kret == KERN_SUCCESS) {
		/* invalidate last page and old contents beyond nsize */
		kret = memory_object_lock_request(control,
			(memory_object_offset_t)lastpg,
			(memory_object_size_t)(olastpgend - lastpg),
			MEMORY_OBJECT_RETURN_NONE, MEMORY_OBJECT_DATA_FLUSH,
			VM_PROT_NO_CHANGE);
		if (kret != KERN_SUCCESS)
			printf("ubc_setsize: invalidate failed (error = %d)\n", kret);
	} else
		printf("ubc_setsize: flush failed (error = %d)\n", kret);

	return ((kret == KERN_SUCCESS) ? 1 : 0);
}

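/*
 * Illustrative sketch, not part of the original file: a filesystem
 * truncate path keeping the UBC in sync.  On shrink, ubc_setsize()
 * flushes the partial last page and invalidates pages beyond the new
 * EOF, as implemented above.  The caller name is hypothetical.
 */
#if 0
static int
example_truncate(struct vnode *vp, off_t newsize)
{
	/* ... filesystem-specific work to resize the on-disk file ... */

	if (ubc_setsize(vp, newsize) == 0)
		return (EIO);	/* VM flush/invalidate failed */
	return (0);
}
#endif
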
/*
 * Get the size of the file
 */
off_t
ubc_getsize(struct vnode *vp)
{
	return (vp->v_ubcinfo->ui_size);
}

/*
 * Caller indicates that the object corresponding to the vnode
 * can not be cached in the object cache. Make it so.
 * returns 1 on success, 0 on failure
 */
int
ubc_uncache(struct vnode *vp)
{
	kern_return_t kret;
	struct ubc_info *uip;
	int    recursed;
	memory_object_control_t control;
	memory_object_perf_info_data_t   perf;

	if (!UBCINFOEXISTS(vp))
		return (0);

	if ((recursed = ubc_busy(vp)) == 0)
		return (0);

	uip = vp->v_ubcinfo;

	assert(uip != UBC_INFO_NULL);

	/*
	 * AGE it so that vfree() can make sure that it
	 * would get recycled soon after the last reference is gone.
	 * This ensures that .nfs turds would not linger
	 */
	vagevp(vp);

	/* set the "do not cache" bit */
	SET(uip->ui_flags, UI_DONTCACHE);

	control = uip->ui_control;
	assert(control);

	perf.cluster_size = PAGE_SIZE; /* XXX use real cluster_size. */
	perf.may_cache = FALSE;
	kret = memory_object_change_attributes(control,
				MEMORY_OBJECT_PERFORMANCE_INFO,
				(memory_object_info_t) &perf,
				MEMORY_OBJECT_PERF_INFO_COUNT);

	if (kret != KERN_SUCCESS) {
		printf("ubc_uncache: memory_object_change_attributes "
			"kret = %d\n", kret);
		if (recursed == 1)
			ubc_unbusy(vp);
		return (0);
	}

	ubc_release_named(vp);

	if (recursed == 1)
		ubc_unbusy(vp);
	return (1);
}

/*
 * call ubc_clean() and ubc_uncache() on all the vnodes
 * for this mount point.
 * returns 1 on success, 0 on failure
 */
__private_extern__ int
ubc_umount(struct mount *mp)
{
	struct proc *p = current_proc();
	struct vnode *vp, *nvp;
	int ret = 1;

loop:
	simple_lock(&mntvnode_slock);
	for (vp = mp->mnt_vnodelist.lh_first; vp; vp = nvp) {
		if (vp->v_mount != mp) {
			simple_unlock(&mntvnode_slock);
			goto loop;
		}
		nvp = vp->v_mntvnodes.le_next;
		simple_unlock(&mntvnode_slock);
		if (UBCINFOEXISTS(vp)) {

			/*
			 * Must get a valid reference on the vnode
			 * before calling UBC functions
			 */
			if (vget(vp, 0, p)) {
				ret = 0;
				simple_lock(&mntvnode_slock);
				continue; /* move on to the next vnode */
			}
			ret &= ubc_clean(vp, 0); /* do not invalidate */
			ret &= ubc_uncache(vp);
			vrele(vp);
		}
		simple_lock(&mntvnode_slock);
	}
	simple_unlock(&mntvnode_slock);
	return (ret);
}

/*
 * Call ubc_umount() for all filesystems.
 * The list is traversed in reverse order
 * of mounting to avoid dependencies.
 */
__private_extern__ void
ubc_unmountall()
{
	struct mount *mp, *nmp;

	/*
	 * Since this only runs when rebooting, it is not interlocked.
	 */
	for (mp = mountlist.cqh_last; mp != (void *)&mountlist; mp = nmp) {
		nmp = mp->mnt_list.cqe_prev;
		(void) ubc_umount(mp);
	}
}

/* Get the credentials */
struct ucred *
ubc_getcred(struct vnode *vp)
{
	struct ubc_info *uip;

	uip = vp->v_ubcinfo;

	if (UBCINVALID(vp))
		return (NOCRED);

	return (uip->ui_ucred);
}

/*
 * Set the credentials
 * existing credentials are not changed
 * returns 1 on success and 0 on failure
 */
int
ubc_setcred(struct vnode *vp, struct proc *p)
{
	struct ubc_info *uip;
	struct ucred *credp;

	uip = vp->v_ubcinfo;

	if (UBCINVALID(vp))
		return (0);

	credp = uip->ui_ucred;
	if (credp == NOCRED) {
		crhold(p->p_ucred);
		uip->ui_ucred = p->p_ucred;
	}

	return (1);
}

/* Get the pager */
__private_extern__ memory_object_t
ubc_getpager(struct vnode *vp)
{
	struct ubc_info *uip;

	uip = vp->v_ubcinfo;

	if (UBCINVALID(vp))
		return (0);

	return (uip->ui_pager);
}

/*
 * Get the memory object associated with this vnode.
 * If the vnode was reactivated, the memory object would not exist.
 * Unless "do not reactivate" was specified, look it up using the pager.
 * If hold was requested, create an object reference if one does not
 * exist already.
 */

memory_object_control_t
ubc_getobject(struct vnode *vp, int flags)
{
	struct ubc_info *uip;
	int    recursed;
	memory_object_control_t control;

	if (UBCINVALID(vp))
		return (0);

	if (flags & UBC_FOR_PAGEOUT)
		return(vp->v_ubcinfo->ui_control);

	if ((recursed = ubc_busy(vp)) == 0)
		return (0);

	uip = vp->v_ubcinfo;
	control = uip->ui_control;

	if ((flags & UBC_HOLDOBJECT) && (!ISSET(uip->ui_flags, UI_HASOBJREF))) {

		/*
		 * Take a temporary reference on the ubc info so that it won't go
		 * away during our recovery attempt.
		 */
		ubc_lock(vp);
		uip->ui_refcount++;
		ubc_unlock(vp);
		if (memory_object_recover_named(control, TRUE) == KERN_SUCCESS) {
			SET(uip->ui_flags, UI_HASOBJREF);
		} else {
			control = MEMORY_OBJECT_CONTROL_NULL;
		}
		if (recursed == 1)
			ubc_unbusy(vp);
		ubc_info_deallocate(uip);

	} else {
		if (recursed == 1)
			ubc_unbusy(vp);
	}

	return (control);
}

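/*
 * Illustrative sketch, not part of the original file: obtaining the
 * control object with a guaranteed named reference.  UBC_HOLDOBJECT
 * asks ubc_getobject() to recover the named reference if it had been
 * released.  The caller name is hypothetical.
 */
#if 0
static kern_return_t
example_get_control(struct vnode *vp)
{
	memory_object_control_t control;

	control = ubc_getobject(vp, UBC_HOLDOBJECT);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return (KERN_FAILURE);	/* recovery failed or invalid vnode */

	/* ... issue memory_object_* requests against control ... */
	return (KERN_SUCCESS);
}
#endif
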
/* Set the pager */
int
ubc_setpager(struct vnode *vp, memory_object_t pager)
{
	struct ubc_info *uip;

	uip = vp->v_ubcinfo;

	if (UBCINVALID(vp))
		return (0);

	uip->ui_pager = pager;
	return (1);
}

int
ubc_setflags(struct vnode * vp, int  flags)
{
	struct ubc_info *uip;

	if (UBCINVALID(vp))
		return (0);

	uip = vp->v_ubcinfo;

	SET(uip->ui_flags, flags);

	return (1);
}

int
ubc_clearflags(struct vnode * vp, int  flags)
{
	struct ubc_info *uip;

	if (UBCINVALID(vp))
		return (0);

	uip = vp->v_ubcinfo;

	CLR(uip->ui_flags, flags);

	return (1);
}


int
ubc_issetflags(struct vnode * vp, int  flags)
{
	struct ubc_info *uip;

	if (UBCINVALID(vp))
		return (0);

	uip = vp->v_ubcinfo;

	return (ISSET(uip->ui_flags, flags));
}

off_t
ubc_blktooff(struct vnode *vp, daddr_t blkno)
{
	off_t file_offset;
	int error;

	if (UBCINVALID(vp))
		return ((off_t)-1);

	error = VOP_BLKTOOFF(vp, blkno, &file_offset);
	if (error)
		file_offset = -1;

	return (file_offset);
}

daddr_t
ubc_offtoblk(struct vnode *vp, off_t offset)
{
	daddr_t blkno;
	int error = 0;

	if (UBCINVALID(vp)) {
		return ((daddr_t)-1);
	}

	error = VOP_OFFTOBLK(vp, offset, &blkno);
	if (error)
		blkno = -1;

	return (blkno);
}

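/*
 * Illustrative sketch, not part of the original file: the two
 * conversions above are intended as inverses at block granularity, so
 * a round trip through them should recover the original block number
 * (assuming the filesystem's VOP_BLKTOOFF/VOP_OFFTOBLK agree).  The
 * checker below is hypothetical.
 */
#if 0
static void
example_roundtrip(struct vnode *vp, daddr_t blkno)
{
	off_t file_offset;

	file_offset = ubc_blktooff(vp, blkno);
	if (file_offset == (off_t)-1)
		return;		/* invalid vnode or VOP failure */
	assert(ubc_offtoblk(vp, file_offset) == blkno);
}
#endif
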
/*
 * Cause the file data in VM to be pushed out to the storage;
 * it also causes all currently valid pages to be released
 * returns 1 on success, 0 on failure
 */
int
ubc_clean(struct vnode *vp, int invalidate)
{
	off_t size;
	struct ubc_info *uip;
	memory_object_control_t control;
	kern_return_t kret;
	int flags = 0;

	if (UBCINVALID(vp))
		return (0);

	if (!UBCINFOEXISTS(vp))
		return (0);

	/*
	 * if invalidate was requested, write dirty data and then discard
	 * the resident pages
	 */
	if (invalidate)
		flags = (MEMORY_OBJECT_DATA_FLUSH | MEMORY_OBJECT_DATA_NO_CHANGE);

	uip = vp->v_ubcinfo;
	size = uip->ui_size;	/* call ubc_getsize() ??? */

	control = uip->ui_control;
	assert(control);

	cluster_release(vp);
	vp->v_clen = 0;

	/* Write the dirty data in the file and discard cached pages */
	kret = memory_object_lock_request(control,
				(memory_object_offset_t)0,
				(memory_object_size_t)round_page_64(size),
				MEMORY_OBJECT_RETURN_ALL, flags,
				VM_PROT_NO_CHANGE);

	if (kret != KERN_SUCCESS)
		printf("ubc_clean: clean failed (error = %d)\n", kret);

	return ((kret == KERN_SUCCESS) ? 1 : 0);
}

/*
 * Cause the file data in VM to be pushed out to the storage;
 * currently valid pages are NOT invalidated
 * returns 1 on success, 0 on failure
 */
int
ubc_pushdirty(struct vnode *vp)
{
	off_t size;
	struct ubc_info *uip;
	memory_object_control_t control;
	kern_return_t kret;

	if (UBCINVALID(vp))
		return (0);

	if (!UBCINFOEXISTS(vp))
		return (0);

	uip = vp->v_ubcinfo;
	size = uip->ui_size;	/* call ubc_getsize() ??? */

	control = uip->ui_control;
	assert(control);

	vp->v_flag &= ~VHASDIRTY;
	vp->v_clen = 0;

	/* Write the dirty data in the file; pages are kept resident */
	kret = memory_object_lock_request(control,
				(memory_object_offset_t)0,
				(memory_object_size_t)round_page_64(size),
				MEMORY_OBJECT_RETURN_DIRTY, FALSE,
				VM_PROT_NO_CHANGE);

	if (kret != KERN_SUCCESS)
		printf("ubc_pushdirty: flush failed (error = %d)\n", kret);

	return ((kret == KERN_SUCCESS) ? 1 : 0);
}

/*
 * Cause the file data in VM for the given range to be pushed out to
 * the storage; currently valid pages are NOT invalidated
 * returns 1 on success, 0 on failure
 */
int
ubc_pushdirty_range(struct vnode *vp, off_t offset, off_t size)
{
	struct ubc_info *uip;
	memory_object_control_t control;
	kern_return_t kret;

	if (UBCINVALID(vp))
		return (0);

	if (!UBCINFOEXISTS(vp))
		return (0);

	uip = vp->v_ubcinfo;

	control = uip->ui_control;
	assert(control);

	/* Write any dirty pages in the requested range of the file: */
	kret = memory_object_lock_request(control,
				(memory_object_offset_t)offset,
				(memory_object_size_t)round_page_64(size),
				MEMORY_OBJECT_RETURN_DIRTY, FALSE,
				VM_PROT_NO_CHANGE);

	if (kret != KERN_SUCCESS)
		printf("ubc_pushdirty_range: flush failed (error = %d)\n", kret);

	return ((kret == KERN_SUCCESS) ? 1 : 0);
}

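/*
 * Illustrative sketch, not part of the original file: pushing only the
 * range just written rather than the whole file.  The size is rounded
 * up to a page multiple by ubc_pushdirty_range() itself.  The caller
 * name is hypothetical.
 */
#if 0
static int
example_sync_written_range(struct vnode *vp, off_t offset, off_t len)
{
	if (ubc_pushdirty_range(vp, offset, len) == 0)
		return (EIO);
	return (0);
}
#endif
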
/*
 * Make sure the vm object does not vanish
 * returns 1 if the hold count was incremented
 * returns 0 if the hold count was not incremented
 * This return value should be used to balance
 * ubc_hold() and ubc_rele().
 */
int
ubc_hold(struct vnode *vp)
{
	struct ubc_info *uip;
	int    recursed;
	memory_object_control_t object;

retry:

	if (UBCINVALID(vp))
		return (0);

	ubc_lock(vp);
	if (ISSET(vp->v_flag,  VUINIT)) {
		/*
		 * other thread is not done initializing this
		 * yet, wait till it's done and try again
		 */
		while (ISSET(vp->v_flag,  VUINIT)) {
			SET(vp->v_flag, VUWANT); /* XXX overloaded! */
			ubc_unlock(vp);
			(void) tsleep((caddr_t)vp, PINOD, "ubchold", 0);
			ubc_lock(vp);
		}
		ubc_unlock(vp);
		goto retry;
	}
	ubc_unlock(vp);

	if ((recursed = ubc_busy(vp)) == 0) {
		/* must be invalid or dying vnode */
		assert(UBCINVALID(vp) ||
			((vp->v_flag & VXLOCK) || (vp->v_flag & VTERMINATE)));
		return (0);
	}

	uip = vp->v_ubcinfo;
	assert(uip->ui_control != MEMORY_OBJECT_CONTROL_NULL);

	ubc_lock(vp);
	uip->ui_refcount++;
	ubc_unlock(vp);

	if (!ISSET(uip->ui_flags, UI_HASOBJREF)) {
		if (memory_object_recover_named(uip->ui_control, TRUE)
			!= KERN_SUCCESS) {
			if (recursed == 1)
				ubc_unbusy(vp);
			ubc_info_deallocate(uip);
			return (0);
		}
		SET(uip->ui_flags, UI_HASOBJREF);
	}
	if (recursed == 1)
		ubc_unbusy(vp);

	assert(uip->ui_refcount > 0);

	return (1);
}

/*
 * Drop the holdcount.
 * Release the reference on the vm object if this is an "uncached"
 * ubc_info.
 */
void
ubc_rele(struct vnode *vp)
{
	struct ubc_info *uip;

	if (UBCINVALID(vp))
		return;

	if (!UBCINFOEXISTS(vp)) {
		/* nothing more to do for a dying vnode */
		if ((vp->v_flag & VXLOCK) || (vp->v_flag & VTERMINATE))
			return;
		panic("ubc_rele: can not");
	}

	uip = vp->v_ubcinfo;

	if (uip->ui_refcount == 1)
		panic("ubc_rele: ui_refcount");

	--uip->ui_refcount;

	if ((uip->ui_refcount == 1)
		&& ISSET(uip->ui_flags, UI_DONTCACHE))
		(void) ubc_release_named(vp);

	return;
}

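/*
 * Illustrative sketch, not part of the original file: balancing
 * ubc_hold() and ubc_rele() by the hold's return value, as the comment
 * above ubc_hold() prescribes.  The caller name is hypothetical.
 */
#if 0
static void
example_hold_window(struct vnode *vp)
{
	int held;

	held = ubc_hold(vp);	/* 1 if the hold count was incremented */

	/* ... window during which the vm object must not vanish ... */

	if (held)
		ubc_rele(vp);	/* drop only what we actually took */
}
#endif
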
/*
 * The vnode is mapped explicitly, mark it so.
 */
__private_extern__ void
ubc_map(struct vnode *vp)
{
	struct ubc_info *uip;

	if (UBCINVALID(vp))
		return;

	if (!UBCINFOEXISTS(vp))
		return;

	ubc_lock(vp);
	uip = vp->v_ubcinfo;

	SET(uip->ui_flags, UI_WASMAPPED);
	uip->ui_mapped = 1;
	ubc_unlock(vp);

	return;
}

/*
 * Release the memory object reference on the vnode
 * only if it is not in use
 * Return 1 if the reference was released, 0 otherwise.
 */
int
ubc_release_named(struct vnode *vp)
{
	struct ubc_info *uip;
	int    recursed;
	memory_object_control_t control;
	kern_return_t kret = KERN_FAILURE;

	if (UBCINVALID(vp))
		return (0);

	if ((recursed = ubc_busy(vp)) == 0)
		return (0);
	uip = vp->v_ubcinfo;

	/* can not release held or mapped vnodes */
	if (ISSET(uip->ui_flags, UI_HASOBJREF) &&
		(uip->ui_refcount == 1) && !uip->ui_mapped) {
		control = uip->ui_control;
		assert(control);

		// XXXdbg
		if (vp->v_flag & VDELETED) {
			ubc_setsize(vp, (off_t)0);
		}

		CLR(uip->ui_flags, UI_HASOBJREF);
		kret = memory_object_release_name(control,
				MEMORY_OBJECT_RESPECT_CACHE);
	}

	if (recursed == 1)
		ubc_unbusy(vp);
	return ((kret != KERN_SUCCESS) ? 0 : 1);
}

/*
 * This function used to be called by extensions directly.  Some may
 * still exist with this behavior.  In those cases, we will do the
 * release as part of reclaiming or cleaning the vnode.  We don't
 * need anything explicit - so just stub this out until those callers
 * get cleaned up.
 */
int
ubc_release(
	struct vnode	*vp)
{
	return 0;
}

/*
 * destroy the named reference for a given vnode
 */
__private_extern__ int
ubc_destroy_named(
	struct vnode	*vp)
{
	memory_object_control_t control;
	struct proc *p;
	struct ubc_info *uip;
	kern_return_t kret;

	/*
	 * We may already have had the object terminated
	 * and the ubcinfo released as a side effect of
	 * some earlier processing.  If so, pretend we did
	 * it, because it probably was a result of our
	 * efforts.
	 */
	if (!UBCINFOEXISTS(vp))
		return (1);

	uip = vp->v_ubcinfo;

	/* can not destroy held vnodes */
	if (uip->ui_refcount > 1)
		return (0);

	/*
	 * Terminate the memory object.
	 * memory_object_destroy() will result in
	 * vnode_pager_no_senders().
	 * That will release the pager reference
	 * and the vnode will move to the free list.
	 */
	control = ubc_getobject(vp, UBC_HOLDOBJECT);
	if (control != MEMORY_OBJECT_CONTROL_NULL) {

		if (ISSET(vp->v_flag, VTERMINATE))
			panic("ubc_destroy_named: already terminating");
		SET(vp->v_flag, VTERMINATE);

		kret = memory_object_destroy(control, 0);
		if (kret != KERN_SUCCESS)
			return (0);

		/*
		 * memory_object_destroy() is asynchronous
		 * with respect to vnode_pager_no_senders().
		 * wait for vnode_pager_no_senders() to clear
		 * VTERMINATE
		 */
		while (ISSET(vp->v_flag, VTERMINATE)) {
			SET(vp->v_flag, VTERMWANT);
			(void)tsleep((caddr_t)&vp->v_ubcinfo,
					 PINOD, "ubc_destroy_named", 0);
		}
	}
	return (1);
}


/*
 * Invalidate a range in the memory object that backs this
 * vnode. The offset is truncated to the page boundary and the
 * size is adjusted to include the last page in the range.
 */
int
ubc_invalidate(struct vnode *vp, off_t offset, size_t size)
{
	struct ubc_info *uip;
	memory_object_control_t control;
	kern_return_t kret;
	off_t toff;
	size_t tsize;

	if (UBCINVALID(vp))
		return (0);

	if (!UBCINFOEXISTS(vp))
		return (0);

	toff = trunc_page_64(offset);
	tsize = (size_t)(round_page_64(offset+size) - toff);
	uip = vp->v_ubcinfo;
	control = uip->ui_control;
	assert(control);

	/* invalidate pages in the range requested */
	kret = memory_object_lock_request(control,
				(memory_object_offset_t)toff,
				(memory_object_size_t)tsize,
				MEMORY_OBJECT_RETURN_NONE,
				(MEMORY_OBJECT_DATA_NO_CHANGE | MEMORY_OBJECT_DATA_FLUSH),
				VM_PROT_NO_CHANGE);
	if (kret != KERN_SUCCESS)
		printf("ubc_invalidate: invalidate failed (error = %d)\n", kret);

	return ((kret == KERN_SUCCESS) ? 1 : 0);
}

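/*
 * Illustrative sketch, not part of the original file: the page rounding
 * that ubc_invalidate() performs, worked through for a hypothetical
 * 4096-byte page size.
 *
 *	offset = 5000, size = 100:
 *	    toff  = trunc_page_64(5000)        = 4096
 *	    tsize = round_page_64(5100) - 4096 = 8192 - 4096 = 4096
 *
 * so exactly the one page containing bytes [5000, 5100) is flushed.
 */
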
/*
 * Find out whether a vnode is in use by UBC
 * Returns 1 if file is in use by UBC, 0 if not
 */
int
ubc_isinuse(struct vnode *vp, int busycount)
{
	if (!UBCINFOEXISTS(vp))
		return (0);

	if (busycount == 0) {
		printf("ubc_isinuse: called without a valid reference"
		    ": v_tag = %d\n", vp->v_tag);
		vprint("ubc_isinuse", vp);
		return (0);
	}

	if (vp->v_usecount > busycount+1)
		return (1);

	if ((vp->v_usecount == busycount+1)
		&& (vp->v_ubcinfo->ui_mapped == 1))
		return (1);
	else
		return (0);
}

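/*
 * Illustrative sketch, not part of the original file: a typical use of
 * ubc_isinuse() is to refuse an operation while the file is still
 * mapped or otherwise referenced through the UBC.  The caller holds
 * one vnode reference, hence a busycount of 1; the caller name and
 * errno choice are hypothetical.
 */
#if 0
static int
example_deny_if_busy(struct vnode *vp)
{
	if (ubc_isinuse(vp, 1))
		return (EBUSY);
	return (0);
}
#endif
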
/*
 * The backdoor routine to clear the ui_mapped.
 * MUST only be called by the VM
 *
 * Note that this routine is not called under funnel. There are numerous
 * things about the calling sequence that make this work on SMP.
 * Any code change in those paths can break this.
 *
 */
__private_extern__ void
ubc_unmap(struct vnode *vp)
{
	struct ubc_info *uip;
	boolean_t	funnel_state;

	if (UBCINVALID(vp))
		return;

	if (!UBCINFOEXISTS(vp))
		return;

	ubc_lock(vp);
	uip = vp->v_ubcinfo;
	uip->ui_mapped = 0;
	if ((uip->ui_refcount > 1) || !ISSET(uip->ui_flags, UI_DONTCACHE)) {
		ubc_unlock(vp);
		return;
	}
	ubc_unlock(vp);

	funnel_state = thread_funnel_set(kernel_flock, TRUE);
	(void) ubc_release_named(vp);
	(void) thread_funnel_set(kernel_flock, funnel_state);
}

kern_return_t
ubc_page_op(
	struct vnode	*vp,
	off_t		f_offset,
	int		ops,
	ppnum_t		*phys_entryp,
	int		*flagsp)
{
	memory_object_control_t		control;

	control = ubc_getobject(vp, UBC_FLAGS_NONE);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	return (memory_object_page_op(control,
				      (memory_object_offset_t)f_offset,
				      ops,
				      phys_entryp,
				      flagsp));
}

__private_extern__ kern_return_t
ubc_page_op_with_control(
	memory_object_control_t	control,
	off_t			f_offset,
	int			ops,
	ppnum_t			*phys_entryp,
	int			*flagsp)
{
	return (memory_object_page_op(control,
				      (memory_object_offset_t)f_offset,
				      ops,
				      phys_entryp,
				      flagsp));
}

kern_return_t
ubc_range_op(
	struct vnode	*vp,
	off_t		f_offset_beg,
	off_t		f_offset_end,
	int		ops,
	int		*range)
{
	memory_object_control_t		control;

	control = ubc_getobject(vp, UBC_FLAGS_NONE);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	return (memory_object_range_op(control,
				       (memory_object_offset_t)f_offset_beg,
				       (memory_object_offset_t)f_offset_end,
				       ops,
				       range));
}

kern_return_t
ubc_create_upl(
	struct vnode	*vp,
	off_t		f_offset,
	long		bufsize,
	upl_t		*uplp,
	upl_page_info_t	**plp,
	int		uplflags)
{
	memory_object_control_t		control;
	int				count;
	int				ubcflags;
	off_t				file_offset;
	kern_return_t			kr;

	if (bufsize & 0xfff)
		return KERN_INVALID_ARGUMENT;

	if (uplflags & UPL_FOR_PAGEOUT) {
		uplflags &= ~UPL_FOR_PAGEOUT;
		ubcflags = UBC_FOR_PAGEOUT;
	} else
		ubcflags = UBC_FLAGS_NONE;

	control = ubc_getobject(vp, ubcflags);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	if (uplflags & UPL_WILL_BE_DUMPED) {
		uplflags &= ~UPL_WILL_BE_DUMPED;
		uplflags |= (UPL_NO_SYNC|UPL_SET_INTERNAL);
	} else
		uplflags |= (UPL_NO_SYNC|UPL_CLEAN_IN_PLACE|UPL_SET_INTERNAL);
	count = 0;
	kr = memory_object_upl_request(control, f_offset, bufsize,
				       uplp, NULL, &count, uplflags);
	if (plp != NULL)
		*plp = UPL_GET_INTERNAL_PAGE_LIST(*uplp);
	return kr;
}


kern_return_t
ubc_upl_map(
	upl_t		upl,
	vm_offset_t	*dst_addr)
{
	return (vm_upl_map(kernel_map, upl, dst_addr));
}


kern_return_t
ubc_upl_unmap(
	upl_t	upl)
{
	return(vm_upl_unmap(kernel_map, upl));
}

kern_return_t
ubc_upl_commit(
	upl_t		upl)
{
	upl_page_info_t	*pl;
	kern_return_t	kr;

	pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
	kr = upl_commit(upl, pl, MAX_UPL_TRANSFER);
	upl_deallocate(upl);
	return kr;
}


kern_return_t
ubc_upl_commit_range(
	upl_t		upl,
	vm_offset_t	offset,
	vm_size_t	size,
	int		flags)
{
	upl_page_info_t	*pl;
	boolean_t	empty;
	kern_return_t	kr;

	if (flags & UPL_COMMIT_FREE_ON_EMPTY)
		flags |= UPL_COMMIT_NOTIFY_EMPTY;

	pl = UPL_GET_INTERNAL_PAGE_LIST(upl);

	kr = upl_commit_range(upl, offset, size, flags,
			      pl, MAX_UPL_TRANSFER, &empty);

	if ((flags & UPL_COMMIT_FREE_ON_EMPTY) && empty)
		upl_deallocate(upl);

	return kr;
}

kern_return_t
ubc_upl_abort_range(
	upl_t		upl,
	vm_offset_t	offset,
	vm_size_t	size,
	int		abort_flags)
{
	kern_return_t	kr;
	boolean_t	empty = FALSE;

	if (abort_flags & UPL_ABORT_FREE_ON_EMPTY)
		abort_flags |= UPL_ABORT_NOTIFY_EMPTY;

	kr = upl_abort_range(upl, offset, size, abort_flags, &empty);

	if ((abort_flags & UPL_ABORT_FREE_ON_EMPTY) && empty)
		upl_deallocate(upl);

	return kr;
}

kern_return_t
ubc_upl_abort(
	upl_t		upl,
	int		abort_type)
{
	kern_return_t	kr;

	kr = upl_abort(upl, abort_type);
	upl_deallocate(upl);
	return kr;
}

upl_page_info_t *
ubc_upl_pageinfo(
	upl_t		upl)
{
	return (UPL_GET_INTERNAL_PAGE_LIST(upl));
}
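
/*
 * Illustrative sketch, not part of the original file: the typical UPL
 * lifecycle built from the wrappers above.  Create a UPL over a
 * page-aligned range, map it into the kernel map, operate on the
 * pages, unmap, then commit (or abort on error).  The caller name and
 * the choice of zero flag arguments are assumptions.
 */
#if 0
static kern_return_t
example_upl_cycle(struct vnode *vp, off_t f_offset, long bufsize)
{
	upl_t		upl;
	upl_page_info_t	*pl;
	vm_offset_t	dst_addr;
	kern_return_t	kr;

	/* bufsize must be a page multiple; ubc_create_upl() checks this */
	kr = ubc_create_upl(vp, f_offset, bufsize, &upl, &pl, 0);
	if (kr != KERN_SUCCESS)
		return (kr);

	kr = ubc_upl_map(upl, &dst_addr);
	if (kr != KERN_SUCCESS) {
		(void) ubc_upl_abort(upl, 0);	/* aborts and deallocates */
		return (kr);
	}

	/* ... read or modify the pages at dst_addr ... */

	(void) ubc_upl_unmap(upl);
	return (ubc_upl_commit(upl));	/* commits and deallocates the UPL */
}
#endif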