/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1987 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */
37 * "Swap" pager that pages to/from vnodes. Also
38 * handles demand paging from files.
#include <mach/boolean.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/kauth.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/vnode_internal.h>
#include <sys/namei.h>
#include <sys/mount_internal.h>	/* needs internal due to fhandle_t */
#include <sys/ubc_internal.h>

#include <mach/mach_types.h>
#include <mach/memory_object_types.h>
#include <mach/memory_object_control.h>
#include <mach/vm_map.h>
#include <mach/mach_vm.h>
#include <mach/upl.h>
#include <mach/sdt.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <libkern/libkern.h>

#include <vm/vnode_pager.h>
#include <vm/vm_pageout.h>

#include <kern/assert.h>
#include <sys/kdebug.h>
#include <machine/spl.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>

#include <vm/vm_protos.h>

uint32_t
vnode_pager_isinuse(struct vnode *vp)
{
	if (vp->v_usecount > vp->v_kusecount)
		return (1);
	return (0);
}

uint32_t
vnode_pager_return_hard_throttle_limit(struct vnode *vp, uint32_t *limit, uint32_t hard_throttle)
{
	return (cluster_hard_throttle_limit(vp, limit, hard_throttle));
}

vm_object_offset_t
vnode_pager_get_filesize(struct vnode *vp)
{
	return (vm_object_offset_t) ubc_getsize(vp);
}

kern_return_t
vnode_pager_get_pathname(
	struct vnode	*vp,
	char		*pathname,
	vm_size_t	*length_p)
{
	int	error, len;

	len = (int) *length_p;
	error = vn_getpath(vp, pathname, &len);
	if (error != 0) {
		return KERN_FAILURE;
	}
	*length_p = (vm_size_t) len;
	return KERN_SUCCESS;
}

kern_return_t
vnode_pager_get_filename(
	struct vnode	*vp,
	const char	**filename)
{
	*filename = vp->v_name;
	return KERN_SUCCESS;
}

kern_return_t
vnode_pager_get_cs_blobs(
	struct vnode	*vp,
	void		**blobs)
{
	*blobs = ubc_get_cs_blobs(vp);
	return KERN_SUCCESS;
}

pager_return_t
vnode_pageout(struct vnode *vp,
	upl_t			upl,
	upl_offset_t		upl_offset,
	vm_object_offset_t	f_offset,
	upl_size_t		size,
	int			flags,
	int			*errorp)
{
	int		result = PAGER_SUCCESS;
	int		error = 0;
	int		error_ret = 0;
	daddr64_t	blkno;
	int		isize;
	int		pg_index;
	int		base_index;
	upl_offset_t	offset;
	upl_page_info_t	*pl;
	vfs_context_t	ctx = vfs_context_current();	/* pager context */

	isize = (int)size;

	if (isize <= 0) {
		result    = PAGER_ERROR;
		error_ret = EINVAL;
		goto out;
	}

	if (UBCINFOEXISTS(vp) == 0) {
		result    = PAGER_ERROR;
		error_ret = EINVAL;

		if (upl && !(flags & UPL_NOCOMMIT))
			ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY);
		goto out;
	}
	if ( !(flags & UPL_VNODE_PAGER)) {
		/*
		 * This is a pageout from the default pager,
		 * just go ahead and call vnop_pageout since
		 * it has already sorted out the dirty ranges
		 */
		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_START,
				      size, 1, 0, 0, 0);

		if ( (error_ret = VNOP_PAGEOUT(vp, upl, upl_offset, (off_t)f_offset,
					       (size_t)size, flags, ctx)) )
			result = PAGER_ERROR;

		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_END,
				      size, 1, 0, 0, 0);

		goto out;
	}
	if (upl == (upl_t)NULL) {
		int	request_flags;

		if (vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSVNOP_PAGEOUTV2) {
			/*
			 * filesystem has requested the new form of VNOP_PAGEOUT for file
			 * backed objects... we will not grab the UPL before calling VNOP_PAGEOUT...
			 * it is the filesystem's responsibility to grab the range we're denoting
			 * via 'f_offset' and 'size' into a UPL... this allows the filesystem to first
			 * take any locks it needs, before effectively locking the pages into a UPL...
			 */
			KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_START,
					      size, (int)f_offset, 0, 0, 0);

			if ( (error_ret = VNOP_PAGEOUT(vp, NULL, upl_offset, (off_t)f_offset,
						       size, flags, ctx)) ) {
				result = PAGER_ERROR;
			}
			KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_END,
					      size, 0, 0, 0, 0);

			goto out;
		}
		if (flags & UPL_MSYNC)
			request_flags = UPL_UBC_MSYNC | UPL_RET_ONLY_DIRTY;
		else
			request_flags = UPL_UBC_PAGEOUT | UPL_RET_ONLY_DIRTY;

		ubc_create_upl(vp, f_offset, size, &upl, &pl, request_flags);

		if (upl == (upl_t)NULL) {
			result    = PAGER_ERROR;
			error_ret = EINVAL;
			goto out;
		}
		upl_offset = 0;
	} else
		pl = ubc_upl_pageinfo(upl);
	/*
	 * we come here for pageouts to 'real' files and
	 * for msyncs...  the upl may not contain any
	 * dirty pages... it's our responsibility to sort
	 * through it and find the 'runs' of dirty pages
	 * to call VNOP_PAGEOUT on...
	 */
	if (ubc_getsize(vp) == 0) {
		/*
		 * if the file has been effectively deleted, then
		 * we need to go through the UPL and invalidate any
		 * buffer headers we might have that reference any
		 * of its pages
		 */
		for (offset = upl_offset; isize; isize -= PAGE_SIZE, offset += PAGE_SIZE) {
			if (vp->v_tag == VT_NFS)
				/* check with nfs if page is OK to drop */
				error = nfs_buf_page_inval(vp, (off_t)f_offset);
			else {
				blkno = ubc_offtoblk(vp, (off_t)f_offset);
				error = buf_invalblkno(vp, blkno, 0);
			}
			if (error) {
				if ( !(flags & UPL_NOCOMMIT))
					ubc_upl_abort_range(upl, offset, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);
				if (error_ret == 0)
					error_ret = error;
				result = PAGER_ERROR;

			} else if ( !(flags & UPL_NOCOMMIT)) {
				ubc_upl_commit_range(upl, offset, PAGE_SIZE, UPL_COMMIT_FREE_ON_EMPTY);
			}
			f_offset += PAGE_SIZE;
		}
		goto out;
	}
	/*
	 * Ignore any non-present pages at the end of the
	 * UPL so that we aren't looking at a upl that
	 * may already have been freed by the preceding
	 * aborts/completions.
	 */
	base_index = upl_offset / PAGE_SIZE;

	for (pg_index = (upl_offset + isize) / PAGE_SIZE; pg_index > base_index;) {
		if (upl_page_present(pl, --pg_index))
			break;
		if (pg_index == base_index) {
			/*
			 * no pages were returned, so release
			 * our hold on the upl and leave
			 */
			if ( !(flags & UPL_NOCOMMIT))
				ubc_upl_abort_range(upl, upl_offset, isize, UPL_ABORT_FREE_ON_EMPTY);

			goto out;
		}
	}
	isize = ((pg_index + 1) - base_index) * PAGE_SIZE;

	offset = upl_offset;
	pg_index = base_index;
	while (isize) {
		int	xsize;
		int	num_of_pages;

		if ( !upl_page_present(pl, pg_index)) {
			/*
			 * we asked for RET_ONLY_DIRTY, so it's possible
			 * to get back empty slots in the UPL
			 * just skip over them
			 */
			f_offset += PAGE_SIZE;
			offset   += PAGE_SIZE;
			isize    -= PAGE_SIZE;
			pg_index++;

			continue;
		}
		if ( !upl_dirty_page(pl, pg_index)) {
			/*
			 * if the page is not dirty and reached here it is
			 * marked precious or it is due to invalidation in
			 * memory_object_lock request as part of truncation
			 * We also get here from vm_object_terminate()
			 * So all you need to do in these
			 * cases is to invalidate incore buffer if it is there
			 * Note we must not sleep here if the buffer is busy - that is
			 * a lock inversion which causes deadlock.
			 */
			if (vp->v_tag == VT_NFS)
				/* check with nfs if page is OK to drop */
				error = nfs_buf_page_inval(vp, (off_t)f_offset);
			else {
				blkno = ubc_offtoblk(vp, (off_t)f_offset);
				error = buf_invalblkno(vp, blkno, 0);
			}
			if (error) {
				if ( !(flags & UPL_NOCOMMIT))
					ubc_upl_abort_range(upl, offset, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);
				if (error_ret == 0)
					error_ret = error;
				result = PAGER_ERROR;

			} else if ( !(flags & UPL_NOCOMMIT)) {
				ubc_upl_commit_range(upl, offset, PAGE_SIZE, UPL_COMMIT_FREE_ON_EMPTY);
			}
			f_offset += PAGE_SIZE;
			offset   += PAGE_SIZE;
			isize    -= PAGE_SIZE;
			pg_index++;

			continue;
		}
		num_of_pages = 1;
		xsize = isize - PAGE_SIZE;

		while (xsize) {
			if ( !upl_dirty_page(pl, pg_index + num_of_pages))
				break;
			num_of_pages++;
			xsize -= PAGE_SIZE;
		}
		xsize = num_of_pages * PAGE_SIZE;

		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_START,
				      xsize, (int)f_offset, 0, 0, 0);

		if ( (error = VNOP_PAGEOUT(vp, upl, offset, (off_t)f_offset,
					   xsize, flags, ctx)) ) {
			if (error_ret == 0)
				error_ret = error;
			result = PAGER_ERROR;
		}
		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_END,
				      xsize, 0, 0, 0, 0);

		f_offset += xsize;
		offset   += xsize;
		isize    -= xsize;
		pg_index += num_of_pages;
	}
out:
	if (errorp)
		*errorp = error_ret;

	return (result);
}

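/*
 * Illustrative sketch, not part of the original file: what the
 * VFC_VFSVNOP_PAGEOUTV2 path above expects of a filesystem.  With that
 * flag set, vnode_pageout() passes a NULL upl, and the vnop itself must
 * take its own locks first and only then pin the denoted range into a
 * UPL.  'examplefs_vnop_pageout' is hypothetical; locking, the dirty-run
 * scan, and the actual I/O are elided.
 */
#if 0
static int
examplefs_vnop_pageout(struct vnop_pageout_args *ap)
{
	upl_t		upl = NULL;
	upl_page_info_t	*pl = NULL;

	/*
	 * take any filesystem locks first (elided), *then* lock the
	 * range [a_f_offset, a_f_offset + a_size) into a UPL...
	 */
	ubc_create_upl(ap->a_vp, ap->a_f_offset, (int)ap->a_size,
		       &upl, &pl, UPL_UBC_PAGEOUT | UPL_RET_ONLY_DIRTY);
	if (upl == NULL)
		return (EINVAL);

	/* scan 'pl' for dirty runs and issue the writes (elided)... */

	/* commit (or abort) the UPL once the I/O has been resolved */
	ubc_upl_commit_range(upl, 0, (upl_size_t)ap->a_size, UPL_COMMIT_FREE_ON_EMPTY);
	return (0);
}
#endif
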
pager_return_t
vnode_pagein(
	struct vnode		*vp,
	upl_t			upl,
	upl_offset_t		upl_offset,
	vm_object_offset_t	f_offset,
	upl_size_t		size,
	int			flags,
	int			*errorp)
{
	struct uthread	*ut;
	upl_page_info_t	*pl;
	int		result = PAGER_SUCCESS;
	int		error = 0;
	int		pages_in_upl;
	int		start_pg;
	int		last_pg;
	int		first_pg;
	int		xsize;
	int		must_commit = 1;
	if (flags & UPL_NOCOMMIT)
		must_commit = 0;

	if (UBCINFOEXISTS(vp) == 0) {
		result = PAGER_ERROR;
		error  = PAGER_ERROR;

		if (upl && must_commit)
			ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR);

		goto out;
	}
	if (upl == (upl_t)NULL) {
		flags &= ~UPL_NOCOMMIT;

		if (size > (MAX_UPL_SIZE * PAGE_SIZE)) {
			result = PAGER_ERROR;
			error  = PAGER_ERROR;
			goto out;
		}
		if (vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSVNOP_PAGEINV2) {
			/*
			 * filesystem has requested the new form of VNOP_PAGEIN for file
			 * backed objects... we will not grab the UPL before calling VNOP_PAGEIN...
			 * it is the filesystem's responsibility to grab the range we're denoting
			 * via 'f_offset' and 'size' into a UPL... this allows the filesystem to first
			 * take any locks it needs, before effectively locking the pages into a UPL...
			 * so we pass a NULL into the filesystem instead of a UPL pointer... the 'upl_offset'
			 * is used to identify the "must have" page in the extent... the filesystem is free
			 * to clip the extent to better fit the underlying FS blocksize if it desires as
			 * long as it continues to include the "must have" page... 'f_offset' + 'upl_offset'
			 * identifies that page
			 */
			if ( (error = VNOP_PAGEIN(vp, NULL, upl_offset, (off_t)f_offset,
						  size, flags, vfs_context_current())) ) {
				result = PAGER_ERROR;
				error  = PAGER_ERROR;
			}
			goto out;
		}
		ubc_create_upl(vp, f_offset, size, &upl, &pl, UPL_UBC_PAGEIN | UPL_RET_ONLY_ABSENT);

		if (upl == (upl_t)NULL) {
			result = PAGER_ABSENT;
			error  = PAGER_ABSENT;
			goto out;
		}
		upl_offset = 0;
		first_pg = 0;

		/*
		 * if we get here, we've created the upl and
		 * are responsible for committing/aborting it
		 * regardless of what the caller has passed in
		 */
		must_commit = 1;
	} else {
		pl = ubc_upl_pageinfo(upl);
		first_pg = upl_offset / PAGE_SIZE;
	}
	pages_in_upl = size / PAGE_SIZE;
	DTRACE_VM2(pgpgin, int, pages_in_upl, (uint64_t *), NULL);
	/*
	 * before we start marching forward, we must make sure we end on
	 * a present page, otherwise we will be working with a freed
	 * upl
	 */
	for (last_pg = pages_in_upl - 1; last_pg >= first_pg; last_pg--) {
		if (upl_page_present(pl, last_pg))
			break;
		if (last_pg == first_pg) {
			/*
			 * empty UPL, no pages are present
			 */
			if (must_commit)
				ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY);
			goto out;
		}
	}
	pages_in_upl = last_pg + 1;
	last_pg = first_pg;
	while (last_pg < pages_in_upl) {
		/*
		 * skip over missing pages...
		 */
		for ( ; last_pg < pages_in_upl; last_pg++) {
			if (upl_page_present(pl, last_pg))
				break;
		}
		/*
		 * skip over 'valid' pages... we don't want to issue I/O for these
		 */
		for (start_pg = last_pg; last_pg < pages_in_upl; last_pg++) {
			if (!upl_valid_page(pl, last_pg))
				break;
		}
		if (last_pg > start_pg) {
			/*
			 * we've found a range of valid pages
			 * if we've got COMMIT responsibility
			 * commit this range of pages back to the
			 * cache unchanged
			 */
			xsize = (last_pg - start_pg) * PAGE_SIZE;

			if (must_commit)
				ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, xsize, UPL_ABORT_FREE_ON_EMPTY);
		}
		if (last_pg == pages_in_upl)
			/*
			 * we're done... all pages that were present
			 * have either had I/O issued on them or
			 * were aborted unchanged...
			 */
			break;
		if (!upl_page_present(pl, last_pg)) {
			/*
			 * we found a range of valid pages
			 * terminated by a missing page...
			 * bump index to the next page and continue on
			 */
			last_pg++;
			continue;
		}
		/*
		 * scan from the found invalid page looking for a valid
		 * or non-present page before the end of the upl is reached, if we
		 * find one, then it will be the last page of the request to
		 * 'cluster_io'
		 */
		for (start_pg = last_pg; last_pg < pages_in_upl; last_pg++) {
			if (upl_valid_page(pl, last_pg) || !upl_page_present(pl, last_pg))
				break;
		}
		if (last_pg > start_pg) {
			int	xoff;

			xsize = (last_pg - start_pg) * PAGE_SIZE;
			xoff  = start_pg * PAGE_SIZE;

			if ( (error = VNOP_PAGEIN(vp, upl, (upl_offset_t) xoff,
						  (off_t)f_offset + xoff,
						  xsize, flags, vfs_context_current())) ) {
				/*
				 * Usually this UPL will be aborted/committed by the lower cluster layer.
				 * In the case of decmpfs, however, we may return an error (EAGAIN) to avoid
				 * a deadlock with another thread already inflating the file. In that case,
				 * we must take care of our UPL at this layer itself.
				 */
				if (must_commit) {
					if (error == EAGAIN) {
						ubc_upl_abort_range(upl, (upl_offset_t) xoff, xsize, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_RESTART);
					}
				}
				result = PAGER_ERROR;
				error  = PAGER_ERROR;
			}
		}
	}
out:
	if (errorp)
		*errorp = result;
	ut = get_bsdthread_info(current_thread());

	if (ut->uu_lowpri_window) {
		/*
		 * task is marked as a low priority I/O type
		 * and the I/O we issued while in this page fault
		 * collided with normal I/O operations... we'll
		 * delay in order to mitigate the impact of this
		 * task on the normal operation of the system
		 */
		throttle_lowpri_io(TRUE);
	}
	return (error);
}

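/*
 * Illustrative sketch, not part of the original file: how a
 * VFC_VFSVNOP_PAGEINV2 filesystem might clip the extent handed to it in
 * the NULL-upl path of vnode_pagein() above, while still covering the
 * "must have" page at f_offset + upl_offset.  'examplefs_vnop_pagein'
 * and 'EXAMPLEFS_BLOCKSIZE' (assumed a power of two) are hypothetical.
 */
#if 0
static int
examplefs_vnop_pagein(struct vnop_pagein_args *ap)
{
	upl_t		upl = NULL;
	upl_page_info_t	*pl = NULL;
	off_t		must_have = ap->a_f_offset + ap->a_pl_offset;
	off_t		start;

	/* round down to the FS block boundary containing the must-have page */
	start = must_have & ~((off_t)EXAMPLEFS_BLOCKSIZE - 1);

	/*
	 * [start, start + EXAMPLEFS_BLOCKSIZE) still includes 'must_have';
	 * take FS locks (elided), then pin the clipped extent and fill it.
	 */
	ubc_create_upl(ap->a_vp, start, EXAMPLEFS_BLOCKSIZE, &upl, &pl,
		       UPL_UBC_PAGEIN | UPL_RET_ONLY_ABSENT);
	if (upl == NULL)
		return (EINVAL);

	/* ... issue the reads, then commit or abort the UPL ... */
	return (0);
}
#endif
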
void
vnode_pager_shutdown(void)
{
	int	i;
	vnode_t	vp;

	for (i = 0; i < MAX_BACKING_STORE; i++) {
		vp = (vnode_t)(bs_port_table[i]).vp;
		if (vp) {
			(bs_port_table[i]).vp = 0;

			/* get rid of macx_swapon() reference */
			vnode_rele(vp);
		}
	}
}

void *
upl_get_internal_page_list(upl_t upl)
{
	return (UPL_GET_INTERNAL_PAGE_LIST(upl));
}
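/*
 * Example use (hypothetical caller, not part of the original file): the
 * returned list can be examined with the upl_page_*() accessors, e.g.
 *
 *	upl_page_info_t *pl = upl_get_internal_page_list(upl);
 *	if (pl != NULL && upl_page_present(pl, 0))
 *		;	// first page of the UPL is present
 */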