/*
 * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1987 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */
/*
 *	"Swap" pager that pages to/from vnodes.  Also
 *	handles demand paging from files.
 */
#include <mach/boolean.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/mount.h>
#include <sys/ubc.h>

#include <mach/mach_types.h>
#include <mach/memory_object_types.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <libkern/libkern.h>

#include <vm/vnode_pager.h>
#include <vm/vm_pageout.h>

#include <kern/assert.h>
#include <sys/kdebug.h>
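/*
 * Statistics counters: vp_pagein counts pageins handled here,
 * vp_pgodirty and vp_pgoclean count the dirty vs. clean pages seen
 * during pageout, and dp_pgouts/dp_pgins count requests that arrive
 * with a upl already supplied by the default pager (inferred from the
 * counter names and their use below).
 */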
unsigned int vp_pagein = 0;
unsigned int vp_pgodirty = 0;
unsigned int vp_pgoclean = 0;
unsigned int dp_pgouts = 0;	/* Default pager pageouts */
unsigned int dp_pgins = 0;	/* Default pager pageins */
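
/*
 * Return the current size of the file backing this vnode as tracked by
 * the UBC; a vnode with no valid ubc association is reported as having
 * a zero-length memory object.
 */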
vm_object_offset_t
vnode_pager_get_filesize(struct vnode *vp)
{
    if (UBCINVALID(vp)) {
        return (vm_object_offset_t) 0;
    }

    return (vm_object_offset_t) ubc_getsize(vp);
}
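
/*
 * vnode_pageout: write the pages covered by the request back to the
 * file via VOP_PAGEOUT.  When the default pager hands in a upl, it is
 * pushed out as-is; otherwise a pageout upl is created here and this
 * routine assumes commit/abort responsibility for it.
 */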
pager_return_t
vnode_pageout(struct vnode	*vp,
	upl_t			 upl,
	vm_offset_t		 upl_offset,
	vm_object_offset_t	 f_offset,
	vm_size_t		 size,
	int			 flags,
	int			*errorp)
{
    int			result = PAGER_SUCCESS;
    struct proc		*p = current_proc();
    int			error = 0;
    int			isize, xsize, pg_index, num_of_pages;
    vm_offset_t		offset;
    daddr_t		blkno;
    struct buf		*bp;
    upl_page_info_t	*pl;
    upl_t		vpupl = NULL;
    boolean_t		funnel_state;

    funnel_state = thread_funnel_set(kernel_flock, TRUE);

    isize = (int)size;

    if (isize <= 0) {
        result = error = PAGER_ERROR;
        goto out;
    }
    UBCINFOCHECK("vnode_pageout", vp);

    if (UBCINVALID(vp)) {
        result = error = PAGER_ERROR;

        if (upl && !(flags & UPL_NOCOMMIT))
            ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY);
        goto out;
    }
    if (upl) {
        /*
         * This is a pageout from the Default pager,
         * just go ahead and call VOP_PAGEOUT
         */
        dp_pgouts++;

        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_START,
                              size, 1, 0, 0, 0);

        if (error = VOP_PAGEOUT(vp, upl, upl_offset, (off_t)f_offset,
                                (size_t)size, p->p_ucred, flags))
            result = error = PAGER_ERROR;

        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_END,
                              size, 1, 0, 0, 0);

        goto out;
    }
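    /*
     * No upl was handed in, so this is a pageout against the file itself:
     * build our own pageout upl covering [f_offset, f_offset + isize)
     * and walk it page by page below.
     */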
    ubc_create_upl(vp, f_offset, isize, &vpupl, &pl,
                   UPL_FOR_PAGEOUT | UPL_COPYOUT_FROM | UPL_SET_LITE);

    if (vpupl == (upl_t) 0) {
        result = error = PAGER_ABSENT;
        goto out;
    }
    /*
     * if we get here, we've created the upl and
     * are responsible for committing/aborting it
     * regardless of what the caller has passed in
     */
    flags &= ~UPL_NOCOMMIT;
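
    /*
     * Special case: the file has been truncated to zero length, so there
     * is nothing left to write back.  Each page is simply invalidated in
     * the buffer cache (when that can be done without sleeping) and the
     * upl range is released empty.
     */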
    if (ubc_getsize(vp) == 0) {
        for (offset = 0; isize; isize -= PAGE_SIZE,
                                offset += PAGE_SIZE) {
            blkno = ubc_offtoblk(vp, (off_t)f_offset);
            f_offset += PAGE_SIZE;

            if ((bp = incore(vp, blkno)) &&
                ISSET(bp->b_flags, B_BUSY)) {
                ubc_upl_abort_range(vpupl, offset, PAGE_SIZE,
                                    UPL_ABORT_FREE_ON_EMPTY);
                result = error = PAGER_ERROR;
                continue;
            } else if (bp) {
                bremfree(bp);
                SET(bp->b_flags, B_BUSY | B_INVAL);
                brelse(bp);
            }
            ubc_upl_commit_range(vpupl, offset, PAGE_SIZE,
                                 UPL_COMMIT_FREE_ON_EMPTY);
        }
        goto out;
    }
    pg_index = 0;
    offset   = 0;

    while (isize) {
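        /*
         * walk the upl one page at a time: pages that never made it
         * into the upl are skipped, clean pages are released without
         * I/O, and runs of dirty pages are gathered up and pushed out
         * with a single VOP_PAGEOUT call below.
         */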
        if ( !upl_valid_page(pl, pg_index)) {
            ubc_upl_abort_range(vpupl, offset, PAGE_SIZE,
                                UPL_ABORT_FREE_ON_EMPTY);
            offset   += PAGE_SIZE;
            isize    -= PAGE_SIZE;
            pg_index++;

            continue;
        }
        if ( !upl_dirty_page(pl, pg_index)) {
            /*
             * if the page is not dirty and reached here it is
             * marked precious or it is due to invalidation in
             * memory_object_lock request as part of truncation.
             * We also get here from vm_object_terminate().
             * So all you need to do in these
             * cases is to invalidate the incore buffer if it is there.
             * Note we must not sleep here if B_BUSY - that is
             * a lock inversion which causes deadlock.
             */
            vp_pgoclean++;

            blkno = ubc_offtoblk(vp, (off_t)(f_offset + offset));

            if (vp->v_tag == VT_NFS) {
                /* check with nfs if page is OK to drop */
                error = nfs_buf_page_inval(vp, (off_t)(f_offset + offset));

                if (error) {
                    ubc_upl_abort_range(vpupl, offset, PAGE_SIZE,
                                        UPL_ABORT_FREE_ON_EMPTY);
                    result = error = PAGER_ERROR;
                    offset   += PAGE_SIZE;
                    isize    -= PAGE_SIZE;
                    pg_index++;
                    continue;
                }
            } else if ((bp = incore(vp, blkno)) &&
                       ISSET(bp->b_flags, B_BUSY | B_NEEDCOMMIT)) {
                ubc_upl_abort_range(vpupl, offset, PAGE_SIZE,
                                    UPL_ABORT_FREE_ON_EMPTY);
                result = error = PAGER_ERROR;
                offset   += PAGE_SIZE;
                isize    -= PAGE_SIZE;
                pg_index++;
                continue;
            } else if (bp) {
                bremfree(bp);
                SET(bp->b_flags, B_BUSY | B_INVAL);
                brelse(bp);
            }
            ubc_upl_commit_range(vpupl, offset, PAGE_SIZE,
                                 UPL_COMMIT_FREE_ON_EMPTY);
            offset   += PAGE_SIZE;
            isize    -= PAGE_SIZE;
            pg_index++;

            continue;
        }
        vp_pgodirty++;

        num_of_pages = 1;
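        /*
         * the page at pg_index is dirty: extend the run forward over
         * any immediately following pages that are also valid and
         * dirty, so the whole run can be written with one VOP_PAGEOUT.
         */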
        xsize = isize - PAGE_SIZE;

        while (xsize) {
            if ( !upl_valid_page(pl, pg_index + num_of_pages))
                break;
            if ( !upl_dirty_page(pl, pg_index + num_of_pages))
                break;
            num_of_pages++;
            xsize -= PAGE_SIZE;
        }
        xsize = num_of_pages * PAGE_SIZE;
        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_START,
                              xsize, 0, 0, 0, 0);

        if (error = VOP_PAGEOUT(vp, vpupl, (vm_offset_t)offset,
                                (off_t)(f_offset + offset), xsize,
                                p->p_ucred, flags))
            result = error = PAGER_ERROR;

        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_END,
                              xsize, 0, 0, 0, 0);

        offset   += xsize;
        isize    -= xsize;
        pg_index += num_of_pages;
    }
out:
    if (errorp)
        *errorp = result;

    thread_funnel_set(kernel_flock, funnel_state);

    return (error);
}
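
/*
 * vnode_pagein: bring the requested range of the file into memory via
 * VOP_PAGEIN.  If the caller did not supply a upl, one is created here
 * (UPL_RET_ONLY_ABSENT) and this routine takes over commit/abort
 * responsibility; only the absent runs of the upl actually generate
 * I/O, while already-valid pages are released back unchanged.
 */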
pager_return_t
vnode_pagein(struct vnode	*vp,
	upl_t			 upl,
	vm_offset_t		 upl_offset,
	vm_object_offset_t	 f_offset,
	vm_size_t		 size,
	int			 flags,
	int			*errorp)
{
    struct proc		*p = current_proc();
    upl_page_info_t	*pl;
    int			result = PAGER_SUCCESS;
    int			error = 0;
    int			pages_in_upl;
    int			start_pg, last_pg, first_pg;
    int			xsize;
    int			abort_needed = 1;
    boolean_t		funnel_state;

    funnel_state = thread_funnel_set(kernel_flock, TRUE);
    UBCINFOCHECK("vnode_pagein", vp);

    if (UBCINVALID(vp)) {
        result = PAGER_ERROR;
        error  = PAGER_ERROR;

        if (upl && !(flags & UPL_NOCOMMIT)) {
            ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR);
        }
        goto out;
    }
    if (upl == (upl_t)NULL) {
        if (size > (MAX_UPL_TRANSFER * PAGE_SIZE)) {
            result = PAGER_ERROR;
            error  = PAGER_ERROR;
            goto out;
        }
        ubc_create_upl(vp, f_offset, size, &upl, &pl,
                       UPL_RET_ONLY_ABSENT | UPL_SET_LITE);

        if (upl == (upl_t)NULL) {
            result = PAGER_ABSENT;
            error  = PAGER_ABSENT;
            goto out;
        }
        /*
         * if we get here, we've created the upl and
         * are responsible for committing/aborting it
         * regardless of what the caller has passed in
         */
        flags &= ~UPL_NOCOMMIT;

        upl_offset = 0;
    } else
        pl = ubc_upl_pageinfo(upl);

    pages_in_upl = size / PAGE_SIZE;
    first_pg     = upl_offset / PAGE_SIZE;
    /*
     * before we start marching forward, we must make sure we end on
     * a present page, otherwise we will be working with a freed
     * upl
     */
    for (last_pg = pages_in_upl - 1; last_pg >= first_pg; last_pg--) {
        if (upl_page_present(pl, last_pg))
            break;
    }
    pages_in_upl = last_pg + 1;
    for (last_pg = first_pg; last_pg < pages_in_upl;) {
        /*
         * scan the upl looking for the next
         * page that is present.... if all of the
         * pages are absent, we're done
         */
        for (start_pg = last_pg; last_pg < pages_in_upl; last_pg++) {
            if (upl_page_present(pl, last_pg))
                break;
        }
        if (last_pg == pages_in_upl)
            break;
        /*
         * if we get here, we're sitting on a page
         * that is present... we want to skip over
         * any range of 'valid' pages... if this takes
         * us to the end of the request, then we're done
         */
        for (start_pg = last_pg; last_pg < pages_in_upl; last_pg++) {
            if (!upl_valid_page(pl, last_pg) || !upl_page_present(pl, last_pg))
                break;
        }
        if (last_pg > start_pg) {
            /*
             * we've found a range of valid pages
             * if we've got COMMIT responsibility
             * release this range of pages back to the
             * cache unchanged
             */
            xsize = (last_pg - start_pg) * PAGE_SIZE;

            if (!(flags & UPL_NOCOMMIT))
                ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, xsize, UPL_ABORT_FREE_ON_EMPTY);

            abort_needed = 0;
        }
        if (last_pg == pages_in_upl)
            break;
        if (!upl_page_present(pl, last_pg))
            /*
             * if we found a range of valid pages
             * terminated by a non-present page
             * then start over at the top of the loop
             */
            continue;

        /*
         * scan from the found invalid page looking for a valid
         * or non-present page before the end of the upl is reached, if we
         * find one, then it will be the last page of the request to
         * page in
         */
        for (start_pg = last_pg; last_pg < pages_in_upl; last_pg++) {
            if (upl_valid_page(pl, last_pg) || !upl_page_present(pl, last_pg))
                break;
        }
        if (last_pg > start_pg) {
            int xoff;

            xsize = (last_pg - start_pg) * PAGE_SIZE;
            xoff  = start_pg * PAGE_SIZE;

            if (error = VOP_PAGEIN(vp, upl, (vm_offset_t) xoff,
                                   (off_t)f_offset + xoff,
                                   xsize, p->p_ucred,
                                   flags)) {
                result = PAGER_ERROR;
                error  = PAGER_ERROR;
            }
            abort_needed = 0;
        }
    }
    if (!(flags & UPL_NOCOMMIT) && abort_needed)
        ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY);
out:
    if (errorp)
        *errorp = result;

    thread_funnel_set(kernel_flock, funnel_state);

    return (error);
}
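
/*
 * vnode_pager_shutdown: drop the vnode references that macx_swapon()
 * took for each configured backing-store file so the swap vnodes can
 * be released at shutdown.
 */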
void
vnode_pager_shutdown()
{
    int i;
    extern struct bs_map bs_port_table[];
    struct vnode *vp;

    for (i = 0; i < MAX_BACKING_STORE; i++) {
        vp = (struct vnode *)(bs_port_table[i]).vp;
        if (vp) {
            (bs_port_table[i]).vp = 0;

            /* get rid of macx_swapon() namei() reference */
            vrele(vp);

            /* get rid of macx_swapon() "extra" reference */
            vrele(vp);
        }
    }
}
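
/*
 * Expose the upl's internal page-info list: the same kind of array
 * that upl_valid_page()/upl_dirty_page() are used against above.
 */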
upl_page_info_t *
upl_get_internal_page_list(upl_t upl)
{
    return (UPL_GET_INTERNAL_PAGE_LIST(upl));
}
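
/*
 * Illustrative sketch only (a hypothetical helper, not part of the
 * original pager): count the dirty pages described by a upl, using the
 * upl_get_internal_page_list()/upl_dirty_page() accessors shown above.
 */
static int
upl_count_dirty_pages(upl_t upl, int pages_in_upl)
{
    upl_page_info_t	*pl = upl_get_internal_page_list(upl);
    int			 pg_index;
    int			 dirty = 0;

    for (pg_index = 0; pg_index < pages_in_upl; pg_index++) {
        /* only count pages that are resident, valid and dirty */
        if (upl_page_present(pl, pg_index) &&
            upl_valid_page(pl, pg_index) &&
            upl_dirty_page(pl, pg_index))
            dirty++;
    }
    return (dirty);
}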