2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
20 * @APPLE_LICENSE_HEADER_END@
23 * Mach Operating System
24 * Copyright (c) 1987 Carnegie-Mellon University
25 * All rights reserved. The CMU software License Agreement specifies
26 * the terms and conditions for use and redistribution.
31 * "Swap" pager that pages to/from vnodes. Also
32 * handles demand paging from files.
36 #include <mach/boolean.h>
37 #include <sys/param.h>
38 #include <sys/systm.h>
42 #include <sys/vnode.h>
43 #include <sys/namei.h>
44 #include <sys/mount.h>
48 #include <mach/mach_types.h>
49 #include <mach/memory_object_types.h>
51 #include <vm/vm_map.h>
52 #include <vm/vm_kern.h>
53 #include <kern/parallel.h>
54 #include <kern/zalloc.h>
55 #include <kern/kalloc.h>
56 #include <libkern/libkern.h>
58 #include <vm/vnode_pager.h>
59 #include <vm/vm_pageout.h>
61 #include <kern/assert.h>
/*
 * Vnode pager statistics counters.  All start at zero; the code that
 * increments them lies outside this fragment.
 */
unsigned int vp_pagein = 0;
unsigned int vp_pgodirty = 0;
unsigned int vp_pgoclean = 0;
unsigned int dp_pgouts = 0;	/* Default pager pageouts */
unsigned int dp_pgins = 0;	/* Default pager pageins */
/*
 * vnode_pageout -- push pages backed by a vnode out to the underlying file.
 *
 * NOTE(review): this block is an incomplete extraction; many original
 * source lines are missing between the numbered fragments below (the
 * leading numbers are original file line numbers).  Comments describe
 * only what the visible code shows -- confirm against the full source.
 */
70 vnode_pageout(struct vnode
*vp
,
72 vm_offset_t upl_offset
,
73 vm_object_offset_t f_offset
,
/* result defaults to success and is downgraded to PAGER_ERROR on failure */
78 int result
= PAGER_SUCCESS
;
79 struct proc
*p
= current_proc();
87 boolean_t funnel_state
;
/* take the kernel funnel; the saved state is restored before returning */
93 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
/* caller supplied a UPL: this pageout comes in from the default pager */
95 if (upl
!= (upl_t
)NULL
) {
101 panic("-ve count in vnode_pageout");
103 panic("vnode_pageout: size == 0\n");
105 UBCINFOCHECK("vnode_pageout", vp
);
/* a vnode with invalid UBC info cannot be paged; report an error */
107 if (UBCINVALID(vp
)) {
108 result
= PAGER_ERROR
;
/*
 * This is a pageout from the Default pager,
 * just go ahead and call VOP_PAGEOUT
 * (assignment inside the condition is intended)
 */
118 if (error
= VOP_PAGEOUT(vp
, upl
, upl_offset
,
119 (off_t
)f_offset
,(size_t)size
, p
->p_ucred
, flags
)) {
120 result
= PAGER_ERROR
;
/* no UPL from the caller: build one against the vnode's VM object */
126 object
= ubc_getobject(vp
, UBC_PAGINGOP
|UBC_NOREACTIVATE
);
128 panic("vnode_pageout: null object");
129 vp_size
= ubc_getsize(vp
);
/* request a page list (UPL) covering the range to be paged out */
131 vm_fault_list_request(object
,
132 f_offset
, isize
, &vpupl
, NULL
, 0,
133 UPL_NO_SYNC
| UPL_CLEAN_IN_PLACE
| UPL_COPYOUT_FROM
| UPL_SET_INTERNAL
);
134 if (vpupl
== (upl_t
) 0)
135 panic("vnode_pageout: null upl returned");
137 pl
= UPL_GET_INTERNAL_PAGE_LIST(vpupl
);
/* invalidate any incore buffer overlapping this file block */
142 blkno
= ubc_offtoblk(vp
, (off_t
)f_offset
);
144 if (bp
= incore(vp
, blkno
)) {
/* buffer busy elsewhere: request a wakeup and sleep until it is released */
145 if (ISSET(bp
->b_flags
, B_BUSY
)) {
146 SET(bp
->b_flags
, B_WANTED
);
147 error
= tsleep(bp
, (PRIBIO
+ 1), "vnpgout", 0);
/* claim the buffer and mark its cached contents invalid */
151 SET(bp
->b_flags
, (B_BUSY
|B_INVAL
));
156 f_offset
+= PAGE_SIZE
;
/* commit the UPL range, freeing it once empty */
159 kernel_upl_commit_range(vpupl
, 0, size
, UPL_COMMIT_FREE_ON_EMPTY
,
160 pl
, MAX_UPL_TRANSFER
);
/* page not resident in the UPL: abort (free) just this one page */
172 if ( !upl_valid_page(pl
, pg_index
)) {
173 kernel_upl_abort_range(vpupl
, offset
, PAGE_SIZE
, UPL_ABORT_FREE_ON_EMPTY
);
181 if ( !upl_dirty_page(pl
, pg_index
)) {
/*
 * if the page is not dirty and reached here it is
 * marked precious or it is due to invalidation in
 * memory_object_lock request as part of truncation.
 * We also get here from vm_object_terminate().
 * So all you need to do in these
 * cases is to invalidate the incore buffer if it is there.
 */
190 blkno
= ubc_offtoblk(vp
, (off_t
)(f_offset
+ offset
));
194 if (bp
= incore(vp
, blkno
)) {
195 if (ISSET(bp
->b_flags
, B_BUSY
)) {
196 SET(bp
->b_flags
, B_WANTED
);
197 error
= tsleep(bp
, (PRIBIO
+ 1), "vnpgout", 0);
201 SET(bp
->b_flags
, (B_BUSY
|B_INVAL
));
/* clean page fully handled here: commit this single page and release it */
208 kernel_upl_commit_range(vpupl
, offset
, PAGE_SIZE
,
209 UPL_COMMIT_FREE_ON_EMPTY
, pl
, MAX_UPL_TRANSFER
);
220 xsize
= isize
- PAGE_SIZE
;
/* extend the run while the following pages are both valid and dirty */
223 if ( !upl_valid_page(pl
, pg_index
+ num_of_pages
))
225 if ( !upl_dirty_page(pl
, pg_index
+ num_of_pages
))
230 xsize
= num_of_pages
* PAGE_SIZE
;
/* By definition the callee will commit or abort the UPLs
 * (assignment inside the condition is intended) */
233 if (error
= VOP_PAGEOUT(vp
, vpupl
, (vm_offset_t
) offset
,
234 (off_t
)(f_offset
+ offset
),
235 xsize
, p
->p_ucred
, flags
& ~UPL_NOCOMMIT
)) {
236 result
= PAGER_ERROR
;
241 pg_index
+= num_of_pages
;
/* restore the caller's funnel state */
247 thread_funnel_set(kernel_flock
, funnel_state
);
/*
 * vnode_pagein -- read pages for a vnode in from the underlying file.
 *
 * NOTE(review): incomplete extraction -- the function header and many
 * body lines are missing (the leading numbers are original file line
 * numbers).  Comments cover only the visible code.
 */
257 vm_offset_t pl_offset
,
258 vm_object_offset_t f_offset
,
/* result defaults to success and is downgraded to PAGER_ERROR on failure */
263 int result
= PAGER_SUCCESS
;
264 struct proc
*p
= current_proc();
266 boolean_t funnel_state
;
/* take the kernel funnel; the saved state is restored before returning */
272 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
/*
 * only a single page is handled per request here.
 * NOTE(review): the panic text says "vnode_pageout" but this is the
 * pagein path -- misleading message, left unchanged in this pass.
 */
275 if(pl
->page_list
.npages
>1 )
276 panic("vnode_pageout: Can't handle more than one page");
/* caller supplied a UPL: pagein on behalf of the default pager */
279 if (pl
!= (upl_t
)NULL
) {
282 UBCINFOCHECK("vnode_pagein", vp
);
/* a vnode with invalid UBC info cannot be paged; report an error */
284 if (UBCINVALID(vp
)) {
285 result
= PAGER_ERROR
;
/* hand the caller's UPL straight to the filesystem
 * (assignment inside the condition is intended) */
292 if (error
= VOP_PAGEIN(vp
, pl
, pl_offset
, (off_t
)f_offset
,
293 size
,p
->p_ucred
, flags
)) {
294 result
= PAGER_ERROR
;
/* no UPL from the caller: build one against the vnode's VM object */
298 object
= ubc_getobject(vp
, UBC_PAGINGOP
|UBC_NOREACTIVATE
);
299 if (object
== (void *)NULL
)
300 panic("vnode_pagein: null object");
301 vm_fault_list_request(object
, f_offset
, size
, &vpupl
, NULL
, 0,
302 UPL_NO_SYNC
| UPL_CLEAN_IN_PLACE
| UPL_SET_INTERNAL
);
304 if (vpupl
== (upl_t
) 0)
305 panic("vnode_pagein: null upl returned");
/* By definition the callee will commit or abort the UPL
 * (assignment inside the condition is intended) */
310 if (error
= VOP_PAGEIN(vp
, vpupl
, (vm_offset_t
) 0,
311 (off_t
)f_offset
, size
,p
->p_ucred
, flags
& ~UPL_NOCOMMIT
)) {
312 result
= PAGER_ERROR
;
/* restore the caller's funnel state */
319 thread_funnel_set(kernel_flock
, funnel_state
);
/*
 * vnode_pager_shutdown -- walk the backing-store table and detach the
 * swap-file vnodes registered by macx_swapon().
 *
 * NOTE(review): incomplete extraction; lines are missing between the
 * numbered fragments below, including the calls the trailing comments
 * refer to.
 */
325 vnode_pager_shutdown()
/* table of backing-store files, defined elsewhere */
328 extern struct bs_map bs_port_table
[];
/* examine every backing-store slot */
331 for(i
= 0; i
< MAX_BACKING_STORE
; i
++) {
332 vp
= (struct vnode
*)(bs_port_table
[i
]).vp
;
/* clear the slot so the vnode cannot be reused from the table */
334 (bs_port_table
[i
]).vp
= 0;
/* get rid of macx_swapon() namei() reference
 * (the release call itself is on a line missing from this extraction) */
/* get rid of macx_swapon() "extra" reference
 * (the release call itself is on a line missing from this extraction) */
/*
 * upl_get_internal_page_list -- out-of-line wrapper around the
 * UPL_GET_INTERNAL_PAGE_LIST() macro, for callers that need a real
 * function rather than a macro.
 * NOTE(review): the return type and braces fall on lines missing from
 * this extraction -- confirm against the full source.
 */
346 upl_get_internal_page_list(upl_t upl
)
348 return(UPL_GET_INTERNAL_PAGE_LIST(upl
));