/*
 * Source: apple/xnu release xnu-124.13 — bsd/vm/vnode_pager.c
 * (retrieved from a git web view; breadcrumb text converted to this comment)
 */
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * Mach Operating System
24 * Copyright (c) 1987 Carnegie-Mellon University
25 * All rights reserved. The CMU software License Agreement specifies
26 * the terms and conditions for use and redistribution.
27 */
28 /*
29 * File: vnode_pager.c
30 *
31 * "Swap" pager that pages to/from vnodes. Also
32 * handles demand paging from files.
33 *
34 */
35
36 #include <mach/boolean.h>
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/proc.h>
40 #include <sys/buf.h>
41 #include <sys/uio.h>
42 #include <sys/vnode.h>
43 #include <sys/namei.h>
44 #include <sys/mount.h>
45 #include <sys/ubc.h>
46 #include <sys/lock.h>
47
48 #include <mach/mach_types.h>
49 #include <mach/memory_object_types.h>
50
51 #include <vm/vm_map.h>
52 #include <vm/vm_kern.h>
53 #include <kern/parallel.h>
54 #include <kern/zalloc.h>
55 #include <kern/kalloc.h>
56 #include <libkern/libkern.h>
57
58 #include <vm/vnode_pager.h>
59 #include <vm/vm_pageout.h>
60
61 #include <kern/assert.h>
62
/* Statistics counters for the vnode pager paths below. */
unsigned int vp_pagein=0;	/* pageins served by building our own UPL (vnode_pagein) */
unsigned int vp_pgodirty=0;	/* dirty pages pushed via VOP_PAGEOUT (vnode_pageout) */
unsigned int vp_pgoclean=0;	/* clean/precious pages committed without I/O (vnode_pageout) */
unsigned int dp_pgouts=0;	/* Default pager pageouts */
unsigned int dp_pgins=0;	/* Default pager pageins */
68
69 pager_return_t
70 vnode_pageout(struct vnode *vp,
71 upl_t upl,
72 vm_offset_t upl_offset,
73 vm_object_offset_t f_offset,
74 vm_size_t size,
75 int flags,
76 int *errorp)
77 {
78 int result = PAGER_SUCCESS;
79 struct proc *p = current_proc();
80 int error = 0;
81 int vp_size = 0;
82 int blkno=0, s;
83 int cnt, isize;
84 int pg_index;
85 int offset;
86 struct buf *bp;
87 boolean_t funnel_state;
88 int haveupl=0;
89 void * object;
90 upl_page_info_t *pl;
91 upl_t vpupl;
92
93 funnel_state = thread_funnel_set(kernel_flock, TRUE);
94
95 if (upl != (upl_t)NULL) {
96 haveupl = 1;
97 }
98 isize = (int)size;
99
100 if (isize < 0)
101 panic("-ve count in vnode_pageout");
102 if (isize == 0)
103 panic("vnode_pageout: size == 0\n");
104
105 UBCINFOCHECK("vnode_pageout", vp);
106
107 if (UBCINVALID(vp)) {
108 result = PAGER_ERROR;
109 error = PAGER_ERROR;
110 goto out;
111 }
112 if (haveupl) {
113 /*
114 * This is a pageout form the Default pager,
115 * just go ahead and call VOP_PAGEOUT
116 */
117 dp_pgouts++;
118 if (error = VOP_PAGEOUT(vp, upl, upl_offset,
119 (off_t)f_offset,(size_t)size, p->p_ucred, flags)) {
120 result = PAGER_ERROR;
121 error = PAGER_ERROR;
122 }
123 goto out;
124 }
125
126 object = ubc_getobject(vp, UBC_PAGINGOP|UBC_NOREACTIVATE);
127 if (object == NULL)
128 panic("vnode_pageout: null object");
129 vp_size = ubc_getsize(vp);
130
131 vm_fault_list_request(object,
132 f_offset, isize, &vpupl, NULL, 0,
133 UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_COPYOUT_FROM | UPL_SET_INTERNAL);
134 if (vpupl == (upl_t) 0)
135 panic("vnode_pageout: null upl returned");
136
137 pl = UPL_GET_INTERNAL_PAGE_LIST(vpupl);
138
139 if (vp_size == 0) {
140
141 while (isize) {
142 blkno = ubc_offtoblk(vp, (off_t)f_offset);
143 start0:
144 if (bp = incore(vp, blkno)) {
145 if (ISSET(bp->b_flags, B_BUSY)) {
146 SET(bp->b_flags, B_WANTED);
147 error = tsleep(bp, (PRIBIO + 1), "vnpgout", 0);
148 goto start0;
149 } else {
150 bremfree(bp);
151 SET(bp->b_flags, (B_BUSY|B_INVAL));
152 }
153 }
154 if (bp)
155 brelse(bp);
156 f_offset += PAGE_SIZE;
157 isize -= PAGE_SIZE;
158 }
159 kernel_upl_commit_range(vpupl, 0, size, UPL_COMMIT_FREE_ON_EMPTY,
160 pl, MAX_UPL_TRANSFER);
161
162 error = 0;
163 goto out;
164 }
165 pg_index = 0;
166 offset = 0;
167
168 while (isize) {
169 int xsize;
170 int num_of_pages;
171
172 if ( !upl_valid_page(pl, pg_index)) {
173 kernel_upl_abort_range(vpupl, offset, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);
174
175 offset += PAGE_SIZE;
176 isize -= PAGE_SIZE;
177 pg_index++;
178
179 continue;
180 }
181 if ( !upl_dirty_page(pl, pg_index)) {
182 /*
183 * if the page is not dirty and reached here it is
184 * marked precious or it is due to invalidation in
185 * memory_object_lock request as part of truncation
186 * We also get here from vm_object_terminate()
187 * So all you need to do in these
188 * cases is to invalidate incore buffer if it is there
189 */
190 blkno = ubc_offtoblk(vp, (off_t)(f_offset + offset));
191 s = splbio();
192 vp_pgoclean++;
193 start:
194 if (bp = incore(vp, blkno)) {
195 if (ISSET(bp->b_flags, B_BUSY)) {
196 SET(bp->b_flags, B_WANTED);
197 error = tsleep(bp, (PRIBIO + 1), "vnpgout", 0);
198 goto start;
199 } else {
200 bremfree(bp);
201 SET(bp->b_flags, (B_BUSY|B_INVAL));
202 }
203 }
204 splx(s);
205 if (bp)
206 brelse(bp);
207
208 kernel_upl_commit_range(vpupl, offset, PAGE_SIZE,
209 UPL_COMMIT_FREE_ON_EMPTY, pl, MAX_UPL_TRANSFER);
210
211 offset += PAGE_SIZE;
212 isize -= PAGE_SIZE;
213 pg_index++;
214
215 continue;
216 }
217 vp_pgodirty++;
218
219 num_of_pages = 1;
220 xsize = isize - PAGE_SIZE;
221
222 while (xsize) {
223 if ( !upl_valid_page(pl, pg_index + num_of_pages))
224 break;
225 if ( !upl_dirty_page(pl, pg_index + num_of_pages))
226 break;
227 num_of_pages++;
228 xsize -= PAGE_SIZE;
229 }
230 xsize = num_of_pages * PAGE_SIZE;
231
232 /* By defn callee will commit or abort upls */
233 if (error = VOP_PAGEOUT(vp, vpupl, (vm_offset_t) offset,
234 (off_t)(f_offset + offset),
235 xsize, p->p_ucred, flags & ~UPL_NOCOMMIT)) {
236 result = PAGER_ERROR;
237 error = PAGER_ERROR;
238 }
239 offset += xsize;
240 isize -= xsize;
241 pg_index += num_of_pages;
242 }
243 out:
244 if (errorp)
245 *errorp = result;
246
247 thread_funnel_set(kernel_flock, funnel_state);
248
249 return (error);
250 }
251
252
253 pager_return_t
254 vnode_pagein(
255 struct vnode *vp,
256 upl_t pl,
257 vm_offset_t pl_offset,
258 vm_object_offset_t f_offset,
259 vm_size_t size,
260 int flags,
261 int *errorp)
262 {
263 int result = PAGER_SUCCESS;
264 struct proc *p = current_proc();
265 int error = 0;
266 boolean_t funnel_state;
267 int haveupl=0;
268 void * object;
269 upl_t vpupl;
270 unsigned int ioaddr;
271
272 funnel_state = thread_funnel_set(kernel_flock, TRUE);
273
274 #if 0
275 if(pl->page_list.npages >1 )
276 panic("vnode_pageout: Can't handle more than one page");
277 #endif /* 0 */
278
279 if (pl != (upl_t)NULL) {
280 haveupl = 1;
281 }
282 UBCINFOCHECK("vnode_pagein", vp);
283
284 if (UBCINVALID(vp)) {
285 result = PAGER_ERROR;
286 error = PAGER_ERROR;
287 goto out;
288 }
289
290 if (haveupl) {
291 dp_pgins++;
292 if (error = VOP_PAGEIN(vp, pl, pl_offset, (off_t)f_offset,
293 size,p->p_ucred, flags)) {
294 result = PAGER_ERROR;
295 }
296 } else {
297
298 object = ubc_getobject(vp, UBC_PAGINGOP|UBC_NOREACTIVATE);
299 if (object == (void *)NULL)
300 panic("vnode_pagein: null object");
301 vm_fault_list_request(object, f_offset, size, &vpupl, NULL, 0,
302 UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL);
303
304 if (vpupl == (upl_t) 0)
305 panic("vnode_pagein: null upl returned");
306
307 vp_pagein++;
308
309 /* By defn callee will commit or abort upls */
310 if (error = VOP_PAGEIN(vp, vpupl, (vm_offset_t) 0,
311 (off_t)f_offset, size,p->p_ucred, flags & ~UPL_NOCOMMIT)) {
312 result = PAGER_ERROR;
313 error = PAGER_ERROR;
314 }
315 }
316 out:
317 if (errorp)
318 *errorp = result;
319 thread_funnel_set(kernel_flock, funnel_state);
320
321 return (error);
322 }
323
324 void
325 vnode_pager_shutdown()
326 {
327 int i;
328 extern struct bs_map bs_port_table[];
329 struct vnode *vp;
330
331 for(i = 0; i < MAX_BACKING_STORE; i++) {
332 vp = (struct vnode *)(bs_port_table[i]).vp;
333 if (vp) {
334 (bs_port_table[i]).vp = 0;
335 ubc_rele(vp);
336 /* get rid of macx_swapon() namei() reference */
337 vrele(vp);
338
339 /* get rid of macx_swapon() "extra" reference */
340 vrele(vp);
341 }
342 }
343 }
344
345 void *
346 upl_get_internal_page_list(upl_t upl)
347 {
348 return(UPL_GET_INTERNAL_PAGE_LIST(upl));
349
350 }
351