/*
 * Retrieved from the git.saurik.com mirror of apple/xnu.git
 * blob 8b1be28436be2b6a042ede8aa7cca4a1dc781234 — bsd/vm/vnode_pager.c
 */
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25 /*
26 * Mach Operating System
27 * Copyright (c) 1987 Carnegie-Mellon University
28 * All rights reserved. The CMU software License Agreement specifies
29 * the terms and conditions for use and redistribution.
30 */
31 /*
32 * File: vnode_pager.c
33 *
34 * "Swap" pager that pages to/from vnodes. Also
35 * handles demand paging from files.
36 *
37 */
38
39 #include <mach/boolean.h>
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/proc.h>
43 #include <sys/buf.h>
44 #include <sys/uio.h>
45 #include <sys/vnode.h>
46 #include <sys/namei.h>
47 #include <sys/mount.h>
48 #include <sys/ubc.h>
49 #include <sys/lock.h>
50
51 #include <mach/mach_types.h>
52 #include <mach/memory_object_types.h>
53
54 #include <vm/vm_map.h>
55 #include <vm/vm_kern.h>
56 #include <kern/zalloc.h>
57 #include <kern/kalloc.h>
58 #include <libkern/libkern.h>
59
60 #include <vm/vnode_pager.h>
61 #include <vm/vm_pageout.h>
62
63 #include <kern/assert.h>
64 #include <sys/kdebug.h>
65
unsigned int vp_pagein=0;	/* pageins served by vnode_pagein() via a upl it created itself */
unsigned int vp_pgodirty=0;	/* dirty pages pushed to disk by vnode_pageout() */
unsigned int vp_pgoclean=0;	/* clean/precious pages vnode_pageout() committed without I/O */
unsigned int dp_pgouts=0;	/* Default pager pageouts */
unsigned int dp_pgins=0;	/* Default pager pageins */
71
72 vm_object_offset_t
73 vnode_pager_get_filesize(struct vnode *vp)
74 {
75 if (UBCINVALID(vp)) {
76 return (vm_object_offset_t) 0;
77 }
78
79 return (vm_object_offset_t) ubc_getsize(vp);
80
81 }
82
83 pager_return_t
84 vnode_pageout(struct vnode *vp,
85 upl_t upl,
86 vm_offset_t upl_offset,
87 vm_object_offset_t f_offset,
88 vm_size_t size,
89 int flags,
90 int *errorp)
91 {
92 int result = PAGER_SUCCESS;
93 struct proc *p = current_proc();
94 int error = 0;
95 int blkno=0, s;
96 int cnt, isize;
97 int pg_index;
98 int offset;
99 struct buf *bp;
100 boolean_t funnel_state;
101 upl_page_info_t *pl;
102 upl_t vpupl = NULL;
103
104 funnel_state = thread_funnel_set(kernel_flock, TRUE);
105
106 isize = (int)size;
107
108 if (isize <= 0) {
109 result = error = PAGER_ERROR;
110 goto out;
111 }
112 UBCINFOCHECK("vnode_pageout", vp);
113
114 if (UBCINVALID(vp)) {
115 result = error = PAGER_ERROR;
116
117 if (upl && !(flags & UPL_NOCOMMIT))
118 ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY);
119 goto out;
120 }
121 if (upl) {
122 /*
123 * This is a pageout from the Default pager,
124 * just go ahead and call VOP_PAGEOUT
125 */
126 dp_pgouts++;
127
128 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_START,
129 size, 1, 0, 0, 0);
130
131 if (error = VOP_PAGEOUT(vp, upl, upl_offset, (off_t)f_offset,
132 (size_t)size, p->p_ucred, flags))
133 result = error = PAGER_ERROR;
134
135 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_END,
136 size, 1, 0, 0, 0);
137
138 goto out;
139 }
140 ubc_create_upl(vp, f_offset, isize, &vpupl, &pl, UPL_FOR_PAGEOUT | UPL_COPYOUT_FROM);
141
142 if (vpupl == (upl_t) 0) {
143 result = error = PAGER_ABSENT;
144 goto out;
145 }
146 /*
147 * if we get here, we've created the upl and
148 * are responsible for commiting/aborting it
149 * regardless of what the caller has passed in
150 */
151 flags &= ~UPL_NOCOMMIT;
152
153 if (ubc_getsize(vp) == 0) {
154 for (offset = 0; isize; isize -= PAGE_SIZE,
155 offset += PAGE_SIZE) {
156 blkno = ubc_offtoblk(vp, (off_t)f_offset);
157 f_offset += PAGE_SIZE;
158 if ((bp = incore(vp, blkno)) &&
159 ISSET(bp->b_flags, B_BUSY)) {
160 ubc_upl_abort_range(vpupl, offset, PAGE_SIZE,
161 UPL_ABORT_FREE_ON_EMPTY);
162 result = error = PAGER_ERROR;
163 continue;
164 } else if (bp) {
165 bremfree(bp);
166 SET(bp->b_flags, B_BUSY | B_INVAL);
167 brelse(bp);
168 }
169 ubc_upl_commit_range(vpupl, offset, PAGE_SIZE,
170 UPL_COMMIT_FREE_ON_EMPTY);
171 }
172 goto out;
173 }
174 pg_index = 0;
175 offset = 0;
176
177 while (isize) {
178 int xsize;
179 int num_of_pages;
180
181 if ( !upl_valid_page(pl, pg_index)) {
182 ubc_upl_abort_range(vpupl, offset, PAGE_SIZE,
183 UPL_ABORT_FREE_ON_EMPTY);
184 offset += PAGE_SIZE;
185 isize -= PAGE_SIZE;
186 pg_index++;
187
188 continue;
189 }
190 if ( !upl_dirty_page(pl, pg_index)) {
191 /*
192 * if the page is not dirty and reached here it is
193 * marked precious or it is due to invalidation in
194 * memory_object_lock request as part of truncation
195 * We also get here from vm_object_terminate()
196 * So all you need to do in these
197 * cases is to invalidate incore buffer if it is there
198 * Note we must not sleep here if B_BUSY - that is
199 * a lock inversion which causes deadlock.
200 */
201 blkno = ubc_offtoblk(vp, (off_t)(f_offset + offset));
202 s = splbio();
203 vp_pgoclean++;
204 if ((bp = incore(vp, blkno)) &&
205 ISSET(bp->b_flags, B_BUSY | B_NEEDCOMMIT)) {
206 splx(s);
207 ubc_upl_abort_range(vpupl, offset, PAGE_SIZE,
208 UPL_ABORT_FREE_ON_EMPTY);
209 result = error = PAGER_ERROR;
210 offset += PAGE_SIZE;
211 isize -= PAGE_SIZE;
212 pg_index++;
213 continue;
214 } else if (bp) {
215 bremfree(bp);
216 SET(bp->b_flags, B_BUSY | B_INVAL );
217 splx(s);
218 brelse(bp);
219 } else
220 splx(s);
221
222 ubc_upl_commit_range(vpupl, offset, PAGE_SIZE,
223 UPL_COMMIT_FREE_ON_EMPTY);
224 offset += PAGE_SIZE;
225 isize -= PAGE_SIZE;
226 pg_index++;
227
228 continue;
229 }
230 vp_pgodirty++;
231
232 num_of_pages = 1;
233 xsize = isize - PAGE_SIZE;
234
235 while (xsize) {
236 if ( !upl_valid_page(pl, pg_index + num_of_pages))
237 break;
238 if ( !upl_dirty_page(pl, pg_index + num_of_pages))
239 break;
240 num_of_pages++;
241 xsize -= PAGE_SIZE;
242 }
243 xsize = num_of_pages * PAGE_SIZE;
244
245 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_START,
246 xsize, 0, 0, 0, 0);
247
248 if (error = VOP_PAGEOUT(vp, vpupl, (vm_offset_t)offset,
249 (off_t)(f_offset + offset), xsize,
250 p->p_ucred, flags))
251 result = error = PAGER_ERROR;
252
253 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_END,
254 xsize, 0, 0, 0, 0);
255
256 offset += xsize;
257 isize -= xsize;
258 pg_index += num_of_pages;
259 }
260 out:
261 if (errorp)
262 *errorp = result;
263
264 thread_funnel_set(kernel_flock, funnel_state);
265
266 return (error);
267 }
268
269
270 pager_return_t
271 vnode_pagein(
272 struct vnode *vp,
273 upl_t upl,
274 vm_offset_t upl_offset,
275 vm_object_offset_t f_offset,
276 vm_size_t size,
277 int flags,
278 int *errorp)
279 {
280 struct proc *p = current_proc();
281 upl_page_info_t *pl;
282 int result = PAGER_SUCCESS;
283 int error = 0;
284 int xfer_size;
285 int pages_in_upl;
286 int start_pg;
287 int last_pg;
288 int first_pg;
289 int xsize;
290 int abort_needed = 1;
291 boolean_t funnel_state;
292
293
294 funnel_state = thread_funnel_set(kernel_flock, TRUE);
295
296 UBCINFOCHECK("vnode_pagein", vp);
297
298 if (UBCINVALID(vp)) {
299 result = PAGER_ERROR;
300 error = PAGER_ERROR;
301 if (upl && !(flags & UPL_NOCOMMIT)) {
302 ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR);
303 }
304 goto out;
305 }
306 if (upl == (upl_t)NULL) {
307 if (size > (MAX_UPL_TRANSFER * PAGE_SIZE)) {
308 result = PAGER_ERROR;
309 error = PAGER_ERROR;
310 goto out;
311 }
312 ubc_create_upl(vp, f_offset, size, &upl, &pl, UPL_RET_ONLY_ABSENT);
313
314 if (upl == (upl_t)NULL) {
315 result = PAGER_ABSENT;
316 error = PAGER_ABSENT;
317 goto out;
318 }
319 upl_offset = 0;
320 /*
321 * if we get here, we've created the upl and
322 * are responsible for commiting/aborting it
323 * regardless of what the caller has passed in
324 */
325 flags &= ~UPL_NOCOMMIT;
326
327 vp_pagein++;
328 } else {
329 pl = ubc_upl_pageinfo(upl);
330
331 dp_pgins++;
332 }
333 pages_in_upl = size / PAGE_SIZE;
334 first_pg = upl_offset / PAGE_SIZE;
335
336 /*
337 * before we start marching forward, we must make sure we end on
338 * a present page, otherwise we will be working with a freed
339 * upl
340 */
341 for (last_pg = pages_in_upl - 1; last_pg >= first_pg; last_pg--) {
342 if (upl_page_present(pl, last_pg))
343 break;
344 }
345 pages_in_upl = last_pg + 1;
346
347 for (last_pg = first_pg; last_pg < pages_in_upl;) {
348 /*
349 * scan the upl looking for the next
350 * page that is present.... if all of the
351 * pages are absent, we're done
352 */
353 for (start_pg = last_pg; last_pg < pages_in_upl; last_pg++) {
354 if (upl_page_present(pl, last_pg))
355 break;
356 }
357 if (last_pg == pages_in_upl)
358 break;
359
360 /*
361 * if we get here, we've sitting on a page
362 * that is present... we want to skip over
363 * any range of 'valid' pages... if this takes
364 * us to the end of the request, than we're done
365 */
366 for (start_pg = last_pg; last_pg < pages_in_upl; last_pg++) {
367 if (!upl_valid_page(pl, last_pg) || !upl_page_present(pl, last_pg))
368 break;
369 }
370 if (last_pg > start_pg) {
371 /*
372 * we've found a range of valid pages
373 * if we've got COMMIT responsibility
374 * commit this range of pages back to the
375 * cache unchanged
376 */
377 xsize = (last_pg - start_pg) * PAGE_SIZE;
378
379 if (!(flags & UPL_NOCOMMIT))
380 ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, xsize, UPL_ABORT_FREE_ON_EMPTY);
381
382 abort_needed = 0;
383 }
384 if (last_pg == pages_in_upl)
385 break;
386
387 if (!upl_page_present(pl, last_pg))
388 /*
389 * if we found a range of valid pages
390 * terminated by a non-present page
391 * than start over
392 */
393 continue;
394
395 /*
396 * scan from the found invalid page looking for a valid
397 * or non-present page before the end of the upl is reached, if we
398 * find one, then it will be the last page of the request to
399 * 'cluster_io'
400 */
401 for (start_pg = last_pg; last_pg < pages_in_upl; last_pg++) {
402 if (upl_valid_page(pl, last_pg) || !upl_page_present(pl, last_pg))
403 break;
404 }
405 if (last_pg > start_pg) {
406 int xoff;
407
408 xsize = (last_pg - start_pg) * PAGE_SIZE;
409 xoff = start_pg * PAGE_SIZE;
410
411 if (error = VOP_PAGEIN(vp, upl, (vm_offset_t) xoff,
412 (off_t)f_offset + xoff,
413 xsize, p->p_ucred,
414 flags)) {
415 result = PAGER_ERROR;
416 error = PAGER_ERROR;
417
418 }
419 abort_needed = 0;
420 }
421 }
422 if (!(flags & UPL_NOCOMMIT) && abort_needed)
423 ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY);
424 out:
425 if (errorp)
426 *errorp = result;
427 thread_funnel_set(kernel_flock, funnel_state);
428
429 return (error);
430 }
431
432 void
433 vnode_pager_shutdown()
434 {
435 int i;
436 extern struct bs_map bs_port_table[];
437 struct vnode *vp;
438
439 for(i = 0; i < MAX_BACKING_STORE; i++) {
440 vp = (struct vnode *)(bs_port_table[i]).vp;
441 if (vp) {
442 (bs_port_table[i]).vp = 0;
443 ubc_rele(vp);
444 /* get rid of macx_swapon() namei() reference */
445 vrele(vp);
446
447 /* get rid of macx_swapon() "extra" reference */
448 vrele(vp);
449 }
450 }
451 }
452
453
454 void *
455 upl_get_internal_page_list(upl_t upl)
456 {
457 return(UPL_GET_INTERNAL_PAGE_LIST(upl));
458
459 }