/*
 * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1987 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */
/*
 *	File:	vnode_pager.c
 *
 *	"Swap" pager that pages to/from vnodes.  Also
 *	handles demand paging from files.
 *
 */
#include <mach/boolean.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/mount.h>
#include <sys/ubc.h>
#include <sys/lock.h>

#include <mach/mach_types.h>
#include <mach/memory_object_types.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <libkern/libkern.h>

#include <vm/vnode_pager.h>
#include <vm/vm_pageout.h>

#include <kern/assert.h>
#include <sys/kdebug.h>
/*
 * Pager activity counters — statistics/debugging only; read via kernel
 * debugger, never consulted by the paging logic itself.
 */
unsigned int vp_pagein  = 0;	/* pageins serviced through a locally created UPL */
unsigned int vp_pgodirty = 0;	/* dirty pages pushed out via VOP_PAGEOUT */
unsigned int vp_pgoclean = 0;	/* clean/precious pages handled during pageout */
unsigned int dp_pgouts  = 0;	/* Default pager pageouts */
unsigned int dp_pgins   = 0;	/* Default pager pageins */
72vm_object_offset_t
73vnode_pager_get_filesize(struct vnode *vp)
74{
75 if (UBCINVALID(vp)) {
76 return (vm_object_offset_t) 0;
77 }
78
79 return (vm_object_offset_t) ubc_getsize(vp);
80
81}
82
1c79356b
A
83pager_return_t
84vnode_pageout(struct vnode *vp,
85 upl_t upl,
86 vm_offset_t upl_offset,
87 vm_object_offset_t f_offset,
88 vm_size_t size,
89 int flags,
90 int *errorp)
91{
92 int result = PAGER_SUCCESS;
93 struct proc *p = current_proc();
94 int error = 0;
1c79356b
A
95 int blkno=0, s;
96 int cnt, isize;
97 int pg_index;
98 int offset;
99 struct buf *bp;
100 boolean_t funnel_state;
1c79356b 101 upl_page_info_t *pl;
0b4e3aa0 102 upl_t vpupl = NULL;
1c79356b
A
103
104 funnel_state = thread_funnel_set(kernel_flock, TRUE);
105
1c79356b
A
106 isize = (int)size;
107
9bccf70c
A
108 if (isize <= 0) {
109 result = error = PAGER_ERROR;
110 goto out;
111 }
1c79356b
A
112 UBCINFOCHECK("vnode_pageout", vp);
113
114 if (UBCINVALID(vp)) {
9bccf70c
A
115 result = error = PAGER_ERROR;
116
fa4905b1 117 if (upl && !(flags & UPL_NOCOMMIT))
9bccf70c 118 ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY);
1c79356b
A
119 goto out;
120 }
fa4905b1 121 if (upl) {
1c79356b 122 /*
fa4905b1 123 * This is a pageout from the Default pager,
1c79356b
A
124 * just go ahead and call VOP_PAGEOUT
125 */
126 dp_pgouts++;
9bccf70c
A
127
128 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_START,
129 size, 1, 0, 0, 0);
130
fa4905b1
A
131 if (error = VOP_PAGEOUT(vp, upl, upl_offset, (off_t)f_offset,
132 (size_t)size, p->p_ucred, flags))
133 result = error = PAGER_ERROR;
9bccf70c
A
134
135 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_END,
136 size, 1, 0, 0, 0);
137
1c79356b
A
138 goto out;
139 }
55e303ae 140 ubc_create_upl(vp, f_offset, isize, &vpupl, &pl, UPL_FOR_PAGEOUT | UPL_COPYOUT_FROM | UPL_SET_LITE);
9bccf70c
A
141
142 if (vpupl == (upl_t) 0) {
143 result = error = PAGER_ABSENT;
144 goto out;
145 }
146 /*
147 * if we get here, we've created the upl and
148 * are responsible for commiting/aborting it
149 * regardless of what the caller has passed in
150 */
151 flags &= ~UPL_NOCOMMIT;
1c79356b 152
fa4905b1
A
153 if (ubc_getsize(vp) == 0) {
154 for (offset = 0; isize; isize -= PAGE_SIZE,
155 offset += PAGE_SIZE) {
1c79356b 156 blkno = ubc_offtoblk(vp, (off_t)f_offset);
1c79356b 157 f_offset += PAGE_SIZE;
fa4905b1
A
158 if ((bp = incore(vp, blkno)) &&
159 ISSET(bp->b_flags, B_BUSY)) {
160 ubc_upl_abort_range(vpupl, offset, PAGE_SIZE,
161 UPL_ABORT_FREE_ON_EMPTY);
162 result = error = PAGER_ERROR;
163 continue;
164 } else if (bp) {
165 bremfree(bp);
166 SET(bp->b_flags, B_BUSY | B_INVAL);
167 brelse(bp);
168 }
169 ubc_upl_commit_range(vpupl, offset, PAGE_SIZE,
170 UPL_COMMIT_FREE_ON_EMPTY);
1c79356b 171 }
1c79356b
A
172 goto out;
173 }
174 pg_index = 0;
175 offset = 0;
176
177 while (isize) {
178 int xsize;
179 int num_of_pages;
180
181 if ( !upl_valid_page(pl, pg_index)) {
0b4e3aa0 182 ubc_upl_abort_range(vpupl, offset, PAGE_SIZE,
fa4905b1 183 UPL_ABORT_FREE_ON_EMPTY);
1c79356b
A
184 offset += PAGE_SIZE;
185 isize -= PAGE_SIZE;
186 pg_index++;
187
188 continue;
189 }
190 if ( !upl_dirty_page(pl, pg_index)) {
191 /*
192 * if the page is not dirty and reached here it is
193 * marked precious or it is due to invalidation in
194 * memory_object_lock request as part of truncation
195 * We also get here from vm_object_terminate()
196 * So all you need to do in these
197 * cases is to invalidate incore buffer if it is there
fa4905b1
A
198 * Note we must not sleep here if B_BUSY - that is
199 * a lock inversion which causes deadlock.
1c79356b
A
200 */
201 blkno = ubc_offtoblk(vp, (off_t)(f_offset + offset));
202 s = splbio();
203 vp_pgoclean++;
55e303ae
A
204 if (vp->v_tag == VT_NFS) {
205 /* check with nfs if page is OK to drop */
206 error = nfs_buf_page_inval(vp, (off_t)(f_offset + offset));
207 splx(s);
208 if (error) {
209 ubc_upl_abort_range(vpupl, offset, PAGE_SIZE,
210 UPL_ABORT_FREE_ON_EMPTY);
211 result = error = PAGER_ERROR;
212 offset += PAGE_SIZE;
213 isize -= PAGE_SIZE;
214 pg_index++;
215 continue;
216 }
217 } else if ((bp = incore(vp, blkno)) &&
fa4905b1
A
218 ISSET(bp->b_flags, B_BUSY | B_NEEDCOMMIT)) {
219 splx(s);
220 ubc_upl_abort_range(vpupl, offset, PAGE_SIZE,
221 UPL_ABORT_FREE_ON_EMPTY);
222 result = error = PAGER_ERROR;
223 offset += PAGE_SIZE;
224 isize -= PAGE_SIZE;
225 pg_index++;
226 continue;
227 } else if (bp) {
228 bremfree(bp);
229 SET(bp->b_flags, B_BUSY | B_INVAL );
230 splx(s);
1c79356b 231 brelse(bp);
fa4905b1
A
232 } else
233 splx(s);
1c79356b 234
0b4e3aa0 235 ubc_upl_commit_range(vpupl, offset, PAGE_SIZE,
fa4905b1 236 UPL_COMMIT_FREE_ON_EMPTY);
1c79356b
A
237 offset += PAGE_SIZE;
238 isize -= PAGE_SIZE;
239 pg_index++;
240
241 continue;
242 }
243 vp_pgodirty++;
244
245 num_of_pages = 1;
246 xsize = isize - PAGE_SIZE;
247
248 while (xsize) {
249 if ( !upl_valid_page(pl, pg_index + num_of_pages))
250 break;
251 if ( !upl_dirty_page(pl, pg_index + num_of_pages))
252 break;
253 num_of_pages++;
254 xsize -= PAGE_SIZE;
255 }
256 xsize = num_of_pages * PAGE_SIZE;
257
9bccf70c
A
258 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_START,
259 xsize, 0, 0, 0, 0);
260
fa4905b1
A
261 if (error = VOP_PAGEOUT(vp, vpupl, (vm_offset_t)offset,
262 (off_t)(f_offset + offset), xsize,
9bccf70c 263 p->p_ucred, flags))
fa4905b1 264 result = error = PAGER_ERROR;
9bccf70c
A
265
266 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_END,
267 xsize, 0, 0, 0, 0);
268
1c79356b
A
269 offset += xsize;
270 isize -= xsize;
271 pg_index += num_of_pages;
272 }
273out:
274 if (errorp)
275 *errorp = result;
276
277 thread_funnel_set(kernel_flock, funnel_state);
278
279 return (error);
280}
281
282
283pager_return_t
284vnode_pagein(
285 struct vnode *vp,
9bccf70c
A
286 upl_t upl,
287 vm_offset_t upl_offset,
1c79356b
A
288 vm_object_offset_t f_offset,
289 vm_size_t size,
290 int flags,
291 int *errorp)
292{
9bccf70c
A
293 struct proc *p = current_proc();
294 upl_page_info_t *pl;
295 int result = PAGER_SUCCESS;
1c79356b 296 int error = 0;
0b4e3aa0 297 int xfer_size;
9bccf70c
A
298 int pages_in_upl;
299 int start_pg;
300 int last_pg;
301 int first_pg;
302 int xsize;
303 int abort_needed = 1;
1c79356b 304 boolean_t funnel_state;
9bccf70c 305
1c79356b
A
306
307 funnel_state = thread_funnel_set(kernel_flock, TRUE);
308
1c79356b
A
309 UBCINFOCHECK("vnode_pagein", vp);
310
311 if (UBCINVALID(vp)) {
312 result = PAGER_ERROR;
313 error = PAGER_ERROR;
9bccf70c
A
314 if (upl && !(flags & UPL_NOCOMMIT)) {
315 ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR);
fa4905b1 316 }
1c79356b
A
317 goto out;
318 }
9bccf70c
A
319 if (upl == (upl_t)NULL) {
320 if (size > (MAX_UPL_TRANSFER * PAGE_SIZE)) {
321 result = PAGER_ERROR;
322 error = PAGER_ERROR;
323 goto out;
324 }
55e303ae 325 ubc_create_upl(vp, f_offset, size, &upl, &pl, UPL_RET_ONLY_ABSENT | UPL_SET_LITE);
1c79356b 326
9bccf70c
A
327 if (upl == (upl_t)NULL) {
328 result = PAGER_ABSENT;
329 error = PAGER_ABSENT;
330 goto out;
1c79356b 331 }
9bccf70c
A
332 upl_offset = 0;
333 /*
334 * if we get here, we've created the upl and
335 * are responsible for commiting/aborting it
336 * regardless of what the caller has passed in
337 */
338 flags &= ~UPL_NOCOMMIT;
339
340 vp_pagein++;
1c79356b 341 } else {
9bccf70c 342 pl = ubc_upl_pageinfo(upl);
1c79356b 343
9bccf70c
A
344 dp_pgins++;
345 }
346 pages_in_upl = size / PAGE_SIZE;
347 first_pg = upl_offset / PAGE_SIZE;
348
349 /*
350 * before we start marching forward, we must make sure we end on
351 * a present page, otherwise we will be working with a freed
352 * upl
353 */
354 for (last_pg = pages_in_upl - 1; last_pg >= first_pg; last_pg--) {
355 if (upl_page_present(pl, last_pg))
356 break;
357 }
358 pages_in_upl = last_pg + 1;
359
360 for (last_pg = first_pg; last_pg < pages_in_upl;) {
361 /*
362 * scan the upl looking for the next
363 * page that is present.... if all of the
364 * pages are absent, we're done
365 */
366 for (start_pg = last_pg; last_pg < pages_in_upl; last_pg++) {
367 if (upl_page_present(pl, last_pg))
368 break;
369 }
370 if (last_pg == pages_in_upl)
371 break;
372
373 /*
374 * if we get here, we've sitting on a page
375 * that is present... we want to skip over
376 * any range of 'valid' pages... if this takes
377 * us to the end of the request, than we're done
378 */
379 for (start_pg = last_pg; last_pg < pages_in_upl; last_pg++) {
380 if (!upl_valid_page(pl, last_pg) || !upl_page_present(pl, last_pg))
381 break;
382 }
383 if (last_pg > start_pg) {
384 /*
385 * we've found a range of valid pages
386 * if we've got COMMIT responsibility
387 * commit this range of pages back to the
388 * cache unchanged
389 */
390 xsize = (last_pg - start_pg) * PAGE_SIZE;
1c79356b 391
9bccf70c
A
392 if (!(flags & UPL_NOCOMMIT))
393 ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, xsize, UPL_ABORT_FREE_ON_EMPTY);
1c79356b 394
9bccf70c
A
395 abort_needed = 0;
396 }
397 if (last_pg == pages_in_upl)
398 break;
399
400 if (!upl_page_present(pl, last_pg))
401 /*
402 * if we found a range of valid pages
403 * terminated by a non-present page
404 * than start over
405 */
406 continue;
407
408 /*
409 * scan from the found invalid page looking for a valid
410 * or non-present page before the end of the upl is reached, if we
411 * find one, then it will be the last page of the request to
412 * 'cluster_io'
413 */
414 for (start_pg = last_pg; last_pg < pages_in_upl; last_pg++) {
415 if (upl_valid_page(pl, last_pg) || !upl_page_present(pl, last_pg))
416 break;
417 }
418 if (last_pg > start_pg) {
419 int xoff;
420
421 xsize = (last_pg - start_pg) * PAGE_SIZE;
422 xoff = start_pg * PAGE_SIZE;
423
424 if (error = VOP_PAGEIN(vp, upl, (vm_offset_t) xoff,
425 (off_t)f_offset + xoff,
426 xsize, p->p_ucred,
427 flags)) {
0b4e3aa0
A
428 result = PAGER_ERROR;
429 error = PAGER_ERROR;
9bccf70c 430
0b4e3aa0 431 }
9bccf70c 432 abort_needed = 0;
1c79356b 433 }
9bccf70c
A
434 }
435 if (!(flags & UPL_NOCOMMIT) && abort_needed)
436 ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY);
1c79356b
A
437out:
438 if (errorp)
fa4905b1 439 *errorp = result;
1c79356b
A
440 thread_funnel_set(kernel_flock, funnel_state);
441
442 return (error);
443}
444
445void
446vnode_pager_shutdown()
447{
448 int i;
449 extern struct bs_map bs_port_table[];
450 struct vnode *vp;
451
452 for(i = 0; i < MAX_BACKING_STORE; i++) {
453 vp = (struct vnode *)(bs_port_table[i]).vp;
454 if (vp) {
455 (bs_port_table[i]).vp = 0;
456 ubc_rele(vp);
457 /* get rid of macx_swapon() namei() reference */
458 vrele(vp);
459
460 /* get rid of macx_swapon() "extra" reference */
461 vrele(vp);
462 }
463 }
464}
465
0b4e3aa0
A
466
467void *
1c79356b
A
468upl_get_internal_page_list(upl_t upl)
469{
0b4e3aa0 470 return(UPL_GET_INTERNAL_PAGE_LIST(upl));
1c79356b
A
471
472}