/* bsd/vm/vnode_pager.c — Apple xnu-344 (git-blame extraction residue removed) */
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1987 Carnegie-Mellon University
 * All rights reserved. The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */
/*
 * File: vnode_pager.c
 *
 * "Swap" pager that pages to/from vnodes. Also
 * handles demand paging from files.
 */
35
#include <mach/boolean.h>
#include <mach/mach_types.h>
#include <mach/memory_object_types.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/mount.h>
#include <sys/ubc.h>
#include <sys/lock.h>
#include <sys/kdebug.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vnode_pager.h>
#include <vm/vm_pageout.h>

#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/assert.h>

#include <libkern/libkern.h>

/*
 * Pager statistics counters (observability only; incremented by the
 * pagein/pageout paths below).
 */
unsigned int vp_pagein = 0;	/* pageins serviced via the vnode pager */
unsigned int vp_pgodirty = 0;	/* dirty pages pushed out by vnode_pageout() */
unsigned int vp_pgoclean = 0;	/* clean/precious pages handled by vnode_pageout() */
unsigned int dp_pgouts = 0;	/* Default pager pageouts */
unsigned int dp_pgins = 0;	/* Default pager pageins */
68
0b4e3aa0
A
69vm_object_offset_t
70vnode_pager_get_filesize(struct vnode *vp)
71{
72 if (UBCINVALID(vp)) {
73 return (vm_object_offset_t) 0;
74 }
75
76 return (vm_object_offset_t) ubc_getsize(vp);
77
78}
79
1c79356b
A
80pager_return_t
81vnode_pageout(struct vnode *vp,
82 upl_t upl,
83 vm_offset_t upl_offset,
84 vm_object_offset_t f_offset,
85 vm_size_t size,
86 int flags,
87 int *errorp)
88{
89 int result = PAGER_SUCCESS;
90 struct proc *p = current_proc();
91 int error = 0;
1c79356b
A
92 int blkno=0, s;
93 int cnt, isize;
94 int pg_index;
95 int offset;
96 struct buf *bp;
97 boolean_t funnel_state;
1c79356b 98 upl_page_info_t *pl;
0b4e3aa0 99 upl_t vpupl = NULL;
1c79356b
A
100
101 funnel_state = thread_funnel_set(kernel_flock, TRUE);
102
1c79356b
A
103 isize = (int)size;
104
9bccf70c
A
105 if (isize <= 0) {
106 result = error = PAGER_ERROR;
107 goto out;
108 }
1c79356b
A
109 UBCINFOCHECK("vnode_pageout", vp);
110
111 if (UBCINVALID(vp)) {
9bccf70c
A
112 result = error = PAGER_ERROR;
113
fa4905b1 114 if (upl && !(flags & UPL_NOCOMMIT))
9bccf70c 115 ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY);
1c79356b
A
116 goto out;
117 }
fa4905b1 118 if (upl) {
1c79356b 119 /*
fa4905b1 120 * This is a pageout from the Default pager,
1c79356b
A
121 * just go ahead and call VOP_PAGEOUT
122 */
123 dp_pgouts++;
9bccf70c
A
124
125 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_START,
126 size, 1, 0, 0, 0);
127
fa4905b1
A
128 if (error = VOP_PAGEOUT(vp, upl, upl_offset, (off_t)f_offset,
129 (size_t)size, p->p_ucred, flags))
130 result = error = PAGER_ERROR;
9bccf70c
A
131
132 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_END,
133 size, 1, 0, 0, 0);
134
1c79356b
A
135 goto out;
136 }
fa4905b1 137 ubc_create_upl(vp, f_offset, isize, &vpupl, &pl, UPL_COPYOUT_FROM);
9bccf70c
A
138
139 if (vpupl == (upl_t) 0) {
140 result = error = PAGER_ABSENT;
141 goto out;
142 }
143 /*
144 * if we get here, we've created the upl and
145 * are responsible for commiting/aborting it
146 * regardless of what the caller has passed in
147 */
148 flags &= ~UPL_NOCOMMIT;
1c79356b 149
fa4905b1
A
150 if (ubc_getsize(vp) == 0) {
151 for (offset = 0; isize; isize -= PAGE_SIZE,
152 offset += PAGE_SIZE) {
1c79356b 153 blkno = ubc_offtoblk(vp, (off_t)f_offset);
1c79356b 154 f_offset += PAGE_SIZE;
fa4905b1
A
155 if ((bp = incore(vp, blkno)) &&
156 ISSET(bp->b_flags, B_BUSY)) {
157 ubc_upl_abort_range(vpupl, offset, PAGE_SIZE,
158 UPL_ABORT_FREE_ON_EMPTY);
159 result = error = PAGER_ERROR;
160 continue;
161 } else if (bp) {
162 bremfree(bp);
163 SET(bp->b_flags, B_BUSY | B_INVAL);
164 brelse(bp);
165 }
166 ubc_upl_commit_range(vpupl, offset, PAGE_SIZE,
167 UPL_COMMIT_FREE_ON_EMPTY);
1c79356b 168 }
1c79356b
A
169 goto out;
170 }
171 pg_index = 0;
172 offset = 0;
173
174 while (isize) {
175 int xsize;
176 int num_of_pages;
177
178 if ( !upl_valid_page(pl, pg_index)) {
0b4e3aa0 179 ubc_upl_abort_range(vpupl, offset, PAGE_SIZE,
fa4905b1 180 UPL_ABORT_FREE_ON_EMPTY);
1c79356b
A
181 offset += PAGE_SIZE;
182 isize -= PAGE_SIZE;
183 pg_index++;
184
185 continue;
186 }
187 if ( !upl_dirty_page(pl, pg_index)) {
188 /*
189 * if the page is not dirty and reached here it is
190 * marked precious or it is due to invalidation in
191 * memory_object_lock request as part of truncation
192 * We also get here from vm_object_terminate()
193 * So all you need to do in these
194 * cases is to invalidate incore buffer if it is there
fa4905b1
A
195 * Note we must not sleep here if B_BUSY - that is
196 * a lock inversion which causes deadlock.
1c79356b
A
197 */
198 blkno = ubc_offtoblk(vp, (off_t)(f_offset + offset));
199 s = splbio();
200 vp_pgoclean++;
fa4905b1
A
201 if ((bp = incore(vp, blkno)) &&
202 ISSET(bp->b_flags, B_BUSY | B_NEEDCOMMIT)) {
203 splx(s);
204 ubc_upl_abort_range(vpupl, offset, PAGE_SIZE,
205 UPL_ABORT_FREE_ON_EMPTY);
206 result = error = PAGER_ERROR;
207 offset += PAGE_SIZE;
208 isize -= PAGE_SIZE;
209 pg_index++;
210 continue;
211 } else if (bp) {
212 bremfree(bp);
213 SET(bp->b_flags, B_BUSY | B_INVAL );
214 splx(s);
1c79356b 215 brelse(bp);
fa4905b1
A
216 } else
217 splx(s);
1c79356b 218
0b4e3aa0 219 ubc_upl_commit_range(vpupl, offset, PAGE_SIZE,
fa4905b1 220 UPL_COMMIT_FREE_ON_EMPTY);
1c79356b
A
221 offset += PAGE_SIZE;
222 isize -= PAGE_SIZE;
223 pg_index++;
224
225 continue;
226 }
227 vp_pgodirty++;
228
229 num_of_pages = 1;
230 xsize = isize - PAGE_SIZE;
231
232 while (xsize) {
233 if ( !upl_valid_page(pl, pg_index + num_of_pages))
234 break;
235 if ( !upl_dirty_page(pl, pg_index + num_of_pages))
236 break;
237 num_of_pages++;
238 xsize -= PAGE_SIZE;
239 }
240 xsize = num_of_pages * PAGE_SIZE;
241
9bccf70c
A
242 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_START,
243 xsize, 0, 0, 0, 0);
244
fa4905b1
A
245 if (error = VOP_PAGEOUT(vp, vpupl, (vm_offset_t)offset,
246 (off_t)(f_offset + offset), xsize,
9bccf70c 247 p->p_ucred, flags))
fa4905b1 248 result = error = PAGER_ERROR;
9bccf70c
A
249
250 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_END,
251 xsize, 0, 0, 0, 0);
252
1c79356b
A
253 offset += xsize;
254 isize -= xsize;
255 pg_index += num_of_pages;
256 }
257out:
258 if (errorp)
259 *errorp = result;
260
261 thread_funnel_set(kernel_flock, funnel_state);
262
263 return (error);
264}
265
266
267pager_return_t
268vnode_pagein(
269 struct vnode *vp,
9bccf70c
A
270 upl_t upl,
271 vm_offset_t upl_offset,
1c79356b
A
272 vm_object_offset_t f_offset,
273 vm_size_t size,
274 int flags,
275 int *errorp)
276{
9bccf70c
A
277 struct proc *p = current_proc();
278 upl_page_info_t *pl;
279 int result = PAGER_SUCCESS;
1c79356b 280 int error = 0;
0b4e3aa0 281 int xfer_size;
9bccf70c
A
282 int pages_in_upl;
283 int start_pg;
284 int last_pg;
285 int first_pg;
286 int xsize;
287 int abort_needed = 1;
1c79356b 288 boolean_t funnel_state;
9bccf70c 289
1c79356b
A
290
291 funnel_state = thread_funnel_set(kernel_flock, TRUE);
292
1c79356b
A
293 UBCINFOCHECK("vnode_pagein", vp);
294
295 if (UBCINVALID(vp)) {
296 result = PAGER_ERROR;
297 error = PAGER_ERROR;
9bccf70c
A
298 if (upl && !(flags & UPL_NOCOMMIT)) {
299 ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR);
fa4905b1 300 }
1c79356b
A
301 goto out;
302 }
9bccf70c
A
303 if (upl == (upl_t)NULL) {
304 if (size > (MAX_UPL_TRANSFER * PAGE_SIZE)) {
305 result = PAGER_ERROR;
306 error = PAGER_ERROR;
307 goto out;
308 }
309 ubc_create_upl(vp, f_offset, size, &upl, &pl, UPL_RET_ONLY_ABSENT);
1c79356b 310
9bccf70c
A
311 if (upl == (upl_t)NULL) {
312 result = PAGER_ABSENT;
313 error = PAGER_ABSENT;
314 goto out;
1c79356b 315 }
9bccf70c
A
316 upl_offset = 0;
317 /*
318 * if we get here, we've created the upl and
319 * are responsible for commiting/aborting it
320 * regardless of what the caller has passed in
321 */
322 flags &= ~UPL_NOCOMMIT;
323
324 vp_pagein++;
1c79356b 325 } else {
9bccf70c 326 pl = ubc_upl_pageinfo(upl);
1c79356b 327
9bccf70c
A
328 dp_pgins++;
329 }
330 pages_in_upl = size / PAGE_SIZE;
331 first_pg = upl_offset / PAGE_SIZE;
332
333 /*
334 * before we start marching forward, we must make sure we end on
335 * a present page, otherwise we will be working with a freed
336 * upl
337 */
338 for (last_pg = pages_in_upl - 1; last_pg >= first_pg; last_pg--) {
339 if (upl_page_present(pl, last_pg))
340 break;
341 }
342 pages_in_upl = last_pg + 1;
343
344 for (last_pg = first_pg; last_pg < pages_in_upl;) {
345 /*
346 * scan the upl looking for the next
347 * page that is present.... if all of the
348 * pages are absent, we're done
349 */
350 for (start_pg = last_pg; last_pg < pages_in_upl; last_pg++) {
351 if (upl_page_present(pl, last_pg))
352 break;
353 }
354 if (last_pg == pages_in_upl)
355 break;
356
357 /*
358 * if we get here, we've sitting on a page
359 * that is present... we want to skip over
360 * any range of 'valid' pages... if this takes
361 * us to the end of the request, than we're done
362 */
363 for (start_pg = last_pg; last_pg < pages_in_upl; last_pg++) {
364 if (!upl_valid_page(pl, last_pg) || !upl_page_present(pl, last_pg))
365 break;
366 }
367 if (last_pg > start_pg) {
368 /*
369 * we've found a range of valid pages
370 * if we've got COMMIT responsibility
371 * commit this range of pages back to the
372 * cache unchanged
373 */
374 xsize = (last_pg - start_pg) * PAGE_SIZE;
1c79356b 375
9bccf70c
A
376 if (!(flags & UPL_NOCOMMIT))
377 ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, xsize, UPL_ABORT_FREE_ON_EMPTY);
1c79356b 378
9bccf70c
A
379 abort_needed = 0;
380 }
381 if (last_pg == pages_in_upl)
382 break;
383
384 if (!upl_page_present(pl, last_pg))
385 /*
386 * if we found a range of valid pages
387 * terminated by a non-present page
388 * than start over
389 */
390 continue;
391
392 /*
393 * scan from the found invalid page looking for a valid
394 * or non-present page before the end of the upl is reached, if we
395 * find one, then it will be the last page of the request to
396 * 'cluster_io'
397 */
398 for (start_pg = last_pg; last_pg < pages_in_upl; last_pg++) {
399 if (upl_valid_page(pl, last_pg) || !upl_page_present(pl, last_pg))
400 break;
401 }
402 if (last_pg > start_pg) {
403 int xoff;
404
405 xsize = (last_pg - start_pg) * PAGE_SIZE;
406 xoff = start_pg * PAGE_SIZE;
407
408 if (error = VOP_PAGEIN(vp, upl, (vm_offset_t) xoff,
409 (off_t)f_offset + xoff,
410 xsize, p->p_ucred,
411 flags)) {
0b4e3aa0
A
412 result = PAGER_ERROR;
413 error = PAGER_ERROR;
9bccf70c 414
0b4e3aa0 415 }
9bccf70c 416 abort_needed = 0;
1c79356b 417 }
9bccf70c
A
418 }
419 if (!(flags & UPL_NOCOMMIT) && abort_needed)
420 ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY);
1c79356b
A
421out:
422 if (errorp)
fa4905b1 423 *errorp = result;
1c79356b
A
424 thread_funnel_set(kernel_flock, funnel_state);
425
426 return (error);
427}
428
429void
430vnode_pager_shutdown()
431{
432 int i;
433 extern struct bs_map bs_port_table[];
434 struct vnode *vp;
435
436 for(i = 0; i < MAX_BACKING_STORE; i++) {
437 vp = (struct vnode *)(bs_port_table[i]).vp;
438 if (vp) {
439 (bs_port_table[i]).vp = 0;
440 ubc_rele(vp);
441 /* get rid of macx_swapon() namei() reference */
442 vrele(vp);
443
444 /* get rid of macx_swapon() "extra" reference */
445 vrele(vp);
446 }
447 }
448}
449
0b4e3aa0
A
450
451void *
1c79356b
A
452upl_get_internal_page_list(upl_t upl)
453{
0b4e3aa0 454 return(UPL_GET_INTERNAL_PAGE_LIST(upl));
1c79356b
A
455
456}