/*
 * Source: apple/xnu (xnu-201.42.3), bsd/vm/vnode_pager.c
 * (recovered from a gitweb blame listing; blame markup removed)
 */
1/*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22/*
23 * Mach Operating System
24 * Copyright (c) 1987 Carnegie-Mellon University
25 * All rights reserved. The CMU software License Agreement specifies
26 * the terms and conditions for use and redistribution.
27 */
28/*
29 * File: vnode_pager.c
30 *
31 * "Swap" pager that pages to/from vnodes. Also
32 * handles demand paging from files.
33 *
34 */
35
36#include <mach/boolean.h>
37#include <sys/param.h>
38#include <sys/systm.h>
39#include <sys/proc.h>
40#include <sys/buf.h>
41#include <sys/uio.h>
42#include <sys/vnode.h>
43#include <sys/namei.h>
44#include <sys/mount.h>
45#include <sys/ubc.h>
46#include <sys/lock.h>
47
48#include <mach/mach_types.h>
49#include <mach/memory_object_types.h>
50
51#include <vm/vm_map.h>
52#include <vm/vm_kern.h>
53#include <kern/parallel.h>
54#include <kern/zalloc.h>
55#include <kern/kalloc.h>
56#include <libkern/libkern.h>
57
58#include <vm/vnode_pager.h>
59#include <vm/vm_pageout.h>
60
61#include <kern/assert.h>
62
/*
 * Statistics counters for the vnode pager, exported for inspection
 * (e.g. via a kernel debugger).  Updated without locking beyond the
 * kernel funnel held by the pager entry points below.
 */
unsigned int vp_pagein=0;	/* pageins issued through vnode_pagein() */
unsigned int vp_pgodirty=0;	/* dirty pages pushed via VOP_PAGEOUT */
unsigned int vp_pgoclean=0;	/* clean/precious pages handled without I/O */
unsigned int dp_pgouts=0;	/* Default pager pageouts */
unsigned int dp_pgins=0;	/* Default pager pageins */
68
69vm_object_offset_t
70vnode_pager_get_filesize(struct vnode *vp)
71{
72 if (UBCINVALID(vp)) {
73 return (vm_object_offset_t) 0;
74 }
75
76 return (vm_object_offset_t) ubc_getsize(vp);
77
78}
79
80pager_return_t
81vnode_pageout(struct vnode *vp,
82 upl_t upl,
83 vm_offset_t upl_offset,
84 vm_object_offset_t f_offset,
85 vm_size_t size,
86 int flags,
87 int *errorp)
88{
89 int result = PAGER_SUCCESS;
90 struct proc *p = current_proc();
91 int error = 0;
1c79356b
A
92 int blkno=0, s;
93 int cnt, isize;
94 int pg_index;
95 int offset;
96 struct buf *bp;
97 boolean_t funnel_state;
1c79356b 98 upl_page_info_t *pl;
0b4e3aa0 99 upl_t vpupl = NULL;
1c79356b
A
100
101 funnel_state = thread_funnel_set(kernel_flock, TRUE);
102
1c79356b
A
103 isize = (int)size;
104
105 if (isize < 0)
106 panic("-ve count in vnode_pageout");
107 if (isize == 0)
108 panic("vnode_pageout: size == 0\n");
109
110 UBCINFOCHECK("vnode_pageout", vp);
111
112 if (UBCINVALID(vp)) {
113 result = PAGER_ERROR;
114 error = PAGER_ERROR;
fa4905b1
A
115 if (upl && !(flags & UPL_NOCOMMIT))
116 ubc_upl_abort(upl, 0);
1c79356b
A
117 goto out;
118 }
fa4905b1 119 if (upl) {
1c79356b 120 /*
fa4905b1 121 * This is a pageout from the Default pager,
1c79356b
A
122 * just go ahead and call VOP_PAGEOUT
123 */
124 dp_pgouts++;
fa4905b1
A
125 if (error = VOP_PAGEOUT(vp, upl, upl_offset, (off_t)f_offset,
126 (size_t)size, p->p_ucred, flags))
127 result = error = PAGER_ERROR;
1c79356b
A
128 goto out;
129 }
fa4905b1 130 ubc_create_upl(vp, f_offset, isize, &vpupl, &pl, UPL_COPYOUT_FROM);
1c79356b 131 if (vpupl == (upl_t) 0)
0b4e3aa0 132 return PAGER_ABSENT;
1c79356b 133
fa4905b1
A
134 if (ubc_getsize(vp) == 0) {
135 for (offset = 0; isize; isize -= PAGE_SIZE,
136 offset += PAGE_SIZE) {
1c79356b 137 blkno = ubc_offtoblk(vp, (off_t)f_offset);
1c79356b 138 f_offset += PAGE_SIZE;
fa4905b1
A
139 if ((bp = incore(vp, blkno)) &&
140 ISSET(bp->b_flags, B_BUSY)) {
141 ubc_upl_abort_range(vpupl, offset, PAGE_SIZE,
142 UPL_ABORT_FREE_ON_EMPTY);
143 result = error = PAGER_ERROR;
144 continue;
145 } else if (bp) {
146 bremfree(bp);
147 SET(bp->b_flags, B_BUSY | B_INVAL);
148 brelse(bp);
149 }
150 ubc_upl_commit_range(vpupl, offset, PAGE_SIZE,
151 UPL_COMMIT_FREE_ON_EMPTY);
1c79356b 152 }
1c79356b
A
153 goto out;
154 }
155 pg_index = 0;
156 offset = 0;
157
158 while (isize) {
159 int xsize;
160 int num_of_pages;
161
162 if ( !upl_valid_page(pl, pg_index)) {
0b4e3aa0 163 ubc_upl_abort_range(vpupl, offset, PAGE_SIZE,
fa4905b1 164 UPL_ABORT_FREE_ON_EMPTY);
1c79356b
A
165 offset += PAGE_SIZE;
166 isize -= PAGE_SIZE;
167 pg_index++;
168
169 continue;
170 }
171 if ( !upl_dirty_page(pl, pg_index)) {
172 /*
173 * if the page is not dirty and reached here it is
174 * marked precious or it is due to invalidation in
175 * memory_object_lock request as part of truncation
176 * We also get here from vm_object_terminate()
177 * So all you need to do in these
178 * cases is to invalidate incore buffer if it is there
fa4905b1
A
179 * Note we must not sleep here if B_BUSY - that is
180 * a lock inversion which causes deadlock.
1c79356b
A
181 */
182 blkno = ubc_offtoblk(vp, (off_t)(f_offset + offset));
183 s = splbio();
184 vp_pgoclean++;
fa4905b1
A
185 if ((bp = incore(vp, blkno)) &&
186 ISSET(bp->b_flags, B_BUSY | B_NEEDCOMMIT)) {
187 splx(s);
188 ubc_upl_abort_range(vpupl, offset, PAGE_SIZE,
189 UPL_ABORT_FREE_ON_EMPTY);
190 result = error = PAGER_ERROR;
191 offset += PAGE_SIZE;
192 isize -= PAGE_SIZE;
193 pg_index++;
194 continue;
195 } else if (bp) {
196 bremfree(bp);
197 SET(bp->b_flags, B_BUSY | B_INVAL );
198 splx(s);
1c79356b 199 brelse(bp);
fa4905b1
A
200 } else
201 splx(s);
1c79356b 202
0b4e3aa0 203 ubc_upl_commit_range(vpupl, offset, PAGE_SIZE,
fa4905b1 204 UPL_COMMIT_FREE_ON_EMPTY);
1c79356b
A
205 offset += PAGE_SIZE;
206 isize -= PAGE_SIZE;
207 pg_index++;
208
209 continue;
210 }
211 vp_pgodirty++;
212
213 num_of_pages = 1;
214 xsize = isize - PAGE_SIZE;
215
216 while (xsize) {
217 if ( !upl_valid_page(pl, pg_index + num_of_pages))
218 break;
219 if ( !upl_dirty_page(pl, pg_index + num_of_pages))
220 break;
221 num_of_pages++;
222 xsize -= PAGE_SIZE;
223 }
224 xsize = num_of_pages * PAGE_SIZE;
225
226 /* By defn callee will commit or abort upls */
fa4905b1
A
227 if (error = VOP_PAGEOUT(vp, vpupl, (vm_offset_t)offset,
228 (off_t)(f_offset + offset), xsize,
229 p->p_ucred, flags & ~UPL_NOCOMMIT))
230 result = error = PAGER_ERROR;
1c79356b
A
231 offset += xsize;
232 isize -= xsize;
233 pg_index += num_of_pages;
234 }
235out:
236 if (errorp)
237 *errorp = result;
238
239 thread_funnel_set(kernel_flock, funnel_state);
240
241 return (error);
242}
243
244
245pager_return_t
246vnode_pagein(
247 struct vnode *vp,
248 upl_t pl,
249 vm_offset_t pl_offset,
250 vm_object_offset_t f_offset,
251 vm_size_t size,
252 int flags,
253 int *errorp)
254{
255 int result = PAGER_SUCCESS;
256 struct proc *p = current_proc();
257 int error = 0;
0b4e3aa0 258 int xfer_size;
1c79356b 259 boolean_t funnel_state;
0b4e3aa0
A
260 upl_t vpupl = NULL;
261 off_t local_offset;
1c79356b
A
262 unsigned int ioaddr;
263
264 funnel_state = thread_funnel_set(kernel_flock, TRUE);
265
1c79356b
A
266 UBCINFOCHECK("vnode_pagein", vp);
267
268 if (UBCINVALID(vp)) {
269 result = PAGER_ERROR;
270 error = PAGER_ERROR;
fa4905b1
A
271 if (pl && !(flags & UPL_NOCOMMIT)) {
272 ubc_upl_abort(pl, 0);
273 }
1c79356b
A
274 goto out;
275 }
276
fa4905b1 277 if (pl) {
1c79356b
A
278 dp_pgins++;
279 if (error = VOP_PAGEIN(vp, pl, pl_offset, (off_t)f_offset,
fa4905b1 280 size, p->p_ucred, flags)) {
1c79356b
A
281 result = PAGER_ERROR;
282 }
283 } else {
284
0b4e3aa0
A
285 local_offset = 0;
286 while (size) {
fa4905b1 287 if(size > 4096 && vp->v_tag == VT_NFS) {
0b4e3aa0
A
288 xfer_size = 4096;
289 size = size - xfer_size;
290 } else {
291 xfer_size = size;
292 size = 0;
293 }
fa4905b1
A
294 ubc_create_upl(vp, f_offset + local_offset, xfer_size,
295 &vpupl, NULL, UPL_FLAGS_NONE);
0b4e3aa0
A
296 if (vpupl == (upl_t) 0) {
297 result = PAGER_ABSENT;
298 error = PAGER_ABSENT;
299 goto out;
300 }
1c79356b 301
0b4e3aa0 302 vp_pagein++;
1c79356b 303
0b4e3aa0
A
304 /* By defn callee will commit or abort upls */
305 if (error = VOP_PAGEIN(vp, vpupl, (vm_offset_t) 0,
fa4905b1
A
306 (off_t)f_offset + local_offset,
307 xfer_size, p->p_ucred,
308 flags & ~UPL_NOCOMMIT)) {
0b4e3aa0
A
309 result = PAGER_ERROR;
310 error = PAGER_ERROR;
311 }
312 local_offset += PAGE_SIZE_64;
1c79356b
A
313 }
314 }
315out:
316 if (errorp)
fa4905b1 317 *errorp = result;
1c79356b
A
318 thread_funnel_set(kernel_flock, funnel_state);
319
320 return (error);
321}
322
323void
324vnode_pager_shutdown()
325{
326 int i;
327 extern struct bs_map bs_port_table[];
328 struct vnode *vp;
329
330 for(i = 0; i < MAX_BACKING_STORE; i++) {
331 vp = (struct vnode *)(bs_port_table[i]).vp;
332 if (vp) {
333 (bs_port_table[i]).vp = 0;
334 ubc_rele(vp);
335 /* get rid of macx_swapon() namei() reference */
336 vrele(vp);
337
338 /* get rid of macx_swapon() "extra" reference */
339 vrele(vp);
340 }
341 }
342}
343
344
345void *
1c79356b
A
346upl_get_internal_page_list(upl_t upl)
347{
0b4e3aa0 348 return(UPL_GET_INTERNAL_PAGE_LIST(upl));
1c79356b
A
349
350}