/*
 * apple/xnu (xnu-201) -- bsd/vm/vnode_pager.c
 */
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * Mach Operating System
24 * Copyright (c) 1987 Carnegie-Mellon University
25 * All rights reserved. The CMU software License Agreement specifies
26 * the terms and conditions for use and redistribution.
27 */
28 /*
29 * File: vnode_pager.c
30 *
31 * "Swap" pager that pages to/from vnodes. Also
32 * handles demand paging from files.
33 *
34 */
35
36 #include <mach/boolean.h>
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/proc.h>
40 #include <sys/buf.h>
41 #include <sys/uio.h>
42 #include <sys/vnode.h>
43 #include <sys/namei.h>
44 #include <sys/mount.h>
45 #include <sys/ubc.h>
46 #include <sys/lock.h>
47
48 #include <mach/mach_types.h>
49 #include <mach/memory_object_types.h>
50
51 #include <vm/vm_map.h>
52 #include <vm/vm_kern.h>
53 #include <kern/parallel.h>
54 #include <kern/zalloc.h>
55 #include <kern/kalloc.h>
56 #include <libkern/libkern.h>
57
58 #include <vm/vnode_pager.h>
59 #include <vm/vm_pageout.h>
60
61 #include <kern/assert.h>
62
63 unsigned int vp_pagein=0;
64 unsigned int vp_pgodirty=0;
65 unsigned int vp_pgoclean=0;
66 unsigned int dp_pgouts=0; /* Default pager pageouts */
67 unsigned int dp_pgins=0; /* Default pager pageins */
68
69 vm_object_offset_t
70 vnode_pager_get_filesize(struct vnode *vp)
71 {
72 if (UBCINVALID(vp)) {
73 return (vm_object_offset_t) 0;
74 }
75
76 return (vm_object_offset_t) ubc_getsize(vp);
77
78 }
79
80 pager_return_t
81 vnode_pageout(struct vnode *vp,
82 upl_t upl,
83 vm_offset_t upl_offset,
84 vm_object_offset_t f_offset,
85 vm_size_t size,
86 int flags,
87 int *errorp)
88 {
89 int result = PAGER_SUCCESS;
90 struct proc *p = current_proc();
91 int error = 0;
92 int vp_size = 0;
93 int blkno=0, s;
94 int cnt, isize;
95 int pg_index;
96 int offset;
97 struct buf *bp;
98 boolean_t funnel_state;
99 int haveupl=0;
100 upl_page_info_t *pl;
101 upl_t vpupl = NULL;
102
103 funnel_state = thread_funnel_set(kernel_flock, TRUE);
104
105 if (upl != (upl_t)NULL) {
106 haveupl = 1;
107 }
108 isize = (int)size;
109
110 if (isize < 0)
111 panic("-ve count in vnode_pageout");
112 if (isize == 0)
113 panic("vnode_pageout: size == 0\n");
114
115 UBCINFOCHECK("vnode_pageout", vp);
116
117 if (UBCINVALID(vp)) {
118 result = PAGER_ERROR;
119 error = PAGER_ERROR;
120 goto out;
121 }
122 if (haveupl) {
123 /*
124 * This is a pageout form the Default pager,
125 * just go ahead and call VOP_PAGEOUT
126 */
127 dp_pgouts++;
128 if (error = VOP_PAGEOUT(vp, upl, upl_offset,
129 (off_t)f_offset,(size_t)size, p->p_ucred, flags)) {
130 result = PAGER_ERROR;
131 error = PAGER_ERROR;
132 }
133 goto out;
134 }
135 ubc_create_upl( vp,
136 f_offset,
137 isize,
138 &vpupl,
139 &pl,
140 UPL_COPYOUT_FROM);
141 if (vpupl == (upl_t) 0)
142 return PAGER_ABSENT;
143
144 vp_size = ubc_getsize(vp);
145 if (vp_size == 0) {
146
147 while (isize) {
148 blkno = ubc_offtoblk(vp, (off_t)f_offset);
149 start0:
150 if (bp = incore(vp, blkno)) {
151 if (ISSET(bp->b_flags, B_BUSY)) {
152 SET(bp->b_flags, B_WANTED);
153 error = tsleep(bp, (PRIBIO + 1), "vnpgout", 0);
154 goto start0;
155 } else {
156 bremfree(bp);
157 SET(bp->b_flags, (B_BUSY|B_INVAL));
158 }
159 }
160 if (bp)
161 brelse(bp);
162 f_offset += PAGE_SIZE;
163 isize -= PAGE_SIZE;
164 }
165 ubc_upl_commit_range(vpupl, 0, size, UPL_COMMIT_FREE_ON_EMPTY);
166
167 error = 0;
168 goto out;
169 }
170 pg_index = 0;
171 offset = 0;
172
173 while (isize) {
174 int xsize;
175 int num_of_pages;
176
177 if ( !upl_valid_page(pl, pg_index)) {
178 ubc_upl_abort_range(vpupl, offset, PAGE_SIZE,
179 UPL_ABORT_FREE_ON_EMPTY);
180
181 offset += PAGE_SIZE;
182 isize -= PAGE_SIZE;
183 pg_index++;
184
185 continue;
186 }
187 if ( !upl_dirty_page(pl, pg_index)) {
188 /*
189 * if the page is not dirty and reached here it is
190 * marked precious or it is due to invalidation in
191 * memory_object_lock request as part of truncation
192 * We also get here from vm_object_terminate()
193 * So all you need to do in these
194 * cases is to invalidate incore buffer if it is there
195 */
196 blkno = ubc_offtoblk(vp, (off_t)(f_offset + offset));
197 s = splbio();
198 vp_pgoclean++;
199 start:
200 if (bp = incore(vp, blkno)) {
201 if (ISSET(bp->b_flags, B_BUSY)) {
202 SET(bp->b_flags, B_WANTED);
203 error = tsleep(bp, (PRIBIO + 1), "vnpgout", 0);
204 goto start;
205 } else {
206 bremfree(bp);
207 SET(bp->b_flags, (B_BUSY|B_INVAL));
208 }
209 }
210 splx(s);
211 if (bp)
212 brelse(bp);
213
214 ubc_upl_commit_range(vpupl, offset, PAGE_SIZE,
215 UPL_COMMIT_FREE_ON_EMPTY);
216
217 offset += PAGE_SIZE;
218 isize -= PAGE_SIZE;
219 pg_index++;
220
221 continue;
222 }
223 vp_pgodirty++;
224
225 num_of_pages = 1;
226 xsize = isize - PAGE_SIZE;
227
228 while (xsize) {
229 if ( !upl_valid_page(pl, pg_index + num_of_pages))
230 break;
231 if ( !upl_dirty_page(pl, pg_index + num_of_pages))
232 break;
233 num_of_pages++;
234 xsize -= PAGE_SIZE;
235 }
236 xsize = num_of_pages * PAGE_SIZE;
237
238 /* By defn callee will commit or abort upls */
239 if (error = VOP_PAGEOUT(vp, vpupl, (vm_offset_t) offset,
240 (off_t)(f_offset + offset),
241 xsize, p->p_ucred, flags & ~UPL_NOCOMMIT)) {
242 result = PAGER_ERROR;
243 error = PAGER_ERROR;
244 }
245 offset += xsize;
246 isize -= xsize;
247 pg_index += num_of_pages;
248 }
249 out:
250 if (errorp)
251 *errorp = result;
252
253 thread_funnel_set(kernel_flock, funnel_state);
254
255 return (error);
256 }
257
258
259 pager_return_t
260 vnode_pagein(
261 struct vnode *vp,
262 upl_t pl,
263 vm_offset_t pl_offset,
264 vm_object_offset_t f_offset,
265 vm_size_t size,
266 int flags,
267 int *errorp)
268 {
269 int result = PAGER_SUCCESS;
270 struct proc *p = current_proc();
271 int error = 0;
272 int xfer_size;
273 boolean_t funnel_state;
274 int haveupl=0;
275 upl_t vpupl = NULL;
276 off_t local_offset;
277 unsigned int ioaddr;
278
279 funnel_state = thread_funnel_set(kernel_flock, TRUE);
280
281 #if 0
282 if(pl->page_list.npages >1 )
283 panic("vnode_pageout: Can't handle more than one page");
284 #endif /* 0 */
285
286 if (pl != (upl_t)NULL) {
287 haveupl = 1;
288 }
289 UBCINFOCHECK("vnode_pagein", vp);
290
291 if (UBCINVALID(vp)) {
292 result = PAGER_ERROR;
293 error = PAGER_ERROR;
294 goto out;
295 }
296
297 if (haveupl) {
298 dp_pgins++;
299 if (error = VOP_PAGEIN(vp, pl, pl_offset, (off_t)f_offset,
300 size,p->p_ucred, flags)) {
301 result = PAGER_ERROR;
302 }
303 } else {
304
305 local_offset = 0;
306 while (size) {
307 if((size > 4096) && (vp->v_tag == VT_NFS)) {
308 xfer_size = 4096;
309 size = size - xfer_size;
310 } else {
311 xfer_size = size;
312 size = 0;
313 }
314 ubc_create_upl( vp,
315 f_offset+local_offset,
316 xfer_size,
317 &vpupl,
318 NULL,
319 UPL_FLAGS_NONE);
320 if (vpupl == (upl_t) 0) {
321 result = PAGER_ABSENT;
322 error = PAGER_ABSENT;
323 goto out;
324 }
325
326 vp_pagein++;
327
328 /* By defn callee will commit or abort upls */
329 if (error = VOP_PAGEIN(vp, vpupl, (vm_offset_t) 0,
330 (off_t)f_offset+local_offset, xfer_size,p->p_ucred, flags & ~UPL_NOCOMMIT)) {
331 result = PAGER_ERROR;
332 error = PAGER_ERROR;
333 }
334 local_offset += PAGE_SIZE_64;
335 }
336 }
337 out:
338 if (errorp)
339 *errorp = result;
340 thread_funnel_set(kernel_flock, funnel_state);
341
342 return (error);
343 }
344
345 void
346 vnode_pager_shutdown()
347 {
348 int i;
349 extern struct bs_map bs_port_table[];
350 struct vnode *vp;
351
352 for(i = 0; i < MAX_BACKING_STORE; i++) {
353 vp = (struct vnode *)(bs_port_table[i]).vp;
354 if (vp) {
355 (bs_port_table[i]).vp = 0;
356 ubc_rele(vp);
357 /* get rid of macx_swapon() namei() reference */
358 vrele(vp);
359
360 /* get rid of macx_swapon() "extra" reference */
361 vrele(vp);
362 }
363 }
364 }
365
366
367 void *
368 upl_get_internal_page_list(upl_t upl)
369 {
370 return(UPL_GET_INTERNAL_PAGE_LIST(upl));
371
372 }