/* bsd/vm/dp_backing_file.c — apple/xnu (xnu-517.3.15) */
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <mach/boolean.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/ubc.h>

#include <mach/mach_types.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <kern/host.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <libkern/libkern.h>
#include <sys/malloc.h>

#include <vm/vnode_pager.h>

48 /*
49 * temporary support for delayed instantiation
50 * of default_pager
51 */
52 int default_pager_init_flag = 0;
53
54 struct bs_map bs_port_table[MAX_BACKING_STORE] = {
55 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
56 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
57 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
58 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
59 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
60 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
61 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
62 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
63 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
64 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}};
65
/* ###################################################### */


#include <kern/assert.h>

71 /*
72 * Routine: macx_backing_store_recovery
73 * Function:
74 * Syscall interface to set a tasks privilege
75 * level so that it is not subject to
76 * macx_backing_store_suspend
77 */
78 int
79 macx_backing_store_recovery(
80 int pid)
81 {
82 int error;
83 struct proc *p = current_proc();
84 boolean_t funnel_state;
85
86 funnel_state = thread_funnel_set(kernel_flock, TRUE);
87 if ((error = suser(p->p_ucred, &p->p_acflag)))
88 goto backing_store_recovery_return;
89
90 /* for now restrict backing_store_recovery */
91 /* usage to only present task */
92 if(pid != p->p_pid) {
93 error = EINVAL;
94 goto backing_store_recovery_return;
95 }
96
97 task_backing_store_privileged(p->task);
98
99 backing_store_recovery_return:
100 (void) thread_funnel_set(kernel_flock, FALSE);
101 return(error);
102 }
103
104 /*
105 * Routine: macx_backing_store_suspend
106 * Function:
107 * Syscall interface to stop new demand for
108 * backing store when backing store is low
109 */
110
111 int
112 macx_backing_store_suspend(
113 boolean_t suspend)
114 {
115 int error;
116 struct proc *p = current_proc();
117 boolean_t funnel_state;
118
119 funnel_state = thread_funnel_set(kernel_flock, TRUE);
120 if ((error = suser(p->p_ucred, &p->p_acflag)))
121 goto backing_store_suspend_return;
122
123 vm_backing_store_disable(suspend);
124
125 backing_store_suspend_return:
126 (void) thread_funnel_set(kernel_flock, FALSE);
127 return(error);
128 }
129
130 /*
131 * Routine: macx_swapon
132 * Function:
133 * Syscall interface to add a file to backing store
134 */
135 int
136 macx_swapon(
137 char *filename,
138 int flags,
139 long size,
140 long priority)
141 {
142 struct vnode *vp = 0;
143 struct nameidata nd, *ndp;
144 struct proc *p = current_proc();
145 pager_file_t pf;
146 register int error;
147 kern_return_t kr;
148 mach_port_t backing_store;
149 memory_object_default_t default_pager;
150 int i;
151 boolean_t funnel_state;
152
153 struct vattr vattr;
154
155 funnel_state = thread_funnel_set(kernel_flock, TRUE);
156 ndp = &nd;
157
158 if ((error = suser(p->p_ucred, &p->p_acflag)))
159 goto swapon_bailout;
160
161 if(default_pager_init_flag == 0) {
162 start_def_pager(NULL);
163 default_pager_init_flag = 1;
164 }
165
166 /*
167 * Get a vnode for the paging area.
168 */
169 NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE,
170 filename, p);
171
172 if ((error = namei(ndp)))
173 goto swapon_bailout;
174 vp = ndp->ni_vp;
175
176 if (vp->v_type != VREG) {
177 error = EINVAL;
178 VOP_UNLOCK(vp, 0, p);
179 goto swapon_bailout;
180 }
181 UBCINFOCHECK("macx_swapon", vp);
182
183 if (error = VOP_GETATTR(vp, &vattr, p->p_ucred, p)) {
184 VOP_UNLOCK(vp, 0, p);
185 goto swapon_bailout;
186 }
187
188 if (vattr.va_size < (u_quad_t)size) {
189 vattr_null(&vattr);
190 vattr.va_size = (u_quad_t)size;
191 error = VOP_SETATTR(vp, &vattr, p->p_ucred, p);
192 if (error) {
193 VOP_UNLOCK(vp, 0, p);
194 goto swapon_bailout;
195 }
196 }
197
198 /* add new backing store to list */
199 i = 0;
200 while(bs_port_table[i].vp != 0) {
201 if(i == MAX_BACKING_STORE)
202 break;
203 i++;
204 }
205 if(i == MAX_BACKING_STORE) {
206 error = ENOMEM;
207 VOP_UNLOCK(vp, 0, p);
208 goto swapon_bailout;
209 }
210
211 /* remember the vnode. This vnode has namei() reference */
212 bs_port_table[i].vp = vp;
213
214 /*
215 * Look to see if we are already paging to this file.
216 */
217 /* make certain the copy send of kernel call will work */
218 default_pager = MEMORY_OBJECT_DEFAULT_NULL;
219 kr = host_default_memory_manager(host_priv_self(), &default_pager, 0);
220 if(kr != KERN_SUCCESS) {
221 error = EAGAIN;
222 VOP_UNLOCK(vp, 0, p);
223 bs_port_table[i].vp = 0;
224 goto swapon_bailout;
225 }
226
227 kr = default_pager_backing_store_create(default_pager,
228 -1, /* default priority */
229 0, /* default cluster size */
230 &backing_store);
231 memory_object_default_deallocate(default_pager);
232
233 if(kr != KERN_SUCCESS) {
234 error = ENOMEM;
235 VOP_UNLOCK(vp, 0, p);
236 bs_port_table[i].vp = 0;
237 goto swapon_bailout;
238 }
239
240 /*
241 * NOTE: we are able to supply PAGE_SIZE here instead of
242 * an actual record size or block number because:
243 * a: we do not support offsets from the beginning of the
244 * file (allowing for non page size/record modulo offsets.
245 * b: because allow paging will be done modulo page size
246 */
247
248 VOP_UNLOCK(vp, 0, p);
249 kr = default_pager_add_file(backing_store, vp, PAGE_SIZE,
250 ((int)vattr.va_size)/PAGE_SIZE);
251 if(kr != KERN_SUCCESS) {
252 bs_port_table[i].vp = 0;
253 if(kr == KERN_INVALID_ARGUMENT)
254 error = EINVAL;
255 else
256 error = ENOMEM;
257 goto swapon_bailout;
258 }
259 bs_port_table[i].bs = (void *)backing_store;
260 error = 0;
261 if (!ubc_hold(vp))
262 panic("macx_swapon: hold");
263
264 /* Mark this vnode as being used for swapfile */
265 SET(vp->v_flag, VSWAP);
266
267 ubc_setcred(vp, p);
268
269 /*
270 * take an extra reference on the vnode to keep
271 * vnreclaim() away from this vnode.
272 */
273 VREF(vp);
274
275 /* Hold on to the namei reference to the paging file vnode */
276 vp = 0;
277
278 swapon_bailout:
279 if (vp) {
280 vrele(vp);
281 }
282 (void) thread_funnel_set(kernel_flock, FALSE);
283 return(error);
284 }
285
286 /*
287 * Routine: macx_swapoff
288 * Function:
289 * Syscall interface to remove a file from backing store
290 */
291 int
292 macx_swapoff(
293 char *filename,
294 int flags)
295 {
296 kern_return_t kr;
297 mach_port_t backing_store;
298
299 struct vnode *vp = 0;
300 struct nameidata nd, *ndp;
301 struct proc *p = current_proc();
302 int i;
303 int error;
304 boolean_t funnel_state;
305
306 funnel_state = thread_funnel_set(kernel_flock, TRUE);
307 backing_store = NULL;
308 ndp = &nd;
309
310 if ((error = suser(p->p_ucred, &p->p_acflag)))
311 goto swapoff_bailout;
312
313 /*
314 * Get the vnode for the paging area.
315 */
316 NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE,
317 filename, p);
318
319 if ((error = namei(ndp)))
320 goto swapoff_bailout;
321 vp = ndp->ni_vp;
322
323 if (vp->v_type != VREG) {
324 error = EINVAL;
325 VOP_UNLOCK(vp, 0, p);
326 goto swapoff_bailout;
327 }
328
329 for(i = 0; i < MAX_BACKING_STORE; i++) {
330 if(bs_port_table[i].vp == vp) {
331 backing_store;
332 break;
333 }
334 }
335 if (i == MAX_BACKING_STORE) {
336 error = EINVAL;
337 VOP_UNLOCK(vp, 0, p);
338 goto swapoff_bailout;
339 }
340 backing_store = (mach_port_t)bs_port_table[i].bs;
341
342 VOP_UNLOCK(vp, 0, p);
343 kr = default_pager_backing_store_delete(backing_store);
344 switch (kr) {
345 case KERN_SUCCESS:
346 error = 0;
347 bs_port_table[i].vp = 0;
348 ubc_rele(vp);
349 /* This vnode is no longer used for swapfile */
350 CLR(vp->v_flag, VSWAP);
351
352 /* get rid of macx_swapon() namei() reference */
353 vrele(vp);
354
355 /* get rid of macx_swapon() "extra" reference */
356 vrele(vp);
357 break;
358 case KERN_FAILURE:
359 error = EAGAIN;
360 break;
361 default:
362 error = EAGAIN;
363 break;
364 }
365
366 swapoff_bailout:
367 /* get rid of macx_swapoff() namei() reference */
368 if (vp)
369 vrele(vp);
370
371 (void) thread_funnel_set(kernel_flock, FALSE);
372 return(error);
373 }