/*
 * Source provenance: apple/xnu.git — bsd/vm/dp_backing_file.c
 * blob 359d95fd10612d1b4ca5ac367a411c20ebb88c5d (mirror: git.saurik.com)
 */
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22
23 #include <mach/boolean.h>
24 #include <sys/param.h>
25 #include <sys/systm.h>
26 #include <sys/lock.h>
27 #include <sys/proc.h>
28 #include <sys/buf.h>
29 #include <sys/uio.h>
30 #include <sys/vnode.h>
31 #include <sys/namei.h>
32 #include <sys/ubc.h>
33
34 #include <mach/mach_types.h>
35 #include <vm/vm_map.h>
36 #include <vm/vm_kern.h>
37 #include <kern/host.h>
38 #include <kern/parallel.h>
39 #include <kern/zalloc.h>
40 #include <kern/kalloc.h>
41 #include <libkern/libkern.h>
42 #include <sys/malloc.h>
43
44 #include <vm/vnode_pager.h>
45
46 /*
47 * temporary support for delayed instantiation
48 * of default_pager
49 */
50 int default_pager_init_flag = 0;
51
52 struct bs_map bs_port_table[MAX_BACKING_STORE] = {
53 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
54 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
55 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
56 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
57 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
58 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
59 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
60 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
61 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
62 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}};
63
64 /* ###################################################### */
65
66
67 #include <kern/assert.h>
68
69 /*
70 * Routine: macx_swapon
71 * Function:
72 * Syscall interface to add a file to backing store
73 */
74 int
75 macx_swapon(
76 char *filename,
77 int flags,
78 long size,
79 long priority)
80 {
81 struct vnode *vp = 0;
82 struct nameidata nd, *ndp;
83 struct proc *p = current_proc();
84 pager_file_t pf;
85 register int error;
86 kern_return_t kr;
87 mach_port_t backing_store;
88 memory_object_default_t default_pager;
89 int i;
90 boolean_t funnel_state;
91
92 struct vattr vattr;
93
94 funnel_state = thread_funnel_set(kernel_flock, TRUE);
95 ndp = &nd;
96
97 if ((error = suser(p->p_ucred, &p->p_acflag)))
98 goto swapon_bailout;
99
100 unix_master();
101
102 if(default_pager_init_flag == 0) {
103 start_def_pager(NULL);
104 default_pager_init_flag = 1;
105 }
106
107 /*
108 * Get a vnode for the paging area.
109 */
110 NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE,
111 filename, p);
112
113 if ((error = namei(ndp)))
114 goto swapon_bailout;
115 vp = ndp->ni_vp;
116
117 if (vp->v_type != VREG) {
118 error = EINVAL;
119 VOP_UNLOCK(vp, 0, p);
120 goto swapon_bailout;
121 }
122 UBCINFOCHECK("macx_swapon", vp);
123
124 if (error = VOP_GETATTR(vp, &vattr, p->p_ucred, p)) {
125 VOP_UNLOCK(vp, 0, p);
126 goto swapon_bailout;
127 }
128
129 if (vattr.va_size < (u_quad_t)size) {
130 vattr_null(&vattr);
131 vattr.va_size = (u_quad_t)size;
132 error = VOP_SETATTR(vp, &vattr, p->p_ucred, p);
133 if (error) {
134 VOP_UNLOCK(vp, 0, p);
135 goto swapon_bailout;
136 }
137 }
138
139 /* add new backing store to list */
140 i = 0;
141 while(bs_port_table[i].vp != 0) {
142 if(i == MAX_BACKING_STORE)
143 break;
144 i++;
145 }
146 if(i == MAX_BACKING_STORE) {
147 error = ENOMEM;
148 VOP_UNLOCK(vp, 0, p);
149 goto swapon_bailout;
150 }
151
152 /* remember the vnode. This vnode has namei() reference */
153 bs_port_table[i].vp = vp;
154
155 /*
156 * Look to see if we are already paging to this file.
157 */
158 /* make certain the copy send of kernel call will work */
159 default_pager = MEMORY_OBJECT_DEFAULT_NULL;
160 kr = host_default_memory_manager(host_priv_self(), &default_pager, 0);
161 if(kr != KERN_SUCCESS) {
162 error = EAGAIN;
163 VOP_UNLOCK(vp, 0, p);
164 bs_port_table[i].vp = 0;
165 goto swapon_bailout;
166 }
167
168 kr = default_pager_backing_store_create(default_pager,
169 -1, /* default priority */
170 0, /* default cluster size */
171 &backing_store);
172 memory_object_default_deallocate(default_pager);
173
174 if(kr != KERN_SUCCESS) {
175 error = ENOMEM;
176 VOP_UNLOCK(vp, 0, p);
177 bs_port_table[i].vp = 0;
178 goto swapon_bailout;
179 }
180
181 /*
182 * NOTE: we are able to supply PAGE_SIZE here instead of
183 * an actual record size or block number because:
184 * a: we do not support offsets from the beginning of the
185 * file (allowing for non page size/record modulo offsets.
186 * b: because allow paging will be done modulo page size
187 */
188
189 VOP_UNLOCK(vp, 0, p);
190 kr = default_pager_add_file(backing_store, vp, PAGE_SIZE,
191 ((int)vattr.va_size)/PAGE_SIZE);
192 if(kr != KERN_SUCCESS) {
193 bs_port_table[i].vp = 0;
194 if(kr == KERN_INVALID_ARGUMENT)
195 error = EINVAL;
196 else
197 error = ENOMEM;
198 goto swapon_bailout;
199 }
200 bs_port_table[i].bs = (void *)backing_store;
201 error = 0;
202 if (!ubc_hold(vp))
203 panic("macx_swapon: hold");
204
205 /* Mark this vnode as being used for swapfile */
206 SET(vp->v_flag, VSWAP);
207
208 /*
209 * take an extra reference on the vnode to keep
210 * vnreclaim() away from this vnode.
211 */
212 VREF(vp);
213
214 /* Hold on to the namei reference to the paging file vnode */
215 vp = 0;
216
217 swapon_bailout:
218 if (vp) {
219 vrele(vp);
220 }
221 unix_release();
222 (void) thread_funnel_set(kernel_flock, FALSE);
223 return(error);
224 }
225
226 /*
227 * Routine: macx_swapoff
228 * Function:
229 * Syscall interface to remove a file from backing store
230 */
231 int
232 macx_swapoff(
233 char *filename,
234 int flags)
235 {
236 kern_return_t kr;
237 mach_port_t backing_store;
238
239 struct vnode *vp = 0;
240 struct nameidata nd, *ndp;
241 struct proc *p = current_proc();
242 int i;
243 int error;
244 boolean_t funnel_state;
245
246 funnel_state = thread_funnel_set(kernel_flock, TRUE);
247 backing_store = NULL;
248 ndp = &nd;
249
250 if ((error = suser(p->p_ucred, &p->p_acflag)))
251 goto swapoff_bailout;
252
253 unix_master();
254
255 /*
256 * Get the vnode for the paging area.
257 */
258 NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE,
259 filename, p);
260
261 if ((error = namei(ndp)))
262 goto swapoff_bailout;
263 vp = ndp->ni_vp;
264
265 if (vp->v_type != VREG) {
266 error = EINVAL;
267 VOP_UNLOCK(vp, 0, p);
268 goto swapoff_bailout;
269 }
270
271 for(i = 0; i < MAX_BACKING_STORE; i++) {
272 if(bs_port_table[i].vp == vp) {
273 backing_store;
274 break;
275 }
276 }
277 if (i == MAX_BACKING_STORE) {
278 error = EINVAL;
279 VOP_UNLOCK(vp, 0, p);
280 goto swapoff_bailout;
281 }
282 backing_store = (mach_port_t)bs_port_table[i].bs;
283
284 VOP_UNLOCK(vp, 0, p);
285 kr = default_pager_backing_store_delete(backing_store);
286 switch (kr) {
287 case KERN_SUCCESS:
288 error = 0;
289 bs_port_table[i].vp = 0;
290 ubc_rele(vp);
291 /* This vnode is no longer used for swapfile */
292 CLR(vp->v_flag, VSWAP);
293
294 /* get rid of macx_swapon() namei() reference */
295 vrele(vp);
296
297 /* get rid of macx_swapon() "extra" reference */
298 vrele(vp);
299 break;
300 case KERN_FAILURE:
301 error = EAGAIN;
302 break;
303 default:
304 error = EAGAIN;
305 break;
306 }
307
308 swapoff_bailout:
309 /* get rid of macx_swapoff() namei() reference */
310 if (vp)
311 vrele(vp);
312
313 unix_release();
314 (void) thread_funnel_set(kernel_flock, FALSE);
315 return(error);
316 }