/*
 * bsd/vm/dp_backing_file.c  (Apple xnu-344)
 *
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
22
23#include <mach/boolean.h>
24#include <sys/param.h>
25#include <sys/systm.h>
26#include <sys/lock.h>
27#include <sys/proc.h>
28#include <sys/buf.h>
29#include <sys/uio.h>
30#include <sys/vnode.h>
31#include <sys/namei.h>
32#include <sys/ubc.h>
33
34#include <mach/mach_types.h>
35#include <vm/vm_map.h>
36#include <vm/vm_kern.h>
37#include <kern/host.h>
1c79356b
A
38#include <kern/zalloc.h>
39#include <kern/kalloc.h>
40#include <libkern/libkern.h>
41#include <sys/malloc.h>
42
43#include <vm/vnode_pager.h>
44
/*
 * Temporary support for delayed instantiation of the default pager:
 * nonzero once start_def_pager() has been kicked off by macx_swapon().
 */
int default_pager_init_flag = 0;

51struct bs_map bs_port_table[MAX_BACKING_STORE] = {
52 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
53 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
54 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
55 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
56 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
57 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
58 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
59 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
60 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
61 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}};
62
63/* ###################################################### */
64
65
66#include <kern/assert.h>
67
68/*
69 * Routine: macx_swapon
70 * Function:
71 * Syscall interface to add a file to backing store
72 */
73int
74macx_swapon(
75 char *filename,
76 int flags,
77 long size,
78 long priority)
79{
80 struct vnode *vp = 0;
81 struct nameidata nd, *ndp;
82 struct proc *p = current_proc();
83 pager_file_t pf;
84 register int error;
85 kern_return_t kr;
86 mach_port_t backing_store;
0b4e3aa0 87 memory_object_default_t default_pager;
1c79356b
A
88 int i;
89 boolean_t funnel_state;
90
91 struct vattr vattr;
92
93 funnel_state = thread_funnel_set(kernel_flock, TRUE);
94 ndp = &nd;
95
96 if ((error = suser(p->p_ucred, &p->p_acflag)))
97 goto swapon_bailout;
98
1c79356b
A
99 if(default_pager_init_flag == 0) {
100 start_def_pager(NULL);
101 default_pager_init_flag = 1;
102 }
103
104 /*
105 * Get a vnode for the paging area.
106 */
107 NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE,
108 filename, p);
109
110 if ((error = namei(ndp)))
111 goto swapon_bailout;
112 vp = ndp->ni_vp;
113
114 if (vp->v_type != VREG) {
115 error = EINVAL;
116 VOP_UNLOCK(vp, 0, p);
117 goto swapon_bailout;
118 }
119 UBCINFOCHECK("macx_swapon", vp);
120
121 if (error = VOP_GETATTR(vp, &vattr, p->p_ucred, p)) {
122 VOP_UNLOCK(vp, 0, p);
123 goto swapon_bailout;
124 }
125
126 if (vattr.va_size < (u_quad_t)size) {
127 vattr_null(&vattr);
128 vattr.va_size = (u_quad_t)size;
129 error = VOP_SETATTR(vp, &vattr, p->p_ucred, p);
130 if (error) {
131 VOP_UNLOCK(vp, 0, p);
132 goto swapon_bailout;
133 }
134 }
135
136 /* add new backing store to list */
137 i = 0;
138 while(bs_port_table[i].vp != 0) {
139 if(i == MAX_BACKING_STORE)
140 break;
141 i++;
142 }
143 if(i == MAX_BACKING_STORE) {
144 error = ENOMEM;
145 VOP_UNLOCK(vp, 0, p);
146 goto swapon_bailout;
147 }
148
149 /* remember the vnode. This vnode has namei() reference */
150 bs_port_table[i].vp = vp;
151
152 /*
153 * Look to see if we are already paging to this file.
154 */
155 /* make certain the copy send of kernel call will work */
0b4e3aa0
A
156 default_pager = MEMORY_OBJECT_DEFAULT_NULL;
157 kr = host_default_memory_manager(host_priv_self(), &default_pager, 0);
1c79356b
A
158 if(kr != KERN_SUCCESS) {
159 error = EAGAIN;
160 VOP_UNLOCK(vp, 0, p);
161 bs_port_table[i].vp = 0;
162 goto swapon_bailout;
163 }
164
0b4e3aa0 165 kr = default_pager_backing_store_create(default_pager,
1c79356b
A
166 -1, /* default priority */
167 0, /* default cluster size */
168 &backing_store);
0b4e3aa0
A
169 memory_object_default_deallocate(default_pager);
170
1c79356b
A
171 if(kr != KERN_SUCCESS) {
172 error = ENOMEM;
173 VOP_UNLOCK(vp, 0, p);
174 bs_port_table[i].vp = 0;
175 goto swapon_bailout;
176 }
177
178 /*
179 * NOTE: we are able to supply PAGE_SIZE here instead of
180 * an actual record size or block number because:
181 * a: we do not support offsets from the beginning of the
182 * file (allowing for non page size/record modulo offsets.
183 * b: because allow paging will be done modulo page size
184 */
185
186 VOP_UNLOCK(vp, 0, p);
187 kr = default_pager_add_file(backing_store, vp, PAGE_SIZE,
188 ((int)vattr.va_size)/PAGE_SIZE);
189 if(kr != KERN_SUCCESS) {
190 bs_port_table[i].vp = 0;
191 if(kr == KERN_INVALID_ARGUMENT)
192 error = EINVAL;
193 else
194 error = ENOMEM;
195 goto swapon_bailout;
196 }
197 bs_port_table[i].bs = (void *)backing_store;
198 error = 0;
199 if (!ubc_hold(vp))
200 panic("macx_swapon: hold");
201
202 /* Mark this vnode as being used for swapfile */
203 SET(vp->v_flag, VSWAP);
204
205 /*
206 * take an extra reference on the vnode to keep
207 * vnreclaim() away from this vnode.
208 */
209 VREF(vp);
210
211 /* Hold on to the namei reference to the paging file vnode */
212 vp = 0;
213
214swapon_bailout:
215 if (vp) {
216 vrele(vp);
217 }
1c79356b
A
218 (void) thread_funnel_set(kernel_flock, FALSE);
219 return(error);
220}
221
222/*
223 * Routine: macx_swapoff
224 * Function:
225 * Syscall interface to remove a file from backing store
226 */
227int
228macx_swapoff(
229 char *filename,
230 int flags)
231{
232 kern_return_t kr;
233 mach_port_t backing_store;
234
235 struct vnode *vp = 0;
236 struct nameidata nd, *ndp;
237 struct proc *p = current_proc();
238 int i;
239 int error;
240 boolean_t funnel_state;
241
242 funnel_state = thread_funnel_set(kernel_flock, TRUE);
243 backing_store = NULL;
244 ndp = &nd;
245
246 if ((error = suser(p->p_ucred, &p->p_acflag)))
247 goto swapoff_bailout;
248
1c79356b
A
249 /*
250 * Get the vnode for the paging area.
251 */
252 NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE,
253 filename, p);
254
255 if ((error = namei(ndp)))
256 goto swapoff_bailout;
257 vp = ndp->ni_vp;
258
259 if (vp->v_type != VREG) {
260 error = EINVAL;
261 VOP_UNLOCK(vp, 0, p);
262 goto swapoff_bailout;
263 }
264
265 for(i = 0; i < MAX_BACKING_STORE; i++) {
266 if(bs_port_table[i].vp == vp) {
267 backing_store;
268 break;
269 }
270 }
271 if (i == MAX_BACKING_STORE) {
272 error = EINVAL;
273 VOP_UNLOCK(vp, 0, p);
274 goto swapoff_bailout;
275 }
276 backing_store = (mach_port_t)bs_port_table[i].bs;
277
278 VOP_UNLOCK(vp, 0, p);
279 kr = default_pager_backing_store_delete(backing_store);
280 switch (kr) {
281 case KERN_SUCCESS:
282 error = 0;
283 bs_port_table[i].vp = 0;
284 ubc_rele(vp);
285 /* This vnode is no longer used for swapfile */
286 CLR(vp->v_flag, VSWAP);
287
288 /* get rid of macx_swapon() namei() reference */
289 vrele(vp);
290
291 /* get rid of macx_swapon() "extra" reference */
292 vrele(vp);
293 break;
294 case KERN_FAILURE:
295 error = EAGAIN;
296 break;
297 default:
298 error = EAGAIN;
299 break;
300 }
301
302swapoff_bailout:
303 /* get rid of macx_swapoff() namei() reference */
304 if (vp)
305 vrele(vp);
306
1c79356b
A
307 (void) thread_funnel_set(kernel_flock, FALSE);
308 return(error);
309}