/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <mach/boolean.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/ubc.h>

#include <mach/mach_types.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <kern/host.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <libkern/libkern.h>
#include <sys/malloc.h>

#include <vm/vnode_pager.h>
/*
 * temporary support for delayed instantiation
 * of default_pager
 */
int default_pager_init_flag = 0;

struct bs_map	bs_port_table[MAX_BACKING_STORE] = {
	{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
	{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
	{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
	{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
	{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
	{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
	{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
	{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
	{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
	{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}};
/* ###################################################### */

#include <kern/assert.h>
/*
 *	Routine:	macx_swapon
 *	Function:
 *		Syscall interface to add a file to backing store
 */
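/*
 * Illustrative sketch only (not part of this file): a privileged
 * user-space daemon such as dynamic_pager is expected to add a paging
 * file through this trap, assuming the usual user-space prototype
 * macx_swapon(char *filename, int flags, long size, long priority):
 *
 *	if (macx_swapon("/private/var/vm/swapfile0", 0, size, 0) != 0)
 *		... the file could not be added to backing store ...
 *
 * The path above is only an example; dynamic_pager chooses its own
 * swapfile names.
 */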
int
macx_swapon(
	char		*filename,
	int		flags,
	long		size,
	long		priority)
{
	struct vnode		*vp = 0;
	struct nameidata	nd, *ndp;
	struct proc		*p = current_proc();
	int			error;
	kern_return_t		kr;
	mach_port_t		backing_store;
	memory_object_default_t	default_pager;
	int			i;
	boolean_t		funnel_state;
	struct vattr		vattr;

	funnel_state = thread_funnel_set(kernel_flock, TRUE);
	ndp = &nd;
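	/*
	 * All of the vnode and namei work below runs under the kernel
	 * funnel taken above, which serializes entry into the BSD side
	 * of the kernel.
	 */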
	if ((error = suser(p->p_ucred, &p->p_acflag)))
		goto swapon_bailout;

	if (default_pager_init_flag == 0) {
		start_def_pager(NULL);
		default_pager_init_flag = 1;
	}
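	/*
	 * The default pager is started lazily above on the first swapon;
	 * the backing store created below is registered with it.
	 */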
	/*
	 * Get a vnode for the paging area.
	 */
	NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE,
	    filename, p);

	if ((error = namei(ndp)))
		goto swapon_bailout;
	vp = ndp->ni_vp;
	if (vp->v_type != VREG) {
		error = EINVAL;
		VOP_UNLOCK(vp, 0, p);
		goto swapon_bailout;
	}
	UBCINFOCHECK("macx_swapon", vp);
	if ((error = VOP_GETATTR(vp, &vattr, p->p_ucred, p))) {
		VOP_UNLOCK(vp, 0, p);
		goto swapon_bailout;
	}
	if (vattr.va_size < (u_quad_t)size) {
		vattr.va_size = (u_quad_t)size;
		error = VOP_SETATTR(vp, &vattr, p->p_ucred, p);
		if (error) {
			VOP_UNLOCK(vp, 0, p);
			goto swapon_bailout;
		}
	}
	/* add new backing store to list */
	i = 0;
	while (i < MAX_BACKING_STORE && bs_port_table[i].vp != 0)
		i++;

	if (i == MAX_BACKING_STORE) {
		/* table is full; no more paging files can be added */
		error = ENOMEM;
		VOP_UNLOCK(vp, 0, p);
		goto swapon_bailout;
	}

	/* remember the vnode. This vnode has namei() reference */
	bs_port_table[i].vp = vp;
	/*
	 * Look to see if we are already paging to this file.
	 */
	/* make certain the copy send of kernel call will work */
	default_pager = MEMORY_OBJECT_DEFAULT_NULL;
	kr = host_default_memory_manager(host_priv_self(), &default_pager, 0);
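	/*
	 * Because MEMORY_OBJECT_DEFAULT_NULL was passed in, the call above
	 * does not change the default memory manager; it only returns a
	 * send right to the current default pager.
	 */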
	if (kr != KERN_SUCCESS) {
		error = EAGAIN;
		VOP_UNLOCK(vp, 0, p);
		bs_port_table[i].vp = 0;
		goto swapon_bailout;
	}
	kr = default_pager_backing_store_create(default_pager,
			-1, /* default priority */
			0, /* default cluster size */
			&backing_store);
	memory_object_default_deallocate(default_pager);
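	/*
	 * The default pager reference obtained above is only needed to
	 * create the backing store object, so it is dropped here.
	 */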
	if (kr != KERN_SUCCESS) {
		error = ENOMEM;
		VOP_UNLOCK(vp, 0, p);
		bs_port_table[i].vp = 0;
		goto swapon_bailout;
	}
	/*
	 * NOTE: we are able to supply PAGE_SIZE here instead of
	 *	an actual record size or block number because:
	 *	a: we do not support offsets from the beginning of the
	 *		file (allowing for non page size/record modulo offsets).
	 *	b: all paging will be done modulo page size.
	 */

	VOP_UNLOCK(vp, 0, p);
	kr = default_pager_add_file(backing_store, vp, PAGE_SIZE,
			((int)vattr.va_size) / PAGE_SIZE);
	if (kr != KERN_SUCCESS) {
		bs_port_table[i].vp = 0;
		if (kr == KERN_INVALID_ARGUMENT)
			error = EINVAL;
		else
			error = ENOMEM;
		goto swapon_bailout;
	}
	bs_port_table[i].bs = (void *)backing_store;
	error = 0;

	if (!ubc_hold(vp))
		panic("macx_swapon: hold");
	/* Mark this vnode as being used for swapfile */
	SET(vp->v_flag, VSWAP);

	/*
	 * take an extra reference on the vnode to keep
	 * vnreclaim() away from this vnode.
	 */
	VREF(vp);

	/* Hold on to the namei reference to the paging file vnode */
	vp = 0;

swapon_bailout:
	if (vp) {
		vrele(vp);
	}
	(void) thread_funnel_set(kernel_flock, FALSE);
	return (error);
}
/*
 *	Routine:	macx_swapoff
 *	Function:
 *		Syscall interface to remove a file from backing store
 */
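/*
 * Illustrative sketch only (not part of this file): assuming the matching
 * user-space prototype macx_swapoff(char *filename, int flags), the same
 * daemon would remove a paging file it no longer needs with:
 *
 *	if (macx_swapoff("/private/var/vm/swapfile0", 0) != 0)
 *		... the file is still needed as backing store ...
 */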
int
macx_swapoff(
	char		*filename,
	int		flags)
{
	kern_return_t	kr;
	mach_port_t	backing_store;

	struct vnode		*vp = 0;
	struct nameidata	nd, *ndp;
	struct proc		*p = current_proc();
	int			i;
	int			error;
	boolean_t		funnel_state;

	funnel_state = thread_funnel_set(kernel_flock, TRUE);
	backing_store = NULL;
	ndp = &nd;
	if ((error = suser(p->p_ucred, &p->p_acflag)))
		goto swapoff_bailout;
	/*
	 * Get the vnode for the paging area.
	 */
	NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE,
	    filename, p);

	if ((error = namei(ndp)))
		goto swapoff_bailout;
	vp = ndp->ni_vp;
	if (vp->v_type != VREG) {
		error = EINVAL;
		VOP_UNLOCK(vp, 0, p);
		goto swapoff_bailout;
	}
	for (i = 0; i < MAX_BACKING_STORE; i++) {
		if (bs_port_table[i].vp == vp)
			break;
	}

	if (i == MAX_BACKING_STORE) {
		/* this vnode is not a registered paging file */
		error = EINVAL;
		VOP_UNLOCK(vp, 0, p);
		goto swapoff_bailout;
	}
	backing_store = (mach_port_t)bs_port_table[i].bs;
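	/*
	 * Ask the default pager to tear down this backing store.  The pager
	 * has to move any pages it is keeping in this file to other backing
	 * store first; if it cannot, the delete fails and the swapfile
	 * remains in use.
	 */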
	VOP_UNLOCK(vp, 0, p);
	kr = default_pager_backing_store_delete(backing_store);
	switch (kr) {
	case KERN_SUCCESS:
		error = 0;
		bs_port_table[i].vp = 0;

		/* drop the ubc reference taken by macx_swapon() */
		ubc_rele(vp);

		/* This vnode is no longer used for swapfile */
		CLR(vp->v_flag, VSWAP);

		/* get rid of macx_swapon() namei() reference */
		vrele(vp);

		/* get rid of macx_swapon() "extra" reference */
		vrele(vp);
		break;
	case KERN_FAILURE:
	default:
		error = EAGAIN;
		break;
	}
swapoff_bailout:
	/* get rid of macx_swapoff() namei() reference */
	if (vp)
		vrele(vp);

	(void) thread_funnel_set(kernel_flock, FALSE);
	return (error);
}