/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
23 #include <sys/param.h>
24 #include <sys/systm.h>
26 #include <sys/proc_internal.h>
27 #include <sys/kauth.h>
30 #include <sys/vnode_internal.h>
31 #include <sys/namei.h>
32 #include <sys/ubc_internal.h>
33 #include <sys/malloc.h>
35 #include <default_pager/default_pager_types.h>
36 #include <default_pager/default_pager_object.h>
38 #include <bsm/audit_kernel.h>
39 #include <bsm/audit_kevents.h>
41 #include <mach/mach_types.h>
42 #include <mach/host_priv.h>
43 #include <mach/mach_traps.h>
44 #include <mach/boolean.h>
46 #include <kern/kern_types.h>
47 #include <kern/host.h>
48 #include <kern/task.h>
49 #include <kern/zalloc.h>
50 #include <kern/kalloc.h>
51 #include <kern/assert.h>
53 #include <libkern/libkern.h>
55 #include <vm/vm_pageout.h>
56 #include <vm/vm_map.h>
57 #include <vm/vm_kern.h>
58 #include <vm/vnode_pager.h>
59 #include <vm/vm_protos.h>
61 extern thread_t
current_act(void);
64 * temporary support for delayed instantiation
67 int default_pager_init_flag
= 0;
69 struct bs_map bs_port_table
[MAX_BACKING_STORE
] = {
70 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
71 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
72 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
73 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
74 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
75 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
76 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
77 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
78 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
79 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}};
81 /* ###################################################### */
85 * Routine: macx_backing_store_recovery
87 * Syscall interface to set a tasks privilege
88 * level so that it is not subject to
89 * macx_backing_store_suspend
92 macx_backing_store_recovery(
93 struct macx_backing_store_recovery_args
*args
)
97 struct proc
*p
= current_proc();
98 boolean_t funnel_state
;
100 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
101 if ((error
= suser(kauth_cred_get(), 0)))
102 goto backing_store_recovery_return
;
104 /* for now restrict backing_store_recovery */
105 /* usage to only present task */
106 if(pid
!= proc_selfpid()) {
108 goto backing_store_recovery_return
;
111 task_backing_store_privileged(p
->task
);
113 backing_store_recovery_return
:
114 (void) thread_funnel_set(kernel_flock
, FALSE
);
119 * Routine: macx_backing_store_suspend
121 * Syscall interface to stop new demand for
122 * backing store when backing store is low
126 macx_backing_store_suspend(
127 struct macx_backing_store_suspend_args
*args
)
129 boolean_t suspend
= args
->suspend
;
131 boolean_t funnel_state
;
133 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
134 if ((error
= suser(kauth_cred_get(), 0)))
135 goto backing_store_suspend_return
;
137 vm_backing_store_disable(suspend
);
139 backing_store_suspend_return
:
140 (void) thread_funnel_set(kernel_flock
, FALSE
);
145 * Routine: macx_swapon
147 * Syscall interface to add a file to backing store
151 struct macx_swapon_args
*args
)
153 int size
= args
->size
;
154 vnode_t vp
= (vnode_t
)NULL
;
155 struct nameidata nd
, *ndp
;
156 struct proc
*p
= current_proc();
159 mach_port_t backing_store
;
160 memory_object_default_t default_pager
;
162 boolean_t funnel_state
;
164 struct vfs_context context
;
167 context
.vc_ucred
= kauth_cred_get();
169 AUDIT_MACH_SYSCALL_ENTER(AUE_SWAPON
);
170 AUDIT_ARG(value
, args
->priority
);
172 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
175 if ((error
= suser(kauth_cred_get(), 0)))
178 if(default_pager_init_flag
== 0) {
179 start_def_pager(NULL
);
180 default_pager_init_flag
= 1;
184 * Get a vnode for the paging area.
186 NDINIT(ndp
, LOOKUP
, FOLLOW
| LOCKLEAF
| AUDITVNPATH1
,
187 ((IS_64BIT_PROCESS(p
)) ? UIO_USERSPACE64
: UIO_USERSPACE32
),
188 CAST_USER_ADDR_T(args
->filename
), &context
);
190 if ((error
= namei(ndp
)))
195 if (vp
->v_type
!= VREG
) {
199 UBCINFOCHECK("macx_swapon", vp
);
202 if ((error
= vnode_size(vp
, &file_size
, &context
)) != 0)
205 /* resize to desired size if it's too small */
206 if ((file_size
< (off_t
)size
) && ((error
= vnode_setsize(vp
, (off_t
)size
, 0, &context
)) != 0))
209 /* add new backing store to list */
211 while(bs_port_table
[i
].vp
!= 0) {
212 if(i
== MAX_BACKING_STORE
)
216 if(i
== MAX_BACKING_STORE
) {
221 /* remember the vnode. This vnode has namei() reference */
222 bs_port_table
[i
].vp
= vp
;
225 * Look to see if we are already paging to this file.
227 /* make certain the copy send of kernel call will work */
228 default_pager
= MEMORY_OBJECT_DEFAULT_NULL
;
229 kr
= host_default_memory_manager(host_priv_self(), &default_pager
, 0);
230 if(kr
!= KERN_SUCCESS
) {
232 bs_port_table
[i
].vp
= 0;
236 kr
= default_pager_backing_store_create(default_pager
,
237 -1, /* default priority */
238 0, /* default cluster size */
240 memory_object_default_deallocate(default_pager
);
242 if(kr
!= KERN_SUCCESS
) {
244 bs_port_table
[i
].vp
= 0;
249 * NOTE: we are able to supply PAGE_SIZE here instead of
250 * an actual record size or block number because:
251 * a: we do not support offsets from the beginning of the
252 * file (allowing for non page size/record modulo offsets.
253 * b: because allow paging will be done modulo page size
256 kr
= default_pager_add_file(backing_store
, (vnode_ptr_t
) vp
,
257 PAGE_SIZE
, (int)(file_size
/PAGE_SIZE
));
258 if(kr
!= KERN_SUCCESS
) {
259 bs_port_table
[i
].vp
= 0;
260 if(kr
== KERN_INVALID_ARGUMENT
)
266 bs_port_table
[i
].bs
= (void *)backing_store
;
269 /* Mark this vnode as being used for swapfile */
270 SET(vp
->v_flag
, VSWAP
);
275 * take a long term reference on the vnode to keep
276 * vnreclaim() away from this vnode.
284 (void) thread_funnel_set(kernel_flock
, FALSE
);
285 AUDIT_MACH_SYSCALL_EXIT(error
);
290 * Routine: macx_swapoff
292 * Syscall interface to remove a file from backing store
296 struct macx_swapoff_args
*args
)
298 __unused
int flags
= args
->flags
;
300 mach_port_t backing_store
;
302 struct vnode
*vp
= 0;
303 struct nameidata nd
, *ndp
;
304 struct proc
*p
= current_proc();
307 boolean_t funnel_state
;
308 struct vfs_context context
;
311 context
.vc_ucred
= kauth_cred_get();
313 AUDIT_MACH_SYSCALL_ENTER(AUE_SWAPOFF
);
315 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
316 backing_store
= NULL
;
319 if ((error
= suser(kauth_cred_get(), 0)))
320 goto swapoff_bailout
;
323 * Get the vnode for the paging area.
325 NDINIT(ndp
, LOOKUP
, FOLLOW
| LOCKLEAF
| AUDITVNPATH1
,
326 ((IS_64BIT_PROCESS(p
)) ? UIO_USERSPACE64
: UIO_USERSPACE32
),
327 CAST_USER_ADDR_T(args
->filename
), &context
);
329 if ((error
= namei(ndp
)))
330 goto swapoff_bailout
;
334 if (vp
->v_type
!= VREG
) {
336 goto swapoff_bailout
;
339 for(i
= 0; i
< MAX_BACKING_STORE
; i
++) {
340 if(bs_port_table
[i
].vp
== vp
) {
344 if (i
== MAX_BACKING_STORE
) {
346 goto swapoff_bailout
;
348 backing_store
= (mach_port_t
)bs_port_table
[i
].bs
;
350 kr
= default_pager_backing_store_delete(backing_store
);
354 bs_port_table
[i
].vp
= 0;
355 /* This vnode is no longer used for swapfile */
356 CLR(vp
->v_flag
, VSWAP
);
358 /* get rid of macx_swapon() "long term" reference */
371 /* get rid of macx_swapoff() namei() reference */
375 (void) thread_funnel_set(kernel_flock
, FALSE
);
376 AUDIT_MACH_SYSCALL_EXIT(error
);
381 * Routine: macx_swapinfo
383 * Syscall interface to get general swap statistics
387 memory_object_size_t
*total_p
,
388 memory_object_size_t
*avail_p
,
389 vm_size_t
*pagesize_p
,
390 boolean_t
*encrypted_p
)
393 memory_object_default_t default_pager
;
394 default_pager_info_64_t dpi64
;
400 * Get a handle on the default pager.
402 default_pager
= MEMORY_OBJECT_DEFAULT_NULL
;
403 kr
= host_default_memory_manager(host_priv_self(), &default_pager
, 0);
404 if (kr
!= KERN_SUCCESS
) {
405 error
= EAGAIN
; /* XXX why EAGAIN ? */
408 if (default_pager
== MEMORY_OBJECT_DEFAULT_NULL
) {
410 * The default pager has not initialized yet,
411 * so it can't be using any swap space at all.
416 *encrypted_p
= FALSE
;
421 * Get swap usage data from default pager.
423 kr
= default_pager_info_64(default_pager
, &dpi64
);
424 if (kr
!= KERN_SUCCESS
) {
430 * Provide default pager info to caller.
432 *total_p
= dpi64
.dpi_total_space
;
433 *avail_p
= dpi64
.dpi_free_space
;
434 *pagesize_p
= dpi64
.dpi_page_size
;
435 if (dpi64
.dpi_flags
& DPI_ENCRYPTED
) {
438 *encrypted_p
= FALSE
;
442 if (default_pager
!= MEMORY_OBJECT_DEFAULT_NULL
) {
443 /* release our handle on default pager */
444 memory_object_default_deallocate(default_pager
);