]> git.saurik.com Git - apple/xnu.git/blob - bsd/vm/dp_backing_file.c
xnu-1504.15.3.tar.gz
[apple/xnu.git] / bsd / vm / dp_backing_file.c
1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
30 * support for mandatory and extensible security protections. This notice
31 * is included in support of clause 2.2 (b) of the Apple Public License,
32 * Version 2.0.
33 */
34
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/lock.h>
38 #include <sys/proc_internal.h>
39 #include <sys/kauth.h>
40 #include <sys/buf.h>
41 #include <sys/uio.h>
42 #include <sys/vnode_internal.h>
43 #include <sys/namei.h>
44 #include <sys/ubc_internal.h>
45 #include <sys/mount_internal.h>
46 #include <sys/malloc.h>
47
48 #include <default_pager/default_pager_types.h>
49 #include <default_pager/default_pager_object.h>
50
51 #include <security/audit/audit.h>
52 #include <bsm/audit_kevents.h>
53
54 #include <mach/mach_types.h>
55 #include <mach/host_priv.h>
56 #include <mach/mach_traps.h>
57 #include <mach/boolean.h>
58
59 #include <kern/kern_types.h>
60 #include <kern/host.h>
61 #include <kern/task.h>
62 #include <kern/zalloc.h>
63 #include <kern/kalloc.h>
64 #include <kern/assert.h>
65
66 #include <libkern/libkern.h>
67
68 #include <vm/vm_pageout.h>
69 #include <vm/vm_map.h>
70 #include <vm/vm_kern.h>
71 #include <vm/vnode_pager.h>
72 #include <vm/vm_protos.h>
73 #if CONFIG_MACF
74 #include <security/mac_framework.h>
75 #endif
76
77 /*
78 * temporary support for delayed instantiation
79 * of default_pager
80 */
81 int default_pager_init_flag = 0;
82
83 struct bs_map bs_port_table[MAX_BACKING_STORE] = {
84 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
85 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
86 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
87 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
88 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
89 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
90 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
91 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
92 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
93 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}};
94
95 /* ###################################################### */
96
97
98 /*
99 * Routine: macx_backing_store_recovery
100 * Function:
 *	Syscall interface to set a task's privilege
102 * level so that it is not subject to
103 * macx_backing_store_suspend
104 */
105 int
106 macx_backing_store_recovery(
107 struct macx_backing_store_recovery_args *args)
108 {
109 int pid = args->pid;
110 int error;
111 struct proc *p = current_proc();
112 boolean_t funnel_state;
113
114 funnel_state = thread_funnel_set(kernel_flock, TRUE);
115 if ((error = suser(kauth_cred_get(), 0)))
116 goto backing_store_recovery_return;
117
118 /* for now restrict backing_store_recovery */
119 /* usage to only present task */
120 if(pid != proc_selfpid()) {
121 error = EINVAL;
122 goto backing_store_recovery_return;
123 }
124
125 task_backing_store_privileged(p->task);
126
127 backing_store_recovery_return:
128 (void) thread_funnel_set(kernel_flock, FALSE);
129 return(error);
130 }
131
132 /*
133 * Routine: macx_backing_store_suspend
134 * Function:
135 * Syscall interface to stop new demand for
136 * backing store when backing store is low
137 */
138
139 int
140 macx_backing_store_suspend(
141 struct macx_backing_store_suspend_args *args)
142 {
143 boolean_t suspend = args->suspend;
144 int error;
145 boolean_t funnel_state;
146
147 funnel_state = thread_funnel_set(kernel_flock, TRUE);
148 if ((error = suser(kauth_cred_get(), 0)))
149 goto backing_store_suspend_return;
150
151 vm_backing_store_disable(suspend);
152
153 backing_store_suspend_return:
154 (void) thread_funnel_set(kernel_flock, FALSE);
155 return(error);
156 }
157
158 extern boolean_t backing_store_stop_compaction;
159
160 /*
161 * Routine: macx_backing_store_compaction
162 * Function:
163 * Turn compaction of swap space on or off. This is
164 * used during shutdown/restart so that the kernel
165 * doesn't waste time compacting swap files that are
166 * about to be deleted anyway. Compaction is always
167 * on by default when the system comes up and is turned
168 * off when a shutdown/restart is requested. It is
169 * re-enabled if the shutdown/restart is aborted for any reason.
170 */
171
172 int
173 macx_backing_store_compaction(int flags)
174 {
175 int error;
176
177 if ((error = suser(kauth_cred_get(), 0)))
178 return error;
179
180 if (flags & SWAP_COMPACT_DISABLE) {
181 backing_store_stop_compaction = TRUE;
182
183 } else if (flags & SWAP_COMPACT_ENABLE) {
184 backing_store_stop_compaction = FALSE;
185 }
186
187 return 0;
188 }
189
190 /*
191 * Routine: macx_triggers
192 * Function:
193 * Syscall interface to set the call backs for low and
194 * high water marks.
195 */
196 int
197 macx_triggers(
198 struct macx_triggers_args *args)
199 {
200 int error;
201
202 error = suser(kauth_cred_get(), 0);
203 if (error)
204 return error;
205
206 return mach_macx_triggers(args);
207 }
208
209
210 extern boolean_t dp_isssd;
211
212 /*
213 * Routine: macx_swapon
214 * Function:
215 * Syscall interface to add a file to backing store
216 */
217 int
218 macx_swapon(
219 struct macx_swapon_args *args)
220 {
221 int size = args->size;
222 vnode_t vp = (vnode_t)NULL;
223 struct nameidata nd, *ndp;
224 register int error;
225 kern_return_t kr;
226 mach_port_t backing_store;
227 memory_object_default_t default_pager;
228 int i;
229 boolean_t funnel_state;
230 off_t file_size;
231 vfs_context_t ctx = vfs_context_current();
232 struct proc *p = current_proc();
233 int dp_cluster_size;
234
235
236 AUDIT_MACH_SYSCALL_ENTER(AUE_SWAPON);
237 AUDIT_ARG(value32, args->priority);
238
239 funnel_state = thread_funnel_set(kernel_flock, TRUE);
240 ndp = &nd;
241
242 if ((error = suser(kauth_cred_get(), 0)))
243 goto swapon_bailout;
244
245 /*
246 * Get a vnode for the paging area.
247 */
248 NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1,
249 ((IS_64BIT_PROCESS(p)) ? UIO_USERSPACE64 : UIO_USERSPACE32),
250 (user_addr_t) args->filename, ctx);
251
252 if ((error = namei(ndp)))
253 goto swapon_bailout;
254 nameidone(ndp);
255 vp = ndp->ni_vp;
256
257 if (vp->v_type != VREG) {
258 error = EINVAL;
259 goto swapon_bailout;
260 }
261
262 /* get file size */
263 if ((error = vnode_size(vp, &file_size, ctx)) != 0)
264 goto swapon_bailout;
265 #if CONFIG_MACF
266 vnode_lock(vp);
267 error = mac_system_check_swapon(vfs_context_ucred(ctx), vp);
268 vnode_unlock(vp);
269 if (error)
270 goto swapon_bailout;
271 #endif
272
273 /* resize to desired size if it's too small */
274 if ((file_size < (off_t)size) && ((error = vnode_setsize(vp, (off_t)size, 0, ctx)) != 0))
275 goto swapon_bailout;
276
277 if (default_pager_init_flag == 0) {
278 start_def_pager(NULL);
279 default_pager_init_flag = 1;
280 }
281
282 /* add new backing store to list */
283 i = 0;
284 while(bs_port_table[i].vp != 0) {
285 if(i == MAX_BACKING_STORE)
286 break;
287 i++;
288 }
289 if(i == MAX_BACKING_STORE) {
290 error = ENOMEM;
291 goto swapon_bailout;
292 }
293
294 /* remember the vnode. This vnode has namei() reference */
295 bs_port_table[i].vp = vp;
296
297 /*
298 * Look to see if we are already paging to this file.
299 */
300 /* make certain the copy send of kernel call will work */
301 default_pager = MEMORY_OBJECT_DEFAULT_NULL;
302 kr = host_default_memory_manager(host_priv_self(), &default_pager, 0);
303 if(kr != KERN_SUCCESS) {
304 error = EAGAIN;
305 bs_port_table[i].vp = 0;
306 goto swapon_bailout;
307 }
308
309 if (vp->v_mount->mnt_kern_flag & MNTK_SSD) {
310 /*
311 * keep the cluster size small since the
312 * seek cost is effectively 0 which means
313 * we don't care much about fragmentation
314 */
315 dp_isssd = TRUE;
316 dp_cluster_size = 2 * PAGE_SIZE;
317 } else {
318 /*
319 * use the default cluster size
320 */
321 dp_isssd = FALSE;
322 dp_cluster_size = 0;
323 }
324 kr = default_pager_backing_store_create(default_pager,
325 -1, /* default priority */
326 dp_cluster_size,
327 &backing_store);
328 memory_object_default_deallocate(default_pager);
329
330 if(kr != KERN_SUCCESS) {
331 error = ENOMEM;
332 bs_port_table[i].vp = 0;
333 goto swapon_bailout;
334 }
335
336 /* Mark this vnode as being used for swapfile */
337 vnode_lock_spin(vp);
338 SET(vp->v_flag, VSWAP);
339 vnode_unlock(vp);
340
341 /*
342 * NOTE: we are able to supply PAGE_SIZE here instead of
343 * an actual record size or block number because:
344 * a: we do not support offsets from the beginning of the
345 * file (allowing for non page size/record modulo offsets.
346 * b: because allow paging will be done modulo page size
347 */
348
349 kr = default_pager_add_file(backing_store, (vnode_ptr_t) vp,
350 PAGE_SIZE, (int)(file_size/PAGE_SIZE));
351 if(kr != KERN_SUCCESS) {
352 bs_port_table[i].vp = 0;
353 if(kr == KERN_INVALID_ARGUMENT)
354 error = EINVAL;
355 else
356 error = ENOMEM;
357
358 /* This vnode is not to be used for swapfile */
359 vnode_lock_spin(vp);
360 CLR(vp->v_flag, VSWAP);
361 vnode_unlock(vp);
362
363 goto swapon_bailout;
364 }
365 bs_port_table[i].bs = (void *)backing_store;
366 error = 0;
367
368 ubc_setthreadcred(vp, p, current_thread());
369
370 /*
371 * take a long term reference on the vnode to keep
372 * vnreclaim() away from this vnode.
373 */
374 vnode_ref(vp);
375
376 swapon_bailout:
377 if (vp) {
378 vnode_put(vp);
379 }
380 (void) thread_funnel_set(kernel_flock, FALSE);
381 AUDIT_MACH_SYSCALL_EXIT(error);
382 return(error);
383 }
384
385 /*
386 * Routine: macx_swapoff
387 * Function:
388 * Syscall interface to remove a file from backing store
389 */
390 int
391 macx_swapoff(
392 struct macx_swapoff_args *args)
393 {
394 __unused int flags = args->flags;
395 kern_return_t kr;
396 mach_port_t backing_store;
397
398 struct vnode *vp = 0;
399 struct nameidata nd, *ndp;
400 struct proc *p = current_proc();
401 int i;
402 int error;
403 boolean_t funnel_state;
404 vfs_context_t ctx = vfs_context_current();
405
406 AUDIT_MACH_SYSCALL_ENTER(AUE_SWAPOFF);
407
408 funnel_state = thread_funnel_set(kernel_flock, TRUE);
409 backing_store = NULL;
410 ndp = &nd;
411
412 if ((error = suser(kauth_cred_get(), 0)))
413 goto swapoff_bailout;
414
415 /*
416 * Get the vnode for the paging area.
417 */
418 NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1,
419 ((IS_64BIT_PROCESS(p)) ? UIO_USERSPACE64 : UIO_USERSPACE32),
420 (user_addr_t) args->filename, ctx);
421
422 if ((error = namei(ndp)))
423 goto swapoff_bailout;
424 nameidone(ndp);
425 vp = ndp->ni_vp;
426
427 if (vp->v_type != VREG) {
428 error = EINVAL;
429 goto swapoff_bailout;
430 }
431 #if CONFIG_MACF
432 vnode_lock(vp);
433 error = mac_system_check_swapoff(vfs_context_ucred(ctx), vp);
434 vnode_unlock(vp);
435 if (error)
436 goto swapoff_bailout;
437 #endif
438
439 for(i = 0; i < MAX_BACKING_STORE; i++) {
440 if(bs_port_table[i].vp == vp) {
441 break;
442 }
443 }
444 if (i == MAX_BACKING_STORE) {
445 error = EINVAL;
446 goto swapoff_bailout;
447 }
448 backing_store = (mach_port_t)bs_port_table[i].bs;
449
450 kr = default_pager_backing_store_delete(backing_store);
451 switch (kr) {
452 case KERN_SUCCESS:
453 error = 0;
454 bs_port_table[i].vp = 0;
455 /* This vnode is no longer used for swapfile */
456 vnode_lock_spin(vp);
457 CLR(vp->v_flag, VSWAP);
458 vnode_unlock(vp);
459
460 /* get rid of macx_swapon() "long term" reference */
461 vnode_rele(vp);
462
463 break;
464 case KERN_FAILURE:
465 error = EAGAIN;
466 break;
467 default:
468 error = EAGAIN;
469 break;
470 }
471
472 swapoff_bailout:
473 /* get rid of macx_swapoff() namei() reference */
474 if (vp)
475 vnode_put(vp);
476
477 (void) thread_funnel_set(kernel_flock, FALSE);
478 AUDIT_MACH_SYSCALL_EXIT(error);
479 return(error);
480 }
481
482 /*
483 * Routine: macx_swapinfo
484 * Function:
485 * Syscall interface to get general swap statistics
486 */
487 int
488 macx_swapinfo(
489 memory_object_size_t *total_p,
490 memory_object_size_t *avail_p,
491 vm_size_t *pagesize_p,
492 boolean_t *encrypted_p)
493 {
494 int error;
495 memory_object_default_t default_pager;
496 default_pager_info_64_t dpi64;
497 kern_return_t kr;
498
499 error = 0;
500
501 /*
502 * Get a handle on the default pager.
503 */
504 default_pager = MEMORY_OBJECT_DEFAULT_NULL;
505 kr = host_default_memory_manager(host_priv_self(), &default_pager, 0);
506 if (kr != KERN_SUCCESS) {
507 error = EAGAIN; /* XXX why EAGAIN ? */
508 goto done;
509 }
510 if (default_pager == MEMORY_OBJECT_DEFAULT_NULL) {
511 /*
512 * The default pager has not initialized yet,
513 * so it can't be using any swap space at all.
514 */
515 *total_p = 0;
516 *avail_p = 0;
517 *pagesize_p = 0;
518 *encrypted_p = FALSE;
519 goto done;
520 }
521
522 /*
523 * Get swap usage data from default pager.
524 */
525 kr = default_pager_info_64(default_pager, &dpi64);
526 if (kr != KERN_SUCCESS) {
527 error = ENOTSUP;
528 goto done;
529 }
530
531 /*
532 * Provide default pager info to caller.
533 */
534 *total_p = dpi64.dpi_total_space;
535 *avail_p = dpi64.dpi_free_space;
536 *pagesize_p = dpi64.dpi_page_size;
537 if (dpi64.dpi_flags & DPI_ENCRYPTED) {
538 *encrypted_p = TRUE;
539 } else {
540 *encrypted_p = FALSE;
541 }
542
543 done:
544 if (default_pager != MEMORY_OBJECT_DEFAULT_NULL) {
545 /* release our handle on default pager */
546 memory_object_default_deallocate(default_pager);
547 }
548 return error;
549 }