/*
 * Source: bsd/vm/dp_backing_file.c (apple/xnu, xnu-1699.26.8)
 */
1 /*
2 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
30 * support for mandatory and extensible security protections. This notice
31 * is included in support of clause 2.2 (b) of the Apple Public License,
32 * Version 2.0.
33 */
34
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/lock.h>
38 #include <sys/proc_internal.h>
39 #include <sys/kauth.h>
40 #include <sys/buf.h>
41 #include <sys/uio.h>
42 #include <sys/vnode_internal.h>
43 #include <sys/namei.h>
44 #include <sys/ubc_internal.h>
45 #include <sys/malloc.h>
46 #include <sys/user.h>
47 #if CONFIG_PROTECT
48 #include <sys/cprotect.h>
49 #endif
50
51 #include <default_pager/default_pager_types.h>
52 #include <default_pager/default_pager_object.h>
53
54 #include <security/audit/audit.h>
55 #include <bsm/audit_kevents.h>
56
57 #include <mach/mach_types.h>
58 #include <mach/host_priv.h>
59 #include <mach/mach_traps.h>
60 #include <mach/boolean.h>
61
62 #include <kern/kern_types.h>
63 #include <kern/host.h>
64 #include <kern/task.h>
65 #include <kern/zalloc.h>
66 #include <kern/kalloc.h>
67 #include <kern/assert.h>
68
69 #include <libkern/libkern.h>
70
71 #include <vm/vm_pageout.h>
72 #include <vm/vm_map.h>
73 #include <vm/vm_kern.h>
74 #include <vm/vnode_pager.h>
75 #include <vm/vm_protos.h>
76 #if CONFIG_MACF
77 #include <security/mac_framework.h>
78 #endif
79
80 /*
81 * temporary support for delayed instantiation
82 * of default_pager
83 */
84 int default_pager_init_flag = 0;
85
86 struct bs_map bs_port_table[MAX_BACKING_STORE] = {
87 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
88 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
89 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
90 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
91 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
92 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
93 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
94 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
95 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
96 {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}};
97
98 /* ###################################################### */
99
100
101 /*
102 * Routine: macx_backing_store_recovery
103 * Function:
104 * Syscall interface to set a tasks privilege
105 * level so that it is not subject to
106 * macx_backing_store_suspend
107 */
108 int
109 macx_backing_store_recovery(
110 struct macx_backing_store_recovery_args *args)
111 {
112 int pid = args->pid;
113 int error;
114 struct proc *p = current_proc();
115 boolean_t funnel_state;
116
117 funnel_state = thread_funnel_set(kernel_flock, TRUE);
118 if ((error = suser(kauth_cred_get(), 0)))
119 goto backing_store_recovery_return;
120
121 /* for now restrict backing_store_recovery */
122 /* usage to only present task */
123 if(pid != proc_selfpid()) {
124 error = EINVAL;
125 goto backing_store_recovery_return;
126 }
127
128 task_backing_store_privileged(p->task);
129
130 backing_store_recovery_return:
131 (void) thread_funnel_set(kernel_flock, FALSE);
132 return(error);
133 }
134
135 /*
136 * Routine: macx_backing_store_suspend
137 * Function:
138 * Syscall interface to stop new demand for
139 * backing store when backing store is low
140 */
141
142 int
143 macx_backing_store_suspend(
144 struct macx_backing_store_suspend_args *args)
145 {
146 boolean_t suspend = args->suspend;
147 int error;
148 boolean_t funnel_state;
149
150 funnel_state = thread_funnel_set(kernel_flock, TRUE);
151 if ((error = suser(kauth_cred_get(), 0)))
152 goto backing_store_suspend_return;
153
154 vm_backing_store_disable(suspend);
155
156 backing_store_suspend_return:
157 (void) thread_funnel_set(kernel_flock, FALSE);
158 return(error);
159 }
160
161 extern boolean_t backing_store_stop_compaction;
162
163 /*
164 * Routine: macx_backing_store_compaction
165 * Function:
166 * Turn compaction of swap space on or off. This is
167 * used during shutdown/restart so that the kernel
168 * doesn't waste time compacting swap files that are
169 * about to be deleted anyway. Compaction is always
170 * on by default when the system comes up and is turned
171 * off when a shutdown/restart is requested. It is
172 * re-enabled if the shutdown/restart is aborted for any reason.
173 */
174
175 int
176 macx_backing_store_compaction(int flags)
177 {
178 int error;
179
180 if ((error = suser(kauth_cred_get(), 0)))
181 return error;
182
183 if (flags & SWAP_COMPACT_DISABLE) {
184 backing_store_stop_compaction = TRUE;
185
186 } else if (flags & SWAP_COMPACT_ENABLE) {
187 backing_store_stop_compaction = FALSE;
188 }
189
190 return 0;
191 }
192
193 /*
194 * Routine: macx_triggers
195 * Function:
196 * Syscall interface to set the call backs for low and
197 * high water marks.
198 */
199 int
200 macx_triggers(
201 struct macx_triggers_args *args)
202 {
203 int error;
204
205 error = suser(kauth_cred_get(), 0);
206 if (error)
207 return error;
208
209 return mach_macx_triggers(args);
210 }
211
212
213 extern boolean_t dp_isssd;
214
215 /*
216 * Routine: macx_swapon
217 * Function:
218 * Syscall interface to add a file to backing store
219 */
220 int
221 macx_swapon(
222 struct macx_swapon_args *args)
223 {
224 int size = args->size;
225 vnode_t vp = (vnode_t)NULL;
226 struct nameidata nd, *ndp;
227 register int error;
228 kern_return_t kr;
229 mach_port_t backing_store;
230 memory_object_default_t default_pager;
231 int i;
232 boolean_t funnel_state;
233 off_t file_size;
234 vfs_context_t ctx = vfs_context_current();
235 struct proc *p = current_proc();
236 int dp_cluster_size;
237
238
239 AUDIT_MACH_SYSCALL_ENTER(AUE_SWAPON);
240 AUDIT_ARG(value32, args->priority);
241
242 funnel_state = thread_funnel_set(kernel_flock, TRUE);
243 ndp = &nd;
244
245 if ((error = suser(kauth_cred_get(), 0)))
246 goto swapon_bailout;
247
248 /*
249 * Get a vnode for the paging area.
250 */
251 NDINIT(ndp, LOOKUP, OP_LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1,
252 ((IS_64BIT_PROCESS(p)) ? UIO_USERSPACE64 : UIO_USERSPACE32),
253 (user_addr_t) args->filename, ctx);
254
255 if ((error = namei(ndp)))
256 goto swapon_bailout;
257 nameidone(ndp);
258 vp = ndp->ni_vp;
259
260 if (vp->v_type != VREG) {
261 error = EINVAL;
262 goto swapon_bailout;
263 }
264
265 /* get file size */
266 if ((error = vnode_size(vp, &file_size, ctx)) != 0)
267 goto swapon_bailout;
268 #if CONFIG_MACF
269 vnode_lock(vp);
270 error = mac_system_check_swapon(vfs_context_ucred(ctx), vp);
271 vnode_unlock(vp);
272 if (error)
273 goto swapon_bailout;
274 #endif
275
276 /* resize to desired size if it's too small */
277 if ((file_size < (off_t)size) && ((error = vnode_setsize(vp, (off_t)size, 0, ctx)) != 0))
278 goto swapon_bailout;
279
280 #if CONFIG_PROTECT
281 {
282 void *cnode = NULL;
283 /* initialize content protection keys manually */
284 if ((cnode = cp_get_protected_cnode(vp)) != 0) {
285 if ((error = cp_handle_vnop(cnode, CP_WRITE_ACCESS)) != 0)
286 goto swapon_bailout;
287 }
288 }
289 #endif
290
291
292 if (default_pager_init_flag == 0) {
293 start_def_pager(NULL);
294 default_pager_init_flag = 1;
295 }
296
297 /* add new backing store to list */
298 i = 0;
299 while(bs_port_table[i].vp != 0) {
300 if(i == MAX_BACKING_STORE)
301 break;
302 i++;
303 }
304 if(i == MAX_BACKING_STORE) {
305 error = ENOMEM;
306 goto swapon_bailout;
307 }
308
309 /* remember the vnode. This vnode has namei() reference */
310 bs_port_table[i].vp = vp;
311
312 /*
313 * Look to see if we are already paging to this file.
314 */
315 /* make certain the copy send of kernel call will work */
316 default_pager = MEMORY_OBJECT_DEFAULT_NULL;
317 kr = host_default_memory_manager(host_priv_self(), &default_pager, 0);
318 if(kr != KERN_SUCCESS) {
319 error = EAGAIN;
320 bs_port_table[i].vp = 0;
321 goto swapon_bailout;
322 }
323
324 #if CONFIG_EMBEDDED
325 dp_cluster_size = 1 * PAGE_SIZE;
326 #else
327 if ((dp_isssd = vnode_pager_isSSD(vp)) == TRUE) {
328 /*
329 * keep the cluster size small since the
330 * seek cost is effectively 0 which means
331 * we don't care much about fragmentation
332 */
333 dp_cluster_size = 2 * PAGE_SIZE;
334 } else {
335 /*
336 * use the default cluster size
337 */
338 dp_cluster_size = 0;
339 }
340 #endif
341 kr = default_pager_backing_store_create(default_pager,
342 -1, /* default priority */
343 dp_cluster_size,
344 &backing_store);
345 memory_object_default_deallocate(default_pager);
346
347 if(kr != KERN_SUCCESS) {
348 error = ENOMEM;
349 bs_port_table[i].vp = 0;
350 goto swapon_bailout;
351 }
352
353 /* Mark this vnode as being used for swapfile */
354 vnode_lock_spin(vp);
355 SET(vp->v_flag, VSWAP);
356 vnode_unlock(vp);
357
358 /*
359 * NOTE: we are able to supply PAGE_SIZE here instead of
360 * an actual record size or block number because:
361 * a: we do not support offsets from the beginning of the
362 * file (allowing for non page size/record modulo offsets.
363 * b: because allow paging will be done modulo page size
364 */
365
366 kr = default_pager_add_file(backing_store, (vnode_ptr_t) vp,
367 PAGE_SIZE, (int)(file_size/PAGE_SIZE));
368 if(kr != KERN_SUCCESS) {
369 bs_port_table[i].vp = 0;
370 if(kr == KERN_INVALID_ARGUMENT)
371 error = EINVAL;
372 else
373 error = ENOMEM;
374
375 /* This vnode is not to be used for swapfile */
376 vnode_lock_spin(vp);
377 CLR(vp->v_flag, VSWAP);
378 vnode_unlock(vp);
379
380 goto swapon_bailout;
381 }
382 bs_port_table[i].bs = (void *)backing_store;
383 error = 0;
384
385 ubc_setthreadcred(vp, p, current_thread());
386
387 /*
388 * take a long term reference on the vnode to keep
389 * vnreclaim() away from this vnode.
390 */
391 vnode_ref(vp);
392
393 swapon_bailout:
394 if (vp) {
395 vnode_put(vp);
396 }
397 (void) thread_funnel_set(kernel_flock, FALSE);
398 AUDIT_MACH_SYSCALL_EXIT(error);
399
400 if (error)
401 printf("macx_swapon FAILED - %d\n", error);
402 else
403 printf("macx_swapon SUCCESS\n");
404
405 return(error);
406 }
407
408 /*
409 * Routine: macx_swapoff
410 * Function:
411 * Syscall interface to remove a file from backing store
412 */
413 int
414 macx_swapoff(
415 struct macx_swapoff_args *args)
416 {
417 __unused int flags = args->flags;
418 kern_return_t kr;
419 mach_port_t backing_store;
420
421 struct vnode *vp = 0;
422 struct nameidata nd, *ndp;
423 struct proc *p = current_proc();
424 int i;
425 int error;
426 boolean_t funnel_state;
427 vfs_context_t ctx = vfs_context_current();
428 struct uthread *ut;
429 int orig_iopol_disk;
430
431 AUDIT_MACH_SYSCALL_ENTER(AUE_SWAPOFF);
432
433 funnel_state = thread_funnel_set(kernel_flock, TRUE);
434 backing_store = NULL;
435 ndp = &nd;
436
437 if ((error = suser(kauth_cred_get(), 0)))
438 goto swapoff_bailout;
439
440 /*
441 * Get the vnode for the paging area.
442 */
443 NDINIT(ndp, LOOKUP, OP_LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1,
444 ((IS_64BIT_PROCESS(p)) ? UIO_USERSPACE64 : UIO_USERSPACE32),
445 (user_addr_t) args->filename, ctx);
446
447 if ((error = namei(ndp)))
448 goto swapoff_bailout;
449 nameidone(ndp);
450 vp = ndp->ni_vp;
451
452 if (vp->v_type != VREG) {
453 error = EINVAL;
454 goto swapoff_bailout;
455 }
456 #if CONFIG_MACF
457 vnode_lock(vp);
458 error = mac_system_check_swapoff(vfs_context_ucred(ctx), vp);
459 vnode_unlock(vp);
460 if (error)
461 goto swapoff_bailout;
462 #endif
463
464 for(i = 0; i < MAX_BACKING_STORE; i++) {
465 if(bs_port_table[i].vp == vp) {
466 break;
467 }
468 }
469 if (i == MAX_BACKING_STORE) {
470 error = EINVAL;
471 goto swapoff_bailout;
472 }
473 backing_store = (mach_port_t)bs_port_table[i].bs;
474
475 ut = get_bsdthread_info(current_thread());
476
477 #if !CONFIG_EMBEDDED
478 orig_iopol_disk = proc_get_thread_selfdiskacc();
479 proc_apply_thread_selfdiskacc(IOPOL_THROTTLE);
480 #else /* !CONFIG_EMBEDDED */
481 orig_iopol_disk = ut->uu_iopol_disk;
482 ut->uu_iopol_disk = IOPOL_THROTTLE;
483 #endif /* !CONFIG_EMBEDDED */
484
485 kr = default_pager_backing_store_delete(backing_store);
486
487 #if !CONFIG_EMBEDDED
488 proc_apply_thread_selfdiskacc(orig_iopol_disk);
489 #else /* !CONFIG_EMBEDDED */
490 ut->uu_iopol_disk = orig_iopol_disk;
491 #endif /* !CONFIG_EMBEDDED */
492
493 switch (kr) {
494 case KERN_SUCCESS:
495 error = 0;
496 bs_port_table[i].vp = 0;
497 /* This vnode is no longer used for swapfile */
498 vnode_lock_spin(vp);
499 CLR(vp->v_flag, VSWAP);
500 vnode_unlock(vp);
501
502 /* get rid of macx_swapon() "long term" reference */
503 vnode_rele(vp);
504
505 break;
506 case KERN_FAILURE:
507 error = EAGAIN;
508 break;
509 default:
510 error = EAGAIN;
511 break;
512 }
513
514 swapoff_bailout:
515 /* get rid of macx_swapoff() namei() reference */
516 if (vp)
517 vnode_put(vp);
518
519 (void) thread_funnel_set(kernel_flock, FALSE);
520 AUDIT_MACH_SYSCALL_EXIT(error);
521
522 if (error)
523 printf("macx_swapoff FAILED - %d\n", error);
524 else
525 printf("macx_swapoff SUCCESS\n");
526
527 return(error);
528 }
529
530 /*
531 * Routine: macx_swapinfo
532 * Function:
533 * Syscall interface to get general swap statistics
534 */
535 int
536 macx_swapinfo(
537 memory_object_size_t *total_p,
538 memory_object_size_t *avail_p,
539 vm_size_t *pagesize_p,
540 boolean_t *encrypted_p)
541 {
542 int error;
543 memory_object_default_t default_pager;
544 default_pager_info_64_t dpi64;
545 kern_return_t kr;
546
547 error = 0;
548
549 /*
550 * Get a handle on the default pager.
551 */
552 default_pager = MEMORY_OBJECT_DEFAULT_NULL;
553 kr = host_default_memory_manager(host_priv_self(), &default_pager, 0);
554 if (kr != KERN_SUCCESS) {
555 error = EAGAIN; /* XXX why EAGAIN ? */
556 goto done;
557 }
558 if (default_pager == MEMORY_OBJECT_DEFAULT_NULL) {
559 /*
560 * The default pager has not initialized yet,
561 * so it can't be using any swap space at all.
562 */
563 *total_p = 0;
564 *avail_p = 0;
565 *pagesize_p = 0;
566 *encrypted_p = FALSE;
567 goto done;
568 }
569
570 /*
571 * Get swap usage data from default pager.
572 */
573 kr = default_pager_info_64(default_pager, &dpi64);
574 if (kr != KERN_SUCCESS) {
575 error = ENOTSUP;
576 goto done;
577 }
578
579 /*
580 * Provide default pager info to caller.
581 */
582 *total_p = dpi64.dpi_total_space;
583 *avail_p = dpi64.dpi_free_space;
584 *pagesize_p = dpi64.dpi_page_size;
585 if (dpi64.dpi_flags & DPI_ENCRYPTED) {
586 *encrypted_p = TRUE;
587 } else {
588 *encrypted_p = FALSE;
589 }
590
591 done:
592 if (default_pager != MEMORY_OBJECT_DEFAULT_NULL) {
593 /* release our handle on default pager */
594 memory_object_default_deallocate(default_pager);
595 }
596 return error;
597 }