/*
 * Copyright (c) 2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/filedesc.h>
32 #include <sys/kernel.h>
33 #include <sys/file_internal.h>
34 #include <kern/exc_guard.h>
35 #include <sys/guarded.h>
36 #include <kern/kalloc.h>
37 #include <sys/sysproto.h>
38 #include <sys/vnode.h>
39 #include <sys/vnode_internal.h>
40 #include <sys/uio_internal.h>
41 #include <sys/ubc_internal.h>
42 #include <vfs/vfs_support.h>
43 #include <security/audit/audit.h>
44 #include <sys/syscall.h>
45 #include <sys/kauth.h>
46 #include <sys/kdebug.h>
48 #include <vm/vm_protos.h>
49 #include <libkern/section_keywords.h>
50 #if CONFIG_MACF && CONFIG_VNGUARD
51 #include <security/mac.h>
52 #include <security/mac_framework.h>
53 #include <security/mac_policy.h>
54 #include <pexpert/pexpert.h>
55 #include <sys/sysctl.h>
56 #include <sys/reason.h>
60 #define f_flag f_fglob->fg_flag
61 #define f_type f_fglob->fg_ops->fo_type
62 extern int dofilewrite(vfs_context_t ctx
, struct fileproc
*fp
,
63 user_addr_t bufp
, user_size_t nbyte
, off_t offset
,
64 int flags
, user_ssize_t
*retval
);
65 extern int wr_uio(struct proc
*p
, struct fileproc
*fp
, uio_t uio
, user_ssize_t
*retval
);
68 * Experimental guarded file descriptor support.
71 kern_return_t
task_exception_notify(exception_type_t exception
,
72 mach_exception_data_type_t code
, mach_exception_data_type_t subcode
);
73 kern_return_t
task_violated_guard(mach_exception_code_t
, mach_exception_subcode_t
, void *);
/*
 * Most fd's have an underlying fileproc struct; but some may be
 * guarded_fileproc structs which implement guarded fds.  The latter
 * struct (below) embeds the former.
 *
 * The two types should be distinguished by the "type" portion of f_flags.
 * There's also a magic number to help catch misuse and bugs.
 *
 * This is a bit unpleasant, but results from the desire to allow
 * alternate file behaviours for a few file descriptors without
 * growing the fileproc data structure.
 */
88 struct guarded_fileproc
{
89 struct fileproc gf_fileproc
;
95 const size_t sizeof_guarded_fileproc
= sizeof(struct guarded_fileproc
);
97 #define FP_TO_GFP(fp) ((struct guarded_fileproc *)(fp))
98 #define GFP_TO_FP(gfp) (&(gfp)->gf_fileproc)
100 #define GUARDED_FILEPROC_MAGIC 0x29083
107 #ifdef OS_REFCNT_DEBUG
108 extern struct os_refgrp f_iocount_refgrp
;
111 static struct fileproc
*
112 guarded_fileproc_alloc_init(void *crarg
)
114 struct gfp_crarg
*aarg
= crarg
;
115 struct guarded_fileproc
*gfp
;
117 if ((gfp
= kalloc(sizeof(*gfp
))) == NULL
) {
121 bzero(gfp
, sizeof(*gfp
));
123 struct fileproc
*fp
= &gfp
->gf_fileproc
;
124 os_ref_init(&fp
->f_iocount
, &f_iocount_refgrp
);
125 fp
->f_flags
= FTYPE_GUARDED
;
127 gfp
->gf_magic
= GUARDED_FILEPROC_MAGIC
;
128 gfp
->gf_guard
= aarg
->gca_guard
;
129 gfp
->gf_attrs
= aarg
->gca_attrs
;
131 return GFP_TO_FP(gfp
);
135 guarded_fileproc_free(struct fileproc
*fp
)
137 struct guarded_fileproc
*gfp
= FP_TO_GFP(fp
);
139 if (FILEPROC_TYPE(fp
) != FTYPE_GUARDED
||
140 GUARDED_FILEPROC_MAGIC
!= gfp
->gf_magic
) {
141 panic("%s: corrupt fp %p flags %x", __func__
, fp
, fp
->f_flags
);
144 kfree(gfp
, sizeof(*gfp
));
148 fp_lookup_guarded(proc_t p
, int fd
, guardid_t guard
,
149 struct guarded_fileproc
**gfpp
, int locked
)
154 if ((error
= fp_lookup(p
, fd
, &fp
, locked
)) != 0) {
157 if (FILEPROC_TYPE(fp
) != FTYPE_GUARDED
) {
158 (void) fp_drop(p
, fd
, fp
, locked
);
161 struct guarded_fileproc
*gfp
= FP_TO_GFP(fp
);
163 if (GUARDED_FILEPROC_MAGIC
!= gfp
->gf_magic
) {
164 panic("%s: corrupt fp %p", __func__
, fp
);
167 if (guard
!= gfp
->gf_guard
) {
168 (void) fp_drop(p
, fd
, fp
, locked
);
169 return EPERM
; /* *not* a mismatch exception */
178 * Expected use pattern:
180 * if (FP_ISGUARDED(fp, GUARD_CLOSE)) {
181 * error = fp_guard_exception(p, fd, fp, kGUARD_EXC_CLOSE);
188 fp_isguarded(struct fileproc
*fp
, u_int attrs
)
190 if (FILEPROC_TYPE(fp
) == FTYPE_GUARDED
) {
191 struct guarded_fileproc
*gfp
= FP_TO_GFP(fp
);
193 if (GUARDED_FILEPROC_MAGIC
!= gfp
->gf_magic
) {
194 panic("%s: corrupt gfp %p flags %x",
195 __func__
, gfp
, fp
->f_flags
);
197 return (attrs
& gfp
->gf_attrs
) == attrs
;
202 extern char *proc_name_address(void *p
);
205 fp_guard_exception(proc_t p
, int fd
, struct fileproc
*fp
, u_int flavor
)
207 if (FILEPROC_TYPE(fp
) != FTYPE_GUARDED
) {
208 panic("%s corrupt fp %p flags %x", __func__
, fp
, fp
->f_flags
);
211 struct guarded_fileproc
*gfp
= FP_TO_GFP(fp
);
212 /* all gfd fields protected via proc_fdlock() */
213 proc_fdlock_assert(p
, LCK_MTX_ASSERT_OWNED
);
215 mach_exception_code_t code
= 0;
216 EXC_GUARD_ENCODE_TYPE(code
, GUARD_TYPE_FD
);
217 EXC_GUARD_ENCODE_FLAVOR(code
, flavor
);
218 EXC_GUARD_ENCODE_TARGET(code
, fd
);
219 mach_exception_subcode_t subcode
= gfp
->gf_guard
;
221 thread_t t
= current_thread();
222 thread_guard_violation(t
, code
, subcode
, TRUE
);
227 * (Invoked before returning to userland from the syscall handler.)
232 mach_exception_code_t code
,
233 mach_exception_subcode_t subcode
)
235 task_exception_notify(EXC_GUARD
, code
, subcode
);
236 proc_t p
= current_proc();
241 * Experimental guarded file descriptor SPIs
245 * int guarded_open_np(const char *pathname, int flags,
246 * const guardid_t *guard, u_int guardflags, ...);
248 * In this initial implementation, GUARD_DUP must be specified.
249 * GUARD_CLOSE, GUARD_SOCKET_IPC and GUARD_FILEPORT are optional.
251 * If GUARD_DUP wasn't specified, then we'd have to do the (extra) work
252 * to allow dup-ing a descriptor to inherit the guard onto the new
253 * descriptor. (Perhaps GUARD_DUP behaviours should just always be true
254 * for a guarded fd? Or, more sanely, all the dup operations should
255 * just always propagate the guard?)
257 * Guarded descriptors are always close-on-exec, and GUARD_CLOSE
258 * requires close-on-fork; O_CLOEXEC must be set in flags.
259 * This setting is immutable; attempts to clear the flag will
260 * cause a guard exception.
262 * XXX It's somewhat broken that change_fdguard_np() can completely
263 * remove the guard and thus revoke down the immutability
264 * promises above. Ick.
267 guarded_open_np(proc_t p
, struct guarded_open_np_args
*uap
, int32_t *retval
)
269 if ((uap
->flags
& O_CLOEXEC
) == 0) {
273 #define GUARD_REQUIRED (GUARD_DUP)
274 #define GUARD_ALL (GUARD_REQUIRED | \
275 (GUARD_CLOSE | GUARD_SOCKET_IPC | GUARD_FILEPORT | GUARD_WRITE))
277 if (((uap
->guardflags
& GUARD_REQUIRED
) != GUARD_REQUIRED
) ||
278 ((uap
->guardflags
& ~GUARD_ALL
) != 0)) {
283 struct gfp_crarg crarg
= {
284 .gca_attrs
= uap
->guardflags
287 if ((error
= copyin(uap
->guard
,
288 &(crarg
.gca_guard
), sizeof(crarg
.gca_guard
))) != 0) {
293 * Disallow certain guard values -- is zero enough?
295 if (crarg
.gca_guard
== 0) {
299 struct filedesc
*fdp
= p
->p_fd
;
300 struct vnode_attr va
;
302 vfs_context_t ctx
= vfs_context_current();
306 cmode
= ((uap
->mode
& ~fdp
->fd_cmask
) & ALLPERMS
) & ~S_ISTXT
;
307 VATTR_SET(&va
, va_mode
, cmode
& ACCESSPERMS
);
309 NDINIT(&nd
, LOOKUP
, OP_OPEN
, FOLLOW
| AUDITVNPATH1
, UIO_USERSPACE
,
312 return open1(ctx
, &nd
, uap
->flags
| O_CLOFORK
, &va
,
313 guarded_fileproc_alloc_init
, &crarg
, retval
);
317 * int guarded_open_dprotected_np(const char *pathname, int flags,
318 * const guardid_t *guard, u_int guardflags, int dpclass, int dpflags, ...);
320 * This SPI is extension of guarded_open_np() to include dataprotection class on creation
321 * in "dpclass" and dataprotection flags 'dpflags'. Otherwise behaviors are same as in
325 guarded_open_dprotected_np(proc_t p
, struct guarded_open_dprotected_np_args
*uap
, int32_t *retval
)
327 if ((uap
->flags
& O_CLOEXEC
) == 0) {
331 if (((uap
->guardflags
& GUARD_REQUIRED
) != GUARD_REQUIRED
) ||
332 ((uap
->guardflags
& ~GUARD_ALL
) != 0)) {
337 struct gfp_crarg crarg
= {
338 .gca_attrs
= uap
->guardflags
341 if ((error
= copyin(uap
->guard
,
342 &(crarg
.gca_guard
), sizeof(crarg
.gca_guard
))) != 0) {
347 * Disallow certain guard values -- is zero enough?
349 if (crarg
.gca_guard
== 0) {
353 struct filedesc
*fdp
= p
->p_fd
;
354 struct vnode_attr va
;
356 vfs_context_t ctx
= vfs_context_current();
360 cmode
= ((uap
->mode
& ~fdp
->fd_cmask
) & ALLPERMS
) & ~S_ISTXT
;
361 VATTR_SET(&va
, va_mode
, cmode
& ACCESSPERMS
);
363 NDINIT(&nd
, LOOKUP
, OP_OPEN
, FOLLOW
| AUDITVNPATH1
, UIO_USERSPACE
,
367 * Initialize the extra fields in vnode_attr to pass down dataprotection
369 * 1. target cprotect class.
370 * 2. set a flag to mark it as requiring open-raw-encrypted semantics.
372 if (uap
->flags
& O_CREAT
) {
373 VATTR_SET(&va
, va_dataprotect_class
, uap
->dpclass
);
376 if (uap
->dpflags
& (O_DP_GETRAWENCRYPTED
| O_DP_GETRAWUNENCRYPTED
)) {
377 if (uap
->flags
& (O_RDWR
| O_WRONLY
)) {
378 /* Not allowed to write raw encrypted bytes */
381 if (uap
->dpflags
& O_DP_GETRAWENCRYPTED
) {
382 VATTR_SET(&va
, va_dataprotect_flags
, VA_DP_RAWENCRYPTED
);
384 if (uap
->dpflags
& O_DP_GETRAWUNENCRYPTED
) {
385 VATTR_SET(&va
, va_dataprotect_flags
, VA_DP_RAWUNENCRYPTED
);
389 return open1(ctx
, &nd
, uap
->flags
| O_CLOFORK
, &va
,
390 guarded_fileproc_alloc_init
, &crarg
, retval
);
394 * int guarded_kqueue_np(const guardid_t *guard, u_int guardflags);
396 * Create a guarded kqueue descriptor with guardid and guardflags.
398 * Same restrictions on guardflags as for guarded_open_np().
399 * All kqueues are -always- close-on-exec and close-on-fork by themselves
400 * and are not sendable.
403 guarded_kqueue_np(proc_t p
, struct guarded_kqueue_np_args
*uap
, int32_t *retval
)
405 if (((uap
->guardflags
& GUARD_REQUIRED
) != GUARD_REQUIRED
) ||
406 ((uap
->guardflags
& ~GUARD_ALL
) != 0)) {
411 struct gfp_crarg crarg
= {
412 .gca_attrs
= uap
->guardflags
415 if ((error
= copyin(uap
->guard
,
416 &(crarg
.gca_guard
), sizeof(crarg
.gca_guard
))) != 0) {
420 if (crarg
.gca_guard
== 0) {
424 return kqueue_internal(p
, guarded_fileproc_alloc_init
, &crarg
, retval
);
428 * int guarded_close_np(int fd, const guardid_t *guard);
431 guarded_close_np(proc_t p
, struct guarded_close_np_args
*uap
,
432 __unused
int32_t *retval
)
434 struct guarded_fileproc
*gfp
;
439 AUDIT_SYSCLOSE(p
, fd
);
441 if ((error
= copyin(uap
->guard
, &uguard
, sizeof(uguard
))) != 0) {
446 if ((error
= fp_lookup_guarded(p
, fd
, uguard
, &gfp
, 1)) != 0) {
450 error
= close_internal_locked(p
, fd
, GFP_TO_FP(gfp
), 0);
457 * change_fdguard_np(int fd, const guardid_t *guard, u_int guardflags,
458 * const guardid_t *nguard, u_int nguardflags, int *fdflagsp);
460 * Given a file descriptor, atomically exchange <guard, guardflags> for
461 * a new guard <nguard, nguardflags>, returning the previous fd
462 * flags (see fcntl:F_SETFD) in *fdflagsp.
464 * This syscall can be used to either (a) add a new guard to an existing
465 * unguarded file descriptor (b) remove the old guard from an existing
466 * guarded file descriptor or (c) change the guard (guardid and/or
467 * guardflags) on a guarded file descriptor.
469 * If 'guard' is NULL, fd must be unguarded at entry. If the call completes
470 * successfully the fd will be guarded with <nguard, nguardflags>.
472 * Guarding a file descriptor has some side-effects on the "fdflags"
473 * associated with the descriptor - in particular FD_CLOEXEC is
474 * forced ON unconditionally, and FD_CLOFORK is forced ON by GUARD_CLOSE.
475 * Callers who wish to subsequently restore the state of the fd should save
476 * the value of *fdflagsp after a successful invocation.
478 * If 'nguard' is NULL, fd must be guarded at entry, <guard, guardflags>
479 * must match with what's already guarding the descriptor, and the
480 * result will be to completely remove the guard. Note also that the
481 * fdflags are copied to the descriptor from the incoming *fdflagsp argument.
483 * If the descriptor is guarded, and neither 'guard' nor 'nguard' is NULL
484 * and <guard, guardflags> matches what's already guarding the descriptor,
485 * then <nguard, nguardflags> becomes the new guard. In this case, even if
486 * the GUARD_CLOSE flag is being cleared, it is still possible to continue
487 * to keep FD_CLOFORK on the descriptor by passing FD_CLOFORK via fdflagsp.
489 * (File descriptors whose underlying fileglobs are marked FG_CONFINED are
490 * still close-on-fork, regardless of the setting of FD_CLOFORK.)
492 * Example 1: Guard an unguarded descriptor during a set of operations,
493 * then restore the original state of the descriptor.
496 * change_fdguard_np(fd, NULL, 0, &myguard, GUARD_CLOSE, &sav_flags);
497 * // do things with now guarded 'fd'
498 * change_fdguard_np(fd, &myguard, GUARD_CLOSE, NULL, 0, &sav_flags);
499 * // fd now unguarded.
501 * Example 2: Change the guard of a guarded descriptor during a set of
502 * operations, then restore the original state of the descriptor.
504 * int sav_flags = (gdflags & GUARD_CLOSE) ? FD_CLOFORK : 0;
505 * change_fdguard_np(fd, &gd, gdflags, &myguard, GUARD_CLOSE, &sav_flags);
506 * // do things with 'fd' with a different guard
507 * change_fdguard_np(fd, &myg, GUARD_CLOSE, &gd, gdflags, &sav_flags);
508 * // back to original guarded state
510 * XXX This SPI is too much of a chainsaw and should be revised.
514 change_fdguard_np(proc_t p
, struct change_fdguard_np_args
*uap
,
515 __unused
int32_t *retval
)
520 guardid_t oldg
= 0, newg
= 0;
523 if (0 != uap
->guard
&&
524 0 != (error
= copyin(uap
->guard
, &oldg
, sizeof(oldg
)))) {
525 return error
; /* can't copyin current guard */
527 if (0 != uap
->nguard
&&
528 0 != (error
= copyin(uap
->nguard
, &newg
, sizeof(newg
)))) {
529 return error
; /* can't copyin new guard */
531 if (0 != uap
->fdflagsp
&&
532 0 != (error
= copyin(uap
->fdflagsp
, &nfdflags
, sizeof(nfdflags
)))) {
533 return error
; /* can't copyin new fdflags */
537 if ((error
= fp_lookup(p
, fd
, &fp
, 1)) != 0) {
542 if (0 != uap
->fdflagsp
) {
543 int ofdflags
= FDFLAGS_GET(p
, fd
);
544 int ofl
= ((ofdflags
& UF_EXCLOSE
) ? FD_CLOEXEC
: 0) |
545 ((ofdflags
& UF_FORKCLOSE
) ? FD_CLOFORK
: 0);
547 if (0 != (error
= copyout(&ofl
, uap
->fdflagsp
, sizeof(ofl
)))) {
549 goto dropout
; /* can't copyout old fdflags */
554 if (FILEPROC_TYPE(fp
) == FTYPE_GUARDED
) {
555 if (0 == uap
->guard
|| 0 == uap
->guardflags
) {
556 error
= EINVAL
; /* missing guard! */
557 } else if (0 == oldg
) {
558 error
= EPERM
; /* guardids cannot be zero */
561 if (0 != uap
->guard
|| 0 != uap
->guardflags
) {
562 error
= EINVAL
; /* guard provided, but none needed! */
570 if (0 != uap
->nguard
) {
572 * There's a new guard in town.
575 error
= EINVAL
; /* guards cannot contain zero */
576 } else if (((uap
->nguardflags
& GUARD_REQUIRED
) != GUARD_REQUIRED
) ||
577 ((uap
->nguardflags
& ~GUARD_ALL
) != 0)) {
578 error
= EINVAL
; /* must have valid attributes too */
584 if (FILEPROC_TYPE(fp
) == FTYPE_GUARDED
) {
586 * Replace old guard with new guard
588 struct guarded_fileproc
*gfp
= FP_TO_GFP(fp
);
590 if (GUARDED_FILEPROC_MAGIC
!= gfp
->gf_magic
) {
591 panic("%s: corrupt gfp %p flags %x",
592 __func__
, gfp
, fp
->f_flags
);
595 if (oldg
== gfp
->gf_guard
&&
596 uap
->guardflags
== gfp
->gf_attrs
) {
598 * Must match existing guard + attributes
599 * before we'll swap them to new ones, managing
600 * fdflags "side-effects" as we go. Note that
601 * userland can request FD_CLOFORK semantics.
603 if (gfp
->gf_attrs
& GUARD_CLOSE
) {
604 FDFLAGS_CLR(p
, fd
, UF_FORKCLOSE
);
606 gfp
->gf_guard
= newg
;
607 gfp
->gf_attrs
= uap
->nguardflags
;
608 if (gfp
->gf_attrs
& GUARD_CLOSE
) {
609 FDFLAGS_SET(p
, fd
, UF_FORKCLOSE
);
612 (nfdflags
& FD_CLOFORK
) ? UF_FORKCLOSE
: 0);
613 /* FG_CONFINED enforced regardless */
620 * Add a guard to a previously unguarded descriptor
622 switch (FILEGLOB_DTYPE(fp
->f_fglob
)) {
627 case DTYPE_NETPOLICY
:
636 struct gfp_crarg crarg
= {
638 .gca_attrs
= uap
->nguardflags
640 struct fileproc
*nfp
=
641 guarded_fileproc_alloc_init(&crarg
);
642 struct guarded_fileproc
*gfp
;
646 switch (error
= fp_tryswap(p
, fd
, nfp
)) {
647 case 0: /* success; guarded-ness comes with side-effects */
649 gfp
= FP_TO_GFP(nfp
);
650 if (gfp
->gf_attrs
& GUARD_CLOSE
) {
651 FDFLAGS_SET(p
, fd
, UF_FORKCLOSE
);
653 FDFLAGS_SET(p
, fd
, UF_EXCLOSE
);
654 (void) fp_drop(p
, fd
, nfp
, 1);
656 case EKEEPLOOKING
: /* f_iocount indicates a collision */
657 (void) fp_drop(p
, fd
, fp
, 1);
661 (void) fp_drop(p
, fd
, fp
, 1);
672 if (FILEPROC_TYPE(fp
) == FTYPE_GUARDED
) {
674 * Remove the guard altogether.
676 struct guarded_fileproc
*gfp
= FP_TO_GFP(fp
);
678 if (0 != uap
->nguardflags
) {
683 if (GUARDED_FILEPROC_MAGIC
!= gfp
->gf_magic
) {
684 panic("%s: corrupt gfp %p flags %x",
685 __func__
, gfp
, fp
->f_flags
);
688 if (oldg
!= gfp
->gf_guard
||
689 uap
->guardflags
!= gfp
->gf_attrs
) {
695 struct fileproc
*nfp
= fileproc_alloc_init(NULL
);
698 switch (error
= fp_tryswap(p
, fd
, nfp
)) {
699 case 0: /* success; undo side-effects of guarded-ness */
701 FDFLAGS_CLR(p
, fd
, UF_FORKCLOSE
| UF_EXCLOSE
);
703 (nfdflags
& FD_CLOFORK
) ? UF_FORKCLOSE
: 0);
704 /* FG_CONFINED enforced regardless */
706 (nfdflags
& FD_CLOEXEC
) ? UF_EXCLOSE
: 0);
707 (void) fp_drop(p
, fd
, nfp
, 1);
709 case EKEEPLOOKING
: /* f_iocount indicates collision */
710 (void) fp_drop(p
, fd
, fp
, 1);
714 (void) fp_drop(p
, fd
, fp
, 1);
722 * Not already guarded, and no new guard?
729 (void) fp_drop(p
, fd
, fp
, 1);
735 * user_ssize_t guarded_write_np(int fd, const guardid_t *guard,
736 * user_addr_t cbuf, user_ssize_t nbyte);
738 * Initial implementation of guarded writes.
741 guarded_write_np(struct proc
*p
, struct guarded_write_np_args
*uap
, user_ssize_t
*retval
)
747 struct guarded_fileproc
*gfp
;
748 bool wrote_some
= false;
752 if ((error
= copyin(uap
->guard
, &uguard
, sizeof(uguard
))) != 0) {
756 error
= fp_lookup_guarded(p
, fd
, uguard
, &gfp
, 0);
762 if ((fp
->f_flag
& FWRITE
) == 0) {
765 struct vfs_context context
= *(vfs_context_current());
766 context
.vc_ucred
= fp
->f_fglob
->fg_cred
;
768 error
= dofilewrite(&context
, fp
, uap
->cbuf
, uap
->nbyte
,
769 (off_t
)-1, 0, retval
);
770 wrote_some
= *retval
> 0;
773 fp_drop_written(p
, fd
, fp
);
775 fp_drop(p
, fd
, fp
, 0);
781 * user_ssize_t guarded_pwrite_np(int fd, const guardid_t *guard,
782 * user_addr_t buf, user_size_t nbyte, off_t offset);
784 * Initial implementation of guarded pwrites.
787 guarded_pwrite_np(struct proc
*p
, struct guarded_pwrite_np_args
*uap
, user_ssize_t
*retval
)
792 vnode_t vp
= (vnode_t
)0;
794 struct guarded_fileproc
*gfp
;
795 bool wrote_some
= false;
799 if ((error
= copyin(uap
->guard
, &uguard
, sizeof(uguard
))) != 0) {
803 error
= fp_lookup_guarded(p
, fd
, uguard
, &gfp
, 0);
809 if ((fp
->f_flag
& FWRITE
) == 0) {
812 struct vfs_context context
= *vfs_context_current();
813 context
.vc_ucred
= fp
->f_fglob
->fg_cred
;
815 if (fp
->f_type
!= DTYPE_VNODE
) {
819 vp
= (vnode_t
)fp
->f_fglob
->fg_data
;
820 if (vnode_isfifo(vp
)) {
824 if ((vp
->v_flag
& VISTTY
)) {
828 if (uap
->offset
== (off_t
)-1) {
833 error
= dofilewrite(&context
, fp
, uap
->buf
, uap
->nbyte
,
834 uap
->offset
, FOF_OFFSET
, retval
);
835 wrote_some
= *retval
> 0;
839 fp_drop_written(p
, fd
, fp
);
841 fp_drop(p
, fd
, fp
, 0);
844 KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO
, SYS_guarded_pwrite_np
) | DBG_FUNC_NONE
),
845 uap
->fd
, uap
->nbyte
, (unsigned int)((uap
->offset
>> 32)), (unsigned int)(uap
->offset
), 0);
851 * user_ssize_t guarded_writev_np(int fd, const guardid_t *guard,
852 * struct iovec *iovp, u_int iovcnt);
854 * Initial implementation of guarded writev.
858 guarded_writev_np(struct proc
*p
, struct guarded_writev_np_args
*uap
, user_ssize_t
*retval
)
863 struct user_iovec
*iovp
;
865 struct guarded_fileproc
*gfp
;
866 bool wrote_some
= false;
868 AUDIT_ARG(fd
, uap
->fd
);
870 /* Verify range bedfore calling uio_create() */
871 if (uap
->iovcnt
<= 0 || uap
->iovcnt
> UIO_MAXIOV
) {
875 /* allocate a uio large enough to hold the number of iovecs passed */
876 auio
= uio_create(uap
->iovcnt
, 0,
877 (IS_64BIT_PROCESS(p
) ? UIO_USERSPACE64
: UIO_USERSPACE32
),
880 /* get location of iovecs within the uio. then copyin the iovecs from
883 iovp
= uio_iovsaddr(auio
);
886 goto ExitThisRoutine
;
888 error
= copyin_user_iovec_array(uap
->iovp
,
889 IS_64BIT_PROCESS(p
) ? UIO_USERSPACE64
: UIO_USERSPACE32
,
892 goto ExitThisRoutine
;
895 /* finalize uio_t for use and do the IO
897 error
= uio_calculateresid(auio
);
899 goto ExitThisRoutine
;
902 if ((error
= copyin(uap
->guard
, &uguard
, sizeof(uguard
))) != 0) {
903 goto ExitThisRoutine
;
906 error
= fp_lookup_guarded(p
, uap
->fd
, uguard
, &gfp
, 0);
908 goto ExitThisRoutine
;
912 if ((fp
->f_flag
& FWRITE
) == 0) {
915 error
= wr_uio(p
, fp
, auio
, retval
);
916 wrote_some
= *retval
> 0;
920 fp_drop_written(p
, uap
->fd
, fp
);
922 fp_drop(p
, uap
->fd
, fp
, 0);
932 * int falloc_guarded(struct proc *p, struct fileproc **fp, int *fd,
933 * vfs_context_t ctx, const guardid_t *guard, u_int attrs);
935 * This SPI is the guarded variant of falloc(). It borrows the same
936 * restrictions as those used by the rest of the guarded_* routines.
939 falloc_guarded(struct proc
*p
, struct fileproc
**fp
, int *fd
,
940 vfs_context_t ctx
, const guardid_t
*guard
, u_int attrs
)
942 struct gfp_crarg crarg
;
944 if (((attrs
& GUARD_REQUIRED
) != GUARD_REQUIRED
) ||
945 ((attrs
& ~GUARD_ALL
) != 0) || (*guard
== 0)) {
949 bzero(&crarg
, sizeof(crarg
));
950 crarg
.gca_guard
= *guard
;
951 crarg
.gca_attrs
= attrs
;
953 return falloc_withalloc(p
, fp
, fd
, ctx
, guarded_fileproc_alloc_init
,
957 #if CONFIG_MACF && CONFIG_VNGUARD
962 * Uses MAC hooks to guard operations on vnodes in the system. Given an fd,
963 * add data to the label on the fileglob and the vnode it points at.
964 * The data contains a pointer to the fileglob, the set of attributes to
965 * guard, a guard value for uniquification, and the pid of the process
966 * who set the guard up in the first place.
968 * The fd must have been opened read/write, and the underlying
969 * fileglob is FG_CONFINED so that there's no ambiguity about the
972 * When there's a callback for a vnode operation of interest (rename, unlink,
973 * etc.) check to see if the guard permits that operation, and if not
974 * take an action e.g. log a message or generate a crash report.
976 * The label is removed from the vnode and the fileglob when the fileglob
979 * The initial action to be taken can be specified by a boot arg (vnguard=0x42)
980 * and change via the "kern.vnguard.flags" sysctl.
985 struct vng_info
{ /* lives on the vnode label */
988 TAILQ_HEAD(, vng_owner
) vgi_owners
;
991 struct vng_owner
{ /* lives on the fileglob label */
993 struct fileglob
*vgo_fg
;
994 struct vng_info
*vgo_vgi
;
995 TAILQ_ENTRY(vng_owner
) vgo_link
;
998 static struct vng_info
*
999 new_vgi(unsigned attrs
, guardid_t guard
)
1001 struct vng_info
*vgi
= kalloc(sizeof(*vgi
));
1002 vgi
->vgi_guard
= guard
;
1003 vgi
->vgi_attrs
= attrs
;
1004 TAILQ_INIT(&vgi
->vgi_owners
);
1008 static struct vng_owner
*
1009 new_vgo(proc_t p
, struct fileglob
*fg
)
1011 struct vng_owner
*vgo
= kalloc(sizeof(*vgo
));
1012 memset(vgo
, 0, sizeof(*vgo
));
1019 vgi_add_vgo(struct vng_info
*vgi
, struct vng_owner
*vgo
)
1022 TAILQ_INSERT_HEAD(&vgi
->vgi_owners
, vgo
, vgo_link
);
1026 vgi_remove_vgo(struct vng_info
*vgi
, struct vng_owner
*vgo
)
1028 TAILQ_REMOVE(&vgi
->vgi_owners
, vgo
, vgo_link
);
1029 vgo
->vgo_vgi
= NULL
;
1030 return TAILQ_EMPTY(&vgi
->vgi_owners
);
1034 free_vgi(struct vng_info
*vgi
)
1036 assert(TAILQ_EMPTY(&vgi
->vgi_owners
));
1037 #if DEVELOP || DEBUG
1038 memset(vgi
, 0xbeadfade, sizeof(*vgi
));
1040 kfree(vgi
, sizeof(*vgi
));
1044 free_vgo(struct vng_owner
*vgo
)
1046 #if DEVELOP || DEBUG
1047 memset(vgo
, 0x2bedf1d0, sizeof(*vgo
));
1049 kfree(vgo
, sizeof(*vgo
));
1052 static int label_slot
;
1053 static lck_rw_t llock
;
1054 static lck_grp_t
*llock_grp
;
1056 static __inline
void *
1057 vng_lbl_get(struct label
*label
)
1059 lck_rw_assert(&llock
, LCK_RW_ASSERT_HELD
);
1061 if (NULL
== label
) {
1064 data
= (void *)mac_label_get(label
, label_slot
);
1069 static __inline
struct vng_info
*
1070 vng_lbl_get_withattr(struct label
*label
, unsigned attrmask
)
1072 struct vng_info
*vgi
= vng_lbl_get(label
);
1073 assert(NULL
== vgi
|| (vgi
->vgi_attrs
& ~VNG_ALL
) == 0);
1074 if (NULL
!= vgi
&& 0 == (vgi
->vgi_attrs
& attrmask
)) {
1080 static __inline
void
1081 vng_lbl_set(struct label
*label
, void *data
)
1083 assert(NULL
!= label
);
1084 lck_rw_assert(&llock
, LCK_RW_ASSERT_EXCLUSIVE
);
1085 mac_label_set(label
, label_slot
, (intptr_t)data
);
1089 vnguard_sysc_getguardattr(proc_t p
, struct vnguard_getattr
*vga
)
1091 const int fd
= vga
->vga_fd
;
1093 if (0 == vga
->vga_guard
) {
1098 struct fileproc
*fp
;
1099 if (0 != (error
= fp_lookup(p
, fd
, &fp
, 0))) {
1103 struct fileglob
*fg
= fp
->f_fglob
;
1104 if (FILEGLOB_DTYPE(fg
) != DTYPE_VNODE
) {
1108 struct vnode
*vp
= fg
->fg_data
;
1109 if (!vnode_isreg(vp
) || NULL
== vp
->v_mount
) {
1113 error
= vnode_getwithref(vp
);
1120 lck_rw_lock_shared(&llock
);
1122 if (NULL
!= vp
->v_label
) {
1123 const struct vng_info
*vgi
= vng_lbl_get(vp
->v_label
);
1125 if (vgi
->vgi_guard
!= vga
->vga_guard
) {
1128 vga
->vga_attrs
= vgi
->vgi_attrs
;
1133 lck_rw_unlock_shared(&llock
);
1137 fp_drop(p
, fd
, fp
, 0);
1142 vnguard_sysc_setguard(proc_t p
, const struct vnguard_set
*vns
)
1144 const int fd
= vns
->vns_fd
;
1146 if ((vns
->vns_attrs
& ~VNG_ALL
) != 0 ||
1147 0 == vns
->vns_attrs
|| 0 == vns
->vns_guard
) {
1152 struct fileproc
*fp
;
1153 if (0 != (error
= fp_lookup(p
, fd
, &fp
, 0))) {
1158 * To avoid trivial DoS, insist that the caller
1159 * has read/write access to the file.
1161 if ((FREAD
| FWRITE
) != (fp
->f_flag
& (FREAD
| FWRITE
))) {
1165 struct fileglob
*fg
= fp
->f_fglob
;
1166 if (FILEGLOB_DTYPE(fg
) != DTYPE_VNODE
) {
1171 * Confinement means there's only one fd pointing at
1172 * this fileglob, and will always be associated with
1175 if (0 == (FG_CONFINED
& fg
->fg_lflags
)) {
1179 struct vnode
*vp
= fg
->fg_data
;
1180 if (!vnode_isreg(vp
) || NULL
== vp
->v_mount
) {
1184 error
= vnode_getwithref(vp
);
1189 /* Ensure the target vnode -has- a label */
1190 struct vfs_context
*ctx
= vfs_context_current();
1191 mac_vnode_label_update(ctx
, vp
, NULL
);
1193 struct vng_info
*nvgi
= new_vgi(vns
->vns_attrs
, vns
->vns_guard
);
1194 struct vng_owner
*nvgo
= new_vgo(p
, fg
);
1196 lck_rw_lock_exclusive(&llock
);
1200 * A vnode guard is associated with one or more
1201 * fileglobs in one or more processes.
1203 struct vng_info
*vgi
= vng_lbl_get(vp
->v_label
);
1204 struct vng_owner
*vgo
= vng_lbl_get(fg
->fg_label
);
1207 /* vnode unguarded, add the first guard */
1209 panic("vnguard label on fileglob "
1212 /* add a kusecount so we can unlabel later */
1213 error
= vnode_ref_ext(vp
, O_EVTONLY
, 0);
1216 vgi_add_vgo(nvgi
, nvgo
);
1217 vng_lbl_set(vp
->v_label
, nvgi
);
1218 vng_lbl_set(fg
->fg_label
, nvgo
);
1224 /* vnode already guarded */
1226 if (vgi
->vgi_guard
!= vns
->vns_guard
) {
1227 error
= EPERM
; /* guard mismatch */
1228 } else if (vgi
->vgi_attrs
!= vns
->vns_attrs
) {
1230 * Temporary workaround for older versions of SQLite:
1231 * allow newer guard attributes to be silently cleared.
1233 const unsigned mask
= ~(VNG_WRITE_OTHER
| VNG_TRUNC_OTHER
);
1234 if ((vgi
->vgi_attrs
& mask
) == (vns
->vns_attrs
& mask
)) {
1235 vgi
->vgi_attrs
&= vns
->vns_attrs
;
1237 error
= EACCES
; /* attr mismatch */
1240 if (0 != error
|| NULL
!= vgo
) {
1244 /* record shared ownership */
1245 vgi_add_vgo(vgi
, nvgo
);
1246 vng_lbl_set(fg
->fg_label
, nvgo
);
1250 lck_rw_unlock_exclusive(&llock
);
1254 fp_drop(p
, fd
, fp
, 0);
1259 vng_policy_syscall(proc_t p
, int cmd
, user_addr_t arg
)
1269 case VNG_SYSC_SET_GUARD
: {
1270 struct vnguard_set vns
;
1271 error
= copyin(arg
, (void *)&vns
, sizeof(vns
));
1275 error
= vnguard_sysc_setguard(p
, &vns
);
1278 case VNG_SYSC_GET_ATTR
: {
1279 struct vnguard_getattr vga
;
1280 error
= copyin(arg
, (void *)&vga
, sizeof(vga
));
1284 error
= vnguard_sysc_getguardattr(p
, &vga
);
1288 error
= copyout((void *)&vga
, arg
, sizeof(vga
));
1298 * This is called just before the fileglob disappears in fg_free().
1299 * Take the exclusive lock: no other thread can add or remove
1300 * a vng_info to any vnode in the system.
1303 vng_file_label_destroy(struct label
*label
)
1305 lck_rw_lock_exclusive(&llock
);
1306 struct vng_owner
*lvgo
= vng_lbl_get(label
);
1308 vng_lbl_set(label
, 0);
1309 struct vng_info
*vgi
= lvgo
->vgo_vgi
;
1311 if (vgi_remove_vgo(vgi
, lvgo
)) {
1312 /* that was the last reference */
1314 struct fileglob
*fg
= lvgo
->vgo_fg
;
1316 if (DTYPE_VNODE
== FILEGLOB_DTYPE(fg
)) {
1317 struct vnode
*vp
= fg
->fg_data
;
1318 int error
= vnode_getwithref(vp
);
1320 vng_lbl_set(vp
->v_label
, 0);
1321 lck_rw_unlock_exclusive(&llock
);
1322 /* may trigger VNOP_INACTIVE */
1323 vnode_rele_ext(vp
, O_EVTONLY
, 0);
1333 lck_rw_unlock_exclusive(&llock
);
1337 vng_reason_from_pathname(const char *path
, uint32_t pathlen
)
1339 os_reason_t r
= os_reason_create(OS_REASON_GUARD
, GUARD_REASON_VNODE
);
1344 * If the pathname is very long, just keep the trailing part
1346 const uint32_t pathmax
= 3 * EXIT_REASON_USER_DESC_MAX_LEN
/ 4;
1347 if (pathlen
> pathmax
) {
1348 path
+= (pathlen
- pathmax
);
1351 uint32_t rsize
= kcdata_estimate_required_buffer_size(1, pathlen
);
1352 if (0 == os_reason_alloc_buffer(r
, rsize
)) {
1353 struct kcdata_descriptor
*kcd
= &r
->osr_kcd_descriptor
;
1354 mach_vm_address_t addr
;
1355 if (kcdata_get_memory_addr(kcd
,
1356 EXIT_REASON_USER_DESC
, pathlen
, &addr
) == KERN_SUCCESS
) {
1357 kcdata_memcpy(kcd
, addr
, path
, pathlen
);
1362 return OS_REASON_NULL
;
1365 static int vng_policy_flags
;
/*
 * Central enforcement for a guarded-vnode violation.  'opval' is the
 * VNG_* operation that tripped the guard; 'vgi' describes the guard and
 * its owners.  Behavior is driven by vng_policy_flags:
 *   - kVNG_POLICY_EPERM:      deny the operation (retval set in elided lines)
 *   - kVNG_POLICY_LOGMSG /
 *     kVNG_POLICY_UPRINTMSG:  emit one message per guard owner naming the
 *                             violating process, operation, path and guard
 *   - kVNG_POLICY_EXC /
 *     kVNG_POLICY_EXC_CORPSE: raise EXC_GUARD (corpse variant attaches an
 *                             os_reason carrying the vnode's path)
 *   - kVNG_POLICY_SIGKILL:    kill the current process
 * Entered with llock held shared; see the note below about the EXC path
 * dropping and reacquiring it.
 * NOTE(review): extraction elided interior lines (retval/op declarations,
 * several switch cases, error checks); tokens below are preserved verbatim.
 */
1368 * Note: if an EXC_GUARD is generated, llock will be dropped and
1369 * subsequently reacquired by this routine. Data derived from
1370 * any label in the caller should be regenerated.
1373 vng_guard_violation(const struct vng_info
*vgi
,
1374 unsigned opval
, vnode_t vp
)
1378 if (vng_policy_flags
& kVNG_POLICY_EPERM
) {
1379 /* deny the operation */
/* Map opval to a human-readable operation name (cases partially elided). */
1383 if (vng_policy_flags
& (kVNG_POLICY_LOGMSG
| kVNG_POLICY_UPRINTMSG
)) {
1387 case VNG_RENAME_FROM
:
1402 case VNG_WRITE_OTHER
:
1405 case VNG_TRUNC_OTHER
:
/* One log/uprintf line per guard owner, using the format at 1418. */
1413 const char *nm
= vnode_getname(vp
);
1414 proc_t p
= current_proc();
1415 const struct vng_owner
*vgo
;
1416 TAILQ_FOREACH(vgo
, &vgi
->vgi_owners
, vgo_link
) {
1418 "%s[%d]: %s%s: '%s' guarded by %s[%d] (0x%llx)\n";
1420 if (vng_policy_flags
& kVNG_POLICY_LOGMSG
) {
1422 proc_name_address(p
), proc_pid(p
), op
,
1423 0 != retval
? " denied" : "",
1424 NULL
!= nm
? nm
: "(unknown)",
1425 proc_name_address(vgo
->vgo_p
),
1426 proc_pid(vgo
->vgo_p
), vgi
->vgi_guard
);
1428 if (vng_policy_flags
& kVNG_POLICY_UPRINTMSG
) {
1430 proc_name_address(p
), proc_pid(p
), op
,
1431 0 != retval
? " denied" : "",
1432 NULL
!= nm
? nm
: "(unknown)",
1433 proc_name_address(vgo
->vgo_p
),
1434 proc_pid(vgo
->vgo_p
), vgi
->vgi_guard
);
/* Encode EXC_GUARD: type GUARD_TYPE_VN, flavor = opval, target = pid of
 * the first guard owner (0 if none); subcode is the guard value itself. */
1442 if (vng_policy_flags
& (kVNG_POLICY_EXC
| kVNG_POLICY_EXC_CORPSE
)) {
1443 /* EXC_GUARD exception */
1444 const struct vng_owner
*vgo
= TAILQ_FIRST(&vgi
->vgi_owners
);
1445 pid_t pid
= vgo
? proc_pid(vgo
->vgo_p
) : 0;
1446 mach_exception_code_t code
;
1447 mach_exception_subcode_t subcode
;
1450 EXC_GUARD_ENCODE_TYPE(code
, GUARD_TYPE_VN
);
1451 EXC_GUARD_ENCODE_FLAVOR(code
, opval
);
1452 EXC_GUARD_ENCODE_TARGET(code
, pid
);
1453 subcode
= vgi
->vgi_guard
;
/* Drop llock before delivering the exception (may block / run arbitrary
 * code); reacquired at 1480 before returning. */
1455 lck_rw_unlock_shared(&llock
);
1457 if (vng_policy_flags
& kVNG_POLICY_EXC_CORPSE
) {
/* Corpse variant: fetch the vnode path into a temp buffer and wrap it
 * in an os_reason (NULL checks/cleanup are in elided lines). */
1459 int len
= MAXPATHLEN
;
1460 MALLOC(path
, char *, len
, M_TEMP
, M_WAITOK
);
1461 os_reason_t r
= NULL
;
1463 vn_getpath(vp
, path
, &len
);
1465 r
= vng_reason_from_pathname(path
, len
);
1468 task_violated_guard(code
, subcode
, r
); /* not fatal */
/* Non-corpse EXC path: arrange delivery via the guard AST on return to
 * userland (TRUE => fatal; see vn_guard_ast below). */
1476 thread_t t
= current_thread();
1477 thread_guard_violation(t
, code
, subcode
, TRUE
);
1480 lck_rw_lock_shared(&llock
);
1481 } else if (vng_policy_flags
& kVNG_POLICY_SIGKILL
) {
1482 proc_t p
= current_proc();
1483 psignal(p
, SIGKILL
);
/*
 * AST handler for a fatal vnode-guard violation: delivers the EXC_GUARD
 * exception to the task and then SIGKILLs the current process — a tripped
 * fatal guard is unrecoverable.  The thread argument is unused.
 */
1490 * A fatal vnode guard was tripped on this thread.
1492 * (Invoked before returning to userland from the syscall handler.)
1495 vn_guard_ast(thread_t __unused t
,
1496 mach_exception_data_type_t code
, mach_exception_data_type_t subcode
)
1498 task_exception_notify(EXC_GUARD
, code
, subcode
);
1499 proc_t p
= current_proc();
1500 psignal(p
, SIGKILL
);
/*
 * MAC hook: rename.  If either the source vnode or the target vnode
 * carries a label, take llock shared and check the source label for a
 * VNG_RENAME_FROM guard and the target label for a VNG_RENAME_TO guard,
 * reporting any hit through vng_guard_violation().  'error' is declared
 * in lines elided from this extraction.
 */
1508 vng_vnode_check_rename(kauth_cred_t __unused cred
,
1509 struct vnode
*__unused dvp
, struct label
*__unused dlabel
,
1510 struct vnode
*vp
, struct label
*label
,
1511 struct componentname
*__unused cnp
,
1512 struct vnode
*__unused tdvp
, struct label
*__unused tdlabel
,
1513 struct vnode
*tvp
, struct label
*tlabel
,
1514 struct componentname
*__unused tcnp
)
1517 if (NULL
!= label
|| NULL
!= tlabel
) {
1518 lck_rw_lock_shared(&llock
);
/* Source side: guard on rename-from. */
1519 const struct vng_info
*vgi
=
1520 vng_lbl_get_withattr(label
, VNG_RENAME_FROM
);
1522 error
= vng_guard_violation(vgi
, VNG_RENAME_FROM
, vp
);
/* Target side: guard on rename-to. */
1525 vgi
= vng_lbl_get_withattr(tlabel
, VNG_RENAME_TO
);
1527 error
= vng_guard_violation(vgi
,
1528 VNG_RENAME_TO
, tvp
);
1531 lck_rw_unlock_shared(&llock
);
/*
 * MAC hook: hard link.  Under llock (shared), a labeled vnode with a
 * VNG_LINK guard attribute is reported via vng_guard_violation().
 * 'error' is declared in lines elided from this extraction.
 */
1537 vng_vnode_check_link(kauth_cred_t __unused cred
,
1538 struct vnode
*__unused dvp
, struct label
*__unused dlabel
,
1539 struct vnode
*vp
, struct label
*label
, struct componentname
*__unused cnp
)
1542 if (NULL
!= label
) {
1543 lck_rw_lock_shared(&llock
);
1544 const struct vng_info
*vgi
=
1545 vng_lbl_get_withattr(label
, VNG_LINK
);
1547 error
= vng_guard_violation(vgi
, VNG_LINK
, vp
);
1549 lck_rw_unlock_shared(&llock
);
/*
 * MAC hook: unlink.  Under llock (shared), a labeled vnode with a
 * VNG_UNLINK guard attribute is reported via vng_guard_violation().
 * 'error' is declared in lines elided from this extraction.
 */
1555 vng_vnode_check_unlink(kauth_cred_t __unused cred
,
1556 struct vnode
*__unused dvp
, struct label
*__unused dlabel
,
1557 struct vnode
*vp
, struct label
*label
, struct componentname
*__unused cnp
)
1560 if (NULL
!= label
) {
1561 lck_rw_lock_shared(&llock
);
1562 const struct vng_info
*vgi
=
1563 vng_lbl_get_withattr(label
, VNG_UNLINK
);
1565 error
= vng_guard_violation(vgi
, VNG_UNLINK
, vp
);
1567 lck_rw_unlock_shared(&llock
);
/*
 * MAC hook: write.  A write by a process that itself owns the guard is
 * allowed — the TAILQ_FOREACH below scans the guard's owner list and bails
 * out when the current proc is an owner (break/goto in elided lines).
 * Only writes by "other" processes become VNG_WRITE_OTHER violations.
 * 'error' is declared in lines elided from this extraction.
 */
1573 * Only check violations for writes performed by "other processes"
1576 vng_vnode_check_write(kauth_cred_t __unused actv_cred
,
1577 kauth_cred_t __unused file_cred
, struct vnode
*vp
, struct label
*label
)
1580 if (NULL
!= label
) {
1581 lck_rw_lock_shared(&llock
);
1582 const struct vng_info
*vgi
=
1583 vng_lbl_get_withattr(label
, VNG_WRITE_OTHER
);
/* Exempt the guard's own owner processes. */
1585 proc_t p
= current_proc();
1586 const struct vng_owner
*vgo
;
1587 TAILQ_FOREACH(vgo
, &vgi
->vgi_owners
, vgo_link
) {
1588 if (vgo
->vgo_p
== p
) {
1592 error
= vng_guard_violation(vgi
, VNG_WRITE_OTHER
, vp
);
1595 lck_rw_unlock_shared(&llock
);
/*
 * MAC hook: truncate.  Mirrors vng_vnode_check_write: the guard's own
 * owner processes are exempt (owner scan below); truncates by "other"
 * processes become VNG_TRUNC_OTHER violations.  Also reached from
 * vng_vnode_check_open() for O_TRUNC opens.  'error' is declared in
 * lines elided from this extraction.
 */
1601 * Only check violations for truncates performed by "other processes"
1604 vng_vnode_check_truncate(kauth_cred_t __unused actv_cred
,
1605 kauth_cred_t __unused file_cred
, struct vnode
*vp
,
1606 struct label
*label
)
1609 if (NULL
!= label
) {
1610 lck_rw_lock_shared(&llock
);
1611 const struct vng_info
*vgi
=
1612 vng_lbl_get_withattr(label
, VNG_TRUNC_OTHER
);
/* Exempt the guard's own owner processes. */
1614 proc_t p
= current_proc();
1615 const struct vng_owner
*vgo
;
1616 TAILQ_FOREACH(vgo
, &vgi
->vgi_owners
, vgo_link
) {
1617 if (vgo
->vgo_p
== p
) {
1621 error
= vng_guard_violation(vgi
, VNG_TRUNC_OTHER
, vp
);
1624 lck_rw_unlock_shared(&llock
);
/*
 * MAC hook: exchangedata.  Both vnodes are checked for a VNG_EXCHDATA
 * guard under llock (shared); either side being guarded triggers
 * vng_guard_violation().  The second call's trailing arguments and the
 * 'error' declaration are in lines elided from this extraction.
 */
1630 vng_vnode_check_exchangedata(kauth_cred_t __unused cred
,
1631 struct vnode
*fvp
, struct label
*flabel
,
1632 struct vnode
*svp
, struct label
*slabel
)
1635 if (NULL
!= flabel
|| NULL
!= slabel
) {
1636 lck_rw_lock_shared(&llock
);
/* First vnode. */
1637 const struct vng_info
*vgi
=
1638 vng_lbl_get_withattr(flabel
, VNG_EXCHDATA
);
1640 error
= vng_guard_violation(vgi
, VNG_EXCHDATA
, fvp
);
/* Second vnode. */
1643 vgi
= vng_lbl_get_withattr(slabel
, VNG_EXCHDATA
);
1645 error
= vng_guard_violation(vgi
,
1649 lck_rw_unlock_shared(&llock
);
/*
 * MAC hook: open.  Only opens that request truncation (O_TRUNC) are of
 * interest; those are delegated to vng_vnode_check_truncate(), which
 * applies the same owner-exemption and VNG_TRUNC_OTHER reporting.
 * Non-truncating opens are allowed (return in elided lines).
 */
1654 /* Intercept open-time truncations (by "other") of a guarded vnode */
1657 vng_vnode_check_open(kauth_cred_t cred
,
1658 struct vnode
*vp
, struct label
*label
, int acc_mode
)
1660 if (0 == (acc_mode
& O_TRUNC
)) {
1663 return vng_vnode_check_truncate(cred
, NULL
, vp
, label
);
/*
 * mpo_policy_init hook: allocate the lock group (named after the policy,
 * via mpc_name) and initialize llock, the global rw-lock that serializes
 * access to guard labels throughout this policy.
 */
1667 * Configuration gorp
1671 vng_init(struct mac_policy_conf
*mpc
)
1673 llock_grp
= lck_grp_alloc_init(mpc
->mpc_name
, LCK_GRP_ATTR_NULL
);
1674 lck_rw_init(&llock
, llock_grp
, LCK_ATTR_NULL
);
/*
 * MAC policy operation vector: wires the vnguard hooks into the MAC
 * framework — label teardown, the namespace/data-mutation checks defined
 * above, the policy syscall entry point, and policy initialization.
 * Placed in read-only-early memory so the table cannot be patched.
 */
1677 SECURITY_READ_ONLY_EARLY(static struct mac_policy_ops
) vng_policy_ops
= {
1678 .mpo_file_label_destroy
= vng_file_label_destroy
,
1680 .mpo_vnode_check_link
= vng_vnode_check_link
,
1681 .mpo_vnode_check_unlink
= vng_vnode_check_unlink
,
1682 .mpo_vnode_check_rename
= vng_vnode_check_rename
,
1683 .mpo_vnode_check_write
= vng_vnode_check_write
,
1684 .mpo_vnode_check_truncate
= vng_vnode_check_truncate
,
1685 .mpo_vnode_check_exchangedata
= vng_vnode_check_exchangedata
,
1686 .mpo_vnode_check_open
= vng_vnode_check_open
,
1688 .mpo_policy_syscall
= vng_policy_syscall
,
1689 .mpo_policy_init
= vng_init
,
/* Label names advertised to the MAC framework (array initializer elided
 * in this extraction); ACOUNT yields the element count of a true array —
 * valid only on arrays, not pointers. */
1692 static const char *vng_labelnames
[] = {
1696 #define ACOUNT(arr) ((unsigned)(sizeof (arr) / sizeof (arr[0])))
/*
 * Static policy registration record passed to mac_policy_register():
 * policy name, label slot offset, label names, and the op vector above;
 * no load-time or runtime flags.  vng_policy_handle receives the handle
 * returned by registration (see vnguard_policy_init).
 */
1698 SECURITY_READ_ONLY_LATE(static struct mac_policy_conf
) vng_policy_conf
= {
1699 .mpc_name
= VNG_POLICY_NAME
,
1700 .mpc_fullname
= "Guarded vnode policy",
1701 .mpc_field_off
= &label_slot
,
1702 .mpc_labelnames
= vng_labelnames
,
1703 .mpc_labelname_count
= ACOUNT(vng_labelnames
),
1704 .mpc_ops
= &vng_policy_ops
,
1705 .mpc_loadtime_flags
= 0,
1706 .mpc_runtime_flags
= 0
1709 SECURITY_READ_ONLY_LATE(static mac_policy_handle_t
) vng_policy_handle
;
/*
 * Boot-time registration.  The policy is active only on systems where a
 * debugger is permitted (PE_i_can_has_debugger); on release devices the
 * early return (elided) leaves it unregistered.  Default behavior is
 * log + uprintf + corpse-generating EXC_GUARD; the "vnguard" boot-arg can
 * override the flags, and a resulting value of zero disables registration.
 */
1712 vnguard_policy_init(void)
1714 if (0 == PE_i_can_has_debugger(NULL
)) {
1717 vng_policy_flags
= kVNG_POLICY_LOGMSG
|
1718 kVNG_POLICY_EXC_CORPSE
| kVNG_POLICY_UPRINTMSG
;
1719 PE_parse_boot_argn("vnguard", &vng_policy_flags
, sizeof(vng_policy_flags
));
1720 if (vng_policy_flags
) {
1721 mac_policy_register(&vng_policy_conf
, &vng_policy_handle
, NULL
);
/* DEBUG/DEVELOPMENT builds only: expose kern.vnguard.flags (read/write)
 * so vng_policy_flags can be tuned at runtime without rebooting with a
 * different "vnguard" boot-arg.  The matching #endif for the DEBUG ||
 * DEVELOPMENT guard is in lines elided from this extraction; the final
 * #endif below closes the file-wide CONFIG_MACF && CONFIG_VNGUARD guard. */
1725 #if DEBUG || DEVELOPMENT
1726 #include <sys/sysctl.h>
1728 SYSCTL_DECL(_kern_vnguard
);
1729 SYSCTL_NODE(_kern
, OID_AUTO
, vnguard
, CTLFLAG_RW
| CTLFLAG_LOCKED
, 0, "vnguard");
1730 SYSCTL_INT(_kern_vnguard
, OID_AUTO
, flags
, CTLFLAG_RW
| CTLFLAG_LOCKED
,
1731 &vng_policy_flags
, 0, "vnguard policy flags");
1734 #endif /* CONFIG_MACF && CONFIG_VNGUARD */