]> git.saurik.com Git - apple/xnu.git/blob - bsd/kern/kern_guarded.c
xnu-7195.81.3.tar.gz
[apple/xnu.git] / bsd / kern / kern_guarded.c
1 /*
2 * Copyright (c) 2018 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/filedesc.h>
32 #include <sys/kernel.h>
33 #include <sys/file_internal.h>
34 #include <kern/exc_guard.h>
35 #include <sys/guarded.h>
36 #include <kern/kalloc.h>
37 #include <sys/sysproto.h>
38 #include <sys/vnode.h>
39 #include <sys/vnode_internal.h>
40 #include <sys/uio_internal.h>
41 #include <sys/ubc_internal.h>
42 #include <vfs/vfs_support.h>
43 #include <security/audit/audit.h>
44 #include <sys/syscall.h>
45 #include <sys/kauth.h>
46 #include <sys/kdebug.h>
47 #include <stdbool.h>
48 #include <vm/vm_protos.h>
49 #include <libkern/section_keywords.h>
50 #if CONFIG_MACF && CONFIG_VNGUARD
51 #include <security/mac.h>
52 #include <security/mac_framework.h>
53 #include <security/mac_policy.h>
54 #include <pexpert/pexpert.h>
55 #include <sys/sysctl.h>
56 #include <sys/reason.h>
57 #endif
58
59 #define f_flag fp_glob->fg_flag
60 extern int dofilewrite(vfs_context_t ctx, struct fileproc *fp,
61 user_addr_t bufp, user_size_t nbyte, off_t offset,
62 int flags, user_ssize_t *retval );
63 extern int do_uiowrite(struct proc *p, struct fileproc *fp, uio_t uio, int flags, user_ssize_t *retval);
64
65 /*
66 * Experimental guarded file descriptor support.
67 */
68
69 kern_return_t task_exception_notify(exception_type_t exception,
70 mach_exception_data_type_t code, mach_exception_data_type_t subcode);
71 kern_return_t task_violated_guard(mach_exception_code_t, mach_exception_subcode_t, void *);
72
73 /*
74 * Most fd's have an underlying fileproc struct; but some may be
75 * guarded_fileproc structs which implement guarded fds. The latter
76 * struct (below) embeds the former.
77 *
78 * The two types should be distinguished by the "type" portion of fp_flags.
79 * There's also a magic number to help catch misuse and bugs.
80 *
81 * This is a bit unpleasant, but results from the desire to allow
82 * alternate file behaviours for a few file descriptors without
83 * growing the fileproc data structure.
84 */
85
/*
 * A guarded fileproc embeds a plain fileproc as its FIRST member so a
 * fileproc pointer can be converted back with __container_of() — see
 * FP_TO_GFP().  Do not reorder the members.
 */
struct guarded_fileproc {
	struct fileproc gf_fileproc;	/* must remain first */
	u_int gf_attrs;			/* GUARD_* attribute bits */
	guardid_t gf_guard;		/* caller-chosen guard value (never zero) */
};

/* Dedicated zone so zone_require() can detect forged or stray pointers. */
ZONE_DECLARE(gfp_zone, "guarded_fileproc",
    sizeof(struct guarded_fileproc),
    ZC_NOENCRYPT | ZC_ZFREE_CLEARMEM);
95
96 static inline struct guarded_fileproc *
97 FP_TO_GFP(struct fileproc *fp)
98 {
99 struct guarded_fileproc *gfp =
100 __container_of(fp, struct guarded_fileproc, gf_fileproc);
101
102 zone_require(gfp_zone, gfp);
103 return gfp;
104 }
105
106 #define GFP_TO_FP(gfp) (&(gfp)->gf_fileproc)
107
/* Creation arguments handed to guarded_fileproc_alloc_init(). */
struct gfp_crarg {
	guardid_t gca_guard;	/* guard value for the new descriptor */
	u_int gca_attrs;	/* GUARD_* attribute bits */
};
112
113 static struct fileproc *
114 guarded_fileproc_alloc_init(void *crarg)
115 {
116 struct gfp_crarg *aarg = crarg;
117 struct guarded_fileproc *gfp;
118
119 gfp = zalloc_flags(gfp_zone, Z_WAITOK | Z_ZERO);
120
121 struct fileproc *fp = &gfp->gf_fileproc;
122 os_ref_init(&fp->fp_iocount, &f_refgrp);
123 fp->fp_flags = FTYPE_GUARDED;
124
125 gfp->gf_guard = aarg->gca_guard;
126 gfp->gf_attrs = aarg->gca_attrs;
127
128 return GFP_TO_FP(gfp);
129 }
130
131 void
132 guarded_fileproc_free(struct fileproc *fp)
133 {
134 struct guarded_fileproc *gfp = FP_TO_GFP(fp);
135 zfree(gfp_zone, gfp);
136 }
137
138 static int
139 fp_lookup_guarded(proc_t p, int fd, guardid_t guard,
140 struct guarded_fileproc **gfpp, int locked)
141 {
142 struct fileproc *fp;
143 int error;
144
145 if ((error = fp_lookup(p, fd, &fp, locked)) != 0) {
146 return error;
147 }
148 if (FILEPROC_TYPE(fp) != FTYPE_GUARDED) {
149 (void) fp_drop(p, fd, fp, locked);
150 return EINVAL;
151 }
152 struct guarded_fileproc *gfp = FP_TO_GFP(fp);
153
154 if (guard != gfp->gf_guard) {
155 (void) fp_drop(p, fd, fp, locked);
156 return EPERM; /* *not* a mismatch exception */
157 }
158 if (gfpp) {
159 *gfpp = gfp;
160 }
161 return 0;
162 }
163
164 /*
165 * Expected use pattern:
166 *
167 * if (fp_isguarded(fp, GUARD_CLOSE)) {
168 * error = fp_guard_exception(p, fd, fp, kGUARD_EXC_CLOSE);
169 * proc_fdunlock(p);
170 * return error;
171 * }
172 *
173 * Passing `0` to `attrs` returns whether the fp is guarded at all.
174 */
175
176 int
177 fp_isguarded(struct fileproc *fp, u_int attrs)
178 {
179 if (FILEPROC_TYPE(fp) == FTYPE_GUARDED) {
180 return (attrs & FP_TO_GFP(fp)->gf_attrs) == attrs;
181 }
182 return 0;
183 }
184
185 extern char *proc_name_address(void *p);
186
187 int
188 fp_guard_exception(proc_t p, int fd, struct fileproc *fp, u_int flavor)
189 {
190 if (FILEPROC_TYPE(fp) != FTYPE_GUARDED) {
191 panic("%s corrupt fp %p flags %x", __func__, fp, fp->fp_flags);
192 }
193
194 struct guarded_fileproc *gfp = FP_TO_GFP(fp);
195 /* all gfd fields protected via proc_fdlock() */
196 proc_fdlock_assert(p, LCK_MTX_ASSERT_OWNED);
197
198 mach_exception_code_t code = 0;
199 EXC_GUARD_ENCODE_TYPE(code, GUARD_TYPE_FD);
200 EXC_GUARD_ENCODE_FLAVOR(code, flavor);
201 EXC_GUARD_ENCODE_TARGET(code, fd);
202 mach_exception_subcode_t subcode = gfp->gf_guard;
203
204 thread_t t = current_thread();
205 thread_guard_violation(t, code, subcode, TRUE);
206 return EPERM;
207 }
208
209 /*
210 * (Invoked before returning to userland from the syscall handler.)
211 */
212 void
213 fd_guard_ast(
214 thread_t __unused t,
215 mach_exception_code_t code,
216 mach_exception_subcode_t subcode)
217 {
218 task_exception_notify(EXC_GUARD, code, subcode);
219 proc_t p = current_proc();
220 psignal(p, SIGKILL);
221 }
222
223 /*
224 * Experimental guarded file descriptor SPIs
225 */
226
227 /*
228 * int guarded_open_np(const char *pathname, int flags,
229 * const guardid_t *guard, u_int guardflags, ...);
230 *
231 * In this initial implementation, GUARD_DUP must be specified.
232 * GUARD_CLOSE, GUARD_SOCKET_IPC and GUARD_FILEPORT are optional.
233 *
234 * If GUARD_DUP wasn't specified, then we'd have to do the (extra) work
235 * to allow dup-ing a descriptor to inherit the guard onto the new
236 * descriptor. (Perhaps GUARD_DUP behaviours should just always be true
237 * for a guarded fd? Or, more sanely, all the dup operations should
238 * just always propagate the guard?)
239 *
240 * Guarded descriptors are always close-on-exec, and GUARD_CLOSE
241 * requires close-on-fork; O_CLOEXEC must be set in flags.
242 * This setting is immutable; attempts to clear the flag will
243 * cause a guard exception.
244 *
245 * XXX It's somewhat broken that change_fdguard_np() can completely
246 * remove the guard and thus revoke down the immutability
247 * promises above. Ick.
248 */
249 int
250 guarded_open_np(proc_t p, struct guarded_open_np_args *uap, int32_t *retval)
251 {
252 if ((uap->flags & O_CLOEXEC) == 0) {
253 return EINVAL;
254 }
255
256 #define GUARD_REQUIRED (GUARD_DUP)
257 #define GUARD_ALL (GUARD_REQUIRED | \
258 (GUARD_CLOSE | GUARD_SOCKET_IPC | GUARD_FILEPORT | GUARD_WRITE))
259
260 if (((uap->guardflags & GUARD_REQUIRED) != GUARD_REQUIRED) ||
261 ((uap->guardflags & ~GUARD_ALL) != 0)) {
262 return EINVAL;
263 }
264
265 int error;
266 struct gfp_crarg crarg = {
267 .gca_attrs = uap->guardflags
268 };
269
270 if ((error = copyin(uap->guard,
271 &(crarg.gca_guard), sizeof(crarg.gca_guard))) != 0) {
272 return error;
273 }
274
275 /*
276 * Disallow certain guard values -- is zero enough?
277 */
278 if (crarg.gca_guard == 0) {
279 return EINVAL;
280 }
281
282 struct filedesc *fdp = p->p_fd;
283 struct vnode_attr va;
284 struct nameidata nd;
285 vfs_context_t ctx = vfs_context_current();
286 int cmode;
287
288 VATTR_INIT(&va);
289 cmode = ((uap->mode & ~fdp->fd_cmask) & ALLPERMS) & ~S_ISTXT;
290 VATTR_SET(&va, va_mode, cmode & ACCESSPERMS);
291
292 NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW | AUDITVNPATH1, UIO_USERSPACE,
293 uap->path, ctx);
294
295 return open1(ctx, &nd, uap->flags | O_CLOFORK, &va,
296 guarded_fileproc_alloc_init, &crarg, retval);
297 }
298
/*
 * int guarded_open_dprotected_np(const char *pathname, int flags,
 *     const guardid_t *guard, u_int guardflags, int dpclass, int dpflags, ...);
 *
 * This SPI is extension of guarded_open_np() to include dataprotection class on creation
 * in "dpclass" and dataprotection flags 'dpflags'. Otherwise behaviors are same as in
 * guarded_open_np()
 */
int
guarded_open_dprotected_np(proc_t p, struct guarded_open_dprotected_np_args *uap, int32_t *retval)
{
	/* guarded descriptors are immutably close-on-exec */
	if ((uap->flags & O_CLOEXEC) == 0) {
		return EINVAL;
	}

	/* GUARD_DUP is mandatory; reject any unknown attribute bits */
	if (((uap->guardflags & GUARD_REQUIRED) != GUARD_REQUIRED) ||
	    ((uap->guardflags & ~GUARD_ALL) != 0)) {
		return EINVAL;
	}

	int error;
	struct gfp_crarg crarg = {
		.gca_attrs = uap->guardflags
	};

	if ((error = copyin(uap->guard,
	    &(crarg.gca_guard), sizeof(crarg.gca_guard))) != 0) {
		return error;
	}

	/*
	 * Disallow certain guard values -- is zero enough?
	 */
	if (crarg.gca_guard == 0) {
		return EINVAL;
	}

	struct filedesc *fdp = p->p_fd;
	struct vnode_attr va;
	struct nameidata nd;
	vfs_context_t ctx = vfs_context_current();
	int cmode;

	VATTR_INIT(&va);
	/* creation mode honors the process umask; sticky bit never set */
	cmode = ((uap->mode & ~fdp->fd_cmask) & ALLPERMS) & ~S_ISTXT;
	VATTR_SET(&va, va_mode, cmode & ACCESSPERMS);

	NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW | AUDITVNPATH1, UIO_USERSPACE,
	    uap->path, ctx);

	/*
	 * Initialize the extra fields in vnode_attr to pass down dataprotection
	 * extra fields.
	 * 1. target cprotect class.
	 * 2. set a flag to mark it as requiring open-raw-encrypted semantics.
	 */
	if (uap->flags & O_CREAT) {
		/* dpclass only applies when the file is being created */
		VATTR_SET(&va, va_dataprotect_class, uap->dpclass);
	}

	if (uap->dpflags & (O_DP_GETRAWENCRYPTED | O_DP_GETRAWUNENCRYPTED)) {
		if (uap->flags & (O_RDWR | O_WRONLY)) {
			/* Not allowed to write raw encrypted bytes */
			return EINVAL;
		}
		if (uap->dpflags & O_DP_GETRAWENCRYPTED) {
			VATTR_SET(&va, va_dataprotect_flags, VA_DP_RAWENCRYPTED);
		}
		if (uap->dpflags & O_DP_GETRAWUNENCRYPTED) {
			VATTR_SET(&va, va_dataprotect_flags, VA_DP_RAWUNENCRYPTED);
		}
	}

	/* O_CLOFORK forced on: guarded fds never survive fork */
	return open1(ctx, &nd, uap->flags | O_CLOFORK, &va,
	    guarded_fileproc_alloc_init, &crarg, retval);
}
375
376 /*
377 * int guarded_kqueue_np(const guardid_t *guard, u_int guardflags);
378 *
379 * Create a guarded kqueue descriptor with guardid and guardflags.
380 *
381 * Same restrictions on guardflags as for guarded_open_np().
382 * All kqueues are -always- close-on-exec and close-on-fork by themselves
383 * and are not sendable.
384 */
385 int
386 guarded_kqueue_np(proc_t p, struct guarded_kqueue_np_args *uap, int32_t *retval)
387 {
388 if (((uap->guardflags & GUARD_REQUIRED) != GUARD_REQUIRED) ||
389 ((uap->guardflags & ~GUARD_ALL) != 0)) {
390 return EINVAL;
391 }
392
393 int error;
394 struct gfp_crarg crarg = {
395 .gca_attrs = uap->guardflags
396 };
397
398 if ((error = copyin(uap->guard,
399 &(crarg.gca_guard), sizeof(crarg.gca_guard))) != 0) {
400 return error;
401 }
402
403 if (crarg.gca_guard == 0) {
404 return EINVAL;
405 }
406
407 return kqueue_internal(p, guarded_fileproc_alloc_init, &crarg, retval);
408 }
409
/*
 * int guarded_close_np(int fd, const guardid_t *guard);
 *
 * Close a guarded descriptor; the caller must supply the guard value the
 * descriptor was created with.  EPERM on guard mismatch, EINVAL if the
 * fd is not guarded at all.
 */
int
guarded_close_np(proc_t p, struct guarded_close_np_args *uap,
    __unused int32_t *retval)
{
	struct guarded_fileproc *gfp;
	int fd = uap->fd;
	int error;
	guardid_t uguard;

	AUDIT_SYSCLOSE(p, fd);

	if ((error = copyin(uap->guard, &uguard, sizeof(uguard))) != 0) {
		return error;
	}

	proc_fdlock(p);
	if ((error = fp_lookup_guarded(p, fd, uguard, &gfp, 1)) != 0) {
		proc_fdunlock(p);
		return error;
	}
	/* release the lookup reference before handing the fp to close */
	fp_drop(p, fd, GFP_TO_FP(gfp), 1);
	/* fp_close_and_unlock() consumes the fd and drops the fd lock */
	return fp_close_and_unlock(p, fd, GFP_TO_FP(gfp), 0);
}
436
437 /*
438 * int
439 * change_fdguard_np(int fd, const guardid_t *guard, u_int guardflags,
440 * const guardid_t *nguard, u_int nguardflags, int *fdflagsp);
441 *
442 * Given a file descriptor, atomically exchange <guard, guardflags> for
443 * a new guard <nguard, nguardflags>, returning the previous fd
444 * flags (see fcntl:F_SETFD) in *fdflagsp.
445 *
446 * This syscall can be used to either (a) add a new guard to an existing
447 * unguarded file descriptor (b) remove the old guard from an existing
448 * guarded file descriptor or (c) change the guard (guardid and/or
449 * guardflags) on a guarded file descriptor.
450 *
451 * If 'guard' is NULL, fd must be unguarded at entry. If the call completes
452 * successfully the fd will be guarded with <nguard, nguardflags>.
453 *
454 * Guarding a file descriptor has some side-effects on the "fdflags"
455 * associated with the descriptor - in particular FD_CLOEXEC is
456 * forced ON unconditionally, and FD_CLOFORK is forced ON by GUARD_CLOSE.
457 * Callers who wish to subsequently restore the state of the fd should save
458 * the value of *fdflagsp after a successful invocation.
459 *
460 * If 'nguard' is NULL, fd must be guarded at entry, <guard, guardflags>
461 * must match with what's already guarding the descriptor, and the
462 * result will be to completely remove the guard. Note also that the
463 * fdflags are copied to the descriptor from the incoming *fdflagsp argument.
464 *
465 * If the descriptor is guarded, and neither 'guard' nor 'nguard' is NULL
466 * and <guard, guardflags> matches what's already guarding the descriptor,
467 * then <nguard, nguardflags> becomes the new guard. In this case, even if
468 * the GUARD_CLOSE flag is being cleared, it is still possible to continue
469 * to keep FD_CLOFORK on the descriptor by passing FD_CLOFORK via fdflagsp.
470 *
471 * (File descriptors whose underlying fileglobs are marked FG_CONFINED are
472 * still close-on-fork, regardless of the setting of FD_CLOFORK.)
473 *
474 * Example 1: Guard an unguarded descriptor during a set of operations,
475 * then restore the original state of the descriptor.
476 *
477 * int sav_flags = 0;
478 * change_fdguard_np(fd, NULL, 0, &myguard, GUARD_CLOSE, &sav_flags);
479 * // do things with now guarded 'fd'
480 * change_fdguard_np(fd, &myguard, GUARD_CLOSE, NULL, 0, &sav_flags);
481 * // fd now unguarded.
482 *
483 * Example 2: Change the guard of a guarded descriptor during a set of
484 * operations, then restore the original state of the descriptor.
485 *
486 * int sav_flags = (gdflags & GUARD_CLOSE) ? FD_CLOFORK : 0;
487 * change_fdguard_np(fd, &gd, gdflags, &myguard, GUARD_CLOSE, &sav_flags);
488 * // do things with 'fd' with a different guard
489 * change_fdguard_np(fd, &myg, GUARD_CLOSE, &gd, gdflags, &sav_flags);
490 * // back to original guarded state
491 *
492 * XXX This SPI is too much of a chainsaw and should be revised.
493 */
494
int
change_fdguard_np(proc_t p, struct change_fdguard_np_args *uap,
    __unused int32_t *retval)
{
	struct fileproc *fp;
	int fd = uap->fd;
	int error;
	guardid_t oldg = 0, newg = 0;
	int nfdflags = 0;

	/* Copy in old guard, new guard and requested fdflags (each optional). */
	if (0 != uap->guard &&
	    0 != (error = copyin(uap->guard, &oldg, sizeof(oldg)))) {
		return error; /* can't copyin current guard */
	}
	if (0 != uap->nguard &&
	    0 != (error = copyin(uap->nguard, &newg, sizeof(newg)))) {
		return error; /* can't copyin new guard */
	}
	if (0 != uap->fdflagsp &&
	    0 != (error = copyin(uap->fdflagsp, &nfdflags, sizeof(nfdflags)))) {
		return error; /* can't copyin new fdflags */
	}
	proc_fdlock(p);
restart:
	if ((error = fp_lookup(p, fd, &fp, 1)) != 0) {
		proc_fdunlock(p);
		return error;
	}

	if (0 != uap->fdflagsp) {
		/* Report the descriptor's current fdflags back to userland. */
		int ofdflags = FDFLAGS_GET(p, fd);
		int ofl = ((ofdflags & UF_EXCLOSE) ? FD_CLOEXEC : 0) |
		    ((ofdflags & UF_FORKCLOSE) ? FD_CLOFORK : 0);
		/* fd lock is dropped around copyout; we hold an fp reference */
		proc_fdunlock(p);
		if (0 != (error = copyout(&ofl, uap->fdflagsp, sizeof(ofl)))) {
			proc_fdlock(p);
			goto dropout; /* can't copyout old fdflags */
		}
		proc_fdlock(p);
	}

	/* Validate the old-guard arguments against the descriptor's state. */
	if (FILEPROC_TYPE(fp) == FTYPE_GUARDED) {
		if (0 == uap->guard || 0 == uap->guardflags) {
			error = EINVAL; /* missing guard! */
		} else if (0 == oldg) {
			error = EPERM; /* guardids cannot be zero */
		}
	} else {
		if (0 != uap->guard || 0 != uap->guardflags) {
			error = EINVAL; /* guard provided, but none needed! */
		}
	}

	if (0 != error) {
		goto dropout;
	}

	if (0 != uap->nguard) {
		/*
		 * There's a new guard in town.
		 */
		if (0 == newg) {
			error = EINVAL; /* guards cannot contain zero */
		} else if (((uap->nguardflags & GUARD_REQUIRED) != GUARD_REQUIRED) ||
		    ((uap->nguardflags & ~GUARD_ALL) != 0)) {
			error = EINVAL; /* must have valid attributes too */
		}
		if (0 != error) {
			goto dropout;
		}

		if (FILEPROC_TYPE(fp) == FTYPE_GUARDED) {
			/*
			 * Replace old guard with new guard
			 */
			struct guarded_fileproc *gfp = FP_TO_GFP(fp);

			if (oldg == gfp->gf_guard &&
			    uap->guardflags == gfp->gf_attrs) {
				/*
				 * Must match existing guard + attributes
				 * before we'll swap them to new ones, managing
				 * fdflags "side-effects" as we go. Note that
				 * userland can request FD_CLOFORK semantics.
				 */
				if (gfp->gf_attrs & GUARD_CLOSE) {
					FDFLAGS_CLR(p, fd, UF_FORKCLOSE);
				}
				gfp->gf_guard = newg;
				gfp->gf_attrs = uap->nguardflags;
				if (gfp->gf_attrs & GUARD_CLOSE) {
					FDFLAGS_SET(p, fd, UF_FORKCLOSE);
				}
				FDFLAGS_SET(p, fd,
				    (nfdflags & FD_CLOFORK) ? UF_FORKCLOSE : 0);
				/* FG_CONFINED enforced regardless */
			} else {
				error = EPERM;
			}
			goto dropout;
		} else {
			/*
			 * Add a guard to a previously unguarded descriptor
			 */
			switch (FILEGLOB_DTYPE(fp->fp_glob)) {
			case DTYPE_VNODE:
			case DTYPE_PIPE:
			case DTYPE_SOCKET:
			case DTYPE_KQUEUE:
			case DTYPE_NETPOLICY:
				break;
			default:
				error = ENOTSUP;
				goto dropout;
			}

			/* allocate the replacement guarded fileproc unlocked */
			proc_fdunlock(p);

			struct gfp_crarg crarg = {
				.gca_guard = newg,
				.gca_attrs = uap->nguardflags
			};
			struct fileproc *nfp =
			    guarded_fileproc_alloc_init(&crarg);
			struct guarded_fileproc *gfp;

			proc_fdlock(p);

			/* swap it in; retry from the top on a collision */
			switch (error = fp_tryswap(p, fd, nfp)) {
			case 0: /* success; guarded-ness comes with side-effects */
				fp = NULL;
				gfp = FP_TO_GFP(nfp);
				if (gfp->gf_attrs & GUARD_CLOSE) {
					FDFLAGS_SET(p, fd, UF_FORKCLOSE);
				}
				FDFLAGS_SET(p, fd, UF_EXCLOSE);
				(void) fp_drop(p, fd, nfp, 1);
				break;
			case EKEEPLOOKING: /* fp_iocount indicates a collision */
				(void) fp_drop(p, fd, fp, 1);
				fileproc_free(nfp);
				goto restart;
			default:
				(void) fp_drop(p, fd, fp, 1);
				fileproc_free(nfp);
				break;
			}
			proc_fdunlock(p);
			return error;
		}
	} else {
		/*
		 * No new guard.
		 */
		if (FILEPROC_TYPE(fp) == FTYPE_GUARDED) {
			/*
			 * Remove the guard altogether.
			 */
			struct guarded_fileproc *gfp = FP_TO_GFP(fp);

			if (0 != uap->nguardflags) {
				error = EINVAL;
				goto dropout;
			}

			if (oldg != gfp->gf_guard ||
			    uap->guardflags != gfp->gf_attrs) {
				error = EPERM;
				goto dropout;
			}

			/* allocate the replacement (unguarded) fileproc unlocked */
			proc_fdunlock(p);
			struct fileproc *nfp = fileproc_alloc_init(NULL);
			proc_fdlock(p);

			switch (error = fp_tryswap(p, fd, nfp)) {
			case 0: /* success; undo side-effects of guarded-ness */
				fp = NULL;
				FDFLAGS_CLR(p, fd, UF_FORKCLOSE | UF_EXCLOSE);
				FDFLAGS_SET(p, fd,
				    (nfdflags & FD_CLOFORK) ? UF_FORKCLOSE : 0);
				/* FG_CONFINED enforced regardless */
				FDFLAGS_SET(p, fd,
				    (nfdflags & FD_CLOEXEC) ? UF_EXCLOSE : 0);
				(void) fp_drop(p, fd, nfp, 1);
				break;
			case EKEEPLOOKING: /* fp_iocount indicates collision */
				(void) fp_drop(p, fd, fp, 1);
				fileproc_free(nfp);
				goto restart;
			default:
				(void) fp_drop(p, fd, fp, 1);
				fileproc_free(nfp);
				break;
			}
			proc_fdunlock(p);
			return error;
		} else {
			/*
			 * Not already guarded, and no new guard?
			 */
			error = EINVAL;
		}
	}

dropout:
	(void) fp_drop(p, fd, fp, 1);
	proc_fdunlock(p);
	return error;
}
705
706 /*
707 * user_ssize_t guarded_write_np(int fd, const guardid_t *guard,
708 * user_addr_t cbuf, user_ssize_t nbyte);
709 *
710 * Initial implementation of guarded writes.
711 */
712 int
713 guarded_write_np(struct proc *p, struct guarded_write_np_args *uap, user_ssize_t *retval)
714 {
715 int error;
716 int fd = uap->fd;
717 guardid_t uguard;
718 struct fileproc *fp;
719 struct guarded_fileproc *gfp;
720
721 AUDIT_ARG(fd, fd);
722
723 if ((error = copyin(uap->guard, &uguard, sizeof(uguard))) != 0) {
724 return error;
725 }
726
727 error = fp_lookup_guarded(p, fd, uguard, &gfp, 0);
728 if (error) {
729 return error;
730 }
731
732 fp = GFP_TO_FP(gfp);
733 if ((fp->f_flag & FWRITE) == 0) {
734 error = EBADF;
735 } else {
736 struct vfs_context context = *(vfs_context_current());
737 context.vc_ucred = fp->fp_glob->fg_cred;
738
739 error = dofilewrite(&context, fp, uap->cbuf, uap->nbyte,
740 (off_t)-1, 0, retval);
741 }
742
743 fp_drop(p, fd, fp, 0);
744
745 return error;
746 }
747
/*
 * user_ssize_t guarded_pwrite_np(int fd, const guardid_t *guard,
 *     user_addr_t buf, user_size_t nbyte, off_t offset);
 *
 * Initial implementation of guarded pwrites.
 */
int
guarded_pwrite_np(struct proc *p, struct guarded_pwrite_np_args *uap, user_ssize_t *retval)
{
	struct fileproc *fp;
	int error;
	int fd = uap->fd;
	vnode_t vp = (vnode_t)0;
	guardid_t uguard;
	struct guarded_fileproc *gfp;

	AUDIT_ARG(fd, fd);

	/* fetch the caller's guard value from userland */
	if ((error = copyin(uap->guard, &uguard, sizeof(uguard))) != 0) {
		return error;
	}

	/* takes an fp reference on success; dropped below */
	error = fp_lookup_guarded(p, fd, uguard, &gfp, 0);
	if (error) {
		return error;
	}

	fp = GFP_TO_FP(gfp);
	if ((fp->f_flag & FWRITE) == 0) {
		error = EBADF;
	} else {
		/* write with the credentials the file was opened with */
		struct vfs_context context = *vfs_context_current();
		context.vc_ucred = fp->fp_glob->fg_cred;

		/* positional writes only work on regular files */
		if (FILEGLOB_DTYPE(fp->fp_glob) != DTYPE_VNODE) {
			error = ESPIPE;
			goto errout;
		}
		vp = (vnode_t)fp->fp_glob->fg_data;
		if (vnode_isfifo(vp)) {
			error = ESPIPE;
			goto errout;
		}
		if ((vp->v_flag & VISTTY)) {
			error = ENXIO;
			goto errout;
		}
		/* -1 is reserved (means "current offset" to dofilewrite) */
		if (uap->offset == (off_t)-1) {
			error = EINVAL;
			goto errout;
		}

		error = dofilewrite(&context, fp, uap->buf, uap->nbyte,
		    uap->offset, FOF_OFFSET, retval);
	}
errout:
	fp_drop(p, fd, fp, 0);

	/* trace the syscall with the 64-bit offset split across two args */
	KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_guarded_pwrite_np) | DBG_FUNC_NONE),
	    uap->fd, uap->nbyte, (unsigned int)((uap->offset >> 32)), (unsigned int)(uap->offset), 0);

	return error;
}
811
/*
 * user_ssize_t guarded_writev_np(int fd, const guardid_t *guard,
 *     struct iovec *iovp, u_int iovcnt);
 *
 * Initial implementation of guarded writev.
 *
 */
int
guarded_writev_np(struct proc *p, struct guarded_writev_np_args *uap, user_ssize_t *retval)
{
	uio_t auio = NULL;
	int error;
	struct fileproc *fp;
	struct user_iovec *iovp;
	guardid_t uguard;
	struct guarded_fileproc *gfp;

	AUDIT_ARG(fd, uap->fd);

	/* Verify range before calling uio_create() */
	if (uap->iovcnt <= 0 || uap->iovcnt > UIO_MAXIOV) {
		return EINVAL;
	}

	/* allocate a uio large enough to hold the number of iovecs passed */
	auio = uio_create(uap->iovcnt, 0,
	    (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32),
	    UIO_WRITE);

	/* get location of iovecs within the uio. then copyin the iovecs from
	 * user space.
	 */
	iovp = uio_iovsaddr(auio);
	if (iovp == NULL) {
		error = ENOMEM;
		goto ExitThisRoutine;
	}
	error = copyin_user_iovec_array(uap->iovp,
	    IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32,
	    uap->iovcnt, iovp);
	if (error) {
		goto ExitThisRoutine;
	}

	/* finalize uio_t for use and do the IO
	 */
	error = uio_calculateresid(auio);
	if (error) {
		goto ExitThisRoutine;
	}

	/* fetch the guard value and translate fd -> guarded fileproc */
	if ((error = copyin(uap->guard, &uguard, sizeof(uguard))) != 0) {
		goto ExitThisRoutine;
	}

	error = fp_lookup_guarded(p, uap->fd, uguard, &gfp, 0);
	if (error) {
		goto ExitThisRoutine;
	}

	fp = GFP_TO_FP(gfp);
	if ((fp->f_flag & FWRITE) == 0) {
		error = EBADF;
	} else {
		error = do_uiowrite(p, fp, auio, 0, retval);
	}

	fp_drop(p, uap->fd, fp, 0);
ExitThisRoutine:
	if (auio != NULL) {
		uio_free(auio);
	}
	return error;
}
886
887 /*
888 * int falloc_guarded(struct proc *p, struct fileproc **fp, int *fd,
889 * vfs_context_t ctx, const guardid_t *guard, u_int attrs);
890 *
891 * This SPI is the guarded variant of falloc(). It borrows the same
892 * restrictions as those used by the rest of the guarded_* routines.
893 */
894 int
895 falloc_guarded(struct proc *p, struct fileproc **fp, int *fd,
896 vfs_context_t ctx, const guardid_t *guard, u_int attrs)
897 {
898 struct gfp_crarg crarg;
899
900 if (((attrs & GUARD_REQUIRED) != GUARD_REQUIRED) ||
901 ((attrs & ~GUARD_ALL) != 0) || (*guard == 0)) {
902 return EINVAL;
903 }
904
905 bzero(&crarg, sizeof(crarg));
906 crarg.gca_guard = *guard;
907 crarg.gca_attrs = attrs;
908
909 return falloc_withalloc(p, fp, fd, ctx, guarded_fileproc_alloc_init,
910 &crarg);
911 }
912
913 #if CONFIG_MACF && CONFIG_VNGUARD
914
915 /*
916 * Guarded vnodes
917 *
918 * Uses MAC hooks to guard operations on vnodes in the system. Given an fd,
919 * add data to the label on the fileglob and the vnode it points at.
920 * The data contains a pointer to the fileglob, the set of attributes to
921 * guard, a guard value for uniquification, and the pid of the process
922 * who set the guard up in the first place.
923 *
924 * The fd must have been opened read/write, and the underlying
925 * fileglob is FG_CONFINED so that there's no ambiguity about the
926 * owning process.
927 *
928 * When there's a callback for a vnode operation of interest (rename, unlink,
929 * etc.) check to see if the guard permits that operation, and if not
930 * take an action e.g. log a message or generate a crash report.
931 *
932 * The label is removed from the vnode and the fileglob when the fileglob
933 * is closed.
934 *
935 * The initial action to be taken can be specified by a boot arg (vnguard=0x42)
936 * and change via the "kern.vnguard.flags" sysctl.
937 */
938
struct vng_owner;

struct vng_info { /* lives on the vnode label */
	guardid_t vgi_guard;	/* guard value, for uniquification */
	unsigned vgi_attrs;	/* VNG_* attribute bits being enforced */
	TAILQ_HEAD(, vng_owner) vgi_owners; /* all owners guarding this vnode */
};

struct vng_owner { /* lives on the fileglob label */
	proc_t vgo_p;			/* process that set up the guard */
	struct fileglob *vgo_fg;	/* the (confined) fileglob */
	struct vng_info *vgo_vgi;	/* back-pointer to the vnode's guard info */
	TAILQ_ENTRY(vng_owner) vgo_link; /* linkage on vgi_owners */
};
953
954 static struct vng_info *
955 new_vgi(unsigned attrs, guardid_t guard)
956 {
957 struct vng_info *vgi = kalloc(sizeof(*vgi));
958 vgi->vgi_guard = guard;
959 vgi->vgi_attrs = attrs;
960 TAILQ_INIT(&vgi->vgi_owners);
961 return vgi;
962 }
963
964 static struct vng_owner *
965 new_vgo(proc_t p, struct fileglob *fg)
966 {
967 struct vng_owner *vgo = kalloc(sizeof(*vgo));
968 memset(vgo, 0, sizeof(*vgo));
969 vgo->vgo_p = p;
970 vgo->vgo_fg = fg;
971 return vgo;
972 }
973
/* Attach owner `vgo` to guard `vgi`; caller holds llock exclusive. */
static void
vgi_add_vgo(struct vng_info *vgi, struct vng_owner *vgo)
{
	vgo->vgo_vgi = vgi;
	TAILQ_INSERT_HEAD(&vgi->vgi_owners, vgo, vgo_link);
}
980
/*
 * Detach owner `vgo` from guard `vgi`.  Returns TRUE when this was the
 * last owner, i.e. the vng_info is now unreferenced and can be freed.
 */
static boolean_t
vgi_remove_vgo(struct vng_info *vgi, struct vng_owner *vgo)
{
	TAILQ_REMOVE(&vgi->vgi_owners, vgo, vgo_link);
	vgo->vgo_vgi = NULL;
	return TAILQ_EMPTY(&vgi->vgi_owners);
}
988
/* Free a guard info record; all owners must already be detached. */
static void
free_vgi(struct vng_info *vgi)
{
	assert(TAILQ_EMPTY(&vgi->vgi_owners));
#if DEVELOP || DEBUG
	/* debug poison; note memset only uses the low byte of this pattern */
	memset(vgi, 0xbeadfade, sizeof(*vgi));
#endif
	kfree(vgi, sizeof(*vgi));
}
998
/* Free an owner record (must already be detached from its vng_info). */
static void
free_vgo(struct vng_owner *vgo)
{
#if DEVELOP || DEBUG
	/* debug poison; note memset only uses the low byte of this pattern */
	memset(vgo, 0x2bedf1d0, sizeof(*vgo));
#endif
	kfree(vgo, sizeof(*vgo));
}
1007
1008 static int label_slot;
1009 static lck_rw_t llock;
1010 static lck_grp_t *llock_grp;
1011
1012 static __inline void *
1013 vng_lbl_get(struct label *label)
1014 {
1015 lck_rw_assert(&llock, LCK_RW_ASSERT_HELD);
1016 void *data;
1017 if (NULL == label) {
1018 data = NULL;
1019 } else {
1020 data = (void *)mac_label_get(label, label_slot);
1021 }
1022 return data;
1023 }
1024
1025 static __inline struct vng_info *
1026 vng_lbl_get_withattr(struct label *label, unsigned attrmask)
1027 {
1028 struct vng_info *vgi = vng_lbl_get(label);
1029 assert(NULL == vgi || (vgi->vgi_attrs & ~VNG_ALL) == 0);
1030 if (NULL != vgi && 0 == (vgi->vgi_attrs & attrmask)) {
1031 vgi = NULL;
1032 }
1033 return vgi;
1034 }
1035
/* Store vnguard data into a MAC label slot; llock must be held exclusive. */
static __inline void
vng_lbl_set(struct label *label, void *data)
{
	assert(NULL != label);
	lck_rw_assert(&llock, LCK_RW_ASSERT_EXCLUSIVE);
	mac_label_set(label, label_slot, (intptr_t)data);
}
1043
/*
 * Report the vnode-guard attributes of the vnode `vga->vga_fd` refers to,
 * provided the supplied guard value matches.  vga_attrs is set to zero
 * when the vnode carries no guard; EPERM on guard mismatch.
 */
static int
vnguard_sysc_getguardattr(proc_t p, struct vnguard_getattr *vga)
{
	const int fd = vga->vga_fd;

	/* guard values of zero are invalid */
	if (0 == vga->vga_guard) {
		return EINVAL;
	}

	int error;
	struct fileproc *fp;
	if (0 != (error = fp_lookup(p, fd, &fp, 0))) {
		return error;
	}
	do {
		/* only regular files on mounted volumes can carry a vnode guard */
		struct fileglob *fg = fp->fp_glob;
		if (FILEGLOB_DTYPE(fg) != DTYPE_VNODE) {
			error = EBADF;
			break;
		}
		struct vnode *vp = fg->fg_data;
		if (!vnode_isreg(vp) || NULL == vp->v_mount) {
			error = EBADF;
			break;
		}
		error = vnode_getwithref(vp);
		if (0 != error) {
			break;
		}

		vga->vga_attrs = 0;

		lck_rw_lock_shared(&llock);

		if (NULL != vp->v_label) {
			const struct vng_info *vgi = vng_lbl_get(vp->v_label);
			if (NULL != vgi) {
				if (vgi->vgi_guard != vga->vga_guard) {
					error = EPERM; /* guard mismatch */
				} else {
					vga->vga_attrs = vgi->vgi_attrs;
				}
			}
		}

		lck_rw_unlock_shared(&llock);
		vnode_put(vp);
	} while (0);

	fp_drop(p, fd, fp, 0);
	return error;
}
1096
/*
 * VNG_SYSC_SET_GUARD: attach a vnode guard to the regular file
 * referenced by vns->vns_fd, or add this fileglob as a co-owner
 * of an existing guard.  The fd must be open read/write and
 * confined (FG_CONFINED), and both the attribute mask and guard
 * value must be non-zero.
 */
static int
vnguard_sysc_setguard(proc_t p, const struct vnguard_set *vns)
{
	const int fd = vns->vns_fd;

	/* reject unknown attribute bits, empty attrs, or a zero guard */
	if ((vns->vns_attrs & ~VNG_ALL) != 0 ||
	    0 == vns->vns_attrs || 0 == vns->vns_guard) {
		return EINVAL;
	}

	int error;
	struct fileproc *fp;
	if (0 != (error = fp_lookup(p, fd, &fp, 0))) {
		return error;
	}
	do {
		/*
		 * To avoid trivial DoS, insist that the caller
		 * has read/write access to the file.
		 */
		if ((FREAD | FWRITE) != (fp->f_flag & (FREAD | FWRITE))) {
			error = EBADF;
			break;
		}
		struct fileglob *fg = fp->fp_glob;
		if (FILEGLOB_DTYPE(fg) != DTYPE_VNODE) {
			error = EBADF;
			break;
		}
		/*
		 * Confinement means there's only one fd pointing at
		 * this fileglob, and will always be associated with
		 * this pid.
		 */
		if (0 == (FG_CONFINED & fg->fg_lflags)) {
			error = EBADF;
			break;
		}
		struct vnode *vp = fg->fg_data;
		/* guards apply only to regular files on mounted volumes */
		if (!vnode_isreg(vp) || NULL == vp->v_mount) {
			error = EBADF;
			break;
		}
		error = vnode_getwithref(vp);
		if (0 != error) {
			break;
		}

		/* Ensure the target vnode -has- a label */
		struct vfs_context *ctx = vfs_context_current();
		mac_vnode_label_update(ctx, vp, NULL);

		/* allocate up front; freed below on any path that doesn't consume them */
		struct vng_info *nvgi = new_vgi(vns->vns_attrs, vns->vns_guard);
		struct vng_owner *nvgo = new_vgo(p, fg);

		lck_rw_lock_exclusive(&llock);

		do {
			/*
			 * A vnode guard is associated with one or more
			 * fileglobs in one or more processes.
			 */
			struct vng_info *vgi = vng_lbl_get(vp->v_label);
			struct vng_owner *vgo = vng_lbl_get(fg->fg_label);

			if (NULL == vgi) {
				/* vnode unguarded, add the first guard */
				if (NULL != vgo) {
					panic("vnguard label on fileglob "
					    "but not vnode");
				}
				/* add a kusecount so we can unlabel later */
				error = vnode_ref_ext(vp, O_EVTONLY, 0);
				if (0 == error) {
					/* add the guard */
					vgi_add_vgo(nvgi, nvgo);
					vng_lbl_set(vp->v_label, nvgi);
					vng_lbl_set(fg->fg_label, nvgo);
				} else {
					free_vgo(nvgo);
					free_vgi(nvgi);
				}
			} else {
				/* vnode already guarded */
				free_vgi(nvgi);
				if (vgi->vgi_guard != vns->vns_guard) {
					error = EPERM; /* guard mismatch */
				} else if (vgi->vgi_attrs != vns->vns_attrs) {
					/*
					 * Temporary workaround for older versions of SQLite:
					 * allow newer guard attributes to be silently cleared.
					 */
					const unsigned mask = ~(VNG_WRITE_OTHER | VNG_TRUNC_OTHER);
					if ((vgi->vgi_attrs & mask) == (vns->vns_attrs & mask)) {
						vgi->vgi_attrs &= vns->vns_attrs;
					} else {
						error = EACCES; /* attr mismatch */
					}
				}
				/* mismatch, or this fileglob is already an owner */
				if (0 != error || NULL != vgo) {
					free_vgo(nvgo);
					break;
				}
				/* record shared ownership */
				vgi_add_vgo(vgi, nvgo);
				vng_lbl_set(fg->fg_label, nvgo);
			}
		} while (0);

		lck_rw_unlock_exclusive(&llock);
		vnode_put(vp);
	} while (0);

	fp_drop(p, fd, fp, 0);
	return error;
}
1213
1214 static int
1215 vng_policy_syscall(proc_t p, int cmd, user_addr_t arg)
1216 {
1217 int error = EINVAL;
1218
1219 switch (cmd) {
1220 case VNG_SYSC_PING:
1221 if (0 == arg) {
1222 error = 0;
1223 }
1224 break;
1225 case VNG_SYSC_SET_GUARD: {
1226 struct vnguard_set vns;
1227 error = copyin(arg, (void *)&vns, sizeof(vns));
1228 if (error) {
1229 break;
1230 }
1231 error = vnguard_sysc_setguard(p, &vns);
1232 break;
1233 }
1234 case VNG_SYSC_GET_ATTR: {
1235 struct vnguard_getattr vga;
1236 error = copyin(arg, (void *)&vga, sizeof(vga));
1237 if (error) {
1238 break;
1239 }
1240 error = vnguard_sysc_getguardattr(p, &vga);
1241 if (error) {
1242 break;
1243 }
1244 error = copyout((void *)&vga, arg, sizeof(vga));
1245 break;
1246 }
1247 default:
1248 break;
1249 }
1250 return error;
1251 }
1252
/*
 * This is called just before the fileglob disappears in fg_free().
 * Take the exclusive lock: no other thread can add or remove
 * a vng_info to any vnode in the system.
 */
static void
vng_file_label_destroy(struct label *label)
{
	lck_rw_lock_exclusive(&llock);
	struct vng_owner *lvgo = vng_lbl_get(label);
	if (lvgo) {
		/* detach this owner from the fileglob's label */
		vng_lbl_set(label, 0);
		struct vng_info *vgi = lvgo->vgo_vgi;
		assert(vgi);
		if (vgi_remove_vgo(vgi, lvgo)) {
			/* that was the last reference */
			vgi->vgi_attrs = 0;
			struct fileglob *fg = lvgo->vgo_fg;
			assert(fg);
			if (DTYPE_VNODE == FILEGLOB_DTYPE(fg)) {
				struct vnode *vp = fg->fg_data;
				int error = vnode_getwithref(vp);
				if (0 == error) {
					/* strip the guard from the vnode too */
					vng_lbl_set(vp->v_label, 0);
					/*
					 * Drop the lock before releasing the
					 * O_EVTONLY reference taken at guard
					 * time: the release may re-enter VFS.
					 */
					lck_rw_unlock_exclusive(&llock);
					/* may trigger VNOP_INACTIVE */
					vnode_rele_ext(vp, O_EVTONLY, 0);
					vnode_put(vp);
					free_vgi(vgi);
					free_vgo(lvgo);
					return;
				}
			}
		}
		free_vgo(lvgo);
	}
	lck_rw_unlock_exclusive(&llock);
}
1291
1292 static os_reason_t
1293 vng_reason_from_pathname(const char *path, uint32_t pathlen)
1294 {
1295 os_reason_t r = os_reason_create(OS_REASON_GUARD, GUARD_REASON_VNODE);
1296 if (NULL == r) {
1297 return r;
1298 }
1299 /*
1300 * If the pathname is very long, just keep the trailing part
1301 */
1302 const uint32_t pathmax = 3 * EXIT_REASON_USER_DESC_MAX_LEN / 4;
1303 if (pathlen > pathmax) {
1304 path += (pathlen - pathmax);
1305 pathlen = pathmax;
1306 }
1307 uint32_t rsize = kcdata_estimate_required_buffer_size(1, pathlen);
1308 if (0 == os_reason_alloc_buffer(r, rsize)) {
1309 struct kcdata_descriptor *kcd = &r->osr_kcd_descriptor;
1310 mach_vm_address_t addr;
1311 if (kcdata_get_memory_addr(kcd,
1312 EXIT_REASON_USER_DESC, pathlen, &addr) == KERN_SUCCESS) {
1313 kcdata_memcpy(kcd, addr, path, pathlen);
1314 return r;
1315 }
1316 }
1317 os_reason_free(r);
1318 return OS_REASON_NULL;
1319 }
1320
1321 static int vng_policy_flags;
1322
1323 /*
1324 * Note: if an EXC_GUARD is generated, llock will be dropped and
1325 * subsequently reacquired by this routine. Data derived from
1326 * any label in the caller should be regenerated.
1327 */
1328 static int
1329 vng_guard_violation(const struct vng_info *vgi,
1330 unsigned opval, vnode_t vp)
1331 {
1332 int retval = 0;
1333
1334 if (vng_policy_flags & kVNG_POLICY_EPERM) {
1335 /* deny the operation */
1336 retval = EPERM;
1337 }
1338
1339 if (vng_policy_flags & (kVNG_POLICY_LOGMSG | kVNG_POLICY_UPRINTMSG)) {
1340 /* log a message */
1341 const char *op;
1342 switch (opval) {
1343 case VNG_RENAME_FROM:
1344 op = "rename-from";
1345 break;
1346 case VNG_RENAME_TO:
1347 op = "rename-to";
1348 break;
1349 case VNG_UNLINK:
1350 op = "unlink";
1351 break;
1352 case VNG_LINK:
1353 op = "link";
1354 break;
1355 case VNG_EXCHDATA:
1356 op = "exchdata";
1357 break;
1358 case VNG_WRITE_OTHER:
1359 op = "write";
1360 break;
1361 case VNG_TRUNC_OTHER:
1362 op = "truncate";
1363 break;
1364 default:
1365 op = "(unknown)";
1366 break;
1367 }
1368
1369 const char *nm = vnode_getname(vp);
1370 proc_t p = current_proc();
1371 const struct vng_owner *vgo;
1372 TAILQ_FOREACH(vgo, &vgi->vgi_owners, vgo_link) {
1373 const char fmt[] =
1374 "%s[%d]: %s%s: '%s' guarded by %s[%d] (0x%llx)\n";
1375
1376 if (vng_policy_flags & kVNG_POLICY_LOGMSG) {
1377 printf(fmt,
1378 proc_name_address(p), proc_pid(p), op,
1379 0 != retval ? " denied" : "",
1380 NULL != nm ? nm : "(unknown)",
1381 proc_name_address(vgo->vgo_p),
1382 proc_pid(vgo->vgo_p), vgi->vgi_guard);
1383 }
1384 if (vng_policy_flags & kVNG_POLICY_UPRINTMSG) {
1385 uprintf(fmt,
1386 proc_name_address(p), proc_pid(p), op,
1387 0 != retval ? " denied" : "",
1388 NULL != nm ? nm : "(unknown)",
1389 proc_name_address(vgo->vgo_p),
1390 proc_pid(vgo->vgo_p), vgi->vgi_guard);
1391 }
1392 }
1393 if (NULL != nm) {
1394 vnode_putname(nm);
1395 }
1396 }
1397
1398 if (vng_policy_flags & (kVNG_POLICY_EXC | kVNG_POLICY_EXC_CORPSE)) {
1399 /* EXC_GUARD exception */
1400 const struct vng_owner *vgo = TAILQ_FIRST(&vgi->vgi_owners);
1401 pid_t pid = vgo ? proc_pid(vgo->vgo_p) : 0;
1402 mach_exception_code_t code;
1403 mach_exception_subcode_t subcode;
1404
1405 code = 0;
1406 EXC_GUARD_ENCODE_TYPE(code, GUARD_TYPE_VN);
1407 EXC_GUARD_ENCODE_FLAVOR(code, opval);
1408 EXC_GUARD_ENCODE_TARGET(code, pid);
1409 subcode = vgi->vgi_guard;
1410
1411 lck_rw_unlock_shared(&llock);
1412
1413 if (vng_policy_flags & kVNG_POLICY_EXC_CORPSE) {
1414 char *path;
1415 int len = MAXPATHLEN;
1416 MALLOC(path, char *, len, M_TEMP, M_WAITOK);
1417 os_reason_t r = NULL;
1418 if (NULL != path) {
1419 vn_getpath(vp, path, &len);
1420 if (*path && len) {
1421 r = vng_reason_from_pathname(path, len);
1422 }
1423 }
1424 task_violated_guard(code, subcode, r); /* not fatal */
1425 if (NULL != r) {
1426 os_reason_free(r);
1427 }
1428 if (NULL != path) {
1429 FREE(path, M_TEMP);
1430 }
1431 } else {
1432 thread_t t = current_thread();
1433 thread_guard_violation(t, code, subcode, TRUE);
1434 }
1435
1436 lck_rw_lock_shared(&llock);
1437 } else if (vng_policy_flags & kVNG_POLICY_SIGKILL) {
1438 proc_t p = current_proc();
1439 psignal(p, SIGKILL);
1440 }
1441
1442 return retval;
1443 }
1444
1445 /*
1446 * A fatal vnode guard was tripped on this thread.
1447 *
1448 * (Invoked before returning to userland from the syscall handler.)
1449 */
1450 void
1451 vn_guard_ast(thread_t __unused t,
1452 mach_exception_data_type_t code, mach_exception_data_type_t subcode)
1453 {
1454 task_exception_notify(EXC_GUARD, code, subcode);
1455 proc_t p = current_proc();
1456 psignal(p, SIGKILL);
1457 }
1458
1459 /*
1460 * vnode callbacks
1461 */
1462
1463 static int
1464 vng_vnode_check_rename(kauth_cred_t __unused cred,
1465 struct vnode *__unused dvp, struct label *__unused dlabel,
1466 struct vnode *vp, struct label *label,
1467 struct componentname *__unused cnp,
1468 struct vnode *__unused tdvp, struct label *__unused tdlabel,
1469 struct vnode *tvp, struct label *tlabel,
1470 struct componentname *__unused tcnp)
1471 {
1472 int error = 0;
1473 if (NULL != label || NULL != tlabel) {
1474 lck_rw_lock_shared(&llock);
1475 const struct vng_info *vgi =
1476 vng_lbl_get_withattr(label, VNG_RENAME_FROM);
1477 if (NULL != vgi) {
1478 error = vng_guard_violation(vgi, VNG_RENAME_FROM, vp);
1479 }
1480 if (0 == error) {
1481 vgi = vng_lbl_get_withattr(tlabel, VNG_RENAME_TO);
1482 if (NULL != vgi) {
1483 error = vng_guard_violation(vgi,
1484 VNG_RENAME_TO, tvp);
1485 }
1486 }
1487 lck_rw_unlock_shared(&llock);
1488 }
1489 return error;
1490 }
1491
1492 static int
1493 vng_vnode_check_link(kauth_cred_t __unused cred,
1494 struct vnode *__unused dvp, struct label *__unused dlabel,
1495 struct vnode *vp, struct label *label, struct componentname *__unused cnp)
1496 {
1497 int error = 0;
1498 if (NULL != label) {
1499 lck_rw_lock_shared(&llock);
1500 const struct vng_info *vgi =
1501 vng_lbl_get_withattr(label, VNG_LINK);
1502 if (vgi) {
1503 error = vng_guard_violation(vgi, VNG_LINK, vp);
1504 }
1505 lck_rw_unlock_shared(&llock);
1506 }
1507 return error;
1508 }
1509
1510 static int
1511 vng_vnode_check_unlink(kauth_cred_t __unused cred,
1512 struct vnode *__unused dvp, struct label *__unused dlabel,
1513 struct vnode *vp, struct label *label, struct componentname *__unused cnp)
1514 {
1515 int error = 0;
1516 if (NULL != label) {
1517 lck_rw_lock_shared(&llock);
1518 const struct vng_info *vgi =
1519 vng_lbl_get_withattr(label, VNG_UNLINK);
1520 if (vgi) {
1521 error = vng_guard_violation(vgi, VNG_UNLINK, vp);
1522 }
1523 lck_rw_unlock_shared(&llock);
1524 }
1525 return error;
1526 }
1527
1528 /*
1529 * Only check violations for writes performed by "other processes"
1530 */
1531 static int
1532 vng_vnode_check_write(kauth_cred_t __unused actv_cred,
1533 kauth_cred_t __unused file_cred, struct vnode *vp, struct label *label)
1534 {
1535 int error = 0;
1536 if (NULL != label) {
1537 lck_rw_lock_shared(&llock);
1538 const struct vng_info *vgi =
1539 vng_lbl_get_withattr(label, VNG_WRITE_OTHER);
1540 if (vgi) {
1541 proc_t p = current_proc();
1542 const struct vng_owner *vgo;
1543 TAILQ_FOREACH(vgo, &vgi->vgi_owners, vgo_link) {
1544 if (vgo->vgo_p == p) {
1545 goto done;
1546 }
1547 }
1548 error = vng_guard_violation(vgi, VNG_WRITE_OTHER, vp);
1549 }
1550 done:
1551 lck_rw_unlock_shared(&llock);
1552 }
1553 return error;
1554 }
1555
1556 /*
1557 * Only check violations for truncates performed by "other processes"
1558 */
1559 static int
1560 vng_vnode_check_truncate(kauth_cred_t __unused actv_cred,
1561 kauth_cred_t __unused file_cred, struct vnode *vp,
1562 struct label *label)
1563 {
1564 int error = 0;
1565 if (NULL != label) {
1566 lck_rw_lock_shared(&llock);
1567 const struct vng_info *vgi =
1568 vng_lbl_get_withattr(label, VNG_TRUNC_OTHER);
1569 if (vgi) {
1570 proc_t p = current_proc();
1571 const struct vng_owner *vgo;
1572 TAILQ_FOREACH(vgo, &vgi->vgi_owners, vgo_link) {
1573 if (vgo->vgo_p == p) {
1574 goto done;
1575 }
1576 }
1577 error = vng_guard_violation(vgi, VNG_TRUNC_OTHER, vp);
1578 }
1579 done:
1580 lck_rw_unlock_shared(&llock);
1581 }
1582 return error;
1583 }
1584
1585 static int
1586 vng_vnode_check_exchangedata(kauth_cred_t __unused cred,
1587 struct vnode *fvp, struct label *flabel,
1588 struct vnode *svp, struct label *slabel)
1589 {
1590 int error = 0;
1591 if (NULL != flabel || NULL != slabel) {
1592 lck_rw_lock_shared(&llock);
1593 const struct vng_info *vgi =
1594 vng_lbl_get_withattr(flabel, VNG_EXCHDATA);
1595 if (NULL != vgi) {
1596 error = vng_guard_violation(vgi, VNG_EXCHDATA, fvp);
1597 }
1598 if (0 == error) {
1599 vgi = vng_lbl_get_withattr(slabel, VNG_EXCHDATA);
1600 if (NULL != vgi) {
1601 error = vng_guard_violation(vgi,
1602 VNG_EXCHDATA, svp);
1603 }
1604 }
1605 lck_rw_unlock_shared(&llock);
1606 }
1607 return error;
1608 }
1609
1610 /* Intercept open-time truncations (by "other") of a guarded vnode */
1611
1612 static int
1613 vng_vnode_check_open(kauth_cred_t cred,
1614 struct vnode *vp, struct label *label, int acc_mode)
1615 {
1616 if (0 == (acc_mode & O_TRUNC)) {
1617 return 0;
1618 }
1619 return vng_vnode_check_truncate(cred, NULL, vp, label);
1620 }
1621
/*
 * Configuration gorp
 */

/* mpo_policy_init hook: set up the rw-lock protecting all guard labels */
static void
vng_init(struct mac_policy_conf *mpc)
{
	llock_grp = lck_grp_alloc_init(mpc->mpc_name, LCK_GRP_ATTR_NULL);
	lck_rw_init(&llock, llock_grp, LCK_ATTR_NULL);
}
1632
/* MACF entry points implemented by the vnguard policy */
SECURITY_READ_ONLY_EARLY(static struct mac_policy_ops) vng_policy_ops = {
	.mpo_file_label_destroy = vng_file_label_destroy,

	.mpo_vnode_check_link = vng_vnode_check_link,
	.mpo_vnode_check_unlink = vng_vnode_check_unlink,
	.mpo_vnode_check_rename = vng_vnode_check_rename,
	.mpo_vnode_check_write = vng_vnode_check_write,
	.mpo_vnode_check_truncate = vng_vnode_check_truncate,
	.mpo_vnode_check_exchangedata = vng_vnode_check_exchangedata,
	.mpo_vnode_check_open = vng_vnode_check_open,

	.mpo_policy_syscall = vng_policy_syscall,
	.mpo_policy_init = vng_init,
};
1647
/* label name advertised by this policy */
static const char *vng_labelnames[] = {
	"vnguard",
};

/* element count of a statically-sized array */
#define ACOUNT(arr) ((unsigned)(sizeof (arr) / sizeof (arr[0])))

/* registration record handed to mac_policy_register() */
SECURITY_READ_ONLY_LATE(static struct mac_policy_conf) vng_policy_conf = {
	.mpc_name = VNG_POLICY_NAME,
	.mpc_fullname = "Guarded vnode policy",
	.mpc_field_off = &label_slot,
	.mpc_labelnames = vng_labelnames,
	.mpc_labelname_count = ACOUNT(vng_labelnames),
	.mpc_ops = &vng_policy_ops,
	.mpc_loadtime_flags = 0,
	.mpc_runtime_flags = 0
};

/* handle filled in by mac_policy_register() */
SECURITY_READ_ONLY_LATE(static mac_policy_handle_t) vng_policy_handle;
1666
1667 void
1668 vnguard_policy_init(void)
1669 {
1670 if (0 == PE_i_can_has_debugger(NULL)) {
1671 return;
1672 }
1673 vng_policy_flags = kVNG_POLICY_LOGMSG |
1674 kVNG_POLICY_EXC_CORPSE | kVNG_POLICY_UPRINTMSG;
1675 PE_parse_boot_argn("vnguard", &vng_policy_flags, sizeof(vng_policy_flags));
1676 if (vng_policy_flags) {
1677 mac_policy_register(&vng_policy_conf, &vng_policy_handle, NULL);
1678 }
1679 }
1680
#if DEBUG || DEVELOPMENT
#include <sys/sysctl.h>

/* kern.vnguard.flags: runtime-tunable policy flags (debug builds only) */
SYSCTL_DECL(_kern_vnguard);
SYSCTL_NODE(_kern, OID_AUTO, vnguard, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "vnguard");
SYSCTL_INT(_kern_vnguard, OID_AUTO, flags, CTLFLAG_RW | CTLFLAG_LOCKED,
    &vng_policy_flags, 0, "vnguard policy flags");
#endif
1689
1690 #endif /* CONFIG_MACF && CONFIG_VNGUARD */