1 /*
2 * Copyright (c) 2015-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/filedesc.h>
32 #include <sys/kernel.h>
33 #include <sys/file_internal.h>
34 #include <kern/exc_guard.h>
35 #include <sys/guarded.h>
36 #include <kern/kalloc.h>
37 #include <sys/sysproto.h>
38 #include <sys/vnode.h>
39 #include <sys/vnode_internal.h>
40 #include <sys/uio_internal.h>
41 #include <sys/ubc_internal.h>
42 #include <vfs/vfs_support.h>
43 #include <security/audit/audit.h>
44 #include <sys/syscall.h>
45 #include <sys/kauth.h>
46 #include <sys/kdebug.h>
47 #include <stdbool.h>
48 #include <vm/vm_protos.h>
49 #include <libkern/section_keywords.h>
50 #if CONFIG_MACF && CONFIG_VNGUARD
51 #include <security/mac.h>
52 #include <security/mac_framework.h>
53 #include <security/mac_policy.h>
54 #include <pexpert/pexpert.h>
55 #include <sys/sysctl.h>
56 #endif
57
58
59 #define f_flag f_fglob->fg_flag
60 #define f_type f_fglob->fg_ops->fo_type
61 extern int dofilewrite(vfs_context_t ctx, struct fileproc *fp,
62 user_addr_t bufp, user_size_t nbyte, off_t offset,
63 int flags, user_ssize_t *retval );
64 extern int wr_uio(struct proc *p, struct fileproc *fp, uio_t uio, user_ssize_t *retval);
65
66 /*
67 * Experimental guarded file descriptor support.
68 */
69
70 kern_return_t task_exception_notify(exception_type_t exception,
71 mach_exception_data_type_t code, mach_exception_data_type_t subcode);
72 kern_return_t task_violated_guard(mach_exception_code_t, mach_exception_subcode_t, void *);
73
74 /*
75 * Most fd's have an underlying fileproc struct; but some may be
76 * guarded_fileproc structs which implement guarded fds. The latter
77 * struct (below) embeds the former.
78 *
79 * The two types should be distinguished by the "type" portion of f_flags.
80 * There's also a magic number to help catch misuse and bugs.
81 *
82 * This is a bit unpleasant, but results from the desire to allow
83 * alternate file behaviours for a few file descriptors without
84 * growing the fileproc data structure.
85 */
86
87 struct guarded_fileproc {
88 struct fileproc gf_fileproc;
89 u_int gf_magic;
90 u_int gf_attrs;
91 guardid_t gf_guard;
92 };
93
94 const size_t sizeof_guarded_fileproc = sizeof (struct guarded_fileproc);
95
96 #define FP_TO_GFP(fp) ((struct guarded_fileproc *)(fp))
97 #define GFP_TO_FP(gfp) (&(gfp)->gf_fileproc)
98
99 #define GUARDED_FILEPROC_MAGIC 0x29083
100
101 struct gfp_crarg {
102 guardid_t gca_guard;
103 u_int gca_attrs;
104 };
105
106 static struct fileproc *
107 guarded_fileproc_alloc_init(void *crarg)
108 {
109 struct gfp_crarg *aarg = crarg;
110 struct guarded_fileproc *gfp;
111
112 if ((gfp = kalloc(sizeof (*gfp))) == NULL)
113 return (NULL);
114
115 bzero(gfp, sizeof (*gfp));
116 gfp->gf_fileproc.f_flags = FTYPE_GUARDED;
117 gfp->gf_magic = GUARDED_FILEPROC_MAGIC;
118 gfp->gf_guard = aarg->gca_guard;
119 gfp->gf_attrs = aarg->gca_attrs;
120
121 return (GFP_TO_FP(gfp));
122 }
123
124 void
125 guarded_fileproc_free(struct fileproc *fp)
126 {
127 struct guarded_fileproc *gfp = FP_TO_GFP(fp);
128
129 if (FILEPROC_TYPE(fp) != FTYPE_GUARDED ||
130 GUARDED_FILEPROC_MAGIC != gfp->gf_magic)
131 panic("%s: corrupt fp %p flags %x", __func__, fp, fp->f_flags);
132
133 kfree(gfp, sizeof (*gfp));
134 }
135
136 static int
137 fp_lookup_guarded(proc_t p, int fd, guardid_t guard,
138 struct guarded_fileproc **gfpp, int locked)
139 {
140 struct fileproc *fp;
141 int error;
142
143 if ((error = fp_lookup(p, fd, &fp, locked)) != 0)
144 return (error);
145 if (FILEPROC_TYPE(fp) != FTYPE_GUARDED) {
146 (void) fp_drop(p, fd, fp, locked);
147 return (EINVAL);
148 }
149 struct guarded_fileproc *gfp = FP_TO_GFP(fp);
150
151 if (GUARDED_FILEPROC_MAGIC != gfp->gf_magic)
152 panic("%s: corrupt fp %p", __func__, fp);
153
154 if (guard != gfp->gf_guard) {
155 (void) fp_drop(p, fd, fp, locked);
156 return (EPERM); /* *not* a mismatch exception */
157 }
158 if (gfpp)
159 *gfpp = gfp;
160 return (0);
161 }
162
163 /*
164 * Expected use pattern:
165 *
166 * if (FP_ISGUARDED(fp, GUARD_CLOSE)) {
167 * error = fp_guard_exception(p, fd, fp, kGUARD_EXC_CLOSE);
168 * proc_fdunlock(p);
169 * return (error);
170 * }
171 */
172
173 int
174 fp_isguarded(struct fileproc *fp, u_int attrs)
175 {
176 if (FILEPROC_TYPE(fp) == FTYPE_GUARDED) {
177 struct guarded_fileproc *gfp = FP_TO_GFP(fp);
178
179 if (GUARDED_FILEPROC_MAGIC != gfp->gf_magic)
180 panic("%s: corrupt gfp %p flags %x",
181 __func__, gfp, fp->f_flags);
182 return ((attrs & gfp->gf_attrs) == attrs);
183 }
184 return (0);
185 }
186
187 extern char *proc_name_address(void *p);
188
189 int
190 fp_guard_exception(proc_t p, int fd, struct fileproc *fp, u_int flavor)
191 {
192 if (FILEPROC_TYPE(fp) != FTYPE_GUARDED)
193 panic("%s corrupt fp %p flags %x", __func__, fp, fp->f_flags);
194
195 struct guarded_fileproc *gfp = FP_TO_GFP(fp);
196 /* all gfd fields protected via proc_fdlock() */
197 proc_fdlock_assert(p, LCK_MTX_ASSERT_OWNED);
198
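/*
 * Pack the guard type, exception flavor and offending fd into the
 * EXC_GUARD code word; the guard value itself is delivered as the subcode.
 */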
199 mach_exception_code_t code = 0;
200 EXC_GUARD_ENCODE_TYPE(code, GUARD_TYPE_FD);
201 EXC_GUARD_ENCODE_FLAVOR(code, flavor);
202 EXC_GUARD_ENCODE_TARGET(code, fd);
203 mach_exception_subcode_t subcode = gfp->gf_guard;
204
205 thread_t t = current_thread();
206 thread_guard_violation(t, code, subcode);
207 return (EPERM);
208 }
209
210 /*
211 * (Invoked before returning to userland from the syscall handler.)
212 */
213 void
214 fd_guard_ast(
215 thread_t __unused t,
216 mach_exception_code_t code,
217 mach_exception_subcode_t subcode)
218 {
219 task_exception_notify(EXC_GUARD, code, subcode);
220 proc_t p = current_proc();
221 psignal(p, SIGKILL);
222 }
223
224 /*
225 * Experimental guarded file descriptor SPIs
226 */
227
228 /*
229 * int guarded_open_np(const char *pathname, int flags,
230 * const guardid_t *guard, u_int guardflags, ...);
231 *
232 * In this initial implementation, GUARD_DUP must be specified.
233 * GUARD_CLOSE, GUARD_SOCKET_IPC and GUARD_FILEPORT are optional.
234 *
235 * If GUARD_DUP wasn't specified, then we'd have to do the (extra) work
236 * to allow dup-ing a descriptor to inherit the guard onto the new
237 * descriptor. (Perhaps GUARD_DUP behaviours should just always be true
238 * for a guarded fd? Or, more sanely, all the dup operations should
239 * just always propagate the guard?)
240 *
241 * Guarded descriptors are always close-on-exec, and GUARD_CLOSE
242 * requires close-on-fork; O_CLOEXEC must be set in flags.
243 * This setting is immutable; attempts to clear the flag will
244 * cause a guard exception.
245 *
246 * XXX It's somewhat broken that change_fdguard_np() can completely
247 * remove the guard and thus revoke the immutability
248 * promises above. Ick.
249 */
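/*
 * Hypothetical userland sketch (illustration only; the actual SPI declaration
 * lives in <sys/guarded.h>). Following the prototype above: the guard id must
 * be non-zero, GUARD_DUP must appear in guardflags, and O_CLOEXEC must appear
 * in flags, otherwise the call fails with EINVAL as enforced below.
 *
 * guardid_t guard = 0x6861726d6c657373ULL;   // any non-zero value
 * int fd = guarded_open_np("/tmp/example", O_CREAT | O_RDWR | O_CLOEXEC,
 *     &guard, GUARD_DUP | GUARD_CLOSE, 0644);
 * if (fd == -1)
 *     err(1, "guarded_open_np");
 */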
250 int
251 guarded_open_np(proc_t p, struct guarded_open_np_args *uap, int32_t *retval)
252 {
253 if ((uap->flags & O_CLOEXEC) == 0)
254 return (EINVAL);
255
256 #define GUARD_REQUIRED (GUARD_DUP)
257 #define GUARD_ALL (GUARD_REQUIRED | \
258 (GUARD_CLOSE | GUARD_SOCKET_IPC | GUARD_FILEPORT | GUARD_WRITE))
259
260 if (((uap->guardflags & GUARD_REQUIRED) != GUARD_REQUIRED) ||
261 ((uap->guardflags & ~GUARD_ALL) != 0))
262 return (EINVAL);
263
264 int error;
265 struct gfp_crarg crarg = {
266 .gca_attrs = uap->guardflags
267 };
268
269 if ((error = copyin(uap->guard,
270 &(crarg.gca_guard), sizeof (crarg.gca_guard))) != 0)
271 return (error);
272
273 /*
274 * Disallow certain guard values -- is zero enough?
275 */
276 if (crarg.gca_guard == 0)
277 return (EINVAL);
278
279 struct filedesc *fdp = p->p_fd;
280 struct vnode_attr va;
281 struct nameidata nd;
282 vfs_context_t ctx = vfs_context_current();
283 int cmode;
284
285 VATTR_INIT(&va);
286 cmode = ((uap->mode & ~fdp->fd_cmask) & ALLPERMS) & ~S_ISTXT;
287 VATTR_SET(&va, va_mode, cmode & ACCESSPERMS);
288
289 NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW | AUDITVNPATH1, UIO_USERSPACE,
290 uap->path, ctx);
291
292 return (open1(ctx, &nd, uap->flags | O_CLOFORK, &va,
293 guarded_fileproc_alloc_init, &crarg, retval));
294 }
295
296 /*
297 * int guarded_open_dprotected_np(const char *pathname, int flags,
298 * const guardid_t *guard, u_int guardflags, int dpclass, int dpflags, ...);
299 *
300 * This SPI is an extension of guarded_open_np() that adds a data protection class
301 * ("dpclass") and data protection flags ("dpflags") at creation time. Otherwise its
302 * behavior is the same as guarded_open_np().
303 */
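/*
 * Hypothetical userland sketch (illustration only; the constant name
 * PROTECTION_CLASS_D is assumed from <sys/content_protection.h>). The raw
 * O_DP_GETRAW* dpflags are only legal on read-only opens, as checked below.
 *
 * int fd = guarded_open_dprotected_np("/private/var/example",
 *     O_CREAT | O_RDWR | O_CLOEXEC, &guard, GUARD_DUP,
 *     PROTECTION_CLASS_D, 0, 0600);
 */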
304 int
305 guarded_open_dprotected_np(proc_t p, struct guarded_open_dprotected_np_args *uap, int32_t *retval)
306 {
307 if ((uap->flags & O_CLOEXEC) == 0)
308 return (EINVAL);
309
310 if (((uap->guardflags & GUARD_REQUIRED) != GUARD_REQUIRED) ||
311 ((uap->guardflags & ~GUARD_ALL) != 0))
312 return (EINVAL);
313
314 int error;
315 struct gfp_crarg crarg = {
316 .gca_attrs = uap->guardflags
317 };
318
319 if ((error = copyin(uap->guard,
320 &(crarg.gca_guard), sizeof (crarg.gca_guard))) != 0)
321 return (error);
322
323 /*
324 * Disallow certain guard values -- is zero enough?
325 */
326 if (crarg.gca_guard == 0)
327 return (EINVAL);
328
329 struct filedesc *fdp = p->p_fd;
330 struct vnode_attr va;
331 struct nameidata nd;
332 vfs_context_t ctx = vfs_context_current();
333 int cmode;
334
335 VATTR_INIT(&va);
336 cmode = ((uap->mode & ~fdp->fd_cmask) & ALLPERMS) & ~S_ISTXT;
337 VATTR_SET(&va, va_mode, cmode & ACCESSPERMS);
338
339 NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW | AUDITVNPATH1, UIO_USERSPACE,
340 uap->path, ctx);
341
342 /*
343 * Initialize the extra fields in vnode_attr to pass down the data
344 * protection parameters:
345 * 1. the target cprotect class.
346 * 2. a flag marking the open as requiring open-raw-encrypted semantics.
347 */
348 if (uap->flags & O_CREAT) {
349 VATTR_SET(&va, va_dataprotect_class, uap->dpclass);
350 }
351
352 if (uap->dpflags & (O_DP_GETRAWENCRYPTED|O_DP_GETRAWUNENCRYPTED)) {
353 if ( uap->flags & (O_RDWR | O_WRONLY)) {
354 /* Not allowed to write raw encrypted bytes */
355 return EINVAL;
356 }
357 if (uap->dpflags & O_DP_GETRAWENCRYPTED) {
358 VATTR_SET(&va, va_dataprotect_flags, VA_DP_RAWENCRYPTED);
359 }
360 if (uap->dpflags & O_DP_GETRAWUNENCRYPTED) {
361 VATTR_SET(&va, va_dataprotect_flags, VA_DP_RAWUNENCRYPTED);
362 }
363 }
364
365 return (open1(ctx, &nd, uap->flags | O_CLOFORK, &va,
366 guarded_fileproc_alloc_init, &crarg, retval));
367 }
368
369 /*
370 * int guarded_kqueue_np(const guardid_t *guard, u_int guardflags);
371 *
372 * Create a guarded kqueue descriptor with guardid and guardflags.
373 *
374 * Same restrictions on guardflags as for guarded_open_np().
375 * All kqueues are -always- close-on-exec and close-on-fork by themselves
376 * and are not sendable.
377 */
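/*
 * Hypothetical userland sketch (illustration only): the guardflags rules are
 * the same as for guarded_open_np(), and the guard id must be non-zero.
 *
 * guardid_t guard = 0x12345678ULL;
 * int kq = guarded_kqueue_np(&guard, GUARD_DUP | GUARD_CLOSE);
 */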
378 int
379 guarded_kqueue_np(proc_t p, struct guarded_kqueue_np_args *uap, int32_t *retval)
380 {
381 if (((uap->guardflags & GUARD_REQUIRED) != GUARD_REQUIRED) ||
382 ((uap->guardflags & ~GUARD_ALL) != 0))
383 return (EINVAL);
384
385 int error;
386 struct gfp_crarg crarg = {
387 .gca_attrs = uap->guardflags
388 };
389
390 if ((error = copyin(uap->guard,
391 &(crarg.gca_guard), sizeof (crarg.gca_guard))) != 0)
392 return (error);
393
394 if (crarg.gca_guard == 0)
395 return (EINVAL);
396
397 return (kqueue_body(p, guarded_fileproc_alloc_init, &crarg, retval));
398 }
399
400 /*
401 * int guarded_close_np(int fd, const guardid_t *guard);
402 */
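/*
 * Hypothetical userland sketch (illustration only): presenting the matching
 * guard is the only way to close a GUARD_CLOSE-guarded descriptor; a plain
 * close(2) on such an fd trips the guard instead.
 *
 * if (guarded_close_np(fd, &guard) == -1)
 *     err(1, "guarded_close_np");
 */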
403 int
404 guarded_close_np(proc_t p, struct guarded_close_np_args *uap,
405 __unused int32_t *retval)
406 {
407 struct guarded_fileproc *gfp;
408 int fd = uap->fd;
409 int error;
410 guardid_t uguard;
411
412 AUDIT_SYSCLOSE(p, fd);
413
414 if ((error = copyin(uap->guard, &uguard, sizeof (uguard))) != 0)
415 return (error);
416
417 proc_fdlock(p);
418 if ((error = fp_lookup_guarded(p, fd, uguard, &gfp, 1)) != 0) {
419 proc_fdunlock(p);
420 return (error);
421 }
422 error = close_internal_locked(p, fd, GFP_TO_FP(gfp), 0);
423 proc_fdunlock(p);
424 return (error);
425 }
426
427 /*
428 * int
429 * change_fdguard_np(int fd, const guardid_t *guard, u_int guardflags,
430 * const guardid_t *nguard, u_int nguardflags, int *fdflagsp);
431 *
432 * Given a file descriptor, atomically exchange <guard, guardflags> for
433 * a new guard <nguard, nguardflags>, returning the previous fd
434 * flags (see fcntl:F_SETFD) in *fdflagsp.
435 *
436 * This syscall can be used to either (a) add a new guard to an existing
437 * unguarded file descriptor (b) remove the old guard from an existing
438 * guarded file descriptor or (c) change the guard (guardid and/or
439 * guardflags) on a guarded file descriptor.
440 *
441 * If 'guard' is NULL, fd must be unguarded at entry. If the call completes
442 * successfully the fd will be guarded with <nguard, nguardflags>.
443 *
444 * Guarding a file descriptor has some side-effects on the "fdflags"
445 * associated with the descriptor - in particular FD_CLOEXEC is
446 * forced ON unconditionally, and FD_CLOFORK is forced ON by GUARD_CLOSE.
447 * Callers who wish to subsequently restore the state of the fd should save
448 * the value of *fdflagsp after a successful invocation.
449 *
450 * If 'nguard' is NULL, fd must be guarded at entry, <guard, guardflags>
451 * must match with what's already guarding the descriptor, and the
452 * result will be to completely remove the guard. Note also that the
453 * fdflags are copied to the descriptor from the incoming *fdflagsp argument.
454 *
455 * If the descriptor is guarded, and neither 'guard' nor 'nguard' is NULL
456 * and <guard, guardflags> matches what's already guarding the descriptor,
457 * then <nguard, nguardflags> becomes the new guard. In this case, even if
458 * the GUARD_CLOSE flag is being cleared, it is still possible to continue
459 * to keep FD_CLOFORK on the descriptor by passing FD_CLOFORK via fdflagsp.
460 *
461 * (File descriptors whose underlying fileglobs are marked FG_CONFINED are
462 * still close-on-fork, regardless of the setting of FD_CLOFORK.)
463 *
464 * Example 1: Guard an unguarded descriptor during a set of operations,
465 * then restore the original state of the descriptor.
466 *
467 * int sav_flags = 0;
468 * change_fdguard_np(fd, NULL, 0, &myguard, GUARD_CLOSE, &sav_flags);
469 * // do things with now guarded 'fd'
470 * change_fdguard_np(fd, &myguard, GUARD_CLOSE, NULL, 0, &sav_flags);
471 * // fd now unguarded.
472 *
473 * Example 2: Change the guard of a guarded descriptor during a set of
474 * operations, then restore the original state of the descriptor.
475 *
476 * int sav_flags = (gdflags & GUARD_CLOSE) ? FD_CLOFORK : 0;
477 * change_fdguard_np(fd, &gd, gdflags, &myguard, GUARD_CLOSE, &sav_flags);
478 * // do things with 'fd' with a different guard
479 * change_fdguard_np(fd, &myg, GUARD_CLOSE, &gd, gdflags, &sav_flags);
480 * // back to original guarded state
481 *
482 * XXX This SPI is too much of a chainsaw and should be revised.
483 */
484
485 int
486 change_fdguard_np(proc_t p, struct change_fdguard_np_args *uap,
487 __unused int32_t *retval)
488 {
489 struct fileproc *fp;
490 int fd = uap->fd;
491 int error;
492 guardid_t oldg = 0, newg = 0;
493 int nfdflags = 0;
494
495 if (0 != uap->guard &&
496 0 != (error = copyin(uap->guard, &oldg, sizeof (oldg))))
497 return (error); /* can't copyin current guard */
498
499 if (0 != uap->nguard &&
500 0 != (error = copyin(uap->nguard, &newg, sizeof (newg))))
501 return (error); /* can't copyin new guard */
502
503 if (0 != uap->fdflagsp &&
504 0 != (error = copyin(uap->fdflagsp, &nfdflags, sizeof (nfdflags))))
505 return (error); /* can't copyin new fdflags */
506
507 proc_fdlock(p);
508 restart:
509 if ((error = fp_lookup(p, fd, &fp, 1)) != 0) {
510 proc_fdunlock(p);
511 return (error);
512 }
513
514 if (0 != uap->fdflagsp) {
515 int ofdflags = FDFLAGS_GET(p, fd);
516 int ofl = ((ofdflags & UF_EXCLOSE) ? FD_CLOEXEC : 0) |
517 ((ofdflags & UF_FORKCLOSE) ? FD_CLOFORK : 0);
518 proc_fdunlock(p);
519 if (0 != (error = copyout(&ofl, uap->fdflagsp, sizeof (ofl)))) {
520 proc_fdlock(p);
521 goto dropout; /* can't copyout old fdflags */
522 }
523 proc_fdlock(p);
524 }
525
526 if (FILEPROC_TYPE(fp) == FTYPE_GUARDED) {
527 if (0 == uap->guard || 0 == uap->guardflags)
528 error = EINVAL; /* missing guard! */
529 else if (0 == oldg)
530 error = EPERM; /* guardids cannot be zero */
531 } else {
532 if (0 != uap->guard || 0 != uap->guardflags)
533 error = EINVAL; /* guard provided, but none needed! */
534 }
535
536 if (0 != error)
537 goto dropout;
538
539 if (0 != uap->nguard) {
540 /*
541 * There's a new guard in town.
542 */
543 if (0 == newg)
544 error = EINVAL; /* guards cannot contain zero */
545 else if (((uap->nguardflags & GUARD_REQUIRED) != GUARD_REQUIRED) ||
546 ((uap->nguardflags & ~GUARD_ALL) != 0))
547 error = EINVAL; /* must have valid attributes too */
548 if (0 != error)
549 goto dropout;
550
551 if (FILEPROC_TYPE(fp) == FTYPE_GUARDED) {
552 /*
553 * Replace old guard with new guard
554 */
555 struct guarded_fileproc *gfp = FP_TO_GFP(fp);
556
557 if (GUARDED_FILEPROC_MAGIC != gfp->gf_magic)
558 panic("%s: corrupt gfp %p flags %x",
559 __func__, gfp, fp->f_flags);
560
561 if (oldg == gfp->gf_guard &&
562 uap->guardflags == gfp->gf_attrs) {
563 /*
564 * Must match existing guard + attributes
565 * before we'll swap them to new ones, managing
566 * fdflags "side-effects" as we go. Note that
567 * userland can request FD_CLOFORK semantics.
568 */
569 if (gfp->gf_attrs & GUARD_CLOSE)
570 FDFLAGS_CLR(p, fd, UF_FORKCLOSE);
571 gfp->gf_guard = newg;
572 gfp->gf_attrs = uap->nguardflags;
573 if (gfp->gf_attrs & GUARD_CLOSE)
574 FDFLAGS_SET(p, fd, UF_FORKCLOSE);
575 FDFLAGS_SET(p, fd,
576 (nfdflags & FD_CLOFORK) ? UF_FORKCLOSE : 0);
577 /* FG_CONFINED enforced regardless */
578 } else {
579 error = EPERM;
580 }
581 goto dropout;
582 } else {
583 /*
584 * Add a guard to a previously unguarded descriptor
585 */
586 switch (FILEGLOB_DTYPE(fp->f_fglob)) {
587 case DTYPE_VNODE:
588 case DTYPE_PIPE:
589 case DTYPE_SOCKET:
590 case DTYPE_KQUEUE:
591 case DTYPE_NETPOLICY:
592 break;
593 default:
594 error = ENOTSUP;
595 goto dropout;
596 }
597
598 proc_fdunlock(p);
599
600 struct gfp_crarg crarg = {
601 .gca_guard = newg,
602 .gca_attrs = uap->nguardflags
603 };
604 struct fileproc *nfp =
605 guarded_fileproc_alloc_init(&crarg);
606 struct guarded_fileproc *gfp;
607
608 proc_fdlock(p);
609
610 switch (error = fp_tryswap(p, fd, nfp)) {
611 case 0: /* guarded-ness comes with side-effects */
612 gfp = FP_TO_GFP(nfp);
613 if (gfp->gf_attrs & GUARD_CLOSE)
614 FDFLAGS_SET(p, fd, UF_FORKCLOSE);
615 FDFLAGS_SET(p, fd, UF_EXCLOSE);
616 (void) fp_drop(p, fd, nfp, 1);
617 fileproc_free(fp);
618 break;
619 case EKEEPLOOKING: /* f_iocount indicates a collision */
620 (void) fp_drop(p, fd, fp, 1);
621 fileproc_free(nfp);
622 goto restart;
623 default:
624 (void) fp_drop(p, fd, fp, 1);
625 fileproc_free(nfp);
626 break;
627 }
628 proc_fdunlock(p);
629 return (error);
630 }
631 } else {
632 /*
633 * No new guard.
634 */
635 if (FILEPROC_TYPE(fp) == FTYPE_GUARDED) {
636 /*
637 * Remove the guard altogether.
638 */
639 struct guarded_fileproc *gfp = FP_TO_GFP(fp);
640
641 if (0 != uap->nguardflags) {
642 error = EINVAL;
643 goto dropout;
644 }
645
646 if (GUARDED_FILEPROC_MAGIC != gfp->gf_magic)
647 panic("%s: corrupt gfp %p flags %x",
648 __func__, gfp, fp->f_flags);
649
650 if (oldg != gfp->gf_guard ||
651 uap->guardflags != gfp->gf_attrs) {
652 error = EPERM;
653 goto dropout;
654 }
655
656 proc_fdunlock(p);
657 struct fileproc *nfp = fileproc_alloc_init(NULL);
658 proc_fdlock(p);
659
660 switch (error = fp_tryswap(p, fd, nfp)) {
661 case 0: /* undo side-effects of guarded-ness */
662 FDFLAGS_CLR(p, fd, UF_FORKCLOSE | UF_EXCLOSE);
663 FDFLAGS_SET(p, fd,
664 (nfdflags & FD_CLOFORK) ? UF_FORKCLOSE : 0);
665 /* FG_CONFINED enforced regardless */
666 FDFLAGS_SET(p, fd,
667 (nfdflags & FD_CLOEXEC) ? UF_EXCLOSE : 0);
668 (void) fp_drop(p, fd, nfp, 1);
669 fileproc_free(fp);
670 break;
671 case EKEEPLOOKING: /* f_iocount indicates collision */
672 (void) fp_drop(p, fd, fp, 1);
673 fileproc_free(nfp);
674 goto restart;
675 default:
676 (void) fp_drop(p, fd, fp, 1);
677 fileproc_free(nfp);
678 break;
679 }
680 proc_fdunlock(p);
681 return (error);
682 } else {
683 /*
684 * Not already guarded, and no new guard?
685 */
686 error = EINVAL;
687 }
688 }
689
690 dropout:
691 (void) fp_drop(p, fd, fp, 1);
692 proc_fdunlock(p);
693 return (error);
694 }
695
696 /*
697 * user_ssize_t guarded_write_np(int fd, const guardid_t *guard,
698 * user_addr_t cbuf, user_ssize_t nbyte);
699 *
700 * Initial implementation of guarded writes.
701 */
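/*
 * Hypothetical userland sketch (illustration only): behaves like write(2),
 * except that the supplied guard must match the one installed on fd
 * (otherwise EPERM, see fp_lookup_guarded() above).
 *
 * ssize_t n = guarded_write_np(fd, &guard, buf, len);
 */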
702 int
703 guarded_write_np(struct proc *p, struct guarded_write_np_args *uap, user_ssize_t *retval)
704 {
705 int error;
706 int fd = uap->fd;
707 guardid_t uguard;
708 struct fileproc *fp;
709 struct guarded_fileproc *gfp;
710 bool wrote_some = false;
711
712 AUDIT_ARG(fd, fd);
713
714 if ((error = copyin(uap->guard, &uguard, sizeof (uguard))) != 0)
715 return (error);
716
717 error = fp_lookup_guarded(p, fd, uguard, &gfp, 0);
718 if (error)
719 return(error);
720
721 fp = GFP_TO_FP(gfp);
722 if ((fp->f_flag & FWRITE) == 0) {
723 error = EBADF;
724 } else {
725
726 struct vfs_context context = *(vfs_context_current());
727 context.vc_ucred = fp->f_fglob->fg_cred;
728
729 error = dofilewrite(&context, fp, uap->cbuf, uap->nbyte,
730 (off_t)-1, 0, retval);
731 wrote_some = *retval > 0;
732 }
733 if (wrote_some)
734 fp_drop_written(p, fd, fp);
735 else
736 fp_drop(p, fd, fp, 0);
737 return(error);
738 }
739
740 /*
741 * user_ssize_t guarded_pwrite_np(int fd, const guardid_t *guard,
742 * user_addr_t buf, user_size_t nbyte, off_t offset);
743 *
744 * Initial implementation of guarded pwrites.
745 */
746 int
747 guarded_pwrite_np(struct proc *p, struct guarded_pwrite_np_args *uap, user_ssize_t *retval)
748 {
749 struct fileproc *fp;
750 int error;
751 int fd = uap->fd;
752 vnode_t vp = (vnode_t)0;
753 guardid_t uguard;
754 struct guarded_fileproc *gfp;
755 bool wrote_some = false;
756
757 AUDIT_ARG(fd, fd);
758
759 if ((error = copyin(uap->guard, &uguard, sizeof (uguard))) != 0)
760 return (error);
761
762 error = fp_lookup_guarded(p, fd, uguard, &gfp, 0);
763 if (error)
764 return(error);
765
766 fp = GFP_TO_FP(gfp);
767 if ((fp->f_flag & FWRITE) == 0) {
768 error = EBADF;
769 } else {
770 struct vfs_context context = *vfs_context_current();
771 context.vc_ucred = fp->f_fglob->fg_cred;
772
773 if (fp->f_type != DTYPE_VNODE) {
774 error = ESPIPE;
775 goto errout;
776 }
777 vp = (vnode_t)fp->f_fglob->fg_data;
778 if (vnode_isfifo(vp)) {
779 error = ESPIPE;
780 goto errout;
781 }
782 if ((vp->v_flag & VISTTY)) {
783 error = ENXIO;
784 goto errout;
785 }
786 if (uap->offset == (off_t)-1) {
787 error = EINVAL;
788 goto errout;
789 }
790
791 error = dofilewrite(&context, fp, uap->buf, uap->nbyte,
792 uap->offset, FOF_OFFSET, retval);
793 wrote_some = *retval > 0;
794 }
795 errout:
796 if (wrote_some)
797 fp_drop_written(p, fd, fp);
798 else
799 fp_drop(p, fd, fp, 0);
800
801 KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_guarded_pwrite_np) | DBG_FUNC_NONE),
802 uap->fd, uap->nbyte, (unsigned int)((uap->offset >> 32)), (unsigned int)(uap->offset), 0);
803
804 return(error);
805 }
806
807 /*
808 * user_ssize_t guarded_writev_np(int fd, const guardid_t *guard,
809 * struct iovec *iovp, u_int iovcnt);
810 *
811 * Initial implementation of guarded writev.
812 *
813 */
814 int
815 guarded_writev_np(struct proc *p, struct guarded_writev_np_args *uap, user_ssize_t *retval)
816 {
817 uio_t auio = NULL;
818 int error;
819 struct fileproc *fp;
820 struct user_iovec *iovp;
821 guardid_t uguard;
822 struct guarded_fileproc *gfp;
823 bool wrote_some = false;
824
825 AUDIT_ARG(fd, uap->fd);
826
827 /* Verify range before calling uio_create() */
828 if (uap->iovcnt <= 0 || uap->iovcnt > UIO_MAXIOV)
829 return (EINVAL);
830
831 /* allocate a uio large enough to hold the number of iovecs passed */
832 auio = uio_create(uap->iovcnt, 0,
833 (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32),
834 UIO_WRITE);
835
836 /* Get the location of the iovecs within the uio, then copy in the iovecs
837 * from user space.
838 */
839 iovp = uio_iovsaddr(auio);
840 if (iovp == NULL) {
841 error = ENOMEM;
842 goto ExitThisRoutine;
843 }
844 error = copyin_user_iovec_array(uap->iovp,
845 IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32,
846 uap->iovcnt, iovp);
847 if (error) {
848 goto ExitThisRoutine;
849 }
850
851 /* finalize uio_t for use and do the IO
852 */
853 error = uio_calculateresid(auio);
854 if (error) {
855 goto ExitThisRoutine;
856 }
857
858 if ((error = copyin(uap->guard, &uguard, sizeof (uguard))) != 0)
859 goto ExitThisRoutine;
860
861 error = fp_lookup_guarded(p, uap->fd, uguard, &gfp, 0);
862 if (error)
863 goto ExitThisRoutine;
864
865 fp = GFP_TO_FP(gfp);
866 if ((fp->f_flag & FWRITE) == 0) {
867 error = EBADF;
868 } else {
869 error = wr_uio(p, fp, auio, retval);
870 wrote_some = *retval > 0;
871 }
872
873 if (wrote_some)
874 fp_drop_written(p, uap->fd, fp);
875 else
876 fp_drop(p, uap->fd, fp, 0);
877 ExitThisRoutine:
878 if (auio != NULL) {
879 uio_free(auio);
880 }
881 return (error);
882 }
883
884 /*
885 * int falloc_guarded(struct proc *p, struct fileproc **fp, int *fd,
886 * vfs_context_t ctx, const guardid_t *guard, u_int attrs);
887 *
888 * This SPI is the guarded variant of falloc(). It is subject to the same
889 * restrictions as the rest of the guarded_* routines.
890 */
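/*
 * Hypothetical in-kernel sketch (illustration only): a subsystem creating a
 * guarded descriptor on behalf of a process before wiring up the fileglob.
 *
 * guardid_t guard = ...;        // caller-chosen, non-zero
 * struct fileproc *fp;
 * int fd;
 * int error = falloc_guarded(p, &fp, &fd, ctx, &guard,
 *     GUARD_DUP | GUARD_CLOSE | GUARD_SOCKET_IPC | GUARD_FILEPORT);
 */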
891 int
892 falloc_guarded(struct proc *p, struct fileproc **fp, int *fd,
893 vfs_context_t ctx, const guardid_t *guard, u_int attrs)
894 {
895 struct gfp_crarg crarg;
896
897 if (((attrs & GUARD_REQUIRED) != GUARD_REQUIRED) ||
898 ((attrs & ~GUARD_ALL) != 0) || (*guard == 0))
899 return (EINVAL);
900
901 bzero(&crarg, sizeof (crarg));
902 crarg.gca_guard = *guard;
903 crarg.gca_attrs = attrs;
904
905 return (falloc_withalloc(p, fp, fd, ctx, guarded_fileproc_alloc_init,
906 &crarg));
907 }
908
909 #if CONFIG_MACF && CONFIG_VNGUARD
910
911 /*
912 * Guarded vnodes
913 *
914 * Uses MAC hooks to guard operations on vnodes in the system. Given an fd,
915 * add data to the label on the fileglob and the vnode it points at.
916 * The data contains a pointer to the fileglob, the set of attributes to
917 * guard, a guard value for uniquification, and the pid of the process
918 * that set the guard up in the first place.
919 *
920 * The fd must have been opened read/write, and the underlying
921 * fileglob must be marked FG_CONFINED so that there's no ambiguity about
922 * the owning process.
923 *
924 * When there's a callback for a vnode operation of interest (rename, unlink,
925 * etc.), check whether the guard permits that operation, and if not,
926 * take an action, e.g. log a message or generate a crash report.
927 *
928 * The label is removed from the vnode and the fileglob when the fileglob
929 * is closed.
930 *
931 * The initial action to be taken can be specified by a boot-arg (vnguard=0x42)
932 * and changed via the "kern.vnguard.flags" sysctl.
933 */
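/*
 * Hypothetical userland sketch (illustration only; the policy name, command
 * and struct layout are assumed to come from <sys/guarded.h>): installing a
 * vnode guard on an open, confined, read/write fd via the MAC policy syscall
 * handled by vng_policy_syscall() below.
 *
 * struct vnguard_set vns = {
 *     .vns_fd = fd,                       // regular file, O_RDWR, FG_CONFINED
 *     .vns_attrs = VNG_UNLINK | VNG_RENAME_FROM | VNG_WRITE_OTHER,
 *     .vns_guard = 0xfeedfacefeedfaceULL, // non-zero
 * };
 * if (__mac_syscall(VNG_POLICY_NAME, VNG_SYSC_SET_GUARD, &vns) == -1)
 *     err(1, "vnguard set");
 */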
934
935 struct vng_owner;
936
937 struct vng_info { /* lives on the vnode label */
938 guardid_t vgi_guard;
939 unsigned vgi_attrs;
940 TAILQ_HEAD(, vng_owner) vgi_owners;
941 };
942
943 struct vng_owner { /* lives on the fileglob label */
944 proc_t vgo_p;
945 struct fileglob *vgo_fg;
946 struct vng_info *vgo_vgi;
947 TAILQ_ENTRY(vng_owner) vgo_link;
948 };
949
950 static struct vng_info *
951 new_vgi(unsigned attrs, guardid_t guard)
952 {
953 struct vng_info *vgi = kalloc(sizeof (*vgi));
954 vgi->vgi_guard = guard;
955 vgi->vgi_attrs = attrs;
956 TAILQ_INIT(&vgi->vgi_owners);
957 return vgi;
958 }
959
960 static struct vng_owner *
961 new_vgo(proc_t p, struct fileglob *fg)
962 {
963 struct vng_owner *vgo = kalloc(sizeof (*vgo));
964 memset(vgo, 0, sizeof (*vgo));
965 vgo->vgo_p = p;
966 vgo->vgo_fg = fg;
967 return vgo;
968 }
969
970 static void
971 vgi_add_vgo(struct vng_info *vgi, struct vng_owner *vgo)
972 {
973 vgo->vgo_vgi = vgi;
974 TAILQ_INSERT_HEAD(&vgi->vgi_owners, vgo, vgo_link);
975 }
976
977 static boolean_t
978 vgi_remove_vgo(struct vng_info *vgi, struct vng_owner *vgo)
979 {
980 TAILQ_REMOVE(&vgi->vgi_owners, vgo, vgo_link);
981 vgo->vgo_vgi = NULL;
982 return TAILQ_EMPTY(&vgi->vgi_owners);
983 }
984
985 static void
986 free_vgi(struct vng_info *vgi)
987 {
988 assert(TAILQ_EMPTY(&vgi->vgi_owners));
989 #if DEVELOPMENT || DEBUG
990 memset(vgi, 0xbeadfade, sizeof (*vgi));
991 #endif
992 kfree(vgi, sizeof (*vgi));
993 }
994
995 static void
996 free_vgo(struct vng_owner *vgo)
997 {
998 #if DEVELOPMENT || DEBUG
999 memset(vgo, 0x2bedf1d0, sizeof (*vgo));
1000 #endif
1001 kfree(vgo, sizeof (*vgo));
1002 }
1003
1004 static int label_slot;
1005 static lck_rw_t llock;
1006 static lck_grp_t *llock_grp;
1007
1008 static __inline void *
1009 vng_lbl_get(struct label *label)
1010 {
1011 lck_rw_assert(&llock, LCK_RW_ASSERT_HELD);
1012 void *data;
1013 if (NULL == label)
1014 data = NULL;
1015 else
1016 data = (void *)mac_label_get(label, label_slot);
1017 return data;
1018 }
1019
1020 static __inline struct vng_info *
1021 vng_lbl_get_withattr(struct label *label, unsigned attrmask)
1022 {
1023 struct vng_info *vgi = vng_lbl_get(label);
1024 assert(NULL == vgi || (vgi->vgi_attrs & ~VNG_ALL) == 0);
1025 if (NULL != vgi && 0 == (vgi->vgi_attrs & attrmask))
1026 vgi = NULL;
1027 return vgi;
1028 }
1029
1030 static __inline void
1031 vng_lbl_set(struct label *label, void *data)
1032 {
1033 assert(NULL != label);
1034 lck_rw_assert(&llock, LCK_RW_ASSERT_EXCLUSIVE);
1035 mac_label_set(label, label_slot, (intptr_t)data);
1036 }
1037
1038 static int
1039 vnguard_sysc_setguard(proc_t p, const struct vnguard_set *vns)
1040 {
1041 const int fd = vns->vns_fd;
1042
1043 if ((vns->vns_attrs & ~VNG_ALL) != 0 ||
1044 0 == vns->vns_attrs || 0 == vns->vns_guard)
1045 return EINVAL;
1046
1047 int error;
1048 struct fileproc *fp;
1049 if (0 != (error = fp_lookup(p, fd, &fp, 0)))
1050 return error;
1051 do {
1052 /*
1053 * To avoid trivial DoS, insist that the caller
1054 * has read/write access to the file.
1055 */
1056 if ((FREAD|FWRITE) != (fp->f_flag & (FREAD|FWRITE))) {
1057 error = EBADF;
1058 break;
1059 }
1060 struct fileglob *fg = fp->f_fglob;
1061 if (FILEGLOB_DTYPE(fg) != DTYPE_VNODE) {
1062 error = EBADF;
1063 break;
1064 }
1065 /*
1066 * Confinement means there's only one fd pointing at
1067 * this fileglob, and that it will always be associated
1068 * with this pid.
1069 */
1070 if (0 == (FG_CONFINED & fg->fg_lflags)) {
1071 error = EBADF;
1072 break;
1073 }
1074 struct vnode *vp = fg->fg_data;
1075 if (!vnode_isreg(vp) || NULL == vp->v_mount) {
1076 error = EBADF;
1077 break;
1078 }
1079 error = vnode_getwithref(vp);
1080 if (0 != error) {
1081 fp_drop(p, fd, fp, 0);
1082 break;
1083 }
1084 /* Ensure the target vnode -has- a label */
1085 struct vfs_context *ctx = vfs_context_current();
1086 mac_vnode_label_update(ctx, vp, NULL);
1087
1088 struct vng_info *nvgi = new_vgi(vns->vns_attrs, vns->vns_guard);
1089 struct vng_owner *nvgo = new_vgo(p, fg);
1090
1091 lck_rw_lock_exclusive(&llock);
1092
1093 do {
1094 /*
1095 * A vnode guard is associated with one or more
1096 * fileglobs in one or more processes.
1097 */
1098 struct vng_info *vgi = vng_lbl_get(vp->v_label);
1099 struct vng_owner *vgo = vng_lbl_get(fg->fg_label);
1100
1101 if (NULL == vgi) {
1102 /* vnode unguarded, add the first guard */
1103 if (NULL != vgo)
1104 panic("vnguard label on fileglob "
1105 "but not vnode");
1106 /* add a kusecount so we can unlabel later */
1107 error = vnode_ref_ext(vp, O_EVTONLY, 0);
1108 if (0 == error) {
1109 /* add the guard */
1110 vgi_add_vgo(nvgi, nvgo);
1111 vng_lbl_set(vp->v_label, nvgi);
1112 vng_lbl_set(fg->fg_label, nvgo);
1113 } else {
1114 free_vgo(nvgo);
1115 free_vgi(nvgi);
1116 }
1117 } else {
1118 /* vnode already guarded */
1119 free_vgi(nvgi);
1120 if (vgi->vgi_guard != vns->vns_guard)
1121 error = EPERM; /* guard mismatch */
1122 else if (vgi->vgi_attrs != vns->vns_attrs)
1123 error = EACCES; /* attr mismatch */
1124 if (0 != error || NULL != vgo) {
1125 free_vgo(nvgo);
1126 break;
1127 }
1128 /* record shared ownership */
1129 vgi_add_vgo(vgi, nvgo);
1130 vng_lbl_set(fg->fg_label, nvgo);
1131 }
1132 } while (0);
1133
1134 lck_rw_unlock_exclusive(&llock);
1135 vnode_put(vp);
1136 } while (0);
1137
1138 fp_drop(p, fd, fp, 0);
1139 return error;
1140 }
1141
1142 static int
1143 vng_policy_syscall(proc_t p, int cmd, user_addr_t arg)
1144 {
1145 int error = EINVAL;
1146
1147 switch (cmd) {
1148 case VNG_SYSC_PING:
1149 if (0 == arg)
1150 error = 0;
1151 break;
1152 case VNG_SYSC_SET_GUARD: {
1153 struct vnguard_set vns;
1154 error = copyin(arg, (void *)&vns, sizeof (vns));
1155 if (error)
1156 break;
1157 error = vnguard_sysc_setguard(p, &vns);
1158 break;
1159 }
1160 default:
1161 break;
1162 }
1163 return (error);
1164 }
1165
1166 /*
1167 * This is called just before the fileglob disappears in fg_free().
1168 * Take the exclusive lock: no other thread can attach a vng_info
1169 * to, or detach one from, any vnode in the system.
1170 */
1171 static void
1172 vng_file_label_destroy(struct label *label)
1173 {
1174 lck_rw_lock_exclusive(&llock);
1175 struct vng_owner *lvgo = vng_lbl_get(label);
1176 if (lvgo) {
1177 vng_lbl_set(label, 0);
1178 struct vng_info *vgi = lvgo->vgo_vgi;
1179 assert(vgi);
1180 if (vgi_remove_vgo(vgi, lvgo)) {
1181 /* that was the last reference */
1182 vgi->vgi_attrs = 0;
1183 struct fileglob *fg = lvgo->vgo_fg;
1184 assert(fg);
1185 if (DTYPE_VNODE == FILEGLOB_DTYPE(fg)) {
1186 struct vnode *vp = fg->fg_data;
1187 int error = vnode_getwithref(vp);
1188 if (0 == error) {
1189 vng_lbl_set(vp->v_label, 0);
1190 lck_rw_unlock_exclusive(&llock);
1191 /* may trigger VNOP_INACTIVE */
1192 vnode_rele_ext(vp, O_EVTONLY, 0);
1193 vnode_put(vp);
1194 free_vgi(vgi);
1195 free_vgo(lvgo);
1196 return;
1197 }
1198 }
1199 }
1200 free_vgo(lvgo);
1201 }
1202 lck_rw_unlock_exclusive(&llock);
1203 }
1204
1205 static int vng_policy_flags;
1206
1207 static int
1208 vng_guard_violation(const struct vng_info *vgi,
1209 unsigned opval, const char *nm)
1210 {
1211 int retval = 0;
1212
1213 if (vng_policy_flags & kVNG_POLICY_EPERM) {
1214 /* deny the operation */
1215 retval = EPERM;
1216 }
1217
1218 if (vng_policy_flags & kVNG_POLICY_LOGMSG) {
1219 /* log a message */
1220 const char *op;
1221 switch (opval) {
1222 case VNG_RENAME_FROM:
1223 op = "rename-from";
1224 break;
1225 case VNG_RENAME_TO:
1226 op = "rename-to";
1227 break;
1228 case VNG_UNLINK:
1229 op = "unlink";
1230 break;
1231 case VNG_LINK:
1232 op = "link";
1233 break;
1234 case VNG_EXCHDATA:
1235 op = "exchdata";
1236 break;
1237 case VNG_WRITE_OTHER:
1238 op = "write";
1239 break;
1240 case VNG_TRUNC_OTHER:
1241 op = "truncate";
1242 break;
1243 default:
1244 op = "(unknown)";
1245 break;
1246 }
1247 proc_t p = current_proc();
1248 const struct vng_owner *vgo;
1249 TAILQ_FOREACH(vgo, &vgi->vgi_owners, vgo_link) {
1250 printf("%s[%d]: %s%s: '%s' guarded by %s[%d] (0x%llx)\n",
1251 proc_name_address(p), proc_pid(p), op,
1252 0 != retval ? " denied" : "",
1253 NULL != nm ? nm : "(unknown)",
1254 proc_name_address(vgo->vgo_p), proc_pid(vgo->vgo_p),
1255 vgi->vgi_guard);
1256 }
1257 }
1258
1259 if (vng_policy_flags & (kVNG_POLICY_EXC|kVNG_POLICY_EXC_CORPSE)) {
1260 /* EXC_GUARD exception */
1261 const struct vng_owner *vgo = TAILQ_FIRST(&vgi->vgi_owners);
1262 pid_t pid = vgo ? proc_pid(vgo->vgo_p) : 0;
1263 mach_exception_code_t code;
1264 mach_exception_subcode_t subcode;
1265
1266 code = 0;
1267 EXC_GUARD_ENCODE_TYPE(code, GUARD_TYPE_VN);
1268 EXC_GUARD_ENCODE_FLAVOR(code, opval);
1269 EXC_GUARD_ENCODE_TARGET(code, pid);
1270 subcode = vgi->vgi_guard;
1271
1272 if (vng_policy_flags & kVNG_POLICY_EXC_CORPSE) {
1273 task_violated_guard(code, subcode, NULL);
1274 /* not fatal */
1275 } else {
1276 thread_t t = current_thread();
1277 thread_guard_violation(t, code, subcode);
1278 }
1279 } else if (vng_policy_flags & kVNG_POLICY_SIGKILL) {
1280 proc_t p = current_proc();
1281 psignal(p, SIGKILL);
1282 }
1283
1284 return retval;
1285 }
1286
1287 /*
1288 * A vnode guard was tripped on this thread.
1289 *
1290 * (Invoked before returning to userland from the syscall handler.)
1291 */
1292 void
1293 vn_guard_ast(thread_t __unused t,
1294 mach_exception_data_type_t code, mach_exception_data_type_t subcode)
1295 {
1296 task_exception_notify(EXC_GUARD, code, subcode);
1297 proc_t p = current_proc();
1298 psignal(p, SIGKILL);
1299 }
1300
1301 /*
1302 * vnode callbacks
1303 */
1304
1305 static int
1306 vng_vnode_check_rename(kauth_cred_t __unused cred,
1307 struct vnode *__unused dvp, struct label *__unused dlabel,
1308 struct vnode *__unused vp, struct label *label,
1309 struct componentname *cnp,
1310 struct vnode *__unused tdvp, struct label *__unused tdlabel,
1311 struct vnode *__unused tvp, struct label *tlabel,
1312 struct componentname *tcnp)
1313 {
1314 int error = 0;
1315 if (NULL != label || NULL != tlabel) {
1316 lck_rw_lock_shared(&llock);
1317 const struct vng_info *vgi =
1318 vng_lbl_get_withattr(label, VNG_RENAME_FROM);
1319 if (NULL != vgi)
1320 error = vng_guard_violation(vgi,
1321 VNG_RENAME_FROM, cnp->cn_nameptr);
1322 if (0 == error) {
1323 vgi = vng_lbl_get_withattr(tlabel, VNG_RENAME_TO);
1324 if (NULL != vgi)
1325 error = vng_guard_violation(vgi,
1326 VNG_RENAME_TO, tcnp->cn_nameptr);
1327 }
1328 lck_rw_unlock_shared(&llock);
1329 }
1330 return error;
1331 }
1332
1333 static int
1334 vng_vnode_check_link(kauth_cred_t __unused cred,
1335 struct vnode *__unused dvp, struct label *__unused dlabel,
1336 struct vnode *vp, struct label *label, struct componentname *__unused cnp)
1337 {
1338 int error = 0;
1339 if (NULL != label) {
1340 lck_rw_lock_shared(&llock);
1341 const struct vng_info *vgi =
1342 vng_lbl_get_withattr(label, VNG_LINK);
1343 if (vgi) {
1344 const char *nm = vnode_getname(vp);
1345 error = vng_guard_violation(vgi, VNG_LINK, nm);
1346 if (nm)
1347 vnode_putname(nm);
1348 }
1349 lck_rw_unlock_shared(&llock);
1350 }
1351 return error;
1352 }
1353
1354 static int
1355 vng_vnode_check_unlink(kauth_cred_t __unused cred,
1356 struct vnode *__unused dvp, struct label *__unused dlabel,
1357 struct vnode *__unused vp, struct label *label, struct componentname *cnp)
1358 {
1359 int error = 0;
1360 if (NULL != label) {
1361 lck_rw_lock_shared(&llock);
1362 const struct vng_info *vgi =
1363 vng_lbl_get_withattr(label, VNG_UNLINK);
1364 if (vgi)
1365 error = vng_guard_violation(vgi, VNG_UNLINK,
1366 cnp->cn_nameptr);
1367 lck_rw_unlock_shared(&llock);
1368 }
1369 return error;
1370 }
1371
1372 /*
1373 * Only check violations for writes performed by "other processes"
1374 */
1375 static int
1376 vng_vnode_check_write(kauth_cred_t __unused actv_cred,
1377 kauth_cred_t __unused file_cred, struct vnode *vp, struct label *label)
1378 {
1379 int error = 0;
1380 if (NULL != label) {
1381 lck_rw_lock_shared(&llock);
1382 const struct vng_info *vgi =
1383 vng_lbl_get_withattr(label, VNG_WRITE_OTHER);
1384 if (vgi) {
1385 proc_t p = current_proc();
1386 const struct vng_owner *vgo;
1387 TAILQ_FOREACH(vgo, &vgi->vgi_owners, vgo_link) {
1388 if (vgo->vgo_p == p)
1389 goto done;
1390 }
1391 const char *nm = vnode_getname(vp);
1392 error = vng_guard_violation(vgi,
1393 VNG_WRITE_OTHER, nm);
1394 if (nm)
1395 vnode_putname(nm);
1396 }
1397 done:
1398 lck_rw_unlock_shared(&llock);
1399 }
1400 return error;
1401 }
1402
1403 /*
1404 * Only check violations for truncates performed by "other processes"
1405 */
1406 static int
1407 vng_vnode_check_truncate(kauth_cred_t __unused actv_cred,
1408 kauth_cred_t __unused file_cred, struct vnode *vp,
1409 struct label *label)
1410 {
1411 int error = 0;
1412 if (NULL != label) {
1413 lck_rw_lock_shared(&llock);
1414 const struct vng_info *vgi =
1415 vng_lbl_get_withattr(label, VNG_TRUNC_OTHER);
1416 if (vgi) {
1417 proc_t p = current_proc();
1418 const struct vng_owner *vgo;
1419 TAILQ_FOREACH(vgo, &vgi->vgi_owners, vgo_link) {
1420 if (vgo->vgo_p == p)
1421 goto done;
1422 }
1423 const char *nm = vnode_getname(vp);
1424 error = vng_guard_violation(vgi,
1425 VNG_TRUNC_OTHER, nm);
1426 if (nm)
1427 vnode_putname(nm);
1428 }
1429 done:
1430 lck_rw_unlock_shared(&llock);
1431 }
1432 return error;
1433 }
1434
1435 static int
1436 vng_vnode_check_exchangedata(kauth_cred_t __unused cred,
1437 struct vnode *fvp, struct label *flabel,
1438 struct vnode *svp, struct label *slabel)
1439 {
1440 int error = 0;
1441 if (NULL != flabel || NULL != slabel) {
1442 lck_rw_lock_shared(&llock);
1443 const struct vng_info *vgi =
1444 vng_lbl_get_withattr(flabel, VNG_EXCHDATA);
1445 if (NULL != vgi) {
1446 const char *nm = vnode_getname(fvp);
1447 error = vng_guard_violation(vgi,
1448 VNG_EXCHDATA, nm);
1449 if (nm)
1450 vnode_putname(nm);
1451 }
1452 if (0 == error) {
1453 vgi = vng_lbl_get_withattr(slabel, VNG_EXCHDATA);
1454 if (NULL != vgi) {
1455 const char *nm = vnode_getname(svp);
1456 error = vng_guard_violation(vgi,
1457 VNG_EXCHDATA, nm);
1458 if (nm)
1459 vnode_putname(nm);
1460 }
1461 }
1462 lck_rw_unlock_shared(&llock);
1463 }
1464 return error;
1465 }
1466
1467 /*
1468 * Configuration gorp
1469 */
1470
1471 static void
1472 vng_init(struct mac_policy_conf *mpc)
1473 {
1474 llock_grp = lck_grp_alloc_init(mpc->mpc_name, LCK_GRP_ATTR_NULL);
1475 lck_rw_init(&llock, llock_grp, LCK_ATTR_NULL);
1476 }
1477
1478 SECURITY_READ_ONLY_EARLY(static struct mac_policy_ops) vng_policy_ops = {
1479 .mpo_file_label_destroy = vng_file_label_destroy,
1480
1481 .mpo_vnode_check_link = vng_vnode_check_link,
1482 .mpo_vnode_check_unlink = vng_vnode_check_unlink,
1483 .mpo_vnode_check_rename = vng_vnode_check_rename,
1484 .mpo_vnode_check_write = vng_vnode_check_write,
1485 .mpo_vnode_check_truncate = vng_vnode_check_truncate,
1486 .mpo_vnode_check_exchangedata = vng_vnode_check_exchangedata,
1487
1488 .mpo_policy_syscall = vng_policy_syscall,
1489 .mpo_policy_init = vng_init,
1490 };
1491
1492 static const char *vng_labelnames[] = {
1493 "vnguard",
1494 };
1495
1496 #define ACOUNT(arr) ((unsigned)(sizeof (arr) / sizeof (arr[0])))
1497
1498 SECURITY_READ_ONLY_LATE(static struct mac_policy_conf) vng_policy_conf = {
1499 .mpc_name = VNG_POLICY_NAME,
1500 .mpc_fullname = "Guarded vnode policy",
1501 .mpc_field_off = &label_slot,
1502 .mpc_labelnames = vng_labelnames,
1503 .mpc_labelname_count = ACOUNT(vng_labelnames),
1504 .mpc_ops = &vng_policy_ops,
1505 .mpc_loadtime_flags = 0,
1506 .mpc_runtime_flags = 0
1507 };
1508
1509 static mac_policy_handle_t vng_policy_handle;
1510
1511 void
1512 vnguard_policy_init(void)
1513 {
1514 if (0 == PE_i_can_has_debugger(NULL))
1515 return;
1516 vng_policy_flags = kVNG_POLICY_LOGMSG | kVNG_POLICY_EXC_CORPSE;
1517 PE_parse_boot_argn("vnguard", &vng_policy_flags, sizeof (vng_policy_flags));
1518 if (vng_policy_flags)
1519 mac_policy_register(&vng_policy_conf, &vng_policy_handle, NULL);
1520 }
1521
1522 #if DEBUG || DEVELOPMENT
1523 #include <sys/sysctl.h>
1524
1525 SYSCTL_DECL(_kern_vnguard);
1526 SYSCTL_NODE(_kern, OID_AUTO, vnguard, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "vnguard");
1527 SYSCTL_INT(_kern_vnguard, OID_AUTO, flags, CTLFLAG_RW | CTLFLAG_LOCKED,
1528 &vng_policy_flags, 0, "vnguard policy flags");
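
/*
 * Hypothetical userland sketch (illustration only; the kVNG_POLICY_* constants
 * are assumed to come from <sys/guarded.h>): adjusting the enforcement policy
 * at run time on a DEBUG/DEVELOPMENT kernel via the sysctl declared above.
 *
 * int flags = kVNG_POLICY_LOGMSG | kVNG_POLICY_EPERM;
 * sysctlbyname("kern.vnguard.flags", NULL, NULL, &flags, sizeof (flags));
 */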
1529 #endif
1530
1531 #endif /* CONFIG_MACF && CONFIG_VNGUARD */