/*
 * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/*
 * Centralized authorisation framework.
 */
#include <sys/appleapiopts.h>
#include <sys/param.h>	/* XXX trim includes */
#include <sys/systm.h>
#include <sys/ucred.h>
#include <sys/proc_internal.h>
#include <sys/timeb.h>
#include <sys/times.h>
#include <sys/malloc.h>
#include <sys/vnode_internal.h>
#include <sys/kauth.h>

#include <bsm/audit_kernel.h>

#include <sys/mount.h>
#include <sys/sysproto.h>
#include <mach/message.h>
#include <mach/host_security.h>

#include <kern/locks.h>
/*
 * Authorization scopes.
 */

lck_grp_t *kauth_lck_grp;
static lck_mtx_t *kauth_scope_mtx;
#define KAUTH_SCOPELOCK()	lck_mtx_lock(kauth_scope_mtx);
#define KAUTH_SCOPEUNLOCK()	lck_mtx_unlock(kauth_scope_mtx);
/*
 * We support listeners for scopes that have not been registered yet.
 * If a listener comes in for a scope that is not active we hang the listener
 * off our kauth_dangling_listeners list and once the scope becomes active we
 * remove it from kauth_dangling_listeners and add it to the active scope.
 */
struct kauth_listener {
    TAILQ_ENTRY(kauth_listener)     kl_link;
    const char *                    kl_identifier;
    kauth_scope_callback_t          kl_callback;
    void *                          kl_idata;
};
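
/*
 * Illustrative sketch (not part of this file): a kext would typically attach
 * a listener to a scope with kauth_listen_scope() and detach it again with
 * kauth_unlisten_scope().  The callback and function names below are
 * hypothetical; only the kauth KPI calls are real.
 */
#if 0	/* example only */
static int
example_generic_listener(kauth_cred_t cred, void *idata, kauth_action_t action,
    uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3)
{
    /* never object; let other listeners and the scope callback decide */
    return(KAUTH_RESULT_DEFER);
}

static kauth_listener_t example_listener;

static void
example_listener_start(void)
{
    /* attaches to the generic scope; lands on the dangling list if the
     * scope has not been registered yet */
    example_listener = kauth_listen_scope(KAUTH_SCOPE_GENERIC,
        example_generic_listener, NULL);
}

static void
example_listener_stop(void)
{
    if (example_listener != NULL)
        kauth_unlisten_scope(example_listener);
}
#endif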
/* XXX - kauth_todo - there is a race if a scope listener is removed while we
 * are in the kauth_authorize_action code path.  We intentionally do not take
 * a scope lock in order to get the best possible performance; we will fix this
 * post Tiger.
 * Until the race is fixed our kext clients are responsible for all active
 * requests that may be in their callback code or on the way to their callback
 * code before they free kauth_listener.kl_callback or kauth_listener.kl_idata.
 * We keep copies of these in our kauth_local_listener in an attempt to limit
 * our exposure to the unlisten race.
 */
struct kauth_local_listener {
    kauth_listener_t                kll_listenerp;
    kauth_scope_callback_t          kll_callback;
    void *                          kll_idata;
};
typedef struct kauth_local_listener *kauth_local_listener_t;
static TAILQ_HEAD(,kauth_listener) kauth_dangling_listeners;

/*
 * Scope listeners need to be reworked to be dynamic.
 * We intentionally used a static table to avoid locking issues with linked
 * lists.  The listeners may be called quite often.
 */
#define KAUTH_SCOPE_MAX_LISTENERS	15
struct kauth_scope {
    TAILQ_ENTRY(kauth_scope)                ks_link;
    volatile struct kauth_local_listener    ks_listeners[KAUTH_SCOPE_MAX_LISTENERS];
    const char *                            ks_identifier;
    kauth_scope_callback_t                  ks_callback;
    void *                                  ks_idata;
    u_int                                   ks_flags;
};

/* values for kauth_scope.ks_flags */
#define KS_F_HAS_LISTENERS	(1 << 0)
static TAILQ_HEAD(,kauth_scope) kauth_scopes;

static int	kauth_add_callback_to_scope(kauth_scope_t sp, kauth_listener_t klp);
static void	kauth_scope_init(void);
static kauth_scope_t	kauth_alloc_scope(const char *identifier, kauth_scope_callback_t callback, void *idata);
static kauth_listener_t	kauth_alloc_listener(const char *identifier, kauth_scope_callback_t callback, void *idata);
static int	kauth_scope_valid(kauth_scope_t scope);

kauth_scope_t	kauth_scope_process;
static int	kauth_authorize_process_callback(kauth_cred_t _credential, void *_idata, kauth_action_t _action,
    uintptr_t arg0, uintptr_t arg1, __unused uintptr_t arg2, __unused uintptr_t arg3);

kauth_scope_t	kauth_scope_generic;
static int	kauth_authorize_generic_callback(kauth_cred_t _credential, void *_idata, kauth_action_t _action,
    uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3);

kauth_scope_t	kauth_scope_fileop;

extern int	cansignal(struct proc *, kauth_cred_t, struct proc *, int);
extern char *	get_pathbuff(void);
extern void	release_pathbuff(char *path);
void
kauth_init(void)
{
    lck_grp_attr_t	*grp_attributes;

    TAILQ_INIT(&kauth_scopes);
    TAILQ_INIT(&kauth_dangling_listeners);

    /* set up our lock group */
    grp_attributes = lck_grp_attr_alloc_init();
    kauth_lck_grp = lck_grp_alloc_init("kauth", grp_attributes);
    lck_grp_attr_free(grp_attributes);

    /* bring up kauth subsystem components */
    kauth_identity_init();
    kauth_resolver_init();

    /* can't alloc locks after this */
    lck_grp_free(kauth_lck_grp);
    kauth_lck_grp = NULL;
}
static void
kauth_scope_init(void)
{
    kauth_scope_mtx = lck_mtx_alloc_init(kauth_lck_grp, 0 /*LCK_ATTR_NULL*/);
    kauth_scope_process = kauth_register_scope(KAUTH_SCOPE_PROCESS, kauth_authorize_process_callback, NULL);
    kauth_scope_generic = kauth_register_scope(KAUTH_SCOPE_GENERIC, kauth_authorize_generic_callback, NULL);
    kauth_scope_fileop = kauth_register_scope(KAUTH_SCOPE_FILEOP, NULL, NULL);
}
/*
 * Scope registration.
 */

static kauth_scope_t
kauth_alloc_scope(const char *identifier, kauth_scope_callback_t callback, void *idata)
{
    kauth_scope_t	sp;

    /*
     * Allocate and populate the scope structure.
     */
    MALLOC(sp, kauth_scope_t, sizeof(*sp), M_KAUTH, M_WAITOK);
    if (sp == NULL)
        return(NULL);
    bzero(&sp->ks_listeners, sizeof(sp->ks_listeners));
    sp->ks_identifier = identifier;
    sp->ks_idata = idata;
    sp->ks_callback = callback;
    return(sp);
}
static kauth_listener_t
kauth_alloc_listener(const char *identifier, kauth_scope_callback_t callback, void *idata)
{
    kauth_listener_t lsp;

    /*
     * Allocate and populate the listener structure.
     */
    MALLOC(lsp, kauth_listener_t, sizeof(*lsp), M_KAUTH, M_WAITOK);
    if (lsp == NULL)
        return(NULL);
    lsp->kl_identifier = identifier;
    lsp->kl_idata = idata;
    lsp->kl_callback = callback;
    return(lsp);
}
kauth_scope_t
kauth_register_scope(const char *identifier, kauth_scope_callback_t callback, void *idata)
{
    kauth_scope_t	sp, tsp;
    kauth_listener_t klp;

    if ((sp = kauth_alloc_scope(identifier, callback, idata)) == NULL)
        return(NULL);

    /*
     * Lock the list and insert.
     */
    KAUTH_SCOPELOCK();
    TAILQ_FOREACH(tsp, &kauth_scopes, ks_link) {
        /* duplicate registration is not permitted */
        if (strcmp(tsp->ks_identifier, identifier) == 0) {
            KAUTH_SCOPEUNLOCK();
            FREE(sp, M_KAUTH);
            return(NULL);
        }
    }
    TAILQ_INSERT_TAIL(&kauth_scopes, sp, ks_link);

    /*
     * Look for listeners waiting for this scope, move them to the active scope.
     *
     * Note that we have to restart the scan every time we remove an entry
     * from the list, since we can't remove the current item from the list.
     */
restart:
    TAILQ_FOREACH(klp, &kauth_dangling_listeners, kl_link) {
        if (strcmp(klp->kl_identifier, sp->ks_identifier) == 0) {
            /* found a match on the dangling listener list. add it to the
             * active scope.
             */
            if (kauth_add_callback_to_scope(sp, klp) == 0) {
                TAILQ_REMOVE(&kauth_dangling_listeners, klp, kl_link);
                goto restart;
            }
            printf("%s - failed to add listener to scope \"%s\" \n", __FUNCTION__, sp->ks_identifier);
        }
    }

    KAUTH_SCOPEUNLOCK();
    return(sp);
}
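
/*
 * Illustrative sketch (not part of this file): a subsystem can define its own
 * scope and then funnel its access checks through kauth_authorize_action().
 * The scope identifier, action value and function names are hypothetical.
 */
#if 0	/* example only */
#define EXAMPLE_SCOPE		"com.example.scope"
#define EXAMPLE_ACTION_FROB	1

static kauth_scope_t example_scope;

static void
example_scope_start(void)
{
    /* no primary callback; listeners alone decide the result */
    example_scope = kauth_register_scope(EXAMPLE_SCOPE, NULL, NULL);
}

static int
example_check_frob(kauth_cred_t cred, uintptr_t obj)
{
    /* returns 0 on allow, EPERM when nothing explicitly allows */
    return(kauth_authorize_action(example_scope, cred, EXAMPLE_ACTION_FROB,
        obj, 0, 0, 0));
}
#endif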
void
kauth_deregister_scope(kauth_scope_t scope)
{
    int		i;

    KAUTH_SCOPELOCK();

    TAILQ_REMOVE(&kauth_scopes, scope, ks_link);

    /* relocate listeners back to the waiting list */
    for (i = 0; i < KAUTH_SCOPE_MAX_LISTENERS; i++) {
        if (scope->ks_listeners[i].kll_listenerp != NULL) {
            TAILQ_INSERT_TAIL(&kauth_dangling_listeners, scope->ks_listeners[i].kll_listenerp, kl_link);
            scope->ks_listeners[i].kll_listenerp = NULL;
            /*
             * XXX - kauth_todo - WARNING, do not clear kll_callback or
             * kll_idata here.  they are part of our scope unlisten race hack
             */
        }
    }
    KAUTH_SCOPEUNLOCK();
    FREE(scope, M_KAUTH);

    return;
}
kauth_listener_t
kauth_listen_scope(const char *identifier, kauth_scope_callback_t callback, void *idata)
{
    kauth_listener_t klp;
    kauth_scope_t	sp;

    if ((klp = kauth_alloc_listener(identifier, callback, idata)) == NULL)
        return(NULL);

    /*
     * Lock the scope list and check to see whether this scope already exists.
     */
    KAUTH_SCOPELOCK();
    TAILQ_FOREACH(sp, &kauth_scopes, ks_link) {
        if (strcmp(sp->ks_identifier, identifier) == 0) {
            /* scope exists, add it to scope listener table */
            if (kauth_add_callback_to_scope(sp, klp) == 0) {
                KAUTH_SCOPEUNLOCK();
                return(klp);
            }
            /* table already full */
            KAUTH_SCOPEUNLOCK();
            FREE(klp, M_KAUTH);
            return(NULL);
        }
    }

    /* scope doesn't exist, put on waiting list. */
    TAILQ_INSERT_TAIL(&kauth_dangling_listeners, klp, kl_link);

    KAUTH_SCOPEUNLOCK();

    return(klp);
}
void
kauth_unlisten_scope(kauth_listener_t listener)
{
    kauth_scope_t	sp;
    kauth_listener_t	klp;
    int		i, listener_count, do_free;

    KAUTH_SCOPELOCK();

    /* search the active scope for this listener */
    TAILQ_FOREACH(sp, &kauth_scopes, ks_link) {
        do_free = 0;
        if ((sp->ks_flags & KS_F_HAS_LISTENERS) != 0) {
            listener_count = 0;
            for (i = 0; i < KAUTH_SCOPE_MAX_LISTENERS; i++) {
                if (sp->ks_listeners[i].kll_listenerp == listener) {
                    sp->ks_listeners[i].kll_listenerp = NULL;
                    do_free = 1;
                    /*
                     * XXX - kauth_todo - WARNING, do not clear kll_callback or
                     * kll_idata here.  they are part of our scope unlisten race hack
                     */
                }
                else if (sp->ks_listeners[i].kll_listenerp != NULL) {
                    listener_count++;
                }
            }
            if (do_free) {
                if (listener_count == 0) {
                    sp->ks_flags &= ~KS_F_HAS_LISTENERS;
                }
                KAUTH_SCOPEUNLOCK();
                FREE(listener, M_KAUTH);
                return;
            }
        }
    }

    /* if not active, check the dangling list */
    TAILQ_FOREACH(klp, &kauth_dangling_listeners, kl_link) {
        if (klp == listener) {
            TAILQ_REMOVE(&kauth_dangling_listeners, klp, kl_link);
            KAUTH_SCOPEUNLOCK();
            FREE(listener, M_KAUTH);
            return;
        }
    }

    KAUTH_SCOPEUNLOCK();
    return;
}
/*
 * Authorization requests.
 */
int
kauth_authorize_action(kauth_scope_t scope, kauth_cred_t credential, kauth_action_t action,
    uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3)
{
    int result, ret, i;

    /* ask the scope */
    if (scope->ks_callback != NULL)
        result = scope->ks_callback(credential, scope->ks_idata, action, arg0, arg1, arg2, arg3);
    else
        result = KAUTH_RESULT_DEFER;

    /* check with listeners */
    if ((scope->ks_flags & KS_F_HAS_LISTENERS) != 0) {
        for (i = 0; i < KAUTH_SCOPE_MAX_LISTENERS; i++) {
            /* XXX - kauth_todo - there is a race here if listener is removed - we will fix this post Tiger.
             * Until the race is fixed our kext clients are responsible for all active requests that may
             * be in their callbacks or on the way to their callbacks before they free kl_callback or kl_idata.
             * We keep copies of these in our kauth_local_listener in an attempt to limit our exposure to
             * the unlisten race.
             */
            if (scope->ks_listeners[i].kll_listenerp == NULL ||
                scope->ks_listeners[i].kll_callback == NULL)
                continue;

            ret = scope->ks_listeners[i].kll_callback(
                credential, scope->ks_listeners[i].kll_idata,
                action, arg0, arg1, arg2, arg3);
            if ((ret == KAUTH_RESULT_DENY) ||
                (result == KAUTH_RESULT_DEFER))
                result = ret;
        }
    }

    /* we need an explicit allow, or the auth fails */
    /* XXX need a mechanism for auth failure to be signalled vs. denial */
    return(result == KAUTH_RESULT_ALLOW ? 0 : EPERM);
}
/*
 * Default authorization handlers.
 */
int
kauth_authorize_allow(__unused kauth_cred_t credential, __unused void *idata, __unused kauth_action_t action,
    __unused uintptr_t arg0, __unused uintptr_t arg1, __unused uintptr_t arg2, __unused uintptr_t arg3)
{
    return(KAUTH_RESULT_ALLOW);
}
static int
kauth_scope_valid(kauth_scope_t scope)
{
    kauth_scope_t	sp;

    KAUTH_SCOPELOCK();
    TAILQ_FOREACH(sp, &kauth_scopes, ks_link) {
        if (sp == scope)
            break;
    }
    KAUTH_SCOPEUNLOCK();
    return((sp == NULL) ? 0 : 1);
}
/*
 * Process authorization scope.
 */

int
kauth_authorize_process(kauth_cred_t credential, kauth_action_t action, struct proc *process, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3)
{
    return(kauth_authorize_action(kauth_scope_process, credential, action, (uintptr_t)process, arg1, arg2, arg3));
}
static int
kauth_authorize_process_callback(kauth_cred_t credential, __unused void *idata, kauth_action_t action,
    uintptr_t arg0, uintptr_t arg1, __unused uintptr_t arg2, __unused uintptr_t arg3)
{
    switch (action) {
    case KAUTH_PROCESS_CANSIGNAL:
        panic("KAUTH_PROCESS_CANSIGNAL not implemented");
        /* XXX credential wrong here */
        /* arg0 - process to signal
         * arg1 - signal to send the process
         */
        if (cansignal(current_proc(), credential, (struct proc *)arg0, (int)arg1))
            return(KAUTH_RESULT_ALLOW);
        break;
    case KAUTH_PROCESS_CANTRACE:
        /* current_proc() - process that will do the tracing
         * arg0 - process to be traced
         * arg1 - pointer to int - reason (errno) for denial
         */
        if (cantrace(current_proc(), credential, (proc_t)arg0, (int *)arg1))
            return(KAUTH_RESULT_ALLOW);
        break;
    }

    /* no explicit result, so defer to others in the chain */
    return(KAUTH_RESULT_DEFER);
}
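
/*
 * Illustrative sketch (not part of this file): this is roughly how a caller
 * such as ptrace() might consult the process scope before attaching to a
 * target process.  The function and variable names are hypothetical.
 */
#if 0	/* example only */
static int
example_can_trace(proc_t target)
{
    int err = 0;

    /* 0 on allow; EPERM otherwise, with the specific reason in err */
    if (kauth_authorize_process(kauth_cred_get(), KAUTH_PROCESS_CANTRACE,
        target, (uintptr_t)&err, 0, 0) != 0)
        return(err ? err : EPERM);
    return(0);
}
#endif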
/*
 * File system operation authorization scope.  This is really only a notification
 * of the file system operation, not an authorization check.  Thus the result is
 * ignored; listeners cannot deny the operation.
 *
 * arguments passed to KAUTH_FILEOP_OPEN listeners
 *		arg0 is pointer to vnode (vnode *) for given user path.
 *		arg1 is pointer to path (char *) passed in to open.
 * arguments passed to KAUTH_FILEOP_CLOSE listeners
 *		arg0 is pointer to vnode (vnode *) for file to be closed.
 *		arg1 is pointer to path (char *) of file to be closed.
 *		arg2 is close flags.
 * arguments passed to KAUTH_FILEOP_RENAME listeners
 *		arg0 is pointer to "from" path (char *).
 *		arg1 is pointer to "to" path (char *).
 * arguments passed to KAUTH_FILEOP_EXCHANGE listeners
 *		arg0 is pointer to file 1 path (char *).
 *		arg1 is pointer to file 2 path (char *).
 * arguments passed to KAUTH_FILEOP_EXEC listeners
 *		arg0 is pointer to vnode (vnode *) for executable.
 *		arg1 is pointer to path (char *) to executable.
 */
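
/*
 * Illustrative sketch (not part of this file): a fileop listener that logs
 * renames, using the argument conventions documented above.  The callback and
 * function names are hypothetical; kauth_listen_scope() and
 * KAUTH_SCOPE_FILEOP are the real KPI.
 */
#if 0	/* example only */
static int
example_fileop_listener(kauth_cred_t cred, void *idata, kauth_action_t action,
    uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3)
{
    if (action == KAUTH_FILEOP_RENAME) {
        /* arg0/arg1 are the "from" and "to" paths for a rename */
        printf("rename: %s -> %s\n", (const char *)arg0, (const char *)arg1);
    }
    /* fileop listeners are notifications only; the result is ignored */
    return(KAUTH_RESULT_DEFER);
}

static kauth_listener_t example_fileop_handle;

static void
example_fileop_listen(void)
{
    example_fileop_handle = kauth_listen_scope(KAUTH_SCOPE_FILEOP,
        example_fileop_listener, NULL);
}
#endif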
int
kauth_authorize_fileop_has_listeners(void)
{
    /*
     * return 1 if we have any listeners for the fileop scope
     */
    if ((kauth_scope_fileop->ks_flags & KS_F_HAS_LISTENERS) != 0) {
        return(1);
    }
    return(0);
}
int
kauth_authorize_fileop(kauth_cred_t credential, kauth_action_t action, uintptr_t arg0, uintptr_t arg1)
{
    char	*namep = NULL;
    int		name_len;
    uintptr_t	arg2 = 0;

    /* we do not have a primary handler for the fileop scope so bail out if
     * there are no listeners.
     */
    if ((kauth_scope_fileop->ks_flags & KS_F_HAS_LISTENERS) == 0) {
        return(0);
    }

    if (action == KAUTH_FILEOP_OPEN || action == KAUTH_FILEOP_CLOSE || action == KAUTH_FILEOP_EXEC) {
        /* get path to the given vnode as a convenience to our listeners.
         */
        namep = get_pathbuff();
        name_len = MAXPATHLEN;
        if (vn_getpath((vnode_t)arg0, namep, &name_len) != 0) {
            release_pathbuff(namep);
            return(0);
        }
        if (action == KAUTH_FILEOP_CLOSE) {
            arg2 = arg1;  /* close has some flags that come in via arg1 */
        }
        arg1 = (uintptr_t)namep;
    }
    kauth_authorize_action(kauth_scope_fileop, credential, action, arg0, arg1, arg2, 0);

    if (namep != NULL) {
        release_pathbuff(namep);
    }

    return(0);
}
/*
 * Generic authorization scope.
 */

int
kauth_authorize_generic(kauth_cred_t credential, kauth_action_t action)
{
    if (credential == NULL)
        panic("auth against NULL credential");

    return(kauth_authorize_action(kauth_scope_generic, credential, action, 0, 0, 0, 0));
}

static int
kauth_authorize_generic_callback(kauth_cred_t credential, __unused void *idata, kauth_action_t action,
    __unused uintptr_t arg0, __unused uintptr_t arg1, __unused uintptr_t arg2, __unused uintptr_t arg3)
{
    switch (action) {
    case KAUTH_GENERIC_ISSUSER:
        return((kauth_cred_getuid(credential) == 0) ?
            KAUTH_RESULT_ALLOW : KAUTH_RESULT_DENY);
    }

    /* no explicit result, so defer to others in the chain */
    return(KAUTH_RESULT_DEFER);
}
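
/*
 * Illustrative sketch (not part of this file): the common superuser check a
 * caller would make against the generic scope.  The function name is
 * hypothetical.
 */
#if 0	/* example only */
static int
example_require_root(void)
{
    /* returns 0 for the superuser, EPERM for everyone else */
    return(kauth_authorize_generic(kauth_cred_get(), KAUTH_GENERIC_ISSUSER));
}
#endif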
/*
 * Determines whether the credential has the requested rights for an object
 * secured by the supplied ACL.
 *
 * Evaluation proceeds from the top down, with access denied if any ACE denies
 * any of the requested rights, or granted if all of the requested rights are
 * satisfied by the ACEs so far.
 */
int
kauth_acl_evaluate(kauth_cred_t cred, kauth_acl_eval_t eval)
{
    int		applies, error, i;
    kauth_ace_t	ace;
    guid_t	guid;
    uint32_t	rights;
    int		wkguid;

    /* always allowed to do nothing */
    if (eval->ae_requested == 0) {
        eval->ae_result = KAUTH_RESULT_ALLOW;
        return(0);
    }

    eval->ae_residual = eval->ae_requested;

    /*
     * Get our guid for comparison purposes.
     */
    if ((error = kauth_cred_getguid(cred, &guid)) != 0) {
        eval->ae_result = KAUTH_RESULT_DENY;
        KAUTH_DEBUG(" ACL - can't get credential GUID (%d), ACL denied", error);
        return(error);
    }

    KAUTH_DEBUG(" ACL - %d entries, initial residual %x", eval->ae_count, eval->ae_residual);
    for (i = 0, ace = eval->ae_acl; i < eval->ae_count; i++, ace++) {

        /*
         * Skip inherit-only entries.
         */
        if (ace->ace_flags & KAUTH_ACE_ONLY_INHERIT)
            continue;

        /*
         * Expand generic rights, if appropriate.
         */
        rights = ace->ace_rights;
        if (rights & KAUTH_ACE_GENERIC_ALL)
            rights |= eval->ae_exp_gall;
        if (rights & KAUTH_ACE_GENERIC_READ)
            rights |= eval->ae_exp_gread;
        if (rights & KAUTH_ACE_GENERIC_WRITE)
            rights |= eval->ae_exp_gwrite;
        if (rights & KAUTH_ACE_GENERIC_EXECUTE)
            rights |= eval->ae_exp_gexec;

        /*
         * Determine whether this entry applies to the current request.  This
         * saves us checking the GUID if the entry has nothing to do with what
         * we're currently doing.
         */
        switch (ace->ace_flags & KAUTH_ACE_KINDMASK) {
        case KAUTH_ACE_PERMIT:
            if (!(eval->ae_residual & rights))
                continue;
            break;
        case KAUTH_ACE_DENY:
            if (!(eval->ae_requested & rights))
                continue;
            break;
        default:
            /* we don't recognise this ACE, skip it */
            continue;
        }

        /*
         * Verify whether this entry applies to the credential.
         */
        wkguid = kauth_wellknown_guid(&ace->ace_applicable);
        switch (wkguid) {
        case KAUTH_WKG_OWNER:
            applies = eval->ae_options & KAUTH_AEVAL_IS_OWNER;
            break;
        case KAUTH_WKG_GROUP:
            applies = eval->ae_options & KAUTH_AEVAL_IN_GROUP;
            break;
        /* we short-circuit these here rather than wasting time calling the group membership code */
        case KAUTH_WKG_EVERYBODY:
            applies = 1;
            break;
        case KAUTH_WKG_NOBODY:
            applies = 0;
            break;
        default:
            /* check to see whether it's exactly us, or a group we are a member of */
            applies = kauth_guid_equal(&guid, &ace->ace_applicable);
            KAUTH_DEBUG(" ACL - ACE applicable " K_UUID_FMT " caller " K_UUID_FMT " %smatched",
                K_UUID_ARG(ace->ace_applicable), K_UUID_ARG(guid), applies ? "" : "not ");

            if (!applies) {
                error = kauth_cred_ismember_guid(cred, &ace->ace_applicable, &applies);
                /*
                 * If we can't resolve group membership, we have to limit misbehaviour.
                 * If the ACE is an 'allow' ACE, assume the cred is not a member (avoid
                 * granting excess access).  If the ACE is a 'deny' ACE, assume the cred
                 * is a member (avoid failing to deny).
                 */
                if (error != 0) {
                    KAUTH_DEBUG(" ACL[%d] - can't get membership, making pessimistic assumption", i);
                    switch (ace->ace_flags & KAUTH_ACE_KINDMASK) {
                    case KAUTH_ACE_PERMIT:
                        applies = 0;
                        break;
                    default:
                        applies = 1;
                        break;
                    }
                } else {
                    KAUTH_DEBUG(" ACL - %s group member", applies ? "is" : "not");
                }
            } else {
                KAUTH_DEBUG(" ACL - entry matches caller");
            }
        }
        if (!applies)
            continue;

        /*
         * Apply ACE to outstanding rights.
         */
        switch (ace->ace_flags & KAUTH_ACE_KINDMASK) {
        case KAUTH_ACE_PERMIT:
            /* satisfy any rights that this ACE grants */
            eval->ae_residual = eval->ae_residual & ~rights;
            KAUTH_DEBUG(" ACL[%d] - rights %x leave residual %x", i, rights, eval->ae_residual);
            /* all rights satisfied? */
            if (eval->ae_residual == 0) {
                eval->ae_result = KAUTH_RESULT_ALLOW;
                return(0);
            }
            break;
        case KAUTH_ACE_DENY:
            /* deny the request if any of the requested rights is denied */
            if (eval->ae_requested & rights) {
                KAUTH_DEBUG(" ACL[%d] - denying based on %x", i, rights);
                eval->ae_result = KAUTH_RESULT_DENY;
                return(0);
            }
            break;
        default:
            KAUTH_DEBUG(" ACL - unknown entry kind %d", ace->ace_flags & KAUTH_ACE_KINDMASK);
            break;
        }
    }

    /* if not permitted, defer to other modes of authorisation */
    eval->ae_result = KAUTH_RESULT_DEFER;
    return(0);
}
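
/*
 * Illustrative sketch (not part of this file): how a caller prepares a
 * kauth_acl_eval structure for kauth_acl_evaluate().  The generic-right
 * expansions shown (KAUTH_VNODE_GENERIC_*_BITS) are the ones the vnode scope
 * uses; the function and acl/owner/group parameters are hypothetical.
 */
#if 0	/* example only */
static int
example_eval_acl(kauth_cred_t cred, kauth_acl_t acl, int is_owner, int in_group,
    kauth_ace_rights_t wanted)
{
    struct kauth_acl_eval eval;
    int error;

    bzero(&eval, sizeof(eval));
    eval.ae_requested = wanted;
    eval.ae_acl = &acl->acl_ace[0];
    eval.ae_count = acl->acl_entrycount;
    eval.ae_options = 0;
    if (is_owner)
        eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
    if (in_group)
        eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
    eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
    eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
    eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
    eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;

    if ((error = kauth_acl_evaluate(cred, &eval)) != 0)
        return(error);
    /* eval.ae_result is now ALLOW, DENY or DEFER */
    return((eval.ae_result == KAUTH_RESULT_DENY) ? EACCES : 0);
}
#endif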
/*
 * Perform ACL inheritance and umask-ACL handling.
 *
 * Entries are inherited from the ACL on dvp.  A caller-supplied
 * ACL is in initial, and the result is output into product.
 * If the process has a umask ACL and one is not supplied, we use
 * the umask ACL.
 * If isdir is set, the resultant ACL is for a directory, otherwise it is for a file.
 */
int
kauth_acl_inherit(vnode_t dvp, kauth_acl_t initial, kauth_acl_t *product, int isdir, vfs_context_t ctx)
{
    int	entries, error, index, i;
    struct vnode_attr dva;
    kauth_acl_t inherit, result;

    /*
     * Fetch the ACL from the directory.  This should never fail.  Note that we don't
     * manage inheritance when the remote server is doing authorization; we just
     * want to compose the umask-ACL and any initial ACL.
     */
    inherit = NULL;
    result = NULL;
    error = 0;
    if ((dvp != NULL) && !vfs_authopaque(vnode_mount(dvp))) {
        VATTR_INIT(&dva);
        VATTR_WANTED(&dva, va_acl);
        if ((error = vnode_getattr(dvp, &dva, ctx)) != 0) {
            KAUTH_DEBUG(" ERROR - could not get parent directory ACL for inheritance");
            return(error);
        }
        if (VATTR_IS_SUPPORTED(&dva, va_acl))
            inherit = dva.va_acl;
    }

    /*
     * Compute the number of entries in the result ACL by scanning the input lists.
     */
    entries = 0;
    if (inherit != NULL) {
        for (i = 0; i < inherit->acl_entrycount; i++) {
            if (inherit->acl_ace[i].ace_flags & (isdir ? KAUTH_ACE_DIRECTORY_INHERIT : KAUTH_ACE_FILE_INHERIT))
                entries++;
        }
    }

    if (initial == NULL) {
        /* XXX 3634665 TODO: fetch umask ACL from the process, set in initial */
    }

    if (initial != NULL) {
        entries += initial->acl_entrycount;
    }

    /*
     * If there is no initial ACL, and no inheritable entries, the
     * object should have no ACL at all.
     * Note that this differs from the case where the initial ACL
     * is empty, in which case the object must also have an empty ACL.
     */
    if ((entries == 0) && (initial == NULL)) {
        *product = NULL;
        error = 0;
        goto out;
    }

    /*
     * Allocate the result buffer.
     */
    if ((result = kauth_acl_alloc(entries)) == NULL) {
        KAUTH_DEBUG(" ERROR - could not allocate %d-entry result buffer for inherited ACL", entries);
        error = ENOMEM;
        goto out;
    }

    /*
     * Composition is simply: initial direct entries, then inherited entries.
     */
    index = 0;
    if (initial != NULL) {
        for (i = 0; i < initial->acl_entrycount; i++)
            result->acl_ace[index++] = initial->acl_ace[i];
        KAUTH_DEBUG(" INHERIT - applied %d initial entries", index);
    }
    if (inherit != NULL) {
        for (i = 0; i < inherit->acl_entrycount; i++) {
            /* inherit onto this object? */
            if (inherit->acl_ace[i].ace_flags & (isdir ? KAUTH_ACE_DIRECTORY_INHERIT : KAUTH_ACE_FILE_INHERIT)) {
                result->acl_ace[index] = inherit->acl_ace[i];
                result->acl_ace[index].ace_flags |= KAUTH_ACE_INHERITED;
                /* don't re-inherit? */
                if (result->acl_ace[index].ace_flags & KAUTH_ACE_LIMIT_INHERIT)
                    result->acl_ace[index].ace_flags &=
                        ~(KAUTH_ACE_DIRECTORY_INHERIT | KAUTH_ACE_FILE_INHERIT | KAUTH_ACE_LIMIT_INHERIT);
                index++;
            }
        }
    }
    result->acl_entrycount = index;
    *product = result;
    error = 0;
    KAUTH_DEBUG(" INHERIT - product ACL has %d entries", index);

out:
    if (inherit != NULL)
        kauth_acl_free(inherit);
    return(error);
}
/*
 * Optimistically copy in a kauth_filesec structure
 *
 * Parameters:	xsecurity		user space kauth_filesec_t
 *		xsecdestpp		pointer to kauth_filesec_t to be
 *					modified to contain a pointer to an
 *					allocated copy of the user space
 *					argument
 *
 * Returns:	0			Success
 *		ENOMEM			Insufficient memory for the copy.
 *		EINVAL			The user space data was invalid, or
 *					there were too many ACE entries.
 *		EFAULT			The user space address was invalid;
 *					this may mean 'fsec_entrycount' in
 *					the user copy is corrupt/incorrect.
 *
 * Implicit returns: xsecdestpp, modified (only if successful!)
 *
 * Notes:	The returned kauth_filesec_t is in host byte order
 *
 *		The caller is responsible for freeing the returned
 *		kauth_filesec_t in the success case using the function
 *		kauth_filesec_free()
 *
 *		Our largest initial guess is 32; this needs to move to
 *		a manifest constant in <sys/kauth.h>.
 */
int
kauth_copyinfilesec(user_addr_t xsecurity, kauth_filesec_t *xsecdestpp)
{
    user_addr_t uaddr, known_bound;
    int error;
    kauth_filesec_t fsec;
    int count;
    size_t copysize;

    error = 0;
    fsec = NULL;

    /*
     * Make a guess at the size of the filesec.  We start with the base
     * pointer, and look at how much room is left on the page, clipped
     * to a sensible upper bound.  If it turns out this isn't enough,
     * we'll size based on the actual ACL contents and come back again.
     *
     * The upper bound must be less than KAUTH_ACL_MAX_ENTRIES.  The
     * value here is fairly arbitrary.  It's ok to have a zero count.
     */
    known_bound = xsecurity + sizeof(struct kauth_filesec);
    uaddr = mach_vm_round_page(known_bound);
    count = (uaddr - known_bound) / sizeof(struct kauth_ace);
    if (count > 32)
        count = 32;
restart:
    if ((fsec = kauth_filesec_alloc(count)) == NULL) {
        error = ENOMEM;
        goto out;
    }
    copysize = KAUTH_FILESEC_SIZE(count);
    if ((error = copyin(xsecurity, (caddr_t)fsec, copysize)) != 0)
        goto out;

    /* validate the filesec header */
    if (fsec->fsec_magic != KAUTH_FILESEC_MAGIC) {
        error = EINVAL;
        goto out;
    }

    /*
     * Is there an ACL payload, and is it too big?
     */
    if ((fsec->fsec_entrycount != KAUTH_FILESEC_NOACL) &&
        (fsec->fsec_entrycount > count)) {
        if (fsec->fsec_entrycount > KAUTH_ACL_MAX_ENTRIES) {
            /* XXX This should be E2BIG */
            error = EINVAL;
            goto out;
        }
        /* not large enough; resize to the actual entry count and retry */
        count = fsec->fsec_entrycount;
        kauth_filesec_free(fsec);
        fsec = NULL;
        goto restart;
    }

out:
    if (error) {
        if (fsec != NULL)
            kauth_filesec_free(fsec);
    } else {
        *xsecdestpp = fsec;
    }
    return(error);
}
/*
 * Allocate a block of memory containing a filesec structure, immediately
 * followed by 'count' kauth_ace structures.
 *
 * Parameters:	count			Number of kauth_ace structures needed
 *
 * Returns:	!NULL			A pointer to the allocated block
 *		NULL			Invalid 'count' or insufficient memory
 *
 * Notes:	Returned memory area assumes that the structures are packed
 *		densely, so this function may only be used by code that also
 *		assumes no padding following structures.
 *
 *		The returned structure must be freed by the caller using the
 *		function kauth_filesec_free(), in case we decide to use an
 *		allocation mechanism that is aware of the object size at some
 *		point, since the object size is only available by introspecting
 *		the object itself.
 */
kauth_filesec_t
kauth_filesec_alloc(int count)
{
    kauth_filesec_t	fsp;

    /* if the caller hasn't given us a valid size hint, assume the worst */
    if ((count < 0) || (count > KAUTH_ACL_MAX_ENTRIES))
        return(NULL);

    MALLOC(fsp, kauth_filesec_t, KAUTH_FILESEC_SIZE(count), M_KAUTH, M_WAITOK);
    if (fsp == NULL)
        return(NULL);
    fsp->fsec_magic = KAUTH_FILESEC_MAGIC;
    fsp->fsec_owner = kauth_null_guid;
    fsp->fsec_group = kauth_null_guid;
    fsp->fsec_entrycount = KAUTH_FILESEC_NOACL;
    return(fsp);
}
/*
 * Free a kauth_filesec_t that was previously allocated, either by a direct
 * call to kauth_filesec_alloc() or by calling a function that calls it.
 *
 * Parameters:	fsp			kauth_filesec_t to free
 *
 * Returns:	(void)
 *
 * Notes:	The kauth_filesec_t to be freed is assumed to be in host
 *		byte order so that this function can introspect it in the
 *		future to determine its size, if necessary.
 */
void
kauth_filesec_free(kauth_filesec_t fsp)
{
#ifdef KAUTH_DEBUG_ENABLE
    if (fsp == KAUTH_FILESEC_NONE)
        panic("freeing KAUTH_FILESEC_NONE");
    if (fsp == KAUTH_FILESEC_WANTED)
        panic("freeing KAUTH_FILESEC_WANTED");
#endif
    FREE(fsp, M_KAUTH);
}
/*
 * Set the endianness of a filesec and an ACL; if 'acl' is NULL, use the
 * ACL interior to 'fsec' instead.  If the endianness doesn't change, then
 * this function will have no effect.
 *
 * Parameters:	kendian			The endianness to set; this is either
 *					KAUTH_ENDIAN_HOST or KAUTH_ENDIAN_DISK.
 *		fsec			The filesec to convert.
 *		acl			The ACL to convert (optional)
 *
 * Returns:	(void)
 *
 * Notes:	We use ntohl() because it has a transitive property on Intel
 *		machines and no effect on PPC machines.  This guarantees us
 *		that the swapping only occurs if the endianness is wrong.
 */
void
kauth_filesec_acl_setendian(int kendian, kauth_filesec_t fsec, kauth_acl_t acl)
{
    uint32_t compare_magic = KAUTH_FILESEC_MAGIC;
    uint32_t invert_magic = ntohl(KAUTH_FILESEC_MAGIC);
    uint32_t compare_acl_entrycount;
    uint32_t i;

    if (compare_magic == invert_magic)
        return;

    /* If no ACL, use ACL interior to 'fsec' instead */
    if (acl == NULL)
        acl = &fsec->fsec_acl;

    compare_acl_entrycount = acl->acl_entrycount;

    /*
     * Only convert what needs to be converted, and only if the arguments
     * are valid.  The following switch and tests effectively reject
     * conversions on invalid magic numbers as a desirable side effect.
     */
    switch (kendian) {
    case KAUTH_ENDIAN_HOST:		/* not in host, convert to host */
        if (fsec->fsec_magic != invert_magic)
            return;
        /* acl_entrycount is byteswapped */
        compare_acl_entrycount = ntohl(acl->acl_entrycount);
        break;
    case KAUTH_ENDIAN_DISK:		/* not in disk, convert to disk */
        if (fsec->fsec_magic != compare_magic)
            return;
        break;
    default:				/* bad argument */
        return;
    }

    /* We are go for conversion */
    fsec->fsec_magic = ntohl(fsec->fsec_magic);
    acl->acl_entrycount = ntohl(acl->acl_entrycount);
    if (compare_acl_entrycount != KAUTH_FILESEC_NOACL) {
        acl->acl_flags = ntohl(acl->acl_flags);

        /* swap ACE rights and flags */
        for (i = 0; i < compare_acl_entrycount; i++) {
            acl->acl_ace[i].ace_flags = ntohl(acl->acl_ace[i].ace_flags);
            acl->acl_ace[i].ace_rights = ntohl(acl->acl_ace[i].ace_rights);
        }
    }
}
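
/*
 * Illustrative sketch (not part of this file): a filesystem that stores ACLs
 * on disk in network byte order would convert a filesec in place before
 * writing it out, and back to host order after reading it in.  The function
 * names are hypothetical.
 */
#if 0	/* example only */
static void
example_filesec_to_disk(kauth_filesec_t fsec)
{
    /* no-op on big-endian machines; byte-swaps on little-endian ones */
    kauth_filesec_acl_setendian(KAUTH_ENDIAN_DISK, fsec, NULL);
}

static void
example_filesec_from_disk(kauth_filesec_t fsec)
{
    kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, fsec, NULL);
}
#endif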
/*
 * Allocate an ACL buffer.
 */
kauth_acl_t
kauth_acl_alloc(int count)
{
    kauth_acl_t	aclp;

    /* if the caller hasn't given us a valid size hint, assume the worst */
    if ((count < 0) || (count > KAUTH_ACL_MAX_ENTRIES))
        return(NULL);

    MALLOC(aclp, kauth_acl_t, KAUTH_ACL_SIZE(count), M_KAUTH, M_WAITOK);
    if (aclp == NULL)
        return(NULL);
    aclp->acl_entrycount = 0;
    aclp->acl_flags = 0;
    return(aclp);
}
void
kauth_acl_free(kauth_acl_t aclp)
{
    FREE(aclp, M_KAUTH);
}
/*
 * WARNING - caller must hold KAUTH_SCOPELOCK
 */
static int kauth_add_callback_to_scope(kauth_scope_t sp, kauth_listener_t klp)
{
    int		i;

    for (i = 0; i < KAUTH_SCOPE_MAX_LISTENERS; i++) {
        if (sp->ks_listeners[i].kll_listenerp == NULL) {
            sp->ks_listeners[i].kll_callback = klp->kl_callback;
            sp->ks_listeners[i].kll_idata = klp->kl_idata;
            sp->ks_listeners[i].kll_listenerp = klp;
            sp->ks_flags |= KS_F_HAS_LISTENERS;
            return(0);
        }
    }
    /* table full */
    return(ENOSPC);
}