/*-
 * Copyright (c) 2008-2010 Apple Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1.  Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 * 2.  Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
 *     its contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/kernel.h>
#include <sys/systm.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/locks.h>
#include <kern/sched_prim.h>

#include <libkern/OSAtomic.h>

#include <bsm/audit.h>
#include <bsm/audit_internal.h>

#include <security/audit/audit_bsd.h>
#include <security/audit/audit.h>
#include <security/audit/audit_private.h>

#include <mach/host_priv.h>
#include <mach/host_special_ports.h>
#include <mach/audit_triggers_server.h>

#include <os/overflow.h>

extern void ipc_port_release_send(ipc_port_t port);
#if CONFIG_AUDIT

struct mhdr {
	size_t			 mh_size;
	au_malloc_type_t	*mh_type;
	u_long			 mh_magic;
	char			 mh_data[0];
};

/*
 * The lock group for the audit subsystem.
 */
static lck_grp_t	*audit_lck_grp = NULL;

#define	AUDIT_MHMAGIC	0x4D656C53
#if AUDIT_MALLOC_DEBUG
#define	AU_MAX_SHORTDESC	20
#define	AU_MAX_LASTCALLER	20
struct au_malloc_debug_info {
	SInt64		md_size;
	SInt64		md_maxsize;
	SInt32		md_inuse;
	SInt32		md_maxused;
	unsigned	md_type;
	unsigned	md_magic;
	char		md_shortdesc[AU_MAX_SHORTDESC];
	char		md_lastcaller[AU_MAX_LASTCALLER];
};
typedef struct au_malloc_debug_info au_malloc_debug_info_t;

au_malloc_type_t *audit_malloc_types[NUM_MALLOC_TYPES];

static int	audit_sysctl_malloc_debug(struct sysctl_oid *oidp, void *arg1,
    int arg2, struct sysctl_req *req);

SYSCTL_PROC(_kern, OID_AUTO, audit_malloc_debug, CTLFLAG_RD, NULL, 0,
    audit_sysctl_malloc_debug, "S,audit_malloc_debug",
    "Current malloc debug info for auditing.");

#define	AU_MALLOC_DBINFO_SZ \
    (NUM_MALLOC_TYPES * sizeof(au_malloc_debug_info_t))
/*
 * Copy out the malloc debug info via the sysctl interface.  The userland code
 * is something like the following:
 *
 *	error = sysctlbyname("kern.audit_malloc_debug", buffer_ptr, &buffer_len,
 *	    NULL, 0);
 */
static int
audit_sysctl_malloc_debug(__unused struct sysctl_oid *oidp, __unused void *arg1,
    __unused int arg2, struct sysctl_req *req)
{
	int i;
	size_t sz;
	au_malloc_debug_info_t	*amdi_ptr, *nxt_ptr;
	int err;
	/*
	 * This provides a read-only node.
	 */
	if (req->newptr != USER_ADDR_NULL) {
		return EPERM;
	}

	/*
	 * If just querying then return the space required.
	 */
	if (req->oldptr == USER_ADDR_NULL) {
		req->oldidx = AU_MALLOC_DBINFO_SZ;
		return 0;
	}

	/*
	 * Allocate a temporary buffer.
	 */
	if (req->oldlen < AU_MALLOC_DBINFO_SZ) {
		return ENOMEM;
	}
	amdi_ptr = (au_malloc_debug_info_t *)kalloc(AU_MALLOC_DBINFO_SZ);
	if (amdi_ptr == NULL) {
		return ENOMEM;
	}
	bzero(amdi_ptr, AU_MALLOC_DBINFO_SZ);
	/*
	 * Build the record array.
	 */
	sz = 0;
	nxt_ptr = amdi_ptr;
	for (i = 0; i < NUM_MALLOC_TYPES; i++) {
		if (audit_malloc_types[i] == NULL) {
			continue;
		}
		if (audit_malloc_types[i]->mt_magic != M_MAGIC) {
			nxt_ptr->md_magic = audit_malloc_types[i]->mt_magic;
			continue;
		}
		nxt_ptr->md_magic = audit_malloc_types[i]->mt_magic;
		nxt_ptr->md_size = audit_malloc_types[i]->mt_size;
		nxt_ptr->md_maxsize = audit_malloc_types[i]->mt_maxsize;
		nxt_ptr->md_inuse = (int)audit_malloc_types[i]->mt_inuse;
		nxt_ptr->md_maxused = (int)audit_malloc_types[i]->mt_maxused;
		strlcpy(nxt_ptr->md_shortdesc,
		    audit_malloc_types[i]->mt_shortdesc, AU_MAX_SHORTDESC - 1);
		strlcpy(nxt_ptr->md_lastcaller,
		    audit_malloc_types[i]->mt_lastcaller, AU_MAX_LASTCALLER - 1);
		sz += sizeof(au_malloc_debug_info_t);
		nxt_ptr++;
	}

	err = SYSCTL_OUT(req, amdi_ptr, sz);
	kfree(amdi_ptr, AU_MALLOC_DBINFO_SZ);

	return err;
}
#endif /* AUDIT_MALLOC_DEBUG */
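
/*
 * For illustration, a fuller (hypothetical) userland sketch of the two-step
 * pattern the node supports: query the required size with a NULL buffer,
 * then fetch the records.  The buffer names are placeholders.
 *
 *	size_t buffer_len = 0;
 *	au_malloc_debug_info_t *buffer_ptr;
 *
 *	// Query the space required (oldptr == NULL).
 *	if (sysctlbyname("kern.audit_malloc_debug", NULL, &buffer_len,
 *	    NULL, 0) != 0)
 *		err(1, "sysctlbyname");
 *	buffer_ptr = malloc(buffer_len);
 *	// Copy out up to buffer_len bytes of records.
 *	if (sysctlbyname("kern.audit_malloc_debug", buffer_ptr, &buffer_len,
 *	    NULL, 0) != 0)
 *		err(1, "sysctlbyname");
 */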
/*
 * BSD malloc()
 *
 * If the M_NOWAIT flag is set then the allocation will not block and may
 * return NULL.  If the M_ZERO flag is set then the buffer is zeroed.
 */
void *
#if AUDIT_MALLOC_DEBUG
_audit_malloc(size_t size, au_malloc_type_t *type, int flags, const char *fn)
#else
_audit_malloc(size_t size, au_malloc_type_t *type, int flags)
#endif
{
	struct mhdr	*hdr;
	size_t		memsize;
	if (os_add_overflow(sizeof(*hdr), size, &memsize)) {
		return NULL;
	}

	if (flags & M_NOWAIT) {
		hdr = (void *)kalloc_noblock(memsize);
	} else {
		hdr = (void *)kalloc(memsize);
		if (hdr == NULL) {
			panic("_audit_malloc: kernel memory exhausted");
		}
	}
	if (hdr == NULL) {
		return NULL;
	}
	hdr->mh_size = memsize;
	hdr->mh_type = type;
	hdr->mh_magic = AUDIT_MHMAGIC;
	if (flags & M_ZERO) {
		memset(hdr->mh_data, 0, size);
	}
#if AUDIT_MALLOC_DEBUG
	if (type != NULL && type->mt_type < NUM_MALLOC_TYPES) {
		OSAddAtomic64(memsize, &type->mt_size);
		type->mt_maxsize = max(type->mt_size, type->mt_maxsize);
		OSAddAtomic(1, &type->mt_inuse);
		type->mt_maxused = max(type->mt_inuse, type->mt_maxused);
		type->mt_lastcaller = fn;
		audit_malloc_types[type->mt_type] = type;
	}
#endif /* AUDIT_MALLOC_DEBUG */
	return hdr->mh_data;
}
/*
 * BSD free()
 */
void
#if AUDIT_MALLOC_DEBUG
_audit_free(void *addr, au_malloc_type_t *type)
#else
_audit_free(void *addr, __unused au_malloc_type_t *type)
#endif
{
	struct mhdr *hdr;

	if (!addr) {
		return;
	}
	hdr = addr;
	hdr--;

	if (hdr->mh_magic != AUDIT_MHMAGIC) {
		panic("_audit_free(): hdr->mh_magic (%lx) != AUDIT_MHMAGIC",
		    hdr->mh_magic);
	}

#if AUDIT_MALLOC_DEBUG
	if (type != NULL) {
		OSAddAtomic64(-hdr->mh_size, &type->mt_size);
		OSAddAtomic(-1, &type->mt_inuse);
	}
#endif /* AUDIT_MALLOC_DEBUG */
	kfree(hdr, hdr->mh_size);
}
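
/*
 * A minimal (hypothetical) in-kernel usage sketch, assuming a non-debug
 * build and an au_malloc_type_t named audit_example_type: the pointer
 * returned to the caller is hdr->mh_data, so _audit_free() can step back
 * one header to recover the allocation size and verify AUDIT_MHMAGIC.
 *
 *	struct example { int count; };
 *	struct example *ep;
 *
 *	ep = _audit_malloc(sizeof(*ep), audit_example_type, M_WAITOK | M_ZERO);
 *	ep->count = 1;
 *	_audit_free(ep, audit_example_type);	// panics on a corrupted header
 */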
/*
 * Initialize a condition variable.  Must be called before use.
 */
void
_audit_cv_init(struct cv *cvp, const char *desc)
{
	if (desc == NULL) {
		cvp->cv_description = "UNKNOWN";
	} else {
		cvp->cv_description = desc;
	}
	cvp->cv_waiters = 0;
}
/*
 * Destroy a condition variable.
 */
void
_audit_cv_destroy(struct cv *cvp)
{
	cvp->cv_description = NULL;
	cvp->cv_waiters = 0;
}
/*
 * Signal a condition variable, waking up one waiting thread.
 */
void
_audit_cv_signal(struct cv *cvp)
{
	if (cvp->cv_waiters > 0) {
		wakeup_one((caddr_t)cvp);
		cvp->cv_waiters--;
	}
}
/*
 * Broadcast a signal to a condition variable.
 */
void
_audit_cv_broadcast(struct cv *cvp)
{
	if (cvp->cv_waiters > 0) {
		wakeup((caddr_t)cvp);
		cvp->cv_waiters = 0;
	}
}
/*
 * Wait on a condition variable.  A cv_signal or cv_broadcast on the same
 * condition variable will resume the thread.  It is recommended that the
 * mutex be held when cv_signal or cv_broadcast are called.
 */
void
_audit_cv_wait(struct cv *cvp, lck_mtx_t *mp, const char *desc)
{
	cvp->cv_waiters++;
	(void) msleep(cvp, mp, PZERO, desc, 0);
}
/*
 * Wait on a condition variable, allowing interruption by signals.  Return 0
 * if the thread was resumed with cv_signal or cv_broadcast, EINTR or
 * ERESTART if a signal was caught.  If ERESTART is returned the system call
 * should be restarted if possible.
 */
int
_audit_cv_wait_sig(struct cv *cvp, lck_mtx_t *mp, const char *desc)
{
	cvp->cv_waiters++;
	return msleep(cvp, mp, PSOCK | PCATCH, desc, 0);
}
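
/*
 * A minimal (hypothetical) sketch of the intended pattern, assuming a
 * struct mtx q_mtx, a struct cv q_cv, and a queue_is_empty() predicate:
 * waiters re-check the predicate in a loop with the mutex held, since a
 * wakeup only means the condition may have changed.
 *
 *	lck_mtx_lock(q_mtx.mtx_lock);
 *	while (queue_is_empty())
 *		_audit_cv_wait(&q_cv, q_mtx.mtx_lock, "q_empty");
 *	// ... consume an entry with the mutex still held ...
 *	lck_mtx_unlock(q_mtx.mtx_lock);
 *
 * The producer, holding the same mutex, enqueues an entry and then calls
 * _audit_cv_signal(&q_cv).
 */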
/*
 * BSD Mutexes.
 */
void
#if DIAGNOSTIC
_audit_mtx_init(struct mtx *mp, const char *lckname)
#else
_audit_mtx_init(struct mtx *mp, __unused const char *lckname)
#endif
{
	mp->mtx_lock = lck_mtx_alloc_init(audit_lck_grp, LCK_ATTR_NULL);
	KASSERT(mp->mtx_lock != NULL,
	    ("_audit_mtx_init: Could not allocate a mutex."));
#if DIAGNOSTIC
	strlcpy(mp->mtx_name, lckname, AU_MAX_LCK_NAME);
#endif
}
void
_audit_mtx_destroy(struct mtx *mp)
{
	if (mp->mtx_lock) {
		lck_mtx_free(mp->mtx_lock, audit_lck_grp);
		mp->mtx_lock = NULL;
	}
}
/*
 * BSD rw locks.
 */
void
#if DIAGNOSTIC
_audit_rw_init(struct rwlock *lp, const char *lckname)
#else
_audit_rw_init(struct rwlock *lp, __unused const char *lckname)
#endif
{
	lp->rw_lock = lck_rw_alloc_init(audit_lck_grp, LCK_ATTR_NULL);
	KASSERT(lp->rw_lock != NULL,
	    ("_audit_rw_init: Could not allocate a rw lock."));
#if DIAGNOSTIC
	strlcpy(lp->rw_name, lckname, AU_MAX_LCK_NAME);
#endif
}
void
_audit_rw_destroy(struct rwlock *lp)
{
	if (lp->rw_lock) {
		lck_rw_free(lp->rw_lock, audit_lck_grp);
		lp->rw_lock = NULL;
	}
}
/*
 * Wait on a condition variable in a continuation (i.e. yield kernel stack).
 * A cv_signal or cv_broadcast on the same condition variable will cause
 * the thread to be scheduled.
 */
int
_audit_cv_wait_continuation(struct cv *cvp, lck_mtx_t *mp, thread_continue_t function)
{
	int status = KERN_SUCCESS;

	cvp->cv_waiters++;
	assert_wait(cvp, THREAD_UNINT);
	lck_mtx_unlock(mp);

	status = thread_block(function);

	/* should not be reached, but just in case, re-lock */
	lck_mtx_lock(mp);

	return status;
}
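
/*
 * A minimal (hypothetical) sketch of continuation-style blocking, assuming
 * worker_cv, worker_mtx, and work_pending() are defined elsewhere: the
 * worker loop is its own continuation, so the kernel stack is discarded
 * while the thread sleeps and the loop restarts from the top on wakeup.
 *
 *	static void
 *	worker_continue(void *arg __unused, wait_result_t wr __unused)
 *	{
 *		lck_mtx_lock(worker_mtx);
 *		while (!work_pending())
 *			// Does not return; re-enters worker_continue().
 *			_audit_cv_wait_continuation(&worker_cv, worker_mtx,
 *			    (thread_continue_t)worker_continue);
 *		// ... process work, unlock, and loop ...
 *	}
 */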
/*
 * Simple recursive lock.
 */
void
#if DIAGNOSTIC
_audit_rlck_init(struct rlck *lp, const char *lckname)
#else
_audit_rlck_init(struct rlck *lp, __unused const char *lckname)
#endif
{
	lp->rl_mtx = lck_mtx_alloc_init(audit_lck_grp, LCK_ATTR_NULL);
	KASSERT(lp->rl_mtx != NULL,
	    ("_audit_rlck_init: Could not allocate a recursive lock."));
#if DIAGNOSTIC
	strlcpy(lp->rl_name, lckname, AU_MAX_LCK_NAME);
#endif
	lp->rl_thread = 0;
	lp->rl_recurse = 0;
}
/*
 * Recursive lock.  Allow the same thread to recursively lock the same lock.
 */
void
_audit_rlck_lock(struct rlck *lp)
{
	if (lp->rl_thread == current_thread()) {
		OSAddAtomic(1, &lp->rl_recurse);
		KASSERT(lp->rl_recurse < 10000,
		    ("_audit_rlck_lock: lock nested too deep."));
	} else {
		lck_mtx_lock(lp->rl_mtx);
		lp->rl_thread = current_thread();
		lp->rl_recurse = 1;
	}
}
/*
 * Recursive unlock.  It should be the same thread that does the unlock.
 */
void
_audit_rlck_unlock(struct rlck *lp)
{
	KASSERT(lp->rl_thread == current_thread(),
	    ("_audit_rlck_unlock(): Don't own lock."));

	/* Note: OSAddAtomic returns the previous value. */
	if (OSAddAtomic(-1, &lp->rl_recurse) == 1) {
		lp->rl_thread = 0;
		lck_mtx_unlock(lp->rl_mtx);
	}
}
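
/*
 * A minimal (hypothetical) usage sketch, assuming a struct rlck named
 * audit_lp: the owning thread may re-enter the lock; rl_recurse counts the
 * depth and only the final unlock releases the underlying mutex.
 *
 *	_audit_rlck_lock(&audit_lp);	// acquires, rl_recurse == 1
 *	_audit_rlck_lock(&audit_lp);	// same thread, rl_recurse == 2
 *	_audit_rlck_unlock(&audit_lp);	// rl_recurse == 1, still held
 *	_audit_rlck_unlock(&audit_lp);	// rl_recurse == 0, mutex released
 */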
void
_audit_rlck_destroy(struct rlck *lp)
{
	if (lp->rl_mtx) {
		lck_mtx_free(lp->rl_mtx, audit_lck_grp);
		lp->rl_mtx = NULL;
	}
}
/*
 * Recursive lock assert.
 */
void
_audit_rlck_assert(struct rlck *lp, u_int assert)
{
	thread_t cthd = current_thread();

	if (assert == LCK_MTX_ASSERT_OWNED && lp->rl_thread != cthd) {
		panic("recursive lock (%p) not held by this thread (%p).",
		    lp, cthd);
	}
	if (assert == LCK_MTX_ASSERT_NOTOWNED && lp->rl_thread != 0) {
		panic("recursive lock (%p) held by thread (%p).",
		    lp, lp->rl_thread);
	}
}
/*
 * Simple sleep lock.
 */
void
#if DIAGNOSTIC
_audit_slck_init(struct slck *lp, const char *lckname)
#else
_audit_slck_init(struct slck *lp, __unused const char *lckname)
#endif
{
	lp->sl_mtx = lck_mtx_alloc_init(audit_lck_grp, LCK_ATTR_NULL);
	KASSERT(lp->sl_mtx != NULL,
	    ("_audit_slck_init: Could not allocate a sleep lock."));
#if DIAGNOSTIC
	strlcpy(lp->sl_name, lckname, AU_MAX_LCK_NAME);
#endif
	lp->sl_locked = 0;
	lp->sl_waiting = 0;
}
/*
 * Sleep lock lock.  The 'intr' flag determines if the lock is interruptible.
 * If 'intr' is true then signals or other events can interrupt the sleep lock.
 */
wait_result_t
_audit_slck_lock(struct slck *lp, int intr)
{
	wait_result_t res = THREAD_AWAKENED;

	lck_mtx_lock(lp->sl_mtx);
	while (lp->sl_locked && res == THREAD_AWAKENED) {
		lp->sl_waiting = 1;
		res = lck_mtx_sleep(lp->sl_mtx, LCK_SLEEP_DEFAULT,
		    (event_t) lp, (intr) ? THREAD_INTERRUPTIBLE : THREAD_UNINT);
	}
	if (res == THREAD_AWAKENED) {
		lp->sl_locked = 1;
	}
	lck_mtx_unlock(lp->sl_mtx);

	return res;
}
/*
 * Sleep lock unlock.  Wake up all the threads waiting for this lock.
 */
void
_audit_slck_unlock(struct slck *lp)
{
	lck_mtx_lock(lp->sl_mtx);
	lp->sl_locked = 0;
	if (lp->sl_waiting) {
		lp->sl_waiting = 0;

		/* Wake up *all* sleeping threads. */
		wakeup((event_t) lp);
	}
	lck_mtx_unlock(lp->sl_mtx);
}
/*
 * Sleep lock try.  Don't sleep if it doesn't get the lock.
 */
int
_audit_slck_trylock(struct slck *lp)
{
	int result;

	lck_mtx_lock(lp->sl_mtx);
	result = !lp->sl_locked;
	if (result) {
		lp->sl_locked = 1;
	}
	lck_mtx_unlock(lp->sl_mtx);

	return result;
}
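
/*
 * A minimal (hypothetical) usage sketch, assuming a struct slck named
 * cfg_lock: an interruptible acquire must check the wait result, since a
 * caught signal or other event leaves the lock unowned.
 *
 *	if (_audit_slck_lock(&cfg_lock, 1) != THREAD_AWAKENED)
 *		return EINTR;	// interrupted; lock not held
 *	// ... critical section ...
 *	_audit_slck_unlock(&cfg_lock);
 */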
/*
 * Sleep lock assert.
 */
void
_audit_slck_assert(struct slck *lp, u_int assert)
{
	if (assert == LCK_MTX_ASSERT_OWNED && lp->sl_locked == 0) {
		panic("sleep lock (%p) not held.", lp);
	}
	if (assert == LCK_MTX_ASSERT_NOTOWNED && lp->sl_locked == 1) {
		panic("sleep lock (%p) held.", lp);
	}
}
void
_audit_slck_destroy(struct slck *lp)
{
	if (lp->sl_mtx) {
		lck_mtx_free(lp->sl_mtx, audit_lck_grp);
		lp->sl_mtx = NULL;
	}
}
/*
 * XXXss - This code was taken from bsd/netinet6/icmp6.c.  Maybe ppsratecheck()
 * should be made global in icmp6.c.
 */
#ifndef timersub
#define	timersub(tvp, uvp, vvp)						\
	do {								\
		(vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec;		\
		(vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec;	\
		if ((vvp)->tv_usec < 0) {				\
			(vvp)->tv_sec--;				\
			(vvp)->tv_usec += 1000000;			\
		}							\
	} while (0)
#endif
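
/*
 * Example of the borrow case: (2s, 100us) - (0s, 900us) first yields
 * (2s, -800us), which the carry step normalizes to (1s, 999200us).
 */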
/*
 * Packets (or events) per second limitation.
 */
int
_audit_ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
{
	struct timeval tv, delta;
	int rv;

	microtime(&tv);

	timersub(&tv, lasttime, &delta);

	/*
	 * Check for 0,0 so that the message will be seen at least once.
	 * If more than one second has passed since the last update of
	 * lasttime, reset the counter.
	 *
	 * We increment *curpps even when *curpps < maxpps, as some callers
	 * may want to use *curpps for statistics as well.
	 */
	if ((lasttime->tv_sec == 0 && lasttime->tv_usec == 0) ||
	    delta.tv_sec >= 1) {
		*lasttime = tv;
		*curpps = 0;
		rv = 1;
	} else if (maxpps < 0) {
		rv = 1;
	} else if (*curpps < maxpps) {
		rv = 1;
	} else {
		rv = 0;
	}
	if (*curpps + 1 > 0) {
		*curpps = *curpps + 1;
	}

	return rv;
}
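
/*
 * A minimal (hypothetical) usage sketch: gate a diagnostic so it prints at
 * most five times per second while still counting every event.
 *
 *	static struct timeval last;
 *	static int cur;
 *
 *	if (_audit_ppsratecheck(&last, &cur, 5))
 *		printf("audit record dropped\n");
 */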
/*
 * Initialize the lock group for the audit-related locks and mutexes.
 */
void
_audit_lck_grp_init(void)
{
	audit_lck_grp = lck_grp_alloc_init("Audit", LCK_GRP_ATTR_NULL);

	KASSERT(audit_lck_grp != NULL,
	    ("audit_get_lck_grp: Could not allocate the audit lock group."));
}
int
audit_send_trigger(unsigned int trigger)
{
	mach_port_t audit_port;
	int error;

	error = host_get_audit_control_port(host_priv_self(), &audit_port);
	if (error == KERN_SUCCESS && audit_port != MACH_PORT_NULL) {
		(void)audit_triggers(audit_port, trigger);
		ipc_port_release_send(audit_port);
		return 0;
	} else {
		printf("Cannot get audit control port\n");
		return error;
	}
}
#endif /* CONFIG_AUDIT */