X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/b0d623f7f2ae71ed96e60569f61f9a9a27016e80..HEAD:/bsd/security/audit/audit_bsd.c

diff --git a/bsd/security/audit/audit_bsd.c b/bsd/security/audit/audit_bsd.c
index fdae0d79d..2f3adbace 100644
--- a/bsd/security/audit/audit_bsd.c
+++ b/bsd/security/audit/audit_bsd.c
@@ -1,5 +1,5 @@
 /*-
- * Copyright (c) 2008-2009 Apple Inc.
+ * Copyright (c) 2008-2010 Apple Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -50,33 +50,43 @@
 #include
 #include
 #include
+#include
+
+#include
+
+extern void ipc_port_release_send(ipc_port_t port);
 
 #if CONFIG_AUDIT
 struct mhdr {
-	size_t		mh_size;
-	au_malloc_type_t	*mh_type;
-	u_long		mh_magic;
-	char		mh_data[0];
+	size_t                   mh_size;
+	au_malloc_type_t        *mh_type;
+	u_long                   mh_magic;
+	char                     mh_data[0];
 };
 
-#define	AUDIT_MHMAGIC	0x4D656C53
+/*
+ * The lock group for the audit subsystem.
+ */
+static LCK_GRP_DECLARE(audit_lck_grp, "Audit");
+
+#define AUDIT_MHMAGIC   0x4D656C53
 
 #if AUDIT_MALLOC_DEBUG
-#define	AU_MAX_SHORTDESC	20
-#define	AU_MAX_LASTCALLER	20
+#define AU_MAX_SHORTDESC        20
+#define AU_MAX_LASTCALLER       20
 struct au_malloc_debug_info {
-	SInt64		md_size;
-	SInt64		md_maxsize;
-	SInt32		md_inuse;
-	SInt32		md_maxused;
-	unsigned	md_type;
-	unsigned	md_magic;
-	char		md_shortdesc[AU_MAX_SHORTDESC];
-	char		md_lastcaller[AU_MAX_LASTCALLER];
+	SInt64          md_size;
+	SInt64          md_maxsize;
+	SInt32          md_inuse;
+	SInt32          md_maxused;
+	unsigned        md_type;
+	unsigned        md_magic;
+	char            md_shortdesc[AU_MAX_SHORTDESC];
+	char            md_lastcaller[AU_MAX_LASTCALLER];
 };
 typedef struct au_malloc_debug_info au_malloc_debug_info_t;
 
-au_malloc_type_t	*audit_malloc_types[NUM_MALLOC_TYPES];
+au_malloc_type_t *audit_malloc_types[NUM_MALLOC_TYPES];
 
 static int audit_sysctl_malloc_debug(struct sysctl_oid *oidp, void *arg1,
     int arg2, struct sysctl_req *req);
@@ -85,11 +95,11 @@ SYSCTL_PROC(_kern, OID_AUTO, audit_malloc_debug, CTLFLAG_RD, NULL, 0,
     audit_sysctl_malloc_debug, "S,audit_malloc_debug",
     "Current malloc debug info for auditing.");
 
-#define	AU_MALLOC_DBINFO_SZ \
+#define AU_MALLOC_DBINFO_SZ \
     (NUM_MALLOC_TYPES * sizeof(au_malloc_debug_info_t))
 
 /*
- * Copy out the malloc debug info via the sysctl interface.  The userland code 
+ * Copy out the malloc debug info via the sysctl interface.  The userland code
  * is something like the following:
  *
  * error = sysctlbyname("kern.audit_malloc_debug", buffer_ptr, &buffer_len,
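The hunk ends mid-comment, but the userland side it describes is the standard two-step sysctl read: call once with a NULL buffer to learn the required size, then call again to fetch the records. A minimal, self-contained sketch of such a caller (hypothetical, not part of this diff; the kern.audit_malloc_debug node exists only on kernels built with AUDIT_MALLOC_DEBUG):

    #include <sys/types.h>
    #include <sys/sysctl.h>
    #include <stdio.h>
    #include <stdlib.h>

    int
    main(void)
    {
            size_t len = 0;
            void *buf;

            /* NULL buffer: the handler only reports the space required. */
            if (sysctlbyname("kern.audit_malloc_debug", NULL, &len, NULL, 0) != 0) {
                    perror("sysctlbyname(size)");
                    return 1;
            }
            if ((buf = malloc(len)) == NULL) {
                    return 1;
            }
            /* Second call: copy out up to len bytes of records. */
            if (sysctlbyname("kern.audit_malloc_debug", buf, &len, NULL, 0) != 0) {
                    perror("sysctlbyname(read)");
                    free(buf);
                    return 1;
            }
            printf("%zu bytes of au_malloc_debug_info_t records\n", len);
            free(buf);
            return 0;
    }

The NULL-buffer case corresponds to the req->oldptr == USER_ADDR_NULL branch in the handler below, which reports AU_MALLOC_DBINFO_SZ via req->oldidx without copying anything out.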
@@ -107,35 +117,38 @@ audit_sysctl_malloc_debug(__unused struct sysctl_oid *oidp, __unused void *arg1,
 	/*
 	 * This provides a read-only node.
 	 */
-	if (req->newptr != USER_ADDR_NULL)
-		return (EPERM);
+	if (req->newptr != USER_ADDR_NULL) {
+		return EPERM;
+	}
 
 	/*
-	 * If just querying then return the space required. 
+	 * If just querying then return the space required.
 	 */
 	if (req->oldptr == USER_ADDR_NULL) {
-		req->oldidx = AU_MALLOC_DBINFO_SZ;
-		return (0);
+		req->oldidx = AU_MALLOC_DBINFO_SZ;
+		return 0;
 	}
 
 	/*
 	 * Alloc a temporary buffer.
 	 */
-	if (req->oldlen < AU_MALLOC_DBINFO_SZ)
-		return (ENOMEM);
-	amdi_ptr = (au_malloc_debug_info_t *)kalloc(AU_MALLOC_DBINFO_SZ);
-	if (amdi_ptr == NULL)
-		return (ENOMEM);
-	bzero(amdi_ptr, AU_MALLOC_DBINFO_SZ);
+	if (req->oldlen < AU_MALLOC_DBINFO_SZ) {
+		return ENOMEM;
+	}
+	amdi_ptr = kheap_alloc(KHEAP_TEMP, AU_MALLOC_DBINFO_SZ, Z_WAITOK | Z_ZERO);
+	if (amdi_ptr == NULL) {
+		return ENOMEM;
+	}
 
 	/*
-	 * Build the record array. 
+	 * Build the record array.
 	 */
 	sz = 0;
 	nxt_ptr = amdi_ptr;
-	for(i = 0; i < NUM_MALLOC_TYPES; i++) {
-		if (audit_malloc_types[i] == NULL)
+	for (i = 0; i < NUM_MALLOC_TYPES; i++) {
+		if (audit_malloc_types[i] == NULL) {
 			continue;
+		}
 		if (audit_malloc_types[i]->mt_magic != M_MAGIC) {
 			nxt_ptr->md_magic = audit_malloc_types[i]->mt_magic;
 			continue;
@@ -148,22 +161,22 @@ audit_sysctl_malloc_debug(__unused struct sysctl_oid *oidp, __unused void *arg1,
 		strlcpy(nxt_ptr->md_shortdesc,
 		    audit_malloc_types[i]->mt_shortdesc, AU_MAX_SHORTDESC - 1);
 		strlcpy(nxt_ptr->md_lastcaller,
-		    audit_malloc_types[i]->mt_lastcaller, AU_MAX_LASTCALLER-1);
+		    audit_malloc_types[i]->mt_lastcaller, AU_MAX_LASTCALLER - 1);
 		sz += sizeof(au_malloc_debug_info_t);
 		nxt_ptr++;
 	}
 
 	req->oldlen = sz;
 	err = SYSCTL_OUT(req, amdi_ptr, sz);
-	kfree(amdi_ptr, AU_MALLOC_DBINFO_SZ);
+	kheap_free(KHEAP_TEMP, amdi_ptr, AU_MALLOC_DBINFO_SZ);
 
-	return (err);
+	return err;
 }
 #endif /* AUDIT_MALLOC_DEBUG */
- 
+
 /*
  * BSD malloc()
- * 
+ *
  * If the M_NOWAIT flag is set then it may not block and return NULL.
  * If the M_ZERO flag is set then zero out the buffer.
  */
@@ -171,31 +184,25 @@ void *
 #if AUDIT_MALLOC_DEBUG
 _audit_malloc(size_t size, au_malloc_type_t *type, int flags, const char *fn)
 #else
-_audit_malloc(size_t size, au_malloc_type_t *type, int flags)
+_audit_malloc(size_t size, au_malloc_type_t * type, int flags)
 #endif
 {
-	union {
-		struct mhdr	hdr;
-		char		mem[size + sizeof (struct mhdr)];
-	} *mem;
-	size_t	memsize = sizeof (*mem);
-
-	if (size == 0)
-		return (NULL);
-	if (flags & M_NOWAIT) {
-		mem = (void *)kalloc_noblock(memsize);
-	} else {
-		mem = (void *)kalloc(memsize);
-		if (mem == NULL)
-			panic("_audit_malloc: kernel memory exhausted");
-	}
-	if (mem == NULL)
-		return (NULL);
-	mem->hdr.mh_size = memsize;
-	mem->hdr.mh_type = type;
-	mem->hdr.mh_magic = AUDIT_MHMAGIC;
-	if (flags & M_ZERO)
-		memset(mem->hdr.mh_data, 0, size);
+	struct mhdr *hdr;
+	size_t memsize;
+	if (os_add_overflow(sizeof(*hdr), size, &memsize)) {
+		return NULL;
+	}
+
+	if (size == 0) {
+		return NULL;
+	}
+	hdr = kheap_alloc(KHEAP_AUDIT, memsize, flags);
+	if (hdr == NULL) {
+		return NULL;
+	}
+	hdr->mh_size = memsize;
+	hdr->mh_type = type;
+	hdr->mh_magic = AUDIT_MHMAGIC;
 #if AUDIT_MALLOC_DEBUG
 	if (type != NULL && type->mt_type < NUM_MALLOC_TYPES) {
 		OSAddAtomic64(memsize, &type->mt_size);
@@ -206,7 +213,7 @@ _audit_malloc(size_t size, au_malloc_type_t *type, int flags)
 		audit_malloc_types[type->mt_type] = type;
 	}
 #endif /* AUDIT_MALLOC_DEBUG */
-	return (mem->hdr.mh_data);
+	return hdr->mh_data;
 }
 
 /*
@@ -220,13 +227,15 @@ _audit_free(void *addr, __unused au_malloc_type_t *type)
 #endif
 {
 	struct mhdr *hdr;
-	
-	if (addr == NULL)
+
+	if (addr == NULL) {
 		return;
+	}
 	hdr = addr;
 	hdr--;
-	KASSERT(hdr->mh_magic == AUDIT_MHMAGIC,
-	    ("_audit_free(): hdr->mh_magic != AUDIT_MHMAGIC"));
+	if (hdr->mh_magic != AUDIT_MHMAGIC) {
+		panic("_audit_free(): hdr->mh_magic (%lx) != AUDIT_MHMAGIC", hdr->mh_magic);
+	}
 
 #if AUDIT_MALLOC_DEBUG
 	if (type != NULL) {
@@ -234,7 +243,7 @@ _audit_free(void *addr, __unused au_malloc_type_t *type)
 		OSAddAtomic(-1, &type->mt_inuse);
 	}
 #endif /* AUDIT_MALLOC_DEBUG */
-	kfree(hdr, hdr->mh_size);
+	kheap_free(KHEAP_AUDIT, hdr, hdr->mh_size);
 }
 
 /*
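Both the old and new _audit_malloc() hide a struct mhdr in front of the buffer the caller sees, so _audit_free() can recover the allocation size and validate the pointer; what changes here is that the size arithmetic gains an os_add_overflow() guard and the storage moves from the general kalloc pool to the dedicated KHEAP_AUDIT heap. A plain userland analogue of the header trick, as a sketch only (malloc/free and the compiler's __builtin_add_overflow stand in for kheap_alloc/kheap_free and os_add_overflow; the demo_* names are invented):

    #include <stdlib.h>

    #define DEMO_MHMAGIC 0x4D656C53UL               /* same value as AUDIT_MHMAGIC */

    struct demo_mhdr {
            size_t        mh_size;                  /* header + payload, used by free */
            unsigned long mh_magic;                 /* catches stray/corrupt pointers */
            char          mh_data[];                /* what the caller actually gets */
    };

    static void *
    demo_malloc(size_t size)
    {
            size_t memsize;
            struct demo_mhdr *hdr;

            /* Reject sizes where header + payload would overflow. */
            if (__builtin_add_overflow(sizeof(*hdr), size, &memsize)) {
                    return NULL;
            }
            if ((hdr = malloc(memsize)) == NULL) {
                    return NULL;
            }
            hdr->mh_size = memsize;
            hdr->mh_magic = DEMO_MHMAGIC;
            return hdr->mh_data;
    }

    static void
    demo_free(void *addr)
    {
            struct demo_mhdr *hdr;

            if (addr == NULL) {
                    return;
            }
            hdr = (struct demo_mhdr *)addr - 1;     /* step back over the header */
            if (hdr->mh_magic != DEMO_MHMAGIC) {
                    abort();                        /* the kernel version panics */
            }
            free(hdr);
    }

As in _audit_free(), the release side walks one header back from the caller-visible pointer and refuses to continue if the magic word has been overwritten.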
@@ -243,11 +252,11 @@ _audit_free(void *addr, __unused au_malloc_type_t *type)
 void
 _audit_cv_init(struct cv *cvp, const char *desc)
 {
-
-	if (desc == NULL)
+	if (desc == NULL) {
 		cvp->cv_description = "UNKNOWN";
-	else
+	} else {
 		cvp->cv_description = desc;
+	}
 	cvp->cv_waiters = 0;
 }
 
@@ -257,7 +266,6 @@ _audit_cv_init(struct cv *cvp, const char *desc)
 void
 _audit_cv_destroy(struct cv *cvp)
 {
-
 	cvp->cv_description = NULL;
 	cvp->cv_waiters = 0;
 }
@@ -268,7 +276,6 @@ _audit_cv_destroy(struct cv *cvp)
 void
 _audit_cv_signal(struct cv *cvp)
 {
-
 	if (cvp->cv_waiters > 0) {
 		wakeup_one((caddr_t)cvp);
 		cvp->cv_waiters--;
@@ -281,7 +288,6 @@ _audit_cv_signal(struct cv *cvp)
 void
 _audit_cv_broadcast(struct cv *cvp)
 {
-
 	if (cvp->cv_waiters > 0) {
 		wakeup((caddr_t)cvp);
 		cvp->cv_waiters = 0;
@@ -296,7 +302,6 @@ _audit_cv_broadcast(struct cv *cvp)
 void
 _audit_cv_wait(struct cv *cvp, lck_mtx_t *mp, const char *desc)
 {
-
 	cvp->cv_waiters++;
 	(void) msleep(cvp, mp, PZERO, desc, 0);
 }
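The condition-variable shims above are a thin layer over msleep()/wakeup() plus a waiter count, so the usual discipline applies: the predicate lives under a mutex, the waiter re-checks it in a loop, and the signaler flips it with the same mutex held. A sketch of the intended pairing (the demo_* globals are hypothetical; only routines visible in this diff plus lck_mtx_lock()/lck_mtx_unlock() are used):

    static lck_mtx_t *demo_mtx;     /* assumed allocated with lck_mtx_alloc_init() */
    static struct cv demo_cv;       /* assumed initialized via _audit_cv_init() */
    static int demo_ready;          /* the predicate both sides agree on */

    static void
    demo_consume(void)
    {
            lck_mtx_lock(demo_mtx);
            while (!demo_ready) {
                    /* Drops demo_mtx while asleep and retakes it on wakeup. */
                    _audit_cv_wait(&demo_cv, demo_mtx, "demo_consume");
            }
            demo_ready = 0;
            lck_mtx_unlock(demo_mtx);
    }

    static void
    demo_produce(void)
    {
            lck_mtx_lock(demo_mtx);
            demo_ready = 1;
            /* cv_waiters is protected by the mutex, so signal while holding it. */
            _audit_cv_signal(&demo_cv);
            lck_mtx_unlock(demo_mtx);
    }

The while loop is not optional: wakeup_one() makes no ordering promises, so the re-checked predicate, not the wakeup itself, is the source of truth.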
@@ -310,32 +315,111 @@ _audit_cv_wait(struct cv *cvp, lck_mtx_t *mp, const char *desc)
 int
 _audit_cv_wait_sig(struct cv *cvp, lck_mtx_t *mp, const char *desc)
 {
-
 	cvp->cv_waiters++;
-	return (msleep(cvp, mp, PSOCK | PCATCH, desc, 0));
+	return msleep(cvp, mp, PSOCK | PCATCH, desc, 0);
 }
 
 /*
- * Simple recursive lock.
+ * BSD Mutexes.
  */
 void
-_audit_rlck_init(struct rlck *lp, const char *grpname)
+#if DIAGNOSTIC
+_audit_mtx_init(struct mtx *mp, const char *lckname)
+#else
+_audit_mtx_init(struct mtx *mp, __unused const char *lckname)
+#endif
+{
+	mp->mtx_lock = lck_mtx_alloc_init(&audit_lck_grp, LCK_ATTR_NULL);
+	KASSERT(mp->mtx_lock != NULL,
+	    ("_audit_mtx_init: Could not allocate a mutex."));
+#if DIAGNOSTIC
+	strlcpy(mp->mtx_name, lckname, AU_MAX_LCK_NAME);
+#endif
+}
+
+void
+_audit_mtx_destroy(struct mtx *mp)
+{
+	if (mp->mtx_lock) {
+		lck_mtx_free(mp->mtx_lock, &audit_lck_grp);
+		mp->mtx_lock = NULL;
+	}
+}
+
+/*
+ * BSD rw locks.
+ */
+void
+#if DIAGNOSTIC
+_audit_rw_init(struct rwlock *lp, const char *lckname)
+#else
+_audit_rw_init(struct rwlock *lp, __unused const char *lckname)
+#endif
+{
+	lp->rw_lock = lck_rw_alloc_init(&audit_lck_grp, LCK_ATTR_NULL);
+	KASSERT(lp->rw_lock != NULL,
+	    ("_audit_rw_init: Could not allocate a rw lock."));
+#if DIAGNOSTIC
+	strlcpy(lp->rw_name, lckname, AU_MAX_LCK_NAME);
+#endif
+}
+
+void
+_audit_rw_destroy(struct rwlock *lp)
+{
+	if (lp->rw_lock) {
+		lck_rw_free(lp->rw_lock, &audit_lck_grp);
+		lp->rw_lock = NULL;
+	}
+}
+/*
+ * Wait on a condition variable in a continuation (i.e. yield kernel stack).
+ * A cv_signal or cv_broadcast on the same condition variable will cause
+ * the thread to be scheduled.
+ */
+int
+_audit_cv_wait_continuation(struct cv *cvp, lck_mtx_t *mp, thread_continue_t function)
 {
+	int status = KERN_SUCCESS;
 
-	lp->rl_grp = lck_grp_alloc_init(grpname, LCK_GRP_ATTR_NULL);
-	lp->rl_mtx = lck_mtx_alloc_init(lp->rl_grp, LCK_ATTR_NULL);
+	cvp->cv_waiters++;
+	assert_wait(cvp, THREAD_UNINT);
+	lck_mtx_unlock(mp);
 
+	status = thread_block(function);
+
+	/* should not be reached, but just in case, re-lock */
+	lck_mtx_lock(mp);
+
+	return status;
+}
+
+/*
+ * Simple recursive lock.
+ */
+void
+#if DIAGNOSTIC
+_audit_rlck_init(struct rlck *lp, const char *lckname)
+#else
+_audit_rlck_init(struct rlck *lp, __unused const char *lckname)
+#endif
+{
+	lp->rl_mtx = lck_mtx_alloc_init(&audit_lck_grp, LCK_ATTR_NULL);
+	KASSERT(lp->rl_mtx != NULL,
+	    ("_audit_rlck_init: Could not allocate a recursive lock."));
+#if DIAGNOSTIC
+	strlcpy(lp->rl_name, lckname, AU_MAX_LCK_NAME);
+#endif
 	lp->rl_thread = 0;
 	lp->rl_recurse = 0;
 }
 
 /*
  * Recursive lock.  Allow same thread to recursively lock the same lock.
- */ 
+ */
 void
 _audit_rlck_lock(struct rlck *lp)
 {
-
 	if (lp->rl_thread == current_thread()) {
 		OSAddAtomic(1, &lp->rl_recurse);
 		KASSERT(lp->rl_recurse < 10000,
@@ -353,7 +437,7 @@ _audit_rlck_lock(struct rlck *lp)
 void
 _audit_rlck_unlock(struct rlck *lp)
 {
-	KASSERT(lp->rl_thread == current_thread(), 
+	KASSERT(lp->rl_thread == current_thread(),
 	    ("_audit_rlck_unlock(): Don't own lock."));
 
 	/* Note: OSAddAtomic returns old value. */
@@ -362,18 +446,13 @@ _audit_rlck_unlock(struct rlck *lp)
 		lck_mtx_unlock(lp->rl_mtx);
 	}
 }
- 
+
 void
 _audit_rlck_destroy(struct rlck *lp)
 {
-
 	if (lp->rl_mtx) {
-		lck_mtx_free(lp->rl_mtx, lp->rl_grp);
-		lp->rl_mtx = 0;
-	}
-	if (lp->rl_grp) {
-		lck_grp_free(lp->rl_grp);
-		lp->rl_grp = 0;
+		lck_mtx_free(lp->rl_mtx, &audit_lck_grp);
+		lp->rl_mtx = NULL;
 	}
 }
 
@@ -384,32 +463,40 @@
 void
 _audit_rlck_assert(struct rlck *lp, u_int assert)
 {
 	thread_t cthd = current_thread();
- 
-	if (assert == LCK_MTX_ASSERT_OWNED && lp->rl_thread == cthd)
+
+	if (assert == LCK_MTX_ASSERT_OWNED && lp->rl_thread == cthd) {
 		panic("recursive lock (%p) not held by this thread (%p).",
 		    lp, cthd);
-	if (assert == LCK_MTX_ASSERT_NOTOWNED && lp->rl_thread != 0)
+	}
+	if (assert == LCK_MTX_ASSERT_NOTOWNED && lp->rl_thread != 0) {
 		panic("recursive lock (%p) held by thread (%p).", lp, cthd);
+	}
 }
 
 /*
  * Simple sleep lock.
  */
 void
-_audit_slck_init(struct slck *lp, const char *grpname)
+#if DIAGNOSTIC
+_audit_slck_init(struct slck *lp, const char *lckname)
+#else
+_audit_slck_init(struct slck *lp, __unused const char *lckname)
+#endif
 {
-
-	lp->sl_grp = lck_grp_alloc_init(grpname, LCK_GRP_ATTR_NULL);
-	lp->sl_mtx = lck_mtx_alloc_init(lp->sl_grp, LCK_ATTR_NULL);
-
+	lp->sl_mtx = lck_mtx_alloc_init(&audit_lck_grp, LCK_ATTR_NULL);
+	KASSERT(lp->sl_mtx != NULL,
+	    ("_audit_slck_init: Could not allocate a sleep lock."));
+#if DIAGNOSTIC
+	strlcpy(lp->sl_name, lckname, AU_MAX_LCK_NAME);
+#endif
 	lp->sl_locked = 0;
 	lp->sl_waiting = 0;
 }
 
 /*
  * Sleep lock lock.  The 'intr' flag determines if the lock is interruptible.
- * If 'intr' is true then signals or other events can interrupt the sleep lock. 
+ * If 'intr' is true then signals or other events can interrupt the sleep lock.
  */
 wait_result_t
 _audit_slck_lock(struct slck *lp, int intr)
@@ -420,13 +507,14 @@ _audit_slck_lock(struct slck *lp, int intr)
 
 	while (lp->sl_locked && res == THREAD_AWAKENED) {
 		lp->sl_waiting = 1;
 		res = lck_mtx_sleep(lp->sl_mtx, LCK_SLEEP_DEFAULT,
-			(event_t) lp, (intr) ? THREAD_INTERRUPTIBLE : THREAD_UNINT);
+		    (event_t) lp, (intr) ? THREAD_INTERRUPTIBLE : THREAD_UNINT);
 	}
-	if (res == THREAD_AWAKENED)
+	if (res == THREAD_AWAKENED) {
 		lp->sl_locked = 1;
+	}
 	lck_mtx_unlock(lp->sl_mtx);
- 
-	return (res);
+
+	return res;
 }
 
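Looking back at the recursive lock: the scheme is an ordinary mutex plus an owner identity and a depth count; only the first acquisition takes the mutex, repeated acquisitions by the owner just bump the count, and only the final release drops the mutex. A userland sketch of the same idea with pthreads (the demo_* names are invented, and pthread primitives stand in for lck_mtx_t and current_thread(); the unlocked owner test carries the same benign-race caveat as the kernel's rl_thread check):

    #include <pthread.h>

    struct demo_rlck {
            pthread_mutex_t rl_mtx;         /* stands in for lck_mtx_t *rl_mtx */
            pthread_t       rl_thread;      /* owner, valid while rl_owned != 0 */
            int             rl_owned;
            int             rl_recurse;     /* recursion depth */
    };

    static void
    demo_rlck_lock(struct demo_rlck *lp)
    {
            /*
             * Like the kernel's rl_thread == current_thread() test, this
             * unlocked read is tolerable only because a positive match can
             * be observed solely by the thread that stored it.
             */
            if (lp->rl_owned && pthread_equal(lp->rl_thread, pthread_self())) {
                    lp->rl_recurse++;               /* already own it: recurse */
                    return;
            }
            pthread_mutex_lock(&lp->rl_mtx);        /* first acquisition */
            lp->rl_thread = pthread_self();
            lp->rl_owned = 1;
            lp->rl_recurse = 1;
    }

    static void
    demo_rlck_unlock(struct demo_rlck *lp)
    {
            /* Caller must own the lock, as _audit_rlck_unlock() asserts. */
            if (--lp->rl_recurse == 0) {
                    lp->rl_owned = 0;
                    pthread_mutex_unlock(&lp->rl_mtx);
            }
    }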
@@ -435,20 +523,19 @@ _audit_slck_lock(struct slck *lp, int intr)
 void
 _audit_slck_unlock(struct slck *lp)
 {
-
 	lck_mtx_lock(lp->sl_mtx);
 	lp->sl_locked = 0;
 	if (lp->sl_waiting) {
 		lp->sl_waiting = 0;
 
 		/* Wake up *all* sleeping threads. */
-		thread_wakeup_prim((event_t) lp, /*1 thr*/ 0, THREAD_AWAKENED);
+		wakeup((event_t) lp);
 	}
 	lck_mtx_unlock(lp->sl_mtx);
 }
 
 /*
- * Sleep lock try.  Don't sleep if it doesn't get the lock. 
+ * Sleep lock try.  Don't sleep if it doesn't get the lock.
  */
 int
 _audit_slck_trylock(struct slck *lp)
@@ -457,11 +544,12 @@
 
 	lck_mtx_lock(lp->sl_mtx);
 	result = !lp->sl_locked;
-	if (result)
+	if (result) {
 		lp->sl_locked = 1;
+	}
 	lck_mtx_unlock(lp->sl_mtx);
- 
-	return (result);
+
+	return result;
 }
 
 /*
@@ -470,24 +558,20 @@
 void
 _audit_slck_assert(struct slck *lp, u_int assert)
 {
- 
-	if (assert == LCK_MTX_ASSERT_OWNED && lp->sl_locked == 0)
+	if (assert == LCK_MTX_ASSERT_OWNED && lp->sl_locked == 0) {
 		panic("sleep lock (%p) not held.", lp);
-	if (assert == LCK_MTX_ASSERT_NOTOWNED && lp->sl_locked == 1)
+	}
+	if (assert == LCK_MTX_ASSERT_NOTOWNED && lp->sl_locked == 1) {
 		panic("sleep lock (%p) held.", lp);
+	}
 }
 
 void
 _audit_slck_destroy(struct slck *lp)
 {
-
 	if (lp->sl_mtx) {
-		lck_mtx_free(lp->sl_mtx, lp->sl_grp);
-		lp->sl_mtx = 0;
-	}
-	if (lp->sl_grp) {
-		lck_grp_free(lp->sl_grp);
-		lp->sl_grp = 0;
+		lck_mtx_free(lp->sl_mtx, &audit_lck_grp);
+		lp->sl_mtx = NULL;
 	}
 }
 
@@ -497,14 +581,14 @@
  */
 #ifndef timersub
 #define timersub(tvp, uvp, vvp)						\
-	do {								\
-		(vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec;		\
-		(vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec;	\
-		if ((vvp)->tv_usec < 0) {				\
-			(vvp)->tv_sec--;				\
-			(vvp)->tv_usec += 1000000;			\
-		}							\
-	} while (0)
+	do {								\
+	        (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec;		\
+	        (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec;	\
+	        if ((vvp)->tv_usec < 0) {				\
+	                (vvp)->tv_sec--;				\
+	                (vvp)->tv_usec += 1000000;			\
+	        }							\
+	} while (0)
 #endif
 
 /*
@@ -533,16 +617,18 @@ _audit_ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
 		*lasttime = tv;
 		*curpps = 0;
 		rv = 1;
-	} else if (maxpps < 0)
+	} else if (maxpps < 0) {
 		rv = 1;
-	else if (*curpps < maxpps)
+	} else if (*curpps < maxpps) {
 		rv = 1;
-	else
+	} else {
 		rv = 0;
-	if (*curpps + 1 > 0)
+	}
+	if (*curpps + 1 > 0) {
 		*curpps = *curpps + 1;
+	}
 
-	return (rv);
+	return rv;
 }
 
 int
@@ -553,11 +639,30 @@ audit_send_trigger(unsigned int trigger)
 
 	error = host_get_audit_control_port(host_priv_self(), &audit_port);
 	if (error == KERN_SUCCESS && audit_port != MACH_PORT_NULL) {
-		audit_triggers(audit_port, trigger);
-		return (0);
+		(void)audit_triggers(audit_port, trigger);
+		ipc_port_release_send(audit_port);
+		return 0;
 	} else {
 		printf("Cannot get audit control port\n");
-		return (error);
+		return error;
 	}
 }
+
+int
+audit_send_analytics(char* signing_id, char* process_name)
+{
+	mach_port_t audit_port;
+	int error;
+
+	error = host_get_audit_control_port(host_priv_self(), &audit_port);
+	if (error == KERN_SUCCESS && audit_port != MACH_PORT_NULL) {
+		(void)audit_analytics(audit_port, signing_id, process_name);
+		ipc_port_release_send(audit_port);
+		return 0;
+	} else {
+		printf("Cannot get audit control port for analytics \n");
+		return error;
+	}
+}
+
 #endif /* CONFIG_AUDIT */
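Two things are worth noting at the tail of the diff. First, the old audit_send_trigger() never released the send right it obtained from host_get_audit_control_port(); both it and the new audit_send_analytics() now hand the right back via ipc_port_release_send(). Second, _audit_ppsratecheck() keeps its limiter state in caller-owned variables, so each independently throttled message needs its own pair. A usage sketch (the demo_* statics are hypothetical, not from this file):

    /* Allow at most five of these reports per second. */
    static struct timeval demo_lasttime;
    static int demo_curpps;

    static void
    demo_report_drop(void)
    {
            if (_audit_ppsratecheck(&demo_lasttime, &demo_curpps, 5)) {
                    printf("audit: record dropped\n");
            }
    }

A return value of 1 means this event is within the budget of maxpps events per second and may be logged; 0 means it should be suppressed.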