/*-
 * Copyright (c) 2008-2009 Apple Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1.  Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 * 2.  Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
 *     its contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <string.h>

#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/systm.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/locks.h>
#include <kern/sched_prim.h>

#include <libkern/OSAtomic.h>

#include <bsm/audit.h>
#include <bsm/audit_internal.h>

#include <security/audit/audit_bsd.h>
#include <security/audit/audit.h>
#include <security/audit/audit_private.h>

#include <mach/host_priv.h>
#include <mach/host_special_ports.h>
#include <mach/audit_triggers_server.h>

#if CONFIG_AUDIT
/*
 * Accounting header prepended to every _audit_malloc() allocation;
 * mh_data is the address handed back to the caller, and mh_magic lets
 * _audit_free() detect memory that was not allocated here.
 */
struct mhdr {
	size_t		 mh_size;
	au_malloc_type_t *mh_type;
	u_long		 mh_magic;
	char		 mh_data[0];
};

#define	AUDIT_MHMAGIC	0x4D656C53

#if AUDIT_MALLOC_DEBUG
#define	AU_MAX_SHORTDESC	20
#define	AU_MAX_LASTCALLER	20
struct au_malloc_debug_info {
	SInt64		md_size;
	SInt64		md_maxsize;
	SInt32		md_inuse;
	SInt32		md_maxused;
	unsigned	md_type;
	unsigned	md_magic;
	char		md_shortdesc[AU_MAX_SHORTDESC];
	char		md_lastcaller[AU_MAX_LASTCALLER];
};
typedef struct au_malloc_debug_info au_malloc_debug_info_t;

au_malloc_type_t *audit_malloc_types[NUM_MALLOC_TYPES];

static int audit_sysctl_malloc_debug(struct sysctl_oid *oidp, void *arg1,
    int arg2, struct sysctl_req *req);

SYSCTL_PROC(_kern, OID_AUTO, audit_malloc_debug, CTLFLAG_RD, NULL, 0,
    audit_sysctl_malloc_debug, "S,audit_malloc_debug",
    "Current malloc debug info for auditing.");

#define	AU_MALLOC_DBINFO_SZ \
    (NUM_MALLOC_TYPES * sizeof(au_malloc_debug_info_t))

/*
 * Copy out the malloc debug info via the sysctl interface.  The userland
 * code is something like the following:
 *
 *	error = sysctlbyname("kern.audit_malloc_debug", buffer_ptr,
 *	    &buffer_len, NULL, 0);
 */
static int
audit_sysctl_malloc_debug(__unused struct sysctl_oid *oidp, __unused void *arg1,
    __unused int arg2, struct sysctl_req *req)
{
	int i;
	size_t sz;
	au_malloc_debug_info_t *amdi_ptr, *nxt_ptr;
	int err;

	/*
	 * This provides a read-only node.
	 */
	if (req->newptr != USER_ADDR_NULL)
		return (EPERM);

	/*
	 * If just querying then return the space required.
	 */
	if (req->oldptr == USER_ADDR_NULL) {
		req->oldidx = AU_MALLOC_DBINFO_SZ;
		return (0);
	}

	/*
	 * Alloc a temporary buffer.
	 */
	if (req->oldlen < AU_MALLOC_DBINFO_SZ)
		return (ENOMEM);
	amdi_ptr = (au_malloc_debug_info_t *)kalloc(AU_MALLOC_DBINFO_SZ);
	if (amdi_ptr == NULL)
		return (ENOMEM);
	bzero(amdi_ptr, AU_MALLOC_DBINFO_SZ);

	/*
	 * Build the record array.
	 */
	sz = 0;
	nxt_ptr = amdi_ptr;
	for (i = 0; i < NUM_MALLOC_TYPES; i++) {
		if (audit_malloc_types[i] == NULL)
			continue;
		if (audit_malloc_types[i]->mt_magic != M_MAGIC) {
			nxt_ptr->md_magic = audit_malloc_types[i]->mt_magic;
			continue;
		}
		nxt_ptr->md_magic = audit_malloc_types[i]->mt_magic;
		nxt_ptr->md_size = audit_malloc_types[i]->mt_size;
		nxt_ptr->md_maxsize = audit_malloc_types[i]->mt_maxsize;
		nxt_ptr->md_inuse = (int)audit_malloc_types[i]->mt_inuse;
		nxt_ptr->md_maxused = (int)audit_malloc_types[i]->mt_maxused;
		strlcpy(nxt_ptr->md_shortdesc,
		    audit_malloc_types[i]->mt_shortdesc, AU_MAX_SHORTDESC - 1);
		strlcpy(nxt_ptr->md_lastcaller,
		    audit_malloc_types[i]->mt_lastcaller, AU_MAX_LASTCALLER - 1);
		sz += sizeof(au_malloc_debug_info_t);
		nxt_ptr++;
	}

	req->oldlen = sz;
	err = SYSCTL_OUT(req, amdi_ptr, sz);
	kfree(amdi_ptr, AU_MALLOC_DBINFO_SZ);

	return (err);
}
#endif /* AUDIT_MALLOC_DEBUG */
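
/*
 * Illustrative userland sketch of the two-step sysctl(3) pattern described
 * above: query with a NULL buffer to learn the required size, then read.
 * This is an assumption about the caller (and assumes the record layout is
 * visible to the tool), not code from this file:
 *
 *	size_t len = 0;
 *	au_malloc_debug_info_t *buf;
 *
 *	if (sysctlbyname("kern.audit_malloc_debug", NULL, &len, NULL, 0) < 0)
 *		err(1, "sysctlbyname(size)");
 *	if ((buf = malloc(len)) == NULL)
 *		err(1, "malloc");
 *	if (sysctlbyname("kern.audit_malloc_debug", buf, &len, NULL, 0) < 0)
 *		err(1, "sysctlbyname(read)");
 */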

/*
 * BSD malloc()
 *
 * If the M_NOWAIT flag is set then the allocation will not block and may
 * instead return NULL; if M_NOWAIT is clear and memory is exhausted, the
 * call panics instead of returning NULL.  If the M_ZERO flag is set then
 * the buffer is zeroed.
 */
void *
#if AUDIT_MALLOC_DEBUG
_audit_malloc(size_t size, au_malloc_type_t *type, int flags, const char *fn)
#else
_audit_malloc(size_t size, au_malloc_type_t *type, int flags)
#endif
{
	/* Variable-length union: header plus the caller's requested bytes. */
	union {
		struct mhdr	hdr;
		char		mem[size + sizeof (struct mhdr)];
	} *mem;
	size_t	memsize = sizeof (*mem);

	if (size == 0)
		return (NULL);
	if (flags & M_NOWAIT) {
		mem = (void *)kalloc_noblock(memsize);
	} else {
		mem = (void *)kalloc(memsize);
		if (mem == NULL)
			panic("_audit_malloc: kernel memory exhausted");
	}
	if (mem == NULL)
		return (NULL);
	mem->hdr.mh_size = memsize;
	mem->hdr.mh_type = type;
	mem->hdr.mh_magic = AUDIT_MHMAGIC;
	if (flags & M_ZERO)
		memset(mem->hdr.mh_data, 0, size);
#if AUDIT_MALLOC_DEBUG
	if (type != NULL && type->mt_type < NUM_MALLOC_TYPES) {
		OSAddAtomic64(memsize, &type->mt_size);
		type->mt_maxsize = max(type->mt_size, type->mt_maxsize);
		OSAddAtomic(1, &type->mt_inuse);
		type->mt_maxused = max(type->mt_inuse, type->mt_maxused);
		type->mt_lastcaller = fn;
		audit_malloc_types[type->mt_type] = type;
	}
#endif /* AUDIT_MALLOC_DEBUG */
	return (mem->hdr.mh_data);
}

/*
 * BSD free()
 */
void
#if AUDIT_MALLOC_DEBUG
_audit_free(void *addr, au_malloc_type_t *type)
#else
_audit_free(void *addr, __unused au_malloc_type_t *type)
#endif
{
	struct mhdr *hdr;

	if (addr == NULL)
		return;
	/* Back up to the accounting header that precedes the caller's data. */
	hdr = addr;
	hdr--;

	KASSERT(hdr->mh_magic == AUDIT_MHMAGIC,
	    ("_audit_free(): hdr->mh_magic != AUDIT_MHMAGIC"));

#if AUDIT_MALLOC_DEBUG
	if (type != NULL) {
		OSAddAtomic64(-hdr->mh_size, &type->mt_size);
		OSAddAtomic(-1, &type->mt_inuse);
	}
#endif /* AUDIT_MALLOC_DEBUG */
	kfree(hdr, hdr->mh_size);
}
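
/*
 * Usage sketch (illustrative; M_AUDITDATA stands in for whatever
 * au_malloc_type_t the caller uses, and is an assumption here):
 *
 *	void *buf;
 *
 *	buf = _audit_malloc(128, M_AUDITDATA, M_NOWAIT | M_ZERO);
 *	if (buf == NULL)
 *		return (ENOMEM);
 *	...
 *	_audit_free(buf, M_AUDITDATA);
 *
 * With M_NOWAIT clear, the NULL check is unnecessary: exhaustion panics
 * instead of failing.
 */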

/*
 * Initialize a condition variable.  Must be called before use.
 */
void
_audit_cv_init(struct cv *cvp, const char *desc)
{

	if (desc == NULL)
		cvp->cv_description = "UNKNOWN";
	else
		cvp->cv_description = desc;
	cvp->cv_waiters = 0;
}

/*
 * Destroy a condition variable.
 */
void
_audit_cv_destroy(struct cv *cvp)
{

	cvp->cv_description = NULL;
	cvp->cv_waiters = 0;
}

/*
 * Signal a condition variable; wakes up one waiting thread.
 */
void
_audit_cv_signal(struct cv *cvp)
{

	if (cvp->cv_waiters > 0) {
		wakeup_one((caddr_t)cvp);
		cvp->cv_waiters--;
	}
}

/*
 * Broadcast a signal to a condition variable; wakes up all waiting threads.
 */
void
_audit_cv_broadcast(struct cv *cvp)
{

	if (cvp->cv_waiters > 0) {
		wakeup((caddr_t)cvp);
		cvp->cv_waiters = 0;
	}
}

/*
 * Wait on a condition variable.  A cv_signal or cv_broadcast on the same
 * condition variable will resume the thread.  It is recommended that the
 * mutex be held when cv_signal or cv_broadcast is called.
 */
void
_audit_cv_wait(struct cv *cvp, lck_mtx_t *mp, const char *desc)
{

	cvp->cv_waiters++;
	(void) msleep(cvp, mp, PZERO, desc, 0);
}

/*
 * Wait on a condition variable, allowing interruption by signals.  Return 0
 * if the thread was resumed with cv_signal or cv_broadcast, and EINTR or
 * ERESTART if a signal was caught.  If ERESTART is returned, the system call
 * should be restarted if possible.
 */
int
_audit_cv_wait_sig(struct cv *cvp, lck_mtx_t *mp, const char *desc)
{

	cvp->cv_waiters++;
	return (msleep(cvp, mp, PSOCK | PCATCH, desc, 0));
}
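
/*
 * Typical wait/signal pattern (an illustrative sketch; "lock", "cv", and
 * "ready" are hypothetical names, with the mutex allocated by the caller):
 *
 *	Waiting side:
 *		lck_mtx_lock(lock);
 *		while (!ready)
 *			_audit_cv_wait(&cv, lock, "example");
 *		lck_mtx_unlock(lock);
 *
 *	Waking side:
 *		lck_mtx_lock(lock);
 *		ready = 1;
 *		_audit_cv_signal(&cv);
 *		lck_mtx_unlock(lock);
 *
 * msleep() drops and reacquires the mutex, so the waiter must re-check its
 * predicate in a loop.
 */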

/*
 * Simple recursive lock.
 */
void
_audit_rlck_init(struct rlck *lp, const char *grpname)
{

	lp->rl_grp = lck_grp_alloc_init(grpname, LCK_GRP_ATTR_NULL);
	lp->rl_mtx = lck_mtx_alloc_init(lp->rl_grp, LCK_ATTR_NULL);

	lp->rl_thread = 0;
	lp->rl_recurse = 0;
}

/*
 * Recursive lock.  Allows the same thread to lock the same lock recursively.
 */
void
_audit_rlck_lock(struct rlck *lp)
{

	if (lp->rl_thread == current_thread()) {
		OSAddAtomic(1, &lp->rl_recurse);
		KASSERT(lp->rl_recurse < 10000,
		    ("_audit_rlck_lock: lock nested too deep."));
	} else {
		lck_mtx_lock(lp->rl_mtx);
		lp->rl_thread = current_thread();
		lp->rl_recurse = 1;
	}
}

/*
 * Recursive unlock.  Must be called by the thread that holds the lock.
 */
void
_audit_rlck_unlock(struct rlck *lp)
{
	KASSERT(lp->rl_thread == current_thread(),
	    ("_audit_rlck_unlock(): Don't own lock."));

	/* Note: OSAddAtomic returns the old value. */
	if (OSAddAtomic(-1, &lp->rl_recurse) == 1) {
		lp->rl_thread = 0;
		lck_mtx_unlock(lp->rl_mtx);
	}
}

void
_audit_rlck_destroy(struct rlck *lp)
{

	if (lp->rl_mtx) {
		lck_mtx_free(lp->rl_mtx, lp->rl_grp);
		lp->rl_mtx = 0;
	}
	if (lp->rl_grp) {
		lck_grp_free(lp->rl_grp);
		lp->rl_grp = 0;
	}
}

/*
 * Recursive lock assert.
 */
void
_audit_rlck_assert(struct rlck *lp, u_int assert)
{
	thread_t cthd = current_thread();

	if (assert == LCK_MTX_ASSERT_OWNED && lp->rl_thread != cthd)
		panic("recursive lock (%p) not held by this thread (%p).",
		    lp, cthd);
	if (assert == LCK_MTX_ASSERT_NOTOWNED && lp->rl_thread != 0)
		panic("recursive lock (%p) held by thread (%p).",
		    lp, cthd);
}
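
/*
 * Usage sketch (illustrative): the lock tracks recursion per owning thread,
 * so lock/unlock pairs may nest within a single thread:
 *
 *	_audit_rlck_lock(&rlck);	rl_recurse == 1
 *	_audit_rlck_lock(&rlck);	rl_recurse == 2 (same thread)
 *	_audit_rlck_unlock(&rlck);	rl_recurse == 1, still held
 *	_audit_rlck_unlock(&rlck);	rl_recurse == 0, mutex released
 */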

/*
 * Simple sleep lock.
 */
void
_audit_slck_init(struct slck *lp, const char *grpname)
{

	lp->sl_grp = lck_grp_alloc_init(grpname, LCK_GRP_ATTR_NULL);
	lp->sl_mtx = lck_mtx_alloc_init(lp->sl_grp, LCK_ATTR_NULL);

	lp->sl_locked = 0;
	lp->sl_waiting = 0;
}

/*
 * Acquire a sleep lock.  The 'intr' flag determines whether the wait is
 * interruptible: if 'intr' is non-zero, signals or other events can
 * interrupt the sleep, and the caller must check the returned wait result.
 */
wait_result_t
_audit_slck_lock(struct slck *lp, int intr)
{
	wait_result_t res = THREAD_AWAKENED;

	lck_mtx_lock(lp->sl_mtx);
	while (lp->sl_locked && res == THREAD_AWAKENED) {
		lp->sl_waiting = 1;
		res = lck_mtx_sleep(lp->sl_mtx, LCK_SLEEP_DEFAULT,
		    (event_t) lp, (intr) ? THREAD_INTERRUPTIBLE : THREAD_UNINT);
	}
	if (res == THREAD_AWAKENED)
		lp->sl_locked = 1;
	lck_mtx_unlock(lp->sl_mtx);

	return (res);
}

/*
 * Release a sleep lock.  Wake up all the threads waiting for this lock.
 */
void
_audit_slck_unlock(struct slck *lp)
{

	lck_mtx_lock(lp->sl_mtx);
	lp->sl_locked = 0;
	if (lp->sl_waiting) {
		lp->sl_waiting = 0;

		/* Wake up *all* sleeping threads (one_thread == 0). */
		thread_wakeup_prim((event_t) lp, /* one_thread */ 0,
		    THREAD_AWAKENED);
	}
	lck_mtx_unlock(lp->sl_mtx);
}

/*
 * Try to acquire a sleep lock.  Fail rather than sleep if it is unavailable.
 */
int
_audit_slck_trylock(struct slck *lp)
{
	int result;

	lck_mtx_lock(lp->sl_mtx);
	result = !lp->sl_locked;
	if (result)
		lp->sl_locked = 1;
	lck_mtx_unlock(lp->sl_mtx);

	return (result);
}

/*
 * Sleep lock assert.
 */
void
_audit_slck_assert(struct slck *lp, u_int assert)
{

	if (assert == LCK_MTX_ASSERT_OWNED && lp->sl_locked == 0)
		panic("sleep lock (%p) not held.", lp);
	if (assert == LCK_MTX_ASSERT_NOTOWNED && lp->sl_locked == 1)
		panic("sleep lock (%p) held.", lp);
}

void
_audit_slck_destroy(struct slck *lp)
{

	if (lp->sl_mtx) {
		lck_mtx_free(lp->sl_mtx, lp->sl_grp);
		lp->sl_mtx = 0;
	}
	if (lp->sl_grp) {
		lck_grp_free(lp->sl_grp);
		lp->sl_grp = 0;
	}
}
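
/*
 * Usage sketch (illustrative): an interruptible acquire must check the wait
 * result, since the lock is not taken when the sleep is interrupted:
 *
 *	if (_audit_slck_lock(&slck, 1) != THREAD_AWAKENED)
 *		return (EINTR);
 *	...
 *	_audit_slck_unlock(&slck);
 *
 * Unlike the recursive lock above, ownership is not recorded, so a
 * different thread may release the lock.
 */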

/*
 * XXXss - This code was taken from bsd/netinet6/icmp6.c.  Maybe
 * ppsratecheck() should be made global in icmp6.c.
 */
#ifndef timersub
#define	timersub(tvp, uvp, vvp)						\
	do {								\
		(vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec;		\
		(vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec;	\
		if ((vvp)->tv_usec < 0) {				\
			(vvp)->tv_sec--;				\
			(vvp)->tv_usec += 1000000;			\
		}							\
	} while (0)
#endif
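
/*
 * For example, timersub() of { 2, 100 } minus { 1, 500000 } borrows from
 * the seconds field (100 - 500000 < 0) and yields { 0, 500100 }.
 */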

/*
 * Packets (or events) per second limitation.
 */
int
_audit_ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
{
	struct timeval tv, delta;
	int rv;

	microtime(&tv);

	timersub(&tv, lasttime, &delta);

	/*
	 * Check for 0,0 so that the message will be seen at least once.
	 * If more than one second has passed since the last update of
	 * lasttime, reset the counter.
	 *
	 * We increment *curpps even when *curpps < maxpps, since some
	 * callers may use *curpps for statistics as well.
	 */
	if ((lasttime->tv_sec == 0 && lasttime->tv_usec == 0) ||
	    delta.tv_sec >= 1) {
		*lasttime = tv;
		*curpps = 0;
		rv = 1;
	} else if (maxpps < 0)
		rv = 1;
	else if (*curpps < maxpps)
		rv = 1;
	else
		rv = 0;
	/* Guard against overflow before incrementing the counter. */
	if (*curpps + 1 > 0)
		*curpps = *curpps + 1;

	return (rv);
}
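
/*
 * Usage sketch (illustrative; the state must persist across calls, e.g.
 * as statics):
 *
 *	static struct timeval lasttime;
 *	static int curpps;
 *
 *	if (_audit_ppsratecheck(&lasttime, &curpps, 5))
 *		printf("audit: event rate-limited to 5 per second\n");
 *
 * A negative maxpps disables the limit; a maxpps of 0 allows only the
 * first event after each one-second reset.
 */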

/*
 * Deliver a trigger notification over the host's audit control port.
 */
int
audit_send_trigger(unsigned int trigger)
{
	mach_port_t audit_port;
	int error;

	error = host_get_audit_control_port(host_priv_self(), &audit_port);
	if (error == KERN_SUCCESS && audit_port != MACH_PORT_NULL) {
		audit_triggers(audit_port, trigger);
		return (0);
	} else {
		printf("Cannot get audit control port\n");
		return (error);
	}
}
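
/*
 * Callers pass one of the AUDIT_TRIGGER_* values from bsm/audit.h, e.g.
 * (illustrative):
 *
 *	(void)audit_send_trigger(AUDIT_TRIGGER_LOW_SPACE);
 */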
#endif /* CONFIG_AUDIT */