/*-
 * Copyright (c) 1999-2011 Apple Inc.
 * Copyright (c) 2006-2008 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1.  Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 * 2.  Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
 *     its contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/namei.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/ucred.h>
#include <sys/uio.h>
#include <sys/unistd.h>
#include <sys/file_internal.h>
#include <sys/vnode_internal.h>
#include <sys/user.h>
#include <sys/syscall.h>
#include <sys/malloc.h>
#include <sys/un.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/vfs_context.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socketvar.h>

#include <bsm/audit.h>
#include <bsm/audit_internal.h>
#include <bsm/audit_kevents.h>

#include <security/audit/audit.h>
#include <security/audit/audit_bsd.h>
#include <security/audit/audit_private.h>

#include <mach/host_priv.h>
#include <mach/host_special_ports.h>
#include <mach/audit_triggers_server.h>

#include <kern/host.h>
#include <kern/zalloc.h>
#include <kern/sched_prim.h>
#include <kern/task.h>

#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>

/*
 * Worker thread that will schedule disk I/O, etc.
 */
static thread_t audit_thread;

/*
 * audit_ctx and audit_vp are the stored credential and vnode to use for
 * the active audit trail.  They are protected by audit_worker_sl, which
 * will be held across all I/O and all rotation to prevent them from being
 * replaced (rotated) while in use.  The audit_file_rotate_wait flag is set
 * when the kernel has delivered a trigger to auditd to rotate the trail,
 * and is cleared when the next rotation takes place.  It is also protected
 * by audit_worker_sl.
 */
static int                audit_file_rotate_wait;
static struct slck        audit_worker_sl;
static struct vfs_context audit_ctx;
static struct vnode      *audit_vp;

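/*
 * Convenience wrappers around the audit_worker_sl lock described above,
 * used to serialize trail I/O and rotation.
 */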
#define AUDIT_WORKER_SX_INIT()          slck_init(&audit_worker_sl,     \
                                            "audit_worker_sl")
#define AUDIT_WORKER_SX_XLOCK()         slck_lock(&audit_worker_sl)
#define AUDIT_WORKER_SX_XUNLOCK()       slck_unlock(&audit_worker_sl)
#define AUDIT_WORKER_SX_ASSERT()        slck_assert(&audit_worker_sl, SL_OWNED)
#define AUDIT_WORKER_SX_DESTROY()       slck_destroy(&audit_worker_sl)

/*
 * The audit_q_draining flag is set when audit is disabled and the audit
 * worker queue is being drained.
 */
static int audit_q_draining;

/*
 * The special kernel audit record, audit_drain_kar, is used to mark the end
 * of the queue when draining it.
 */
static struct kaudit_record audit_drain_kar = {
        .k_ar = {
                .ar_event = AUE_NULL,
        },
        .k_ar_commit = AR_DRAIN_QUEUE,
};

/*
 * Write an audit record to a file, performed as the last stage after both
 * preselection and BSM conversion.  Both space management and write failures
 * are handled in this function.
 *
 * No attempt is made to deal with possible failure to deliver a trigger to
 * the audit daemon, since the message is asynchronous anyway.
 */
static void
audit_record_write(struct vnode *vp, struct vfs_context *ctx, void *data,
    size_t len)
{
        static struct timeval last_lowspace_trigger;
        static struct timeval last_fail;
        static int cur_lowspace_trigger;
        struct vfsstatfs *mnt_stat;
        int error;
        static int cur_fail;
        uint64_t temp;
        off_t file_size;

        AUDIT_WORKER_SX_ASSERT();       /* audit_file_rotate_wait. */

        if (vp == NULL)
                return;

        if (vnode_getwithref(vp))
                return /*(ENOENT)*/;

        mnt_stat = &vp->v_mount->mnt_vfsstat;

        /*
         * First, gather statistics on the audit log file and file system so
         * that we know how we're doing on space.  Consider failure of these
         * operations to indicate a future inability to write to the file.
         */
        error = vfs_update_vfsstat(vp->v_mount, ctx, VFS_KERNEL_EVENT);
        if (error)
                goto fail;
        error = vnode_size(vp, &file_size, ctx);
        if (error)
                goto fail;
        audit_fstat.af_currsz = (u_quad_t)file_size;

        /*
         * We handle four different space-related limits:
         *
         * - A fixed (hard) limit on the minimum free blocks we require on
         *   the file system; falling below it results in record loss, a
         *   trigger, and possible fail stop due to violating invariants.
         *
         * - An administrative (soft) limit, which, when fallen below,
         *   results in the kernel notifying the audit daemon of low space.
         *
         * - An audit trail size limit, which, when exceeded, results in the
         *   kernel notifying the audit daemon that rotation is desired.
         *
         * - The total depth of the kernel audit record queue exceeding free
         *   space, which can lead to possible fail stop (with drain), in
         *   order to prevent violating invariants.  Failure here doesn't
         *   halt immediately, but prevents new records from being generated.
         *
         * Possibly, the last of these should be handled differently, always
         * allowing a full queue to be lost, rather than trying to prevent
         * loss.
         *
         * First, handle the hard limit, which generates a trigger and may
         * fail stop.  This is handled in the same manner as ENOSPC from
         * VOP_WRITE, and results in record loss.
         */
        if (mnt_stat->f_bfree < AUDIT_HARD_LIMIT_FREE_BLOCKS) {
                error = ENOSPC;
                goto fail_enospc;
        }

        /*
         * Second, handle falling below the soft limit, if defined; we send
         * the daemon a trigger and continue processing the record.  Triggers
         * are limited to 1/sec.
         */
        if (audit_qctrl.aq_minfree != 0) {
                temp = mnt_stat->f_blocks / (100 / audit_qctrl.aq_minfree);
                if (mnt_stat->f_bfree < temp &&
                    ppsratecheck(&last_lowspace_trigger,
                    &cur_lowspace_trigger, 1))
                        (void)audit_send_trigger(
                            AUDIT_TRIGGER_LOW_SPACE);
        }

        /*
         * If the current file is getting full, generate a rotation trigger
         * to the daemon.  This is only approximate, which is fine as more
         * records may be generated before the daemon rotates the file.
         */
        if ((audit_fstat.af_filesz != 0) && (audit_file_rotate_wait == 0) &&
            ((u_quad_t)file_size >= audit_fstat.af_filesz)) {
                AUDIT_WORKER_SX_ASSERT();

                audit_file_rotate_wait = 1;
                (void)audit_send_trigger(AUDIT_TRIGGER_ROTATE_KERNEL);
        }

        /*
         * If the estimated amount of audit data in the audit event queue
         * (plus records allocated but not yet queued) has reached the amount
         * of free space on the disk, then we need to go into an audit fail
         * stop state, in which we do not permit the allocation/committing of
         * any new audit records.  We continue to process records but don't
         * allow any activities that might generate new records.  In the
         * future, we might want to detect when space is available again and
         * allow operation to continue, but this behavior is sufficient to
         * meet fail stop requirements in CAPP.
         */
        if (audit_fail_stop) {
                if ((unsigned long)((audit_q_len + audit_pre_q_len + 1) *
                    MAX_AUDIT_RECORD_SIZE) / mnt_stat->f_bsize >=
                    (unsigned long)(mnt_stat->f_bfree)) {
                        if (ppsratecheck(&last_fail, &cur_fail, 1))
                                printf("audit_record_write: free space "
                                    "below size of audit queue, failing "
                                    "stop\n");
                        audit_in_failure = 1;
                } else if (audit_in_failure) {
                        /*
                         * Note: if we want to handle recovery, this is the
                         * spot to do it: unset audit_in_failure, and issue a
                         * wakeup on the cv.
                         */
                }
        }

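        /*
         * Append the record to the trail; IO_APPEND|IO_UNIT requests that
         * the data be appended as a single unit.
         */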
        error = vn_rdwr(UIO_WRITE, vp, data, len, (off_t)0, UIO_SYSSPACE,
            IO_APPEND|IO_UNIT, vfs_context_ucred(ctx), NULL,
            vfs_context_proc(ctx));
        if (error == ENOSPC)
                goto fail_enospc;
        else if (error)
                goto fail;

        /*
         * Catch completion of a queue drain here; if we're draining and the
         * queue is now empty, fail stop.  Note that audit_fail_stop is
         * implicitly true, since audit_in_failure can only be set if
         * audit_fail_stop is set.
         *
         * Note: if we handle recovery from audit_in_failure, then we need to
         * make the panic here conditional.
         */
        if (audit_in_failure) {
                if (audit_q_len == 0 && audit_pre_q_len == 0) {
                        (void)VNOP_FSYNC(vp, MNT_WAIT, ctx);
                        panic("Audit store overflow; record queue drained.");
                }
        }

        vnode_put(vp);
        return;

fail_enospc:
        /*
         * ENOSPC is considered a special case with respect to failures, as
         * this can reflect either our preemptive detection of insufficient
         * space, or ENOSPC returned by the vnode write call.
         */
        if (audit_fail_stop) {
                (void)VNOP_FSYNC(vp, MNT_WAIT, ctx);
                panic("Audit log space exhausted and fail-stop set.");
        }
        (void)audit_send_trigger(AUDIT_TRIGGER_NO_SPACE);
        audit_suspended = 1;

        /* FALLTHROUGH */
fail:
        /*
         * We have failed to write to the file, so the current record is
         * lost, which may require an immediate system halt.
         */
        if (audit_panic_on_write_fail) {
                (void)VNOP_FSYNC(vp, MNT_WAIT, ctx);
                panic("audit_worker: write error %d\n", error);
        } else if (ppsratecheck(&last_fail, &cur_fail, 1))
                printf("audit_worker: write error %d\n", error);
        vnode_put(vp);
}

/*
 * Given a kernel audit record, process as required.  Kernel audit records
 * are converted to one, or possibly two, BSM records, depending on whether
 * there is a user audit record present also.  Kernel records need to be
 * converted to BSM before they can be written out.  Both types will be
 * written to disk and to audit pipes.
 */
static void
audit_worker_process_record(struct kaudit_record *ar)
{
        struct au_record *bsm;
        au_class_t class;
        au_event_t event;
        au_id_t auid;
        int error, sorf;
        int trail_locked;

        /*
         * We hold the audit_worker_sl lock over both writes, if there are
         * two, so that the two records won't be split across a rotation and
         * end up in two different trail files.
         */
        if (((ar->k_ar_commit & AR_COMMIT_USER) &&
            (ar->k_ar_commit & AR_PRESELECT_USER_TRAIL)) ||
            (ar->k_ar_commit & AR_PRESELECT_TRAIL)) {
                AUDIT_WORKER_SX_XLOCK();
                trail_locked = 1;
        } else
                trail_locked = 0;

        /*
         * First, handle the user record, if any: commit to the system trail
         * and audit pipes as selected.
         */
        if ((ar->k_ar_commit & AR_COMMIT_USER) &&
            (ar->k_ar_commit & AR_PRESELECT_USER_TRAIL)) {
                AUDIT_WORKER_SX_ASSERT();
                audit_record_write(audit_vp, &audit_ctx, ar->k_udata,
                    ar->k_ulen);
        }

        if ((ar->k_ar_commit & AR_COMMIT_USER) &&
            (ar->k_ar_commit & AR_PRESELECT_USER_PIPE))
                audit_pipe_submit_user(ar->k_udata, ar->k_ulen);

        if (!(ar->k_ar_commit & AR_COMMIT_KERNEL) ||
            ((ar->k_ar_commit & AR_PRESELECT_PIPE) == 0 &&
            (ar->k_ar_commit & AR_PRESELECT_TRAIL) == 0 &&
            (ar->k_ar_commit & AR_PRESELECT_FILTER) == 0))
                goto out;

        auid = ar->k_ar.ar_subj_auid;
        event = ar->k_ar.ar_event;
        class = au_event_class(event);
        if (ar->k_ar.ar_errno == 0)
                sorf = AU_PRS_SUCCESS;
        else
                sorf = AU_PRS_FAILURE;

        error = kaudit_to_bsm(ar, &bsm);
        switch (error) {
        case BSM_NOAUDIT:
                goto out;

        case BSM_FAILURE:
                printf("audit_worker_process_record: BSM_FAILURE\n");
                goto out;

        case BSM_SUCCESS:
                break;

        default:
                panic("kaudit_to_bsm returned %d", error);
        }

        if (ar->k_ar_commit & AR_PRESELECT_TRAIL) {
                AUDIT_WORKER_SX_ASSERT();
                audit_record_write(audit_vp, &audit_ctx, bsm->data, bsm->len);
        }

        if (ar->k_ar_commit & AR_PRESELECT_PIPE)
                audit_pipe_submit(auid, event, class, sorf,
                    ar->k_ar_commit & AR_PRESELECT_TRAIL, bsm->data,
                    bsm->len);

        if (ar->k_ar_commit & AR_PRESELECT_FILTER) {

                /*
                 * XXXss - This needs to be generalized so new filters can
                 * be easily plugged in.
                 */
                audit_sdev_submit(auid, ar->k_ar.ar_subj_asid, bsm->data,
                    bsm->len);
        }

        kau_free(bsm);
out:
        if (trail_locked)
                AUDIT_WORKER_SX_XUNLOCK();
}

/*
 * The audit_worker thread is responsible for watching the event queue,
 * dequeueing records, converting them to BSM format, and committing them to
 * disk.  In order to minimize lock thrashing, records are dequeued in sets
 * to a thread-local work queue.
 *
 * Note: this means that the effective bound on the size of the pending
 * record queue is 2x the length of the global queue.
 */
static void
audit_worker(void)
{
        struct kaudit_queue ar_worklist;
        struct kaudit_record *ar;
        int lowater_signal;

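        /*
         * On first entry, record this thread in the audit vfs_context so
         * that trail I/O issued via audit_ctx is attributed to the worker.
         */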
        if (audit_ctx.vc_thread == NULL)
                audit_ctx.vc_thread = current_thread();

        TAILQ_INIT(&ar_worklist);
        mtx_lock(&audit_mtx);
        while (1) {
                mtx_assert(&audit_mtx, MA_OWNED);

                /*
                 * Wait for a record.
                 */
                while (TAILQ_EMPTY(&audit_q))
                        cv_wait_continuation(&audit_worker_cv, &audit_mtx,
                            (thread_continue_t)audit_worker);

                /*
                 * If there are records in the global audit record queue,
                 * transfer them to a thread-local queue and process them
                 * one by one.  If we cross the low watermark threshold,
                 * signal any waiting processes that they may wake up and
                 * continue generating records.
                 */
                lowater_signal = 0;
                while ((ar = TAILQ_FIRST(&audit_q))) {
                        TAILQ_REMOVE(&audit_q, ar, k_q);
                        audit_q_len--;
                        if (audit_q_len == audit_qctrl.aq_lowater)
                                lowater_signal++;
                        TAILQ_INSERT_TAIL(&ar_worklist, ar, k_q);
                }
                if (lowater_signal)
                        cv_broadcast(&audit_watermark_cv);

                mtx_unlock(&audit_mtx);
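                /*
                 * Process the local worklist without holding audit_mtx.  A
                 * drain marker record signals the end of a queue drain;
                 * everything else is converted and written out.
                 */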
                while ((ar = TAILQ_FIRST(&ar_worklist))) {
                        TAILQ_REMOVE(&ar_worklist, ar, k_q);
                        if (ar->k_ar_commit & AR_DRAIN_QUEUE) {
                                audit_q_draining = 0;
                                cv_broadcast(&audit_drain_cv);
                        } else {
                                audit_worker_process_record(ar);
                                audit_free(ar);
                        }
                }
                mtx_lock(&audit_mtx);
        }
}

/*
 * audit_rotate_vnode() is called by a user or kernel thread to configure or
 * de-configure auditing on a vnode.  The arguments are the replacement
 * credential (referenced) and vnode (referenced and opened) to substitute
 * for the current credential and vnode, if any.  If either is set to NULL,
 * both should be NULL, and this is used to indicate that audit is being
 * disabled.  Any previous cred/vnode will be closed and freed.  We re-enable
 * generating rotation requests to auditd.
 */
void
audit_rotate_vnode(kauth_cred_t cred, struct vnode *vp)
{
        kauth_cred_t old_audit_cred;
        struct vnode *old_audit_vp;

        KASSERT((cred != NULL && vp != NULL) || (cred == NULL && vp == NULL),
            ("audit_rotate_vnode: cred %p vp %p", cred, vp));

        mtx_lock(&audit_mtx);
        if (audit_enabled && (NULL == vp)) {
                /* Auditing is currently enabled but will be disabled. */

                /*
                 * Disable auditing now so nothing more is added while the
                 * audit worker thread is draining the audit record queue.
                 */
                audit_enabled = 0;

                /*
                 * Drain the auditing queue by inserting a drain record at
                 * the end of the queue and waiting for the audit worker
                 * thread to find this record and signal that it is done
                 * before we close the audit trail.
                 */
                audit_q_draining = 1;
                while (audit_q_len >= audit_qctrl.aq_hiwater)
                        cv_wait(&audit_watermark_cv, &audit_mtx);
                TAILQ_INSERT_TAIL(&audit_q, &audit_drain_kar, k_q);
                audit_q_len++;
                cv_signal(&audit_worker_cv);
        }

        /* If the audit queue is draining then wait here until it's done. */
        while (audit_q_draining)
                cv_wait(&audit_drain_cv, &audit_mtx);
        mtx_unlock(&audit_mtx);

        /*
         * Rotate the vnode/cred, and clear the rotate flag so that we will
         * send a rotate trigger if the new file fills.
         */
        AUDIT_WORKER_SX_XLOCK();
        old_audit_cred = audit_ctx.vc_ucred;
        old_audit_vp = audit_vp;
        audit_ctx.vc_ucred = cred;
        audit_vp = vp;
        audit_file_rotate_wait = 0;
        audit_enabled = (audit_vp != NULL);
        AUDIT_WORKER_SX_XUNLOCK();

        /*
         * If there was an old vnode/credential, close and free.
         */
        if (old_audit_vp != NULL) {
                if (vnode_get(old_audit_vp) == 0) {
                        vn_close(old_audit_vp, AUDIT_CLOSE_FLAGS,
                            vfs_context_kernel());
                        vnode_put(old_audit_vp);
                } else
                        printf("audit_rotate_vnode: Couldn't close "
                            "audit file.\n");
                kauth_cred_unref(&old_audit_cred);
        }
}

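/*
 * Initialize the audit worker: set up the trail lock and start the
 * audit_worker() thread that drains the global audit record queue.
 */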
void
audit_worker_init(void)
{

        AUDIT_WORKER_SX_INIT();
        kernel_thread_start((thread_continue_t)audit_worker, NULL,
            &audit_thread);
        if (audit_thread == THREAD_NULL)
                panic("audit_worker_init: Couldn't create audit_worker thread");
}