/*
 * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <stdarg.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/file_internal.h>
#include <sys/stat.h>
#include <sys/vnode_internal.h>
#include <sys/mount_internal.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/dirent.h>
#include <sys/attr.h>
#include <sys/sysctl.h>
#include <sys/ubc.h>
#include <machine/cons.h>
#include <miscfs/specfs/specdev.h>
#include <miscfs/devfs/devfs.h>
#include <sys/filio.h>
#include <architecture/byte_order.h>
#include <kern/locks.h>
#include <libkern/OSAtomic.h>

#include <bsm/audit_kernel.h>
#include <bsm/audit_kevents.h>

// where all our structs and defines come from
#include <sys/fsevents.h>

typedef struct kfs_event_arg {
    u_int16_t type;
    u_int16_t len;
    union {
        struct vnode *vp;
        char *str;
        void *ptr;
        int32_t int32;
        dev_t dev;
        ino_t ino;
        int32_t mode;
        uid_t uid;
        gid_t gid;
    } data;
} kfs_event_arg;

#define KFS_NUM_ARGS FSE_MAX_ARGS
typedef struct kfs_event {
    int32_t type;        // type code of this event
    u_int32_t refcount;  // number of clients referencing this
    pid_t pid;           // pid of the process that did the op
    kfs_event_arg args[KFS_NUM_ARGS];
} kfs_event;
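
// Note: kfs_events live in the fixed fs_event_buf[] array defined
// below.  An event's refcount is bumped once for each watcher queue
// it is placed on, and its slot is recycled by do_free_event() once
// the last reference is dropped.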


typedef struct fs_event_watcher {
    SLIST_ENTRY(fs_event_watcher) link;
    int8_t *event_list;       // the events we're interested in
    int32_t num_events;
    dev_t *devices_to_watch;  // only report events from these devices
    uint32_t num_devices;
    int32_t flags;
    kfs_event **event_queue;
    int32_t eventq_size;      // number of event pointers in queue
    int32_t rd, wr;           // indices to the event_queue
    int32_t blockers;
} fs_event_watcher;
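
// The per-watcher event_queue is a ring buffer: it is empty when
// rd == wr and full when (wr + 1) % eventq_size == rd, so one slot
// is always sacrificed to tell the two states apart (see
// watcher_add_event()).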

// fs_event_watcher flags
#define WATCHER_DROPPED_EVENTS 0x0001
#define WATCHER_CLOSING        0x0002

static SLIST_HEAD(watch_list, fs_event_watcher) watch_list_head = { NULL };


#define MAX_KFS_EVENTS 2048

// this array holds each pending event
static kfs_event fs_event_buf[MAX_KFS_EVENTS];
static int free_event_idx = 0;
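// free_event_idx is just a hint: add_fsevent() starts scanning for a
// free slot here but will walk the whole array if it needs to.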
static int fs_event_init = 0;

//
// this array records whether anyone is interested in a
// particular type of event.  if no one is, we bail out
// early from the event delivery
//
static int16_t fs_event_type_watchers[FSE_MAX_EVENTS];

static int watcher_add_event(fs_event_watcher *watcher, kfs_event *kfse);

//
// Locks
//
static lck_grp_attr_t * fsevent_group_attr;
static lck_attr_t * fsevent_lock_attr;
static lck_grp_t * fsevent_mutex_group;

static lck_grp_t * fsevent_rw_group;

static lck_rw_t fsevent_big_lock;  // always grab this first
static lck_mtx_t watch_list_lock;
static lck_mtx_t event_buf_lock;


static void init_pathbuff(void);


static void
fsevents_internal_init(void)
{
    int i;

    if (fs_event_init++ != 0) {
        return;
    }

    for(i=0; i < FSE_MAX_EVENTS; i++) {
        fs_event_type_watchers[i] = 0;
    }

    for(i=0; i < MAX_KFS_EVENTS; i++) {
        fs_event_buf[i].type = FSE_INVALID;
        fs_event_buf[i].refcount = 0;
    }

    SLIST_INIT(&watch_list_head);

    fsevent_lock_attr = lck_attr_alloc_init();
    fsevent_group_attr = lck_grp_attr_alloc_init();
    fsevent_mutex_group = lck_grp_alloc_init("fsevent-mutex", fsevent_group_attr);
    fsevent_rw_group = lck_grp_alloc_init("fsevent-rw", fsevent_group_attr);

    lck_mtx_init(&watch_list_lock, fsevent_mutex_group, fsevent_lock_attr);
    lck_mtx_init(&event_buf_lock, fsevent_mutex_group, fsevent_lock_attr);

    lck_rw_init(&fsevent_big_lock, fsevent_rw_group, fsevent_lock_attr);

    init_pathbuff();
}

static void
lock_watch_list(void)
{
    lck_mtx_lock(&watch_list_lock);
}

static void
unlock_watch_list(void)
{
    lck_mtx_unlock(&watch_list_lock);
}

static void
lock_fs_event_buf(void)
{
    lck_mtx_lock(&event_buf_lock);
}

static void
unlock_fs_event_buf(void)
{
    lck_mtx_unlock(&event_buf_lock);
}

// forward prototype
static void do_free_event(kfs_event *kfse);

static int
watcher_cares_about_dev(fs_event_watcher *watcher, dev_t dev)
{
    unsigned int i;

    // if there is no list of devices to watch, then always
    // say we're interested so we'll report all events from
    // all devices
    if (watcher->devices_to_watch == NULL) {
        return 1;
    }

    for(i=0; i < watcher->num_devices; i++) {
        if (dev == watcher->devices_to_watch[i]) {
            // found a match!  that means we want events
            // from this device.
            return 1;
        }
    }

    // if we're here it's not in the devices_to_watch[]
    // list so that means we do not care about it
    return 0;
}


int
need_fsevent(int type, vnode_t vp)
{
    fs_event_watcher *watcher;
    dev_t dev;

    if (fs_event_type_watchers[type] == 0)
        return (0);
    dev = (dev_t)(vp->v_mount->mnt_vfsstat.f_fsid.val[0]);

    lock_watch_list();

    SLIST_FOREACH(watcher, &watch_list_head, link) {
        if (watcher->event_list[type] == FSE_REPORT && watcher_cares_about_dev(watcher, dev)) {
            unlock_watch_list();
            return (1);
        }
    }
    unlock_watch_list();

    return (0);
}


int
add_fsevent(int type, vfs_context_t ctx, ...)
{
    struct proc *p = vfs_context_proc(ctx);
    int i, arg_idx, num_deliveries = 0;
    kfs_event_arg *kea;
    kfs_event *kfse;
    fs_event_watcher *watcher;
    va_list ap;
    int error = 0;
    dev_t dev = 0;

    va_start(ap, ctx);

    // if no one cares about this type of event, bail out
    if (fs_event_type_watchers[type] == 0) {
        va_end(ap);
        return 0;
    }

    lck_rw_lock_shared(&fsevent_big_lock);

    // find a free event and snag it for our use
    // NOTE: do not do anything that would block until
    //       the lock is dropped.
    lock_fs_event_buf();

    for(i=0; i < MAX_KFS_EVENTS; i++) {
        if (fs_event_buf[(free_event_idx + i) % MAX_KFS_EVENTS].type == FSE_INVALID) {
            break;
        }
    }

    if (i >= MAX_KFS_EVENTS) {
        // yikes! no free slots
        unlock_fs_event_buf();
        va_end(ap);

        lock_watch_list();
        SLIST_FOREACH(watcher, &watch_list_head, link) {
            watcher->flags |= WATCHER_DROPPED_EVENTS;
            wakeup((caddr_t)watcher);
        }
        unlock_watch_list();
        lck_rw_done(&fsevent_big_lock);

        printf("fs_events: add_event: event queue is full! dropping events.\n");
        return ENOSPC;
    }

    kfse = &fs_event_buf[(free_event_idx + i) % MAX_KFS_EVENTS];

    free_event_idx++;

    kfse->type = type;
    kfse->refcount = 0;
    kfse->pid = p->p_pid;

    unlock_fs_event_buf();  // at this point it's safe to unlock

    //
    // now process the arguments passed in and copy them into
    // the kfse
    //
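    // For illustration, a call might look like this (the event type
    // and values here are hypothetical; FSE_ARG_STRING passes a
    // length and then a pointer, and the argument list must always
    // be terminated with FSE_ARG_DONE):
    //
    //    add_fsevent(FSE_CREATE_FILE, ctx,
    //                FSE_ARG_VNODE, vp,
    //                FSE_ARG_DONE);
    //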
    arg_idx = 0;
    while(arg_idx < KFS_NUM_ARGS) {
        kea = &kfse->args[arg_idx++];
        kea->type = va_arg(ap, int32_t);

        if (kea->type == FSE_ARG_DONE) {
            break;
        }

        switch(kea->type) {
            case FSE_ARG_VNODE: {
                // this expands out into multiple arguments to the client
                struct vnode *vp;
                struct vnode_attr va;

                kea->data.vp = vp = va_arg(ap, struct vnode *);
                if (kea->data.vp == NULL) {
                    panic("add_fsevent: you can't pass me a NULL vnode ptr (type %d)!\n",
                          kfse->type);
                }

                if (vnode_ref_ext(kea->data.vp, O_EVTONLY) != 0) {
                    kea->type = FSE_ARG_DONE;

                    error = EINVAL;
                    goto clean_up;
                }
                VATTR_INIT(&va);
                VATTR_WANTED(&va, va_fsid);
                VATTR_WANTED(&va, va_fileid);
                VATTR_WANTED(&va, va_mode);
                VATTR_WANTED(&va, va_uid);
                VATTR_WANTED(&va, va_gid);
                if (vnode_getattr(kea->data.vp, &va, ctx) != 0) {
                    vnode_rele_ext(kea->data.vp, O_EVTONLY, 0);
                    kea->type = FSE_ARG_DONE;

                    error = EINVAL;
                    goto clean_up;
                }

                kea++;
                kea->type = FSE_ARG_DEV;
                kea->data.dev = dev = (dev_t)va.va_fsid;

                kea++;
                kea->type = FSE_ARG_INO;
                kea->data.ino = (ino_t)va.va_fileid;

                kea++;
                kea->type = FSE_ARG_MODE;
                kea->data.mode = (int32_t)vnode_vttoif(vnode_vtype(vp)) | va.va_mode;

                kea++;
                kea->type = FSE_ARG_UID;
                kea->data.uid = va.va_uid;

                kea++;
                kea->type = FSE_ARG_GID;
                kea->data.gid = va.va_gid;
                arg_idx += 5;
                break;
            }

            case FSE_ARG_FINFO: {
                fse_info *fse;

                fse = va_arg(ap, fse_info *);

                kea->type = FSE_ARG_DEV;
                kea->data.dev = dev = (dev_t)fse->dev;

                kea++;
                kea->type = FSE_ARG_INO;
                kea->data.ino = (ino_t)fse->ino;

                kea++;
                kea->type = FSE_ARG_MODE;
                kea->data.mode = (int32_t)fse->mode;

                kea++;
                kea->type = FSE_ARG_UID;
                kea->data.uid = (uid_t)fse->uid;

                kea++;
                kea->type = FSE_ARG_GID;
                kea->data.gid = (gid_t)fse->gid;
                arg_idx += 4;
                break;
            }

            case FSE_ARG_STRING:
                kea->len = (int16_t)(va_arg(ap, int32_t) & 0xffff);
                kea->data.str = vfs_addname(va_arg(ap, char *), kea->len, 0, 0);
                break;

            case FSE_ARG_INT32:
                kea->data.int32 = va_arg(ap, int32_t);
                break;

            case FSE_ARG_INT64:
                printf("fs_events: 64-bit args not implemented.\n");
//              kea->data.int64 = va_arg(ap, int64_t);
                break;

            case FSE_ARG_RAW:
                kea->len = (int16_t)(va_arg(ap, int32_t) & 0xffff);
                MALLOC(kea->data.ptr, void *, kea->len, M_TEMP, M_WAITOK);
                memcpy(kea->data.ptr, va_arg(ap, void *), kea->len);
                break;

            case FSE_ARG_DEV:
                kea->data.dev = dev = va_arg(ap, dev_t);
                break;

            case FSE_ARG_MODE:
                kea->data.mode = va_arg(ap, int32_t);
                break;

            case FSE_ARG_INO:
                kea->data.ino = va_arg(ap, ino_t);
                break;

            case FSE_ARG_UID:
                kea->data.uid = va_arg(ap, uid_t);
                break;

            case FSE_ARG_GID:
                kea->data.gid = va_arg(ap, gid_t);
                break;

            default:
                printf("add_fsevent: unknown type %d\n", kea->type);
                // just skip one 32-bit word and hope we sync up...
                (void)va_arg(ap, int32_t);
        }
    }

    va_end(ap);

    //
    // now we have to go and deliver this event to everyone that
    // is interested in this type of event...
    //
    lock_watch_list();

    SLIST_FOREACH(watcher, &watch_list_head, link) {
        if (watcher->event_list[type] == FSE_REPORT && watcher_cares_about_dev(watcher, dev)) {
            if (watcher_add_event(watcher, kfse) == 0) {
                num_deliveries++;
            }
        }
    }

    unlock_watch_list();

 clean_up:
    // just in case no one was interested after all...
    if (num_deliveries == 0) {
        do_free_event(kfse);
        free_event_idx = (int)(kfse - &fs_event_buf[0]);
    }

    lck_rw_done(&fsevent_big_lock);
    return error;
}

static void
do_free_event(kfs_event *kfse)
{
    int i;
    kfs_event_arg *kea, all_args[KFS_NUM_ARGS];

    lock_fs_event_buf();

    // mark this fsevent as invalid
    kfse->type = FSE_INVALID;

    // make a copy of this so we can free things without
    // holding the fs_event_buf lock
    //
    memcpy(&all_args[0], &kfse->args[0], sizeof(all_args));

    // and just to be anal, set this so that there are no args
    kfse->args[0].type = FSE_ARG_DONE;

    free_event_idx = (kfse - fs_event_buf);

    unlock_fs_event_buf();

    for(i=0; i < KFS_NUM_ARGS; i++) {
        kea = &all_args[i];
        if (kea->type == FSE_ARG_DONE) {
            break;
        }

        switch(kea->type) {
            case FSE_ARG_VNODE:
                vnode_rele_ext(kea->data.vp, O_EVTONLY, 0);
                break;
            case FSE_ARG_STRING:
                vfs_removename(kea->data.str);
                break;
            case FSE_ARG_RAW:
                FREE(kea->data.ptr, M_TEMP);
                break;
        }
    }
}


static int
add_watcher(int8_t *event_list, int32_t num_events, int32_t eventq_size, fs_event_watcher **watcher_out)
{
    int i;
    fs_event_watcher *watcher;

    if (eventq_size < 0 || eventq_size > MAX_KFS_EVENTS) {
        eventq_size = MAX_KFS_EVENTS;
    }

    // Note: the event_queue follows the fs_event_watcher struct
    //       in memory so we only have to do one allocation
    MALLOC(watcher,
           fs_event_watcher *,
           sizeof(fs_event_watcher) + eventq_size * sizeof(kfs_event *),
           M_TEMP, M_WAITOK);

    watcher->event_list = event_list;
    watcher->num_events = num_events;
    watcher->devices_to_watch = NULL;
    watcher->num_devices = 0;
    watcher->flags = 0;
    watcher->event_queue = (kfs_event **)&watcher[1];
    watcher->eventq_size = eventq_size;
    watcher->rd = 0;
    watcher->wr = 0;
    watcher->blockers = 0;

    lock_watch_list();

    // now update the global list of who's interested in
    // events of a particular type...
    for(i=0; i < num_events; i++) {
        if (event_list[i] != FSE_IGNORE && i < FSE_MAX_EVENTS) {
            fs_event_type_watchers[i]++;
        }
    }

    SLIST_INSERT_HEAD(&watch_list_head, watcher, link);

    unlock_watch_list();

    *watcher_out = watcher;

    return 0;
}

static void
remove_watcher(fs_event_watcher *target)
{
    int i;
    fs_event_watcher *watcher;
    kfs_event *kfse;

    lck_rw_lock_shared(&fsevent_big_lock);

    lock_watch_list();

    SLIST_FOREACH(watcher, &watch_list_head, link) {
        if (watcher == target) {
            SLIST_REMOVE(&watch_list_head, watcher, fs_event_watcher, link);

            for(i=0; i < watcher->num_events; i++) {
                if (watcher->event_list[i] != FSE_IGNORE && i < FSE_MAX_EVENTS) {
                    fs_event_type_watchers[i]--;
                }
            }

            unlock_watch_list();

            // drain the event_queue
            for(i=watcher->rd; i != watcher->wr; i=(i+1) % watcher->eventq_size) {
                kfse = watcher->event_queue[i];

                if (OSAddAtomic(-1, (SInt32 *)&kfse->refcount) == 1) {
                    do_free_event(kfse);
                }
            }

            if (watcher->event_list) {
                FREE(watcher->event_list, M_TEMP);
                watcher->event_list = NULL;
            }
            if (watcher->devices_to_watch) {
                FREE(watcher->devices_to_watch, M_TEMP);
                watcher->devices_to_watch = NULL;
            }
            FREE(watcher, M_TEMP);

            lck_rw_done(&fsevent_big_lock);
            return;
        }
    }

    unlock_watch_list();
    lck_rw_done(&fsevent_big_lock);
}


static int
watcher_add_event(fs_event_watcher *watcher, kfs_event *kfse)
{
    if (((watcher->wr + 1) % watcher->eventq_size) == watcher->rd) {
        watcher->flags |= WATCHER_DROPPED_EVENTS;
        wakeup((caddr_t)watcher);
        return ENOSPC;
    }

    watcher->event_queue[watcher->wr] = kfse;
    OSAddAtomic(1, (SInt32 *)&kfse->refcount);
    watcher->wr = (watcher->wr + 1) % watcher->eventq_size;

    // wake up the watcher if he's waiting!
    wakeup((caddr_t)watcher);

    return 0;
}
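
// Note on reference counting: OSAddAtomic() returns the value the
// counter had *before* the add, so "OSAddAtomic(-1, ...) == 1" in
// fmod_watch() and remove_watcher() means the last reference was
// just dropped and the event can be recycled with do_free_event().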


static int
fmod_watch(fs_event_watcher *watcher, struct uio *uio)
{
    int i, error = 0, last_full_event_resid;
    kfs_event *kfse;
    kfs_event_arg *kea;
    uint16_t tmp16;

    // LP64todo - fix this
    last_full_event_resid = uio_resid(uio);

    // need at least 2048 bytes of space (maxpathlen + 1 event buf)
    if (uio_resid(uio) < 2048 || watcher == NULL) {
        return EINVAL;
    }


    if (watcher->rd == watcher->wr) {
        if (watcher->flags & WATCHER_CLOSING) {
            return 0;
        }
        OSAddAtomic(1, (SInt32 *)&watcher->blockers);

        // there's nothing to do, go to sleep
        error = tsleep((caddr_t)watcher, PUSER|PCATCH, "fsevents_empty", 0);

        OSAddAtomic(-1, (SInt32 *)&watcher->blockers);

        if (error != 0 || (watcher->flags & WATCHER_CLOSING)) {
            return error;
        }
    }

    // if we dropped events, return that as an event first
    if (watcher->flags & WATCHER_DROPPED_EVENTS) {
        int32_t val = FSE_EVENTS_DROPPED;

        error = uiomove((caddr_t)&val, sizeof(int32_t), uio);
        if (error == 0) {
            val = 0;               // a fake pid
            error = uiomove((caddr_t)&val, sizeof(int32_t), uio);

            tmp16 = FSE_ARG_DONE;  // makes it a consistent msg
            error = uiomove((caddr_t)&tmp16, sizeof(int16_t), uio);
        }

        if (error) {
            return error;
        }

        watcher->flags &= ~WATCHER_DROPPED_EVENTS;
    }

    //
    // check if the next chunk of data will fit in the user's
    // buffer.  if not, just goto get_out which will return
    // the number of bytes worth of events that we did read.
    // this leaves the event that didn't fit in the queue.
    //
    // LP64todo - fix this
#define CHECK_UPTR(size) if (size > (unsigned)uio_resid(uio)) { \
                             uio_setresid(uio, last_full_event_resid); \
                             goto get_out; \
                         }
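
    // Each event is copied to the user's buffer in the format the
    // uiomove() calls below produce:
    //
    //    int32   event type
    //    int32   pid of the process that generated the event
    //    then, per argument:
    //      uint16  argument type (FSE_ARG_*)
    //      uint16  length of the argument data
    //      ...     the argument data itself
    //    ending with a lone uint16 FSE_ARG_DONE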

    for (; uio_resid(uio) > 0 && watcher->rd != watcher->wr; ) {
        kfse = watcher->event_queue[watcher->rd];

        // copy out the type of the event
        CHECK_UPTR(sizeof(int32_t));
        if ((error = uiomove((caddr_t)&kfse->type, sizeof(int32_t), uio)) != 0) {
            goto get_out;
        }

        // now copy out the pid of the person that changed the file
        CHECK_UPTR(sizeof(pid_t));
        if ((error = uiomove((caddr_t)&kfse->pid, sizeof(pid_t), uio)) != 0) {
            goto get_out;
        }

        error = 0;
        for(i=0; i < KFS_NUM_ARGS && error == 0; i++) {
            char *pathbuff;
            int pathbuff_len;

            kea = &kfse->args[i];

            tmp16 = (uint16_t)kea->type;
            CHECK_UPTR(sizeof(uint16_t));
            error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
            if (error || kea->type == FSE_ARG_DONE) {
                break;
            }

            switch(kea->type) {
                case FSE_ARG_VNODE:
                    pathbuff = get_pathbuff();
                    pathbuff_len = MAXPATHLEN;
                    if (kea->data.vp == NULL) {
                        printf("fmod_watch: whoa... vp == NULL (%d)!\n", kfse->type);
                        i--;
                        release_pathbuff(pathbuff);
                        continue;
                    }

                    if (vn_getpath(kea->data.vp, pathbuff, &pathbuff_len) != 0 || pathbuff[0] == '\0') {
//                      printf("fmod_watch: vn_getpath failed! vp 0x%x vname 0x%x (%s) vparent 0x%x\n",
//                             kea->data.vp,
//                             VNAME(kea->data.vp),
//                             VNAME(kea->data.vp) ? VNAME(kea->data.vp) : "<null>",
//                             VPARENT(kea->data.vp));
                    }
                    CHECK_UPTR(sizeof(uint16_t));
                    tmp16 = (uint16_t)pathbuff_len;
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);

                    CHECK_UPTR((unsigned)pathbuff_len);
                    error = uiomove((caddr_t)pathbuff, pathbuff_len, uio);
                    release_pathbuff(pathbuff);
                    break;


                case FSE_ARG_STRING:
                    tmp16 = (uint16_t)kea->len;
                    CHECK_UPTR(sizeof(uint16_t));
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);

                    CHECK_UPTR(kea->len);
                    error = uiomove((caddr_t)kea->data.str, kea->len, uio);
                    break;

                case FSE_ARG_INT32:
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(int32_t));
                    tmp16 = sizeof(int32_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.int32, sizeof(int32_t), uio);
                    break;

                case FSE_ARG_INT64:
                    printf("fs_events: 64-bit args not implemented on copyout.\n");
//                  CHECK_UPTR(sizeof(uint16_t) + sizeof(int64_t));
//                  tmp16 = sizeof(int64_t);
//                  error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
//                  error = uiomove((caddr_t)&kea->data.int64, sizeof(int64_t), uio);
                    break;

                case FSE_ARG_RAW:
                    tmp16 = (uint16_t)kea->len;
                    CHECK_UPTR(sizeof(uint16_t));
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);

                    CHECK_UPTR(kea->len);
                    error = uiomove((caddr_t)kea->data.ptr, kea->len, uio);
                    break;

                case FSE_ARG_DEV:
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(dev_t));
                    tmp16 = sizeof(dev_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.dev, sizeof(dev_t), uio);
                    break;

                case FSE_ARG_INO:
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(ino_t));
                    tmp16 = sizeof(ino_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.ino, sizeof(ino_t), uio);
                    break;

                case FSE_ARG_MODE:
                    // XXXdbg - NOTE: we use 32-bits for the mode, not
                    //                16-bits like a real mode_t
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(int32_t));
                    tmp16 = sizeof(int32_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.mode, sizeof(int32_t), uio);
                    break;

                case FSE_ARG_UID:
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(uid_t));
                    tmp16 = sizeof(uid_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.uid, sizeof(uid_t), uio);
                    break;

                case FSE_ARG_GID:
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(gid_t));
                    tmp16 = sizeof(gid_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.gid, sizeof(gid_t), uio);
                    break;

                default:
                    printf("fmod_watch: unknown arg type %d.\n", kea->type);
                    break;
            }
        }

        // make sure that we always end with a FSE_ARG_DONE
        if (i >= KFS_NUM_ARGS) {
            tmp16 = FSE_ARG_DONE;
            CHECK_UPTR(sizeof(uint16_t));
            error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
        }


        // LP64todo - fix this
        last_full_event_resid = uio_resid(uio);

        watcher->rd = (watcher->rd + 1) % watcher->eventq_size;

        if (OSAddAtomic(-1, (SInt32 *)&kfse->refcount) == 1) {
            do_free_event(kfse);
        }
    }

 get_out:
    return error;
}


// release any references we might have on vnodes which are on the
// mount point passed to us (so that it can be cleanly unmounted).
//
// since we don't want to lose the events we'll convert the
// vnode refs to the full path, inode #, and uid.
//
void
fsevent_unmount(struct mount *mp)
{
    int i, j;
    kfs_event *kfse;
    kfs_event_arg *kea;

    lck_rw_lock_exclusive(&fsevent_big_lock);
    lock_fs_event_buf();

    for(i=0; i < MAX_KFS_EVENTS; i++) {
        if (fs_event_buf[i].type == FSE_INVALID) {
            continue;
        }

        kfse = &fs_event_buf[i];
        for(j=0; j < KFS_NUM_ARGS; j++) {
            kea = &kfse->args[j];
            if (kea->type == FSE_ARG_DONE) {
                break;
            }

            if (kea->type == FSE_ARG_VNODE && kea->data.vp->v_mount == mp) {
                struct vnode *vp;
                char *pathbuff;
                int pathbuff_len;

                vp = kea->data.vp;
                pathbuff = get_pathbuff();
                pathbuff_len = MAXPATHLEN;

                if (vn_getpath(vp, pathbuff, &pathbuff_len) != 0 || pathbuff[0] == '\0') {
                    char *vname;

                    vname = vnode_getname(vp);

                    printf("fsevent_unmount: vn_getpath failed! vp 0x%x vname 0x%x (%s) vparent 0x%x\n",
                           vp, vname, vname ? vname : "<null>", vp->v_parent);

                    if (vname)
                        vnode_putname(vname);
                }

                // switch the arg type over to a string (the path)
                kea->type = FSE_ARG_STRING;
                kea->data.str = vfs_addname(pathbuff, pathbuff_len, 0, 0);
                kea->len = pathbuff_len;
                release_pathbuff(pathbuff);

                // and finally let go of the reference on the vnode
                vnode_rele_ext(vp, O_EVTONLY, 0);
            }
        }
    }

    unlock_fs_event_buf();
    lck_rw_done(&fsevent_big_lock);
}


//
// /dev/fsevents device code
//
static int fsevents_installed = 0;
static struct lock__bsd__ fsevents_lck;

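// one fsevent_handle is allocated per FSEVENTS_CLONE ioctl; it ties
// the cloned file descriptor to its watcher and its select() state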
typedef struct fsevent_handle {
    fs_event_watcher *watcher;
    struct selinfo si;
} fsevent_handle;


static int
fseventsf_read(struct fileproc *fp, struct uio *uio,
               __unused kauth_cred_t *cred, __unused int flags,
               __unused struct proc *p)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;
    int error;

    error = fmod_watch(fseh->watcher, uio);

    return error;
}

static int
fseventsf_write(__unused struct fileproc *fp, __unused struct uio *uio,
                __unused kauth_cred_t *cred, __unused int flags,
                __unused struct proc *p)
{
    return EIO;
}


static int
fseventsf_ioctl(struct fileproc *fp, u_long cmd, caddr_t data, struct proc *p)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;
    int ret = 0;
    pid_t pid = 0;
    fsevent_dev_filter_args *devfilt_args = (fsevent_dev_filter_args *)data;

    switch (cmd) {
        case FIONBIO:
        case FIOASYNC:
            return 0;

        case FSEVENTS_DEVICE_FILTER: {
            int new_num_devices;
            dev_t *devices_to_watch, *tmp = NULL;

            if (devfilt_args->num_devices > 256) {
                ret = EINVAL;
                break;
            }

            new_num_devices = devfilt_args->num_devices;
            if (new_num_devices == 0) {
                tmp = fseh->watcher->devices_to_watch;

                lock_watch_list();
                fseh->watcher->devices_to_watch = NULL;
                fseh->watcher->num_devices = new_num_devices;
                unlock_watch_list();

                if (tmp) {
                    FREE(tmp, M_TEMP);
                }
                break;
            }

            MALLOC(devices_to_watch, dev_t *,
                   new_num_devices * sizeof(dev_t),
                   M_TEMP, M_WAITOK);
            if (devices_to_watch == NULL) {
                ret = ENOMEM;
                break;
            }

            ret = copyin(CAST_USER_ADDR_T(devfilt_args->devices),
                         (void *)devices_to_watch,
                         new_num_devices * sizeof(dev_t));
            if (ret) {
                FREE(devices_to_watch, M_TEMP);
                break;
            }

            lock_watch_list();
            fseh->watcher->num_devices = new_num_devices;
            tmp = fseh->watcher->devices_to_watch;
            fseh->watcher->devices_to_watch = devices_to_watch;
            unlock_watch_list();

            if (tmp) {
                FREE(tmp, M_TEMP);
            }

            break;
        }

        default:
            ret = EINVAL;
            break;
    }

    return (ret);
}


static int
fseventsf_select(struct fileproc *fp, int which, void *wql, struct proc *p)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;
    int ready = 0;

    if ((which != FREAD) || (fseh->watcher->flags & WATCHER_CLOSING)) {
        return 0;
    }


    // if there's nothing in the queue, we're not ready
    if (fseh->watcher->rd == fseh->watcher->wr) {
        ready = 0;
    } else {
        ready = 1;
    }

    if (!ready) {
        selrecord(p, &fseh->si, wql);
    }

    return ready;
}


static int
fseventsf_stat(struct fileproc *fp, struct stat *sb, struct proc *p)
{
    return ENOTSUP;
}


static int
fseventsf_close(struct fileglob *fg, struct proc *p)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fg->fg_data;

    remove_watcher(fseh->watcher);

    fg->fg_data = NULL;
    fseh->watcher = NULL;
    FREE(fseh, M_TEMP);

    return 0;
}

int
fseventsf_kqfilter(struct fileproc *fp, struct knote *kn, struct proc *p)
{
    // XXXdbg
    return 0;
}


static int
fseventsf_drain(struct fileproc *fp, struct proc *p)
{
    int counter = 0;
    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;

    fseh->watcher->flags |= WATCHER_CLOSING;

    // if there are people still waiting, sleep for 10ms to
    // let them clean up and get out of there.  however we
    // also don't want to get stuck forever so if they don't
    // exit after 5 seconds we're tearing things down anyway.
    while(fseh->watcher->blockers && counter++ < 500) {
        // issue wakeup in case anyone is blocked waiting for an event
        // do this each time we wakeup in case the blocker missed
        // the wakeup due to the unprotected test of WATCHER_CLOSING
        // and decision to tsleep in fmod_watch... this bit of
        // latency is a decent tradeoff against not having to
        // take and drop a lock in fmod_watch
        wakeup((caddr_t)fseh->watcher);

        tsleep((caddr_t)fseh->watcher, PRIBIO, "watcher-close", 1);
    }

    return 0;
}


static int
fseventsopen(dev_t dev, int flag, int mode, struct proc *p)
{
    if (!is_suser()) {
        return EPERM;
    }

    return 0;
}

static int
fseventsclose(dev_t dev, int flag, int mode, struct proc *p)
{
    return 0;
}

static int
fseventsread(dev_t dev, struct uio *uio, int ioflag)
{
    return EIO;
}

static int
fseventswrite(dev_t dev, struct uio *uio, int ioflag)
{
    return EIO;
}


static struct fileops fsevents_fops = {
    fseventsf_read,
    fseventsf_write,
    fseventsf_ioctl,
    fseventsf_select,
    fseventsf_close,
    fseventsf_kqfilter,
    fseventsf_drain
};



static int
fseventsioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
    struct fileproc *f;
    int fd, error;
    fsevent_handle *fseh = NULL;
    fsevent_clone_args *fse_clone_args = (fsevent_clone_args *)data;
    int8_t *event_list;

    switch (cmd) {
        case FSEVENTS_CLONE:
            if (fse_clone_args->num_events < 0 || fse_clone_args->num_events > 4096) {
                return EINVAL;
            }

            MALLOC(fseh, fsevent_handle *, sizeof(fsevent_handle),
                   M_TEMP, M_WAITOK);
            memset(fseh, 0, sizeof(fsevent_handle));

            MALLOC(event_list, int8_t *,
                   fse_clone_args->num_events * sizeof(int8_t),
                   M_TEMP, M_WAITOK);

            error = copyin(CAST_USER_ADDR_T(fse_clone_args->event_list),
                           (void *)event_list,
                           fse_clone_args->num_events * sizeof(int8_t));
            if (error) {
                FREE(event_list, M_TEMP);
                FREE(fseh, M_TEMP);
                return error;
            }

            error = add_watcher(event_list,
                                fse_clone_args->num_events,
                                fse_clone_args->event_queue_depth,
                                &fseh->watcher);
            if (error) {
                FREE(event_list, M_TEMP);
                FREE(fseh, M_TEMP);
                return error;
            }

            error = falloc(p, &f, &fd);
            if (error) {
                FREE(event_list, M_TEMP);
                FREE(fseh, M_TEMP);
                return (error);
            }
            proc_fdlock(p);
            f->f_fglob->fg_flag = FREAD | FWRITE;
            f->f_fglob->fg_type = DTYPE_FSEVENTS;
            f->f_fglob->fg_ops = &fsevents_fops;
            f->f_fglob->fg_data = (caddr_t)fseh;
            proc_fdunlock(p);
            copyout((void *)&fd, CAST_USER_ADDR_T(fse_clone_args->fd), sizeof(int32_t));
            proc_fdlock(p);
            *fdflags(p, fd) &= ~UF_RESERVED;
            fp_drop(p, fd, f, 1);
            proc_fdunlock(p);
            break;

        default:
            error = EINVAL;
            break;
    }

    return error;
}

static int
fseventsselect(dev_t dev, int rw, struct proc *p)
{
    return 0;
}

static void
fsevents_wakeup(fsevent_handle *fseh)
{
    wakeup((caddr_t)fseh);
    selwakeup(&fseh->si);
}


/*
 * A struct describing which functions will get invoked for certain
 * actions.
 */
static struct cdevsw fsevents_cdevsw =
{
    fseventsopen,   /* open */
    fseventsclose,  /* close */
    fseventsread,   /* read */
    fseventswrite,  /* write */
    fseventsioctl,  /* ioctl */
    nulldev,        /* stop */
    nulldev,        /* reset */
    NULL,           /* tty's */
    eno_select,     /* select */
    eno_mmap,       /* mmap */
    eno_strat,      /* strategy */
    eno_getc,       /* getc */
    eno_putc,       /* putc */
    0               /* type */
};


/*
 * Called to initialize our device,
 * and to register ourselves with devfs
 */

void
fsevents_init(void)
{
    int ret;

    if (fsevents_installed) {
        return;
    }

    fsevents_installed = 1;

    lockinit(&fsevents_lck, PLOCK, "fsevents", 0, 0);

    ret = cdevsw_add(-1, &fsevents_cdevsw);
    if (ret < 0) {
        fsevents_installed = 0;
        return;
    }

    devfs_make_node(makedev(ret, 0), DEVFS_CHAR,
                    UID_ROOT, GID_WHEEL, 0644, "fsevents", 0);

    fsevents_internal_init();
}
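

//
// For illustration only: a minimal sketch of a user-space consumer
// of /dev/fsevents, pieced together from fseventsioctl() and
// fmod_watch() above.  Error handling is omitted and the values
// (queue depth, buffer size) are arbitrary.
//
//    int8_t event_list[FSE_MAX_EVENTS];
//    int fd, cloned_fd;
//    fsevent_clone_args clone_args;
//    char buf[4096];
//
//    memset(event_list, FSE_REPORT, sizeof(event_list));
//
//    fd = open("/dev/fsevents", O_RDONLY);  // root only, see fseventsopen()
//
//    clone_args.event_list        = event_list;
//    clone_args.num_events        = FSE_MAX_EVENTS;
//    clone_args.event_queue_depth = 512;
//    clone_args.fd                = &cloned_fd;
//    ioctl(fd, FSEVENTS_CLONE, &clone_args);
//
//    // events now stream from cloned_fd in the format described
//    // above fmod_watch()'s copy-out loop
//    read(cloned_fd, buf, sizeof(buf));
//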



//
// XXXdbg - temporary path buffer handling
//
#define NUM_PATH_BUFFS 16
static char path_buff[NUM_PATH_BUFFS][MAXPATHLEN];
static char path_buff_inuse[NUM_PATH_BUFFS];

static lck_grp_attr_t * pathbuff_group_attr;
static lck_attr_t * pathbuff_lock_attr;
static lck_grp_t * pathbuff_mutex_group;
static lck_mtx_t pathbuff_lock;

static void
init_pathbuff(void)
{
    pathbuff_lock_attr = lck_attr_alloc_init();
    pathbuff_group_attr = lck_grp_attr_alloc_init();
    pathbuff_mutex_group = lck_grp_alloc_init("pathbuff-mutex", pathbuff_group_attr);

    lck_mtx_init(&pathbuff_lock, pathbuff_mutex_group, pathbuff_lock_attr);
}

static void
lock_pathbuff(void)
{
    lck_mtx_lock(&pathbuff_lock);
}

static void
unlock_pathbuff(void)
{
    lck_mtx_unlock(&pathbuff_lock);
}


char *
get_pathbuff(void)
{
    int i;

    lock_pathbuff();
    for(i=0; i < NUM_PATH_BUFFS; i++) {
        if (path_buff_inuse[i] == 0) {
            break;
        }
    }

    if (i >= NUM_PATH_BUFFS) {
        char *path;

        unlock_pathbuff();
        MALLOC_ZONE(path, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
        return path;
    }

    path_buff_inuse[i] = 1;
    unlock_pathbuff();
    return &path_buff[i][0];
}

void
release_pathbuff(char *path)
{
    int i;

    if (path == NULL) {
        return;
    }

    lock_pathbuff();
    for(i=0; i < NUM_PATH_BUFFS; i++) {
        if (path == &path_buff[i][0]) {
            path_buff[i][0] = '\0';
            path_buff_inuse[i] = 0;
            unlock_pathbuff();
            return;
        }
    }

    unlock_pathbuff();

    // if we get here then it wasn't one of our temp buffers
    FREE_ZONE(path, MAXPATHLEN, M_NAMEI);
}

int
get_fse_info(struct vnode *vp, fse_info *fse, vfs_context_t ctx)
{
    struct vnode_attr va;

    VATTR_INIT(&va);
    VATTR_WANTED(&va, va_fsid);
    VATTR_WANTED(&va, va_fileid);
    VATTR_WANTED(&va, va_mode);
    VATTR_WANTED(&va, va_uid);
    VATTR_WANTED(&va, va_gid);
    if (vnode_getattr(vp, &va, ctx) != 0) {
        return -1;
    }

    fse->dev  = (dev_t)va.va_fsid;
    fse->ino  = (ino_t)va.va_fileid;
    fse->mode = (int32_t)vnode_vttoif(vnode_vtype(vp)) | va.va_mode;
    fse->uid  = (uid_t)va.va_uid;
    fse->gid  = (gid_t)va.va_gid;

    return 0;
}
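
// Note: the fse_info filled in here matches what add_fsevent()
// expects for FSE_ARG_FINFO, which expands to the same
// DEV/INO/MODE/UID/GID arguments as FSE_ARG_VNODE but without
// holding a vnode reference.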