/*
 * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <stdarg.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/file_internal.h>
#include <sys/stat.h>
#include <sys/vnode_internal.h>
#include <sys/mount_internal.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/dirent.h>
#include <sys/attr.h>
#include <sys/sysctl.h>
#include <sys/ubc.h>
#include <machine/cons.h>
#include <miscfs/specfs/specdev.h>
#include <miscfs/devfs/devfs.h>
#include <sys/filio.h>
#include <kern/locks.h>
#include <libkern/OSAtomic.h>

#include <bsm/audit_kernel.h>
#include <bsm/audit_kevents.h>

// where all our structs and defines come from
#include <sys/fsevents.h>


typedef struct kfs_event_arg {
    u_int16_t type;
    u_int16_t len;
    union {
        struct vnode *vp;
        char         *str;
        void         *ptr;
        int32_t       int32;
        dev_t         dev;
        ino_t         ino;
        int32_t       mode;
        uid_t         uid;
        gid_t         gid;
    } data;
} kfs_event_arg;

#define KFS_NUM_ARGS FSE_MAX_ARGS
typedef struct kfs_event {
    int32_t       type;      // type code of this event
    u_int32_t     refcount;  // number of clients referencing this
    pid_t         pid;       // pid of the process that did the op
    kfs_event_arg args[KFS_NUM_ARGS];
} kfs_event;

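//
// The args array is terminated by an entry whose type is FSE_ARG_DONE.
// Note that a single FSE_ARG_VNODE argument passed to add_fsevent()
// expands into several consecutive entries here (dev, inode, mode, uid,
// gid) by the time the event is recorded.
//
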
typedef struct fs_event_watcher {
    SLIST_ENTRY(fs_event_watcher) link;
    int8_t      *event_list;        // the events we're interested in
    int32_t      num_events;
    dev_t       *devices_to_watch;  // only report events from these devices
    uint32_t     num_devices;
    int32_t      flags;
    kfs_event  **event_queue;
    int32_t      eventq_size;       // number of event pointers in queue
    int32_t      rd, wr;            // indices to the event_queue
    int32_t      blockers;
    int32_t      num_readers;
} fs_event_watcher;

// fs_event_watcher flags
#define WATCHER_DROPPED_EVENTS  0x0001
#define WATCHER_CLOSING         0x0002

static SLIST_HEAD(watch_list, fs_event_watcher) watch_list_head = { NULL };

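//
// Each watcher owns a private ring of kfs_event pointers (event_queue);
// delivering an event just enqueues a pointer and takes a reference, so
// one event can sit on many watchers' queues at once.
//
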
#define MAX_KFS_EVENTS 2048

// this array holds each pending event
static kfs_event fs_event_buf[MAX_KFS_EVENTS];
static int       free_event_idx = 0;
static int       fs_event_init = 0;

//
// this array records whether anyone is interested in a
// particular type of event.  if no one is, we bail out
// early from the event delivery
//
static int16_t fs_event_type_watchers[FSE_MAX_EVENTS];

static int watcher_add_event(fs_event_watcher *watcher, kfs_event *kfse);

//
// Locks
//
static lck_grp_attr_t * fsevent_group_attr;
static lck_attr_t *     fsevent_lock_attr;
static lck_grp_t *      fsevent_mutex_group;

static lck_grp_t *      fsevent_rw_group;

static lck_rw_t  fsevent_big_lock;   // always grab this first
static lck_mtx_t watch_list_lock;
static lck_mtx_t event_buf_lock;


static void init_pathbuff(void);

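//
// Lock ordering, as practiced below: fsevent_big_lock comes first (taken
// shared by add_fsevent() and remove_watcher(), exclusive by
// fsevent_unmount()), and only then one of the two mutexes.  The two
// mutexes themselves are never held at the same time.
//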

static void
fsevents_internal_init(void)
{
    int i;

    if (fs_event_init++ != 0) {
        return;
    }

    for(i=0; i < FSE_MAX_EVENTS; i++) {
        fs_event_type_watchers[i] = 0;
    }

    for(i=0; i < MAX_KFS_EVENTS; i++) {
        fs_event_buf[i].type     = FSE_INVALID;
        fs_event_buf[i].refcount = 0;
    }

    SLIST_INIT(&watch_list_head);

    fsevent_lock_attr   = lck_attr_alloc_init();
    fsevent_group_attr  = lck_grp_attr_alloc_init();
    fsevent_mutex_group = lck_grp_alloc_init("fsevent-mutex", fsevent_group_attr);
    fsevent_rw_group    = lck_grp_alloc_init("fsevent-rw", fsevent_group_attr);

    lck_mtx_init(&watch_list_lock, fsevent_mutex_group, fsevent_lock_attr);
    lck_mtx_init(&event_buf_lock, fsevent_mutex_group, fsevent_lock_attr);

    lck_rw_init(&fsevent_big_lock, fsevent_rw_group, fsevent_lock_attr);

    init_pathbuff();
}

static void
lock_watch_list(void)
{
    lck_mtx_lock(&watch_list_lock);
}

static void
unlock_watch_list(void)
{
    lck_mtx_unlock(&watch_list_lock);
}

static void
lock_fs_event_buf(void)
{
    lck_mtx_lock(&event_buf_lock);
}

static void
unlock_fs_event_buf(void)
{
    lck_mtx_unlock(&event_buf_lock);
}

// forward prototype
static void do_free_event(kfs_event *kfse);

static int
watcher_cares_about_dev(fs_event_watcher *watcher, dev_t dev)
{
    unsigned int i;

    // if there is no list of devices to watch, then always
    // say we're interested so we'll report all events from
    // all devices
    if (watcher->devices_to_watch == NULL) {
        return 1;
    }

    for(i=0; i < watcher->num_devices; i++) {
        if (dev == watcher->devices_to_watch[i]) {
            // found a match! that means we want events
            // from this device.
            return 1;
        }
    }

    // if we're here it's not in the devices_to_watch[]
    // list so that means we do not care about it
    return 0;
}


int
need_fsevent(int type, vnode_t vp)
{
    fs_event_watcher *watcher;
    dev_t             dev;

    if (fs_event_type_watchers[type] == 0)
        return (0);
    dev = (dev_t)(vp->v_mount->mnt_vfsstat.f_fsid.val[0]);

    lock_watch_list();

    SLIST_FOREACH(watcher, &watch_list_head, link) {
        if (watcher->event_list[type] == FSE_REPORT && watcher_cares_about_dev(watcher, dev)) {
            unlock_watch_list();
            return (1);
        }
    }
    unlock_watch_list();

    return (0);
}

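//
// Callers typically gate on need_fsevent() above (which is cheap) before
// paying for the varargs call to add_fsevent().  A hypothetical call site
// might look like the following sketch; real call sites live elsewhere in
// the VFS layer, and FSE_DELETE is just an example event type:
//
//    if (need_fsevent(FSE_DELETE, vp)) {
//        add_fsevent(FSE_DELETE, ctx,
//                    FSE_ARG_VNODE, vp,
//                    FSE_ARG_DONE);
//    }
//
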
int
add_fsevent(int type, vfs_context_t ctx, ...)
{
    struct proc      *p = vfs_context_proc(ctx);
    int               i, arg_idx, num_deliveries = 0;
    kfs_event_arg    *kea;
    kfs_event        *kfse;
    fs_event_watcher *watcher;
    va_list           ap;
    int               error = 0, base;
    dev_t             dev = 0;

    va_start(ap, ctx);

    // if no one cares about this type of event, bail out
    if (fs_event_type_watchers[type] == 0) {
        va_end(ap);
        return 0;
    }

    lck_rw_lock_shared(&fsevent_big_lock);

    // find a free event and snag it for our use
    // NOTE: do not do anything that would block until
    //       the lock is dropped.
    lock_fs_event_buf();

    base = free_event_idx;
    for(i=0; i < MAX_KFS_EVENTS; i++) {
        if (fs_event_buf[(base + i) % MAX_KFS_EVENTS].type == FSE_INVALID) {
            break;
        }
    }

    if (i >= MAX_KFS_EVENTS) {
        // yikes! no free slots
        unlock_fs_event_buf();
        va_end(ap);

        lock_watch_list();
        SLIST_FOREACH(watcher, &watch_list_head, link) {
            watcher->flags |= WATCHER_DROPPED_EVENTS;
            wakeup((caddr_t)watcher);
        }
        unlock_watch_list();
        lck_rw_done(&fsevent_big_lock);

        printf("fs_events: add_event: event queue is full! dropping events.\n");
        return ENOSPC;
    }

    kfse = &fs_event_buf[(base + i) % MAX_KFS_EVENTS];

    free_event_idx = ((base + i) % MAX_KFS_EVENTS) + 1;

    kfse->type     = type;
    kfse->refcount = 1;
    kfse->pid      = p->p_pid;

    unlock_fs_event_buf();   // at this point it's safe to unlock

    //
    // now process the arguments passed in and copy them into
    // the kfse
    //
    arg_idx = 0;
    while(arg_idx < KFS_NUM_ARGS) {
        kea = &kfse->args[arg_idx++];
        kea->type = va_arg(ap, int32_t);

        if (kea->type == FSE_ARG_DONE) {
            break;
        }

        switch(kea->type) {
            case FSE_ARG_VNODE: {
                // this expands out into multiple arguments to the client
                struct vnode *vp;
                struct vnode_attr va;

                kea->data.vp = vp = va_arg(ap, struct vnode *);
                if (kea->data.vp == NULL) {
                    panic("add_fsevent: you can't pass me a NULL vnode ptr (type %d)!\n",
                          kfse->type);
                }

                if (vnode_ref_ext(kea->data.vp, O_EVTONLY) != 0) {
                    kea->type = FSE_ARG_DONE;

                    error = EINVAL;
                    va_end(ap);
                    goto clean_up;
                }
                VATTR_INIT(&va);
                VATTR_WANTED(&va, va_fsid);
                VATTR_WANTED(&va, va_fileid);
                VATTR_WANTED(&va, va_mode);
                VATTR_WANTED(&va, va_uid);
                VATTR_WANTED(&va, va_gid);
                if (vnode_getattr(kea->data.vp, &va, ctx) != 0) {
                    vnode_rele_ext(kea->data.vp, O_EVTONLY, 0);
                    kea->type = FSE_ARG_DONE;

                    error = EINVAL;
                    va_end(ap);
                    goto clean_up;
                }

                kea++;
                kea->type = FSE_ARG_DEV;
                kea->data.dev = dev = (dev_t)va.va_fsid;

                kea++;
                kea->type = FSE_ARG_INO;
                kea->data.ino = (ino_t)va.va_fileid;

                kea++;
                kea->type = FSE_ARG_MODE;
                kea->data.mode = (int32_t)vnode_vttoif(vnode_vtype(vp)) | va.va_mode;

                kea++;
                kea->type = FSE_ARG_UID;
                kea->data.uid = va.va_uid;

                kea++;
                kea->type = FSE_ARG_GID;
                kea->data.gid = va.va_gid;
                arg_idx += 5;
                break;
            }

            case FSE_ARG_FINFO: {
                fse_info *fse;

                fse = va_arg(ap, fse_info *);

                kea->type = FSE_ARG_DEV;
                kea->data.dev = dev = (dev_t)fse->dev;

                kea++;
                kea->type = FSE_ARG_INO;
                kea->data.ino = (ino_t)fse->ino;

                kea++;
                kea->type = FSE_ARG_MODE;
                kea->data.mode = (int32_t)fse->mode;

                kea++;
                kea->type = FSE_ARG_UID;
                kea->data.uid = (uid_t)fse->uid;

                kea++;
                kea->type = FSE_ARG_GID;
                kea->data.gid = (gid_t)fse->gid;
                arg_idx += 4;
                break;
            }

            case FSE_ARG_STRING:
                kea->len = (u_int16_t)(va_arg(ap, int32_t) & 0xffff);
                kea->data.str = vfs_addname(va_arg(ap, char *), kea->len, 0, 0);
                break;

            case FSE_ARG_INT32:
                kea->data.int32 = va_arg(ap, int32_t);
                break;

            case FSE_ARG_INT64:
                printf("fs_events: 64-bit args not implemented.\n");
                // kea->data.int64 = va_arg(ap, int64_t);
                break;

            case FSE_ARG_RAW:
                kea->len = (u_int16_t)(va_arg(ap, int32_t) & 0xffff);
                MALLOC(kea->data.ptr, void *, kea->len, M_TEMP, M_WAITOK);
                memcpy(kea->data.ptr, va_arg(ap, void *), kea->len);
                break;

            case FSE_ARG_DEV:
                kea->data.dev = dev = va_arg(ap, dev_t);
                break;

            case FSE_ARG_MODE:
                kea->data.mode = va_arg(ap, int32_t);
                break;

            case FSE_ARG_INO:
                kea->data.ino = va_arg(ap, ino_t);
                break;

            case FSE_ARG_UID:
                kea->data.uid = va_arg(ap, uid_t);
                break;

            case FSE_ARG_GID:
                kea->data.gid = va_arg(ap, gid_t);
                break;

            default:
                printf("add_fsevent: unknown type %d\n", kea->type);
                // just skip one 32-bit word and hope we sync up...
                (void)va_arg(ap, int32_t);
        }
    }

    va_end(ap);

    //
    // now we have to go and let everyone who is interested in
    // this type of event know about it...
    //
    lock_watch_list();

    SLIST_FOREACH(watcher, &watch_list_head, link) {
        if (watcher->event_list[type] == FSE_REPORT && watcher_cares_about_dev(watcher, dev)) {
            if (watcher_add_event(watcher, kfse) == 0) {
                num_deliveries++;
            }
        }
    }

    unlock_watch_list();

clean_up:
    // just in case no one was interested after all...
    if (OSAddAtomic(-1, (SInt32 *)&kfse->refcount) == 1) {
        do_free_event(kfse);
    }

    lck_rw_done(&fsevent_big_lock);
    return error;
}

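//
// An event's refcount starts at 1 (held by add_fsevent) and is bumped once
// for each watcher queue it lands on.  OSAddAtomic() returns the value from
// *before* the add, so a return of 1 on a -1 means the count just hit zero
// and the slot can be recycled.
//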
static void
do_free_event(kfs_event *kfse)
{
    int i;
    kfs_event_arg *kea, all_args[KFS_NUM_ARGS];

    lock_fs_event_buf();

    if (kfse->refcount > 0) {
        panic("do_free_event: free'ing a kfsevent w/refcount == %d (kfse %p)\n",
              kfse->refcount, kfse);
    }

    // make a copy of this so we can free things without
    // holding the fs_event_buf lock
    //
    memcpy(&all_args[0], &kfse->args[0], sizeof(all_args));

    // and just to be anal, set this so that there are no args
    kfse->args[0].type = FSE_ARG_DONE;

    // mark this fsevent as invalid
    kfse->type = FSE_INVALID;

    free_event_idx = (kfse - fs_event_buf);

    unlock_fs_event_buf();

    for(i=0; i < KFS_NUM_ARGS; i++) {
        kea = &all_args[i];
        if (kea->type == FSE_ARG_DONE) {
            break;
        }

        switch(kea->type) {
            case FSE_ARG_VNODE:
                vnode_rele_ext(kea->data.vp, O_EVTONLY, 0);
                break;
            case FSE_ARG_STRING:
                vfs_removename(kea->data.str);
                break;
            case FSE_ARG_RAW:
                FREE(kea->data.ptr, M_TEMP);
                break;
        }
    }
}


static int
add_watcher(int8_t *event_list, int32_t num_events, int32_t eventq_size, fs_event_watcher **watcher_out)
{
    int               i;
    fs_event_watcher *watcher;

    if (eventq_size < 0 || eventq_size > MAX_KFS_EVENTS) {
        eventq_size = MAX_KFS_EVENTS;
    }

    // Note: the event_queue follows the fs_event_watcher struct
    //       in memory so we only have to do one allocation
    MALLOC(watcher,
           fs_event_watcher *,
           sizeof(fs_event_watcher) + eventq_size * sizeof(kfs_event *),
           M_TEMP, M_WAITOK);

    watcher->event_list       = event_list;
    watcher->num_events       = num_events;
    watcher->devices_to_watch = NULL;
    watcher->num_devices      = 0;
    watcher->flags            = 0;
    watcher->event_queue      = (kfs_event **)&watcher[1];
    watcher->eventq_size      = eventq_size;
    watcher->rd               = 0;
    watcher->wr               = 0;
    watcher->blockers         = 0;
    watcher->num_readers      = 0;

    lock_watch_list();

    // now update the global list of who's interested in
    // events of a particular type...
    for(i=0; i < num_events; i++) {
        if (event_list[i] != FSE_IGNORE && i < FSE_MAX_EVENTS) {
            fs_event_type_watchers[i]++;
        }
    }

    SLIST_INSERT_HEAD(&watch_list_head, watcher, link);

    unlock_watch_list();

    *watcher_out = watcher;

    return 0;
}

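//
// Note that add_watcher() takes ownership of event_list: remove_watcher()
// below is what eventually frees it, along with any device filter that was
// installed later through the FSEVENTS_DEVICE_FILTER ioctl.
//
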
static void
remove_watcher(fs_event_watcher *target)
{
    int               i;
    fs_event_watcher *watcher;
    kfs_event        *kfse;

    lck_rw_lock_shared(&fsevent_big_lock);

    lock_watch_list();

    SLIST_FOREACH(watcher, &watch_list_head, link) {
        if (watcher == target) {
            SLIST_REMOVE(&watch_list_head, watcher, fs_event_watcher, link);

            for(i=0; i < watcher->num_events; i++) {
                if (watcher->event_list[i] != FSE_IGNORE && i < FSE_MAX_EVENTS) {
                    fs_event_type_watchers[i]--;
                }
            }

            unlock_watch_list();

            // drain the event_queue
            for(i=watcher->rd; i != watcher->wr; i=(i+1) % watcher->eventq_size) {
                kfse = watcher->event_queue[i];

                if (OSAddAtomic(-1, (SInt32 *)&kfse->refcount) == 1) {
                    do_free_event(kfse);
                }
            }

            if (watcher->event_list) {
                FREE(watcher->event_list, M_TEMP);
                watcher->event_list = NULL;
            }
            if (watcher->devices_to_watch) {
                FREE(watcher->devices_to_watch, M_TEMP);
                watcher->devices_to_watch = NULL;
            }
            FREE(watcher, M_TEMP);

            lck_rw_done(&fsevent_big_lock);
            return;
        }
    }

    unlock_watch_list();
    lck_rw_done(&fsevent_big_lock);
}


static int
watcher_add_event(fs_event_watcher *watcher, kfs_event *kfse)
{
    // the event_queue is a ring buffer that deliberately leaves one
    // slot empty: wr sitting one step behind rd means the ring is full
    if (((watcher->wr + 1) % watcher->eventq_size) == watcher->rd) {
        watcher->flags |= WATCHER_DROPPED_EVENTS;
        wakeup((caddr_t)watcher);
        return ENOSPC;
    }

    watcher->event_queue[watcher->wr] = kfse;
    OSAddAtomic(1, (SInt32 *)&kfse->refcount);
    watcher->wr = (watcher->wr + 1) % watcher->eventq_size;

    // wake up the watcher if he's waiting!
    wakeup((caddr_t)watcher);

    return 0;
}

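//
// fmod_watch() below streams each event to userspace as:
//
//    int32_t  type;                // FSE_* event code
//    int32_t  pid;                 // pid that did the op
//    // then, repeated once per argument:
//    uint16_t argtype;             // FSE_ARG_* code
//    uint16_t arglen;              // length of the payload that follows
//    char     arg[arglen];         // the payload itself
//    // and finally:
//    uint16_t done;                // FSE_ARG_DONE, no payload
//
// A userspace reader could walk a buffer of such records roughly like
// this (a minimal sketch, not kernel code; error checking, short reads,
// and alignment are all glossed over):
//
//    char buf[8192];
//    ssize_t n = read(fd, buf, sizeof(buf));
//    char *p = buf;
//    while (p < buf + n) {
//        p += sizeof(int32_t) + sizeof(pid_t);   // skip type + pid
//        uint16_t argtype, arglen;
//        for(;;) {
//            memcpy(&argtype, p, sizeof(argtype));
//            p += sizeof(argtype);
//            if (argtype == FSE_ARG_DONE)
//                break;
//            memcpy(&arglen, p, sizeof(arglen));
//            p += sizeof(arglen) + arglen;       // skip len + payload
//        }
//    }
//
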
static int
fmod_watch(fs_event_watcher *watcher, struct uio *uio)
{
    int            i, error = 0, last_full_event_resid;
    kfs_event     *kfse;
    kfs_event_arg *kea;
    uint16_t       tmp16;

    // LP64todo - fix this
    last_full_event_resid = uio_resid(uio);

    // need at least 2048 bytes of space (maxpathlen + 1 event buf)
    if (uio_resid(uio) < 2048 || watcher == NULL) {
        return EINVAL;
    }

    if (OSAddAtomic(1, (SInt32 *)&watcher->num_readers) != 0) {
        // don't allow multiple threads to read from the fd at the same time
        OSAddAtomic(-1, (SInt32 *)&watcher->num_readers);
        return EAGAIN;
    }

    if (watcher->rd == watcher->wr) {
        if (watcher->flags & WATCHER_CLOSING) {
            OSAddAtomic(-1, (SInt32 *)&watcher->num_readers);
            return 0;
        }
        OSAddAtomic(1, (SInt32 *)&watcher->blockers);

        // there's nothing to do, go to sleep
        error = tsleep((caddr_t)watcher, PUSER|PCATCH, "fsevents_empty", 0);

        OSAddAtomic(-1, (SInt32 *)&watcher->blockers);

        if (error != 0 || (watcher->flags & WATCHER_CLOSING)) {
            OSAddAtomic(-1, (SInt32 *)&watcher->num_readers);
            return error;
        }
    }

    // if we dropped events, return that as an event first
    if (watcher->flags & WATCHER_DROPPED_EVENTS) {
        int32_t val = FSE_EVENTS_DROPPED;

        error = uiomove((caddr_t)&val, sizeof(int32_t), uio);
        if (error == 0) {
            val = 0;   // a fake pid
            error = uiomove((caddr_t)&val, sizeof(int32_t), uio);

            tmp16 = FSE_ARG_DONE;   // makes it a consistent msg
            error = uiomove((caddr_t)&tmp16, sizeof(int16_t), uio);
        }

        if (error) {
            OSAddAtomic(-1, (SInt32 *)&watcher->num_readers);
            return error;
        }

        watcher->flags &= ~WATCHER_DROPPED_EVENTS;
    }

    // check if the next chunk of data will fit in the user's
    // buffer.  if not, just goto get_out which will return
    // the number of bytes worth of events that we did read.
    // this leaves the event that didn't fit in the queue.
    //
    // LP64todo - fix this
#define CHECK_UPTR(size) if (size > (unsigned)uio_resid(uio)) { \
            uio_setresid(uio, last_full_event_resid);           \
            goto get_out;                                       \
        }

    for (; uio_resid(uio) > 0 && watcher->rd != watcher->wr; ) {
        kfse = watcher->event_queue[watcher->rd];

        // copy out the type of the event
        CHECK_UPTR(sizeof(int32_t));
        if ((error = uiomove((caddr_t)&kfse->type, sizeof(int32_t), uio)) != 0) {
            goto get_out;
        }

        // now copy out the pid of the person that changed the file
        CHECK_UPTR(sizeof(pid_t));
        if ((error = uiomove((caddr_t)&kfse->pid, sizeof(pid_t), uio)) != 0) {
            goto get_out;
        }

        error = 0;
        for(i=0; i < KFS_NUM_ARGS && error == 0; i++) {
            char *pathbuff;
            int   pathbuff_len;

            kea = &kfse->args[i];

            tmp16 = (uint16_t)kea->type;
            CHECK_UPTR(sizeof(uint16_t));
            error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
            if (error || kea->type == FSE_ARG_DONE) {
                break;
            }

            switch(kea->type) {
                case FSE_ARG_VNODE:
                    pathbuff = get_pathbuff();
                    pathbuff_len = MAXPATHLEN;
                    if (kea->data.vp == NULL) {
                        printf("fmod_watch: whoa... vp == NULL (%d)!\n", kfse->type);
                        i--;
                        release_pathbuff(pathbuff);
                        continue;
                    }

                    if (vn_getpath(kea->data.vp, pathbuff, &pathbuff_len) != 0 || pathbuff[0] == '\0') {
                        // printf("fmod_watch: vn_getpath failed! vp 0x%x vname 0x%x (%s) vparent 0x%x\n",
                        //        kea->data.vp,
                        //        VNAME(kea->data.vp),
                        //        VNAME(kea->data.vp) ? VNAME(kea->data.vp) : "<null>",
                        //        VPARENT(kea->data.vp));
                    }
                    CHECK_UPTR(sizeof(uint16_t));
                    tmp16 = (uint16_t)pathbuff_len;
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);

                    CHECK_UPTR((unsigned)pathbuff_len);
                    error = uiomove((caddr_t)pathbuff, pathbuff_len, uio);
                    release_pathbuff(pathbuff);
                    break;


                case FSE_ARG_STRING:
                    tmp16 = (uint16_t)kea->len;
                    CHECK_UPTR(sizeof(uint16_t));
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);

                    CHECK_UPTR(kea->len);
                    error = uiomove((caddr_t)kea->data.str, kea->len, uio);
                    break;

                case FSE_ARG_INT32:
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(int32_t));
                    tmp16 = sizeof(int32_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.int32, sizeof(int32_t), uio);
                    break;

                case FSE_ARG_INT64:
                    printf("fs_events: 64-bit args not implemented on copyout.\n");
                    // CHECK_UPTR(sizeof(uint16_t) + sizeof(int64_t));
                    // tmp16 = sizeof(int64_t);
                    // error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    // error = uiomove((caddr_t)&kea->data.int64, sizeof(int64_t), uio);
                    break;

                case FSE_ARG_RAW:
                    tmp16 = (uint16_t)kea->len;
                    CHECK_UPTR(sizeof(uint16_t));
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);

                    CHECK_UPTR(kea->len);
                    error = uiomove((caddr_t)kea->data.ptr, kea->len, uio);
                    break;

                case FSE_ARG_DEV:
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(dev_t));
                    tmp16 = sizeof(dev_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.dev, sizeof(dev_t), uio);
                    break;

                case FSE_ARG_INO:
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(ino_t));
                    tmp16 = sizeof(ino_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.ino, sizeof(ino_t), uio);
                    break;

                case FSE_ARG_MODE:
                    // XXXdbg - NOTE: we use 32-bits for the mode, not
                    //          16-bits like a real mode_t
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(int32_t));
                    tmp16 = sizeof(int32_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.mode, sizeof(int32_t), uio);
                    break;

                case FSE_ARG_UID:
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(uid_t));
                    tmp16 = sizeof(uid_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.uid, sizeof(uid_t), uio);
                    break;

                case FSE_ARG_GID:
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(gid_t));
                    tmp16 = sizeof(gid_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.gid, sizeof(gid_t), uio);
                    break;

                default:
                    printf("fmod_watch: unknown arg type %d.\n", kea->type);
                    break;
            }
        }

        // make sure that we always end with a FSE_ARG_DONE
        if (i >= KFS_NUM_ARGS) {
            tmp16 = FSE_ARG_DONE;
            CHECK_UPTR(sizeof(uint16_t));
            error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
        }


        // LP64todo - fix this
        last_full_event_resid = uio_resid(uio);

        watcher->rd = (watcher->rd + 1) % watcher->eventq_size;

        if (OSAddAtomic(-1, (SInt32 *)&kfse->refcount) == 1) {
            do_free_event(kfse);
        }
    }

get_out:
    OSAddAtomic(-1, (SInt32 *)&watcher->num_readers);
    return error;
}


// release any references we might have on vnodes which are
// on the mount point passed to us (so that it can be cleanly
// unmounted).
//
// since we don't want to lose the events we'll convert the
// vnode refs to the full path, inode #, and uid.
//
void
fsevent_unmount(struct mount *mp)
{
    int i, j;
    kfs_event     *kfse;
    kfs_event_arg *kea;

    lck_rw_lock_exclusive(&fsevent_big_lock);
    lock_fs_event_buf();

    for(i=0; i < MAX_KFS_EVENTS; i++) {
        if (fs_event_buf[i].type == FSE_INVALID) {
            continue;
        }

        kfse = &fs_event_buf[i];
        for(j=0; j < KFS_NUM_ARGS; j++) {
            kea = &kfse->args[j];
            if (kea->type == FSE_ARG_DONE) {
                break;
            }

            if (kea->type == FSE_ARG_VNODE && kea->data.vp->v_mount == mp) {
                struct vnode *vp;
                char *pathbuff;
                int   pathbuff_len;

                vp = kea->data.vp;
                pathbuff = get_pathbuff();
                pathbuff_len = MAXPATHLEN;

                if (vn_getpath(vp, pathbuff, &pathbuff_len) != 0 || pathbuff[0] == '\0') {
                    char *vname;

                    vname = vnode_getname(vp);

                    printf("fsevent_unmount: vn_getpath failed! vp 0x%x vname 0x%x (%s) vparent 0x%x\n",
                           vp, vname, vname ? vname : "<null>", vp->v_parent);

                    if (vname)
                        vnode_putname(vname);

                    strcpy(pathbuff, "UNKNOWN-FILE");
                    pathbuff_len = strlen(pathbuff) + 1;
                }

                // switch the type of the string
                kea->type     = FSE_ARG_STRING;
                kea->data.str = vfs_addname(pathbuff, pathbuff_len, 0, 0);
                kea->len      = pathbuff_len;
                release_pathbuff(pathbuff);

                // and finally let go of the reference on the vnode
                vnode_rele_ext(vp, O_EVTONLY, 0);
            }
        }
    }

    unlock_fs_event_buf();
    lck_rw_done(&fsevent_big_lock);
}


//
// /dev/fsevents device code
//
static int fsevents_installed = 0;
static struct lock__bsd__ fsevents_lck;

typedef struct fsevent_handle {
    fs_event_watcher *watcher;
    struct selinfo    si;
} fsevent_handle;


static int
fseventsf_read(struct fileproc *fp, struct uio *uio,
               __unused kauth_cred_t *cred, __unused int flags,
               __unused struct proc *p)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;
    int error;

    error = fmod_watch(fseh->watcher, uio);

    return error;
}

static int
fseventsf_write(__unused struct fileproc *fp, __unused struct uio *uio,
                __unused kauth_cred_t *cred, __unused int flags,
                __unused struct proc *p)
{
    return EIO;
}


static int
fseventsf_ioctl(struct fileproc *fp, u_long cmd, caddr_t data, struct proc *p)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;
    int   ret = 0;
    pid_t pid = 0;
    fsevent_dev_filter_args *devfilt_args = (fsevent_dev_filter_args *)data;

    switch (cmd) {
        case FIONBIO:
        case FIOASYNC:
            return 0;

        case FSEVENTS_DEVICE_FILTER: {
            int    new_num_devices;
            dev_t *devices_to_watch, *tmp = NULL;

            if (devfilt_args->num_devices > 256) {
                ret = EINVAL;
                break;
            }

            new_num_devices = devfilt_args->num_devices;
            if (new_num_devices == 0) {
                tmp = fseh->watcher->devices_to_watch;

                lock_watch_list();
                fseh->watcher->devices_to_watch = NULL;
                fseh->watcher->num_devices = new_num_devices;
                unlock_watch_list();

                if (tmp) {
                    FREE(tmp, M_TEMP);
                }
                break;
            }

            MALLOC(devices_to_watch, dev_t *,
                   new_num_devices * sizeof(dev_t),
                   M_TEMP, M_WAITOK);
            if (devices_to_watch == NULL) {
                ret = ENOMEM;
                break;
            }

            ret = copyin(CAST_USER_ADDR_T(devfilt_args->devices),
                         (void *)devices_to_watch,
                         new_num_devices * sizeof(dev_t));
            if (ret) {
                FREE(devices_to_watch, M_TEMP);
                break;
            }

            lock_watch_list();
            fseh->watcher->num_devices = new_num_devices;
            tmp = fseh->watcher->devices_to_watch;
            fseh->watcher->devices_to_watch = devices_to_watch;
            unlock_watch_list();

            if (tmp) {
                FREE(tmp, M_TEMP);
            }

            break;
        }

        default:
            ret = EINVAL;
            break;
    }

    return (ret);
}


static int
fseventsf_select(struct fileproc *fp, int which, void *wql, struct proc *p)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;
    int ready = 0;

    if ((which != FREAD) || (fseh->watcher->flags & WATCHER_CLOSING)) {
        return 0;
    }


    // if there's nothing in the queue, we're not ready
    if (fseh->watcher->rd == fseh->watcher->wr) {
        ready = 0;
    } else {
        ready = 1;
    }

    if (!ready) {
        selrecord(p, &fseh->si, wql);
    }

    return ready;
}


static int
fseventsf_stat(struct fileproc *fp, struct stat *sb, struct proc *p)
{
    return ENOTSUP;
}


static int
fseventsf_close(struct fileglob *fg, struct proc *p)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fg->fg_data;

    remove_watcher(fseh->watcher);

    fg->fg_data = NULL;
    fseh->watcher = NULL;
    FREE(fseh, M_TEMP);

    return 0;
}

int
fseventsf_kqfilter(struct fileproc *fp, struct knote *kn, struct proc *p)
{
    // XXXdbg
    return 0;
}


static int
fseventsf_drain(struct fileproc *fp, struct proc *p)
{
    int counter = 0;
    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;

    fseh->watcher->flags |= WATCHER_CLOSING;

    // if there are people still waiting, sleep for 10ms to
    // let them clean up and get out of there.  however we
    // also don't want to get stuck forever so if they don't
    // exit after 5 seconds we're tearing things down anyway.
    while(fseh->watcher->blockers && counter++ < 500) {
        // issue wakeup in case anyone is blocked waiting for an event
        // do this each time we wakeup in case the blocker missed
        // the wakeup due to the unprotected test of WATCHER_CLOSING
        // and decision to tsleep in fmod_watch... this bit of
        // latency is a decent tradeoff against not having to
        // take and drop a lock in fmod_watch
        wakeup((caddr_t)fseh->watcher);

        tsleep((caddr_t)fseh->watcher, PRIBIO, "watcher-close", 1);
    }

    return 0;
}


static int
fseventsopen(dev_t dev, int flag, int mode, struct proc *p)
{
    if (!is_suser()) {
        return EPERM;
    }

    return 0;
}

static int
fseventsclose(dev_t dev, int flag, int mode, struct proc *p)
{
    return 0;
}

static int
fseventsread(dev_t dev, struct uio *uio, int ioflag)
{
    return EIO;
}

static int
fseventswrite(dev_t dev, struct uio *uio, int ioflag)
{
    return EIO;
}


static struct fileops fsevents_fops = {
    fseventsf_read,
    fseventsf_write,
    fseventsf_ioctl,
    fseventsf_select,
    fseventsf_close,
    fseventsf_kqfilter,
    fseventsf_drain
};


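//
// Userspace gets a watcher by opening /dev/fsevents and issuing the
// FSEVENTS_CLONE ioctl below, which hands back a new fd wired to the
// fileops above.  A minimal sketch of the client side (not kernel code;
// error handling omitted, and reporting everything at a queue depth of
// 512 is just an example policy):
//
//    int8_t  events[FSE_MAX_EVENTS];
//    int32_t new_fd = -1;
//    fsevent_clone_args args;
//
//    memset(events, FSE_REPORT, sizeof(events));
//    args.event_list        = events;
//    args.num_events        = FSE_MAX_EVENTS;
//    args.event_queue_depth = 512;
//    args.fd                = &new_fd;
//
//    int devfd = open("/dev/fsevents", O_RDONLY);
//    ioctl(devfd, FSEVENTS_CLONE, &args);
//    // read(new_fd, ...) now returns the stream fmod_watch produces
//
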
static int
fseventsioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
    struct fileproc *f;
    int fd, error;
    fsevent_handle *fseh = NULL;
    fsevent_clone_args *fse_clone_args = (fsevent_clone_args *)data;
    int8_t *event_list;

    switch (cmd) {
        case FSEVENTS_CLONE:
            if (fse_clone_args->num_events < 0 || fse_clone_args->num_events > 4096) {
                return EINVAL;
            }

            MALLOC(fseh, fsevent_handle *, sizeof(fsevent_handle),
                   M_TEMP, M_WAITOK);
            memset(fseh, 0, sizeof(fsevent_handle));

            MALLOC(event_list, int8_t *,
                   fse_clone_args->num_events * sizeof(int8_t),
                   M_TEMP, M_WAITOK);

            error = copyin(CAST_USER_ADDR_T(fse_clone_args->event_list),
                           (void *)event_list,
                           fse_clone_args->num_events * sizeof(int8_t));
            if (error) {
                FREE(event_list, M_TEMP);
                FREE(fseh, M_TEMP);
                return error;
            }

            error = add_watcher(event_list,
                                fse_clone_args->num_events,
                                fse_clone_args->event_queue_depth,
                                &fseh->watcher);
            if (error) {
                FREE(event_list, M_TEMP);
                FREE(fseh, M_TEMP);
                return error;
            }

            error = falloc(p, &f, &fd);
            if (error) {
                FREE(event_list, M_TEMP);
                FREE(fseh, M_TEMP);
                return (error);
            }
            proc_fdlock(p);
            f->f_fglob->fg_flag = FREAD | FWRITE;
            f->f_fglob->fg_type = DTYPE_FSEVENTS;
            f->f_fglob->fg_ops  = &fsevents_fops;
            f->f_fglob->fg_data = (caddr_t)fseh;
            proc_fdunlock(p);
            copyout((void *)&fd, CAST_USER_ADDR_T(fse_clone_args->fd), sizeof(int32_t));
            proc_fdlock(p);
            *fdflags(p, fd) &= ~UF_RESERVED;
            fp_drop(p, fd, f, 1);
            proc_fdunlock(p);
            break;

        default:
            error = EINVAL;
            break;
    }

    return error;
}

static int
fseventsselect(dev_t dev, int rw, struct proc *p)
{
    return 0;
}

static void
fsevents_wakeup(fsevent_handle *fseh)
{
    wakeup((caddr_t)fseh);
    selwakeup(&fseh->si);
}


/*
 * A struct describing which functions will get invoked for certain
 * actions.
 */
static struct cdevsw fsevents_cdevsw =
{
    fseventsopen,   /* open */
    fseventsclose,  /* close */
    fseventsread,   /* read */
    fseventswrite,  /* write */
    fseventsioctl,  /* ioctl */
    nulldev,        /* stop */
    nulldev,        /* reset */
    NULL,           /* tty's */
    eno_select,     /* select */
    eno_mmap,       /* mmap */
    eno_strat,      /* strategy */
    eno_getc,       /* getc */
    eno_putc,       /* putc */
    0               /* type */
};


/*
 * Called to initialize our device,
 * and to register ourselves with devfs
 */

void
fsevents_init(void)
{
    int ret;

    if (fsevents_installed) {
        return;
    }

    fsevents_installed = 1;

    lockinit(&fsevents_lck, PLOCK, "fsevents", 0, 0);

    ret = cdevsw_add(-1, &fsevents_cdevsw);
    if (ret < 0) {
        fsevents_installed = 0;
        return;
    }

    devfs_make_node(makedev(ret, 0), DEVFS_CHAR,
                    UID_ROOT, GID_WHEEL, 0644, "fsevents", 0);

    fsevents_internal_init();
}


//
// XXXdbg - temporary path buffer handling
//
#define NUM_PATH_BUFFS 16
static char path_buff[NUM_PATH_BUFFS][MAXPATHLEN];
static char path_buff_inuse[NUM_PATH_BUFFS];

static lck_grp_attr_t * pathbuff_group_attr;
static lck_attr_t *     pathbuff_lock_attr;
static lck_grp_t *      pathbuff_mutex_group;
static lck_mtx_t        pathbuff_lock;

static void
init_pathbuff(void)
{
    pathbuff_lock_attr   = lck_attr_alloc_init();
    pathbuff_group_attr  = lck_grp_attr_alloc_init();
    pathbuff_mutex_group = lck_grp_alloc_init("pathbuff-mutex", pathbuff_group_attr);

    lck_mtx_init(&pathbuff_lock, pathbuff_mutex_group, pathbuff_lock_attr);
}

static void
lock_pathbuff(void)
{
    lck_mtx_lock(&pathbuff_lock);
}

static void
unlock_pathbuff(void)
{
    lck_mtx_unlock(&pathbuff_lock);
}


char *
get_pathbuff(void)
{
    int i;

    lock_pathbuff();
    for(i=0; i < NUM_PATH_BUFFS; i++) {
        if (path_buff_inuse[i] == 0) {
            break;
        }
    }

    if (i >= NUM_PATH_BUFFS) {
        char *path;

        // all the static buffers are taken; fall back to the zone
        // allocator (release_pathbuff knows how to free these too)
        unlock_pathbuff();
        MALLOC_ZONE(path, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
        return path;
    }

    path_buff_inuse[i] = 1;
    unlock_pathbuff();
    return &path_buff[i][0];
}

void
release_pathbuff(char *path)
{
    int i;

    if (path == NULL) {
        return;
    }

    lock_pathbuff();
    for(i=0; i < NUM_PATH_BUFFS; i++) {
        if (path == &path_buff[i][0]) {
            path_buff[i][0] = '\0';
            path_buff_inuse[i] = 0;
            unlock_pathbuff();
            return;
        }
    }

    unlock_pathbuff();

    // if we get here then it wasn't one of our temp buffers
    FREE_ZONE(path, MAXPATHLEN, M_NAMEI);
}

int
get_fse_info(struct vnode *vp, fse_info *fse, vfs_context_t ctx)
{
    struct vnode_attr va;

    VATTR_INIT(&va);
    VATTR_WANTED(&va, va_fsid);
    VATTR_WANTED(&va, va_fileid);
    VATTR_WANTED(&va, va_mode);
    VATTR_WANTED(&va, va_uid);
    VATTR_WANTED(&va, va_gid);
    if (vnode_getattr(vp, &va, ctx) != 0) {
        return -1;
    }

    fse->dev  = (dev_t)va.va_fsid;
    fse->ino  = (ino_t)va.va_fileid;
    fse->mode = (int32_t)vnode_vttoif(vnode_vtype(vp)) | va.va_mode;
    fse->uid  = (uid_t)va.va_uid;
    fse->gid  = (gid_t)va.va_gid;

    return 0;
}
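
//
// get_fse_info() fills in the fse_info that callers hand to add_fsevent()
// via FSE_ARG_FINFO, letting them snapshot a vnode's identity up front
// instead of passing the vnode itself (and having the event hold a
// reference on it).
//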