/*
 * Copyright (c) 2006 Apple Computer, Inc. All Rights Reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
#include <stdarg.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/file_internal.h>
#include <sys/stat.h>
#include <sys/vnode_internal.h>
#include <sys/mount_internal.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/dirent.h>
#include <sys/attr.h>
#include <sys/sysctl.h>
#include <sys/ubc.h>
#include <machine/cons.h>
#include <miscfs/specfs/specdev.h>
#include <miscfs/devfs/devfs.h>
#include <sys/filio.h>
#include <architecture/byte_order.h>
#include <kern/locks.h>
#include <libkern/OSAtomic.h>

#include <bsm/audit_kernel.h>
#include <bsm/audit_kevents.h>

// where all our structs and defines come from
#include <sys/fsevents.h>


typedef struct kfs_event_arg {
    u_int16_t type;
    u_int16_t len;
    union {
        struct vnode *vp;
        char         *str;
        void         *ptr;
        int32_t       int32;
        dev_t         dev;
        ino_t         ino;
        int32_t       mode;
        uid_t         uid;
        gid_t         gid;
    } data;
} kfs_event_arg;

#define KFS_NUM_ARGS  FSE_MAX_ARGS
typedef struct kfs_event {
    int32_t        type;      // type code of this event
    u_int32_t      refcount;  // number of clients referencing this
    pid_t          pid;       // pid of the process that did the op
    kfs_event_arg  args[KFS_NUM_ARGS];
} kfs_event;


typedef struct fs_event_watcher {
    SLIST_ENTRY(fs_event_watcher) link;
    int8_t      *event_list;        // the events we're interested in
    int32_t      num_events;
    dev_t       *devices_to_watch;  // only report events from these devices
    uint32_t     num_devices;
    int32_t      flags;
    kfs_event  **event_queue;
    int32_t      eventq_size;       // number of event pointers in queue
    int32_t      rd, wr;            // indices to the event_queue
    int32_t      blockers;
} fs_event_watcher;

// fs_event_watcher flags
#define WATCHER_DROPPED_EVENTS  0x0001
#define WATCHER_CLOSING         0x0002

static SLIST_HEAD(watch_list, fs_event_watcher) watch_list_head = { NULL };


#define MAX_KFS_EVENTS 2048

// this array holds each pending event
static kfs_event fs_event_buf[MAX_KFS_EVENTS];
static int       free_event_idx = 0;
static int       fs_event_init = 0;

//
// this array records whether anyone is interested in a
// particular type of event.  if no one is, we bail out
// early from the event delivery
//
static int16_t fs_event_type_watchers[FSE_MAX_EVENTS];

static int watcher_add_event(fs_event_watcher *watcher, kfs_event *kfse);

//
// Locks
//
static lck_grp_attr_t * fsevent_group_attr;
static lck_attr_t *     fsevent_lock_attr;
static lck_grp_t *      fsevent_mutex_group;

static lck_grp_t *      fsevent_rw_group;

static lck_rw_t  fsevent_big_lock; // always grab this first
static lck_mtx_t watch_list_lock;
static lck_mtx_t event_buf_lock;
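
//
// Lock ordering note (inferred from the code in this file, not an
// official contract): fsevent_big_lock is taken first -- shared by
// add_fsevent() and remove_watcher(), exclusive by fsevent_unmount() --
// and only then are watch_list_lock or event_buf_lock taken.  Neither
// mutex is ever held while acquiring the other.
//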


static void init_pathbuff(void);


static void
fsevents_internal_init(void)
{
    int i;

    if (fs_event_init++ != 0) {
        return;
    }

    for(i=0; i < FSE_MAX_EVENTS; i++) {
        fs_event_type_watchers[i] = 0;
    }

    for(i=0; i < MAX_KFS_EVENTS; i++) {
        fs_event_buf[i].type     = FSE_INVALID;
        fs_event_buf[i].refcount = 0;
    }

    SLIST_INIT(&watch_list_head);

    fsevent_lock_attr   = lck_attr_alloc_init();
    fsevent_group_attr  = lck_grp_attr_alloc_init();
    fsevent_mutex_group = lck_grp_alloc_init("fsevent-mutex", fsevent_group_attr);
    fsevent_rw_group    = lck_grp_alloc_init("fsevent-rw", fsevent_group_attr);

    lck_mtx_init(&watch_list_lock, fsevent_mutex_group, fsevent_lock_attr);
    lck_mtx_init(&event_buf_lock, fsevent_mutex_group, fsevent_lock_attr);

    lck_rw_init(&fsevent_big_lock, fsevent_rw_group, fsevent_lock_attr);

    init_pathbuff();
}

static void
lock_watch_list(void)
{
    lck_mtx_lock(&watch_list_lock);
}

static void
unlock_watch_list(void)
{
    lck_mtx_unlock(&watch_list_lock);
}

static void
lock_fs_event_buf(void)
{
    lck_mtx_lock(&event_buf_lock);
}

static void
unlock_fs_event_buf(void)
{
    lck_mtx_unlock(&event_buf_lock);
}

// forward prototype
static void do_free_event(kfs_event *kfse);

static int
watcher_cares_about_dev(fs_event_watcher *watcher, dev_t dev)
{
    unsigned int i;

    // if there is no list of devices to watch, then always
    // say we're interested so we'll report all events from
    // all devices
    if (watcher->devices_to_watch == NULL) {
        return 1;
    }

    for(i=0; i < watcher->num_devices; i++) {
        if (dev == watcher->devices_to_watch[i]) {
            // found a match!  that means we want events
            // from this device.
            return 1;
        }
    }

    // if we're here it's not in the devices_to_watch[]
    // list so that means we do not care about it
    return 0;
}


int
need_fsevent(int type, vnode_t vp)
{
    fs_event_watcher *watcher;
    dev_t             dev;

    if (fs_event_type_watchers[type] == 0)
        return (0);
    dev = (dev_t)(vp->v_mount->mnt_vfsstat.f_fsid.val[0]);

    lock_watch_list();

    SLIST_FOREACH(watcher, &watch_list_head, link) {
        if (watcher->event_list[type] == FSE_REPORT && watcher_cares_about_dev(watcher, dev)) {
            unlock_watch_list();
            return (1);
        }
    }
    unlock_watch_list();

    return (0);
}
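
//
// A hypothetical call site, for illustration only (the real callers
// live in the individual filesystems): check need_fsevent() first so
// the cheap fs_event_type_watchers[] test can short-circuit the work
// of gathering the event's arguments:
//
//     if (need_fsevent(FSE_CONTENT_MODIFIED, vp)) {
//         add_fsevent(FSE_CONTENT_MODIFIED, ctx,
//                     FSE_ARG_VNODE, vp,
//                     FSE_ARG_DONE);
//     }
//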


int
add_fsevent(int type, vfs_context_t ctx, ...)
{
    struct proc      *p = vfs_context_proc(ctx);
    int               i, arg_idx, num_deliveries = 0;
    kfs_event_arg    *kea;
    kfs_event        *kfse;
    fs_event_watcher *watcher;
    va_list           ap;
    int               error = 0;
    dev_t             dev = 0;

    va_start(ap, ctx);

    // if no one cares about this type of event, bail out
    if (fs_event_type_watchers[type] == 0) {
        va_end(ap);
        return 0;
    }

    lck_rw_lock_shared(&fsevent_big_lock);

    // find a free event and snag it for our use
    // NOTE: do not do anything that would block until
    //       the lock is dropped.
    lock_fs_event_buf();

    for(i=0; i < MAX_KFS_EVENTS; i++) {
        if (fs_event_buf[(free_event_idx + i) % MAX_KFS_EVENTS].type == FSE_INVALID) {
            break;
        }
    }

    if (i >= MAX_KFS_EVENTS) {
        // yikes! no free slots
        unlock_fs_event_buf();
        va_end(ap);

        lock_watch_list();
        SLIST_FOREACH(watcher, &watch_list_head, link) {
            watcher->flags |= WATCHER_DROPPED_EVENTS;
            wakeup((caddr_t)watcher);
        }
        unlock_watch_list();
        lck_rw_done(&fsevent_big_lock);

        printf("fs_events: add_event: event queue is full! dropping events.\n");
        return ENOSPC;
    }

    kfse = &fs_event_buf[(free_event_idx + i) % MAX_KFS_EVENTS];

    free_event_idx++;

    kfse->type     = type;
    kfse->refcount = 0;
    kfse->pid      = p->p_pid;

    unlock_fs_event_buf();  // at this point it's safe to unlock

    //
    // now process the arguments passed in and copy them into
    // the kfse
    //
    arg_idx = 0;
    while(arg_idx < KFS_NUM_ARGS) {
        kea = &kfse->args[arg_idx++];
        kea->type = va_arg(ap, int32_t);

        if (kea->type == FSE_ARG_DONE) {
            break;
        }

        switch(kea->type) {
            case FSE_ARG_VNODE: {
                // this expands out into multiple arguments to the client
                struct vnode *vp;
                struct vnode_attr va;

                kea->data.vp = vp = va_arg(ap, struct vnode *);
                if (kea->data.vp == NULL) {
                    panic("add_fsevent: you can't pass me a NULL vnode ptr (type %d)!\n",
                          kfse->type);
                }

                if (vnode_ref_ext(kea->data.vp, O_EVTONLY) != 0) {
                    kea->type = FSE_ARG_DONE;

                    error = EINVAL;
                    goto clean_up;
                }
                VATTR_INIT(&va);
                VATTR_WANTED(&va, va_fsid);
                VATTR_WANTED(&va, va_fileid);
                VATTR_WANTED(&va, va_mode);
                VATTR_WANTED(&va, va_uid);
                VATTR_WANTED(&va, va_gid);
                if (vnode_getattr(kea->data.vp, &va, ctx) != 0) {
                    vnode_rele_ext(kea->data.vp, O_EVTONLY, 0);
                    kea->type = FSE_ARG_DONE;

                    error = EINVAL;
                    goto clean_up;
                }

                kea++;
                kea->type = FSE_ARG_DEV;
                kea->data.dev = dev = (dev_t)va.va_fsid;

                kea++;
                kea->type = FSE_ARG_INO;
                kea->data.ino = (ino_t)va.va_fileid;

                kea++;
                kea->type = FSE_ARG_MODE;
                kea->data.mode = (int32_t)vnode_vttoif(vnode_vtype(vp)) | va.va_mode;

                kea++;
                kea->type = FSE_ARG_UID;
                kea->data.uid = va.va_uid;

                kea++;
                kea->type = FSE_ARG_GID;
                kea->data.gid = va.va_gid;
                arg_idx += 5;
                break;
            }

            case FSE_ARG_FINFO: {
                fse_info *fse;

                fse = va_arg(ap, fse_info *);

                kea->type = FSE_ARG_DEV;
                kea->data.dev = dev = (dev_t)fse->dev;

                kea++;
                kea->type = FSE_ARG_INO;
                kea->data.ino = (ino_t)fse->ino;

                kea++;
                kea->type = FSE_ARG_MODE;
                kea->data.mode = (int32_t)fse->mode;

                kea++;
                kea->type = FSE_ARG_UID;
                kea->data.uid = (uid_t)fse->uid;

                kea++;
                kea->type = FSE_ARG_GID;
                kea->data.gid = (gid_t)fse->gid;
                arg_idx += 4;
                break;
            }

            case FSE_ARG_STRING:
                kea->len = (u_int16_t)(va_arg(ap, int32_t) & 0xffff);
                kea->data.str = vfs_addname(va_arg(ap, char *), kea->len, 0, 0);
                break;

            case FSE_ARG_INT32:
                kea->data.int32 = va_arg(ap, int32_t);
                break;

            case FSE_ARG_INT64:
                printf("fs_events: 64-bit args not implemented.\n");
//              kea->data.int64 = va_arg(ap, int64_t);
                break;

            case FSE_ARG_RAW:
                kea->len = (u_int16_t)(va_arg(ap, int32_t) & 0xffff);
                MALLOC(kea->data.ptr, void *, kea->len, M_TEMP, M_WAITOK);
                memcpy(kea->data.ptr, va_arg(ap, void *), kea->len);
                break;

            case FSE_ARG_DEV:
                kea->data.dev = dev = va_arg(ap, dev_t);
                break;

            case FSE_ARG_MODE:
                kea->data.mode = va_arg(ap, int32_t);
                break;

            case FSE_ARG_INO:
                kea->data.ino = va_arg(ap, ino_t);
                break;

            case FSE_ARG_UID:
                kea->data.uid = va_arg(ap, uid_t);
                break;

            case FSE_ARG_GID:
                kea->data.gid = va_arg(ap, gid_t);
                break;

            default:
                printf("add_fsevent: unknown type %d\n", kea->type);
                // just skip one 32-bit word and hope we sync up...
                (void)va_arg(ap, int32_t);
        }
    }

    va_end(ap);

    //
    // now we have to go and let everyone who is interested
    // in this type of event know about it...
    //
    lock_watch_list();

    SLIST_FOREACH(watcher, &watch_list_head, link) {
        if (watcher->event_list[type] == FSE_REPORT && watcher_cares_about_dev(watcher, dev)) {
            if (watcher_add_event(watcher, kfse) == 0) {
                num_deliveries++;
            }
        }
    }

    unlock_watch_list();

clean_up:
    // just in case no one was interested after all...
    if (num_deliveries == 0) {
        do_free_event(kfse);
        free_event_idx = (int)(kfse - &fs_event_buf[0]);
    }

    lck_rw_done(&fsevent_big_lock);
    return error;
}

static void
do_free_event(kfs_event *kfse)
{
    int i;
    kfs_event_arg *kea, all_args[KFS_NUM_ARGS];

    lock_fs_event_buf();

    // mark this fsevent as invalid
    kfse->type = FSE_INVALID;

    // make a copy of this so we can free things without
    // holding the fs_event_buf lock
    //
    memcpy(&all_args[0], &kfse->args[0], sizeof(all_args));

    // and just to be anal, set this so that there are no args
    kfse->args[0].type = FSE_ARG_DONE;

    free_event_idx = (kfse - fs_event_buf);

    unlock_fs_event_buf();

    for(i=0; i < KFS_NUM_ARGS; i++) {
        kea = &all_args[i];
        if (kea->type == FSE_ARG_DONE) {
            break;
        }

        switch(kea->type) {
            case FSE_ARG_VNODE:
                vnode_rele_ext(kea->data.vp, O_EVTONLY, 0);
                break;
            case FSE_ARG_STRING:
                vfs_removename(kea->data.str);
                break;
            case FSE_ARG_RAW:
                FREE(kea->data.ptr, M_TEMP);
                break;
        }
    }
}


static int
add_watcher(int8_t *event_list, int32_t num_events, int32_t eventq_size, fs_event_watcher **watcher_out)
{
    int i;
    fs_event_watcher *watcher;

    if (eventq_size < 0 || eventq_size > MAX_KFS_EVENTS) {
        eventq_size = MAX_KFS_EVENTS;
    }

    // Note: the event_queue follows the fs_event_watcher struct
    //       in memory so we only have to do one allocation
    MALLOC(watcher,
           fs_event_watcher *,
           sizeof(fs_event_watcher) + eventq_size * sizeof(kfs_event *),
           M_TEMP, M_WAITOK);

    watcher->event_list       = event_list;
    watcher->num_events       = num_events;
    watcher->devices_to_watch = NULL;
    watcher->num_devices      = 0;
    watcher->flags            = 0;
    watcher->event_queue      = (kfs_event **)&watcher[1];
    watcher->eventq_size      = eventq_size;
    watcher->rd               = 0;
    watcher->wr               = 0;
    watcher->blockers         = 0;

    lock_watch_list();

    // now update the global list of who's interested in
    // events of a particular type...
    for(i=0; i < num_events; i++) {
        if (event_list[i] != FSE_IGNORE && i < FSE_MAX_EVENTS) {
            fs_event_type_watchers[i]++;
        }
    }

    SLIST_INSERT_HEAD(&watch_list_head, watcher, link);

    unlock_watch_list();

    *watcher_out = watcher;

    return 0;
}

static void
remove_watcher(fs_event_watcher *target)
{
    int i;
    fs_event_watcher *watcher;
    kfs_event *kfse;

    lck_rw_lock_shared(&fsevent_big_lock);

    lock_watch_list();

    SLIST_FOREACH(watcher, &watch_list_head, link) {
        if (watcher == target) {
            SLIST_REMOVE(&watch_list_head, watcher, fs_event_watcher, link);

            for(i=0; i < watcher->num_events; i++) {
                if (watcher->event_list[i] != FSE_IGNORE && i < FSE_MAX_EVENTS) {
                    fs_event_type_watchers[i]--;
                }
            }

            unlock_watch_list();

            // drain the event_queue
            for(i=watcher->rd; i != watcher->wr; i=(i+1) % watcher->eventq_size) {
                kfse = watcher->event_queue[i];

                if (OSAddAtomic(-1, (SInt32 *)&kfse->refcount) == 1) {
                    do_free_event(kfse);
                }
            }

            if (watcher->event_list) {
                FREE(watcher->event_list, M_TEMP);
                watcher->event_list = NULL;
            }
            if (watcher->devices_to_watch) {
                FREE(watcher->devices_to_watch, M_TEMP);
                watcher->devices_to_watch = NULL;
            }
            FREE(watcher, M_TEMP);

            lck_rw_done(&fsevent_big_lock);
            return;
        }
    }

    unlock_watch_list();
    lck_rw_done(&fsevent_big_lock);
}


static int
watcher_add_event(fs_event_watcher *watcher, kfs_event *kfse)
{
    if (((watcher->wr + 1) % watcher->eventq_size) == watcher->rd) {
        watcher->flags |= WATCHER_DROPPED_EVENTS;
        wakeup((caddr_t)watcher);
        return ENOSPC;
    }

    watcher->event_queue[watcher->wr] = kfse;
    OSAddAtomic(1, (SInt32 *)&kfse->refcount);
    watcher->wr = (watcher->wr + 1) % watcher->eventq_size;

    // wake up the watcher if he's waiting!
    wakeup((caddr_t)watcher);

    return 0;
}
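
//
// Per-watcher queue semantics (a summary of the code above, for
// orientation): the queue is a classic ring buffer where rd == wr
// means empty and (wr + 1) % eventq_size == rd means full, so one
// slot is always sacrificed and the usable depth is eventq_size - 1.
// Each enqueued pointer bumps kfse->refcount; the slot in
// fs_event_buf[] is only recycled when the last reader (or
// remove_watcher) drops the count back to zero via do_free_event().
//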


static int
fmod_watch(fs_event_watcher *watcher, struct uio *uio)
{
    int i, error=0, last_full_event_resid;
    kfs_event *kfse;
    kfs_event_arg *kea;
    uint16_t tmp16;

    // LP64todo - fix this
    last_full_event_resid = uio_resid(uio);

    // need at least 2048 bytes of space (maxpathlen + 1 event buf)
    if (uio_resid(uio) < 2048 || watcher == NULL) {
        return EINVAL;
    }


    if (watcher->rd == watcher->wr) {
        if (watcher->flags & WATCHER_CLOSING) {
            return 0;
        }
        OSAddAtomic(1, (SInt32 *)&watcher->blockers);

        // there's nothing to do, go to sleep
        error = tsleep((caddr_t)watcher, PUSER|PCATCH, "fsevents_empty", 0);

        OSAddAtomic(-1, (SInt32 *)&watcher->blockers);

        if (error != 0 || (watcher->flags & WATCHER_CLOSING)) {
            return error;
        }
    }

    // if we dropped events, return that as an event first
    if (watcher->flags & WATCHER_DROPPED_EVENTS) {
        int32_t val = FSE_EVENTS_DROPPED;

        error = uiomove((caddr_t)&val, sizeof(int32_t), uio);
        if (error == 0) {
            val = 0;               // a fake pid
            error = uiomove((caddr_t)&val, sizeof(int32_t), uio);

            tmp16 = FSE_ARG_DONE;  // makes it a consistent msg
            error = uiomove((caddr_t)&tmp16, sizeof(int16_t), uio);
        }

        if (error) {
            return error;
        }

        watcher->flags &= ~WATCHER_DROPPED_EVENTS;
    }

    // check if the next chunk of data will fit in the user's
    // buffer.  if not, just goto get_out which will return
    // the number of bytes worth of events that we did read.
    // this leaves the event that didn't fit in the queue.
    //
    // LP64todo - fix this
#define CHECK_UPTR(size) if (size > (unsigned)uio_resid(uio)) { \
            uio_setresid(uio, last_full_event_resid);           \
            goto get_out;                                       \
        }

    for (; uio_resid(uio) > 0 && watcher->rd != watcher->wr; ) {
        kfse = watcher->event_queue[watcher->rd];

        // copy out the type of the event
        CHECK_UPTR(sizeof(int32_t));
        if ((error = uiomove((caddr_t)&kfse->type, sizeof(int32_t), uio)) != 0) {
            goto get_out;
        }

        // now copy out the pid of the person that changed the file
        CHECK_UPTR(sizeof(pid_t));
        if ((error = uiomove((caddr_t)&kfse->pid, sizeof(pid_t), uio)) != 0) {
            goto get_out;
        }

        error = 0;
        for(i=0; i < KFS_NUM_ARGS && error == 0; i++) {
            char *pathbuff;
            int   pathbuff_len;

            kea = &kfse->args[i];

            tmp16 = (uint16_t)kea->type;
            CHECK_UPTR(sizeof(uint16_t));
            error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
            if (error || kea->type == FSE_ARG_DONE) {
                break;
            }

            switch(kea->type) {
                case FSE_ARG_VNODE:
                    pathbuff = get_pathbuff();
                    pathbuff_len = MAXPATHLEN;
                    if (kea->data.vp == NULL) {
                        printf("fmod_watch: whoa... vp == NULL (%d)!\n", kfse->type);
                        i--;
                        release_pathbuff(pathbuff);
                        continue;
                    }

                    if (vn_getpath(kea->data.vp, pathbuff, &pathbuff_len) != 0 || pathbuff[0] == '\0') {
//                      printf("fmod_watch: vn_getpath failed! vp 0x%x vname 0x%x (%s) vparent 0x%x\n",
//                             kea->data.vp,
//                             VNAME(kea->data.vp),
//                             VNAME(kea->data.vp) ? VNAME(kea->data.vp) : "<null>",
//                             VPARENT(kea->data.vp));
                    }
                    CHECK_UPTR(sizeof(uint16_t));
                    tmp16 = (uint16_t)pathbuff_len;
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);

                    CHECK_UPTR((unsigned)pathbuff_len);
                    error = uiomove((caddr_t)pathbuff, pathbuff_len, uio);
                    release_pathbuff(pathbuff);
                    break;


                case FSE_ARG_STRING:
                    tmp16 = (uint16_t)kea->len;
                    CHECK_UPTR(sizeof(uint16_t));
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);

                    CHECK_UPTR(kea->len);
                    error = uiomove((caddr_t)kea->data.str, kea->len, uio);
                    break;

                case FSE_ARG_INT32:
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(int32_t));
                    tmp16 = sizeof(int32_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.int32, sizeof(int32_t), uio);
                    break;

                case FSE_ARG_INT64:
                    printf("fs_events: 64-bit args not implemented on copyout.\n");
//                  CHECK_UPTR(sizeof(uint16_t) + sizeof(int64_t));
//                  tmp16 = sizeof(int64_t);
//                  error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
//                  error = uiomove((caddr_t)&kea->data.int64, sizeof(int64_t), uio);
                    break;

                case FSE_ARG_RAW:
                    tmp16 = (uint16_t)kea->len;
                    CHECK_UPTR(sizeof(uint16_t));
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);

                    CHECK_UPTR(kea->len);
                    error = uiomove((caddr_t)kea->data.ptr, kea->len, uio);
                    break;

                case FSE_ARG_DEV:
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(dev_t));
                    tmp16 = sizeof(dev_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.dev, sizeof(dev_t), uio);
                    break;

                case FSE_ARG_INO:
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(ino_t));
                    tmp16 = sizeof(ino_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.ino, sizeof(ino_t), uio);
                    break;

                case FSE_ARG_MODE:
                    // XXXdbg - NOTE: we use 32-bits for the mode, not
                    //                16-bits like a real mode_t
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(int32_t));
                    tmp16 = sizeof(int32_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.mode, sizeof(int32_t), uio);
                    break;

                case FSE_ARG_UID:
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(uid_t));
                    tmp16 = sizeof(uid_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.uid, sizeof(uid_t), uio);
                    break;

                case FSE_ARG_GID:
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(gid_t));
                    tmp16 = sizeof(gid_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.gid, sizeof(gid_t), uio);
                    break;

                default:
                    printf("fmod_watch: unknown arg type %d.\n", kea->type);
                    break;
            }
        }

        // make sure that we always end with a FSE_ARG_DONE
        if (i >= KFS_NUM_ARGS) {
            tmp16 = FSE_ARG_DONE;
            CHECK_UPTR(sizeof(uint16_t));
            error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
        }


        // LP64todo - fix this
        last_full_event_resid = uio_resid(uio);

        watcher->rd = (watcher->rd + 1) % watcher->eventq_size;

        if (OSAddAtomic(-1, (SInt32 *)&kfse->refcount) == 1) {
            do_free_event(kfse);
        }
    }

get_out:
    return error;
}
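
//
// For reference, the byte stream a client gets back from a read, as
// produced by fmod_watch() above, is:
//
//     int32_t   event type     (or FSE_EVENTS_DROPPED)
//     int32_t   pid            (0 for a drop notification)
//     then, repeated per argument:
//       uint16_t  arg type     (FSE_ARG_*)
//       uint16_t  arg length
//       ...       arg payload  (length bytes)
//     terminated by a bare uint16_t FSE_ARG_DONE.
//
// Note the asymmetry: the type and pid are written raw, while every
// argument is prefixed with its length, which is what lets a client
// skip over argument types it doesn't understand.
//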


// release any references we might have on vnodes which live on
// the mount point passed to us (so that it can be cleanly
// unmounted).
//
// since we don't want to lose the events we'll convert the
// vnode refs to the full path, inode #, and uid.
//
void
fsevent_unmount(struct mount *mp)
{
    int i, j;
    kfs_event *kfse;
    kfs_event_arg *kea;

    lck_rw_lock_exclusive(&fsevent_big_lock);
    lock_fs_event_buf();

    for(i=0; i < MAX_KFS_EVENTS; i++) {
        if (fs_event_buf[i].type == FSE_INVALID) {
            continue;
        }

        kfse = &fs_event_buf[i];
        for(j=0; j < KFS_NUM_ARGS; j++) {
            kea = &kfse->args[j];
            if (kea->type == FSE_ARG_DONE) {
                break;
            }

            if (kea->type == FSE_ARG_VNODE && kea->data.vp->v_mount == mp) {
                struct vnode *vp;
                char *pathbuff;
                int   pathbuff_len;

                vp = kea->data.vp;
                pathbuff = get_pathbuff();
                pathbuff_len = MAXPATHLEN;

                if (vn_getpath(vp, pathbuff, &pathbuff_len) != 0 || pathbuff[0] == '\0') {
                    char *vname;

                    vname = vnode_getname(vp);

                    printf("fsevent_unmount: vn_getpath failed! vp 0x%x vname 0x%x (%s) vparent 0x%x\n",
                           vp, vname, vname ? vname : "<null>", vp->v_parent);

                    if (vname)
                        vnode_putname(vname);
                }

                // switch the arg to carry the path as a string
                kea->type = FSE_ARG_STRING;
                kea->data.str = vfs_addname(pathbuff, pathbuff_len, 0, 0);
                kea->len = pathbuff_len;
                release_pathbuff(pathbuff);

                // and finally let go of the reference on the vnode
                vnode_rele_ext(vp, O_EVTONLY, 0);
            }
        }
    }

    unlock_fs_event_buf();
    lck_rw_done(&fsevent_big_lock);
}


//
// /dev/fsevents device code
//
static int fsevents_installed = 0;
static struct lock__bsd__ fsevents_lck;

typedef struct fsevent_handle {
    fs_event_watcher *watcher;
    struct selinfo    si;
} fsevent_handle;


static int
fseventsf_read(struct fileproc *fp, struct uio *uio,
               __unused kauth_cred_t *cred, __unused int flags,
               __unused struct proc *p)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;
    int error;

    error = fmod_watch(fseh->watcher, uio);

    return error;
}

static int
fseventsf_write(__unused struct fileproc *fp, __unused struct uio *uio,
                __unused kauth_cred_t *cred, __unused int flags,
                __unused struct proc *p)
{
    return EIO;
}


static int
fseventsf_ioctl(struct fileproc *fp, u_long cmd, caddr_t data, struct proc *p)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;
    int ret = 0;
    pid_t pid = 0;
    fsevent_dev_filter_args *devfilt_args = (fsevent_dev_filter_args *)data;

    switch (cmd) {
        case FIONBIO:
        case FIOASYNC:
            return 0;

        case FSEVENTS_DEVICE_FILTER: {
            int new_num_devices;
            dev_t *devices_to_watch, *tmp = NULL;

            if (devfilt_args->num_devices > 256) {
                ret = EINVAL;
                break;
            }

            new_num_devices = devfilt_args->num_devices;
            if (new_num_devices == 0) {
                tmp = fseh->watcher->devices_to_watch;

                lock_watch_list();
                fseh->watcher->devices_to_watch = NULL;
                fseh->watcher->num_devices = new_num_devices;
                unlock_watch_list();

                if (tmp) {
                    FREE(tmp, M_TEMP);
                }
                break;
            }

            MALLOC(devices_to_watch, dev_t *,
                   new_num_devices * sizeof(dev_t),
                   M_TEMP, M_WAITOK);
            if (devices_to_watch == NULL) {
                ret = ENOMEM;
                break;
            }

            ret = copyin(CAST_USER_ADDR_T(devfilt_args->devices),
                         (void *)devices_to_watch,
                         new_num_devices * sizeof(dev_t));
            if (ret) {
                FREE(devices_to_watch, M_TEMP);
                break;
            }

            lock_watch_list();
            fseh->watcher->num_devices = new_num_devices;
            tmp = fseh->watcher->devices_to_watch;
            fseh->watcher->devices_to_watch = devices_to_watch;
            unlock_watch_list();

            if (tmp) {
                FREE(tmp, M_TEMP);
            }

            break;
        }

        default:
            ret = EINVAL;
            break;
    }

    return (ret);
}
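
//
// Illustrative only: how a client might scope its watcher to a single
// volume with FSEVENTS_DEVICE_FILTER.  The field usage below is an
// assumption based on the handler above, not a documented interface
// (cloned_fd is the fd returned by FSEVENTS_CLONE):
//
//     struct stat st;
//     fsevent_dev_filter_args dfa;
//     dev_t one_dev;
//
//     stat("/Volumes/SomeDisk", &st);
//     one_dev = st.st_dev;
//     dfa.num_devices = 1;
//     dfa.devices     = &one_dev;
//     ioctl(cloned_fd, FSEVENTS_DEVICE_FILTER, &dfa);
//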


static int
fseventsf_select(struct fileproc *fp, int which, void *wql, struct proc *p)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;
    int ready = 0;

    if ((which != FREAD) || (fseh->watcher->flags & WATCHER_CLOSING)) {
        return 0;
    }


    // if there's nothing in the queue, we're not ready
    if (fseh->watcher->rd == fseh->watcher->wr) {
        ready = 0;
    } else {
        ready = 1;
    }

    if (!ready) {
        selrecord(p, &fseh->si, wql);
    }

    return ready;
}


static int
fseventsf_stat(struct fileproc *fp, struct stat *sb, struct proc *p)
{
    return ENOTSUP;
}


static int
fseventsf_close(struct fileglob *fg, struct proc *p)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fg->fg_data;

    remove_watcher(fseh->watcher);

    fg->fg_data = NULL;
    fseh->watcher = NULL;
    FREE(fseh, M_TEMP);

    return 0;
}

int
fseventsf_kqfilter(struct fileproc *fp, struct knote *kn, struct proc *p)
{
    // XXXdbg
    return 0;
}


static int
fseventsf_drain(struct fileproc *fp, struct proc *p)
{
    int counter = 0;
    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;

    fseh->watcher->flags |= WATCHER_CLOSING;

    // if there are people still waiting, sleep for 10ms to
    // let them clean up and get out of there.  however we
    // also don't want to get stuck forever so if they don't
    // exit after 5 seconds we're tearing things down anyway.
    while(fseh->watcher->blockers && counter++ < 500) {
        // issue wakeup in case anyone is blocked waiting for an event
        // do this each time we wakeup in case the blocker missed
        // the wakeup due to the unprotected test of WATCHER_CLOSING
        // and decision to tsleep in fmod_watch... this bit of
        // latency is a decent tradeoff against not having to
        // take and drop a lock in fmod_watch
        wakeup((caddr_t)fseh->watcher);

        tsleep((caddr_t)fseh->watcher, PRIBIO, "watcher-close", 1);
    }

    return 0;
}


static int
fseventsopen(dev_t dev, int flag, int mode, struct proc *p)
{
    if (!is_suser()) {
        return EPERM;
    }

    return 0;
}

static int
fseventsclose(dev_t dev, int flag, int mode, struct proc *p)
{
    return 0;
}

static int
fseventsread(dev_t dev, struct uio *uio, int ioflag)
{
    return EIO;
}

static int
fseventswrite(dev_t dev, struct uio *uio, int ioflag)
{
    return EIO;
}


static struct fileops fsevents_fops = {
    fseventsf_read,
    fseventsf_write,
    fseventsf_ioctl,
    fseventsf_select,
    fseventsf_close,
    fseventsf_kqfilter,
    fseventsf_drain
};



static int
fseventsioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
    struct fileproc *f;
    int fd, error;
    fsevent_handle *fseh = NULL;
    fsevent_clone_args *fse_clone_args = (fsevent_clone_args *)data;
    int8_t *event_list;

    switch (cmd) {
        case FSEVENTS_CLONE:
            if (fse_clone_args->num_events < 0 || fse_clone_args->num_events > 4096) {
                return EINVAL;
            }

            MALLOC(fseh, fsevent_handle *, sizeof(fsevent_handle),
                   M_TEMP, M_WAITOK);
            memset(fseh, 0, sizeof(fsevent_handle));

            MALLOC(event_list, int8_t *,
                   fse_clone_args->num_events * sizeof(int8_t),
                   M_TEMP, M_WAITOK);

            error = copyin(CAST_USER_ADDR_T(fse_clone_args->event_list),
                           (void *)event_list,
                           fse_clone_args->num_events * sizeof(int8_t));
            if (error) {
                FREE(event_list, M_TEMP);
                FREE(fseh, M_TEMP);
                return error;
            }

            error = add_watcher(event_list,
                                fse_clone_args->num_events,
                                fse_clone_args->event_queue_depth,
                                &fseh->watcher);
            if (error) {
                FREE(event_list, M_TEMP);
                FREE(fseh, M_TEMP);
                return error;
            }

            error = falloc(p, &f, &fd);
            if (error) {
                FREE(event_list, M_TEMP);
                FREE(fseh, M_TEMP);
                return (error);
            }
            proc_fdlock(p);
            f->f_fglob->fg_flag = FREAD | FWRITE;
            f->f_fglob->fg_type = DTYPE_FSEVENTS;
            f->f_fglob->fg_ops  = &fsevents_fops;
            f->f_fglob->fg_data = (caddr_t) fseh;
            proc_fdunlock(p);
            copyout((void *)&fd, CAST_USER_ADDR_T(fse_clone_args->fd), sizeof(int32_t));
            proc_fdlock(p);
            *fdflags(p, fd) &= ~UF_RESERVED;
            fp_drop(p, fd, f, 1);
            proc_fdunlock(p);
            break;

        default:
            error = EINVAL;
            break;
    }

    return error;
}
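
//
// A minimal userspace sketch of the clone/read protocol implemented
// above, for illustration only (it assumes the userspace-visible
// definitions from <sys/fsevents.h>; buffer handling and error
// checking are elided):
//
//     int8_t  event_list[FSE_MAX_EVENTS];
//     int32_t cloned_fd;
//     fsevent_clone_args clone_args;
//     char    buf[4096];
//
//     memset(event_list, FSE_REPORT, sizeof(event_list));
//
//     int dev_fd = open("/dev/fsevents", O_RDONLY);
//     clone_args.event_list        = event_list;
//     clone_args.num_events        = FSE_MAX_EVENTS;
//     clone_args.event_queue_depth = 512;
//     clone_args.fd                = &cloned_fd;
//     ioctl(dev_fd, FSEVENTS_CLONE, &clone_args);
//
//     // events now arrive on cloned_fd in the format described
//     // in the wire-format note following fmod_watch()
//     read(cloned_fd, buf, sizeof(buf));
//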

static int
fseventsselect(dev_t dev, int rw, struct proc *p)
{
    return 0;
}

static void
fsevents_wakeup(fsevent_handle *fseh)
{
    wakeup((caddr_t)fseh);
    selwakeup(&fseh->si);
}


/*
 * A struct describing which functions will get invoked for certain
 * actions.
 */
static struct cdevsw fsevents_cdevsw =
{
    fseventsopen,   /* open */
    fseventsclose,  /* close */
    fseventsread,   /* read */
    fseventswrite,  /* write */
    fseventsioctl,  /* ioctl */
    nulldev,        /* stop */
    nulldev,        /* reset */
    NULL,           /* tty's */
    eno_select,     /* select */
    eno_mmap,       /* mmap */
    eno_strat,      /* strategy */
    eno_getc,       /* getc */
    eno_putc,       /* putc */
    0               /* type */
};


/*
 * Called to initialize our device,
 * and to register ourselves with devfs
 */

void
fsevents_init(void)
{
    int ret;

    if (fsevents_installed) {
        return;
    }

    fsevents_installed = 1;

    lockinit(&fsevents_lck, PLOCK, "fsevents", 0, 0);

    ret = cdevsw_add(-1, &fsevents_cdevsw);
    if (ret < 0) {
        fsevents_installed = 0;
        return;
    }

    devfs_make_node(makedev (ret, 0), DEVFS_CHAR,
                    UID_ROOT, GID_WHEEL, 0644, "fsevents", 0);

    fsevents_internal_init();
}



//
// XXXdbg - temporary path buffer handling
//
#define NUM_PATH_BUFFS 16
static char path_buff[NUM_PATH_BUFFS][MAXPATHLEN];
static char path_buff_inuse[NUM_PATH_BUFFS];

static lck_grp_attr_t * pathbuff_group_attr;
static lck_attr_t *     pathbuff_lock_attr;
static lck_grp_t *      pathbuff_mutex_group;
static lck_mtx_t        pathbuff_lock;

static void
init_pathbuff(void)
{
    pathbuff_lock_attr   = lck_attr_alloc_init();
    pathbuff_group_attr  = lck_grp_attr_alloc_init();
    pathbuff_mutex_group = lck_grp_alloc_init("pathbuff-mutex", pathbuff_group_attr);

    lck_mtx_init(&pathbuff_lock, pathbuff_mutex_group, pathbuff_lock_attr);
}

static void
lock_pathbuff(void)
{
    lck_mtx_lock(&pathbuff_lock);
}

static void
unlock_pathbuff(void)
{
    lck_mtx_unlock(&pathbuff_lock);
}


char *
get_pathbuff(void)
{
    int i;

    lock_pathbuff();
    for(i=0; i < NUM_PATH_BUFFS; i++) {
        if (path_buff_inuse[i] == 0) {
            break;
        }
    }

    if (i >= NUM_PATH_BUFFS) {
        char *path;

        unlock_pathbuff();
        MALLOC_ZONE(path, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
        return path;
    }

    path_buff_inuse[i] = 1;
    unlock_pathbuff();
    return &path_buff[i][0];
}

void
release_pathbuff(char *path)
{
    int i;

    if (path == NULL) {
        return;
    }

    lock_pathbuff();
    for(i=0; i < NUM_PATH_BUFFS; i++) {
        if (path == &path_buff[i][0]) {
            path_buff[i][0] = '\0';
            path_buff_inuse[i] = 0;
            unlock_pathbuff();
            return;
        }
    }

    unlock_pathbuff();

    // if we get here then it wasn't one of our temp buffers
    FREE_ZONE(path, MAXPATHLEN, M_NAMEI);
}

int
get_fse_info(struct vnode *vp, fse_info *fse, vfs_context_t ctx)
{
    struct vnode_attr va;

    VATTR_INIT(&va);
    VATTR_WANTED(&va, va_fsid);
    VATTR_WANTED(&va, va_fileid);
    VATTR_WANTED(&va, va_mode);
    VATTR_WANTED(&va, va_uid);
    VATTR_WANTED(&va, va_gid);
    if (vnode_getattr(vp, &va, ctx) != 0) {
        return -1;
    }

    fse->dev  = (dev_t)va.va_fsid;
    fse->ino  = (ino_t)va.va_fileid;
    fse->mode = (int32_t)vnode_vttoif(vnode_vtype(vp)) | va.va_mode;
    fse->uid  = (uid_t)va.va_uid;
    fse->gid  = (gid_t)va.va_gid;

    return 0;
}
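
//
// A hypothetical call site for get_fse_info() (illustration only; the
// real callers are in the filesystems, and the string-length convention
// here is an assumption): snapshot the file info before an operation
// that kills the vnode, then post the event with FSE_ARG_FINFO so
// add_fsevent() never has to touch the dead vnode:
//
//     fse_info finfo;
//
//     if (need_fsevent(FSE_DELETE, vp) && get_fse_info(vp, &finfo, ctx) == 0) {
//         // ... do the delete ...
//         add_fsevent(FSE_DELETE, ctx,
//                     FSE_ARG_STRING, (int32_t)strlen(path) + 1, path,
//                     FSE_ARG_FINFO,  &finfo,
//                     FSE_ARG_DONE);
//     }
//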