/*
 * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
#include <stdarg.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/file_internal.h>
#include <sys/stat.h>
#include <sys/vnode_internal.h>
#include <sys/mount_internal.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/dirent.h>
#include <sys/attr.h>
#include <sys/sysctl.h>
#include <sys/ubc.h>
#include <machine/cons.h>
#include <miscfs/specfs/specdev.h>
#include <miscfs/devfs/devfs.h>
#include <sys/filio.h>
#include <kern/locks.h>
#include <libkern/OSAtomic.h>

#include <bsm/audit_kernel.h>
#include <bsm/audit_kevents.h>

// where all our structs and defines come from
#include <sys/fsevents.h>


typedef struct kfs_event_arg {
    u_int16_t type;
    u_int16_t len;
    union {
        struct vnode *vp;
        char *str;
        void *ptr;
        int32_t int32;
        dev_t dev;
        ino_t ino;
        int32_t mode;
        uid_t uid;
        gid_t gid;
    } data;
} kfs_event_arg;

#define KFS_NUM_ARGS FSE_MAX_ARGS
typedef struct kfs_event {
    int32_t type;          // type code of this event
    u_int32_t refcount;    // number of clients referencing this
    pid_t pid;             // pid of the process that did the op
    kfs_event_arg args[KFS_NUM_ARGS];
} kfs_event;


typedef struct fs_event_watcher {
    SLIST_ENTRY(fs_event_watcher) link;
    int8_t *event_list;            // the events we're interested in
    int32_t num_events;
    dev_t *devices_to_watch;       // only report events from these devices
    uint32_t num_devices;
    int32_t flags;
    kfs_event **event_queue;
    int32_t eventq_size;           // number of event pointers in queue
    int32_t rd, wr;                // indices to the event_queue
    int32_t blockers;
    int32_t num_readers;
} fs_event_watcher;

// fs_event_watcher flags
#define WATCHER_DROPPED_EVENTS  0x0001
#define WATCHER_CLOSING         0x0002

static SLIST_HEAD(watch_list, fs_event_watcher) watch_list_head = { NULL };


#define MAX_KFS_EVENTS 2048

// this array holds each pending event
static kfs_event fs_event_buf[MAX_KFS_EVENTS];
static int free_event_idx = 0;
static int fs_event_init = 0;

//
// this array records whether anyone is interested in a
// particular type of event. if no one is, we bail out
// early from the event delivery
//
static int16_t fs_event_type_watchers[FSE_MAX_EVENTS];

static int watcher_add_event(fs_event_watcher *watcher, kfs_event *kfse);

//
// Locks
//
static lck_grp_attr_t * fsevent_group_attr;
static lck_attr_t * fsevent_lock_attr;
static lck_grp_t * fsevent_mutex_group;

static lck_grp_t * fsevent_rw_group;

static lck_rw_t fsevent_big_lock; // always grab this first
static lck_mtx_t watch_list_lock;
static lck_mtx_t event_buf_lock;
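
//
// Editorial note on lock ordering, inferred from the call sites in this
// file: fsevent_big_lock is always taken first (shared in add_fsevent()
// and remove_watcher(), exclusive in fsevent_unmount()); watch_list_lock
// and event_buf_lock are taken after it and are never held at the same
// time as each other.
//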


static void init_pathbuff(void);


static void
fsevents_internal_init(void)
{
    int i;

    if (fs_event_init++ != 0) {
        return;
    }

    for(i=0; i < FSE_MAX_EVENTS; i++) {
        fs_event_type_watchers[i] = 0;
    }

    for(i=0; i < MAX_KFS_EVENTS; i++) {
        fs_event_buf[i].type     = FSE_INVALID;
        fs_event_buf[i].refcount = 0;
    }

    SLIST_INIT(&watch_list_head);

    fsevent_lock_attr   = lck_attr_alloc_init();
    fsevent_group_attr  = lck_grp_attr_alloc_init();
    fsevent_mutex_group = lck_grp_alloc_init("fsevent-mutex", fsevent_group_attr);
    fsevent_rw_group    = lck_grp_alloc_init("fsevent-rw", fsevent_group_attr);

    lck_mtx_init(&watch_list_lock, fsevent_mutex_group, fsevent_lock_attr);
    lck_mtx_init(&event_buf_lock, fsevent_mutex_group, fsevent_lock_attr);

    lck_rw_init(&fsevent_big_lock, fsevent_rw_group, fsevent_lock_attr);

    init_pathbuff();
}

static void
lock_watch_list(void)
{
    lck_mtx_lock(&watch_list_lock);
}

static void
unlock_watch_list(void)
{
    lck_mtx_unlock(&watch_list_lock);
}

static void
lock_fs_event_buf(void)
{
    lck_mtx_lock(&event_buf_lock);
}

static void
unlock_fs_event_buf(void)
{
    lck_mtx_unlock(&event_buf_lock);
}

// forward prototype
static void do_free_event(kfs_event *kfse);

static int
watcher_cares_about_dev(fs_event_watcher *watcher, dev_t dev)
{
    unsigned int i;

    // if there is no list of devices to watch, then always
    // say we're interested so we'll report all events from
    // all devices
    if (watcher->devices_to_watch == NULL) {
        return 1;
    }

    for(i=0; i < watcher->num_devices; i++) {
        if (dev == watcher->devices_to_watch[i]) {
            // found a match! that means we want events
            // from this device.
            return 1;
        }
    }

    // if we get here, the device is not in the devices_to_watch[]
    // list, which means we do not care about it
    return 0;
}


int
need_fsevent(int type, vnode_t vp)
{
    fs_event_watcher *watcher;
    dev_t dev;

    if (fs_event_type_watchers[type] == 0)
        return (0);
    dev = (dev_t)(vp->v_mount->mnt_vfsstat.f_fsid.val[0]);

    lock_watch_list();

    SLIST_FOREACH(watcher, &watch_list_head, link) {
        if (watcher->event_list[type] == FSE_REPORT && watcher_cares_about_dev(watcher, dev)) {
            unlock_watch_list();
            return (1);
        }
    }
    unlock_watch_list();

    return (0);
}

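//
// Editorial note: an illustrative call pattern (the real call sites live
// in the vfs/filesystem code, not in this file) is to test need_fsevent()
// before paying for the full add_fsevent() call:
//
//     if (need_fsevent(FSE_DELETE, vp)) {
//         add_fsevent(FSE_DELETE, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
//     }
//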

int
add_fsevent(int type, vfs_context_t ctx, ...)
{
    struct proc *p = vfs_context_proc(ctx);
    int i, arg_idx, num_deliveries = 0;
    kfs_event_arg *kea;
    kfs_event *kfse;
    fs_event_watcher *watcher;
    va_list ap;
    int error = 0, base;
    dev_t dev = 0;

    va_start(ap, ctx);

    // if no one cares about this type of event, bail out
    if (fs_event_type_watchers[type] == 0) {
        va_end(ap);
        return 0;
    }

    lck_rw_lock_shared(&fsevent_big_lock);

    // find a free event and snag it for our use
    // NOTE: do not do anything that would block until
    //       the lock is dropped.
    lock_fs_event_buf();

    base = free_event_idx;
    for(i=0; i < MAX_KFS_EVENTS; i++) {
        if (fs_event_buf[(base + i) % MAX_KFS_EVENTS].type == FSE_INVALID) {
            break;
        }
    }

    if (i >= MAX_KFS_EVENTS) {
        // yikes! no free slots
        unlock_fs_event_buf();
        va_end(ap);

        lock_watch_list();
        SLIST_FOREACH(watcher, &watch_list_head, link) {
            watcher->flags |= WATCHER_DROPPED_EVENTS;
            wakeup((caddr_t)watcher);
        }
        unlock_watch_list();
        lck_rw_done(&fsevent_big_lock);

        printf("fs_events: add_event: event queue is full! dropping events.\n");
        return ENOSPC;
    }

    kfse = &fs_event_buf[(base + i) % MAX_KFS_EVENTS];

    free_event_idx = ((base + i) % MAX_KFS_EVENTS) + 1;

    kfse->type     = type;
    kfse->refcount = 1;
    kfse->pid      = p->p_pid;

    unlock_fs_event_buf();  // at this point it's safe to unlock

    //
    // now process the arguments passed in and copy them into
    // the kfse
    //
    arg_idx = 0;
    while(arg_idx < KFS_NUM_ARGS) {
        kea = &kfse->args[arg_idx++];
        kea->type = va_arg(ap, int32_t);

        if (kea->type == FSE_ARG_DONE) {
            break;
        }

        switch(kea->type) {
            case FSE_ARG_VNODE: {
                // this expands out into multiple arguments to the client
                struct vnode *vp;
                struct vnode_attr va;

                kea->data.vp = vp = va_arg(ap, struct vnode *);
                if (kea->data.vp == NULL) {
                    panic("add_fsevent: you can't pass me a NULL vnode ptr (type %d)!\n",
                          kfse->type);
                }

                if (vnode_ref_ext(kea->data.vp, O_EVTONLY) != 0) {
                    kea->type = FSE_ARG_DONE;

                    error = EINVAL;
                    goto clean_up;
                }
                VATTR_INIT(&va);
                VATTR_WANTED(&va, va_fsid);
                VATTR_WANTED(&va, va_fileid);
                VATTR_WANTED(&va, va_mode);
                VATTR_WANTED(&va, va_uid);
                VATTR_WANTED(&va, va_gid);
                if (vnode_getattr(kea->data.vp, &va, ctx) != 0) {
                    vnode_rele_ext(kea->data.vp, O_EVTONLY, 0);
                    kea->type = FSE_ARG_DONE;

                    error = EINVAL;
                    goto clean_up;
                }

                kea++;
                kea->type = FSE_ARG_DEV;
                kea->data.dev = dev = (dev_t)va.va_fsid;

                kea++;
                kea->type = FSE_ARG_INO;
                kea->data.ino = (ino_t)va.va_fileid;

                kea++;
                kea->type = FSE_ARG_MODE;
                kea->data.mode = (int32_t)vnode_vttoif(vnode_vtype(vp)) | va.va_mode;

                kea++;
                kea->type = FSE_ARG_UID;
                kea->data.uid = va.va_uid;

                kea++;
                kea->type = FSE_ARG_GID;
                kea->data.gid = va.va_gid;
                arg_idx += 5;
                break;
            }

            case FSE_ARG_FINFO: {
                fse_info *fse;

                fse = va_arg(ap, fse_info *);

                kea->type = FSE_ARG_DEV;
                kea->data.dev = dev = (dev_t)fse->dev;

                kea++;
                kea->type = FSE_ARG_INO;
                kea->data.ino = (ino_t)fse->ino;

                kea++;
                kea->type = FSE_ARG_MODE;
                kea->data.mode = (int32_t)fse->mode;

                kea++;
                kea->type = FSE_ARG_UID;
                kea->data.uid = (uid_t)fse->uid;

                kea++;
                kea->type = FSE_ARG_GID;
                kea->data.gid = (gid_t)fse->gid;
                arg_idx += 4;
                break;
            }

            case FSE_ARG_STRING:
                kea->len = (u_int16_t)(va_arg(ap, int32_t) & 0xffff);
                kea->data.str = vfs_addname(va_arg(ap, char *), kea->len, 0, 0);
                break;

            case FSE_ARG_INT32:
                kea->data.int32 = va_arg(ap, int32_t);
                break;

            case FSE_ARG_INT64:
                printf("fs_events: 64-bit args not implemented.\n");
                // kea->data.int64 = va_arg(ap, int64_t);
                break;

            case FSE_ARG_RAW:
                kea->len = (u_int16_t)(va_arg(ap, int32_t) & 0xffff);
                MALLOC(kea->data.ptr, void *, kea->len, M_TEMP, M_WAITOK);
                memcpy(kea->data.ptr, va_arg(ap, void *), kea->len);
                break;

            case FSE_ARG_DEV:
                kea->data.dev = dev = va_arg(ap, dev_t);
                break;

            case FSE_ARG_MODE:
                kea->data.mode = va_arg(ap, int32_t);
                break;

            case FSE_ARG_INO:
                kea->data.ino = va_arg(ap, ino_t);
                break;

            case FSE_ARG_UID:
                kea->data.uid = va_arg(ap, uid_t);
                break;

            case FSE_ARG_GID:
                kea->data.gid = va_arg(ap, gid_t);
                break;

            default:
                printf("add_fsevent: unknown type %d\n", kea->type);
                // just skip one 32-bit word and hope we sync up...
                (void)va_arg(ap, int32_t);
        }
    }

    va_end(ap);

    //
    // now we have to go and notify everyone who is
    // interested in this type of event...
    //
    lock_watch_list();

    SLIST_FOREACH(watcher, &watch_list_head, link) {
        if (watcher->event_list[type] == FSE_REPORT && watcher_cares_about_dev(watcher, dev)) {
            if (watcher_add_event(watcher, kfse) == 0) {
                num_deliveries++;
            }
        }
    }

    unlock_watch_list();

clean_up:
    // just in case no one was interested after all...
    if (OSAddAtomic(-1, (SInt32 *)&kfse->refcount) == 1) {
        do_free_event(kfse);
    }

    lck_rw_done(&fsevent_big_lock);
    return error;
}

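//
// Editorial note: a minimal sketch of a caller, assuming an event type
// from <sys/fsevents.h> such as FSE_CONTENT_MODIFIED. The vararg list
// must be terminated with FSE_ARG_DONE; FSE_ARG_VNODE is expanded
// in-kernel into the dev/ino/mode/uid/gid arguments that the reader
// later sees in fmod_watch():
//
//     add_fsevent(FSE_CONTENT_MODIFIED, ctx,
//                 FSE_ARG_VNODE, vp,
//                 FSE_ARG_DONE);
//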
static void
do_free_event(kfs_event *kfse)
{
    int i;
    kfs_event_arg *kea, all_args[KFS_NUM_ARGS];

    lock_fs_event_buf();

    if (kfse->refcount > 0) {
        panic("do_free_event: freeing a kfsevent w/refcount == %d (kfse %p)\n",
              kfse->refcount, kfse);
    }

    // make a copy of this so we can free things without
    // holding the fs_event_buf lock
    //
    memcpy(&all_args[0], &kfse->args[0], sizeof(all_args));

    // and just to be safe, mark the event as having no args
    kfse->args[0].type = FSE_ARG_DONE;

    // mark this fsevent as invalid
    kfse->type = FSE_INVALID;

    free_event_idx = (kfse - fs_event_buf);

    unlock_fs_event_buf();

    for(i=0; i < KFS_NUM_ARGS; i++) {
        kea = &all_args[i];
        if (kea->type == FSE_ARG_DONE) {
            break;
        }

        switch(kea->type) {
            case FSE_ARG_VNODE:
                vnode_rele_ext(kea->data.vp, O_EVTONLY, 0);
                break;
            case FSE_ARG_STRING:
                vfs_removename(kea->data.str);
                break;
            case FSE_ARG_RAW:
                FREE(kea->data.ptr, M_TEMP);
                break;
        }
    }
}


static int
add_watcher(int8_t *event_list, int32_t num_events, int32_t eventq_size, fs_event_watcher **watcher_out)
{
    int i;
    fs_event_watcher *watcher;

    if (eventq_size < 0 || eventq_size > MAX_KFS_EVENTS) {
        eventq_size = MAX_KFS_EVENTS;
    }

    // Note: the event_queue follows the fs_event_watcher struct
    //       in memory so we only have to do one allocation
    MALLOC(watcher,
           fs_event_watcher *,
           sizeof(fs_event_watcher) + eventq_size * sizeof(kfs_event *),
           M_TEMP, M_WAITOK);

    watcher->event_list       = event_list;
    watcher->num_events       = num_events;
    watcher->devices_to_watch = NULL;
    watcher->num_devices      = 0;
    watcher->flags            = 0;
    watcher->event_queue      = (kfs_event **)&watcher[1];
    watcher->eventq_size      = eventq_size;
    watcher->rd               = 0;
    watcher->wr               = 0;
    watcher->blockers         = 0;
    watcher->num_readers      = 0;

    lock_watch_list();

    // now update the global list of who's interested in
    // events of a particular type...
    for(i=0; i < num_events; i++) {
        if (event_list[i] != FSE_IGNORE && i < FSE_MAX_EVENTS) {
            fs_event_type_watchers[i]++;
        }
    }

    SLIST_INSERT_HEAD(&watch_list_head, watcher, link);

    unlock_watch_list();

    *watcher_out = watcher;

    return 0;
}

static void
remove_watcher(fs_event_watcher *target)
{
    int i;
    fs_event_watcher *watcher;
    kfs_event *kfse;

    lck_rw_lock_shared(&fsevent_big_lock);

    lock_watch_list();

    SLIST_FOREACH(watcher, &watch_list_head, link) {
        if (watcher == target) {
            SLIST_REMOVE(&watch_list_head, watcher, fs_event_watcher, link);

            for(i=0; i < watcher->num_events; i++) {
                if (watcher->event_list[i] != FSE_IGNORE && i < FSE_MAX_EVENTS) {
                    fs_event_type_watchers[i]--;
                }
            }

            unlock_watch_list();

            // drain the event_queue
            for(i=watcher->rd; i != watcher->wr; i=(i+1) % watcher->eventq_size) {
                kfse = watcher->event_queue[i];

                if (OSAddAtomic(-1, (SInt32 *)&kfse->refcount) == 1) {
                    do_free_event(kfse);
                }
            }

            if (watcher->event_list) {
                FREE(watcher->event_list, M_TEMP);
                watcher->event_list = NULL;
            }
            if (watcher->devices_to_watch) {
                FREE(watcher->devices_to_watch, M_TEMP);
                watcher->devices_to_watch = NULL;
            }
            FREE(watcher, M_TEMP);

            lck_rw_done(&fsevent_big_lock);
            return;
        }
    }

    unlock_watch_list();
    lck_rw_done(&fsevent_big_lock);
}


static int
watcher_add_event(fs_event_watcher *watcher, kfs_event *kfse)
{
    if (((watcher->wr + 1) % watcher->eventq_size) == watcher->rd) {
        watcher->flags |= WATCHER_DROPPED_EVENTS;
        wakeup((caddr_t)watcher);
        return ENOSPC;
    }

    watcher->event_queue[watcher->wr] = kfse;
    OSAddAtomic(1, (SInt32 *)&kfse->refcount);
    watcher->wr = (watcher->wr + 1) % watcher->eventq_size;

    // wake up the watcher if it's waiting!
    wakeup((caddr_t)watcher);

    return 0;
}

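//
// Editorial note on the reference-counting protocol: add_fsevent() creates
// each kfs_event with refcount == 1, and watcher_add_event() above adds one
// reference per queue the event lands on. OSAddAtomic() returns the value
// *before* the add, so the "OSAddAtomic(-1, ...) == 1" tests in this file
// mean "we just dropped the last reference" and hand the slot back via
// do_free_event().
//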

static int
fmod_watch(fs_event_watcher *watcher, struct uio *uio)
{
    int i, error = 0, last_full_event_resid;
    kfs_event *kfse;
    kfs_event_arg *kea;
    uint16_t tmp16;

    // LP64todo - fix this
    last_full_event_resid = uio_resid(uio);

    // need at least 2048 bytes of space (maxpathlen + 1 event buf)
    if (uio_resid(uio) < 2048 || watcher == NULL) {
        return EINVAL;
    }

    if (OSAddAtomic(1, (SInt32 *)&watcher->num_readers) != 0) {
        // don't allow multiple threads to read from the fd at the same time
        OSAddAtomic(-1, (SInt32 *)&watcher->num_readers);
        return EAGAIN;
    }

    if (watcher->rd == watcher->wr) {
        if (watcher->flags & WATCHER_CLOSING) {
            OSAddAtomic(-1, (SInt32 *)&watcher->num_readers);
            return 0;
        }
        OSAddAtomic(1, (SInt32 *)&watcher->blockers);

        // there's nothing to do, go to sleep
        error = tsleep((caddr_t)watcher, PUSER|PCATCH, "fsevents_empty", 0);

        OSAddAtomic(-1, (SInt32 *)&watcher->blockers);

        if (error != 0 || (watcher->flags & WATCHER_CLOSING)) {
            OSAddAtomic(-1, (SInt32 *)&watcher->num_readers);
            return error;
        }
    }

    // if we dropped events, return that as an event first
    if (watcher->flags & WATCHER_DROPPED_EVENTS) {
        int32_t val = FSE_EVENTS_DROPPED;

        error = uiomove((caddr_t)&val, sizeof(int32_t), uio);
        if (error == 0) {
            val = 0;   // a fake pid
            error = uiomove((caddr_t)&val, sizeof(int32_t), uio);

            tmp16 = FSE_ARG_DONE;   // makes it a consistent msg
            error = uiomove((caddr_t)&tmp16, sizeof(int16_t), uio);
        }

        if (error) {
            OSAddAtomic(-1, (SInt32 *)&watcher->num_readers);
            return error;
        }

        watcher->flags &= ~WATCHER_DROPPED_EVENTS;
    }

    // check if the next chunk of data will fit in the user's
    // buffer. if not, just goto get_out which will return
    // the number of bytes worth of events that we did read.
    // this leaves the event that didn't fit in the queue.
    //
    // LP64todo - fix this
#define CHECK_UPTR(size) if (size > (unsigned)uio_resid(uio)) { \
        uio_setresid(uio, last_full_event_resid);               \
        goto get_out;                                           \
    }

    for (; uio_resid(uio) > 0 && watcher->rd != watcher->wr; ) {
        kfse = watcher->event_queue[watcher->rd];

        // copy out the type of the event
        CHECK_UPTR(sizeof(int32_t));
        if ((error = uiomove((caddr_t)&kfse->type, sizeof(int32_t), uio)) != 0) {
            goto get_out;
        }

        // now copy out the pid of the person that changed the file
        CHECK_UPTR(sizeof(pid_t));
        if ((error = uiomove((caddr_t)&kfse->pid, sizeof(pid_t), uio)) != 0) {
            goto get_out;
        }

        error = 0;
        for(i=0; i < KFS_NUM_ARGS && error == 0; i++) {
            char *pathbuff;
            int pathbuff_len;

            kea = &kfse->args[i];

            tmp16 = (uint16_t)kea->type;
            CHECK_UPTR(sizeof(uint16_t));
            error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
            if (error || kea->type == FSE_ARG_DONE) {
                break;
            }

            switch(kea->type) {
                case FSE_ARG_VNODE:
                    pathbuff = get_pathbuff();
                    pathbuff_len = MAXPATHLEN;
                    if (kea->data.vp == NULL) {
                        printf("fmod_watch: whoa... vp == NULL (%d)!\n", kfse->type);
                        i--;
                        release_pathbuff(pathbuff);
                        continue;
                    }

                    if (vn_getpath(kea->data.vp, pathbuff, &pathbuff_len) != 0 || pathbuff[0] == '\0') {
                        // printf("fmod_watch: vn_getpath failed! vp 0x%x vname 0x%x (%s) vparent 0x%x\n",
                        //        kea->data.vp,
                        //        VNAME(kea->data.vp),
                        //        VNAME(kea->data.vp) ? VNAME(kea->data.vp) : "<null>",
                        //        VPARENT(kea->data.vp));
                    }
                    CHECK_UPTR(sizeof(uint16_t));
                    tmp16 = (uint16_t)pathbuff_len;
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);

                    CHECK_UPTR((unsigned)pathbuff_len);
                    error = uiomove((caddr_t)pathbuff, pathbuff_len, uio);
                    release_pathbuff(pathbuff);
                    break;


                case FSE_ARG_STRING:
                    tmp16 = (uint16_t)kea->len;
                    CHECK_UPTR(sizeof(uint16_t));
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);

                    CHECK_UPTR(kea->len);
                    error = uiomove((caddr_t)kea->data.str, kea->len, uio);
                    break;

                case FSE_ARG_INT32:
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(int32_t));
                    tmp16 = sizeof(int32_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.int32, sizeof(int32_t), uio);
                    break;

                case FSE_ARG_INT64:
                    printf("fs_events: 64-bit args not implemented on copyout.\n");
                    // CHECK_UPTR(sizeof(uint16_t) + sizeof(int64_t));
                    // tmp16 = sizeof(int64_t);
                    // error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    // error = uiomove((caddr_t)&kea->data.int64, sizeof(int64_t), uio);
                    break;

                case FSE_ARG_RAW:
                    tmp16 = (uint16_t)kea->len;
                    CHECK_UPTR(sizeof(uint16_t));
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);

                    CHECK_UPTR(kea->len);
                    error = uiomove((caddr_t)kea->data.ptr, kea->len, uio);
                    break;

                case FSE_ARG_DEV:
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(dev_t));
                    tmp16 = sizeof(dev_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.dev, sizeof(dev_t), uio);
                    break;

                case FSE_ARG_INO:
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(ino_t));
                    tmp16 = sizeof(ino_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.ino, sizeof(ino_t), uio);
                    break;

                case FSE_ARG_MODE:
                    // XXXdbg - NOTE: we use 32-bits for the mode, not
                    //          16-bits like a real mode_t
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(int32_t));
                    tmp16 = sizeof(int32_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.mode, sizeof(int32_t), uio);
                    break;

                case FSE_ARG_UID:
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(uid_t));
                    tmp16 = sizeof(uid_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.uid, sizeof(uid_t), uio);
                    break;

                case FSE_ARG_GID:
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(gid_t));
                    tmp16 = sizeof(gid_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.gid, sizeof(gid_t), uio);
                    break;

                default:
                    printf("fmod_watch: unknown arg type %d.\n", kea->type);
                    break;
            }
        }

        // make sure that we always end with a FSE_ARG_DONE
        if (i >= KFS_NUM_ARGS) {
            tmp16 = FSE_ARG_DONE;
            CHECK_UPTR(sizeof(uint16_t));
            error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
        }


        // LP64todo - fix this
        last_full_event_resid = uio_resid(uio);

        watcher->rd = (watcher->rd + 1) % watcher->eventq_size;

        if (OSAddAtomic(-1, (SInt32 *)&kfse->refcount) == 1) {
            do_free_event(kfse);
        }
    }

get_out:
    OSAddAtomic(-1, (SInt32 *)&watcher->num_readers);
    return error;
}

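//
// Editorial sketch of the byte stream a reader sees from fmod_watch(),
// reconstructed from the uiomove() calls above (all fields native-endian):
//
//     int32_t  type;               // event type, or FSE_EVENTS_DROPPED
//     pid_t    pid;                // pid that did the op (0 for drops)
//     // zero or more arguments, each:
//     uint16_t argtype;            // FSE_ARG_VNODE, FSE_ARG_DEV, ...
//     uint16_t arglen;             // payload size in bytes
//     uint8_t  payload[arglen];    // path bytes, dev_t, ino_t, etc.
//     // terminated by a lone uint16_t FSE_ARG_DONE (no length/payload)
//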

// release any references we might have on vnodes which are
// the mount point passed to us (so that it can be cleanly
// unmounted).
//
// since we don't want to lose the events, we convert each
// vnode ref to the file's full path (the dev/inode/uid args
// were already captured when the event was created).
//
void
fsevent_unmount(struct mount *mp)
{
    int i, j;
    kfs_event *kfse;
    kfs_event_arg *kea;

    lck_rw_lock_exclusive(&fsevent_big_lock);
    lock_fs_event_buf();

    for(i=0; i < MAX_KFS_EVENTS; i++) {
        if (fs_event_buf[i].type == FSE_INVALID) {
            continue;
        }

        kfse = &fs_event_buf[i];
        for(j=0; j < KFS_NUM_ARGS; j++) {
            kea = &kfse->args[j];
            if (kea->type == FSE_ARG_DONE) {
                break;
            }

            if (kea->type == FSE_ARG_VNODE && kea->data.vp->v_mount == mp) {
                struct vnode *vp;
                char *pathbuff;
                int pathbuff_len;

                vp = kea->data.vp;
                pathbuff = get_pathbuff();
                pathbuff_len = MAXPATHLEN;

                if (vn_getpath(vp, pathbuff, &pathbuff_len) != 0 || pathbuff[0] == '\0') {
                    char *vname;

                    vname = vnode_getname(vp);

                    printf("fsevent_unmount: vn_getpath failed! vp %p vname %p (%s) vparent %p\n",
                           vp, vname, vname ? vname : "<null>", vp->v_parent);

                    if (vname)
                        vnode_putname(vname);

                    strcpy(pathbuff, "UNKNOWN-FILE");
                    pathbuff_len = strlen(pathbuff) + 1;
                }

                // switch the arg over to a string
                kea->type = FSE_ARG_STRING;
                kea->data.str = vfs_addname(pathbuff, pathbuff_len, 0, 0);
                kea->len = pathbuff_len;
                release_pathbuff(pathbuff);

                // and finally let go of the reference on the vnode
                vnode_rele_ext(vp, O_EVTONLY, 0);
            }
        }
    }

    unlock_fs_event_buf();
    lck_rw_done(&fsevent_big_lock);
}


//
// /dev/fsevents device code
//
static int fsevents_installed = 0;
static struct lock__bsd__ fsevents_lck;

typedef struct fsevent_handle {
    fs_event_watcher *watcher;
    struct selinfo si;
} fsevent_handle;


static int
fseventsf_read(struct fileproc *fp, struct uio *uio,
               __unused kauth_cred_t *cred, __unused int flags,
               __unused struct proc *p)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;
    int error;

    error = fmod_watch(fseh->watcher, uio);

    return error;
}

static int
fseventsf_write(__unused struct fileproc *fp, __unused struct uio *uio,
                __unused kauth_cred_t *cred, __unused int flags,
                __unused struct proc *p)
{
    return EIO;
}


static int
fseventsf_ioctl(struct fileproc *fp, u_long cmd, caddr_t data, struct proc *p)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;
    int ret = 0;
    fsevent_dev_filter_args *devfilt_args = (fsevent_dev_filter_args *)data;

    switch (cmd) {
        case FIONBIO:
        case FIOASYNC:
            return 0;

        case FSEVENTS_DEVICE_FILTER: {
            int new_num_devices;
            dev_t *devices_to_watch, *tmp = NULL;

            if (devfilt_args->num_devices > 256) {
                ret = EINVAL;
                break;
            }

            new_num_devices = devfilt_args->num_devices;
            if (new_num_devices == 0) {
                tmp = fseh->watcher->devices_to_watch;

                lock_watch_list();
                fseh->watcher->devices_to_watch = NULL;
                fseh->watcher->num_devices = new_num_devices;
                unlock_watch_list();

                if (tmp) {
                    FREE(tmp, M_TEMP);
                }
                break;
            }

            MALLOC(devices_to_watch, dev_t *,
                   new_num_devices * sizeof(dev_t),
                   M_TEMP, M_WAITOK);
            if (devices_to_watch == NULL) {
                ret = ENOMEM;
                break;
            }

            ret = copyin(CAST_USER_ADDR_T(devfilt_args->devices),
                         (void *)devices_to_watch,
                         new_num_devices * sizeof(dev_t));
            if (ret) {
                FREE(devices_to_watch, M_TEMP);
                break;
            }

            lock_watch_list();
            fseh->watcher->num_devices = new_num_devices;
            tmp = fseh->watcher->devices_to_watch;
            fseh->watcher->devices_to_watch = devices_to_watch;
            unlock_watch_list();

            if (tmp) {
                FREE(tmp, M_TEMP);
            }

            break;
        }

        default:
            ret = EINVAL;
            break;
    }

    return (ret);
}



static int
fseventsf_select(struct fileproc *fp, int which, void *wql, struct proc *p)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;
    int ready = 0;

    if ((which != FREAD) || (fseh->watcher->flags & WATCHER_CLOSING)) {
        return 0;
    }


    // if there's nothing in the queue, we're not ready
    if (fseh->watcher->rd == fseh->watcher->wr) {
        ready = 0;
    } else {
        ready = 1;
    }

    if (!ready) {
        selrecord(p, &fseh->si, wql);
    }

    return ready;
}


static int
fseventsf_stat(struct fileproc *fp, struct stat *sb, struct proc *p)
{
    return ENOTSUP;
}


static int
fseventsf_close(struct fileglob *fg, struct proc *p)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fg->fg_data;

    remove_watcher(fseh->watcher);

    fg->fg_data = NULL;
    fseh->watcher = NULL;
    FREE(fseh, M_TEMP);

    return 0;
}

int
fseventsf_kqfilter(struct fileproc *fp, struct knote *kn, struct proc *p)
{
    // XXXdbg
    return 0;
}



static int
fseventsf_drain(struct fileproc *fp, struct proc *p)
{
    int counter = 0;
    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;

    fseh->watcher->flags |= WATCHER_CLOSING;

    // if there are people still waiting, sleep for 10ms to
    // let them clean up and get out of there. however we
    // also don't want to get stuck forever so if they don't
    // exit after 5 seconds we're tearing things down anyway.
    while(fseh->watcher->blockers && counter++ < 500) {
        // issue wakeup in case anyone is blocked waiting for an event
        // do this each time we wakeup in case the blocker missed
        // the wakeup due to the unprotected test of WATCHER_CLOSING
        // and decision to tsleep in fmod_watch... this bit of
        // latency is a decent tradeoff against not having to
        // take and drop a lock in fmod_watch
        wakeup((caddr_t)fseh->watcher);

        tsleep((caddr_t)fseh->watcher, PRIBIO, "watcher-close", 1);
    }

    return 0;
}


static int
fseventsopen(dev_t dev, int flag, int mode, struct proc *p)
{
    if (!is_suser()) {
        return EPERM;
    }

    return 0;
}

static int
fseventsclose(dev_t dev, int flag, int mode, struct proc *p)
{
    return 0;
}


static int
fseventsread(dev_t dev, struct uio *uio, int ioflag)
{
    return EIO;
}

static int
fseventswrite(dev_t dev, struct uio *uio, int ioflag)
{
    return EIO;
}


static struct fileops fsevents_fops = {
    fseventsf_read,
    fseventsf_write,
    fseventsf_ioctl,
    fseventsf_select,
    fseventsf_close,
    fseventsf_kqfilter,
    fseventsf_drain
};



static int
fseventsioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
    struct fileproc *f;
    int fd, error;
    fsevent_handle *fseh = NULL;
    fsevent_clone_args *fse_clone_args = (fsevent_clone_args *)data;
    int8_t *event_list;

    switch (cmd) {
        case FSEVENTS_CLONE:
            if (fse_clone_args->num_events < 0 || fse_clone_args->num_events > 4096) {
                return EINVAL;
            }

            MALLOC(fseh, fsevent_handle *, sizeof(fsevent_handle),
                   M_TEMP, M_WAITOK);
            memset(fseh, 0, sizeof(fsevent_handle));

            MALLOC(event_list, int8_t *,
                   fse_clone_args->num_events * sizeof(int8_t),
                   M_TEMP, M_WAITOK);

            error = copyin(CAST_USER_ADDR_T(fse_clone_args->event_list),
                           (void *)event_list,
                           fse_clone_args->num_events * sizeof(int8_t));
            if (error) {
                FREE(event_list, M_TEMP);
                FREE(fseh, M_TEMP);
                return error;
            }

            error = add_watcher(event_list,
                                fse_clone_args->num_events,
                                fse_clone_args->event_queue_depth,
                                &fseh->watcher);
            if (error) {
                FREE(event_list, M_TEMP);
                FREE(fseh, M_TEMP);
                return error;
            }

            error = falloc(p, &f, &fd);
            if (error) {
                FREE(event_list, M_TEMP);
                FREE(fseh, M_TEMP);
                return (error);
            }
            proc_fdlock(p);
            f->f_fglob->fg_flag = FREAD | FWRITE;
            f->f_fglob->fg_type = DTYPE_FSEVENTS;
            f->f_fglob->fg_ops  = &fsevents_fops;
            f->f_fglob->fg_data = (caddr_t)fseh;
            proc_fdunlock(p);
            copyout((void *)&fd, CAST_USER_ADDR_T(fse_clone_args->fd), sizeof(int32_t));
            proc_fdlock(p);
            *fdflags(p, fd) &= ~UF_RESERVED;
            fp_drop(p, fd, f, 1);
            proc_fdunlock(p);
            break;

        default:
            error = EINVAL;
            break;
    }

    return error;
}

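//
// Editorial sketch of the user-space side, assuming the FSEVENTS_CLONE
// ioctl and fsevent_clone_args layout used above are exported through
// <sys/fsevents.h>: open /dev/fsevents, clone a watcher fd, then read()
// records (see the stream sketch above fsevent_unmount()) from it.
//
//     int8_t  events[FSE_MAX_EVENTS];
//     int32_t cloned_fd;
//     fsevent_clone_args args;
//     int fd = open("/dev/fsevents", O_RDONLY);
//
//     memset(events, FSE_REPORT, sizeof(events));  // report every type
//     args.event_list        = events;
//     args.num_events        = FSE_MAX_EVENTS;
//     args.event_queue_depth = 512;
//     args.fd                = &cloned_fd;
//     if (fd >= 0 && ioctl(fd, FSEVENTS_CLONE, &args) == 0) {
//         // cloned_fd now delivers event records via read(2)
//     }
//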
static int
fseventsselect(dev_t dev, int rw, struct proc *p)
{
    return 0;
}

static void
fsevents_wakeup(fsevent_handle *fseh)
{
    wakeup((caddr_t)fseh);
    selwakeup(&fseh->si);
}


/*
 * A struct describing which functions will get invoked for certain
 * actions.
 */
static struct cdevsw fsevents_cdevsw =
{
    fseventsopen,   /* open */
    fseventsclose,  /* close */
    fseventsread,   /* read */
    fseventswrite,  /* write */
    fseventsioctl,  /* ioctl */
    nulldev,        /* stop */
    nulldev,        /* reset */
    NULL,           /* tty's */
    eno_select,     /* select */
    eno_mmap,       /* mmap */
    eno_strat,      /* strategy */
    eno_getc,       /* getc */
    eno_putc,       /* putc */
    0               /* type */
};


/*
 * Called to initialize our device,
 * and to register ourselves with devfs
 */
void
fsevents_init(void)
{
    int ret;

    if (fsevents_installed) {
        return;
    }

    fsevents_installed = 1;

    lockinit(&fsevents_lck, PLOCK, "fsevents", 0, 0);

    ret = cdevsw_add(-1, &fsevents_cdevsw);
    if (ret < 0) {
        fsevents_installed = 0;
        return;
    }

    devfs_make_node(makedev(ret, 0), DEVFS_CHAR,
                    UID_ROOT, GID_WHEEL, 0644, "fsevents", 0);

    fsevents_internal_init();
}



//
// XXXdbg - temporary path buffer handling
//
#define NUM_PATH_BUFFS 16
static char path_buff[NUM_PATH_BUFFS][MAXPATHLEN];
static char path_buff_inuse[NUM_PATH_BUFFS];

static lck_grp_attr_t * pathbuff_group_attr;
static lck_attr_t * pathbuff_lock_attr;
static lck_grp_t * pathbuff_mutex_group;
static lck_mtx_t pathbuff_lock;

static void
init_pathbuff(void)
{
    pathbuff_lock_attr   = lck_attr_alloc_init();
    pathbuff_group_attr  = lck_grp_attr_alloc_init();
    pathbuff_mutex_group = lck_grp_alloc_init("pathbuff-mutex", pathbuff_group_attr);

    lck_mtx_init(&pathbuff_lock, pathbuff_mutex_group, pathbuff_lock_attr);
}

static void
lock_pathbuff(void)
{
    lck_mtx_lock(&pathbuff_lock);
}

static void
unlock_pathbuff(void)
{
    lck_mtx_unlock(&pathbuff_lock);
}


char *
get_pathbuff(void)
{
    int i;

    lock_pathbuff();
    for(i=0; i < NUM_PATH_BUFFS; i++) {
        if (path_buff_inuse[i] == 0) {
            break;
        }
    }

    if (i >= NUM_PATH_BUFFS) {
        char *path;

        unlock_pathbuff();
        MALLOC_ZONE(path, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
        return path;
    }

    path_buff_inuse[i] = 1;
    unlock_pathbuff();
    return &path_buff[i][0];
}

void
release_pathbuff(char *path)
{
    int i;

    if (path == NULL) {
        return;
    }

    lock_pathbuff();
    for(i=0; i < NUM_PATH_BUFFS; i++) {
        if (path == &path_buff[i][0]) {
            path_buff[i][0] = '\0';
            path_buff_inuse[i] = 0;
            unlock_pathbuff();
            return;
        }
    }

    unlock_pathbuff();

    // if we get here then it wasn't one of our temp buffers
    FREE_ZONE(path, MAXPATHLEN, M_NAMEI);
}

int
get_fse_info(struct vnode *vp, fse_info *fse, vfs_context_t ctx)
{
    struct vnode_attr va;

    VATTR_INIT(&va);
    VATTR_WANTED(&va, va_fsid);
    VATTR_WANTED(&va, va_fileid);
    VATTR_WANTED(&va, va_mode);
    VATTR_WANTED(&va, va_uid);
    VATTR_WANTED(&va, va_gid);
    if (vnode_getattr(vp, &va, ctx) != 0) {
        return -1;
    }

    fse->dev  = (dev_t)va.va_fsid;
    fse->ino  = (ino_t)va.va_fileid;
    fse->mode = (int32_t)vnode_vttoif(vnode_vtype(vp)) | va.va_mode;
    fse->uid  = (uid_t)va.va_uid;
    fse->gid  = (gid_t)va.va_gid;

    return 0;
}