/*
 * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <stdarg.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/file_internal.h>
#include <sys/stat.h>
#include <sys/vnode_internal.h>
#include <sys/mount_internal.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/dirent.h>
#include <sys/attr.h>
#include <sys/sysctl.h>
#include <sys/ubc.h>
#include <machine/cons.h>
#include <miscfs/specfs/specdev.h>
#include <miscfs/devfs/devfs.h>
#include <sys/filio.h>
#include <architecture/byte_order.h>
#include <kern/locks.h>
#include <libkern/OSAtomic.h>

#include <bsm/audit_kernel.h>
#include <bsm/audit_kevents.h>

// where all our structs and defines come from
#include <sys/fsevents.h>

typedef struct kfs_event_arg {
    u_int16_t type;
    u_int16_t len;
    union {
        struct vnode *vp;
        char *str;
        void *ptr;
        int32_t int32;
        dev_t dev;
        ino_t ino;
        int32_t mode;
        uid_t uid;
        gid_t gid;
    } data;
} kfs_event_arg;

#define KFS_NUM_ARGS FSE_MAX_ARGS
typedef struct kfs_event {
    int32_t type;          // type code of this event
    u_int32_t refcount;    // number of clients referencing this
    pid_t pid;             // pid of the process that did the op
    kfs_event_arg args[KFS_NUM_ARGS];
} kfs_event;

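// Events are allocated out of the static fs_event_buf[] pool declared
// below: a slot whose type is FSE_INVALID is free, and refcount tracks how
// many watcher queues still reference a slot. When the last reference is
// dropped the event is recycled via do_free_event().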

typedef struct fs_event_watcher {
    SLIST_ENTRY(fs_event_watcher) link;
    int8_t *event_list;          // the events we're interested in
    int32_t num_events;
    dev_t *devices_to_watch;     // only report events from these devices
    uint32_t num_devices;
    int32_t flags;
    kfs_event **event_queue;
    int32_t eventq_size;         // number of event pointers in queue
    int32_t rd, wr;              // indices to the event_queue
    int32_t blockers;
} fs_event_watcher;

// fs_event_watcher flags
#define WATCHER_DROPPED_EVENTS 0x0001
#define WATCHER_CLOSING        0x0002

static SLIST_HEAD(watch_list, fs_event_watcher) watch_list_head = { NULL };


#define MAX_KFS_EVENTS 2048

// this array holds each pending event
static kfs_event fs_event_buf[MAX_KFS_EVENTS];
static int free_event_idx = 0;
static int fs_event_init = 0;

//
// this array records whether anyone is interested in a
// particular type of event. if no one is, we bail out
// early from the event delivery
//
static int16_t fs_event_type_watchers[FSE_MAX_EVENTS];

static int watcher_add_event(fs_event_watcher *watcher, kfs_event *kfse);

//
// Locks
//
static lck_grp_attr_t * fsevent_group_attr;
static lck_attr_t *     fsevent_lock_attr;
static lck_grp_t *      fsevent_mutex_group;

static lck_grp_t *      fsevent_rw_group;

static lck_rw_t  fsevent_big_lock;   // always grab this first
static lck_mtx_t watch_list_lock;
static lck_mtx_t event_buf_lock;


static void init_pathbuff(void);

static void
fsevents_internal_init(void)
{
    int i;

    if (fs_event_init++ != 0) {
        return;
    }

    for(i=0; i < FSE_MAX_EVENTS; i++) {
        fs_event_type_watchers[i] = 0;
    }

    for(i=0; i < MAX_KFS_EVENTS; i++) {
        fs_event_buf[i].type     = FSE_INVALID;
        fs_event_buf[i].refcount = 0;
    }

    SLIST_INIT(&watch_list_head);

    fsevent_lock_attr   = lck_attr_alloc_init();
    fsevent_group_attr  = lck_grp_attr_alloc_init();
    fsevent_mutex_group = lck_grp_alloc_init("fsevent-mutex", fsevent_group_attr);
    fsevent_rw_group    = lck_grp_alloc_init("fsevent-rw", fsevent_group_attr);

    lck_mtx_init(&watch_list_lock, fsevent_mutex_group, fsevent_lock_attr);
    lck_mtx_init(&event_buf_lock, fsevent_mutex_group, fsevent_lock_attr);

    lck_rw_init(&fsevent_big_lock, fsevent_rw_group, fsevent_lock_attr);

    init_pathbuff();
}

static void
lock_watch_list(void)
{
    lck_mtx_lock(&watch_list_lock);
}

static void
unlock_watch_list(void)
{
    lck_mtx_unlock(&watch_list_lock);
}

static void
lock_fs_event_buf(void)
{
    lck_mtx_lock(&event_buf_lock);
}

static void
unlock_fs_event_buf(void)
{
    lck_mtx_unlock(&event_buf_lock);
}

// forward prototype
static void do_free_event(kfs_event *kfse);

static int
watcher_cares_about_dev(fs_event_watcher *watcher, dev_t dev)
{
    unsigned int i;

    // if there is no list of devices to watch, then always
    // say we're interested so we'll report all events from
    // all devices
    if (watcher->devices_to_watch == NULL) {
        return 1;
    }

    for(i=0; i < watcher->num_devices; i++) {
        if (dev == watcher->devices_to_watch[i]) {
            // found a match! that means we want events
            // from this device.
            return 1;
        }
    }

    // if we're here it's not in the devices_to_watch[]
    // list so that means we do not care about it
    return 0;
}

int
need_fsevent(int type, vnode_t vp)
{
    fs_event_watcher *watcher;
    dev_t dev;

    if (fs_event_type_watchers[type] == 0)
        return (0);
    dev = (dev_t)(vp->v_mount->mnt_vfsstat.f_fsid.val[0]);

    lock_watch_list();

    SLIST_FOREACH(watcher, &watch_list_head, link) {
        if (watcher->event_list[type] == FSE_REPORT && watcher_cares_about_dev(watcher, dev)) {
            unlock_watch_list();
            return (1);
        }
    }
    unlock_watch_list();

    return (0);
}

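//
// A typical call from a VFS operation looks roughly like the following
// (an illustrative sketch; the event and argument constants are defined
// in sys/fsevents.h):
//
//     add_fsevent(FSE_CREATE_FILE, ctx,
//                 FSE_ARG_VNODE, vp,
//                 FSE_ARG_DONE);
//
// The variable arguments are (arg type, value) pairs terminated by
// FSE_ARG_DONE.
//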
int
add_fsevent(int type, vfs_context_t ctx, ...)
{
    struct proc *p = vfs_context_proc(ctx);
    int i, arg_idx, num_deliveries=0;
    kfs_event_arg *kea;
    kfs_event *kfse;
    fs_event_watcher *watcher;
    va_list ap;
    int error = 0;
    dev_t dev = 0;

    va_start(ap, ctx);

    // if no one cares about this type of event, bail out
    if (fs_event_type_watchers[type] == 0) {
        va_end(ap);
        return 0;
    }

    lck_rw_lock_shared(&fsevent_big_lock);

    // find a free event and snag it for our use
    // NOTE: do not do anything that would block until
    //       the lock is dropped.
    lock_fs_event_buf();

    for(i=0; i < MAX_KFS_EVENTS; i++) {
        if (fs_event_buf[(free_event_idx + i) % MAX_KFS_EVENTS].type == FSE_INVALID) {
            break;
        }
    }

    if (i >= MAX_KFS_EVENTS) {
        // yikes! no free slots
        unlock_fs_event_buf();
        va_end(ap);

        lock_watch_list();
        SLIST_FOREACH(watcher, &watch_list_head, link) {
            watcher->flags |= WATCHER_DROPPED_EVENTS;
            wakeup((caddr_t)watcher);
        }
        unlock_watch_list();
        lck_rw_done(&fsevent_big_lock);

        printf("fs_events: add_event: event queue is full! dropping events.\n");
        return ENOSPC;
    }

    kfse = &fs_event_buf[(free_event_idx + i) % MAX_KFS_EVENTS];

    free_event_idx++;

    kfse->type     = type;
    kfse->refcount = 0;
    kfse->pid      = p->p_pid;

    unlock_fs_event_buf();   // at this point it's safe to unlock

    //
    // now process the arguments passed in and copy them into
    // the kfse
    //
    arg_idx = 0;
    while(arg_idx < KFS_NUM_ARGS) {
        kea = &kfse->args[arg_idx++];
        kea->type = va_arg(ap, int32_t);

        if (kea->type == FSE_ARG_DONE) {
            break;
        }

        switch(kea->type) {
            case FSE_ARG_VNODE: {
                // this expands out into multiple arguments to the client
                struct vnode *vp;
                struct vnode_attr va;

                kea->data.vp = vp = va_arg(ap, struct vnode *);
                if (kea->data.vp == NULL) {
                    panic("add_fsevent: you can't pass me a NULL vnode ptr (type %d)!\n",
                          kfse->type);
                }

                if (vnode_ref_ext(kea->data.vp, O_EVTONLY) != 0) {
                    kea->type = FSE_ARG_DONE;

                    error = EINVAL;
                    goto clean_up;
                }
                VATTR_INIT(&va);
                VATTR_WANTED(&va, va_fsid);
                VATTR_WANTED(&va, va_fileid);
                VATTR_WANTED(&va, va_mode);
                VATTR_WANTED(&va, va_uid);
                VATTR_WANTED(&va, va_gid);
                if (vnode_getattr(kea->data.vp, &va, ctx) != 0) {
                    vnode_rele_ext(kea->data.vp, O_EVTONLY, 0);
                    kea->type = FSE_ARG_DONE;

                    error = EINVAL;
                    goto clean_up;
                }

                kea++;
                kea->type = FSE_ARG_DEV;
                kea->data.dev = dev = (dev_t)va.va_fsid;

                kea++;
                kea->type = FSE_ARG_INO;
                kea->data.ino = (ino_t)va.va_fileid;

                kea++;
                kea->type = FSE_ARG_MODE;
                kea->data.mode = (int32_t)vnode_vttoif(vnode_vtype(vp)) | va.va_mode;

                kea++;
                kea->type = FSE_ARG_UID;
                kea->data.uid = va.va_uid;

                kea++;
                kea->type = FSE_ARG_GID;
                kea->data.gid = va.va_gid;
                arg_idx += 5;
                break;
            }

            case FSE_ARG_FINFO: {
                fse_info *fse;

                fse = va_arg(ap, fse_info *);

                kea->type = FSE_ARG_DEV;
                kea->data.dev = dev = (dev_t)fse->dev;

                kea++;
                kea->type = FSE_ARG_INO;
                kea->data.ino = (ino_t)fse->ino;

                kea++;
                kea->type = FSE_ARG_MODE;
                kea->data.mode = (int32_t)fse->mode;

                kea++;
                kea->type = FSE_ARG_UID;
                kea->data.uid = (uid_t)fse->uid;

                kea++;
                kea->type = FSE_ARG_GID;
                kea->data.gid = (gid_t)fse->gid;
                arg_idx += 4;
                break;
            }

            case FSE_ARG_STRING:
                kea->len = (u_int16_t)(va_arg(ap, int32_t) & 0xffff);
                kea->data.str = vfs_addname(va_arg(ap, char *), kea->len, 0, 0);
                break;

            case FSE_ARG_INT32:
                kea->data.int32 = va_arg(ap, int32_t);
                break;

            case FSE_ARG_INT64:
                printf("fs_events: 64-bit args not implemented.\n");
//              kea->data.int64 = va_arg(ap, int64_t);
                break;

            case FSE_ARG_RAW:
                kea->len = (u_int16_t)(va_arg(ap, int32_t) & 0xffff);
                MALLOC(kea->data.ptr, void *, kea->len, M_TEMP, M_WAITOK);
                memcpy(kea->data.ptr, va_arg(ap, void *), kea->len);
                break;

            case FSE_ARG_DEV:
                kea->data.dev = dev = va_arg(ap, dev_t);
                break;

            case FSE_ARG_MODE:
                kea->data.mode = va_arg(ap, int32_t);
                break;

            case FSE_ARG_INO:
                kea->data.ino = va_arg(ap, ino_t);
                break;

            case FSE_ARG_UID:
                kea->data.uid = va_arg(ap, uid_t);
                break;

            case FSE_ARG_GID:
                kea->data.gid = va_arg(ap, gid_t);
                break;

            default:
                printf("add_fsevent: unknown type %d\n", kea->type);
                // just skip one 32-bit word and hope we sync up...
                (void)va_arg(ap, int32_t);
        }
    }

    va_end(ap);

    //
    // now we have to go and let everyone who is interested in
    // this type of event know about it...
    //
    lock_watch_list();

    SLIST_FOREACH(watcher, &watch_list_head, link) {
        if (watcher->event_list[type] == FSE_REPORT && watcher_cares_about_dev(watcher, dev)) {
            if (watcher_add_event(watcher, kfse) == 0) {
                num_deliveries++;
            }
        }
    }

    unlock_watch_list();

  clean_up:
    // just in case no one was interested after all...
    if (num_deliveries == 0) {
        do_free_event(kfse);
        free_event_idx = (int)(kfse - &fs_event_buf[0]);
    }

    lck_rw_done(&fsevent_big_lock);
    return error;
}

static void
do_free_event(kfs_event *kfse)
{
    int i;
    kfs_event_arg *kea, all_args[KFS_NUM_ARGS];

    lock_fs_event_buf();

    // mark this fsevent as invalid
    kfse->type = FSE_INVALID;

    // make a copy of this so we can free things without
    // holding the fs_event_buf lock
    //
    memcpy(&all_args[0], &kfse->args[0], sizeof(all_args));

    // and just to be anal, set this so that there are no args
    kfse->args[0].type = FSE_ARG_DONE;

    free_event_idx = (kfse - fs_event_buf);

    unlock_fs_event_buf();

    for(i=0; i < KFS_NUM_ARGS; i++) {
        kea = &all_args[i];
        if (kea->type == FSE_ARG_DONE) {
            break;
        }

        switch(kea->type) {
            case FSE_ARG_VNODE:
                vnode_rele_ext(kea->data.vp, O_EVTONLY, 0);
                break;
            case FSE_ARG_STRING:
                vfs_removename(kea->data.str);
                break;
            case FSE_ARG_RAW:
                FREE(kea->data.ptr, M_TEMP);
                break;
        }
    }
}

static int
add_watcher(int8_t *event_list, int32_t num_events, int32_t eventq_size, fs_event_watcher **watcher_out)
{
    int i;
    fs_event_watcher *watcher;

    if (eventq_size <= 0 || eventq_size > MAX_KFS_EVENTS) {
        eventq_size = MAX_KFS_EVENTS;
    }

    // Note: the event_queue follows the fs_event_watcher struct
    //       in memory so we only have to do one allocation
    MALLOC(watcher,
           fs_event_watcher *,
           sizeof(fs_event_watcher) + eventq_size * sizeof(kfs_event *),
           M_TEMP, M_WAITOK);

    watcher->event_list       = event_list;
    watcher->num_events       = num_events;
    watcher->devices_to_watch = NULL;
    watcher->num_devices      = 0;
    watcher->flags            = 0;
    watcher->event_queue      = (kfs_event **)&watcher[1];
    watcher->eventq_size      = eventq_size;
    watcher->rd               = 0;
    watcher->wr               = 0;
    watcher->blockers         = 0;

    lock_watch_list();

    // now update the global list of who's interested in
    // events of a particular type...
    for(i=0; i < num_events; i++) {
        if (event_list[i] != FSE_IGNORE && i < FSE_MAX_EVENTS) {
            fs_event_type_watchers[i]++;
        }
    }

    SLIST_INSERT_HEAD(&watch_list_head, watcher, link);

    unlock_watch_list();

    *watcher_out = watcher;

    return 0;
}

static void
remove_watcher(fs_event_watcher *target)
{
    int i;
    fs_event_watcher *watcher;
    kfs_event *kfse;

    lck_rw_lock_shared(&fsevent_big_lock);

    lock_watch_list();

    SLIST_FOREACH(watcher, &watch_list_head, link) {
        if (watcher == target) {
            SLIST_REMOVE(&watch_list_head, watcher, fs_event_watcher, link);

            for(i=0; i < watcher->num_events; i++) {
                if (watcher->event_list[i] != FSE_IGNORE && i < FSE_MAX_EVENTS) {
                    fs_event_type_watchers[i]--;
                }
            }

            unlock_watch_list();

            // drain the event_queue
            for(i=watcher->rd; i != watcher->wr; i=(i+1) % watcher->eventq_size) {
                kfse = watcher->event_queue[i];

                if (OSAddAtomic(-1, (SInt32 *)&kfse->refcount) == 1) {
                    do_free_event(kfse);
                }
            }

            if (watcher->event_list) {
                FREE(watcher->event_list, M_TEMP);
                watcher->event_list = NULL;
            }
            if (watcher->devices_to_watch) {
                FREE(watcher->devices_to_watch, M_TEMP);
                watcher->devices_to_watch = NULL;
            }
            FREE(watcher, M_TEMP);

            lck_rw_done(&fsevent_big_lock);
            return;
        }
    }

    unlock_watch_list();
    lck_rw_done(&fsevent_big_lock);
}

static int
watcher_add_event(fs_event_watcher *watcher, kfs_event *kfse)
{
    if (((watcher->wr + 1) % watcher->eventq_size) == watcher->rd) {
        watcher->flags |= WATCHER_DROPPED_EVENTS;
        wakeup((caddr_t)watcher);
        return ENOSPC;
    }

    watcher->event_queue[watcher->wr] = kfse;
    OSAddAtomic(1, (SInt32 *)&kfse->refcount);
    watcher->wr = (watcher->wr + 1) % watcher->eventq_size;

    // wake up the watcher if he's waiting!
    wakeup((caddr_t)watcher);

    return 0;
}
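
// Note the ring-buffer convention used above: rd == wr means the queue is
// empty and (wr + 1) % eventq_size == rd means it is full, so a queue of
// eventq_size pointers holds at most eventq_size - 1 events.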

static int
fmod_watch(fs_event_watcher *watcher, struct uio *uio)
{
    int i, error=0, last_full_event_resid;
    kfs_event *kfse;
    kfs_event_arg *kea;
    uint16_t tmp16;

    // LP64todo - fix this
    last_full_event_resid = uio_resid(uio);

    // need at least 2048 bytes of space (maxpathlen + 1 event buf)
    if (uio_resid(uio) < 2048 || watcher == NULL) {
        return EINVAL;
    }

    if (watcher->rd == watcher->wr) {
        if (watcher->flags & WATCHER_CLOSING) {
            return 0;
        }
        OSAddAtomic(1, (SInt32 *)&watcher->blockers);

        // there's nothing to do, go to sleep
        error = tsleep((caddr_t)watcher, PUSER|PCATCH, "fsevents_empty", 0);

        OSAddAtomic(-1, (SInt32 *)&watcher->blockers);

        if (error != 0 || (watcher->flags & WATCHER_CLOSING)) {
            return error;
        }
    }

    // if we dropped events, return that as an event first
    if (watcher->flags & WATCHER_DROPPED_EVENTS) {
        int32_t val = FSE_EVENTS_DROPPED;

        error = uiomove((caddr_t)&val, sizeof(int32_t), uio);
        if (error == 0) {
            val = 0;                 // a fake pid
            error = uiomove((caddr_t)&val, sizeof(int32_t), uio);

            tmp16 = FSE_ARG_DONE;    // makes it a consistent msg
            error = uiomove((caddr_t)&tmp16, sizeof(int16_t), uio);
        }

        if (error) {
            return error;
        }

        watcher->flags &= ~WATCHER_DROPPED_EVENTS;
    }

    //
    // check if the next chunk of data will fit in the user's
    // buffer. if not, just goto get_out which will return
    // the number of bytes worth of events that we did read.
    // this leaves the event that didn't fit in the queue.
    //
    // LP64todo - fix this
#define CHECK_UPTR(size) if (size > (unsigned)uio_resid(uio)) { \
        uio_setresid(uio, last_full_event_resid);               \
        goto get_out;                                           \
    }

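    // Note: CHECK_UPTR only rewinds the residual count. Any bytes of an
    // oversized event that were already uiomove'd lie beyond the byte count
    // this read reports, and the event itself is not lost: rd is only
    // advanced once a complete event has been copied out, so the event is
    // retried on the next read.
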
    for (; uio_resid(uio) > 0 && watcher->rd != watcher->wr; ) {
        kfse = watcher->event_queue[watcher->rd];

        // copy out the type of the event
        CHECK_UPTR(sizeof(int32_t));
        if ((error = uiomove((caddr_t)&kfse->type, sizeof(int32_t), uio)) != 0) {
            goto get_out;
        }

        // now copy out the pid of the person that changed the file
        CHECK_UPTR(sizeof(pid_t));
        if ((error = uiomove((caddr_t)&kfse->pid, sizeof(pid_t), uio)) != 0) {
            goto get_out;
        }

        error = 0;
        for(i=0; i < KFS_NUM_ARGS && error == 0; i++) {
            char *pathbuff;
            int pathbuff_len;

            kea = &kfse->args[i];

            tmp16 = (uint16_t)kea->type;
            CHECK_UPTR(sizeof(uint16_t));
            error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
            if (error || kea->type == FSE_ARG_DONE) {
                break;
            }

            switch(kea->type) {
                case FSE_ARG_VNODE:
                    pathbuff = get_pathbuff();
                    pathbuff_len = MAXPATHLEN;
                    if (kea->data.vp == NULL) {
                        printf("fmod_watch: whoa... vp == NULL (%d)!\n", kfse->type);
                        i--;
                        release_pathbuff(pathbuff);
                        continue;
                    }

                    if (vn_getpath(kea->data.vp, pathbuff, &pathbuff_len) != 0 || pathbuff[0] == '\0') {
//                      printf("fmod_watch: vn_getpath failed! vp 0x%x vname 0x%x (%s) vparent 0x%x\n",
//                             kea->data.vp,
//                             VNAME(kea->data.vp),
//                             VNAME(kea->data.vp) ? VNAME(kea->data.vp) : "<null>",
//                             VPARENT(kea->data.vp));
                    }
                    CHECK_UPTR(sizeof(uint16_t));
                    tmp16 = (uint16_t)pathbuff_len;
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);

                    CHECK_UPTR((unsigned)pathbuff_len);
                    error = uiomove((caddr_t)pathbuff, pathbuff_len, uio);
                    release_pathbuff(pathbuff);
                    break;

                case FSE_ARG_STRING:
                    tmp16 = (uint16_t)kea->len;
                    CHECK_UPTR(sizeof(uint16_t));
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);

                    CHECK_UPTR(kea->len);
                    error = uiomove((caddr_t)kea->data.str, kea->len, uio);
                    break;

                case FSE_ARG_INT32:
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(int32_t));
                    tmp16 = sizeof(int32_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.int32, sizeof(int32_t), uio);
                    break;

                case FSE_ARG_INT64:
                    printf("fs_events: 64-bit args not implemented on copyout.\n");
//                  CHECK_UPTR(sizeof(uint16_t) + sizeof(int64_t));
//                  tmp16 = sizeof(int64_t);
//                  error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
//                  error = uiomove((caddr_t)&kea->data.int64, sizeof(int64_t), uio);
                    break;

                case FSE_ARG_RAW:
                    tmp16 = (uint16_t)kea->len;
                    CHECK_UPTR(sizeof(uint16_t));
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);

                    CHECK_UPTR(kea->len);
                    error = uiomove((caddr_t)kea->data.ptr, kea->len, uio);
                    break;

                case FSE_ARG_DEV:
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(dev_t));
                    tmp16 = sizeof(dev_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.dev, sizeof(dev_t), uio);
                    break;

                case FSE_ARG_INO:
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(ino_t));
                    tmp16 = sizeof(ino_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.ino, sizeof(ino_t), uio);
                    break;

                case FSE_ARG_MODE:
                    // XXXdbg - NOTE: we use 32-bits for the mode, not
                    //          16-bits like a real mode_t
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(int32_t));
                    tmp16 = sizeof(int32_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.mode, sizeof(int32_t), uio);
                    break;

                case FSE_ARG_UID:
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(uid_t));
                    tmp16 = sizeof(uid_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.uid, sizeof(uid_t), uio);
                    break;

                case FSE_ARG_GID:
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(gid_t));
                    tmp16 = sizeof(gid_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.gid, sizeof(gid_t), uio);
                    break;

                default:
                    printf("fmod_watch: unknown arg type %d.\n", kea->type);
                    break;
            }
        }

        // make sure that we always end with a FSE_ARG_DONE
        if (i >= KFS_NUM_ARGS) {
            tmp16 = FSE_ARG_DONE;
            CHECK_UPTR(sizeof(uint16_t));
            error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
        }

        // LP64todo - fix this
        last_full_event_resid = uio_resid(uio);

        watcher->rd = (watcher->rd + 1) % watcher->eventq_size;

        if (OSAddAtomic(-1, (SInt32 *)&kfse->refcount) == 1) {
            do_free_event(kfse);
        }
    }

  get_out:
    return error;
}
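
//
// For reference, each record that fmod_watch() copies out to user space
// has this layout (derived from the copy-out sequence above):
//
//     int32_t  type of the event
//     int32_t  pid of the process that did the op
//     { uint16_t arg type, uint16_t arg length, arg data }  repeated per arg
//     uint16_t FSE_ARG_DONE terminator
//
// A dropped-events notification is the pair (FSE_EVENTS_DROPPED, 0)
// followed immediately by FSE_ARG_DONE.
//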

// release any references we might have on vnodes which are
// on the mount point passed to us (so that it can be cleanly
// unmounted).
//
// since we don't want to lose the events we'll convert the
// vnode refs to the full path, inode #, and uid.
//
void
fsevent_unmount(struct mount *mp)
{
    int i, j;
    kfs_event *kfse;
    kfs_event_arg *kea;

    lck_rw_lock_exclusive(&fsevent_big_lock);
    lock_fs_event_buf();

    for(i=0; i < MAX_KFS_EVENTS; i++) {
        if (fs_event_buf[i].type == FSE_INVALID) {
            continue;
        }

        kfse = &fs_event_buf[i];
        for(j=0; j < KFS_NUM_ARGS; j++) {
            kea = &kfse->args[j];
            if (kea->type == FSE_ARG_DONE) {
                break;
            }

            if (kea->type == FSE_ARG_VNODE && kea->data.vp->v_mount == mp) {
                struct vnode *vp;
                char *pathbuff;
                int pathbuff_len;

                vp = kea->data.vp;
                pathbuff = get_pathbuff();
                pathbuff_len = MAXPATHLEN;

                if (vn_getpath(vp, pathbuff, &pathbuff_len) != 0 || pathbuff[0] == '\0') {
                    char *vname;

                    vname = vnode_getname(vp);

                    printf("fsevent_unmount: vn_getpath failed! vp 0x%x vname 0x%x (%s) vparent 0x%x\n",
                           vp, vname, vname ? vname : "<null>", vp->v_parent);

                    if (vname)
                        vnode_putname(vname);
                }

                // switch the arg over to carrying the path string instead
                kea->type = FSE_ARG_STRING;
                kea->data.str = vfs_addname(pathbuff, pathbuff_len, 0, 0);
                kea->len = pathbuff_len;
                release_pathbuff(pathbuff);

                // and finally let go of the reference on the vnode
                vnode_rele_ext(vp, O_EVTONLY, 0);
            }
        }
    }

    unlock_fs_event_buf();
    lck_rw_done(&fsevent_big_lock);
}


//
// /dev/fsevents device code
//
static int fsevents_installed = 0;
static struct lock__bsd__ fsevents_lck;

typedef struct fsevent_handle {
    fs_event_watcher *watcher;
    struct selinfo si;
} fsevent_handle;

static int
fseventsf_read(struct fileproc *fp, struct uio *uio,
               __unused kauth_cred_t *cred, __unused int flags,
               __unused struct proc *p)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;
    int error;

    error = fmod_watch(fseh->watcher, uio);

    return error;
}

static int
fseventsf_write(__unused struct fileproc *fp, __unused struct uio *uio,
                __unused kauth_cred_t *cred, __unused int flags,
                __unused struct proc *p)
{
    return EIO;
}

static int
fseventsf_ioctl(struct fileproc *fp, u_long cmd, caddr_t data, struct proc *p)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;
    int ret = 0;
    fsevent_dev_filter_args *devfilt_args = (fsevent_dev_filter_args *)data;

    switch (cmd) {
        case FIONBIO:
        case FIOASYNC:
            return 0;

        case FSEVENTS_DEVICE_FILTER: {
            int new_num_devices;
            dev_t *devices_to_watch, *tmp=NULL;

            if (devfilt_args->num_devices > 256) {
                ret = EINVAL;
                break;
            }

            new_num_devices = devfilt_args->num_devices;
            if (new_num_devices == 0) {
                tmp = fseh->watcher->devices_to_watch;

                lock_watch_list();
                fseh->watcher->devices_to_watch = NULL;
                fseh->watcher->num_devices = new_num_devices;
                unlock_watch_list();

                if (tmp) {
                    FREE(tmp, M_TEMP);
                }
                break;
            }

            MALLOC(devices_to_watch, dev_t *,
                   new_num_devices * sizeof(dev_t),
                   M_TEMP, M_WAITOK);
            if (devices_to_watch == NULL) {
                ret = ENOMEM;
                break;
            }

            ret = copyin(CAST_USER_ADDR_T(devfilt_args->devices),
                         (void *)devices_to_watch,
                         new_num_devices * sizeof(dev_t));
            if (ret) {
                FREE(devices_to_watch, M_TEMP);
                break;
            }

            lock_watch_list();
            fseh->watcher->num_devices = new_num_devices;
            tmp = fseh->watcher->devices_to_watch;
            fseh->watcher->devices_to_watch = devices_to_watch;
            unlock_watch_list();

            if (tmp) {
                FREE(tmp, M_TEMP);
            }

            break;
        }

        default:
            ret = EINVAL;
            break;
    }

    return (ret);
}


static int
fseventsf_select(struct fileproc *fp, int which, void *wql, struct proc *p)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;
    int ready = 0;

    if ((which != FREAD) || (fseh->watcher->flags & WATCHER_CLOSING)) {
        return 0;
    }

    // if there's nothing in the queue, we're not ready
    if (fseh->watcher->rd == fseh->watcher->wr) {
        ready = 0;
    } else {
        ready = 1;
    }

    if (!ready) {
        selrecord(p, &fseh->si, wql);
    }

    return ready;
}

static int
fseventsf_stat(struct fileproc *fp, struct stat *sb, struct proc *p)
{
    return ENOTSUP;
}

static int
fseventsf_close(struct fileglob *fg, struct proc *p)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fg->fg_data;

    remove_watcher(fseh->watcher);

    fg->fg_data = NULL;
    fseh->watcher = NULL;
    FREE(fseh, M_TEMP);

    return 0;
}

int
fseventsf_kqfilter(struct fileproc *fp, struct knote *kn, struct proc *p)
{
    // XXXdbg
    return 0;
}

static int
fseventsf_drain(struct fileproc *fp, struct proc *p)
{
    int counter = 0;
    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;

    fseh->watcher->flags |= WATCHER_CLOSING;

    // if there are people still waiting, sleep for 10ms to
    // let them clean up and get out of there. however we
    // also don't want to get stuck forever so if they don't
    // exit after 5 seconds we're tearing things down anyway.
    while(fseh->watcher->blockers && counter++ < 500) {
        // issue wakeup in case anyone is blocked waiting for an event
        // do this each time we wakeup in case the blocker missed
        // the wakeup due to the unprotected test of WATCHER_CLOSING
        // and decision to tsleep in fmod_watch... this bit of
        // latency is a decent tradeoff against not having to
        // take and drop a lock in fmod_watch
        wakeup((caddr_t)fseh->watcher);

        tsleep((caddr_t)fseh->watcher, PRIBIO, "watcher-close", 1);
    }

    return 0;
}

static int
fseventsopen(dev_t dev, int flag, int mode, struct proc *p)
{
    if (!is_suser()) {
        return EPERM;
    }

    return 0;
}

static int
fseventsclose(dev_t dev, int flag, int mode, struct proc *p)
{
    return 0;
}

static int
fseventsread(dev_t dev, struct uio *uio, int ioflag)
{
    return EIO;
}

static int
fseventswrite(dev_t dev, struct uio *uio, int ioflag)
{
    return EIO;
}

static struct fileops fsevents_fops = {
    fseventsf_read,
    fseventsf_write,
    fseventsf_ioctl,
    fseventsf_select,
    fseventsf_close,
    fseventsf_kqfilter,
    fseventsf_drain
};

static int
fseventsioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
    struct fileproc *f;
    int fd, error;
    fsevent_handle *fseh = NULL;
    fsevent_clone_args *fse_clone_args = (fsevent_clone_args *)data;
    int8_t *event_list;

    switch (cmd) {
        case FSEVENTS_CLONE:
            if (fse_clone_args->num_events < 0 || fse_clone_args->num_events > 4096) {
                return EINVAL;
            }

            MALLOC(fseh, fsevent_handle *, sizeof(fsevent_handle),
                   M_TEMP, M_WAITOK);
            memset(fseh, 0, sizeof(fsevent_handle));

            MALLOC(event_list, int8_t *,
                   fse_clone_args->num_events * sizeof(int8_t),
                   M_TEMP, M_WAITOK);

            error = copyin(CAST_USER_ADDR_T(fse_clone_args->event_list),
                           (void *)event_list,
                           fse_clone_args->num_events * sizeof(int8_t));
            if (error) {
                FREE(event_list, M_TEMP);
                FREE(fseh, M_TEMP);
                return error;
            }

            error = add_watcher(event_list,
                                fse_clone_args->num_events,
                                fse_clone_args->event_queue_depth,
                                &fseh->watcher);
            if (error) {
                FREE(event_list, M_TEMP);
                FREE(fseh, M_TEMP);
                return error;
            }

            error = falloc(p, &f, &fd);
            if (error) {
                FREE(event_list, M_TEMP);
                FREE(fseh, M_TEMP);
                return (error);
            }
            proc_fdlock(p);
            f->f_fglob->fg_flag = FREAD | FWRITE;
            f->f_fglob->fg_type = DTYPE_FSEVENTS;
            f->f_fglob->fg_ops  = &fsevents_fops;
            f->f_fglob->fg_data = (caddr_t) fseh;
            proc_fdunlock(p);
            copyout((void *)&fd, CAST_USER_ADDR_T(fse_clone_args->fd), sizeof(int32_t));
            proc_fdlock(p);
            *fdflags(p, fd) &= ~UF_RESERVED;
            fp_drop(p, fd, f, 1);
            proc_fdunlock(p);
            break;

        default:
            error = EINVAL;
            break;
    }

    return error;
}
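
//
// A minimal user-space consumer, sketched here for illustration only
// (error handling omitted; the structures and constants come from
// sys/fsevents.h):
//
//     int8_t event_list[FSE_MAX_EVENTS];
//     int32_t new_fd;
//     fsevent_clone_args clone_args;
//     int fd = open("/dev/fsevents", O_RDONLY);
//
//     memset(event_list, FSE_REPORT, sizeof(event_list));
//     clone_args.event_list        = event_list;
//     clone_args.num_events        = FSE_MAX_EVENTS;
//     clone_args.event_queue_depth = 0x100;
//     clone_args.fd                = &new_fd;
//     ioctl(fd, FSEVENTS_CLONE, &clone_args);
//
//     // new_fd now delivers the record stream described above fmod_watch()
//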

static int
fseventsselect(dev_t dev, int rw, struct proc *p)
{
    return 0;
}

static void
fsevents_wakeup(fsevent_handle *fseh)
{
    wakeup((caddr_t)fseh);
    selwakeup(&fseh->si);
}

/*
 * A struct describing which functions will get invoked for certain
 * actions.
 */
static struct cdevsw fsevents_cdevsw =
{
    fseventsopen,   /* open */
    fseventsclose,  /* close */
    fseventsread,   /* read */
    fseventswrite,  /* write */
    fseventsioctl,  /* ioctl */
    nulldev,        /* stop */
    nulldev,        /* reset */
    NULL,           /* tty's */
    eno_select,     /* select */
    eno_mmap,       /* mmap */
    eno_strat,      /* strategy */
    eno_getc,       /* getc */
    eno_putc,       /* putc */
    0               /* type */
};

/*
 * Called to initialize our device,
 * and to register ourselves with devfs
 */
void
fsevents_init(void)
{
    int ret;

    if (fsevents_installed) {
        return;
    }

    fsevents_installed = 1;

    lockinit(&fsevents_lck, PLOCK, "fsevents", 0, 0);

    ret = cdevsw_add(-1, &fsevents_cdevsw);
    if (ret < 0) {
        fsevents_installed = 0;
        return;
    }

    devfs_make_node(makedev(ret, 0), DEVFS_CHAR,
                    UID_ROOT, GID_WHEEL, 0644, "fsevents", 0);

    fsevents_internal_init();
}

//
// XXXdbg - temporary path buffer handling
//
#define NUM_PATH_BUFFS 16
static char path_buff[NUM_PATH_BUFFS][MAXPATHLEN];
static char path_buff_inuse[NUM_PATH_BUFFS];

static lck_grp_attr_t * pathbuff_group_attr;
static lck_attr_t *     pathbuff_lock_attr;
static lck_grp_t *      pathbuff_mutex_group;
static lck_mtx_t        pathbuff_lock;

static void
init_pathbuff(void)
{
    pathbuff_lock_attr   = lck_attr_alloc_init();
    pathbuff_group_attr  = lck_grp_attr_alloc_init();
    pathbuff_mutex_group = lck_grp_alloc_init("pathbuff-mutex", pathbuff_group_attr);

    lck_mtx_init(&pathbuff_lock, pathbuff_mutex_group, pathbuff_lock_attr);
}

static void
lock_pathbuff(void)
{
    lck_mtx_lock(&pathbuff_lock);
}

static void
unlock_pathbuff(void)
{
    lck_mtx_unlock(&pathbuff_lock);
}

char *
get_pathbuff(void)
{
    int i;

    lock_pathbuff();
    for(i=0; i < NUM_PATH_BUFFS; i++) {
        if (path_buff_inuse[i] == 0) {
            break;
        }
    }

    if (i >= NUM_PATH_BUFFS) {
        char *path;

        unlock_pathbuff();
        MALLOC_ZONE(path, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
        return path;
    }

    path_buff_inuse[i] = 1;
    unlock_pathbuff();
    return &path_buff[i][0];
}

void
release_pathbuff(char *path)
{
    int i;

    if (path == NULL) {
        return;
    }

    lock_pathbuff();
    for(i=0; i < NUM_PATH_BUFFS; i++) {
        if (path == &path_buff[i][0]) {
            path_buff[i][0] = '\0';
            path_buff_inuse[i] = 0;
            unlock_pathbuff();
            return;
        }
    }

    unlock_pathbuff();

    // if we get here then it wasn't one of our temp buffers
    FREE_ZONE(path, MAXPATHLEN, M_NAMEI);
}

int
get_fse_info(struct vnode *vp, fse_info *fse, vfs_context_t ctx)
{
    struct vnode_attr va;

    VATTR_INIT(&va);
    VATTR_WANTED(&va, va_fsid);
    VATTR_WANTED(&va, va_fileid);
    VATTR_WANTED(&va, va_mode);
    VATTR_WANTED(&va, va_uid);
    VATTR_WANTED(&va, va_gid);
    if (vnode_getattr(vp, &va, ctx) != 0) {
        return -1;
    }

    fse->dev  = (dev_t)va.va_fsid;
    fse->ino  = (ino_t)va.va_fileid;
    fse->mode = (int32_t)vnode_vttoif(vnode_vtype(vp)) | va.va_mode;
    fse->uid  = (uid_t)va.va_uid;
    fse->gid  = (gid_t)va.va_gid;

    return 0;
}