/*
 * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/file_internal.h>
#include <sys/vnode_internal.h>
#include <sys/mount_internal.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/malloc.h>
#include <sys/dirent.h>
#include <sys/sysctl.h>
#include <machine/cons.h>
#include <miscfs/specfs/specdev.h>
#include <miscfs/devfs/devfs.h>
#include <sys/filio.h>
#include <kern/locks.h>
#include <libkern/OSAtomic.h>
#include <stdarg.h>             // va_list/va_arg, used by add_fsevent() below

#include <bsm/audit_kernel.h>
#include <bsm/audit_kevents.h>

// where all our structs and defines come from
#include <sys/fsevents.h>
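
/*
 * Overview: producers in the VFS layer call add_fsevent() to record a
 * filesystem event into the global fs_event_buf[] pool.  Consumers clone
 * a watcher fd with the FSEVENTS_CLONE ioctl on /dev/fsevents (see
 * fseventsioctl() below) and read events back through fmod_watch().
 * Each watcher keeps a private ring of pointers into fs_event_buf[],
 * and per-event refcounts track how many watcher queues still reference
 * a given slot.
 */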

typedef struct kfs_event_arg {
    int16_t type;          // FSE_ARG_* code for this argument
    int16_t len;           // size of the data for string/raw arguments
    union {
        struct vnode *vp;
        char         *str;
        void         *ptr;
        int32_t       int32;
        dev_t         dev;
        ino_t         ino;
        int32_t       mode;
        uid_t         uid;
        gid_t         gid;
    } data;                // NOTE: union members reconstructed from their uses in this file
} kfs_event_arg;

#define KFS_NUM_ARGS  FSE_MAX_ARGS
typedef struct kfs_event {
    int32_t       type;      // type code of this event
    u_int32_t     refcount;  // number of clients referencing this
    pid_t         pid;       // pid of the process that did the op
    kfs_event_arg args[KFS_NUM_ARGS];
} kfs_event;
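
// Slot lifecycle: an fs_event_buf[] slot is free when its type is
// FSE_INVALID.  add_fsevent() claims a slot under event_buf_lock, every
// watcher queue holding the event bumps refcount, and whichever
// OSAddAtomic(-1, ...) sees a previous value of 1 hands the slot to
// do_free_event().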

typedef struct fs_event_watcher {
    SLIST_ENTRY(fs_event_watcher) link;
    int8_t      *event_list;        // the events we're interested in
    int32_t      num_events;        // number of entries in event_list
    dev_t       *devices_to_watch;  // only report events from these devices
    int32_t      num_devices;       // number of entries in devices_to_watch
    int32_t      flags;             // WATCHER_* flags below
    kfs_event  **event_queue;
    int32_t      eventq_size;       // number of event pointers in queue
    int32_t      rd, wr;            // indices to the event_queue
    int32_t      blockers;          // readers asleep waiting for events
    int32_t      num_readers;       // enforces a single concurrent reader
} fs_event_watcher;
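
// The event_queue is a ring buffer: rd == wr means empty, and
// watcher_add_event() treats ((wr + 1) % eventq_size) == rd as full, so a
// queue of eventq_size pointers holds at most eventq_size - 1 events.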

// fs_event_watcher flags
#define WATCHER_DROPPED_EVENTS  0x0001
#define WATCHER_CLOSING         0x0002

static SLIST_HEAD(watch_list, fs_event_watcher) watch_list_head = { NULL };

#define MAX_KFS_EVENTS 2048

// this array holds each pending event
static kfs_event fs_event_buf[MAX_KFS_EVENTS];
static int       free_event_idx = 0;
static int       fs_event_init  = 0;

// this array records whether anyone is interested in a
// particular type of event.  if no one is, we bail out
// early from the event delivery.
static int16_t fs_event_type_watchers[FSE_MAX_EVENTS];
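
// fs_event_type_watchers[] is incremented in add_watcher() and decremented
// in remove_watcher(); need_fsevent() and add_fsevent() test it without a
// lock as a cheap early-out before doing any real work.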

static int watcher_add_event(fs_event_watcher *watcher, kfs_event *kfse);

static lck_grp_attr_t *fsevent_group_attr;
static lck_attr_t     *fsevent_lock_attr;
static lck_grp_t      *fsevent_mutex_group;

static lck_grp_t      *fsevent_rw_group;

static lck_rw_t  fsevent_big_lock;  // always grab this first
static lck_mtx_t watch_list_lock;
static lck_mtx_t event_buf_lock;

static void init_pathbuff(void);

static void
fsevents_internal_init(void)
{
    int i;

    if (fs_event_init++ != 0) {
        return;
    }

    for(i=0; i < FSE_MAX_EVENTS; i++) {
        fs_event_type_watchers[i] = 0;
    }

    for(i=0; i < MAX_KFS_EVENTS; i++) {
        fs_event_buf[i].type     = FSE_INVALID;
        fs_event_buf[i].refcount = 0;
    }

    SLIST_INIT(&watch_list_head);

    fsevent_lock_attr   = lck_attr_alloc_init();
    fsevent_group_attr  = lck_grp_attr_alloc_init();
    fsevent_mutex_group = lck_grp_alloc_init("fsevent-mutex", fsevent_group_attr);
    fsevent_rw_group    = lck_grp_alloc_init("fsevent-rw", fsevent_group_attr);

    lck_mtx_init(&watch_list_lock, fsevent_mutex_group, fsevent_lock_attr);
    lck_mtx_init(&event_buf_lock, fsevent_mutex_group, fsevent_lock_attr);

    lck_rw_init(&fsevent_big_lock, fsevent_rw_group, fsevent_lock_attr);

    init_pathbuff();
}

static void
lock_watch_list(void)
{
    lck_mtx_lock(&watch_list_lock);
}

static void
unlock_watch_list(void)
{
    lck_mtx_unlock(&watch_list_lock);
}

static void
lock_fs_event_buf(void)
{
    lck_mtx_lock(&event_buf_lock);
}

static void
unlock_fs_event_buf(void)
{
    lck_mtx_unlock(&event_buf_lock);
}

static void do_free_event(kfs_event *kfse);

static int
watcher_cares_about_dev(fs_event_watcher *watcher, dev_t dev)
{
    int i;

    // if there is no list of devices to watch, then always
    // say we're interested so we'll report all events from
    // all devices
    if (watcher->devices_to_watch == NULL) {
        return 1;
    }

    for(i=0; i < watcher->num_devices; i++) {
        if (dev == watcher->devices_to_watch[i]) {
            // found a match! that means we want events
            // from this device.
            return 1;
        }
    }

    // if we're here it's not in the devices_to_watch[]
    // list so that means we do not care about it
    return 0;
}

int
need_fsevent(int type, vnode_t vp)
{
    fs_event_watcher *watcher;
    dev_t             dev;

    if (fs_event_type_watchers[type] == 0)
        return 0;

    dev = (dev_t)(vp->v_mount->mnt_vfsstat.f_fsid.val[0]);

    lock_watch_list();

    SLIST_FOREACH(watcher, &watch_list_head, link) {
        if (watcher->event_list[type] == FSE_REPORT && watcher_cares_about_dev(watcher, dev)) {
            unlock_watch_list();
            return 1;
        }
    }

    unlock_watch_list();
    return 0;
}
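
/*
 * Sketch of a typical call site in the VFS layer (hypothetical caller,
 * not part of this file): check need_fsevent() first to skip the vararg
 * packing when no watcher cares, then emit the event terminated by
 * FSE_ARG_DONE:
 *
 *     if (need_fsevent(FSE_DELETE, vp)) {
 *         add_fsevent(FSE_DELETE, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
 *     }
 */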

int
add_fsevent(int type, vfs_context_t ctx, ...)
{
    struct proc       *p = vfs_context_proc(ctx);
    int                i, arg_idx, num_deliveries = 0;
    kfs_event_arg     *kea;
    kfs_event         *kfse;
    fs_event_watcher  *watcher;
    va_list            ap;
    int                error = 0, base;
    dev_t              dev = 0;

    va_start(ap, ctx);

    // if no one cares about this type of event, bail out
    if (fs_event_type_watchers[type] == 0) {
        va_end(ap);
        return 0;
    }

    lck_rw_lock_shared(&fsevent_big_lock);

    // find a free event and snag it for our use
    // NOTE: do not do anything that would block until
    //       the lock is dropped.
    lock_fs_event_buf();

    base = free_event_idx;
    for(i=0; i < MAX_KFS_EVENTS; i++) {
        if (fs_event_buf[(base + i) % MAX_KFS_EVENTS].type == FSE_INVALID) {
            break;
        }
    }

    if (i >= MAX_KFS_EVENTS) {
        // yikes! no free slots
        unlock_fs_event_buf();
        va_end(ap);

        lock_watch_list();
        SLIST_FOREACH(watcher, &watch_list_head, link) {
            watcher->flags |= WATCHER_DROPPED_EVENTS;
            wakeup((caddr_t)watcher);
        }
        unlock_watch_list();

        lck_rw_done(&fsevent_big_lock);

        printf("fs_events: add_event: event queue is full! dropping events.\n");
        return ENOSPC;
    }

    kfse = &fs_event_buf[(base + i) % MAX_KFS_EVENTS];

    free_event_idx = ((base + i) % MAX_KFS_EVENTS) + 1;

    kfse->type     = type;
    kfse->refcount = 1;
    kfse->pid      = p->p_pid;

    unlock_fs_event_buf();  // at this point it's safe to unlock
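
    // NOTE: the slot can be filled without event_buf_lock held because a
    //       type other than FSE_INVALID marks it claimed; only the
    //       potentially-blocking vnode calls below require the lock to be
    //       dropped first.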

    // now process the arguments passed in and copy them into
    // the kfse
    //
    arg_idx = 0;
    while(arg_idx < KFS_NUM_ARGS) {
        kea = &kfse->args[arg_idx++];
        kea->type = va_arg(ap, int32_t);

        if (kea->type == FSE_ARG_DONE) {
            break;
        }

        switch(kea->type) {
            case FSE_ARG_VNODE: {
                // this expands out into multiple arguments to the client
                struct vnode *vp;
                struct vnode_attr va;

                kea->data.vp = vp = va_arg(ap, struct vnode *);
                if (kea->data.vp == NULL) {
                    panic("add_fsevent: you can't pass me a NULL vnode ptr (type %d)!\n",
                          type);
                }

                if (vnode_ref_ext(kea->data.vp, O_EVTONLY) != 0) {
                    kea->type = FSE_ARG_DONE;
                    error     = EINVAL;
                    goto clean_up;
                }

                VATTR_INIT(&va);
                VATTR_WANTED(&va, va_fsid);
                VATTR_WANTED(&va, va_fileid);
                VATTR_WANTED(&va, va_mode);
                VATTR_WANTED(&va, va_uid);
                VATTR_WANTED(&va, va_gid);
                if (vnode_getattr(kea->data.vp, &va, ctx) != 0) {
                    vnode_rele_ext(kea->data.vp, O_EVTONLY, 0);
                    kea->type = FSE_ARG_DONE;
                    error     = EINVAL;
                    goto clean_up;
                }

                kea = &kfse->args[arg_idx++];
                kea->type     = FSE_ARG_DEV;
                kea->data.dev = dev = (dev_t)va.va_fsid;

                kea = &kfse->args[arg_idx++];
                kea->type     = FSE_ARG_INO;
                kea->data.ino = (ino_t)va.va_fileid;

                kea = &kfse->args[arg_idx++];
                kea->type      = FSE_ARG_MODE;
                kea->data.mode = (int32_t)vnode_vttoif(vnode_vtype(vp)) | va.va_mode;

                kea = &kfse->args[arg_idx++];
                kea->type     = FSE_ARG_UID;
                kea->data.uid = va.va_uid;

                kea = &kfse->args[arg_idx++];
                kea->type     = FSE_ARG_GID;
                kea->data.gid = va.va_gid;
                break;
            }

            case FSE_ARG_FINFO: {
                fse_info *fse;

                fse = va_arg(ap, fse_info *);

                kea->type     = FSE_ARG_DEV;
                kea->data.dev = dev = (dev_t)fse->dev;

                kea = &kfse->args[arg_idx++];
                kea->type     = FSE_ARG_INO;
                kea->data.ino = (ino_t)fse->ino;

                kea = &kfse->args[arg_idx++];
                kea->type      = FSE_ARG_MODE;
                kea->data.mode = (int32_t)fse->mode;

                kea = &kfse->args[arg_idx++];
                kea->type     = FSE_ARG_UID;
                kea->data.uid = (uid_t)fse->uid;

                kea = &kfse->args[arg_idx++];
                kea->type     = FSE_ARG_GID;
                kea->data.gid = (gid_t)fse->gid;
                break;
            }

            case FSE_ARG_STRING:
                kea->len      = (int16_t)(va_arg(ap, int32_t) & 0xffff);
                kea->data.str = vfs_addname(va_arg(ap, char *), kea->len, 0, 0);
                break;

            case FSE_ARG_INT32:
                kea->data.int32 = va_arg(ap, int32_t);
                break;

            case FSE_ARG_INT64:
                printf("fs_events: 64-bit args not implemented.\n");
//              kea->data.int64 = va_arg(ap, int64_t);
                break;

            case FSE_ARG_RAW:
                kea->len = (int16_t)(va_arg(ap, int32_t) & 0xffff);
                MALLOC(kea->data.ptr, void *, kea->len, M_TEMP, M_WAITOK);
                memcpy(kea->data.ptr, va_arg(ap, void *), kea->len);
                break;

            case FSE_ARG_DEV:
                kea->data.dev = dev = va_arg(ap, dev_t);
                break;

            case FSE_ARG_MODE:
                kea->data.mode = va_arg(ap, int32_t);
                break;

            case FSE_ARG_INO:
                kea->data.ino = va_arg(ap, ino_t);
                break;

            case FSE_ARG_UID:
                kea->data.uid = va_arg(ap, uid_t);
                break;

            case FSE_ARG_GID:
                kea->data.gid = va_arg(ap, gid_t);
                break;

            default:
                printf("add_fsevent: unknown type %d\n", kea->type);
                // just skip one 32-bit word and hope we sync up...
                (void)va_arg(ap, int32_t);
                break;
        }
    }

  clean_up:
    va_end(ap);

    // now we have to go and let everyone who is interested in
    // this type of event know about it...
    lock_watch_list();

    SLIST_FOREACH(watcher, &watch_list_head, link) {
        if (watcher->event_list[type] == FSE_REPORT && watcher_cares_about_dev(watcher, dev)) {
            if (watcher_add_event(watcher, kfse) == 0) {
                num_deliveries++;
            }
        }
    }

    unlock_watch_list();

    // just in case no one was interested after all...
    if (OSAddAtomic(-1, (SInt32 *)&kfse->refcount) == 1) {
        do_free_event(kfse);
    }

    lck_rw_done(&fsevent_big_lock);
    return error;
}
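
// Reference counting recap: add_fsevent() creates an event holding one
// reference of its own, watcher_add_event() adds one per queue that takes
// the event, and since OSAddAtomic() returns the *previous* value, the
// path that sees 1 dropped the last reference and must free the slot.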

static void
do_free_event(kfs_event *kfse)
{
    int            i;
    kfs_event_arg *kea, all_args[KFS_NUM_ARGS];

    lock_fs_event_buf();

    if (kfse->refcount > 0) {
        panic("do_free_event: free'ing a kfsevent w/refcount == %d (kfse %p)\n",
              kfse->refcount, kfse);
    }

    // make a copy of this so we can free things without
    // holding the fs_event_buf lock
    //
    memcpy(&all_args[0], &kfse->args[0], sizeof(all_args));

    // and just to be anal, set this so that there are no args
    kfse->args[0].type = FSE_ARG_DONE;

    // mark this fsevent as invalid
    kfse->type = FSE_INVALID;

    free_event_idx = (kfse - fs_event_buf);

    unlock_fs_event_buf();

    for(i=0; i < KFS_NUM_ARGS; i++) {
        kea = &all_args[i];
        if (kea->type == FSE_ARG_DONE) {
            break;
        }

        switch(kea->type) {
            case FSE_ARG_VNODE:
                vnode_rele_ext(kea->data.vp, O_EVTONLY, 0);
                break;
            case FSE_ARG_STRING:
                vfs_removename(kea->data.str);
                break;
            case FSE_ARG_RAW:
                FREE(kea->data.ptr, M_TEMP);
                break;
        }
    }
}

static int
add_watcher(int8_t *event_list, int32_t num_events, int32_t eventq_size, fs_event_watcher **watcher_out)
{
    int               i;
    fs_event_watcher *watcher;

    if (eventq_size < 0 || eventq_size > MAX_KFS_EVENTS) {
        eventq_size = MAX_KFS_EVENTS;
    }

    // Note: the event_queue follows the fs_event_watcher struct
    //       in memory so we only have to do one allocation
    MALLOC(watcher, fs_event_watcher *,
           sizeof(fs_event_watcher) + eventq_size * sizeof(kfs_event *),
           M_TEMP, M_WAITOK);

    watcher->event_list       = event_list;
    watcher->num_events       = num_events;
    watcher->devices_to_watch = NULL;
    watcher->num_devices      = 0;
    watcher->flags            = 0;
    watcher->event_queue      = (kfs_event **)&watcher[1];
    watcher->eventq_size      = eventq_size;
    watcher->rd               = 0;
    watcher->wr               = 0;
    watcher->blockers         = 0;
    watcher->num_readers      = 0;

    lock_watch_list();

    // now update the global list of who's interested in
    // events of a particular type...
    for(i=0; i < num_events; i++) {
        if (event_list[i] != FSE_IGNORE && i < FSE_MAX_EVENTS) {
            fs_event_type_watchers[i]++;
        }
    }

    SLIST_INSERT_HEAD(&watch_list_head, watcher, link);

    unlock_watch_list();

    *watcher_out = watcher;

    return 0;
}

static void
remove_watcher(fs_event_watcher *target)
{
    int               i;
    fs_event_watcher *watcher;
    kfs_event        *kfse;

    lck_rw_lock_shared(&fsevent_big_lock);

    lock_watch_list();

    SLIST_FOREACH(watcher, &watch_list_head, link) {
        if (watcher == target) {
            SLIST_REMOVE(&watch_list_head, watcher, fs_event_watcher, link);

            for(i=0; i < watcher->num_events; i++) {
                if (watcher->event_list[i] != FSE_IGNORE && i < FSE_MAX_EVENTS) {
                    fs_event_type_watchers[i]--;
                }
            }

            unlock_watch_list();

            // drain the event_queue
            for(i=watcher->rd; i != watcher->wr; i=(i+1) % watcher->eventq_size) {
                kfse = watcher->event_queue[i];

                if (OSAddAtomic(-1, (SInt32 *)&kfse->refcount) == 1) {
                    do_free_event(kfse);
                }
            }

            if (watcher->event_list) {
                FREE(watcher->event_list, M_TEMP);
                watcher->event_list = NULL;
            }
            if (watcher->devices_to_watch) {
                FREE(watcher->devices_to_watch, M_TEMP);
                watcher->devices_to_watch = NULL;
            }
            FREE(watcher, M_TEMP);

            lck_rw_done(&fsevent_big_lock);
            return;
        }
    }

    unlock_watch_list();
    lck_rw_done(&fsevent_big_lock);
}

static int
watcher_add_event(fs_event_watcher *watcher, kfs_event *kfse)
{
    if (((watcher->wr + 1) % watcher->eventq_size) == watcher->rd) {
        watcher->flags |= WATCHER_DROPPED_EVENTS;
        wakeup((caddr_t)watcher);
        return ENOSPC;
    }

    watcher->event_queue[watcher->wr] = kfse;
    OSAddAtomic(1, (SInt32 *)&kfse->refcount);
    watcher->wr = (watcher->wr + 1) % watcher->eventq_size;

    // wake up the watcher if he's waiting!
    wakeup((caddr_t)watcher);

    return 0;
}
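
// Note that on overflow the event is simply not queued for this watcher;
// the WATCHER_DROPPED_EVENTS flag makes the next fmod_watch() call emit a
// synthetic FSE_EVENTS_DROPPED record ahead of any further events.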

static int
fmod_watch(fs_event_watcher *watcher, struct uio *uio)
{
    int            i, error = 0, last_full_event_resid;
    kfs_event     *kfse;
    kfs_event_arg *kea;
    uint16_t       tmp16;

    // LP64todo - fix this
    last_full_event_resid = uio_resid(uio);

    // need at least 2048 bytes of space (maxpathlen + 1 event buf)
    if (uio_resid(uio) < 2048 || watcher == NULL) {
        return EINVAL;
    }

    if (OSAddAtomic(1, (SInt32 *)&watcher->num_readers) != 0) {
        // don't allow multiple threads to read from the fd at the same time
        OSAddAtomic(-1, (SInt32 *)&watcher->num_readers);
        return EAGAIN;
    }

    if (watcher->rd == watcher->wr) {
        if (watcher->flags & WATCHER_CLOSING) {
            OSAddAtomic(-1, (SInt32 *)&watcher->num_readers);
            return 0;
        }
        OSAddAtomic(1, (SInt32 *)&watcher->blockers);

        // there's nothing to do, go to sleep
        error = tsleep((caddr_t)watcher, PUSER|PCATCH, "fsevents_empty", 0);

        OSAddAtomic(-1, (SInt32 *)&watcher->blockers);

        if (error != 0 || (watcher->flags & WATCHER_CLOSING)) {
            OSAddAtomic(-1, (SInt32 *)&watcher->num_readers);
            return error;
        }
    }

    // if we dropped events, return that as an event first
    if (watcher->flags & WATCHER_DROPPED_EVENTS) {
        int32_t val = FSE_EVENTS_DROPPED;

        error = uiomove((caddr_t)&val, sizeof(int32_t), uio);
        if (error == 0) {
            val   = 0;             // a fake pid
            error = uiomove((caddr_t)&val, sizeof(int32_t), uio);

            tmp16 = FSE_ARG_DONE;  // makes it a consistent msg
            error = uiomove((caddr_t)&tmp16, sizeof(int16_t), uio);
        }

        if (error) {
            OSAddAtomic(-1, (SInt32 *)&watcher->num_readers);
            return error;
        }

        watcher->flags &= ~WATCHER_DROPPED_EVENTS;
    }

    // check if the next chunk of data will fit in the user's
    // buffer.  if not, just goto get_out which will return
    // the number of bytes worth of events that we did read.
    // this leaves the event that didn't fit in the queue.
    //
    // LP64todo - fix this
#define CHECK_UPTR(size) if ((size) > (unsigned)uio_resid(uio)) {      \
                             uio_setresid(uio, last_full_event_resid); \
                             goto get_out;                             \
                         }
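
// On-the-wire record layout produced below: an int32 event type, an int32
// pid, then a sequence of arguments, each a uint16 FSE_ARG_* code followed
// by a uint16 length and that many bytes of data (FSE_ARG_VNODE is rendered
// as a path string), terminated by a bare FSE_ARG_DONE code.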

    for (; uio_resid(uio) > 0 && watcher->rd != watcher->wr; ) {
        kfse = watcher->event_queue[watcher->rd];

        // copy out the type of the event
        CHECK_UPTR(sizeof(int32_t));
        if ((error = uiomove((caddr_t)&kfse->type, sizeof(int32_t), uio)) != 0) {
            goto get_out;
        }

        // now copy out the pid of the person that changed the file
        CHECK_UPTR(sizeof(pid_t));
        if ((error = uiomove((caddr_t)&kfse->pid, sizeof(pid_t), uio)) != 0) {
            goto get_out;
        }

        for(i=0; i < KFS_NUM_ARGS && error == 0; i++) {
            char *pathbuff;
            int   pathbuff_len;

            kea = &kfse->args[i];

            tmp16 = (uint16_t)kea->type;
            CHECK_UPTR(sizeof(uint16_t));
            error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
            if (error || kea->type == FSE_ARG_DONE) {
                break;
            }

            switch(kea->type) {
                case FSE_ARG_VNODE:
                    pathbuff     = get_pathbuff();
                    pathbuff_len = MAXPATHLEN;
                    if (kea->data.vp == NULL) {
                        printf("fmod_watch: whoa... vp == NULL (%d)!\n", kfse->type);
                        i--;
                        release_pathbuff(pathbuff);
                        continue;
                    }

                    if (vn_getpath(kea->data.vp, pathbuff, &pathbuff_len) != 0 || pathbuff[0] == '\0') {
//                      printf("fmod_watch: vn_getpath failed! vp 0x%x vname 0x%x (%s) vparent 0x%x\n",
//                             kea->data.vp,
//                             VNAME(kea->data.vp),
//                             VNAME(kea->data.vp) ? VNAME(kea->data.vp) : "<null>",
//                             VPARENT(kea->data.vp));
                    }

                    CHECK_UPTR(sizeof(uint16_t));
                    tmp16 = (uint16_t)pathbuff_len;
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);

                    CHECK_UPTR((unsigned)pathbuff_len);
                    error = uiomove((caddr_t)pathbuff, pathbuff_len, uio);
                    release_pathbuff(pathbuff);
                    break;

                case FSE_ARG_STRING:
                    tmp16 = (uint16_t)kea->len;
                    CHECK_UPTR(sizeof(uint16_t));
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);

                    CHECK_UPTR(kea->len);
                    error = uiomove((caddr_t)kea->data.str, kea->len, uio);
                    break;

                case FSE_ARG_INT32:
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(int32_t));
                    tmp16 = sizeof(int32_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.int32, sizeof(int32_t), uio);
                    break;

                case FSE_ARG_INT64:
                    printf("fs_events: 64-bit args not implemented on copyout.\n");
//                  CHECK_UPTR(sizeof(uint16_t) + sizeof(int64_t));
//                  tmp16 = sizeof(int64_t);
//                  error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
//                  error = uiomove((caddr_t)&kea->data.int64, sizeof(int64_t), uio);
                    break;

                case FSE_ARG_RAW:
                    tmp16 = (uint16_t)kea->len;
                    CHECK_UPTR(sizeof(uint16_t));
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);

                    CHECK_UPTR(kea->len);
                    error = uiomove((caddr_t)kea->data.ptr, kea->len, uio);
                    break;

                case FSE_ARG_DEV:
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(dev_t));
                    tmp16 = sizeof(dev_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.dev, sizeof(dev_t), uio);
                    break;

                case FSE_ARG_INO:
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(ino_t));
                    tmp16 = sizeof(ino_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.ino, sizeof(ino_t), uio);
                    break;

                case FSE_ARG_MODE:
                    // XXXdbg - NOTE: we use 32-bits for the mode, not
                    //                16-bits like a real mode_t
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(int32_t));
                    tmp16 = sizeof(int32_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.mode, sizeof(int32_t), uio);
                    break;

                case FSE_ARG_UID:
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(uid_t));
                    tmp16 = sizeof(uid_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.uid, sizeof(uid_t), uio);
                    break;

                case FSE_ARG_GID:
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(gid_t));
                    tmp16 = sizeof(gid_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.gid, sizeof(gid_t), uio);
                    break;

                default:
                    printf("fmod_watch: unknown arg type %d.\n", kea->type);
                    break;
            }
        }

        // make sure that we always end with a FSE_ARG_DONE
        if (i >= KFS_NUM_ARGS) {
            tmp16 = FSE_ARG_DONE;
            CHECK_UPTR(sizeof(uint16_t));
            error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
        }

        // LP64todo - fix this
        last_full_event_resid = uio_resid(uio);

        watcher->rd = (watcher->rd + 1) % watcher->eventq_size;

        if (OSAddAtomic(-1, (SInt32 *)&kfse->refcount) == 1) {
            do_free_event(kfse);
        }
    }

  get_out:
    OSAddAtomic(-1, (SInt32 *)&watcher->num_readers);
    return error;
}

//
// release any references we might have on vnodes which live on
// the mount point passed to us (so that it can be cleanly
// unmounted).
//
// since we don't want to lose the events we'll convert the
// vnode refs to the full path, inode #, and uid.
//
void
fsevent_unmount(struct mount *mp)
{
    int            i, j;
    kfs_event     *kfse;
    kfs_event_arg *kea;

    lck_rw_lock_exclusive(&fsevent_big_lock);
    lock_fs_event_buf();

    for(i=0; i < MAX_KFS_EVENTS; i++) {
        if (fs_event_buf[i].type == FSE_INVALID) {
            continue;
        }

        kfse = &fs_event_buf[i];
        for(j=0; j < KFS_NUM_ARGS; j++) {
            kea = &kfse->args[j];
            if (kea->type == FSE_ARG_DONE) {
                break;
            }

            if (kea->type == FSE_ARG_VNODE && kea->data.vp->v_mount == mp) {
                struct vnode *vp;
                char         *pathbuff;
                int           pathbuff_len;

                vp           = kea->data.vp;
                pathbuff     = get_pathbuff();
                pathbuff_len = MAXPATHLEN;

                if (vn_getpath(vp, pathbuff, &pathbuff_len) != 0 || pathbuff[0] == '\0') {
                    char *vname;

                    vname = vnode_getname(vp);

                    printf("fsevent_unmount: vn_getpath failed! vp 0x%x vname 0x%x (%s) vparent 0x%x\n",
                           vp, vname, vname ? vname : "<null>", vp->v_parent);

                    if (vname)
                        vnode_putname(vname);

                    strcpy(pathbuff, "UNKNOWN-FILE");
                    pathbuff_len = strlen(pathbuff) + 1;
                }

                // switch the type of the string
                kea->type     = FSE_ARG_STRING;
                kea->data.str = vfs_addname(pathbuff, pathbuff_len, 0, 0);
                kea->len      = pathbuff_len;
                release_pathbuff(pathbuff);

                // and finally let go of the reference on the vnode
                vnode_rele_ext(vp, O_EVTONLY, 0);
            }
        }
    }

    unlock_fs_event_buf();
    lck_rw_done(&fsevent_big_lock);
}

//
// /dev/fsevents device code
//
static int fsevents_installed = 0;
static struct lock__bsd__ fsevents_lck;

typedef struct fsevent_handle {
    fs_event_watcher *watcher;
    struct selinfo    si;     // needed by the selrecord()/selwakeup() calls below
} fsevent_handle;

static int
fseventsf_read(struct fileproc *fp, struct uio *uio,
               __unused kauth_cred_t *cred, __unused int flags,
               __unused struct proc *p)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;
    int error;

    error = fmod_watch(fseh->watcher, uio);

    return error;
}

static int
fseventsf_write(__unused struct fileproc *fp, __unused struct uio *uio,
                __unused kauth_cred_t *cred, __unused int flags,
                __unused struct proc *p)
{
    return EIO;   // writing to a cloned fsevents fd is not supported
}

static int
fseventsf_ioctl(struct fileproc *fp, u_long cmd, caddr_t data, struct proc *p)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;
    int ret = 0;
    fsevent_dev_filter_args *devfilt_args = (fsevent_dev_filter_args *)data;

    switch (cmd) {
        case FSEVENTS_DEVICE_FILTER: {
            int    new_num_devices;
            dev_t *devices_to_watch, *tmp = NULL;

            if (devfilt_args->num_devices > 256) {
                ret = EINVAL;
                break;
            }

            new_num_devices = devfilt_args->num_devices;
            if (new_num_devices == 0) {
                tmp = fseh->watcher->devices_to_watch;

                lock_watch_list();
                fseh->watcher->devices_to_watch = NULL;
                fseh->watcher->num_devices = new_num_devices;
                unlock_watch_list();

                if (tmp) {
                    FREE(tmp, M_TEMP);
                }
                break;
            }

            MALLOC(devices_to_watch, dev_t *,
                   new_num_devices * sizeof(dev_t),
                   M_TEMP, M_WAITOK);
            if (devices_to_watch == NULL) {
                ret = ENOMEM;
                break;
            }

            ret = copyin(CAST_USER_ADDR_T(devfilt_args->devices),
                         (void *)devices_to_watch,
                         new_num_devices * sizeof(dev_t));
            if (ret) {
                FREE(devices_to_watch, M_TEMP);
                break;
            }

            lock_watch_list();
            fseh->watcher->num_devices = new_num_devices;
            tmp = fseh->watcher->devices_to_watch;
            fseh->watcher->devices_to_watch = devices_to_watch;
            unlock_watch_list();

            if (tmp) {
                FREE(tmp, M_TEMP);
            }
            break;
        }

        default:
            ret = EINVAL;
            break;
    }

    return (ret);
}

static int
fseventsf_select(struct fileproc *fp, int which, void *wql, struct proc *p)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;
    int ready = 0;

    if ((which != FREAD) || (fseh->watcher->flags & WATCHER_CLOSING)) {
        return 0;
    }

    // if there's nothing in the queue, we're not ready
    if (fseh->watcher->rd != fseh->watcher->wr) {
        ready = 1;
    }

    if (!ready) {
        selrecord(p, &fseh->si, wql);
    }

    return ready;
}

static int
fseventsf_stat(struct fileproc *fp, struct stat *sb, struct proc *p)
{
    return ENOTSUP;   // fstat() on a cloned fsevents fd is not supported
}

static int
fseventsf_close(struct fileglob *fg, struct proc *p)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fg->fg_data;

    remove_watcher(fseh->watcher);

    fg->fg_data = NULL;
    fseh->watcher = NULL;
    FREE(fseh, M_TEMP);

    return 0;
}

static int
fseventsf_kqfilter(struct fileproc *fp, struct knote *kn, struct proc *p)
{
    // kqueue filtering is not supported for cloned fsevents fds
    return EINVAL;
}

static int
fseventsf_drain(struct fileproc *fp, struct proc *p)
{
    int counter = 0;
    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;

    fseh->watcher->flags |= WATCHER_CLOSING;

    // if there are people still waiting, sleep for 10ms to
    // let them clean up and get out of there.  however we
    // also don't want to get stuck forever so if they don't
    // exit after 5 seconds we're tearing things down anyway.
    while(fseh->watcher->blockers && counter++ < 500) {
        // issue wakeup in case anyone is blocked waiting for an event
        // do this each time we wakeup in case the blocker missed
        // the wakeup due to the unprotected test of WATCHER_CLOSING
        // and decision to tsleep in fmod_watch... this bit of
        // latency is a decent tradeoff against not having to
        // take and drop a lock in fmod_watch
        wakeup((caddr_t)fseh->watcher);

        tsleep((caddr_t)fseh->watcher, PRIBIO, "watcher-close", 1);
    }

    return 0;
}

static int
fseventsopen(dev_t dev, int flag, int mode, struct proc *p)
{
    if (!is_suser()) {
        return EPERM;   // only the superuser may open /dev/fsevents
    }

    return 0;
}

static int
fseventsclose(dev_t dev, int flag, int mode, struct proc *p)
{
    return 0;
}

static int
fseventsread(dev_t dev, struct uio *uio, int ioflag)
{
    return EIO;   // events are read through a cloned fd, not the raw device
}

static int
fseventswrite(dev_t dev, struct uio *uio, int ioflag)
{
    return EIO;
}

static struct fileops fsevents_fops = {
    fseventsf_read,
    fseventsf_write,
    fseventsf_ioctl,
    fseventsf_select,
    fseventsf_close,
    fseventsf_kqfilter,
    fseventsf_drain
};

static int
fseventsioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
    struct fileproc *f;
    int fd, error;
    fsevent_handle *fseh = NULL;
    fsevent_clone_args *fse_clone_args = (fsevent_clone_args *)data;
    int8_t *event_list;

    switch (cmd) {
        case FSEVENTS_CLONE:
            if (fse_clone_args->num_events < 0 || fse_clone_args->num_events > 4096) {
                return EINVAL;
            }

            MALLOC(fseh, fsevent_handle *, sizeof(fsevent_handle),
                   M_TEMP, M_WAITOK);
            memset(fseh, 0, sizeof(fsevent_handle));

            MALLOC(event_list, int8_t *,
                   fse_clone_args->num_events * sizeof(int8_t),
                   M_TEMP, M_WAITOK);

            error = copyin(CAST_USER_ADDR_T(fse_clone_args->event_list),
                           (void *)event_list,
                           fse_clone_args->num_events * sizeof(int8_t));
            if (error) {
                FREE(event_list, M_TEMP);
                FREE(fseh, M_TEMP);
                return error;
            }

            error = add_watcher(event_list,
                                fse_clone_args->num_events,
                                fse_clone_args->event_queue_depth,
                                &fseh->watcher);
            if (error) {
                FREE(event_list, M_TEMP);
                FREE(fseh, M_TEMP);
                return error;
            }

            error = falloc(p, &f, &fd);
            if (error) {
                FREE(event_list, M_TEMP);
                FREE(fseh, M_TEMP);
                return (error);
            }
            proc_fdlock(p);
            f->f_fglob->fg_flag = FREAD | FWRITE;
            f->f_fglob->fg_type = DTYPE_FSEVENTS;
            f->f_fglob->fg_ops  = &fsevents_fops;
            f->f_fglob->fg_data = (caddr_t) fseh;
            proc_fdunlock(p);

            copyout((void *)&fd, CAST_USER_ADDR_T(fse_clone_args->fd), sizeof(int32_t));

            proc_fdlock(p);
            *fdflags(p, fd) &= ~UF_RESERVED;
            fp_drop(p, fd, f, 1);
            proc_fdunlock(p);
            break;

        default:
            error = EINVAL;
            break;
    }

    return error;
}
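
/*
 * Userspace usage sketch (hypothetical client, not part of this file;
 * exact field types are assumptions based on the copyin/copyout above):
 *
 *     int8_t  events[FSE_MAX_EVENTS];
 *     int32_t cloned_fd;
 *     fsevent_clone_args clone_args;
 *
 *     memset(events, FSE_REPORT, sizeof(events));
 *     clone_args.event_list        = events;
 *     clone_args.num_events        = FSE_MAX_EVENTS;
 *     clone_args.event_queue_depth = 512;
 *     clone_args.fd                = &cloned_fd;
 *
 *     int dev_fd = open("/dev/fsevents", O_RDONLY);
 *     ioctl(dev_fd, FSEVENTS_CLONE, &clone_args);
 *     // read() on cloned_fd now returns the records built by fmod_watch()
 */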

static int
fseventsselect(dev_t dev, int rw, struct proc *p)
{
    return 0;
}

static void
fsevents_wakeup(fsevent_handle *fseh)
{
    wakeup((caddr_t)fseh);
    selwakeup(&fseh->si);
}

/*
 * A struct describing which functions will get invoked for certain
 * actions.
 */
static struct cdevsw fsevents_cdevsw =
{
    fseventsopen,   /* open */
    fseventsclose,  /* close */
    fseventsread,   /* read */
    fseventswrite,  /* write */
    fseventsioctl,  /* ioctl */
    nulldev,        /* stop */
    nulldev,        /* reset */
    NULL,           /* tty's */
    eno_select,     /* select */
    eno_mmap,       /* mmap */
    eno_strat,      /* strategy */
    eno_getc,       /* getc */
    eno_putc,       /* putc */
    0               /* type */
};

/*
 * Called to initialize our device,
 * and to register ourselves with devfs
 */
void
fsevents_init(void)
{
    int ret;

    if (fsevents_installed) {
        return;
    }

    fsevents_installed = 1;

    lockinit(&fsevents_lck, PLOCK, "fsevents", 0, 0);

    ret = cdevsw_add(-1, &fsevents_cdevsw);
    if (ret < 0) {
        fsevents_installed = 0;
        return;
    }

    devfs_make_node(makedev (ret, 0), DEVFS_CHAR,
                    UID_ROOT, GID_WHEEL, 0644, "fsevents", 0);

    fsevents_internal_init();
}

// XXXdbg - temporary path buffer handling
//
#define NUM_PATH_BUFFS 16
static char path_buff[NUM_PATH_BUFFS][MAXPATHLEN];
static char path_buff_inuse[NUM_PATH_BUFFS];

static lck_grp_attr_t *pathbuff_group_attr;
static lck_attr_t     *pathbuff_lock_attr;
static lck_grp_t      *pathbuff_mutex_group;
static lck_mtx_t       pathbuff_lock;

static void
init_pathbuff(void)
{
    pathbuff_lock_attr   = lck_attr_alloc_init();
    pathbuff_group_attr  = lck_grp_attr_alloc_init();
    pathbuff_mutex_group = lck_grp_alloc_init("pathbuff-mutex", pathbuff_group_attr);

    lck_mtx_init(&pathbuff_lock, pathbuff_mutex_group, pathbuff_lock_attr);
}

static void
lock_pathbuff(void)
{
    lck_mtx_lock(&pathbuff_lock);
}

static void
unlock_pathbuff(void)
{
    lck_mtx_unlock(&pathbuff_lock);
}

char *
get_pathbuff(void)
{
    int i;

    lock_pathbuff();
    for(i=0; i < NUM_PATH_BUFFS; i++) {
        if (path_buff_inuse[i] == 0) {
            break;
        }
    }

    if (i >= NUM_PATH_BUFFS) {
        char *path;

        // the fixed pool is exhausted; fall back to a zone allocation
        unlock_pathbuff();
        MALLOC_ZONE(path, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
        return path;
    }

    path_buff_inuse[i] = 1;
    unlock_pathbuff();
    return &path_buff[i][0];
}

void
release_pathbuff(char *path)
{
    int i;

    if (path == NULL) {
        return;
    }

    lock_pathbuff();
    for(i=0; i < NUM_PATH_BUFFS; i++) {
        if (path == &path_buff[i][0]) {
            path_buff[i][0] = '\0';
            path_buff_inuse[i] = 0;
            unlock_pathbuff();
            return;
        }
    }

    unlock_pathbuff();

    // if we get here then it wasn't one of our temp buffers
    FREE_ZONE(path, MAXPATHLEN, M_NAMEI);
}

int
get_fse_info(struct vnode *vp, fse_info *fse, vfs_context_t ctx)
{
    struct vnode_attr va;

    VATTR_INIT(&va);
    VATTR_WANTED(&va, va_fsid);
    VATTR_WANTED(&va, va_fileid);
    VATTR_WANTED(&va, va_mode);
    VATTR_WANTED(&va, va_uid);
    VATTR_WANTED(&va, va_gid);
    if (vnode_getattr(vp, &va, ctx) != 0) {
        return -1;
    }

    fse->dev  = (dev_t)va.va_fsid;
    fse->ino  = (ino_t)va.va_fileid;
    fse->mode = (int32_t)vnode_vttoif(vnode_vtype(vp)) | va.va_mode;
    fse->uid  = (uid_t)va.va_uid;
    fse->gid  = (gid_t)va.va_gid;

    return 0;
}
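
/*
 * Sketch of the intended pairing (hypothetical caller, not in this file):
 * a producer that cannot hold a vnode reference until delivery can capture
 * the file's identity up front and pass FSE_ARG_FINFO instead of
 * FSE_ARG_VNODE:
 *
 *     fse_info fse;
 *     if (get_fse_info(vp, &fse, ctx) == 0) {
 *         add_fsevent(FSE_CONTENT_MODIFIED, ctx,
 *                     FSE_ARG_FINFO, &fse,
 *                     FSE_ARG_DONE);
 *     }
 */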