/*
 * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/file_internal.h>
#include <sys/vnode_internal.h>
#include <sys/mount_internal.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/malloc.h>
#include <sys/dirent.h>
#include <sys/sysctl.h>
#include <machine/cons.h>
#include <miscfs/specfs/specdev.h>
#include <miscfs/devfs/devfs.h>
#include <sys/filio.h>
#include <architecture/byte_order.h>
#include <kern/locks.h>
#include <libkern/OSAtomic.h>

#include <bsm/audit_kernel.h>
#include <bsm/audit_kevents.h>

// where all our structs and defines come from
#include <sys/fsevents.h>
typedef struct kfs_event_arg {
    u_int16_t        type;
    u_int16_t        len;
    union {
        struct vnode *vp;
        char         *str;
        void         *ptr;
        int32_t       int32;
        dev_t         dev;
        ino_t         ino;
        int32_t       mode;
        uid_t         uid;
        gid_t         gid;
    } data;
} kfs_event_arg;

#define KFS_NUM_ARGS  FSE_MAX_ARGS
typedef struct kfs_event {
    int32_t        type;      // type code of this event
    u_int32_t      refcount;  // number of clients referencing this
    pid_t          pid;       // pid of the process that did the op
    kfs_event_arg  args[KFS_NUM_ARGS];
} kfs_event;
typedef struct fs_event_watcher {
    SLIST_ENTRY(fs_event_watcher) link;
    int8_t      *event_list;        // the events we're interested in
    int32_t      num_events;
    dev_t       *devices_to_watch;  // only report events from these devices
    int32_t      num_devices;
    int32_t      flags;
    kfs_event  **event_queue;
    int32_t      eventq_size;       // number of event pointers in queue
    int32_t      rd, wr;            // indices to the event_queue
    int32_t      blockers;
} fs_event_watcher;
// fs_event_watcher flags
#define WATCHER_DROPPED_EVENTS   0x0001
#define WATCHER_CLOSING          0x0002

static SLIST_HEAD(watch_list, fs_event_watcher) watch_list_head = { NULL };
#define MAX_KFS_EVENTS  2048

// this array holds each pending event
static kfs_event fs_event_buf[MAX_KFS_EVENTS];
static int       free_event_idx = 0;
static int       fs_event_init  = 0;

// this array records whether anyone is interested in a
// particular type of event.  if no one is, we bail out
// early from the event delivery
static int16_t fs_event_type_watchers[FSE_MAX_EVENTS];
static int watcher_add_event(fs_event_watcher *watcher, kfs_event *kfse);

static lck_grp_attr_t * fsevent_group_attr;
static lck_attr_t     * fsevent_lock_attr;
static lck_grp_t      * fsevent_mutex_group;

static lck_grp_t      * fsevent_rw_group;

static lck_rw_t  fsevent_big_lock;   // always grab this first
static lck_mtx_t watch_list_lock;
static lck_mtx_t event_buf_lock;
static void init_pathbuff(void);


static void
fsevents_internal_init(void)
{
    int i;

    if (fs_event_init++ != 0) {
        return;
    }

    for(i=0; i < FSE_MAX_EVENTS; i++) {
        fs_event_type_watchers[i] = 0;
    }

    for(i=0; i < MAX_KFS_EVENTS; i++) {
        fs_event_buf[i].type     = FSE_INVALID;
        fs_event_buf[i].refcount = 0;
    }

    SLIST_INIT(&watch_list_head);

    fsevent_lock_attr   = lck_attr_alloc_init();
    fsevent_group_attr  = lck_grp_attr_alloc_init();
    fsevent_mutex_group = lck_grp_alloc_init("fsevent-mutex", fsevent_group_attr);
    fsevent_rw_group    = lck_grp_alloc_init("fsevent-rw", fsevent_group_attr);

    lck_mtx_init(&watch_list_lock, fsevent_mutex_group, fsevent_lock_attr);
    lck_mtx_init(&event_buf_lock, fsevent_mutex_group, fsevent_lock_attr);

    lck_rw_init(&fsevent_big_lock, fsevent_rw_group, fsevent_lock_attr);

    init_pathbuff();
}
static void
lock_watch_list(void)
{
    lck_mtx_lock(&watch_list_lock);
}

static void
unlock_watch_list(void)
{
    lck_mtx_unlock(&watch_list_lock);
}

static void
lock_fs_event_buf(void)
{
    lck_mtx_lock(&event_buf_lock);
}

static void
unlock_fs_event_buf(void)
{
    lck_mtx_unlock(&event_buf_lock);
}
static void do_free_event(kfs_event *kfse);
static int
watcher_cares_about_dev(fs_event_watcher *watcher, dev_t dev)
{
    int i;

    // if there is no list of devices to watch, then always
    // say we're interested so we'll report all events from
    // all devices
    if (watcher->devices_to_watch == NULL) {
        return 1;
    }

    for(i=0; i < watcher->num_devices; i++) {
        if (dev == watcher->devices_to_watch[i]) {
            // found a match! that means we want events
            // from this device.
            return 1;
        }
    }

    // if we're here it's not in the devices_to_watch[]
    // list so that means we do not care about it
    return 0;
}
int
need_fsevent(int type, vnode_t vp)
{
    fs_event_watcher *watcher;
    dev_t             dev;

    if (fs_event_type_watchers[type] == 0)
        return 0;

    dev = (dev_t)(vp->v_mount->mnt_vfsstat.f_fsid.val[0]);

    lock_watch_list();

    SLIST_FOREACH(watcher, &watch_list_head, link) {
        if (watcher->event_list[type] == FSE_REPORT && watcher_cares_about_dev(watcher, dev)) {
            unlock_watch_list();
            return 1;
        }
    }

    unlock_watch_list();
    return 0;
}
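
#if 0
/*
 * Illustrative sketch (not part of the original file): how a filesystem
 * write path would typically use need_fsevent()/add_fsevent().  The cheap
 * need_fsevent() check skips the varargs setup, event-slot search, and
 * locking in add_fsevent() when no watcher cares about this event type.
 * FSE_CONTENT_MODIFIED and the FSE_ARG_* markers come from <sys/fsevents.h>;
 * the helper function name is hypothetical.
 */
static void
example_notify_content_modified(vnode_t vp, vfs_context_t ctx)
{
    if (need_fsevent(FSE_CONTENT_MODIFIED, vp)) {
        add_fsevent(FSE_CONTENT_MODIFIED, ctx,
                    FSE_ARG_VNODE, vp,
                    FSE_ARG_DONE);
    }
}
#endif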
int
add_fsevent(int type, vfs_context_t ctx, ...)
{
    struct proc      *p = vfs_context_proc(ctx);
    int               i, arg_idx, num_deliveries = 0;
    kfs_event_arg    *kea;
    kfs_event        *kfse;
    fs_event_watcher *watcher;
    va_list           ap;
    int               error = 0;
    dev_t             dev = 0;

    va_start(ap, ctx);

    // if no one cares about this type of event, bail out
    if (fs_event_type_watchers[type] == 0) {
        va_end(ap);
        return 0;
    }

    lck_rw_lock_shared(&fsevent_big_lock);

    // find a free event and snag it for our use
    // NOTE: do not do anything that would block until
    //       the lock is dropped.
    lock_fs_event_buf();

    for(i=0; i < MAX_KFS_EVENTS; i++) {
        if (fs_event_buf[(free_event_idx + i) % MAX_KFS_EVENTS].type == FSE_INVALID) {
            break;
        }
    }

    if (i >= MAX_KFS_EVENTS) {
        // yikes! no free slots
        unlock_fs_event_buf();
        va_end(ap);

        lock_watch_list();
        SLIST_FOREACH(watcher, &watch_list_head, link) {
            watcher->flags |= WATCHER_DROPPED_EVENTS;
            wakeup((caddr_t)watcher);
        }
        unlock_watch_list();
        lck_rw_done(&fsevent_big_lock);

        printf("fs_events: add_event: event queue is full! dropping events.\n");
        return ENOSPC;
    }

    kfse = &fs_event_buf[(free_event_idx + i) % MAX_KFS_EVENTS];

    free_event_idx = ((free_event_idx + i) % MAX_KFS_EVENTS) + 1;

    kfse->type     = type;
    kfse->refcount = 0;
    kfse->pid      = p->p_pid;

    unlock_fs_event_buf();   // at this point it's safe to unlock

    // now process the arguments passed in and copy them into
    // the kfs_event
    arg_idx = 0;
    while(arg_idx < KFS_NUM_ARGS) {
        kea = &kfse->args[arg_idx++];
        kea->type = va_arg(ap, int32_t);

        if (kea->type == FSE_ARG_DONE) {
            break;
        }

        switch(kea->type) {
            case FSE_ARG_VNODE: {
                // this expands out into multiple arguments to the client
                struct vnode     *vp;
                struct vnode_attr va;

                kea->data.vp = vp = va_arg(ap, struct vnode *);
                if (kea->data.vp == NULL) {
                    panic("add_fsevent: you can't pass me a NULL vnode ptr (type %d)!\n",
                          type);
                }

                if (vnode_ref_ext(kea->data.vp, O_EVTONLY) != 0) {
                    kea->type = FSE_ARG_DONE;
                    error = EINVAL;
                    goto clean_up;
                }

                VATTR_INIT(&va);
                VATTR_WANTED(&va, va_fsid);
                VATTR_WANTED(&va, va_fileid);
                VATTR_WANTED(&va, va_mode);
                VATTR_WANTED(&va, va_uid);
                VATTR_WANTED(&va, va_gid);
                if (vnode_getattr(kea->data.vp, &va, ctx) != 0) {
                    vnode_rele_ext(kea->data.vp, O_EVTONLY, 0);
                    kea->type = FSE_ARG_DONE;
                    error = EINVAL;
                    goto clean_up;
                }

                kea++;
                kea->type = FSE_ARG_DEV;
                kea->data.dev = dev = (dev_t)va.va_fsid;

                kea++;
                kea->type = FSE_ARG_INO;
                kea->data.ino = (ino_t)va.va_fileid;

                kea++;
                kea->type = FSE_ARG_MODE;
                kea->data.mode = (int32_t)vnode_vttoif(vnode_vtype(vp)) | va.va_mode;

                kea++;
                kea->type = FSE_ARG_UID;
                kea->data.uid = va.va_uid;

                kea++;
                kea->type = FSE_ARG_GID;
                kea->data.gid = va.va_gid;

                arg_idx += 5;
                break;
            }

            case FSE_ARG_FINFO: {
                fse_info *fse;

                fse = va_arg(ap, fse_info *);

                kea->type = FSE_ARG_DEV;
                kea->data.dev = dev = (dev_t)fse->dev;

                kea++;
                kea->type = FSE_ARG_INO;
                kea->data.ino = (ino_t)fse->ino;

                kea++;
                kea->type = FSE_ARG_MODE;
                kea->data.mode = (int32_t)fse->mode;

                kea++;
                kea->type = FSE_ARG_UID;
                kea->data.uid = (uid_t)fse->uid;

                kea++;
                kea->type = FSE_ARG_GID;
                kea->data.gid = (gid_t)fse->gid;

                arg_idx += 4;
                break;
            }

            case FSE_ARG_STRING:
                kea->len = (int16_t)(va_arg(ap, int32_t) & 0xffff);
                kea->data.str = vfs_addname(va_arg(ap, char *), kea->len, 0, 0);
                break;

            case FSE_ARG_INT32:
                kea->data.int32 = va_arg(ap, int32_t);
                break;

            case FSE_ARG_INT64:
                printf("fs_events: 64-bit args not implemented.\n");
//              kea->data.int64 = va_arg(ap, int64_t);
                break;

            case FSE_ARG_RAW:
                kea->len = (int16_t)(va_arg(ap, int32_t) & 0xffff);
                MALLOC(kea->data.ptr, void *, kea->len, M_TEMP, M_WAITOK);
                memcpy(kea->data.ptr, va_arg(ap, void *), kea->len);
                break;

            case FSE_ARG_DEV:
                kea->data.dev = dev = va_arg(ap, dev_t);
                break;

            case FSE_ARG_MODE:
                kea->data.mode = va_arg(ap, int32_t);
                break;

            case FSE_ARG_INO:
                kea->data.ino = va_arg(ap, ino_t);
                break;

            case FSE_ARG_UID:
                kea->data.uid = va_arg(ap, uid_t);
                break;

            case FSE_ARG_GID:
                kea->data.gid = va_arg(ap, gid_t);
                break;

            default:
                printf("add_fsevent: unknown type %d\n", kea->type);
                // just skip one 32-bit word and hope we sync up...
                (void)va_arg(ap, int32_t);
                break;
        }
    }

  clean_up:
    va_end(ap);

    // now we have to go and let everyone who is interested
    // in this type of event know about it...
    lock_watch_list();

    SLIST_FOREACH(watcher, &watch_list_head, link) {
        if (watcher->event_list[type] == FSE_REPORT && watcher_cares_about_dev(watcher, dev)) {
            if (watcher_add_event(watcher, kfse) == 0) {
                num_deliveries++;
            }
        }
    }

    unlock_watch_list();

    // just in case no one was interested after all...
    if (num_deliveries == 0) {
        do_free_event(kfse);
        free_event_idx = (int)(kfse - &fs_event_buf[0]);
    }

    lck_rw_done(&fsevent_big_lock);
    return error;
}
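
#if 0
/*
 * Illustrative sketch (not part of the original file): the variable
 * argument list consumed above is a sequence of (FSE_ARG_* tag, payload)
 * pairs terminated by FSE_ARG_DONE.  Scalar tags carry one value each;
 * FSE_ARG_VNODE and FSE_ARG_FINFO expand into several args[] slots as
 * coded in the switch above.  The constants are from <sys/fsevents.h>;
 * the helper function name is hypothetical.
 */
static void
example_scalar_args(vfs_context_t ctx, dev_t dev, ino_t ino)
{
    add_fsevent(FSE_STAT_CHANGED, ctx,
                FSE_ARG_DEV, dev,
                FSE_ARG_INO, ino,
                FSE_ARG_DONE);
}
#endif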
static void
do_free_event(kfs_event *kfse)
{
    int            i;
    kfs_event_arg *kea, all_args[KFS_NUM_ARGS];

    lock_fs_event_buf();

    // mark this fsevent as invalid
    kfse->type = FSE_INVALID;

    // make a copy of this so we can free things without
    // holding the fs_event_buf lock
    memcpy(&all_args[0], &kfse->args[0], sizeof(all_args));

    // and just to be anal, set this so that there are no args
    kfse->args[0].type = FSE_ARG_DONE;

    free_event_idx = (kfse - fs_event_buf);

    unlock_fs_event_buf();

    for(i=0; i < KFS_NUM_ARGS; i++) {
        kea = &all_args[i];
        if (kea->type == FSE_ARG_DONE) {
            break;
        }

        switch(kea->type) {
            case FSE_ARG_VNODE:
                vnode_rele_ext(kea->data.vp, O_EVTONLY, 0);
                break;

            case FSE_ARG_STRING:
                vfs_removename(kea->data.str);
                break;

            case FSE_ARG_RAW:
                FREE(kea->data.ptr, M_TEMP);
                break;
        }
    }
}
static int
add_watcher(int8_t *event_list, int32_t num_events, int32_t eventq_size, fs_event_watcher **watcher_out)
{
    int               i;
    fs_event_watcher *watcher;

    if (eventq_size < 0 || eventq_size > MAX_KFS_EVENTS) {
        eventq_size = MAX_KFS_EVENTS;
    }

    // Note: the event_queue follows the fs_event_watcher struct
    //       in memory so we only have to do one allocation
    MALLOC(watcher,
           fs_event_watcher *,
           sizeof(fs_event_watcher) + eventq_size * sizeof(kfs_event *),
           M_TEMP, M_WAITOK);

    watcher->event_list       = event_list;
    watcher->num_events       = num_events;
    watcher->devices_to_watch = NULL;
    watcher->num_devices      = 0;
    watcher->flags            = 0;
    watcher->event_queue      = (kfs_event **)&watcher[1];
    watcher->eventq_size      = eventq_size;
    watcher->rd               = 0;
    watcher->wr               = 0;
    watcher->blockers         = 0;

    lock_watch_list();

    // now update the global list of who's interested in
    // events of a particular type...
    for(i=0; i < num_events; i++) {
        if (event_list[i] != FSE_IGNORE && i < FSE_MAX_EVENTS) {
            fs_event_type_watchers[i]++;
        }
    }

    SLIST_INSERT_HEAD(&watch_list_head, watcher, link);

    unlock_watch_list();

    *watcher_out = watcher;

    return 0;
}
static void
remove_watcher(fs_event_watcher *target)
{
    int               i;
    fs_event_watcher *watcher;
    kfs_event        *kfse;

    lck_rw_lock_shared(&fsevent_big_lock);

    lock_watch_list();

    SLIST_FOREACH(watcher, &watch_list_head, link) {
        if (watcher == target) {
            SLIST_REMOVE(&watch_list_head, watcher, fs_event_watcher, link);

            for(i=0; i < watcher->num_events; i++) {
                if (watcher->event_list[i] != FSE_IGNORE && i < FSE_MAX_EVENTS) {
                    fs_event_type_watchers[i]--;
                }
            }

            unlock_watch_list();

            // drain the event_queue
            for(i=watcher->rd; i != watcher->wr; i=(i+1) % watcher->eventq_size) {
                kfse = watcher->event_queue[i];

                if (OSAddAtomic(-1, (SInt32 *)&kfse->refcount) == 1) {
                    do_free_event(kfse);
                }
            }

            if (watcher->event_list) {
                FREE(watcher->event_list, M_TEMP);
                watcher->event_list = NULL;
            }
            if (watcher->devices_to_watch) {
                FREE(watcher->devices_to_watch, M_TEMP);
                watcher->devices_to_watch = NULL;
            }
            FREE(watcher, M_TEMP);

            lck_rw_done(&fsevent_big_lock);
            return;
        }
    }

    unlock_watch_list();
    lck_rw_done(&fsevent_big_lock);
}
static int
watcher_add_event(fs_event_watcher *watcher, kfs_event *kfse)
{
    if (((watcher->wr + 1) % watcher->eventq_size) == watcher->rd) {
        watcher->flags |= WATCHER_DROPPED_EVENTS;
        wakeup((caddr_t)watcher);
        return ENOSPC;
    }

    watcher->event_queue[watcher->wr] = kfse;
    OSAddAtomic(1, (SInt32 *)&kfse->refcount);
    watcher->wr = (watcher->wr + 1) % watcher->eventq_size;

    // wake up the watcher if he's waiting!
    wakeup((caddr_t)watcher);

    return 0;
}
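
#if 0
/*
 * Illustrative note (not part of the original file): the per-watcher queue
 * is a classic one-slot-open ring buffer.  rd == wr means empty, and
 * (wr + 1) % eventq_size == rd means full, which is exactly the drop test
 * above, so a queue of eventq_size entries holds at most eventq_size - 1
 * events.  A minimal model of the same arithmetic:
 */
static int
example_ring_capacity(void)
{
    int rd = 0, wr = 0, size = 4, stored = 0;

    while (((wr + 1) % size) != rd) {   // same "full" test as above
        wr = (wr + 1) % size;           // producer: watcher_add_event
        stored++;
    }
    return stored;                      // 3, i.e. eventq_size - 1
}
#endif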
static int
fmod_watch(fs_event_watcher *watcher, struct uio *uio)
{
    int            i, error = 0, last_full_event_resid;
    kfs_event     *kfse;
    kfs_event_arg *kea;
    uint16_t       tmp16;
    char          *pathbuff;
    int            pathbuff_len;

    // LP64todo - fix this
    last_full_event_resid = uio_resid(uio);

    // need at least 2048 bytes of space (maxpathlen + 1 event buf)
    if (uio_resid(uio) < 2048 || watcher == NULL) {
        return EINVAL;
    }

    if (watcher->rd == watcher->wr) {
        if (watcher->flags & WATCHER_CLOSING) {
            return 0;
        }
        OSAddAtomic(1, (SInt32 *)&watcher->blockers);

        // there's nothing to do, go to sleep
        error = tsleep((caddr_t)watcher, PUSER|PCATCH, "fsevents_empty", 0);

        OSAddAtomic(-1, (SInt32 *)&watcher->blockers);

        if (error != 0 || (watcher->flags & WATCHER_CLOSING)) {
            return error;
        }
    }

    // if we dropped events, return that as an event first
    if (watcher->flags & WATCHER_DROPPED_EVENTS) {
        int32_t val = FSE_EVENTS_DROPPED;

        error = uiomove((caddr_t)&val, sizeof(int32_t), uio);
        if (error == 0) {
            val = 0;                // a fake pid
            error = uiomove((caddr_t)&val, sizeof(int32_t), uio);

            tmp16 = FSE_ARG_DONE;   // makes it a consistent msg
            error = uiomove((caddr_t)&tmp16, sizeof(int16_t), uio);
        }

        if (error) {
            return error;
        }

        watcher->flags &= ~WATCHER_DROPPED_EVENTS;
    }

    // check if the next chunk of data will fit in the user's
    // buffer.  if not, just goto get_out which will return
    // the number of bytes worth of events that we did read.
    // this leaves the event that didn't fit in the queue.
    //
    // LP64todo - fix this
#define CHECK_UPTR(size) if (size > (unsigned)uio_resid(uio)) {        \
                             uio_setresid(uio, last_full_event_resid); \
                             goto get_out;                             \
                         }

    for (; uio_resid(uio) > 0 && watcher->rd != watcher->wr; ) {
        kfse = watcher->event_queue[watcher->rd];

        // copy out the type of the event
        CHECK_UPTR(sizeof(int32_t));
        if ((error = uiomove((caddr_t)&kfse->type, sizeof(int32_t), uio)) != 0) {
            goto get_out;
        }

        // now copy out the pid of the person that changed the file
        CHECK_UPTR(sizeof(pid_t));
        if ((error = uiomove((caddr_t)&kfse->pid, sizeof(pid_t), uio)) != 0) {
            goto get_out;
        }

        for(i=0; i < KFS_NUM_ARGS && error == 0; i++) {
            kea = &kfse->args[i];

            tmp16 = (uint16_t)kea->type;
            CHECK_UPTR(sizeof(uint16_t));
            error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
            if (error || kea->type == FSE_ARG_DONE) {
                break;
            }

            switch(kea->type) {
                case FSE_ARG_VNODE:
                    pathbuff = get_pathbuff();
                    pathbuff_len = MAXPATHLEN;
                    if (kea->data.vp == NULL) {
                        printf("fmod_watch: whoa... vp == NULL (%d)!\n", kfse->type);
                        release_pathbuff(pathbuff);
                        continue;
                    }

                    if (vn_getpath(kea->data.vp, pathbuff, &pathbuff_len) != 0 || pathbuff[0] == '\0') {
//                      printf("fmod_watch: vn_getpath failed! vp 0x%x vname 0x%x (%s) vparent 0x%x\n",
//                             kea->data.vp,
//                             VNAME(kea->data.vp),
//                             VNAME(kea->data.vp) ? VNAME(kea->data.vp) : "<null>",
//                             VPARENT(kea->data.vp));
                    }

                    CHECK_UPTR(sizeof(uint16_t));
                    tmp16 = (uint16_t)pathbuff_len;
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);

                    CHECK_UPTR((unsigned)pathbuff_len);
                    error = uiomove((caddr_t)pathbuff, pathbuff_len, uio);
                    release_pathbuff(pathbuff);
                    break;

                case FSE_ARG_STRING:
                    tmp16 = (uint16_t)kea->len;
                    CHECK_UPTR(sizeof(uint16_t));
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);

                    CHECK_UPTR(kea->len);
                    error = uiomove((caddr_t)kea->data.str, kea->len, uio);
                    break;

                case FSE_ARG_INT32:
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(int32_t));
                    tmp16 = sizeof(int32_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.int32, sizeof(int32_t), uio);
                    break;

                case FSE_ARG_INT64:
                    printf("fs_events: 64-bit args not implemented on copyout.\n");
//                  CHECK_UPTR(sizeof(uint16_t) + sizeof(int64_t));
//                  tmp16 = sizeof(int64_t);
//                  error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
//                  error = uiomove((caddr_t)&kea->data.int64, sizeof(int64_t), uio);
                    break;

                case FSE_ARG_RAW:
                    tmp16 = (uint16_t)kea->len;
                    CHECK_UPTR(sizeof(uint16_t));
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);

                    CHECK_UPTR(kea->len);
                    error = uiomove((caddr_t)kea->data.ptr, kea->len, uio);
                    break;

                case FSE_ARG_DEV:
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(dev_t));
                    tmp16 = sizeof(dev_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.dev, sizeof(dev_t), uio);
                    break;

                case FSE_ARG_INO:
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(ino_t));
                    tmp16 = sizeof(ino_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.ino, sizeof(ino_t), uio);
                    break;

                case FSE_ARG_MODE:
                    // XXXdbg - NOTE: we use 32-bits for the mode, not
                    //          16-bits like a real mode_t
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(int32_t));
                    tmp16 = sizeof(int32_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.mode, sizeof(int32_t), uio);
                    break;

                case FSE_ARG_UID:
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(uid_t));
                    tmp16 = sizeof(uid_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.uid, sizeof(uid_t), uio);
                    break;

                case FSE_ARG_GID:
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(gid_t));
                    tmp16 = sizeof(gid_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.gid, sizeof(gid_t), uio);
                    break;

                default:
                    printf("fmod_watch: unknown arg type %d.\n", kea->type);
                    break;
            }
        }

        // make sure that we always end with a FSE_ARG_DONE
        if (i >= KFS_NUM_ARGS) {
            tmp16 = FSE_ARG_DONE;
            CHECK_UPTR(sizeof(uint16_t));
            error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
        }

        // LP64todo - fix this
        last_full_event_resid = uio_resid(uio);

        watcher->rd = (watcher->rd + 1) % watcher->eventq_size;

        if (OSAddAtomic(-1, (SInt32 *)&kfse->refcount) == 1) {
            do_free_event(kfse);
        }
    }

  get_out:
    return error;
}
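
#if 0
/*
 * Illustrative sketch (not part of the original file): how a user-space
 * client might walk the byte stream that fmod_watch() produces.  Per the
 * copyout code above, each event is an int32 type and an int32 pid,
 * followed by (uint16 argtype, uint16 arglen, payload) records and a bare
 * FSE_ARG_DONE argtype at the end.  Error handling, short reads, and
 * alignment concerns are omitted; the helper name is hypothetical.
 */
static void
example_parse_events(char *buf, int len)
{
    char *p = buf, *end = buf + len;

    while (p < end) {
        int32_t type = *(int32_t *)p;   p += sizeof(int32_t);
        int32_t pid  = *(int32_t *)p;   p += sizeof(int32_t);

        for (;;) {
            uint16_t argtype = *(uint16_t *)p;   p += sizeof(uint16_t);
            if (argtype == FSE_ARG_DONE)
                break;

            // every other arg is length-prefixed on the wire: paths and
            // strings carry their own length, and the scalar args are
            // written with tmp16 = sizeof(payload) before the payload
            uint16_t arglen = *(uint16_t *)p;    p += sizeof(uint16_t);
            p += arglen;                         // skip the payload
        }
        printf("event %d from pid %d\n", type, pid);
    }
}
#endif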
//
// release any references we might have on vnodes which are
// on the mount point passed to us (so that it can be cleanly
// unmounted).
//
// since we don't want to lose the events we'll convert the
// vnode refs to the full path, inode #, and uid.
//
void
fsevent_unmount(struct mount *mp)
{
    int            i, j;
    kfs_event     *kfse;
    kfs_event_arg *kea;

    lck_rw_lock_exclusive(&fsevent_big_lock);
    lock_fs_event_buf();

    for(i=0; i < MAX_KFS_EVENTS; i++) {
        if (fs_event_buf[i].type == FSE_INVALID) {
            continue;
        }

        kfse = &fs_event_buf[i];
        for(j=0; j < KFS_NUM_ARGS; j++) {
            kea = &kfse->args[j];
            if (kea->type == FSE_ARG_DONE) {
                break;
            }

            if (kea->type == FSE_ARG_VNODE && kea->data.vp->v_mount == mp) {
                struct vnode *vp = kea->data.vp;
                char         *pathbuff;
                int           pathbuff_len;

                pathbuff = get_pathbuff();
                pathbuff_len = MAXPATHLEN;

                if (vn_getpath(vp, pathbuff, &pathbuff_len) != 0 || pathbuff[0] == '\0') {
                    char *vname;

                    vname = vnode_getname(vp);

                    printf("fsevent_unmount: vn_getpath failed! vp 0x%x vname 0x%x (%s) vparent 0x%x\n",
                           vp, vname, vname ? vname : "<null>", vp->v_parent);

                    if (vname) {
                        vnode_putname(vname);
                    }
                }

                // switch the type of the string
                kea->type     = FSE_ARG_STRING;
                kea->data.str = vfs_addname(pathbuff, pathbuff_len, 0, 0);
                kea->len      = pathbuff_len;
                release_pathbuff(pathbuff);

                // and finally let go of the reference on the vnode
                vnode_rele_ext(vp, O_EVTONLY, 0);
            }
        }
    }

    unlock_fs_event_buf();
    lck_rw_done(&fsevent_big_lock);
}
//
// /dev/fsevents device code
//
static int fsevents_installed = 0;
static struct lock__bsd__ fsevents_lck;

typedef struct fsevent_handle {
    fs_event_watcher *watcher;
    struct selinfo    si;
} fsevent_handle;
static int
fseventsf_read(struct fileproc *fp, struct uio *uio,
               __unused kauth_cred_t *cred, __unused int flags,
               __unused struct proc *p)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;
    int             error;

    error = fmod_watch(fseh->watcher, uio);

    return error;
}
static int
fseventsf_write(__unused struct fileproc *fp, __unused struct uio *uio,
                __unused kauth_cred_t *cred, __unused int flags,
                __unused struct proc *p)
{
    return EIO;
}
static int
fseventsf_ioctl(struct fileproc *fp, u_long cmd, caddr_t data, struct proc *p)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;
    int             ret = 0;
    fsevent_dev_filter_args *devfilt_args = (fsevent_dev_filter_args *)data;

    switch (cmd) {
        case FSEVENTS_DEVICE_FILTER: {
            int    new_num_devices;
            dev_t *devices_to_watch, *tmp = NULL;

            if (devfilt_args->num_devices > 256) {
                ret = EINVAL;
                break;
            }

            new_num_devices = devfilt_args->num_devices;
            if (new_num_devices == 0) {
                tmp = fseh->watcher->devices_to_watch;

                lock_watch_list();
                fseh->watcher->devices_to_watch = NULL;
                fseh->watcher->num_devices = new_num_devices;
                unlock_watch_list();

                if (tmp) {
                    FREE(tmp, M_TEMP);
                }
                break;
            }

            MALLOC(devices_to_watch, dev_t *,
                   new_num_devices * sizeof(dev_t),
                   M_TEMP, M_WAITOK);
            if (devices_to_watch == NULL) {
                ret = ENOMEM;
                break;
            }

            ret = copyin(CAST_USER_ADDR_T(devfilt_args->devices),
                         (void *)devices_to_watch,
                         new_num_devices * sizeof(dev_t));
            if (ret) {
                FREE(devices_to_watch, M_TEMP);
                break;
            }

            lock_watch_list();
            fseh->watcher->num_devices = new_num_devices;
            tmp = fseh->watcher->devices_to_watch;
            fseh->watcher->devices_to_watch = devices_to_watch;
            unlock_watch_list();

            if (tmp) {
                FREE(tmp, M_TEMP);
            }

            break;
        }

        default:
            ret = EINVAL;
            break;
    }

    return ret;
}
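
#if 0
/*
 * Illustrative sketch (not part of the original file): restricting a
 * watcher to a single volume from user space.  fsevent_dev_filter_args
 * is the structure consumed by the FSEVENTS_DEVICE_FILTER case above;
 * "fsevents_fd" is a hypothetical descriptor obtained via FSEVENTS_CLONE.
 */
static int
example_filter_one_device(int fsevents_fd, dev_t dev)
{
    fsevent_dev_filter_args dfa;

    dfa.num_devices = 1;       // pass 0 to clear the filter
    dfa.devices     = &dev;

    return ioctl(fsevents_fd, FSEVENTS_DEVICE_FILTER, &dfa);
}
#endif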
static int
fseventsf_select(struct fileproc *fp, int which, void *wql, struct proc *p)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;
    int             ready = 0;

    if ((which != FREAD) || (fseh->watcher->flags & WATCHER_CLOSING)) {
        return 0;
    }

    // if there's nothing in the queue, we're not ready
    if (fseh->watcher->rd == fseh->watcher->wr) {
        ready = 0;
    } else {
        ready = 1;
    }

    if (!ready) {
        selrecord(p, &fseh->si, wql);
    }

    return ready;
}
static int
fseventsf_stat(struct fileproc *fp, struct stat *sb, struct proc *p)
{
    return ENOTSUP;
}

static int
fseventsf_close(struct fileglob *fg, struct proc *p)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fg->fg_data;

    remove_watcher(fseh->watcher);

    fg->fg_data = NULL;
    fseh->watcher = NULL;
    FREE(fseh, M_TEMP);

    return 0;
}

static int
fseventsf_kqfilter(struct fileproc *fp, struct knote *kn, struct proc *p)
{
    // XXX - not supported
    return 0;
}

static int
fseventsf_drain(struct fileproc *fp, struct proc *p)
{
    int             counter = 0;
    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;

    fseh->watcher->flags |= WATCHER_CLOSING;

    // if there are people still waiting, sleep for 10ms to
    // let them clean up and get out of there.  however we
    // also don't want to get stuck forever so if they don't
    // exit after 5 seconds we're tearing things down anyway.
    while(fseh->watcher->blockers && counter++ < 500) {
        // issue wakeup in case anyone is blocked waiting for an event
        // do this each time we wakeup in case the blocker missed
        // the wakeup due to the unprotected test of WATCHER_CLOSING
        // and decision to tsleep in fmod_watch... this bit of
        // latency is a decent tradeoff against not having to
        // take and drop a lock in fmod_watch
        wakeup((caddr_t)fseh->watcher);

        tsleep((caddr_t)fseh->watcher, PRIBIO, "watcher-close", 1);
    }

    return 0;
}
static int
fseventsopen(dev_t dev, int flag, int mode, struct proc *p)
{
    if (!is_suser()) {
        return EPERM;
    }

    return 0;
}

static int
fseventsclose(dev_t dev, int flag, int mode, struct proc *p)
{
    return 0;
}

static int
fseventsread(dev_t dev, struct uio *uio, int ioflag)
{
    return EIO;
}

static int
fseventswrite(dev_t dev, struct uio *uio, int ioflag)
{
    return EIO;
}
static struct fileops fsevents_fops = {
    fseventsf_read,
    fseventsf_write,
    fseventsf_ioctl,
    fseventsf_select,
    fseventsf_close,
    fseventsf_kqfilter,
    fseventsf_drain
};
static int
fseventsioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
    struct fileproc    *f;
    int                 fd, error;
    fsevent_handle     *fseh = NULL;
    fsevent_clone_args *fse_clone_args = (fsevent_clone_args *)data;
    int8_t             *event_list;

    switch (cmd) {
        case FSEVENTS_CLONE:
            if (fse_clone_args->num_events < 0 || fse_clone_args->num_events > 4096) {
                return EINVAL;
            }

            MALLOC(fseh, fsevent_handle *, sizeof(fsevent_handle),
                   M_TEMP, M_WAITOK);
            memset(fseh, 0, sizeof(fsevent_handle));

            MALLOC(event_list, int8_t *,
                   fse_clone_args->num_events * sizeof(int8_t),
                   M_TEMP, M_WAITOK);

            error = copyin(CAST_USER_ADDR_T(fse_clone_args->event_list),
                           (void *)event_list,
                           fse_clone_args->num_events * sizeof(int8_t));
            if (error) {
                FREE(event_list, M_TEMP);
                FREE(fseh, M_TEMP);
                return error;
            }

            error = add_watcher(event_list,
                                fse_clone_args->num_events,
                                fse_clone_args->event_queue_depth,
                                &fseh->watcher);
            if (error) {
                FREE(event_list, M_TEMP);
                FREE(fseh, M_TEMP);
                return error;
            }

            error = falloc(p, &f, &fd);
            if (error) {
                FREE(event_list, M_TEMP);
                FREE(fseh, M_TEMP);
                return error;
            }
            proc_fdlock(p);
            f->f_fglob->fg_flag = FREAD | FWRITE;
            f->f_fglob->fg_type = DTYPE_FSEVENTS;
            f->f_fglob->fg_ops  = &fsevents_fops;
            f->f_fglob->fg_data = (caddr_t)fseh;
            proc_fdunlock(p);
            copyout((void *)&fd, CAST_USER_ADDR_T(fse_clone_args->fd), sizeof(int32_t));
            proc_fdlock(p);
            *fdflags(p, fd) &= ~UF_RESERVED;
            fp_drop(p, fd, f, 1);
            proc_fdunlock(p);
            break;

        default:
            error = EINVAL;
            break;
    }

    return error;
}
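
#if 0
/*
 * Illustrative sketch (not part of the original file): cloning a watcher
 * from user space.  The caller opens /dev/fsevents, fills a per-event-type
 * interest array with FSE_REPORT or FSE_IGNORE, and gets back a fresh
 * descriptor to read events from.  Field names follow fsevent_clone_args
 * as used in the FSEVENTS_CLONE case above; error handling is omitted and
 * the queue depth chosen here is arbitrary.
 */
static int
example_clone_watcher(void)
{
    int8_t             event_list[FSE_MAX_EVENTS];
    fsevent_clone_args clone_args;
    int                i, dev_fd, new_fd = -1;

    dev_fd = open("/dev/fsevents", O_RDONLY);

    for (i = 0; i < FSE_MAX_EVENTS; i++) {
        event_list[i] = FSE_REPORT;     // interested in everything
    }

    clone_args.event_list        = event_list;
    clone_args.num_events        = FSE_MAX_EVENTS;
    clone_args.event_queue_depth = 0x100;
    clone_args.fd                = &new_fd;

    ioctl(dev_fd, FSEVENTS_CLONE, &clone_args);

    return new_fd;                      // read(2) events from this fd
}
#endif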
static int
fseventsselect(dev_t dev, int rw, struct proc *p)
{
    return 0;
}

static void
fsevents_wakeup(fsevent_handle *fseh)
{
    wakeup((caddr_t)fseh);
    selwakeup(&fseh->si);
}
/*
 * A struct describing which functions will get invoked for certain
 * actions.
 */
static struct cdevsw fsevents_cdevsw =
{
    fseventsopen,     /* open */
    fseventsclose,    /* close */
    fseventsread,     /* read */
    fseventswrite,    /* write */
    fseventsioctl,    /* ioctl */
    nulldev,          /* stop */
    nulldev,          /* reset */
    NULL,             /* tty's */
    eno_select,       /* select */
    eno_mmap,         /* mmap */
    eno_strat,        /* strategy */
    eno_getc,         /* getc */
    eno_putc,         /* putc */
    0                 /* type */
};
/*
 * Called to initialize our device,
 * and to register ourselves with devfs
 */
void
fsevents_init(void)
{
    int ret;

    if (fsevents_installed) {
        return;
    }

    fsevents_installed = 1;

    lockinit(&fsevents_lck, PLOCK, "fsevents", 0, 0);

    ret = cdevsw_add(-1, &fsevents_cdevsw);
    if (ret < 0) {
        fsevents_installed = 0;
        return;
    }

    devfs_make_node(makedev(ret, 0), DEVFS_CHAR,
                    UID_ROOT, GID_WHEEL, 0644, "fsevents", 0);

    fsevents_internal_init();
}
// XXXdbg - temporary path buffer handling
//
#define NUM_PATH_BUFFS  16
static char path_buff[NUM_PATH_BUFFS][MAXPATHLEN];
static char path_buff_inuse[NUM_PATH_BUFFS];

static lck_grp_attr_t * pathbuff_group_attr;
static lck_attr_t     * pathbuff_lock_attr;
static lck_grp_t      * pathbuff_mutex_group;
static lck_mtx_t        pathbuff_lock;

static void
init_pathbuff(void)
{
    pathbuff_lock_attr   = lck_attr_alloc_init();
    pathbuff_group_attr  = lck_grp_attr_alloc_init();
    pathbuff_mutex_group = lck_grp_alloc_init("pathbuff-mutex", pathbuff_group_attr);

    lck_mtx_init(&pathbuff_lock, pathbuff_mutex_group, pathbuff_lock_attr);
}

static void
lock_pathbuff(void)
{
    lck_mtx_lock(&pathbuff_lock);
}

static void
unlock_pathbuff(void)
{
    lck_mtx_unlock(&pathbuff_lock);
}

char *
get_pathbuff(void)
{
    int i;

    lock_pathbuff();
    for(i=0; i < NUM_PATH_BUFFS; i++) {
        if (path_buff_inuse[i] == 0) {
            break;
        }
    }

    if (i >= NUM_PATH_BUFFS) {
        char *path;

        unlock_pathbuff();
        MALLOC_ZONE(path, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
        return path;
    }

    path_buff_inuse[i] = 1;
    unlock_pathbuff();
    return &path_buff[i][0];
}

void
release_pathbuff(char *path)
{
    int i;

    if (path == NULL) {
        return;
    }

    lock_pathbuff();
    for(i=0; i < NUM_PATH_BUFFS; i++) {
        if (path == &path_buff[i][0]) {
            path_buff[i][0] = '\0';
            path_buff_inuse[i] = 0;
            unlock_pathbuff();
            return;
        }
    }
    unlock_pathbuff();

    // if we get here then it wasn't one of our temp buffers
    FREE_ZONE(path, MAXPATHLEN, M_NAMEI);
}
int
get_fse_info(struct vnode *vp, fse_info *fse, vfs_context_t ctx)
{
    struct vnode_attr va;

    VATTR_INIT(&va);
    VATTR_WANTED(&va, va_fsid);
    VATTR_WANTED(&va, va_fileid);
    VATTR_WANTED(&va, va_mode);
    VATTR_WANTED(&va, va_uid);
    VATTR_WANTED(&va, va_gid);
    if (vnode_getattr(vp, &va, ctx) != 0) {
        return -1;
    }

    fse->dev  = (dev_t)va.va_fsid;
    fse->ino  = (ino_t)va.va_fileid;
    fse->mode = (int32_t)vnode_vttoif(vnode_vtype(vp)) | va.va_mode;
    fse->uid  = (uid_t)va.va_uid;
    fse->gid  = (gid_t)va.va_gid;

    return 0;
}
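
#if 0
/*
 * Illustrative sketch (not part of the original file): an assumed usage
 * pattern for get_fse_info().  A caller captures the vnode's identity up
 * front and posts the event with FSE_ARG_FINFO instead of FSE_ARG_VNODE,
 * avoiding the O_EVTONLY reference that the vnode form would take.  The
 * helper name and parameters here are hypothetical.
 */
static void
example_post_delete(struct vnode *vp, char *path, int pathlen, vfs_context_t ctx)
{
    fse_info finfo;

    if (get_fse_info(vp, &finfo, ctx) == 0) {
        add_fsevent(FSE_DELETE, ctx,
                    FSE_ARG_STRING, pathlen, path,  // string args are (len, ptr)
                    FSE_ARG_FINFO, &finfo,
                    FSE_ARG_DONE);
    }
}
#endif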
;