/*
 * Copyright (c) 2006 Apple Computer, Inc. All Rights Reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/file_internal.h>
#include <sys/stat.h>
#include <sys/vnode_internal.h>
#include <sys/mount_internal.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/dirent.h>
#include <sys/sysctl.h>
#include <machine/cons.h>
#include <miscfs/specfs/specdev.h>
#include <miscfs/devfs/devfs.h>
#include <sys/filio.h>
#include <architecture/byte_order.h>
#include <kern/locks.h>
#include <libkern/OSAtomic.h>

#include <bsm/audit_kernel.h>
#include <bsm/audit_kevents.h>

// where all our structs and defines come from
#include <sys/fsevents.h>

typedef struct kfs_event_arg {
    u_int16_t type;          // type of this argument (FSE_ARG_*)
    int16_t   len;           // length of variable-size data, in bytes
    union {
        struct vnode *vp;
        const char   *str;
        void         *ptr;
        int32_t       int32;
        dev_t         dev;
        ino_t         ino;
        int32_t       mode;
        uid_t         uid;
        gid_t         gid;
    } data;
} kfs_event_arg;

#define KFS_NUM_ARGS FSE_MAX_ARGS
typedef struct kfs_event {
    int32_t       type;      // type code of this event
    u_int32_t     refcount;  // number of clients referencing this
    pid_t         pid;       // pid of the process that did the op
    kfs_event_arg args[KFS_NUM_ARGS];
} kfs_event;

typedef struct fs_event_watcher {
    SLIST_ENTRY(fs_event_watcher) link;
    int8_t     *event_list;         // the events we're interested in
    int32_t     num_events;         // size of event_list
    dev_t      *devices_to_watch;   // only report events from these devices
    uint32_t    num_devices;        // size of devices_to_watch
    int32_t     flags;              // WATCHER_* flags, below
    kfs_event **event_queue;        // ring buffer of event pointers
    int32_t     eventq_size;        // number of event pointers in queue
    int32_t     rd, wr;             // indices to the event_queue
    int32_t     blockers;           // threads asleep in fmod_watch()
} fs_event_watcher;

// fs_event_watcher flags
#define WATCHER_DROPPED_EVENTS 0x0001
#define WATCHER_CLOSING        0x0002

static SLIST_HEAD(watch_list, fs_event_watcher) watch_list_head = { NULL };

#define MAX_KFS_EVENTS 2048

// this array holds each pending event
static kfs_event fs_event_buf[MAX_KFS_EVENTS];
static int       free_event_idx = 0;
static int       fs_event_init  = 0;

// this array records whether anyone is interested in a
// particular type of event.  if no one is, we bail out
// early from the event delivery
static int16_t fs_event_type_watchers[FSE_MAX_EVENTS];
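
//
// How the pieces above fit together (a summary of this file, not new
// behavior): fs_event_buf[] is the single global table of pending
// events.  Each fs_event_watcher keeps its own ring buffer of
// *pointers* into that table, and kfs_event.refcount counts how many
// watcher queues still reference a slot; a slot is recycled (its type
// set back to FSE_INVALID) only when the last reference is dropped.
// fs_event_type_watchers[] is a fast-path counter so add_fsevent()
// and need_fsevent() can bail out early when no watcher is registered
// for a given event type.
//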

static int watcher_add_event(fs_event_watcher *watcher, kfs_event *kfse);

static lck_grp_attr_t *fsevent_group_attr;
static lck_attr_t     *fsevent_lock_attr;
static lck_grp_t      *fsevent_mutex_group;

static lck_grp_t      *fsevent_rw_group;

static lck_rw_t  fsevent_big_lock;   // always grab this first
static lck_mtx_t watch_list_lock;
static lck_mtx_t event_buf_lock;

static void init_pathbuff(void);
char *get_pathbuff(void);
void release_pathbuff(char *path);


static void
fsevents_internal_init(void)
{
    int i;

    if (fs_event_init++ != 0) {
        return;
    }

    for(i=0; i < FSE_MAX_EVENTS; i++) {
        fs_event_type_watchers[i] = 0;
    }

    for(i=0; i < MAX_KFS_EVENTS; i++) {
        fs_event_buf[i].type     = FSE_INVALID;
        fs_event_buf[i].refcount = 0;
    }

    SLIST_INIT(&watch_list_head);

    fsevent_lock_attr   = lck_attr_alloc_init();
    fsevent_group_attr  = lck_grp_attr_alloc_init();
    fsevent_mutex_group = lck_grp_alloc_init("fsevent-mutex", fsevent_group_attr);
    fsevent_rw_group    = lck_grp_alloc_init("fsevent-rw", fsevent_group_attr);

    lck_mtx_init(&watch_list_lock, fsevent_mutex_group, fsevent_lock_attr);
    lck_mtx_init(&event_buf_lock, fsevent_mutex_group, fsevent_lock_attr);

    lck_rw_init(&fsevent_big_lock, fsevent_rw_group, fsevent_lock_attr);

    init_pathbuff();
}

static void
lock_watch_list(void)
{
    lck_mtx_lock(&watch_list_lock);
}

static void
unlock_watch_list(void)
{
    lck_mtx_unlock(&watch_list_lock);
}

static void
lock_fs_event_buf(void)
{
    lck_mtx_lock(&event_buf_lock);
}

static void
unlock_fs_event_buf(void)
{
    lck_mtx_unlock(&event_buf_lock);
}

// forward declaration
static void do_free_event(kfs_event *kfse);


static int
watcher_cares_about_dev(fs_event_watcher *watcher, dev_t dev)
{
    unsigned int i;

    // if there is no list of devices to watch, then always
    // say we're interested so we'll report all events from
    // all devices
    if (watcher->devices_to_watch == NULL) {
        return 1;
    }

    for(i=0; i < watcher->num_devices; i++) {
        if (dev == watcher->devices_to_watch[i]) {
            // found a match! that means we want events
            // from this device
            return 1;
        }
    }

    // if we're here it's not in the devices_to_watch[]
    // list so that means we do not care about it
    return 0;
}

int
need_fsevent(int type, vnode_t vp)
{
    fs_event_watcher *watcher;
    dev_t dev;

    if (fs_event_type_watchers[type] == 0)
        return (0);

    dev = (dev_t)(vp->v_mount->mnt_vfsstat.f_fsid.val[0]);

    SLIST_FOREACH(watcher, &watch_list_head, link) {
        if (watcher->event_list[type] == FSE_REPORT && watcher_cares_about_dev(watcher, dev)) {
            return (1);
        }
    }

    return (0);
}

int
add_fsevent(int type, vfs_context_t ctx, ...)
{
    struct proc *p = vfs_context_proc(ctx);
    int i, arg_idx, num_deliveries=0;
    kfs_event_arg *kea;
    kfs_event *kfse;
    fs_event_watcher *watcher;
    va_list ap;
    dev_t dev = 0;

    va_start(ap, ctx);

    // if no one cares about this type of event, bail out
    if (fs_event_type_watchers[type] == 0) {
        va_end(ap);
        return 0;
    }

    lck_rw_lock_shared(&fsevent_big_lock);

    // find a free event and snag it for our use
    // NOTE: do not do anything that would block until
    //       the lock is dropped.
    lock_fs_event_buf();

    for(i=0; i < MAX_KFS_EVENTS; i++) {
        if (fs_event_buf[(free_event_idx + i) % MAX_KFS_EVENTS].type == FSE_INVALID) {
            break;
        }
    }

    if (i >= MAX_KFS_EVENTS) {
        // yikes! no free slots
        unlock_fs_event_buf();
        va_end(ap);

        lock_watch_list();
        SLIST_FOREACH(watcher, &watch_list_head, link) {
            watcher->flags |= WATCHER_DROPPED_EVENTS;
            wakeup((caddr_t)watcher);
        }
        unlock_watch_list();
        lck_rw_done(&fsevent_big_lock);

        printf("fs_events: add_event: event queue is full! dropping events.\n");
        return ENOSPC;
    }

    kfse = &fs_event_buf[(free_event_idx + i) % MAX_KFS_EVENTS];

    kfse->type     = type;
    kfse->refcount = 0;
    kfse->pid      = p->p_pid;

    unlock_fs_event_buf();   // at this point it's safe to unlock

    // now process the arguments passed in and copy them into
    // the kfse
    arg_idx = 0;
    while(arg_idx < KFS_NUM_ARGS) {
        kea = &kfse->args[arg_idx++];
        kea->type = va_arg(ap, int32_t);

        if (kea->type == FSE_ARG_DONE) {
            break;
        }

        switch(kea->type) {
            case FSE_ARG_VNODE: {
                // this expands out into multiple arguments to the client
                struct vnode *vp;
                struct vnode_attr va;

                kea->data.vp = vp = va_arg(ap, struct vnode *);
                if (kea->data.vp == NULL) {
                    panic("add_fsevent: you can't pass me a NULL vnode ptr (type %d)!\n",
                          type);
                }

                if (vnode_ref_ext(kea->data.vp, O_EVTONLY) != 0) {
                    // can't ref the vnode: terminate the arg list
                    // here and deliver what we have
                    kea->type = FSE_ARG_DONE;
                    arg_idx = KFS_NUM_ARGS;
                    break;
                }
                VATTR_INIT(&va);
                VATTR_WANTED(&va, va_fsid);
                VATTR_WANTED(&va, va_fileid);
                VATTR_WANTED(&va, va_mode);
                VATTR_WANTED(&va, va_uid);
                VATTR_WANTED(&va, va_gid);
                if (vnode_getattr(kea->data.vp, &va, ctx) != 0) {
                    vnode_rele_ext(kea->data.vp, O_EVTONLY, 0);
                    kea->type = FSE_ARG_DONE;
                    arg_idx = KFS_NUM_ARGS;
                    break;
                }

                // the vnode arg is followed by the info we just
                // gathered about it
                kea = &kfse->args[arg_idx++];
                kea->type = FSE_ARG_DEV;
                kea->data.dev = dev = (dev_t)va.va_fsid;

                kea = &kfse->args[arg_idx++];
                kea->type = FSE_ARG_INO;
                kea->data.ino = (ino_t)va.va_fileid;

                kea = &kfse->args[arg_idx++];
                kea->type = FSE_ARG_MODE;
                kea->data.mode = (int32_t)vnode_vttoif(vnode_vtype(vp)) | va.va_mode;

                kea = &kfse->args[arg_idx++];
                kea->type = FSE_ARG_UID;
                kea->data.uid = va.va_uid;

                kea = &kfse->args[arg_idx++];
                kea->type = FSE_ARG_GID;
                kea->data.gid = va.va_gid;
                break;
            }

            case FSE_ARG_FINFO: {
                fse_info *fse;

                fse = va_arg(ap, fse_info *);

                // expands into the same args as FSE_ARG_VNODE,
                // but without touching the vnode
                kea->type = FSE_ARG_DEV;
                kea->data.dev = dev = (dev_t)fse->dev;

                kea = &kfse->args[arg_idx++];
                kea->type = FSE_ARG_INO;
                kea->data.ino = (ino_t)fse->ino;

                kea = &kfse->args[arg_idx++];
                kea->type = FSE_ARG_MODE;
                kea->data.mode = (int32_t)fse->mode;

                kea = &kfse->args[arg_idx++];
                kea->type = FSE_ARG_UID;
                kea->data.uid = (uid_t)fse->uid;

                kea = &kfse->args[arg_idx++];
                kea->type = FSE_ARG_GID;
                kea->data.gid = (gid_t)fse->gid;
                break;
            }

            case FSE_ARG_STRING:
                kea->len = (int16_t)(va_arg(ap, int32_t) & 0xffff);
                kea->data.str = vfs_addname(va_arg(ap, char *), kea->len, 0, 0);
                break;

            case FSE_ARG_INT32:
                kea->data.int32 = va_arg(ap, int32_t);
                break;

            case FSE_ARG_INT64:
                printf("fs_events: 64-bit args not implemented.\n");
//              kea->data.int64 = va_arg(ap, int64_t);
                break;

            case FSE_ARG_RAW:
                kea->len = (int16_t)(va_arg(ap, int32_t) & 0xffff);
                MALLOC(kea->data.ptr, void *, kea->len, M_TEMP, M_WAITOK);
                memcpy(kea->data.ptr, va_arg(ap, void *), kea->len);
                break;

            case FSE_ARG_DEV:
                kea->data.dev = dev = va_arg(ap, dev_t);
                break;

            case FSE_ARG_MODE:
                kea->data.mode = va_arg(ap, int32_t);
                break;

            case FSE_ARG_INO:
                kea->data.ino = va_arg(ap, ino_t);
                break;

            case FSE_ARG_UID:
                kea->data.uid = va_arg(ap, uid_t);
                break;

            case FSE_ARG_GID:
                kea->data.gid = va_arg(ap, gid_t);
                break;

            default:
                printf("add_fsevent: unknown type %d\n", kea->type);
                // just skip one 32-bit word and hope we sync up...
                (void)va_arg(ap, int32_t);
                break;
        }
    }

    va_end(ap);

    // now we have to go and let everyone who is interested
    // in this type of event know about it...
    lock_watch_list();

    SLIST_FOREACH(watcher, &watch_list_head, link) {
        if (watcher->event_list[type] == FSE_REPORT && watcher_cares_about_dev(watcher, dev)) {
            if (watcher_add_event(watcher, kfse) == 0) {
                num_deliveries++;
            }
        }
    }

    unlock_watch_list();

    // just in case no one was interested after all...
    if (num_deliveries == 0) {
        do_free_event(kfse);
        free_event_idx = (int)(kfse - &fs_event_buf[0]);
    }

    lck_rw_done(&fsevent_big_lock);

    return 0;
}

static void
do_free_event(kfs_event *kfse)
{
    int i;
    kfs_event_arg *kea, all_args[KFS_NUM_ARGS];

    lock_fs_event_buf();

    // mark this fsevent as invalid
    kfse->type = FSE_INVALID;

    // make a copy of this so we can free things without
    // holding the fs_event_buf lock
    memcpy(&all_args[0], &kfse->args[0], sizeof(all_args));

    // and just to be anal, set this so that there are no args
    kfse->args[0].type = FSE_ARG_DONE;

    free_event_idx = (kfse - fs_event_buf);

    unlock_fs_event_buf();

    for(i=0; i < KFS_NUM_ARGS; i++) {
        kea = &all_args[i];
        if (kea->type == FSE_ARG_DONE) {
            break;
        }

        switch(kea->type) {
            case FSE_ARG_VNODE:
                vnode_rele_ext(kea->data.vp, O_EVTONLY, 0);
                break;
            case FSE_ARG_STRING:
                vfs_removename(kea->data.str);
                break;
            case FSE_ARG_RAW:
                FREE(kea->data.ptr, M_TEMP);
                break;
        }
    }
}

static int
add_watcher(int8_t *event_list, int32_t num_events, int32_t eventq_size, fs_event_watcher **watcher_out)
{
    int i;
    fs_event_watcher *watcher;

    if (eventq_size < 0 || eventq_size > MAX_KFS_EVENTS) {
        eventq_size = MAX_KFS_EVENTS;
    }

    // Note: the event_queue follows the fs_event_watcher struct
    //       in memory so we only have to do one allocation
    MALLOC(watcher, fs_event_watcher *,
           sizeof(fs_event_watcher) + eventq_size * sizeof(kfs_event *),
           M_TEMP, M_WAITOK);

    watcher->event_list       = event_list;
    watcher->num_events       = num_events;
    watcher->devices_to_watch = NULL;
    watcher->num_devices      = 0;
    watcher->flags            = 0;
    watcher->event_queue      = (kfs_event **)&watcher[1];
    watcher->eventq_size      = eventq_size;
    watcher->rd               = 0;
    watcher->wr               = 0;
    watcher->blockers         = 0;

    lock_watch_list();

    // now update the global list of who's interested in
    // events of a particular type...
    for(i=0; i < num_events; i++) {
        if (event_list[i] != FSE_IGNORE && i < FSE_MAX_EVENTS) {
            fs_event_type_watchers[i]++;
        }
    }

    SLIST_INSERT_HEAD(&watch_list_head, watcher, link);

    unlock_watch_list();

    *watcher_out = watcher;

    return 0;
}
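
//
// Layout of the single allocation made in add_watcher() above
// (a sketch):
//
//     +-------------------+--------------------------------------+
//     | fs_event_watcher  | kfs_event *event_queue[eventq_size]  |
//     +-------------------+--------------------------------------+
//     ^watcher             ^(kfs_event **)&watcher[1]
//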

static void
remove_watcher(fs_event_watcher *target)
{
    int i;
    fs_event_watcher *watcher;
    kfs_event *kfse;

    lck_rw_lock_shared(&fsevent_big_lock);

    lock_watch_list();

    SLIST_FOREACH(watcher, &watch_list_head, link) {
        if (watcher == target) {
            SLIST_REMOVE(&watch_list_head, watcher, fs_event_watcher, link);

            for(i=0; i < watcher->num_events; i++) {
                if (watcher->event_list[i] != FSE_IGNORE && i < FSE_MAX_EVENTS) {
                    fs_event_type_watchers[i]--;
                }
            }

            unlock_watch_list();

            // drain the event_queue
            for(i=watcher->rd; i != watcher->wr; i=(i+1) % watcher->eventq_size) {
                kfse = watcher->event_queue[i];

                if (OSAddAtomic(-1, (SInt32 *)&kfse->refcount) == 1) {
                    do_free_event(kfse);
                }
            }

            if (watcher->event_list) {
                FREE(watcher->event_list, M_TEMP);
                watcher->event_list = NULL;
            }
            if (watcher->devices_to_watch) {
                FREE(watcher->devices_to_watch, M_TEMP);
                watcher->devices_to_watch = NULL;
            }
            FREE(watcher, M_TEMP);

            lck_rw_done(&fsevent_big_lock);
            return;
        }
    }

    unlock_watch_list();
    lck_rw_done(&fsevent_big_lock);
}

static int
watcher_add_event(fs_event_watcher *watcher, kfs_event *kfse)
{
    if (((watcher->wr + 1) % watcher->eventq_size) == watcher->rd) {
        watcher->flags |= WATCHER_DROPPED_EVENTS;
        wakeup((caddr_t)watcher);
        return ENOSPC;
    }

    watcher->event_queue[watcher->wr] = kfse;
    OSAddAtomic(1, (SInt32 *)&kfse->refcount);
    watcher->wr = (watcher->wr + 1) % watcher->eventq_size;

    // wake up the watcher if he's waiting!
    wakeup((caddr_t)watcher);

    return 0;
}
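
//
// Note: the queue above is a classic one-reader/one-writer ring
// buffer.  "Full" is detected before wr would collide with rd,
// i.e. (wr + 1) % eventq_size == rd, which keeps one slot unused
// so that rd == wr can unambiguously mean "empty" (that is the
// test used in fmod_watch() and fseventsf_select() below).
//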

static int
fmod_watch(fs_event_watcher *watcher, struct uio *uio)
{
    int i, error=0, last_full_event_resid;
    kfs_event *kfse;
    kfs_event_arg *kea;
    uint16_t tmp16;
    char *pathbuff;
    int pathbuff_len;

    // LP64todo - fix this
    last_full_event_resid = uio_resid(uio);

    // need at least 2048 bytes of space (maxpathlen + 1 event buf)
    if (uio_resid(uio) < 2048 || watcher == NULL) {
        return EINVAL;
    }

    if (watcher->rd == watcher->wr) {
        if (watcher->flags & WATCHER_CLOSING) {
            return 0;
        }
        OSAddAtomic(1, (SInt32 *)&watcher->blockers);

        // there's nothing to do, go to sleep
        error = tsleep((caddr_t)watcher, PUSER|PCATCH, "fsevents_empty", 0);

        OSAddAtomic(-1, (SInt32 *)&watcher->blockers);

        if (error != 0 || (watcher->flags & WATCHER_CLOSING)) {
            return error;
        }
    }

    // if we dropped events, return that as an event first
    if (watcher->flags & WATCHER_DROPPED_EVENTS) {
        int32_t val = FSE_EVENTS_DROPPED;

        error = uiomove((caddr_t)&val, sizeof(int32_t), uio);
        if (error == 0) {
            val = 0;               // a fake pid
            error = uiomove((caddr_t)&val, sizeof(int32_t), uio);

            tmp16 = FSE_ARG_DONE;  // makes it a consistent msg
            error = uiomove((caddr_t)&tmp16, sizeof(int16_t), uio);
        }

        if (error) {
            return error;
        }

        watcher->flags &= ~WATCHER_DROPPED_EVENTS;
    }

    // check if the next chunk of data will fit in the user's
    // buffer.  if not, just goto get_out which will return
    // the number of bytes worth of events that we did read.
    // this leaves the event that didn't fit in the queue.
    //
    // LP64todo - fix this
#define CHECK_UPTR(size) if (size > (unsigned)uio_resid(uio)) { \
                             uio_setresid(uio, last_full_event_resid); \
                             goto get_out; \
                         }
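
    // For reference, each event consumed below is laid out in the
    // user's buffer like this (derived from the uiomove() calls in
    // this function, not a separately documented ABI):
    //
    //     int32_t  type;     // FSE_* event code
    //     int32_t  pid;      // pid of the process that did the op
    //     // then, for each argument:
    //     uint16_t argtype;  // FSE_ARG_*
    //     uint16_t arglen;   // payload size in bytes
    //     ...                // 'arglen' bytes of payload
    //     // the list is terminated by a bare FSE_ARG_DONE tag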

    for (; uio_resid(uio) > 0 && watcher->rd != watcher->wr; ) {
        kfse = watcher->event_queue[watcher->rd];

        // copy out the type of the event
        CHECK_UPTR(sizeof(int32_t));
        if ((error = uiomove((caddr_t)&kfse->type, sizeof(int32_t), uio)) != 0) {
            goto get_out;
        }

        // now copy out the pid of the person that changed the file
        CHECK_UPTR(sizeof(pid_t));
        if ((error = uiomove((caddr_t)&kfse->pid, sizeof(pid_t), uio)) != 0) {
            goto get_out;
        }

        // and now each of the arguments
        for(i=0; i < KFS_NUM_ARGS && error == 0; i++) {
            kea = &kfse->args[i];

            tmp16 = (uint16_t)kea->type;
            CHECK_UPTR(sizeof(uint16_t));
            error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
            if (error || kea->type == FSE_ARG_DONE) {
                break;
            }

            switch(kea->type) {
                case FSE_ARG_VNODE:
                    pathbuff = get_pathbuff();
                    pathbuff_len = MAXPATHLEN;
                    if (kea->data.vp == NULL) {
                        printf("fmod_watch: whoa... vp == NULL (%d)!\n", kfse->type);
                        release_pathbuff(pathbuff);
                        break;
                    }

                    if (vn_getpath(kea->data.vp, pathbuff, &pathbuff_len) != 0 || pathbuff[0] == '\0') {
//                      printf("fmod_watch: vn_getpath failed! vp 0x%x vname 0x%x (%s) vparent 0x%x\n",
//                             kea->data.vp,
//                             VNAME(kea->data.vp),
//                             VNAME(kea->data.vp) ? VNAME(kea->data.vp) : "<null>",
//                             VPARENT(kea->data.vp));
                    }
                    CHECK_UPTR(sizeof(uint16_t));
                    tmp16 = (uint16_t)pathbuff_len;
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);

                    CHECK_UPTR((unsigned)pathbuff_len);
                    error = uiomove((caddr_t)pathbuff, pathbuff_len, uio);
                    release_pathbuff(pathbuff);
                    break;

                case FSE_ARG_STRING:
                    tmp16 = (uint16_t)kea->len;
                    CHECK_UPTR(sizeof(uint16_t));
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);

                    CHECK_UPTR(kea->len);
                    error = uiomove((caddr_t)kea->data.str, kea->len, uio);
                    break;

                case FSE_ARG_INT32:
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(int32_t));
                    tmp16 = sizeof(int32_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.int32, sizeof(int32_t), uio);
                    break;

                case FSE_ARG_INT64:
                    printf("fs_events: 64-bit args not implemented on copyout.\n");
//                  CHECK_UPTR(sizeof(uint16_t) + sizeof(int64_t));
//                  tmp16 = sizeof(int64_t);
//                  error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
//                  error = uiomove((caddr_t)&kea->data.int64, sizeof(int64_t), uio);
                    break;

                case FSE_ARG_RAW:
                    tmp16 = (uint16_t)kea->len;
                    CHECK_UPTR(sizeof(uint16_t));
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);

                    CHECK_UPTR(kea->len);
                    error = uiomove((caddr_t)kea->data.ptr, kea->len, uio);
                    break;

                case FSE_ARG_DEV:
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(dev_t));
                    tmp16 = sizeof(dev_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.dev, sizeof(dev_t), uio);
                    break;

                case FSE_ARG_INO:
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(ino_t));
                    tmp16 = sizeof(ino_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.ino, sizeof(ino_t), uio);
                    break;

                case FSE_ARG_MODE:
                    // XXXdbg - NOTE: we use 32-bits for the mode, not
                    //          16-bits like a real mode_t
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(int32_t));
                    tmp16 = sizeof(int32_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.mode, sizeof(int32_t), uio);
                    break;

                case FSE_ARG_UID:
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(uid_t));
                    tmp16 = sizeof(uid_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.uid, sizeof(uid_t), uio);
                    break;

                case FSE_ARG_GID:
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(gid_t));
                    tmp16 = sizeof(gid_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.gid, sizeof(gid_t), uio);
                    break;

                default:
                    printf("fmod_watch: unknown arg type %d.\n", kea->type);
                    break;
            }
        }

        // make sure that we always end with a FSE_ARG_DONE
        if (i >= KFS_NUM_ARGS) {
            tmp16 = FSE_ARG_DONE;
            CHECK_UPTR(sizeof(uint16_t));
            error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
        }

        // LP64todo - fix this
        last_full_event_resid = uio_resid(uio);

        watcher->rd = (watcher->rd + 1) % watcher->eventq_size;

        if (OSAddAtomic(-1, (SInt32 *)&kfse->refcount) == 1) {
            do_free_event(kfse);
        }
    }

  get_out:
    return error;
}

//
// release any references we might have on vnodes which are
// the mount point passed to us (so that it can be cleanly
// unmounted).
//
// since we don't want to lose the events we'll convert the
// vnode refs to the full path, inode #, and uid.
//
void
fsevent_unmount(struct mount *mp)
{
    int i, j;
    kfs_event *kfse;
    kfs_event_arg *kea;

    lck_rw_lock_exclusive(&fsevent_big_lock);
    lock_fs_event_buf();

    for(i=0; i < MAX_KFS_EVENTS; i++) {
        if (fs_event_buf[i].type == FSE_INVALID) {
            continue;
        }

        kfse = &fs_event_buf[i];
        for(j=0; j < KFS_NUM_ARGS; j++) {
            kea = &kfse->args[j];
            if (kea->type == FSE_ARG_DONE) {
                break;
            }

            if (kea->type == FSE_ARG_VNODE && kea->data.vp->v_mount == mp) {
                struct vnode *vp;
                char *pathbuff;
                int   pathbuff_len;

                vp = kea->data.vp;
                pathbuff = get_pathbuff();
                pathbuff_len = MAXPATHLEN;

                if (vn_getpath(vp, pathbuff, &pathbuff_len) != 0 || pathbuff[0] == '\0') {
                    const char *vname;

                    vname = vnode_getname(vp);

                    printf("fsevent_unmount: vn_getpath failed! vp 0x%x vname 0x%x (%s) vparent 0x%x\n",
                           vp, vname, vname ? vname : "<null>", vp->v_parent);

                    if (vname) {
                        vnode_putname(vname);
                    }
                }

                // switch the type of the string
                kea->type     = FSE_ARG_STRING;
                kea->data.str = vfs_addname(pathbuff, pathbuff_len, 0, 0);
                kea->len      = pathbuff_len;
                release_pathbuff(pathbuff);

                // and finally let go of the reference on the vnode
                vnode_rele_ext(vp, O_EVTONLY, 0);
            }
        }
    }

    unlock_fs_event_buf();
    lck_rw_done(&fsevent_big_lock);
}

//
// /dev/fsevents device code
//
static int fsevents_installed = 0;
static struct lock__bsd__ fsevents_lck;

typedef struct fsevent_handle {
    fs_event_watcher *watcher;
    struct selinfo    si;
} fsevent_handle;


static int
fseventsf_read(struct fileproc *fp, struct uio *uio,
               __unused kauth_cred_t *cred, __unused int flags,
               __unused struct proc *p)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;
    int error;

    error = fmod_watch(fseh->watcher, uio);

    return error;
}

static int
fseventsf_write(__unused struct fileproc *fp, __unused struct uio *uio,
                __unused kauth_cred_t *cred, __unused int flags,
                __unused struct proc *p)
{
    return EIO;
}

static int
fseventsf_ioctl(struct fileproc *fp, u_long cmd, caddr_t data, struct proc *p)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;
    int ret = 0;
    fsevent_dev_filter_args *devfilt_args=(fsevent_dev_filter_args *)data;

    switch (cmd) {
        case FIONBIO:
        case FIOASYNC:
            return 0;

        case FSEVENTS_DEVICE_FILTER: {
            int new_num_devices;
            dev_t *devices_to_watch, *tmp=NULL;

            if (devfilt_args->num_devices > 256) {
                ret = EINVAL;
                break;
            }

            new_num_devices = devfilt_args->num_devices;
            if (new_num_devices == 0) {
                tmp = fseh->watcher->devices_to_watch;

                lock_watch_list();
                fseh->watcher->devices_to_watch = NULL;
                fseh->watcher->num_devices = new_num_devices;
                unlock_watch_list();

                if (tmp) {
                    FREE(tmp, M_TEMP);
                }
                break;
            }

            MALLOC(devices_to_watch, dev_t *,
                   new_num_devices * sizeof(dev_t),
                   M_TEMP, M_WAITOK);
            if (devices_to_watch == NULL) {
                ret = ENOMEM;
                break;
            }

            ret = copyin(CAST_USER_ADDR_T(devfilt_args->devices),
                         (void *)devices_to_watch,
                         new_num_devices * sizeof(dev_t));
            if (ret) {
                FREE(devices_to_watch, M_TEMP);
                break;
            }

            lock_watch_list();
            fseh->watcher->num_devices = new_num_devices;
            tmp = fseh->watcher->devices_to_watch;
            fseh->watcher->devices_to_watch = devices_to_watch;
            unlock_watch_list();

            if (tmp) {
                FREE(tmp, M_TEMP);
            }

            break;
        }

        default:
            ret = EINVAL;
            break;
    }

    return (ret);
}

static int
fseventsf_select(struct fileproc *fp, int which, void *wql, struct proc *p)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;
    int ready = 0;

    if ((which != FREAD) || (fseh->watcher->flags & WATCHER_CLOSING)) {
        return 0;
    }

    // if there's nothing in the queue, we're not ready
    if (fseh->watcher->rd != fseh->watcher->wr) {
        ready = 1;
    }

    if (!ready) {
        selrecord(p, &fseh->si, wql);
    }

    return ready;
}

static int
fseventsf_stat(struct fileproc *fp, struct stat *sb, struct proc *p)
{
    return ENOTSUP;
}

static int
fseventsf_close(struct fileglob *fg, struct proc *p)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fg->fg_data;

    remove_watcher(fseh->watcher);

    fg->fg_data = NULL;
    fseh->watcher = NULL;
    FREE(fseh, M_TEMP);

    return 0;
}

static int
fseventsf_kqfilter(struct fileproc *fp, struct knote *kn, struct proc *p)
{
    // kqueue is not supported on this device
    return EINVAL;
}

static int
fseventsf_drain(struct fileproc *fp, struct proc *p)
{
    int counter = 0;
    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;

    fseh->watcher->flags |= WATCHER_CLOSING;

    // if there are people still waiting, sleep for 10ms to
    // let them clean up and get out of there.  however we
    // also don't want to get stuck forever so if they don't
    // exit after 5 seconds we're tearing things down anyway.
    while(fseh->watcher->blockers && counter++ < 500) {
        // issue wakeup in case anyone is blocked waiting for an event
        // do this each time we wakeup in case the blocker missed
        // the wakeup due to the unprotected test of WATCHER_CLOSING
        // and decision to tsleep in fmod_watch... this bit of
        // latency is a decent tradeoff against not having to
        // take and drop a lock in fmod_watch
        wakeup((caddr_t)fseh->watcher);

        tsleep((caddr_t)fseh->watcher, PRIBIO, "watcher-close", 1);
    }

    return 0;
}

static int
fseventsopen(dev_t dev, int flag, int mode, struct proc *p)
{
    if (!is_suser()) {
        return EPERM;
    }

    return 0;
}

static int
fseventsclose(dev_t dev, int flag, int mode, struct proc *p)
{
    return 0;
}

static int
fseventsread(dev_t dev, struct uio *uio, int ioflag)
{
    return EIO;
}

static int
fseventswrite(dev_t dev, struct uio *uio, int ioflag)
{
    return EIO;
}


static struct fileops fsevents_fops = {
    fseventsf_read,
    fseventsf_write,
    fseventsf_ioctl,
    fseventsf_select,
    fseventsf_close,
    fseventsf_kqfilter,
    fseventsf_drain
};


static int
fseventsioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
    int error, fd;
    struct fileproc *f;
    fsevent_handle *fseh = NULL;
    fsevent_clone_args *fse_clone_args=(fsevent_clone_args *)data;
    int8_t *event_list;

    switch (cmd) {
        case FSEVENTS_CLONE:
            if (fse_clone_args->num_events < 0 || fse_clone_args->num_events > 4096) {
                return EINVAL;
            }

            MALLOC(fseh, fsevent_handle *, sizeof(fsevent_handle),
                   M_TEMP, M_WAITOK);
            memset(fseh, 0, sizeof(fsevent_handle));

            MALLOC(event_list, int8_t *,
                   fse_clone_args->num_events * sizeof(int8_t),
                   M_TEMP, M_WAITOK);

            error = copyin(CAST_USER_ADDR_T(fse_clone_args->event_list),
                           (void *)event_list,
                           fse_clone_args->num_events * sizeof(int8_t));
            if (error) {
                FREE(event_list, M_TEMP);
                FREE(fseh, M_TEMP);
                return error;
            }

            error = add_watcher(event_list,
                                fse_clone_args->num_events,
                                fse_clone_args->event_queue_depth,
                                &fseh->watcher);
            if (error) {
                FREE(event_list, M_TEMP);
                FREE(fseh, M_TEMP);
                return error;
            }

            error = falloc(p, &f, &fd);
            if (error) {
                FREE(event_list, M_TEMP);
                FREE(fseh, M_TEMP);
                return error;
            }
            proc_fdlock(p);
            f->f_fglob->fg_flag = FREAD | FWRITE;
            f->f_fglob->fg_type = DTYPE_FSEVENTS;
            f->f_fglob->fg_ops  = &fsevents_fops;
            f->f_fglob->fg_data = (caddr_t) fseh;
            proc_fdunlock(p);
            copyout((void *)&fd, CAST_USER_ADDR_T(fse_clone_args->fd), sizeof(int32_t));
            proc_fdlock(p);
            *fdflags(p, fd) &= ~UF_RESERVED;
            fp_drop(p, fd, f, 1);
            proc_fdunlock(p);
            break;

        default:
            error = EINVAL;
            break;
    }

    return error;
}
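
// Rough user-space usage sketch (illustrative only; the types are
// from <sys/fsevents.h>, error handling omitted):
//
//     int8_t  events[FSE_MAX_EVENTS];   // FSE_REPORT / FSE_IGNORE per type
//     int32_t fd = -1;
//     fsevent_clone_args clone_args;
//
//     clone_args.event_list        = events;
//     clone_args.num_events        = FSE_MAX_EVENTS;
//     clone_args.event_queue_depth = 256;
//     clone_args.fd                = &fd;
//
//     int dev_fd = open("/dev/fsevents", O_RDONLY);
//     ioctl(dev_fd, FSEVENTS_CLONE, &clone_args);
//     // read(fd, ...) now returns events in the format
//     // described above the copy loop in fmod_watch()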

static int
fseventsselect(dev_t dev, int rw, struct proc *p)
{
    return 0;
}

static void
fsevents_wakeup(fsevent_handle *fseh)
{
    wakeup((caddr_t)fseh);
    selwakeup(&fseh->si);
}

/*
 * A struct describing which functions will get invoked for certain
 * actions.
 */
static struct cdevsw fsevents_cdevsw =
{
    fseventsopen,   /* open */
    fseventsclose,  /* close */
    fseventsread,   /* read */
    fseventswrite,  /* write */
    fseventsioctl,  /* ioctl */
    nulldev,        /* stop */
    nulldev,        /* reset */
    NULL,           /* tty's */
    eno_select,     /* select */
    eno_mmap,       /* mmap */
    eno_strat,      /* strategy */
    eno_getc,       /* getc */
    eno_putc,       /* putc */
    0               /* type */
};


/*
 * Called to initialize our device,
 * and to register ourselves with devfs
 */
void
fsevents_init(void)
{
    int ret;

    if (fsevents_installed) {
        return;
    }

    fsevents_installed = 1;

    lockinit(&fsevents_lck, PLOCK, "fsevents", 0, 0);

    ret = cdevsw_add(-1, &fsevents_cdevsw);
    if (ret < 0) {
        fsevents_installed = 0;
        return;
    }

    devfs_make_node(makedev (ret, 0), DEVFS_CHAR,
                    UID_ROOT, GID_WHEEL, 0644, "fsevents", 0);

    fsevents_internal_init();
}


//
// XXXdbg - temporary path buffer handling
//
#define NUM_PATH_BUFFS 16
static char path_buff[NUM_PATH_BUFFS][MAXPATHLEN];
static char path_buff_inuse[NUM_PATH_BUFFS];

static lck_grp_attr_t *pathbuff_group_attr;
static lck_attr_t     *pathbuff_lock_attr;
static lck_grp_t      *pathbuff_mutex_group;
static lck_mtx_t       pathbuff_lock;

static void
init_pathbuff(void)
{
    pathbuff_lock_attr   = lck_attr_alloc_init();
    pathbuff_group_attr  = lck_grp_attr_alloc_init();
    pathbuff_mutex_group = lck_grp_alloc_init("pathbuff-mutex", pathbuff_group_attr);

    lck_mtx_init(&pathbuff_lock, pathbuff_mutex_group, pathbuff_lock_attr);
}

static void
lock_pathbuff(void)
{
    lck_mtx_lock(&pathbuff_lock);
}

static void
unlock_pathbuff(void)
{
    lck_mtx_unlock(&pathbuff_lock);
}


char *
get_pathbuff(void)
{
    int i;
    char *path;

    lock_pathbuff();
    for(i=0; i < NUM_PATH_BUFFS; i++) {
        if (path_buff_inuse[i] == 0) {
            break;
        }
    }

    if (i >= NUM_PATH_BUFFS) {
        // all the static buffers are in use; fall back to an
        // allocation from the M_NAMEI zone
        unlock_pathbuff();
        MALLOC_ZONE(path, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
        return path;
    }

    path_buff_inuse[i] = 1;
    unlock_pathbuff();
    return &path_buff[i][0];
}

void
release_pathbuff(char *path)
{
    int i;

    if (path == NULL) {
        return;
    }

    lock_pathbuff();
    for(i=0; i < NUM_PATH_BUFFS; i++) {
        if (path == &path_buff[i][0]) {
            path_buff[i][0]    = '\0';
            path_buff_inuse[i] = 0;
            unlock_pathbuff();
            return;
        }
    }
    unlock_pathbuff();

    // if we get here then it wasn't one of our temp buffers
    FREE_ZONE(path, MAXPATHLEN, M_NAMEI);
}
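
//
// Note on the pool above: get_pathbuff() hands out one of the
// NUM_PATH_BUFFS static buffers when it can and only falls back to
// a MAXPATHLEN allocation from the M_NAMEI zone when all of them
// are busy; release_pathbuff() distinguishes the two cases by
// checking whether the pointer lies inside path_buff[].
//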

int
get_fse_info(struct vnode *vp, fse_info *fse, vfs_context_t ctx)
{
    struct vnode_attr va;

    VATTR_INIT(&va);
    VATTR_WANTED(&va, va_fsid);
    VATTR_WANTED(&va, va_fileid);
    VATTR_WANTED(&va, va_mode);
    VATTR_WANTED(&va, va_uid);
    VATTR_WANTED(&va, va_gid);
    if (vnode_getattr(vp, &va, ctx) != 0) {
        return -1;
    }

    fse->dev  = (dev_t)va.va_fsid;
    fse->ino  = (ino_t)va.va_fileid;
    fse->mode = (int32_t)vnode_vttoif(vnode_vtype(vp)) | va.va_mode;
    fse->uid  = (uid_t)va.va_uid;
    fse->gid  = (gid_t)va.va_gid;

    return 0;
}
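
//
// A sketch of why get_fse_info() exists (illustrative call, not from
// this file): a caller that must post an event after the vnode is
// gone, e.g. a delete, can capture the info up front and pass it
// with FSE_ARG_FINFO, which add_fsevent() expands into the same
// dev/ino/mode/uid/gid arguments as FSE_ARG_VNODE:
//
//     fse_info fse;
//
//     get_fse_info(vp, &fse, ctx);
//     // ... the vnode is reclaimed ...
//     add_fsevent(FSE_DELETE, ctx,
//                 FSE_ARG_STRING, pathlen, path,
//                 FSE_ARG_FINFO,  &fse,
//                 FSE_ARG_DONE);
//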