/*
 * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
#include <stdarg.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/file_internal.h>
#include <sys/stat.h>
#include <sys/vnode_internal.h>
#include <sys/mount_internal.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/dirent.h>
#include <sys/sysctl.h>
#include <machine/cons.h>
#include <miscfs/specfs/specdev.h>
#include <miscfs/devfs/devfs.h>
#include <sys/filio.h>
#include <kern/locks.h>
#include <libkern/OSAtomic.h>

#include <bsm/audit_kernel.h>
#include <bsm/audit_kevents.h>

// where all our structs and defines come from
#include <sys/fsevents.h>
typedef struct kfs_event_arg {
    u_int16_t type;
    u_int16_t len;
    union {
        struct vnode *vp;
        char         *str;
        void         *ptr;
        int32_t       int32;
        dev_t         dev;
        ino_t         ino;
        int32_t       mode;
        uid_t         uid;
        gid_t         gid;
    } data;
} kfs_event_arg;

#define KFS_NUM_ARGS  FSE_MAX_ARGS
typedef struct kfs_event {
    int32_t       type;      // type code of this event
    u_int32_t     refcount;  // number of clients referencing this
    pid_t         pid;       // pid of the process that did the op

    kfs_event_arg args[KFS_NUM_ARGS];
} kfs_event;
typedef struct fs_event_watcher {
    SLIST_ENTRY(fs_event_watcher) link;
    int8_t      *event_list;         // the events we're interested in
    int32_t      num_events;
    dev_t       *devices_to_watch;   // only report events from these devices
    int32_t      num_devices;
    int32_t      flags;
    kfs_event  **event_queue;
    int32_t      eventq_size;        // number of event pointers in queue
    int32_t      rd, wr;             // indices to the event_queue
    int32_t      blockers;
    int32_t      num_readers;
} fs_event_watcher;

// fs_event_watcher flags
#define WATCHER_DROPPED_EVENTS  0x0001
#define WATCHER_CLOSING         0x0002

static SLIST_HEAD(watch_list, fs_event_watcher) watch_list_head = { NULL };
#define MAX_KFS_EVENTS 2048

// this array holds each pending event
static kfs_event fs_event_buf[MAX_KFS_EVENTS];
static int       free_event_idx = 0;
static int       fs_event_init  = 0;

//
// this array records whether anyone is interested in a
// particular type of event.  if no one is, we bail out
// early from the event delivery
//
static int16_t fs_event_type_watchers[FSE_MAX_EVENTS];

static int watcher_add_event(fs_event_watcher *watcher, kfs_event *kfse);
static lck_grp_attr_t *fsevent_group_attr;
static lck_attr_t     *fsevent_lock_attr;
static lck_grp_t      *fsevent_mutex_group;

static lck_grp_t      *fsevent_rw_group;

static lck_rw_t  fsevent_big_lock;   // always grab this first
static lck_mtx_t watch_list_lock;
static lck_mtx_t event_buf_lock;
static void init_pathbuff(void);

static void
fsevents_internal_init(void)
{
    int i;

    if (fs_event_init++ != 0) {
        return;
    }

    for(i=0; i < FSE_MAX_EVENTS; i++) {
        fs_event_type_watchers[i] = 0;
    }

    for(i=0; i < MAX_KFS_EVENTS; i++) {
        fs_event_buf[i].type     = FSE_INVALID;
        fs_event_buf[i].refcount = 0;
    }

    SLIST_INIT(&watch_list_head);

    fsevent_lock_attr   = lck_attr_alloc_init();
    fsevent_group_attr  = lck_grp_attr_alloc_init();
    fsevent_mutex_group = lck_grp_alloc_init("fsevent-mutex", fsevent_group_attr);
    fsevent_rw_group    = lck_grp_alloc_init("fsevent-rw", fsevent_group_attr);

    lck_mtx_init(&watch_list_lock, fsevent_mutex_group, fsevent_lock_attr);
    lck_mtx_init(&event_buf_lock, fsevent_mutex_group, fsevent_lock_attr);

    lck_rw_init(&fsevent_big_lock, fsevent_rw_group, fsevent_lock_attr);

    init_pathbuff();
}
static void
lock_watch_list(void)
{
    lck_mtx_lock(&watch_list_lock);
}

static void
unlock_watch_list(void)
{
    lck_mtx_unlock(&watch_list_lock);
}

static void
lock_fs_event_buf(void)
{
    lck_mtx_lock(&event_buf_lock);
}

static void
unlock_fs_event_buf(void)
{
    lck_mtx_unlock(&event_buf_lock);
}
// forward prototype
static void do_free_event(kfs_event *kfse);

static int
watcher_cares_about_dev(fs_event_watcher *watcher, dev_t dev)
{
    int i;

    // if there is no list of devices to watch, then always
    // say we're interested so we'll report all events from
    // all devices
    if (watcher->devices_to_watch == NULL) {
        return 1;
    }

    for(i=0; i < watcher->num_devices; i++) {
        if (dev == watcher->devices_to_watch[i]) {
            // found a match! that means we want events
            // from this device.
            return 1;
        }
    }

    // if we're here it's not in the devices_to_watch[]
    // list so that means we do not care about it
    return 0;
}
int
need_fsevent(int type, vnode_t vp)
{
    fs_event_watcher *watcher;
    dev_t             dev;

    if (fs_event_type_watchers[type] == 0)
        return 0;

    dev = (dev_t)(vp->v_mount->mnt_vfsstat.f_fsid.val[0]);

    SLIST_FOREACH(watcher, &watch_list_head, link) {
        if (watcher->event_list[type] == FSE_REPORT && watcher_cares_about_dev(watcher, dev)) {
            return 1;
        }
    }

    return 0;
}
int
add_fsevent(int type, vfs_context_t ctx, ...)
{
    struct proc      *p = vfs_context_proc(ctx);
    int               i, arg_idx, num_deliveries = 0;
    kfs_event_arg    *kea;
    kfs_event        *kfse;
    fs_event_watcher *watcher;
    va_list           ap;
    int               error = 0, base;
    dev_t             dev = 0;

    va_start(ap, ctx);

    // if no one cares about this type of event, bail out
    if (fs_event_type_watchers[type] == 0) {
        va_end(ap);
        return 0;
    }

    lck_rw_lock_shared(&fsevent_big_lock);

    // find a free event and snag it for our use
    // NOTE: do not do anything that would block until
    //       the lock is dropped.
    lock_fs_event_buf();

    base = free_event_idx;
    for(i=0; i < MAX_KFS_EVENTS; i++) {
        if (fs_event_buf[(base + i) % MAX_KFS_EVENTS].type == FSE_INVALID) {
            break;
        }
    }

    if (i >= MAX_KFS_EVENTS) {
        // yikes! no free slots
        unlock_fs_event_buf();
        va_end(ap);

        lock_watch_list();
        SLIST_FOREACH(watcher, &watch_list_head, link) {
            watcher->flags |= WATCHER_DROPPED_EVENTS;
            wakeup((caddr_t)watcher);
        }
        unlock_watch_list();
        lck_rw_done(&fsevent_big_lock);

        printf("fs_events: add_event: event queue is full! dropping events.\n");
        return ENOSPC;
    }

    kfse = &fs_event_buf[(base + i) % MAX_KFS_EVENTS];

    free_event_idx = ((base + i) % MAX_KFS_EVENTS) + 1;

    kfse->type     = type;
    kfse->refcount = 1;
    kfse->pid      = p->p_pid;

    unlock_fs_event_buf();  // at this point it's safe to unlock

    // now process the arguments passed in and copy them into
    // the kfse
    //
    arg_idx = 0;
    while(arg_idx < KFS_NUM_ARGS) {
        kea = &kfse->args[arg_idx++];
        kea->type = va_arg(ap, int32_t);

        if (kea->type == FSE_ARG_DONE) {
            break;
        }

        switch(kea->type) {
            case FSE_ARG_VNODE: {
                // this expands out into multiple arguments to the client
                struct vnode *vp;
                struct vnode_attr va;

                kea->data.vp = vp = va_arg(ap, struct vnode *);
                if (kea->data.vp == NULL) {
                    panic("add_fsevent: you can't pass me a NULL vnode ptr (type %d)!\n",
                          type);
                }

                if (vnode_ref_ext(kea->data.vp, O_EVTONLY) != 0) {
                    kea->type = FSE_ARG_DONE;
                    error = EINVAL;
                    break;
                }
                VATTR_INIT(&va);
                VATTR_WANTED(&va, va_fsid);
                VATTR_WANTED(&va, va_fileid);
                VATTR_WANTED(&va, va_mode);
                VATTR_WANTED(&va, va_uid);
                VATTR_WANTED(&va, va_gid);
                if (vnode_getattr(kea->data.vp, &va, ctx) != 0) {
                    vnode_rele_ext(kea->data.vp, O_EVTONLY, 0);
                    kea->type = FSE_ARG_DONE;
                    error = EINVAL;
                    break;
                }

                kea++;
                arg_idx++;
                kea->type     = FSE_ARG_DEV;
                kea->data.dev = dev = (dev_t)va.va_fsid;

                kea++;
                arg_idx++;
                kea->type     = FSE_ARG_INO;
                kea->data.ino = (ino_t)va.va_fileid;

                kea++;
                arg_idx++;
                kea->type      = FSE_ARG_MODE;
                kea->data.mode = (int32_t)vnode_vttoif(vnode_vtype(vp)) | va.va_mode;

                kea++;
                arg_idx++;
                kea->type     = FSE_ARG_UID;
                kea->data.uid = va.va_uid;

                kea++;
                arg_idx++;
                kea->type     = FSE_ARG_GID;
                kea->data.gid = va.va_gid;
                break;
            }

            case FSE_ARG_FINFO: {
                fse_info *fse;

                fse = va_arg(ap, fse_info *);

                kea->type     = FSE_ARG_DEV;
                kea->data.dev = dev = (dev_t)fse->dev;

                kea++;
                arg_idx++;
                kea->type     = FSE_ARG_INO;
                kea->data.ino = (ino_t)fse->ino;

                kea++;
                arg_idx++;
                kea->type      = FSE_ARG_MODE;
                kea->data.mode = (int32_t)fse->mode;

                kea++;
                arg_idx++;
                kea->type     = FSE_ARG_UID;
                kea->data.uid = (uid_t)fse->uid;

                kea++;
                arg_idx++;
                kea->type     = FSE_ARG_GID;
                kea->data.gid = (uid_t)fse->gid;
                break;
            }

            case FSE_ARG_STRING:
                kea->len      = (int16_t)(va_arg(ap, int32_t) & 0xffff);
                kea->data.str = vfs_addname(va_arg(ap, char *), kea->len, 0, 0);
                break;

            case FSE_ARG_INT32:
                kea->data.int32 = va_arg(ap, int32_t);
                break;

            case FSE_ARG_INT64:
                printf("fs_events: 64-bit args not implemented.\n");
//              kea->data.int64 = va_arg(ap, int64_t);
                break;

            case FSE_ARG_RAW:
                kea->len = (int16_t)(va_arg(ap, int32_t) & 0xffff);
                MALLOC(kea->data.ptr, void *, kea->len, M_TEMP, M_WAITOK);
                memcpy(kea->data.ptr, va_arg(ap, void *), kea->len);
                break;

            case FSE_ARG_DEV:
                kea->data.dev = dev = va_arg(ap, dev_t);
                break;

            case FSE_ARG_MODE:
                kea->data.mode = va_arg(ap, int32_t);
                break;

            case FSE_ARG_INO:
                kea->data.ino = va_arg(ap, ino_t);
                break;

            case FSE_ARG_UID:
                kea->data.uid = va_arg(ap, uid_t);
                break;

            case FSE_ARG_GID:
                kea->data.gid = va_arg(ap, gid_t);
                break;

            default:
                printf("add_fsevent: unknown type %d\n", kea->type);
                // just skip one 32-bit word and hope we sync up...
                (void)va_arg(ap, int32_t);
                break;
        }
    }

    va_end(ap);

    //
    // now we have to go and let everyone know that
    // is interested in this type of event...
    //
    lock_watch_list();

    SLIST_FOREACH(watcher, &watch_list_head, link) {
        if (watcher->event_list[type] == FSE_REPORT && watcher_cares_about_dev(watcher, dev)) {
            if (watcher_add_event(watcher, kfse) == 0) {
                num_deliveries++;
            }
        }
    }

    unlock_watch_list();

    // just in case no one was interested after all...
    if (OSAddAtomic(-1, (SInt32 *)&kfse->refcount) == 1) {
        do_free_event(kfse);
    }

    lck_rw_done(&fsevent_big_lock);
    return error;
}
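
/*
 * Illustrative caller sketch (not part of the original file): a
 * filesystem posting a rename might drive the two entry points above
 * like this, where "from_vp"/"to_vp" are hypothetical vnodes and
 * FSE_RENAME/FSE_ARG_* come from <sys/fsevents.h>:
 *
 *     if (need_fsevent(FSE_RENAME, from_vp)) {
 *         add_fsevent(FSE_RENAME, ctx,
 *                     FSE_ARG_VNODE, from_vp,
 *                     FSE_ARG_VNODE, to_vp,
 *                     FSE_ARG_DONE);
 *     }
 *
 * Note that each FSE_ARG_VNODE expands into six queue slots
 * (VNODE + DEV/INO/MODE/UID/GID), so KFS_NUM_ARGS limits how many
 * vnodes a single event can carry.
 */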
static void
do_free_event(kfs_event *kfse)
{
    int            i;
    kfs_event_arg *kea, all_args[KFS_NUM_ARGS];

    lock_fs_event_buf();

    if (kfse->refcount > 0) {
        panic("do_free_event: free'ing a kfsevent w/refcount == %d (kfse %p)\n",
              kfse->refcount, kfse);
    }

    // make a copy of this so we can free things without
    // holding the fs_event_buf lock
    //
    memcpy(&all_args[0], &kfse->args[0], sizeof(all_args));

    // and just to be anal, set this so that there are no args
    kfse->args[0].type = FSE_ARG_DONE;

    // mark this fsevent as invalid
    kfse->type = FSE_INVALID;

    free_event_idx = (kfse - fs_event_buf);

    unlock_fs_event_buf();

    for(i=0; i < KFS_NUM_ARGS; i++) {
        kea = &all_args[i];
        if (kea->type == FSE_ARG_DONE) {
            break;
        }

        switch(kea->type) {
            case FSE_ARG_VNODE:
                vnode_rele_ext(kea->data.vp, O_EVTONLY, 0);
                break;
            case FSE_ARG_STRING:
                vfs_removename(kea->data.str);
                break;
            case FSE_ARG_RAW:
                FREE(kea->data.ptr, M_TEMP);
                break;
        }
    }
}
static int
add_watcher(int8_t *event_list, int32_t num_events, int32_t eventq_size, fs_event_watcher **watcher_out)
{
    int               i;
    fs_event_watcher *watcher;

    if (eventq_size < 0 || eventq_size > MAX_KFS_EVENTS) {
        eventq_size = MAX_KFS_EVENTS;
    }

    // Note: the event_queue follows the fs_event_watcher struct
    //       in memory so we only have to do one allocation
    MALLOC(watcher,
           fs_event_watcher *,
           sizeof(fs_event_watcher) + eventq_size * sizeof(kfs_event *),
           M_TEMP, M_WAITOK);

    watcher->event_list       = event_list;
    watcher->num_events       = num_events;
    watcher->devices_to_watch = NULL;
    watcher->num_devices      = 0;
    watcher->flags            = 0;
    watcher->event_queue      = (kfs_event **)&watcher[1];
    watcher->eventq_size      = eventq_size;
    watcher->rd               = 0;
    watcher->wr               = 0;
    watcher->blockers         = 0;
    watcher->num_readers      = 0;

    lock_watch_list();

    // now update the global list of who's interested in
    // events of a particular type...
    for(i=0; i < num_events; i++) {
        if (event_list[i] != FSE_IGNORE && i < FSE_MAX_EVENTS) {
            fs_event_type_watchers[i]++;
        }
    }

    SLIST_INSERT_HEAD(&watch_list_head, watcher, link);

    unlock_watch_list();

    *watcher_out = watcher;

    return 0;
}
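
/*
 * Layout of the single MALLOC above: the pointer ring sits directly
 * after the watcher struct, so one FREE() tears down both.
 *
 *     +-------------------+--------------------------------------+
 *     | fs_event_watcher  | kfs_event *event_queue[eventq_size]  |
 *     +-------------------+--------------------------------------+
 *     ^ watcher             ^ (kfs_event **)&watcher[1]
 */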
static void
remove_watcher(fs_event_watcher *target)
{
    int               i;
    fs_event_watcher *watcher;
    kfs_event        *kfse;

    lck_rw_lock_shared(&fsevent_big_lock);

    lock_watch_list();

    SLIST_FOREACH(watcher, &watch_list_head, link) {
        if (watcher == target) {
            SLIST_REMOVE(&watch_list_head, watcher, fs_event_watcher, link);

            for(i=0; i < watcher->num_events; i++) {
                if (watcher->event_list[i] != FSE_IGNORE && i < FSE_MAX_EVENTS) {
                    fs_event_type_watchers[i]--;
                }
            }

            unlock_watch_list();

            // drain the event_queue
            for(i=watcher->rd; i != watcher->wr; i=(i+1) % watcher->eventq_size) {
                kfse = watcher->event_queue[i];

                if (OSAddAtomic(-1, (SInt32 *)&kfse->refcount) == 1) {
                    do_free_event(kfse);
                }
            }

            if (watcher->event_list) {
                FREE(watcher->event_list, M_TEMP);
                watcher->event_list = NULL;
            }
            if (watcher->devices_to_watch) {
                FREE(watcher->devices_to_watch, M_TEMP);
                watcher->devices_to_watch = NULL;
            }
            FREE(watcher, M_TEMP);

            lck_rw_done(&fsevent_big_lock);
            return;
        }
    }

    unlock_watch_list();
    lck_rw_done(&fsevent_big_lock);
}
static int
watcher_add_event(fs_event_watcher *watcher, kfs_event *kfse)
{
    if (((watcher->wr + 1) % watcher->eventq_size) == watcher->rd) {
        watcher->flags |= WATCHER_DROPPED_EVENTS;
        wakeup((caddr_t)watcher);
        return ENOSPC;
    }

    watcher->event_queue[watcher->wr] = kfse;
    OSAddAtomic(1, (SInt32 *)&kfse->refcount);
    watcher->wr = (watcher->wr + 1) % watcher->eventq_size;

    // wake up the watcher if he's waiting!
    wakeup((caddr_t)watcher);

    return 0;
}
static int
fmod_watch(fs_event_watcher *watcher, struct uio *uio)
{
    int            i, error = 0, last_full_event_resid;
    kfs_event     *kfse;
    kfs_event_arg *kea;
    uint16_t       tmp16;
    char          *pathbuff;
    int            pathbuff_len;

    // LP64todo - fix this
    last_full_event_resid = uio_resid(uio);

    // need at least 2048 bytes of space (maxpathlen + 1 event buf)
    if (uio_resid(uio) < 2048 || watcher == NULL) {
        return EINVAL;
    }

    if (OSAddAtomic(1, (SInt32 *)&watcher->num_readers) != 0) {
        // don't allow multiple threads to read from the fd at the same time
        OSAddAtomic(-1, (SInt32 *)&watcher->num_readers);
        return EAGAIN;
    }

    if (watcher->rd == watcher->wr) {
        if (watcher->flags & WATCHER_CLOSING) {
            OSAddAtomic(-1, (SInt32 *)&watcher->num_readers);
            return 0;
        }
        OSAddAtomic(1, (SInt32 *)&watcher->blockers);

        // there's nothing to do, go to sleep
        error = tsleep((caddr_t)watcher, PUSER|PCATCH, "fsevents_empty", 0);

        OSAddAtomic(-1, (SInt32 *)&watcher->blockers);

        if (error != 0 || (watcher->flags & WATCHER_CLOSING)) {
            OSAddAtomic(-1, (SInt32 *)&watcher->num_readers);
            return error;
        }
    }

    // if we dropped events, return that as an event first
    if (watcher->flags & WATCHER_DROPPED_EVENTS) {
        int32_t val = FSE_EVENTS_DROPPED;

        error = uiomove((caddr_t)&val, sizeof(int32_t), uio);

        val = 0;               // a fake pid
        error = uiomove((caddr_t)&val, sizeof(int32_t), uio);

        tmp16 = FSE_ARG_DONE;  // makes it a consistent msg
        error = uiomove((caddr_t)&tmp16, sizeof(int16_t), uio);

        if (error) {
            OSAddAtomic(-1, (SInt32 *)&watcher->num_readers);
            return error;
        }

        watcher->flags &= ~WATCHER_DROPPED_EVENTS;
    }

    // check if the next chunk of data will fit in the user's
    // buffer.  if not, just goto get_out which will return
    // the number of bytes worth of events that we did read.
    // this leaves the event that didn't fit in the queue.
    //
    // LP64todo - fix this
#define CHECK_UPTR(size) if (size > (unsigned)uio_resid(uio)) { \
                             uio_setresid(uio, last_full_event_resid); \
                             goto get_out; \
                         }

    for (; uio_resid(uio) > 0 && watcher->rd != watcher->wr; ) {
        kfse = watcher->event_queue[watcher->rd];

        // copy out the type of the event
        CHECK_UPTR(sizeof(int32_t));
        if ((error = uiomove((caddr_t)&kfse->type, sizeof(int32_t), uio)) != 0) {
            goto get_out;
        }

        // now copy out the pid of the person that changed the file
        CHECK_UPTR(sizeof(pid_t));
        if ((error = uiomove((caddr_t)&kfse->pid, sizeof(pid_t), uio)) != 0) {
            goto get_out;
        }

        // and finally each of the arguments
        for(i=0; i < KFS_NUM_ARGS && error == 0; i++) {
            kea = &kfse->args[i];

            tmp16 = (uint16_t)kea->type;
            CHECK_UPTR(sizeof(uint16_t));
            error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
            if (error || kea->type == FSE_ARG_DONE) {
                break;
            }

            switch(kea->type) {
                case FSE_ARG_VNODE:
                    pathbuff     = get_pathbuff();
                    pathbuff_len = MAXPATHLEN;
                    if (kea->data.vp == NULL) {
                        printf("fmod_watch: whoa... vp == NULL (%d)!\n", kfse->type);
                        i--;
                        release_pathbuff(pathbuff);
                        continue;
                    }

                    if (vn_getpath(kea->data.vp, pathbuff, &pathbuff_len) != 0 || pathbuff[0] == '\0') {
//                      printf("fmod_watch: vn_getpath failed! vp 0x%x vname 0x%x (%s) vparent 0x%x\n",
//                             kea->data.vp,
//                             VNAME(kea->data.vp),
//                             VNAME(kea->data.vp) ? VNAME(kea->data.vp) : "<null>",
//                             VPARENT(kea->data.vp));
                    }
                    CHECK_UPTR(sizeof(uint16_t));
                    tmp16 = (uint16_t)pathbuff_len;
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);

                    CHECK_UPTR((unsigned)pathbuff_len);
                    error = uiomove((caddr_t)pathbuff, pathbuff_len, uio);
                    release_pathbuff(pathbuff);
                    break;

                case FSE_ARG_STRING:
                    tmp16 = (int32_t)kea->len;
                    CHECK_UPTR(sizeof(uint16_t));
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);

                    CHECK_UPTR(kea->len);
                    error = uiomove((caddr_t)kea->data.str, kea->len, uio);
                    break;

                case FSE_ARG_INT32:
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(int32_t));
                    tmp16 = sizeof(int32_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.int32, sizeof(int32_t), uio);
                    break;

                case FSE_ARG_INT64:
                    printf("fs_events: 64-bit args not implemented on copyout.\n");
//                  CHECK_UPTR(sizeof(uint16_t) + sizeof(int64_t));
//                  tmp16 = sizeof(int64_t);
//                  error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
//                  error = uiomove((caddr_t)&kea->data.int64, sizeof(int64_t), uio);
                    break;

                case FSE_ARG_RAW:
                    tmp16 = (uint16_t)kea->len;
                    CHECK_UPTR(sizeof(uint16_t));
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);

                    CHECK_UPTR(kea->len);
                    error = uiomove((caddr_t)kea->data.ptr, kea->len, uio);
                    break;

                case FSE_ARG_DEV:
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(dev_t));
                    tmp16 = sizeof(dev_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.dev, sizeof(dev_t), uio);
                    break;

                case FSE_ARG_INO:
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(ino_t));
                    tmp16 = sizeof(ino_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.ino, sizeof(ino_t), uio);
                    break;

                case FSE_ARG_MODE:
                    // XXXdbg - NOTE: we use 32-bits for the mode, not
                    //                16-bits like a real mode_t
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(int32_t));
                    tmp16 = sizeof(int32_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.mode, sizeof(int32_t), uio);
                    break;

                case FSE_ARG_UID:
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(uid_t));
                    tmp16 = sizeof(uid_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.uid, sizeof(uid_t), uio);
                    break;

                case FSE_ARG_GID:
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(gid_t));
                    tmp16 = sizeof(gid_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.gid, sizeof(gid_t), uio);
                    break;

                default:
                    printf("fmod_watch: unknown arg type %d.\n", kea->type);
                    break;
            }
        }

        // make sure that we always end with a FSE_ARG_DONE
        if (i >= KFS_NUM_ARGS) {
            tmp16 = FSE_ARG_DONE;
            CHECK_UPTR(sizeof(uint16_t));
            error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
        }

        // LP64todo - fix this
        last_full_event_resid = uio_resid(uio);

        watcher->rd = (watcher->rd + 1) % watcher->eventq_size;

        if (OSAddAtomic(-1, (SInt32 *)&kfse->refcount) == 1) {
            do_free_event(kfse);
        }
    }

get_out:
    OSAddAtomic(-1, (SInt32 *)&watcher->num_readers);
    return error;
}
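
/*
 * Shape of the stream a reader sees from the loop above (a sketch,
 * inferred from the uiomove() calls, not a formal spec):
 *
 *     int32_t  type;                // FSE_* event code
 *     int32_t  pid;                 // pid that did the op
 *     repeated per argument:
 *         uint16_t argtype;         // FSE_ARG_*
 *         uint16_t len;             // path length for FSE_ARG_VNODE,
 *                                   // sizeof(payload) otherwise
 *         uint8_t  payload[len];
 *     terminated by a bare uint16_t FSE_ARG_DONE (no len/payload).
 */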
//
// release any references we might have on vnodes which are
// the mount point passed to us (so that it can be cleanly
// unmounted).
//
// since we don't want to lose the events we'll convert the
// vnode refs to the full path, inode #, and uid.
//
void
fsevent_unmount(struct mount *mp)
{
    int            i, j;
    kfs_event     *kfse;
    kfs_event_arg *kea;

    lck_rw_lock_exclusive(&fsevent_big_lock);
    lock_fs_event_buf();

    for(i=0; i < MAX_KFS_EVENTS; i++) {
        if (fs_event_buf[i].type == FSE_INVALID) {
            continue;
        }

        kfse = &fs_event_buf[i];
        for(j=0; j < KFS_NUM_ARGS; j++) {
            kea = &kfse->args[j];
            if (kea->type == FSE_ARG_DONE) {
                break;
            }

            if (kea->type == FSE_ARG_VNODE && kea->data.vp->v_mount == mp) {
                struct vnode *vp;
                char         *pathbuff;
                int           pathbuff_len;

                vp           = kea->data.vp;
                pathbuff     = get_pathbuff();
                pathbuff_len = MAXPATHLEN;

                if (vn_getpath(vp, pathbuff, &pathbuff_len) != 0 || pathbuff[0] == '\0') {
                    char *vname;

                    vname = vnode_getname(vp);

                    printf("fsevent_unmount: vn_getpath failed! vp 0x%x vname 0x%x (%s) vparent 0x%x\n",
                           vp, vname, vname ? vname : "<null>", vp->v_parent);

                    if (vname)
                        vnode_putname(vname);

                    strcpy(pathbuff, "UNKNOWN-FILE");
                    pathbuff_len = strlen(pathbuff) + 1;
                }

                // switch the type of the string
                kea->type     = FSE_ARG_STRING;
                kea->data.str = vfs_addname(pathbuff, pathbuff_len, 0, 0);
                kea->len      = pathbuff_len;
                release_pathbuff(pathbuff);

                // and finally let go of the reference on the vnode
                vnode_rele_ext(vp, O_EVTONLY, 0);
            }
        }
    }

    unlock_fs_event_buf();
    lck_rw_done(&fsevent_big_lock);
}
//
// /dev/fsevents device code
//
static int fsevents_installed = 0;
static struct lock__bsd__ fsevents_lck;

typedef struct fsevent_handle {
    fs_event_watcher *watcher;
    struct selinfo    si;
} fsevent_handle;
static int
fseventsf_read(struct fileproc *fp, struct uio *uio,
               __unused kauth_cred_t *cred, __unused int flags,
               __unused struct proc *p)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;
    int             error;

    error = fmod_watch(fseh->watcher, uio);

    return error;
}
static int
fseventsf_write(__unused struct fileproc *fp, __unused struct uio *uio,
                __unused kauth_cred_t *cred, __unused int flags,
                __unused struct proc *p)
{
    return EIO;
}
static int
fseventsf_ioctl(struct fileproc *fp, u_long cmd, caddr_t data, struct proc *p)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;
    int ret = 0;
    fsevent_dev_filter_args *devfilt_args = (fsevent_dev_filter_args *)data;

    switch (cmd) {
        case FIONBIO:
        case FIOASYNC:
            return 0;

        case FSEVENTS_DEVICE_FILTER: {
            int    new_num_devices;
            dev_t *devices_to_watch, *tmp = NULL;

            if (devfilt_args->num_devices > 256) {
                ret = EINVAL;
                break;
            }

            new_num_devices = devfilt_args->num_devices;
            if (new_num_devices == 0) {
                tmp = fseh->watcher->devices_to_watch;

                lock_watch_list();
                fseh->watcher->devices_to_watch = NULL;
                fseh->watcher->num_devices      = new_num_devices;
                unlock_watch_list();

                if (tmp) {
                    FREE(tmp, M_TEMP);
                }
                break;
            }

            MALLOC(devices_to_watch, dev_t *,
                   new_num_devices * sizeof(dev_t),
                   M_TEMP, M_WAITOK);
            if (devices_to_watch == NULL) {
                ret = ENOMEM;
                break;
            }

            ret = copyin(CAST_USER_ADDR_T(devfilt_args->devices),
                         (void *)devices_to_watch,
                         new_num_devices * sizeof(dev_t));
            if (ret) {
                FREE(devices_to_watch, M_TEMP);
                break;
            }

            lock_watch_list();
            fseh->watcher->num_devices      = new_num_devices;
            tmp = fseh->watcher->devices_to_watch;
            fseh->watcher->devices_to_watch = devices_to_watch;
            unlock_watch_list();

            if (tmp) {
                FREE(tmp, M_TEMP);
            }

            break;
        }

        default:
            ret = EINVAL;
            break;
    }

    return (ret);
}
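
/*
 * Illustrative userspace use of the filter (a sketch; assumes a fd
 * cloned via FSEVENTS_CLONE below, with the structures visible from
 * <sys/fsevents.h>):
 *
 *     dev_t devs[1] = { st.st_dev };
 *     fsevent_dev_filter_args dfa;
 *
 *     dfa.num_devices = 1;
 *     dfa.devices     = devs;
 *     ioctl(cloned_fd, FSEVENTS_DEVICE_FILTER, &dfa);
 *
 * Passing num_devices == 0 clears the filter; with no filter in
 * place, watcher_cares_about_dev() reports events from every device.
 */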
static int
fseventsf_select(struct fileproc *fp, int which, void *wql, struct proc *p)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;
    int ready = 0;

    if ((which != FREAD) || (fseh->watcher->flags & WATCHER_CLOSING)) {
        return 0;
    }

    // if there's nothing in the queue, we're not ready
    if (fseh->watcher->rd == fseh->watcher->wr) {
        ready = 0;
    } else {
        ready = 1;
    }

    if (!ready) {
        selrecord(p, &fseh->si, wql);
    }

    return ready;
}
static int
fseventsf_stat(struct fileproc *fp, struct stat *sb, struct proc *p)
{
    return ENOTSUP;
}

static int
fseventsf_close(struct fileglob *fg, struct proc *p)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fg->fg_data;

    remove_watcher(fseh->watcher);

    fg->fg_data = NULL;
    fseh->watcher = NULL;
    FREE(fseh, M_TEMP);

    return 0;
}
static int
fseventsf_kqfilter(struct fileproc *fp, struct knote *kn, struct proc *p)
{
    // kqueue filtering is not supported on fsevent fds
    return EINVAL;
}
static int
fseventsf_drain(struct fileproc *fp, struct proc *p)
{
    int counter = 0;
    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;

    fseh->watcher->flags |= WATCHER_CLOSING;

    // if there are people still waiting, sleep for 10ms to
    // let them clean up and get out of there.  however we
    // also don't want to get stuck forever so if they don't
    // exit after 5 seconds we're tearing things down anyway.
    while(fseh->watcher->blockers && counter++ < 500) {
        // issue wakeup in case anyone is blocked waiting for an event
        // do this each time we wakeup in case the blocker missed
        // the wakeup due to the unprotected test of WATCHER_CLOSING
        // and decision to tsleep in fmod_watch... this bit of
        // latency is a decent tradeoff against not having to
        // take and drop a lock in fmod_watch
        wakeup((caddr_t)fseh->watcher);

        tsleep((caddr_t)fseh->watcher, PRIBIO, "watcher-close", 1);
    }

    return 0;
}
static int
fseventsopen(dev_t dev, int flag, int mode, struct proc *p)
{
    if (!is_suser()) {
        return EPERM;
    }

    return 0;
}

static int
fseventsclose(dev_t dev, int flag, int mode, struct proc *p)
{
    return 0;
}

static int
fseventsread(dev_t dev, struct uio *uio, int ioflag)
{
    return EIO;
}

static int
fseventswrite(dev_t dev, struct uio *uio, int ioflag)
{
    return EIO;
}
static struct fileops fsevents_fops = {
    fseventsf_read,
    fseventsf_write,
    fseventsf_ioctl,
    fseventsf_select,
    fseventsf_close,
    fseventsf_kqfilter,
    fseventsf_drain
};
static int
fseventsioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
    struct fileproc    *f;
    int                 fd, error;
    fsevent_handle     *fseh = NULL;
    fsevent_clone_args *fse_clone_args = (fsevent_clone_args *)data;
    int8_t             *event_list;

    switch (cmd) {
        case FSEVENTS_CLONE:
            if (fse_clone_args->num_events < 0 || fse_clone_args->num_events > 4096) {
                return EINVAL;
            }

            MALLOC(fseh, fsevent_handle *, sizeof(fsevent_handle),
                   M_TEMP, M_WAITOK);
            memset(fseh, 0, sizeof(fsevent_handle));

            MALLOC(event_list, int8_t *,
                   fse_clone_args->num_events * sizeof(int8_t),
                   M_TEMP, M_WAITOK);

            error = copyin(CAST_USER_ADDR_T(fse_clone_args->event_list),
                           (void *)event_list,
                           fse_clone_args->num_events * sizeof(int8_t));
            if (error) {
                FREE(event_list, M_TEMP);
                FREE(fseh, M_TEMP);
                return error;
            }

            error = add_watcher(event_list,
                                fse_clone_args->num_events,
                                fse_clone_args->event_queue_depth,
                                &fseh->watcher);
            if (error) {
                FREE(event_list, M_TEMP);
                FREE(fseh, M_TEMP);
                return error;
            }

            error = falloc(p, &f, &fd);
            if (error) {
                FREE(event_list, M_TEMP);
                FREE(fseh, M_TEMP);
                return (error);
            }
            proc_fdlock(p);
            f->f_fglob->fg_flag = FREAD | FWRITE;
            f->f_fglob->fg_type = DTYPE_FSEVENTS;
            f->f_fglob->fg_ops  = &fsevents_fops;
            f->f_fglob->fg_data = (caddr_t) fseh;
            proc_fdunlock(p);
            copyout((void *)&fd, CAST_USER_ADDR_T(fse_clone_args->fd), sizeof(int32_t));
            proc_fdlock(p);
            *fdflags(p, fd) &= ~UF_RESERVED;
            fp_drop(p, fd, f, 1);
            proc_fdunlock(p);
            break;

        default:
            error = EINVAL;
            break;
    }

    return error;
}
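
/*
 * Illustrative userspace clone sequence (a sketch; error handling
 * omitted, structure layout taken from <sys/fsevents.h>):
 *
 *     int8_t  events[FSE_MAX_EVENTS];
 *     int32_t new_fd;
 *     fsevent_clone_args ca;
 *
 *     memset(events, FSE_REPORT, sizeof(events));
 *     int fd = open("/dev/fsevents", O_RDONLY);
 *     ca.event_list        = events;
 *     ca.num_events        = FSE_MAX_EVENTS;
 *     ca.event_queue_depth = 512;
 *     ca.fd                = &new_fd;
 *     ioctl(fd, FSEVENTS_CLONE, &ca);
 *
 * Reads on new_fd then return the stream produced by fmod_watch().
 */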
static int
fseventsselect(dev_t dev, int rw, struct proc *p)
{
    return 0;
}

static void
fsevents_wakeup(fsevent_handle *fseh)
{
    wakeup((caddr_t)fseh);
    selwakeup(&fseh->si);
}
/*
 * A struct describing which functions will get invoked for certain
 * actions.
 */
static struct cdevsw fsevents_cdevsw =
{
    fseventsopen,       /* open */
    fseventsclose,      /* close */
    fseventsread,       /* read */
    fseventswrite,      /* write */
    fseventsioctl,      /* ioctl */
    nulldev,            /* stop */
    nulldev,            /* reset */
    NULL,               /* tty's */
    eno_select,         /* select */
    eno_mmap,           /* mmap */
    eno_strat,          /* strategy */
    eno_getc,           /* getc */
    eno_putc,           /* putc */
    0                   /* type */
};
/*
 * Called to initialize our device,
 * and to register ourselves with devfs
 */
void
fsevents_init(void)
{
    int ret;

    if (fsevents_installed) {
        return;
    }

    fsevents_installed = 1;

    lockinit(&fsevents_lck, PLOCK, "fsevents", 0, 0);

    ret = cdevsw_add(-1, &fsevents_cdevsw);
    if (ret < 0) {
        fsevents_installed = 0;
        return;
    }

    devfs_make_node(makedev (ret, 0), DEVFS_CHAR,
                    UID_ROOT, GID_WHEEL, 0644, "fsevents", 0);

    fsevents_internal_init();
}
//
// XXXdbg - temporary path buffer handling
//
#define NUM_PATH_BUFFS  16
static char path_buff[NUM_PATH_BUFFS][MAXPATHLEN];
static char path_buff_inuse[NUM_PATH_BUFFS];

static lck_grp_attr_t *pathbuff_group_attr;
static lck_attr_t     *pathbuff_lock_attr;
static lck_grp_t      *pathbuff_mutex_group;
static lck_mtx_t       pathbuff_lock;

static void
init_pathbuff(void)
{
    pathbuff_lock_attr   = lck_attr_alloc_init();
    pathbuff_group_attr  = lck_grp_attr_alloc_init();
    pathbuff_mutex_group = lck_grp_alloc_init("pathbuff-mutex", pathbuff_group_attr);

    lck_mtx_init(&pathbuff_lock, pathbuff_mutex_group, pathbuff_lock_attr);
}

static void
lock_pathbuff(void)
{
    lck_mtx_lock(&pathbuff_lock);
}

static void
unlock_pathbuff(void)
{
    lck_mtx_unlock(&pathbuff_lock);
}

char *
get_pathbuff(void)
{
    int i;

    lock_pathbuff();
    for(i=0; i < NUM_PATH_BUFFS; i++) {
        if (path_buff_inuse[i] == 0) {
            break;
        }
    }

    if (i >= NUM_PATH_BUFFS) {
        char *path;

        unlock_pathbuff();
        MALLOC_ZONE(path, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
        return path;
    }

    path_buff_inuse[i] = 1;
    unlock_pathbuff();
    return &path_buff[i][0];
}

void
release_pathbuff(char *path)
{
    int i;

    if (path == NULL) {
        return;
    }

    lock_pathbuff();
    for(i=0; i < NUM_PATH_BUFFS; i++) {
        if (path == &path_buff[i][0]) {
            path_buff[i][0]    = '\0';
            path_buff_inuse[i] = 0;
            unlock_pathbuff();
            return;
        }
    }
    unlock_pathbuff();

    // if we get here then it wasn't one of our temp buffers
    FREE_ZONE(path, MAXPATHLEN, M_NAMEI);
}
int
get_fse_info(struct vnode *vp, fse_info *fse, vfs_context_t ctx)
{
    struct vnode_attr va;

    VATTR_INIT(&va);
    VATTR_WANTED(&va, va_fsid);
    VATTR_WANTED(&va, va_fileid);
    VATTR_WANTED(&va, va_mode);
    VATTR_WANTED(&va, va_uid);
    VATTR_WANTED(&va, va_gid);
    if (vnode_getattr(vp, &va, ctx) != 0) {
        return -1;
    }

    fse->dev  = (dev_t)va.va_fsid;
    fse->ino  = (ino_t)va.va_fileid;
    fse->mode = (int32_t)vnode_vttoif(vnode_vtype(vp)) | va.va_mode;
    fse->uid  = (uid_t)va.va_uid;
    fse->gid  = (gid_t)va.va_gid;

    return 0;
}
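
/*
 * Illustrative pairing with FSE_ARG_FINFO (a sketch): a caller that
 * has already fetched the attributes can hand add_fsevent() a
 * filled-in fse_info instead of a vnode, skipping the vnode_getattr()
 * done in the FSE_ARG_VNODE path.  "path" here is a hypothetical
 * NUL-terminated string for the file in question, and the argument
 * ordering is only an example.
 *
 *     fse_info fse;
 *
 *     if (get_fse_info(vp, &fse, ctx) == 0) {
 *         add_fsevent(FSE_CONTENT_MODIFIED, ctx,
 *                     FSE_ARG_STRING, (int32_t)(strlen(path) + 1), path,
 *                     FSE_ARG_FINFO, &fse,
 *                     FSE_ARG_DONE);
 *     }
 */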