]> git.saurik.com Git - apple/xnu.git/blame - bsd/vfs/vfs_fsevents.c
xnu-2422.100.13.tar.gz
[apple/xnu.git] / bsd / vfs / vfs_fsevents.c
CommitLineData
91447636 1/*
b0d623f7 2 * Copyright (c) 2004-2008 Apple Inc. All rights reserved.
5d5c5d0d 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
91447636 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
8f6c56a5 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
8f6c56a5 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
91447636
A
27 */
28#include <stdarg.h>
29#include <sys/param.h>
30#include <sys/systm.h>
b0d623f7 31#include <sys/event.h> // for kqueue related stuff
2d21ac55
A
32#include <sys/fsevents.h>
33
34#if CONFIG_FSE
91447636
A
35#include <sys/namei.h>
36#include <sys/filedesc.h>
37#include <sys/kernel.h>
38#include <sys/file_internal.h>
39#include <sys/stat.h>
40#include <sys/vnode_internal.h>
41#include <sys/mount_internal.h>
42#include <sys/proc_internal.h>
43#include <sys/kauth.h>
44#include <sys/uio.h>
45#include <sys/malloc.h>
46#include <sys/dirent.h>
47#include <sys/attr.h>
48#include <sys/sysctl.h>
49#include <sys/ubc.h>
50#include <machine/cons.h>
51#include <miscfs/specfs/specdev.h>
52#include <miscfs/devfs/devfs.h>
53#include <sys/filio.h>
91447636
A
54#include <kern/locks.h>
55#include <libkern/OSAtomic.h>
2d21ac55
A
56#include <kern/zalloc.h>
57#include <mach/mach_time.h>
58#include <kern/thread_call.h>
59#include <kern/clock.h>
91447636 60
b0d623f7 61#include <security/audit/audit.h>
91447636
A
62#include <bsm/audit_kevents.h>
63
316670eb 64#include <pexpert/pexpert.h>
91447636 65
91447636 66typedef struct kfs_event {
2d21ac55
A
67 LIST_ENTRY(kfs_event) kevent_list;
68 int16_t type; // type code of this event
69 u_int16_t flags, // per-event flags
70 len; // the length of the path in "str"
71 int32_t refcount; // number of clients referencing this
72 pid_t pid; // pid of the process that did the op
73
74 uint64_t abstime; // when this event happened (mach_absolute_time())
75 ino64_t ino;
76 dev_t dev;
77 int32_t mode;
78 uid_t uid;
79 gid_t gid;
80
81 const char *str;
82
83 struct kfs_event *dest; // if this is a two-file op
91447636
A
84} kfs_event;
85
2d21ac55
A
86// flags for the flags field
87#define KFSE_COMBINED_EVENTS 0x0001
88#define KFSE_CONTAINS_DROPPED_EVENTS 0x0002
89#define KFSE_RECYCLED_EVENT 0x0004
90#define KFSE_BEING_CREATED 0x0008
91
92LIST_HEAD(kfse_list, kfs_event) kfse_list_head = LIST_HEAD_INITIALIZER(x);
93int num_events_outstanding = 0;
94int num_pending_rename = 0;
95
96
97struct fsevent_handle;
91447636
A
98
99typedef struct fs_event_watcher {
91447636
A
100 int8_t *event_list; // the events we're interested in
101 int32_t num_events;
b0d623f7 102 dev_t *devices_not_to_watch; // report events from devices not in this list
91447636
A
103 uint32_t num_devices;
104 int32_t flags;
105 kfs_event **event_queue;
106 int32_t eventq_size; // number of event pointers in queue
0c530ab8 107 int32_t num_readers;
2d21ac55
A
108 int32_t rd; // read index into the event_queue
109 int32_t wr; // write index into the event_queue
110 int32_t blockers;
111 int32_t my_id;
112 uint32_t num_dropped;
b0d623f7 113 uint64_t max_event_id;
2d21ac55 114 struct fsevent_handle *fseh;
99c3a104
A
115 pid_t pid;
116 char proc_name[(2 * MAXCOMLEN) + 1];
91447636
A
117} fs_event_watcher;
118
119// fs_event_watcher flags
2d21ac55
A
120#define WATCHER_DROPPED_EVENTS 0x0001
121#define WATCHER_CLOSING 0x0002
122#define WATCHER_WANTS_COMPACT_EVENTS 0x0004
123#define WATCHER_WANTS_EXTENDED_INFO 0x0008
99c3a104 124#define WATCHER_APPLE_SYSTEM_SERVICE 0x0010 // fseventsd, coreservicesd, mds
91447636 125
2d21ac55
A
126#define MAX_WATCHERS 8
127static fs_event_watcher *watcher_table[MAX_WATCHERS];
91447636 128
316670eb
A
129#define DEFAULT_MAX_KFS_EVENTS 4096
130static int max_kfs_events = DEFAULT_MAX_KFS_EVENTS;
91447636 131
2d21ac55
A
132// we allocate kfs_event structures out of this zone
133static zone_t event_zone;
91447636
A
134static int fs_event_init = 0;
135
136//
137// this array records whether anyone is interested in a
138// particular type of event. if no one is, we bail out
139// early from the event delivery
140//
141static int16_t fs_event_type_watchers[FSE_MAX_EVENTS];
142
143static int watcher_add_event(fs_event_watcher *watcher, kfs_event *kfse);
2d21ac55 144static void fsevents_wakeup(fs_event_watcher *watcher);
91447636
A
145
146//
147// Locks
148//
149static lck_grp_attr_t * fsevent_group_attr;
150static lck_attr_t * fsevent_lock_attr;
151static lck_grp_t * fsevent_mutex_group;
152
153static lck_grp_t * fsevent_rw_group;
154
2d21ac55
A
155static lck_rw_t event_handling_lock; // handles locking for event manipulation and recycling
156static lck_mtx_t watch_table_lock;
91447636 157static lck_mtx_t event_buf_lock;
2d21ac55 158static lck_mtx_t event_writer_lock;
91447636 159
b0d623f7
A
160
161/* Explicitly declare qsort so compiler doesn't complain */
162__private_extern__ void qsort(
163 void * array,
164 size_t nmembers,
165 size_t member_size,
166 int (*)(const void *, const void *));
91447636 167
99c3a104
A
168static int
169is_ignored_directory(const char *path) {
170
171 if (!path) {
172 return 0;
173 }
174
175#define IS_TLD(x) strnstr((char *) path, x, MAXPATHLEN)
176 if (IS_TLD("/.Spotlight-V100/") ||
177 IS_TLD("/.MobileBackups/") ||
178 IS_TLD("/Backups.backupdb/")) {
179 return 1;
180 }
181#undef IS_TLD
182
183 return 0;
184}
185
91447636
A
186static void
187fsevents_internal_init(void)
188{
189 int i;
190
191 if (fs_event_init++ != 0) {
192 return;
193 }
194
195 for(i=0; i < FSE_MAX_EVENTS; i++) {
196 fs_event_type_watchers[i] = 0;
197 }
198
2d21ac55 199 memset(watcher_table, 0, sizeof(watcher_table));
91447636
A
200
201 fsevent_lock_attr = lck_attr_alloc_init();
202 fsevent_group_attr = lck_grp_attr_alloc_init();
203 fsevent_mutex_group = lck_grp_alloc_init("fsevent-mutex", fsevent_group_attr);
204 fsevent_rw_group = lck_grp_alloc_init("fsevent-rw", fsevent_group_attr);
205
2d21ac55 206 lck_mtx_init(&watch_table_lock, fsevent_mutex_group, fsevent_lock_attr);
91447636 207 lck_mtx_init(&event_buf_lock, fsevent_mutex_group, fsevent_lock_attr);
2d21ac55
A
208 lck_mtx_init(&event_writer_lock, fsevent_mutex_group, fsevent_lock_attr);
209
210 lck_rw_init(&event_handling_lock, fsevent_rw_group, fsevent_lock_attr);
91447636 211
316670eb
A
212 PE_get_default("kern.maxkfsevents", &max_kfs_events, sizeof(max_kfs_events));
213
2d21ac55 214 event_zone = zinit(sizeof(kfs_event),
316670eb
A
215 max_kfs_events * sizeof(kfs_event),
216 max_kfs_events * sizeof(kfs_event),
2d21ac55
A
217 "fs-event-buf");
218 if (event_zone == NULL) {
219 printf("fsevents: failed to initialize the event zone.\n");
220 }
221
2d21ac55
A
222 // mark the zone as exhaustible so that it will not
223 // ever grow beyond what we initially filled it with
224 zone_change(event_zone, Z_EXHAUST, TRUE);
225 zone_change(event_zone, Z_COLLECT, FALSE);
6d2010ae 226 zone_change(event_zone, Z_CALLERACCT, FALSE);
7ddcb079 227
316670eb 228 if (zfill(event_zone, max_kfs_events) < max_kfs_events) {
7ddcb079
A
229 printf("fsevents: failed to pre-fill the event zone.\n");
230 }
231
91447636
A
232}
233
234static void
2d21ac55 235lock_watch_table(void)
91447636 236{
2d21ac55 237 lck_mtx_lock(&watch_table_lock);
91447636
A
238}
239
240static void
2d21ac55 241unlock_watch_table(void)
91447636 242{
2d21ac55 243 lck_mtx_unlock(&watch_table_lock);
91447636
A
244}
245
246static void
2d21ac55 247lock_fs_event_list(void)
91447636
A
248{
249 lck_mtx_lock(&event_buf_lock);
250}
251
252static void
2d21ac55 253unlock_fs_event_list(void)
91447636
A
254{
255 lck_mtx_unlock(&event_buf_lock);
256}
257
258// forward prototype
2d21ac55 259static void release_event_ref(kfs_event *kfse);
91447636
A
260
261static int
262watcher_cares_about_dev(fs_event_watcher *watcher, dev_t dev)
263{
264 unsigned int i;
265
b0d623f7
A
266 // if devices_not_to_watch is NULL then we care about all
267 // events from all devices
268 if (watcher->devices_not_to_watch == NULL) {
91447636
A
269 return 1;
270 }
271
272 for(i=0; i < watcher->num_devices; i++) {
b0d623f7
A
273 if (dev == watcher->devices_not_to_watch[i]) {
274 // found a match! that means we do not
275 // want events from this device.
276 return 0;
91447636
A
277 }
278 }
279
b0d623f7
A
280 // if we're here it's not in the devices_not_to_watch[]
281 // list so that means we do care about it
282 return 1;
91447636
A
283}
284
285
286int
287need_fsevent(int type, vnode_t vp)
288{
2d21ac55
A
289 if (type >= 0 && type < FSE_MAX_EVENTS && fs_event_type_watchers[type] == 0)
290 return (0);
291
292 // events in /dev aren't really interesting...
293 if (vp->v_tag == VT_DEVFS) {
294 return (0);
295 }
296
297 return 1;
298}
299
2d21ac55
A
300
301#define is_throw_away(x) ((x) == FSE_STAT_CHANGED || (x) == FSE_CONTENT_MODIFIED)
91447636 302
91447636 303
2d21ac55
A
304// Ways that an event can be reused:
305//
306// "combined" events mean that there were two events for
307// the same vnode or path and we're combining both events
308// into a single event. The primary event gets a bit that
309// marks it as having been combined. The secondary event
310// is essentially dropped and the kfse structure reused.
311//
312// "collapsed" means that multiple events below a given
313// directory are collapsed into a single event. in this
314// case, the directory that we collapse into and all of
315// its children must be re-scanned.
316//
317// "recycled" means that we're completely blowing away
318// the event since there are other events that have info
319// about the same vnode or path (and one of those other
320// events will be marked as combined or collapsed as
321// appropriate).
322//
323#define KFSE_COMBINED 0x0001
324#define KFSE_COLLAPSED 0x0002
325#define KFSE_RECYCLED 0x0004
326
327int num_dropped = 0;
2d21ac55
A
328int num_parent_switch = 0;
329int num_recycled_rename = 0;
330
2d21ac55
A
331static struct timeval last_print;
332
333//
334// These variables are used to track coalescing multiple identical
335// events for the same vnode/pathname. If we get the same event
336// type and same vnode/pathname as the previous event, we just drop
337// the event since it's superfluous. This improves some micro-
338// benchmarks considerably and actually has a real-world impact on
339// tests like a Finder copy where multiple stat-changed events can
340// get coalesced.
341//
342static int last_event_type=-1;
343static void *last_ptr=NULL;
344static char last_str[MAXPATHLEN];
345static int last_nlen=0;
346static int last_vid=-1;
347static uint64_t last_coalesced_time=0;
b0d623f7 348static void *last_event_ptr=NULL;
2d21ac55
A
349int last_coalesced = 0;
350static mach_timebase_info_data_t sTimebaseInfo = { 0, 0 };
351
352
91447636
A
353int
354add_fsevent(int type, vfs_context_t ctx, ...)
355{
356 struct proc *p = vfs_context_proc(ctx);
99c3a104 357 int i, arg_type, ret;
2d21ac55 358 kfs_event *kfse, *kfse_dest=NULL, *cur;
91447636
A
359 fs_event_watcher *watcher;
360 va_list ap;
db609669 361 int error = 0, did_alloc=0;
91447636 362 dev_t dev = 0;
2d21ac55 363 uint64_t now, elapsed;
2d21ac55
A
364 char *pathbuff=NULL;
365 int pathbuff_len;
366
91447636 367
b0d623f7 368
91447636
A
369 va_start(ap, ctx);
370
2d21ac55
A
371 // ignore bogus event types..
372 if (type < 0 || type >= FSE_MAX_EVENTS) {
373 return EINVAL;
374 }
375
91447636
A
376 // if no one cares about this type of event, bail out
377 if (fs_event_type_watchers[type] == 0) {
378 va_end(ap);
b0d623f7 379
91447636
A
380 return 0;
381 }
382
2d21ac55 383 now = mach_absolute_time();
91447636
A
384
385 // find a free event and snag it for our use
386 // NOTE: do not do anything that would block until
387 // the lock is dropped.
2d21ac55 388 lock_fs_event_list();
91447636 389
2d21ac55
A
390 //
391 // check if this event is identical to the previous one...
392 // (as long as it's not an event type that can never be the
393 // same as a previous event)
394 //
22ba694c 395 if (type != FSE_CREATE_FILE && type != FSE_DELETE && type != FSE_RENAME && type != FSE_EXCHANGE && type != FSE_CHOWN && type != FSE_DOCID_CHANGED && type != FSE_DOCID_CREATED) {
2d21ac55
A
396 void *ptr=NULL;
397 int vid=0, was_str=0, nlen=0;
398
399 for(arg_type=va_arg(ap, int32_t); arg_type != FSE_ARG_DONE; arg_type=va_arg(ap, int32_t)) {
400 switch(arg_type) {
401 case FSE_ARG_VNODE: {
402 ptr = va_arg(ap, void *);
403 vid = vnode_vid((struct vnode *)ptr);
404 last_str[0] = '\0';
405 break;
406 }
407 case FSE_ARG_STRING: {
408 nlen = va_arg(ap, int32_t);
409 ptr = va_arg(ap, void *);
410 was_str = 1;
411 break;
412 }
413 }
414 if (ptr != NULL) {
415 break;
416 }
417 }
418
419 if ( sTimebaseInfo.denom == 0 ) {
420 (void) clock_timebase_info(&sTimebaseInfo);
421 }
422
423 elapsed = (now - last_coalesced_time);
424 if (sTimebaseInfo.denom != sTimebaseInfo.numer) {
425 if (sTimebaseInfo.denom == 1) {
426 elapsed *= sTimebaseInfo.numer;
427 } else {
428 // this could overflow... the worst that will happen is that we'll
429 // send (or not send) an extra event so I'm not going to worry about
430 // doing the math right like dtrace_abs_to_nano() does.
431 elapsed = (elapsed * sTimebaseInfo.numer) / (uint64_t)sTimebaseInfo.denom;
432 }
433 }
434
435 if (type == last_event_type
436 && (elapsed < 1000000000)
437 &&
438 ((vid && vid == last_vid && last_ptr == ptr)
439 ||
440 (last_str[0] && last_nlen == nlen && ptr && strcmp(last_str, ptr) == 0))
441 ) {
442
443 last_coalesced++;
444 unlock_fs_event_list();
445 va_end(ap);
b0d623f7 446
2d21ac55
A
447 return 0;
448 } else {
449 last_ptr = ptr;
450 if (was_str) {
451 strlcpy(last_str, ptr, sizeof(last_str));
452 }
453 last_nlen = nlen;
454 last_vid = vid;
455 last_event_type = type;
456 last_coalesced_time = now;
91447636
A
457 }
458 }
2d21ac55
A
459 va_start(ap, ctx);
460
461
462 kfse = zalloc_noblock(event_zone);
463 if (kfse && (type == FSE_RENAME || type == FSE_EXCHANGE)) {
464 kfse_dest = zalloc_noblock(event_zone);
465 if (kfse_dest == NULL) {
466 did_alloc = 1;
467 zfree(event_zone, kfse);
468 kfse = NULL;
469 }
470 }
471
472
473 if (kfse == NULL) { // yikes! no free events
2d21ac55
A
474 unlock_fs_event_list();
475 lock_watch_table();
476
477 for(i=0; i < MAX_WATCHERS; i++) {
478 watcher = watcher_table[i];
479 if (watcher == NULL) {
480 continue;
481 }
482
483 watcher->flags |= WATCHER_DROPPED_EVENTS;
484 fsevents_wakeup(watcher);
485 }
486 unlock_watch_table();
487
488 {
489 struct timeval current_tv;
490
491 num_dropped++;
492
493 // only print a message at most once every 5 seconds
494 microuptime(&current_tv);
495 if ((current_tv.tv_sec - last_print.tv_sec) > 10) {
496 int ii;
497 void *junkptr=zalloc_noblock(event_zone), *listhead=kfse_list_head.lh_first;
498
499 printf("add_fsevent: event queue is full! dropping events (num dropped events: %d; num events outstanding: %d).\n", num_dropped, num_events_outstanding);
500 printf("add_fsevent: kfse_list head %p ; num_pending_rename %d\n", listhead, num_pending_rename);
501 printf("add_fsevent: zalloc sez: %p\n", junkptr);
b0d623f7 502 printf("add_fsevent: event_zone info: %d 0x%x\n", ((int *)event_zone)[0], ((int *)event_zone)[1]);
2d21ac55
A
503 for(ii=0; ii < MAX_WATCHERS; ii++) {
504 if (watcher_table[ii] == NULL) {
505 continue;
506 }
507
99c3a104
A
508 printf("add_fsevent: watcher %s %p: rd %4d wr %4d q_size %4d flags 0x%x\n",
509 watcher_table[ii]->proc_name,
510 watcher_table[ii],
511 watcher_table[ii]->rd, watcher_table[ii]->wr,
512 watcher_table[ii]->eventq_size, watcher_table[ii]->flags);
2d21ac55
A
513 }
514
515 last_print = current_tv;
516 if (junkptr) {
517 zfree(event_zone, junkptr);
518 }
519 }
520 }
521
522 if (pathbuff) {
523 release_pathbuff(pathbuff);
524 pathbuff = NULL;
525 }
2d21ac55
A
526 return ENOSPC;
527 }
528
2d21ac55
A
529 memset(kfse, 0, sizeof(kfs_event));
530 kfse->refcount = 1;
531 OSBitOrAtomic16(KFSE_BEING_CREATED, &kfse->flags);
91447636 532
b0d623f7 533 last_event_ptr = kfse;
91447636 534 kfse->type = type;
2d21ac55 535 kfse->abstime = now;
91447636 536 kfse->pid = p->p_pid;
2d21ac55 537 if (type == FSE_RENAME || type == FSE_EXCHANGE) {
db609669
A
538 memset(kfse_dest, 0, sizeof(kfs_event));
539 kfse_dest->refcount = 1;
540 OSBitOrAtomic16(KFSE_BEING_CREATED, &kfse_dest->flags);
2d21ac55
A
541 kfse_dest->type = type;
542 kfse_dest->pid = p->p_pid;
543 kfse_dest->abstime = now;
544
545 kfse->dest = kfse_dest;
546 }
547
548 num_events_outstanding++;
549 if (kfse->type == FSE_RENAME) {
550 num_pending_rename++;
551 }
552 LIST_INSERT_HEAD(&kfse_list_head, kfse, kevent_list);
91447636 553
2d21ac55
A
554 if (kfse->refcount < 1) {
555 panic("add_fsevent: line %d: kfse recount %d but should be at least 1\n", __LINE__, kfse->refcount);
556 }
557
558 unlock_fs_event_list(); // at this point it's safe to unlock
91447636
A
559
560 //
561 // now process the arguments passed in and copy them into
562 // the kfse
563 //
2d21ac55
A
564
565 cur = kfse;
22ba694c
A
566
567 if (type == FSE_DOCID_CREATED || type == FSE_DOCID_CHANGED) {
568 uint64_t val;
569
570 //
571 // These events are special and not like the other events. They only
572 // have a dev_t, src inode #, dest inode #, and a doc-id. We use the
573 // fields that we can in the kfse but have to overlay the dest inode
574 // number and the doc-id on the other fields.
575 //
576
577 // First the dev_t
578 arg_type = va_arg(ap, int32_t);
579 if (arg_type == FSE_ARG_DEV) {
580 cur->dev = (dev_t)(va_arg(ap, dev_t));
581 } else {
582 cur->dev = (dev_t)0xbadc0de1;
583 }
584
585 // next the source inode #
586 arg_type = va_arg(ap, int32_t);
587 if (arg_type == FSE_ARG_INO) {
588 cur->ino = (ino64_t)(va_arg(ap, ino64_t));
589 } else {
590 cur->ino = 0xbadc0de2;
591 }
592
593 // now the dest inode #
594 arg_type = va_arg(ap, int32_t);
595 if (arg_type == FSE_ARG_INO) {
596 val = (ino64_t)(va_arg(ap, ino64_t));
597 } else {
598 val = 0xbadc0de2;
599 }
600 // overlay the dest inode number on the str/dest pointer fields
601 memcpy(&cur->str, &val, sizeof(ino64_t));
602
603
604 // and last the document-id
605 arg_type = va_arg(ap, int32_t);
606 if (arg_type == FSE_ARG_INT32) {
607 val = (uint64_t)va_arg(ap, uint32_t);
608 } else if (arg_type == FSE_ARG_INT64) {
609 val = (uint64_t)va_arg(ap, uint64_t);
610 } else {
611 val = 0xbadc0de3;
612 }
613
614 // the docid is 64-bit and overlays the uid/gid fields
615 memcpy(&cur->uid, &val, sizeof(uint64_t));
616
617 goto done_with_args;
618 }
619
2d21ac55 620 for(arg_type=va_arg(ap, int32_t); arg_type != FSE_ARG_DONE; arg_type=va_arg(ap, int32_t))
91447636 621
2d21ac55 622 switch(arg_type) {
91447636
A
623 case FSE_ARG_VNODE: {
624 // this expands out into multiple arguments to the client
625 struct vnode *vp;
626 struct vnode_attr va;
627
2d21ac55
A
628 if (kfse->str != NULL) {
629 cur = kfse_dest;
91447636
A
630 }
631
2d21ac55
A
632 vp = va_arg(ap, struct vnode *);
633 if (vp == NULL) {
634 panic("add_fsevent: you can't pass me a NULL vnode ptr (type %d)!\n",
635 cur->type);
91447636 636 }
2d21ac55 637
91447636
A
638 VATTR_INIT(&va);
639 VATTR_WANTED(&va, va_fsid);
640 VATTR_WANTED(&va, va_fileid);
641 VATTR_WANTED(&va, va_mode);
642 VATTR_WANTED(&va, va_uid);
643 VATTR_WANTED(&va, va_gid);
cf7d32b8 644 if ((ret = vnode_getattr(vp, &va, vfs_context_kernel())) != 0) {
2d21ac55
A
645 // printf("add_fsevent: failed to getattr on vp %p (%d)\n", cur->fref.vp, ret);
646 cur->str = NULL;
91447636
A
647 error = EINVAL;
648 goto clean_up;
649 }
650
2d21ac55 651 cur->dev = dev = (dev_t)va.va_fsid;
b0d623f7 652 cur->ino = (ino64_t)va.va_fileid;
2d21ac55
A
653 cur->mode = (int32_t)vnode_vttoif(vnode_vtype(vp)) | va.va_mode;
654 cur->uid = va.va_uid;
655 cur->gid = va.va_gid;
91447636 656
2d21ac55
A
657 // if we haven't gotten the path yet, get it.
658 if (pathbuff == NULL) {
659 pathbuff = get_pathbuff();
660 pathbuff_len = MAXPATHLEN;
661
662 pathbuff[0] = '\0';
b0d623f7 663 if ((ret = vn_getpath(vp, pathbuff, &pathbuff_len)) != 0 || pathbuff[0] == '\0') {
b0d623f7 664
b0d623f7
A
665 cur->flags |= KFSE_CONTAINS_DROPPED_EVENTS;
666
667 do {
668 if (vp->v_parent != NULL) {
669 vp = vp->v_parent;
670 } else if (vp->v_mount) {
671 strlcpy(pathbuff, vp->v_mount->mnt_vfsstat.f_mntonname, MAXPATHLEN);
672 break;
673 } else {
674 vp = NULL;
675 }
676
677 if (vp == NULL) {
678 break;
679 }
680
681 pathbuff_len = MAXPATHLEN;
682 ret = vn_getpath(vp, pathbuff, &pathbuff_len);
683 } while (ret == ENOSPC);
684
685 if (ret != 0 || vp == NULL) {
b0d623f7 686 error = ENOENT;
b0d623f7 687 goto clean_up;
2d21ac55 688 }
2d21ac55
A
689 }
690 }
91447636 691
2d21ac55
A
692 // store the path by adding it to the global string table
693 cur->len = pathbuff_len;
694 cur->str = vfs_addname(pathbuff, pathbuff_len, 0, 0);
695 if (cur->str == NULL || cur->str[0] == '\0') {
696 panic("add_fsevent: was not able to add path %s to event %p.\n", pathbuff, cur);
697 }
698
699 release_pathbuff(pathbuff);
700 pathbuff = NULL;
91447636 701
91447636
A
702 break;
703 }
704
705 case FSE_ARG_FINFO: {
706 fse_info *fse;
707
708 fse = va_arg(ap, fse_info *);
709
2d21ac55 710 cur->dev = dev = (dev_t)fse->dev;
b0d623f7 711 cur->ino = (ino64_t)fse->ino;
2d21ac55
A
712 cur->mode = (int32_t)fse->mode;
713 cur->uid = (uid_t)fse->uid;
714 cur->gid = (uid_t)fse->gid;
715 // if it's a hard-link and this is the last link, flag it
716 if ((fse->mode & FSE_MODE_HLINK) && fse->nlink == 0) {
717 cur->mode |= FSE_MODE_LAST_HLINK;
718 }
b0d623f7
A
719 if (cur->mode & FSE_TRUNCATED_PATH) {
720 cur->flags |= KFSE_CONTAINS_DROPPED_EVENTS;
721 cur->mode &= ~FSE_TRUNCATED_PATH;
722 }
91447636
A
723 break;
724 }
725
726 case FSE_ARG_STRING:
2d21ac55
A
727 if (kfse->str != NULL) {
728 cur = kfse_dest;
729 }
91447636 730
2d21ac55
A
731 cur->len = (int16_t)(va_arg(ap, int32_t) & 0x7fff);
732 if (cur->len >= 1) {
733 cur->str = vfs_addname(va_arg(ap, char *), cur->len, 0, 0);
734 } else {
735 printf("add_fsevent: funny looking string length: %d\n", (int)cur->len);
736 cur->len = 2;
737 cur->str = vfs_addname("/", cur->len, 0, 0);
738 }
739 if (cur->str[0] == 0) {
740 printf("add_fsevent: bogus looking string (len %d)\n", cur->len);
741 }
91447636
A
742 break;
743
22ba694c
A
744 case FSE_ARG_INT32: {
745 uint32_t ival = (uint32_t)va_arg(ap, int32_t);
746 kfse->uid = (ino64_t)ival;
747 break;
748 }
749
91447636 750 default:
2d21ac55 751 printf("add_fsevent: unknown type %d\n", arg_type);
91447636
A
752 // just skip one 32-bit word and hope we sync up...
753 (void)va_arg(ap, int32_t);
754 }
91447636 755
22ba694c 756done_with_args:
91447636
A
757 va_end(ap);
758
2d21ac55
A
759 OSBitAndAtomic16(~KFSE_BEING_CREATED, &kfse->flags);
760 if (kfse_dest) {
761 OSBitAndAtomic16(~KFSE_BEING_CREATED, &kfse_dest->flags);
762 }
763
91447636
A
764 //
765 // now we have to go and let everyone know that
2d21ac55 766 // is interested in this type of event
91447636 767 //
2d21ac55 768 lock_watch_table();
91447636 769
2d21ac55
A
770 for(i=0; i < MAX_WATCHERS; i++) {
771 watcher = watcher_table[i];
772 if (watcher == NULL) {
773 continue;
774 }
775
776 if ( watcher->event_list[type] == FSE_REPORT
777 && watcher_cares_about_dev(watcher, dev)) {
778
779 if (watcher_add_event(watcher, kfse) != 0) {
780 watcher->num_dropped++;
db609669 781 continue;
91447636
A
782 }
783 }
2d21ac55 784
db609669
A
785 // if (kfse->refcount < 1) {
786 // panic("add_fsevent: line %d: kfse recount %d but should be at least 1\n", __LINE__, kfse->refcount);
787 // }
91447636
A
788 }
789
2d21ac55
A
790 unlock_watch_table();
791
91447636 792 clean_up:
2d21ac55
A
793
794 if (pathbuff) {
795 release_pathbuff(pathbuff);
796 pathbuff = NULL;
797 }
798
799 release_event_ref(kfse);
91447636 800
91447636
A
801 return error;
802}
803
2d21ac55 804
91447636 805static void
2d21ac55 806release_event_ref(kfs_event *kfse)
91447636 807{
2d21ac55
A
808 int old_refcount;
809 kfs_event copy, dest_copy;
91447636 810
91447636 811
b0d623f7 812 old_refcount = OSAddAtomic(-1, &kfse->refcount);
2d21ac55
A
813 if (old_refcount > 1) {
814 return;
815 }
816
817 lock_fs_event_list();
b0d623f7
A
818 if (last_event_ptr == kfse) {
819 last_event_ptr = NULL;
820 last_event_type = -1;
821 last_coalesced_time = 0;
822 }
823
2d21ac55
A
824 if (kfse->refcount < 0) {
825 panic("release_event_ref: bogus kfse refcount %d\n", kfse->refcount);
0c530ab8 826 }
91447636 827
2d21ac55
A
828 if (kfse->refcount > 0 || kfse->type == FSE_INVALID) {
829 // This is very subtle. Either of these conditions can
830 // be true if an event got recycled while we were waiting
831 // on the fs_event_list lock or the event got recycled,
832 // delivered, _and_ free'd by someone else while we were
833 // waiting on the fs event list lock. In either case
834 // we need to just unlock the list and return without
835 // doing anything because if the refcount is > 0 then
836 // someone else will take care of free'ing it and when
837 // the kfse->type is invalid then someone else already
838 // has handled free'ing the event (while we were blocked
839 // on the event list lock).
840 //
841 unlock_fs_event_list();
842 return;
843 }
844
845 //
91447636
A
846 // make a copy of this so we can free things without
847 // holding the fs_event_buf lock
848 //
2d21ac55 849 copy = *kfse;
b0d623f7 850 if (kfse->dest && OSAddAtomic(-1, &kfse->dest->refcount) == 1) {
2d21ac55
A
851 dest_copy = *kfse->dest;
852 } else {
853 dest_copy.str = NULL;
854 dest_copy.len = 0;
855 dest_copy.type = FSE_INVALID;
856 }
857
858 kfse->pid = kfse->type; // save this off for debugging...
b0d623f7
A
859 kfse->uid = (uid_t)(long)kfse->str; // save this off for debugging...
860 kfse->gid = (gid_t)(long)current_thread();
2d21ac55
A
861
862 kfse->str = (char *)0xdeadbeef; // XXXdbg - catch any cheaters...
863
864 if (dest_copy.type != FSE_INVALID) {
865 kfse->dest->str = (char *)0xbadc0de; // XXXdbg - catch any cheaters...
866 kfse->dest->type = FSE_INVALID;
867
868 if (kfse->dest->kevent_list.le_prev != NULL) {
869 num_events_outstanding--;
870 LIST_REMOVE(kfse->dest, kevent_list);
871 memset(&kfse->dest->kevent_list, 0xa5, sizeof(kfse->dest->kevent_list));
872 }
873
874 zfree(event_zone, kfse->dest);
875 }
91447636 876
0c530ab8 877 // mark this fsevent as invalid
2d21ac55
A
878 {
879 int otype;
880
881 otype = kfse->type;
0c530ab8
A
882 kfse->type = FSE_INVALID;
883
2d21ac55
A
884 if (kfse->kevent_list.le_prev != NULL) {
885 num_events_outstanding--;
886 if (otype == FSE_RENAME) {
887 num_pending_rename--;
888 }
889 LIST_REMOVE(kfse, kevent_list);
890 memset(&kfse->kevent_list, 0, sizeof(kfse->kevent_list));
891 }
892 }
91447636 893
2d21ac55
A
894 zfree(event_zone, kfse);
895
896 unlock_fs_event_list();
897
898 // if we have a pointer in the union
22ba694c 899 if (copy.str && copy.type != FSE_DOCID_CHANGED) {
2d21ac55
A
900 if (copy.len == 0) { // and it's not a string
901 panic("%s:%d: no more fref.vp!\n", __FILE__, __LINE__);
902 // vnode_rele_ext(copy.fref.vp, O_EVTONLY, 0);
903 } else { // else it's a string
904 vfs_removename(copy.str);
91447636 905 }
2d21ac55 906 }
91447636 907
2d21ac55
A
908 if (dest_copy.type != FSE_INVALID && dest_copy.str) {
909 if (dest_copy.len == 0) {
910 panic("%s:%d: no more fref.vp!\n", __FILE__, __LINE__);
911 // vnode_rele_ext(dest_copy.fref.vp, O_EVTONLY, 0);
912 } else {
913 vfs_removename(dest_copy.str);
91447636
A
914 }
915 }
916}
917
91447636 918static int
316670eb 919add_watcher(int8_t *event_list, int32_t num_events, int32_t eventq_size, fs_event_watcher **watcher_out, void *fseh)
91447636
A
920{
921 int i;
922 fs_event_watcher *watcher;
923
316670eb
A
924 if (eventq_size <= 0 || eventq_size > 100*max_kfs_events) {
925 eventq_size = max_kfs_events;
91447636
A
926 }
927
928 // Note: the event_queue follows the fs_event_watcher struct
929 // in memory so we only have to do one allocation
930 MALLOC(watcher,
931 fs_event_watcher *,
932 sizeof(fs_event_watcher) + eventq_size * sizeof(kfs_event *),
933 M_TEMP, M_WAITOK);
2d21ac55
A
934 if (watcher == NULL) {
935 return ENOMEM;
936 }
91447636
A
937
938 watcher->event_list = event_list;
939 watcher->num_events = num_events;
b0d623f7 940 watcher->devices_not_to_watch = NULL;
91447636
A
941 watcher->num_devices = 0;
942 watcher->flags = 0;
943 watcher->event_queue = (kfs_event **)&watcher[1];
944 watcher->eventq_size = eventq_size;
945 watcher->rd = 0;
946 watcher->wr = 0;
947 watcher->blockers = 0;
0c530ab8 948 watcher->num_readers = 0;
b0d623f7 949 watcher->max_event_id = 0;
316670eb 950 watcher->fseh = fseh;
99c3a104
A
951 watcher->pid = proc_selfpid();
952 proc_selfname(watcher->proc_name, sizeof(watcher->proc_name));
2d21ac55
A
953
954 watcher->num_dropped = 0; // XXXdbg - debugging
91447636 955
99c3a104
A
956 if (!strncmp(watcher->proc_name, "fseventsd", sizeof(watcher->proc_name)) ||
957 !strncmp(watcher->proc_name, "coreservicesd", sizeof(watcher->proc_name)) ||
958 !strncmp(watcher->proc_name, "mds", sizeof(watcher->proc_name))) {
959 watcher->flags |= WATCHER_APPLE_SYSTEM_SERVICE;
db609669
A
960 } else {
961 printf("fsevents: watcher %s (pid: %d) - Using /dev/fsevents directly is unsupported. Migrate to FSEventsFramework\n",
962 watcher->proc_name, watcher->pid);
99c3a104
A
963 }
964
2d21ac55 965 lock_watch_table();
91447636
A
966
967 // now update the global list of who's interested in
968 // events of a particular type...
969 for(i=0; i < num_events; i++) {
970 if (event_list[i] != FSE_IGNORE && i < FSE_MAX_EVENTS) {
971 fs_event_type_watchers[i]++;
972 }
973 }
974
2d21ac55
A
975 for(i=0; i < MAX_WATCHERS; i++) {
976 if (watcher_table[i] == NULL) {
977 watcher->my_id = i;
978 watcher_table[i] = watcher;
979 break;
980 }
981 }
982
983 if (i > MAX_WATCHERS) {
984 printf("fsevents: too many watchers!\n");
985 unlock_watch_table();
986 return ENOSPC;
987 }
91447636 988
2d21ac55 989 unlock_watch_table();
91447636
A
990
991 *watcher_out = watcher;
992
993 return 0;
994}
995
2d21ac55
A
996
997
91447636
A
998static void
999remove_watcher(fs_event_watcher *target)
1000{
2d21ac55 1001 int i, j, counter=0;
91447636
A
1002 fs_event_watcher *watcher;
1003 kfs_event *kfse;
1004
2d21ac55 1005 lock_watch_table();
91447636 1006
2d21ac55
A
1007 for(j=0; j < MAX_WATCHERS; j++) {
1008 watcher = watcher_table[j];
1009 if (watcher != target) {
1010 continue;
1011 }
91447636 1012
2d21ac55
A
1013 watcher_table[j] = NULL;
1014
1015 for(i=0; i < watcher->num_events; i++) {
1016 if (watcher->event_list[i] != FSE_IGNORE && i < FSE_MAX_EVENTS) {
1017 fs_event_type_watchers[i]--;
91447636 1018 }
2d21ac55 1019 }
91447636 1020
2d21ac55
A
1021 if (watcher->flags & WATCHER_CLOSING) {
1022 unlock_watch_table();
1023 return;
1024 }
1025
1026 // printf("fsevents: removing watcher %p (rd %d wr %d num_readers %d flags 0x%x)\n", watcher, watcher->rd, watcher->wr, watcher->num_readers, watcher->flags);
1027 watcher->flags |= WATCHER_CLOSING;
b0d623f7 1028 OSAddAtomic(1, &watcher->num_readers);
2d21ac55
A
1029
1030 unlock_watch_table();
91447636 1031
2d21ac55 1032 while (watcher->num_readers > 1 && counter++ < 5000) {
99c3a104 1033 lock_watch_table();
2d21ac55 1034 fsevents_wakeup(watcher); // in case they're asleep
99c3a104 1035 unlock_watch_table();
2d21ac55
A
1036
1037 tsleep(watcher, PRIBIO, "fsevents-close", 1);
1038 }
1039 if (counter++ >= 5000) {
1040 // printf("fsevents: close: still have readers! (%d)\n", watcher->num_readers);
1041 panic("fsevents: close: still have readers! (%d)\n", watcher->num_readers);
1042 }
1043
1044 // drain the event_queue
2d21ac55 1045
db609669 1046 lck_rw_lock_exclusive(&event_handling_lock);
99c3a104 1047 while(watcher->rd != watcher->wr) {
2d21ac55 1048 kfse = watcher->event_queue[watcher->rd];
99c3a104 1049 watcher->event_queue[watcher->rd] = NULL;
2d21ac55 1050 watcher->rd = (watcher->rd+1) % watcher->eventq_size;
99c3a104 1051 OSSynchronizeIO();
db609669 1052 if (kfse != NULL && kfse->type != FSE_INVALID && kfse->refcount >= 1) {
2d21ac55 1053 release_event_ref(kfse);
91447636 1054 }
2d21ac55 1055 }
db609669 1056 lck_rw_unlock_exclusive(&event_handling_lock);
91447636 1057
2d21ac55
A
1058 if (watcher->event_list) {
1059 FREE(watcher->event_list, M_TEMP);
1060 watcher->event_list = NULL;
1061 }
b0d623f7
A
1062 if (watcher->devices_not_to_watch) {
1063 FREE(watcher->devices_not_to_watch, M_TEMP);
1064 watcher->devices_not_to_watch = NULL;
2d21ac55
A
1065 }
1066 FREE(watcher, M_TEMP);
1067
1068 return;
1069 }
1070
1071 unlock_watch_table();
1072}
1073
1074
1075#define EVENT_DELAY_IN_MS 10
1076static thread_call_t event_delivery_timer = NULL;
1077static int timer_set = 0;
1078
1079
1080static void
1081delayed_event_delivery(__unused void *param0, __unused void *param1)
1082{
1083 int i;
1084
1085 lock_watch_table();
1086
1087 for(i=0; i < MAX_WATCHERS; i++) {
1088 if (watcher_table[i] != NULL && watcher_table[i]->rd != watcher_table[i]->wr) {
1089 fsevents_wakeup(watcher_table[i]);
1090 }
1091 }
1092
1093 timer_set = 0;
1094
1095 unlock_watch_table();
1096}
1097
1098
1099//
1100// The watch table must be locked before calling this function.
1101//
1102static void
1103schedule_event_wakeup(void)
1104{
1105 uint64_t deadline;
1106
1107 if (event_delivery_timer == NULL) {
1108 event_delivery_timer = thread_call_allocate((thread_call_func_t)delayed_event_delivery, NULL);
1109 }
1110
1111 clock_interval_to_deadline(EVENT_DELAY_IN_MS, 1000 * 1000, &deadline);
1112
1113 thread_call_enter_delayed(event_delivery_timer, deadline);
1114 timer_set = 1;
1115}
1116
1117
1118
1119#define MAX_NUM_PENDING 16
1120
1121//
1122// NOTE: the watch table must be locked before calling
1123// this routine.
1124//
1125static int
1126watcher_add_event(fs_event_watcher *watcher, kfs_event *kfse)
1127{
b0d623f7
A
1128 if (kfse->abstime > watcher->max_event_id) {
1129 watcher->max_event_id = kfse->abstime;
1130 }
1131
2d21ac55
A
1132 if (((watcher->wr + 1) % watcher->eventq_size) == watcher->rd) {
1133 watcher->flags |= WATCHER_DROPPED_EVENTS;
1134 fsevents_wakeup(watcher);
1135 return ENOSPC;
1136 }
1137
b0d623f7 1138 OSAddAtomic(1, &kfse->refcount);
2d21ac55
A
1139 watcher->event_queue[watcher->wr] = kfse;
1140 OSSynchronizeIO();
1141 watcher->wr = (watcher->wr + 1) % watcher->eventq_size;
db609669 1142
2d21ac55
A
1143 //
1144 // wake up the watcher if there are more than MAX_NUM_PENDING events.
1145 // otherwise schedule a timer (if one isn't already set) which will
1146 // send any pending events if no more are received in the next
1147 // EVENT_DELAY_IN_MS milli-seconds.
1148 //
db609669
A
1149 int32_t num_pending = 0;
1150 if (watcher->rd < watcher->wr) {
1151 num_pending = watcher->wr - watcher->rd;
1152 }
1153
1154 if (watcher->rd > watcher->wr) {
1155 num_pending = watcher->wr + watcher->eventq_size - watcher->rd;
1156 }
1157
1158 if (num_pending > (watcher->eventq_size*3/4) && !(watcher->flags & WATCHER_APPLE_SYSTEM_SERVICE)) {
1159 /* Non-Apple Service is falling behind, start dropping events for this process */
1160 lck_rw_lock_exclusive(&event_handling_lock);
1161 while (watcher->rd != watcher->wr) {
1162 kfse = watcher->event_queue[watcher->rd];
1163 watcher->event_queue[watcher->rd] = NULL;
1164 watcher->rd = (watcher->rd+1) % watcher->eventq_size;
1165 OSSynchronizeIO();
1166 if (kfse != NULL && kfse->type != FSE_INVALID && kfse->refcount >= 1) {
1167 release_event_ref(kfse);
99c3a104 1168 }
db609669
A
1169 }
1170 watcher->flags |= WATCHER_DROPPED_EVENTS;
1171 lck_rw_unlock_exclusive(&event_handling_lock);
2d21ac55 1172
db609669
A
1173 printf("fsevents: watcher falling behind: %s (pid: %d) rd: %4d wr: %4d q_size: %4d flags: 0x%x\n",
1174 watcher->proc_name, watcher->pid, watcher->rd, watcher->wr,
1175 watcher->eventq_size, watcher->flags);
2d21ac55 1176
db609669
A
1177 fsevents_wakeup(watcher);
1178 } else if (num_pending > MAX_NUM_PENDING) {
1179 fsevents_wakeup(watcher);
1180 } else if (timer_set == 0) {
1181 schedule_event_wakeup();
1182 }
1183
1184 return 0;
2d21ac55
A
1185}
1186
2d21ac55
A
1187static int
1188fill_buff(uint16_t type, int32_t size, const void *data,
1189 char *buff, int32_t *_buff_idx, int32_t buff_sz,
1190 struct uio *uio)
1191{
1192 int32_t amt, error = 0, buff_idx = *_buff_idx;
1193 uint16_t tmp;
1194
1195 //
1196 // the +1 on the size is to guarantee that the main data
1197 // copy loop will always copy at least 1 byte
1198 //
1199 if ((buff_sz - buff_idx) <= (int)(2*sizeof(uint16_t) + 1)) {
1200 if (buff_idx > uio_resid(uio)) {
1201 error = ENOSPC;
1202 goto get_out;
1203 }
1204
1205 error = uiomove(buff, buff_idx, uio);
1206 if (error) {
1207 goto get_out;
1208 }
1209 buff_idx = 0;
1210 }
1211
1212 // copy out the header (type & size)
1213 memcpy(&buff[buff_idx], &type, sizeof(uint16_t));
1214 buff_idx += sizeof(uint16_t);
1215
1216 tmp = size & 0xffff;
1217 memcpy(&buff[buff_idx], &tmp, sizeof(uint16_t));
1218 buff_idx += sizeof(uint16_t);
1219
1220 // now copy the body of the data, flushing along the way
1221 // if the buffer fills up.
1222 //
1223 while(size > 0) {
1224 amt = (size < (buff_sz - buff_idx)) ? size : (buff_sz - buff_idx);
1225 memcpy(&buff[buff_idx], data, amt);
1226
1227 size -= amt;
1228 buff_idx += amt;
1229 data = (const char *)data + amt;
1230 if (size > (buff_sz - buff_idx)) {
1231 if (buff_idx > uio_resid(uio)) {
1232 error = ENOSPC;
1233 goto get_out;
91447636 1234 }
2d21ac55
A
1235 error = uiomove(buff, buff_idx, uio);
1236 if (error) {
1237 goto get_out;
91447636 1238 }
2d21ac55
A
1239 buff_idx = 0;
1240 }
1241
1242 if (amt == 0) { // just in case...
1243 break;
1244 }
1245 }
1246
1247 get_out:
1248 *_buff_idx = buff_idx;
1249
1250 return error;
1251}
1252
1253
1254static int copy_out_kfse(fs_event_watcher *watcher, kfs_event *kfse, struct uio *uio) __attribute__((noinline));
1255
1256static int
1257copy_out_kfse(fs_event_watcher *watcher, kfs_event *kfse, struct uio *uio)
1258{
1259 int error;
1260 uint16_t tmp16;
1261 int32_t type;
1262 kfs_event *cur;
1263 char evbuff[512];
1264 int evbuff_idx = 0;
1265
1266 if (kfse->type == FSE_INVALID) {
1267 panic("fsevents: copy_out_kfse: asked to copy out an invalid event (kfse %p, refcount %d fref ptr %p)\n", kfse, kfse->refcount, kfse->str);
1268 }
1269
1270 if (kfse->flags & KFSE_BEING_CREATED) {
1271 return 0;
1272 }
1273
1274 if (kfse->type == FSE_RENAME && kfse->dest == NULL) {
1275 //
1276 // This can happen if an event gets recycled but we had a
1277 // pointer to it in our event queue. The event is the
1278 // destination of a rename which we'll process separately
1279 // (that is, another kfse points to this one so it's ok
1280 // to skip this guy because we'll process it when we process
1281 // the other one)
1282 error = 0;
1283 goto get_out;
1284 }
1285
1286 if (watcher->flags & WATCHER_WANTS_EXTENDED_INFO) {
1287
1288 type = (kfse->type & 0xfff);
1289
1290 if (kfse->flags & KFSE_CONTAINS_DROPPED_EVENTS) {
1291 type |= (FSE_CONTAINS_DROPPED_EVENTS << FSE_FLAG_SHIFT);
1292 } else if (kfse->flags & KFSE_COMBINED_EVENTS) {
1293 type |= (FSE_COMBINED_EVENTS << FSE_FLAG_SHIFT);
1294 }
1295
1296 } else {
1297 type = (int32_t)kfse->type;
1298 }
1299
1300 // copy out the type of the event
1301 memcpy(evbuff, &type, sizeof(int32_t));
1302 evbuff_idx += sizeof(int32_t);
1303
1304 // copy out the pid of the person that generated the event
1305 memcpy(&evbuff[evbuff_idx], &kfse->pid, sizeof(pid_t));
1306 evbuff_idx += sizeof(pid_t);
1307
1308 cur = kfse;
1309
1310 copy_again:
1311
22ba694c
A
1312 if (kfse->type == FSE_DOCID_CHANGED || kfse->type == FSE_DOCID_CREATED) {
1313 dev_t dev = cur->dev;
1314 ino_t ino = cur->ino;
1315 uint64_t ival;
1316
1317 error = fill_buff(FSE_ARG_DEV, sizeof(dev_t), &dev, evbuff, &evbuff_idx, sizeof(evbuff), uio);
1318 if (error != 0) {
1319 goto get_out;
1320 }
1321
1322 error = fill_buff(FSE_ARG_INO, sizeof(ino_t), &ino, evbuff, &evbuff_idx, sizeof(evbuff), uio);
1323 if (error != 0) {
1324 goto get_out;
1325 }
1326
1327 memcpy(&ino, &cur->str, sizeof(ino_t));
1328 error = fill_buff(FSE_ARG_INO, sizeof(ino_t), &ino, evbuff, &evbuff_idx, sizeof(evbuff), uio);
1329 if (error != 0) {
1330 goto get_out;
1331 }
1332
1333 memcpy(&ival, &cur->uid, sizeof(uint64_t)); // the docid gets stuffed into the ino field
1334 error = fill_buff(FSE_ARG_INT64, sizeof(uint64_t), &ival, evbuff, &evbuff_idx, sizeof(evbuff), uio);
1335 if (error != 0) {
1336 goto get_out;
1337 }
1338
1339 goto done;
1340 }
1341
2d21ac55
A
1342 if (cur->str == NULL || cur->str[0] == '\0') {
1343 printf("copy_out_kfse:2: empty/short path (%s)\n", cur->str);
1344 error = fill_buff(FSE_ARG_STRING, 2, "/", evbuff, &evbuff_idx, sizeof(evbuff), uio);
1345 } else {
1346 error = fill_buff(FSE_ARG_STRING, cur->len, cur->str, evbuff, &evbuff_idx, sizeof(evbuff), uio);
1347 }
1348 if (error != 0) {
1349 goto get_out;
1350 }
1351
1352 if (cur->dev == 0 && cur->ino == 0) {
1353 // this happens when a rename event happens and the
1354 // destination of the rename did not previously exist.
1355 // it thus has no other file info so skip copying out
1356 // the stuff below since it isn't initialized
1357 goto done;
1358 }
1359
1360
1361 if (watcher->flags & WATCHER_WANTS_COMPACT_EVENTS) {
1362 int32_t finfo_size;
1363
1364 finfo_size = sizeof(dev_t) + sizeof(ino64_t) + sizeof(int32_t) + sizeof(uid_t) + sizeof(gid_t);
1365 error = fill_buff(FSE_ARG_FINFO, finfo_size, &cur->ino, evbuff, &evbuff_idx, sizeof(evbuff), uio);
1366 if (error != 0) {
1367 goto get_out;
1368 }
1369 } else {
1370 ino_t ino;
1371
1372 error = fill_buff(FSE_ARG_DEV, sizeof(dev_t), &cur->dev, evbuff, &evbuff_idx, sizeof(evbuff), uio);
1373 if (error != 0) {
1374 goto get_out;
1375 }
1376
1377 ino = (ino_t)cur->ino;
1378 error = fill_buff(FSE_ARG_INO, sizeof(ino_t), &ino, evbuff, &evbuff_idx, sizeof(evbuff), uio);
1379 if (error != 0) {
1380 goto get_out;
1381 }
1382
1383 error = fill_buff(FSE_ARG_MODE, sizeof(int32_t), &cur->mode, evbuff, &evbuff_idx, sizeof(evbuff), uio);
1384 if (error != 0) {
1385 goto get_out;
1386 }
1387
1388 error = fill_buff(FSE_ARG_UID, sizeof(uid_t), &cur->uid, evbuff, &evbuff_idx, sizeof(evbuff), uio);
1389 if (error != 0) {
1390 goto get_out;
1391 }
91447636 1392
2d21ac55
A
1393 error = fill_buff(FSE_ARG_GID, sizeof(gid_t), &cur->gid, evbuff, &evbuff_idx, sizeof(evbuff), uio);
1394 if (error != 0) {
1395 goto get_out;
91447636
A
1396 }
1397 }
1398
91447636 1399
2d21ac55
A
1400 if (cur->dest) {
1401 cur = cur->dest;
1402 goto copy_again;
1403 }
91447636 1404
2d21ac55
A
1405 done:
1406 // very last thing: the time stamp
1407 error = fill_buff(FSE_ARG_INT64, sizeof(uint64_t), &cur->abstime, evbuff, &evbuff_idx, sizeof(evbuff), uio);
1408 if (error != 0) {
1409 goto get_out;
91447636
A
1410 }
1411
2d21ac55
A
1412 // check if the FSE_ARG_DONE will fit
1413 if (sizeof(uint16_t) > sizeof(evbuff) - evbuff_idx) {
1414 if (evbuff_idx > uio_resid(uio)) {
1415 error = ENOSPC;
1416 goto get_out;
1417 }
1418 error = uiomove(evbuff, evbuff_idx, uio);
1419 if (error) {
1420 goto get_out;
1421 }
1422 evbuff_idx = 0;
1423 }
91447636 1424
2d21ac55
A
1425 tmp16 = FSE_ARG_DONE;
1426 memcpy(&evbuff[evbuff_idx], &tmp16, sizeof(uint16_t));
1427 evbuff_idx += sizeof(uint16_t);
1428
1429 // flush any remaining data in the buffer (and hopefully
1430 // in most cases this is the only uiomove we'll do)
1431 if (evbuff_idx > uio_resid(uio)) {
1432 error = ENOSPC;
1433 } else {
1434 error = uiomove(evbuff, evbuff_idx, uio);
1435 }
1436
1437 get_out:
1438
1439 return error;
91447636
A
1440}
1441
1442
2d21ac55 1443
91447636
A
1444static int
1445fmod_watch(fs_event_watcher *watcher, struct uio *uio)
1446{
b0d623f7
A
1447 int error=0;
1448 user_ssize_t last_full_event_resid;
91447636 1449 kfs_event *kfse;
91447636 1450 uint16_t tmp16;
99c3a104 1451 int skipped;
91447636 1452
91447636
A
1453 last_full_event_resid = uio_resid(uio);
1454
1455 // need at least 2048 bytes of space (maxpathlen + 1 event buf)
1456 if (uio_resid(uio) < 2048 || watcher == NULL) {
1457 return EINVAL;
1458 }
1459
2d21ac55
A
1460 if (watcher->flags & WATCHER_CLOSING) {
1461 return 0;
1462 }
1463
b0d623f7 1464 if (OSAddAtomic(1, &watcher->num_readers) != 0) {
0c530ab8 1465 // don't allow multiple threads to read from the fd at the same time
b0d623f7 1466 OSAddAtomic(-1, &watcher->num_readers);
0c530ab8
A
1467 return EAGAIN;
1468 }
91447636 1469
99c3a104 1470 restart_watch:
91447636
A
1471 if (watcher->rd == watcher->wr) {
1472 if (watcher->flags & WATCHER_CLOSING) {
b0d623f7 1473 OSAddAtomic(-1, &watcher->num_readers);
91447636
A
1474 return 0;
1475 }
b0d623f7 1476 OSAddAtomic(1, &watcher->blockers);
91447636
A
1477
1478 // there's nothing to do, go to sleep
1479 error = tsleep((caddr_t)watcher, PUSER|PCATCH, "fsevents_empty", 0);
1480
b0d623f7 1481 OSAddAtomic(-1, &watcher->blockers);
91447636
A
1482
1483 if (error != 0 || (watcher->flags & WATCHER_CLOSING)) {
b0d623f7 1484 OSAddAtomic(-1, &watcher->num_readers);
91447636
A
1485 return error;
1486 }
1487 }
1488
1489 // if we dropped events, return that as an event first
1490 if (watcher->flags & WATCHER_DROPPED_EVENTS) {
1491 int32_t val = FSE_EVENTS_DROPPED;
1492
1493 error = uiomove((caddr_t)&val, sizeof(int32_t), uio);
1494 if (error == 0) {
1495 val = 0; // a fake pid
1496 error = uiomove((caddr_t)&val, sizeof(int32_t), uio);
1497
1498 tmp16 = FSE_ARG_DONE; // makes it a consistent msg
1499 error = uiomove((caddr_t)&tmp16, sizeof(int16_t), uio);
2d21ac55 1500
2d21ac55 1501 last_full_event_resid = uio_resid(uio);
91447636
A
1502 }
1503
1504 if (error) {
b0d623f7 1505 OSAddAtomic(-1, &watcher->num_readers);
91447636
A
1506 return error;
1507 }
1508
1509 watcher->flags &= ~WATCHER_DROPPED_EVENTS;
1510 }
1511
99c3a104 1512 skipped = 0;
db609669
A
1513
1514 lck_rw_lock_shared(&event_handling_lock);
2d21ac55
A
1515 while (uio_resid(uio) > 0 && watcher->rd != watcher->wr) {
1516 if (watcher->flags & WATCHER_CLOSING) {
1517 break;
91447636 1518 }
2d21ac55
A
1519
1520 //
1521 // check if the event is something of interest to us
1522 // (since it may have been recycled/reused and changed
1523 // its type or which device it is for)
1524 //
2d21ac55 1525 kfse = watcher->event_queue[watcher->rd];
99c3a104 1526 if (!kfse || kfse->type == FSE_INVALID || kfse->refcount < 1) {
99c3a104 1527 break;
91447636
A
1528 }
1529
2d21ac55
A
1530 if (watcher->event_list[kfse->type] == FSE_REPORT && watcher_cares_about_dev(watcher, kfse->dev)) {
1531
22ba694c 1532 if (!(watcher->flags & WATCHER_APPLE_SYSTEM_SERVICE) && kfse->type != FSE_DOCID_CHANGED && is_ignored_directory(kfse->str)) {
99c3a104
A
1533 // If this is not an Apple System Service, skip specified directories
1534 // radar://12034844
1535 error = 0;
1536 skipped = 1;
1537 } else {
1538
1539 skipped = 0;
6d2010ae
A
1540 if (last_event_ptr == kfse) {
1541 last_event_ptr = NULL;
1542 last_event_type = -1;
1543 last_coalesced_time = 0;
1544 }
2d21ac55
A
1545 error = copy_out_kfse(watcher, kfse, uio);
1546 if (error != 0) {
1547 // if an event won't fit or encountered an error while
1548 // we were copying it out, then backup to the last full
1549 // event and just bail out. if the error was ENOENT
1550 // then we can continue regular processing, otherwise
1551 // we should unlock things and return.
1552 uio_setresid(uio, last_full_event_resid);
1553 if (error != ENOENT) {
1554 lck_rw_unlock_shared(&event_handling_lock);
1555 error = 0;
1556 goto get_out;
1557 }
91447636
A
1558 }
1559
2d21ac55 1560 last_full_event_resid = uio_resid(uio);
99c3a104 1561 }
91447636
A
1562 }
1563
db609669 1564 watcher->event_queue[watcher->rd] = NULL;
91447636 1565 watcher->rd = (watcher->rd + 1) % watcher->eventq_size;
2d21ac55 1566 OSSynchronizeIO();
2d21ac55 1567 release_event_ref(kfse);
99c3a104 1568 }
db609669 1569 lck_rw_unlock_shared(&event_handling_lock);
99c3a104
A
1570
1571 if (skipped && error == 0) {
1572 goto restart_watch;
91447636
A
1573 }
1574
1575 get_out:
b0d623f7 1576 OSAddAtomic(-1, &watcher->num_readers);
2d21ac55 1577
91447636
A
1578 return error;
1579}
1580
1581
1582// release any references we might have on vnodes which are
1583// the mount point passed to us (so that it can be cleanly
1584// unmounted).
1585//
1586// since we don't want to lose the events we'll convert the
2d21ac55 1587// vnode refs to full paths.
91447636
A
1588//
1589void
2d21ac55 1590fsevent_unmount(__unused struct mount *mp)
91447636 1591{
2d21ac55
A
1592 // we no longer maintain pointers to vnodes so
1593 // there is nothing to do...
91447636
A
1594}
1595
1596
1597//
1598// /dev/fsevents device code
1599//
1600static int fsevents_installed = 0;
91447636
A
1601
1602typedef struct fsevent_handle {
0c530ab8
A
1603 UInt32 flags;
1604 SInt32 active;
91447636 1605 fs_event_watcher *watcher;
b0d623f7 1606 struct klist knotes;
91447636
A
1607 struct selinfo si;
1608} fsevent_handle;
1609
0c530ab8 1610#define FSEH_CLOSING 0x0001
91447636
A
1611
1612static int
1613fseventsf_read(struct fileproc *fp, struct uio *uio,
2d21ac55 1614 __unused int flags, __unused vfs_context_t ctx)
91447636
A
1615{
1616 fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;
1617 int error;
1618
1619 error = fmod_watch(fseh->watcher, uio);
1620
1621 return error;
1622}
1623
2d21ac55 1624
91447636
A
1625static int
1626fseventsf_write(__unused struct fileproc *fp, __unused struct uio *uio,
2d21ac55 1627 __unused int flags, __unused vfs_context_t ctx)
91447636
A
1628{
1629 return EIO;
1630}
1631
b0d623f7 1632#pragma pack(push, 4)
2d21ac55
A
1633typedef struct ext_fsevent_dev_filter_args {
1634 uint32_t num_devices;
1635 user_addr_t devices;
1636} ext_fsevent_dev_filter_args;
b0d623f7 1637#pragma pack(pop)
2d21ac55 1638
316670eb
A
1639#define NEW_FSEVENTS_DEVICE_FILTER _IOW('s', 100, ext_fsevent_dev_filter_args)
1640
2d21ac55
A
1641typedef struct old_fsevent_dev_filter_args {
1642 uint32_t num_devices;
1643 int32_t devices;
1644} old_fsevent_dev_filter_args;
1645
1646#define OLD_FSEVENTS_DEVICE_FILTER _IOW('s', 100, old_fsevent_dev_filter_args)
2d21ac55 1647
b0d623f7
A
1648#if __LP64__
1649/* need this in spite of the padding due to alignment of devices */
1650typedef struct fsevent_dev_filter_args32 {
1651 uint32_t num_devices;
1652 uint32_t devices;
1653 int32_t pad1;
1654} fsevent_dev_filter_args32;
1655#endif
91447636
A
1656
1657static int
2d21ac55 1658fseventsf_ioctl(struct fileproc *fp, u_long cmd, caddr_t data, vfs_context_t ctx)
91447636
A
1659{
1660 fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;
1661 int ret = 0;
2d21ac55
A
1662 ext_fsevent_dev_filter_args *devfilt_args, _devfilt_args;
1663
1664 if (proc_is64bit(vfs_context_proc(ctx))) {
1665 devfilt_args = (ext_fsevent_dev_filter_args *)data;
316670eb
A
1666 }
1667 else if (cmd == OLD_FSEVENTS_DEVICE_FILTER) {
2d21ac55
A
1668 old_fsevent_dev_filter_args *udev_filt_args = (old_fsevent_dev_filter_args *)data;
1669
1670 devfilt_args = &_devfilt_args;
1671 memset(devfilt_args, 0, sizeof(ext_fsevent_dev_filter_args));
1672
1673 devfilt_args->num_devices = udev_filt_args->num_devices;
1674 devfilt_args->devices = CAST_USER_ADDR_T(udev_filt_args->devices);
316670eb
A
1675 }
1676 else {
b0d623f7
A
1677#if __LP64__
1678 fsevent_dev_filter_args32 *udev_filt_args = (fsevent_dev_filter_args32 *)data;
1679#else
2d21ac55 1680 fsevent_dev_filter_args *udev_filt_args = (fsevent_dev_filter_args *)data;
b0d623f7 1681#endif
2d21ac55
A
1682
1683 devfilt_args = &_devfilt_args;
1684 memset(devfilt_args, 0, sizeof(ext_fsevent_dev_filter_args));
1685
1686 devfilt_args->num_devices = udev_filt_args->num_devices;
1687 devfilt_args->devices = CAST_USER_ADDR_T(udev_filt_args->devices);
1688 }
91447636 1689
0c530ab8
A
1690 OSAddAtomic(1, &fseh->active);
1691 if (fseh->flags & FSEH_CLOSING) {
1692 OSAddAtomic(-1, &fseh->active);
1693 return 0;
1694 }
1695
91447636
A
1696 switch (cmd) {
1697 case FIONBIO:
1698 case FIOASYNC:
0c530ab8 1699 break;
91447636 1700
2d21ac55
A
1701 case FSEVENTS_WANT_COMPACT_EVENTS: {
1702 fseh->watcher->flags |= WATCHER_WANTS_COMPACT_EVENTS;
1703 break;
1704 }
1705
1706 case FSEVENTS_WANT_EXTENDED_INFO: {
1707 fseh->watcher->flags |= WATCHER_WANTS_EXTENDED_INFO;
1708 break;
1709 }
1710
b0d623f7
A
1711 case FSEVENTS_GET_CURRENT_ID: {
1712 *(uint64_t *)data = fseh->watcher->max_event_id;
1713 ret = 0;
1714 break;
1715 }
1716
2d21ac55
A
1717 case OLD_FSEVENTS_DEVICE_FILTER:
1718 case NEW_FSEVENTS_DEVICE_FILTER: {
91447636 1719 int new_num_devices;
b0d623f7 1720 dev_t *devices_not_to_watch, *tmp=NULL;
91447636
A
1721
1722 if (devfilt_args->num_devices > 256) {
1723 ret = EINVAL;
1724 break;
1725 }
1726
1727 new_num_devices = devfilt_args->num_devices;
1728 if (new_num_devices == 0) {
b0d623f7 1729 tmp = fseh->watcher->devices_not_to_watch;
91447636 1730
2d21ac55 1731 lock_watch_table();
b0d623f7 1732 fseh->watcher->devices_not_to_watch = NULL;
91447636 1733 fseh->watcher->num_devices = new_num_devices;
2d21ac55 1734 unlock_watch_table();
91447636
A
1735
1736 if (tmp) {
1737 FREE(tmp, M_TEMP);
1738 }
1739 break;
1740 }
1741
b0d623f7 1742 MALLOC(devices_not_to_watch, dev_t *,
91447636
A
1743 new_num_devices * sizeof(dev_t),
1744 M_TEMP, M_WAITOK);
b0d623f7 1745 if (devices_not_to_watch == NULL) {
91447636
A
1746 ret = ENOMEM;
1747 break;
1748 }
1749
2d21ac55 1750 ret = copyin(devfilt_args->devices,
b0d623f7 1751 (void *)devices_not_to_watch,
91447636
A
1752 new_num_devices * sizeof(dev_t));
1753 if (ret) {
b0d623f7 1754 FREE(devices_not_to_watch, M_TEMP);
91447636
A
1755 break;
1756 }
1757
2d21ac55 1758 lock_watch_table();
91447636 1759 fseh->watcher->num_devices = new_num_devices;
b0d623f7
A
1760 tmp = fseh->watcher->devices_not_to_watch;
1761 fseh->watcher->devices_not_to_watch = devices_not_to_watch;
2d21ac55 1762 unlock_watch_table();
91447636
A
1763
1764 if (tmp) {
1765 FREE(tmp, M_TEMP);
1766 }
1767
1768 break;
1769 }
1770
1771 default:
1772 ret = EINVAL;
1773 break;
1774 }
1775
0c530ab8 1776 OSAddAtomic(-1, &fseh->active);
91447636
A
1777 return (ret);
1778}
1779
1780
1781static int
2d21ac55 1782fseventsf_select(struct fileproc *fp, int which, __unused void *wql, vfs_context_t ctx)
91447636
A
1783{
1784 fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;
1785 int ready = 0;
1786
1787 if ((which != FREAD) || (fseh->watcher->flags & WATCHER_CLOSING)) {
1788 return 0;
1789 }
1790
1791
1792 // if there's nothing in the queue, we're not ready
2d21ac55 1793 if (fseh->watcher->rd != fseh->watcher->wr) {
91447636
A
1794 ready = 1;
1795 }
1796
1797 if (!ready) {
2d21ac55 1798 selrecord(vfs_context_proc(ctx), &fseh->si, wql);
91447636
A
1799 }
1800
1801 return ready;
1802}
1803
1804
2d21ac55 1805#if NOTUSED
91447636 1806static int
2d21ac55 1807fseventsf_stat(__unused struct fileproc *fp, __unused struct stat *sb, __unused vfs_context_t ctx)
91447636
A
1808{
1809 return ENOTSUP;
1810}
2d21ac55 1811#endif
91447636
A
1812
1813static int
2d21ac55 1814fseventsf_close(struct fileglob *fg, __unused vfs_context_t ctx)
91447636
A
1815{
1816 fsevent_handle *fseh = (struct fsevent_handle *)fg->fg_data;
0c530ab8 1817 fs_event_watcher *watcher;
2d21ac55 1818
0c530ab8
A
1819 OSBitOrAtomic(FSEH_CLOSING, &fseh->flags);
1820 while (OSAddAtomic(0, &fseh->active) > 0) {
1821 tsleep((caddr_t)fseh->watcher, PRIBIO, "fsevents-close", 1);
1822 }
91447636 1823
0c530ab8 1824 watcher = fseh->watcher;
0c530ab8 1825 fg->fg_data = NULL;
2d21ac55 1826 fseh->watcher = NULL;
0c530ab8
A
1827
1828 remove_watcher(watcher);
91447636
A
1829 FREE(fseh, M_TEMP);
1830
1831 return 0;
1832}
1833
b0d623f7
A
1834static void
1835filt_fsevent_detach(struct knote *kn)
1836{
1837 fsevent_handle *fseh = (struct fsevent_handle *)kn->kn_hook;
1838
1839 lock_watch_table();
1840
1841 KNOTE_DETACH(&fseh->knotes, kn);
1842
1843 unlock_watch_table();
1844}
1845
1846/*
1847 * Determine whether this knote should be active
1848 *
1849 * This is kind of subtle.
1850 * --First, notice if the vnode has been revoked: in so, override hint
1851 * --EVFILT_READ knotes are checked no matter what the hint is
1852 * --Other knotes activate based on hint.
1853 * --If hint is revoke, set special flags and activate
1854 */
1855static int
1856filt_fsevent(struct knote *kn, long hint)
1857{
1858 fsevent_handle *fseh = (struct fsevent_handle *)kn->kn_hook;
1859 int activate = 0;
1860 int32_t rd, wr, amt;
1861
1862 if (NOTE_REVOKE == hint) {
1863 kn->kn_flags |= (EV_EOF | EV_ONESHOT);
1864 activate = 1;
1865 }
1866
1867 rd = fseh->watcher->rd;
1868 wr = fseh->watcher->wr;
1869 if (rd <= wr) {
1870 amt = wr - rd;
1871 } else {
1872 amt = fseh->watcher->eventq_size - (rd - wr);
1873 }
1874
1875 switch(kn->kn_filter) {
1876 case EVFILT_READ:
1877 kn->kn_data = amt;
1878
1879 if (kn->kn_data != 0) {
1880 activate = 1;
1881 }
1882 break;
1883 case EVFILT_VNODE:
1884 /* Check events this note matches against the hint */
1885 if (kn->kn_sfflags & hint) {
1886 kn->kn_fflags |= hint; /* Set which event occurred */
1887 }
1888 if (kn->kn_fflags != 0) {
1889 activate = 1;
1890 }
1891 break;
1892 default: {
1893 // nothing to do...
1894 break;
1895 }
1896 }
1897
1898 return (activate);
1899}
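/*
 * Hedged sketch of the circular-buffer arithmetic used above, with the
 * same meaning as the watcher's rd/wr/eventq_size fields.  For example,
 * with eventq_size == 8, rd == 6 and wr == 2 the queue has wrapped, so
 * the pending count is 8 - (6 - 2) == 4.
 */
#if 0	/* illustration only */
static int32_t
pending_events(int32_t rd, int32_t wr, int32_t eventq_size)
{
	return (rd <= wr) ? (wr - rd) : (eventq_size - (rd - wr));
}
#endif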
1900
1901
1902struct filterops fsevent_filtops = {
1903 .f_isfd = 1,
1904 .f_attach = NULL,
1905 .f_detach = filt_fsevent_detach,
1906 .f_event = filt_fsevent
1907};
1908
2d21ac55
A
1909static int
1910fseventsf_kqfilter(__unused struct fileproc *fp, __unused struct knote *kn, __unused vfs_context_t ctx)
91447636 1911{
b0d623f7
A
1912 fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;
1913
1914 kn->kn_hook = (void*)fseh;
1915 kn->kn_hookid = 1;
1916 kn->kn_fop = &fsevent_filtops;
1917
1918 lock_watch_table();
1919
1920 KNOTE_ATTACH(&fseh->knotes, kn);
1921
1922 unlock_watch_table();
91447636
A
1923 return 0;
1924}
1925
1926
1927static int
2d21ac55 1928fseventsf_drain(struct fileproc *fp, __unused vfs_context_t ctx)
91447636
A
1929{
1930 int counter = 0;
1931 fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;
1932
1933 fseh->watcher->flags |= WATCHER_CLOSING;
1934
1935 // if there are people still waiting, sleep for 10ms to
 1936 // let them clean up and get out of there. however, we
 1937 // also don't want to get stuck forever, so if they haven't
 1938 // exited after 5 seconds we tear things down anyway.
1939 while(fseh->watcher->blockers && counter++ < 500) {
 1940 // issue a wakeup in case anyone is blocked waiting for an event.
 1941 // do this each time we wake up in case the blocker missed
1942 // the wakeup due to the unprotected test of WATCHER_CLOSING
1943 // and decision to tsleep in fmod_watch... this bit of
1944 // latency is a decent tradeoff against not having to
1945 // take and drop a lock in fmod_watch
99c3a104 1946 lock_watch_table();
2d21ac55 1947 fsevents_wakeup(fseh->watcher);
99c3a104 1948 unlock_watch_table();
91447636
A
1949
1950 tsleep((caddr_t)fseh->watcher, PRIBIO, "watcher-close", 1);
1951 }
1952
1953 return 0;
1954}
1955
1956
1957static int
2d21ac55 1958fseventsopen(__unused dev_t dev, __unused int flag, __unused int mode, __unused struct proc *p)
91447636 1959{
39236c6e 1960 if (!kauth_cred_issuser(kauth_cred_get())) {
91447636
A
1961 return EPERM;
1962 }
1963
1964 return 0;
1965}
1966
1967static int
2d21ac55 1968fseventsclose(__unused dev_t dev, __unused int flag, __unused int mode, __unused struct proc *p)
91447636
A
1969{
1970 return 0;
1971}
1972
1973static int
2d21ac55 1974fseventsread(__unused dev_t dev, __unused struct uio *uio, __unused int ioflag)
91447636
A
1975{
1976 return EIO;
1977}
1978
2d21ac55 1979
91447636 1980static int
2d21ac55 1981parse_buffer_and_add_events(const char *buffer, int bufsize, vfs_context_t ctx, long *remainder)
91447636 1982{
2d21ac55
A
1983 const fse_info *finfo, *dest_finfo;
1984 const char *path, *ptr, *dest_path, *event_start=buffer;
1985 int path_len, type, dest_path_len, err = 0;
1986
1987
1988 ptr = buffer;
1989 while ((ptr+sizeof(int)+sizeof(fse_info)+1) < buffer+bufsize) {
1990 type = *(const int *)ptr;
1991 if (type < 0 || type >= FSE_MAX_EVENTS) {
1992 err = EINVAL;
1993 break;
1994 }
1995
1996 ptr += sizeof(int);
1997
1998 finfo = (const fse_info *)ptr;
1999 ptr += sizeof(fse_info);
2000
2001 path = ptr;
2002 while(ptr < buffer+bufsize && *ptr != '\0') {
2003 ptr++;
2004 }
2005
2006 if (ptr >= buffer+bufsize) {
2007 break;
2008 }
2009
2010 ptr++; // advance over the trailing '\0'
2011
2012 path_len = ptr - path;
2013
2014 if (type != FSE_RENAME && type != FSE_EXCHANGE) {
2015 event_start = ptr; // record where the next event starts
2016
2017 err = add_fsevent(type, ctx, FSE_ARG_STRING, path_len, path, FSE_ARG_FINFO, finfo, FSE_ARG_DONE);
2018 if (err) {
2019 break;
2020 }
2021 continue;
2022 }
2023
2024 //
2025 // if we're here we have to slurp up the destination finfo
2026 // and path so that we can pass them to the add_fsevent()
2027 // call. basically it's a copy of the above code.
2028 //
2029 dest_finfo = (const fse_info *)ptr;
2030 ptr += sizeof(fse_info);
2031
2032 dest_path = ptr;
2033 while(ptr < buffer+bufsize && *ptr != '\0') {
2034 ptr++;
2035 }
2036
2037 if (ptr >= buffer+bufsize) {
2038 break;
2039 }
2040
2041 ptr++; // advance over the trailing '\0'
2042 event_start = ptr; // record where the next event starts
2043
2044 dest_path_len = ptr - dest_path;
b0d623f7
A
2045 //
2046 // If the destination inode number is non-zero, generate a rename
2047 // with both source and destination FSE_ARG_FINFO. Otherwise generate
2048 // a rename with only one FSE_ARG_FINFO. If you need to inject an
2049 // exchange with an inode of zero, just make that inode (and its path)
2050 // come in as the first one, not the second.
2051 //
2052 if (dest_finfo->ino) {
2053 err = add_fsevent(type, ctx,
2054 FSE_ARG_STRING, path_len, path, FSE_ARG_FINFO, finfo,
2055 FSE_ARG_STRING, dest_path_len, dest_path, FSE_ARG_FINFO, dest_finfo,
2056 FSE_ARG_DONE);
2057 } else {
2058 err = add_fsevent(type, ctx,
2059 FSE_ARG_STRING, path_len, path, FSE_ARG_FINFO, finfo,
2060 FSE_ARG_STRING, dest_path_len, dest_path,
2061 FSE_ARG_DONE);
2062 }
2063
2d21ac55
A
2064 if (err) {
2065 break;
2066 }
2067
2068 }
2069
2070 // if the last event wasn't complete, set the remainder
2071 // to be the last event start boundary.
2072 //
2073 *remainder = (long)((buffer+bufsize) - event_start);
2074
2075 return err;
2076}
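/*
 * Hedged sketch of the record layout the parser above expects: a 32-bit
 * event type, an fse_info, then a NUL-terminated path, with a second
 * fse_info/path pair appended only for FSE_RENAME and FSE_EXCHANGE.
 * pack_simple_event() is a made-up illustration helper (it assumes
 * memcpy/strlen), not part of any public interface.
 */
#if 0	/* illustration only */
static size_t
pack_simple_event(char *buf, size_t bufsize, int type,
                  const fse_info *finfo, const char *path)
{
	size_t path_len = strlen(path) + 1;	// include the trailing '\0'
	size_t total = sizeof(int) + sizeof(fse_info) + path_len;

	if (total > bufsize) {
		return 0;			// caller's buffer is too small
	}
	memcpy(buf, &type, sizeof(int));
	memcpy(buf + sizeof(int), finfo, sizeof(fse_info));
	memcpy(buf + sizeof(int) + sizeof(fse_info), path, path_len);
	return total;				// bytes consumed in buf
}
#endif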
2077
2078
2079//
 2080// Note: this buffer size can never be less than
2081// 2*MAXPATHLEN + 2*sizeof(fse_info) + sizeof(int)
2082// because that is the max size for a single event.
2083// I made it 4k to be a "nice" size. making it
2084// smaller is not a good idea.
2085//
2086#define WRITE_BUFFER_SIZE 4096
2087char *write_buffer=NULL;
2088
2089static int
2090fseventswrite(__unused dev_t dev, struct uio *uio, __unused int ioflag)
2091{
2092 int error=0, count;
2093 vfs_context_t ctx = vfs_context_current();
2094 long offset=0, remainder;
2095
2096 lck_mtx_lock(&event_writer_lock);
2097
2098 if (write_buffer == NULL) {
2099 if (kmem_alloc(kernel_map, (vm_offset_t *)&write_buffer, WRITE_BUFFER_SIZE)) {
2100 lck_mtx_unlock(&event_writer_lock);
2101 return ENOMEM;
2102 }
2103 }
2104
2105 //
2106 // this loop copies in and processes the events written.
2107 // it takes care to copy in reasonable size chunks and
2108 // process them. if there is an event that spans a chunk
2109 // boundary we're careful to copy those bytes down to the
2110 // beginning of the buffer and read the next chunk in just
2111 // after it.
2112 //
2113 while(uio_resid(uio)) {
2114 if (uio_resid(uio) > (WRITE_BUFFER_SIZE-offset)) {
2115 count = WRITE_BUFFER_SIZE - offset;
2116 } else {
2117 count = uio_resid(uio);
2118 }
2119
2120 error = uiomove(write_buffer+offset, count, uio);
2121 if (error) {
2122 break;
2123 }
2124
2125 // printf("fsevents: write: copied in %d bytes (offset: %ld)\n", count, offset);
2126 error = parse_buffer_and_add_events(write_buffer, offset+count, ctx, &remainder);
2127 if (error) {
2128 break;
2129 }
2130
2131 //
2132 // if there's any remainder, copy it down to the beginning
2133 // of the buffer so that it will get processed the next time
2134 // through the loop. note that the remainder always starts
2135 // at an event boundary.
2136 //
2137 if (remainder != 0) {
2138 // printf("fsevents: write: an event spanned a %d byte boundary. remainder: %ld\n",
2139 // WRITE_BUFFER_SIZE, remainder);
2140 memmove(write_buffer, (write_buffer+count+offset) - remainder, remainder);
2141 offset = remainder;
2142 } else {
2143 offset = 0;
2144 }
2145 }
2146
2147 lck_mtx_unlock(&event_writer_lock);
2148
2149 return error;
91447636
A
2150}
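/*
 * Hedged continuation of the pack_simple_event() sketch above: a
 * userland injector (root-only, per fseventsopen above) would simply
 * write() the packed records to its /dev/fsevents fd and let the
 * chunking loop above parse them.  Assumes <unistd.h>; fsevents_fd and
 * finfo are set up elsewhere.
 */
#if 0	/* illustration only */
static int
inject_stat_changed(int fsevents_fd, const fse_info *finfo, const char *path)
{
	char buf[4096];
	size_t used;

	used = pack_simple_event(buf, sizeof(buf), FSE_STAT_CHANGED, finfo, path);
	if (used == 0) {
		return -1;
	}
	return (write(fsevents_fd, buf, used) == (ssize_t)used) ? 0 : -1;
}
#endif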
2151
2152
39236c6e
A
2153static const struct fileops fsevents_fops = {
2154 DTYPE_FSEVENTS,
91447636
A
2155 fseventsf_read,
2156 fseventsf_write,
2157 fseventsf_ioctl,
2158 fseventsf_select,
2159 fseventsf_close,
2160 fseventsf_kqfilter,
2161 fseventsf_drain
2162};
2163
2d21ac55
A
2164typedef struct ext_fsevent_clone_args {
2165 user_addr_t event_list;
2166 int32_t num_events;
2167 int32_t event_queue_depth;
2168 user_addr_t fd;
2169} ext_fsevent_clone_args;
2170
2171typedef struct old_fsevent_clone_args {
b0d623f7 2172 uint32_t event_list;
2d21ac55
A
2173 int32_t num_events;
2174 int32_t event_queue_depth;
b0d623f7 2175 uint32_t fd;
2d21ac55 2176} old_fsevent_clone_args;
91447636 2177
2d21ac55 2178#define OLD_FSEVENTS_CLONE _IOW('s', 1, old_fsevent_clone_args)
91447636
A
2179
2180static int
2d21ac55 2181fseventsioctl(__unused dev_t dev, u_long cmd, caddr_t data, __unused int flag, struct proc *p)
91447636
A
2182{
2183 struct fileproc *f;
2184 int fd, error;
2185 fsevent_handle *fseh = NULL;
2d21ac55 2186 ext_fsevent_clone_args *fse_clone_args, _fse_clone;
91447636 2187 int8_t *event_list;
2d21ac55 2188 int is64bit = proc_is64bit(p);
91447636
A
2189
2190 switch (cmd) {
2d21ac55
A
2191 case OLD_FSEVENTS_CLONE: {
2192 old_fsevent_clone_args *old_args = (old_fsevent_clone_args *)data;
2193
2194 fse_clone_args = &_fse_clone;
2195 memset(fse_clone_args, 0, sizeof(ext_fsevent_clone_args));
2196
2197 fse_clone_args->event_list = CAST_USER_ADDR_T(old_args->event_list);
2198 fse_clone_args->num_events = old_args->num_events;
2199 fse_clone_args->event_queue_depth = old_args->event_queue_depth;
2200 fse_clone_args->fd = CAST_USER_ADDR_T(old_args->fd);
2201 goto handle_clone;
2202 }
2203
91447636 2204 case FSEVENTS_CLONE:
2d21ac55
A
2205 if (is64bit) {
2206 fse_clone_args = (ext_fsevent_clone_args *)data;
2207 } else {
2208 fsevent_clone_args *ufse_clone = (fsevent_clone_args *)data;
2209
2210 fse_clone_args = &_fse_clone;
2211 memset(fse_clone_args, 0, sizeof(ext_fsevent_clone_args));
2212
2213 fse_clone_args->event_list = CAST_USER_ADDR_T(ufse_clone->event_list);
2214 fse_clone_args->num_events = ufse_clone->num_events;
2215 fse_clone_args->event_queue_depth = ufse_clone->event_queue_depth;
2216 fse_clone_args->fd = CAST_USER_ADDR_T(ufse_clone->fd);
2217 }
2218
2219 handle_clone:
91447636
A
2220 if (fse_clone_args->num_events < 0 || fse_clone_args->num_events > 4096) {
2221 return EINVAL;
2222 }
2223
2224 MALLOC(fseh, fsevent_handle *, sizeof(fsevent_handle),
2225 M_TEMP, M_WAITOK);
2d21ac55
A
2226 if (fseh == NULL) {
2227 return ENOMEM;
2228 }
91447636 2229 memset(fseh, 0, sizeof(fsevent_handle));
b0d623f7
A
2230
2231 klist_init(&fseh->knotes);
91447636
A
2232
2233 MALLOC(event_list, int8_t *,
2234 fse_clone_args->num_events * sizeof(int8_t),
2235 M_TEMP, M_WAITOK);
2d21ac55
A
2236 if (event_list == NULL) {
2237 FREE(fseh, M_TEMP);
2238 return ENOMEM;
2239 }
91447636 2240
2d21ac55 2241 error = copyin(fse_clone_args->event_list,
91447636
A
2242 (void *)event_list,
2243 fse_clone_args->num_events * sizeof(int8_t));
2244 if (error) {
2245 FREE(event_list, M_TEMP);
2246 FREE(fseh, M_TEMP);
2247 return error;
2248 }
2249
2250 error = add_watcher(event_list,
2251 fse_clone_args->num_events,
2252 fse_clone_args->event_queue_depth,
316670eb
A
2253 &fseh->watcher,
2254 fseh);
91447636
A
2255 if (error) {
2256 FREE(event_list, M_TEMP);
2257 FREE(fseh, M_TEMP);
2258 return error;
2259 }
2260
2d21ac55
A
2261 fseh->watcher->fseh = fseh;
2262
2263 error = falloc(p, &f, &fd, vfs_context_current());
91447636
A
2264 if (error) {
2265 FREE(event_list, M_TEMP);
2266 FREE(fseh, M_TEMP);
2267 return (error);
2268 }
2269 proc_fdlock(p);
2270 f->f_fglob->fg_flag = FREAD | FWRITE;
91447636
A
2271 f->f_fglob->fg_ops = &fsevents_fops;
2272 f->f_fglob->fg_data = (caddr_t) fseh;
2d21ac55
A
2273 proc_fdunlock(p);
2274 error = copyout((void *)&fd, fse_clone_args->fd, sizeof(int32_t));
2275 if (error != 0) {
2276 fp_free(p, fd, f);
2277 } else {
91447636 2278 proc_fdlock(p);
6601e61a 2279 procfdtbl_releasefd(p, fd, NULL);
91447636
A
2280 fp_drop(p, fd, f, 1);
2281 proc_fdunlock(p);
2d21ac55 2282 }
91447636
A
2283 break;
2284
2285 default:
2286 error = EINVAL;
2287 break;
2288 }
2289
2290 return error;
2291}
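/*
 * Hedged userland sketch of the clone path handled above: open the
 * master /dev/fsevents device, ask for per-type reporting with
 * FSEVENTS_CLONE, and read events from the fd the kernel copies out.
 * The fsevent_clone_args layout matches what the 32-bit branch above
 * copies from; FSE_REPORT and FSE_MAX_EVENTS are assumed to come from
 * <sys/fsevents.h>, and error handling is kept minimal.
 */
#if 0	/* illustration only */
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/fsevents.h>

static int
clone_fsevents_fd(void)
{
	int8_t event_list[FSE_MAX_EVENTS];
	fsevent_clone_args clone_args;
	int32_t cloned_fd = -1;
	int master_fd, i;

	master_fd = open("/dev/fsevents", O_RDONLY);
	if (master_fd < 0) {
		return -1;
	}

	for (i = 0; i < FSE_MAX_EVENTS; i++) {
		event_list[i] = FSE_REPORT;	// report every event type
	}

	clone_args.event_list = event_list;
	clone_args.num_events = FSE_MAX_EVENTS;
	clone_args.event_queue_depth = 0x1000;	// a request; the kernel may clamp it
	clone_args.fd = &cloned_fd;

	if (ioctl(master_fd, FSEVENTS_CLONE, &clone_args) < 0) {
		close(master_fd);
		return -1;
	}

	close(master_fd);			// the cloned fd lives on independently
	return cloned_fd;
}
#endif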
2292
91447636 2293static void
2d21ac55 2294fsevents_wakeup(fs_event_watcher *watcher)
91447636 2295{
2d21ac55 2296 selwakeup(&watcher->fseh->si);
b0d623f7
A
2297 KNOTE(&watcher->fseh->knotes, NOTE_WRITE|NOTE_NONE);
2298 wakeup((caddr_t)watcher);
91447636
A
2299}
2300
2301
2302/*
2303 * A struct describing which functions will get invoked for certain
2304 * actions.
2305 */
2306static struct cdevsw fsevents_cdevsw =
2307{
2308 fseventsopen, /* open */
2309 fseventsclose, /* close */
2310 fseventsread, /* read */
2311 fseventswrite, /* write */
2312 fseventsioctl, /* ioctl */
2d21ac55
A
2313 (stop_fcn_t *)&nulldev, /* stop */
2314 (reset_fcn_t *)&nulldev, /* reset */
91447636
A
2315 NULL, /* tty's */
2316 eno_select, /* select */
2317 eno_mmap, /* mmap */
2318 eno_strat, /* strategy */
2319 eno_getc, /* getc */
2320 eno_putc, /* putc */
2321 0 /* type */
2322};
2323
2324
2325/*
2326 * Called to initialize our device,
2327 * and to register ourselves with devfs
2328 */
2329
2330void
2331fsevents_init(void)
2332{
2333 int ret;
2334
2335 if (fsevents_installed) {
2336 return;
2337 }
2338
2339 fsevents_installed = 1;
2340
91447636
A
2341 ret = cdevsw_add(-1, &fsevents_cdevsw);
2342 if (ret < 0) {
2343 fsevents_installed = 0;
2344 return;
2345 }
2346
2347 devfs_make_node(makedev (ret, 0), DEVFS_CHAR,
2348 UID_ROOT, GID_WHEEL, 0644, "fsevents", 0);
2349
2350 fsevents_internal_init();
2351}
2352
2353
91447636
A
2354char *
2355get_pathbuff(void)
2356{
b0d623f7 2357 char *path;
91447636 2358
b0d623f7
A
2359 MALLOC_ZONE(path, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
2360 return path;
91447636
A
2361}
2362
2363void
2364release_pathbuff(char *path)
2365{
91447636
A
2366
2367 if (path == NULL) {
2368 return;
2369 }
91447636
A
2370 FREE_ZONE(path, MAXPATHLEN, M_NAMEI);
2371}
2372
2373int
cf7d32b8 2374get_fse_info(struct vnode *vp, fse_info *fse, __unused vfs_context_t ctx)
91447636
A
2375{
2376 struct vnode_attr va;
2377
2378 VATTR_INIT(&va);
2379 VATTR_WANTED(&va, va_fsid);
2380 VATTR_WANTED(&va, va_fileid);
2381 VATTR_WANTED(&va, va_mode);
2382 VATTR_WANTED(&va, va_uid);
2383 VATTR_WANTED(&va, va_gid);
2d21ac55
A
2384 if (vp->v_flag & VISHARDLINK) {
2385 if (vp->v_type == VDIR) {
2386 VATTR_WANTED(&va, va_dirlinkcount);
2387 } else {
2388 VATTR_WANTED(&va, va_nlink);
2389 }
2390 }
2391
cf7d32b8 2392 if (vnode_getattr(vp, &va, vfs_context_kernel()) != 0) {
2d21ac55 2393 memset(fse, 0, sizeof(fse_info));
91447636
A
2394 return -1;
2395 }
6d2010ae
A
2396
2397 return vnode_get_fse_info_from_vap(vp, fse, &va);
2398}
2399
2400int
2401vnode_get_fse_info_from_vap(vnode_t vp, fse_info *fse, struct vnode_attr *vap)
2402{
2403 fse->ino = (ino64_t)vap->va_fileid;
2404 fse->dev = (dev_t)vap->va_fsid;
2405 fse->mode = (int32_t)vnode_vttoif(vnode_vtype(vp)) | vap->va_mode;
2406 fse->uid = (uid_t)vap->va_uid;
2407 fse->gid = (gid_t)vap->va_gid;
2d21ac55
A
2408 if (vp->v_flag & VISHARDLINK) {
2409 fse->mode |= FSE_MODE_HLINK;
2410 if (vp->v_type == VDIR) {
6d2010ae 2411 fse->nlink = (uint64_t)vap->va_dirlinkcount;
2d21ac55 2412 } else {
6d2010ae 2413 fse->nlink = (uint64_t)vap->va_nlink;
2d21ac55
A
2414 }
2415 }
2416
91447636
A
2417 return 0;
2418}
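/*
 * Hedged sketch of the typical in-kernel caller of get_fse_info():
 * snapshot the vnode's identity, then hand it to add_fsevent() together
 * with the path, mirroring the FSE_ARG_* protocol used elsewhere in this
 * file.  post_content_modified() is a made-up name for illustration.
 */
#if 0	/* illustration only */
static void
post_content_modified(vnode_t vp, vfs_context_t ctx)
{
	fse_info finfo;
	char *path;
	int len = MAXPATHLEN;

	if (!need_fsevent(FSE_CONTENT_MODIFIED, vp)) {
		return;				// nobody is watching this volume
	}
	if (get_fse_info(vp, &finfo, ctx) != 0) {
		return;				// couldn't get the attrs; skip it
	}

	path = get_pathbuff();
	if (vn_getpath(vp, path, &len) == 0) {
		add_fsevent(FSE_CONTENT_MODIFIED, ctx,
		            FSE_ARG_STRING, len, path,
		            FSE_ARG_FINFO, &finfo,
		            FSE_ARG_DONE);
	}
	release_pathbuff(path);
}
#endif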
2d21ac55 2419
b0d623f7
A
2420void
2421create_fsevent_from_kevent(vnode_t vp, uint32_t kevents, struct vnode_attr *vap)
2422{
2423 int fsevent_type=FSE_CONTENT_MODIFIED, len; // the default is the most pessimistic
2424 char pathbuf[MAXPATHLEN];
2425 fse_info fse;
2426
2427
2428 if (kevents & VNODE_EVENT_DELETE) {
2429 fsevent_type = FSE_DELETE;
2430 } else if (kevents & (VNODE_EVENT_EXTEND|VNODE_EVENT_WRITE)) {
2431 fsevent_type = FSE_CONTENT_MODIFIED;
2432 } else if (kevents & VNODE_EVENT_LINK) {
2433 fsevent_type = FSE_CREATE_FILE;
2434 } else if (kevents & VNODE_EVENT_RENAME) {
2435 fsevent_type = FSE_CREATE_FILE; // XXXdbg - should use FSE_RENAME but we don't have the destination info;
2436 } else if (kevents & (VNODE_EVENT_FILE_CREATED|VNODE_EVENT_FILE_REMOVED|VNODE_EVENT_DIR_CREATED|VNODE_EVENT_DIR_REMOVED)) {
2437 fsevent_type = FSE_STAT_CHANGED; // XXXdbg - because vp is a dir and the thing created/removed lived inside it
2438 } else { // a catch all for VNODE_EVENT_PERMS, VNODE_EVENT_ATTRIB and anything else
2439 fsevent_type = FSE_STAT_CHANGED;
2440 }
2441
2442 // printf("convert_kevent: kevents 0x%x fsevent type 0x%x (for %s)\n", kevents, fsevent_type, vp->v_name ? vp->v_name : "(no-name)");
2443
2444 fse.dev = vap->va_fsid;
2445 fse.ino = vap->va_fileid;
2446 fse.mode = vnode_vttoif(vnode_vtype(vp)) | (uint32_t)vap->va_mode;
2447 if (vp->v_flag & VISHARDLINK) {
2448 fse.mode |= FSE_MODE_HLINK;
2449 if (vp->v_type == VDIR) {
2450 fse.nlink = vap->va_dirlinkcount;
2451 } else {
2452 fse.nlink = vap->va_nlink;
2453 }
2454 }
2455
2456 if (vp->v_type == VDIR) {
2457 fse.mode |= FSE_REMOTE_DIR_EVENT;
2458 }
2459
2460
2461 fse.uid = vap->va_uid;
2462 fse.gid = vap->va_gid;
2463
2464 len = sizeof(pathbuf);
2465 if (vn_getpath(vp, pathbuf, &len) == 0) {
2466 add_fsevent(fsevent_type, vfs_context_current(), FSE_ARG_STRING, len, pathbuf, FSE_ARG_FINFO, &fse, FSE_ARG_DONE);
2467 }
2468 return;
2469}
2470
2d21ac55
A
2471#else /* CONFIG_FSE */
2472/*
2473 * The get_pathbuff and release_pathbuff routines are used in places not
2474 * related to fsevents, and it's a handy abstraction, so define trivial
2475 * versions that don't cache a pool of buffers. This way, we don't have
2476 * to conditionalize the callers, and they still get the advantage of the
2477 * pool of buffers if CONFIG_FSE is turned on.
2478 */
2479char *
2480get_pathbuff(void)
2481{
2482 char *path;
2483 MALLOC_ZONE(path, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
2484 return path;
2485}
2486
2487void
2488release_pathbuff(char *path)
2489{
2490 FREE_ZONE(path, MAXPATHLEN, M_NAMEI);
2491}
2492#endif /* CONFIG_FSE */