/*
 * Copyright (c) 2004-2014 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <stdarg.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/event.h>         // for kqueue related stuff
#include <sys/fsevents.h>

#if CONFIG_FSE
#include <sys/namei.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/file_internal.h>
#include <sys/stat.h>
#include <sys/vnode_internal.h>
#include <sys/mount_internal.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/dirent.h>
#include <sys/attr.h>
#include <sys/sysctl.h>
#include <sys/ubc.h>
#include <machine/cons.h>
#include <miscfs/specfs/specdev.h>
#include <miscfs/devfs/devfs.h>
#include <sys/filio.h>
#include <kern/locks.h>
#include <libkern/OSAtomic.h>
#include <kern/zalloc.h>
#include <mach/mach_time.h>
#include <kern/thread_call.h>
#include <kern/clock.h>

#include <security/audit/audit.h>
#include <bsm/audit_kevents.h>

#include <pexpert/pexpert.h>
typedef struct kfs_event {
    LIST_ENTRY(kfs_event) kevent_list;
    int16_t        type;         // type code of this event
    u_int16_t      flags,        // per-event flags
                   len;          // the length of the path in "str"
    int32_t        refcount;     // number of clients referencing this
    pid_t          pid;          // pid of the process that did the op

    uint64_t       abstime;      // when this event happened (mach_absolute_time())
    ino64_t        ino;
    dev_t          dev;
    int32_t        mode;
    uid_t          uid;
    gid_t          gid;

    const char    *str;

    struct kfs_event *dest;      // if this is a two-file op
} kfs_event;

// flags for the flags field
#define KFSE_COMBINED_EVENTS          0x0001
#define KFSE_CONTAINS_DROPPED_EVENTS  0x0002
#define KFSE_RECYCLED_EVENT           0x0004
#define KFSE_BEING_CREATED            0x0008

LIST_HEAD(kfse_list, kfs_event) kfse_list_head = LIST_HEAD_INITIALIZER(x);
int num_events_outstanding = 0;
int num_pending_rename = 0;

struct fsevent_handle;

typedef struct fs_event_watcher {
    int8_t      *event_list;             // the events we're interested in
    int32_t      num_events;
    dev_t       *devices_not_to_watch;   // report events from devices not in this list
    uint32_t     num_devices;
    int32_t      flags;
    kfs_event  **event_queue;
    int32_t      eventq_size;            // number of event pointers in queue
    int32_t      num_readers;
    int32_t      rd;                     // read index into the event_queue
    int32_t      wr;                     // write index into the event_queue
    int32_t      blockers;
    int32_t      my_id;
    uint32_t     num_dropped;
    uint64_t     max_event_id;
    struct fsevent_handle *fseh;
    pid_t        pid;
    char         proc_name[(2 * MAXCOMLEN) + 1];
} fs_event_watcher;

// fs_event_watcher flags
#define WATCHER_DROPPED_EVENTS         0x0001
#define WATCHER_CLOSING                0x0002
#define WATCHER_WANTS_COMPACT_EVENTS   0x0004
#define WATCHER_WANTS_EXTENDED_INFO    0x0008
#define WATCHER_APPLE_SYSTEM_SERVICE   0x0010   // fseventsd, coreservicesd, mds, revisiond

#define MAX_WATCHERS  8
static fs_event_watcher *watcher_table[MAX_WATCHERS];

#define DEFAULT_MAX_KFS_EVENTS  4096
static int max_kfs_events = DEFAULT_MAX_KFS_EVENTS;

// we allocate kfs_event structures out of this zone
static zone_t event_zone;
static int fs_event_init = 0;

//
// this array records whether anyone is interested in a
// particular type of event.  if no one is, we bail out
// early from the event delivery
//
static int16_t fs_event_type_watchers[FSE_MAX_EVENTS];

// the device currently being unmounted:
static dev_t fsevent_unmount_dev = 0;
// how many ACKs are still outstanding:
static int fsevent_unmount_ack_count = 0;

static int  watcher_add_event(fs_event_watcher *watcher, kfs_event *kfse);
static void fsevents_wakeup(fs_event_watcher *watcher);

//
// Locks
//
static lck_grp_attr_t * fsevent_group_attr;
static lck_attr_t *     fsevent_lock_attr;
static lck_grp_t *      fsevent_mutex_group;

static lck_grp_t *      fsevent_rw_group;

static lck_rw_t  event_handling_lock;   // handles locking for event manipulation and recycling
static lck_mtx_t watch_table_lock;
static lck_mtx_t event_buf_lock;
static lck_mtx_t event_writer_lock;


/* Explicitly declare qsort so compiler doesn't complain */
__private_extern__ void qsort(
    void * array,
    size_t nmembers,
    size_t member_size,
    int (*)(const void *, const void *));

static int
is_ignored_directory(const char *path)
{
    if (!path) {
        return 0;
    }

#define IS_TLD(x) strnstr(__DECONST(char *, path), x, MAXPATHLEN)
    if (IS_TLD("/.Spotlight-V100/") ||
        IS_TLD("/.MobileBackups/") ||
        IS_TLD("/Backups.backupdb/")) {
        return 1;
    }
#undef IS_TLD

    return 0;
}

static void
fsevents_internal_init(void)
{
    int i;

    if (fs_event_init++ != 0) {
        return;
    }

    for(i=0; i < FSE_MAX_EVENTS; i++) {
        fs_event_type_watchers[i] = 0;
    }

    memset(watcher_table, 0, sizeof(watcher_table));

    fsevent_lock_attr   = lck_attr_alloc_init();
    fsevent_group_attr  = lck_grp_attr_alloc_init();
    fsevent_mutex_group = lck_grp_alloc_init("fsevent-mutex", fsevent_group_attr);
    fsevent_rw_group    = lck_grp_alloc_init("fsevent-rw", fsevent_group_attr);

    lck_mtx_init(&watch_table_lock, fsevent_mutex_group, fsevent_lock_attr);
    lck_mtx_init(&event_buf_lock, fsevent_mutex_group, fsevent_lock_attr);
    lck_mtx_init(&event_writer_lock, fsevent_mutex_group, fsevent_lock_attr);

    lck_rw_init(&event_handling_lock, fsevent_rw_group, fsevent_lock_attr);

    PE_get_default("kern.maxkfsevents", &max_kfs_events, sizeof(max_kfs_events));

    event_zone = zinit(sizeof(kfs_event),
                       max_kfs_events * sizeof(kfs_event),
                       max_kfs_events * sizeof(kfs_event),
                       "fs-event-buf");
    if (event_zone == NULL) {
        printf("fsevents: failed to initialize the event zone.\n");
    }

    // mark the zone as exhaustible so that it will not
    // ever grow beyond what we initially filled it with
    zone_change(event_zone, Z_EXHAUST, TRUE);
    zone_change(event_zone, Z_COLLECT, FALSE);
    zone_change(event_zone, Z_CALLERACCT, FALSE);

    if (zfill(event_zone, max_kfs_events) < max_kfs_events) {
        printf("fsevents: failed to pre-fill the event zone.\n");
    }
}

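//
// Sizing note (illustrative, derived from the init code above): with the
// default max_kfs_events of 4096 (overridable through the
// "kern.maxkfsevents" PE default), the zone is created and pre-filled for
// exactly that many kfs_event structures.  Because it is marked Z_EXHAUST,
// a burst that needs one event more than that simply gets NULL back from
// zalloc_noblock() and the event is dropped rather than growing the zone.
//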
static void
lock_watch_table(void)
{
    lck_mtx_lock(&watch_table_lock);
}

static void
unlock_watch_table(void)
{
    lck_mtx_unlock(&watch_table_lock);
}

static void
lock_fs_event_list(void)
{
    lck_mtx_lock(&event_buf_lock);
}

static void
unlock_fs_event_list(void)
{
    lck_mtx_unlock(&event_buf_lock);
}

// forward prototype
static void release_event_ref(kfs_event *kfse);

static int
watcher_cares_about_dev(fs_event_watcher *watcher, dev_t dev)
{
    unsigned int i;

    // if devices_not_to_watch is NULL then we care about all
    // events from all devices
    if (watcher->devices_not_to_watch == NULL) {
        return 1;
    }

    for(i=0; i < watcher->num_devices; i++) {
        if (dev == watcher->devices_not_to_watch[i]) {
            // found a match! that means we do not
            // want events from this device.
            return 0;
        }
    }

    // if we're here it's not in the devices_not_to_watch[]
    // list so that means we do care about it
    return 1;
}


int
need_fsevent(int type, vnode_t vp)
{
    if (type >= 0 && type < FSE_MAX_EVENTS && fs_event_type_watchers[type] == 0)
        return (0);

    // events in /dev aren't really interesting...
    if (vp->v_tag == VT_DEVFS) {
        return (0);
    }

    return 1;
}

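//
// Typical call pattern from a VFS call site (a sketch for illustration
// only -- the real callers live in the vnode/syscall code, not in this
// file): check cheaply first, then pay for the event construction.
//
//	if (need_fsevent(FSE_DELETE, vp)) {
//		add_fsevent(FSE_DELETE, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
//	}
//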
#define is_throw_away(x)  ((x) == FSE_STAT_CHANGED || (x) == FSE_CONTENT_MODIFIED)


// Ways that an event can be reused:
//
// "combined" events mean that there were two events for
// the same vnode or path and we're combining both events
// into a single event.  The primary event gets a bit that
// marks it as having been combined.  The secondary event
// is essentially dropped and the kfse structure reused.
//
// "collapsed" means that multiple events below a given
// directory are collapsed into a single event.  in this
// case, the directory that we collapse into and all of
// its children must be re-scanned.
//
// "recycled" means that we're completely blowing away
// the event since there are other events that have info
// about the same vnode or path (and one of those other
// events will be marked as combined or collapsed as
// appropriate).
//
#define KFSE_COMBINED   0x0001
#define KFSE_COLLAPSED  0x0002
#define KFSE_RECYCLED   0x0004

int num_dropped         = 0;
int num_parent_switch   = 0;
int num_recycled_rename = 0;

static struct timeval last_print;

//
// These variables are used to track coalescing multiple identical
// events for the same vnode/pathname.  If we get the same event
// type and same vnode/pathname as the previous event, we just drop
// the event since it's superfluous.  This improves some micro-
// benchmarks considerably and actually has a real-world impact on
// tests like a Finder copy where multiple stat-changed events can
// get coalesced.
//
static int      last_event_type = -1;
static void    *last_ptr = NULL;
static char     last_str[MAXPATHLEN];
static int      last_nlen = 0;
static int      last_vid = -1;
static uint64_t last_coalesced_time = 0;
static void    *last_event_ptr = NULL;
int             last_coalesced = 0;
static mach_timebase_info_data_t sTimebaseInfo = { 0, 0 };

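//
// Illustrative arithmetic for the coalescing test in add_fsevent() below:
// mach_absolute_time() ticks are scaled to nanoseconds roughly as
//
//	elapsed_ns = elapsed_ticks * sTimebaseInfo.numer / sTimebaseInfo.denom;
//
// and an event is coalesced away only when it matches the previous event's
// type and vnode/path AND less than 1000000000 ns (one second) has elapsed.
//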
int
add_fsevent(int type, vfs_context_t ctx, ...)
{
    struct proc      *p = vfs_context_proc(ctx);
    int               i, arg_type, ret;
    kfs_event        *kfse, *kfse_dest=NULL, *cur;
    fs_event_watcher *watcher;
    va_list           ap;
    int               error = 0, did_alloc=0;
    dev_t             dev = 0;
    uint64_t          now, elapsed;
    char             *pathbuff=NULL;
    int               pathbuff_len;


    va_start(ap, ctx);

    // ignore bogus event types..
    if (type < 0 || type >= FSE_MAX_EVENTS) {
        va_end(ap);
        return EINVAL;
    }

    // if no one cares about this type of event, bail out
    if (fs_event_type_watchers[type] == 0) {
        va_end(ap);
        return 0;
    }

    now = mach_absolute_time();

    // find a free event and snag it for our use
    // NOTE: do not do anything that would block until
    //       the lock is dropped.
    lock_fs_event_list();

    //
    // check if this event is identical to the previous one...
    // (as long as it's not an event type that can never be the
    // same as a previous event)
    //
    if (type != FSE_CREATE_FILE && type != FSE_DELETE && type != FSE_RENAME && type != FSE_EXCHANGE && type != FSE_CHOWN && type != FSE_DOCID_CHANGED && type != FSE_DOCID_CREATED) {
        void *ptr = NULL;
        int   vid = 0, was_str = 0, nlen = 0;

        for(arg_type=va_arg(ap, int32_t); arg_type != FSE_ARG_DONE; arg_type=va_arg(ap, int32_t)) {
            switch(arg_type) {
                case FSE_ARG_VNODE: {
                    ptr = va_arg(ap, void *);
                    vid = vnode_vid((struct vnode *)ptr);
                    last_str[0] = '\0';
                    break;
                }
                case FSE_ARG_STRING: {
                    nlen = va_arg(ap, int32_t);
                    ptr = va_arg(ap, void *);
                    was_str = 1;
                    break;
                }
            }
            if (ptr != NULL) {
                break;
            }
        }

        if (sTimebaseInfo.denom == 0) {
            (void) clock_timebase_info(&sTimebaseInfo);
        }

        elapsed = (now - last_coalesced_time);
        if (sTimebaseInfo.denom != sTimebaseInfo.numer) {
            if (sTimebaseInfo.denom == 1) {
                elapsed *= sTimebaseInfo.numer;
            } else {
                // this could overflow... the worst that will happen is that we'll
                // send (or not send) an extra event so I'm not going to worry about
                // doing the math right like dtrace_abs_to_nano() does.
                elapsed = (elapsed * sTimebaseInfo.numer) / (uint64_t)sTimebaseInfo.denom;
            }
        }

        if (type == last_event_type
            && (elapsed < 1000000000)
            &&
            ((vid && vid == last_vid && last_ptr == ptr)
             ||
             (last_str[0] && last_nlen == nlen && ptr && strcmp(last_str, ptr) == 0))
           ) {

            last_coalesced++;
            unlock_fs_event_list();
            va_end(ap);

            return 0;
        } else {
            last_ptr = ptr;
            if (was_str) {
                strlcpy(last_str, ptr, sizeof(last_str));
            }
            last_nlen = nlen;
            last_vid = vid;
            last_event_type = type;
            last_coalesced_time = now;
        }
    }
    va_end(ap);
    va_start(ap, ctx);


    kfse = zalloc_noblock(event_zone);
    if (kfse && (type == FSE_RENAME || type == FSE_EXCHANGE)) {
        kfse_dest = zalloc_noblock(event_zone);
        if (kfse_dest == NULL) {
            did_alloc = 1;
            zfree(event_zone, kfse);
            kfse = NULL;
        }
    }


    if (kfse == NULL) {        // yikes! no free events
        unlock_fs_event_list();
        lock_watch_table();

        for(i=0; i < MAX_WATCHERS; i++) {
            watcher = watcher_table[i];
            if (watcher == NULL) {
                continue;
            }

            watcher->flags |= WATCHER_DROPPED_EVENTS;
            fsevents_wakeup(watcher);
        }
        unlock_watch_table();

        {
            struct timeval current_tv;

            num_dropped++;

            // only print a message at most once every 10 seconds
            microuptime(&current_tv);
            if ((current_tv.tv_sec - last_print.tv_sec) > 10) {
                int ii;
                void *junkptr=zalloc_noblock(event_zone), *listhead=kfse_list_head.lh_first;

                printf("add_fsevent: event queue is full! dropping events (num dropped events: %d; num events outstanding: %d).\n", num_dropped, num_events_outstanding);
                printf("add_fsevent: kfse_list head %p ; num_pending_rename %d\n", listhead, num_pending_rename);
                printf("add_fsevent: zalloc sez: %p\n", junkptr);
                printf("add_fsevent: event_zone info: %d 0x%x\n", ((int *)event_zone)[0], ((int *)event_zone)[1]);
                lock_watch_table();
                for(ii=0; ii < MAX_WATCHERS; ii++) {
                    if (watcher_table[ii] == NULL) {
                        continue;
                    }

                    printf("add_fsevent: watcher %s %p: rd %4d wr %4d q_size %4d flags 0x%x\n",
                           watcher_table[ii]->proc_name,
                           watcher_table[ii],
                           watcher_table[ii]->rd, watcher_table[ii]->wr,
                           watcher_table[ii]->eventq_size, watcher_table[ii]->flags);
                }
                unlock_watch_table();

                last_print = current_tv;
                if (junkptr) {
                    zfree(event_zone, junkptr);
                }
            }
        }

        if (pathbuff) {
            release_pathbuff(pathbuff);
            pathbuff = NULL;
        }
        return ENOSPC;
    }

    memset(kfse, 0, sizeof(kfs_event));
    kfse->refcount = 1;
    OSBitOrAtomic16(KFSE_BEING_CREATED, &kfse->flags);

    last_event_ptr = kfse;
    kfse->type     = type;
    kfse->abstime  = now;
    kfse->pid      = p->p_pid;
    if (type == FSE_RENAME || type == FSE_EXCHANGE) {
        memset(kfse_dest, 0, sizeof(kfs_event));
        kfse_dest->refcount = 1;
        OSBitOrAtomic16(KFSE_BEING_CREATED, &kfse_dest->flags);
        kfse_dest->type     = type;
        kfse_dest->pid      = p->p_pid;
        kfse_dest->abstime  = now;

        kfse->dest = kfse_dest;
    }

    num_events_outstanding++;
    if (kfse->type == FSE_RENAME) {
        num_pending_rename++;
    }
    LIST_INSERT_HEAD(&kfse_list_head, kfse, kevent_list);

    if (kfse->refcount < 1) {
        panic("add_fsevent: line %d: kfse refcount %d but should be at least 1\n", __LINE__, kfse->refcount);
    }

    unlock_fs_event_list();  // at this point it's safe to unlock

    //
    // now process the arguments passed in and copy them into
    // the kfse
    //

    cur = kfse;

    if (type == FSE_DOCID_CREATED || type == FSE_DOCID_CHANGED) {
        uint64_t val;

        //
        // These events are special and not like the other events.  They only
        // have a dev_t, src inode #, dest inode #, and a doc-id.  We use the
        // fields that we can in the kfse but have to overlay the dest inode
        // number and the doc-id on the other fields.
        //

        // First the dev_t
        arg_type = va_arg(ap, int32_t);
        if (arg_type == FSE_ARG_DEV) {
            cur->dev = (dev_t)(va_arg(ap, dev_t));
        } else {
            cur->dev = (dev_t)0xbadc0de1;
        }

        // next the source inode #
        arg_type = va_arg(ap, int32_t);
        if (arg_type == FSE_ARG_INO) {
            cur->ino = (ino64_t)(va_arg(ap, ino64_t));
        } else {
            cur->ino = 0xbadc0de2;
        }

        // now the dest inode #
        arg_type = va_arg(ap, int32_t);
        if (arg_type == FSE_ARG_INO) {
            val = (ino64_t)(va_arg(ap, ino64_t));
        } else {
            val = 0xbadc0de2;
        }
        // overlay the dest inode number on the str/dest pointer fields
        memcpy(&cur->str, &val, sizeof(ino64_t));


        // and last the document-id
        arg_type = va_arg(ap, int32_t);
        if (arg_type == FSE_ARG_INT32) {
            val = (uint64_t)va_arg(ap, uint32_t);
        } else if (arg_type == FSE_ARG_INT64) {
            val = (uint64_t)va_arg(ap, uint64_t);
        } else {
            val = 0xbadc0de3;
        }

        // the docid is 64-bit and overlays the uid/gid fields
        memcpy(&cur->uid, &val, sizeof(uint64_t));

        goto done_with_args;
    }

    if (type == FSE_UNMOUNT_PENDING) {

        // Just a dev_t
        arg_type = va_arg(ap, int32_t);
        if (arg_type == FSE_ARG_DEV) {
            cur->dev = (dev_t)(va_arg(ap, dev_t));
        } else {
            cur->dev = (dev_t)0xbadc0de1;
        }

        goto done_with_args;
    }

    for(arg_type=va_arg(ap, int32_t); arg_type != FSE_ARG_DONE; arg_type=va_arg(ap, int32_t))

        switch(arg_type) {
            case FSE_ARG_VNODE: {
                // this expands out into multiple arguments to the client
                struct vnode *vp;
                struct vnode_attr va;

                if (kfse->str != NULL) {
                    cur = kfse_dest;
                }

                vp = va_arg(ap, struct vnode *);
                if (vp == NULL) {
                    panic("add_fsevent: you can't pass me a NULL vnode ptr (type %d)!\n",
                          cur->type);
                }

                VATTR_INIT(&va);
                VATTR_WANTED(&va, va_fsid);
                VATTR_WANTED(&va, va_fileid);
                VATTR_WANTED(&va, va_mode);
                VATTR_WANTED(&va, va_uid);
                VATTR_WANTED(&va, va_gid);
                VATTR_WANTED(&va, va_nlink);
                if ((ret = vnode_getattr(vp, &va, vfs_context_kernel())) != 0) {
                    // printf("add_fsevent: failed to getattr on vp %p (%d)\n", cur->fref.vp, ret);
                    cur->str = NULL;
                    error = EINVAL;
                    goto clean_up;
                }

                cur->dev  = dev = (dev_t)va.va_fsid;
                cur->ino  = (ino64_t)va.va_fileid;
                cur->mode = (int32_t)vnode_vttoif(vnode_vtype(vp)) | va.va_mode;
                cur->uid  = va.va_uid;
                cur->gid  = va.va_gid;
                if (vp->v_flag & VISHARDLINK) {
                    cur->mode |= FSE_MODE_HLINK;
                    if ((vp->v_type == VDIR && va.va_dirlinkcount == 0) || (vp->v_type == VREG && va.va_nlink == 0)) {
                        cur->mode |= FSE_MODE_LAST_HLINK;
                    }
                }

                // if we haven't gotten the path yet, get it.
                if (pathbuff == NULL) {
                    pathbuff = get_pathbuff();
                    pathbuff_len = MAXPATHLEN;

                    pathbuff[0] = '\0';
                    if ((ret = vn_getpath(vp, pathbuff, &pathbuff_len)) != 0 || pathbuff[0] == '\0') {

                        cur->flags |= KFSE_CONTAINS_DROPPED_EVENTS;

                        do {
                            if (vp->v_parent != NULL) {
                                vp = vp->v_parent;
                            } else if (vp->v_mount) {
                                strlcpy(pathbuff, vp->v_mount->mnt_vfsstat.f_mntonname, MAXPATHLEN);
                                break;
                            } else {
                                vp = NULL;
                            }

                            if (vp == NULL) {
                                break;
                            }

                            pathbuff_len = MAXPATHLEN;
                            ret = vn_getpath(vp, pathbuff, &pathbuff_len);
                        } while (ret == ENOSPC);

                        if (ret != 0 || vp == NULL) {
                            error = ENOENT;
                            goto clean_up;
                        }
                    }
                }

                // store the path by adding it to the global string table
                cur->len = pathbuff_len;
                cur->str = vfs_addname(pathbuff, pathbuff_len, 0, 0);
                if (cur->str == NULL || cur->str[0] == '\0') {
                    panic("add_fsevent: was not able to add path %s to event %p.\n", pathbuff, cur);
                }

                release_pathbuff(pathbuff);
                pathbuff = NULL;

                break;
            }

            case FSE_ARG_FINFO: {
                fse_info *fse;

                fse = va_arg(ap, fse_info *);

                cur->dev  = dev = (dev_t)fse->dev;
                cur->ino  = (ino64_t)fse->ino;
                cur->mode = (int32_t)fse->mode;
                cur->uid  = (uid_t)fse->uid;
                cur->gid  = (uid_t)fse->gid;
                // if it's a hard-link and this is the last link, flag it
                if ((fse->mode & FSE_MODE_HLINK) && fse->nlink == 0) {
                    cur->mode |= FSE_MODE_LAST_HLINK;
                }
                if (cur->mode & FSE_TRUNCATED_PATH) {
                    cur->flags |= KFSE_CONTAINS_DROPPED_EVENTS;
                    cur->mode &= ~FSE_TRUNCATED_PATH;
                }
                break;
            }

            case FSE_ARG_STRING:
                if (kfse->str != NULL) {
                    cur = kfse_dest;
                }

                cur->len = (int16_t)(va_arg(ap, int32_t) & 0x7fff);
                if (cur->len >= 1) {
                    cur->str = vfs_addname(va_arg(ap, char *), cur->len, 0, 0);
                } else {
                    printf("add_fsevent: funny looking string length: %d\n", (int)cur->len);
                    cur->len = 2;
                    cur->str = vfs_addname("/", cur->len, 0, 0);
                }
                if (cur->str[0] == 0) {
                    printf("add_fsevent: bogus looking string (len %d)\n", cur->len);
                }
                break;

            case FSE_ARG_INT32: {
                uint32_t ival = (uint32_t)va_arg(ap, int32_t);
                kfse->uid = (ino64_t)ival;
                break;
            }

            default:
                printf("add_fsevent: unknown type %d\n", arg_type);
                // just skip one 32-bit word and hope we sync up...
                (void)va_arg(ap, int32_t);
        }

done_with_args:
    va_end(ap);

    OSBitAndAtomic16(~KFSE_BEING_CREATED, &kfse->flags);
    if (kfse_dest) {
        OSBitAndAtomic16(~KFSE_BEING_CREATED, &kfse_dest->flags);
    }

    //
    // now we have to go and let everyone who is interested in
    // this type of event know about it
    //
    lock_watch_table();

    for(i=0; i < MAX_WATCHERS; i++) {
        watcher = watcher_table[i];
        if (watcher == NULL) {
            continue;
        }

        if (   type < watcher->num_events
            && watcher->event_list[type] == FSE_REPORT
            && watcher_cares_about_dev(watcher, dev)) {

            if (watcher_add_event(watcher, kfse) != 0) {
                watcher->num_dropped++;
                continue;
            }
        }

        // if (kfse->refcount < 1) {
        //    panic("add_fsevent: line %d: kfse refcount %d but should be at least 1\n", __LINE__, kfse->refcount);
        // }
    }

    unlock_watch_table();

  clean_up:

    if (pathbuff) {
        release_pathbuff(pathbuff);
        pathbuff = NULL;
    }

    release_event_ref(kfse);

    return error;
}

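//
// For reference, a two-file op such as a rename carries two path/info
// pairs -- the first fills in kfse, the second kfse_dest.  A hypothetical
// call (sketch for illustration only; argument names are made up) looks
// like:
//
//	add_fsevent(FSE_RENAME, ctx,
//	            FSE_ARG_STRING, from_len, from_path, FSE_ARG_FINFO, &from_finfo,
//	            FSE_ARG_STRING, to_len,   to_path,   FSE_ARG_FINFO, &to_finfo,
//	            FSE_ARG_DONE);
//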
static void
release_event_ref(kfs_event *kfse)
{
    int old_refcount;
    kfs_event copy, dest_copy;


    old_refcount = OSAddAtomic(-1, &kfse->refcount);
    if (old_refcount > 1) {
        return;
    }

    lock_fs_event_list();
    if (last_event_ptr == kfse) {
        last_event_ptr = NULL;
        last_event_type = -1;
        last_coalesced_time = 0;
    }

    if (kfse->refcount < 0) {
        panic("release_event_ref: bogus kfse refcount %d\n", kfse->refcount);
    }

    if (kfse->refcount > 0 || kfse->type == FSE_INVALID) {
        // This is very subtle.  Either of these conditions can
        // be true if an event got recycled while we were waiting
        // on the fs_event_list lock or the event got recycled,
        // delivered, _and_ free'd by someone else while we were
        // waiting on the fs event list lock.  In either case
        // we need to just unlock the list and return without
        // doing anything because if the refcount is > 0 then
        // someone else will take care of free'ing it and when
        // the kfse->type is invalid then someone else already
        // has handled free'ing the event (while we were blocked
        // on the event list lock).
        //
        unlock_fs_event_list();
        return;
    }

    //
    // make a copy of this so we can free things without
    // holding the fs_event_buf lock
    //
    copy = *kfse;
    if (kfse->type != FSE_DOCID_CREATED && kfse->type != FSE_DOCID_CHANGED && kfse->dest && OSAddAtomic(-1, &kfse->dest->refcount) == 1) {
        dest_copy = *kfse->dest;
    } else {
        dest_copy.str  = NULL;
        dest_copy.len  = 0;
        dest_copy.type = FSE_INVALID;
    }

    kfse->pid = kfse->type;              // save this off for debugging...
    kfse->uid = (uid_t)(long)kfse->str;  // save this off for debugging...
    kfse->gid = (gid_t)(long)current_thread();

    kfse->str = (char *)0xdeadbeef;      // XXXdbg - catch any cheaters...

    if (dest_copy.type != FSE_INVALID) {
        kfse->dest->str = (char *)0xbadc0de;   // XXXdbg - catch any cheaters...
        kfse->dest->type = FSE_INVALID;

        if (kfse->dest->kevent_list.le_prev != NULL) {
            num_events_outstanding--;
            LIST_REMOVE(kfse->dest, kevent_list);
            memset(&kfse->dest->kevent_list, 0xa5, sizeof(kfse->dest->kevent_list));
        }

        zfree(event_zone, kfse->dest);
    }

    // mark this fsevent as invalid
    {
        int otype;

        otype = kfse->type;
        kfse->type = FSE_INVALID;

        if (kfse->kevent_list.le_prev != NULL) {
            num_events_outstanding--;
            if (otype == FSE_RENAME) {
                num_pending_rename--;
            }
            LIST_REMOVE(kfse, kevent_list);
            memset(&kfse->kevent_list, 0, sizeof(kfse->kevent_list));
        }
    }

    zfree(event_zone, kfse);

    unlock_fs_event_list();

    // if we have a pointer in the union
    if (copy.str && copy.type != FSE_DOCID_CREATED && copy.type != FSE_DOCID_CHANGED) {
        if (copy.len == 0) {    // and it's not a string
            panic("%s:%d: no more fref.vp!\n", __FILE__, __LINE__);
            // vnode_rele_ext(copy.fref.vp, O_EVTONLY, 0);
        } else {                // else it's a string
            vfs_removename(copy.str);
        }
    }

    if (dest_copy.type != FSE_INVALID && dest_copy.str) {
        if (dest_copy.len == 0) {
            panic("%s:%d: no more fref.vp!\n", __FILE__, __LINE__);
            // vnode_rele_ext(dest_copy.fref.vp, O_EVTONLY, 0);
        } else {
            vfs_removename(dest_copy.str);
        }
    }
}

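//
// Refcount lifecycle, summarized from the code above: an event is born
// with refcount 1 (held by its creator), watcher_add_event() adds one
// reference per watcher queue the event lands on, and each consumer (or
// the creator's final release_event_ref()) drops one.  Only the drop
// from 1 to 0 tears the event down and returns it to the zone.
//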
static int
add_watcher(int8_t *event_list, int32_t num_events, int32_t eventq_size, fs_event_watcher **watcher_out, void *fseh)
{
    int               i;
    fs_event_watcher *watcher;

    if (eventq_size <= 0 || eventq_size > 100*max_kfs_events) {
        eventq_size = max_kfs_events;
    }

    // Note: the event_queue follows the fs_event_watcher struct
    //       in memory so we only have to do one allocation
    MALLOC(watcher,
           fs_event_watcher *,
           sizeof(fs_event_watcher) + eventq_size * sizeof(kfs_event *),
           M_TEMP, M_WAITOK);
    if (watcher == NULL) {
        return ENOMEM;
    }

    watcher->event_list   = event_list;
    watcher->num_events   = num_events;
    watcher->devices_not_to_watch = NULL;
    watcher->num_devices  = 0;
    watcher->flags        = 0;
    watcher->event_queue  = (kfs_event **)&watcher[1];
    watcher->eventq_size  = eventq_size;
    watcher->rd           = 0;
    watcher->wr           = 0;
    watcher->blockers     = 0;
    watcher->num_readers  = 0;
    watcher->max_event_id = 0;
    watcher->fseh         = fseh;
    watcher->pid          = proc_selfpid();
    proc_selfname(watcher->proc_name, sizeof(watcher->proc_name));

    watcher->num_dropped  = 0;      // XXXdbg - debugging

    if (!strncmp(watcher->proc_name, "fseventsd", sizeof(watcher->proc_name)) ||
        !strncmp(watcher->proc_name, "coreservicesd", sizeof(watcher->proc_name)) ||
        !strncmp(watcher->proc_name, "revisiond", sizeof(watcher->proc_name)) ||
        !strncmp(watcher->proc_name, "mds", sizeof(watcher->proc_name))) {
        watcher->flags |= WATCHER_APPLE_SYSTEM_SERVICE;
    } else {
        printf("fsevents: watcher %s (pid: %d) - Using /dev/fsevents directly is unsupported.  Migrate to FSEventsFramework\n",
               watcher->proc_name, watcher->pid);
    }

    lock_watch_table();

    // find a slot for the new watcher
    for(i=0; i < MAX_WATCHERS; i++) {
        if (watcher_table[i] == NULL) {
            watcher->my_id   = i;
            watcher_table[i] = watcher;
            break;
        }
    }

    if (i >= MAX_WATCHERS) {
        printf("fsevents: too many watchers!\n");
        unlock_watch_table();
        FREE(watcher, M_TEMP);
        return ENOSPC;
    }

    // now update the global list of who's interested in
    // events of a particular type...
    for(i=0; i < num_events; i++) {
        if (event_list[i] != FSE_IGNORE && i < FSE_MAX_EVENTS) {
            fs_event_type_watchers[i]++;
        }
    }

    unlock_watch_table();

    *watcher_out = watcher;

    return 0;
}

static void
remove_watcher(fs_event_watcher *target)
{
    int i, j, counter=0;
    fs_event_watcher *watcher;
    kfs_event *kfse;

    lock_watch_table();

    for(j=0; j < MAX_WATCHERS; j++) {
        watcher = watcher_table[j];
        if (watcher != target) {
            continue;
        }

        watcher_table[j] = NULL;

        for(i=0; i < watcher->num_events; i++) {
            if (watcher->event_list[i] != FSE_IGNORE && i < FSE_MAX_EVENTS) {
                fs_event_type_watchers[i]--;
            }
        }

        if (watcher->flags & WATCHER_CLOSING) {
            unlock_watch_table();
            return;
        }

        // printf("fsevents: removing watcher %p (rd %d wr %d num_readers %d flags 0x%x)\n", watcher, watcher->rd, watcher->wr, watcher->num_readers, watcher->flags);
        watcher->flags |= WATCHER_CLOSING;
        OSAddAtomic(1, &watcher->num_readers);

        unlock_watch_table();

        while (watcher->num_readers > 1 && counter++ < 5000) {
            lock_watch_table();
            fsevents_wakeup(watcher);      // in case they're asleep
            unlock_watch_table();

            tsleep(watcher, PRIBIO, "fsevents-close", 1);
        }
        if (counter++ >= 5000) {
            // printf("fsevents: close: still have readers! (%d)\n", watcher->num_readers);
            panic("fsevents: close: still have readers! (%d)\n", watcher->num_readers);
        }

        // drain the event_queue

        lck_rw_lock_exclusive(&event_handling_lock);
        while(watcher->rd != watcher->wr) {
            kfse = watcher->event_queue[watcher->rd];
            watcher->event_queue[watcher->rd] = NULL;
            watcher->rd = (watcher->rd+1) % watcher->eventq_size;
            OSSynchronizeIO();
            if (kfse != NULL && kfse->type != FSE_INVALID && kfse->refcount >= 1) {
                release_event_ref(kfse);
            }
        }
        lck_rw_unlock_exclusive(&event_handling_lock);

        if (watcher->event_list) {
            FREE(watcher->event_list, M_TEMP);
            watcher->event_list = NULL;
        }
        if (watcher->devices_not_to_watch) {
            FREE(watcher->devices_not_to_watch, M_TEMP);
            watcher->devices_not_to_watch = NULL;
        }
        FREE(watcher, M_TEMP);

        return;
    }

    unlock_watch_table();
}


#define EVENT_DELAY_IN_MS  10
static thread_call_t event_delivery_timer = NULL;
static int timer_set = 0;


static void
delayed_event_delivery(__unused void *param0, __unused void *param1)
{
    int i;

    lock_watch_table();

    for(i=0; i < MAX_WATCHERS; i++) {
        if (watcher_table[i] != NULL && watcher_table[i]->rd != watcher_table[i]->wr) {
            fsevents_wakeup(watcher_table[i]);
        }
    }

    timer_set = 0;

    unlock_watch_table();
}


//
// The watch table must be locked before calling this function.
//
static void
schedule_event_wakeup(void)
{
    uint64_t deadline;

    if (event_delivery_timer == NULL) {
        event_delivery_timer = thread_call_allocate((thread_call_func_t)delayed_event_delivery, NULL);
    }

    clock_interval_to_deadline(EVENT_DELAY_IN_MS, 1000 * 1000, &deadline);

    thread_call_enter_delayed(event_delivery_timer, deadline);
    timer_set = 1;
}

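//
// Illustrative arithmetic: clock_interval_to_deadline(10, 1000 * 1000, ...)
// means 10 intervals at a scale of 1,000,000 ns each, i.e. a deadline 10ms
// out.  So an isolated event sits in a watcher's queue for at most about
// EVENT_DELAY_IN_MS milliseconds before the timer forces a wakeup.
//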
#define MAX_NUM_PENDING  16

//
// NOTE: the watch table must be locked before calling
//       this routine.
//
static int
watcher_add_event(fs_event_watcher *watcher, kfs_event *kfse)
{
    if (kfse->abstime > watcher->max_event_id) {
        watcher->max_event_id = kfse->abstime;
    }

    if (((watcher->wr + 1) % watcher->eventq_size) == watcher->rd) {
        watcher->flags |= WATCHER_DROPPED_EVENTS;
        fsevents_wakeup(watcher);
        return ENOSPC;
    }

    OSAddAtomic(1, &kfse->refcount);
    watcher->event_queue[watcher->wr] = kfse;
    OSSynchronizeIO();
    watcher->wr = (watcher->wr + 1) % watcher->eventq_size;

    //
    // wake up the watcher if there are more than MAX_NUM_PENDING events.
    // otherwise schedule a timer (if one isn't already set) which will
    // send any pending events if no more are received in the next
    // EVENT_DELAY_IN_MS milliseconds.
    //
    int32_t num_pending = 0;
    if (watcher->rd < watcher->wr) {
        num_pending = watcher->wr - watcher->rd;
    }

    if (watcher->rd > watcher->wr) {
        num_pending = watcher->wr + watcher->eventq_size - watcher->rd;
    }

    if (num_pending > (watcher->eventq_size*3/4) && !(watcher->flags & WATCHER_APPLE_SYSTEM_SERVICE)) {
        /* Non-Apple Service is falling behind, start dropping events for this process */
        lck_rw_lock_exclusive(&event_handling_lock);
        while (watcher->rd != watcher->wr) {
            kfse = watcher->event_queue[watcher->rd];
            watcher->event_queue[watcher->rd] = NULL;
            watcher->rd = (watcher->rd+1) % watcher->eventq_size;
            OSSynchronizeIO();
            if (kfse != NULL && kfse->type != FSE_INVALID && kfse->refcount >= 1) {
                release_event_ref(kfse);
            }
        }
        watcher->flags |= WATCHER_DROPPED_EVENTS;
        lck_rw_unlock_exclusive(&event_handling_lock);

        printf("fsevents: watcher falling behind: %s (pid: %d) rd: %4d wr: %4d q_size: %4d flags: 0x%x\n",
               watcher->proc_name, watcher->pid, watcher->rd, watcher->wr,
               watcher->eventq_size, watcher->flags);

        fsevents_wakeup(watcher);
    } else if (num_pending > MAX_NUM_PENDING) {
        fsevents_wakeup(watcher);
    } else if (timer_set == 0) {
        schedule_event_wakeup();
    }

    return 0;
}

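//
// Worked example of the occupancy math above (for illustration): with
// eventq_size == 4096, rd == 4000 and wr == 100 the queue has wrapped,
// so num_pending = 100 + 4096 - 4000 = 196.  That is under the 3/4-full
// drop threshold (3072) but over MAX_NUM_PENDING (16), so the watcher is
// woken immediately rather than waiting for the delivery timer.
//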
static int
fill_buff(uint16_t type, int32_t size, const void *data,
          char *buff, int32_t *_buff_idx, int32_t buff_sz,
          struct uio *uio)
{
    int32_t  amt, error = 0, buff_idx = *_buff_idx;
    uint16_t tmp;

    //
    // the +1 on the size is to guarantee that the main data
    // copy loop will always copy at least 1 byte
    //
    if ((buff_sz - buff_idx) <= (int)(2*sizeof(uint16_t) + 1)) {
        if (buff_idx > uio_resid(uio)) {
            error = ENOSPC;
            goto get_out;
        }

        error = uiomove(buff, buff_idx, uio);
        if (error) {
            goto get_out;
        }
        buff_idx = 0;
    }

    // copy out the header (type & size)
    memcpy(&buff[buff_idx], &type, sizeof(uint16_t));
    buff_idx += sizeof(uint16_t);

    tmp = size & 0xffff;
    memcpy(&buff[buff_idx], &tmp, sizeof(uint16_t));
    buff_idx += sizeof(uint16_t);

    // now copy the body of the data, flushing along the way
    // if the buffer fills up.
    //
    while(size > 0) {
        amt = (size < (buff_sz - buff_idx)) ? size : (buff_sz - buff_idx);
        memcpy(&buff[buff_idx], data, amt);

        size -= amt;
        buff_idx += amt;
        data = (const char *)data + amt;
        if (size > (buff_sz - buff_idx)) {
            if (buff_idx > uio_resid(uio)) {
                error = ENOSPC;
                goto get_out;
            }
            error = uiomove(buff, buff_idx, uio);
            if (error) {
                goto get_out;
            }
            buff_idx = 0;
        }

        if (amt == 0) {   // just in case...
            break;
        }
    }

  get_out:
    *_buff_idx = buff_idx;

    return error;
}

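//
// Sketch of the resulting stream format: every argument fill_buff() emits
// is a small type/length/value record.  E.g. an FSE_ARG_DEV carrying the
// dev_t 0x1000004 would appear in the stream as (on a little-endian
// machine; shown for illustration only):
//
//	uint16_t type = FSE_ARG_DEV;
//	uint16_t len  = 4;
//	uint8_t  data[4] = { 0x04, 0x00, 0x00, 0x01 };
//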
static int copy_out_kfse(fs_event_watcher *watcher, kfs_event *kfse, struct uio *uio) __attribute__((noinline));

static int
copy_out_kfse(fs_event_watcher *watcher, kfs_event *kfse, struct uio *uio)
{
    int        error;
    uint16_t   tmp16;
    int32_t    type;
    kfs_event *cur;
    char       evbuff[512];
    int        evbuff_idx = 0;

    if (kfse->type == FSE_INVALID) {
        panic("fsevents: copy_out_kfse: asked to copy out an invalid event (kfse %p, refcount %d fref ptr %p)\n", kfse, kfse->refcount, kfse->str);
    }

    if (kfse->flags & KFSE_BEING_CREATED) {
        return 0;
    }

    if (kfse->type == FSE_RENAME && kfse->dest == NULL) {
        //
        // This can happen if an event gets recycled but we had a
        // pointer to it in our event queue.  The event is the
        // destination of a rename which we'll process separately
        // (that is, another kfse points to this one so it's ok
        // to skip this guy because we'll process it when we process
        // the other one)
        error = 0;
        goto get_out;
    }

    if (watcher->flags & WATCHER_WANTS_EXTENDED_INFO) {

        type = (kfse->type & 0xfff);

        if (kfse->flags & KFSE_CONTAINS_DROPPED_EVENTS) {
            type |= (FSE_CONTAINS_DROPPED_EVENTS << FSE_FLAG_SHIFT);
        } else if (kfse->flags & KFSE_COMBINED_EVENTS) {
            type |= (FSE_COMBINED_EVENTS << FSE_FLAG_SHIFT);
        }

    } else {
        type = (int32_t)kfse->type;
    }

    // copy out the type of the event
    memcpy(evbuff, &type, sizeof(int32_t));
    evbuff_idx += sizeof(int32_t);

    // copy out the pid of the person that generated the event
    memcpy(&evbuff[evbuff_idx], &kfse->pid, sizeof(pid_t));
    evbuff_idx += sizeof(pid_t);

    cur = kfse;

  copy_again:

    if (kfse->type == FSE_DOCID_CHANGED || kfse->type == FSE_DOCID_CREATED) {
        dev_t    dev = cur->dev;
        ino64_t  ino = cur->ino;
        uint64_t ival;

        error = fill_buff(FSE_ARG_DEV, sizeof(dev_t), &dev, evbuff, &evbuff_idx, sizeof(evbuff), uio);
        if (error != 0) {
            goto get_out;
        }

        error = fill_buff(FSE_ARG_INO, sizeof(ino64_t), &ino, evbuff, &evbuff_idx, sizeof(evbuff), uio);
        if (error != 0) {
            goto get_out;
        }

        memcpy(&ino, &cur->str, sizeof(ino64_t));
        error = fill_buff(FSE_ARG_INO, sizeof(ino64_t), &ino, evbuff, &evbuff_idx, sizeof(evbuff), uio);
        if (error != 0) {
            goto get_out;
        }

        memcpy(&ival, &cur->uid, sizeof(uint64_t));   // the 64-bit docid overlays the uid/gid fields
        error = fill_buff(FSE_ARG_INT64, sizeof(uint64_t), &ival, evbuff, &evbuff_idx, sizeof(evbuff), uio);
        if (error != 0) {
            goto get_out;
        }

        goto done;
    }

    if (kfse->type == FSE_UNMOUNT_PENDING) {
        dev_t dev = cur->dev;

        error = fill_buff(FSE_ARG_DEV, sizeof(dev_t), &dev, evbuff, &evbuff_idx, sizeof(evbuff), uio);
        if (error != 0) {
            goto get_out;
        }

        goto done;
    }

    if (cur->str == NULL || cur->str[0] == '\0') {
        printf("copy_out_kfse:2: empty/short path (%s)\n", cur->str);
        error = fill_buff(FSE_ARG_STRING, 2, "/", evbuff, &evbuff_idx, sizeof(evbuff), uio);
    } else {
        error = fill_buff(FSE_ARG_STRING, cur->len, cur->str, evbuff, &evbuff_idx, sizeof(evbuff), uio);
    }
    if (error != 0) {
        goto get_out;
    }

    if (cur->dev == 0 && cur->ino == 0) {
        // this happens when a rename event happens and the
        // destination of the rename did not previously exist.
        // it thus has no other file info so skip copying out
        // the stuff below since it isn't initialized
        goto done;
    }


    if (watcher->flags & WATCHER_WANTS_COMPACT_EVENTS) {
        int32_t finfo_size;

        finfo_size = sizeof(dev_t) + sizeof(ino64_t) + sizeof(int32_t) + sizeof(uid_t) + sizeof(gid_t);
        error = fill_buff(FSE_ARG_FINFO, finfo_size, &cur->ino, evbuff, &evbuff_idx, sizeof(evbuff), uio);
        if (error != 0) {
            goto get_out;
        }
    } else {
        error = fill_buff(FSE_ARG_DEV, sizeof(dev_t), &cur->dev, evbuff, &evbuff_idx, sizeof(evbuff), uio);
        if (error != 0) {
            goto get_out;
        }

        error = fill_buff(FSE_ARG_INO, sizeof(ino64_t), &cur->ino, evbuff, &evbuff_idx, sizeof(evbuff), uio);
        if (error != 0) {
            goto get_out;
        }

        error = fill_buff(FSE_ARG_MODE, sizeof(int32_t), &cur->mode, evbuff, &evbuff_idx, sizeof(evbuff), uio);
        if (error != 0) {
            goto get_out;
        }

        error = fill_buff(FSE_ARG_UID, sizeof(uid_t), &cur->uid, evbuff, &evbuff_idx, sizeof(evbuff), uio);
        if (error != 0) {
            goto get_out;
        }

        error = fill_buff(FSE_ARG_GID, sizeof(gid_t), &cur->gid, evbuff, &evbuff_idx, sizeof(evbuff), uio);
        if (error != 0) {
            goto get_out;
        }
    }


    if (cur->dest) {
        cur = cur->dest;
        goto copy_again;
    }

  done:
    // very last thing: the time stamp
    error = fill_buff(FSE_ARG_INT64, sizeof(uint64_t), &cur->abstime, evbuff, &evbuff_idx, sizeof(evbuff), uio);
    if (error != 0) {
        goto get_out;
    }

    // check if the FSE_ARG_DONE will fit
    if (sizeof(uint16_t) > sizeof(evbuff) - evbuff_idx) {
        if (evbuff_idx > uio_resid(uio)) {
            error = ENOSPC;
            goto get_out;
        }
        error = uiomove(evbuff, evbuff_idx, uio);
        if (error) {
            goto get_out;
        }
        evbuff_idx = 0;
    }

    tmp16 = FSE_ARG_DONE;
    memcpy(&evbuff[evbuff_idx], &tmp16, sizeof(uint16_t));
    evbuff_idx += sizeof(uint16_t);

    // flush any remaining data in the buffer (and hopefully
    // in most cases this is the only uiomove we'll do)
    if (evbuff_idx > uio_resid(uio)) {
        error = ENOSPC;
    } else {
        error = uiomove(evbuff, evbuff_idx, uio);
    }

  get_out:

    return error;
}

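//
// For reference, a plain (non-compact) record as produced above reads
// back from /dev/fsevents roughly as:
//
//	int32_t type;  int32_t pid;
//	FSE_ARG_STRING <path>
//	FSE_ARG_DEV, FSE_ARG_INO, FSE_ARG_MODE, FSE_ARG_UID, FSE_ARG_GID
//	[a second path + info block if this is a rename/exchange]
//	FSE_ARG_INT64 <mach_absolute_time() timestamp>
//	FSE_ARG_DONE
//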
static int
fmod_watch(fs_event_watcher *watcher, struct uio *uio)
{
    int          error = 0;
    user_ssize_t last_full_event_resid;
    kfs_event   *kfse;
    uint16_t     tmp16;
    int          skipped;

    last_full_event_resid = uio_resid(uio);

    // need at least 2048 bytes of space (maxpathlen + 1 event buf)
    if (uio_resid(uio) < 2048 || watcher == NULL) {
        return EINVAL;
    }

    if (watcher->flags & WATCHER_CLOSING) {
        return 0;
    }

    if (OSAddAtomic(1, &watcher->num_readers) != 0) {
        // don't allow multiple threads to read from the fd at the same time
        OSAddAtomic(-1, &watcher->num_readers);
        return EAGAIN;
    }

 restart_watch:
    if (watcher->rd == watcher->wr) {
        if (watcher->flags & WATCHER_CLOSING) {
            OSAddAtomic(-1, &watcher->num_readers);
            return 0;
        }
        OSAddAtomic(1, &watcher->blockers);

        // there's nothing to do, go to sleep
        error = tsleep((caddr_t)watcher, PUSER|PCATCH, "fsevents_empty", 0);

        OSAddAtomic(-1, &watcher->blockers);

        if (error != 0 || (watcher->flags & WATCHER_CLOSING)) {
            OSAddAtomic(-1, &watcher->num_readers);
            return error;
        }
    }

    // if we dropped events, return that as an event first
    if (watcher->flags & WATCHER_DROPPED_EVENTS) {
        int32_t val = FSE_EVENTS_DROPPED;

        error = uiomove((caddr_t)&val, sizeof(int32_t), uio);
        if (error == 0) {
            val = 0;                // a fake pid
            error = uiomove((caddr_t)&val, sizeof(int32_t), uio);

            tmp16 = FSE_ARG_DONE;   // makes it a consistent msg
            error = uiomove((caddr_t)&tmp16, sizeof(int16_t), uio);

            last_full_event_resid = uio_resid(uio);
        }

        if (error) {
            OSAddAtomic(-1, &watcher->num_readers);
            return error;
        }

        watcher->flags &= ~WATCHER_DROPPED_EVENTS;
    }

    skipped = 0;

    lck_rw_lock_shared(&event_handling_lock);
    while (uio_resid(uio) > 0 && watcher->rd != watcher->wr) {
        if (watcher->flags & WATCHER_CLOSING) {
            break;
        }

        //
        // check if the event is something of interest to us
        // (since it may have been recycled/reused and changed
        // its type or which device it is for)
        //
        kfse = watcher->event_queue[watcher->rd];
        if (!kfse || kfse->type == FSE_INVALID || kfse->type >= watcher->num_events || kfse->refcount < 1) {
            break;
        }

        if (watcher->event_list[kfse->type] == FSE_REPORT && watcher_cares_about_dev(watcher, kfse->dev)) {

            if (!(watcher->flags & WATCHER_APPLE_SYSTEM_SERVICE) && kfse->type != FSE_DOCID_CREATED && kfse->type != FSE_DOCID_CHANGED && is_ignored_directory(kfse->str)) {
                // If this is not an Apple System Service, skip specified directories
                // radar://12034844
                error = 0;
                skipped = 1;
            } else {

                skipped = 0;
                if (last_event_ptr == kfse) {
                    last_event_ptr = NULL;
                    last_event_type = -1;
                    last_coalesced_time = 0;
                }
                error = copy_out_kfse(watcher, kfse, uio);
                if (error != 0) {
                    // if an event won't fit or encountered an error while
                    // we were copying it out, then backup to the last full
                    // event and just bail out.  if the error was ENOENT
                    // then we can continue regular processing, otherwise
                    // we should unlock things and return.
                    uio_setresid(uio, last_full_event_resid);
                    if (error != ENOENT) {
                        lck_rw_unlock_shared(&event_handling_lock);
                        error = 0;
                        goto get_out;
                    }
                }

                last_full_event_resid = uio_resid(uio);
            }
        }

        watcher->event_queue[watcher->rd] = NULL;
        watcher->rd = (watcher->rd + 1) % watcher->eventq_size;
        OSSynchronizeIO();
        release_event_ref(kfse);
    }
    lck_rw_unlock_shared(&event_handling_lock);

    if (skipped && error == 0) {
        goto restart_watch;
    }

 get_out:
    OSAddAtomic(-1, &watcher->num_readers);

    return error;
}

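//
// Note, derived from the loop above: after an overflow the very first
// thing a reader sees is a synthetic record -- type FSE_EVENTS_DROPPED,
// a fake pid of 0, then FSE_ARG_DONE -- so consumers can detect the gap
// in the stream and rescan instead of trusting it.
//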
//
// Shoo watchers away from a volume that's about to be unmounted
// (so that it can be cleanly unmounted).
//
void
fsevent_unmount(__unused struct mount *mp, __unused vfs_context_t ctx)
{
}


//
// /dev/fsevents device code
//
static int fsevents_installed = 0;

typedef struct fsevent_handle {
    UInt32            flags;
    SInt32            active;
    fs_event_watcher *watcher;
    struct klist      knotes;
    struct selinfo    si;
} fsevent_handle;

#define FSEH_CLOSING  0x0001

static int
fseventsf_read(struct fileproc *fp, struct uio *uio,
               __unused int flags, __unused vfs_context_t ctx)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;
    int error;

    error = fmod_watch(fseh->watcher, uio);

    return error;
}


static int
fseventsf_write(__unused struct fileproc *fp, __unused struct uio *uio,
                __unused int flags, __unused vfs_context_t ctx)
{
    return EIO;
}

#pragma pack(push, 4)
typedef struct fsevent_dev_filter_args32 {
    uint32_t      num_devices;
    user32_addr_t devices;
} fsevent_dev_filter_args32;
typedef struct fsevent_dev_filter_args64 {
    uint32_t      num_devices;
    user64_addr_t devices;
} fsevent_dev_filter_args64;
#pragma pack(pop)

#define FSEVENTS_DEVICE_FILTER_32  _IOW('s', 100, fsevent_dev_filter_args32)
#define FSEVENTS_DEVICE_FILTER_64  _IOW('s', 100, fsevent_dev_filter_args64)

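//
// Userspace sketch (illustrative only; assumes the single
// FSEVENTS_DEVICE_FILTER ioctl name and fsevent_dev_filter_args type that
// <sys/fsevents.h> exports to user code, plus an already-opened fsevents
// fd named fse_fd):
//
//	dev_t skip_dev = st.st_dev;
//	fsevent_dev_filter_args args = { 1, &skip_dev };
//	ioctl(fse_fd, FSEVENTS_DEVICE_FILTER, &args);  // stop events for that volume
//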
static int
fseventsf_ioctl(struct fileproc *fp, u_long cmd, caddr_t data, vfs_context_t ctx)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;
    int ret = 0;
    fsevent_dev_filter_args64 *devfilt_args, _devfilt_args;

    OSAddAtomic(1, &fseh->active);
    if (fseh->flags & FSEH_CLOSING) {
        OSAddAtomic(-1, &fseh->active);
        return 0;
    }

    switch (cmd) {
        case FIONBIO:
        case FIOASYNC:
            break;

        case FSEVENTS_WANT_COMPACT_EVENTS: {
            fseh->watcher->flags |= WATCHER_WANTS_COMPACT_EVENTS;
            break;
        }

        case FSEVENTS_WANT_EXTENDED_INFO: {
            fseh->watcher->flags |= WATCHER_WANTS_EXTENDED_INFO;
            break;
        }

        case FSEVENTS_GET_CURRENT_ID: {
            *(uint64_t *)data = fseh->watcher->max_event_id;
            ret = 0;
            break;
        }

        case FSEVENTS_DEVICE_FILTER_32: {
            if (proc_is64bit(vfs_context_proc(ctx))) {
                ret = EINVAL;
                break;
            }
            fsevent_dev_filter_args32 *devfilt_args32 = (fsevent_dev_filter_args32 *)data;

            devfilt_args = &_devfilt_args;
            memset(devfilt_args, 0, sizeof(fsevent_dev_filter_args64));
            devfilt_args->num_devices = devfilt_args32->num_devices;
            devfilt_args->devices     = CAST_USER_ADDR_T(devfilt_args32->devices);
            goto handle_dev_filter;
        }

        case FSEVENTS_DEVICE_FILTER_64:
            if (!proc_is64bit(vfs_context_proc(ctx))) {
                ret = EINVAL;
                break;
            }
            devfilt_args = (fsevent_dev_filter_args64 *)data;

        handle_dev_filter:
            {
                int new_num_devices;
                dev_t *devices_not_to_watch, *tmp=NULL;

                if (devfilt_args->num_devices > 256) {
                    ret = EINVAL;
                    break;
                }

                new_num_devices = devfilt_args->num_devices;
                if (new_num_devices == 0) {
                    lock_watch_table();

                    tmp = fseh->watcher->devices_not_to_watch;
                    fseh->watcher->devices_not_to_watch = NULL;
                    fseh->watcher->num_devices = new_num_devices;

                    unlock_watch_table();
                    if (tmp) {
                        FREE(tmp, M_TEMP);
                    }
                    break;
                }

                MALLOC(devices_not_to_watch, dev_t *,
                       new_num_devices * sizeof(dev_t),
                       M_TEMP, M_WAITOK);
                if (devices_not_to_watch == NULL) {
                    ret = ENOMEM;
                    break;
                }

                ret = copyin(devfilt_args->devices,
                             (void *)devices_not_to_watch,
                             new_num_devices * sizeof(dev_t));
                if (ret) {
                    FREE(devices_not_to_watch, M_TEMP);
                    break;
                }

                lock_watch_table();
                fseh->watcher->num_devices = new_num_devices;
                tmp = fseh->watcher->devices_not_to_watch;
                fseh->watcher->devices_not_to_watch = devices_not_to_watch;
                unlock_watch_table();

                if (tmp) {
                    FREE(tmp, M_TEMP);
                }

                break;
            }

        case FSEVENTS_UNMOUNT_PENDING_ACK: {
            lock_watch_table();
            dev_t dev = *(dev_t *)data;
            if (fsevent_unmount_dev == dev) {
                if (--fsevent_unmount_ack_count <= 0) {
                    fsevent_unmount_dev = 0;
                    wakeup((caddr_t)&fsevent_unmount_dev);
                }
            } else {
                printf("unexpected unmount pending ack %d (%d)\n", dev, fsevent_unmount_dev);
                ret = EINVAL;
            }
            unlock_watch_table();
            break;
        }

        default:
            ret = EINVAL;
            break;
    }

    OSAddAtomic(-1, &fseh->active);
    return (ret);
}

1815static int
2d21ac55 1816fseventsf_select(struct fileproc *fp, int which, __unused void *wql, vfs_context_t ctx)
91447636
A
1817{
1818 fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;
1819 int ready = 0;
1820
1821 if ((which != FREAD) || (fseh->watcher->flags & WATCHER_CLOSING)) {
1822 return 0;
1823 }
1824
1825
1826 // if there's nothing in the queue, we're not ready
2d21ac55 1827 if (fseh->watcher->rd != fseh->watcher->wr) {
91447636
A
1828 ready = 1;
1829 }
1830
1831 if (!ready) {
2d21ac55 1832 selrecord(vfs_context_proc(ctx), &fseh->si, wql);
91447636
A
1833 }
1834
1835 return ready;
1836}
1837
1838
#if NOTUSED
static int
fseventsf_stat(__unused struct fileproc *fp, __unused struct stat *sb, __unused vfs_context_t ctx)
{
    return ENOTSUP;
}
#endif

static int
fseventsf_close(struct fileglob *fg, __unused vfs_context_t ctx)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fg->fg_data;
    fs_event_watcher *watcher;

    OSBitOrAtomic(FSEH_CLOSING, &fseh->flags);
    while (OSAddAtomic(0, &fseh->active) > 0) {
        tsleep((caddr_t)fseh->watcher, PRIBIO, "fsevents-close", 1);
    }

    watcher = fseh->watcher;
    fg->fg_data = NULL;
    fseh->watcher = NULL;

    remove_watcher(watcher);
    FREE(fseh, M_TEMP);

    return 0;
}

static void
filt_fsevent_detach(struct knote *kn)
{
    fsevent_handle *fseh = (struct fsevent_handle *)kn->kn_hook;

    lock_watch_table();

    KNOTE_DETACH(&fseh->knotes, kn);

    unlock_watch_table();
}

/*
 * Determine whether this knote should be active
 *
 * This is kind of subtle.
 * --First, notice if the vnode has been revoked: if so, override hint
 * --EVFILT_READ knotes are checked no matter what the hint is
 * --Other knotes activate based on hint.
 * --If hint is revoke, set special flags and activate
 */
static int
filt_fsevent(struct knote *kn, long hint)
{
    fsevent_handle *fseh = (struct fsevent_handle *)kn->kn_hook;
    int activate = 0;
    int32_t rd, wr, amt;

    if (NOTE_REVOKE == hint) {
        kn->kn_flags |= (EV_EOF | EV_ONESHOT);
        activate = 1;
    }

    rd = fseh->watcher->rd;
    wr = fseh->watcher->wr;
    if (rd <= wr) {
        amt = wr - rd;
    } else {
        amt = fseh->watcher->eventq_size - (rd - wr);
    }
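
    /*
     * Worked example of the arithmetic above: with eventq_size == 8,
     * rd == 2 and wr == 6 the queue has not wrapped, so amt = 6 - 2 = 4;
     * with rd == 6 and wr == 2 it has wrapped, so amt = 8 - (6 - 2) = 4
     * events are pending.
     */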

    switch (kn->kn_filter) {
    case EVFILT_READ:
        kn->kn_data = amt;

        if (kn->kn_data != 0) {
            activate = 1;
        }
        break;
    case EVFILT_VNODE:
        /* Check events this note matches against the hint */
        if (kn->kn_sfflags & hint) {
            kn->kn_fflags |= hint;   /* Set which event occurred */
        }
        if (kn->kn_fflags != 0) {
            activate = 1;
        }
        break;
    default: {
        // nothing to do...
        break;
    }
    }

    return (activate);
}

static int
filt_fsevent_touch(struct knote *kn, struct kevent_internal_s *kev)
{
    int res;

    lock_watch_table();

    /* accept new fflags/data as saved */
    kn->kn_sfflags = kev->fflags;
    kn->kn_sdata = kev->data;
    if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0) {
        kn->kn_udata = kev->udata;
    }

    /*
     * Restricting the current results to the (smaller?) set of new
     * interest would be:
     *
     *     kn->kn_fflags &= kev->fflags;
     *
     * but for compatibility with previous implementations we leave
     * kn_fflags as it was before.
     */

    /* determine if the filter is now fired */
    res = filt_fsevent(kn, 0);

    unlock_watch_table();

    return res;
}

static int
filt_fsevent_process(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev)
{
#pragma unused(data)
    int res;

    lock_watch_table();

    res = filt_fsevent(kn, 0);
    if (res) {
        *kev = kn->kn_kevent;
        if (kev->flags & EV_CLEAR) {
            kn->kn_data = 0;
            kn->kn_fflags = 0;
        }
    }

    unlock_watch_table();
    return res;
}

struct filterops fsevent_filtops = {
    .f_isfd = 1,
    .f_attach = NULL,
    .f_detach = filt_fsevent_detach,
    .f_event = filt_fsevent,
    .f_touch = filt_fsevent_touch,
    .f_process = filt_fsevent_process,
};

static int
fseventsf_kqfilter(struct fileproc *fp, struct knote *kn, __unused vfs_context_t ctx)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;
    int res;

    kn->kn_hook = (void *)fseh;
    kn->kn_hookid = 1;
    kn->kn_filtid = EVFILTID_FSEVENT;

    lock_watch_table();

    KNOTE_ATTACH(&fseh->knotes, kn);

    /* check to see if it is fired already */
    res = filt_fsevent(kn, 0);

    unlock_watch_table();

    return res;
}


static int
fseventsf_drain(struct fileproc *fp, __unused vfs_context_t ctx)
{
    int counter = 0;
    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;

    fseh->watcher->flags |= WATCHER_CLOSING;

    // If there are still people waiting, sleep for 10ms to let them
    // clean up and get out of there.  We also don't want to get stuck
    // forever, so if they haven't exited after 5 seconds (500 trips
    // through this loop) we tear things down anyway.
    while (fseh->watcher->blockers && counter++ < 500) {
        // Issue a wakeup in case anyone is blocked waiting for an event.
        // Do this each time around in case a blocker missed the wakeup
        // due to the unprotected test of WATCHER_CLOSING and the decision
        // to tsleep in fmod_watch... this bit of latency is a decent
        // tradeoff against not having to take and drop a lock in
        // fmod_watch.
        lock_watch_table();
        fsevents_wakeup(fseh->watcher);
        unlock_watch_table();

        tsleep((caddr_t)fseh->watcher, PRIBIO, "watcher-close", 1);
    }

    return 0;
}


static int
fseventsopen(__unused dev_t dev, __unused int flag, __unused int mode, __unused struct proc *p)
{
    if (!kauth_cred_issuser(kauth_cred_get())) {
        return EPERM;
    }

    return 0;
}

static int
fseventsclose(__unused dev_t dev, __unused int flag, __unused int mode, __unused struct proc *p)
{
    return 0;
}

static int
fseventsread(__unused dev_t dev, __unused struct uio *uio, __unused int ioflag)
{
    return EIO;
}


static int
parse_buffer_and_add_events(const char *buffer, int bufsize, vfs_context_t ctx, long *remainder)
{
    const fse_info *finfo, *dest_finfo;
    const char *path, *ptr, *dest_path, *event_start = buffer;
    int path_len, type, dest_path_len, err = 0;

    ptr = buffer;
    while ((ptr + sizeof(int) + sizeof(fse_info) + 1) < buffer + bufsize) {
        type = *(const int *)ptr;
        if (type < 0 || type >= FSE_MAX_EVENTS) {
            err = EINVAL;
            break;
        }

        ptr += sizeof(int);

        finfo = (const fse_info *)ptr;
        ptr += sizeof(fse_info);

        path = ptr;
        while (ptr < buffer + bufsize && *ptr != '\0') {
            ptr++;
        }

        if (ptr >= buffer + bufsize) {
            break;
        }

        ptr++;   // advance over the trailing '\0'

        path_len = ptr - path;

        if (type != FSE_RENAME && type != FSE_EXCHANGE) {
            event_start = ptr;   // record where the next event starts

            err = add_fsevent(type, ctx, FSE_ARG_STRING, path_len, path, FSE_ARG_FINFO, finfo, FSE_ARG_DONE);
            if (err) {
                break;
            }
            continue;
        }

        //
        // if we're here we have to slurp up the destination finfo
        // and path so that we can pass them to the add_fsevent()
        // call.  basically it's a copy of the above code.
        //
        dest_finfo = (const fse_info *)ptr;
        ptr += sizeof(fse_info);

        dest_path = ptr;
        while (ptr < buffer + bufsize && *ptr != '\0') {
            ptr++;
        }

        if (ptr >= buffer + bufsize) {
            break;
        }

        ptr++;               // advance over the trailing '\0'
        event_start = ptr;   // record where the next event starts

        dest_path_len = ptr - dest_path;
        //
        // If the destination inode number is non-zero, generate a rename
        // with both source and destination FSE_ARG_FINFO.  Otherwise generate
        // a rename with only one FSE_ARG_FINFO.  If you need to inject an
        // exchange with an inode of zero, just make that inode (and its path)
        // come in as the first one, not the second.
        //
        if (dest_finfo->ino) {
            err = add_fsevent(type, ctx,
                              FSE_ARG_STRING, path_len, path, FSE_ARG_FINFO, finfo,
                              FSE_ARG_STRING, dest_path_len, dest_path, FSE_ARG_FINFO, dest_finfo,
                              FSE_ARG_DONE);
        } else {
            err = add_fsevent(type, ctx,
                              FSE_ARG_STRING, path_len, path, FSE_ARG_FINFO, finfo,
                              FSE_ARG_STRING, dest_path_len, dest_path,
                              FSE_ARG_DONE);
        }

        if (err) {
            break;
        }
    }

    //
    // If the last event wasn't complete, report back (via *remainder) how
    // many bytes of it are sitting at the tail of the buffer, starting at
    // the last event boundary, so the caller can carry them over.
    //
    *remainder = (long)((buffer + bufsize) - event_start);

    return err;
}
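
/*
 * For reference, the buffer parsed above is a simple packed stream.  Each
 * event is laid out as:
 *
 *     int32_t  type;      // FSE_* event code
 *     fse_info finfo;     // dev/ino/mode/uid/gid (and nlink) of the object
 *     char     path[];    // NUL-terminated path
 *
 * and FSE_RENAME/FSE_EXCHANGE events are followed by a second
 * fse_info/path pair for the destination.  A minimal, untested userspace
 * sketch of emitting one event with write(2) -- `fsevents_fd' and `finfo'
 * are assumed to be set up by the caller:
 *
 *     char buf[sizeof(int32_t) + sizeof(fse_info) + PATH_MAX], *p = buf;
 *     int32_t type = FSE_STAT_CHANGED;
 *
 *     memcpy(p, &type, sizeof(type));    p += sizeof(type);
 *     memcpy(p, &finfo, sizeof(finfo));  p += sizeof(finfo);
 *     p = stpcpy(p, "/tmp/somefile") + 1;   // keep the trailing NUL
 *     write(fsevents_fd, buf, p - buf);
 */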

//
// Note: this buffer size can never be less than
//       2*MAXPATHLEN + 2*sizeof(fse_info) + sizeof(int)
//       because that is the maximum size of a single event.
//       I made it 4k to be a "nice" size; making it
//       smaller is not a good idea.
//
#define WRITE_BUFFER_SIZE  4096
char *write_buffer = NULL;
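
//
// (With MAXPATHLEN == 1024 and the six fse_info fields this file touches --
// two 64-bit and four 32-bit values, i.e. roughly 32 bytes -- that floor
// works out to about 2*1024 + 2*32 + 4 = 2116 bytes, so 4096 leaves ample
// headroom.)
//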

static int
fseventswrite(__unused dev_t dev, struct uio *uio, __unused int ioflag)
{
    int error = 0, count;
    vfs_context_t ctx = vfs_context_current();
    long offset = 0, remainder;

    lck_mtx_lock(&event_writer_lock);

    if (write_buffer == NULL) {
        if (kmem_alloc(kernel_map, (vm_offset_t *)&write_buffer, WRITE_BUFFER_SIZE, VM_KERN_MEMORY_FILE)) {
            lck_mtx_unlock(&event_writer_lock);
            return ENOMEM;
        }
    }

    //
    // this loop copies in and processes the events written.
    // it takes care to copy in reasonable size chunks and
    // process them.  if there is an event that spans a chunk
    // boundary we're careful to copy those bytes down to the
    // beginning of the buffer and read the next chunk in just
    // after it.
    //
    while (uio_resid(uio)) {
        if (uio_resid(uio) > (WRITE_BUFFER_SIZE - offset)) {
            count = WRITE_BUFFER_SIZE - offset;
        } else {
            count = uio_resid(uio);
        }

        error = uiomove(write_buffer + offset, count, uio);
        if (error) {
            break;
        }

        // printf("fsevents: write: copied in %d bytes (offset: %ld)\n", count, offset);
        error = parse_buffer_and_add_events(write_buffer, offset + count, ctx, &remainder);
        if (error) {
            break;
        }

        //
        // if there's any remainder, copy it down to the beginning
        // of the buffer so that it will get processed the next time
        // through the loop.  note that the remainder always starts
        // at an event boundary.
        //
        if (remainder != 0) {
            // printf("fsevents: write: an event spanned a %d byte boundary. remainder: %ld\n",
            //     WRITE_BUFFER_SIZE, remainder);
            memmove(write_buffer, (write_buffer + count + offset) - remainder, remainder);
            offset = remainder;
        } else {
            offset = 0;
        }
    }

    lck_mtx_unlock(&event_writer_lock);

    return error;
}
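
/*
 * Worked example of the remainder handling above: suppose a writer
 * supplies 5000 bytes and the first 4096-byte chunk ends 120 bytes into
 * its final event.  parse_buffer_and_add_events() reports remainder == 120,
 * those bytes are memmove'd to the front of write_buffer, offset becomes
 * 120, and the next uiomove() copies in at most 4096 - 120 bytes right
 * behind them, re-assembling the split event.
 */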

static const struct fileops fsevents_fops = {
    .fo_type = DTYPE_FSEVENTS,
    .fo_read = fseventsf_read,
    .fo_write = fseventsf_write,
    .fo_ioctl = fseventsf_ioctl,
    .fo_select = fseventsf_select,
    .fo_close = fseventsf_close,
    .fo_kqfilter = fseventsf_kqfilter,
    .fo_drain = fseventsf_drain,
};

typedef struct fsevent_clone_args32 {
    user32_addr_t  event_list;
    int32_t        num_events;
    int32_t        event_queue_depth;
    user32_addr_t  fd;
} fsevent_clone_args32;

typedef struct fsevent_clone_args64 {
    user64_addr_t  event_list;
    int32_t        num_events;
    int32_t        event_queue_depth;
    user64_addr_t  fd;
} fsevent_clone_args64;

#define FSEVENTS_CLONE_32  _IOW('s', 1, fsevent_clone_args32)
#define FSEVENTS_CLONE_64  _IOW('s', 1, fsevent_clone_args64)
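
/*
 * Illustrative, untested userspace sketch of cloning a watcher: open
 * /dev/fsevents, describe which event types you want with an array of
 * FSE_REPORT/FSE_IGNORE bytes indexed by event code, and read events from
 * the fd handed back.  Names assume the single FSEVENTS_CLONE command and
 * fsevent_clone_args definition exported to userspace by <sys/fsevents.h>.
 *
 *     int8_t events[FSE_MAX_EVENTS];
 *     int fd, clone_fd, i;
 *
 *     for (i = 0; i < FSE_MAX_EVENTS; i++)
 *         events[i] = FSE_REPORT;
 *
 *     fd = open("/dev/fsevents", O_RDONLY);   // root only; see fseventsopen()
 *     fsevent_clone_args args = {
 *         .event_list        = events,
 *         .num_events        = FSE_MAX_EVENTS,
 *         .event_queue_depth = 1024,
 *         .fd                = &clone_fd,
 *     };
 *     if (ioctl(fd, FSEVENTS_CLONE, &args) < 0)
 *         err(1, "FSEVENTS_CLONE");
 *     // fsevents are now read(2) from clone_fd
 */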

static int
fseventsioctl(__unused dev_t dev, u_long cmd, caddr_t data, __unused int flag, struct proc *p)
{
    struct fileproc *f;
    int fd, error;
    fsevent_handle *fseh = NULL;
    fsevent_clone_args64 *fse_clone_args, _fse_clone;
    int8_t *event_list;
    int is64bit = proc_is64bit(p);

    switch (cmd) {
    case FSEVENTS_CLONE_32: {
        if (is64bit) {
            return EINVAL;
        }
        fsevent_clone_args32 *args32 = (fsevent_clone_args32 *)data;

        fse_clone_args = &_fse_clone;
        memset(fse_clone_args, 0, sizeof(fsevent_clone_args64));

        fse_clone_args->event_list = CAST_USER_ADDR_T(args32->event_list);
        fse_clone_args->num_events = args32->num_events;
        fse_clone_args->event_queue_depth = args32->event_queue_depth;
        fse_clone_args->fd = CAST_USER_ADDR_T(args32->fd);
        goto handle_clone;
    }

    case FSEVENTS_CLONE_64:
        if (!is64bit) {
            return EINVAL;
        }
        fse_clone_args = (fsevent_clone_args64 *)data;

    handle_clone:
        if (fse_clone_args->num_events < 0 || fse_clone_args->num_events > 4096) {
            return EINVAL;
        }

        MALLOC(fseh, fsevent_handle *, sizeof(fsevent_handle),
               M_TEMP, M_WAITOK);
        if (fseh == NULL) {
            return ENOMEM;
        }
        memset(fseh, 0, sizeof(fsevent_handle));

        klist_init(&fseh->knotes);

        MALLOC(event_list, int8_t *,
               fse_clone_args->num_events * sizeof(int8_t),
               M_TEMP, M_WAITOK);
        if (event_list == NULL) {
            FREE(fseh, M_TEMP);
            return ENOMEM;
        }

        error = copyin(fse_clone_args->event_list,
                       (void *)event_list,
                       fse_clone_args->num_events * sizeof(int8_t));
        if (error) {
            FREE(event_list, M_TEMP);
            FREE(fseh, M_TEMP);
            return error;
        }

        error = add_watcher(event_list,
                            fse_clone_args->num_events,
                            fse_clone_args->event_queue_depth,
                            &fseh->watcher,
                            fseh);
        if (error) {
            FREE(event_list, M_TEMP);
            FREE(fseh, M_TEMP);
            return error;
        }

        fseh->watcher->fseh = fseh;

        error = falloc(p, &f, &fd, vfs_context_current());
        if (error) {
            remove_watcher(fseh->watcher);
            FREE(event_list, M_TEMP);
            FREE(fseh, M_TEMP);
            return (error);
        }
        proc_fdlock(p);
        f->f_fglob->fg_flag = FREAD | FWRITE;
        f->f_fglob->fg_ops = &fsevents_fops;
        f->f_fglob->fg_data = (caddr_t)fseh;
        proc_fdunlock(p);
        error = copyout((void *)&fd, fse_clone_args->fd, sizeof(int32_t));
        if (error != 0) {
            fp_free(p, fd, f);
        } else {
            proc_fdlock(p);
            procfdtbl_releasefd(p, fd, NULL);
            fp_drop(p, fd, f, 1);
            proc_fdunlock(p);
        }
        break;

    default:
        error = EINVAL;
        break;
    }

    return error;
}

static void
fsevents_wakeup(fs_event_watcher *watcher)
{
    selwakeup(&watcher->fseh->si);
    KNOTE(&watcher->fseh->knotes, NOTE_WRITE | NOTE_NONE);
    wakeup((caddr_t)watcher);
}


/*
 * A struct describing which functions will get invoked for certain
 * actions.
 */
static struct cdevsw fsevents_cdevsw =
{
    fseventsopen,              /* open */
    fseventsclose,             /* close */
    fseventsread,              /* read */
    fseventswrite,             /* write */
    fseventsioctl,             /* ioctl */
    (stop_fcn_t *)&nulldev,    /* stop */
    (reset_fcn_t *)&nulldev,   /* reset */
    NULL,                      /* tty's */
    eno_select,                /* select */
    eno_mmap,                  /* mmap */
    eno_strat,                 /* strategy */
    eno_getc,                  /* getc */
    eno_putc,                  /* putc */
    0                          /* type */
};


/*
 * Called to initialize our device,
 * and to register ourselves with devfs
 */
void
fsevents_init(void)
{
    int ret;

    if (fsevents_installed) {
        return;
    }

    fsevents_installed = 1;

    ret = cdevsw_add(-1, &fsevents_cdevsw);
    if (ret < 0) {
        fsevents_installed = 0;
        return;
    }

    devfs_make_node(makedev(ret, 0), DEVFS_CHAR,
                    UID_ROOT, GID_WHEEL, 0644, "fsevents", 0);

    fsevents_internal_init();
}


char *
get_pathbuff(void)
{
    char *path;

    MALLOC_ZONE(path, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
    return path;
}

void
release_pathbuff(char *path)
{
    if (path == NULL) {
        return;
    }
    FREE_ZONE(path, MAXPATHLEN, M_NAMEI);
}

int
get_fse_info(struct vnode *vp, fse_info *fse, __unused vfs_context_t ctx)
{
    struct vnode_attr va;

    VATTR_INIT(&va);
    VATTR_WANTED(&va, va_fsid);
    VATTR_WANTED(&va, va_fileid);
    VATTR_WANTED(&va, va_mode);
    VATTR_WANTED(&va, va_uid);
    VATTR_WANTED(&va, va_gid);
    if (vp->v_flag & VISHARDLINK) {
        if (vp->v_type == VDIR) {
            VATTR_WANTED(&va, va_dirlinkcount);
        } else {
            VATTR_WANTED(&va, va_nlink);
        }
    }

    if (vnode_getattr(vp, &va, vfs_context_kernel()) != 0) {
        memset(fse, 0, sizeof(fse_info));
        return -1;
    }

    return vnode_get_fse_info_from_vap(vp, fse, &va);
}

int
vnode_get_fse_info_from_vap(vnode_t vp, fse_info *fse, struct vnode_attr *vap)
{
    fse->ino = (ino64_t)vap->va_fileid;
    fse->dev = (dev_t)vap->va_fsid;
    fse->mode = (int32_t)vnode_vttoif(vnode_vtype(vp)) | vap->va_mode;
    fse->uid = (uid_t)vap->va_uid;
    fse->gid = (gid_t)vap->va_gid;
    if (vp->v_flag & VISHARDLINK) {
        fse->mode |= FSE_MODE_HLINK;
        if (vp->v_type == VDIR) {
            fse->nlink = (uint64_t)vap->va_dirlinkcount;
        } else {
            fse->nlink = (uint64_t)vap->va_nlink;
        }
    }

    return 0;
}
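
/*
 * Typical kernel-side use (illustrative; it mirrors the calls made
 * elsewhere in this file): snapshot a vnode's attributes into an fse_info
 * and attach it to an event --
 *
 *     fse_info finfo;
 *
 *     if (get_fse_info(vp, &finfo, ctx) == 0) {
 *         add_fsevent(FSE_CONTENT_MODIFIED, ctx,
 *                     FSE_ARG_STRING, path_len, path,
 *                     FSE_ARG_FINFO, &finfo,
 *                     FSE_ARG_DONE);
 *     }
 */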

void
create_fsevent_from_kevent(vnode_t vp, uint32_t kevents, struct vnode_attr *vap)
{
    int fsevent_type = FSE_CONTENT_MODIFIED, len;   // the default is the most pessimistic
    char pathbuf[MAXPATHLEN];
    fse_info fse;

    if (kevents & VNODE_EVENT_DELETE) {
        fsevent_type = FSE_DELETE;
    } else if (kevents & (VNODE_EVENT_EXTEND | VNODE_EVENT_WRITE)) {
        fsevent_type = FSE_CONTENT_MODIFIED;
    } else if (kevents & VNODE_EVENT_LINK) {
        fsevent_type = FSE_CREATE_FILE;
    } else if (kevents & VNODE_EVENT_RENAME) {
        fsevent_type = FSE_CREATE_FILE;   // XXXdbg - should use FSE_RENAME but we don't have the destination info
    } else if (kevents & (VNODE_EVENT_FILE_CREATED | VNODE_EVENT_FILE_REMOVED | VNODE_EVENT_DIR_CREATED | VNODE_EVENT_DIR_REMOVED)) {
        fsevent_type = FSE_STAT_CHANGED;   // XXXdbg - because vp is a dir and the thing created/removed lived inside it
    } else {   // a catch-all for VNODE_EVENT_PERMS, VNODE_EVENT_ATTRIB and anything else
        fsevent_type = FSE_STAT_CHANGED;
    }

    // printf("convert_kevent: kevents 0x%x fsevent type 0x%x (for %s)\n", kevents, fsevent_type, vp->v_name ? vp->v_name : "(no-name)");

    fse.dev = vap->va_fsid;
    fse.ino = vap->va_fileid;
    fse.mode = vnode_vttoif(vnode_vtype(vp)) | (uint32_t)vap->va_mode;
    if (vp->v_flag & VISHARDLINK) {
        fse.mode |= FSE_MODE_HLINK;
        if (vp->v_type == VDIR) {
            fse.nlink = vap->va_dirlinkcount;
        } else {
            fse.nlink = vap->va_nlink;
        }
    }

    if (vp->v_type == VDIR) {
        fse.mode |= FSE_REMOTE_DIR_EVENT;
    }

    fse.uid = vap->va_uid;
    fse.gid = vap->va_gid;

    len = sizeof(pathbuf);
    if (vn_getpath(vp, pathbuf, &len) == 0) {
        add_fsevent(fsevent_type, vfs_context_current(), FSE_ARG_STRING, len, pathbuf, FSE_ARG_FINFO, &fse, FSE_ARG_DONE);
    }
    return;
}

#else /* CONFIG_FSE */

#include <sys/fsevents.h>

/*
 * The get_pathbuff and release_pathbuff routines are used in places not
 * related to fsevents, and it's a handy abstraction, so define trivial
 * versions that don't cache a pool of buffers.  This way, we don't have
 * to conditionalize the callers, and they still get the advantage of the
 * pool of buffers if CONFIG_FSE is turned on.
 */
char *
get_pathbuff(void)
{
    char *path;

    MALLOC_ZONE(path, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
    return path;
}

void
release_pathbuff(char *path)
{
    FREE_ZONE(path, MAXPATHLEN, M_NAMEI);
}

int
add_fsevent(__unused int type, __unused vfs_context_t ctx, ...)
{
    return 0;
}

int
need_fsevent(__unused int type, __unused vnode_t vp)
{
    return 0;
}

#endif /* CONFIG_FSE */