/* bsd/vfs/vfs_fsevents.c (apple/xnu, xnu-1699.32.7) */
/*
 * Copyright (c) 2004-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <stdarg.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/event.h>          // for kqueue related stuff
#include <sys/fsevents.h>

#if CONFIG_FSE
#include <sys/namei.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/file_internal.h>
#include <sys/stat.h>
#include <sys/vnode_internal.h>
#include <sys/mount_internal.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/dirent.h>
#include <sys/attr.h>
#include <sys/sysctl.h>
#include <sys/ubc.h>
#include <machine/cons.h>
#include <miscfs/specfs/specdev.h>
#include <miscfs/devfs/devfs.h>
#include <sys/filio.h>
#include <kern/locks.h>
#include <libkern/OSAtomic.h>
#include <kern/zalloc.h>
#include <mach/mach_time.h>
#include <kern/thread_call.h>
#include <kern/clock.h>

#include <security/audit/audit.h>
#include <bsm/audit_kevents.h>


typedef struct kfs_event {
    LIST_ENTRY(kfs_event) kevent_list;
    int16_t        type;          // type code of this event
    u_int16_t      flags,         // per-event flags
                   len;           // the length of the path in "str"
    int32_t        refcount;      // number of clients referencing this
    pid_t          pid;           // pid of the process that did the op

    uint64_t       abstime;       // when this event happened (mach_absolute_time())
    ino64_t        ino;
    dev_t          dev;
    int32_t        mode;
    uid_t          uid;
    gid_t          gid;

    const char    *str;

    struct kfs_event *dest;       // if this is a two-file op
} kfs_event;

// flags for the flags field
#define KFSE_COMBINED_EVENTS          0x0001
#define KFSE_CONTAINS_DROPPED_EVENTS  0x0002
#define KFSE_RECYCLED_EVENT           0x0004
#define KFSE_BEING_CREATED            0x0008

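//
// Note: a two-file operation such as FSE_RENAME or FSE_EXCHANGE is
// represented by a pair of kfs_events chained through "dest" -- the
// primary event carries the source path and kfse->dest carries the
// destination path (see add_fsevent() and copy_out_kfse() below).
//
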
LIST_HEAD(kfse_list, kfs_event) kfse_list_head = LIST_HEAD_INITIALIZER(x);
int num_events_outstanding = 0;
int num_pending_rename = 0;


struct fsevent_handle;

typedef struct fs_event_watcher {
    int8_t      *event_list;             // the events we're interested in
    int32_t      num_events;
    dev_t       *devices_not_to_watch;   // report events from devices not in this list
    uint32_t     num_devices;
    int32_t      flags;
    kfs_event  **event_queue;
    int32_t      eventq_size;            // number of event pointers in queue
    int32_t      num_readers;
    int32_t      rd;                     // read index into the event_queue
    int32_t      wr;                     // write index into the event_queue
    int32_t      blockers;
    int32_t      my_id;
    uint32_t     num_dropped;
    uint64_t     max_event_id;
    struct fsevent_handle *fseh;
} fs_event_watcher;

// fs_event_watcher flags
#define WATCHER_DROPPED_EVENTS         0x0001
#define WATCHER_CLOSING                0x0002
#define WATCHER_WANTS_COMPACT_EVENTS   0x0004
#define WATCHER_WANTS_EXTENDED_INFO    0x0008


#define MAX_WATCHERS  8
static fs_event_watcher *watcher_table[MAX_WATCHERS];


#define MAX_KFS_EVENTS  4096

// we allocate kfs_event structures out of this zone
static zone_t  event_zone;
static int     fs_event_init = 0;

//
// this array records whether anyone is interested in a
// particular type of event.  if no one is, we bail out
// early from the event delivery
//
static int16_t fs_event_type_watchers[FSE_MAX_EVENTS];

static int  watcher_add_event(fs_event_watcher *watcher, kfs_event *kfse);
static void fsevents_wakeup(fs_event_watcher *watcher);

//
// Locks
//
static lck_grp_attr_t * fsevent_group_attr;
static lck_attr_t *     fsevent_lock_attr;
static lck_grp_t *      fsevent_mutex_group;

static lck_grp_t *      fsevent_rw_group;

static lck_rw_t  event_handling_lock;   // handles locking for event manipulation and recycling
static lck_mtx_t watch_table_lock;
static lck_mtx_t event_buf_lock;
static lck_mtx_t event_writer_lock;


/* Explicitly declare qsort so compiler doesn't complain */
__private_extern__ void qsort(
    void * array,
    size_t nmembers,
    size_t member_size,
    int (*)(const void *, const void *));


static void
fsevents_internal_init(void)
{
    int i;

    if (fs_event_init++ != 0) {
        return;
    }

    for(i=0; i < FSE_MAX_EVENTS; i++) {
        fs_event_type_watchers[i] = 0;
    }

    memset(watcher_table, 0, sizeof(watcher_table));

    fsevent_lock_attr   = lck_attr_alloc_init();
    fsevent_group_attr  = lck_grp_attr_alloc_init();
    fsevent_mutex_group = lck_grp_alloc_init("fsevent-mutex", fsevent_group_attr);
    fsevent_rw_group    = lck_grp_alloc_init("fsevent-rw", fsevent_group_attr);

    lck_mtx_init(&watch_table_lock, fsevent_mutex_group, fsevent_lock_attr);
    lck_mtx_init(&event_buf_lock, fsevent_mutex_group, fsevent_lock_attr);
    lck_mtx_init(&event_writer_lock, fsevent_mutex_group, fsevent_lock_attr);

    lck_rw_init(&event_handling_lock, fsevent_rw_group, fsevent_lock_attr);

    event_zone = zinit(sizeof(kfs_event),
                       MAX_KFS_EVENTS * sizeof(kfs_event),
                       MAX_KFS_EVENTS * sizeof(kfs_event),
                       "fs-event-buf");
    if (event_zone == NULL) {
        printf("fsevents: failed to initialize the event zone.\n");
    }

    // mark the zone as exhaustible so that it will not
    // ever grow beyond what we initially filled it with
    zone_change(event_zone, Z_EXHAUST, TRUE);
    zone_change(event_zone, Z_COLLECT, FALSE);
    zone_change(event_zone, Z_CALLERACCT, FALSE);

    if (zfill(event_zone, MAX_KFS_EVENTS) < MAX_KFS_EVENTS) {
        printf("fsevents: failed to pre-fill the event zone.\n");
    }
}

static void
lock_watch_table(void)
{
    lck_mtx_lock(&watch_table_lock);
}

static void
unlock_watch_table(void)
{
    lck_mtx_unlock(&watch_table_lock);
}

static void
lock_fs_event_list(void)
{
    lck_mtx_lock(&event_buf_lock);
}

static void
unlock_fs_event_list(void)
{
    lck_mtx_unlock(&event_buf_lock);
}

// forward prototype
static void release_event_ref(kfs_event *kfse);

static int
watcher_cares_about_dev(fs_event_watcher *watcher, dev_t dev)
{
    unsigned int i;

    // if devices_not_to_watch is NULL then we care about all
    // events from all devices
    if (watcher->devices_not_to_watch == NULL) {
        return 1;
    }

    for(i=0; i < watcher->num_devices; i++) {
        if (dev == watcher->devices_not_to_watch[i]) {
            // found a match! that means we do not
            // want events from this device.
            return 0;
        }
    }

    // if we're here it's not in the devices_not_to_watch[]
    // list so that means we do care about it
    return 1;
}


int
need_fsevent(int type, vnode_t vp)
{
    if (type >= 0 && type < FSE_MAX_EVENTS && fs_event_type_watchers[type] == 0)
        return (0);

    // events in /dev aren't really interesting...
    if (vp->v_tag == VT_DEVFS) {
        return (0);
    }

    return 1;
}
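
//
// Typical call-site pattern (an illustrative sketch, not a call site
// in this file): filesystems are expected to gate event generation on
// need_fsevent() so they can skip the cost of building an event that
// no watcher will consume, e.g.:
//
//     if (need_fsevent(FSE_DELETE, vp)) {
//         add_fsevent(FSE_DELETE, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
//     }
//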

static int
prefix_match_len(const char *str1, const char *str2)
{
    int len=0;

    while(*str1 && *str2 && *str1 == *str2) {
        len++;
        str1++;
        str2++;
    }

    if (*str1 == '\0' && *str2 == '\0') {
        len++;
    }

    return len;
}
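
//
// Worked example (illustrative): the return value is the length of the
// common prefix, plus one extra when the strings are identical, so an
// exact match always beats a plain prefix match:
//
//     prefix_match_len("/tmp/foo", "/tmp/foobar") == 8
//     prefix_match_len("/tmp/foo", "/tmp/foo")    == 9
//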


struct history_item {
    kfs_event *kfse;
    kfs_event *oldest_kfse;
    int        counter;
};

static int
compare_history_items(const void *_a, const void *_b)
{
    const struct history_item *a = (const struct history_item *)_a;
    const struct history_item *b = (const struct history_item *)_b;

    // we want a descending order
    return (b->counter - a->counter);
}

#define is_throw_away(x) ((x) == FSE_STAT_CHANGED || (x) == FSE_CONTENT_MODIFIED)


// Ways that an event can be reused:
//
// "combined" events mean that there were two events for
// the same vnode or path and we're combining both events
// into a single event.  The primary event gets a bit that
// marks it as having been combined.  The secondary event
// is essentially dropped and the kfse structure reused.
//
// "collapsed" means that multiple events below a given
// directory are collapsed into a single event.  in this
// case, the directory that we collapse into and all of
// its children must be re-scanned.
//
// "recycled" means that we're completely blowing away
// the event since there are other events that have info
// about the same vnode or path (and one of those other
// events will be marked as combined or collapsed as
// appropriate).
//
#define KFSE_COMBINED   0x0001
#define KFSE_COLLAPSED  0x0002
#define KFSE_RECYCLED   0x0004

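//
// Worked example (illustrative): if the list already holds an event for
// "/a/b/c" and a new event arrives for "/a/b/d", the two can be
// "collapsed" into a single event for the common parent "/a/b/" (note
// the trailing slash), flagged KFSE_CONTAINS_DROPPED_EVENTS so user
// space knows it has to re-scan that directory.
//
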
int num_dropped         = 0;
int num_combined_events = 0;
int num_added_to_parent = 0;
int num_parent_switch   = 0;
int num_recycled_rename = 0;

//
// NOTE: you must call lock_fs_event_list() before calling
//       this function.
//
static kfs_event *
find_an_event(const char *str, int len, kfs_event *do_not_reuse, int *reuse_type, int *longest_match_len)
{
    kfs_event *kfse, *best_kfse=NULL;

// this seems to be enough to find most duplicate events for the same vnode
#define MAX_HISTORY 12
    struct history_item history[MAX_HISTORY];
    int i;

    *longest_match_len = 0;
    *reuse_type = 0;

    memset(history, 0, sizeof(history));

    //
    // now walk the list of events and try to find the best match
    // for this event.  if we have a vnode, we look for an event
    // that already references the vnode.  if we don't find one
    // we'll also take the parent of this vnode (in which case it
    // will be marked as having dropped events within it).
    //
    // if we have a string we look for the longest match on the
    // path we have.
    //

    LIST_FOREACH(kfse, &kfse_list_head, kevent_list) {
        int match_len;

        //
        // don't look at events that are still in the process of being
        // created, have a null vnode ptr or rename/exchange events.
        //
        if ( (kfse->flags & KFSE_BEING_CREATED) || kfse->type == FSE_RENAME || kfse->type == FSE_EXCHANGE) {

            continue;
        }

        if (str != NULL) {
            if (kfse->len != 0 && kfse->str != NULL) {
                match_len = prefix_match_len(str, kfse->str);
                if (match_len > *longest_match_len) {
                    best_kfse = kfse;
                    *longest_match_len = match_len;
                }
            }
        }

        if (kfse == do_not_reuse) {
            continue;
        }

        for(i=0; i < MAX_HISTORY; i++) {
            if (history[i].kfse == NULL) {
                break;
            }

            //
            // do a quick check to see if we've got two simple events
            // that we can cheaply combine.  if the event we're looking
            // at and one of the events in the history table are for the
            // same path then we'll just mark the newer event as combined
            // and recycle the older event.
            //
            if (history[i].kfse->str == kfse->str) {

                OSBitOrAtomic16(KFSE_COMBINED_EVENTS, &kfse->flags);
                *reuse_type = KFSE_RECYCLED;
                history[i].kfse->flags |= KFSE_RECYCLED_EVENT;
                return history[i].kfse;
            }
        }

        if (i < MAX_HISTORY && history[i].kfse == NULL) {
            history[i].kfse = kfse;
            history[i].counter = 1;
        } else if (i >= MAX_HISTORY) {
            qsort(history, MAX_HISTORY, sizeof(struct history_item), compare_history_items);

            // pluck off the lowest guy if he's only got a count of 1
            if (history[MAX_HISTORY-1].counter == 1) {
                history[MAX_HISTORY-1].kfse = kfse;
            }
        }
    }


    if (str != NULL && best_kfse) {
        if (*longest_match_len <= 1) {
            // if the best match we had was "/" then basically we're toast...
            *longest_match_len = 0;
            best_kfse = NULL;
        } else if (*longest_match_len != len) {
            OSBitOrAtomic16(KFSE_CONTAINS_DROPPED_EVENTS, &best_kfse->flags);
            *reuse_type = KFSE_COLLAPSED;
        } else {
            OSBitOrAtomic16(KFSE_COMBINED_EVENTS, &best_kfse->flags);
            *reuse_type = KFSE_COMBINED;
        }
    }

    return best_kfse;
}


static struct timeval last_print;

//
// These variables are used to track coalescing multiple identical
// events for the same vnode/pathname.  If we get the same event
// type and same vnode/pathname as the previous event, we just drop
// the event since it's superfluous.  This improves some micro-
// benchmarks considerably and actually has a real-world impact on
// tests like a Finder copy where multiple stat-changed events can
// get coalesced.
//
static int      last_event_type=-1;
static void    *last_ptr=NULL;
static char     last_str[MAXPATHLEN];
static int      last_nlen=0;
static int      last_vid=-1;
static uint64_t last_coalesced_time=0;
static void    *last_event_ptr=NULL;
int             last_coalesced = 0;
static mach_timebase_info_data_t sTimebaseInfo = { 0, 0 };

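//
// Illustrative note (assuming the usual mach_absolute_time() timebase
// semantics): ticks become nanoseconds by scaling with numer/denom,
//
//     uint64_t ns = ticks * sTimebaseInfo.numer / sTimebaseInfo.denom;
//
// so the "elapsed < 1000000000" test in add_fsevent() below coalesces
// identical back-to-back events that arrive within one second.
//
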
int
add_fsevent(int type, vfs_context_t ctx, ...)
{
    struct proc      *p = vfs_context_proc(ctx);
    int               i, arg_type, skip_init=0, longest_match_len, ret;
    kfs_event        *kfse, *kfse_dest=NULL, *cur;
    fs_event_watcher *watcher;
    va_list           ap;
    int               error = 0, did_alloc=0, need_event_unlock = 0;
    dev_t             dev = 0;
    uint64_t          now, elapsed;
    int               reuse_type = 0;
    char             *pathbuff=NULL;
    int               pathbuff_len;


    va_start(ap, ctx);

    // ignore bogus event types..
    if (type < 0 || type >= FSE_MAX_EVENTS) {
        return EINVAL;
    }

    // if no one cares about this type of event, bail out
    if (fs_event_type_watchers[type] == 0) {
        va_end(ap);

        return 0;
    }

    now = mach_absolute_time();

    // find a free event and snag it for our use
    // NOTE: do not do anything that would block until
    //       the lock is dropped.
    lock_fs_event_list();

    //
    // check if this event is identical to the previous one...
    // (as long as it's not an event type that can never be the
    // same as a previous event)
    //
    if (type != FSE_CREATE_FILE && type != FSE_DELETE && type != FSE_RENAME && type != FSE_EXCHANGE && type != FSE_CHOWN) {
        void *ptr=NULL;
        int   vid=0, was_str=0, nlen=0;

        for(arg_type=va_arg(ap, int32_t); arg_type != FSE_ARG_DONE; arg_type=va_arg(ap, int32_t)) {
            switch(arg_type) {
                case FSE_ARG_VNODE: {
                    ptr = va_arg(ap, void *);
                    vid = vnode_vid((struct vnode *)ptr);
                    last_str[0] = '\0';
                    break;
                }
                case FSE_ARG_STRING: {
                    nlen = va_arg(ap, int32_t);
                    ptr = va_arg(ap, void *);
                    was_str = 1;
                    break;
                }
            }
            if (ptr != NULL) {
                break;
            }
        }

        if ( sTimebaseInfo.denom == 0 ) {
            (void) clock_timebase_info(&sTimebaseInfo);
        }

        elapsed = (now - last_coalesced_time);
        if (sTimebaseInfo.denom != sTimebaseInfo.numer) {
            if (sTimebaseInfo.denom == 1) {
                elapsed *= sTimebaseInfo.numer;
            } else {
                // this could overflow... the worst that will happen is that we'll
                // send (or not send) an extra event so I'm not going to worry about
                // doing the math right like dtrace_abs_to_nano() does.
                elapsed = (elapsed * sTimebaseInfo.numer) / (uint64_t)sTimebaseInfo.denom;
            }
        }

        if (type == last_event_type
            && (elapsed < 1000000000)
            &&
            ((vid && vid == last_vid && last_ptr == ptr)
             ||
             (last_str[0] && last_nlen == nlen && ptr && strcmp(last_str, ptr) == 0))
           ) {

            last_coalesced++;
            unlock_fs_event_list();
            va_end(ap);

            return 0;
        } else {
            last_ptr = ptr;
            if (was_str) {
                strlcpy(last_str, ptr, sizeof(last_str));
            }
            last_nlen = nlen;
            last_vid = vid;
            last_event_type = type;
            last_coalesced_time = now;
        }
    }
    va_start(ap, ctx);


    kfse = zalloc_noblock(event_zone);
    if (kfse && (type == FSE_RENAME || type == FSE_EXCHANGE)) {
        kfse_dest = zalloc_noblock(event_zone);
        if (kfse_dest == NULL) {
            did_alloc = 1;
            zfree(event_zone, kfse);
            kfse = NULL;
        }
    }


    if (kfse == NULL) {        // yikes! no free events
        int len=0;
        char *str;

        //
        // Figure out what kind of reference we have to the
        // file in this event.  This helps us find an event
        // to combine/collapse into to make room.
        //
        // If we have a rename or exchange event then we
        // don't want to go through the normal path, we
        // want to "steal" an event instead (which is what
        // find_an_event() will do if str is null).
        //
        arg_type = va_arg(ap, int32_t);
        if (type == FSE_RENAME || type == FSE_EXCHANGE) {
            str = NULL;
        } else if (arg_type == FSE_ARG_STRING) {
            len = va_arg(ap, int32_t);
            str = va_arg(ap, char *);
        } else if (arg_type == FSE_ARG_VNODE) {
            struct vnode *vp;

            vp = va_arg(ap, struct vnode *);
            pathbuff = get_pathbuff();
            pathbuff_len = MAXPATHLEN;
            if (vn_getpath(vp, pathbuff, &pathbuff_len) != 0 || pathbuff[0] == '\0') {
                release_pathbuff(pathbuff);
                pathbuff = NULL;
            }
            str = pathbuff;
        } else {
            str = NULL;
        }

        //
        // This will go through all events and find one that we
        // can combine with (hopefully), or "collapse" into (i.e
        // it has the same parent) or in the worst case we have
        // to "recycle" an event which means that it will combine
        // two other events and return us the now unused event.
        // failing all that, find_an_event() could still return
        // null and if it does then we have a catastrophic dropped
        // events scenario.
        //
        kfse = find_an_event(str, len, NULL, &reuse_type, &longest_match_len);

        if (kfse == NULL) {
          bail_early:

            unlock_fs_event_list();
            lock_watch_table();

            for(i=0; i < MAX_WATCHERS; i++) {
                watcher = watcher_table[i];
                if (watcher == NULL) {
                    continue;
                }

                watcher->flags |= WATCHER_DROPPED_EVENTS;
                fsevents_wakeup(watcher);
            }
            unlock_watch_table();

            {
                struct timeval current_tv;

                num_dropped++;

                // only print a message at most once every 10 seconds
                microuptime(&current_tv);
                if ((current_tv.tv_sec - last_print.tv_sec) > 10) {
                    int ii;
                    void *junkptr=zalloc_noblock(event_zone), *listhead=kfse_list_head.lh_first;

                    printf("add_fsevent: event queue is full! dropping events (num dropped events: %d; num events outstanding: %d).\n", num_dropped, num_events_outstanding);
                    printf("add_fsevent: kfse_list head %p ; num_pending_rename %d\n", listhead, num_pending_rename);
                    printf("add_fsevent: zalloc sez: %p\n", junkptr);
                    printf("add_fsevent: event_zone info: %d 0x%x\n", ((int *)event_zone)[0], ((int *)event_zone)[1]);
                    for(ii=0; ii < MAX_WATCHERS; ii++) {
                        if (watcher_table[ii] == NULL) {
                            continue;
                        }

                        printf("add_fsevent: watcher %p: num dropped %d rd %4d wr %4d q_size %4d flags 0x%x\n",
                               watcher_table[ii], watcher_table[ii]->num_dropped,
                               watcher_table[ii]->rd, watcher_table[ii]->wr,
                               watcher_table[ii]->eventq_size, watcher_table[ii]->flags);
                    }

                    last_print = current_tv;
                    if (junkptr) {
                        zfree(event_zone, junkptr);
                    }
                }
            }

            if (pathbuff) {
                release_pathbuff(pathbuff);
                pathbuff = NULL;
            }

            return ENOSPC;
        }

        if ((type == FSE_RENAME || type == FSE_EXCHANGE) && reuse_type != KFSE_RECYCLED) {
            panic("add_fsevent: type == %d but reuse type == %d!\n", type, reuse_type);
        } else if ((kfse->type == FSE_RENAME || kfse->type == FSE_EXCHANGE) && kfse->dest == NULL) {
            panic("add_fsevent: bogus kfse %p (type %d, but dest is NULL)\n", kfse, kfse->type);
        } else if (kfse->type == FSE_RENAME || kfse->type == FSE_EXCHANGE) {
            panic("add_fsevent: we should never re-use rename events (kfse %p reuse type %d)!\n", kfse, reuse_type);
        }

        if (reuse_type == KFSE_COLLAPSED) {
            if (str) {
                const char *tmp_ptr, *new_str;

                //
                // if we collapsed and have a string we have to chop off the
                // tail component of the pathname to get the parent.
                //
                // NOTE: it is VERY IMPORTANT that we leave the trailing slash
                //       on the pathname.  user-level code depends on this.
                //
                if (str[0] == '\0' || longest_match_len <= 1) {
                    printf("add_fsevent: strange state (str %s / longest_match_len %d)\n", str, longest_match_len);
                    if (longest_match_len < 0) {
                        panic("add_fsevent: longest_match_len %d\n", longest_match_len);
                    }
                }
                // chop off the tail component if it's not the
                // first character...
                if (longest_match_len > 1) {
                    str[longest_match_len] = '\0';
                } else if (longest_match_len == 0) {
                    longest_match_len = 1;
                }

                new_str = vfs_addname(str, longest_match_len, 0, 0);
                if (new_str == NULL || new_str[0] == '\0') {
                    panic("add_fsevent: longest match is strange (new_str %p).\n", new_str);
                }

                lck_rw_lock_exclusive(&event_handling_lock);

                kfse->len  = longest_match_len;
                tmp_ptr    = kfse->str;
                kfse->str  = new_str;
                kfse->ino  = 0;
                kfse->mode = 0;
                kfse->uid  = 0;
                kfse->gid  = 0;

                lck_rw_unlock_exclusive(&event_handling_lock);

                vfs_removename(tmp_ptr);
            } else {
                panic("add_fsevent: don't have a vnode or a string pointer (kfse %p)\n", kfse);
            }
        }

        if (reuse_type == KFSE_RECYCLED && (type == FSE_RENAME || type == FSE_EXCHANGE)) {

            // if we're recycling this kfse and we have a rename or
            // exchange event then we need to also get an event for
            // kfse_dest.
            //
            if (did_alloc) {
                // only happens if we allocated one but then failed
                // for kfse_dest (and thus free'd the first one we
                // allocated)
                kfse_dest = zalloc_noblock(event_zone);
                if (kfse_dest != NULL) {
                    memset(kfse_dest, 0, sizeof(kfs_event));
                    kfse_dest->refcount = 1;
                    OSBitOrAtomic16(KFSE_BEING_CREATED, &kfse_dest->flags);
                } else {
                    did_alloc = 0;
                }
            }

            if (kfse_dest == NULL) {
                int dest_reuse_type, dest_match_len;

                kfse_dest = find_an_event(NULL, 0, kfse, &dest_reuse_type, &dest_match_len);

                if (kfse_dest == NULL) {
                    // nothing we can do... gotta bail out
                    goto bail_early;
                }

                if (dest_reuse_type != KFSE_RECYCLED) {
                    panic("add_fsevent: type == %d but dest_reuse type == %d!\n", type, dest_reuse_type);
                }
            }
        }


        //
        // Here we check for some fast-path cases so that we can
        // jump over the normal initialization and just get on
        // with delivering the event.  These cases are when we're
        // combining/collapsing an event and so basically there is
        // no more work to do (aside from a little book-keeping)
        //
        if (str && kfse->len != 0) {
            kfse->abstime = now;
            OSAddAtomic(1, &kfse->refcount);
            skip_init = 1;

            if (reuse_type == KFSE_COMBINED) {
                num_combined_events++;
            } else if (reuse_type == KFSE_COLLAPSED) {
                num_added_to_parent++;
            }
        } else if (reuse_type != KFSE_RECYCLED) {
            panic("add_fsevent: I'm so confused! (reuse_type %d str %p kfse->len %d)\n",
                  reuse_type, str, kfse->len);
        }

        va_end(ap);


        if (skip_init) {
            if (kfse->refcount < 1) {
                panic("add_fsevent: line %d: kfse refcount %d but should be at least 1\n", __LINE__, kfse->refcount);
            }

            last_event_ptr = kfse;
            unlock_fs_event_list();
            goto normal_delivery;

        } else if (reuse_type == KFSE_RECYCLED || reuse_type == KFSE_COMBINED) {

            //
            // If we're here we have to clear out the kfs_event(s)
            // that we were given by find_an_event() and set it
            // up to be re-filled in by the normal code path.
            //
            va_start(ap, ctx);

            need_event_unlock = 1;
            lck_rw_lock_exclusive(&event_handling_lock);

            OSAddAtomic(1, &kfse->refcount);

            if (kfse->refcount < 1) {
                panic("add_fsevent: line %d: kfse refcount %d but should be at least 1\n", __LINE__, kfse->refcount);
            }

            if (kfse->len == 0) {
                panic("%s:%d: no more fref.vp\n", __FILE__, __LINE__);
                // vnode_rele_ext(kfse->fref.vp, O_EVTONLY, 0);
            } else {
                vfs_removename(kfse->str);
                kfse->len = 0;
            }
            kfse->str = NULL;

            if (kfse->kevent_list.le_prev != NULL) {
                num_events_outstanding--;
                if (kfse->type == FSE_RENAME) {
                    num_pending_rename--;
                }
                LIST_REMOVE(kfse, kevent_list);
                memset(&kfse->kevent_list, 0, sizeof(kfse->kevent_list));
            }

            kfse->flags = 0 | KFSE_RECYCLED_EVENT;

            if (kfse_dest) {
                OSAddAtomic(1, &kfse_dest->refcount);
                kfse_dest->flags = 0 | KFSE_RECYCLED_EVENT;

                if (did_alloc == 0) {
                    if (kfse_dest->len == 0) {
                        panic("%s:%d: no more fref.vp\n", __FILE__, __LINE__);
                        // vnode_rele_ext(kfse_dest->fref.vp, O_EVTONLY, 0);
                    } else {
                        vfs_removename(kfse_dest->str);
                        kfse_dest->len = 0;
                    }
                    kfse_dest->str = NULL;

                    if (kfse_dest->kevent_list.le_prev != NULL) {
                        num_events_outstanding--;
                        LIST_REMOVE(kfse_dest, kevent_list);
                        memset(&kfse_dest->kevent_list, 0, sizeof(kfse_dest->kevent_list));
                    }

                    if (kfse_dest->dest) {
                        panic("add_fsevent: should never recycle a rename event! kfse %p\n", kfse);
                    }
                }
            }

            OSBitOrAtomic16(KFSE_BEING_CREATED, &kfse->flags);
            if (kfse_dest) {
                OSBitOrAtomic16(KFSE_BEING_CREATED, &kfse_dest->flags);
            }

            goto process_normally;
        }
    }

    if (reuse_type != 0) {
        panic("fsevents: we have a reuse_type (%d) but are about to clear out kfse %p\n", reuse_type, kfse);
    }

    //
    // we only want to do this for brand new events, not
    // events which have been recycled.
    //
    memset(kfse, 0, sizeof(kfs_event));
    kfse->refcount = 1;
    OSBitOrAtomic16(KFSE_BEING_CREATED, &kfse->flags);

  process_normally:
    last_event_ptr = kfse;
    kfse->type    = type;
    kfse->abstime = now;
    kfse->pid     = p->p_pid;
    if (type == FSE_RENAME || type == FSE_EXCHANGE) {
        if (need_event_unlock == 0) {
            memset(kfse_dest, 0, sizeof(kfs_event));
            kfse_dest->refcount = 1;
            OSBitOrAtomic16(KFSE_BEING_CREATED, &kfse_dest->flags);
        }
        kfse_dest->type    = type;
        kfse_dest->pid     = p->p_pid;
        kfse_dest->abstime = now;

        kfse->dest = kfse_dest;
    }

    num_events_outstanding++;
    if (kfse->type == FSE_RENAME) {
        num_pending_rename++;
    }
    LIST_INSERT_HEAD(&kfse_list_head, kfse, kevent_list);

    if (kfse->refcount < 1) {
        panic("add_fsevent: line %d: kfse refcount %d but should be at least 1\n", __LINE__, kfse->refcount);
    }

    unlock_fs_event_list();    // at this point it's safe to unlock

    //
    // now process the arguments passed in and copy them into
    // the kfse
    //
    if (need_event_unlock == 0) {
        lck_rw_lock_shared(&event_handling_lock);
    }

    cur = kfse;
    for(arg_type=va_arg(ap, int32_t); arg_type != FSE_ARG_DONE; arg_type=va_arg(ap, int32_t))

        switch(arg_type) {
            case FSE_ARG_VNODE: {
                // this expands out into multiple arguments to the client
                struct vnode *vp;
                struct vnode_attr va;

                if (kfse->str != NULL) {
                    cur = kfse_dest;
                }

                vp = va_arg(ap, struct vnode *);
                if (vp == NULL) {
                    panic("add_fsevent: you can't pass me a NULL vnode ptr (type %d)!\n",
                          cur->type);
                }

                VATTR_INIT(&va);
                VATTR_WANTED(&va, va_fsid);
                VATTR_WANTED(&va, va_fileid);
                VATTR_WANTED(&va, va_mode);
                VATTR_WANTED(&va, va_uid);
                VATTR_WANTED(&va, va_gid);
                if ((ret = vnode_getattr(vp, &va, vfs_context_kernel())) != 0) {
                    // printf("add_fsevent: failed to getattr on vp %p (%d)\n", cur->fref.vp, ret);
                    cur->str = NULL;
                    error = EINVAL;
                    if (need_event_unlock == 0) {
                        // then we only grabbed it shared
                        lck_rw_unlock_shared(&event_handling_lock);
                    }
                    goto clean_up;
                }

                cur->dev  = dev = (dev_t)va.va_fsid;
                cur->ino  = (ino64_t)va.va_fileid;
                cur->mode = (int32_t)vnode_vttoif(vnode_vtype(vp)) | va.va_mode;
                cur->uid  = va.va_uid;
                cur->gid  = va.va_gid;

                // if we haven't gotten the path yet, get it.
                if (pathbuff == NULL) {
                    pathbuff = get_pathbuff();
                    pathbuff_len = MAXPATHLEN;

                    pathbuff[0] = '\0';
                    if ((ret = vn_getpath(vp, pathbuff, &pathbuff_len)) != 0 || pathbuff[0] == '\0') {
                        struct vnode *orig_vp = vp;

                        if (ret != ENOSPC) {
                            printf("add_fsevent: unable to get path for vp %p (%s; ret %d; type %d)\n",
                                   vp, vp->v_name ? vp->v_name : "-UNKNOWN-FILE", ret, type);
                        }

                        cur->flags |= KFSE_CONTAINS_DROPPED_EVENTS;

                        do {
                            if (vp->v_parent != NULL) {
                                vp = vp->v_parent;
                            } else if (vp->v_mount) {
                                strlcpy(pathbuff, vp->v_mount->mnt_vfsstat.f_mntonname, MAXPATHLEN);
                                break;
                            } else {
                                vp = NULL;
                            }

                            if (vp == NULL) {
                                break;
                            }

                            pathbuff_len = MAXPATHLEN;
                            ret = vn_getpath(vp, pathbuff, &pathbuff_len);
                        } while (ret == ENOSPC);

                        if (ret != 0 || vp == NULL) {
                            printf("add_fsevent: unable to get a path for vp %p. dropping the event.\n", orig_vp);
                            error = ENOENT;
                            if (need_event_unlock == 0) {
                                // then we only grabbed it shared
                                lck_rw_unlock_shared(&event_handling_lock);
                            }
                            goto clean_up;
                        }
                    }
                }

                // store the path by adding it to the global string table
                cur->len = pathbuff_len;
                cur->str = vfs_addname(pathbuff, pathbuff_len, 0, 0);
                if (cur->str == NULL || cur->str[0] == '\0') {
                    panic("add_fsevent: was not able to add path %s to event %p.\n", pathbuff, cur);
                }

                release_pathbuff(pathbuff);
                pathbuff = NULL;

                break;
            }

            case FSE_ARG_FINFO: {
                fse_info *fse;

                fse = va_arg(ap, fse_info *);

                cur->dev  = dev = (dev_t)fse->dev;
                cur->ino  = (ino64_t)fse->ino;
                cur->mode = (int32_t)fse->mode;
                cur->uid  = (uid_t)fse->uid;
                cur->gid  = (uid_t)fse->gid;
                // if it's a hard-link and this is the last link, flag it
                if ((fse->mode & FSE_MODE_HLINK) && fse->nlink == 0) {
                    cur->mode |= FSE_MODE_LAST_HLINK;
                }
                if (cur->mode & FSE_TRUNCATED_PATH) {
                    cur->flags |= KFSE_CONTAINS_DROPPED_EVENTS;
                    cur->mode &= ~FSE_TRUNCATED_PATH;
                }
                break;
            }

            case FSE_ARG_STRING:
                if (kfse->str != NULL) {
                    cur = kfse_dest;
                }

                cur->len = (int16_t)(va_arg(ap, int32_t) & 0x7fff);
                if (cur->len >= 1) {
                    cur->str = vfs_addname(va_arg(ap, char *), cur->len, 0, 0);
                } else {
                    printf("add_fsevent: funny looking string length: %d\n", (int)cur->len);
                    cur->len = 2;
                    cur->str = vfs_addname("/", cur->len, 0, 0);
                }
                if (cur->str[0] == 0) {
                    printf("add_fsevent: bogus looking string (len %d)\n", cur->len);
                }
                break;

            default:
                printf("add_fsevent: unknown type %d\n", arg_type);
                // just skip one 32-bit word and hope we sync up...
                (void)va_arg(ap, int32_t);
        }

    va_end(ap);

    OSBitAndAtomic16(~KFSE_BEING_CREATED, &kfse->flags);
    if (kfse_dest) {
        OSBitAndAtomic16(~KFSE_BEING_CREATED, &kfse_dest->flags);
    }

    if (need_event_unlock == 0) {
        // then we only grabbed it shared
        lck_rw_unlock_shared(&event_handling_lock);
    }

  normal_delivery:
    // unlock this here so we don't hold it across the
    // event delivery loop.
    if (need_event_unlock) {
        lck_rw_unlock_exclusive(&event_handling_lock);
        need_event_unlock = 0;
    }

    //
    // now we have to go and let everyone who is interested in
    // this type of event know about it
    //
    lock_watch_table();

    for(i=0; i < MAX_WATCHERS; i++) {
        watcher = watcher_table[i];
        if (watcher == NULL) {
            continue;
        }

        if (   watcher->event_list[type] == FSE_REPORT
            && watcher_cares_about_dev(watcher, dev)) {

            if (watcher_add_event(watcher, kfse) != 0) {
                watcher->num_dropped++;
            }
        }

        if (kfse->refcount < 1) {
            panic("add_fsevent: line %d: kfse refcount %d but should be at least 1\n", __LINE__, kfse->refcount);
        }
    }

    unlock_watch_table();

  clean_up:
    // have to check if this needs to be unlocked (in
    // case we came here from an error handling path)
    if (need_event_unlock) {
        lck_rw_unlock_exclusive(&event_handling_lock);
        need_event_unlock = 0;
    }

    if (pathbuff) {
        release_pathbuff(pathbuff);
        pathbuff = NULL;
    }

    release_event_ref(kfse);

    return error;
}


static void
release_event_ref(kfs_event *kfse)
{
    int old_refcount;
    kfs_event copy, dest_copy;


    old_refcount = OSAddAtomic(-1, &kfse->refcount);
    if (old_refcount > 1) {
        return;
    }

    lock_fs_event_list();
    if (last_event_ptr == kfse) {
        last_event_ptr = NULL;
        last_event_type = -1;
        last_coalesced_time = 0;
    }

    if (kfse->refcount < 0) {
        panic("release_event_ref: bogus kfse refcount %d\n", kfse->refcount);
    }

    if (kfse->refcount > 0 || kfse->type == FSE_INVALID) {
        // This is very subtle.  Either of these conditions can
        // be true if an event got recycled while we were waiting
        // on the fs_event_list lock or the event got recycled,
        // delivered, _and_ free'd by someone else while we were
        // waiting on the fs event list lock.  In either case
        // we need to just unlock the list and return without
        // doing anything because if the refcount is > 0 then
        // someone else will take care of free'ing it and when
        // the kfse->type is invalid then someone else already
        // has handled free'ing the event (while we were blocked
        // on the event list lock).
        //
        unlock_fs_event_list();
        return;
    }

    //
    // make a copy of this so we can free things without
    // holding the fs_event_buf lock
    //
    copy = *kfse;
    if (kfse->dest && OSAddAtomic(-1, &kfse->dest->refcount) == 1) {
        dest_copy = *kfse->dest;
    } else {
        dest_copy.str  = NULL;
        dest_copy.len  = 0;
        dest_copy.type = FSE_INVALID;
    }

    kfse->pid = kfse->type;              // save this off for debugging...
    kfse->uid = (uid_t)(long)kfse->str;  // save this off for debugging...
    kfse->gid = (gid_t)(long)current_thread();

    kfse->str = (char *)0xdeadbeef;      // XXXdbg - catch any cheaters...

    if (dest_copy.type != FSE_INVALID) {
        kfse->dest->str  = (char *)0xbadc0de;   // XXXdbg - catch any cheaters...
        kfse->dest->type = FSE_INVALID;

        if (kfse->dest->kevent_list.le_prev != NULL) {
            num_events_outstanding--;
            LIST_REMOVE(kfse->dest, kevent_list);
            memset(&kfse->dest->kevent_list, 0xa5, sizeof(kfse->dest->kevent_list));
        }

        zfree(event_zone, kfse->dest);
    }

    // mark this fsevent as invalid
    {
        int otype;

        otype = kfse->type;
        kfse->type = FSE_INVALID;

        if (kfse->kevent_list.le_prev != NULL) {
            num_events_outstanding--;
            if (otype == FSE_RENAME) {
                num_pending_rename--;
            }
            LIST_REMOVE(kfse, kevent_list);
            memset(&kfse->kevent_list, 0, sizeof(kfse->kevent_list));
        }
    }

    zfree(event_zone, kfse);

    unlock_fs_event_list();

    // if we have a pointer in the union
    if (copy.str) {
        if (copy.len == 0) {    // and it's not a string
            panic("%s:%d: no more fref.vp!\n", __FILE__, __LINE__);
            // vnode_rele_ext(copy.fref.vp, O_EVTONLY, 0);
        } else {                // else it's a string
            vfs_removename(copy.str);
        }
    }

    if (dest_copy.type != FSE_INVALID && dest_copy.str) {
        if (dest_copy.len == 0) {
            panic("%s:%d: no more fref.vp!\n", __FILE__, __LINE__);
            // vnode_rele_ext(dest_copy.fref.vp, O_EVTONLY, 0);
        } else {
            vfs_removename(dest_copy.str);
        }
    }
}


static int
add_watcher(int8_t *event_list, int32_t num_events, int32_t eventq_size, fs_event_watcher **watcher_out)
{
    int i;
    fs_event_watcher *watcher;

    if (eventq_size <= 0 || eventq_size > 100*MAX_KFS_EVENTS) {
        eventq_size = MAX_KFS_EVENTS;
    }

    // Note: the event_queue follows the fs_event_watcher struct
    //       in memory so we only have to do one allocation
    MALLOC(watcher,
           fs_event_watcher *,
           sizeof(fs_event_watcher) + eventq_size * sizeof(kfs_event *),
           M_TEMP, M_WAITOK);
    if (watcher == NULL) {
        return ENOMEM;
    }

    watcher->event_list   = event_list;
    watcher->num_events   = num_events;
    watcher->devices_not_to_watch = NULL;
    watcher->num_devices  = 0;
    watcher->flags        = 0;
    watcher->event_queue  = (kfs_event **)&watcher[1];
    watcher->eventq_size  = eventq_size;
    watcher->rd           = 0;
    watcher->wr           = 0;
    watcher->blockers     = 0;
    watcher->num_readers  = 0;
    watcher->max_event_id = 0;
    watcher->fseh         = NULL;

    watcher->num_dropped  = 0;    // XXXdbg - debugging

    lock_watch_table();

    // now update the global list of who's interested in
    // events of a particular type...
    for(i=0; i < num_events; i++) {
        if (event_list[i] != FSE_IGNORE && i < FSE_MAX_EVENTS) {
            fs_event_type_watchers[i]++;
        }
    }

    for(i=0; i < MAX_WATCHERS; i++) {
        if (watcher_table[i] == NULL) {
            watcher->my_id   = i;
            watcher_table[i] = watcher;
            break;
        }
    }

    if (i >= MAX_WATCHERS) {    // ">=": the loop leaves i == MAX_WATCHERS when the table is full
        printf("fsevents: too many watchers!\n");
        unlock_watch_table();
        FREE(watcher, M_TEMP);  // don't leak the watcher we just allocated
        return ENOSPC;
    }

    unlock_watch_table();

    *watcher_out = watcher;

    return 0;
}
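
//
// Sketch of how a watcher gets created (illustrative only -- error
// handling is omitted, and the actual caller is the /dev/fsevents
// clone path rather than this snippet):
//
//     int8_t *event_list;
//     fs_event_watcher *watcher;
//
//     MALLOC(event_list, int8_t *, FSE_MAX_EVENTS, M_TEMP, M_WAITOK);
//     memset(event_list, FSE_REPORT, FSE_MAX_EVENTS);   // report everything
//     add_watcher(event_list, FSE_MAX_EVENTS, MAX_KFS_EVENTS, &watcher);
//
// Note that the watcher takes ownership of event_list: remove_watcher()
// frees it with FREE(..., M_TEMP).
//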


static void
remove_watcher(fs_event_watcher *target)
{
    int i, j, counter=0;
    fs_event_watcher *watcher;
    kfs_event *kfse;

    lock_watch_table();

    for(j=0; j < MAX_WATCHERS; j++) {
        watcher = watcher_table[j];
        if (watcher != target) {
            continue;
        }

        watcher_table[j] = NULL;

        for(i=0; i < watcher->num_events; i++) {
            if (watcher->event_list[i] != FSE_IGNORE && i < FSE_MAX_EVENTS) {
                fs_event_type_watchers[i]--;
            }
        }

        if (watcher->flags & WATCHER_CLOSING) {
            unlock_watch_table();
            return;
        }

        // printf("fsevents: removing watcher %p (rd %d wr %d num_readers %d flags 0x%x)\n", watcher, watcher->rd, watcher->wr, watcher->num_readers, watcher->flags);
        watcher->flags |= WATCHER_CLOSING;
        OSAddAtomic(1, &watcher->num_readers);

        unlock_watch_table();

        while (watcher->num_readers > 1 && counter++ < 5000) {
            fsevents_wakeup(watcher);    // in case they're asleep

            tsleep(watcher, PRIBIO, "fsevents-close", 1);
        }
        if (counter++ >= 5000) {
            // printf("fsevents: close: still have readers! (%d)\n", watcher->num_readers);
            panic("fsevents: close: still have readers! (%d)\n", watcher->num_readers);
        }

        // drain the event_queue
        while(watcher->rd != watcher->wr) {
            lck_rw_lock_shared(&event_handling_lock);

            kfse = watcher->event_queue[watcher->rd];
            if (kfse->type == FSE_INVALID || kfse->refcount < 1) {
                panic("remove_watcher: bogus kfse %p during cleanup (type %d refcount %d rd %d wr %d)\n", kfse, kfse->type, kfse->refcount, watcher->rd, watcher->wr);
            }

            lck_rw_unlock_shared(&event_handling_lock);

            watcher->rd = (watcher->rd+1) % watcher->eventq_size;

            if (kfse != NULL) {
                release_event_ref(kfse);
            }
        }

        if (watcher->event_list) {
            FREE(watcher->event_list, M_TEMP);
            watcher->event_list = NULL;
        }
        if (watcher->devices_not_to_watch) {
            FREE(watcher->devices_not_to_watch, M_TEMP);
            watcher->devices_not_to_watch = NULL;
        }
        FREE(watcher, M_TEMP);

        return;
    }

    unlock_watch_table();
}


#define EVENT_DELAY_IN_MS  10
static thread_call_t event_delivery_timer = NULL;
static int timer_set = 0;


static void
delayed_event_delivery(__unused void *param0, __unused void *param1)
{
    int i;

    lock_watch_table();

    for(i=0; i < MAX_WATCHERS; i++) {
        if (watcher_table[i] != NULL && watcher_table[i]->rd != watcher_table[i]->wr) {
            fsevents_wakeup(watcher_table[i]);
        }
    }

    timer_set = 0;

    unlock_watch_table();
}


//
// The watch table must be locked before calling this function.
//
static void
schedule_event_wakeup(void)
{
    uint64_t deadline;

    if (event_delivery_timer == NULL) {
        event_delivery_timer = thread_call_allocate((thread_call_func_t)delayed_event_delivery, NULL);
    }

    clock_interval_to_deadline(EVENT_DELAY_IN_MS, 1000 * 1000, &deadline);

    thread_call_enter_delayed(event_delivery_timer, deadline);
    timer_set = 1;
}



#define MAX_NUM_PENDING  16

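//
// Delivery policy in brief (a descriptive summary of the code above
// and below): watcher_add_event() wakes a watcher immediately only
// once more than MAX_NUM_PENDING (16) events are queued; otherwise it
// arms the one-shot thread call so that anything pending is flushed
// within EVENT_DELAY_IN_MS (10 ms).
//
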
1473//
1474// NOTE: the watch table must be locked before calling
1475// this routine.
1476//
1477static int
1478watcher_add_event(fs_event_watcher *watcher, kfs_event *kfse)
1479{
b0d623f7
A
1480 if (kfse->abstime > watcher->max_event_id) {
1481 watcher->max_event_id = kfse->abstime;
1482 }
1483
2d21ac55
A
1484 if (((watcher->wr + 1) % watcher->eventq_size) == watcher->rd) {
1485 watcher->flags |= WATCHER_DROPPED_EVENTS;
1486 fsevents_wakeup(watcher);
1487 return ENOSPC;
1488 }
1489
b0d623f7 1490 OSAddAtomic(1, &kfse->refcount);
2d21ac55
A
1491 watcher->event_queue[watcher->wr] = kfse;
1492 OSSynchronizeIO();
1493 watcher->wr = (watcher->wr + 1) % watcher->eventq_size;
1494
1495 //
1496 // wake up the watcher if there are more than MAX_NUM_PENDING events.
1497 // otherwise schedule a timer (if one isn't already set) which will
1498 // send any pending events if no more are received in the next
1499 // EVENT_DELAY_IN_MS milli-seconds.
1500 //
1501 if ( (watcher->rd < watcher->wr && (watcher->wr - watcher->rd) > MAX_NUM_PENDING)
1502 || (watcher->rd > watcher->wr && (watcher->wr + watcher->eventq_size - watcher->rd) > MAX_NUM_PENDING)) {
1503
1504 fsevents_wakeup(watcher);
1505
1506 } else if (timer_set == 0) {
1507
1508 schedule_event_wakeup();
1509 }
1510
1511 return 0;
1512}
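
//
// Illustrative note (assuming the conventional ring-buffer reading of
// the rd/wr indices above): one slot is always kept open so that
// rd == wr unambiguously means "empty", which is why a queue of
// eventq_size N holds at most N-1 undelivered events:
//
//     pending = (wr - rd + eventq_size) % eventq_size;   // 0 .. N-1
//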

static int
fill_buff(uint16_t type, int32_t size, const void *data,
          char *buff, int32_t *_buff_idx, int32_t buff_sz,
          struct uio *uio)
{
    int32_t  amt, error = 0, buff_idx = *_buff_idx;
    uint16_t tmp;

    //
    // the +1 on the size is to guarantee that the main data
    // copy loop will always copy at least 1 byte
    //
    if ((buff_sz - buff_idx) <= (int)(2*sizeof(uint16_t) + 1)) {
        if (buff_idx > uio_resid(uio)) {
            error = ENOSPC;
            goto get_out;
        }

        error = uiomove(buff, buff_idx, uio);
        if (error) {
            goto get_out;
        }
        buff_idx = 0;
    }

    // copy out the header (type & size)
    memcpy(&buff[buff_idx], &type, sizeof(uint16_t));
    buff_idx += sizeof(uint16_t);

    tmp = size & 0xffff;
    memcpy(&buff[buff_idx], &tmp, sizeof(uint16_t));
    buff_idx += sizeof(uint16_t);

    // now copy the body of the data, flushing along the way
    // if the buffer fills up.
    //
    while(size > 0) {
        amt = (size < (buff_sz - buff_idx)) ? size : (buff_sz - buff_idx);
        memcpy(&buff[buff_idx], data, amt);

        size     -= amt;
        buff_idx += amt;
        data      = (const char *)data + amt;
        if (size > (buff_sz - buff_idx)) {
            if (buff_idx > uio_resid(uio)) {
                error = ENOSPC;
                goto get_out;
            }
            error = uiomove(buff, buff_idx, uio);
            if (error) {
                goto get_out;
            }
            buff_idx = 0;
        }

        if (amt == 0) {    // just in case...
            break;
        }
    }

  get_out:
    *_buff_idx = buff_idx;

    return error;
}
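
//
// Byte layout produced by fill_buff() (inferred from the code above,
// not a formal spec): each argument becomes a
//
//     <uint16_t type> <uint16_t size> <size bytes of payload>
//
// record in the reader's buffer, so e.g. an FSE_ARG_DEV carrying a
// 4-byte dev_t occupies 8 bytes in the stream.
//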

static int copy_out_kfse(fs_event_watcher *watcher, kfs_event *kfse, struct uio *uio) __attribute__((noinline));

static int
copy_out_kfse(fs_event_watcher *watcher, kfs_event *kfse, struct uio *uio)
{
    int       error;
    uint16_t  tmp16;
    int32_t   type;
    kfs_event *cur;
    char      evbuff[512];
    int       evbuff_idx = 0;

    if (kfse->type == FSE_INVALID) {
        panic("fsevents: copy_out_kfse: asked to copy out an invalid event (kfse %p, refcount %d fref ptr %p)\n", kfse, kfse->refcount, kfse->str);
    }

    if (kfse->flags & KFSE_BEING_CREATED) {
        return 0;
    }

    if (kfse->type == FSE_RENAME && kfse->dest == NULL) {
        //
        // This can happen if an event gets recycled but we had a
        // pointer to it in our event queue.  The event is the
        // destination of a rename which we'll process separately
        // (that is, another kfse points to this one so it's ok
        // to skip this guy because we'll process it when we process
        // the other one)
        error = 0;
        goto get_out;
    }

    if (watcher->flags & WATCHER_WANTS_EXTENDED_INFO) {

        type = (kfse->type & 0xfff);

        if (kfse->flags & KFSE_CONTAINS_DROPPED_EVENTS) {
            type |= (FSE_CONTAINS_DROPPED_EVENTS << FSE_FLAG_SHIFT);
        } else if (kfse->flags & KFSE_COMBINED_EVENTS) {
            type |= (FSE_COMBINED_EVENTS << FSE_FLAG_SHIFT);
        }

    } else {
        type = (int32_t)kfse->type;
    }

    // copy out the type of the event
    memcpy(evbuff, &type, sizeof(int32_t));
    evbuff_idx += sizeof(int32_t);

    // copy out the pid of the person that generated the event
    memcpy(&evbuff[evbuff_idx], &kfse->pid, sizeof(pid_t));
    evbuff_idx += sizeof(pid_t);

    cur = kfse;

  copy_again:

    if (cur->str == NULL || cur->str[0] == '\0') {
        printf("copy_out_kfse:2: empty/short path (%s)\n", cur->str);
        error = fill_buff(FSE_ARG_STRING, 2, "/", evbuff, &evbuff_idx, sizeof(evbuff), uio);
    } else {
        error = fill_buff(FSE_ARG_STRING, cur->len, cur->str, evbuff, &evbuff_idx, sizeof(evbuff), uio);
    }
    if (error != 0) {
        goto get_out;
    }

    if (cur->dev == 0 && cur->ino == 0) {
        // this happens when a rename event happens and the
        // destination of the rename did not previously exist.
        // it thus has no other file info so skip copying out
        // the stuff below since it isn't initialized
        goto done;
    }


    if (watcher->flags & WATCHER_WANTS_COMPACT_EVENTS) {
        int32_t finfo_size;

        finfo_size = sizeof(dev_t) + sizeof(ino64_t) + sizeof(int32_t) + sizeof(uid_t) + sizeof(gid_t);
        error = fill_buff(FSE_ARG_FINFO, finfo_size, &cur->ino, evbuff, &evbuff_idx, sizeof(evbuff), uio);
        if (error != 0) {
            goto get_out;
        }
    } else {
        ino_t ino;

        error = fill_buff(FSE_ARG_DEV, sizeof(dev_t), &cur->dev, evbuff, &evbuff_idx, sizeof(evbuff), uio);
        if (error != 0) {
            goto get_out;
        }

        ino = (ino_t)cur->ino;
        error = fill_buff(FSE_ARG_INO, sizeof(ino_t), &ino, evbuff, &evbuff_idx, sizeof(evbuff), uio);
        if (error != 0) {
            goto get_out;
        }

        error = fill_buff(FSE_ARG_MODE, sizeof(int32_t), &cur->mode, evbuff, &evbuff_idx, sizeof(evbuff), uio);
        if (error != 0) {
            goto get_out;
        }

        error = fill_buff(FSE_ARG_UID, sizeof(uid_t), &cur->uid, evbuff, &evbuff_idx, sizeof(evbuff), uio);
        if (error != 0) {
            goto get_out;
        }

        error = fill_buff(FSE_ARG_GID, sizeof(gid_t), &cur->gid, evbuff, &evbuff_idx, sizeof(evbuff), uio);
        if (error != 0) {
            goto get_out;
        }
    }


    if (cur->dest) {
        cur = cur->dest;
        goto copy_again;
    }

  done:
    // very last thing: the time stamp
    error = fill_buff(FSE_ARG_INT64, sizeof(uint64_t), &cur->abstime, evbuff, &evbuff_idx, sizeof(evbuff), uio);
    if (error != 0) {
        goto get_out;
    }

    // check if the FSE_ARG_DONE will fit
    if (sizeof(uint16_t) > sizeof(evbuff) - evbuff_idx) {
        if (evbuff_idx > uio_resid(uio)) {
            error = ENOSPC;
            goto get_out;
        }
        error = uiomove(evbuff, evbuff_idx, uio);
        if (error) {
            goto get_out;
        }
        evbuff_idx = 0;
    }

    tmp16 = FSE_ARG_DONE;
    memcpy(&evbuff[evbuff_idx], &tmp16, sizeof(uint16_t));
    evbuff_idx += sizeof(uint16_t);

    // flush any remaining data in the buffer (and hopefully
    // in most cases this is the only uiomove we'll do)
    if (evbuff_idx > uio_resid(uio)) {
        error = ENOSPC;
    } else {
        error = uiomove(evbuff, evbuff_idx, uio);
    }

  get_out:

    return error;
}
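
//
// Putting it together, one event as seen by a reader of /dev/fsevents
// looks roughly like this (inferred from copy_out_kfse() above):
//
//     <int32_t type> <pid_t pid>
//     FSE_ARG_STRING record with the path
//     FSE_ARG_DEV/INO/MODE/UID/GID records (or one FSE_ARG_FINFO
//         record when WATCHER_WANTS_COMPACT_EVENTS is set)
//     ... the same again for the dest of a rename/exchange ...
//     FSE_ARG_INT64 record with the timestamp, then FSE_ARG_DONE
//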
1738
1739
2d21ac55 1740
91447636
A
1741static int
1742fmod_watch(fs_event_watcher *watcher, struct uio *uio)
1743{
b0d623f7
A
1744 int error=0;
1745 user_ssize_t last_full_event_resid;
91447636 1746 kfs_event *kfse;
91447636
A
1747 uint16_t tmp16;
1748
91447636
A
1749 last_full_event_resid = uio_resid(uio);
1750
1751 // need at least 2048 bytes of space (maxpathlen + 1 event buf)
1752 if (uio_resid(uio) < 2048 || watcher == NULL) {
1753 return EINVAL;
1754 }
1755
2d21ac55
A
1756 if (watcher->flags & WATCHER_CLOSING) {
1757 return 0;
1758 }
1759
    if (OSAddAtomic(1, &watcher->num_readers) != 0) {
        // don't allow multiple threads to read from the fd at the same time
        OSAddAtomic(-1, &watcher->num_readers);
        return EAGAIN;
    }

    if (watcher->rd == watcher->wr) {
        if (watcher->flags & WATCHER_CLOSING) {
            OSAddAtomic(-1, &watcher->num_readers);
            return 0;
        }
        OSAddAtomic(1, &watcher->blockers);

        // there's nothing to do, go to sleep
        error = tsleep((caddr_t)watcher, PUSER|PCATCH, "fsevents_empty", 0);

        OSAddAtomic(-1, &watcher->blockers);

        if (error != 0 || (watcher->flags & WATCHER_CLOSING)) {
            OSAddAtomic(-1, &watcher->num_readers);
            return error;
        }
    }

    // if we dropped events, return that as an event first
    if (watcher->flags & WATCHER_DROPPED_EVENTS) {
        int32_t val = FSE_EVENTS_DROPPED;

        error = uiomove((caddr_t)&val, sizeof(int32_t), uio);
        if (error == 0) {
            val = 0;                // a fake pid
            error = uiomove((caddr_t)&val, sizeof(int32_t), uio);

            tmp16 = FSE_ARG_DONE;   // makes it a consistent msg
            error = uiomove((caddr_t)&tmp16, sizeof(int16_t), uio);

            last_full_event_resid = uio_resid(uio);
        }

        if (error) {
            OSAddAtomic(-1, &watcher->num_readers);
            return error;
        }

        watcher->flags &= ~WATCHER_DROPPED_EVENTS;
    }

    while (uio_resid(uio) > 0 && watcher->rd != watcher->wr) {
        if (watcher->flags & WATCHER_CLOSING) {
            break;
        }

        //
        // check if the event is something of interest to us
        // (since it may have been recycled/reused and changed
        // its type or which device it is for)
        //
        lck_rw_lock_shared(&event_handling_lock);

        kfse = watcher->event_queue[watcher->rd];
        if (kfse->type == FSE_INVALID || kfse->refcount < 1) {
            panic("fmod_watch: someone left me a bogus kfse %p (type %d refcount %d rd %d wr %d)\n", kfse, kfse->type, kfse->refcount, watcher->rd, watcher->wr);
        }

        if (watcher->event_list[kfse->type] == FSE_REPORT && watcher_cares_about_dev(watcher, kfse->dev)) {

            if (last_event_ptr == kfse) {
                last_event_ptr = NULL;
                last_event_type = -1;
                last_coalesced_time = 0;
            }
            error = copy_out_kfse(watcher, kfse, uio);
            if (error != 0) {
                // if an event won't fit, or we hit an error while copying
                // it out, back up to the last full event and bail out.
                // if the error was ENOENT we can continue regular
                // processing; otherwise we should unlock things and return.
                uio_setresid(uio, last_full_event_resid);
                if (error != ENOENT) {
                    lck_rw_unlock_shared(&event_handling_lock);
                    error = 0;
                    goto get_out;
                }
            }

            last_full_event_resid = uio_resid(uio);
        }

        lck_rw_unlock_shared(&event_handling_lock);

        watcher->rd = (watcher->rd + 1) % watcher->eventq_size;
        OSSynchronizeIO();

        if (kfse->type == FSE_INVALID || kfse->refcount < 1) {
            panic("fmod_watch:2: my kfse became bogus! kfse %p (type %d refcount %d rd %d wr %d)\n", kfse, kfse->type, kfse->refcount, watcher->rd, watcher->wr);
        }

        release_event_ref(kfse);
    }

get_out:
    OSAddAtomic(-1, &watcher->num_readers);

    return error;
}
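
/*
 * Illustrative sketch, not compiled into the kernel: roughly how a
 * userspace client might consume the stream fmod_watch() produces.
 * Per the code above, a dropped-events record is an int32 type
 * (FSE_EVENTS_DROPPED), an int32 fake pid of 0, and an int16
 * FSE_ARG_DONE terminator.  The buffer size and the skeleton of the
 * normal-event parsing are assumptions, not a definitive client.
 */
#if 0
static void
drain_fsevents_fd(int fsevents_fd)
{
    char    buf[128*1024];
    ssize_t n;

    while ((n = read(fsevents_fd, buf, sizeof(buf))) > 0) {
        const char *p = buf;

        while (p + 2*sizeof(int32_t) <= buf + n) {
            int32_t type = *(const int32_t *)p;
            int32_t pid  = *(const int32_t *)(p + sizeof(int32_t));
            p += 2*sizeof(int32_t);

            if (type == FSE_EVENTS_DROPPED) {
                p += sizeof(int16_t);   // skip the FSE_ARG_DONE that follows
                continue;               // we lost events; resync as appropriate
            }

            (void)pid;
            // a normal event follows as a list of FSE_ARG_* records
            // terminated by FSE_ARG_DONE; parsing of those is omitted here
            break;
        }
    }
}
#endif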

// release any references we might have on vnodes which are
// the mount point passed to us (so that it can be cleanly
// unmounted).
//
// since we don't want to lose the events we'll convert the
// vnode refs to full paths.
//
void
fsevent_unmount(__unused struct mount *mp)
{
    // we no longer maintain pointers to vnodes so
    // there is nothing to do...
}


//
// /dev/fsevents device code
//
static int fsevents_installed = 0;

typedef struct fsevent_handle {
    UInt32            flags;
    SInt32            active;
    fs_event_watcher *watcher;
    struct klist      knotes;
    struct selinfo    si;
} fsevent_handle;

#define FSEH_CLOSING   0x0001

static int
fseventsf_read(struct fileproc *fp, struct uio *uio,
               __unused int flags, __unused vfs_context_t ctx)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;
    int error;

    error = fmod_watch(fseh->watcher, uio);

    return error;
}


static int
fseventsf_write(__unused struct fileproc *fp, __unused struct uio *uio,
                __unused int flags, __unused vfs_context_t ctx)
{
    return EIO;
}

#pragma pack(push, 4)
typedef struct ext_fsevent_dev_filter_args {
    uint32_t    num_devices;
    user_addr_t devices;
} ext_fsevent_dev_filter_args;
#pragma pack(pop)

typedef struct old_fsevent_dev_filter_args {
    uint32_t num_devices;
    int32_t  devices;
} old_fsevent_dev_filter_args;

#define OLD_FSEVENTS_DEVICE_FILTER _IOW('s', 100, old_fsevent_dev_filter_args)
#define NEW_FSEVENTS_DEVICE_FILTER _IOW('s', 100, ext_fsevent_dev_filter_args)

#if __LP64__
/* need this in spite of the padding due to alignment of devices */
typedef struct fsevent_dev_filter_args32 {
    uint32_t num_devices;
    uint32_t devices;
    int32_t  pad1;
} fsevent_dev_filter_args32;
#endif

static int
fseventsf_ioctl(struct fileproc *fp, u_long cmd, caddr_t data, vfs_context_t ctx)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;
    int ret = 0;
    ext_fsevent_dev_filter_args *devfilt_args, _devfilt_args;

    if (proc_is64bit(vfs_context_proc(ctx))) {
        devfilt_args = (ext_fsevent_dev_filter_args *)data;
    } else if (cmd == OLD_FSEVENTS_DEVICE_FILTER) {
        old_fsevent_dev_filter_args *udev_filt_args = (old_fsevent_dev_filter_args *)data;

        devfilt_args = &_devfilt_args;
        memset(devfilt_args, 0, sizeof(ext_fsevent_dev_filter_args));

        devfilt_args->num_devices = udev_filt_args->num_devices;
        devfilt_args->devices     = CAST_USER_ADDR_T(udev_filt_args->devices);
    } else {
#if __LP64__
        fsevent_dev_filter_args32 *udev_filt_args = (fsevent_dev_filter_args32 *)data;
#else
        fsevent_dev_filter_args *udev_filt_args = (fsevent_dev_filter_args *)data;
#endif

        devfilt_args = &_devfilt_args;
        memset(devfilt_args, 0, sizeof(ext_fsevent_dev_filter_args));

        devfilt_args->num_devices = udev_filt_args->num_devices;
        devfilt_args->devices     = CAST_USER_ADDR_T(udev_filt_args->devices);
    }

    OSAddAtomic(1, &fseh->active);
    if (fseh->flags & FSEH_CLOSING) {
        OSAddAtomic(-1, &fseh->active);
        return 0;
    }

    switch (cmd) {
        case FIONBIO:
        case FIOASYNC:
            break;

        case FSEVENTS_WANT_COMPACT_EVENTS: {
            fseh->watcher->flags |= WATCHER_WANTS_COMPACT_EVENTS;
            break;
        }

        case FSEVENTS_WANT_EXTENDED_INFO: {
            fseh->watcher->flags |= WATCHER_WANTS_EXTENDED_INFO;
            break;
        }

        case FSEVENTS_GET_CURRENT_ID: {
            *(uint64_t *)data = fseh->watcher->max_event_id;
            ret = 0;
            break;
        }

        case OLD_FSEVENTS_DEVICE_FILTER:
        case NEW_FSEVENTS_DEVICE_FILTER: {
            int new_num_devices;
            dev_t *devices_not_to_watch, *tmp = NULL;

            if (devfilt_args->num_devices > 256) {
                ret = EINVAL;
                break;
            }

            new_num_devices = devfilt_args->num_devices;
            if (new_num_devices == 0) {
                tmp = fseh->watcher->devices_not_to_watch;

                lock_watch_table();
                fseh->watcher->devices_not_to_watch = NULL;
                fseh->watcher->num_devices = new_num_devices;
                unlock_watch_table();

                if (tmp) {
                    FREE(tmp, M_TEMP);
                }
                break;
            }

            MALLOC(devices_not_to_watch, dev_t *,
                   new_num_devices * sizeof(dev_t),
                   M_TEMP, M_WAITOK);
            if (devices_not_to_watch == NULL) {
                ret = ENOMEM;
                break;
            }

            ret = copyin(devfilt_args->devices,
                         (void *)devices_not_to_watch,
                         new_num_devices * sizeof(dev_t));
            if (ret) {
                FREE(devices_not_to_watch, M_TEMP);
                break;
            }

            lock_watch_table();
            fseh->watcher->num_devices = new_num_devices;
            tmp = fseh->watcher->devices_not_to_watch;
            fseh->watcher->devices_not_to_watch = devices_not_to_watch;
            unlock_watch_table();

            if (tmp) {
                FREE(tmp, M_TEMP);
            }

            break;
        }

        default:
            ret = EINVAL;
            break;
    }

    OSAddAtomic(-1, &fseh->active);
    return (ret);
}
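
/*
 * Illustrative sketch, not compiled into the kernel: installing a device
 * filter from userspace on a cloned fsevents fd.  This assumes the
 * userspace fsevent_dev_filter_args/FSEVENTS_DEVICE_FILTER definitions
 * from <sys/fsevents.h>; the kernel translates them into the extended
 * form handled above.  Note the 256-device cap enforced by the ioctl.
 */
#if 0
static int
suppress_devices(int fsevents_fd, dev_t *devs, uint32_t ndevs)
{
    fsevent_dev_filter_args args;

    args.num_devices = ndevs;   // must be <= 256; 0 clears the filter
    args.devices     = devs;    // events from these devices are not reported

    return ioctl(fsevents_fd, FSEVENTS_DEVICE_FILTER, &args);
}
#endif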

static int
fseventsf_select(struct fileproc *fp, int which, void *wql, vfs_context_t ctx)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;
    int ready = 0;

    if ((which != FREAD) || (fseh->watcher->flags & WATCHER_CLOSING)) {
        return 0;
    }

    // if there's nothing in the queue, we're not ready
    if (fseh->watcher->rd != fseh->watcher->wr) {
        ready = 1;
    }

    if (!ready) {
        selrecord(vfs_context_proc(ctx), &fseh->si, wql);
    }

    return ready;
}


#if NOTUSED
static int
fseventsf_stat(__unused struct fileproc *fp, __unused struct stat *sb, __unused vfs_context_t ctx)
{
    return ENOTSUP;
}
#endif

static int
fseventsf_close(struct fileglob *fg, __unused vfs_context_t ctx)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fg->fg_data;
    fs_event_watcher *watcher;

    OSBitOrAtomic(FSEH_CLOSING, &fseh->flags);
    while (OSAddAtomic(0, &fseh->active) > 0) {
        tsleep((caddr_t)fseh->watcher, PRIBIO, "fsevents-close", 1);
    }

    watcher = fseh->watcher;
    fg->fg_data = NULL;
    fseh->watcher = NULL;

    remove_watcher(watcher);
    FREE(fseh, M_TEMP);

    return 0;
}

static void
filt_fsevent_detach(struct knote *kn)
{
    fsevent_handle *fseh = (struct fsevent_handle *)kn->kn_hook;

    lock_watch_table();

    KNOTE_DETACH(&fseh->knotes, kn);

    unlock_watch_table();
}

/*
 * Determine whether this knote should be active
 *
 * This is kind of subtle.
 * --First, notice if the vnode has been revoked: if so, override hint
 * --EVFILT_READ knotes are checked no matter what the hint is
 * --Other knotes activate based on hint.
 * --If hint is revoke, set special flags and activate
 */
static int
filt_fsevent(struct knote *kn, long hint)
{
    fsevent_handle *fseh = (struct fsevent_handle *)kn->kn_hook;
    int activate = 0;
    int32_t rd, wr, amt;

    if (NOTE_REVOKE == hint) {
        kn->kn_flags |= (EV_EOF | EV_ONESHOT);
        activate = 1;
    }

    rd = fseh->watcher->rd;
    wr = fseh->watcher->wr;
    if (rd <= wr) {
        amt = wr - rd;
    } else {
        amt = fseh->watcher->eventq_size - (rd - wr);
    }

    switch (kn->kn_filter) {
        case EVFILT_READ:
            kn->kn_data = amt;

            if (kn->kn_data != 0) {
                activate = 1;
            }
            break;
        case EVFILT_VNODE:
            /* Check events this note matches against the hint */
            if (kn->kn_sfflags & hint) {
                kn->kn_fflags |= hint; /* Set which event occurred */
            }
            if (kn->kn_fflags != 0) {
                activate = 1;
            }
            break;
        default:
            // nothing to do...
            break;
    }

    return (activate);
}


struct filterops fsevent_filtops = {
    .f_isfd   = 1,
    .f_attach = NULL,
    .f_detach = filt_fsevent_detach,
    .f_event  = filt_fsevent
};

static int
fseventsf_kqfilter(struct fileproc *fp, struct knote *kn, __unused vfs_context_t ctx)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;

    kn->kn_hook   = (void *)fseh;
    kn->kn_hookid = 1;
    kn->kn_fop    = &fsevent_filtops;

    lock_watch_table();

    KNOTE_ATTACH(&fseh->knotes, kn);

    unlock_watch_table();
    return 0;
}
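
/*
 * Illustrative sketch, not compiled into the kernel: a userspace caller
 * registering an EVFILT_READ knote on a cloned fsevents fd.  Attachment
 * goes through fseventsf_kqfilter() above, and filt_fsevent() reports
 * the number of queued events in kn_data, so the kevent fires whenever
 * the ring buffer is non-empty.
 */
#if 0
static void
wait_for_fsevents(int fsevents_fd)
{
    int kq = kqueue();
    struct kevent kev, out;

    EV_SET(&kev, fsevents_fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
    kevent(kq, &kev, 1, NULL, 0, NULL);             // attach the knote

    if (kevent(kq, NULL, 0, &out, 1, NULL) == 1) {
        // out.data is the queue depth computed by filt_fsevent()
    }
}
#endif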


static int
fseventsf_drain(struct fileproc *fp, __unused vfs_context_t ctx)
{
    int counter = 0;
    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;

    fseh->watcher->flags |= WATCHER_CLOSING;

    // if there are people still waiting, sleep for 10ms to
    // let them clean up and get out of there.  however we
    // also don't want to get stuck forever so if they don't
    // exit after 5 seconds we're tearing things down anyway.
    while (fseh->watcher->blockers && counter++ < 500) {
        // issue wakeup in case anyone is blocked waiting for an event
        // do this each time we wakeup in case the blocker missed
        // the wakeup due to the unprotected test of WATCHER_CLOSING
        // and decision to tsleep in fmod_watch... this bit of
        // latency is a decent tradeoff against not having to
        // take and drop a lock in fmod_watch
        fsevents_wakeup(fseh->watcher);

        tsleep((caddr_t)fseh->watcher, PRIBIO, "watcher-close", 1);
    }

    return 0;
}


static int
fseventsopen(__unused dev_t dev, __unused int flag, __unused int mode, __unused struct proc *p)
{
    if (!is_suser()) {
        return EPERM;
    }

    return 0;
}

static int
fseventsclose(__unused dev_t dev, __unused int flag, __unused int mode, __unused struct proc *p)
{
    return 0;
}

static int
fseventsread(__unused dev_t dev, __unused struct uio *uio, __unused int ioflag)
{
    return EIO;
}


static int
parse_buffer_and_add_events(const char *buffer, int bufsize, vfs_context_t ctx, long *remainder)
{
    const fse_info *finfo, *dest_finfo;
    const char *path, *ptr, *dest_path, *event_start = buffer;
    int path_len, type, dest_path_len, err = 0;


    ptr = buffer;
    while ((ptr+sizeof(int)+sizeof(fse_info)+1) < buffer+bufsize) {
        type = *(const int *)ptr;
        if (type < 0 || type >= FSE_MAX_EVENTS) {
            err = EINVAL;
            break;
        }

        ptr += sizeof(int);

        finfo = (const fse_info *)ptr;
        ptr += sizeof(fse_info);

        path = ptr;
        while (ptr < buffer+bufsize && *ptr != '\0') {
            ptr++;
        }

        if (ptr >= buffer+bufsize) {
            break;
        }

        ptr++; // advance over the trailing '\0'

        path_len = ptr - path;

        if (type != FSE_RENAME && type != FSE_EXCHANGE) {
            event_start = ptr; // record where the next event starts

            err = add_fsevent(type, ctx, FSE_ARG_STRING, path_len, path, FSE_ARG_FINFO, finfo, FSE_ARG_DONE);
            if (err) {
                break;
            }
            continue;
        }

        //
        // if we're here we have to slurp up the destination finfo
        // and path so that we can pass them to the add_fsevent()
        // call.  basically it's a copy of the above code.
        //
        dest_finfo = (const fse_info *)ptr;
        ptr += sizeof(fse_info);

        dest_path = ptr;
        while (ptr < buffer+bufsize && *ptr != '\0') {
            ptr++;
        }

        if (ptr >= buffer+bufsize) {
            break;
        }

        ptr++; // advance over the trailing '\0'
        event_start = ptr; // record where the next event starts

        dest_path_len = ptr - dest_path;
        //
        // If the destination inode number is non-zero, generate a rename
        // with both source and destination FSE_ARG_FINFO.  Otherwise generate
        // a rename with only one FSE_ARG_FINFO.  If you need to inject an
        // exchange with an inode of zero, just make that inode (and its path)
        // come in as the first one, not the second.
        //
        if (dest_finfo->ino) {
            err = add_fsevent(type, ctx,
                              FSE_ARG_STRING, path_len,      path,      FSE_ARG_FINFO, finfo,
                              FSE_ARG_STRING, dest_path_len, dest_path, FSE_ARG_FINFO, dest_finfo,
                              FSE_ARG_DONE);
        } else {
            err = add_fsevent(type, ctx,
                              FSE_ARG_STRING, path_len,      path,      FSE_ARG_FINFO, finfo,
                              FSE_ARG_STRING, dest_path_len, dest_path,
                              FSE_ARG_DONE);
        }

        if (err) {
            break;
        }
    }

    // if the last event wasn't complete, set the remainder
    // to be the last event start boundary.
    //
    *remainder = (long)((buffer+bufsize) - event_start);

    return err;
}
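
/*
 * Illustrative sketch, not compiled into the kernel: the layout
 * parse_buffer_and_add_events() expects for one injected event written
 * to /dev/fsevents: an int type, an fse_info, and a NUL-terminated
 * path.  FSE_RENAME and FSE_EXCHANGE append a second fse_info and path
 * for the destination.  The helper name is hypothetical.
 */
#if 0
static size_t
pack_simple_event(char *buf, int type, const fse_info *finfo, const char *path)
{
    char  *p = buf;
    size_t path_len = strlen(path) + 1;     // include the trailing '\0'

    memcpy(p, &type, sizeof(int));          p += sizeof(int);
    memcpy(p, finfo, sizeof(fse_info));     p += sizeof(fse_info);
    memcpy(p, path, path_len);              p += path_len;

    return p - buf;                         // bytes to write(2) to /dev/fsevents
}
#endif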


//
// Note: this buffer size can never be less than
// 2*MAXPATHLEN + 2*sizeof(fse_info) + sizeof(int)
// because that is the max size for a single event.
// I made it 4k to be a "nice" size; making it
// smaller is not a good idea.
//
#define WRITE_BUFFER_SIZE  4096
char *write_buffer = NULL;

static int
fseventswrite(__unused dev_t dev, struct uio *uio, __unused int ioflag)
{
    int error = 0, count;
    vfs_context_t ctx = vfs_context_current();
    long offset = 0, remainder;

    lck_mtx_lock(&event_writer_lock);

    if (write_buffer == NULL) {
        if (kmem_alloc(kernel_map, (vm_offset_t *)&write_buffer, WRITE_BUFFER_SIZE)) {
            lck_mtx_unlock(&event_writer_lock);
            return ENOMEM;
        }
    }

    //
    // this loop copies in and processes the events written.
    // it takes care to copy in reasonable size chunks and
    // process them.  if there is an event that spans a chunk
    // boundary we're careful to copy those bytes down to the
    // beginning of the buffer and read the next chunk in just
    // after it.
    //
    while (uio_resid(uio)) {
        if (uio_resid(uio) > (WRITE_BUFFER_SIZE-offset)) {
            count = WRITE_BUFFER_SIZE - offset;
        } else {
            count = uio_resid(uio);
        }

        error = uiomove(write_buffer+offset, count, uio);
        if (error) {
            break;
        }

        // printf("fsevents: write: copied in %d bytes (offset: %ld)\n", count, offset);
        error = parse_buffer_and_add_events(write_buffer, offset+count, ctx, &remainder);
        if (error) {
            break;
        }

        //
        // if there's any remainder, copy it down to the beginning
        // of the buffer so that it will get processed the next time
        // through the loop.  note that the remainder always starts
        // at an event boundary.
        //
        if (remainder != 0) {
            // printf("fsevents: write: an event spanned a %d byte boundary. remainder: %ld\n",
            //        WRITE_BUFFER_SIZE, remainder);
            memmove(write_buffer, (write_buffer+count+offset) - remainder, remainder);
            offset = remainder;
        } else {
            offset = 0;
        }
    }

    lck_mtx_unlock(&event_writer_lock);

    return error;
}


static struct fileops fsevents_fops = {
    fseventsf_read,
    fseventsf_write,
    fseventsf_ioctl,
    fseventsf_select,
    fseventsf_close,
    fseventsf_kqfilter,
    fseventsf_drain
};

typedef struct ext_fsevent_clone_args {
    user_addr_t event_list;
    int32_t     num_events;
    int32_t     event_queue_depth;
    user_addr_t fd;
} ext_fsevent_clone_args;

typedef struct old_fsevent_clone_args {
    uint32_t event_list;
    int32_t  num_events;
    int32_t  event_queue_depth;
    uint32_t fd;
} old_fsevent_clone_args;

#define OLD_FSEVENTS_CLONE _IOW('s', 1, old_fsevent_clone_args)

static int
fseventsioctl(__unused dev_t dev, u_long cmd, caddr_t data, __unused int flag, struct proc *p)
{
    struct fileproc *f;
    int fd, error;
    fsevent_handle *fseh = NULL;
    ext_fsevent_clone_args *fse_clone_args, _fse_clone;
    int8_t *event_list;
    int is64bit = proc_is64bit(p);

    switch (cmd) {
        case OLD_FSEVENTS_CLONE: {
            old_fsevent_clone_args *old_args = (old_fsevent_clone_args *)data;

            fse_clone_args = &_fse_clone;
            memset(fse_clone_args, 0, sizeof(ext_fsevent_clone_args));

            fse_clone_args->event_list        = CAST_USER_ADDR_T(old_args->event_list);
            fse_clone_args->num_events        = old_args->num_events;
            fse_clone_args->event_queue_depth = old_args->event_queue_depth;
            fse_clone_args->fd                = CAST_USER_ADDR_T(old_args->fd);
            goto handle_clone;
        }

        case FSEVENTS_CLONE:
            if (is64bit) {
                fse_clone_args = (ext_fsevent_clone_args *)data;
            } else {
                fsevent_clone_args *ufse_clone = (fsevent_clone_args *)data;

                fse_clone_args = &_fse_clone;
                memset(fse_clone_args, 0, sizeof(ext_fsevent_clone_args));

                fse_clone_args->event_list        = CAST_USER_ADDR_T(ufse_clone->event_list);
                fse_clone_args->num_events        = ufse_clone->num_events;
                fse_clone_args->event_queue_depth = ufse_clone->event_queue_depth;
                fse_clone_args->fd                = CAST_USER_ADDR_T(ufse_clone->fd);
            }

        handle_clone:
            if (fse_clone_args->num_events < 0 || fse_clone_args->num_events > 4096) {
                return EINVAL;
            }

            MALLOC(fseh, fsevent_handle *, sizeof(fsevent_handle),
                   M_TEMP, M_WAITOK);
            if (fseh == NULL) {
                return ENOMEM;
            }
            memset(fseh, 0, sizeof(fsevent_handle));

            klist_init(&fseh->knotes);

            MALLOC(event_list, int8_t *,
                   fse_clone_args->num_events * sizeof(int8_t),
                   M_TEMP, M_WAITOK);
            if (event_list == NULL) {
                FREE(fseh, M_TEMP);
                return ENOMEM;
            }

            error = copyin(fse_clone_args->event_list,
                           (void *)event_list,
                           fse_clone_args->num_events * sizeof(int8_t));
            if (error) {
                FREE(event_list, M_TEMP);
                FREE(fseh, M_TEMP);
                return error;
            }

            error = add_watcher(event_list,
                                fse_clone_args->num_events,
                                fse_clone_args->event_queue_depth,
                                &fseh->watcher);
            if (error) {
                FREE(event_list, M_TEMP);
                FREE(fseh, M_TEMP);
                return error;
            }

            // connect up the watcher with this fsevent_handle
            fseh->watcher->fseh = fseh;

            error = falloc(p, &f, &fd, vfs_context_current());
            if (error) {
                FREE(event_list, M_TEMP);
                FREE(fseh, M_TEMP);
                return (error);
            }
            proc_fdlock(p);
            f->f_fglob->fg_flag = FREAD | FWRITE;
            f->f_fglob->fg_type = DTYPE_FSEVENTS;
            f->f_fglob->fg_ops  = &fsevents_fops;
            f->f_fglob->fg_data = (caddr_t) fseh;
            proc_fdunlock(p);
            error = copyout((void *)&fd, fse_clone_args->fd, sizeof(int32_t));
            if (error != 0) {
                fp_free(p, fd, f);
            } else {
                proc_fdlock(p);
                procfdtbl_releasefd(p, fd, NULL);
                fp_drop(p, fd, f, 1);
                proc_fdunlock(p);
            }
            break;

        default:
            error = EINVAL;
            break;
    }

    return error;
}
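
/*
 * Illustrative sketch, not compiled into the kernel: how a userspace
 * watcher is created.  The caller opens /dev/fsevents and issues
 * FSEVENTS_CLONE with a per-type action list; the kernel writes the new
 * watcher fd back through args.fd.  This assumes the userspace
 * fsevent_clone_args from <sys/fsevents.h>; error handling is omitted.
 */
#if 0
static int
clone_fsevents_watcher(int8_t event_list[FSE_MAX_EVENTS], int32_t qdepth)
{
    int fd = open("/dev/fsevents", O_RDONLY);
    int new_fd = -1;
    fsevent_clone_args args;

    args.event_list        = event_list;    // FSE_REPORT or FSE_IGNORE per event type
    args.num_events        = FSE_MAX_EVENTS;
    args.event_queue_depth = qdepth;
    args.fd                = &new_fd;

    ioctl(fd, FSEVENTS_CLONE, &args);
    close(fd);                              // the cloned fd is the one we read

    return new_fd;
}
#endif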

static void
fsevents_wakeup(fs_event_watcher *watcher)
{
    selwakeup(&watcher->fseh->si);
    KNOTE(&watcher->fseh->knotes, NOTE_WRITE|NOTE_NONE);
    wakeup((caddr_t)watcher);
}


/*
 * A struct describing which functions will get invoked for certain
 * actions.
 */
static struct cdevsw fsevents_cdevsw =
{
    fseventsopen,               /* open */
    fseventsclose,              /* close */
    fseventsread,               /* read */
    fseventswrite,              /* write */
    fseventsioctl,              /* ioctl */
    (stop_fcn_t *)&nulldev,     /* stop */
    (reset_fcn_t *)&nulldev,    /* reset */
    NULL,                       /* tty's */
    eno_select,                 /* select */
    eno_mmap,                   /* mmap */
    eno_strat,                  /* strategy */
    eno_getc,                   /* getc */
    eno_putc,                   /* putc */
    0                           /* type */
};


/*
 * Called to initialize our device,
 * and to register ourselves with devfs
 */
void
fsevents_init(void)
{
    int ret;

    if (fsevents_installed) {
        return;
    }

    fsevents_installed = 1;

    ret = cdevsw_add(-1, &fsevents_cdevsw);
    if (ret < 0) {
        fsevents_installed = 0;
        return;
    }

    devfs_make_node(makedev(ret, 0), DEVFS_CHAR,
                    UID_ROOT, GID_WHEEL, 0644, "fsevents", 0);

    fsevents_internal_init();
}


char *
get_pathbuff(void)
{
    char *path;

    MALLOC_ZONE(path, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
    return path;
}

void
release_pathbuff(char *path)
{
    if (path == NULL) {
        return;
    }
    FREE_ZONE(path, MAXPATHLEN, M_NAMEI);
}

int
get_fse_info(struct vnode *vp, fse_info *fse, __unused vfs_context_t ctx)
{
    struct vnode_attr va;

    VATTR_INIT(&va);
    VATTR_WANTED(&va, va_fsid);
    VATTR_WANTED(&va, va_fileid);
    VATTR_WANTED(&va, va_mode);
    VATTR_WANTED(&va, va_uid);
    VATTR_WANTED(&va, va_gid);
    if (vp->v_flag & VISHARDLINK) {
        if (vp->v_type == VDIR) {
            VATTR_WANTED(&va, va_dirlinkcount);
        } else {
            VATTR_WANTED(&va, va_nlink);
        }
    }

    if (vnode_getattr(vp, &va, vfs_context_kernel()) != 0) {
        memset(fse, 0, sizeof(fse_info));
        return -1;
    }

    return vnode_get_fse_info_from_vap(vp, fse, &va);
}

int
vnode_get_fse_info_from_vap(vnode_t vp, fse_info *fse, struct vnode_attr *vap)
{
    fse->ino  = (ino64_t)vap->va_fileid;
    fse->dev  = (dev_t)vap->va_fsid;
    fse->mode = (int32_t)vnode_vttoif(vnode_vtype(vp)) | vap->va_mode;
    fse->uid  = (uid_t)vap->va_uid;
    fse->gid  = (gid_t)vap->va_gid;
    if (vp->v_flag & VISHARDLINK) {
        fse->mode |= FSE_MODE_HLINK;
        if (vp->v_type == VDIR) {
            fse->nlink = (uint64_t)vap->va_dirlinkcount;
        } else {
            fse->nlink = (uint64_t)vap->va_nlink;
        }
    }

    return 0;
}

void
create_fsevent_from_kevent(vnode_t vp, uint32_t kevents, struct vnode_attr *vap)
{
    int fsevent_type = FSE_CONTENT_MODIFIED, len;   // the default is the most pessimistic
    char pathbuf[MAXPATHLEN];
    fse_info fse;


    if (kevents & VNODE_EVENT_DELETE) {
        fsevent_type = FSE_DELETE;
    } else if (kevents & (VNODE_EVENT_EXTEND|VNODE_EVENT_WRITE)) {
        fsevent_type = FSE_CONTENT_MODIFIED;
    } else if (kevents & VNODE_EVENT_LINK) {
        fsevent_type = FSE_CREATE_FILE;
    } else if (kevents & VNODE_EVENT_RENAME) {
        fsevent_type = FSE_CREATE_FILE;   // XXXdbg - should use FSE_RENAME but we don't have the destination info
    } else if (kevents & (VNODE_EVENT_FILE_CREATED|VNODE_EVENT_FILE_REMOVED|VNODE_EVENT_DIR_CREATED|VNODE_EVENT_DIR_REMOVED)) {
        fsevent_type = FSE_STAT_CHANGED;  // XXXdbg - because vp is a dir and the thing created/removed lived inside it
    } else {   // a catch-all for VNODE_EVENT_PERMS, VNODE_EVENT_ATTRIB and anything else
        fsevent_type = FSE_STAT_CHANGED;
    }

    // printf("convert_kevent: kevents 0x%x fsevent type 0x%x (for %s)\n", kevents, fsevent_type, vp->v_name ? vp->v_name : "(no-name)");

    fse.dev  = vap->va_fsid;
    fse.ino  = vap->va_fileid;
    fse.mode = vnode_vttoif(vnode_vtype(vp)) | (uint32_t)vap->va_mode;
    if (vp->v_flag & VISHARDLINK) {
        fse.mode |= FSE_MODE_HLINK;
        if (vp->v_type == VDIR) {
            fse.nlink = vap->va_dirlinkcount;
        } else {
            fse.nlink = vap->va_nlink;
        }
    }

    if (vp->v_type == VDIR) {
        fse.mode |= FSE_REMOTE_DIR_EVENT;
    }

    fse.uid = vap->va_uid;
    fse.gid = vap->va_gid;

    len = sizeof(pathbuf);
    if (vn_getpath(vp, pathbuf, &len) == 0) {
        add_fsevent(fsevent_type, vfs_context_current(), FSE_ARG_STRING, len, pathbuf, FSE_ARG_FINFO, &fse, FSE_ARG_DONE);
    }
    return;
}

#else /* CONFIG_FSE */
/*
 * The get_pathbuff and release_pathbuff routines are used in places not
 * related to fsevents, and it's a handy abstraction, so define trivial
 * versions that don't cache a pool of buffers.  This way, we don't have
 * to conditionalize the callers, and they still get the advantage of the
 * pool of buffers if CONFIG_FSE is turned on.
 */
char *
get_pathbuff(void)
{
    char *path;
    MALLOC_ZONE(path, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
    return path;
}

void
release_pathbuff(char *path)
{
    FREE_ZONE(path, MAXPATHLEN, M_NAMEI);
}
#endif /* CONFIG_FSE */