/* bsd/vfs/vfs_fsevents.c (apple/xnu, release xnu-7195.50.7.100.1) */
/*
 * Copyright (c) 2004-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <stdarg.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/event.h>         // for kqueue related stuff
#include <sys/fsevents.h>

#if CONFIG_FSE
#include <sys/namei.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/file_internal.h>
#include <sys/stat.h>
#include <sys/vnode_internal.h>
#include <sys/mount_internal.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/uio.h>
#include <kern/kalloc.h>
#include <sys/dirent.h>
#include <sys/attr.h>
#include <sys/sysctl.h>
#include <sys/ubc.h>
#include <machine/cons.h>
#include <miscfs/specfs/specdev.h>
#include <miscfs/devfs/devfs.h>
#include <sys/filio.h>
#include <kern/locks.h>
#include <libkern/OSAtomic.h>
#include <kern/zalloc.h>
#include <mach/mach_time.h>
#include <kern/thread_call.h>
#include <kern/clock.h>

#include <security/audit/audit.h>
#include <bsm/audit_kevents.h>

#include <pexpert/pexpert.h>
#include <libkern/section_keywords.h>

typedef struct kfs_event {
	LIST_ENTRY(kfs_event) kevent_list;
	int16_t    type;           // type code of this event
	u_int16_t  flags,          // per-event flags
	    len;                   // the length of the path in "str"
	int32_t    refcount;       // number of clients referencing this
	pid_t      pid;            // pid of the process that did the op

	uint64_t   abstime;        // when this event happened (mach_absolute_time())
	ino64_t    ino;
	dev_t      dev;
	int32_t    mode;
	uid_t      uid;
	gid_t      gid;

	const char *str;

	struct kfs_event *dest;    // if this is a two-file op
} kfs_event;

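//
// Reference counting: add_fsevent() creates a kfs_event with
// refcount 1, watcher_add_event() takes an extra reference for
// each watcher queue the event lands on, and release_event_ref()
// frees the event (and any two-file "dest" event) once the last
// reference is dropped.
//
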
// flags for the flags field
#define KFSE_COMBINED_EVENTS          0x0001
#define KFSE_CONTAINS_DROPPED_EVENTS  0x0002
#define KFSE_RECYCLED_EVENT           0x0004
#define KFSE_BEING_CREATED            0x0008

LIST_HEAD(kfse_list, kfs_event) kfse_list_head = LIST_HEAD_INITIALIZER(x);
int num_events_outstanding = 0;
int num_pending_rename = 0;


struct fsevent_handle;

typedef struct fs_event_watcher {
	int8_t     *event_list;            // the events we're interested in
	int32_t     num_events;
	dev_t      *devices_not_to_watch;  // report events from devices not in this list
	uint32_t    num_devices;
	int32_t     flags;
	kfs_event **event_queue;
	int32_t     eventq_size;           // number of event pointers in queue
	int32_t     num_readers;
	int32_t     rd;                    // read index into the event_queue
	int32_t     wr;                    // write index into the event_queue
	int32_t     blockers;
	int32_t     my_id;
	uint32_t    num_dropped;
	uint64_t    max_event_id;
	struct fsevent_handle *fseh;
	pid_t       pid;
	char        proc_name[(2 * MAXCOMLEN) + 1];
} fs_event_watcher;

// fs_event_watcher flags
#define WATCHER_DROPPED_EVENTS        0x0001
#define WATCHER_CLOSING               0x0002
#define WATCHER_WANTS_COMPACT_EVENTS  0x0004
#define WATCHER_WANTS_EXTENDED_INFO   0x0008
#define WATCHER_APPLE_SYSTEM_SERVICE  0x0010   // fseventsd, coreservicesd, mds, revisiond

#define MAX_WATCHERS  8
static fs_event_watcher *watcher_table[MAX_WATCHERS];

#define DEFAULT_MAX_KFS_EVENTS  4096
static int max_kfs_events = DEFAULT_MAX_KFS_EVENTS;

// we allocate kfs_event structures out of this zone
static zone_t event_zone;
static int fs_event_init = 0;

//
// this array records whether anyone is interested in a
// particular type of event.  if no one is, we bail out
// early from the event delivery
//
static int16_t fs_event_type_watchers[FSE_MAX_EVENTS];

// the device currently being unmounted:
static dev_t fsevent_unmount_dev = 0;
// how many ACKs are still outstanding:
static int fsevent_unmount_ack_count = 0;

static int  watcher_add_event(fs_event_watcher *watcher, kfs_event *kfse);
static void fsevents_wakeup(fs_event_watcher *watcher);

//
// Locks
//
static lck_grp_attr_t *fsevent_group_attr;
static lck_attr_t     *fsevent_lock_attr;
static lck_grp_t      *fsevent_mutex_group;

static lck_grp_t      *fsevent_rw_group;

static lck_rw_t  event_handling_lock;   // handles locking for event manipulation and recycling
static lck_mtx_t watch_table_lock;
static lck_mtx_t event_buf_lock;
static lck_mtx_t event_writer_lock;


/* Explicitly declare qsort so compiler doesn't complain */
__private_extern__ void qsort(
	void * array,
	size_t nmembers,
	size_t member_size,
	int (*)(const void *, const void *));

static int
is_ignored_directory(const char *path)
{
	if (!path) {
		return 0;
	}

#define IS_TLD(x) strnstr(__DECONST(char *, path), x, MAXPATHLEN)
	if (IS_TLD("/.Spotlight-V100/") ||
	    IS_TLD("/.MobileBackups/") ||
	    IS_TLD("/Backups.backupdb/")) {
		return 1;
	}
#undef IS_TLD

	return 0;
}

static void
fsevents_internal_init(void)
{
	int i;

	if (fs_event_init++ != 0) {
		return;
	}

	for (i = 0; i < FSE_MAX_EVENTS; i++) {
		fs_event_type_watchers[i] = 0;
	}

	memset(watcher_table, 0, sizeof(watcher_table));

	fsevent_lock_attr   = lck_attr_alloc_init();
	fsevent_group_attr  = lck_grp_attr_alloc_init();
	fsevent_mutex_group = lck_grp_alloc_init("fsevent-mutex", fsevent_group_attr);
	fsevent_rw_group    = lck_grp_alloc_init("fsevent-rw", fsevent_group_attr);

	lck_mtx_init(&watch_table_lock, fsevent_mutex_group, fsevent_lock_attr);
	lck_mtx_init(&event_buf_lock, fsevent_mutex_group, fsevent_lock_attr);
	lck_mtx_init(&event_writer_lock, fsevent_mutex_group, fsevent_lock_attr);

	lck_rw_init(&event_handling_lock, fsevent_rw_group, fsevent_lock_attr);

	PE_get_default("kern.maxkfsevents", &max_kfs_events, sizeof(max_kfs_events));

	event_zone = zone_create_ext("fs-event-buf", sizeof(kfs_event),
	    ZC_NOGC | ZC_NOCALLOUT, ZONE_ID_ANY, ^(zone_t z) {
		// mark the zone as exhaustible so that it will not
		// ever grow beyond what we initially filled it with
		zone_set_exhaustible(z, max_kfs_events * sizeof(kfs_event));
	});
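
	// Because the zone is exhaustible and pre-filled below, the
	// zalloc_noblock() calls in add_fsevent() simply return NULL
	// once all max_kfs_events structures are in flight; that is
	// what drives the dropped-event path rather than blocking.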

	if (zfill(event_zone, max_kfs_events) < max_kfs_events) {
		printf("fsevents: failed to pre-fill the event zone.\n");
	}
}

static void
lock_watch_table(void)
{
	lck_mtx_lock(&watch_table_lock);
}

static void
unlock_watch_table(void)
{
	lck_mtx_unlock(&watch_table_lock);
}

static void
lock_fs_event_list(void)
{
	lck_mtx_lock(&event_buf_lock);
}

static void
unlock_fs_event_list(void)
{
	lck_mtx_unlock(&event_buf_lock);
}

// forward prototype
static void release_event_ref(kfs_event *kfse);

static boolean_t
watcher_cares_about_dev(fs_event_watcher *watcher, dev_t dev)
{
	unsigned int i;

	// if devices_not_to_watch is NULL then we care about all
	// events from all devices
	if (watcher->devices_not_to_watch == NULL) {
		return true;
	}

	for (i = 0; i < watcher->num_devices; i++) {
		if (dev == watcher->devices_not_to_watch[i]) {
			// found a match!  that means we do not
			// want events from this device.
			return false;
		}
	}

	// if we're here it's not in the devices_not_to_watch[]
	// list so that means we do care about it
	return true;
}


int
need_fsevent(int type, vnode_t vp)
{
	if (type >= 0 && type < FSE_MAX_EVENTS && fs_event_type_watchers[type] == 0) {
		return 0;
	}

	// events in /dev aren't really interesting...
	if (vp->v_tag == VT_DEVFS) {
		return 0;
	}

	return 1;
}


#define is_throw_away(x)  ((x) == FSE_STAT_CHANGED || (x) == FSE_CONTENT_MODIFIED)


// Ways that an event can be reused:
//
// "combined" events mean that there were two events for
// the same vnode or path and we're combining both events
// into a single event.  The primary event gets a bit that
// marks it as having been combined.  The secondary event
// is essentially dropped and the kfse structure reused.
//
// "collapsed" means that multiple events below a given
// directory are collapsed into a single event.  in this
// case, the directory that we collapse into and all of
// its children must be re-scanned.
//
// "recycled" means that we're completely blowing away
// the event since there are other events that have info
// about the same vnode or path (and one of those other
// events will be marked as combined or collapsed as
// appropriate).
//
#define KFSE_COMBINED   0x0001
#define KFSE_COLLAPSED  0x0002
#define KFSE_RECYCLED   0x0004

int num_dropped = 0;
int num_parent_switch = 0;
int num_recycled_rename = 0;

static struct timeval last_print;

//
// These variables are used to track coalescing multiple identical
// events for the same vnode/pathname.  If we get the same event
// type and same vnode/pathname as the previous event, we just drop
// the event since it's superfluous.  This improves some micro-
// benchmarks considerably and actually has a real-world impact on
// tests like a Finder copy where multiple stat-changed events can
// get coalesced.
//
static int      last_event_type = -1;
static void    *last_ptr = NULL;
static char     last_str[MAXPATHLEN];
static int      last_nlen = 0;
static int      last_vid = -1;
static uint64_t last_coalesced_time = 0;
static void    *last_event_ptr = NULL;
static pid_t    last_pid = -1;
int             last_coalesced = 0;
static mach_timebase_info_data_t sTimebaseInfo = { 0, 0 };
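
//
// Example of the coalescing rule as implemented in add_fsevent():
// two FSE_CONTENT_MODIFIED events for the same path (or the same
// vnode id) from the same pid within one second collapse into one;
// the second event only bumps last_coalesced.  An intervening event
// with a different type, path, or pid resets the tracking state.
//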


int
add_fsevent(int type, vfs_context_t ctx, ...)
{
	struct proc      *p = vfs_context_proc(ctx);
	int               i, arg_type, ret;
	kfs_event        *kfse, *kfse_dest = NULL, *cur;
	fs_event_watcher *watcher;
	va_list           ap;
	int               error = 0, did_alloc = 0;
	dev_t             dev = 0;
	uint64_t          now, elapsed;
	char             *pathbuff = NULL;
	int               pathbuff_len;



	va_start(ap, ctx);

	// ignore bogus event types..
	if (type < 0 || type >= FSE_MAX_EVENTS) {
		va_end(ap);
		return EINVAL;
	}

	// if no one cares about this type of event, bail out
	if (fs_event_type_watchers[type] == 0) {
		va_end(ap);

		return 0;
	}

	now = mach_absolute_time();

	// find a free event and snag it for our use
	// NOTE: do not do anything that would block until
	//       the lock is dropped.
	lock_fs_event_list();

	//
	// check if this event is identical to the previous one...
	// (as long as it's not an event type that can never be the
	// same as a previous event)
	//
	if (type != FSE_CREATE_FILE && type != FSE_DELETE && type != FSE_RENAME && type != FSE_EXCHANGE && type != FSE_CHOWN && type != FSE_DOCID_CHANGED && type != FSE_DOCID_CREATED && type != FSE_CLONE) {
		void *ptr = NULL;
		int   vid = 0, was_str = 0, nlen = 0;

		for (arg_type = va_arg(ap, int32_t); arg_type != FSE_ARG_DONE; arg_type = va_arg(ap, int32_t)) {
			switch (arg_type) {
			case FSE_ARG_VNODE: {
				ptr = va_arg(ap, void *);
				vid = vnode_vid((struct vnode *)ptr);
				last_str[0] = '\0';
				break;
			}
			case FSE_ARG_STRING: {
				nlen = va_arg(ap, int32_t);
				ptr = va_arg(ap, void *);
				was_str = 1;
				break;
			}
			}
			if (ptr != NULL) {
				break;
			}
		}

		if (sTimebaseInfo.denom == 0) {
			(void) clock_timebase_info(&sTimebaseInfo);
		}

		elapsed = (now - last_coalesced_time);
		if (sTimebaseInfo.denom != sTimebaseInfo.numer) {
			if (sTimebaseInfo.denom == 1) {
				elapsed *= sTimebaseInfo.numer;
			} else {
				// this could overflow... the worst that will happen is that we'll
				// send (or not send) an extra event so I'm not going to worry about
				// doing the math right like dtrace_abs_to_nano() does.
				elapsed = (elapsed * sTimebaseInfo.numer) / (uint64_t)sTimebaseInfo.denom;
			}
		}
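
		// For example, with a timebase of numer=125/denom=3 (a
		// common Apple Silicon ratio), 24,000,000 ticks become
		// 24,000,000 * 125 / 3 = 1,000,000,000 ns, i.e. exactly
		// the 1-second coalescing window checked below.  On
		// hardware where numer == denom, ticks already are
		// nanoseconds and the conversion is skipped.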

		if (type == last_event_type
		    && (elapsed < 1000000000)
		    && (last_pid == p->p_pid)
		    &&
		    ((vid && vid == last_vid && last_ptr == ptr)
		    ||
		    (last_str[0] && last_nlen == nlen && ptr && strcmp(last_str, ptr) == 0))
		    ) {
			last_coalesced++;
			unlock_fs_event_list();
			va_end(ap);

			return 0;
		} else {
			last_ptr = ptr;
			if (was_str) {
				strlcpy(last_str, ptr, sizeof(last_str));
			}
			last_nlen = nlen;
			last_vid = vid;
			last_event_type = type;
			last_coalesced_time = now;
			last_pid = p->p_pid;
		}
	}
	va_start(ap, ctx);


	kfse = zalloc_noblock(event_zone);
	if (kfse && (type == FSE_RENAME || type == FSE_EXCHANGE || type == FSE_CLONE)) {
		kfse_dest = zalloc_noblock(event_zone);
		if (kfse_dest == NULL) {
			did_alloc = 1;
			zfree(event_zone, kfse);
			kfse = NULL;
		}
	}


	if (kfse == NULL) {        // yikes! no free events
		unlock_fs_event_list();
		lock_watch_table();

		for (i = 0; i < MAX_WATCHERS; i++) {
			watcher = watcher_table[i];
			if (watcher == NULL) {
				continue;
			}

			watcher->flags |= WATCHER_DROPPED_EVENTS;
			fsevents_wakeup(watcher);
		}
		unlock_watch_table();

		{
			struct timeval current_tv;

			num_dropped++;

			// only print a message at most once every 10 seconds
			microuptime(&current_tv);
			if ((current_tv.tv_sec - last_print.tv_sec) > 10) {
				int ii;
				void *junkptr = zalloc_noblock(event_zone), *listhead = kfse_list_head.lh_first;

				printf("add_fsevent: event queue is full! dropping events (num dropped events: %d; num events outstanding: %d).\n", num_dropped, num_events_outstanding);
				printf("add_fsevent: kfse_list head %p ; num_pending_rename %d\n", listhead, num_pending_rename);
				printf("add_fsevent: zalloc sez: %p\n", junkptr);
				printf("add_fsevent: event_zone info: %d 0x%x\n", ((int *)event_zone)[0], ((int *)event_zone)[1]);
				lock_watch_table();
				for (ii = 0; ii < MAX_WATCHERS; ii++) {
					if (watcher_table[ii] == NULL) {
						continue;
					}

					printf("add_fsevent: watcher %s %p: rd %4d wr %4d q_size %4d flags 0x%x\n",
					    watcher_table[ii]->proc_name,
					    watcher_table[ii],
					    watcher_table[ii]->rd, watcher_table[ii]->wr,
					    watcher_table[ii]->eventq_size, watcher_table[ii]->flags);
				}
				unlock_watch_table();

				last_print = current_tv;
				if (junkptr) {
					zfree(event_zone, junkptr);
				}
			}
		}

		if (pathbuff) {
			release_pathbuff(pathbuff);
			pathbuff = NULL;
		}
		return ENOSPC;
	}

	memset(kfse, 0, sizeof(kfs_event));
	kfse->refcount = 1;
	OSBitOrAtomic16(KFSE_BEING_CREATED, &kfse->flags);

	last_event_ptr = kfse;
	kfse->type    = (int16_t)type;
	kfse->abstime = now;
	kfse->pid     = p->p_pid;
	if (type == FSE_RENAME || type == FSE_EXCHANGE || type == FSE_CLONE) {
		memset(kfse_dest, 0, sizeof(kfs_event));
		kfse_dest->refcount = 1;
		OSBitOrAtomic16(KFSE_BEING_CREATED, &kfse_dest->flags);
		kfse_dest->type    = (int16_t)type;
		kfse_dest->pid     = p->p_pid;
		kfse_dest->abstime = now;

		kfse->dest = kfse_dest;
	}

	num_events_outstanding++;
	if (kfse->type == FSE_RENAME) {
		num_pending_rename++;
	}
	LIST_INSERT_HEAD(&kfse_list_head, kfse, kevent_list);

	if (kfse->refcount < 1) {
		panic("add_fsevent: line %d: kfse refcount %d but should be at least 1\n", __LINE__, kfse->refcount);
	}

	unlock_fs_event_list();    // at this point it's safe to unlock

	//
	// now process the arguments passed in and copy them into
	// the kfse
	//

	cur = kfse;

	if (type == FSE_DOCID_CREATED || type == FSE_DOCID_CHANGED) {
		uint64_t val;

		//
		// These events are special and not like the other events.  They only
		// have a dev_t, src inode #, dest inode #, and a doc-id.  We use the
		// fields that we can in the kfse but have to overlay the dest inode
		// number and the doc-id on the other fields.
		//

		// First the dev_t
		arg_type = va_arg(ap, int32_t);
		if (arg_type == FSE_ARG_DEV) {
			cur->dev = (dev_t)(va_arg(ap, dev_t));
		} else {
			cur->dev = (dev_t)0xbadc0de1;
		}

		// next the source inode #
		arg_type = va_arg(ap, int32_t);
		if (arg_type == FSE_ARG_INO) {
			cur->ino = (ino64_t)(va_arg(ap, ino64_t));
		} else {
			cur->ino = 0xbadc0de2;
		}

		// now the dest inode #
		arg_type = va_arg(ap, int32_t);
		if (arg_type == FSE_ARG_INO) {
			val = (ino64_t)(va_arg(ap, ino64_t));
		} else {
			val = 0xbadc0de2;
		}
		// overlay the dest inode number on the str/dest pointer fields
		__nochk_memcpy(&cur->str, &val, sizeof(ino64_t));


		// and last the document-id
		arg_type = va_arg(ap, int32_t);
		if (arg_type == FSE_ARG_INT32) {
			val = (uint64_t)va_arg(ap, uint32_t);
		} else if (arg_type == FSE_ARG_INT64) {
			val = (uint64_t)va_arg(ap, uint64_t);
		} else {
			val = 0xbadc0de3;
		}

		// the docid is 64-bit and overlays the uid/gid fields
		static_assert(sizeof(cur->uid) + sizeof(cur->gid) == sizeof(val), "gid/uid size mismatch");
		static_assert(offsetof(struct kfs_event, gid) - offsetof(struct kfs_event, uid) == sizeof(cur->uid), "unexpected struct kfs_event layout");
		memcpy(&cur->uid, &val, sizeof(cur->uid));
		memcpy(&cur->gid, (u_int8_t *)&val + sizeof(cur->uid), sizeof(cur->gid));

		goto done_with_args;
	}

	if (type == FSE_UNMOUNT_PENDING) {
		// Just a dev_t
		arg_type = va_arg(ap, int32_t);
		if (arg_type == FSE_ARG_DEV) {
			cur->dev = (dev_t)(va_arg(ap, dev_t));
		} else {
			cur->dev = (dev_t)0xbadc0de1;
		}

		goto done_with_args;
	}

	for (arg_type = va_arg(ap, int32_t); arg_type != FSE_ARG_DONE; arg_type = va_arg(ap, int32_t)) {
		switch (arg_type) {
		case FSE_ARG_VNODE: {
			// this expands out into multiple arguments to the client
			struct vnode *vp;
			struct vnode_attr va;

			if (kfse->str != NULL) {
				cur = kfse_dest;
			}

			vp = va_arg(ap, struct vnode *);
			if (vp == NULL) {
				panic("add_fsevent: you can't pass me a NULL vnode ptr (type %d)!\n",
				    cur->type);
			}

			VATTR_INIT(&va);
			VATTR_WANTED(&va, va_fsid);
			VATTR_WANTED(&va, va_fileid);
			VATTR_WANTED(&va, va_mode);
			VATTR_WANTED(&va, va_uid);
			VATTR_WANTED(&va, va_gid);
			VATTR_WANTED(&va, va_nlink);
			if ((ret = vnode_getattr(vp, &va, vfs_context_kernel())) != 0) {
				// printf("add_fsevent: failed to getattr on vp %p (%d)\n", cur->fref.vp, ret);
				cur->str = NULL;
				error = EINVAL;
				goto clean_up;
			}

			cur->dev  = dev = (dev_t)va.va_fsid;
			cur->ino  = (ino64_t)va.va_fileid;
			cur->mode = (int32_t)vnode_vttoif(vnode_vtype(vp)) | va.va_mode;
			cur->uid  = va.va_uid;
			cur->gid  = va.va_gid;
			if (vp->v_flag & VISHARDLINK) {
				cur->mode |= FSE_MODE_HLINK;
				if ((vp->v_type == VDIR && va.va_dirlinkcount == 0) || (vp->v_type == VREG && va.va_nlink == 0)) {
					cur->mode |= FSE_MODE_LAST_HLINK;
				}
			}

			// if we haven't gotten the path yet, get it.
			if (pathbuff == NULL) {
				pathbuff = get_pathbuff();
				pathbuff_len = MAXPATHLEN;

				pathbuff[0] = '\0';
				if ((ret = vn_getpath_no_firmlink(vp, pathbuff, &pathbuff_len)) != 0 || pathbuff[0] == '\0') {
					cur->flags |= KFSE_CONTAINS_DROPPED_EVENTS;

					do {
						if (vp->v_parent != NULL) {
							vp = vp->v_parent;
						} else if (vp->v_mount) {
							strlcpy(pathbuff, vp->v_mount->mnt_vfsstat.f_mntonname, MAXPATHLEN);
							break;
						} else {
							vp = NULL;
						}

						if (vp == NULL) {
							break;
						}

						pathbuff_len = MAXPATHLEN;
						ret = vn_getpath_no_firmlink(vp, pathbuff, &pathbuff_len);
					} while (ret == ENOSPC);

					if (ret != 0 || vp == NULL) {
						error = ENOENT;
						goto clean_up;
					}
				}
			}

			// store the path by adding it to the global string table
			cur->len = (u_int16_t)pathbuff_len;
			cur->str = vfs_addname(pathbuff, pathbuff_len, 0, 0);
			if (cur->str == NULL || cur->str[0] == '\0') {
				panic("add_fsevent: was not able to add path %s to event %p.\n", pathbuff, cur);
			}

			release_pathbuff(pathbuff);
			pathbuff = NULL;

			break;
		}

		case FSE_ARG_FINFO: {
			fse_info *fse;

			fse = va_arg(ap, fse_info *);

			cur->dev  = dev = (dev_t)fse->dev;
			cur->ino  = (ino64_t)fse->ino;
			cur->mode = (int32_t)fse->mode;
			cur->uid  = (uid_t)fse->uid;
			cur->gid  = (gid_t)fse->gid;
			// if it's a hard-link and this is the last link, flag it
			if ((fse->mode & FSE_MODE_HLINK) && fse->nlink == 0) {
				cur->mode |= FSE_MODE_LAST_HLINK;
			}
			if (cur->mode & FSE_TRUNCATED_PATH) {
				cur->flags |= KFSE_CONTAINS_DROPPED_EVENTS;
				cur->mode &= ~FSE_TRUNCATED_PATH;
			}
			break;
		}

		case FSE_ARG_STRING:
			if (kfse->str != NULL) {
				cur = kfse_dest;
			}

			cur->len = (int16_t)(va_arg(ap, int32_t) & 0x7fff);
			if (cur->len >= 1) {
				cur->str = vfs_addname(va_arg(ap, char *), cur->len, 0, 0);
			} else {
				printf("add_fsevent: funny looking string length: %d\n", (int)cur->len);
				cur->len = 2;
				cur->str = vfs_addname("/", cur->len, 0, 0);
			}
			if (cur->str[0] == 0) {
				printf("add_fsevent: bogus looking string (len %d)\n", cur->len);
			}
			break;

		case FSE_ARG_INT32: {
			uint32_t ival = (uint32_t)va_arg(ap, int32_t);
			kfse->uid = ival;
			break;
		}

		default:
			printf("add_fsevent: unknown type %d\n", arg_type);
			// just skip one 32-bit word and hope we sync up...
			(void)va_arg(ap, int32_t);
		}
	}

done_with_args:
	va_end(ap);

	OSBitAndAtomic16(~KFSE_BEING_CREATED, &kfse->flags);
	if (kfse_dest) {
		OSBitAndAtomic16(~KFSE_BEING_CREATED, &kfse_dest->flags);
	}

	//
	// now we have to go and let everyone who is interested
	// in this type of event know that it happened
	//
	lock_watch_table();

	for (i = 0; i < MAX_WATCHERS; i++) {
		watcher = watcher_table[i];
		if (watcher == NULL) {
			continue;
		}

		if (type < watcher->num_events
		    && watcher->event_list[type] == FSE_REPORT
		    && watcher_cares_about_dev(watcher, dev)) {
			if (watcher_add_event(watcher, kfse) != 0) {
				watcher->num_dropped++;
				continue;
			}
		}

		// if (kfse->refcount < 1) {
		//      panic("add_fsevent: line %d: kfse refcount %d but should be at least 1\n", __LINE__, kfse->refcount);
		// }
	}

	unlock_watch_table();

clean_up:

	if (pathbuff) {
		release_pathbuff(pathbuff);
		pathbuff = NULL;
	}

	release_event_ref(kfse);

	return error;
}


static void
release_event_ref(kfs_event *kfse)
{
	int old_refcount;
	kfs_event copy, dest_copy;


	old_refcount = OSAddAtomic(-1, &kfse->refcount);
	if (old_refcount > 1) {
		return;
	}

	lock_fs_event_list();
	if (last_event_ptr == kfse) {
		last_event_ptr = NULL;
		last_event_type = -1;
		last_coalesced_time = 0;
	}

	if (kfse->refcount < 0) {
		panic("release_event_ref: bogus kfse refcount %d\n", kfse->refcount);
	}

	if (kfse->refcount > 0 || kfse->type == FSE_INVALID) {
		// This is very subtle.  Either of these conditions can
		// be true if an event got recycled while we were waiting
		// on the fs_event_list lock or the event got recycled,
		// delivered, _and_ free'd by someone else while we were
		// waiting on the fs event list lock.  In either case
		// we need to just unlock the list and return without
		// doing anything because if the refcount is > 0 then
		// someone else will take care of free'ing it and when
		// the kfse->type is invalid then someone else already
		// has handled free'ing the event (while we were blocked
		// on the event list lock).
		//
		unlock_fs_event_list();
		return;
	}

	//
	// make a copy of this so we can free things without
	// holding the fs_event_buf lock
	//
	copy = *kfse;
	if (kfse->type != FSE_DOCID_CREATED && kfse->type != FSE_DOCID_CHANGED && kfse->dest && OSAddAtomic(-1, &kfse->dest->refcount) == 1) {
		dest_copy = *kfse->dest;
	} else {
		dest_copy.str  = NULL;
		dest_copy.len  = 0;
		dest_copy.type = FSE_INVALID;
	}

	kfse->pid = kfse->type;              // save this off for debugging...
	kfse->uid = (uid_t)(long)kfse->str;  // save this off for debugging...
	kfse->gid = (gid_t)(long)current_thread();

	kfse->str = (char *)0xdeadbeef;      // XXXdbg - catch any cheaters...

	if (dest_copy.type != FSE_INVALID) {
		kfse->dest->str  = (char *)0xbadc0de;   // XXXdbg - catch any cheaters...
		kfse->dest->type = FSE_INVALID;

		if (kfse->dest->kevent_list.le_prev != NULL) {
			num_events_outstanding--;
			LIST_REMOVE(kfse->dest, kevent_list);
			memset(&kfse->dest->kevent_list, 0xa5, sizeof(kfse->dest->kevent_list));
		}

		zfree(event_zone, kfse->dest);
	}

	// mark this fsevent as invalid
	{
		int otype;

		otype = kfse->type;
		kfse->type = FSE_INVALID;

		if (kfse->kevent_list.le_prev != NULL) {
			num_events_outstanding--;
			if (otype == FSE_RENAME) {
				num_pending_rename--;
			}
			LIST_REMOVE(kfse, kevent_list);
			memset(&kfse->kevent_list, 0, sizeof(kfse->kevent_list));
		}
	}

	zfree(event_zone, kfse);

	unlock_fs_event_list();

	// if we have a pointer in the union
	if (copy.str && copy.type != FSE_DOCID_CREATED && copy.type != FSE_DOCID_CHANGED) {
		if (copy.len == 0) {    // and it's not a string
			panic("%s:%d: no more fref.vp!\n", __FILE__, __LINE__);
			// vnode_rele_ext(copy.fref.vp, O_EVTONLY, 0);
		} else {                // else it's a string
			vfs_removename(copy.str);
		}
	}

	if (dest_copy.type != FSE_INVALID && dest_copy.str) {
		if (dest_copy.len == 0) {
			panic("%s:%d: no more fref.vp!\n", __FILE__, __LINE__);
			// vnode_rele_ext(dest_copy.fref.vp, O_EVTONLY, 0);
		} else {
			vfs_removename(dest_copy.str);
		}
	}
}

static int
add_watcher(int8_t *event_list, int32_t num_events, int32_t eventq_size, fs_event_watcher **watcher_out, void *fseh)
{
	int i;
	fs_event_watcher *watcher;

	if (eventq_size <= 0 || eventq_size > 100 * max_kfs_events) {
		eventq_size = max_kfs_events;
	}

	// Note: the event_queue follows the fs_event_watcher struct
	//       in memory so we only have to do one allocation
	watcher = kheap_alloc(KHEAP_DEFAULT,
	    sizeof(fs_event_watcher) + eventq_size * sizeof(kfs_event *), Z_WAITOK);
	if (watcher == NULL) {
		return ENOMEM;
	}

	watcher->event_list   = event_list;
	watcher->num_events   = num_events;
	watcher->devices_not_to_watch = NULL;
	watcher->num_devices  = 0;
	watcher->flags        = 0;
	watcher->event_queue  = (kfs_event **)&watcher[1];
	watcher->eventq_size  = eventq_size;
	watcher->rd           = 0;
	watcher->wr           = 0;
	watcher->blockers     = 0;
	watcher->num_readers  = 0;
	watcher->max_event_id = 0;
	watcher->fseh         = fseh;
	watcher->pid          = proc_selfpid();
	proc_selfname(watcher->proc_name, sizeof(watcher->proc_name));

	watcher->num_dropped  = 0;    // XXXdbg - debugging

	if (!strncmp(watcher->proc_name, "fseventsd", sizeof(watcher->proc_name)) ||
	    !strncmp(watcher->proc_name, "coreservicesd", sizeof(watcher->proc_name)) ||
	    !strncmp(watcher->proc_name, "revisiond", sizeof(watcher->proc_name)) ||
	    !strncmp(watcher->proc_name, "mds", sizeof(watcher->proc_name))) {
		watcher->flags |= WATCHER_APPLE_SYSTEM_SERVICE;
	} else {
		printf("fsevents: watcher %s (pid: %d) - Using /dev/fsevents directly is unsupported.  Migrate to FSEventsFramework\n",
		    watcher->proc_name, watcher->pid);
	}

	lock_watch_table();

	// find a slot for the new watcher
	for (i = 0; i < MAX_WATCHERS; i++) {
		if (watcher_table[i] == NULL) {
			watcher->my_id   = i;
			watcher_table[i] = watcher;
			break;
		}
	}

	if (i >= MAX_WATCHERS) {
		printf("fsevents: too many watchers!\n");
		unlock_watch_table();
		kheap_free(KHEAP_DEFAULT, watcher,
		    sizeof(fs_event_watcher) + watcher->eventq_size * sizeof(kfs_event *));
		return ENOSPC;
	}

	// now update the global list of who's interested in
	// events of a particular type...
	for (i = 0; i < num_events; i++) {
		if (event_list[i] != FSE_IGNORE && i < FSE_MAX_EVENTS) {
			fs_event_type_watchers[i]++;
		}
	}

	unlock_watch_table();

	*watcher_out = watcher;

	return 0;
}



static void
remove_watcher(fs_event_watcher *target)
{
	int i, j, counter = 0;
	fs_event_watcher *watcher;
	kfs_event *kfse;

	lock_watch_table();

	for (j = 0; j < MAX_WATCHERS; j++) {
		watcher = watcher_table[j];
		if (watcher != target) {
			continue;
		}

		watcher_table[j] = NULL;

		for (i = 0; i < watcher->num_events; i++) {
			if (watcher->event_list[i] != FSE_IGNORE && i < FSE_MAX_EVENTS) {
				fs_event_type_watchers[i]--;
			}
		}

		if (watcher->flags & WATCHER_CLOSING) {
			unlock_watch_table();
			return;
		}

		// printf("fsevents: removing watcher %p (rd %d wr %d num_readers %d flags 0x%x)\n", watcher, watcher->rd, watcher->wr, watcher->num_readers, watcher->flags);
		watcher->flags |= WATCHER_CLOSING;
		OSAddAtomic(1, &watcher->num_readers);

		unlock_watch_table();

		while (watcher->num_readers > 1 && counter++ < 5000) {
			lock_watch_table();
			fsevents_wakeup(watcher);    // in case they're asleep
			unlock_watch_table();

			tsleep(watcher, PRIBIO, "fsevents-close", 1);
		}
		if (counter++ >= 5000) {
			// printf("fsevents: close: still have readers! (%d)\n", watcher->num_readers);
			panic("fsevents: close: still have readers! (%d)\n", watcher->num_readers);
		}

		// drain the event_queue

		lck_rw_lock_exclusive(&event_handling_lock);
		while (watcher->rd != watcher->wr) {
			kfse = watcher->event_queue[watcher->rd];
			watcher->event_queue[watcher->rd] = NULL;
			watcher->rd = (watcher->rd + 1) % watcher->eventq_size;
			OSSynchronizeIO();
			if (kfse != NULL && kfse->type != FSE_INVALID && kfse->refcount >= 1) {
				release_event_ref(kfse);
			}
		}
		lck_rw_unlock_exclusive(&event_handling_lock);

		kheap_free(KHEAP_DEFAULT, watcher->event_list,
		    watcher->num_events * sizeof(int8_t));
		kheap_free(KHEAP_DEFAULT, watcher->devices_not_to_watch,
		    watcher->num_devices * sizeof(dev_t));
		kheap_free(KHEAP_DEFAULT, watcher,
		    sizeof(fs_event_watcher) + watcher->eventq_size * sizeof(kfs_event *));
		return;
	}

	unlock_watch_table();
}


#define EVENT_DELAY_IN_MS  10
static thread_call_t event_delivery_timer = NULL;
static int timer_set = 0;


static void
delayed_event_delivery(__unused void *param0, __unused void *param1)
{
	int i;

	lock_watch_table();

	for (i = 0; i < MAX_WATCHERS; i++) {
		if (watcher_table[i] != NULL && watcher_table[i]->rd != watcher_table[i]->wr) {
			fsevents_wakeup(watcher_table[i]);
		}
	}

	timer_set = 0;

	unlock_watch_table();
}


//
// The watch table must be locked before calling this function.
//
static void
schedule_event_wakeup(void)
{
	uint64_t deadline;

	if (event_delivery_timer == NULL) {
		event_delivery_timer = thread_call_allocate((thread_call_func_t)delayed_event_delivery, NULL);
	}

	clock_interval_to_deadline(EVENT_DELAY_IN_MS, 1000 * 1000, &deadline);

	thread_call_enter_delayed(event_delivery_timer, deadline);
	timer_set = 1;
}



#define MAX_NUM_PENDING  16

//
// NOTE: the watch table must be locked before calling
//       this routine.
//
static int
watcher_add_event(fs_event_watcher *watcher, kfs_event *kfse)
{
	if (kfse->abstime > watcher->max_event_id) {
		watcher->max_event_id = kfse->abstime;
	}

	if (((watcher->wr + 1) % watcher->eventq_size) == watcher->rd) {
		watcher->flags |= WATCHER_DROPPED_EVENTS;
		fsevents_wakeup(watcher);
		return ENOSPC;
	}

	OSAddAtomic(1, &kfse->refcount);
	watcher->event_queue[watcher->wr] = kfse;
	OSSynchronizeIO();
	watcher->wr = (watcher->wr + 1) % watcher->eventq_size;

	//
	// wake up the watcher if there are more than MAX_NUM_PENDING events.
	// otherwise schedule a timer (if one isn't already set) which will
	// send any pending events if no more are received in the next
	// EVENT_DELAY_IN_MS milliseconds.
	//
	int32_t num_pending = 0;
	if (watcher->rd < watcher->wr) {
		num_pending = watcher->wr - watcher->rd;
	}

	if (watcher->rd > watcher->wr) {
		num_pending = watcher->wr + watcher->eventq_size - watcher->rd;
	}
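
	// Example: with eventq_size 4096, rd == 100 and wr == 50 the
	// writer has wrapped, so num_pending = 50 + 4096 - 100 = 4046;
	// with rd == 100 and wr == 120 it is simply 120 - 100 = 20.
	// (rd == wr means the queue is empty and neither branch fires.)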

	if (num_pending > (watcher->eventq_size * 3 / 4) && !(watcher->flags & WATCHER_APPLE_SYSTEM_SERVICE)) {
		/* Non-Apple Service is falling behind, start dropping events for this process */
		lck_rw_lock_exclusive(&event_handling_lock);
		while (watcher->rd != watcher->wr) {
			kfse = watcher->event_queue[watcher->rd];
			watcher->event_queue[watcher->rd] = NULL;
			watcher->rd = (watcher->rd + 1) % watcher->eventq_size;
			OSSynchronizeIO();
			if (kfse != NULL && kfse->type != FSE_INVALID && kfse->refcount >= 1) {
				release_event_ref(kfse);
			}
		}
		watcher->flags |= WATCHER_DROPPED_EVENTS;
		lck_rw_unlock_exclusive(&event_handling_lock);

		printf("fsevents: watcher falling behind: %s (pid: %d) rd: %4d wr: %4d q_size: %4d flags: 0x%x\n",
		    watcher->proc_name, watcher->pid, watcher->rd, watcher->wr,
		    watcher->eventq_size, watcher->flags);

		fsevents_wakeup(watcher);
	} else if (num_pending > MAX_NUM_PENDING) {
		fsevents_wakeup(watcher);
	} else if (timer_set == 0) {
		schedule_event_wakeup();
	}

	return 0;
}

static int
fill_buff(uint16_t type, int32_t size, const void *data,
    char *buff, int32_t *_buff_idx, int32_t buff_sz,
    struct uio *uio)
{
	int32_t amt, error = 0, buff_idx = *_buff_idx;
	uint16_t tmp;

	//
	// the +1 on the size is to guarantee that the main data
	// copy loop will always copy at least 1 byte
	//
	if ((buff_sz - buff_idx) <= (int)(2 * sizeof(uint16_t) + 1)) {
		if (buff_idx > uio_resid(uio)) {
			error = ENOSPC;
			goto get_out;
		}

		error = uiomove(buff, buff_idx, uio);
		if (error) {
			goto get_out;
		}
		buff_idx = 0;
	}

	// copy out the header (type & size)
	memcpy(&buff[buff_idx], &type, sizeof(uint16_t));
	buff_idx += sizeof(uint16_t);

	tmp = size & 0xffff;
	memcpy(&buff[buff_idx], &tmp, sizeof(uint16_t));
	buff_idx += sizeof(uint16_t);

	// now copy the body of the data, flushing along the way
	// if the buffer fills up.
	//
	while (size > 0) {
		amt = (size < (buff_sz - buff_idx)) ? size : (buff_sz - buff_idx);
		memcpy(&buff[buff_idx], data, amt);

		size     -= amt;
		buff_idx += amt;
		data      = (const char *)data + amt;
		if (size > (buff_sz - buff_idx)) {
			if (buff_idx > uio_resid(uio)) {
				error = ENOSPC;
				goto get_out;
			}
			error = uiomove(buff, buff_idx, uio);
			if (error) {
				goto get_out;
			}
			buff_idx = 0;
		}

		if (amt == 0) {    // just in case...
			break;
		}
	}

get_out:
	*_buff_idx = buff_idx;

	return error;
}
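
//
// Each argument that fill_buff() emits is framed as a 16-bit type
// tag followed by a 16-bit length and then the payload, so a full
// event on the wire looks like:
//
//   int32  event type                   (written directly by copy_out_kfse)
//   int32  pid
//   [u16 arg type][u16 len][payload]    ... repeated per argument
//   u16    FSE_ARG_DONE
//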

static int copy_out_kfse(fs_event_watcher *watcher, kfs_event *kfse, struct uio *uio) __attribute__((noinline));

static int
copy_out_kfse(fs_event_watcher *watcher, kfs_event *kfse, struct uio *uio)
{
	int       error;
	uint16_t  tmp16;
	int32_t   type;
	kfs_event *cur;
	char      evbuff[512];
	int       evbuff_idx = 0;

	if (kfse->type == FSE_INVALID) {
		panic("fsevents: copy_out_kfse: asked to copy out an invalid event (kfse %p, refcount %d fref ptr %p)\n", kfse, kfse->refcount, kfse->str);
	}

	if (kfse->flags & KFSE_BEING_CREATED) {
		return 0;
	}

	if (((kfse->type == FSE_RENAME) || (kfse->type == FSE_CLONE)) && kfse->dest == NULL) {
		//
		// This can happen if an event gets recycled but we had a
		// pointer to it in our event queue.  The event is the
		// destination of a rename or clone which we'll process separately
		// (that is, another kfse points to this one so it's ok
		// to skip this guy because we'll process it when we process
		// the other one)
		error = 0;
		goto get_out;
	}

	if (watcher->flags & WATCHER_WANTS_EXTENDED_INFO) {
		type = (kfse->type & 0xfff);

		if (kfse->flags & KFSE_CONTAINS_DROPPED_EVENTS) {
			type |= (FSE_CONTAINS_DROPPED_EVENTS << FSE_FLAG_SHIFT);
		} else if (kfse->flags & KFSE_COMBINED_EVENTS) {
			type |= (FSE_COMBINED_EVENTS << FSE_FLAG_SHIFT);
		}
	} else {
		type = (int32_t)kfse->type;
	}

	// copy out the type of the event
	memcpy(evbuff, &type, sizeof(int32_t));
	evbuff_idx += sizeof(int32_t);

	// copy out the pid of the person that generated the event
	memcpy(&evbuff[evbuff_idx], &kfse->pid, sizeof(pid_t));
	evbuff_idx += sizeof(pid_t);

	cur = kfse;

copy_again:

	if (kfse->type == FSE_DOCID_CHANGED || kfse->type == FSE_DOCID_CREATED) {
		dev_t    dev = cur->dev;
		ino64_t  ino = cur->ino;
		uint64_t ival;

		error = fill_buff(FSE_ARG_DEV, sizeof(dev_t), &dev, evbuff, &evbuff_idx, sizeof(evbuff), uio);
		if (error != 0) {
			goto get_out;
		}

		error = fill_buff(FSE_ARG_INO, sizeof(ino64_t), &ino, evbuff, &evbuff_idx, sizeof(evbuff), uio);
		if (error != 0) {
			goto get_out;
		}

		memcpy(&ino, &cur->str, sizeof(ino64_t));
		error = fill_buff(FSE_ARG_INO, sizeof(ino64_t), &ino, evbuff, &evbuff_idx, sizeof(evbuff), uio);
		if (error != 0) {
			goto get_out;
		}

		memcpy(&ival, &cur->uid, sizeof(uint64_t));    // the docid overlays the uid/gid fields
		error = fill_buff(FSE_ARG_INT64, sizeof(uint64_t), &ival, evbuff, &evbuff_idx, sizeof(evbuff), uio);
		if (error != 0) {
			goto get_out;
		}

		goto done;
	}

	if (kfse->type == FSE_UNMOUNT_PENDING) {
		dev_t dev = cur->dev;

		error = fill_buff(FSE_ARG_DEV, sizeof(dev_t), &dev, evbuff, &evbuff_idx, sizeof(evbuff), uio);
		if (error != 0) {
			goto get_out;
		}

		goto done;
	}

	if (cur->str == NULL || cur->str[0] == '\0') {
		printf("copy_out_kfse:2: empty/short path (%s)\n", cur->str);
		error = fill_buff(FSE_ARG_STRING, 2, "/", evbuff, &evbuff_idx, sizeof(evbuff), uio);
	} else {
		error = fill_buff(FSE_ARG_STRING, cur->len, cur->str, evbuff, &evbuff_idx, sizeof(evbuff), uio);
	}
	if (error != 0) {
		goto get_out;
	}

	if (cur->dev == 0 && cur->ino == 0) {
		// this happens when a rename event happens and the
		// destination of the rename did not previously exist.
		// it thus has no other file info so skip copying out
		// the stuff below since it isn't initialized
		goto done;
	}


	if (watcher->flags & WATCHER_WANTS_COMPACT_EVENTS) {
		int32_t finfo_size;

		finfo_size = sizeof(dev_t) + sizeof(ino64_t) + sizeof(int32_t) + sizeof(uid_t) + sizeof(gid_t);
		error = fill_buff(FSE_ARG_FINFO, finfo_size, &cur->ino, evbuff, &evbuff_idx, sizeof(evbuff), uio);
		if (error != 0) {
			goto get_out;
		}
	} else {
		error = fill_buff(FSE_ARG_DEV, sizeof(dev_t), &cur->dev, evbuff, &evbuff_idx, sizeof(evbuff), uio);
		if (error != 0) {
			goto get_out;
		}

		error = fill_buff(FSE_ARG_INO, sizeof(ino64_t), &cur->ino, evbuff, &evbuff_idx, sizeof(evbuff), uio);
		if (error != 0) {
			goto get_out;
		}

		error = fill_buff(FSE_ARG_MODE, sizeof(int32_t), &cur->mode, evbuff, &evbuff_idx, sizeof(evbuff), uio);
		if (error != 0) {
			goto get_out;
		}

		error = fill_buff(FSE_ARG_UID, sizeof(uid_t), &cur->uid, evbuff, &evbuff_idx, sizeof(evbuff), uio);
		if (error != 0) {
			goto get_out;
		}

		error = fill_buff(FSE_ARG_GID, sizeof(gid_t), &cur->gid, evbuff, &evbuff_idx, sizeof(evbuff), uio);
		if (error != 0) {
			goto get_out;
		}
	}


	if (cur->dest) {
		cur = cur->dest;
		goto copy_again;
	}

done:
	// very last thing: the time stamp
	error = fill_buff(FSE_ARG_INT64, sizeof(uint64_t), &cur->abstime, evbuff, &evbuff_idx, sizeof(evbuff), uio);
	if (error != 0) {
		goto get_out;
	}

	// check if the FSE_ARG_DONE will fit
	if (sizeof(uint16_t) > sizeof(evbuff) - evbuff_idx) {
		if (evbuff_idx > uio_resid(uio)) {
			error = ENOSPC;
			goto get_out;
		}
		error = uiomove(evbuff, evbuff_idx, uio);
		if (error) {
			goto get_out;
		}
		evbuff_idx = 0;
	}

	tmp16 = FSE_ARG_DONE;
	memcpy(&evbuff[evbuff_idx], &tmp16, sizeof(uint16_t));
	evbuff_idx += sizeof(uint16_t);

	// flush any remaining data in the buffer (and hopefully
	// in most cases this is the only uiomove we'll do)
	if (evbuff_idx > uio_resid(uio)) {
		error = ENOSPC;
	} else {
		error = uiomove(evbuff, evbuff_idx, uio);
	}

get_out:

	return error;
}
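
//
// For a plain one-file event delivered without
// WATCHER_WANTS_COMPACT_EVENTS, the arguments above come out in
// this order: FSE_ARG_STRING (path), FSE_ARG_DEV, FSE_ARG_INO,
// FSE_ARG_MODE, FSE_ARG_UID, FSE_ARG_GID, then FSE_ARG_INT64 (the
// mach_absolute_time() timestamp) and the FSE_ARG_DONE terminator.
// A rename/clone emits the source and destination argument sets
// back to back (the copy_again loop) before the single timestamp.
//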


static int
fmod_watch(fs_event_watcher *watcher, struct uio *uio)
{
	int          error = 0;
	user_ssize_t last_full_event_resid;
	kfs_event   *kfse;
	uint16_t     tmp16;
	int          skipped;

	last_full_event_resid = uio_resid(uio);

	// need at least 2048 bytes of space (maxpathlen + 1 event buf)
	if (uio_resid(uio) < 2048 || watcher == NULL) {
		return EINVAL;
	}

	if (watcher->flags & WATCHER_CLOSING) {
		return 0;
	}

	if (OSAddAtomic(1, &watcher->num_readers) != 0) {
		// don't allow multiple threads to read from the fd at the same time
		OSAddAtomic(-1, &watcher->num_readers);
		return EAGAIN;
	}

restart_watch:
	if (watcher->rd == watcher->wr) {
		if (watcher->flags & WATCHER_CLOSING) {
			OSAddAtomic(-1, &watcher->num_readers);
			return 0;
		}
		OSAddAtomic(1, &watcher->blockers);

		// there's nothing to do, go to sleep
		error = tsleep((caddr_t)watcher, PUSER | PCATCH, "fsevents_empty", 0);

		OSAddAtomic(-1, &watcher->blockers);

		if (error != 0 || (watcher->flags & WATCHER_CLOSING)) {
			OSAddAtomic(-1, &watcher->num_readers);
			return error;
		}
	}

	// if we dropped events, return that as an event first
	if (watcher->flags & WATCHER_DROPPED_EVENTS) {
		int32_t val = FSE_EVENTS_DROPPED;

		error = uiomove((caddr_t)&val, sizeof(int32_t), uio);
		if (error == 0) {
			val = 0;                  // a fake pid
			error = uiomove((caddr_t)&val, sizeof(int32_t), uio);

			tmp16 = FSE_ARG_DONE;     // makes it a consistent msg
			error = uiomove((caddr_t)&tmp16, sizeof(int16_t), uio);

			last_full_event_resid = uio_resid(uio);
		}

		if (error) {
			OSAddAtomic(-1, &watcher->num_readers);
			return error;
		}

		watcher->flags &= ~WATCHER_DROPPED_EVENTS;
	}

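	// the marker above is a minimal 10-byte record: an int32 type
	// (FSE_EVENTS_DROPPED), an int32 fake pid of 0, and an int16
	// FSE_ARG_DONE, so readers can parse it like any other event.
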
	skipped = 0;

	lck_rw_lock_shared(&event_handling_lock);
	while (uio_resid(uio) > 0 && watcher->rd != watcher->wr) {
		if (watcher->flags & WATCHER_CLOSING) {
			break;
		}

		//
		// check if the event is something of interest to us
		// (since it may have been recycled/reused and changed
		// its type or which device it is for)
		//
		kfse = watcher->event_queue[watcher->rd];
		if (!kfse || kfse->type == FSE_INVALID || kfse->type >= watcher->num_events || kfse->refcount < 1) {
			break;
		}

		if (watcher->event_list[kfse->type] == FSE_REPORT) {
			boolean_t watcher_cares;

			if (watcher->devices_not_to_watch == NULL) {
				watcher_cares = true;
			} else {
				lock_watch_table();
				watcher_cares = watcher_cares_about_dev(watcher, kfse->dev);
				unlock_watch_table();
			}

			if (watcher_cares) {
				if (!(watcher->flags & WATCHER_APPLE_SYSTEM_SERVICE) && kfse->type != FSE_DOCID_CREATED && kfse->type != FSE_DOCID_CHANGED && is_ignored_directory(kfse->str)) {
					// If this is not an Apple System Service, skip specified directories
					// radar://12034844
					error = 0;
					skipped = 1;
				} else {
					skipped = 0;
					if (last_event_ptr == kfse) {
						last_event_ptr = NULL;
						last_event_type = -1;
						last_coalesced_time = 0;
					}
					error = copy_out_kfse(watcher, kfse, uio);
					if (error != 0) {
						// if an event won't fit or encountered an error while
						// we were copying it out, then backup to the last full
						// event and just bail out.  if the error was ENOENT
						// then we can continue regular processing, otherwise
						// we should unlock things and return.
						uio_setresid(uio, last_full_event_resid);
						if (error != ENOENT) {
							lck_rw_unlock_shared(&event_handling_lock);
							error = 0;
							goto get_out;
						}
					}

					last_full_event_resid = uio_resid(uio);
				}
			}
		}

		watcher->event_queue[watcher->rd] = NULL;
		watcher->rd = (watcher->rd + 1) % watcher->eventq_size;
		OSSynchronizeIO();
		release_event_ref(kfse);
	}
	lck_rw_unlock_shared(&event_handling_lock);

	if (skipped && error == 0) {
		goto restart_watch;
	}

get_out:
	OSAddAtomic(-1, &watcher->num_readers);

	return error;
}


//
// Shoo watchers away from a volume that's about to be unmounted
// (so that it can be cleanly unmounted).
//
void
fsevent_unmount(__unused struct mount *mp, __unused vfs_context_t ctx)
{
#if !defined(XNU_TARGET_OS_OSX)
	dev_t dev = mp->mnt_vfsstat.f_fsid.val[0];
	int error, waitcount = 0;
	struct timespec ts = {.tv_sec = 1, .tv_nsec = 0};

	// wait for any other pending unmounts to complete
	lock_watch_table();
	while (fsevent_unmount_dev != 0) {
		error = msleep((caddr_t)&fsevent_unmount_dev, &watch_table_lock, PRIBIO, "fsevent_unmount_wait", &ts);
		if (error == EWOULDBLOCK) {
			error = 0;
		}
		if (!error && (++waitcount >= 10)) {
			error = EWOULDBLOCK;
			printf("timeout waiting to signal unmount pending for dev %d (fsevent_unmount_dev %d)\n", dev, fsevent_unmount_dev);
		}
		if (error) {
			// there's a problem, bail out
			unlock_watch_table();
			return;
		}
	}
	if (fs_event_type_watchers[FSE_UNMOUNT_PENDING] == 0) {
		// nobody watching for unmount pending events
		unlock_watch_table();
		return;
	}
	// this is now the current unmount pending
	fsevent_unmount_dev = dev;
	fsevent_unmount_ack_count = fs_event_type_watchers[FSE_UNMOUNT_PENDING];
	unlock_watch_table();

	// send an event to notify the watcher they need to get off the mount
	error = add_fsevent(FSE_UNMOUNT_PENDING, ctx, FSE_ARG_DEV, dev, FSE_ARG_DONE);

	// wait for acknowledgment(s) (give up if it takes too long)
	lock_watch_table();
	waitcount = 0;
	while (fsevent_unmount_dev == dev) {
		error = msleep((caddr_t)&fsevent_unmount_dev, &watch_table_lock, PRIBIO, "fsevent_unmount_pending", &ts);
		if (error == EWOULDBLOCK) {
			error = 0;
		}
		if (!error && (++waitcount >= 10)) {
			error = EWOULDBLOCK;
			printf("unmount pending ack timeout for dev %d\n", dev);
		}
		if (error) {
			// there's a problem, bail out
			if (fsevent_unmount_dev == dev) {
				fsevent_unmount_dev = 0;
				fsevent_unmount_ack_count = 0;
			}
			wakeup((caddr_t)&fsevent_unmount_dev);
			break;
		}
	}
	unlock_watch_table();
#endif /* ! XNU_TARGET_OS_OSX */
}
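
//
// The unmount handshake: the function above publishes the dev in
// fsevent_unmount_dev, posts FSE_UNMOUNT_PENDING, and then sleeps
// until every watcher counted in fsevent_unmount_ack_count has
// responded with the FSEVENTS_UNMOUNT_PENDING_ACK ioctl (handled
// in fseventsf_ioctl below), which clears the dev and wakes this
// thread.  The 10 x 1s msleep bound keeps a stuck watcher from
// wedging the unmount forever.
//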


//
// /dev/fsevents device code
//
static int fsevents_installed = 0;

typedef struct fsevent_handle {
	UInt32            flags;
	SInt32            active;
	fs_event_watcher *watcher;
	struct klist      knotes;
	struct selinfo    si;
} fsevent_handle;

#define FSEH_CLOSING  0x0001

static int
fseventsf_read(struct fileproc *fp, struct uio *uio,
    __unused int flags, __unused vfs_context_t ctx)
{
	fsevent_handle *fseh = (struct fsevent_handle *)fp->fp_glob->fg_data;
	int error;

	error = fmod_watch(fseh->watcher, uio);

	return error;
}


#pragma pack(push, 4)
typedef struct fsevent_dev_filter_args32 {
	uint32_t      num_devices;
	user32_addr_t devices;
} fsevent_dev_filter_args32;
typedef struct fsevent_dev_filter_args64 {
	uint32_t      num_devices;
	user64_addr_t devices;
} fsevent_dev_filter_args64;
#pragma pack(pop)

#define FSEVENTS_DEVICE_FILTER_32  _IOW('s', 100, fsevent_dev_filter_args32)
#define FSEVENTS_DEVICE_FILTER_64  _IOW('s', 100, fsevent_dev_filter_args64)
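
//
// Illustrative user-space sketch (not compiled here): how a client
// with an open /dev/fsevents descriptor might install a device
// filter.  The fsevent_dev_filter_args struct and the
// FSEVENTS_DEVICE_FILTER ioctl name are assumed from the public
// <sys/fsevents.h>; only the argument layout is taken from the
// kernel-side structs above.
//
#if 0
#include <sys/ioctl.h>
#include <sys/fsevents.h>

static int
ignore_one_device(int fsevents_fd, dev_t devno)
{
	dev_t list[1] = { devno };
	fsevent_dev_filter_args args = {
		.num_devices = 1,     /* up to 256 accepted by the kernel */
		.devices     = list,
	};
	/* the kernel dispatches this as the _32/_64 variant above,
	 * depending on the caller's ABI */
	return ioctl(fsevents_fd, FSEVENTS_DEVICE_FILTER, &args);
}
#endif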
91447636
A
1730
1731static int
2d21ac55 1732fseventsf_ioctl(struct fileproc *fp, u_long cmd, caddr_t data, vfs_context_t ctx)
91447636 1733{
f427ee49 1734 fsevent_handle *fseh = (struct fsevent_handle *)fp->fp_glob->fg_data;
0a7de745
A
1735 int ret = 0;
1736 fsevent_dev_filter_args64 *devfilt_args, _devfilt_args;
91447636 1737
0a7de745
A
1738 OSAddAtomic(1, &fseh->active);
1739 if (fseh->flags & FSEH_CLOSING) {
1740 OSAddAtomic(-1, &fseh->active);
1741 return 0;
1742 }
0c530ab8 1743
0a7de745 1744 switch (cmd) {
91447636
A
1745 case FIONBIO:
1746 case FIOASYNC:
0a7de745 1747 break;
91447636 1748
2d21ac55 1749 case FSEVENTS_WANT_COMPACT_EVENTS: {
0a7de745
A
1750 fseh->watcher->flags |= WATCHER_WANTS_COMPACT_EVENTS;
1751 break;
2d21ac55
A
1752 }
1753
1754 case FSEVENTS_WANT_EXTENDED_INFO: {
0a7de745
A
1755 fseh->watcher->flags |= WATCHER_WANTS_EXTENDED_INFO;
1756 break;
2d21ac55
A
1757 }
1758
b0d623f7
A
1759 case FSEVENTS_GET_CURRENT_ID: {
1760 *(uint64_t *)data = fseh->watcher->max_event_id;
1761 ret = 0;
1762 break;
1763 }
1764
3e170ce0 1765 case FSEVENTS_DEVICE_FILTER_32: {
0a7de745
A
1766 if (proc_is64bit(vfs_context_proc(ctx))) {
1767 ret = EINVAL;
1768 break;
1769 }
1770 fsevent_dev_filter_args32 *devfilt_args32 = (fsevent_dev_filter_args32 *)data;
3e170ce0 1771
0a7de745
A
1772 devfilt_args = &_devfilt_args;
1773 memset(devfilt_args, 0, sizeof(fsevent_dev_filter_args64));
1774 devfilt_args->num_devices = devfilt_args32->num_devices;
1775 devfilt_args->devices = CAST_USER_ADDR_T(devfilt_args32->devices);
1776 goto handle_dev_filter;
3e170ce0
A
1777 }
1778
1779 case FSEVENTS_DEVICE_FILTER_64:
0a7de745
A
1780 if (!proc_is64bit(vfs_context_proc(ctx))) {
1781 ret = EINVAL;
1782 break;
1783 }
1784 devfilt_args = (fsevent_dev_filter_args64 *)data;
3e170ce0 1785
1786handle_dev_filter:
1787 {
f427ee49 1788 int new_num_devices, old_num_devices = 0;
0a7de745 1789 dev_t *devices_not_to_watch, *tmp = NULL;
813fb2f6 1790
1791 if (devfilt_args->num_devices > 256) {
1792 ret = EINVAL;
1793 break;
1794 }
91447636 1795
1796 new_num_devices = devfilt_args->num_devices;
1797 if (new_num_devices == 0) {
1798 lock_watch_table();
91447636 1799
1800 tmp = fseh->watcher->devices_not_to_watch;
1801 fseh->watcher->devices_not_to_watch = NULL;
f427ee49 1802 old_num_devices = fseh->watcher->num_devices;
1803 fseh->watcher->num_devices = new_num_devices;
1804
1805 unlock_watch_table();
f427ee49 1806 kheap_free(KHEAP_DEFAULT, tmp, old_num_devices * sizeof(dev_t));
1807 break;
1808 }
1809
1810 devices_not_to_watch = kheap_alloc(KHEAP_DEFAULT,
1811 new_num_devices * sizeof(dev_t), Z_WAITOK);
1812 if (devices_not_to_watch == NULL) {
1813 ret = ENOMEM;
1814 break;
1815 }
1816
f427ee49 1817 ret = copyin((user_addr_t)devfilt_args->devices,
1818 (void *)devices_not_to_watch,
1819 new_num_devices * sizeof(dev_t));
1820 if (ret) {
1821 kheap_free(KHEAP_DEFAULT, devices_not_to_watch,
1822 new_num_devices * sizeof(dev_t));
1823 break;
1824 }
1825
1826 lock_watch_table();
f427ee49 1827 old_num_devices = fseh->watcher->num_devices;
1828 fseh->watcher->num_devices = new_num_devices;
1829 tmp = fseh->watcher->devices_not_to_watch;
1830 fseh->watcher->devices_not_to_watch = devices_not_to_watch;
1831 unlock_watch_table();
91447636 1832
f427ee49 1833 kheap_free(KHEAP_DEFAULT, tmp, old_num_devices * sizeof(dev_t));
91447636 1834
1835 break;
1836 }
91447636 1837
813fb2f6 1838 case FSEVENTS_UNMOUNT_PENDING_ACK: {
1839 lock_watch_table();
1840 dev_t dev = *(dev_t *)data;
1841 if (fsevent_unmount_dev == dev) {
1842 if (--fsevent_unmount_ack_count <= 0) {
1843 fsevent_unmount_dev = 0;
1844 wakeup((caddr_t)&fsevent_unmount_dev);
1845 }
1846 } else {
1847 printf("unexpected unmount pending ack %d (%d)\n", dev, fsevent_unmount_dev);
1848 ret = EINVAL;
813fb2f6 1849 }
1850 unlock_watch_table();
1851 break;
1852 }
1853
91447636 1854 default:
1855 ret = EINVAL;
1856 break;
1857 }
91447636 1858
1859 OSAddAtomic(-1, &fseh->active);
1860 return ret;
1861}
1862
1863
1864static int
2d21ac55 1865fseventsf_select(struct fileproc *fp, int which, void *wql, vfs_context_t ctx)
91447636 1866{
f427ee49 1867 fsevent_handle *fseh = (struct fsevent_handle *)fp->fp_glob->fg_data;
0a7de745 1868 int ready = 0;
91447636 1869
1870 if ((which != FREAD) || (fseh->watcher->flags & WATCHER_CLOSING)) {
1871 return 0;
1872 }
1873
1874
1875 // if there's nothing in the queue, we're not ready
1876 if (fseh->watcher->rd != fseh->watcher->wr) {
1877 ready = 1;
1878 }
91447636 1879
1880 if (!ready) {
1881 selrecord(vfs_context_proc(ctx), &fseh->si, wql);
1882 }
91447636 1883
0a7de745 1884 return ready;
1885}
1886
1887
2d21ac55 1888#if NOTUSED
91447636 1889static int
2d21ac55 1890fseventsf_stat(__unused struct fileproc *fp, __unused struct stat *sb, __unused vfs_context_t ctx)
91447636 1891{
0a7de745 1892 return ENOTSUP;
91447636 1893}
2d21ac55 1894#endif
1895
1896static int
2d21ac55 1897fseventsf_close(struct fileglob *fg, __unused vfs_context_t ctx)
91447636 1898{
1899 fsevent_handle *fseh = (struct fsevent_handle *)fg->fg_data;
1900 fs_event_watcher *watcher;
2d21ac55 1901
1902 OSBitOrAtomic(FSEH_CLOSING, &fseh->flags);
1903 while (OSAddAtomic(0, &fseh->active) > 0) {
1904 tsleep((caddr_t)fseh->watcher, PRIBIO, "fsevents-close", 1);
1905 }
91447636 1906
1907 watcher = fseh->watcher;
1908 fg->fg_data = NULL;
1909 fseh->watcher = NULL;
0c530ab8 1910
0a7de745 1911 remove_watcher(watcher);
f427ee49 1912 kheap_free(KHEAP_DEFAULT, fseh, sizeof(fsevent_handle));
91447636 1913
0a7de745 1914 return 0;
1915}
1916
1917static void
1918filt_fsevent_detach(struct knote *kn)
1919{
1920 fsevent_handle *fseh = (struct fsevent_handle *)kn->kn_hook;
1921
1922 lock_watch_table();
1923
1924 KNOTE_DETACH(&fseh->knotes, kn);
0a7de745 1925
1926 unlock_watch_table();
1927}
1928
0a7de745 1929/*
b0d623f7 1930 * Determine whether this knote should be active
1931 *
1932 * This is kind of subtle.
1933 * --First, notice if the vnode has been revoked: if so, override the hint
1934 * --EVFILT_READ knotes are checked no matter what the hint is
1935 * --Other knotes activate based on hint.
1936 * --If hint is revoke, set special flags and activate
1937 */
1938static int
cb323159 1939filt_fsevent_common(struct knote *kn, struct kevent_qos_s *kev, long hint)
1940{
1941 fsevent_handle *fseh = (struct fsevent_handle *)kn->kn_hook;
1942 int activate = 0;
1943 int32_t rd, wr, amt;
cb323159 1944 int64_t data = 0;
1945
1946 if (NOTE_REVOKE == hint) {
1947 kn->kn_flags |= (EV_EOF | EV_ONESHOT);
1948 activate = 1;
1949 }
1950
1951 rd = fseh->watcher->rd;
1952 wr = fseh->watcher->wr;
1953 if (rd <= wr) {
0a7de745 1954 amt = wr - rd;
b0d623f7 1955 } else {
0a7de745 1956 amt = fseh->watcher->eventq_size - (rd - wr);
1957 }
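	// worked example of the wrap-around branch (illustrative values):
	// with eventq_size = 8, rd = 6 and wr = 2 the pending slots are
	// 6, 7, 0 and 1, so amt = 8 - (6 - 2) = 4 events waiting.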
1958
1959 switch (kn->kn_filter) {
1960 case EVFILT_READ:
1961 data = amt;
1962 activate = (data != 0);
1963 break;
1964 case EVFILT_VNODE:
1965 /* Check events this note matches against the hint */
1966 if (kn->kn_sfflags & hint) {
1967 kn->kn_fflags |= hint; /* Set which event occurred */
1968 }
1969 if (kn->kn_fflags != 0) {
1970 activate = 1;
b0d623f7 1971 }
0a7de745 1972 break;
cb323159 1973 default:
1974 // nothing to do...
1975 break;
1976 }
b0d623f7 1977
1978 if (activate && kev) {
1979 knote_fill_kevent(kn, kev, data);
1980 }
0a7de745 1981 return activate;
1982}
1983
1984static int
1985filt_fsevent(struct knote *kn, long hint)
1986{
1987 return filt_fsevent_common(kn, NULL, hint);
1988}
b0d623f7 1989
39037602 1990static int
cb323159 1991filt_fsevent_touch(struct knote *kn, struct kevent_qos_s *kev)
1992{
1993 int res;
1994
1995 lock_watch_table();
1996
1997 /* accept new fflags/data as saved */
1998 kn->kn_sfflags = kev->fflags;
1999 kn->kn_sdata = kev->data;
2000
2001 /* restrict the current results to the (smaller?) set of new interest */
2002 /*
2003 * For compatibility with previous implementations, we leave kn_fflags
2004 * as they were before.
2005 */
2006 //kn->kn_fflags &= kev->fflags;
2007
2008 /* determine if the filter is now fired */
cb323159 2009 res = filt_fsevent_common(kn, NULL, 0);
2010
2011 unlock_watch_table();
2012
2013 return res;
2014}
2015
2016static int
cb323159 2017filt_fsevent_process(struct knote *kn, struct kevent_qos_s *kev)
39037602 2018{
2019 int res;
2020
2021 lock_watch_table();
2022
cb323159 2023 res = filt_fsevent_common(kn, kev, 0);
2024
2025 unlock_watch_table();
cb323159 2026
2027 return res;
2028}
2029
2030SECURITY_READ_ONLY_EARLY(struct filterops) fsevent_filtops = {
2031 .f_isfd = 1,
2032 .f_attach = NULL,
2033 .f_detach = filt_fsevent_detach,
2034 .f_event = filt_fsevent,
2035 .f_touch = filt_fsevent_touch,
2036 .f_process = filt_fsevent_process,
2037};
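//
// Illustrative only: how a client might wait on a cloned fsevents fd
// with kqueue instead of select(). Assumes "fd" came back from the
// FSEVENTS_CLONE ioctl (defined further down); EVFILT_READ activates
// once the watcher's ring is non-empty, and filt_fsevent_common()
// reports the number of pending events in the kevent data field.
//
#if 0
#include <sys/event.h>

static int
wait_for_fsevents(int fd)
{
	struct kevent req, rsp;
	int kq = kqueue();

	if (kq < 0) {
		return -1;
	}
	EV_SET(&req, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
	if (kevent(kq, &req, 1, NULL, 0, NULL) < 0) {
		return -1;
	}
	// blocks until at least one event is queued for this watcher
	return kevent(kq, NULL, 0, &rsp, 1, NULL);
}
#endif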
2038
2d21ac55 2039static int
2040fseventsf_kqfilter(struct fileproc *fp, struct knote *kn,
2041 __unused struct kevent_qos_s *kev)
91447636 2042{
f427ee49 2043 fsevent_handle *fseh = (struct fsevent_handle *)fp->fp_glob->fg_data;
0a7de745 2044 int res;
b0d623f7 2045
0a7de745 2046 kn->kn_hook = (void*)fseh;
5ba3f43e 2047 kn->kn_filtid = EVFILTID_FSEVENT;
39037602 2048
0a7de745 2049 lock_watch_table();
b0d623f7 2050
0a7de745 2051 KNOTE_ATTACH(&fseh->knotes, kn);
b0d623f7 2052
0a7de745 2053 /* check to see if it is fired already */
cb323159 2054 res = filt_fsevent_common(kn, NULL, 0);
39037602 2055
0a7de745 2056 unlock_watch_table();
39037602 2057
0a7de745 2058 return res;
2059}
2060
2061
2062static int
2d21ac55 2063fseventsf_drain(struct fileproc *fp, __unused vfs_context_t ctx)
91447636 2064{
0a7de745 2065 int counter = 0;
f427ee49 2066 fsevent_handle *fseh = (struct fsevent_handle *)fp->fp_glob->fg_data;
2067
2068 // if there are people still waiting, sleep for 10ms to
2069 // let them clean up and get out of there. however we
2070 // also don't want to get stuck forever, so if they haven't
2071 // exited after 5 seconds we tear things down anyway.
2072 while (fseh->watcher->blockers && counter++ < 500) {
2073 // issue wakeup in case anyone is blocked waiting for an event
2074 // do this each time we wakeup in case the blocker missed
2075 // the wakeup due to the unprotected test of WATCHER_CLOSING
2076 // and decision to tsleep in fmod_watch... this bit of
2077 // latency is a decent tradeoff against not having to
2078 // take and drop a lock in fmod_watch
2079 lock_watch_table();
2080 fsevents_wakeup(fseh->watcher);
2081 unlock_watch_table();
91447636 2082
2083 tsleep((caddr_t)fseh->watcher, PRIBIO, "watcher-close", 1);
2084 }
91447636 2085
0a7de745 2086 return 0;
2087}
2088
2089
2090static int
2d21ac55 2091fseventsopen(__unused dev_t dev, __unused int flag, __unused int mode, __unused struct proc *p)
91447636 2092{
2093 if (!kauth_cred_issuser(kauth_cred_get())) {
2094 return EPERM;
2095 }
2096
2097 return 0;
2098}
2099
2100static int
2d21ac55 2101fseventsclose(__unused dev_t dev, __unused int flag, __unused int mode, __unused struct proc *p)
91447636 2102{
0a7de745 2103 return 0;
2104}
2105
2106static int
2d21ac55 2107fseventsread(__unused dev_t dev, __unused struct uio *uio, __unused int ioflag)
91447636 2108{
0a7de745 2109 return EIO;
2110}
2111
2d21ac55 2112
91447636 2113static int
f427ee49 2114parse_buffer_and_add_events(const char *buffer, size_t bufsize, vfs_context_t ctx, size_t *remainder)
91447636 2115{
2116 const fse_info *finfo, *dest_finfo;
2117 const char *path, *ptr, *dest_path, *event_start = buffer;
2118 size_t path_len, dest_path_len;
2119 int type, err = 0;
2120
2121
2122 ptr = buffer;
2123 while ((ptr + sizeof(int) + sizeof(fse_info) + 1) < buffer + bufsize) {
2124 type = *(const int *)ptr;
2125 if (type < 0 || type >= FSE_MAX_EVENTS) {
2126 err = EINVAL;
2127 break;
2128 }
2d21ac55 2129
0a7de745 2130 ptr += sizeof(int);
2d21ac55 2131
2132 finfo = (const fse_info *)ptr;
2133 ptr += sizeof(fse_info);
2d21ac55 2134
2135 path = ptr;
2136 while (ptr < buffer + bufsize && *ptr != '\0') {
2137 ptr++;
2138 }
2d21ac55 2139
2140 if (ptr >= buffer + bufsize) {
2141 break;
2142 }
2d21ac55 2143
0a7de745 2144 ptr++; // advance over the trailing '\0'
2d21ac55 2145
0a7de745 2146 path_len = ptr - path;
2d21ac55 2147
2148 if (type != FSE_RENAME && type != FSE_EXCHANGE && type != FSE_CLONE) {
2149 event_start = ptr; // record where the next event starts
2d21ac55 2150
2151 err = add_fsevent(type, ctx, FSE_ARG_STRING, path_len, path, FSE_ARG_FINFO, finfo, FSE_ARG_DONE);
2152 if (err) {
2153 break;
2154 }
2155 continue;
2156 }
2d21ac55 2157
2158 //
2159 // if we're here we have to slurp up the destination finfo
2160 // and path so that we can pass them to the add_fsevent()
2161 // call. basically it's a copy of the above code.
2162 //
2163 dest_finfo = (const fse_info *)ptr;
2164 ptr += sizeof(fse_info);
2165
2166 dest_path = ptr;
2167 while (ptr < buffer + bufsize && *ptr != '\0') {
2168 ptr++;
2169 }
2d21ac55 2170
2171 if (ptr >= buffer + bufsize) {
2172 break;
2173 }
2d21ac55 2174
2175 ptr++; // advance over the trailing '\0'
2176 event_start = ptr; // record where the next event starts
2177
2178 dest_path_len = ptr - dest_path;
2179 //
2180 // If the destination inode number is non-zero, generate a rename
2181 // with both source and destination FSE_ARG_FINFO. Otherwise generate
2182 // a rename with only one FSE_ARG_FINFO. If you need to inject an
2183 // exchange with an inode of zero, just make that inode (and its path)
2184 // come in as the first one, not the second.
2185 //
2186 if (dest_finfo->ino) {
2187 err = add_fsevent(type, ctx,
2188 FSE_ARG_STRING, path_len, path, FSE_ARG_FINFO, finfo,
2189 FSE_ARG_STRING, dest_path_len, dest_path, FSE_ARG_FINFO, dest_finfo,
2190 FSE_ARG_DONE);
2191 } else {
2192 err = add_fsevent(type, ctx,
2193 FSE_ARG_STRING, path_len, path, FSE_ARG_FINFO, finfo,
2194 FSE_ARG_STRING, dest_path_len, dest_path,
2195 FSE_ARG_DONE);
2196 }
b0d623f7 2197
2198 if (err) {
2199 break;
2200 }
2201 }
2202
2203 // if the last event wasn't complete, set the remainder
2204 // to the number of bytes from the start of that partial
2205 // event to the end of the buffer.
2206 *remainder = (size_t)((buffer + bufsize) - event_start);
2d21ac55 2207
0a7de745 2208 return err;
2209}
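//
// Illustrative only: the byte layout parse_buffer_and_add_events()
// expects for each injected event, sketched as a hypothetical packing
// helper (no bounds checking). FSE_RENAME, FSE_EXCHANGE and FSE_CLONE
// events append a second fse_info + path pair for the destination.
//
#if 0
static size_t
pack_one_event(char *buf, int type, const fse_info *finfo, const char *path)
{
	char *p = buf;

	memcpy(p, &type, sizeof(int));        // 32-bit event type
	p += sizeof(int);
	memcpy(p, finfo, sizeof(fse_info));   // fse_info for the file
	p += sizeof(fse_info);
	strlcpy(p, path, MAXPATHLEN);         // NUL-terminated path
	p += strlen(p) + 1;

	return (size_t)(p - buf);             // bytes this event consumed
}
#endif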
2210
2211
2212//
2213// Note: this buffer size can never be less than
2214// 2*MAXPATHLEN + 2*sizeof(fse_info) + sizeof(int)
2215// because that is the max size for a single event.
2216// I made it 4k to be a "nice" size. Making it
2217// smaller is not a good idea.
2218//
2219#define WRITE_BUFFER_SIZE 4096
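// e.g. with MAXPATHLEN at 1024 that floor works out to roughly
// 2*1024 + 2*sizeof(fse_info) + 4 bytes -- a bit over 2KB -- so
// 4096 holds any single event with room to spare.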
0a7de745 2220char *write_buffer = NULL;
2221
2222static int
2223fseventswrite(__unused dev_t dev, struct uio *uio, __unused int ioflag)
2224{
2225 int error = 0;
2226 size_t count, offset = 0, remainder = 0;
0a7de745 2227 vfs_context_t ctx = vfs_context_current();
2d21ac55 2228
0a7de745 2229 lck_mtx_lock(&event_writer_lock);
2d21ac55 2230
2231 if (write_buffer == NULL) {
2232 if (kmem_alloc(kernel_map, (vm_offset_t *)&write_buffer, WRITE_BUFFER_SIZE, VM_KERN_MEMORY_FILE)) {
2233 lck_mtx_unlock(&event_writer_lock);
2234 return ENOMEM;
2235 }
2236 }
2237
2238 //
2239 // this loop copies in and processes the events written.
2240 // it takes care to copy in reasonable size chunks and
2241 // process them. if there is an event that spans a chunk
2242 // boundary we're careful to copy those bytes down to the
2243 // beginning of the buffer and read the next chunk in just
2244 // after it.
2d21ac55 2245 //
0a7de745 2246 while (uio_resid(uio)) {
f427ee49 2247 count = MIN(WRITE_BUFFER_SIZE - offset, (size_t)uio_resid(uio));
0a7de745 2248
f427ee49 2249 error = uiomove(write_buffer + offset, (int)count, uio);
2250 if (error) {
2251 break;
2252 }
2253
2254 error = parse_buffer_and_add_events(write_buffer, offset + count, ctx, &remainder);
2255 if (error) {
2256 break;
2257 }
2258
2259 //
2260 // if there's any remainder, copy it down to the beginning
2261 // of the buffer so that it will get processed the next time
2262 // through the loop. note that the remainder always starts
2263 // at an event boundary.
2264 //
2265 memmove(write_buffer, (write_buffer + count + offset) - remainder, remainder);
2266 offset = remainder;
2d21ac55 2267 }
2d21ac55 2268
0a7de745 2269 lck_mtx_unlock(&event_writer_lock);
2d21ac55 2270
0a7de745 2271 return error;
2272}
2273
2274
39236c6e 2275static const struct fileops fsevents_fops = {
2276 .fo_type = DTYPE_FSEVENTS,
2277 .fo_read = fseventsf_read,
2278 .fo_write = fo_no_write,
2279 .fo_ioctl = fseventsf_ioctl,
2280 .fo_select = fseventsf_select,
2281 .fo_close = fseventsf_close,
0a7de745 2282 .fo_kqfilter = fseventsf_kqfilter,
cb323159 2283 .fo_drain = fseventsf_drain,
2284};
2285
3e170ce0 2286typedef struct fsevent_clone_args32 {
2287 user32_addr_t event_list;
2288 int32_t num_events;
2289 int32_t event_queue_depth;
2290 user32_addr_t fd;
3e170ce0 2291} fsevent_clone_args32;
2d21ac55 2292
3e170ce0 2293typedef struct fsevent_clone_args64 {
2294 user64_addr_t event_list;
2295 int32_t num_events;
2296 int32_t event_queue_depth;
2297 user64_addr_t fd;
3e170ce0 2298} fsevent_clone_args64;
91447636 2299
2300#define FSEVENTS_CLONE_32 _IOW('s', 1, fsevent_clone_args32)
2301#define FSEVENTS_CLONE_64 _IOW('s', 1, fsevent_clone_args64)
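//
// Illustrative only: a minimal userspace sketch of cloning a watcher
// fd, assuming the single fsevent_clone_args definition and the
// FSEVENTS_CLONE ioctl name that userspace sees in <sys/fsevents.h>.
// Each event_list slot holds FSE_REPORT or FSE_IGNORE, indexed by
// event type; opening /dev/fsevents requires root (see fseventsopen).
//
#if 0
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/fsevents.h>

static int
clone_watcher(void)
{
	int8_t events[FSE_MAX_EVENTS];
	int32_t fd = -1;
	int devfd;
	fsevent_clone_args args = {
		.event_list        = events,
		.num_events        = FSE_MAX_EVENTS,
		.event_queue_depth = 1024,    // kernel clamps/validates this
		.fd                = &fd,
	};

	for (int i = 0; i < FSE_MAX_EVENTS; i++) {
		events[i] = FSE_REPORT;
	}
	if ((devfd = open("/dev/fsevents", O_RDONLY)) < 0) {
		return -1;
	}
	if (ioctl(devfd, FSEVENTS_CLONE, &args) < 0) {
		close(devfd);
		return -1;
	}
	close(devfd);   // subsequent reads use the cloned fd
	return fd;
}
#endif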
2302
2303static int
2d21ac55 2304fseventsioctl(__unused dev_t dev, u_long cmd, caddr_t data, __unused int flag, struct proc *p)
91447636 2305{
2306 struct fileproc *f;
2307 int fd, error;
2308 fsevent_handle *fseh = NULL;
2309 fsevent_clone_args64 *fse_clone_args, _fse_clone;
2310 int8_t *event_list;
2311 int is64bit = proc_is64bit(p);
2312
2313 switch (cmd) {
3e170ce0 2314 case FSEVENTS_CLONE_32: {
2315 if (is64bit) {
2316 return EINVAL;
2317 }
2318 fsevent_clone_args32 *args32 = (fsevent_clone_args32 *)data;
2d21ac55 2319
2320 fse_clone_args = &_fse_clone;
2321 memset(fse_clone_args, 0, sizeof(fsevent_clone_args64));
2d21ac55 2322
2323 fse_clone_args->event_list = CAST_USER_ADDR_T(args32->event_list);
2324 fse_clone_args->num_events = args32->num_events;
2325 fse_clone_args->event_queue_depth = args32->event_queue_depth;
2326 fse_clone_args->fd = CAST_USER_ADDR_T(args32->fd);
2327 goto handle_clone;
2d21ac55 2328 }
2d21ac55 2329
3e170ce0 2330 case FSEVENTS_CLONE_64:
2331 if (!is64bit) {
2332 return EINVAL;
2333 }
2334 fse_clone_args = (fsevent_clone_args64 *)data;
2d21ac55 2335
0a7de745 2336handle_clone:
f427ee49 2337 if (fse_clone_args->num_events <= 0 || fse_clone_args->num_events > 4096) {
2338 return EINVAL;
2339 }
91447636 2340
f427ee49 2341 fseh = kheap_alloc(KHEAP_DEFAULT, sizeof(fsevent_handle), Z_WAITOK | Z_ZERO);
2342 if (fseh == NULL) {
2343 return ENOMEM;
2344 }
2345
2346 klist_init(&fseh->knotes);
2347
2348 event_list = kheap_alloc(KHEAP_DEFAULT,
2349 fse_clone_args->num_events * sizeof(int8_t), Z_WAITOK);
0a7de745 2350 if (event_list == NULL) {
f427ee49 2351 kheap_free(KHEAP_DEFAULT, fseh, sizeof(fsevent_handle));
2352 return ENOMEM;
2353 }
2354
f427ee49 2355 error = copyin((user_addr_t)fse_clone_args->event_list,
2356 (void *)event_list,
2357 fse_clone_args->num_events * sizeof(int8_t));
2358 if (error) {
2359 kheap_free(KHEAP_DEFAULT, event_list,
2360 fse_clone_args->num_events * sizeof(int8_t));
2361 kheap_free(KHEAP_DEFAULT, fseh, sizeof(fsevent_handle));
2362 return error;
2363 }
2364
2365 /*
2366 * Lock down the user's "fd" result buffer so it's safe
2367 * to hold locks while we copy it out.
2368 */
2369 error = vslock((user_addr_t)fse_clone_args->fd,
2370 sizeof(int32_t));
2371 if (error) {
2372 kheap_free(KHEAP_DEFAULT, event_list,
2373 fse_clone_args->num_events * sizeof(int8_t));
2374 kheap_free(KHEAP_DEFAULT, fseh, sizeof(fsevent_handle));
2375 return error;
2376 }
2377
2378 error = add_watcher(event_list,
2379 fse_clone_args->num_events,
2380 fse_clone_args->event_queue_depth,
2381 &fseh->watcher,
2382 fseh);
2383 if (error) {
2384 vsunlock((user_addr_t)fse_clone_args->fd,
2385 sizeof(int32_t), 0);
2386 kheap_free(KHEAP_DEFAULT, event_list,
2387 fse_clone_args->num_events * sizeof(int8_t));
2388 kheap_free(KHEAP_DEFAULT, fseh, sizeof(fsevent_handle));
2389 return error;
2390 }
2391
2392 fseh->watcher->fseh = fseh;
2393
2394 error = falloc(p, &f, &fd, vfs_context_current());
2395 if (error) {
2396 remove_watcher(fseh->watcher);
2397 vsunlock((user_addr_t)fse_clone_args->fd,
2398 sizeof(int32_t), 0);
2399 kheap_free(KHEAP_DEFAULT, event_list,
2400 fse_clone_args->num_events * sizeof(int8_t));
2401 kheap_free(KHEAP_DEFAULT, fseh, sizeof(fsevent_handle));
2402 return error;
2403 }
91447636 2404 proc_fdlock(p);
2405 f->fp_glob->fg_flag = FREAD | FWRITE;
2406 f->fp_glob->fg_ops = &fsevents_fops;
2407 f->fp_glob->fg_data = (caddr_t) fseh;
2408 /*
2409 * We can safely hold the proc_fdlock across this copyout()
2410 * because of the vslock() call above. The vslock() call
2411 * also ensures that we will never get an error, so assert
2412 * this.
2413 */
f427ee49 2414 error = copyout((void *)&fd, (user_addr_t)fse_clone_args->fd, sizeof(int32_t));
2415 assert(error == 0);
2416
2417 procfdtbl_releasefd(p, fd, NULL);
2418 fp_drop(p, fd, f, 1);
2419 proc_fdunlock(p);
2420
2421 vsunlock((user_addr_t)fse_clone_args->fd,
2422 sizeof(int32_t), 1);
0a7de745 2423 break;
2424
2425 default:
2426 error = EINVAL;
2427 break;
2428 }
91447636 2429
0a7de745 2430 return error;
2431}
2432
91447636 2433static void
2d21ac55 2434fsevents_wakeup(fs_event_watcher *watcher)
91447636 2435{
2436 selwakeup(&watcher->fseh->si);
2437 KNOTE(&watcher->fseh->knotes, NOTE_WRITE | NOTE_NONE);
2438 wakeup((caddr_t)watcher);
2439}
2440
2441
2442/*
2443 * A struct describing which functions will get invoked for certain
2444 * actions.
2445 */
f427ee49 2446static const struct cdevsw fsevents_cdevsw =
91447636 2447{
2448 .d_open = fseventsopen,
2449 .d_close = fseventsclose,
2450 .d_read = fseventsread,
2451 .d_write = fseventswrite,
2452 .d_ioctl = fseventsioctl,
2453 .d_stop = (stop_fcn_t *)&nulldev,
2454 .d_reset = (reset_fcn_t *)&nulldev,
2455 .d_select = eno_select,
2456 .d_mmap = eno_mmap,
2457 .d_strategy = eno_strat,
2458 .d_reserved_1 = eno_getc,
2459 .d_reserved_2 = eno_putc,
2460};
2461
2462
2463/*
2464 * Called to initialize our device,
2465 * and to register ourselves with devfs
2466 */
2467
2468void
2469fsevents_init(void)
2470{
0a7de745 2471 int ret;
91447636 2472
2473 if (fsevents_installed) {
2474 return;
2475 }
91447636 2476
0a7de745 2477 fsevents_installed = 1;
91447636 2478
2479 ret = cdevsw_add(-1, &fsevents_cdevsw);
2480 if (ret < 0) {
2481 fsevents_installed = 0;
2482 return;
2483 }
91447636 2484
2485 devfs_make_node(makedev(ret, 0), DEVFS_CHAR,
2486 UID_ROOT, GID_WHEEL, 0644, "fsevents", 0);
91447636 2487
0a7de745 2488 fsevents_internal_init();
2489}
2490
2491
2492char *
2493get_pathbuff(void)
2494{
f427ee49 2495 return zalloc(ZV_NAMEI);
2496}
2497
2498void
2499release_pathbuff(char *path)
2500{
2501 if (path == NULL) {
2502 return;
2503 }
f427ee49 2504 zfree(ZV_NAMEI, path);
2505}
2506
2507int
cf7d32b8 2508get_fse_info(struct vnode *vp, fse_info *fse, __unused vfs_context_t ctx)
91447636 2509{
2510 struct vnode_attr va;
2511
2512 VATTR_INIT(&va);
2513 VATTR_WANTED(&va, va_fsid);
cb323159 2514 va.va_vaflags |= VA_REALFSID;
2515 VATTR_WANTED(&va, va_fileid);
2516 VATTR_WANTED(&va, va_mode);
2517 VATTR_WANTED(&va, va_uid);
2518 VATTR_WANTED(&va, va_gid);
2519 if (vp->v_flag & VISHARDLINK) {
2520 if (vp->v_type == VDIR) {
2521 VATTR_WANTED(&va, va_dirlinkcount);
2522 } else {
2523 VATTR_WANTED(&va, va_nlink);
2524 }
2525 }
2526
2527 if (vnode_getattr(vp, &va, vfs_context_kernel()) != 0) {
2528 memset(fse, 0, sizeof(fse_info));
2529 return -1;
2d21ac55 2530 }
6d2010ae 2531
0a7de745 2532 return vnode_get_fse_info_from_vap(vp, fse, &va);
2533}
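//
// Illustrative only: the calling pattern a VFS call site would use,
// with hypothetical locals. Capture the fse_info while the vnode is
// still resolvable, then hand it to add_fsevent() with the path (the
// string length passed for FSE_ARG_STRING includes the trailing NUL,
// matching parse_buffer_and_add_events() above).
//
#if 0
	fse_info finfo;

	if (get_fse_info(vp, &finfo, ctx) == 0) {
		add_fsevent(FSE_CONTENT_MODIFIED, ctx,
		    FSE_ARG_STRING, strlen(path) + 1, path,
		    FSE_ARG_FINFO, &finfo,
		    FSE_ARG_DONE);
	}
#endif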
2534
2535int
0a7de745 2536vnode_get_fse_info_from_vap(vnode_t vp, fse_info *fse, struct vnode_attr *vap)
6d2010ae 2537{
2538 fse->ino = (ino64_t)vap->va_fileid;
2539 fse->dev = (dev_t)vap->va_fsid;
2540 fse->mode = (int32_t)vnode_vttoif(vnode_vtype(vp)) | vap->va_mode;
2541 fse->uid = (uid_t)vap->va_uid;
2542 fse->gid = (gid_t)vap->va_gid;
2543 if (vp->v_flag & VISHARDLINK) {
2544 fse->mode |= FSE_MODE_HLINK;
2545 if (vp->v_type == VDIR) {
2546 fse->nlink = (uint64_t)vap->va_dirlinkcount;
2547 } else {
2548 fse->nlink = (uint64_t)vap->va_nlink;
2549 }
2d21ac55 2550 }
2d21ac55 2551
0a7de745 2552 return 0;
91447636 2553}
2d21ac55 2554
2555void
2556create_fsevent_from_kevent(vnode_t vp, uint32_t kevents, struct vnode_attr *vap)
2557{
2558 int fsevent_type = FSE_CONTENT_MODIFIED, len; // the default is the most pessimistic
2559 char pathbuf[MAXPATHLEN];
2560 fse_info fse;
2561
2562
2563 if (kevents & VNODE_EVENT_DELETE) {
2564 fsevent_type = FSE_DELETE;
2565 } else if (kevents & (VNODE_EVENT_EXTEND | VNODE_EVENT_WRITE)) {
2566 fsevent_type = FSE_CONTENT_MODIFIED;
2567 } else if (kevents & VNODE_EVENT_LINK) {
2568 fsevent_type = FSE_CREATE_FILE;
2569 } else if (kevents & VNODE_EVENT_RENAME) {
2570 fsevent_type = FSE_CREATE_FILE; // XXXdbg - should use FSE_RENAME but we don't have the destination info;
2571 } else if (kevents & (VNODE_EVENT_FILE_CREATED | VNODE_EVENT_FILE_REMOVED | VNODE_EVENT_DIR_CREATED | VNODE_EVENT_DIR_REMOVED)) {
2572 fsevent_type = FSE_STAT_CHANGED; // XXXdbg - because vp is a dir and the thing created/removed lived inside it
2573 } else { // a catch all for VNODE_EVENT_PERMS, VNODE_EVENT_ATTRIB and anything else
2574 fsevent_type = FSE_STAT_CHANGED;
2575 }
2576
2577 // printf("convert_kevent: kevents 0x%x fsevent type 0x%x (for %s)\n", kevents, fsevent_type, vp->v_name ? vp->v_name : "(no-name)");
2578
2579 fse.dev = vap->va_fsid;
2580 fse.ino = vap->va_fileid;
2581 fse.mode = vnode_vttoif(vnode_vtype(vp)) | (uint32_t)vap->va_mode;
2582 if (vp->v_flag & VISHARDLINK) {
2583 fse.mode |= FSE_MODE_HLINK;
2584 if (vp->v_type == VDIR) {
2585 fse.nlink = vap->va_dirlinkcount;
2586 } else {
2587 fse.nlink = vap->va_nlink;
2588 }
2589 }
2590
b0d623f7 2591 if (vp->v_type == VDIR) {
0a7de745 2592 fse.mode |= FSE_REMOTE_DIR_EVENT;
b0d623f7 2593 }
b0d623f7 2594
b0d623f7 2595
2596 fse.uid = vap->va_uid;
2597 fse.gid = vap->va_gid;
b0d623f7 2598
0a7de745 2599 len = sizeof(pathbuf);
cb323159 2600 if (vn_getpath_no_firmlink(vp, pathbuf, &len) == 0) {
2601 add_fsevent(fsevent_type, vfs_context_current(), FSE_ARG_STRING, len, pathbuf, FSE_ARG_FINFO, &fse, FSE_ARG_DONE);
2602 }
2603 return;
2604}
2605
2d21ac55 2606#else /* CONFIG_FSE */
2607
2608#include <sys/fsevents.h>
2609
2610/*
2611 * The get_pathbuff and release_pathbuff routines are used in places not
2612 * related to fsevents, and it's a handy abstraction, so define trivial
2613 * versions that don't cache a pool of buffers. This way, we don't have
2614 * to conditionalize the callers, and they still get the advantage of the
2615 * pool of buffers if CONFIG_FSE is turned on.
2616 */
2617char *
2618get_pathbuff(void)
2619{
f427ee49 2620 return zalloc(ZV_NAMEI);
2621}
2622
2623void
2624release_pathbuff(char *path)
2625{
f427ee49 2626 zfree(ZV_NAMEI, path);
2d21ac55 2627}
2628
2629int
2630add_fsevent(__unused int type, __unused vfs_context_t ctx, ...)
2631{
2632 return 0;
2633}
2634
2635int
2636need_fsevent(__unused int type, __unused vnode_t vp)
2637{
2638 return 0;
2639}
2640
2d21ac55 2641#endif /* CONFIG_FSE */