X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/7ddcb079202367355dddccdfa4318e57d50318be..b226f5e54a60dc81db17b1260381d7dbfea3cdf1:/bsd/vfs/vfs_fsevents.c?ds=inline diff --git a/bsd/vfs/vfs_fsevents.c b/bsd/vfs/vfs_fsevents.c index b92b69a28..5b8eac30e 100644 --- a/bsd/vfs/vfs_fsevents.c +++ b/bsd/vfs/vfs_fsevents.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2008 Apple Inc. All rights reserved. + * Copyright (c) 2004-2014 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -61,7 +61,8 @@ #include #include - +#include +#include typedef struct kfs_event { LIST_ENTRY(kfs_event) kevent_list; @@ -112,6 +113,8 @@ typedef struct fs_event_watcher { uint32_t num_dropped; uint64_t max_event_id; struct fsevent_handle *fseh; + pid_t pid; + char proc_name[(2 * MAXCOMLEN) + 1]; } fs_event_watcher; // fs_event_watcher flags @@ -119,13 +122,13 @@ typedef struct fs_event_watcher { #define WATCHER_CLOSING 0x0002 #define WATCHER_WANTS_COMPACT_EVENTS 0x0004 #define WATCHER_WANTS_EXTENDED_INFO 0x0008 - +#define WATCHER_APPLE_SYSTEM_SERVICE 0x0010 // fseventsd, coreservicesd, mds, revisiond #define MAX_WATCHERS 8 static fs_event_watcher *watcher_table[MAX_WATCHERS]; - -#define MAX_KFS_EVENTS 4096 +#define DEFAULT_MAX_KFS_EVENTS 4096 +static int max_kfs_events = DEFAULT_MAX_KFS_EVENTS; // we allocate kfs_event structures out of this zone static zone_t event_zone; @@ -138,6 +141,11 @@ static int fs_event_init = 0; // static int16_t fs_event_type_watchers[FSE_MAX_EVENTS]; +// the device currently being unmounted: +static dev_t fsevent_unmount_dev = 0; +// how many ACKs are still outstanding: +static int fsevent_unmount_ack_count = 0; + static int watcher_add_event(fs_event_watcher *watcher, kfs_event *kfse); static void fsevents_wakeup(fs_event_watcher *watcher); @@ -163,6 +171,23 @@ __private_extern__ void qsort( size_t member_size, int (*)(const void *, const void *)); +static int +is_ignored_directory(const char *path) { + + if (!path) { + return 0; + } + +#define IS_TLD(x) strnstr(__DECONST(char *, path), x, MAXPATHLEN) + if (IS_TLD("/.Spotlight-V100/") || + IS_TLD("/.MobileBackups/") || + IS_TLD("/Backups.backupdb/")) { + return 1; + } +#undef IS_TLD + + return 0; +} static void fsevents_internal_init(void) @@ -190,9 +215,11 @@ fsevents_internal_init(void) lck_rw_init(&event_handling_lock, fsevent_rw_group, fsevent_lock_attr); + PE_get_default("kern.maxkfsevents", &max_kfs_events, sizeof(max_kfs_events)); + event_zone = zinit(sizeof(kfs_event), - MAX_KFS_EVENTS * sizeof(kfs_event), - MAX_KFS_EVENTS * sizeof(kfs_event), + max_kfs_events * sizeof(kfs_event), + max_kfs_events * sizeof(kfs_event), "fs-event-buf"); if (event_zone == NULL) { printf("fsevents: failed to initialize the event zone.\n"); @@ -204,7 +231,7 @@ fsevents_internal_init(void) zone_change(event_zone, Z_COLLECT, FALSE); zone_change(event_zone, Z_CALLERACCT, FALSE); - if (zfill(event_zone, MAX_KFS_EVENTS) < MAX_KFS_EVENTS) { + if (zfill(event_zone, max_kfs_events) < max_kfs_events) { printf("fsevents: failed to pre-fill the event zone.\n"); } @@ -276,40 +303,6 @@ need_fsevent(int type, vnode_t vp) return 1; } -static int -prefix_match_len(const char *str1, const char *str2) -{ - int len=0; - - while(*str1 && *str2 && *str1 == *str2) { - len++; - str1++; - str2++; - } - - if (*str1 == '\0' && *str2 == '\0') { - len++; - } - - return len; -} - - -struct history_item { - kfs_event *kfse; - kfs_event *oldest_kfse; - int counter; -}; - -static int -compare_history_items(const void *_a, const void *_b) -{ - 
const struct history_item *a = (const struct history_item *)_a; - const struct history_item *b = (const struct history_item *)_b; - - // we want a descending order - return (b->counter - a->counter); -} #define is_throw_away(x) ((x) == FSE_STAT_CHANGED || (x) == FSE_CONTENT_MODIFIED) @@ -338,120 +331,9 @@ compare_history_items(const void *_a, const void *_b) #define KFSE_RECYCLED 0x0004 int num_dropped = 0; -int num_combined_events = 0; -int num_added_to_parent = 0; int num_parent_switch = 0; int num_recycled_rename = 0; -// -// NOTE: you must call lock_fs_event_list() before calling -// this function. -// -static kfs_event * -find_an_event(const char *str, int len, kfs_event *do_not_reuse, int *reuse_type, int *longest_match_len) -{ - kfs_event *kfse, *best_kfse=NULL; - -// this seems to be enough to find most duplicate events for the same vnode -#define MAX_HISTORY 12 - struct history_item history[MAX_HISTORY]; - int i; - - *longest_match_len = 0; - *reuse_type = 0; - - memset(history, 0, sizeof(history)); - - // - // now walk the list of events and try to find the best match - // for this event. if we have a vnode, we look for an event - // that already references the vnode. if we don't find one - // we'll also take the parent of this vnode (in which case it - // will be marked as having dropped events within it). - // - // if we have a string we look for the longest match on the - // path we have. - // - - LIST_FOREACH(kfse, &kfse_list_head, kevent_list) { - int match_len; - - // - // don't look at events that are still in the process of being - // created, have a null vnode ptr or rename/exchange events. - // - if ( (kfse->flags & KFSE_BEING_CREATED) || kfse->type == FSE_RENAME || kfse->type == FSE_EXCHANGE) { - - continue; - } - - if (str != NULL) { - if (kfse->len != 0 && kfse->str != NULL) { - match_len = prefix_match_len(str, kfse->str); - if (match_len > *longest_match_len) { - best_kfse = kfse; - *longest_match_len = match_len; - } - } - } - - if (kfse == do_not_reuse) { - continue; - } - - for(i=0; i < MAX_HISTORY; i++) { - if (history[i].kfse == NULL) { - break; - } - - // - // do a quick check to see if we've got two simple events - // that we can cheaply combine. if the event we're looking - // at and one of the events in the history table are for the - // same path then we'll just mark the newer event as combined - // and recyle the older event. - // - if (history[i].kfse->str == kfse->str) { - - OSBitOrAtomic16(KFSE_COMBINED_EVENTS, &kfse->flags); - *reuse_type = KFSE_RECYCLED; - history[i].kfse->flags |= KFSE_RECYCLED_EVENT; - return history[i].kfse; - } - } - - if (i < MAX_HISTORY && history[i].kfse == NULL) { - history[i].kfse = kfse; - history[i].counter = 1; - } else if (i >= MAX_HISTORY) { - qsort(history, MAX_HISTORY, sizeof(struct history_item), compare_history_items); - - // pluck off the lowest guy if he's only got a count of 1 - if (history[MAX_HISTORY-1].counter == 1) { - history[MAX_HISTORY-1].kfse = kfse; - } - } - } - - - if (str != NULL && best_kfse) { - if (*longest_match_len <= 1) { - // if the best match we had was "/" then basically we're toast... 
- *longest_match_len = 0; - best_kfse = NULL; - } else if (*longest_match_len != len) { - OSBitOrAtomic16(KFSE_CONTAINS_DROPPED_EVENTS, &best_kfse->flags); - *reuse_type = KFSE_COLLAPSED; - } else { - OSBitOrAtomic16(KFSE_COMBINED_EVENTS, &best_kfse->flags); - *reuse_type = KFSE_COMBINED; - } - } - - return best_kfse; -} - - static struct timeval last_print; // @@ -478,14 +360,13 @@ int add_fsevent(int type, vfs_context_t ctx, ...) { struct proc *p = vfs_context_proc(ctx); - int i, arg_type, skip_init=0, longest_match_len, ret; + int i, arg_type, ret; kfs_event *kfse, *kfse_dest=NULL, *cur; fs_event_watcher *watcher; va_list ap; - int error = 0, did_alloc=0, need_event_unlock = 0; + int error = 0, did_alloc=0; dev_t dev = 0; uint64_t now, elapsed; - int reuse_type = 0; char *pathbuff=NULL; int pathbuff_len; @@ -517,7 +398,7 @@ add_fsevent(int type, vfs_context_t ctx, ...) // (as long as it's not an event type that can never be the // same as a previous event) // - if (type != FSE_CREATE_FILE && type != FSE_DELETE && type != FSE_RENAME && type != FSE_EXCHANGE && type != FSE_CHOWN) { + if (type != FSE_CREATE_FILE && type != FSE_DELETE && type != FSE_RENAME && type != FSE_EXCHANGE && type != FSE_CHOWN && type != FSE_DOCID_CHANGED && type != FSE_DOCID_CREATED && type != FSE_CLONE) { void *ptr=NULL; int vid=0, was_str=0, nlen=0; @@ -585,7 +466,7 @@ add_fsevent(int type, vfs_context_t ctx, ...) kfse = zalloc_noblock(event_zone); - if (kfse && (type == FSE_RENAME || type == FSE_EXCHANGE)) { + if (kfse && (type == FSE_RENAME || type == FSE_EXCHANGE || type == FSE_CLONE)) { kfse_dest = zalloc_noblock(event_zone); if (kfse_dest == NULL) { did_alloc = 1; @@ -596,55 +477,6 @@ add_fsevent(int type, vfs_context_t ctx, ...) if (kfse == NULL) { // yikes! no free events - int len=0; - char *str; - - // - // Figure out what kind of reference we have to the - // file in this event. This helps us find an event - // to combine/collapse into to make room. - // - // If we have a rename or exchange event then we - // don't want to go through the normal path, we - // want to "steal" an event instead (which is what - // find_an_event() will do if str is null). - // - arg_type = va_arg(ap, int32_t); - if (type == FSE_RENAME || type == FSE_EXCHANGE) { - str = NULL; - } else if (arg_type == FSE_ARG_STRING) { - len = va_arg(ap, int32_t); - str = va_arg(ap, char *); - } else if (arg_type == FSE_ARG_VNODE) { - struct vnode *vp; - - vp = va_arg(ap, struct vnode *); - pathbuff = get_pathbuff(); - pathbuff_len = MAXPATHLEN; - if (vn_getpath(vp, pathbuff, &pathbuff_len) != 0 || pathbuff[0] == '\0') { - release_pathbuff(pathbuff); - pathbuff = NULL; - } - str = pathbuff; - } else { - str = NULL; - } - - // - // This will go through all events and find one that we - // can combine with (hopefully), or "collapse" into (i.e - // it has the same parent) or in the worst case we have - // to "recycle" an event which means that it will combine - // two other events and return us the now unused event. - // failing all that, find_an_event() could still return - // null and if it does then we have a catastrophic dropped - // events scenario. - // - kfse = find_an_event(str, len, NULL, &reuse_type, &longest_match_len); - - if (kfse == NULL) { - bail_early: - unlock_fs_event_list(); lock_watch_table(); @@ -674,16 +506,19 @@ add_fsevent(int type, vfs_context_t ctx, ...) 
printf("add_fsevent: kfse_list head %p ; num_pending_rename %d\n", listhead, num_pending_rename); printf("add_fsevent: zalloc sez: %p\n", junkptr); printf("add_fsevent: event_zone info: %d 0x%x\n", ((int *)event_zone)[0], ((int *)event_zone)[1]); + lock_watch_table(); for(ii=0; ii < MAX_WATCHERS; ii++) { if (watcher_table[ii] == NULL) { continue; } - printf("add_fsevent: watcher %p: num dropped %d rd %4d wr %4d q_size %4d flags 0x%x\n", - watcher_table[ii], watcher_table[ii]->num_dropped, - watcher_table[ii]->rd, watcher_table[ii]->wr, - watcher_table[ii]->eventq_size, watcher_table[ii]->flags); + printf("add_fsevent: watcher %s %p: rd %4d wr %4d q_size %4d flags 0x%x\n", + watcher_table[ii]->proc_name, + watcher_table[ii], + watcher_table[ii]->rd, watcher_table[ii]->wr, + watcher_table[ii]->eventq_size, watcher_table[ii]->flags); } + unlock_watch_table(); last_print = current_tv; if (junkptr) { @@ -696,233 +531,21 @@ add_fsevent(int type, vfs_context_t ctx, ...) release_pathbuff(pathbuff); pathbuff = NULL; } - return ENOSPC; } - if ((type == FSE_RENAME || type == FSE_EXCHANGE) && reuse_type != KFSE_RECYCLED) { - panic("add_fsevent: type == %d but reuse type == %d!\n", type, reuse_type); - } else if ((kfse->type == FSE_RENAME || kfse->type == FSE_EXCHANGE) && kfse->dest == NULL) { - panic("add_fsevent: bogus kfse %p (type %d, but dest is NULL)\n", kfse, kfse->type); - } else if (kfse->type == FSE_RENAME || kfse->type == FSE_EXCHANGE) { - panic("add_fsevent: we should never re-use rename events (kfse %p reuse type %d)!\n", kfse, reuse_type); - } - - if (reuse_type == KFSE_COLLAPSED) { - if (str) { - const char *tmp_ptr, *new_str; - - // - // if we collapsed and have a string we have to chop off the - // tail component of the pathname to get the parent. - // - // NOTE: it is VERY IMPORTANT that we leave the trailing slash - // on the pathname. user-level code depends on this. - // - if (str[0] == '\0' || longest_match_len <= 1) { - printf("add_fsevent: strange state (str %s / longest_match_len %d)\n", str, longest_match_len); - if (longest_match_len < 0) { - panic("add_fsevent: longest_match_len %d\n", longest_match_len); - } - } - // chop off the tail component if it's not the - // first character... - if (longest_match_len > 1) { - str[longest_match_len] = '\0'; - } else if (longest_match_len == 0) { - longest_match_len = 1; - } - - new_str = vfs_addname(str, longest_match_len, 0, 0); - if (new_str == NULL || new_str[0] == '\0') { - panic("add_fsevent: longest match is strange (new_str %p).\n", new_str); - } - - lck_rw_lock_exclusive(&event_handling_lock); - - kfse->len = longest_match_len; - tmp_ptr = kfse->str; - kfse->str = new_str; - kfse->ino = 0; - kfse->mode = 0; - kfse->uid = 0; - kfse->gid = 0; - - lck_rw_unlock_exclusive(&event_handling_lock); - - vfs_removename(tmp_ptr); - } else { - panic("add_fsevent: don't have a vnode or a string pointer (kfse %p)\n", kfse); - } - } - - if (reuse_type == KFSE_RECYCLED && (type == FSE_RENAME || type == FSE_EXCHANGE)) { - - // if we're recycling this kfse and we have a rename or - // exchange event then we need to also get an event for - // kfse_dest. 
- // - if (did_alloc) { - // only happens if we allocated one but then failed - // for kfse_dest (and thus free'd the first one we - // allocated) - kfse_dest = zalloc_noblock(event_zone); - if (kfse_dest != NULL) { - memset(kfse_dest, 0, sizeof(kfs_event)); - kfse_dest->refcount = 1; - OSBitOrAtomic16(KFSE_BEING_CREATED, &kfse_dest->flags); - } else { - did_alloc = 0; - } - } - - if (kfse_dest == NULL) { - int dest_reuse_type, dest_match_len; - - kfse_dest = find_an_event(NULL, 0, kfse, &dest_reuse_type, &dest_match_len); - - if (kfse_dest == NULL) { - // nothing we can do... gotta bail out - goto bail_early; - } - - if (dest_reuse_type != KFSE_RECYCLED) { - panic("add_fsevent: type == %d but dest_reuse type == %d!\n", type, dest_reuse_type); - } - } - } - - - // - // Here we check for some fast-path cases so that we can - // jump over the normal initialization and just get on - // with delivering the event. These cases are when we're - // combining/collapsing an event and so basically there is - // no more work to do (aside from a little book-keeping) - // - if (str && kfse->len != 0) { - kfse->abstime = now; - OSAddAtomic(1, &kfse->refcount); - skip_init = 1; - - if (reuse_type == KFSE_COMBINED) { - num_combined_events++; - } else if (reuse_type == KFSE_COLLAPSED) { - num_added_to_parent++; - } - } else if (reuse_type != KFSE_RECYCLED) { - panic("add_fsevent: I'm so confused! (reuse_type %d str %p kfse->len %d)\n", - reuse_type, str, kfse->len); - } - - va_end(ap); - - - if (skip_init) { - if (kfse->refcount < 1) { - panic("add_fsevent: line %d: kfse recount %d but should be at least 1\n", __LINE__, kfse->refcount); - } - - last_event_ptr = kfse; - unlock_fs_event_list(); - goto normal_delivery; - - } else if (reuse_type == KFSE_RECYCLED || reuse_type == KFSE_COMBINED) { - - // - // If we're here we have to clear out the kfs_event(s) - // that we were given by find_an_event() and set it - // up to be re-filled in by the normal code path. - // - va_start(ap, ctx); - - need_event_unlock = 1; - lck_rw_lock_exclusive(&event_handling_lock); - - OSAddAtomic(1, &kfse->refcount); - - if (kfse->refcount < 1) { - panic("add_fsevent: line %d: kfse recount %d but should be at least 1\n", __LINE__, kfse->refcount); - } - - if (kfse->len == 0) { - panic("%s:%d: no more fref.vp\n", __FILE__, __LINE__); - // vnode_rele_ext(kfse->fref.vp, O_EVTONLY, 0); - } else { - vfs_removename(kfse->str); - kfse->len = 0; - } - kfse->str = NULL; - - if (kfse->kevent_list.le_prev != NULL) { - num_events_outstanding--; - if (kfse->type == FSE_RENAME) { - num_pending_rename--; - } - LIST_REMOVE(kfse, kevent_list); - memset(&kfse->kevent_list, 0, sizeof(kfse->kevent_list)); - } - - kfse->flags = 0 | KFSE_RECYCLED_EVENT; - - if (kfse_dest) { - OSAddAtomic(1, &kfse_dest->refcount); - kfse_dest->flags = 0 | KFSE_RECYCLED_EVENT; - - if (did_alloc == 0) { - if (kfse_dest->len == 0) { - panic("%s:%d: no more fref.vp\n", __FILE__, __LINE__); - // vnode_rele_ext(kfse_dest->fref.vp, O_EVTONLY, 0); - } else { - vfs_removename(kfse_dest->str); - kfse_dest->len = 0; - } - kfse_dest->str = NULL; - - if (kfse_dest->kevent_list.le_prev != NULL) { - num_events_outstanding--; - LIST_REMOVE(kfse_dest, kevent_list); - memset(&kfse_dest->kevent_list, 0, sizeof(kfse_dest->kevent_list)); - } - - if (kfse_dest->dest) { - panic("add_fsevent: should never recycle a rename event! 
kfse %p\n", kfse); - } - } - } - - OSBitOrAtomic16(KFSE_BEING_CREATED, &kfse->flags); - if (kfse_dest) { - OSBitOrAtomic16(KFSE_BEING_CREATED, &kfse_dest->flags); - } - - goto process_normally; - } - } - - if (reuse_type != 0) { - panic("fsevents: we have a reuse_type (%d) but are about to clear out kfse %p\n", reuse_type, kfse); - } - - // - // we only want to do this for brand new events, not - // events which have been recycled. - // memset(kfse, 0, sizeof(kfs_event)); kfse->refcount = 1; OSBitOrAtomic16(KFSE_BEING_CREATED, &kfse->flags); - process_normally: last_event_ptr = kfse; kfse->type = type; kfse->abstime = now; kfse->pid = p->p_pid; - if (type == FSE_RENAME || type == FSE_EXCHANGE) { - if (need_event_unlock == 0) { - memset(kfse_dest, 0, sizeof(kfs_event)); - kfse_dest->refcount = 1; - OSBitOrAtomic16(KFSE_BEING_CREATED, &kfse_dest->flags); - } + if (type == FSE_RENAME || type == FSE_EXCHANGE || type == FSE_CLONE) { + memset(kfse_dest, 0, sizeof(kfs_event)); + kfse_dest->refcount = 1; + OSBitOrAtomic16(KFSE_BEING_CREATED, &kfse_dest->flags); kfse_dest->type = type; kfse_dest->pid = p->p_pid; kfse_dest->abstime = now; @@ -946,11 +569,75 @@ add_fsevent(int type, vfs_context_t ctx, ...) // now process the arguments passed in and copy them into // the kfse // - if (need_event_unlock == 0) { - lck_rw_lock_shared(&event_handling_lock); - } cur = kfse; + + if (type == FSE_DOCID_CREATED || type == FSE_DOCID_CHANGED) { + uint64_t val; + + // + // These events are special and not like the other events. They only + // have a dev_t, src inode #, dest inode #, and a doc-id. We use the + // fields that we can in the kfse but have to overlay the dest inode + // number and the doc-id on the other fields. + // + + // First the dev_t + arg_type = va_arg(ap, int32_t); + if (arg_type == FSE_ARG_DEV) { + cur->dev = (dev_t)(va_arg(ap, dev_t)); + } else { + cur->dev = (dev_t)0xbadc0de1; + } + + // next the source inode # + arg_type = va_arg(ap, int32_t); + if (arg_type == FSE_ARG_INO) { + cur->ino = (ino64_t)(va_arg(ap, ino64_t)); + } else { + cur->ino = 0xbadc0de2; + } + + // now the dest inode # + arg_type = va_arg(ap, int32_t); + if (arg_type == FSE_ARG_INO) { + val = (ino64_t)(va_arg(ap, ino64_t)); + } else { + val = 0xbadc0de2; + } + // overlay the dest inode number on the str/dest pointer fields + memcpy(&cur->str, &val, sizeof(ino64_t)); + + + // and last the document-id + arg_type = va_arg(ap, int32_t); + if (arg_type == FSE_ARG_INT32) { + val = (uint64_t)va_arg(ap, uint32_t); + } else if (arg_type == FSE_ARG_INT64) { + val = (uint64_t)va_arg(ap, uint64_t); + } else { + val = 0xbadc0de3; + } + + // the docid is 64-bit and overlays the uid/gid fields + memcpy(&cur->uid, &val, sizeof(uint64_t)); + + goto done_with_args; + } + + if (type == FSE_UNMOUNT_PENDING) { + + // Just a dev_t + arg_type = va_arg(ap, int32_t); + if (arg_type == FSE_ARG_DEV) { + cur->dev = (dev_t)(va_arg(ap, dev_t)); + } else { + cur->dev = (dev_t)0xbadc0de1; + } + + goto done_with_args; + } + for(arg_type=va_arg(ap, int32_t); arg_type != FSE_ARG_DONE; arg_type=va_arg(ap, int32_t)) switch(arg_type) { @@ -975,14 +662,11 @@ add_fsevent(int type, vfs_context_t ctx, ...) 
VATTR_WANTED(&va, va_mode); VATTR_WANTED(&va, va_uid); VATTR_WANTED(&va, va_gid); + VATTR_WANTED(&va, va_nlink); if ((ret = vnode_getattr(vp, &va, vfs_context_kernel())) != 0) { // printf("add_fsevent: failed to getattr on vp %p (%d)\n", cur->fref.vp, ret); cur->str = NULL; error = EINVAL; - if (need_event_unlock == 0) { - // then we only grabbed it shared - lck_rw_unlock_shared(&event_handling_lock); - } goto clean_up; } @@ -991,6 +675,12 @@ add_fsevent(int type, vfs_context_t ctx, ...) cur->mode = (int32_t)vnode_vttoif(vnode_vtype(vp)) | va.va_mode; cur->uid = va.va_uid; cur->gid = va.va_gid; + if (vp->v_flag & VISHARDLINK) { + cur->mode |= FSE_MODE_HLINK; + if ((vp->v_type == VDIR && va.va_dirlinkcount == 0) || (vp->v_type == VREG && va.va_nlink == 0)) { + cur->mode |= FSE_MODE_LAST_HLINK; + } + } // if we haven't gotten the path yet, get it. if (pathbuff == NULL) { @@ -999,13 +689,7 @@ add_fsevent(int type, vfs_context_t ctx, ...) pathbuff[0] = '\0'; if ((ret = vn_getpath(vp, pathbuff, &pathbuff_len)) != 0 || pathbuff[0] == '\0') { - struct vnode *orig_vp = vp; - if (ret != ENOSPC) { - printf("add_fsevent: unable to get path for vp %p (%s; ret %d; type %d)\n", - vp, vp->v_name ? vp->v_name : "-UNKNOWN-FILE", ret, type); - } - cur->flags |= KFSE_CONTAINS_DROPPED_EVENTS; do { @@ -1027,12 +711,7 @@ add_fsevent(int type, vfs_context_t ctx, ...) } while (ret == ENOSPC); if (ret != 0 || vp == NULL) { - printf("add_fsevent: unabled to get a path for vp %p. dropping the event.\n", orig_vp); error = ENOENT; - if (need_event_unlock == 0) { - // then we only grabbed it shared - lck_rw_unlock_shared(&event_handling_lock); - } goto clean_up; } } @@ -1090,12 +769,19 @@ add_fsevent(int type, vfs_context_t ctx, ...) } break; + case FSE_ARG_INT32: { + uint32_t ival = (uint32_t)va_arg(ap, int32_t); + kfse->uid = (ino64_t)ival; + break; + } + default: printf("add_fsevent: unknown type %d\n", arg_type); // just skip one 32-bit word and hope we sync up... (void)va_arg(ap, int32_t); } +done_with_args: va_end(ap); OSBitAndAtomic16(~KFSE_BEING_CREATED, &kfse->flags); @@ -1103,19 +789,6 @@ add_fsevent(int type, vfs_context_t ctx, ...) OSBitAndAtomic16(~KFSE_BEING_CREATED, &kfse_dest->flags); } - if (need_event_unlock == 0) { - // then we only grabbed it shared - lck_rw_unlock_shared(&event_handling_lock); - } - - normal_delivery: - // unlock this here so we don't hold it across the - // event delivery loop. - if (need_event_unlock) { - lck_rw_unlock_exclusive(&event_handling_lock); - need_event_unlock = 0; - } - // // now we have to go and let everyone know that // is interested in this type of event @@ -1128,28 +801,24 @@ add_fsevent(int type, vfs_context_t ctx, ...) 
continue; } - if ( watcher->event_list[type] == FSE_REPORT + if ( type < watcher->num_events + && watcher->event_list[type] == FSE_REPORT && watcher_cares_about_dev(watcher, dev)) { if (watcher_add_event(watcher, kfse) != 0) { watcher->num_dropped++; + continue; } } - if (kfse->refcount < 1) { - panic("add_fsevent: line %d: kfse recount %d but should be at least 1\n", __LINE__, kfse->refcount); - } + // if (kfse->refcount < 1) { + // panic("add_fsevent: line %d: kfse recount %d but should be at least 1\n", __LINE__, kfse->refcount); + // } } unlock_watch_table(); clean_up: - // have to check if this needs to be unlocked (in - // case we came here from an error handling path) - if (need_event_unlock) { - lck_rw_unlock_exclusive(&event_handling_lock); - need_event_unlock = 0; - } if (pathbuff) { release_pathbuff(pathbuff); @@ -1207,8 +876,8 @@ release_event_ref(kfs_event *kfse) // holding the fs_event_buf lock // copy = *kfse; - if (kfse->dest && OSAddAtomic(-1, &kfse->dest->refcount) == 1) { - dest_copy = *kfse->dest; + if (kfse->type != FSE_DOCID_CREATED && kfse->type != FSE_DOCID_CHANGED && kfse->dest && OSAddAtomic(-1, &kfse->dest->refcount) == 1) { + dest_copy = *kfse->dest; } else { dest_copy.str = NULL; dest_copy.len = 0; @@ -1256,7 +925,7 @@ release_event_ref(kfs_event *kfse) unlock_fs_event_list(); // if we have a pointer in the union - if (copy.str) { + if (copy.str && copy.type != FSE_DOCID_CREATED && copy.type != FSE_DOCID_CHANGED) { if (copy.len == 0) { // and it's not a string panic("%s:%d: no more fref.vp!\n", __FILE__, __LINE__); // vnode_rele_ext(copy.fref.vp, O_EVTONLY, 0); @@ -1275,15 +944,14 @@ release_event_ref(kfs_event *kfse) } } - static int -add_watcher(int8_t *event_list, int32_t num_events, int32_t eventq_size, fs_event_watcher **watcher_out) +add_watcher(int8_t *event_list, int32_t num_events, int32_t eventq_size, fs_event_watcher **watcher_out, void *fseh) { int i; fs_event_watcher *watcher; - if (eventq_size <= 0 || eventq_size > 100*MAX_KFS_EVENTS) { - eventq_size = MAX_KFS_EVENTS; + if (eventq_size <= 0 || eventq_size > 100*max_kfs_events) { + eventq_size = max_kfs_events; } // Note: the event_queue follows the fs_event_watcher struct @@ -1308,20 +976,25 @@ add_watcher(int8_t *event_list, int32_t num_events, int32_t eventq_size, fs_even watcher->blockers = 0; watcher->num_readers = 0; watcher->max_event_id = 0; - watcher->fseh = NULL; + watcher->fseh = fseh; + watcher->pid = proc_selfpid(); + proc_selfname(watcher->proc_name, sizeof(watcher->proc_name)); watcher->num_dropped = 0; // XXXdbg - debugging - lock_watch_table(); - - // now update the global list of who's interested in - // events of a particular type... - for(i=0; i < num_events; i++) { - if (event_list[i] != FSE_IGNORE && i < FSE_MAX_EVENTS) { - fs_event_type_watchers[i]++; - } + if (!strncmp(watcher->proc_name, "fseventsd", sizeof(watcher->proc_name)) || + !strncmp(watcher->proc_name, "coreservicesd", sizeof(watcher->proc_name)) || + !strncmp(watcher->proc_name, "revisiond", sizeof(watcher->proc_name)) || + !strncmp(watcher->proc_name, "mds", sizeof(watcher->proc_name))) { + watcher->flags |= WATCHER_APPLE_SYSTEM_SERVICE; + } else { + printf("fsevents: watcher %s (pid: %d) - Using /dev/fsevents directly is unsupported. 
Migrate to FSEventsFramework\n", + watcher->proc_name, watcher->pid); } + lock_watch_table(); + + // find a slot for the new watcher for(i=0; i < MAX_WATCHERS; i++) { if (watcher_table[i] == NULL) { watcher->my_id = i; @@ -1330,12 +1003,21 @@ add_watcher(int8_t *event_list, int32_t num_events, int32_t eventq_size, fs_even } } - if (i > MAX_WATCHERS) { + if (i >= MAX_WATCHERS) { printf("fsevents: too many watchers!\n"); unlock_watch_table(); + FREE(watcher, M_TEMP); return ENOSPC; } + // now update the global list of who's interested in + // events of a particular type... + for(i=0; i < num_events; i++) { + if (event_list[i] != FSE_IGNORE && i < FSE_MAX_EVENTS) { + fs_event_type_watchers[i]++; + } + } + unlock_watch_table(); *watcher_out = watcher; @@ -1380,7 +1062,9 @@ remove_watcher(fs_event_watcher *target) unlock_watch_table(); while (watcher->num_readers > 1 && counter++ < 5000) { + lock_watch_table(); fsevents_wakeup(watcher); // in case they're asleep + unlock_watch_table(); tsleep(watcher, PRIBIO, "fsevents-close", 1); } @@ -1390,22 +1074,18 @@ remove_watcher(fs_event_watcher *target) } // drain the event_queue - while(watcher->rd != watcher->wr) { - lck_rw_lock_shared(&event_handling_lock); + lck_rw_lock_exclusive(&event_handling_lock); + while(watcher->rd != watcher->wr) { kfse = watcher->event_queue[watcher->rd]; - if (kfse->type == FSE_INVALID || kfse->refcount < 1) { - panic("remove_watcher: bogus kfse %p during cleanup (type %d refcount %d rd %d wr %d)\n", kfse, kfse->type, kfse->refcount, watcher->rd, watcher->wr); - } - - lck_rw_unlock_shared(&event_handling_lock); - + watcher->event_queue[watcher->rd] = NULL; watcher->rd = (watcher->rd+1) % watcher->eventq_size; - - if (kfse != NULL) { + OSSynchronizeIO(); + if (kfse != NULL && kfse->type != FSE_INVALID && kfse->refcount >= 1) { release_event_ref(kfse); } } + lck_rw_unlock_exclusive(&event_handling_lock); if (watcher->event_list) { FREE(watcher->event_list, M_TEMP); @@ -1491,22 +1171,47 @@ watcher_add_event(fs_event_watcher *watcher, kfs_event *kfse) watcher->event_queue[watcher->wr] = kfse; OSSynchronizeIO(); watcher->wr = (watcher->wr + 1) % watcher->eventq_size; - + // // wake up the watcher if there are more than MAX_NUM_PENDING events. // otherwise schedule a timer (if one isn't already set) which will // send any pending events if no more are received in the next // EVENT_DELAY_IN_MS milli-seconds. 
// - if ( (watcher->rd < watcher->wr && (watcher->wr - watcher->rd) > MAX_NUM_PENDING) - || (watcher->rd > watcher->wr && (watcher->wr + watcher->eventq_size - watcher->rd) > MAX_NUM_PENDING)) { + int32_t num_pending = 0; + if (watcher->rd < watcher->wr) { + num_pending = watcher->wr - watcher->rd; + } + + if (watcher->rd > watcher->wr) { + num_pending = watcher->wr + watcher->eventq_size - watcher->rd; + } + + if (num_pending > (watcher->eventq_size*3/4) && !(watcher->flags & WATCHER_APPLE_SYSTEM_SERVICE)) { + /* Non-Apple Service is falling behind, start dropping events for this process */ + lck_rw_lock_exclusive(&event_handling_lock); + while (watcher->rd != watcher->wr) { + kfse = watcher->event_queue[watcher->rd]; + watcher->event_queue[watcher->rd] = NULL; + watcher->rd = (watcher->rd+1) % watcher->eventq_size; + OSSynchronizeIO(); + if (kfse != NULL && kfse->type != FSE_INVALID && kfse->refcount >= 1) { + release_event_ref(kfse); + } + } + watcher->flags |= WATCHER_DROPPED_EVENTS; + lck_rw_unlock_exclusive(&event_handling_lock); - fsevents_wakeup(watcher); + printf("fsevents: watcher falling behind: %s (pid: %d) rd: %4d wr: %4d q_size: %4d flags: 0x%x\n", + watcher->proc_name, watcher->pid, watcher->rd, watcher->wr, + watcher->eventq_size, watcher->flags); + fsevents_wakeup(watcher); + } else if (num_pending > MAX_NUM_PENDING) { + fsevents_wakeup(watcher); } else if (timer_set == 0) { - - schedule_event_wakeup(); - } + schedule_event_wakeup(); + } return 0; } @@ -1598,11 +1303,11 @@ copy_out_kfse(fs_event_watcher *watcher, kfs_event *kfse, struct uio *uio) return 0; } - if (kfse->type == FSE_RENAME && kfse->dest == NULL) { + if (((kfse->type == FSE_RENAME) || (kfse->type == FSE_CLONE)) && kfse->dest == NULL) { // // This can happen if an event gets recycled but we had a // pointer to it in our event queue. 
The event is the - // destination of a rename which we'll process separately + // destination of a rename or clone which we'll process separately // (that is, another kfse points to this one so it's ok // to skip this guy because we'll process it when we process // the other one) @@ -1636,6 +1341,47 @@ copy_out_kfse(fs_event_watcher *watcher, kfs_event *kfse, struct uio *uio) copy_again: + if (kfse->type == FSE_DOCID_CHANGED || kfse->type == FSE_DOCID_CREATED) { + dev_t dev = cur->dev; + ino64_t ino = cur->ino; + uint64_t ival; + + error = fill_buff(FSE_ARG_DEV, sizeof(dev_t), &dev, evbuff, &evbuff_idx, sizeof(evbuff), uio); + if (error != 0) { + goto get_out; + } + + error = fill_buff(FSE_ARG_INO, sizeof(ino64_t), &ino, evbuff, &evbuff_idx, sizeof(evbuff), uio); + if (error != 0) { + goto get_out; + } + + memcpy(&ino, &cur->str, sizeof(ino64_t)); + error = fill_buff(FSE_ARG_INO, sizeof(ino64_t), &ino, evbuff, &evbuff_idx, sizeof(evbuff), uio); + if (error != 0) { + goto get_out; + } + + memcpy(&ival, &cur->uid, sizeof(uint64_t)); // the docid gets stuffed into the ino field + error = fill_buff(FSE_ARG_INT64, sizeof(uint64_t), &ival, evbuff, &evbuff_idx, sizeof(evbuff), uio); + if (error != 0) { + goto get_out; + } + + goto done; + } + + if (kfse->type == FSE_UNMOUNT_PENDING) { + dev_t dev = cur->dev; + + error = fill_buff(FSE_ARG_DEV, sizeof(dev_t), &dev, evbuff, &evbuff_idx, sizeof(evbuff), uio); + if (error != 0) { + goto get_out; + } + + goto done; + } + if (cur->str == NULL || cur->str[0] == '\0') { printf("copy_out_kfse:2: empty/short path (%s)\n", cur->str); error = fill_buff(FSE_ARG_STRING, 2, "/", evbuff, &evbuff_idx, sizeof(evbuff), uio); @@ -1664,15 +1410,12 @@ copy_out_kfse(fs_event_watcher *watcher, kfs_event *kfse, struct uio *uio) goto get_out; } } else { - ino_t ino; - error = fill_buff(FSE_ARG_DEV, sizeof(dev_t), &cur->dev, evbuff, &evbuff_idx, sizeof(evbuff), uio); if (error != 0) { goto get_out; } - ino = (ino_t)cur->ino; - error = fill_buff(FSE_ARG_INO, sizeof(ino_t), &ino, evbuff, &evbuff_idx, sizeof(evbuff), uio); + error = fill_buff(FSE_ARG_INO, sizeof(ino64_t), &cur->ino, evbuff, &evbuff_idx, sizeof(evbuff), uio); if (error != 0) { goto get_out; } @@ -1745,6 +1488,7 @@ fmod_watch(fs_event_watcher *watcher, struct uio *uio) user_ssize_t last_full_event_resid; kfs_event *kfse; uint16_t tmp16; + int skipped; last_full_event_resid = uio_resid(uio); @@ -1763,6 +1507,7 @@ fmod_watch(fs_event_watcher *watcher, struct uio *uio) return EAGAIN; } + restart_watch: if (watcher->rd == watcher->wr) { if (watcher->flags & WATCHER_CLOSING) { OSAddAtomic(-1, &watcher->num_readers); @@ -1804,6 +1549,9 @@ fmod_watch(fs_event_watcher *watcher, struct uio *uio) watcher->flags &= ~WATCHER_DROPPED_EVENTS; } + skipped = 0; + + lck_rw_lock_shared(&event_handling_lock); while (uio_resid(uio) > 0 && watcher->rd != watcher->wr) { if (watcher->flags & WATCHER_CLOSING) { break; @@ -1814,15 +1562,21 @@ fmod_watch(fs_event_watcher *watcher, struct uio *uio) // (since it may have been recycled/reused and changed // its type or which device it is for) // - lck_rw_lock_shared(&event_handling_lock); - kfse = watcher->event_queue[watcher->rd]; - if (kfse->type == FSE_INVALID || kfse->refcount < 1) { - panic("fmod_watch: someone left me a bogus kfse %p (type %d refcount %d rd %d wr %d)\n", kfse, kfse->type, kfse->refcount, watcher->rd, watcher->wr); + if (!kfse || kfse->type == FSE_INVALID || kfse->type >= watcher->num_events || kfse->refcount < 1) { + break; } if (watcher->event_list[kfse->type] == 
FSE_REPORT && watcher_cares_about_dev(watcher, kfse->dev)) { + if (!(watcher->flags & WATCHER_APPLE_SYSTEM_SERVICE) && kfse->type != FSE_DOCID_CREATED && kfse->type != FSE_DOCID_CHANGED && is_ignored_directory(kfse->str)) { + // If this is not an Apple System Service, skip specified directories + // radar://12034844 + error = 0; + skipped = 1; + } else { + + skipped = 0; if (last_event_ptr == kfse) { last_event_ptr = NULL; last_event_type = -1; @@ -1844,19 +1598,19 @@ fmod_watch(fs_event_watcher *watcher, struct uio *uio) } last_full_event_resid = uio_resid(uio); + } } - lck_rw_unlock_shared(&event_handling_lock); - + watcher->event_queue[watcher->rd] = NULL; watcher->rd = (watcher->rd + 1) % watcher->eventq_size; OSSynchronizeIO(); - - if (kfse->type == FSE_INVALID || kfse->refcount < 1) { - panic("fmod_watch:2: my kfse became bogus! kfse %p (type %d refcount %d rd %d wr %d)\n", kfse, kfse->type, kfse->refcount, watcher->rd, watcher->wr); - } - release_event_ref(kfse); } + lck_rw_unlock_shared(&event_handling_lock); + + if (skipped && error == 0) { + goto restart_watch; + } get_out: OSAddAtomic(-1, &watcher->num_readers); @@ -1865,18 +1619,70 @@ fmod_watch(fs_event_watcher *watcher, struct uio *uio) } -// release any references we might have on vnodes which are -// the mount point passed to us (so that it can be cleanly -// unmounted). // -// since we don't want to lose the events we'll convert the -// vnode refs to full paths. +// Shoo watchers away from a volume that's about to be unmounted +// (so that it can be cleanly unmounted). // void -fsevent_unmount(__unused struct mount *mp) +fsevent_unmount(__unused struct mount *mp, __unused vfs_context_t ctx) { - // we no longer maintain pointers to vnodes so - // there is nothing to do... +#if CONFIG_EMBEDDED + dev_t dev = mp->mnt_vfsstat.f_fsid.val[0]; + int error, waitcount = 0; + struct timespec ts = {1, 0}; + + // wait for any other pending unmounts to complete + lock_watch_table(); + while (fsevent_unmount_dev != 0) { + error = msleep((caddr_t)&fsevent_unmount_dev, &watch_table_lock, PRIBIO, "fsevent_unmount_wait", &ts); + if (error == EWOULDBLOCK) + error = 0; + if (!error && (++waitcount >= 10)) { + error = EWOULDBLOCK; + printf("timeout waiting to signal unmount pending for dev %d (fsevent_unmount_dev %d)\n", dev, fsevent_unmount_dev); + } + if (error) { + // there's a problem, bail out + unlock_watch_table(); + return; + } + } + if (fs_event_type_watchers[FSE_UNMOUNT_PENDING] == 0) { + // nobody watching for unmount pending events + unlock_watch_table(); + return; + } + // this is now the current unmount pending + fsevent_unmount_dev = dev; + fsevent_unmount_ack_count = fs_event_type_watchers[FSE_UNMOUNT_PENDING]; + unlock_watch_table(); + + // send an event to notify the watcher they need to get off the mount + error = add_fsevent(FSE_UNMOUNT_PENDING, ctx, FSE_ARG_DEV, dev, FSE_ARG_DONE); + + // wait for acknowledgment(s) (give up if it takes too long) + lock_watch_table(); + waitcount = 0; + while (fsevent_unmount_dev == dev) { + error = msleep((caddr_t)&fsevent_unmount_dev, &watch_table_lock, PRIBIO, "fsevent_unmount_pending", &ts); + if (error == EWOULDBLOCK) + error = 0; + if (!error && (++waitcount >= 10)) { + error = EWOULDBLOCK; + printf("unmount pending ack timeout for dev %d\n", dev); + } + if (error) { + // there's a problem, bail out + if (fsevent_unmount_dev == dev) { + fsevent_unmount_dev = 0; + fsevent_unmount_ack_count = 0; + } + wakeup((caddr_t)&fsevent_unmount_dev); + break; + } + } + unlock_watch_table(); +#endif 
} @@ -1916,59 +1722,25 @@ fseventsf_write(__unused struct fileproc *fp, __unused struct uio *uio, } #pragma pack(push, 4) -typedef struct ext_fsevent_dev_filter_args { - uint32_t num_devices; - user_addr_t devices; -} ext_fsevent_dev_filter_args; -#pragma pack(pop) - -typedef struct old_fsevent_dev_filter_args { - uint32_t num_devices; - int32_t devices; -} old_fsevent_dev_filter_args; - -#define OLD_FSEVENTS_DEVICE_FILTER _IOW('s', 100, old_fsevent_dev_filter_args) -#define NEW_FSEVENTS_DEVICE_FILTER _IOW('s', 100, ext_fsevent_dev_filter_args) - -#if __LP64__ -/* need this in spite of the padding due to alignment of devices */ typedef struct fsevent_dev_filter_args32 { - uint32_t num_devices; - uint32_t devices; - int32_t pad1; + uint32_t num_devices; + user32_addr_t devices; } fsevent_dev_filter_args32; -#endif +typedef struct fsevent_dev_filter_args64 { + uint32_t num_devices; + user64_addr_t devices; +} fsevent_dev_filter_args64; +#pragma pack(pop) + +#define FSEVENTS_DEVICE_FILTER_32 _IOW('s', 100, fsevent_dev_filter_args32) +#define FSEVENTS_DEVICE_FILTER_64 _IOW('s', 100, fsevent_dev_filter_args64) static int fseventsf_ioctl(struct fileproc *fp, u_long cmd, caddr_t data, vfs_context_t ctx) { fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data; int ret = 0; - ext_fsevent_dev_filter_args *devfilt_args, _devfilt_args; - - if (proc_is64bit(vfs_context_proc(ctx))) { - devfilt_args = (ext_fsevent_dev_filter_args *)data; - } else if (cmd == OLD_FSEVENTS_DEVICE_FILTER) { - old_fsevent_dev_filter_args *udev_filt_args = (old_fsevent_dev_filter_args *)data; - - devfilt_args = &_devfilt_args; - memset(devfilt_args, 0, sizeof(ext_fsevent_dev_filter_args)); - - devfilt_args->num_devices = udev_filt_args->num_devices; - devfilt_args->devices = CAST_USER_ADDR_T(udev_filt_args->devices); - } else { -#if __LP64__ - fsevent_dev_filter_args32 *udev_filt_args = (fsevent_dev_filter_args32 *)data; -#else - fsevent_dev_filter_args *udev_filt_args = (fsevent_dev_filter_args *)data; -#endif - - devfilt_args = &_devfilt_args; - memset(devfilt_args, 0, sizeof(ext_fsevent_dev_filter_args)); - - devfilt_args->num_devices = udev_filt_args->num_devices; - devfilt_args->devices = CAST_USER_ADDR_T(udev_filt_args->devices); - } + fsevent_dev_filter_args64 *devfilt_args, _devfilt_args; OSAddAtomic(1, &fseh->active); if (fseh->flags & FSEH_CLOSING) { @@ -1997,8 +1769,29 @@ fseventsf_ioctl(struct fileproc *fp, u_long cmd, caddr_t data, vfs_context_t ctx break; } - case OLD_FSEVENTS_DEVICE_FILTER: - case NEW_FSEVENTS_DEVICE_FILTER: { + case FSEVENTS_DEVICE_FILTER_32: { + if (proc_is64bit(vfs_context_proc(ctx))) { + ret = EINVAL; + break; + } + fsevent_dev_filter_args32 *devfilt_args32 = (fsevent_dev_filter_args32 *)data; + + devfilt_args = &_devfilt_args; + memset(devfilt_args, 0, sizeof(fsevent_dev_filter_args64)); + devfilt_args->num_devices = devfilt_args32->num_devices; + devfilt_args->devices = CAST_USER_ADDR_T(devfilt_args32->devices); + goto handle_dev_filter; + } + + case FSEVENTS_DEVICE_FILTER_64: + if (!proc_is64bit(vfs_context_proc(ctx))) { + ret = EINVAL; + break; + } + devfilt_args = (fsevent_dev_filter_args64 *)data; + + handle_dev_filter: + { int new_num_devices; dev_t *devices_not_to_watch, *tmp=NULL; @@ -2009,13 +1802,13 @@ fseventsf_ioctl(struct fileproc *fp, u_long cmd, caddr_t data, vfs_context_t ctx new_num_devices = devfilt_args->num_devices; if (new_num_devices == 0) { - tmp = fseh->watcher->devices_not_to_watch; - lock_watch_table(); + + tmp = fseh->watcher->devices_not_to_watch; 
fseh->watcher->devices_not_to_watch = NULL; fseh->watcher->num_devices = new_num_devices; - unlock_watch_table(); + unlock_watch_table(); if (tmp) { FREE(tmp, M_TEMP); } @@ -2051,6 +1844,22 @@ fseventsf_ioctl(struct fileproc *fp, u_long cmd, caddr_t data, vfs_context_t ctx break; } + case FSEVENTS_UNMOUNT_PENDING_ACK: { + lock_watch_table(); + dev_t dev = *(dev_t *)data; + if (fsevent_unmount_dev == dev) { + if (--fsevent_unmount_ack_count <= 0) { + fsevent_unmount_dev = 0; + wakeup((caddr_t)&fsevent_unmount_dev); + } + } else { + printf("unexpected unmount pending ack %d (%d)\n", dev, fsevent_unmount_dev); + ret = EINVAL; + } + unlock_watch_table(); + break; + } + default: ret = EINVAL; break; @@ -2158,7 +1967,7 @@ filt_fsevent(struct knote *kn, long hint) switch(kn->kn_filter) { case EVFILT_READ: kn->kn_data = amt; - + if (kn->kn_data != 0) { activate = 1; } @@ -2182,28 +1991,83 @@ filt_fsevent(struct knote *kn, long hint) } -struct filterops fsevent_filtops = { - .f_isfd = 1, - .f_attach = NULL, - .f_detach = filt_fsevent_detach, - .f_event = filt_fsevent +static int +filt_fsevent_touch(struct knote *kn, struct kevent_internal_s *kev) +{ + int res; + + lock_watch_table(); + + /* accept new fflags/data as saved */ + kn->kn_sfflags = kev->fflags; + kn->kn_sdata = kev->data; + + /* restrict the current results to the (smaller?) set of new interest */ + /* + * For compatibility with previous implementations, we leave kn_fflags + * as they were before. + */ + //kn->kn_fflags &= kev->fflags; + + /* determine if the filter is now fired */ + res = filt_fsevent(kn, 0); + + unlock_watch_table(); + + return res; +} + +static int +filt_fsevent_process(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev) +{ +#pragma unused(data) + int res; + + lock_watch_table(); + + res = filt_fsevent(kn, 0); + if (res) { + *kev = kn->kn_kevent; + if (kev->flags & EV_CLEAR) { + kn->kn_data = 0; + kn->kn_fflags = 0; + } + } + + unlock_watch_table(); + return res; +} + +SECURITY_READ_ONLY_EARLY(struct filterops) fsevent_filtops = { + .f_isfd = 1, + .f_attach = NULL, + .f_detach = filt_fsevent_detach, + .f_event = filt_fsevent, + .f_touch = filt_fsevent_touch, + .f_process = filt_fsevent_process, }; static int -fseventsf_kqfilter(__unused struct fileproc *fp, __unused struct knote *kn, __unused vfs_context_t ctx) +fseventsf_kqfilter(__unused struct fileproc *fp, __unused struct knote *kn, + __unused struct kevent_internal_s *kev, __unused vfs_context_t ctx) { fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data; + int res; kn->kn_hook = (void*)fseh; kn->kn_hookid = 1; - kn->kn_fop = &fsevent_filtops; - + kn->kn_filtid = EVFILTID_FSEVENT; + lock_watch_table(); KNOTE_ATTACH(&fseh->knotes, kn); + /* check to see if it is fired already */ + res = filt_fsevent(kn, 0); + unlock_watch_table(); - return 0; + + return res; } @@ -2213,8 +2077,6 @@ fseventsf_drain(struct fileproc *fp, __unused vfs_context_t ctx) int counter = 0; fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data; - fseh->watcher->flags |= WATCHER_CLOSING; - // if there are people still waiting, sleep for 10ms to // let them clean up and get out of there. however we // also don't want to get stuck forever so if they don't @@ -2226,7 +2088,9 @@ fseventsf_drain(struct fileproc *fp, __unused vfs_context_t ctx) // and decision to tsleep in fmod_watch... 
this bit of // latency is a decent tradeoff against not having to // take and drop a lock in fmod_watch + lock_watch_table(); fsevents_wakeup(fseh->watcher); + unlock_watch_table(); tsleep((caddr_t)fseh->watcher, PRIBIO, "watcher-close", 1); } @@ -2238,7 +2102,7 @@ fseventsf_drain(struct fileproc *fp, __unused vfs_context_t ctx) static int fseventsopen(__unused dev_t dev, __unused int flag, __unused int mode, __unused struct proc *p) { - if (!is_suser()) { + if (!kauth_cred_issuser(kauth_cred_get())) { return EPERM; } @@ -2292,7 +2156,7 @@ parse_buffer_and_add_events(const char *buffer, int bufsize, vfs_context_t ctx, path_len = ptr - path; - if (type != FSE_RENAME && type != FSE_EXCHANGE) { + if (type != FSE_RENAME && type != FSE_EXCHANGE && type != FSE_CLONE) { event_start = ptr; // record where the next event starts err = add_fsevent(type, ctx, FSE_ARG_STRING, path_len, path, FSE_ARG_FINFO, finfo, FSE_ARG_DONE); @@ -2377,7 +2241,7 @@ fseventswrite(__unused dev_t dev, struct uio *uio, __unused int ioflag) lck_mtx_lock(&event_writer_lock); if (write_buffer == NULL) { - if (kmem_alloc(kernel_map, (vm_offset_t *)&write_buffer, WRITE_BUFFER_SIZE)) { + if (kmem_alloc(kernel_map, (vm_offset_t *)&write_buffer, WRITE_BUFFER_SIZE, VM_KERN_MEMORY_FILE)) { lck_mtx_unlock(&event_writer_lock); return ENOMEM; } @@ -2431,31 +2295,33 @@ fseventswrite(__unused dev_t dev, struct uio *uio, __unused int ioflag) } -static struct fileops fsevents_fops = { - fseventsf_read, - fseventsf_write, - fseventsf_ioctl, - fseventsf_select, - fseventsf_close, - fseventsf_kqfilter, - fseventsf_drain +static const struct fileops fsevents_fops = { + .fo_type = DTYPE_FSEVENTS, + .fo_read = fseventsf_read, + .fo_write = fseventsf_write, + .fo_ioctl = fseventsf_ioctl, + .fo_select = fseventsf_select, + .fo_close = fseventsf_close, + .fo_kqfilter = fseventsf_kqfilter, + .fo_drain = fseventsf_drain, }; -typedef struct ext_fsevent_clone_args { - user_addr_t event_list; - int32_t num_events; - int32_t event_queue_depth; - user_addr_t fd; -} ext_fsevent_clone_args; +typedef struct fsevent_clone_args32 { + user32_addr_t event_list; + int32_t num_events; + int32_t event_queue_depth; + user32_addr_t fd; +} fsevent_clone_args32; -typedef struct old_fsevent_clone_args { - uint32_t event_list; - int32_t num_events; - int32_t event_queue_depth; - uint32_t fd; -} old_fsevent_clone_args; +typedef struct fsevent_clone_args64 { + user64_addr_t event_list; + int32_t num_events; + int32_t event_queue_depth; + user64_addr_t fd; +} fsevent_clone_args64; -#define OLD_FSEVENTS_CLONE _IOW('s', 1, old_fsevent_clone_args) +#define FSEVENTS_CLONE_32 _IOW('s', 1, fsevent_clone_args32) +#define FSEVENTS_CLONE_64 _IOW('s', 1, fsevent_clone_args64) static int fseventsioctl(__unused dev_t dev, u_long cmd, caddr_t data, __unused int flag, struct proc *p) @@ -2463,38 +2329,32 @@ fseventsioctl(__unused dev_t dev, u_long cmd, caddr_t data, __unused int flag, s struct fileproc *f; int fd, error; fsevent_handle *fseh = NULL; - ext_fsevent_clone_args *fse_clone_args, _fse_clone; + fsevent_clone_args64 *fse_clone_args, _fse_clone; int8_t *event_list; int is64bit = proc_is64bit(p); switch (cmd) { - case OLD_FSEVENTS_CLONE: { - old_fsevent_clone_args *old_args = (old_fsevent_clone_args *)data; + case FSEVENTS_CLONE_32: { + if (is64bit) { + return EINVAL; + } + fsevent_clone_args32 *args32 = (fsevent_clone_args32 *)data; fse_clone_args = &_fse_clone; - memset(fse_clone_args, 0, sizeof(ext_fsevent_clone_args)); + memset(fse_clone_args, 0, 
sizeof(fsevent_clone_args64)); - fse_clone_args->event_list = CAST_USER_ADDR_T(old_args->event_list); - fse_clone_args->num_events = old_args->num_events; - fse_clone_args->event_queue_depth = old_args->event_queue_depth; - fse_clone_args->fd = CAST_USER_ADDR_T(old_args->fd); + fse_clone_args->event_list = CAST_USER_ADDR_T(args32->event_list); + fse_clone_args->num_events = args32->num_events; + fse_clone_args->event_queue_depth = args32->event_queue_depth; + fse_clone_args->fd = CAST_USER_ADDR_T(args32->fd); goto handle_clone; } - - case FSEVENTS_CLONE: - if (is64bit) { - fse_clone_args = (ext_fsevent_clone_args *)data; - } else { - fsevent_clone_args *ufse_clone = (fsevent_clone_args *)data; - - fse_clone_args = &_fse_clone; - memset(fse_clone_args, 0, sizeof(ext_fsevent_clone_args)); - fse_clone_args->event_list = CAST_USER_ADDR_T(ufse_clone->event_list); - fse_clone_args->num_events = ufse_clone->num_events; - fse_clone_args->event_queue_depth = ufse_clone->event_queue_depth; - fse_clone_args->fd = CAST_USER_ADDR_T(ufse_clone->fd); + case FSEVENTS_CLONE_64: + if (!is64bit) { + return EINVAL; } + fse_clone_args = (fsevent_clone_args64 *)data; handle_clone: if (fse_clone_args->num_events < 0 || fse_clone_args->num_events > 4096) { @@ -2530,25 +2390,25 @@ fseventsioctl(__unused dev_t dev, u_long cmd, caddr_t data, __unused int flag, s error = add_watcher(event_list, fse_clone_args->num_events, fse_clone_args->event_queue_depth, - &fseh->watcher); + &fseh->watcher, + fseh); if (error) { FREE(event_list, M_TEMP); FREE(fseh, M_TEMP); return error; } - // connect up the watcher with this fsevent_handle fseh->watcher->fseh = fseh; error = falloc(p, &f, &fd, vfs_context_current()); if (error) { + remove_watcher(fseh->watcher); FREE(event_list, M_TEMP); FREE(fseh, M_TEMP); return (error); } proc_fdlock(p); f->f_fglob->fg_flag = FREAD | FWRITE; - f->f_fglob->fg_type = DTYPE_FSEVENTS; f->f_fglob->fg_ops = &fsevents_fops; f->f_fglob->fg_data = (caddr_t) fseh; proc_fdunlock(p); @@ -2750,6 +2610,9 @@ create_fsevent_from_kevent(vnode_t vp, uint32_t kevents, struct vnode_attr *vap) } #else /* CONFIG_FSE */ + +#include + /* * The get_pathbuff and release_pathbuff routines are used in places not * related to fsevents, and it's a handy abstraction, so define trivial @@ -2770,4 +2633,16 @@ release_pathbuff(char *path) { FREE_ZONE(path, MAXPATHLEN, M_NAMEI); } + +int +add_fsevent(__unused int type, __unused vfs_context_t ctx, ...) +{ + return 0; +} + +int need_fsevent(__unused int type, __unused vnode_t vp) +{ + return 0; +} + #endif /* CONFIG_FSE */
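
The FSEVENTS_CLONE_32/FSEVENTS_CLONE_64 pair above replaces the old OLD_FSEVENTS_CLONE/ext_fsevent_clone_args handling with one ioctl per pointer width. For context, here is a minimal userspace sketch of the clone handshake through the natural-width fsevent_clone_args that the removed code referenced from <sys/fsevents.h>. The field layout shown is an assumption inferred from the _32/_64 variants in this diff; opening the device requires root (fseventsopen now checks kauth_cred_issuser), and per the new warning in add_watcher, non-Apple-service clients are expected to migrate to FSEventsFramework rather than read /dev/fsevents directly.

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/fsevents.h>   /* FSEVENTS_CLONE, FSE_MAX_EVENTS, FSE_REPORT */

static int
clone_fsevents_fd(int32_t queue_depth)
{
    int8_t event_list[FSE_MAX_EVENTS];
    int clone_fd = -1;
    fsevent_clone_args args;
    int dev_fd, i;

    dev_fd = open("/dev/fsevents", O_RDONLY);   /* requires root (see fseventsopen) */
    if (dev_fd < 0) {
        return -1;
    }

    for (i = 0; i < FSE_MAX_EVENTS; i++) {
        event_list[i] = FSE_REPORT;             /* or FSE_IGNORE, per event type */
    }

    args.event_list        = event_list;
    args.num_events        = FSE_MAX_EVENTS;    /* handle_clone rejects values outside 0..4096 */
    args.event_queue_depth = queue_depth;       /* out-of-range depths fall back to max_kfs_events */
    args.fd                = &clone_fd;         /* kernel writes the new descriptor here */

    if (ioctl(dev_fd, FSEVENTS_CLONE, &args) < 0) {
        close(dev_fd);
        return -1;
    }

    close(dev_fd);          /* the cloned fd is independent of /dev/fsevents */
    return clone_fd;        /* read(2) the event stream from this descriptor */
}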
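
Similarly, FSEVENTS_DEVICE_FILTER_32/FSEVENTS_DEVICE_FILTER_64 above decode the public fsevent_dev_filter_args ioctl that the removed code referenced. A hedged sketch of how a watcher would populate its devices_not_to_watch list follows; note the inverted sense visible in fseventsf_ioctl — listed devices are ignored, and num_devices == 0 clears the filter.

#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/fsevents.h>   /* FSEVENTS_DEVICE_FILTER, fsevent_dev_filter_args */

/*
 * Ask the watcher behind clone_fd to drop events from the given devices.
 * Passing num_devices == 0 clears any existing filter (the
 * new_num_devices == 0 path in fseventsf_ioctl above).
 */
static int
set_ignored_devices(int clone_fd, dev_t *devs, uint32_t ndevs)
{
    fsevent_dev_filter_args args;

    args.num_devices = ndevs;
    args.devices     = devs;

    return ioctl(clone_fd, FSEVENTS_DEVICE_FILTER, &args);
}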
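
Finally, the new unmount handshake: on CONFIG_EMBEDDED builds, fsevent_unmount() above publishes FSE_UNMOUNT_PENDING with the volume's dev_t, then msleeps (up to ten one-second rounds) until every watcher subscribed to that event type has acknowledged, or the timeout fires and the unmount proceeds anyway. A sketch of the watcher's side, assuming the caller has already parsed the FSE_ARG_DEV payload out of the event stream (that parsing step is not shown here):

#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/fsevents.h>   /* FSE_UNMOUNT_PENDING, FSEVENTS_UNMOUNT_PENDING_ACK */

/* Called when an FSE_UNMOUNT_PENDING event for `dev` arrives on clone_fd. */
static void
handle_unmount_pending(int clone_fd, dev_t dev)
{
    /*
     * Release anything held open on `dev` first; the kernel's
     * fsevent_unmount() blocks until fsevent_unmount_ack_count drains
     * (or its timeout expires), so ack promptly.
     */
    if (ioctl(clone_fd, FSEVENTS_UNMOUNT_PENDING_ACK, &dev) < 0) {
        perror("FSEVENTS_UNMOUNT_PENDING_ACK");
    }
}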