+get_out:
+ *_buff_idx = buff_idx;
+
+ return error;
+}
+
+
+/*
+ * copy_out_kfse -- serialize one kfs_event into the calling watcher's
+ * read buffer (uio).
+ *
+ * Wire format produced here: an int32 event type, an int32 pid, then a
+ * sequence of typed arguments appended via fill_buff() (FSE_ARG_DEV,
+ * FSE_ARG_INO, FSE_ARG_STRING, ...), terminated by a 16-bit FSE_ARG_DONE
+ * marker.  Data is staged in a 512-byte on-stack buffer (evbuff) before
+ * being moved out to the uio; fill_buff() presumably flushes evbuff to
+ * the uio itself when the staging buffer fills -- TODO confirm against
+ * fill_buff()'s definition (not visible here).
+ *
+ * Returns 0 on success or an errno; ENOSPC means the remainder of the
+ * event would not fit in the caller's buffer.
+ *
+ * noinline keeps the 512-byte evbuff stack frame out of the caller.
+ */
+static int copy_out_kfse(fs_event_watcher *watcher, kfs_event *kfse, struct uio *uio) __attribute__((noinline));
+
+static int
+copy_out_kfse(fs_event_watcher *watcher, kfs_event *kfse, struct uio *uio)
+{
+ int error;
+ uint16_t tmp16;
+ int32_t type;
+ kfs_event *cur; // current link while walking the kfse -> dest chain
+ char evbuff[512]; // on-stack staging buffer for the serialized event
+ int evbuff_idx = 0; // current fill offset within evbuff
+
+ if (kfse->type == FSE_INVALID) {
+ panic("fsevents: copy_out_kfse: asked to copy out an invalid event (kfse %p, refcount %d fref ptr %p)\n", kfse, kfse->refcount, kfse->str);
+ }
+
+ // event is still being filled in by the producer; emit nothing and
+ // report success so the caller simply moves past it
+ if (kfse->flags & KFSE_BEING_CREATED) {
+ return 0;
+ }
+
+ if (((kfse->type == FSE_RENAME) || (kfse->type == FSE_CLONE)) && kfse->dest == NULL) {
+ //
+ // This can happen if an event gets recycled but we had a
+ // pointer to it in our event queue. The event is the
+ // destination of a rename or clone which we'll process separately
+ // (that is, another kfse points to this one so it's ok
+ // to skip this guy because we'll process it when we process
+ // the other one)
+ error = 0;
+ goto get_out;
+ }
+
+ // extended-info watchers get per-event flag bits packed into the
+ // high bits of the type word (above FSE_FLAG_SHIFT)
+ if (watcher->flags & WATCHER_WANTS_EXTENDED_INFO) {
+ type = (kfse->type & 0xfff);
+
+ if (kfse->flags & KFSE_CONTAINS_DROPPED_EVENTS) {
+ type |= (FSE_CONTAINS_DROPPED_EVENTS << FSE_FLAG_SHIFT);
+ } else if (kfse->flags & KFSE_COMBINED_EVENTS) {
+ type |= (FSE_COMBINED_EVENTS << FSE_FLAG_SHIFT);
+ }
+ } else {
+ type = (int32_t)kfse->type;
+ }
+
+ // copy out the type of the event
+ memcpy(evbuff, &type, sizeof(int32_t));
+ evbuff_idx += sizeof(int32_t);
+
+ // copy out the pid of the person that generated the event
+ memcpy(&evbuff[evbuff_idx], &kfse->pid, sizeof(pid_t));
+ evbuff_idx += sizeof(pid_t);
+
+ cur = kfse;
+
+// loop target: after emitting the source half of a rename/clone we come
+// back here with cur advanced to cur->dest to emit the destination half
+copy_again:
+
+ // NOTE(review): these tests use kfse (the chain head), not cur, while
+ // the data below comes from cur -- presumably DOCID events never have
+ // a dest chain so kfse == cur here; confirm against the producer side.
+ if (kfse->type == FSE_DOCID_CHANGED || kfse->type == FSE_DOCID_CREATED) {
+ dev_t dev = cur->dev;
+ ino64_t ino = cur->ino;
+ uint64_t ival;
+
+ error = fill_buff(FSE_ARG_DEV, sizeof(dev_t), &dev, evbuff, &evbuff_idx, sizeof(evbuff), uio);
+ if (error != 0) {
+ goto get_out;
+ }
+
+ error = fill_buff(FSE_ARG_INO, sizeof(ino64_t), &ino, evbuff, &evbuff_idx, sizeof(evbuff), uio);
+ if (error != 0) {
+ goto get_out;
+ }
+
+ // NOTE(review): for DOCID events the str field is overloaded to
+ // carry a second inode number (copied bitwise, not dereferenced)
+ memcpy(&ino, &cur->str, sizeof(ino64_t));
+ error = fill_buff(FSE_ARG_INO, sizeof(ino64_t), &ino, evbuff, &evbuff_idx, sizeof(evbuff), uio);
+ if (error != 0) {
+ goto get_out;
+ }
+
+ memcpy(&ival, &cur->uid, sizeof(uint64_t)); // the docid gets stuffed into the ino field
+ error = fill_buff(FSE_ARG_INT64, sizeof(uint64_t), &ival, evbuff, &evbuff_idx, sizeof(evbuff), uio);
+ if (error != 0) {
+ goto get_out;
+ }
+
+ goto done;
+ }
+
+ // unmount-pending events carry only the device number
+ if (kfse->type == FSE_UNMOUNT_PENDING) {
+ dev_t dev = cur->dev;
+
+ error = fill_buff(FSE_ARG_DEV, sizeof(dev_t), &dev, evbuff, &evbuff_idx, sizeof(evbuff), uio);
+ if (error != 0) {
+ goto get_out;
+ }
+
+ goto done;
+ }
+
+ // all other event types start with the path string; substitute "/"
+ // (2 bytes including the NUL) if the stored path is missing or empty.
+ // NOTE(review): the printf passes cur->str even when it is NULL --
+ // relies on the kernel printf tolerating a NULL %s argument.
+ if (cur->str == NULL || cur->str[0] == '\0') {
+ printf("copy_out_kfse:2: empty/short path (%s)\n", cur->str);
+ error = fill_buff(FSE_ARG_STRING, 2, "/", evbuff, &evbuff_idx, sizeof(evbuff), uio);
+ } else {
+ error = fill_buff(FSE_ARG_STRING, cur->len, cur->str, evbuff, &evbuff_idx, sizeof(evbuff), uio);
+ }
+ if (error != 0) {
+ goto get_out;
+ }
+
+ if (cur->dev == 0 && cur->ino == 0) {
+ // this happens when a rename event happens and the
+ // destination of the rename did not previously exist.
+ // it thus has no other file info so skip copying out
+ // the stuff below since it isn't initialized
+ goto done;
+ }
+
+
+ if (watcher->flags & WATCHER_WANTS_COMPACT_EVENTS) {
+ int32_t finfo_size;
+
+ // NOTE(review): copies finfo_size bytes starting at &cur->ino --
+ // this assumes ino/dev/mode/uid/gid are laid out contiguously in
+ // kfs_event in that region; verify against the struct definition.
+ finfo_size = sizeof(dev_t) + sizeof(ino64_t) + sizeof(int32_t) + sizeof(uid_t) + sizeof(gid_t);
+ error = fill_buff(FSE_ARG_FINFO, finfo_size, &cur->ino, evbuff, &evbuff_idx, sizeof(evbuff), uio);
+ if (error != 0) {
+ goto get_out;
+ }
+ } else {
+ // verbose form: each piece of file info as its own typed argument
+ error = fill_buff(FSE_ARG_DEV, sizeof(dev_t), &cur->dev, evbuff, &evbuff_idx, sizeof(evbuff), uio);
+ if (error != 0) {
+ goto get_out;
+ }
+
+ error = fill_buff(FSE_ARG_INO, sizeof(ino64_t), &cur->ino, evbuff, &evbuff_idx, sizeof(evbuff), uio);
+ if (error != 0) {
+ goto get_out;
+ }
+
+ error = fill_buff(FSE_ARG_MODE, sizeof(int32_t), &cur->mode, evbuff, &evbuff_idx, sizeof(evbuff), uio);
+ if (error != 0) {
+ goto get_out;
+ }
+
+ error = fill_buff(FSE_ARG_UID, sizeof(uid_t), &cur->uid, evbuff, &evbuff_idx, sizeof(evbuff), uio);
+ if (error != 0) {
+ goto get_out;
+ }
+
+ error = fill_buff(FSE_ARG_GID, sizeof(gid_t), &cur->gid, evbuff, &evbuff_idx, sizeof(evbuff), uio);
+ if (error != 0) {
+ goto get_out;
+ }
+ }
+
+
+ // rename/clone events chain their destination as a second kfs_event;
+ // go around again to emit it
+ if (cur->dest) {
+ cur = cur->dest;
+ goto copy_again;
+ }
+
+done:
+ // very last thing: the time stamp
+ // (taken from cur, i.e. the last event emitted in the chain)
+ error = fill_buff(FSE_ARG_INT64, sizeof(uint64_t), &cur->abstime, evbuff, &evbuff_idx, sizeof(evbuff), uio);
+ if (error != 0) {
+ goto get_out;
+ }
+
+ // check if the FSE_ARG_DONE will fit
+ // (if not, flush what we've staged so far to make room for it)
+ if (sizeof(uint16_t) > sizeof(evbuff) - evbuff_idx) {
+ if (evbuff_idx > uio_resid(uio)) {
+ error = ENOSPC;
+ goto get_out;
+ }
+ error = uiomove(evbuff, evbuff_idx, uio);
+ if (error) {
+ goto get_out;
+ }
+ evbuff_idx = 0;
+ }
+
+ tmp16 = FSE_ARG_DONE;
+ memcpy(&evbuff[evbuff_idx], &tmp16, sizeof(uint16_t));
+ evbuff_idx += sizeof(uint16_t);
+
+ // flush any remaining data in the buffer (and hopefully
+ // in most cases this is the only uiomove we'll do)
+ if (evbuff_idx > uio_resid(uio)) {
+ error = ENOSPC;
+ } else {
+ error = uiomove(evbuff, evbuff_idx, uio);
+ }
+
+get_out:
+
+ return error;
+}
+
+
+
+static int
+fmod_watch(fs_event_watcher *watcher, struct uio *uio)
+{
+ int error = 0;
+ user_ssize_t last_full_event_resid;
+ kfs_event *kfse;
+ uint16_t tmp16;
+ int skipped;
+
+ last_full_event_resid = uio_resid(uio);
+
+ // need at least 2048 bytes of space (maxpathlen + 1 event buf)
+ if (uio_resid(uio) < 2048 || watcher == NULL) {
+ return EINVAL;
+ }
+
+ if (watcher->flags & WATCHER_CLOSING) {
+ return 0;
+ }
+
+ if (OSAddAtomic(1, &watcher->num_readers) != 0) {
+ // don't allow multiple threads to read from the fd at the same time
+ OSAddAtomic(-1, &watcher->num_readers);
+ return EAGAIN;
+ }
+
+restart_watch:
+ if (watcher->rd == watcher->wr) {
+ if (watcher->flags & WATCHER_CLOSING) {
+ OSAddAtomic(-1, &watcher->num_readers);
+ return 0;
+ }
+ OSAddAtomic(1, &watcher->blockers);
+
+ // there's nothing to do, go to sleep
+ error = tsleep((caddr_t)watcher, PUSER | PCATCH, "fsevents_empty", 0);
+
+ OSAddAtomic(-1, &watcher->blockers);
+
+ if (error != 0 || (watcher->flags & WATCHER_CLOSING)) {
+ OSAddAtomic(-1, &watcher->num_readers);
+ return error;
+ }
+ }
+
+ // if we dropped events, return that as an event first
+ if (watcher->flags & WATCHER_DROPPED_EVENTS) {
+ int32_t val = FSE_EVENTS_DROPPED;
+
+ error = uiomove((caddr_t)&val, sizeof(int32_t), uio);
+ if (error == 0) {
+ val = 0; // a fake pid
+ error = uiomove((caddr_t)&val, sizeof(int32_t), uio);
+
+ tmp16 = FSE_ARG_DONE; // makes it a consistent msg
+ error = uiomove((caddr_t)&tmp16, sizeof(int16_t), uio);
+
+ last_full_event_resid = uio_resid(uio);
+ }
+
+ if (error) {
+ OSAddAtomic(-1, &watcher->num_readers);
+ return error;
+ }
+
+ watcher->flags &= ~WATCHER_DROPPED_EVENTS;
+ }
+
+ skipped = 0;
+
+ lck_rw_lock_shared(&event_handling_lock);
+ while (uio_resid(uio) > 0 && watcher->rd != watcher->wr) {
+ if (watcher->flags & WATCHER_CLOSING) {
+ break;
+ }
+
+ //
+ // check if the event is something of interest to us
+ // (since it may have been recycled/reused and changed
+ // its type or which device it is for)
+ //
+ kfse = watcher->event_queue[watcher->rd];
+ if (!kfse || kfse->type == FSE_INVALID || kfse->type >= watcher->num_events || kfse->refcount < 1) {
+ break;
+ }
+
+ if (watcher->event_list[kfse->type] == FSE_REPORT) {
+ boolean_t watcher_cares;
+
+ if (watcher->devices_not_to_watch == NULL) {
+ watcher_cares = true;
+ } else {
+ lock_watch_table();
+ watcher_cares = watcher_cares_about_dev(watcher, kfse->dev);
+ unlock_watch_table();
+ }
+
+ if (watcher_cares) {
+ if (!(watcher->flags & WATCHER_APPLE_SYSTEM_SERVICE) && kfse->type != FSE_DOCID_CREATED && kfse->type != FSE_DOCID_CHANGED && is_ignored_directory(kfse->str)) {
+ // If this is not an Apple System Service, skip specified directories
+ // radar://12034844
+ error = 0;
+ skipped = 1;
+ } else {
+ skipped = 0;
+ if (last_event_ptr == kfse) {
+ last_event_ptr = NULL;
+ last_event_type = -1;
+ last_coalesced_time = 0;
+ }
+ error = copy_out_kfse(watcher, kfse, uio);
+ if (error != 0) {
+ // if an event won't fit or encountered an error while
+ // we were copying it out, then backup to the last full
+ // event and just bail out. if the error was ENOENT
+ // then we can continue regular processing, otherwise
+ // we should unlock things and return.
+ uio_setresid(uio, last_full_event_resid);
+ if (error != ENOENT) {
+ lck_rw_unlock_shared(&event_handling_lock);
+ error = 0;
+ goto get_out;
+ }
+ }
+
+ last_full_event_resid = uio_resid(uio);
+ }
+ }
+ }
+
+ watcher->event_queue[watcher->rd] = NULL;
+ watcher->rd = (watcher->rd + 1) % watcher->eventq_size;
+ OSSynchronizeIO();
+ release_event_ref(kfse);
+ }
+ lck_rw_unlock_shared(&event_handling_lock);
+
+ if (skipped && error == 0) {
+ goto restart_watch;
+ }
+
+get_out:
+ OSAddAtomic(-1, &watcher->num_readers);
+
+ return error;