/*
 * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <stdarg.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/file_internal.h>
#include <sys/stat.h>
#include <sys/vnode_internal.h>
#include <sys/mount_internal.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/dirent.h>
#include <sys/attr.h>
#include <sys/sysctl.h>
#include <sys/ubc.h>
#include <machine/cons.h>
#include <miscfs/specfs/specdev.h>
#include <miscfs/devfs/devfs.h>
#include <sys/filio.h>
#include <kern/locks.h>
#include <libkern/OSAtomic.h>

#include <bsm/audit_kernel.h>
#include <bsm/audit_kevents.h>

// where all our structs and defines come from
#include <sys/fsevents.h>

typedef struct kfs_event_arg {
    u_int16_t  type;
    u_int16_t  len;
    union {
        struct vnode *vp;
        char         *str;
        void         *ptr;
        int32_t       int32;
        dev_t         dev;
        ino_t         ino;
        int32_t       mode;
        uid_t         uid;
        gid_t         gid;
    } data;
} kfs_event_arg;

#define KFS_NUM_ARGS  FSE_MAX_ARGS
typedef struct kfs_event {
    int32_t        type;      // type code of this event
    u_int32_t      refcount;  // number of clients referencing this
    pid_t          pid;       // pid of the process that did the op
    kfs_event_arg  args[KFS_NUM_ARGS];
} kfs_event;
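
// Note: events live in the fixed fs_event_buf[] array below and are
// handed to watchers by pointer.  refcount counts the watcher queues
// (plus the thread inside add_fsevent) that still reference a slot;
// do_free_event() recycles the slot once that count drops to zero.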


typedef struct fs_event_watcher {
    SLIST_ENTRY(fs_event_watcher) link;
    int8_t      *event_list;          // the events we're interested in
    int32_t      num_events;
    dev_t       *devices_to_watch;    // only report events from these devices
    uint32_t     num_devices;
    int32_t      flags;
    kfs_event  **event_queue;
    int32_t      eventq_size;         // number of event pointers in queue
    int32_t      rd, wr;              // indices to the event_queue
    int32_t      blockers;
    int32_t      num_readers;
} fs_event_watcher;
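
// The event_queue is a ring buffer: it is empty when rd == wr, and one
// slot is always left unused so that a full queue
// ((wr + 1) % eventq_size == rd) can be told apart from an empty one.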

// fs_event_watcher flags
#define WATCHER_DROPPED_EVENTS  0x0001
#define WATCHER_CLOSING         0x0002

static SLIST_HEAD(watch_list, fs_event_watcher) watch_list_head = { NULL };


#define MAX_KFS_EVENTS 2048

// this array holds each pending event
static kfs_event fs_event_buf[MAX_KFS_EVENTS];
static int       free_event_idx = 0;
static int       fs_event_init = 0;

//
// this array records whether anyone is interested in a
// particular type of event.  if no one is, we bail out
// early from the event delivery
//
static int16_t fs_event_type_watchers[FSE_MAX_EVENTS];

static int watcher_add_event(fs_event_watcher *watcher, kfs_event *kfse);

//
// Locks
//
static lck_grp_attr_t * fsevent_group_attr;
static lck_attr_t *     fsevent_lock_attr;
static lck_grp_t *      fsevent_mutex_group;

static lck_grp_t *      fsevent_rw_group;

static lck_rw_t  fsevent_big_lock;    // always grab this first
static lck_mtx_t watch_list_lock;
static lck_mtx_t event_buf_lock;


static void init_pathbuff(void);

static void
fsevents_internal_init(void)
{
    int i;

    if (fs_event_init++ != 0) {
        return;
    }

    for(i=0; i < FSE_MAX_EVENTS; i++) {
        fs_event_type_watchers[i] = 0;
    }

    for(i=0; i < MAX_KFS_EVENTS; i++) {
        fs_event_buf[i].type     = FSE_INVALID;
        fs_event_buf[i].refcount = 0;
    }

    SLIST_INIT(&watch_list_head);

    fsevent_lock_attr    = lck_attr_alloc_init();
    fsevent_group_attr   = lck_grp_attr_alloc_init();
    fsevent_mutex_group  = lck_grp_alloc_init("fsevent-mutex", fsevent_group_attr);
    fsevent_rw_group     = lck_grp_alloc_init("fsevent-rw", fsevent_group_attr);

    lck_mtx_init(&watch_list_lock, fsevent_mutex_group, fsevent_lock_attr);
    lck_mtx_init(&event_buf_lock, fsevent_mutex_group, fsevent_lock_attr);

    lck_rw_init(&fsevent_big_lock, fsevent_rw_group, fsevent_lock_attr);

    init_pathbuff();
}

static void
lock_watch_list(void)
{
    lck_mtx_lock(&watch_list_lock);
}

static void
unlock_watch_list(void)
{
    lck_mtx_unlock(&watch_list_lock);
}

static void
lock_fs_event_buf(void)
{
    lck_mtx_lock(&event_buf_lock);
}

static void
unlock_fs_event_buf(void)
{
    lck_mtx_unlock(&event_buf_lock);
}

// forward prototype
static void do_free_event(kfs_event *kfse);

static int
watcher_cares_about_dev(fs_event_watcher *watcher, dev_t dev)
{
    unsigned int i;

    // if there is no list of devices to watch, then always
    // say we're interested so we'll report all events from
    // all devices
    if (watcher->devices_to_watch == NULL) {
        return 1;
    }

    for(i=0; i < watcher->num_devices; i++) {
        if (dev == watcher->devices_to_watch[i]) {
            // found a match!  that means we want events
            // from this device.
            return 1;
        }
    }

    // if we're here it's not in the devices_to_watch[]
    // list so that means we do not care about it
    return 0;
}

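//
// need_fsevent() is a cheap pre-flight check: callers can skip the
// work of gathering event arguments when no watcher wants this event
// type on this vnode's device.
//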
int
need_fsevent(int type, vnode_t vp)
{
    fs_event_watcher *watcher;
    dev_t             dev;

    if (fs_event_type_watchers[type] == 0)
        return (0);
    dev = (dev_t)(vp->v_mount->mnt_vfsstat.f_fsid.val[0]);

    lock_watch_list();

    SLIST_FOREACH(watcher, &watch_list_head, link) {
        if (watcher->event_list[type] == FSE_REPORT && watcher_cares_about_dev(watcher, dev)) {
            unlock_watch_list();
            return (1);
        }
    }
    unlock_watch_list();

    return (0);
}

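//
// add_fsevent() takes a variable list of (arg-type, arg-value) pairs
// terminated by FSE_ARG_DONE, copies them into a free slot of
// fs_event_buf[], and then queues a pointer to that slot with every
// watcher that cares about this event type and device.
//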
int
add_fsevent(int type, vfs_context_t ctx, ...)
{
    struct proc      *p = vfs_context_proc(ctx);
    int               i, arg_idx, num_deliveries = 0;
    kfs_event_arg    *kea;
    kfs_event        *kfse;
    fs_event_watcher *watcher;
    va_list           ap;
    int               error = 0, base;
    dev_t             dev = 0;

    va_start(ap, ctx);

    // if no one cares about this type of event, bail out
    if (fs_event_type_watchers[type] == 0) {
        va_end(ap);
        return 0;
    }

    lck_rw_lock_shared(&fsevent_big_lock);

    // find a free event and snag it for our use
    // NOTE: do not do anything that would block until
    //       the lock is dropped.
    lock_fs_event_buf();

    base = free_event_idx;
    for(i=0; i < MAX_KFS_EVENTS; i++) {
        if (fs_event_buf[(base + i) % MAX_KFS_EVENTS].type == FSE_INVALID) {
            break;
        }
    }

    if (i >= MAX_KFS_EVENTS) {
        // yikes! no free slots
        unlock_fs_event_buf();
        va_end(ap);

        lock_watch_list();
        SLIST_FOREACH(watcher, &watch_list_head, link) {
            watcher->flags |= WATCHER_DROPPED_EVENTS;
            wakeup((caddr_t)watcher);
        }
        unlock_watch_list();
        lck_rw_done(&fsevent_big_lock);

        printf("fs_events: add_event: event queue is full! dropping events.\n");
        return ENOSPC;
    }

    kfse = &fs_event_buf[(base + i) % MAX_KFS_EVENTS];

    free_event_idx = ((base + i) % MAX_KFS_EVENTS) + 1;

    kfse->type     = type;
    kfse->refcount = 1;
    kfse->pid      = p->p_pid;

    unlock_fs_event_buf();    // at this point it's safe to unlock

    //
    // now process the arguments passed in and copy them into
    // the kfse
    //
    arg_idx = 0;
    while(arg_idx < KFS_NUM_ARGS) {
        kea = &kfse->args[arg_idx++];
        kea->type = va_arg(ap, int32_t);

        if (kea->type == FSE_ARG_DONE) {
            break;
        }

        switch(kea->type) {
            case FSE_ARG_VNODE: {
                // this expands out into multiple arguments to the client
                struct vnode *vp;
                struct vnode_attr va;

                kea->data.vp = vp = va_arg(ap, struct vnode *);
                if (kea->data.vp == NULL) {
                    panic("add_fsevent: you can't pass me a NULL vnode ptr (type %d)!\n",
                          kfse->type);
                }

                if (vnode_ref_ext(kea->data.vp, O_EVTONLY) != 0) {
                    kea->type = FSE_ARG_DONE;

                    error = EINVAL;
                    goto clean_up;
                }
                VATTR_INIT(&va);
                VATTR_WANTED(&va, va_fsid);
                VATTR_WANTED(&va, va_fileid);
                VATTR_WANTED(&va, va_mode);
                VATTR_WANTED(&va, va_uid);
                VATTR_WANTED(&va, va_gid);
                if (vnode_getattr(kea->data.vp, &va, ctx) != 0) {
                    vnode_rele_ext(kea->data.vp, O_EVTONLY, 0);
                    kea->type = FSE_ARG_DONE;

                    error = EINVAL;
                    goto clean_up;
                }

                kea++;
                kea->type = FSE_ARG_DEV;
                kea->data.dev = dev = (dev_t)va.va_fsid;

                kea++;
                kea->type = FSE_ARG_INO;
                kea->data.ino = (ino_t)va.va_fileid;

                kea++;
                kea->type = FSE_ARG_MODE;
                kea->data.mode = (int32_t)vnode_vttoif(vnode_vtype(vp)) | va.va_mode;

                kea++;
                kea->type = FSE_ARG_UID;
                kea->data.uid = va.va_uid;

                kea++;
                kea->type = FSE_ARG_GID;
                kea->data.gid = va.va_gid;
                arg_idx += 5;
                break;
            }

            case FSE_ARG_FINFO: {
                fse_info *fse;

                fse = va_arg(ap, fse_info *);

                kea->type = FSE_ARG_DEV;
                kea->data.dev = dev = (dev_t)fse->dev;

                kea++;
                kea->type = FSE_ARG_INO;
                kea->data.ino = (ino_t)fse->ino;

                kea++;
                kea->type = FSE_ARG_MODE;
                kea->data.mode = (int32_t)fse->mode;

                kea++;
                kea->type = FSE_ARG_UID;
                kea->data.uid = (uid_t)fse->uid;

                kea++;
                kea->type = FSE_ARG_GID;
                kea->data.gid = (gid_t)fse->gid;
                arg_idx += 4;
                break;
            }

            case FSE_ARG_STRING:
                kea->len = (int16_t)(va_arg(ap, int32_t) & 0xffff);
                kea->data.str = vfs_addname(va_arg(ap, char *), kea->len, 0, 0);
                break;

            case FSE_ARG_INT32:
                kea->data.int32 = va_arg(ap, int32_t);
                break;

            case FSE_ARG_INT64:
                printf("fs_events: 64-bit args not implemented.\n");
//              kea->data.int64 = va_arg(ap, int64_t);
                break;

            case FSE_ARG_RAW:
                kea->len = (int16_t)(va_arg(ap, int32_t) & 0xffff);
                MALLOC(kea->data.ptr, void *, kea->len, M_TEMP, M_WAITOK);
                memcpy(kea->data.ptr, va_arg(ap, void *), kea->len);
                break;

            case FSE_ARG_DEV:
                kea->data.dev = dev = va_arg(ap, dev_t);
                break;

            case FSE_ARG_MODE:
                kea->data.mode = va_arg(ap, int32_t);
                break;

            case FSE_ARG_INO:
                kea->data.ino = va_arg(ap, ino_t);
                break;

            case FSE_ARG_UID:
                kea->data.uid = va_arg(ap, uid_t);
                break;

            case FSE_ARG_GID:
                kea->data.gid = va_arg(ap, gid_t);
                break;

            default:
                printf("add_fsevent: unknown type %d\n", kea->type);
                // just skip one 32-bit word and hope we sync up...
                (void)va_arg(ap, int32_t);
        }
    }

    va_end(ap);

    //
    // now we have to go and let everyone who is interested
    // in this type of event know about it...
    //
    lock_watch_list();

    SLIST_FOREACH(watcher, &watch_list_head, link) {
        if (watcher->event_list[type] == FSE_REPORT && watcher_cares_about_dev(watcher, dev)) {
            if (watcher_add_event(watcher, kfse) == 0) {
                num_deliveries++;
            }
        }
    }

    unlock_watch_list();

  clean_up:
    // just in case no one was interested after all...
    if (OSAddAtomic(-1, (SInt32 *)&kfse->refcount) == 1) {
        do_free_event(kfse);
    }

    lck_rw_done(&fsevent_big_lock);
    return error;
}

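//
// do_free_event() releases everything an event still holds on to
// (vnode refs, interned name strings, raw buffers) and marks the slot
// FSE_INVALID so add_fsevent() can reuse it.  It must only be called
// once the event's refcount has dropped to zero.
//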
static void
do_free_event(kfs_event *kfse)
{
    int i;
    kfs_event_arg *kea, all_args[KFS_NUM_ARGS];

    lock_fs_event_buf();

    if (kfse->refcount > 0) {
        panic("do_free_event: free'ing a kfsevent w/refcount == %d (kfse %p)\n",
              kfse->refcount, kfse);
    }

    // make a copy of this so we can free things without
    // holding the fs_event_buf lock
    //
    memcpy(&all_args[0], &kfse->args[0], sizeof(all_args));

    // and just to be anal, set this so that there are no args
    kfse->args[0].type = FSE_ARG_DONE;

    // mark this fsevent as invalid
    kfse->type = FSE_INVALID;

    free_event_idx = (kfse - fs_event_buf);

    unlock_fs_event_buf();

    for(i=0; i < KFS_NUM_ARGS; i++) {
        kea = &all_args[i];
        if (kea->type == FSE_ARG_DONE) {
            break;
        }

        switch(kea->type) {
            case FSE_ARG_VNODE:
                vnode_rele_ext(kea->data.vp, O_EVTONLY, 0);
                break;
            case FSE_ARG_STRING:
                vfs_removename(kea->data.str);
                break;
            case FSE_ARG_RAW:
                FREE(kea->data.ptr, M_TEMP);
                break;
        }
    }
}

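//
// add_watcher() registers a new watcher: it takes ownership of the
// caller-supplied event_list, allocates the watcher struct and its
// ring buffer in one shot, and links the watcher into the global
// watch list.
//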
static int
add_watcher(int8_t *event_list, int32_t num_events, int32_t eventq_size, fs_event_watcher **watcher_out)
{
    int i;
    fs_event_watcher *watcher;

    if (eventq_size < 0 || eventq_size > MAX_KFS_EVENTS) {
        eventq_size = MAX_KFS_EVENTS;
    }

    // Note: the event_queue follows the fs_event_watcher struct
    //       in memory so we only have to do one allocation
    MALLOC(watcher,
           fs_event_watcher *,
           sizeof(fs_event_watcher) + eventq_size * sizeof(kfs_event *),
           M_TEMP, M_WAITOK);

    watcher->event_list       = event_list;
    watcher->num_events       = num_events;
    watcher->devices_to_watch = NULL;
    watcher->num_devices      = 0;
    watcher->flags            = 0;
    watcher->event_queue      = (kfs_event **)&watcher[1];
    watcher->eventq_size      = eventq_size;
    watcher->rd               = 0;
    watcher->wr               = 0;
    watcher->blockers         = 0;
    watcher->num_readers      = 0;

    lock_watch_list();

    // now update the global list of who's interested in
    // events of a particular type...
    for(i=0; i < num_events; i++) {
        if (event_list[i] != FSE_IGNORE && i < FSE_MAX_EVENTS) {
            fs_event_type_watchers[i]++;
        }
    }

    SLIST_INSERT_HEAD(&watch_list_head, watcher, link);

    unlock_watch_list();

    *watcher_out = watcher;

    return 0;
}

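//
// remove_watcher() unlinks a watcher, drops its per-type interest
// counts, releases every event still sitting in its queue, and frees
// the watcher and its lists.  It takes fsevent_big_lock shared,
// presumably to keep fsevent_unmount() (which takes it exclusive)
// from running concurrently.
//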
static void
remove_watcher(fs_event_watcher *target)
{
    int i;
    fs_event_watcher *watcher;
    kfs_event *kfse;

    lck_rw_lock_shared(&fsevent_big_lock);

    lock_watch_list();

    SLIST_FOREACH(watcher, &watch_list_head, link) {
        if (watcher == target) {
            SLIST_REMOVE(&watch_list_head, watcher, fs_event_watcher, link);

            for(i=0; i < watcher->num_events; i++) {
                if (watcher->event_list[i] != FSE_IGNORE && i < FSE_MAX_EVENTS) {
                    fs_event_type_watchers[i]--;
                }
            }

            unlock_watch_list();

            // drain the event_queue
            for(i=watcher->rd; i != watcher->wr; i=(i+1) % watcher->eventq_size) {
                kfse = watcher->event_queue[i];

                if (OSAddAtomic(-1, (SInt32 *)&kfse->refcount) == 1) {
                    do_free_event(kfse);
                }
            }

            if (watcher->event_list) {
                FREE(watcher->event_list, M_TEMP);
                watcher->event_list = NULL;
            }
            if (watcher->devices_to_watch) {
                FREE(watcher->devices_to_watch, M_TEMP);
                watcher->devices_to_watch = NULL;
            }
            FREE(watcher, M_TEMP);

            lck_rw_done(&fsevent_big_lock);
            return;
        }
    }

    unlock_watch_list();
    lck_rw_done(&fsevent_big_lock);
}

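//
// watcher_add_event() appends an event pointer to the watcher's ring
// buffer and bumps the event's refcount, or sets
// WATCHER_DROPPED_EVENTS and just wakes the watcher if the ring is
// full.  The caller (add_fsevent) holds the watch list lock.
//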
static int
watcher_add_event(fs_event_watcher *watcher, kfs_event *kfse)
{
    if (((watcher->wr + 1) % watcher->eventq_size) == watcher->rd) {
        watcher->flags |= WATCHER_DROPPED_EVENTS;
        wakeup((caddr_t)watcher);
        return ENOSPC;
    }

    watcher->event_queue[watcher->wr] = kfse;
    OSAddAtomic(1, (SInt32 *)&kfse->refcount);
    watcher->wr = (watcher->wr + 1) % watcher->eventq_size;

    // wake up the watcher if he's waiting!
    wakeup((caddr_t)watcher);

    return 0;
}

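//
// fmod_watch() implements read(2) for a /dev/fsevents clone.  Each
// event is serialized as an int32 type and an int32 pid followed by a
// sequence of (uint16 arg-type, uint16 length, data) records,
// terminated by a bare uint16 FSE_ARG_DONE.  An event that doesn't fit
// in the caller's buffer stays in the queue for the next read.
//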
static int
fmod_watch(fs_event_watcher *watcher, struct uio *uio)
{
    int i, error=0, last_full_event_resid;
    kfs_event *kfse;
    kfs_event_arg *kea;
    uint16_t tmp16;

    // LP64todo - fix this
    last_full_event_resid = uio_resid(uio);

    // need at least 2048 bytes of space (maxpathlen + 1 event buf)
    if (uio_resid(uio) < 2048 || watcher == NULL) {
        return EINVAL;
    }

    if (OSAddAtomic(1, (SInt32 *)&watcher->num_readers) != 0) {
        // don't allow multiple threads to read from the fd at the same time
        OSAddAtomic(-1, (SInt32 *)&watcher->num_readers);
        return EAGAIN;
    }

    if (watcher->rd == watcher->wr) {
        if (watcher->flags & WATCHER_CLOSING) {
            OSAddAtomic(-1, (SInt32 *)&watcher->num_readers);
            return 0;
        }
        OSAddAtomic(1, (SInt32 *)&watcher->blockers);

        // there's nothing to do, go to sleep
        error = tsleep((caddr_t)watcher, PUSER|PCATCH, "fsevents_empty", 0);

        OSAddAtomic(-1, (SInt32 *)&watcher->blockers);

        if (error != 0 || (watcher->flags & WATCHER_CLOSING)) {
            OSAddAtomic(-1, (SInt32 *)&watcher->num_readers);
            return error;
        }
    }

    // if we dropped events, return that as an event first
    if (watcher->flags & WATCHER_DROPPED_EVENTS) {
        int32_t val = FSE_EVENTS_DROPPED;

        error = uiomove((caddr_t)&val, sizeof(int32_t), uio);
        if (error == 0) {
            val = 0;              // a fake pid
            error = uiomove((caddr_t)&val, sizeof(int32_t), uio);

            tmp16 = FSE_ARG_DONE; // makes it a consistent msg
            error = uiomove((caddr_t)&tmp16, sizeof(int16_t), uio);
        }

        if (error) {
            OSAddAtomic(-1, (SInt32 *)&watcher->num_readers);
            return error;
        }

        watcher->flags &= ~WATCHER_DROPPED_EVENTS;
    }

    // check if the next chunk of data will fit in the user's
    // buffer.  if not, just goto get_out which will return
    // the number of bytes worth of events that we did read.
    // this leaves the event that didn't fit in the queue.
    //
    // LP64todo - fix this
#define CHECK_UPTR(size) if (size > (unsigned)uio_resid(uio)) { \
            uio_setresid(uio, last_full_event_resid);           \
            goto get_out;                                       \
        }

    for (; uio_resid(uio) > 0 && watcher->rd != watcher->wr; ) {
        kfse = watcher->event_queue[watcher->rd];

        // copy out the type of the event
        CHECK_UPTR(sizeof(int32_t));
        if ((error = uiomove((caddr_t)&kfse->type, sizeof(int32_t), uio)) != 0) {
            goto get_out;
        }

        // now copy out the pid of the person that changed the file
        CHECK_UPTR(sizeof(pid_t));
        if ((error = uiomove((caddr_t)&kfse->pid, sizeof(pid_t), uio)) != 0) {
            goto get_out;
        }

        error = 0;
        for(i=0; i < KFS_NUM_ARGS && error == 0; i++) {
            char *pathbuff;
            int   pathbuff_len;

            kea = &kfse->args[i];

            tmp16 = (uint16_t)kea->type;
            CHECK_UPTR(sizeof(uint16_t));
            error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
            if (error || kea->type == FSE_ARG_DONE) {
                break;
            }

            switch(kea->type) {
                case FSE_ARG_VNODE:
                    pathbuff = get_pathbuff();
                    pathbuff_len = MAXPATHLEN;
                    if (kea->data.vp == NULL) {
                        printf("fmod_watch: whoa... vp == NULL (%d)!\n", kfse->type);
                        i--;
                        release_pathbuff(pathbuff);
                        continue;
                    }

                    if (vn_getpath(kea->data.vp, pathbuff, &pathbuff_len) != 0 || pathbuff[0] == '\0') {
//                      printf("fmod_watch: vn_getpath failed! vp 0x%x vname 0x%x (%s) vparent 0x%x\n",
//                             kea->data.vp,
//                             VNAME(kea->data.vp),
//                             VNAME(kea->data.vp) ? VNAME(kea->data.vp) : "<null>",
//                             VPARENT(kea->data.vp));
                    }
                    CHECK_UPTR(sizeof(uint16_t));
                    tmp16 = (uint16_t)pathbuff_len;
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);

                    CHECK_UPTR((unsigned)pathbuff_len);
                    error = uiomove((caddr_t)pathbuff, pathbuff_len, uio);
                    release_pathbuff(pathbuff);
                    break;

                case FSE_ARG_STRING:
                    tmp16 = (uint16_t)kea->len;
                    CHECK_UPTR(sizeof(uint16_t));
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);

                    CHECK_UPTR(kea->len);
                    error = uiomove((caddr_t)kea->data.str, kea->len, uio);
                    break;

                case FSE_ARG_INT32:
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(int32_t));
                    tmp16 = sizeof(int32_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.int32, sizeof(int32_t), uio);
                    break;

                case FSE_ARG_INT64:
                    printf("fs_events: 64-bit args not implemented on copyout.\n");
//                  CHECK_UPTR(sizeof(uint16_t) + sizeof(int64_t));
//                  tmp16 = sizeof(int64_t);
//                  error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
//                  error = uiomove((caddr_t)&kea->data.int64, sizeof(int64_t), uio);
                    break;

                case FSE_ARG_RAW:
                    tmp16 = (uint16_t)kea->len;
                    CHECK_UPTR(sizeof(uint16_t));
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);

                    CHECK_UPTR(kea->len);
                    error = uiomove((caddr_t)kea->data.ptr, kea->len, uio);
                    break;

                case FSE_ARG_DEV:
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(dev_t));
                    tmp16 = sizeof(dev_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.dev, sizeof(dev_t), uio);
                    break;

                case FSE_ARG_INO:
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(ino_t));
                    tmp16 = sizeof(ino_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.ino, sizeof(ino_t), uio);
                    break;

                case FSE_ARG_MODE:
                    // XXXdbg - NOTE: we use 32-bits for the mode, not
                    //          16-bits like a real mode_t
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(int32_t));
                    tmp16 = sizeof(int32_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.mode, sizeof(int32_t), uio);
                    break;

                case FSE_ARG_UID:
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(uid_t));
                    tmp16 = sizeof(uid_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.uid, sizeof(uid_t), uio);
                    break;

                case FSE_ARG_GID:
                    CHECK_UPTR(sizeof(uint16_t) + sizeof(gid_t));
                    tmp16 = sizeof(gid_t);
                    error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
                    error = uiomove((caddr_t)&kea->data.gid, sizeof(gid_t), uio);
                    break;

                default:
                    printf("fmod_watch: unknown arg type %d.\n", kea->type);
                    break;
            }
        }

        // make sure that we always end with a FSE_ARG_DONE
        if (i >= KFS_NUM_ARGS) {
            tmp16 = FSE_ARG_DONE;
            CHECK_UPTR(sizeof(uint16_t));
            error = uiomove((caddr_t)&tmp16, sizeof(uint16_t), uio);
        }


        // LP64todo - fix this
        last_full_event_resid = uio_resid(uio);

        watcher->rd = (watcher->rd + 1) % watcher->eventq_size;

        if (OSAddAtomic(-1, (SInt32 *)&kfse->refcount) == 1) {
            do_free_event(kfse);
        }
    }

  get_out:
    OSAddAtomic(-1, (SInt32 *)&watcher->num_readers);
    return error;
}


// release any references we might have on vnodes which are
// on the mount point passed to us (so that it can be cleanly
// unmounted).
//
// since we don't want to lose the events we'll convert the
// vnode refs to the full path, inode #, and uid.
//
void
fsevent_unmount(struct mount *mp)
{
    int i, j;
    kfs_event *kfse;
    kfs_event_arg *kea;

    lck_rw_lock_exclusive(&fsevent_big_lock);
    lock_fs_event_buf();

    for(i=0; i < MAX_KFS_EVENTS; i++) {
        if (fs_event_buf[i].type == FSE_INVALID) {
            continue;
        }

        kfse = &fs_event_buf[i];
        for(j=0; j < KFS_NUM_ARGS; j++) {
            kea = &kfse->args[j];
            if (kea->type == FSE_ARG_DONE) {
                break;
            }

            if (kea->type == FSE_ARG_VNODE && kea->data.vp->v_mount == mp) {
                struct vnode *vp;
                char *pathbuff;
                int   pathbuff_len;

                vp = kea->data.vp;
                pathbuff = get_pathbuff();
                pathbuff_len = MAXPATHLEN;

                if (vn_getpath(vp, pathbuff, &pathbuff_len) != 0 || pathbuff[0] == '\0') {
                    char *vname;

                    vname = vnode_getname(vp);

                    printf("fsevent_unmount: vn_getpath failed! vp 0x%x vname 0x%x (%s) vparent 0x%x\n",
                           vp, vname, vname ? vname : "<null>", vp->v_parent);

                    if (vname)
                        vnode_putname(vname);

                    strcpy(pathbuff, "UNKNOWN-FILE");
                    pathbuff_len = strlen(pathbuff) + 1;
                }

                // switch the type of the arg to a string
                kea->type = FSE_ARG_STRING;
                kea->data.str = vfs_addname(pathbuff, pathbuff_len, 0, 0);
                kea->len = pathbuff_len;
                release_pathbuff(pathbuff);

                // and finally let go of the reference on the vnode
                vnode_rele_ext(vp, O_EVTONLY, 0);
            }
        }
    }

    unlock_fs_event_buf();
    lck_rw_done(&fsevent_big_lock);
}


//
// /dev/fsevents device code
//
static int fsevents_installed = 0;
static struct lock__bsd__ fsevents_lck;

typedef struct fsevent_handle {
    UInt32            flags;
    SInt32            active;
    fs_event_watcher *watcher;
    struct selinfo    si;
} fsevent_handle;
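
// One fsevent_handle exists per open /dev/fsevents clone fd.  The
// 'active' counter and the FSEH_CLOSING flag let fseventsf_close()
// wait out in-flight ioctls before tearing down the watcher.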

#define FSEH_CLOSING   0x0001

static int
fseventsf_read(struct fileproc *fp, struct uio *uio,
               __unused kauth_cred_t *cred, __unused int flags,
               __unused struct proc *p)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;
    int error;

    error = fmod_watch(fseh->watcher, uio);

    return error;
}

static int
fseventsf_write(__unused struct fileproc *fp, __unused struct uio *uio,
                __unused kauth_cred_t *cred, __unused int flags,
                __unused struct proc *p)
{
    return EIO;
}


static int
fseventsf_ioctl(struct fileproc *fp, u_long cmd, caddr_t data, struct proc *p)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;
    int ret = 0;
    pid_t pid = 0;
    fsevent_dev_filter_args *devfilt_args=(fsevent_dev_filter_args *)data;

    OSAddAtomic(1, &fseh->active);
    if (fseh->flags & FSEH_CLOSING) {
        OSAddAtomic(-1, &fseh->active);
        return 0;
    }

    switch (cmd) {
        case FIONBIO:
        case FIOASYNC:
            ret = 0;
            break;

        case FSEVENTS_DEVICE_FILTER: {
            int new_num_devices;
            dev_t *devices_to_watch, *tmp=NULL;

            if (fseh->flags & FSEH_CLOSING) {
                ret = 0;
                break;
            }

            if (devfilt_args->num_devices > 256) {
                ret = EINVAL;
                break;
            }

            new_num_devices = devfilt_args->num_devices;
            if (new_num_devices == 0) {
                tmp = fseh->watcher->devices_to_watch;

                lock_watch_list();
                fseh->watcher->devices_to_watch = NULL;
                fseh->watcher->num_devices = new_num_devices;
                unlock_watch_list();

                if (tmp) {
                    FREE(tmp, M_TEMP);
                }
                break;
            }

            MALLOC(devices_to_watch, dev_t *,
                   new_num_devices * sizeof(dev_t),
                   M_TEMP, M_WAITOK);
            if (devices_to_watch == NULL) {
                ret = ENOMEM;
                break;
            }

            ret = copyin(CAST_USER_ADDR_T(devfilt_args->devices),
                         (void *)devices_to_watch,
                         new_num_devices * sizeof(dev_t));
            if (ret) {
                FREE(devices_to_watch, M_TEMP);
                break;
            }

            lock_watch_list();
            fseh->watcher->num_devices = new_num_devices;
            tmp = fseh->watcher->devices_to_watch;
            fseh->watcher->devices_to_watch = devices_to_watch;
            unlock_watch_list();

            if (tmp) {
                FREE(tmp, M_TEMP);
            }

            break;
        }

        default:
            ret = EINVAL;
            break;
    }

    OSAddAtomic(-1, &fseh->active);
    return (ret);
}


static int
fseventsf_select(struct fileproc *fp, int which, void *wql, struct proc *p)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;
    int ready = 0;

    if ((which != FREAD) || (fseh->watcher->flags & WATCHER_CLOSING)) {
        return 0;
    }


    // if there's nothing in the queue, we're not ready
    if (fseh->watcher->rd == fseh->watcher->wr) {
        ready = 0;
    } else {
        ready = 1;
    }

    if (!ready) {
        selrecord(p, &fseh->si, wql);
    }

    return ready;
}


static int
fseventsf_stat(struct fileproc *fp, struct stat *sb, struct proc *p)
{
    return ENOTSUP;
}


static int
fseventsf_close(struct fileglob *fg, struct proc *p)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fg->fg_data;
    fs_event_watcher *watcher;

    OSBitOrAtomic(FSEH_CLOSING, &fseh->flags);
    while (OSAddAtomic(0, &fseh->active) > 0) {
        tsleep((caddr_t)fseh->watcher, PRIBIO, "fsevents-close", 1);
    }

    watcher = fseh->watcher;
    fseh->watcher = NULL;
    fg->fg_data = NULL;

    remove_watcher(watcher);
    FREE(fseh, M_TEMP);

    return 0;
}

int
fseventsf_kqfilter(struct fileproc *fp, struct knote *kn, struct proc *p)
{
    // XXXdbg
    return 0;
}


static int
fseventsf_drain(struct fileproc *fp, struct proc *p)
{
    int counter = 0;
    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;

    fseh->watcher->flags |= WATCHER_CLOSING;

    // if there are people still waiting, sleep for 10ms to
    // let them clean up and get out of there.  however we
    // also don't want to get stuck forever so if they don't
    // exit after 5 seconds we're tearing things down anyway.
    while(fseh->watcher->blockers && counter++ < 500) {
        // issue wakeup in case anyone is blocked waiting for an event
        // do this each time we wakeup in case the blocker missed
        // the wakeup due to the unprotected test of WATCHER_CLOSING
        // and decision to tsleep in fmod_watch... this bit of
        // latency is a decent tradeoff against not having to
        // take and drop a lock in fmod_watch
        wakeup((caddr_t)fseh->watcher);

        tsleep((caddr_t)fseh->watcher, PRIBIO, "watcher-close", 1);
    }

    return 0;
}


static int
fseventsopen(dev_t dev, int flag, int mode, struct proc *p)
{
    if (!is_suser()) {
        return EPERM;
    }

    return 0;
}

static int
fseventsclose(dev_t dev, int flag, int mode, struct proc *p)
{
    return 0;
}

static int
fseventsread(dev_t dev, struct uio *uio, int ioflag)
{
    return EIO;
}

static int
fseventswrite(dev_t dev, struct uio *uio, int ioflag)
{
    return EIO;
}


static struct fileops fsevents_fops = {
    fseventsf_read,
    fseventsf_write,
    fseventsf_ioctl,
    fseventsf_select,
    fseventsf_close,
    fseventsf_kqfilter,
    fseventsf_drain
};

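//
// fseventsioctl() services FSEVENTS_CLONE on the master device: it
// copies in the caller's per-event-type interest list, registers a
// watcher with add_watcher(), and hands back a fresh file descriptor
// wired up to fsevents_fops.
//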
static int
fseventsioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
    struct fileproc *f;
    int fd, error;
    fsevent_handle *fseh = NULL;
    fsevent_clone_args *fse_clone_args=(fsevent_clone_args *)data;
    int8_t *event_list;

    switch (cmd) {
        case FSEVENTS_CLONE:
            if (fse_clone_args->num_events < 0 || fse_clone_args->num_events > 4096) {
                return EINVAL;
            }

            MALLOC(fseh, fsevent_handle *, sizeof(fsevent_handle),
                   M_TEMP, M_WAITOK);
            memset(fseh, 0, sizeof(fsevent_handle));

            MALLOC(event_list, int8_t *,
                   fse_clone_args->num_events * sizeof(int8_t),
                   M_TEMP, M_WAITOK);

            error = copyin(CAST_USER_ADDR_T(fse_clone_args->event_list),
                           (void *)event_list,
                           fse_clone_args->num_events * sizeof(int8_t));
            if (error) {
                FREE(event_list, M_TEMP);
                FREE(fseh, M_TEMP);
                return error;
            }

            error = add_watcher(event_list,
                                fse_clone_args->num_events,
                                fse_clone_args->event_queue_depth,
                                &fseh->watcher);
            if (error) {
                FREE(event_list, M_TEMP);
                FREE(fseh, M_TEMP);
                return error;
            }

            error = falloc(p, &f, &fd);
            if (error) {
                FREE(event_list, M_TEMP);
                FREE(fseh, M_TEMP);
                return (error);
            }
            proc_fdlock(p);
            f->f_fglob->fg_flag = FREAD | FWRITE;
            f->f_fglob->fg_type = DTYPE_FSEVENTS;
            f->f_fglob->fg_ops  = &fsevents_fops;
            f->f_fglob->fg_data = (caddr_t) fseh;
            proc_fdunlock(p);
            copyout((void *)&fd, CAST_USER_ADDR_T(fse_clone_args->fd), sizeof(int32_t));
            proc_fdlock(p);
            *fdflags(p, fd) &= ~UF_RESERVED;
            fp_drop(p, fd, f, 1);
            proc_fdunlock(p);
            break;

        default:
            error = EINVAL;
            break;
    }

    return error;
}

static int
fseventsselect(dev_t dev, int rw, struct proc *p)
{
    return 0;
}

static void
fsevents_wakeup(fsevent_handle *fseh)
{
    wakeup((caddr_t)fseh);
    selwakeup(&fseh->si);
}


/*
 * A struct describing which functions will get invoked for certain
 * actions.
 */
static struct cdevsw fsevents_cdevsw =
{
    fseventsopen,       /* open */
    fseventsclose,      /* close */
    fseventsread,       /* read */
    fseventswrite,      /* write */
    fseventsioctl,      /* ioctl */
    nulldev,            /* stop */
    nulldev,            /* reset */
    NULL,               /* tty's */
    eno_select,         /* select */
    eno_mmap,           /* mmap */
    eno_strat,          /* strategy */
    eno_getc,           /* getc */
    eno_putc,           /* putc */
    0                   /* type */
};


/*
 * Called to initialize our device,
 * and to register ourselves with devfs
 */
void
fsevents_init(void)
{
    int ret;

    if (fsevents_installed) {
        return;
    }

    fsevents_installed = 1;

    lockinit(&fsevents_lck, PLOCK, "fsevents", 0, 0);

    ret = cdevsw_add(-1, &fsevents_cdevsw);
    if (ret < 0) {
        fsevents_installed = 0;
        return;
    }

    devfs_make_node(makedev (ret, 0), DEVFS_CHAR,
                    UID_ROOT, GID_WHEEL, 0644, "fsevents", 0);

    fsevents_internal_init();
}


//
// XXXdbg - temporary path buffer handling
//
#define NUM_PATH_BUFFS  16
static char path_buff[NUM_PATH_BUFFS][MAXPATHLEN];
static char path_buff_inuse[NUM_PATH_BUFFS];
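
// a small fixed pool of MAXPATHLEN buffers: if they're all in use,
// get_pathbuff() falls back to MALLOC_ZONE, and release_pathbuff()
// recognizes a pointer that isn't ours and frees it back to the zone.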

static lck_grp_attr_t * pathbuff_group_attr;
static lck_attr_t *     pathbuff_lock_attr;
static lck_grp_t *      pathbuff_mutex_group;
static lck_mtx_t        pathbuff_lock;

static void
init_pathbuff(void)
{
    pathbuff_lock_attr   = lck_attr_alloc_init();
    pathbuff_group_attr  = lck_grp_attr_alloc_init();
    pathbuff_mutex_group = lck_grp_alloc_init("pathbuff-mutex", pathbuff_group_attr);

    lck_mtx_init(&pathbuff_lock, pathbuff_mutex_group, pathbuff_lock_attr);
}

static void
lock_pathbuff(void)
{
    lck_mtx_lock(&pathbuff_lock);
}

static void
unlock_pathbuff(void)
{
    lck_mtx_unlock(&pathbuff_lock);
}


char *
get_pathbuff(void)
{
    int i;

    lock_pathbuff();
    for(i=0; i < NUM_PATH_BUFFS; i++) {
        if (path_buff_inuse[i] == 0) {
            break;
        }
    }

    if (i >= NUM_PATH_BUFFS) {
        char *path;

        unlock_pathbuff();
        MALLOC_ZONE(path, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
        return path;
    }

    path_buff_inuse[i] = 1;
    unlock_pathbuff();
    return &path_buff[i][0];
}

void
release_pathbuff(char *path)
{
    int i;

    if (path == NULL) {
        return;
    }

    lock_pathbuff();
    for(i=0; i < NUM_PATH_BUFFS; i++) {
        if (path == &path_buff[i][0]) {
            path_buff[i][0] = '\0';
            path_buff_inuse[i] = 0;
            unlock_pathbuff();
            return;
        }
    }

    unlock_pathbuff();

    // if we get here then it wasn't one of our temp buffers
    FREE_ZONE(path, MAXPATHLEN, M_NAMEI);
}

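//
// get_fse_info() gathers the dev/inode/mode/uid/gid tuple in the
// fse_info layout consumed by add_fsevent()'s FSE_ARG_FINFO case.
//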
int
get_fse_info(struct vnode *vp, fse_info *fse, vfs_context_t ctx)
{
    struct vnode_attr va;

    VATTR_INIT(&va);
    VATTR_WANTED(&va, va_fsid);
    VATTR_WANTED(&va, va_fileid);
    VATTR_WANTED(&va, va_mode);
    VATTR_WANTED(&va, va_uid);
    VATTR_WANTED(&va, va_gid);
    if (vnode_getattr(vp, &va, ctx) != 0) {
        return -1;
    }

    fse->dev  = (dev_t)va.va_fsid;
    fse->ino  = (ino_t)va.va_fileid;
    fse->mode = (int32_t)vnode_vttoif(vnode_vtype(vp)) | va.va_mode;
    fse->uid  = (uid_t)va.va_uid;
    fse->gid  = (gid_t)va.va_gid;

    return 0;
}