/*
 * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 *
 */
/*-
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * @(#)kern_event.c	1.0 (3/31/2000)
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/select.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/uio.h>

#include <kern/zalloc.h>

MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");

static int	kqueue_scan(struct file *fp, int maxevents,
		    struct kevent *ulistp, const struct timespec *timeout,
		    register_t *retval, struct proc *p);
static void	kqueue_wakeup(struct kqueue *kq);

static int	kqueue_read __P((struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags, struct proc *p));
static int	kqueue_write __P((struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags, struct proc *p));
static int	kqueue_ioctl __P((struct file *fp, u_long com, caddr_t data,
		    struct proc *p));
static int	kqueue_select __P((struct file *fp, int which, void *wql,
		    struct proc *p));
static int	kqueue_close __P((struct file *fp, struct proc *p));
static int	kqueue_kqfilter __P((struct file *fp, struct knote *kn, struct proc *p));

static struct fileops kqueueops = {
	kqueue_read,
	kqueue_write,
	kqueue_ioctl,
	kqueue_select,
	kqueue_close,
	kqueue_kqfilter
};

static void	knote_fdpattach(struct knote *kn, struct filedesc *fdp);
static void	knote_drop(struct knote *kn, struct proc *p);
static void	knote_enqueue(struct knote *kn);
static void	knote_dequeue(struct knote *kn);
static struct	knote *knote_alloc(void);
static void	knote_free(struct knote *kn);

static int	filt_fileattach(struct knote *kn);
static struct filterops file_filtops =
	{ 1, filt_fileattach, NULL, NULL };

static void	filt_kqdetach(struct knote *kn);
static int	filt_kqueue(struct knote *kn, long hint);
static struct filterops kqread_filtops =
	{ 1, NULL, filt_kqdetach, filt_kqueue };

/*
 * JMM - placeholder for not-yet-implemented filters
 */
static int	filt_badattach(struct knote *kn);
static struct filterops bad_filtops =
	{ 0, filt_badattach, 0, 0 };

static int	filt_procattach(struct knote *kn);
static void	filt_procdetach(struct knote *kn);
static int	filt_proc(struct knote *kn, long hint);

static struct filterops proc_filtops =
	{ 0, filt_procattach, filt_procdetach, filt_proc };

extern struct filterops fs_filtops;

extern struct filterops sig_filtops;

#if 0
/* JMM - We don't implement these now */
static void	filt_timerexpire(void *knx);
static int	filt_timerattach(struct knote *kn);
static void	filt_timerdetach(struct knote *kn);
static int	filt_timer(struct knote *kn, long hint);

static struct filterops timer_filtops =
	{ 0, filt_timerattach, filt_timerdetach, filt_timer };

static int	kq_ncallouts = 0;
static int	kq_calloutmax = (4 * 1024);

SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
    &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");
#endif /* 0 */

static zone_t	knote_zone;

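/*
 * Flag a knote as active and, unless it is already queued or has been
 * disabled, enqueue it on its kqueue so a pending kevent() can collect it.
 */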
#define KNOTE_ACTIVATE(kn) do { 				\
	kn->kn_status |= KN_ACTIVE;				\
	if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)	\
		knote_enqueue(kn);				\
} while(0)

#define	KN_HASHSIZE		64		/* XXX should be tunable */
#define KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))

#if 0
extern struct filterops aio_filtops;
#endif

/*
 * Table for all system-defined filters.
 */
static struct filterops *sysfilt_ops[] = {
	&file_filtops,			/* EVFILT_READ */
	&file_filtops,			/* EVFILT_WRITE */
#if 0
	&aio_filtops,			/* EVFILT_AIO */
#else
	&bad_filtops,			/* EVFILT_AIO */
#endif
	&file_filtops,			/* EVFILT_VNODE */
	&proc_filtops,			/* EVFILT_PROC */
	&sig_filtops,			/* EVFILT_SIGNAL */
#if 0
	&timer_filtops,			/* EVFILT_TIMER */
#else
	&bad_filtops,			/* EVFILT_TIMER */
#endif
	&bad_filtops,			/* EVFILT_MACHPORT */
	&fs_filtops			/* EVFILT_FS */
};

static int
filt_fileattach(struct knote *kn)
{

	return (fo_kqfilter(kn->kn_fp, kn, current_proc()));
}

static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	if (kq->kq_state & KQ_SEL)
		return;

	KNOTE_DETACH(&kq->kq_sel.si_note, kn);
}

/*ARGSUSED*/
static int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	kn->kn_data = kq->kq_count;
	return (kn->kn_data > 0);
}

static int
filt_procattach(struct knote *kn)
{
	struct proc *p;

	p = pfind(kn->kn_id);
	if (p == NULL)
		return (ESRCH);
	if (! PRISON_CHECK(current_proc(), p))
		return (EACCES);

	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	/*
	 * internal flag indicating registration done by kernel
	 */
	if (kn->kn_flags & EV_FLAG1) {
		kn->kn_data = kn->kn_sdata;	/* ppid */
		kn->kn_fflags = NOTE_CHILD;
		kn->kn_flags &= ~EV_FLAG1;
	}

	/* XXX lock the proc here while adding to the list? */
	KNOTE_ATTACH(&p->p_klist, kn);

	return (0);
}

/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process does not exist any more.
 */
static void
filt_procdetach(struct knote *kn)
{
	struct proc *p = kn->kn_ptr.p_proc;

	if (kn->kn_status & KN_DETACHED)
		return;

	/* XXX locking?  this might modify another process. */
	KNOTE_DETACH(&p->p_klist, kn);
}

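/*
 * Process-filter event routine: record any events the user asked for,
 * flag the knote as finished when the process exits, and (for NOTE_TRACK)
 * register a new knote against a forked child so it is tracked as well.
 */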
static int
filt_proc(struct knote *kn, long hint)
{
	u_int event;

	/*
	 * mask off extra data
	 */
	event = (u_int)hint & NOTE_PCTRLMASK;

	/*
	 * if the user is interested in this event, record it.
	 */
	if (kn->kn_sfflags & event)
		kn->kn_fflags |= event;

	/*
	 * process is gone, so flag the event as finished.
	 */
	if (event == NOTE_EXIT) {
		kn->kn_status |= KN_DETACHED;
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		return (1);
	}

	/*
	 * process forked, and user wants to track the new process,
	 * so attach a new knote to it, and immediately report an
	 * event with the parent's pid.
	 */
	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
		struct kevent kev;
		int error;

		/*
		 * register knote with new process.
		 */
		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;			/* parent */
		kev.udata = kn->kn_kevent.udata;	/* preserve udata */
		error = kqueue_register(kn->kn_kq, &kev, NULL);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;
	}

	return (kn->kn_fflags != 0);
}

#if 0
static void
filt_timerexpire(void *knx)
{
	struct knote *kn = knx;
	struct callout *calloutp;
	struct timeval tv;
	int tticks;

	kn->kn_data++;
	KNOTE_ACTIVATE(kn);

	if ((kn->kn_flags & EV_ONESHOT) == 0) {
		tv.tv_sec = kn->kn_sdata / 1000;
		tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
		tticks = tvtohz(&tv);
		calloutp = (struct callout *)kn->kn_hook;
		callout_reset(calloutp, tticks, filt_timerexpire, kn);
	}
}

/*
 * data contains amount of time to sleep, in milliseconds
 */
static int
filt_timerattach(struct knote *kn)
{
	struct callout *calloutp;
	struct timeval tv;
	int tticks;

	if (kq_ncallouts >= kq_calloutmax)
		return (ENOMEM);
	kq_ncallouts++;

	tv.tv_sec = kn->kn_sdata / 1000;
	tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
	tticks = tvtohz(&tv);

	kn->kn_flags |= EV_CLEAR;		/* automatically set */
	MALLOC(calloutp, struct callout *, sizeof(*calloutp),
	    M_KQUEUE, M_WAITOK);
	callout_init(calloutp);
	callout_reset(calloutp, tticks, filt_timerexpire, kn);
	kn->kn_hook = (caddr_t)calloutp;

	return (0);
}

static void
filt_timerdetach(struct knote *kn)
{
	struct callout *calloutp;

	calloutp = (struct callout *)kn->kn_hook;
	callout_stop(calloutp);
	FREE(calloutp, M_KQUEUE);
	kq_ncallouts--;
}

static int
filt_timer(struct knote *kn, long hint)
{

	return (kn->kn_data != 0);
}
#endif /* 0 */

/*
 * JMM - placeholder for not-yet-implemented filters
 */
static int
filt_badattach(struct knote *kn)
{
	return(EOPNOTSUPP);
}

#ifndef _SYS_SYSPROTO_H_
struct kqueue_args {
	int dummy;
};
#endif

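/*
 * The kqueue() system call: allocate a new descriptor whose file ops are
 * backed by a freshly zeroed struct kqueue, and note in the filedesc that
 * this process now has a kqueue.
 */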
int
kqueue(struct proc *p, struct kqueue_args *uap, register_t *retval)
{
	struct filedesc *fdp = p->p_fd;
	struct kqueue *kq;
	struct file *fp;
	int fd, error;

	error = falloc(p, &fp, &fd);
	if (error)
		return (error);
	fp->f_flag = FREAD | FWRITE;
	fp->f_type = DTYPE_KQUEUE;
	fp->f_ops = &kqueueops;
	kq = (struct kqueue *)_MALLOC(sizeof(struct kqueue), M_KQUEUE, M_WAITOK | M_ZERO);
	TAILQ_INIT(&kq->kq_head);
	fp->f_data = (caddr_t)kq;
	*retval = fd;
	if (fdp->fd_knlistsize < 0)
		fdp->fd_knlistsize = 0;		/* this process has a kq */
	kq->kq_fdp = fdp;
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct kqueue_portset_np_args {
	int	fd;
};
#endif
int
kqueue_portset_np(struct proc *p, struct kqueue_portset_np_args *uap, register_t *retval)
{
	/* JMM - Placeholder for now */
	return (EOPNOTSUPP);
}

#ifndef _SYS_SYSPROTO_H_
struct kqueue_from_portset_np_args {
	int	fd;
};
#endif
int
kqueue_from_portset_np(struct proc *p, struct kqueue_from_portset_np_args *uap, register_t *retval)
{
	/* JMM - Placeholder for now */
	return (EOPNOTSUPP);
}

#if !0
/* JMM - We don't implement this yet */
#define fhold(fp)
#define fdrop(fp, p)
#endif /* !0 */

#ifndef _SYS_SYSPROTO_H_
struct kevent_args {
	int	fd;
	const struct kevent *changelist;
	int	nchanges;
	struct	kevent *eventlist;
	int	nevents;
	const struct timespec *timeout;
};
#endif
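/*
 * The kevent() system call: copy in the changelist in KQ_NEVENTS-sized
 * batches and register each change against the kqueue, reporting any
 * per-change error through the eventlist when there is room for it;
 * then call kqueue_scan() to collect triggered events.
 */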
int
kevent(struct proc *p, struct kevent_args *uap, register_t *retval)
{
	struct filedesc *fdp = p->p_fd;
	struct kqueue *kq;
	struct file *fp = NULL;
	struct timespec ts;
	int i, nerrors, error;

	if (uap->timeout != NULL) {
		error = copyin((caddr_t)uap->timeout, (caddr_t)&ts, sizeof(ts));
		if (error)
			goto done;
		uap->timeout = &ts;
	}

	if (((u_int)uap->fd) >= fdp->fd_nfiles ||
	    (fp = fdp->fd_ofiles[uap->fd]) == NULL ||
	    (fp->f_type != DTYPE_KQUEUE))
		return (EBADF);

	fhold(fp);

	kq = (struct kqueue *)fp->f_data;
	nerrors = 0;

	while (uap->nchanges > 0) {
		int i;
		int n = uap->nchanges > KQ_NEVENTS ? KQ_NEVENTS : uap->nchanges;
		struct kevent kq_kev[n];

		error = copyin((caddr_t)uap->changelist, (caddr_t)kq_kev,
		    n * sizeof(struct kevent));
		if (error)
			goto done;
		for (i = 0; i < n; i++) {
			struct kevent *kevp = &kq_kev[i];

			kevp->flags &= ~EV_SYSFLAGS;
			error = kqueue_register(kq, kevp, p);
			if (error) {
				if (uap->nevents != 0) {
					kevp->flags = EV_ERROR;
					kevp->data = error;
					(void) copyout((caddr_t)kevp,
					    (caddr_t)uap->eventlist,
					    sizeof(*kevp));
					uap->eventlist++;
					uap->nevents--;
					nerrors++;
				} else {
					goto done;
				}
			}
		}
		uap->nchanges -= n;
		uap->changelist += n;
	}
	if (nerrors) {
		*retval = nerrors;
		error = 0;
		goto done;
	}

	error = kqueue_scan(fp, uap->nevents, uap->eventlist, uap->timeout, retval, p);
done:
	if (fp != NULL)
		fdrop(fp, p);
	return (error);
}

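/*
 * Register a single change with a kqueue: locate the matching knote
 * (by descriptor, or by identity hash for non-fd filters), then apply
 * EV_ADD, EV_DELETE, EV_DISABLE, and EV_ENABLE as requested.
 */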
int
kqueue_register(struct kqueue *kq, struct kevent *kev, struct proc *p)
{
	struct filedesc *fdp = kq->kq_fdp;
	struct filterops *fops;
	struct file *fp = NULL;
	struct knote *kn = NULL;
	int s, error = 0;

	if (kev->filter < 0) {
		if (kev->filter + EVFILT_SYSCOUNT < 0)
			return (EINVAL);
		fops = sysfilt_ops[~kev->filter];	/* to 0-base index */
	} else {
		/*
		 * XXX
		 * filter attach routine is responsible for ensuring that
		 * the identifier can be attached to it.
		 */
		printf("unknown filter: %d\n", kev->filter);
		return (EINVAL);
	}

	if (fops->f_isfd) {
		/* validate descriptor */
		if ((u_int)kev->ident >= fdp->fd_nfiles ||
		    (fp = fdp->fd_ofiles[kev->ident]) == NULL)
			return (EBADF);
		fhold(fp);

		if (kev->ident < fdp->fd_knlistsize) {
			SLIST_FOREACH(kn, &fdp->fd_knlist[kev->ident], kn_link)
				if (kq == kn->kn_kq &&
				    kev->filter == kn->kn_filter)
					break;
		}
	} else {
		if (fdp->fd_knhashmask != 0) {
			struct klist *list;

			list = &fdp->fd_knhash[
			    KN_HASH((u_long)kev->ident, fdp->fd_knhashmask)];
			SLIST_FOREACH(kn, list, kn_link)
				if (kev->ident == kn->kn_id &&
				    kq == kn->kn_kq &&
				    kev->filter == kn->kn_filter)
					break;
		}
	}

	if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
		error = ENOENT;
		goto done;
	}

	/*
	 * kn now contains the matching knote, or NULL if no match.
	 */
	if (kev->flags & EV_ADD) {

		if (kn == NULL) {
			kn = knote_alloc();
			if (kn == NULL) {
				error = ENOMEM;
				goto done;
			}
			kn->kn_fp = fp;
			kn->kn_kq = kq;
			kn->kn_fop = fops;

			/*
			 * apply reference count to knote structure, and
			 * do not release it at the end of this routine.
			 */
			fp = NULL;

			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kev->fflags = 0;
			kev->data = 0;
			kn->kn_kevent = *kev;

			knote_fdpattach(kn, fdp);
			if ((error = fops->f_attach(kn)) != 0) {
				knote_drop(kn, p);
				goto done;
			}
		} else {
			/*
			 * The user may change some filter values after the
			 * initial EV_ADD, but doing so will not reset any
			 * filters which have already been triggered.
			 */
			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kn->kn_kevent.udata = kev->udata;
		}

		s = splhigh();
		if (kn->kn_fop->f_event(kn, 0))
			KNOTE_ACTIVATE(kn);
		splx(s);

	} else if (kev->flags & EV_DELETE) {
		kn->kn_fop->f_detach(kn);
		knote_drop(kn, p);
		goto done;
	}

	if ((kev->flags & EV_DISABLE) &&
	    ((kn->kn_status & KN_DISABLED) == 0)) {
		s = splhigh();
		kn->kn_status |= KN_DISABLED;
		splx(s);
	}

	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
		s = splhigh();
		kn->kn_status &= ~KN_DISABLED;
		if ((kn->kn_status & KN_ACTIVE) &&
		    ((kn->kn_status & KN_QUEUED) == 0))
			knote_enqueue(kn);
		splx(s);
	}

done:
	if (fp != NULL)
		fdrop(fp, p);
	return (error);
}

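/*
 * Scan a kqueue for up to maxevents triggered events and copy them out
 * to the user's event list.  A marker knote is appended to the queue so
 * a single pass can tell where this scan's work ends; the timeout, if
 * any, is converted to ticks and re-armed across sleeps.
 */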
static int
kqueue_scan(struct file *fp, int maxevents, struct kevent *ulistp,
	const struct timespec *tsp, register_t *retval, struct proc *p)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;
	struct timeval atv, rtv, ttv;
	int s, count, timeout, error = 0;
	struct knote marker;

	count = maxevents;
	if (count == 0)
		goto done;

	if (tsp != NULL) {
		TIMESPEC_TO_TIMEVAL(&atv, tsp);
		if (itimerfix(&atv)) {
			error = EINVAL;
			goto done;
		}
		if (tsp->tv_sec == 0 && tsp->tv_nsec == 0)
			timeout = -1;
		else
			timeout = atv.tv_sec > 24 * 60 * 60 ?
			    24 * 60 * 60 * hz : tvtohz(&atv);
		getmicrouptime(&rtv);
		timevaladd(&atv, &rtv);
	} else {
		atv.tv_sec = 0;
		atv.tv_usec = 0;
		timeout = 0;
	}
	goto start;

retry:
	if (atv.tv_sec || atv.tv_usec) {
		getmicrouptime(&rtv);
		if (timevalcmp(&rtv, &atv, >=))
			goto done;
		ttv = atv;
		timevalsub(&ttv, &rtv);
		timeout = ttv.tv_sec > 24 * 60 * 60 ?
		    24 * 60 * 60 * hz : tvtohz(&ttv);
	}

start:
	s = splhigh();
	if (kq->kq_count == 0) {
		if (timeout < 0) {
			error = EWOULDBLOCK;
		} else {
			kq->kq_state |= KQ_SLEEP;
			error = tsleep(kq, PSOCK | PCATCH, "kqread", timeout);
		}
		splx(s);
		if (error == 0)
			goto retry;
		/* don't restart after signals... */
		if (error == ERESTART)
			error = EINTR;
		else if (error == EWOULDBLOCK)
			error = 0;
		goto done;
	}

	/* JMM - This marker trick doesn't work with multiple threads */
	TAILQ_INSERT_TAIL(&kq->kq_head, &marker, kn_tqe);
	while (count) {
		int maxkev = (count > KQ_NEVENTS) ? KQ_NEVENTS : count;
		struct kevent kq_kev[maxkev];
		struct kevent *kevp = kq_kev;
		struct knote *kn;
		int nkev = 0;

		while (nkev < maxkev) {
			kn = TAILQ_FIRST(&kq->kq_head);
			TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
			if (kn == &marker) {
				if (count == maxevents)
					goto retry;
				break;
			} else if (kn->kn_status & KN_DISABLED) {
				kn->kn_status &= ~KN_QUEUED;
				kq->kq_count--;
				continue;
			} else if ((kn->kn_flags & EV_ONESHOT) == 0 &&
			    kn->kn_fop->f_event(kn, 0) == 0) {
				kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
				kq->kq_count--;
				continue;
			}

			*kevp = kn->kn_kevent;
			kevp++;
			nkev++;
			count--;

			if (kn->kn_flags & EV_ONESHOT) {
				kn->kn_status &= ~KN_QUEUED;
				kq->kq_count--;
				splx(s);
				kn->kn_fop->f_detach(kn);
				knote_drop(kn, p);
				s = splhigh();
			} else if (kn->kn_flags & EV_CLEAR) {
				kn->kn_data = 0;
				kn->kn_fflags = 0;
				kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
				kq->kq_count--;
			} else {
				TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
			}
		}
		splx(s);
		error = copyout((caddr_t)kq_kev, (caddr_t)ulistp,
		    sizeof(struct kevent) * nkev);
		if (kn == &marker)
			goto done;
		ulistp += nkev;
		s = splhigh();
		if (error)
			break;
	}
	TAILQ_REMOVE(&kq->kq_head, &marker, kn_tqe);
	splx(s);
done:
	*retval = maxevents - count;
	return (error);
}

/*
 * XXX
 * This could be expanded to call kqueue_scan, if desired.
 */
/*ARGSUSED*/
static int
kqueue_read(struct file *fp, struct uio *uio, struct ucred *cred,
	int flags, struct proc *p)
{
	return (ENXIO);
}

/*ARGSUSED*/
static int
kqueue_write(struct file *fp, struct uio *uio, struct ucred *cred,
	int flags, struct proc *p)
{
	return (ENXIO);
}

/*ARGSUSED*/
static int
kqueue_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
{
	return (ENOTTY);
}

/*ARGSUSED*/
static int
kqueue_select(struct file *fp, int which, void *wql, struct proc *p)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;
	int retnum = 0;
	int s = splnet();

	if (which == FREAD) {
		if (kq->kq_count) {
			retnum = 1;
		} else {
			selrecord(p, &kq->kq_sel, wql);
			kq->kq_state |= KQ_SEL;
		}
	}
	splx(s);
	return (retnum);
}

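/*
 * On close, walk every per-descriptor knote list and the identity hash,
 * detaching and freeing each knote that belongs to this kqueue before
 * the kqueue itself is freed.
 */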
/*ARGSUSED*/
static int
kqueue_close(struct file *fp, struct proc *p)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;
	struct filedesc *fdp = p->p_fd;
	struct knote **knp, *kn, *kn0;
	int i;

	for (i = 0; i < fdp->fd_knlistsize; i++) {
		knp = &SLIST_FIRST(&fdp->fd_knlist[i]);
		kn = *knp;
		while (kn != NULL) {
			kn0 = SLIST_NEXT(kn, kn_link);
			if (kq == kn->kn_kq) {
				kn->kn_fop->f_detach(kn);
				fdrop(kn->kn_fp, p);
				knote_free(kn);
				*knp = kn0;
			} else {
				knp = &SLIST_NEXT(kn, kn_link);
			}
			kn = kn0;
		}
	}
	if (fdp->fd_knhashmask != 0) {
		for (i = 0; i < fdp->fd_knhashmask + 1; i++) {
			knp = &SLIST_FIRST(&fdp->fd_knhash[i]);
			kn = *knp;
			while (kn != NULL) {
				kn0 = SLIST_NEXT(kn, kn_link);
				if (kq == kn->kn_kq) {
					kn->kn_fop->f_detach(kn);
					/* XXX non-fd release of kn->kn_ptr */
					knote_free(kn);
					*knp = kn0;
				} else {
					knp = &SLIST_NEXT(kn, kn_link);
				}
				kn = kn0;
			}
		}
	}
	_FREE(kq, M_KQUEUE);
	fp->f_data = NULL;

	return (0);
}

/*ARGSUSED*/
static int
kqueue_kqfilter(struct file *fp, struct knote *kn, struct proc *p)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	if (kn->kn_filter != EVFILT_READ || (kq->kq_state & KQ_SEL))
		return (1);

	kn->kn_fop = &kqread_filtops;
	KNOTE_ATTACH(&kq->kq_sel.si_note, kn);
	return (0);
}

/*ARGSUSED*/
int
kqueue_stat(struct file *fp, struct stat *st, struct proc *p)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;

	bzero((void *)st, sizeof(*st));
	st->st_size = kq->kq_count;
	st->st_blksize = sizeof(struct kevent);
	st->st_mode = S_IFIFO;
	return (0);
}

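/*
 * Wake up anyone sleeping in kqueue_scan(), and notify any select()ers
 * or knotes watching the kqueue itself.
 */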
static void
kqueue_wakeup(struct kqueue *kq)
{

	if (kq->kq_state & KQ_SLEEP) {
		kq->kq_state &= ~KQ_SLEEP;
		wakeup(kq);
	}
	if (kq->kq_state & KQ_SEL) {
		// kq->kq_state &= ~KQ_SEL;  /* remove for now */
		selwakeup(&kq->kq_sel);
	} else
		KNOTE(&kq->kq_sel.si_note, 0);
}

void
klist_init(struct klist *list)
{
	SLIST_INIT(list);
}

/*
 * walk down a list of knotes, activating them if their event has triggered.
 */
void
knote(struct klist *list, long hint)
{
	struct knote *kn;

	SLIST_FOREACH(kn, list, kn_selnext)
		if (kn->kn_fop->f_event(kn, hint))
			KNOTE_ACTIVATE(kn);
}

/*
 * attach a knote to the specified list.  Return true if this is the first entry.
 */
int
knote_attach(struct klist *list, struct knote *kn)
{
	int ret = SLIST_EMPTY(list);
	SLIST_INSERT_HEAD(list, kn, kn_selnext);
	return ret;
}

/*
 * detach a knote from the specified list.  Return true if that was the last entry.
 */
int
knote_detach(struct klist *list, struct knote *kn)
{
	SLIST_REMOVE(list, kn, knote, kn_selnext);
	return SLIST_EMPTY(list);
}

/*
 * remove all knotes from a specified klist
 */
void
knote_remove(struct proc *p, struct klist *list)
{
	struct knote *kn;

	while ((kn = SLIST_FIRST(list)) != NULL) {
		kn->kn_fop->f_detach(kn);
		knote_drop(kn, p);
	}
}

/*
 * remove all knotes referencing a specified fd
 */
void
knote_fdclose(struct proc *p, int fd)
{
	struct filedesc *fdp = p->p_fd;
	struct klist *list = &fdp->fd_knlist[fd];

	knote_remove(p, list);
}

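/*
 * Attach a knote to its owner's bookkeeping: file-descriptor knotes go
 * on the per-descriptor list (growing it in KQEXTENT steps as needed),
 * all others go into the identity hash.
 */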
static void
knote_fdpattach(struct knote *kn, struct filedesc *fdp)
{
	struct klist *list;
	int size;

	if (! kn->kn_fop->f_isfd) {
		if (fdp->fd_knhashmask == 0)
			fdp->fd_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
			    &fdp->fd_knhashmask);
		list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
		goto done;
	}

	if (fdp->fd_knlistsize <= kn->kn_id) {
		size = fdp->fd_knlistsize;
		while (size <= kn->kn_id)
			size += KQEXTENT;
		MALLOC(list, struct klist *,
		    size * sizeof(struct klist *), M_KQUEUE, M_WAITOK);
		bcopy((caddr_t)fdp->fd_knlist, (caddr_t)list,
		    fdp->fd_knlistsize * sizeof(struct klist *));
		bzero((caddr_t)list +
		    fdp->fd_knlistsize * sizeof(struct klist *),
		    (size - fdp->fd_knlistsize) * sizeof(struct klist *));
		if (fdp->fd_knlist != NULL)
			FREE(fdp->fd_knlist, M_KQUEUE);
		fdp->fd_knlistsize = size;
		fdp->fd_knlist = list;
	}
	list = &fdp->fd_knlist[kn->kn_id];
done:
	SLIST_INSERT_HEAD(list, kn, kn_link);
	kn->kn_status = 0;
}

/*
 * should be called at spl == 0, since we don't want to hold spl
 * while calling fdrop and free.
 */
static void
knote_drop(struct knote *kn, struct proc *p)
{
	struct filedesc *fdp = p->p_fd;
	struct klist *list;

	if (kn->kn_fop->f_isfd)
		list = &fdp->fd_knlist[kn->kn_id];
	else
		list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];

	SLIST_REMOVE(list, kn, knote, kn_link);
	if (kn->kn_status & KN_QUEUED)
		knote_dequeue(kn);
	if (kn->kn_fop->f_isfd)
		fdrop(kn->kn_fp, p);
	knote_free(kn);
}


static void
knote_enqueue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;
	int s = splhigh();

	KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));

	TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
	kn->kn_status |= KN_QUEUED;
	kq->kq_count++;
	splx(s);
	kqueue_wakeup(kq);
}

static void
knote_dequeue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;
	int s = splhigh();

	KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));

	TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
	kn->kn_status &= ~KN_QUEUED;
	kq->kq_count--;
	splx(s);
}

void
knote_init(void)
{
	knote_zone = zinit(sizeof(struct knote), 8192*sizeof(struct knote),
	    8192, "knote zone");
}
SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL)

static struct knote *
knote_alloc(void)
{
	return ((struct knote *)zalloc(knote_zone));
}

static void
knote_free(struct knote *kn)
{
	zfree(knote_zone, (vm_offset_t)kn);
}

#include <sys/param.h>
#include <sys/socket.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/mbuf.h>
#include <sys/kern_event.h>
#include <sys/malloc.h>
#include <sys/sys_domain.h>
#include <sys/syslog.h>


int raw_usrreq();
struct pr_usrreqs event_usrreqs;

struct protosw eventsw[] = {
	{
		SOCK_RAW, &systemdomain, SYSPROTO_EVENT, PR_ATOMIC,
		0, 0, 0, 0,
		0,
		0, 0, 0, 0,
		0, &event_usrreqs
	}
};

static
struct kern_event_head kern_event_head;

static u_long static_event_id = 0;

/*
 * Install the protosw's for the NKE manager.  Invoked at
 * extension load time.
 */
int
kern_event_init(void)
{
	int retval;

	if ((retval = net_add_proto(eventsw, &systemdomain)) == 0)
		return(KERN_SUCCESS);

	log(LOG_WARNING, "Can't install kernel events protocol (%d)\n", retval);
	return(retval);
}

int kev_attach(struct socket *so, int proto, struct proc *p)
{
	int error;
	struct kern_event_pcb *ev_pcb;

	error = soreserve(so, KEV_SNDSPACE, KEV_RECVSPACE);
	if (error)
		return error;

	ev_pcb = _MALLOC(sizeof(struct kern_event_pcb), M_PCB, M_WAITOK);
	if (ev_pcb == 0)
		return ENOBUFS;

	ev_pcb->ev_socket = so;
	ev_pcb->vendor_code_filter = 0xffffffff;

	so->so_pcb = (caddr_t) ev_pcb;
	LIST_INSERT_HEAD(&kern_event_head, ev_pcb, ev_link);

	return 0;
}


int kev_detach(struct socket *so)
{
	struct kern_event_pcb *ev_pcb = (struct kern_event_pcb *) so->so_pcb;

	if (ev_pcb != 0) {
		LIST_REMOVE(ev_pcb, ev_link);
		FREE(ev_pcb, M_PCB);
		so->so_pcb = 0;
	}

	return 0;
}

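/*
 * Broadcast a kernel event: flatten the message's data vectors into a
 * single mbuf, then append a copy of it to the receive buffer of every
 * event socket whose vendor/class/subclass filters match.
 */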
int kev_post_msg(struct kev_msg *event_msg)
{
	struct mbuf *m, *m2;
	struct kern_event_pcb *ev_pcb;
	struct kern_event_msg *ev;
	char *tmp;
	int total_size;
	int i;


	m = m_get(M_DONTWAIT, MT_DATA);
	if (m == 0)
		return ENOBUFS;

	ev = mtod(m, struct kern_event_msg *);
	total_size = KEV_MSG_HEADER_SIZE;

	tmp = (char *) &ev->event_data[0];
	for (i = 0; i < 5; i++) {
		if (event_msg->dv[i].data_length == 0)
			break;

		total_size += event_msg->dv[i].data_length;
		bcopy(event_msg->dv[i].data_ptr, tmp,
		    event_msg->dv[i].data_length);
		tmp += event_msg->dv[i].data_length;
	}


	ev->id = ++static_event_id;
	ev->total_size = total_size;
	ev->vendor_code = event_msg->vendor_code;
	ev->kev_class = event_msg->kev_class;
	ev->kev_subclass = event_msg->kev_subclass;
	ev->event_code = event_msg->event_code;

	m->m_len = total_size;
	for (ev_pcb = LIST_FIRST(&kern_event_head);
	     ev_pcb;
	     ev_pcb = LIST_NEXT(ev_pcb, ev_link)) {

		if (ev_pcb->vendor_code_filter != KEV_ANY_VENDOR) {
			if (ev_pcb->vendor_code_filter != ev->vendor_code)
				continue;

			if (ev_pcb->class_filter != KEV_ANY_CLASS) {
				if (ev_pcb->class_filter != ev->kev_class)
					continue;

				if ((ev_pcb->subclass_filter != KEV_ANY_SUBCLASS) &&
				    (ev_pcb->subclass_filter != ev->kev_subclass))
					continue;
			}
		}

		m2 = m_copym(m, 0, m->m_len, M_NOWAIT);
		if (m2 == 0) {
			m_free(m);
			return ENOBUFS;
		}

		sbappendrecord(&ev_pcb->ev_socket->so_rcv, m2);
		sorwakeup(ev_pcb->ev_socket);
	}


	m_free(m);
	return 0;
}

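/*
 * Socket ioctls for event sockets: report the current event id, and get
 * or set the per-socket vendor/class/subclass filter.
 */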
int kev_control(struct socket *so, u_long cmd, caddr_t data,
	register struct ifnet *ifp, struct proc *p)
{
	struct kev_request *kev_req = (struct kev_request *) data;
	int stat = 0;
	struct kern_event_pcb *ev_pcb;
	u_long *id_value = (u_long *) data;


	switch (cmd) {

	case SIOCGKEVID:
		*id_value = static_event_id;
		break;

	case SIOCSKEVFILT:
		ev_pcb = (struct kern_event_pcb *) so->so_pcb;
		ev_pcb->vendor_code_filter = kev_req->vendor_code;
		ev_pcb->class_filter = kev_req->kev_class;
		ev_pcb->subclass_filter = kev_req->kev_subclass;
		break;

	case SIOCGKEVFILT:
		ev_pcb = (struct kern_event_pcb *) so->so_pcb;
		kev_req->vendor_code = ev_pcb->vendor_code_filter;
		kev_req->kev_class = ev_pcb->class_filter;
		kev_req->kev_subclass = ev_pcb->subclass_filter;
		break;

	default:
		return EOPNOTSUPP;
	}

	return 0;
}


struct pr_usrreqs event_usrreqs = {
	pru_abort_notsupp, pru_accept_notsupp, kev_attach, pru_bind_notsupp, pru_connect_notsupp,
	pru_connect2_notsupp, kev_control, kev_detach, pru_disconnect_notsupp,
	pru_listen_notsupp, pru_peeraddr_notsupp, pru_rcvd_notsupp, pru_rcvoob_notsupp,
	pru_send_notsupp, pru_sense_null, pru_shutdown_notsupp, pru_sockaddr_notsupp,
	pru_sosend_notsupp, soreceive, sopoll
};