apple/xnu.git: bsd/kern/kern_control.c (blob 723cda78ea612c1c8d9464be6d475459bf275394)
1 /*
2 * Copyright (c) 1999-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /*
30 * Kernel Control domain - allows control connections to
31 * registered kernel controllers and to read/write data.
32 *
33 * Vincent Lubet, 040506
34 * Christophe Allie, 010928
35 * Justin C. Walker, 990319
36 */
37
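/*
 * Illustrative sketch (not part of this file): a user process typically
 * reaches a kernel control by resolving the control name to an id with
 * CTLIOCGINFO and then connecting with a sockaddr_ctl, e.g.
 *
 *	int fd = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);
 *	struct ctl_info info;
 *	bzero(&info, sizeof(info));
 *	strlcpy(info.ctl_name, "com.example.mycontrol", sizeof(info.ctl_name));
 *	ioctl(fd, CTLIOCGINFO, &info);		/. filled in by ctl_ioctl() below
 *	struct sockaddr_ctl addr;
 *	bzero(&addr, sizeof(addr));
 *	addr.sc_len = sizeof(addr);
 *	addr.sc_family = AF_SYSTEM;
 *	addr.ss_sysaddr = AF_SYS_CONTROL;
 *	addr.sc_id = info.ctl_id;
 *	addr.sc_unit = 0;			/. 0 lets ctl_connect() pick a free unit
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 *
 * "com.example.mycontrol" is a hypothetical control name.
 */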
38 #include <sys/types.h>
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/syslog.h>
42 #include <sys/socket.h>
43 #include <sys/socketvar.h>
44 #include <sys/protosw.h>
45 #include <sys/domain.h>
46 #include <sys/malloc.h>
47 #include <sys/mbuf.h>
48 #include <sys/sys_domain.h>
49 #include <sys/kern_event.h>
50 #include <sys/kern_control.h>
51 #include <net/if_var.h>
52
53 #include <mach/vm_types.h>
54 #include <mach/kmod.h>
55
56 #include <kern/thread.h>
57
58 /*
59 * Default send and receive buffer sizes
60 */
61
62 #define CTL_SENDSIZE (2 * 1024) /* default buffer size */
63 #define CTL_RECVSIZE (8 * 1024) /* default buffer size */
64
65 /*
66 * Globals for the kernel control module
67 */
68
69 static u_int32_t ctl_last_id = 0;
70 static u_int32_t ctl_max = 256;
71 static u_int32_t ctl_maxunit = 65536;
72 static lck_grp_attr_t *ctl_lck_grp_attr = 0;
73 static lck_attr_t *ctl_lck_attr = 0;
74 static lck_grp_t *ctl_lck_grp = 0;
75 static lck_mtx_t *ctl_mtx;
76
77 /*
78 * internal structure maintained for each registered controller
79 */
80
81 struct ctl_cb;
82
83 struct kctl
84 {
85 TAILQ_ENTRY(kctl) next; /* controller chain */
86
87 /* controller information provided when registering */
88 char name[MAX_KCTL_NAME]; /* unique nke identifier, provided by DTS */
89 u_int32_t id;
90 u_int32_t reg_unit;
91
92 /* misc communication information */
93 u_int32_t flags; /* support flags */
94 u_int32_t recvbufsize; /* request more than the default buffer size */
95 u_int32_t sendbufsize; /* request more than the default buffer size */
96
97 /* Dispatch functions */
98 ctl_connect_func connect; /* Make contact */
99 ctl_disconnect_func disconnect; /* Break contact */
100 ctl_send_func send; /* Send data to nke */
101 ctl_setopt_func setopt; /* set kctl configuration */
102 ctl_getopt_func getopt; /* get kctl configuration */
103
104 TAILQ_HEAD(, ctl_cb) kcb_head;
105 u_int32_t lastunit;
106 };
107
108 struct ctl_cb {
109 TAILQ_ENTRY(ctl_cb) next; /* controller chain */
110 lck_mtx_t *mtx;
111 struct socket *so; /* controlling socket */
112 struct kctl *kctl; /* back pointer to controller */
113 u_int32_t unit;
114 void *userdata;
115 };
116
117 /* all the controllers are chained */
118 TAILQ_HEAD(, kctl) ctl_head;
119
120 static int ctl_attach(struct socket *, int, struct proc *);
121 static int ctl_detach(struct socket *);
122 static int ctl_sofreelastref(struct socket *so);
123 static int ctl_connect(struct socket *, struct sockaddr *, struct proc *);
124 static int ctl_disconnect(struct socket *);
125 static int ctl_ioctl(struct socket *so, u_long cmd, caddr_t data,
126 struct ifnet *ifp, struct proc *p);
127 static int ctl_send(struct socket *, int, struct mbuf *,
128 struct sockaddr *, struct mbuf *, struct proc *);
129 static int ctl_ctloutput(struct socket *, struct sockopt *);
130 static int ctl_peeraddr(struct socket *so, struct sockaddr **nam);
131
132 static struct kctl *ctl_find_by_id(u_int32_t);
133 static struct kctl *ctl_find_by_name(const char *);
134 static struct kctl *ctl_find_by_id_unit(u_int32_t id, u_int32_t unit);
135
136 static struct ctl_cb *kcb_find(struct kctl *, u_int32_t unit);
137 static void ctl_post_msg(u_long event_code, u_int32_t id);
138
139 static int ctl_lock(struct socket *, int, int);
140 static int ctl_unlock(struct socket *, int, int);
141 static lck_mtx_t * ctl_getlock(struct socket *, int);
142
143 static struct pr_usrreqs ctl_usrreqs =
144 {
145 pru_abort_notsupp, pru_accept_notsupp, ctl_attach, pru_bind_notsupp,
146 ctl_connect, pru_connect2_notsupp, ctl_ioctl, ctl_detach,
147 ctl_disconnect, pru_listen_notsupp, ctl_peeraddr,
148 pru_rcvd_notsupp, pru_rcvoob_notsupp, ctl_send,
149 pru_sense_null, pru_shutdown_notsupp, pru_sockaddr_notsupp,
150 sosend, soreceive, pru_sopoll_notsupp
151 };
152
153 static struct protosw kctlswk_dgram =
154 {
155 SOCK_DGRAM, &systemdomain, SYSPROTO_CONTROL,
156 PR_ATOMIC|PR_CONNREQUIRED|PR_PCBLOCK,
157 NULL, NULL, NULL, ctl_ctloutput,
158 NULL, NULL,
159 NULL, NULL, NULL, NULL, &ctl_usrreqs,
160 ctl_lock, ctl_unlock, ctl_getlock, { 0, 0 } , 0, { 0 }
161 };
162
163 static struct protosw kctlswk_stream =
164 {
165 SOCK_STREAM, &systemdomain, SYSPROTO_CONTROL,
166 PR_CONNREQUIRED|PR_PCBLOCK,
167 NULL, NULL, NULL, ctl_ctloutput,
168 NULL, NULL,
169 NULL, NULL, NULL, NULL, &ctl_usrreqs,
170 ctl_lock, ctl_unlock, ctl_getlock, { 0, 0 } , 0, { 0 }
171 };
172
173
174 /*
175 * Install the protosw's for the Kernel Control manager.
176 */
177 __private_extern__ int
178 kern_control_init(void)
179 {
180 int error = 0;
181
182 ctl_lck_grp_attr = lck_grp_attr_alloc_init();
183 if (ctl_lck_grp_attr == 0) {
184 printf(": lck_grp_attr_alloc_init failed\n");
185 error = ENOMEM;
186 goto done;
187 }
188 lck_grp_attr_setdefault(ctl_lck_grp_attr);
189
190 ctl_lck_grp = lck_grp_alloc_init("Kernel Control Protocol", ctl_lck_grp_attr);
191 if (ctl_lck_grp == 0) {
192 printf("kern_control_init: lck_grp_alloc_init failed\n");
193 error = ENOMEM;
194 goto done;
195 }
196
197 ctl_lck_attr = lck_attr_alloc_init();
198 if (ctl_lck_attr == 0) {
199 printf("kern_control_init: lck_attr_alloc_init failed\n");
200 error = ENOMEM;
201 goto done;
202 }
203 lck_attr_setdefault(ctl_lck_attr);
204
205 ctl_mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
206 if (ctl_mtx == 0) {
207 printf("kern_control_init: lck_mtx_alloc_init failed\n");
208 error = ENOMEM;
209 goto done;
210 }
211 TAILQ_INIT(&ctl_head);
212
213 error = net_add_proto(&kctlswk_dgram, &systemdomain);
214 if (error) {
215 log(LOG_WARNING, "kern_control_init: net_add_proto dgram failed (%d)\n", error);
216 }
217 error = net_add_proto(&kctlswk_stream, &systemdomain);
218 if (error) {
219 log(LOG_WARNING, "kern_control_init: net_add_proto stream failed (%d)\n", error);
220 }
221
222 done:
223 if (error != 0) {
224 if (ctl_mtx) {
225 lck_mtx_free(ctl_mtx, ctl_lck_grp);
226 ctl_mtx = 0;
227 }
228 if (ctl_lck_grp) {
229 lck_grp_free(ctl_lck_grp);
230 ctl_lck_grp = 0;
231 }
232 if (ctl_lck_grp_attr) {
233 lck_grp_attr_free(ctl_lck_grp_attr);
234 ctl_lck_grp_attr = 0;
235 }
236 if (ctl_lck_attr) {
237 lck_attr_free(ctl_lck_attr);
238 ctl_lck_attr = 0;
239 }
240 }
241 return error;
242 }
243
244 static void
245 kcb_delete(struct ctl_cb *kcb)
246 {
247 if (kcb != 0) {
248 if (kcb->mtx != 0)
249 lck_mtx_free(kcb->mtx, ctl_lck_grp);
250 FREE(kcb, M_TEMP);
251 }
252 }
253
254
255 /*
256 * Kernel Controller user-request functions
257 * the attach function must exist and succeed
258 * the detach function is not necessary
259 * we need a pcb to hold the per-socket mutex
260 */
261 static int
262 ctl_attach(__unused struct socket *so, __unused int proto, __unused struct proc *p)
263 {
264 int error = 0;
265 struct ctl_cb *kcb = 0;
266
267 MALLOC(kcb, struct ctl_cb *, sizeof(struct ctl_cb), M_TEMP, M_WAITOK);
268 if (kcb == NULL) {
269 error = ENOMEM;
270 goto quit;
271 }
272 bzero(kcb, sizeof(struct ctl_cb));
273
274 kcb->mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
275 if (kcb->mtx == NULL) {
276 error = ENOMEM;
277 goto quit;
278 }
279 kcb->so = so;
280 so->so_pcb = (caddr_t)kcb;
281
282 quit:
283 if (error != 0) {
284 kcb_delete(kcb);
285 kcb = 0;
286 }
287 return error;
288 }
289
290 static int
291 ctl_sofreelastref(struct socket *so)
292 {
293 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
294
295 so->so_pcb = 0;
296
297 if (kcb != 0) {
298 struct kctl *kctl;
299 if ((kctl = kcb->kctl) != 0) {
300 lck_mtx_lock(ctl_mtx);
301 TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
302 lck_mtx_unlock(ctl_mtx);
303 }
304 kcb_delete(kcb);
305 }
306 return 0;
307 }
308
309 static int
310 ctl_detach(struct socket *so)
311 {
312 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
313
314 if (kcb == 0)
315 return 0;
316
317 soisdisconnected(so);
318 so->so_flags |= SOF_PCBCLEARING;
319 return 0;
320 }
321
322
323 static int
324 ctl_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
325 {
326 struct kctl *kctl;
327 int error = 0;
328 struct sockaddr_ctl sa;
329 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
330
331 if (kcb == 0)
332 panic("ctl_connect so_pcb null\n");
333
334 if (nam->sa_len != sizeof(struct sockaddr_ctl))
335 return(EINVAL);
336
337 bcopy(nam, &sa, sizeof(struct sockaddr_ctl));
338
339 lck_mtx_lock(ctl_mtx);
340 kctl = ctl_find_by_id_unit(sa.sc_id, sa.sc_unit);
341 if (kctl == NULL) {
342 lck_mtx_unlock(ctl_mtx);
343 return ENOENT;
344 }
345
346 if (((kctl->flags & CTL_FLAG_REG_SOCK_STREAM) && (so->so_type != SOCK_STREAM)) ||
347 (!(kctl->flags & CTL_FLAG_REG_SOCK_STREAM) && (so->so_type != SOCK_DGRAM))) {
348 lck_mtx_unlock(ctl_mtx);
349 return EPROTOTYPE;
350 }
351
352 if (kctl->flags & CTL_FLAG_PRIVILEGED) {
353 if (p == 0) {
354 lck_mtx_unlock(ctl_mtx);
355 return(EINVAL);
356 }
357 if ((error = proc_suser(p))) {
358 lck_mtx_unlock(ctl_mtx);
359 return error;
360 }
361 }
362
363 if ((kctl->flags & CTL_FLAG_REG_ID_UNIT) || sa.sc_unit != 0) {
364 if (kcb_find(kctl, sa.sc_unit) != NULL) {
365 lck_mtx_unlock(ctl_mtx);
366 return EBUSY;
367 }
368 } else {
369 u_int32_t unit = kctl->lastunit + 1;
370
371 while (1) {
372 if (unit == ctl_maxunit)
373 unit = 1;
374 if (kcb_find(kctl, unit) == NULL) {
375 kctl->lastunit = sa.sc_unit = unit;
376 break;
377 }
378 if (unit++ == kctl->lastunit) {
379 lck_mtx_unlock(ctl_mtx);
380 return EBUSY;
381 }
382 }
383 }
384
385 kcb->unit = sa.sc_unit;
386 kcb->kctl = kctl;
387 TAILQ_INSERT_TAIL(&kctl->kcb_head, kcb, next);
388 lck_mtx_unlock(ctl_mtx);
389
390 error = soreserve(so, kctl->sendbufsize, kctl->recvbufsize);
391 if (error)
392 goto done;
393 soisconnecting(so);
394
395 socket_unlock(so, 0);
396 error = (*kctl->connect)(kctl, &sa, &kcb->userdata);
397 socket_lock(so, 0);
398 if (error)
399 goto done;
400
401 soisconnected(so);
402
403 done:
404 if (error) {
405 soisdisconnected(so);
406 lck_mtx_lock(ctl_mtx);
407 kcb->kctl = 0;
408 kcb->unit = 0;
409 TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
410 lck_mtx_unlock(ctl_mtx);
411 }
412 return error;
413 }
414
415 static int
416 ctl_disconnect(struct socket *so)
417 {
418 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
419
420 if ((kcb = (struct ctl_cb *)so->so_pcb)) {
421 struct kctl *kctl = kcb->kctl;
422
423 if (kctl && kctl->disconnect) {
424 socket_unlock(so, 0);
425 (*kctl->disconnect)(kctl, kcb->unit, kcb->userdata);
426 socket_lock(so, 0);
427 }
428 lck_mtx_lock(ctl_mtx);
429 kcb->kctl = 0;
430 kcb->unit = 0;
431 TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
432 soisdisconnected(so);
433 lck_mtx_unlock(ctl_mtx);
434 }
435 return 0;
436 }
437
438 static int
439 ctl_peeraddr(struct socket *so, struct sockaddr **nam)
440 {
441 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
442 struct kctl *kctl;
443 struct sockaddr_ctl sc;
444
445 if (kcb == NULL) /* sanity check */
446 return(ENOTCONN);
447
448 if ((kctl = kcb->kctl) == NULL)
449 return(EINVAL);
450
451 bzero(&sc, sizeof(struct sockaddr_ctl));
452 sc.sc_len = sizeof(struct sockaddr_ctl);
453 sc.sc_family = AF_SYSTEM;
454 sc.ss_sysaddr = AF_SYS_CONTROL;
455 sc.sc_id = kctl->id;
456 sc.sc_unit = kcb->unit;
457
458 *nam = dup_sockaddr((struct sockaddr *)&sc, 1);
459
460 return 0;
461 }
462
463 static int
464 ctl_send(struct socket *so, int flags, struct mbuf *m,
465 __unused struct sockaddr *addr, __unused struct mbuf *control,
466 __unused struct proc *p)
467 {
468 int error = 0;
469 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
470 struct kctl *kctl;
471
472 if (kcb == NULL) /* sanity check */
473 return(ENOTCONN);
474
475 if ((kctl = kcb->kctl) == NULL)
476 return(EINVAL);
477
478 if (kctl->send) {
479 socket_unlock(so, 0);
480 error = (*kctl->send)(kctl, kcb->unit, kcb->userdata, m, flags);
481 socket_lock(so, 0);
482 }
483 return error;
484 }
485
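/*
 * KPI: hand a caller-built mbuf chain to the client of the given unit by
 * appending it to the receive buffer of the controlling socket.  Fails
 * with ENOBUFS when the socket buffer is full; CTL_DATA_EOR marks a
 * record boundary and CTL_DATA_NOWAKEUP suppresses the read wakeup.
 */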
486 errno_t
487 ctl_enqueuembuf(void *kctlref, u_int32_t unit, struct mbuf *m, u_int32_t flags)
488 {
489 struct ctl_cb *kcb;
490 struct socket *so;
491 errno_t error = 0;
492 struct kctl *kctl = (struct kctl *)kctlref;
493
494 if (kctl == NULL)
495 return EINVAL;
496
497 kcb = kcb_find(kctl, unit);
498 if (kcb == NULL)
499 return EINVAL;
500
501 so = (struct socket *)kcb->so;
502 if (so == NULL)
503 return EINVAL;
504
505 socket_lock(so, 1);
506 if (sbspace(&so->so_rcv) < m->m_pkthdr.len) {
507 error = ENOBUFS;
508 goto bye;
509 }
510 if ((flags & CTL_DATA_EOR))
511 m->m_flags |= M_EOR;
512 if (sbappend(&so->so_rcv, m) && (flags & CTL_DATA_NOWAKEUP) == 0)
513 sorwakeup(so);
514 bye:
515 socket_unlock(so, 1);
516 return error;
517 }
518
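/*
 * KPI: like ctl_enqueuembuf(), but copies a plain buffer into freshly
 * allocated mbufs before enqueueing it on the client socket.  The mbufs
 * are allocated with M_NOWAIT, so this may also fail with ENOBUFS under
 * memory pressure.
 */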
519 errno_t
520 ctl_enqueuedata(void *kctlref, u_int32_t unit, void *data, size_t len, u_int32_t flags)
521 {
522 struct ctl_cb *kcb;
523 struct socket *so;
524 struct mbuf *m;
525 errno_t error = 0;
526 struct kctl *kctl = (struct kctl *)kctlref;
527 unsigned int num_needed;
528 struct mbuf *n;
529 size_t curlen = 0;
530
531 if (kctlref == NULL)
532 return EINVAL;
533
534 kcb = kcb_find(kctl, unit);
535 if (kcb == NULL)
536 return EINVAL;
537
538 so = (struct socket *)kcb->so;
539 if (so == NULL)
540 return EINVAL;
541
542 socket_lock(so, 1);
543 if ((size_t)sbspace(&so->so_rcv) < len) {
544 error = ENOBUFS;
545 goto bye;
546 }
547
548 num_needed = 1;
549 m = m_allocpacket_internal(&num_needed, len, NULL, M_NOWAIT, 1, 0);
550 if (m == NULL) {
551 printf("ctl_enqueuedata: m_allocpacket_internal(%lu) failed\n", len);
552 error = ENOBUFS;
553 goto bye;
554 }
555
556 for (n = m; n != NULL; n = n->m_next) {
557 size_t mlen = mbuf_maxlen(n);
558
559 if (mlen + curlen > len)
560 mlen = len - curlen;
561 n->m_len = mlen;
562 bcopy((char *)data + curlen, n->m_data, mlen);
563 curlen += mlen;
564 }
565 mbuf_pkthdr_setlen(m, curlen);
566
567 if ((flags & CTL_DATA_EOR))
568 m->m_flags |= M_EOR;
569 if (sbappend(&so->so_rcv, m) && (flags & CTL_DATA_NOWAKEUP) == 0)
570 sorwakeup(so);
571 bye:
572 socket_unlock(so, 1);
573 return error;
574 }
575
576
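/*
 * KPI: report how much space is currently available in the client
 * socket's receive buffer, i.e. an upper bound on what the next
 * ctl_enqueuembuf()/ctl_enqueuedata() call can enqueue.
 */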
577 errno_t
578 ctl_getenqueuespace(kern_ctl_ref kctlref, u_int32_t unit, size_t *space)
579 {
580 struct ctl_cb *kcb;
581 struct kctl *kctl = (struct kctl *)kctlref;
582 struct socket *so;
583
584 if (kctlref == NULL || space == NULL)
585 return EINVAL;
586
587 kcb = kcb_find(kctl, unit);
588 if (kcb == NULL)
589 return EINVAL;
590
591 so = (struct socket *)kcb->so;
592 if (so == NULL)
593 return EINVAL;
594
595 socket_lock(so, 1);
596 *space = sbspace(&so->so_rcv);
597 socket_unlock(so, 1);
598
599 return 0;
600 }
601
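/*
 * Socket option handler: SYSPROTO_CONTROL options are passed through to
 * the controller's setopt/getopt callbacks, with the socket unlocked
 * around the callback.
 */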
602 static int
603 ctl_ctloutput(struct socket *so, struct sockopt *sopt)
604 {
605 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
606 struct kctl *kctl;
607 int error = 0;
608 void *data;
609 size_t len;
610
611 if (sopt->sopt_level != SYSPROTO_CONTROL) {
612 return(EINVAL);
613 }
614
615 if (kcb == NULL) /* sanity check */
616 return(ENOTCONN);
617
618 if ((kctl = kcb->kctl) == NULL)
619 return(EINVAL);
620
621 switch (sopt->sopt_dir) {
622 case SOPT_SET:
623 if (kctl->setopt == NULL)
624 return(ENOTSUP);
625 MALLOC(data, void *, sopt->sopt_valsize, M_TEMP, M_WAITOK);
626 if (data == NULL)
627 return(ENOMEM);
628 error = sooptcopyin(sopt, data, sopt->sopt_valsize, sopt->sopt_valsize);
629 if (error == 0) {
630 socket_unlock(so, 0);
631 error = (*kctl->setopt)(kcb->kctl, kcb->unit, kcb->userdata, sopt->sopt_name,
632 data, sopt->sopt_valsize);
633 socket_lock(so, 0);
634 }
635 FREE(data, M_TEMP);
636 break;
637
638 case SOPT_GET:
639 if (kctl->getopt == NULL)
640 return(ENOTSUP);
641 data = NULL;
642 if (sopt->sopt_valsize && sopt->sopt_val) {
643 MALLOC(data, void *, sopt->sopt_valsize, M_TEMP, M_WAITOK);
644 if (data == NULL)
645 return(ENOMEM);
646 /* 4108337 - copy in data for get socket option */
647 error = sooptcopyin(sopt, data, sopt->sopt_valsize, sopt->sopt_valsize);
648 }
649 len = sopt->sopt_valsize;
650 socket_unlock(so, 0);
651 error = (*kctl->getopt)(kcb->kctl, kcb->unit, kcb->userdata, sopt->sopt_name,
652 data, &len);
653 socket_lock(so, 0);
654 if (error == 0) {
655 if (data != NULL)
656 error = sooptcopyout(sopt, data, len);
657 else
658 sopt->sopt_valsize = len;
659 }
660 if (data != NULL)
661 FREE(data, M_TEMP);
662 break;
663 }
664 return error;
665 }
666
667 static int
668 ctl_ioctl(__unused struct socket *so, u_long cmd, caddr_t data,
669 __unused struct ifnet *ifp, __unused struct proc *p)
670 {
671 int error = ENOTSUP;
672
673 switch (cmd) {
674 /* get the number of controllers */
675 case CTLIOCGCOUNT: {
676 struct kctl *kctl;
677 int n = 0;
678
679 lck_mtx_lock(ctl_mtx);
680 TAILQ_FOREACH(kctl, &ctl_head, next)
681 n++;
682 lck_mtx_unlock(ctl_mtx);
683
684 *(u_int32_t *)data = n;
685 error = 0;
686 break;
687 }
688 case CTLIOCGINFO: {
689 struct ctl_info *ctl_info = (struct ctl_info *)data;
690 struct kctl *kctl = 0;
691 size_t name_len = strlen(ctl_info->ctl_name);
692
693 if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) {
694 error = EINVAL;
695 break;
696 }
697 lck_mtx_lock(ctl_mtx);
698 kctl = ctl_find_by_name(ctl_info->ctl_name);
699 lck_mtx_unlock(ctl_mtx);
700 if (kctl == 0) {
701 error = ENOENT;
702 break;
703 }
704 ctl_info->ctl_id = kctl->id;
705 error = 0;
706 break;
707 }
708
709 /* add controls to get list of NKEs */
710
711 }
712
713 return error;
714 }
715
716 /*
717 * Register/unregister a NKE
718 */
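/*
 * Illustrative sketch (not part of this file): a kernel extension would
 * typically register a control like this; the name and callbacks are
 * hypothetical.
 *
 *	struct kern_ctl_reg reg;
 *	kern_ctl_ref ref;
 *	bzero(&reg, sizeof(reg));
 *	strlcpy(reg.ctl_name, "com.example.mycontrol", sizeof(reg.ctl_name));
 *	reg.ctl_connect = my_connect;		/. required, checked below
 *	reg.ctl_disconnect = my_disconnect;
 *	reg.ctl_send = my_send;
 *	errno_t err = ctl_register(&reg, &ref);
 *
 * On success with a dynamically assigned id (as here), the id is written
 * back into reg.ctl_id, and ref is the handle passed to ctl_enqueue*()
 * and ctl_deregister().
 */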
719 errno_t
720 ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref)
721 {
722 struct kctl *kctl = 0;
723 u_int32_t id = -1;
724 u_int32_t n;
725 size_t name_len;
726
727 if (userkctl == NULL) /* sanity check */
728 return(EINVAL);
729 if (userkctl->ctl_connect == NULL)
730 return(EINVAL);
731 name_len = strlen(userkctl->ctl_name);
732 if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME)
733 return(EINVAL);
734
735 MALLOC(kctl, struct kctl *, sizeof(*kctl), M_TEMP, M_WAITOK);
736 if (kctl == NULL)
737 return(ENOMEM);
738 bzero((char *)kctl, sizeof(*kctl));
739
740 lck_mtx_lock(ctl_mtx);
741
742 if ((userkctl->ctl_flags & CTL_FLAG_REG_ID_UNIT) == 0) {
743 if (ctl_find_by_name(userkctl->ctl_name) != NULL) {
744 lck_mtx_unlock(ctl_mtx);
745 FREE(kctl, M_TEMP);
746 return(EEXIST);
747 }
748 for (n = 0, id = ctl_last_id + 1; n < ctl_max; id++, n++) {
749 if (id == 0) {
750 n--;
751 continue;
752 }
753 if (ctl_find_by_id(id) == 0)
754 break;
755 }
756 if (id == ctl_max) {
757 lck_mtx_unlock(ctl_mtx);
758 FREE(kctl, M_TEMP);
759 return(ENOBUFS);
760 }
761 userkctl->ctl_id = id;
762 kctl->id = id;
763 kctl->reg_unit = -1;
764 } else {
765 if (ctl_find_by_id_unit(userkctl->ctl_id, userkctl->ctl_unit) != NULL) {
766 lck_mtx_unlock(ctl_mtx);
767 FREE(kctl, M_TEMP);
768 return(EEXIST);
769 }
770 kctl->id = userkctl->ctl_id;
771 kctl->reg_unit = userkctl->ctl_unit;
772 }
773 strcpy(kctl->name, userkctl->ctl_name);
774 kctl->flags = userkctl->ctl_flags;
775
776 /* Let the caller know the default send and receive sizes */
777 if (userkctl->ctl_sendsize == 0)
778 userkctl->ctl_sendsize = CTL_SENDSIZE;
779 kctl->sendbufsize = userkctl->ctl_sendsize;
780
781 if (userkctl->ctl_recvsize == 0)
782 userkctl->ctl_recvsize = CTL_RECVSIZE;
783 kctl->recvbufsize = userkctl->ctl_recvsize;
784
785 kctl->connect = userkctl->ctl_connect;
786 kctl->disconnect = userkctl->ctl_disconnect;
787 kctl->send = userkctl->ctl_send;
788 kctl->setopt = userkctl->ctl_setopt;
789 kctl->getopt = userkctl->ctl_getopt;
790
791 TAILQ_INIT(&kctl->kcb_head);
792
793 TAILQ_INSERT_TAIL(&ctl_head, kctl, next);
794 ctl_max++;
795
796 lck_mtx_unlock(ctl_mtx);
797
798 *kctlref = kctl;
799
800 ctl_post_msg(KEV_CTL_REGISTERED, kctl->id);
801 return(0);
802 }
803
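/*
 * KPI: remove a previously registered control.  Refused with EBUSY
 * while any client socket is still attached to it.
 */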
804 errno_t
805 ctl_deregister(void *kctlref)
806 {
807 struct kctl *kctl;
808
809 if (kctlref == NULL) /* sanity check */
810 return(EINVAL);
811
812 lck_mtx_lock(ctl_mtx);
813 TAILQ_FOREACH(kctl, &ctl_head, next) {
814 if (kctl == (struct kctl *)kctlref)
815 break;
816 }
817 if (kctl != (struct kctl *)kctlref) {
818 lck_mtx_unlock(ctl_mtx);
819 return EINVAL;
820 }
821 if (!TAILQ_EMPTY(&kctl->kcb_head)) {
822 lck_mtx_unlock(ctl_mtx);
823 return EBUSY;
824 }
825
826 TAILQ_REMOVE(&ctl_head, kctl, next);
827 ctl_max--;
828
829 lck_mtx_unlock(ctl_mtx);
830
831 ctl_post_msg(KEV_CTL_DEREGISTERED, kctl->id);
832 FREE(kctl, M_TEMP);
833 return(0);
834 }
835
836 /*
837 * Must be called with the global ctl_mtx lock taken
838 */
839 static struct kctl *
840 ctl_find_by_id(u_int32_t id)
841 {
842 struct kctl *kctl;
843
844 TAILQ_FOREACH(kctl, &ctl_head, next)
845 if (kctl->id == id)
846 return kctl;
847
848 return NULL;
849 }
850
851 /*
852 * Must be called with the global ctl_mtx lock taken
853 */
854 static struct kctl *
855 ctl_find_by_name(const char *name)
856 {
857 struct kctl *kctl;
858
859 TAILQ_FOREACH(kctl, &ctl_head, next)
860 if (strcmp(kctl->name, name) == 0)
861 return kctl;
862
863 return NULL;
864 }
865
866 /*
867 * Must be called with the global ctl_mtx lock taken
868 *
869 */
870 static struct kctl *
871 ctl_find_by_id_unit(u_int32_t id, u_int32_t unit)
872 {
873 struct kctl *kctl;
874
875 TAILQ_FOREACH(kctl, &ctl_head, next) {
876 if (kctl->id == id && (kctl->flags & CTL_FLAG_REG_ID_UNIT) == 0)
877 return kctl;
878 else if (kctl->id == id && kctl->reg_unit == unit)
879 return kctl;
880 }
881 return NULL;
882 }
883
884 /*
885 * Must be called with kernel controller lock taken
886 */
887 static struct ctl_cb *
888 kcb_find(struct kctl *kctl, u_int32_t unit)
889 {
890 struct ctl_cb *kcb;
891
892 TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
893 if (kcb->unit == unit)
894 return kcb;
895
896 return NULL;
897 }
898
899 /*
900 * Must be called without lock
901 */
902 static void
903 ctl_post_msg(u_long event_code, u_int32_t id)
904 {
905 struct ctl_event_data ctl_ev_data;
906 struct kev_msg ev_msg;
907
908 ev_msg.vendor_code = KEV_VENDOR_APPLE;
909
910 ev_msg.kev_class = KEV_SYSTEM_CLASS;
911 ev_msg.kev_subclass = KEV_CTL_SUBCLASS;
912 ev_msg.event_code = event_code;
913
914 /* common nke subclass data */
915 bzero(&ctl_ev_data, sizeof(ctl_ev_data));
916 ctl_ev_data.ctl_id = id;
917 ev_msg.dv[0].data_ptr = &ctl_ev_data;
918 ev_msg.dv[0].data_length = sizeof(ctl_ev_data);
919
920 ev_msg.dv[1].data_length = 0;
921
922 kev_post_msg(&ev_msg);
923 }
924
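/*
 * Socket locking hooks wired into the protosw entries above: each kernel
 * control socket is protected by the per-socket mutex allocated in
 * ctl_attach(), and ctl_unlock() hands the last use count to
 * ctl_sofreelastref().
 */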
925 static int
926 ctl_lock(struct socket *so, int refcount, int lr)
927 {
928 int lr_saved = lr;
929 #ifdef __ppc__
930 if (lr == 0) {
931 __asm__ volatile("mflr %0" : "=r" (lr_saved));
932 }
933 else lr_saved = lr;
934 #endif
935
936 if (so->so_pcb) {
937 lck_mtx_lock(((struct ctl_cb *)so->so_pcb)->mtx);
938 } else {
939 panic("ctl_lock: so=%x NO PCB! lr=%x\n", so, lr_saved);
940 lck_mtx_lock(so->so_proto->pr_domain->dom_mtx);
941 }
942
943 if (so->so_usecount < 0)
944 panic("ctl_lock: so=%x so_pcb=%x lr=%x ref=%x\n",
945 so, so->so_pcb, lr_saved, so->so_usecount);
946
947 if (refcount)
948 so->so_usecount++;
949 so->reserved3 = (void *)lr_saved;
950 return (0);
951 }
952
953 static int
954 ctl_unlock(struct socket *so, int refcount, int lr)
955 {
956 int lr_saved = lr;
957 lck_mtx_t * mutex_held;
958
959 #ifdef __ppc__
960 if (lr == 0) {
961 __asm__ volatile("mflr %0" : "=r" (lr_saved));
962 }
963 else lr_saved = lr;
964 #endif
965
966 #ifdef MORE_KCTLLOCK_DEBUG
967 printf("ctl_unlock: so=%x sopcb=%x lock=%x ref=%x lr=%x\n",
968 so, so->so_pcb, ((struct ctl_cb *)so->so_pcb)->mtx, so->so_usecount, lr_saved);
969 #endif
970 if (refcount)
971 so->so_usecount--;
972
973 if (so->so_usecount < 0)
974 panic("ctl_unlock: so=%x usecount=%x\n", so, so->so_usecount);
975 if (so->so_pcb == NULL) {
976 panic("ctl_unlock: so=%x NO PCB usecount=%x lr=%x\n", so, so->so_usecount, lr_saved);
977 mutex_held = so->so_proto->pr_domain->dom_mtx;
978 } else {
979 mutex_held = ((struct ctl_cb *)so->so_pcb)->mtx;
980 }
981 lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
982 lck_mtx_unlock(mutex_held);
983 so->reserved4 = (void *)lr_saved;
984
985 if (so->so_usecount == 0)
986 ctl_sofreelastref(so);
987
988 return (0);
989 }
990
991 static lck_mtx_t *
992 ctl_getlock(struct socket *so, __unused int locktype)
993 {
994 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
995
996 if (so->so_pcb) {
997 if (so->so_usecount < 0)
998 panic("ctl_getlock: so=%x usecount=%x\n", so, so->so_usecount);
999 return(kcb->mtx);
1000 } else {
1001 panic("ctl_getlock: so=%x NULL so_pcb\n", so);
1002 return (so->so_proto->pr_domain->dom_mtx);
1003 }
1004 }