/*
 * Copyright (c) 1999-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * Kernel Control domain - allows control connections to kernel
 * controllers and to read/write data.
 *
 * Vincent Lubet, 040506
 * Christophe Allie, 010928
 * Justin C. Walker, 990319
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/sys_domain.h>
#include <sys/kern_event.h>
#include <sys/kern_control.h>
#include <sys/kauth.h>
#include <net/if_var.h>

#include <mach/vm_types.h>

#include <kern/thread.h>

/*
 * Default send and receive buffer sizes
 */

#define CTL_SENDSIZE    (2 * 1024)      /* default buffer size */
#define CTL_RECVSIZE    (8 * 1024)      /* default buffer size */

/*
 * Definitions and vars for the controllers we support
 */

static u_int32_t ctl_maxunit = 65536;
static lck_grp_attr_t *ctl_lck_grp_attr = 0;
static lck_attr_t *ctl_lck_attr = 0;
static lck_grp_t *ctl_lck_grp = 0;
static lck_mtx_t *ctl_mtx;


/* all the controllers are chained */
TAILQ_HEAD(kctl_list, kctl) ctl_head;

static int ctl_attach(struct socket *, int, struct proc *);
static int ctl_detach(struct socket *);
static int ctl_sofreelastref(struct socket *so);
static int ctl_connect(struct socket *, struct sockaddr *, struct proc *);
static int ctl_disconnect(struct socket *);
static int ctl_ioctl(struct socket *so, u_long cmd, caddr_t data,
    struct ifnet *ifp, struct proc *p);
static int ctl_send(struct socket *, int, struct mbuf *,
    struct sockaddr *, struct mbuf *, struct proc *);
static int ctl_ctloutput(struct socket *, struct sockopt *);
static int ctl_peeraddr(struct socket *so, struct sockaddr **nam);
static int ctl_usr_rcvd(struct socket *so, int flags);

static struct kctl *ctl_find_by_name(const char *);
static struct kctl *ctl_find_by_id_unit(u_int32_t id, u_int32_t unit);

static struct socket *kcb_find_socket(struct kctl *, u_int32_t unit);
static struct ctl_cb *kcb_find(struct kctl *, u_int32_t unit);
static void ctl_post_msg(u_int32_t event_code, u_int32_t id);

static int ctl_lock(struct socket *, int, void *);
static int ctl_unlock(struct socket *, int, void *);
static lck_mtx_t * ctl_getlock(struct socket *, int);

static struct pr_usrreqs ctl_usrreqs = {
    .pru_attach = ctl_attach,
    .pru_connect = ctl_connect,
    .pru_control = ctl_ioctl,
    .pru_detach = ctl_detach,
    .pru_disconnect = ctl_disconnect,
    .pru_peeraddr = ctl_peeraddr,
    .pru_rcvd = ctl_usr_rcvd,
    .pru_send = ctl_send,
    .pru_sosend = sosend,
    .pru_soreceive = soreceive,
};

static struct protosw kctlsw[] = {
    {
        .pr_type = SOCK_DGRAM,
        .pr_protocol = SYSPROTO_CONTROL,
        .pr_flags = PR_ATOMIC|PR_CONNREQUIRED|PR_PCBLOCK|PR_WANTRCVD,
        .pr_ctloutput = ctl_ctloutput,
        .pr_usrreqs = &ctl_usrreqs,
        .pr_lock = ctl_lock,
        .pr_unlock = ctl_unlock,
        .pr_getlock = ctl_getlock,
    },
    {
        .pr_type = SOCK_STREAM,
        .pr_protocol = SYSPROTO_CONTROL,
        .pr_flags = PR_CONNREQUIRED|PR_PCBLOCK|PR_WANTRCVD,
        .pr_ctloutput = ctl_ctloutput,
        .pr_usrreqs = &ctl_usrreqs,
        .pr_lock = ctl_lock,
        .pr_unlock = ctl_unlock,
        .pr_getlock = ctl_getlock,
    }
};

static int kctl_proto_count = (sizeof (kctlsw) / sizeof (struct protosw));

/*
 * Install the protosw's for the Kernel Control manager.
 */
__private_extern__ void
kern_control_init(struct domain *dp)
{
    struct protosw *pr;
    int i;

    VERIFY(!(dp->dom_flags & DOM_INITIALIZED));
    VERIFY(dp == systemdomain);

    ctl_lck_grp_attr = lck_grp_attr_alloc_init();
    if (ctl_lck_grp_attr == NULL) {
        panic("%s: lck_grp_attr_alloc_init failed\n", __func__);
        /* NOTREACHED */
    }

    ctl_lck_grp = lck_grp_alloc_init("Kernel Control Protocol",
        ctl_lck_grp_attr);
    if (ctl_lck_grp == NULL) {
        panic("%s: lck_grp_alloc_init failed\n", __func__);
        /* NOTREACHED */
    }

    ctl_lck_attr = lck_attr_alloc_init();
    if (ctl_lck_attr == NULL) {
        panic("%s: lck_attr_alloc_init failed\n", __func__);
        /* NOTREACHED */
    }

    ctl_mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
    if (ctl_mtx == NULL) {
        panic("%s: lck_mtx_alloc_init failed\n", __func__);
        /* NOTREACHED */
    }
    TAILQ_INIT(&ctl_head);

    for (i = 0, pr = &kctlsw[0]; i < kctl_proto_count; i++, pr++)
        net_add_proto(pr, dp, 1);
}

static void
kcb_delete(struct ctl_cb *kcb)
{
    if (kcb != 0) {
        if (kcb->mtx != 0)
            lck_mtx_free(kcb->mtx, ctl_lck_grp);
        FREE(kcb, M_TEMP);
    }
}


/*
 * Kernel Controller user-request functions
 * attach function must exist and succeed
 * detach not necessary
 * we need a pcb for the per socket mutex
 */
static int
ctl_attach(struct socket *so, __unused int proto, __unused struct proc *p)
{
    int error = 0;
    struct ctl_cb *kcb = 0;

    MALLOC(kcb, struct ctl_cb *, sizeof(struct ctl_cb), M_TEMP, M_WAITOK);
    if (kcb == NULL) {
        error = ENOMEM;
        goto quit;
    }
    bzero(kcb, sizeof(struct ctl_cb));

    kcb->mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
    if (kcb->mtx == NULL) {
        error = ENOMEM;
        goto quit;
    }
    kcb->so = so;
    so->so_pcb = (caddr_t)kcb;

quit:
    if (error != 0) {
        kcb_delete(kcb);
        kcb = 0;
    }
    return error;
}

static int
ctl_sofreelastref(struct socket *so)
{
    struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

    so->so_pcb = 0;

    if (kcb != 0) {
        struct kctl *kctl;
        if ((kctl = kcb->kctl) != 0) {
            lck_mtx_lock(ctl_mtx);
            TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
            lck_mtx_unlock(ctl_mtx);
        }
        kcb_delete(kcb);
    }
    sofreelastref(so, 1);
    return 0;
}

static int
ctl_detach(struct socket *so)
{
    struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

    if (kcb == 0)
        return 0;

    soisdisconnected(so);
    so->so_flags |= SOF_PCBCLEARING;
    return 0;
}


static int
ctl_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
{
    struct kctl *kctl;
    int error = 0;
    struct sockaddr_ctl sa;
    struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
    struct ctl_cb *kcb_next = NULL;

    if (kcb == 0)
        panic("ctl_connect so_pcb null\n");

    if (nam->sa_len != sizeof(struct sockaddr_ctl))
        return (EINVAL);

    bcopy(nam, &sa, sizeof(struct sockaddr_ctl));

    lck_mtx_lock(ctl_mtx);
    kctl = ctl_find_by_id_unit(sa.sc_id, sa.sc_unit);
    if (kctl == NULL) {
        lck_mtx_unlock(ctl_mtx);
        return ENOENT;
    }

    if (((kctl->flags & CTL_FLAG_REG_SOCK_STREAM) && (so->so_type != SOCK_STREAM)) ||
        (!(kctl->flags & CTL_FLAG_REG_SOCK_STREAM) && (so->so_type != SOCK_DGRAM))) {
        lck_mtx_unlock(ctl_mtx);
        return EPROTOTYPE;
    }

    if (kctl->flags & CTL_FLAG_PRIVILEGED) {
        if (p == 0) {
            lck_mtx_unlock(ctl_mtx);
            return (EINVAL);
        }
        if (kauth_cred_issuser(kauth_cred_get()) == 0) {
            lck_mtx_unlock(ctl_mtx);
            return EPERM;
        }
    }

    if ((kctl->flags & CTL_FLAG_REG_ID_UNIT) || sa.sc_unit != 0) {
        if (kcb_find(kctl, sa.sc_unit) != NULL) {
            lck_mtx_unlock(ctl_mtx);
            return EBUSY;
        }
    } else {
        /* Find an unused unit number; assumes the kcb list is sorted by unit */
        u_int32_t unit = 1;

        TAILQ_FOREACH(kcb_next, &kctl->kcb_head, next) {
            if (kcb_next->unit > unit) {
                /* Found a gap, let's fill it in */
                break;
            }
            unit = kcb_next->unit + 1;
            if (unit == ctl_maxunit)
                break;
        }

        if (unit == ctl_maxunit) {
            lck_mtx_unlock(ctl_mtx);
            return EBUSY;
        }

        sa.sc_unit = unit;
    }

    kcb->unit = sa.sc_unit;
    kcb->kctl = kctl;
    if (kcb_next != NULL) {
        TAILQ_INSERT_BEFORE(kcb_next, kcb, next);
    } else {
        TAILQ_INSERT_TAIL(&kctl->kcb_head, kcb, next);
    }
    lck_mtx_unlock(ctl_mtx);

    error = soreserve(so, kctl->sendbufsize, kctl->recvbufsize);
    if (error)
        goto done;
    soisconnecting(so);

    socket_unlock(so, 0);
    error = (*kctl->connect)(kctl, &sa, &kcb->userdata);
    socket_lock(so, 0);
    if (error)
        goto end;

    soisconnected(so);

end:
    if (error && kctl->disconnect) {
        socket_unlock(so, 0);
        (*kctl->disconnect)(kctl, kcb->unit, kcb->userdata);
        socket_lock(so, 0);
    }
done:
    if (error) {
        soisdisconnected(so);
        lck_mtx_lock(ctl_mtx);
        kcb->kctl = 0;
        kcb->unit = 0;
        TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
        lck_mtx_unlock(ctl_mtx);
    }
    return error;
}
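
/*
 * Illustrative sketch (not part of the original file): the userland side
 * of the connect sequence handled above. A client resolves the control
 * name to an id with the CTLIOCGINFO ioctl, then connects with a
 * sockaddr_ctl. The control name "com.example.kext" is a made-up
 * placeholder. For controls registered without CTL_FLAG_REG_ID_UNIT,
 * passing sc_unit 0 lets ctl_connect() pick the first free unit.
 *
 *    struct ctl_info info;
 *    struct sockaddr_ctl addr;
 *    int fd = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);
 *
 *    bzero(&info, sizeof (info));
 *    strlcpy(info.ctl_name, "com.example.kext", sizeof (info.ctl_name));
 *    if (ioctl(fd, CTLIOCGINFO, &info) == -1)
 *        err(1, "CTLIOCGINFO");
 *
 *    bzero(&addr, sizeof (addr));
 *    addr.sc_len = sizeof (addr);
 *    addr.sc_family = AF_SYSTEM;
 *    addr.ss_sysaddr = AF_SYS_CONTROL;
 *    addr.sc_id = info.ctl_id;
 *    addr.sc_unit = 0;
 *    if (connect(fd, (struct sockaddr *)&addr, sizeof (addr)) == -1)
 *        err(1, "connect");
 */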

static int
ctl_disconnect(struct socket *so)
{
    struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

    if ((kcb = (struct ctl_cb *)so->so_pcb)) {
        struct kctl *kctl = kcb->kctl;

        if (kctl && kctl->disconnect) {
            socket_unlock(so, 0);
            (*kctl->disconnect)(kctl, kcb->unit, kcb->userdata);
            socket_lock(so, 0);
        }

        soisdisconnected(so);

        socket_unlock(so, 0);
        lck_mtx_lock(ctl_mtx);
        kcb->kctl = 0;
        kcb->unit = 0;
        while (kcb->usecount != 0) {
            msleep(&kcb->usecount, ctl_mtx, 0, "kcb->usecount", 0);
        }
        TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
        lck_mtx_unlock(ctl_mtx);
        socket_lock(so, 0);
    }
    return 0;
}

static int
ctl_peeraddr(struct socket *so, struct sockaddr **nam)
{
    struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
    struct kctl *kctl;
    struct sockaddr_ctl sc;

    if (kcb == NULL)    /* sanity check */
        return (ENOTCONN);

    if ((kctl = kcb->kctl) == NULL)
        return (EINVAL);

    bzero(&sc, sizeof(struct sockaddr_ctl));
    sc.sc_len = sizeof(struct sockaddr_ctl);
    sc.sc_family = AF_SYSTEM;
    sc.ss_sysaddr = AF_SYS_CONTROL;
    sc.sc_id = kctl->id;
    sc.sc_unit = kcb->unit;

    *nam = dup_sockaddr((struct sockaddr *)&sc, 1);

    return 0;
}

static int
ctl_usr_rcvd(struct socket *so, int flags)
{
    struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
    struct kctl *kctl;

    if (kcb == NULL)    /* sanity check */
        return ENOTCONN;

    if ((kctl = kcb->kctl) == NULL) {
        return EINVAL;
    }

    if (kctl->rcvd) {
        socket_unlock(so, 0);
        (*kctl->rcvd)(kctl, kcb->unit, kcb->userdata, flags);
        socket_lock(so, 0);
    }

    return 0;
}

static int
ctl_send(struct socket *so, int flags, struct mbuf *m,
    __unused struct sockaddr *addr, struct mbuf *control,
    __unused struct proc *p)
{
    int error = 0;
    struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
    struct kctl *kctl;

    if (control)
        m_freem(control);

    if (kcb == NULL)    /* sanity check */
        error = ENOTCONN;

    if (error == 0 && (kctl = kcb->kctl) == NULL)
        error = EINVAL;

    if (error == 0 && kctl->send) {
        socket_unlock(so, 0);
        error = (*kctl->send)(kctl, kcb->unit, kcb->userdata, m, flags);
        socket_lock(so, 0);
    } else {
        m_freem(m);
        if (error == 0)
            error = ENOTSUP;
    }
    return error;
}

errno_t
ctl_enqueuembuf(void *kctlref, u_int32_t unit, struct mbuf *m, u_int32_t flags)
{
    struct socket *so;
    errno_t error = 0;
    struct kctl *kctl = (struct kctl *)kctlref;

    if (kctl == NULL)
        return EINVAL;

    so = kcb_find_socket(kctl, unit);

    if (so == NULL)
        return EINVAL;

    if (sbspace(&so->so_rcv) < m->m_pkthdr.len) {
        error = ENOBUFS;
        goto bye;
    }
    if ((flags & CTL_DATA_EOR))
        m->m_flags |= M_EOR;
    if (sbappend(&so->so_rcv, m) && (flags & CTL_DATA_NOWAKEUP) == 0)
        sorwakeup(so);
bye:
    socket_unlock(so, 1);
    return error;
}

errno_t
ctl_enqueuedata(void *kctlref, u_int32_t unit, void *data, size_t len, u_int32_t flags)
{
    struct socket *so;
    struct mbuf *m;
    errno_t error = 0;
    struct kctl *kctl = (struct kctl *)kctlref;
    unsigned int num_needed;
    struct mbuf *n;
    size_t curlen = 0;

    if (kctlref == NULL)
        return EINVAL;

    so = kcb_find_socket(kctl, unit);
    if (so == NULL)
        return EINVAL;

    if (sbspace(&so->so_rcv) < (int)len) {
        error = ENOBUFS;
        goto bye;
    }

    num_needed = 1;
    m = m_allocpacket_internal(&num_needed, len, NULL, M_NOWAIT, 1, 0);
    if (m == NULL) {
        printf("ctl_enqueuedata: m_allocpacket_internal(%lu) failed\n", len);
        error = ENOBUFS;
        goto bye;
    }

    for (n = m; n != NULL; n = n->m_next) {
        size_t mlen = mbuf_maxlen(n);

        if (mlen + curlen > len)
            mlen = len - curlen;
        n->m_len = mlen;
        bcopy((char *)data + curlen, n->m_data, mlen);
        curlen += mlen;
    }
    mbuf_pkthdr_setlen(m, curlen);

    if ((flags & CTL_DATA_EOR))
        m->m_flags |= M_EOR;
    if (sbappend(&so->so_rcv, m) && (flags & CTL_DATA_NOWAKEUP) == 0)
        sorwakeup(so);
bye:
    socket_unlock(so, 1);
    return error;
}
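
/*
 * Illustrative sketch (not part of the original file): a kernel control
 * pushing data up to its connected client with ctl_enqueuedata() above.
 * g_ctlref stands for the kern_ctl_ref returned by ctl_register() and
 * unit for the unit passed to the connect callback; both names are
 * placeholders.
 *
 *    static void
 *    example_notify(u_int32_t unit, const void *buf, size_t len)
 *    {
 *        errno_t err;
 *
 *        err = ctl_enqueuedata(g_ctlref, unit, (void *)buf, len,
 *            CTL_DATA_EOR);
 *    }
 *
 * ENOBUFS here means the client's receive buffer is full; the caller is
 * expected to hold on to the data and retry later.
 */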


errno_t
ctl_getenqueuespace(kern_ctl_ref kctlref, u_int32_t unit, size_t *space)
{
    struct kctl *kctl = (struct kctl *)kctlref;
    struct socket *so;
    long avail;

    if (kctlref == NULL || space == NULL)
        return EINVAL;

    so = kcb_find_socket(kctl, unit);
    if (so == NULL)
        return EINVAL;

    avail = sbspace(&so->so_rcv);
    *space = (avail < 0) ? 0 : avail;
    socket_unlock(so, 1);

    return 0;
}
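
/*
 * Illustrative sketch (not part of the original file): pairing
 * ctl_getenqueuespace() with ctl_enqueuedata(), using the same
 * placeholder names as above. The check is only advisory since the
 * socket lock is dropped between the two calls, so the ENOBUFS path
 * still has to be handled.
 *
 *    size_t space;
 *
 *    if (ctl_getenqueuespace(g_ctlref, unit, &space) == 0 &&
 *        space >= len)
 *        (void) ctl_enqueuedata(g_ctlref, unit, (void *)buf, len, 0);
 */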

static int
ctl_ctloutput(struct socket *so, struct sockopt *sopt)
{
    struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
    struct kctl *kctl;
    int error = 0;
    void *data;
    size_t len;

    if (sopt->sopt_level != SYSPROTO_CONTROL) {
        return (EINVAL);
    }

    if (kcb == NULL)    /* sanity check */
        return (ENOTCONN);

    if ((kctl = kcb->kctl) == NULL)
        return (EINVAL);

    switch (sopt->sopt_dir) {
    case SOPT_SET:
        if (kctl->setopt == NULL)
            return (ENOTSUP);
        if (sopt->sopt_valsize == 0) {
            data = NULL;
        } else {
            MALLOC(data, void *, sopt->sopt_valsize, M_TEMP, M_WAITOK);
            if (data == NULL)
                return (ENOMEM);
            error = sooptcopyin(sopt, data, sopt->sopt_valsize, sopt->sopt_valsize);
        }
        if (error == 0) {
            socket_unlock(so, 0);
            error = (*kctl->setopt)(kcb->kctl, kcb->unit, kcb->userdata, sopt->sopt_name,
                data, sopt->sopt_valsize);
            socket_lock(so, 0);
        }
        if (data != NULL)
            FREE(data, M_TEMP);
        break;

    case SOPT_GET:
        if (kctl->getopt == NULL)
            return (ENOTSUP);
        data = NULL;
        if (sopt->sopt_valsize && sopt->sopt_val) {
            MALLOC(data, void *, sopt->sopt_valsize, M_TEMP, M_WAITOK);
            if (data == NULL)
                return (ENOMEM);
            /* 4108337 - copy in data for get socket option */
            error = sooptcopyin(sopt, data, sopt->sopt_valsize, sopt->sopt_valsize);
        }
        len = sopt->sopt_valsize;
        socket_unlock(so, 0);
        error = (*kctl->getopt)(kcb->kctl, kcb->unit, kcb->userdata, sopt->sopt_name,
            data, &len);
        if (data != NULL && len > sopt->sopt_valsize)
            panic_plain("ctl_ctloutput: ctl %s returned len (%lu) > sopt_valsize (%lu)\n",
                kcb->kctl->name, len, sopt->sopt_valsize);
        socket_lock(so, 0);
        if (error == 0) {
            if (data != NULL)
                error = sooptcopyout(sopt, data, len);
            else
                sopt->sopt_valsize = len;
        }
        if (data != NULL)
            FREE(data, M_TEMP);
        break;
    }
    return error;
}
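
/*
 * Illustrative sketch (not part of the original file): the userland
 * calls that land in ctl_ctloutput() above. Option names at the
 * SYSPROTO_CONTROL level are private to each controller;
 * EXAMPLE_OPT_STATS and struct example_stats are placeholders. Note
 * that the SOPT_GET path copies the caller's buffer in first (see
 * 4108337 above), so a controller may use it as input to its getopt
 * callback.
 *
 *    struct example_stats stats;
 *    socklen_t optlen = sizeof (stats);
 *
 *    if (setsockopt(fd, SYSPROTO_CONTROL, EXAMPLE_OPT_STATS,
 *        &stats, sizeof (stats)) == -1)
 *        err(1, "setsockopt");
 *    if (getsockopt(fd, SYSPROTO_CONTROL, EXAMPLE_OPT_STATS,
 *        &stats, &optlen) == -1)
 *        err(1, "getsockopt");
 */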

static int
ctl_ioctl(__unused struct socket *so, u_long cmd, caddr_t data,
    __unused struct ifnet *ifp, __unused struct proc *p)
{
    int error = ENOTSUP;

    switch (cmd) {
    /* get the number of controllers */
    case CTLIOCGCOUNT: {
        struct kctl *kctl;
        u_int32_t n = 0;

        lck_mtx_lock(ctl_mtx);
        TAILQ_FOREACH(kctl, &ctl_head, next)
            n++;
        lck_mtx_unlock(ctl_mtx);

        bcopy(&n, data, sizeof (n));
        error = 0;
        break;
    }
    case CTLIOCGINFO: {
        struct ctl_info ctl_info;
        struct kctl *kctl = 0;
        size_t name_len;

        bcopy(data, &ctl_info, sizeof (ctl_info));
        name_len = strnlen(ctl_info.ctl_name, MAX_KCTL_NAME);

        if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) {
            error = EINVAL;
            break;
        }
        lck_mtx_lock(ctl_mtx);
        kctl = ctl_find_by_name(ctl_info.ctl_name);
        lck_mtx_unlock(ctl_mtx);
        if (kctl == 0) {
            error = ENOENT;
            break;
        }
        ctl_info.ctl_id = kctl->id;
        bcopy(&ctl_info, data, sizeof (ctl_info));
        error = 0;
        break;
    }

    /* add controls to get list of NKEs */

    }

    return error;
}

/*
 * Register/unregister an NKE
 */
errno_t
ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref)
{
    struct kctl *kctl = NULL;
    struct kctl *kctl_next = NULL;
    u_int32_t id = 1;
    size_t name_len;
    int is_extended = 0;

    if (userkctl == NULL)   /* sanity check */
        return (EINVAL);
    if (userkctl->ctl_connect == NULL)
        return (EINVAL);
    name_len = strlen(userkctl->ctl_name);
    if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME)
        return (EINVAL);

    MALLOC(kctl, struct kctl *, sizeof(*kctl), M_TEMP, M_WAITOK);
    if (kctl == NULL)
        return (ENOMEM);
    bzero((char *)kctl, sizeof(*kctl));

    lck_mtx_lock(ctl_mtx);

    /*
     * Kernel Control IDs
     *
     * CTL_FLAG_REG_ID_UNIT indicates the control ID and unit number are
     * static. If they do not exist, add them to the list in order. If the
     * flag is not set, we must find a new unique value. We assume the
     * list is in order. We find the last item in the list and add one. If
     * this leads to wrapping the id around, we start at the front of the
     * list and look for a gap.
     */

    if ((userkctl->ctl_flags & CTL_FLAG_REG_ID_UNIT) == 0) {
        /* Must dynamically assign an unused ID */

        /* Verify the same name isn't already registered */
        if (ctl_find_by_name(userkctl->ctl_name) != NULL) {
            lck_mtx_unlock(ctl_mtx);
            FREE(kctl, M_TEMP);
            return (EEXIST);
        }

        /* Start with 1 in case the list is empty */
        id = 1;
        kctl_next = TAILQ_LAST(&ctl_head, kctl_list);

        if (kctl_next != NULL) {
            /* List was not empty, add one to the last item in the list */
            id = kctl_next->id + 1;
            kctl_next = NULL;

            /*
             * If this wrapped the id number, start looking at the front
             * of the list for an unused id.
             */
            if (id == 0) {
                /* Find the next unused ID */
                id = 1;

                TAILQ_FOREACH(kctl_next, &ctl_head, next) {
                    if (kctl_next->id > id) {
                        /* We found a gap */
                        break;
                    }

                    id = kctl_next->id + 1;
                }
            }
        }

        userkctl->ctl_id = id;
        kctl->id = id;
        kctl->reg_unit = -1;
    } else {
        TAILQ_FOREACH(kctl_next, &ctl_head, next) {
            if (kctl_next->id > userkctl->ctl_id)
                break;
        }

        if (ctl_find_by_id_unit(userkctl->ctl_id, userkctl->ctl_unit) != NULL) {
            lck_mtx_unlock(ctl_mtx);
            FREE(kctl, M_TEMP);
            return (EEXIST);
        }
        kctl->id = userkctl->ctl_id;
        kctl->reg_unit = userkctl->ctl_unit;
    }

    is_extended = (userkctl->ctl_flags & CTL_FLAG_REG_EXTENDED);

    strlcpy(kctl->name, userkctl->ctl_name, MAX_KCTL_NAME);
    kctl->flags = userkctl->ctl_flags;

    /* Let the caller know the default send and receive sizes */
    if (userkctl->ctl_sendsize == 0)
        userkctl->ctl_sendsize = CTL_SENDSIZE;
    kctl->sendbufsize = userkctl->ctl_sendsize;

    if (userkctl->ctl_recvsize == 0)
        userkctl->ctl_recvsize = CTL_RECVSIZE;
    kctl->recvbufsize = userkctl->ctl_recvsize;

    kctl->connect = userkctl->ctl_connect;
    kctl->disconnect = userkctl->ctl_disconnect;
    kctl->send = userkctl->ctl_send;
    kctl->setopt = userkctl->ctl_setopt;
    kctl->getopt = userkctl->ctl_getopt;
    if (is_extended) {
        kctl->rcvd = userkctl->ctl_rcvd;
    }

    TAILQ_INIT(&kctl->kcb_head);

    if (kctl_next)
        TAILQ_INSERT_BEFORE(kctl_next, kctl, next);
    else
        TAILQ_INSERT_TAIL(&ctl_head, kctl, next);

    lck_mtx_unlock(ctl_mtx);

    *kctlref = kctl;

    ctl_post_msg(KEV_CTL_REGISTERED, kctl->id);
    return (0);
}
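
/*
 * Illustrative sketch (not part of the original file): minimal
 * registration of a control, e.g. from a kext's start routine. The
 * reverse-DNS name and the example_* callbacks are placeholders;
 * ctl_connect is the only mandatory callback, as enforced at the top of
 * ctl_register() above. Leaving CTL_FLAG_REG_ID_UNIT clear requests a
 * dynamically assigned id.
 *
 *    static kern_ctl_ref g_ctlref;
 *
 *    struct kern_ctl_reg reg;
 *
 *    bzero(&reg, sizeof (reg));
 *    strlcpy(reg.ctl_name, "com.example.kext", sizeof (reg.ctl_name));
 *    reg.ctl_connect = example_connect;
 *    reg.ctl_disconnect = example_disconnect;
 *    reg.ctl_send = example_send;
 *    reg.ctl_setopt = example_setopt;
 *    reg.ctl_getopt = example_getopt;
 *
 *    errno_t err = ctl_register(&reg, &g_ctlref);
 */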

errno_t
ctl_deregister(void *kctlref)
{
    struct kctl *kctl;

    if (kctlref == NULL)    /* sanity check */
        return (EINVAL);

    lck_mtx_lock(ctl_mtx);
    TAILQ_FOREACH(kctl, &ctl_head, next) {
        if (kctl == (struct kctl *)kctlref)
            break;
    }
    if (kctl != (struct kctl *)kctlref) {
        lck_mtx_unlock(ctl_mtx);
        return EINVAL;
    }
    if (!TAILQ_EMPTY(&kctl->kcb_head)) {
        lck_mtx_unlock(ctl_mtx);
        return EBUSY;
    }

    TAILQ_REMOVE(&ctl_head, kctl, next);

    lck_mtx_unlock(ctl_mtx);

    ctl_post_msg(KEV_CTL_DEREGISTERED, kctl->id);
    FREE(kctl, M_TEMP);
    return (0);
}

/*
 * Must be called with global ctl_mtx lock taken
 */
static struct kctl *
ctl_find_by_name(const char *name)
{
    struct kctl *kctl;

    TAILQ_FOREACH(kctl, &ctl_head, next)
        if (strncmp(kctl->name, name, sizeof(kctl->name)) == 0)
            return kctl;

    return NULL;
}

u_int32_t
ctl_id_by_name(const char *name)
{
    u_int32_t ctl_id = 0;

    lck_mtx_lock(ctl_mtx);
    struct kctl *kctl = ctl_find_by_name(name);
    if (kctl)
        ctl_id = kctl->id;
    lck_mtx_unlock(ctl_mtx);

    return ctl_id;
}

errno_t
ctl_name_by_id(
    u_int32_t id,
    char *out_name,
    size_t maxsize)
{
    int found = 0;

    lck_mtx_lock(ctl_mtx);
    struct kctl *kctl;
    TAILQ_FOREACH(kctl, &ctl_head, next) {
        if (kctl->id == id)
            break;
    }

    if (kctl) {
        if (maxsize > MAX_KCTL_NAME)
            maxsize = MAX_KCTL_NAME;
        strlcpy(out_name, kctl->name, maxsize);
        found = 1;
    }
    lck_mtx_unlock(ctl_mtx);

    return found ? 0 : ENOENT;
}

/*
 * Must be called with global ctl_mtx lock taken
 */
static struct kctl *
ctl_find_by_id_unit(u_int32_t id, u_int32_t unit)
{
    struct kctl *kctl;

    TAILQ_FOREACH(kctl, &ctl_head, next) {
        if (kctl->id == id && (kctl->flags & CTL_FLAG_REG_ID_UNIT) == 0)
            return kctl;
        else if (kctl->id == id && kctl->reg_unit == unit)
            return kctl;
    }
    return NULL;
}

/*
 * Must be called with kernel controller lock taken
 */
static struct ctl_cb *
kcb_find(struct kctl *kctl, u_int32_t unit)
{
    struct ctl_cb *kcb;

    TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
        if (kcb->unit == unit)
            return kcb;

    return NULL;
}

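
/*
 * Look up the socket for (kctl, unit) and return it locked. The
 * usecount taken under ctl_mtx keeps the kcb alive across the window
 * where ctl_mtx is dropped to take the socket lock; ctl_disconnect()
 * msleeps on kcb->usecount until such in-flight lookups have drained
 * before it unlinks the kcb.
 */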
static struct socket *
kcb_find_socket(struct kctl *kctl, u_int32_t unit)
{
    struct socket *so = NULL;

    lck_mtx_lock(ctl_mtx);
    struct ctl_cb *kcb = kcb_find(kctl, unit);
    if (kcb && kcb->kctl == kctl) {
        so = kcb->so;
        if (so) {
            kcb->usecount++;
        }
    }
    lck_mtx_unlock(ctl_mtx);

    if (so == NULL) {
        return NULL;
    }

    socket_lock(so, 1);

    lck_mtx_lock(ctl_mtx);
    if (kcb->kctl == NULL) {
        lck_mtx_unlock(ctl_mtx);
        socket_unlock(so, 1);
        so = NULL;
        lck_mtx_lock(ctl_mtx);
    }
    kcb->usecount--;
    if (kcb->usecount == 0)
        wakeup((event_t)&kcb->usecount);
    lck_mtx_unlock(ctl_mtx);

    return so;
}

static void
ctl_post_msg(u_int32_t event_code, u_int32_t id)
{
    struct ctl_event_data ctl_ev_data;
    struct kev_msg ev_msg;

    lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_NOTOWNED);

    bzero(&ev_msg, sizeof(struct kev_msg));
    ev_msg.vendor_code = KEV_VENDOR_APPLE;

    ev_msg.kev_class = KEV_SYSTEM_CLASS;
    ev_msg.kev_subclass = KEV_CTL_SUBCLASS;
    ev_msg.event_code = event_code;

    /* common nke subclass data */
    bzero(&ctl_ev_data, sizeof(ctl_ev_data));
    ctl_ev_data.ctl_id = id;
    ev_msg.dv[0].data_ptr = &ctl_ev_data;
    ev_msg.dv[0].data_length = sizeof(ctl_ev_data);

    ev_msg.dv[1].data_length = 0;

    kev_post_msg(&ev_msg);
}

static int
ctl_lock(struct socket *so, int refcount, void *lr)
{
    void *lr_saved;

    if (lr == NULL)
        lr_saved = __builtin_return_address(0);
    else
        lr_saved = lr;

    if (so->so_pcb != NULL) {
        lck_mtx_lock(((struct ctl_cb *)so->so_pcb)->mtx);
    } else {
        panic("ctl_lock: so=%p NO PCB! lr=%p lrh= %s\n",
            so, lr_saved, solockhistory_nr(so));
        /* NOTREACHED */
    }

    if (so->so_usecount < 0) {
        panic("ctl_lock: so=%p so_pcb=%p lr=%p ref=%x lrh= %s\n",
            so, so->so_pcb, lr_saved, so->so_usecount, solockhistory_nr(so));
        /* NOTREACHED */
    }

    if (refcount)
        so->so_usecount++;

    so->lock_lr[so->next_lock_lr] = lr_saved;
    so->next_lock_lr = (so->next_lock_lr+1) % SO_LCKDBG_MAX;
    return (0);
}

static int
ctl_unlock(struct socket *so, int refcount, void *lr)
{
    void *lr_saved;
    lck_mtx_t *mutex_held;

    if (lr == NULL)
        lr_saved = __builtin_return_address(0);
    else
        lr_saved = lr;

#ifdef MORE_KCTLLOCK_DEBUG
    printf("ctl_unlock: so=%x sopcb=%x lock=%x ref=%x lr=%p\n",
        so, so->so_pcb, ((struct ctl_cb *)so->so_pcb)->mtx,
        so->so_usecount, lr_saved);
#endif
    if (refcount)
        so->so_usecount--;

    if (so->so_usecount < 0) {
        panic("ctl_unlock: so=%p usecount=%x lrh= %s\n",
            so, so->so_usecount, solockhistory_nr(so));
        /* NOTREACHED */
    }
    if (so->so_pcb == NULL) {
        panic("ctl_unlock: so=%p NO PCB usecount=%x lr=%p lrh= %s\n",
            so, so->so_usecount, (void *)lr_saved, solockhistory_nr(so));
        /* NOTREACHED */
    }
    mutex_held = ((struct ctl_cb *)so->so_pcb)->mtx;

    lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
    so->unlock_lr[so->next_unlock_lr] = lr_saved;
    so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX;
    lck_mtx_unlock(mutex_held);

    if (so->so_usecount == 0)
        ctl_sofreelastref(so);

    return (0);
}

static lck_mtx_t *
ctl_getlock(struct socket *so, __unused int locktype)
{
    struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

    if (so->so_pcb) {
        if (so->so_usecount < 0)
            panic("ctl_getlock: so=%p usecount=%x lrh= %s\n",
                so, so->so_usecount, solockhistory_nr(so));
        return (kcb->mtx);
    } else {
        panic("ctl_getlock: so=%p NULL NO so_pcb %s\n",
            so, solockhistory_nr(so));
        return (so->so_proto->pr_domain->dom_mtx);
    }
}