bsd/kern/kern_control.c (xnu-1504.3.12)
/*
 * Copyright (c) 1999-2008 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * Kernel Control domain - allows control connections to kernel controllers
 * and to read/write data over those connections.
 *
 * Vincent Lubet, 040506
 * Christophe Allie, 010928
 * Justin C. Walker, 990319
 */

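/*
 * Usage overview (illustrative only, not compiled): a userland client
 * reaches a registered kernel control by resolving its name to an id
 * with CTLIOCGINFO and then connecting a PF_SYSTEM socket. The control
 * name "com.example.echo" below is a hypothetical placeholder and error
 * handling is elided; a minimal sketch, assuming such a control has
 * been registered via ctl_register():
 *
 *	#include <sys/socket.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/sys_domain.h>
 *	#include <sys/kern_control.h>
 *	#include <string.h>
 *
 *	int fd = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);
 *	struct ctl_info info;
 *	bzero(&info, sizeof(info));
 *	strlcpy(info.ctl_name, "com.example.echo", sizeof(info.ctl_name));
 *	ioctl(fd, CTLIOCGINFO, &info);		// fills in info.ctl_id
 *
 *	struct sockaddr_ctl sc;
 *	bzero(&sc, sizeof(sc));
 *	sc.sc_len = sizeof(sc);
 *	sc.sc_family = AF_SYSTEM;
 *	sc.ss_sysaddr = AF_SYS_CONTROL;
 *	sc.sc_id = info.ctl_id;
 *	sc.sc_unit = 0;				// 0 = let the kernel pick a unit
 *	connect(fd, (struct sockaddr *)&sc, sizeof(sc));
 *
 *	// send()/recv() now reach the controller's ctl_send callback and
 *	// whatever it enqueues with ctl_enqueuedata()/ctl_enqueuembuf().
 */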
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/sys_domain.h>
#include <sys/kern_event.h>
#include <sys/kern_control.h>
#include <sys/kauth.h>
#include <net/if_var.h>

#include <mach/vm_types.h>

#include <kern/thread.h>

/*
 * Default send and receive socket buffer sizes
 */

#define CTL_SENDSIZE	(2 * 1024)	/* default send buffer size */
#define CTL_RECVSIZE	(8 * 1024)	/* default receive buffer size */

/*
 * Globals for the list of registered controllers and its locks
 */

static u_int32_t ctl_maxunit = 65536;
static lck_grp_attr_t *ctl_lck_grp_attr = 0;
static lck_attr_t *ctl_lck_attr = 0;
static lck_grp_t *ctl_lck_grp = 0;
static lck_mtx_t *ctl_mtx;


/* all the controllers are chained */
TAILQ_HEAD(kctl_list, kctl) ctl_head;

static int ctl_attach(struct socket *, int, struct proc *);
static int ctl_detach(struct socket *);
static int ctl_sofreelastref(struct socket *so);
static int ctl_connect(struct socket *, struct sockaddr *, struct proc *);
static int ctl_disconnect(struct socket *);
static int ctl_ioctl(struct socket *so, u_long cmd, caddr_t data,
		struct ifnet *ifp, struct proc *p);
static int ctl_send(struct socket *, int, struct mbuf *,
		struct sockaddr *, struct mbuf *, struct proc *);
static int ctl_ctloutput(struct socket *, struct sockopt *);
static int ctl_peeraddr(struct socket *so, struct sockaddr **nam);

static struct kctl *ctl_find_by_name(const char *);
static struct kctl *ctl_find_by_id_unit(u_int32_t id, u_int32_t unit);

static struct ctl_cb *kcb_find(struct kctl *, u_int32_t unit);
static void ctl_post_msg(u_int32_t event_code, u_int32_t id);

static int ctl_lock(struct socket *, int, void *);
static int ctl_unlock(struct socket *, int, void *);
static lck_mtx_t * ctl_getlock(struct socket *, int);

static struct pr_usrreqs ctl_usrreqs =
{
	pru_abort_notsupp, pru_accept_notsupp, ctl_attach, pru_bind_notsupp,
	ctl_connect, pru_connect2_notsupp, ctl_ioctl, ctl_detach,
	ctl_disconnect, pru_listen_notsupp, ctl_peeraddr,
	pru_rcvd_notsupp, pru_rcvoob_notsupp, ctl_send,
	pru_sense_null, pru_shutdown_notsupp, pru_sockaddr_notsupp,
	sosend, soreceive, pru_sopoll_notsupp
};

static struct protosw kctlswk_dgram =
{
	SOCK_DGRAM, &systemdomain, SYSPROTO_CONTROL,
	PR_ATOMIC|PR_CONNREQUIRED|PR_PCBLOCK,
	NULL, NULL, NULL, ctl_ctloutput,
	NULL, NULL,
	NULL, NULL, NULL, NULL, &ctl_usrreqs,
	ctl_lock, ctl_unlock, ctl_getlock, { 0, 0 }, 0, { 0 }
};

static struct protosw kctlswk_stream =
{
	SOCK_STREAM, &systemdomain, SYSPROTO_CONTROL,
	PR_CONNREQUIRED|PR_PCBLOCK,
	NULL, NULL, NULL, ctl_ctloutput,
	NULL, NULL,
	NULL, NULL, NULL, NULL, &ctl_usrreqs,
	ctl_lock, ctl_unlock, ctl_getlock, { 0, 0 }, 0, { 0 }
};


/*
 * Install the protosw's for the Kernel Control manager.
 */
__private_extern__ int
kern_control_init(void)
{
	int error = 0;

	ctl_lck_grp_attr = lck_grp_attr_alloc_init();
	if (ctl_lck_grp_attr == 0) {
		printf("kern_control_init: lck_grp_attr_alloc_init failed\n");
		error = ENOMEM;
		goto done;
	}

	ctl_lck_grp = lck_grp_alloc_init("Kernel Control Protocol", ctl_lck_grp_attr);
	if (ctl_lck_grp == 0) {
		printf("kern_control_init: lck_grp_alloc_init failed\n");
		error = ENOMEM;
		goto done;
	}

	ctl_lck_attr = lck_attr_alloc_init();
	if (ctl_lck_attr == 0) {
		printf("kern_control_init: lck_attr_alloc_init failed\n");
		error = ENOMEM;
		goto done;
	}

	ctl_mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
	if (ctl_mtx == 0) {
		printf("kern_control_init: lck_mtx_alloc_init failed\n");
		error = ENOMEM;
		goto done;
	}
	TAILQ_INIT(&ctl_head);

	error = net_add_proto(&kctlswk_dgram, &systemdomain);
	if (error) {
		log(LOG_WARNING, "kern_control_init: net_add_proto dgram failed (%d)\n", error);
	}
	error = net_add_proto(&kctlswk_stream, &systemdomain);
	if (error) {
		log(LOG_WARNING, "kern_control_init: net_add_proto stream failed (%d)\n", error);
	}

done:
	if (error != 0) {
		if (ctl_mtx) {
			lck_mtx_free(ctl_mtx, ctl_lck_grp);
			ctl_mtx = 0;
		}
		if (ctl_lck_grp) {
			lck_grp_free(ctl_lck_grp);
			ctl_lck_grp = 0;
		}
		if (ctl_lck_grp_attr) {
			lck_grp_attr_free(ctl_lck_grp_attr);
			ctl_lck_grp_attr = 0;
		}
		if (ctl_lck_attr) {
			lck_attr_free(ctl_lck_attr);
			ctl_lck_attr = 0;
		}
	}
	return error;
}

static void
kcb_delete(struct ctl_cb *kcb)
{
	if (kcb != 0) {
		if (kcb->mtx != 0)
			lck_mtx_free(kcb->mtx, ctl_lck_grp);
		FREE(kcb, M_TEMP);
	}
}


/*
 * Kernel Control user-request functions.
 * The attach function must exist and succeed; detach is not necessary.
 * We need a pcb for the per-socket mutex.
 */
static int
ctl_attach(struct socket *so, __unused int proto, __unused struct proc *p)
{
	int error = 0;
	struct ctl_cb *kcb = 0;

	MALLOC(kcb, struct ctl_cb *, sizeof(struct ctl_cb), M_TEMP, M_WAITOK);
	if (kcb == NULL) {
		error = ENOMEM;
		goto quit;
	}
	bzero(kcb, sizeof(struct ctl_cb));

	kcb->mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
	if (kcb->mtx == NULL) {
		error = ENOMEM;
		goto quit;
	}
	kcb->so = so;
	so->so_pcb = (caddr_t)kcb;

quit:
	if (error != 0) {
		kcb_delete(kcb);
		kcb = 0;
	}
	return error;
}

static int
ctl_sofreelastref(struct socket *so)
{
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

	so->so_pcb = 0;

	if (kcb != 0) {
		struct kctl *kctl;
		if ((kctl = kcb->kctl) != 0) {
			lck_mtx_lock(ctl_mtx);
			TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
			lck_mtx_unlock(ctl_mtx);
		}
		kcb_delete(kcb);
	}
	sofreelastref(so, 1);
	return 0;
}

static int
ctl_detach(struct socket *so)
{
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

	if (kcb == 0)
		return 0;

	soisdisconnected(so);
	so->so_flags |= SOF_PCBCLEARING;
	return 0;
}


static int
ctl_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
{
	struct kctl *kctl;
	int error = 0;
	struct sockaddr_ctl sa;
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
	struct ctl_cb *kcb_next = NULL;

	if (kcb == 0)
		panic("ctl_connect: so_pcb null\n");

	if (nam->sa_len != sizeof(struct sockaddr_ctl))
		return(EINVAL);

	bcopy(nam, &sa, sizeof(struct sockaddr_ctl));

	lck_mtx_lock(ctl_mtx);
	kctl = ctl_find_by_id_unit(sa.sc_id, sa.sc_unit);
	if (kctl == NULL) {
		lck_mtx_unlock(ctl_mtx);
		return ENOENT;
	}

	if (((kctl->flags & CTL_FLAG_REG_SOCK_STREAM) && (so->so_type != SOCK_STREAM)) ||
	    (!(kctl->flags & CTL_FLAG_REG_SOCK_STREAM) && (so->so_type != SOCK_DGRAM))) {
		lck_mtx_unlock(ctl_mtx);
		return EPROTOTYPE;
	}

	if (kctl->flags & CTL_FLAG_PRIVILEGED) {
		if (p == 0) {
			lck_mtx_unlock(ctl_mtx);
			return(EINVAL);
		}
		if (kauth_cred_issuser(kauth_cred_get()) == 0) {
			lck_mtx_unlock(ctl_mtx);
			return EPERM;
		}
	}

	if ((kctl->flags & CTL_FLAG_REG_ID_UNIT) || sa.sc_unit != 0) {
		if (kcb_find(kctl, sa.sc_unit) != NULL) {
			lck_mtx_unlock(ctl_mtx);
			return EBUSY;
		}
	} else {
		/* Find an unused unit number, assumes the kcb list is in order */
		u_int32_t unit = 1;

		TAILQ_FOREACH(kcb_next, &kctl->kcb_head, next) {
			if (kcb_next->unit > unit) {
				/* Found a gap, let's fill it in */
				break;
			}
			unit = kcb_next->unit + 1;
			if (unit == ctl_maxunit)
				break;
		}

		if (unit == ctl_maxunit) {
			lck_mtx_unlock(ctl_mtx);
			return EBUSY;
		}

		sa.sc_unit = unit;
	}

	kcb->unit = sa.sc_unit;
	kcb->kctl = kctl;
	if (kcb_next != NULL) {
		TAILQ_INSERT_BEFORE(kcb_next, kcb, next);
	}
	else {
		TAILQ_INSERT_TAIL(&kctl->kcb_head, kcb, next);
	}
	lck_mtx_unlock(ctl_mtx);

	error = soreserve(so, kctl->sendbufsize, kctl->recvbufsize);
	if (error)
		goto done;
	soisconnecting(so);

	socket_unlock(so, 0);
	error = (*kctl->connect)(kctl, &sa, &kcb->userdata);
	socket_lock(so, 0);
	if (error)
		goto done;

	soisconnected(so);

done:
	if (error) {
		soisdisconnected(so);
		lck_mtx_lock(ctl_mtx);
		kcb->kctl = 0;
		kcb->unit = 0;
		TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
		lck_mtx_unlock(ctl_mtx);
	}
	return error;
}

static int
ctl_disconnect(struct socket *so)
{
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

	if (kcb != NULL) {
		struct kctl *kctl = kcb->kctl;

		if (kctl && kctl->disconnect) {
			socket_unlock(so, 0);
			(*kctl->disconnect)(kctl, kcb->unit, kcb->userdata);
			socket_lock(so, 0);
		}
		lck_mtx_lock(ctl_mtx);
		kcb->kctl = 0;
		kcb->unit = 0;
		TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
		soisdisconnected(so);
		lck_mtx_unlock(ctl_mtx);
	}
	return 0;
}

static int
ctl_peeraddr(struct socket *so, struct sockaddr **nam)
{
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
	struct kctl *kctl;
	struct sockaddr_ctl sc;

	if (kcb == NULL)	/* sanity check */
		return(ENOTCONN);

	if ((kctl = kcb->kctl) == NULL)
		return(EINVAL);

	bzero(&sc, sizeof(struct sockaddr_ctl));
	sc.sc_len = sizeof(struct sockaddr_ctl);
	sc.sc_family = AF_SYSTEM;
	sc.ss_sysaddr = AF_SYS_CONTROL;
	sc.sc_id = kctl->id;
	sc.sc_unit = kcb->unit;

	*nam = dup_sockaddr((struct sockaddr *)&sc, 1);

	return 0;
}

static int
ctl_send(struct socket *so, int flags, struct mbuf *m,
	 __unused struct sockaddr *addr, __unused struct mbuf *control,
	 __unused struct proc *p)
{
	int error = 0;
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
	struct kctl *kctl;

	if (kcb == NULL)	/* sanity check */
		return(ENOTCONN);

	if ((kctl = kcb->kctl) == NULL)
		return(EINVAL);

	if (kctl->send) {
		socket_unlock(so, 0);
		error = (*kctl->send)(kctl, kcb->unit, kcb->userdata, m, flags);
		socket_lock(so, 0);
	}
	return error;
}

errno_t
ctl_enqueuembuf(void *kctlref, u_int32_t unit, struct mbuf *m, u_int32_t flags)
{
	struct ctl_cb *kcb;
	struct socket *so;
	errno_t error = 0;
	struct kctl *kctl = (struct kctl *)kctlref;

	if (kctl == NULL)
		return EINVAL;

	kcb = kcb_find(kctl, unit);
	if (kcb == NULL)
		return EINVAL;

	so = (struct socket *)kcb->so;
	if (so == NULL)
		return EINVAL;

	socket_lock(so, 1);
	if (sbspace(&so->so_rcv) < m->m_pkthdr.len) {
		error = ENOBUFS;
		goto bye;
	}
	if ((flags & CTL_DATA_EOR))
		m->m_flags |= M_EOR;
	if (sbappend(&so->so_rcv, m) && (flags & CTL_DATA_NOWAKEUP) == 0)
		sorwakeup(so);
bye:
	socket_unlock(so, 1);
	return error;
}

errno_t
ctl_enqueuedata(void *kctlref, u_int32_t unit, void *data, size_t len, u_int32_t flags)
{
	struct ctl_cb *kcb;
	struct socket *so;
	struct mbuf *m;
	errno_t error = 0;
	struct kctl *kctl = (struct kctl *)kctlref;
	unsigned int num_needed;
	struct mbuf *n;
	size_t curlen = 0;

	if (kctlref == NULL)
		return EINVAL;

	kcb = kcb_find(kctl, unit);
	if (kcb == NULL)
		return EINVAL;

	so = (struct socket *)kcb->so;
	if (so == NULL)
		return EINVAL;

	socket_lock(so, 1);
	if (sbspace(&so->so_rcv) < (int)len) {
		error = ENOBUFS;
		goto bye;
	}

	num_needed = 1;
	m = m_allocpacket_internal(&num_needed, len, NULL, M_NOWAIT, 1, 0);
	if (m == NULL) {
		printf("ctl_enqueuedata: m_allocpacket_internal(%lu) failed\n", len);
		error = ENOBUFS;
		goto bye;
	}

	for (n = m; n != NULL; n = n->m_next) {
		size_t mlen = mbuf_maxlen(n);

		if (mlen + curlen > len)
			mlen = len - curlen;
		n->m_len = mlen;
		bcopy((char *)data + curlen, n->m_data, mlen);
		curlen += mlen;
	}
	mbuf_pkthdr_setlen(m, curlen);

	if ((flags & CTL_DATA_EOR))
		m->m_flags |= M_EOR;
	if (sbappend(&so->so_rcv, m) && (flags & CTL_DATA_NOWAKEUP) == 0)
		sorwakeup(so);
bye:
	socket_unlock(so, 1);
	return error;
}


errno_t
ctl_getenqueuespace(kern_ctl_ref kctlref, u_int32_t unit, size_t *space)
{
	struct ctl_cb *kcb;
	struct kctl *kctl = (struct kctl *)kctlref;
	struct socket *so;
	long avail;

	if (kctlref == NULL || space == NULL)
		return EINVAL;

	kcb = kcb_find(kctl, unit);
	if (kcb == NULL)
		return EINVAL;

	so = (struct socket *)kcb->so;
	if (so == NULL)
		return EINVAL;

	socket_lock(so, 1);
	avail = sbspace(&so->so_rcv);
	*space = (avail < 0) ? 0 : avail;
	socket_unlock(so, 1);

	return 0;
}

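/*
 * Illustrative only (not compiled): a controller's output path would
 * typically check for socket-buffer space before enqueueing toward the
 * client. A minimal sketch, assuming a registered control ref
 * "my_ctlref" and a connected unit "unit" (both hypothetical names
 * introduced by this example):
 *
 *	static errno_t
 *	my_push(kern_ctl_ref my_ctlref, u_int32_t unit, void *buf, size_t len)
 *	{
 *		size_t space = 0;
 *		errno_t err;
 *
 *		err = ctl_getenqueuespace(my_ctlref, unit, &space);
 *		if (err != 0)
 *			return (err);
 *		if (space < len)
 *			return (ENOBUFS);	// client hasn't drained so_rcv yet
 *		// CTL_DATA_EOR marks a record boundary for SOCK_STREAM clients
 *		return (ctl_enqueuedata(my_ctlref, unit, buf, len, CTL_DATA_EOR));
 *	}
 */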
static int
ctl_ctloutput(struct socket *so, struct sockopt *sopt)
{
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
	struct kctl *kctl;
	int error = 0;
	void *data;
	size_t len;

	if (sopt->sopt_level != SYSPROTO_CONTROL) {
		return(EINVAL);
	}

	if (kcb == NULL)	/* sanity check */
		return(ENOTCONN);

	if ((kctl = kcb->kctl) == NULL)
		return(EINVAL);

	switch (sopt->sopt_dir) {
	case SOPT_SET:
		if (kctl->setopt == NULL)
			return(ENOTSUP);
		if (sopt->sopt_valsize == 0) {
			data = NULL;
		} else {
			MALLOC(data, void *, sopt->sopt_valsize, M_TEMP, M_WAITOK);
			if (data == NULL)
				return(ENOMEM);
			error = sooptcopyin(sopt, data, sopt->sopt_valsize, sopt->sopt_valsize);
		}
		if (error == 0) {
			socket_unlock(so, 0);
			error = (*kctl->setopt)(kcb->kctl, kcb->unit, kcb->userdata, sopt->sopt_name,
						data, sopt->sopt_valsize);
			socket_lock(so, 0);
		}
		if (data != NULL)
			FREE(data, M_TEMP);
		break;

	case SOPT_GET:
		if (kctl->getopt == NULL)
			return(ENOTSUP);
		data = NULL;
		if (sopt->sopt_valsize && sopt->sopt_val) {
			MALLOC(data, void *, sopt->sopt_valsize, M_TEMP, M_WAITOK);
			if (data == NULL)
				return(ENOMEM);
			/* 4108337 - copy in data for get socket option */
			error = sooptcopyin(sopt, data, sopt->sopt_valsize, sopt->sopt_valsize);
		}
		len = sopt->sopt_valsize;
		socket_unlock(so, 0);
		error = (*kctl->getopt)(kcb->kctl, kcb->unit, kcb->userdata, sopt->sopt_name,
					data, &len);
		socket_lock(so, 0);
		if (error == 0) {
			if (data != NULL)
				error = sooptcopyout(sopt, data, len);
			else
				sopt->sopt_valsize = len;
		}
		if (data != NULL)
			FREE(data, M_TEMP);
		break;
	}
	return error;
}

static int
ctl_ioctl(__unused struct socket *so, u_long cmd, caddr_t data,
	  __unused struct ifnet *ifp, __unused struct proc *p)
{
	int error = ENOTSUP;

	switch (cmd) {
	/* get the number of controllers */
	case CTLIOCGCOUNT: {
		struct kctl *kctl;
		int n = 0;

		lck_mtx_lock(ctl_mtx);
		TAILQ_FOREACH(kctl, &ctl_head, next)
			n++;
		lck_mtx_unlock(ctl_mtx);

		*(u_int32_t *)data = n;
		error = 0;
		break;
	}
	case CTLIOCGINFO: {
		struct ctl_info *ctl_info = (struct ctl_info *)data;
		struct kctl *kctl = 0;
		size_t name_len = strlen(ctl_info->ctl_name);

		if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) {
			error = EINVAL;
			break;
		}
		lck_mtx_lock(ctl_mtx);
		kctl = ctl_find_by_name(ctl_info->ctl_name);
		lck_mtx_unlock(ctl_mtx);
		if (kctl == 0) {
			error = ENOENT;
			break;
		}
		ctl_info->ctl_id = kctl->id;
		error = 0;
		break;
	}

	/* add controls to get list of NKEs */

	}

	return error;
}

/*
 * Register/unregister an NKE
 */
errno_t
ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref)
{
	struct kctl *kctl = NULL;
	struct kctl *kctl_next = NULL;
	u_int32_t id = 1;
	size_t name_len;

	if (userkctl == NULL)	/* sanity check */
		return(EINVAL);
	if (userkctl->ctl_connect == NULL)
		return(EINVAL);
	name_len = strlen(userkctl->ctl_name);
	if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME)
		return(EINVAL);

	MALLOC(kctl, struct kctl *, sizeof(*kctl), M_TEMP, M_WAITOK);
	if (kctl == NULL)
		return(ENOMEM);
	bzero((char *)kctl, sizeof(*kctl));

	lck_mtx_lock(ctl_mtx);

	/*
	 * Kernel Control IDs
	 *
	 * CTL_FLAG_REG_ID_UNIT indicates the control ID and unit number are
	 * static. If they do not exist, add them to the list in order. If the
	 * flag is not set, we must find a new unique value. We assume the
	 * list is in order. We find the last item in the list and add one. If
	 * this leads to wrapping the id around, we start at the front of the
	 * list and look for a gap.
	 */

	if ((userkctl->ctl_flags & CTL_FLAG_REG_ID_UNIT) == 0) {
		/* Must dynamically assign an unused ID */

		/* Verify the same name isn't already registered */
		if (ctl_find_by_name(userkctl->ctl_name) != NULL) {
			lck_mtx_unlock(ctl_mtx);
			FREE(kctl, M_TEMP);
			return(EEXIST);
		}

		/* Start with 1 in case the list is empty */
		id = 1;
		kctl_next = TAILQ_LAST(&ctl_head, kctl_list);

		if (kctl_next != NULL) {
			/* List was not empty, add one to the last item in the list */
			id = kctl_next->id + 1;
			kctl_next = NULL;

			/*
			 * If this wrapped the id number, start looking at the front
			 * of the list for an unused id.
			 */
			if (id == 0) {
				/* Find the next unused ID */
				id = 1;

				TAILQ_FOREACH(kctl_next, &ctl_head, next) {
					if (kctl_next->id > id) {
						/* We found a gap */
						break;
					}

					id = kctl_next->id + 1;
				}
			}
		}

		userkctl->ctl_id = id;
		kctl->id = id;
		kctl->reg_unit = -1;
	} else {
		TAILQ_FOREACH(kctl_next, &ctl_head, next) {
			if (kctl_next->id > userkctl->ctl_id)
				break;
		}

		if (ctl_find_by_id_unit(userkctl->ctl_id, userkctl->ctl_unit) != NULL) {
			lck_mtx_unlock(ctl_mtx);
			FREE(kctl, M_TEMP);
			return(EEXIST);
		}
		kctl->id = userkctl->ctl_id;
		kctl->reg_unit = userkctl->ctl_unit;
	}
	strlcpy(kctl->name, userkctl->ctl_name, MAX_KCTL_NAME);
	kctl->flags = userkctl->ctl_flags;

	/* Let the caller know the default send and receive sizes */
	if (userkctl->ctl_sendsize == 0)
		userkctl->ctl_sendsize = CTL_SENDSIZE;
	kctl->sendbufsize = userkctl->ctl_sendsize;

	if (userkctl->ctl_recvsize == 0)
		userkctl->ctl_recvsize = CTL_RECVSIZE;
	kctl->recvbufsize = userkctl->ctl_recvsize;

	kctl->connect = userkctl->ctl_connect;
	kctl->disconnect = userkctl->ctl_disconnect;
	kctl->send = userkctl->ctl_send;
	kctl->setopt = userkctl->ctl_setopt;
	kctl->getopt = userkctl->ctl_getopt;

	TAILQ_INIT(&kctl->kcb_head);

	if (kctl_next)
		TAILQ_INSERT_BEFORE(kctl_next, kctl, next);
	else
		TAILQ_INSERT_TAIL(&ctl_head, kctl, next);

	lck_mtx_unlock(ctl_mtx);

	*kctlref = kctl;

	ctl_post_msg(KEV_CTL_REGISTERED, kctl->id);
	return(0);
}

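/*
 * Illustrative only (not compiled): registering a control from a kext.
 * The control name, function names, and cookie handling below are
 * hypothetical placeholders; ctl_connect is the only callback that
 * ctl_register() requires. A minimal sketch:
 *
 *	static kern_ctl_ref my_ctlref;
 *
 *	static errno_t
 *	my_connect(kern_ctl_ref ref, struct sockaddr_ctl *sac, void **unitinfo)
 *	{
 *		*unitinfo = NULL;	// per-connection cookie, echoed back later
 *		return (0);
 *	}
 *
 *	static errno_t
 *	my_register(void)
 *	{
 *		struct kern_ctl_reg reg;
 *
 *		bzero(&reg, sizeof(reg));
 *		strlcpy(reg.ctl_name, "com.example.echo", sizeof(reg.ctl_name));
 *		reg.ctl_connect = my_connect;	// required
 *		// ctl_sendsize/ctl_recvsize of 0 pick up CTL_SENDSIZE/CTL_RECVSIZE
 *		return (ctl_register(&reg, &my_ctlref));
 *	}
 */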
errno_t
ctl_deregister(void *kctlref)
{
	struct kctl *kctl;

	if (kctlref == NULL)	/* sanity check */
		return(EINVAL);

	lck_mtx_lock(ctl_mtx);
	TAILQ_FOREACH(kctl, &ctl_head, next) {
		if (kctl == (struct kctl *)kctlref)
			break;
	}
	if (kctl != (struct kctl *)kctlref) {
		lck_mtx_unlock(ctl_mtx);
		return EINVAL;
	}
	if (!TAILQ_EMPTY(&kctl->kcb_head)) {
		lck_mtx_unlock(ctl_mtx);
		return EBUSY;
	}

	TAILQ_REMOVE(&ctl_head, kctl, next);

	lck_mtx_unlock(ctl_mtx);

	ctl_post_msg(KEV_CTL_DEREGISTERED, kctl->id);
	FREE(kctl, M_TEMP);
	return(0);
}

/*
 * Must be called with the global ctl_mtx lock taken
 */
static struct kctl *
ctl_find_by_name(const char *name)
{
	struct kctl *kctl;

	TAILQ_FOREACH(kctl, &ctl_head, next)
		if (strncmp(kctl->name, name, sizeof(kctl->name)) == 0)
			return kctl;

	return NULL;
}

/*
 * Must be called with the global ctl_mtx lock taken
 */
static struct kctl *
ctl_find_by_id_unit(u_int32_t id, u_int32_t unit)
{
	struct kctl *kctl;

	TAILQ_FOREACH(kctl, &ctl_head, next) {
		if (kctl->id == id && (kctl->flags & CTL_FLAG_REG_ID_UNIT) == 0)
			return kctl;
		else if (kctl->id == id && kctl->reg_unit == unit)
			return kctl;
	}
	return NULL;
}

/*
 * Must be called with the kernel controller lock taken
 */
static struct ctl_cb *
kcb_find(struct kctl *kctl, u_int32_t unit)
{
	struct ctl_cb *kcb;

	TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
		if (kcb->unit == unit)
			return kcb;

	return NULL;
}

/*
 * Must be called without the lock held
 */
static void
ctl_post_msg(u_int32_t event_code, u_int32_t id)
{
	struct ctl_event_data ctl_ev_data;
	struct kev_msg ev_msg;

	ev_msg.vendor_code = KEV_VENDOR_APPLE;

	ev_msg.kev_class = KEV_SYSTEM_CLASS;
	ev_msg.kev_subclass = KEV_CTL_SUBCLASS;
	ev_msg.event_code = event_code;

	/* common nke subclass data */
	bzero(&ctl_ev_data, sizeof(ctl_ev_data));
	ctl_ev_data.ctl_id = id;
	ev_msg.dv[0].data_ptr = &ctl_ev_data;
	ev_msg.dv[0].data_length = sizeof(ctl_ev_data);

	ev_msg.dv[1].data_length = 0;

	kev_post_msg(&ev_msg);
}

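/*
 * Illustrative only (not compiled): a userland process can observe
 * these registration events through a kernel event socket filtered on
 * KEV_CTL_SUBCLASS. Error handling elided; a minimal sketch:
 *
 *	#include <sys/socket.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/kern_event.h>
 *
 *	int fd = socket(PF_SYSTEM, SOCK_RAW, SYSPROTO_EVENT);
 *	struct kev_request req;
 *	req.vendor_code  = KEV_VENDOR_APPLE;
 *	req.kev_class    = KEV_SYSTEM_CLASS;
 *	req.kev_subclass = KEV_CTL_SUBCLASS;
 *	ioctl(fd, SIOCSKEVFILT, &req);		// only deliver kctl events
 *
 *	char buf[1024];
 *	recv(fd, buf, sizeof(buf), 0);
 *	struct kern_event_msg *msg = (struct kern_event_msg *)buf;
 *	// msg->event_code is KEV_CTL_REGISTERED or KEV_CTL_DEREGISTERED;
 *	// msg->event_data holds the struct ctl_event_data posted above.
 */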
static int
ctl_lock(struct socket *so, int refcount, void *lr)
{
	void *lr_saved;

	if (lr == NULL)
		lr_saved = __builtin_return_address(0);
	else
		lr_saved = lr;

	if (so->so_pcb != NULL) {
		lck_mtx_lock(((struct ctl_cb *)so->so_pcb)->mtx);
	} else {
		panic("ctl_lock: so=%p NO PCB! lr=%p lrh= %s\n",
		    so, lr_saved, solockhistory_nr(so));
		/* NOTREACHED */
	}

	if (so->so_usecount < 0) {
		panic("ctl_lock: so=%p so_pcb=%p lr=%p ref=%x lrh= %s\n",
		    so, so->so_pcb, lr_saved, so->so_usecount, solockhistory_nr(so));
		/* NOTREACHED */
	}

	if (refcount)
		so->so_usecount++;

	so->lock_lr[so->next_lock_lr] = lr_saved;
	so->next_lock_lr = (so->next_lock_lr+1) % SO_LCKDBG_MAX;
	return (0);
}

static int
ctl_unlock(struct socket *so, int refcount, void *lr)
{
	void *lr_saved;
	lck_mtx_t *mutex_held;

	if (lr == NULL)
		lr_saved = __builtin_return_address(0);
	else
		lr_saved = lr;

#ifdef MORE_KCTLLOCK_DEBUG
	printf("ctl_unlock: so=%p sopcb=%p lock=%p ref=%x lr=%p\n",
	    so, so->so_pcb, ((struct ctl_cb *)so->so_pcb)->mtx,
	    so->so_usecount, lr_saved);
#endif
	if (refcount)
		so->so_usecount--;

	if (so->so_usecount < 0) {
		panic("ctl_unlock: so=%p usecount=%x lrh= %s\n",
		    so, so->so_usecount, solockhistory_nr(so));
		/* NOTREACHED */
	}
	if (so->so_pcb == NULL) {
		panic("ctl_unlock: so=%p NO PCB usecount=%x lr=%p lrh= %s\n",
		    so, so->so_usecount, (void *)lr_saved, solockhistory_nr(so));
		/* NOTREACHED */
	}
	mutex_held = ((struct ctl_cb *)so->so_pcb)->mtx;

	lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
	so->unlock_lr[so->next_unlock_lr] = lr_saved;
	so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX;
	lck_mtx_unlock(mutex_held);

	if (so->so_usecount == 0)
		ctl_sofreelastref(so);

	return (0);
}

static lck_mtx_t *
ctl_getlock(struct socket *so, __unused int locktype)
{
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

	if (so->so_pcb) {
		if (so->so_usecount < 0)
			panic("ctl_getlock: so=%p usecount=%x lrh= %s\n",
			    so, so->so_usecount, solockhistory_nr(so));
		return(kcb->mtx);
	} else {
		panic("ctl_getlock: so=%p NULL so_pcb %s\n",
		    so, solockhistory_nr(so));
		return (so->so_proto->pr_domain->dom_mtx);
	}
}