]> git.saurik.com Git - apple/xnu.git/blob - bsd/kern/kern_control.c
daac86d46cc4b9c88e98f156ada71f7eb12afd2b
[apple/xnu.git] / bsd / kern / kern_control.c
1 /*
2 * Copyright (c) 1999-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22
23 /*
 * Kernel Control domain - allows kernel agents to register control
 * sockets that clients can connect to in order to read/write data.
26 *
27 * Vincent Lubet, 040506
28 * Christophe Allie, 010928
29 * Justin C. Walker, 990319
30 */
31
32 #include <sys/types.h>
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/syslog.h>
36 #include <sys/socket.h>
37 #include <sys/socketvar.h>
38 #include <sys/protosw.h>
39 #include <sys/domain.h>
40 #include <sys/malloc.h>
41 #include <sys/mbuf.h>
42 #include <sys/sys_domain.h>
43 #include <sys/kern_event.h>
44 #include <sys/kern_control.h>
45 #include <net/if_var.h>
46
47 #include <mach/vm_types.h>
48 #include <mach/kmod.h>
49
50 #include <kern/thread.h>
51
52 /*
 * Default send and receive buffer sizes for kernel control sockets.
54 */
55
56 #define CTL_SENDSIZE (2 * 1024) /* default buffer size */
57 #define CTL_RECVSIZE (8 * 1024) /* default buffer size */
58
59 /*
 * Globals for the kernel control module: id accounting and locking.
61 */
62
63 static u_int32_t ctl_last_id = 0;
64 static u_int32_t ctl_max = 256;
65 static u_int32_t ctl_maxunit = 65536;
66 static lck_grp_attr_t *ctl_lck_grp_attr = 0;
67 static lck_attr_t *ctl_lck_attr = 0;
68 static lck_grp_t *ctl_lck_grp = 0;
69 static lck_mtx_t *ctl_mtx;
70
71
72 /* all the controllers are chained */
73 TAILQ_HEAD(, kctl) ctl_head;
74
75 static int ctl_attach(struct socket *, int, struct proc *);
76 static int ctl_detach(struct socket *);
77 static int ctl_sofreelastref(struct socket *so);
78 static int ctl_connect(struct socket *, struct sockaddr *, struct proc *);
79 static int ctl_disconnect(struct socket *);
80 static int ctl_ioctl(struct socket *so, u_long cmd, caddr_t data,
81 struct ifnet *ifp, struct proc *p);
82 static int ctl_send(struct socket *, int, struct mbuf *,
83 struct sockaddr *, struct mbuf *, struct proc *);
84 static int ctl_ctloutput(struct socket *, struct sockopt *);
85 static int ctl_peeraddr(struct socket *so, struct sockaddr **nam);
86
87 static struct kctl *ctl_find_by_id(u_int32_t);
88 static struct kctl *ctl_find_by_name(const char *);
89 static struct kctl *ctl_find_by_id_unit(u_int32_t id, u_int32_t unit);
90
91 static struct ctl_cb *kcb_find(struct kctl *, u_int32_t unit);
92 static void ctl_post_msg(u_long event_code, u_int32_t id);
93
94 static int ctl_lock(struct socket *, int, int);
95 static int ctl_unlock(struct socket *, int, int);
96 static lck_mtx_t * ctl_getlock(struct socket *, int);
97
98 static struct pr_usrreqs ctl_usrreqs =
99 {
100 pru_abort_notsupp, pru_accept_notsupp, ctl_attach, pru_bind_notsupp,
101 ctl_connect, pru_connect2_notsupp, ctl_ioctl, ctl_detach,
102 ctl_disconnect, pru_listen_notsupp, ctl_peeraddr,
103 pru_rcvd_notsupp, pru_rcvoob_notsupp, ctl_send,
104 pru_sense_null, pru_shutdown_notsupp, pru_sockaddr_notsupp,
105 sosend, soreceive, pru_sopoll_notsupp
106 };
107
108 static struct protosw kctlswk_dgram =
109 {
110 SOCK_DGRAM, &systemdomain, SYSPROTO_CONTROL,
111 PR_ATOMIC|PR_CONNREQUIRED|PR_PCBLOCK,
112 NULL, NULL, NULL, ctl_ctloutput,
113 NULL, NULL,
114 NULL, NULL, NULL, NULL, &ctl_usrreqs,
115 ctl_lock, ctl_unlock, ctl_getlock, { 0, 0 } , 0, { 0 }
116 };
117
118 static struct protosw kctlswk_stream =
119 {
120 SOCK_STREAM, &systemdomain, SYSPROTO_CONTROL,
121 PR_CONNREQUIRED|PR_PCBLOCK,
122 NULL, NULL, NULL, ctl_ctloutput,
123 NULL, NULL,
124 NULL, NULL, NULL, NULL, &ctl_usrreqs,
125 ctl_lock, ctl_unlock, ctl_getlock, { 0, 0 } , 0, { 0 }
126 };
127
128
129 /*
130 * Install the protosw's for the Kernel Control manager.
131 */
/*
 * Install the protosw's for the Kernel Control manager.
 *
 * Allocates the lock group, lock attributes and the global ctl_mtx
 * that protects ctl_head, then registers both the datagram and the
 * stream kernel-control protocols with the system domain.
 *
 * Returns 0 on success or ENOMEM if any lock allocation fails; on
 * failure all lock objects allocated so far are torn down again.
 */
__private_extern__ int
kern_control_init(void)
{
	int error = 0;

	ctl_lck_grp_attr = lck_grp_attr_alloc_init();
	if (ctl_lck_grp_attr == 0) {
		printf(": lck_grp_attr_alloc_init failed\n");
		error = ENOMEM;
		goto done;
	}

	ctl_lck_grp = lck_grp_alloc_init("Kernel Control Protocol", ctl_lck_grp_attr);
	if (ctl_lck_grp == 0) {
		printf("kern_control_init: lck_grp_alloc_init failed\n");
		error = ENOMEM;
		goto done;
	}

	ctl_lck_attr = lck_attr_alloc_init();
	if (ctl_lck_attr == 0) {
		printf("kern_control_init: lck_attr_alloc_init failed\n");
		error = ENOMEM;
		goto done;
	}

	ctl_mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
	if (ctl_mtx == 0) {
		printf("kern_control_init: lck_mtx_alloc_init failed\n");
		error = ENOMEM;
		goto done;
	}
	TAILQ_INIT(&ctl_head);

	/*
	 * NOTE(review): if the dgram registration fails but the stream one
	 * succeeds, 'error' is overwritten with 0 and the dgram failure is
	 * only visible in the log — confirm this best-effort behavior is
	 * intended before changing it.
	 */
	error = net_add_proto(&kctlswk_dgram, &systemdomain);
	if (error) {
		log(LOG_WARNING, "kern_control_init: net_add_proto dgram failed (%d)\n", error);
	}
	error = net_add_proto(&kctlswk_stream, &systemdomain);
	if (error) {
		log(LOG_WARNING, "kern_control_init: net_add_proto stream failed (%d)\n", error);
	}

done:
	if (error != 0) {
		/* Tear down in reverse-ish order; each pointer is reset so a
		 * later retry starts from a clean state. */
		if (ctl_mtx) {
			lck_mtx_free(ctl_mtx, ctl_lck_grp);
			ctl_mtx = 0;
		}
		if (ctl_lck_grp) {
			lck_grp_free(ctl_lck_grp);
			ctl_lck_grp = 0;
		}
		if (ctl_lck_grp_attr) {
			lck_grp_attr_free(ctl_lck_grp_attr);
			ctl_lck_grp_attr = 0;
		}
		if (ctl_lck_attr) {
			lck_attr_free(ctl_lck_attr);
			ctl_lck_attr = 0;
		}
	}
	return error;
}
196
197 static void
198 kcb_delete(struct ctl_cb *kcb)
199 {
200 if (kcb != 0) {
201 if (kcb->mtx != 0)
202 lck_mtx_free(kcb->mtx, ctl_lck_grp);
203 FREE(kcb, M_TEMP);
204 }
205 }
206
207
208 /*
209 * Kernel Controller user-request functions
210 * attach function must exist and succeed
211 * detach not necessary
212 * we need a pcb for the per socket mutex
213 */
214 static int
215 ctl_attach(__unused struct socket *so, __unused int proto, __unused struct proc *p)
216 {
217 int error = 0;
218 struct ctl_cb *kcb = 0;
219
220 MALLOC(kcb, struct ctl_cb *, sizeof(struct ctl_cb), M_TEMP, M_WAITOK);
221 if (kcb == NULL) {
222 error = ENOMEM;
223 goto quit;
224 }
225 bzero(kcb, sizeof(struct ctl_cb));
226
227 kcb->mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
228 if (kcb->mtx == NULL) {
229 error = ENOMEM;
230 goto quit;
231 }
232 kcb->so = so;
233 so->so_pcb = (caddr_t)kcb;
234
235 quit:
236 if (error != 0) {
237 kcb_delete(kcb);
238 kcb = 0;
239 }
240 return error;
241 }
242
243 static int
244 ctl_sofreelastref(struct socket *so)
245 {
246 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
247
248 so->so_pcb = 0;
249
250 if (kcb != 0) {
251 struct kctl *kctl;
252 if ((kctl = kcb->kctl) != 0) {
253 lck_mtx_lock(ctl_mtx);
254 TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
255 lck_mtx_lock(ctl_mtx);
256 }
257 kcb_delete(kcb);
258 }
259 return 0;
260 }
261
262 static int
263 ctl_detach(struct socket *so)
264 {
265 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
266
267 if (kcb == 0)
268 return 0;
269
270 soisdisconnected(so);
271 so->so_flags |= SOF_PCBCLEARING;
272 return 0;
273 }
274
275
276 static int
277 ctl_connect(struct socket *so, struct sockaddr *nam, __unused struct proc *p)
278 {
279 struct kctl *kctl;
280 int error = 0;
281 struct sockaddr_ctl sa;
282 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
283
284 if (kcb == 0)
285 panic("ctl_connect so_pcb null\n");
286
287 if (nam->sa_len != sizeof(struct sockaddr_ctl))
288 return(EINVAL);
289
290 bcopy(nam, &sa, sizeof(struct sockaddr_ctl));
291
292 lck_mtx_lock(ctl_mtx);
293 kctl = ctl_find_by_id_unit(sa.sc_id, sa.sc_unit);
294 if (kctl == NULL) {
295 lck_mtx_unlock(ctl_mtx);
296 return ENOENT;
297 }
298
299 if (((kctl->flags & CTL_FLAG_REG_SOCK_STREAM) && (so->so_type != SOCK_STREAM)) ||
300 (!(kctl->flags & CTL_FLAG_REG_SOCK_STREAM) && (so->so_type != SOCK_DGRAM))) {
301 lck_mtx_unlock(ctl_mtx);
302 return EPROTOTYPE;
303 }
304
305 if (kctl->flags & CTL_FLAG_PRIVILEGED) {
306 if (p == 0) {
307 lck_mtx_unlock(ctl_mtx);
308 return(EINVAL);
309 }
310 if ((error = proc_suser(p))) {
311 lck_mtx_unlock(ctl_mtx);
312 return error;
313 }
314 }
315
316 if ((kctl->flags & CTL_FLAG_REG_ID_UNIT) || sa.sc_unit != 0) {
317 if (kcb_find(kctl, sa.sc_unit) != NULL) {
318 lck_mtx_unlock(ctl_mtx);
319 return EBUSY;
320 }
321 } else {
322 u_int32_t unit = kctl->lastunit + 1;
323
324 while (1) {
325 if (unit == ctl_maxunit)
326 unit = 1;
327 if (kcb_find(kctl, unit) == NULL) {
328 kctl->lastunit = sa.sc_unit = unit;
329 break;
330 }
331 if (unit++ == kctl->lastunit) {
332 lck_mtx_unlock(ctl_mtx);
333 return EBUSY;
334 }
335 }
336 }
337
338 kcb->unit = sa.sc_unit;
339 kcb->kctl = kctl;
340 TAILQ_INSERT_TAIL(&kctl->kcb_head, kcb, next);
341 lck_mtx_unlock(ctl_mtx);
342
343 error = soreserve(so, kctl->sendbufsize, kctl->recvbufsize);
344 if (error)
345 goto done;
346 soisconnecting(so);
347
348 socket_unlock(so, 0);
349 error = (*kctl->connect)(kctl, &sa, &kcb->userdata);
350 socket_lock(so, 0);
351 if (error)
352 goto done;
353
354 soisconnected(so);
355
356 done:
357 if (error) {
358 soisdisconnected(so);
359 lck_mtx_lock(ctl_mtx);
360 kcb->kctl = 0;
361 kcb->unit = 0;
362 TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
363 lck_mtx_unlock(ctl_mtx);
364 }
365 return error;
366 }
367
368 static int
369 ctl_disconnect(struct socket *so)
370 {
371 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
372
373 if ((kcb = (struct ctl_cb *)so->so_pcb)) {
374 struct kctl *kctl = kcb->kctl;
375
376 if (kctl && kctl->disconnect) {
377 socket_unlock(so, 0);
378 (*kctl->disconnect)(kctl, kcb->unit, kcb->userdata);
379 socket_lock(so, 0);
380 }
381 lck_mtx_lock(ctl_mtx);
382 kcb->kctl = 0;
383 kcb->unit = 0;
384 TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
385 soisdisconnected(so);
386 lck_mtx_unlock(ctl_mtx);
387 }
388 return 0;
389 }
390
391 static int
392 ctl_peeraddr(struct socket *so, struct sockaddr **nam)
393 {
394 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
395 struct kctl *kctl;
396 struct sockaddr_ctl sc;
397
398 if (kcb == NULL) /* sanity check */
399 return(ENOTCONN);
400
401 if ((kctl = kcb->kctl) == NULL)
402 return(EINVAL);
403
404 bzero(&sc, sizeof(struct sockaddr_ctl));
405 sc.sc_len = sizeof(struct sockaddr_ctl);
406 sc.sc_family = AF_SYSTEM;
407 sc.ss_sysaddr = AF_SYS_CONTROL;
408 sc.sc_id = kctl->id;
409 sc.sc_unit = kcb->unit;
410
411 *nam = dup_sockaddr((struct sockaddr *)&sc, 1);
412
413 return 0;
414 }
415
416 static int
417 ctl_send(struct socket *so, int flags, struct mbuf *m,
418 __unused struct sockaddr *addr, __unused struct mbuf *control,
419 __unused struct proc *p)
420 {
421 int error = 0;
422 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
423 struct kctl *kctl;
424
425 if (kcb == NULL) /* sanity check */
426 return(ENOTCONN);
427
428 if ((kctl = kcb->kctl) == NULL)
429 return(EINVAL);
430
431 if (kctl->send) {
432 socket_unlock(so, 0);
433 error = (*kctl->send)(kctl, kcb->unit, kcb->userdata, m, flags);
434 socket_lock(so, 0);
435 }
436 return error;
437 }
438
439 errno_t
440 ctl_enqueuembuf(void *kctlref, u_int32_t unit, struct mbuf *m, u_int32_t flags)
441 {
442 struct ctl_cb *kcb;
443 struct socket *so;
444 errno_t error = 0;
445 struct kctl *kctl = (struct kctl *)kctlref;
446
447 if (kctl == NULL)
448 return EINVAL;
449
450 kcb = kcb_find(kctl, unit);
451 if (kcb == NULL)
452 return EINVAL;
453
454 so = (struct socket *)kcb->so;
455 if (so == NULL)
456 return EINVAL;
457
458 socket_lock(so, 1);
459 if (sbspace(&so->so_rcv) < m->m_pkthdr.len) {
460 error = ENOBUFS;
461 goto bye;
462 }
463 if ((flags & CTL_DATA_EOR))
464 m->m_flags |= M_EOR;
465 if (sbappend(&so->so_rcv, m) && (flags & CTL_DATA_NOWAKEUP) == 0)
466 sorwakeup(so);
467 bye:
468 socket_unlock(so, 1);
469 return error;
470 }
471
472 errno_t
473 ctl_enqueuedata(void *kctlref, u_int32_t unit, void *data, size_t len, u_int32_t flags)
474 {
475 struct ctl_cb *kcb;
476 struct socket *so;
477 struct mbuf *m;
478 errno_t error = 0;
479 struct kctl *kctl = (struct kctl *)kctlref;
480 unsigned int num_needed;
481 struct mbuf *n;
482 size_t curlen = 0;
483
484 if (kctlref == NULL)
485 return EINVAL;
486
487 kcb = kcb_find(kctl, unit);
488 if (kcb == NULL)
489 return EINVAL;
490
491 so = (struct socket *)kcb->so;
492 if (so == NULL)
493 return EINVAL;
494
495 socket_lock(so, 1);
496 if ((size_t)sbspace(&so->so_rcv) < len) {
497 error = ENOBUFS;
498 goto bye;
499 }
500
501 num_needed = 1;
502 m = m_allocpacket_internal(&num_needed, len, NULL, M_NOWAIT, 1, 0);
503 if (m == NULL) {
504 printf("ctl_enqueuedata: m_allocpacket_internal(%lu) failed\n", len);
505 error = ENOBUFS;
506 goto bye;
507 }
508
509 for (n = m; n != NULL; n = n->m_next) {
510 size_t mlen = mbuf_maxlen(n);
511
512 if (mlen + curlen > len)
513 mlen = len - curlen;
514 n->m_len = mlen;
515 bcopy((char *)data + curlen, n->m_data, mlen);
516 curlen += mlen;
517 }
518 mbuf_pkthdr_setlen(m, curlen);
519
520 if ((flags & CTL_DATA_EOR))
521 m->m_flags |= M_EOR;
522 if (sbappend(&so->so_rcv, m) && (flags & CTL_DATA_NOWAKEUP) == 0)
523 sorwakeup(so);
524 bye:
525 socket_unlock(so, 1);
526 return error;
527 }
528
529
530 errno_t
531 ctl_getenqueuespace(kern_ctl_ref kctlref, u_int32_t unit, size_t *space)
532 {
533 struct ctl_cb *kcb;
534 struct kctl *kctl = (struct kctl *)kctlref;
535 struct socket *so;
536
537 if (kctlref == NULL || space == NULL)
538 return EINVAL;
539
540 kcb = kcb_find(kctl, unit);
541 if (kcb == NULL)
542 return EINVAL;
543
544 so = (struct socket *)kcb->so;
545 if (so == NULL)
546 return EINVAL;
547
548 socket_lock(so, 1);
549 *space = sbspace(&so->so_rcv);
550 socket_unlock(so, 1);
551
552 return 0;
553 }
554
/*
 * Socket-option handler for SYSPROTO_CONTROL: marshals option data
 * between user space and the controller's setopt/getopt callbacks.
 * Both callbacks run with the socket unlocked.
 */
static int
ctl_ctloutput(struct socket *so, struct sockopt *sopt)
{
	struct ctl_cb 	*kcb = (struct ctl_cb *)so->so_pcb;
	struct kctl	*kctl;
	int 	error = 0;
	void 	*data;
	size_t	len;

	/* Only kernel-control-level options are handled here. */
	if (sopt->sopt_level != SYSPROTO_CONTROL) {
		return(EINVAL);
	}

	if (kcb == NULL)	/* sanity check */
		return(ENOTCONN);

	if ((kctl = kcb->kctl) == NULL)
		return(EINVAL);

	switch (sopt->sopt_dir) {
		case SOPT_SET:
			if (kctl->setopt == NULL)
				return(ENOTSUP);
			/* NOTE(review): sopt_valsize originates in user space and
			 * is passed to MALLOC unchecked here — confirm an upper
			 * bound is enforced by the sockopt layer. */
			MALLOC(data, void *, sopt->sopt_valsize, M_TEMP, M_WAITOK);
			if (data == NULL)
				return(ENOMEM);
			error = sooptcopyin(sopt, data, sopt->sopt_valsize, sopt->sopt_valsize);
			if (error == 0) {
				/* Callback runs without the socket lock held. */
				socket_unlock(so, 0);
				error = (*kctl->setopt)(kcb->kctl, kcb->unit, kcb->userdata, sopt->sopt_name,
							data, sopt->sopt_valsize);
				socket_lock(so, 0);
			}
			FREE(data, M_TEMP);
			break;

		case SOPT_GET:
			if (kctl->getopt == NULL)
				return(ENOTSUP);
			data = NULL;
			if (sopt->sopt_valsize && sopt->sopt_val) {
				MALLOC(data, void *, sopt->sopt_valsize, M_TEMP, M_WAITOK);
				if (data == NULL)
					return(ENOMEM);
				/* 4108337 - copy in data for get socket option */
				error = sooptcopyin(sopt, data, sopt->sopt_valsize, sopt->sopt_valsize);
			}
			/* NOTE(review): a sooptcopyin failure above is overwritten
			 * by the getopt result below — confirm that is intended. */
			len = sopt->sopt_valsize;
			socket_unlock(so, 0);
			error = (*kctl->getopt)(kcb->kctl, kcb->unit, kcb->userdata, sopt->sopt_name,
						data, &len);
			socket_lock(so, 0);
			if (error == 0) {
				if (data != NULL)
					/* Copy the (possibly shortened) result out. */
					error = sooptcopyout(sopt, data, len);
				else
					/* Size probe: report the required length only. */
					sopt->sopt_valsize = len;
			}
			if (data != NULL)
				FREE(data, M_TEMP);
			break;
	}
	return error;
}
619
620 static int
621 ctl_ioctl(__unused struct socket *so, u_long cmd, caddr_t data,
622 __unused struct ifnet *ifp, __unused struct proc *p)
623 {
624 int error = ENOTSUP;
625
626 switch (cmd) {
627 /* get the number of controllers */
628 case CTLIOCGCOUNT: {
629 struct kctl *kctl;
630 int n = 0;
631
632 lck_mtx_lock(ctl_mtx);
633 TAILQ_FOREACH(kctl, &ctl_head, next)
634 n++;
635 lck_mtx_unlock(ctl_mtx);
636
637 *(u_int32_t *)data = n;
638 error = 0;
639 break;
640 }
641 case CTLIOCGINFO: {
642 struct ctl_info *ctl_info = (struct ctl_info *)data;
643 struct kctl *kctl = 0;
644 size_t name_len = strlen(ctl_info->ctl_name);
645
646 if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) {
647 error = EINVAL;
648 break;
649 }
650 lck_mtx_lock(ctl_mtx);
651 kctl = ctl_find_by_name(ctl_info->ctl_name);
652 lck_mtx_unlock(ctl_mtx);
653 if (kctl == 0) {
654 error = ENOENT;
655 break;
656 }
657 ctl_info->ctl_id = kctl->id;
658 error = 0;
659 break;
660 }
661
662 /* add controls to get list of NKEs */
663
664 }
665
666 return error;
667 }
668
669 /*
670 * Register/unregister a NKE
671 */
/*
 * Register a kernel controller (NKE).  Validates the registration,
 * allocates a struct kctl, assigns a dynamic id unless
 * CTL_FLAG_REG_ID_UNIT was requested, fills in default buffer sizes,
 * links it onto the global list and posts KEV_CTL_REGISTERED.
 * Returns 0 with *kctlref set, or EINVAL/EEXIST/ENOMEM/ENOBUFS.
 */
errno_t
ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref)
{
	struct kctl 	*kctl = 0;
	u_int32_t		id = -1;
	u_int32_t		n;
	size_t			name_len;

	if (userkctl == NULL)	/* sanity check */
		return(EINVAL);
	if (userkctl->ctl_connect == NULL)
		return(EINVAL);
	name_len = strlen(userkctl->ctl_name);
	if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME)
		return(EINVAL);

	MALLOC(kctl, struct kctl *, sizeof(*kctl), M_TEMP, M_WAITOK);
	if (kctl == NULL)
		return(ENOMEM);
	bzero((char *)kctl, sizeof(*kctl));

	lck_mtx_lock(ctl_mtx);

	if ((userkctl->ctl_flags & CTL_FLAG_REG_ID_UNIT) == 0) {
		/* Dynamic id: names must be unique, then probe for a free id
		 * starting just past the last one handed out (id 0 reserved). */
		if (ctl_find_by_name(userkctl->ctl_name) != NULL) {
			lck_mtx_unlock(ctl_mtx);
			FREE(kctl, M_TEMP);
			return(EEXIST);
		}
		for (n = 0, id = ctl_last_id + 1; n < ctl_max; id++, n++) {
			if (id == 0) {
				n--;
				continue;
			}
			if (ctl_find_by_id(id) == 0)
				break;
		}
		/* NOTE(review): the exhaustion test compares id (not n)
		 * against ctl_max, and ctl_last_id is never advanced — confirm
		 * the intended id-allocation invariants before relying on
		 * them. */
		if (id == ctl_max) {
			lck_mtx_unlock(ctl_mtx);
			FREE(kctl, M_TEMP);
			return(ENOBUFS);
		}
		userkctl->ctl_id =id;
		kctl->id = id;
		kctl->reg_unit = -1;
	} else {
		/* Caller-chosen (id, unit): reject duplicates. */
		if (ctl_find_by_id_unit(userkctl->ctl_id, userkctl->ctl_unit) != NULL) {
			lck_mtx_unlock(ctl_mtx);
			FREE(kctl, M_TEMP);
			return(EEXIST);
		}
		kctl->id = userkctl->ctl_id;
		kctl->reg_unit = userkctl->ctl_unit;
	}
	/* Bounded: name_len + 1 <= MAX_KCTL_NAME was verified above. */
	strcpy(kctl->name, userkctl->ctl_name);
	kctl->flags = userkctl->ctl_flags;

	/* Let the caller know the default send and receive sizes */
	if (userkctl->ctl_sendsize == 0)
		userkctl->ctl_sendsize = CTL_SENDSIZE;
	kctl->sendbufsize = userkctl->ctl_sendsize;

	if (userkctl->ctl_recvsize == 0)
		userkctl->ctl_recvsize = CTL_RECVSIZE;
	kctl->recvbufsize = userkctl->ctl_recvsize;

	kctl->connect = userkctl->ctl_connect;
	kctl->disconnect = userkctl->ctl_disconnect;
	kctl->send = userkctl->ctl_send;
	kctl->setopt = userkctl->ctl_setopt;
	kctl->getopt = userkctl->ctl_getopt;

	TAILQ_INIT(&kctl->kcb_head);

	TAILQ_INSERT_TAIL(&ctl_head, kctl, next);
	ctl_max++;

	lck_mtx_unlock(ctl_mtx);

	*kctlref = kctl;

	ctl_post_msg(KEV_CTL_REGISTERED, kctl->id);
	return(0);
}
756
757 errno_t
758 ctl_deregister(void *kctlref)
759 {
760 struct kctl *kctl;
761
762 if (kctlref == NULL) /* sanity check */
763 return(EINVAL);
764
765 lck_mtx_lock(ctl_mtx);
766 TAILQ_FOREACH(kctl, &ctl_head, next) {
767 if (kctl == (struct kctl *)kctlref)
768 break;
769 }
770 if (kctl != (struct kctl *)kctlref) {
771 lck_mtx_unlock(ctl_mtx);
772 return EINVAL;
773 }
774 if (!TAILQ_EMPTY(&kctl->kcb_head)) {
775 lck_mtx_unlock(ctl_mtx);
776 return EBUSY;
777 }
778
779 TAILQ_REMOVE(&ctl_head, kctl, next);
780 ctl_max--;
781
782 lck_mtx_unlock(ctl_mtx);
783
784 ctl_post_msg(KEV_CTL_DEREGISTERED, kctl->id);
785 FREE(kctl, M_TEMP);
786 return(0);
787 }
788
789 /*
 * Must be called with the global ctl_mtx lock taken
791 */
792 static struct kctl *
793 ctl_find_by_id(u_int32_t id)
794 {
795 struct kctl *kctl;
796
797 TAILQ_FOREACH(kctl, &ctl_head, next)
798 if (kctl->id == id)
799 return kctl;
800
801 return NULL;
802 }
803
804 /*
 * Must be called with the global ctl_mtx lock taken
806 */
807 static struct kctl *
808 ctl_find_by_name(const char *name)
809 {
810 struct kctl *kctl;
811
812 TAILQ_FOREACH(kctl, &ctl_head, next)
813 if (strcmp(kctl->name, name) == 0)
814 return kctl;
815
816 return NULL;
817 }
818
819 /*
 * Must be called with the global ctl_mtx lock taken
821 *
822 */
823 static struct kctl *
824 ctl_find_by_id_unit(u_int32_t id, u_int32_t unit)
825 {
826 struct kctl *kctl;
827
828 TAILQ_FOREACH(kctl, &ctl_head, next) {
829 if (kctl->id == id && (kctl->flags & CTL_FLAG_REG_ID_UNIT) == 0)
830 return kctl;
831 else if (kctl->id == id && kctl->reg_unit == unit)
832 return kctl;
833 }
834 return NULL;
835 }
836
837 /*
838 * Must be called with kernel controller lock taken
839 */
840 static struct ctl_cb *
841 kcb_find(struct kctl *kctl, u_int32_t unit)
842 {
843 struct ctl_cb *kcb;
844
845 TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
846 if ((kcb->unit == unit))
847 return kcb;
848
849 return NULL;
850 }
851
852 /*
 * Must be called without lock
854 */
855 static void
856 ctl_post_msg(u_long event_code, u_int32_t id)
857 {
858 struct ctl_event_data ctl_ev_data;
859 struct kev_msg ev_msg;
860
861 ev_msg.vendor_code = KEV_VENDOR_APPLE;
862
863 ev_msg.kev_class = KEV_SYSTEM_CLASS;
864 ev_msg.kev_subclass = KEV_CTL_SUBCLASS;
865 ev_msg.event_code = event_code;
866
867 /* common nke subclass data */
868 bzero(&ctl_ev_data, sizeof(ctl_ev_data));
869 ctl_ev_data.ctl_id = id;
870 ev_msg.dv[0].data_ptr = &ctl_ev_data;
871 ev_msg.dv[0].data_length = sizeof(ctl_ev_data);
872
873 ev_msg.dv[1].data_length = 0;
874
875 kev_post_msg(&ev_msg);
876 }
877
/*
 * Per-socket lock function installed in the protosw.  Takes the
 * per-pcb mutex (or the domain mutex if the pcb is gone), optionally
 * takes a use-count reference, and records the caller for lock
 * debugging.
 */
static int
ctl_lock(struct socket *so, int refcount, int lr)
{
	int lr_saved;
	/* NOTE(review): storing __builtin_return_address(0) in an int
	 * truncates the pointer on 64-bit — debug bookkeeping only, but
	 * worth confirming against the protosw lock signature.  The
	 * panic format strings print pointers with %x for the same
	 * reason. */
	if (lr == 0)
		lr_saved = (unsigned int) __builtin_return_address(0);
	else lr_saved = lr;

	if (so->so_pcb) {
		lck_mtx_lock(((struct ctl_cb *)so->so_pcb)->mtx);
	} else {
		panic("ctl_lock: so=%x NO PCB! lr=%x\n", so, lr_saved);
		lck_mtx_lock(so->so_proto->pr_domain->dom_mtx);
	}

	if (so->so_usecount < 0)
		panic("ctl_lock: so=%x so_pcb=%x lr=%x ref=%x\n",
		so, so->so_pcb, lr_saved, so->so_usecount);

	if (refcount)
		so->so_usecount++;

	/* Ring buffer of recent lock callers, for lock debugging. */
	so->lock_lr[so->next_lock_lr] = (void *)lr_saved;
	so->next_lock_lr = (so->next_lock_lr+1) % SO_LCKDBG_MAX;
	return (0);
}
904
/*
 * Per-socket unlock function installed in the protosw.  Drops an
 * optional use-count reference, records the caller for lock
 * debugging, releases the per-pcb mutex (or the domain mutex if the
 * pcb is gone), and tears the pcb down once the use count hits zero.
 */
static int
ctl_unlock(struct socket *so, int refcount, int lr)
{
	int lr_saved;
	lck_mtx_t * mutex_held;

	/* NOTE(review): as in ctl_lock, the return address is truncated
	 * into an int on 64-bit; debug bookkeeping only. */
	if (lr == 0)
		lr_saved = (unsigned int) __builtin_return_address(0);
	else lr_saved = lr;

#ifdef MORE_KCTLLOCK_DEBUG
	printf("ctl_unlock: so=%x sopcb=%x lock=%x ref=%x lr=%x\n",
		so, so->so_pcb, ((struct ctl_cb *)so->so_pcb)->mtx, so->so_usecount, lr_saved);
#endif
	if (refcount)
		so->so_usecount--;

	if (so->so_usecount < 0)
		panic("ctl_unlock: so=%x usecount=%x\n", so, so->so_usecount);
	if (so->so_pcb == NULL) {
		panic("ctl_unlock: so=%x NO PCB usecount=%x lr=%x\n", so, so->so_usecount, lr_saved);
		mutex_held = so->so_proto->pr_domain->dom_mtx;
	} else {
		mutex_held = ((struct ctl_cb *)so->so_pcb)->mtx;
	}
	lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
	/* Ring buffer of recent unlock callers, for lock debugging. */
	so->unlock_lr[so->next_unlock_lr] = (void *)lr_saved;
	so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX;
	lck_mtx_unlock(mutex_held);

	/* Last reference: free the pcb (its mutex was released above). */
	if (so->so_usecount == 0)
		ctl_sofreelastref(so);

	return (0);
}
940
941 static lck_mtx_t *
942 ctl_getlock(struct socket *so, __unused int locktype)
943 {
944 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
945
946 if (so->so_pcb) {
947 if (so->so_usecount < 0)
948 panic("ctl_getlock: so=%x usecount=%x\n", so, so->so_usecount);
949 return(kcb->mtx);
950 } else {
951 panic("ctl_getlock: so=%x NULL so_pcb\n", so);
952 return (so->so_proto->pr_domain->dom_mtx);
953 }
954 }