]> git.saurik.com Git - apple/xnu.git/blob - bsd/kern/kern_control.c
xnu-792.6.56.tar.gz
[apple/xnu.git] / bsd / kern / kern_control.c
1 /*
2 * Copyright (c) 1999-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
/*
 * Kernel Control domain - allows kernel extensions to register control
 * endpoints to which user processes can connect and read/write data.
 *
 * Vincent Lubet, 040506
 * Christophe Allie, 010928
 * Justin C. Walker, 990319
 */
32
33 #include <sys/types.h>
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/syslog.h>
37 #include <sys/socket.h>
38 #include <sys/socketvar.h>
39 #include <sys/protosw.h>
40 #include <sys/domain.h>
41 #include <sys/malloc.h>
42 #include <sys/mbuf.h>
43 #include <sys/sys_domain.h>
44 #include <sys/kern_event.h>
45 #include <sys/kern_control.h>
46 #include <net/if_var.h>
47
48 #include <mach/vm_types.h>
49 #include <mach/kmod.h>
50
51 #include <kern/thread.h>
52
/*
 * Default socket buffer sizes for kernel control sockets
 */
56
57 #define CTL_SENDSIZE (2 * 1024) /* default buffer size */
58 #define CTL_RECVSIZE (8 * 1024) /* default buffer size */
59
/*
 * Module-global state: controller id bookkeeping and the locks
 * protecting the controller list
 */
63
64 static u_int32_t ctl_last_id = 0;
65 static u_int32_t ctl_max = 256;
66 static u_int32_t ctl_maxunit = 65536;
67 static lck_grp_attr_t *ctl_lck_grp_attr = 0;
68 static lck_attr_t *ctl_lck_attr = 0;
69 static lck_grp_t *ctl_lck_grp = 0;
70 static lck_mtx_t *ctl_mtx;
71
/*
 * Internal structure maintained for each registered controller
 */
75
struct ctl_cb;		/* forward declaration; defined below */

/*
 * One instance per registered controller (NKE).  Chained on the global
 * ctl_head list; list membership and kcb_head are protected by ctl_mtx.
 */
struct kctl
{
	TAILQ_ENTRY(kctl)	next;		/* controller chain */

	/* controller information provided when registering */
	char	name[MAX_KCTL_NAME];	/* unique nke identifier, provided by DTS */
	u_int32_t	id;			/* controller id (dynamic or caller-supplied) */
	u_int32_t	reg_unit;		/* registered unit; -1 when id was dynamic */

	/* misc communication information */
	u_int32_t	flags;			/* support flags (CTL_FLAG_*) */
	u_int32_t	recvbufsize;	/* request more than the default buffer size */
	u_int32_t	sendbufsize;	/* request more than the default buffer size */

	/* Dispatch functions */
	ctl_connect_func	connect;	/* Make contact */
	ctl_disconnect_func	disconnect;	/* Break contact */
	ctl_send_func		send;		/* Send data to nke */
	ctl_setopt_func		setopt;		/* set kctl configuration */
	ctl_getopt_func		getopt;		/* get kctl configuration */

	TAILQ_HEAD(, ctl_cb)	kcb_head;	/* sockets attached to this controller */
	u_int32_t	lastunit;		/* last auto-assigned unit number */
};
102
/*
 * One instance per kernel control socket; hangs off so->so_pcb.
 */
struct ctl_cb {
	TAILQ_ENTRY(ctl_cb)	next;	/* controller chain (kctl->kcb_head) */
	lck_mtx_t	*mtx;		/* per-socket lock (see ctl_lock/ctl_getlock) */
	struct socket	*so;		/* controlling socket */
	struct kctl	*kctl;		/* back pointer to controller */
	u_int32_t	unit;		/* unit this socket is connected as */
	void		*userdata;	/* controller's per-connection cookie */
};
111
112 /* all the controllers are chained */
113 TAILQ_HEAD(, kctl) ctl_head;
114
115 static int ctl_attach(struct socket *, int, struct proc *);
116 static int ctl_detach(struct socket *);
117 static int ctl_sofreelastref(struct socket *so);
118 static int ctl_connect(struct socket *, struct sockaddr *, struct proc *);
119 static int ctl_disconnect(struct socket *);
120 static int ctl_ioctl(struct socket *so, u_long cmd, caddr_t data,
121 struct ifnet *ifp, struct proc *p);
122 static int ctl_send(struct socket *, int, struct mbuf *,
123 struct sockaddr *, struct mbuf *, struct proc *);
124 static int ctl_ctloutput(struct socket *, struct sockopt *);
125 static int ctl_peeraddr(struct socket *so, struct sockaddr **nam);
126
127 static struct kctl *ctl_find_by_id(u_int32_t);
128 static struct kctl *ctl_find_by_name(const char *);
129 static struct kctl *ctl_find_by_id_unit(u_int32_t id, u_int32_t unit);
130
131 static struct ctl_cb *kcb_find(struct kctl *, u_int32_t unit);
132 static void ctl_post_msg(u_long event_code, u_int32_t id);
133
134 static int ctl_lock(struct socket *, int, int);
135 static int ctl_unlock(struct socket *, int, int);
136 static lck_mtx_t * ctl_getlock(struct socket *, int);
137
/*
 * User-request dispatch table for kernel control sockets.
 * Operations not meaningful for control sockets map to the
 * pru_*_notsupp stubs.
 */
static struct pr_usrreqs ctl_usrreqs =
{
	pru_abort_notsupp, pru_accept_notsupp, ctl_attach, pru_bind_notsupp,
	ctl_connect, pru_connect2_notsupp, ctl_ioctl, ctl_detach,
	ctl_disconnect, pru_listen_notsupp, ctl_peeraddr,
	pru_rcvd_notsupp, pru_rcvoob_notsupp, ctl_send,
	pru_sense_null, pru_shutdown_notsupp, pru_sockaddr_notsupp,
	sosend, soreceive, pru_sopoll_notsupp
};
147
/*
 * Datagram flavor of SYSPROTO_CONTROL.  PR_PCBLOCK selects the
 * per-socket locking scheme implemented by ctl_lock/ctl_unlock.
 */
static struct protosw kctlswk_dgram =
{
	SOCK_DGRAM, &systemdomain, SYSPROTO_CONTROL,
	PR_ATOMIC|PR_CONNREQUIRED|PR_PCBLOCK,
	NULL, NULL, NULL, ctl_ctloutput,
	NULL, NULL,
	NULL, NULL, NULL, NULL, &ctl_usrreqs,
	ctl_lock, ctl_unlock, ctl_getlock, { 0, 0 } , 0, { 0 }
};
157
/*
 * Stream flavor of SYSPROTO_CONTROL (no PR_ATOMIC: record boundaries
 * are only preserved when the controller sets M_EOR).
 */
static struct protosw kctlswk_stream =
{
	SOCK_STREAM, &systemdomain, SYSPROTO_CONTROL,
	PR_CONNREQUIRED|PR_PCBLOCK,
	NULL, NULL, NULL, ctl_ctloutput,
	NULL, NULL,
	NULL, NULL, NULL, NULL, &ctl_usrreqs,
	ctl_lock, ctl_unlock, ctl_getlock, { 0, 0 } , 0, { 0 }
};
167
168
169 /*
170 * Install the protosw's for the Kernel Control manager.
171 */
172 __private_extern__ int
173 kern_control_init(void)
174 {
175 int error = 0;
176
177 ctl_lck_grp_attr = lck_grp_attr_alloc_init();
178 if (ctl_lck_grp_attr == 0) {
179 printf(": lck_grp_attr_alloc_init failed\n");
180 error = ENOMEM;
181 goto done;
182 }
183 lck_grp_attr_setdefault(ctl_lck_grp_attr);
184
185 ctl_lck_grp = lck_grp_alloc_init("Kernel Control Protocol", ctl_lck_grp_attr);
186 if (ctl_lck_grp == 0) {
187 printf("kern_control_init: lck_grp_alloc_init failed\n");
188 error = ENOMEM;
189 goto done;
190 }
191
192 ctl_lck_attr = lck_attr_alloc_init();
193 if (ctl_lck_attr == 0) {
194 printf("kern_control_init: lck_attr_alloc_init failed\n");
195 error = ENOMEM;
196 goto done;
197 }
198 lck_attr_setdefault(ctl_lck_attr);
199
200 ctl_mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
201 if (ctl_mtx == 0) {
202 printf("kern_control_init: lck_mtx_alloc_init failed\n");
203 error = ENOMEM;
204 goto done;
205 }
206 TAILQ_INIT(&ctl_head);
207
208 error = net_add_proto(&kctlswk_dgram, &systemdomain);
209 if (error) {
210 log(LOG_WARNING, "kern_control_init: net_add_proto dgram failed (%d)\n", error);
211 }
212 error = net_add_proto(&kctlswk_stream, &systemdomain);
213 if (error) {
214 log(LOG_WARNING, "kern_control_init: net_add_proto stream failed (%d)\n", error);
215 }
216
217 done:
218 if (error != 0) {
219 if (ctl_mtx) {
220 lck_mtx_free(ctl_mtx, ctl_lck_grp);
221 ctl_mtx = 0;
222 }
223 if (ctl_lck_grp) {
224 lck_grp_free(ctl_lck_grp);
225 ctl_lck_grp = 0;
226 }
227 if (ctl_lck_grp_attr) {
228 lck_grp_attr_free(ctl_lck_grp_attr);
229 ctl_lck_grp_attr = 0;
230 }
231 if (ctl_lck_attr) {
232 lck_attr_free(ctl_lck_attr);
233 ctl_lck_attr = 0;
234 }
235 }
236 return error;
237 }
238
239 static void
240 kcb_delete(struct ctl_cb *kcb)
241 {
242 if (kcb != 0) {
243 if (kcb->mtx != 0)
244 lck_mtx_free(kcb->mtx, ctl_lck_grp);
245 FREE(kcb, M_TEMP);
246 }
247 }
248
249
250 /*
251 * Kernel Controller user-request functions
252 * attach function must exist and succeed
253 * detach not necessary
254 * we need a pcb for the per socket mutex
255 */
256 static int
257 ctl_attach(__unused struct socket *so, __unused int proto, __unused struct proc *p)
258 {
259 int error = 0;
260 struct ctl_cb *kcb = 0;
261
262 MALLOC(kcb, struct ctl_cb *, sizeof(struct ctl_cb), M_TEMP, M_WAITOK);
263 if (kcb == NULL) {
264 error = ENOMEM;
265 goto quit;
266 }
267 bzero(kcb, sizeof(struct ctl_cb));
268
269 kcb->mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
270 if (kcb->mtx == NULL) {
271 error = ENOMEM;
272 goto quit;
273 }
274 kcb->so = so;
275 so->so_pcb = (caddr_t)kcb;
276
277 quit:
278 if (error != 0) {
279 kcb_delete(kcb);
280 kcb = 0;
281 }
282 return error;
283 }
284
285 static int
286 ctl_sofreelastref(struct socket *so)
287 {
288 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
289
290 so->so_pcb = 0;
291
292 if (kcb != 0) {
293 struct kctl *kctl;
294 if ((kctl = kcb->kctl) != 0) {
295 lck_mtx_lock(ctl_mtx);
296 TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
297 lck_mtx_lock(ctl_mtx);
298 }
299 kcb_delete(kcb);
300 }
301 return 0;
302 }
303
304 static int
305 ctl_detach(struct socket *so)
306 {
307 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
308
309 if (kcb == 0)
310 return 0;
311
312 soisdisconnected(so);
313 so->so_flags |= SOF_PCBCLEARING;
314 return 0;
315 }
316
317
318 static int
319 ctl_connect(struct socket *so, struct sockaddr *nam, __unused struct proc *p)
320 {
321 struct kctl *kctl;
322 int error = 0;
323 struct sockaddr_ctl sa;
324 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
325
326 if (kcb == 0)
327 panic("ctl_connect so_pcb null\n");
328
329 if (nam->sa_len != sizeof(struct sockaddr_ctl))
330 return(EINVAL);
331
332 bcopy(nam, &sa, sizeof(struct sockaddr_ctl));
333
334 lck_mtx_lock(ctl_mtx);
335 kctl = ctl_find_by_id_unit(sa.sc_id, sa.sc_unit);
336 if (kctl == NULL) {
337 lck_mtx_unlock(ctl_mtx);
338 return ENOENT;
339 }
340
341 if (((kctl->flags & CTL_FLAG_REG_SOCK_STREAM) && (so->so_type != SOCK_STREAM)) ||
342 (!(kctl->flags & CTL_FLAG_REG_SOCK_STREAM) && (so->so_type != SOCK_DGRAM))) {
343 lck_mtx_unlock(ctl_mtx);
344 return EPROTOTYPE;
345 }
346
347 if (kctl->flags & CTL_FLAG_PRIVILEGED) {
348 if (p == 0) {
349 lck_mtx_unlock(ctl_mtx);
350 return(EINVAL);
351 }
352 if ((error = proc_suser(p))) {
353 lck_mtx_unlock(ctl_mtx);
354 return error;
355 }
356 }
357
358 if ((kctl->flags & CTL_FLAG_REG_ID_UNIT) || sa.sc_unit != 0) {
359 if (kcb_find(kctl, sa.sc_unit) != NULL) {
360 lck_mtx_unlock(ctl_mtx);
361 return EBUSY;
362 }
363 } else {
364 u_int32_t unit = kctl->lastunit + 1;
365
366 while (1) {
367 if (unit == ctl_maxunit)
368 unit = 1;
369 if (kcb_find(kctl, unit) == NULL) {
370 kctl->lastunit = sa.sc_unit = unit;
371 break;
372 }
373 if (unit++ == kctl->lastunit) {
374 lck_mtx_unlock(ctl_mtx);
375 return EBUSY;
376 }
377 }
378 }
379
380 kcb->unit = sa.sc_unit;
381 kcb->kctl = kctl;
382 TAILQ_INSERT_TAIL(&kctl->kcb_head, kcb, next);
383 lck_mtx_unlock(ctl_mtx);
384
385 error = soreserve(so, kctl->sendbufsize, kctl->recvbufsize);
386 if (error)
387 goto done;
388 soisconnecting(so);
389
390 socket_unlock(so, 0);
391 error = (*kctl->connect)(kctl, &sa, &kcb->userdata);
392 socket_lock(so, 0);
393 if (error)
394 goto done;
395
396 soisconnected(so);
397
398 done:
399 if (error) {
400 soisdisconnected(so);
401 lck_mtx_lock(ctl_mtx);
402 kcb->kctl = 0;
403 kcb->unit = 0;
404 TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
405 lck_mtx_unlock(ctl_mtx);
406 }
407 return error;
408 }
409
410 static int
411 ctl_disconnect(struct socket *so)
412 {
413 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
414
415 if ((kcb = (struct ctl_cb *)so->so_pcb)) {
416 struct kctl *kctl = kcb->kctl;
417
418 if (kctl && kctl->disconnect) {
419 socket_unlock(so, 0);
420 (*kctl->disconnect)(kctl, kcb->unit, kcb->userdata);
421 socket_lock(so, 0);
422 }
423 lck_mtx_lock(ctl_mtx);
424 kcb->kctl = 0;
425 kcb->unit = 0;
426 TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
427 soisdisconnected(so);
428 lck_mtx_unlock(ctl_mtx);
429 }
430 return 0;
431 }
432
433 static int
434 ctl_peeraddr(struct socket *so, struct sockaddr **nam)
435 {
436 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
437 struct kctl *kctl;
438 struct sockaddr_ctl sc;
439
440 if (kcb == NULL) /* sanity check */
441 return(ENOTCONN);
442
443 if ((kctl = kcb->kctl) == NULL)
444 return(EINVAL);
445
446 bzero(&sc, sizeof(struct sockaddr_ctl));
447 sc.sc_len = sizeof(struct sockaddr_ctl);
448 sc.sc_family = AF_SYSTEM;
449 sc.ss_sysaddr = AF_SYS_CONTROL;
450 sc.sc_id = kctl->id;
451 sc.sc_unit = kcb->unit;
452
453 *nam = dup_sockaddr((struct sockaddr *)&sc, 1);
454
455 return 0;
456 }
457
458 static int
459 ctl_send(struct socket *so, int flags, struct mbuf *m,
460 __unused struct sockaddr *addr, __unused struct mbuf *control,
461 __unused struct proc *p)
462 {
463 int error = 0;
464 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
465 struct kctl *kctl;
466
467 if (kcb == NULL) /* sanity check */
468 return(ENOTCONN);
469
470 if ((kctl = kcb->kctl) == NULL)
471 return(EINVAL);
472
473 if (kctl->send) {
474 socket_unlock(so, 0);
475 error = (*kctl->send)(kctl, kcb->unit, kcb->userdata, m, flags);
476 socket_lock(so, 0);
477 }
478 return error;
479 }
480
/*
 * KPI: enqueue an mbuf chain on the receive buffer of the socket
 * attached to (kctlref, unit), waking the reader unless
 * CTL_DATA_NOWAKEUP is set.  Returns EINVAL for bad references,
 * ENOBUFS when the receive buffer lacks space for the packet.
 * On success the mbuf is consumed; on ENOBUFS it is NOT freed.
 * NOTE(review): kcb_find walks kctl->kcb_head here without ctl_mtx --
 * appears to rely on callers serializing against disconnect; confirm.
 */
errno_t
ctl_enqueuembuf(void *kctlref, u_int32_t unit, struct mbuf *m, u_int32_t flags)
{
	struct ctl_cb 	*kcb;
	struct socket 	*so;
	errno_t 	error = 0;
	struct kctl	*kctl = (struct kctl *)kctlref;

	if (kctl == NULL)
		return EINVAL;

	kcb = kcb_find(kctl, unit);
	if (kcb == NULL)
		return EINVAL;

	so = (struct socket *)kcb->so;
	if (so == NULL)
		return EINVAL;

	socket_lock(so, 1);
	/* reject rather than block when the receive buffer is full */
	if (sbspace(&so->so_rcv) < m->m_pkthdr.len) {
		error = ENOBUFS;
		goto bye;
	}
	if ((flags & CTL_DATA_EOR))
		m->m_flags |= M_EOR;	/* record boundary for stream sockets */
	if (sbappend(&so->so_rcv, m) && (flags & CTL_DATA_NOWAKEUP) == 0)
		sorwakeup(so);
bye:
	socket_unlock(so, 1);
	return error;
}
513
/*
 * KPI: copy `len' bytes at `data' into a freshly allocated mbuf packet
 * and enqueue it on the receive buffer of the socket attached to
 * (kctlref, unit).  Returns EINVAL for bad references, ENOBUFS when
 * the receive buffer or the mbuf allocation cannot accommodate the
 * data.  Wakes the reader unless CTL_DATA_NOWAKEUP is set.
 */
errno_t
ctl_enqueuedata(void *kctlref, u_int32_t unit, void *data, size_t len, u_int32_t flags)
{
	struct ctl_cb 	*kcb;
	struct socket 	*so;
	struct mbuf 	*m;
	errno_t 	error = 0;
	struct kctl	*kctl = (struct kctl *)kctlref;
	unsigned int 	num_needed;
	struct mbuf 	*n;
	size_t		curlen = 0;

	if (kctlref == NULL)
		return EINVAL;

	kcb = kcb_find(kctl, unit);
	if (kcb == NULL)
		return EINVAL;

	so = (struct socket *)kcb->so;
	if (so == NULL)
		return EINVAL;

	socket_lock(so, 1);
	/* reject rather than block when the receive buffer is full */
	if ((size_t)sbspace(&so->so_rcv) < len) {
		error = ENOBUFS;
		goto bye;
	}

	num_needed = 1;
	m = m_allocpacket_internal(&num_needed, len, NULL, M_NOWAIT, 1, 0);
	if (m == NULL) {
		printf("ctl_enqueuedata: m_allocpacket_internal(%lu) failed\n", len);
		error = ENOBUFS;
		goto bye;
	}

	/* scatter the caller's buffer across the mbuf chain */
	for (n = m; n != NULL; n = n->m_next) {
		size_t mlen = mbuf_maxlen(n);

		if (mlen + curlen > len)
			mlen = len - curlen;	/* last mbuf: partial fill */
		n->m_len = mlen;
		bcopy((char *)data + curlen, n->m_data, mlen);
		curlen += mlen;
	}
	mbuf_pkthdr_setlen(m, curlen);

	if ((flags & CTL_DATA_EOR))
		m->m_flags |= M_EOR;	/* record boundary for stream sockets */
	if (sbappend(&so->so_rcv, m) && (flags & CTL_DATA_NOWAKEUP) == 0)
		sorwakeup(so);
bye:
	socket_unlock(so, 1);
	return error;
}
570
571
572 errno_t
573 ctl_getenqueuespace(kern_ctl_ref kctlref, u_int32_t unit, size_t *space)
574 {
575 struct ctl_cb *kcb;
576 struct kctl *kctl = (struct kctl *)kctlref;
577 struct socket *so;
578
579 if (kctlref == NULL || space == NULL)
580 return EINVAL;
581
582 kcb = kcb_find(kctl, unit);
583 if (kcb == NULL)
584 return EINVAL;
585
586 so = (struct socket *)kcb->so;
587 if (so == NULL)
588 return EINVAL;
589
590 socket_lock(so, 1);
591 *space = sbspace(&so->so_rcv);
592 socket_unlock(so, 1);
593
594 return 0;
595 }
596
/*
 * Socket-option plumbing for SYSPROTO_CONTROL: forwards SOPT_SET to
 * the controller's setopt callback and SOPT_GET to getopt, copying the
 * option value between user and kernel space.  The socket lock is
 * dropped around each callback.
 */
static int
ctl_ctloutput(struct socket *so, struct sockopt *sopt)
{
	struct ctl_cb 	*kcb = (struct ctl_cb *)so->so_pcb;
	struct kctl	*kctl;
	int 	error = 0;
	void 	*data;
	size_t	len;

	if (sopt->sopt_level != SYSPROTO_CONTROL) {
		return(EINVAL);
	}

	if (kcb == NULL) 	/* sanity check */
		return(ENOTCONN);

	if ((kctl = kcb->kctl) == NULL)
		return(EINVAL);

	switch (sopt->sopt_dir) {
		case SOPT_SET:
			if (kctl->setopt == NULL)
				return(ENOTSUP);
			MALLOC(data, void *, sopt->sopt_valsize, M_TEMP, M_WAITOK);
			if (data == NULL)
				return(ENOMEM);
			error = sooptcopyin(sopt, data, sopt->sopt_valsize, sopt->sopt_valsize);
			if (error == 0) {
				/* callback runs without the socket lock */
				socket_unlock(so, 0);
				error = (*kctl->setopt)(kcb->kctl, kcb->unit, kcb->userdata, sopt->sopt_name,
							data, sopt->sopt_valsize);
				socket_lock(so, 0);
			}
			FREE(data, M_TEMP);
			break;

		case SOPT_GET:
			if (kctl->getopt == NULL)
				return(ENOTSUP);
			data = NULL;
			if (sopt->sopt_valsize && sopt->sopt_val) {
				MALLOC(data, void *, sopt->sopt_valsize, M_TEMP, M_WAITOK);
				if (data == NULL)
					return(ENOMEM);
				/* 4108337 - copy in data for get socket option */
				error = sooptcopyin(sopt, data, sopt->sopt_valsize, sopt->sopt_valsize);
			}
			len = sopt->sopt_valsize;
			socket_unlock(so, 0);
			error = (*kctl->getopt)(kcb->kctl, kcb->unit, kcb->userdata, sopt->sopt_name,
						data, &len);
			socket_lock(so, 0);
			if (error == 0) {
				if (data != NULL)
					error = sooptcopyout(sopt, data, len);
				else
					/* no user buffer: report required length only */
					sopt->sopt_valsize = len;
			}
			if (data != NULL)
				FREE(data, M_TEMP);
			break;
	}
	return error;
}
661
662 static int
663 ctl_ioctl(__unused struct socket *so, u_long cmd, caddr_t data,
664 __unused struct ifnet *ifp, __unused struct proc *p)
665 {
666 int error = ENOTSUP;
667
668 switch (cmd) {
669 /* get the number of controllers */
670 case CTLIOCGCOUNT: {
671 struct kctl *kctl;
672 int n = 0;
673
674 lck_mtx_lock(ctl_mtx);
675 TAILQ_FOREACH(kctl, &ctl_head, next)
676 n++;
677 lck_mtx_unlock(ctl_mtx);
678
679 *(u_int32_t *)data = n;
680 error = 0;
681 break;
682 }
683 case CTLIOCGINFO: {
684 struct ctl_info *ctl_info = (struct ctl_info *)data;
685 struct kctl *kctl = 0;
686 size_t name_len = strlen(ctl_info->ctl_name);
687
688 if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) {
689 error = EINVAL;
690 break;
691 }
692 lck_mtx_lock(ctl_mtx);
693 kctl = ctl_find_by_name(ctl_info->ctl_name);
694 lck_mtx_unlock(ctl_mtx);
695 if (kctl == 0) {
696 error = ENOENT;
697 break;
698 }
699 ctl_info->ctl_id = kctl->id;
700 error = 0;
701 break;
702 }
703
704 /* add controls to get list of NKEs */
705
706 }
707
708 return error;
709 }
710
/*
 * Register/unregister a NKE
 */

/*
 * Register a kernel controller.  Without CTL_FLAG_REG_ID_UNIT an
 * unused id is assigned dynamically (the name must then be unique);
 * otherwise the caller-supplied id/unit pair must be unique.  On
 * success the default buffer sizes are written back into *userkctl
 * and *kctlref receives the opaque controller reference.
 * Returns EINVAL/ENOMEM/EEXIST/ENOBUFS on failure.
 */
errno_t
ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref)
{
	struct kctl 	*kctl = 0;
	u_int32_t	id = -1;
	u_int32_t	n;
	size_t		name_len;

	if (userkctl == NULL)	/* sanity check */
		return(EINVAL);
	if (userkctl->ctl_connect == NULL)
		return(EINVAL);
	name_len = strlen(userkctl->ctl_name);
	if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME)
		return(EINVAL);

	MALLOC(kctl, struct kctl *, sizeof(*kctl), M_TEMP, M_WAITOK);
	if (kctl == NULL)
		return(ENOMEM);
	bzero((char *)kctl, sizeof(*kctl));

	lck_mtx_lock(ctl_mtx);

	if ((userkctl->ctl_flags & CTL_FLAG_REG_ID_UNIT) == 0) {
		/* dynamic id: name must be unique, then scan for a free id.
		 * NOTE(review): ctl_last_id is never advanced after
		 * assignment, so the scan always starts from the same
		 * point -- confirm this is intended. */
		if (ctl_find_by_name(userkctl->ctl_name) != NULL) {
			lck_mtx_unlock(ctl_mtx);
			FREE(kctl, M_TEMP);
			return(EEXIST);
		}
		for (n = 0, id = ctl_last_id + 1; n < ctl_max; id++, n++) {
			if (id == 0) {
				/* id 0 is reserved: skip without burning an attempt */
				n--;
				continue;
			}
			if (ctl_find_by_id(id) == 0)
				break;
		}
		if (id == ctl_max) {
			lck_mtx_unlock(ctl_mtx);
			FREE(kctl, M_TEMP);
			return(ENOBUFS);
		}
		userkctl->ctl_id =id;
		kctl->id = id;
		kctl->reg_unit = -1;
	} else {
		/* fixed id/unit: the pair must not already be registered */
		if (ctl_find_by_id_unit(userkctl->ctl_id, userkctl->ctl_unit) != NULL) {
			lck_mtx_unlock(ctl_mtx);
			FREE(kctl, M_TEMP);
			return(EEXIST);
		}
		kctl->id = userkctl->ctl_id;
		kctl->reg_unit = userkctl->ctl_unit;
	}
	/* bounded: name_len + 1 <= MAX_KCTL_NAME verified above */
	strcpy(kctl->name, userkctl->ctl_name);
	kctl->flags = userkctl->ctl_flags;

	/* Let the caller know the default send and receive sizes */
	if (userkctl->ctl_sendsize == 0)
		userkctl->ctl_sendsize = CTL_SENDSIZE;
	kctl->sendbufsize = userkctl->ctl_sendsize;

	if (userkctl->ctl_recvsize == 0)
		userkctl->ctl_recvsize = CTL_RECVSIZE;
	kctl->recvbufsize = userkctl->ctl_recvsize;

	kctl->connect = userkctl->ctl_connect;
	kctl->disconnect = userkctl->ctl_disconnect;
	kctl->send = userkctl->ctl_send;
	kctl->setopt = userkctl->ctl_setopt;
	kctl->getopt = userkctl->ctl_getopt;

	TAILQ_INIT(&kctl->kcb_head);

	TAILQ_INSERT_TAIL(&ctl_head, kctl, next);
	ctl_max++;

	lck_mtx_unlock(ctl_mtx);

	*kctlref = kctl;

	ctl_post_msg(KEV_CTL_REGISTERED, kctl->id);
	return(0);
}
798
799 errno_t
800 ctl_deregister(void *kctlref)
801 {
802 struct kctl *kctl;
803
804 if (kctlref == NULL) /* sanity check */
805 return(EINVAL);
806
807 lck_mtx_lock(ctl_mtx);
808 TAILQ_FOREACH(kctl, &ctl_head, next) {
809 if (kctl == (struct kctl *)kctlref)
810 break;
811 }
812 if (kctl != (struct kctl *)kctlref) {
813 lck_mtx_unlock(ctl_mtx);
814 return EINVAL;
815 }
816 if (!TAILQ_EMPTY(&kctl->kcb_head)) {
817 lck_mtx_unlock(ctl_mtx);
818 return EBUSY;
819 }
820
821 TAILQ_REMOVE(&ctl_head, kctl, next);
822 ctl_max--;
823
824 lck_mtx_unlock(ctl_mtx);
825
826 ctl_post_msg(KEV_CTL_DEREGISTERED, kctl->id);
827 FREE(kctl, M_TEMP);
828 return(0);
829 }
830
831 /*
832 * Must be called with global lock taked
833 */
834 static struct kctl *
835 ctl_find_by_id(u_int32_t id)
836 {
837 struct kctl *kctl;
838
839 TAILQ_FOREACH(kctl, &ctl_head, next)
840 if (kctl->id == id)
841 return kctl;
842
843 return NULL;
844 }
845
846 /*
847 * Must be called with global ctl_mtx lock taked
848 */
849 static struct kctl *
850 ctl_find_by_name(const char *name)
851 {
852 struct kctl *kctl;
853
854 TAILQ_FOREACH(kctl, &ctl_head, next)
855 if (strcmp(kctl->name, name) == 0)
856 return kctl;
857
858 return NULL;
859 }
860
861 /*
862 * Must be called with global ctl_mtx lock taked
863 *
864 */
865 static struct kctl *
866 ctl_find_by_id_unit(u_int32_t id, u_int32_t unit)
867 {
868 struct kctl *kctl;
869
870 TAILQ_FOREACH(kctl, &ctl_head, next) {
871 if (kctl->id == id && (kctl->flags & CTL_FLAG_REG_ID_UNIT) == 0)
872 return kctl;
873 else if (kctl->id == id && kctl->reg_unit == unit)
874 return kctl;
875 }
876 return NULL;
877 }
878
879 /*
880 * Must be called with kernel controller lock taken
881 */
882 static struct ctl_cb *
883 kcb_find(struct kctl *kctl, u_int32_t unit)
884 {
885 struct ctl_cb *kcb;
886
887 TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
888 if ((kcb->unit == unit))
889 return kcb;
890
891 return NULL;
892 }
893
894 /*
895 * Must be called witout lock
896 */
897 static void
898 ctl_post_msg(u_long event_code, u_int32_t id)
899 {
900 struct ctl_event_data ctl_ev_data;
901 struct kev_msg ev_msg;
902
903 ev_msg.vendor_code = KEV_VENDOR_APPLE;
904
905 ev_msg.kev_class = KEV_SYSTEM_CLASS;
906 ev_msg.kev_subclass = KEV_CTL_SUBCLASS;
907 ev_msg.event_code = event_code;
908
909 /* common nke subclass data */
910 bzero(&ctl_ev_data, sizeof(ctl_ev_data));
911 ctl_ev_data.ctl_id = id;
912 ev_msg.dv[0].data_ptr = &ctl_ev_data;
913 ev_msg.dv[0].data_length = sizeof(ctl_ev_data);
914
915 ev_msg.dv[1].data_length = 0;
916
917 kev_post_msg(&ev_msg);
918 }
919
920 static int
921 ctl_lock(struct socket *so, int refcount, int lr)
922 {
923 int lr_saved;
924 #ifdef __ppc__
925 if (lr == 0) {
926 __asm__ volatile("mflr %0" : "=r" (lr_saved));
927 }
928 else lr_saved = lr;
929 #endif
930
931 if (so->so_pcb) {
932 lck_mtx_lock(((struct ctl_cb *)so->so_pcb)->mtx);
933 } else {
934 panic("ctl_lock: so=%x NO PCB! lr=%x\n", so, lr_saved);
935 lck_mtx_lock(so->so_proto->pr_domain->dom_mtx);
936 }
937
938 if (so->so_usecount < 0)
939 panic("ctl_lock: so=%x so_pcb=%x lr=%x ref=%x\n",
940 so, so->so_pcb, lr_saved, so->so_usecount);
941
942 if (refcount)
943 so->so_usecount++;
944 so->reserved3 = (void *)lr_saved;
945 return (0);
946 }
947
948 static int
949 ctl_unlock(struct socket *so, int refcount, int lr)
950 {
951 int lr_saved;
952 lck_mtx_t * mutex_held;
953
954 #ifdef __ppc__
955 if (lr == 0) {
956 __asm__ volatile("mflr %0" : "=r" (lr_saved));
957 }
958 else lr_saved = lr;
959 #endif
960
961 #ifdef MORE_KCTLLOCK_DEBUG
962 printf("ctl_unlock: so=%x sopcb=%x lock=%x ref=%x lr=%x\n",
963 so, so->so_pcb, ((struct ctl_cb *)so->so_pcb)->mtx, so->so_usecount, lr_saved);
964 #endif
965 if (refcount)
966 so->so_usecount--;
967
968 if (so->so_usecount < 0)
969 panic("ctl_unlock: so=%x usecount=%x\n", so, so->so_usecount);
970 if (so->so_pcb == NULL) {
971 panic("ctl_unlock: so=%x NO PCB usecount=%x lr=%x\n", so, so->so_usecount, lr_saved);
972 mutex_held = so->so_proto->pr_domain->dom_mtx;
973 } else {
974 mutex_held = ((struct ctl_cb *)so->so_pcb)->mtx;
975 }
976 lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
977 lck_mtx_unlock(mutex_held);
978 so->reserved4 = (void *)lr_saved;
979
980 if (so->so_usecount == 0)
981 ctl_sofreelastref(so);
982
983 return (0);
984 }
985
986 static lck_mtx_t *
987 ctl_getlock(struct socket *so, __unused int locktype)
988 {
989 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
990
991 if (so->so_pcb) {
992 if (so->so_usecount < 0)
993 panic("ctl_getlock: so=%x usecount=%x\n", so, so->so_usecount);
994 return(kcb->mtx);
995 } else {
996 panic("ctl_getlock: so=%x NULL so_pcb\n", so);
997 return (so->so_proto->pr_domain->dom_mtx);
998 }
999 }