]> git.saurik.com Git - apple/xnu.git/blob - bsd/kern/kern_control.c
xnu-792.2.4.tar.gz
[apple/xnu.git] / bsd / kern / kern_control.c
1 /*
2 * Copyright (c) 1999-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22
/*
 * Kernel Control domain - allows user-space clients to open control
 * connections to kernel controllers and to read/write data over them.
 *
 * Vincent Lubet, 040506
 * Christophe Allie, 010928
 * Justin C. Walker, 990319
 */
31
32 #include <sys/types.h>
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/syslog.h>
36 #include <sys/socket.h>
37 #include <sys/socketvar.h>
38 #include <sys/protosw.h>
39 #include <sys/domain.h>
40 #include <sys/malloc.h>
41 #include <sys/mbuf.h>
42 #include <sys/sys_domain.h>
43 #include <sys/kern_event.h>
44 #include <sys/kern_control.h>
45 #include <net/if_var.h>
46
47 #include <mach/vm_types.h>
48 #include <mach/kmod.h>
49
50 #include <kern/thread.h>
51
/*
 * Default socket buffer sizes and module-global state for the kernel
 * control sockets we support.
 */

#define CTL_SENDSIZE	(2 * 1024)	/* default buffer size */
#define CTL_RECVSIZE	(8 * 1024)	/* default buffer size */

/* id/unit bookkeeping; all of these are protected by ctl_mtx */
static u_int32_t ctl_last_id = 0;	/* most recently assigned dynamic id */
static u_int32_t ctl_max = 256;		/* bound for the dynamic-id search in ctl_register */
static u_int32_t ctl_maxunit = 65536;	/* auto-assigned unit numbers wrap below this */
static lck_grp_attr_t *ctl_lck_grp_attr = 0;
static lck_attr_t *ctl_lck_attr = 0;
static lck_grp_t *ctl_lck_grp = 0;
static lck_mtx_t *ctl_mtx;		/* global lock guarding ctl_head and kcb lists */
/*
 * Internal structure maintained for each registered controller
 */

struct ctl_cb;

struct kctl
{
	TAILQ_ENTRY(kctl)	next;		/* controller chain */

	/* controller information provided when registering */
	char	name[MAX_KCTL_NAME];	/* unique nke identifier, provided by DTS */
	u_int32_t	id;
	u_int32_t	reg_unit;

	/* misc communication information */
	u_int32_t	flags;		/* support flags */
	u_int32_t	recvbufsize;	/* request more than the default buffer size */
	u_int32_t	sendbufsize;	/* request more than the default buffer size */

	/* Dispatch functions */
	ctl_connect_func	connect;	/* Make contact */
	ctl_disconnect_func	disconnect;	/* Break contact */
	ctl_send_func		send;		/* Send data to nke */
	ctl_setopt_func		setopt;		/* set kctl configuration */
	ctl_getopt_func		getopt;		/* get kctl configuration */

	/* sockets (one per connected unit) attached to this controller */
	TAILQ_HEAD(, ctl_cb)	kcb_head;
	u_int32_t		lastunit;	/* last unit auto-assigned by ctl_connect */
};
101
/* Per-socket control block; so->so_pcb points here for kctl sockets */
struct ctl_cb {
	TAILQ_ENTRY(ctl_cb)	next;	/* controller chain */
	lck_mtx_t		*mtx;	/* per-socket lock, see ctl_lock/ctl_unlock */
	struct socket		*so;	/* controlling socket */
	struct kctl		*kctl;	/* back pointer to controller */
	u_int32_t		unit;	/* unit this socket is connected as */
	void			*userdata;	/* opaque cookie owned by the controller */
};
110
/* all the controllers are chained; list is protected by ctl_mtx */
TAILQ_HEAD(, kctl) ctl_head;
113
/*
 * Forward declarations: pr_usrreqs entry points, controller/unit lookup
 * helpers, kernel event posting, and per-socket locking hooks.
 */
static int ctl_attach(struct socket *, int, struct proc *);
static int ctl_detach(struct socket *);
static int ctl_sofreelastref(struct socket *so);
static int ctl_connect(struct socket *, struct sockaddr *, struct proc *);
static int ctl_disconnect(struct socket *);
static int ctl_ioctl(struct socket *so, u_long cmd, caddr_t data,
			struct ifnet *ifp, struct proc *p);
static int ctl_send(struct socket *, int, struct mbuf *,
	    struct sockaddr *, struct mbuf *, struct proc *);
static int ctl_ctloutput(struct socket *, struct sockopt *);
static int ctl_peeraddr(struct socket *so, struct sockaddr **nam);

static struct kctl *ctl_find_by_id(u_int32_t);
static struct kctl *ctl_find_by_name(const char *);
static struct kctl *ctl_find_by_id_unit(u_int32_t id, u_int32_t unit);

static struct ctl_cb *kcb_find(struct kctl *, u_int32_t unit);
static void ctl_post_msg(u_long event_code, u_int32_t id);

static int ctl_lock(struct socket *, int, int);
static int ctl_unlock(struct socket *, int, int);
static lck_mtx_t * ctl_getlock(struct socket *, int);
/*
 * pr_usrreqs dispatch table for kernel control sockets.
 * NOTE(review): positional initialization -- entry order must match
 * struct pr_usrreqs exactly; unsupported operations use pru_*_notsupp
 * stubs.
 */
static struct pr_usrreqs ctl_usrreqs =
{
	pru_abort_notsupp, pru_accept_notsupp, ctl_attach, pru_bind_notsupp,
	ctl_connect, pru_connect2_notsupp, ctl_ioctl, ctl_detach,
	ctl_disconnect, pru_listen_notsupp, ctl_peeraddr,
	pru_rcvd_notsupp, pru_rcvoob_notsupp, ctl_send,
	pru_sense_null, pru_shutdown_notsupp, pru_sockaddr_notsupp,
	sosend, soreceive, pru_sopoll_notsupp
};
146
/* SOCK_DGRAM flavor of SYSPROTO_CONTROL (record-oriented, per-pcb lock) */
static struct protosw kctlswk_dgram =
{
	SOCK_DGRAM, &systemdomain, SYSPROTO_CONTROL,
	PR_ATOMIC|PR_CONNREQUIRED|PR_PCBLOCK,
	NULL, NULL, NULL, ctl_ctloutput,
	NULL, NULL,
	NULL, NULL, NULL, NULL, &ctl_usrreqs,
	ctl_lock, ctl_unlock, ctl_getlock, { 0, 0 } , 0, { 0 }
};
156
/* SOCK_STREAM flavor of SYSPROTO_CONTROL (byte-stream, per-pcb lock) */
static struct protosw kctlswk_stream =
{
	SOCK_STREAM, &systemdomain, SYSPROTO_CONTROL,
	PR_CONNREQUIRED|PR_PCBLOCK,
	NULL, NULL, NULL, ctl_ctloutput,
	NULL, NULL,
	NULL, NULL, NULL, NULL, &ctl_usrreqs,
	ctl_lock, ctl_unlock, ctl_getlock, { 0, 0 } , 0, { 0 }
};
166
167
/*
 * Install the protosw's for the Kernel Control manager.
 *
 * Allocates the lock group/attributes and the global ctl_mtx, then adds
 * the datagram and stream protocols to the system domain.  On lock
 * allocation failure, all partially allocated lock state is torn down.
 *
 * NOTE(review): a net_add_proto() failure is only logged, and the second
 * call overwrites 'error' -- a dgram failure followed by a stream
 * success returns 0 and skips cleanup; confirm this is intended.
 */
__private_extern__ int
kern_control_init(void)
{
	int error = 0;

	ctl_lck_grp_attr = lck_grp_attr_alloc_init();
	if (ctl_lck_grp_attr == 0) {
		printf(": lck_grp_attr_alloc_init failed\n");
		error = ENOMEM;
		goto done;
	}
	lck_grp_attr_setdefault(ctl_lck_grp_attr);

	ctl_lck_grp = lck_grp_alloc_init("Kernel Control Protocol", ctl_lck_grp_attr);
	if (ctl_lck_grp == 0) {
		printf("kern_control_init: lck_grp_alloc_init failed\n");
		error = ENOMEM;
		goto done;
	}

	ctl_lck_attr = lck_attr_alloc_init();
	if (ctl_lck_attr == 0) {
		printf("kern_control_init: lck_attr_alloc_init failed\n");
		error = ENOMEM;
		goto done;
	}
	lck_attr_setdefault(ctl_lck_attr);

	ctl_mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
	if (ctl_mtx == 0) {
		printf("kern_control_init: lck_mtx_alloc_init failed\n");
		error = ENOMEM;
		goto done;
	}
	TAILQ_INIT(&ctl_head);

	error = net_add_proto(&kctlswk_dgram, &systemdomain);
	if (error) {
		log(LOG_WARNING, "kern_control_init: net_add_proto dgram failed (%d)\n", error);
	}
	error = net_add_proto(&kctlswk_stream, &systemdomain);
	if (error) {
		log(LOG_WARNING, "kern_control_init: net_add_proto stream failed (%d)\n", error);
	}

done:
	if (error != 0) {
		/* unwind whatever lock state was allocated above */
		if (ctl_mtx) {
			lck_mtx_free(ctl_mtx, ctl_lck_grp);
			ctl_mtx = 0;
		}
		if (ctl_lck_grp) {
			lck_grp_free(ctl_lck_grp);
			ctl_lck_grp = 0;
		}
		if (ctl_lck_grp_attr) {
			lck_grp_attr_free(ctl_lck_grp_attr);
			ctl_lck_grp_attr = 0;
		}
		if (ctl_lck_attr) {
			lck_attr_free(ctl_lck_attr);
			ctl_lck_attr = 0;
		}
	}
	return error;
}
237
238 static void
239 kcb_delete(struct ctl_cb *kcb)
240 {
241 if (kcb != 0) {
242 if (kcb->mtx != 0)
243 lck_mtx_free(kcb->mtx, ctl_lck_grp);
244 FREE(kcb, M_TEMP);
245 }
246 }
247
248
249 /*
250 * Kernel Controller user-request functions
251 * attach function must exist and succeed
252 * detach not necessary
253 * we need a pcb for the per socket mutex
254 */
255 static int
256 ctl_attach(__unused struct socket *so, __unused int proto, __unused struct proc *p)
257 {
258 int error = 0;
259 struct ctl_cb *kcb = 0;
260
261 MALLOC(kcb, struct ctl_cb *, sizeof(struct ctl_cb), M_TEMP, M_WAITOK);
262 if (kcb == NULL) {
263 error = ENOMEM;
264 goto quit;
265 }
266 bzero(kcb, sizeof(struct ctl_cb));
267
268 kcb->mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
269 if (kcb->mtx == NULL) {
270 error = ENOMEM;
271 goto quit;
272 }
273 kcb->so = so;
274 so->so_pcb = (caddr_t)kcb;
275
276 quit:
277 if (error != 0) {
278 kcb_delete(kcb);
279 kcb = 0;
280 }
281 return error;
282 }
283
284 static int
285 ctl_sofreelastref(struct socket *so)
286 {
287 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
288
289 so->so_pcb = 0;
290
291 if (kcb != 0) {
292 struct kctl *kctl;
293 if ((kctl = kcb->kctl) != 0) {
294 lck_mtx_lock(ctl_mtx);
295 TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
296 lck_mtx_lock(ctl_mtx);
297 }
298 kcb_delete(kcb);
299 }
300 return 0;
301 }
302
303 static int
304 ctl_detach(struct socket *so)
305 {
306 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
307
308 if (kcb == 0)
309 return 0;
310
311 soisdisconnected(so);
312 so->so_flags |= SOF_PCBCLEARING;
313 return 0;
314 }
315
316
317 static int
318 ctl_connect(struct socket *so, struct sockaddr *nam, __unused struct proc *p)
319 {
320 struct kctl *kctl;
321 int error = 0;
322 struct sockaddr_ctl sa;
323 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
324
325 if (kcb == 0)
326 panic("ctl_connect so_pcb null\n");
327
328 if (nam->sa_len != sizeof(struct sockaddr_ctl))
329 return(EINVAL);
330
331 bcopy(nam, &sa, sizeof(struct sockaddr_ctl));
332
333 lck_mtx_lock(ctl_mtx);
334 kctl = ctl_find_by_id_unit(sa.sc_id, sa.sc_unit);
335 if (kctl == NULL) {
336 lck_mtx_unlock(ctl_mtx);
337 return ENOENT;
338 }
339
340 if (((kctl->flags & CTL_FLAG_REG_SOCK_STREAM) && (so->so_type != SOCK_STREAM)) ||
341 (!(kctl->flags & CTL_FLAG_REG_SOCK_STREAM) && (so->so_type != SOCK_DGRAM))) {
342 lck_mtx_unlock(ctl_mtx);
343 return EPROTOTYPE;
344 }
345
346 if (kctl->flags & CTL_FLAG_PRIVILEGED) {
347 if (p == 0) {
348 lck_mtx_unlock(ctl_mtx);
349 return(EINVAL);
350 }
351 if ((error = proc_suser(p))) {
352 lck_mtx_unlock(ctl_mtx);
353 return error;
354 }
355 }
356
357 if ((kctl->flags & CTL_FLAG_REG_ID_UNIT) || sa.sc_unit != 0) {
358 if (kcb_find(kctl, sa.sc_unit) != NULL) {
359 lck_mtx_unlock(ctl_mtx);
360 return EBUSY;
361 }
362 } else {
363 u_int32_t unit = kctl->lastunit + 1;
364
365 while (1) {
366 if (unit == ctl_maxunit)
367 unit = 1;
368 if (kcb_find(kctl, unit) == NULL) {
369 kctl->lastunit = sa.sc_unit = unit;
370 break;
371 }
372 if (unit++ == kctl->lastunit) {
373 lck_mtx_unlock(ctl_mtx);
374 return EBUSY;
375 }
376 }
377 }
378
379 kcb->unit = sa.sc_unit;
380 kcb->kctl = kctl;
381 TAILQ_INSERT_TAIL(&kctl->kcb_head, kcb, next);
382 lck_mtx_unlock(ctl_mtx);
383
384 error = soreserve(so, kctl->sendbufsize, kctl->recvbufsize);
385 if (error)
386 goto done;
387 soisconnecting(so);
388
389 socket_unlock(so, 0);
390 error = (*kctl->connect)(kctl, &sa, &kcb->userdata);
391 socket_lock(so, 0);
392 if (error)
393 goto done;
394
395 soisconnected(so);
396
397 done:
398 if (error) {
399 soisdisconnected(so);
400 lck_mtx_lock(ctl_mtx);
401 kcb->kctl = 0;
402 kcb->unit = 0;
403 TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
404 lck_mtx_unlock(ctl_mtx);
405 }
406 return error;
407 }
408
/*
 * Disconnect a control socket from its controller: notify the
 * controller's disconnect handler (with the socket lock dropped),
 * then unlink the control block under ctl_mtx.
 *
 * NOTE(review): kcb->kctl is not checked for NULL before the
 * TAILQ_REMOVE below -- this relies on pru_disconnect only being
 * invoked on connected sockets; confirm.
 */
static int
ctl_disconnect(struct socket *so)
{
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

	if ((kcb = (struct ctl_cb *)so->so_pcb)) {
		struct kctl *kctl = kcb->kctl;

		if (kctl && kctl->disconnect) {
			/* handler may block: drop the socket lock across it */
			socket_unlock(so, 0);
			(*kctl->disconnect)(kctl, kcb->unit, kcb->userdata);
			socket_lock(so, 0);
		}
		lck_mtx_lock(ctl_mtx);
		kcb->kctl = 0;
		kcb->unit = 0;
		TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
		soisdisconnected(so);
		lck_mtx_unlock(ctl_mtx);
	}
	return 0;
}
431
432 static int
433 ctl_peeraddr(struct socket *so, struct sockaddr **nam)
434 {
435 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
436 struct kctl *kctl;
437 struct sockaddr_ctl sc;
438
439 if (kcb == NULL) /* sanity check */
440 return(ENOTCONN);
441
442 if ((kctl = kcb->kctl) == NULL)
443 return(EINVAL);
444
445 bzero(&sc, sizeof(struct sockaddr_ctl));
446 sc.sc_len = sizeof(struct sockaddr_ctl);
447 sc.sc_family = AF_SYSTEM;
448 sc.ss_sysaddr = AF_SYS_CONTROL;
449 sc.sc_id = kctl->id;
450 sc.sc_unit = kcb->unit;
451
452 *nam = dup_sockaddr((struct sockaddr *)&sc, 1);
453
454 return 0;
455 }
456
/*
 * pru_send for control sockets: hand the mbuf chain to the controller's
 * send handler with the socket lock dropped around the callout.
 *
 * NOTE(review): when the controller registered no send handler, the
 * mbuf chain 'm' is neither consumed nor freed here -- confirm callers
 * tolerate that.
 */
static int
ctl_send(struct socket *so, int flags, struct mbuf *m,
	    __unused struct sockaddr *addr, __unused struct mbuf *control,
	    __unused struct proc *p)
{
	int error = 0;
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
	struct kctl *kctl;

	if (kcb == NULL)	/* sanity check */
		return(ENOTCONN);

	if ((kctl = kcb->kctl) == NULL)
		return(EINVAL);

	if (kctl->send) {
		/* handler may block: drop the socket lock across the call */
		socket_unlock(so, 0);
		error = (*kctl->send)(kctl, kcb->unit, kcb->userdata, m, flags);
		socket_lock(so, 0);
	}
	return error;
}
479
/*
 * Enqueue an mbuf chain on the receive buffer of the socket attached
 * to (kctlref, unit).  Fails with ENOBUFS when the chain does not fit.
 * CTL_DATA_EOR marks a record boundary; CTL_DATA_NOWAKEUP suppresses
 * waking up the receiver.
 *
 * NOTE(review): kcb_find() is called here without ctl_mtx held --
 * confirm the caller serializes against disconnect/deregister.
 */
errno_t
ctl_enqueuembuf(void *kctlref, u_int32_t unit, struct mbuf *m, u_int32_t flags)
{
	struct ctl_cb *kcb;
	struct socket *so;
	errno_t error = 0;
	struct kctl *kctl = (struct kctl *)kctlref;

	if (kctl == NULL)
		return EINVAL;

	kcb = kcb_find(kctl, unit);
	if (kcb == NULL)
		return EINVAL;

	so = (struct socket *)kcb->so;
	if (so == NULL)
		return EINVAL;

	socket_lock(so, 1);
	/* reject rather than block when the receive buffer is full */
	if (sbspace(&so->so_rcv) < m->m_pkthdr.len) {
		error = ENOBUFS;
		goto bye;
	}
	if ((flags & CTL_DATA_EOR))
		m->m_flags |= M_EOR;
	if (sbappend(&so->so_rcv, m) && (flags & CTL_DATA_NOWAKEUP) == 0)
		sorwakeup(so);
bye:
	socket_unlock(so, 1);
	return error;
}
512
/*
 * Copy 'len' bytes from 'data' into freshly allocated mbufs and enqueue
 * them on the receive buffer of the socket attached to (kctlref, unit).
 * Fails with ENOBUFS when the buffer lacks space or mbuf allocation
 * fails.  Flag handling is as for ctl_enqueuembuf().
 *
 * NOTE(review): kcb_find() is called without ctl_mtx held -- confirm
 * the caller serializes against disconnect/deregister.
 */
errno_t
ctl_enqueuedata(void *kctlref, u_int32_t unit, void *data, size_t len, u_int32_t flags)
{
	struct ctl_cb *kcb;
	struct socket *so;
	struct mbuf *m;
	errno_t error = 0;
	struct kctl *kctl = (struct kctl *)kctlref;
	unsigned int num_needed;
	struct mbuf *n;
	size_t curlen = 0;

	if (kctlref == NULL)
		return EINVAL;

	kcb = kcb_find(kctl, unit);
	if (kcb == NULL)
		return EINVAL;

	so = (struct socket *)kcb->so;
	if (so == NULL)
		return EINVAL;

	socket_lock(so, 1);
	/* reject rather than block when the receive buffer is full */
	if ((size_t)sbspace(&so->so_rcv) < len) {
		error = ENOBUFS;
		goto bye;
	}

	num_needed = 1;
	m = m_allocpacket_internal(&num_needed, len, NULL, M_NOWAIT, 1, 0);
	if (m == NULL) {
		printf("ctl_enqueuedata: m_allocpacket_internal(%lu) failed\n", len);
		error = ENOBUFS;
		goto bye;
	}

	/* fill each mbuf of the chain in turn, capping the last one at len */
	for (n = m; n != NULL; n = n->m_next) {
		size_t mlen = mbuf_maxlen(n);

		if (mlen + curlen > len)
			mlen = len - curlen;
		n->m_len = mlen;
		bcopy((char *)data + curlen, n->m_data, mlen);
		curlen += mlen;
	}
	mbuf_pkthdr_setlen(m, curlen);

	if ((flags & CTL_DATA_EOR))
		m->m_flags |= M_EOR;
	if (sbappend(&so->so_rcv, m) && (flags & CTL_DATA_NOWAKEUP) == 0)
		sorwakeup(so);
bye:
	socket_unlock(so, 1);
	return error;
}
569
570
571 errno_t
572 ctl_getenqueuespace(kern_ctl_ref kctlref, u_int32_t unit, size_t *space)
573 {
574 struct ctl_cb *kcb;
575 struct kctl *kctl = (struct kctl *)kctlref;
576 struct socket *so;
577
578 if (kctlref == NULL || space == NULL)
579 return EINVAL;
580
581 kcb = kcb_find(kctl, unit);
582 if (kcb == NULL)
583 return EINVAL;
584
585 so = (struct socket *)kcb->so;
586 if (so == NULL)
587 return EINVAL;
588
589 socket_lock(so, 1);
590 *space = sbspace(&so->so_rcv);
591 socket_unlock(so, 1);
592
593 return 0;
594 }
595
/*
 * Socket option handler for SYSPROTO_CONTROL: forwards SOPT_SET to the
 * controller's setopt handler and SOPT_GET to its getopt handler, with
 * the socket lock dropped around each callout.
 *
 * NOTE(review): for SOPT_SET, MALLOC is invoked with sopt_valsize even
 * when it is 0 -- confirm a zero-length set is intended to allocate.
 */
static int
ctl_ctloutput(struct socket *so, struct sockopt *sopt)
{
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
	struct kctl *kctl;
	int error = 0;
	void *data;
	size_t len;

	if (sopt->sopt_level != SYSPROTO_CONTROL) {
		return(EINVAL);
	}

	if (kcb == NULL)	/* sanity check */
		return(ENOTCONN);

	if ((kctl = kcb->kctl) == NULL)
		return(EINVAL);

	switch (sopt->sopt_dir) {
		case SOPT_SET:
			if (kctl->setopt == NULL)
				return(ENOTSUP);
			MALLOC(data, void *, sopt->sopt_valsize, M_TEMP, M_WAITOK);
			if (data == NULL)
				return(ENOMEM);
			error = sooptcopyin(sopt, data, sopt->sopt_valsize, sopt->sopt_valsize);
			if (error == 0) {
				/* handler may block: drop the socket lock */
				socket_unlock(so, 0);
				error = (*kctl->setopt)(kcb->kctl, kcb->unit, kcb->userdata, sopt->sopt_name,
							data, sopt->sopt_valsize);
				socket_lock(so, 0);
			}
			FREE(data, M_TEMP);
			break;

		case SOPT_GET:
			if (kctl->getopt == NULL)
				return(ENOTSUP);
			data = NULL;
			if (sopt->sopt_valsize && sopt->sopt_val) {
				MALLOC(data, void *, sopt->sopt_valsize, M_TEMP, M_WAITOK);
				if (data == NULL)
					return(ENOMEM);
				/* 4108337 - copy in data for get socket option */
				error = sooptcopyin(sopt, data, sopt->sopt_valsize, sopt->sopt_valsize);
			}
			len = sopt->sopt_valsize;
			/* handler may block: drop the socket lock */
			socket_unlock(so, 0);
			error = (*kctl->getopt)(kcb->kctl, kcb->unit, kcb->userdata, sopt->sopt_name,
						data, &len);
			socket_lock(so, 0);
			if (error == 0) {
				if (data != NULL)
					error = sooptcopyout(sopt, data, len);
				else
					/* size probe: report the required length only */
					sopt->sopt_valsize = len;
			}
			if (data != NULL)
				FREE(data, M_TEMP);
			break;
	}
	return error;
}
660
/*
 * ioctl handler for kernel control sockets.
 * CTLIOCGCOUNT returns the number of registered controllers;
 * CTLIOCGINFO maps a controller name to its id.
 *
 * NOTE(review): strlen() is run on ctl_info->ctl_name before the length
 * check -- this assumes the sockio layer guarantees the buffer is
 * NUL-terminated; confirm.
 */
static int
ctl_ioctl(__unused struct socket *so, u_long cmd, caddr_t data,
		  __unused struct ifnet *ifp, __unused struct proc *p)
{
	int error = ENOTSUP;

	switch (cmd) {
		/* get the number of controllers */
		case CTLIOCGCOUNT: {
			struct kctl *kctl;
			int n = 0;

			lck_mtx_lock(ctl_mtx);
			TAILQ_FOREACH(kctl, &ctl_head, next)
				n++;
			lck_mtx_unlock(ctl_mtx);

			*(u_int32_t *)data = n;
			error = 0;
			break;
		}
		case CTLIOCGINFO: {
			struct ctl_info *ctl_info = (struct ctl_info *)data;
			struct kctl *kctl = 0;
			size_t name_len = strlen(ctl_info->ctl_name);

			if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) {
				error = EINVAL;
				break;
			}
			lck_mtx_lock(ctl_mtx);
			kctl = ctl_find_by_name(ctl_info->ctl_name);
			lck_mtx_unlock(ctl_mtx);
			if (kctl == 0) {
				error = ENOENT;
				break;
			}
			ctl_info->ctl_id = kctl->id;
			error = 0;
			break;
		}

		/* add controls to get list of NKEs */

	}

	return error;
}
709
710 /*
711 * Register/unregister a NKE
712 */
713 errno_t
714 ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref)
715 {
716 struct kctl *kctl = 0;
717 u_int32_t id = -1;
718 u_int32_t n;
719 size_t name_len;
720
721 if (userkctl == NULL) /* sanity check */
722 return(EINVAL);
723 if (userkctl->ctl_connect == NULL)
724 return(EINVAL);
725 name_len = strlen(userkctl->ctl_name);
726 if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME)
727 return(EINVAL);
728
729 MALLOC(kctl, struct kctl *, sizeof(*kctl), M_TEMP, M_WAITOK);
730 if (kctl == NULL)
731 return(ENOMEM);
732 bzero((char *)kctl, sizeof(*kctl));
733
734 lck_mtx_lock(ctl_mtx);
735
736 if ((userkctl->ctl_flags & CTL_FLAG_REG_ID_UNIT) == 0) {
737 if (ctl_find_by_name(userkctl->ctl_name) != NULL) {
738 lck_mtx_unlock(ctl_mtx);
739 FREE(kctl, M_TEMP);
740 return(EEXIST);
741 }
742 for (n = 0, id = ctl_last_id + 1; n < ctl_max; id++, n++) {
743 if (id == 0) {
744 n--;
745 continue;
746 }
747 if (ctl_find_by_id(id) == 0)
748 break;
749 }
750 if (id == ctl_max) {
751 lck_mtx_unlock(ctl_mtx);
752 FREE(kctl, M_TEMP);
753 return(ENOBUFS);
754 }
755 userkctl->ctl_id =id;
756 kctl->id = id;
757 kctl->reg_unit = -1;
758 } else {
759 if (ctl_find_by_id_unit(userkctl->ctl_id, userkctl->ctl_unit) != NULL) {
760 lck_mtx_unlock(ctl_mtx);
761 FREE(kctl, M_TEMP);
762 return(EEXIST);
763 }
764 kctl->id = userkctl->ctl_id;
765 kctl->reg_unit = userkctl->ctl_unit;
766 }
767 strcpy(kctl->name, userkctl->ctl_name);
768 kctl->flags = userkctl->ctl_flags;
769
770 /* Let the caller know the default send and receive sizes */
771 if (userkctl->ctl_sendsize == 0)
772 userkctl->ctl_sendsize = CTL_SENDSIZE;
773 kctl->sendbufsize = userkctl->ctl_sendsize;
774
775 if (kctl->recvbufsize == 0)
776 userkctl->ctl_recvsize = CTL_RECVSIZE;
777 kctl->recvbufsize = userkctl->ctl_recvsize;
778
779 kctl->connect = userkctl->ctl_connect;
780 kctl->disconnect = userkctl->ctl_disconnect;
781 kctl->send = userkctl->ctl_send;
782 kctl->setopt = userkctl->ctl_setopt;
783 kctl->getopt = userkctl->ctl_getopt;
784
785 TAILQ_INIT(&kctl->kcb_head);
786
787 TAILQ_INSERT_TAIL(&ctl_head, kctl, next);
788 ctl_max++;
789
790 lck_mtx_unlock(ctl_mtx);
791
792 *kctlref = kctl;
793
794 ctl_post_msg(KEV_CTL_REGISTERED, kctl->id);
795 return(0);
796 }
797
798 errno_t
799 ctl_deregister(void *kctlref)
800 {
801 struct kctl *kctl;
802
803 if (kctlref == NULL) /* sanity check */
804 return(EINVAL);
805
806 lck_mtx_lock(ctl_mtx);
807 TAILQ_FOREACH(kctl, &ctl_head, next) {
808 if (kctl == (struct kctl *)kctlref)
809 break;
810 }
811 if (kctl != (struct kctl *)kctlref) {
812 lck_mtx_unlock(ctl_mtx);
813 return EINVAL;
814 }
815 if (!TAILQ_EMPTY(&kctl->kcb_head)) {
816 lck_mtx_unlock(ctl_mtx);
817 return EBUSY;
818 }
819
820 TAILQ_REMOVE(&ctl_head, kctl, next);
821 ctl_max--;
822
823 lck_mtx_unlock(ctl_mtx);
824
825 ctl_post_msg(KEV_CTL_DEREGISTERED, kctl->id);
826 FREE(kctl, M_TEMP);
827 return(0);
828 }
829
830 /*
831 * Must be called with global lock taked
832 */
833 static struct kctl *
834 ctl_find_by_id(u_int32_t id)
835 {
836 struct kctl *kctl;
837
838 TAILQ_FOREACH(kctl, &ctl_head, next)
839 if (kctl->id == id)
840 return kctl;
841
842 return NULL;
843 }
844
845 /*
846 * Must be called with global ctl_mtx lock taked
847 */
848 static struct kctl *
849 ctl_find_by_name(const char *name)
850 {
851 struct kctl *kctl;
852
853 TAILQ_FOREACH(kctl, &ctl_head, next)
854 if (strcmp(kctl->name, name) == 0)
855 return kctl;
856
857 return NULL;
858 }
859
860 /*
861 * Must be called with global ctl_mtx lock taked
862 *
863 */
864 static struct kctl *
865 ctl_find_by_id_unit(u_int32_t id, u_int32_t unit)
866 {
867 struct kctl *kctl;
868
869 TAILQ_FOREACH(kctl, &ctl_head, next) {
870 if (kctl->id == id && (kctl->flags & CTL_FLAG_REG_ID_UNIT) == 0)
871 return kctl;
872 else if (kctl->id == id && kctl->reg_unit == unit)
873 return kctl;
874 }
875 return NULL;
876 }
877
878 /*
879 * Must be called with kernel controller lock taken
880 */
881 static struct ctl_cb *
882 kcb_find(struct kctl *kctl, u_int32_t unit)
883 {
884 struct ctl_cb *kcb;
885
886 TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
887 if ((kcb->unit == unit))
888 return kcb;
889
890 return NULL;
891 }
892
/*
 * Post a KEV_CTL_SUBCLASS kernel event (registered/deregistered) for
 * controller 'id'.  Must be called without any lock held.
 */
static void
ctl_post_msg(u_long event_code, u_int32_t id)
{
	struct ctl_event_data ctl_ev_data;
	struct kev_msg ev_msg;

	ev_msg.vendor_code = KEV_VENDOR_APPLE;

	ev_msg.kev_class = KEV_SYSTEM_CLASS;
	ev_msg.kev_subclass = KEV_CTL_SUBCLASS;
	ev_msg.event_code = event_code;

	/* common nke subclass data */
	bzero(&ctl_ev_data, sizeof(ctl_ev_data));
	ctl_ev_data.ctl_id = id;
	ev_msg.dv[0].data_ptr = &ctl_ev_data;
	ev_msg.dv[0].data_length = sizeof(ctl_ev_data);

	/* zero length terminates the data vector */
	ev_msg.dv[1].data_length = 0;

	kev_post_msg(&ev_msg);
}
918
919 static int
920 ctl_lock(struct socket *so, int refcount, int lr)
921 {
922 int lr_saved;
923 #ifdef __ppc__
924 if (lr == 0) {
925 __asm__ volatile("mflr %0" : "=r" (lr_saved));
926 }
927 else lr_saved = lr;
928 #endif
929
930 if (so->so_pcb) {
931 lck_mtx_lock(((struct ctl_cb *)so->so_pcb)->mtx);
932 } else {
933 panic("ctl_lock: so=%x NO PCB! lr=%x\n", so, lr_saved);
934 lck_mtx_lock(so->so_proto->pr_domain->dom_mtx);
935 }
936
937 if (so->so_usecount < 0)
938 panic("ctl_lock: so=%x so_pcb=%x lr=%x ref=%x\n",
939 so, so->so_pcb, lr_saved, so->so_usecount);
940
941 if (refcount)
942 so->so_usecount++;
943 so->reserved3 = (void *)lr_saved;
944 return (0);
945 }
946
947 static int
948 ctl_unlock(struct socket *so, int refcount, int lr)
949 {
950 int lr_saved;
951 lck_mtx_t * mutex_held;
952
953 #ifdef __ppc__
954 if (lr == 0) {
955 __asm__ volatile("mflr %0" : "=r" (lr_saved));
956 }
957 else lr_saved = lr;
958 #endif
959
960 #ifdef MORE_KCTLLOCK_DEBUG
961 printf("ctl_unlock: so=%x sopcb=%x lock=%x ref=%x lr=%x\n",
962 so, so->so_pcb, ((struct ctl_cb *)so->so_pcb)->mtx, so->so_usecount, lr_saved);
963 #endif
964 if (refcount)
965 so->so_usecount--;
966
967 if (so->so_usecount < 0)
968 panic("ctl_unlock: so=%x usecount=%x\n", so, so->so_usecount);
969 if (so->so_pcb == NULL) {
970 panic("ctl_unlock: so=%x NO PCB usecount=%x lr=%x\n", so, so->so_usecount, lr_saved);
971 mutex_held = so->so_proto->pr_domain->dom_mtx;
972 } else {
973 mutex_held = ((struct ctl_cb *)so->so_pcb)->mtx;
974 }
975 lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
976 lck_mtx_unlock(mutex_held);
977 so->reserved4 = (void *)lr_saved;
978
979 if (so->so_usecount == 0)
980 ctl_sofreelastref(so);
981
982 return (0);
983 }
984
985 static lck_mtx_t *
986 ctl_getlock(struct socket *so, __unused int locktype)
987 {
988 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
989
990 if (so->so_pcb) {
991 if (so->so_usecount < 0)
992 panic("ctl_getlock: so=%x usecount=%x\n", so, so->so_usecount);
993 return(kcb->mtx);
994 } else {
995 panic("ctl_getlock: so=%x NULL so_pcb\n", so);
996 return (so->so_proto->pr_domain->dom_mtx);
997 }
998 }