bsd/kern/kern_control.c (xnu-792.12.6)
1 /*
2 * Copyright (c) 1999-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30
31 /*
32  * Kernel Control domain - allows control connections to kernel extensions
33  * and to read/write data over them.
34 *
35 * Vincent Lubet, 040506
36 * Christophe Allie, 010928
37 * Justin C. Walker, 990319
38 */
39
40 #include <sys/types.h>
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/syslog.h>
44 #include <sys/socket.h>
45 #include <sys/socketvar.h>
46 #include <sys/protosw.h>
47 #include <sys/domain.h>
48 #include <sys/malloc.h>
49 #include <sys/mbuf.h>
50 #include <sys/sys_domain.h>
51 #include <sys/kern_event.h>
52 #include <sys/kern_control.h>
53 #include <net/if_var.h>
54
55 #include <mach/vm_types.h>
56 #include <mach/kmod.h>
57
58 #include <kern/thread.h>
59
60 /*
61  * Default send and receive buffer sizes
62 */
63
64 #define CTL_SENDSIZE (2 * 1024) /* default buffer size */
65 #define CTL_RECVSIZE (8 * 1024) /* default buffer size */
66
67 /*
68  * Globals for the registered kernel controls
69 */
70
71 static u_int32_t ctl_last_id = 0;
72 static u_int32_t ctl_max = 256;
73 static u_int32_t ctl_maxunit = 65536;
74 static lck_grp_attr_t *ctl_lck_grp_attr = 0;
75 static lck_attr_t *ctl_lck_attr = 0;
76 static lck_grp_t *ctl_lck_grp = 0;
77 static lck_mtx_t *ctl_mtx;
78
79 /*
80  * internal structure maintained for each registered controller
81 */
82
83 struct ctl_cb;
84
85 struct kctl
86 {
87 TAILQ_ENTRY(kctl) next; /* controller chain */
88
89 /* controller information provided when registering */
90 char name[MAX_KCTL_NAME]; /* unique nke identifier, provided by DTS */
91 u_int32_t id;
92 u_int32_t reg_unit;
93
94 /* misc communication information */
95 u_int32_t flags; /* support flags */
96 u_int32_t recvbufsize; /* request more than the default buffer size */
97 u_int32_t sendbufsize; /* request more than the default buffer size */
98
99 /* Dispatch functions */
100 ctl_connect_func connect; /* Make contact */
101 ctl_disconnect_func disconnect; /* Break contact */
102 ctl_send_func send; /* Send data to nke */
103 ctl_setopt_func setopt; /* set kctl configuration */
104 ctl_getopt_func getopt; /* get kctl configuration */
105
106 TAILQ_HEAD(, ctl_cb) kcb_head;
107 u_int32_t lastunit;
108 };
109
110 struct ctl_cb {
111 TAILQ_ENTRY(ctl_cb) next; /* controller chain */
112 lck_mtx_t *mtx;
113 struct socket *so; /* controlling socket */
114 struct kctl *kctl; /* back pointer to controller */
115 u_int32_t unit;
116 void *userdata;
117 };
118
119 /* all the controllers are chained */
120 TAILQ_HEAD(, kctl) ctl_head;
121
122 static int ctl_attach(struct socket *, int, struct proc *);
123 static int ctl_detach(struct socket *);
124 static int ctl_sofreelastref(struct socket *so);
125 static int ctl_connect(struct socket *, struct sockaddr *, struct proc *);
126 static int ctl_disconnect(struct socket *);
127 static int ctl_ioctl(struct socket *so, u_long cmd, caddr_t data,
128 struct ifnet *ifp, struct proc *p);
129 static int ctl_send(struct socket *, int, struct mbuf *,
130 struct sockaddr *, struct mbuf *, struct proc *);
131 static int ctl_ctloutput(struct socket *, struct sockopt *);
132 static int ctl_peeraddr(struct socket *so, struct sockaddr **nam);
133
134 static struct kctl *ctl_find_by_id(u_int32_t);
135 static struct kctl *ctl_find_by_name(const char *);
136 static struct kctl *ctl_find_by_id_unit(u_int32_t id, u_int32_t unit);
137
138 static struct ctl_cb *kcb_find(struct kctl *, u_int32_t unit);
139 static void ctl_post_msg(u_long event_code, u_int32_t id);
140
141 static int ctl_lock(struct socket *, int, int);
142 static int ctl_unlock(struct socket *, int, int);
143 static lck_mtx_t * ctl_getlock(struct socket *, int);
144
145 static struct pr_usrreqs ctl_usrreqs =
146 {
147 pru_abort_notsupp, pru_accept_notsupp, ctl_attach, pru_bind_notsupp,
148 ctl_connect, pru_connect2_notsupp, ctl_ioctl, ctl_detach,
149 ctl_disconnect, pru_listen_notsupp, ctl_peeraddr,
150 pru_rcvd_notsupp, pru_rcvoob_notsupp, ctl_send,
151 pru_sense_null, pru_shutdown_notsupp, pru_sockaddr_notsupp,
152 sosend, soreceive, pru_sopoll_notsupp
153 };
154
155 static struct protosw kctlswk_dgram =
156 {
157 SOCK_DGRAM, &systemdomain, SYSPROTO_CONTROL,
158 PR_ATOMIC|PR_CONNREQUIRED|PR_PCBLOCK,
159 NULL, NULL, NULL, ctl_ctloutput,
160 NULL, NULL,
161 NULL, NULL, NULL, NULL, &ctl_usrreqs,
162 ctl_lock, ctl_unlock, ctl_getlock, { 0, 0 } , 0, { 0 }
163 };
164
165 static struct protosw kctlswk_stream =
166 {
167 SOCK_STREAM, &systemdomain, SYSPROTO_CONTROL,
168 PR_CONNREQUIRED|PR_PCBLOCK,
169 NULL, NULL, NULL, ctl_ctloutput,
170 NULL, NULL,
171 NULL, NULL, NULL, NULL, &ctl_usrreqs,
172 ctl_lock, ctl_unlock, ctl_getlock, { 0, 0 } , 0, { 0 }
173 };
174
175
176 /*
177 * Install the protosw's for the Kernel Control manager.
178 */
179 __private_extern__ int
180 kern_control_init(void)
181 {
182 int error = 0;
183
184 ctl_lck_grp_attr = lck_grp_attr_alloc_init();
185 if (ctl_lck_grp_attr == 0) {
186 printf(": lck_grp_attr_alloc_init failed\n");
187 error = ENOMEM;
188 goto done;
189 }
190 lck_grp_attr_setdefault(ctl_lck_grp_attr);
191
192 ctl_lck_grp = lck_grp_alloc_init("Kernel Control Protocol", ctl_lck_grp_attr);
193 if (ctl_lck_grp == 0) {
194 printf("kern_control_init: lck_grp_alloc_init failed\n");
195 error = ENOMEM;
196 goto done;
197 }
198
199 ctl_lck_attr = lck_attr_alloc_init();
200 if (ctl_lck_attr == 0) {
201 printf("kern_control_init: lck_attr_alloc_init failed\n");
202 error = ENOMEM;
203 goto done;
204 }
205 lck_attr_setdefault(ctl_lck_attr);
206
207 ctl_mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
208 if (ctl_mtx == 0) {
209 printf("kern_control_init: lck_mtx_alloc_init failed\n");
210 error = ENOMEM;
211 goto done;
212 }
213 TAILQ_INIT(&ctl_head);
214
215 error = net_add_proto(&kctlswk_dgram, &systemdomain);
216 if (error) {
217 log(LOG_WARNING, "kern_control_init: net_add_proto dgram failed (%d)\n", error);
218 }
219 error = net_add_proto(&kctlswk_stream, &systemdomain);
220 if (error) {
221 log(LOG_WARNING, "kern_control_init: net_add_proto stream failed (%d)\n", error);
222 }
223
224 done:
225 if (error != 0) {
226 if (ctl_mtx) {
227 lck_mtx_free(ctl_mtx, ctl_lck_grp);
228 ctl_mtx = 0;
229 }
230 if (ctl_lck_grp) {
231 lck_grp_free(ctl_lck_grp);
232 ctl_lck_grp = 0;
233 }
234 if (ctl_lck_grp_attr) {
235 lck_grp_attr_free(ctl_lck_grp_attr);
236 ctl_lck_grp_attr = 0;
237 }
238 if (ctl_lck_attr) {
239 lck_attr_free(ctl_lck_attr);
240 ctl_lck_attr = 0;
241 }
242 }
243 return error;
244 }
245
246 static void
247 kcb_delete(struct ctl_cb *kcb)
248 {
249 if (kcb != 0) {
250 if (kcb->mtx != 0)
251 lck_mtx_free(kcb->mtx, ctl_lck_grp);
252 FREE(kcb, M_TEMP);
253 }
254 }
255
256
257 /*
258 * Kernel Controller user-request functions
259 * attach function must exist and succeed
260 * detach not necessary
261 * we need a pcb for the per socket mutex
262 */
263 static int
264 ctl_attach(__unused struct socket *so, __unused int proto, __unused struct proc *p)
265 {
266 int error = 0;
267 struct ctl_cb *kcb = 0;
268
269 MALLOC(kcb, struct ctl_cb *, sizeof(struct ctl_cb), M_TEMP, M_WAITOK);
270 if (kcb == NULL) {
271 error = ENOMEM;
272 goto quit;
273 }
274 bzero(kcb, sizeof(struct ctl_cb));
275
276 kcb->mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
277 if (kcb->mtx == NULL) {
278 error = ENOMEM;
279 goto quit;
280 }
281 kcb->so = so;
282 so->so_pcb = (caddr_t)kcb;
283
284 quit:
285 if (error != 0) {
286 kcb_delete(kcb);
287 kcb = 0;
288 }
289 return error;
290 }
291
292 static int
293 ctl_sofreelastref(struct socket *so)
294 {
295 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
296
297 so->so_pcb = 0;
298
299 if (kcb != 0) {
300 struct kctl *kctl;
301 if ((kctl = kcb->kctl) != 0) {
302 lck_mtx_lock(ctl_mtx);
303 TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
304                         lck_mtx_unlock(ctl_mtx);
305 }
306 kcb_delete(kcb);
307 }
308 return 0;
309 }
310
311 static int
312 ctl_detach(struct socket *so)
313 {
314 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
315
316 if (kcb == 0)
317 return 0;
318
319 soisdisconnected(so);
320 so->so_flags |= SOF_PCBCLEARING;
321 return 0;
322 }
323
324
325 static int
326 ctl_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
327 {
328 struct kctl *kctl;
329 int error = 0;
330 struct sockaddr_ctl sa;
331 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
332
333 if (kcb == 0)
334 panic("ctl_connect so_pcb null\n");
335
336 if (nam->sa_len != sizeof(struct sockaddr_ctl))
337 return(EINVAL);
338
339 bcopy(nam, &sa, sizeof(struct sockaddr_ctl));
340
341 lck_mtx_lock(ctl_mtx);
342 kctl = ctl_find_by_id_unit(sa.sc_id, sa.sc_unit);
343 if (kctl == NULL) {
344 lck_mtx_unlock(ctl_mtx);
345 return ENOENT;
346 }
347
348 if (((kctl->flags & CTL_FLAG_REG_SOCK_STREAM) && (so->so_type != SOCK_STREAM)) ||
349 (!(kctl->flags & CTL_FLAG_REG_SOCK_STREAM) && (so->so_type != SOCK_DGRAM))) {
350 lck_mtx_unlock(ctl_mtx);
351 return EPROTOTYPE;
352 }
353
354 if (kctl->flags & CTL_FLAG_PRIVILEGED) {
355 if (p == 0) {
356 lck_mtx_unlock(ctl_mtx);
357 return(EINVAL);
358 }
359 if ((error = proc_suser(p))) {
360 lck_mtx_unlock(ctl_mtx);
361 return error;
362 }
363 }
364
365 if ((kctl->flags & CTL_FLAG_REG_ID_UNIT) || sa.sc_unit != 0) {
366 if (kcb_find(kctl, sa.sc_unit) != NULL) {
367 lck_mtx_unlock(ctl_mtx);
368 return EBUSY;
369 }
370 } else {
371 u_int32_t unit = kctl->lastunit + 1;
372
373 while (1) {
374 if (unit == ctl_maxunit)
375 unit = 1;
376 if (kcb_find(kctl, unit) == NULL) {
377 kctl->lastunit = sa.sc_unit = unit;
378 break;
379 }
380 if (unit++ == kctl->lastunit) {
381 lck_mtx_unlock(ctl_mtx);
382 return EBUSY;
383 }
384 }
385 }
386
387 kcb->unit = sa.sc_unit;
388 kcb->kctl = kctl;
389 TAILQ_INSERT_TAIL(&kctl->kcb_head, kcb, next);
390 lck_mtx_unlock(ctl_mtx);
391
392 error = soreserve(so, kctl->sendbufsize, kctl->recvbufsize);
393 if (error)
394 goto done;
395 soisconnecting(so);
396
397 socket_unlock(so, 0);
398 error = (*kctl->connect)(kctl, &sa, &kcb->userdata);
399 socket_lock(so, 0);
400 if (error)
401 goto done;
402
403 soisconnected(so);
404
405 done:
406 if (error) {
407 soisdisconnected(so);
408 lck_mtx_lock(ctl_mtx);
409 kcb->kctl = 0;
410 kcb->unit = 0;
411 TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
412 lck_mtx_unlock(ctl_mtx);
413 }
414 return error;
415 }
416
417 static int
418 ctl_disconnect(struct socket *so)
419 {
420 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
421
422 if ((kcb = (struct ctl_cb *)so->so_pcb)) {
423 struct kctl *kctl = kcb->kctl;
424
425 if (kctl && kctl->disconnect) {
426 socket_unlock(so, 0);
427 (*kctl->disconnect)(kctl, kcb->unit, kcb->userdata);
428 socket_lock(so, 0);
429 }
430 lck_mtx_lock(ctl_mtx);
431 kcb->kctl = 0;
432 kcb->unit = 0;
433 TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
434 soisdisconnected(so);
435 lck_mtx_unlock(ctl_mtx);
436 }
437 return 0;
438 }
439
440 static int
441 ctl_peeraddr(struct socket *so, struct sockaddr **nam)
442 {
443 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
444 struct kctl *kctl;
445 struct sockaddr_ctl sc;
446
447 if (kcb == NULL) /* sanity check */
448 return(ENOTCONN);
449
450 if ((kctl = kcb->kctl) == NULL)
451 return(EINVAL);
452
453 bzero(&sc, sizeof(struct sockaddr_ctl));
454 sc.sc_len = sizeof(struct sockaddr_ctl);
455 sc.sc_family = AF_SYSTEM;
456 sc.ss_sysaddr = AF_SYS_CONTROL;
457 sc.sc_id = kctl->id;
458 sc.sc_unit = kcb->unit;
459
460 *nam = dup_sockaddr((struct sockaddr *)&sc, 1);
461
462 return 0;
463 }
464
465 static int
466 ctl_send(struct socket *so, int flags, struct mbuf *m,
467 __unused struct sockaddr *addr, __unused struct mbuf *control,
468 __unused struct proc *p)
469 {
470 int error = 0;
471 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
472 struct kctl *kctl;
473
474 if (kcb == NULL) /* sanity check */
475 return(ENOTCONN);
476
477 if ((kctl = kcb->kctl) == NULL)
478 return(EINVAL);
479
480 if (kctl->send) {
481 socket_unlock(so, 0);
482 error = (*kctl->send)(kctl, kcb->unit, kcb->userdata, m, flags);
483 socket_lock(so, 0);
484 }
485 return error;
486 }
487
488 errno_t
489 ctl_enqueuembuf(void *kctlref, u_int32_t unit, struct mbuf *m, u_int32_t flags)
490 {
491 struct ctl_cb *kcb;
492 struct socket *so;
493 errno_t error = 0;
494 struct kctl *kctl = (struct kctl *)kctlref;
495
496 if (kctl == NULL)
497 return EINVAL;
498
499 kcb = kcb_find(kctl, unit);
500 if (kcb == NULL)
501 return EINVAL;
502
503 so = (struct socket *)kcb->so;
504 if (so == NULL)
505 return EINVAL;
506
507 socket_lock(so, 1);
508 if (sbspace(&so->so_rcv) < m->m_pkthdr.len) {
509 error = ENOBUFS;
510 goto bye;
511 }
512 if ((flags & CTL_DATA_EOR))
513 m->m_flags |= M_EOR;
514 if (sbappend(&so->so_rcv, m) && (flags & CTL_DATA_NOWAKEUP) == 0)
515 sorwakeup(so);
516 bye:
517 socket_unlock(so, 1);
518 return error;
519 }
520
521 errno_t
522 ctl_enqueuedata(void *kctlref, u_int32_t unit, void *data, size_t len, u_int32_t flags)
523 {
524 struct ctl_cb *kcb;
525 struct socket *so;
526 struct mbuf *m;
527 errno_t error = 0;
528 struct kctl *kctl = (struct kctl *)kctlref;
529 unsigned int num_needed;
530 struct mbuf *n;
531 size_t curlen = 0;
532
533 if (kctlref == NULL)
534 return EINVAL;
535
536 kcb = kcb_find(kctl, unit);
537 if (kcb == NULL)
538 return EINVAL;
539
540 so = (struct socket *)kcb->so;
541 if (so == NULL)
542 return EINVAL;
543
544 socket_lock(so, 1);
545 if ((size_t)sbspace(&so->so_rcv) < len) {
546 error = ENOBUFS;
547 goto bye;
548 }
549
550 num_needed = 1;
551 m = m_allocpacket_internal(&num_needed, len, NULL, M_NOWAIT, 1, 0);
552 if (m == NULL) {
553 printf("ctl_enqueuedata: m_allocpacket_internal(%lu) failed\n", len);
554 error = ENOBUFS;
555 goto bye;
556 }
557
558 for (n = m; n != NULL; n = n->m_next) {
559 size_t mlen = mbuf_maxlen(n);
560
561 if (mlen + curlen > len)
562 mlen = len - curlen;
563 n->m_len = mlen;
564 bcopy((char *)data + curlen, n->m_data, mlen);
565 curlen += mlen;
566 }
567 mbuf_pkthdr_setlen(m, curlen);
568
569 if ((flags & CTL_DATA_EOR))
570 m->m_flags |= M_EOR;
571 if (sbappend(&so->so_rcv, m) && (flags & CTL_DATA_NOWAKEUP) == 0)
572 sorwakeup(so);
573 bye:
574 socket_unlock(so, 1);
575 return error;
576 }
577
578
579 errno_t
580 ctl_getenqueuespace(kern_ctl_ref kctlref, u_int32_t unit, size_t *space)
581 {
582 struct ctl_cb *kcb;
583 struct kctl *kctl = (struct kctl *)kctlref;
584 struct socket *so;
585
586 if (kctlref == NULL || space == NULL)
587 return EINVAL;
588
589 kcb = kcb_find(kctl, unit);
590 if (kcb == NULL)
591 return EINVAL;
592
593 so = (struct socket *)kcb->so;
594 if (so == NULL)
595 return EINVAL;
596
597 socket_lock(so, 1);
598 *space = sbspace(&so->so_rcv);
599 socket_unlock(so, 1);
600
601 return 0;
602 }
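
/*
 * Illustrative sketch (compiled out): how a registered controller might use
 * ctl_getenqueuespace() and ctl_enqueuedata() above to push data to its
 * connected client.  The "my_ctl_push" name is hypothetical; ctlref and unit
 * are the values the controller obtained at registration and connect time.
 */
#if 0
static errno_t
my_ctl_push(kern_ctl_ref ctlref, u_int32_t unit, void *buf, size_t buflen)
{
	size_t	space = 0;
	errno_t	err;

	/* How much room is left in the client's socket receive buffer? */
	err = ctl_getenqueuespace(ctlref, unit, &space);
	if (err != 0)
		return err;
	if (space < buflen)
		return ENOBUFS;		/* caller may retry later */

	/* Copy buf into the receive buffer and wake up the client */
	return ctl_enqueuedata(ctlref, unit, buf, buflen, CTL_DATA_EOR);
}
#endif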
603
604 static int
605 ctl_ctloutput(struct socket *so, struct sockopt *sopt)
606 {
607 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
608 struct kctl *kctl;
609 int error = 0;
610 void *data;
611 size_t len;
612
613 if (sopt->sopt_level != SYSPROTO_CONTROL) {
614 return(EINVAL);
615 }
616
617 if (kcb == NULL) /* sanity check */
618 return(ENOTCONN);
619
620 if ((kctl = kcb->kctl) == NULL)
621 return(EINVAL);
622
623 switch (sopt->sopt_dir) {
624 case SOPT_SET:
625 if (kctl->setopt == NULL)
626 return(ENOTSUP);
627 MALLOC(data, void *, sopt->sopt_valsize, M_TEMP, M_WAITOK);
628 if (data == NULL)
629 return(ENOMEM);
630 error = sooptcopyin(sopt, data, sopt->sopt_valsize, sopt->sopt_valsize);
631 if (error == 0) {
632 socket_unlock(so, 0);
633 error = (*kctl->setopt)(kcb->kctl, kcb->unit, kcb->userdata, sopt->sopt_name,
634 data, sopt->sopt_valsize);
635 socket_lock(so, 0);
636 }
637 FREE(data, M_TEMP);
638 break;
639
640 case SOPT_GET:
641 if (kctl->getopt == NULL)
642 return(ENOTSUP);
643 data = NULL;
644 if (sopt->sopt_valsize && sopt->sopt_val) {
645 MALLOC(data, void *, sopt->sopt_valsize, M_TEMP, M_WAITOK);
646 if (data == NULL)
647 return(ENOMEM);
648 /* 4108337 - copy in data for get socket option */
649 error = sooptcopyin(sopt, data, sopt->sopt_valsize, sopt->sopt_valsize);
650 }
651 len = sopt->sopt_valsize;
652 socket_unlock(so, 0);
653 error = (*kctl->getopt)(kcb->kctl, kcb->unit, kcb->userdata, sopt->sopt_name,
654 data, &len);
655 socket_lock(so, 0);
656 if (error == 0) {
657 if (data != NULL)
658 error = sooptcopyout(sopt, data, len);
659 else
660 sopt->sopt_valsize = len;
661 }
662 if (data != NULL)
663 FREE(data, M_TEMP);
664 break;
665 }
666 return error;
667 }
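
/*
 * Illustrative userland sketch (compiled out): socket options at level
 * SYSPROTO_CONTROL are routed by ctl_ctloutput() above to the controller's
 * setopt/getopt callbacks.  MY_KCTL_OPT_STATS is a hypothetical option
 * number that the controller would have to understand.
 */
#if 0
#include <sys/socket.h>
#include <sys/kern_control.h>

#define MY_KCTL_OPT_STATS	1	/* hypothetical controller-defined option */

static int
get_ctl_stats(int ctl_fd, void *buf, socklen_t *buflen)
{
	/* ends up in the controller's getopt callback */
	return getsockopt(ctl_fd, SYSPROTO_CONTROL, MY_KCTL_OPT_STATS, buf, buflen);
}

static int
set_ctl_stats(int ctl_fd, const void *buf, socklen_t buflen)
{
	/* ends up in the controller's setopt callback */
	return setsockopt(ctl_fd, SYSPROTO_CONTROL, MY_KCTL_OPT_STATS, buf, buflen);
}
#endif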
668
669 static int
670 ctl_ioctl(__unused struct socket *so, u_long cmd, caddr_t data,
671 __unused struct ifnet *ifp, __unused struct proc *p)
672 {
673 int error = ENOTSUP;
674
675 switch (cmd) {
676 /* get the number of controllers */
677 case CTLIOCGCOUNT: {
678 struct kctl *kctl;
679 int n = 0;
680
681 lck_mtx_lock(ctl_mtx);
682 TAILQ_FOREACH(kctl, &ctl_head, next)
683 n++;
684 lck_mtx_unlock(ctl_mtx);
685
686 *(u_int32_t *)data = n;
687 error = 0;
688 break;
689 }
690 case CTLIOCGINFO: {
691 struct ctl_info *ctl_info = (struct ctl_info *)data;
692 struct kctl *kctl = 0;
693 size_t name_len = strlen(ctl_info->ctl_name);
694
695 if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) {
696 error = EINVAL;
697 break;
698 }
699 lck_mtx_lock(ctl_mtx);
700 kctl = ctl_find_by_name(ctl_info->ctl_name);
701 lck_mtx_unlock(ctl_mtx);
702 if (kctl == 0) {
703 error = ENOENT;
704 break;
705 }
706 ctl_info->ctl_id = kctl->id;
707 error = 0;
708 break;
709 }
710
711 /* add controls to get list of NKEs */
712
713 }
714
715 return error;
716 }
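
/*
 * Illustrative userland sketch (compiled out): a client resolves a control
 * name to an id with CTLIOCGINFO (handled by ctl_ioctl() above), then
 * connects with a sockaddr_ctl (handled by ctl_connect()).  The control
 * name "com.example.mykctl" is hypothetical.
 */
#if 0
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/kern_control.h>
#include <sys/sys_domain.h>
#include <string.h>
#include <unistd.h>

static int
open_kctl(const char *name)
{
	struct ctl_info		info;
	struct sockaddr_ctl	addr;
	int			fd;

	fd = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);
	if (fd < 0)
		return -1;

	/* translate the control name into a control id */
	memset(&info, 0, sizeof(info));
	strlcpy(info.ctl_name, name, sizeof(info.ctl_name));
	if (ioctl(fd, CTLIOCGINFO, &info) == -1) {
		close(fd);
		return -1;
	}

	/* sc_unit == 0 lets ctl_connect() pick a free unit for controls
	   not registered with CTL_FLAG_REG_ID_UNIT */
	memset(&addr, 0, sizeof(addr));
	addr.sc_len = sizeof(addr);
	addr.sc_family = AF_SYSTEM;
	addr.ss_sysaddr = AF_SYS_CONTROL;
	addr.sc_id = info.ctl_id;
	addr.sc_unit = 0;
	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) == -1) {
		close(fd);
		return -1;
	}

	/* send()/recv() on fd now go through ctl_send()/ctl_enqueue*() */
	return fd;
}
#endif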
717
718 /*
719 * Register/unregister a NKE
720 */
721 errno_t
722 ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref)
723 {
724 struct kctl *kctl = 0;
725 u_int32_t id = -1;
726 u_int32_t n;
727 size_t name_len;
728
729 if (userkctl == NULL) /* sanity check */
730 return(EINVAL);
731 if (userkctl->ctl_connect == NULL)
732 return(EINVAL);
733 name_len = strlen(userkctl->ctl_name);
734 if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME)
735 return(EINVAL);
736
737 MALLOC(kctl, struct kctl *, sizeof(*kctl), M_TEMP, M_WAITOK);
738 if (kctl == NULL)
739 return(ENOMEM);
740 bzero((char *)kctl, sizeof(*kctl));
741
742 lck_mtx_lock(ctl_mtx);
743
744 if ((userkctl->ctl_flags & CTL_FLAG_REG_ID_UNIT) == 0) {
745 if (ctl_find_by_name(userkctl->ctl_name) != NULL) {
746 lck_mtx_unlock(ctl_mtx);
747 FREE(kctl, M_TEMP);
748 return(EEXIST);
749 }
750 for (n = 0, id = ctl_last_id + 1; n < ctl_max; id++, n++) {
751 if (id == 0) {
752 n--;
753 continue;
754 }
755 if (ctl_find_by_id(id) == 0)
756 break;
757 }
758 if (id == ctl_max) {
759 lck_mtx_unlock(ctl_mtx);
760 FREE(kctl, M_TEMP);
761 return(ENOBUFS);
762 }
763                 userkctl->ctl_id = id;
764 kctl->id = id;
765 kctl->reg_unit = -1;
766 } else {
767 if (ctl_find_by_id_unit(userkctl->ctl_id, userkctl->ctl_unit) != NULL) {
768 lck_mtx_unlock(ctl_mtx);
769 FREE(kctl, M_TEMP);
770 return(EEXIST);
771 }
772 kctl->id = userkctl->ctl_id;
773 kctl->reg_unit = userkctl->ctl_unit;
774 }
775 strcpy(kctl->name, userkctl->ctl_name);
776 kctl->flags = userkctl->ctl_flags;
777
778 /* Let the caller know the default send and receive sizes */
779 if (userkctl->ctl_sendsize == 0)
780 userkctl->ctl_sendsize = CTL_SENDSIZE;
781 kctl->sendbufsize = userkctl->ctl_sendsize;
782
783 if (userkctl->ctl_recvsize == 0)
784 userkctl->ctl_recvsize = CTL_RECVSIZE;
785 kctl->recvbufsize = userkctl->ctl_recvsize;
786
787 kctl->connect = userkctl->ctl_connect;
788 kctl->disconnect = userkctl->ctl_disconnect;
789 kctl->send = userkctl->ctl_send;
790 kctl->setopt = userkctl->ctl_setopt;
791 kctl->getopt = userkctl->ctl_getopt;
792
793 TAILQ_INIT(&kctl->kcb_head);
794
795 TAILQ_INSERT_TAIL(&ctl_head, kctl, next);
796 ctl_max++;
797
798 lck_mtx_unlock(ctl_mtx);
799
800 *kctlref = kctl;
801
802 ctl_post_msg(KEV_CTL_REGISTERED, kctl->id);
803 return(0);
804 }
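
/*
 * Illustrative kext-side sketch (compiled out): registering a control with
 * ctl_register() above.  The callback signatures mirror the dispatch calls
 * made in this file (ctl_connect, ctl_send, ctl_ctloutput); the "my_" names
 * and the control name are hypothetical.
 */
#if 0
static kern_ctl_ref my_ctlref;

static errno_t
my_connect(kern_ctl_ref ctlref, struct sockaddr_ctl *sac, void **unitinfo)
{
	/* *unitinfo is handed back to every later callback as "userdata" */
	*unitinfo = NULL;
	return 0;
}

static errno_t
my_disconnect(kern_ctl_ref ctlref, u_int32_t unit, void *unitinfo)
{
	return 0;
}

static errno_t
my_send(kern_ctl_ref ctlref, u_int32_t unit, void *unitinfo,
    mbuf_t m, int flags)
{
	/* data written by the client arrives here; consume or free it */
	mbuf_freem(m);
	return 0;
}

static errno_t
my_register(void)
{
	struct kern_ctl_reg reg;

	bzero(&reg, sizeof(reg));
	strcpy(reg.ctl_name, "com.example.mykctl");	/* hypothetical name */
	reg.ctl_flags = 0;		/* no CTL_FLAG_REG_ID_UNIT: ctl_register picks the id */
	reg.ctl_sendsize = 0;		/* 0 selects the CTL_SENDSIZE default */
	reg.ctl_recvsize = 0;		/* 0 selects the CTL_RECVSIZE default */
	reg.ctl_connect = my_connect;	/* required by ctl_register() */
	reg.ctl_disconnect = my_disconnect;
	reg.ctl_send = my_send;

	return ctl_register(&reg, &my_ctlref);
}
#endif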
805
806 errno_t
807 ctl_deregister(void *kctlref)
808 {
809 struct kctl *kctl;
810
811 if (kctlref == NULL) /* sanity check */
812 return(EINVAL);
813
814 lck_mtx_lock(ctl_mtx);
815 TAILQ_FOREACH(kctl, &ctl_head, next) {
816 if (kctl == (struct kctl *)kctlref)
817 break;
818 }
819 if (kctl != (struct kctl *)kctlref) {
820 lck_mtx_unlock(ctl_mtx);
821 return EINVAL;
822 }
823 if (!TAILQ_EMPTY(&kctl->kcb_head)) {
824 lck_mtx_unlock(ctl_mtx);
825 return EBUSY;
826 }
827
828 TAILQ_REMOVE(&ctl_head, kctl, next);
829 ctl_max--;
830
831 lck_mtx_unlock(ctl_mtx);
832
833 ctl_post_msg(KEV_CTL_DEREGISTERED, kctl->id);
834 FREE(kctl, M_TEMP);
835 return(0);
836 }
837
838 /*
839  * Must be called with the global ctl_mtx lock held
840 */
841 static struct kctl *
842 ctl_find_by_id(u_int32_t id)
843 {
844 struct kctl *kctl;
845
846 TAILQ_FOREACH(kctl, &ctl_head, next)
847 if (kctl->id == id)
848 return kctl;
849
850 return NULL;
851 }
852
853 /*
854  * Must be called with the global ctl_mtx lock held
855 */
856 static struct kctl *
857 ctl_find_by_name(const char *name)
858 {
859 struct kctl *kctl;
860
861 TAILQ_FOREACH(kctl, &ctl_head, next)
862 if (strcmp(kctl->name, name) == 0)
863 return kctl;
864
865 return NULL;
866 }
867
868 /*
869  * Must be called with the global ctl_mtx lock held
870 *
871 */
872 static struct kctl *
873 ctl_find_by_id_unit(u_int32_t id, u_int32_t unit)
874 {
875 struct kctl *kctl;
876
877 TAILQ_FOREACH(kctl, &ctl_head, next) {
878 if (kctl->id == id && (kctl->flags & CTL_FLAG_REG_ID_UNIT) == 0)
879 return kctl;
880 else if (kctl->id == id && kctl->reg_unit == unit)
881 return kctl;
882 }
883 return NULL;
884 }
885
886 /*
887 * Must be called with kernel controller lock taken
888 */
889 static struct ctl_cb *
890 kcb_find(struct kctl *kctl, u_int32_t unit)
891 {
892 struct ctl_cb *kcb;
893
894 TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
895 if ((kcb->unit == unit))
896 return kcb;
897
898 return NULL;
899 }
900
901 /*
902  * Must be called without any locks held
903 */
904 static void
905 ctl_post_msg(u_long event_code, u_int32_t id)
906 {
907 struct ctl_event_data ctl_ev_data;
908 struct kev_msg ev_msg;
909
910 ev_msg.vendor_code = KEV_VENDOR_APPLE;
911
912 ev_msg.kev_class = KEV_SYSTEM_CLASS;
913 ev_msg.kev_subclass = KEV_CTL_SUBCLASS;
914 ev_msg.event_code = event_code;
915
916 /* common nke subclass data */
917 bzero(&ctl_ev_data, sizeof(ctl_ev_data));
918 ctl_ev_data.ctl_id = id;
919 ev_msg.dv[0].data_ptr = &ctl_ev_data;
920 ev_msg.dv[0].data_length = sizeof(ctl_ev_data);
921
922 ev_msg.dv[1].data_length = 0;
923
924 kev_post_msg(&ev_msg);
925 }
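
/*
 * Illustrative userland sketch (compiled out): watching for the
 * KEV_CTL_REGISTERED/KEV_CTL_DEREGISTERED events posted by ctl_post_msg()
 * above, assuming the kernel event socket interface from <sys/kern_event.h>
 * (SYSPROTO_EVENT, SIOCSKEVFILT, struct kern_event_msg).
 */
#if 0
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/kern_event.h>
#include <sys/kern_control.h>
#include <unistd.h>

static int
watch_kctl_events(void)
{
	struct kev_request	req;
	u_int32_t		buf[256];
	int			fd;

	fd = socket(PF_SYSTEM, SOCK_RAW, SYSPROTO_EVENT);
	if (fd < 0)
		return -1;

	/* only deliver the events posted with KEV_CTL_SUBCLASS */
	req.vendor_code = KEV_VENDOR_APPLE;
	req.kev_class = KEV_SYSTEM_CLASS;
	req.kev_subclass = KEV_CTL_SUBCLASS;
	if (ioctl(fd, SIOCSKEVFILT, &req) == -1) {
		close(fd);
		return -1;
	}

	for (;;) {
		struct kern_event_msg *ev = (struct kern_event_msg *)buf;
		ssize_t n = recv(fd, buf, sizeof(buf), 0);

		if (n < (ssize_t)sizeof(struct kern_event_msg))
			break;
		if (ev->event_code == KEV_CTL_REGISTERED ||
		    ev->event_code == KEV_CTL_DEREGISTERED) {
			struct ctl_event_data *cd =
			    (struct ctl_event_data *)&ev->event_data[0];
			/* cd->ctl_id identifies the controller involved */
			(void)cd;
		}
	}
	close(fd);
	return 0;
}
#endif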
926
927 static int
928 ctl_lock(struct socket *so, int refcount, int lr)
929 {
930 int lr_saved;
931 #ifdef __ppc__
932 if (lr == 0) {
933 __asm__ volatile("mflr %0" : "=r" (lr_saved));
934 }
935 else lr_saved = lr;
936 #endif
937
938 if (so->so_pcb) {
939 lck_mtx_lock(((struct ctl_cb *)so->so_pcb)->mtx);
940 } else {
941 panic("ctl_lock: so=%x NO PCB! lr=%x\n", so, lr_saved);
942 lck_mtx_lock(so->so_proto->pr_domain->dom_mtx);
943 }
944
945 if (so->so_usecount < 0)
946 panic("ctl_lock: so=%x so_pcb=%x lr=%x ref=%x\n",
947 so, so->so_pcb, lr_saved, so->so_usecount);
948
949 if (refcount)
950 so->so_usecount++;
951 so->reserved3 = (void *)lr_saved;
952 return (0);
953 }
954
955 static int
956 ctl_unlock(struct socket *so, int refcount, int lr)
957 {
958 int lr_saved;
959 lck_mtx_t * mutex_held;
960
961 #ifdef __ppc__
962 if (lr == 0) {
963 __asm__ volatile("mflr %0" : "=r" (lr_saved));
964 }
965 else lr_saved = lr;
966 #endif
967
968 #ifdef MORE_KCTLLOCK_DEBUG
969 printf("ctl_unlock: so=%x sopcb=%x lock=%x ref=%x lr=%x\n",
970 so, so->so_pcb, ((struct ctl_cb *)so->so_pcb)->mtx, so->so_usecount, lr_saved);
971 #endif
972 if (refcount)
973 so->so_usecount--;
974
975 if (so->so_usecount < 0)
976 panic("ctl_unlock: so=%x usecount=%x\n", so, so->so_usecount);
977 if (so->so_pcb == NULL) {
978 panic("ctl_unlock: so=%x NO PCB usecount=%x lr=%x\n", so, so->so_usecount, lr_saved);
979 mutex_held = so->so_proto->pr_domain->dom_mtx;
980 } else {
981 mutex_held = ((struct ctl_cb *)so->so_pcb)->mtx;
982 }
983 lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
984 lck_mtx_unlock(mutex_held);
985 so->reserved4 = (void *)lr_saved;
986
987 if (so->so_usecount == 0)
988 ctl_sofreelastref(so);
989
990 return (0);
991 }
992
993 static lck_mtx_t *
994 ctl_getlock(struct socket *so, __unused int locktype)
995 {
996 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
997
998 if (so->so_pcb) {
999 if (so->so_usecount < 0)
1000 panic("ctl_getlock: so=%x usecount=%x\n", so, so->so_usecount);
1001 return(kcb->mtx);
1002 } else {
1003 panic("ctl_getlock: so=%x NULL so_pcb\n", so);
1004 return (so->so_proto->pr_domain->dom_mtx);
1005 }
1006 }