/*
 * Copyright (c) 1999-2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * Kernel Control domain - allows control connections to kernel
 * controllers and to read/write data.
 *
 * Vincent Lubet, 040506
 * Christophe Allie, 010928
 * Justin C. Walker, 990319
 */

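/*
 * For context, a user process reaches this code through a
 * PF_SYSTEM/SYSPROTO_CONTROL socket.  A minimal sketch of the client side
 * (the control name "com.example.mycontrol" is a made-up placeholder):
 *
 *	int fd = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);
 *	struct ctl_info info;
 *	struct sockaddr_ctl addr;
 *
 *	bzero(&info, sizeof (info));
 *	strlcpy(info.ctl_name, "com.example.mycontrol",
 *	    sizeof (info.ctl_name));
 *	ioctl(fd, CTLIOCGINFO, &info);	(resolves the name to ctl_id)
 *	bzero(&addr, sizeof (addr));
 *	addr.sc_len = sizeof (addr);
 *	addr.sc_family = AF_SYSTEM;
 *	addr.ss_sysaddr = AF_SYS_CONTROL;
 *	addr.sc_id = info.ctl_id;
 *	addr.sc_unit = 0;		(0 lets ctl_connect pick a unit)
 *	connect(fd, (struct sockaddr *)&addr, sizeof (addr));
 *
 * The connect() drives ctl_connect() below.
 */
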
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/sys_domain.h>
#include <sys/kern_event.h>
#include <sys/kern_control.h>
#include <sys/kauth.h>
#include <sys/sysctl.h>
#include <net/if_var.h>

#include <mach/vm_types.h>

#include <kern/thread.h>

#ifndef ROUNDUP64
#define ROUNDUP64(x) P2ROUNDUP((x), sizeof (u_int64_t))
#endif

#ifndef ADVANCE64
#define ADVANCE64(p, n) (void*)((char *)(p) + ROUNDUP64(n))
#endif

/*
 * Definitions and vars for the default buffer sizes we support
 */

#define CTL_SENDSIZE	(2 * 1024)	/* default buffer size */
#define CTL_RECVSIZE	(8 * 1024)	/* default buffer size */

/*
 * Definitions and vars for the controls we support
 */

static u_int32_t ctl_maxunit = 65536;
static lck_grp_attr_t *ctl_lck_grp_attr = 0;
static lck_attr_t *ctl_lck_attr = 0;
static lck_grp_t *ctl_lck_grp = 0;
static lck_mtx_t *ctl_mtx;

/* all the controllers are chained */
TAILQ_HEAD(kctl_list, kctl) ctl_head;

static int ctl_attach(struct socket *, int, struct proc *);
static int ctl_detach(struct socket *);
static int ctl_sofreelastref(struct socket *so);
static int ctl_connect(struct socket *, struct sockaddr *, struct proc *);
static int ctl_disconnect(struct socket *);
static int ctl_ioctl(struct socket *so, u_long cmd, caddr_t data,
    struct ifnet *ifp, struct proc *p);
static int ctl_send(struct socket *, int, struct mbuf *,
    struct sockaddr *, struct mbuf *, struct proc *);
static int ctl_send_list(struct socket *, int, struct mbuf *,
    struct sockaddr *, struct mbuf *, struct proc *);
static int ctl_ctloutput(struct socket *, struct sockopt *);
static int ctl_peeraddr(struct socket *so, struct sockaddr **nam);
static int ctl_usr_rcvd(struct socket *so, int flags);

static struct kctl *ctl_find_by_name(const char *);
static struct kctl *ctl_find_by_id_unit(u_int32_t id, u_int32_t unit);

static struct socket *kcb_find_socket(struct kctl *, u_int32_t unit);
static struct ctl_cb *kcb_find(struct kctl *, u_int32_t unit);
static void ctl_post_msg(u_int32_t event_code, u_int32_t id);

static int ctl_lock(struct socket *, int, void *);
static int ctl_unlock(struct socket *, int, void *);
static lck_mtx_t * ctl_getlock(struct socket *, int);

static struct pr_usrreqs ctl_usrreqs = {
	.pru_attach =		ctl_attach,
	.pru_connect =		ctl_connect,
	.pru_control =		ctl_ioctl,
	.pru_detach =		ctl_detach,
	.pru_disconnect =	ctl_disconnect,
	.pru_peeraddr =		ctl_peeraddr,
	.pru_rcvd =		ctl_usr_rcvd,
	.pru_send =		ctl_send,
	.pru_send_list =	ctl_send_list,
	.pru_sosend =		sosend,
	.pru_sosend_list =	sosend_list,
	.pru_soreceive =	soreceive,
	.pru_soreceive_list =	soreceive_list,
};

static struct protosw kctlsw[] = {
{
	.pr_type =	SOCK_DGRAM,
	.pr_protocol =	SYSPROTO_CONTROL,
	.pr_flags =	PR_ATOMIC|PR_CONNREQUIRED|PR_PCBLOCK|PR_WANTRCVD,
	.pr_ctloutput =	ctl_ctloutput,
	.pr_usrreqs =	&ctl_usrreqs,
	.pr_lock =	ctl_lock,
	.pr_unlock =	ctl_unlock,
	.pr_getlock =	ctl_getlock,
},
{
	.pr_type =	SOCK_STREAM,
	.pr_protocol =	SYSPROTO_CONTROL,
	.pr_flags =	PR_CONNREQUIRED|PR_PCBLOCK|PR_WANTRCVD,
	.pr_ctloutput =	ctl_ctloutput,
	.pr_usrreqs =	&ctl_usrreqs,
	.pr_lock =	ctl_lock,
	.pr_unlock =	ctl_unlock,
	.pr_getlock =	ctl_getlock,
}
};

__private_extern__ int kctl_reg_list SYSCTL_HANDLER_ARGS;
__private_extern__ int kctl_pcblist SYSCTL_HANDLER_ARGS;
__private_extern__ int kctl_getstat SYSCTL_HANDLER_ARGS;

static int kctl_proto_count = (sizeof (kctlsw) / sizeof (struct protosw));

SYSCTL_NODE(_net_systm, OID_AUTO, kctl,
    CTLFLAG_RW|CTLFLAG_LOCKED, 0, "Kernel control family");

struct kctlstat kctlstat;
SYSCTL_PROC(_net_systm_kctl, OID_AUTO, stats,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
    kctl_getstat, "S,kctlstat", "");

SYSCTL_PROC(_net_systm_kctl, OID_AUTO, reg_list,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
    kctl_reg_list, "S,xkctl_reg", "");

SYSCTL_PROC(_net_systm_kctl, OID_AUTO, pcblist,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
    kctl_pcblist, "S,xkctlpcb", "");

u_int32_t ctl_autorcvbuf_max = 256 * 1024;
SYSCTL_INT(_net_systm_kctl, OID_AUTO, autorcvbufmax,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_autorcvbuf_max, 0, "");

u_int32_t ctl_autorcvbuf_high = 0;
SYSCTL_INT(_net_systm_kctl, OID_AUTO, autorcvbufhigh,
    CTLFLAG_RD | CTLFLAG_LOCKED, &ctl_autorcvbuf_high, 0, "");

u_int32_t ctl_debug = 0;
SYSCTL_INT(_net_systm_kctl, OID_AUTO, debug,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_debug, 0, "");

/*
 * Install the protosw's for the Kernel Control manager.
 */
__private_extern__ void
kern_control_init(struct domain *dp)
{
	struct protosw *pr;
	int i;

	VERIFY(!(dp->dom_flags & DOM_INITIALIZED));
	VERIFY(dp == systemdomain);

	ctl_lck_grp_attr = lck_grp_attr_alloc_init();
	if (ctl_lck_grp_attr == NULL) {
		panic("%s: lck_grp_attr_alloc_init failed\n", __func__);
		/* NOTREACHED */
	}

	ctl_lck_grp = lck_grp_alloc_init("Kernel Control Protocol",
	    ctl_lck_grp_attr);
	if (ctl_lck_grp == NULL) {
		panic("%s: lck_grp_alloc_init failed\n", __func__);
		/* NOTREACHED */
	}

	ctl_lck_attr = lck_attr_alloc_init();
	if (ctl_lck_attr == NULL) {
		panic("%s: lck_attr_alloc_init failed\n", __func__);
		/* NOTREACHED */
	}

	ctl_mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
	if (ctl_mtx == NULL) {
		panic("%s: lck_mtx_alloc_init failed\n", __func__);
		/* NOTREACHED */
	}
	TAILQ_INIT(&ctl_head);

	for (i = 0, pr = &kctlsw[0]; i < kctl_proto_count; i++, pr++)
		net_add_proto(pr, dp, 1);
}

static void
kcb_delete(struct ctl_cb *kcb)
{
	if (kcb != 0) {
		if (kcb->mtx != 0)
			lck_mtx_free(kcb->mtx, ctl_lck_grp);
		FREE(kcb, M_TEMP);
	}
}

/*
 * Kernel Controller user-request functions
 * attach function must exist and succeed
 * detach not necessary
 * we need a pcb for the per socket mutex
 */
static int
ctl_attach(struct socket *so, int proto, struct proc *p)
{
#pragma unused(proto, p)
	int error = 0;
	struct ctl_cb *kcb = 0;

	MALLOC(kcb, struct ctl_cb *, sizeof(struct ctl_cb), M_TEMP, M_WAITOK);
	if (kcb == NULL) {
		error = ENOMEM;
		goto quit;
	}
	bzero(kcb, sizeof(struct ctl_cb));

	kcb->mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
	if (kcb->mtx == NULL) {
		error = ENOMEM;
		goto quit;
	}
	kcb->so = so;
	so->so_pcb = (caddr_t)kcb;

quit:
	if (error != 0) {
		kcb_delete(kcb);
		kcb = 0;
	}
	return (error);
}

static int
ctl_sofreelastref(struct socket *so)
{
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

	so->so_pcb = 0;

	if (kcb != 0) {
		struct kctl *kctl;
		if ((kctl = kcb->kctl) != 0) {
			lck_mtx_lock(ctl_mtx);
			TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
			kctlstat.kcs_pcbcount--;
			kctlstat.kcs_gencnt++;
			lck_mtx_unlock(ctl_mtx);
		}
		kcb_delete(kcb);
	}
	sofreelastref(so, 1);
	return (0);
}

static int
ctl_detach(struct socket *so)
{
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

	if (kcb == 0)
		return (0);

	soisdisconnected(so);
	so->so_flags |= SOF_PCBCLEARING;
	return (0);
}

static int
ctl_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
{
#pragma unused(p)
	struct kctl *kctl;
	int error = 0;
	struct sockaddr_ctl sa;
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
	struct ctl_cb *kcb_next = NULL;
	u_quad_t sbmaxsize;
	u_int32_t recvbufsize, sendbufsize;

	if (kcb == 0)
		panic("ctl_connect so_pcb null\n");

	if (nam->sa_len != sizeof(struct sockaddr_ctl))
		return (EINVAL);

	bcopy(nam, &sa, sizeof(struct sockaddr_ctl));

	lck_mtx_lock(ctl_mtx);
	kctl = ctl_find_by_id_unit(sa.sc_id, sa.sc_unit);
	if (kctl == NULL) {
		lck_mtx_unlock(ctl_mtx);
		return (ENOENT);
	}

	if (((kctl->flags & CTL_FLAG_REG_SOCK_STREAM) &&
	    (so->so_type != SOCK_STREAM)) ||
	    (!(kctl->flags & CTL_FLAG_REG_SOCK_STREAM) &&
	    (so->so_type != SOCK_DGRAM))) {
		lck_mtx_unlock(ctl_mtx);
		return (EPROTOTYPE);
	}

	if (kctl->flags & CTL_FLAG_PRIVILEGED) {
		if (p == 0) {
			lck_mtx_unlock(ctl_mtx);
			return (EINVAL);
		}
		if (kauth_cred_issuser(kauth_cred_get()) == 0) {
			lck_mtx_unlock(ctl_mtx);
			return (EPERM);
		}
	}

	if ((kctl->flags & CTL_FLAG_REG_ID_UNIT) || sa.sc_unit != 0) {
		if (kcb_find(kctl, sa.sc_unit) != NULL) {
			lck_mtx_unlock(ctl_mtx);
			return (EBUSY);
		}
	} else {
		/* Find an unused unit number; assumes units are in order */
		u_int32_t unit = 1;

		TAILQ_FOREACH(kcb_next, &kctl->kcb_head, next) {
			if (kcb_next->unit > unit) {
				/* Found a gap, let's fill it in */
				break;
			}
			unit = kcb_next->unit + 1;
			if (unit == ctl_maxunit)
				break;
		}

		if (unit == ctl_maxunit) {
			lck_mtx_unlock(ctl_mtx);
			return (EBUSY);
		}

		sa.sc_unit = unit;
	}

	kcb->unit = sa.sc_unit;
	kcb->kctl = kctl;
	if (kcb_next != NULL) {
		TAILQ_INSERT_BEFORE(kcb_next, kcb, next);
	} else {
		TAILQ_INSERT_TAIL(&kctl->kcb_head, kcb, next);
	}
	kctlstat.kcs_pcbcount++;
	kctlstat.kcs_gencnt++;
	kctlstat.kcs_connections++;
	lck_mtx_unlock(ctl_mtx);

	/*
	 * rdar://15526688: Limit the send and receive sizes to sb_max
	 * by using the same scaling as sbreserve()
	 */
	sbmaxsize = (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES);

	if (kctl->sendbufsize > sbmaxsize)
		sendbufsize = sbmaxsize;
	else
		sendbufsize = kctl->sendbufsize;

	if (kctl->recvbufsize > sbmaxsize)
		recvbufsize = sbmaxsize;
	else
		recvbufsize = kctl->recvbufsize;

	error = soreserve(so, sendbufsize, recvbufsize);
	if (error) {
		printf("%s - soreserve(%llx, %u, %u) error %d\n", __func__,
		    (uint64_t)VM_KERNEL_ADDRPERM(so),
		    sendbufsize, recvbufsize, error);
		goto done;
	}
	soisconnecting(so);

	socket_unlock(so, 0);
	error = (*kctl->connect)(kctl, &sa, &kcb->userdata);
	socket_lock(so, 0);
	if (error)
		goto end;

	soisconnected(so);

end:
	if (error && kctl->disconnect) {
		socket_unlock(so, 0);
		(*kctl->disconnect)(kctl, kcb->unit, kcb->userdata);
		socket_lock(so, 0);
	}
done:
	if (error) {
		soisdisconnected(so);
		lck_mtx_lock(ctl_mtx);
		kcb->kctl = 0;
		kcb->unit = 0;
		TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
		kctlstat.kcs_pcbcount--;
		kctlstat.kcs_gencnt++;
		kctlstat.kcs_conn_fail++;
		lck_mtx_unlock(ctl_mtx);
	}
	return (error);
}

static int
ctl_disconnect(struct socket *so)
{
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

	if ((kcb = (struct ctl_cb *)so->so_pcb)) {
		struct kctl *kctl = kcb->kctl;

		if (kctl && kctl->disconnect) {
			socket_unlock(so, 0);
			(*kctl->disconnect)(kctl, kcb->unit, kcb->userdata);
			socket_lock(so, 0);
		}

		soisdisconnected(so);

		socket_unlock(so, 0);
		lck_mtx_lock(ctl_mtx);
		kcb->kctl = 0;
		kcb->unit = 0;
		while (kcb->usecount != 0) {
			msleep(&kcb->usecount, ctl_mtx, 0, "kcb->usecount", 0);
		}
		TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
		kctlstat.kcs_pcbcount--;
		kctlstat.kcs_gencnt++;
		lck_mtx_unlock(ctl_mtx);
		socket_lock(so, 0);
	}
	return (0);
}

static int
ctl_peeraddr(struct socket *so, struct sockaddr **nam)
{
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
	struct kctl *kctl;
	struct sockaddr_ctl sc;

	if (kcb == NULL)	/* sanity check */
		return (ENOTCONN);

	if ((kctl = kcb->kctl) == NULL)
		return (EINVAL);

	bzero(&sc, sizeof(struct sockaddr_ctl));
	sc.sc_len = sizeof(struct sockaddr_ctl);
	sc.sc_family = AF_SYSTEM;
	sc.ss_sysaddr = AF_SYS_CONTROL;
	sc.sc_id = kctl->id;
	sc.sc_unit = kcb->unit;

	*nam = dup_sockaddr((struct sockaddr *)&sc, 1);

	return (0);
}

static void
ctl_sbrcv_trim(struct socket *so)
{
	struct sockbuf *sb = &so->so_rcv;

	if (sb->sb_hiwat > sb->sb_idealsize) {
		u_int32_t diff;
		int32_t trim;

		/*
		 * The difference between the ideal size and the
		 * current size is the upper bound of the trim amount
		 */
		diff = sb->sb_hiwat - sb->sb_idealsize;
		/*
		 * We cannot trim below the outstanding data
		 */
		trim = sb->sb_hiwat - sb->sb_cc;

		trim = imin(trim, (int32_t)diff);

		if (trim > 0) {
			sbreserve(sb, (sb->sb_hiwat - trim));

			if (ctl_debug)
				printf("%s - shrunk to %d\n",
				    __func__, sb->sb_hiwat);
		}
	}
}

static int
ctl_usr_rcvd(struct socket *so, int flags)
{
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
	struct kctl *kctl;

	if ((kctl = kcb->kctl) == NULL) {
		return (EINVAL);
	}

	if (kctl->rcvd) {
		socket_unlock(so, 0);
		(*kctl->rcvd)(kctl, kcb->unit, kcb->userdata, flags);
		socket_lock(so, 0);
	}

	ctl_sbrcv_trim(so);

	return (0);
}

static int
ctl_send(struct socket *so, int flags, struct mbuf *m,
    struct sockaddr *addr, struct mbuf *control,
    struct proc *p)
{
#pragma unused(addr, p)
	int error = 0;
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
	struct kctl *kctl;

	if (control)
		m_freem(control);

	if (kcb == NULL)	/* sanity check */
		error = ENOTCONN;

	if (error == 0 && (kctl = kcb->kctl) == NULL)
		error = EINVAL;

	if (error == 0 && kctl->send) {
		so_tc_update_stats(m, so, m_get_service_class(m));
		socket_unlock(so, 0);
		error = (*kctl->send)(kctl, kcb->unit, kcb->userdata, m, flags);
		socket_lock(so, 0);
	} else {
		m_freem(m);
		if (error == 0)
			error = ENOTSUP;
	}
	if (error != 0)
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_send_fail);
	return (error);
}

static int
ctl_send_list(struct socket *so, int flags, struct mbuf *m,
    __unused struct sockaddr *addr, struct mbuf *control,
    __unused struct proc *p)
{
	int error = 0;
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
	struct kctl *kctl;

	if (control)
		m_freem_list(control);

	if (kcb == NULL)	/* sanity check */
		error = ENOTCONN;

	if (error == 0 && (kctl = kcb->kctl) == NULL)
		error = EINVAL;

	if (error == 0 && kctl->send_list) {
		struct mbuf *nxt;

		for (nxt = m; nxt != NULL; nxt = nxt->m_nextpkt)
			so_tc_update_stats(nxt, so, m_get_service_class(nxt));

		socket_unlock(so, 0);
		error = (*kctl->send_list)(kctl, kcb->unit, kcb->userdata, m,
		    flags);
		socket_lock(so, 0);
	} else if (error == 0 && kctl->send) {
		while (m != NULL && error == 0) {
			struct mbuf *nextpkt = m->m_nextpkt;

			m->m_nextpkt = NULL;
			so_tc_update_stats(m, so, m_get_service_class(m));
			socket_unlock(so, 0);
			error = (*kctl->send)(kctl, kcb->unit, kcb->userdata, m,
			    flags);
			socket_lock(so, 0);
			m = nextpkt;
		}
		if (m != NULL)
			m_freem_list(m);
	} else {
		m_freem_list(m);
		if (error == 0)
			error = ENOTSUP;
	}
	if (error != 0)
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_send_list_fail);
	return (error);
}

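/*
 * ctl_rcvbspace() below enforces the receive buffer policy.  As a worked
 * example of the CTL_FLAG_REG_CRIT case: with sb_hiwat == 8192, ordinary
 * (non CTL_DATA_CRIT) enqueues start failing once free space drops below
 * sb_hiwat >> 2 == 2048 bytes, keeping the last 25% of the buffer in
 * reserve for critical messages.  Critical enqueues may additionally grow
 * the buffer up to sb_idealsize plus 25%, capped by ctl_autorcvbuf_max.
 */
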
static errno_t
ctl_rcvbspace(struct kctl *kctl, struct socket *so, u_int32_t datasize,
    u_int32_t flags)
{
	struct sockbuf *sb = &so->so_rcv;
	u_int32_t space = sbspace(sb);
	errno_t error;

	if ((kctl->flags & CTL_FLAG_REG_CRIT) == 0) {
		if ((u_int32_t) space >= datasize)
			error = 0;
		else
			error = ENOBUFS;
	} else if ((flags & CTL_DATA_CRIT) == 0) {
		/*
		 * Reserve 25% for critical messages
		 */
		if (space < (sb->sb_hiwat >> 2) ||
		    space < datasize)
			error = ENOBUFS;
		else
			error = 0;
	} else {
		u_int32_t autorcvbuf_max;

		/*
		 * Allow overcommit of 25%
		 */
		autorcvbuf_max = min(sb->sb_idealsize + (sb->sb_idealsize >> 2),
		    ctl_autorcvbuf_max);

		if ((u_int32_t) space >= datasize) {
			error = 0;
		} else if (tcp_cansbgrow(sb) &&
		    sb->sb_hiwat < autorcvbuf_max) {
			/*
			 * Grow with a little bit of leeway
			 */
			u_int32_t grow = datasize - space + MSIZE;

			if (sbreserve(sb,
			    min((sb->sb_hiwat + grow), autorcvbuf_max)) == 1) {

				if (sb->sb_hiwat > ctl_autorcvbuf_high)
					ctl_autorcvbuf_high = sb->sb_hiwat;

				if (ctl_debug)
					printf("%s - grown to %d\n",
					    __func__, sb->sb_hiwat);
				error = 0;
			} else {
				error = ENOBUFS;
			}
		} else {
			error = ENOBUFS;
		}
	}
	return (error);
}

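/*
 * The enqueue functions below are how a controller pushes data toward its
 * client.  A minimal sketch of a controller delivering a buffer to unit
 * `unit` (my_ctlref is a placeholder for the reference handed back by
 * ctl_register()):
 *
 *	errno_t err;
 *
 *	err = ctl_enqueuedata(my_ctlref, unit, buf, buflen, CTL_DATA_EOR);
 *	if (err == ENOBUFS)
 *		;	(client is not draining the socket; retry later)
 *
 * ctl_enqueuembuf() avoids the copy when the caller already holds an mbuf
 * chain, and ctl_getenqueuespace() lets the caller size writes to the
 * space currently available.
 */
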
errno_t
ctl_enqueuembuf(void *kctlref, u_int32_t unit, struct mbuf *m, u_int32_t flags)
{
	struct socket *so;
	errno_t error = 0;
	struct kctl *kctl = (struct kctl *)kctlref;
	int len = m->m_pkthdr.len;

	if (kctl == NULL)
		return (EINVAL);

	so = kcb_find_socket(kctl, unit);

	if (so == NULL)
		return (EINVAL);

	if (ctl_rcvbspace(kctl, so, len, flags) != 0) {
		error = ENOBUFS;
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
		goto bye;
	}
	if ((flags & CTL_DATA_EOR))
		m->m_flags |= M_EOR;

	so_recv_data_stat(so, m, 0);
	if (sbappend(&so->so_rcv, m) != 0) {
		if ((flags & CTL_DATA_NOWAKEUP) == 0)
			sorwakeup(so);
	} else {
		error = ENOBUFS;
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
	}
bye:
	if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT))
		printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
		    __func__, error, len,
		    so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);

	socket_unlock(so, 1);
	if (error != 0)
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);

	return (error);
}

/*
 * Compute space occupied by mbuf like sbappendrecord
 */
static int
m_space(struct mbuf *m)
{
	int space = 0;
	struct mbuf *nxt;

	for (nxt = m; nxt != NULL; nxt = nxt->m_next)
		space += nxt->m_len;

	return (space);
}

errno_t
ctl_enqueuembuf_list(void *kctlref, u_int32_t unit, struct mbuf *m_list,
    u_int32_t flags, struct mbuf **m_remain)
{
	struct socket *so = NULL;
	errno_t error = 0;
	struct kctl *kctl = (struct kctl *)kctlref;
	struct mbuf *m, *nextpkt;
	int needwakeup = 0;
	int len;

	/*
	 * Need to point to the beginning of the list in case of early exit
	 */
	m = m_list;

	if (kctl == NULL) {
		error = EINVAL;
		goto done;
	}
	if (kctl->flags & CTL_FLAG_REG_SOCK_STREAM) {
		error = EOPNOTSUPP;
		goto done;
	}
	if (flags & CTL_DATA_EOR) {
		error = EINVAL;
		goto done;
	}
	/*
	 * kcb_find_socket takes the socket lock with a reference
	 */
	so = kcb_find_socket(kctl, unit);
	if (so == NULL) {
		error = EINVAL;
		goto done;
	}

	for (m = m_list; m != NULL; m = nextpkt) {
		nextpkt = m->m_nextpkt;

		if (m->m_pkthdr.len == 0)
			printf("%s: %llx m_pkthdr.len is 0\n",
			    __func__, (uint64_t)VM_KERNEL_ADDRPERM(m));

		/*
		 * The mbuf is either appended or freed by sbappendrecord()
		 * so it's not reliable from a data standpoint
		 */
		len = m_space(m);
		if (ctl_rcvbspace(kctl, so, len, flags) != 0) {
			error = ENOBUFS;
			OSIncrementAtomic64(
			    (SInt64 *)&kctlstat.kcs_enqueue_fullsock);
			break;
		} else {
			/*
			 * Unlink from the list, m is on its own
			 */
			m->m_nextpkt = NULL;
			so_recv_data_stat(so, m, 0);
			if (sbappendrecord(&so->so_rcv, m) != 0) {
				needwakeup = 1;
			} else {
				/*
				 * We free or return the remaining
				 * mbufs in the list
				 */
				m = nextpkt;
				error = ENOBUFS;
				OSIncrementAtomic64(
				    (SInt64 *)&kctlstat.kcs_enqueue_fullsock);
				break;
			}
		}
	}
	if (needwakeup && (flags & CTL_DATA_NOWAKEUP) == 0)
		sorwakeup(so);

done:
	if (so != NULL) {
		if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT))
			printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
			    __func__, error, len,
			    so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);

		socket_unlock(so, 1);
	}
	if (m_remain) {
		*m_remain = m;

		if (m != NULL && socket_debug && so != NULL &&
		    (so->so_options & SO_DEBUG)) {
			struct mbuf *n;

			printf("%s m_list %llx\n", __func__,
			    (uint64_t) VM_KERNEL_ADDRPERM(m_list));
			for (n = m; n != NULL; n = n->m_nextpkt)
				printf(" remain %llx m_next %llx\n",
				    (uint64_t) VM_KERNEL_ADDRPERM(n),
				    (uint64_t) VM_KERNEL_ADDRPERM(n->m_next));
		}
	} else {
		if (m != NULL)
			m_freem_list(m);
	}
	if (error != 0)
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
	return (error);
}

errno_t
ctl_enqueuedata(void *kctlref, u_int32_t unit, void *data, size_t len,
    u_int32_t flags)
{
	struct socket *so;
	struct mbuf *m;
	errno_t error = 0;
	struct kctl *kctl = (struct kctl *)kctlref;
	unsigned int num_needed;
	struct mbuf *n;
	size_t curlen = 0;

	if (kctlref == NULL)
		return (EINVAL);

	so = kcb_find_socket(kctl, unit);
	if (so == NULL)
		return (EINVAL);

	if (ctl_rcvbspace(kctl, so, len, flags) != 0) {
		error = ENOBUFS;
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
		goto bye;
	}

	num_needed = 1;
	m = m_allocpacket_internal(&num_needed, len, NULL, M_NOWAIT, 1, 0);
	if (m == NULL) {
		printf("ctl_enqueuedata: m_allocpacket_internal(%lu) failed\n",
		    len);
		error = ENOMEM;
		goto bye;
	}

	for (n = m; n != NULL; n = n->m_next) {
		size_t mlen = mbuf_maxlen(n);

		if (mlen + curlen > len)
			mlen = len - curlen;
		n->m_len = mlen;
		bcopy((char *)data + curlen, n->m_data, mlen);
		curlen += mlen;
	}
	mbuf_pkthdr_setlen(m, curlen);

	if ((flags & CTL_DATA_EOR))
		m->m_flags |= M_EOR;
	so_recv_data_stat(so, m, 0);
	if (sbappend(&so->so_rcv, m) != 0) {
		if ((flags & CTL_DATA_NOWAKEUP) == 0)
			sorwakeup(so);
	} else {
		error = ENOBUFS;
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
	}

bye:
	if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT))
		printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
		    __func__, error, (int)len,
		    so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);

	socket_unlock(so, 1);
	if (error != 0)
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
	return (error);
}

errno_t
ctl_getenqueuespace(kern_ctl_ref kctlref, u_int32_t unit, size_t *space)
{
	struct kctl *kctl = (struct kctl *)kctlref;
	struct socket *so;
	long avail;

	if (kctlref == NULL || space == NULL)
		return (EINVAL);

	so = kcb_find_socket(kctl, unit);
	if (so == NULL)
		return (EINVAL);

	avail = sbspace(&so->so_rcv);
	*space = (avail < 0) ? 0 : avail;
	socket_unlock(so, 1);

	return (0);
}

errno_t
ctl_getenqueuereadable(kern_ctl_ref kctlref, u_int32_t unit,
    u_int32_t *difference)
{
	struct kctl *kctl = (struct kctl *)kctlref;
	struct socket *so;

	if (kctlref == NULL || difference == NULL)
		return (EINVAL);

	so = kcb_find_socket(kctl, unit);
	if (so == NULL)
		return (EINVAL);

	if (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat) {
		*difference = 0;
	} else {
		*difference = (so->so_rcv.sb_lowat - so->so_rcv.sb_cc);
	}
	socket_unlock(so, 1);

	return (0);
}

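/*
 * ctl_ctloutput() below dispatches socket options at level
 * SYSPROTO_CONTROL to the controller's setopt/getopt handlers.  From the
 * client side this is a plain {get,set}sockopt on the connected control
 * socket; a sketch, where MYCTL_OPT_STATS and struct my_stats are
 * hypothetical names a controller might define:
 *
 *	struct my_stats stats;
 *	socklen_t optlen = sizeof (stats);
 *
 *	getsockopt(fd, SYSPROTO_CONTROL, MYCTL_OPT_STATS, &stats, &optlen);
 */
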
static int
ctl_ctloutput(struct socket *so, struct sockopt *sopt)
{
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
	struct kctl *kctl;
	int error = 0;
	void *data;
	size_t len;

	if (sopt->sopt_level != SYSPROTO_CONTROL) {
		return (EINVAL);
	}

	if (kcb == NULL)	/* sanity check */
		return (ENOTCONN);

	if ((kctl = kcb->kctl) == NULL)
		return (EINVAL);

	switch (sopt->sopt_dir) {
	case SOPT_SET:
		if (kctl->setopt == NULL)
			return (ENOTSUP);
		if (sopt->sopt_valsize == 0) {
			data = NULL;
		} else {
			MALLOC(data, void *, sopt->sopt_valsize, M_TEMP,
			    M_WAITOK);
			if (data == NULL)
				return (ENOMEM);
			error = sooptcopyin(sopt, data,
			    sopt->sopt_valsize,
			    sopt->sopt_valsize);
		}
		if (error == 0) {
			socket_unlock(so, 0);
			error = (*kctl->setopt)(kcb->kctl, kcb->unit,
			    kcb->userdata,
			    sopt->sopt_name,
			    data,
			    sopt->sopt_valsize);
			socket_lock(so, 0);
		}
		FREE(data, M_TEMP);
		break;

	case SOPT_GET:
		if (kctl->getopt == NULL)
			return (ENOTSUP);
		data = NULL;
		if (sopt->sopt_valsize && sopt->sopt_val) {
			MALLOC(data, void *, sopt->sopt_valsize, M_TEMP,
			    M_WAITOK);
			if (data == NULL)
				return (ENOMEM);
			/*
			 * 4108337 - copy user data in case the
			 * kernel control needs it
			 */
			error = sooptcopyin(sopt, data,
			    sopt->sopt_valsize, sopt->sopt_valsize);
		}
		len = sopt->sopt_valsize;
		socket_unlock(so, 0);
		error = (*kctl->getopt)(kcb->kctl, kcb->unit,
		    kcb->userdata, sopt->sopt_name,
		    data, &len);
		if (data != NULL && len > sopt->sopt_valsize)
			panic_plain("ctl_ctloutput: ctl %s returned "
			    "len (%lu) > sopt_valsize (%lu)\n",
			    kcb->kctl->name, len,
			    sopt->sopt_valsize);
		socket_lock(so, 0);
		if (error == 0) {
			if (data != NULL)
				error = sooptcopyout(sopt, data, len);
			else
				sopt->sopt_valsize = len;
		}
		if (data != NULL)
			FREE(data, M_TEMP);
		break;
	}
	return (error);
}

static int
ctl_ioctl(struct socket *so, u_long cmd, caddr_t data,
    struct ifnet *ifp, struct proc *p)
{
#pragma unused(so, ifp, p)
	int error = ENOTSUP;

	switch (cmd) {
	/* get the number of controllers */
	case CTLIOCGCOUNT: {
		struct kctl *kctl;
		u_int32_t n = 0;

		lck_mtx_lock(ctl_mtx);
		TAILQ_FOREACH(kctl, &ctl_head, next)
			n++;
		lck_mtx_unlock(ctl_mtx);

		bcopy(&n, data, sizeof (n));
		error = 0;
		break;
	}
	case CTLIOCGINFO: {
		struct ctl_info ctl_info;
		struct kctl *kctl = 0;
		size_t name_len;

		bcopy(data, &ctl_info, sizeof (ctl_info));
		name_len = strnlen(ctl_info.ctl_name, MAX_KCTL_NAME);

		if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) {
			error = EINVAL;
			break;
		}
		lck_mtx_lock(ctl_mtx);
		kctl = ctl_find_by_name(ctl_info.ctl_name);
		lck_mtx_unlock(ctl_mtx);
		if (kctl == 0) {
			error = ENOENT;
			break;
		}
		ctl_info.ctl_id = kctl->id;
		bcopy(&ctl_info, data, sizeof (ctl_info));
		error = 0;
		break;
	}

	/* add controls to get list of NKEs */

	}

	return (error);
}

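/*
 * A minimal sketch of kernel-side registration, typically from a kext's
 * start routine (the control name and the my_* callbacks are placeholder
 * names):
 *
 *	static kern_ctl_ref my_ctlref;
 *	struct kern_ctl_reg reg;
 *	errno_t err;
 *
 *	bzero(&reg, sizeof (reg));
 *	strlcpy(reg.ctl_name, "com.example.mycontrol",
 *	    sizeof (reg.ctl_name));
 *	reg.ctl_connect = my_connect;	(required, enforced below)
 *	reg.ctl_disconnect = my_disconnect;
 *	reg.ctl_send = my_send;
 *	reg.ctl_setopt = my_setopt;
 *	reg.ctl_getopt = my_getopt;
 *	err = ctl_register(&reg, &my_ctlref);
 *
 * On success reg.ctl_id holds the dynamically assigned control id, and
 * reg.ctl_sendsize/ctl_recvsize are filled in with the defaults if they
 * were left at zero.
 */
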
/*
 * Register/unregister a NKE
 */
errno_t
ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref)
{
	struct kctl *kctl = NULL;
	struct kctl *kctl_next = NULL;
	u_int32_t id = 1;
	size_t name_len;
	int is_extended = 0;

	if (userkctl == NULL)	/* sanity check */
		return (EINVAL);
	if (userkctl->ctl_connect == NULL)
		return (EINVAL);
	name_len = strlen(userkctl->ctl_name);
	if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME)
		return (EINVAL);

	MALLOC(kctl, struct kctl *, sizeof(*kctl), M_TEMP, M_WAITOK);
	if (kctl == NULL)
		return (ENOMEM);
	bzero((char *)kctl, sizeof(*kctl));

	lck_mtx_lock(ctl_mtx);

	/*
	 * Kernel Control IDs
	 *
	 * CTL_FLAG_REG_ID_UNIT indicates the control ID and unit number are
	 * static. If they do not exist, add them to the list in order. If the
	 * flag is not set, we must find a new unique value. We assume the
	 * list is in order. We find the last item in the list and add one. If
	 * this leads to wrapping the id around, we start at the front of the
	 * list and look for a gap.
	 */

	if ((userkctl->ctl_flags & CTL_FLAG_REG_ID_UNIT) == 0) {
		/* Must dynamically assign an unused ID */

		/* Verify the same name isn't already registered */
		if (ctl_find_by_name(userkctl->ctl_name) != NULL) {
			lck_mtx_unlock(ctl_mtx);
			FREE(kctl, M_TEMP);
			return (EEXIST);
		}

		/* Start with 1 in case the list is empty */
		id = 1;
		kctl_next = TAILQ_LAST(&ctl_head, kctl_list);

		if (kctl_next != NULL) {
			/* List was not empty, add one to the last item */
			id = kctl_next->id + 1;
			kctl_next = NULL;

			/*
			 * If this wrapped the id number, start looking at
			 * the front of the list for an unused id.
			 */
			if (id == 0) {
				/* Find the next unused ID */
				id = 1;

				TAILQ_FOREACH(kctl_next, &ctl_head, next) {
					if (kctl_next->id > id) {
						/* We found a gap */
						break;
					}

					id = kctl_next->id + 1;
				}
			}
		}

		userkctl->ctl_id = id;
		kctl->id = id;
		kctl->reg_unit = -1;
	} else {
		TAILQ_FOREACH(kctl_next, &ctl_head, next) {
			if (kctl_next->id > userkctl->ctl_id)
				break;
		}

		if (ctl_find_by_id_unit(userkctl->ctl_id, userkctl->ctl_unit)) {
			lck_mtx_unlock(ctl_mtx);
			FREE(kctl, M_TEMP);
			return (EEXIST);
		}
		kctl->id = userkctl->ctl_id;
		kctl->reg_unit = userkctl->ctl_unit;
	}

	is_extended = (userkctl->ctl_flags & CTL_FLAG_REG_EXTENDED);

	strlcpy(kctl->name, userkctl->ctl_name, MAX_KCTL_NAME);
	kctl->flags = userkctl->ctl_flags;

	/*
	 * Let the caller know the default send and receive sizes
	 */
	if (userkctl->ctl_sendsize == 0) {
		kctl->sendbufsize = CTL_SENDSIZE;
		userkctl->ctl_sendsize = kctl->sendbufsize;
	} else {
		kctl->sendbufsize = userkctl->ctl_sendsize;
	}
	if (userkctl->ctl_recvsize == 0) {
		kctl->recvbufsize = CTL_RECVSIZE;
		userkctl->ctl_recvsize = kctl->recvbufsize;
	} else {
		kctl->recvbufsize = userkctl->ctl_recvsize;
	}

	kctl->connect = userkctl->ctl_connect;
	kctl->disconnect = userkctl->ctl_disconnect;
	kctl->send = userkctl->ctl_send;
	kctl->setopt = userkctl->ctl_setopt;
	kctl->getopt = userkctl->ctl_getopt;
	if (is_extended) {
		kctl->rcvd = userkctl->ctl_rcvd;
		kctl->send_list = userkctl->ctl_send_list;
	}

	TAILQ_INIT(&kctl->kcb_head);

	if (kctl_next)
		TAILQ_INSERT_BEFORE(kctl_next, kctl, next);
	else
		TAILQ_INSERT_TAIL(&ctl_head, kctl, next);

	kctlstat.kcs_reg_count++;
	kctlstat.kcs_gencnt++;

	lck_mtx_unlock(ctl_mtx);

	*kctlref = kctl;

	ctl_post_msg(KEV_CTL_REGISTERED, kctl->id);
	return (0);
}

errno_t
ctl_deregister(void *kctlref)
{
	struct kctl *kctl;

	if (kctlref == NULL)	/* sanity check */
		return (EINVAL);

	lck_mtx_lock(ctl_mtx);
	TAILQ_FOREACH(kctl, &ctl_head, next) {
		if (kctl == (struct kctl *)kctlref)
			break;
	}
	if (kctl != (struct kctl *)kctlref) {
		lck_mtx_unlock(ctl_mtx);
		return (EINVAL);
	}
	if (!TAILQ_EMPTY(&kctl->kcb_head)) {
		lck_mtx_unlock(ctl_mtx);
		return (EBUSY);
	}

	TAILQ_REMOVE(&ctl_head, kctl, next);

	kctlstat.kcs_reg_count--;
	kctlstat.kcs_gencnt++;

	lck_mtx_unlock(ctl_mtx);

	ctl_post_msg(KEV_CTL_DEREGISTERED, kctl->id);
	FREE(kctl, M_TEMP);
	return (0);
}

/*
 * Must be called with global ctl_mtx lock taken
 */
static struct kctl *
ctl_find_by_name(const char *name)
{
	struct kctl *kctl;

	lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);

	TAILQ_FOREACH(kctl, &ctl_head, next)
		if (strncmp(kctl->name, name, sizeof(kctl->name)) == 0)
			return (kctl);

	return (NULL);
}

u_int32_t
ctl_id_by_name(const char *name)
{
	u_int32_t ctl_id = 0;
	struct kctl *kctl;

	lck_mtx_lock(ctl_mtx);
	kctl = ctl_find_by_name(name);
	if (kctl)
		ctl_id = kctl->id;
	lck_mtx_unlock(ctl_mtx);

	return (ctl_id);
}

errno_t
ctl_name_by_id(u_int32_t id, char *out_name, size_t maxsize)
{
	int found = 0;
	struct kctl *kctl;

	lck_mtx_lock(ctl_mtx);
	TAILQ_FOREACH(kctl, &ctl_head, next) {
		if (kctl->id == id)
			break;
	}

	if (kctl && kctl->name) {
		if (maxsize > MAX_KCTL_NAME)
			maxsize = MAX_KCTL_NAME;
		strlcpy(out_name, kctl->name, maxsize);
		found = 1;
	}
	lck_mtx_unlock(ctl_mtx);

	return (found ? 0 : ENOENT);
}

/*
 * Must be called with global ctl_mtx lock taken
 */
static struct kctl *
ctl_find_by_id_unit(u_int32_t id, u_int32_t unit)
{
	struct kctl *kctl;

	lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);

	TAILQ_FOREACH(kctl, &ctl_head, next) {
		if (kctl->id == id && (kctl->flags & CTL_FLAG_REG_ID_UNIT) == 0)
			return (kctl);
		else if (kctl->id == id && kctl->reg_unit == unit)
			return (kctl);
	}
	return (NULL);
}

/*
 * Must be called with kernel controller lock taken
 */
static struct ctl_cb *
kcb_find(struct kctl *kctl, u_int32_t unit)
{
	struct ctl_cb *kcb;

	lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);

	TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
		if (kcb->unit == unit)
			return (kcb);

	return (NULL);
}

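/*
 * kcb_find_socket() below implements a small reference protocol: the kcb's
 * usecount is bumped under ctl_mtx so the socket cannot go away while we
 * drop ctl_mtx to take the socket lock (the ordering forbids calling
 * socket_lock() while holding ctl_mtx).  ctl_disconnect() waits, via
 * msleep on &kcb->usecount, for that count to drain before tearing the
 * kcb down, which is why the decrement below ends with a wakeup().
 */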
static struct socket *
kcb_find_socket(struct kctl *kctl, u_int32_t unit)
{
	struct socket *so = NULL;
	struct ctl_cb *kcb;
	void *lr_saved;

	lr_saved = __builtin_return_address(0);

	lck_mtx_lock(ctl_mtx);
	kcb = kcb_find(kctl, unit);
	if (kcb && kcb->kctl == kctl) {
		so = kcb->so;
		if (so) {
			kcb->usecount++;
		}
	}
	lck_mtx_unlock(ctl_mtx);

	if (so == NULL) {
		return (NULL);
	}

	socket_lock(so, 1);

	lck_mtx_lock(ctl_mtx);
	if (kcb->kctl == NULL) {
		lck_mtx_unlock(ctl_mtx);
		socket_unlock(so, 1);
		so = NULL;
		lck_mtx_lock(ctl_mtx);
	} else {
		/*
		 * The socket lock history is more useful if we store
		 * the address of the caller.
		 */
		int i = (so->next_lock_lr + SO_LCKDBG_MAX - 1) % SO_LCKDBG_MAX;

		so->lock_lr[i] = lr_saved;
	}
	kcb->usecount--;
	if (kcb->usecount == 0)
		wakeup((event_t)&kcb->usecount);
	lck_mtx_unlock(ctl_mtx);

	return (so);
}

static void
ctl_post_msg(u_int32_t event_code, u_int32_t id)
{
	struct ctl_event_data ctl_ev_data;
	struct kev_msg ev_msg;

	lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_NOTOWNED);

	bzero(&ev_msg, sizeof(struct kev_msg));
	ev_msg.vendor_code = KEV_VENDOR_APPLE;

	ev_msg.kev_class = KEV_SYSTEM_CLASS;
	ev_msg.kev_subclass = KEV_CTL_SUBCLASS;
	ev_msg.event_code = event_code;

	/* common nke subclass data */
	bzero(&ctl_ev_data, sizeof(ctl_ev_data));
	ctl_ev_data.ctl_id = id;
	ev_msg.dv[0].data_ptr = &ctl_ev_data;
	ev_msg.dv[0].data_length = sizeof(ctl_ev_data);

	ev_msg.dv[1].data_length = 0;

	kev_post_msg(&ev_msg);
}

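/*
 * Locking model for the functions below: each control socket is protected
 * by its own mutex (kcb->mtx, exposed to the socket layer through
 * ctl_getlock), while the global ctl_mtx protects the list of registered
 * controls and their kcb lists.  The ordering is socket lock first, then
 * ctl_mtx: ctl_mtx may be taken while a socket lock is held (ctl_connect),
 * but a socket lock is never taken while holding ctl_mtx (kcb_find_socket
 * drops ctl_mtx before calling socket_lock()).
 */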
static int
ctl_lock(struct socket *so, int refcount, void *lr)
{
	void *lr_saved;

	if (lr == NULL)
		lr_saved = __builtin_return_address(0);
	else
		lr_saved = lr;

	if (so->so_pcb != NULL) {
		lck_mtx_lock(((struct ctl_cb *)so->so_pcb)->mtx);
	} else {
		panic("ctl_lock: so=%p NO PCB! lr=%p lrh= %s\n",
		    so, lr_saved, solockhistory_nr(so));
		/* NOTREACHED */
	}

	if (so->so_usecount < 0) {
		panic("ctl_lock: so=%p so_pcb=%p lr=%p ref=%x lrh= %s\n",
		    so, so->so_pcb, lr_saved, so->so_usecount,
		    solockhistory_nr(so));
		/* NOTREACHED */
	}

	if (refcount)
		so->so_usecount++;

	so->lock_lr[so->next_lock_lr] = lr_saved;
	so->next_lock_lr = (so->next_lock_lr+1) % SO_LCKDBG_MAX;
	return (0);
}

static int
ctl_unlock(struct socket *so, int refcount, void *lr)
{
	void *lr_saved;
	lck_mtx_t *mutex_held;

	if (lr == NULL)
		lr_saved = __builtin_return_address(0);
	else
		lr_saved = lr;

#ifdef MORE_KCTLLOCK_DEBUG
	printf("ctl_unlock: so=%llx sopcb=%llx lock=%llx ref=%u lr=%llx\n",
	    (uint64_t)VM_KERNEL_ADDRPERM(so),
	    (uint64_t)VM_KERNEL_ADDRPERM(so->so_pcb),
	    (uint64_t)VM_KERNEL_ADDRPERM(((struct ctl_cb *)so->so_pcb)->mtx),
	    so->so_usecount, (uint64_t)VM_KERNEL_ADDRPERM(lr_saved));
#endif
	if (refcount)
		so->so_usecount--;

	if (so->so_usecount < 0) {
		panic("ctl_unlock: so=%p usecount=%x lrh= %s\n",
		    so, so->so_usecount, solockhistory_nr(so));
		/* NOTREACHED */
	}
	if (so->so_pcb == NULL) {
		panic("ctl_unlock: so=%p NO PCB usecount=%x lr=%p lrh= %s\n",
		    so, so->so_usecount, (void *)lr_saved,
		    solockhistory_nr(so));
		/* NOTREACHED */
	}
	mutex_held = ((struct ctl_cb *)so->so_pcb)->mtx;

	lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
	so->unlock_lr[so->next_unlock_lr] = lr_saved;
	so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX;
	lck_mtx_unlock(mutex_held);

	if (so->so_usecount == 0)
		ctl_sofreelastref(so);

	return (0);
}

static lck_mtx_t *
ctl_getlock(struct socket *so, int locktype)
{
#pragma unused(locktype)
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

	if (so->so_pcb) {
		if (so->so_usecount < 0)
			panic("ctl_getlock: so=%p usecount=%x lrh= %s\n",
			    so, so->so_usecount, solockhistory_nr(so));
		return (kcb->mtx);
	} else {
		panic("ctl_getlock: so=%p NULL NO so_pcb %s\n",
		    so, solockhistory_nr(so));
		return (so->so_proto->pr_domain->dom_mtx);
	}
}

__private_extern__ int
kctl_reg_list SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error = 0;
	int n, i;
	struct xsystmgen xsg;
	void *buf = NULL;
	struct kctl *kctl;
	size_t item_size = ROUNDUP64(sizeof (struct xkctl_reg));

	buf = _MALLOC(item_size, M_TEMP, M_WAITOK | M_ZERO);
	if (buf == NULL)
		return (ENOMEM);

	lck_mtx_lock(ctl_mtx);

	n = kctlstat.kcs_reg_count;

	if (req->oldptr == USER_ADDR_NULL) {
		req->oldidx = (n + n/8) * sizeof(struct xkctl_reg);
		goto done;
	}
	if (req->newptr != USER_ADDR_NULL) {
		error = EPERM;
		goto done;
	}
	bzero(&xsg, sizeof (xsg));
	xsg.xg_len = sizeof (xsg);
	xsg.xg_count = n;
	xsg.xg_gen = kctlstat.kcs_gencnt;
	xsg.xg_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xsg, sizeof (xsg));
	if (error) {
		goto done;
	}
	/*
	 * We are done if there is no registered control
	 */
	if (n == 0) {
		goto done;
	}

	i = 0;
	for (i = 0, kctl = TAILQ_FIRST(&ctl_head);
	    i < n && kctl != NULL;
	    i++, kctl = TAILQ_NEXT(kctl, next)) {
		struct xkctl_reg *xkr = (struct xkctl_reg *)buf;
		struct ctl_cb *kcb;
		u_int32_t pcbcount = 0;

		TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
			pcbcount++;

		bzero(buf, item_size);

		xkr->xkr_len = sizeof(struct xkctl_reg);
		xkr->xkr_kind = XSO_KCREG;
		xkr->xkr_id = kctl->id;
		xkr->xkr_reg_unit = kctl->reg_unit;
		xkr->xkr_flags = kctl->flags;
		xkr->xkr_kctlref = (uint64_t)VM_KERNEL_ADDRPERM(kctl);
		xkr->xkr_recvbufsize = kctl->recvbufsize;
		xkr->xkr_sendbufsize = kctl->sendbufsize;
		xkr->xkr_lastunit = kctl->lastunit;
		xkr->xkr_pcbcount = pcbcount;
		xkr->xkr_connect = (uint64_t)VM_KERNEL_ADDRPERM(kctl->connect);
		xkr->xkr_disconnect =
		    (uint64_t)VM_KERNEL_ADDRPERM(kctl->disconnect);
		xkr->xkr_send = (uint64_t)VM_KERNEL_ADDRPERM(kctl->send);
		xkr->xkr_send_list =
		    (uint64_t)VM_KERNEL_ADDRPERM(kctl->send_list);
		xkr->xkr_setopt = (uint64_t)VM_KERNEL_ADDRPERM(kctl->setopt);
		xkr->xkr_getopt = (uint64_t)VM_KERNEL_ADDRPERM(kctl->getopt);
		xkr->xkr_rcvd = (uint64_t)VM_KERNEL_ADDRPERM(kctl->rcvd);
		strlcpy(xkr->xkr_name, kctl->name, sizeof(xkr->xkr_name));

		error = SYSCTL_OUT(req, buf, item_size);
	}

	if (error == 0) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		bzero(&xsg, sizeof (xsg));
		xsg.xg_len = sizeof (xsg);
		xsg.xg_count = n;
		xsg.xg_gen = kctlstat.kcs_gencnt;
		xsg.xg_sogen = so_gencnt;
		error = SYSCTL_OUT(req, &xsg, sizeof (xsg));
		if (error) {
			goto done;
		}
	}

done:
	lck_mtx_unlock(ctl_mtx);

	if (buf != NULL)
		FREE(buf, M_TEMP);

	return (error);
}

__private_extern__ int
kctl_pcblist SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error = 0;
	int n, i;
	struct xsystmgen xsg;
	void *buf = NULL;
	struct kctl *kctl;
	size_t item_size = ROUNDUP64(sizeof (struct xkctlpcb)) +
	    ROUNDUP64(sizeof (struct xsocket_n)) +
	    2 * ROUNDUP64(sizeof (struct xsockbuf_n)) +
	    ROUNDUP64(sizeof (struct xsockstat_n));

	buf = _MALLOC(item_size, M_TEMP, M_WAITOK | M_ZERO);
	if (buf == NULL)
		return (ENOMEM);

	lck_mtx_lock(ctl_mtx);

	n = kctlstat.kcs_pcbcount;

	if (req->oldptr == USER_ADDR_NULL) {
		req->oldidx = (n + n/8) * item_size;
		goto done;
	}
	if (req->newptr != USER_ADDR_NULL) {
		error = EPERM;
		goto done;
	}
	bzero(&xsg, sizeof (xsg));
	xsg.xg_len = sizeof (xsg);
	xsg.xg_count = n;
	xsg.xg_gen = kctlstat.kcs_gencnt;
	xsg.xg_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xsg, sizeof (xsg));
	if (error) {
		goto done;
	}
	/*
	 * We are done if there is no pcb
	 */
	if (n == 0) {
		goto done;
	}

	i = 0;
	for (i = 0, kctl = TAILQ_FIRST(&ctl_head);
	    i < n && kctl != NULL;
	    kctl = TAILQ_NEXT(kctl, next)) {
		struct ctl_cb *kcb;

		for (kcb = TAILQ_FIRST(&kctl->kcb_head);
		    i < n && kcb != NULL;
		    i++, kcb = TAILQ_NEXT(kcb, next)) {
			struct xkctlpcb *xk = (struct xkctlpcb *)buf;
			struct xsocket_n *xso = (struct xsocket_n *)
			    ADVANCE64(xk, sizeof (*xk));
			struct xsockbuf_n *xsbrcv = (struct xsockbuf_n *)
			    ADVANCE64(xso, sizeof (*xso));
			struct xsockbuf_n *xsbsnd = (struct xsockbuf_n *)
			    ADVANCE64(xsbrcv, sizeof (*xsbrcv));
			struct xsockstat_n *xsostats = (struct xsockstat_n *)
			    ADVANCE64(xsbsnd, sizeof (*xsbsnd));

			bzero(buf, item_size);

			xk->xkp_len = sizeof(struct xkctlpcb);
			xk->xkp_kind = XSO_KCB;
			xk->xkp_unit = kcb->unit;
			xk->xkp_kctpcb = (uint64_t)VM_KERNEL_ADDRPERM(kcb);
			xk->xkp_kctlref = (uint64_t)VM_KERNEL_ADDRPERM(kctl);
			xk->xkp_kctlid = kctl->id;
			strlcpy(xk->xkp_kctlname, kctl->name,
			    sizeof(xk->xkp_kctlname));

			sotoxsocket_n(kcb->so, xso);
			sbtoxsockbuf_n(kcb->so ?
			    &kcb->so->so_rcv : NULL, xsbrcv);
			sbtoxsockbuf_n(kcb->so ?
			    &kcb->so->so_snd : NULL, xsbsnd);
			sbtoxsockstat_n(kcb->so, xsostats);

			error = SYSCTL_OUT(req, buf, item_size);
		}
	}

	if (error == 0) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		bzero(&xsg, sizeof (xsg));
		xsg.xg_len = sizeof (xsg);
		xsg.xg_count = n;
		xsg.xg_gen = kctlstat.kcs_gencnt;
		xsg.xg_sogen = so_gencnt;
		error = SYSCTL_OUT(req, &xsg, sizeof (xsg));
		if (error) {
			goto done;
		}
	}

done:
	lck_mtx_unlock(ctl_mtx);

	return (error);
}

int
kctl_getstat SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error = 0;

	lck_mtx_lock(ctl_mtx);

	if (req->newptr != USER_ADDR_NULL) {
		error = EPERM;
		goto done;
	}
	if (req->oldptr == USER_ADDR_NULL) {
		req->oldidx = sizeof(struct kctlstat);
		goto done;
	}

	error = SYSCTL_OUT(req, &kctlstat,
	    MIN(sizeof(struct kctlstat), req->oldlen));
done:
	lck_mtx_unlock(ctl_mtx);
	return (error);
}