1/*
2 * Copyright (c) 1999-2017 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29/*
30 * Kernel Control domain - allows control connections to
31 * kernel entities and to read/write data.
32 *
33 * Vincent Lubet, 040506
34 * Christophe Allie, 010928
35 * Justin C. Walker, 990319
36 */
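/*
 * A minimal sketch of a user-space client of this domain (illustrative
 * only, kept out of the build; the control name "com.example.echo" is a
 * hypothetical placeholder, not a control registered by this file):
 */
#if 0 /* example only */
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/sys_domain.h>
#include <sys/kern_control.h>
#include <string.h>
#include <unistd.h>

static int
open_example_control(void)
{
	struct ctl_info info;
	struct sockaddr_ctl addr;
	int fd;

	fd = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);
	if (fd < 0)
		return (-1);

	/* Resolve the control name to its dynamically assigned id */
	memset(&info, 0, sizeof(info));
	strlcpy(info.ctl_name, "com.example.echo", sizeof(info.ctl_name));
	if (ioctl(fd, CTLIOCGINFO, &info) == -1) {
		close(fd);
		return (-1);
	}

	memset(&addr, 0, sizeof(addr));
	addr.sc_len = sizeof(addr);
	addr.sc_family = AF_SYSTEM;
	addr.ss_sysaddr = AF_SYS_CONTROL;
	addr.sc_id = info.ctl_id;
	addr.sc_unit = 0;	/* 0 lets ctl_setup_kctl() pick a unit */

	/* Drives ctl_connect() below; send()/recv() then move data */
	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) == -1) {
		close(fd);
		return (-1);
	}
	return (fd);
}
#endif /* example only */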
37
38#include <sys/types.h>
39#include <sys/param.h>
40#include <sys/systm.h>
41#include <sys/syslog.h>
42#include <sys/socket.h>
43#include <sys/socketvar.h>
44#include <sys/protosw.h>
45#include <sys/domain.h>
46#include <sys/malloc.h>
47#include <sys/mbuf.h>
48#include <sys/sys_domain.h>
49#include <sys/kern_event.h>
50#include <sys/kern_control.h>
51#include <sys/kauth.h>
52#include <sys/sysctl.h>
53#include <sys/proc_info.h>
54#include <net/if_var.h>
55
56#include <mach/vm_types.h>
57
58#include <kern/thread.h>
59
60struct kctl {
61 TAILQ_ENTRY(kctl) next; /* controller chain */
62 kern_ctl_ref kctlref;
63
64 /* controller information provided when registering */
65 char name[MAX_KCTL_NAME]; /* unique identifier */
66 u_int32_t id;
67 u_int32_t reg_unit;
68
69 /* misc communication information */
70 u_int32_t flags; /* support flags */
71 u_int32_t recvbufsize; /* request more than the default buffer size */
72 u_int32_t sendbufsize; /* request more than the default buffer size */
73
74 /* Dispatch functions */
75 ctl_bind_func bind; /* Prepare contact */
76 ctl_connect_func connect; /* Make contact */
77 ctl_disconnect_func disconnect; /* Break contact */
78 ctl_send_func send; /* Send data to nke */
79 ctl_send_list_func send_list; /* Send list of packets */
80 ctl_setopt_func setopt; /* set kctl configuration */
81 ctl_getopt_func getopt; /* get kctl configuration */
82 ctl_rcvd_func rcvd; /* Notify nke when client reads data */
83
84 TAILQ_HEAD(, ctl_cb) kcb_head;
85 u_int32_t lastunit;
86};
87
88struct ctl_cb {
89 TAILQ_ENTRY(ctl_cb) next; /* controller chain */
90 lck_mtx_t *mtx;
91 struct socket *so; /* controlling socket */
92 struct kctl *kctl; /* back pointer to controller */
93 void *userdata;
94 struct sockaddr_ctl sac;
95 u_int32_t usecount;
96};
97
98#ifndef ROUNDUP64
99#define ROUNDUP64(x) P2ROUNDUP((x), sizeof (u_int64_t))
100#endif
101
102#ifndef ADVANCE64
103#define ADVANCE64(p, n) (void*)((char *)(p) + ROUNDUP64(n))
104#endif
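/*
 * For example, ROUNDUP64(12) == 16, so ADVANCE64(p, 12) advances p by
 * 16 bytes; kctl_pcblist() below relies on this to keep the xkctlpcb,
 * xsocket_n and xsockbuf_n records it packs back-to-back 8-byte aligned.
 */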
105
106/*
107 * Definitions for the default buffer sizes we support
108 */
109
110#define CTL_SENDSIZE (2 * 1024) /* default buffer size */
111#define CTL_RECVSIZE (8 * 1024) /* default buffer size */
112
113/*
114 * Definitions and vars for the controls we support
115 */
116
117static u_int32_t ctl_maxunit = 65536;
118static lck_grp_attr_t *ctl_lck_grp_attr = 0;
119static lck_attr_t *ctl_lck_attr = 0;
120static lck_grp_t *ctl_lck_grp = 0;
121static lck_mtx_t *ctl_mtx;
122
123/* all the controllers are chained */
124TAILQ_HEAD(kctl_list, kctl) ctl_head;
125
126static int ctl_attach(struct socket *, int, struct proc *);
127static int ctl_detach(struct socket *);
128static int ctl_sofreelastref(struct socket *so);
129static int ctl_bind(struct socket *, struct sockaddr *, struct proc *);
130static int ctl_connect(struct socket *, struct sockaddr *, struct proc *);
131static int ctl_disconnect(struct socket *);
132static int ctl_ioctl(struct socket *so, u_long cmd, caddr_t data,
133 struct ifnet *ifp, struct proc *p);
134static int ctl_send(struct socket *, int, struct mbuf *,
135 struct sockaddr *, struct mbuf *, struct proc *);
136static int ctl_send_list(struct socket *, int, struct mbuf *,
137 struct sockaddr *, struct mbuf *, struct proc *);
138static int ctl_ctloutput(struct socket *, struct sockopt *);
139static int ctl_peeraddr(struct socket *so, struct sockaddr **nam);
140static int ctl_usr_rcvd(struct socket *so, int flags);
141
142static struct kctl *ctl_find_by_name(const char *);
143static struct kctl *ctl_find_by_id_unit(u_int32_t id, u_int32_t unit);
144
145static struct socket *kcb_find_socket(kern_ctl_ref kctlref, u_int32_t unit,
146 u_int32_t *);
147static struct ctl_cb *kcb_find(struct kctl *, u_int32_t unit);
148static void ctl_post_msg(u_int32_t event_code, u_int32_t id);
149
150static int ctl_lock(struct socket *, int, void *);
151static int ctl_unlock(struct socket *, int, void *);
152static lck_mtx_t * ctl_getlock(struct socket *, int);
153
154static struct pr_usrreqs ctl_usrreqs = {
155 .pru_attach = ctl_attach,
156 .pru_bind = ctl_bind,
157 .pru_connect = ctl_connect,
158 .pru_control = ctl_ioctl,
159 .pru_detach = ctl_detach,
160 .pru_disconnect = ctl_disconnect,
161 .pru_peeraddr = ctl_peeraddr,
162 .pru_rcvd = ctl_usr_rcvd,
163 .pru_send = ctl_send,
164 .pru_send_list = ctl_send_list,
165 .pru_sosend = sosend,
166 .pru_sosend_list = sosend_list,
167 .pru_soreceive = soreceive,
168 .pru_soreceive_list = soreceive_list,
169};
170
171static struct protosw kctlsw[] = {
172 {
173 .pr_type = SOCK_DGRAM,
174 .pr_protocol = SYSPROTO_CONTROL,
175 .pr_flags = PR_ATOMIC | PR_CONNREQUIRED | PR_PCBLOCK | PR_WANTRCVD,
176 .pr_ctloutput = ctl_ctloutput,
177 .pr_usrreqs = &ctl_usrreqs,
178 .pr_lock = ctl_lock,
179 .pr_unlock = ctl_unlock,
180 .pr_getlock = ctl_getlock,
181 },
182 {
183 .pr_type = SOCK_STREAM,
184 .pr_protocol = SYSPROTO_CONTROL,
185 .pr_flags = PR_CONNREQUIRED | PR_PCBLOCK | PR_WANTRCVD,
186 .pr_ctloutput = ctl_ctloutput,
187 .pr_usrreqs = &ctl_usrreqs,
188 .pr_lock = ctl_lock,
189 .pr_unlock = ctl_unlock,
190 .pr_getlock = ctl_getlock,
191 }
192};
193
194__private_extern__ int kctl_reg_list SYSCTL_HANDLER_ARGS;
195__private_extern__ int kctl_pcblist SYSCTL_HANDLER_ARGS;
196__private_extern__ int kctl_getstat SYSCTL_HANDLER_ARGS;
197
198
199SYSCTL_NODE(_net_systm, OID_AUTO, kctl,
200 CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Kernel control family");
201
202struct kctlstat kctlstat;
203SYSCTL_PROC(_net_systm_kctl, OID_AUTO, stats,
204 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
205 kctl_getstat, "S,kctlstat", "");
206
207SYSCTL_PROC(_net_systm_kctl, OID_AUTO, reg_list,
208 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
209 kctl_reg_list, "S,xkctl_reg", "");
210
211SYSCTL_PROC(_net_systm_kctl, OID_AUTO, pcblist,
212 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
213 kctl_pcblist, "S,xkctlpcb", "");
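/*
 * These surface in user space as net.systm.kctl.{stats,reg_list,pcblist}.
 * A sketch of reading the stats from user space (assuming only the
 * sysctl names declared above):
 *
 *	struct kctlstat ks;
 *	size_t len = sizeof(ks);
 *
 *	if (sysctlbyname("net.systm.kctl.stats", &ks, &len, NULL, 0) == 0)
 *		printf("%llu kctl pcbs\n", (unsigned long long)ks.kcs_pcbcount);
 */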
214
215u_int32_t ctl_autorcvbuf_max = 256 * 1024;
216SYSCTL_INT(_net_systm_kctl, OID_AUTO, autorcvbufmax,
217 CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_autorcvbuf_max, 0, "");
218
219u_int32_t ctl_autorcvbuf_high = 0;
220SYSCTL_INT(_net_systm_kctl, OID_AUTO, autorcvbufhigh,
221 CTLFLAG_RD | CTLFLAG_LOCKED, &ctl_autorcvbuf_high, 0, "");
222
223u_int32_t ctl_debug = 0;
224SYSCTL_INT(_net_systm_kctl, OID_AUTO, debug,
225 CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_debug, 0, "");
226
227#define KCTL_TBL_INC 16
228
229static uintptr_t kctl_tbl_size = 0;
230static u_int32_t kctl_tbl_growing = 0;
231static u_int32_t kctl_tbl_growing_waiting = 0;
232static uintptr_t kctl_tbl_count = 0;
233static struct kctl **kctl_table = NULL;
234static uintptr_t kctl_ref_gencnt = 0;
235
236static void kctl_tbl_grow(void);
237static kern_ctl_ref kctl_make_ref(struct kctl *kctl);
238static void kctl_delete_ref(kern_ctl_ref);
239static struct kctl *kctl_from_ref(kern_ctl_ref);
240
241/*
242 * Install the protosw's for the Kernel Control manager.
243 */
244__private_extern__ void
245kern_control_init(struct domain *dp)
246{
247 struct protosw *pr;
248 int i;
249 int kctl_proto_count = (sizeof(kctlsw) / sizeof(struct protosw));
250
251 VERIFY(!(dp->dom_flags & DOM_INITIALIZED));
252 VERIFY(dp == systemdomain);
253
254 ctl_lck_grp_attr = lck_grp_attr_alloc_init();
255 if (ctl_lck_grp_attr == NULL) {
256 panic("%s: lck_grp_attr_alloc_init failed\n", __func__);
257 /* NOTREACHED */
258 }
259
260 ctl_lck_grp = lck_grp_alloc_init("Kernel Control Protocol",
261 ctl_lck_grp_attr);
262 if (ctl_lck_grp == NULL) {
263 panic("%s: lck_grp_alloc_init failed\n", __func__);
264 /* NOTREACHED */
265 }
266
267 ctl_lck_attr = lck_attr_alloc_init();
268 if (ctl_lck_attr == NULL) {
269 panic("%s: lck_attr_alloc_init failed\n", __func__);
270 /* NOTREACHED */
271 }
272
273 ctl_mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
274 if (ctl_mtx == NULL) {
275 panic("%s: lck_mtx_alloc_init failed\n", __func__);
276 /* NOTREACHED */
277 }
278 TAILQ_INIT(&ctl_head);
279
280 for (i = 0, pr = &kctlsw[0]; i < kctl_proto_count; i++, pr++) {
281 net_add_proto(pr, dp, 1);
282 }
283}
284
285static void
286kcb_delete(struct ctl_cb *kcb)
287{
288 if (kcb != 0) {
289 if (kcb->mtx != 0) {
290 lck_mtx_free(kcb->mtx, ctl_lck_grp);
291 }
292 FREE(kcb, M_TEMP);
293 }
294}
295
296/*
297 * Kernel Controller user-request functions
298 * - the attach function must exist and succeed
299 * - detach is not necessary
300 * - we need a pcb for the per-socket mutex
301 */
302static int
303ctl_attach(struct socket *so, int proto, struct proc *p)
304{
305#pragma unused(proto, p)
306 int error = 0;
307 struct ctl_cb *kcb = 0;
308
309 MALLOC(kcb, struct ctl_cb *, sizeof(struct ctl_cb), M_TEMP, M_WAITOK);
310 if (kcb == NULL) {
311 error = ENOMEM;
312 goto quit;
313 }
314 bzero(kcb, sizeof(struct ctl_cb));
315
316 kcb->mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
317 if (kcb->mtx == NULL) {
318 error = ENOMEM;
319 goto quit;
320 }
321 kcb->so = so;
322 so->so_pcb = (caddr_t)kcb;
323
324quit:
325 if (error != 0) {
326 kcb_delete(kcb);
327 kcb = 0;
328 }
329 return error;
330}
331
332static int
333ctl_sofreelastref(struct socket *so)
334{
335 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
336
337 so->so_pcb = 0;
338
339 if (kcb != 0) {
340 struct kctl *kctl;
341 if ((kctl = kcb->kctl) != 0) {
342 lck_mtx_lock(ctl_mtx);
343 TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
344 kctlstat.kcs_pcbcount--;
345 kctlstat.kcs_gencnt++;
346 lck_mtx_unlock(ctl_mtx);
347 }
348 kcb_delete(kcb);
349 }
350 sofreelastref(so, 1);
351 return 0;
352}
353
354static int
355ctl_detach(struct socket *so)
356{
357 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
358
359 if (kcb == 0) {
360 return 0;
361 }
362
363 if (kcb->kctl != NULL && kcb->kctl->bind != NULL &&
364 kcb->userdata != NULL && !(so->so_state & SS_ISCONNECTED)) {
365 // The unit was bound, but not connected
366 // Invoke the disconnected call to cleanup
367 if (kcb->kctl->disconnect != NULL) {
368 socket_unlock(so, 0);
369 (*kcb->kctl->disconnect)(kcb->kctl->kctlref,
370 kcb->sac.sc_unit, kcb->userdata);
371 socket_lock(so, 0);
372 }
373 }
374
375 soisdisconnected(so);
376 so->so_flags |= SOF_PCBCLEARING;
377 return 0;
378}
379
380static int
381ctl_setup_kctl(struct socket *so, struct sockaddr *nam, struct proc *p)
382{
383 struct kctl *kctl = NULL;
384 int error = 0;
385 struct sockaddr_ctl sa;
386 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
387 struct ctl_cb *kcb_next = NULL;
388 u_quad_t sbmaxsize;
389 u_int32_t recvbufsize, sendbufsize;
390
391 if (kcb == 0) {
392 panic("ctl_setup_kctl so_pcb null\n");
393 }
394
395 if (kcb->kctl != NULL) {
396 // Already set up, skip
397 return 0;
398 }
399
400 if (nam->sa_len != sizeof(struct sockaddr_ctl)) {
401 return EINVAL;
402 }
403
404 bcopy(nam, &sa, sizeof(struct sockaddr_ctl));
405
406 lck_mtx_lock(ctl_mtx);
407 kctl = ctl_find_by_id_unit(sa.sc_id, sa.sc_unit);
408 if (kctl == NULL) {
409 lck_mtx_unlock(ctl_mtx);
410 return ENOENT;
411 }
412
413 if (((kctl->flags & CTL_FLAG_REG_SOCK_STREAM) &&
414 (so->so_type != SOCK_STREAM)) ||
415 (!(kctl->flags & CTL_FLAG_REG_SOCK_STREAM) &&
416 (so->so_type != SOCK_DGRAM))) {
417 lck_mtx_unlock(ctl_mtx);
418 return EPROTOTYPE;
419 }
420
421 if (kctl->flags & CTL_FLAG_PRIVILEGED) {
422 if (p == 0) {
423 lck_mtx_unlock(ctl_mtx);
424 return EINVAL;
425 }
426 if (kauth_cred_issuser(kauth_cred_get()) == 0) {
427 lck_mtx_unlock(ctl_mtx);
428 return EPERM;
429 }
430 }
431
432 if ((kctl->flags & CTL_FLAG_REG_ID_UNIT) || sa.sc_unit != 0) {
433 if (kcb_find(kctl, sa.sc_unit) != NULL) {
434 lck_mtx_unlock(ctl_mtx);
435 return EBUSY;
436 }
437 } else {
438 /* Find an unused ID, assumes control IDs are in order */
439 u_int32_t unit = 1;
440
441 TAILQ_FOREACH(kcb_next, &kctl->kcb_head, next) {
442 if (kcb_next->sac.sc_unit > unit) {
443 /* Found a gap, lets fill it in */
444 break;
445 }
446 unit = kcb_next->sac.sc_unit + 1;
447 if (unit == ctl_maxunit) {
448 break;
449 }
450 }
451
452 if (unit == ctl_maxunit) {
453 lck_mtx_unlock(ctl_mtx);
454 return EBUSY;
455 }
456
457 sa.sc_unit = unit;
458 }
459
460 bcopy(&sa, &kcb->sac, sizeof(struct sockaddr_ctl));
461 kcb->kctl = kctl;
462 if (kcb_next != NULL) {
463 TAILQ_INSERT_BEFORE(kcb_next, kcb, next);
464 } else {
465 TAILQ_INSERT_TAIL(&kctl->kcb_head, kcb, next);
466 }
467 kctlstat.kcs_pcbcount++;
468 kctlstat.kcs_gencnt++;
469 kctlstat.kcs_connections++;
470 lck_mtx_unlock(ctl_mtx);
471
472 /*
473 * rdar://15526688: Limit the send and receive sizes to sb_max
474 * by using the same scaling as sbreserve()
475 */
476 sbmaxsize = (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES);
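	/*
	 * E.g. with the usual MSIZE of 256 and MCLBYTES of 2048 (the
	 * values are platform-dependent), this caps both sizes at 8/9
	 * of sb_max, mirroring the mbuf-overhead allowance that
	 * sbreserve() applies.
	 */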
477
478 if (kctl->sendbufsize > sbmaxsize) {
479 sendbufsize = sbmaxsize;
480 } else {
481 sendbufsize = kctl->sendbufsize;
482 }
483
484 if (kctl->recvbufsize > sbmaxsize) {
485 recvbufsize = sbmaxsize;
486 } else {
487 recvbufsize = kctl->recvbufsize;
488 }
489
490 error = soreserve(so, sendbufsize, recvbufsize);
491 if (error) {
492 if (ctl_debug) {
493 printf("%s - soreserve(%llx, %u, %u) error %d\n",
494 __func__, (uint64_t)VM_KERNEL_ADDRPERM(so),
495 sendbufsize, recvbufsize, error);
496 }
497 goto done;
498 }
499
500done:
501 if (error) {
502 soisdisconnected(so);
503 lck_mtx_lock(ctl_mtx);
504 TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
505 kcb->kctl = NULL;
506 kcb->sac.sc_unit = 0;
507 kctlstat.kcs_pcbcount--;
508 kctlstat.kcs_gencnt++;
509 kctlstat.kcs_conn_fail++;
510 lck_mtx_unlock(ctl_mtx);
511 }
512 return error;
513}
514
515static int
516ctl_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
517{
518 int error = 0;
519 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
520
521 if (kcb == NULL) {
522 panic("ctl_bind so_pcb null\n");
523 }
524
525 error = ctl_setup_kctl(so, nam, p);
526 if (error) {
527 return error;
528 }
529
530 if (kcb->kctl == NULL) {
531 panic("ctl_bind kctl null\n");
532 }
533
534 if (kcb->kctl->bind == NULL) {
535 return EINVAL;
536 }
537
538 socket_unlock(so, 0);
539 error = (*kcb->kctl->bind)(kcb->kctl->kctlref, &kcb->sac, &kcb->userdata);
540 socket_lock(so, 0);
541
542 return error;
543}
544
545static int
546ctl_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
547{
548 int error = 0;
549 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
550
551 if (kcb == NULL) {
552 panic("ctl_connect so_pcb null\n");
553 }
554
555 error = ctl_setup_kctl(so, nam, p);
556 if (error) {
557 return error;
558 }
559
560 if (kcb->kctl == NULL) {
561 panic("ctl_connect kctl null\n");
562 }
563
564 soisconnecting(so);
565 socket_unlock(so, 0);
566 error = (*kcb->kctl->connect)(kcb->kctl->kctlref, &kcb->sac, &kcb->userdata);
567 socket_lock(so, 0);
568 if (error) {
569 goto end;
570 }
571 soisconnected(so);
572
573end:
574 if (error && kcb->kctl->disconnect) {
575 /*
576 * XXX Do not check the return value of disconnect
577 * here.
578 * ipsec/utun_ctl_disconnect will return an error when
579 * disconnect gets called after a connect failure.
580 * If we ever decide to check the disconnect return
581 * value here, make sure to revisit
582 * ipsec/utun_ctl_disconnect first.
583 */
584 socket_unlock(so, 0);
585 (*kcb->kctl->disconnect)(kcb->kctl->kctlref, kcb->sac.sc_unit, kcb->userdata);
586 socket_lock(so, 0);
587 }
588 if (error) {
589 soisdisconnected(so);
590 lck_mtx_lock(ctl_mtx);
591 TAILQ_REMOVE(&kcb->kctl->kcb_head, kcb, next);
592 kcb->kctl = NULL;
593 kcb->sac.sc_unit = 0;
594 kctlstat.kcs_pcbcount--;
595 kctlstat.kcs_gencnt++;
596 kctlstat.kcs_conn_fail++;
597 lck_mtx_unlock(ctl_mtx);
598 }
599 return error;
600}
601
602static int
603ctl_disconnect(struct socket *so)
604{
605 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
606
607 if (kcb != NULL) {
608 struct kctl *kctl = kcb->kctl;
609
610 if (kctl && kctl->disconnect) {
611 socket_unlock(so, 0);
612 (*kctl->disconnect)(kctl->kctlref, kcb->sac.sc_unit,
613 kcb->userdata);
614 socket_lock(so, 0);
615 }
616
617 soisdisconnected(so);
618
619 socket_unlock(so, 0);
620 lck_mtx_lock(ctl_mtx);
621 kcb->kctl = 0;
622 kcb->sac.sc_unit = 0;
623 while (kcb->usecount != 0) {
624 msleep(&kcb->usecount, ctl_mtx, 0, "kcb->usecount", 0);
625 }
626 TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
627 kctlstat.kcs_pcbcount--;
628 kctlstat.kcs_gencnt++;
629 lck_mtx_unlock(ctl_mtx);
630 socket_lock(so, 0);
631 }
632 return 0;
633}
634
635static int
636ctl_peeraddr(struct socket *so, struct sockaddr **nam)
637{
638 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
639 struct kctl *kctl;
640 struct sockaddr_ctl sc;
641
642 if (kcb == NULL) { /* sanity check */
643 return ENOTCONN;
644 }
645
646 if ((kctl = kcb->kctl) == NULL) {
647 return EINVAL;
648 }
649
650 bzero(&sc, sizeof(struct sockaddr_ctl));
651 sc.sc_len = sizeof(struct sockaddr_ctl);
652 sc.sc_family = AF_SYSTEM;
653 sc.ss_sysaddr = AF_SYS_CONTROL;
654 sc.sc_id = kctl->id;
655 sc.sc_unit = kcb->sac.sc_unit;
656
657 *nam = dup_sockaddr((struct sockaddr *)&sc, 1);
658
659 return 0;
660}
661
662static void
663ctl_sbrcv_trim(struct socket *so)
664{
665 struct sockbuf *sb = &so->so_rcv;
666
667 if (sb->sb_hiwat > sb->sb_idealsize) {
668 u_int32_t diff;
669 int32_t trim;
670
671 /*
672 * The difference between the ideal size and the
673 * current size is the upper bound on how much to trim
674 */
675 diff = sb->sb_hiwat - sb->sb_idealsize;
676 /*
677 * We cannot trim below the outstanding data
678 */
679 trim = sb->sb_hiwat - sb->sb_cc;
680
681 trim = imin(trim, (int32_t)diff);
682
683 if (trim > 0) {
684 sbreserve(sb, (sb->sb_hiwat - trim));
685
686 if (ctl_debug) {
687 printf("%s - shrunk to %d\n",
688 __func__, sb->sb_hiwat);
689 }
690 }
691 }
692}
693
694static int
695ctl_usr_rcvd(struct socket *so, int flags)
696{
697 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
698 struct kctl *kctl;
699
700 if (kcb == NULL || (kctl = kcb->kctl) == NULL) {
701 return EINVAL;
702 }
703
704 if (kctl->rcvd) {
705 socket_unlock(so, 0);
706 (*kctl->rcvd)(kctl->kctlref, kcb->sac.sc_unit, kcb->userdata, flags);
707 socket_lock(so, 0);
708 }
709
710 ctl_sbrcv_trim(so);
711
712 return 0;
713}
714
715static int
716ctl_send(struct socket *so, int flags, struct mbuf *m,
717 struct sockaddr *addr, struct mbuf *control,
718 struct proc *p)
719{
720#pragma unused(addr, p)
721 int error = 0;
722 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
723 struct kctl *kctl;
724
725 if (control) {
726 m_freem(control);
727 }
728
729 if (kcb == NULL) { /* sanity check */
730 error = ENOTCONN;
731 }
732
733 if (error == 0 && (kctl = kcb->kctl) == NULL) {
734 error = EINVAL;
735 }
736
737 if (error == 0 && kctl->send) {
738 so_tc_update_stats(m, so, m_get_service_class(m));
739 socket_unlock(so, 0);
740 error = (*kctl->send)(kctl->kctlref, kcb->sac.sc_unit, kcb->userdata,
741 m, flags);
742 socket_lock(so, 0);
743 } else {
744 m_freem(m);
745 if (error == 0) {
746 error = ENOTSUP;
747 }
748 }
749 if (error != 0) {
750 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_send_fail);
751 }
752 return error;
753}
754
755static int
756ctl_send_list(struct socket *so, int flags, struct mbuf *m,
757 __unused struct sockaddr *addr, struct mbuf *control,
758 __unused struct proc *p)
759{
760 int error = 0;
761 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
762 struct kctl *kctl;
763
764 if (control) {
765 m_freem_list(control);
766 }
767
768 if (kcb == NULL) { /* sanity check */
769 error = ENOTCONN;
770 }
771
772 if (error == 0 && (kctl = kcb->kctl) == NULL) {
773 error = EINVAL;
774 }
775
776 if (error == 0 && kctl->send_list) {
777 struct mbuf *nxt;
778
779 for (nxt = m; nxt != NULL; nxt = nxt->m_nextpkt) {
780 so_tc_update_stats(nxt, so, m_get_service_class(nxt));
781 }
782
783 socket_unlock(so, 0);
784 error = (*kctl->send_list)(kctl->kctlref, kcb->sac.sc_unit,
785 kcb->userdata, m, flags);
786 socket_lock(so, 0);
787 } else if (error == 0 && kctl->send) {
788 while (m != NULL && error == 0) {
789 struct mbuf *nextpkt = m->m_nextpkt;
790
791 m->m_nextpkt = NULL;
792 so_tc_update_stats(m, so, m_get_service_class(m));
793 socket_unlock(so, 0);
794 error = (*kctl->send)(kctl->kctlref, kcb->sac.sc_unit,
795 kcb->userdata, m, flags);
796 socket_lock(so, 0);
797 m = nextpkt;
798 }
799 if (m != NULL) {
800 m_freem_list(m);
801 }
802 } else {
803 m_freem_list(m);
804 if (error == 0) {
805 error = ENOTSUP;
806 }
807 }
808 if (error != 0) {
809 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_send_list_fail);
810 }
811 return error;
812}
813
814static errno_t
815ctl_rcvbspace(struct socket *so, u_int32_t datasize,
816 u_int32_t kctlflags, u_int32_t flags)
817{
818 struct sockbuf *sb = &so->so_rcv;
819 u_int32_t space = sbspace(sb);
820 errno_t error;
821
822 if ((kctlflags & CTL_FLAG_REG_CRIT) == 0) {
823 if ((u_int32_t) space >= datasize) {
824 error = 0;
825 } else {
826 error = ENOBUFS;
827 }
828 } else if ((flags & CTL_DATA_CRIT) == 0) {
829 /*
830 * Reserve 25% for critical messages
831 */
832 if (space < (sb->sb_hiwat >> 2) ||
833 space < datasize) {
834 error = ENOBUFS;
835 } else {
836 error = 0;
837 }
838 } else {
839 u_int32_t autorcvbuf_max;
840
841 /*
842 * Allow overcommit of 25%
843 */
844 autorcvbuf_max = min(sb->sb_idealsize + (sb->sb_idealsize >> 2),
845 ctl_autorcvbuf_max);
846
847 if ((u_int32_t) space >= datasize) {
848 error = 0;
849 } else if (tcp_cansbgrow(sb) &&
850 sb->sb_hiwat < autorcvbuf_max) {
851 /*
852 * Grow with a little bit of leeway
853 */
854 u_int32_t grow = datasize - space + MSIZE;
855
856 if (sbreserve(sb,
857 min((sb->sb_hiwat + grow), autorcvbuf_max)) == 1) {
858 if (sb->sb_hiwat > ctl_autorcvbuf_high) {
859 ctl_autorcvbuf_high = sb->sb_hiwat;
860 }
861
862 /*
863 * A final check
864 */
865 if ((u_int32_t) sbspace(sb) >= datasize) {
866 error = 0;
867 } else {
868 error = ENOBUFS;
869 }
870
871 if (ctl_debug) {
872 printf("%s - grown to %d error %d\n",
873 __func__, sb->sb_hiwat, error);
874 }
875 } else {
876 error = ENOBUFS;
877 }
878 } else {
879 error = ENOBUFS;
880 }
881 }
882 return error;
883}
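/*
 * Worked example with hypothetical numbers: on a CTL_FLAG_REG_CRIT
 * control with sb_hiwat == 8192, a non-critical enqueue is refused
 * (ENOBUFS) once free space drops below 2048 bytes (the 25% reserve),
 * while a CTL_DATA_CRIT enqueue may additionally grow the buffer up to
 * sb_idealsize + 25%, capped by ctl_autorcvbuf_max.
 */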
884
885errno_t
886ctl_enqueuembuf(kern_ctl_ref kctlref, u_int32_t unit, struct mbuf *m,
887 u_int32_t flags)
888{
889 struct socket *so;
890 errno_t error = 0;
891 int len = m->m_pkthdr.len;
892 u_int32_t kctlflags;
893
894 so = kcb_find_socket(kctlref, unit, &kctlflags);
895 if (so == NULL) {
896 return EINVAL;
897 }
898
899 if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
900 error = ENOBUFS;
901 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
902 goto bye;
903 }
904 if ((flags & CTL_DATA_EOR)) {
905 m->m_flags |= M_EOR;
906 }
907
908 so_recv_data_stat(so, m, 0);
909 if (sbappend(&so->so_rcv, m) != 0) {
910 if ((flags & CTL_DATA_NOWAKEUP) == 0) {
911 sorwakeup(so);
912 }
913 } else {
914 error = ENOBUFS;
915 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
916 }
917bye:
918 if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) {
919 printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
920 __func__, error, len,
921 so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);
922 }
923
924 socket_unlock(so, 1);
925 if (error != 0) {
926 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
927 }
928
929 return error;
930}
931
932/*
933 * Compute the space occupied by an mbuf chain, as sbappendrecord() does
934 */
935static int
936m_space(struct mbuf *m)
937{
938 int space = 0;
939 struct mbuf *nxt;
940
941 for (nxt = m; nxt != NULL; nxt = nxt->m_next) {
942 space += nxt->m_len;
943 }
944
945 return space;
946}
947
948errno_t
949ctl_enqueuembuf_list(void *kctlref, u_int32_t unit, struct mbuf *m_list,
950 u_int32_t flags, struct mbuf **m_remain)
951{
952 struct socket *so = NULL;
953 errno_t error = 0;
954 struct mbuf *m, *nextpkt;
955 int needwakeup = 0;
956 int len = 0;
957 u_int32_t kctlflags;
958
959 /*
960 * Keep a pointer to the beginning of the list in case of early exit
961 */
962 m = m_list;
963
964 /*
965 * kcb_find_socket takes the socket lock with a reference
966 */
967 so = kcb_find_socket(kctlref, unit, &kctlflags);
968 if (so == NULL) {
969 error = EINVAL;
970 goto done;
971 }
972
973 if (kctlflags & CTL_FLAG_REG_SOCK_STREAM) {
974 error = EOPNOTSUPP;
975 goto done;
976 }
977 if (flags & CTL_DATA_EOR) {
978 error = EINVAL;
979 goto done;
980 }
981
982 for (m = m_list; m != NULL; m = nextpkt) {
983 nextpkt = m->m_nextpkt;
984
985 if (m->m_pkthdr.len == 0 && ctl_debug) {
986 printf("%s: %llx m_pkthdr.len is 0\n",
987 __func__, (uint64_t)VM_KERNEL_ADDRPERM(m));
988 }
989
990 /*
991 * The mbuf is either appended or freed by sbappendrecord(),
992 * so compute the space it occupies before handing it off
993 */
994 len = m_space(m);
995 if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
996 error = ENOBUFS;
997 OSIncrementAtomic64(
998 (SInt64 *)&kctlstat.kcs_enqueue_fullsock);
999 break;
1000 } else {
1001 /*
1002 * Unlink from the list, m is on its own
1003 */
1004 m->m_nextpkt = NULL;
1005 so_recv_data_stat(so, m, 0);
1006 if (sbappendrecord(&so->so_rcv, m) != 0) {
1007 needwakeup = 1;
1008 } else {
1009 /*
1010 * We free or return the remaining
1011 * mbufs in the list
1012 */
1013 m = nextpkt;
1014 error = ENOBUFS;
1015 OSIncrementAtomic64(
1016 (SInt64 *)&kctlstat.kcs_enqueue_fullsock);
1017 break;
1018 }
1019 }
1020 }
1021 if (needwakeup && (flags & CTL_DATA_NOWAKEUP) == 0) {
1022 sorwakeup(so);
1023 }
1024
1025done:
1026 if (so != NULL) {
1027 if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) {
1028 printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
1029 __func__, error, len,
1030 so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);
1031 }
1032
1033 socket_unlock(so, 1);
1034 }
1035 if (m_remain) {
1036 *m_remain = m;
1037
1038 if (m != NULL && socket_debug && so != NULL &&
1039 (so->so_options & SO_DEBUG)) {
1040 struct mbuf *n;
1041
1042 printf("%s m_list %llx\n", __func__,
1043 (uint64_t) VM_KERNEL_ADDRPERM(m_list));
1044 for (n = m; n != NULL; n = n->m_nextpkt) {
1045 printf(" remain %llx m_next %llx\n",
1046 (uint64_t) VM_KERNEL_ADDRPERM(n),
1047 (uint64_t) VM_KERNEL_ADDRPERM(n->m_next));
1048 }
1049 }
1050 } else {
1051 if (m != NULL) {
1052 m_freem_list(m);
1053 }
1054 }
1055 if (error != 0) {
1056 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
1057 }
1058 return error;
1059}
1060
1061errno_t
1062ctl_enqueuedata(void *kctlref, u_int32_t unit, void *data, size_t len,
1063 u_int32_t flags)
1064{
1065 struct socket *so;
1066 struct mbuf *m;
1067 errno_t error = 0;
1068 unsigned int num_needed;
1069 struct mbuf *n;
1070 size_t curlen = 0;
1071 u_int32_t kctlflags;
1072
1073 so = kcb_find_socket(kctlref, unit, &kctlflags);
1074 if (so == NULL) {
1075 return EINVAL;
1076 }
1077
1078 if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
1079 error = ENOBUFS;
1080 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
1081 goto bye;
1082 }
1083
1084 num_needed = 1;
1085 m = m_allocpacket_internal(&num_needed, len, NULL, M_NOWAIT, 1, 0);
1086 if (m == NULL) {
1087 kctlstat.kcs_enqdata_mb_alloc_fail++;
1088 if (ctl_debug) {
1089 printf("%s: m_allocpacket_internal(%lu) failed\n",
1090 __func__, len);
1091 }
1092 error = ENOMEM;
1093 goto bye;
1094 }
1095
1096 for (n = m; n != NULL; n = n->m_next) {
1097 size_t mlen = mbuf_maxlen(n);
1098
1099 if (mlen + curlen > len) {
1100 mlen = len - curlen;
1101 }
1102 n->m_len = mlen;
1103 bcopy((char *)data + curlen, n->m_data, mlen);
1104 curlen += mlen;
1105 }
1106 mbuf_pkthdr_setlen(m, curlen);
1107
1108 if ((flags & CTL_DATA_EOR)) {
1109 m->m_flags |= M_EOR;
1110 }
1111 so_recv_data_stat(so, m, 0);
1112 if (sbappend(&so->so_rcv, m) != 0) {
1113 if ((flags & CTL_DATA_NOWAKEUP) == 0) {
1114 sorwakeup(so);
1115 }
1116 } else {
1117 kctlstat.kcs_enqdata_sbappend_fail++;
1118 error = ENOBUFS;
1119 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
1120 }
1121
1122bye:
1123 if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) {
1124 printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
1125 __func__, error, (int)len,
1126 so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);
1127 }
1128
1129 socket_unlock(so, 1);
1130 if (error != 0) {
1131 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
1132 }
1133 return error;
1134}
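/*
 * Typical kext-side use (sketch; "ref" and "unit" are the values handed
 * to the connect callback, "buf"/"buflen" the caller's payload):
 *
 *	errno_t err = ctl_enqueuedata(ref, unit, buf, buflen, 0);
 *
 * On ENOBUFS the client is reading too slowly; a control registered
 * with CTL_FLAG_REG_EXTENDED can wait for its rcvd callback to fire
 * before retrying.
 */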
1135
1136errno_t
1137ctl_getenqueuepacketcount(kern_ctl_ref kctlref, u_int32_t unit, u_int32_t *pcnt)
1138{
1139 struct socket *so;
1140 u_int32_t cnt;
1141 struct mbuf *m1;
1142
1143 if (pcnt == NULL) {
1144 return EINVAL;
1145 }
1146
1147 so = kcb_find_socket(kctlref, unit, NULL);
1148 if (so == NULL) {
1149 return EINVAL;
1150 }
1151
1152 cnt = 0;
1153 m1 = so->so_rcv.sb_mb;
1154 while (m1 != NULL) {
1155 if (m1->m_type == MT_DATA ||
1156 m1->m_type == MT_HEADER ||
1157 m1->m_type == MT_OOBDATA) {
1158 cnt += 1;
1159 }
1160 m1 = m1->m_nextpkt;
1161 }
1162 *pcnt = cnt;
1163
1164 socket_unlock(so, 1);
1165
1166 return 0;
1167}
1168
1169errno_t
1170ctl_getenqueuespace(kern_ctl_ref kctlref, u_int32_t unit, size_t *space)
1171{
1172 struct socket *so;
1173 long avail;
1174
1175 if (space == NULL) {
1176 return EINVAL;
1177 }
1178
1179 so = kcb_find_socket(kctlref, unit, NULL);
1180 if (so == NULL) {
1181 return EINVAL;
1182 }
1183
1184 avail = sbspace(&so->so_rcv);
1185 *space = (avail < 0) ? 0 : avail;
1186 socket_unlock(so, 1);
1187
1188 return 0;
1189}
1190
1191errno_t
1192ctl_getenqueuereadable(kern_ctl_ref kctlref, u_int32_t unit,
1193 u_int32_t *difference)
1194{
1195 struct socket *so;
1196
1197 if (difference == NULL) {
1198 return EINVAL;
1199 }
1200
1201 so = kcb_find_socket(kctlref, unit, NULL);
1202 if (so == NULL) {
1203 return EINVAL;
1204 }
1205
1206 if (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat) {
1207 *difference = 0;
1208 } else {
1209 *difference = (so->so_rcv.sb_lowat - so->so_rcv.sb_cc);
1210 }
1211 socket_unlock(so, 1);
1212
1213 return 0;
1214}
1215
1216static int
1217ctl_ctloutput(struct socket *so, struct sockopt *sopt)
1218{
1219 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
1220 struct kctl *kctl;
1221 int error = 0;
1222 void *data = NULL;
1223 size_t len;
1224
1225 if (sopt->sopt_level != SYSPROTO_CONTROL) {
1226 return EINVAL;
1227 }
1228
1229 if (kcb == NULL) { /* sanity check */
1230 return ENOTCONN;
1231 }
1232
1233 if ((kctl = kcb->kctl) == NULL) {
1234 return EINVAL;
1235 }
1236
1237 switch (sopt->sopt_dir) {
1238 case SOPT_SET:
1239 if (kctl->setopt == NULL) {
1240 return ENOTSUP;
1241 }
1242 if (sopt->sopt_valsize != 0) {
1243 MALLOC(data, void *, sopt->sopt_valsize, M_TEMP,
1244 M_WAITOK | M_ZERO);
1245 if (data == NULL) {
1246 return ENOMEM;
1247 }
1248 error = sooptcopyin(sopt, data,
1249 sopt->sopt_valsize, sopt->sopt_valsize);
1250 }
1251 if (error == 0) {
1252 socket_unlock(so, 0);
1253 error = (*kctl->setopt)(kctl->kctlref,
1254 kcb->sac.sc_unit, kcb->userdata, sopt->sopt_name,
1255 data, sopt->sopt_valsize);
1256 socket_lock(so, 0);
1257 }
1258
1259 if (data != NULL) {
1260 FREE(data, M_TEMP);
1261 }
1262 break;
1263
1264 case SOPT_GET:
1265 if (kctl->getopt == NULL) {
1266 return ENOTSUP;
1267 }
1268
1269 if (sopt->sopt_valsize && sopt->sopt_val) {
1270 MALLOC(data, void *, sopt->sopt_valsize, M_TEMP,
1271 M_WAITOK | M_ZERO);
1272 if (data == NULL) {
1273 return ENOMEM;
1274 }
1275 /*
1276 * 4108337 - copy user data in case the
1277 * kernel control needs it
1278 */
1279 error = sooptcopyin(sopt, data,
1280 sopt->sopt_valsize, sopt->sopt_valsize);
1281 }
1282
1283 if (error == 0) {
1284 len = sopt->sopt_valsize;
1285 socket_unlock(so, 0);
1286 error = (*kctl->getopt)(kctl->kctlref, kcb->sac.sc_unit,
1287 kcb->userdata, sopt->sopt_name,
1288 data, &len);
1289 if (data != NULL && len > sopt->sopt_valsize) {
1290 panic_plain("ctl_ctloutput: ctl %s returned "
1291 "len (%lu) > sopt_valsize (%lu)\n",
1292 kcb->kctl->name, len,
1293 sopt->sopt_valsize);
1294 }
1295 socket_lock(so, 0);
1296 if (error == 0) {
1297 if (data != NULL) {
1298 error = sooptcopyout(sopt, data, len);
1299 } else {
1300 sopt->sopt_valsize = len;
1301 }
1302 }
1303 }
1304 if (data != NULL) {
1305 FREE(data, M_TEMP);
1306 }
1307 break;
1308 }
1309 return error;
1310}
1311
1312static int
1313ctl_ioctl(struct socket *so, u_long cmd, caddr_t data,
1314 struct ifnet *ifp, struct proc *p)
1315{
1316#pragma unused(so, ifp, p)
1317 int error = ENOTSUP;
1318
1319 switch (cmd) {
1320 /* get the number of controllers */
1321 case CTLIOCGCOUNT: {
1322 struct kctl *kctl;
1323 u_int32_t n = 0;
1324
1325 lck_mtx_lock(ctl_mtx);
1326 TAILQ_FOREACH(kctl, &ctl_head, next)
1327 n++;
1328 lck_mtx_unlock(ctl_mtx);
1329
1330 bcopy(&n, data, sizeof(n));
1331 error = 0;
1332 break;
1333 }
1334 case CTLIOCGINFO: {
1335 struct ctl_info ctl_info;
1336 struct kctl *kctl = 0;
1337 size_t name_len;
1338
1339 bcopy(data, &ctl_info, sizeof(ctl_info));
1340 name_len = strnlen(ctl_info.ctl_name, MAX_KCTL_NAME);
1341
1342 if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) {
1343 error = EINVAL;
1344 break;
1345 }
1346 lck_mtx_lock(ctl_mtx);
1347 kctl = ctl_find_by_name(ctl_info.ctl_name);
1348 lck_mtx_unlock(ctl_mtx);
1349 if (kctl == 0) {
1350 error = ENOENT;
1351 break;
1352 }
1353 ctl_info.ctl_id = kctl->id;
1354 bcopy(&ctl_info, data, sizeof(ctl_info));
1355 error = 0;
1356 break;
1357 }
1358
1359 /* add controls to get list of NKEs */
1360 }
1361
1362 return error;
1363}
1364
1365static void
1366kctl_tbl_grow(void)
1367{
1368 struct kctl **new_table;
1369 uintptr_t new_size;
1370
1371 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1372
1373 if (kctl_tbl_growing) {
1374 /* Another thread is allocating */
1375 kctl_tbl_growing_waiting++;
1376
1377 do {
1378 (void) msleep((caddr_t) &kctl_tbl_growing, ctl_mtx,
1379 PSOCK | PCATCH, "kctl_tbl_growing", 0);
1380 } while (kctl_tbl_growing);
1381 kctl_tbl_growing_waiting--;
1382 }
1383 /* Another thread grew the table */
1384 if (kctl_table != NULL && kctl_tbl_count < kctl_tbl_size) {
1385 return;
1386 }
1387
1388 /* Verify we have a sane size */
1389 if (kctl_tbl_size + KCTL_TBL_INC >= UINT16_MAX) {
1390 kctlstat.kcs_tbl_size_too_big++;
1391 if (ctl_debug) {
1392 printf("%s kctl_tbl_size %lu too big\n",
1393 __func__, kctl_tbl_size);
1394 }
1395 return;
1396 }
1397 kctl_tbl_growing = 1;
1398
1399 new_size = kctl_tbl_size + KCTL_TBL_INC;
1400
1401 lck_mtx_unlock(ctl_mtx);
1402 new_table = _MALLOC(sizeof(struct kctl *) * new_size,
1403 M_TEMP, M_WAIT | M_ZERO);
1404 lck_mtx_lock(ctl_mtx);
1405
1406 if (new_table != NULL) {
1407 if (kctl_table != NULL) {
1408 bcopy(kctl_table, new_table,
1409 kctl_tbl_size * sizeof(struct kctl *));
1410
1411 _FREE(kctl_table, M_TEMP);
1412 }
1413 kctl_table = new_table;
1414 kctl_tbl_size = new_size;
1415 }
1416
1417 kctl_tbl_growing = 0;
1418
1419 if (kctl_tbl_growing_waiting) {
1420 wakeup(&kctl_tbl_growing);
1421 }
1422}
1423
1424#define KCTLREF_INDEX_MASK 0x0000FFFF
1425#define KCTLREF_GENCNT_MASK 0xFFFF0000
1426#define KCTLREF_GENCNT_SHIFT 16
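/*
 * For example (hypothetical values): storing a kctl in table slot 2
 * when kctl_ref_gencnt has advanced to 0x1234 yields
 * ref = (0x1234 << 16) | (2 + 1) = 0x12340003. kctl_from_ref()
 * recovers index (0x0003 - 1) == 2 and rejects the reference once the
 * slot is reused under a different generation count.
 */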
1427
1428static kern_ctl_ref
1429kctl_make_ref(struct kctl *kctl)
1430{
1431 uintptr_t i;
1432
1433 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1434
1435 if (kctl_tbl_count >= kctl_tbl_size) {
1436 kctl_tbl_grow();
1437 }
1438
1439 kctl->kctlref = NULL;
1440 for (i = 0; i < kctl_tbl_size; i++) {
1441 if (kctl_table[i] == NULL) {
1442 uintptr_t ref;
1443
1444 /*
1445 * Reference is index plus one
1446 */
1447 kctl_ref_gencnt += 1;
1448
1449 /*
1450 * Add generation count as salt to reference to prevent
1451 * use after deregister
1452 */
1453 ref = ((kctl_ref_gencnt << KCTLREF_GENCNT_SHIFT) &
1454 KCTLREF_GENCNT_MASK) +
1455 ((i + 1) & KCTLREF_INDEX_MASK);
1456
1457 kctl->kctlref = (void *)(ref);
1458 kctl_table[i] = kctl;
1459 kctl_tbl_count++;
1460 break;
1461 }
1462 }
1463
1464 if (kctl->kctlref == NULL) {
1465 panic("%s no space in table", __func__);
1466 }
1467
1468 if (ctl_debug > 0) {
1469 printf("%s %p for %p\n",
1470 __func__, kctl->kctlref, kctl);
1471 }
1472
1473 return kctl->kctlref;
1474}
1475
1476static void
1477kctl_delete_ref(kern_ctl_ref kctlref)
1478{
1479 /*
1480 * Reference is index plus one
1481 */
1482 uintptr_t i = (((uintptr_t)kctlref) & KCTLREF_INDEX_MASK) - 1;
1483
1484 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1485
1486 if (i < kctl_tbl_size) {
1487 struct kctl *kctl = kctl_table[i];
1488
1489 if (kctl != NULL && kctl->kctlref == kctlref) {
1490 kctl_table[i] = NULL;
1491 kctl_tbl_count--;
1492 } else {
1493 kctlstat.kcs_bad_kctlref++;
1494 }
1495 } else {
1496 kctlstat.kcs_bad_kctlref++;
1497 }
1498}
1499
1500static struct kctl *
1501kctl_from_ref(kern_ctl_ref kctlref)
1502{
1503 /*
1504 * Reference is index plus one
1505 */
1506 uintptr_t i = (((uintptr_t)kctlref) & KCTLREF_INDEX_MASK) - 1;
1507 struct kctl *kctl = NULL;
1508
1509 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1510
1511 if (i >= kctl_tbl_size) {
1512 kctlstat.kcs_bad_kctlref++;
1513 return NULL;
1514 }
1515 kctl = kctl_table[i];
1516 if (kctl == NULL || kctl->kctlref != kctlref) {
1517 kctlstat.kcs_bad_kctlref++;
1518 return NULL;
1519 }
1520 return kctl;
1521}
1522
1523/*
1524 * Register/unregister a NKE
1525 */
1526errno_t
1527ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref)
1528{
1529 struct kctl *kctl = NULL;
1530 struct kctl *kctl_next = NULL;
1531 u_int32_t id = 1;
1532 size_t name_len;
1533 int is_extended = 0;
1534
1535 if (userkctl == NULL) { /* sanity check */
1536 return EINVAL;
1537 }
1538 if (userkctl->ctl_connect == NULL) {
1539 return EINVAL;
1540 }
1541 name_len = strlen(userkctl->ctl_name);
1542 if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) {
1543 return EINVAL;
1544 }
1545
1546 MALLOC(kctl, struct kctl *, sizeof(*kctl), M_TEMP, M_WAITOK);
1547 if (kctl == NULL) {
1548 return ENOMEM;
1549 }
1550 bzero((char *)kctl, sizeof(*kctl));
1551
1552 lck_mtx_lock(ctl_mtx);
1553
1554 if (kctl_make_ref(kctl) == NULL) {
1555 lck_mtx_unlock(ctl_mtx);
1556 FREE(kctl, M_TEMP);
1557 return ENOMEM;
1558 }
1559
1560 /*
1561 * Kernel Control IDs
1562 *
1563 * CTL_FLAG_REG_ID_UNIT indicates the control ID and unit number are
1564 * static. If they do not exist, add them to the list in order. If the
1565 * flag is not set, we must find a new unique value. We assume the
1566 * list is in order. We find the last item in the list and add one. If
1567 * this leads to wrapping the id around, we start at the front of the
1568 * list and look for a gap.
1569 */
1570
1571 if ((userkctl->ctl_flags & CTL_FLAG_REG_ID_UNIT) == 0) {
1572 /* Must dynamically assign an unused ID */
1573
1574 /* Verify the same name isn't already registered */
1575 if (ctl_find_by_name(userkctl->ctl_name) != NULL) {
1576 kctl_delete_ref(kctl->kctlref);
1577 lck_mtx_unlock(ctl_mtx);
1578 FREE(kctl, M_TEMP);
1579 return EEXIST;
1580 }
1581
1582 /* Start with 1 in case the list is empty */
1583 id = 1;
1584 kctl_next = TAILQ_LAST(&ctl_head, kctl_list);
1585
1586 if (kctl_next != NULL) {
1587 /* List was not empty, add one to the last item */
1588 id = kctl_next->id + 1;
1589 kctl_next = NULL;
1590
1591 /*
1592 * If this wrapped the id number, start looking at
1593 * the front of the list for an unused id.
1594 */
1595 if (id == 0) {
1596 /* Find the next unused ID */
1597 id = 1;
1598
1599 TAILQ_FOREACH(kctl_next, &ctl_head, next) {
1600 if (kctl_next->id > id) {
1601 /* We found a gap */
1602 break;
1603 }
1604
1605 id = kctl_next->id + 1;
1606 }
1607 }
1608 }
1609
1610 userkctl->ctl_id = id;
1611 kctl->id = id;
1612 kctl->reg_unit = -1;
1613 } else {
1614 TAILQ_FOREACH(kctl_next, &ctl_head, next) {
1615 if (kctl_next->id > userkctl->ctl_id) {
1616 break;
1617 }
1618 }
1619
1620 if (ctl_find_by_id_unit(userkctl->ctl_id, userkctl->ctl_unit)) {
1621 kctl_delete_ref(kctl->kctlref);
1622 lck_mtx_unlock(ctl_mtx);
1623 FREE(kctl, M_TEMP);
1624 return EEXIST;
1625 }
1626 kctl->id = userkctl->ctl_id;
1627 kctl->reg_unit = userkctl->ctl_unit;
1628 }
1629
1630 is_extended = (userkctl->ctl_flags & CTL_FLAG_REG_EXTENDED);
1631
1632 strlcpy(kctl->name, userkctl->ctl_name, MAX_KCTL_NAME);
1633 kctl->flags = userkctl->ctl_flags;
1634
1635 /*
1636 * Let the caller know the default send and receive sizes
1637 */
1638 if (userkctl->ctl_sendsize == 0) {
1639 kctl->sendbufsize = CTL_SENDSIZE;
1640 userkctl->ctl_sendsize = kctl->sendbufsize;
1641 } else {
1642 kctl->sendbufsize = userkctl->ctl_sendsize;
1643 }
1644 if (userkctl->ctl_recvsize == 0) {
1645 kctl->recvbufsize = CTL_RECVSIZE;
1646 userkctl->ctl_recvsize = kctl->recvbufsize;
1647 } else {
1648 kctl->recvbufsize = userkctl->ctl_recvsize;
1649 }
1650
1651 kctl->bind = userkctl->ctl_bind;
1652 kctl->connect = userkctl->ctl_connect;
1653 kctl->disconnect = userkctl->ctl_disconnect;
1654 kctl->send = userkctl->ctl_send;
1655 kctl->setopt = userkctl->ctl_setopt;
1656 kctl->getopt = userkctl->ctl_getopt;
1657 if (is_extended) {
1658 kctl->rcvd = userkctl->ctl_rcvd;
1659 kctl->send_list = userkctl->ctl_send_list;
1660 }
1661
1662 TAILQ_INIT(&kctl->kcb_head);
1663
1664 if (kctl_next) {
1665 TAILQ_INSERT_BEFORE(kctl_next, kctl, next);
1666 } else {
1667 TAILQ_INSERT_TAIL(&ctl_head, kctl, next);
1668 }
1669
1670 kctlstat.kcs_reg_count++;
1671 kctlstat.kcs_gencnt++;
1672
1673 lck_mtx_unlock(ctl_mtx);
1674
1675 *kctlref = kctl->kctlref;
1676
1677 ctl_post_msg(KEV_CTL_REGISTERED, kctl->id);
1678 return 0;
1679}
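/*
 * Registration sketch as a kext would use it (illustrative, kept out of
 * the build; the name "com.example.echo" and the example_* callbacks
 * are hypothetical):
 */
#if 0 /* example only */
static kern_ctl_ref example_ref;

static errno_t
example_connect(kern_ctl_ref ref, struct sockaddr_ctl *sac, void **unitinfo)
{
#pragma unused(ref, sac)
	*unitinfo = NULL;	/* no per-connection state in this sketch */
	return (0);
}

static errno_t
example_send(kern_ctl_ref ref, u_int32_t unit, void *unitinfo,
    mbuf_t m, int flags)
{
#pragma unused(unitinfo, flags)
	errno_t err;

	/* Echo the client's datagram straight back to it */
	err = ctl_enqueuembuf(ref, unit, m, 0);
	if (err != 0)
		mbuf_freem(m);	/* on failure the mbuf is still ours */
	return (err);
}

static errno_t
example_start(void)
{
	struct kern_ctl_reg reg;

	bzero(&reg, sizeof(reg));
	strlcpy(reg.ctl_name, "com.example.echo", sizeof(reg.ctl_name));
	reg.ctl_connect = example_connect;	/* mandatory, checked above */
	reg.ctl_send = example_send;
	return (ctl_register(&reg, &example_ref));
}
#endif /* example only */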
1680
1681errno_t
1682ctl_deregister(void *kctlref)
1683{
1684 struct kctl *kctl;
1685
1686 lck_mtx_lock(ctl_mtx);
1687 if ((kctl = kctl_from_ref(kctlref)) == NULL) {
1688 kctlstat.kcs_bad_kctlref++;
1689 lck_mtx_unlock(ctl_mtx);
1690 if (ctl_debug != 0) {
1691 printf("%s invalid kctlref %p\n",
1692 __func__, kctlref);
1693 }
1694 return EINVAL;
1695 }
1696
1697 if (!TAILQ_EMPTY(&kctl->kcb_head)) {
1698 lck_mtx_unlock(ctl_mtx);
1699 return EBUSY;
1700 }
1701
1702 TAILQ_REMOVE(&ctl_head, kctl, next);
1703
1704 kctlstat.kcs_reg_count--;
1705 kctlstat.kcs_gencnt++;
1706
1707 kctl_delete_ref(kctl->kctlref);
1708 lck_mtx_unlock(ctl_mtx);
1709
1710 ctl_post_msg(KEV_CTL_DEREGISTERED, kctl->id);
1711 FREE(kctl, M_TEMP);
1712 return 0;
1713}
1714
1715/*
1716 * Must be called with global ctl_mtx lock taken
1717 */
1718static struct kctl *
1719ctl_find_by_name(const char *name)
1720{
1721 struct kctl *kctl;
1722
1723 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1724
1725 TAILQ_FOREACH(kctl, &ctl_head, next)
1726 if (strncmp(kctl->name, name, sizeof(kctl->name)) == 0) {
1727 return kctl;
1728 }
1729
1730 return NULL;
1731}
1732
1733u_int32_t
1734ctl_id_by_name(const char *name)
1735{
1736 u_int32_t ctl_id = 0;
1737 struct kctl *kctl;
1738
1739 lck_mtx_lock(ctl_mtx);
1740 kctl = ctl_find_by_name(name);
1741 if (kctl) {
1742 ctl_id = kctl->id;
1743 }
1744 lck_mtx_unlock(ctl_mtx);
1745
1746 return ctl_id;
1747}
1748
1749errno_t
1750ctl_name_by_id(u_int32_t id, char *out_name, size_t maxsize)
1751{
1752 int found = 0;
1753 struct kctl *kctl;
1754
1755 lck_mtx_lock(ctl_mtx);
1756 TAILQ_FOREACH(kctl, &ctl_head, next) {
1757 if (kctl->id == id) {
1758 break;
1759 }
1760 }
1761
1762 if (kctl) {
1763 if (maxsize > MAX_KCTL_NAME) {
1764 maxsize = MAX_KCTL_NAME;
1765 }
1766 strlcpy(out_name, kctl->name, maxsize);
1767 found = 1;
1768 }
1769 lck_mtx_unlock(ctl_mtx);
1770
1771 return found ? 0 : ENOENT;
1772}
1773
1774/*
1775 * Must be called with global ctl_mtx lock taken
1776 *
1777 */
1778static struct kctl *
1779ctl_find_by_id_unit(u_int32_t id, u_int32_t unit)
1780{
1781 struct kctl *kctl;
1782
1783 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1784
1785 TAILQ_FOREACH(kctl, &ctl_head, next) {
1786 if (kctl->id == id && (kctl->flags & CTL_FLAG_REG_ID_UNIT) == 0) {
1787 return kctl;
1788 } else if (kctl->id == id && kctl->reg_unit == unit) {
1789 return kctl;
1790 }
1791 }
1792 return NULL;
1793}
1794
1795/*
1796 * Must be called with kernel controller lock taken
1797 */
1798static struct ctl_cb *
1799kcb_find(struct kctl *kctl, u_int32_t unit)
1800{
1801 struct ctl_cb *kcb;
1802
1803 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1804
1805 TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
1806 if (kcb->sac.sc_unit == unit) {
1807 return kcb;
1808 }
1809
1810 return NULL;
1811}
1812
1813static struct socket *
1814kcb_find_socket(kern_ctl_ref kctlref, u_int32_t unit, u_int32_t *kctlflags)
1815{
1816 struct socket *so = NULL;
1817 struct ctl_cb *kcb;
1818 void *lr_saved;
1819 struct kctl *kctl;
1820 int i;
1821
1822 lr_saved = __builtin_return_address(0);
1823
1824 lck_mtx_lock(ctl_mtx);
1825 /*
1826 * First validate the kctlref
1827 */
1828 if ((kctl = kctl_from_ref(kctlref)) == NULL) {
1829 kctlstat.kcs_bad_kctlref++;
1830 lck_mtx_unlock(ctl_mtx);
1831 if (ctl_debug != 0) {
1832 printf("%s invalid kctlref %p\n",
1833 __func__, kctlref);
1834 }
1835 return NULL;
1836 }
1837
1838 kcb = kcb_find(kctl, unit);
1839 if (kcb == NULL || kcb->kctl != kctl || (so = kcb->so) == NULL) {
1840 lck_mtx_unlock(ctl_mtx);
1841 return NULL;
1842 }
1843 /*
1844 * This prevents the socket from being closed
1845 */
1846 kcb->usecount++;
1847 /*
1848 * Respect lock ordering: socket before ctl_mtx
1849 */
1850 lck_mtx_unlock(ctl_mtx);
1851
1852 socket_lock(so, 1);
1853 /*
1854 * The socket lock history is more useful if we store
1855 * the address of the caller.
1856 */
1857 i = (so->next_lock_lr + SO_LCKDBG_MAX - 1) % SO_LCKDBG_MAX;
1858 so->lock_lr[i] = lr_saved;
1859
1860 lck_mtx_lock(ctl_mtx);
1861
1862 if ((kctl = kctl_from_ref(kctlref)) == NULL || kcb->kctl == NULL) {
1863 lck_mtx_unlock(ctl_mtx);
1864 socket_unlock(so, 1);
1865 so = NULL;
1866 lck_mtx_lock(ctl_mtx);
1867 } else if (kctlflags != NULL) {
1868 *kctlflags = kctl->flags;
1869 }
1870
1871 kcb->usecount--;
1872 if (kcb->usecount == 0) {
1873 wakeup((event_t)&kcb->usecount);
1874 }
1875
1876 lck_mtx_unlock(ctl_mtx);
1877
1878 return so;
1879}
1880
1881static void
1882ctl_post_msg(u_int32_t event_code, u_int32_t id)
1883{
1884 struct ctl_event_data ctl_ev_data;
1885 struct kev_msg ev_msg;
1886
1887 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_NOTOWNED);
1888
1889 bzero(&ev_msg, sizeof(struct kev_msg));
1890 ev_msg.vendor_code = KEV_VENDOR_APPLE;
1891
1892 ev_msg.kev_class = KEV_SYSTEM_CLASS;
1893 ev_msg.kev_subclass = KEV_CTL_SUBCLASS;
1894 ev_msg.event_code = event_code;
1895
1896 /* common nke subclass data */
1897 bzero(&ctl_ev_data, sizeof(ctl_ev_data));
1898 ctl_ev_data.ctl_id = id;
1899 ev_msg.dv[0].data_ptr = &ctl_ev_data;
1900 ev_msg.dv[0].data_length = sizeof(ctl_ev_data);
1901
1902 ev_msg.dv[1].data_length = 0;
1903
1904 kev_post_msg(&ev_msg);
1905}
1906
1907static int
1908ctl_lock(struct socket *so, int refcount, void *lr)
1909{
1910 void *lr_saved;
1911
1912 if (lr == NULL) {
1913 lr_saved = __builtin_return_address(0);
1914 } else {
1915 lr_saved = lr;
1916 }
1917
1918 if (so->so_pcb != NULL) {
1919 lck_mtx_lock(((struct ctl_cb *)so->so_pcb)->mtx);
1920 } else {
1921 panic("ctl_lock: so=%p NO PCB! lr=%p lrh= %s\n",
1922 so, lr_saved, solockhistory_nr(so));
1923 /* NOTREACHED */
1924 }
1925
1926 if (so->so_usecount < 0) {
1927 panic("ctl_lock: so=%p so_pcb=%p lr=%p ref=%x lrh= %s\n",
1928 so, so->so_pcb, lr_saved, so->so_usecount,
1929 solockhistory_nr(so));
1930 /* NOTREACHED */
1931 }
1932
1933 if (refcount) {
1934 so->so_usecount++;
1935 }
1936
1937 so->lock_lr[so->next_lock_lr] = lr_saved;
1938 so->next_lock_lr = (so->next_lock_lr + 1) % SO_LCKDBG_MAX;
1939 return 0;
1940}
1941
1942static int
1943ctl_unlock(struct socket *so, int refcount, void *lr)
1944{
1945 void *lr_saved;
1946 lck_mtx_t *mutex_held;
1947
1948 if (lr == NULL) {
1949 lr_saved = __builtin_return_address(0);
1950 } else {
1951 lr_saved = lr;
1952 }
1953
1954#if (MORE_KCTLLOCK_DEBUG && (DEVELOPMENT || DEBUG))
1955 printf("ctl_unlock: so=%llx sopcb=%llx lock=%llx ref=%u lr=%llx\n",
1956 (uint64_t)VM_KERNEL_ADDRPERM(so),
1957 (uint64_t)VM_KERNEL_ADDRPERM(so->so_pcb),
1958 (uint64_t)VM_KERNEL_ADDRPERM(((struct ctl_cb *)so->so_pcb)->mtx),
1959 so->so_usecount, (uint64_t)VM_KERNEL_ADDRPERM(lr_saved));
1960#endif /* (MORE_KCTLLOCK_DEBUG && (DEVELOPMENT || DEBUG)) */
1961 if (refcount) {
1962 so->so_usecount--;
1963 }
1964
1965 if (so->so_usecount < 0) {
1966 panic("ctl_unlock: so=%p usecount=%x lrh= %s\n",
1967 so, so->so_usecount, solockhistory_nr(so));
1968 /* NOTREACHED */
1969 }
1970 if (so->so_pcb == NULL) {
1971 panic("ctl_unlock: so=%p NO PCB usecount=%x lr=%p lrh= %s\n",
1972 so, so->so_usecount, (void *)lr_saved,
1973 solockhistory_nr(so));
1974 /* NOTREACHED */
1975 }
1976 mutex_held = ((struct ctl_cb *)so->so_pcb)->mtx;
1977
1978 lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
1979 so->unlock_lr[so->next_unlock_lr] = lr_saved;
1980 so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX;
1981 lck_mtx_unlock(mutex_held);
1982
1983 if (so->so_usecount == 0) {
1984 ctl_sofreelastref(so);
1985 }
1986
1987 return 0;
1988}
1989
1990static lck_mtx_t *
1991ctl_getlock(struct socket *so, int flags)
1992{
1993#pragma unused(flags)
1994 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
1995
1996 if (so->so_pcb) {
1997 if (so->so_usecount < 0) {
1998 panic("ctl_getlock: so=%p usecount=%x lrh= %s\n",
1999 so, so->so_usecount, solockhistory_nr(so));
2000 }
2001 return kcb->mtx;
2002 } else {
2003 panic("ctl_getlock: so=%p NULL NO so_pcb %s\n",
2004 so, solockhistory_nr(so));
2005 return so->so_proto->pr_domain->dom_mtx;
2006 }
2007}
2008
2009__private_extern__ int
2010kctl_reg_list SYSCTL_HANDLER_ARGS
2011{
2012#pragma unused(oidp, arg1, arg2)
2013 int error = 0;
2014 int n, i;
2015 struct xsystmgen xsg;
2016 void *buf = NULL;
2017 struct kctl *kctl;
2018 size_t item_size = ROUNDUP64(sizeof(struct xkctl_reg));
2019
2020 buf = _MALLOC(item_size, M_TEMP, M_WAITOK | M_ZERO);
2021 if (buf == NULL) {
2022 return ENOMEM;
2023 }
2024
2025 lck_mtx_lock(ctl_mtx);
2026
2027 n = kctlstat.kcs_reg_count;
2028
2029 if (req->oldptr == USER_ADDR_NULL) {
2030 req->oldidx = (n + n / 8) * sizeof(struct xkctl_reg);
2031 goto done;
2032 }
2033 if (req->newptr != USER_ADDR_NULL) {
2034 error = EPERM;
2035 goto done;
2036 }
2037 bzero(&xsg, sizeof(xsg));
2038 xsg.xg_len = sizeof(xsg);
2039 xsg.xg_count = n;
2040 xsg.xg_gen = kctlstat.kcs_gencnt;
2041 xsg.xg_sogen = so_gencnt;
2042 error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
2043 if (error) {
2044 goto done;
2045 }
2046 /*
2047 * We are done if there is no registered control
2048 */
2049 if (n == 0) {
2050 goto done;
2051 }
2052
2053 i = 0;
2054 for (i = 0, kctl = TAILQ_FIRST(&ctl_head);
2055 i < n && kctl != NULL;
2056 i++, kctl = TAILQ_NEXT(kctl, next)) {
2057 struct xkctl_reg *xkr = (struct xkctl_reg *)buf;
2058 struct ctl_cb *kcb;
2059 u_int32_t pcbcount = 0;
2060
2061 TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
2062 pcbcount++;
2063
2064 bzero(buf, item_size);
2065
2066 xkr->xkr_len = sizeof(struct xkctl_reg);
2067 xkr->xkr_kind = XSO_KCREG;
2068 xkr->xkr_id = kctl->id;
2069 xkr->xkr_reg_unit = kctl->reg_unit;
2070 xkr->xkr_flags = kctl->flags;
2071 xkr->xkr_kctlref = (uint64_t)(kctl->kctlref);
2072 xkr->xkr_recvbufsize = kctl->recvbufsize;
2073 xkr->xkr_sendbufsize = kctl->sendbufsize;
2074 xkr->xkr_lastunit = kctl->lastunit;
2075 xkr->xkr_pcbcount = pcbcount;
2076 xkr->xkr_connect = (uint64_t)VM_KERNEL_UNSLIDE(kctl->connect);
2077 xkr->xkr_disconnect =
2078 (uint64_t)VM_KERNEL_UNSLIDE(kctl->disconnect);
2079 xkr->xkr_send = (uint64_t)VM_KERNEL_UNSLIDE(kctl->send);
2080 xkr->xkr_send_list =
2081 (uint64_t)VM_KERNEL_UNSLIDE(kctl->send_list);
2082 xkr->xkr_setopt = (uint64_t)VM_KERNEL_UNSLIDE(kctl->setopt);
2083 xkr->xkr_getopt = (uint64_t)VM_KERNEL_UNSLIDE(kctl->getopt);
2084 xkr->xkr_rcvd = (uint64_t)VM_KERNEL_UNSLIDE(kctl->rcvd);
2085 strlcpy(xkr->xkr_name, kctl->name, sizeof(xkr->xkr_name));
2086
2087 error = SYSCTL_OUT(req, buf, item_size);
2088 }
2089
2090 if (error == 0) {
2091 /*
2092 * Give the user an updated idea of our state.
2093 * If the generation differs from what we told
2094 * her before, she knows that something happened
2095 * while we were processing this request, and it
2096 * might be necessary to retry.
2097 */
2098 bzero(&xsg, sizeof(xsg));
2099 xsg.xg_len = sizeof(xsg);
2100 xsg.xg_count = n;
2101 xsg.xg_gen = kctlstat.kcs_gencnt;
2102 xsg.xg_sogen = so_gencnt;
2103 error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
2104 if (error) {
2105 goto done;
2106 }
2107 }
2108
2109done:
2110 lck_mtx_unlock(ctl_mtx);
2111
2112 if (buf != NULL) {
2113 FREE(buf, M_TEMP);
2114 }
2115
2116 return error;
2117}
2118
2119__private_extern__ int
2120kctl_pcblist SYSCTL_HANDLER_ARGS
2121{
2122#pragma unused(oidp, arg1, arg2)
2123 int error = 0;
2124 int n, i;
2125 struct xsystmgen xsg;
2126 void *buf = NULL;
2127 struct kctl *kctl;
2128 size_t item_size = ROUNDUP64(sizeof(struct xkctlpcb)) +
2129 ROUNDUP64(sizeof(struct xsocket_n)) +
2130 2 * ROUNDUP64(sizeof(struct xsockbuf_n)) +
2131 ROUNDUP64(sizeof(struct xsockstat_n));
2132
2133 buf = _MALLOC(item_size, M_TEMP, M_WAITOK | M_ZERO);
2134 if (buf == NULL) {
2135 return ENOMEM;
2136 }
2137
2138 lck_mtx_lock(ctl_mtx);
2139
2140 n = kctlstat.kcs_pcbcount;
2141
2142 if (req->oldptr == USER_ADDR_NULL) {
2143 req->oldidx = (n + n / 8) * item_size;
2144 goto done;
2145 }
2146 if (req->newptr != USER_ADDR_NULL) {
2147 error = EPERM;
2148 goto done;
2149 }
2150 bzero(&xsg, sizeof(xsg));
2151 xsg.xg_len = sizeof(xsg);
2152 xsg.xg_count = n;
2153 xsg.xg_gen = kctlstat.kcs_gencnt;
2154 xsg.xg_sogen = so_gencnt;
2155 error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
2156 if (error) {
2157 goto done;
2158 }
2159 /*
2160 * We are done if there is no pcb
2161 */
2162 if (n == 0) {
2163 goto done;
2164 }
2165
2166 i = 0;
2167 for (i = 0, kctl = TAILQ_FIRST(&ctl_head);
2168 i < n && kctl != NULL;
2169 kctl = TAILQ_NEXT(kctl, next)) {
2170 struct ctl_cb *kcb;
2171
2172 for (kcb = TAILQ_FIRST(&kctl->kcb_head);
2173 i < n && kcb != NULL;
2174 i++, kcb = TAILQ_NEXT(kcb, next)) {
2175 struct xkctlpcb *xk = (struct xkctlpcb *)buf;
2176 struct xsocket_n *xso = (struct xsocket_n *)
2177 ADVANCE64(xk, sizeof(*xk));
2178 struct xsockbuf_n *xsbrcv = (struct xsockbuf_n *)
2179 ADVANCE64(xso, sizeof(*xso));
2180 struct xsockbuf_n *xsbsnd = (struct xsockbuf_n *)
2181 ADVANCE64(xsbrcv, sizeof(*xsbrcv));
2182 struct xsockstat_n *xsostats = (struct xsockstat_n *)
2183 ADVANCE64(xsbsnd, sizeof(*xsbsnd));
2184
2185 bzero(buf, item_size);
2186
2187 xk->xkp_len = sizeof(struct xkctlpcb);
2188 xk->xkp_kind = XSO_KCB;
2189 xk->xkp_unit = kcb->sac.sc_unit;
2190 xk->xkp_kctpcb = (uint64_t)VM_KERNEL_ADDRPERM(kcb);
2191 xk->xkp_kctlref = (uint64_t)VM_KERNEL_ADDRPERM(kctl);
2192 xk->xkp_kctlid = kctl->id;
2193 strlcpy(xk->xkp_kctlname, kctl->name,
2194 sizeof(xk->xkp_kctlname));
2195
2196 sotoxsocket_n(kcb->so, xso);
2197 sbtoxsockbuf_n(kcb->so ?
2198 &kcb->so->so_rcv : NULL, xsbrcv);
2199 sbtoxsockbuf_n(kcb->so ?
2200 &kcb->so->so_snd : NULL, xsbsnd);
2201 sbtoxsockstat_n(kcb->so, xsostats);
2202
2203 error = SYSCTL_OUT(req, buf, item_size);
2204 }
2205 }
2206
2207 if (error == 0) {
2208 /*
2209 * Give the user an updated idea of our state.
2210 * If the generation differs from what we told
2211 * her before, she knows that something happened
2212 * while we were processing this request, and it
2213 * might be necessary to retry.
2214 */
2215 bzero(&xsg, sizeof(xsg));
2216 xsg.xg_len = sizeof(xsg);
2217 xsg.xg_count = n;
2218 xsg.xg_gen = kctlstat.kcs_gencnt;
2219 xsg.xg_sogen = so_gencnt;
2220 error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
2221 if (error) {
2222 goto done;
2223 }
2224 }
2225
2226done:
2227 lck_mtx_unlock(ctl_mtx);
2228
2229 return error;
2230}
2231
2232int
2233kctl_getstat SYSCTL_HANDLER_ARGS
2234{
2235#pragma unused(oidp, arg1, arg2)
2236 int error = 0;
2237
2238 lck_mtx_lock(ctl_mtx);
2239
2240 if (req->newptr != USER_ADDR_NULL) {
2241 error = EPERM;
2242 goto done;
2243 }
2244 if (req->oldptr == USER_ADDR_NULL) {
2245 req->oldidx = sizeof(struct kctlstat);
2246 goto done;
2247 }
2248
2249 error = SYSCTL_OUT(req, &kctlstat,
2250 MIN(sizeof(struct kctlstat), req->oldlen));
2251done:
2252 lck_mtx_unlock(ctl_mtx);
2253 return error;
2254}
2255
2256void
2257kctl_fill_socketinfo(struct socket *so, struct socket_info *si)
2258{
2259 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
2260 struct kern_ctl_info *kcsi =
2261 &si->soi_proto.pri_kern_ctl;
2262 struct kctl *kctl = kcb->kctl;
2263
2264 si->soi_kind = SOCKINFO_KERN_CTL;
2265
2266 if (kctl == 0) {
2267 return;
2268 }
2269
2270 kcsi->kcsi_id = kctl->id;
2271 kcsi->kcsi_reg_unit = kctl->reg_unit;
2272 kcsi->kcsi_flags = kctl->flags;
2273 kcsi->kcsi_recvbufsize = kctl->recvbufsize;
2274 kcsi->kcsi_sendbufsize = kctl->sendbufsize;
2275 kcsi->kcsi_unit = kcb->sac.sc_unit;
2276 strlcpy(kcsi->kcsi_name, kctl->name, MAX_KCTL_NAME);
2277}