]> git.saurik.com Git - apple/xnu.git/blame - bsd/kern/kern_control.c
xnu-4903.270.47.tar.gz
[apple/xnu.git] / bsd / kern / kern_control.c
CommitLineData
9bccf70c 1/*
5c9f4661 2 * Copyright (c) 1999-2017 Apple Inc. All rights reserved.
9bccf70c 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
fe8ab488 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
fe8ab488 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
fe8ab488 17 *
2d21ac55
A
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
fe8ab488 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
9bccf70c 27 */
9bccf70c
A
28
29/*
91447636
A
 * Kernel Control domain - allows control connections to attach to a
 * kernel controller and to read/write data.
9bccf70c 32 *
91447636 33 * Vincent Lubet, 040506
9bccf70c
A
34 * Christophe Allie, 010928
35 * Justin C. Walker, 990319
36 */
37
38#include <sys/types.h>
39#include <sys/param.h>
40#include <sys/systm.h>
41#include <sys/syslog.h>
42#include <sys/socket.h>
43#include <sys/socketvar.h>
44#include <sys/protosw.h>
45#include <sys/domain.h>
46#include <sys/malloc.h>
47#include <sys/mbuf.h>
9bccf70c
A
48#include <sys/sys_domain.h>
49#include <sys/kern_event.h>
50#include <sys/kern_control.h>
2d21ac55 51#include <sys/kauth.h>
fe8ab488 52#include <sys/sysctl.h>
3e170ce0 53#include <sys/proc_info.h>
9bccf70c
A
54#include <net/if_var.h>
55
56#include <mach/vm_types.h>
9bccf70c
A
57
58#include <kern/thread.h>
59
/*
 * Registered kernel controller.  One instance per ctl_register() call;
 * chained on the global ctl_head list (protected by ctl_mtx).
 */
struct kctl {
	TAILQ_ENTRY(kctl) next;         /* controller chain */
	kern_ctl_ref kctlref;           /* opaque ref handed back to clients */

	/* controller information provided when registering */
	char name[MAX_KCTL_NAME];       /* unique identifier */
	u_int32_t id;
	u_int32_t reg_unit;

	/* misc communication information */
	u_int32_t flags;                /* support flags (CTL_FLAG_*) */
	u_int32_t recvbufsize;          /* request more than the default buffer size */
	u_int32_t sendbufsize;          /* request more than the default buffer size */

	/* Dispatch functions */
	ctl_bind_func bind;             /* Prepare contact */
	ctl_connect_func connect;       /* Make contact */
	ctl_disconnect_func disconnect; /* Break contact */
	ctl_send_func send;             /* Send data to nke */
	ctl_send_list_func send_list;   /* Send list of packets */
	ctl_setopt_func setopt;         /* set kctl configuration */
	ctl_getopt_func getopt;         /* get kctl configuration */
	ctl_rcvd_func rcvd;             /* Notify nke when client reads data */

	/* list of attached control blocks (sockets), kept sorted by unit */
	TAILQ_HEAD(, ctl_cb) kcb_head;
	u_int32_t lastunit;
};
87
/*
 * Per-socket control block for a kernel-control socket; hangs off
 * so->so_pcb and points back at the controller it is attached to.
 */
struct ctl_cb {
	TAILQ_ENTRY(ctl_cb) next;       /* controller chain */
	lck_mtx_t *mtx;                 /* per-socket mutex (see ctl_getlock) */
	struct socket *so;              /* controlling socket */
	struct kctl *kctl;              /* back pointer to controller */
	void *userdata;                 /* controller's per-unit cookie */
	struct sockaddr_ctl sac;        /* bound/connected address (id + unit) */
	u_int32_t usecount;             /* in-flight refs; waited on in ctl_disconnect */
};
97
/* Round x up to the next multiple of 8 bytes (u_int64_t alignment). */
#ifndef ROUNDUP64
#define ROUNDUP64(x) P2ROUNDUP((x), sizeof (u_int64_t))
#endif

/* Advance pointer p by n bytes, 64-bit aligned (used by sysctl dump code). */
#ifndef ADVANCE64
#define ADVANCE64(p, n) (void*)((char *)(p) + ROUNDUP64(n))
#endif
105
/*
 * Definitions and variables for the controllers we support
 */

#define CTL_SENDSIZE    (2 * 1024)      /* default send buffer size */
#define CTL_RECVSIZE    (8 * 1024)      /* default receive buffer size */

/* highest assignable unit number per controller */
static u_int32_t ctl_maxunit = 65536;
static lck_grp_attr_t *ctl_lck_grp_attr = 0;
static lck_attr_t *ctl_lck_attr = 0;
static lck_grp_t *ctl_lck_grp = 0;
/* global mutex guarding ctl_head, per-kctl kcb lists and kctlstat */
static lck_mtx_t *ctl_mtx;

/* all the controllers are chained */
TAILQ_HEAD(kctl_list, kctl) ctl_head;
91447636
A
125
/* pr_usrreqs entry points for the kernel-control protocol */
static int ctl_attach(struct socket *, int, struct proc *);
static int ctl_detach(struct socket *);
static int ctl_sofreelastref(struct socket *so);
static int ctl_bind(struct socket *, struct sockaddr *, struct proc *);
static int ctl_connect(struct socket *, struct sockaddr *, struct proc *);
static int ctl_disconnect(struct socket *);
static int ctl_ioctl(struct socket *so, u_long cmd, caddr_t data,
    struct ifnet *ifp, struct proc *p);
static int ctl_send(struct socket *, int, struct mbuf *,
    struct sockaddr *, struct mbuf *, struct proc *);
static int ctl_send_list(struct socket *, int, struct mbuf *,
    struct sockaddr *, struct mbuf *, struct proc *);
static int ctl_ctloutput(struct socket *, struct sockopt *);
static int ctl_peeraddr(struct socket *so, struct sockaddr **nam);
static int ctl_usr_rcvd(struct socket *so, int flags);

/* controller lookup helpers; caller holds ctl_mtx */
static struct kctl *ctl_find_by_name(const char *);
static struct kctl *ctl_find_by_id_unit(u_int32_t id, u_int32_t unit);

/* kcb/socket lookup; kcb_find_socket returns with the socket locked */
static struct socket *kcb_find_socket(kern_ctl_ref kctlref, u_int32_t unit,
    u_int32_t *);
static struct ctl_cb *kcb_find(struct kctl *, u_int32_t unit);
static void ctl_post_msg(u_int32_t event_code, u_int32_t id);

/* per-socket locking glue handed to the protosw */
static int ctl_lock(struct socket *, int, void *);
static int ctl_unlock(struct socket *, int, void *);
static lck_mtx_t * ctl_getlock(struct socket *, int);
9bccf70c 153
/* User-request dispatch table shared by both kctl protosw entries. */
static struct pr_usrreqs ctl_usrreqs = {
	.pru_attach =           ctl_attach,
	.pru_bind =             ctl_bind,
	.pru_connect =          ctl_connect,
	.pru_control =          ctl_ioctl,
	.pru_detach =           ctl_detach,
	.pru_disconnect =       ctl_disconnect,
	.pru_peeraddr =         ctl_peeraddr,
	.pru_rcvd =             ctl_usr_rcvd,
	.pru_send =             ctl_send,
	.pru_send_list =        ctl_send_list,
	.pru_sosend =           sosend,
	.pru_sosend_list =      sosend_list,
	.pru_soreceive =        soreceive,
	.pru_soreceive_list =   soreceive_list,
};
170
/*
 * Two flavors of kernel-control sockets: datagram and stream.
 * Both share the same usrreqs/lock glue; only type and PR_ATOMIC differ.
 */
static struct protosw kctlsw[] = {
	{
		.pr_type =      SOCK_DGRAM,
		.pr_protocol =  SYSPROTO_CONTROL,
		.pr_flags =     PR_ATOMIC | PR_CONNREQUIRED | PR_PCBLOCK | PR_WANTRCVD,
		.pr_ctloutput = ctl_ctloutput,
		.pr_usrreqs =   &ctl_usrreqs,
		.pr_lock =      ctl_lock,
		.pr_unlock =    ctl_unlock,
		.pr_getlock =   ctl_getlock,
	},
	{
		.pr_type =      SOCK_STREAM,
		.pr_protocol =  SYSPROTO_CONTROL,
		.pr_flags =     PR_CONNREQUIRED | PR_PCBLOCK | PR_WANTRCVD,
		.pr_ctloutput = ctl_ctloutput,
		.pr_usrreqs =   &ctl_usrreqs,
		.pr_lock =      ctl_lock,
		.pr_unlock =    ctl_unlock,
		.pr_getlock =   ctl_getlock,
	}
};
193
fe8ab488
A
/* sysctl handlers implemented later in this file */
__private_extern__ int kctl_reg_list SYSCTL_HANDLER_ARGS;
__private_extern__ int kctl_pcblist SYSCTL_HANDLER_ARGS;
__private_extern__ int kctl_getstat SYSCTL_HANDLER_ARGS;


SYSCTL_NODE(_net_systm, OID_AUTO, kctl,
    CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Kernel control family");

/* global statistics, exported via net.systm.kctl.stats */
struct kctlstat kctlstat;
SYSCTL_PROC(_net_systm_kctl, OID_AUTO, stats,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
    kctl_getstat, "S,kctlstat", "");

SYSCTL_PROC(_net_systm_kctl, OID_AUTO, reg_list,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
    kctl_reg_list, "S,xkctl_reg", "");

SYSCTL_PROC(_net_systm_kctl, OID_AUTO, pcblist,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
    kctl_pcblist, "S,xkctlpcb", "");

/* cap for automatic receive-buffer growth in ctl_rcvbspace() */
u_int32_t ctl_autorcvbuf_max = 256 * 1024;
SYSCTL_INT(_net_systm_kctl, OID_AUTO, autorcvbufmax,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_autorcvbuf_max, 0, "");

/* high-water mark actually reached by auto-grown receive buffers */
u_int32_t ctl_autorcvbuf_high = 0;
SYSCTL_INT(_net_systm_kctl, OID_AUTO, autorcvbufhigh,
    CTLFLAG_RD | CTLFLAG_LOCKED, &ctl_autorcvbuf_high, 0, "");

/* non-zero enables verbose printf diagnostics throughout this file */
u_int32_t ctl_debug = 0;
SYSCTL_INT(_net_systm_kctl, OID_AUTO, debug,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_debug, 0, "");

#define KCTL_TBL_INC 16

/*
 * Table mapping opaque kern_ctl_ref values to struct kctl pointers;
 * grown in KCTL_TBL_INC steps by kctl_tbl_grow().
 */
static uintptr_t kctl_tbl_size = 0;
static u_int32_t kctl_tbl_growing = 0;
static u_int32_t kctl_tbl_growing_waiting = 0;
static uintptr_t kctl_tbl_count = 0;
static struct kctl **kctl_table = NULL;
static uintptr_t kctl_ref_gencnt = 0;

static void kctl_tbl_grow(void);
static kern_ctl_ref kctl_make_ref(struct kctl *kctl);
static void kctl_delete_ref(kern_ctl_ref);
static struct kctl *kctl_from_ref(kern_ctl_ref);
240
9bccf70c 241/*
91447636 242 * Install the protosw's for the Kernel Control manager.
9bccf70c 243 */
39236c6e
A
244__private_extern__ void
245kern_control_init(struct domain *dp)
9bccf70c 246{
39236c6e
A
247 struct protosw *pr;
248 int i;
0a7de745 249 int kctl_proto_count = (sizeof(kctlsw) / sizeof(struct protosw));
39236c6e
A
250
251 VERIFY(!(dp->dom_flags & DOM_INITIALIZED));
252 VERIFY(dp == systemdomain);
253
91447636 254 ctl_lck_grp_attr = lck_grp_attr_alloc_init();
39236c6e
A
255 if (ctl_lck_grp_attr == NULL) {
256 panic("%s: lck_grp_attr_alloc_init failed\n", __func__);
257 /* NOTREACHED */
91447636 258 }
39236c6e
A
259
260 ctl_lck_grp = lck_grp_alloc_init("Kernel Control Protocol",
261 ctl_lck_grp_attr);
262 if (ctl_lck_grp == NULL) {
263 panic("%s: lck_grp_alloc_init failed\n", __func__);
264 /* NOTREACHED */
91447636 265 }
39236c6e 266
91447636 267 ctl_lck_attr = lck_attr_alloc_init();
39236c6e
A
268 if (ctl_lck_attr == NULL) {
269 panic("%s: lck_attr_alloc_init failed\n", __func__);
270 /* NOTREACHED */
91447636 271 }
39236c6e 272
91447636 273 ctl_mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
39236c6e
A
274 if (ctl_mtx == NULL) {
275 panic("%s: lck_mtx_alloc_init failed\n", __func__);
276 /* NOTREACHED */
91447636
A
277 }
278 TAILQ_INIT(&ctl_head);
39236c6e 279
0a7de745 280 for (i = 0, pr = &kctlsw[0]; i < kctl_proto_count; i++, pr++) {
39236c6e 281 net_add_proto(pr, dp, 1);
0a7de745 282 }
91447636 283}
9bccf70c 284
91447636
A
285static void
286kcb_delete(struct ctl_cb *kcb)
287{
288 if (kcb != 0) {
0a7de745 289 if (kcb->mtx != 0) {
91447636 290 lck_mtx_free(kcb->mtx, ctl_lck_grp);
0a7de745 291 }
91447636
A
292 FREE(kcb, M_TEMP);
293 }
9bccf70c
A
294}
295
9bccf70c
A
296/*
297 * Kernel Controller user-request functions
fe8ab488
A
298 * attach function must exist and succeed
299 * detach not necessary
91447636 300 * we need a pcb for the per socket mutex
9bccf70c 301 */
91447636 302static int
fe8ab488
A
303ctl_attach(struct socket *so, int proto, struct proc *p)
304{
305#pragma unused(proto, p)
91447636 306 int error = 0;
0a7de745 307 struct ctl_cb *kcb = 0;
91447636
A
308
309 MALLOC(kcb, struct ctl_cb *, sizeof(struct ctl_cb), M_TEMP, M_WAITOK);
310 if (kcb == NULL) {
311 error = ENOMEM;
312 goto quit;
313 }
314 bzero(kcb, sizeof(struct ctl_cb));
fe8ab488 315
91447636
A
316 kcb->mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
317 if (kcb->mtx == NULL) {
318 error = ENOMEM;
319 goto quit;
320 }
321 kcb->so = so;
322 so->so_pcb = (caddr_t)kcb;
fe8ab488 323
91447636
A
324quit:
325 if (error != 0) {
326 kcb_delete(kcb);
327 kcb = 0;
328 }
0a7de745 329 return error;
91447636
A
330}
331
332static int
333ctl_sofreelastref(struct socket *so)
334{
0a7de745 335 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
fe8ab488
A
336
337 so->so_pcb = 0;
338
339 if (kcb != 0) {
0a7de745 340 struct kctl *kctl;
fe8ab488
A
341 if ((kctl = kcb->kctl) != 0) {
342 lck_mtx_lock(ctl_mtx);
343 TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
344 kctlstat.kcs_pcbcount--;
345 kctlstat.kcs_gencnt++;
346 lck_mtx_unlock(ctl_mtx);
347 }
348 kcb_delete(kcb);
349 }
350 sofreelastref(so, 1);
0a7de745 351 return 0;
91447636
A
352}
353
/*
 * Detach a kernel-control socket.  If the socket was bound (bind
 * callback present, userdata set) but never reached the connected
 * state, give the controller a chance to release its per-unit state
 * by invoking the disconnect callback before tearing down.
 */
static int
ctl_detach(struct socket *so)
{
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

	if (kcb == 0) {
		return 0;
	}

	if (kcb->kctl != NULL && kcb->kctl->bind != NULL &&
	    kcb->userdata != NULL && !(so->so_state & SS_ISCONNECTED)) {
		// The unit was bound, but not connected
		// Invoke the disconnected call to cleanup
		if (kcb->kctl->disconnect != NULL) {
			/* controller callbacks run without the socket lock */
			socket_unlock(so, 0);
			(*kcb->kctl->disconnect)(kcb->kctl->kctlref,
			    kcb->sac.sc_unit, kcb->userdata);
			socket_lock(so, 0);
		}
	}

	soisdisconnected(so);
	/* tell the socket layer the pcb can be reclaimed */
	so->so_flags |= SOF_PCBCLEARING;
	return 0;
}
379
/*
 * Common setup for ctl_bind and ctl_connect: look up the controller
 * addressed by 'nam' (id + unit), validate socket type and privilege,
 * pick or validate a unit number, link this socket's control block
 * onto the controller's kcb list, and reserve socket buffers.
 *
 * Returns 0 on success (or if already set up); on failure the kcb is
 * unlinked again and an errno is returned.
 */
static int
ctl_setup_kctl(struct socket *so, struct sockaddr *nam, struct proc *p)
{
	struct kctl *kctl = NULL;
	int error = 0;
	struct sockaddr_ctl sa;
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
	struct ctl_cb *kcb_next = NULL;
	u_quad_t sbmaxsize;
	u_int32_t recvbufsize, sendbufsize;

	if (kcb == 0) {
		panic("ctl_setup_kctl so_pcb null\n");
	}

	if (kcb->kctl != NULL) {
		// Already set up, skip
		return 0;
	}

	if (nam->sa_len != sizeof(struct sockaddr_ctl)) {
		return EINVAL;
	}

	/* copy to an aligned local before reading fields */
	bcopy(nam, &sa, sizeof(struct sockaddr_ctl));

	lck_mtx_lock(ctl_mtx);
	kctl = ctl_find_by_id_unit(sa.sc_id, sa.sc_unit);
	if (kctl == NULL) {
		lck_mtx_unlock(ctl_mtx);
		return ENOENT;
	}

	/* socket type must match how the controller registered */
	if (((kctl->flags & CTL_FLAG_REG_SOCK_STREAM) &&
	    (so->so_type != SOCK_STREAM)) ||
	    (!(kctl->flags & CTL_FLAG_REG_SOCK_STREAM) &&
	    (so->so_type != SOCK_DGRAM))) {
		lck_mtx_unlock(ctl_mtx);
		return EPROTOTYPE;
	}

	/* privileged controllers require a superuser credential */
	if (kctl->flags & CTL_FLAG_PRIVILEGED) {
		if (p == 0) {
			lck_mtx_unlock(ctl_mtx);
			return EINVAL;
		}
		if (kauth_cred_issuser(kauth_cred_get()) == 0) {
			lck_mtx_unlock(ctl_mtx);
			return EPERM;
		}
	}

	if ((kctl->flags & CTL_FLAG_REG_ID_UNIT) || sa.sc_unit != 0) {
		/* caller asked for a specific unit: it must be free */
		if (kcb_find(kctl, sa.sc_unit) != NULL) {
			lck_mtx_unlock(ctl_mtx);
			return EBUSY;
		}
	} else {
		/* Find an unused ID, assumes control IDs are in order */
		u_int32_t unit = 1;

		TAILQ_FOREACH(kcb_next, &kctl->kcb_head, next) {
			if (kcb_next->sac.sc_unit > unit) {
				/* Found a gap, lets fill it in */
				break;
			}
			unit = kcb_next->sac.sc_unit + 1;
			if (unit == ctl_maxunit) {
				break;
			}
		}

		if (unit == ctl_maxunit) {
			lck_mtx_unlock(ctl_mtx);
			return EBUSY;
		}

		sa.sc_unit = unit;
	}

	/* record the address and link into the sorted kcb list */
	bcopy(&sa, &kcb->sac, sizeof(struct sockaddr_ctl));
	kcb->kctl = kctl;
	if (kcb_next != NULL) {
		TAILQ_INSERT_BEFORE(kcb_next, kcb, next);
	} else {
		TAILQ_INSERT_TAIL(&kctl->kcb_head, kcb, next);
	}
	kctlstat.kcs_pcbcount++;
	kctlstat.kcs_gencnt++;
	kctlstat.kcs_connections++;
	lck_mtx_unlock(ctl_mtx);

	/*
	 * rdar://15526688: Limit the send and receive sizes to sb_max
	 * by using the same scaling as sbreserve()
	 */
	sbmaxsize = (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES);

	if (kctl->sendbufsize > sbmaxsize) {
		sendbufsize = sbmaxsize;
	} else {
		sendbufsize = kctl->sendbufsize;
	}

	if (kctl->recvbufsize > sbmaxsize) {
		recvbufsize = sbmaxsize;
	} else {
		recvbufsize = kctl->recvbufsize;
	}

	error = soreserve(so, sendbufsize, recvbufsize);
	if (error) {
		if (ctl_debug) {
			printf("%s - soreserve(%llx, %u, %u) error %d\n",
			    __func__, (uint64_t)VM_KERNEL_ADDRPERM(so),
			    sendbufsize, recvbufsize, error);
		}
		goto done;
	}

done:
	if (error) {
		/* undo the linkage done above */
		soisdisconnected(so);
		lck_mtx_lock(ctl_mtx);
		TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
		kcb->kctl = NULL;
		kcb->sac.sc_unit = 0;
		kctlstat.kcs_pcbcount--;
		kctlstat.kcs_gencnt++;
		kctlstat.kcs_conn_fail++;
		lck_mtx_unlock(ctl_mtx);
	}
	return error;
}
514
515static int
516ctl_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
517{
518 int error = 0;
519 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
520
521 if (kcb == NULL) {
522 panic("ctl_bind so_pcb null\n");
523 }
524
525 error = ctl_setup_kctl(so, nam, p);
526 if (error) {
0a7de745 527 return error;
5c9f4661
A
528 }
529
530 if (kcb->kctl == NULL) {
531 panic("ctl_bind kctl null\n");
532 }
533
534 if (kcb->kctl->bind == NULL) {
0a7de745 535 return EINVAL;
5c9f4661 536 }
fe8ab488 537
91447636 538 socket_unlock(so, 0);
5c9f4661 539 error = (*kcb->kctl->bind)(kcb->kctl->kctlref, &kcb->sac, &kcb->userdata);
91447636 540 socket_lock(so, 0);
fe8ab488 541
0a7de745 542 return error;
5c9f4661
A
543}
544
/*
 * pru_connect handler: set up the controller linkage, then call the
 * controller's connect callback.  On callback failure the linkage
 * created by ctl_setup_kctl is fully unwound (including a courtesy
 * disconnect callback so the controller can free per-unit state).
 */
static int
ctl_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
{
	int error = 0;
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

	if (kcb == NULL) {
		panic("ctl_connect so_pcb null\n");
	}

	error = ctl_setup_kctl(so, nam, p);
	if (error) {
		return error;
	}

	if (kcb->kctl == NULL) {
		panic("ctl_connect kctl null\n");
	}

	soisconnecting(so);
	/* controller callbacks run without the socket lock */
	socket_unlock(so, 0);
	error = (*kcb->kctl->connect)(kcb->kctl->kctlref, &kcb->sac, &kcb->userdata);
	socket_lock(so, 0);
	if (error) {
		goto end;
	}
	soisconnected(so);

end:
	if (error && kcb->kctl->disconnect) {
		/*
		 * XXX Make sure we don't check the return value
		 * of disconnect here.
		 * ipsec/utun_ctl_disconnect will return an error when
		 * disconnect gets called after connect failure.
		 * If we ever decide to check the disconnect return
		 * value here, revisit ipsec/utun_ctl_disconnect first.
		 */
		socket_unlock(so, 0);
		(*kcb->kctl->disconnect)(kcb->kctl->kctlref, kcb->sac.sc_unit, kcb->userdata);
		socket_lock(so, 0);
	}
	if (error) {
		/* unwind the linkage created by ctl_setup_kctl */
		soisdisconnected(so);
		lck_mtx_lock(ctl_mtx);
		TAILQ_REMOVE(&kcb->kctl->kcb_head, kcb, next);
		kcb->kctl = NULL;
		kcb->sac.sc_unit = 0;
		kctlstat.kcs_pcbcount--;
		kctlstat.kcs_gencnt++;
		kctlstat.kcs_conn_fail++;
		lck_mtx_unlock(ctl_mtx);
	}
	return error;
}
601
/*
 * pru_disconnect handler: notify the controller, then unlink the
 * control block.  Before removing the kcb from the controller's list
 * we wait (on ctl_mtx) for usecount to drain so that no enqueue path
 * still holds a reference to this kcb.
 */
static int
ctl_disconnect(struct socket *so)
{
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

	if ((kcb = (struct ctl_cb *)so->so_pcb)) {
		struct kctl *kctl = kcb->kctl;

		if (kctl && kctl->disconnect) {
			/* controller callbacks run without the socket lock */
			socket_unlock(so, 0);
			(*kctl->disconnect)(kctl->kctlref, kcb->sac.sc_unit,
			    kcb->userdata);
			socket_lock(so, 0);
		}

		soisdisconnected(so);

		/* drop the socket lock before taking ctl_mtx (lock order) */
		socket_unlock(so, 0);
		lck_mtx_lock(ctl_mtx);
		kcb->kctl = 0;
		kcb->sac.sc_unit = 0;
		/* wait for in-flight users of this kcb to finish */
		while (kcb->usecount != 0) {
			msleep(&kcb->usecount, ctl_mtx, 0, "kcb->usecount", 0);
		}
		TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
		kctlstat.kcs_pcbcount--;
		kctlstat.kcs_gencnt++;
		lck_mtx_unlock(ctl_mtx);
		socket_lock(so, 0);
	}
	return 0;
}
634
91447636
A
635static int
636ctl_peeraddr(struct socket *so, struct sockaddr **nam)
9bccf70c 637{
0a7de745
A
638 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
639 struct kctl *kctl;
640 struct sockaddr_ctl sc;
fe8ab488 641
0a7de745
A
642 if (kcb == NULL) { /* sanity check */
643 return ENOTCONN;
644 }
fe8ab488 645
0a7de745
A
646 if ((kctl = kcb->kctl) == NULL) {
647 return EINVAL;
648 }
fe8ab488 649
91447636
A
650 bzero(&sc, sizeof(struct sockaddr_ctl));
651 sc.sc_len = sizeof(struct sockaddr_ctl);
652 sc.sc_family = AF_SYSTEM;
653 sc.ss_sysaddr = AF_SYS_CONTROL;
654 sc.sc_id = kctl->id;
5c9f4661 655 sc.sc_unit = kcb->sac.sc_unit;
fe8ab488 656
91447636 657 *nam = dup_sockaddr((struct sockaddr *)&sc, 1);
fe8ab488 658
0a7de745 659 return 0;
fe8ab488
A
660}
661
662static void
663ctl_sbrcv_trim(struct socket *so)
664{
665 struct sockbuf *sb = &so->so_rcv;
666
667 if (sb->sb_hiwat > sb->sb_idealsize) {
668 u_int32_t diff;
669 int32_t trim;
670
671 /*
672 * The difference between the ideal size and the
673 * current size is the upper bound of the trimage
674 */
675 diff = sb->sb_hiwat - sb->sb_idealsize;
676 /*
677 * We cannot trim below the outstanding data
678 */
679 trim = sb->sb_hiwat - sb->sb_cc;
680
681 trim = imin(trim, (int32_t)diff);
682
683 if (trim > 0) {
684 sbreserve(sb, (sb->sb_hiwat - trim));
685
0a7de745 686 if (ctl_debug) {
fe8ab488
A
687 printf("%s - shrunk to %d\n",
688 __func__, sb->sb_hiwat);
0a7de745 689 }
fe8ab488
A
690 }
691 }
9bccf70c
A
692}
693
39236c6e
A
694static int
695ctl_usr_rcvd(struct socket *so, int flags)
696{
0a7de745
A
697 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
698 struct kctl *kctl;
39236c6e
A
699
700 if ((kctl = kcb->kctl) == NULL) {
0a7de745 701 return EINVAL;
39236c6e
A
702 }
703
704 if (kctl->rcvd) {
705 socket_unlock(so, 0);
5c9f4661 706 (*kctl->rcvd)(kctl->kctlref, kcb->sac.sc_unit, kcb->userdata, flags);
39236c6e
A
707 socket_lock(so, 0);
708 }
709
fe8ab488
A
710 ctl_sbrcv_trim(so);
711
0a7de745 712 return 0;
39236c6e
A
713}
714
/*
 * pru_send handler: hand one packet to the controller's send callback.
 * Control mbufs are not supported and are freed immediately.  On any
 * failure the data mbuf is freed here and the send-fail stat bumped.
 */
static int
ctl_send(struct socket *so, int flags, struct mbuf *m,
    struct sockaddr *addr, struct mbuf *control,
    struct proc *p)
{
#pragma unused(addr, p)
	int error = 0;
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
	struct kctl *kctl;

	if (control) {
		m_freem(control);
	}

	if (kcb == NULL) {      /* sanity check */
		error = ENOTCONN;
	}

	/* note: kctl only assigned (and used) when error is still 0 */
	if (error == 0 && (kctl = kcb->kctl) == NULL) {
		error = EINVAL;
	}

	if (error == 0 && kctl->send) {
		so_tc_update_stats(m, so, m_get_service_class(m));
		/* controller callbacks run without the socket lock */
		socket_unlock(so, 0);
		error = (*kctl->send)(kctl->kctlref, kcb->sac.sc_unit, kcb->userdata,
		    m, flags);
		socket_lock(so, 0);
	} else {
		/* callback missing or earlier error: we own m, free it */
		m_freem(m);
		if (error == 0) {
			error = ENOTSUP;
		}
	}
	if (error != 0) {
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_send_fail);
	}
	return error;
}
754
/*
 * pru_send_list handler: hand a packet chain to the controller.
 * Prefers the send_list callback; falls back to calling send once per
 * packet.  Control mbufs are unsupported and freed up front.
 */
static int
ctl_send_list(struct socket *so, int flags, struct mbuf *m,
    __unused struct sockaddr *addr, struct mbuf *control,
    __unused struct proc *p)
{
	int error = 0;
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
	struct kctl *kctl;

	if (control) {
		m_freem_list(control);
	}

	if (kcb == NULL) {      /* sanity check */
		error = ENOTCONN;
	}

	/* note: kctl only assigned (and used) when error is still 0 */
	if (error == 0 && (kctl = kcb->kctl) == NULL) {
		error = EINVAL;
	}

	if (error == 0 && kctl->send_list) {
		struct mbuf *nxt;

		for (nxt = m; nxt != NULL; nxt = nxt->m_nextpkt) {
			so_tc_update_stats(nxt, so, m_get_service_class(nxt));
		}

		/* controller callbacks run without the socket lock */
		socket_unlock(so, 0);
		error = (*kctl->send_list)(kctl->kctlref, kcb->sac.sc_unit,
		    kcb->userdata, m, flags);
		socket_lock(so, 0);
	} else if (error == 0 && kctl->send) {
		/* no send_list callback: deliver one packet at a time */
		while (m != NULL && error == 0) {
			struct mbuf *nextpkt = m->m_nextpkt;

			m->m_nextpkt = NULL;
			so_tc_update_stats(m, so, m_get_service_class(m));
			socket_unlock(so, 0);
			error = (*kctl->send)(kctl->kctlref, kcb->sac.sc_unit,
			    kcb->userdata, m, flags);
			socket_lock(so, 0);
			m = nextpkt;
		}
		/* free whatever remains after a mid-chain failure */
		if (m != NULL) {
			m_freem_list(m);
		}
	} else {
		m_freem_list(m);
		if (error == 0) {
			error = ENOTSUP;
		}
	}
	if (error != 0) {
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_send_list_fail);
	}
	return error;
}
813
814static errno_t
3e170ce0 815ctl_rcvbspace(struct socket *so, u_int32_t datasize,
0a7de745 816 u_int32_t kctlflags, u_int32_t flags)
fe8ab488
A
817{
818 struct sockbuf *sb = &so->so_rcv;
819 u_int32_t space = sbspace(sb);
820 errno_t error;
04b8595b 821
3e170ce0 822 if ((kctlflags & CTL_FLAG_REG_CRIT) == 0) {
0a7de745 823 if ((u_int32_t) space >= datasize) {
fe8ab488 824 error = 0;
0a7de745 825 } else {
fe8ab488 826 error = ENOBUFS;
0a7de745 827 }
fe8ab488 828 } else if ((flags & CTL_DATA_CRIT) == 0) {
3e170ce0
A
829 /*
830 * Reserve 25% for critical messages
831 */
832 if (space < (sb->sb_hiwat >> 2) ||
0a7de745 833 space < datasize) {
3e170ce0 834 error = ENOBUFS;
0a7de745 835 } else {
3e170ce0 836 error = 0;
0a7de745 837 }
fe8ab488
A
838 } else {
839 u_int32_t autorcvbuf_max;
840
841 /*
842 * Allow overcommit of 25%
843 */
844 autorcvbuf_max = min(sb->sb_idealsize + (sb->sb_idealsize >> 2),
0a7de745 845 ctl_autorcvbuf_max);
fe8ab488
A
846
847 if ((u_int32_t) space >= datasize) {
848 error = 0;
849 } else if (tcp_cansbgrow(sb) &&
850 sb->sb_hiwat < autorcvbuf_max) {
851 /*
852 * Grow with a little bit of leeway
853 */
854 u_int32_t grow = datasize - space + MSIZE;
855
856 if (sbreserve(sb,
857 min((sb->sb_hiwat + grow), autorcvbuf_max)) == 1) {
0a7de745 858 if (sb->sb_hiwat > ctl_autorcvbuf_high) {
fe8ab488 859 ctl_autorcvbuf_high = sb->sb_hiwat;
0a7de745 860 }
fe8ab488 861
3e170ce0
A
862 /*
863 * A final check
864 */
865 if ((u_int32_t) sbspace(sb) >= datasize) {
866 error = 0;
867 } else {
868 error = ENOBUFS;
869 }
870
0a7de745 871 if (ctl_debug) {
3e170ce0
A
872 printf("%s - grown to %d error %d\n",
873 __func__, sb->sb_hiwat, error);
0a7de745 874 }
fe8ab488
A
875 } else {
876 error = ENOBUFS;
877 }
878 } else {
879 error = ENOBUFS;
880 }
881 }
0a7de745 882 return error;
9bccf70c
A
883}
884
/*
 * KPI: enqueue one mbuf packet toward the client of (kctlref, unit).
 * kcb_find_socket returns the socket locked with a reference; the
 * mbuf is consumed by sbappend on success.  Returns EINVAL if no such
 * attached socket exists, ENOBUFS when the receive buffer is full.
 */
errno_t
ctl_enqueuembuf(kern_ctl_ref kctlref, u_int32_t unit, struct mbuf *m,
    u_int32_t flags)
{
	struct socket *so;
	errno_t error = 0;
	int len = m->m_pkthdr.len;
	u_int32_t kctlflags;

	so = kcb_find_socket(kctlref, unit, &kctlflags);
	if (so == NULL) {
		return EINVAL;
	}

	if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
		error = ENOBUFS;
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
		goto bye;
	}
	if ((flags & CTL_DATA_EOR)) {
		m->m_flags |= M_EOR;
	}

	so_recv_data_stat(so, m, 0);
	if (sbappend(&so->so_rcv, m) != 0) {
		if ((flags & CTL_DATA_NOWAKEUP) == 0) {
			sorwakeup(so);
		}
	} else {
		/* sbappend freed m on failure */
		error = ENOBUFS;
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
	}
bye:
	if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) {
		printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
		    __func__, error, len,
		    so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);
	}

	/* drop the lock and the reference taken by kcb_find_socket */
	socket_unlock(so, 1);
	if (error != 0) {
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
	}

	return error;
}
931
932/*
933 * Compute space occupied by mbuf like sbappendrecord
934 */
935static int
936m_space(struct mbuf *m)
937{
938 int space = 0;
939 struct mbuf *nxt;
940
0a7de745 941 for (nxt = m; nxt != NULL; nxt = nxt->m_next) {
fe8ab488 942 space += nxt->m_len;
0a7de745 943 }
fe8ab488 944
0a7de745 945 return space;
fe8ab488
A
946}
947
948errno_t
949ctl_enqueuembuf_list(void *kctlref, u_int32_t unit, struct mbuf *m_list,
0a7de745 950 u_int32_t flags, struct mbuf **m_remain)
fe8ab488
A
951{
952 struct socket *so = NULL;
953 errno_t error = 0;
fe8ab488
A
954 struct mbuf *m, *nextpkt;
955 int needwakeup = 0;
5ba3f43e 956 int len = 0;
3e170ce0 957 u_int32_t kctlflags;
fe8ab488
A
958
959 /*
960 * Need to point the beginning of the list in case of early exit
961 */
962 m = m_list;
963
3e170ce0
A
964 /*
965 * kcb_find_socket takes the socket lock with a reference
966 */
967 so = kcb_find_socket(kctlref, unit, &kctlflags);
968 if (so == NULL) {
fe8ab488
A
969 error = EINVAL;
970 goto done;
971 }
3e170ce0
A
972
973 if (kctlflags & CTL_FLAG_REG_SOCK_STREAM) {
fe8ab488
A
974 error = EOPNOTSUPP;
975 goto done;
976 }
977 if (flags & CTL_DATA_EOR) {
978 error = EINVAL;
979 goto done;
980 }
fe8ab488
A
981
982 for (m = m_list; m != NULL; m = nextpkt) {
983 nextpkt = m->m_nextpkt;
984
0a7de745 985 if (m->m_pkthdr.len == 0 && ctl_debug) {
fe8ab488 986 printf("%s: %llx m_pkthdr.len is 0",
0a7de745
A
987 __func__, (uint64_t)VM_KERNEL_ADDRPERM(m));
988 }
fe8ab488
A
989
990 /*
991 * The mbuf is either appended or freed by sbappendrecord()
992 * so it's not reliable from a data standpoint
993 */
994 len = m_space(m);
3e170ce0 995 if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
fe8ab488
A
996 error = ENOBUFS;
997 OSIncrementAtomic64(
0a7de745 998 (SInt64 *)&kctlstat.kcs_enqueue_fullsock);
fe8ab488
A
999 break;
1000 } else {
1001 /*
1002 * Unlink from the list, m is on its own
1003 */
1004 m->m_nextpkt = NULL;
1005 so_recv_data_stat(so, m, 0);
1006 if (sbappendrecord(&so->so_rcv, m) != 0) {
1007 needwakeup = 1;
1008 } else {
1009 /*
1010 * We free or return the remaining
1011 * mbufs in the list
1012 */
1013 m = nextpkt;
1014 error = ENOBUFS;
1015 OSIncrementAtomic64(
0a7de745 1016 (SInt64 *)&kctlstat.kcs_enqueue_fullsock);
fe8ab488
A
1017 break;
1018 }
1019 }
1020 }
0a7de745 1021 if (needwakeup && (flags & CTL_DATA_NOWAKEUP) == 0) {
fe8ab488 1022 sorwakeup(so);
0a7de745 1023 }
fe8ab488
A
1024
1025done:
1026 if (so != NULL) {
0a7de745 1027 if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) {
fe8ab488 1028 printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
0a7de745
A
1029 __func__, error, len,
1030 so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);
1031 }
fe8ab488
A
1032
1033 socket_unlock(so, 1);
1034 }
1035 if (m_remain) {
1036 *m_remain = m;
1037
1038 if (m != NULL && socket_debug && so != NULL &&
1039 (so->so_options & SO_DEBUG)) {
1040 struct mbuf *n;
1041
1042 printf("%s m_list %llx\n", __func__,
1043 (uint64_t) VM_KERNEL_ADDRPERM(m_list));
0a7de745 1044 for (n = m; n != NULL; n = n->m_nextpkt) {
fe8ab488
A
1045 printf(" remain %llx m_next %llx\n",
1046 (uint64_t) VM_KERNEL_ADDRPERM(n),
1047 (uint64_t) VM_KERNEL_ADDRPERM(n->m_next));
0a7de745 1048 }
fe8ab488
A
1049 }
1050 } else {
0a7de745 1051 if (m != NULL) {
fe8ab488 1052 m_freem_list(m);
0a7de745 1053 }
fe8ab488 1054 }
0a7de745 1055 if (error != 0) {
fe8ab488 1056 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
0a7de745
A
1057 }
1058 return error;
91447636 1059}
9bccf70c 1060
91447636 1061errno_t
fe8ab488
A
1062ctl_enqueuedata(void *kctlref, u_int32_t unit, void *data, size_t len,
1063 u_int32_t flags)
91447636 1064{
0a7de745
A
1065 struct socket *so;
1066 struct mbuf *m;
1067 errno_t error = 0;
1068 unsigned int num_needed;
1069 struct mbuf *n;
1070 size_t curlen = 0;
1071 u_int32_t kctlflags;
fe8ab488 1072
3e170ce0
A
1073 so = kcb_find_socket(kctlref, unit, &kctlflags);
1074 if (so == NULL) {
0a7de745 1075 return EINVAL;
3e170ce0 1076 }
fe8ab488 1077
3e170ce0 1078 if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
91447636 1079 error = ENOBUFS;
fe8ab488 1080 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
91447636
A
1081 goto bye;
1082 }
1083
1084 num_needed = 1;
1085 m = m_allocpacket_internal(&num_needed, len, NULL, M_NOWAIT, 1, 0);
1086 if (m == NULL) {
39037602 1087 kctlstat.kcs_enqdata_mb_alloc_fail++;
0a7de745 1088 if (ctl_debug) {
39037602
A
1089 printf("%s: m_allocpacket_internal(%lu) failed\n",
1090 __func__, len);
0a7de745 1091 }
fe8ab488 1092 error = ENOMEM;
91447636
A
1093 goto bye;
1094 }
fe8ab488 1095
91447636
A
1096 for (n = m; n != NULL; n = n->m_next) {
1097 size_t mlen = mbuf_maxlen(n);
fe8ab488 1098
0a7de745 1099 if (mlen + curlen > len) {
91447636 1100 mlen = len - curlen;
0a7de745 1101 }
91447636
A
1102 n->m_len = mlen;
1103 bcopy((char *)data + curlen, n->m_data, mlen);
1104 curlen += mlen;
1105 }
1106 mbuf_pkthdr_setlen(m, curlen);
1107
0a7de745 1108 if ((flags & CTL_DATA_EOR)) {
91447636 1109 m->m_flags |= M_EOR;
0a7de745 1110 }
fe8ab488
A
1111 so_recv_data_stat(so, m, 0);
1112 if (sbappend(&so->so_rcv, m) != 0) {
0a7de745 1113 if ((flags & CTL_DATA_NOWAKEUP) == 0) {
fe8ab488 1114 sorwakeup(so);
0a7de745 1115 }
fe8ab488 1116 } else {
39037602 1117 kctlstat.kcs_enqdata_sbappend_fail++;
fe8ab488
A
1118 error = ENOBUFS;
1119 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
1120 }
1121
91447636 1122bye:
0a7de745 1123 if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) {
fe8ab488 1124 printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
0a7de745
A
1125 __func__, error, (int)len,
1126 so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);
1127 }
fe8ab488 1128
91447636 1129 socket_unlock(so, 1);
0a7de745 1130 if (error != 0) {
fe8ab488 1131 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
0a7de745
A
1132 }
1133 return error;
91447636 1134}
9bccf70c 1135
3e170ce0
A
1136errno_t
1137ctl_getenqueuepacketcount(kern_ctl_ref kctlref, u_int32_t unit, u_int32_t *pcnt)
1138{
0a7de745 1139 struct socket *so;
3e170ce0
A
1140 u_int32_t cnt;
1141 struct mbuf *m1;
1142
0a7de745
A
1143 if (pcnt == NULL) {
1144 return EINVAL;
1145 }
3e170ce0
A
1146
1147 so = kcb_find_socket(kctlref, unit, NULL);
1148 if (so == NULL) {
0a7de745 1149 return EINVAL;
3e170ce0
A
1150 }
1151
1152 cnt = 0;
1153 m1 = so->so_rcv.sb_mb;
1154 while (m1 != NULL) {
1155 if (m1->m_type == MT_DATA ||
1156 m1->m_type == MT_HEADER ||
0a7de745 1157 m1->m_type == MT_OOBDATA) {
3e170ce0 1158 cnt += 1;
0a7de745 1159 }
3e170ce0
A
1160 m1 = m1->m_nextpkt;
1161 }
1162 *pcnt = cnt;
1163
1164 socket_unlock(so, 1);
1165
0a7de745 1166 return 0;
3e170ce0 1167}
55e303ae 1168
fe8ab488 1169errno_t
91447636
A
1170ctl_getenqueuespace(kern_ctl_ref kctlref, u_int32_t unit, size_t *space)
1171{
0a7de745 1172 struct socket *so;
2d21ac55 1173 long avail;
fe8ab488 1174
0a7de745
A
1175 if (space == NULL) {
1176 return EINVAL;
1177 }
fe8ab488 1178
3e170ce0
A
1179 so = kcb_find_socket(kctlref, unit, NULL);
1180 if (so == NULL) {
0a7de745 1181 return EINVAL;
3e170ce0 1182 }
fe8ab488 1183
2d21ac55
A
1184 avail = sbspace(&so->so_rcv);
1185 *space = (avail < 0) ? 0 : avail;
91447636 1186 socket_unlock(so, 1);
fe8ab488 1187
0a7de745 1188 return 0;
fe8ab488
A
1189}
1190
1191errno_t
1192ctl_getenqueuereadable(kern_ctl_ref kctlref, u_int32_t unit,
1193 u_int32_t *difference)
1194{
0a7de745 1195 struct socket *so;
fe8ab488 1196
0a7de745
A
1197 if (difference == NULL) {
1198 return EINVAL;
1199 }
fe8ab488 1200
3e170ce0
A
1201 so = kcb_find_socket(kctlref, unit, NULL);
1202 if (so == NULL) {
0a7de745 1203 return EINVAL;
3e170ce0 1204 }
fe8ab488
A
1205
1206 if (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat) {
1207 *difference = 0;
1208 } else {
1209 *difference = (so->so_rcv.sb_lowat - so->so_rcv.sb_cc);
1210 }
1211 socket_unlock(so, 1);
1212
0a7de745 1213 return 0;
9bccf70c
A
1214}
1215
91447636 1216static int
9bccf70c
A
1217ctl_ctloutput(struct socket *so, struct sockopt *sopt)
1218{
0a7de745
A
1219 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
1220 struct kctl *kctl;
1221 int error = 0;
1222 void *data = NULL;
1223 size_t len;
fe8ab488 1224
91447636 1225 if (sopt->sopt_level != SYSPROTO_CONTROL) {
0a7de745 1226 return EINVAL;
91447636 1227 }
fe8ab488 1228
0a7de745
A
1229 if (kcb == NULL) { /* sanity check */
1230 return ENOTCONN;
1231 }
fe8ab488 1232
0a7de745
A
1233 if ((kctl = kcb->kctl) == NULL) {
1234 return EINVAL;
1235 }
fe8ab488 1236
91447636 1237 switch (sopt->sopt_dir) {
0a7de745
A
1238 case SOPT_SET:
1239 if (kctl->setopt == NULL) {
1240 return ENOTSUP;
1241 }
1242 if (sopt->sopt_valsize != 0) {
1243 MALLOC(data, void *, sopt->sopt_valsize, M_TEMP,
1244 M_WAITOK | M_ZERO);
1245 if (data == NULL) {
1246 return ENOMEM;
91447636 1247 }
0a7de745
A
1248 error = sooptcopyin(sopt, data,
1249 sopt->sopt_valsize, sopt->sopt_valsize);
1250 }
1251 if (error == 0) {
1252 socket_unlock(so, 0);
1253 error = (*kctl->setopt)(kctl->kctlref,
1254 kcb->sac.sc_unit, kcb->userdata, sopt->sopt_name,
1255 data, sopt->sopt_valsize);
1256 socket_lock(so, 0);
1257 }
5c9f4661 1258
0a7de745
A
1259 if (data != NULL) {
1260 FREE(data, M_TEMP);
1261 }
1262 break;
fe8ab488 1263
0a7de745
A
1264 case SOPT_GET:
1265 if (kctl->getopt == NULL) {
1266 return ENOTSUP;
1267 }
5c9f4661 1268
0a7de745
A
1269 if (sopt->sopt_valsize && sopt->sopt_val) {
1270 MALLOC(data, void *, sopt->sopt_valsize, M_TEMP,
1271 M_WAITOK | M_ZERO);
1272 if (data == NULL) {
1273 return ENOMEM;
91447636 1274 }
0a7de745
A
1275 /*
1276 * 4108337 - copy user data in case the
1277 * kernel control needs it
1278 */
1279 error = sooptcopyin(sopt, data,
1280 sopt->sopt_valsize, sopt->sopt_valsize);
1281 }
5c9f4661 1282
0a7de745
A
1283 if (error == 0) {
1284 len = sopt->sopt_valsize;
1285 socket_unlock(so, 0);
1286 error = (*kctl->getopt)(kctl->kctlref, kcb->sac.sc_unit,
1287 kcb->userdata, sopt->sopt_name,
1288 data, &len);
1289 if (data != NULL && len > sopt->sopt_valsize) {
1290 panic_plain("ctl_ctloutput: ctl %s returned "
1291 "len (%lu) > sopt_valsize (%lu)\n",
1292 kcb->kctl->name, len,
1293 sopt->sopt_valsize);
1294 }
1295 socket_lock(so, 0);
91447636 1296 if (error == 0) {
0a7de745
A
1297 if (data != NULL) {
1298 error = sooptcopyout(sopt, data, len);
1299 } else {
1300 sopt->sopt_valsize = len;
5c9f4661 1301 }
91447636 1302 }
0a7de745
A
1303 }
1304 if (data != NULL) {
1305 FREE(data, M_TEMP);
1306 }
1307 break;
91447636 1308 }
0a7de745 1309 return error;
91447636 1310}
9bccf70c 1311
fe8ab488
A
1312static int
1313ctl_ioctl(struct socket *so, u_long cmd, caddr_t data,
0a7de745 1314 struct ifnet *ifp, struct proc *p)
91447636 1315{
fe8ab488 1316#pragma unused(so, ifp, p)
0a7de745 1317 int error = ENOTSUP;
fe8ab488 1318
91447636 1319 switch (cmd) {
0a7de745
A
1320 /* get the number of controllers */
1321 case CTLIOCGCOUNT: {
1322 struct kctl *kctl;
1323 u_int32_t n = 0;
91447636 1324
0a7de745
A
1325 lck_mtx_lock(ctl_mtx);
1326 TAILQ_FOREACH(kctl, &ctl_head, next)
1327 n++;
1328 lck_mtx_unlock(ctl_mtx);
fe8ab488 1329
0a7de745
A
1330 bcopy(&n, data, sizeof(n));
1331 error = 0;
1332 break;
1333 }
1334 case CTLIOCGINFO: {
1335 struct ctl_info ctl_info;
1336 struct kctl *kctl = 0;
1337 size_t name_len;
316670eb 1338
0a7de745
A
1339 bcopy(data, &ctl_info, sizeof(ctl_info));
1340 name_len = strnlen(ctl_info.ctl_name, MAX_KCTL_NAME);
316670eb 1341
0a7de745
A
1342 if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) {
1343 error = EINVAL;
91447636
A
1344 break;
1345 }
0a7de745
A
1346 lck_mtx_lock(ctl_mtx);
1347 kctl = ctl_find_by_name(ctl_info.ctl_name);
1348 lck_mtx_unlock(ctl_mtx);
1349 if (kctl == 0) {
1350 error = ENOENT;
1351 break;
1352 }
1353 ctl_info.ctl_id = kctl->id;
1354 bcopy(&ctl_info, data, sizeof(ctl_info));
1355 error = 0;
1356 break;
1357 }
fe8ab488 1358
91447636 1359 /* add controls to get list of NKEs */
91447636 1360 }
fe8ab488 1361
0a7de745 1362 return error;
91447636 1363}
9bccf70c 1364
3e170ce0
A
1365static void
1366kctl_tbl_grow()
1367{
1368 struct kctl **new_table;
1369 uintptr_t new_size;
1370
1371 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1372
39037602 1373 if (kctl_tbl_growing) {
3e170ce0 1374 /* Another thread is allocating */
39037602
A
1375 kctl_tbl_growing_waiting++;
1376
1377 do {
1378 (void) msleep((caddr_t) &kctl_tbl_growing, ctl_mtx,
0a7de745 1379 PSOCK | PCATCH, "kctl_tbl_growing", 0);
39037602
A
1380 } while (kctl_tbl_growing);
1381 kctl_tbl_growing_waiting--;
3e170ce0
A
1382 }
1383 /* Another thread grew the table */
0a7de745 1384 if (kctl_table != NULL && kctl_tbl_count < kctl_tbl_size) {
3e170ce0 1385 return;
0a7de745 1386 }
3e170ce0
A
1387
1388 /* Verify we have a sane size */
1389 if (kctl_tbl_size + KCTL_TBL_INC >= UINT16_MAX) {
39037602 1390 kctlstat.kcs_tbl_size_too_big++;
0a7de745 1391 if (ctl_debug) {
39037602
A
1392 printf("%s kctl_tbl_size %lu too big\n",
1393 __func__, kctl_tbl_size);
0a7de745 1394 }
3e170ce0
A
1395 return;
1396 }
1397 kctl_tbl_growing = 1;
1398
1399 new_size = kctl_tbl_size + KCTL_TBL_INC;
1400
1401 lck_mtx_unlock(ctl_mtx);
1402 new_table = _MALLOC(sizeof(struct kctl *) * new_size,
1403 M_TEMP, M_WAIT | M_ZERO);
1404 lck_mtx_lock(ctl_mtx);
1405
1406 if (new_table != NULL) {
1407 if (kctl_table != NULL) {
1408 bcopy(kctl_table, new_table,
1409 kctl_tbl_size * sizeof(struct kctl *));
1410
1411 _FREE(kctl_table, M_TEMP);
1412 }
1413 kctl_table = new_table;
1414 kctl_tbl_size = new_size;
1415 }
1416
1417 kctl_tbl_growing = 0;
39037602
A
1418
1419 if (kctl_tbl_growing_waiting) {
1420 wakeup(&kctl_tbl_growing);
1421 }
3e170ce0
A
1422}
1423
1424#define KCTLREF_INDEX_MASK 0x0000FFFF
1425#define KCTLREF_GENCNT_MASK 0xFFFF0000
1426#define KCTLREF_GENCNT_SHIFT 16
1427
1428static kern_ctl_ref
1429kctl_make_ref(struct kctl *kctl)
1430{
1431 uintptr_t i;
1432
1433 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1434
0a7de745 1435 if (kctl_tbl_count >= kctl_tbl_size) {
3e170ce0 1436 kctl_tbl_grow();
0a7de745 1437 }
3e170ce0
A
1438
1439 kctl->kctlref = NULL;
1440 for (i = 0; i < kctl_tbl_size; i++) {
1441 if (kctl_table[i] == NULL) {
1442 uintptr_t ref;
1443
1444 /*
1445 * Reference is index plus one
1446 */
1447 kctl_ref_gencnt += 1;
1448
1449 /*
1450 * Add generation count as salt to reference to prevent
1451 * use after deregister
1452 */
0a7de745 1453 ref = ((kctl_ref_gencnt << KCTLREF_GENCNT_SHIFT) &
3e170ce0
A
1454 KCTLREF_GENCNT_MASK) +
1455 ((i + 1) & KCTLREF_INDEX_MASK);
1456
1457 kctl->kctlref = (void *)(ref);
1458 kctl_table[i] = kctl;
1459 kctl_tbl_count++;
1460 break;
1461 }
1462 }
1463
0a7de745 1464 if (kctl->kctlref == NULL) {
3e170ce0 1465 panic("%s no space in table", __func__);
0a7de745 1466 }
3e170ce0 1467
0a7de745 1468 if (ctl_debug > 0) {
3e170ce0 1469 printf("%s %p for %p\n",
0a7de745
A
1470 __func__, kctl->kctlref, kctl);
1471 }
3e170ce0 1472
0a7de745 1473 return kctl->kctlref;
3e170ce0
A
1474}
1475
1476static void
1477kctl_delete_ref(kern_ctl_ref kctlref)
1478{
1479 /*
1480 * Reference is index plus one
1481 */
1482 uintptr_t i = (((uintptr_t)kctlref) & KCTLREF_INDEX_MASK) - 1;
1483
1484 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1485
1486 if (i < kctl_tbl_size) {
1487 struct kctl *kctl = kctl_table[i];
1488
1489 if (kctl->kctlref == kctlref) {
1490 kctl_table[i] = NULL;
1491 kctl_tbl_count--;
1492 } else {
1493 kctlstat.kcs_bad_kctlref++;
1494 }
1495 } else {
1496 kctlstat.kcs_bad_kctlref++;
1497 }
1498}
1499
1500static struct kctl *
1501kctl_from_ref(kern_ctl_ref kctlref)
1502{
1503 /*
1504 * Reference is index plus one
1505 */
1506 uintptr_t i = (((uintptr_t)kctlref) & KCTLREF_INDEX_MASK) - 1;
1507 struct kctl *kctl = NULL;
1508
1509 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1510
1511 if (i >= kctl_tbl_size) {
1512 kctlstat.kcs_bad_kctlref++;
0a7de745 1513 return NULL;
3e170ce0
A
1514 }
1515 kctl = kctl_table[i];
1516 if (kctl->kctlref != kctlref) {
1517 kctlstat.kcs_bad_kctlref++;
0a7de745 1518 return NULL;
3e170ce0 1519 }
0a7de745 1520 return kctl;
3e170ce0
A
1521}
1522
91447636
A
1523/*
1524 * Register/unregister a NKE
1525 */
/*
 * Register a kernel control (NKE).  On success *kctlref receives the
 * opaque reference callers pass to the ctl_* KPIs, the control is linked
 * into ctl_head sorted by id, and a KEV_CTL_REGISTERED event is posted.
 * Returns EINVAL on bad arguments, ENOMEM on allocation failure, EEXIST
 * when the name or (id, unit) is already taken.
 */
errno_t
ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref)
{
	struct kctl *kctl = NULL;
	struct kctl *kctl_next = NULL;
	u_int32_t id = 1;
	size_t name_len;
	int is_extended = 0;

	if (userkctl == NULL) {  /* sanity check */
		return EINVAL;
	}
	if (userkctl->ctl_connect == NULL) {
		return EINVAL;
	}
	name_len = strlen(userkctl->ctl_name);
	if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) {
		return EINVAL;
	}

	MALLOC(kctl, struct kctl *, sizeof(*kctl), M_TEMP, M_WAITOK);
	if (kctl == NULL) {
		return ENOMEM;
	}
	bzero((char *)kctl, sizeof(*kctl));

	lck_mtx_lock(ctl_mtx);

	/* Claim a slot/reference up front; undo on any later failure */
	if (kctl_make_ref(kctl) == NULL) {
		lck_mtx_unlock(ctl_mtx);
		FREE(kctl, M_TEMP);
		return ENOMEM;
	}

	/*
	 * Kernel Control IDs
	 *
	 * CTL_FLAG_REG_ID_UNIT indicates the control ID and unit number are
	 * static. If they do not exist, add them to the list in order. If the
	 * flag is not set, we must find a new unique value. We assume the
	 * list is in order. We find the last item in the list and add one. If
	 * this leads to wrapping the id around, we start at the front of the
	 * list and look for a gap.
	 */

	if ((userkctl->ctl_flags & CTL_FLAG_REG_ID_UNIT) == 0) {
		/* Must dynamically assign an unused ID */

		/* Verify the same name isn't already registered */
		if (ctl_find_by_name(userkctl->ctl_name) != NULL) {
			kctl_delete_ref(kctl->kctlref);
			lck_mtx_unlock(ctl_mtx);
			FREE(kctl, M_TEMP);
			return EEXIST;
		}

		/* Start with 1 in case the list is empty */
		id = 1;
		kctl_next = TAILQ_LAST(&ctl_head, kctl_list);

		if (kctl_next != NULL) {
			/* List was not empty, add one to the last item */
			id = kctl_next->id + 1;
			kctl_next = NULL;

			/*
			 * If this wrapped the id number, start looking at
			 * the front of the list for an unused id.
			 */
			if (id == 0) {
				/* Find the next unused ID */
				id = 1;

				TAILQ_FOREACH(kctl_next, &ctl_head, next) {
					if (kctl_next->id > id) {
						/* We found a gap */
						break;
					}

					id = kctl_next->id + 1;
				}
			}
		}

		/* Report the assigned id back to the caller */
		userkctl->ctl_id = id;
		kctl->id = id;
		kctl->reg_unit = -1;
	} else {
		/* Static id/unit: find the insertion point that keeps the
		 * list sorted by id */
		TAILQ_FOREACH(kctl_next, &ctl_head, next) {
			if (kctl_next->id > userkctl->ctl_id) {
				break;
			}
		}

		if (ctl_find_by_id_unit(userkctl->ctl_id, userkctl->ctl_unit)) {
			kctl_delete_ref(kctl->kctlref);
			lck_mtx_unlock(ctl_mtx);
			FREE(kctl, M_TEMP);
			return EEXIST;
		}
		kctl->id = userkctl->ctl_id;
		kctl->reg_unit = userkctl->ctl_unit;
	}

	is_extended = (userkctl->ctl_flags & CTL_FLAG_REG_EXTENDED);

	strlcpy(kctl->name, userkctl->ctl_name, MAX_KCTL_NAME);
	kctl->flags = userkctl->ctl_flags;

	/*
	 * Let the caller know the default send and receive sizes
	 */
	if (userkctl->ctl_sendsize == 0) {
		kctl->sendbufsize = CTL_SENDSIZE;
		userkctl->ctl_sendsize = kctl->sendbufsize;
	} else {
		kctl->sendbufsize = userkctl->ctl_sendsize;
	}
	if (userkctl->ctl_recvsize == 0) {
		kctl->recvbufsize = CTL_RECVSIZE;
		userkctl->ctl_recvsize = kctl->recvbufsize;
	} else {
		kctl->recvbufsize = userkctl->ctl_recvsize;
	}

	/* Wire up the caller's callbacks */
	kctl->bind = userkctl->ctl_bind;
	kctl->connect = userkctl->ctl_connect;
	kctl->disconnect = userkctl->ctl_disconnect;
	kctl->send = userkctl->ctl_send;
	kctl->setopt = userkctl->ctl_setopt;
	kctl->getopt = userkctl->ctl_getopt;
	if (is_extended) {
		kctl->rcvd = userkctl->ctl_rcvd;
		kctl->send_list = userkctl->ctl_send_list;
	}

	TAILQ_INIT(&kctl->kcb_head);

	/* kctl_next (if set) is the first entry with a larger id */
	if (kctl_next) {
		TAILQ_INSERT_BEFORE(kctl_next, kctl, next);
	} else {
		TAILQ_INSERT_TAIL(&ctl_head, kctl, next);
	}

	kctlstat.kcs_reg_count++;
	kctlstat.kcs_gencnt++;

	lck_mtx_unlock(ctl_mtx);

	*kctlref = kctl->kctlref;

	ctl_post_msg(KEV_CTL_REGISTERED, kctl->id);
	return 0;
}
1680
91447636
A
1681errno_t
1682ctl_deregister(void *kctlref)
fe8ab488 1683{
0a7de745 1684 struct kctl *kctl;
fe8ab488 1685
fe8ab488 1686 lck_mtx_lock(ctl_mtx);
3e170ce0
A
1687 if ((kctl = kctl_from_ref(kctlref)) == NULL) {
1688 kctlstat.kcs_bad_kctlref++;
fe8ab488 1689 lck_mtx_unlock(ctl_mtx);
0a7de745 1690 if (ctl_debug != 0) {
3e170ce0 1691 printf("%s invalid kctlref %p\n",
0a7de745
A
1692 __func__, kctlref);
1693 }
1694 return EINVAL;
fe8ab488 1695 }
3e170ce0 1696
91447636 1697 if (!TAILQ_EMPTY(&kctl->kcb_head)) {
fe8ab488 1698 lck_mtx_unlock(ctl_mtx);
0a7de745 1699 return EBUSY;
91447636
A
1700 }
1701
fe8ab488
A
1702 TAILQ_REMOVE(&ctl_head, kctl, next);
1703
1704 kctlstat.kcs_reg_count--;
1705 kctlstat.kcs_gencnt++;
91447636 1706
3e170ce0 1707 kctl_delete_ref(kctl->kctlref);
fe8ab488
A
1708 lck_mtx_unlock(ctl_mtx);
1709
1710 ctl_post_msg(KEV_CTL_DEREGISTERED, kctl->id);
1711 FREE(kctl, M_TEMP);
0a7de745 1712 return 0;
9bccf70c
A
1713}
1714
91447636
A
1715/*
1716 * Must be called with global ctl_mtx lock taked
1717 */
1718static struct kctl *
1719ctl_find_by_name(const char *name)
fe8ab488 1720{
0a7de745 1721 struct kctl *kctl;
fe8ab488
A
1722
1723 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
9bccf70c 1724
fe8ab488 1725 TAILQ_FOREACH(kctl, &ctl_head, next)
0a7de745
A
1726 if (strncmp(kctl->name, name, sizeof(kctl->name)) == 0) {
1727 return kctl;
1728 }
9bccf70c 1729
0a7de745 1730 return NULL;
91447636 1731}
9bccf70c 1732
6d2010ae
A
1733u_int32_t
1734ctl_id_by_name(const char *name)
1735{
0a7de745
A
1736 u_int32_t ctl_id = 0;
1737 struct kctl *kctl;
fe8ab488 1738
6d2010ae 1739 lck_mtx_lock(ctl_mtx);
fe8ab488 1740 kctl = ctl_find_by_name(name);
0a7de745 1741 if (kctl) {
fe8ab488 1742 ctl_id = kctl->id;
0a7de745 1743 }
6d2010ae 1744 lck_mtx_unlock(ctl_mtx);
fe8ab488 1745
0a7de745 1746 return ctl_id;
6d2010ae
A
1747}
1748
1749errno_t
fe8ab488 1750ctl_name_by_id(u_int32_t id, char *out_name, size_t maxsize)
6d2010ae 1751{
0a7de745 1752 int found = 0;
6d2010ae 1753 struct kctl *kctl;
fe8ab488
A
1754
1755 lck_mtx_lock(ctl_mtx);
1756 TAILQ_FOREACH(kctl, &ctl_head, next) {
0a7de745 1757 if (kctl->id == id) {
fe8ab488 1758 break;
0a7de745 1759 }
fe8ab488
A
1760 }
1761
3e170ce0 1762 if (kctl) {
0a7de745 1763 if (maxsize > MAX_KCTL_NAME) {
fe8ab488 1764 maxsize = MAX_KCTL_NAME;
0a7de745 1765 }
fe8ab488
A
1766 strlcpy(out_name, kctl->name, maxsize);
1767 found = 1;
1768 }
6d2010ae 1769 lck_mtx_unlock(ctl_mtx);
fe8ab488 1770
0a7de745 1771 return found ? 0 : ENOENT;
6d2010ae
A
1772}
1773
91447636
A
1774/*
1775 * Must be called with global ctl_mtx lock taked
1776 *
1777 */
1778static struct kctl *
1779ctl_find_by_id_unit(u_int32_t id, u_int32_t unit)
fe8ab488 1780{
0a7de745 1781 struct kctl *kctl;
fe8ab488
A
1782
1783 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1784
1785 TAILQ_FOREACH(kctl, &ctl_head, next) {
0a7de745
A
1786 if (kctl->id == id && (kctl->flags & CTL_FLAG_REG_ID_UNIT) == 0) {
1787 return kctl;
1788 } else if (kctl->id == id && kctl->reg_unit == unit) {
1789 return kctl;
1790 }
fe8ab488 1791 }
0a7de745 1792 return NULL;
9bccf70c
A
1793}
1794
1795/*
91447636 1796 * Must be called with kernel controller lock taken
9bccf70c 1797 */
91447636
A
1798static struct ctl_cb *
1799kcb_find(struct kctl *kctl, u_int32_t unit)
fe8ab488 1800{
0a7de745 1801 struct ctl_cb *kcb;
9bccf70c 1802
fe8ab488 1803 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
9bccf70c 1804
fe8ab488 1805 TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
0a7de745
A
1806 if (kcb->sac.sc_unit == unit) {
1807 return kcb;
1808 }
fe8ab488 1809
0a7de745 1810 return NULL;
9bccf70c
A
1811}
1812
/*
 * Resolve (kctlref, unit) to the attached socket and return it locked.
 *
 * A usecount reference is held on the ctl_cb while the socket lock is
 * acquired (lock order is socket before ctl_mtx, so ctl_mtx must be
 * dropped first), then the kctlref is revalidated in case the control
 * was deregistered in the window.  Returns NULL when the reference,
 * unit, or socket is no longer valid.  On success the kctl flags are
 * copied out through *kctlflags when the pointer is non-NULL.
 */
static struct socket *
kcb_find_socket(kern_ctl_ref kctlref, u_int32_t unit, u_int32_t *kctlflags)
{
	struct socket *so = NULL;
	struct ctl_cb *kcb;
	void *lr_saved;
	struct kctl *kctl;
	int i;

	lr_saved = __builtin_return_address(0);

	lck_mtx_lock(ctl_mtx);
	/*
	 * First validate the kctlref
	 */
	if ((kctl = kctl_from_ref(kctlref)) == NULL) {
		kctlstat.kcs_bad_kctlref++;
		lck_mtx_unlock(ctl_mtx);
		if (ctl_debug != 0) {
			printf("%s invalid kctlref %p\n",
			    __func__, kctlref);
		}
		return NULL;
	}

	kcb = kcb_find(kctl, unit);
	if (kcb == NULL || kcb->kctl != kctl || (so = kcb->so) == NULL) {
		lck_mtx_unlock(ctl_mtx);
		return NULL;
	}
	/*
	 * This prevents the socket from being closed
	 */
	kcb->usecount++;
	/*
	 * Respect lock ordering: socket before ctl_mtx
	 */
	lck_mtx_unlock(ctl_mtx);

	socket_lock(so, 1);
	/*
	 * The socket lock history is more useful if we store
	 * the address of the caller.
	 */
	i = (so->next_lock_lr + SO_LCKDBG_MAX - 1) % SO_LCKDBG_MAX;
	so->lock_lr[i] = lr_saved;

	lck_mtx_lock(ctl_mtx);

	/* Revalidate: the control may have gone away while unlocked */
	if ((kctl = kctl_from_ref(kctlref)) == NULL || kcb->kctl == NULL) {
		lck_mtx_unlock(ctl_mtx);
		socket_unlock(so, 1);
		so = NULL;
		/* Retake ctl_mtx for the usecount drop below */
		lck_mtx_lock(ctl_mtx);
	} else if (kctlflags != NULL) {
		*kctlflags = kctl->flags;
	}

	/* Drop the hold; wake anyone waiting to tear the kcb down */
	kcb->usecount--;
	if (kcb->usecount == 0) {
		wakeup((event_t)&kcb->usecount);
	}

	lck_mtx_unlock(ctl_mtx);

	return so;
}
1880
fe8ab488
A
1881static void
1882ctl_post_msg(u_int32_t event_code, u_int32_t id)
9bccf70c 1883{
0a7de745
A
1884 struct ctl_event_data ctl_ev_data;
1885 struct kev_msg ev_msg;
fe8ab488
A
1886
1887 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_NOTOWNED);
1888
1889 bzero(&ev_msg, sizeof(struct kev_msg));
1890 ev_msg.vendor_code = KEV_VENDOR_APPLE;
1891
1892 ev_msg.kev_class = KEV_SYSTEM_CLASS;
1893 ev_msg.kev_subclass = KEV_CTL_SUBCLASS;
1894 ev_msg.event_code = event_code;
1895
1896 /* common nke subclass data */
1897 bzero(&ctl_ev_data, sizeof(ctl_ev_data));
1898 ctl_ev_data.ctl_id = id;
1899 ev_msg.dv[0].data_ptr = &ctl_ev_data;
1900 ev_msg.dv[0].data_length = sizeof(ctl_ev_data);
1901
1902 ev_msg.dv[1].data_length = 0;
1903
1904 kev_post_msg(&ev_msg);
9bccf70c
A
1905}
1906
91447636 1907static int
b0d623f7
A
1908ctl_lock(struct socket *so, int refcount, void *lr)
1909{
1910 void *lr_saved;
1911
0a7de745 1912 if (lr == NULL) {
b0d623f7 1913 lr_saved = __builtin_return_address(0);
0a7de745 1914 } else {
b0d623f7 1915 lr_saved = lr;
0a7de745 1916 }
b0d623f7
A
1917
1918 if (so->so_pcb != NULL) {
91447636 1919 lck_mtx_lock(((struct ctl_cb *)so->so_pcb)->mtx);
0a7de745 1920 } else {
fe8ab488 1921 panic("ctl_lock: so=%p NO PCB! lr=%p lrh= %s\n",
b0d623f7
A
1922 so, lr_saved, solockhistory_nr(so));
1923 /* NOTREACHED */
91447636 1924 }
b0d623f7
A
1925
1926 if (so->so_usecount < 0) {
1927 panic("ctl_lock: so=%p so_pcb=%p lr=%p ref=%x lrh= %s\n",
0a7de745
A
1928 so, so->so_pcb, lr_saved, so->so_usecount,
1929 solockhistory_nr(so));
b0d623f7
A
1930 /* NOTREACHED */
1931 }
1932
0a7de745 1933 if (refcount) {
91447636 1934 so->so_usecount++;
0a7de745 1935 }
0c530ab8 1936
2d21ac55 1937 so->lock_lr[so->next_lock_lr] = lr_saved;
0a7de745
A
1938 so->next_lock_lr = (so->next_lock_lr + 1) % SO_LCKDBG_MAX;
1939 return 0;
91447636
A
1940}
1941
1942static int
b0d623f7 1943ctl_unlock(struct socket *so, int refcount, void *lr)
91447636 1944{
b0d623f7
A
1945 void *lr_saved;
1946 lck_mtx_t *mutex_held;
1947
0a7de745 1948 if (lr == NULL) {
b0d623f7 1949 lr_saved = __builtin_return_address(0);
0a7de745 1950 } else {
b0d623f7 1951 lr_saved = lr;
0a7de745 1952 }
b0d623f7 1953
39037602 1954#if (MORE_KCTLLOCK_DEBUG && (DEVELOPMENT || DEBUG))
fe8ab488
A
1955 printf("ctl_unlock: so=%llx sopcb=%x lock=%llx ref=%u lr=%llx\n",
1956 (uint64_t)VM_KERNEL_ADDRPERM(so),
1957 (uint64_t)VM_KERNEL_ADDRPERM(so->so_pcb,
1958 (uint64_t)VM_KERNEL_ADDRPERM(((struct ctl_cb *)so->so_pcb)->mtx),
1959 so->so_usecount, (uint64_t)VM_KERNEL_ADDRPERM(lr_saved));
39037602 1960#endif /* (MORE_KCTLLOCK_DEBUG && (DEVELOPMENT || DEBUG)) */
0a7de745 1961 if (refcount) {
91447636 1962 so->so_usecount--;
0a7de745 1963 }
b0d623f7
A
1964
1965 if (so->so_usecount < 0) {
fe8ab488 1966 panic("ctl_unlock: so=%p usecount=%x lrh= %s\n",
b0d623f7
A
1967 so, so->so_usecount, solockhistory_nr(so));
1968 /* NOTREACHED */
1969 }
91447636 1970 if (so->so_pcb == NULL) {
fe8ab488 1971 panic("ctl_unlock: so=%p NO PCB usecount=%x lr=%p lrh= %s\n",
0a7de745
A
1972 so, so->so_usecount, (void *)lr_saved,
1973 solockhistory_nr(so));
b0d623f7 1974 /* NOTREACHED */
91447636 1975 }
b0d623f7
A
1976 mutex_held = ((struct ctl_cb *)so->so_pcb)->mtx;
1977
0a7de745
A
1978 lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
1979 so->unlock_lr[so->next_unlock_lr] = lr_saved;
1980 so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX;
1981 lck_mtx_unlock(mutex_held);
b0d623f7 1982
0a7de745 1983 if (so->so_usecount == 0) {
91447636 1984 ctl_sofreelastref(so);
0a7de745 1985 }
b0d623f7 1986
0a7de745 1987 return 0;
91447636
A
1988}
1989
1990static lck_mtx_t *
5ba3f43e 1991ctl_getlock(struct socket *so, int flags)
91447636 1992{
5ba3f43e 1993#pragma unused(flags)
0a7de745 1994 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
fe8ab488 1995
0a7de745
A
1996 if (so->so_pcb) {
1997 if (so->so_usecount < 0) {
1998 panic("ctl_getlock: so=%p usecount=%x lrh= %s\n",
1999 so, so->so_usecount, solockhistory_nr(so));
2000 }
2001 return kcb->mtx;
91447636 2002 } else {
0a7de745
A
2003 panic("ctl_getlock: so=%p NULL NO so_pcb %s\n",
2004 so, solockhistory_nr(so));
2005 return so->so_proto->pr_domain->dom_mtx;
91447636
A
2006 }
2007}
fe8ab488
A
2008
/*
 * sysctl handler exporting the list of registered kernel controls as a
 * sequence of xkctl_reg records bracketed by xsystmgen generation
 * headers, so userland can detect concurrent list changes and retry.
 */
__private_extern__ int
kctl_reg_list SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error = 0;
	int n, i;
	struct xsystmgen xsg;
	void *buf = NULL;
	struct kctl *kctl;
	size_t item_size = ROUNDUP64(sizeof(struct xkctl_reg));

	buf = _MALLOC(item_size, M_TEMP, M_WAITOK | M_ZERO);
	if (buf == NULL) {
		return ENOMEM;
	}

	lck_mtx_lock(ctl_mtx);

	n = kctlstat.kcs_reg_count;

	if (req->oldptr == USER_ADDR_NULL) {
		/* Size probe: report an estimate with some headroom */
		req->oldidx = (n + n / 8) * sizeof(struct xkctl_reg);
		goto done;
	}
	if (req->newptr != USER_ADDR_NULL) {
		/* This node is read-only */
		error = EPERM;
		goto done;
	}
	bzero(&xsg, sizeof(xsg));
	xsg.xg_len = sizeof(xsg);
	xsg.xg_count = n;
	xsg.xg_gen = kctlstat.kcs_gencnt;
	xsg.xg_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
	if (error) {
		goto done;
	}
	/*
	 * We are done if there is no pcb
	 */
	if (n == 0) {
		goto done;
	}

	i = 0;
	for (i = 0, kctl = TAILQ_FIRST(&ctl_head);
	    i < n && kctl != NULL;
	    i++, kctl = TAILQ_NEXT(kctl, next)) {
		struct xkctl_reg *xkr = (struct xkctl_reg *)buf;
		struct ctl_cb *kcb;
		u_int32_t pcbcount = 0;

		TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
		pcbcount++;

		bzero(buf, item_size);

		xkr->xkr_len = sizeof(struct xkctl_reg);
		xkr->xkr_kind = XSO_KCREG;
		xkr->xkr_id = kctl->id;
		xkr->xkr_reg_unit = kctl->reg_unit;
		xkr->xkr_flags = kctl->flags;
		xkr->xkr_kctlref = (uint64_t)(kctl->kctlref);
		xkr->xkr_recvbufsize = kctl->recvbufsize;
		xkr->xkr_sendbufsize = kctl->sendbufsize;
		xkr->xkr_lastunit = kctl->lastunit;
		xkr->xkr_pcbcount = pcbcount;
		/* Callback pointers are unslid before being exported */
		xkr->xkr_connect = (uint64_t)VM_KERNEL_UNSLIDE(kctl->connect);
		xkr->xkr_disconnect =
		    (uint64_t)VM_KERNEL_UNSLIDE(kctl->disconnect);
		xkr->xkr_send = (uint64_t)VM_KERNEL_UNSLIDE(kctl->send);
		xkr->xkr_send_list =
		    (uint64_t)VM_KERNEL_UNSLIDE(kctl->send_list);
		xkr->xkr_setopt = (uint64_t)VM_KERNEL_UNSLIDE(kctl->setopt);
		xkr->xkr_getopt = (uint64_t)VM_KERNEL_UNSLIDE(kctl->getopt);
		xkr->xkr_rcvd = (uint64_t)VM_KERNEL_UNSLIDE(kctl->rcvd);
		strlcpy(xkr->xkr_name, kctl->name, sizeof(xkr->xkr_name));

		error = SYSCTL_OUT(req, buf, item_size);
	}

	if (error == 0) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		bzero(&xsg, sizeof(xsg));
		xsg.xg_len = sizeof(xsg);
		xsg.xg_count = n;
		xsg.xg_gen = kctlstat.kcs_gencnt;
		xsg.xg_sogen = so_gencnt;
		error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
		if (error) {
			goto done;
		}
	}

done:
	lck_mtx_unlock(ctl_mtx);

	if (buf != NULL) {
		FREE(buf, M_TEMP);
	}

	return error;
}
2118
2119__private_extern__ int
2120kctl_pcblist SYSCTL_HANDLER_ARGS
2121{
2122#pragma unused(oidp, arg1, arg2)
0a7de745
A
2123 int error = 0;
2124 int n, i;
2125 struct xsystmgen xsg;
2126 void *buf = NULL;
2127 struct kctl *kctl;
2128 size_t item_size = ROUNDUP64(sizeof(struct xkctlpcb)) +
2129 ROUNDUP64(sizeof(struct xsocket_n)) +
2130 2 * ROUNDUP64(sizeof(struct xsockbuf_n)) +
2131 ROUNDUP64(sizeof(struct xsockstat_n));
2132
2133 buf = _MALLOC(item_size, M_TEMP, M_WAITOK | M_ZERO);
2134 if (buf == NULL) {
2135 return ENOMEM;
2136 }
2137
2138 lck_mtx_lock(ctl_mtx);
2139
2140 n = kctlstat.kcs_pcbcount;
2141
2142 if (req->oldptr == USER_ADDR_NULL) {
2143 req->oldidx = (n + n / 8) * item_size;
2144 goto done;
2145 }
2146 if (req->newptr != USER_ADDR_NULL) {
2147 error = EPERM;
2148 goto done;
2149 }
2150 bzero(&xsg, sizeof(xsg));
2151 xsg.xg_len = sizeof(xsg);
2152 xsg.xg_count = n;
2153 xsg.xg_gen = kctlstat.kcs_gencnt;
2154 xsg.xg_sogen = so_gencnt;
2155 error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
2156 if (error) {
2157 goto done;
2158 }
2159 /*
2160 * We are done if there is no pcb
2161 */
2162 if (n == 0) {
2163 goto done;
2164 }
2165
2166 i = 0;
2167 for (i = 0, kctl = TAILQ_FIRST(&ctl_head);
2168 i < n && kctl != NULL;
2169 kctl = TAILQ_NEXT(kctl, next)) {
2170 struct ctl_cb *kcb;
2171
2172 for (kcb = TAILQ_FIRST(&kctl->kcb_head);
2173 i < n && kcb != NULL;
2174 i++, kcb = TAILQ_NEXT(kcb, next)) {
2175 struct xkctlpcb *xk = (struct xkctlpcb *)buf;
2176 struct xsocket_n *xso = (struct xsocket_n *)
2177 ADVANCE64(xk, sizeof(*xk));
2178 struct xsockbuf_n *xsbrcv = (struct xsockbuf_n *)
2179 ADVANCE64(xso, sizeof(*xso));
2180 struct xsockbuf_n *xsbsnd = (struct xsockbuf_n *)
2181 ADVANCE64(xsbrcv, sizeof(*xsbrcv));
2182 struct xsockstat_n *xsostats = (struct xsockstat_n *)
2183 ADVANCE64(xsbsnd, sizeof(*xsbsnd));
2184
2185 bzero(buf, item_size);
2186
2187 xk->xkp_len = sizeof(struct xkctlpcb);
2188 xk->xkp_kind = XSO_KCB;
2189 xk->xkp_unit = kcb->sac.sc_unit;
2190 xk->xkp_kctpcb = (uint64_t)VM_KERNEL_ADDRPERM(kcb);
2191 xk->xkp_kctlref = (uint64_t)VM_KERNEL_ADDRPERM(kctl);
2192 xk->xkp_kctlid = kctl->id;
2193 strlcpy(xk->xkp_kctlname, kctl->name,
2194 sizeof(xk->xkp_kctlname));
2195
2196 sotoxsocket_n(kcb->so, xso);
2197 sbtoxsockbuf_n(kcb->so ?
2198 &kcb->so->so_rcv : NULL, xsbrcv);
2199 sbtoxsockbuf_n(kcb->so ?
2200 &kcb->so->so_snd : NULL, xsbsnd);
2201 sbtoxsockstat_n(kcb->so, xsostats);
2202
2203 error = SYSCTL_OUT(req, buf, item_size);
fe8ab488
A
2204 }
2205 }
2206
0a7de745
A
2207 if (error == 0) {
2208 /*
2209 * Give the user an updated idea of our state.
2210 * If the generation differs from what we told
2211 * her before, she knows that something happened
2212 * while we were processing this request, and it
2213 * might be necessary to retry.
2214 */
2215 bzero(&xsg, sizeof(xsg));
2216 xsg.xg_len = sizeof(xsg);
2217 xsg.xg_count = n;
2218 xsg.xg_gen = kctlstat.kcs_gencnt;
2219 xsg.xg_sogen = so_gencnt;
2220 error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
2221 if (error) {
2222 goto done;
fe8ab488
A
2223 }
2224 }
2225
2226done:
0a7de745 2227 lck_mtx_unlock(ctl_mtx);
fe8ab488 2228
0a7de745 2229 return error;
fe8ab488
A
2230}
2231
2232int
2233kctl_getstat SYSCTL_HANDLER_ARGS
2234{
2235#pragma unused(oidp, arg1, arg2)
0a7de745 2236 int error = 0;
fe8ab488 2237
0a7de745 2238 lck_mtx_lock(ctl_mtx);
fe8ab488 2239
0a7de745
A
2240 if (req->newptr != USER_ADDR_NULL) {
2241 error = EPERM;
2242 goto done;
fe8ab488 2243 }
0a7de745
A
2244 if (req->oldptr == USER_ADDR_NULL) {
2245 req->oldidx = sizeof(struct kctlstat);
2246 goto done;
fe8ab488
A
2247 }
2248
0a7de745
A
2249 error = SYSCTL_OUT(req, &kctlstat,
2250 MIN(sizeof(struct kctlstat), req->oldlen));
fe8ab488 2251done:
0a7de745
A
2252 lck_mtx_unlock(ctl_mtx);
2253 return error;
fe8ab488 2254}
3e170ce0
A
2255
2256void
2257kctl_fill_socketinfo(struct socket *so, struct socket_info *si)
2258{
0a7de745
A
2259 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
2260 struct kern_ctl_info *kcsi =
2261 &si->soi_proto.pri_kern_ctl;
2262 struct kctl *kctl = kcb->kctl;
3e170ce0 2263
0a7de745 2264 si->soi_kind = SOCKINFO_KERN_CTL;
3e170ce0 2265
0a7de745
A
2266 if (kctl == 0) {
2267 return;
2268 }
3e170ce0 2269
0a7de745
A
2270 kcsi->kcsi_id = kctl->id;
2271 kcsi->kcsi_reg_unit = kctl->reg_unit;
2272 kcsi->kcsi_flags = kctl->flags;
2273 kcsi->kcsi_recvbufsize = kctl->recvbufsize;
2274 kcsi->kcsi_sendbufsize = kctl->sendbufsize;
2275 kcsi->kcsi_unit = kcb->sac.sc_unit;
2276 strlcpy(kcsi->kcsi_name, kctl->name, MAX_KCTL_NAME);
3e170ce0 2277}