]> git.saurik.com Git - apple/xnu.git/blame - bsd/kern/kern_control.c
xnu-4570.41.2.tar.gz
[apple/xnu.git] / bsd / kern / kern_control.c
CommitLineData
9bccf70c 1/*
5c9f4661 2 * Copyright (c) 1999-2017 Apple Inc. All rights reserved.
9bccf70c 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
fe8ab488 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
fe8ab488 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
fe8ab488 17 *
2d21ac55
A
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
fe8ab488 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
9bccf70c 27 */
9bccf70c
A
28
29/*
91447636
A
 * Kernel Control domain - allows control connections to be made to
 * kernel controllers, and data to be read from / written to them.
9bccf70c 32 *
91447636 33 * Vincent Lubet, 040506
9bccf70c
A
34 * Christophe Allie, 010928
35 * Justin C. Walker, 990319
36 */
37
38#include <sys/types.h>
39#include <sys/param.h>
40#include <sys/systm.h>
41#include <sys/syslog.h>
42#include <sys/socket.h>
43#include <sys/socketvar.h>
44#include <sys/protosw.h>
45#include <sys/domain.h>
46#include <sys/malloc.h>
47#include <sys/mbuf.h>
9bccf70c
A
48#include <sys/sys_domain.h>
49#include <sys/kern_event.h>
50#include <sys/kern_control.h>
2d21ac55 51#include <sys/kauth.h>
fe8ab488 52#include <sys/sysctl.h>
3e170ce0 53#include <sys/proc_info.h>
9bccf70c
A
54#include <net/if_var.h>
55
56#include <mach/vm_types.h>
9bccf70c
A
57
58#include <kern/thread.h>
59
3e170ce0
A
/*
 * One struct kctl per registered kernel controller.  Controllers are
 * chained on the global ctl_head list and identified by (id, reg_unit);
 * list membership and the stats are protected by ctl_mtx.
 */
struct kctl {
	TAILQ_ENTRY(kctl)	next;		/* controller chain */
	kern_ctl_ref		kctlref;	/* opaque handle given to clients */

	/* controller information provided when registering */
	char		name[MAX_KCTL_NAME];	/* unique identifier */
	u_int32_t	id;
	u_int32_t	reg_unit;

	/* misc communication information */
	u_int32_t	flags;		/* support flags */
	u_int32_t	recvbufsize;	/* request more than the default buffer size */
	u_int32_t	sendbufsize;	/* request more than the default buffer size */

	/* Dispatch functions */
	ctl_bind_func		bind;		/* Prepare contact */
	ctl_connect_func	connect;	/* Make contact */
	ctl_disconnect_func	disconnect;	/* Break contact */
	ctl_send_func		send;		/* Send data to nke */
	ctl_send_list_func	send_list;	/* Send list of packets */
	ctl_setopt_func		setopt;		/* set kctl configuration */
	ctl_getopt_func		getopt;		/* get kctl configuration */
	ctl_rcvd_func		rcvd;		/* Notify nke when client reads data */

	TAILQ_HEAD(, ctl_cb)	kcb_head;	/* per-unit socket PCBs attached to this kctl */
	u_int32_t		lastunit;
};
87
/*
 * Per-socket protocol control block for a kernel control socket;
 * so->so_pcb points here.  kctl stays NULL until bind/connect succeeds.
 */
struct ctl_cb {
	TAILQ_ENTRY(ctl_cb)	next;		/* controller chain */
	lck_mtx_t		*mtx;		/* per-socket mutex (PR_PCBLOCK) */
	struct socket		*so;		/* controlling socket */
	struct kctl		*kctl;		/* back pointer to controller */
	void			*userdata;	/* opaque cookie owned by the controller */
	struct sockaddr_ctl	sac;		/* bound/connected address (id + unit) */
	u_int32_t		usecount;	/* outstanding refs; drained in ctl_disconnect */
};
97
fe8ab488
A
/* Round x up to the next multiple of sizeof(u_int64_t) (8 bytes). */
#ifndef ROUNDUP64
#define ROUNDUP64(x) P2ROUNDUP((x), sizeof (u_int64_t))
#endif

/* Advance pointer p by n bytes, preserving 64-bit alignment. */
#ifndef ADVANCE64
#define ADVANCE64(p, n) (void*)((char *)(p) + ROUNDUP64(n))
#endif
105
9bccf70c
A
/*
 * Definitions and variables for the kernel control sockets we support.
 */

#define CTL_SENDSIZE	(2 * 1024)	/* default buffer size */
#define CTL_RECVSIZE	(8 * 1024)	/* default buffer size */

/* Exclusive upper bound for auto-assigned unit numbers */
static u_int32_t ctl_maxunit = 65536;
/* Lock group/attributes plus the global mutex guarding ctl_head and kctlstat */
static lck_grp_attr_t *ctl_lck_grp_attr = 0;
static lck_attr_t *ctl_lck_attr = 0;
static lck_grp_t *ctl_lck_grp = 0;
static lck_mtx_t *ctl_mtx;

/* all the controllers are chained */
TAILQ_HEAD(kctl_list, kctl) ctl_head;
91447636
A
125
126static int ctl_attach(struct socket *, int, struct proc *);
127static int ctl_detach(struct socket *);
128static int ctl_sofreelastref(struct socket *so);
5c9f4661 129static int ctl_bind(struct socket *, struct sockaddr *, struct proc *);
91447636
A
130static int ctl_connect(struct socket *, struct sockaddr *, struct proc *);
131static int ctl_disconnect(struct socket *);
132static int ctl_ioctl(struct socket *so, u_long cmd, caddr_t data,
fe8ab488 133 struct ifnet *ifp, struct proc *p);
91447636 134static int ctl_send(struct socket *, int, struct mbuf *,
fe8ab488
A
135 struct sockaddr *, struct mbuf *, struct proc *);
136static int ctl_send_list(struct socket *, int, struct mbuf *,
137 struct sockaddr *, struct mbuf *, struct proc *);
91447636
A
138static int ctl_ctloutput(struct socket *, struct sockopt *);
139static int ctl_peeraddr(struct socket *so, struct sockaddr **nam);
39236c6e 140static int ctl_usr_rcvd(struct socket *so, int flags);
91447636 141
91447636
A
142static struct kctl *ctl_find_by_name(const char *);
143static struct kctl *ctl_find_by_id_unit(u_int32_t id, u_int32_t unit);
9bccf70c 144
3e170ce0
A
145static struct socket *kcb_find_socket(kern_ctl_ref kctlref, u_int32_t unit,
146 u_int32_t *);
91447636 147static struct ctl_cb *kcb_find(struct kctl *, u_int32_t unit);
b0d623f7 148static void ctl_post_msg(u_int32_t event_code, u_int32_t id);
9bccf70c 149
b0d623f7
A
150static int ctl_lock(struct socket *, int, void *);
151static int ctl_unlock(struct socket *, int, void *);
91447636 152static lck_mtx_t * ctl_getlock(struct socket *, int);
9bccf70c 153
39236c6e
A
/* User-request dispatch table shared by both kctl protosw entries */
static struct pr_usrreqs ctl_usrreqs = {
	.pru_attach =		ctl_attach,
	.pru_bind =		ctl_bind,
	.pru_connect =		ctl_connect,
	.pru_control =		ctl_ioctl,
	.pru_detach =		ctl_detach,
	.pru_disconnect =	ctl_disconnect,
	.pru_peeraddr =		ctl_peeraddr,
	.pru_rcvd =		ctl_usr_rcvd,
	.pru_send =		ctl_send,
	.pru_send_list =	ctl_send_list,
	.pru_sosend =		sosend,
	.pru_sosend_list =	sosend_list,
	.pru_soreceive =	soreceive,
	.pru_soreceive_list =	soreceive_list,
};
170
/*
 * Protocol switch entries for SYSPROTO_CONTROL: a datagram flavor
 * (PR_ATOMIC) and a stream flavor.  Both use per-PCB locking.
 */
static struct protosw kctlsw[] = {
{
	.pr_type =	SOCK_DGRAM,
	.pr_protocol =	SYSPROTO_CONTROL,
	.pr_flags =	PR_ATOMIC|PR_CONNREQUIRED|PR_PCBLOCK|PR_WANTRCVD,
	.pr_ctloutput =	ctl_ctloutput,
	.pr_usrreqs =	&ctl_usrreqs,
	.pr_lock =	ctl_lock,
	.pr_unlock =	ctl_unlock,
	.pr_getlock =	ctl_getlock,
},
{
	.pr_type =	SOCK_STREAM,
	.pr_protocol =	SYSPROTO_CONTROL,
	.pr_flags =	PR_CONNREQUIRED|PR_PCBLOCK|PR_WANTRCVD,
	.pr_ctloutput =	ctl_ctloutput,
	.pr_usrreqs =	&ctl_usrreqs,
	.pr_lock =	ctl_lock,
	.pr_unlock =	ctl_unlock,
	.pr_getlock =	ctl_getlock,
}
};
193
fe8ab488
A
__private_extern__ int kctl_reg_list SYSCTL_HANDLER_ARGS;
__private_extern__ int kctl_pcblist SYSCTL_HANDLER_ARGS;
__private_extern__ int kctl_getstat SYSCTL_HANDLER_ARGS;

/* sysctl net.systm.kctl.*: statistics, registration/PCB lists, debug knobs */
SYSCTL_NODE(_net_systm, OID_AUTO, kctl,
    CTLFLAG_RW|CTLFLAG_LOCKED, 0, "Kernel control family");

struct kctlstat kctlstat;
SYSCTL_PROC(_net_systm_kctl, OID_AUTO, stats,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
    kctl_getstat, "S,kctlstat", "");

SYSCTL_PROC(_net_systm_kctl, OID_AUTO, reg_list,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
    kctl_reg_list, "S,xkctl_reg", "");

SYSCTL_PROC(_net_systm_kctl, OID_AUTO, pcblist,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
    kctl_pcblist, "S,xkctlpcb", "");

/* Cap for auto-grown receive buffers (see ctl_rcvbspace) */
u_int32_t ctl_autorcvbuf_max = 256 * 1024;
SYSCTL_INT(_net_systm_kctl, OID_AUTO, autorcvbufmax,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_autorcvbuf_max, 0, "");

/* High-water mark observed when auto-growing receive buffers */
u_int32_t ctl_autorcvbuf_high = 0;
SYSCTL_INT(_net_systm_kctl, OID_AUTO, autorcvbufhigh,
    CTLFLAG_RD | CTLFLAG_LOCKED, &ctl_autorcvbuf_high, 0, "");

/* Non-zero enables verbose printf debugging throughout this file */
u_int32_t ctl_debug = 0;
SYSCTL_INT(_net_systm_kctl, OID_AUTO, debug,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_debug, 0, "");
226
3e170ce0
A
/*
 * Table mapping opaque kern_ctl_ref handles back to struct kctl.
 * Grown in KCTL_TBL_INC steps; the *_growing flags serialize growth.
 */
#define KCTL_TBL_INC 16

static uintptr_t kctl_tbl_size = 0;
static u_int32_t kctl_tbl_growing = 0;
static u_int32_t kctl_tbl_growing_waiting = 0;
static uintptr_t kctl_tbl_count = 0;
static struct kctl **kctl_table = NULL;
static uintptr_t kctl_ref_gencnt = 0;

static void kctl_tbl_grow(void);
static kern_ctl_ref kctl_make_ref(struct kctl *kctl);
static void kctl_delete_ref(kern_ctl_ref);
static struct kctl *kctl_from_ref(kern_ctl_ref);
240
/*
 * Install the protosw's for the Kernel Control manager.
 * Called once at system-domain initialization; allocates the global
 * lock state and registers both kctl protocols with the domain.
 * Any allocation failure here is fatal (panic).
 */
__private_extern__ void
kern_control_init(struct domain *dp)
{
	struct protosw *pr;
	int i;
	int kctl_proto_count = (sizeof (kctlsw) / sizeof (struct protosw));

	VERIFY(!(dp->dom_flags & DOM_INITIALIZED));
	VERIFY(dp == systemdomain);

	ctl_lck_grp_attr = lck_grp_attr_alloc_init();
	if (ctl_lck_grp_attr == NULL) {
		panic("%s: lck_grp_attr_alloc_init failed\n", __func__);
		/* NOTREACHED */
	}

	ctl_lck_grp = lck_grp_alloc_init("Kernel Control Protocol",
	    ctl_lck_grp_attr);
	if (ctl_lck_grp == NULL) {
		panic("%s: lck_grp_alloc_init failed\n", __func__);
		/* NOTREACHED */
	}

	ctl_lck_attr = lck_attr_alloc_init();
	if (ctl_lck_attr == NULL) {
		panic("%s: lck_attr_alloc_init failed\n", __func__);
		/* NOTREACHED */
	}

	/* Global mutex guarding ctl_head and kctlstat */
	ctl_mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
	if (ctl_mtx == NULL) {
		panic("%s: lck_mtx_alloc_init failed\n", __func__);
		/* NOTREACHED */
	}
	TAILQ_INIT(&ctl_head);

	for (i = 0, pr = &kctlsw[0]; i < kctl_proto_count; i++, pr++)
		net_add_proto(pr, dp, 1);
}
9bccf70c 283
91447636
A
284static void
285kcb_delete(struct ctl_cb *kcb)
286{
287 if (kcb != 0) {
288 if (kcb->mtx != 0)
289 lck_mtx_free(kcb->mtx, ctl_lck_grp);
290 FREE(kcb, M_TEMP);
291 }
9bccf70c
A
292}
293
9bccf70c
A
/*
 * Kernel Controller user-request functions
 * attach function must exist and succeed
 * detach not necessary
 * we need a pcb for the per socket mutex
 */
static int
ctl_attach(struct socket *so, int proto, struct proc *p)
{
#pragma unused(proto, p)
	int error = 0;
	struct ctl_cb *kcb = 0;

	/* Allocate and zero the per-socket control block */
	MALLOC(kcb, struct ctl_cb *, sizeof(struct ctl_cb), M_TEMP, M_WAITOK);
	if (kcb == NULL) {
		error = ENOMEM;
		goto quit;
	}
	bzero(kcb, sizeof(struct ctl_cb));

	/* Per-socket mutex used for PR_PCBLOCK locking */
	kcb->mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
	if (kcb->mtx == NULL) {
		error = ENOMEM;
		goto quit;
	}
	kcb->so = so;
	so->so_pcb = (caddr_t)kcb;

quit:
	if (error != 0) {
		/* kcb_delete handles the partially-initialized cases */
		kcb_delete(kcb);
		kcb = 0;
	}
	return (error);
}
329
/*
 * Last-reference teardown for a control socket: unlink the PCB from
 * its controller's list (under ctl_mtx) if it is still attached,
 * free the PCB, then release the socket itself.
 */
static int
ctl_sofreelastref(struct socket *so)
{
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

	so->so_pcb = 0;

	if (kcb != 0) {
		struct kctl *kctl;
		if ((kctl = kcb->kctl) != 0) {
			lck_mtx_lock(ctl_mtx);
			TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
			kctlstat.kcs_pcbcount--;
			kctlstat.kcs_gencnt++;
			lck_mtx_unlock(ctl_mtx);
		}
		kcb_delete(kcb);
	}
	sofreelastref(so, 1);
	return (0);
}
351
/*
 * Detach a control socket.  If the unit was bound (userdata set by the
 * bind callback) but never connected, invoke the controller's
 * disconnect callback so it can release state created at bind time.
 * Actual PCB teardown is deferred to ctl_sofreelastref via
 * SOF_PCBCLEARING.
 */
static int
ctl_detach(struct socket *so)
{
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

	if (kcb == 0)
		return (0);

	if (kcb->kctl != NULL && kcb->kctl->bind != NULL &&
	    kcb->userdata != NULL && !(so->so_state & SS_ISCONNECTED)) {
		// The unit was bound, but not connected
		// Invoke the disconnected call to cleanup
		if (kcb->kctl->disconnect != NULL) {
			/* Controller callbacks run without the socket lock */
			socket_unlock(so, 0);
			(*kcb->kctl->disconnect)(kcb->kctl->kctlref,
			    kcb->sac.sc_unit, kcb->userdata);
			socket_lock(so, 0);
		}
	}

	soisdisconnected(so);
	so->so_flags |= SOF_PCBCLEARING;
	return (0);
}
376
/*
 * Common setup for bind and connect: resolve the (id, unit) in nam to a
 * registered controller, validate socket type and privilege, reserve a
 * unit (auto-assigning one when the controller does not fix units),
 * link the PCB onto the controller, and size the socket buffers.
 * Returns 0 and leaves kcb->kctl set on success; on failure after the
 * PCB was linked, fully unwinds that state.
 */
static int
ctl_setup_kctl(struct socket *so, struct sockaddr *nam, struct proc *p)
{
	struct kctl *kctl = NULL;
	int error = 0;
	struct sockaddr_ctl sa;
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
	struct ctl_cb *kcb_next = NULL;
	u_quad_t sbmaxsize;
	u_int32_t recvbufsize, sendbufsize;

	if (kcb == 0) {
		panic("ctl_setup_kctl so_pcb null\n");
	}

	if (kcb->kctl != NULL) {
		// Already set up, skip
		return (0);
	}

	if (nam->sa_len != sizeof(struct sockaddr_ctl)) {
		return (EINVAL);
	}

	/* Local copy: nam is caller memory with no alignment guarantee */
	bcopy(nam, &sa, sizeof(struct sockaddr_ctl));

	lck_mtx_lock(ctl_mtx);
	kctl = ctl_find_by_id_unit(sa.sc_id, sa.sc_unit);
	if (kctl == NULL) {
		lck_mtx_unlock(ctl_mtx);
		return (ENOENT);
	}

	/* Socket type must match the flavor the controller registered for */
	if (((kctl->flags & CTL_FLAG_REG_SOCK_STREAM) &&
	    (so->so_type != SOCK_STREAM)) ||
	    (!(kctl->flags & CTL_FLAG_REG_SOCK_STREAM) &&
	    (so->so_type != SOCK_DGRAM))) {
		lck_mtx_unlock(ctl_mtx);
		return (EPROTOTYPE);
	}

	/* Privileged controllers require superuser credentials */
	if (kctl->flags & CTL_FLAG_PRIVILEGED) {
		if (p == 0) {
			lck_mtx_unlock(ctl_mtx);
			return (EINVAL);
		}
		if (kauth_cred_issuser(kauth_cred_get()) == 0) {
			lck_mtx_unlock(ctl_mtx);
			return (EPERM);
		}
	}

	if ((kctl->flags & CTL_FLAG_REG_ID_UNIT) || sa.sc_unit != 0) {
		/* Explicit unit: reject if already taken */
		if (kcb_find(kctl, sa.sc_unit) != NULL) {
			lck_mtx_unlock(ctl_mtx);
			return (EBUSY);
		}
	} else {
		/* Find an unused ID, assumes control IDs are in order */
		u_int32_t unit = 1;

		TAILQ_FOREACH(kcb_next, &kctl->kcb_head, next) {
			if (kcb_next->sac.sc_unit > unit) {
				/* Found a gap, lets fill it in */
				break;
			}
			unit = kcb_next->sac.sc_unit + 1;
			if (unit == ctl_maxunit) {
				break;
			}
		}

		if (unit == ctl_maxunit) {
			lck_mtx_unlock(ctl_mtx);
			return (EBUSY);
		}

		sa.sc_unit = unit;
	}

	/* Record the resolved address and link the PCB onto the controller,
	 * keeping kcb_head sorted by unit (insert before the gap we found) */
	bcopy(&sa, &kcb->sac, sizeof(struct sockaddr_ctl));
	kcb->kctl = kctl;
	if (kcb_next != NULL) {
		TAILQ_INSERT_BEFORE(kcb_next, kcb, next);
	} else {
		TAILQ_INSERT_TAIL(&kctl->kcb_head, kcb, next);
	}
	kctlstat.kcs_pcbcount++;
	kctlstat.kcs_gencnt++;
	kctlstat.kcs_connections++;
	lck_mtx_unlock(ctl_mtx);

	/*
	 * rdar://15526688: Limit the send and receive sizes to sb_max
	 * by using the same scaling as sbreserve()
	 */
	sbmaxsize = (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES);

	if (kctl->sendbufsize > sbmaxsize) {
		sendbufsize = sbmaxsize;
	} else {
		sendbufsize = kctl->sendbufsize;
	}

	if (kctl->recvbufsize > sbmaxsize) {
		recvbufsize = sbmaxsize;
	} else {
		recvbufsize = kctl->recvbufsize;
	}

	error = soreserve(so, sendbufsize, recvbufsize);
	if (error) {
		if (ctl_debug)
			printf("%s - soreserve(%llx, %u, %u) error %d\n",
			    __func__, (uint64_t)VM_KERNEL_ADDRPERM(so),
			    sendbufsize, recvbufsize, error);
		goto done;
	}

done:
	if (error) {
		/* Unwind: detach the PCB from the controller again */
		soisdisconnected(so);
		lck_mtx_lock(ctl_mtx);
		TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
		kcb->kctl = NULL;
		kcb->sac.sc_unit = 0;
		kctlstat.kcs_pcbcount--;
		kctlstat.kcs_gencnt++;
		kctlstat.kcs_conn_fail++;
		lck_mtx_unlock(ctl_mtx);
	}
	return (error);
}
510
/*
 * Bind a control socket to a (id, unit) pair without connecting it.
 * Only valid for controllers that registered a bind callback; the
 * callback may set kcb->userdata, which ctl_detach uses to detect a
 * bound-but-unconnected unit.
 */
static int
ctl_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
{
	int error = 0;
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

	if (kcb == NULL) {
		panic("ctl_bind so_pcb null\n");
	}

	/* Resolve the controller and reserve the unit */
	error = ctl_setup_kctl(so, nam, p);
	if (error) {
		return (error);
	}

	if (kcb->kctl == NULL) {
		panic("ctl_bind kctl null\n");
	}

	if (kcb->kctl->bind == NULL) {
		return (EINVAL);
	}

	/* Controller callbacks run without the socket lock held */
	socket_unlock(so, 0);
	error = (*kcb->kctl->bind)(kcb->kctl->kctlref, &kcb->sac, &kcb->userdata);
	socket_lock(so, 0);

	return (error);
}
540
/*
 * Connect a control socket: set up the controller linkage, then call
 * the controller's connect callback.  On callback failure the
 * controller's disconnect callback (if any) is invoked and the PCB is
 * fully unlinked again.
 */
static int
ctl_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
{
	int error = 0;
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

	if (kcb == NULL) {
		panic("ctl_connect so_pcb null\n");
	}

	/* Resolve the controller and reserve the unit */
	error = ctl_setup_kctl(so, nam, p);
	if (error) {
		return (error);
	}

	if (kcb->kctl == NULL) {
		panic("ctl_connect kctl null\n");
	}

	soisconnecting(so);
	/* Controller callbacks run without the socket lock held */
	socket_unlock(so, 0);
	error = (*kcb->kctl->connect)(kcb->kctl->kctlref, &kcb->sac, &kcb->userdata);
	socket_lock(so, 0);
	if (error) {
		goto end;
	}
	soisconnected(so);

end:
	if (error && kcb->kctl->disconnect) {
		/*
		 * XXX Make sure we don't check the return value
		 * of disconnect here: ipsec/utun_ctl_disconnect
		 * returns an error when disconnect is called after
		 * a connect failure.  If this ever changes to check
		 * the return value, revisit ipsec/utun_ctl_disconnect.
		 */
		socket_unlock(so, 0);
		(*kcb->kctl->disconnect)(kcb->kctl->kctlref, kcb->sac.sc_unit, kcb->userdata);
		socket_lock(so, 0);
	}
	if (error) {
		/* Unwind the linkage established by ctl_setup_kctl */
		soisdisconnected(so);
		lck_mtx_lock(ctl_mtx);
		TAILQ_REMOVE(&kcb->kctl->kcb_head, kcb, next);
		kcb->kctl = NULL;
		kcb->sac.sc_unit = 0;
		kctlstat.kcs_pcbcount--;
		kctlstat.kcs_gencnt++;
		kctlstat.kcs_conn_fail++;
		lck_mtx_unlock(ctl_mtx);
	}
	return (error);
}
597
91447636 598static int
9bccf70c
A
599ctl_disconnect(struct socket *so)
600{
fe8ab488
A
601 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
602
603 if ((kcb = (struct ctl_cb *)so->so_pcb)) {
604 struct kctl *kctl = kcb->kctl;
605
606 if (kctl && kctl->disconnect) {
607 socket_unlock(so, 0);
5c9f4661 608 (*kctl->disconnect)(kctl->kctlref, kcb->sac.sc_unit,
3e170ce0 609 kcb->userdata);
fe8ab488
A
610 socket_lock(so, 0);
611 }
612
613 soisdisconnected(so);
614
6d2010ae 615 socket_unlock(so, 0);
fe8ab488
A
616 lck_mtx_lock(ctl_mtx);
617 kcb->kctl = 0;
5c9f4661 618 kcb->sac.sc_unit = 0;
fe8ab488
A
619 while (kcb->usecount != 0) {
620 msleep(&kcb->usecount, ctl_mtx, 0, "kcb->usecount", 0);
621 }
622 TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
623 kctlstat.kcs_pcbcount--;
624 kctlstat.kcs_gencnt++;
625 lck_mtx_unlock(ctl_mtx);
6d2010ae 626 socket_lock(so, 0);
fe8ab488
A
627 }
628 return (0);
9bccf70c
A
629}
630
91447636
A
631static int
632ctl_peeraddr(struct socket *so, struct sockaddr **nam)
9bccf70c 633{
91447636
A
634 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
635 struct kctl *kctl;
636 struct sockaddr_ctl sc;
fe8ab488 637
91447636 638 if (kcb == NULL) /* sanity check */
fe8ab488
A
639 return (ENOTCONN);
640
91447636 641 if ((kctl = kcb->kctl) == NULL)
fe8ab488
A
642 return (EINVAL);
643
91447636
A
644 bzero(&sc, sizeof(struct sockaddr_ctl));
645 sc.sc_len = sizeof(struct sockaddr_ctl);
646 sc.sc_family = AF_SYSTEM;
647 sc.ss_sysaddr = AF_SYS_CONTROL;
648 sc.sc_id = kctl->id;
5c9f4661 649 sc.sc_unit = kcb->sac.sc_unit;
fe8ab488 650
91447636 651 *nam = dup_sockaddr((struct sockaddr *)&sc, 1);
fe8ab488
A
652
653 return (0);
654}
655
656static void
657ctl_sbrcv_trim(struct socket *so)
658{
659 struct sockbuf *sb = &so->so_rcv;
660
661 if (sb->sb_hiwat > sb->sb_idealsize) {
662 u_int32_t diff;
663 int32_t trim;
664
665 /*
666 * The difference between the ideal size and the
667 * current size is the upper bound of the trimage
668 */
669 diff = sb->sb_hiwat - sb->sb_idealsize;
670 /*
671 * We cannot trim below the outstanding data
672 */
673 trim = sb->sb_hiwat - sb->sb_cc;
674
675 trim = imin(trim, (int32_t)diff);
676
677 if (trim > 0) {
678 sbreserve(sb, (sb->sb_hiwat - trim));
679
680 if (ctl_debug)
681 printf("%s - shrunk to %d\n",
682 __func__, sb->sb_hiwat);
683 }
684 }
9bccf70c
A
685}
686
39236c6e
A
687static int
688ctl_usr_rcvd(struct socket *so, int flags)
689{
690 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
691 struct kctl *kctl;
692
693 if ((kctl = kcb->kctl) == NULL) {
fe8ab488 694 return (EINVAL);
39236c6e
A
695 }
696
697 if (kctl->rcvd) {
698 socket_unlock(so, 0);
5c9f4661 699 (*kctl->rcvd)(kctl->kctlref, kcb->sac.sc_unit, kcb->userdata, flags);
39236c6e
A
700 socket_lock(so, 0);
701 }
702
fe8ab488
A
703 ctl_sbrcv_trim(so);
704
705 return (0);
39236c6e
A
706}
707
91447636
A
/*
 * pru_send handler: hand one packet to the controller's send callback.
 * Control mbufs are not supported and are freed immediately.  On any
 * error path the data mbuf is freed here; otherwise ownership passes
 * to the controller.
 */
static int
ctl_send(struct socket *so, int flags, struct mbuf *m,
    struct sockaddr *addr, struct mbuf *control,
    struct proc *p)
{
#pragma unused(addr, p)
	int error = 0;
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
	struct kctl *kctl;

	if (control)
		m_freem(control);

	if (kcb == NULL)	/* sanity check */
		error = ENOTCONN;

	/* short-circuit keeps kcb from being dereferenced when NULL */
	if (error == 0 && (kctl = kcb->kctl) == NULL)
		error = EINVAL;

	if (error == 0 && kctl->send) {
		so_tc_update_stats(m, so, m_get_service_class(m));
		/* Controller callbacks run without the socket lock held */
		socket_unlock(so, 0);
		error = (*kctl->send)(kctl->kctlref, kcb->sac.sc_unit, kcb->userdata,
		    m, flags);
		socket_lock(so, 0);
	} else {
		m_freem(m);
		if (error == 0)
			error = ENOTSUP;
	}
	if (error != 0)
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_send_fail);
	return (error);
}
742
/*
 * pru_send_list handler: hand a chain of packets to the controller.
 * Prefers the controller's send_list callback; falls back to calling
 * send per packet.  Any packets not handed off are freed here.
 */
static int
ctl_send_list(struct socket *so, int flags, struct mbuf *m,
    __unused struct sockaddr *addr, struct mbuf *control,
    __unused struct proc *p)
{
	int error = 0;
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
	struct kctl *kctl;

	if (control)
		m_freem_list(control);

	if (kcb == NULL)	/* sanity check */
		error = ENOTCONN;

	/* short-circuit keeps kcb from being dereferenced when NULL */
	if (error == 0 && (kctl = kcb->kctl) == NULL)
		error = EINVAL;

	if (error == 0 && kctl->send_list) {
		struct mbuf *nxt;

		/* Account traffic-class stats for every packet up front */
		for (nxt = m; nxt != NULL; nxt = nxt->m_nextpkt)
			so_tc_update_stats(nxt, so, m_get_service_class(nxt));

		/* Controller callbacks run without the socket lock held */
		socket_unlock(so, 0);
		error = (*kctl->send_list)(kctl->kctlref, kcb->sac.sc_unit,
		    kcb->userdata, m, flags);
		socket_lock(so, 0);
	} else if (error == 0 && kctl->send) {
		/* Fall back to per-packet send, unlinking each mbuf first */
		while (m != NULL && error == 0) {
			struct mbuf *nextpkt = m->m_nextpkt;

			m->m_nextpkt = NULL;
			so_tc_update_stats(m, so, m_get_service_class(m));
			socket_unlock(so, 0);
			error = (*kctl->send)(kctl->kctlref, kcb->sac.sc_unit,
			    kcb->userdata, m, flags);
			socket_lock(so, 0);
			m = nextpkt;
		}
		/* Free whatever remains after a mid-chain error */
		if (m != NULL)
			m_freem_list(m);
	} else {
		m_freem_list(m);
		if (error == 0)
			error = ENOTSUP;
	}
	if (error != 0)
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_send_list_fail);
	return (error);
}
794
/*
 * Admission check for enqueuing datasize bytes onto the receive buffer.
 * Three policies:
 *  - controller not registered CTL_FLAG_REG_CRIT: plain space check;
 *  - registered critical but this message is not CTL_DATA_CRIT:
 *    keep 25% of the buffer reserved for critical messages;
 *  - critical message: allow 25% overcommit and auto-grow the buffer
 *    up to ctl_autorcvbuf_max.
 * Returns 0 if the data may be enqueued, ENOBUFS otherwise.
 */
static errno_t
ctl_rcvbspace(struct socket *so, u_int32_t datasize,
    u_int32_t kctlflags, u_int32_t flags)
{
	struct sockbuf *sb = &so->so_rcv;
	u_int32_t space = sbspace(sb);
	errno_t error;

	if ((kctlflags & CTL_FLAG_REG_CRIT) == 0) {
		if ((u_int32_t) space >= datasize)
			error = 0;
		else
			error = ENOBUFS;
	} else if ((flags & CTL_DATA_CRIT) == 0) {
		/*
		 * Reserve 25% for critical messages
		 */
		if (space < (sb->sb_hiwat >> 2) ||
		    space < datasize)
			error = ENOBUFS;
		else
			error = 0;
	} else {
		u_int32_t autorcvbuf_max;

		/*
		 * Allow overcommit of 25%
		 */
		autorcvbuf_max = min(sb->sb_idealsize + (sb->sb_idealsize >> 2),
		    ctl_autorcvbuf_max);

		if ((u_int32_t) space >= datasize) {
			error = 0;
		} else if (tcp_cansbgrow(sb) &&
		    sb->sb_hiwat < autorcvbuf_max) {
			/*
			 * Grow with a little bit of leeway
			 */
			u_int32_t grow = datasize - space + MSIZE;

			if (sbreserve(sb,
			    min((sb->sb_hiwat + grow), autorcvbuf_max)) == 1) {

				/* Track the observed high-water mark */
				if (sb->sb_hiwat > ctl_autorcvbuf_high)
					ctl_autorcvbuf_high = sb->sb_hiwat;

				/*
				 * A final check
				 */
				if ((u_int32_t) sbspace(sb) >= datasize) {
					error = 0;
				} else {
					error = ENOBUFS;
				}

				if (ctl_debug)
					printf("%s - grown to %d error %d\n",
					    __func__, sb->sb_hiwat, error);
			} else {
				error = ENOBUFS;
			}
		} else {
			error = ENOBUFS;
		}
	}
	return (error);
}
862
/*
 * KPI: enqueue one mbuf packet onto the client's receive buffer for
 * (kctlref, unit).  On success the mbuf is owned by the socket layer
 * (sbappend); on ENOBUFS from sbappend the mbuf has been freed by it.
 * Wakes the reader unless CTL_DATA_NOWAKEUP is set.
 */
errno_t
ctl_enqueuembuf(kern_ctl_ref kctlref, u_int32_t unit, struct mbuf *m,
    u_int32_t flags)
{
	struct socket *so;
	errno_t error = 0;
	int len = m->m_pkthdr.len;
	u_int32_t kctlflags;

	/* kcb_find_socket returns the socket locked, with a reference */
	so = kcb_find_socket(kctlref, unit, &kctlflags);
	if (so == NULL) {
		return (EINVAL);
	}

	if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
		error = ENOBUFS;
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
		goto bye;
	}
	if ((flags & CTL_DATA_EOR))
		m->m_flags |= M_EOR;

	so_recv_data_stat(so, m, 0);
	if (sbappend(&so->so_rcv, m) != 0) {
		if ((flags & CTL_DATA_NOWAKEUP) == 0)
			sorwakeup(so);
	} else {
		error = ENOBUFS;
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
	}
bye:
	if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT))
		printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
		    __func__, error, len,
		    so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);

	socket_unlock(so, 1);
	if (error != 0)
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);

	return (error);
}
905
906/*
907 * Compute space occupied by mbuf like sbappendrecord
908 */
909static int
910m_space(struct mbuf *m)
911{
912 int space = 0;
913 struct mbuf *nxt;
914
915 for (nxt = m; nxt != NULL; nxt = nxt->m_next)
916 space += nxt->m_len;
917
918 return (space);
919}
920
/*
 * KPI: enqueue a chain of mbuf packets (linked by m_nextpkt) onto the
 * receive buffer for (kctlref, unit).  Datagram sockets only; fails
 * with EOPNOTSUPP for stream controllers and EINVAL for CTL_DATA_EOR.
 * On error, the unprocessed tail of the chain is returned through
 * m_remain if supplied, otherwise freed.
 */
errno_t
ctl_enqueuembuf_list(void *kctlref, u_int32_t unit, struct mbuf *m_list,
    u_int32_t flags, struct mbuf **m_remain)
{
	struct socket *so = NULL;
	errno_t error = 0;
	struct mbuf *m, *nextpkt;
	int needwakeup = 0;
	int len = 0;
	u_int32_t kctlflags;

	/*
	 * Need to point the beginning of the list in case of early exit
	 */
	m = m_list;

	/*
	 * kcb_find_socket takes the socket lock with a reference
	 */
	so = kcb_find_socket(kctlref, unit, &kctlflags);
	if (so == NULL) {
		error = EINVAL;
		goto done;
	}

	if (kctlflags & CTL_FLAG_REG_SOCK_STREAM) {
		error = EOPNOTSUPP;
		goto done;
	}
	if (flags & CTL_DATA_EOR) {
		error = EINVAL;
		goto done;
	}

	for (m = m_list; m != NULL; m = nextpkt) {
		nextpkt = m->m_nextpkt;

		if (m->m_pkthdr.len == 0 && ctl_debug)
			printf("%s: %llx m_pkthdr.len is 0",
			    __func__, (uint64_t)VM_KERNEL_ADDRPERM(m));

		/*
		 * The mbuf is either appended or freed by sbappendrecord()
		 * so it's not reliable from a data standpoint
		 */
		len = m_space(m);
		if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
			error = ENOBUFS;
			OSIncrementAtomic64(
			    (SInt64 *)&kctlstat.kcs_enqueue_fullsock);
			break;
		} else {
			/*
			 * Unlink from the list, m is on its own
			 */
			m->m_nextpkt = NULL;
			so_recv_data_stat(so, m, 0);
			if (sbappendrecord(&so->so_rcv, m) != 0) {
				needwakeup = 1;
			} else {
				/*
				 * We free or return the remaining
				 * mbufs in the list
				 */
				m = nextpkt;
				error = ENOBUFS;
				OSIncrementAtomic64(
				    (SInt64 *)&kctlstat.kcs_enqueue_fullsock);
				break;
			}
		}
	}
	/* Single wakeup after the whole batch unless suppressed */
	if (needwakeup && (flags & CTL_DATA_NOWAKEUP) == 0)
		sorwakeup(so);

done:
	if (so != NULL) {
		if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT))
			printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
			    __func__, error, len,
			    so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);

		socket_unlock(so, 1);
	}
	if (m_remain) {
		/* Hand the unconsumed tail back to the caller */
		*m_remain = m;

		if (m != NULL && socket_debug && so != NULL &&
		    (so->so_options & SO_DEBUG)) {
			struct mbuf *n;

			printf("%s m_list %llx\n", __func__,
			    (uint64_t) VM_KERNEL_ADDRPERM(m_list));
			for (n = m; n != NULL; n = n->m_nextpkt)
				printf(" remain %llx m_next %llx\n",
				    (uint64_t) VM_KERNEL_ADDRPERM(n),
				    (uint64_t) VM_KERNEL_ADDRPERM(n->m_next));
		}
	} else {
		if (m != NULL)
			m_freem_list(m);
	}
	if (error != 0)
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
	return (error);
}
9bccf70c 1027
/*
 * KPI: copy len bytes from data into freshly allocated mbufs and
 * enqueue them onto the receive buffer for (kctlref, unit).
 * NOTE(review): len is size_t but ctl_rcvbspace takes u_int32_t, so a
 * len above 4GB would be truncated in the space check — presumably
 * callers never pass that much; confirm if this KPI is exposed wider.
 */
errno_t
ctl_enqueuedata(void *kctlref, u_int32_t unit, void *data, size_t len,
    u_int32_t flags)
{
	struct socket *so;
	struct mbuf *m;
	errno_t error = 0;
	unsigned int num_needed;
	struct mbuf *n;
	size_t curlen = 0;
	u_int32_t kctlflags;

	/* kcb_find_socket returns the socket locked, with a reference */
	so = kcb_find_socket(kctlref, unit, &kctlflags);
	if (so == NULL) {
		return (EINVAL);
	}

	if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
		error = ENOBUFS;
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
		goto bye;
	}

	num_needed = 1;
	m = m_allocpacket_internal(&num_needed, len, NULL, M_NOWAIT, 1, 0);
	if (m == NULL) {
		kctlstat.kcs_enqdata_mb_alloc_fail++;
		if (ctl_debug)
			printf("%s: m_allocpacket_internal(%lu) failed\n",
			    __func__, len);
		error = ENOMEM;
		goto bye;
	}

	/* Scatter the caller's buffer across the allocated mbuf chain */
	for (n = m; n != NULL; n = n->m_next) {
		size_t mlen = mbuf_maxlen(n);

		if (mlen + curlen > len)
			mlen = len - curlen;
		n->m_len = mlen;
		bcopy((char *)data + curlen, n->m_data, mlen);
		curlen += mlen;
	}
	mbuf_pkthdr_setlen(m, curlen);

	if ((flags & CTL_DATA_EOR))
		m->m_flags |= M_EOR;
	so_recv_data_stat(so, m, 0);
	/* sbappend consumes the mbuf chain on both success and failure */
	if (sbappend(&so->so_rcv, m) != 0) {
		if ((flags & CTL_DATA_NOWAKEUP) == 0)
			sorwakeup(so);
	} else {
		kctlstat.kcs_enqdata_sbappend_fail++;
		error = ENOBUFS;
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
	}

bye:
	if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT))
		printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
		    __func__, error, (int)len,
		    so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);

	socket_unlock(so, 1);
	if (error != 0)
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
	return (error);
}
9bccf70c 1096
3e170ce0
A
/*
 * Count the data packets currently queued on the receive buffer of the
 * kernel control socket (kctlref, unit) and store the result in *pcnt.
 * Returns EINVAL for a NULL out-pointer or an unknown ref/unit.
 */
errno_t
ctl_getenqueuepacketcount(kern_ctl_ref kctlref, u_int32_t unit, u_int32_t *pcnt)
{
	struct socket *so;
	u_int32_t cnt;
	struct mbuf *m1;

	if (pcnt == NULL)
		return (EINVAL);

	/* Returned locked; unlocked below */
	so = kcb_find_socket(kctlref, unit, NULL);
	if (so == NULL) {
		return (EINVAL);
	}

	/* Walk packet headers (m_nextpkt), counting only data-bearing ones */
	cnt = 0;
	m1 = so->so_rcv.sb_mb;
	while (m1 != NULL) {
		if (m1->m_type == MT_DATA ||
		    m1->m_type == MT_HEADER ||
		    m1->m_type == MT_OOBDATA)
			cnt += 1;
		m1 = m1->m_nextpkt;
	}
	*pcnt = cnt;

	socket_unlock(so, 1);

	return (0);
}
55e303ae 1127
fe8ab488 1128errno_t
91447636
A
1129ctl_getenqueuespace(kern_ctl_ref kctlref, u_int32_t unit, size_t *space)
1130{
91447636 1131 struct socket *so;
2d21ac55 1132 long avail;
fe8ab488 1133
3e170ce0 1134 if (space == NULL)
fe8ab488
A
1135 return (EINVAL);
1136
3e170ce0
A
1137 so = kcb_find_socket(kctlref, unit, NULL);
1138 if (so == NULL) {
fe8ab488 1139 return (EINVAL);
3e170ce0 1140 }
fe8ab488 1141
2d21ac55
A
1142 avail = sbspace(&so->so_rcv);
1143 *space = (avail < 0) ? 0 : avail;
91447636 1144 socket_unlock(so, 1);
fe8ab488
A
1145
1146 return (0);
1147}
1148
1149errno_t
1150ctl_getenqueuereadable(kern_ctl_ref kctlref, u_int32_t unit,
1151 u_int32_t *difference)
1152{
fe8ab488
A
1153 struct socket *so;
1154
3e170ce0 1155 if (difference == NULL)
fe8ab488
A
1156 return (EINVAL);
1157
3e170ce0
A
1158 so = kcb_find_socket(kctlref, unit, NULL);
1159 if (so == NULL) {
fe8ab488 1160 return (EINVAL);
3e170ce0 1161 }
fe8ab488
A
1162
1163 if (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat) {
1164 *difference = 0;
1165 } else {
1166 *difference = (so->so_rcv.sb_lowat - so->so_rcv.sb_cc);
1167 }
1168 socket_unlock(so, 1);
1169
1170 return (0);
9bccf70c
A
1171}
1172
/*
 * Socket-option handler for kernel control sockets: dispatches
 * SYSPROTO_CONTROL get/set requests to the controller's registered
 * setopt/getopt callbacks.  The socket lock is dropped around the
 * callback so the controller may block; the pcb must therefore be
 * revalidated by callers of this socket after it returns.
 */
static int
ctl_ctloutput(struct socket *so, struct sockopt *sopt)
{
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
	struct kctl *kctl;
	int error = 0;
	void *data = NULL;
	size_t len;

	if (sopt->sopt_level != SYSPROTO_CONTROL) {
		return (EINVAL);
	}

	if (kcb == NULL)	/* sanity check */
		return (ENOTCONN);

	if ((kctl = kcb->kctl) == NULL)
		return (EINVAL);

	switch (sopt->sopt_dir) {
	case SOPT_SET:
		if (kctl->setopt == NULL)
			return (ENOTSUP);
		if (sopt->sopt_valsize != 0) {
			/* Stage the user's value in a kernel buffer */
			MALLOC(data, void *, sopt->sopt_valsize, M_TEMP,
			    M_WAITOK | M_ZERO);
			if (data == NULL)
				return (ENOMEM);
			error = sooptcopyin(sopt, data,
			    sopt->sopt_valsize, sopt->sopt_valsize);
		}
		if (error == 0) {
			/* Drop the socket lock: the callback may block */
			socket_unlock(so, 0);
			error = (*kctl->setopt)(kctl->kctlref,
			    kcb->sac.sc_unit, kcb->userdata, sopt->sopt_name,
			    data, sopt->sopt_valsize);
			socket_lock(so, 0);
		}

		if (data != NULL)
			FREE(data, M_TEMP);
		break;

	case SOPT_GET:
		if (kctl->getopt == NULL)
			return (ENOTSUP);

		if (sopt->sopt_valsize && sopt->sopt_val) {
			MALLOC(data, void *, sopt->sopt_valsize, M_TEMP,
			    M_WAITOK | M_ZERO);
			if (data == NULL)
				return (ENOMEM);
			/*
			 * 4108337 - copy user data in case the
			 * kernel control needs it
			 */
			error = sooptcopyin(sopt, data,
			    sopt->sopt_valsize, sopt->sopt_valsize);
		}

		if (error == 0) {
			len = sopt->sopt_valsize;
			/* Drop the socket lock: the callback may block */
			socket_unlock(so, 0);
			error = (*kctl->getopt)(kctl->kctlref, kcb->sac.sc_unit,
			    kcb->userdata, sopt->sopt_name,
			    data, &len);
			/* A controller overrunning the buffer is fatal */
			if (data != NULL && len > sopt->sopt_valsize)
				panic_plain("ctl_ctloutput: ctl %s returned "
				    "len (%lu) > sopt_valsize (%lu)\n",
				    kcb->kctl->name, len,
				    sopt->sopt_valsize);
			socket_lock(so, 0);
			if (error == 0) {
				if (data != NULL)
					error = sooptcopyout(sopt, data, len);
				else
					/* Size-probe only: report length */
					sopt->sopt_valsize = len;
			}
		}
		if (data != NULL)
			FREE(data, M_TEMP);
		break;
	}
	return (error);
}
9bccf70c 1258
fe8ab488
A
/*
 * ioctl handler for kernel control sockets.  Supports:
 *   CTLIOCGCOUNT - return the number of registered controllers
 *   CTLIOCGINFO  - look up a controller id by name
 * Anything else returns ENOTSUP.
 */
static int
ctl_ioctl(struct socket *so, u_long cmd, caddr_t data,
    struct ifnet *ifp, struct proc *p)
{
#pragma unused(so, ifp, p)
	int error = ENOTSUP;

	switch (cmd) {
	/* get the number of controllers */
	case CTLIOCGCOUNT: {
		struct kctl *kctl;
		u_int32_t n = 0;

		lck_mtx_lock(ctl_mtx);
		TAILQ_FOREACH(kctl, &ctl_head, next)
			n++;
		lck_mtx_unlock(ctl_mtx);

		/* bcopy: `data' may not be suitably aligned */
		bcopy(&n, data, sizeof (n));
		error = 0;
		break;
	}
	case CTLIOCGINFO: {
		struct ctl_info ctl_info;
		struct kctl *kctl = 0;
		size_t name_len;

		bcopy(data, &ctl_info, sizeof (ctl_info));
		/* Reject empty or unterminated (too long) names */
		name_len = strnlen(ctl_info.ctl_name, MAX_KCTL_NAME);

		if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) {
			error = EINVAL;
			break;
		}
		lck_mtx_lock(ctl_mtx);
		kctl = ctl_find_by_name(ctl_info.ctl_name);
		lck_mtx_unlock(ctl_mtx);
		if (kctl == 0) {
			error = ENOENT;
			break;
		}
		ctl_info.ctl_id = kctl->id;
		bcopy(&ctl_info, data, sizeof (ctl_info));
		error = 0;
		break;
	}

	/* add controls to get list of NKEs */

	}

	return (error);
}
9bccf70c 1312
3e170ce0
A
/*
 * Grow the global kctl reference table by KCTL_TBL_INC slots.
 *
 * Called and returns with ctl_mtx held.  Only one thread grows the
 * table at a time (kctl_tbl_growing flag); latecomers sleep on the
 * flag and re-check after waking.  ctl_mtx is dropped around the
 * blocking allocation, so all global state is re-read afterwards.
 */
static void
kctl_tbl_grow()
{
	struct kctl **new_table;
	uintptr_t new_size;

	lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);

	if (kctl_tbl_growing) {
		/* Another thread is allocating */
		kctl_tbl_growing_waiting++;

		do {
			(void) msleep((caddr_t) &kctl_tbl_growing, ctl_mtx,
			    PSOCK | PCATCH, "kctl_tbl_growing", 0);
		} while (kctl_tbl_growing);
		kctl_tbl_growing_waiting--;
	}
	/* Another thread grew the table */
	if (kctl_table != NULL && kctl_tbl_count < kctl_tbl_size)
		return;

	/* Verify we have a sane size */
	if (kctl_tbl_size + KCTL_TBL_INC >= UINT16_MAX) {
		kctlstat.kcs_tbl_size_too_big++;
		if (ctl_debug)
			printf("%s kctl_tbl_size %lu too big\n",
			    __func__, kctl_tbl_size);
		return;
	}
	kctl_tbl_growing = 1;

	new_size = kctl_tbl_size + KCTL_TBL_INC;

	/* Drop ctl_mtx: _MALLOC with M_WAIT may block */
	lck_mtx_unlock(ctl_mtx);
	new_table = _MALLOC(sizeof(struct kctl *) * new_size,
	    M_TEMP, M_WAIT | M_ZERO);
	lck_mtx_lock(ctl_mtx);

	if (new_table != NULL) {
		if (kctl_table != NULL) {
			bcopy(kctl_table, new_table,
			    kctl_tbl_size * sizeof(struct kctl *));

			_FREE(kctl_table, M_TEMP);
		}
		kctl_table = new_table;
		kctl_tbl_size = new_size;
	}

	kctl_tbl_growing = 0;

	/* Release any threads that queued up behind us */
	if (kctl_tbl_growing_waiting) {
		wakeup(&kctl_tbl_growing);
	}
}
1369
/*
 * A kern_ctl_ref packs a table index (low 16 bits, stored as index+1)
 * and a generation count (high 16 bits) so stale references can be
 * detected after a controller deregisters.
 */
#define KCTLREF_INDEX_MASK 0x0000FFFF
#define KCTLREF_GENCNT_MASK 0xFFFF0000
#define KCTLREF_GENCNT_SHIFT 16

/*
 * Allocate an opaque reference for `kctl' in the global table.
 * Called with ctl_mtx held; grows the table first if it is full.
 * Panics if no free slot exists after growing (should not happen
 * unless the table hit its size cap).
 */
static kern_ctl_ref
kctl_make_ref(struct kctl *kctl)
{
	uintptr_t i;

	lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);

	if (kctl_tbl_count >= kctl_tbl_size)
		kctl_tbl_grow();

	kctl->kctlref = NULL;
	/* Linear scan for the first empty slot */
	for (i = 0; i < kctl_tbl_size; i++) {
		if (kctl_table[i] == NULL) {
			uintptr_t ref;

			/*
			 * Reference is index plus one
			 */
			kctl_ref_gencnt += 1;

			/*
			 * Add generation count as salt to reference to prevent
			 * use after deregister
			 */
			ref = ((kctl_ref_gencnt << KCTLREF_GENCNT_SHIFT) &
			    KCTLREF_GENCNT_MASK) +
			    ((i + 1) & KCTLREF_INDEX_MASK);

			kctl->kctlref = (void *)(ref);
			kctl_table[i] = kctl;
			kctl_tbl_count++;
			break;
		}
	}

	if (kctl->kctlref == NULL)
		panic("%s no space in table", __func__);

	if (ctl_debug > 0)
		printf("%s %p for %p\n",
		    __func__, kctl->kctlref, kctl);

	return (kctl->kctlref);
}
1418
1419static void
1420kctl_delete_ref(kern_ctl_ref kctlref)
1421{
1422 /*
1423 * Reference is index plus one
1424 */
1425 uintptr_t i = (((uintptr_t)kctlref) & KCTLREF_INDEX_MASK) - 1;
1426
1427 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1428
1429 if (i < kctl_tbl_size) {
1430 struct kctl *kctl = kctl_table[i];
1431
1432 if (kctl->kctlref == kctlref) {
1433 kctl_table[i] = NULL;
1434 kctl_tbl_count--;
1435 } else {
1436 kctlstat.kcs_bad_kctlref++;
1437 }
1438 } else {
1439 kctlstat.kcs_bad_kctlref++;
1440 }
1441}
1442
1443static struct kctl *
1444kctl_from_ref(kern_ctl_ref kctlref)
1445{
1446 /*
1447 * Reference is index plus one
1448 */
1449 uintptr_t i = (((uintptr_t)kctlref) & KCTLREF_INDEX_MASK) - 1;
1450 struct kctl *kctl = NULL;
1451
1452 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1453
1454 if (i >= kctl_tbl_size) {
1455 kctlstat.kcs_bad_kctlref++;
1456 return (NULL);
1457 }
1458 kctl = kctl_table[i];
1459 if (kctl->kctlref != kctlref) {
1460 kctlstat.kcs_bad_kctlref++;
1461 return (NULL);
1462 }
1463 return (kctl);
1464}
1465
91447636
A
1466/*
1467 * Register/unregister a NKE
1468 */
1469errno_t
1470ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref)
2d21ac55
A
1471{
1472 struct kctl *kctl = NULL;
1473 struct kctl *kctl_next = NULL;
04b8595b
A
1474 u_int32_t id = 1;
1475 size_t name_len;
1476 int is_extended = 0;
fe8ab488 1477
91447636 1478 if (userkctl == NULL) /* sanity check */
fe8ab488 1479 return (EINVAL);
91447636 1480 if (userkctl->ctl_connect == NULL)
fe8ab488 1481 return (EINVAL);
91447636
A
1482 name_len = strlen(userkctl->ctl_name);
1483 if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME)
fe8ab488
A
1484 return (EINVAL);
1485
91447636
A
1486 MALLOC(kctl, struct kctl *, sizeof(*kctl), M_TEMP, M_WAITOK);
1487 if (kctl == NULL)
fe8ab488 1488 return (ENOMEM);
91447636 1489 bzero((char *)kctl, sizeof(*kctl));
fe8ab488 1490
91447636 1491 lck_mtx_lock(ctl_mtx);
fe8ab488 1492
3e170ce0
A
1493 if (kctl_make_ref(kctl) == NULL) {
1494 lck_mtx_unlock(ctl_mtx);
1495 FREE(kctl, M_TEMP);
1496 return (ENOMEM);
1497 }
1498
2d21ac55
A
1499 /*
1500 * Kernel Control IDs
1501 *
1502 * CTL_FLAG_REG_ID_UNIT indicates the control ID and unit number are
1503 * static. If they do not exist, add them to the list in order. If the
1504 * flag is not set, we must find a new unique value. We assume the
1505 * list is in order. We find the last item in the list and add one. If
1506 * this leads to wrapping the id around, we start at the front of the
1507 * list and look for a gap.
1508 */
fe8ab488 1509
2d21ac55
A
1510 if ((userkctl->ctl_flags & CTL_FLAG_REG_ID_UNIT) == 0) {
1511 /* Must dynamically assign an unused ID */
fe8ab488 1512
2d21ac55 1513 /* Verify the same name isn't already registered */
91447636 1514 if (ctl_find_by_name(userkctl->ctl_name) != NULL) {
3e170ce0 1515 kctl_delete_ref(kctl->kctlref);
91447636
A
1516 lck_mtx_unlock(ctl_mtx);
1517 FREE(kctl, M_TEMP);
fe8ab488 1518 return (EEXIST);
91447636 1519 }
fe8ab488 1520
2d21ac55
A
1521 /* Start with 1 in case the list is empty */
1522 id = 1;
1523 kctl_next = TAILQ_LAST(&ctl_head, kctl_list);
fe8ab488 1524
2d21ac55 1525 if (kctl_next != NULL) {
fe8ab488 1526 /* List was not empty, add one to the last item */
2d21ac55
A
1527 id = kctl_next->id + 1;
1528 kctl_next = NULL;
fe8ab488 1529
2d21ac55 1530 /*
fe8ab488
A
1531 * If this wrapped the id number, start looking at
1532 * the front of the list for an unused id.
2d21ac55 1533 */
91447636 1534 if (id == 0) {
2d21ac55
A
1535 /* Find the next unused ID */
1536 id = 1;
fe8ab488 1537
2d21ac55
A
1538 TAILQ_FOREACH(kctl_next, &ctl_head, next) {
1539 if (kctl_next->id > id) {
1540 /* We found a gap */
1541 break;
1542 }
fe8ab488 1543
2d21ac55
A
1544 id = kctl_next->id + 1;
1545 }
91447636 1546 }
91447636 1547 }
fe8ab488 1548
2d21ac55 1549 userkctl->ctl_id = id;
91447636
A
1550 kctl->id = id;
1551 kctl->reg_unit = -1;
1552 } else {
2d21ac55
A
1553 TAILQ_FOREACH(kctl_next, &ctl_head, next) {
1554 if (kctl_next->id > userkctl->ctl_id)
1555 break;
1556 }
fe8ab488
A
1557
1558 if (ctl_find_by_id_unit(userkctl->ctl_id, userkctl->ctl_unit)) {
3e170ce0 1559 kctl_delete_ref(kctl->kctlref);
91447636
A
1560 lck_mtx_unlock(ctl_mtx);
1561 FREE(kctl, M_TEMP);
fe8ab488 1562 return (EEXIST);
91447636
A
1563 }
1564 kctl->id = userkctl->ctl_id;
1565 kctl->reg_unit = userkctl->ctl_unit;
1566 }
39236c6e
A
1567
1568 is_extended = (userkctl->ctl_flags & CTL_FLAG_REG_EXTENDED);
1569
2d21ac55 1570 strlcpy(kctl->name, userkctl->ctl_name, MAX_KCTL_NAME);
91447636
A
1571 kctl->flags = userkctl->ctl_flags;
1572
fe8ab488
A
1573 /*
1574 * Let the caller know the default send and receive sizes
fe8ab488 1575 */
04b8595b 1576 if (userkctl->ctl_sendsize == 0) {
fe8ab488 1577 kctl->sendbufsize = CTL_SENDSIZE;
04b8595b
A
1578 userkctl->ctl_sendsize = kctl->sendbufsize;
1579 } else {
1580 kctl->sendbufsize = userkctl->ctl_sendsize;
1581 }
1582 if (userkctl->ctl_recvsize == 0) {
fe8ab488 1583 kctl->recvbufsize = CTL_RECVSIZE;
04b8595b
A
1584 userkctl->ctl_recvsize = kctl->recvbufsize;
1585 } else {
1586 kctl->recvbufsize = userkctl->ctl_recvsize;
1587 }
91447636 1588
5c9f4661 1589 kctl->bind = userkctl->ctl_bind;
91447636
A
1590 kctl->connect = userkctl->ctl_connect;
1591 kctl->disconnect = userkctl->ctl_disconnect;
1592 kctl->send = userkctl->ctl_send;
1593 kctl->setopt = userkctl->ctl_setopt;
1594 kctl->getopt = userkctl->ctl_getopt;
39236c6e
A
1595 if (is_extended) {
1596 kctl->rcvd = userkctl->ctl_rcvd;
fe8ab488 1597 kctl->send_list = userkctl->ctl_send_list;
39236c6e 1598 }
fe8ab488 1599
91447636 1600 TAILQ_INIT(&kctl->kcb_head);
fe8ab488 1601
2d21ac55
A
1602 if (kctl_next)
1603 TAILQ_INSERT_BEFORE(kctl_next, kctl, next);
1604 else
1605 TAILQ_INSERT_TAIL(&ctl_head, kctl, next);
fe8ab488
A
1606
1607 kctlstat.kcs_reg_count++;
1608 kctlstat.kcs_gencnt++;
1609
91447636 1610 lck_mtx_unlock(ctl_mtx);
fe8ab488 1611
3e170ce0 1612 *kctlref = kctl->kctlref;
fe8ab488 1613
91447636 1614 ctl_post_msg(KEV_CTL_REGISTERED, kctl->id);
fe8ab488 1615 return (0);
9bccf70c
A
1616}
1617
/*
 * Deregister a kernel control previously registered with ctl_register().
 * Fails with EINVAL for a stale/bad reference and EBUSY while any
 * client socket (kcb) is still attached.  On success the controller is
 * unlinked, its table reference released, a KEV_CTL_DEREGISTERED event
 * is posted, and the struct kctl is freed.
 */
errno_t
ctl_deregister(void *kctlref)
{
	struct kctl *kctl;

	lck_mtx_lock(ctl_mtx);
	if ((kctl = kctl_from_ref(kctlref)) == NULL) {
		kctlstat.kcs_bad_kctlref++;
		lck_mtx_unlock(ctl_mtx);
		if (ctl_debug != 0)
			printf("%s invalid kctlref %p\n",
			    __func__, kctlref);
		return (EINVAL);
	}

	/* Refuse while clients are still connected */
	if (!TAILQ_EMPTY(&kctl->kcb_head)) {
		lck_mtx_unlock(ctl_mtx);
		return (EBUSY);
	}

	TAILQ_REMOVE(&ctl_head, kctl, next);

	kctlstat.kcs_reg_count--;
	kctlstat.kcs_gencnt++;

	kctl_delete_ref(kctl->kctlref);
	lck_mtx_unlock(ctl_mtx);

	/* Post the event after dropping ctl_mtx (asserted NOTOWNED there) */
	ctl_post_msg(KEV_CTL_DEREGISTERED, kctl->id);
	FREE(kctl, M_TEMP);
	return (0);
}
1650
91447636
A
1651/*
1652 * Must be called with global ctl_mtx lock taked
1653 */
1654static struct kctl *
1655ctl_find_by_name(const char *name)
fe8ab488
A
1656{
1657 struct kctl *kctl;
1658
1659 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
9bccf70c 1660
fe8ab488
A
1661 TAILQ_FOREACH(kctl, &ctl_head, next)
1662 if (strncmp(kctl->name, name, sizeof(kctl->name)) == 0)
1663 return (kctl);
9bccf70c 1664
fe8ab488 1665 return (NULL);
91447636 1666}
9bccf70c 1667
6d2010ae
A
1668u_int32_t
1669ctl_id_by_name(const char *name)
1670{
1671 u_int32_t ctl_id = 0;
fe8ab488
A
1672 struct kctl *kctl;
1673
6d2010ae 1674 lck_mtx_lock(ctl_mtx);
fe8ab488
A
1675 kctl = ctl_find_by_name(name);
1676 if (kctl)
1677 ctl_id = kctl->id;
6d2010ae 1678 lck_mtx_unlock(ctl_mtx);
fe8ab488
A
1679
1680 return (ctl_id);
6d2010ae
A
1681}
1682
1683errno_t
fe8ab488 1684ctl_name_by_id(u_int32_t id, char *out_name, size_t maxsize)
6d2010ae
A
1685{
1686 int found = 0;
6d2010ae 1687 struct kctl *kctl;
fe8ab488
A
1688
1689 lck_mtx_lock(ctl_mtx);
1690 TAILQ_FOREACH(kctl, &ctl_head, next) {
1691 if (kctl->id == id)
1692 break;
1693 }
1694
3e170ce0 1695 if (kctl) {
fe8ab488
A
1696 if (maxsize > MAX_KCTL_NAME)
1697 maxsize = MAX_KCTL_NAME;
1698 strlcpy(out_name, kctl->name, maxsize);
1699 found = 1;
1700 }
6d2010ae 1701 lck_mtx_unlock(ctl_mtx);
fe8ab488
A
1702
1703 return (found ? 0 : ENOENT);
6d2010ae
A
1704}
1705
91447636
A
1706/*
1707 * Must be called with global ctl_mtx lock taked
1708 *
1709 */
1710static struct kctl *
1711ctl_find_by_id_unit(u_int32_t id, u_int32_t unit)
fe8ab488
A
1712{
1713 struct kctl *kctl;
1714
1715 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1716
1717 TAILQ_FOREACH(kctl, &ctl_head, next) {
1718 if (kctl->id == id && (kctl->flags & CTL_FLAG_REG_ID_UNIT) == 0)
1719 return (kctl);
1720 else if (kctl->id == id && kctl->reg_unit == unit)
1721 return (kctl);
1722 }
1723 return (NULL);
9bccf70c
A
1724}
1725
1726/*
91447636 1727 * Must be called with kernel controller lock taken
9bccf70c 1728 */
91447636
A
1729static struct ctl_cb *
1730kcb_find(struct kctl *kctl, u_int32_t unit)
fe8ab488
A
1731{
1732 struct ctl_cb *kcb;
9bccf70c 1733
fe8ab488 1734 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
9bccf70c 1735
fe8ab488 1736 TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
5c9f4661 1737 if (kcb->sac.sc_unit == unit)
fe8ab488
A
1738 return (kcb);
1739
1740 return (NULL);
9bccf70c
A
1741}
1742
/*
 * Resolve (kctlref, unit) to the client's socket and return it LOCKED;
 * optionally reports the controller flags through *kctlflags.
 *
 * Lock ordering is socket lock before ctl_mtx, so the socket cannot be
 * locked while ctl_mtx is held.  Instead the kcb usecount is bumped
 * under ctl_mtx to pin the socket, ctl_mtx is dropped, the socket is
 * locked, and then everything is revalidated under ctl_mtx again.
 * Returns NULL (nothing locked) on any validation failure.
 */
static struct socket *
kcb_find_socket(kern_ctl_ref kctlref, u_int32_t unit, u_int32_t *kctlflags)
{
	struct socket *so = NULL;
	struct ctl_cb *kcb;
	void *lr_saved;
	struct kctl *kctl;
	int i;

	lr_saved = __builtin_return_address(0);

	lck_mtx_lock(ctl_mtx);
	/*
	 * First validate the kctlref
	 */
	if ((kctl = kctl_from_ref(kctlref)) == NULL) {
		kctlstat.kcs_bad_kctlref++;
		lck_mtx_unlock(ctl_mtx);
		if (ctl_debug != 0)
			printf("%s invalid kctlref %p\n",
			    __func__, kctlref);
		return (NULL);
	}

	kcb = kcb_find(kctl, unit);
	if (kcb == NULL || kcb->kctl != kctl || (so = kcb->so) == NULL) {
		lck_mtx_unlock(ctl_mtx);
		return (NULL);
	}
	/*
	 * This prevents the socket from being closed
	 */
	kcb->usecount++;
	/*
	 * Respect lock ordering: socket before ctl_mtx
	 */
	lck_mtx_unlock(ctl_mtx);

	socket_lock(so, 1);
	/*
	 * The socket lock history is more useful if we store
	 * the address of the caller.
	 */
	i = (so->next_lock_lr + SO_LCKDBG_MAX - 1) % SO_LCKDBG_MAX;
	so->lock_lr[i] = lr_saved;

	lck_mtx_lock(ctl_mtx);

	/* Revalidate: the controller or kcb may have gone away meanwhile */
	if ((kctl = kctl_from_ref(kctlref)) == NULL || kcb->kctl == NULL) {
		lck_mtx_unlock(ctl_mtx);
		socket_unlock(so, 1);
		so = NULL;
		/* Reacquire ctl_mtx for the usecount drop below */
		lck_mtx_lock(ctl_mtx);
	} else if (kctlflags != NULL) {
		*kctlflags = kctl->flags;
	}

	kcb->usecount--;
	/* A disconnecting thread may be waiting for the usecount to drop */
	if (kcb->usecount == 0)
		wakeup((event_t)&kcb->usecount);

	lck_mtx_unlock(ctl_mtx);

	return (so);
}
1808
fe8ab488
A
1809static void
1810ctl_post_msg(u_int32_t event_code, u_int32_t id)
9bccf70c 1811{
fe8ab488
A
1812 struct ctl_event_data ctl_ev_data;
1813 struct kev_msg ev_msg;
1814
1815 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_NOTOWNED);
1816
1817 bzero(&ev_msg, sizeof(struct kev_msg));
1818 ev_msg.vendor_code = KEV_VENDOR_APPLE;
1819
1820 ev_msg.kev_class = KEV_SYSTEM_CLASS;
1821 ev_msg.kev_subclass = KEV_CTL_SUBCLASS;
1822 ev_msg.event_code = event_code;
1823
1824 /* common nke subclass data */
1825 bzero(&ctl_ev_data, sizeof(ctl_ev_data));
1826 ctl_ev_data.ctl_id = id;
1827 ev_msg.dv[0].data_ptr = &ctl_ev_data;
1828 ev_msg.dv[0].data_length = sizeof(ctl_ev_data);
1829
1830 ev_msg.dv[1].data_length = 0;
1831
1832 kev_post_msg(&ev_msg);
9bccf70c
A
1833}
1834
/*
 * pr_lock handler for kernel control sockets: takes the per-pcb mutex,
 * optionally bumps the socket refcount, and records the caller in the
 * socket's lock history.  Panics on a missing pcb or a negative
 * refcount (both indicate corruption).
 */
static int
ctl_lock(struct socket *so, int refcount, void *lr)
{
	void *lr_saved;

	/* Record the true caller when invoked without an explicit lr */
	if (lr == NULL)
		lr_saved = __builtin_return_address(0);
	else
		lr_saved = lr;

	if (so->so_pcb != NULL) {
		lck_mtx_lock(((struct ctl_cb *)so->so_pcb)->mtx);
	} else {
		panic("ctl_lock: so=%p NO PCB! lr=%p lrh= %s\n",
		    so, lr_saved, solockhistory_nr(so));
		/* NOTREACHED */
	}

	if (so->so_usecount < 0) {
		panic("ctl_lock: so=%p so_pcb=%p lr=%p ref=%x lrh= %s\n",
		    so, so->so_pcb, lr_saved, so->so_usecount,
		    solockhistory_nr(so));
		/* NOTREACHED */
	}

	if (refcount)
		so->so_usecount++;

	/* Append to the circular lock-history buffer for debugging */
	so->lock_lr[so->next_lock_lr] = lr_saved;
	so->next_lock_lr = (so->next_lock_lr+1) % SO_LCKDBG_MAX;
	return (0);
}
1867
1868static int
b0d623f7 1869ctl_unlock(struct socket *so, int refcount, void *lr)
91447636 1870{
b0d623f7
A
1871 void *lr_saved;
1872 lck_mtx_t *mutex_held;
1873
1874 if (lr == NULL)
1875 lr_saved = __builtin_return_address(0);
1876 else
1877 lr_saved = lr;
1878
39037602 1879#if (MORE_KCTLLOCK_DEBUG && (DEVELOPMENT || DEBUG))
fe8ab488
A
1880 printf("ctl_unlock: so=%llx sopcb=%x lock=%llx ref=%u lr=%llx\n",
1881 (uint64_t)VM_KERNEL_ADDRPERM(so),
1882 (uint64_t)VM_KERNEL_ADDRPERM(so->so_pcb,
1883 (uint64_t)VM_KERNEL_ADDRPERM(((struct ctl_cb *)so->so_pcb)->mtx),
1884 so->so_usecount, (uint64_t)VM_KERNEL_ADDRPERM(lr_saved));
39037602 1885#endif /* (MORE_KCTLLOCK_DEBUG && (DEVELOPMENT || DEBUG)) */
91447636
A
1886 if (refcount)
1887 so->so_usecount--;
b0d623f7
A
1888
1889 if (so->so_usecount < 0) {
fe8ab488 1890 panic("ctl_unlock: so=%p usecount=%x lrh= %s\n",
b0d623f7
A
1891 so, so->so_usecount, solockhistory_nr(so));
1892 /* NOTREACHED */
1893 }
91447636 1894 if (so->so_pcb == NULL) {
fe8ab488
A
1895 panic("ctl_unlock: so=%p NO PCB usecount=%x lr=%p lrh= %s\n",
1896 so, so->so_usecount, (void *)lr_saved,
1897 solockhistory_nr(so));
b0d623f7 1898 /* NOTREACHED */
91447636 1899 }
b0d623f7
A
1900 mutex_held = ((struct ctl_cb *)so->so_pcb)->mtx;
1901
91447636 1902 lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
2d21ac55 1903 so->unlock_lr[so->next_unlock_lr] = lr_saved;
0c530ab8 1904 so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX;
91447636 1905 lck_mtx_unlock(mutex_held);
b0d623f7 1906
91447636
A
1907 if (so->so_usecount == 0)
1908 ctl_sofreelastref(so);
b0d623f7 1909
91447636
A
1910 return (0);
1911}
1912
/*
 * pr_getlock handler: return the mutex protecting this kernel control
 * socket (the per-pcb mutex), falling back to the domain mutex — after
 * panicking — when the pcb is gone.
 */
static lck_mtx_t *
ctl_getlock(struct socket *so, int flags)
{
#pragma unused(flags)
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

	if (so->so_pcb) {
		/* A negative usecount means refcounting is corrupted */
		if (so->so_usecount < 0)
			panic("ctl_getlock: so=%p usecount=%x lrh= %s\n",
			    so, so->so_usecount, solockhistory_nr(so));
		return (kcb->mtx);
	} else {
		panic("ctl_getlock: so=%p NULL NO so_pcb %s\n",
		    so, solockhistory_nr(so));
		return (so->so_proto->pr_domain->dom_mtx);
	}
}
/*
 * sysctl handler that dumps the list of registered kernel controls as
 * a generation header (xsystmgen), one xkctl_reg record per controller,
 * and a trailing generation header so userland can detect concurrent
 * changes and retry.  Read-only: writes are rejected with EPERM.
 */
__private_extern__ int
kctl_reg_list SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error = 0;
	int n, i;
	struct xsystmgen xsg;
	void *buf = NULL;
	struct kctl *kctl;
	size_t item_size = ROUNDUP64(sizeof (struct xkctl_reg));

	buf = _MALLOC(item_size, M_TEMP, M_WAITOK | M_ZERO);
	if (buf == NULL)
		return (ENOMEM);

	lck_mtx_lock(ctl_mtx);

	n = kctlstat.kcs_reg_count;

	if (req->oldptr == USER_ADDR_NULL) {
		/* Size probe: report count plus some slop for growth */
		req->oldidx = (n + n/8) * sizeof(struct xkctl_reg);
		goto done;
	}
	if (req->newptr != USER_ADDR_NULL) {
		error = EPERM;
		goto done;
	}
	/* Leading generation header */
	bzero(&xsg, sizeof (xsg));
	xsg.xg_len = sizeof (xsg);
	xsg.xg_count = n;
	xsg.xg_gen = kctlstat.kcs_gencnt;
	xsg.xg_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xsg, sizeof (xsg));
	if (error) {
		goto done;
	}
	/*
	 * We are done if there is no pcb
	 */
	if (n == 0) {
		goto done;
	}

	i = 0;
	for (i = 0, kctl = TAILQ_FIRST(&ctl_head);
	    i < n && kctl != NULL;
	    i++, kctl = TAILQ_NEXT(kctl, next)) {
		struct xkctl_reg *xkr = (struct xkctl_reg *)buf;
		struct ctl_cb *kcb;
		u_int32_t pcbcount = 0;

		TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
			pcbcount++;

		bzero(buf, item_size);

		xkr->xkr_len = sizeof(struct xkctl_reg);
		xkr->xkr_kind = XSO_KCREG;
		xkr->xkr_id = kctl->id;
		xkr->xkr_reg_unit = kctl->reg_unit;
		xkr->xkr_flags = kctl->flags;
		xkr->xkr_kctlref = (uint64_t)(kctl->kctlref);
		xkr->xkr_recvbufsize = kctl->recvbufsize;
		xkr->xkr_sendbufsize = kctl->sendbufsize;
		xkr->xkr_lastunit = kctl->lastunit;
		xkr->xkr_pcbcount = pcbcount;
		/* Unslide callback pointers before exporting to userland */
		xkr->xkr_connect = (uint64_t)VM_KERNEL_UNSLIDE(kctl->connect);
		xkr->xkr_disconnect =
		    (uint64_t)VM_KERNEL_UNSLIDE(kctl->disconnect);
		xkr->xkr_send = (uint64_t)VM_KERNEL_UNSLIDE(kctl->send);
		xkr->xkr_send_list =
		    (uint64_t)VM_KERNEL_UNSLIDE(kctl->send_list);
		xkr->xkr_setopt = (uint64_t)VM_KERNEL_UNSLIDE(kctl->setopt);
		xkr->xkr_getopt = (uint64_t)VM_KERNEL_UNSLIDE(kctl->getopt);
		xkr->xkr_rcvd = (uint64_t)VM_KERNEL_UNSLIDE(kctl->rcvd);
		strlcpy(xkr->xkr_name, kctl->name, sizeof(xkr->xkr_name));

		error = SYSCTL_OUT(req, buf, item_size);
	}

	if (error == 0) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		bzero(&xsg, sizeof (xsg));
		xsg.xg_len = sizeof (xsg);
		xsg.xg_count = n;
		xsg.xg_gen = kctlstat.kcs_gencnt;
		xsg.xg_sogen = so_gencnt;
		error = SYSCTL_OUT(req, &xsg, sizeof (xsg));
		if (error) {
			goto done;
		}
	}

done:
	lck_mtx_unlock(ctl_mtx);

	if (buf != NULL)
		FREE(buf, M_TEMP);

	return (error);
}
2038
2039__private_extern__ int
2040kctl_pcblist SYSCTL_HANDLER_ARGS
2041{
2042#pragma unused(oidp, arg1, arg2)
2043 int error = 0;
2044 int n, i;
2045 struct xsystmgen xsg;
2046 void *buf = NULL;
2047 struct kctl *kctl;
2048 size_t item_size = ROUNDUP64(sizeof (struct xkctlpcb)) +
2049 ROUNDUP64(sizeof (struct xsocket_n)) +
2050 2 * ROUNDUP64(sizeof (struct xsockbuf_n)) +
2051 ROUNDUP64(sizeof (struct xsockstat_n));
2052
2053 buf = _MALLOC(item_size, M_TEMP, M_WAITOK | M_ZERO);
2054 if (buf == NULL)
2055 return (ENOMEM);
2056
2057 lck_mtx_lock(ctl_mtx);
2058
2059 n = kctlstat.kcs_pcbcount;
2060
2061 if (req->oldptr == USER_ADDR_NULL) {
2062 req->oldidx = (n + n/8) * item_size;
2063 goto done;
2064 }
2065 if (req->newptr != USER_ADDR_NULL) {
2066 error = EPERM;
2067 goto done;
2068 }
2069 bzero(&xsg, sizeof (xsg));
2070 xsg.xg_len = sizeof (xsg);
2071 xsg.xg_count = n;
2072 xsg.xg_gen = kctlstat.kcs_gencnt;
2073 xsg.xg_sogen = so_gencnt;
2074 error = SYSCTL_OUT(req, &xsg, sizeof (xsg));
2075 if (error) {
2076 goto done;
2077 }
2078 /*
2079 * We are done if there is no pcb
2080 */
2081 if (n == 0) {
2082 goto done;
2083 }
2084
2085 i = 0;
2086 for (i = 0, kctl = TAILQ_FIRST(&ctl_head);
2087 i < n && kctl != NULL;
2088 kctl = TAILQ_NEXT(kctl, next)) {
2089 struct ctl_cb *kcb;
2090
2091 for (kcb = TAILQ_FIRST(&kctl->kcb_head);
2092 i < n && kcb != NULL;
2093 i++, kcb = TAILQ_NEXT(kcb, next)) {
2094 struct xkctlpcb *xk = (struct xkctlpcb *)buf;
2095 struct xsocket_n *xso = (struct xsocket_n *)
2096 ADVANCE64(xk, sizeof (*xk));
2097 struct xsockbuf_n *xsbrcv = (struct xsockbuf_n *)
2098 ADVANCE64(xso, sizeof (*xso));
2099 struct xsockbuf_n *xsbsnd = (struct xsockbuf_n *)
2100 ADVANCE64(xsbrcv, sizeof (*xsbrcv));
2101 struct xsockstat_n *xsostats = (struct xsockstat_n *)
2102 ADVANCE64(xsbsnd, sizeof (*xsbsnd));
2103
2104 bzero(buf, item_size);
2105
2106 xk->xkp_len = sizeof(struct xkctlpcb);
2107 xk->xkp_kind = XSO_KCB;
5c9f4661 2108 xk->xkp_unit = kcb->sac.sc_unit;
fe8ab488
A
2109 xk->xkp_kctpcb = (uint64_t)VM_KERNEL_ADDRPERM(kcb);
2110 xk->xkp_kctlref = (uint64_t)VM_KERNEL_ADDRPERM(kctl);
2111 xk->xkp_kctlid = kctl->id;
2112 strlcpy(xk->xkp_kctlname, kctl->name,
2113 sizeof(xk->xkp_kctlname));
2114
2115 sotoxsocket_n(kcb->so, xso);
2116 sbtoxsockbuf_n(kcb->so ?
2117 &kcb->so->so_rcv : NULL, xsbrcv);
2118 sbtoxsockbuf_n(kcb->so ?
2119 &kcb->so->so_snd : NULL, xsbsnd);
2120 sbtoxsockstat_n(kcb->so, xsostats);
2121
2122 error = SYSCTL_OUT(req, buf, item_size);
2123 }
2124 }
2125
2126 if (error == 0) {
2127 /*
2128 * Give the user an updated idea of our state.
2129 * If the generation differs from what we told
2130 * her before, she knows that something happened
2131 * while we were processing this request, and it
2132 * might be necessary to retry.
2133 */
2134 bzero(&xsg, sizeof (xsg));
2135 xsg.xg_len = sizeof (xsg);
2136 xsg.xg_count = n;
2137 xsg.xg_gen = kctlstat.kcs_gencnt;
2138 xsg.xg_sogen = so_gencnt;
2139 error = SYSCTL_OUT(req, &xsg, sizeof (xsg));
2140 if (error) {
2141 goto done;
2142 }
2143 }
2144
2145done:
2146 lck_mtx_unlock(ctl_mtx);
2147
2148 return (error);
2149}
2150
2151int
2152kctl_getstat SYSCTL_HANDLER_ARGS
2153{
2154#pragma unused(oidp, arg1, arg2)
2155 int error = 0;
2156
2157 lck_mtx_lock(ctl_mtx);
2158
2159 if (req->newptr != USER_ADDR_NULL) {
2160 error = EPERM;
2161 goto done;
2162 }
2163 if (req->oldptr == USER_ADDR_NULL) {
2164 req->oldidx = sizeof(struct kctlstat);
2165 goto done;
2166 }
2167
2168 error = SYSCTL_OUT(req, &kctlstat,
2169 MIN(sizeof(struct kctlstat), req->oldlen));
2170done:
2171 lck_mtx_unlock(ctl_mtx);
2172 return (error);
2173}
3e170ce0
A
2174
2175void
2176kctl_fill_socketinfo(struct socket *so, struct socket_info *si)
2177{
2178 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
2179 struct kern_ctl_info *kcsi =
2180 &si->soi_proto.pri_kern_ctl;
2181 struct kctl *kctl = kcb->kctl;
2182
2183 si->soi_kind = SOCKINFO_KERN_CTL;
2184
2185 if (kctl == 0)
2186 return;
2187
2188 kcsi->kcsi_id = kctl->id;
2189 kcsi->kcsi_reg_unit = kctl->reg_unit;
2190 kcsi->kcsi_flags = kctl->flags;
2191 kcsi->kcsi_recvbufsize = kctl->recvbufsize;
2192 kcsi->kcsi_sendbufsize = kctl->sendbufsize;
5c9f4661 2193 kcsi->kcsi_unit = kcb->sac.sc_unit;
3e170ce0
A
2194 strlcpy(kcsi->kcsi_name, kctl->name, MAX_KCTL_NAME);
2195}