]> git.saurik.com Git - apple/xnu.git/blame - bsd/kern/kern_control.c
xnu-4570.20.62.tar.gz
[apple/xnu.git] / bsd / kern / kern_control.c
CommitLineData
9bccf70c 1/*
04b8595b 2 * Copyright (c) 1999-2015 Apple Inc. All rights reserved.
9bccf70c 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
fe8ab488 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
fe8ab488 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
fe8ab488 17 *
2d21ac55
A
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
fe8ab488 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
9bccf70c 27 */
9bccf70c
A
28
29/*
91447636
A
 * Kernel Control domain - allows kernel control connections to be
 * set up, and to read/write data over them.
9bccf70c 32 *
91447636 33 * Vincent Lubet, 040506
9bccf70c
A
34 * Christophe Allie, 010928
35 * Justin C. Walker, 990319
36 */
37
38#include <sys/types.h>
39#include <sys/param.h>
40#include <sys/systm.h>
41#include <sys/syslog.h>
42#include <sys/socket.h>
43#include <sys/socketvar.h>
44#include <sys/protosw.h>
45#include <sys/domain.h>
46#include <sys/malloc.h>
47#include <sys/mbuf.h>
9bccf70c
A
48#include <sys/sys_domain.h>
49#include <sys/kern_event.h>
50#include <sys/kern_control.h>
2d21ac55 51#include <sys/kauth.h>
fe8ab488 52#include <sys/sysctl.h>
3e170ce0 53#include <sys/proc_info.h>
9bccf70c
A
54#include <net/if_var.h>
55
56#include <mach/vm_types.h>
9bccf70c
A
57
58#include <kern/thread.h>
59
3e170ce0
A
/*
 * Registered kernel control: one instance per controller.  List
 * membership (ctl_head, kcb_head) is guarded by the global ctl_mtx.
 */
struct kctl {
	TAILQ_ENTRY(kctl) next;		/* controller chain */
	kern_ctl_ref kctlref;		/* opaque reference handed back to clients */

	/* controller information provided when registering */
	char name[MAX_KCTL_NAME];	/* unique identifier */
	u_int32_t id;
	u_int32_t reg_unit;

	/* misc communication information */
	u_int32_t flags;		/* support flags */
	u_int32_t recvbufsize;		/* request more than the default buffer size */
	u_int32_t sendbufsize;		/* request more than the default buffer size */

	/* Dispatch functions */
	ctl_connect_func connect;	/* Make contact */
	ctl_disconnect_func disconnect;	/* Break contact */
	ctl_send_func send;		/* Send data to nke */
	ctl_send_list_func send_list;	/* Send list of packets */
	ctl_setopt_func setopt;		/* set kctl configuration */
	ctl_getopt_func getopt;		/* get kctl configuration */
	ctl_rcvd_func rcvd;		/* Notify nke when client reads data */

	TAILQ_HEAD(, ctl_cb) kcb_head;	/* active connections (struct ctl_cb) */
	u_int32_t lastunit;		/* NOTE(review): presumably hint for next auto unit — confirm at use site */
};
86
/*
 * Per-connection control block: one per kernel control socket,
 * linked on its controller's kcb_head (linkage guarded by ctl_mtx).
 */
struct ctl_cb {
	TAILQ_ENTRY(ctl_cb) next;	/* controller chain */
	lck_mtx_t *mtx;			/* per-socket mutex handed out via ctl_getlock */
	struct socket *so;		/* controlling socket */
	struct kctl *kctl;		/* back pointer to controller */
	void *userdata;			/* opaque cookie filled in by the kctl's connect callback */
	u_int32_t unit;			/* unit number of this connection */
	u_int32_t usecount;		/* holds off teardown; ctl_disconnect sleeps until it drops to 0 */
};
96
fe8ab488
A
/* Round x up to the next multiple of sizeof (u_int64_t) */
#ifndef ROUNDUP64
#define ROUNDUP64(x) P2ROUNDUP((x), sizeof (u_int64_t))
#endif

/* Advance pointer p by n bytes, keeping 64-bit alignment */
#ifndef ADVANCE64
#define ADVANCE64(p, n) (void*)((char *)(p) + ROUNDUP64(n))
#endif
104
9bccf70c
A
/*
 * Default socket buffer sizes for the kernel controls we support
 */
108
fe8ab488
A
#define CTL_SENDSIZE	(2 * 1024)	/* default buffer size */
#define CTL_RECVSIZE	(8 * 1024)	/* default buffer size */
9bccf70c
A
111
/*
 * Module-wide state for the kernel control subsystem
 */
9bccf70c 115
static u_int32_t ctl_maxunit = 65536;		/* upper bound for auto-assigned unit numbers */
static lck_grp_attr_t *ctl_lck_grp_attr = 0;
static lck_attr_t *ctl_lck_attr = 0;
static lck_grp_t *ctl_lck_grp = 0;
static lck_mtx_t *ctl_mtx;			/* global lock: controller list and kctlstat */

/* all the controllers are chained */
TAILQ_HEAD(kctl_list, kctl) ctl_head;
91447636
A
124
/* Socket-layer entry points (pr_usrreqs) for kernel control sockets */
static int ctl_attach(struct socket *, int, struct proc *);
static int ctl_detach(struct socket *);
static int ctl_sofreelastref(struct socket *so);
static int ctl_connect(struct socket *, struct sockaddr *, struct proc *);
static int ctl_disconnect(struct socket *);
static int ctl_ioctl(struct socket *so, u_long cmd, caddr_t data,
    struct ifnet *ifp, struct proc *p);
static int ctl_send(struct socket *, int, struct mbuf *,
    struct sockaddr *, struct mbuf *, struct proc *);
static int ctl_send_list(struct socket *, int, struct mbuf *,
    struct sockaddr *, struct mbuf *, struct proc *);
static int ctl_ctloutput(struct socket *, struct sockopt *);
static int ctl_peeraddr(struct socket *so, struct sockaddr **nam);
static int ctl_usr_rcvd(struct socket *so, int flags);

/* Controller lookup helpers (called with ctl_mtx held in this file) */
static struct kctl *ctl_find_by_name(const char *);
static struct kctl *ctl_find_by_id_unit(u_int32_t id, u_int32_t unit);

/* Resolve a (kctlref, unit) pair to its locked socket / control block */
static struct socket *kcb_find_socket(kern_ctl_ref kctlref, u_int32_t unit,
    u_int32_t *);
static struct ctl_cb *kcb_find(struct kctl *, u_int32_t unit);
static void ctl_post_msg(u_int32_t event_code, u_int32_t id);

/* Per-socket locking support handed to the socket layer */
static int ctl_lock(struct socket *, int, void *);
static int ctl_unlock(struct socket *, int, void *);
static lck_mtx_t * ctl_getlock(struct socket *, int);

/* User-request dispatch table shared by both SYSPROTO_CONTROL flavors */
static struct pr_usrreqs ctl_usrreqs = {
	.pru_attach =		ctl_attach,
	.pru_connect =		ctl_connect,
	.pru_control =		ctl_ioctl,
	.pru_detach =		ctl_detach,
	.pru_disconnect =	ctl_disconnect,
	.pru_peeraddr =		ctl_peeraddr,
	.pru_rcvd =		ctl_usr_rcvd,
	.pru_send =		ctl_send,
	.pru_send_list =	ctl_send_list,
	.pru_sosend =		sosend,
	.pru_sosend_list =	sosend_list,
	.pru_soreceive =	soreceive,
	.pru_soreceive_list =	soreceive_list,
};
167
/*
 * Protocol switch entries for SYSPROTO_CONTROL: a datagram flavor and
 * a stream flavor sharing the same handlers; only pr_type/pr_flags differ.
 */
static struct protosw kctlsw[] = {
{
	.pr_type =	SOCK_DGRAM,
	.pr_protocol =	SYSPROTO_CONTROL,
	.pr_flags =	PR_ATOMIC|PR_CONNREQUIRED|PR_PCBLOCK|PR_WANTRCVD,
	.pr_ctloutput =	ctl_ctloutput,
	.pr_usrreqs =	&ctl_usrreqs,
	.pr_lock =	ctl_lock,
	.pr_unlock =	ctl_unlock,
	.pr_getlock =	ctl_getlock,
},
{
	.pr_type =	SOCK_STREAM,
	.pr_protocol =	SYSPROTO_CONTROL,
	.pr_flags =	PR_CONNREQUIRED|PR_PCBLOCK|PR_WANTRCVD,
	.pr_ctloutput =	ctl_ctloutput,
	.pr_usrreqs =	&ctl_usrreqs,
	.pr_lock =	ctl_lock,
	.pr_unlock =	ctl_unlock,
	.pr_getlock =	ctl_getlock,
}
};
190
fe8ab488
A
/* sysctl handler declarations (bodies are elsewhere in this file) */
__private_extern__ int kctl_reg_list SYSCTL_HANDLER_ARGS;
__private_extern__ int kctl_pcblist SYSCTL_HANDLER_ARGS;
__private_extern__ int kctl_getstat SYSCTL_HANDLER_ARGS;

/* net.systm.kctl: statistics, introspection and debug knobs */
SYSCTL_NODE(_net_systm, OID_AUTO, kctl,
	CTLFLAG_RW|CTLFLAG_LOCKED, 0, "Kernel control family");

struct kctlstat kctlstat;
SYSCTL_PROC(_net_systm_kctl, OID_AUTO, stats,
	CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
	kctl_getstat, "S,kctlstat", "");

SYSCTL_PROC(_net_systm_kctl, OID_AUTO, reg_list,
	CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
	kctl_reg_list, "S,xkctl_reg", "");

SYSCTL_PROC(_net_systm_kctl, OID_AUTO, pcblist,
	CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
	kctl_pcblist, "S,xkctlpcb", "");

/* cap for automatic receive-buffer growth in ctl_rcvbspace() */
u_int32_t ctl_autorcvbuf_max = 256 * 1024;
SYSCTL_INT(_net_systm_kctl, OID_AUTO, autorcvbufmax,
	CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_autorcvbuf_max, 0, "");

/* high-water mark reached by automatic growth (read-only) */
u_int32_t ctl_autorcvbuf_high = 0;
SYSCTL_INT(_net_systm_kctl, OID_AUTO, autorcvbufhigh,
	CTLFLAG_RD | CTLFLAG_LOCKED, &ctl_autorcvbuf_high, 0, "");

u_int32_t ctl_debug = 0;
SYSCTL_INT(_net_systm_kctl, OID_AUTO, debug,
	CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_debug, 0, "");

/* Growable table translating opaque kctl refs to struct kctl pointers */
#define KCTL_TBL_INC 16

static uintptr_t kctl_tbl_size = 0;
static u_int32_t kctl_tbl_growing = 0;		/* a grow is in progress */
static u_int32_t kctl_tbl_growing_waiting = 0;	/* someone is waiting on the grow */
static uintptr_t kctl_tbl_count = 0;
static struct kctl **kctl_table = NULL;
static uintptr_t kctl_ref_gencnt = 0;

static void kctl_tbl_grow(void);
static kern_ctl_ref kctl_make_ref(struct kctl *kctl);
static void kctl_delete_ref(kern_ctl_ref);
static struct kctl *kctl_from_ref(kern_ctl_ref);
237
/*
 * Install the protosw's for the Kernel Control manager.
 * Called once at domain initialization time for the system domain;
 * sets up the module's locking state and the controller list, then
 * registers both SYSPROTO_CONTROL flavors.  Any allocation failure
 * here is fatal (panic).
 */
__private_extern__ void
kern_control_init(struct domain *dp)
{
	struct protosw *pr;
	int i;
	int kctl_proto_count = (sizeof (kctlsw) / sizeof (struct protosw));

	/* Must be invoked exactly once, and only for the system domain */
	VERIFY(!(dp->dom_flags & DOM_INITIALIZED));
	VERIFY(dp == systemdomain);

	ctl_lck_grp_attr = lck_grp_attr_alloc_init();
	if (ctl_lck_grp_attr == NULL) {
		panic("%s: lck_grp_attr_alloc_init failed\n", __func__);
		/* NOTREACHED */
	}

	ctl_lck_grp = lck_grp_alloc_init("Kernel Control Protocol",
	    ctl_lck_grp_attr);
	if (ctl_lck_grp == NULL) {
		panic("%s: lck_grp_alloc_init failed\n", __func__);
		/* NOTREACHED */
	}

	ctl_lck_attr = lck_attr_alloc_init();
	if (ctl_lck_attr == NULL) {
		panic("%s: lck_attr_alloc_init failed\n", __func__);
		/* NOTREACHED */
	}

	/* Global mutex guarding the controller list and statistics */
	ctl_mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
	if (ctl_mtx == NULL) {
		panic("%s: lck_mtx_alloc_init failed\n", __func__);
		/* NOTREACHED */
	}
	TAILQ_INIT(&ctl_head);

	/* Register the datagram and stream flavors with the domain */
	for (i = 0, pr = &kctlsw[0]; i < kctl_proto_count; i++, pr++)
		net_add_proto(pr, dp, 1);
}
9bccf70c 280
91447636
A
281static void
282kcb_delete(struct ctl_cb *kcb)
283{
284 if (kcb != 0) {
285 if (kcb->mtx != 0)
286 lck_mtx_free(kcb->mtx, ctl_lck_grp);
287 FREE(kcb, M_TEMP);
288 }
9bccf70c
A
289}
290
9bccf70c
A
/*
 * Kernel Controller user-request functions
 * attach function must exist and succeed
 * detach not necessary
 * we need a pcb for the per socket mutex
 *
 * Allocates and zeroes the per-connection control block, creates the
 * per-socket mutex and links the block to the socket.  Returns 0 on
 * success or ENOMEM; partial allocations are undone on failure.
 */
static int
ctl_attach(struct socket *so, int proto, struct proc *p)
{
#pragma unused(proto, p)
	int error = 0;
	struct ctl_cb *kcb = 0;

	MALLOC(kcb, struct ctl_cb *, sizeof(struct ctl_cb), M_TEMP, M_WAITOK);
	if (kcb == NULL) {
		error = ENOMEM;
		goto quit;
	}
	bzero(kcb, sizeof(struct ctl_cb));

	/* Per-socket mutex handed to the socket layer via ctl_getlock() */
	kcb->mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
	if (kcb->mtx == NULL) {
		error = ENOMEM;
		goto quit;
	}
	kcb->so = so;
	so->so_pcb = (caddr_t)kcb;

quit:
	if (error != 0) {
		/* Undo any partial allocation */
		kcb_delete(kcb);
		kcb = 0;
	}
	return (error);
}
326
/*
 * Called when the last reference on the socket goes away: unlink the
 * control block from its controller (under ctl_mtx), free it, and
 * hand the socket back to the socket layer.
 */
static int
ctl_sofreelastref(struct socket *so)
{
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

	so->so_pcb = 0;

	if (kcb != 0) {
		struct kctl *kctl;
		if ((kctl = kcb->kctl) != 0) {
			/* Still attached: remove from the controller's list */
			lck_mtx_lock(ctl_mtx);
			TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
			kctlstat.kcs_pcbcount--;
			kctlstat.kcs_gencnt++;
			lck_mtx_unlock(ctl_mtx);
		}
		kcb_delete(kcb);
	}
	sofreelastref(so, 1);
	return (0);
}
348
349static int
350ctl_detach(struct socket *so)
351{
fe8ab488
A
352 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
353
354 if (kcb == 0)
355 return (0);
356
357 soisdisconnected(so);
358 so->so_flags |= SOF_PCBCLEARING;
359 return (0);
9bccf70c
A
360}
361
/*
 * Connect a kernel control socket to a registered controller:
 * validate the sockaddr_ctl, look up the controller by (id, unit),
 * check socket type and privilege, choose or validate the unit
 * number, reserve socket buffers (clamped to sb_max scaling) and
 * finally invoke the kctl's connect callback.  On any failure the
 * partially established connection is fully unwound.
 */
static int
ctl_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
{
#pragma unused(p)
	struct kctl *kctl;
	int error = 0;
	struct sockaddr_ctl sa;
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
	struct ctl_cb *kcb_next = NULL;
	u_quad_t sbmaxsize;
	u_int32_t recvbufsize, sendbufsize;

	if (kcb == 0)
		panic("ctl_connect so_pcb null\n");

	if (nam->sa_len != sizeof(struct sockaddr_ctl))
		return (EINVAL);

	/* Local copy: nam may not be aligned/sized as sockaddr_ctl */
	bcopy(nam, &sa, sizeof(struct sockaddr_ctl));

	lck_mtx_lock(ctl_mtx);
	kctl = ctl_find_by_id_unit(sa.sc_id, sa.sc_unit);
	if (kctl == NULL) {
		lck_mtx_unlock(ctl_mtx);
		return (ENOENT);
	}

	/* Socket type must match the flavor the controller registered */
	if (((kctl->flags & CTL_FLAG_REG_SOCK_STREAM) &&
	    (so->so_type != SOCK_STREAM)) ||
	    (!(kctl->flags & CTL_FLAG_REG_SOCK_STREAM) &&
	    (so->so_type != SOCK_DGRAM))) {
		lck_mtx_unlock(ctl_mtx);
		return (EPROTOTYPE);
	}

	/* Privileged controls require a superuser credential */
	if (kctl->flags & CTL_FLAG_PRIVILEGED) {
		if (p == 0) {
			lck_mtx_unlock(ctl_mtx);
			return (EINVAL);
		}
		if (kauth_cred_issuser(kauth_cred_get()) == 0) {
			lck_mtx_unlock(ctl_mtx);
			return (EPERM);
		}
	}

	if ((kctl->flags & CTL_FLAG_REG_ID_UNIT) || sa.sc_unit != 0) {
		/* Caller asked for a specific unit: it must be free */
		if (kcb_find(kctl, sa.sc_unit) != NULL) {
			lck_mtx_unlock(ctl_mtx);
			return (EBUSY);
		}
	} else {
		/* Find an unused ID, assumes control IDs are in order */
		u_int32_t unit = 1;

		TAILQ_FOREACH(kcb_next, &kctl->kcb_head, next) {
			if (kcb_next->unit > unit) {
				/* Found a gap, lets fill it in */
				break;
			}
			unit = kcb_next->unit + 1;
			if (unit == ctl_maxunit)
				break;
		}

		if (unit == ctl_maxunit) {
			lck_mtx_unlock(ctl_mtx);
			return (EBUSY);
		}

		sa.sc_unit = unit;
	}

	/* Link the connection; kcb_next != NULL keeps the list sorted */
	kcb->unit = sa.sc_unit;
	kcb->kctl = kctl;
	if (kcb_next != NULL) {
		TAILQ_INSERT_BEFORE(kcb_next, kcb, next);
	} else {
		TAILQ_INSERT_TAIL(&kctl->kcb_head, kcb, next);
	}
	kctlstat.kcs_pcbcount++;
	kctlstat.kcs_gencnt++;
	kctlstat.kcs_connections++;
	lck_mtx_unlock(ctl_mtx);

	/*
	 * rdar://15526688: Limit the send and receive sizes to sb_max
	 * by using the same scaling as sbreserve()
	 */
	sbmaxsize = (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES);

	if (kctl->sendbufsize > sbmaxsize)
		sendbufsize = sbmaxsize;
	else
		sendbufsize = kctl->sendbufsize;

	if (kctl->recvbufsize > sbmaxsize)
		recvbufsize = sbmaxsize;
	else
		recvbufsize = kctl->recvbufsize;

	error = soreserve(so, sendbufsize, recvbufsize);
	if (error) {
		if (ctl_debug)
			printf("%s - soreserve(%llx, %u, %u) error %d\n",
			    __func__, (uint64_t)VM_KERNEL_ADDRPERM(so),
			    sendbufsize, recvbufsize, error);
		goto done;
	}
	soisconnecting(so);

	/* The connect callback runs without the socket lock held */
	socket_unlock(so, 0);
	error = (*kctl->connect)(kctl->kctlref, &sa, &kcb->userdata);
	socket_lock(so, 0);
	if (error)
		goto end;

	soisconnected(so);

end:
	if (error && kctl->disconnect) {
		/*
		 * XXX Make sure we Don't check the return value
		 * of disconnect here.
		 * ipsec/utun_ctl_disconnect will return error when
		 * disconnect gets called after connect failure.
		 * However if we decide to check for disconnect return
		 * value here. Please make sure to revisit
		 * ipsec/utun_ctl_disconnect.
		 */
		socket_unlock(so, 0);
		(*kctl->disconnect)(kctl->kctlref, kcb->unit, kcb->userdata);
		socket_lock(so, 0);
	}
done:
	if (error) {
		/* Unwind the linkage established above */
		soisdisconnected(so);
		lck_mtx_lock(ctl_mtx);
		kcb->kctl = 0;
		kcb->unit = 0;
		TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
		kctlstat.kcs_pcbcount--;
		kctlstat.kcs_gencnt++;
		kctlstat.kcs_conn_fail++;
		lck_mtx_unlock(ctl_mtx);
	}
	return (error);
}
510
91447636 511static int
9bccf70c
A
512ctl_disconnect(struct socket *so)
513{
fe8ab488
A
514 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
515
516 if ((kcb = (struct ctl_cb *)so->so_pcb)) {
517 struct kctl *kctl = kcb->kctl;
518
519 if (kctl && kctl->disconnect) {
520 socket_unlock(so, 0);
3e170ce0
A
521 (*kctl->disconnect)(kctl->kctlref, kcb->unit,
522 kcb->userdata);
fe8ab488
A
523 socket_lock(so, 0);
524 }
525
526 soisdisconnected(so);
527
6d2010ae 528 socket_unlock(so, 0);
fe8ab488
A
529 lck_mtx_lock(ctl_mtx);
530 kcb->kctl = 0;
531 kcb->unit = 0;
532 while (kcb->usecount != 0) {
533 msleep(&kcb->usecount, ctl_mtx, 0, "kcb->usecount", 0);
534 }
535 TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
536 kctlstat.kcs_pcbcount--;
537 kctlstat.kcs_gencnt++;
538 lck_mtx_unlock(ctl_mtx);
6d2010ae 539 socket_lock(so, 0);
fe8ab488
A
540 }
541 return (0);
9bccf70c
A
542}
543
91447636
A
544static int
545ctl_peeraddr(struct socket *so, struct sockaddr **nam)
9bccf70c 546{
91447636
A
547 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
548 struct kctl *kctl;
549 struct sockaddr_ctl sc;
fe8ab488 550
91447636 551 if (kcb == NULL) /* sanity check */
fe8ab488
A
552 return (ENOTCONN);
553
91447636 554 if ((kctl = kcb->kctl) == NULL)
fe8ab488
A
555 return (EINVAL);
556
91447636
A
557 bzero(&sc, sizeof(struct sockaddr_ctl));
558 sc.sc_len = sizeof(struct sockaddr_ctl);
559 sc.sc_family = AF_SYSTEM;
560 sc.ss_sysaddr = AF_SYS_CONTROL;
561 sc.sc_id = kctl->id;
562 sc.sc_unit = kcb->unit;
fe8ab488 563
91447636 564 *nam = dup_sockaddr((struct sockaddr *)&sc, 1);
fe8ab488
A
565
566 return (0);
567}
568
/*
 * Shrink the receive buffer high-water mark back towards its ideal
 * size once it has grown past it, never trimming below the amount of
 * data currently queued.
 */
static void
ctl_sbrcv_trim(struct socket *so)
{
	struct sockbuf *sb = &so->so_rcv;

	if (sb->sb_hiwat > sb->sb_idealsize) {
		u_int32_t diff;
		int32_t trim;

		/*
		 * The difference between the ideal size and the
		 * current size is the upper bound of the trimage
		 */
		diff = sb->sb_hiwat - sb->sb_idealsize;
		/*
		 * We cannot trim below the outstanding data
		 */
		trim = sb->sb_hiwat - sb->sb_cc;

		trim = imin(trim, (int32_t)diff);

		if (trim > 0) {
			sbreserve(sb, (sb->sb_hiwat - trim));

			if (ctl_debug)
				printf("%s - shrunk to %d\n",
				    __func__, sb->sb_hiwat);
		}
	}
}
599
39236c6e
A
600static int
601ctl_usr_rcvd(struct socket *so, int flags)
602{
603 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
604 struct kctl *kctl;
605
606 if ((kctl = kcb->kctl) == NULL) {
fe8ab488 607 return (EINVAL);
39236c6e
A
608 }
609
610 if (kctl->rcvd) {
611 socket_unlock(so, 0);
3e170ce0 612 (*kctl->rcvd)(kctl->kctlref, kcb->unit, kcb->userdata, flags);
39236c6e
A
613 socket_lock(so, 0);
614 }
615
fe8ab488
A
616 ctl_sbrcv_trim(so);
617
618 return (0);
39236c6e
A
619}
620
91447636
A
/*
 * Deliver data written by the userland client to the attached
 * controller's send handler.  Control mbufs are not supported and
 * are always freed; the data mbuf is handed to the callback or freed
 * here on error/no-handler.
 */
static int
ctl_send(struct socket *so, int flags, struct mbuf *m,
    struct sockaddr *addr, struct mbuf *control,
    struct proc *p)
{
#pragma unused(addr, p)
	int error = 0;
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
	struct kctl *kctl;

	if (control)
		m_freem(control);

	if (kcb == NULL)	/* sanity check */
		error = ENOTCONN;

	/* kctl is only assigned (and used below) when error is still 0 */
	if (error == 0 && (kctl = kcb->kctl) == NULL)
		error = EINVAL;

	if (error == 0 && kctl->send) {
		so_tc_update_stats(m, so, m_get_service_class(m));
		/* The send callback runs without the socket lock held */
		socket_unlock(so, 0);
		error = (*kctl->send)(kctl->kctlref, kcb->unit, kcb->userdata,
		    m, flags);
		socket_lock(so, 0);
	} else {
		/* No handler (or earlier error): consume the mbuf here */
		m_freem(m);
		if (error == 0)
			error = ENOTSUP;
	}
	if (error != 0)
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_send_fail);
	return (error);
}
655
/*
 * Deliver a packet list written by the userland client.  Prefers the
 * controller's send_list handler; otherwise falls back to calling
 * the plain send handler once per packet.  Control mbufs are not
 * supported and are always freed.
 */
static int
ctl_send_list(struct socket *so, int flags, struct mbuf *m,
    __unused struct sockaddr *addr, struct mbuf *control,
    __unused struct proc *p)
{
	int error = 0;
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
	struct kctl *kctl;

	if (control)
		m_freem_list(control);

	if (kcb == NULL)	/* sanity check */
		error = ENOTCONN;

	/* kctl is only assigned (and used below) when error is still 0 */
	if (error == 0 && (kctl = kcb->kctl) == NULL)
		error = EINVAL;

	if (error == 0 && kctl->send_list) {
		struct mbuf *nxt;

		for (nxt = m; nxt != NULL; nxt = nxt->m_nextpkt)
			so_tc_update_stats(nxt, so, m_get_service_class(nxt));

		/* Callbacks run without the socket lock held */
		socket_unlock(so, 0);
		error = (*kctl->send_list)(kctl->kctlref, kcb->unit,
		    kcb->userdata, m, flags);
		socket_lock(so, 0);
	} else if (error == 0 && kctl->send) {
		/* Fallback: hand packets to the send callback one by one */
		while (m != NULL && error == 0) {
			struct mbuf *nextpkt = m->m_nextpkt;

			m->m_nextpkt = NULL;
			so_tc_update_stats(m, so, m_get_service_class(m));
			socket_unlock(so, 0);
			error = (*kctl->send)(kctl->kctlref, kcb->unit,
			    kcb->userdata, m, flags);
			socket_lock(so, 0);
			m = nextpkt;
		}
		/* Free whatever remains after an error stopped the loop */
		if (m != NULL)
			m_freem_list(m);
	} else {
		m_freem_list(m);
		if (error == 0)
			error = ENOTSUP;
	}
	if (error != 0)
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_send_list_fail);
	return (error);
}
707
/*
 * Decide whether datasize bytes may be enqueued on the receive
 * buffer, applying the controller's flow-control policy:
 *  - no CTL_FLAG_REG_CRIT: plain free-space check;
 *  - CTL_FLAG_REG_CRIT, non-critical data: 25% of the buffer is
 *    held in reserve for critical messages;
 *  - critical data (CTL_DATA_CRIT): may additionally grow the
 *    buffer, up to ctl_autorcvbuf_max with 25% overcommit.
 * Returns 0 when the data fits, ENOBUFS otherwise.
 */
static errno_t
ctl_rcvbspace(struct socket *so, u_int32_t datasize,
    u_int32_t kctlflags, u_int32_t flags)
{
	struct sockbuf *sb = &so->so_rcv;
	u_int32_t space = sbspace(sb);
	errno_t error;

	if ((kctlflags & CTL_FLAG_REG_CRIT) == 0) {
		if ((u_int32_t) space >= datasize)
			error = 0;
		else
			error = ENOBUFS;
	} else if ((flags & CTL_DATA_CRIT) == 0) {
		/*
		 * Reserve 25% for critical messages
		 */
		if (space < (sb->sb_hiwat >> 2) ||
		    space < datasize)
			error = ENOBUFS;
		else
			error = 0;
	} else {
		u_int32_t autorcvbuf_max;

		/*
		 * Allow overcommit of 25%
		 */
		autorcvbuf_max = min(sb->sb_idealsize + (sb->sb_idealsize >> 2),
		    ctl_autorcvbuf_max);

		if ((u_int32_t) space >= datasize) {
			error = 0;
		} else if (tcp_cansbgrow(sb) &&
		    sb->sb_hiwat < autorcvbuf_max) {
			/*
			 * Grow with a little bit of leeway
			 */
			u_int32_t grow = datasize - space + MSIZE;

			if (sbreserve(sb,
			    min((sb->sb_hiwat + grow), autorcvbuf_max)) == 1) {

				/* Track the high-water mark for the sysctl */
				if (sb->sb_hiwat > ctl_autorcvbuf_high)
					ctl_autorcvbuf_high = sb->sb_hiwat;

				/*
				 * A final check
				 */
				if ((u_int32_t) sbspace(sb) >= datasize) {
					error = 0;
				} else {
					error = ENOBUFS;
				}

				if (ctl_debug)
					printf("%s - grown to %d error %d\n",
					    __func__, sb->sb_hiwat, error);
			} else {
				error = ENOBUFS;
			}
		} else {
			error = ENOBUFS;
		}
	}
	return (error);
}
775
/*
 * Enqueue a single mbuf chain on the receive buffer of the socket
 * connected to (kctlref, unit) and wake the client unless
 * CTL_DATA_NOWAKEUP is set.  When the buffer-space check fails the
 * mbuf is left untouched for the caller.
 */
errno_t
ctl_enqueuembuf(kern_ctl_ref kctlref, u_int32_t unit, struct mbuf *m,
    u_int32_t flags)
{
	struct socket *so;
	errno_t error = 0;
	int len = m->m_pkthdr.len;
	u_int32_t kctlflags;

	/* On success the socket is returned locked with a reference */
	so = kcb_find_socket(kctlref, unit, &kctlflags);
	if (so == NULL) {
		return (EINVAL);
	}

	if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
		error = ENOBUFS;
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
		goto bye;
	}
	if ((flags & CTL_DATA_EOR))
		m->m_flags |= M_EOR;

	so_recv_data_stat(so, m, 0);
	if (sbappend(&so->so_rcv, m) != 0) {
		if ((flags & CTL_DATA_NOWAKEUP) == 0)
			sorwakeup(so);
	} else {
		/* NOTE(review): assumes sbappend() consumed m on failure — confirm its contract */
		error = ENOBUFS;
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
	}
bye:
	if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT))
		printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
		    __func__, error, len,
		    so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);

	socket_unlock(so, 1);
	if (error != 0)
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);

	return (error);
}
818
819/*
820 * Compute space occupied by mbuf like sbappendrecord
821 */
822static int
823m_space(struct mbuf *m)
824{
825 int space = 0;
826 struct mbuf *nxt;
827
828 for (nxt = m; nxt != NULL; nxt = nxt->m_next)
829 space += nxt->m_len;
830
831 return (space);
832}
833
/*
 * Enqueue a list of packets (linked via m_nextpkt) as individual
 * records on the receive buffer of the socket connected to
 * (kctlref, unit).  Stops at the first packet that does not fit.
 * When m_remain is non-NULL the unqueued tail is returned through
 * it, otherwise the tail is freed.  Not supported for SOCK_STREAM
 * controllers.
 */
errno_t
ctl_enqueuembuf_list(void *kctlref, u_int32_t unit, struct mbuf *m_list,
    u_int32_t flags, struct mbuf **m_remain)
{
	struct socket *so = NULL;
	errno_t error = 0;
	struct mbuf *m, *nextpkt;
	int needwakeup = 0;
	int len = 0;
	u_int32_t kctlflags;

	/*
	 * Need to point the beginning of the list in case of early exit
	 */
	m = m_list;

	/*
	 * kcb_find_socket takes the socket lock with a reference
	 */
	so = kcb_find_socket(kctlref, unit, &kctlflags);
	if (so == NULL) {
		error = EINVAL;
		goto done;
	}

	/* Record-oriented append makes no sense on a stream socket */
	if (kctlflags & CTL_FLAG_REG_SOCK_STREAM) {
		error = EOPNOTSUPP;
		goto done;
	}
	if (flags & CTL_DATA_EOR) {
		error = EINVAL;
		goto done;
	}

	for (m = m_list; m != NULL; m = nextpkt) {
		nextpkt = m->m_nextpkt;

		if (m->m_pkthdr.len == 0 && ctl_debug)
			printf("%s: %llx m_pkthdr.len is 0",
			    __func__, (uint64_t)VM_KERNEL_ADDRPERM(m));

		/*
		 * The mbuf is either appended or freed by sbappendrecord()
		 * so it's not reliable from a data standpoint
		 */
		len = m_space(m);
		if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
			error = ENOBUFS;
			OSIncrementAtomic64(
			    (SInt64 *)&kctlstat.kcs_enqueue_fullsock);
			break;
		} else {
			/*
			 * Unlink from the list, m is on its own
			 */
			m->m_nextpkt = NULL;
			so_recv_data_stat(so, m, 0);
			if (sbappendrecord(&so->so_rcv, m) != 0) {
				needwakeup = 1;
			} else {
				/*
				 * We free or return the remaining
				 * mbufs in the list
				 */
				m = nextpkt;
				error = ENOBUFS;
				OSIncrementAtomic64(
				    (SInt64 *)&kctlstat.kcs_enqueue_fullsock);
				break;
			}
		}
	}
	if (needwakeup && (flags & CTL_DATA_NOWAKEUP) == 0)
		sorwakeup(so);

done:
	if (so != NULL) {
		if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT))
			printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
			    __func__, error, len,
			    so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);

		socket_unlock(so, 1);
	}
	if (m_remain) {
		/* Hand the unqueued tail back to the caller */
		*m_remain = m;

		if (m != NULL && socket_debug && so != NULL &&
		    (so->so_options & SO_DEBUG)) {
			struct mbuf *n;

			printf("%s m_list %llx\n", __func__,
			    (uint64_t) VM_KERNEL_ADDRPERM(m_list));
			for (n = m; n != NULL; n = n->m_nextpkt)
				printf(" remain %llx m_next %llx\n",
				    (uint64_t) VM_KERNEL_ADDRPERM(n),
				    (uint64_t) VM_KERNEL_ADDRPERM(n->m_next));
		}
	} else {
		if (m != NULL)
			m_freem_list(m);
	}
	if (error != 0)
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
	return (error);
}
9bccf70c 940
/*
 * Copy len bytes from the caller's buffer into freshly allocated
 * mbufs and enqueue them on the receive buffer of the socket
 * connected to (kctlref, unit).  The caller keeps ownership of data.
 */
errno_t
ctl_enqueuedata(void *kctlref, u_int32_t unit, void *data, size_t len,
    u_int32_t flags)
{
	struct socket *so;
	struct mbuf *m;
	errno_t error = 0;
	unsigned int num_needed;
	struct mbuf *n;
	size_t curlen = 0;
	u_int32_t kctlflags;

	/* On success the socket is returned locked with a reference */
	so = kcb_find_socket(kctlref, unit, &kctlflags);
	if (so == NULL) {
		return (EINVAL);
	}

	if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
		error = ENOBUFS;
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
		goto bye;
	}

	num_needed = 1;
	m = m_allocpacket_internal(&num_needed, len, NULL, M_NOWAIT, 1, 0);
	if (m == NULL) {
		kctlstat.kcs_enqdata_mb_alloc_fail++;
		if (ctl_debug)
			printf("%s: m_allocpacket_internal(%lu) failed\n",
			    __func__, len);
		error = ENOMEM;
		goto bye;
	}

	/* Scatter the payload across the segments of the mbuf chain */
	for (n = m; n != NULL; n = n->m_next) {
		size_t mlen = mbuf_maxlen(n);

		if (mlen + curlen > len)
			mlen = len - curlen;
		n->m_len = mlen;
		bcopy((char *)data + curlen, n->m_data, mlen);
		curlen += mlen;
	}
	mbuf_pkthdr_setlen(m, curlen);

	if ((flags & CTL_DATA_EOR))
		m->m_flags |= M_EOR;
	so_recv_data_stat(so, m, 0);
	if (sbappend(&so->so_rcv, m) != 0) {
		if ((flags & CTL_DATA_NOWAKEUP) == 0)
			sorwakeup(so);
	} else {
		kctlstat.kcs_enqdata_sbappend_fail++;
		error = ENOBUFS;
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
	}

bye:
	if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT))
		printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
		    __func__, error, (int)len,
		    so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);

	socket_unlock(so, 1);
	if (error != 0)
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
	return (error);
}
9bccf70c 1009
3e170ce0
A
1010errno_t
1011ctl_getenqueuepacketcount(kern_ctl_ref kctlref, u_int32_t unit, u_int32_t *pcnt)
1012{
1013 struct socket *so;
1014 u_int32_t cnt;
1015 struct mbuf *m1;
1016
1017 if (pcnt == NULL)
1018 return (EINVAL);
1019
1020 so = kcb_find_socket(kctlref, unit, NULL);
1021 if (so == NULL) {
1022 return (EINVAL);
1023 }
1024
1025 cnt = 0;
1026 m1 = so->so_rcv.sb_mb;
1027 while (m1 != NULL) {
1028 if (m1->m_type == MT_DATA ||
1029 m1->m_type == MT_HEADER ||
1030 m1->m_type == MT_OOBDATA)
1031 cnt += 1;
1032 m1 = m1->m_nextpkt;
1033 }
1034 *pcnt = cnt;
1035
1036 socket_unlock(so, 1);
1037
1038 return (0);
1039}
55e303ae 1040
fe8ab488 1041errno_t
91447636
A
1042ctl_getenqueuespace(kern_ctl_ref kctlref, u_int32_t unit, size_t *space)
1043{
91447636 1044 struct socket *so;
2d21ac55 1045 long avail;
fe8ab488 1046
3e170ce0 1047 if (space == NULL)
fe8ab488
A
1048 return (EINVAL);
1049
3e170ce0
A
1050 so = kcb_find_socket(kctlref, unit, NULL);
1051 if (so == NULL) {
fe8ab488 1052 return (EINVAL);
3e170ce0 1053 }
fe8ab488 1054
2d21ac55
A
1055 avail = sbspace(&so->so_rcv);
1056 *space = (avail < 0) ? 0 : avail;
91447636 1057 socket_unlock(so, 1);
fe8ab488
A
1058
1059 return (0);
1060}
1061
1062errno_t
1063ctl_getenqueuereadable(kern_ctl_ref kctlref, u_int32_t unit,
1064 u_int32_t *difference)
1065{
fe8ab488
A
1066 struct socket *so;
1067
3e170ce0 1068 if (difference == NULL)
fe8ab488
A
1069 return (EINVAL);
1070
3e170ce0
A
1071 so = kcb_find_socket(kctlref, unit, NULL);
1072 if (so == NULL) {
fe8ab488 1073 return (EINVAL);
3e170ce0 1074 }
fe8ab488
A
1075
1076 if (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat) {
1077 *difference = 0;
1078 } else {
1079 *difference = (so->so_rcv.sb_lowat - so->so_rcv.sb_cc);
1080 }
1081 socket_unlock(so, 1);
1082
1083 return (0);
9bccf70c
A
1084}
1085
91447636 1086static int
9bccf70c
A
1087ctl_ctloutput(struct socket *so, struct sockopt *sopt)
1088{
91447636
A
1089 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
1090 struct kctl *kctl;
1091 int error = 0;
1092 void *data;
1093 size_t len;
fe8ab488 1094
91447636 1095 if (sopt->sopt_level != SYSPROTO_CONTROL) {
fe8ab488 1096 return (EINVAL);
91447636 1097 }
fe8ab488 1098
91447636 1099 if (kcb == NULL) /* sanity check */
fe8ab488
A
1100 return (ENOTCONN);
1101
91447636 1102 if ((kctl = kcb->kctl) == NULL)
fe8ab488
A
1103 return (EINVAL);
1104
91447636
A
1105 switch (sopt->sopt_dir) {
1106 case SOPT_SET:
1107 if (kctl->setopt == NULL)
fe8ab488 1108 return (ENOTSUP);
2d21ac55
A
1109 if (sopt->sopt_valsize == 0) {
1110 data = NULL;
1111 } else {
fe8ab488
A
1112 MALLOC(data, void *, sopt->sopt_valsize, M_TEMP,
1113 M_WAITOK);
2d21ac55 1114 if (data == NULL)
fe8ab488
A
1115 return (ENOMEM);
1116 error = sooptcopyin(sopt, data,
3e170ce0 1117 sopt->sopt_valsize, sopt->sopt_valsize);
2d21ac55 1118 }
91447636
A
1119 if (error == 0) {
1120 socket_unlock(so, 0);
3e170ce0
A
1121 error = (*kctl->setopt)(kctl->kctlref,
1122 kcb->unit, kcb->userdata, sopt->sopt_name,
1123 data, sopt->sopt_valsize);
91447636
A
1124 socket_lock(so, 0);
1125 }
1126 FREE(data, M_TEMP);
1127 break;
fe8ab488 1128
91447636
A
1129 case SOPT_GET:
1130 if (kctl->getopt == NULL)
fe8ab488 1131 return (ENOTSUP);
91447636
A
1132 data = NULL;
1133 if (sopt->sopt_valsize && sopt->sopt_val) {
fe8ab488
A
1134 MALLOC(data, void *, sopt->sopt_valsize, M_TEMP,
1135 M_WAITOK);
91447636 1136 if (data == NULL)
fe8ab488
A
1137 return (ENOMEM);
1138 /*
1139 * 4108337 - copy user data in case the
1140 * kernel control needs it
1141 */
1142 error = sooptcopyin(sopt, data,
1143 sopt->sopt_valsize, sopt->sopt_valsize);
91447636
A
1144 }
1145 len = sopt->sopt_valsize;
1146 socket_unlock(so, 0);
3e170ce0 1147 error = (*kctl->getopt)(kctl->kctlref, kcb->unit,
fe8ab488 1148 kcb->userdata, sopt->sopt_name,
91447636 1149 data, &len);
6d2010ae 1150 if (data != NULL && len > sopt->sopt_valsize)
fe8ab488
A
1151 panic_plain("ctl_ctloutput: ctl %s returned "
1152 "len (%lu) > sopt_valsize (%lu)\n",
1153 kcb->kctl->name, len,
1154 sopt->sopt_valsize);
1155 socket_lock(so, 0);
91447636
A
1156 if (error == 0) {
1157 if (data != NULL)
1158 error = sooptcopyout(sopt, data, len);
fe8ab488 1159 else
91447636
A
1160 sopt->sopt_valsize = len;
1161 }
1162 if (data != NULL)
fe8ab488 1163 FREE(data, M_TEMP);
91447636
A
1164 break;
1165 }
fe8ab488 1166 return (error);
91447636 1167}
9bccf70c 1168
fe8ab488
A
1169static int
1170ctl_ioctl(struct socket *so, u_long cmd, caddr_t data,
1171 struct ifnet *ifp, struct proc *p)
91447636 1172{
fe8ab488 1173#pragma unused(so, ifp, p)
91447636 1174 int error = ENOTSUP;
fe8ab488 1175
91447636
A
1176 switch (cmd) {
1177 /* get the number of controllers */
1178 case CTLIOCGCOUNT: {
1179 struct kctl *kctl;
316670eb 1180 u_int32_t n = 0;
91447636
A
1181
1182 lck_mtx_lock(ctl_mtx);
1183 TAILQ_FOREACH(kctl, &ctl_head, next)
1184 n++;
1185 lck_mtx_unlock(ctl_mtx);
fe8ab488 1186
316670eb 1187 bcopy(&n, data, sizeof (n));
91447636
A
1188 error = 0;
1189 break;
1190 }
1191 case CTLIOCGINFO: {
316670eb 1192 struct ctl_info ctl_info;
91447636 1193 struct kctl *kctl = 0;
316670eb
A
1194 size_t name_len;
1195
1196 bcopy(data, &ctl_info, sizeof (ctl_info));
1197 name_len = strnlen(ctl_info.ctl_name, MAX_KCTL_NAME);
1198
91447636
A
1199 if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) {
1200 error = EINVAL;
1201 break;
1202 }
1203 lck_mtx_lock(ctl_mtx);
316670eb 1204 kctl = ctl_find_by_name(ctl_info.ctl_name);
91447636
A
1205 lck_mtx_unlock(ctl_mtx);
1206 if (kctl == 0) {
1207 error = ENOENT;
1208 break;
1209 }
316670eb
A
1210 ctl_info.ctl_id = kctl->id;
1211 bcopy(&ctl_info, data, sizeof (ctl_info));
91447636
A
1212 error = 0;
1213 break;
1214 }
fe8ab488 1215
91447636 1216 /* add controls to get list of NKEs */
fe8ab488 1217
91447636 1218 }
fe8ab488
A
1219
1220 return (error);
91447636 1221}
9bccf70c 1222
3e170ce0
A
1223static void
1224kctl_tbl_grow()
1225{
1226 struct kctl **new_table;
1227 uintptr_t new_size;
1228
1229 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1230
39037602 1231 if (kctl_tbl_growing) {
3e170ce0 1232 /* Another thread is allocating */
39037602
A
1233 kctl_tbl_growing_waiting++;
1234
1235 do {
1236 (void) msleep((caddr_t) &kctl_tbl_growing, ctl_mtx,
1237 PSOCK | PCATCH, "kctl_tbl_growing", 0);
1238 } while (kctl_tbl_growing);
1239 kctl_tbl_growing_waiting--;
3e170ce0
A
1240 }
1241 /* Another thread grew the table */
1242 if (kctl_table != NULL && kctl_tbl_count < kctl_tbl_size)
1243 return;
1244
1245 /* Verify we have a sane size */
1246 if (kctl_tbl_size + KCTL_TBL_INC >= UINT16_MAX) {
39037602
A
1247 kctlstat.kcs_tbl_size_too_big++;
1248 if (ctl_debug)
1249 printf("%s kctl_tbl_size %lu too big\n",
1250 __func__, kctl_tbl_size);
3e170ce0
A
1251 return;
1252 }
1253 kctl_tbl_growing = 1;
1254
1255 new_size = kctl_tbl_size + KCTL_TBL_INC;
1256
1257 lck_mtx_unlock(ctl_mtx);
1258 new_table = _MALLOC(sizeof(struct kctl *) * new_size,
1259 M_TEMP, M_WAIT | M_ZERO);
1260 lck_mtx_lock(ctl_mtx);
1261
1262 if (new_table != NULL) {
1263 if (kctl_table != NULL) {
1264 bcopy(kctl_table, new_table,
1265 kctl_tbl_size * sizeof(struct kctl *));
1266
1267 _FREE(kctl_table, M_TEMP);
1268 }
1269 kctl_table = new_table;
1270 kctl_tbl_size = new_size;
1271 }
1272
1273 kctl_tbl_growing = 0;
39037602
A
1274
1275 if (kctl_tbl_growing_waiting) {
1276 wakeup(&kctl_tbl_growing);
1277 }
3e170ce0
A
1278}
1279
1280#define KCTLREF_INDEX_MASK 0x0000FFFF
1281#define KCTLREF_GENCNT_MASK 0xFFFF0000
1282#define KCTLREF_GENCNT_SHIFT 16
1283
1284static kern_ctl_ref
1285kctl_make_ref(struct kctl *kctl)
1286{
1287 uintptr_t i;
1288
1289 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1290
1291 if (kctl_tbl_count >= kctl_tbl_size)
1292 kctl_tbl_grow();
1293
1294 kctl->kctlref = NULL;
1295 for (i = 0; i < kctl_tbl_size; i++) {
1296 if (kctl_table[i] == NULL) {
1297 uintptr_t ref;
1298
1299 /*
1300 * Reference is index plus one
1301 */
1302 kctl_ref_gencnt += 1;
1303
1304 /*
1305 * Add generation count as salt to reference to prevent
1306 * use after deregister
1307 */
1308 ref = ((kctl_ref_gencnt << KCTLREF_GENCNT_SHIFT) &
1309 KCTLREF_GENCNT_MASK) +
1310 ((i + 1) & KCTLREF_INDEX_MASK);
1311
1312 kctl->kctlref = (void *)(ref);
1313 kctl_table[i] = kctl;
1314 kctl_tbl_count++;
1315 break;
1316 }
1317 }
1318
1319 if (kctl->kctlref == NULL)
1320 panic("%s no space in table", __func__);
1321
1322 if (ctl_debug > 0)
1323 printf("%s %p for %p\n",
1324 __func__, kctl->kctlref, kctl);
1325
1326 return (kctl->kctlref);
1327}
1328
1329static void
1330kctl_delete_ref(kern_ctl_ref kctlref)
1331{
1332 /*
1333 * Reference is index plus one
1334 */
1335 uintptr_t i = (((uintptr_t)kctlref) & KCTLREF_INDEX_MASK) - 1;
1336
1337 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1338
1339 if (i < kctl_tbl_size) {
1340 struct kctl *kctl = kctl_table[i];
1341
1342 if (kctl->kctlref == kctlref) {
1343 kctl_table[i] = NULL;
1344 kctl_tbl_count--;
1345 } else {
1346 kctlstat.kcs_bad_kctlref++;
1347 }
1348 } else {
1349 kctlstat.kcs_bad_kctlref++;
1350 }
1351}
1352
1353static struct kctl *
1354kctl_from_ref(kern_ctl_ref kctlref)
1355{
1356 /*
1357 * Reference is index plus one
1358 */
1359 uintptr_t i = (((uintptr_t)kctlref) & KCTLREF_INDEX_MASK) - 1;
1360 struct kctl *kctl = NULL;
1361
1362 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1363
1364 if (i >= kctl_tbl_size) {
1365 kctlstat.kcs_bad_kctlref++;
1366 return (NULL);
1367 }
1368 kctl = kctl_table[i];
1369 if (kctl->kctlref != kctlref) {
1370 kctlstat.kcs_bad_kctlref++;
1371 return (NULL);
1372 }
1373 return (kctl);
1374}
1375
91447636
A
1376/*
1377 * Register/unregister a NKE
1378 */
1379errno_t
1380ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref)
2d21ac55
A
1381{
1382 struct kctl *kctl = NULL;
1383 struct kctl *kctl_next = NULL;
04b8595b
A
1384 u_int32_t id = 1;
1385 size_t name_len;
1386 int is_extended = 0;
fe8ab488 1387
91447636 1388 if (userkctl == NULL) /* sanity check */
fe8ab488 1389 return (EINVAL);
91447636 1390 if (userkctl->ctl_connect == NULL)
fe8ab488 1391 return (EINVAL);
91447636
A
1392 name_len = strlen(userkctl->ctl_name);
1393 if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME)
fe8ab488
A
1394 return (EINVAL);
1395
91447636
A
1396 MALLOC(kctl, struct kctl *, sizeof(*kctl), M_TEMP, M_WAITOK);
1397 if (kctl == NULL)
fe8ab488 1398 return (ENOMEM);
91447636 1399 bzero((char *)kctl, sizeof(*kctl));
fe8ab488 1400
91447636 1401 lck_mtx_lock(ctl_mtx);
fe8ab488 1402
3e170ce0
A
1403 if (kctl_make_ref(kctl) == NULL) {
1404 lck_mtx_unlock(ctl_mtx);
1405 FREE(kctl, M_TEMP);
1406 return (ENOMEM);
1407 }
1408
2d21ac55
A
1409 /*
1410 * Kernel Control IDs
1411 *
1412 * CTL_FLAG_REG_ID_UNIT indicates the control ID and unit number are
1413 * static. If they do not exist, add them to the list in order. If the
1414 * flag is not set, we must find a new unique value. We assume the
1415 * list is in order. We find the last item in the list and add one. If
1416 * this leads to wrapping the id around, we start at the front of the
1417 * list and look for a gap.
1418 */
fe8ab488 1419
2d21ac55
A
1420 if ((userkctl->ctl_flags & CTL_FLAG_REG_ID_UNIT) == 0) {
1421 /* Must dynamically assign an unused ID */
fe8ab488 1422
2d21ac55 1423 /* Verify the same name isn't already registered */
91447636 1424 if (ctl_find_by_name(userkctl->ctl_name) != NULL) {
3e170ce0 1425 kctl_delete_ref(kctl->kctlref);
91447636
A
1426 lck_mtx_unlock(ctl_mtx);
1427 FREE(kctl, M_TEMP);
fe8ab488 1428 return (EEXIST);
91447636 1429 }
fe8ab488 1430
2d21ac55
A
1431 /* Start with 1 in case the list is empty */
1432 id = 1;
1433 kctl_next = TAILQ_LAST(&ctl_head, kctl_list);
fe8ab488 1434
2d21ac55 1435 if (kctl_next != NULL) {
fe8ab488 1436 /* List was not empty, add one to the last item */
2d21ac55
A
1437 id = kctl_next->id + 1;
1438 kctl_next = NULL;
fe8ab488 1439
2d21ac55 1440 /*
fe8ab488
A
1441 * If this wrapped the id number, start looking at
1442 * the front of the list for an unused id.
2d21ac55 1443 */
91447636 1444 if (id == 0) {
2d21ac55
A
1445 /* Find the next unused ID */
1446 id = 1;
fe8ab488 1447
2d21ac55
A
1448 TAILQ_FOREACH(kctl_next, &ctl_head, next) {
1449 if (kctl_next->id > id) {
1450 /* We found a gap */
1451 break;
1452 }
fe8ab488 1453
2d21ac55
A
1454 id = kctl_next->id + 1;
1455 }
91447636 1456 }
91447636 1457 }
fe8ab488 1458
2d21ac55 1459 userkctl->ctl_id = id;
91447636
A
1460 kctl->id = id;
1461 kctl->reg_unit = -1;
1462 } else {
2d21ac55
A
1463 TAILQ_FOREACH(kctl_next, &ctl_head, next) {
1464 if (kctl_next->id > userkctl->ctl_id)
1465 break;
1466 }
fe8ab488
A
1467
1468 if (ctl_find_by_id_unit(userkctl->ctl_id, userkctl->ctl_unit)) {
3e170ce0 1469 kctl_delete_ref(kctl->kctlref);
91447636
A
1470 lck_mtx_unlock(ctl_mtx);
1471 FREE(kctl, M_TEMP);
fe8ab488 1472 return (EEXIST);
91447636
A
1473 }
1474 kctl->id = userkctl->ctl_id;
1475 kctl->reg_unit = userkctl->ctl_unit;
1476 }
39236c6e
A
1477
1478 is_extended = (userkctl->ctl_flags & CTL_FLAG_REG_EXTENDED);
1479
2d21ac55 1480 strlcpy(kctl->name, userkctl->ctl_name, MAX_KCTL_NAME);
91447636
A
1481 kctl->flags = userkctl->ctl_flags;
1482
fe8ab488
A
1483 /*
1484 * Let the caller know the default send and receive sizes
fe8ab488 1485 */
04b8595b 1486 if (userkctl->ctl_sendsize == 0) {
fe8ab488 1487 kctl->sendbufsize = CTL_SENDSIZE;
04b8595b
A
1488 userkctl->ctl_sendsize = kctl->sendbufsize;
1489 } else {
1490 kctl->sendbufsize = userkctl->ctl_sendsize;
1491 }
1492 if (userkctl->ctl_recvsize == 0) {
fe8ab488 1493 kctl->recvbufsize = CTL_RECVSIZE;
04b8595b
A
1494 userkctl->ctl_recvsize = kctl->recvbufsize;
1495 } else {
1496 kctl->recvbufsize = userkctl->ctl_recvsize;
1497 }
91447636
A
1498
1499 kctl->connect = userkctl->ctl_connect;
1500 kctl->disconnect = userkctl->ctl_disconnect;
1501 kctl->send = userkctl->ctl_send;
1502 kctl->setopt = userkctl->ctl_setopt;
1503 kctl->getopt = userkctl->ctl_getopt;
39236c6e
A
1504 if (is_extended) {
1505 kctl->rcvd = userkctl->ctl_rcvd;
fe8ab488 1506 kctl->send_list = userkctl->ctl_send_list;
39236c6e 1507 }
fe8ab488 1508
91447636 1509 TAILQ_INIT(&kctl->kcb_head);
fe8ab488 1510
2d21ac55
A
1511 if (kctl_next)
1512 TAILQ_INSERT_BEFORE(kctl_next, kctl, next);
1513 else
1514 TAILQ_INSERT_TAIL(&ctl_head, kctl, next);
fe8ab488
A
1515
1516 kctlstat.kcs_reg_count++;
1517 kctlstat.kcs_gencnt++;
1518
91447636 1519 lck_mtx_unlock(ctl_mtx);
fe8ab488 1520
3e170ce0 1521 *kctlref = kctl->kctlref;
fe8ab488 1522
91447636 1523 ctl_post_msg(KEV_CTL_REGISTERED, kctl->id);
fe8ab488 1524 return (0);
9bccf70c
A
1525}
1526
91447636
A
1527errno_t
1528ctl_deregister(void *kctlref)
fe8ab488
A
1529{
1530 struct kctl *kctl;
1531
fe8ab488 1532 lck_mtx_lock(ctl_mtx);
3e170ce0
A
1533 if ((kctl = kctl_from_ref(kctlref)) == NULL) {
1534 kctlstat.kcs_bad_kctlref++;
fe8ab488 1535 lck_mtx_unlock(ctl_mtx);
3e170ce0
A
1536 if (ctl_debug != 0)
1537 printf("%s invalid kctlref %p\n",
1538 __func__, kctlref);
fe8ab488
A
1539 return (EINVAL);
1540 }
3e170ce0 1541
91447636 1542 if (!TAILQ_EMPTY(&kctl->kcb_head)) {
fe8ab488
A
1543 lck_mtx_unlock(ctl_mtx);
1544 return (EBUSY);
91447636
A
1545 }
1546
fe8ab488
A
1547 TAILQ_REMOVE(&ctl_head, kctl, next);
1548
1549 kctlstat.kcs_reg_count--;
1550 kctlstat.kcs_gencnt++;
91447636 1551
3e170ce0 1552 kctl_delete_ref(kctl->kctlref);
fe8ab488
A
1553 lck_mtx_unlock(ctl_mtx);
1554
1555 ctl_post_msg(KEV_CTL_DEREGISTERED, kctl->id);
1556 FREE(kctl, M_TEMP);
1557 return (0);
9bccf70c
A
1558}
1559
91447636
A
1560/*
1561 * Must be called with global ctl_mtx lock taked
1562 */
1563static struct kctl *
1564ctl_find_by_name(const char *name)
fe8ab488
A
1565{
1566 struct kctl *kctl;
1567
1568 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
9bccf70c 1569
fe8ab488
A
1570 TAILQ_FOREACH(kctl, &ctl_head, next)
1571 if (strncmp(kctl->name, name, sizeof(kctl->name)) == 0)
1572 return (kctl);
9bccf70c 1573
fe8ab488 1574 return (NULL);
91447636 1575}
9bccf70c 1576
6d2010ae
A
1577u_int32_t
1578ctl_id_by_name(const char *name)
1579{
1580 u_int32_t ctl_id = 0;
fe8ab488
A
1581 struct kctl *kctl;
1582
6d2010ae 1583 lck_mtx_lock(ctl_mtx);
fe8ab488
A
1584 kctl = ctl_find_by_name(name);
1585 if (kctl)
1586 ctl_id = kctl->id;
6d2010ae 1587 lck_mtx_unlock(ctl_mtx);
fe8ab488
A
1588
1589 return (ctl_id);
6d2010ae
A
1590}
1591
1592errno_t
fe8ab488 1593ctl_name_by_id(u_int32_t id, char *out_name, size_t maxsize)
6d2010ae
A
1594{
1595 int found = 0;
6d2010ae 1596 struct kctl *kctl;
fe8ab488
A
1597
1598 lck_mtx_lock(ctl_mtx);
1599 TAILQ_FOREACH(kctl, &ctl_head, next) {
1600 if (kctl->id == id)
1601 break;
1602 }
1603
3e170ce0 1604 if (kctl) {
fe8ab488
A
1605 if (maxsize > MAX_KCTL_NAME)
1606 maxsize = MAX_KCTL_NAME;
1607 strlcpy(out_name, kctl->name, maxsize);
1608 found = 1;
1609 }
6d2010ae 1610 lck_mtx_unlock(ctl_mtx);
fe8ab488
A
1611
1612 return (found ? 0 : ENOENT);
6d2010ae
A
1613}
1614
91447636
A
1615/*
1616 * Must be called with global ctl_mtx lock taked
1617 *
1618 */
1619static struct kctl *
1620ctl_find_by_id_unit(u_int32_t id, u_int32_t unit)
fe8ab488
A
1621{
1622 struct kctl *kctl;
1623
1624 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1625
1626 TAILQ_FOREACH(kctl, &ctl_head, next) {
1627 if (kctl->id == id && (kctl->flags & CTL_FLAG_REG_ID_UNIT) == 0)
1628 return (kctl);
1629 else if (kctl->id == id && kctl->reg_unit == unit)
1630 return (kctl);
1631 }
1632 return (NULL);
9bccf70c
A
1633}
1634
1635/*
91447636 1636 * Must be called with kernel controller lock taken
9bccf70c 1637 */
91447636
A
1638static struct ctl_cb *
1639kcb_find(struct kctl *kctl, u_int32_t unit)
fe8ab488
A
1640{
1641 struct ctl_cb *kcb;
9bccf70c 1642
fe8ab488 1643 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
9bccf70c 1644
fe8ab488
A
1645 TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
1646 if (kcb->unit == unit)
1647 return (kcb);
1648
1649 return (NULL);
9bccf70c
A
1650}
1651
6d2010ae 1652static struct socket *
3e170ce0 1653kcb_find_socket(kern_ctl_ref kctlref, u_int32_t unit, u_int32_t *kctlflags)
6d2010ae
A
1654{
1655 struct socket *so = NULL;
fe8ab488
A
1656 struct ctl_cb *kcb;
1657 void *lr_saved;
3e170ce0
A
1658 struct kctl *kctl;
1659 int i;
fe8ab488
A
1660
1661 lr_saved = __builtin_return_address(0);
1662
6d2010ae 1663 lck_mtx_lock(ctl_mtx);
3e170ce0
A
1664 /*
1665 * First validate the kctlref
1666 */
1667 if ((kctl = kctl_from_ref(kctlref)) == NULL) {
1668 kctlstat.kcs_bad_kctlref++;
1669 lck_mtx_unlock(ctl_mtx);
1670 if (ctl_debug != 0)
1671 printf("%s invalid kctlref %p\n",
1672 __func__, kctlref);
1673 return (NULL);
6d2010ae 1674 }
fe8ab488 1675
3e170ce0
A
1676 kcb = kcb_find(kctl, unit);
1677 if (kcb == NULL || kcb->kctl != kctl || (so = kcb->so) == NULL) {
1678 lck_mtx_unlock(ctl_mtx);
fe8ab488 1679 return (NULL);
6d2010ae 1680 }
3e170ce0
A
1681 /*
1682 * This prevents the socket from being closed
1683 */
1684 kcb->usecount++;
1685 /*
1686 * Respect lock ordering: socket before ctl_mtx
1687 */
1688 lck_mtx_unlock(ctl_mtx);
fe8ab488 1689
6d2010ae 1690 socket_lock(so, 1);
3e170ce0
A
1691 /*
1692 * The socket lock history is more useful if we store
1693 * the address of the caller.
1694 */
1695 i = (so->next_lock_lr + SO_LCKDBG_MAX - 1) % SO_LCKDBG_MAX;
1696 so->lock_lr[i] = lr_saved;
fe8ab488 1697
6d2010ae 1698 lck_mtx_lock(ctl_mtx);
3e170ce0
A
1699
1700 if ((kctl = kctl_from_ref(kctlref)) == NULL || kcb->kctl == NULL) {
6d2010ae
A
1701 lck_mtx_unlock(ctl_mtx);
1702 socket_unlock(so, 1);
1703 so = NULL;
1704 lck_mtx_lock(ctl_mtx);
3e170ce0
A
1705 } else if (kctlflags != NULL) {
1706 *kctlflags = kctl->flags;
6d2010ae 1707 }
3e170ce0 1708
6d2010ae
A
1709 kcb->usecount--;
1710 if (kcb->usecount == 0)
1711 wakeup((event_t)&kcb->usecount);
3e170ce0 1712
6d2010ae 1713 lck_mtx_unlock(ctl_mtx);
fe8ab488
A
1714
1715 return (so);
6d2010ae
A
1716}
1717
fe8ab488
A
1718static void
1719ctl_post_msg(u_int32_t event_code, u_int32_t id)
9bccf70c 1720{
fe8ab488
A
1721 struct ctl_event_data ctl_ev_data;
1722 struct kev_msg ev_msg;
1723
1724 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_NOTOWNED);
1725
1726 bzero(&ev_msg, sizeof(struct kev_msg));
1727 ev_msg.vendor_code = KEV_VENDOR_APPLE;
1728
1729 ev_msg.kev_class = KEV_SYSTEM_CLASS;
1730 ev_msg.kev_subclass = KEV_CTL_SUBCLASS;
1731 ev_msg.event_code = event_code;
1732
1733 /* common nke subclass data */
1734 bzero(&ctl_ev_data, sizeof(ctl_ev_data));
1735 ctl_ev_data.ctl_id = id;
1736 ev_msg.dv[0].data_ptr = &ctl_ev_data;
1737 ev_msg.dv[0].data_length = sizeof(ctl_ev_data);
1738
1739 ev_msg.dv[1].data_length = 0;
1740
1741 kev_post_msg(&ev_msg);
9bccf70c
A
1742}
1743
91447636 1744static int
b0d623f7
A
1745ctl_lock(struct socket *so, int refcount, void *lr)
1746{
1747 void *lr_saved;
1748
1749 if (lr == NULL)
1750 lr_saved = __builtin_return_address(0);
1751 else
1752 lr_saved = lr;
1753
1754 if (so->so_pcb != NULL) {
91447636
A
1755 lck_mtx_lock(((struct ctl_cb *)so->so_pcb)->mtx);
1756 } else {
fe8ab488 1757 panic("ctl_lock: so=%p NO PCB! lr=%p lrh= %s\n",
b0d623f7
A
1758 so, lr_saved, solockhistory_nr(so));
1759 /* NOTREACHED */
91447636 1760 }
b0d623f7
A
1761
1762 if (so->so_usecount < 0) {
1763 panic("ctl_lock: so=%p so_pcb=%p lr=%p ref=%x lrh= %s\n",
fe8ab488
A
1764 so, so->so_pcb, lr_saved, so->so_usecount,
1765 solockhistory_nr(so));
b0d623f7
A
1766 /* NOTREACHED */
1767 }
1768
91447636
A
1769 if (refcount)
1770 so->so_usecount++;
0c530ab8 1771
2d21ac55 1772 so->lock_lr[so->next_lock_lr] = lr_saved;
0c530ab8 1773 so->next_lock_lr = (so->next_lock_lr+1) % SO_LCKDBG_MAX;
91447636
A
1774 return (0);
1775}
1776
1777static int
b0d623f7 1778ctl_unlock(struct socket *so, int refcount, void *lr)
91447636 1779{
b0d623f7
A
1780 void *lr_saved;
1781 lck_mtx_t *mutex_held;
1782
1783 if (lr == NULL)
1784 lr_saved = __builtin_return_address(0);
1785 else
1786 lr_saved = lr;
1787
39037602 1788#if (MORE_KCTLLOCK_DEBUG && (DEVELOPMENT || DEBUG))
fe8ab488
A
1789 printf("ctl_unlock: so=%llx sopcb=%x lock=%llx ref=%u lr=%llx\n",
1790 (uint64_t)VM_KERNEL_ADDRPERM(so),
1791 (uint64_t)VM_KERNEL_ADDRPERM(so->so_pcb,
1792 (uint64_t)VM_KERNEL_ADDRPERM(((struct ctl_cb *)so->so_pcb)->mtx),
1793 so->so_usecount, (uint64_t)VM_KERNEL_ADDRPERM(lr_saved));
39037602 1794#endif /* (MORE_KCTLLOCK_DEBUG && (DEVELOPMENT || DEBUG)) */
91447636
A
1795 if (refcount)
1796 so->so_usecount--;
b0d623f7
A
1797
1798 if (so->so_usecount < 0) {
fe8ab488 1799 panic("ctl_unlock: so=%p usecount=%x lrh= %s\n",
b0d623f7
A
1800 so, so->so_usecount, solockhistory_nr(so));
1801 /* NOTREACHED */
1802 }
91447636 1803 if (so->so_pcb == NULL) {
fe8ab488
A
1804 panic("ctl_unlock: so=%p NO PCB usecount=%x lr=%p lrh= %s\n",
1805 so, so->so_usecount, (void *)lr_saved,
1806 solockhistory_nr(so));
b0d623f7 1807 /* NOTREACHED */
91447636 1808 }
b0d623f7
A
1809 mutex_held = ((struct ctl_cb *)so->so_pcb)->mtx;
1810
91447636 1811 lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
2d21ac55 1812 so->unlock_lr[so->next_unlock_lr] = lr_saved;
0c530ab8 1813 so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX;
91447636 1814 lck_mtx_unlock(mutex_held);
b0d623f7 1815
91447636
A
1816 if (so->so_usecount == 0)
1817 ctl_sofreelastref(so);
b0d623f7 1818
91447636
A
1819 return (0);
1820}
1821
1822static lck_mtx_t *
5ba3f43e 1823ctl_getlock(struct socket *so, int flags)
91447636 1824{
5ba3f43e 1825#pragma unused(flags)
91447636 1826 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
fe8ab488 1827
91447636
A
1828 if (so->so_pcb) {
1829 if (so->so_usecount < 0)
fe8ab488 1830 panic("ctl_getlock: so=%p usecount=%x lrh= %s\n",
b0d623f7 1831 so, so->so_usecount, solockhistory_nr(so));
fe8ab488 1832 return (kcb->mtx);
91447636 1833 } else {
fe8ab488 1834 panic("ctl_getlock: so=%p NULL NO so_pcb %s\n",
b0d623f7 1835 so, solockhistory_nr(so));
91447636
A
1836 return (so->so_proto->pr_domain->dom_mtx);
1837 }
1838}
fe8ab488
A
1839
1840__private_extern__ int
1841kctl_reg_list SYSCTL_HANDLER_ARGS
1842{
1843#pragma unused(oidp, arg1, arg2)
1844 int error = 0;
1845 int n, i;
1846 struct xsystmgen xsg;
1847 void *buf = NULL;
1848 struct kctl *kctl;
1849 size_t item_size = ROUNDUP64(sizeof (struct xkctl_reg));
1850
1851 buf = _MALLOC(item_size, M_TEMP, M_WAITOK | M_ZERO);
1852 if (buf == NULL)
1853 return (ENOMEM);
1854
1855 lck_mtx_lock(ctl_mtx);
1856
1857 n = kctlstat.kcs_reg_count;
1858
1859 if (req->oldptr == USER_ADDR_NULL) {
1860 req->oldidx = (n + n/8) * sizeof(struct xkctl_reg);
1861 goto done;
1862 }
1863 if (req->newptr != USER_ADDR_NULL) {
1864 error = EPERM;
1865 goto done;
1866 }
1867 bzero(&xsg, sizeof (xsg));
1868 xsg.xg_len = sizeof (xsg);
1869 xsg.xg_count = n;
1870 xsg.xg_gen = kctlstat.kcs_gencnt;
1871 xsg.xg_sogen = so_gencnt;
1872 error = SYSCTL_OUT(req, &xsg, sizeof (xsg));
1873 if (error) {
1874 goto done;
1875 }
1876 /*
1877 * We are done if there is no pcb
1878 */
1879 if (n == 0) {
1880 goto done;
1881 }
1882
1883 i = 0;
1884 for (i = 0, kctl = TAILQ_FIRST(&ctl_head);
1885 i < n && kctl != NULL;
1886 i++, kctl = TAILQ_NEXT(kctl, next)) {
1887 struct xkctl_reg *xkr = (struct xkctl_reg *)buf;
1888 struct ctl_cb *kcb;
1889 u_int32_t pcbcount = 0;
1890
1891 TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
1892 pcbcount++;
1893
1894 bzero(buf, item_size);
1895
1896 xkr->xkr_len = sizeof(struct xkctl_reg);
1897 xkr->xkr_kind = XSO_KCREG;
1898 xkr->xkr_id = kctl->id;
1899 xkr->xkr_reg_unit = kctl->reg_unit;
1900 xkr->xkr_flags = kctl->flags;
3e170ce0 1901 xkr->xkr_kctlref = (uint64_t)(kctl->kctlref);
fe8ab488
A
1902 xkr->xkr_recvbufsize = kctl->recvbufsize;
1903 xkr->xkr_sendbufsize = kctl->sendbufsize;
1904 xkr->xkr_lastunit = kctl->lastunit;
1905 xkr->xkr_pcbcount = pcbcount;
5ba3f43e 1906 xkr->xkr_connect = (uint64_t)VM_KERNEL_UNSLIDE(kctl->connect);
fe8ab488 1907 xkr->xkr_disconnect =
5ba3f43e
A
1908 (uint64_t)VM_KERNEL_UNSLIDE(kctl->disconnect);
1909 xkr->xkr_send = (uint64_t)VM_KERNEL_UNSLIDE(kctl->send);
fe8ab488 1910 xkr->xkr_send_list =
5ba3f43e
A
1911 (uint64_t)VM_KERNEL_UNSLIDE(kctl->send_list);
1912 xkr->xkr_setopt = (uint64_t)VM_KERNEL_UNSLIDE(kctl->setopt);
1913 xkr->xkr_getopt = (uint64_t)VM_KERNEL_UNSLIDE(kctl->getopt);
1914 xkr->xkr_rcvd = (uint64_t)VM_KERNEL_UNSLIDE(kctl->rcvd);
fe8ab488
A
1915 strlcpy(xkr->xkr_name, kctl->name, sizeof(xkr->xkr_name));
1916
1917 error = SYSCTL_OUT(req, buf, item_size);
1918 }
1919
1920 if (error == 0) {
1921 /*
1922 * Give the user an updated idea of our state.
1923 * If the generation differs from what we told
1924 * her before, she knows that something happened
1925 * while we were processing this request, and it
1926 * might be necessary to retry.
1927 */
1928 bzero(&xsg, sizeof (xsg));
1929 xsg.xg_len = sizeof (xsg);
1930 xsg.xg_count = n;
1931 xsg.xg_gen = kctlstat.kcs_gencnt;
1932 xsg.xg_sogen = so_gencnt;
1933 error = SYSCTL_OUT(req, &xsg, sizeof (xsg));
1934 if (error) {
1935 goto done;
1936 }
1937 }
1938
1939done:
1940 lck_mtx_unlock(ctl_mtx);
1941
1942 if (buf != NULL)
1943 FREE(buf, M_TEMP);
1944
1945 return (error);
1946}
1947
1948__private_extern__ int
1949kctl_pcblist SYSCTL_HANDLER_ARGS
1950{
1951#pragma unused(oidp, arg1, arg2)
1952 int error = 0;
1953 int n, i;
1954 struct xsystmgen xsg;
1955 void *buf = NULL;
1956 struct kctl *kctl;
1957 size_t item_size = ROUNDUP64(sizeof (struct xkctlpcb)) +
1958 ROUNDUP64(sizeof (struct xsocket_n)) +
1959 2 * ROUNDUP64(sizeof (struct xsockbuf_n)) +
1960 ROUNDUP64(sizeof (struct xsockstat_n));
1961
1962 buf = _MALLOC(item_size, M_TEMP, M_WAITOK | M_ZERO);
1963 if (buf == NULL)
1964 return (ENOMEM);
1965
1966 lck_mtx_lock(ctl_mtx);
1967
1968 n = kctlstat.kcs_pcbcount;
1969
1970 if (req->oldptr == USER_ADDR_NULL) {
1971 req->oldidx = (n + n/8) * item_size;
1972 goto done;
1973 }
1974 if (req->newptr != USER_ADDR_NULL) {
1975 error = EPERM;
1976 goto done;
1977 }
1978 bzero(&xsg, sizeof (xsg));
1979 xsg.xg_len = sizeof (xsg);
1980 xsg.xg_count = n;
1981 xsg.xg_gen = kctlstat.kcs_gencnt;
1982 xsg.xg_sogen = so_gencnt;
1983 error = SYSCTL_OUT(req, &xsg, sizeof (xsg));
1984 if (error) {
1985 goto done;
1986 }
1987 /*
1988 * We are done if there is no pcb
1989 */
1990 if (n == 0) {
1991 goto done;
1992 }
1993
1994 i = 0;
1995 for (i = 0, kctl = TAILQ_FIRST(&ctl_head);
1996 i < n && kctl != NULL;
1997 kctl = TAILQ_NEXT(kctl, next)) {
1998 struct ctl_cb *kcb;
1999
2000 for (kcb = TAILQ_FIRST(&kctl->kcb_head);
2001 i < n && kcb != NULL;
2002 i++, kcb = TAILQ_NEXT(kcb, next)) {
2003 struct xkctlpcb *xk = (struct xkctlpcb *)buf;
2004 struct xsocket_n *xso = (struct xsocket_n *)
2005 ADVANCE64(xk, sizeof (*xk));
2006 struct xsockbuf_n *xsbrcv = (struct xsockbuf_n *)
2007 ADVANCE64(xso, sizeof (*xso));
2008 struct xsockbuf_n *xsbsnd = (struct xsockbuf_n *)
2009 ADVANCE64(xsbrcv, sizeof (*xsbrcv));
2010 struct xsockstat_n *xsostats = (struct xsockstat_n *)
2011 ADVANCE64(xsbsnd, sizeof (*xsbsnd));
2012
2013 bzero(buf, item_size);
2014
2015 xk->xkp_len = sizeof(struct xkctlpcb);
2016 xk->xkp_kind = XSO_KCB;
2017 xk->xkp_unit = kcb->unit;
2018 xk->xkp_kctpcb = (uint64_t)VM_KERNEL_ADDRPERM(kcb);
2019 xk->xkp_kctlref = (uint64_t)VM_KERNEL_ADDRPERM(kctl);
2020 xk->xkp_kctlid = kctl->id;
2021 strlcpy(xk->xkp_kctlname, kctl->name,
2022 sizeof(xk->xkp_kctlname));
2023
2024 sotoxsocket_n(kcb->so, xso);
2025 sbtoxsockbuf_n(kcb->so ?
2026 &kcb->so->so_rcv : NULL, xsbrcv);
2027 sbtoxsockbuf_n(kcb->so ?
2028 &kcb->so->so_snd : NULL, xsbsnd);
2029 sbtoxsockstat_n(kcb->so, xsostats);
2030
2031 error = SYSCTL_OUT(req, buf, item_size);
2032 }
2033 }
2034
2035 if (error == 0) {
2036 /*
2037 * Give the user an updated idea of our state.
2038 * If the generation differs from what we told
2039 * her before, she knows that something happened
2040 * while we were processing this request, and it
2041 * might be necessary to retry.
2042 */
2043 bzero(&xsg, sizeof (xsg));
2044 xsg.xg_len = sizeof (xsg);
2045 xsg.xg_count = n;
2046 xsg.xg_gen = kctlstat.kcs_gencnt;
2047 xsg.xg_sogen = so_gencnt;
2048 error = SYSCTL_OUT(req, &xsg, sizeof (xsg));
2049 if (error) {
2050 goto done;
2051 }
2052 }
2053
2054done:
2055 lck_mtx_unlock(ctl_mtx);
2056
2057 return (error);
2058}
2059
2060int
2061kctl_getstat SYSCTL_HANDLER_ARGS
2062{
2063#pragma unused(oidp, arg1, arg2)
2064 int error = 0;
2065
2066 lck_mtx_lock(ctl_mtx);
2067
2068 if (req->newptr != USER_ADDR_NULL) {
2069 error = EPERM;
2070 goto done;
2071 }
2072 if (req->oldptr == USER_ADDR_NULL) {
2073 req->oldidx = sizeof(struct kctlstat);
2074 goto done;
2075 }
2076
2077 error = SYSCTL_OUT(req, &kctlstat,
2078 MIN(sizeof(struct kctlstat), req->oldlen));
2079done:
2080 lck_mtx_unlock(ctl_mtx);
2081 return (error);
2082}
3e170ce0
A
2083
2084void
2085kctl_fill_socketinfo(struct socket *so, struct socket_info *si)
2086{
2087 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
2088 struct kern_ctl_info *kcsi =
2089 &si->soi_proto.pri_kern_ctl;
2090 struct kctl *kctl = kcb->kctl;
2091
2092 si->soi_kind = SOCKINFO_KERN_CTL;
2093
2094 if (kctl == 0)
2095 return;
2096
2097 kcsi->kcsi_id = kctl->id;
2098 kcsi->kcsi_reg_unit = kctl->reg_unit;
2099 kcsi->kcsi_flags = kctl->flags;
2100 kcsi->kcsi_recvbufsize = kctl->recvbufsize;
2101 kcsi->kcsi_sendbufsize = kctl->sendbufsize;
2102 kcsi->kcsi_unit = kcb->unit;
2103 strlcpy(kcsi->kcsi_name, kctl->name, MAX_KCTL_NAME);
2104}