bsd/kern/kern_control.c (apple/xnu, xnu-7195.81.3)
1/*
2 * Copyright (c) 1999-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29/*
 30 * Kernel Control domain - allows clients to open control connections
 31 * to kernel controllers and to read/write data over them.
32 *
33 * Vincent Lubet, 040506
34 * Christophe Allie, 010928
35 * Justin C. Walker, 990319
36 */
37
38#include <sys/types.h>
39#include <sys/param.h>
40#include <sys/systm.h>
41#include <sys/syslog.h>
42#include <sys/socket.h>
43#include <sys/socketvar.h>
44#include <sys/protosw.h>
45#include <sys/domain.h>
46#include <sys/malloc.h>
47#include <sys/mbuf.h>
48#include <sys/sys_domain.h>
49#include <sys/kern_event.h>
50#include <sys/kern_control.h>
51#include <sys/kauth.h>
52#include <sys/sysctl.h>
53#include <sys/proc_info.h>
54#include <net/if_var.h>
55
56#include <mach/vm_types.h>
57
58#include <kern/thread.h>
59
60struct kctl {
61 TAILQ_ENTRY(kctl) next; /* controller chain */
62 kern_ctl_ref kctlref;
63
64 /* controller information provided when registering */
65 char name[MAX_KCTL_NAME]; /* unique identifier */
66 u_int32_t id;
67 u_int32_t reg_unit;
68
69 /* misc communication information */
70 u_int32_t flags; /* support flags */
71 u_int32_t recvbufsize; /* request more than the default buffer size */
72 u_int32_t sendbufsize; /* request more than the default buffer size */
73
74 /* Dispatch functions */
75 ctl_setup_func setup; /* Setup contact */
76 ctl_bind_func bind; /* Prepare contact */
77 ctl_connect_func connect; /* Make contact */
78 ctl_disconnect_func disconnect; /* Break contact */
79 ctl_send_func send; /* Send data to nke */
80 ctl_send_list_func send_list; /* Send list of packets */
81 ctl_setopt_func setopt; /* set kctl configuration */
82 ctl_getopt_func getopt; /* get kctl configuration */
83 ctl_rcvd_func rcvd; /* Notify nke when client reads data */
84
85 TAILQ_HEAD(, ctl_cb) kcb_head;
86 u_int32_t lastunit;
87};
88
89#if DEVELOPMENT || DEBUG
90enum ctl_status {
91 KCTL_DISCONNECTED = 0,
92 KCTL_CONNECTING = 1,
93 KCTL_CONNECTED = 2
94};
95#endif /* DEVELOPMENT || DEBUG */
96
97struct ctl_cb {
98 TAILQ_ENTRY(ctl_cb) next; /* controller chain */
99 lck_mtx_t *mtx;
100 struct socket *so; /* controlling socket */
101 struct kctl *kctl; /* back pointer to controller */
102 void *userdata;
103 struct sockaddr_ctl sac;
104 u_int32_t usecount;
105 u_int32_t kcb_usecount;
106 u_int32_t require_clearing_count;
107#if DEVELOPMENT || DEBUG
108 enum ctl_status status;
109#endif /* DEVELOPMENT || DEBUG */
110};
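/*
 * Note: there is one ctl_cb per kernel control socket.  Each ctl_cb is
 * linked into the kcb_head list of the struct kctl it attaches to (see
 * ctl_setup_kctl), and kcb_usecount/require_clearing_count are used by
 * ctl_kcb_increment_use_count() and friends to serialize critical calls
 * into the kctl subsystem.
 */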
111
112#ifndef ROUNDUP64
113#define ROUNDUP64(x) P2ROUNDUP((x), sizeof (u_int64_t))
114#endif
115
116#ifndef ADVANCE64
117#define ADVANCE64(p, n) (void*)((char *)(p) + ROUNDUP64(n))
118#endif
119
120/*
 121 * Default send and receive buffer sizes
122 */
123
124#define CTL_SENDSIZE (2 * 1024) /* default buffer size */
125#define CTL_RECVSIZE (8 * 1024) /* default buffer size */
126
127/*
 128 * Global state for the kernel control subsystem
129 */
130
131const u_int32_t ctl_maxunit = 65536;
132static lck_grp_attr_t *ctl_lck_grp_attr = 0;
133static lck_attr_t *ctl_lck_attr = 0;
134static lck_grp_t *ctl_lck_grp = 0;
135static lck_mtx_t *ctl_mtx;
136
137/* all the controllers are chained */
138TAILQ_HEAD(kctl_list, kctl) ctl_head;
139
140static int ctl_attach(struct socket *, int, struct proc *);
141static int ctl_detach(struct socket *);
142static int ctl_sofreelastref(struct socket *so);
143static int ctl_bind(struct socket *, struct sockaddr *, struct proc *);
144static int ctl_connect(struct socket *, struct sockaddr *, struct proc *);
145static int ctl_disconnect(struct socket *);
146static int ctl_ioctl(struct socket *so, u_long cmd, caddr_t data,
147 struct ifnet *ifp, struct proc *p);
148static int ctl_send(struct socket *, int, struct mbuf *,
149 struct sockaddr *, struct mbuf *, struct proc *);
150static int ctl_send_list(struct socket *, int, struct mbuf *,
151 struct sockaddr *, struct mbuf *, struct proc *);
152static int ctl_ctloutput(struct socket *, struct sockopt *);
153static int ctl_peeraddr(struct socket *so, struct sockaddr **nam);
154static int ctl_usr_rcvd(struct socket *so, int flags);
155
156static struct kctl *ctl_find_by_name(const char *);
157static struct kctl *ctl_find_by_id_unit(u_int32_t id, u_int32_t unit);
158
159static struct socket *kcb_find_socket(kern_ctl_ref kctlref, u_int32_t unit,
160 u_int32_t *);
161static struct ctl_cb *kcb_find(struct kctl *, u_int32_t unit);
162static void ctl_post_msg(u_int32_t event_code, u_int32_t id);
163
164static int ctl_lock(struct socket *, int, void *);
165static int ctl_unlock(struct socket *, int, void *);
166static lck_mtx_t * ctl_getlock(struct socket *, int);
167
168static struct pr_usrreqs ctl_usrreqs = {
169 .pru_attach = ctl_attach,
170 .pru_bind = ctl_bind,
171 .pru_connect = ctl_connect,
172 .pru_control = ctl_ioctl,
173 .pru_detach = ctl_detach,
174 .pru_disconnect = ctl_disconnect,
175 .pru_peeraddr = ctl_peeraddr,
176 .pru_rcvd = ctl_usr_rcvd,
177 .pru_send = ctl_send,
178 .pru_send_list = ctl_send_list,
179 .pru_sosend = sosend,
180 .pru_sosend_list = sosend_list,
181 .pru_soreceive = soreceive,
182 .pru_soreceive_list = soreceive_list,
183};
184
185static struct protosw kctlsw[] = {
186 {
187 .pr_type = SOCK_DGRAM,
188 .pr_protocol = SYSPROTO_CONTROL,
189 .pr_flags = PR_ATOMIC | PR_CONNREQUIRED | PR_PCBLOCK | PR_WANTRCVD,
190 .pr_ctloutput = ctl_ctloutput,
191 .pr_usrreqs = &ctl_usrreqs,
192 .pr_lock = ctl_lock,
193 .pr_unlock = ctl_unlock,
194 .pr_getlock = ctl_getlock,
195 },
196 {
197 .pr_type = SOCK_STREAM,
198 .pr_protocol = SYSPROTO_CONTROL,
199 .pr_flags = PR_CONNREQUIRED | PR_PCBLOCK | PR_WANTRCVD,
200 .pr_ctloutput = ctl_ctloutput,
201 .pr_usrreqs = &ctl_usrreqs,
202 .pr_lock = ctl_lock,
203 .pr_unlock = ctl_unlock,
204 .pr_getlock = ctl_getlock,
205 }
206};
207
208__private_extern__ int kctl_reg_list SYSCTL_HANDLER_ARGS;
209__private_extern__ int kctl_pcblist SYSCTL_HANDLER_ARGS;
210__private_extern__ int kctl_getstat SYSCTL_HANDLER_ARGS;
211
212
213SYSCTL_NODE(_net_systm, OID_AUTO, kctl,
214 CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Kernel control family");
215
216struct kctlstat kctlstat;
217SYSCTL_PROC(_net_systm_kctl, OID_AUTO, stats,
218 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
219 kctl_getstat, "S,kctlstat", "");
220
221SYSCTL_PROC(_net_systm_kctl, OID_AUTO, reg_list,
222 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
223 kctl_reg_list, "S,xkctl_reg", "");
224
225SYSCTL_PROC(_net_systm_kctl, OID_AUTO, pcblist,
226 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
227 kctl_pcblist, "S,xkctlpcb", "");
228
229u_int32_t ctl_autorcvbuf_max = 256 * 1024;
230SYSCTL_INT(_net_systm_kctl, OID_AUTO, autorcvbufmax,
231 CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_autorcvbuf_max, 0, "");
232
233u_int32_t ctl_autorcvbuf_high = 0;
234SYSCTL_INT(_net_systm_kctl, OID_AUTO, autorcvbufhigh,
235 CTLFLAG_RD | CTLFLAG_LOCKED, &ctl_autorcvbuf_high, 0, "");
236
237u_int32_t ctl_debug = 0;
238SYSCTL_INT(_net_systm_kctl, OID_AUTO, debug,
239 CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_debug, 0, "");
240
241#if DEVELOPMENT || DEBUG
242u_int32_t ctl_panic_debug = 0;
243SYSCTL_INT(_net_systm_kctl, OID_AUTO, panicdebug,
244 CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_panic_debug, 0, "");
245#endif /* DEVELOPMENT || DEBUG */
246
247#define KCTL_TBL_INC 16
248
249static uintptr_t kctl_tbl_size = 0;
250static u_int32_t kctl_tbl_growing = 0;
251static u_int32_t kctl_tbl_growing_waiting = 0;
252static uintptr_t kctl_tbl_count = 0;
253static struct kctl **kctl_table = NULL;
254static uintptr_t kctl_ref_gencnt = 0;
255
256static void kctl_tbl_grow(void);
257static kern_ctl_ref kctl_make_ref(struct kctl *kctl);
258static void kctl_delete_ref(kern_ctl_ref);
259static struct kctl *kctl_from_ref(kern_ctl_ref);
260
261/*
262 * Install the protosw's for the Kernel Control manager.
263 */
264__private_extern__ void
265kern_control_init(struct domain *dp)
266{
267 struct protosw *pr;
268 int i;
269 int kctl_proto_count = (sizeof(kctlsw) / sizeof(struct protosw));
270
271 VERIFY(!(dp->dom_flags & DOM_INITIALIZED));
272 VERIFY(dp == systemdomain);
273
274 ctl_lck_grp_attr = lck_grp_attr_alloc_init();
275 if (ctl_lck_grp_attr == NULL) {
276 panic("%s: lck_grp_attr_alloc_init failed\n", __func__);
277 /* NOTREACHED */
278 }
279
280 ctl_lck_grp = lck_grp_alloc_init("Kernel Control Protocol",
281 ctl_lck_grp_attr);
282 if (ctl_lck_grp == NULL) {
283 panic("%s: lck_grp_alloc_init failed\n", __func__);
284 /* NOTREACHED */
285 }
286
287 ctl_lck_attr = lck_attr_alloc_init();
288 if (ctl_lck_attr == NULL) {
289 panic("%s: lck_attr_alloc_init failed\n", __func__);
290 /* NOTREACHED */
291 }
292
293 ctl_mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
294 if (ctl_mtx == NULL) {
295 panic("%s: lck_mtx_alloc_init failed\n", __func__);
296 /* NOTREACHED */
297 }
298 TAILQ_INIT(&ctl_head);
299
300 for (i = 0, pr = &kctlsw[0]; i < kctl_proto_count; i++, pr++) {
301 net_add_proto(pr, dp, 1);
302 }
303}
304
305static void
306kcb_delete(struct ctl_cb *kcb)
307{
308 if (kcb != 0) {
309 if (kcb->mtx != 0) {
310 lck_mtx_free(kcb->mtx, ctl_lck_grp);
311 }
312 FREE(kcb, M_TEMP);
313 }
314}
315
316/*
317 * Kernel Controller user-request functions
318 * attach function must exist and succeed
319 * detach not necessary
320 * we need a pcb for the per socket mutex
321 */
322static int
323ctl_attach(struct socket *so, int proto, struct proc *p)
324{
325#pragma unused(proto, p)
326 int error = 0;
327 struct ctl_cb *kcb = 0;
328
329 MALLOC(kcb, struct ctl_cb *, sizeof(struct ctl_cb), M_TEMP, M_WAITOK);
330 if (kcb == NULL) {
331 error = ENOMEM;
332 goto quit;
333 }
334 bzero(kcb, sizeof(struct ctl_cb));
335
336 kcb->mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
337 if (kcb->mtx == NULL) {
338 error = ENOMEM;
339 goto quit;
340 }
341 kcb->so = so;
342 so->so_pcb = (caddr_t)kcb;
343
344quit:
345 if (error != 0) {
346 kcb_delete(kcb);
347 kcb = 0;
348 }
349 return error;
350}
351
352static int
353ctl_sofreelastref(struct socket *so)
354{
355 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
356
357 so->so_pcb = 0;
358
359 if (kcb != 0) {
360 struct kctl *kctl;
361 if ((kctl = kcb->kctl) != 0) {
362 lck_mtx_lock(ctl_mtx);
363 TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
364 kctlstat.kcs_pcbcount--;
365 kctlstat.kcs_gencnt++;
366 lck_mtx_unlock(ctl_mtx);
367 }
368 kcb_delete(kcb);
369 }
370 sofreelastref(so, 1);
371 return 0;
372}
373
374/*
375 * Use this function and ctl_kcb_require_clearing to serialize
376 * critical calls into the kctl subsystem
377 */
378static void
379ctl_kcb_increment_use_count(struct ctl_cb *kcb, lck_mtx_t *mutex_held)
380{
381 LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED);
382 while (kcb->require_clearing_count > 0) {
383 msleep(&kcb->require_clearing_count, mutex_held, PSOCK | PCATCH, "kcb_require_clearing", NULL);
384 }
385 kcb->kcb_usecount++;
386}
387
388static void
389ctl_kcb_require_clearing(struct ctl_cb *kcb, lck_mtx_t *mutex_held)
390{
391 assert(kcb->kcb_usecount != 0);
392 kcb->require_clearing_count++;
393 kcb->kcb_usecount--;
394 while (kcb->kcb_usecount > 0) { // we need to wait until no one else is running
395 msleep(&kcb->kcb_usecount, mutex_held, PSOCK | PCATCH, "kcb_usecount", NULL);
396 }
397 kcb->kcb_usecount++;
398}
399
400static void
401ctl_kcb_done_clearing(struct ctl_cb *kcb)
402{
403 assert(kcb->require_clearing_count != 0);
404 kcb->require_clearing_count--;
405 wakeup((caddr_t)&kcb->require_clearing_count);
406}
407
408static void
409ctl_kcb_decrement_use_count(struct ctl_cb *kcb)
410{
411 assert(kcb->kcb_usecount != 0);
412 kcb->kcb_usecount--;
413 wakeup((caddr_t)&kcb->kcb_usecount);
414}
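/*
 * Typical usage of the serialization helpers above (see ctl_detach,
 * ctl_bind, ctl_connect and ctl_disconnect for the actual call sites):
 *
 *	lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
 *	ctl_kcb_increment_use_count(kcb, mtx_held);
 *	ctl_kcb_require_clearing(kcb, mtx_held);  // only for exclusive calls
 *	... critical kctl work, possibly dropping the socket lock ...
 *	ctl_kcb_done_clearing(kcb);               // pairs with require_clearing
 *	ctl_kcb_decrement_use_count(kcb);         // pairs with increment
 *
 * Callers that only need to keep the kcb busy (e.g. ctl_send) take the
 * use count without requiring clearing.
 */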
415
416static int
417ctl_detach(struct socket *so)
418{
419 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
420
421 if (kcb == 0) {
422 return 0;
423 }
424
425 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
426 ctl_kcb_increment_use_count(kcb, mtx_held);
427 ctl_kcb_require_clearing(kcb, mtx_held);
428
429 if (kcb->kctl != NULL && kcb->kctl->bind != NULL &&
430 kcb->userdata != NULL && !(so->so_state & SS_ISCONNECTED)) {
431 // The unit was bound, but not connected
432 // Invoke the disconnected call to cleanup
433 if (kcb->kctl->disconnect != NULL) {
434 socket_unlock(so, 0);
435 (*kcb->kctl->disconnect)(kcb->kctl->kctlref,
436 kcb->sac.sc_unit, kcb->userdata);
437 socket_lock(so, 0);
438 }
439 }
440
441 soisdisconnected(so);
442#if DEVELOPMENT || DEBUG
443 kcb->status = KCTL_DISCONNECTED;
444#endif /* DEVELOPMENT || DEBUG */
445 so->so_flags |= SOF_PCBCLEARING;
446 ctl_kcb_done_clearing(kcb);
447 ctl_kcb_decrement_use_count(kcb);
448 return 0;
449}
450
451static int
452ctl_setup_kctl(struct socket *so, struct sockaddr *nam, struct proc *p)
453{
454 struct kctl *kctl = NULL;
455 int error = 0;
456 struct sockaddr_ctl sa;
457 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
458 struct ctl_cb *kcb_next = NULL;
459 u_quad_t sbmaxsize;
460 u_int32_t recvbufsize, sendbufsize;
461
462 if (kcb == 0) {
463 panic("ctl_setup_kctl so_pcb null\n");
464 }
465
466 if (kcb->kctl != NULL) {
467 // Already set up, skip
468 return 0;
469 }
470
471 if (nam->sa_len != sizeof(struct sockaddr_ctl)) {
472 return EINVAL;
473 }
474
475 bcopy(nam, &sa, sizeof(struct sockaddr_ctl));
476
477 lck_mtx_lock(ctl_mtx);
478 kctl = ctl_find_by_id_unit(sa.sc_id, sa.sc_unit);
479 if (kctl == NULL) {
480 lck_mtx_unlock(ctl_mtx);
481 return ENOENT;
482 }
483
484 if (((kctl->flags & CTL_FLAG_REG_SOCK_STREAM) &&
485 (so->so_type != SOCK_STREAM)) ||
486 (!(kctl->flags & CTL_FLAG_REG_SOCK_STREAM) &&
487 (so->so_type != SOCK_DGRAM))) {
488 lck_mtx_unlock(ctl_mtx);
489 return EPROTOTYPE;
490 }
491
492 if (kctl->flags & CTL_FLAG_PRIVILEGED) {
493 if (p == 0) {
494 lck_mtx_unlock(ctl_mtx);
495 return EINVAL;
496 }
497 if (kauth_cred_issuser(kauth_cred_get()) == 0) {
498 lck_mtx_unlock(ctl_mtx);
499 return EPERM;
500 }
501 }
502
503 if ((kctl->flags & CTL_FLAG_REG_ID_UNIT) || sa.sc_unit != 0) {
504 if (kcb_find(kctl, sa.sc_unit) != NULL) {
505 lck_mtx_unlock(ctl_mtx);
506 return EBUSY;
507 }
508 } else if (kctl->setup != NULL) {
509 error = (*kctl->setup)(&sa.sc_unit, &kcb->userdata);
510 if (error != 0) {
511 lck_mtx_unlock(ctl_mtx);
512 return error;
513 }
514 } else {
515 /* Find an unused ID, assumes control IDs are in order */
516 u_int32_t unit = 1;
517
518 TAILQ_FOREACH(kcb_next, &kctl->kcb_head, next) {
519 if (kcb_next->sac.sc_unit > unit) {
520 /* Found a gap, lets fill it in */
521 break;
522 }
523 unit = kcb_next->sac.sc_unit + 1;
524 if (unit == ctl_maxunit) {
525 break;
526 }
527 }
528
529 if (unit == ctl_maxunit) {
530 lck_mtx_unlock(ctl_mtx);
531 return EBUSY;
532 }
533
534 sa.sc_unit = unit;
535 }
536
537 bcopy(&sa, &kcb->sac, sizeof(struct sockaddr_ctl));
538 kcb->kctl = kctl;
539 if (kcb_next != NULL) {
540 TAILQ_INSERT_BEFORE(kcb_next, kcb, next);
541 } else {
542 TAILQ_INSERT_TAIL(&kctl->kcb_head, kcb, next);
543 }
544 kctlstat.kcs_pcbcount++;
545 kctlstat.kcs_gencnt++;
546 kctlstat.kcs_connections++;
547 lck_mtx_unlock(ctl_mtx);
548
549 /*
550 * rdar://15526688: Limit the send and receive sizes to sb_max
551 * by using the same scaling as sbreserve()
552 */
553 sbmaxsize = (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES);
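	/*
	 * Illustrative numbers only (MSIZE, MCLBYTES and sb_max are
	 * configuration dependent): with MSIZE 256 and MCLBYTES 2048 the
	 * scaling factor is 2048 / 2304, i.e. roughly 89% of sb_max is
	 * available for data; the remainder accounts for mbuf overhead,
	 * matching the ratio that sbreserve() enforces.
	 */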
554
555 if (kctl->sendbufsize > sbmaxsize) {
556 sendbufsize = (u_int32_t)sbmaxsize;
557 } else {
558 sendbufsize = kctl->sendbufsize;
559 }
560
561 if (kctl->recvbufsize > sbmaxsize) {
562 recvbufsize = (u_int32_t)sbmaxsize;
563 } else {
564 recvbufsize = kctl->recvbufsize;
565 }
566
567 error = soreserve(so, sendbufsize, recvbufsize);
568 if (error) {
569 if (ctl_debug) {
570 printf("%s - soreserve(%llx, %u, %u) error %d\n",
571 __func__, (uint64_t)VM_KERNEL_ADDRPERM(so),
572 sendbufsize, recvbufsize, error);
573 }
574 goto done;
575 }
576
577done:
578 if (error) {
579 soisdisconnected(so);
580#if DEVELOPMENT || DEBUG
581 kcb->status = KCTL_DISCONNECTED;
582#endif /* DEVELOPMENT || DEBUG */
583 lck_mtx_lock(ctl_mtx);
584 TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
585 kcb->kctl = NULL;
586 kcb->sac.sc_unit = 0;
587 kctlstat.kcs_pcbcount--;
588 kctlstat.kcs_gencnt++;
589 kctlstat.kcs_conn_fail++;
590 lck_mtx_unlock(ctl_mtx);
591 }
592 return error;
593}
594
595static int
596ctl_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
597{
598 int error = 0;
599 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
600
601 if (kcb == NULL) {
602 panic("ctl_bind so_pcb null\n");
603 }
604
605 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
606 ctl_kcb_increment_use_count(kcb, mtx_held);
607 ctl_kcb_require_clearing(kcb, mtx_held);
608
609 error = ctl_setup_kctl(so, nam, p);
610 if (error) {
611 goto out;
612 }
613
614 if (kcb->kctl == NULL) {
615 panic("ctl_bind kctl null\n");
616 }
617
618 if (kcb->kctl->bind == NULL) {
619 error = EINVAL;
620 goto out;
621 }
622
623 socket_unlock(so, 0);
624 error = (*kcb->kctl->bind)(kcb->kctl->kctlref, &kcb->sac, &kcb->userdata);
625 socket_lock(so, 0);
626
627out:
628 ctl_kcb_done_clearing(kcb);
629 ctl_kcb_decrement_use_count(kcb);
630 return error;
631}
632
633static int
634ctl_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
635{
636 int error = 0;
637 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
638
639 if (kcb == NULL) {
640 panic("ctl_connect so_pcb null\n");
641 }
642
643 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
644 ctl_kcb_increment_use_count(kcb, mtx_held);
645 ctl_kcb_require_clearing(kcb, mtx_held);
646
647#if DEVELOPMENT || DEBUG
648 if (kcb->status != KCTL_DISCONNECTED && ctl_panic_debug) {
649 panic("kctl already connecting/connected");
650 }
651 kcb->status = KCTL_CONNECTING;
652#endif /* DEVELOPMENT || DEBUG */
653
654 error = ctl_setup_kctl(so, nam, p);
655 if (error) {
656 goto out;
657 }
658
659 if (kcb->kctl == NULL) {
660 panic("ctl_connect kctl null\n");
661 }
662
663 soisconnecting(so);
664 socket_unlock(so, 0);
665 error = (*kcb->kctl->connect)(kcb->kctl->kctlref, &kcb->sac, &kcb->userdata);
666 socket_lock(so, 0);
667 if (error) {
668 goto end;
669 }
670 soisconnected(so);
671#if DEVELOPMENT || DEBUG
672 kcb->status = KCTL_CONNECTED;
673#endif /* DEVELOPMENT || DEBUG */
674
675end:
676 if (error && kcb->kctl->disconnect) {
 677 /*
 678 * XXX Make sure we don't check the return value
 679 * of disconnect here:
 680 * ipsec/utun_ctl_disconnect returns an error when
 681 * disconnect gets called after a connect failure.
 682 * However, if we ever decide to check the disconnect
 683 * return value here, please make sure to revisit
 684 * ipsec/utun_ctl_disconnect.
 685 */
686 socket_unlock(so, 0);
687 (*kcb->kctl->disconnect)(kcb->kctl->kctlref, kcb->sac.sc_unit, kcb->userdata);
688 socket_lock(so, 0);
689 }
690 if (error) {
691 soisdisconnected(so);
692#if DEVELOPMENT || DEBUG
693 kcb->status = KCTL_DISCONNECTED;
694#endif /* DEVELOPMENT || DEBUG */
695 lck_mtx_lock(ctl_mtx);
696 TAILQ_REMOVE(&kcb->kctl->kcb_head, kcb, next);
697 kcb->kctl = NULL;
698 kcb->sac.sc_unit = 0;
699 kctlstat.kcs_pcbcount--;
700 kctlstat.kcs_gencnt++;
701 kctlstat.kcs_conn_fail++;
702 lck_mtx_unlock(ctl_mtx);
703 }
704out:
705 ctl_kcb_done_clearing(kcb);
706 ctl_kcb_decrement_use_count(kcb);
707 return error;
708}
709
710static int
711ctl_disconnect(struct socket *so)
712{
713 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
714
715 if ((kcb = (struct ctl_cb *)so->so_pcb)) {
716 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
717 ctl_kcb_increment_use_count(kcb, mtx_held);
718 ctl_kcb_require_clearing(kcb, mtx_held);
719 struct kctl *kctl = kcb->kctl;
720
721 if (kctl && kctl->disconnect) {
722 socket_unlock(so, 0);
723 (*kctl->disconnect)(kctl->kctlref, kcb->sac.sc_unit,
724 kcb->userdata);
725 socket_lock(so, 0);
726 }
727
728 soisdisconnected(so);
729#if DEVELOPMENT || DEBUG
730 kcb->status = KCTL_DISCONNECTED;
731#endif /* DEVELOPMENT || DEBUG */
732
733 socket_unlock(so, 0);
734 lck_mtx_lock(ctl_mtx);
735 kcb->kctl = 0;
736 kcb->sac.sc_unit = 0;
737 while (kcb->usecount != 0) {
738 msleep(&kcb->usecount, ctl_mtx, 0, "kcb->usecount", 0);
739 }
740 TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
741 kctlstat.kcs_pcbcount--;
742 kctlstat.kcs_gencnt++;
743 lck_mtx_unlock(ctl_mtx);
744 socket_lock(so, 0);
745 ctl_kcb_done_clearing(kcb);
746 ctl_kcb_decrement_use_count(kcb);
747 }
748 return 0;
749}
750
751static int
752ctl_peeraddr(struct socket *so, struct sockaddr **nam)
753{
754 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
755 struct kctl *kctl;
756 struct sockaddr_ctl sc;
757
758 if (kcb == NULL) { /* sanity check */
759 return ENOTCONN;
760 }
761
762 if ((kctl = kcb->kctl) == NULL) {
763 return EINVAL;
764 }
765
766 bzero(&sc, sizeof(struct sockaddr_ctl));
767 sc.sc_len = sizeof(struct sockaddr_ctl);
768 sc.sc_family = AF_SYSTEM;
769 sc.ss_sysaddr = AF_SYS_CONTROL;
770 sc.sc_id = kctl->id;
771 sc.sc_unit = kcb->sac.sc_unit;
772
773 *nam = dup_sockaddr((struct sockaddr *)&sc, 1);
774
775 return 0;
776}
777
778static void
779ctl_sbrcv_trim(struct socket *so)
780{
781 struct sockbuf *sb = &so->so_rcv;
782
783 if (sb->sb_hiwat > sb->sb_idealsize) {
784 u_int32_t diff;
785 int32_t trim;
786
787 /*
788 * The difference between the ideal size and the
 789 * current size is the upper bound of the amount to trim
790 */
791 diff = sb->sb_hiwat - sb->sb_idealsize;
792 /*
793 * We cannot trim below the outstanding data
794 */
795 trim = sb->sb_hiwat - sb->sb_cc;
796
797 trim = imin(trim, (int32_t)diff);
798
799 if (trim > 0) {
800 sbreserve(sb, (sb->sb_hiwat - trim));
801
802 if (ctl_debug) {
803 printf("%s - shrunk to %d\n",
804 __func__, sb->sb_hiwat);
805 }
806 }
807 }
808}
809
810static int
811ctl_usr_rcvd(struct socket *so, int flags)
812{
813 int error = 0;
814 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
815 struct kctl *kctl;
816
817 if (kcb == NULL) {
818 return ENOTCONN;
819 }
820
821 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
822 ctl_kcb_increment_use_count(kcb, mtx_held);
823
824 if ((kctl = kcb->kctl) == NULL) {
825 error = EINVAL;
826 goto out;
827 }
828
829 if (kctl->rcvd) {
830 socket_unlock(so, 0);
831 (*kctl->rcvd)(kctl->kctlref, kcb->sac.sc_unit, kcb->userdata, flags);
832 socket_lock(so, 0);
833 }
834
835 ctl_sbrcv_trim(so);
836
837out:
838 ctl_kcb_decrement_use_count(kcb);
839 return error;
840}
841
842static int
843ctl_send(struct socket *so, int flags, struct mbuf *m,
844 struct sockaddr *addr, struct mbuf *control,
845 struct proc *p)
846{
847#pragma unused(addr, p)
848 int error = 0;
849 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
850 struct kctl *kctl;
851
852 if (control) {
853 m_freem(control);
854 }
855
856 if (kcb == NULL) { /* sanity check */
857 error = ENOTCONN;
858 }
859
860 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
861 ctl_kcb_increment_use_count(kcb, mtx_held);
862
863 if (error == 0 && (kctl = kcb->kctl) == NULL) {
864 error = EINVAL;
865 }
866
867 if (error == 0 && kctl->send) {
868 so_tc_update_stats(m, so, m_get_service_class(m));
869 socket_unlock(so, 0);
870 error = (*kctl->send)(kctl->kctlref, kcb->sac.sc_unit, kcb->userdata,
871 m, flags);
872 socket_lock(so, 0);
873 } else {
874 m_freem(m);
875 if (error == 0) {
876 error = ENOTSUP;
877 }
878 }
879 if (error != 0) {
880 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_send_fail);
881 }
882 ctl_kcb_decrement_use_count(kcb);
883
884 return error;
885}
886
887static int
888ctl_send_list(struct socket *so, int flags, struct mbuf *m,
889 __unused struct sockaddr *addr, struct mbuf *control,
890 __unused struct proc *p)
891{
892 int error = 0;
893 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
894 struct kctl *kctl;
895
896 if (control) {
897 m_freem_list(control);
898 }
899
900 if (kcb == NULL) { /* sanity check */
901 error = ENOTCONN;
902 }
903
904 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
905 ctl_kcb_increment_use_count(kcb, mtx_held);
906
907 if (error == 0 && (kctl = kcb->kctl) == NULL) {
908 error = EINVAL;
909 }
910
911 if (error == 0 && kctl->send_list) {
912 struct mbuf *nxt;
913
914 for (nxt = m; nxt != NULL; nxt = nxt->m_nextpkt) {
915 so_tc_update_stats(nxt, so, m_get_service_class(nxt));
916 }
917
918 socket_unlock(so, 0);
919 error = (*kctl->send_list)(kctl->kctlref, kcb->sac.sc_unit,
920 kcb->userdata, m, flags);
921 socket_lock(so, 0);
922 } else if (error == 0 && kctl->send) {
923 while (m != NULL && error == 0) {
924 struct mbuf *nextpkt = m->m_nextpkt;
925
926 m->m_nextpkt = NULL;
927 so_tc_update_stats(m, so, m_get_service_class(m));
928 socket_unlock(so, 0);
929 error = (*kctl->send)(kctl->kctlref, kcb->sac.sc_unit,
930 kcb->userdata, m, flags);
931 socket_lock(so, 0);
932 m = nextpkt;
933 }
934 if (m != NULL) {
935 m_freem_list(m);
936 }
937 } else {
938 m_freem_list(m);
939 if (error == 0) {
940 error = ENOTSUP;
941 }
942 }
943 if (error != 0) {
944 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_send_list_fail);
945 }
946 ctl_kcb_decrement_use_count(kcb);
947
948 return error;
949}
950
951static errno_t
952ctl_rcvbspace(struct socket *so, size_t datasize,
953 u_int32_t kctlflags, u_int32_t flags)
954{
955 struct sockbuf *sb = &so->so_rcv;
956 u_int32_t space = sbspace(sb);
957 errno_t error;
958
959 if ((kctlflags & CTL_FLAG_REG_CRIT) == 0) {
960 if ((u_int32_t) space >= datasize) {
961 error = 0;
962 } else {
963 error = ENOBUFS;
964 }
965 } else if ((flags & CTL_DATA_CRIT) == 0) {
966 /*
967 * Reserve 25% for critical messages
968 */
969 if (space < (sb->sb_hiwat >> 2) ||
970 space < datasize) {
971 error = ENOBUFS;
972 } else {
973 error = 0;
974 }
975 } else {
976 size_t autorcvbuf_max;
977
978 /*
979 * Allow overcommit of 25%
980 */
981 autorcvbuf_max = min(sb->sb_idealsize + (sb->sb_idealsize >> 2),
982 ctl_autorcvbuf_max);
983
984 if ((u_int32_t) space >= datasize) {
985 error = 0;
986 } else if (tcp_cansbgrow(sb) &&
987 sb->sb_hiwat < autorcvbuf_max) {
988 /*
989 * Grow with a little bit of leeway
990 */
991 size_t grow = datasize - space + MSIZE;
992 u_int32_t cc = (u_int32_t)MIN(MIN((sb->sb_hiwat + grow), autorcvbuf_max), UINT32_MAX);
993
994 if (sbreserve(sb, cc) == 1) {
995 if (sb->sb_hiwat > ctl_autorcvbuf_high) {
996 ctl_autorcvbuf_high = sb->sb_hiwat;
997 }
998
999 /*
1000 * A final check
1001 */
1002 if ((u_int32_t) sbspace(sb) >= datasize) {
1003 error = 0;
1004 } else {
1005 error = ENOBUFS;
1006 }
1007
1008 if (ctl_debug) {
1009 printf("%s - grown to %d error %d\n",
1010 __func__, sb->sb_hiwat, error);
1011 }
1012 } else {
1013 error = ENOBUFS;
1014 }
1015 } else {
1016 error = ENOBUFS;
1017 }
1018 }
1019 return error;
1020}
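/*
 * Summary of the ctl_rcvbspace() policy above (numbers illustrative):
 * - control not registered with CTL_FLAG_REG_CRIT: accept if the free
 *   space covers datasize, otherwise ENOBUFS;
 * - CTL_FLAG_REG_CRIT control, non-critical data: additionally keep the
 *   top 25% of sb_hiwat free for critical messages, e.g. with an 8 KB
 *   high-water mark non-critical enqueues fail once less than 2 KB of
 *   space remains;
 * - CTL_FLAG_REG_CRIT control, CTL_DATA_CRIT data: allow the receive
 *   buffer to grow up to min(sb_idealsize + 25%, ctl_autorcvbuf_max)
 *   before giving up.
 */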
1021
1022errno_t
1023ctl_enqueuembuf(kern_ctl_ref kctlref, u_int32_t unit, struct mbuf *m,
1024 u_int32_t flags)
1025{
1026 struct socket *so;
1027 errno_t error = 0;
1028 int len = m->m_pkthdr.len;
1029 u_int32_t kctlflags;
1030
1031 so = kcb_find_socket(kctlref, unit, &kctlflags);
1032 if (so == NULL) {
1033 return EINVAL;
1034 }
1035
1036 if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
1037 error = ENOBUFS;
1038 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
1039 goto bye;
1040 }
1041 if ((flags & CTL_DATA_EOR)) {
1042 m->m_flags |= M_EOR;
1043 }
1044
1045 so_recv_data_stat(so, m, 0);
1046 if (sbappend_nodrop(&so->so_rcv, m) != 0) {
1047 if ((flags & CTL_DATA_NOWAKEUP) == 0) {
1048 sorwakeup(so);
1049 }
1050 } else {
1051 error = ENOBUFS;
1052 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
1053 }
1054bye:
1055 if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) {
1056 printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
1057 __func__, error, len,
1058 so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);
1059 }
1060
1061 socket_unlock(so, 1);
1062 if (error != 0) {
1063 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
1064 }
1065
1066 return error;
1067}
1068
1069/*
1070 * Compute space occupied by mbuf like sbappendrecord
1071 */
1072static int
1073m_space(struct mbuf *m)
1074{
1075 int space = 0;
1076 struct mbuf *nxt;
1077
1078 for (nxt = m; nxt != NULL; nxt = nxt->m_next) {
1079 space += nxt->m_len;
1080 }
1081
1082 return space;
1083}
1084
1085errno_t
1086ctl_enqueuembuf_list(void *kctlref, u_int32_t unit, struct mbuf *m_list,
1087 u_int32_t flags, struct mbuf **m_remain)
1088{
1089 struct socket *so = NULL;
1090 errno_t error = 0;
1091 struct mbuf *m, *nextpkt;
1092 int needwakeup = 0;
1093 int len = 0;
1094 u_int32_t kctlflags;
1095
1096 /*
 1097 * Need to point to the beginning of the list in case of an early exit
1098 */
1099 m = m_list;
1100
1101 /*
1102 * kcb_find_socket takes the socket lock with a reference
1103 */
1104 so = kcb_find_socket(kctlref, unit, &kctlflags);
1105 if (so == NULL) {
1106 error = EINVAL;
1107 goto done;
1108 }
1109
1110 if (kctlflags & CTL_FLAG_REG_SOCK_STREAM) {
1111 error = EOPNOTSUPP;
1112 goto done;
1113 }
1114 if (flags & CTL_DATA_EOR) {
1115 error = EINVAL;
1116 goto done;
1117 }
1118
1119 for (m = m_list; m != NULL; m = nextpkt) {
1120 nextpkt = m->m_nextpkt;
1121
1122 if (m->m_pkthdr.len == 0 && ctl_debug) {
1123 printf("%s: %llx m_pkthdr.len is 0",
1124 __func__, (uint64_t)VM_KERNEL_ADDRPERM(m));
1125 }
1126
1127 /*
 1128 * The mbuf is either appended or freed by sbappendrecord(),
 1129 * so compute its length before making that call
1130 */
1131 len = m_space(m);
1132 if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
1133 error = ENOBUFS;
1134 OSIncrementAtomic64(
1135 (SInt64 *)&kctlstat.kcs_enqueue_fullsock);
1136 break;
1137 } else {
1138 /*
1139 * Unlink from the list, m is on its own
1140 */
1141 m->m_nextpkt = NULL;
1142 so_recv_data_stat(so, m, 0);
1143 if (sbappendrecord_nodrop(&so->so_rcv, m) != 0) {
1144 needwakeup = 1;
1145 } else {
1146 /*
1147 * We free or return the remaining
1148 * mbufs in the list
1149 */
1150 m = nextpkt;
1151 error = ENOBUFS;
1152 OSIncrementAtomic64(
1153 (SInt64 *)&kctlstat.kcs_enqueue_fullsock);
1154 break;
1155 }
1156 }
1157 }
1158 if (needwakeup && (flags & CTL_DATA_NOWAKEUP) == 0) {
1159 sorwakeup(so);
1160 }
1161
1162done:
1163 if (so != NULL) {
1164 if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) {
1165 printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
1166 __func__, error, len,
1167 so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);
1168 }
1169
1170 socket_unlock(so, 1);
1171 }
1172 if (m_remain) {
1173 *m_remain = m;
1174
1175 if (m != NULL && socket_debug && so != NULL &&
1176 (so->so_options & SO_DEBUG)) {
1177 struct mbuf *n;
1178
1179 printf("%s m_list %llx\n", __func__,
1180 (uint64_t) VM_KERNEL_ADDRPERM(m_list));
1181 for (n = m; n != NULL; n = n->m_nextpkt) {
1182 printf(" remain %llx m_next %llx\n",
1183 (uint64_t) VM_KERNEL_ADDRPERM(n),
1184 (uint64_t) VM_KERNEL_ADDRPERM(n->m_next));
1185 }
1186 }
1187 } else {
1188 if (m != NULL) {
1189 m_freem_list(m);
1190 }
1191 }
1192 if (error != 0) {
1193 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
1194 }
1195 return error;
1196}
1197
1198errno_t
1199ctl_enqueuedata(void *kctlref, u_int32_t unit, void *data, size_t len,
1200 u_int32_t flags)
1201{
1202 struct socket *so;
1203 struct mbuf *m;
1204 errno_t error = 0;
1205 unsigned int num_needed;
1206 struct mbuf *n;
1207 size_t curlen = 0;
1208 u_int32_t kctlflags;
1209
1210 so = kcb_find_socket(kctlref, unit, &kctlflags);
1211 if (so == NULL) {
1212 return EINVAL;
1213 }
1214
1215 if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
1216 error = ENOBUFS;
1217 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
1218 goto bye;
1219 }
1220
1221 num_needed = 1;
1222 m = m_allocpacket_internal(&num_needed, len, NULL, M_NOWAIT, 1, 0);
1223 if (m == NULL) {
1224 kctlstat.kcs_enqdata_mb_alloc_fail++;
1225 if (ctl_debug) {
1226 printf("%s: m_allocpacket_internal(%lu) failed\n",
1227 __func__, len);
1228 }
1229 error = ENOMEM;
1230 goto bye;
1231 }
1232
1233 for (n = m; n != NULL; n = n->m_next) {
1234 size_t mlen = mbuf_maxlen(n);
1235
1236 if (mlen + curlen > len) {
1237 mlen = len - curlen;
1238 }
1239 n->m_len = (int32_t)mlen;
1240 bcopy((char *)data + curlen, n->m_data, mlen);
1241 curlen += mlen;
1242 }
1243 mbuf_pkthdr_setlen(m, curlen);
1244
1245 if ((flags & CTL_DATA_EOR)) {
1246 m->m_flags |= M_EOR;
1247 }
1248 so_recv_data_stat(so, m, 0);
1249 /*
1250 * No need to call the "nodrop" variant of sbappend
1251 * because the mbuf is local to the scope of the function
1252 */
1253 if (sbappend(&so->so_rcv, m) != 0) {
1254 if ((flags & CTL_DATA_NOWAKEUP) == 0) {
1255 sorwakeup(so);
1256 }
1257 } else {
1258 kctlstat.kcs_enqdata_sbappend_fail++;
1259 error = ENOBUFS;
1260 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
1261 }
1262
1263bye:
1264 if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) {
1265 printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
1266 __func__, error, (int)len,
1267 so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);
1268 }
1269
1270 socket_unlock(so, 1);
1271 if (error != 0) {
1272 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
1273 }
1274 return error;
1275}
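/*
 * Minimal sketch of how a kernel control typically hands data back to
 * its client with ctl_enqueuedata() (the callback and variable names
 * below are hypothetical, not part of this file):
 *
 *	static void
 *	my_ctl_reply(kern_ctl_ref ref, u_int32_t unit, void *buf, size_t len)
 *	{
 *		errno_t err;
 *
 *		// ctl_enqueuedata() copies len bytes into freshly allocated
 *		// mbufs and appends them to the client's receive buffer.
 *		err = ctl_enqueuedata(ref, unit, buf, len, CTL_DATA_EOR);
 *		if (err == ENOBUFS) {
 *			// Receive buffer full: the client is not reading fast
 *			// enough; the caller decides whether to drop or retry.
 *		}
 *	}
 *
 * ctl_enqueuembuf()/ctl_enqueuembuf_list() are the variants to use when
 * the data is already in mbufs.
 */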
1276
1277errno_t
1278ctl_getenqueuepacketcount(kern_ctl_ref kctlref, u_int32_t unit, u_int32_t *pcnt)
1279{
1280 struct socket *so;
1281 u_int32_t cnt;
1282 struct mbuf *m1;
1283
1284 if (pcnt == NULL) {
1285 return EINVAL;
1286 }
1287
1288 so = kcb_find_socket(kctlref, unit, NULL);
1289 if (so == NULL) {
1290 return EINVAL;
1291 }
1292
1293 cnt = 0;
1294 m1 = so->so_rcv.sb_mb;
1295 while (m1 != NULL) {
1296 if (m1->m_type == MT_DATA ||
1297 m1->m_type == MT_HEADER ||
1298 m1->m_type == MT_OOBDATA) {
1299 cnt += 1;
1300 }
1301 m1 = m1->m_nextpkt;
1302 }
1303 *pcnt = cnt;
1304
1305 socket_unlock(so, 1);
1306
1307 return 0;
1308}
1309
1310errno_t
1311ctl_getenqueuespace(kern_ctl_ref kctlref, u_int32_t unit, size_t *space)
1312{
1313 struct socket *so;
1314 long avail;
1315
1316 if (space == NULL) {
1317 return EINVAL;
1318 }
1319
1320 so = kcb_find_socket(kctlref, unit, NULL);
1321 if (so == NULL) {
1322 return EINVAL;
1323 }
1324
1325 avail = sbspace(&so->so_rcv);
1326 *space = (avail < 0) ? 0 : avail;
1327 socket_unlock(so, 1);
1328
1329 return 0;
1330}
1331
1332errno_t
1333ctl_getenqueuereadable(kern_ctl_ref kctlref, u_int32_t unit,
1334 u_int32_t *difference)
1335{
1336 struct socket *so;
1337
1338 if (difference == NULL) {
1339 return EINVAL;
1340 }
1341
1342 so = kcb_find_socket(kctlref, unit, NULL);
1343 if (so == NULL) {
1344 return EINVAL;
1345 }
1346
1347 if (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat) {
1348 *difference = 0;
1349 } else {
1350 *difference = (so->so_rcv.sb_lowat - so->so_rcv.sb_cc);
1351 }
1352 socket_unlock(so, 1);
1353
1354 return 0;
1355}
1356
1357static int
1358ctl_ctloutput(struct socket *so, struct sockopt *sopt)
1359{
1360 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
1361 struct kctl *kctl;
1362 int error = 0;
1363 void *data = NULL;
1364 size_t len;
1365
1366 if (sopt->sopt_level != SYSPROTO_CONTROL) {
1367 return EINVAL;
1368 }
1369
1370 if (kcb == NULL) { /* sanity check */
1371 return ENOTCONN;
1372 }
1373
1374 if ((kctl = kcb->kctl) == NULL) {
1375 return EINVAL;
1376 }
1377
1378 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
1379 ctl_kcb_increment_use_count(kcb, mtx_held);
1380
1381 switch (sopt->sopt_dir) {
1382 case SOPT_SET:
1383 if (kctl->setopt == NULL) {
1384 error = ENOTSUP;
1385 goto out;
1386 }
1387 if (sopt->sopt_valsize != 0) {
1388 MALLOC(data, void *, sopt->sopt_valsize, M_TEMP,
1389 M_WAITOK | M_ZERO);
1390 if (data == NULL) {
1391 error = ENOMEM;
1392 goto out;
1393 }
1394 error = sooptcopyin(sopt, data,
1395 sopt->sopt_valsize, sopt->sopt_valsize);
1396 }
1397 if (error == 0) {
1398 socket_unlock(so, 0);
1399 error = (*kctl->setopt)(kctl->kctlref,
1400 kcb->sac.sc_unit, kcb->userdata, sopt->sopt_name,
1401 data, sopt->sopt_valsize);
1402 socket_lock(so, 0);
1403 }
1404
1405 if (data != NULL) {
1406 FREE(data, M_TEMP);
1407 }
1408 break;
1409
1410 case SOPT_GET:
1411 if (kctl->getopt == NULL) {
1412 error = ENOTSUP;
1413 goto out;
1414 }
1415
1416 if (sopt->sopt_valsize && sopt->sopt_val) {
1417 MALLOC(data, void *, sopt->sopt_valsize, M_TEMP,
1418 M_WAITOK | M_ZERO);
1419 if (data == NULL) {
1420 error = ENOMEM;
1421 goto out;
1422 }
1423 /*
1424 * 4108337 - copy user data in case the
1425 * kernel control needs it
1426 */
1427 error = sooptcopyin(sopt, data,
1428 sopt->sopt_valsize, sopt->sopt_valsize);
1429 }
1430
1431 if (error == 0) {
1432 len = sopt->sopt_valsize;
1433 socket_unlock(so, 0);
1434 error = (*kctl->getopt)(kctl->kctlref, kcb->sac.sc_unit,
1435 kcb->userdata, sopt->sopt_name,
1436 data, &len);
1437 if (data != NULL && len > sopt->sopt_valsize) {
1438 panic_plain("ctl_ctloutput: ctl %s returned "
1439 "len (%lu) > sopt_valsize (%lu)\n",
1440 kcb->kctl->name, len,
1441 sopt->sopt_valsize);
1442 }
1443 socket_lock(so, 0);
1444 if (error == 0) {
1445 if (data != NULL) {
1446 error = sooptcopyout(sopt, data, len);
1447 } else {
1448 sopt->sopt_valsize = len;
1449 }
1450 }
1451 }
1452 if (data != NULL) {
1453 FREE(data, M_TEMP);
1454 }
1455 break;
1456 }
1457
1458out:
1459 ctl_kcb_decrement_use_count(kcb);
1460 return error;
1461}
1462
1463static int
1464ctl_ioctl(struct socket *so, u_long cmd, caddr_t data,
1465 struct ifnet *ifp, struct proc *p)
1466{
1467#pragma unused(so, ifp, p)
1468 int error = ENOTSUP;
1469
1470 switch (cmd) {
1471 /* get the number of controllers */
1472 case CTLIOCGCOUNT: {
1473 struct kctl *kctl;
1474 u_int32_t n = 0;
1475
1476 lck_mtx_lock(ctl_mtx);
1477 TAILQ_FOREACH(kctl, &ctl_head, next)
1478 n++;
1479 lck_mtx_unlock(ctl_mtx);
1480
1481 bcopy(&n, data, sizeof(n));
1482 error = 0;
1483 break;
1484 }
1485 case CTLIOCGINFO: {
1486 struct ctl_info ctl_info;
1487 struct kctl *kctl = 0;
1488 size_t name_len;
1489
1490 bcopy(data, &ctl_info, sizeof(ctl_info));
1491 name_len = strnlen(ctl_info.ctl_name, MAX_KCTL_NAME);
1492
1493 if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) {
1494 error = EINVAL;
1495 break;
1496 }
1497 lck_mtx_lock(ctl_mtx);
1498 kctl = ctl_find_by_name(ctl_info.ctl_name);
1499 lck_mtx_unlock(ctl_mtx);
1500 if (kctl == 0) {
1501 error = ENOENT;
1502 break;
1503 }
1504 ctl_info.ctl_id = kctl->id;
1505 bcopy(&ctl_info, data, sizeof(ctl_info));
1506 error = 0;
1507 break;
1508 }
1509
1510 /* add controls to get list of NKEs */
1511 }
1512
1513 return error;
1514}
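/*
 * For reference, a minimal user-space client of this ioctl and of
 * ctl_connect() above looks roughly like the following (illustrative
 * sketch only; "com.example.mycontrol" is a placeholder name and error
 * checking is omitted):
 *
 *	#include <sys/socket.h>
 *	#include <sys/sys_domain.h>
 *	#include <sys/kern_control.h>
 *	#include <sys/ioctl.h>
 *	#include <string.h>
 *
 *	int fd = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);
 *
 *	struct ctl_info info;
 *	memset(&info, 0, sizeof(info));
 *	strlcpy(info.ctl_name, "com.example.mycontrol", sizeof(info.ctl_name));
 *	ioctl(fd, CTLIOCGINFO, &info);          // resolves name -> ctl_id
 *
 *	struct sockaddr_ctl sc;
 *	memset(&sc, 0, sizeof(sc));
 *	sc.sc_len = sizeof(sc);
 *	sc.sc_family = AF_SYSTEM;
 *	sc.ss_sysaddr = AF_SYS_CONTROL;
 *	sc.sc_id = info.ctl_id;
 *	sc.sc_unit = 0;                         // 0 = let the kernel pick a unit
 *	connect(fd, (struct sockaddr *)&sc, sizeof(sc));
 */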
1515
1516static void
1517kctl_tbl_grow()
1518{
1519 struct kctl **new_table;
1520 uintptr_t new_size;
1521
1522 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1523
1524 if (kctl_tbl_growing) {
1525 /* Another thread is allocating */
1526 kctl_tbl_growing_waiting++;
1527
1528 do {
1529 (void) msleep((caddr_t) &kctl_tbl_growing, ctl_mtx,
1530 PSOCK | PCATCH, "kctl_tbl_growing", 0);
1531 } while (kctl_tbl_growing);
1532 kctl_tbl_growing_waiting--;
1533 }
1534 /* Another thread grew the table */
1535 if (kctl_table != NULL && kctl_tbl_count < kctl_tbl_size) {
1536 return;
1537 }
1538
1539 /* Verify we have a sane size */
1540 if (kctl_tbl_size + KCTL_TBL_INC >= UINT16_MAX) {
1541 kctlstat.kcs_tbl_size_too_big++;
1542 if (ctl_debug) {
1543 printf("%s kctl_tbl_size %lu too big\n",
1544 __func__, kctl_tbl_size);
1545 }
1546 return;
1547 }
1548 kctl_tbl_growing = 1;
1549
1550 new_size = kctl_tbl_size + KCTL_TBL_INC;
1551
1552 lck_mtx_unlock(ctl_mtx);
1553 new_table = _MALLOC(sizeof(struct kctl *) * new_size,
1554 M_TEMP, M_WAIT | M_ZERO);
1555 lck_mtx_lock(ctl_mtx);
1556
1557 if (new_table != NULL) {
1558 if (kctl_table != NULL) {
1559 bcopy(kctl_table, new_table,
1560 kctl_tbl_size * sizeof(struct kctl *));
1561
1562 _FREE(kctl_table, M_TEMP);
1563 }
1564 kctl_table = new_table;
1565 kctl_tbl_size = new_size;
1566 }
1567
1568 kctl_tbl_growing = 0;
1569
1570 if (kctl_tbl_growing_waiting) {
1571 wakeup(&kctl_tbl_growing);
1572 }
1573}
1574
1575#define KCTLREF_INDEX_MASK 0x0000FFFF
1576#define KCTLREF_GENCNT_MASK 0xFFFF0000
1577#define KCTLREF_GENCNT_SHIFT 16
1578
1579static kern_ctl_ref
1580kctl_make_ref(struct kctl *kctl)
1581{
1582 uintptr_t i;
1583
1584 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1585
1586 if (kctl_tbl_count >= kctl_tbl_size) {
1587 kctl_tbl_grow();
1588 }
1589
1590 kctl->kctlref = NULL;
1591 for (i = 0; i < kctl_tbl_size; i++) {
1592 if (kctl_table[i] == NULL) {
1593 uintptr_t ref;
1594
1595 /*
1596 * Reference is index plus one
1597 */
1598 kctl_ref_gencnt += 1;
1599
1600 /*
1601 * Add generation count as salt to reference to prevent
1602 * use after deregister
1603 */
1604 ref = ((kctl_ref_gencnt << KCTLREF_GENCNT_SHIFT) &
1605 KCTLREF_GENCNT_MASK) +
1606 ((i + 1) & KCTLREF_INDEX_MASK);
1607
1608 kctl->kctlref = (void *)(ref);
1609 kctl_table[i] = kctl;
1610 kctl_tbl_count++;
1611 break;
1612 }
1613 }
1614
1615 if (kctl->kctlref == NULL) {
1616 panic("%s no space in table", __func__);
1617 }
1618
1619 if (ctl_debug > 0) {
1620 printf("%s %p for %p\n",
1621 __func__, kctl->kctlref, kctl);
1622 }
1623
1624 return kctl->kctlref;
1625}
1626
1627static void
1628kctl_delete_ref(kern_ctl_ref kctlref)
1629{
1630 /*
1631 * Reference is index plus one
1632 */
1633 uintptr_t i = (((uintptr_t)kctlref) & KCTLREF_INDEX_MASK) - 1;
1634
1635 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1636
1637 if (i < kctl_tbl_size) {
1638 struct kctl *kctl = kctl_table[i];
1639
1640 if (kctl->kctlref == kctlref) {
1641 kctl_table[i] = NULL;
1642 kctl_tbl_count--;
1643 } else {
1644 kctlstat.kcs_bad_kctlref++;
1645 }
1646 } else {
1647 kctlstat.kcs_bad_kctlref++;
1648 }
1649}
1650
1651static struct kctl *
1652kctl_from_ref(kern_ctl_ref kctlref)
1653{
1654 /*
1655 * Reference is index plus one
1656 */
1657 uintptr_t i = (((uintptr_t)kctlref) & KCTLREF_INDEX_MASK) - 1;
1658 struct kctl *kctl = NULL;
1659
1660 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1661
1662 if (i >= kctl_tbl_size) {
1663 kctlstat.kcs_bad_kctlref++;
1664 return NULL;
1665 }
1666 kctl = kctl_table[i];
1667 if (kctl->kctlref != kctlref) {
1668 kctlstat.kcs_bad_kctlref++;
1669 return NULL;
1670 }
1671 return kctl;
1672}
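/*
 * Example of the reference encoding used above: the third controller
 * slot (index 2) registered while kctl_ref_gencnt is, say, 0x1234
 * yields the opaque value 0x12340003 -- the low 16 bits are index + 1,
 * the high 16 bits are the generation count.  A stale reference whose
 * generation no longer matches the slot's current kctlref is rejected
 * by kctl_from_ref() and counted in kcs_bad_kctlref.
 */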
1673
1674/*
1675 * Register/unregister a NKE
1676 */
1677errno_t
1678ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref)
1679{
1680 struct kctl *kctl = NULL;
1681 struct kctl *kctl_next = NULL;
1682 u_int32_t id = 1;
1683 size_t name_len;
1684 int is_extended = 0;
1685 int is_setup = 0;
1686
1687 if (userkctl == NULL) { /* sanity check */
1688 return EINVAL;
1689 }
1690 if (userkctl->ctl_connect == NULL) {
1691 return EINVAL;
1692 }
1693 name_len = strlen(userkctl->ctl_name);
1694 if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) {
1695 return EINVAL;
1696 }
1697
1698 MALLOC(kctl, struct kctl *, sizeof(*kctl), M_TEMP, M_WAITOK);
1699 if (kctl == NULL) {
1700 return ENOMEM;
1701 }
1702 bzero((char *)kctl, sizeof(*kctl));
1703
1704 lck_mtx_lock(ctl_mtx);
1705
1706 if (kctl_make_ref(kctl) == NULL) {
1707 lck_mtx_unlock(ctl_mtx);
1708 FREE(kctl, M_TEMP);
1709 return ENOMEM;
1710 }
1711
1712 /*
1713 * Kernel Control IDs
1714 *
1715 * CTL_FLAG_REG_ID_UNIT indicates the control ID and unit number are
1716 * static. If they do not exist, add them to the list in order. If the
1717 * flag is not set, we must find a new unique value. We assume the
1718 * list is in order. We find the last item in the list and add one. If
1719 * this leads to wrapping the id around, we start at the front of the
1720 * list and look for a gap.
1721 */
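	/*
	 * For example (hypothetical ids): with registered ids {1, 2, 5},
	 * a dynamic registration gets id 6 (last + 1); only if that
	 * increment wraps around to 0 do we rescan from the front of the
	 * list and would then pick 3, the first gap.
	 */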
1722
1723 if ((userkctl->ctl_flags & CTL_FLAG_REG_ID_UNIT) == 0) {
1724 /* Must dynamically assign an unused ID */
1725
1726 /* Verify the same name isn't already registered */
1727 if (ctl_find_by_name(userkctl->ctl_name) != NULL) {
1728 kctl_delete_ref(kctl->kctlref);
1729 lck_mtx_unlock(ctl_mtx);
1730 FREE(kctl, M_TEMP);
1731 return EEXIST;
1732 }
1733
1734 /* Start with 1 in case the list is empty */
1735 id = 1;
1736 kctl_next = TAILQ_LAST(&ctl_head, kctl_list);
1737
1738 if (kctl_next != NULL) {
1739 /* List was not empty, add one to the last item */
1740 id = kctl_next->id + 1;
1741 kctl_next = NULL;
1742
1743 /*
1744 * If this wrapped the id number, start looking at
1745 * the front of the list for an unused id.
1746 */
1747 if (id == 0) {
1748 /* Find the next unused ID */
1749 id = 1;
1750
1751 TAILQ_FOREACH(kctl_next, &ctl_head, next) {
1752 if (kctl_next->id > id) {
1753 /* We found a gap */
1754 break;
1755 }
1756
1757 id = kctl_next->id + 1;
1758 }
1759 }
1760 }
1761
1762 userkctl->ctl_id = id;
1763 kctl->id = id;
1764 kctl->reg_unit = -1;
1765 } else {
1766 TAILQ_FOREACH(kctl_next, &ctl_head, next) {
1767 if (kctl_next->id > userkctl->ctl_id) {
1768 break;
1769 }
1770 }
1771
1772 if (ctl_find_by_id_unit(userkctl->ctl_id, userkctl->ctl_unit)) {
1773 kctl_delete_ref(kctl->kctlref);
1774 lck_mtx_unlock(ctl_mtx);
1775 FREE(kctl, M_TEMP);
1776 return EEXIST;
1777 }
1778 kctl->id = userkctl->ctl_id;
1779 kctl->reg_unit = userkctl->ctl_unit;
1780 }
1781
1782 is_extended = (userkctl->ctl_flags & CTL_FLAG_REG_EXTENDED);
1783 is_setup = (userkctl->ctl_flags & CTL_FLAG_REG_SETUP);
1784
1785 strlcpy(kctl->name, userkctl->ctl_name, MAX_KCTL_NAME);
1786 kctl->flags = userkctl->ctl_flags;
1787
1788 /*
1789 * Let the caller know the default send and receive sizes
1790 */
1791 if (userkctl->ctl_sendsize == 0) {
1792 kctl->sendbufsize = CTL_SENDSIZE;
1793 userkctl->ctl_sendsize = kctl->sendbufsize;
1794 } else {
1795 kctl->sendbufsize = userkctl->ctl_sendsize;
1796 }
1797 if (userkctl->ctl_recvsize == 0) {
1798 kctl->recvbufsize = CTL_RECVSIZE;
1799 userkctl->ctl_recvsize = kctl->recvbufsize;
1800 } else {
1801 kctl->recvbufsize = userkctl->ctl_recvsize;
1802 }
1803
1804 if (is_setup) {
1805 kctl->setup = userkctl->ctl_setup;
1806 }
1807 kctl->bind = userkctl->ctl_bind;
1808 kctl->connect = userkctl->ctl_connect;
1809 kctl->disconnect = userkctl->ctl_disconnect;
1810 kctl->send = userkctl->ctl_send;
1811 kctl->setopt = userkctl->ctl_setopt;
1812 kctl->getopt = userkctl->ctl_getopt;
1813 if (is_extended) {
1814 kctl->rcvd = userkctl->ctl_rcvd;
1815 kctl->send_list = userkctl->ctl_send_list;
1816 }
1817
1818 TAILQ_INIT(&kctl->kcb_head);
1819
1820 if (kctl_next) {
1821 TAILQ_INSERT_BEFORE(kctl_next, kctl, next);
1822 } else {
1823 TAILQ_INSERT_TAIL(&ctl_head, kctl, next);
1824 }
1825
1826 kctlstat.kcs_reg_count++;
1827 kctlstat.kcs_gencnt++;
1828
1829 lck_mtx_unlock(ctl_mtx);
1830
1831 *kctlref = kctl->kctlref;
1832
1833 ctl_post_msg(KEV_CTL_REGISTERED, kctl->id);
1834 return 0;
1835}
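/*
 * Minimal sketch of a kext registering a kernel control with
 * ctl_register() (the callback names and bundle identifier below are
 * hypothetical):
 *
 *	static kern_ctl_ref g_ctlref;
 *
 *	errno_t
 *	my_ctl_start(void)
 *	{
 *		struct kern_ctl_reg reg;
 *
 *		bzero(&reg, sizeof(reg));
 *		strlcpy(reg.ctl_name, "com.example.mycontrol",
 *		    sizeof(reg.ctl_name));
 *		reg.ctl_flags = 0;                      // dynamic id/unit assignment
 *		reg.ctl_connect = my_ctl_connect;       // required
 *		reg.ctl_disconnect = my_ctl_disconnect;
 *		reg.ctl_send = my_ctl_send;
 *		reg.ctl_setopt = my_ctl_setopt;
 *		reg.ctl_getopt = my_ctl_getopt;
 *
 *		return ctl_register(&reg, &g_ctlref);
 *	}
 *
 * The control is torn down with ctl_deregister(g_ctlref), which fails
 * with EBUSY while clients are still attached (see below).
 */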
1836
1837errno_t
1838ctl_deregister(void *kctlref)
1839{
1840 struct kctl *kctl;
1841
1842 lck_mtx_lock(ctl_mtx);
1843 if ((kctl = kctl_from_ref(kctlref)) == NULL) {
1844 kctlstat.kcs_bad_kctlref++;
1845 lck_mtx_unlock(ctl_mtx);
1846 if (ctl_debug != 0) {
1847 printf("%s invalid kctlref %p\n",
1848 __func__, kctlref);
1849 }
1850 return EINVAL;
1851 }
1852
1853 if (!TAILQ_EMPTY(&kctl->kcb_head)) {
1854 lck_mtx_unlock(ctl_mtx);
1855 return EBUSY;
1856 }
1857
1858 TAILQ_REMOVE(&ctl_head, kctl, next);
1859
1860 kctlstat.kcs_reg_count--;
1861 kctlstat.kcs_gencnt++;
1862
1863 kctl_delete_ref(kctl->kctlref);
1864 lck_mtx_unlock(ctl_mtx);
1865
1866 ctl_post_msg(KEV_CTL_DEREGISTERED, kctl->id);
1867 FREE(kctl, M_TEMP);
1868 return 0;
1869}
1870
1871/*
 1872 * Must be called with global ctl_mtx lock taken
1873 */
1874static struct kctl *
1875ctl_find_by_name(const char *name)
1876{
1877 struct kctl *kctl;
1878
1879 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1880
1881 TAILQ_FOREACH(kctl, &ctl_head, next)
1882 if (strncmp(kctl->name, name, sizeof(kctl->name)) == 0) {
1883 return kctl;
1884 }
1885
1886 return NULL;
1887}
1888
1889u_int32_t
1890ctl_id_by_name(const char *name)
1891{
1892 u_int32_t ctl_id = 0;
1893 struct kctl *kctl;
1894
1895 lck_mtx_lock(ctl_mtx);
1896 kctl = ctl_find_by_name(name);
1897 if (kctl) {
1898 ctl_id = kctl->id;
1899 }
1900 lck_mtx_unlock(ctl_mtx);
1901
1902 return ctl_id;
1903}
1904
1905errno_t
1906ctl_name_by_id(u_int32_t id, char *out_name, size_t maxsize)
1907{
1908 int found = 0;
1909 struct kctl *kctl;
1910
1911 lck_mtx_lock(ctl_mtx);
1912 TAILQ_FOREACH(kctl, &ctl_head, next) {
1913 if (kctl->id == id) {
1914 break;
1915 }
1916 }
1917
1918 if (kctl) {
1919 if (maxsize > MAX_KCTL_NAME) {
1920 maxsize = MAX_KCTL_NAME;
1921 }
1922 strlcpy(out_name, kctl->name, maxsize);
1923 found = 1;
1924 }
1925 lck_mtx_unlock(ctl_mtx);
1926
1927 return found ? 0 : ENOENT;
1928}
1929
1930/*
 1931 * Must be called with global ctl_mtx lock taken
1932 *
1933 */
1934static struct kctl *
1935ctl_find_by_id_unit(u_int32_t id, u_int32_t unit)
1936{
1937 struct kctl *kctl;
1938
1939 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1940
1941 TAILQ_FOREACH(kctl, &ctl_head, next) {
1942 if (kctl->id == id && (kctl->flags & CTL_FLAG_REG_ID_UNIT) == 0) {
1943 return kctl;
1944 } else if (kctl->id == id && kctl->reg_unit == unit) {
1945 return kctl;
1946 }
1947 }
1948 return NULL;
1949}
1950
1951/*
1952 * Must be called with kernel controller lock taken
1953 */
1954static struct ctl_cb *
1955kcb_find(struct kctl *kctl, u_int32_t unit)
1956{
1957 struct ctl_cb *kcb;
1958
1959 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1960
1961 TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
1962 if (kcb->sac.sc_unit == unit) {
1963 return kcb;
1964 }
1965
1966 return NULL;
1967}
1968
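/*
 * kcb_find_socket() resolves a (kctlref, unit) pair to the attached
 * socket and returns it locked, with an extra reference, or NULL.
 * While the socket lock is being acquired the kcb is pinned by bumping
 * kcb->usecount so that ctl_disconnect() cannot free it from under us;
 * the kctlref is re-validated once both locks are held because the
 * controller may have been deregistered in the meantime.
 */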
1969static struct socket *
1970kcb_find_socket(kern_ctl_ref kctlref, u_int32_t unit, u_int32_t *kctlflags)
1971{
1972 struct socket *so = NULL;
1973 struct ctl_cb *kcb;
1974 void *lr_saved;
1975 struct kctl *kctl;
1976 int i;
1977
1978 lr_saved = __builtin_return_address(0);
1979
1980 lck_mtx_lock(ctl_mtx);
1981 /*
1982 * First validate the kctlref
1983 */
1984 if ((kctl = kctl_from_ref(kctlref)) == NULL) {
1985 kctlstat.kcs_bad_kctlref++;
1986 lck_mtx_unlock(ctl_mtx);
1987 if (ctl_debug != 0) {
1988 printf("%s invalid kctlref %p\n",
1989 __func__, kctlref);
1990 }
1991 return NULL;
1992 }
1993
1994 kcb = kcb_find(kctl, unit);
1995 if (kcb == NULL || kcb->kctl != kctl || (so = kcb->so) == NULL) {
1996 lck_mtx_unlock(ctl_mtx);
1997 return NULL;
1998 }
1999 /*
2000 * This prevents the socket from being closed
2001 */
2002 kcb->usecount++;
2003 /*
2004 * Respect lock ordering: socket before ctl_mtx
2005 */
2006 lck_mtx_unlock(ctl_mtx);
2007
2008 socket_lock(so, 1);
2009 /*
2010 * The socket lock history is more useful if we store
2011 * the address of the caller.
2012 */
2013 i = (so->next_lock_lr + SO_LCKDBG_MAX - 1) % SO_LCKDBG_MAX;
2014 so->lock_lr[i] = lr_saved;
2015
2016 lck_mtx_lock(ctl_mtx);
2017
2018 if ((kctl = kctl_from_ref(kctlref)) == NULL || kcb->kctl == NULL) {
2019 lck_mtx_unlock(ctl_mtx);
2020 socket_unlock(so, 1);
2021 so = NULL;
2022 lck_mtx_lock(ctl_mtx);
2023 } else if (kctlflags != NULL) {
2024 *kctlflags = kctl->flags;
2025 }
2026
2027 kcb->usecount--;
2028 if (kcb->usecount == 0) {
2029 wakeup((event_t)&kcb->usecount);
2030 }
2031
2032 lck_mtx_unlock(ctl_mtx);
2033
2034 return so;
2035}
2036
2037static void
2038ctl_post_msg(u_int32_t event_code, u_int32_t id)
2039{
2040 struct ctl_event_data ctl_ev_data;
2041 struct kev_msg ev_msg;
2042
2043 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_NOTOWNED);
2044
2045 bzero(&ev_msg, sizeof(struct kev_msg));
2046 ev_msg.vendor_code = KEV_VENDOR_APPLE;
2047
2048 ev_msg.kev_class = KEV_SYSTEM_CLASS;
2049 ev_msg.kev_subclass = KEV_CTL_SUBCLASS;
2050 ev_msg.event_code = event_code;
2051
2052 /* common nke subclass data */
2053 bzero(&ctl_ev_data, sizeof(ctl_ev_data));
2054 ctl_ev_data.ctl_id = id;
2055 ev_msg.dv[0].data_ptr = &ctl_ev_data;
2056 ev_msg.dv[0].data_length = sizeof(ctl_ev_data);
2057
2058 ev_msg.dv[1].data_length = 0;
2059
2060 kev_post_msg(&ev_msg);
2061}
2062
2063static int
2064ctl_lock(struct socket *so, int refcount, void *lr)
2065{
2066 void *lr_saved;
2067
2068 if (lr == NULL) {
2069 lr_saved = __builtin_return_address(0);
2070 } else {
2071 lr_saved = lr;
2072 }
2073
2074 if (so->so_pcb != NULL) {
2075 lck_mtx_lock(((struct ctl_cb *)so->so_pcb)->mtx);
2076 } else {
2077 panic("ctl_lock: so=%p NO PCB! lr=%p lrh= %s\n",
2078 so, lr_saved, solockhistory_nr(so));
2079 /* NOTREACHED */
2080 }
2081
2082 if (so->so_usecount < 0) {
2083 panic("ctl_lock: so=%p so_pcb=%p lr=%p ref=%x lrh= %s\n",
2084 so, so->so_pcb, lr_saved, so->so_usecount,
2085 solockhistory_nr(so));
2086 /* NOTREACHED */
2087 }
2088
2089 if (refcount) {
2090 so->so_usecount++;
2091 }
2092
2093 so->lock_lr[so->next_lock_lr] = lr_saved;
2094 so->next_lock_lr = (so->next_lock_lr + 1) % SO_LCKDBG_MAX;
2095 return 0;
2096}
2097
2098static int
2099ctl_unlock(struct socket *so, int refcount, void *lr)
2100{
2101 void *lr_saved;
2102 lck_mtx_t *mutex_held;
2103
2104 if (lr == NULL) {
2105 lr_saved = __builtin_return_address(0);
2106 } else {
2107 lr_saved = lr;
2108 }
2109
2110#if (MORE_KCTLLOCK_DEBUG && (DEVELOPMENT || DEBUG))
 2111 printf("ctl_unlock: so=%llx sopcb=%llx lock=%llx ref=%u lr=%llx\n",
 2112 (uint64_t)VM_KERNEL_ADDRPERM(so),
 2113 (uint64_t)VM_KERNEL_ADDRPERM(so->so_pcb),
 2114 (uint64_t)VM_KERNEL_ADDRPERM(((struct ctl_cb *)so->so_pcb)->mtx),
 2115 so->so_usecount, (uint64_t)VM_KERNEL_ADDRPERM(lr_saved));
2116#endif /* (MORE_KCTLLOCK_DEBUG && (DEVELOPMENT || DEBUG)) */
2117 if (refcount) {
2118 so->so_usecount--;
2119 }
2120
2121 if (so->so_usecount < 0) {
2122 panic("ctl_unlock: so=%p usecount=%x lrh= %s\n",
2123 so, so->so_usecount, solockhistory_nr(so));
2124 /* NOTREACHED */
2125 }
2126 if (so->so_pcb == NULL) {
2127 panic("ctl_unlock: so=%p NO PCB usecount=%x lr=%p lrh= %s\n",
2128 so, so->so_usecount, (void *)lr_saved,
2129 solockhistory_nr(so));
2130 /* NOTREACHED */
2131 }
2132 mutex_held = ((struct ctl_cb *)so->so_pcb)->mtx;
2133
2134 lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
2135 so->unlock_lr[so->next_unlock_lr] = lr_saved;
2136 so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX;
2137 lck_mtx_unlock(mutex_held);
2138
2139 if (so->so_usecount == 0) {
2140 ctl_sofreelastref(so);
2141 }
2142
2143 return 0;
2144}
2145
2146static lck_mtx_t *
2147ctl_getlock(struct socket *so, int flags)
2148{
2149#pragma unused(flags)
2150 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
2151
2152 if (so->so_pcb) {
2153 if (so->so_usecount < 0) {
2154 panic("ctl_getlock: so=%p usecount=%x lrh= %s\n",
2155 so, so->so_usecount, solockhistory_nr(so));
2156 }
2157 return kcb->mtx;
2158 } else {
2159 panic("ctl_getlock: so=%p NULL NO so_pcb %s\n",
2160 so, solockhistory_nr(so));
2161 return so->so_proto->pr_domain->dom_mtx;
2162 }
2163}
2164
2165__private_extern__ int
2166kctl_reg_list SYSCTL_HANDLER_ARGS
2167{
2168#pragma unused(oidp, arg1, arg2)
2169 int error = 0;
2170 u_int64_t i, n;
2171 struct xsystmgen xsg;
2172 void *buf = NULL;
2173 struct kctl *kctl;
2174 size_t item_size = ROUNDUP64(sizeof(struct xkctl_reg));
2175
2176 buf = _MALLOC(item_size, M_TEMP, M_WAITOK | M_ZERO);
2177 if (buf == NULL) {
2178 return ENOMEM;
2179 }
2180
2181 lck_mtx_lock(ctl_mtx);
2182
2183 n = kctlstat.kcs_reg_count;
2184
2185 if (req->oldptr == USER_ADDR_NULL) {
2186 req->oldidx = (size_t)(n + n / 8) * sizeof(struct xkctl_reg);
2187 goto done;
2188 }
2189 if (req->newptr != USER_ADDR_NULL) {
2190 error = EPERM;
2191 goto done;
2192 }
2193 bzero(&xsg, sizeof(xsg));
2194 xsg.xg_len = sizeof(xsg);
2195 xsg.xg_count = n;
2196 xsg.xg_gen = kctlstat.kcs_gencnt;
2197 xsg.xg_sogen = so_gencnt;
2198 error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
2199 if (error) {
2200 goto done;
2201 }
2202 /*
2203 * We are done if there is no pcb
2204 */
2205 if (n == 0) {
2206 goto done;
2207 }
2208
2209 for (i = 0, kctl = TAILQ_FIRST(&ctl_head);
2210 i < n && kctl != NULL;
2211 i++, kctl = TAILQ_NEXT(kctl, next)) {
2212 struct xkctl_reg *xkr = (struct xkctl_reg *)buf;
2213 struct ctl_cb *kcb;
2214 u_int32_t pcbcount = 0;
2215
2216 TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
2217 pcbcount++;
2218
2219 bzero(buf, item_size);
2220
2221 xkr->xkr_len = sizeof(struct xkctl_reg);
2222 xkr->xkr_kind = XSO_KCREG;
2223 xkr->xkr_id = kctl->id;
2224 xkr->xkr_reg_unit = kctl->reg_unit;
2225 xkr->xkr_flags = kctl->flags;
2226 xkr->xkr_kctlref = (uint64_t)(kctl->kctlref);
2227 xkr->xkr_recvbufsize = kctl->recvbufsize;
2228 xkr->xkr_sendbufsize = kctl->sendbufsize;
2229 xkr->xkr_lastunit = kctl->lastunit;
2230 xkr->xkr_pcbcount = pcbcount;
2231 xkr->xkr_connect = (uint64_t)VM_KERNEL_UNSLIDE(kctl->connect);
2232 xkr->xkr_disconnect =
2233 (uint64_t)VM_KERNEL_UNSLIDE(kctl->disconnect);
2234 xkr->xkr_send = (uint64_t)VM_KERNEL_UNSLIDE(kctl->send);
2235 xkr->xkr_send_list =
2236 (uint64_t)VM_KERNEL_UNSLIDE(kctl->send_list);
2237 xkr->xkr_setopt = (uint64_t)VM_KERNEL_UNSLIDE(kctl->setopt);
2238 xkr->xkr_getopt = (uint64_t)VM_KERNEL_UNSLIDE(kctl->getopt);
2239 xkr->xkr_rcvd = (uint64_t)VM_KERNEL_UNSLIDE(kctl->rcvd);
2240 strlcpy(xkr->xkr_name, kctl->name, sizeof(xkr->xkr_name));
2241
2242 error = SYSCTL_OUT(req, buf, item_size);
2243 }
2244
2245 if (error == 0) {
2246 /*
2247 * Give the user an updated idea of our state.
2248 * If the generation differs from what we told
2249 * her before, she knows that something happened
2250 * while we were processing this request, and it
2251 * might be necessary to retry.
2252 */
2253 bzero(&xsg, sizeof(xsg));
2254 xsg.xg_len = sizeof(xsg);
2255 xsg.xg_count = n;
2256 xsg.xg_gen = kctlstat.kcs_gencnt;
2257 xsg.xg_sogen = so_gencnt;
2258 error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
2259 if (error) {
2260 goto done;
2261 }
2262 }
2263
2264done:
2265 lck_mtx_unlock(ctl_mtx);
2266
2267 if (buf != NULL) {
2268 FREE(buf, M_TEMP);
2269 }
2270
2271 return error;
2272}
2273
2274__private_extern__ int
2275kctl_pcblist SYSCTL_HANDLER_ARGS
2276{
2277#pragma unused(oidp, arg1, arg2)
2278 int error = 0;
2279 u_int64_t n, i;
2280 struct xsystmgen xsg;
2281 void *buf = NULL;
2282 struct kctl *kctl;
2283 size_t item_size = ROUNDUP64(sizeof(struct xkctlpcb)) +
2284 ROUNDUP64(sizeof(struct xsocket_n)) +
2285 2 * ROUNDUP64(sizeof(struct xsockbuf_n)) +
2286 ROUNDUP64(sizeof(struct xsockstat_n));
2287
2288 buf = _MALLOC(item_size, M_TEMP, M_WAITOK | M_ZERO);
2289 if (buf == NULL) {
2290 return ENOMEM;
2291 }
2292
2293 lck_mtx_lock(ctl_mtx);
2294
2295 n = kctlstat.kcs_pcbcount;
2296
2297 if (req->oldptr == USER_ADDR_NULL) {
2298 req->oldidx = (size_t)(n + n / 8) * item_size;
2299 goto done;
2300 }
2301 if (req->newptr != USER_ADDR_NULL) {
2302 error = EPERM;
2303 goto done;
2304 }
2305 bzero(&xsg, sizeof(xsg));
2306 xsg.xg_len = sizeof(xsg);
2307 xsg.xg_count = n;
2308 xsg.xg_gen = kctlstat.kcs_gencnt;
2309 xsg.xg_sogen = so_gencnt;
2310 error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
2311 if (error) {
2312 goto done;
2313 }
2314 /*
2315 * We are done if there is no pcb
2316 */
2317 if (n == 0) {
2318 goto done;
2319 }
2320
2321 for (i = 0, kctl = TAILQ_FIRST(&ctl_head);
2322 i < n && kctl != NULL;
2323 kctl = TAILQ_NEXT(kctl, next)) {
2324 struct ctl_cb *kcb;
2325
2326 for (kcb = TAILQ_FIRST(&kctl->kcb_head);
2327 i < n && kcb != NULL;
2328 i++, kcb = TAILQ_NEXT(kcb, next)) {
2329 struct xkctlpcb *xk = (struct xkctlpcb *)buf;
2330 struct xsocket_n *xso = (struct xsocket_n *)
2331 ADVANCE64(xk, sizeof(*xk));
2332 struct xsockbuf_n *xsbrcv = (struct xsockbuf_n *)
2333 ADVANCE64(xso, sizeof(*xso));
2334 struct xsockbuf_n *xsbsnd = (struct xsockbuf_n *)
2335 ADVANCE64(xsbrcv, sizeof(*xsbrcv));
2336 struct xsockstat_n *xsostats = (struct xsockstat_n *)
2337 ADVANCE64(xsbsnd, sizeof(*xsbsnd));
2338
2339 bzero(buf, item_size);
2340
2341 xk->xkp_len = sizeof(struct xkctlpcb);
2342 xk->xkp_kind = XSO_KCB;
2343 xk->xkp_unit = kcb->sac.sc_unit;
2344 xk->xkp_kctpcb = (uint64_t)VM_KERNEL_ADDRPERM(kcb);
2345 xk->xkp_kctlref = (uint64_t)VM_KERNEL_ADDRPERM(kctl);
2346 xk->xkp_kctlid = kctl->id;
2347 strlcpy(xk->xkp_kctlname, kctl->name,
2348 sizeof(xk->xkp_kctlname));
2349
2350 sotoxsocket_n(kcb->so, xso);
2351 sbtoxsockbuf_n(kcb->so ?
2352 &kcb->so->so_rcv : NULL, xsbrcv);
2353 sbtoxsockbuf_n(kcb->so ?
2354 &kcb->so->so_snd : NULL, xsbsnd);
2355 sbtoxsockstat_n(kcb->so, xsostats);
2356
2357 error = SYSCTL_OUT(req, buf, item_size);
2358 }
2359 }
2360
2361 if (error == 0) {
2362 /*
2363 * Give the user an updated idea of our state.
2364 * If the generation differs from what we told
2365 * her before, she knows that something happened
2366 * while we were processing this request, and it
2367 * might be necessary to retry.
2368 */
2369 bzero(&xsg, sizeof(xsg));
2370 xsg.xg_len = sizeof(xsg);
2371 xsg.xg_count = n;
2372 xsg.xg_gen = kctlstat.kcs_gencnt;
2373 xsg.xg_sogen = so_gencnt;
2374 error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
2375 if (error) {
2376 goto done;
2377 }
2378 }
2379
2380done:
2381 lck_mtx_unlock(ctl_mtx);
2382
2383 return error;
2384}
2385
2386int
2387kctl_getstat SYSCTL_HANDLER_ARGS
2388{
2389#pragma unused(oidp, arg1, arg2)
2390 int error = 0;
2391
2392 lck_mtx_lock(ctl_mtx);
2393
2394 if (req->newptr != USER_ADDR_NULL) {
2395 error = EPERM;
2396 goto done;
2397 }
2398 if (req->oldptr == USER_ADDR_NULL) {
2399 req->oldidx = sizeof(struct kctlstat);
2400 goto done;
2401 }
2402
2403 error = SYSCTL_OUT(req, &kctlstat,
2404 MIN(sizeof(struct kctlstat), req->oldlen));
2405done:
2406 lck_mtx_unlock(ctl_mtx);
2407 return error;
2408}
2409
2410void
2411kctl_fill_socketinfo(struct socket *so, struct socket_info *si)
2412{
2413 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
2414 struct kern_ctl_info *kcsi =
2415 &si->soi_proto.pri_kern_ctl;
2416 struct kctl *kctl = kcb->kctl;
2417
2418 si->soi_kind = SOCKINFO_KERN_CTL;
2419
2420 if (kctl == 0) {
2421 return;
2422 }
2423
2424 kcsi->kcsi_id = kctl->id;
2425 kcsi->kcsi_reg_unit = kctl->reg_unit;
2426 kcsi->kcsi_flags = kctl->flags;
2427 kcsi->kcsi_recvbufsize = kctl->recvbufsize;
2428 kcsi->kcsi_sendbufsize = kctl->sendbufsize;
2429 kcsi->kcsi_unit = kcb->sac.sc_unit;
2430 strlcpy(kcsi->kcsi_name, kctl->name, MAX_KCTL_NAME);
2431}