]> git.saurik.com Git - apple/xnu.git/blame - bsd/kern/kern_control.c
xnu-7195.81.3.tar.gz
[apple/xnu.git] / bsd / kern / kern_control.c
CommitLineData
9bccf70c 1/*
bca245ac 2 * Copyright (c) 1999-2020 Apple Inc. All rights reserved.
9bccf70c 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
fe8ab488 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
fe8ab488 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
fe8ab488 17 *
2d21ac55
A
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
fe8ab488 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
9bccf70c 27 */
9bccf70c
A
28
29/*
91447636
A
30 * Kernel Control domain - allows control connections to
31 * and to read/write data.
9bccf70c 32 *
91447636 33 * Vincent Lubet, 040506
9bccf70c
A
34 * Christophe Allie, 010928
35 * Justin C. Walker, 990319
36 */
37
38#include <sys/types.h>
39#include <sys/param.h>
40#include <sys/systm.h>
41#include <sys/syslog.h>
42#include <sys/socket.h>
43#include <sys/socketvar.h>
44#include <sys/protosw.h>
45#include <sys/domain.h>
46#include <sys/malloc.h>
47#include <sys/mbuf.h>
9bccf70c
A
48#include <sys/sys_domain.h>
49#include <sys/kern_event.h>
50#include <sys/kern_control.h>
2d21ac55 51#include <sys/kauth.h>
fe8ab488 52#include <sys/sysctl.h>
3e170ce0 53#include <sys/proc_info.h>
9bccf70c
A
54#include <net/if_var.h>
55
56#include <mach/vm_types.h>
9bccf70c
A
57
58#include <kern/thread.h>
59
/*
 * Registered kernel control: one per kern_ctl_reg() registration.
 * Entries are chained on ctl_head and protected by ctl_mtx.
 */
struct kctl {
	TAILQ_ENTRY(kctl) next;         /* controller chain */
	kern_ctl_ref kctlref;           /* opaque reference handed to clients */

	/* controller information provided when registering */
	char name[MAX_KCTL_NAME];       /* unique identifier */
	u_int32_t id;
	u_int32_t reg_unit;

	/* misc communication information */
	u_int32_t flags;                /* support flags */
	u_int32_t recvbufsize;          /* request more than the default buffer size */
	u_int32_t sendbufsize;          /* request more than the default buffer size */

	/* Dispatch functions */
	ctl_setup_func setup;           /* Setup contact */
	ctl_bind_func bind;             /* Prepare contact */
	ctl_connect_func connect;       /* Make contact */
	ctl_disconnect_func disconnect; /* Break contact */
	ctl_send_func send;             /* Send data to nke */
	ctl_send_list_func send_list;   /* Send list of packets */
	ctl_setopt_func setopt;         /* set kctl configuration */
	ctl_getopt_func getopt;         /* get kctl configuration */
	ctl_rcvd_func rcvd;             /* Notify nke when client reads data */

	/* active control blocks (connections) for this controller */
	TAILQ_HEAD(, ctl_cb) kcb_head;
	u_int32_t lastunit;             /* hint for dynamic unit allocation */
};
88
#if DEVELOPMENT || DEBUG
/* Connection-state tracking, used only for debug assertions/panics. */
enum ctl_status {
	KCTL_DISCONNECTED = 0,
	KCTL_CONNECTING = 1,
	KCTL_CONNECTED = 2
};
#endif /* DEVELOPMENT || DEBUG */
96
/*
 * Per-socket kernel control block (the protocol PCB).
 * Linked on its kctl's kcb_head; list membership guarded by ctl_mtx,
 * use counts guarded by the per-socket mutex (mtx).
 */
struct ctl_cb {
	TAILQ_ENTRY(ctl_cb) next;       /* controller chain */
	lck_mtx_t *mtx;                 /* per-socket mutex */
	struct socket *so;              /* controlling socket */
	struct kctl *kctl;              /* back pointer to controller */
	void *userdata;                 /* opaque cookie owned by the kctl client */
	struct sockaddr_ctl sac;        /* bound/connected address (id + unit) */
	u_int32_t usecount;             /* enqueue-path refs; waited on in disconnect */
	u_int32_t kcb_usecount;         /* concurrent-entry count for serialization */
	u_int32_t require_clearing_count; /* writers waiting for exclusive access */
#if DEVELOPMENT || DEBUG
	enum ctl_status status;         /* debug-only connection state */
#endif /* DEVELOPMENT || DEBUG */
};
111
/* Round a size up to the next multiple of 8 bytes (u_int64_t alignment). */
#ifndef ROUNDUP64
#define ROUNDUP64(x) P2ROUNDUP((x), sizeof (u_int64_t))
#endif

/* Advance pointer p by n bytes, keeping 8-byte alignment. */
#ifndef ADVANCE64
#define ADVANCE64(p, n) (void*)((char *)(p) + ROUNDUP64(n))
#endif
119
/*
 * Definitions and variables for the controls we support
 */

#define CTL_SENDSIZE (2 * 1024) /* default buffer size */
#define CTL_RECVSIZE (8 * 1024) /* default buffer size */

/*
 * Global state: unit-number limit and the locks guarding the
 * controller list.
 */

const u_int32_t ctl_maxunit = 65536;
static lck_grp_attr_t *ctl_lck_grp_attr = 0;
static lck_attr_t *ctl_lck_attr = 0;
static lck_grp_t *ctl_lck_grp = 0;
static lck_mtx_t *ctl_mtx;      /* guards ctl_head and kcb lists */

/* all the controllers are chained */
TAILQ_HEAD(kctl_list, kctl) ctl_head;
91447636
A
139
/* Socket user-request handlers (pru_* entry points). */
static int ctl_attach(struct socket *, int, struct proc *);
static int ctl_detach(struct socket *);
static int ctl_sofreelastref(struct socket *so);
static int ctl_bind(struct socket *, struct sockaddr *, struct proc *);
static int ctl_connect(struct socket *, struct sockaddr *, struct proc *);
static int ctl_disconnect(struct socket *);
static int ctl_ioctl(struct socket *so, u_long cmd, caddr_t data,
    struct ifnet *ifp, struct proc *p);
static int ctl_send(struct socket *, int, struct mbuf *,
    struct sockaddr *, struct mbuf *, struct proc *);
static int ctl_send_list(struct socket *, int, struct mbuf *,
    struct sockaddr *, struct mbuf *, struct proc *);
static int ctl_ctloutput(struct socket *, struct sockopt *);
static int ctl_peeraddr(struct socket *so, struct sockaddr **nam);
static int ctl_usr_rcvd(struct socket *so, int flags);

/* Controller lookup; callers hold ctl_mtx. */
static struct kctl *ctl_find_by_name(const char *);
static struct kctl *ctl_find_by_id_unit(u_int32_t id, u_int32_t unit);

static struct socket *kcb_find_socket(kern_ctl_ref kctlref, u_int32_t unit,
    u_int32_t *);
static struct ctl_cb *kcb_find(struct kctl *, u_int32_t unit);
static void ctl_post_msg(u_int32_t event_code, u_int32_t id);

/* Per-socket locking callbacks installed in the protosw. */
static int ctl_lock(struct socket *, int, void *);
static int ctl_unlock(struct socket *, int, void *);
static lck_mtx_t * ctl_getlock(struct socket *, int);
9bccf70c 167
/* User-request dispatch table shared by both kctl protosw entries. */
static struct pr_usrreqs ctl_usrreqs = {
	.pru_attach = ctl_attach,
	.pru_bind = ctl_bind,
	.pru_connect = ctl_connect,
	.pru_control = ctl_ioctl,
	.pru_detach = ctl_detach,
	.pru_disconnect = ctl_disconnect,
	.pru_peeraddr = ctl_peeraddr,
	.pru_rcvd = ctl_usr_rcvd,
	.pru_send = ctl_send,
	.pru_send_list = ctl_send_list,
	.pru_sosend = sosend,
	.pru_sosend_list = sosend_list,
	.pru_soreceive = soreceive,
	.pru_soreceive_list = soreceive_list,
};
184
/*
 * Protocol switch entries: one SOCK_DGRAM and one SOCK_STREAM flavor of
 * SYSPROTO_CONTROL; only the type and PR_ATOMIC flag differ.
 */
static struct protosw kctlsw[] = {
	{
		.pr_type = SOCK_DGRAM,
		.pr_protocol = SYSPROTO_CONTROL,
		.pr_flags = PR_ATOMIC | PR_CONNREQUIRED | PR_PCBLOCK | PR_WANTRCVD,
		.pr_ctloutput = ctl_ctloutput,
		.pr_usrreqs = &ctl_usrreqs,
		.pr_lock = ctl_lock,
		.pr_unlock = ctl_unlock,
		.pr_getlock = ctl_getlock,
	},
	{
		.pr_type = SOCK_STREAM,
		.pr_protocol = SYSPROTO_CONTROL,
		.pr_flags = PR_CONNREQUIRED | PR_PCBLOCK | PR_WANTRCVD,
		.pr_ctloutput = ctl_ctloutput,
		.pr_usrreqs = &ctl_usrreqs,
		.pr_lock = ctl_lock,
		.pr_unlock = ctl_unlock,
		.pr_getlock = ctl_getlock,
	}
};
207
fe8ab488
A
/* sysctl handlers implemented elsewhere in this file. */
__private_extern__ int kctl_reg_list SYSCTL_HANDLER_ARGS;
__private_extern__ int kctl_pcblist SYSCTL_HANDLER_ARGS;
__private_extern__ int kctl_getstat SYSCTL_HANDLER_ARGS;


/* net.systm.kctl sysctl subtree. */
SYSCTL_NODE(_net_systm, OID_AUTO, kctl,
    CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Kernel control family");

struct kctlstat kctlstat;       /* global counters exported via sysctl */
SYSCTL_PROC(_net_systm_kctl, OID_AUTO, stats,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
    kctl_getstat, "S,kctlstat", "");

SYSCTL_PROC(_net_systm_kctl, OID_AUTO, reg_list,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
    kctl_reg_list, "S,xkctl_reg", "");

SYSCTL_PROC(_net_systm_kctl, OID_AUTO, pcblist,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
    kctl_pcblist, "S,xkctlpcb", "");

/* Upper bound for auto-grown receive buffers (see ctl_rcvbspace). */
u_int32_t ctl_autorcvbuf_max = 256 * 1024;
SYSCTL_INT(_net_systm_kctl, OID_AUTO, autorcvbufmax,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_autorcvbuf_max, 0, "");

/* High-water mark actually reached by auto-grown receive buffers. */
u_int32_t ctl_autorcvbuf_high = 0;
SYSCTL_INT(_net_systm_kctl, OID_AUTO, autorcvbufhigh,
    CTLFLAG_RD | CTLFLAG_LOCKED, &ctl_autorcvbuf_high, 0, "");

/* Verbose logging toggle for this subsystem. */
u_int32_t ctl_debug = 0;
SYSCTL_INT(_net_systm_kctl, OID_AUTO, debug,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_debug, 0, "");

#if DEVELOPMENT || DEBUG
/* When set, double connect attempts panic instead of being tolerated. */
u_int32_t ctl_panic_debug = 0;
SYSCTL_INT(_net_systm_kctl, OID_AUTO, panicdebug,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_panic_debug, 0, "");
#endif /* DEVELOPMENT || DEBUG */
246
/*
 * Table mapping opaque kern_ctl_ref handles to struct kctl pointers.
 * Grown in increments of KCTL_TBL_INC; guarded by ctl_mtx with
 * kctl_tbl_growing serializing growers.
 */
#define KCTL_TBL_INC 16

static uintptr_t kctl_tbl_size = 0;
static u_int32_t kctl_tbl_growing = 0;
static u_int32_t kctl_tbl_growing_waiting = 0;
static uintptr_t kctl_tbl_count = 0;
static struct kctl **kctl_table = NULL;
static uintptr_t kctl_ref_gencnt = 0;   /* generation counter mixed into refs */

static void kctl_tbl_grow(void);
static kern_ctl_ref kctl_make_ref(struct kctl *kctl);
static void kctl_delete_ref(kern_ctl_ref);
static struct kctl *kctl_from_ref(kern_ctl_ref);
260
/*
 * Install the protosw's for the Kernel Control manager.
 * Called once at domain initialization; any allocation failure here is
 * fatal (panic), since the system cannot run without this family.
 */
__private_extern__ void
kern_control_init(struct domain *dp)
{
	struct protosw *pr;
	int i;
	int kctl_proto_count = (sizeof(kctlsw) / sizeof(struct protosw));

	VERIFY(!(dp->dom_flags & DOM_INITIALIZED));
	VERIFY(dp == systemdomain);

	/* Set up the lock group/attributes before any mutex is created. */
	ctl_lck_grp_attr = lck_grp_attr_alloc_init();
	if (ctl_lck_grp_attr == NULL) {
		panic("%s: lck_grp_attr_alloc_init failed\n", __func__);
		/* NOTREACHED */
	}

	ctl_lck_grp = lck_grp_alloc_init("Kernel Control Protocol",
	    ctl_lck_grp_attr);
	if (ctl_lck_grp == NULL) {
		panic("%s: lck_grp_alloc_init failed\n", __func__);
		/* NOTREACHED */
	}

	ctl_lck_attr = lck_attr_alloc_init();
	if (ctl_lck_attr == NULL) {
		panic("%s: lck_attr_alloc_init failed\n", __func__);
		/* NOTREACHED */
	}

	/* Global mutex guarding ctl_head and per-controller kcb lists. */
	ctl_mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
	if (ctl_mtx == NULL) {
		panic("%s: lck_mtx_alloc_init failed\n", __func__);
		/* NOTREACHED */
	}
	TAILQ_INIT(&ctl_head);

	/* Register both (DGRAM and STREAM) protosw entries with the domain. */
	for (i = 0, pr = &kctlsw[0]; i < kctl_proto_count; i++, pr++) {
		net_add_proto(pr, dp, 1);
	}
}
9bccf70c 304
91447636
A
305static void
306kcb_delete(struct ctl_cb *kcb)
307{
308 if (kcb != 0) {
0a7de745 309 if (kcb->mtx != 0) {
91447636 310 lck_mtx_free(kcb->mtx, ctl_lck_grp);
0a7de745 311 }
91447636
A
312 FREE(kcb, M_TEMP);
313 }
9bccf70c
A
314}
315
/*
 * Kernel Controller user-request functions
 * attach function must exist and succeed
 * detach not necessary
 * we need a pcb for the per socket mutex
 */
static int
ctl_attach(struct socket *so, int proto, struct proc *p)
{
#pragma unused(proto, p)
	int error = 0;
	struct ctl_cb *kcb = 0;

	/* Allocate and zero the per-socket control block. */
	MALLOC(kcb, struct ctl_cb *, sizeof(struct ctl_cb), M_TEMP, M_WAITOK);
	if (kcb == NULL) {
		error = ENOMEM;
		goto quit;
	}
	bzero(kcb, sizeof(struct ctl_cb));

	kcb->mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
	if (kcb->mtx == NULL) {
		error = ENOMEM;
		goto quit;
	}
	kcb->so = so;
	so->so_pcb = (caddr_t)kcb;

quit:
	if (error != 0) {
		/* kcb_delete tolerates a partially-initialized (or NULL) kcb */
		kcb_delete(kcb);
		kcb = 0;
	}
	return error;
}
351
/*
 * Last-reference teardown for a kctl socket: unlink the control block
 * from its controller (under ctl_mtx), free it, and release the socket.
 */
static int
ctl_sofreelastref(struct socket *so)
{
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

	so->so_pcb = 0;

	if (kcb != 0) {
		struct kctl *kctl;
		if ((kctl = kcb->kctl) != 0) {
			lck_mtx_lock(ctl_mtx);
			TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
			kctlstat.kcs_pcbcount--;
			kctlstat.kcs_gencnt++;
			lck_mtx_unlock(ctl_mtx);
		}
		kcb_delete(kcb);
	}
	sofreelastref(so, 1);
	return 0;
}
373
/*
 * Use this function and ctl_kcb_require_clearing to serialize
 * critical calls into the kctl subsystem
 */
static void
ctl_kcb_increment_use_count(struct ctl_cb *kcb, lck_mtx_t *mutex_held)
{
	LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED);
	/* Block while an exclusive ("clearing") caller holds the kcb. */
	while (kcb->require_clearing_count > 0) {
		msleep(&kcb->require_clearing_count, mutex_held, PSOCK | PCATCH, "kcb_require_clearing", NULL);
	}
	kcb->kcb_usecount++;
}
387
/*
 * Upgrade the caller's use-count reference to exclusive access:
 * announce intent (so new entrants block in increment_use_count),
 * then wait until all other concurrent users have drained.
 * Caller must already hold a use count and the per-socket mutex.
 */
static void
ctl_kcb_require_clearing(struct ctl_cb *kcb, lck_mtx_t *mutex_held)
{
	assert(kcb->kcb_usecount != 0);
	kcb->require_clearing_count++;
	kcb->kcb_usecount--;
	while (kcb->kcb_usecount > 0) { // we need to wait until no one else is running
		msleep(&kcb->kcb_usecount, mutex_held, PSOCK | PCATCH, "kcb_usecount", NULL);
	}
	kcb->kcb_usecount++;
}
399
/*
 * Drop exclusive access acquired via ctl_kcb_require_clearing and wake
 * any threads blocked waiting for it in ctl_kcb_increment_use_count.
 */
static void
ctl_kcb_done_clearing(struct ctl_cb *kcb)
{
	assert(kcb->require_clearing_count != 0);
	kcb->require_clearing_count--;
	wakeup((caddr_t)&kcb->require_clearing_count);
}
407
/*
 * Release a use-count reference and wake a thread waiting for the
 * use count to drain (see ctl_kcb_require_clearing).
 */
static void
ctl_kcb_decrement_use_count(struct ctl_cb *kcb)
{
	assert(kcb->kcb_usecount != 0);
	kcb->kcb_usecount--;
	wakeup((caddr_t)&kcb->kcb_usecount);
}
415
91447636
A
/*
 * Detach the socket from the kctl subsystem. Takes exclusive access to
 * the kcb, notifies the controller if a bound-but-unconnected unit needs
 * cleanup, then marks the pcb for clearing.
 */
static int
ctl_detach(struct socket *so)
{
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

	if (kcb == 0) {
		return 0;
	}

	lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
	ctl_kcb_increment_use_count(kcb, mtx_held);
	ctl_kcb_require_clearing(kcb, mtx_held);

	if (kcb->kctl != NULL && kcb->kctl->bind != NULL &&
	    kcb->userdata != NULL && !(so->so_state & SS_ISCONNECTED)) {
		// The unit was bound, but not connected
		// Invoke the disconnected call to cleanup
		if (kcb->kctl->disconnect != NULL) {
			/* callouts are made with the socket unlocked */
			socket_unlock(so, 0);
			(*kcb->kctl->disconnect)(kcb->kctl->kctlref,
			    kcb->sac.sc_unit, kcb->userdata);
			socket_lock(so, 0);
		}
	}

	soisdisconnected(so);
#if DEVELOPMENT || DEBUG
	kcb->status = KCTL_DISCONNECTED;
#endif /* DEVELOPMENT || DEBUG */
	so->so_flags |= SOF_PCBCLEARING;
	ctl_kcb_done_clearing(kcb);
	ctl_kcb_decrement_use_count(kcb);
	return 0;
}
450
/*
 * Common setup for bind and connect: validate the sockaddr_ctl, locate
 * the registered controller, pick or validate a unit number, link the
 * kcb onto the controller's list, and reserve socket buffer space.
 * Returns 0 on success (or if already set up) or an errno.
 */
static int
ctl_setup_kctl(struct socket *so, struct sockaddr *nam, struct proc *p)
{
	struct kctl *kctl = NULL;
	int error = 0;
	struct sockaddr_ctl sa;
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
	struct ctl_cb *kcb_next = NULL;
	u_quad_t sbmaxsize;
	u_int32_t recvbufsize, sendbufsize;

	if (kcb == 0) {
		panic("ctl_setup_kctl so_pcb null\n");
	}

	if (kcb->kctl != NULL) {
		// Already set up, skip
		return 0;
	}

	if (nam->sa_len != sizeof(struct sockaddr_ctl)) {
		return EINVAL;
	}

	/* local copy: the caller's sockaddr is not modified */
	bcopy(nam, &sa, sizeof(struct sockaddr_ctl));

	lck_mtx_lock(ctl_mtx);
	kctl = ctl_find_by_id_unit(sa.sc_id, sa.sc_unit);
	if (kctl == NULL) {
		lck_mtx_unlock(ctl_mtx);
		return ENOENT;
	}

	/* socket type must match how the controller registered */
	if (((kctl->flags & CTL_FLAG_REG_SOCK_STREAM) &&
	    (so->so_type != SOCK_STREAM)) ||
	    (!(kctl->flags & CTL_FLAG_REG_SOCK_STREAM) &&
	    (so->so_type != SOCK_DGRAM))) {
		lck_mtx_unlock(ctl_mtx);
		return EPROTOTYPE;
	}

	/* privileged controllers require a superuser credential */
	if (kctl->flags & CTL_FLAG_PRIVILEGED) {
		if (p == 0) {
			lck_mtx_unlock(ctl_mtx);
			return EINVAL;
		}
		if (kauth_cred_issuser(kauth_cred_get()) == 0) {
			lck_mtx_unlock(ctl_mtx);
			return EPERM;
		}
	}

	if ((kctl->flags & CTL_FLAG_REG_ID_UNIT) || sa.sc_unit != 0) {
		/* explicit unit requested: must not already be in use */
		if (kcb_find(kctl, sa.sc_unit) != NULL) {
			lck_mtx_unlock(ctl_mtx);
			return EBUSY;
		}
	} else if (kctl->setup != NULL) {
		/* controller chooses the unit via its setup callback */
		error = (*kctl->setup)(&sa.sc_unit, &kcb->userdata);
		if (error != 0) {
			lck_mtx_unlock(ctl_mtx);
			return error;
		}
	} else {
		/* Find an unused ID, assumes control IDs are in order */
		u_int32_t unit = 1;

		TAILQ_FOREACH(kcb_next, &kctl->kcb_head, next) {
			if (kcb_next->sac.sc_unit > unit) {
				/* Found a gap, lets fill it in */
				break;
			}
			unit = kcb_next->sac.sc_unit + 1;
			if (unit == ctl_maxunit) {
				break;
			}
		}

		if (unit == ctl_maxunit) {
			lck_mtx_unlock(ctl_mtx);
			return EBUSY;
		}

		sa.sc_unit = unit;
	}

	bcopy(&sa, &kcb->sac, sizeof(struct sockaddr_ctl));
	kcb->kctl = kctl;
	if (kcb_next != NULL) {
		/* keep the list ordered by unit: insert into the gap found */
		TAILQ_INSERT_BEFORE(kcb_next, kcb, next);
	} else {
		TAILQ_INSERT_TAIL(&kctl->kcb_head, kcb, next);
	}
	kctlstat.kcs_pcbcount++;
	kctlstat.kcs_gencnt++;
	kctlstat.kcs_connections++;
	lck_mtx_unlock(ctl_mtx);

	/*
	 * rdar://15526688: Limit the send and receive sizes to sb_max
	 * by using the same scaling as sbreserve()
	 */
	sbmaxsize = (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES);

	if (kctl->sendbufsize > sbmaxsize) {
		sendbufsize = (u_int32_t)sbmaxsize;
	} else {
		sendbufsize = kctl->sendbufsize;
	}

	if (kctl->recvbufsize > sbmaxsize) {
		recvbufsize = (u_int32_t)sbmaxsize;
	} else {
		recvbufsize = kctl->recvbufsize;
	}

	error = soreserve(so, sendbufsize, recvbufsize);
	if (error) {
		if (ctl_debug) {
			printf("%s - soreserve(%llx, %u, %u) error %d\n",
			    __func__, (uint64_t)VM_KERNEL_ADDRPERM(so),
			    sendbufsize, recvbufsize, error);
		}
		goto done;
	}

done:
	if (error) {
		/* undo the list insertion and stats done above */
		soisdisconnected(so);
#if DEVELOPMENT || DEBUG
		kcb->status = KCTL_DISCONNECTED;
#endif /* DEVELOPMENT || DEBUG */
		lck_mtx_lock(ctl_mtx);
		TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
		kcb->kctl = NULL;
		kcb->sac.sc_unit = 0;
		kctlstat.kcs_pcbcount--;
		kctlstat.kcs_gencnt++;
		kctlstat.kcs_conn_fail++;
		lck_mtx_unlock(ctl_mtx);
	}
	return error;
}
594
/*
 * Bind a kctl socket to a controller/unit without connecting it.
 * Only valid for controllers that registered a bind callback.
 */
static int
ctl_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
{
	int error = 0;
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

	if (kcb == NULL) {
		panic("ctl_bind so_pcb null\n");
	}

	/* serialize against other critical kctl entry points */
	lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
	ctl_kcb_increment_use_count(kcb, mtx_held);
	ctl_kcb_require_clearing(kcb, mtx_held);

	error = ctl_setup_kctl(so, nam, p);
	if (error) {
		goto out;
	}

	if (kcb->kctl == NULL) {
		panic("ctl_bind kctl null\n");
	}

	if (kcb->kctl->bind == NULL) {
		error = EINVAL;
		goto out;
	}

	/* callout is made with the socket unlocked */
	socket_unlock(so, 0);
	error = (*kcb->kctl->bind)(kcb->kctl->kctlref, &kcb->sac, &kcb->userdata);
	socket_lock(so, 0);

out:
	ctl_kcb_done_clearing(kcb);
	ctl_kcb_decrement_use_count(kcb);
	return error;
}
632
/*
 * Connect a kctl socket: set up the controller/unit association, then
 * invoke the controller's connect callback. On callback failure the
 * association is fully torn down again.
 */
static int
ctl_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
{
	int error = 0;
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

	if (kcb == NULL) {
		panic("ctl_connect so_pcb null\n");
	}

	/* serialize against other critical kctl entry points */
	lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
	ctl_kcb_increment_use_count(kcb, mtx_held);
	ctl_kcb_require_clearing(kcb, mtx_held);

#if DEVELOPMENT || DEBUG
	if (kcb->status != KCTL_DISCONNECTED && ctl_panic_debug) {
		panic("kctl already connecting/connected");
	}
	kcb->status = KCTL_CONNECTING;
#endif /* DEVELOPMENT || DEBUG */

	error = ctl_setup_kctl(so, nam, p);
	if (error) {
		goto out;
	}

	if (kcb->kctl == NULL) {
		panic("ctl_connect kctl null\n");
	}

	soisconnecting(so);
	/* callout is made with the socket unlocked */
	socket_unlock(so, 0);
	error = (*kcb->kctl->connect)(kcb->kctl->kctlref, &kcb->sac, &kcb->userdata);
	socket_lock(so, 0);
	if (error) {
		goto end;
	}
	soisconnected(so);
#if DEVELOPMENT || DEBUG
	kcb->status = KCTL_CONNECTED;
#endif /* DEVELOPMENT || DEBUG */

end:
	if (error && kcb->kctl->disconnect) {
		/*
		 * XXX Make sure we Don't check the return value
		 * of disconnect here.
		 * ipsec/utun_ctl_disconnect will return error when
		 * disconnect gets called after connect failure.
		 * However if we decide to check for disconnect return
		 * value here. Please make sure to revisit
		 * ipsec/utun_ctl_disconnect.
		 */
		socket_unlock(so, 0);
		(*kcb->kctl->disconnect)(kcb->kctl->kctlref, kcb->sac.sc_unit, kcb->userdata);
		socket_lock(so, 0);
	}
	if (error) {
		/* roll back everything ctl_setup_kctl established */
		soisdisconnected(so);
#if DEVELOPMENT || DEBUG
		kcb->status = KCTL_DISCONNECTED;
#endif /* DEVELOPMENT || DEBUG */
		lck_mtx_lock(ctl_mtx);
		TAILQ_REMOVE(&kcb->kctl->kcb_head, kcb, next);
		kcb->kctl = NULL;
		kcb->sac.sc_unit = 0;
		kctlstat.kcs_pcbcount--;
		kctlstat.kcs_gencnt++;
		kctlstat.kcs_conn_fail++;
		lck_mtx_unlock(ctl_mtx);
	}
out:
	ctl_kcb_done_clearing(kcb);
	ctl_kcb_decrement_use_count(kcb);
	return error;
}
709
/*
 * Disconnect a kctl socket: notify the controller, then unlink the kcb
 * from the controller list. Waits (off the socket lock, under ctl_mtx)
 * for in-flight enqueue-path references (usecount) to drain before the
 * unlink so enqueues never see a half-torn-down kcb.
 */
static int
ctl_disconnect(struct socket *so)
{
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

	if ((kcb = (struct ctl_cb *)so->so_pcb)) {
		lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
		ctl_kcb_increment_use_count(kcb, mtx_held);
		ctl_kcb_require_clearing(kcb, mtx_held);
		struct kctl *kctl = kcb->kctl;

		if (kctl && kctl->disconnect) {
			/* callout is made with the socket unlocked */
			socket_unlock(so, 0);
			(*kctl->disconnect)(kctl->kctlref, kcb->sac.sc_unit,
			    kcb->userdata);
			socket_lock(so, 0);
		}

		soisdisconnected(so);
#if DEVELOPMENT || DEBUG
		kcb->status = KCTL_DISCONNECTED;
#endif /* DEVELOPMENT || DEBUG */

		/* drop the socket lock before taking ctl_mtx (lock order) */
		socket_unlock(so, 0);
		lck_mtx_lock(ctl_mtx);
		kcb->kctl = 0;
		kcb->sac.sc_unit = 0;
		while (kcb->usecount != 0) {
			msleep(&kcb->usecount, ctl_mtx, 0, "kcb->usecount", 0);
		}
		TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
		kctlstat.kcs_pcbcount--;
		kctlstat.kcs_gencnt++;
		lck_mtx_unlock(ctl_mtx);
		socket_lock(so, 0);
		ctl_kcb_done_clearing(kcb);
		ctl_kcb_decrement_use_count(kcb);
	}
	return 0;
}
750
91447636
A
751static int
752ctl_peeraddr(struct socket *so, struct sockaddr **nam)
9bccf70c 753{
0a7de745
A
754 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
755 struct kctl *kctl;
756 struct sockaddr_ctl sc;
fe8ab488 757
0a7de745
A
758 if (kcb == NULL) { /* sanity check */
759 return ENOTCONN;
760 }
fe8ab488 761
0a7de745
A
762 if ((kctl = kcb->kctl) == NULL) {
763 return EINVAL;
764 }
fe8ab488 765
91447636
A
766 bzero(&sc, sizeof(struct sockaddr_ctl));
767 sc.sc_len = sizeof(struct sockaddr_ctl);
768 sc.sc_family = AF_SYSTEM;
769 sc.ss_sysaddr = AF_SYS_CONTROL;
770 sc.sc_id = kctl->id;
5c9f4661 771 sc.sc_unit = kcb->sac.sc_unit;
fe8ab488 772
91447636 773 *nam = dup_sockaddr((struct sockaddr *)&sc, 1);
fe8ab488 774
0a7de745 775 return 0;
fe8ab488
A
776}
777
/*
 * Shrink an auto-grown receive buffer back toward its ideal size once
 * the client has read data (called from ctl_usr_rcvd).
 */
static void
ctl_sbrcv_trim(struct socket *so)
{
	struct sockbuf *sb = &so->so_rcv;

	if (sb->sb_hiwat > sb->sb_idealsize) {
		u_int32_t diff;
		int32_t trim;

		/*
		 * The difference between the ideal size and the
		 * current size is the upper bound of the trimage
		 */
		diff = sb->sb_hiwat - sb->sb_idealsize;
		/*
		 * We cannot trim below the outstanding data
		 */
		trim = sb->sb_hiwat - sb->sb_cc;

		trim = imin(trim, (int32_t)diff);

		if (trim > 0) {
			sbreserve(sb, (sb->sb_hiwat - trim));

			if (ctl_debug) {
				printf("%s - shrunk to %d\n",
				    __func__, sb->sb_hiwat);
			}
		}
	}
}
809
39236c6e
A
/*
 * pru_rcvd handler: notify the controller that the client has read
 * data, then try to trim an over-grown receive buffer.
 */
static int
ctl_usr_rcvd(struct socket *so, int flags)
{
	int error = 0;
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
	struct kctl *kctl;

	if (kcb == NULL) {
		return ENOTCONN;
	}

	lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
	ctl_kcb_increment_use_count(kcb, mtx_held);

	if ((kctl = kcb->kctl) == NULL) {
		error = EINVAL;
		goto out;
	}

	if (kctl->rcvd) {
		/* callout is made with the socket unlocked */
		socket_unlock(so, 0);
		(*kctl->rcvd)(kctl->kctlref, kcb->sac.sc_unit, kcb->userdata, flags);
		socket_lock(so, 0);
	}

	ctl_sbrcv_trim(so);

out:
	ctl_kcb_decrement_use_count(kcb);
	return error;
}
841
91447636
A
842static int
843ctl_send(struct socket *so, int flags, struct mbuf *m,
0a7de745
A
844 struct sockaddr *addr, struct mbuf *control,
845 struct proc *p)
9bccf70c 846{
fe8ab488 847#pragma unused(addr, p)
0a7de745
A
848 int error = 0;
849 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
850 struct kctl *kctl;
fe8ab488 851
0a7de745 852 if (control) {
fe8ab488 853 m_freem(control);
0a7de745 854 }
fe8ab488 855
0a7de745 856 if (kcb == NULL) { /* sanity check */
6d2010ae 857 error = ENOTCONN;
0a7de745 858 }
fe8ab488 859
94ff46dc
A
860 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
861 ctl_kcb_increment_use_count(kcb, mtx_held);
862
0a7de745 863 if (error == 0 && (kctl = kcb->kctl) == NULL) {
6d2010ae 864 error = EINVAL;
0a7de745 865 }
fe8ab488 866
6d2010ae 867 if (error == 0 && kctl->send) {
fe8ab488 868 so_tc_update_stats(m, so, m_get_service_class(m));
91447636 869 socket_unlock(so, 0);
5c9f4661 870 error = (*kctl->send)(kctl->kctlref, kcb->sac.sc_unit, kcb->userdata,
3e170ce0 871 m, flags);
91447636 872 socket_lock(so, 0);
6d2010ae
A
873 } else {
874 m_freem(m);
0a7de745 875 if (error == 0) {
6d2010ae 876 error = ENOTSUP;
0a7de745 877 }
91447636 878 }
0a7de745 879 if (error != 0) {
fe8ab488 880 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_send_fail);
0a7de745 881 }
ea3f0419 882 ctl_kcb_decrement_use_count(kcb);
94ff46dc 883
0a7de745 884 return error;
fe8ab488
A
885}
886
887static int
888ctl_send_list(struct socket *so, int flags, struct mbuf *m,
0a7de745
A
889 __unused struct sockaddr *addr, struct mbuf *control,
890 __unused struct proc *p)
fe8ab488 891{
0a7de745
A
892 int error = 0;
893 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
894 struct kctl *kctl;
fe8ab488 895
0a7de745 896 if (control) {
fe8ab488 897 m_freem_list(control);
0a7de745 898 }
fe8ab488 899
0a7de745 900 if (kcb == NULL) { /* sanity check */
fe8ab488 901 error = ENOTCONN;
0a7de745 902 }
fe8ab488 903
94ff46dc
A
904 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
905 ctl_kcb_increment_use_count(kcb, mtx_held);
906
0a7de745 907 if (error == 0 && (kctl = kcb->kctl) == NULL) {
fe8ab488 908 error = EINVAL;
0a7de745 909 }
fe8ab488
A
910
911 if (error == 0 && kctl->send_list) {
912 struct mbuf *nxt;
913
0a7de745 914 for (nxt = m; nxt != NULL; nxt = nxt->m_nextpkt) {
fe8ab488 915 so_tc_update_stats(nxt, so, m_get_service_class(nxt));
0a7de745 916 }
fe8ab488
A
917
918 socket_unlock(so, 0);
5c9f4661 919 error = (*kctl->send_list)(kctl->kctlref, kcb->sac.sc_unit,
3e170ce0 920 kcb->userdata, m, flags);
fe8ab488
A
921 socket_lock(so, 0);
922 } else if (error == 0 && kctl->send) {
923 while (m != NULL && error == 0) {
924 struct mbuf *nextpkt = m->m_nextpkt;
925
926 m->m_nextpkt = NULL;
927 so_tc_update_stats(m, so, m_get_service_class(m));
928 socket_unlock(so, 0);
5c9f4661 929 error = (*kctl->send)(kctl->kctlref, kcb->sac.sc_unit,
3e170ce0 930 kcb->userdata, m, flags);
fe8ab488
A
931 socket_lock(so, 0);
932 m = nextpkt;
933 }
0a7de745 934 if (m != NULL) {
fe8ab488 935 m_freem_list(m);
0a7de745 936 }
fe8ab488
A
937 } else {
938 m_freem_list(m);
0a7de745 939 if (error == 0) {
fe8ab488 940 error = ENOTSUP;
0a7de745 941 }
fe8ab488 942 }
0a7de745 943 if (error != 0) {
fe8ab488 944 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_send_list_fail);
0a7de745 945 }
ea3f0419 946 ctl_kcb_decrement_use_count(kcb);
94ff46dc 947
0a7de745 948 return error;
fe8ab488
A
949}
/*
 * Decide whether datasize bytes may be enqueued on so's receive buffer.
 * Non-critical controllers get a plain space check; critical-capable
 * controllers reserve 25% headroom for critical messages, and critical
 * data may additionally auto-grow the buffer (bounded by
 * ctl_autorcvbuf_max). Returns 0 if the data fits, ENOBUFS otherwise.
 */
static errno_t
ctl_rcvbspace(struct socket *so, size_t datasize,
    u_int32_t kctlflags, u_int32_t flags)
{
	struct sockbuf *sb = &so->so_rcv;
	u_int32_t space = sbspace(sb);
	errno_t error;

	if ((kctlflags & CTL_FLAG_REG_CRIT) == 0) {
		if ((u_int32_t) space >= datasize) {
			error = 0;
		} else {
			error = ENOBUFS;
		}
	} else if ((flags & CTL_DATA_CRIT) == 0) {
		/*
		 * Reserve 25% for critical messages
		 */
		if (space < (sb->sb_hiwat >> 2) ||
		    space < datasize) {
			error = ENOBUFS;
		} else {
			error = 0;
		}
	} else {
		size_t autorcvbuf_max;

		/*
		 * Allow overcommit of 25%
		 */
		autorcvbuf_max = min(sb->sb_idealsize + (sb->sb_idealsize >> 2),
		    ctl_autorcvbuf_max);

		if ((u_int32_t) space >= datasize) {
			error = 0;
		} else if (tcp_cansbgrow(sb) &&
		    sb->sb_hiwat < autorcvbuf_max) {
			/*
			 * Grow with a little bit of leeway
			 */
			size_t grow = datasize - space + MSIZE;
			u_int32_t cc = (u_int32_t)MIN(MIN((sb->sb_hiwat + grow), autorcvbuf_max), UINT32_MAX);

			if (sbreserve(sb, cc) == 1) {
				if (sb->sb_hiwat > ctl_autorcvbuf_high) {
					ctl_autorcvbuf_high = sb->sb_hiwat;
				}

				/*
				 * A final check
				 */
				if ((u_int32_t) sbspace(sb) >= datasize) {
					error = 0;
				} else {
					error = ENOBUFS;
				}

				if (ctl_debug) {
					printf("%s - grown to %d error %d\n",
					    __func__, sb->sb_hiwat, error);
				}
			} else {
				error = ENOBUFS;
			}
		} else {
			error = ENOBUFS;
		}
	}
	return error;
}
1021
/*
 * ctl_enqueuembuf - deliver a single mbuf packet to the kernel control
 * client identified by (kctlref, unit).
 *
 * The mbuf is consumed on success (appended to the socket receive buffer).
 * On ENOBUFS from sbappend_nodrop() the mbuf is NOT freed ("nodrop"), so
 * the caller retains ownership in that case.
 *
 * flags: CTL_DATA_EOR marks end-of-record; CTL_DATA_NOWAKEUP suppresses
 * the reader wakeup; CTL_DATA_CRIT requests the critical-space reserve
 * (see ctl_rcvbspace).
 *
 * Returns 0, or EINVAL (stale ref / unknown unit), or ENOBUFS.
 */
errno_t
ctl_enqueuembuf(kern_ctl_ref kctlref, u_int32_t unit, struct mbuf *m,
    u_int32_t flags)
{
	struct socket *so;
	errno_t error = 0;
	int len = m->m_pkthdr.len;
	u_int32_t kctlflags;

	/* Returns with the socket locked and a use count held */
	so = kcb_find_socket(kctlref, unit, &kctlflags);
	if (so == NULL) {
		return EINVAL;
	}

	if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
		error = ENOBUFS;
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
		goto bye;
	}
	if ((flags & CTL_DATA_EOR)) {
		m->m_flags |= M_EOR;
	}

	so_recv_data_stat(so, m, 0);
	if (sbappend_nodrop(&so->so_rcv, m) != 0) {
		if ((flags & CTL_DATA_NOWAKEUP) == 0) {
			sorwakeup(so);
		}
	} else {
		error = ENOBUFS;
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
	}
bye:
	if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) {
		printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
		    __func__, error, len,
		    so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);
	}

	/* Drops the lock and the use count taken by kcb_find_socket() */
	socket_unlock(so, 1);
	if (error != 0) {
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
	}

	return error;
}
1068
1069/*
1070 * Compute space occupied by mbuf like sbappendrecord
1071 */
1072static int
1073m_space(struct mbuf *m)
1074{
1075 int space = 0;
1076 struct mbuf *nxt;
1077
0a7de745 1078 for (nxt = m; nxt != NULL; nxt = nxt->m_next) {
fe8ab488 1079 space += nxt->m_len;
0a7de745 1080 }
fe8ab488 1081
0a7de745 1082 return space;
fe8ab488
A
1083}
1084
/*
 * ctl_enqueuembuf_list - deliver a chain of mbuf packets (linked via
 * m_nextpkt) to the kernel control client, one record per packet.
 *
 * Not supported for SOCK_STREAM-style controllers (EOPNOTSUPP) nor with
 * CTL_DATA_EOR (records are implicit here; EINVAL).
 *
 * Delivery may be partial: on the first packet that does not fit, the
 * loop stops.  If m_remain is non-NULL the untransmitted tail is handed
 * back to the caller there; otherwise it is freed.  Packets already
 * appended are consumed by sbappendrecord_nodrop() in either case.
 */
errno_t
ctl_enqueuembuf_list(void *kctlref, u_int32_t unit, struct mbuf *m_list,
    u_int32_t flags, struct mbuf **m_remain)
{
	struct socket *so = NULL;
	errno_t error = 0;
	struct mbuf *m, *nextpkt;
	int needwakeup = 0;
	int len = 0;
	u_int32_t kctlflags;

	/*
	 * Need to point the beginning of the list in case of early exit
	 */
	m = m_list;

	/*
	 * kcb_find_socket takes the socket lock with a reference
	 */
	so = kcb_find_socket(kctlref, unit, &kctlflags);
	if (so == NULL) {
		error = EINVAL;
		goto done;
	}

	if (kctlflags & CTL_FLAG_REG_SOCK_STREAM) {
		error = EOPNOTSUPP;
		goto done;
	}
	if (flags & CTL_DATA_EOR) {
		error = EINVAL;
		goto done;
	}

	for (m = m_list; m != NULL; m = nextpkt) {
		nextpkt = m->m_nextpkt;

		if (m->m_pkthdr.len == 0 && ctl_debug) {
			printf("%s: %llx m_pkthdr.len is 0",
			    __func__, (uint64_t)VM_KERNEL_ADDRPERM(m));
		}

		/*
		 * The mbuf is either appended or freed by sbappendrecord()
		 * so it's not reliable from a data standpoint
		 */
		len = m_space(m);
		if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
			error = ENOBUFS;
			OSIncrementAtomic64(
				(SInt64 *)&kctlstat.kcs_enqueue_fullsock);
			break;
		} else {
			/*
			 * Unlink from the list, m is on its own
			 */
			m->m_nextpkt = NULL;
			so_recv_data_stat(so, m, 0);
			if (sbappendrecord_nodrop(&so->so_rcv, m) != 0) {
				needwakeup = 1;
			} else {
				/*
				 * We free or return the remaining
				 * mbufs in the list
				 */
				m = nextpkt;
				error = ENOBUFS;
				OSIncrementAtomic64(
					(SInt64 *)&kctlstat.kcs_enqueue_fullsock);
				break;
			}
		}
	}
	/* Single wakeup after the whole batch, unless suppressed */
	if (needwakeup && (flags & CTL_DATA_NOWAKEUP) == 0) {
		sorwakeup(so);
	}

done:
	if (so != NULL) {
		if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) {
			printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
			    __func__, error, len,
			    so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);
		}

		socket_unlock(so, 1);
	}
	if (m_remain) {
		/* Hand the undelivered tail (possibly NULL) back to the caller */
		*m_remain = m;

		if (m != NULL && socket_debug && so != NULL &&
		    (so->so_options & SO_DEBUG)) {
			struct mbuf *n;

			printf("%s m_list %llx\n", __func__,
			    (uint64_t) VM_KERNEL_ADDRPERM(m_list));
			for (n = m; n != NULL; n = n->m_nextpkt) {
				printf(" remain %llx m_next %llx\n",
				    (uint64_t) VM_KERNEL_ADDRPERM(n),
				    (uint64_t) VM_KERNEL_ADDRPERM(n->m_next));
			}
		}
	} else {
		if (m != NULL) {
			m_freem_list(m);
		}
	}
	if (error != 0) {
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
	}
	return error;
}
9bccf70c 1197
/*
 * ctl_enqueuedata - copy 'len' bytes from a kernel buffer into freshly
 * allocated mbufs and deliver them to the kernel control client.
 *
 * Unlike ctl_enqueuembuf(), the caller keeps ownership of 'data'; the
 * bytes are copied.  Allocation is M_NOWAIT, so this can fail with
 * ENOMEM under memory pressure.
 *
 * Returns 0, EINVAL (bad ref/unit), ENOBUFS (no receive space or append
 * failure), or ENOMEM (mbuf allocation failure).
 */
errno_t
ctl_enqueuedata(void *kctlref, u_int32_t unit, void *data, size_t len,
    u_int32_t flags)
{
	struct socket *so;
	struct mbuf *m;
	errno_t error = 0;
	unsigned int num_needed;
	struct mbuf *n;
	size_t curlen = 0;
	u_int32_t kctlflags;

	/* Returns with the socket locked and a use count held */
	so = kcb_find_socket(kctlref, unit, &kctlflags);
	if (so == NULL) {
		return EINVAL;
	}

	/* Check space up front so we don't allocate mbufs for nothing */
	if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
		error = ENOBUFS;
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
		goto bye;
	}

	num_needed = 1;
	m = m_allocpacket_internal(&num_needed, len, NULL, M_NOWAIT, 1, 0);
	if (m == NULL) {
		kctlstat.kcs_enqdata_mb_alloc_fail++;
		if (ctl_debug) {
			printf("%s: m_allocpacket_internal(%lu) failed\n",
			    __func__, len);
		}
		error = ENOMEM;
		goto bye;
	}

	/* Scatter the caller's buffer across the allocated mbuf chain */
	for (n = m; n != NULL; n = n->m_next) {
		size_t mlen = mbuf_maxlen(n);

		if (mlen + curlen > len) {
			mlen = len - curlen;
		}
		n->m_len = (int32_t)mlen;
		bcopy((char *)data + curlen, n->m_data, mlen);
		curlen += mlen;
	}
	mbuf_pkthdr_setlen(m, curlen);

	if ((flags & CTL_DATA_EOR)) {
		m->m_flags |= M_EOR;
	}
	so_recv_data_stat(so, m, 0);
	/*
	 * No need to call the "nodrop" variant of sbappend
	 * because the mbuf is local to the scope of the function
	 */
	if (sbappend(&so->so_rcv, m) != 0) {
		if ((flags & CTL_DATA_NOWAKEUP) == 0) {
			sorwakeup(so);
		}
	} else {
		kctlstat.kcs_enqdata_sbappend_fail++;
		error = ENOBUFS;
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
	}

bye:
	if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) {
		printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
		    __func__, error, (int)len,
		    so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);
	}

	socket_unlock(so, 1);
	if (error != 0) {
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
	}
	return error;
}
9bccf70c 1276
3e170ce0
A
1277errno_t
1278ctl_getenqueuepacketcount(kern_ctl_ref kctlref, u_int32_t unit, u_int32_t *pcnt)
1279{
0a7de745 1280 struct socket *so;
3e170ce0
A
1281 u_int32_t cnt;
1282 struct mbuf *m1;
1283
0a7de745
A
1284 if (pcnt == NULL) {
1285 return EINVAL;
1286 }
3e170ce0
A
1287
1288 so = kcb_find_socket(kctlref, unit, NULL);
1289 if (so == NULL) {
0a7de745 1290 return EINVAL;
3e170ce0
A
1291 }
1292
1293 cnt = 0;
1294 m1 = so->so_rcv.sb_mb;
1295 while (m1 != NULL) {
1296 if (m1->m_type == MT_DATA ||
1297 m1->m_type == MT_HEADER ||
0a7de745 1298 m1->m_type == MT_OOBDATA) {
3e170ce0 1299 cnt += 1;
0a7de745 1300 }
3e170ce0
A
1301 m1 = m1->m_nextpkt;
1302 }
1303 *pcnt = cnt;
1304
1305 socket_unlock(so, 1);
1306
0a7de745 1307 return 0;
3e170ce0 1308}
55e303ae 1309
fe8ab488 1310errno_t
91447636
A
1311ctl_getenqueuespace(kern_ctl_ref kctlref, u_int32_t unit, size_t *space)
1312{
0a7de745 1313 struct socket *so;
2d21ac55 1314 long avail;
fe8ab488 1315
0a7de745
A
1316 if (space == NULL) {
1317 return EINVAL;
1318 }
fe8ab488 1319
3e170ce0
A
1320 so = kcb_find_socket(kctlref, unit, NULL);
1321 if (so == NULL) {
0a7de745 1322 return EINVAL;
3e170ce0 1323 }
fe8ab488 1324
2d21ac55
A
1325 avail = sbspace(&so->so_rcv);
1326 *space = (avail < 0) ? 0 : avail;
91447636 1327 socket_unlock(so, 1);
fe8ab488 1328
0a7de745 1329 return 0;
fe8ab488
A
1330}
1331
1332errno_t
1333ctl_getenqueuereadable(kern_ctl_ref kctlref, u_int32_t unit,
1334 u_int32_t *difference)
1335{
0a7de745 1336 struct socket *so;
fe8ab488 1337
0a7de745
A
1338 if (difference == NULL) {
1339 return EINVAL;
1340 }
fe8ab488 1341
3e170ce0
A
1342 so = kcb_find_socket(kctlref, unit, NULL);
1343 if (so == NULL) {
0a7de745 1344 return EINVAL;
3e170ce0 1345 }
fe8ab488
A
1346
1347 if (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat) {
1348 *difference = 0;
1349 } else {
1350 *difference = (so->so_rcv.sb_lowat - so->so_rcv.sb_cc);
1351 }
1352 socket_unlock(so, 1);
1353
0a7de745 1354 return 0;
9bccf70c
A
1355}
1356
/*
 * ctl_ctloutput - get/setsockopt handler for kernel control sockets.
 *
 * Dispatches SYSPROTO_CONTROL options to the controller's setopt/getopt
 * callbacks.  Option data is staged in a kernel buffer; the socket lock
 * is dropped while the callback runs (callbacks may block), which is why
 * the kcb use count is bumped for the duration to keep the control block
 * alive.
 */
static int
ctl_ctloutput(struct socket *so, struct sockopt *sopt)
{
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
	struct kctl *kctl;
	int error = 0;
	void *data = NULL;
	size_t len;

	if (sopt->sopt_level != SYSPROTO_CONTROL) {
		return EINVAL;
	}

	if (kcb == NULL) {      /* sanity check */
		return ENOTCONN;
	}

	if ((kctl = kcb->kctl) == NULL) {
		return EINVAL;
	}

	/* Pin the control block across the unlocked callback window */
	lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
	ctl_kcb_increment_use_count(kcb, mtx_held);

	switch (sopt->sopt_dir) {
	case SOPT_SET:
		if (kctl->setopt == NULL) {
			error = ENOTSUP;
			goto out;
		}
		if (sopt->sopt_valsize != 0) {
			MALLOC(data, void *, sopt->sopt_valsize, M_TEMP,
			    M_WAITOK | M_ZERO);
			if (data == NULL) {
				error = ENOMEM;
				goto out;
			}
			error = sooptcopyin(sopt, data,
			    sopt->sopt_valsize, sopt->sopt_valsize);
		}
		if (error == 0) {
			/* Callback may block: drop the socket lock around it */
			socket_unlock(so, 0);
			error = (*kctl->setopt)(kctl->kctlref,
			    kcb->sac.sc_unit, kcb->userdata, sopt->sopt_name,
			    data, sopt->sopt_valsize);
			socket_lock(so, 0);
		}

		if (data != NULL) {
			FREE(data, M_TEMP);
		}
		break;

	case SOPT_GET:
		if (kctl->getopt == NULL) {
			error = ENOTSUP;
			goto out;
		}

		if (sopt->sopt_valsize && sopt->sopt_val) {
			MALLOC(data, void *, sopt->sopt_valsize, M_TEMP,
			    M_WAITOK | M_ZERO);
			if (data == NULL) {
				error = ENOMEM;
				goto out;
			}
			/*
			 * 4108337 - copy user data in case the
			 * kernel control needs it
			 */
			error = sooptcopyin(sopt, data,
			    sopt->sopt_valsize, sopt->sopt_valsize);
		}

		if (error == 0) {
			len = sopt->sopt_valsize;
			/* Callback may block: drop the socket lock around it */
			socket_unlock(so, 0);
			error = (*kctl->getopt)(kctl->kctlref, kcb->sac.sc_unit,
			    kcb->userdata, sopt->sopt_name,
			    data, &len);
			if (data != NULL && len > sopt->sopt_valsize) {
				/* Controller overflowed the staging buffer */
				panic_plain("ctl_ctloutput: ctl %s returned "
				    "len (%lu) > sopt_valsize (%lu)\n",
				    kcb->kctl->name, len,
				    sopt->sopt_valsize);
			}
			socket_lock(so, 0);
			if (error == 0) {
				if (data != NULL) {
					error = sooptcopyout(sopt, data, len);
				} else {
					/* Size-probe only: report the length */
					sopt->sopt_valsize = len;
				}
			}
		}
		if (data != NULL) {
			FREE(data, M_TEMP);
		}
		break;
	}

out:
	ctl_kcb_decrement_use_count(kcb);
	return error;
}
9bccf70c 1462
/*
 * ctl_ioctl - ioctl handler for kernel control sockets.
 *
 * Supports:
 *   CTLIOCGCOUNT - number of registered kernel controllers.
 *   CTLIOCGINFO  - look up a controller's dynamic ID by name.
 * Everything else returns ENOTSUP.
 */
static int
ctl_ioctl(struct socket *so, u_long cmd, caddr_t data,
    struct ifnet *ifp, struct proc *p)
{
#pragma unused(so, ifp, p)
	int error = ENOTSUP;

	switch (cmd) {
	/* get the number of controllers */
	case CTLIOCGCOUNT: {
		struct kctl *kctl;
		u_int32_t n = 0;

		lck_mtx_lock(ctl_mtx);
		TAILQ_FOREACH(kctl, &ctl_head, next)
		n++;
		lck_mtx_unlock(ctl_mtx);

		bcopy(&n, data, sizeof(n));
		error = 0;
		break;
	}
	case CTLIOCGINFO: {
		struct ctl_info ctl_info;
		struct kctl *kctl = 0;
		size_t name_len;

		/* Copy in/out via bcopy: 'data' may be unaligned */
		bcopy(data, &ctl_info, sizeof(ctl_info));
		name_len = strnlen(ctl_info.ctl_name, MAX_KCTL_NAME);

		if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) {
			error = EINVAL;
			break;
		}
		lck_mtx_lock(ctl_mtx);
		kctl = ctl_find_by_name(ctl_info.ctl_name);
		lck_mtx_unlock(ctl_mtx);
		if (kctl == 0) {
			error = ENOENT;
			break;
		}
		ctl_info.ctl_id = kctl->id;
		bcopy(&ctl_info, data, sizeof(ctl_info));
		error = 0;
		break;
	}

	/* add controls to get list of NKEs */
	}

	return error;
}
9bccf70c 1515
3e170ce0
A
1516static void
1517kctl_tbl_grow()
1518{
1519 struct kctl **new_table;
1520 uintptr_t new_size;
1521
1522 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1523
39037602 1524 if (kctl_tbl_growing) {
3e170ce0 1525 /* Another thread is allocating */
39037602
A
1526 kctl_tbl_growing_waiting++;
1527
1528 do {
1529 (void) msleep((caddr_t) &kctl_tbl_growing, ctl_mtx,
0a7de745 1530 PSOCK | PCATCH, "kctl_tbl_growing", 0);
39037602
A
1531 } while (kctl_tbl_growing);
1532 kctl_tbl_growing_waiting--;
3e170ce0
A
1533 }
1534 /* Another thread grew the table */
0a7de745 1535 if (kctl_table != NULL && kctl_tbl_count < kctl_tbl_size) {
3e170ce0 1536 return;
0a7de745 1537 }
3e170ce0
A
1538
1539 /* Verify we have a sane size */
1540 if (kctl_tbl_size + KCTL_TBL_INC >= UINT16_MAX) {
39037602 1541 kctlstat.kcs_tbl_size_too_big++;
0a7de745 1542 if (ctl_debug) {
39037602
A
1543 printf("%s kctl_tbl_size %lu too big\n",
1544 __func__, kctl_tbl_size);
0a7de745 1545 }
3e170ce0
A
1546 return;
1547 }
1548 kctl_tbl_growing = 1;
1549
1550 new_size = kctl_tbl_size + KCTL_TBL_INC;
1551
1552 lck_mtx_unlock(ctl_mtx);
1553 new_table = _MALLOC(sizeof(struct kctl *) * new_size,
1554 M_TEMP, M_WAIT | M_ZERO);
1555 lck_mtx_lock(ctl_mtx);
1556
1557 if (new_table != NULL) {
1558 if (kctl_table != NULL) {
1559 bcopy(kctl_table, new_table,
1560 kctl_tbl_size * sizeof(struct kctl *));
1561
1562 _FREE(kctl_table, M_TEMP);
1563 }
1564 kctl_table = new_table;
1565 kctl_tbl_size = new_size;
1566 }
1567
1568 kctl_tbl_growing = 0;
39037602
A
1569
1570 if (kctl_tbl_growing_waiting) {
1571 wakeup(&kctl_tbl_growing);
1572 }
3e170ce0
A
1573}
1574
1575#define KCTLREF_INDEX_MASK 0x0000FFFF
1576#define KCTLREF_GENCNT_MASK 0xFFFF0000
1577#define KCTLREF_GENCNT_SHIFT 16
1578
/*
 * kctl_make_ref - allocate a table slot for 'kctl' and mint its opaque
 * reference.  Called with ctl_mtx held.
 *
 * The reference encodes (slot index + 1) in the low 16 bits and a
 * generation count in the high 16 bits, so a stale reference from a
 * deregistered controller is detectable (kctl_from_ref checks the full
 * value, not just the index).
 */
static kern_ctl_ref
kctl_make_ref(struct kctl *kctl)
{
	uintptr_t i;

	lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);

	if (kctl_tbl_count >= kctl_tbl_size) {
		kctl_tbl_grow();
	}

	kctl->kctlref = NULL;
	for (i = 0; i < kctl_tbl_size; i++) {
		if (kctl_table[i] == NULL) {
			uintptr_t ref;

			/*
			 * Reference is index plus one
			 */
			kctl_ref_gencnt += 1;

			/*
			 * Add generation count as salt to reference to prevent
			 * use after deregister
			 */
			ref = ((kctl_ref_gencnt << KCTLREF_GENCNT_SHIFT) &
			    KCTLREF_GENCNT_MASK) +
			    ((i + 1) & KCTLREF_INDEX_MASK);

			kctl->kctlref = (void *)(ref);
			kctl_table[i] = kctl;
			kctl_tbl_count++;
			break;
		}
	}

	/* kctl_tbl_grow() guarantees a free slot short of UINT16_MAX entries */
	if (kctl->kctlref == NULL) {
		panic("%s no space in table", __func__);
	}

	if (ctl_debug > 0) {
		printf("%s %p for %p\n",
		    __func__, kctl->kctlref, kctl);
	}

	return kctl->kctlref;
}
1626
1627static void
1628kctl_delete_ref(kern_ctl_ref kctlref)
1629{
1630 /*
1631 * Reference is index plus one
1632 */
1633 uintptr_t i = (((uintptr_t)kctlref) & KCTLREF_INDEX_MASK) - 1;
1634
1635 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1636
1637 if (i < kctl_tbl_size) {
1638 struct kctl *kctl = kctl_table[i];
1639
1640 if (kctl->kctlref == kctlref) {
1641 kctl_table[i] = NULL;
1642 kctl_tbl_count--;
1643 } else {
1644 kctlstat.kcs_bad_kctlref++;
1645 }
1646 } else {
1647 kctlstat.kcs_bad_kctlref++;
1648 }
1649}
1650
1651static struct kctl *
1652kctl_from_ref(kern_ctl_ref kctlref)
1653{
1654 /*
1655 * Reference is index plus one
1656 */
1657 uintptr_t i = (((uintptr_t)kctlref) & KCTLREF_INDEX_MASK) - 1;
1658 struct kctl *kctl = NULL;
1659
1660 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1661
1662 if (i >= kctl_tbl_size) {
1663 kctlstat.kcs_bad_kctlref++;
0a7de745 1664 return NULL;
3e170ce0
A
1665 }
1666 kctl = kctl_table[i];
1667 if (kctl->kctlref != kctlref) {
1668 kctlstat.kcs_bad_kctlref++;
0a7de745 1669 return NULL;
3e170ce0 1670 }
0a7de745 1671 return kctl;
3e170ce0
A
1672}
1673
91447636
A
/*
 * Register/unregister a NKE
 */
/*
 * ctl_register - register a kernel controller described by 'userkctl'
 * and return its opaque reference through 'kctlref'.
 *
 * ID assignment: with CTL_FLAG_REG_ID_UNIT the caller supplies a static
 * (id, unit) pair; otherwise a dynamic ID is assigned by taking the last
 * list entry's id + 1, falling back to a linear gap search if that
 * wraps.  The list is kept sorted by id in both cases.
 *
 * Returns 0, EINVAL (bad arguments), ENOMEM, or EEXIST (name or id/unit
 * already registered).
 */
errno_t
ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref)
{
	struct kctl *kctl = NULL;
	struct kctl *kctl_next = NULL;
	u_int32_t id = 1;
	size_t name_len;
	int is_extended = 0;
	int is_setup = 0;

	if (userkctl == NULL) {         /* sanity check */
		return EINVAL;
	}
	if (userkctl->ctl_connect == NULL) {
		return EINVAL;
	}
	name_len = strlen(userkctl->ctl_name);
	if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) {
		return EINVAL;
	}

	MALLOC(kctl, struct kctl *, sizeof(*kctl), M_TEMP, M_WAITOK);
	if (kctl == NULL) {
		return ENOMEM;
	}
	bzero((char *)kctl, sizeof(*kctl));

	lck_mtx_lock(ctl_mtx);

	if (kctl_make_ref(kctl) == NULL) {
		lck_mtx_unlock(ctl_mtx);
		FREE(kctl, M_TEMP);
		return ENOMEM;
	}

	/*
	 * Kernel Control IDs
	 *
	 * CTL_FLAG_REG_ID_UNIT indicates the control ID and unit number are
	 * static. If they do not exist, add them to the list in order. If the
	 * flag is not set, we must find a new unique value. We assume the
	 * list is in order. We find the last item in the list and add one. If
	 * this leads to wrapping the id around, we start at the front of the
	 * list and look for a gap.
	 */

	if ((userkctl->ctl_flags & CTL_FLAG_REG_ID_UNIT) == 0) {
		/* Must dynamically assign an unused ID */

		/* Verify the same name isn't already registered */
		if (ctl_find_by_name(userkctl->ctl_name) != NULL) {
			kctl_delete_ref(kctl->kctlref);
			lck_mtx_unlock(ctl_mtx);
			FREE(kctl, M_TEMP);
			return EEXIST;
		}

		/* Start with 1 in case the list is empty */
		id = 1;
		kctl_next = TAILQ_LAST(&ctl_head, kctl_list);

		if (kctl_next != NULL) {
			/* List was not empty, add one to the last item */
			id = kctl_next->id + 1;
			kctl_next = NULL;

			/*
			 * If this wrapped the id number, start looking at
			 * the front of the list for an unused id.
			 */
			if (id == 0) {
				/* Find the next unused ID */
				id = 1;

				TAILQ_FOREACH(kctl_next, &ctl_head, next) {
					if (kctl_next->id > id) {
						/* We found a gap */
						break;
					}

					id = kctl_next->id + 1;
				}
			}
		}

		userkctl->ctl_id = id;
		kctl->id = id;
		kctl->reg_unit = -1;
	} else {
		/* Static id/unit: find the sorted insertion point */
		TAILQ_FOREACH(kctl_next, &ctl_head, next) {
			if (kctl_next->id > userkctl->ctl_id) {
				break;
			}
		}

		if (ctl_find_by_id_unit(userkctl->ctl_id, userkctl->ctl_unit)) {
			kctl_delete_ref(kctl->kctlref);
			lck_mtx_unlock(ctl_mtx);
			FREE(kctl, M_TEMP);
			return EEXIST;
		}
		kctl->id = userkctl->ctl_id;
		kctl->reg_unit = userkctl->ctl_unit;
	}

	is_extended = (userkctl->ctl_flags & CTL_FLAG_REG_EXTENDED);
	is_setup = (userkctl->ctl_flags & CTL_FLAG_REG_SETUP);

	strlcpy(kctl->name, userkctl->ctl_name, MAX_KCTL_NAME);
	kctl->flags = userkctl->ctl_flags;

	/*
	 * Let the caller know the default send and receive sizes
	 */
	if (userkctl->ctl_sendsize == 0) {
		kctl->sendbufsize = CTL_SENDSIZE;
		userkctl->ctl_sendsize = kctl->sendbufsize;
	} else {
		kctl->sendbufsize = userkctl->ctl_sendsize;
	}
	if (userkctl->ctl_recvsize == 0) {
		kctl->recvbufsize = CTL_RECVSIZE;
		userkctl->ctl_recvsize = kctl->recvbufsize;
	} else {
		kctl->recvbufsize = userkctl->ctl_recvsize;
	}

	/* Optional callbacks are gated by their registration flags */
	if (is_setup) {
		kctl->setup = userkctl->ctl_setup;
	}
	kctl->bind = userkctl->ctl_bind;
	kctl->connect = userkctl->ctl_connect;
	kctl->disconnect = userkctl->ctl_disconnect;
	kctl->send = userkctl->ctl_send;
	kctl->setopt = userkctl->ctl_setopt;
	kctl->getopt = userkctl->ctl_getopt;
	if (is_extended) {
		kctl->rcvd = userkctl->ctl_rcvd;
		kctl->send_list = userkctl->ctl_send_list;
	}

	TAILQ_INIT(&kctl->kcb_head);

	/* Insert at the sorted position found above (or at the tail) */
	if (kctl_next) {
		TAILQ_INSERT_BEFORE(kctl_next, kctl, next);
	} else {
		TAILQ_INSERT_TAIL(&ctl_head, kctl, next);
	}

	kctlstat.kcs_reg_count++;
	kctlstat.kcs_gencnt++;

	lck_mtx_unlock(ctl_mtx);

	*kctlref = kctl->kctlref;

	ctl_post_msg(KEV_CTL_REGISTERED, kctl->id);
	return 0;
}
1836
/*
 * ctl_deregister - remove a previously registered kernel controller.
 *
 * Fails with EINVAL for a stale/unknown reference and EBUSY while any
 * client (kcb) is still attached.  On success the KEV_CTL_DEREGISTERED
 * event is posted and the kctl memory is freed.
 */
errno_t
ctl_deregister(void *kctlref)
{
	struct kctl *kctl;

	lck_mtx_lock(ctl_mtx);
	if ((kctl = kctl_from_ref(kctlref)) == NULL) {
		kctlstat.kcs_bad_kctlref++;
		lck_mtx_unlock(ctl_mtx);
		if (ctl_debug != 0) {
			printf("%s invalid kctlref %p\n",
			    __func__, kctlref);
		}
		return EINVAL;
	}

	/* Refuse while clients are still connected */
	if (!TAILQ_EMPTY(&kctl->kcb_head)) {
		lck_mtx_unlock(ctl_mtx);
		return EBUSY;
	}

	TAILQ_REMOVE(&ctl_head, kctl, next);

	kctlstat.kcs_reg_count--;
	kctlstat.kcs_gencnt++;

	kctl_delete_ref(kctl->kctlref);
	lck_mtx_unlock(ctl_mtx);

	ctl_post_msg(KEV_CTL_DEREGISTERED, kctl->id);
	FREE(kctl, M_TEMP);
	return 0;
}
1870
91447636
A
1871/*
1872 * Must be called with global ctl_mtx lock taked
1873 */
1874static struct kctl *
1875ctl_find_by_name(const char *name)
fe8ab488 1876{
0a7de745 1877 struct kctl *kctl;
fe8ab488
A
1878
1879 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
9bccf70c 1880
fe8ab488 1881 TAILQ_FOREACH(kctl, &ctl_head, next)
0a7de745
A
1882 if (strncmp(kctl->name, name, sizeof(kctl->name)) == 0) {
1883 return kctl;
1884 }
9bccf70c 1885
0a7de745 1886 return NULL;
91447636 1887}
9bccf70c 1888
6d2010ae
A
1889u_int32_t
1890ctl_id_by_name(const char *name)
1891{
0a7de745
A
1892 u_int32_t ctl_id = 0;
1893 struct kctl *kctl;
fe8ab488 1894
6d2010ae 1895 lck_mtx_lock(ctl_mtx);
fe8ab488 1896 kctl = ctl_find_by_name(name);
0a7de745 1897 if (kctl) {
fe8ab488 1898 ctl_id = kctl->id;
0a7de745 1899 }
6d2010ae 1900 lck_mtx_unlock(ctl_mtx);
fe8ab488 1901
0a7de745 1902 return ctl_id;
6d2010ae
A
1903}
1904
1905errno_t
fe8ab488 1906ctl_name_by_id(u_int32_t id, char *out_name, size_t maxsize)
6d2010ae 1907{
0a7de745 1908 int found = 0;
6d2010ae 1909 struct kctl *kctl;
fe8ab488
A
1910
1911 lck_mtx_lock(ctl_mtx);
1912 TAILQ_FOREACH(kctl, &ctl_head, next) {
0a7de745 1913 if (kctl->id == id) {
fe8ab488 1914 break;
0a7de745 1915 }
fe8ab488
A
1916 }
1917
3e170ce0 1918 if (kctl) {
0a7de745 1919 if (maxsize > MAX_KCTL_NAME) {
fe8ab488 1920 maxsize = MAX_KCTL_NAME;
0a7de745 1921 }
fe8ab488
A
1922 strlcpy(out_name, kctl->name, maxsize);
1923 found = 1;
1924 }
6d2010ae 1925 lck_mtx_unlock(ctl_mtx);
fe8ab488 1926
0a7de745 1927 return found ? 0 : ENOENT;
6d2010ae
A
1928}
1929
91447636
A
1930/*
1931 * Must be called with global ctl_mtx lock taked
1932 *
1933 */
1934static struct kctl *
1935ctl_find_by_id_unit(u_int32_t id, u_int32_t unit)
fe8ab488 1936{
0a7de745 1937 struct kctl *kctl;
fe8ab488
A
1938
1939 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1940
1941 TAILQ_FOREACH(kctl, &ctl_head, next) {
0a7de745
A
1942 if (kctl->id == id && (kctl->flags & CTL_FLAG_REG_ID_UNIT) == 0) {
1943 return kctl;
1944 } else if (kctl->id == id && kctl->reg_unit == unit) {
1945 return kctl;
1946 }
fe8ab488 1947 }
0a7de745 1948 return NULL;
9bccf70c
A
1949}
1950
1951/*
91447636 1952 * Must be called with kernel controller lock taken
9bccf70c 1953 */
91447636
A
1954static struct ctl_cb *
1955kcb_find(struct kctl *kctl, u_int32_t unit)
fe8ab488 1956{
0a7de745 1957 struct ctl_cb *kcb;
9bccf70c 1958
fe8ab488 1959 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
9bccf70c 1960
fe8ab488 1961 TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
0a7de745
A
1962 if (kcb->sac.sc_unit == unit) {
1963 return kcb;
1964 }
fe8ab488 1965
0a7de745 1966 return NULL;
9bccf70c
A
1967}
1968
/*
 * kcb_find_socket - resolve (kctlref, unit) to the client's socket.
 *
 * On success the socket is returned LOCKED and with the kcb use count
 * protocol already balanced; the caller must socket_unlock(so, 1).
 * Optionally reports the controller's flags through 'kctlflags'.
 *
 * Lock ordering is socket lock before ctl_mtx, so the function takes
 * ctl_mtx, bumps kcb->usecount to pin the control block, drops ctl_mtx,
 * takes the socket lock, then retakes ctl_mtx and re-validates the
 * reference (the controller may have deregistered in the window).
 */
static struct socket *
kcb_find_socket(kern_ctl_ref kctlref, u_int32_t unit, u_int32_t *kctlflags)
{
	struct socket *so = NULL;
	struct ctl_cb *kcb;
	void *lr_saved;
	struct kctl *kctl;
	int i;

	lr_saved = __builtin_return_address(0);

	lck_mtx_lock(ctl_mtx);
	/*
	 * First validate the kctlref
	 */
	if ((kctl = kctl_from_ref(kctlref)) == NULL) {
		kctlstat.kcs_bad_kctlref++;
		lck_mtx_unlock(ctl_mtx);
		if (ctl_debug != 0) {
			printf("%s invalid kctlref %p\n",
			    __func__, kctlref);
		}
		return NULL;
	}

	kcb = kcb_find(kctl, unit);
	if (kcb == NULL || kcb->kctl != kctl || (so = kcb->so) == NULL) {
		lck_mtx_unlock(ctl_mtx);
		return NULL;
	}
	/*
	 * This prevents the socket from being closed
	 */
	kcb->usecount++;
	/*
	 * Respect lock ordering: socket before ctl_mtx
	 */
	lck_mtx_unlock(ctl_mtx);

	socket_lock(so, 1);
	/*
	 * The socket lock history is more useful if we store
	 * the address of the caller.
	 */
	i = (so->next_lock_lr + SO_LCKDBG_MAX - 1) % SO_LCKDBG_MAX;
	so->lock_lr[i] = lr_saved;

	lck_mtx_lock(ctl_mtx);

	/* Re-validate: the controller may have gone away while unlocked */
	if ((kctl = kctl_from_ref(kctlref)) == NULL || kcb->kctl == NULL) {
		lck_mtx_unlock(ctl_mtx);
		socket_unlock(so, 1);
		so = NULL;
		lck_mtx_lock(ctl_mtx);
	} else if (kctlflags != NULL) {
		*kctlflags = kctl->flags;
	}

	kcb->usecount--;
	/* ctl_disconnect-style waiters sleep on usecount reaching zero */
	if (kcb->usecount == 0) {
		wakeup((event_t)&kcb->usecount);
	}

	lck_mtx_unlock(ctl_mtx);

	return so;
}
2036
fe8ab488
A
2037static void
2038ctl_post_msg(u_int32_t event_code, u_int32_t id)
9bccf70c 2039{
0a7de745
A
2040 struct ctl_event_data ctl_ev_data;
2041 struct kev_msg ev_msg;
fe8ab488
A
2042
2043 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_NOTOWNED);
2044
2045 bzero(&ev_msg, sizeof(struct kev_msg));
2046 ev_msg.vendor_code = KEV_VENDOR_APPLE;
2047
2048 ev_msg.kev_class = KEV_SYSTEM_CLASS;
2049 ev_msg.kev_subclass = KEV_CTL_SUBCLASS;
2050 ev_msg.event_code = event_code;
2051
2052 /* common nke subclass data */
2053 bzero(&ctl_ev_data, sizeof(ctl_ev_data));
2054 ctl_ev_data.ctl_id = id;
2055 ev_msg.dv[0].data_ptr = &ctl_ev_data;
2056 ev_msg.dv[0].data_length = sizeof(ctl_ev_data);
2057
2058 ev_msg.dv[1].data_length = 0;
2059
2060 kev_post_msg(&ev_msg);
9bccf70c
A
2061}
2062
/*
 * ctl_lock - pr_lock handler for kernel control sockets.
 *
 * Takes the per-kcb mutex; optionally bumps so_usecount when 'refcount'
 * is set.  Records the caller's return address in the socket's lock
 * history ring for debugging.  Panics (with lock history) on a missing
 * PCB or a negative use count, both of which indicate corruption.
 */
static int
ctl_lock(struct socket *so, int refcount, void *lr)
{
	void *lr_saved;

	if (lr == NULL) {
		lr_saved = __builtin_return_address(0);
	} else {
		lr_saved = lr;
	}

	if (so->so_pcb != NULL) {
		lck_mtx_lock(((struct ctl_cb *)so->so_pcb)->mtx);
	} else {
		panic("ctl_lock: so=%p NO PCB! lr=%p lrh= %s\n",
		    so, lr_saved, solockhistory_nr(so));
		/* NOTREACHED */
	}

	if (so->so_usecount < 0) {
		panic("ctl_lock: so=%p so_pcb=%p lr=%p ref=%x lrh= %s\n",
		    so, so->so_pcb, lr_saved, so->so_usecount,
		    solockhistory_nr(so));
		/* NOTREACHED */
	}

	if (refcount) {
		so->so_usecount++;
	}

	/* Ring buffer of recent lockers, for post-mortem debugging */
	so->lock_lr[so->next_lock_lr] = lr_saved;
	so->next_lock_lr = (so->next_lock_lr + 1) % SO_LCKDBG_MAX;
	return 0;
}
2097
2098static int
b0d623f7 2099ctl_unlock(struct socket *so, int refcount, void *lr)
91447636 2100{
b0d623f7
A
2101 void *lr_saved;
2102 lck_mtx_t *mutex_held;
2103
0a7de745 2104 if (lr == NULL) {
b0d623f7 2105 lr_saved = __builtin_return_address(0);
0a7de745 2106 } else {
b0d623f7 2107 lr_saved = lr;
0a7de745 2108 }
b0d623f7 2109
39037602 2110#if (MORE_KCTLLOCK_DEBUG && (DEVELOPMENT || DEBUG))
fe8ab488
A
2111 printf("ctl_unlock: so=%llx sopcb=%x lock=%llx ref=%u lr=%llx\n",
2112 (uint64_t)VM_KERNEL_ADDRPERM(so),
2113 (uint64_t)VM_KERNEL_ADDRPERM(so->so_pcb,
2114 (uint64_t)VM_KERNEL_ADDRPERM(((struct ctl_cb *)so->so_pcb)->mtx),
2115 so->so_usecount, (uint64_t)VM_KERNEL_ADDRPERM(lr_saved));
39037602 2116#endif /* (MORE_KCTLLOCK_DEBUG && (DEVELOPMENT || DEBUG)) */
0a7de745 2117 if (refcount) {
91447636 2118 so->so_usecount--;
0a7de745 2119 }
b0d623f7
A
2120
2121 if (so->so_usecount < 0) {
fe8ab488 2122 panic("ctl_unlock: so=%p usecount=%x lrh= %s\n",
b0d623f7
A
2123 so, so->so_usecount, solockhistory_nr(so));
2124 /* NOTREACHED */
2125 }
91447636 2126 if (so->so_pcb == NULL) {
fe8ab488 2127 panic("ctl_unlock: so=%p NO PCB usecount=%x lr=%p lrh= %s\n",
0a7de745
A
2128 so, so->so_usecount, (void *)lr_saved,
2129 solockhistory_nr(so));
b0d623f7 2130 /* NOTREACHED */
91447636 2131 }
b0d623f7
A
2132 mutex_held = ((struct ctl_cb *)so->so_pcb)->mtx;
2133
0a7de745
A
2134 lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
2135 so->unlock_lr[so->next_unlock_lr] = lr_saved;
2136 so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX;
2137 lck_mtx_unlock(mutex_held);
b0d623f7 2138
0a7de745 2139 if (so->so_usecount == 0) {
91447636 2140 ctl_sofreelastref(so);
0a7de745 2141 }
b0d623f7 2142
0a7de745 2143 return 0;
91447636
A
2144}
2145
2146static lck_mtx_t *
5ba3f43e 2147ctl_getlock(struct socket *so, int flags)
91447636 2148{
5ba3f43e 2149#pragma unused(flags)
0a7de745 2150 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
fe8ab488 2151
0a7de745
A
2152 if (so->so_pcb) {
2153 if (so->so_usecount < 0) {
2154 panic("ctl_getlock: so=%p usecount=%x lrh= %s\n",
2155 so, so->so_usecount, solockhistory_nr(so));
2156 }
2157 return kcb->mtx;
91447636 2158 } else {
0a7de745
A
2159 panic("ctl_getlock: so=%p NULL NO so_pcb %s\n",
2160 so, solockhistory_nr(so));
2161 return so->so_proto->pr_domain->dom_mtx;
91447636
A
2162 }
2163}
fe8ab488
A
2164
2165__private_extern__ int
2166kctl_reg_list SYSCTL_HANDLER_ARGS
2167{
2168#pragma unused(oidp, arg1, arg2)
0a7de745 2169 int error = 0;
f427ee49 2170 u_int64_t i, n;
0a7de745
A
2171 struct xsystmgen xsg;
2172 void *buf = NULL;
2173 struct kctl *kctl;
2174 size_t item_size = ROUNDUP64(sizeof(struct xkctl_reg));
2175
2176 buf = _MALLOC(item_size, M_TEMP, M_WAITOK | M_ZERO);
2177 if (buf == NULL) {
2178 return ENOMEM;
2179 }
2180
2181 lck_mtx_lock(ctl_mtx);
2182
2183 n = kctlstat.kcs_reg_count;
2184
2185 if (req->oldptr == USER_ADDR_NULL) {
f427ee49 2186 req->oldidx = (size_t)(n + n / 8) * sizeof(struct xkctl_reg);
0a7de745
A
2187 goto done;
2188 }
2189 if (req->newptr != USER_ADDR_NULL) {
2190 error = EPERM;
2191 goto done;
2192 }
2193 bzero(&xsg, sizeof(xsg));
2194 xsg.xg_len = sizeof(xsg);
2195 xsg.xg_count = n;
2196 xsg.xg_gen = kctlstat.kcs_gencnt;
2197 xsg.xg_sogen = so_gencnt;
2198 error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
2199 if (error) {
2200 goto done;
2201 }
2202 /*
2203 * We are done if there is no pcb
2204 */
2205 if (n == 0) {
2206 goto done;
2207 }
2208
0a7de745
A
2209 for (i = 0, kctl = TAILQ_FIRST(&ctl_head);
2210 i < n && kctl != NULL;
2211 i++, kctl = TAILQ_NEXT(kctl, next)) {
2212 struct xkctl_reg *xkr = (struct xkctl_reg *)buf;
2213 struct ctl_cb *kcb;
2214 u_int32_t pcbcount = 0;
2215
2216 TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
2217 pcbcount++;
2218
2219 bzero(buf, item_size);
2220
2221 xkr->xkr_len = sizeof(struct xkctl_reg);
2222 xkr->xkr_kind = XSO_KCREG;
2223 xkr->xkr_id = kctl->id;
2224 xkr->xkr_reg_unit = kctl->reg_unit;
2225 xkr->xkr_flags = kctl->flags;
2226 xkr->xkr_kctlref = (uint64_t)(kctl->kctlref);
2227 xkr->xkr_recvbufsize = kctl->recvbufsize;
2228 xkr->xkr_sendbufsize = kctl->sendbufsize;
2229 xkr->xkr_lastunit = kctl->lastunit;
2230 xkr->xkr_pcbcount = pcbcount;
2231 xkr->xkr_connect = (uint64_t)VM_KERNEL_UNSLIDE(kctl->connect);
2232 xkr->xkr_disconnect =
2233 (uint64_t)VM_KERNEL_UNSLIDE(kctl->disconnect);
2234 xkr->xkr_send = (uint64_t)VM_KERNEL_UNSLIDE(kctl->send);
2235 xkr->xkr_send_list =
2236 (uint64_t)VM_KERNEL_UNSLIDE(kctl->send_list);
2237 xkr->xkr_setopt = (uint64_t)VM_KERNEL_UNSLIDE(kctl->setopt);
2238 xkr->xkr_getopt = (uint64_t)VM_KERNEL_UNSLIDE(kctl->getopt);
2239 xkr->xkr_rcvd = (uint64_t)VM_KERNEL_UNSLIDE(kctl->rcvd);
2240 strlcpy(xkr->xkr_name, kctl->name, sizeof(xkr->xkr_name));
2241
2242 error = SYSCTL_OUT(req, buf, item_size);
2243 }
2244
2245 if (error == 0) {
2246 /*
2247 * Give the user an updated idea of our state.
2248 * If the generation differs from what we told
2249 * her before, she knows that something happened
2250 * while we were processing this request, and it
2251 * might be necessary to retry.
2252 */
2253 bzero(&xsg, sizeof(xsg));
2254 xsg.xg_len = sizeof(xsg);
2255 xsg.xg_count = n;
2256 xsg.xg_gen = kctlstat.kcs_gencnt;
2257 xsg.xg_sogen = so_gencnt;
2258 error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
2259 if (error) {
2260 goto done;
fe8ab488
A
2261 }
2262 }
2263
2264done:
0a7de745 2265 lck_mtx_unlock(ctl_mtx);
fe8ab488 2266
0a7de745
A
2267 if (buf != NULL) {
2268 FREE(buf, M_TEMP);
2269 }
fe8ab488 2270
0a7de745 2271 return error;
fe8ab488
A
2272}
2273
2274__private_extern__ int
2275kctl_pcblist SYSCTL_HANDLER_ARGS
2276{
2277#pragma unused(oidp, arg1, arg2)
0a7de745 2278 int error = 0;
f427ee49 2279 u_int64_t n, i;
0a7de745
A
2280 struct xsystmgen xsg;
2281 void *buf = NULL;
2282 struct kctl *kctl;
2283 size_t item_size = ROUNDUP64(sizeof(struct xkctlpcb)) +
2284 ROUNDUP64(sizeof(struct xsocket_n)) +
2285 2 * ROUNDUP64(sizeof(struct xsockbuf_n)) +
2286 ROUNDUP64(sizeof(struct xsockstat_n));
2287
2288 buf = _MALLOC(item_size, M_TEMP, M_WAITOK | M_ZERO);
2289 if (buf == NULL) {
2290 return ENOMEM;
2291 }
2292
2293 lck_mtx_lock(ctl_mtx);
2294
2295 n = kctlstat.kcs_pcbcount;
2296
2297 if (req->oldptr == USER_ADDR_NULL) {
f427ee49 2298 req->oldidx = (size_t)(n + n / 8) * item_size;
0a7de745
A
2299 goto done;
2300 }
2301 if (req->newptr != USER_ADDR_NULL) {
2302 error = EPERM;
2303 goto done;
2304 }
2305 bzero(&xsg, sizeof(xsg));
2306 xsg.xg_len = sizeof(xsg);
2307 xsg.xg_count = n;
2308 xsg.xg_gen = kctlstat.kcs_gencnt;
2309 xsg.xg_sogen = so_gencnt;
2310 error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
2311 if (error) {
2312 goto done;
2313 }
2314 /*
2315 * We are done if there is no pcb
2316 */
2317 if (n == 0) {
2318 goto done;
2319 }
2320
0a7de745
A
2321 for (i = 0, kctl = TAILQ_FIRST(&ctl_head);
2322 i < n && kctl != NULL;
2323 kctl = TAILQ_NEXT(kctl, next)) {
2324 struct ctl_cb *kcb;
2325
2326 for (kcb = TAILQ_FIRST(&kctl->kcb_head);
2327 i < n && kcb != NULL;
2328 i++, kcb = TAILQ_NEXT(kcb, next)) {
2329 struct xkctlpcb *xk = (struct xkctlpcb *)buf;
2330 struct xsocket_n *xso = (struct xsocket_n *)
2331 ADVANCE64(xk, sizeof(*xk));
2332 struct xsockbuf_n *xsbrcv = (struct xsockbuf_n *)
2333 ADVANCE64(xso, sizeof(*xso));
2334 struct xsockbuf_n *xsbsnd = (struct xsockbuf_n *)
2335 ADVANCE64(xsbrcv, sizeof(*xsbrcv));
2336 struct xsockstat_n *xsostats = (struct xsockstat_n *)
2337 ADVANCE64(xsbsnd, sizeof(*xsbsnd));
2338
2339 bzero(buf, item_size);
2340
2341 xk->xkp_len = sizeof(struct xkctlpcb);
2342 xk->xkp_kind = XSO_KCB;
2343 xk->xkp_unit = kcb->sac.sc_unit;
2344 xk->xkp_kctpcb = (uint64_t)VM_KERNEL_ADDRPERM(kcb);
2345 xk->xkp_kctlref = (uint64_t)VM_KERNEL_ADDRPERM(kctl);
2346 xk->xkp_kctlid = kctl->id;
2347 strlcpy(xk->xkp_kctlname, kctl->name,
2348 sizeof(xk->xkp_kctlname));
2349
2350 sotoxsocket_n(kcb->so, xso);
2351 sbtoxsockbuf_n(kcb->so ?
2352 &kcb->so->so_rcv : NULL, xsbrcv);
2353 sbtoxsockbuf_n(kcb->so ?
2354 &kcb->so->so_snd : NULL, xsbsnd);
2355 sbtoxsockstat_n(kcb->so, xsostats);
2356
2357 error = SYSCTL_OUT(req, buf, item_size);
fe8ab488
A
2358 }
2359 }
2360
0a7de745
A
2361 if (error == 0) {
2362 /*
2363 * Give the user an updated idea of our state.
2364 * If the generation differs from what we told
2365 * her before, she knows that something happened
2366 * while we were processing this request, and it
2367 * might be necessary to retry.
2368 */
2369 bzero(&xsg, sizeof(xsg));
2370 xsg.xg_len = sizeof(xsg);
2371 xsg.xg_count = n;
2372 xsg.xg_gen = kctlstat.kcs_gencnt;
2373 xsg.xg_sogen = so_gencnt;
2374 error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
2375 if (error) {
2376 goto done;
fe8ab488
A
2377 }
2378 }
2379
2380done:
0a7de745 2381 lck_mtx_unlock(ctl_mtx);
fe8ab488 2382
0a7de745 2383 return error;
fe8ab488
A
2384}
2385
2386int
2387kctl_getstat SYSCTL_HANDLER_ARGS
2388{
2389#pragma unused(oidp, arg1, arg2)
0a7de745 2390 int error = 0;
fe8ab488 2391
0a7de745 2392 lck_mtx_lock(ctl_mtx);
fe8ab488 2393
0a7de745
A
2394 if (req->newptr != USER_ADDR_NULL) {
2395 error = EPERM;
2396 goto done;
fe8ab488 2397 }
0a7de745
A
2398 if (req->oldptr == USER_ADDR_NULL) {
2399 req->oldidx = sizeof(struct kctlstat);
2400 goto done;
fe8ab488
A
2401 }
2402
0a7de745
A
2403 error = SYSCTL_OUT(req, &kctlstat,
2404 MIN(sizeof(struct kctlstat), req->oldlen));
fe8ab488 2405done:
0a7de745
A
2406 lck_mtx_unlock(ctl_mtx);
2407 return error;
fe8ab488 2408}
3e170ce0
A
2409
2410void
2411kctl_fill_socketinfo(struct socket *so, struct socket_info *si)
2412{
0a7de745
A
2413 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
2414 struct kern_ctl_info *kcsi =
2415 &si->soi_proto.pri_kern_ctl;
2416 struct kctl *kctl = kcb->kctl;
3e170ce0 2417
0a7de745 2418 si->soi_kind = SOCKINFO_KERN_CTL;
3e170ce0 2419
0a7de745
A
2420 if (kctl == 0) {
2421 return;
2422 }
3e170ce0 2423
0a7de745
A
2424 kcsi->kcsi_id = kctl->id;
2425 kcsi->kcsi_reg_unit = kctl->reg_unit;
2426 kcsi->kcsi_flags = kctl->flags;
2427 kcsi->kcsi_recvbufsize = kctl->recvbufsize;
2428 kcsi->kcsi_sendbufsize = kctl->sendbufsize;
2429 kcsi->kcsi_unit = kcb->sac.sc_unit;
2430 strlcpy(kcsi->kcsi_name, kctl->name, MAX_KCTL_NAME);
3e170ce0 2431}