]> git.saurik.com Git - apple/xnu.git/blame - bsd/kern/kern_control.c
xnu-7195.101.1.tar.gz
[apple/xnu.git] / bsd / kern / kern_control.c
CommitLineData
9bccf70c 1/*
bca245ac 2 * Copyright (c) 1999-2020 Apple Inc. All rights reserved.
9bccf70c 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
fe8ab488 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
fe8ab488 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
fe8ab488 17 *
2d21ac55
A
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
fe8ab488 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
9bccf70c 27 */
9bccf70c
A
28
/*
 * Kernel Control domain - allows control connections to a kernel
 * control entity and to read/write data to/from it.
 *
 * Vincent Lubet, 040506
 * Christophe Allie, 010928
 * Justin C. Walker, 990319
 */
37
38#include <sys/types.h>
39#include <sys/param.h>
40#include <sys/systm.h>
41#include <sys/syslog.h>
42#include <sys/socket.h>
43#include <sys/socketvar.h>
44#include <sys/protosw.h>
45#include <sys/domain.h>
46#include <sys/malloc.h>
47#include <sys/mbuf.h>
9bccf70c
A
48#include <sys/sys_domain.h>
49#include <sys/kern_event.h>
50#include <sys/kern_control.h>
2d21ac55 51#include <sys/kauth.h>
fe8ab488 52#include <sys/sysctl.h>
3e170ce0 53#include <sys/proc_info.h>
9bccf70c
A
54#include <net/if_var.h>
55
56#include <mach/vm_types.h>
9bccf70c
A
57
58#include <kern/thread.h>
59
/*
 * struct kctl - one registered kernel controller.
 *
 * Allocated when a kext registers a control (name/id/unit), linked into
 * the global ctl_head list, and looked up at bind/connect time by name
 * or by (id, unit).  The dispatch function pointers below are the
 * controller-supplied callbacks invoked on behalf of client sockets.
 */
struct kctl {
	TAILQ_ENTRY(kctl) next;         /* controller chain */
	kern_ctl_ref kctlref;           /* opaque handle returned to the registrant */

	/* controller information provided when registering */
	char name[MAX_KCTL_NAME];       /* unique identifier */
	u_int32_t id;
	u_int32_t reg_unit;

	/* misc communication information */
	u_int32_t flags;                /* support flags */
	u_int32_t recvbufsize;          /* request more than the default buffer size */
	u_int32_t sendbufsize;          /* request more than the default buffer size */

	/* Dispatch functions */
	ctl_setup_func setup;           /* Setup contact */
	ctl_bind_func bind;             /* Prepare contact */
	ctl_connect_func connect;       /* Make contact */
	ctl_disconnect_func disconnect; /* Break contact */
	ctl_send_func send;             /* Send data to nke */
	ctl_send_list_func send_list;   /* Send list of packets */
	ctl_setopt_func setopt;         /* set kctl configuration */
	ctl_getopt_func getopt;         /* get kctl configuration */
	ctl_rcvd_func rcvd;             /* Notify nke when client reads data */

	/* per-controller list of client PCBs (struct ctl_cb) */
	TAILQ_HEAD(, ctl_cb) kcb_head;
	u_int32_t lastunit;             /* hint for next auto-assigned unit number */
};
88
4ba76501
A
#if DEVELOPMENT || DEBUG
/*
 * Connection-state tracking for a control PCB, used only on
 * DEVELOPMENT/DEBUG kernels to catch double-connect misuse
 * (see ctl_panic_debug in ctl_connect).
 */
enum ctl_status {
	KCTL_DISCONNECTED = 0,
	KCTL_CONNECTING = 1,
	KCTL_CONNECTED = 2
};
#endif /* DEVELOPMENT || DEBUG */
96
/*
 * struct ctl_cb - per-socket protocol control block for a kernel
 * control client.  Created in ctl_attach(), attached to a struct kctl
 * at bind/connect time, and torn down via ctl_detach()/kcb_delete().
 */
struct ctl_cb {
	TAILQ_ENTRY(ctl_cb) next;       /* controller chain */
	lck_mtx_t mtx;                  /* per-socket lock (see ctl_getlock) */
	struct socket *so;              /* controlling socket */
	struct kctl *kctl;              /* back pointer to controller */
	void *userdata;                 /* controller-private per-connection state */
	struct sockaddr_ctl sac;        /* bound/connected address (id + unit) */
	u_int32_t usecount;             /* refs held across unlocked callback windows */
	u_int32_t kcb_usecount;         /* serializes entry into kctl operations */
	u_int32_t require_clearing_count; /* writers waiting to run exclusively */
#if DEVELOPMENT || DEBUG
	enum ctl_status status;         /* debug-only connection state */
#endif /* DEVELOPMENT || DEBUG */
};
111
fe8ab488 112#ifndef ROUNDUP64
0a7de745 113#define ROUNDUP64(x) P2ROUNDUP((x), sizeof (u_int64_t))
fe8ab488
A
114#endif
115
116#ifndef ADVANCE64
0a7de745 117#define ADVANCE64(p, n) (void*)((char *)(p) + ROUNDUP64(n))
fe8ab488
A
118#endif
119
9bccf70c
A
/*
 * Default send and receive socket-buffer sizes for kernel control sockets
 */
123
0a7de745
A
124#define CTL_SENDSIZE (2 * 1024) /* default buffer size */
125#define CTL_RECVSIZE (8 * 1024) /* default buffer size */
9bccf70c
A
126
/*
 * Global state for the kernel control subsystem: unit limit, locks,
 * and the list of registered controllers.
 */
9bccf70c 130
f427ee49 131const u_int32_t ctl_maxunit = 65536;
c3c9b80d
A
132static LCK_ATTR_DECLARE(ctl_lck_attr, 0, 0);
133static LCK_GRP_DECLARE(ctl_lck_grp, "Kernel Control Protocol");
134static LCK_MTX_DECLARE_ATTR(ctl_mtx, &ctl_lck_grp, &ctl_lck_attr);
9bccf70c
A
135
136/* all the controllers are chained */
c3c9b80d 137TAILQ_HEAD(kctl_list, kctl) ctl_head = TAILQ_HEAD_INITIALIZER(ctl_head);
91447636
A
138
139static int ctl_attach(struct socket *, int, struct proc *);
140static int ctl_detach(struct socket *);
141static int ctl_sofreelastref(struct socket *so);
5c9f4661 142static int ctl_bind(struct socket *, struct sockaddr *, struct proc *);
91447636
A
143static int ctl_connect(struct socket *, struct sockaddr *, struct proc *);
144static int ctl_disconnect(struct socket *);
145static int ctl_ioctl(struct socket *so, u_long cmd, caddr_t data,
0a7de745 146 struct ifnet *ifp, struct proc *p);
91447636 147static int ctl_send(struct socket *, int, struct mbuf *,
0a7de745 148 struct sockaddr *, struct mbuf *, struct proc *);
fe8ab488 149static int ctl_send_list(struct socket *, int, struct mbuf *,
0a7de745 150 struct sockaddr *, struct mbuf *, struct proc *);
91447636
A
151static int ctl_ctloutput(struct socket *, struct sockopt *);
152static int ctl_peeraddr(struct socket *so, struct sockaddr **nam);
39236c6e 153static int ctl_usr_rcvd(struct socket *so, int flags);
91447636 154
91447636
A
155static struct kctl *ctl_find_by_name(const char *);
156static struct kctl *ctl_find_by_id_unit(u_int32_t id, u_int32_t unit);
9bccf70c 157
3e170ce0 158static struct socket *kcb_find_socket(kern_ctl_ref kctlref, u_int32_t unit,
0a7de745 159 u_int32_t *);
91447636 160static struct ctl_cb *kcb_find(struct kctl *, u_int32_t unit);
b0d623f7 161static void ctl_post_msg(u_int32_t event_code, u_int32_t id);
9bccf70c 162
b0d623f7
A
163static int ctl_lock(struct socket *, int, void *);
164static int ctl_unlock(struct socket *, int, void *);
91447636 165static lck_mtx_t * ctl_getlock(struct socket *, int);
9bccf70c 166
39236c6e 167static struct pr_usrreqs ctl_usrreqs = {
0a7de745
A
168 .pru_attach = ctl_attach,
169 .pru_bind = ctl_bind,
170 .pru_connect = ctl_connect,
171 .pru_control = ctl_ioctl,
172 .pru_detach = ctl_detach,
173 .pru_disconnect = ctl_disconnect,
174 .pru_peeraddr = ctl_peeraddr,
175 .pru_rcvd = ctl_usr_rcvd,
176 .pru_send = ctl_send,
177 .pru_send_list = ctl_send_list,
178 .pru_sosend = sosend,
179 .pru_sosend_list = sosend_list,
180 .pru_soreceive = soreceive,
181 .pru_soreceive_list = soreceive_list,
91447636
A
182};
183
39236c6e 184static struct protosw kctlsw[] = {
0a7de745
A
185 {
186 .pr_type = SOCK_DGRAM,
187 .pr_protocol = SYSPROTO_CONTROL,
188 .pr_flags = PR_ATOMIC | PR_CONNREQUIRED | PR_PCBLOCK | PR_WANTRCVD,
189 .pr_ctloutput = ctl_ctloutput,
190 .pr_usrreqs = &ctl_usrreqs,
191 .pr_lock = ctl_lock,
192 .pr_unlock = ctl_unlock,
193 .pr_getlock = ctl_getlock,
194 },
195 {
196 .pr_type = SOCK_STREAM,
197 .pr_protocol = SYSPROTO_CONTROL,
198 .pr_flags = PR_CONNREQUIRED | PR_PCBLOCK | PR_WANTRCVD,
199 .pr_ctloutput = ctl_ctloutput,
200 .pr_usrreqs = &ctl_usrreqs,
201 .pr_lock = ctl_lock,
202 .pr_unlock = ctl_unlock,
203 .pr_getlock = ctl_getlock,
204 }
9bccf70c
A
205};
206
fe8ab488
A
207__private_extern__ int kctl_reg_list SYSCTL_HANDLER_ARGS;
208__private_extern__ int kctl_pcblist SYSCTL_HANDLER_ARGS;
209__private_extern__ int kctl_getstat SYSCTL_HANDLER_ARGS;
210
91447636 211
fe8ab488 212SYSCTL_NODE(_net_systm, OID_AUTO, kctl,
0a7de745 213 CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Kernel control family");
fe8ab488
A
214
215struct kctlstat kctlstat;
216SYSCTL_PROC(_net_systm_kctl, OID_AUTO, stats,
217 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
218 kctl_getstat, "S,kctlstat", "");
219
220SYSCTL_PROC(_net_systm_kctl, OID_AUTO, reg_list,
0a7de745
A
221 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
222 kctl_reg_list, "S,xkctl_reg", "");
fe8ab488
A
223
224SYSCTL_PROC(_net_systm_kctl, OID_AUTO, pcblist,
0a7de745
A
225 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
226 kctl_pcblist, "S,xkctlpcb", "");
fe8ab488
A
227
228u_int32_t ctl_autorcvbuf_max = 256 * 1024;
229SYSCTL_INT(_net_systm_kctl, OID_AUTO, autorcvbufmax,
0a7de745 230 CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_autorcvbuf_max, 0, "");
fe8ab488
A
231
232u_int32_t ctl_autorcvbuf_high = 0;
233SYSCTL_INT(_net_systm_kctl, OID_AUTO, autorcvbufhigh,
0a7de745 234 CTLFLAG_RD | CTLFLAG_LOCKED, &ctl_autorcvbuf_high, 0, "");
fe8ab488
A
235
236u_int32_t ctl_debug = 0;
237SYSCTL_INT(_net_systm_kctl, OID_AUTO, debug,
0a7de745 238 CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_debug, 0, "");
fe8ab488 239
4ba76501
A
240#if DEVELOPMENT || DEBUG
241u_int32_t ctl_panic_debug = 0;
242SYSCTL_INT(_net_systm_kctl, OID_AUTO, panicdebug,
243 CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_panic_debug, 0, "");
244#endif /* DEVELOPMENT || DEBUG */
245
0a7de745 246#define KCTL_TBL_INC 16
3e170ce0
A
247
248static uintptr_t kctl_tbl_size = 0;
249static u_int32_t kctl_tbl_growing = 0;
39037602 250static u_int32_t kctl_tbl_growing_waiting = 0;
3e170ce0
A
251static uintptr_t kctl_tbl_count = 0;
252static struct kctl **kctl_table = NULL;
253static uintptr_t kctl_ref_gencnt = 0;
254
255static void kctl_tbl_grow(void);
256static kern_ctl_ref kctl_make_ref(struct kctl *kctl);
257static void kctl_delete_ref(kern_ctl_ref);
258static struct kctl *kctl_from_ref(kern_ctl_ref);
259
9bccf70c 260/*
91447636 261 * Install the protosw's for the Kernel Control manager.
9bccf70c 262 */
39236c6e
A
263__private_extern__ void
264kern_control_init(struct domain *dp)
9bccf70c 265{
39236c6e
A
266 struct protosw *pr;
267 int i;
0a7de745 268 int kctl_proto_count = (sizeof(kctlsw) / sizeof(struct protosw));
39236c6e
A
269
270 VERIFY(!(dp->dom_flags & DOM_INITIALIZED));
271 VERIFY(dp == systemdomain);
272
0a7de745 273 for (i = 0, pr = &kctlsw[0]; i < kctl_proto_count; i++, pr++) {
39236c6e 274 net_add_proto(pr, dp, 1);
0a7de745 275 }
91447636 276}
9bccf70c 277
91447636
A
278static void
279kcb_delete(struct ctl_cb *kcb)
280{
281 if (kcb != 0) {
c3c9b80d
A
282 lck_mtx_destroy(&kcb->mtx, &ctl_lck_grp);
283 kheap_free(KHEAP_DEFAULT, kcb, sizeof(struct ctl_cb));
91447636 284 }
9bccf70c
A
285}
286
9bccf70c
A
287/*
288 * Kernel Controller user-request functions
fe8ab488
A
289 * attach function must exist and succeed
290 * detach not necessary
91447636 291 * we need a pcb for the per socket mutex
9bccf70c 292 */
91447636 293static int
fe8ab488
A
294ctl_attach(struct socket *so, int proto, struct proc *p)
295{
296#pragma unused(proto, p)
91447636 297 int error = 0;
0a7de745 298 struct ctl_cb *kcb = 0;
91447636 299
c3c9b80d 300 kcb = kheap_alloc(KHEAP_DEFAULT, sizeof(struct ctl_cb), Z_WAITOK | Z_ZERO);
91447636
A
301 if (kcb == NULL) {
302 error = ENOMEM;
303 goto quit;
304 }
fe8ab488 305
c3c9b80d 306 lck_mtx_init(&kcb->mtx, &ctl_lck_grp, &ctl_lck_attr);
91447636
A
307 kcb->so = so;
308 so->so_pcb = (caddr_t)kcb;
fe8ab488 309
91447636
A
310quit:
311 if (error != 0) {
312 kcb_delete(kcb);
313 kcb = 0;
314 }
0a7de745 315 return error;
91447636
A
316}
317
/*
 * Release the last reference on a kernel control socket: detach the PCB
 * from its controller (if any), update global stats under ctl_mtx, free
 * the PCB, and let the socket layer finish the teardown.
 *
 * Always returns 0.
 */
static int
ctl_sofreelastref(struct socket *so)
{
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

	/* Clear the back pointer first so nothing else finds the dying PCB */
	so->so_pcb = 0;

	if (kcb != 0) {
		struct kctl *kctl;
		if ((kctl = kcb->kctl) != 0) {
			/* ctl_mtx protects the per-controller PCB list and stats */
			lck_mtx_lock(&ctl_mtx);
			TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
			kctlstat.kcs_pcbcount--;
			kctlstat.kcs_gencnt++;
			lck_mtx_unlock(&ctl_mtx);
		}
		kcb_delete(kcb);
	}
	sofreelastref(so, 1);
	return 0;
}
339
/*
 * Use this function and ctl_kcb_require_clearing to serialize
 * critical calls into the kctl subsystem
 */
/*
 * Take a shared "in use" reference on the PCB, blocking while any
 * caller holds exclusive access via ctl_kcb_require_clearing().
 * mutex_held (the per-socket lock) must be owned; msleep drops and
 * re-acquires it while waiting.
 */
static void
ctl_kcb_increment_use_count(struct ctl_cb *kcb, lck_mtx_t *mutex_held)
{
	LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED);
	while (kcb->require_clearing_count > 0) {
		msleep(&kcb->require_clearing_count, mutex_held, PSOCK | PCATCH, "kcb_require_clearing", NULL);
	}
	kcb->kcb_usecount++;
}
353
/*
 * Upgrade the caller's shared reference (already taken via
 * ctl_kcb_increment_use_count) to exclusive access: mark that clearing
 * is required so new entrants block, then wait until every other shared
 * holder has drained.  Pair with ctl_kcb_done_clearing().
 */
static void
ctl_kcb_require_clearing(struct ctl_cb *kcb, lck_mtx_t *mutex_held)
{
	assert(kcb->kcb_usecount != 0);
	kcb->require_clearing_count++;
	/* Temporarily drop our own shared ref so the drain below can reach 0 */
	kcb->kcb_usecount--;
	while (kcb->kcb_usecount > 0) { // we need to wait until no one else is running
		msleep(&kcb->kcb_usecount, mutex_held, PSOCK | PCATCH, "kcb_usecount", NULL);
	}
	/* Re-take the shared ref; we are now the sole active user */
	kcb->kcb_usecount++;
}
365
/*
 * Drop exclusive access taken with ctl_kcb_require_clearing() and wake
 * any thread blocked in ctl_kcb_increment_use_count().  The caller
 * still holds its shared use count (release it separately).
 */
static void
ctl_kcb_done_clearing(struct ctl_cb *kcb)
{
	assert(kcb->require_clearing_count != 0);
	kcb->require_clearing_count--;
	wakeup((caddr_t)&kcb->require_clearing_count);
}
373
/*
 * Drop a shared use-count reference and wake any thread draining the
 * count in ctl_kcb_require_clearing().
 */
static void
ctl_kcb_decrement_use_count(struct ctl_cb *kcb)
{
	assert(kcb->kcb_usecount != 0);
	kcb->kcb_usecount--;
	wakeup((caddr_t)&kcb->kcb_usecount);
}
381
91447636
A
/*
 * pru_detach for kernel control sockets.  Takes exclusive access to the
 * PCB, invokes the controller's disconnect callback if the unit was
 * bound but never connected (so the controller can release userdata),
 * then marks the socket disconnected and schedules PCB clearing.
 *
 * Always returns 0; a NULL PCB is treated as already detached.
 */
static int
ctl_detach(struct socket *so)
{
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

	if (kcb == 0) {
		return 0;
	}

	lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
	ctl_kcb_increment_use_count(kcb, mtx_held);
	ctl_kcb_require_clearing(kcb, mtx_held);

	if (kcb->kctl != NULL && kcb->kctl->bind != NULL &&
	    kcb->userdata != NULL && !(so->so_state & SS_ISCONNECTED)) {
		// The unit was bound, but not connected
		// Invoke the disconnected call to cleanup
		if (kcb->kctl->disconnect != NULL) {
			/* Callback runs unlocked; use counts keep the PCB alive */
			socket_unlock(so, 0);
			(*kcb->kctl->disconnect)(kcb->kctl->kctlref,
			    kcb->sac.sc_unit, kcb->userdata);
			socket_lock(so, 0);
		}
	}

	soisdisconnected(so);
#if DEVELOPMENT || DEBUG
	kcb->status = KCTL_DISCONNECTED;
#endif /* DEVELOPMENT || DEBUG */
	/* Defer actual PCB teardown to ctl_sofreelastref() */
	so->so_flags |= SOF_PCBCLEARING;
	ctl_kcb_done_clearing(kcb);
	ctl_kcb_decrement_use_count(kcb);
	return 0;
}
416
/*
 * Common setup for ctl_bind() and ctl_connect(): resolve the
 * sockaddr_ctl in `nam` to a registered controller, validate socket
 * type and privilege, pick or validate a unit number, link the PCB
 * onto the controller, and reserve socket buffers.
 *
 * Returns 0 on success; EINVAL/ENOENT/EPROTOTYPE/EPERM/EBUSY on
 * validation failure.  On soreserve() failure the PCB is unlinked
 * again before returning the error.  Idempotent: returns 0 at once if
 * the PCB is already attached to a controller.
 */
static int
ctl_setup_kctl(struct socket *so, struct sockaddr *nam, struct proc *p)
{
	struct kctl *kctl = NULL;
	int error = 0;
	struct sockaddr_ctl sa;
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
	struct ctl_cb *kcb_next = NULL;
	u_quad_t sbmaxsize;
	u_int32_t recvbufsize, sendbufsize;

	if (kcb == 0) {
		panic("ctl_setup_kctl so_pcb null\n");
	}

	if (kcb->kctl != NULL) {
		// Already set up, skip
		return 0;
	}

	if (nam->sa_len != sizeof(struct sockaddr_ctl)) {
		return EINVAL;
	}

	/* Copy the address out of caller memory before validating fields */
	bcopy(nam, &sa, sizeof(struct sockaddr_ctl));

	lck_mtx_lock(&ctl_mtx);
	kctl = ctl_find_by_id_unit(sa.sc_id, sa.sc_unit);
	if (kctl == NULL) {
		lck_mtx_unlock(&ctl_mtx);
		return ENOENT;
	}

	/* Socket type must match what the controller registered for */
	if (((kctl->flags & CTL_FLAG_REG_SOCK_STREAM) &&
	    (so->so_type != SOCK_STREAM)) ||
	    (!(kctl->flags & CTL_FLAG_REG_SOCK_STREAM) &&
	    (so->so_type != SOCK_DGRAM))) {
		lck_mtx_unlock(&ctl_mtx);
		return EPROTOTYPE;
	}

	/* Privileged controls require a superuser credential */
	if (kctl->flags & CTL_FLAG_PRIVILEGED) {
		if (p == 0) {
			lck_mtx_unlock(&ctl_mtx);
			return EINVAL;
		}
		if (kauth_cred_issuser(kauth_cred_get()) == 0) {
			lck_mtx_unlock(&ctl_mtx);
			return EPERM;
		}
	}

	if ((kctl->flags & CTL_FLAG_REG_ID_UNIT) || sa.sc_unit != 0) {
		/* Caller requested an explicit unit; it must be free */
		if (kcb_find(kctl, sa.sc_unit) != NULL) {
			lck_mtx_unlock(&ctl_mtx);
			return EBUSY;
		}
	} else if (kctl->setup != NULL) {
		/* Controller chooses the unit (and may set userdata) itself */
		error = (*kctl->setup)(&sa.sc_unit, &kcb->userdata);
		if (error != 0) {
			lck_mtx_unlock(&ctl_mtx);
			return error;
		}
	} else {
		/* Find an unused ID, assumes control IDs are in order */
		u_int32_t unit = 1;

		TAILQ_FOREACH(kcb_next, &kctl->kcb_head, next) {
			if (kcb_next->sac.sc_unit > unit) {
				/* Found a gap, lets fill it in */
				break;
			}
			unit = kcb_next->sac.sc_unit + 1;
			if (unit == ctl_maxunit) {
				break;
			}
		}

		if (unit == ctl_maxunit) {
			lck_mtx_unlock(&ctl_mtx);
			return EBUSY;
		}

		sa.sc_unit = unit;
	}

	bcopy(&sa, &kcb->sac, sizeof(struct sockaddr_ctl));
	kcb->kctl = kctl;
	/* Keep the per-controller PCB list sorted by unit number */
	if (kcb_next != NULL) {
		TAILQ_INSERT_BEFORE(kcb_next, kcb, next);
	} else {
		TAILQ_INSERT_TAIL(&kctl->kcb_head, kcb, next);
	}
	kctlstat.kcs_pcbcount++;
	kctlstat.kcs_gencnt++;
	kctlstat.kcs_connections++;
	lck_mtx_unlock(&ctl_mtx);

	/*
	 * rdar://15526688: Limit the send and receive sizes to sb_max
	 * by using the same scaling as sbreserve()
	 */
	sbmaxsize = (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES);

	if (kctl->sendbufsize > sbmaxsize) {
		sendbufsize = (u_int32_t)sbmaxsize;
	} else {
		sendbufsize = kctl->sendbufsize;
	}

	if (kctl->recvbufsize > sbmaxsize) {
		recvbufsize = (u_int32_t)sbmaxsize;
	} else {
		recvbufsize = kctl->recvbufsize;
	}

	error = soreserve(so, sendbufsize, recvbufsize);
	if (error) {
		if (ctl_debug) {
			printf("%s - soreserve(%llx, %u, %u) error %d\n",
			    __func__, (uint64_t)VM_KERNEL_ADDRPERM(so),
			    sendbufsize, recvbufsize, error);
		}
		goto done;
	}

done:
	if (error) {
		/* Undo the linkage established above on failure */
		soisdisconnected(so);
#if DEVELOPMENT || DEBUG
		kcb->status = KCTL_DISCONNECTED;
#endif /* DEVELOPMENT || DEBUG */
		lck_mtx_lock(&ctl_mtx);
		TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
		kcb->kctl = NULL;
		kcb->sac.sc_unit = 0;
		kctlstat.kcs_pcbcount--;
		kctlstat.kcs_gencnt++;
		kctlstat.kcs_conn_fail++;
		lck_mtx_unlock(&ctl_mtx);
	}
	return error;
}
560
/*
 * pru_bind for kernel control sockets: attach the PCB to the controller
 * named by `nam` (via ctl_setup_kctl) and invoke the controller's bind
 * callback.  Runs with exclusive PCB access; the callback itself runs
 * with the socket unlocked.
 *
 * Returns 0 on success, EINVAL if the controller registered no bind
 * callback, or any error from setup or the callback.
 */
static int
ctl_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
{
	int error = 0;
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

	if (kcb == NULL) {
		panic("ctl_bind so_pcb null\n");
	}

	lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
	ctl_kcb_increment_use_count(kcb, mtx_held);
	ctl_kcb_require_clearing(kcb, mtx_held);

	error = ctl_setup_kctl(so, nam, p);
	if (error) {
		goto out;
	}

	if (kcb->kctl == NULL) {
		panic("ctl_bind kctl null\n");
	}

	if (kcb->kctl->bind == NULL) {
		error = EINVAL;
		goto out;
	}

	/* Callback runs unlocked; use counts keep the PCB alive */
	socket_unlock(so, 0);
	error = (*kcb->kctl->bind)(kcb->kctl->kctlref, &kcb->sac, &kcb->userdata);
	socket_lock(so, 0);

out:
	ctl_kcb_done_clearing(kcb);
	ctl_kcb_decrement_use_count(kcb);
	return error;
}
598
/*
 * pru_connect for kernel control sockets: attach the PCB to the
 * controller (ctl_setup_kctl), then invoke the controller's connect
 * callback with the socket unlocked.  On callback failure, the
 * controller's disconnect callback is invoked for cleanup and the PCB
 * is unlinked from the controller again.
 *
 * Returns 0 on success or the error from setup/connect.
 */
static int
ctl_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
{
	int error = 0;
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

	if (kcb == NULL) {
		panic("ctl_connect so_pcb null\n");
	}

	lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
	ctl_kcb_increment_use_count(kcb, mtx_held);
	ctl_kcb_require_clearing(kcb, mtx_held);

#if DEVELOPMENT || DEBUG
	/* Debug aid: catch connect() on an already connecting/connected PCB */
	if (kcb->status != KCTL_DISCONNECTED && ctl_panic_debug) {
		panic("kctl already connecting/connected");
	}
	kcb->status = KCTL_CONNECTING;
#endif /* DEVELOPMENT || DEBUG */

	error = ctl_setup_kctl(so, nam, p);
	if (error) {
		goto out;
	}

	if (kcb->kctl == NULL) {
		panic("ctl_connect kctl null\n");
	}

	soisconnecting(so);
	/* Callback runs unlocked; use counts keep the PCB alive */
	socket_unlock(so, 0);
	error = (*kcb->kctl->connect)(kcb->kctl->kctlref, &kcb->sac, &kcb->userdata);
	socket_lock(so, 0);
	if (error) {
		goto end;
	}
	soisconnected(so);
#if DEVELOPMENT || DEBUG
	kcb->status = KCTL_CONNECTED;
#endif /* DEVELOPMENT || DEBUG */

end:
	if (error && kcb->kctl->disconnect) {
		/*
		 * XXX Make sure we Don't check the return value
		 * of disconnect here.
		 * ipsec/utun_ctl_disconnect will return error when
		 * disconnect gets called after connect failure.
		 * However if we decide to check for disconnect return
		 * value here. Please make sure to revisit
		 * ipsec/utun_ctl_disconnect.
		 */
		socket_unlock(so, 0);
		(*kcb->kctl->disconnect)(kcb->kctl->kctlref, kcb->sac.sc_unit, kcb->userdata);
		socket_lock(so, 0);
	}
	if (error) {
		/* Undo the controller linkage created by ctl_setup_kctl() */
		soisdisconnected(so);
#if DEVELOPMENT || DEBUG
		kcb->status = KCTL_DISCONNECTED;
#endif /* DEVELOPMENT || DEBUG */
		lck_mtx_lock(&ctl_mtx);
		TAILQ_REMOVE(&kcb->kctl->kcb_head, kcb, next);
		kcb->kctl = NULL;
		kcb->sac.sc_unit = 0;
		kctlstat.kcs_pcbcount--;
		kctlstat.kcs_gencnt++;
		kctlstat.kcs_conn_fail++;
		lck_mtx_unlock(&ctl_mtx);
	}
out:
	ctl_kcb_done_clearing(kcb);
	ctl_kcb_decrement_use_count(kcb);
	return error;
}
675
/*
 * pru_disconnect for kernel control sockets: notify the controller via
 * its disconnect callback (socket unlocked), then unlink the PCB from
 * the controller under ctl_mtx, waiting for any outstanding enqueue
 * references (kcb->usecount) to drain first.
 *
 * Always returns 0; a NULL PCB is a no-op.
 */
static int
ctl_disconnect(struct socket *so)
{
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

	if ((kcb = (struct ctl_cb *)so->so_pcb)) {
		lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
		ctl_kcb_increment_use_count(kcb, mtx_held);
		ctl_kcb_require_clearing(kcb, mtx_held);
		struct kctl *kctl = kcb->kctl;

		if (kctl && kctl->disconnect) {
			/* Callback runs unlocked; use counts keep the PCB alive */
			socket_unlock(so, 0);
			(*kctl->disconnect)(kctl->kctlref, kcb->sac.sc_unit,
			    kcb->userdata);
			socket_lock(so, 0);
		}

		soisdisconnected(so);
#if DEVELOPMENT || DEBUG
		kcb->status = KCTL_DISCONNECTED;
#endif /* DEVELOPMENT || DEBUG */

		/* Drop the socket lock before taking ctl_mtx (lock ordering) */
		socket_unlock(so, 0);
		lck_mtx_lock(&ctl_mtx);
		kcb->kctl = 0;
		kcb->sac.sc_unit = 0;
		/* Wait for enqueue paths holding a usecount ref to finish */
		while (kcb->usecount != 0) {
			msleep(&kcb->usecount, &ctl_mtx, 0, "kcb->usecount", 0);
		}
		TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
		kctlstat.kcs_pcbcount--;
		kctlstat.kcs_gencnt++;
		lck_mtx_unlock(&ctl_mtx);
		socket_lock(so, 0);
		ctl_kcb_done_clearing(kcb);
		ctl_kcb_decrement_use_count(kcb);
	}
	return 0;
}
716
91447636
A
717static int
718ctl_peeraddr(struct socket *so, struct sockaddr **nam)
9bccf70c 719{
0a7de745
A
720 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
721 struct kctl *kctl;
722 struct sockaddr_ctl sc;
fe8ab488 723
0a7de745
A
724 if (kcb == NULL) { /* sanity check */
725 return ENOTCONN;
726 }
fe8ab488 727
0a7de745
A
728 if ((kctl = kcb->kctl) == NULL) {
729 return EINVAL;
730 }
fe8ab488 731
91447636
A
732 bzero(&sc, sizeof(struct sockaddr_ctl));
733 sc.sc_len = sizeof(struct sockaddr_ctl);
734 sc.sc_family = AF_SYSTEM;
735 sc.ss_sysaddr = AF_SYS_CONTROL;
736 sc.sc_id = kctl->id;
5c9f4661 737 sc.sc_unit = kcb->sac.sc_unit;
fe8ab488 738
91447636 739 *nam = dup_sockaddr((struct sockaddr *)&sc, 1);
fe8ab488 740
0a7de745 741 return 0;
fe8ab488
A
742}
743
744static void
745ctl_sbrcv_trim(struct socket *so)
746{
747 struct sockbuf *sb = &so->so_rcv;
748
749 if (sb->sb_hiwat > sb->sb_idealsize) {
750 u_int32_t diff;
751 int32_t trim;
752
753 /*
754 * The difference between the ideal size and the
755 * current size is the upper bound of the trimage
756 */
757 diff = sb->sb_hiwat - sb->sb_idealsize;
758 /*
759 * We cannot trim below the outstanding data
760 */
761 trim = sb->sb_hiwat - sb->sb_cc;
762
763 trim = imin(trim, (int32_t)diff);
764
765 if (trim > 0) {
766 sbreserve(sb, (sb->sb_hiwat - trim));
767
0a7de745 768 if (ctl_debug) {
fe8ab488
A
769 printf("%s - shrunk to %d\n",
770 __func__, sb->sb_hiwat);
0a7de745 771 }
fe8ab488
A
772 }
773 }
9bccf70c
A
774}
775
/*
 * pru_rcvd for kernel control sockets: after the client has read data,
 * notify the controller via its rcvd callback (socket unlocked) and
 * opportunistically trim an over-grown receive buffer.
 *
 * Returns 0 on success, ENOTCONN with no PCB, EINVAL when the PCB is
 * not attached to a controller.
 */
static int
ctl_usr_rcvd(struct socket *so, int flags)
{
	int error = 0;
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
	struct kctl *kctl;

	if (kcb == NULL) {
		return ENOTCONN;
	}

	lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
	ctl_kcb_increment_use_count(kcb, mtx_held);

	if ((kctl = kcb->kctl) == NULL) {
		error = EINVAL;
		goto out;
	}

	if (kctl->rcvd) {
		/* Callback runs unlocked; the use count keeps the PCB alive */
		socket_unlock(so, 0);
		(*kctl->rcvd)(kctl->kctlref, kcb->sac.sc_unit, kcb->userdata, flags);
		socket_lock(so, 0);
	}

	ctl_sbrcv_trim(so);

out:
	ctl_kcb_decrement_use_count(kcb);
	return error;
}
807
91447636
A
808static int
809ctl_send(struct socket *so, int flags, struct mbuf *m,
0a7de745
A
810 struct sockaddr *addr, struct mbuf *control,
811 struct proc *p)
9bccf70c 812{
fe8ab488 813#pragma unused(addr, p)
0a7de745
A
814 int error = 0;
815 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
816 struct kctl *kctl;
fe8ab488 817
0a7de745 818 if (control) {
fe8ab488 819 m_freem(control);
0a7de745 820 }
fe8ab488 821
0a7de745 822 if (kcb == NULL) { /* sanity check */
6d2010ae 823 error = ENOTCONN;
0a7de745 824 }
fe8ab488 825
94ff46dc
A
826 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
827 ctl_kcb_increment_use_count(kcb, mtx_held);
828
0a7de745 829 if (error == 0 && (kctl = kcb->kctl) == NULL) {
6d2010ae 830 error = EINVAL;
0a7de745 831 }
fe8ab488 832
6d2010ae 833 if (error == 0 && kctl->send) {
fe8ab488 834 so_tc_update_stats(m, so, m_get_service_class(m));
91447636 835 socket_unlock(so, 0);
5c9f4661 836 error = (*kctl->send)(kctl->kctlref, kcb->sac.sc_unit, kcb->userdata,
3e170ce0 837 m, flags);
91447636 838 socket_lock(so, 0);
6d2010ae
A
839 } else {
840 m_freem(m);
0a7de745 841 if (error == 0) {
6d2010ae 842 error = ENOTSUP;
0a7de745 843 }
91447636 844 }
0a7de745 845 if (error != 0) {
fe8ab488 846 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_send_fail);
0a7de745 847 }
ea3f0419 848 ctl_kcb_decrement_use_count(kcb);
94ff46dc 849
0a7de745 850 return error;
fe8ab488
A
851}
852
853static int
854ctl_send_list(struct socket *so, int flags, struct mbuf *m,
0a7de745
A
855 __unused struct sockaddr *addr, struct mbuf *control,
856 __unused struct proc *p)
fe8ab488 857{
0a7de745
A
858 int error = 0;
859 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
860 struct kctl *kctl;
fe8ab488 861
0a7de745 862 if (control) {
fe8ab488 863 m_freem_list(control);
0a7de745 864 }
fe8ab488 865
0a7de745 866 if (kcb == NULL) { /* sanity check */
fe8ab488 867 error = ENOTCONN;
0a7de745 868 }
fe8ab488 869
94ff46dc
A
870 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
871 ctl_kcb_increment_use_count(kcb, mtx_held);
872
0a7de745 873 if (error == 0 && (kctl = kcb->kctl) == NULL) {
fe8ab488 874 error = EINVAL;
0a7de745 875 }
fe8ab488
A
876
877 if (error == 0 && kctl->send_list) {
878 struct mbuf *nxt;
879
0a7de745 880 for (nxt = m; nxt != NULL; nxt = nxt->m_nextpkt) {
fe8ab488 881 so_tc_update_stats(nxt, so, m_get_service_class(nxt));
0a7de745 882 }
fe8ab488
A
883
884 socket_unlock(so, 0);
5c9f4661 885 error = (*kctl->send_list)(kctl->kctlref, kcb->sac.sc_unit,
3e170ce0 886 kcb->userdata, m, flags);
fe8ab488
A
887 socket_lock(so, 0);
888 } else if (error == 0 && kctl->send) {
889 while (m != NULL && error == 0) {
890 struct mbuf *nextpkt = m->m_nextpkt;
891
892 m->m_nextpkt = NULL;
893 so_tc_update_stats(m, so, m_get_service_class(m));
894 socket_unlock(so, 0);
5c9f4661 895 error = (*kctl->send)(kctl->kctlref, kcb->sac.sc_unit,
3e170ce0 896 kcb->userdata, m, flags);
fe8ab488
A
897 socket_lock(so, 0);
898 m = nextpkt;
899 }
0a7de745 900 if (m != NULL) {
fe8ab488 901 m_freem_list(m);
0a7de745 902 }
fe8ab488
A
903 } else {
904 m_freem_list(m);
0a7de745 905 if (error == 0) {
fe8ab488 906 error = ENOTSUP;
0a7de745 907 }
fe8ab488 908 }
0a7de745 909 if (error != 0) {
fe8ab488 910 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_send_list_fail);
0a7de745 911 }
ea3f0419 912 ctl_kcb_decrement_use_count(kcb);
94ff46dc 913
0a7de745 914 return error;
fe8ab488
A
915}
916
/*
 * Decide whether `datasize` bytes may be enqueued on the socket's
 * receive buffer, optionally growing the buffer.
 *
 * Policy:
 *  - controller not registered CTL_FLAG_REG_CRIT: plain space check;
 *  - registered critical but this message is not CTL_DATA_CRIT:
 *    keep 25% of the buffer reserved for critical messages;
 *  - critical message: allow up to 25% overcommit, growing the buffer
 *    (bounded by ctl_autorcvbuf_max) when possible.
 *
 * Returns 0 when the data fits, ENOBUFS otherwise.
 */
static errno_t
ctl_rcvbspace(struct socket *so, size_t datasize,
    u_int32_t kctlflags, u_int32_t flags)
{
	struct sockbuf *sb = &so->so_rcv;
	u_int32_t space = sbspace(sb);
	errno_t error;

	if ((kctlflags & CTL_FLAG_REG_CRIT) == 0) {
		if ((u_int32_t) space >= datasize) {
			error = 0;
		} else {
			error = ENOBUFS;
		}
	} else if ((flags & CTL_DATA_CRIT) == 0) {
		/*
		 * Reserve 25% for critical messages
		 */
		if (space < (sb->sb_hiwat >> 2) ||
		    space < datasize) {
			error = ENOBUFS;
		} else {
			error = 0;
		}
	} else {
		size_t autorcvbuf_max;

		/*
		 * Allow overcommit of 25%
		 */
		autorcvbuf_max = min(sb->sb_idealsize + (sb->sb_idealsize >> 2),
		    ctl_autorcvbuf_max);

		if ((u_int32_t) space >= datasize) {
			error = 0;
		} else if (tcp_cansbgrow(sb) &&
		    sb->sb_hiwat < autorcvbuf_max) {
			/*
			 * Grow with a little bit of leeway
			 */
			size_t grow = datasize - space + MSIZE;
			u_int32_t cc = (u_int32_t)MIN(MIN((sb->sb_hiwat + grow), autorcvbuf_max), UINT32_MAX);

			if (sbreserve(sb, cc) == 1) {
				/* Track the high-water mark for the sysctl stat */
				if (sb->sb_hiwat > ctl_autorcvbuf_high) {
					ctl_autorcvbuf_high = sb->sb_hiwat;
				}

				/*
				 * A final check
				 */
				if ((u_int32_t) sbspace(sb) >= datasize) {
					error = 0;
				} else {
					error = ENOBUFS;
				}

				if (ctl_debug) {
					printf("%s - grown to %d error %d\n",
					    __func__, sb->sb_hiwat, error);
				}
			} else {
				error = ENOBUFS;
			}
		} else {
			error = ENOBUFS;
		}
	}
	return error;
}
987
/*
 * KPI: enqueue an mbuf chain from a controller onto the receive buffer
 * of the client socket identified by (kctlref, unit), and wake the
 * reader unless CTL_DATA_NOWAKEUP is set.  CTL_DATA_EOR marks the
 * record boundary.
 *
 * Returns 0 on success, EINVAL when no matching socket is found, or
 * ENOBUFS when the receive buffer cannot accept the data.  On ENOBUFS
 * from sbappend the mbuf is not freed here — sbappend_nodrop retains
 * caller semantics; the caller decides whether to retry or free.
 */
errno_t
ctl_enqueuembuf(kern_ctl_ref kctlref, u_int32_t unit, struct mbuf *m,
    u_int32_t flags)
{
	struct socket *so;
	errno_t error = 0;
	int len = m->m_pkthdr.len;
	u_int32_t kctlflags;

	/* On success the socket is returned locked (unlocked below) */
	so = kcb_find_socket(kctlref, unit, &kctlflags);
	if (so == NULL) {
		return EINVAL;
	}

	if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
		error = ENOBUFS;
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
		goto bye;
	}
	if ((flags & CTL_DATA_EOR)) {
		m->m_flags |= M_EOR;
	}

	so_recv_data_stat(so, m, 0);
	if (sbappend_nodrop(&so->so_rcv, m) != 0) {
		if ((flags & CTL_DATA_NOWAKEUP) == 0) {
			sorwakeup(so);
		}
	} else {
		error = ENOBUFS;
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
	}
bye:
	if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) {
		printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
		    __func__, error, len,
		    so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);
	}

	/* Drops the kcb_find_socket() reference as well as the lock */
	socket_unlock(so, 1);
	if (error != 0) {
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
	}

	return error;
}
1034
1035/*
1036 * Compute space occupied by mbuf like sbappendrecord
1037 */
1038static int
1039m_space(struct mbuf *m)
1040{
1041 int space = 0;
1042 struct mbuf *nxt;
1043
0a7de745 1044 for (nxt = m; nxt != NULL; nxt = nxt->m_next) {
fe8ab488 1045 space += nxt->m_len;
0a7de745 1046 }
fe8ab488 1047
0a7de745 1048 return space;
fe8ab488
A
1049}
1050
/*
 * Enqueue a list of mbuf packets (linked through m_nextpkt) as individual
 * records on the receive buffer of the kernel-control socket.
 *
 * Not supported on SOCK_STREAM controls and CTL_DATA_EOR is invalid for
 * lists.  Packets are appended one at a time; on the first failure the
 * loop stops and 'm' points at the first packet NOT enqueued.  If the
 * caller passed 'm_remain' it gets back that remainder (caller keeps
 * ownership); otherwise the remainder is freed here.
 */
errno_t
ctl_enqueuembuf_list(void *kctlref, u_int32_t unit, struct mbuf *m_list,
    u_int32_t flags, struct mbuf **m_remain)
{
	struct socket *so = NULL;
	errno_t error = 0;
	struct mbuf *m, *nextpkt;
	int needwakeup = 0;
	int len = 0;
	u_int32_t kctlflags;

	/*
	 * Need to point the beginning of the list in case of early exit
	 */
	m = m_list;

	/*
	 * kcb_find_socket takes the socket lock with a reference
	 */
	so = kcb_find_socket(kctlref, unit, &kctlflags);
	if (so == NULL) {
		error = EINVAL;
		goto done;
	}

	if (kctlflags & CTL_FLAG_REG_SOCK_STREAM) {
		error = EOPNOTSUPP;
		goto done;
	}
	if (flags & CTL_DATA_EOR) {
		error = EINVAL;
		goto done;
	}

	for (m = m_list; m != NULL; m = nextpkt) {
		nextpkt = m->m_nextpkt;

		if (m->m_pkthdr.len == 0 && ctl_debug) {
			printf("%s: %llx m_pkthdr.len is 0",
			    __func__, (uint64_t)VM_KERNEL_ADDRPERM(m));
		}

		/*
		 * The mbuf is either appended or freed by sbappendrecord()
		 * so it's not reliable from a data standpoint
		 */
		len = m_space(m);
		if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
			error = ENOBUFS;
			OSIncrementAtomic64(
				(SInt64 *)&kctlstat.kcs_enqueue_fullsock);
			break;
		} else {
			/*
			 * Unlink from the list, m is on its own
			 */
			m->m_nextpkt = NULL;
			so_recv_data_stat(so, m, 0);
			if (sbappendrecord_nodrop(&so->so_rcv, m) != 0) {
				needwakeup = 1;
			} else {
				/*
				 * We free or return the remaining
				 * mbufs in the list
				 */
				m = nextpkt;
				error = ENOBUFS;
				OSIncrementAtomic64(
					(SInt64 *)&kctlstat.kcs_enqueue_fullsock);
				break;
			}
		}
	}
	/* Single wakeup after the batch, unless the caller suppressed it */
	if (needwakeup && (flags & CTL_DATA_NOWAKEUP) == 0) {
		sorwakeup(so);
	}

done:
	if (so != NULL) {
		if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) {
			printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
			    __func__, error, len,
			    so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);
		}

		socket_unlock(so, 1);
	}
	if (m_remain) {
		/* Hand the un-enqueued tail back to the caller */
		*m_remain = m;

		if (m != NULL && socket_debug && so != NULL &&
		    (so->so_options & SO_DEBUG)) {
			struct mbuf *n;

			printf("%s m_list %llx\n", __func__,
			    (uint64_t) VM_KERNEL_ADDRPERM(m_list));
			for (n = m; n != NULL; n = n->m_nextpkt) {
				printf(" remain %llx m_next %llx\n",
				    (uint64_t) VM_KERNEL_ADDRPERM(n),
				    (uint64_t) VM_KERNEL_ADDRPERM(n->m_next));
			}
		}
	} else {
		/* Caller did not ask for the remainder: drop it */
		if (m != NULL) {
			m_freem_list(m);
		}
	}
	if (error != 0) {
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
	}
	return error;
}
9bccf70c 1163
/*
 * Copy 'len' bytes from the caller's buffer 'data' into a freshly
 * allocated mbuf packet and enqueue it on the kernel-control socket's
 * receive buffer.  Unlike ctl_enqueuembuf(), the caller keeps ownership
 * of 'data' in all cases.
 */
errno_t
ctl_enqueuedata(void *kctlref, u_int32_t unit, void *data, size_t len,
    u_int32_t flags)
{
	struct socket *so;
	struct mbuf *m;
	errno_t error = 0;
	unsigned int num_needed;
	struct mbuf *n;
	size_t curlen = 0;
	u_int32_t kctlflags;

	so = kcb_find_socket(kctlref, unit, &kctlflags);
	if (so == NULL) {
		return EINVAL;
	}

	/* Check space before allocating so we fail cheaply */
	if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
		error = ENOBUFS;
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
		goto bye;
	}

	num_needed = 1;
	m = m_allocpacket_internal(&num_needed, len, NULL, M_NOWAIT, 1, 0);
	if (m == NULL) {
		kctlstat.kcs_enqdata_mb_alloc_fail++;
		if (ctl_debug) {
			printf("%s: m_allocpacket_internal(%lu) failed\n",
			    __func__, len);
		}
		error = ENOMEM;
		goto bye;
	}

	/* Scatter the caller's buffer across the allocated mbuf chain */
	for (n = m; n != NULL; n = n->m_next) {
		size_t mlen = mbuf_maxlen(n);

		if (mlen + curlen > len) {
			mlen = len - curlen;
		}
		n->m_len = (int32_t)mlen;
		bcopy((char *)data + curlen, n->m_data, mlen);
		curlen += mlen;
	}
	mbuf_pkthdr_setlen(m, curlen);

	if ((flags & CTL_DATA_EOR)) {
		m->m_flags |= M_EOR;
	}
	so_recv_data_stat(so, m, 0);
	/*
	 * No need to call the "nodrop" variant of sbappend
	 * because the mbuf is local to the scope of the function
	 */
	if (sbappend(&so->so_rcv, m) != 0) {
		if ((flags & CTL_DATA_NOWAKEUP) == 0) {
			sorwakeup(so);
		}
	} else {
		kctlstat.kcs_enqdata_sbappend_fail++;
		error = ENOBUFS;
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
	}

bye:
	if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) {
		printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
		    __func__, error, (int)len,
		    so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);
	}

	socket_unlock(so, 1);
	if (error != 0) {
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
	}
	return error;
}
9bccf70c 1242
3e170ce0
A
1243errno_t
1244ctl_getenqueuepacketcount(kern_ctl_ref kctlref, u_int32_t unit, u_int32_t *pcnt)
1245{
0a7de745 1246 struct socket *so;
3e170ce0
A
1247 u_int32_t cnt;
1248 struct mbuf *m1;
1249
0a7de745
A
1250 if (pcnt == NULL) {
1251 return EINVAL;
1252 }
3e170ce0
A
1253
1254 so = kcb_find_socket(kctlref, unit, NULL);
1255 if (so == NULL) {
0a7de745 1256 return EINVAL;
3e170ce0
A
1257 }
1258
1259 cnt = 0;
1260 m1 = so->so_rcv.sb_mb;
1261 while (m1 != NULL) {
1262 if (m1->m_type == MT_DATA ||
1263 m1->m_type == MT_HEADER ||
0a7de745 1264 m1->m_type == MT_OOBDATA) {
3e170ce0 1265 cnt += 1;
0a7de745 1266 }
3e170ce0
A
1267 m1 = m1->m_nextpkt;
1268 }
1269 *pcnt = cnt;
1270
1271 socket_unlock(so, 1);
1272
0a7de745 1273 return 0;
3e170ce0 1274}
55e303ae 1275
fe8ab488 1276errno_t
91447636
A
1277ctl_getenqueuespace(kern_ctl_ref kctlref, u_int32_t unit, size_t *space)
1278{
0a7de745 1279 struct socket *so;
2d21ac55 1280 long avail;
fe8ab488 1281
0a7de745
A
1282 if (space == NULL) {
1283 return EINVAL;
1284 }
fe8ab488 1285
3e170ce0
A
1286 so = kcb_find_socket(kctlref, unit, NULL);
1287 if (so == NULL) {
0a7de745 1288 return EINVAL;
3e170ce0 1289 }
fe8ab488 1290
2d21ac55
A
1291 avail = sbspace(&so->so_rcv);
1292 *space = (avail < 0) ? 0 : avail;
91447636 1293 socket_unlock(so, 1);
fe8ab488 1294
0a7de745 1295 return 0;
fe8ab488
A
1296}
1297
1298errno_t
1299ctl_getenqueuereadable(kern_ctl_ref kctlref, u_int32_t unit,
1300 u_int32_t *difference)
1301{
0a7de745 1302 struct socket *so;
fe8ab488 1303
0a7de745
A
1304 if (difference == NULL) {
1305 return EINVAL;
1306 }
fe8ab488 1307
3e170ce0
A
1308 so = kcb_find_socket(kctlref, unit, NULL);
1309 if (so == NULL) {
0a7de745 1310 return EINVAL;
3e170ce0 1311 }
fe8ab488
A
1312
1313 if (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat) {
1314 *difference = 0;
1315 } else {
1316 *difference = (so->so_rcv.sb_lowat - so->so_rcv.sb_cc);
1317 }
1318 socket_unlock(so, 1);
1319
0a7de745 1320 return 0;
9bccf70c
A
1321}
1322
/*
 * setsockopt/getsockopt handler for kernel-control sockets at level
 * SYSPROTO_CONTROL.  Dispatches to the registered control's setopt/getopt
 * callbacks.  The socket lock is dropped around the callback (the kcb
 * use count taken below keeps the control block alive across that
 * window) and reacquired afterwards.
 */
static int
ctl_ctloutput(struct socket *so, struct sockopt *sopt)
{
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
	struct kctl *kctl;
	int error = 0;
	void *data = NULL;
	size_t data_len = 0;
	size_t len;

	if (sopt->sopt_level != SYSPROTO_CONTROL) {
		return EINVAL;
	}

	if (kcb == NULL) {      /* sanity check */
		return ENOTCONN;
	}

	if ((kctl = kcb->kctl) == NULL) {
		return EINVAL;
	}

	/* Pin the kcb so it cannot be torn down while unlocked below */
	lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
	ctl_kcb_increment_use_count(kcb, mtx_held);

	switch (sopt->sopt_dir) {
	case SOPT_SET:
		if (kctl->setopt == NULL) {
			error = ENOTSUP;
			goto out;
		}
		if (sopt->sopt_valsize != 0) {
			data_len = sopt->sopt_valsize;
			data = kheap_alloc(KHEAP_TEMP, data_len, Z_WAITOK | Z_ZERO);
			if (data == NULL) {
				data_len = 0;
				error = ENOMEM;
				goto out;
			}
			error = sooptcopyin(sopt, data,
			    sopt->sopt_valsize, sopt->sopt_valsize);
		}
		if (error == 0) {
			/* Callback runs without the socket lock held */
			socket_unlock(so, 0);
			error = (*kctl->setopt)(kctl->kctlref,
			    kcb->sac.sc_unit, kcb->userdata, sopt->sopt_name,
			    data, sopt->sopt_valsize);
			socket_lock(so, 0);
		}

		kheap_free(KHEAP_TEMP, data, data_len);
		break;

	case SOPT_GET:
		if (kctl->getopt == NULL) {
			error = ENOTSUP;
			goto out;
		}

		if (sopt->sopt_valsize && sopt->sopt_val) {
			data_len = sopt->sopt_valsize;
			data = kheap_alloc(KHEAP_TEMP, data_len, Z_WAITOK | Z_ZERO);
			if (data == NULL) {
				data_len = 0;
				error = ENOMEM;
				goto out;
			}
			/*
			 * 4108337 - copy user data in case the
			 * kernel control needs it
			 */
			error = sooptcopyin(sopt, data,
			    sopt->sopt_valsize, sopt->sopt_valsize);
		}

		if (error == 0) {
			len = sopt->sopt_valsize;
			socket_unlock(so, 0);
			error = (*kctl->getopt)(kctl->kctlref, kcb->sac.sc_unit,
			    kcb->userdata, sopt->sopt_name,
			    data, &len);
			/* A callback writing past the user buffer is fatal */
			if (data != NULL && len > sopt->sopt_valsize) {
				panic_plain("ctl_ctloutput: ctl %s returned "
				    "len (%lu) > sopt_valsize (%lu)\n",
				    kcb->kctl->name, len,
				    sopt->sopt_valsize);
			}
			socket_lock(so, 0);
			if (error == 0) {
				if (data != NULL) {
					error = sooptcopyout(sopt, data, len);
				} else {
					/* Size-probe only: report the length */
					sopt->sopt_valsize = len;
				}
			}
		}

		kheap_free(KHEAP_TEMP, data, data_len);
		break;
	}

out:
	ctl_kcb_decrement_use_count(kcb);
	return error;
}
9bccf70c 1428
/*
 * ioctl handler for kernel-control sockets.
 *
 * CTLIOCGCOUNT: return the number of registered controllers.
 * CTLIOCGINFO:  translate a controller name to its numeric id.
 * Anything else: ENOTSUP.
 */
static int
ctl_ioctl(struct socket *so, u_long cmd, caddr_t data,
    struct ifnet *ifp, struct proc *p)
{
#pragma unused(so, ifp, p)
	int error = ENOTSUP;

	switch (cmd) {
	/* get the number of controllers */
	case CTLIOCGCOUNT: {
		struct kctl *kctl;
		u_int32_t n = 0;

		lck_mtx_lock(&ctl_mtx);
		TAILQ_FOREACH(kctl, &ctl_head, next)
		n++;
		lck_mtx_unlock(&ctl_mtx);

		bcopy(&n, data, sizeof(n));
		error = 0;
		break;
	}
	case CTLIOCGINFO: {
		struct ctl_info ctl_info;
		struct kctl *kctl = 0;
		size_t name_len;

		bcopy(data, &ctl_info, sizeof(ctl_info));
		/* Name must be non-empty and NUL-terminable */
		name_len = strnlen(ctl_info.ctl_name, MAX_KCTL_NAME);

		if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) {
			error = EINVAL;
			break;
		}
		lck_mtx_lock(&ctl_mtx);
		kctl = ctl_find_by_name(ctl_info.ctl_name);
		lck_mtx_unlock(&ctl_mtx);
		if (kctl == 0) {
			error = ENOENT;
			break;
		}
		ctl_info.ctl_id = kctl->id;
		bcopy(&ctl_info, data, sizeof(ctl_info));
		error = 0;
		break;
	}

	/* add controls to get list of NKEs */
	}

	return error;
}
9bccf70c 1481
/*
 * Grow the global kctl reference table by KCTL_TBL_INC entries.
 *
 * Called and returns with ctl_mtx held.  Only one thread grows at a
 * time: latecomers msleep() on kctl_tbl_growing until the grower clears
 * the flag and wakes them.  The allocation is done with ctl_mtx dropped,
 * so callers must re-validate any table state after this returns.
 */
static void
kctl_tbl_grow(void)
{
	struct kctl **new_table;
	uintptr_t new_size;

	lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_OWNED);

	if (kctl_tbl_growing) {
		/* Another thread is allocating */
		kctl_tbl_growing_waiting++;

		do {
			(void) msleep((caddr_t) &kctl_tbl_growing, &ctl_mtx,
			    PSOCK | PCATCH, "kctl_tbl_growing", 0);
		} while (kctl_tbl_growing);
		kctl_tbl_growing_waiting--;
	}
	/* Another thread grew the table */
	if (kctl_table != NULL && kctl_tbl_count < kctl_tbl_size) {
		return;
	}

	/* Verify we have a sane size */
	if (kctl_tbl_size + KCTL_TBL_INC >= UINT16_MAX) {
		kctlstat.kcs_tbl_size_too_big++;
		if (ctl_debug) {
			printf("%s kctl_tbl_size %lu too big\n",
			    __func__, kctl_tbl_size);
		}
		return;
	}
	kctl_tbl_growing = 1;

	new_size = kctl_tbl_size + KCTL_TBL_INC;

	/* Drop the mutex for the (possibly blocking) allocation */
	lck_mtx_unlock(&ctl_mtx);
	new_table = kheap_alloc(KHEAP_DEFAULT, sizeof(struct kctl *) * new_size,
	    Z_WAITOK | Z_ZERO);
	lck_mtx_lock(&ctl_mtx);

	if (new_table != NULL) {
		if (kctl_table != NULL) {
			bcopy(kctl_table, new_table,
			    kctl_tbl_size * sizeof(struct kctl *));

			kheap_free(KHEAP_DEFAULT, kctl_table,
			    sizeof(struct kctl *) * kctl_tbl_size);
		}
		kctl_table = new_table;
		kctl_tbl_size = new_size;
	}

	kctl_tbl_growing = 0;

	if (kctl_tbl_growing_waiting) {
		wakeup(&kctl_tbl_growing);
	}
}
1541
1542#define KCTLREF_INDEX_MASK 0x0000FFFF
1543#define KCTLREF_GENCNT_MASK 0xFFFF0000
1544#define KCTLREF_GENCNT_SHIFT 16
1545
/*
 * Allocate an opaque reference for a newly registered kctl and insert it
 * into the global table.  Called with ctl_mtx held (may drop/retake it
 * via kctl_tbl_grow()).  The returned reference encodes the table index
 * plus one in the low 16 bits and a generation count in the high 16 bits
 * so stale references are detectable after deregistration.
 */
static kern_ctl_ref
kctl_make_ref(struct kctl *kctl)
{
	uintptr_t i;

	lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_OWNED);

	if (kctl_tbl_count >= kctl_tbl_size) {
		kctl_tbl_grow();
	}

	kctl->kctlref = NULL;
	for (i = 0; i < kctl_tbl_size; i++) {
		if (kctl_table[i] == NULL) {
			uintptr_t ref;

			/*
			 * Reference is index plus one
			 */
			kctl_ref_gencnt += 1;

			/*
			 * Add generation count as salt to reference to prevent
			 * use after deregister
			 */
			ref = ((kctl_ref_gencnt << KCTLREF_GENCNT_SHIFT) &
			    KCTLREF_GENCNT_MASK) +
			    ((i + 1) & KCTLREF_INDEX_MASK);

			kctl->kctlref = (void *)(ref);
			kctl_table[i] = kctl;
			kctl_tbl_count++;
			break;
		}
	}

	if (kctl->kctlref == NULL) {
		panic("%s no space in table", __func__);
	}

	if (ctl_debug > 0) {
		printf("%s %p for %p\n",
		    __func__, kctl->kctlref, kctl);
	}

	return kctl->kctlref;
}
1593
1594static void
1595kctl_delete_ref(kern_ctl_ref kctlref)
1596{
1597 /*
1598 * Reference is index plus one
1599 */
1600 uintptr_t i = (((uintptr_t)kctlref) & KCTLREF_INDEX_MASK) - 1;
1601
c3c9b80d 1602 lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_OWNED);
3e170ce0
A
1603
1604 if (i < kctl_tbl_size) {
1605 struct kctl *kctl = kctl_table[i];
1606
1607 if (kctl->kctlref == kctlref) {
1608 kctl_table[i] = NULL;
1609 kctl_tbl_count--;
1610 } else {
1611 kctlstat.kcs_bad_kctlref++;
1612 }
1613 } else {
1614 kctlstat.kcs_bad_kctlref++;
1615 }
1616}
1617
1618static struct kctl *
1619kctl_from_ref(kern_ctl_ref kctlref)
1620{
1621 /*
1622 * Reference is index plus one
1623 */
1624 uintptr_t i = (((uintptr_t)kctlref) & KCTLREF_INDEX_MASK) - 1;
1625 struct kctl *kctl = NULL;
1626
c3c9b80d 1627 lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_OWNED);
3e170ce0
A
1628
1629 if (i >= kctl_tbl_size) {
1630 kctlstat.kcs_bad_kctlref++;
0a7de745 1631 return NULL;
3e170ce0
A
1632 }
1633 kctl = kctl_table[i];
1634 if (kctl->kctlref != kctlref) {
1635 kctlstat.kcs_bad_kctlref++;
0a7de745 1636 return NULL;
3e170ce0 1637 }
0a7de745 1638 return kctl;
3e170ce0
A
1639}
1640
/*
 * Register/unregister a NKE
 */
/*
 * ctl_register: register a kernel control with the system.
 *
 * Validates the caller-supplied kern_ctl_reg (a connect callback and a
 * non-empty, bounded name are mandatory), allocates the kctl, assigns an
 * id (dynamic or caller-fixed, see block comment below), copies the
 * callbacks and buffer sizes, inserts the kctl into the id-ordered global
 * list, and returns the opaque reference in *kctlref.
 * Returns EINVAL, ENOMEM or EEXIST on failure.
 */
errno_t
ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref)
{
	struct kctl *kctl = NULL;
	struct kctl *kctl_next = NULL;
	u_int32_t id = 1;
	size_t name_len;
	int is_extended = 0;
	int is_setup = 0;

	if (userkctl == NULL) {         /* sanity check */
		return EINVAL;
	}
	if (userkctl->ctl_connect == NULL) {
		return EINVAL;
	}
	name_len = strlen(userkctl->ctl_name);
	if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) {
		return EINVAL;
	}

	kctl = kheap_alloc(KHEAP_DEFAULT, sizeof(struct kctl), Z_WAITOK | Z_ZERO);
	if (kctl == NULL) {
		return ENOMEM;
	}

	lck_mtx_lock(&ctl_mtx);

	if (kctl_make_ref(kctl) == NULL) {
		lck_mtx_unlock(&ctl_mtx);
		kheap_free(KHEAP_DEFAULT, kctl, sizeof(struct kctl));
		return ENOMEM;
	}

	/*
	 * Kernel Control IDs
	 *
	 * CTL_FLAG_REG_ID_UNIT indicates the control ID and unit number are
	 * static. If they do not exist, add them to the list in order. If the
	 * flag is not set, we must find a new unique value. We assume the
	 * list is in order. We find the last item in the list and add one. If
	 * this leads to wrapping the id around, we start at the front of the
	 * list and look for a gap.
	 */

	if ((userkctl->ctl_flags & CTL_FLAG_REG_ID_UNIT) == 0) {
		/* Must dynamically assign an unused ID */

		/* Verify the same name isn't already registered */
		if (ctl_find_by_name(userkctl->ctl_name) != NULL) {
			kctl_delete_ref(kctl->kctlref);
			lck_mtx_unlock(&ctl_mtx);
			kheap_free(KHEAP_DEFAULT, kctl, sizeof(struct kctl));
			return EEXIST;
		}

		/* Start with 1 in case the list is empty */
		id = 1;
		kctl_next = TAILQ_LAST(&ctl_head, kctl_list);

		if (kctl_next != NULL) {
			/* List was not empty, add one to the last item */
			id = kctl_next->id + 1;
			kctl_next = NULL;

			/*
			 * If this wrapped the id number, start looking at
			 * the front of the list for an unused id.
			 */
			if (id == 0) {
				/* Find the next unused ID */
				id = 1;

				TAILQ_FOREACH(kctl_next, &ctl_head, next) {
					if (kctl_next->id > id) {
						/* We found a gap */
						break;
					}

					id = kctl_next->id + 1;
				}
			}
		}

		userkctl->ctl_id = id;
		kctl->id = id;
		kctl->reg_unit = -1;
	} else {
		/* Static id: find the insertion point that keeps id order */
		TAILQ_FOREACH(kctl_next, &ctl_head, next) {
			if (kctl_next->id > userkctl->ctl_id) {
				break;
			}
		}

		if (ctl_find_by_id_unit(userkctl->ctl_id, userkctl->ctl_unit)) {
			kctl_delete_ref(kctl->kctlref);
			lck_mtx_unlock(&ctl_mtx);
			kheap_free(KHEAP_DEFAULT, kctl, sizeof(struct kctl));
			return EEXIST;
		}
		kctl->id = userkctl->ctl_id;
		kctl->reg_unit = userkctl->ctl_unit;
	}

	is_extended = (userkctl->ctl_flags & CTL_FLAG_REG_EXTENDED);
	is_setup = (userkctl->ctl_flags & CTL_FLAG_REG_SETUP);

	strlcpy(kctl->name, userkctl->ctl_name, MAX_KCTL_NAME);
	kctl->flags = userkctl->ctl_flags;

	/*
	 * Let the caller know the default send and receive sizes
	 */
	if (userkctl->ctl_sendsize == 0) {
		kctl->sendbufsize = CTL_SENDSIZE;
		userkctl->ctl_sendsize = kctl->sendbufsize;
	} else {
		kctl->sendbufsize = userkctl->ctl_sendsize;
	}
	if (userkctl->ctl_recvsize == 0) {
		kctl->recvbufsize = CTL_RECVSIZE;
		userkctl->ctl_recvsize = kctl->recvbufsize;
	} else {
		kctl->recvbufsize = userkctl->ctl_recvsize;
	}

	if (is_setup) {
		kctl->setup = userkctl->ctl_setup;
	}
	kctl->bind = userkctl->ctl_bind;
	kctl->connect = userkctl->ctl_connect;
	kctl->disconnect = userkctl->ctl_disconnect;
	kctl->send = userkctl->ctl_send;
	kctl->setopt = userkctl->ctl_setopt;
	kctl->getopt = userkctl->ctl_getopt;
	if (is_extended) {
		kctl->rcvd = userkctl->ctl_rcvd;
		kctl->send_list = userkctl->ctl_send_list;
	}

	TAILQ_INIT(&kctl->kcb_head);

	/* kctl_next (when set) marks the first entry with a larger id */
	if (kctl_next) {
		TAILQ_INSERT_BEFORE(kctl_next, kctl, next);
	} else {
		TAILQ_INSERT_TAIL(&ctl_head, kctl, next);
	}

	kctlstat.kcs_reg_count++;
	kctlstat.kcs_gencnt++;

	lck_mtx_unlock(&ctl_mtx);

	*kctlref = kctl->kctlref;

	ctl_post_msg(KEV_CTL_REGISTERED, kctl->id);
	return 0;
}
1802
/*
 * ctl_deregister: remove a previously registered kernel control.
 *
 * Fails with EINVAL for a bad/stale reference and EBUSY while any client
 * socket is still attached (non-empty kcb_head).  On success the kctl is
 * unlinked, its reference slot cleared, a KEV_CTL_DEREGISTERED event is
 * posted, and the kctl memory freed.
 */
errno_t
ctl_deregister(void *kctlref)
{
	struct kctl *kctl;

	lck_mtx_lock(&ctl_mtx);
	if ((kctl = kctl_from_ref(kctlref)) == NULL) {
		kctlstat.kcs_bad_kctlref++;
		lck_mtx_unlock(&ctl_mtx);
		if (ctl_debug != 0) {
			printf("%s invalid kctlref %p\n",
			    __func__, kctlref);
		}
		return EINVAL;
	}

	/* Cannot deregister while connections exist */
	if (!TAILQ_EMPTY(&kctl->kcb_head)) {
		lck_mtx_unlock(&ctl_mtx);
		return EBUSY;
	}

	TAILQ_REMOVE(&ctl_head, kctl, next);

	kctlstat.kcs_reg_count--;
	kctlstat.kcs_gencnt++;

	kctl_delete_ref(kctl->kctlref);
	lck_mtx_unlock(&ctl_mtx);

	ctl_post_msg(KEV_CTL_DEREGISTERED, kctl->id);
	kheap_free(KHEAP_DEFAULT, kctl, sizeof(struct kctl));
	return 0;
}
1836
91447636
A
1837/*
1838 * Must be called with global ctl_mtx lock taked
1839 */
1840static struct kctl *
1841ctl_find_by_name(const char *name)
fe8ab488 1842{
0a7de745 1843 struct kctl *kctl;
fe8ab488 1844
c3c9b80d 1845 lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_OWNED);
9bccf70c 1846
fe8ab488 1847 TAILQ_FOREACH(kctl, &ctl_head, next)
0a7de745
A
1848 if (strncmp(kctl->name, name, sizeof(kctl->name)) == 0) {
1849 return kctl;
1850 }
9bccf70c 1851
0a7de745 1852 return NULL;
91447636 1853}
9bccf70c 1854
6d2010ae
A
1855u_int32_t
1856ctl_id_by_name(const char *name)
1857{
0a7de745
A
1858 u_int32_t ctl_id = 0;
1859 struct kctl *kctl;
fe8ab488 1860
c3c9b80d 1861 lck_mtx_lock(&ctl_mtx);
fe8ab488 1862 kctl = ctl_find_by_name(name);
0a7de745 1863 if (kctl) {
fe8ab488 1864 ctl_id = kctl->id;
0a7de745 1865 }
c3c9b80d 1866 lck_mtx_unlock(&ctl_mtx);
fe8ab488 1867
0a7de745 1868 return ctl_id;
6d2010ae
A
1869}
1870
1871errno_t
fe8ab488 1872ctl_name_by_id(u_int32_t id, char *out_name, size_t maxsize)
6d2010ae 1873{
0a7de745 1874 int found = 0;
6d2010ae 1875 struct kctl *kctl;
fe8ab488 1876
c3c9b80d 1877 lck_mtx_lock(&ctl_mtx);
fe8ab488 1878 TAILQ_FOREACH(kctl, &ctl_head, next) {
0a7de745 1879 if (kctl->id == id) {
fe8ab488 1880 break;
0a7de745 1881 }
fe8ab488
A
1882 }
1883
3e170ce0 1884 if (kctl) {
0a7de745 1885 if (maxsize > MAX_KCTL_NAME) {
fe8ab488 1886 maxsize = MAX_KCTL_NAME;
0a7de745 1887 }
fe8ab488
A
1888 strlcpy(out_name, kctl->name, maxsize);
1889 found = 1;
1890 }
c3c9b80d 1891 lck_mtx_unlock(&ctl_mtx);
fe8ab488 1892
0a7de745 1893 return found ? 0 : ENOENT;
6d2010ae
A
1894}
1895
91447636
A
1896/*
1897 * Must be called with global ctl_mtx lock taked
1898 *
1899 */
1900static struct kctl *
1901ctl_find_by_id_unit(u_int32_t id, u_int32_t unit)
fe8ab488 1902{
0a7de745 1903 struct kctl *kctl;
fe8ab488 1904
c3c9b80d 1905 lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_OWNED);
fe8ab488
A
1906
1907 TAILQ_FOREACH(kctl, &ctl_head, next) {
0a7de745
A
1908 if (kctl->id == id && (kctl->flags & CTL_FLAG_REG_ID_UNIT) == 0) {
1909 return kctl;
1910 } else if (kctl->id == id && kctl->reg_unit == unit) {
1911 return kctl;
1912 }
fe8ab488 1913 }
0a7de745 1914 return NULL;
9bccf70c
A
1915}
1916
1917/*
91447636 1918 * Must be called with kernel controller lock taken
9bccf70c 1919 */
91447636
A
1920static struct ctl_cb *
1921kcb_find(struct kctl *kctl, u_int32_t unit)
fe8ab488 1922{
0a7de745 1923 struct ctl_cb *kcb;
9bccf70c 1924
c3c9b80d 1925 lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_OWNED);
9bccf70c 1926
fe8ab488 1927 TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
0a7de745
A
1928 if (kcb->sac.sc_unit == unit) {
1929 return kcb;
1930 }
fe8ab488 1931
0a7de745 1932 return NULL;
9bccf70c
A
1933}
1934
/*
 * Resolve (kctlref, unit) to the client socket and return it LOCKED with
 * a use-count reference (callers must socket_unlock(so, 1)).
 *
 * Lock ordering is socket lock before ctl_mtx, but validation needs
 * ctl_mtx first; so the kcb usecount is bumped under ctl_mtx to pin the
 * socket, ctl_mtx is dropped to take the socket lock, then ctl_mtx is
 * retaken to re-validate that the control was not deregistered and the
 * kcb not disconnected in the window.  Returns NULL on any failure.
 */
static struct socket *
kcb_find_socket(kern_ctl_ref kctlref, u_int32_t unit, u_int32_t *kctlflags)
{
	struct socket *so = NULL;
	struct ctl_cb *kcb;
	void *lr_saved;
	struct kctl *kctl;
	int i;

	lr_saved = __builtin_return_address(0);

	lck_mtx_lock(&ctl_mtx);
	/*
	 * First validate the kctlref
	 */
	if ((kctl = kctl_from_ref(kctlref)) == NULL) {
		kctlstat.kcs_bad_kctlref++;
		lck_mtx_unlock(&ctl_mtx);
		if (ctl_debug != 0) {
			printf("%s invalid kctlref %p\n",
			    __func__, kctlref);
		}
		return NULL;
	}

	kcb = kcb_find(kctl, unit);
	if (kcb == NULL || kcb->kctl != kctl || (so = kcb->so) == NULL) {
		lck_mtx_unlock(&ctl_mtx);
		return NULL;
	}
	/*
	 * This prevents the socket from being closed
	 */
	kcb->usecount++;
	/*
	 * Respect lock ordering: socket before ctl_mtx
	 */
	lck_mtx_unlock(&ctl_mtx);

	socket_lock(so, 1);
	/*
	 * The socket lock history is more useful if we store
	 * the address of the caller.
	 */
	i = (so->next_lock_lr + SO_LCKDBG_MAX - 1) % SO_LCKDBG_MAX;
	so->lock_lr[i] = lr_saved;

	lck_mtx_lock(&ctl_mtx);

	/* Re-validate: the control may have gone away while unlocked */
	if ((kctl = kctl_from_ref(kctlref)) == NULL || kcb->kctl == NULL) {
		lck_mtx_unlock(&ctl_mtx);
		socket_unlock(so, 1);
		so = NULL;
		lck_mtx_lock(&ctl_mtx);
	} else if (kctlflags != NULL) {
		*kctlflags = kctl->flags;
	}

	kcb->usecount--;
	/* A disconnecting thread may be waiting for the usecount to drop */
	if (kcb->usecount == 0) {
		wakeup((event_t)&kcb->usecount);
	}

	lck_mtx_unlock(&ctl_mtx);

	return so;
}
2002
fe8ab488
A
2003static void
2004ctl_post_msg(u_int32_t event_code, u_int32_t id)
9bccf70c 2005{
0a7de745
A
2006 struct ctl_event_data ctl_ev_data;
2007 struct kev_msg ev_msg;
fe8ab488 2008
c3c9b80d 2009 lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_NOTOWNED);
fe8ab488
A
2010
2011 bzero(&ev_msg, sizeof(struct kev_msg));
2012 ev_msg.vendor_code = KEV_VENDOR_APPLE;
2013
2014 ev_msg.kev_class = KEV_SYSTEM_CLASS;
2015 ev_msg.kev_subclass = KEV_CTL_SUBCLASS;
2016 ev_msg.event_code = event_code;
2017
2018 /* common nke subclass data */
2019 bzero(&ctl_ev_data, sizeof(ctl_ev_data));
2020 ctl_ev_data.ctl_id = id;
2021 ev_msg.dv[0].data_ptr = &ctl_ev_data;
2022 ev_msg.dv[0].data_length = sizeof(ctl_ev_data);
2023
2024 ev_msg.dv[1].data_length = 0;
2025
2026 kev_post_msg(&ev_msg);
9bccf70c
A
2027}
2028
/*
 * Protocol lock entry point for kernel-control sockets: take the
 * per-connection mutex stored in the ctl_cb, optionally take a usecount
 * reference, and record the caller for lock-history debugging.  Panics
 * (with lock history) on a missing PCB or negative usecount.
 */
static int
ctl_lock(struct socket *so, int refcount, void *lr)
{
	void *lr_saved;

	if (lr == NULL) {
		lr_saved = __builtin_return_address(0);
	} else {
		lr_saved = lr;
	}

	if (so->so_pcb != NULL) {
		lck_mtx_lock(&((struct ctl_cb *)so->so_pcb)->mtx);
	} else {
		panic("ctl_lock: so=%p NO PCB! lr=%p lrh= %s\n",
		    so, lr_saved, solockhistory_nr(so));
		/* NOTREACHED */
	}

	if (so->so_usecount < 0) {
		panic("ctl_lock: so=%p so_pcb=%p lr=%p ref=%x lrh= %s\n",
		    so, so->so_pcb, lr_saved, so->so_usecount,
		    solockhistory_nr(so));
		/* NOTREACHED */
	}

	if (refcount) {
		so->so_usecount++;
	}

	/* Record the caller in the socket's circular lock history */
	so->lock_lr[so->next_lock_lr] = lr_saved;
	so->next_lock_lr = (so->next_lock_lr + 1) % SO_LCKDBG_MAX;
	return 0;
}
2063
2064static int
b0d623f7 2065ctl_unlock(struct socket *so, int refcount, void *lr)
91447636 2066{
b0d623f7
A
2067 void *lr_saved;
2068 lck_mtx_t *mutex_held;
2069
0a7de745 2070 if (lr == NULL) {
b0d623f7 2071 lr_saved = __builtin_return_address(0);
0a7de745 2072 } else {
b0d623f7 2073 lr_saved = lr;
0a7de745 2074 }
b0d623f7 2075
39037602 2076#if (MORE_KCTLLOCK_DEBUG && (DEVELOPMENT || DEBUG))
fe8ab488
A
2077 printf("ctl_unlock: so=%llx sopcb=%x lock=%llx ref=%u lr=%llx\n",
2078 (uint64_t)VM_KERNEL_ADDRPERM(so),
2079 (uint64_t)VM_KERNEL_ADDRPERM(so->so_pcb,
c3c9b80d 2080 (uint64_t)VM_KERNEL_ADDRPERM(&((struct ctl_cb *)so->so_pcb)->mtx),
fe8ab488 2081 so->so_usecount, (uint64_t)VM_KERNEL_ADDRPERM(lr_saved));
39037602 2082#endif /* (MORE_KCTLLOCK_DEBUG && (DEVELOPMENT || DEBUG)) */
0a7de745 2083 if (refcount) {
91447636 2084 so->so_usecount--;
0a7de745 2085 }
b0d623f7
A
2086
2087 if (so->so_usecount < 0) {
fe8ab488 2088 panic("ctl_unlock: so=%p usecount=%x lrh= %s\n",
b0d623f7
A
2089 so, so->so_usecount, solockhistory_nr(so));
2090 /* NOTREACHED */
2091 }
91447636 2092 if (so->so_pcb == NULL) {
fe8ab488 2093 panic("ctl_unlock: so=%p NO PCB usecount=%x lr=%p lrh= %s\n",
0a7de745
A
2094 so, so->so_usecount, (void *)lr_saved,
2095 solockhistory_nr(so));
b0d623f7 2096 /* NOTREACHED */
91447636 2097 }
c3c9b80d 2098 mutex_held = &((struct ctl_cb *)so->so_pcb)->mtx;
b0d623f7 2099
0a7de745
A
2100 lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
2101 so->unlock_lr[so->next_unlock_lr] = lr_saved;
2102 so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX;
2103 lck_mtx_unlock(mutex_held);
b0d623f7 2104
0a7de745 2105 if (so->so_usecount == 0) {
91447636 2106 ctl_sofreelastref(so);
0a7de745 2107 }
b0d623f7 2108
0a7de745 2109 return 0;
91447636
A
2110}
2111
2112static lck_mtx_t *
5ba3f43e 2113ctl_getlock(struct socket *so, int flags)
91447636 2114{
5ba3f43e 2115#pragma unused(flags)
0a7de745 2116 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
fe8ab488 2117
0a7de745
A
2118 if (so->so_pcb) {
2119 if (so->so_usecount < 0) {
2120 panic("ctl_getlock: so=%p usecount=%x lrh= %s\n",
2121 so, so->so_usecount, solockhistory_nr(so));
2122 }
c3c9b80d 2123 return &kcb->mtx;
91447636 2124 } else {
0a7de745
A
2125 panic("ctl_getlock: so=%p NULL NO so_pcb %s\n",
2126 so, solockhistory_nr(so));
2127 return so->so_proto->pr_domain->dom_mtx;
91447636
A
2128 }
2129}
fe8ab488
A
2130
2131__private_extern__ int
2132kctl_reg_list SYSCTL_HANDLER_ARGS
2133{
2134#pragma unused(oidp, arg1, arg2)
0a7de745 2135 int error = 0;
f427ee49 2136 u_int64_t i, n;
0a7de745
A
2137 struct xsystmgen xsg;
2138 void *buf = NULL;
2139 struct kctl *kctl;
2140 size_t item_size = ROUNDUP64(sizeof(struct xkctl_reg));
2141
c3c9b80d 2142 buf = kheap_alloc(KHEAP_TEMP, item_size, Z_WAITOK | Z_ZERO);
0a7de745
A
2143 if (buf == NULL) {
2144 return ENOMEM;
2145 }
2146
c3c9b80d 2147 lck_mtx_lock(&ctl_mtx);
0a7de745
A
2148
2149 n = kctlstat.kcs_reg_count;
2150
2151 if (req->oldptr == USER_ADDR_NULL) {
f427ee49 2152 req->oldidx = (size_t)(n + n / 8) * sizeof(struct xkctl_reg);
0a7de745
A
2153 goto done;
2154 }
2155 if (req->newptr != USER_ADDR_NULL) {
2156 error = EPERM;
2157 goto done;
2158 }
2159 bzero(&xsg, sizeof(xsg));
2160 xsg.xg_len = sizeof(xsg);
2161 xsg.xg_count = n;
2162 xsg.xg_gen = kctlstat.kcs_gencnt;
2163 xsg.xg_sogen = so_gencnt;
2164 error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
2165 if (error) {
2166 goto done;
2167 }
2168 /*
2169 * We are done if there is no pcb
2170 */
2171 if (n == 0) {
2172 goto done;
2173 }
2174
0a7de745
A
2175 for (i = 0, kctl = TAILQ_FIRST(&ctl_head);
2176 i < n && kctl != NULL;
2177 i++, kctl = TAILQ_NEXT(kctl, next)) {
2178 struct xkctl_reg *xkr = (struct xkctl_reg *)buf;
2179 struct ctl_cb *kcb;
2180 u_int32_t pcbcount = 0;
2181
2182 TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
2183 pcbcount++;
2184
2185 bzero(buf, item_size);
2186
2187 xkr->xkr_len = sizeof(struct xkctl_reg);
2188 xkr->xkr_kind = XSO_KCREG;
2189 xkr->xkr_id = kctl->id;
2190 xkr->xkr_reg_unit = kctl->reg_unit;
2191 xkr->xkr_flags = kctl->flags;
2192 xkr->xkr_kctlref = (uint64_t)(kctl->kctlref);
2193 xkr->xkr_recvbufsize = kctl->recvbufsize;
2194 xkr->xkr_sendbufsize = kctl->sendbufsize;
2195 xkr->xkr_lastunit = kctl->lastunit;
2196 xkr->xkr_pcbcount = pcbcount;
2197 xkr->xkr_connect = (uint64_t)VM_KERNEL_UNSLIDE(kctl->connect);
2198 xkr->xkr_disconnect =
2199 (uint64_t)VM_KERNEL_UNSLIDE(kctl->disconnect);
2200 xkr->xkr_send = (uint64_t)VM_KERNEL_UNSLIDE(kctl->send);
2201 xkr->xkr_send_list =
2202 (uint64_t)VM_KERNEL_UNSLIDE(kctl->send_list);
2203 xkr->xkr_setopt = (uint64_t)VM_KERNEL_UNSLIDE(kctl->setopt);
2204 xkr->xkr_getopt = (uint64_t)VM_KERNEL_UNSLIDE(kctl->getopt);
2205 xkr->xkr_rcvd = (uint64_t)VM_KERNEL_UNSLIDE(kctl->rcvd);
2206 strlcpy(xkr->xkr_name, kctl->name, sizeof(xkr->xkr_name));
2207
2208 error = SYSCTL_OUT(req, buf, item_size);
2209 }
2210
2211 if (error == 0) {
2212 /*
2213 * Give the user an updated idea of our state.
2214 * If the generation differs from what we told
2215 * her before, she knows that something happened
2216 * while we were processing this request, and it
2217 * might be necessary to retry.
2218 */
2219 bzero(&xsg, sizeof(xsg));
2220 xsg.xg_len = sizeof(xsg);
2221 xsg.xg_count = n;
2222 xsg.xg_gen = kctlstat.kcs_gencnt;
2223 xsg.xg_sogen = so_gencnt;
2224 error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
2225 if (error) {
2226 goto done;
fe8ab488
A
2227 }
2228 }
2229
2230done:
c3c9b80d 2231 lck_mtx_unlock(&ctl_mtx);
fe8ab488 2232
c3c9b80d 2233 kheap_free(KHEAP_TEMP, buf, item_size);
fe8ab488 2234
0a7de745 2235 return error;
fe8ab488
A
2236}
2237
2238__private_extern__ int
2239kctl_pcblist SYSCTL_HANDLER_ARGS
2240{
2241#pragma unused(oidp, arg1, arg2)
0a7de745 2242 int error = 0;
f427ee49 2243 u_int64_t n, i;
0a7de745
A
2244 struct xsystmgen xsg;
2245 void *buf = NULL;
2246 struct kctl *kctl;
2247 size_t item_size = ROUNDUP64(sizeof(struct xkctlpcb)) +
2248 ROUNDUP64(sizeof(struct xsocket_n)) +
2249 2 * ROUNDUP64(sizeof(struct xsockbuf_n)) +
2250 ROUNDUP64(sizeof(struct xsockstat_n));
2251
c3c9b80d 2252 buf = kheap_alloc(KHEAP_TEMP, item_size, Z_WAITOK | Z_ZERO);
0a7de745
A
2253 if (buf == NULL) {
2254 return ENOMEM;
2255 }
2256
c3c9b80d 2257 lck_mtx_lock(&ctl_mtx);
0a7de745
A
2258
2259 n = kctlstat.kcs_pcbcount;
2260
2261 if (req->oldptr == USER_ADDR_NULL) {
f427ee49 2262 req->oldidx = (size_t)(n + n / 8) * item_size;
0a7de745
A
2263 goto done;
2264 }
2265 if (req->newptr != USER_ADDR_NULL) {
2266 error = EPERM;
2267 goto done;
2268 }
2269 bzero(&xsg, sizeof(xsg));
2270 xsg.xg_len = sizeof(xsg);
2271 xsg.xg_count = n;
2272 xsg.xg_gen = kctlstat.kcs_gencnt;
2273 xsg.xg_sogen = so_gencnt;
2274 error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
2275 if (error) {
2276 goto done;
2277 }
2278 /*
2279 * We are done if there is no pcb
2280 */
2281 if (n == 0) {
2282 goto done;
2283 }
2284
0a7de745
A
2285 for (i = 0, kctl = TAILQ_FIRST(&ctl_head);
2286 i < n && kctl != NULL;
2287 kctl = TAILQ_NEXT(kctl, next)) {
2288 struct ctl_cb *kcb;
2289
2290 for (kcb = TAILQ_FIRST(&kctl->kcb_head);
2291 i < n && kcb != NULL;
2292 i++, kcb = TAILQ_NEXT(kcb, next)) {
2293 struct xkctlpcb *xk = (struct xkctlpcb *)buf;
2294 struct xsocket_n *xso = (struct xsocket_n *)
2295 ADVANCE64(xk, sizeof(*xk));
2296 struct xsockbuf_n *xsbrcv = (struct xsockbuf_n *)
2297 ADVANCE64(xso, sizeof(*xso));
2298 struct xsockbuf_n *xsbsnd = (struct xsockbuf_n *)
2299 ADVANCE64(xsbrcv, sizeof(*xsbrcv));
2300 struct xsockstat_n *xsostats = (struct xsockstat_n *)
2301 ADVANCE64(xsbsnd, sizeof(*xsbsnd));
2302
2303 bzero(buf, item_size);
2304
2305 xk->xkp_len = sizeof(struct xkctlpcb);
2306 xk->xkp_kind = XSO_KCB;
2307 xk->xkp_unit = kcb->sac.sc_unit;
2308 xk->xkp_kctpcb = (uint64_t)VM_KERNEL_ADDRPERM(kcb);
2309 xk->xkp_kctlref = (uint64_t)VM_KERNEL_ADDRPERM(kctl);
2310 xk->xkp_kctlid = kctl->id;
2311 strlcpy(xk->xkp_kctlname, kctl->name,
2312 sizeof(xk->xkp_kctlname));
2313
2314 sotoxsocket_n(kcb->so, xso);
2315 sbtoxsockbuf_n(kcb->so ?
2316 &kcb->so->so_rcv : NULL, xsbrcv);
2317 sbtoxsockbuf_n(kcb->so ?
2318 &kcb->so->so_snd : NULL, xsbsnd);
2319 sbtoxsockstat_n(kcb->so, xsostats);
2320
2321 error = SYSCTL_OUT(req, buf, item_size);
fe8ab488
A
2322 }
2323 }
2324
0a7de745
A
2325 if (error == 0) {
2326 /*
2327 * Give the user an updated idea of our state.
2328 * If the generation differs from what we told
2329 * her before, she knows that something happened
2330 * while we were processing this request, and it
2331 * might be necessary to retry.
2332 */
2333 bzero(&xsg, sizeof(xsg));
2334 xsg.xg_len = sizeof(xsg);
2335 xsg.xg_count = n;
2336 xsg.xg_gen = kctlstat.kcs_gencnt;
2337 xsg.xg_sogen = so_gencnt;
2338 error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
2339 if (error) {
2340 goto done;
fe8ab488
A
2341 }
2342 }
2343
2344done:
c3c9b80d 2345 lck_mtx_unlock(&ctl_mtx);
fe8ab488 2346
c3c9b80d 2347 kheap_free(KHEAP_TEMP, buf, item_size);
0a7de745 2348 return error;
fe8ab488
A
2349}
2350
2351int
2352kctl_getstat SYSCTL_HANDLER_ARGS
2353{
2354#pragma unused(oidp, arg1, arg2)
0a7de745 2355 int error = 0;
fe8ab488 2356
c3c9b80d 2357 lck_mtx_lock(&ctl_mtx);
fe8ab488 2358
0a7de745
A
2359 if (req->newptr != USER_ADDR_NULL) {
2360 error = EPERM;
2361 goto done;
fe8ab488 2362 }
0a7de745
A
2363 if (req->oldptr == USER_ADDR_NULL) {
2364 req->oldidx = sizeof(struct kctlstat);
2365 goto done;
fe8ab488
A
2366 }
2367
0a7de745
A
2368 error = SYSCTL_OUT(req, &kctlstat,
2369 MIN(sizeof(struct kctlstat), req->oldlen));
fe8ab488 2370done:
c3c9b80d 2371 lck_mtx_unlock(&ctl_mtx);
0a7de745 2372 return error;
fe8ab488 2373}
3e170ce0
A
2374
2375void
2376kctl_fill_socketinfo(struct socket *so, struct socket_info *si)
2377{
0a7de745
A
2378 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
2379 struct kern_ctl_info *kcsi =
2380 &si->soi_proto.pri_kern_ctl;
2381 struct kctl *kctl = kcb->kctl;
3e170ce0 2382
0a7de745 2383 si->soi_kind = SOCKINFO_KERN_CTL;
3e170ce0 2384
0a7de745
A
2385 if (kctl == 0) {
2386 return;
2387 }
3e170ce0 2388
0a7de745
A
2389 kcsi->kcsi_id = kctl->id;
2390 kcsi->kcsi_reg_unit = kctl->reg_unit;
2391 kcsi->kcsi_flags = kctl->flags;
2392 kcsi->kcsi_recvbufsize = kctl->recvbufsize;
2393 kcsi->kcsi_sendbufsize = kctl->sendbufsize;
2394 kcsi->kcsi_unit = kcb->sac.sc_unit;
2395 strlcpy(kcsi->kcsi_name, kctl->name, MAX_KCTL_NAME);
3e170ce0 2396}