1 /*
2 * Copyright (c) 1999-2017 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /*
30 * Kernel Control domain - allows control connections to
31 * kernel controllers (NKEs) and to read/write data over them.
32 *
33 * Vincent Lubet, 040506
34 * Christophe Allie, 010928
35 * Justin C. Walker, 990319
36 */
37
38 #include <sys/types.h>
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/syslog.h>
42 #include <sys/socket.h>
43 #include <sys/socketvar.h>
44 #include <sys/protosw.h>
45 #include <sys/domain.h>
46 #include <sys/malloc.h>
47 #include <sys/mbuf.h>
48 #include <sys/sys_domain.h>
49 #include <sys/kern_event.h>
50 #include <sys/kern_control.h>
51 #include <sys/kauth.h>
52 #include <sys/sysctl.h>
53 #include <sys/proc_info.h>
54 #include <net/if_var.h>
55
56 #include <mach/vm_types.h>
57
58 #include <kern/thread.h>
59
60 struct kctl {
61 TAILQ_ENTRY(kctl) next; /* controller chain */
62 kern_ctl_ref kctlref;
63
64 /* controller information provided when registering */
65 char name[MAX_KCTL_NAME]; /* unique identifier */
66 u_int32_t id;
67 u_int32_t reg_unit;
68
69 /* misc communication information */
70 u_int32_t flags; /* support flags */
71 u_int32_t recvbufsize; /* request more than the default buffer size */
72 u_int32_t sendbufsize; /* request more than the default buffer size */
73
74 /* Dispatch functions */
75 ctl_bind_func bind; /* Prepare contact */
76 ctl_connect_func connect; /* Make contact */
77 ctl_disconnect_func disconnect; /* Break contact */
78 ctl_send_func send; /* Send data to nke */
79 ctl_send_list_func send_list; /* Send list of packets */
80 ctl_setopt_func setopt; /* set kctl configuration */
81 ctl_getopt_func getopt; /* get kctl configuration */
82 ctl_rcvd_func rcvd; /* Notify nke when client reads data */
83
84 TAILQ_HEAD(, ctl_cb) kcb_head;
85 u_int32_t lastunit;
86 };
87
88 struct ctl_cb {
89 TAILQ_ENTRY(ctl_cb) next; /* controller chain */
90 lck_mtx_t *mtx;
91 struct socket *so; /* controlling socket */
92 struct kctl *kctl; /* back pointer to controller */
93 void *userdata;
94 struct sockaddr_ctl sac;
95 u_int32_t usecount;
96 u_int32_t kcb_usecount;
97 };
98
99 #ifndef ROUNDUP64
100 #define ROUNDUP64(x) P2ROUNDUP((x), sizeof (u_int64_t))
101 #endif
102
103 #ifndef ADVANCE64
104 #define ADVANCE64(p, n) (void*)((char *)(p) + ROUNDUP64(n))
105 #endif
106
107 /*
108 * Default send and receive buffer sizes
109 */
110
111 #define CTL_SENDSIZE (2 * 1024) /* default buffer size */
112 #define CTL_RECVSIZE (8 * 1024) /* default buffer size */
113
114 /*
115 * Definitions and variables for the controls we support
116 */
117
118 static u_int32_t ctl_maxunit = 65536;
119 static lck_grp_attr_t *ctl_lck_grp_attr = 0;
120 static lck_attr_t *ctl_lck_attr = 0;
121 static lck_grp_t *ctl_lck_grp = 0;
122 static lck_mtx_t *ctl_mtx;
123
124 /* all the controllers are chained */
125 TAILQ_HEAD(kctl_list, kctl) ctl_head;
126
127 static int ctl_attach(struct socket *, int, struct proc *);
128 static int ctl_detach(struct socket *);
129 static int ctl_sofreelastref(struct socket *so);
130 static int ctl_bind(struct socket *, struct sockaddr *, struct proc *);
131 static int ctl_connect(struct socket *, struct sockaddr *, struct proc *);
132 static int ctl_disconnect(struct socket *);
133 static int ctl_ioctl(struct socket *so, u_long cmd, caddr_t data,
134 struct ifnet *ifp, struct proc *p);
135 static int ctl_send(struct socket *, int, struct mbuf *,
136 struct sockaddr *, struct mbuf *, struct proc *);
137 static int ctl_send_list(struct socket *, int, struct mbuf *,
138 struct sockaddr *, struct mbuf *, struct proc *);
139 static int ctl_ctloutput(struct socket *, struct sockopt *);
140 static int ctl_peeraddr(struct socket *so, struct sockaddr **nam);
141 static int ctl_usr_rcvd(struct socket *so, int flags);
142
143 static struct kctl *ctl_find_by_name(const char *);
144 static struct kctl *ctl_find_by_id_unit(u_int32_t id, u_int32_t unit);
145
146 static struct socket *kcb_find_socket(kern_ctl_ref kctlref, u_int32_t unit,
147 u_int32_t *);
148 static struct ctl_cb *kcb_find(struct kctl *, u_int32_t unit);
149 static void ctl_post_msg(u_int32_t event_code, u_int32_t id);
150
151 static int ctl_lock(struct socket *, int, void *);
152 static int ctl_unlock(struct socket *, int, void *);
153 static lck_mtx_t * ctl_getlock(struct socket *, int);
154
155 static struct pr_usrreqs ctl_usrreqs = {
156 .pru_attach = ctl_attach,
157 .pru_bind = ctl_bind,
158 .pru_connect = ctl_connect,
159 .pru_control = ctl_ioctl,
160 .pru_detach = ctl_detach,
161 .pru_disconnect = ctl_disconnect,
162 .pru_peeraddr = ctl_peeraddr,
163 .pru_rcvd = ctl_usr_rcvd,
164 .pru_send = ctl_send,
165 .pru_send_list = ctl_send_list,
166 .pru_sosend = sosend,
167 .pru_sosend_list = sosend_list,
168 .pru_soreceive = soreceive,
169 .pru_soreceive_list = soreceive_list,
170 };
171
172 static struct protosw kctlsw[] = {
173 {
174 .pr_type = SOCK_DGRAM,
175 .pr_protocol = SYSPROTO_CONTROL,
176 .pr_flags = PR_ATOMIC | PR_CONNREQUIRED | PR_PCBLOCK | PR_WANTRCVD,
177 .pr_ctloutput = ctl_ctloutput,
178 .pr_usrreqs = &ctl_usrreqs,
179 .pr_lock = ctl_lock,
180 .pr_unlock = ctl_unlock,
181 .pr_getlock = ctl_getlock,
182 },
183 {
184 .pr_type = SOCK_STREAM,
185 .pr_protocol = SYSPROTO_CONTROL,
186 .pr_flags = PR_CONNREQUIRED | PR_PCBLOCK | PR_WANTRCVD,
187 .pr_ctloutput = ctl_ctloutput,
188 .pr_usrreqs = &ctl_usrreqs,
189 .pr_lock = ctl_lock,
190 .pr_unlock = ctl_unlock,
191 .pr_getlock = ctl_getlock,
192 }
193 };
194
195 __private_extern__ int kctl_reg_list SYSCTL_HANDLER_ARGS;
196 __private_extern__ int kctl_pcblist SYSCTL_HANDLER_ARGS;
197 __private_extern__ int kctl_getstat SYSCTL_HANDLER_ARGS;
198
199
200 SYSCTL_NODE(_net_systm, OID_AUTO, kctl,
201 CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Kernel control family");
202
203 struct kctlstat kctlstat;
204 SYSCTL_PROC(_net_systm_kctl, OID_AUTO, stats,
205 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
206 kctl_getstat, "S,kctlstat", "");
207
208 SYSCTL_PROC(_net_systm_kctl, OID_AUTO, reg_list,
209 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
210 kctl_reg_list, "S,xkctl_reg", "");
211
212 SYSCTL_PROC(_net_systm_kctl, OID_AUTO, pcblist,
213 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
214 kctl_pcblist, "S,xkctlpcb", "");
215
216 u_int32_t ctl_autorcvbuf_max = 256 * 1024;
217 SYSCTL_INT(_net_systm_kctl, OID_AUTO, autorcvbufmax,
218 CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_autorcvbuf_max, 0, "");
219
220 u_int32_t ctl_autorcvbuf_high = 0;
221 SYSCTL_INT(_net_systm_kctl, OID_AUTO, autorcvbufhigh,
222 CTLFLAG_RD | CTLFLAG_LOCKED, &ctl_autorcvbuf_high, 0, "");
223
224 u_int32_t ctl_debug = 0;
225 SYSCTL_INT(_net_systm_kctl, OID_AUTO, debug,
226 CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_debug, 0, "");
227
228 #define KCTL_TBL_INC 16
229
230 static uintptr_t kctl_tbl_size = 0;
231 static u_int32_t kctl_tbl_growing = 0;
232 static u_int32_t kctl_tbl_growing_waiting = 0;
233 static uintptr_t kctl_tbl_count = 0;
234 static struct kctl **kctl_table = NULL;
235 static uintptr_t kctl_ref_gencnt = 0;
236
237 static void kctl_tbl_grow(void);
238 static kern_ctl_ref kctl_make_ref(struct kctl *kctl);
239 static void kctl_delete_ref(kern_ctl_ref);
240 static struct kctl *kctl_from_ref(kern_ctl_ref);
241
242 /*
243 * Install the protosw's for the Kernel Control manager.
244 */
245 __private_extern__ void
246 kern_control_init(struct domain *dp)
247 {
248 struct protosw *pr;
249 int i;
250 int kctl_proto_count = (sizeof(kctlsw) / sizeof(struct protosw));
251
252 VERIFY(!(dp->dom_flags & DOM_INITIALIZED));
253 VERIFY(dp == systemdomain);
254
255 ctl_lck_grp_attr = lck_grp_attr_alloc_init();
256 if (ctl_lck_grp_attr == NULL) {
257 panic("%s: lck_grp_attr_alloc_init failed\n", __func__);
258 /* NOTREACHED */
259 }
260
261 ctl_lck_grp = lck_grp_alloc_init("Kernel Control Protocol",
262 ctl_lck_grp_attr);
263 if (ctl_lck_grp == NULL) {
264 panic("%s: lck_grp_alloc_init failed\n", __func__);
265 /* NOTREACHED */
266 }
267
268 ctl_lck_attr = lck_attr_alloc_init();
269 if (ctl_lck_attr == NULL) {
270 panic("%s: lck_attr_alloc_init failed\n", __func__);
271 /* NOTREACHED */
272 }
273
274 ctl_mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
275 if (ctl_mtx == NULL) {
276 panic("%s: lck_mtx_alloc_init failed\n", __func__);
277 /* NOTREACHED */
278 }
279 TAILQ_INIT(&ctl_head);
280
281 for (i = 0, pr = &kctlsw[0]; i < kctl_proto_count; i++, pr++) {
282 net_add_proto(pr, dp, 1);
283 }
284 }
285
286 static void
287 kcb_delete(struct ctl_cb *kcb)
288 {
289 if (kcb != 0) {
290 if (kcb->mtx != 0) {
291 lck_mtx_free(kcb->mtx, ctl_lck_grp);
292 }
293 FREE(kcb, M_TEMP);
294 }
295 }
296
297 /*
298 * Kernel Controller user-request functions
299 * attach function must exist and succeed
300 * detach not necessary
301 * we need a pcb for the per socket mutex
302 */
303 static int
304 ctl_attach(struct socket *so, int proto, struct proc *p)
305 {
306 #pragma unused(proto, p)
307 int error = 0;
308 struct ctl_cb *kcb = 0;
309
310 MALLOC(kcb, struct ctl_cb *, sizeof(struct ctl_cb), M_TEMP, M_WAITOK);
311 if (kcb == NULL) {
312 error = ENOMEM;
313 goto quit;
314 }
315 bzero(kcb, sizeof(struct ctl_cb));
316
317 kcb->mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
318 if (kcb->mtx == NULL) {
319 error = ENOMEM;
320 goto quit;
321 }
322 kcb->so = so;
323 so->so_pcb = (caddr_t)kcb;
324
325 quit:
326 if (error != 0) {
327 kcb_delete(kcb);
328 kcb = 0;
329 }
330 return error;
331 }
332
333 static int
334 ctl_sofreelastref(struct socket *so)
335 {
336 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
337
338 so->so_pcb = 0;
339
340 if (kcb != 0) {
341 struct kctl *kctl;
342 if ((kctl = kcb->kctl) != 0) {
343 lck_mtx_lock(ctl_mtx);
344 TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
345 kctlstat.kcs_pcbcount--;
346 kctlstat.kcs_gencnt++;
347 lck_mtx_unlock(ctl_mtx);
348 }
349 kcb_delete(kcb);
350 }
351 sofreelastref(so, 1);
352 return 0;
353 }
354
355 /*
356 * Use this function to serialize calls into the kctl subsystem
357 */
358 static void
359 ctl_kcb_increment_use_count(struct ctl_cb *kcb, lck_mtx_t *mutex_held)
360 {
361 LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED);
362 while (kcb->kcb_usecount > 0) {
363 msleep(&kcb->kcb_usecount, mutex_held, PSOCK | PCATCH, "kcb_usecount", NULL);
364 }
365 kcb->kcb_usecount++;
366 }
367
368 static void
369 clt_kcb_decrement_use_count(struct ctl_cb *kcb)
370 {
371 assert(kcb->kcb_usecount != 0);
372 kcb->kcb_usecount--;
373 wakeup_one((caddr_t)&kcb->kcb_usecount);
374 }
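/*
 * A sketch of the pattern these two helpers assume (taken from
 * ctl_detach(), ctl_bind() and ctl_connect() below): an entry point
 * takes the use count while the socket lock is held, drops the socket
 * lock only around the kctl callback, and releases the use count on
 * the way out.
 *
 *	lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
 *	ctl_kcb_increment_use_count(kcb, mtx_held);
 *	...
 *	socket_unlock(so, 0);
 *	error = (*kcb->kctl->bind)(kcb->kctl->kctlref, &kcb->sac, &kcb->userdata);
 *	socket_lock(so, 0);
 *	...
 *	clt_kcb_decrement_use_count(kcb);
 */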
375
376 static int
377 ctl_detach(struct socket *so)
378 {
379 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
380
381 if (kcb == 0) {
382 return 0;
383 }
384
385 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
386 ctl_kcb_increment_use_count(kcb, mtx_held);
387
388 if (kcb->kctl != NULL && kcb->kctl->bind != NULL &&
389 kcb->userdata != NULL && !(so->so_state & SS_ISCONNECTED)) {
390 // The unit was bound, but not connected
391 // Invoke the disconnect callback to clean up
392 if (kcb->kctl->disconnect != NULL) {
393 socket_unlock(so, 0);
394 (*kcb->kctl->disconnect)(kcb->kctl->kctlref,
395 kcb->sac.sc_unit, kcb->userdata);
396 socket_lock(so, 0);
397 }
398 }
399
400 soisdisconnected(so);
401 so->so_flags |= SOF_PCBCLEARING;
402 clt_kcb_decrement_use_count(kcb);
403 return 0;
404 }
405
406 static int
407 ctl_setup_kctl(struct socket *so, struct sockaddr *nam, struct proc *p)
408 {
409 struct kctl *kctl = NULL;
410 int error = 0;
411 struct sockaddr_ctl sa;
412 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
413 struct ctl_cb *kcb_next = NULL;
414 u_quad_t sbmaxsize;
415 u_int32_t recvbufsize, sendbufsize;
416
417 if (kcb == 0) {
418 panic("ctl_setup_kctl so_pcb null\n");
419 }
420
421 if (kcb->kctl != NULL) {
422 // Already set up, skip
423 return 0;
424 }
425
426 if (nam->sa_len != sizeof(struct sockaddr_ctl)) {
427 return EINVAL;
428 }
429
430 bcopy(nam, &sa, sizeof(struct sockaddr_ctl));
431
432 lck_mtx_lock(ctl_mtx);
433 kctl = ctl_find_by_id_unit(sa.sc_id, sa.sc_unit);
434 if (kctl == NULL) {
435 lck_mtx_unlock(ctl_mtx);
436 return ENOENT;
437 }
438
439 if (((kctl->flags & CTL_FLAG_REG_SOCK_STREAM) &&
440 (so->so_type != SOCK_STREAM)) ||
441 (!(kctl->flags & CTL_FLAG_REG_SOCK_STREAM) &&
442 (so->so_type != SOCK_DGRAM))) {
443 lck_mtx_unlock(ctl_mtx);
444 return EPROTOTYPE;
445 }
446
447 if (kctl->flags & CTL_FLAG_PRIVILEGED) {
448 if (p == 0) {
449 lck_mtx_unlock(ctl_mtx);
450 return EINVAL;
451 }
452 if (kauth_cred_issuser(kauth_cred_get()) == 0) {
453 lck_mtx_unlock(ctl_mtx);
454 return EPERM;
455 }
456 }
457
458 if ((kctl->flags & CTL_FLAG_REG_ID_UNIT) || sa.sc_unit != 0) {
459 if (kcb_find(kctl, sa.sc_unit) != NULL) {
460 lck_mtx_unlock(ctl_mtx);
461 return EBUSY;
462 }
463 } else {
464 /* Find an unused unit number, assumes units are kept in order */
465 u_int32_t unit = 1;
466
467 TAILQ_FOREACH(kcb_next, &kctl->kcb_head, next) {
468 if (kcb_next->sac.sc_unit > unit) {
469 /* Found a gap, let's fill it in */
470 break;
471 }
472 unit = kcb_next->sac.sc_unit + 1;
473 if (unit == ctl_maxunit) {
474 break;
475 }
476 }
477
478 if (unit == ctl_maxunit) {
479 lck_mtx_unlock(ctl_mtx);
480 return EBUSY;
481 }
482
483 sa.sc_unit = unit;
484 }
485
486 bcopy(&sa, &kcb->sac, sizeof(struct sockaddr_ctl));
487 kcb->kctl = kctl;
488 if (kcb_next != NULL) {
489 TAILQ_INSERT_BEFORE(kcb_next, kcb, next);
490 } else {
491 TAILQ_INSERT_TAIL(&kctl->kcb_head, kcb, next);
492 }
493 kctlstat.kcs_pcbcount++;
494 kctlstat.kcs_gencnt++;
495 kctlstat.kcs_connections++;
496 lck_mtx_unlock(ctl_mtx);
497
498 /*
499 * rdar://15526688: Limit the send and receive sizes to sb_max
500 * by using the same scaling as sbreserve()
501 */
502 sbmaxsize = (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES);
503
504 if (kctl->sendbufsize > sbmaxsize) {
505 sendbufsize = sbmaxsize;
506 } else {
507 sendbufsize = kctl->sendbufsize;
508 }
509
510 if (kctl->recvbufsize > sbmaxsize) {
511 recvbufsize = sbmaxsize;
512 } else {
513 recvbufsize = kctl->recvbufsize;
514 }
515
516 error = soreserve(so, sendbufsize, recvbufsize);
517 if (error) {
518 if (ctl_debug) {
519 printf("%s - soreserve(%llx, %u, %u) error %d\n",
520 __func__, (uint64_t)VM_KERNEL_ADDRPERM(so),
521 sendbufsize, recvbufsize, error);
522 }
523 goto done;
524 }
525
526 done:
527 if (error) {
528 soisdisconnected(so);
529 lck_mtx_lock(ctl_mtx);
530 TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
531 kcb->kctl = NULL;
532 kcb->sac.sc_unit = 0;
533 kctlstat.kcs_pcbcount--;
534 kctlstat.kcs_gencnt++;
535 kctlstat.kcs_conn_fail++;
536 lck_mtx_unlock(ctl_mtx);
537 }
538 return error;
539 }
540
541 static int
542 ctl_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
543 {
544 int error = 0;
545 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
546
547 if (kcb == NULL) {
548 panic("ctl_bind so_pcb null\n");
549 }
550
551 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
552 ctl_kcb_increment_use_count(kcb, mtx_held);
553
554 error = ctl_setup_kctl(so, nam, p);
555 if (error) {
556 goto out;
557 }
558
559 if (kcb->kctl == NULL) {
560 panic("ctl_bind kctl null\n");
561 }
562
563 if (kcb->kctl->bind == NULL) {
564 error = EINVAL;
565 goto out;
566 }
567
568 socket_unlock(so, 0);
569 error = (*kcb->kctl->bind)(kcb->kctl->kctlref, &kcb->sac, &kcb->userdata);
570 socket_lock(so, 0);
571
572 out:
573 clt_kcb_decrement_use_count(kcb);
574 return error;
575 }
576
577 static int
578 ctl_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
579 {
580 int error = 0;
581 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
582
583 if (kcb == NULL) {
584 panic("ctl_connect so_pcb null\n");
585 }
586
587 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
588 ctl_kcb_increment_use_count(kcb, mtx_held);
589
590 error = ctl_setup_kctl(so, nam, p);
591 if (error) {
592 goto out;
593 }
594
595 if (kcb->kctl == NULL) {
596 panic("ctl_connect kctl null\n");
597 }
598
599 soisconnecting(so);
600 socket_unlock(so, 0);
601 error = (*kcb->kctl->connect)(kcb->kctl->kctlref, &kcb->sac, &kcb->userdata);
602 socket_lock(so, 0);
603 if (error) {
604 goto end;
605 }
606 soisconnected(so);
607
608 end:
609 if (error && kcb->kctl->disconnect) {
610 /*
611 * XXX Make sure we don't check the return value
612 * of disconnect here.
613 * ipsec/utun_ctl_disconnect will return an error when
614 * disconnect gets called after a connect failure.
615 * If we ever decide to check the disconnect return
616 * value here, please make sure to revisit
617 * ipsec/utun_ctl_disconnect.
618 */
619 socket_unlock(so, 0);
620 (*kcb->kctl->disconnect)(kcb->kctl->kctlref, kcb->sac.sc_unit, kcb->userdata);
621 socket_lock(so, 0);
622 }
623 if (error) {
624 soisdisconnected(so);
625 lck_mtx_lock(ctl_mtx);
626 TAILQ_REMOVE(&kcb->kctl->kcb_head, kcb, next);
627 kcb->kctl = NULL;
628 kcb->sac.sc_unit = 0;
629 kctlstat.kcs_pcbcount--;
630 kctlstat.kcs_gencnt++;
631 kctlstat.kcs_conn_fail++;
632 lck_mtx_unlock(ctl_mtx);
633 }
634 out:
635 clt_kcb_decrement_use_count(kcb);
636 return error;
637 }
638
639 static int
640 ctl_disconnect(struct socket *so)
641 {
642 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
643
644 if ((kcb = (struct ctl_cb *)so->so_pcb)) {
645 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
646 ctl_kcb_increment_use_count(kcb, mtx_held);
647 struct kctl *kctl = kcb->kctl;
648
649 if (kctl && kctl->disconnect) {
650 socket_unlock(so, 0);
651 (*kctl->disconnect)(kctl->kctlref, kcb->sac.sc_unit,
652 kcb->userdata);
653 socket_lock(so, 0);
654 }
655
656 soisdisconnected(so);
657
658 socket_unlock(so, 0);
659 lck_mtx_lock(ctl_mtx);
660 kcb->kctl = 0;
661 kcb->sac.sc_unit = 0;
662 while (kcb->usecount != 0) {
663 msleep(&kcb->usecount, ctl_mtx, 0, "kcb->usecount", 0);
664 }
665 TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
666 kctlstat.kcs_pcbcount--;
667 kctlstat.kcs_gencnt++;
668 lck_mtx_unlock(ctl_mtx);
669 socket_lock(so, 0);
670 clt_kcb_decrement_use_count(kcb);
671 }
672 return 0;
673 }
674
675 static int
676 ctl_peeraddr(struct socket *so, struct sockaddr **nam)
677 {
678 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
679 struct kctl *kctl;
680 struct sockaddr_ctl sc;
681
682 if (kcb == NULL) { /* sanity check */
683 return ENOTCONN;
684 }
685
686 if ((kctl = kcb->kctl) == NULL) {
687 return EINVAL;
688 }
689
690 bzero(&sc, sizeof(struct sockaddr_ctl));
691 sc.sc_len = sizeof(struct sockaddr_ctl);
692 sc.sc_family = AF_SYSTEM;
693 sc.ss_sysaddr = AF_SYS_CONTROL;
694 sc.sc_id = kctl->id;
695 sc.sc_unit = kcb->sac.sc_unit;
696
697 *nam = dup_sockaddr((struct sockaddr *)&sc, 1);
698
699 return 0;
700 }
701
702 static void
703 ctl_sbrcv_trim(struct socket *so)
704 {
705 struct sockbuf *sb = &so->so_rcv;
706
707 if (sb->sb_hiwat > sb->sb_idealsize) {
708 u_int32_t diff;
709 int32_t trim;
710
711 /*
712 * The difference between the ideal size and the
713 * current size is the upper bound of the amount we can trim
714 */
715 diff = sb->sb_hiwat - sb->sb_idealsize;
716 /*
717 * We cannot trim below the outstanding data
718 */
719 trim = sb->sb_hiwat - sb->sb_cc;
720
721 trim = imin(trim, (int32_t)diff);
722
723 if (trim > 0) {
724 sbreserve(sb, (sb->sb_hiwat - trim));
725
726 if (ctl_debug) {
727 printf("%s - shrunk to %d\n",
728 __func__, sb->sb_hiwat);
729 }
730 }
731 }
732 }
733
734 static int
735 ctl_usr_rcvd(struct socket *so, int flags)
736 {
737 int error = 0;
738 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
739 struct kctl *kctl;
740
741 if (kcb == NULL) {
742 return ENOTCONN;
743 }
744
745 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
746 ctl_kcb_increment_use_count(kcb, mtx_held);
747
748 if ((kctl = kcb->kctl) == NULL) {
749 error = EINVAL;
750 goto out;
751 }
752
753 if (kctl->rcvd) {
754 socket_unlock(so, 0);
755 (*kctl->rcvd)(kctl->kctlref, kcb->sac.sc_unit, kcb->userdata, flags);
756 socket_lock(so, 0);
757 }
758
759 ctl_sbrcv_trim(so);
760
761 out:
762 clt_kcb_decrement_use_count(kcb);
763 return error;
764 }
765
766 static int
767 ctl_send(struct socket *so, int flags, struct mbuf *m,
768 struct sockaddr *addr, struct mbuf *control,
769 struct proc *p)
770 {
771 #pragma unused(addr, p)
772 int error = 0;
773 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
774 struct kctl *kctl;
775
776 if (control) {
777 m_freem(control);
778 }
779
780 if (kcb == NULL) { /* sanity check */
781 error = ENOTCONN;
782 }
783
784 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
785 ctl_kcb_increment_use_count(kcb, mtx_held);
786
787 if (error == 0 && (kctl = kcb->kctl) == NULL) {
788 error = EINVAL;
789 }
790
791 if (error == 0 && kctl->send) {
792 so_tc_update_stats(m, so, m_get_service_class(m));
793 socket_unlock(so, 0);
794 error = (*kctl->send)(kctl->kctlref, kcb->sac.sc_unit, kcb->userdata,
795 m, flags);
796 socket_lock(so, 0);
797 } else {
798 m_freem(m);
799 if (error == 0) {
800 error = ENOTSUP;
801 }
802 }
803 if (error != 0) {
804 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_send_fail);
805 }
806 clt_kcb_decrement_use_count(kcb);
807
808 return error;
809 }
810
811 static int
812 ctl_send_list(struct socket *so, int flags, struct mbuf *m,
813 __unused struct sockaddr *addr, struct mbuf *control,
814 __unused struct proc *p)
815 {
816 int error = 0;
817 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
818 struct kctl *kctl;
819
820 if (control) {
821 m_freem_list(control);
822 }
823
824 if (kcb == NULL) { /* sanity check */
825 error = ENOTCONN;
826 }
827
828 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
829 ctl_kcb_increment_use_count(kcb, mtx_held);
830
831 if (error == 0 && (kctl = kcb->kctl) == NULL) {
832 error = EINVAL;
833 }
834
835 if (error == 0 && kctl->send_list) {
836 struct mbuf *nxt;
837
838 for (nxt = m; nxt != NULL; nxt = nxt->m_nextpkt) {
839 so_tc_update_stats(nxt, so, m_get_service_class(nxt));
840 }
841
842 socket_unlock(so, 0);
843 error = (*kctl->send_list)(kctl->kctlref, kcb->sac.sc_unit,
844 kcb->userdata, m, flags);
845 socket_lock(so, 0);
846 } else if (error == 0 && kctl->send) {
847 while (m != NULL && error == 0) {
848 struct mbuf *nextpkt = m->m_nextpkt;
849
850 m->m_nextpkt = NULL;
851 so_tc_update_stats(m, so, m_get_service_class(m));
852 socket_unlock(so, 0);
853 error = (*kctl->send)(kctl->kctlref, kcb->sac.sc_unit,
854 kcb->userdata, m, flags);
855 socket_lock(so, 0);
856 m = nextpkt;
857 }
858 if (m != NULL) {
859 m_freem_list(m);
860 }
861 } else {
862 m_freem_list(m);
863 if (error == 0) {
864 error = ENOTSUP;
865 }
866 }
867 if (error != 0) {
868 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_send_list_fail);
869 }
870 clt_kcb_decrement_use_count(kcb);
871
872 return error;
873 }
874
875 static errno_t
876 ctl_rcvbspace(struct socket *so, u_int32_t datasize,
877 u_int32_t kctlflags, u_int32_t flags)
878 {
879 struct sockbuf *sb = &so->so_rcv;
880 u_int32_t space = sbspace(sb);
881 errno_t error;
882
883 if ((kctlflags & CTL_FLAG_REG_CRIT) == 0) {
884 if ((u_int32_t) space >= datasize) {
885 error = 0;
886 } else {
887 error = ENOBUFS;
888 }
889 } else if ((flags & CTL_DATA_CRIT) == 0) {
890 /*
891 * Reserve 25% for critical messages
892 */
893 if (space < (sb->sb_hiwat >> 2) ||
894 space < datasize) {
895 error = ENOBUFS;
896 } else {
897 error = 0;
898 }
899 } else {
900 u_int32_t autorcvbuf_max;
901
902 /*
903 * Allow overcommit of 25%
904 */
905 autorcvbuf_max = min(sb->sb_idealsize + (sb->sb_idealsize >> 2),
906 ctl_autorcvbuf_max);
907
908 if ((u_int32_t) space >= datasize) {
909 error = 0;
910 } else if (tcp_cansbgrow(sb) &&
911 sb->sb_hiwat < autorcvbuf_max) {
912 /*
913 * Grow with a little bit of leeway
914 */
915 u_int32_t grow = datasize - space + MSIZE;
916
917 if (sbreserve(sb,
918 min((sb->sb_hiwat + grow), autorcvbuf_max)) == 1) {
919 if (sb->sb_hiwat > ctl_autorcvbuf_high) {
920 ctl_autorcvbuf_high = sb->sb_hiwat;
921 }
922
923 /*
924 * A final check
925 */
926 if ((u_int32_t) sbspace(sb) >= datasize) {
927 error = 0;
928 } else {
929 error = ENOBUFS;
930 }
931
932 if (ctl_debug) {
933 printf("%s - grown to %d error %d\n",
934 __func__, sb->sb_hiwat, error);
935 }
936 } else {
937 error = ENOBUFS;
938 }
939 } else {
940 error = ENOBUFS;
941 }
942 }
943 return error;
944 }
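/*
 * Worked example (numbers are illustrative, assuming the receive buffer
 * is still at the default CTL_RECVSIZE of 8 KB): for a controller
 * registered with CTL_FLAG_REG_CRIT, a plain enqueue is refused with
 * ENOBUFS once free space drops below sb_hiwat >> 2 = 2 KB, keeping that
 * quarter in reserve, while a CTL_DATA_CRIT enqueue may additionally
 * grow the buffer up to
 * min(sb_idealsize + (sb_idealsize >> 2), ctl_autorcvbuf_max).
 */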
945
946 errno_t
947 ctl_enqueuembuf(kern_ctl_ref kctlref, u_int32_t unit, struct mbuf *m,
948 u_int32_t flags)
949 {
950 struct socket *so;
951 errno_t error = 0;
952 int len = m->m_pkthdr.len;
953 u_int32_t kctlflags;
954
955 so = kcb_find_socket(kctlref, unit, &kctlflags);
956 if (so == NULL) {
957 return EINVAL;
958 }
959
960 if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
961 error = ENOBUFS;
962 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
963 goto bye;
964 }
965 if ((flags & CTL_DATA_EOR)) {
966 m->m_flags |= M_EOR;
967 }
968
969 so_recv_data_stat(so, m, 0);
970 if (sbappend(&so->so_rcv, m) != 0) {
971 if ((flags & CTL_DATA_NOWAKEUP) == 0) {
972 sorwakeup(so);
973 }
974 } else {
975 error = ENOBUFS;
976 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
977 }
978 bye:
979 if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) {
980 printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
981 __func__, error, len,
982 so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);
983 }
984
985 socket_unlock(so, 1);
986 if (error != 0) {
987 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
988 }
989
990 return error;
991 }
992
993 /*
994 * Compute the space occupied by an mbuf chain, the way sbappendrecord() accounts for it
995 */
996 static int
997 m_space(struct mbuf *m)
998 {
999 int space = 0;
1000 struct mbuf *nxt;
1001
1002 for (nxt = m; nxt != NULL; nxt = nxt->m_next) {
1003 space += nxt->m_len;
1004 }
1005
1006 return space;
1007 }
1008
1009 errno_t
1010 ctl_enqueuembuf_list(void *kctlref, u_int32_t unit, struct mbuf *m_list,
1011 u_int32_t flags, struct mbuf **m_remain)
1012 {
1013 struct socket *so = NULL;
1014 errno_t error = 0;
1015 struct mbuf *m, *nextpkt;
1016 int needwakeup = 0;
1017 int len = 0;
1018 u_int32_t kctlflags;
1019
1020 /*
1021 * Point at the beginning of the list in case of an early exit
1022 */
1023 m = m_list;
1024
1025 /*
1026 * kcb_find_socket takes the socket lock with a reference
1027 */
1028 so = kcb_find_socket(kctlref, unit, &kctlflags);
1029 if (so == NULL) {
1030 error = EINVAL;
1031 goto done;
1032 }
1033
1034 if (kctlflags & CTL_FLAG_REG_SOCK_STREAM) {
1035 error = EOPNOTSUPP;
1036 goto done;
1037 }
1038 if (flags & CTL_DATA_EOR) {
1039 error = EINVAL;
1040 goto done;
1041 }
1042
1043 for (m = m_list; m != NULL; m = nextpkt) {
1044 nextpkt = m->m_nextpkt;
1045
1046 if (m->m_pkthdr.len == 0 && ctl_debug) {
1047 printf("%s: %llx m_pkthdr.len is 0",
1048 __func__, (uint64_t)VM_KERNEL_ADDRPERM(m));
1049 }
1050
1051 /*
1052 * The mbuf is either appended or freed by sbappendrecord()
1053 * so it's not reliable from a data standpoint
1054 */
1055 len = m_space(m);
1056 if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
1057 error = ENOBUFS;
1058 OSIncrementAtomic64(
1059 (SInt64 *)&kctlstat.kcs_enqueue_fullsock);
1060 break;
1061 } else {
1062 /*
1063 * Unlink from the list, m is on its own
1064 */
1065 m->m_nextpkt = NULL;
1066 so_recv_data_stat(so, m, 0);
1067 if (sbappendrecord(&so->so_rcv, m) != 0) {
1068 needwakeup = 1;
1069 } else {
1070 /*
1071 * We free or return the remaining
1072 * mbufs in the list
1073 */
1074 m = nextpkt;
1075 error = ENOBUFS;
1076 OSIncrementAtomic64(
1077 (SInt64 *)&kctlstat.kcs_enqueue_fullsock);
1078 break;
1079 }
1080 }
1081 }
1082 if (needwakeup && (flags & CTL_DATA_NOWAKEUP) == 0) {
1083 sorwakeup(so);
1084 }
1085
1086 done:
1087 if (so != NULL) {
1088 if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) {
1089 printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
1090 __func__, error, len,
1091 so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);
1092 }
1093
1094 socket_unlock(so, 1);
1095 }
1096 if (m_remain) {
1097 *m_remain = m;
1098
1099 if (m != NULL && socket_debug && so != NULL &&
1100 (so->so_options & SO_DEBUG)) {
1101 struct mbuf *n;
1102
1103 printf("%s m_list %llx\n", __func__,
1104 (uint64_t) VM_KERNEL_ADDRPERM(m_list));
1105 for (n = m; n != NULL; n = n->m_nextpkt) {
1106 printf(" remain %llx m_next %llx\n",
1107 (uint64_t) VM_KERNEL_ADDRPERM(n),
1108 (uint64_t) VM_KERNEL_ADDRPERM(n->m_next));
1109 }
1110 }
1111 } else {
1112 if (m != NULL) {
1113 m_freem_list(m);
1114 }
1115 }
1116 if (error != 0) {
1117 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
1118 }
1119 return error;
1120 }
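/*
 * Hypothetical caller sketch for the m_remain contract (the variable
 * names are made up; the function and its behavior are as defined above):
 * when the socket buffer fills mid-list, the unconsumed tail is handed
 * back so the controller can requeue it rather than lose it.
 *
 *	struct mbuf *remain = NULL;
 *	errno_t err;
 *
 *	err = ctl_enqueuembuf_list(kctlref, unit, pkt_list, 0, &remain);
 *	if (err == ENOBUFS && remain != NULL) {
 *		// keep 'remain' and retry once the client drains the socket
 *	}
 */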
1121
1122 errno_t
1123 ctl_enqueuedata(void *kctlref, u_int32_t unit, void *data, size_t len,
1124 u_int32_t flags)
1125 {
1126 struct socket *so;
1127 struct mbuf *m;
1128 errno_t error = 0;
1129 unsigned int num_needed;
1130 struct mbuf *n;
1131 size_t curlen = 0;
1132 u_int32_t kctlflags;
1133
1134 so = kcb_find_socket(kctlref, unit, &kctlflags);
1135 if (so == NULL) {
1136 return EINVAL;
1137 }
1138
1139 if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
1140 error = ENOBUFS;
1141 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
1142 goto bye;
1143 }
1144
1145 num_needed = 1;
1146 m = m_allocpacket_internal(&num_needed, len, NULL, M_NOWAIT, 1, 0);
1147 if (m == NULL) {
1148 kctlstat.kcs_enqdata_mb_alloc_fail++;
1149 if (ctl_debug) {
1150 printf("%s: m_allocpacket_internal(%lu) failed\n",
1151 __func__, len);
1152 }
1153 error = ENOMEM;
1154 goto bye;
1155 }
1156
1157 for (n = m; n != NULL; n = n->m_next) {
1158 size_t mlen = mbuf_maxlen(n);
1159
1160 if (mlen + curlen > len) {
1161 mlen = len - curlen;
1162 }
1163 n->m_len = mlen;
1164 bcopy((char *)data + curlen, n->m_data, mlen);
1165 curlen += mlen;
1166 }
1167 mbuf_pkthdr_setlen(m, curlen);
1168
1169 if ((flags & CTL_DATA_EOR)) {
1170 m->m_flags |= M_EOR;
1171 }
1172 so_recv_data_stat(so, m, 0);
1173 if (sbappend(&so->so_rcv, m) != 0) {
1174 if ((flags & CTL_DATA_NOWAKEUP) == 0) {
1175 sorwakeup(so);
1176 }
1177 } else {
1178 kctlstat.kcs_enqdata_sbappend_fail++;
1179 error = ENOBUFS;
1180 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
1181 }
1182
1183 bye:
1184 if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) {
1185 printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
1186 __func__, error, (int)len,
1187 so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);
1188 }
1189
1190 socket_unlock(so, 1);
1191 if (error != 0) {
1192 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
1193 }
1194 return error;
1195 }
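/*
 * Hypothetical caller sketch (the buffer and length names are made up;
 * the KPI call is the one defined above): a controller pushing a
 * datagram to its client, retrying later if the receive buffer is full.
 *
 *	errno_t err;
 *
 *	err = ctl_enqueuedata(kctlref, unit, buf, buflen, CTL_DATA_EOR);
 *	if (err == ENOBUFS) {
 *		// client is not reading fast enough; try again when the
 *		// rcvd callback fires
 *	}
 */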
1196
1197 errno_t
1198 ctl_getenqueuepacketcount(kern_ctl_ref kctlref, u_int32_t unit, u_int32_t *pcnt)
1199 {
1200 struct socket *so;
1201 u_int32_t cnt;
1202 struct mbuf *m1;
1203
1204 if (pcnt == NULL) {
1205 return EINVAL;
1206 }
1207
1208 so = kcb_find_socket(kctlref, unit, NULL);
1209 if (so == NULL) {
1210 return EINVAL;
1211 }
1212
1213 cnt = 0;
1214 m1 = so->so_rcv.sb_mb;
1215 while (m1 != NULL) {
1216 if (m1->m_type == MT_DATA ||
1217 m1->m_type == MT_HEADER ||
1218 m1->m_type == MT_OOBDATA) {
1219 cnt += 1;
1220 }
1221 m1 = m1->m_nextpkt;
1222 }
1223 *pcnt = cnt;
1224
1225 socket_unlock(so, 1);
1226
1227 return 0;
1228 }
1229
1230 errno_t
1231 ctl_getenqueuespace(kern_ctl_ref kctlref, u_int32_t unit, size_t *space)
1232 {
1233 struct socket *so;
1234 long avail;
1235
1236 if (space == NULL) {
1237 return EINVAL;
1238 }
1239
1240 so = kcb_find_socket(kctlref, unit, NULL);
1241 if (so == NULL) {
1242 return EINVAL;
1243 }
1244
1245 avail = sbspace(&so->so_rcv);
1246 *space = (avail < 0) ? 0 : avail;
1247 socket_unlock(so, 1);
1248
1249 return 0;
1250 }
1251
1252 errno_t
1253 ctl_getenqueuereadable(kern_ctl_ref kctlref, u_int32_t unit,
1254 u_int32_t *difference)
1255 {
1256 struct socket *so;
1257
1258 if (difference == NULL) {
1259 return EINVAL;
1260 }
1261
1262 so = kcb_find_socket(kctlref, unit, NULL);
1263 if (so == NULL) {
1264 return EINVAL;
1265 }
1266
1267 if (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat) {
1268 *difference = 0;
1269 } else {
1270 *difference = (so->so_rcv.sb_lowat - so->so_rcv.sb_cc);
1271 }
1272 socket_unlock(so, 1);
1273
1274 return 0;
1275 }
1276
1277 static int
1278 ctl_ctloutput(struct socket *so, struct sockopt *sopt)
1279 {
1280 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
1281 struct kctl *kctl;
1282 int error = 0;
1283 void *data = NULL;
1284 size_t len;
1285
1286 if (sopt->sopt_level != SYSPROTO_CONTROL) {
1287 return EINVAL;
1288 }
1289
1290 if (kcb == NULL) { /* sanity check */
1291 return ENOTCONN;
1292 }
1293
1294 if ((kctl = kcb->kctl) == NULL) {
1295 return EINVAL;
1296 }
1297
1298 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
1299 ctl_kcb_increment_use_count(kcb, mtx_held);
1300
1301 switch (sopt->sopt_dir) {
1302 case SOPT_SET:
1303 if (kctl->setopt == NULL) {
1304 error = ENOTSUP;
1305 goto out;
1306 }
1307 if (sopt->sopt_valsize != 0) {
1308 MALLOC(data, void *, sopt->sopt_valsize, M_TEMP,
1309 M_WAITOK | M_ZERO);
1310 if (data == NULL) {
1311 error = ENOMEM;
1312 goto out;
1313 }
1314 error = sooptcopyin(sopt, data,
1315 sopt->sopt_valsize, sopt->sopt_valsize);
1316 }
1317 if (error == 0) {
1318 socket_unlock(so, 0);
1319 error = (*kctl->setopt)(kctl->kctlref,
1320 kcb->sac.sc_unit, kcb->userdata, sopt->sopt_name,
1321 data, sopt->sopt_valsize);
1322 socket_lock(so, 0);
1323 }
1324
1325 if (data != NULL) {
1326 FREE(data, M_TEMP);
1327 }
1328 break;
1329
1330 case SOPT_GET:
1331 if (kctl->getopt == NULL) {
1332 error = ENOTSUP;
1333 goto out;
1334 }
1335
1336 if (sopt->sopt_valsize && sopt->sopt_val) {
1337 MALLOC(data, void *, sopt->sopt_valsize, M_TEMP,
1338 M_WAITOK | M_ZERO);
1339 if (data == NULL) {
1340 error = ENOMEM;
1341 goto out;
1342 }
1343 /*
1344 * 4108337 - copy user data in case the
1345 * kernel control needs it
1346 */
1347 error = sooptcopyin(sopt, data,
1348 sopt->sopt_valsize, sopt->sopt_valsize);
1349 }
1350
1351 if (error == 0) {
1352 len = sopt->sopt_valsize;
1353 socket_unlock(so, 0);
1354 error = (*kctl->getopt)(kctl->kctlref, kcb->sac.sc_unit,
1355 kcb->userdata, sopt->sopt_name,
1356 data, &len);
1357 if (data != NULL && len > sopt->sopt_valsize) {
1358 panic_plain("ctl_ctloutput: ctl %s returned "
1359 "len (%lu) > sopt_valsize (%lu)\n",
1360 kcb->kctl->name, len,
1361 sopt->sopt_valsize);
1362 }
1363 socket_lock(so, 0);
1364 if (error == 0) {
1365 if (data != NULL) {
1366 error = sooptcopyout(sopt, data, len);
1367 } else {
1368 sopt->sopt_valsize = len;
1369 }
1370 }
1371 }
1372 if (data != NULL) {
1373 FREE(data, M_TEMP);
1374 }
1375 break;
1376 }
1377
1378 out:
1379 clt_kcb_decrement_use_count(kcb);
1380 return error;
1381 }
1382
1383 static int
1384 ctl_ioctl(struct socket *so, u_long cmd, caddr_t data,
1385 struct ifnet *ifp, struct proc *p)
1386 {
1387 #pragma unused(so, ifp, p)
1388 int error = ENOTSUP;
1389
1390 switch (cmd) {
1391 /* get the number of controllers */
1392 case CTLIOCGCOUNT: {
1393 struct kctl *kctl;
1394 u_int32_t n = 0;
1395
1396 lck_mtx_lock(ctl_mtx);
1397 TAILQ_FOREACH(kctl, &ctl_head, next)
1398 n++;
1399 lck_mtx_unlock(ctl_mtx);
1400
1401 bcopy(&n, data, sizeof(n));
1402 error = 0;
1403 break;
1404 }
1405 case CTLIOCGINFO: {
1406 struct ctl_info ctl_info;
1407 struct kctl *kctl = 0;
1408 size_t name_len;
1409
1410 bcopy(data, &ctl_info, sizeof(ctl_info));
1411 name_len = strnlen(ctl_info.ctl_name, MAX_KCTL_NAME);
1412
1413 if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) {
1414 error = EINVAL;
1415 break;
1416 }
1417 lck_mtx_lock(ctl_mtx);
1418 kctl = ctl_find_by_name(ctl_info.ctl_name);
1419 lck_mtx_unlock(ctl_mtx);
1420 if (kctl == 0) {
1421 error = ENOENT;
1422 break;
1423 }
1424 ctl_info.ctl_id = kctl->id;
1425 bcopy(&ctl_info, data, sizeof(ctl_info));
1426 error = 0;
1427 break;
1428 }
1429
1430 /* add controls to get list of NKEs */
1431 }
1432
1433 return error;
1434 }
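/*
 * Hypothetical user-space counterpart of CTLIOCGINFO (the control name
 * "com.example.mykctl" is made up; the ioctl, structures and connect()
 * path are the ones handled in this file):
 *
 *	struct ctl_info info = { .ctl_id = 0 };
 *	struct sockaddr_ctl addr = { .sc_len = sizeof(addr) };
 *	int fd = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);
 *
 *	strlcpy(info.ctl_name, "com.example.mykctl", sizeof(info.ctl_name));
 *	ioctl(fd, CTLIOCGINFO, &info);		// resolve ctl_name to ctl_id
 *	addr.sc_family = AF_SYSTEM;
 *	addr.ss_sysaddr = AF_SYS_CONTROL;
 *	addr.sc_id = info.ctl_id;
 *	addr.sc_unit = 0;			// let ctl_setup_kctl() pick a unit
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 */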
1435
1436 static void
1437 kctl_tbl_grow(void)
1438 {
1439 struct kctl **new_table;
1440 uintptr_t new_size;
1441
1442 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1443
1444 if (kctl_tbl_growing) {
1445 /* Another thread is allocating */
1446 kctl_tbl_growing_waiting++;
1447
1448 do {
1449 (void) msleep((caddr_t) &kctl_tbl_growing, ctl_mtx,
1450 PSOCK | PCATCH, "kctl_tbl_growing", 0);
1451 } while (kctl_tbl_growing);
1452 kctl_tbl_growing_waiting--;
1453 }
1454 /* Another thread grew the table */
1455 if (kctl_table != NULL && kctl_tbl_count < kctl_tbl_size) {
1456 return;
1457 }
1458
1459 /* Verify we have a sane size */
1460 if (kctl_tbl_size + KCTL_TBL_INC >= UINT16_MAX) {
1461 kctlstat.kcs_tbl_size_too_big++;
1462 if (ctl_debug) {
1463 printf("%s kctl_tbl_size %lu too big\n",
1464 __func__, kctl_tbl_size);
1465 }
1466 return;
1467 }
1468 kctl_tbl_growing = 1;
1469
1470 new_size = kctl_tbl_size + KCTL_TBL_INC;
1471
1472 lck_mtx_unlock(ctl_mtx);
1473 new_table = _MALLOC(sizeof(struct kctl *) * new_size,
1474 M_TEMP, M_WAIT | M_ZERO);
1475 lck_mtx_lock(ctl_mtx);
1476
1477 if (new_table != NULL) {
1478 if (kctl_table != NULL) {
1479 bcopy(kctl_table, new_table,
1480 kctl_tbl_size * sizeof(struct kctl *));
1481
1482 _FREE(kctl_table, M_TEMP);
1483 }
1484 kctl_table = new_table;
1485 kctl_tbl_size = new_size;
1486 }
1487
1488 kctl_tbl_growing = 0;
1489
1490 if (kctl_tbl_growing_waiting) {
1491 wakeup(&kctl_tbl_growing);
1492 }
1493 }
1494
1495 #define KCTLREF_INDEX_MASK 0x0000FFFF
1496 #define KCTLREF_GENCNT_MASK 0xFFFF0000
1497 #define KCTLREF_GENCNT_SHIFT 16
1498
1499 static kern_ctl_ref
1500 kctl_make_ref(struct kctl *kctl)
1501 {
1502 uintptr_t i;
1503
1504 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1505
1506 if (kctl_tbl_count >= kctl_tbl_size) {
1507 kctl_tbl_grow();
1508 }
1509
1510 kctl->kctlref = NULL;
1511 for (i = 0; i < kctl_tbl_size; i++) {
1512 if (kctl_table[i] == NULL) {
1513 uintptr_t ref;
1514
1515 /*
1516 * Reference is index plus one
1517 */
1518 kctl_ref_gencnt += 1;
1519
1520 /*
1521 * Add generation count as salt to reference to prevent
1522 * use after deregister
1523 */
1524 ref = ((kctl_ref_gencnt << KCTLREF_GENCNT_SHIFT) &
1525 KCTLREF_GENCNT_MASK) +
1526 ((i + 1) & KCTLREF_INDEX_MASK);
1527
1528 kctl->kctlref = (void *)(ref);
1529 kctl_table[i] = kctl;
1530 kctl_tbl_count++;
1531 break;
1532 }
1533 }
1534
1535 if (kctl->kctlref == NULL) {
1536 panic("%s no space in table", __func__);
1537 }
1538
1539 if (ctl_debug > 0) {
1540 printf("%s %p for %p\n",
1541 __func__, kctl->kctlref, kctl);
1542 }
1543
1544 return kctl->kctlref;
1545 }
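/*
 * Illustrative encoding (values made up): with table slot i = 2 and
 * kctl_ref_gencnt = 5, the reference handed back is
 * ((5 << KCTLREF_GENCNT_SHIFT) & KCTLREF_GENCNT_MASK) + ((2 + 1) & KCTLREF_INDEX_MASK)
 * = 0x00050003, so kctl_from_ref() can recover the table index while a
 * stale reference from before a deregister fails the generation check.
 */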
1546
1547 static void
1548 kctl_delete_ref(kern_ctl_ref kctlref)
1549 {
1550 /*
1551 * Reference is index plus one
1552 */
1553 uintptr_t i = (((uintptr_t)kctlref) & KCTLREF_INDEX_MASK) - 1;
1554
1555 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1556
1557 if (i < kctl_tbl_size) {
1558 struct kctl *kctl = kctl_table[i];
1559
1560 if (kctl->kctlref == kctlref) {
1561 kctl_table[i] = NULL;
1562 kctl_tbl_count--;
1563 } else {
1564 kctlstat.kcs_bad_kctlref++;
1565 }
1566 } else {
1567 kctlstat.kcs_bad_kctlref++;
1568 }
1569 }
1570
1571 static struct kctl *
1572 kctl_from_ref(kern_ctl_ref kctlref)
1573 {
1574 /*
1575 * Reference is index plus one
1576 */
1577 uintptr_t i = (((uintptr_t)kctlref) & KCTLREF_INDEX_MASK) - 1;
1578 struct kctl *kctl = NULL;
1579
1580 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1581
1582 if (i >= kctl_tbl_size) {
1583 kctlstat.kcs_bad_kctlref++;
1584 return NULL;
1585 }
1586 kctl = kctl_table[i];
1587 if (kctl->kctlref != kctlref) {
1588 kctlstat.kcs_bad_kctlref++;
1589 return NULL;
1590 }
1591 return kctl;
1592 }
1593
1594 /*
1595 * Register/unregister an NKE
1596 */
1597 errno_t
1598 ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref)
1599 {
1600 struct kctl *kctl = NULL;
1601 struct kctl *kctl_next = NULL;
1602 u_int32_t id = 1;
1603 size_t name_len;
1604 int is_extended = 0;
1605
1606 if (userkctl == NULL) { /* sanity check */
1607 return EINVAL;
1608 }
1609 if (userkctl->ctl_connect == NULL) {
1610 return EINVAL;
1611 }
1612 name_len = strlen(userkctl->ctl_name);
1613 if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) {
1614 return EINVAL;
1615 }
1616
1617 MALLOC(kctl, struct kctl *, sizeof(*kctl), M_TEMP, M_WAITOK);
1618 if (kctl == NULL) {
1619 return ENOMEM;
1620 }
1621 bzero((char *)kctl, sizeof(*kctl));
1622
1623 lck_mtx_lock(ctl_mtx);
1624
1625 if (kctl_make_ref(kctl) == NULL) {
1626 lck_mtx_unlock(ctl_mtx);
1627 FREE(kctl, M_TEMP);
1628 return ENOMEM;
1629 }
1630
1631 /*
1632 * Kernel Control IDs
1633 *
1634 * CTL_FLAG_REG_ID_UNIT indicates the control ID and unit number are
1635 * static. If they do not exist, add them to the list in order. If the
1636 * flag is not set, we must find a new unique value. We assume the
1637 * list is in order. We find the last item in the list and add one. If
1638 * this leads to wrapping the id around, we start at the front of the
1639 * list and look for a gap.
1640 */
1641
1642 if ((userkctl->ctl_flags & CTL_FLAG_REG_ID_UNIT) == 0) {
1643 /* Must dynamically assign an unused ID */
1644
1645 /* Verify the same name isn't already registered */
1646 if (ctl_find_by_name(userkctl->ctl_name) != NULL) {
1647 kctl_delete_ref(kctl->kctlref);
1648 lck_mtx_unlock(ctl_mtx);
1649 FREE(kctl, M_TEMP);
1650 return EEXIST;
1651 }
1652
1653 /* Start with 1 in case the list is empty */
1654 id = 1;
1655 kctl_next = TAILQ_LAST(&ctl_head, kctl_list);
1656
1657 if (kctl_next != NULL) {
1658 /* List was not empty, add one to the last item */
1659 id = kctl_next->id + 1;
1660 kctl_next = NULL;
1661
1662 /*
1663 * If this wrapped the id number, start looking at
1664 * the front of the list for an unused id.
1665 */
1666 if (id == 0) {
1667 /* Find the next unused ID */
1668 id = 1;
1669
1670 TAILQ_FOREACH(kctl_next, &ctl_head, next) {
1671 if (kctl_next->id > id) {
1672 /* We found a gap */
1673 break;
1674 }
1675
1676 id = kctl_next->id + 1;
1677 }
1678 }
1679 }
1680
1681 userkctl->ctl_id = id;
1682 kctl->id = id;
1683 kctl->reg_unit = -1;
1684 } else {
1685 TAILQ_FOREACH(kctl_next, &ctl_head, next) {
1686 if (kctl_next->id > userkctl->ctl_id) {
1687 break;
1688 }
1689 }
1690
1691 if (ctl_find_by_id_unit(userkctl->ctl_id, userkctl->ctl_unit)) {
1692 kctl_delete_ref(kctl->kctlref);
1693 lck_mtx_unlock(ctl_mtx);
1694 FREE(kctl, M_TEMP);
1695 return EEXIST;
1696 }
1697 kctl->id = userkctl->ctl_id;
1698 kctl->reg_unit = userkctl->ctl_unit;
1699 }
1700
1701 is_extended = (userkctl->ctl_flags & CTL_FLAG_REG_EXTENDED);
1702
1703 strlcpy(kctl->name, userkctl->ctl_name, MAX_KCTL_NAME);
1704 kctl->flags = userkctl->ctl_flags;
1705
1706 /*
1707 * Let the caller know the default send and receive sizes
1708 */
1709 if (userkctl->ctl_sendsize == 0) {
1710 kctl->sendbufsize = CTL_SENDSIZE;
1711 userkctl->ctl_sendsize = kctl->sendbufsize;
1712 } else {
1713 kctl->sendbufsize = userkctl->ctl_sendsize;
1714 }
1715 if (userkctl->ctl_recvsize == 0) {
1716 kctl->recvbufsize = CTL_RECVSIZE;
1717 userkctl->ctl_recvsize = kctl->recvbufsize;
1718 } else {
1719 kctl->recvbufsize = userkctl->ctl_recvsize;
1720 }
1721
1722 kctl->bind = userkctl->ctl_bind;
1723 kctl->connect = userkctl->ctl_connect;
1724 kctl->disconnect = userkctl->ctl_disconnect;
1725 kctl->send = userkctl->ctl_send;
1726 kctl->setopt = userkctl->ctl_setopt;
1727 kctl->getopt = userkctl->ctl_getopt;
1728 if (is_extended) {
1729 kctl->rcvd = userkctl->ctl_rcvd;
1730 kctl->send_list = userkctl->ctl_send_list;
1731 }
1732
1733 TAILQ_INIT(&kctl->kcb_head);
1734
1735 if (kctl_next) {
1736 TAILQ_INSERT_BEFORE(kctl_next, kctl, next);
1737 } else {
1738 TAILQ_INSERT_TAIL(&ctl_head, kctl, next);
1739 }
1740
1741 kctlstat.kcs_reg_count++;
1742 kctlstat.kcs_gencnt++;
1743
1744 lck_mtx_unlock(ctl_mtx);
1745
1746 *kctlref = kctl->kctlref;
1747
1748 ctl_post_msg(KEV_CTL_REGISTERED, kctl->id);
1749 return 0;
1750 }
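/*
 * Hypothetical registration sketch from a kernel extension (the callback
 * names and control name are made up; the fields are the ones consumed
 * by ctl_register() above):
 *
 *	struct kern_ctl_reg reg;
 *	kern_ctl_ref ref;
 *	errno_t err;
 *
 *	bzero(&reg, sizeof(reg));
 *	strlcpy(reg.ctl_name, "com.example.mykctl", sizeof(reg.ctl_name));
 *	reg.ctl_flags = 0;			// dynamic ctl_id, default buffer sizes
 *	reg.ctl_connect = my_connect;		// mandatory, see the check above
 *	reg.ctl_disconnect = my_disconnect;
 *	reg.ctl_send = my_send;
 *	err = ctl_register(&reg, &ref);
 *	...
 *	err = ctl_deregister(ref);		// EBUSY while clients are still attached
 */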
1751
1752 errno_t
1753 ctl_deregister(void *kctlref)
1754 {
1755 struct kctl *kctl;
1756
1757 lck_mtx_lock(ctl_mtx);
1758 if ((kctl = kctl_from_ref(kctlref)) == NULL) {
1759 kctlstat.kcs_bad_kctlref++;
1760 lck_mtx_unlock(ctl_mtx);
1761 if (ctl_debug != 0) {
1762 printf("%s invalid kctlref %p\n",
1763 __func__, kctlref);
1764 }
1765 return EINVAL;
1766 }
1767
1768 if (!TAILQ_EMPTY(&kctl->kcb_head)) {
1769 lck_mtx_unlock(ctl_mtx);
1770 return EBUSY;
1771 }
1772
1773 TAILQ_REMOVE(&ctl_head, kctl, next);
1774
1775 kctlstat.kcs_reg_count--;
1776 kctlstat.kcs_gencnt++;
1777
1778 kctl_delete_ref(kctl->kctlref);
1779 lck_mtx_unlock(ctl_mtx);
1780
1781 ctl_post_msg(KEV_CTL_DEREGISTERED, kctl->id);
1782 FREE(kctl, M_TEMP);
1783 return 0;
1784 }
1785
1786 /*
1787 * Must be called with global ctl_mtx lock taken
1788 */
1789 static struct kctl *
1790 ctl_find_by_name(const char *name)
1791 {
1792 struct kctl *kctl;
1793
1794 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1795
1796 TAILQ_FOREACH(kctl, &ctl_head, next)
1797 if (strncmp(kctl->name, name, sizeof(kctl->name)) == 0) {
1798 return kctl;
1799 }
1800
1801 return NULL;
1802 }
1803
1804 u_int32_t
1805 ctl_id_by_name(const char *name)
1806 {
1807 u_int32_t ctl_id = 0;
1808 struct kctl *kctl;
1809
1810 lck_mtx_lock(ctl_mtx);
1811 kctl = ctl_find_by_name(name);
1812 if (kctl) {
1813 ctl_id = kctl->id;
1814 }
1815 lck_mtx_unlock(ctl_mtx);
1816
1817 return ctl_id;
1818 }
1819
1820 errno_t
1821 ctl_name_by_id(u_int32_t id, char *out_name, size_t maxsize)
1822 {
1823 int found = 0;
1824 struct kctl *kctl;
1825
1826 lck_mtx_lock(ctl_mtx);
1827 TAILQ_FOREACH(kctl, &ctl_head, next) {
1828 if (kctl->id == id) {
1829 break;
1830 }
1831 }
1832
1833 if (kctl) {
1834 if (maxsize > MAX_KCTL_NAME) {
1835 maxsize = MAX_KCTL_NAME;
1836 }
1837 strlcpy(out_name, kctl->name, maxsize);
1838 found = 1;
1839 }
1840 lck_mtx_unlock(ctl_mtx);
1841
1842 return found ? 0 : ENOENT;
1843 }
1844
1845 /*
1846 * Must be called with global ctl_mtx lock taken
1847 *
1848 */
1849 static struct kctl *
1850 ctl_find_by_id_unit(u_int32_t id, u_int32_t unit)
1851 {
1852 struct kctl *kctl;
1853
1854 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1855
1856 TAILQ_FOREACH(kctl, &ctl_head, next) {
1857 if (kctl->id == id && (kctl->flags & CTL_FLAG_REG_ID_UNIT) == 0) {
1858 return kctl;
1859 } else if (kctl->id == id && kctl->reg_unit == unit) {
1860 return kctl;
1861 }
1862 }
1863 return NULL;
1864 }
1865
1866 /*
1867 * Must be called with kernel controller lock taken
1868 */
1869 static struct ctl_cb *
1870 kcb_find(struct kctl *kctl, u_int32_t unit)
1871 {
1872 struct ctl_cb *kcb;
1873
1874 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1875
1876 TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
1877 if (kcb->sac.sc_unit == unit) {
1878 return kcb;
1879 }
1880
1881 return NULL;
1882 }
1883
1884 static struct socket *
1885 kcb_find_socket(kern_ctl_ref kctlref, u_int32_t unit, u_int32_t *kctlflags)
1886 {
1887 struct socket *so = NULL;
1888 struct ctl_cb *kcb;
1889 void *lr_saved;
1890 struct kctl *kctl;
1891 int i;
1892
1893 lr_saved = __builtin_return_address(0);
1894
1895 lck_mtx_lock(ctl_mtx);
1896 /*
1897 * First validate the kctlref
1898 */
1899 if ((kctl = kctl_from_ref(kctlref)) == NULL) {
1900 kctlstat.kcs_bad_kctlref++;
1901 lck_mtx_unlock(ctl_mtx);
1902 if (ctl_debug != 0) {
1903 printf("%s invalid kctlref %p\n",
1904 __func__, kctlref);
1905 }
1906 return NULL;
1907 }
1908
1909 kcb = kcb_find(kctl, unit);
1910 if (kcb == NULL || kcb->kctl != kctl || (so = kcb->so) == NULL) {
1911 lck_mtx_unlock(ctl_mtx);
1912 return NULL;
1913 }
1914 /*
1915 * This prevents the socket from being closed
1916 */
1917 kcb->usecount++;
1918 /*
1919 * Respect lock ordering: socket before ctl_mtx
1920 */
1921 lck_mtx_unlock(ctl_mtx);
1922
1923 socket_lock(so, 1);
1924 /*
1925 * The socket lock history is more useful if we store
1926 * the address of the caller.
1927 */
1928 i = (so->next_lock_lr + SO_LCKDBG_MAX - 1) % SO_LCKDBG_MAX;
1929 so->lock_lr[i] = lr_saved;
1930
1931 lck_mtx_lock(ctl_mtx);
1932
1933 if ((kctl = kctl_from_ref(kctlref)) == NULL || kcb->kctl == NULL) {
1934 lck_mtx_unlock(ctl_mtx);
1935 socket_unlock(so, 1);
1936 so = NULL;
1937 lck_mtx_lock(ctl_mtx);
1938 } else if (kctlflags != NULL) {
1939 *kctlflags = kctl->flags;
1940 }
1941
1942 kcb->usecount--;
1943 if (kcb->usecount == 0) {
1944 wakeup((event_t)&kcb->usecount);
1945 }
1946
1947 lck_mtx_unlock(ctl_mtx);
1948
1949 return so;
1950 }
1951
1952 static void
1953 ctl_post_msg(u_int32_t event_code, u_int32_t id)
1954 {
1955 struct ctl_event_data ctl_ev_data;
1956 struct kev_msg ev_msg;
1957
1958 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_NOTOWNED);
1959
1960 bzero(&ev_msg, sizeof(struct kev_msg));
1961 ev_msg.vendor_code = KEV_VENDOR_APPLE;
1962
1963 ev_msg.kev_class = KEV_SYSTEM_CLASS;
1964 ev_msg.kev_subclass = KEV_CTL_SUBCLASS;
1965 ev_msg.event_code = event_code;
1966
1967 /* common nke subclass data */
1968 bzero(&ctl_ev_data, sizeof(ctl_ev_data));
1969 ctl_ev_data.ctl_id = id;
1970 ev_msg.dv[0].data_ptr = &ctl_ev_data;
1971 ev_msg.dv[0].data_length = sizeof(ctl_ev_data);
1972
1973 ev_msg.dv[1].data_length = 0;
1974
1975 kev_post_msg(&ev_msg);
1976 }
1977
1978 static int
1979 ctl_lock(struct socket *so, int refcount, void *lr)
1980 {
1981 void *lr_saved;
1982
1983 if (lr == NULL) {
1984 lr_saved = __builtin_return_address(0);
1985 } else {
1986 lr_saved = lr;
1987 }
1988
1989 if (so->so_pcb != NULL) {
1990 lck_mtx_lock(((struct ctl_cb *)so->so_pcb)->mtx);
1991 } else {
1992 panic("ctl_lock: so=%p NO PCB! lr=%p lrh= %s\n",
1993 so, lr_saved, solockhistory_nr(so));
1994 /* NOTREACHED */
1995 }
1996
1997 if (so->so_usecount < 0) {
1998 panic("ctl_lock: so=%p so_pcb=%p lr=%p ref=%x lrh= %s\n",
1999 so, so->so_pcb, lr_saved, so->so_usecount,
2000 solockhistory_nr(so));
2001 /* NOTREACHED */
2002 }
2003
2004 if (refcount) {
2005 so->so_usecount++;
2006 }
2007
2008 so->lock_lr[so->next_lock_lr] = lr_saved;
2009 so->next_lock_lr = (so->next_lock_lr + 1) % SO_LCKDBG_MAX;
2010 return 0;
2011 }
2012
2013 static int
2014 ctl_unlock(struct socket *so, int refcount, void *lr)
2015 {
2016 void *lr_saved;
2017 lck_mtx_t *mutex_held;
2018
2019 if (lr == NULL) {
2020 lr_saved = __builtin_return_address(0);
2021 } else {
2022 lr_saved = lr;
2023 }
2024
2025 #if (MORE_KCTLLOCK_DEBUG && (DEVELOPMENT || DEBUG))
2026 printf("ctl_unlock: so=%llx sopcb=%llx lock=%llx ref=%u lr=%llx\n",
2027 (uint64_t)VM_KERNEL_ADDRPERM(so),
2028 (uint64_t)VM_KERNEL_ADDRPERM(so->so_pcb),
2029 (uint64_t)VM_KERNEL_ADDRPERM(((struct ctl_cb *)so->so_pcb)->mtx),
2030 so->so_usecount, (uint64_t)VM_KERNEL_ADDRPERM(lr_saved));
2031 #endif /* (MORE_KCTLLOCK_DEBUG && (DEVELOPMENT || DEBUG)) */
2032 if (refcount) {
2033 so->so_usecount--;
2034 }
2035
2036 if (so->so_usecount < 0) {
2037 panic("ctl_unlock: so=%p usecount=%x lrh= %s\n",
2038 so, so->so_usecount, solockhistory_nr(so));
2039 /* NOTREACHED */
2040 }
2041 if (so->so_pcb == NULL) {
2042 panic("ctl_unlock: so=%p NO PCB usecount=%x lr=%p lrh= %s\n",
2043 so, so->so_usecount, (void *)lr_saved,
2044 solockhistory_nr(so));
2045 /* NOTREACHED */
2046 }
2047 mutex_held = ((struct ctl_cb *)so->so_pcb)->mtx;
2048
2049 lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
2050 so->unlock_lr[so->next_unlock_lr] = lr_saved;
2051 so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX;
2052 lck_mtx_unlock(mutex_held);
2053
2054 if (so->so_usecount == 0) {
2055 ctl_sofreelastref(so);
2056 }
2057
2058 return 0;
2059 }
2060
2061 static lck_mtx_t *
2062 ctl_getlock(struct socket *so, int flags)
2063 {
2064 #pragma unused(flags)
2065 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
2066
2067 if (so->so_pcb) {
2068 if (so->so_usecount < 0) {
2069 panic("ctl_getlock: so=%p usecount=%x lrh= %s\n",
2070 so, so->so_usecount, solockhistory_nr(so));
2071 }
2072 return kcb->mtx;
2073 } else {
2074 panic("ctl_getlock: so=%p NULL NO so_pcb %s\n",
2075 so, solockhistory_nr(so));
2076 return so->so_proto->pr_domain->dom_mtx;
2077 }
2078 }
2079
2080 __private_extern__ int
2081 kctl_reg_list SYSCTL_HANDLER_ARGS
2082 {
2083 #pragma unused(oidp, arg1, arg2)
2084 int error = 0;
2085 int n, i;
2086 struct xsystmgen xsg;
2087 void *buf = NULL;
2088 struct kctl *kctl;
2089 size_t item_size = ROUNDUP64(sizeof(struct xkctl_reg));
2090
2091 buf = _MALLOC(item_size, M_TEMP, M_WAITOK | M_ZERO);
2092 if (buf == NULL) {
2093 return ENOMEM;
2094 }
2095
2096 lck_mtx_lock(ctl_mtx);
2097
2098 n = kctlstat.kcs_reg_count;
2099
2100 if (req->oldptr == USER_ADDR_NULL) {
2101 req->oldidx = (n + n / 8) * sizeof(struct xkctl_reg);
2102 goto done;
2103 }
2104 if (req->newptr != USER_ADDR_NULL) {
2105 error = EPERM;
2106 goto done;
2107 }
2108 bzero(&xsg, sizeof(xsg));
2109 xsg.xg_len = sizeof(xsg);
2110 xsg.xg_count = n;
2111 xsg.xg_gen = kctlstat.kcs_gencnt;
2112 xsg.xg_sogen = so_gencnt;
2113 error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
2114 if (error) {
2115 goto done;
2116 }
2117 /*
2118 * We are done if there is no pcb
2119 */
2120 if (n == 0) {
2121 goto done;
2122 }
2123
2124 i = 0;
2125 for (i = 0, kctl = TAILQ_FIRST(&ctl_head);
2126 i < n && kctl != NULL;
2127 i++, kctl = TAILQ_NEXT(kctl, next)) {
2128 struct xkctl_reg *xkr = (struct xkctl_reg *)buf;
2129 struct ctl_cb *kcb;
2130 u_int32_t pcbcount = 0;
2131
2132 TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
2133 pcbcount++;
2134
2135 bzero(buf, item_size);
2136
2137 xkr->xkr_len = sizeof(struct xkctl_reg);
2138 xkr->xkr_kind = XSO_KCREG;
2139 xkr->xkr_id = kctl->id;
2140 xkr->xkr_reg_unit = kctl->reg_unit;
2141 xkr->xkr_flags = kctl->flags;
2142 xkr->xkr_kctlref = (uint64_t)(kctl->kctlref);
2143 xkr->xkr_recvbufsize = kctl->recvbufsize;
2144 xkr->xkr_sendbufsize = kctl->sendbufsize;
2145 xkr->xkr_lastunit = kctl->lastunit;
2146 xkr->xkr_pcbcount = pcbcount;
2147 xkr->xkr_connect = (uint64_t)VM_KERNEL_UNSLIDE(kctl->connect);
2148 xkr->xkr_disconnect =
2149 (uint64_t)VM_KERNEL_UNSLIDE(kctl->disconnect);
2150 xkr->xkr_send = (uint64_t)VM_KERNEL_UNSLIDE(kctl->send);
2151 xkr->xkr_send_list =
2152 (uint64_t)VM_KERNEL_UNSLIDE(kctl->send_list);
2153 xkr->xkr_setopt = (uint64_t)VM_KERNEL_UNSLIDE(kctl->setopt);
2154 xkr->xkr_getopt = (uint64_t)VM_KERNEL_UNSLIDE(kctl->getopt);
2155 xkr->xkr_rcvd = (uint64_t)VM_KERNEL_UNSLIDE(kctl->rcvd);
2156 strlcpy(xkr->xkr_name, kctl->name, sizeof(xkr->xkr_name));
2157
2158 error = SYSCTL_OUT(req, buf, item_size);
2159 }
2160
2161 if (error == 0) {
2162 /*
2163 * Give the user an updated idea of our state.
2164 * If the generation differs from what we told
2165 * her before, she knows that something happened
2166 * while we were processing this request, and it
2167 * might be necessary to retry.
2168 */
2169 bzero(&xsg, sizeof(xsg));
2170 xsg.xg_len = sizeof(xsg);
2171 xsg.xg_count = n;
2172 xsg.xg_gen = kctlstat.kcs_gencnt;
2173 xsg.xg_sogen = so_gencnt;
2174 error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
2175 if (error) {
2176 goto done;
2177 }
2178 }
2179
2180 done:
2181 lck_mtx_unlock(ctl_mtx);
2182
2183 if (buf != NULL) {
2184 FREE(buf, M_TEMP);
2185 }
2186
2187 return error;
2188 }
2189
2190 __private_extern__ int
2191 kctl_pcblist SYSCTL_HANDLER_ARGS
2192 {
2193 #pragma unused(oidp, arg1, arg2)
2194 int error = 0;
2195 int n, i;
2196 struct xsystmgen xsg;
2197 void *buf = NULL;
2198 struct kctl *kctl;
2199 size_t item_size = ROUNDUP64(sizeof(struct xkctlpcb)) +
2200 ROUNDUP64(sizeof(struct xsocket_n)) +
2201 2 * ROUNDUP64(sizeof(struct xsockbuf_n)) +
2202 ROUNDUP64(sizeof(struct xsockstat_n));
2203
2204 buf = _MALLOC(item_size, M_TEMP, M_WAITOK | M_ZERO);
2205 if (buf == NULL) {
2206 return ENOMEM;
2207 }
2208
2209 lck_mtx_lock(ctl_mtx);
2210
2211 n = kctlstat.kcs_pcbcount;
2212
2213 if (req->oldptr == USER_ADDR_NULL) {
2214 req->oldidx = (n + n / 8) * item_size;
2215 goto done;
2216 }
2217 if (req->newptr != USER_ADDR_NULL) {
2218 error = EPERM;
2219 goto done;
2220 }
2221 bzero(&xsg, sizeof(xsg));
2222 xsg.xg_len = sizeof(xsg);
2223 xsg.xg_count = n;
2224 xsg.xg_gen = kctlstat.kcs_gencnt;
2225 xsg.xg_sogen = so_gencnt;
2226 error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
2227 if (error) {
2228 goto done;
2229 }
2230 /*
2231 * We are done if there is no pcb
2232 */
2233 if (n == 0) {
2234 goto done;
2235 }
2236
2237 i = 0;
2238 for (i = 0, kctl = TAILQ_FIRST(&ctl_head);
2239 i < n && kctl != NULL;
2240 kctl = TAILQ_NEXT(kctl, next)) {
2241 struct ctl_cb *kcb;
2242
2243 for (kcb = TAILQ_FIRST(&kctl->kcb_head);
2244 i < n && kcb != NULL;
2245 i++, kcb = TAILQ_NEXT(kcb, next)) {
2246 struct xkctlpcb *xk = (struct xkctlpcb *)buf;
2247 struct xsocket_n *xso = (struct xsocket_n *)
2248 ADVANCE64(xk, sizeof(*xk));
2249 struct xsockbuf_n *xsbrcv = (struct xsockbuf_n *)
2250 ADVANCE64(xso, sizeof(*xso));
2251 struct xsockbuf_n *xsbsnd = (struct xsockbuf_n *)
2252 ADVANCE64(xsbrcv, sizeof(*xsbrcv));
2253 struct xsockstat_n *xsostats = (struct xsockstat_n *)
2254 ADVANCE64(xsbsnd, sizeof(*xsbsnd));
2255
2256 bzero(buf, item_size);
2257
2258 xk->xkp_len = sizeof(struct xkctlpcb);
2259 xk->xkp_kind = XSO_KCB;
2260 xk->xkp_unit = kcb->sac.sc_unit;
2261 xk->xkp_kctpcb = (uint64_t)VM_KERNEL_ADDRPERM(kcb);
2262 xk->xkp_kctlref = (uint64_t)VM_KERNEL_ADDRPERM(kctl);
2263 xk->xkp_kctlid = kctl->id;
2264 strlcpy(xk->xkp_kctlname, kctl->name,
2265 sizeof(xk->xkp_kctlname));
2266
2267 sotoxsocket_n(kcb->so, xso);
2268 sbtoxsockbuf_n(kcb->so ?
2269 &kcb->so->so_rcv : NULL, xsbrcv);
2270 sbtoxsockbuf_n(kcb->so ?
2271 &kcb->so->so_snd : NULL, xsbsnd);
2272 sbtoxsockstat_n(kcb->so, xsostats);
2273
2274 error = SYSCTL_OUT(req, buf, item_size);
2275 }
2276 }
2277
2278 if (error == 0) {
2279 /*
2280 * Give the user an updated idea of our state.
2281 * If the generation differs from what we told
2282 * her before, she knows that something happened
2283 * while we were processing this request, and it
2284 * might be necessary to retry.
2285 */
2286 bzero(&xsg, sizeof(xsg));
2287 xsg.xg_len = sizeof(xsg);
2288 xsg.xg_count = n;
2289 xsg.xg_gen = kctlstat.kcs_gencnt;
2290 xsg.xg_sogen = so_gencnt;
2291 error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
2292 if (error) {
2293 goto done;
2294 }
2295 }
2296
2297 done:
2298 lck_mtx_unlock(ctl_mtx);
2299
2300 return error;
2301 }
2302
2303 int
2304 kctl_getstat SYSCTL_HANDLER_ARGS
2305 {
2306 #pragma unused(oidp, arg1, arg2)
2307 int error = 0;
2308
2309 lck_mtx_lock(ctl_mtx);
2310
2311 if (req->newptr != USER_ADDR_NULL) {
2312 error = EPERM;
2313 goto done;
2314 }
2315 if (req->oldptr == USER_ADDR_NULL) {
2316 req->oldidx = sizeof(struct kctlstat);
2317 goto done;
2318 }
2319
2320 error = SYSCTL_OUT(req, &kctlstat,
2321 MIN(sizeof(struct kctlstat), req->oldlen));
2322 done:
2323 lck_mtx_unlock(ctl_mtx);
2324 return error;
2325 }
2326
2327 void
2328 kctl_fill_socketinfo(struct socket *so, struct socket_info *si)
2329 {
2330 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
2331 struct kern_ctl_info *kcsi =
2332 &si->soi_proto.pri_kern_ctl;
2333 struct kctl *kctl = kcb->kctl;
2334
2335 si->soi_kind = SOCKINFO_KERN_CTL;
2336
2337 if (kctl == 0) {
2338 return;
2339 }
2340
2341 kcsi->kcsi_id = kctl->id;
2342 kcsi->kcsi_reg_unit = kctl->reg_unit;
2343 kcsi->kcsi_flags = kctl->flags;
2344 kcsi->kcsi_recvbufsize = kctl->recvbufsize;
2345 kcsi->kcsi_sendbufsize = kctl->sendbufsize;
2346 kcsi->kcsi_unit = kcb->sac.sc_unit;
2347 strlcpy(kcsi->kcsi_name, kctl->name, MAX_KCTL_NAME);
2348 }