1 /*
2 * Copyright (c) 1999-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /*
30  * Kernel Control domain - allows control connections to
31  * kernel controllers (NKEs) and to read/write data.
32 *
33 * Vincent Lubet, 040506
34 * Christophe Allie, 010928
35 * Justin C. Walker, 990319
36 */
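/*
 * For orientation, a minimal user-space sketch of how a client typically
 * reaches this domain (illustrative only, not part of this file; the
 * control name "com.example.kctl" is hypothetical):
 *
 *	int fd = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);
 *
 *	struct ctl_info info;
 *	bzero(&info, sizeof(info));
 *	strlcpy(info.ctl_name, "com.example.kctl", sizeof(info.ctl_name));
 *	ioctl(fd, CTLIOCGINFO, &info);		// resolve name -> ctl_id (see ctl_ioctl below)
 *
 *	struct sockaddr_ctl sc;
 *	bzero(&sc, sizeof(sc));
 *	sc.sc_len = sizeof(sc);
 *	sc.sc_family = AF_SYSTEM;
 *	sc.ss_sysaddr = AF_SYS_CONTROL;
 *	sc.sc_id = info.ctl_id;
 *	sc.sc_unit = 0;				// 0 lets ctl_setup_kctl() pick a unit
 *	connect(fd, (struct sockaddr *)&sc, sizeof(sc));
 *
 * After connect(), reads and writes on fd are routed through ctl_send()
 * and the ctl_enqueue*() functions implemented in this file.
 */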
37
38 #include <sys/types.h>
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/syslog.h>
42 #include <sys/socket.h>
43 #include <sys/socketvar.h>
44 #include <sys/protosw.h>
45 #include <sys/domain.h>
46 #include <sys/malloc.h>
47 #include <sys/mbuf.h>
48 #include <sys/sys_domain.h>
49 #include <sys/kern_event.h>
50 #include <sys/kern_control.h>
51 #include <sys/kauth.h>
52 #include <sys/sysctl.h>
53 #include <sys/proc_info.h>
54 #include <net/if_var.h>
55
56 #include <mach/vm_types.h>
57
58 #include <kern/thread.h>
59
60 struct kctl {
61 TAILQ_ENTRY(kctl) next; /* controller chain */
62 kern_ctl_ref kctlref;
63
64 /* controller information provided when registering */
65 char name[MAX_KCTL_NAME]; /* unique identifier */
66 u_int32_t id;
67 u_int32_t reg_unit;
68
69 /* misc communication information */
70 u_int32_t flags; /* support flags */
71 u_int32_t recvbufsize; /* request more than the default buffer size */
72 u_int32_t sendbufsize; /* request more than the default buffer size */
73
74 /* Dispatch functions */
75 ctl_bind_func bind; /* Prepare contact */
76 ctl_connect_func connect; /* Make contact */
77 ctl_disconnect_func disconnect; /* Break contact */
78 ctl_send_func send; /* Send data to nke */
79 ctl_send_list_func send_list; /* Send list of packets */
80 ctl_setopt_func setopt; /* set kctl configuration */
81 ctl_getopt_func getopt; /* get kctl configuration */
82 ctl_rcvd_func rcvd; /* Notify nke when client reads data */
83
84 TAILQ_HEAD(, ctl_cb) kcb_head;
85 u_int32_t lastunit;
86 };
87
88 #if DEVELOPMENT || DEBUG
89 enum ctl_status {
90 KCTL_DISCONNECTED = 0,
91 KCTL_CONNECTING = 1,
92 KCTL_CONNECTED = 2
93 };
94 #endif /* DEVELOPMENT || DEBUG */
95
96 struct ctl_cb {
97 TAILQ_ENTRY(ctl_cb) next; /* controller chain */
98 lck_mtx_t *mtx;
99 struct socket *so; /* controlling socket */
100 struct kctl *kctl; /* back pointer to controller */
101 void *userdata;
102 struct sockaddr_ctl sac;
103 u_int32_t usecount;
104 u_int32_t kcb_usecount;
105 u_int32_t require_clearing_count;
106 #if DEVELOPMENT || DEBUG
107 enum ctl_status status;
108 #endif /* DEVELOPMENT || DEBUG */
109 };
110
111 #ifndef ROUNDUP64
112 #define ROUNDUP64(x) P2ROUNDUP((x), sizeof (u_int64_t))
113 #endif
114
115 #ifndef ADVANCE64
116 #define ADVANCE64(p, n) (void*)((char *)(p) + ROUNDUP64(n))
117 #endif
118
119 /*
120  * Definitions and vars for the default buffer sizes we support
121 */
122
123 #define CTL_SENDSIZE (2 * 1024) /* default buffer size */
124 #define CTL_RECVSIZE (8 * 1024) /* default buffer size */
125
126 /*
127  * Definitions and vars for the controllers we support
128 */
129
130 static u_int32_t ctl_maxunit = 65536;
131 static lck_grp_attr_t *ctl_lck_grp_attr = 0;
132 static lck_attr_t *ctl_lck_attr = 0;
133 static lck_grp_t *ctl_lck_grp = 0;
134 static lck_mtx_t *ctl_mtx;
135
136 /* all the controllers are chained */
137 TAILQ_HEAD(kctl_list, kctl) ctl_head;
138
139 static int ctl_attach(struct socket *, int, struct proc *);
140 static int ctl_detach(struct socket *);
141 static int ctl_sofreelastref(struct socket *so);
142 static int ctl_bind(struct socket *, struct sockaddr *, struct proc *);
143 static int ctl_connect(struct socket *, struct sockaddr *, struct proc *);
144 static int ctl_disconnect(struct socket *);
145 static int ctl_ioctl(struct socket *so, u_long cmd, caddr_t data,
146 struct ifnet *ifp, struct proc *p);
147 static int ctl_send(struct socket *, int, struct mbuf *,
148 struct sockaddr *, struct mbuf *, struct proc *);
149 static int ctl_send_list(struct socket *, int, struct mbuf *,
150 struct sockaddr *, struct mbuf *, struct proc *);
151 static int ctl_ctloutput(struct socket *, struct sockopt *);
152 static int ctl_peeraddr(struct socket *so, struct sockaddr **nam);
153 static int ctl_usr_rcvd(struct socket *so, int flags);
154
155 static struct kctl *ctl_find_by_name(const char *);
156 static struct kctl *ctl_find_by_id_unit(u_int32_t id, u_int32_t unit);
157
158 static struct socket *kcb_find_socket(kern_ctl_ref kctlref, u_int32_t unit,
159 u_int32_t *);
160 static struct ctl_cb *kcb_find(struct kctl *, u_int32_t unit);
161 static void ctl_post_msg(u_int32_t event_code, u_int32_t id);
162
163 static int ctl_lock(struct socket *, int, void *);
164 static int ctl_unlock(struct socket *, int, void *);
165 static lck_mtx_t * ctl_getlock(struct socket *, int);
166
167 static struct pr_usrreqs ctl_usrreqs = {
168 .pru_attach = ctl_attach,
169 .pru_bind = ctl_bind,
170 .pru_connect = ctl_connect,
171 .pru_control = ctl_ioctl,
172 .pru_detach = ctl_detach,
173 .pru_disconnect = ctl_disconnect,
174 .pru_peeraddr = ctl_peeraddr,
175 .pru_rcvd = ctl_usr_rcvd,
176 .pru_send = ctl_send,
177 .pru_send_list = ctl_send_list,
178 .pru_sosend = sosend,
179 .pru_sosend_list = sosend_list,
180 .pru_soreceive = soreceive,
181 .pru_soreceive_list = soreceive_list,
182 };
183
184 static struct protosw kctlsw[] = {
185 {
186 .pr_type = SOCK_DGRAM,
187 .pr_protocol = SYSPROTO_CONTROL,
188 .pr_flags = PR_ATOMIC | PR_CONNREQUIRED | PR_PCBLOCK | PR_WANTRCVD,
189 .pr_ctloutput = ctl_ctloutput,
190 .pr_usrreqs = &ctl_usrreqs,
191 .pr_lock = ctl_lock,
192 .pr_unlock = ctl_unlock,
193 .pr_getlock = ctl_getlock,
194 },
195 {
196 .pr_type = SOCK_STREAM,
197 .pr_protocol = SYSPROTO_CONTROL,
198 .pr_flags = PR_CONNREQUIRED | PR_PCBLOCK | PR_WANTRCVD,
199 .pr_ctloutput = ctl_ctloutput,
200 .pr_usrreqs = &ctl_usrreqs,
201 .pr_lock = ctl_lock,
202 .pr_unlock = ctl_unlock,
203 .pr_getlock = ctl_getlock,
204 }
205 };
206
207 __private_extern__ int kctl_reg_list SYSCTL_HANDLER_ARGS;
208 __private_extern__ int kctl_pcblist SYSCTL_HANDLER_ARGS;
209 __private_extern__ int kctl_getstat SYSCTL_HANDLER_ARGS;
210
211
212 SYSCTL_NODE(_net_systm, OID_AUTO, kctl,
213 CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Kernel control family");
214
215 struct kctlstat kctlstat;
216 SYSCTL_PROC(_net_systm_kctl, OID_AUTO, stats,
217 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
218 kctl_getstat, "S,kctlstat", "");
219
220 SYSCTL_PROC(_net_systm_kctl, OID_AUTO, reg_list,
221 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
222 kctl_reg_list, "S,xkctl_reg", "");
223
224 SYSCTL_PROC(_net_systm_kctl, OID_AUTO, pcblist,
225 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
226 kctl_pcblist, "S,xkctlpcb", "");
227
228 u_int32_t ctl_autorcvbuf_max = 256 * 1024;
229 SYSCTL_INT(_net_systm_kctl, OID_AUTO, autorcvbufmax,
230 CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_autorcvbuf_max, 0, "");
231
232 u_int32_t ctl_autorcvbuf_high = 0;
233 SYSCTL_INT(_net_systm_kctl, OID_AUTO, autorcvbufhigh,
234 CTLFLAG_RD | CTLFLAG_LOCKED, &ctl_autorcvbuf_high, 0, "");
235
236 u_int32_t ctl_debug = 0;
237 SYSCTL_INT(_net_systm_kctl, OID_AUTO, debug,
238 CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_debug, 0, "");
239
240 #if DEVELOPMENT || DEBUG
241 u_int32_t ctl_panic_debug = 0;
242 SYSCTL_INT(_net_systm_kctl, OID_AUTO, panicdebug,
243 CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_panic_debug, 0, "");
244 #endif /* DEVELOPMENT || DEBUG */
245
246 #define KCTL_TBL_INC 16
247
248 static uintptr_t kctl_tbl_size = 0;
249 static u_int32_t kctl_tbl_growing = 0;
250 static u_int32_t kctl_tbl_growing_waiting = 0;
251 static uintptr_t kctl_tbl_count = 0;
252 static struct kctl **kctl_table = NULL;
253 static uintptr_t kctl_ref_gencnt = 0;
254
255 static void kctl_tbl_grow(void);
256 static kern_ctl_ref kctl_make_ref(struct kctl *kctl);
257 static void kctl_delete_ref(kern_ctl_ref);
258 static struct kctl *kctl_from_ref(kern_ctl_ref);
259
260 /*
261 * Install the protosw's for the Kernel Control manager.
262 */
263 __private_extern__ void
264 kern_control_init(struct domain *dp)
265 {
266 struct protosw *pr;
267 int i;
268 int kctl_proto_count = (sizeof(kctlsw) / sizeof(struct protosw));
269
270 VERIFY(!(dp->dom_flags & DOM_INITIALIZED));
271 VERIFY(dp == systemdomain);
272
273 ctl_lck_grp_attr = lck_grp_attr_alloc_init();
274 if (ctl_lck_grp_attr == NULL) {
275 panic("%s: lck_grp_attr_alloc_init failed\n", __func__);
276 /* NOTREACHED */
277 }
278
279 ctl_lck_grp = lck_grp_alloc_init("Kernel Control Protocol",
280 ctl_lck_grp_attr);
281 if (ctl_lck_grp == NULL) {
282 panic("%s: lck_grp_alloc_init failed\n", __func__);
283 /* NOTREACHED */
284 }
285
286 ctl_lck_attr = lck_attr_alloc_init();
287 if (ctl_lck_attr == NULL) {
288 panic("%s: lck_attr_alloc_init failed\n", __func__);
289 /* NOTREACHED */
290 }
291
292 ctl_mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
293 if (ctl_mtx == NULL) {
294 panic("%s: lck_mtx_alloc_init failed\n", __func__);
295 /* NOTREACHED */
296 }
297 TAILQ_INIT(&ctl_head);
298
299 for (i = 0, pr = &kctlsw[0]; i < kctl_proto_count; i++, pr++) {
300 net_add_proto(pr, dp, 1);
301 }
302 }
303
304 static void
305 kcb_delete(struct ctl_cb *kcb)
306 {
307 if (kcb != 0) {
308 if (kcb->mtx != 0) {
309 lck_mtx_free(kcb->mtx, ctl_lck_grp);
310 }
311 FREE(kcb, M_TEMP);
312 }
313 }
314
315 /*
316 * Kernel Controller user-request functions
317 * attach function must exist and succeed
318 * detach not necessary
319 * we need a pcb for the per socket mutex
320 */
321 static int
322 ctl_attach(struct socket *so, int proto, struct proc *p)
323 {
324 #pragma unused(proto, p)
325 int error = 0;
326 struct ctl_cb *kcb = 0;
327
328 MALLOC(kcb, struct ctl_cb *, sizeof(struct ctl_cb), M_TEMP, M_WAITOK);
329 if (kcb == NULL) {
330 error = ENOMEM;
331 goto quit;
332 }
333 bzero(kcb, sizeof(struct ctl_cb));
334
335 kcb->mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
336 if (kcb->mtx == NULL) {
337 error = ENOMEM;
338 goto quit;
339 }
340 kcb->so = so;
341 so->so_pcb = (caddr_t)kcb;
342
343 quit:
344 if (error != 0) {
345 kcb_delete(kcb);
346 kcb = 0;
347 }
348 return error;
349 }
350
351 static int
352 ctl_sofreelastref(struct socket *so)
353 {
354 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
355
356 so->so_pcb = 0;
357
358 if (kcb != 0) {
359 struct kctl *kctl;
360 if ((kctl = kcb->kctl) != 0) {
361 lck_mtx_lock(ctl_mtx);
362 TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
363 kctlstat.kcs_pcbcount--;
364 kctlstat.kcs_gencnt++;
365 lck_mtx_unlock(ctl_mtx);
366 }
367 kcb_delete(kcb);
368 }
369 sofreelastref(so, 1);
370 return 0;
371 }
372
373 /*
374 * Use this function and ctl_kcb_require_clearing to serialize
375 * critical calls into the kctl subsystem
376 */
377 static void
378 ctl_kcb_increment_use_count(struct ctl_cb *kcb, lck_mtx_t *mutex_held)
379 {
380 LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED);
381 while (kcb->require_clearing_count > 0) {
382 msleep(&kcb->require_clearing_count, mutex_held, PSOCK | PCATCH, "kcb_require_clearing", NULL);
383 }
384 kcb->kcb_usecount++;
385 }
386
387 static void
388 ctl_kcb_require_clearing(struct ctl_cb *kcb, lck_mtx_t *mutex_held)
389 {
390 assert(kcb->kcb_usecount != 0);
391 kcb->require_clearing_count++;
392 kcb->kcb_usecount--;
393 while (kcb->kcb_usecount > 0) { // we need to wait until no one else is running
394 msleep(&kcb->kcb_usecount, mutex_held, PSOCK | PCATCH, "kcb_usecount", NULL);
395 }
396 kcb->kcb_usecount++;
397 }
398
399 static void
400 ctl_kcb_done_clearing(struct ctl_cb *kcb)
401 {
402 assert(kcb->require_clearing_count != 0);
403 kcb->require_clearing_count--;
404 wakeup((caddr_t)&kcb->require_clearing_count);
405 }
406
407 static void
408 ctl_kcb_decrement_use_count(struct ctl_cb *kcb)
409 {
410 assert(kcb->kcb_usecount != 0);
411 kcb->kcb_usecount--;
412 wakeup((caddr_t)&kcb->kcb_usecount);
413 }
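/*
 * Illustrative sketch of how the helpers above are meant to be paired by
 * callers such as ctl_detach(), ctl_bind(), ctl_connect() and
 * ctl_disconnect() (this mirrors the code below; it is not additional API):
 *
 *	lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
 *	ctl_kcb_increment_use_count(kcb, mtx_held);	// join the current users
 *	ctl_kcb_require_clearing(kcb, mtx_held);	// wait to be the only runner
 *	... critical kctl state change ...
 *	ctl_kcb_done_clearing(kcb);			// let others in again
 *	ctl_kcb_decrement_use_count(kcb);		// drop our use count
 */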
414
415 static int
416 ctl_detach(struct socket *so)
417 {
418 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
419
420 if (kcb == 0) {
421 return 0;
422 }
423
424 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
425 ctl_kcb_increment_use_count(kcb, mtx_held);
426 ctl_kcb_require_clearing(kcb, mtx_held);
427
428 if (kcb->kctl != NULL && kcb->kctl->bind != NULL &&
429 kcb->userdata != NULL && !(so->so_state & SS_ISCONNECTED)) {
430 // The unit was bound, but not connected
431                 // Invoke the disconnect callback to clean up
432 if (kcb->kctl->disconnect != NULL) {
433 socket_unlock(so, 0);
434 (*kcb->kctl->disconnect)(kcb->kctl->kctlref,
435 kcb->sac.sc_unit, kcb->userdata);
436 socket_lock(so, 0);
437 }
438 }
439
440 soisdisconnected(so);
441 #if DEVELOPMENT || DEBUG
442 kcb->status = KCTL_DISCONNECTED;
443 #endif /* DEVELOPMENT || DEBUG */
444 so->so_flags |= SOF_PCBCLEARING;
445 ctl_kcb_done_clearing(kcb);
446 ctl_kcb_decrement_use_count(kcb);
447 return 0;
448 }
449
450 static int
451 ctl_setup_kctl(struct socket *so, struct sockaddr *nam, struct proc *p)
452 {
453 struct kctl *kctl = NULL;
454 int error = 0;
455 struct sockaddr_ctl sa;
456 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
457 struct ctl_cb *kcb_next = NULL;
458 u_quad_t sbmaxsize;
459 u_int32_t recvbufsize, sendbufsize;
460
461 if (kcb == 0) {
462 panic("ctl_setup_kctl so_pcb null\n");
463 }
464
465 if (kcb->kctl != NULL) {
466 // Already set up, skip
467 return 0;
468 }
469
470 if (nam->sa_len != sizeof(struct sockaddr_ctl)) {
471 return EINVAL;
472 }
473
474 bcopy(nam, &sa, sizeof(struct sockaddr_ctl));
475
476 lck_mtx_lock(ctl_mtx);
477 kctl = ctl_find_by_id_unit(sa.sc_id, sa.sc_unit);
478 if (kctl == NULL) {
479 lck_mtx_unlock(ctl_mtx);
480 return ENOENT;
481 }
482
483 if (((kctl->flags & CTL_FLAG_REG_SOCK_STREAM) &&
484 (so->so_type != SOCK_STREAM)) ||
485 (!(kctl->flags & CTL_FLAG_REG_SOCK_STREAM) &&
486 (so->so_type != SOCK_DGRAM))) {
487 lck_mtx_unlock(ctl_mtx);
488 return EPROTOTYPE;
489 }
490
491 if (kctl->flags & CTL_FLAG_PRIVILEGED) {
492 if (p == 0) {
493 lck_mtx_unlock(ctl_mtx);
494 return EINVAL;
495 }
496 if (kauth_cred_issuser(kauth_cred_get()) == 0) {
497 lck_mtx_unlock(ctl_mtx);
498 return EPERM;
499 }
500 }
501
502 if ((kctl->flags & CTL_FLAG_REG_ID_UNIT) || sa.sc_unit != 0) {
503 if (kcb_find(kctl, sa.sc_unit) != NULL) {
504 lck_mtx_unlock(ctl_mtx);
505 return EBUSY;
506 }
507 } else {
508 /* Find an unused ID, assumes control IDs are in order */
509 u_int32_t unit = 1;
510
511 TAILQ_FOREACH(kcb_next, &kctl->kcb_head, next) {
512 if (kcb_next->sac.sc_unit > unit) {
513                                 /* Found a gap, let's fill it in */
514 break;
515 }
516 unit = kcb_next->sac.sc_unit + 1;
517 if (unit == ctl_maxunit) {
518 break;
519 }
520 }
521
522 if (unit == ctl_maxunit) {
523 lck_mtx_unlock(ctl_mtx);
524 return EBUSY;
525 }
526
527 sa.sc_unit = unit;
528 }
529
530 bcopy(&sa, &kcb->sac, sizeof(struct sockaddr_ctl));
531 kcb->kctl = kctl;
532 if (kcb_next != NULL) {
533 TAILQ_INSERT_BEFORE(kcb_next, kcb, next);
534 } else {
535 TAILQ_INSERT_TAIL(&kctl->kcb_head, kcb, next);
536 }
537 kctlstat.kcs_pcbcount++;
538 kctlstat.kcs_gencnt++;
539 kctlstat.kcs_connections++;
540 lck_mtx_unlock(ctl_mtx);
541
542 /*
543 * rdar://15526688: Limit the send and receive sizes to sb_max
544 * by using the same scaling as sbreserve()
545 */
546 sbmaxsize = (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES);
547
548 if (kctl->sendbufsize > sbmaxsize) {
549 sendbufsize = sbmaxsize;
550 } else {
551 sendbufsize = kctl->sendbufsize;
552 }
553
554 if (kctl->recvbufsize > sbmaxsize) {
555 recvbufsize = sbmaxsize;
556 } else {
557 recvbufsize = kctl->recvbufsize;
558 }
559
560 error = soreserve(so, sendbufsize, recvbufsize);
561 if (error) {
562 if (ctl_debug) {
563 printf("%s - soreserve(%llx, %u, %u) error %d\n",
564 __func__, (uint64_t)VM_KERNEL_ADDRPERM(so),
565 sendbufsize, recvbufsize, error);
566 }
567 goto done;
568 }
569
570 done:
571 if (error) {
572 soisdisconnected(so);
573 #if DEVELOPMENT || DEBUG
574 kcb->status = KCTL_DISCONNECTED;
575 #endif /* DEVELOPMENT || DEBUG */
576 lck_mtx_lock(ctl_mtx);
577 TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
578 kcb->kctl = NULL;
579 kcb->sac.sc_unit = 0;
580 kctlstat.kcs_pcbcount--;
581 kctlstat.kcs_gencnt++;
582 kctlstat.kcs_conn_fail++;
583 lck_mtx_unlock(ctl_mtx);
584 }
585 return error;
586 }
587
588 static int
589 ctl_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
590 {
591 int error = 0;
592 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
593
594 if (kcb == NULL) {
595 panic("ctl_bind so_pcb null\n");
596 }
597
598 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
599 ctl_kcb_increment_use_count(kcb, mtx_held);
600 ctl_kcb_require_clearing(kcb, mtx_held);
601
602 error = ctl_setup_kctl(so, nam, p);
603 if (error) {
604 goto out;
605 }
606
607 if (kcb->kctl == NULL) {
608 panic("ctl_bind kctl null\n");
609 }
610
611 if (kcb->kctl->bind == NULL) {
612 error = EINVAL;
613 goto out;
614 }
615
616 socket_unlock(so, 0);
617 error = (*kcb->kctl->bind)(kcb->kctl->kctlref, &kcb->sac, &kcb->userdata);
618 socket_lock(so, 0);
619
620 out:
621 ctl_kcb_done_clearing(kcb);
622 ctl_kcb_decrement_use_count(kcb);
623 return error;
624 }
625
626 static int
627 ctl_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
628 {
629 int error = 0;
630 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
631
632 if (kcb == NULL) {
633 panic("ctl_connect so_pcb null\n");
634 }
635
636 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
637 ctl_kcb_increment_use_count(kcb, mtx_held);
638 ctl_kcb_require_clearing(kcb, mtx_held);
639
640 #if DEVELOPMENT || DEBUG
641 if (kcb->status != KCTL_DISCONNECTED && ctl_panic_debug) {
642 panic("kctl already connecting/connected");
643 }
644 kcb->status = KCTL_CONNECTING;
645 #endif /* DEVELOPMENT || DEBUG */
646
647 error = ctl_setup_kctl(so, nam, p);
648 if (error) {
649 goto out;
650 }
651
652 if (kcb->kctl == NULL) {
653 panic("ctl_connect kctl null\n");
654 }
655
656 soisconnecting(so);
657 socket_unlock(so, 0);
658 error = (*kcb->kctl->connect)(kcb->kctl->kctlref, &kcb->sac, &kcb->userdata);
659 socket_lock(so, 0);
660 if (error) {
661 goto end;
662 }
663 soisconnected(so);
664 #if DEVELOPMENT || DEBUG
665 kcb->status = KCTL_CONNECTED;
666 #endif /* DEVELOPMENT || DEBUG */
667
668 end:
669 if (error && kcb->kctl->disconnect) {
670 /*
671                  * XXX Do not check the return value
672                  * of disconnect here.
673                  * ipsec/utun_ctl_disconnect will return an error when
674                  * disconnect gets called after a connect failure.
675                  * However, if we ever decide to check the disconnect
676                  * return value here, please make sure to revisit
677                  * ipsec/utun_ctl_disconnect as well.
678 */
679 socket_unlock(so, 0);
680 (*kcb->kctl->disconnect)(kcb->kctl->kctlref, kcb->sac.sc_unit, kcb->userdata);
681 socket_lock(so, 0);
682 }
683 if (error) {
684 soisdisconnected(so);
685 #if DEVELOPMENT || DEBUG
686 kcb->status = KCTL_DISCONNECTED;
687 #endif /* DEVELOPMENT || DEBUG */
688 lck_mtx_lock(ctl_mtx);
689 TAILQ_REMOVE(&kcb->kctl->kcb_head, kcb, next);
690 kcb->kctl = NULL;
691 kcb->sac.sc_unit = 0;
692 kctlstat.kcs_pcbcount--;
693 kctlstat.kcs_gencnt++;
694 kctlstat.kcs_conn_fail++;
695 lck_mtx_unlock(ctl_mtx);
696 }
697 out:
698 ctl_kcb_done_clearing(kcb);
699 ctl_kcb_decrement_use_count(kcb);
700 return error;
701 }
702
703 static int
704 ctl_disconnect(struct socket *so)
705 {
706 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
707
708 if ((kcb = (struct ctl_cb *)so->so_pcb)) {
709 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
710 ctl_kcb_increment_use_count(kcb, mtx_held);
711 ctl_kcb_require_clearing(kcb, mtx_held);
712 struct kctl *kctl = kcb->kctl;
713
714 if (kctl && kctl->disconnect) {
715 socket_unlock(so, 0);
716 (*kctl->disconnect)(kctl->kctlref, kcb->sac.sc_unit,
717 kcb->userdata);
718 socket_lock(so, 0);
719 }
720
721 soisdisconnected(so);
722 #if DEVELOPMENT || DEBUG
723 kcb->status = KCTL_DISCONNECTED;
724 #endif /* DEVELOPMENT || DEBUG */
725
726 socket_unlock(so, 0);
727 lck_mtx_lock(ctl_mtx);
728 kcb->kctl = 0;
729 kcb->sac.sc_unit = 0;
730 while (kcb->usecount != 0) {
731 msleep(&kcb->usecount, ctl_mtx, 0, "kcb->usecount", 0);
732 }
733 TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
734 kctlstat.kcs_pcbcount--;
735 kctlstat.kcs_gencnt++;
736 lck_mtx_unlock(ctl_mtx);
737 socket_lock(so, 0);
738 ctl_kcb_done_clearing(kcb);
739 ctl_kcb_decrement_use_count(kcb);
740 }
741 return 0;
742 }
743
744 static int
745 ctl_peeraddr(struct socket *so, struct sockaddr **nam)
746 {
747 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
748 struct kctl *kctl;
749 struct sockaddr_ctl sc;
750
751 if (kcb == NULL) { /* sanity check */
752 return ENOTCONN;
753 }
754
755 if ((kctl = kcb->kctl) == NULL) {
756 return EINVAL;
757 }
758
759 bzero(&sc, sizeof(struct sockaddr_ctl));
760 sc.sc_len = sizeof(struct sockaddr_ctl);
761 sc.sc_family = AF_SYSTEM;
762 sc.ss_sysaddr = AF_SYS_CONTROL;
763 sc.sc_id = kctl->id;
764 sc.sc_unit = kcb->sac.sc_unit;
765
766 *nam = dup_sockaddr((struct sockaddr *)&sc, 1);
767
768 return 0;
769 }
770
771 static void
772 ctl_sbrcv_trim(struct socket *so)
773 {
774 struct sockbuf *sb = &so->so_rcv;
775
776 if (sb->sb_hiwat > sb->sb_idealsize) {
777 u_int32_t diff;
778 int32_t trim;
779
780 /*
781 * The difference between the ideal size and the
782                  * current size is the upper bound of the amount we can trim
783 */
784 diff = sb->sb_hiwat - sb->sb_idealsize;
785 /*
786 * We cannot trim below the outstanding data
787 */
788 trim = sb->sb_hiwat - sb->sb_cc;
789
790 trim = imin(trim, (int32_t)diff);
791
792 if (trim > 0) {
793 sbreserve(sb, (sb->sb_hiwat - trim));
794
795 if (ctl_debug) {
796 printf("%s - shrunk to %d\n",
797 __func__, sb->sb_hiwat);
798 }
799 }
800 }
801 }
802
803 static int
804 ctl_usr_rcvd(struct socket *so, int flags)
805 {
806 int error = 0;
807 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
808 struct kctl *kctl;
809
810 if (kcb == NULL) {
811 return ENOTCONN;
812 }
813
814 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
815 ctl_kcb_increment_use_count(kcb, mtx_held);
816
817 if ((kctl = kcb->kctl) == NULL) {
818 error = EINVAL;
819 goto out;
820 }
821
822 if (kctl->rcvd) {
823 socket_unlock(so, 0);
824 (*kctl->rcvd)(kctl->kctlref, kcb->sac.sc_unit, kcb->userdata, flags);
825 socket_lock(so, 0);
826 }
827
828 ctl_sbrcv_trim(so);
829
830 out:
831 ctl_kcb_decrement_use_count(kcb);
832 return error;
833 }
834
835 static int
836 ctl_send(struct socket *so, int flags, struct mbuf *m,
837 struct sockaddr *addr, struct mbuf *control,
838 struct proc *p)
839 {
840 #pragma unused(addr, p)
841 int error = 0;
842 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
843 struct kctl *kctl;
844
845 if (control) {
846 m_freem(control);
847 }
848
849 if (kcb == NULL) { /* sanity check */
850 error = ENOTCONN;
851 }
852
853 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
854 ctl_kcb_increment_use_count(kcb, mtx_held);
855
856 if (error == 0 && (kctl = kcb->kctl) == NULL) {
857 error = EINVAL;
858 }
859
860 if (error == 0 && kctl->send) {
861 so_tc_update_stats(m, so, m_get_service_class(m));
862 socket_unlock(so, 0);
863 error = (*kctl->send)(kctl->kctlref, kcb->sac.sc_unit, kcb->userdata,
864 m, flags);
865 socket_lock(so, 0);
866 } else {
867 m_freem(m);
868 if (error == 0) {
869 error = ENOTSUP;
870 }
871 }
872 if (error != 0) {
873 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_send_fail);
874 }
875 ctl_kcb_decrement_use_count(kcb);
876
877 return error;
878 }
879
880 static int
881 ctl_send_list(struct socket *so, int flags, struct mbuf *m,
882 __unused struct sockaddr *addr, struct mbuf *control,
883 __unused struct proc *p)
884 {
885 int error = 0;
886 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
887 struct kctl *kctl;
888
889 if (control) {
890 m_freem_list(control);
891 }
892
893 if (kcb == NULL) { /* sanity check */
894 error = ENOTCONN;
895 }
896
897 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
898 ctl_kcb_increment_use_count(kcb, mtx_held);
899
900 if (error == 0 && (kctl = kcb->kctl) == NULL) {
901 error = EINVAL;
902 }
903
904 if (error == 0 && kctl->send_list) {
905 struct mbuf *nxt;
906
907 for (nxt = m; nxt != NULL; nxt = nxt->m_nextpkt) {
908 so_tc_update_stats(nxt, so, m_get_service_class(nxt));
909 }
910
911 socket_unlock(so, 0);
912 error = (*kctl->send_list)(kctl->kctlref, kcb->sac.sc_unit,
913 kcb->userdata, m, flags);
914 socket_lock(so, 0);
915 } else if (error == 0 && kctl->send) {
916 while (m != NULL && error == 0) {
917 struct mbuf *nextpkt = m->m_nextpkt;
918
919 m->m_nextpkt = NULL;
920 so_tc_update_stats(m, so, m_get_service_class(m));
921 socket_unlock(so, 0);
922 error = (*kctl->send)(kctl->kctlref, kcb->sac.sc_unit,
923 kcb->userdata, m, flags);
924 socket_lock(so, 0);
925 m = nextpkt;
926 }
927 if (m != NULL) {
928 m_freem_list(m);
929 }
930 } else {
931 m_freem_list(m);
932 if (error == 0) {
933 error = ENOTSUP;
934 }
935 }
936 if (error != 0) {
937 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_send_list_fail);
938 }
939 ctl_kcb_decrement_use_count(kcb);
940
941 return error;
942 }
943
944 static errno_t
945 ctl_rcvbspace(struct socket *so, u_int32_t datasize,
946 u_int32_t kctlflags, u_int32_t flags)
947 {
948 struct sockbuf *sb = &so->so_rcv;
949 u_int32_t space = sbspace(sb);
950 errno_t error;
951
952 if ((kctlflags & CTL_FLAG_REG_CRIT) == 0) {
953 if ((u_int32_t) space >= datasize) {
954 error = 0;
955 } else {
956 error = ENOBUFS;
957 }
958 } else if ((flags & CTL_DATA_CRIT) == 0) {
959 /*
960 * Reserve 25% for critical messages
961 */
962 if (space < (sb->sb_hiwat >> 2) ||
963 space < datasize) {
964 error = ENOBUFS;
965 } else {
966 error = 0;
967 }
968 } else {
969 u_int32_t autorcvbuf_max;
970
971 /*
972 * Allow overcommit of 25%
973 */
974 autorcvbuf_max = min(sb->sb_idealsize + (sb->sb_idealsize >> 2),
975 ctl_autorcvbuf_max);
976
977 if ((u_int32_t) space >= datasize) {
978 error = 0;
979 } else if (tcp_cansbgrow(sb) &&
980 sb->sb_hiwat < autorcvbuf_max) {
981 /*
982 * Grow with a little bit of leeway
983 */
984 u_int32_t grow = datasize - space + MSIZE;
985
986 if (sbreserve(sb,
987 min((sb->sb_hiwat + grow), autorcvbuf_max)) == 1) {
988 if (sb->sb_hiwat > ctl_autorcvbuf_high) {
989 ctl_autorcvbuf_high = sb->sb_hiwat;
990 }
991
992 /*
993 * A final check
994 */
995 if ((u_int32_t) sbspace(sb) >= datasize) {
996 error = 0;
997 } else {
998 error = ENOBUFS;
999 }
1000
1001 if (ctl_debug) {
1002 printf("%s - grown to %d error %d\n",
1003 __func__, sb->sb_hiwat, error);
1004 }
1005 } else {
1006 error = ENOBUFS;
1007 }
1008 } else {
1009 error = ENOBUFS;
1010 }
1011 }
1012 return error;
1013 }
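/*
 * Worked example of the policy above (numbers for illustration only):
 * with sb_hiwat == 8192 on a CTL_FLAG_REG_CRIT control, a non-critical
 * enqueue is refused once sbspace() drops below sb_hiwat >> 2 == 2048,
 * keeping that last quarter free for CTL_DATA_CRIT messages.  A critical
 * enqueue may additionally grow the buffer up to
 * min(sb_idealsize + (sb_idealsize >> 2), ctl_autorcvbuf_max), i.e. the
 * ideal size plus a 25% overcommit, capped by the sysctl.
 */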
1014
1015 errno_t
1016 ctl_enqueuembuf(kern_ctl_ref kctlref, u_int32_t unit, struct mbuf *m,
1017 u_int32_t flags)
1018 {
1019 struct socket *so;
1020 errno_t error = 0;
1021 int len = m->m_pkthdr.len;
1022 u_int32_t kctlflags;
1023
1024 so = kcb_find_socket(kctlref, unit, &kctlflags);
1025 if (so == NULL) {
1026 return EINVAL;
1027 }
1028
1029 if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
1030 error = ENOBUFS;
1031 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
1032 goto bye;
1033 }
1034 if ((flags & CTL_DATA_EOR)) {
1035 m->m_flags |= M_EOR;
1036 }
1037
1038 so_recv_data_stat(so, m, 0);
1039 if (sbappend_nodrop(&so->so_rcv, m) != 0) {
1040 if ((flags & CTL_DATA_NOWAKEUP) == 0) {
1041 sorwakeup(so);
1042 }
1043 } else {
1044 error = ENOBUFS;
1045 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
1046 }
1047 bye:
1048 if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) {
1049 printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
1050 __func__, error, len,
1051 so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);
1052 }
1053
1054 socket_unlock(so, 1);
1055 if (error != 0) {
1056 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
1057 }
1058
1059 return error;
1060 }
1061
1062 /*
1063 * Compute space occupied by mbuf like sbappendrecord
1064 */
1065 static int
1066 m_space(struct mbuf *m)
1067 {
1068 int space = 0;
1069 struct mbuf *nxt;
1070
1071 for (nxt = m; nxt != NULL; nxt = nxt->m_next) {
1072 space += nxt->m_len;
1073 }
1074
1075 return space;
1076 }
1077
1078 errno_t
1079 ctl_enqueuembuf_list(void *kctlref, u_int32_t unit, struct mbuf *m_list,
1080 u_int32_t flags, struct mbuf **m_remain)
1081 {
1082 struct socket *so = NULL;
1083 errno_t error = 0;
1084 struct mbuf *m, *nextpkt;
1085 int needwakeup = 0;
1086 int len = 0;
1087 u_int32_t kctlflags;
1088
1089 /*
1090          * Need to point at the beginning of the list in case of an early exit
1091 */
1092 m = m_list;
1093
1094 /*
1095 * kcb_find_socket takes the socket lock with a reference
1096 */
1097 so = kcb_find_socket(kctlref, unit, &kctlflags);
1098 if (so == NULL) {
1099 error = EINVAL;
1100 goto done;
1101 }
1102
1103 if (kctlflags & CTL_FLAG_REG_SOCK_STREAM) {
1104 error = EOPNOTSUPP;
1105 goto done;
1106 }
1107 if (flags & CTL_DATA_EOR) {
1108 error = EINVAL;
1109 goto done;
1110 }
1111
1112 for (m = m_list; m != NULL; m = nextpkt) {
1113 nextpkt = m->m_nextpkt;
1114
1115 if (m->m_pkthdr.len == 0 && ctl_debug) {
1116 printf("%s: %llx m_pkthdr.len is 0",
1117 __func__, (uint64_t)VM_KERNEL_ADDRPERM(m));
1118 }
1119
1120 /*
1121                  * The mbuf is either appended or freed by sbappendrecord(),
1122                  * so compute its length before it is handed off
1123 */
1124 len = m_space(m);
1125 if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
1126 error = ENOBUFS;
1127 OSIncrementAtomic64(
1128 (SInt64 *)&kctlstat.kcs_enqueue_fullsock);
1129 break;
1130 } else {
1131 /*
1132 * Unlink from the list, m is on its own
1133 */
1134 m->m_nextpkt = NULL;
1135 so_recv_data_stat(so, m, 0);
1136 if (sbappendrecord_nodrop(&so->so_rcv, m) != 0) {
1137 needwakeup = 1;
1138 } else {
1139 /*
1140 * We free or return the remaining
1141 * mbufs in the list
1142 */
1143 m = nextpkt;
1144 error = ENOBUFS;
1145 OSIncrementAtomic64(
1146 (SInt64 *)&kctlstat.kcs_enqueue_fullsock);
1147 break;
1148 }
1149 }
1150 }
1151 if (needwakeup && (flags & CTL_DATA_NOWAKEUP) == 0) {
1152 sorwakeup(so);
1153 }
1154
1155 done:
1156 if (so != NULL) {
1157 if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) {
1158 printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
1159 __func__, error, len,
1160 so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);
1161 }
1162
1163 socket_unlock(so, 1);
1164 }
1165 if (m_remain) {
1166 *m_remain = m;
1167
1168 if (m != NULL && socket_debug && so != NULL &&
1169 (so->so_options & SO_DEBUG)) {
1170 struct mbuf *n;
1171
1172 printf("%s m_list %llx\n", __func__,
1173 (uint64_t) VM_KERNEL_ADDRPERM(m_list));
1174 for (n = m; n != NULL; n = n->m_nextpkt) {
1175 printf(" remain %llx m_next %llx\n",
1176 (uint64_t) VM_KERNEL_ADDRPERM(n),
1177 (uint64_t) VM_KERNEL_ADDRPERM(n->m_next));
1178 }
1179 }
1180 } else {
1181 if (m != NULL) {
1182 m_freem_list(m);
1183 }
1184 }
1185 if (error != 0) {
1186 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
1187 }
1188 return error;
1189 }
1190
1191 errno_t
1192 ctl_enqueuedata(void *kctlref, u_int32_t unit, void *data, size_t len,
1193 u_int32_t flags)
1194 {
1195 struct socket *so;
1196 struct mbuf *m;
1197 errno_t error = 0;
1198 unsigned int num_needed;
1199 struct mbuf *n;
1200 size_t curlen = 0;
1201 u_int32_t kctlflags;
1202
1203 so = kcb_find_socket(kctlref, unit, &kctlflags);
1204 if (so == NULL) {
1205 return EINVAL;
1206 }
1207
1208 if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
1209 error = ENOBUFS;
1210 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
1211 goto bye;
1212 }
1213
1214 num_needed = 1;
1215 m = m_allocpacket_internal(&num_needed, len, NULL, M_NOWAIT, 1, 0);
1216 if (m == NULL) {
1217 kctlstat.kcs_enqdata_mb_alloc_fail++;
1218 if (ctl_debug) {
1219 printf("%s: m_allocpacket_internal(%lu) failed\n",
1220 __func__, len);
1221 }
1222 error = ENOMEM;
1223 goto bye;
1224 }
1225
1226 for (n = m; n != NULL; n = n->m_next) {
1227 size_t mlen = mbuf_maxlen(n);
1228
1229 if (mlen + curlen > len) {
1230 mlen = len - curlen;
1231 }
1232 n->m_len = mlen;
1233 bcopy((char *)data + curlen, n->m_data, mlen);
1234 curlen += mlen;
1235 }
1236 mbuf_pkthdr_setlen(m, curlen);
1237
1238 if ((flags & CTL_DATA_EOR)) {
1239 m->m_flags |= M_EOR;
1240 }
1241 so_recv_data_stat(so, m, 0);
1242 /*
1243 * No need to call the "nodrop" variant of sbappend
1244 * because the mbuf is local to the scope of the function
1245 */
1246 if (sbappend(&so->so_rcv, m) != 0) {
1247 if ((flags & CTL_DATA_NOWAKEUP) == 0) {
1248 sorwakeup(so);
1249 }
1250 } else {
1251 kctlstat.kcs_enqdata_sbappend_fail++;
1252 error = ENOBUFS;
1253 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
1254 }
1255
1256 bye:
1257 if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) {
1258 printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
1259 __func__, error, (int)len,
1260 so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);
1261 }
1262
1263 socket_unlock(so, 1);
1264 if (error != 0) {
1265 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
1266 }
1267 return error;
1268 }
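/*
 * Minimal provider-side sketch of delivering data to a connected client
 * (illustrative only; `ref`, `unit`, `buf` and `buflen` are whatever the
 * kernel control saved from its connect callback):
 *
 *	errno_t err = ctl_enqueuedata(ref, unit, buf, buflen, CTL_DATA_EOR);
 *	if (err == ENOBUFS) {
 *		// the receive buffer is full; retry later, typically when
 *		// the ctl_rcvd callback reports that the client has read data
 *	}
 */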
1269
1270 errno_t
1271 ctl_getenqueuepacketcount(kern_ctl_ref kctlref, u_int32_t unit, u_int32_t *pcnt)
1272 {
1273 struct socket *so;
1274 u_int32_t cnt;
1275 struct mbuf *m1;
1276
1277 if (pcnt == NULL) {
1278 return EINVAL;
1279 }
1280
1281 so = kcb_find_socket(kctlref, unit, NULL);
1282 if (so == NULL) {
1283 return EINVAL;
1284 }
1285
1286 cnt = 0;
1287 m1 = so->so_rcv.sb_mb;
1288 while (m1 != NULL) {
1289 if (m1->m_type == MT_DATA ||
1290 m1->m_type == MT_HEADER ||
1291 m1->m_type == MT_OOBDATA) {
1292 cnt += 1;
1293 }
1294 m1 = m1->m_nextpkt;
1295 }
1296 *pcnt = cnt;
1297
1298 socket_unlock(so, 1);
1299
1300 return 0;
1301 }
1302
1303 errno_t
1304 ctl_getenqueuespace(kern_ctl_ref kctlref, u_int32_t unit, size_t *space)
1305 {
1306 struct socket *so;
1307 long avail;
1308
1309 if (space == NULL) {
1310 return EINVAL;
1311 }
1312
1313 so = kcb_find_socket(kctlref, unit, NULL);
1314 if (so == NULL) {
1315 return EINVAL;
1316 }
1317
1318 avail = sbspace(&so->so_rcv);
1319 *space = (avail < 0) ? 0 : avail;
1320 socket_unlock(so, 1);
1321
1322 return 0;
1323 }
1324
1325 errno_t
1326 ctl_getenqueuereadable(kern_ctl_ref kctlref, u_int32_t unit,
1327 u_int32_t *difference)
1328 {
1329 struct socket *so;
1330
1331 if (difference == NULL) {
1332 return EINVAL;
1333 }
1334
1335 so = kcb_find_socket(kctlref, unit, NULL);
1336 if (so == NULL) {
1337 return EINVAL;
1338 }
1339
1340 if (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat) {
1341 *difference = 0;
1342 } else {
1343 *difference = (so->so_rcv.sb_lowat - so->so_rcv.sb_cc);
1344 }
1345 socket_unlock(so, 1);
1346
1347 return 0;
1348 }
1349
1350 static int
1351 ctl_ctloutput(struct socket *so, struct sockopt *sopt)
1352 {
1353 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
1354 struct kctl *kctl;
1355 int error = 0;
1356 void *data = NULL;
1357 size_t len;
1358
1359 if (sopt->sopt_level != SYSPROTO_CONTROL) {
1360 return EINVAL;
1361 }
1362
1363 if (kcb == NULL) { /* sanity check */
1364 return ENOTCONN;
1365 }
1366
1367 if ((kctl = kcb->kctl) == NULL) {
1368 return EINVAL;
1369 }
1370
1371 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
1372 ctl_kcb_increment_use_count(kcb, mtx_held);
1373
1374 switch (sopt->sopt_dir) {
1375 case SOPT_SET:
1376 if (kctl->setopt == NULL) {
1377 error = ENOTSUP;
1378 goto out;
1379 }
1380 if (sopt->sopt_valsize != 0) {
1381 MALLOC(data, void *, sopt->sopt_valsize, M_TEMP,
1382 M_WAITOK | M_ZERO);
1383 if (data == NULL) {
1384 error = ENOMEM;
1385 goto out;
1386 }
1387 error = sooptcopyin(sopt, data,
1388 sopt->sopt_valsize, sopt->sopt_valsize);
1389 }
1390 if (error == 0) {
1391 socket_unlock(so, 0);
1392 error = (*kctl->setopt)(kctl->kctlref,
1393 kcb->sac.sc_unit, kcb->userdata, sopt->sopt_name,
1394 data, sopt->sopt_valsize);
1395 socket_lock(so, 0);
1396 }
1397
1398 if (data != NULL) {
1399 FREE(data, M_TEMP);
1400 }
1401 break;
1402
1403 case SOPT_GET:
1404 if (kctl->getopt == NULL) {
1405 error = ENOTSUP;
1406 goto out;
1407 }
1408
1409 if (sopt->sopt_valsize && sopt->sopt_val) {
1410 MALLOC(data, void *, sopt->sopt_valsize, M_TEMP,
1411 M_WAITOK | M_ZERO);
1412 if (data == NULL) {
1413 error = ENOMEM;
1414 goto out;
1415 }
1416 /*
1417 * 4108337 - copy user data in case the
1418 * kernel control needs it
1419 */
1420 error = sooptcopyin(sopt, data,
1421 sopt->sopt_valsize, sopt->sopt_valsize);
1422 }
1423
1424 if (error == 0) {
1425 len = sopt->sopt_valsize;
1426 socket_unlock(so, 0);
1427 error = (*kctl->getopt)(kctl->kctlref, kcb->sac.sc_unit,
1428 kcb->userdata, sopt->sopt_name,
1429 data, &len);
1430 if (data != NULL && len > sopt->sopt_valsize) {
1431 panic_plain("ctl_ctloutput: ctl %s returned "
1432 "len (%lu) > sopt_valsize (%lu)\n",
1433 kcb->kctl->name, len,
1434 sopt->sopt_valsize);
1435 }
1436 socket_lock(so, 0);
1437 if (error == 0) {
1438 if (data != NULL) {
1439 error = sooptcopyout(sopt, data, len);
1440 } else {
1441 sopt->sopt_valsize = len;
1442 }
1443 }
1444 }
1445 if (data != NULL) {
1446 FREE(data, M_TEMP);
1447 }
1448 break;
1449 }
1450
1451 out:
1452 ctl_kcb_decrement_use_count(kcb);
1453 return error;
1454 }
1455
1456 static int
1457 ctl_ioctl(struct socket *so, u_long cmd, caddr_t data,
1458 struct ifnet *ifp, struct proc *p)
1459 {
1460 #pragma unused(so, ifp, p)
1461 int error = ENOTSUP;
1462
1463 switch (cmd) {
1464 /* get the number of controllers */
1465 case CTLIOCGCOUNT: {
1466 struct kctl *kctl;
1467 u_int32_t n = 0;
1468
1469 lck_mtx_lock(ctl_mtx);
1470 TAILQ_FOREACH(kctl, &ctl_head, next)
1471 n++;
1472 lck_mtx_unlock(ctl_mtx);
1473
1474 bcopy(&n, data, sizeof(n));
1475 error = 0;
1476 break;
1477 }
1478 case CTLIOCGINFO: {
1479 struct ctl_info ctl_info;
1480 struct kctl *kctl = 0;
1481 size_t name_len;
1482
1483 bcopy(data, &ctl_info, sizeof(ctl_info));
1484 name_len = strnlen(ctl_info.ctl_name, MAX_KCTL_NAME);
1485
1486 if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) {
1487 error = EINVAL;
1488 break;
1489 }
1490 lck_mtx_lock(ctl_mtx);
1491 kctl = ctl_find_by_name(ctl_info.ctl_name);
1492 lck_mtx_unlock(ctl_mtx);
1493 if (kctl == 0) {
1494 error = ENOENT;
1495 break;
1496 }
1497 ctl_info.ctl_id = kctl->id;
1498 bcopy(&ctl_info, data, sizeof(ctl_info));
1499 error = 0;
1500 break;
1501 }
1502
1503 /* add controls to get list of NKEs */
1504 }
1505
1506 return error;
1507 }
1508
1509 static void
1510 kctl_tbl_grow()
1511 {
1512 struct kctl **new_table;
1513 uintptr_t new_size;
1514
1515 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1516
1517 if (kctl_tbl_growing) {
1518 /* Another thread is allocating */
1519 kctl_tbl_growing_waiting++;
1520
1521 do {
1522 (void) msleep((caddr_t) &kctl_tbl_growing, ctl_mtx,
1523 PSOCK | PCATCH, "kctl_tbl_growing", 0);
1524 } while (kctl_tbl_growing);
1525 kctl_tbl_growing_waiting--;
1526 }
1527 /* Another thread grew the table */
1528 if (kctl_table != NULL && kctl_tbl_count < kctl_tbl_size) {
1529 return;
1530 }
1531
1532 /* Verify we have a sane size */
1533 if (kctl_tbl_size + KCTL_TBL_INC >= UINT16_MAX) {
1534 kctlstat.kcs_tbl_size_too_big++;
1535 if (ctl_debug) {
1536 printf("%s kctl_tbl_size %lu too big\n",
1537 __func__, kctl_tbl_size);
1538 }
1539 return;
1540 }
1541 kctl_tbl_growing = 1;
1542
1543 new_size = kctl_tbl_size + KCTL_TBL_INC;
1544
1545 lck_mtx_unlock(ctl_mtx);
1546 new_table = _MALLOC(sizeof(struct kctl *) * new_size,
1547 M_TEMP, M_WAIT | M_ZERO);
1548 lck_mtx_lock(ctl_mtx);
1549
1550 if (new_table != NULL) {
1551 if (kctl_table != NULL) {
1552 bcopy(kctl_table, new_table,
1553 kctl_tbl_size * sizeof(struct kctl *));
1554
1555 _FREE(kctl_table, M_TEMP);
1556 }
1557 kctl_table = new_table;
1558 kctl_tbl_size = new_size;
1559 }
1560
1561 kctl_tbl_growing = 0;
1562
1563 if (kctl_tbl_growing_waiting) {
1564 wakeup(&kctl_tbl_growing);
1565 }
1566 }
1567
1568 #define KCTLREF_INDEX_MASK 0x0000FFFF
1569 #define KCTLREF_GENCNT_MASK 0xFFFF0000
1570 #define KCTLREF_GENCNT_SHIFT 16
1571
1572 static kern_ctl_ref
1573 kctl_make_ref(struct kctl *kctl)
1574 {
1575 uintptr_t i;
1576
1577 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1578
1579 if (kctl_tbl_count >= kctl_tbl_size) {
1580 kctl_tbl_grow();
1581 }
1582
1583 kctl->kctlref = NULL;
1584 for (i = 0; i < kctl_tbl_size; i++) {
1585 if (kctl_table[i] == NULL) {
1586 uintptr_t ref;
1587
1588 /*
1589 * Reference is index plus one
1590 */
1591 kctl_ref_gencnt += 1;
1592
1593 /*
1594 * Add generation count as salt to reference to prevent
1595 * use after deregister
1596 */
1597 ref = ((kctl_ref_gencnt << KCTLREF_GENCNT_SHIFT) &
1598 KCTLREF_GENCNT_MASK) +
1599 ((i + 1) & KCTLREF_INDEX_MASK);
1600
1601 kctl->kctlref = (void *)(ref);
1602 kctl_table[i] = kctl;
1603 kctl_tbl_count++;
1604 break;
1605 }
1606 }
1607
1608 if (kctl->kctlref == NULL) {
1609 panic("%s no space in table", __func__);
1610 }
1611
1612 if (ctl_debug > 0) {
1613 printf("%s %p for %p\n",
1614 __func__, kctl->kctlref, kctl);
1615 }
1616
1617 return kctl->kctlref;
1618 }
1619
1620 static void
1621 kctl_delete_ref(kern_ctl_ref kctlref)
1622 {
1623 /*
1624 * Reference is index plus one
1625 */
1626 uintptr_t i = (((uintptr_t)kctlref) & KCTLREF_INDEX_MASK) - 1;
1627
1628 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1629
1630 if (i < kctl_tbl_size) {
1631 struct kctl *kctl = kctl_table[i];
1632
1633 if (kctl->kctlref == kctlref) {
1634 kctl_table[i] = NULL;
1635 kctl_tbl_count--;
1636 } else {
1637 kctlstat.kcs_bad_kctlref++;
1638 }
1639 } else {
1640 kctlstat.kcs_bad_kctlref++;
1641 }
1642 }
1643
1644 static struct kctl *
1645 kctl_from_ref(kern_ctl_ref kctlref)
1646 {
1647 /*
1648 * Reference is index plus one
1649 */
1650 uintptr_t i = (((uintptr_t)kctlref) & KCTLREF_INDEX_MASK) - 1;
1651 struct kctl *kctl = NULL;
1652
1653 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1654
1655 if (i >= kctl_tbl_size) {
1656 kctlstat.kcs_bad_kctlref++;
1657 return NULL;
1658 }
1659 kctl = kctl_table[i];
1660 if (kctl->kctlref != kctlref) {
1661 kctlstat.kcs_bad_kctlref++;
1662 return NULL;
1663 }
1664 return kctl;
1665 }
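/*
 * Worked example of the encoding above (values for illustration only):
 * with kctl_ref_gencnt == 5 and a free slot at index i == 2, the ref is
 * ((5 << KCTLREF_GENCNT_SHIFT) & KCTLREF_GENCNT_MASK) +
 * ((2 + 1) & KCTLREF_INDEX_MASK) == 0x00050003.  kctl_from_ref()
 * recovers the index as (0x00050003 & KCTLREF_INDEX_MASK) - 1 == 2 and
 * then checks that the stored kctl still carries the same salted ref,
 * so a stale reference from before a deregister/re-register is rejected.
 */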
1666
1667 /*
1668  * Register/unregister an NKE
1669 */
1670 errno_t
1671 ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref)
1672 {
1673 struct kctl *kctl = NULL;
1674 struct kctl *kctl_next = NULL;
1675 u_int32_t id = 1;
1676 size_t name_len;
1677 int is_extended = 0;
1678
1679 if (userkctl == NULL) { /* sanity check */
1680 return EINVAL;
1681 }
1682 if (userkctl->ctl_connect == NULL) {
1683 return EINVAL;
1684 }
1685 name_len = strlen(userkctl->ctl_name);
1686 if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) {
1687 return EINVAL;
1688 }
1689
1690 MALLOC(kctl, struct kctl *, sizeof(*kctl), M_TEMP, M_WAITOK);
1691 if (kctl == NULL) {
1692 return ENOMEM;
1693 }
1694 bzero((char *)kctl, sizeof(*kctl));
1695
1696 lck_mtx_lock(ctl_mtx);
1697
1698 if (kctl_make_ref(kctl) == NULL) {
1699 lck_mtx_unlock(ctl_mtx);
1700 FREE(kctl, M_TEMP);
1701 return ENOMEM;
1702 }
1703
1704 /*
1705 * Kernel Control IDs
1706 *
1707 * CTL_FLAG_REG_ID_UNIT indicates the control ID and unit number are
1708 * static. If they do not exist, add them to the list in order. If the
1709 * flag is not set, we must find a new unique value. We assume the
1710 * list is in order. We find the last item in the list and add one. If
1711 * this leads to wrapping the id around, we start at the front of the
1712 * list and look for a gap.
1713 */
1714
1715 if ((userkctl->ctl_flags & CTL_FLAG_REG_ID_UNIT) == 0) {
1716 /* Must dynamically assign an unused ID */
1717
1718 /* Verify the same name isn't already registered */
1719 if (ctl_find_by_name(userkctl->ctl_name) != NULL) {
1720 kctl_delete_ref(kctl->kctlref);
1721 lck_mtx_unlock(ctl_mtx);
1722 FREE(kctl, M_TEMP);
1723 return EEXIST;
1724 }
1725
1726 /* Start with 1 in case the list is empty */
1727 id = 1;
1728 kctl_next = TAILQ_LAST(&ctl_head, kctl_list);
1729
1730 if (kctl_next != NULL) {
1731 /* List was not empty, add one to the last item */
1732 id = kctl_next->id + 1;
1733 kctl_next = NULL;
1734
1735 /*
1736 * If this wrapped the id number, start looking at
1737 * the front of the list for an unused id.
1738 */
1739 if (id == 0) {
1740 /* Find the next unused ID */
1741 id = 1;
1742
1743 TAILQ_FOREACH(kctl_next, &ctl_head, next) {
1744 if (kctl_next->id > id) {
1745 /* We found a gap */
1746 break;
1747 }
1748
1749 id = kctl_next->id + 1;
1750 }
1751 }
1752 }
1753
1754 userkctl->ctl_id = id;
1755 kctl->id = id;
1756 kctl->reg_unit = -1;
1757 } else {
1758 TAILQ_FOREACH(kctl_next, &ctl_head, next) {
1759 if (kctl_next->id > userkctl->ctl_id) {
1760 break;
1761 }
1762 }
1763
1764 if (ctl_find_by_id_unit(userkctl->ctl_id, userkctl->ctl_unit)) {
1765 kctl_delete_ref(kctl->kctlref);
1766 lck_mtx_unlock(ctl_mtx);
1767 FREE(kctl, M_TEMP);
1768 return EEXIST;
1769 }
1770 kctl->id = userkctl->ctl_id;
1771 kctl->reg_unit = userkctl->ctl_unit;
1772 }
1773
1774 is_extended = (userkctl->ctl_flags & CTL_FLAG_REG_EXTENDED);
1775
1776 strlcpy(kctl->name, userkctl->ctl_name, MAX_KCTL_NAME);
1777 kctl->flags = userkctl->ctl_flags;
1778
1779 /*
1780 * Let the caller know the default send and receive sizes
1781 */
1782 if (userkctl->ctl_sendsize == 0) {
1783 kctl->sendbufsize = CTL_SENDSIZE;
1784 userkctl->ctl_sendsize = kctl->sendbufsize;
1785 } else {
1786 kctl->sendbufsize = userkctl->ctl_sendsize;
1787 }
1788 if (userkctl->ctl_recvsize == 0) {
1789 kctl->recvbufsize = CTL_RECVSIZE;
1790 userkctl->ctl_recvsize = kctl->recvbufsize;
1791 } else {
1792 kctl->recvbufsize = userkctl->ctl_recvsize;
1793 }
1794
1795 kctl->bind = userkctl->ctl_bind;
1796 kctl->connect = userkctl->ctl_connect;
1797 kctl->disconnect = userkctl->ctl_disconnect;
1798 kctl->send = userkctl->ctl_send;
1799 kctl->setopt = userkctl->ctl_setopt;
1800 kctl->getopt = userkctl->ctl_getopt;
1801 if (is_extended) {
1802 kctl->rcvd = userkctl->ctl_rcvd;
1803 kctl->send_list = userkctl->ctl_send_list;
1804 }
1805
1806 TAILQ_INIT(&kctl->kcb_head);
1807
1808 if (kctl_next) {
1809 TAILQ_INSERT_BEFORE(kctl_next, kctl, next);
1810 } else {
1811 TAILQ_INSERT_TAIL(&ctl_head, kctl, next);
1812 }
1813
1814 kctlstat.kcs_reg_count++;
1815 kctlstat.kcs_gencnt++;
1816
1817 lck_mtx_unlock(ctl_mtx);
1818
1819 *kctlref = kctl->kctlref;
1820
1821 ctl_post_msg(KEV_CTL_REGISTERED, kctl->id);
1822 return 0;
1823 }
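/*
 * Minimal registration sketch from a kernel control provider's start
 * routine (illustrative only; the control name and the example_*
 * callbacks are hypothetical):
 *
 *	static kern_ctl_ref g_ref;
 *
 *	struct kern_ctl_reg reg;
 *	bzero(&reg, sizeof(reg));
 *	strlcpy(reg.ctl_name, "com.example.kctl", sizeof(reg.ctl_name));
 *	reg.ctl_flags = 0;			// dynamic id, SOCK_DGRAM clients
 *	reg.ctl_connect = example_connect;	// required, see the check above
 *	reg.ctl_disconnect = example_disconnect;
 *	reg.ctl_send = example_send;
 *	errno_t err = ctl_register(&reg, &g_ref);
 *
 * On success *kctlref identifies the control for the ctl_enqueue*()
 * functions and ctl_deregister(); ctl_sendsize/ctl_recvsize are filled
 * in with the CTL_SENDSIZE/CTL_RECVSIZE defaults when left at 0.
 */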
1824
1825 errno_t
1826 ctl_deregister(void *kctlref)
1827 {
1828 struct kctl *kctl;
1829
1830 lck_mtx_lock(ctl_mtx);
1831 if ((kctl = kctl_from_ref(kctlref)) == NULL) {
1832 kctlstat.kcs_bad_kctlref++;
1833 lck_mtx_unlock(ctl_mtx);
1834 if (ctl_debug != 0) {
1835 printf("%s invalid kctlref %p\n",
1836 __func__, kctlref);
1837 }
1838 return EINVAL;
1839 }
1840
1841 if (!TAILQ_EMPTY(&kctl->kcb_head)) {
1842 lck_mtx_unlock(ctl_mtx);
1843 return EBUSY;
1844 }
1845
1846 TAILQ_REMOVE(&ctl_head, kctl, next);
1847
1848 kctlstat.kcs_reg_count--;
1849 kctlstat.kcs_gencnt++;
1850
1851 kctl_delete_ref(kctl->kctlref);
1852 lck_mtx_unlock(ctl_mtx);
1853
1854 ctl_post_msg(KEV_CTL_DEREGISTERED, kctl->id);
1855 FREE(kctl, M_TEMP);
1856 return 0;
1857 }
1858
1859 /*
1860  * Must be called with global ctl_mtx lock taken
1861 */
1862 static struct kctl *
1863 ctl_find_by_name(const char *name)
1864 {
1865 struct kctl *kctl;
1866
1867 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1868
1869 TAILQ_FOREACH(kctl, &ctl_head, next)
1870 if (strncmp(kctl->name, name, sizeof(kctl->name)) == 0) {
1871 return kctl;
1872 }
1873
1874 return NULL;
1875 }
1876
1877 u_int32_t
1878 ctl_id_by_name(const char *name)
1879 {
1880 u_int32_t ctl_id = 0;
1881 struct kctl *kctl;
1882
1883 lck_mtx_lock(ctl_mtx);
1884 kctl = ctl_find_by_name(name);
1885 if (kctl) {
1886 ctl_id = kctl->id;
1887 }
1888 lck_mtx_unlock(ctl_mtx);
1889
1890 return ctl_id;
1891 }
1892
1893 errno_t
1894 ctl_name_by_id(u_int32_t id, char *out_name, size_t maxsize)
1895 {
1896 int found = 0;
1897 struct kctl *kctl;
1898
1899 lck_mtx_lock(ctl_mtx);
1900 TAILQ_FOREACH(kctl, &ctl_head, next) {
1901 if (kctl->id == id) {
1902 break;
1903 }
1904 }
1905
1906 if (kctl) {
1907 if (maxsize > MAX_KCTL_NAME) {
1908 maxsize = MAX_KCTL_NAME;
1909 }
1910 strlcpy(out_name, kctl->name, maxsize);
1911 found = 1;
1912 }
1913 lck_mtx_unlock(ctl_mtx);
1914
1915 return found ? 0 : ENOENT;
1916 }
1917
1918 /*
1919  * Must be called with global ctl_mtx lock taken
1920 *
1921 */
1922 static struct kctl *
1923 ctl_find_by_id_unit(u_int32_t id, u_int32_t unit)
1924 {
1925 struct kctl *kctl;
1926
1927 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1928
1929 TAILQ_FOREACH(kctl, &ctl_head, next) {
1930 if (kctl->id == id && (kctl->flags & CTL_FLAG_REG_ID_UNIT) == 0) {
1931 return kctl;
1932 } else if (kctl->id == id && kctl->reg_unit == unit) {
1933 return kctl;
1934 }
1935 }
1936 return NULL;
1937 }
1938
1939 /*
1940 * Must be called with kernel controller lock taken
1941 */
1942 static struct ctl_cb *
1943 kcb_find(struct kctl *kctl, u_int32_t unit)
1944 {
1945 struct ctl_cb *kcb;
1946
1947 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1948
1949 TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
1950 if (kcb->sac.sc_unit == unit) {
1951 return kcb;
1952 }
1953
1954 return NULL;
1955 }
1956
1957 static struct socket *
1958 kcb_find_socket(kern_ctl_ref kctlref, u_int32_t unit, u_int32_t *kctlflags)
1959 {
1960 struct socket *so = NULL;
1961 struct ctl_cb *kcb;
1962 void *lr_saved;
1963 struct kctl *kctl;
1964 int i;
1965
1966 lr_saved = __builtin_return_address(0);
1967
1968 lck_mtx_lock(ctl_mtx);
1969 /*
1970 * First validate the kctlref
1971 */
1972 if ((kctl = kctl_from_ref(kctlref)) == NULL) {
1973 kctlstat.kcs_bad_kctlref++;
1974 lck_mtx_unlock(ctl_mtx);
1975 if (ctl_debug != 0) {
1976 printf("%s invalid kctlref %p\n",
1977 __func__, kctlref);
1978 }
1979 return NULL;
1980 }
1981
1982 kcb = kcb_find(kctl, unit);
1983 if (kcb == NULL || kcb->kctl != kctl || (so = kcb->so) == NULL) {
1984 lck_mtx_unlock(ctl_mtx);
1985 return NULL;
1986 }
1987 /*
1988 * This prevents the socket from being closed
1989 */
1990 kcb->usecount++;
1991 /*
1992 * Respect lock ordering: socket before ctl_mtx
1993 */
1994 lck_mtx_unlock(ctl_mtx);
1995
1996 socket_lock(so, 1);
1997 /*
1998 * The socket lock history is more useful if we store
1999 * the address of the caller.
2000 */
2001 i = (so->next_lock_lr + SO_LCKDBG_MAX - 1) % SO_LCKDBG_MAX;
2002 so->lock_lr[i] = lr_saved;
2003
2004 lck_mtx_lock(ctl_mtx);
2005
2006 if ((kctl = kctl_from_ref(kctlref)) == NULL || kcb->kctl == NULL) {
2007 lck_mtx_unlock(ctl_mtx);
2008 socket_unlock(so, 1);
2009 so = NULL;
2010 lck_mtx_lock(ctl_mtx);
2011 } else if (kctlflags != NULL) {
2012 *kctlflags = kctl->flags;
2013 }
2014
2015 kcb->usecount--;
2016 if (kcb->usecount == 0) {
2017 wakeup((event_t)&kcb->usecount);
2018 }
2019
2020 lck_mtx_unlock(ctl_mtx);
2021
2022 return so;
2023 }
2024
2025 static void
2026 ctl_post_msg(u_int32_t event_code, u_int32_t id)
2027 {
2028 struct ctl_event_data ctl_ev_data;
2029 struct kev_msg ev_msg;
2030
2031 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_NOTOWNED);
2032
2033 bzero(&ev_msg, sizeof(struct kev_msg));
2034 ev_msg.vendor_code = KEV_VENDOR_APPLE;
2035
2036 ev_msg.kev_class = KEV_SYSTEM_CLASS;
2037 ev_msg.kev_subclass = KEV_CTL_SUBCLASS;
2038 ev_msg.event_code = event_code;
2039
2040 /* common nke subclass data */
2041 bzero(&ctl_ev_data, sizeof(ctl_ev_data));
2042 ctl_ev_data.ctl_id = id;
2043 ev_msg.dv[0].data_ptr = &ctl_ev_data;
2044 ev_msg.dv[0].data_length = sizeof(ctl_ev_data);
2045
2046 ev_msg.dv[1].data_length = 0;
2047
2048 kev_post_msg(&ev_msg);
2049 }
2050
2051 static int
2052 ctl_lock(struct socket *so, int refcount, void *lr)
2053 {
2054 void *lr_saved;
2055
2056 if (lr == NULL) {
2057 lr_saved = __builtin_return_address(0);
2058 } else {
2059 lr_saved = lr;
2060 }
2061
2062 if (so->so_pcb != NULL) {
2063 lck_mtx_lock(((struct ctl_cb *)so->so_pcb)->mtx);
2064 } else {
2065 panic("ctl_lock: so=%p NO PCB! lr=%p lrh= %s\n",
2066 so, lr_saved, solockhistory_nr(so));
2067 /* NOTREACHED */
2068 }
2069
2070 if (so->so_usecount < 0) {
2071 panic("ctl_lock: so=%p so_pcb=%p lr=%p ref=%x lrh= %s\n",
2072 so, so->so_pcb, lr_saved, so->so_usecount,
2073 solockhistory_nr(so));
2074 /* NOTREACHED */
2075 }
2076
2077 if (refcount) {
2078 so->so_usecount++;
2079 }
2080
2081 so->lock_lr[so->next_lock_lr] = lr_saved;
2082 so->next_lock_lr = (so->next_lock_lr + 1) % SO_LCKDBG_MAX;
2083 return 0;
2084 }
2085
2086 static int
2087 ctl_unlock(struct socket *so, int refcount, void *lr)
2088 {
2089 void *lr_saved;
2090 lck_mtx_t *mutex_held;
2091
2092 if (lr == NULL) {
2093 lr_saved = __builtin_return_address(0);
2094 } else {
2095 lr_saved = lr;
2096 }
2097
2098 #if (MORE_KCTLLOCK_DEBUG && (DEVELOPMENT || DEBUG))
2099         printf("ctl_unlock: so=%llx sopcb=%llx lock=%llx ref=%u lr=%llx\n",
2100             (uint64_t)VM_KERNEL_ADDRPERM(so),
2101             (uint64_t)VM_KERNEL_ADDRPERM(so->so_pcb),
2102             (uint64_t)VM_KERNEL_ADDRPERM(((struct ctl_cb *)so->so_pcb)->mtx),
2103 so->so_usecount, (uint64_t)VM_KERNEL_ADDRPERM(lr_saved));
2104 #endif /* (MORE_KCTLLOCK_DEBUG && (DEVELOPMENT || DEBUG)) */
2105 if (refcount) {
2106 so->so_usecount--;
2107 }
2108
2109 if (so->so_usecount < 0) {
2110 panic("ctl_unlock: so=%p usecount=%x lrh= %s\n",
2111 so, so->so_usecount, solockhistory_nr(so));
2112 /* NOTREACHED */
2113 }
2114 if (so->so_pcb == NULL) {
2115 panic("ctl_unlock: so=%p NO PCB usecount=%x lr=%p lrh= %s\n",
2116 so, so->so_usecount, (void *)lr_saved,
2117 solockhistory_nr(so));
2118 /* NOTREACHED */
2119 }
2120 mutex_held = ((struct ctl_cb *)so->so_pcb)->mtx;
2121
2122 lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
2123 so->unlock_lr[so->next_unlock_lr] = lr_saved;
2124 so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX;
2125 lck_mtx_unlock(mutex_held);
2126
2127 if (so->so_usecount == 0) {
2128 ctl_sofreelastref(so);
2129 }
2130
2131 return 0;
2132 }
2133
2134 static lck_mtx_t *
2135 ctl_getlock(struct socket *so, int flags)
2136 {
2137 #pragma unused(flags)
2138 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
2139
2140 if (so->so_pcb) {
2141 if (so->so_usecount < 0) {
2142 panic("ctl_getlock: so=%p usecount=%x lrh= %s\n",
2143 so, so->so_usecount, solockhistory_nr(so));
2144 }
2145 return kcb->mtx;
2146 } else {
2147 panic("ctl_getlock: so=%p NULL NO so_pcb %s\n",
2148 so, solockhistory_nr(so));
2149 return so->so_proto->pr_domain->dom_mtx;
2150 }
2151 }
2152
2153 __private_extern__ int
2154 kctl_reg_list SYSCTL_HANDLER_ARGS
2155 {
2156 #pragma unused(oidp, arg1, arg2)
2157 int error = 0;
2158 int n, i;
2159 struct xsystmgen xsg;
2160 void *buf = NULL;
2161 struct kctl *kctl;
2162 size_t item_size = ROUNDUP64(sizeof(struct xkctl_reg));
2163
2164 buf = _MALLOC(item_size, M_TEMP, M_WAITOK | M_ZERO);
2165 if (buf == NULL) {
2166 return ENOMEM;
2167 }
2168
2169 lck_mtx_lock(ctl_mtx);
2170
2171 n = kctlstat.kcs_reg_count;
2172
2173 if (req->oldptr == USER_ADDR_NULL) {
2174 req->oldidx = (n + n / 8) * sizeof(struct xkctl_reg);
2175 goto done;
2176 }
2177 if (req->newptr != USER_ADDR_NULL) {
2178 error = EPERM;
2179 goto done;
2180 }
2181 bzero(&xsg, sizeof(xsg));
2182 xsg.xg_len = sizeof(xsg);
2183 xsg.xg_count = n;
2184 xsg.xg_gen = kctlstat.kcs_gencnt;
2185 xsg.xg_sogen = so_gencnt;
2186 error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
2187 if (error) {
2188 goto done;
2189 }
2190 /*
2191          * We are done if there is no kctl registered
2192 */
2193 if (n == 0) {
2194 goto done;
2195 }
2196
2197 i = 0;
2198 for (i = 0, kctl = TAILQ_FIRST(&ctl_head);
2199 i < n && kctl != NULL;
2200 i++, kctl = TAILQ_NEXT(kctl, next)) {
2201 struct xkctl_reg *xkr = (struct xkctl_reg *)buf;
2202 struct ctl_cb *kcb;
2203 u_int32_t pcbcount = 0;
2204
2205 TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
2206 pcbcount++;
2207
2208 bzero(buf, item_size);
2209
2210 xkr->xkr_len = sizeof(struct xkctl_reg);
2211 xkr->xkr_kind = XSO_KCREG;
2212 xkr->xkr_id = kctl->id;
2213 xkr->xkr_reg_unit = kctl->reg_unit;
2214 xkr->xkr_flags = kctl->flags;
2215 xkr->xkr_kctlref = (uint64_t)(kctl->kctlref);
2216 xkr->xkr_recvbufsize = kctl->recvbufsize;
2217 xkr->xkr_sendbufsize = kctl->sendbufsize;
2218 xkr->xkr_lastunit = kctl->lastunit;
2219 xkr->xkr_pcbcount = pcbcount;
2220 xkr->xkr_connect = (uint64_t)VM_KERNEL_UNSLIDE(kctl->connect);
2221 xkr->xkr_disconnect =
2222 (uint64_t)VM_KERNEL_UNSLIDE(kctl->disconnect);
2223 xkr->xkr_send = (uint64_t)VM_KERNEL_UNSLIDE(kctl->send);
2224 xkr->xkr_send_list =
2225 (uint64_t)VM_KERNEL_UNSLIDE(kctl->send_list);
2226 xkr->xkr_setopt = (uint64_t)VM_KERNEL_UNSLIDE(kctl->setopt);
2227 xkr->xkr_getopt = (uint64_t)VM_KERNEL_UNSLIDE(kctl->getopt);
2228 xkr->xkr_rcvd = (uint64_t)VM_KERNEL_UNSLIDE(kctl->rcvd);
2229 strlcpy(xkr->xkr_name, kctl->name, sizeof(xkr->xkr_name));
2230
2231 error = SYSCTL_OUT(req, buf, item_size);
2232 }
2233
2234 if (error == 0) {
2235 /*
2236 * Give the user an updated idea of our state.
2237 * If the generation differs from what we told
2238 * her before, she knows that something happened
2239 * while we were processing this request, and it
2240 * might be necessary to retry.
2241 */
2242 bzero(&xsg, sizeof(xsg));
2243 xsg.xg_len = sizeof(xsg);
2244 xsg.xg_count = n;
2245 xsg.xg_gen = kctlstat.kcs_gencnt;
2246 xsg.xg_sogen = so_gencnt;
2247 error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
2248 if (error) {
2249 goto done;
2250 }
2251 }
2252
2253 done:
2254 lck_mtx_unlock(ctl_mtx);
2255
2256 if (buf != NULL) {
2257 FREE(buf, M_TEMP);
2258 }
2259
2260 return error;
2261 }
2262
2263 __private_extern__ int
2264 kctl_pcblist SYSCTL_HANDLER_ARGS
2265 {
2266 #pragma unused(oidp, arg1, arg2)
2267 int error = 0;
2268 int n, i;
2269 struct xsystmgen xsg;
2270 void *buf = NULL;
2271 struct kctl *kctl;
2272 size_t item_size = ROUNDUP64(sizeof(struct xkctlpcb)) +
2273 ROUNDUP64(sizeof(struct xsocket_n)) +
2274 2 * ROUNDUP64(sizeof(struct xsockbuf_n)) +
2275 ROUNDUP64(sizeof(struct xsockstat_n));
2276
2277 buf = _MALLOC(item_size, M_TEMP, M_WAITOK | M_ZERO);
2278 if (buf == NULL) {
2279 return ENOMEM;
2280 }
2281
2282 lck_mtx_lock(ctl_mtx);
2283
2284 n = kctlstat.kcs_pcbcount;
2285
2286 if (req->oldptr == USER_ADDR_NULL) {
2287 req->oldidx = (n + n / 8) * item_size;
2288 goto done;
2289 }
2290 if (req->newptr != USER_ADDR_NULL) {
2291 error = EPERM;
2292 goto done;
2293 }
2294 bzero(&xsg, sizeof(xsg));
2295 xsg.xg_len = sizeof(xsg);
2296 xsg.xg_count = n;
2297 xsg.xg_gen = kctlstat.kcs_gencnt;
2298 xsg.xg_sogen = so_gencnt;
2299 error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
2300 if (error) {
2301 goto done;
2302 }
2303 /*
2304 * We are done if there is no pcb
2305 */
2306 if (n == 0) {
2307 goto done;
2308 }
2309
2310 i = 0;
2311 for (i = 0, kctl = TAILQ_FIRST(&ctl_head);
2312 i < n && kctl != NULL;
2313 kctl = TAILQ_NEXT(kctl, next)) {
2314 struct ctl_cb *kcb;
2315
2316 for (kcb = TAILQ_FIRST(&kctl->kcb_head);
2317 i < n && kcb != NULL;
2318 i++, kcb = TAILQ_NEXT(kcb, next)) {
2319 struct xkctlpcb *xk = (struct xkctlpcb *)buf;
2320 struct xsocket_n *xso = (struct xsocket_n *)
2321 ADVANCE64(xk, sizeof(*xk));
2322 struct xsockbuf_n *xsbrcv = (struct xsockbuf_n *)
2323 ADVANCE64(xso, sizeof(*xso));
2324 struct xsockbuf_n *xsbsnd = (struct xsockbuf_n *)
2325 ADVANCE64(xsbrcv, sizeof(*xsbrcv));
2326 struct xsockstat_n *xsostats = (struct xsockstat_n *)
2327 ADVANCE64(xsbsnd, sizeof(*xsbsnd));
2328
2329 bzero(buf, item_size);
2330
2331 xk->xkp_len = sizeof(struct xkctlpcb);
2332 xk->xkp_kind = XSO_KCB;
2333 xk->xkp_unit = kcb->sac.sc_unit;
2334 xk->xkp_kctpcb = (uint64_t)VM_KERNEL_ADDRPERM(kcb);
2335 xk->xkp_kctlref = (uint64_t)VM_KERNEL_ADDRPERM(kctl);
2336 xk->xkp_kctlid = kctl->id;
2337 strlcpy(xk->xkp_kctlname, kctl->name,
2338 sizeof(xk->xkp_kctlname));
2339
2340 sotoxsocket_n(kcb->so, xso);
2341 sbtoxsockbuf_n(kcb->so ?
2342 &kcb->so->so_rcv : NULL, xsbrcv);
2343 sbtoxsockbuf_n(kcb->so ?
2344 &kcb->so->so_snd : NULL, xsbsnd);
2345 sbtoxsockstat_n(kcb->so, xsostats);
2346
2347 error = SYSCTL_OUT(req, buf, item_size);
2348 }
2349 }
2350
2351 if (error == 0) {
2352 /*
2353 * Give the user an updated idea of our state.
2354 * If the generation differs from what we told
2355 * her before, she knows that something happened
2356 * while we were processing this request, and it
2357 * might be necessary to retry.
2358 */
2359 bzero(&xsg, sizeof(xsg));
2360 xsg.xg_len = sizeof(xsg);
2361 xsg.xg_count = n;
2362 xsg.xg_gen = kctlstat.kcs_gencnt;
2363 xsg.xg_sogen = so_gencnt;
2364 error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
2365 if (error) {
2366 goto done;
2367 }
2368 }
2369
2370 done:
2371 lck_mtx_unlock(ctl_mtx);
2372
2373 return error;
2374 }
2375
2376 int
2377 kctl_getstat SYSCTL_HANDLER_ARGS
2378 {
2379 #pragma unused(oidp, arg1, arg2)
2380 int error = 0;
2381
2382 lck_mtx_lock(ctl_mtx);
2383
2384 if (req->newptr != USER_ADDR_NULL) {
2385 error = EPERM;
2386 goto done;
2387 }
2388 if (req->oldptr == USER_ADDR_NULL) {
2389 req->oldidx = sizeof(struct kctlstat);
2390 goto done;
2391 }
2392
2393 error = SYSCTL_OUT(req, &kctlstat,
2394 MIN(sizeof(struct kctlstat), req->oldlen));
2395 done:
2396 lck_mtx_unlock(ctl_mtx);
2397 return error;
2398 }
2399
2400 void
2401 kctl_fill_socketinfo(struct socket *so, struct socket_info *si)
2402 {
2403 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
2404 struct kern_ctl_info *kcsi =
2405 &si->soi_proto.pri_kern_ctl;
2406 struct kctl *kctl = kcb->kctl;
2407
2408 si->soi_kind = SOCKINFO_KERN_CTL;
2409
2410 if (kctl == 0) {
2411 return;
2412 }
2413
2414 kcsi->kcsi_id = kctl->id;
2415 kcsi->kcsi_reg_unit = kctl->reg_unit;
2416 kcsi->kcsi_flags = kctl->flags;
2417 kcsi->kcsi_recvbufsize = kctl->recvbufsize;
2418 kcsi->kcsi_sendbufsize = kctl->sendbufsize;
2419 kcsi->kcsi_unit = kcb->sac.sc_unit;
2420 strlcpy(kcsi->kcsi_name, kctl->name, MAX_KCTL_NAME);
2421 }