bsd/kern/kern_control.c (apple/xnu, xnu-6153.101.6)
1 /*
2 * Copyright (c) 1999-2017 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /*
30 * Kernel Control domain - allows control connections to kernel
31 * controllers and to read/write data.
32 *
33 * Vincent Lubet, 040506
34 * Christophe Allie, 010928
35 * Justin C. Walker, 990319
36 */
37
38 #include <sys/types.h>
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/syslog.h>
42 #include <sys/socket.h>
43 #include <sys/socketvar.h>
44 #include <sys/protosw.h>
45 #include <sys/domain.h>
46 #include <sys/malloc.h>
47 #include <sys/mbuf.h>
48 #include <sys/sys_domain.h>
49 #include <sys/kern_event.h>
50 #include <sys/kern_control.h>
51 #include <sys/kauth.h>
52 #include <sys/sysctl.h>
53 #include <sys/proc_info.h>
54 #include <net/if_var.h>
55
56 #include <mach/vm_types.h>
57
58 #include <kern/thread.h>
59
60 struct kctl {
61 TAILQ_ENTRY(kctl) next; /* controller chain */
62 kern_ctl_ref kctlref;
63
64 /* controller information provided when registering */
65 char name[MAX_KCTL_NAME]; /* unique identifier */
66 u_int32_t id;
67 u_int32_t reg_unit;
68
69 /* misc communication information */
70 u_int32_t flags; /* support flags */
71 u_int32_t recvbufsize; /* request more than the default buffer size */
72 u_int32_t sendbufsize; /* request more than the default buffer size */
73
74 /* Dispatch functions */
75 ctl_bind_func bind; /* Prepare contact */
76 ctl_connect_func connect; /* Make contact */
77 ctl_disconnect_func disconnect; /* Break contact */
78 ctl_send_func send; /* Send data to nke */
79 ctl_send_list_func send_list; /* Send list of packets */
80 ctl_setopt_func setopt; /* set kctl configuration */
81 ctl_getopt_func getopt; /* get kctl configuration */
82 ctl_rcvd_func rcvd; /* Notify nke when client reads data */
83
84 TAILQ_HEAD(, ctl_cb) kcb_head;
85 u_int32_t lastunit;
86 };
87
88 #if DEVELOPMENT || DEBUG
89 enum ctl_status {
90 KCTL_DISCONNECTED = 0,
91 KCTL_CONNECTING = 1,
92 KCTL_CONNECTED = 2
93 };
94 #endif /* DEVELOPMENT || DEBUG */
95
96 struct ctl_cb {
97 TAILQ_ENTRY(ctl_cb) next; /* controller chain */
98 lck_mtx_t *mtx;
99 struct socket *so; /* controlling socket */
100 struct kctl *kctl; /* back pointer to controller */
101 void *userdata;
102 struct sockaddr_ctl sac;
103 u_int32_t usecount;
104 u_int32_t kcb_usecount;
105 u_int32_t require_clearing_count;
106 #if DEVELOPMENT || DEBUG
107 enum ctl_status status;
108 #endif /* DEVELOPMENT || DEBUG */
109 };
110
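/*
 * For orientation (a summary of the code below, not an API contract): one
 * struct kctl is allocated per ctl_register() call and chained on ctl_head;
 * one struct ctl_cb is allocated per client socket in ctl_attach() and is
 * chained on the owning kctl's kcb_head once the socket binds or connects
 * to a (id, unit) pair.
 */
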
111 #ifndef ROUNDUP64
112 #define ROUNDUP64(x) P2ROUNDUP((x), sizeof (u_int64_t))
113 #endif
114
115 #ifndef ADVANCE64
116 #define ADVANCE64(p, n) (void*)((char *)(p) + ROUNDUP64(n))
117 #endif
118
119 /*
120 * Definitions for the default buffer sizes we support
121 */
122
123 #define CTL_SENDSIZE (2 * 1024) /* default buffer size */
124 #define CTL_RECVSIZE (8 * 1024) /* default buffer size */
125
126 /*
127 * Definitions and vars for the controls we support
128 */
129
130 static u_int32_t ctl_maxunit = 65536;
131 static lck_grp_attr_t *ctl_lck_grp_attr = 0;
132 static lck_attr_t *ctl_lck_attr = 0;
133 static lck_grp_t *ctl_lck_grp = 0;
134 static lck_mtx_t *ctl_mtx;
135
136 /* all the controllers are chained */
137 TAILQ_HEAD(kctl_list, kctl) ctl_head;
138
139 static int ctl_attach(struct socket *, int, struct proc *);
140 static int ctl_detach(struct socket *);
141 static int ctl_sofreelastref(struct socket *so);
142 static int ctl_bind(struct socket *, struct sockaddr *, struct proc *);
143 static int ctl_connect(struct socket *, struct sockaddr *, struct proc *);
144 static int ctl_disconnect(struct socket *);
145 static int ctl_ioctl(struct socket *so, u_long cmd, caddr_t data,
146 struct ifnet *ifp, struct proc *p);
147 static int ctl_send(struct socket *, int, struct mbuf *,
148 struct sockaddr *, struct mbuf *, struct proc *);
149 static int ctl_send_list(struct socket *, int, struct mbuf *,
150 struct sockaddr *, struct mbuf *, struct proc *);
151 static int ctl_ctloutput(struct socket *, struct sockopt *);
152 static int ctl_peeraddr(struct socket *so, struct sockaddr **nam);
153 static int ctl_usr_rcvd(struct socket *so, int flags);
154
155 static struct kctl *ctl_find_by_name(const char *);
156 static struct kctl *ctl_find_by_id_unit(u_int32_t id, u_int32_t unit);
157
158 static struct socket *kcb_find_socket(kern_ctl_ref kctlref, u_int32_t unit,
159 u_int32_t *);
160 static struct ctl_cb *kcb_find(struct kctl *, u_int32_t unit);
161 static void ctl_post_msg(u_int32_t event_code, u_int32_t id);
162
163 static int ctl_lock(struct socket *, int, void *);
164 static int ctl_unlock(struct socket *, int, void *);
165 static lck_mtx_t * ctl_getlock(struct socket *, int);
166
167 static struct pr_usrreqs ctl_usrreqs = {
168 .pru_attach = ctl_attach,
169 .pru_bind = ctl_bind,
170 .pru_connect = ctl_connect,
171 .pru_control = ctl_ioctl,
172 .pru_detach = ctl_detach,
173 .pru_disconnect = ctl_disconnect,
174 .pru_peeraddr = ctl_peeraddr,
175 .pru_rcvd = ctl_usr_rcvd,
176 .pru_send = ctl_send,
177 .pru_send_list = ctl_send_list,
178 .pru_sosend = sosend,
179 .pru_sosend_list = sosend_list,
180 .pru_soreceive = soreceive,
181 .pru_soreceive_list = soreceive_list,
182 };
183
184 static struct protosw kctlsw[] = {
185 {
186 .pr_type = SOCK_DGRAM,
187 .pr_protocol = SYSPROTO_CONTROL,
188 .pr_flags = PR_ATOMIC | PR_CONNREQUIRED | PR_PCBLOCK | PR_WANTRCVD,
189 .pr_ctloutput = ctl_ctloutput,
190 .pr_usrreqs = &ctl_usrreqs,
191 .pr_lock = ctl_lock,
192 .pr_unlock = ctl_unlock,
193 .pr_getlock = ctl_getlock,
194 },
195 {
196 .pr_type = SOCK_STREAM,
197 .pr_protocol = SYSPROTO_CONTROL,
198 .pr_flags = PR_CONNREQUIRED | PR_PCBLOCK | PR_WANTRCVD,
199 .pr_ctloutput = ctl_ctloutput,
200 .pr_usrreqs = &ctl_usrreqs,
201 .pr_lock = ctl_lock,
202 .pr_unlock = ctl_unlock,
203 .pr_getlock = ctl_getlock,
204 }
205 };
206
207 __private_extern__ int kctl_reg_list SYSCTL_HANDLER_ARGS;
208 __private_extern__ int kctl_pcblist SYSCTL_HANDLER_ARGS;
209 __private_extern__ int kctl_getstat SYSCTL_HANDLER_ARGS;
210
211
212 SYSCTL_NODE(_net_systm, OID_AUTO, kctl,
213 CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Kernel control family");
214
215 struct kctlstat kctlstat;
216 SYSCTL_PROC(_net_systm_kctl, OID_AUTO, stats,
217 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
218 kctl_getstat, "S,kctlstat", "");
219
220 SYSCTL_PROC(_net_systm_kctl, OID_AUTO, reg_list,
221 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
222 kctl_reg_list, "S,xkctl_reg", "");
223
224 SYSCTL_PROC(_net_systm_kctl, OID_AUTO, pcblist,
225 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
226 kctl_pcblist, "S,xkctlpcb", "");
227
228 u_int32_t ctl_autorcvbuf_max = 256 * 1024;
229 SYSCTL_INT(_net_systm_kctl, OID_AUTO, autorcvbufmax,
230 CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_autorcvbuf_max, 0, "");
231
232 u_int32_t ctl_autorcvbuf_high = 0;
233 SYSCTL_INT(_net_systm_kctl, OID_AUTO, autorcvbufhigh,
234 CTLFLAG_RD | CTLFLAG_LOCKED, &ctl_autorcvbuf_high, 0, "");
235
236 u_int32_t ctl_debug = 0;
237 SYSCTL_INT(_net_systm_kctl, OID_AUTO, debug,
238 CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_debug, 0, "");
239
240 #if DEVELOPMENT || DEBUG
241 u_int32_t ctl_panic_debug = 0;
242 SYSCTL_INT(_net_systm_kctl, OID_AUTO, panicdebug,
243 CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_panic_debug, 0, "");
244 #endif /* DEVELOPMENT || DEBUG */
245
246 #define KCTL_TBL_INC 16
247
248 static uintptr_t kctl_tbl_size = 0;
249 static u_int32_t kctl_tbl_growing = 0;
250 static u_int32_t kctl_tbl_growing_waiting = 0;
251 static uintptr_t kctl_tbl_count = 0;
252 static struct kctl **kctl_table = NULL;
253 static uintptr_t kctl_ref_gencnt = 0;
254
255 static void kctl_tbl_grow(void);
256 static kern_ctl_ref kctl_make_ref(struct kctl *kctl);
257 static void kctl_delete_ref(kern_ctl_ref);
258 static struct kctl *kctl_from_ref(kern_ctl_ref);
259
260 /*
261 * Install the protosw's for the Kernel Control manager.
262 */
263 __private_extern__ void
264 kern_control_init(struct domain *dp)
265 {
266 struct protosw *pr;
267 int i;
268 int kctl_proto_count = (sizeof(kctlsw) / sizeof(struct protosw));
269
270 VERIFY(!(dp->dom_flags & DOM_INITIALIZED));
271 VERIFY(dp == systemdomain);
272
273 ctl_lck_grp_attr = lck_grp_attr_alloc_init();
274 if (ctl_lck_grp_attr == NULL) {
275 panic("%s: lck_grp_attr_alloc_init failed\n", __func__);
276 /* NOTREACHED */
277 }
278
279 ctl_lck_grp = lck_grp_alloc_init("Kernel Control Protocol",
280 ctl_lck_grp_attr);
281 if (ctl_lck_grp == NULL) {
282 panic("%s: lck_grp_alloc_init failed\n", __func__);
283 /* NOTREACHED */
284 }
285
286 ctl_lck_attr = lck_attr_alloc_init();
287 if (ctl_lck_attr == NULL) {
288 panic("%s: lck_attr_alloc_init failed\n", __func__);
289 /* NOTREACHED */
290 }
291
292 ctl_mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
293 if (ctl_mtx == NULL) {
294 panic("%s: lck_mtx_alloc_init failed\n", __func__);
295 /* NOTREACHED */
296 }
297 TAILQ_INIT(&ctl_head);
298
299 for (i = 0, pr = &kctlsw[0]; i < kctl_proto_count; i++, pr++) {
300 net_add_proto(pr, dp, 1);
301 }
302 }
303
304 static void
305 kcb_delete(struct ctl_cb *kcb)
306 {
307 if (kcb != 0) {
308 if (kcb->mtx != 0) {
309 lck_mtx_free(kcb->mtx, ctl_lck_grp);
310 }
311 FREE(kcb, M_TEMP);
312 }
313 }
314
315 /*
316 * Kernel Controller user-request functions:
317 * - the attach function must exist and succeed
318 * - detach is not necessary
319 * - we need a pcb for the per-socket mutex
320 */
321 static int
322 ctl_attach(struct socket *so, int proto, struct proc *p)
323 {
324 #pragma unused(proto, p)
325 int error = 0;
326 struct ctl_cb *kcb = 0;
327
328 MALLOC(kcb, struct ctl_cb *, sizeof(struct ctl_cb), M_TEMP, M_WAITOK);
329 if (kcb == NULL) {
330 error = ENOMEM;
331 goto quit;
332 }
333 bzero(kcb, sizeof(struct ctl_cb));
334
335 kcb->mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
336 if (kcb->mtx == NULL) {
337 error = ENOMEM;
338 goto quit;
339 }
340 kcb->so = so;
341 so->so_pcb = (caddr_t)kcb;
342
343 quit:
344 if (error != 0) {
345 kcb_delete(kcb);
346 kcb = 0;
347 }
348 return error;
349 }
350
351 static int
352 ctl_sofreelastref(struct socket *so)
353 {
354 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
355
356 so->so_pcb = 0;
357
358 if (kcb != 0) {
359 struct kctl *kctl;
360 if ((kctl = kcb->kctl) != 0) {
361 lck_mtx_lock(ctl_mtx);
362 TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
363 kctlstat.kcs_pcbcount--;
364 kctlstat.kcs_gencnt++;
365 lck_mtx_unlock(ctl_mtx);
366 }
367 kcb_delete(kcb);
368 }
369 sofreelastref(so, 1);
370 return 0;
371 }
372
373 /*
374 * Use this function and ctl_kcb_require_clearing to serialize
375 * critical calls into the kctl subsystem
376 */
377 static void
378 ctl_kcb_increment_use_count(struct ctl_cb *kcb, lck_mtx_t *mutex_held)
379 {
380 LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED);
381 while (kcb->require_clearing_count > 0) {
382 msleep(&kcb->require_clearing_count, mutex_held, PSOCK | PCATCH, "kcb_require_clearing", NULL);
383 }
384 kcb->kcb_usecount++;
385 }
386
387 static void
388 ctl_kcb_require_clearing(struct ctl_cb *kcb, lck_mtx_t *mutex_held)
389 {
390 assert(kcb->kcb_usecount != 0);
391 kcb->require_clearing_count++;
392 kcb->kcb_usecount--;
393 while (kcb->kcb_usecount > 0) { // we need to wait until no one else is running
394 msleep(&kcb->kcb_usecount, mutex_held, PSOCK | PCATCH, "kcb_usecount", NULL);
395 }
396 kcb->kcb_usecount++;
397 }
398
399 static void
400 ctl_kcb_done_clearing(struct ctl_cb *kcb)
401 {
402 assert(kcb->require_clearing_count != 0);
403 kcb->require_clearing_count--;
404 wakeup((caddr_t)&kcb->require_clearing_count);
405 }
406
407 static void
408 ctl_kcb_decrement_use_count(struct ctl_cb *kcb)
409 {
410 assert(kcb->kcb_usecount != 0);
411 kcb->kcb_usecount--;
412 wakeup((caddr_t)&kcb->kcb_usecount);
413 }
414
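/*
 * Illustrative sketch (it mirrors the callers below, it is not an extra
 * API): the four helpers above are used in pairs.  Entry points that only
 * need to keep the kcb alive take the use-count pair; entry points that
 * must run exclusively (detach, bind, connect, disconnect) also take the
 * clearing pair:
 *
 *	lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
 *	ctl_kcb_increment_use_count(kcb, mtx_held);
 *	ctl_kcb_require_clearing(kcb, mtx_held);
 *	... serialized work, the socket lock may be dropped temporarily ...
 *	ctl_kcb_done_clearing(kcb);
 *	ctl_kcb_decrement_use_count(kcb);
 */
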
415 static int
416 ctl_detach(struct socket *so)
417 {
418 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
419
420 if (kcb == 0) {
421 return 0;
422 }
423
424 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
425 ctl_kcb_increment_use_count(kcb, mtx_held);
426 ctl_kcb_require_clearing(kcb, mtx_held);
427
428 if (kcb->kctl != NULL && kcb->kctl->bind != NULL &&
429 kcb->userdata != NULL && !(so->so_state & SS_ISCONNECTED)) {
430 // The unit was bound, but not connected
431 // Invoke the disconnect callback to clean up
432 if (kcb->kctl->disconnect != NULL) {
433 socket_unlock(so, 0);
434 (*kcb->kctl->disconnect)(kcb->kctl->kctlref,
435 kcb->sac.sc_unit, kcb->userdata);
436 socket_lock(so, 0);
437 }
438 }
439
440 soisdisconnected(so);
441 #if DEVELOPMENT || DEBUG
442 kcb->status = KCTL_DISCONNECTED;
443 #endif /* DEVELOPMENT || DEBUG */
444 so->so_flags |= SOF_PCBCLEARING;
445 ctl_kcb_done_clearing(kcb);
446 ctl_kcb_decrement_use_count(kcb);
447 return 0;
448 }
449
450 static int
451 ctl_setup_kctl(struct socket *so, struct sockaddr *nam, struct proc *p)
452 {
453 struct kctl *kctl = NULL;
454 int error = 0;
455 struct sockaddr_ctl sa;
456 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
457 struct ctl_cb *kcb_next = NULL;
458 u_quad_t sbmaxsize;
459 u_int32_t recvbufsize, sendbufsize;
460
461 if (kcb == 0) {
462 panic("ctl_setup_kctl so_pcb null\n");
463 }
464
465 if (kcb->kctl != NULL) {
466 // Already set up, skip
467 return 0;
468 }
469
470 if (nam->sa_len != sizeof(struct sockaddr_ctl)) {
471 return EINVAL;
472 }
473
474 bcopy(nam, &sa, sizeof(struct sockaddr_ctl));
475
476 lck_mtx_lock(ctl_mtx);
477 kctl = ctl_find_by_id_unit(sa.sc_id, sa.sc_unit);
478 if (kctl == NULL) {
479 lck_mtx_unlock(ctl_mtx);
480 return ENOENT;
481 }
482
483 if (((kctl->flags & CTL_FLAG_REG_SOCK_STREAM) &&
484 (so->so_type != SOCK_STREAM)) ||
485 (!(kctl->flags & CTL_FLAG_REG_SOCK_STREAM) &&
486 (so->so_type != SOCK_DGRAM))) {
487 lck_mtx_unlock(ctl_mtx);
488 return EPROTOTYPE;
489 }
490
491 if (kctl->flags & CTL_FLAG_PRIVILEGED) {
492 if (p == 0) {
493 lck_mtx_unlock(ctl_mtx);
494 return EINVAL;
495 }
496 if (kauth_cred_issuser(kauth_cred_get()) == 0) {
497 lck_mtx_unlock(ctl_mtx);
498 return EPERM;
499 }
500 }
501
502 if ((kctl->flags & CTL_FLAG_REG_ID_UNIT) || sa.sc_unit != 0) {
503 if (kcb_find(kctl, sa.sc_unit) != NULL) {
504 lck_mtx_unlock(ctl_mtx);
505 return EBUSY;
506 }
507 } else {
508 /* Find an unused unit number, assumes the kcb list is sorted by unit */
509 u_int32_t unit = 1;
510
511 TAILQ_FOREACH(kcb_next, &kctl->kcb_head, next) {
512 if (kcb_next->sac.sc_unit > unit) {
513 /* Found a gap, let's fill it in */
514 break;
515 }
516 unit = kcb_next->sac.sc_unit + 1;
517 if (unit == ctl_maxunit) {
518 break;
519 }
520 }
521
522 if (unit == ctl_maxunit) {
523 lck_mtx_unlock(ctl_mtx);
524 return EBUSY;
525 }
526
527 sa.sc_unit = unit;
528 }
529
530 bcopy(&sa, &kcb->sac, sizeof(struct sockaddr_ctl));
531 kcb->kctl = kctl;
532 if (kcb_next != NULL) {
533 TAILQ_INSERT_BEFORE(kcb_next, kcb, next);
534 } else {
535 TAILQ_INSERT_TAIL(&kctl->kcb_head, kcb, next);
536 }
537 kctlstat.kcs_pcbcount++;
538 kctlstat.kcs_gencnt++;
539 kctlstat.kcs_connections++;
540 lck_mtx_unlock(ctl_mtx);
541
542 /*
543 * rdar://15526688: Limit the send and receive sizes to sb_max
544 * by using the same scaling as sbreserve()
545 */
546 sbmaxsize = (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES);
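	/*
	 * For illustration only (these are the common defaults, not
	 * guaranteed values): with sb_max = 8 MB, MSIZE = 256 and
	 * MCLBYTES = 2048, this caps the buffers at
	 * 8 MB * 2048 / 2304, roughly 7.1 MB.
	 */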
547
548 if (kctl->sendbufsize > sbmaxsize) {
549 sendbufsize = sbmaxsize;
550 } else {
551 sendbufsize = kctl->sendbufsize;
552 }
553
554 if (kctl->recvbufsize > sbmaxsize) {
555 recvbufsize = sbmaxsize;
556 } else {
557 recvbufsize = kctl->recvbufsize;
558 }
559
560 error = soreserve(so, sendbufsize, recvbufsize);
561 if (error) {
562 if (ctl_debug) {
563 printf("%s - soreserve(%llx, %u, %u) error %d\n",
564 __func__, (uint64_t)VM_KERNEL_ADDRPERM(so),
565 sendbufsize, recvbufsize, error);
566 }
567 goto done;
568 }
569
570 done:
571 if (error) {
572 soisdisconnected(so);
573 #if DEVELOPMENT || DEBUG
574 kcb->status = KCTL_DISCONNECTED;
575 #endif /* DEVELOPMENT || DEBUG */
576 lck_mtx_lock(ctl_mtx);
577 TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
578 kcb->kctl = NULL;
579 kcb->sac.sc_unit = 0;
580 kctlstat.kcs_pcbcount--;
581 kctlstat.kcs_gencnt++;
582 kctlstat.kcs_conn_fail++;
583 lck_mtx_unlock(ctl_mtx);
584 }
585 return error;
586 }
587
588 static int
589 ctl_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
590 {
591 int error = 0;
592 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
593
594 if (kcb == NULL) {
595 panic("ctl_bind so_pcb null\n");
596 }
597
598 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
599 ctl_kcb_increment_use_count(kcb, mtx_held);
600 ctl_kcb_require_clearing(kcb, mtx_held);
601
602 error = ctl_setup_kctl(so, nam, p);
603 if (error) {
604 goto out;
605 }
606
607 if (kcb->kctl == NULL) {
608 panic("ctl_bind kctl null\n");
609 }
610
611 if (kcb->kctl->bind == NULL) {
612 error = EINVAL;
613 goto out;
614 }
615
616 socket_unlock(so, 0);
617 error = (*kcb->kctl->bind)(kcb->kctl->kctlref, &kcb->sac, &kcb->userdata);
618 socket_lock(so, 0);
619
620 out:
621 ctl_kcb_done_clearing(kcb);
622 ctl_kcb_decrement_use_count(kcb);
623 return error;
624 }
625
626 static int
627 ctl_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
628 {
629 int error = 0;
630 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
631
632 if (kcb == NULL) {
633 panic("ctl_connect so_pcb null\n");
634 }
635
636 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
637 ctl_kcb_increment_use_count(kcb, mtx_held);
638 ctl_kcb_require_clearing(kcb, mtx_held);
639
640 #if DEVELOPMENT || DEBUG
641 if (kcb->status != KCTL_DISCONNECTED && ctl_panic_debug) {
642 panic("kctl already connecting/connected");
643 }
644 kcb->status = KCTL_CONNECTING;
645 #endif /* DEVELOPMENT || DEBUG */
646
647 error = ctl_setup_kctl(so, nam, p);
648 if (error) {
649 goto out;
650 }
651
652 if (kcb->kctl == NULL) {
653 panic("ctl_connect kctl null\n");
654 }
655
656 soisconnecting(so);
657 socket_unlock(so, 0);
658 error = (*kcb->kctl->connect)(kcb->kctl->kctlref, &kcb->sac, &kcb->userdata);
659 socket_lock(so, 0);
660 if (error) {
661 goto end;
662 }
663 soisconnected(so);
664 #if DEVELOPMENT || DEBUG
665 kcb->status = KCTL_CONNECTED;
666 #endif /* DEVELOPMENT || DEBUG */
667
668 end:
669 if (error && kcb->kctl->disconnect) {
670 /*
671 * XXX Make sure we don't check the return value
672 * of disconnect here.
673 * ipsec/utun_ctl_disconnect will return an error when
674 * disconnect gets called after a connect failure.
675 * If we ever decide to check the disconnect return
676 * value here, please make sure to revisit
677 * ipsec/utun_ctl_disconnect.
678 */
679 socket_unlock(so, 0);
680 (*kcb->kctl->disconnect)(kcb->kctl->kctlref, kcb->sac.sc_unit, kcb->userdata);
681 socket_lock(so, 0);
682 }
683 if (error) {
684 soisdisconnected(so);
685 #if DEVELOPMENT || DEBUG
686 kcb->status = KCTL_DISCONNECTED;
687 #endif /* DEVELOPMENT || DEBUG */
688 lck_mtx_lock(ctl_mtx);
689 TAILQ_REMOVE(&kcb->kctl->kcb_head, kcb, next);
690 kcb->kctl = NULL;
691 kcb->sac.sc_unit = 0;
692 kctlstat.kcs_pcbcount--;
693 kctlstat.kcs_gencnt++;
694 kctlstat.kcs_conn_fail++;
695 lck_mtx_unlock(ctl_mtx);
696 }
697 out:
698 ctl_kcb_done_clearing(kcb);
699 ctl_kcb_decrement_use_count(kcb);
700 return error;
701 }
702
703 static int
704 ctl_disconnect(struct socket *so)
705 {
706 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
707
708 if ((kcb = (struct ctl_cb *)so->so_pcb)) {
709 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
710 ctl_kcb_increment_use_count(kcb, mtx_held);
711 ctl_kcb_require_clearing(kcb, mtx_held);
712 struct kctl *kctl = kcb->kctl;
713
714 if (kctl && kctl->disconnect) {
715 socket_unlock(so, 0);
716 (*kctl->disconnect)(kctl->kctlref, kcb->sac.sc_unit,
717 kcb->userdata);
718 socket_lock(so, 0);
719 }
720
721 soisdisconnected(so);
722 #if DEVELOPMENT || DEBUG
723 kcb->status = KCTL_DISCONNECTED;
724 #endif /* DEVELOPMENT || DEBUG */
725
726 socket_unlock(so, 0);
727 lck_mtx_lock(ctl_mtx);
728 kcb->kctl = 0;
729 kcb->sac.sc_unit = 0;
730 while (kcb->usecount != 0) {
731 msleep(&kcb->usecount, ctl_mtx, 0, "kcb->usecount", 0);
732 }
733 TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
734 kctlstat.kcs_pcbcount--;
735 kctlstat.kcs_gencnt++;
736 lck_mtx_unlock(ctl_mtx);
737 socket_lock(so, 0);
738 ctl_kcb_done_clearing(kcb);
739 ctl_kcb_decrement_use_count(kcb);
740 }
741 return 0;
742 }
743
744 static int
745 ctl_peeraddr(struct socket *so, struct sockaddr **nam)
746 {
747 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
748 struct kctl *kctl;
749 struct sockaddr_ctl sc;
750
751 if (kcb == NULL) { /* sanity check */
752 return ENOTCONN;
753 }
754
755 if ((kctl = kcb->kctl) == NULL) {
756 return EINVAL;
757 }
758
759 bzero(&sc, sizeof(struct sockaddr_ctl));
760 sc.sc_len = sizeof(struct sockaddr_ctl);
761 sc.sc_family = AF_SYSTEM;
762 sc.ss_sysaddr = AF_SYS_CONTROL;
763 sc.sc_id = kctl->id;
764 sc.sc_unit = kcb->sac.sc_unit;
765
766 *nam = dup_sockaddr((struct sockaddr *)&sc, 1);
767
768 return 0;
769 }
770
771 static void
772 ctl_sbrcv_trim(struct socket *so)
773 {
774 struct sockbuf *sb = &so->so_rcv;
775
776 if (sb->sb_hiwat > sb->sb_idealsize) {
777 u_int32_t diff;
778 int32_t trim;
779
780 /*
781 * The difference between the ideal size and the
782 * current size is the upper bound of the amount to trim
783 */
784 diff = sb->sb_hiwat - sb->sb_idealsize;
785 /*
786 * We cannot trim below the outstanding data
787 */
788 trim = sb->sb_hiwat - sb->sb_cc;
789
790 trim = imin(trim, (int32_t)diff);
791
792 if (trim > 0) {
793 sbreserve(sb, (sb->sb_hiwat - trim));
794
795 if (ctl_debug) {
796 printf("%s - shrunk to %d\n",
797 __func__, sb->sb_hiwat);
798 }
799 }
800 }
801 }
802
803 static int
804 ctl_usr_rcvd(struct socket *so, int flags)
805 {
806 int error = 0;
807 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
808 struct kctl *kctl;
809
810 if (kcb == NULL) {
811 return ENOTCONN;
812 }
813
814 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
815 ctl_kcb_increment_use_count(kcb, mtx_held);
816
817 if ((kctl = kcb->kctl) == NULL) {
818 error = EINVAL;
819 goto out;
820 }
821
822 if (kctl->rcvd) {
823 socket_unlock(so, 0);
824 (*kctl->rcvd)(kctl->kctlref, kcb->sac.sc_unit, kcb->userdata, flags);
825 socket_lock(so, 0);
826 }
827
828 ctl_sbrcv_trim(so);
829
830 out:
831 ctl_kcb_decrement_use_count(kcb);
832 return error;
833 }
834
835 static int
836 ctl_send(struct socket *so, int flags, struct mbuf *m,
837 struct sockaddr *addr, struct mbuf *control,
838 struct proc *p)
839 {
840 #pragma unused(addr, p)
841 int error = 0;
842 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
843 struct kctl *kctl;
844
845 if (control) {
846 m_freem(control);
847 }
848
849 if (kcb == NULL) { /* sanity check */
850 error = ENOTCONN;
851 }
852
853 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
854 ctl_kcb_increment_use_count(kcb, mtx_held);
855
856 if (error == 0 && (kctl = kcb->kctl) == NULL) {
857 error = EINVAL;
858 }
859
860 if (error == 0 && kctl->send) {
861 so_tc_update_stats(m, so, m_get_service_class(m));
862 socket_unlock(so, 0);
863 error = (*kctl->send)(kctl->kctlref, kcb->sac.sc_unit, kcb->userdata,
864 m, flags);
865 socket_lock(so, 0);
866 } else {
867 m_freem(m);
868 if (error == 0) {
869 error = ENOTSUP;
870 }
871 }
872 if (error != 0) {
873 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_send_fail);
874 }
875 ctl_kcb_decrement_use_count(kcb);
876
877 return error;
878 }
879
880 static int
881 ctl_send_list(struct socket *so, int flags, struct mbuf *m,
882 __unused struct sockaddr *addr, struct mbuf *control,
883 __unused struct proc *p)
884 {
885 int error = 0;
886 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
887 struct kctl *kctl;
888
889 if (control) {
890 m_freem_list(control);
891 }
892
893 if (kcb == NULL) { /* sanity check */
894 error = ENOTCONN;
895 }
896
897 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
898 ctl_kcb_increment_use_count(kcb, mtx_held);
899
900 if (error == 0 && (kctl = kcb->kctl) == NULL) {
901 error = EINVAL;
902 }
903
904 if (error == 0 && kctl->send_list) {
905 struct mbuf *nxt;
906
907 for (nxt = m; nxt != NULL; nxt = nxt->m_nextpkt) {
908 so_tc_update_stats(nxt, so, m_get_service_class(nxt));
909 }
910
911 socket_unlock(so, 0);
912 error = (*kctl->send_list)(kctl->kctlref, kcb->sac.sc_unit,
913 kcb->userdata, m, flags);
914 socket_lock(so, 0);
915 } else if (error == 0 && kctl->send) {
916 while (m != NULL && error == 0) {
917 struct mbuf *nextpkt = m->m_nextpkt;
918
919 m->m_nextpkt = NULL;
920 so_tc_update_stats(m, so, m_get_service_class(m));
921 socket_unlock(so, 0);
922 error = (*kctl->send)(kctl->kctlref, kcb->sac.sc_unit,
923 kcb->userdata, m, flags);
924 socket_lock(so, 0);
925 m = nextpkt;
926 }
927 if (m != NULL) {
928 m_freem_list(m);
929 }
930 } else {
931 m_freem_list(m);
932 if (error == 0) {
933 error = ENOTSUP;
934 }
935 }
936 if (error != 0) {
937 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_send_list_fail);
938 }
939 ctl_kcb_decrement_use_count(kcb);
940
941 return error;
942 }
943
944 static errno_t
945 ctl_rcvbspace(struct socket *so, u_int32_t datasize,
946 u_int32_t kctlflags, u_int32_t flags)
947 {
948 struct sockbuf *sb = &so->so_rcv;
949 u_int32_t space = sbspace(sb);
950 errno_t error;
951
952 if ((kctlflags & CTL_FLAG_REG_CRIT) == 0) {
953 if ((u_int32_t) space >= datasize) {
954 error = 0;
955 } else {
956 error = ENOBUFS;
957 }
958 } else if ((flags & CTL_DATA_CRIT) == 0) {
959 /*
960 * Reserve 25% for critical messages
961 */
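	/*
	 * For example, with the default 8 KB receive buffer a
	 * non-critical enqueue starts failing once free space drops
	 * below 2 KB, keeping the last quarter for CTL_DATA_CRIT data.
	 */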
962 if (space < (sb->sb_hiwat >> 2) ||
963 space < datasize) {
964 error = ENOBUFS;
965 } else {
966 error = 0;
967 }
968 } else {
969 u_int32_t autorcvbuf_max;
970
971 /*
972 * Allow overcommit of 25%
973 */
974 autorcvbuf_max = min(sb->sb_idealsize + (sb->sb_idealsize >> 2),
975 ctl_autorcvbuf_max);
976
977 if ((u_int32_t) space >= datasize) {
978 error = 0;
979 } else if (tcp_cansbgrow(sb) &&
980 sb->sb_hiwat < autorcvbuf_max) {
981 /*
982 * Grow with a little bit of leeway
983 */
984 u_int32_t grow = datasize - space + MSIZE;
985
986 if (sbreserve(sb,
987 min((sb->sb_hiwat + grow), autorcvbuf_max)) == 1) {
988 if (sb->sb_hiwat > ctl_autorcvbuf_high) {
989 ctl_autorcvbuf_high = sb->sb_hiwat;
990 }
991
992 /*
993 * A final check
994 */
995 if ((u_int32_t) sbspace(sb) >= datasize) {
996 error = 0;
997 } else {
998 error = ENOBUFS;
999 }
1000
1001 if (ctl_debug) {
1002 printf("%s - grown to %d error %d\n",
1003 __func__, sb->sb_hiwat, error);
1004 }
1005 } else {
1006 error = ENOBUFS;
1007 }
1008 } else {
1009 error = ENOBUFS;
1010 }
1011 }
1012 return error;
1013 }
1014
1015 errno_t
1016 ctl_enqueuembuf(kern_ctl_ref kctlref, u_int32_t unit, struct mbuf *m,
1017 u_int32_t flags)
1018 {
1019 struct socket *so;
1020 errno_t error = 0;
1021 int len = m->m_pkthdr.len;
1022 u_int32_t kctlflags;
1023
1024 so = kcb_find_socket(kctlref, unit, &kctlflags);
1025 if (so == NULL) {
1026 return EINVAL;
1027 }
1028
1029 if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
1030 error = ENOBUFS;
1031 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
1032 goto bye;
1033 }
1034 if ((flags & CTL_DATA_EOR)) {
1035 m->m_flags |= M_EOR;
1036 }
1037
1038 so_recv_data_stat(so, m, 0);
1039 if (sbappend(&so->so_rcv, m) != 0) {
1040 if ((flags & CTL_DATA_NOWAKEUP) == 0) {
1041 sorwakeup(so);
1042 }
1043 } else {
1044 error = ENOBUFS;
1045 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
1046 }
1047 bye:
1048 if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) {
1049 printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
1050 __func__, error, len,
1051 so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);
1052 }
1053
1054 socket_unlock(so, 1);
1055 if (error != 0) {
1056 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
1057 }
1058
1059 return error;
1060 }
1061
1062 /*
1063 * Compute the space occupied by an mbuf chain, like sbappendrecord() does
1064 */
1065 static int
1066 m_space(struct mbuf *m)
1067 {
1068 int space = 0;
1069 struct mbuf *nxt;
1070
1071 for (nxt = m; nxt != NULL; nxt = nxt->m_next) {
1072 space += nxt->m_len;
1073 }
1074
1075 return space;
1076 }
1077
1078 errno_t
1079 ctl_enqueuembuf_list(void *kctlref, u_int32_t unit, struct mbuf *m_list,
1080 u_int32_t flags, struct mbuf **m_remain)
1081 {
1082 struct socket *so = NULL;
1083 errno_t error = 0;
1084 struct mbuf *m, *nextpkt;
1085 int needwakeup = 0;
1086 int len = 0;
1087 u_int32_t kctlflags;
1088
1089 /*
1090 * Need to point at the beginning of the list in case of an early exit
1091 */
1092 m = m_list;
1093
1094 /*
1095 * kcb_find_socket takes the socket lock with a reference
1096 */
1097 so = kcb_find_socket(kctlref, unit, &kctlflags);
1098 if (so == NULL) {
1099 error = EINVAL;
1100 goto done;
1101 }
1102
1103 if (kctlflags & CTL_FLAG_REG_SOCK_STREAM) {
1104 error = EOPNOTSUPP;
1105 goto done;
1106 }
1107 if (flags & CTL_DATA_EOR) {
1108 error = EINVAL;
1109 goto done;
1110 }
1111
1112 for (m = m_list; m != NULL; m = nextpkt) {
1113 nextpkt = m->m_nextpkt;
1114
1115 if (m->m_pkthdr.len == 0 && ctl_debug) {
1116 printf("%s: %llx m_pkthdr.len is 0",
1117 __func__, (uint64_t)VM_KERNEL_ADDRPERM(m));
1118 }
1119
1120 /*
1121 * The mbuf is either appended or freed by sbappendrecord(),
1122 * so compute its length before handing it over
1123 */
1124 len = m_space(m);
1125 if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
1126 error = ENOBUFS;
1127 OSIncrementAtomic64(
1128 (SInt64 *)&kctlstat.kcs_enqueue_fullsock);
1129 break;
1130 } else {
1131 /*
1132 * Unlink from the list, m is on its own
1133 */
1134 m->m_nextpkt = NULL;
1135 so_recv_data_stat(so, m, 0);
1136 if (sbappendrecord(&so->so_rcv, m) != 0) {
1137 needwakeup = 1;
1138 } else {
1139 /*
1140 * We free or return the remaining
1141 * mbufs in the list
1142 */
1143 m = nextpkt;
1144 error = ENOBUFS;
1145 OSIncrementAtomic64(
1146 (SInt64 *)&kctlstat.kcs_enqueue_fullsock);
1147 break;
1148 }
1149 }
1150 }
1151 if (needwakeup && (flags & CTL_DATA_NOWAKEUP) == 0) {
1152 sorwakeup(so);
1153 }
1154
1155 done:
1156 if (so != NULL) {
1157 if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) {
1158 printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
1159 __func__, error, len,
1160 so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);
1161 }
1162
1163 socket_unlock(so, 1);
1164 }
1165 if (m_remain) {
1166 *m_remain = m;
1167
1168 if (m != NULL && socket_debug && so != NULL &&
1169 (so->so_options & SO_DEBUG)) {
1170 struct mbuf *n;
1171
1172 printf("%s m_list %llx\n", __func__,
1173 (uint64_t) VM_KERNEL_ADDRPERM(m_list));
1174 for (n = m; n != NULL; n = n->m_nextpkt) {
1175 printf(" remain %llx m_next %llx\n",
1176 (uint64_t) VM_KERNEL_ADDRPERM(n),
1177 (uint64_t) VM_KERNEL_ADDRPERM(n->m_next));
1178 }
1179 }
1180 } else {
1181 if (m != NULL) {
1182 m_freem_list(m);
1183 }
1184 }
1185 if (error != 0) {
1186 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
1187 }
1188 return error;
1189 }
1190
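/*
 * Illustrative sketch (my_ref, my_unit and pkt_list are hypothetical
 * names): callers that want to retry on a full socket buffer pass m_remain
 * so the packets that were not accepted are handed back instead of freed:
 *
 *	struct mbuf *remain = NULL;
 *	errno_t err = ctl_enqueuembuf_list(my_ref, my_unit, pkt_list, 0,
 *	    &remain);
 *	if (err == ENOBUFS && remain != NULL) {
 *		// keep "remain" and retry when the rcvd callback fires
 *	}
 */
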
1191 errno_t
1192 ctl_enqueuedata(void *kctlref, u_int32_t unit, void *data, size_t len,
1193 u_int32_t flags)
1194 {
1195 struct socket *so;
1196 struct mbuf *m;
1197 errno_t error = 0;
1198 unsigned int num_needed;
1199 struct mbuf *n;
1200 size_t curlen = 0;
1201 u_int32_t kctlflags;
1202
1203 so = kcb_find_socket(kctlref, unit, &kctlflags);
1204 if (so == NULL) {
1205 return EINVAL;
1206 }
1207
1208 if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
1209 error = ENOBUFS;
1210 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
1211 goto bye;
1212 }
1213
1214 num_needed = 1;
1215 m = m_allocpacket_internal(&num_needed, len, NULL, M_NOWAIT, 1, 0);
1216 if (m == NULL) {
1217 kctlstat.kcs_enqdata_mb_alloc_fail++;
1218 if (ctl_debug) {
1219 printf("%s: m_allocpacket_internal(%lu) failed\n",
1220 __func__, len);
1221 }
1222 error = ENOMEM;
1223 goto bye;
1224 }
1225
1226 for (n = m; n != NULL; n = n->m_next) {
1227 size_t mlen = mbuf_maxlen(n);
1228
1229 if (mlen + curlen > len) {
1230 mlen = len - curlen;
1231 }
1232 n->m_len = mlen;
1233 bcopy((char *)data + curlen, n->m_data, mlen);
1234 curlen += mlen;
1235 }
1236 mbuf_pkthdr_setlen(m, curlen);
1237
1238 if ((flags & CTL_DATA_EOR)) {
1239 m->m_flags |= M_EOR;
1240 }
1241 so_recv_data_stat(so, m, 0);
1242 if (sbappend(&so->so_rcv, m) != 0) {
1243 if ((flags & CTL_DATA_NOWAKEUP) == 0) {
1244 sorwakeup(so);
1245 }
1246 } else {
1247 kctlstat.kcs_enqdata_sbappend_fail++;
1248 error = ENOBUFS;
1249 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
1250 }
1251
1252 bye:
1253 if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) {
1254 printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
1255 __func__, error, (int)len,
1256 so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);
1257 }
1258
1259 socket_unlock(so, 1);
1260 if (error != 0) {
1261 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
1262 }
1263 return error;
1264 }
1265
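/*
 * Illustrative sketch (my_ref, my_unit and buf are hypothetical names): a
 * kernel control typically pushes data up to its client with the kctlref
 * obtained from ctl_register() and the unit given to its connect callback:
 *
 *	errno_t err = ctl_enqueuedata(my_ref, my_unit, buf, buflen,
 *	    CTL_DATA_EOR);
 *	if (err == ENOBUFS) {
 *		// client is not reading fast enough: drop or retry later
 *	}
 */
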
1266 errno_t
1267 ctl_getenqueuepacketcount(kern_ctl_ref kctlref, u_int32_t unit, u_int32_t *pcnt)
1268 {
1269 struct socket *so;
1270 u_int32_t cnt;
1271 struct mbuf *m1;
1272
1273 if (pcnt == NULL) {
1274 return EINVAL;
1275 }
1276
1277 so = kcb_find_socket(kctlref, unit, NULL);
1278 if (so == NULL) {
1279 return EINVAL;
1280 }
1281
1282 cnt = 0;
1283 m1 = so->so_rcv.sb_mb;
1284 while (m1 != NULL) {
1285 if (m1->m_type == MT_DATA ||
1286 m1->m_type == MT_HEADER ||
1287 m1->m_type == MT_OOBDATA) {
1288 cnt += 1;
1289 }
1290 m1 = m1->m_nextpkt;
1291 }
1292 *pcnt = cnt;
1293
1294 socket_unlock(so, 1);
1295
1296 return 0;
1297 }
1298
1299 errno_t
1300 ctl_getenqueuespace(kern_ctl_ref kctlref, u_int32_t unit, size_t *space)
1301 {
1302 struct socket *so;
1303 long avail;
1304
1305 if (space == NULL) {
1306 return EINVAL;
1307 }
1308
1309 so = kcb_find_socket(kctlref, unit, NULL);
1310 if (so == NULL) {
1311 return EINVAL;
1312 }
1313
1314 avail = sbspace(&so->so_rcv);
1315 *space = (avail < 0) ? 0 : avail;
1316 socket_unlock(so, 1);
1317
1318 return 0;
1319 }
1320
1321 errno_t
1322 ctl_getenqueuereadable(kern_ctl_ref kctlref, u_int32_t unit,
1323 u_int32_t *difference)
1324 {
1325 struct socket *so;
1326
1327 if (difference == NULL) {
1328 return EINVAL;
1329 }
1330
1331 so = kcb_find_socket(kctlref, unit, NULL);
1332 if (so == NULL) {
1333 return EINVAL;
1334 }
1335
1336 if (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat) {
1337 *difference = 0;
1338 } else {
1339 *difference = (so->so_rcv.sb_lowat - so->so_rcv.sb_cc);
1340 }
1341 socket_unlock(so, 1);
1342
1343 return 0;
1344 }
1345
1346 static int
1347 ctl_ctloutput(struct socket *so, struct sockopt *sopt)
1348 {
1349 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
1350 struct kctl *kctl;
1351 int error = 0;
1352 void *data = NULL;
1353 size_t len;
1354
1355 if (sopt->sopt_level != SYSPROTO_CONTROL) {
1356 return EINVAL;
1357 }
1358
1359 if (kcb == NULL) { /* sanity check */
1360 return ENOTCONN;
1361 }
1362
1363 if ((kctl = kcb->kctl) == NULL) {
1364 return EINVAL;
1365 }
1366
1367 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
1368 ctl_kcb_increment_use_count(kcb, mtx_held);
1369
1370 switch (sopt->sopt_dir) {
1371 case SOPT_SET:
1372 if (kctl->setopt == NULL) {
1373 error = ENOTSUP;
1374 goto out;
1375 }
1376 if (sopt->sopt_valsize != 0) {
1377 MALLOC(data, void *, sopt->sopt_valsize, M_TEMP,
1378 M_WAITOK | M_ZERO);
1379 if (data == NULL) {
1380 error = ENOMEM;
1381 goto out;
1382 }
1383 error = sooptcopyin(sopt, data,
1384 sopt->sopt_valsize, sopt->sopt_valsize);
1385 }
1386 if (error == 0) {
1387 socket_unlock(so, 0);
1388 error = (*kctl->setopt)(kctl->kctlref,
1389 kcb->sac.sc_unit, kcb->userdata, sopt->sopt_name,
1390 data, sopt->sopt_valsize);
1391 socket_lock(so, 0);
1392 }
1393
1394 if (data != NULL) {
1395 FREE(data, M_TEMP);
1396 }
1397 break;
1398
1399 case SOPT_GET:
1400 if (kctl->getopt == NULL) {
1401 error = ENOTSUP;
1402 goto out;
1403 }
1404
1405 if (sopt->sopt_valsize && sopt->sopt_val) {
1406 MALLOC(data, void *, sopt->sopt_valsize, M_TEMP,
1407 M_WAITOK | M_ZERO);
1408 if (data == NULL) {
1409 error = ENOMEM;
1410 goto out;
1411 }
1412 /*
1413 * 4108337 - copy user data in case the
1414 * kernel control needs it
1415 */
1416 error = sooptcopyin(sopt, data,
1417 sopt->sopt_valsize, sopt->sopt_valsize);
1418 }
1419
1420 if (error == 0) {
1421 len = sopt->sopt_valsize;
1422 socket_unlock(so, 0);
1423 error = (*kctl->getopt)(kctl->kctlref, kcb->sac.sc_unit,
1424 kcb->userdata, sopt->sopt_name,
1425 data, &len);
1426 if (data != NULL && len > sopt->sopt_valsize) {
1427 panic_plain("ctl_ctloutput: ctl %s returned "
1428 "len (%lu) > sopt_valsize (%lu)\n",
1429 kcb->kctl->name, len,
1430 sopt->sopt_valsize);
1431 }
1432 socket_lock(so, 0);
1433 if (error == 0) {
1434 if (data != NULL) {
1435 error = sooptcopyout(sopt, data, len);
1436 } else {
1437 sopt->sopt_valsize = len;
1438 }
1439 }
1440 }
1441 if (data != NULL) {
1442 FREE(data, M_TEMP);
1443 }
1444 break;
1445 }
1446
1447 out:
1448 ctl_kcb_decrement_use_count(kcb);
1449 return error;
1450 }
1451
1452 static int
1453 ctl_ioctl(struct socket *so, u_long cmd, caddr_t data,
1454 struct ifnet *ifp, struct proc *p)
1455 {
1456 #pragma unused(so, ifp, p)
1457 int error = ENOTSUP;
1458
1459 switch (cmd) {
1460 /* get the number of controllers */
1461 case CTLIOCGCOUNT: {
1462 struct kctl *kctl;
1463 u_int32_t n = 0;
1464
1465 lck_mtx_lock(ctl_mtx);
1466 TAILQ_FOREACH(kctl, &ctl_head, next)
1467 n++;
1468 lck_mtx_unlock(ctl_mtx);
1469
1470 bcopy(&n, data, sizeof(n));
1471 error = 0;
1472 break;
1473 }
1474 case CTLIOCGINFO: {
1475 struct ctl_info ctl_info;
1476 struct kctl *kctl = 0;
1477 size_t name_len;
1478
1479 bcopy(data, &ctl_info, sizeof(ctl_info));
1480 name_len = strnlen(ctl_info.ctl_name, MAX_KCTL_NAME);
1481
1482 if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) {
1483 error = EINVAL;
1484 break;
1485 }
1486 lck_mtx_lock(ctl_mtx);
1487 kctl = ctl_find_by_name(ctl_info.ctl_name);
1488 lck_mtx_unlock(ctl_mtx);
1489 if (kctl == 0) {
1490 error = ENOENT;
1491 break;
1492 }
1493 ctl_info.ctl_id = kctl->id;
1494 bcopy(&ctl_info, data, sizeof(ctl_info));
1495 error = 0;
1496 break;
1497 }
1498
1499 /* add controls to get list of NKEs */
1500 }
1501
1502 return error;
1503 }
1504
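/*
 * Illustrative user-space sketch (not part of this file; the control name
 * "com.example.mykctl" is hypothetical): this is the typical client side
 * of the CTLIOCGINFO ioctl handled above, followed by a connect with a
 * struct sockaddr_ctl:
 *
 *	#include <sys/socket.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/sys_domain.h>
 *	#include <sys/kern_control.h>
 *	#include <string.h>
 *	#include <strings.h>
 *	#include <err.h>
 *
 *	int fd = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);
 *	if (fd == -1)
 *		err(1, "socket");
 *
 *	struct ctl_info info;
 *	bzero(&info, sizeof(info));
 *	strlcpy(info.ctl_name, "com.example.mykctl", sizeof(info.ctl_name));
 *	if (ioctl(fd, CTLIOCGINFO, &info) == -1)
 *		err(1, "CTLIOCGINFO");
 *
 *	struct sockaddr_ctl sc;
 *	bzero(&sc, sizeof(sc));
 *	sc.sc_len = sizeof(sc);
 *	sc.sc_family = AF_SYSTEM;
 *	sc.ss_sysaddr = AF_SYS_CONTROL;
 *	sc.sc_id = info.ctl_id;
 *	sc.sc_unit = 0;		// 0: let the kernel pick a free unit
 *	if (connect(fd, (struct sockaddr *)&sc, sizeof(sc)) == -1)
 *		err(1, "connect");
 */
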
1505 static void
1506 kctl_tbl_grow()
1507 {
1508 struct kctl **new_table;
1509 uintptr_t new_size;
1510
1511 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1512
1513 if (kctl_tbl_growing) {
1514 /* Another thread is allocating */
1515 kctl_tbl_growing_waiting++;
1516
1517 do {
1518 (void) msleep((caddr_t) &kctl_tbl_growing, ctl_mtx,
1519 PSOCK | PCATCH, "kctl_tbl_growing", 0);
1520 } while (kctl_tbl_growing);
1521 kctl_tbl_growing_waiting--;
1522 }
1523 /* Another thread grew the table */
1524 if (kctl_table != NULL && kctl_tbl_count < kctl_tbl_size) {
1525 return;
1526 }
1527
1528 /* Verify we have a sane size */
1529 if (kctl_tbl_size + KCTL_TBL_INC >= UINT16_MAX) {
1530 kctlstat.kcs_tbl_size_too_big++;
1531 if (ctl_debug) {
1532 printf("%s kctl_tbl_size %lu too big\n",
1533 __func__, kctl_tbl_size);
1534 }
1535 return;
1536 }
1537 kctl_tbl_growing = 1;
1538
1539 new_size = kctl_tbl_size + KCTL_TBL_INC;
1540
1541 lck_mtx_unlock(ctl_mtx);
1542 new_table = _MALLOC(sizeof(struct kctl *) * new_size,
1543 M_TEMP, M_WAIT | M_ZERO);
1544 lck_mtx_lock(ctl_mtx);
1545
1546 if (new_table != NULL) {
1547 if (kctl_table != NULL) {
1548 bcopy(kctl_table, new_table,
1549 kctl_tbl_size * sizeof(struct kctl *));
1550
1551 _FREE(kctl_table, M_TEMP);
1552 }
1553 kctl_table = new_table;
1554 kctl_tbl_size = new_size;
1555 }
1556
1557 kctl_tbl_growing = 0;
1558
1559 if (kctl_tbl_growing_waiting) {
1560 wakeup(&kctl_tbl_growing);
1561 }
1562 }
1563
1564 #define KCTLREF_INDEX_MASK 0x0000FFFF
1565 #define KCTLREF_GENCNT_MASK 0xFFFF0000
1566 #define KCTLREF_GENCNT_SHIFT 16
1567
1568 static kern_ctl_ref
1569 kctl_make_ref(struct kctl *kctl)
1570 {
1571 uintptr_t i;
1572
1573 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1574
1575 if (kctl_tbl_count >= kctl_tbl_size) {
1576 kctl_tbl_grow();
1577 }
1578
1579 kctl->kctlref = NULL;
1580 for (i = 0; i < kctl_tbl_size; i++) {
1581 if (kctl_table[i] == NULL) {
1582 uintptr_t ref;
1583
1584 /*
1585 * Reference is index plus one
1586 */
1587 kctl_ref_gencnt += 1;
1588
1589 /*
1590 * Add generation count as salt to reference to prevent
1591 * use after deregister
1592 */
1593 ref = ((kctl_ref_gencnt << KCTLREF_GENCNT_SHIFT) &
1594 KCTLREF_GENCNT_MASK) +
1595 ((i + 1) & KCTLREF_INDEX_MASK);
1596
1597 kctl->kctlref = (void *)(ref);
1598 kctl_table[i] = kctl;
1599 kctl_tbl_count++;
1600 break;
1601 }
1602 }
1603
1604 if (kctl->kctlref == NULL) {
1605 panic("%s no space in table", __func__);
1606 }
1607
1608 if (ctl_debug > 0) {
1609 printf("%s %p for %p\n",
1610 __func__, kctl->kctlref, kctl);
1611 }
1612
1613 return kctl->kctlref;
1614 }
1615
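/*
 * For illustration: the first registration lands in slot 0 with generation
 * count 1, so its reference is 0x00010001 (index + 1 in the low 16 bits,
 * generation count in the high 16 bits).  If slot 0 is later re-used at
 * generation count 5, the new reference is 0x00050001, so a stale copy of
 * the old reference no longer matches kctl->kctlref and is rejected by
 * kctl_from_ref().
 */
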
1616 static void
1617 kctl_delete_ref(kern_ctl_ref kctlref)
1618 {
1619 /*
1620 * Reference is index plus one
1621 */
1622 uintptr_t i = (((uintptr_t)kctlref) & KCTLREF_INDEX_MASK) - 1;
1623
1624 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1625
1626 if (i < kctl_tbl_size) {
1627 struct kctl *kctl = kctl_table[i];
1628
1629 if (kctl->kctlref == kctlref) {
1630 kctl_table[i] = NULL;
1631 kctl_tbl_count--;
1632 } else {
1633 kctlstat.kcs_bad_kctlref++;
1634 }
1635 } else {
1636 kctlstat.kcs_bad_kctlref++;
1637 }
1638 }
1639
1640 static struct kctl *
1641 kctl_from_ref(kern_ctl_ref kctlref)
1642 {
1643 /*
1644 * Reference is index plus one
1645 */
1646 uintptr_t i = (((uintptr_t)kctlref) & KCTLREF_INDEX_MASK) - 1;
1647 struct kctl *kctl = NULL;
1648
1649 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1650
1651 if (i >= kctl_tbl_size) {
1652 kctlstat.kcs_bad_kctlref++;
1653 return NULL;
1654 }
1655 kctl = kctl_table[i];
1656 if (kctl->kctlref != kctlref) {
1657 kctlstat.kcs_bad_kctlref++;
1658 return NULL;
1659 }
1660 return kctl;
1661 }
1662
1663 /*
1664 * Register/unregister an NKE
1665 */
1666 errno_t
1667 ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref)
1668 {
1669 struct kctl *kctl = NULL;
1670 struct kctl *kctl_next = NULL;
1671 u_int32_t id = 1;
1672 size_t name_len;
1673 int is_extended = 0;
1674
1675 if (userkctl == NULL) { /* sanity check */
1676 return EINVAL;
1677 }
1678 if (userkctl->ctl_connect == NULL) {
1679 return EINVAL;
1680 }
1681 name_len = strlen(userkctl->ctl_name);
1682 if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) {
1683 return EINVAL;
1684 }
1685
1686 MALLOC(kctl, struct kctl *, sizeof(*kctl), M_TEMP, M_WAITOK);
1687 if (kctl == NULL) {
1688 return ENOMEM;
1689 }
1690 bzero((char *)kctl, sizeof(*kctl));
1691
1692 lck_mtx_lock(ctl_mtx);
1693
1694 if (kctl_make_ref(kctl) == NULL) {
1695 lck_mtx_unlock(ctl_mtx);
1696 FREE(kctl, M_TEMP);
1697 return ENOMEM;
1698 }
1699
1700 /*
1701 * Kernel Control IDs
1702 *
1703 * CTL_FLAG_REG_ID_UNIT indicates the control ID and unit number are
1704 * static. If they do not exist, add them to the list in order. If the
1705 * flag is not set, we must find a new unique value. We assume the
1706 * list is in order. We find the last item in the list and add one. If
1707 * this leads to wrapping the id around, we start at the front of the
1708 * list and look for a gap.
1709 */
1710
1711 if ((userkctl->ctl_flags & CTL_FLAG_REG_ID_UNIT) == 0) {
1712 /* Must dynamically assign an unused ID */
1713
1714 /* Verify the same name isn't already registered */
1715 if (ctl_find_by_name(userkctl->ctl_name) != NULL) {
1716 kctl_delete_ref(kctl->kctlref);
1717 lck_mtx_unlock(ctl_mtx);
1718 FREE(kctl, M_TEMP);
1719 return EEXIST;
1720 }
1721
1722 /* Start with 1 in case the list is empty */
1723 id = 1;
1724 kctl_next = TAILQ_LAST(&ctl_head, kctl_list);
1725
1726 if (kctl_next != NULL) {
1727 /* List was not empty, add one to the last item */
1728 id = kctl_next->id + 1;
1729 kctl_next = NULL;
1730
1731 /*
1732 * If this wrapped the id number, start looking at
1733 * the front of the list for an unused id.
1734 */
1735 if (id == 0) {
1736 /* Find the next unused ID */
1737 id = 1;
1738
1739 TAILQ_FOREACH(kctl_next, &ctl_head, next) {
1740 if (kctl_next->id > id) {
1741 /* We found a gap */
1742 break;
1743 }
1744
1745 id = kctl_next->id + 1;
1746 }
1747 }
1748 }
1749
1750 userkctl->ctl_id = id;
1751 kctl->id = id;
1752 kctl->reg_unit = -1;
1753 } else {
1754 TAILQ_FOREACH(kctl_next, &ctl_head, next) {
1755 if (kctl_next->id > userkctl->ctl_id) {
1756 break;
1757 }
1758 }
1759
1760 if (ctl_find_by_id_unit(userkctl->ctl_id, userkctl->ctl_unit)) {
1761 kctl_delete_ref(kctl->kctlref);
1762 lck_mtx_unlock(ctl_mtx);
1763 FREE(kctl, M_TEMP);
1764 return EEXIST;
1765 }
1766 kctl->id = userkctl->ctl_id;
1767 kctl->reg_unit = userkctl->ctl_unit;
1768 }
1769
1770 is_extended = (userkctl->ctl_flags & CTL_FLAG_REG_EXTENDED);
1771
1772 strlcpy(kctl->name, userkctl->ctl_name, MAX_KCTL_NAME);
1773 kctl->flags = userkctl->ctl_flags;
1774
1775 /*
1776 * Let the caller know the default send and receive sizes
1777 */
1778 if (userkctl->ctl_sendsize == 0) {
1779 kctl->sendbufsize = CTL_SENDSIZE;
1780 userkctl->ctl_sendsize = kctl->sendbufsize;
1781 } else {
1782 kctl->sendbufsize = userkctl->ctl_sendsize;
1783 }
1784 if (userkctl->ctl_recvsize == 0) {
1785 kctl->recvbufsize = CTL_RECVSIZE;
1786 userkctl->ctl_recvsize = kctl->recvbufsize;
1787 } else {
1788 kctl->recvbufsize = userkctl->ctl_recvsize;
1789 }
1790
1791 kctl->bind = userkctl->ctl_bind;
1792 kctl->connect = userkctl->ctl_connect;
1793 kctl->disconnect = userkctl->ctl_disconnect;
1794 kctl->send = userkctl->ctl_send;
1795 kctl->setopt = userkctl->ctl_setopt;
1796 kctl->getopt = userkctl->ctl_getopt;
1797 if (is_extended) {
1798 kctl->rcvd = userkctl->ctl_rcvd;
1799 kctl->send_list = userkctl->ctl_send_list;
1800 }
1801
1802 TAILQ_INIT(&kctl->kcb_head);
1803
1804 if (kctl_next) {
1805 TAILQ_INSERT_BEFORE(kctl_next, kctl, next);
1806 } else {
1807 TAILQ_INSERT_TAIL(&ctl_head, kctl, next);
1808 }
1809
1810 kctlstat.kcs_reg_count++;
1811 kctlstat.kcs_gencnt++;
1812
1813 lck_mtx_unlock(ctl_mtx);
1814
1815 *kctlref = kctl->kctlref;
1816
1817 ctl_post_msg(KEV_CTL_REGISTERED, kctl->id);
1818 return 0;
1819 }
1820
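/*
 * Illustrative sketch of the kext side (my_ref, my_connect, my_disconnect
 * and my_send are hypothetical names; only the kern_ctl_reg fields and the
 * two calls come from this API):
 *
 *	static kern_ctl_ref my_ref;
 *
 *	struct kern_ctl_reg reg;
 *	bzero(&reg, sizeof(reg));
 *	strlcpy(reg.ctl_name, "com.example.mykctl", sizeof(reg.ctl_name));
 *	reg.ctl_flags = CTL_FLAG_PRIVILEGED;	// only root may connect
 *	reg.ctl_sendsize = 0;			// take the CTL_SENDSIZE default
 *	reg.ctl_recvsize = 0;			// take the CTL_RECVSIZE default
 *	reg.ctl_connect = my_connect;		// mandatory, checked above
 *	reg.ctl_disconnect = my_disconnect;
 *	reg.ctl_send = my_send;
 *
 *	errno_t err = ctl_register(&reg, &my_ref);
 *
 *	// at unload time; fails with EBUSY while clients are still attached
 *	err = ctl_deregister(my_ref);
 */
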
1821 errno_t
1822 ctl_deregister(void *kctlref)
1823 {
1824 struct kctl *kctl;
1825
1826 lck_mtx_lock(ctl_mtx);
1827 if ((kctl = kctl_from_ref(kctlref)) == NULL) {
1828 kctlstat.kcs_bad_kctlref++;
1829 lck_mtx_unlock(ctl_mtx);
1830 if (ctl_debug != 0) {
1831 printf("%s invalid kctlref %p\n",
1832 __func__, kctlref);
1833 }
1834 return EINVAL;
1835 }
1836
1837 if (!TAILQ_EMPTY(&kctl->kcb_head)) {
1838 lck_mtx_unlock(ctl_mtx);
1839 return EBUSY;
1840 }
1841
1842 TAILQ_REMOVE(&ctl_head, kctl, next);
1843
1844 kctlstat.kcs_reg_count--;
1845 kctlstat.kcs_gencnt++;
1846
1847 kctl_delete_ref(kctl->kctlref);
1848 lck_mtx_unlock(ctl_mtx);
1849
1850 ctl_post_msg(KEV_CTL_DEREGISTERED, kctl->id);
1851 FREE(kctl, M_TEMP);
1852 return 0;
1853 }
1854
1855 /*
1856 * Must be called with the global ctl_mtx lock taken
1857 */
1858 static struct kctl *
1859 ctl_find_by_name(const char *name)
1860 {
1861 struct kctl *kctl;
1862
1863 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1864
1865 TAILQ_FOREACH(kctl, &ctl_head, next)
1866 if (strncmp(kctl->name, name, sizeof(kctl->name)) == 0) {
1867 return kctl;
1868 }
1869
1870 return NULL;
1871 }
1872
1873 u_int32_t
1874 ctl_id_by_name(const char *name)
1875 {
1876 u_int32_t ctl_id = 0;
1877 struct kctl *kctl;
1878
1879 lck_mtx_lock(ctl_mtx);
1880 kctl = ctl_find_by_name(name);
1881 if (kctl) {
1882 ctl_id = kctl->id;
1883 }
1884 lck_mtx_unlock(ctl_mtx);
1885
1886 return ctl_id;
1887 }
1888
1889 errno_t
1890 ctl_name_by_id(u_int32_t id, char *out_name, size_t maxsize)
1891 {
1892 int found = 0;
1893 struct kctl *kctl;
1894
1895 lck_mtx_lock(ctl_mtx);
1896 TAILQ_FOREACH(kctl, &ctl_head, next) {
1897 if (kctl->id == id) {
1898 break;
1899 }
1900 }
1901
1902 if (kctl) {
1903 if (maxsize > MAX_KCTL_NAME) {
1904 maxsize = MAX_KCTL_NAME;
1905 }
1906 strlcpy(out_name, kctl->name, maxsize);
1907 found = 1;
1908 }
1909 lck_mtx_unlock(ctl_mtx);
1910
1911 return found ? 0 : ENOENT;
1912 }
1913
1914 /*
1915 * Must be called with the global ctl_mtx lock taken
1916 *
1917 */
1918 static struct kctl *
1919 ctl_find_by_id_unit(u_int32_t id, u_int32_t unit)
1920 {
1921 struct kctl *kctl;
1922
1923 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1924
1925 TAILQ_FOREACH(kctl, &ctl_head, next) {
1926 if (kctl->id == id && (kctl->flags & CTL_FLAG_REG_ID_UNIT) == 0) {
1927 return kctl;
1928 } else if (kctl->id == id && kctl->reg_unit == unit) {
1929 return kctl;
1930 }
1931 }
1932 return NULL;
1933 }
1934
1935 /*
1936 * Must be called with kernel controller lock taken
1937 */
1938 static struct ctl_cb *
1939 kcb_find(struct kctl *kctl, u_int32_t unit)
1940 {
1941 struct ctl_cb *kcb;
1942
1943 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1944
1945 TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
1946 if (kcb->sac.sc_unit == unit) {
1947 return kcb;
1948 }
1949
1950 return NULL;
1951 }
1952
1953 static struct socket *
1954 kcb_find_socket(kern_ctl_ref kctlref, u_int32_t unit, u_int32_t *kctlflags)
1955 {
1956 struct socket *so = NULL;
1957 struct ctl_cb *kcb;
1958 void *lr_saved;
1959 struct kctl *kctl;
1960 int i;
1961
1962 lr_saved = __builtin_return_address(0);
1963
1964 lck_mtx_lock(ctl_mtx);
1965 /*
1966 * First validate the kctlref
1967 */
1968 if ((kctl = kctl_from_ref(kctlref)) == NULL) {
1969 kctlstat.kcs_bad_kctlref++;
1970 lck_mtx_unlock(ctl_mtx);
1971 if (ctl_debug != 0) {
1972 printf("%s invalid kctlref %p\n",
1973 __func__, kctlref);
1974 }
1975 return NULL;
1976 }
1977
1978 kcb = kcb_find(kctl, unit);
1979 if (kcb == NULL || kcb->kctl != kctl || (so = kcb->so) == NULL) {
1980 lck_mtx_unlock(ctl_mtx);
1981 return NULL;
1982 }
1983 /*
1984 * This prevents the socket from being closed
1985 */
1986 kcb->usecount++;
1987 /*
1988 * Respect lock ordering: socket before ctl_mtx
1989 */
1990 lck_mtx_unlock(ctl_mtx);
1991
1992 socket_lock(so, 1);
1993 /*
1994 * The socket lock history is more useful if we store
1995 * the address of the caller.
1996 */
1997 i = (so->next_lock_lr + SO_LCKDBG_MAX - 1) % SO_LCKDBG_MAX;
1998 so->lock_lr[i] = lr_saved;
1999
2000 lck_mtx_lock(ctl_mtx);
2001
2002 if ((kctl = kctl_from_ref(kctlref)) == NULL || kcb->kctl == NULL) {
2003 lck_mtx_unlock(ctl_mtx);
2004 socket_unlock(so, 1);
2005 so = NULL;
2006 lck_mtx_lock(ctl_mtx);
2007 } else if (kctlflags != NULL) {
2008 *kctlflags = kctl->flags;
2009 }
2010
2011 kcb->usecount--;
2012 if (kcb->usecount == 0) {
2013 wakeup((event_t)&kcb->usecount);
2014 }
2015
2016 lck_mtx_unlock(ctl_mtx);
2017
2018 return so;
2019 }
2020
2021 static void
2022 ctl_post_msg(u_int32_t event_code, u_int32_t id)
2023 {
2024 struct ctl_event_data ctl_ev_data;
2025 struct kev_msg ev_msg;
2026
2027 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_NOTOWNED);
2028
2029 bzero(&ev_msg, sizeof(struct kev_msg));
2030 ev_msg.vendor_code = KEV_VENDOR_APPLE;
2031
2032 ev_msg.kev_class = KEV_SYSTEM_CLASS;
2033 ev_msg.kev_subclass = KEV_CTL_SUBCLASS;
2034 ev_msg.event_code = event_code;
2035
2036 /* common nke subclass data */
2037 bzero(&ctl_ev_data, sizeof(ctl_ev_data));
2038 ctl_ev_data.ctl_id = id;
2039 ev_msg.dv[0].data_ptr = &ctl_ev_data;
2040 ev_msg.dv[0].data_length = sizeof(ctl_ev_data);
2041
2042 ev_msg.dv[1].data_length = 0;
2043
2044 kev_post_msg(&ev_msg);
2045 }
2046
2047 static int
2048 ctl_lock(struct socket *so, int refcount, void *lr)
2049 {
2050 void *lr_saved;
2051
2052 if (lr == NULL) {
2053 lr_saved = __builtin_return_address(0);
2054 } else {
2055 lr_saved = lr;
2056 }
2057
2058 if (so->so_pcb != NULL) {
2059 lck_mtx_lock(((struct ctl_cb *)so->so_pcb)->mtx);
2060 } else {
2061 panic("ctl_lock: so=%p NO PCB! lr=%p lrh= %s\n",
2062 so, lr_saved, solockhistory_nr(so));
2063 /* NOTREACHED */
2064 }
2065
2066 if (so->so_usecount < 0) {
2067 panic("ctl_lock: so=%p so_pcb=%p lr=%p ref=%x lrh= %s\n",
2068 so, so->so_pcb, lr_saved, so->so_usecount,
2069 solockhistory_nr(so));
2070 /* NOTREACHED */
2071 }
2072
2073 if (refcount) {
2074 so->so_usecount++;
2075 }
2076
2077 so->lock_lr[so->next_lock_lr] = lr_saved;
2078 so->next_lock_lr = (so->next_lock_lr + 1) % SO_LCKDBG_MAX;
2079 return 0;
2080 }
2081
2082 static int
2083 ctl_unlock(struct socket *so, int refcount, void *lr)
2084 {
2085 void *lr_saved;
2086 lck_mtx_t *mutex_held;
2087
2088 if (lr == NULL) {
2089 lr_saved = __builtin_return_address(0);
2090 } else {
2091 lr_saved = lr;
2092 }
2093
2094 #if (MORE_KCTLLOCK_DEBUG && (DEVELOPMENT || DEBUG))
2095 printf("ctl_unlock: so=%llx sopcb=%llx lock=%llx ref=%u lr=%llx\n",
2096 (uint64_t)VM_KERNEL_ADDRPERM(so),
2097 (uint64_t)VM_KERNEL_ADDRPERM(so->so_pcb),
2098 (uint64_t)VM_KERNEL_ADDRPERM(((struct ctl_cb *)so->so_pcb)->mtx),
2099 so->so_usecount, (uint64_t)VM_KERNEL_ADDRPERM(lr_saved));
2100 #endif /* (MORE_KCTLLOCK_DEBUG && (DEVELOPMENT || DEBUG)) */
2101 if (refcount) {
2102 so->so_usecount--;
2103 }
2104
2105 if (so->so_usecount < 0) {
2106 panic("ctl_unlock: so=%p usecount=%x lrh= %s\n",
2107 so, so->so_usecount, solockhistory_nr(so));
2108 /* NOTREACHED */
2109 }
2110 if (so->so_pcb == NULL) {
2111 panic("ctl_unlock: so=%p NO PCB usecount=%x lr=%p lrh= %s\n",
2112 so, so->so_usecount, (void *)lr_saved,
2113 solockhistory_nr(so));
2114 /* NOTREACHED */
2115 }
2116 mutex_held = ((struct ctl_cb *)so->so_pcb)->mtx;
2117
2118 lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
2119 so->unlock_lr[so->next_unlock_lr] = lr_saved;
2120 so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX;
2121 lck_mtx_unlock(mutex_held);
2122
2123 if (so->so_usecount == 0) {
2124 ctl_sofreelastref(so);
2125 }
2126
2127 return 0;
2128 }
2129
2130 static lck_mtx_t *
2131 ctl_getlock(struct socket *so, int flags)
2132 {
2133 #pragma unused(flags)
2134 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
2135
2136 if (so->so_pcb) {
2137 if (so->so_usecount < 0) {
2138 panic("ctl_getlock: so=%p usecount=%x lrh= %s\n",
2139 so, so->so_usecount, solockhistory_nr(so));
2140 }
2141 return kcb->mtx;
2142 } else {
2143 panic("ctl_getlock: so=%p NULL NO so_pcb %s\n",
2144 so, solockhistory_nr(so));
2145 return so->so_proto->pr_domain->dom_mtx;
2146 }
2147 }
2148
2149 __private_extern__ int
2150 kctl_reg_list SYSCTL_HANDLER_ARGS
2151 {
2152 #pragma unused(oidp, arg1, arg2)
2153 int error = 0;
2154 int n, i;
2155 struct xsystmgen xsg;
2156 void *buf = NULL;
2157 struct kctl *kctl;
2158 size_t item_size = ROUNDUP64(sizeof(struct xkctl_reg));
2159
2160 buf = _MALLOC(item_size, M_TEMP, M_WAITOK | M_ZERO);
2161 if (buf == NULL) {
2162 return ENOMEM;
2163 }
2164
2165 lck_mtx_lock(ctl_mtx);
2166
2167 n = kctlstat.kcs_reg_count;
2168
2169 if (req->oldptr == USER_ADDR_NULL) {
2170 req->oldidx = (n + n / 8) * sizeof(struct xkctl_reg);
2171 goto done;
2172 }
2173 if (req->newptr != USER_ADDR_NULL) {
2174 error = EPERM;
2175 goto done;
2176 }
2177 bzero(&xsg, sizeof(xsg));
2178 xsg.xg_len = sizeof(xsg);
2179 xsg.xg_count = n;
2180 xsg.xg_gen = kctlstat.kcs_gencnt;
2181 xsg.xg_sogen = so_gencnt;
2182 error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
2183 if (error) {
2184 goto done;
2185 }
2186 /*
2187 * We are done if there is no pcb
2188 */
2189 if (n == 0) {
2190 goto done;
2191 }
2192
2193 i = 0;
2194 for (i = 0, kctl = TAILQ_FIRST(&ctl_head);
2195 i < n && kctl != NULL;
2196 i++, kctl = TAILQ_NEXT(kctl, next)) {
2197 struct xkctl_reg *xkr = (struct xkctl_reg *)buf;
2198 struct ctl_cb *kcb;
2199 u_int32_t pcbcount = 0;
2200
2201 TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
2202 pcbcount++;
2203
2204 bzero(buf, item_size);
2205
2206 xkr->xkr_len = sizeof(struct xkctl_reg);
2207 xkr->xkr_kind = XSO_KCREG;
2208 xkr->xkr_id = kctl->id;
2209 xkr->xkr_reg_unit = kctl->reg_unit;
2210 xkr->xkr_flags = kctl->flags;
2211 xkr->xkr_kctlref = (uint64_t)(kctl->kctlref);
2212 xkr->xkr_recvbufsize = kctl->recvbufsize;
2213 xkr->xkr_sendbufsize = kctl->sendbufsize;
2214 xkr->xkr_lastunit = kctl->lastunit;
2215 xkr->xkr_pcbcount = pcbcount;
2216 xkr->xkr_connect = (uint64_t)VM_KERNEL_UNSLIDE(kctl->connect);
2217 xkr->xkr_disconnect =
2218 (uint64_t)VM_KERNEL_UNSLIDE(kctl->disconnect);
2219 xkr->xkr_send = (uint64_t)VM_KERNEL_UNSLIDE(kctl->send);
2220 xkr->xkr_send_list =
2221 (uint64_t)VM_KERNEL_UNSLIDE(kctl->send_list);
2222 xkr->xkr_setopt = (uint64_t)VM_KERNEL_UNSLIDE(kctl->setopt);
2223 xkr->xkr_getopt = (uint64_t)VM_KERNEL_UNSLIDE(kctl->getopt);
2224 xkr->xkr_rcvd = (uint64_t)VM_KERNEL_UNSLIDE(kctl->rcvd);
2225 strlcpy(xkr->xkr_name, kctl->name, sizeof(xkr->xkr_name));
2226
2227 error = SYSCTL_OUT(req, buf, item_size);
2228 }
2229
2230 if (error == 0) {
2231 /*
2232 * Give the user an updated idea of our state.
2233 * If the generation differs from what we told
2234 * her before, she knows that something happened
2235 * while we were processing this request, and it
2236 * might be necessary to retry.
2237 */
2238 bzero(&xsg, sizeof(xsg));
2239 xsg.xg_len = sizeof(xsg);
2240 xsg.xg_count = n;
2241 xsg.xg_gen = kctlstat.kcs_gencnt;
2242 xsg.xg_sogen = so_gencnt;
2243 error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
2244 if (error) {
2245 goto done;
2246 }
2247 }
2248
2249 done:
2250 lck_mtx_unlock(ctl_mtx);
2251
2252 if (buf != NULL) {
2253 FREE(buf, M_TEMP);
2254 }
2255
2256 return error;
2257 }
2258
2259 __private_extern__ int
2260 kctl_pcblist SYSCTL_HANDLER_ARGS
2261 {
2262 #pragma unused(oidp, arg1, arg2)
2263 int error = 0;
2264 int n, i;
2265 struct xsystmgen xsg;
2266 void *buf = NULL;
2267 struct kctl *kctl;
2268 size_t item_size = ROUNDUP64(sizeof(struct xkctlpcb)) +
2269 ROUNDUP64(sizeof(struct xsocket_n)) +
2270 2 * ROUNDUP64(sizeof(struct xsockbuf_n)) +
2271 ROUNDUP64(sizeof(struct xsockstat_n));
2272
2273 buf = _MALLOC(item_size, M_TEMP, M_WAITOK | M_ZERO);
2274 if (buf == NULL) {
2275 return ENOMEM;
2276 }
2277
2278 lck_mtx_lock(ctl_mtx);
2279
2280 n = kctlstat.kcs_pcbcount;
2281
2282 if (req->oldptr == USER_ADDR_NULL) {
2283 req->oldidx = (n + n / 8) * item_size;
2284 goto done;
2285 }
2286 if (req->newptr != USER_ADDR_NULL) {
2287 error = EPERM;
2288 goto done;
2289 }
2290 bzero(&xsg, sizeof(xsg));
2291 xsg.xg_len = sizeof(xsg);
2292 xsg.xg_count = n;
2293 xsg.xg_gen = kctlstat.kcs_gencnt;
2294 xsg.xg_sogen = so_gencnt;
2295 error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
2296 if (error) {
2297 goto done;
2298 }
2299 /*
2300 * We are done if there is no pcb
2301 */
2302 if (n == 0) {
2303 goto done;
2304 }
2305
2306 i = 0;
2307 for (i = 0, kctl = TAILQ_FIRST(&ctl_head);
2308 i < n && kctl != NULL;
2309 kctl = TAILQ_NEXT(kctl, next)) {
2310 struct ctl_cb *kcb;
2311
2312 for (kcb = TAILQ_FIRST(&kctl->kcb_head);
2313 i < n && kcb != NULL;
2314 i++, kcb = TAILQ_NEXT(kcb, next)) {
2315 struct xkctlpcb *xk = (struct xkctlpcb *)buf;
2316 struct xsocket_n *xso = (struct xsocket_n *)
2317 ADVANCE64(xk, sizeof(*xk));
2318 struct xsockbuf_n *xsbrcv = (struct xsockbuf_n *)
2319 ADVANCE64(xso, sizeof(*xso));
2320 struct xsockbuf_n *xsbsnd = (struct xsockbuf_n *)
2321 ADVANCE64(xsbrcv, sizeof(*xsbrcv));
2322 struct xsockstat_n *xsostats = (struct xsockstat_n *)
2323 ADVANCE64(xsbsnd, sizeof(*xsbsnd));
2324
2325 bzero(buf, item_size);
2326
2327 xk->xkp_len = sizeof(struct xkctlpcb);
2328 xk->xkp_kind = XSO_KCB;
2329 xk->xkp_unit = kcb->sac.sc_unit;
2330 xk->xkp_kctpcb = (uint64_t)VM_KERNEL_ADDRPERM(kcb);
2331 xk->xkp_kctlref = (uint64_t)VM_KERNEL_ADDRPERM(kctl);
2332 xk->xkp_kctlid = kctl->id;
2333 strlcpy(xk->xkp_kctlname, kctl->name,
2334 sizeof(xk->xkp_kctlname));
2335
2336 sotoxsocket_n(kcb->so, xso);
2337 sbtoxsockbuf_n(kcb->so ?
2338 &kcb->so->so_rcv : NULL, xsbrcv);
2339 sbtoxsockbuf_n(kcb->so ?
2340 &kcb->so->so_snd : NULL, xsbsnd);
2341 sbtoxsockstat_n(kcb->so, xsostats);
2342
2343 error = SYSCTL_OUT(req, buf, item_size);
2344 }
2345 }
2346
2347 if (error == 0) {
2348 /*
2349 * Give the user an updated idea of our state.
2350 * If the generation differs from what we told
2351 * her before, she knows that something happened
2352 * while we were processing this request, and it
2353 * might be necessary to retry.
2354 */
2355 bzero(&xsg, sizeof(xsg));
2356 xsg.xg_len = sizeof(xsg);
2357 xsg.xg_count = n;
2358 xsg.xg_gen = kctlstat.kcs_gencnt;
2359 xsg.xg_sogen = so_gencnt;
2360 error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
2361 if (error) {
2362 goto done;
2363 }
2364 }
2365
2366 done:
2367 lck_mtx_unlock(ctl_mtx);
2368
2369 return error;
2370 }
2371
2372 int
2373 kctl_getstat SYSCTL_HANDLER_ARGS
2374 {
2375 #pragma unused(oidp, arg1, arg2)
2376 int error = 0;
2377
2378 lck_mtx_lock(ctl_mtx);
2379
2380 if (req->newptr != USER_ADDR_NULL) {
2381 error = EPERM;
2382 goto done;
2383 }
2384 if (req->oldptr == USER_ADDR_NULL) {
2385 req->oldidx = sizeof(struct kctlstat);
2386 goto done;
2387 }
2388
2389 error = SYSCTL_OUT(req, &kctlstat,
2390 MIN(sizeof(struct kctlstat), req->oldlen));
2391 done:
2392 lck_mtx_unlock(ctl_mtx);
2393 return error;
2394 }
2395
2396 void
2397 kctl_fill_socketinfo(struct socket *so, struct socket_info *si)
2398 {
2399 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
2400 struct kern_ctl_info *kcsi =
2401 &si->soi_proto.pri_kern_ctl;
2402 struct kctl *kctl = kcb->kctl;
2403
2404 si->soi_kind = SOCKINFO_KERN_CTL;
2405
2406 if (kctl == 0) {
2407 return;
2408 }
2409
2410 kcsi->kcsi_id = kctl->id;
2411 kcsi->kcsi_reg_unit = kctl->reg_unit;
2412 kcsi->kcsi_flags = kctl->flags;
2413 kcsi->kcsi_recvbufsize = kctl->recvbufsize;
2414 kcsi->kcsi_sendbufsize = kctl->sendbufsize;
2415 kcsi->kcsi_unit = kcb->sac.sc_unit;
2416 strlcpy(kcsi->kcsi_name, kctl->name, MAX_KCTL_NAME);
2417 }