apple/xnu.git (xnu-7195.101.1): bsd/kern/kern_control.c
1 /*
2 * Copyright (c) 1999-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /*
30 * Kernel Control domain - allows kernel extensions to register controls
31 * that user processes can connect to in order to read/write data.
32 *
33 * Vincent Lubet, 040506
34 * Christophe Allie, 010928
35 * Justin C. Walker, 990319
36 */
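/*
 * Illustrative userspace sketch (not part of this file): a client
 * typically resolves a control name to an id with CTLIOCGINFO and then
 * connect()s a PF_SYSTEM/SYSPROTO_CONTROL socket.  The control name
 * "com.example.mykext" below is a placeholder, not a real control.
 *
 *	struct ctl_info info;
 *	struct sockaddr_ctl addr;
 *	int fd = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);
 *
 *	bzero(&info, sizeof(info));
 *	strlcpy(info.ctl_name, "com.example.mykext", sizeof(info.ctl_name));
 *	ioctl(fd, CTLIOCGINFO, &info);          // fills in info.ctl_id
 *
 *	bzero(&addr, sizeof(addr));
 *	addr.sc_len = sizeof(addr);
 *	addr.sc_family = AF_SYSTEM;
 *	addr.ss_sysaddr = AF_SYS_CONTROL;
 *	addr.sc_id = info.ctl_id;
 *	addr.sc_unit = 0;                       // 0 lets the kernel pick a unit
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 *
 * After this, send()/recv() on fd reach the registered control through
 * the ctl_send and ctl_enqueue* paths implemented in this file.
 */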
37
38 #include <sys/types.h>
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/syslog.h>
42 #include <sys/socket.h>
43 #include <sys/socketvar.h>
44 #include <sys/protosw.h>
45 #include <sys/domain.h>
46 #include <sys/malloc.h>
47 #include <sys/mbuf.h>
48 #include <sys/sys_domain.h>
49 #include <sys/kern_event.h>
50 #include <sys/kern_control.h>
51 #include <sys/kauth.h>
52 #include <sys/sysctl.h>
53 #include <sys/proc_info.h>
54 #include <net/if_var.h>
55
56 #include <mach/vm_types.h>
57
58 #include <kern/thread.h>
59
60 struct kctl {
61 TAILQ_ENTRY(kctl) next; /* controller chain */
62 kern_ctl_ref kctlref;
63
64 /* controller information provided when registering */
65 char name[MAX_KCTL_NAME]; /* unique identifier */
66 u_int32_t id;
67 u_int32_t reg_unit;
68
69 /* misc communication information */
70 u_int32_t flags; /* support flags */
71 u_int32_t recvbufsize; /* request more than the default buffer size */
72 u_int32_t sendbufsize; /* request more than the default buffer size */
73
74 /* Dispatch functions */
75 ctl_setup_func setup; /* Setup contact */
76 ctl_bind_func bind; /* Prepare contact */
77 ctl_connect_func connect; /* Make contact */
78 ctl_disconnect_func disconnect; /* Break contact */
79 ctl_send_func send; /* Send data to nke */
80 ctl_send_list_func send_list; /* Send list of packets */
81 ctl_setopt_func setopt; /* set kctl configuration */
82 ctl_getopt_func getopt; /* get kctl configuration */
83 ctl_rcvd_func rcvd; /* Notify nke when client reads data */
84
85 TAILQ_HEAD(, ctl_cb) kcb_head;
86 u_int32_t lastunit;
87 };
88
89 #if DEVELOPMENT || DEBUG
90 enum ctl_status {
91 KCTL_DISCONNECTED = 0,
92 KCTL_CONNECTING = 1,
93 KCTL_CONNECTED = 2
94 };
95 #endif /* DEVELOPMENT || DEBUG */
96
97 struct ctl_cb {
98 TAILQ_ENTRY(ctl_cb) next; /* controller chain */
99 lck_mtx_t mtx;
100 struct socket *so; /* controlling socket */
101 struct kctl *kctl; /* back pointer to controller */
102 void *userdata;
103 struct sockaddr_ctl sac;
104 u_int32_t usecount;
105 u_int32_t kcb_usecount;
106 u_int32_t require_clearing_count;
107 #if DEVELOPMENT || DEBUG
108 enum ctl_status status;
109 #endif /* DEVELOPMENT || DEBUG */
110 };
111
112 #ifndef ROUNDUP64
113 #define ROUNDUP64(x) P2ROUNDUP((x), sizeof (u_int64_t))
114 #endif
115
116 #ifndef ADVANCE64
117 #define ADVANCE64(p, n) (void*)((char *)(p) + ROUNDUP64(n))
118 #endif
119
120 /*
121 * Default send and receive buffer sizes we support
122 */
123
124 #define CTL_SENDSIZE (2 * 1024) /* default buffer size */
125 #define CTL_RECVSIZE (8 * 1024) /* default buffer size */
126
127 /*
128 * Definitions and vars for the list of registered kernel controllers
129 */
130
131 const u_int32_t ctl_maxunit = 65536;
132 static LCK_ATTR_DECLARE(ctl_lck_attr, 0, 0);
133 static LCK_GRP_DECLARE(ctl_lck_grp, "Kernel Control Protocol");
134 static LCK_MTX_DECLARE_ATTR(ctl_mtx, &ctl_lck_grp, &ctl_lck_attr);
135
136 /* all the controllers are chained */
137 TAILQ_HEAD(kctl_list, kctl) ctl_head = TAILQ_HEAD_INITIALIZER(ctl_head);
138
139 static int ctl_attach(struct socket *, int, struct proc *);
140 static int ctl_detach(struct socket *);
141 static int ctl_sofreelastref(struct socket *so);
142 static int ctl_bind(struct socket *, struct sockaddr *, struct proc *);
143 static int ctl_connect(struct socket *, struct sockaddr *, struct proc *);
144 static int ctl_disconnect(struct socket *);
145 static int ctl_ioctl(struct socket *so, u_long cmd, caddr_t data,
146 struct ifnet *ifp, struct proc *p);
147 static int ctl_send(struct socket *, int, struct mbuf *,
148 struct sockaddr *, struct mbuf *, struct proc *);
149 static int ctl_send_list(struct socket *, int, struct mbuf *,
150 struct sockaddr *, struct mbuf *, struct proc *);
151 static int ctl_ctloutput(struct socket *, struct sockopt *);
152 static int ctl_peeraddr(struct socket *so, struct sockaddr **nam);
153 static int ctl_usr_rcvd(struct socket *so, int flags);
154
155 static struct kctl *ctl_find_by_name(const char *);
156 static struct kctl *ctl_find_by_id_unit(u_int32_t id, u_int32_t unit);
157
158 static struct socket *kcb_find_socket(kern_ctl_ref kctlref, u_int32_t unit,
159 u_int32_t *);
160 static struct ctl_cb *kcb_find(struct kctl *, u_int32_t unit);
161 static void ctl_post_msg(u_int32_t event_code, u_int32_t id);
162
163 static int ctl_lock(struct socket *, int, void *);
164 static int ctl_unlock(struct socket *, int, void *);
165 static lck_mtx_t * ctl_getlock(struct socket *, int);
166
167 static struct pr_usrreqs ctl_usrreqs = {
168 .pru_attach = ctl_attach,
169 .pru_bind = ctl_bind,
170 .pru_connect = ctl_connect,
171 .pru_control = ctl_ioctl,
172 .pru_detach = ctl_detach,
173 .pru_disconnect = ctl_disconnect,
174 .pru_peeraddr = ctl_peeraddr,
175 .pru_rcvd = ctl_usr_rcvd,
176 .pru_send = ctl_send,
177 .pru_send_list = ctl_send_list,
178 .pru_sosend = sosend,
179 .pru_sosend_list = sosend_list,
180 .pru_soreceive = soreceive,
181 .pru_soreceive_list = soreceive_list,
182 };
183
184 static struct protosw kctlsw[] = {
185 {
186 .pr_type = SOCK_DGRAM,
187 .pr_protocol = SYSPROTO_CONTROL,
188 .pr_flags = PR_ATOMIC | PR_CONNREQUIRED | PR_PCBLOCK | PR_WANTRCVD,
189 .pr_ctloutput = ctl_ctloutput,
190 .pr_usrreqs = &ctl_usrreqs,
191 .pr_lock = ctl_lock,
192 .pr_unlock = ctl_unlock,
193 .pr_getlock = ctl_getlock,
194 },
195 {
196 .pr_type = SOCK_STREAM,
197 .pr_protocol = SYSPROTO_CONTROL,
198 .pr_flags = PR_CONNREQUIRED | PR_PCBLOCK | PR_WANTRCVD,
199 .pr_ctloutput = ctl_ctloutput,
200 .pr_usrreqs = &ctl_usrreqs,
201 .pr_lock = ctl_lock,
202 .pr_unlock = ctl_unlock,
203 .pr_getlock = ctl_getlock,
204 }
205 };
206
207 __private_extern__ int kctl_reg_list SYSCTL_HANDLER_ARGS;
208 __private_extern__ int kctl_pcblist SYSCTL_HANDLER_ARGS;
209 __private_extern__ int kctl_getstat SYSCTL_HANDLER_ARGS;
210
211
212 SYSCTL_NODE(_net_systm, OID_AUTO, kctl,
213 CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Kernel control family");
214
215 struct kctlstat kctlstat;
216 SYSCTL_PROC(_net_systm_kctl, OID_AUTO, stats,
217 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
218 kctl_getstat, "S,kctlstat", "");
219
220 SYSCTL_PROC(_net_systm_kctl, OID_AUTO, reg_list,
221 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
222 kctl_reg_list, "S,xkctl_reg", "");
223
224 SYSCTL_PROC(_net_systm_kctl, OID_AUTO, pcblist,
225 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
226 kctl_pcblist, "S,xkctlpcb", "");
227
228 u_int32_t ctl_autorcvbuf_max = 256 * 1024;
229 SYSCTL_INT(_net_systm_kctl, OID_AUTO, autorcvbufmax,
230 CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_autorcvbuf_max, 0, "");
231
232 u_int32_t ctl_autorcvbuf_high = 0;
233 SYSCTL_INT(_net_systm_kctl, OID_AUTO, autorcvbufhigh,
234 CTLFLAG_RD | CTLFLAG_LOCKED, &ctl_autorcvbuf_high, 0, "");
235
236 u_int32_t ctl_debug = 0;
237 SYSCTL_INT(_net_systm_kctl, OID_AUTO, debug,
238 CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_debug, 0, "");
239
240 #if DEVELOPMENT || DEBUG
241 u_int32_t ctl_panic_debug = 0;
242 SYSCTL_INT(_net_systm_kctl, OID_AUTO, panicdebug,
243 CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_panic_debug, 0, "");
244 #endif /* DEVELOPMENT || DEBUG */
245
246 #define KCTL_TBL_INC 16
247
248 static uintptr_t kctl_tbl_size = 0;
249 static u_int32_t kctl_tbl_growing = 0;
250 static u_int32_t kctl_tbl_growing_waiting = 0;
251 static uintptr_t kctl_tbl_count = 0;
252 static struct kctl **kctl_table = NULL;
253 static uintptr_t kctl_ref_gencnt = 0;
254
255 static void kctl_tbl_grow(void);
256 static kern_ctl_ref kctl_make_ref(struct kctl *kctl);
257 static void kctl_delete_ref(kern_ctl_ref);
258 static struct kctl *kctl_from_ref(kern_ctl_ref);
259
260 /*
261 * Install the protosw's for the Kernel Control manager.
262 */
263 __private_extern__ void
264 kern_control_init(struct domain *dp)
265 {
266 struct protosw *pr;
267 int i;
268 int kctl_proto_count = (sizeof(kctlsw) / sizeof(struct protosw));
269
270 VERIFY(!(dp->dom_flags & DOM_INITIALIZED));
271 VERIFY(dp == systemdomain);
272
273 for (i = 0, pr = &kctlsw[0]; i < kctl_proto_count; i++, pr++) {
274 net_add_proto(pr, dp, 1);
275 }
276 }
277
278 static void
279 kcb_delete(struct ctl_cb *kcb)
280 {
281 if (kcb != 0) {
282 lck_mtx_destroy(&kcb->mtx, &ctl_lck_grp);
283 kheap_free(KHEAP_DEFAULT, kcb, sizeof(struct ctl_cb));
284 }
285 }
286
287 /*
288 * Kernel Controller user-request functions
289 * attach function must exist and succeed
290 * detach not necessary
291 * we need a pcb for the per-socket mutex
292 */
293 static int
294 ctl_attach(struct socket *so, int proto, struct proc *p)
295 {
296 #pragma unused(proto, p)
297 int error = 0;
298 struct ctl_cb *kcb = 0;
299
300 kcb = kheap_alloc(KHEAP_DEFAULT, sizeof(struct ctl_cb), Z_WAITOK | Z_ZERO);
301 if (kcb == NULL) {
302 error = ENOMEM;
303 goto quit;
304 }
305
306 lck_mtx_init(&kcb->mtx, &ctl_lck_grp, &ctl_lck_attr);
307 kcb->so = so;
308 so->so_pcb = (caddr_t)kcb;
309
310 quit:
311 if (error != 0) {
312 kcb_delete(kcb);
313 kcb = 0;
314 }
315 return error;
316 }
317
318 static int
319 ctl_sofreelastref(struct socket *so)
320 {
321 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
322
323 so->so_pcb = 0;
324
325 if (kcb != 0) {
326 struct kctl *kctl;
327 if ((kctl = kcb->kctl) != 0) {
328 lck_mtx_lock(&ctl_mtx);
329 TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
330 kctlstat.kcs_pcbcount--;
331 kctlstat.kcs_gencnt++;
332 lck_mtx_unlock(&ctl_mtx);
333 }
334 kcb_delete(kcb);
335 }
336 sofreelastref(so, 1);
337 return 0;
338 }
339
340 /*
341 * Use this function and ctl_kcb_require_clearing to serialize
342 * critical calls into the kctl subsystem
343 */
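/*
 * Typical pattern (as used by ctl_detach/ctl_bind/ctl_connect below):
 *
 *	ctl_kcb_increment_use_count(kcb, mtx_held);
 *	ctl_kcb_require_clearing(kcb, mtx_held);   // only for callers needing exclusivity
 *	... critical section ...
 *	ctl_kcb_done_clearing(kcb);                // pairs with require_clearing
 *	ctl_kcb_decrement_use_count(kcb);
 */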
344 static void
345 ctl_kcb_increment_use_count(struct ctl_cb *kcb, lck_mtx_t *mutex_held)
346 {
347 LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED);
348 while (kcb->require_clearing_count > 0) {
349 msleep(&kcb->require_clearing_count, mutex_held, PSOCK | PCATCH, "kcb_require_clearing", NULL);
350 }
351 kcb->kcb_usecount++;
352 }
353
354 static void
355 ctl_kcb_require_clearing(struct ctl_cb *kcb, lck_mtx_t *mutex_held)
356 {
357 assert(kcb->kcb_usecount != 0);
358 kcb->require_clearing_count++;
359 kcb->kcb_usecount--;
360 while (kcb->kcb_usecount > 0) { // we need to wait until no one else is running
361 msleep(&kcb->kcb_usecount, mutex_held, PSOCK | PCATCH, "kcb_usecount", NULL);
362 }
363 kcb->kcb_usecount++;
364 }
365
366 static void
367 ctl_kcb_done_clearing(struct ctl_cb *kcb)
368 {
369 assert(kcb->require_clearing_count != 0);
370 kcb->require_clearing_count--;
371 wakeup((caddr_t)&kcb->require_clearing_count);
372 }
373
374 static void
375 ctl_kcb_decrement_use_count(struct ctl_cb *kcb)
376 {
377 assert(kcb->kcb_usecount != 0);
378 kcb->kcb_usecount--;
379 wakeup((caddr_t)&kcb->kcb_usecount);
380 }
381
382 static int
383 ctl_detach(struct socket *so)
384 {
385 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
386
387 if (kcb == 0) {
388 return 0;
389 }
390
391 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
392 ctl_kcb_increment_use_count(kcb, mtx_held);
393 ctl_kcb_require_clearing(kcb, mtx_held);
394
395 if (kcb->kctl != NULL && kcb->kctl->bind != NULL &&
396 kcb->userdata != NULL && !(so->so_state & SS_ISCONNECTED)) {
397 // The unit was bound, but not connected
398 // Invoke the disconnected call to cleanup
399 if (kcb->kctl->disconnect != NULL) {
400 socket_unlock(so, 0);
401 (*kcb->kctl->disconnect)(kcb->kctl->kctlref,
402 kcb->sac.sc_unit, kcb->userdata);
403 socket_lock(so, 0);
404 }
405 }
406
407 soisdisconnected(so);
408 #if DEVELOPMENT || DEBUG
409 kcb->status = KCTL_DISCONNECTED;
410 #endif /* DEVELOPMENT || DEBUG */
411 so->so_flags |= SOF_PCBCLEARING;
412 ctl_kcb_done_clearing(kcb);
413 ctl_kcb_decrement_use_count(kcb);
414 return 0;
415 }
416
417 static int
418 ctl_setup_kctl(struct socket *so, struct sockaddr *nam, struct proc *p)
419 {
420 struct kctl *kctl = NULL;
421 int error = 0;
422 struct sockaddr_ctl sa;
423 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
424 struct ctl_cb *kcb_next = NULL;
425 u_quad_t sbmaxsize;
426 u_int32_t recvbufsize, sendbufsize;
427
428 if (kcb == 0) {
429 panic("ctl_setup_kctl so_pcb null\n");
430 }
431
432 if (kcb->kctl != NULL) {
433 // Already set up, skip
434 return 0;
435 }
436
437 if (nam->sa_len != sizeof(struct sockaddr_ctl)) {
438 return EINVAL;
439 }
440
441 bcopy(nam, &sa, sizeof(struct sockaddr_ctl));
442
443 lck_mtx_lock(&ctl_mtx);
444 kctl = ctl_find_by_id_unit(sa.sc_id, sa.sc_unit);
445 if (kctl == NULL) {
446 lck_mtx_unlock(&ctl_mtx);
447 return ENOENT;
448 }
449
450 if (((kctl->flags & CTL_FLAG_REG_SOCK_STREAM) &&
451 (so->so_type != SOCK_STREAM)) ||
452 (!(kctl->flags & CTL_FLAG_REG_SOCK_STREAM) &&
453 (so->so_type != SOCK_DGRAM))) {
454 lck_mtx_unlock(&ctl_mtx);
455 return EPROTOTYPE;
456 }
457
458 if (kctl->flags & CTL_FLAG_PRIVILEGED) {
459 if (p == 0) {
460 lck_mtx_unlock(&ctl_mtx);
461 return EINVAL;
462 }
463 if (kauth_cred_issuser(kauth_cred_get()) == 0) {
464 lck_mtx_unlock(&ctl_mtx);
465 return EPERM;
466 }
467 }
468
469 if ((kctl->flags & CTL_FLAG_REG_ID_UNIT) || sa.sc_unit != 0) {
470 if (kcb_find(kctl, sa.sc_unit) != NULL) {
471 lck_mtx_unlock(&ctl_mtx);
472 return EBUSY;
473 }
474 } else if (kctl->setup != NULL) {
475 error = (*kctl->setup)(&sa.sc_unit, &kcb->userdata);
476 if (error != 0) {
477 lck_mtx_unlock(&ctl_mtx);
478 return error;
479 }
480 } else {
481 /* Find an unused unit number, assumes unit numbers are in order */
482 u_int32_t unit = 1;
483
484 TAILQ_FOREACH(kcb_next, &kctl->kcb_head, next) {
485 if (kcb_next->sac.sc_unit > unit) {
486 /* Found a gap, let's fill it in */
487 break;
488 }
489 unit = kcb_next->sac.sc_unit + 1;
490 if (unit == ctl_maxunit) {
491 break;
492 }
493 }
494
495 if (unit == ctl_maxunit) {
496 lck_mtx_unlock(&ctl_mtx);
497 return EBUSY;
498 }
499
500 sa.sc_unit = unit;
501 }
502
503 bcopy(&sa, &kcb->sac, sizeof(struct sockaddr_ctl));
504 kcb->kctl = kctl;
505 if (kcb_next != NULL) {
506 TAILQ_INSERT_BEFORE(kcb_next, kcb, next);
507 } else {
508 TAILQ_INSERT_TAIL(&kctl->kcb_head, kcb, next);
509 }
510 kctlstat.kcs_pcbcount++;
511 kctlstat.kcs_gencnt++;
512 kctlstat.kcs_connections++;
513 lck_mtx_unlock(&ctl_mtx);
514
515 /*
516 * rdar://15526688: Limit the send and receive sizes to sb_max
517 * by using the same scaling as sbreserve()
518 */
519 sbmaxsize = (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES);
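	/*
	 * With the usual MSIZE (256) and MCLBYTES (2048) values this caps
	 * the buffers at roughly 8/9 of sb_max; both constants are
	 * platform-defined, so treat that ratio as illustrative only.
	 */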
520
521 if (kctl->sendbufsize > sbmaxsize) {
522 sendbufsize = (u_int32_t)sbmaxsize;
523 } else {
524 sendbufsize = kctl->sendbufsize;
525 }
526
527 if (kctl->recvbufsize > sbmaxsize) {
528 recvbufsize = (u_int32_t)sbmaxsize;
529 } else {
530 recvbufsize = kctl->recvbufsize;
531 }
532
533 error = soreserve(so, sendbufsize, recvbufsize);
534 if (error) {
535 if (ctl_debug) {
536 printf("%s - soreserve(%llx, %u, %u) error %d\n",
537 __func__, (uint64_t)VM_KERNEL_ADDRPERM(so),
538 sendbufsize, recvbufsize, error);
539 }
540 goto done;
541 }
542
543 done:
544 if (error) {
545 soisdisconnected(so);
546 #if DEVELOPMENT || DEBUG
547 kcb->status = KCTL_DISCONNECTED;
548 #endif /* DEVELOPMENT || DEBUG */
549 lck_mtx_lock(&ctl_mtx);
550 TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
551 kcb->kctl = NULL;
552 kcb->sac.sc_unit = 0;
553 kctlstat.kcs_pcbcount--;
554 kctlstat.kcs_gencnt++;
555 kctlstat.kcs_conn_fail++;
556 lck_mtx_unlock(&ctl_mtx);
557 }
558 return error;
559 }
560
561 static int
562 ctl_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
563 {
564 int error = 0;
565 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
566
567 if (kcb == NULL) {
568 panic("ctl_bind so_pcb null\n");
569 }
570
571 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
572 ctl_kcb_increment_use_count(kcb, mtx_held);
573 ctl_kcb_require_clearing(kcb, mtx_held);
574
575 error = ctl_setup_kctl(so, nam, p);
576 if (error) {
577 goto out;
578 }
579
580 if (kcb->kctl == NULL) {
581 panic("ctl_bind kctl null\n");
582 }
583
584 if (kcb->kctl->bind == NULL) {
585 error = EINVAL;
586 goto out;
587 }
588
589 socket_unlock(so, 0);
590 error = (*kcb->kctl->bind)(kcb->kctl->kctlref, &kcb->sac, &kcb->userdata);
591 socket_lock(so, 0);
592
593 out:
594 ctl_kcb_done_clearing(kcb);
595 ctl_kcb_decrement_use_count(kcb);
596 return error;
597 }
598
599 static int
600 ctl_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
601 {
602 int error = 0;
603 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
604
605 if (kcb == NULL) {
606 panic("ctl_connect so_pcb null\n");
607 }
608
609 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
610 ctl_kcb_increment_use_count(kcb, mtx_held);
611 ctl_kcb_require_clearing(kcb, mtx_held);
612
613 #if DEVELOPMENT || DEBUG
614 if (kcb->status != KCTL_DISCONNECTED && ctl_panic_debug) {
615 panic("kctl already connecting/connected");
616 }
617 kcb->status = KCTL_CONNECTING;
618 #endif /* DEVELOPMENT || DEBUG */
619
620 error = ctl_setup_kctl(so, nam, p);
621 if (error) {
622 goto out;
623 }
624
625 if (kcb->kctl == NULL) {
626 panic("ctl_connect kctl null\n");
627 }
628
629 soisconnecting(so);
630 socket_unlock(so, 0);
631 error = (*kcb->kctl->connect)(kcb->kctl->kctlref, &kcb->sac, &kcb->userdata);
632 socket_lock(so, 0);
633 if (error) {
634 goto end;
635 }
636 soisconnected(so);
637 #if DEVELOPMENT || DEBUG
638 kcb->status = KCTL_CONNECTED;
639 #endif /* DEVELOPMENT || DEBUG */
640
641 end:
642 if (error && kcb->kctl->disconnect) {
643 /*
644 * XXX Make sure we don't check the return value
645 * of disconnect here:
646 * ipsec/utun_ctl_disconnect will return an error when
647 * disconnect gets called after a connect failure.
648 * However, if we ever decide to check the disconnect
649 * return value here, please make sure to revisit
650 * ipsec/utun_ctl_disconnect as well.
651 */
652 socket_unlock(so, 0);
653 (*kcb->kctl->disconnect)(kcb->kctl->kctlref, kcb->sac.sc_unit, kcb->userdata);
654 socket_lock(so, 0);
655 }
656 if (error) {
657 soisdisconnected(so);
658 #if DEVELOPMENT || DEBUG
659 kcb->status = KCTL_DISCONNECTED;
660 #endif /* DEVELOPMENT || DEBUG */
661 lck_mtx_lock(&ctl_mtx);
662 TAILQ_REMOVE(&kcb->kctl->kcb_head, kcb, next);
663 kcb->kctl = NULL;
664 kcb->sac.sc_unit = 0;
665 kctlstat.kcs_pcbcount--;
666 kctlstat.kcs_gencnt++;
667 kctlstat.kcs_conn_fail++;
668 lck_mtx_unlock(&ctl_mtx);
669 }
670 out:
671 ctl_kcb_done_clearing(kcb);
672 ctl_kcb_decrement_use_count(kcb);
673 return error;
674 }
675
676 static int
677 ctl_disconnect(struct socket *so)
678 {
679 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
680
681 if ((kcb = (struct ctl_cb *)so->so_pcb)) {
682 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
683 ctl_kcb_increment_use_count(kcb, mtx_held);
684 ctl_kcb_require_clearing(kcb, mtx_held);
685 struct kctl *kctl = kcb->kctl;
686
687 if (kctl && kctl->disconnect) {
688 socket_unlock(so, 0);
689 (*kctl->disconnect)(kctl->kctlref, kcb->sac.sc_unit,
690 kcb->userdata);
691 socket_lock(so, 0);
692 }
693
694 soisdisconnected(so);
695 #if DEVELOPMENT || DEBUG
696 kcb->status = KCTL_DISCONNECTED;
697 #endif /* DEVELOPMENT || DEBUG */
698
699 socket_unlock(so, 0);
700 lck_mtx_lock(&ctl_mtx);
701 kcb->kctl = 0;
702 kcb->sac.sc_unit = 0;
703 while (kcb->usecount != 0) {
704 msleep(&kcb->usecount, &ctl_mtx, 0, "kcb->usecount", 0);
705 }
706 TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
707 kctlstat.kcs_pcbcount--;
708 kctlstat.kcs_gencnt++;
709 lck_mtx_unlock(&ctl_mtx);
710 socket_lock(so, 0);
711 ctl_kcb_done_clearing(kcb);
712 ctl_kcb_decrement_use_count(kcb);
713 }
714 return 0;
715 }
716
717 static int
718 ctl_peeraddr(struct socket *so, struct sockaddr **nam)
719 {
720 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
721 struct kctl *kctl;
722 struct sockaddr_ctl sc;
723
724 if (kcb == NULL) { /* sanity check */
725 return ENOTCONN;
726 }
727
728 if ((kctl = kcb->kctl) == NULL) {
729 return EINVAL;
730 }
731
732 bzero(&sc, sizeof(struct sockaddr_ctl));
733 sc.sc_len = sizeof(struct sockaddr_ctl);
734 sc.sc_family = AF_SYSTEM;
735 sc.ss_sysaddr = AF_SYS_CONTROL;
736 sc.sc_id = kctl->id;
737 sc.sc_unit = kcb->sac.sc_unit;
738
739 *nam = dup_sockaddr((struct sockaddr *)&sc, 1);
740
741 return 0;
742 }
743
744 static void
745 ctl_sbrcv_trim(struct socket *so)
746 {
747 struct sockbuf *sb = &so->so_rcv;
748
749 if (sb->sb_hiwat > sb->sb_idealsize) {
750 u_int32_t diff;
751 int32_t trim;
752
753 /*
754 * The difference between the ideal size and the
755 * current size is the upper bound of the amount to trim
756 */
757 diff = sb->sb_hiwat - sb->sb_idealsize;
758 /*
759 * We cannot trim below the outstanding data
760 */
761 trim = sb->sb_hiwat - sb->sb_cc;
762
763 trim = imin(trim, (int32_t)diff);
764
765 if (trim > 0) {
766 sbreserve(sb, (sb->sb_hiwat - trim));
767
768 if (ctl_debug) {
769 printf("%s - shrunk to %d\n",
770 __func__, sb->sb_hiwat);
771 }
772 }
773 }
774 }
775
776 static int
777 ctl_usr_rcvd(struct socket *so, int flags)
778 {
779 int error = 0;
780 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
781 struct kctl *kctl;
782
783 if (kcb == NULL) {
784 return ENOTCONN;
785 }
786
787 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
788 ctl_kcb_increment_use_count(kcb, mtx_held);
789
790 if ((kctl = kcb->kctl) == NULL) {
791 error = EINVAL;
792 goto out;
793 }
794
795 if (kctl->rcvd) {
796 socket_unlock(so, 0);
797 (*kctl->rcvd)(kctl->kctlref, kcb->sac.sc_unit, kcb->userdata, flags);
798 socket_lock(so, 0);
799 }
800
801 ctl_sbrcv_trim(so);
802
803 out:
804 ctl_kcb_decrement_use_count(kcb);
805 return error;
806 }
807
808 static int
809 ctl_send(struct socket *so, int flags, struct mbuf *m,
810 struct sockaddr *addr, struct mbuf *control,
811 struct proc *p)
812 {
813 #pragma unused(addr, p)
814 int error = 0;
815 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
816 struct kctl *kctl;
817
818 if (control) {
819 m_freem(control);
820 }
821
822 if (kcb == NULL) { /* sanity check */
823 error = ENOTCONN;
824 }
825
826 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
827 ctl_kcb_increment_use_count(kcb, mtx_held);
828
829 if (error == 0 && (kctl = kcb->kctl) == NULL) {
830 error = EINVAL;
831 }
832
833 if (error == 0 && kctl->send) {
834 so_tc_update_stats(m, so, m_get_service_class(m));
835 socket_unlock(so, 0);
836 error = (*kctl->send)(kctl->kctlref, kcb->sac.sc_unit, kcb->userdata,
837 m, flags);
838 socket_lock(so, 0);
839 } else {
840 m_freem(m);
841 if (error == 0) {
842 error = ENOTSUP;
843 }
844 }
845 if (error != 0) {
846 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_send_fail);
847 }
848 ctl_kcb_decrement_use_count(kcb);
849
850 return error;
851 }
852
853 static int
854 ctl_send_list(struct socket *so, int flags, struct mbuf *m,
855 __unused struct sockaddr *addr, struct mbuf *control,
856 __unused struct proc *p)
857 {
858 int error = 0;
859 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
860 struct kctl *kctl;
861
862 if (control) {
863 m_freem_list(control);
864 }
865
866 if (kcb == NULL) { /* sanity check */
867 error = ENOTCONN;
868 }
869
870 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
871 ctl_kcb_increment_use_count(kcb, mtx_held);
872
873 if (error == 0 && (kctl = kcb->kctl) == NULL) {
874 error = EINVAL;
875 }
876
877 if (error == 0 && kctl->send_list) {
878 struct mbuf *nxt;
879
880 for (nxt = m; nxt != NULL; nxt = nxt->m_nextpkt) {
881 so_tc_update_stats(nxt, so, m_get_service_class(nxt));
882 }
883
884 socket_unlock(so, 0);
885 error = (*kctl->send_list)(kctl->kctlref, kcb->sac.sc_unit,
886 kcb->userdata, m, flags);
887 socket_lock(so, 0);
888 } else if (error == 0 && kctl->send) {
889 while (m != NULL && error == 0) {
890 struct mbuf *nextpkt = m->m_nextpkt;
891
892 m->m_nextpkt = NULL;
893 so_tc_update_stats(m, so, m_get_service_class(m));
894 socket_unlock(so, 0);
895 error = (*kctl->send)(kctl->kctlref, kcb->sac.sc_unit,
896 kcb->userdata, m, flags);
897 socket_lock(so, 0);
898 m = nextpkt;
899 }
900 if (m != NULL) {
901 m_freem_list(m);
902 }
903 } else {
904 m_freem_list(m);
905 if (error == 0) {
906 error = ENOTSUP;
907 }
908 }
909 if (error != 0) {
910 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_send_list_fail);
911 }
912 ctl_kcb_decrement_use_count(kcb);
913
914 return error;
915 }
916
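/*
 * Admission control for data enqueued toward the client: regular
 * controls only need enough sbspace(); CTL_FLAG_REG_CRIT controls keep
 * the top 25% of the receive buffer for CTL_DATA_CRIT messages and may
 * grow the buffer (up to ctl_autorcvbuf_max) for critical data.
 */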
917 static errno_t
918 ctl_rcvbspace(struct socket *so, size_t datasize,
919 u_int32_t kctlflags, u_int32_t flags)
920 {
921 struct sockbuf *sb = &so->so_rcv;
922 u_int32_t space = sbspace(sb);
923 errno_t error;
924
925 if ((kctlflags & CTL_FLAG_REG_CRIT) == 0) {
926 if ((u_int32_t) space >= datasize) {
927 error = 0;
928 } else {
929 error = ENOBUFS;
930 }
931 } else if ((flags & CTL_DATA_CRIT) == 0) {
932 /*
933 * Reserve 25% for critical messages
934 */
935 if (space < (sb->sb_hiwat >> 2) ||
936 space < datasize) {
937 error = ENOBUFS;
938 } else {
939 error = 0;
940 }
941 } else {
942 size_t autorcvbuf_max;
943
944 /*
945 * Allow overcommit of 25%
946 */
947 autorcvbuf_max = min(sb->sb_idealsize + (sb->sb_idealsize >> 2),
948 ctl_autorcvbuf_max);
949
950 if ((u_int32_t) space >= datasize) {
951 error = 0;
952 } else if (tcp_cansbgrow(sb) &&
953 sb->sb_hiwat < autorcvbuf_max) {
954 /*
955 * Grow with a little bit of leeway
956 */
957 size_t grow = datasize - space + MSIZE;
958 u_int32_t cc = (u_int32_t)MIN(MIN((sb->sb_hiwat + grow), autorcvbuf_max), UINT32_MAX);
959
960 if (sbreserve(sb, cc) == 1) {
961 if (sb->sb_hiwat > ctl_autorcvbuf_high) {
962 ctl_autorcvbuf_high = sb->sb_hiwat;
963 }
964
965 /*
966 * A final check
967 */
968 if ((u_int32_t) sbspace(sb) >= datasize) {
969 error = 0;
970 } else {
971 error = ENOBUFS;
972 }
973
974 if (ctl_debug) {
975 printf("%s - grown to %d error %d\n",
976 __func__, sb->sb_hiwat, error);
977 }
978 } else {
979 error = ENOBUFS;
980 }
981 } else {
982 error = ENOBUFS;
983 }
984 }
985 return error;
986 }
987
988 errno_t
989 ctl_enqueuembuf(kern_ctl_ref kctlref, u_int32_t unit, struct mbuf *m,
990 u_int32_t flags)
991 {
992 struct socket *so;
993 errno_t error = 0;
994 int len = m->m_pkthdr.len;
995 u_int32_t kctlflags;
996
997 so = kcb_find_socket(kctlref, unit, &kctlflags);
998 if (so == NULL) {
999 return EINVAL;
1000 }
1001
1002 if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
1003 error = ENOBUFS;
1004 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
1005 goto bye;
1006 }
1007 if ((flags & CTL_DATA_EOR)) {
1008 m->m_flags |= M_EOR;
1009 }
1010
1011 so_recv_data_stat(so, m, 0);
1012 if (sbappend_nodrop(&so->so_rcv, m) != 0) {
1013 if ((flags & CTL_DATA_NOWAKEUP) == 0) {
1014 sorwakeup(so);
1015 }
1016 } else {
1017 error = ENOBUFS;
1018 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
1019 }
1020 bye:
1021 if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) {
1022 printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
1023 __func__, error, len,
1024 so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);
1025 }
1026
1027 socket_unlock(so, 1);
1028 if (error != 0) {
1029 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
1030 }
1031
1032 return error;
1033 }
1034
1035 /*
1036 * Compute space occupied by mbuf like sbappendrecord
1037 */
1038 static int
1039 m_space(struct mbuf *m)
1040 {
1041 int space = 0;
1042 struct mbuf *nxt;
1043
1044 for (nxt = m; nxt != NULL; nxt = nxt->m_next) {
1045 space += nxt->m_len;
1046 }
1047
1048 return space;
1049 }
1050
1051 errno_t
1052 ctl_enqueuembuf_list(void *kctlref, u_int32_t unit, struct mbuf *m_list,
1053 u_int32_t flags, struct mbuf **m_remain)
1054 {
1055 struct socket *so = NULL;
1056 errno_t error = 0;
1057 struct mbuf *m, *nextpkt;
1058 int needwakeup = 0;
1059 int len = 0;
1060 u_int32_t kctlflags;
1061
1062 /*
1063 * Need to point to the beginning of the list in case of early exit
1064 */
1065 m = m_list;
1066
1067 /*
1068 * kcb_find_socket takes the socket lock with a reference
1069 */
1070 so = kcb_find_socket(kctlref, unit, &kctlflags);
1071 if (so == NULL) {
1072 error = EINVAL;
1073 goto done;
1074 }
1075
1076 if (kctlflags & CTL_FLAG_REG_SOCK_STREAM) {
1077 error = EOPNOTSUPP;
1078 goto done;
1079 }
1080 if (flags & CTL_DATA_EOR) {
1081 error = EINVAL;
1082 goto done;
1083 }
1084
1085 for (m = m_list; m != NULL; m = nextpkt) {
1086 nextpkt = m->m_nextpkt;
1087
1088 if (m->m_pkthdr.len == 0 && ctl_debug) {
1089 printf("%s: %llx m_pkthdr.len is 0\n",
1090 __func__, (uint64_t)VM_KERNEL_ADDRPERM(m));
1091 }
1092
1093 /*
1094 * The mbuf is either appended or freed by sbappendrecord()
1095 * so its contents cannot be relied upon after the call
1096 */
1097 len = m_space(m);
1098 if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
1099 error = ENOBUFS;
1100 OSIncrementAtomic64(
1101 (SInt64 *)&kctlstat.kcs_enqueue_fullsock);
1102 break;
1103 } else {
1104 /*
1105 * Unlink from the list, m is on its own
1106 */
1107 m->m_nextpkt = NULL;
1108 so_recv_data_stat(so, m, 0);
1109 if (sbappendrecord_nodrop(&so->so_rcv, m) != 0) {
1110 needwakeup = 1;
1111 } else {
1112 /*
1113 * We free or return the remaining
1114 * mbufs in the list
1115 */
1116 m = nextpkt;
1117 error = ENOBUFS;
1118 OSIncrementAtomic64(
1119 (SInt64 *)&kctlstat.kcs_enqueue_fullsock);
1120 break;
1121 }
1122 }
1123 }
1124 if (needwakeup && (flags & CTL_DATA_NOWAKEUP) == 0) {
1125 sorwakeup(so);
1126 }
1127
1128 done:
1129 if (so != NULL) {
1130 if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) {
1131 printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
1132 __func__, error, len,
1133 so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);
1134 }
1135
1136 socket_unlock(so, 1);
1137 }
1138 if (m_remain) {
1139 *m_remain = m;
1140
1141 if (m != NULL && socket_debug && so != NULL &&
1142 (so->so_options & SO_DEBUG)) {
1143 struct mbuf *n;
1144
1145 printf("%s m_list %llx\n", __func__,
1146 (uint64_t) VM_KERNEL_ADDRPERM(m_list));
1147 for (n = m; n != NULL; n = n->m_nextpkt) {
1148 printf(" remain %llx m_next %llx\n",
1149 (uint64_t) VM_KERNEL_ADDRPERM(n),
1150 (uint64_t) VM_KERNEL_ADDRPERM(n->m_next));
1151 }
1152 }
1153 } else {
1154 if (m != NULL) {
1155 m_freem_list(m);
1156 }
1157 }
1158 if (error != 0) {
1159 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
1160 }
1161 return error;
1162 }
1163
1164 errno_t
1165 ctl_enqueuedata(void *kctlref, u_int32_t unit, void *data, size_t len,
1166 u_int32_t flags)
1167 {
1168 struct socket *so;
1169 struct mbuf *m;
1170 errno_t error = 0;
1171 unsigned int num_needed;
1172 struct mbuf *n;
1173 size_t curlen = 0;
1174 u_int32_t kctlflags;
1175
1176 so = kcb_find_socket(kctlref, unit, &kctlflags);
1177 if (so == NULL) {
1178 return EINVAL;
1179 }
1180
1181 if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
1182 error = ENOBUFS;
1183 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
1184 goto bye;
1185 }
1186
1187 num_needed = 1;
1188 m = m_allocpacket_internal(&num_needed, len, NULL, M_NOWAIT, 1, 0);
1189 if (m == NULL) {
1190 kctlstat.kcs_enqdata_mb_alloc_fail++;
1191 if (ctl_debug) {
1192 printf("%s: m_allocpacket_internal(%lu) failed\n",
1193 __func__, len);
1194 }
1195 error = ENOMEM;
1196 goto bye;
1197 }
1198
1199 for (n = m; n != NULL; n = n->m_next) {
1200 size_t mlen = mbuf_maxlen(n);
1201
1202 if (mlen + curlen > len) {
1203 mlen = len - curlen;
1204 }
1205 n->m_len = (int32_t)mlen;
1206 bcopy((char *)data + curlen, n->m_data, mlen);
1207 curlen += mlen;
1208 }
1209 mbuf_pkthdr_setlen(m, curlen);
1210
1211 if ((flags & CTL_DATA_EOR)) {
1212 m->m_flags |= M_EOR;
1213 }
1214 so_recv_data_stat(so, m, 0);
1215 /*
1216 * No need to call the "nodrop" variant of sbappend
1217 * because the mbuf is local to the scope of the function
1218 */
1219 if (sbappend(&so->so_rcv, m) != 0) {
1220 if ((flags & CTL_DATA_NOWAKEUP) == 0) {
1221 sorwakeup(so);
1222 }
1223 } else {
1224 kctlstat.kcs_enqdata_sbappend_fail++;
1225 error = ENOBUFS;
1226 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
1227 }
1228
1229 bye:
1230 if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) {
1231 printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
1232 __func__, error, (int)len,
1233 so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);
1234 }
1235
1236 socket_unlock(so, 1);
1237 if (error != 0) {
1238 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
1239 }
1240 return error;
1241 }
1242
1243 errno_t
1244 ctl_getenqueuepacketcount(kern_ctl_ref kctlref, u_int32_t unit, u_int32_t *pcnt)
1245 {
1246 struct socket *so;
1247 u_int32_t cnt;
1248 struct mbuf *m1;
1249
1250 if (pcnt == NULL) {
1251 return EINVAL;
1252 }
1253
1254 so = kcb_find_socket(kctlref, unit, NULL);
1255 if (so == NULL) {
1256 return EINVAL;
1257 }
1258
1259 cnt = 0;
1260 m1 = so->so_rcv.sb_mb;
1261 while (m1 != NULL) {
1262 if (m1->m_type == MT_DATA ||
1263 m1->m_type == MT_HEADER ||
1264 m1->m_type == MT_OOBDATA) {
1265 cnt += 1;
1266 }
1267 m1 = m1->m_nextpkt;
1268 }
1269 *pcnt = cnt;
1270
1271 socket_unlock(so, 1);
1272
1273 return 0;
1274 }
1275
1276 errno_t
1277 ctl_getenqueuespace(kern_ctl_ref kctlref, u_int32_t unit, size_t *space)
1278 {
1279 struct socket *so;
1280 long avail;
1281
1282 if (space == NULL) {
1283 return EINVAL;
1284 }
1285
1286 so = kcb_find_socket(kctlref, unit, NULL);
1287 if (so == NULL) {
1288 return EINVAL;
1289 }
1290
1291 avail = sbspace(&so->so_rcv);
1292 *space = (avail < 0) ? 0 : avail;
1293 socket_unlock(so, 1);
1294
1295 return 0;
1296 }
1297
1298 errno_t
1299 ctl_getenqueuereadable(kern_ctl_ref kctlref, u_int32_t unit,
1300 u_int32_t *difference)
1301 {
1302 struct socket *so;
1303
1304 if (difference == NULL) {
1305 return EINVAL;
1306 }
1307
1308 so = kcb_find_socket(kctlref, unit, NULL);
1309 if (so == NULL) {
1310 return EINVAL;
1311 }
1312
1313 if (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat) {
1314 *difference = 0;
1315 } else {
1316 *difference = (so->so_rcv.sb_lowat - so->so_rcv.sb_cc);
1317 }
1318 socket_unlock(so, 1);
1319
1320 return 0;
1321 }
1322
1323 static int
1324 ctl_ctloutput(struct socket *so, struct sockopt *sopt)
1325 {
1326 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
1327 struct kctl *kctl;
1328 int error = 0;
1329 void *data = NULL;
1330 size_t data_len = 0;
1331 size_t len;
1332
1333 if (sopt->sopt_level != SYSPROTO_CONTROL) {
1334 return EINVAL;
1335 }
1336
1337 if (kcb == NULL) { /* sanity check */
1338 return ENOTCONN;
1339 }
1340
1341 if ((kctl = kcb->kctl) == NULL) {
1342 return EINVAL;
1343 }
1344
1345 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
1346 ctl_kcb_increment_use_count(kcb, mtx_held);
1347
1348 switch (sopt->sopt_dir) {
1349 case SOPT_SET:
1350 if (kctl->setopt == NULL) {
1351 error = ENOTSUP;
1352 goto out;
1353 }
1354 if (sopt->sopt_valsize != 0) {
1355 data_len = sopt->sopt_valsize;
1356 data = kheap_alloc(KHEAP_TEMP, data_len, Z_WAITOK | Z_ZERO);
1357 if (data == NULL) {
1358 data_len = 0;
1359 error = ENOMEM;
1360 goto out;
1361 }
1362 error = sooptcopyin(sopt, data,
1363 sopt->sopt_valsize, sopt->sopt_valsize);
1364 }
1365 if (error == 0) {
1366 socket_unlock(so, 0);
1367 error = (*kctl->setopt)(kctl->kctlref,
1368 kcb->sac.sc_unit, kcb->userdata, sopt->sopt_name,
1369 data, sopt->sopt_valsize);
1370 socket_lock(so, 0);
1371 }
1372
1373 kheap_free(KHEAP_TEMP, data, data_len);
1374 break;
1375
1376 case SOPT_GET:
1377 if (kctl->getopt == NULL) {
1378 error = ENOTSUP;
1379 goto out;
1380 }
1381
1382 if (sopt->sopt_valsize && sopt->sopt_val) {
1383 data_len = sopt->sopt_valsize;
1384 data = kheap_alloc(KHEAP_TEMP, data_len, Z_WAITOK | Z_ZERO);
1385 if (data == NULL) {
1386 data_len = 0;
1387 error = ENOMEM;
1388 goto out;
1389 }
1390 /*
1391 * 4108337 - copy user data in case the
1392 * kernel control needs it
1393 */
1394 error = sooptcopyin(sopt, data,
1395 sopt->sopt_valsize, sopt->sopt_valsize);
1396 }
1397
1398 if (error == 0) {
1399 len = sopt->sopt_valsize;
1400 socket_unlock(so, 0);
1401 error = (*kctl->getopt)(kctl->kctlref, kcb->sac.sc_unit,
1402 kcb->userdata, sopt->sopt_name,
1403 data, &len);
1404 if (data != NULL && len > sopt->sopt_valsize) {
1405 panic_plain("ctl_ctloutput: ctl %s returned "
1406 "len (%lu) > sopt_valsize (%lu)\n",
1407 kcb->kctl->name, len,
1408 sopt->sopt_valsize);
1409 }
1410 socket_lock(so, 0);
1411 if (error == 0) {
1412 if (data != NULL) {
1413 error = sooptcopyout(sopt, data, len);
1414 } else {
1415 sopt->sopt_valsize = len;
1416 }
1417 }
1418 }
1419
1420 kheap_free(KHEAP_TEMP, data, data_len);
1421 break;
1422 }
1423
1424 out:
1425 ctl_kcb_decrement_use_count(kcb);
1426 return error;
1427 }
1428
1429 static int
1430 ctl_ioctl(struct socket *so, u_long cmd, caddr_t data,
1431 struct ifnet *ifp, struct proc *p)
1432 {
1433 #pragma unused(so, ifp, p)
1434 int error = ENOTSUP;
1435
1436 switch (cmd) {
1437 /* get the number of controllers */
1438 case CTLIOCGCOUNT: {
1439 struct kctl *kctl;
1440 u_int32_t n = 0;
1441
1442 lck_mtx_lock(&ctl_mtx);
1443 TAILQ_FOREACH(kctl, &ctl_head, next)
1444 n++;
1445 lck_mtx_unlock(&ctl_mtx);
1446
1447 bcopy(&n, data, sizeof(n));
1448 error = 0;
1449 break;
1450 }
1451 case CTLIOCGINFO: {
1452 struct ctl_info ctl_info;
1453 struct kctl *kctl = 0;
1454 size_t name_len;
1455
1456 bcopy(data, &ctl_info, sizeof(ctl_info));
1457 name_len = strnlen(ctl_info.ctl_name, MAX_KCTL_NAME);
1458
1459 if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) {
1460 error = EINVAL;
1461 break;
1462 }
1463 lck_mtx_lock(&ctl_mtx);
1464 kctl = ctl_find_by_name(ctl_info.ctl_name);
1465 lck_mtx_unlock(&ctl_mtx);
1466 if (kctl == 0) {
1467 error = ENOENT;
1468 break;
1469 }
1470 ctl_info.ctl_id = kctl->id;
1471 bcopy(&ctl_info, data, sizeof(ctl_info));
1472 error = 0;
1473 break;
1474 }
1475
1476 /* add controls to get list of NKEs */
1477 }
1478
1479 return error;
1480 }
1481
1482 static void
1483 kctl_tbl_grow(void)
1484 {
1485 struct kctl **new_table;
1486 uintptr_t new_size;
1487
1488 lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_OWNED);
1489
1490 if (kctl_tbl_growing) {
1491 /* Another thread is allocating */
1492 kctl_tbl_growing_waiting++;
1493
1494 do {
1495 (void) msleep((caddr_t) &kctl_tbl_growing, &ctl_mtx,
1496 PSOCK | PCATCH, "kctl_tbl_growing", 0);
1497 } while (kctl_tbl_growing);
1498 kctl_tbl_growing_waiting--;
1499 }
1500 /* Another thread grew the table */
1501 if (kctl_table != NULL && kctl_tbl_count < kctl_tbl_size) {
1502 return;
1503 }
1504
1505 /* Verify we have a sane size */
1506 if (kctl_tbl_size + KCTL_TBL_INC >= UINT16_MAX) {
1507 kctlstat.kcs_tbl_size_too_big++;
1508 if (ctl_debug) {
1509 printf("%s kctl_tbl_size %lu too big\n",
1510 __func__, kctl_tbl_size);
1511 }
1512 return;
1513 }
1514 kctl_tbl_growing = 1;
1515
1516 new_size = kctl_tbl_size + KCTL_TBL_INC;
1517
1518 lck_mtx_unlock(&ctl_mtx);
1519 new_table = kheap_alloc(KHEAP_DEFAULT, sizeof(struct kctl *) * new_size,
1520 Z_WAITOK | Z_ZERO);
1521 lck_mtx_lock(&ctl_mtx);
1522
1523 if (new_table != NULL) {
1524 if (kctl_table != NULL) {
1525 bcopy(kctl_table, new_table,
1526 kctl_tbl_size * sizeof(struct kctl *));
1527
1528 kheap_free(KHEAP_DEFAULT, kctl_table,
1529 sizeof(struct kctl *) * kctl_tbl_size);
1530 }
1531 kctl_table = new_table;
1532 kctl_tbl_size = new_size;
1533 }
1534
1535 kctl_tbl_growing = 0;
1536
1537 if (kctl_tbl_growing_waiting) {
1538 wakeup(&kctl_tbl_growing);
1539 }
1540 }
1541
1542 #define KCTLREF_INDEX_MASK 0x0000FFFF
1543 #define KCTLREF_GENCNT_MASK 0xFFFF0000
1544 #define KCTLREF_GENCNT_SHIFT 16
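
/*
 * A kern_ctl_ref is an opaque value packed as follows (see
 * kctl_make_ref/kctl_from_ref): the low 16 bits hold the table index
 * plus one, and the high 16 bits hold a generation count used as a
 * salt so that a stale reference is detected after deregister.
 */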
1545
1546 static kern_ctl_ref
1547 kctl_make_ref(struct kctl *kctl)
1548 {
1549 uintptr_t i;
1550
1551 lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_OWNED);
1552
1553 if (kctl_tbl_count >= kctl_tbl_size) {
1554 kctl_tbl_grow();
1555 }
1556
1557 kctl->kctlref = NULL;
1558 for (i = 0; i < kctl_tbl_size; i++) {
1559 if (kctl_table[i] == NULL) {
1560 uintptr_t ref;
1561
1562 /*
1563 * Reference is index plus one
1564 */
1565 kctl_ref_gencnt += 1;
1566
1567 /*
1568 * Add generation count as salt to reference to prevent
1569 * use after deregister
1570 */
1571 ref = ((kctl_ref_gencnt << KCTLREF_GENCNT_SHIFT) &
1572 KCTLREF_GENCNT_MASK) +
1573 ((i + 1) & KCTLREF_INDEX_MASK);
1574
1575 kctl->kctlref = (void *)(ref);
1576 kctl_table[i] = kctl;
1577 kctl_tbl_count++;
1578 break;
1579 }
1580 }
1581
1582 if (kctl->kctlref == NULL) {
1583 panic("%s no space in table", __func__);
1584 }
1585
1586 if (ctl_debug > 0) {
1587 printf("%s %p for %p\n",
1588 __func__, kctl->kctlref, kctl);
1589 }
1590
1591 return kctl->kctlref;
1592 }
1593
1594 static void
1595 kctl_delete_ref(kern_ctl_ref kctlref)
1596 {
1597 /*
1598 * Reference is index plus one
1599 */
1600 uintptr_t i = (((uintptr_t)kctlref) & KCTLREF_INDEX_MASK) - 1;
1601
1602 lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_OWNED);
1603
1604 if (i < kctl_tbl_size) {
1605 struct kctl *kctl = kctl_table[i];
1606
1607 if (kctl->kctlref == kctlref) {
1608 kctl_table[i] = NULL;
1609 kctl_tbl_count--;
1610 } else {
1611 kctlstat.kcs_bad_kctlref++;
1612 }
1613 } else {
1614 kctlstat.kcs_bad_kctlref++;
1615 }
1616 }
1617
1618 static struct kctl *
1619 kctl_from_ref(kern_ctl_ref kctlref)
1620 {
1621 /*
1622 * Reference is index plus one
1623 */
1624 uintptr_t i = (((uintptr_t)kctlref) & KCTLREF_INDEX_MASK) - 1;
1625 struct kctl *kctl = NULL;
1626
1627 lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_OWNED);
1628
1629 if (i >= kctl_tbl_size) {
1630 kctlstat.kcs_bad_kctlref++;
1631 return NULL;
1632 }
1633 kctl = kctl_table[i];
1634 if (kctl->kctlref != kctlref) {
1635 kctlstat.kcs_bad_kctlref++;
1636 return NULL;
1637 }
1638 return kctl;
1639 }
1640
1641 /*
1642 * Register/unregister an NKE
1643 */
1644 errno_t
1645 ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref)
1646 {
1647 struct kctl *kctl = NULL;
1648 struct kctl *kctl_next = NULL;
1649 u_int32_t id = 1;
1650 size_t name_len;
1651 int is_extended = 0;
1652 int is_setup = 0;
1653
1654 if (userkctl == NULL) { /* sanity check */
1655 return EINVAL;
1656 }
1657 if (userkctl->ctl_connect == NULL) {
1658 return EINVAL;
1659 }
1660 name_len = strlen(userkctl->ctl_name);
1661 if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) {
1662 return EINVAL;
1663 }
1664
1665 kctl = kheap_alloc(KHEAP_DEFAULT, sizeof(struct kctl), Z_WAITOK | Z_ZERO);
1666 if (kctl == NULL) {
1667 return ENOMEM;
1668 }
1669
1670 lck_mtx_lock(&ctl_mtx);
1671
1672 if (kctl_make_ref(kctl) == NULL) {
1673 lck_mtx_unlock(&ctl_mtx);
1674 kheap_free(KHEAP_DEFAULT, kctl, sizeof(struct kctl));
1675 return ENOMEM;
1676 }
1677
1678 /*
1679 * Kernel Control IDs
1680 *
1681 * CTL_FLAG_REG_ID_UNIT indicates the control ID and unit number are
1682 * static. If they do not exist, add them to the list in order. If the
1683 * flag is not set, we must find a new unique value. We assume the
1684 * list is in order. We find the last item in the list and add one. If
1685 * this leads to wrapping the id around, we start at the front of the
1686 * list and look for a gap.
1687 */
1688
1689 if ((userkctl->ctl_flags & CTL_FLAG_REG_ID_UNIT) == 0) {
1690 /* Must dynamically assign an unused ID */
1691
1692 /* Verify the same name isn't already registered */
1693 if (ctl_find_by_name(userkctl->ctl_name) != NULL) {
1694 kctl_delete_ref(kctl->kctlref);
1695 lck_mtx_unlock(&ctl_mtx);
1696 kheap_free(KHEAP_DEFAULT, kctl, sizeof(struct kctl));
1697 return EEXIST;
1698 }
1699
1700 /* Start with 1 in case the list is empty */
1701 id = 1;
1702 kctl_next = TAILQ_LAST(&ctl_head, kctl_list);
1703
1704 if (kctl_next != NULL) {
1705 /* List was not empty, add one to the last item */
1706 id = kctl_next->id + 1;
1707 kctl_next = NULL;
1708
1709 /*
1710 * If this wrapped the id number, start looking at
1711 * the front of the list for an unused id.
1712 */
1713 if (id == 0) {
1714 /* Find the next unused ID */
1715 id = 1;
1716
1717 TAILQ_FOREACH(kctl_next, &ctl_head, next) {
1718 if (kctl_next->id > id) {
1719 /* We found a gap */
1720 break;
1721 }
1722
1723 id = kctl_next->id + 1;
1724 }
1725 }
1726 }
1727
1728 userkctl->ctl_id = id;
1729 kctl->id = id;
1730 kctl->reg_unit = -1;
1731 } else {
1732 TAILQ_FOREACH(kctl_next, &ctl_head, next) {
1733 if (kctl_next->id > userkctl->ctl_id) {
1734 break;
1735 }
1736 }
1737
1738 if (ctl_find_by_id_unit(userkctl->ctl_id, userkctl->ctl_unit)) {
1739 kctl_delete_ref(kctl->kctlref);
1740 lck_mtx_unlock(&ctl_mtx);
1741 kheap_free(KHEAP_DEFAULT, kctl, sizeof(struct kctl));
1742 return EEXIST;
1743 }
1744 kctl->id = userkctl->ctl_id;
1745 kctl->reg_unit = userkctl->ctl_unit;
1746 }
1747
1748 is_extended = (userkctl->ctl_flags & CTL_FLAG_REG_EXTENDED);
1749 is_setup = (userkctl->ctl_flags & CTL_FLAG_REG_SETUP);
1750
1751 strlcpy(kctl->name, userkctl->ctl_name, MAX_KCTL_NAME);
1752 kctl->flags = userkctl->ctl_flags;
1753
1754 /*
1755 * Let the caller know the default send and receive sizes
1756 */
1757 if (userkctl->ctl_sendsize == 0) {
1758 kctl->sendbufsize = CTL_SENDSIZE;
1759 userkctl->ctl_sendsize = kctl->sendbufsize;
1760 } else {
1761 kctl->sendbufsize = userkctl->ctl_sendsize;
1762 }
1763 if (userkctl->ctl_recvsize == 0) {
1764 kctl->recvbufsize = CTL_RECVSIZE;
1765 userkctl->ctl_recvsize = kctl->recvbufsize;
1766 } else {
1767 kctl->recvbufsize = userkctl->ctl_recvsize;
1768 }
1769
1770 if (is_setup) {
1771 kctl->setup = userkctl->ctl_setup;
1772 }
1773 kctl->bind = userkctl->ctl_bind;
1774 kctl->connect = userkctl->ctl_connect;
1775 kctl->disconnect = userkctl->ctl_disconnect;
1776 kctl->send = userkctl->ctl_send;
1777 kctl->setopt = userkctl->ctl_setopt;
1778 kctl->getopt = userkctl->ctl_getopt;
1779 if (is_extended) {
1780 kctl->rcvd = userkctl->ctl_rcvd;
1781 kctl->send_list = userkctl->ctl_send_list;
1782 }
1783
1784 TAILQ_INIT(&kctl->kcb_head);
1785
1786 if (kctl_next) {
1787 TAILQ_INSERT_BEFORE(kctl_next, kctl, next);
1788 } else {
1789 TAILQ_INSERT_TAIL(&ctl_head, kctl, next);
1790 }
1791
1792 kctlstat.kcs_reg_count++;
1793 kctlstat.kcs_gencnt++;
1794
1795 lck_mtx_unlock(&ctl_mtx);
1796
1797 *kctlref = kctl->kctlref;
1798
1799 ctl_post_msg(KEV_CTL_REGISTERED, kctl->id);
1800 return 0;
1801 }
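
/*
 * Illustrative kext-side sketch (not part of this file): registering a
 * control with ctl_register().  The name and callback bodies below are
 * placeholders; only ctl_connect is mandatory (see the check at the top
 * of ctl_register above).
 *
 *	static errno_t
 *	my_connect(kern_ctl_ref ref, struct sockaddr_ctl *sac, void **unitinfo)
 *	{
 *		*unitinfo = NULL;       // per-client cookie
 *		return 0;
 *	}
 *
 *	struct kern_ctl_reg reg;
 *	kern_ctl_ref ref;
 *
 *	bzero(&reg, sizeof(reg));
 *	strlcpy(reg.ctl_name, "com.example.mykext", sizeof(reg.ctl_name));
 *	reg.ctl_connect = my_connect;
 *	errno_t err = ctl_register(&reg, &ref);
 *
 * On success the kext can later push data to a connected client with
 * ctl_enqueuedata(ref, unit, buf, len, 0) and must eventually call
 * ctl_deregister(ref).
 */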
1802
1803 errno_t
1804 ctl_deregister(void *kctlref)
1805 {
1806 struct kctl *kctl;
1807
1808 lck_mtx_lock(&ctl_mtx);
1809 if ((kctl = kctl_from_ref(kctlref)) == NULL) {
1810 kctlstat.kcs_bad_kctlref++;
1811 lck_mtx_unlock(&ctl_mtx);
1812 if (ctl_debug != 0) {
1813 printf("%s invalid kctlref %p\n",
1814 __func__, kctlref);
1815 }
1816 return EINVAL;
1817 }
1818
1819 if (!TAILQ_EMPTY(&kctl->kcb_head)) {
1820 lck_mtx_unlock(&ctl_mtx);
1821 return EBUSY;
1822 }
1823
1824 TAILQ_REMOVE(&ctl_head, kctl, next);
1825
1826 kctlstat.kcs_reg_count--;
1827 kctlstat.kcs_gencnt++;
1828
1829 kctl_delete_ref(kctl->kctlref);
1830 lck_mtx_unlock(&ctl_mtx);
1831
1832 ctl_post_msg(KEV_CTL_DEREGISTERED, kctl->id);
1833 kheap_free(KHEAP_DEFAULT, kctl, sizeof(struct kctl));
1834 return 0;
1835 }
1836
1837 /*
1838 * Must be called with the global ctl_mtx lock taken
1839 */
1840 static struct kctl *
1841 ctl_find_by_name(const char *name)
1842 {
1843 struct kctl *kctl;
1844
1845 lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_OWNED);
1846
1847 TAILQ_FOREACH(kctl, &ctl_head, next)
1848 if (strncmp(kctl->name, name, sizeof(kctl->name)) == 0) {
1849 return kctl;
1850 }
1851
1852 return NULL;
1853 }
1854
1855 u_int32_t
1856 ctl_id_by_name(const char *name)
1857 {
1858 u_int32_t ctl_id = 0;
1859 struct kctl *kctl;
1860
1861 lck_mtx_lock(&ctl_mtx);
1862 kctl = ctl_find_by_name(name);
1863 if (kctl) {
1864 ctl_id = kctl->id;
1865 }
1866 lck_mtx_unlock(&ctl_mtx);
1867
1868 return ctl_id;
1869 }
1870
1871 errno_t
1872 ctl_name_by_id(u_int32_t id, char *out_name, size_t maxsize)
1873 {
1874 int found = 0;
1875 struct kctl *kctl;
1876
1877 lck_mtx_lock(&ctl_mtx);
1878 TAILQ_FOREACH(kctl, &ctl_head, next) {
1879 if (kctl->id == id) {
1880 break;
1881 }
1882 }
1883
1884 if (kctl) {
1885 if (maxsize > MAX_KCTL_NAME) {
1886 maxsize = MAX_KCTL_NAME;
1887 }
1888 strlcpy(out_name, kctl->name, maxsize);
1889 found = 1;
1890 }
1891 lck_mtx_unlock(&ctl_mtx);
1892
1893 return found ? 0 : ENOENT;
1894 }
1895
1896 /*
1897 * Must be called with the global ctl_mtx lock taken
1898 *
1899 */
1900 static struct kctl *
1901 ctl_find_by_id_unit(u_int32_t id, u_int32_t unit)
1902 {
1903 struct kctl *kctl;
1904
1905 lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_OWNED);
1906
1907 TAILQ_FOREACH(kctl, &ctl_head, next) {
1908 if (kctl->id == id && (kctl->flags & CTL_FLAG_REG_ID_UNIT) == 0) {
1909 return kctl;
1910 } else if (kctl->id == id && kctl->reg_unit == unit) {
1911 return kctl;
1912 }
1913 }
1914 return NULL;
1915 }
1916
1917 /*
1918 * Must be called with kernel controller lock taken
1919 */
1920 static struct ctl_cb *
1921 kcb_find(struct kctl *kctl, u_int32_t unit)
1922 {
1923 struct ctl_cb *kcb;
1924
1925 lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_OWNED);
1926
1927 TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
1928 if (kcb->sac.sc_unit == unit) {
1929 return kcb;
1930 }
1931
1932 return NULL;
1933 }
1934
1935 static struct socket *
1936 kcb_find_socket(kern_ctl_ref kctlref, u_int32_t unit, u_int32_t *kctlflags)
1937 {
1938 struct socket *so = NULL;
1939 struct ctl_cb *kcb;
1940 void *lr_saved;
1941 struct kctl *kctl;
1942 int i;
1943
1944 lr_saved = __builtin_return_address(0);
1945
1946 lck_mtx_lock(&ctl_mtx);
1947 /*
1948 * First validate the kctlref
1949 */
1950 if ((kctl = kctl_from_ref(kctlref)) == NULL) {
1951 kctlstat.kcs_bad_kctlref++;
1952 lck_mtx_unlock(&ctl_mtx);
1953 if (ctl_debug != 0) {
1954 printf("%s invalid kctlref %p\n",
1955 __func__, kctlref);
1956 }
1957 return NULL;
1958 }
1959
1960 kcb = kcb_find(kctl, unit);
1961 if (kcb == NULL || kcb->kctl != kctl || (so = kcb->so) == NULL) {
1962 lck_mtx_unlock(&ctl_mtx);
1963 return NULL;
1964 }
1965 /*
1966 * This prevents the socket from being closed
1967 */
1968 kcb->usecount++;
1969 /*
1970 * Respect lock ordering: socket before ctl_mtx
1971 */
1972 lck_mtx_unlock(&ctl_mtx);
1973
1974 socket_lock(so, 1);
1975 /*
1976 * The socket lock history is more useful if we store
1977 * the address of the caller.
1978 */
1979 i = (so->next_lock_lr + SO_LCKDBG_MAX - 1) % SO_LCKDBG_MAX;
1980 so->lock_lr[i] = lr_saved;
1981
1982 lck_mtx_lock(&ctl_mtx);
1983
1984 if ((kctl = kctl_from_ref(kctlref)) == NULL || kcb->kctl == NULL) {
1985 lck_mtx_unlock(&ctl_mtx);
1986 socket_unlock(so, 1);
1987 so = NULL;
1988 lck_mtx_lock(&ctl_mtx);
1989 } else if (kctlflags != NULL) {
1990 *kctlflags = kctl->flags;
1991 }
1992
1993 kcb->usecount--;
1994 if (kcb->usecount == 0) {
1995 wakeup((event_t)&kcb->usecount);
1996 }
1997
1998 lck_mtx_unlock(&ctl_mtx);
1999
2000 return so;
2001 }
2002
2003 static void
2004 ctl_post_msg(u_int32_t event_code, u_int32_t id)
2005 {
2006 struct ctl_event_data ctl_ev_data;
2007 struct kev_msg ev_msg;
2008
2009 lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_NOTOWNED);
2010
2011 bzero(&ev_msg, sizeof(struct kev_msg));
2012 ev_msg.vendor_code = KEV_VENDOR_APPLE;
2013
2014 ev_msg.kev_class = KEV_SYSTEM_CLASS;
2015 ev_msg.kev_subclass = KEV_CTL_SUBCLASS;
2016 ev_msg.event_code = event_code;
2017
2018 /* common nke subclass data */
2019 bzero(&ctl_ev_data, sizeof(ctl_ev_data));
2020 ctl_ev_data.ctl_id = id;
2021 ev_msg.dv[0].data_ptr = &ctl_ev_data;
2022 ev_msg.dv[0].data_length = sizeof(ctl_ev_data);
2023
2024 ev_msg.dv[1].data_length = 0;
2025
2026 kev_post_msg(&ev_msg);
2027 }
2028
2029 static int
2030 ctl_lock(struct socket *so, int refcount, void *lr)
2031 {
2032 void *lr_saved;
2033
2034 if (lr == NULL) {
2035 lr_saved = __builtin_return_address(0);
2036 } else {
2037 lr_saved = lr;
2038 }
2039
2040 if (so->so_pcb != NULL) {
2041 lck_mtx_lock(&((struct ctl_cb *)so->so_pcb)->mtx);
2042 } else {
2043 panic("ctl_lock: so=%p NO PCB! lr=%p lrh= %s\n",
2044 so, lr_saved, solockhistory_nr(so));
2045 /* NOTREACHED */
2046 }
2047
2048 if (so->so_usecount < 0) {
2049 panic("ctl_lock: so=%p so_pcb=%p lr=%p ref=%x lrh= %s\n",
2050 so, so->so_pcb, lr_saved, so->so_usecount,
2051 solockhistory_nr(so));
2052 /* NOTREACHED */
2053 }
2054
2055 if (refcount) {
2056 so->so_usecount++;
2057 }
2058
2059 so->lock_lr[so->next_lock_lr] = lr_saved;
2060 so->next_lock_lr = (so->next_lock_lr + 1) % SO_LCKDBG_MAX;
2061 return 0;
2062 }
2063
2064 static int
2065 ctl_unlock(struct socket *so, int refcount, void *lr)
2066 {
2067 void *lr_saved;
2068 lck_mtx_t *mutex_held;
2069
2070 if (lr == NULL) {
2071 lr_saved = __builtin_return_address(0);
2072 } else {
2073 lr_saved = lr;
2074 }
2075
2076 #if (MORE_KCTLLOCK_DEBUG && (DEVELOPMENT || DEBUG))
2077 printf("ctl_unlock: so=%llx sopcb=%x lock=%llx ref=%u lr=%llx\n",
2078 (uint64_t)VM_KERNEL_ADDRPERM(so),
2079 (uint64_t)VM_KERNEL_ADDRPERM(so->so_pcb,
2080 (uint64_t)VM_KERNEL_ADDRPERM(&((struct ctl_cb *)so->so_pcb)->mtx),
2081 so->so_usecount, (uint64_t)VM_KERNEL_ADDRPERM(lr_saved));
2082 #endif /* (MORE_KCTLLOCK_DEBUG && (DEVELOPMENT || DEBUG)) */
2083 if (refcount) {
2084 so->so_usecount--;
2085 }
2086
2087 if (so->so_usecount < 0) {
2088 panic("ctl_unlock: so=%p usecount=%x lrh= %s\n",
2089 so, so->so_usecount, solockhistory_nr(so));
2090 /* NOTREACHED */
2091 }
2092 if (so->so_pcb == NULL) {
2093 panic("ctl_unlock: so=%p NO PCB usecount=%x lr=%p lrh= %s\n",
2094 so, so->so_usecount, (void *)lr_saved,
2095 solockhistory_nr(so));
2096 /* NOTREACHED */
2097 }
2098 mutex_held = &((struct ctl_cb *)so->so_pcb)->mtx;
2099
2100 lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
2101 so->unlock_lr[so->next_unlock_lr] = lr_saved;
2102 so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX;
2103 lck_mtx_unlock(mutex_held);
2104
2105 if (so->so_usecount == 0) {
2106 ctl_sofreelastref(so);
2107 }
2108
2109 return 0;
2110 }
2111
2112 static lck_mtx_t *
2113 ctl_getlock(struct socket *so, int flags)
2114 {
2115 #pragma unused(flags)
2116 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
2117
2118 if (so->so_pcb) {
2119 if (so->so_usecount < 0) {
2120 panic("ctl_getlock: so=%p usecount=%x lrh= %s\n",
2121 so, so->so_usecount, solockhistory_nr(so));
2122 }
2123 return &kcb->mtx;
2124 } else {
2125 panic("ctl_getlock: so=%p NULL NO so_pcb %s\n",
2126 so, solockhistory_nr(so));
2127 return so->so_proto->pr_domain->dom_mtx;
2128 }
2129 }
2130
2131 __private_extern__ int
2132 kctl_reg_list SYSCTL_HANDLER_ARGS
2133 {
2134 #pragma unused(oidp, arg1, arg2)
2135 int error = 0;
2136 u_int64_t i, n;
2137 struct xsystmgen xsg;
2138 void *buf = NULL;
2139 struct kctl *kctl;
2140 size_t item_size = ROUNDUP64(sizeof(struct xkctl_reg));
2141
2142 buf = kheap_alloc(KHEAP_TEMP, item_size, Z_WAITOK | Z_ZERO);
2143 if (buf == NULL) {
2144 return ENOMEM;
2145 }
2146
2147 lck_mtx_lock(&ctl_mtx);
2148
2149 n = kctlstat.kcs_reg_count;
2150
2151 if (req->oldptr == USER_ADDR_NULL) {
2152 req->oldidx = (size_t)(n + n / 8) * sizeof(struct xkctl_reg);
2153 goto done;
2154 }
2155 if (req->newptr != USER_ADDR_NULL) {
2156 error = EPERM;
2157 goto done;
2158 }
2159 bzero(&xsg, sizeof(xsg));
2160 xsg.xg_len = sizeof(xsg);
2161 xsg.xg_count = n;
2162 xsg.xg_gen = kctlstat.kcs_gencnt;
2163 xsg.xg_sogen = so_gencnt;
2164 error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
2165 if (error) {
2166 goto done;
2167 }
2168 /*
2169 * We are done if there is no pcb
2170 */
2171 if (n == 0) {
2172 goto done;
2173 }
2174
2175 for (i = 0, kctl = TAILQ_FIRST(&ctl_head);
2176 i < n && kctl != NULL;
2177 i++, kctl = TAILQ_NEXT(kctl, next)) {
2178 struct xkctl_reg *xkr = (struct xkctl_reg *)buf;
2179 struct ctl_cb *kcb;
2180 u_int32_t pcbcount = 0;
2181
2182 TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
2183 pcbcount++;
2184
2185 bzero(buf, item_size);
2186
2187 xkr->xkr_len = sizeof(struct xkctl_reg);
2188 xkr->xkr_kind = XSO_KCREG;
2189 xkr->xkr_id = kctl->id;
2190 xkr->xkr_reg_unit = kctl->reg_unit;
2191 xkr->xkr_flags = kctl->flags;
2192 xkr->xkr_kctlref = (uint64_t)(kctl->kctlref);
2193 xkr->xkr_recvbufsize = kctl->recvbufsize;
2194 xkr->xkr_sendbufsize = kctl->sendbufsize;
2195 xkr->xkr_lastunit = kctl->lastunit;
2196 xkr->xkr_pcbcount = pcbcount;
2197 xkr->xkr_connect = (uint64_t)VM_KERNEL_UNSLIDE(kctl->connect);
2198 xkr->xkr_disconnect =
2199 (uint64_t)VM_KERNEL_UNSLIDE(kctl->disconnect);
2200 xkr->xkr_send = (uint64_t)VM_KERNEL_UNSLIDE(kctl->send);
2201 xkr->xkr_send_list =
2202 (uint64_t)VM_KERNEL_UNSLIDE(kctl->send_list);
2203 xkr->xkr_setopt = (uint64_t)VM_KERNEL_UNSLIDE(kctl->setopt);
2204 xkr->xkr_getopt = (uint64_t)VM_KERNEL_UNSLIDE(kctl->getopt);
2205 xkr->xkr_rcvd = (uint64_t)VM_KERNEL_UNSLIDE(kctl->rcvd);
2206 strlcpy(xkr->xkr_name, kctl->name, sizeof(xkr->xkr_name));
2207
2208 error = SYSCTL_OUT(req, buf, item_size);
2209 }
2210
2211 if (error == 0) {
2212 /*
2213 * Give the user an updated idea of our state.
2214 * If the generation differs from what we told
2215 * her before, she knows that something happened
2216 * while we were processing this request, and it
2217 * might be necessary to retry.
2218 */
2219 bzero(&xsg, sizeof(xsg));
2220 xsg.xg_len = sizeof(xsg);
2221 xsg.xg_count = n;
2222 xsg.xg_gen = kctlstat.kcs_gencnt;
2223 xsg.xg_sogen = so_gencnt;
2224 error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
2225 if (error) {
2226 goto done;
2227 }
2228 }
2229
2230 done:
2231 lck_mtx_unlock(&ctl_mtx);
2232
2233 kheap_free(KHEAP_TEMP, buf, item_size);
2234
2235 return error;
2236 }
2237
2238 __private_extern__ int
2239 kctl_pcblist SYSCTL_HANDLER_ARGS
2240 {
2241 #pragma unused(oidp, arg1, arg2)
2242 int error = 0;
2243 u_int64_t n, i;
2244 struct xsystmgen xsg;
2245 void *buf = NULL;
2246 struct kctl *kctl;
2247 size_t item_size = ROUNDUP64(sizeof(struct xkctlpcb)) +
2248 ROUNDUP64(sizeof(struct xsocket_n)) +
2249 2 * ROUNDUP64(sizeof(struct xsockbuf_n)) +
2250 ROUNDUP64(sizeof(struct xsockstat_n));
2251
2252 buf = kheap_alloc(KHEAP_TEMP, item_size, Z_WAITOK | Z_ZERO);
2253 if (buf == NULL) {
2254 return ENOMEM;
2255 }
2256
2257 lck_mtx_lock(&ctl_mtx);
2258
2259 n = kctlstat.kcs_pcbcount;
2260
2261 if (req->oldptr == USER_ADDR_NULL) {
2262 req->oldidx = (size_t)(n + n / 8) * item_size;
2263 goto done;
2264 }
2265 if (req->newptr != USER_ADDR_NULL) {
2266 error = EPERM;
2267 goto done;
2268 }
2269 bzero(&xsg, sizeof(xsg));
2270 xsg.xg_len = sizeof(xsg);
2271 xsg.xg_count = n;
2272 xsg.xg_gen = kctlstat.kcs_gencnt;
2273 xsg.xg_sogen = so_gencnt;
2274 error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
2275 if (error) {
2276 goto done;
2277 }
2278 /*
2279 * We are done if there is no pcb
2280 */
2281 if (n == 0) {
2282 goto done;
2283 }
2284
2285 for (i = 0, kctl = TAILQ_FIRST(&ctl_head);
2286 i < n && kctl != NULL;
2287 kctl = TAILQ_NEXT(kctl, next)) {
2288 struct ctl_cb *kcb;
2289
2290 for (kcb = TAILQ_FIRST(&kctl->kcb_head);
2291 i < n && kcb != NULL;
2292 i++, kcb = TAILQ_NEXT(kcb, next)) {
2293 struct xkctlpcb *xk = (struct xkctlpcb *)buf;
2294 struct xsocket_n *xso = (struct xsocket_n *)
2295 ADVANCE64(xk, sizeof(*xk));
2296 struct xsockbuf_n *xsbrcv = (struct xsockbuf_n *)
2297 ADVANCE64(xso, sizeof(*xso));
2298 struct xsockbuf_n *xsbsnd = (struct xsockbuf_n *)
2299 ADVANCE64(xsbrcv, sizeof(*xsbrcv));
2300 struct xsockstat_n *xsostats = (struct xsockstat_n *)
2301 ADVANCE64(xsbsnd, sizeof(*xsbsnd));
2302
2303 bzero(buf, item_size);
2304
2305 xk->xkp_len = sizeof(struct xkctlpcb);
2306 xk->xkp_kind = XSO_KCB;
2307 xk->xkp_unit = kcb->sac.sc_unit;
2308 xk->xkp_kctpcb = (uint64_t)VM_KERNEL_ADDRPERM(kcb);
2309 xk->xkp_kctlref = (uint64_t)VM_KERNEL_ADDRPERM(kctl);
2310 xk->xkp_kctlid = kctl->id;
2311 strlcpy(xk->xkp_kctlname, kctl->name,
2312 sizeof(xk->xkp_kctlname));
2313
2314 sotoxsocket_n(kcb->so, xso);
2315 sbtoxsockbuf_n(kcb->so ?
2316 &kcb->so->so_rcv : NULL, xsbrcv);
2317 sbtoxsockbuf_n(kcb->so ?
2318 &kcb->so->so_snd : NULL, xsbsnd);
2319 sbtoxsockstat_n(kcb->so, xsostats);
2320
2321 error = SYSCTL_OUT(req, buf, item_size);
2322 }
2323 }
2324
2325 if (error == 0) {
2326 /*
2327 * Give the user an updated idea of our state.
2328 * If the generation differs from what we told
2329 * her before, she knows that something happened
2330 * while we were processing this request, and it
2331 * might be necessary to retry.
2332 */
2333 bzero(&xsg, sizeof(xsg));
2334 xsg.xg_len = sizeof(xsg);
2335 xsg.xg_count = n;
2336 xsg.xg_gen = kctlstat.kcs_gencnt;
2337 xsg.xg_sogen = so_gencnt;
2338 error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
2339 if (error) {
2340 goto done;
2341 }
2342 }
2343
2344 done:
2345 lck_mtx_unlock(&ctl_mtx);
2346
2347 kheap_free(KHEAP_TEMP, buf, item_size);
2348 return error;
2349 }
2350
2351 int
2352 kctl_getstat SYSCTL_HANDLER_ARGS
2353 {
2354 #pragma unused(oidp, arg1, arg2)
2355 int error = 0;
2356
2357 lck_mtx_lock(&ctl_mtx);
2358
2359 if (req->newptr != USER_ADDR_NULL) {
2360 error = EPERM;
2361 goto done;
2362 }
2363 if (req->oldptr == USER_ADDR_NULL) {
2364 req->oldidx = sizeof(struct kctlstat);
2365 goto done;
2366 }
2367
2368 error = SYSCTL_OUT(req, &kctlstat,
2369 MIN(sizeof(struct kctlstat), req->oldlen));
2370 done:
2371 lck_mtx_unlock(&ctl_mtx);
2372 return error;
2373 }
2374
2375 void
2376 kctl_fill_socketinfo(struct socket *so, struct socket_info *si)
2377 {
2378 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
2379 struct kern_ctl_info *kcsi =
2380 &si->soi_proto.pri_kern_ctl;
2381 struct kctl *kctl = kcb->kctl;
2382
2383 si->soi_kind = SOCKINFO_KERN_CTL;
2384
2385 if (kctl == 0) {
2386 return;
2387 }
2388
2389 kcsi->kcsi_id = kctl->id;
2390 kcsi->kcsi_reg_unit = kctl->reg_unit;
2391 kcsi->kcsi_flags = kctl->flags;
2392 kcsi->kcsi_recvbufsize = kctl->recvbufsize;
2393 kcsi->kcsi_sendbufsize = kctl->sendbufsize;
2394 kcsi->kcsi_unit = kcb->sac.sc_unit;
2395 strlcpy(kcsi->kcsi_name, kctl->name, MAX_KCTL_NAME);
2396 }