1 /*
2 * Copyright (c) 1999-2017 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /*
30 * Kernel Control domain - allows control connections to
31 * kernel controllers and to read/write data.
32 *
33 * Vincent Lubet, 040506
34 * Christophe Allie, 010928
35 * Justin C. Walker, 990319
36 */
37
38 #include <sys/types.h>
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/syslog.h>
42 #include <sys/socket.h>
43 #include <sys/socketvar.h>
44 #include <sys/protosw.h>
45 #include <sys/domain.h>
46 #include <sys/malloc.h>
47 #include <sys/mbuf.h>
48 #include <sys/sys_domain.h>
49 #include <sys/kern_event.h>
50 #include <sys/kern_control.h>
51 #include <sys/kauth.h>
52 #include <sys/sysctl.h>
53 #include <sys/proc_info.h>
54 #include <net/if_var.h>
55
56 #include <mach/vm_types.h>
57
58 #include <kern/thread.h>
59
60 struct kctl {
61 TAILQ_ENTRY(kctl) next; /* controller chain */
62 kern_ctl_ref kctlref;
63
64 /* controller information provided when registering */
65 char name[MAX_KCTL_NAME]; /* unique identifier */
66 u_int32_t id;
67 u_int32_t reg_unit;
68
69 /* misc communication information */
70 u_int32_t flags; /* support flags */
71 u_int32_t recvbufsize; /* request more than the default buffer size */
72 u_int32_t sendbufsize; /* request more than the default buffer size */
73
74 /* Dispatch functions */
75 ctl_bind_func bind; /* Prepare contact */
76 ctl_connect_func connect; /* Make contact */
77 ctl_disconnect_func disconnect; /* Break contact */
78 ctl_send_func send; /* Send data to nke */
79 ctl_send_list_func send_list; /* Send list of packets */
80 ctl_setopt_func setopt; /* set kctl configuration */
81 ctl_getopt_func getopt; /* get kctl configuration */
82 ctl_rcvd_func rcvd; /* Notify nke when client reads data */
83
84 TAILQ_HEAD(, ctl_cb) kcb_head;
85 u_int32_t lastunit;
86 };
87
88 #if DEVELOPMENT || DEBUG
89 enum ctl_status {
90 KCTL_DISCONNECTED = 0,
91 KCTL_CONNECTING = 1,
92 KCTL_CONNECTED = 2
93 };
94 #endif /* DEVELOPMENT || DEBUG */
95
96 struct ctl_cb {
97 TAILQ_ENTRY(ctl_cb) next; /* controller chain */
98 lck_mtx_t *mtx;
99 struct socket *so; /* controlling socket */
100 struct kctl *kctl; /* back pointer to controller */
101 void *userdata;
102 struct sockaddr_ctl sac;
103 u_int32_t usecount;
104 u_int32_t kcb_usecount;
105 #if DEVELOPMENT || DEBUG
106 enum ctl_status status;
107 #endif /* DEVELOPMENT || DEBUG */
108 };
109
110 #ifndef ROUNDUP64
111 #define ROUNDUP64(x) P2ROUNDUP((x), sizeof (u_int64_t))
112 #endif
113
114 #ifndef ADVANCE64
115 #define ADVANCE64(p, n) (void*)((char *)(p) + ROUNDUP64(n))
116 #endif
117
118 /*
119 * Definitions and vars for the default buffer sizes we support
120 */
121
122 #define CTL_SENDSIZE (2 * 1024) /* default buffer size */
123 #define CTL_RECVSIZE (8 * 1024) /* default buffer size */
124
125 /*
126 * Definitions and vars for the controllers we support
127 */
128
129 static u_int32_t ctl_maxunit = 65536;
130 static lck_grp_attr_t *ctl_lck_grp_attr = 0;
131 static lck_attr_t *ctl_lck_attr = 0;
132 static lck_grp_t *ctl_lck_grp = 0;
133 static lck_mtx_t *ctl_mtx;
134
135 /* all the controllers are chained */
136 TAILQ_HEAD(kctl_list, kctl) ctl_head;
137
138 static int ctl_attach(struct socket *, int, struct proc *);
139 static int ctl_detach(struct socket *);
140 static int ctl_sofreelastref(struct socket *so);
141 static int ctl_bind(struct socket *, struct sockaddr *, struct proc *);
142 static int ctl_connect(struct socket *, struct sockaddr *, struct proc *);
143 static int ctl_disconnect(struct socket *);
144 static int ctl_ioctl(struct socket *so, u_long cmd, caddr_t data,
145 struct ifnet *ifp, struct proc *p);
146 static int ctl_send(struct socket *, int, struct mbuf *,
147 struct sockaddr *, struct mbuf *, struct proc *);
148 static int ctl_send_list(struct socket *, int, struct mbuf *,
149 struct sockaddr *, struct mbuf *, struct proc *);
150 static int ctl_ctloutput(struct socket *, struct sockopt *);
151 static int ctl_peeraddr(struct socket *so, struct sockaddr **nam);
152 static int ctl_usr_rcvd(struct socket *so, int flags);
153
154 static struct kctl *ctl_find_by_name(const char *);
155 static struct kctl *ctl_find_by_id_unit(u_int32_t id, u_int32_t unit);
156
157 static struct socket *kcb_find_socket(kern_ctl_ref kctlref, u_int32_t unit,
158 u_int32_t *);
159 static struct ctl_cb *kcb_find(struct kctl *, u_int32_t unit);
160 static void ctl_post_msg(u_int32_t event_code, u_int32_t id);
161
162 static int ctl_lock(struct socket *, int, void *);
163 static int ctl_unlock(struct socket *, int, void *);
164 static lck_mtx_t * ctl_getlock(struct socket *, int);
165
166 static struct pr_usrreqs ctl_usrreqs = {
167 .pru_attach = ctl_attach,
168 .pru_bind = ctl_bind,
169 .pru_connect = ctl_connect,
170 .pru_control = ctl_ioctl,
171 .pru_detach = ctl_detach,
172 .pru_disconnect = ctl_disconnect,
173 .pru_peeraddr = ctl_peeraddr,
174 .pru_rcvd = ctl_usr_rcvd,
175 .pru_send = ctl_send,
176 .pru_send_list = ctl_send_list,
177 .pru_sosend = sosend,
178 .pru_sosend_list = sosend_list,
179 .pru_soreceive = soreceive,
180 .pru_soreceive_list = soreceive_list,
181 };
182
183 static struct protosw kctlsw[] = {
184 {
185 .pr_type = SOCK_DGRAM,
186 .pr_protocol = SYSPROTO_CONTROL,
187 .pr_flags = PR_ATOMIC | PR_CONNREQUIRED | PR_PCBLOCK | PR_WANTRCVD,
188 .pr_ctloutput = ctl_ctloutput,
189 .pr_usrreqs = &ctl_usrreqs,
190 .pr_lock = ctl_lock,
191 .pr_unlock = ctl_unlock,
192 .pr_getlock = ctl_getlock,
193 },
194 {
195 .pr_type = SOCK_STREAM,
196 .pr_protocol = SYSPROTO_CONTROL,
197 .pr_flags = PR_CONNREQUIRED | PR_PCBLOCK | PR_WANTRCVD,
198 .pr_ctloutput = ctl_ctloutput,
199 .pr_usrreqs = &ctl_usrreqs,
200 .pr_lock = ctl_lock,
201 .pr_unlock = ctl_unlock,
202 .pr_getlock = ctl_getlock,
203 }
204 };
205
206 __private_extern__ int kctl_reg_list SYSCTL_HANDLER_ARGS;
207 __private_extern__ int kctl_pcblist SYSCTL_HANDLER_ARGS;
208 __private_extern__ int kctl_getstat SYSCTL_HANDLER_ARGS;
209
210
211 SYSCTL_NODE(_net_systm, OID_AUTO, kctl,
212 CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Kernel control family");
213
214 struct kctlstat kctlstat;
215 SYSCTL_PROC(_net_systm_kctl, OID_AUTO, stats,
216 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
217 kctl_getstat, "S,kctlstat", "");
218
219 SYSCTL_PROC(_net_systm_kctl, OID_AUTO, reg_list,
220 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
221 kctl_reg_list, "S,xkctl_reg", "");
222
223 SYSCTL_PROC(_net_systm_kctl, OID_AUTO, pcblist,
224 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
225 kctl_pcblist, "S,xkctlpcb", "");
226
227 u_int32_t ctl_autorcvbuf_max = 256 * 1024;
228 SYSCTL_INT(_net_systm_kctl, OID_AUTO, autorcvbufmax,
229 CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_autorcvbuf_max, 0, "");
230
231 u_int32_t ctl_autorcvbuf_high = 0;
232 SYSCTL_INT(_net_systm_kctl, OID_AUTO, autorcvbufhigh,
233 CTLFLAG_RD | CTLFLAG_LOCKED, &ctl_autorcvbuf_high, 0, "");
234
235 u_int32_t ctl_debug = 0;
236 SYSCTL_INT(_net_systm_kctl, OID_AUTO, debug,
237 CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_debug, 0, "");
238
239 #if DEVELOPMENT || DEBUG
240 u_int32_t ctl_panic_debug = 0;
241 SYSCTL_INT(_net_systm_kctl, OID_AUTO, panicdebug,
242 CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_panic_debug, 0, "");
243 #endif /* DEVELOPMENT || DEBUG */
244
245 #define KCTL_TBL_INC 16
246
247 static uintptr_t kctl_tbl_size = 0;
248 static u_int32_t kctl_tbl_growing = 0;
249 static u_int32_t kctl_tbl_growing_waiting = 0;
250 static uintptr_t kctl_tbl_count = 0;
251 static struct kctl **kctl_table = NULL;
252 static uintptr_t kctl_ref_gencnt = 0;
253
254 static void kctl_tbl_grow(void);
255 static kern_ctl_ref kctl_make_ref(struct kctl *kctl);
256 static void kctl_delete_ref(kern_ctl_ref);
257 static struct kctl *kctl_from_ref(kern_ctl_ref);
258
259 /*
260 * Install the protosw's for the Kernel Control manager.
261 */
262 __private_extern__ void
263 kern_control_init(struct domain *dp)
264 {
265 struct protosw *pr;
266 int i;
267 int kctl_proto_count = (sizeof(kctlsw) / sizeof(struct protosw));
268
269 VERIFY(!(dp->dom_flags & DOM_INITIALIZED));
270 VERIFY(dp == systemdomain);
271
272 ctl_lck_grp_attr = lck_grp_attr_alloc_init();
273 if (ctl_lck_grp_attr == NULL) {
274 panic("%s: lck_grp_attr_alloc_init failed\n", __func__);
275 /* NOTREACHED */
276 }
277
278 ctl_lck_grp = lck_grp_alloc_init("Kernel Control Protocol",
279 ctl_lck_grp_attr);
280 if (ctl_lck_grp == NULL) {
281 panic("%s: lck_grp_alloc_init failed\n", __func__);
282 /* NOTREACHED */
283 }
284
285 ctl_lck_attr = lck_attr_alloc_init();
286 if (ctl_lck_attr == NULL) {
287 panic("%s: lck_attr_alloc_init failed\n", __func__);
288 /* NOTREACHED */
289 }
290
291 ctl_mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
292 if (ctl_mtx == NULL) {
293 panic("%s: lck_mtx_alloc_init failed\n", __func__);
294 /* NOTREACHED */
295 }
296 TAILQ_INIT(&ctl_head);
297
298 for (i = 0, pr = &kctlsw[0]; i < kctl_proto_count; i++, pr++) {
299 net_add_proto(pr, dp, 1);
300 }
301 }
302
303 static void
304 kcb_delete(struct ctl_cb *kcb)
305 {
306 if (kcb != 0) {
307 if (kcb->mtx != 0) {
308 lck_mtx_free(kcb->mtx, ctl_lck_grp);
309 }
310 FREE(kcb, M_TEMP);
311 }
312 }
313
314 /*
315 * Kernel Controller user-request functions
316 * attach function must exist and succeed
317 * detach not necessary
318 * we need a pcb for the per socket mutex
319 */
320 static int
321 ctl_attach(struct socket *so, int proto, struct proc *p)
322 {
323 #pragma unused(proto, p)
324 int error = 0;
325 struct ctl_cb *kcb = 0;
326
327 MALLOC(kcb, struct ctl_cb *, sizeof(struct ctl_cb), M_TEMP, M_WAITOK);
328 if (kcb == NULL) {
329 error = ENOMEM;
330 goto quit;
331 }
332 bzero(kcb, sizeof(struct ctl_cb));
333
334 kcb->mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
335 if (kcb->mtx == NULL) {
336 error = ENOMEM;
337 goto quit;
338 }
339 kcb->so = so;
340 so->so_pcb = (caddr_t)kcb;
341
342 quit:
343 if (error != 0) {
344 kcb_delete(kcb);
345 kcb = 0;
346 }
347 return error;
348 }
349
350 static int
351 ctl_sofreelastref(struct socket *so)
352 {
353 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
354
355 so->so_pcb = 0;
356
357 if (kcb != 0) {
358 struct kctl *kctl;
359 if ((kctl = kcb->kctl) != 0) {
360 lck_mtx_lock(ctl_mtx);
361 TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
362 kctlstat.kcs_pcbcount--;
363 kctlstat.kcs_gencnt++;
364 lck_mtx_unlock(ctl_mtx);
365 }
366 kcb_delete(kcb);
367 }
368 sofreelastref(so, 1);
369 return 0;
370 }
371
372 /*
373 * Use this function to serialize calls into the kctl subsystem
374 */
375 static void
376 ctl_kcb_increment_use_count(struct ctl_cb *kcb, lck_mtx_t *mutex_held)
377 {
378 LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED);
379 while (kcb->kcb_usecount > 0) {
380 msleep(&kcb->kcb_usecount, mutex_held, PSOCK | PCATCH, "kcb_usecount", NULL);
381 }
382 kcb->kcb_usecount++;
383 }
384
385 static void
386 ctl_kcb_decrement_use_count(struct ctl_cb *kcb)
387 {
388 assert(kcb->kcb_usecount != 0);
389 kcb->kcb_usecount--;
390 wakeup_one((caddr_t)&kcb->kcb_usecount);
391 }
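
/*
 * A minimal usage sketch of the serialization above, mirroring how the
 * handlers in this file use it: bracket a user-request handler with the
 * increment/decrement pair so at most one request runs per control
 * block, even when the handler drops the socket lock around an upcall:
 *
 *	lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
 *	ctl_kcb_increment_use_count(kcb, mtx_held);
 *	...	handler body; may socket_unlock()/socket_lock()	...
 *	ctl_kcb_decrement_use_count(kcb);
 */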
392
393 static int
394 ctl_detach(struct socket *so)
395 {
396 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
397
398 if (kcb == 0) {
399 return 0;
400 }
401
402 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
403 ctl_kcb_increment_use_count(kcb, mtx_held);
404
405 if (kcb->kctl != NULL && kcb->kctl->bind != NULL &&
406 kcb->userdata != NULL && !(so->so_state & SS_ISCONNECTED)) {
407 // The unit was bound, but not connected
408 // Invoke the disconnected call to cleanup
409 if (kcb->kctl->disconnect != NULL) {
410 socket_unlock(so, 0);
411 (*kcb->kctl->disconnect)(kcb->kctl->kctlref,
412 kcb->sac.sc_unit, kcb->userdata);
413 socket_lock(so, 0);
414 }
415 }
416
417 soisdisconnected(so);
418 #if DEVELOPMENT || DEBUG
419 kcb->status = KCTL_DISCONNECTED;
420 #endif /* DEVELOPMENT || DEBUG */
421 so->so_flags |= SOF_PCBCLEARING;
422 ctl_kcb_decrement_use_count(kcb);
423 return 0;
424 }
425
426 static int
427 ctl_setup_kctl(struct socket *so, struct sockaddr *nam, struct proc *p)
428 {
429 struct kctl *kctl = NULL;
430 int error = 0;
431 struct sockaddr_ctl sa;
432 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
433 struct ctl_cb *kcb_next = NULL;
434 u_quad_t sbmaxsize;
435 u_int32_t recvbufsize, sendbufsize;
436
437 if (kcb == 0) {
438 panic("ctl_setup_kctl so_pcb null\n");
439 }
440
441 if (kcb->kctl != NULL) {
442 // Already set up, skip
443 return 0;
444 }
445
446 if (nam->sa_len != sizeof(struct sockaddr_ctl)) {
447 return EINVAL;
448 }
449
450 bcopy(nam, &sa, sizeof(struct sockaddr_ctl));
451
452 lck_mtx_lock(ctl_mtx);
453 kctl = ctl_find_by_id_unit(sa.sc_id, sa.sc_unit);
454 if (kctl == NULL) {
455 lck_mtx_unlock(ctl_mtx);
456 return ENOENT;
457 }
458
459 if (((kctl->flags & CTL_FLAG_REG_SOCK_STREAM) &&
460 (so->so_type != SOCK_STREAM)) ||
461 (!(kctl->flags & CTL_FLAG_REG_SOCK_STREAM) &&
462 (so->so_type != SOCK_DGRAM))) {
463 lck_mtx_unlock(ctl_mtx);
464 return EPROTOTYPE;
465 }
466
467 if (kctl->flags & CTL_FLAG_PRIVILEGED) {
468 if (p == 0) {
469 lck_mtx_unlock(ctl_mtx);
470 return EINVAL;
471 }
472 if (kauth_cred_issuser(kauth_cred_get()) == 0) {
473 lck_mtx_unlock(ctl_mtx);
474 return EPERM;
475 }
476 }
477
478 if ((kctl->flags & CTL_FLAG_REG_ID_UNIT) || sa.sc_unit != 0) {
479 if (kcb_find(kctl, sa.sc_unit) != NULL) {
480 lck_mtx_unlock(ctl_mtx);
481 return EBUSY;
482 }
483 } else {
484 /* Find an unused unit number, assumes the units are in order */
485 u_int32_t unit = 1;
486
487 TAILQ_FOREACH(kcb_next, &kctl->kcb_head, next) {
488 if (kcb_next->sac.sc_unit > unit) {
489 /* Found a gap, let's fill it in */
490 break;
491 }
492 unit = kcb_next->sac.sc_unit + 1;
493 if (unit == ctl_maxunit) {
494 break;
495 }
496 }
497
498 if (unit == ctl_maxunit) {
499 lck_mtx_unlock(ctl_mtx);
500 return EBUSY;
501 }
502
503 sa.sc_unit = unit;
504 }
505
506 bcopy(&sa, &kcb->sac, sizeof(struct sockaddr_ctl));
507 kcb->kctl = kctl;
508 if (kcb_next != NULL) {
509 TAILQ_INSERT_BEFORE(kcb_next, kcb, next);
510 } else {
511 TAILQ_INSERT_TAIL(&kctl->kcb_head, kcb, next);
512 }
513 kctlstat.kcs_pcbcount++;
514 kctlstat.kcs_gencnt++;
515 kctlstat.kcs_connections++;
516 lck_mtx_unlock(ctl_mtx);
517
518 /*
519 * rdar://15526688: Limit the send and receive sizes to sb_max
520 * by using the same scaling as sbreserve()
521 */
522 sbmaxsize = (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES);
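/*
 * Illustrative arithmetic (assuming the common values MSIZE = 256 and
 * MCLBYTES = 2048): the cap works out to sb_max * 2048 / 2304, i.e.
 * exactly 8/9 of sb_max.
 */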
523
524 if (kctl->sendbufsize > sbmaxsize) {
525 sendbufsize = sbmaxsize;
526 } else {
527 sendbufsize = kctl->sendbufsize;
528 }
529
530 if (kctl->recvbufsize > sbmaxsize) {
531 recvbufsize = sbmaxsize;
532 } else {
533 recvbufsize = kctl->recvbufsize;
534 }
535
536 error = soreserve(so, sendbufsize, recvbufsize);
537 if (error) {
538 if (ctl_debug) {
539 printf("%s - soreserve(%llx, %u, %u) error %d\n",
540 __func__, (uint64_t)VM_KERNEL_ADDRPERM(so),
541 sendbufsize, recvbufsize, error);
542 }
543 goto done;
544 }
545
546 done:
547 if (error) {
548 soisdisconnected(so);
549 #if DEVELOPMENT || DEBUG
550 kcb->status = KCTL_DISCONNECTED;
551 #endif /* DEVELOPMENT || DEBUG */
552 lck_mtx_lock(ctl_mtx);
553 TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
554 kcb->kctl = NULL;
555 kcb->sac.sc_unit = 0;
556 kctlstat.kcs_pcbcount--;
557 kctlstat.kcs_gencnt++;
558 kctlstat.kcs_conn_fail++;
559 lck_mtx_unlock(ctl_mtx);
560 }
561 return error;
562 }
563
564 static int
565 ctl_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
566 {
567 int error = 0;
568 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
569
570 if (kcb == NULL) {
571 panic("ctl_bind so_pcb null\n");
572 }
573
574 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
575 ctl_kcb_increment_use_count(kcb, mtx_held);
576
577 error = ctl_setup_kctl(so, nam, p);
578 if (error) {
579 goto out;
580 }
581
582 if (kcb->kctl == NULL) {
583 panic("ctl_bind kctl null\n");
584 }
585
586 if (kcb->kctl->bind == NULL) {
587 error = EINVAL;
588 goto out;
589 }
590
591 socket_unlock(so, 0);
592 error = (*kcb->kctl->bind)(kcb->kctl->kctlref, &kcb->sac, &kcb->userdata);
593 socket_lock(so, 0);
594
595 out:
596 ctl_kcb_decrement_use_count(kcb);
597 return error;
598 }
599
600 static int
601 ctl_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
602 {
603 int error = 0;
604 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
605
606 if (kcb == NULL) {
607 panic("ctl_connect so_pcb null\n");
608 }
609
610 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
611 ctl_kcb_increment_use_count(kcb, mtx_held);
612
613 #if DEVELOPMENT || DEBUG
614 if (kcb->status != KCTL_DISCONNECTED && ctl_panic_debug) {
615 panic("kctl already connecting/connected");
616 }
617 kcb->status = KCTL_CONNECTING;
618 #endif /* DEVELOPMENT || DEBUG */
619
620 error = ctl_setup_kctl(so, nam, p);
621 if (error) {
622 goto out;
623 }
624
625 if (kcb->kctl == NULL) {
626 panic("ctl_connect kctl null\n");
627 }
628
629 soisconnecting(so);
630 socket_unlock(so, 0);
631 error = (*kcb->kctl->connect)(kcb->kctl->kctlref, &kcb->sac, &kcb->userdata);
632 socket_lock(so, 0);
633 if (error) {
634 goto end;
635 }
636 soisconnected(so);
637 #if DEVELOPMENT || DEBUG
638 kcb->status = KCTL_CONNECTED;
639 #endif /* DEVELOPMENT || DEBUG */
640
641 end:
642 if (error && kcb->kctl->disconnect) {
643 /*
644 * XXX Make sure we do NOT check the return value
645 * of disconnect here:
646 * ipsec/utun_ctl_disconnect will return an error when
647 * disconnect gets called after a connect failure.
648 * If we ever decide to check the disconnect return
649 * value here, please make sure to revisit
650 * ipsec/utun_ctl_disconnect as well.
651 */
652 socket_unlock(so, 0);
653 (*kcb->kctl->disconnect)(kcb->kctl->kctlref, kcb->sac.sc_unit, kcb->userdata);
654 socket_lock(so, 0);
655 }
656 if (error) {
657 soisdisconnected(so);
658 #if DEVELOPMENT || DEBUG
659 kcb->status = KCTL_DISCONNECTED;
660 #endif /* DEVELOPMENT || DEBUG */
661 lck_mtx_lock(ctl_mtx);
662 TAILQ_REMOVE(&kcb->kctl->kcb_head, kcb, next);
663 kcb->kctl = NULL;
664 kcb->sac.sc_unit = 0;
665 kctlstat.kcs_pcbcount--;
666 kctlstat.kcs_gencnt++;
667 kctlstat.kcs_conn_fail++;
668 lck_mtx_unlock(ctl_mtx);
669 }
670 out:
671 ctl_kcb_decrement_use_count(kcb);
672 return error;
673 }
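
/*
 * Illustrative userland counterpart of bind/connect above (a sketch,
 * not part of this file; "com.example.mykctl" would be a made-up
 * control name): resolve the control name to an id with CTLIOCGINFO,
 * then connect with sc_unit = 0 so ctl_setup_kctl() picks a free unit.
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/kern_control.h>
 *	#include <sys/socket.h>
 *	#include <sys/sys_domain.h>
 *
 *	int
 *	connect_kctl(const char *name)
 *	{
 *		struct ctl_info info;
 *		struct sockaddr_ctl addr;
 *		int fd;
 *
 *		fd = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);
 *		if (fd < 0)
 *			return -1;
 *		memset(&info, 0, sizeof(info));
 *		strlcpy(info.ctl_name, name, sizeof(info.ctl_name));
 *		if (ioctl(fd, CTLIOCGINFO, &info) == -1) {
 *			close(fd);		// name not registered
 *			return -1;
 *		}
 *		memset(&addr, 0, sizeof(addr));
 *		addr.sc_len = sizeof(addr);
 *		addr.sc_family = AF_SYSTEM;
 *		addr.ss_sysaddr = AF_SYS_CONTROL;
 *		addr.sc_id = info.ctl_id;	// from CTLIOCGINFO
 *		addr.sc_unit = 0;		// 0: kernel assigns a free unit
 *		if (connect(fd, (struct sockaddr *)&addr,
 *		    sizeof(addr)) == -1) {
 *			close(fd);		// lands in ctl_connect() above
 *			return -1;
 *		}
 *		return fd;
 *	}
 */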
674
675 static int
676 ctl_disconnect(struct socket *so)
677 {
678 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
679
680 if (kcb != NULL) {
681 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
682 ctl_kcb_increment_use_count(kcb, mtx_held);
683 struct kctl *kctl = kcb->kctl;
684
685 if (kctl && kctl->disconnect) {
686 socket_unlock(so, 0);
687 (*kctl->disconnect)(kctl->kctlref, kcb->sac.sc_unit,
688 kcb->userdata);
689 socket_lock(so, 0);
690 }
691
692 soisdisconnected(so);
693 #if DEVELOPMENT || DEBUG
694 kcb->status = KCTL_DISCONNECTED;
695 #endif /* DEVELOPMENT || DEBUG */
696
697 socket_unlock(so, 0);
698 lck_mtx_lock(ctl_mtx);
699 kcb->kctl = 0;
700 kcb->sac.sc_unit = 0;
701 while (kcb->usecount != 0) {
702 msleep(&kcb->usecount, ctl_mtx, 0, "kcb->usecount", 0);
703 }
704 TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
705 kctlstat.kcs_pcbcount--;
706 kctlstat.kcs_gencnt++;
707 lck_mtx_unlock(ctl_mtx);
708 socket_lock(so, 0);
709 ctl_kcb_decrement_use_count(kcb);
710 }
711 return 0;
712 }
713
714 static int
715 ctl_peeraddr(struct socket *so, struct sockaddr **nam)
716 {
717 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
718 struct kctl *kctl;
719 struct sockaddr_ctl sc;
720
721 if (kcb == NULL) { /* sanity check */
722 return ENOTCONN;
723 }
724
725 if ((kctl = kcb->kctl) == NULL) {
726 return EINVAL;
727 }
728
729 bzero(&sc, sizeof(struct sockaddr_ctl));
730 sc.sc_len = sizeof(struct sockaddr_ctl);
731 sc.sc_family = AF_SYSTEM;
732 sc.ss_sysaddr = AF_SYS_CONTROL;
733 sc.sc_id = kctl->id;
734 sc.sc_unit = kcb->sac.sc_unit;
735
736 *nam = dup_sockaddr((struct sockaddr *)&sc, 1);
737
738 return 0;
739 }
740
741 static void
742 ctl_sbrcv_trim(struct socket *so)
743 {
744 struct sockbuf *sb = &so->so_rcv;
745
746 if (sb->sb_hiwat > sb->sb_idealsize) {
747 u_int32_t diff;
748 int32_t trim;
749
750 /*
751 * The difference between the ideal size and the
752 * current size is the upper bound of the amount to trim
753 */
754 diff = sb->sb_hiwat - sb->sb_idealsize;
755 /*
756 * We cannot trim below the outstanding data
757 */
758 trim = sb->sb_hiwat - sb->sb_cc;
759
760 trim = imin(trim, (int32_t)diff);
761
762 if (trim > 0) {
763 sbreserve(sb, (sb->sb_hiwat - trim));
764
765 if (ctl_debug) {
766 printf("%s - shrunk to %d\n",
767 __func__, sb->sb_hiwat);
768 }
769 }
770 }
771 }
772
773 static int
774 ctl_usr_rcvd(struct socket *so, int flags)
775 {
776 int error = 0;
777 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
778 struct kctl *kctl;
779
780 if (kcb == NULL) {
781 return ENOTCONN;
782 }
783
784 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
785 ctl_kcb_increment_use_count(kcb, mtx_held);
786
787 if ((kctl = kcb->kctl) == NULL) {
788 error = EINVAL;
789 goto out;
790 }
791
792 if (kctl->rcvd) {
793 socket_unlock(so, 0);
794 (*kctl->rcvd)(kctl->kctlref, kcb->sac.sc_unit, kcb->userdata, flags);
795 socket_lock(so, 0);
796 }
797
798 ctl_sbrcv_trim(so);
799
800 out:
801 ctl_kcb_decrement_use_count(kcb);
802 return error;
803 }
804
805 static int
806 ctl_send(struct socket *so, int flags, struct mbuf *m,
807 struct sockaddr *addr, struct mbuf *control,
808 struct proc *p)
809 {
810 #pragma unused(addr, p)
811 int error = 0;
812 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
813 struct kctl *kctl;
814
815 if (control) {
816 m_freem(control);
817 }
818
819 if (kcb == NULL) { /* sanity check */
820 m_freem(m); /* avoid passing a NULL kcb below */
821 return ENOTCONN;
822 }
823 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
824 ctl_kcb_increment_use_count(kcb, mtx_held);
825
826 if (error == 0 && (kctl = kcb->kctl) == NULL) {
827 error = EINVAL;
828 }
829
830 if (error == 0 && kctl->send) {
831 so_tc_update_stats(m, so, m_get_service_class(m));
832 socket_unlock(so, 0);
833 error = (*kctl->send)(kctl->kctlref, kcb->sac.sc_unit, kcb->userdata,
834 m, flags);
835 socket_lock(so, 0);
836 } else {
837 m_freem(m);
838 if (error == 0) {
839 error = ENOTSUP;
840 }
841 }
842 if (error != 0) {
843 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_send_fail);
844 }
845 ctl_kcb_decrement_use_count(kcb);
846
847 return error;
848 }
849
850 static int
851 ctl_send_list(struct socket *so, int flags, struct mbuf *m,
852 __unused struct sockaddr *addr, struct mbuf *control,
853 __unused struct proc *p)
854 {
855 int error = 0;
856 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
857 struct kctl *kctl;
858
859 if (control) {
860 m_freem_list(control);
861 }
862
863 if (kcb == NULL) { /* sanity check */
864 m_freem_list(m); /* avoid passing a NULL kcb below */
865 return ENOTCONN;
866 }
867 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
868 ctl_kcb_increment_use_count(kcb, mtx_held);
869
870 if (error == 0 && (kctl = kcb->kctl) == NULL) {
871 error = EINVAL;
872 }
873
874 if (error == 0 && kctl->send_list) {
875 struct mbuf *nxt;
876
877 for (nxt = m; nxt != NULL; nxt = nxt->m_nextpkt) {
878 so_tc_update_stats(nxt, so, m_get_service_class(nxt));
879 }
880
881 socket_unlock(so, 0);
882 error = (*kctl->send_list)(kctl->kctlref, kcb->sac.sc_unit,
883 kcb->userdata, m, flags);
884 socket_lock(so, 0);
885 } else if (error == 0 && kctl->send) {
886 while (m != NULL && error == 0) {
887 struct mbuf *nextpkt = m->m_nextpkt;
888
889 m->m_nextpkt = NULL;
890 so_tc_update_stats(m, so, m_get_service_class(m));
891 socket_unlock(so, 0);
892 error = (*kctl->send)(kctl->kctlref, kcb->sac.sc_unit,
893 kcb->userdata, m, flags);
894 socket_lock(so, 0);
895 m = nextpkt;
896 }
897 if (m != NULL) {
898 m_freem_list(m);
899 }
900 } else {
901 m_freem_list(m);
902 if (error == 0) {
903 error = ENOTSUP;
904 }
905 }
906 if (error != 0) {
907 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_send_list_fail);
908 }
909 ctl_kcb_decrement_use_count(kcb);
910
911 return error;
912 }
913
914 static errno_t
915 ctl_rcvbspace(struct socket *so, u_int32_t datasize,
916 u_int32_t kctlflags, u_int32_t flags)
917 {
918 struct sockbuf *sb = &so->so_rcv;
919 u_int32_t space = sbspace(sb);
920 errno_t error;
921
922 if ((kctlflags & CTL_FLAG_REG_CRIT) == 0) {
923 if ((u_int32_t) space >= datasize) {
924 error = 0;
925 } else {
926 error = ENOBUFS;
927 }
928 } else if ((flags & CTL_DATA_CRIT) == 0) {
929 /*
930 * Reserve 25% for critical messages
931 */
932 if (space < (sb->sb_hiwat >> 2) ||
933 space < datasize) {
934 error = ENOBUFS;
935 } else {
936 error = 0;
937 }
938 } else {
939 u_int32_t autorcvbuf_max;
940
941 /*
942 * Allow overcommit of 25%
943 */
944 autorcvbuf_max = min(sb->sb_idealsize + (sb->sb_idealsize >> 2),
945 ctl_autorcvbuf_max);
946
947 if ((u_int32_t) space >= datasize) {
948 error = 0;
949 } else if (tcp_cansbgrow(sb) &&
950 sb->sb_hiwat < autorcvbuf_max) {
951 /*
952 * Grow with a little bit of leeway
953 */
954 u_int32_t grow = datasize - space + MSIZE;
955
956 if (sbreserve(sb,
957 min((sb->sb_hiwat + grow), autorcvbuf_max)) == 1) {
958 if (sb->sb_hiwat > ctl_autorcvbuf_high) {
959 ctl_autorcvbuf_high = sb->sb_hiwat;
960 }
961
962 /*
963 * A final check
964 */
965 if ((u_int32_t) sbspace(sb) >= datasize) {
966 error = 0;
967 } else {
968 error = ENOBUFS;
969 }
970
971 if (ctl_debug) {
972 printf("%s - grown to %d error %d\n",
973 __func__, sb->sb_hiwat, error);
974 }
975 } else {
976 error = ENOBUFS;
977 }
978 } else {
979 error = ENOBUFS;
980 }
981 }
982 return error;
983 }
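
/*
 * Worked example of the policy above (illustrative numbers): on a
 * CTL_FLAG_REG_CRIT control with sb_hiwat = 8192, a non-critical
 * enqueue fails with ENOBUFS once free space drops below
 * 8192 >> 2 = 2048 bytes, keeping the top quarter in reserve; a
 * CTL_DATA_CRIT enqueue may still proceed and can even grow the
 * buffer up to min(sb_idealsize + sb_idealsize / 4, ctl_autorcvbuf_max).
 */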
984
985 errno_t
986 ctl_enqueuembuf(kern_ctl_ref kctlref, u_int32_t unit, struct mbuf *m,
987 u_int32_t flags)
988 {
989 struct socket *so;
990 errno_t error = 0;
991 int len = m->m_pkthdr.len;
992 u_int32_t kctlflags;
993
994 so = kcb_find_socket(kctlref, unit, &kctlflags);
995 if (so == NULL) {
996 return EINVAL;
997 }
998
999 if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
1000 error = ENOBUFS;
1001 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
1002 goto bye;
1003 }
1004 if ((flags & CTL_DATA_EOR)) {
1005 m->m_flags |= M_EOR;
1006 }
1007
1008 so_recv_data_stat(so, m, 0);
1009 if (sbappend(&so->so_rcv, m) != 0) {
1010 if ((flags & CTL_DATA_NOWAKEUP) == 0) {
1011 sorwakeup(so);
1012 }
1013 } else {
1014 error = ENOBUFS;
1015 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
1016 }
1017 bye:
1018 if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) {
1019 printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
1020 __func__, error, len,
1021 so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);
1022 }
1023
1024 socket_unlock(so, 1);
1025 if (error != 0) {
1026 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
1027 }
1028
1029 return error;
1030 }
1031
1032 /*
1033 * Compute space occupied by mbuf like sbappendrecord
1034 */
1035 static int
1036 m_space(struct mbuf *m)
1037 {
1038 int space = 0;
1039 struct mbuf *nxt;
1040
1041 for (nxt = m; nxt != NULL; nxt = nxt->m_next) {
1042 space += nxt->m_len;
1043 }
1044
1045 return space;
1046 }
1047
1048 errno_t
1049 ctl_enqueuembuf_list(void *kctlref, u_int32_t unit, struct mbuf *m_list,
1050 u_int32_t flags, struct mbuf **m_remain)
1051 {
1052 struct socket *so = NULL;
1053 errno_t error = 0;
1054 struct mbuf *m, *nextpkt;
1055 int needwakeup = 0;
1056 int len = 0;
1057 u_int32_t kctlflags;
1058
1059 /*
1060 * Need to point at the beginning of the list in case of early exit
1061 */
1062 m = m_list;
1063
1064 /*
1065 * kcb_find_socket takes the socket lock with a reference
1066 */
1067 so = kcb_find_socket(kctlref, unit, &kctlflags);
1068 if (so == NULL) {
1069 error = EINVAL;
1070 goto done;
1071 }
1072
1073 if (kctlflags & CTL_FLAG_REG_SOCK_STREAM) {
1074 error = EOPNOTSUPP;
1075 goto done;
1076 }
1077 if (flags & CTL_DATA_EOR) {
1078 error = EINVAL;
1079 goto done;
1080 }
1081
1082 for (m = m_list; m != NULL; m = nextpkt) {
1083 nextpkt = m->m_nextpkt;
1084
1085 if (m->m_pkthdr.len == 0 && ctl_debug) {
1086 printf("%s: %llx m_pkthdr.len is 0\n",
1087 __func__, (uint64_t)VM_KERNEL_ADDRPERM(m));
1088 }
1089
1090 /*
1091 * The mbuf is either appended or freed by sbappendrecord(),
1092 * so its length must be computed before handing it off
1093 */
1094 len = m_space(m);
1095 if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
1096 error = ENOBUFS;
1097 OSIncrementAtomic64(
1098 (SInt64 *)&kctlstat.kcs_enqueue_fullsock);
1099 break;
1100 } else {
1101 /*
1102 * Unlink from the list, m is on its own
1103 */
1104 m->m_nextpkt = NULL;
1105 so_recv_data_stat(so, m, 0);
1106 if (sbappendrecord(&so->so_rcv, m) != 0) {
1107 needwakeup = 1;
1108 } else {
1109 /*
1110 * We free or return the remaining
1111 * mbufs in the list
1112 */
1113 m = nextpkt;
1114 error = ENOBUFS;
1115 OSIncrementAtomic64(
1116 (SInt64 *)&kctlstat.kcs_enqueue_fullsock);
1117 break;
1118 }
1119 }
1120 }
1121 if (needwakeup && (flags & CTL_DATA_NOWAKEUP) == 0) {
1122 sorwakeup(so);
1123 }
1124
1125 done:
1126 if (so != NULL) {
1127 if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) {
1128 printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
1129 __func__, error, len,
1130 so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);
1131 }
1132
1133 socket_unlock(so, 1);
1134 }
1135 if (m_remain) {
1136 *m_remain = m;
1137
1138 if (m != NULL && socket_debug && so != NULL &&
1139 (so->so_options & SO_DEBUG)) {
1140 struct mbuf *n;
1141
1142 printf("%s m_list %llx\n", __func__,
1143 (uint64_t) VM_KERNEL_ADDRPERM(m_list));
1144 for (n = m; n != NULL; n = n->m_nextpkt) {
1145 printf(" remain %llx m_next %llx\n",
1146 (uint64_t) VM_KERNEL_ADDRPERM(n),
1147 (uint64_t) VM_KERNEL_ADDRPERM(n->m_next));
1148 }
1149 }
1150 } else {
1151 if (m != NULL) {
1152 m_freem_list(m);
1153 }
1154 }
1155 if (error != 0) {
1156 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
1157 }
1158 return error;
1159 }
1160
1161 errno_t
1162 ctl_enqueuedata(void *kctlref, u_int32_t unit, void *data, size_t len,
1163 u_int32_t flags)
1164 {
1165 struct socket *so;
1166 struct mbuf *m;
1167 errno_t error = 0;
1168 unsigned int num_needed;
1169 struct mbuf *n;
1170 size_t curlen = 0;
1171 u_int32_t kctlflags;
1172
1173 so = kcb_find_socket(kctlref, unit, &kctlflags);
1174 if (so == NULL) {
1175 return EINVAL;
1176 }
1177
1178 if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
1179 error = ENOBUFS;
1180 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
1181 goto bye;
1182 }
1183
1184 num_needed = 1;
1185 m = m_allocpacket_internal(&num_needed, len, NULL, M_NOWAIT, 1, 0);
1186 if (m == NULL) {
1187 kctlstat.kcs_enqdata_mb_alloc_fail++;
1188 if (ctl_debug) {
1189 printf("%s: m_allocpacket_internal(%lu) failed\n",
1190 __func__, len);
1191 }
1192 error = ENOMEM;
1193 goto bye;
1194 }
1195
1196 for (n = m; n != NULL; n = n->m_next) {
1197 size_t mlen = mbuf_maxlen(n);
1198
1199 if (mlen + curlen > len) {
1200 mlen = len - curlen;
1201 }
1202 n->m_len = mlen;
1203 bcopy((char *)data + curlen, n->m_data, mlen);
1204 curlen += mlen;
1205 }
1206 mbuf_pkthdr_setlen(m, curlen);
1207
1208 if ((flags & CTL_DATA_EOR)) {
1209 m->m_flags |= M_EOR;
1210 }
1211 so_recv_data_stat(so, m, 0);
1212 if (sbappend(&so->so_rcv, m) != 0) {
1213 if ((flags & CTL_DATA_NOWAKEUP) == 0) {
1214 sorwakeup(so);
1215 }
1216 } else {
1217 kctlstat.kcs_enqdata_sbappend_fail++;
1218 error = ENOBUFS;
1219 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
1220 }
1221
1222 bye:
1223 if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) {
1224 printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
1225 __func__, error, (int)len,
1226 so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);
1227 }
1228
1229 socket_unlock(so, 1);
1230 if (error != 0) {
1231 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
1232 }
1233 return error;
1234 }
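
/*
 * Sketch of the kernel-control side of the queue (hypothetical names:
 * "mykctl_post_event" and "struct my_event" are made up; "ref" and
 * "unit" would come from the control's connect callback). ENOBUFS
 * signals a full receive buffer; a sensible reaction is to stash the
 * data and retry when the ctl_rcvd callback fires.
 *
 *	struct my_event {
 *		u_int32_t code;
 *		u_int32_t value;
 *	};
 *
 *	static errno_t
 *	mykctl_post_event(kern_ctl_ref ref, u_int32_t unit,
 *	    struct my_event *ev)
 *	{
 *		// CTL_DATA_EOR marks the end of a record
 *		return ctl_enqueuedata(ref, unit, ev, sizeof(*ev),
 *		    CTL_DATA_EOR);
 *	}
 */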
1235
1236 errno_t
1237 ctl_getenqueuepacketcount(kern_ctl_ref kctlref, u_int32_t unit, u_int32_t *pcnt)
1238 {
1239 struct socket *so;
1240 u_int32_t cnt;
1241 struct mbuf *m1;
1242
1243 if (pcnt == NULL) {
1244 return EINVAL;
1245 }
1246
1247 so = kcb_find_socket(kctlref, unit, NULL);
1248 if (so == NULL) {
1249 return EINVAL;
1250 }
1251
1252 cnt = 0;
1253 m1 = so->so_rcv.sb_mb;
1254 while (m1 != NULL) {
1255 if (m1->m_type == MT_DATA ||
1256 m1->m_type == MT_HEADER ||
1257 m1->m_type == MT_OOBDATA) {
1258 cnt += 1;
1259 }
1260 m1 = m1->m_nextpkt;
1261 }
1262 *pcnt = cnt;
1263
1264 socket_unlock(so, 1);
1265
1266 return 0;
1267 }
1268
1269 errno_t
1270 ctl_getenqueuespace(kern_ctl_ref kctlref, u_int32_t unit, size_t *space)
1271 {
1272 struct socket *so;
1273 long avail;
1274
1275 if (space == NULL) {
1276 return EINVAL;
1277 }
1278
1279 so = kcb_find_socket(kctlref, unit, NULL);
1280 if (so == NULL) {
1281 return EINVAL;
1282 }
1283
1284 avail = sbspace(&so->so_rcv);
1285 *space = (avail < 0) ? 0 : avail;
1286 socket_unlock(so, 1);
1287
1288 return 0;
1289 }
1290
1291 errno_t
1292 ctl_getenqueuereadable(kern_ctl_ref kctlref, u_int32_t unit,
1293 u_int32_t *difference)
1294 {
1295 struct socket *so;
1296
1297 if (difference == NULL) {
1298 return EINVAL;
1299 }
1300
1301 so = kcb_find_socket(kctlref, unit, NULL);
1302 if (so == NULL) {
1303 return EINVAL;
1304 }
1305
1306 if (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat) {
1307 *difference = 0;
1308 } else {
1309 *difference = (so->so_rcv.sb_lowat - so->so_rcv.sb_cc);
1310 }
1311 socket_unlock(so, 1);
1312
1313 return 0;
1314 }
1315
1316 static int
1317 ctl_ctloutput(struct socket *so, struct sockopt *sopt)
1318 {
1319 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
1320 struct kctl *kctl;
1321 int error = 0;
1322 void *data = NULL;
1323 size_t len;
1324
1325 if (sopt->sopt_level != SYSPROTO_CONTROL) {
1326 return EINVAL;
1327 }
1328
1329 if (kcb == NULL) { /* sanity check */
1330 return ENOTCONN;
1331 }
1332
1333 if ((kctl = kcb->kctl) == NULL) {
1334 return EINVAL;
1335 }
1336
1337 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
1338 ctl_kcb_increment_use_count(kcb, mtx_held);
1339
1340 switch (sopt->sopt_dir) {
1341 case SOPT_SET:
1342 if (kctl->setopt == NULL) {
1343 error = ENOTSUP;
1344 goto out;
1345 }
1346 if (sopt->sopt_valsize != 0) {
1347 MALLOC(data, void *, sopt->sopt_valsize, M_TEMP,
1348 M_WAITOK | M_ZERO);
1349 if (data == NULL) {
1350 error = ENOMEM;
1351 goto out;
1352 }
1353 error = sooptcopyin(sopt, data,
1354 sopt->sopt_valsize, sopt->sopt_valsize);
1355 }
1356 if (error == 0) {
1357 socket_unlock(so, 0);
1358 error = (*kctl->setopt)(kctl->kctlref,
1359 kcb->sac.sc_unit, kcb->userdata, sopt->sopt_name,
1360 data, sopt->sopt_valsize);
1361 socket_lock(so, 0);
1362 }
1363
1364 if (data != NULL) {
1365 FREE(data, M_TEMP);
1366 }
1367 break;
1368
1369 case SOPT_GET:
1370 if (kctl->getopt == NULL) {
1371 error = ENOTSUP;
1372 goto out;
1373 }
1374
1375 if (sopt->sopt_valsize && sopt->sopt_val) {
1376 MALLOC(data, void *, sopt->sopt_valsize, M_TEMP,
1377 M_WAITOK | M_ZERO);
1378 if (data == NULL) {
1379 error = ENOMEM;
1380 goto out;
1381 }
1382 /*
1383 * 4108337 - copy user data in case the
1384 * kernel control needs it
1385 */
1386 error = sooptcopyin(sopt, data,
1387 sopt->sopt_valsize, sopt->sopt_valsize);
1388 }
1389
1390 if (error == 0) {
1391 len = sopt->sopt_valsize;
1392 socket_unlock(so, 0);
1393 error = (*kctl->getopt)(kctl->kctlref, kcb->sac.sc_unit,
1394 kcb->userdata, sopt->sopt_name,
1395 data, &len);
1396 if (data != NULL && len > sopt->sopt_valsize) {
1397 panic_plain("ctl_ctloutput: ctl %s returned "
1398 "len (%lu) > sopt_valsize (%lu)\n",
1399 kcb->kctl->name, len,
1400 sopt->sopt_valsize);
1401 }
1402 socket_lock(so, 0);
1403 if (error == 0) {
1404 if (data != NULL) {
1405 error = sooptcopyout(sopt, data, len);
1406 } else {
1407 sopt->sopt_valsize = len;
1408 }
1409 }
1410 }
1411 if (data != NULL) {
1412 FREE(data, M_TEMP);
1413 }
1414 break;
1415 }
1416
1417 out:
1418 ctl_kcb_decrement_use_count(kcb);
1419 return error;
1420 }
1421
1422 static int
1423 ctl_ioctl(struct socket *so, u_long cmd, caddr_t data,
1424 struct ifnet *ifp, struct proc *p)
1425 {
1426 #pragma unused(so, ifp, p)
1427 int error = ENOTSUP;
1428
1429 switch (cmd) {
1430 /* get the number of controllers */
1431 case CTLIOCGCOUNT: {
1432 struct kctl *kctl;
1433 u_int32_t n = 0;
1434
1435 lck_mtx_lock(ctl_mtx);
1436 TAILQ_FOREACH(kctl, &ctl_head, next)
1437 n++;
1438 lck_mtx_unlock(ctl_mtx);
1439
1440 bcopy(&n, data, sizeof(n));
1441 error = 0;
1442 break;
1443 }
1444 case CTLIOCGINFO: {
1445 struct ctl_info ctl_info;
1446 struct kctl *kctl = 0;
1447 size_t name_len;
1448
1449 bcopy(data, &ctl_info, sizeof(ctl_info));
1450 name_len = strnlen(ctl_info.ctl_name, MAX_KCTL_NAME);
1451
1452 if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) {
1453 error = EINVAL;
1454 break;
1455 }
1456 lck_mtx_lock(ctl_mtx);
1457 kctl = ctl_find_by_name(ctl_info.ctl_name);
1458 lck_mtx_unlock(ctl_mtx);
1459 if (kctl == 0) {
1460 error = ENOENT;
1461 break;
1462 }
1463 ctl_info.ctl_id = kctl->id;
1464 bcopy(&ctl_info, data, sizeof(ctl_info));
1465 error = 0;
1466 break;
1467 }
1468
1469 /* add controls to get list of NKEs */
1470 }
1471
1472 return error;
1473 }
1474
1475 static void
1476 kctl_tbl_grow(void)
1477 {
1478 struct kctl **new_table;
1479 uintptr_t new_size;
1480
1481 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1482
1483 if (kctl_tbl_growing) {
1484 /* Another thread is allocating */
1485 kctl_tbl_growing_waiting++;
1486
1487 do {
1488 (void) msleep((caddr_t) &kctl_tbl_growing, ctl_mtx,
1489 PSOCK | PCATCH, "kctl_tbl_growing", 0);
1490 } while (kctl_tbl_growing);
1491 kctl_tbl_growing_waiting--;
1492 }
1493 /* Another thread grew the table */
1494 if (kctl_table != NULL && kctl_tbl_count < kctl_tbl_size) {
1495 return;
1496 }
1497
1498 /* Verify we have a sane size */
1499 if (kctl_tbl_size + KCTL_TBL_INC >= UINT16_MAX) {
1500 kctlstat.kcs_tbl_size_too_big++;
1501 if (ctl_debug) {
1502 printf("%s kctl_tbl_size %lu too big\n",
1503 __func__, kctl_tbl_size);
1504 }
1505 return;
1506 }
1507 kctl_tbl_growing = 1;
1508
1509 new_size = kctl_tbl_size + KCTL_TBL_INC;
1510
1511 lck_mtx_unlock(ctl_mtx);
1512 new_table = _MALLOC(sizeof(struct kctl *) * new_size,
1513 M_TEMP, M_WAIT | M_ZERO);
1514 lck_mtx_lock(ctl_mtx);
1515
1516 if (new_table != NULL) {
1517 if (kctl_table != NULL) {
1518 bcopy(kctl_table, new_table,
1519 kctl_tbl_size * sizeof(struct kctl *));
1520
1521 _FREE(kctl_table, M_TEMP);
1522 }
1523 kctl_table = new_table;
1524 kctl_tbl_size = new_size;
1525 }
1526
1527 kctl_tbl_growing = 0;
1528
1529 if (kctl_tbl_growing_waiting) {
1530 wakeup(&kctl_tbl_growing);
1531 }
1532 }
1533
1534 #define KCTLREF_INDEX_MASK 0x0000FFFF
1535 #define KCTLREF_GENCNT_MASK 0xFFFF0000
1536 #define KCTLREF_GENCNT_SHIFT 16
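
/*
 * Worked example of the encoding (illustrative values): table slot
 * i = 2 with generation count 0x1234 yields
 *
 *	((0x1234 << KCTLREF_GENCNT_SHIFT) & KCTLREF_GENCNT_MASK) +
 *	    ((2 + 1) & KCTLREF_INDEX_MASK) == 0x12340003
 *
 * kctl_from_ref() recovers the slot as (ref & KCTLREF_INDEX_MASK) - 1
 * and rejects the reference unless the kctl stored there still holds
 * the full salted value, so stale references from deregistered
 * controls are caught.
 */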
1537
1538 static kern_ctl_ref
1539 kctl_make_ref(struct kctl *kctl)
1540 {
1541 uintptr_t i;
1542
1543 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1544
1545 if (kctl_tbl_count >= kctl_tbl_size) {
1546 kctl_tbl_grow();
1547 }
1548
1549 kctl->kctlref = NULL;
1550 for (i = 0; i < kctl_tbl_size; i++) {
1551 if (kctl_table[i] == NULL) {
1552 uintptr_t ref;
1553
1554 /*
1555 * Reference is index plus one
1556 */
1557 kctl_ref_gencnt += 1;
1558
1559 /*
1560 * Add generation count as salt to reference to prevent
1561 * use after deregister
1562 */
1563 ref = ((kctl_ref_gencnt << KCTLREF_GENCNT_SHIFT) &
1564 KCTLREF_GENCNT_MASK) +
1565 ((i + 1) & KCTLREF_INDEX_MASK);
1566
1567 kctl->kctlref = (void *)(ref);
1568 kctl_table[i] = kctl;
1569 kctl_tbl_count++;
1570 break;
1571 }
1572 }
1573
1574 if (kctl->kctlref == NULL) {
1575 panic("%s no space in table", __func__);
1576 }
1577
1578 if (ctl_debug > 0) {
1579 printf("%s %p for %p\n",
1580 __func__, kctl->kctlref, kctl);
1581 }
1582
1583 return kctl->kctlref;
1584 }
1585
1586 static void
1587 kctl_delete_ref(kern_ctl_ref kctlref)
1588 {
1589 /*
1590 * Reference is index plus one
1591 */
1592 uintptr_t i = (((uintptr_t)kctlref) & KCTLREF_INDEX_MASK) - 1;
1593
1594 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1595
1596 if (i < kctl_tbl_size) {
1597 struct kctl *kctl = kctl_table[i];
1598
1599 if (kctl != NULL && kctl->kctlref == kctlref) {
1600 kctl_table[i] = NULL;
1601 kctl_tbl_count--;
1602 } else {
1603 kctlstat.kcs_bad_kctlref++;
1604 }
1605 } else {
1606 kctlstat.kcs_bad_kctlref++;
1607 }
1608 }
1609
1610 static struct kctl *
1611 kctl_from_ref(kern_ctl_ref kctlref)
1612 {
1613 /*
1614 * Reference is index plus one
1615 */
1616 uintptr_t i = (((uintptr_t)kctlref) & KCTLREF_INDEX_MASK) - 1;
1617 struct kctl *kctl = NULL;
1618
1619 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1620
1621 if (i >= kctl_tbl_size) {
1622 kctlstat.kcs_bad_kctlref++;
1623 return NULL;
1624 }
1625 kctl = kctl_table[i];
1626 if (kctl == NULL || kctl->kctlref != kctlref) {
1627 kctlstat.kcs_bad_kctlref++;
1628 return NULL;
1629 }
1630 return kctl;
1631 }
1632
1633 /*
1634 * Register/unregister a NKE
1635 */
1636 errno_t
1637 ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref)
1638 {
1639 struct kctl *kctl = NULL;
1640 struct kctl *kctl_next = NULL;
1641 u_int32_t id = 1;
1642 size_t name_len;
1643 int is_extended = 0;
1644
1645 if (userkctl == NULL) { /* sanity check */
1646 return EINVAL;
1647 }
1648 if (userkctl->ctl_connect == NULL) {
1649 return EINVAL;
1650 }
1651 name_len = strlen(userkctl->ctl_name);
1652 if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) {
1653 return EINVAL;
1654 }
1655
1656 MALLOC(kctl, struct kctl *, sizeof(*kctl), M_TEMP, M_WAITOK);
1657 if (kctl == NULL) {
1658 return ENOMEM;
1659 }
1660 bzero((char *)kctl, sizeof(*kctl));
1661
1662 lck_mtx_lock(ctl_mtx);
1663
1664 if (kctl_make_ref(kctl) == NULL) {
1665 lck_mtx_unlock(ctl_mtx);
1666 FREE(kctl, M_TEMP);
1667 return ENOMEM;
1668 }
1669
1670 /*
1671 * Kernel Control IDs
1672 *
1673 * CTL_FLAG_REG_ID_UNIT indicates the control ID and unit number are
1674 * static. If they do not exist, add them to the list in order. If the
1675 * flag is not set, we must find a new unique value. We assume the
1676 * list is in order. We find the last item in the list and add one. If
1677 * this leads to wrapping the id around, we start at the front of the
1678 * list and look for a gap.
1679 */
1680
1681 if ((userkctl->ctl_flags & CTL_FLAG_REG_ID_UNIT) == 0) {
1682 /* Must dynamically assign an unused ID */
1683
1684 /* Verify the same name isn't already registered */
1685 if (ctl_find_by_name(userkctl->ctl_name) != NULL) {
1686 kctl_delete_ref(kctl->kctlref);
1687 lck_mtx_unlock(ctl_mtx);
1688 FREE(kctl, M_TEMP);
1689 return EEXIST;
1690 }
1691
1692 /* Start with 1 in case the list is empty */
1693 id = 1;
1694 kctl_next = TAILQ_LAST(&ctl_head, kctl_list);
1695
1696 if (kctl_next != NULL) {
1697 /* List was not empty, add one to the last item */
1698 id = kctl_next->id + 1;
1699 kctl_next = NULL;
1700
1701 /*
1702 * If this wrapped the id number, start looking at
1703 * the front of the list for an unused id.
1704 */
1705 if (id == 0) {
1706 /* Find the next unused ID */
1707 id = 1;
1708
1709 TAILQ_FOREACH(kctl_next, &ctl_head, next) {
1710 if (kctl_next->id > id) {
1711 /* We found a gap */
1712 break;
1713 }
1714
1715 id = kctl_next->id + 1;
1716 }
1717 }
1718 }
1719
1720 userkctl->ctl_id = id;
1721 kctl->id = id;
1722 kctl->reg_unit = -1;
1723 } else {
1724 TAILQ_FOREACH(kctl_next, &ctl_head, next) {
1725 if (kctl_next->id > userkctl->ctl_id) {
1726 break;
1727 }
1728 }
1729
1730 if (ctl_find_by_id_unit(userkctl->ctl_id, userkctl->ctl_unit)) {
1731 kctl_delete_ref(kctl->kctlref);
1732 lck_mtx_unlock(ctl_mtx);
1733 FREE(kctl, M_TEMP);
1734 return EEXIST;
1735 }
1736 kctl->id = userkctl->ctl_id;
1737 kctl->reg_unit = userkctl->ctl_unit;
1738 }
1739
1740 is_extended = (userkctl->ctl_flags & CTL_FLAG_REG_EXTENDED);
1741
1742 strlcpy(kctl->name, userkctl->ctl_name, MAX_KCTL_NAME);
1743 kctl->flags = userkctl->ctl_flags;
1744
1745 /*
1746 * Let the caller know the default send and receive sizes
1747 */
1748 if (userkctl->ctl_sendsize == 0) {
1749 kctl->sendbufsize = CTL_SENDSIZE;
1750 userkctl->ctl_sendsize = kctl->sendbufsize;
1751 } else {
1752 kctl->sendbufsize = userkctl->ctl_sendsize;
1753 }
1754 if (userkctl->ctl_recvsize == 0) {
1755 kctl->recvbufsize = CTL_RECVSIZE;
1756 userkctl->ctl_recvsize = kctl->recvbufsize;
1757 } else {
1758 kctl->recvbufsize = userkctl->ctl_recvsize;
1759 }
1760
1761 kctl->bind = userkctl->ctl_bind;
1762 kctl->connect = userkctl->ctl_connect;
1763 kctl->disconnect = userkctl->ctl_disconnect;
1764 kctl->send = userkctl->ctl_send;
1765 kctl->setopt = userkctl->ctl_setopt;
1766 kctl->getopt = userkctl->ctl_getopt;
1767 if (is_extended) {
1768 kctl->rcvd = userkctl->ctl_rcvd;
1769 kctl->send_list = userkctl->ctl_send_list;
1770 }
1771
1772 TAILQ_INIT(&kctl->kcb_head);
1773
1774 if (kctl_next) {
1775 TAILQ_INSERT_BEFORE(kctl_next, kctl, next);
1776 } else {
1777 TAILQ_INSERT_TAIL(&ctl_head, kctl, next);
1778 }
1779
1780 kctlstat.kcs_reg_count++;
1781 kctlstat.kcs_gencnt++;
1782
1783 lck_mtx_unlock(ctl_mtx);
1784
1785 *kctlref = kctl->kctlref;
1786
1787 ctl_post_msg(KEV_CTL_REGISTERED, kctl->id);
1788 return 0;
1789 }
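
/*
 * Registration sketch (hypothetical kext code; the control name and
 * callbacks are made-up). Only ctl_connect is mandatory, as enforced
 * above; with CTL_FLAG_REG_ID_UNIT clear, ctl_id and ctl_unit are
 * assigned dynamically.
 *
 *	static kern_ctl_ref mykctl_ref;
 *
 *	static errno_t
 *	mykctl_connect(kern_ctl_ref ref, struct sockaddr_ctl *sac,
 *	    void **unitinfo)
 *	{
 *		*unitinfo = NULL;	// per-connection state, if any
 *		return 0;
 *	}
 *
 *	static errno_t
 *	mykctl_register(void)
 *	{
 *		struct kern_ctl_reg reg;
 *
 *		bzero(&reg, sizeof(reg));
 *		strlcpy(reg.ctl_name, "com.example.mykctl",
 *		    sizeof(reg.ctl_name));
 *		reg.ctl_connect = mykctl_connect;
 *		return ctl_register(&reg, &mykctl_ref);
 *	}
 */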
1790
1791 errno_t
1792 ctl_deregister(void *kctlref)
1793 {
1794 struct kctl *kctl;
1795
1796 lck_mtx_lock(ctl_mtx);
1797 if ((kctl = kctl_from_ref(kctlref)) == NULL) {
1798 kctlstat.kcs_bad_kctlref++;
1799 lck_mtx_unlock(ctl_mtx);
1800 if (ctl_debug != 0) {
1801 printf("%s invalid kctlref %p\n",
1802 __func__, kctlref);
1803 }
1804 return EINVAL;
1805 }
1806
1807 if (!TAILQ_EMPTY(&kctl->kcb_head)) {
1808 lck_mtx_unlock(ctl_mtx);
1809 return EBUSY;
1810 }
1811
1812 TAILQ_REMOVE(&ctl_head, kctl, next);
1813
1814 kctlstat.kcs_reg_count--;
1815 kctlstat.kcs_gencnt++;
1816
1817 kctl_delete_ref(kctl->kctlref);
1818 lck_mtx_unlock(ctl_mtx);
1819
1820 ctl_post_msg(KEV_CTL_DEREGISTERED, kctl->id);
1821 FREE(kctl, M_TEMP);
1822 return 0;
1823 }
1824
1825 /*
1826 * Must be called with global ctl_mtx lock taken
1827 */
1828 static struct kctl *
1829 ctl_find_by_name(const char *name)
1830 {
1831 struct kctl *kctl;
1832
1833 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1834
1835 TAILQ_FOREACH(kctl, &ctl_head, next)
1836 if (strncmp(kctl->name, name, sizeof(kctl->name)) == 0) {
1837 return kctl;
1838 }
1839
1840 return NULL;
1841 }
1842
1843 u_int32_t
1844 ctl_id_by_name(const char *name)
1845 {
1846 u_int32_t ctl_id = 0;
1847 struct kctl *kctl;
1848
1849 lck_mtx_lock(ctl_mtx);
1850 kctl = ctl_find_by_name(name);
1851 if (kctl) {
1852 ctl_id = kctl->id;
1853 }
1854 lck_mtx_unlock(ctl_mtx);
1855
1856 return ctl_id;
1857 }
1858
1859 errno_t
1860 ctl_name_by_id(u_int32_t id, char *out_name, size_t maxsize)
1861 {
1862 int found = 0;
1863 struct kctl *kctl;
1864
1865 lck_mtx_lock(ctl_mtx);
1866 TAILQ_FOREACH(kctl, &ctl_head, next) {
1867 if (kctl->id == id) {
1868 break;
1869 }
1870 }
1871
1872 if (kctl) {
1873 if (maxsize > MAX_KCTL_NAME) {
1874 maxsize = MAX_KCTL_NAME;
1875 }
1876 strlcpy(out_name, kctl->name, maxsize);
1877 found = 1;
1878 }
1879 lck_mtx_unlock(ctl_mtx);
1880
1881 return found ? 0 : ENOENT;
1882 }
1883
1884 /*
1885 * Must be called with global ctl_mtx lock taken
1886 *
1887 */
1888 static struct kctl *
1889 ctl_find_by_id_unit(u_int32_t id, u_int32_t unit)
1890 {
1891 struct kctl *kctl;
1892
1893 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1894
1895 TAILQ_FOREACH(kctl, &ctl_head, next) {
1896 if (kctl->id == id && (kctl->flags & CTL_FLAG_REG_ID_UNIT) == 0) {
1897 return kctl;
1898 } else if (kctl->id == id && kctl->reg_unit == unit) {
1899 return kctl;
1900 }
1901 }
1902 return NULL;
1903 }
1904
1905 /*
1906 * Must be called with kernel controller lock taken
1907 */
1908 static struct ctl_cb *
1909 kcb_find(struct kctl *kctl, u_int32_t unit)
1910 {
1911 struct ctl_cb *kcb;
1912
1913 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
1914
1915 TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
1916 if (kcb->sac.sc_unit == unit) {
1917 return kcb;
1918 }
1919
1920 return NULL;
1921 }
1922
1923 static struct socket *
1924 kcb_find_socket(kern_ctl_ref kctlref, u_int32_t unit, u_int32_t *kctlflags)
1925 {
1926 struct socket *so = NULL;
1927 struct ctl_cb *kcb;
1928 void *lr_saved;
1929 struct kctl *kctl;
1930 int i;
1931
1932 lr_saved = __builtin_return_address(0);
1933
1934 lck_mtx_lock(ctl_mtx);
1935 /*
1936 * First validate the kctlref
1937 */
1938 if ((kctl = kctl_from_ref(kctlref)) == NULL) {
1939 kctlstat.kcs_bad_kctlref++;
1940 lck_mtx_unlock(ctl_mtx);
1941 if (ctl_debug != 0) {
1942 printf("%s invalid kctlref %p\n",
1943 __func__, kctlref);
1944 }
1945 return NULL;
1946 }
1947
1948 kcb = kcb_find(kctl, unit);
1949 if (kcb == NULL || kcb->kctl != kctl || (so = kcb->so) == NULL) {
1950 lck_mtx_unlock(ctl_mtx);
1951 return NULL;
1952 }
1953 /*
1954 * This prevents the socket from being closed
1955 */
1956 kcb->usecount++;
1957 /*
1958 * Respect lock ordering: socket before ctl_mtx
1959 */
1960 lck_mtx_unlock(ctl_mtx);
1961
1962 socket_lock(so, 1);
1963 /*
1964 * The socket lock history is more useful if we store
1965 * the address of the caller.
1966 */
1967 i = (so->next_lock_lr + SO_LCKDBG_MAX - 1) % SO_LCKDBG_MAX;
1968 so->lock_lr[i] = lr_saved;
1969
1970 lck_mtx_lock(ctl_mtx);
1971
1972 if ((kctl = kctl_from_ref(kctlref)) == NULL || kcb->kctl != kctl) {
1973 lck_mtx_unlock(ctl_mtx);
1974 socket_unlock(so, 1);
1975 so = NULL;
1976 lck_mtx_lock(ctl_mtx);
1977 } else if (kctlflags != NULL) {
1978 *kctlflags = kctl->flags;
1979 }
1980
1981 kcb->usecount--;
1982 if (kcb->usecount == 0) {
1983 wakeup((event_t)&kcb->usecount);
1984 }
1985
1986 lck_mtx_unlock(ctl_mtx);
1987
1988 return so;
1989 }
1990
1991 static void
1992 ctl_post_msg(u_int32_t event_code, u_int32_t id)
1993 {
1994 struct ctl_event_data ctl_ev_data;
1995 struct kev_msg ev_msg;
1996
1997 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_NOTOWNED);
1998
1999 bzero(&ev_msg, sizeof(struct kev_msg));
2000 ev_msg.vendor_code = KEV_VENDOR_APPLE;
2001
2002 ev_msg.kev_class = KEV_SYSTEM_CLASS;
2003 ev_msg.kev_subclass = KEV_CTL_SUBCLASS;
2004 ev_msg.event_code = event_code;
2005
2006 /* common nke subclass data */
2007 bzero(&ctl_ev_data, sizeof(ctl_ev_data));
2008 ctl_ev_data.ctl_id = id;
2009 ev_msg.dv[0].data_ptr = &ctl_ev_data;
2010 ev_msg.dv[0].data_length = sizeof(ctl_ev_data);
2011
2012 ev_msg.dv[1].data_length = 0;
2013
2014 kev_post_msg(&ev_msg);
2015 }
2016
2017 static int
2018 ctl_lock(struct socket *so, int refcount, void *lr)
2019 {
2020 void *lr_saved;
2021
2022 if (lr == NULL) {
2023 lr_saved = __builtin_return_address(0);
2024 } else {
2025 lr_saved = lr;
2026 }
2027
2028 if (so->so_pcb != NULL) {
2029 lck_mtx_lock(((struct ctl_cb *)so->so_pcb)->mtx);
2030 } else {
2031 panic("ctl_lock: so=%p NO PCB! lr=%p lrh= %s\n",
2032 so, lr_saved, solockhistory_nr(so));
2033 /* NOTREACHED */
2034 }
2035
2036 if (so->so_usecount < 0) {
2037 panic("ctl_lock: so=%p so_pcb=%p lr=%p ref=%x lrh= %s\n",
2038 so, so->so_pcb, lr_saved, so->so_usecount,
2039 solockhistory_nr(so));
2040 /* NOTREACHED */
2041 }
2042
2043 if (refcount) {
2044 so->so_usecount++;
2045 }
2046
2047 so->lock_lr[so->next_lock_lr] = lr_saved;
2048 so->next_lock_lr = (so->next_lock_lr + 1) % SO_LCKDBG_MAX;
2049 return 0;
2050 }
2051
2052 static int
2053 ctl_unlock(struct socket *so, int refcount, void *lr)
2054 {
2055 void *lr_saved;
2056 lck_mtx_t *mutex_held;
2057
2058 if (lr == NULL) {
2059 lr_saved = __builtin_return_address(0);
2060 } else {
2061 lr_saved = lr;
2062 }
2063
2064 #if (MORE_KCTLLOCK_DEBUG && (DEVELOPMENT || DEBUG))
2065 printf("ctl_unlock: so=%llx sopcb=%llx lock=%llx ref=%u lr=%llx\n",
2066 (uint64_t)VM_KERNEL_ADDRPERM(so),
2067 (uint64_t)VM_KERNEL_ADDRPERM(so->so_pcb),
2068 (uint64_t)VM_KERNEL_ADDRPERM(((struct ctl_cb *)so->so_pcb)->mtx),
2069 so->so_usecount, (uint64_t)VM_KERNEL_ADDRPERM(lr_saved));
2070 #endif /* (MORE_KCTLLOCK_DEBUG && (DEVELOPMENT || DEBUG)) */
2071 if (refcount) {
2072 so->so_usecount--;
2073 }
2074
2075 if (so->so_usecount < 0) {
2076 panic("ctl_unlock: so=%p usecount=%x lrh= %s\n",
2077 so, so->so_usecount, solockhistory_nr(so));
2078 /* NOTREACHED */
2079 }
2080 if (so->so_pcb == NULL) {
2081 panic("ctl_unlock: so=%p NO PCB usecount=%x lr=%p lrh= %s\n",
2082 so, so->so_usecount, (void *)lr_saved,
2083 solockhistory_nr(so));
2084 /* NOTREACHED */
2085 }
2086 mutex_held = ((struct ctl_cb *)so->so_pcb)->mtx;
2087
2088 lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
2089 so->unlock_lr[so->next_unlock_lr] = lr_saved;
2090 so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX;
2091 lck_mtx_unlock(mutex_held);
2092
2093 if (so->so_usecount == 0) {
2094 ctl_sofreelastref(so);
2095 }
2096
2097 return 0;
2098 }
2099
2100 static lck_mtx_t *
2101 ctl_getlock(struct socket *so, int flags)
2102 {
2103 #pragma unused(flags)
2104 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
2105
2106 if (so->so_pcb) {
2107 if (so->so_usecount < 0) {
2108 panic("ctl_getlock: so=%p usecount=%x lrh= %s\n",
2109 so, so->so_usecount, solockhistory_nr(so));
2110 }
2111 return kcb->mtx;
2112 } else {
2113 panic("ctl_getlock: so=%p NO PCB! lrh= %s\n",
2114 so, solockhistory_nr(so));
2115 return so->so_proto->pr_domain->dom_mtx;
2116 }
2117 }
2118
2119 __private_extern__ int
2120 kctl_reg_list SYSCTL_HANDLER_ARGS
2121 {
2122 #pragma unused(oidp, arg1, arg2)
2123 int error = 0;
2124 int n, i;
2125 struct xsystmgen xsg;
2126 void *buf = NULL;
2127 struct kctl *kctl;
2128 size_t item_size = ROUNDUP64(sizeof(struct xkctl_reg));
2129
2130 buf = _MALLOC(item_size, M_TEMP, M_WAITOK | M_ZERO);
2131 if (buf == NULL) {
2132 return ENOMEM;
2133 }
2134
2135 lck_mtx_lock(ctl_mtx);
2136
2137 n = kctlstat.kcs_reg_count;
2138
2139 if (req->oldptr == USER_ADDR_NULL) {
2140 req->oldidx = (n + n / 8) * sizeof(struct xkctl_reg);
2141 goto done;
2142 }
2143 if (req->newptr != USER_ADDR_NULL) {
2144 error = EPERM;
2145 goto done;
2146 }
2147 bzero(&xsg, sizeof(xsg));
2148 xsg.xg_len = sizeof(xsg);
2149 xsg.xg_count = n;
2150 xsg.xg_gen = kctlstat.kcs_gencnt;
2151 xsg.xg_sogen = so_gencnt;
2152 error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
2153 if (error) {
2154 goto done;
2155 }
2156 /*
2157 * We are done if there is no pcb
2158 */
2159 if (n == 0) {
2160 goto done;
2161 }
2162
2163 i = 0;
2164 for (i = 0, kctl = TAILQ_FIRST(&ctl_head);
2165 i < n && kctl != NULL;
2166 i++, kctl = TAILQ_NEXT(kctl, next)) {
2167 struct xkctl_reg *xkr = (struct xkctl_reg *)buf;
2168 struct ctl_cb *kcb;
2169 u_int32_t pcbcount = 0;
2170
2171 TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
2172 pcbcount++;
2173
2174 bzero(buf, item_size);
2175
2176 xkr->xkr_len = sizeof(struct xkctl_reg);
2177 xkr->xkr_kind = XSO_KCREG;
2178 xkr->xkr_id = kctl->id;
2179 xkr->xkr_reg_unit = kctl->reg_unit;
2180 xkr->xkr_flags = kctl->flags;
2181 xkr->xkr_kctlref = (uint64_t)(kctl->kctlref);
2182 xkr->xkr_recvbufsize = kctl->recvbufsize;
2183 xkr->xkr_sendbufsize = kctl->sendbufsize;
2184 xkr->xkr_lastunit = kctl->lastunit;
2185 xkr->xkr_pcbcount = pcbcount;
2186 xkr->xkr_connect = (uint64_t)VM_KERNEL_UNSLIDE(kctl->connect);
2187 xkr->xkr_disconnect =
2188 (uint64_t)VM_KERNEL_UNSLIDE(kctl->disconnect);
2189 xkr->xkr_send = (uint64_t)VM_KERNEL_UNSLIDE(kctl->send);
2190 xkr->xkr_send_list =
2191 (uint64_t)VM_KERNEL_UNSLIDE(kctl->send_list);
2192 xkr->xkr_setopt = (uint64_t)VM_KERNEL_UNSLIDE(kctl->setopt);
2193 xkr->xkr_getopt = (uint64_t)VM_KERNEL_UNSLIDE(kctl->getopt);
2194 xkr->xkr_rcvd = (uint64_t)VM_KERNEL_UNSLIDE(kctl->rcvd);
2195 strlcpy(xkr->xkr_name, kctl->name, sizeof(xkr->xkr_name));
2196
2197 error = SYSCTL_OUT(req, buf, item_size);
2198 }
2199
2200 if (error == 0) {
2201 /*
2202 * Give the user an updated idea of our state.
2203 * If the generation differs from what we told
2204 * her before, she knows that something happened
2205 * while we were processing this request, and it
2206 * might be necessary to retry.
2207 */
2208 bzero(&xsg, sizeof(xsg));
2209 xsg.xg_len = sizeof(xsg);
2210 xsg.xg_count = n;
2211 xsg.xg_gen = kctlstat.kcs_gencnt;
2212 xsg.xg_sogen = so_gencnt;
2213 error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
2214 if (error) {
2215 goto done;
2216 }
2217 }
2218
2219 done:
2220 lck_mtx_unlock(ctl_mtx);
2221
2222 if (buf != NULL) {
2223 FREE(buf, M_TEMP);
2224 }
2225
2226 return error;
2227 }
2228
2229 __private_extern__ int
2230 kctl_pcblist SYSCTL_HANDLER_ARGS
2231 {
2232 #pragma unused(oidp, arg1, arg2)
2233 int error = 0;
2234 int n, i;
2235 struct xsystmgen xsg;
2236 void *buf = NULL;
2237 struct kctl *kctl;
2238 size_t item_size = ROUNDUP64(sizeof(struct xkctlpcb)) +
2239 ROUNDUP64(sizeof(struct xsocket_n)) +
2240 2 * ROUNDUP64(sizeof(struct xsockbuf_n)) +
2241 ROUNDUP64(sizeof(struct xsockstat_n));
2242
2243 buf = _MALLOC(item_size, M_TEMP, M_WAITOK | M_ZERO);
2244 if (buf == NULL) {
2245 return ENOMEM;
2246 }
2247
2248 lck_mtx_lock(ctl_mtx);
2249
2250 n = kctlstat.kcs_pcbcount;
2251
2252 if (req->oldptr == USER_ADDR_NULL) {
2253 req->oldidx = (n + n / 8) * item_size;
2254 goto done;
2255 }
2256 if (req->newptr != USER_ADDR_NULL) {
2257 error = EPERM;
2258 goto done;
2259 }
2260 bzero(&xsg, sizeof(xsg));
2261 xsg.xg_len = sizeof(xsg);
2262 xsg.xg_count = n;
2263 xsg.xg_gen = kctlstat.kcs_gencnt;
2264 xsg.xg_sogen = so_gencnt;
2265 error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
2266 if (error) {
2267 goto done;
2268 }
2269 /*
2270 * We are done if there is no pcb
2271 */
2272 if (n == 0) {
2273 goto done;
2274 }
2275
2276 i = 0;
2277 for (i = 0, kctl = TAILQ_FIRST(&ctl_head);
2278 i < n && kctl != NULL;
2279 kctl = TAILQ_NEXT(kctl, next)) {
2280 struct ctl_cb *kcb;
2281
2282 for (kcb = TAILQ_FIRST(&kctl->kcb_head);
2283 i < n && kcb != NULL;
2284 i++, kcb = TAILQ_NEXT(kcb, next)) {
2285 struct xkctlpcb *xk = (struct xkctlpcb *)buf;
2286 struct xsocket_n *xso = (struct xsocket_n *)
2287 ADVANCE64(xk, sizeof(*xk));
2288 struct xsockbuf_n *xsbrcv = (struct xsockbuf_n *)
2289 ADVANCE64(xso, sizeof(*xso));
2290 struct xsockbuf_n *xsbsnd = (struct xsockbuf_n *)
2291 ADVANCE64(xsbrcv, sizeof(*xsbrcv));
2292 struct xsockstat_n *xsostats = (struct xsockstat_n *)
2293 ADVANCE64(xsbsnd, sizeof(*xsbsnd));
2294
2295 bzero(buf, item_size);
2296
2297 xk->xkp_len = sizeof(struct xkctlpcb);
2298 xk->xkp_kind = XSO_KCB;
2299 xk->xkp_unit = kcb->sac.sc_unit;
2300 xk->xkp_kctpcb = (uint64_t)VM_KERNEL_ADDRPERM(kcb);
2301 xk->xkp_kctlref = (uint64_t)VM_KERNEL_ADDRPERM(kctl);
2302 xk->xkp_kctlid = kctl->id;
2303 strlcpy(xk->xkp_kctlname, kctl->name,
2304 sizeof(xk->xkp_kctlname));
2305
2306 sotoxsocket_n(kcb->so, xso);
2307 sbtoxsockbuf_n(kcb->so ?
2308 &kcb->so->so_rcv : NULL, xsbrcv);
2309 sbtoxsockbuf_n(kcb->so ?
2310 &kcb->so->so_snd : NULL, xsbsnd);
2311 sbtoxsockstat_n(kcb->so, xsostats);
2312
2313 error = SYSCTL_OUT(req, buf, item_size);
2314 }
2315 }
2316
2317 if (error == 0) {
2318 /*
2319 * Give the user an updated idea of our state.
2320 * If the generation differs from what we told
2321 * her before, she knows that something happened
2322 * while we were processing this request, and it
2323 * might be necessary to retry.
2324 */
2325 bzero(&xsg, sizeof(xsg));
2326 xsg.xg_len = sizeof(xsg);
2327 xsg.xg_count = n;
2328 xsg.xg_gen = kctlstat.kcs_gencnt;
2329 xsg.xg_sogen = so_gencnt;
2330 error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
2331 if (error) {
2332 goto done;
2333 }
2334 }
2335
2336 done:
2337 lck_mtx_unlock(ctl_mtx);
2338
2339 return error;
2340 }
2341
2342 int
2343 kctl_getstat SYSCTL_HANDLER_ARGS
2344 {
2345 #pragma unused(oidp, arg1, arg2)
2346 int error = 0;
2347
2348 lck_mtx_lock(ctl_mtx);
2349
2350 if (req->newptr != USER_ADDR_NULL) {
2351 error = EPERM;
2352 goto done;
2353 }
2354 if (req->oldptr == USER_ADDR_NULL) {
2355 req->oldidx = sizeof(struct kctlstat);
2356 goto done;
2357 }
2358
2359 error = SYSCTL_OUT(req, &kctlstat,
2360 MIN(sizeof(struct kctlstat), req->oldlen));
2361 done:
2362 lck_mtx_unlock(ctl_mtx);
2363 return error;
2364 }
2365
2366 void
2367 kctl_fill_socketinfo(struct socket *so, struct socket_info *si)
2368 {
2369 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
2370 struct kern_ctl_info *kcsi =
2371 &si->soi_proto.pri_kern_ctl;
2372 struct kctl *kctl = kcb->kctl;
2373
2374 si->soi_kind = SOCKINFO_KERN_CTL;
2375
2376 if (kctl == 0) {
2377 return;
2378 }
2379
2380 kcsi->kcsi_id = kctl->id;
2381 kcsi->kcsi_reg_unit = kctl->reg_unit;
2382 kcsi->kcsi_flags = kctl->flags;
2383 kcsi->kcsi_recvbufsize = kctl->recvbufsize;
2384 kcsi->kcsi_sendbufsize = kctl->sendbufsize;
2385 kcsi->kcsi_unit = kcb->sac.sc_unit;
2386 strlcpy(kcsi->kcsi_name, kctl->name, MAX_KCTL_NAME);
2387 }