/*
 * Copyright (c) 1999-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Kernel Control domain - allows control connections to kernel controllers
 * and to read/write data.
 *
 * Vincent Lubet, 040506
 * Christophe Allie, 010928
 * Justin C. Walker, 990319
 */
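/*
 * Illustrative sketch only (not part of this file's logic): from userland,
 * a process typically reaches a kernel control by name with CTLIOCGINFO and
 * then connects with a sockaddr_ctl. The control name below is a placeholder.
 *
 *	struct ctl_info info;
 *	struct sockaddr_ctl addr;
 *	int fd = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);
 *
 *	bzero(&info, sizeof(info));
 *	strlcpy(info.ctl_name, "com.example.mykctl", sizeof(info.ctl_name));
 *	ioctl(fd, CTLIOCGINFO, &info);        // resolve name to control id
 *
 *	bzero(&addr, sizeof(addr));
 *	addr.sc_len = sizeof(addr);
 *	addr.sc_family = AF_SYSTEM;
 *	addr.ss_sysaddr = AF_SYS_CONTROL;
 *	addr.sc_id = info.ctl_id;
 *	addr.sc_unit = 0;                     // let the kernel pick a unit
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 */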
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/malloc.h>
#include <sys/sys_domain.h>
#include <sys/kern_event.h>
#include <sys/kern_control.h>
#include <sys/kauth.h>
#include <sys/sysctl.h>
#include <sys/proc_info.h>
#include <net/if_var.h>

#include <mach/vm_types.h>

#include <kern/thread.h>
struct kctl {
    TAILQ_ENTRY(kctl)       next;           /* controller chain */
    kern_ctl_ref            kctlref;

    /* controller information provided when registering */
    char                    name[MAX_KCTL_NAME];    /* unique identifier */
    u_int32_t               id;
    u_int32_t               reg_unit;

    /* misc communication information */
    u_int32_t               flags;          /* support flags */
    u_int32_t               recvbufsize;    /* request more than the default buffer size */
    u_int32_t               sendbufsize;    /* request more than the default buffer size */

    /* Dispatch functions */
    ctl_setup_func          setup;          /* Setup contact */
    ctl_bind_func           bind;           /* Prepare contact */
    ctl_connect_func        connect;        /* Make contact */
    ctl_disconnect_func     disconnect;     /* Break contact */
    ctl_send_func           send;           /* Send data to nke */
    ctl_send_list_func      send_list;      /* Send list of packets */
    ctl_setopt_func         setopt;         /* set kctl configuration */
    ctl_getopt_func         getopt;         /* get kctl configuration */
    ctl_rcvd_func           rcvd;           /* Notify nke when client reads data */

    TAILQ_HEAD(, ctl_cb)    kcb_head;
    u_int32_t               lastunit;
};
#if DEVELOPMENT || DEBUG
enum ctl_status {
    KCTL_DISCONNECTED = 0,
    KCTL_CONNECTING = 1,
    KCTL_CONNECTED = 2
};
#endif /* DEVELOPMENT || DEBUG */
struct ctl_cb {
    TAILQ_ENTRY(ctl_cb)     next;           /* controller chain */
    lck_mtx_t               mtx;
    struct socket           *so;            /* controlling socket */
    struct kctl             *kctl;          /* back pointer to controller */
    void                    *userdata;
    struct sockaddr_ctl     sac;
    u_int32_t               usecount;
    u_int32_t               kcb_usecount;
    u_int32_t               require_clearing_count;
#if DEVELOPMENT || DEBUG
    enum ctl_status         status;
#endif /* DEVELOPMENT || DEBUG */
};
#define ROUNDUP64(x) P2ROUNDUP((x), sizeof (u_int64_t))

#define ADVANCE64(p, n) (void*)((char *)(p) + ROUNDUP64(n))
/*
 * Definitions and vars for the buffer sizes we support
 */
#define CTL_SENDSIZE    (2 * 1024)      /* default buffer size */
#define CTL_RECVSIZE    (8 * 1024)      /* default buffer size */

/*
 * Definitions and vars for the units we support
 */
const u_int32_t ctl_maxunit = 65536;

static LCK_ATTR_DECLARE(ctl_lck_attr, 0, 0);
static LCK_GRP_DECLARE(ctl_lck_grp, "Kernel Control Protocol");
static LCK_MTX_DECLARE_ATTR(ctl_mtx, &ctl_lck_grp, &ctl_lck_attr);

/* all the controllers are chained */
TAILQ_HEAD(kctl_list, kctl) ctl_head = TAILQ_HEAD_INITIALIZER(ctl_head);
static int ctl_attach(struct socket *, int, struct proc *);
static int ctl_detach(struct socket *);
static int ctl_sofreelastref(struct socket *so);
static int ctl_bind(struct socket *, struct sockaddr *, struct proc *);
static int ctl_connect(struct socket *, struct sockaddr *, struct proc *);
static int ctl_disconnect(struct socket *);
static int ctl_ioctl(struct socket *so, u_long cmd, caddr_t data,
    struct ifnet *ifp, struct proc *p);
static int ctl_send(struct socket *, int, struct mbuf *,
    struct sockaddr *, struct mbuf *, struct proc *);
static int ctl_send_list(struct socket *, int, struct mbuf *,
    struct sockaddr *, struct mbuf *, struct proc *);
static int ctl_ctloutput(struct socket *, struct sockopt *);
static int ctl_peeraddr(struct socket *so, struct sockaddr **nam);
static int ctl_usr_rcvd(struct socket *so, int flags);

static struct kctl *ctl_find_by_name(const char *);
static struct kctl *ctl_find_by_id_unit(u_int32_t id, u_int32_t unit);

static struct socket *kcb_find_socket(kern_ctl_ref kctlref, u_int32_t unit,
    u_int32_t *kctlflags);
static struct ctl_cb *kcb_find(struct kctl *, u_int32_t unit);
static void ctl_post_msg(u_int32_t event_code, u_int32_t id);

static int ctl_lock(struct socket *, int, void *);
static int ctl_unlock(struct socket *, int, void *);
static lck_mtx_t * ctl_getlock(struct socket *, int);
static struct pr_usrreqs ctl_usrreqs = {
    .pru_attach =           ctl_attach,
    .pru_bind =             ctl_bind,
    .pru_connect =          ctl_connect,
    .pru_control =          ctl_ioctl,
    .pru_detach =           ctl_detach,
    .pru_disconnect =       ctl_disconnect,
    .pru_peeraddr =         ctl_peeraddr,
    .pru_rcvd =             ctl_usr_rcvd,
    .pru_send =             ctl_send,
    .pru_send_list =        ctl_send_list,
    .pru_sosend =           sosend,
    .pru_sosend_list =      sosend_list,
    .pru_soreceive =        soreceive,
    .pru_soreceive_list =   soreceive_list,
};
static struct protosw kctlsw[] = {
    {
        .pr_type =      SOCK_DGRAM,
        .pr_protocol =  SYSPROTO_CONTROL,
        .pr_flags =     PR_ATOMIC | PR_CONNREQUIRED | PR_PCBLOCK | PR_WANTRCVD,
        .pr_ctloutput = ctl_ctloutput,
        .pr_usrreqs =   &ctl_usrreqs,
        .pr_lock =      ctl_lock,
        .pr_unlock =    ctl_unlock,
        .pr_getlock =   ctl_getlock,
    },
    {
        .pr_type =      SOCK_STREAM,
        .pr_protocol =  SYSPROTO_CONTROL,
        .pr_flags =     PR_CONNREQUIRED | PR_PCBLOCK | PR_WANTRCVD,
        .pr_ctloutput = ctl_ctloutput,
        .pr_usrreqs =   &ctl_usrreqs,
        .pr_lock =      ctl_lock,
        .pr_unlock =    ctl_unlock,
        .pr_getlock =   ctl_getlock,
    }
};
__private_extern__ int kctl_reg_list SYSCTL_HANDLER_ARGS;
__private_extern__ int kctl_pcblist SYSCTL_HANDLER_ARGS;
__private_extern__ int kctl_getstat SYSCTL_HANDLER_ARGS;
SYSCTL_NODE(_net_systm, OID_AUTO, kctl,
    CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Kernel control family");

struct kctlstat kctlstat;
SYSCTL_PROC(_net_systm_kctl, OID_AUTO, stats,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
    kctl_getstat, "S,kctlstat", "");

SYSCTL_PROC(_net_systm_kctl, OID_AUTO, reg_list,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
    kctl_reg_list, "S,xkctl_reg", "");

SYSCTL_PROC(_net_systm_kctl, OID_AUTO, pcblist,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
    kctl_pcblist, "S,xkctlpcb", "");

u_int32_t ctl_autorcvbuf_max = 256 * 1024;
SYSCTL_INT(_net_systm_kctl, OID_AUTO, autorcvbufmax,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_autorcvbuf_max, 0, "");

u_int32_t ctl_autorcvbuf_high = 0;
SYSCTL_INT(_net_systm_kctl, OID_AUTO, autorcvbufhigh,
    CTLFLAG_RD | CTLFLAG_LOCKED, &ctl_autorcvbuf_high, 0, "");

u_int32_t ctl_debug = 0;
SYSCTL_INT(_net_systm_kctl, OID_AUTO, debug,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_debug, 0, "");

#if DEVELOPMENT || DEBUG
u_int32_t ctl_panic_debug = 0;
SYSCTL_INT(_net_systm_kctl, OID_AUTO, panicdebug,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_panic_debug, 0, "");
#endif /* DEVELOPMENT || DEBUG */
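/*
 * Userland sketch (assumes the standard libc sysctl interface): the stats
 * structure exported above can be read with sysctlbyname(), e.g.
 *
 *	struct kctlstat st;
 *	size_t len = sizeof(st);
 *	if (sysctlbyname("net.systm.kctl.stats", &st, &len, NULL, 0) == 0) {
 *		printf("registered kctls: %llu\n",
 *		    (unsigned long long)st.kcs_reg_count);
 *	}
 */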
#define KCTL_TBL_INC 16

static uintptr_t kctl_tbl_size = 0;
static u_int32_t kctl_tbl_growing = 0;
static u_int32_t kctl_tbl_growing_waiting = 0;
static uintptr_t kctl_tbl_count = 0;
static struct kctl **kctl_table = NULL;
static uintptr_t kctl_ref_gencnt = 0;

static void kctl_tbl_grow(void);
static kern_ctl_ref kctl_make_ref(struct kctl *kctl);
static void kctl_delete_ref(kern_ctl_ref);
static struct kctl *kctl_from_ref(kern_ctl_ref);
/*
 * Install the protosw's for the Kernel Control manager.
 */
__private_extern__ void
kern_control_init(struct domain *dp)
{
    struct protosw *pr;
    int i;
    int kctl_proto_count = (sizeof(kctlsw) / sizeof(struct protosw));

    VERIFY(!(dp->dom_flags & DOM_INITIALIZED));
    VERIFY(dp == systemdomain);

    for (i = 0, pr = &kctlsw[0]; i < kctl_proto_count; i++, pr++) {
        net_add_proto(pr, dp, 1);
    }
}
static void
kcb_delete(struct ctl_cb *kcb)
{
    if (kcb != 0) {
        lck_mtx_destroy(&kcb->mtx, &ctl_lck_grp);
        kheap_free(KHEAP_DEFAULT, kcb, sizeof(struct ctl_cb));
    }
}
/*
 * Kernel Controller user-request functions
 * attach function must exist and succeed
 * detach not necessary
 * we need a pcb for the per socket mutex
 */
static int
ctl_attach(struct socket *so, int proto, struct proc *p)
{
#pragma unused(proto, p)
    int error = 0;
    struct ctl_cb *kcb = 0;

    kcb = kheap_alloc(KHEAP_DEFAULT, sizeof(struct ctl_cb), Z_WAITOK | Z_ZERO);
    if (kcb == NULL) {
        error = ENOMEM;
        goto quit;
    }

    lck_mtx_init(&kcb->mtx, &ctl_lck_grp, &ctl_lck_attr);
    kcb->so = so;
    so->so_pcb = (caddr_t)kcb;

quit:
    if (error != 0) {
        kcb_delete(kcb);
        so->so_pcb = 0;
    }
    return error;
}
static int
ctl_sofreelastref(struct socket *so)
{
    struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

    so->so_pcb = 0;

    if (kcb != 0) {
        struct kctl *kctl;

        if ((kctl = kcb->kctl) != 0) {
            lck_mtx_lock(&ctl_mtx);
            TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
            kctlstat.kcs_pcbcount--;
            kctlstat.kcs_gencnt++;
            lck_mtx_unlock(&ctl_mtx);
        }
        kcb_delete(kcb);
    }
    sofreelastref(so, 1);
    return 0;
}
/*
 * Use this function and ctl_kcb_require_clearing to serialize
 * critical calls into the kctl subsystem
 */
static void
ctl_kcb_increment_use_count(struct ctl_cb *kcb, lck_mtx_t *mutex_held)
{
    LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED);
    while (kcb->require_clearing_count > 0) {
        msleep(&kcb->require_clearing_count, mutex_held, PSOCK | PCATCH, "kcb_require_clearing", NULL);
    }
    kcb->kcb_usecount++;
}

static void
ctl_kcb_require_clearing(struct ctl_cb *kcb, lck_mtx_t *mutex_held)
{
    assert(kcb->kcb_usecount != 0);
    kcb->require_clearing_count++;
    kcb->kcb_usecount--; // Remove my own count from the current usecount
    while (kcb->kcb_usecount > 0) { // we need to wait until no one else is running
        msleep(&kcb->kcb_usecount, mutex_held, PSOCK | PCATCH, "kcb_usecount", NULL);
    }
    kcb->kcb_usecount++;
}

static void
ctl_kcb_done_clearing(struct ctl_cb *kcb)
{
    assert(kcb->require_clearing_count != 0);
    kcb->require_clearing_count--;
    wakeup((caddr_t)&kcb->require_clearing_count);
}

static void
ctl_kcb_decrement_use_count(struct ctl_cb *kcb)
{
    assert(kcb->kcb_usecount != 0);
    kcb->kcb_usecount--;
    wakeup((caddr_t)&kcb->kcb_usecount);
}
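/*
 * Typical usage of the helpers above (sketch, mirroring ctl_bind and
 * ctl_disconnect below): a caller holds the per-socket lock, marks the kcb
 * busy, optionally drains the other users, and undoes both on the way out.
 *
 *	lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
 *	ctl_kcb_increment_use_count(kcb, mtx_held);
 *	ctl_kcb_require_clearing(kcb, mtx_held);
 *	... critical kctl work ...
 *	ctl_kcb_done_clearing(kcb);
 *	ctl_kcb_decrement_use_count(kcb);
 */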
static int
ctl_detach(struct socket *so)
{
    struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

    if (kcb == 0) {
        return 0;
    }

    lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
    ctl_kcb_increment_use_count(kcb, mtx_held);
    ctl_kcb_require_clearing(kcb, mtx_held);

    if (kcb->kctl != NULL && kcb->kctl->bind != NULL &&
        kcb->userdata != NULL && !(so->so_state & SS_ISCONNECTED)) {
        // The unit was bound, but not connected
        // Invoke the disconnected call to cleanup
        if (kcb->kctl->disconnect != NULL) {
            socket_unlock(so, 0);
            (*kcb->kctl->disconnect)(kcb->kctl->kctlref,
                kcb->sac.sc_unit, kcb->userdata);
            socket_lock(so, 0);
        }
    }

    soisdisconnected(so);
#if DEVELOPMENT || DEBUG
    kcb->status = KCTL_DISCONNECTED;
#endif /* DEVELOPMENT || DEBUG */
    so->so_flags |= SOF_PCBCLEARING;
    ctl_kcb_done_clearing(kcb);
    ctl_kcb_decrement_use_count(kcb);
    return 0;
}
static int
ctl_setup_kctl(struct socket *so, struct sockaddr *nam, struct proc *p)
{
    struct kctl *kctl = NULL;
    int error = 0;
    struct sockaddr_ctl sa;
    struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
    struct ctl_cb *kcb_next = NULL;
    u_int32_t recvbufsize, sendbufsize;
    u_quad_t sbmaxsize;

    if (kcb == 0) {
        panic("ctl_setup_kctl so_pcb null\n");
    }

    if (kcb->kctl != NULL) {
        // Already set up, skip
        return 0;
    }

    if (nam->sa_len != sizeof(struct sockaddr_ctl)) {
        return EINVAL;
    }

    bcopy(nam, &sa, sizeof(struct sockaddr_ctl));

    lck_mtx_lock(&ctl_mtx);
    kctl = ctl_find_by_id_unit(sa.sc_id, sa.sc_unit);
    if (kctl == NULL) {
        lck_mtx_unlock(&ctl_mtx);
        return ENOENT;
    }

    if (((kctl->flags & CTL_FLAG_REG_SOCK_STREAM) &&
        (so->so_type != SOCK_STREAM)) ||
        (!(kctl->flags & CTL_FLAG_REG_SOCK_STREAM) &&
        (so->so_type != SOCK_DGRAM))) {
        lck_mtx_unlock(&ctl_mtx);
        return EPROTOTYPE;
    }

    if (kctl->flags & CTL_FLAG_PRIVILEGED) {
        if (p == 0) {
            lck_mtx_unlock(&ctl_mtx);
            return EINVAL;
        }
        if (kauth_cred_issuser(kauth_cred_get()) == 0) {
            lck_mtx_unlock(&ctl_mtx);
            return EPERM;
        }
    }

    if ((kctl->flags & CTL_FLAG_REG_ID_UNIT) || sa.sc_unit != 0) {
        if (kcb_find(kctl, sa.sc_unit) != NULL) {
            lck_mtx_unlock(&ctl_mtx);
            return EBUSY;
        }
    } else if (kctl->setup != NULL) {
        error = (*kctl->setup)(&sa.sc_unit, &kcb->userdata);
        if (error != 0) {
            lck_mtx_unlock(&ctl_mtx);
            return error;
        }
    } else {
        /* Find an unused ID, assumes control IDs are in order */
        u_int32_t unit = 1;

        TAILQ_FOREACH(kcb_next, &kctl->kcb_head, next) {
            if (kcb_next->sac.sc_unit > unit) {
                /* Found a gap, lets fill it in */
                break;
            }
            unit = kcb_next->sac.sc_unit + 1;
            if (unit == ctl_maxunit) {
                break;
            }
        }

        if (unit == ctl_maxunit) {
            lck_mtx_unlock(&ctl_mtx);
            return EBUSY;
        }

        sa.sc_unit = unit;
    }

    bcopy(&sa, &kcb->sac, sizeof(struct sockaddr_ctl));
    kcb->kctl = kctl;
    if (kcb_next != NULL) {
        TAILQ_INSERT_BEFORE(kcb_next, kcb, next);
    } else {
        TAILQ_INSERT_TAIL(&kctl->kcb_head, kcb, next);
    }
    kctlstat.kcs_pcbcount++;
    kctlstat.kcs_gencnt++;
    kctlstat.kcs_connections++;
    lck_mtx_unlock(&ctl_mtx);

    /*
     * rdar://15526688: Limit the send and receive sizes to sb_max
     * by using the same scaling as sbreserve()
     */
    sbmaxsize = (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES);

    if (kctl->sendbufsize > sbmaxsize) {
        sendbufsize = (u_int32_t)sbmaxsize;
    } else {
        sendbufsize = kctl->sendbufsize;
    }

    if (kctl->recvbufsize > sbmaxsize) {
        recvbufsize = (u_int32_t)sbmaxsize;
    } else {
        recvbufsize = kctl->recvbufsize;
    }

    error = soreserve(so, sendbufsize, recvbufsize);
    if (error) {
        if (ctl_debug) {
            printf("%s - soreserve(%llx, %u, %u) error %d\n",
                __func__, (uint64_t)VM_KERNEL_ADDRPERM(so),
                sendbufsize, recvbufsize, error);
        }
    }

    if (error) {
        soisdisconnected(so);
#if DEVELOPMENT || DEBUG
        kcb->status = KCTL_DISCONNECTED;
#endif /* DEVELOPMENT || DEBUG */
        lck_mtx_lock(&ctl_mtx);
        TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
        kcb->kctl = NULL;
        kcb->sac.sc_unit = 0;
        kctlstat.kcs_pcbcount--;
        kctlstat.kcs_gencnt++;
        kctlstat.kcs_conn_fail++;
        lck_mtx_unlock(&ctl_mtx);
    }
    return error;
}
static int
ctl_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
{
    int error = 0;
    struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

    if (kcb == NULL) {
        panic("ctl_bind so_pcb null\n");
    }

    lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
    ctl_kcb_increment_use_count(kcb, mtx_held);
    ctl_kcb_require_clearing(kcb, mtx_held);

    error = ctl_setup_kctl(so, nam, p);
    if (error) {
        goto out;
    }

    if (kcb->kctl == NULL) {
        panic("ctl_bind kctl null\n");
    }

    if (kcb->kctl->bind == NULL) {
        error = EINVAL;
        goto out;
    }

    socket_unlock(so, 0);
    error = (*kcb->kctl->bind)(kcb->kctl->kctlref, &kcb->sac, &kcb->userdata);
    socket_lock(so, 0);

out:
    ctl_kcb_done_clearing(kcb);
    ctl_kcb_decrement_use_count(kcb);
    return error;
}
static int
ctl_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
{
    int error = 0;
    struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

    if (kcb == NULL) {
        panic("ctl_connect so_pcb null\n");
    }

    lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
    ctl_kcb_increment_use_count(kcb, mtx_held);
    ctl_kcb_require_clearing(kcb, mtx_held);

#if DEVELOPMENT || DEBUG
    if (kcb->status != KCTL_DISCONNECTED && ctl_panic_debug) {
        panic("kctl already connecting/connected");
    }
    kcb->status = KCTL_CONNECTING;
#endif /* DEVELOPMENT || DEBUG */

    error = ctl_setup_kctl(so, nam, p);
    if (error) {
        goto out;
    }

    if (kcb->kctl == NULL) {
        panic("ctl_connect kctl null\n");
    }

    soisconnecting(so);
    socket_unlock(so, 0);
    error = (*kcb->kctl->connect)(kcb->kctl->kctlref, &kcb->sac, &kcb->userdata);
    socket_lock(so, 0);
    if (error) {
        goto end;
    }
    soisconnected(so);
#if DEVELOPMENT || DEBUG
    kcb->status = KCTL_CONNECTED;
#endif /* DEVELOPMENT || DEBUG */

end:
    if (error && kcb->kctl->disconnect) {
        /*
         * XXX Do not check the return value of disconnect here:
         * ipsec/utun_ctl_disconnect will return an error when
         * disconnect gets called after a connect failure.
         * If we ever decide to check the disconnect return
         * value here, please make sure to revisit
         * ipsec/utun_ctl_disconnect.
         */
        socket_unlock(so, 0);
        (*kcb->kctl->disconnect)(kcb->kctl->kctlref, kcb->sac.sc_unit, kcb->userdata);
        socket_lock(so, 0);
    }
    if (error) {
        soisdisconnected(so);
#if DEVELOPMENT || DEBUG
        kcb->status = KCTL_DISCONNECTED;
#endif /* DEVELOPMENT || DEBUG */
        lck_mtx_lock(&ctl_mtx);
        TAILQ_REMOVE(&kcb->kctl->kcb_head, kcb, next);
        kcb->kctl = NULL;
        kcb->sac.sc_unit = 0;
        kctlstat.kcs_pcbcount--;
        kctlstat.kcs_gencnt++;
        kctlstat.kcs_conn_fail++;
        lck_mtx_unlock(&ctl_mtx);
    }
out:
    ctl_kcb_done_clearing(kcb);
    ctl_kcb_decrement_use_count(kcb);
    return error;
}
static int
ctl_disconnect(struct socket *so)
{
    struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

    if ((kcb = (struct ctl_cb *)so->so_pcb)) {
        lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
        ctl_kcb_increment_use_count(kcb, mtx_held);
        ctl_kcb_require_clearing(kcb, mtx_held);
        struct kctl *kctl = kcb->kctl;

        if (kctl && kctl->disconnect) {
            socket_unlock(so, 0);
            (*kctl->disconnect)(kctl->kctlref, kcb->sac.sc_unit,
                kcb->userdata);
            socket_lock(so, 0);
        }

        soisdisconnected(so);
#if DEVELOPMENT || DEBUG
        kcb->status = KCTL_DISCONNECTED;
#endif /* DEVELOPMENT || DEBUG */

        socket_unlock(so, 0);
        lck_mtx_lock(&ctl_mtx);
        kcb->kctl = 0;
        kcb->sac.sc_unit = 0;
        while (kcb->usecount != 0) {
            msleep(&kcb->usecount, &ctl_mtx, 0, "kcb->usecount", 0);
        }
        TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
        kctlstat.kcs_pcbcount--;
        kctlstat.kcs_gencnt++;
        lck_mtx_unlock(&ctl_mtx);
        socket_lock(so, 0);
        ctl_kcb_done_clearing(kcb);
        ctl_kcb_decrement_use_count(kcb);
    }
    return 0;
}
static int
ctl_peeraddr(struct socket *so, struct sockaddr **nam)
{
    struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
    struct kctl *kctl;
    struct sockaddr_ctl sc;

    if (kcb == NULL) {      /* sanity check */
        return ENOTCONN;
    }

    if ((kctl = kcb->kctl) == NULL) {
        return EINVAL;
    }

    bzero(&sc, sizeof(struct sockaddr_ctl));
    sc.sc_len = sizeof(struct sockaddr_ctl);
    sc.sc_family = AF_SYSTEM;
    sc.ss_sysaddr = AF_SYS_CONTROL;
    sc.sc_id = kctl->id;
    sc.sc_unit = kcb->sac.sc_unit;

    *nam = dup_sockaddr((struct sockaddr *)&sc, 1);

    return 0;
}
static void
ctl_sbrcv_trim(struct socket *so)
{
    struct sockbuf *sb = &so->so_rcv;

    if (sb->sb_hiwat > sb->sb_idealsize) {
        u_int32_t diff;
        int32_t trim;

        /*
         * The difference between the ideal size and the
         * current size is the upper bound of the trim
         */
        diff = sb->sb_hiwat - sb->sb_idealsize;
        /*
         * We cannot trim below the outstanding data
         */
        trim = sb->sb_hiwat - sb->sb_cc;

        trim = imin(trim, (int32_t)diff);

        if (trim > 0) {
            sbreserve(sb, (sb->sb_hiwat - trim));

            if (ctl_debug) {
                printf("%s - shrunk to %d\n",
                    __func__, sb->sb_hiwat);
            }
        }
    }
}
static int
ctl_usr_rcvd(struct socket *so, int flags)
{
    int error = 0;
    struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
    struct kctl *kctl;

    if (kcb == NULL) {
        return ENOTCONN;
    }

    lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
    ctl_kcb_increment_use_count(kcb, mtx_held);

    if ((kctl = kcb->kctl) == NULL) {
        error = EINVAL;
        goto out;
    }

    if (kctl->rcvd) {
        socket_unlock(so, 0);
        (*kctl->rcvd)(kctl->kctlref, kcb->sac.sc_unit, kcb->userdata, flags);
        socket_lock(so, 0);
    }

    ctl_sbrcv_trim(so);

out:
    ctl_kcb_decrement_use_count(kcb);
    return error;
}
static int
ctl_send(struct socket *so, int flags, struct mbuf *m,
    struct sockaddr *addr, struct mbuf *control,
    struct proc *p)
{
#pragma unused(addr, p)
    int error = 0;
    struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
    struct kctl *kctl;

    if (control) {
        m_freem(control);
    }

    if (kcb == NULL) {      /* sanity check */
        error = ENOTCONN;
    }

    lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
    ctl_kcb_increment_use_count(kcb, mtx_held);

    if (error == 0 && (kctl = kcb->kctl) == NULL) {
        error = EINVAL;
    }

    if (error == 0 && kctl->send) {
        so_tc_update_stats(m, so, m_get_service_class(m));
        socket_unlock(so, 0);
        error = (*kctl->send)(kctl->kctlref, kcb->sac.sc_unit, kcb->userdata,
            m, flags);
        socket_lock(so, 0);
    } else {
        m_freem(m);
        if (error == 0) {
            error = ENOTSUP;
        }
    }
    if (error != 0) {
        OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_send_fail);
    }
    ctl_kcb_decrement_use_count(kcb);

    return error;
}
static int
ctl_send_list(struct socket *so, int flags, struct mbuf *m,
    __unused struct sockaddr *addr, struct mbuf *control,
    __unused struct proc *p)
{
    int error = 0;
    struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
    struct kctl *kctl;

    if (control) {
        m_freem_list(control);
    }

    if (kcb == NULL) {      /* sanity check */
        error = ENOTCONN;
    }

    lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
    ctl_kcb_increment_use_count(kcb, mtx_held);

    if (error == 0 && (kctl = kcb->kctl) == NULL) {
        error = EINVAL;
    }

    if (error == 0 && kctl->send_list) {
        struct mbuf *nxt;

        for (nxt = m; nxt != NULL; nxt = nxt->m_nextpkt) {
            so_tc_update_stats(nxt, so, m_get_service_class(nxt));
        }

        socket_unlock(so, 0);
        error = (*kctl->send_list)(kctl->kctlref, kcb->sac.sc_unit,
            kcb->userdata, m, flags);
        socket_lock(so, 0);
    } else if (error == 0 && kctl->send) {
        while (m != NULL && error == 0) {
            struct mbuf *nextpkt = m->m_nextpkt;

            m->m_nextpkt = NULL;
            so_tc_update_stats(m, so, m_get_service_class(m));
            socket_unlock(so, 0);
            error = (*kctl->send)(kctl->kctlref, kcb->sac.sc_unit,
                kcb->userdata, m, flags);
            socket_lock(so, 0);
            m = nextpkt;
        }
        if (m != NULL) {
            m_freem_list(m);
        }
    } else {
        m_freem_list(m);
        if (error == 0) {
            error = ENOTSUP;
        }
    }
    if (error != 0) {
        OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_send_list_fail);
    }
    ctl_kcb_decrement_use_count(kcb);

    return error;
}
static errno_t
ctl_rcvbspace(struct socket *so, size_t datasize,
    u_int32_t kctlflags, u_int32_t flags)
{
    struct sockbuf *sb = &so->so_rcv;
    u_int32_t space = sbspace(sb);
    errno_t error;

    if ((kctlflags & CTL_FLAG_REG_CRIT) == 0) {
        if ((u_int32_t) space >= datasize) {
            error = 0;
        } else {
            error = ENOBUFS;
        }
    } else if ((flags & CTL_DATA_CRIT) == 0) {
        /*
         * Reserve 25% for critical messages
         */
        if (space < (sb->sb_hiwat >> 2) ||
            space < datasize) {
            error = ENOBUFS;
        } else {
            error = 0;
        }
    } else {
        size_t autorcvbuf_max;

        /*
         * Allow overcommit of 25%
         */
        autorcvbuf_max = min(sb->sb_idealsize + (sb->sb_idealsize >> 2),
            ctl_autorcvbuf_max);

        if ((u_int32_t) space >= datasize) {
            error = 0;
        } else if (tcp_cansbgrow(sb) &&
            sb->sb_hiwat < autorcvbuf_max) {
            /*
             * Grow with a little bit of leeway
             */
            size_t grow = datasize - space + MSIZE;
            u_int32_t cc = (u_int32_t)MIN(MIN((sb->sb_hiwat + grow), autorcvbuf_max), UINT32_MAX);

            if (sbreserve(sb, cc) == 1) {
                if (sb->sb_hiwat > ctl_autorcvbuf_high) {
                    ctl_autorcvbuf_high = sb->sb_hiwat;
                }

                if ((u_int32_t) sbspace(sb) >= datasize) {
                    error = 0;
                } else {
                    error = ENOBUFS;
                }

                if (ctl_debug) {
                    printf("%s - grown to %d error %d\n",
                        __func__, sb->sb_hiwat, error);
                }
            } else {
                error = ENOBUFS;
            }
        } else {
            error = ENOBUFS;
        }
    }
    return error;
}
errno_t
ctl_enqueuembuf(kern_ctl_ref kctlref, u_int32_t unit, struct mbuf *m,
    u_int32_t flags)
{
    struct socket *so;
    errno_t error = 0;
    int len = m->m_pkthdr.len;
    u_int32_t kctlflags;

    so = kcb_find_socket(kctlref, unit, &kctlflags);
    if (so == NULL) {
        return EINVAL;
    }

    if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
        error = ENOBUFS;
        OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
        goto bye;
    }
    if ((flags & CTL_DATA_EOR)) {
        m->m_flags |= M_EOR;
    }

    so_recv_data_stat(so, m, 0);
    if (sbappend_nodrop(&so->so_rcv, m) != 0) {
        if ((flags & CTL_DATA_NOWAKEUP) == 0) {
            sorwakeup(so);
        }
    } else {
        error = ENOBUFS;
        OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
    }
bye:
    if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) {
        printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
            __func__, error, len,
            so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);
    }

    socket_unlock(so, 1);
    if (error != 0) {
        OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
    }

    return error;
}
/*
 * Compute space occupied by mbuf like sbappendrecord
 */
static int
m_space(struct mbuf *m)
{
    int space = 0;
    struct mbuf *nxt;

    for (nxt = m; nxt != NULL; nxt = nxt->m_next) {
        space += nxt->m_len;
    }

    return space;
}
errno_t
ctl_enqueuembuf_list(void *kctlref, u_int32_t unit, struct mbuf *m_list,
    u_int32_t flags, struct mbuf **m_remain)
{
    struct socket *so = NULL;
    errno_t error = 0;
    struct mbuf *m, *nextpkt;
    int needwakeup = 0;
    int len = 0;
    u_int32_t kctlflags;

    /*
     * Need to point the beginning of the list in case of early exit
     */
    m = m_list;

    /*
     * kcb_find_socket takes the socket lock with a reference
     */
    so = kcb_find_socket(kctlref, unit, &kctlflags);
    if (so == NULL) {
        error = EINVAL;
        goto done;
    }

    if (kctlflags & CTL_FLAG_REG_SOCK_STREAM) {
        error = EOPNOTSUPP;
        goto done;
    }
    if (flags & CTL_DATA_EOR) {
        error = EINVAL;
        goto done;
    }

    for (m = m_list; m != NULL; m = nextpkt) {
        nextpkt = m->m_nextpkt;

        if (m->m_pkthdr.len == 0 && ctl_debug) {
            printf("%s: %llx m_pkthdr.len is 0",
                __func__, (uint64_t)VM_KERNEL_ADDRPERM(m));
        }

        /*
         * The mbuf is either appended or freed by sbappendrecord()
         * so it's not reliable from a data standpoint
         */
        len = m_space(m);
        if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
            error = ENOBUFS;
            OSIncrementAtomic64(
                (SInt64 *)&kctlstat.kcs_enqueue_fullsock);
            break;
        } else {
            /*
             * Unlink from the list, m is on its own
             */
            m->m_nextpkt = NULL;
            so_recv_data_stat(so, m, 0);
            if (sbappendrecord_nodrop(&so->so_rcv, m) != 0) {
                needwakeup = 1;
            } else {
                /*
                 * We free or return the remaining
                 * mbufs in the list
                 */
                error = ENOBUFS;
                OSIncrementAtomic64(
                    (SInt64 *)&kctlstat.kcs_enqueue_fullsock);
                break;
            }
        }
    }

    if (needwakeup && (flags & CTL_DATA_NOWAKEUP) == 0) {
        sorwakeup(so);
    }

done:
    if (so != NULL) {
        if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) {
            printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
                __func__, error, len,
                so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);
        }

        socket_unlock(so, 1);
    }
    if (m != NULL) {
        if (m_remain) {
            *m_remain = m;

            if (m != NULL && socket_debug && so != NULL &&
                (so->so_options & SO_DEBUG)) {
                struct mbuf *n;

                printf("%s m_list %llx\n", __func__,
                    (uint64_t) VM_KERNEL_ADDRPERM(m_list));
                for (n = m; n != NULL; n = n->m_nextpkt) {
                    printf(" remain %llx m_next %llx\n",
                        (uint64_t) VM_KERNEL_ADDRPERM(n),
                        (uint64_t) VM_KERNEL_ADDRPERM(n->m_next));
                }
            }
        } else {
            m_freem_list(m);
        }
    }
    if (error != 0) {
        OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
    }
    return error;
}
errno_t
ctl_enqueuedata(void *kctlref, u_int32_t unit, void *data, size_t len,
    u_int32_t flags)
{
    struct socket *so;
    struct mbuf *m;
    errno_t error = 0;
    unsigned int num_needed;
    struct mbuf *n;
    size_t curlen = 0;
    u_int32_t kctlflags;

    so = kcb_find_socket(kctlref, unit, &kctlflags);
    if (so == NULL) {
        return EINVAL;
    }

    if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
        error = ENOBUFS;
        OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
        goto bye;
    }

    num_needed = 1;
    m = m_allocpacket_internal(&num_needed, len, NULL, M_NOWAIT, 1, 0);
    if (m == NULL) {
        kctlstat.kcs_enqdata_mb_alloc_fail++;
        if (ctl_debug) {
            printf("%s: m_allocpacket_internal(%lu) failed\n",
                __func__, len);
        }
        error = ENOMEM;
        goto bye;
    }

    for (n = m; n != NULL; n = n->m_next) {
        size_t mlen = mbuf_maxlen(n);

        if (mlen + curlen > len) {
            mlen = len - curlen;
        }
        n->m_len = (int32_t)mlen;
        bcopy((char *)data + curlen, n->m_data, mlen);
        curlen += mlen;
    }
    mbuf_pkthdr_setlen(m, curlen);

    if ((flags & CTL_DATA_EOR)) {
        m->m_flags |= M_EOR;
    }
    so_recv_data_stat(so, m, 0);
    /*
     * No need to call the "nodrop" variant of sbappend
     * because the mbuf is local to the scope of the function
     */
    if (sbappend(&so->so_rcv, m) != 0) {
        if ((flags & CTL_DATA_NOWAKEUP) == 0) {
            sorwakeup(so);
        }
    } else {
        kctlstat.kcs_enqdata_sbappend_fail++;
        error = ENOBUFS;
        OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
    }

bye:
    if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) {
        printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
            __func__, error, (int)len,
            so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);
    }

    socket_unlock(so, 1);
    if (error != 0) {
        OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
    }
    return error;
}
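/*
 * Kext-side sketch (illustrative only, placeholder names and payload): once
 * a client is connected, data is pushed to it with the enqueue KPIs above.
 *
 *	static void
 *	example_notify(kern_ctl_ref ref, u_int32_t unit)
 *	{
 *		const char msg[] = "hello";
 *		errno_t err = ctl_enqueuedata(ref, unit, (void *)msg,
 *		    sizeof(msg), CTL_DATA_EOR);
 *		if (err == ENOBUFS) {
 *			// client receive buffer is full; retry later
 *		}
 *	}
 */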
errno_t
ctl_getenqueuepacketcount(kern_ctl_ref kctlref, u_int32_t unit, u_int32_t *pcnt)
{
    struct socket *so;
    u_int32_t cnt;
    struct mbuf *m1;

    if (pcnt == NULL) {
        return EINVAL;
    }

    so = kcb_find_socket(kctlref, unit, NULL);
    if (so == NULL) {
        return EINVAL;
    }

    cnt = 0;
    m1 = so->so_rcv.sb_mb;
    while (m1 != NULL) {
        if (m1->m_type == MT_DATA ||
            m1->m_type == MT_HEADER ||
            m1->m_type == MT_OOBDATA) {
            cnt += 1;
        }
        m1 = m1->m_nextpkt;
    }
    *pcnt = cnt;

    socket_unlock(so, 1);

    return 0;
}
errno_t
ctl_getenqueuespace(kern_ctl_ref kctlref, u_int32_t unit, size_t *space)
{
    struct socket *so;
    long avail;

    if (space == NULL) {
        return EINVAL;
    }

    so = kcb_find_socket(kctlref, unit, NULL);
    if (so == NULL) {
        return EINVAL;
    }

    avail = sbspace(&so->so_rcv);
    *space = (avail < 0) ? 0 : avail;
    socket_unlock(so, 1);

    return 0;
}
errno_t
ctl_getenqueuereadable(kern_ctl_ref kctlref, u_int32_t unit,
    u_int32_t *difference)
{
    struct socket *so;

    if (difference == NULL) {
        return EINVAL;
    }

    so = kcb_find_socket(kctlref, unit, NULL);
    if (so == NULL) {
        return EINVAL;
    }

    if (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat) {
        *difference = 0;
    } else {
        *difference = (so->so_rcv.sb_lowat - so->so_rcv.sb_cc);
    }
    socket_unlock(so, 1);

    return 0;
}
static int
ctl_ctloutput(struct socket *so, struct sockopt *sopt)
{
    struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
    struct kctl *kctl;
    int error = 0;
    void *data = NULL;
    size_t len;
    size_t data_len = 0;

    if (sopt->sopt_level != SYSPROTO_CONTROL) {
        return EINVAL;
    }

    if (kcb == NULL) {      /* sanity check */
        return ENOTCONN;
    }

    if ((kctl = kcb->kctl) == NULL) {
        return EINVAL;
    }

    lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
    ctl_kcb_increment_use_count(kcb, mtx_held);

    switch (sopt->sopt_dir) {
    case SOPT_SET:
        if (kctl->setopt == NULL) {
            error = ENOTSUP;
            goto out;
        }
        if (sopt->sopt_valsize != 0) {
            data_len = sopt->sopt_valsize;
            data = kheap_alloc(KHEAP_TEMP, data_len, Z_WAITOK | Z_ZERO);
            if (data == NULL) {
                data_len = 0;
                error = ENOMEM;
                goto out;
            }
            error = sooptcopyin(sopt, data,
                sopt->sopt_valsize, sopt->sopt_valsize);
        }
        if (error == 0) {
            socket_unlock(so, 0);
            error = (*kctl->setopt)(kctl->kctlref,
                kcb->sac.sc_unit, kcb->userdata, sopt->sopt_name,
                data, sopt->sopt_valsize);
            socket_lock(so, 0);
        }

        kheap_free(KHEAP_TEMP, data, data_len);
        break;

    case SOPT_GET:
        if (kctl->getopt == NULL) {
            error = ENOTSUP;
            goto out;
        }

        if (sopt->sopt_valsize && sopt->sopt_val) {
            data_len = sopt->sopt_valsize;
            data = kheap_alloc(KHEAP_TEMP, data_len, Z_WAITOK | Z_ZERO);
            if (data == NULL) {
                data_len = 0;
                error = ENOMEM;
                goto out;
            }
            /*
             * 4108337 - copy user data in case the
             * kernel control needs it
             */
            error = sooptcopyin(sopt, data,
                sopt->sopt_valsize, sopt->sopt_valsize);
        }

        if (error == 0) {
            len = sopt->sopt_valsize;
            socket_unlock(so, 0);
            error = (*kctl->getopt)(kctl->kctlref, kcb->sac.sc_unit,
                kcb->userdata, sopt->sopt_name,
                data, &len);
            if (data != NULL && len > sopt->sopt_valsize) {
                panic_plain("ctl_ctloutput: ctl %s returned "
                    "len (%lu) > sopt_valsize (%lu)\n",
                    kcb->kctl->name, len,
                    sopt->sopt_valsize);
            }
            socket_lock(so, 0);
            if (error == 0) {
                if (data != NULL) {
                    error = sooptcopyout(sopt, data, len);
                } else {
                    sopt->sopt_valsize = len;
                }
            }
        }

        kheap_free(KHEAP_TEMP, data, data_len);
        break;
    }

out:
    ctl_kcb_decrement_use_count(kcb);
    return error;
}
static int
ctl_ioctl(struct socket *so, u_long cmd, caddr_t data,
    struct ifnet *ifp, struct proc *p)
{
#pragma unused(so, ifp, p)
    int error = ENOTSUP;

    switch (cmd) {
    /* get the number of controllers */
    case CTLIOCGCOUNT: {
        struct kctl *kctl;
        u_int32_t n = 0;

        lck_mtx_lock(&ctl_mtx);
        TAILQ_FOREACH(kctl, &ctl_head, next)
        n++;
        lck_mtx_unlock(&ctl_mtx);

        bcopy(&n, data, sizeof(n));
        error = 0;
        break;
    }
    case CTLIOCGINFO: {
        struct ctl_info ctl_info;
        struct kctl *kctl = 0;
        size_t name_len;

        bcopy(data, &ctl_info, sizeof(ctl_info));
        name_len = strnlen(ctl_info.ctl_name, MAX_KCTL_NAME);

        if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) {
            error = EINVAL;
            break;
        }
        lck_mtx_lock(&ctl_mtx);
        kctl = ctl_find_by_name(ctl_info.ctl_name);
        lck_mtx_unlock(&ctl_mtx);
        if (kctl == 0) {
            error = ENOENT;
            break;
        }
        ctl_info.ctl_id = kctl->id;
        bcopy(&ctl_info, data, sizeof(ctl_info));
        error = 0;
        break;
    }

        /* add controls to get list of NKEs */
    }

    return error;
}
static void
kctl_tbl_grow(void)
{
    struct kctl **new_table;
    uintptr_t new_size;

    lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_OWNED);

    if (kctl_tbl_growing) {
        /* Another thread is allocating */
        kctl_tbl_growing_waiting++;

        do {
            (void) msleep((caddr_t) &kctl_tbl_growing, &ctl_mtx,
                PSOCK | PCATCH, "kctl_tbl_growing", 0);
        } while (kctl_tbl_growing);
        kctl_tbl_growing_waiting--;
    }
    /* Another thread grew the table */
    if (kctl_table != NULL && kctl_tbl_count < kctl_tbl_size) {
        return;
    }

    /* Verify we have a sane size */
    if (kctl_tbl_size + KCTL_TBL_INC >= UINT16_MAX) {
        kctlstat.kcs_tbl_size_too_big++;
        if (ctl_debug) {
            printf("%s kctl_tbl_size %lu too big\n",
                __func__, kctl_tbl_size);
        }
        return;
    }
    kctl_tbl_growing = 1;

    new_size = kctl_tbl_size + KCTL_TBL_INC;

    lck_mtx_unlock(&ctl_mtx);
    new_table = kheap_alloc(KHEAP_DEFAULT, sizeof(struct kctl *) * new_size,
        Z_WAITOK | Z_ZERO);
    lck_mtx_lock(&ctl_mtx);

    if (new_table != NULL) {
        if (kctl_table != NULL) {
            bcopy(kctl_table, new_table,
                kctl_tbl_size * sizeof(struct kctl *));

            kheap_free(KHEAP_DEFAULT, kctl_table,
                sizeof(struct kctl *) * kctl_tbl_size);
        }
        kctl_table = new_table;
        kctl_tbl_size = new_size;
    }

    kctl_tbl_growing = 0;

    if (kctl_tbl_growing_waiting) {
        wakeup(&kctl_tbl_growing);
    }
}
#define KCTLREF_INDEX_MASK 0x0000FFFF
#define KCTLREF_GENCNT_MASK 0xFFFF0000
#define KCTLREF_GENCNT_SHIFT 16

static kern_ctl_ref
kctl_make_ref(struct kctl *kctl)
{
    uintptr_t i;

    lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_OWNED);

    if (kctl_tbl_count >= kctl_tbl_size) {
        kctl_tbl_grow();
    }

    kctl->kctlref = NULL;
    for (i = 0; i < kctl_tbl_size; i++) {
        if (kctl_table[i] == NULL) {
            uintptr_t ref;

            /*
             * Reference is index plus one
             */
            kctl_ref_gencnt += 1;

            /*
             * Add generation count as salt to reference to prevent
             * use after deregister
             */
            ref = ((kctl_ref_gencnt << KCTLREF_GENCNT_SHIFT) &
                KCTLREF_GENCNT_MASK) +
                ((i + 1) & KCTLREF_INDEX_MASK);

            kctl->kctlref = (void *)(ref);
            kctl_table[i] = kctl;
            kctl_tbl_count++;
            break;
        }
    }

    if (kctl->kctlref == NULL) {
        panic("%s no space in table", __func__);
    }

    if (ctl_debug > 0) {
        printf("%s %p for %p\n",
            __func__, kctl->kctlref, kctl);
    }

    return kctl->kctlref;
}
static void
kctl_delete_ref(kern_ctl_ref kctlref)
{
    /*
     * Reference is index plus one
     */
    uintptr_t i = (((uintptr_t)kctlref) & KCTLREF_INDEX_MASK) - 1;

    lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_OWNED);

    if (i < kctl_tbl_size) {
        struct kctl *kctl = kctl_table[i];

        if (kctl->kctlref == kctlref) {
            kctl_table[i] = NULL;
            kctl_tbl_count--;
        } else {
            kctlstat.kcs_bad_kctlref++;
        }
    } else {
        kctlstat.kcs_bad_kctlref++;
    }
}
static struct kctl *
kctl_from_ref(kern_ctl_ref kctlref)
{
    /*
     * Reference is index plus one
     */
    uintptr_t i = (((uintptr_t)kctlref) & KCTLREF_INDEX_MASK) - 1;
    struct kctl *kctl = NULL;

    lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_OWNED);

    if (i >= kctl_tbl_size) {
        kctlstat.kcs_bad_kctlref++;
        return NULL;
    }
    kctl = kctl_table[i];
    if (kctl->kctlref != kctlref) {
        kctlstat.kcs_bad_kctlref++;
        return NULL;
    }
    return kctl;
}
/*
 * Register/unregister a NKE
 */
errno_t
ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref)
{
    struct kctl *kctl = NULL;
    struct kctl *kctl_next = NULL;
    u_int32_t id = 1;
    size_t name_len;
    int is_extended = 0;
    int is_setup = 0;

    if (userkctl == NULL) {  /* sanity check */
        return EINVAL;
    }
    if (userkctl->ctl_connect == NULL) {
        return EINVAL;
    }
    name_len = strlen(userkctl->ctl_name);
    if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) {
        return EINVAL;
    }

    kctl = kheap_alloc(KHEAP_DEFAULT, sizeof(struct kctl), Z_WAITOK | Z_ZERO);
    if (kctl == NULL) {
        return ENOMEM;
    }

    lck_mtx_lock(&ctl_mtx);

    if (kctl_make_ref(kctl) == NULL) {
        lck_mtx_unlock(&ctl_mtx);
        kheap_free(KHEAP_DEFAULT, kctl, sizeof(struct kctl));
        return ENOMEM;
    }

    /*
     * Kernel Control IDs
     *
     * CTL_FLAG_REG_ID_UNIT indicates the control ID and unit number are
     * static. If they do not exist, add them to the list in order. If the
     * flag is not set, we must find a new unique value. We assume the
     * list is in order. We find the last item in the list and add one. If
     * this leads to wrapping the id around, we start at the front of the
     * list and look for a gap.
     */

    if ((userkctl->ctl_flags & CTL_FLAG_REG_ID_UNIT) == 0) {
        /* Must dynamically assign an unused ID */

        /* Verify the same name isn't already registered */
        if (ctl_find_by_name(userkctl->ctl_name) != NULL) {
            kctl_delete_ref(kctl->kctlref);
            lck_mtx_unlock(&ctl_mtx);
            kheap_free(KHEAP_DEFAULT, kctl, sizeof(struct kctl));
            return EEXIST;
        }

        /* Start with 1 in case the list is empty */
        id = 1;
        kctl_next = TAILQ_LAST(&ctl_head, kctl_list);

        if (kctl_next != NULL) {
            /* List was not empty, add one to the last item */
            id = kctl_next->id + 1;
            kctl_next = NULL;

            /*
             * If this wrapped the id number, start looking at
             * the front of the list for an unused id.
             */
            if (id == 0) {
                /* Find the next unused ID */
                id = 1;

                TAILQ_FOREACH(kctl_next, &ctl_head, next) {
                    if (kctl_next->id > id) {
                        /* We found a gap */
                        break;
                    }

                    id = kctl_next->id + 1;
                }
            }
        }

        userkctl->ctl_id = id;
        kctl->id = id;
        kctl->reg_unit = -1;
    } else {
        TAILQ_FOREACH(kctl_next, &ctl_head, next) {
            if (kctl_next->id > userkctl->ctl_id) {
                break;
            }
        }

        if (ctl_find_by_id_unit(userkctl->ctl_id, userkctl->ctl_unit)) {
            kctl_delete_ref(kctl->kctlref);
            lck_mtx_unlock(&ctl_mtx);
            kheap_free(KHEAP_DEFAULT, kctl, sizeof(struct kctl));
            return EEXIST;
        }
        kctl->id = userkctl->ctl_id;
        kctl->reg_unit = userkctl->ctl_unit;
    }

    is_extended = (userkctl->ctl_flags & CTL_FLAG_REG_EXTENDED);
    is_setup = (userkctl->ctl_flags & CTL_FLAG_REG_SETUP);

    strlcpy(kctl->name, userkctl->ctl_name, MAX_KCTL_NAME);
    kctl->flags = userkctl->ctl_flags;

    /*
     * Let the caller know the default send and receive sizes
     */
    if (userkctl->ctl_sendsize == 0) {
        kctl->sendbufsize = CTL_SENDSIZE;
        userkctl->ctl_sendsize = kctl->sendbufsize;
    } else {
        kctl->sendbufsize = userkctl->ctl_sendsize;
    }
    if (userkctl->ctl_recvsize == 0) {
        kctl->recvbufsize = CTL_RECVSIZE;
        userkctl->ctl_recvsize = kctl->recvbufsize;
    } else {
        kctl->recvbufsize = userkctl->ctl_recvsize;
    }

    if (is_setup) {
        kctl->setup = userkctl->ctl_setup;
    }
    kctl->bind = userkctl->ctl_bind;
    kctl->connect = userkctl->ctl_connect;
    kctl->disconnect = userkctl->ctl_disconnect;
    kctl->send = userkctl->ctl_send;
    kctl->setopt = userkctl->ctl_setopt;
    kctl->getopt = userkctl->ctl_getopt;
    if (is_extended) {
        kctl->rcvd = userkctl->ctl_rcvd;
        kctl->send_list = userkctl->ctl_send_list;
    }

    TAILQ_INIT(&kctl->kcb_head);

    if (kctl_next) {
        TAILQ_INSERT_BEFORE(kctl_next, kctl, next);
    } else {
        TAILQ_INSERT_TAIL(&ctl_head, kctl, next);
    }

    kctlstat.kcs_reg_count++;
    kctlstat.kcs_gencnt++;

    lck_mtx_unlock(&ctl_mtx);

    *kctlref = kctl->kctlref;

    ctl_post_msg(KEV_CTL_REGISTERED, kctl->id);
    return 0;
}
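/*
 * Registration sketch (illustrative only, names are placeholders): a kernel
 * extension fills in a kern_ctl_reg and calls ctl_register() roughly as
 * follows; the connect callback is the only required dispatch function.
 *
 *	static kern_ctl_ref g_ref;
 *	struct kern_ctl_reg reg;
 *
 *	bzero(&reg, sizeof(reg));
 *	strlcpy(reg.ctl_name, "com.example.mykctl", sizeof(reg.ctl_name));
 *	reg.ctl_flags = 0;                      // dynamic id/unit assignment
 *	reg.ctl_connect = example_connect;      // required
 *	reg.ctl_disconnect = example_disconnect;
 *	reg.ctl_send = example_send;
 *	reg.ctl_setopt = example_setopt;
 *	reg.ctl_getopt = example_getopt;
 *	errno_t err = ctl_register(&reg, &g_ref);
 *	// ... later, once no clients remain: ctl_deregister(g_ref);
 */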
errno_t
ctl_deregister(void *kctlref)
{
    struct kctl *kctl;

    lck_mtx_lock(&ctl_mtx);
    if ((kctl = kctl_from_ref(kctlref)) == NULL) {
        kctlstat.kcs_bad_kctlref++;
        lck_mtx_unlock(&ctl_mtx);
        if (ctl_debug != 0) {
            printf("%s invalid kctlref %p\n",
                __func__, kctlref);
        }
        return EINVAL;
    }

    if (!TAILQ_EMPTY(&kctl->kcb_head)) {
        lck_mtx_unlock(&ctl_mtx);
        return EBUSY;
    }

    TAILQ_REMOVE(&ctl_head, kctl, next);

    kctlstat.kcs_reg_count--;
    kctlstat.kcs_gencnt++;

    kctl_delete_ref(kctl->kctlref);
    lck_mtx_unlock(&ctl_mtx);

    ctl_post_msg(KEV_CTL_DEREGISTERED, kctl->id);
    kheap_free(KHEAP_DEFAULT, kctl, sizeof(struct kctl));
    return 0;
}
/*
 * Must be called with global ctl_mtx lock taken
 */
static struct kctl *
ctl_find_by_name(const char *name)
{
    struct kctl *kctl;

    lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_OWNED);

    TAILQ_FOREACH(kctl, &ctl_head, next)
    if (strncmp(kctl->name, name, sizeof(kctl->name)) == 0) {
        return kctl;
    }

    return NULL;
}
u_int32_t
ctl_id_by_name(const char *name)
{
    u_int32_t ctl_id = 0;
    struct kctl *kctl;

    lck_mtx_lock(&ctl_mtx);
    kctl = ctl_find_by_name(name);
    if (kctl) {
        ctl_id = kctl->id;
    }
    lck_mtx_unlock(&ctl_mtx);

    return ctl_id;
}
errno_t
ctl_name_by_id(u_int32_t id, char *out_name, size_t maxsize)
{
    int found = 0;
    struct kctl *kctl;

    lck_mtx_lock(&ctl_mtx);
    TAILQ_FOREACH(kctl, &ctl_head, next) {
        if (kctl->id == id) {
            break;
        }
    }

    if (kctl) {
        if (maxsize > MAX_KCTL_NAME) {
            maxsize = MAX_KCTL_NAME;
        }
        strlcpy(out_name, kctl->name, maxsize);
        found = 1;
    }
    lck_mtx_unlock(&ctl_mtx);

    return found ? 0 : ENOENT;
}
/*
 * Must be called with global ctl_mtx lock taken
 */
static struct kctl *
ctl_find_by_id_unit(u_int32_t id, u_int32_t unit)
{
    struct kctl *kctl;

    lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_OWNED);

    TAILQ_FOREACH(kctl, &ctl_head, next) {
        if (kctl->id == id && (kctl->flags & CTL_FLAG_REG_ID_UNIT) == 0) {
            return kctl;
        } else if (kctl->id == id && kctl->reg_unit == unit) {
            return kctl;
        }
    }
    return NULL;
}
/*
 * Must be called with kernel controller lock taken
 */
static struct ctl_cb *
kcb_find(struct kctl *kctl, u_int32_t unit)
{
    struct ctl_cb *kcb;

    lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_OWNED);

    TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
    if (kcb->sac.sc_unit == unit) {
        return kcb;
    }

    return NULL;
}
static struct socket *
kcb_find_socket(kern_ctl_ref kctlref, u_int32_t unit, u_int32_t *kctlflags)
{
    struct socket *so = NULL;
    struct ctl_cb *kcb;
    void *lr_saved;
    struct kctl *kctl;
    int i;

    lr_saved = __builtin_return_address(0);

    lck_mtx_lock(&ctl_mtx);
    /*
     * First validate the kctlref
     */
    if ((kctl = kctl_from_ref(kctlref)) == NULL) {
        kctlstat.kcs_bad_kctlref++;
        lck_mtx_unlock(&ctl_mtx);
        if (ctl_debug != 0) {
            printf("%s invalid kctlref %p\n",
                __func__, kctlref);
        }
        return NULL;
    }

    kcb = kcb_find(kctl, unit);
    if (kcb == NULL || kcb->kctl != kctl || (so = kcb->so) == NULL) {
        lck_mtx_unlock(&ctl_mtx);
        return NULL;
    }
    /*
     * This prevents the socket from being closed
     */
    kcb->usecount++;
    /*
     * Respect lock ordering: socket before ctl_mtx
     */
    lck_mtx_unlock(&ctl_mtx);

    socket_lock(so, 1);
    /*
     * The socket lock history is more useful if we store
     * the address of the caller.
     */
    i = (so->next_lock_lr + SO_LCKDBG_MAX - 1) % SO_LCKDBG_MAX;
    so->lock_lr[i] = lr_saved;

    lck_mtx_lock(&ctl_mtx);

    if ((kctl = kctl_from_ref(kctlref)) == NULL || kcb->kctl == NULL) {
        lck_mtx_unlock(&ctl_mtx);
        socket_unlock(so, 1);
        so = NULL;
        lck_mtx_lock(&ctl_mtx);
    } else if (kctlflags != NULL) {
        *kctlflags = kctl->flags;
    }

    kcb->usecount--;
    if (kcb->usecount == 0) {
        wakeup((event_t)&kcb->usecount);
    }

    lck_mtx_unlock(&ctl_mtx);

    return so;
}
static void
ctl_post_msg(u_int32_t event_code, u_int32_t id)
{
    struct ctl_event_data ctl_ev_data;
    struct kev_msg ev_msg;

    lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_NOTOWNED);

    bzero(&ev_msg, sizeof(struct kev_msg));
    ev_msg.vendor_code = KEV_VENDOR_APPLE;

    ev_msg.kev_class = KEV_SYSTEM_CLASS;
    ev_msg.kev_subclass = KEV_CTL_SUBCLASS;
    ev_msg.event_code = event_code;

    /* common nke subclass data */
    bzero(&ctl_ev_data, sizeof(ctl_ev_data));
    ctl_ev_data.ctl_id = id;
    ev_msg.dv[0].data_ptr = &ctl_ev_data;
    ev_msg.dv[0].data_length = sizeof(ctl_ev_data);

    ev_msg.dv[1].data_length = 0;

    kev_post_msg(&ev_msg);
}
static int
ctl_lock(struct socket *so, int refcount, void *lr)
{
    void *lr_saved;

    if (lr == NULL) {
        lr_saved = __builtin_return_address(0);
    } else {
        lr_saved = lr;
    }

    if (so->so_pcb != NULL) {
        lck_mtx_lock(&((struct ctl_cb *)so->so_pcb)->mtx);
    } else {
        panic("ctl_lock: so=%p NO PCB! lr=%p lrh= %s\n",
            so, lr_saved, solockhistory_nr(so));
        /* NOTREACHED */
    }

    if (so->so_usecount < 0) {
        panic("ctl_lock: so=%p so_pcb=%p lr=%p ref=%x lrh= %s\n",
            so, so->so_pcb, lr_saved, so->so_usecount,
            solockhistory_nr(so));
        /* NOTREACHED */
    }

    if (refcount) {
        so->so_usecount++;
    }

    so->lock_lr[so->next_lock_lr] = lr_saved;
    so->next_lock_lr = (so->next_lock_lr + 1) % SO_LCKDBG_MAX;
    return 0;
}
static int
ctl_unlock(struct socket *so, int refcount, void *lr)
{
    void *lr_saved;
    lck_mtx_t *mutex_held;

    if (lr == NULL) {
        lr_saved = __builtin_return_address(0);
    } else {
        lr_saved = lr;
    }

#if (MORE_KCTLLOCK_DEBUG && (DEVELOPMENT || DEBUG))
    printf("ctl_unlock: so=%llx sopcb=%x lock=%llx ref=%u lr=%llx\n",
        (uint64_t)VM_KERNEL_ADDRPERM(so),
        (uint64_t)VM_KERNEL_ADDRPERM(so->so_pcb),
        (uint64_t)VM_KERNEL_ADDRPERM(&((struct ctl_cb *)so->so_pcb)->mtx),
        so->so_usecount, (uint64_t)VM_KERNEL_ADDRPERM(lr_saved));
#endif /* (MORE_KCTLLOCK_DEBUG && (DEVELOPMENT || DEBUG)) */
    if (refcount) {
        so->so_usecount--;
    }

    if (so->so_usecount < 0) {
        panic("ctl_unlock: so=%p usecount=%x lrh= %s\n",
            so, so->so_usecount, solockhistory_nr(so));
        /* NOTREACHED */
    }
    if (so->so_pcb == NULL) {
        panic("ctl_unlock: so=%p NO PCB usecount=%x lr=%p lrh= %s\n",
            so, so->so_usecount, (void *)lr_saved,
            solockhistory_nr(so));
        /* NOTREACHED */
    }
    mutex_held = &((struct ctl_cb *)so->so_pcb)->mtx;

    lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
    so->unlock_lr[so->next_unlock_lr] = lr_saved;
    so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX;
    lck_mtx_unlock(mutex_held);

    if (so->so_usecount == 0) {
        ctl_sofreelastref(so);
    }

    return 0;
}
static lck_mtx_t *
ctl_getlock(struct socket *so, int flags)
{
#pragma unused(flags)
    struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

    if (so->so_pcb) {
        if (so->so_usecount < 0) {
            panic("ctl_getlock: so=%p usecount=%x lrh= %s\n",
                so, so->so_usecount, solockhistory_nr(so));
        }
        return &kcb->mtx;
    } else {
        panic("ctl_getlock: so=%p NULL NO so_pcb %s\n",
            so, solockhistory_nr(so));
        return so->so_proto->pr_domain->dom_mtx;
    }
}
__private_extern__ int
kctl_reg_list SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
    int error = 0;
    u_int64_t i, n;
    struct xsystmgen xsg;
    void *buf = NULL;
    struct kctl *kctl;
    size_t item_size = ROUNDUP64(sizeof(struct xkctl_reg));

    buf = kheap_alloc(KHEAP_TEMP, item_size, Z_WAITOK | Z_ZERO);
    if (buf == NULL) {
        return ENOMEM;
    }

    lck_mtx_lock(&ctl_mtx);

    n = kctlstat.kcs_reg_count;

    if (req->oldptr == USER_ADDR_NULL) {
        req->oldidx = (size_t)(n + n / 8) * sizeof(struct xkctl_reg);
        goto done;
    }
    if (req->newptr != USER_ADDR_NULL) {
        error = EPERM;
        goto done;
    }
    bzero(&xsg, sizeof(xsg));
    xsg.xg_len = sizeof(xsg);
    xsg.xg_count = n;
    xsg.xg_gen = kctlstat.kcs_gencnt;
    xsg.xg_sogen = so_gencnt;
    error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
    if (error) {
        goto done;
    }
    /*
     * We are done if there is no pcb
     */
    if (n == 0) {
        goto done;
    }

    for (i = 0, kctl = TAILQ_FIRST(&ctl_head);
        i < n && kctl != NULL;
        i++, kctl = TAILQ_NEXT(kctl, next)) {
        struct xkctl_reg *xkr = (struct xkctl_reg *)buf;
        struct ctl_cb *kcb;
        u_int32_t pcbcount = 0;

        TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
        pcbcount++;

        bzero(buf, item_size);

        xkr->xkr_len = sizeof(struct xkctl_reg);
        xkr->xkr_kind = XSO_KCREG;
        xkr->xkr_id = kctl->id;
        xkr->xkr_reg_unit = kctl->reg_unit;
        xkr->xkr_flags = kctl->flags;
        xkr->xkr_kctlref = (uint64_t)(kctl->kctlref);
        xkr->xkr_recvbufsize = kctl->recvbufsize;
        xkr->xkr_sendbufsize = kctl->sendbufsize;
        xkr->xkr_lastunit = kctl->lastunit;
        xkr->xkr_pcbcount = pcbcount;
        xkr->xkr_connect = (uint64_t)VM_KERNEL_UNSLIDE(kctl->connect);
        xkr->xkr_disconnect =
            (uint64_t)VM_KERNEL_UNSLIDE(kctl->disconnect);
        xkr->xkr_send = (uint64_t)VM_KERNEL_UNSLIDE(kctl->send);
        xkr->xkr_send_list =
            (uint64_t)VM_KERNEL_UNSLIDE(kctl->send_list);
        xkr->xkr_setopt = (uint64_t)VM_KERNEL_UNSLIDE(kctl->setopt);
        xkr->xkr_getopt = (uint64_t)VM_KERNEL_UNSLIDE(kctl->getopt);
        xkr->xkr_rcvd = (uint64_t)VM_KERNEL_UNSLIDE(kctl->rcvd);
        strlcpy(xkr->xkr_name, kctl->name, sizeof(xkr->xkr_name));

        error = SYSCTL_OUT(req, buf, item_size);
    }

    if (error == 0) {
        /*
         * Give the user an updated idea of our state.
         * If the generation differs from what we told
         * her before, she knows that something happened
         * while we were processing this request, and it
         * might be necessary to retry.
         */
        bzero(&xsg, sizeof(xsg));
        xsg.xg_len = sizeof(xsg);
        xsg.xg_count = n;
        xsg.xg_gen = kctlstat.kcs_gencnt;
        xsg.xg_sogen = so_gencnt;
        error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
    }

done:
    lck_mtx_unlock(&ctl_mtx);

    kheap_free(KHEAP_TEMP, buf, item_size);

    return error;
}
__private_extern__ int
kctl_pcblist SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
    int error = 0;
    u_int64_t n, i;
    struct xsystmgen xsg;
    void *buf = NULL;
    struct kctl *kctl;
    size_t item_size = ROUNDUP64(sizeof(struct xkctlpcb)) +
        ROUNDUP64(sizeof(struct xsocket_n)) +
        2 * ROUNDUP64(sizeof(struct xsockbuf_n)) +
        ROUNDUP64(sizeof(struct xsockstat_n));

    buf = kheap_alloc(KHEAP_TEMP, item_size, Z_WAITOK | Z_ZERO);
    if (buf == NULL) {
        return ENOMEM;
    }

    lck_mtx_lock(&ctl_mtx);

    n = kctlstat.kcs_pcbcount;

    if (req->oldptr == USER_ADDR_NULL) {
        req->oldidx = (size_t)(n + n / 8) * item_size;
        goto done;
    }
    if (req->newptr != USER_ADDR_NULL) {
        error = EPERM;
        goto done;
    }
    bzero(&xsg, sizeof(xsg));
    xsg.xg_len = sizeof(xsg);
    xsg.xg_count = n;
    xsg.xg_gen = kctlstat.kcs_gencnt;
    xsg.xg_sogen = so_gencnt;
    error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
    if (error) {
        goto done;
    }
    /*
     * We are done if there is no pcb
     */
    if (n == 0) {
        goto done;
    }

    for (i = 0, kctl = TAILQ_FIRST(&ctl_head);
        i < n && kctl != NULL;
        kctl = TAILQ_NEXT(kctl, next)) {
        struct ctl_cb *kcb;

        for (kcb = TAILQ_FIRST(&kctl->kcb_head);
            i < n && kcb != NULL;
            i++, kcb = TAILQ_NEXT(kcb, next)) {
            struct xkctlpcb *xk = (struct xkctlpcb *)buf;
            struct xsocket_n *xso = (struct xsocket_n *)
                ADVANCE64(xk, sizeof(*xk));
            struct xsockbuf_n *xsbrcv = (struct xsockbuf_n *)
                ADVANCE64(xso, sizeof(*xso));
            struct xsockbuf_n *xsbsnd = (struct xsockbuf_n *)
                ADVANCE64(xsbrcv, sizeof(*xsbrcv));
            struct xsockstat_n *xsostats = (struct xsockstat_n *)
                ADVANCE64(xsbsnd, sizeof(*xsbsnd));

            bzero(buf, item_size);

            xk->xkp_len = sizeof(struct xkctlpcb);
            xk->xkp_kind = XSO_KCB;
            xk->xkp_unit = kcb->sac.sc_unit;
            xk->xkp_kctpcb = (uint64_t)VM_KERNEL_ADDRPERM(kcb);
            xk->xkp_kctlref = (uint64_t)VM_KERNEL_ADDRPERM(kctl);
            xk->xkp_kctlid = kctl->id;
            strlcpy(xk->xkp_kctlname, kctl->name,
                sizeof(xk->xkp_kctlname));

            sotoxsocket_n(kcb->so, xso);
            sbtoxsockbuf_n(kcb->so ?
                &kcb->so->so_rcv : NULL, xsbrcv);
            sbtoxsockbuf_n(kcb->so ?
                &kcb->so->so_snd : NULL, xsbsnd);
            sbtoxsockstat_n(kcb->so, xsostats);

            error = SYSCTL_OUT(req, buf, item_size);
        }
    }

    if (error == 0) {
        /*
         * Give the user an updated idea of our state.
         * If the generation differs from what we told
         * her before, she knows that something happened
         * while we were processing this request, and it
         * might be necessary to retry.
         */
        bzero(&xsg, sizeof(xsg));
        xsg.xg_len = sizeof(xsg);
        xsg.xg_count = n;
        xsg.xg_gen = kctlstat.kcs_gencnt;
        xsg.xg_sogen = so_gencnt;
        error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
    }

done:
    lck_mtx_unlock(&ctl_mtx);

    kheap_free(KHEAP_TEMP, buf, item_size);

    return error;
}
__private_extern__ int
kctl_getstat SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
    int error = 0;

    lck_mtx_lock(&ctl_mtx);

    if (req->newptr != USER_ADDR_NULL) {
        error = EPERM;
        goto done;
    }
    if (req->oldptr == USER_ADDR_NULL) {
        req->oldidx = sizeof(struct kctlstat);
        goto done;
    }

    error = SYSCTL_OUT(req, &kctlstat,
        MIN(sizeof(struct kctlstat), req->oldlen));
done:
    lck_mtx_unlock(&ctl_mtx);
    return error;
}
__private_extern__ void
kctl_fill_socketinfo(struct socket *so, struct socket_info *si)
{
    struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
    struct kern_ctl_info *kcsi =
        &si->soi_proto.pri_kern_ctl;
    struct kctl *kctl = kcb->kctl;

    si->soi_kind = SOCKINFO_KERN_CTL;

    if (kctl == 0) {
        return;
    }

    kcsi->kcsi_id = kctl->id;
    kcsi->kcsi_reg_unit = kctl->reg_unit;
    kcsi->kcsi_flags = kctl->flags;
    kcsi->kcsi_recvbufsize = kctl->recvbufsize;
    kcsi->kcsi_sendbufsize = kctl->sendbufsize;
    kcsi->kcsi_unit = kcb->sac.sc_unit;
    strlcpy(kcsi->kcsi_name, kctl->name, MAX_KCTL_NAME);
}