2 * Copyright (c) 1999-2015 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
30 * Kernel Control domain - allows control connections to kernel controllers
31 * and to read/write data.
33 * Vincent Lubet, 040506
34 * Christophe Allie, 010928
35 * Justin C. Walker, 990319
38 #include <sys/types.h>
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/syslog.h>
42 #include <sys/socket.h>
43 #include <sys/socketvar.h>
44 #include <sys/protosw.h>
45 #include <sys/domain.h>
46 #include <sys/malloc.h>
48 #include <sys/sys_domain.h>
49 #include <sys/kern_event.h>
50 #include <sys/kern_control.h>
51 #include <sys/kauth.h>
52 #include <sys/sysctl.h>
53 #include <sys/proc_info.h>
54 #include <net/if_var.h>
56 #include <mach/vm_types.h>
58 #include <kern/thread.h>
61 TAILQ_ENTRY(kctl
) next
; /* controller chain */
64 /* controller information provided when registering */
65 char name
[MAX_KCTL_NAME
]; /* unique identifier */
69 /* misc communication information */
70 u_int32_t flags
; /* support flags */
71 u_int32_t recvbufsize
; /* request more than the default buffer size */
72 u_int32_t sendbufsize
; /* request more than the default buffer size */
74 /* Dispatch functions */
75 ctl_connect_func connect
; /* Make contact */
76 ctl_disconnect_func disconnect
; /* Break contact */
77 ctl_send_func send
; /* Send data to nke */
78 ctl_send_list_func send_list
; /* Send list of packets */
79 ctl_setopt_func setopt
; /* set kctl configuration */
80 ctl_getopt_func getopt
; /* get kctl configuration */
81 ctl_rcvd_func rcvd
; /* Notify nke when client reads data */
83 TAILQ_HEAD(, ctl_cb
) kcb_head
;
88 TAILQ_ENTRY(ctl_cb
) next
; /* controller chain */
90 struct socket
*so
; /* controlling socket */
91 struct kctl
*kctl
; /* back pointer to controller */
98 #define ROUNDUP64(x) P2ROUNDUP((x), sizeof (u_int64_t))
102 #define ADVANCE64(p, n) (void*)((char *)(p) + ROUNDUP64(n))
106 * Default socket buffer sizes for the kernel controls we support
109 #define CTL_SENDSIZE (2 * 1024) /* default buffer size */
110 #define CTL_RECVSIZE (8 * 1024) /* default buffer size */
113 * Definitions and global state for the kernel controls we support
116 static u_int32_t ctl_maxunit
= 65536;
117 static lck_grp_attr_t
*ctl_lck_grp_attr
= 0;
118 static lck_attr_t
*ctl_lck_attr
= 0;
119 static lck_grp_t
*ctl_lck_grp
= 0;
120 static lck_mtx_t
*ctl_mtx
;
122 /* all the controllers are chained */
123 TAILQ_HEAD(kctl_list
, kctl
) ctl_head
;
125 static int ctl_attach(struct socket
*, int, struct proc
*);
126 static int ctl_detach(struct socket
*);
127 static int ctl_sofreelastref(struct socket
*so
);
128 static int ctl_connect(struct socket
*, struct sockaddr
*, struct proc
*);
129 static int ctl_disconnect(struct socket
*);
130 static int ctl_ioctl(struct socket
*so
, u_long cmd
, caddr_t data
,
131 struct ifnet
*ifp
, struct proc
*p
);
132 static int ctl_send(struct socket
*, int, struct mbuf
*,
133 struct sockaddr
*, struct mbuf
*, struct proc
*);
134 static int ctl_send_list(struct socket
*, int, struct mbuf
*,
135 struct sockaddr
*, struct mbuf
*, struct proc
*);
136 static int ctl_ctloutput(struct socket
*, struct sockopt
*);
137 static int ctl_peeraddr(struct socket
*so
, struct sockaddr
**nam
);
138 static int ctl_usr_rcvd(struct socket
*so
, int flags
);
140 static struct kctl
*ctl_find_by_name(const char *);
141 static struct kctl
*ctl_find_by_id_unit(u_int32_t id
, u_int32_t unit
);
143 static struct socket
*kcb_find_socket(kern_ctl_ref kctlref
, u_int32_t unit
,
145 static struct ctl_cb
*kcb_find(struct kctl
*, u_int32_t unit
);
146 static void ctl_post_msg(u_int32_t event_code
, u_int32_t id
);
148 static int ctl_lock(struct socket
*, int, void *);
149 static int ctl_unlock(struct socket
*, int, void *);
150 static lck_mtx_t
* ctl_getlock(struct socket
*, int);
152 static struct pr_usrreqs ctl_usrreqs
= {
153 .pru_attach
= ctl_attach
,
154 .pru_connect
= ctl_connect
,
155 .pru_control
= ctl_ioctl
,
156 .pru_detach
= ctl_detach
,
157 .pru_disconnect
= ctl_disconnect
,
158 .pru_peeraddr
= ctl_peeraddr
,
159 .pru_rcvd
= ctl_usr_rcvd
,
160 .pru_send
= ctl_send
,
161 .pru_send_list
= ctl_send_list
,
162 .pru_sosend
= sosend
,
163 .pru_sosend_list
= sosend_list
,
164 .pru_soreceive
= soreceive
,
165 .pru_soreceive_list
= soreceive_list
,
168 static struct protosw kctlsw
[] = {
170 .pr_type
= SOCK_DGRAM
,
171 .pr_protocol
= SYSPROTO_CONTROL
,
172 .pr_flags
= PR_ATOMIC
|PR_CONNREQUIRED
|PR_PCBLOCK
|PR_WANTRCVD
,
173 .pr_ctloutput
= ctl_ctloutput
,
174 .pr_usrreqs
= &ctl_usrreqs
,
176 .pr_unlock
= ctl_unlock
,
177 .pr_getlock
= ctl_getlock
,
180 .pr_type
= SOCK_STREAM
,
181 .pr_protocol
= SYSPROTO_CONTROL
,
182 .pr_flags
= PR_CONNREQUIRED
|PR_PCBLOCK
|PR_WANTRCVD
,
183 .pr_ctloutput
= ctl_ctloutput
,
184 .pr_usrreqs
= &ctl_usrreqs
,
186 .pr_unlock
= ctl_unlock
,
187 .pr_getlock
= ctl_getlock
,
191 __private_extern__
int kctl_reg_list SYSCTL_HANDLER_ARGS
;
192 __private_extern__
int kctl_pcblist SYSCTL_HANDLER_ARGS
;
193 __private_extern__
int kctl_getstat SYSCTL_HANDLER_ARGS
;
196 SYSCTL_NODE(_net_systm
, OID_AUTO
, kctl
,
197 CTLFLAG_RW
|CTLFLAG_LOCKED
, 0, "Kernel control family");
199 struct kctlstat kctlstat
;
200 SYSCTL_PROC(_net_systm_kctl
, OID_AUTO
, stats
,
201 CTLTYPE_STRUCT
| CTLFLAG_RD
| CTLFLAG_LOCKED
, 0, 0,
202 kctl_getstat
, "S,kctlstat", "");
204 SYSCTL_PROC(_net_systm_kctl
, OID_AUTO
, reg_list
,
205 CTLTYPE_STRUCT
| CTLFLAG_RD
| CTLFLAG_LOCKED
, 0, 0,
206 kctl_reg_list
, "S,xkctl_reg", "");
208 SYSCTL_PROC(_net_systm_kctl
, OID_AUTO
, pcblist
,
209 CTLTYPE_STRUCT
| CTLFLAG_RD
| CTLFLAG_LOCKED
, 0, 0,
210 kctl_pcblist
, "S,xkctlpcb", "");
212 u_int32_t ctl_autorcvbuf_max
= 256 * 1024;
213 SYSCTL_INT(_net_systm_kctl
, OID_AUTO
, autorcvbufmax
,
214 CTLFLAG_RW
| CTLFLAG_LOCKED
, &ctl_autorcvbuf_max
, 0, "");
216 u_int32_t ctl_autorcvbuf_high
= 0;
217 SYSCTL_INT(_net_systm_kctl
, OID_AUTO
, autorcvbufhigh
,
218 CTLFLAG_RD
| CTLFLAG_LOCKED
, &ctl_autorcvbuf_high
, 0, "");
220 u_int32_t ctl_debug
= 0;
221 SYSCTL_INT(_net_systm_kctl
, OID_AUTO
, debug
,
222 CTLFLAG_RW
| CTLFLAG_LOCKED
, &ctl_debug
, 0, "");
224 #define KCTL_TBL_INC 16
226 static uintptr_t kctl_tbl_size
= 0;
227 static u_int32_t kctl_tbl_growing
= 0;
228 static u_int32_t kctl_tbl_growing_waiting
= 0;
229 static uintptr_t kctl_tbl_count
= 0;
230 static struct kctl
**kctl_table
= NULL
;
231 static uintptr_t kctl_ref_gencnt
= 0;
233 static void kctl_tbl_grow(void);
234 static kern_ctl_ref
kctl_make_ref(struct kctl
*kctl
);
235 static void kctl_delete_ref(kern_ctl_ref
);
236 static struct kctl
*kctl_from_ref(kern_ctl_ref
);
239 * Install the protosw's for the Kernel Control manager.
241 __private_extern__
void
242 kern_control_init(struct domain
*dp
)
246 int kctl_proto_count
= (sizeof (kctlsw
) / sizeof (struct protosw
));
248 VERIFY(!(dp
->dom_flags
& DOM_INITIALIZED
));
249 VERIFY(dp
== systemdomain
);
251 ctl_lck_grp_attr
= lck_grp_attr_alloc_init();
252 if (ctl_lck_grp_attr
== NULL
) {
253 panic("%s: lck_grp_attr_alloc_init failed\n", __func__
);
257 ctl_lck_grp
= lck_grp_alloc_init("Kernel Control Protocol",
259 if (ctl_lck_grp
== NULL
) {
260 panic("%s: lck_grp_alloc_init failed\n", __func__
);
264 ctl_lck_attr
= lck_attr_alloc_init();
265 if (ctl_lck_attr
== NULL
) {
266 panic("%s: lck_attr_alloc_init failed\n", __func__
);
270 ctl_mtx
= lck_mtx_alloc_init(ctl_lck_grp
, ctl_lck_attr
);
271 if (ctl_mtx
== NULL
) {
272 panic("%s: lck_mtx_alloc_init failed\n", __func__
);
275 TAILQ_INIT(&ctl_head
);
277 for (i
= 0, pr
= &kctlsw
[0]; i
< kctl_proto_count
; i
++, pr
++)
278 net_add_proto(pr
, dp
, 1);
282 kcb_delete(struct ctl_cb
*kcb
)
286 lck_mtx_free(kcb
->mtx
, ctl_lck_grp
);
292 * Kernel Controller user-request functions
293 * attach function must exist and succeed
294 * detach not necessary
295 * we need a pcb for the per socket mutex
298 ctl_attach(struct socket
*so
, int proto
, struct proc
*p
)
300 #pragma unused(proto, p)
302 struct ctl_cb
*kcb
= 0;
304 MALLOC(kcb
, struct ctl_cb
*, sizeof(struct ctl_cb
), M_TEMP
, M_WAITOK
);
309 bzero(kcb
, sizeof(struct ctl_cb
));
311 kcb
->mtx
= lck_mtx_alloc_init(ctl_lck_grp
, ctl_lck_attr
);
312 if (kcb
->mtx
== NULL
) {
317 so
->so_pcb
= (caddr_t
)kcb
;
328 ctl_sofreelastref(struct socket
*so
)
330 struct ctl_cb
*kcb
= (struct ctl_cb
*)so
->so_pcb
;
336 if ((kctl
= kcb
->kctl
) != 0) {
337 lck_mtx_lock(ctl_mtx
);
338 TAILQ_REMOVE(&kctl
->kcb_head
, kcb
, next
);
339 kctlstat
.kcs_pcbcount
--;
340 kctlstat
.kcs_gencnt
++;
341 lck_mtx_unlock(ctl_mtx
);
345 sofreelastref(so
, 1);
350 ctl_detach(struct socket
*so
)
352 struct ctl_cb
*kcb
= (struct ctl_cb
*)so
->so_pcb
;
357 soisdisconnected(so
);
358 so
->so_flags
|= SOF_PCBCLEARING
;
363 ctl_connect(struct socket
*so
, struct sockaddr
*nam
, struct proc
*p
)
368 struct sockaddr_ctl sa
;
369 struct ctl_cb
*kcb
= (struct ctl_cb
*)so
->so_pcb
;
370 struct ctl_cb
*kcb_next
= NULL
;
372 u_int32_t recvbufsize
, sendbufsize
;
375 panic("ctl_connect so_pcb null\n");
377 if (nam
->sa_len
!= sizeof(struct sockaddr_ctl
))
380 bcopy(nam
, &sa
, sizeof(struct sockaddr_ctl
));
382 lck_mtx_lock(ctl_mtx
);
383 kctl
= ctl_find_by_id_unit(sa
.sc_id
, sa
.sc_unit
);
385 lck_mtx_unlock(ctl_mtx
);
389 if (((kctl
->flags
& CTL_FLAG_REG_SOCK_STREAM
) &&
390 (so
->so_type
!= SOCK_STREAM
)) ||
391 (!(kctl
->flags
& CTL_FLAG_REG_SOCK_STREAM
) &&
392 (so
->so_type
!= SOCK_DGRAM
))) {
393 lck_mtx_unlock(ctl_mtx
);
397 if (kctl
->flags
& CTL_FLAG_PRIVILEGED
) {
399 lck_mtx_unlock(ctl_mtx
);
402 if (kauth_cred_issuser(kauth_cred_get()) == 0) {
403 lck_mtx_unlock(ctl_mtx
);
408 if ((kctl
->flags
& CTL_FLAG_REG_ID_UNIT
) || sa
.sc_unit
!= 0) {
409 if (kcb_find(kctl
, sa
.sc_unit
) != NULL
) {
410 lck_mtx_unlock(ctl_mtx
);
414 /* Find an unused ID, assumes control IDs are in order */
417 TAILQ_FOREACH(kcb_next
, &kctl
->kcb_head
, next
) {
418 if (kcb_next
->unit
> unit
) {
419 /* Found a gap, lets fill it in */
422 unit
= kcb_next
->unit
+ 1;
423 if (unit
== ctl_maxunit
)
427 if (unit
== ctl_maxunit
) {
428 lck_mtx_unlock(ctl_mtx
);
435 kcb
->unit
= sa
.sc_unit
;
437 if (kcb_next
!= NULL
) {
438 TAILQ_INSERT_BEFORE(kcb_next
, kcb
, next
);
440 TAILQ_INSERT_TAIL(&kctl
->kcb_head
, kcb
, next
);
442 kctlstat
.kcs_pcbcount
++;
443 kctlstat
.kcs_gencnt
++;
444 kctlstat
.kcs_connections
++;
445 lck_mtx_unlock(ctl_mtx
);
448 * rdar://15526688: Limit the send and receive sizes to sb_max
449 * by using the same scaling as sbreserve()
451 sbmaxsize
= (u_quad_t
)sb_max
* MCLBYTES
/ (MSIZE
+ MCLBYTES
);
453 if (kctl
->sendbufsize
> sbmaxsize
)
454 sendbufsize
= sbmaxsize
;
456 sendbufsize
= kctl
->sendbufsize
;
458 if (kctl
->recvbufsize
> sbmaxsize
)
459 recvbufsize
= sbmaxsize
;
461 recvbufsize
= kctl
->recvbufsize
;
463 error
= soreserve(so
, sendbufsize
, recvbufsize
);
466 printf("%s - soreserve(%llx, %u, %u) error %d\n",
467 __func__
, (uint64_t)VM_KERNEL_ADDRPERM(so
),
468 sendbufsize
, recvbufsize
, error
);
473 socket_unlock(so
, 0);
474 error
= (*kctl
->connect
)(kctl
->kctlref
, &sa
, &kcb
->userdata
);
482 if (error
&& kctl
->disconnect
) {
484 * XXX Make sure we Don't check the return value
485 * of disconnect here.
486 * ipsec/utun_ctl_disconnect will return error when
487 * disconnect gets called after connect failure.
488 * However if we decide to check for disconnect return
489 * value here. Please make sure to revisit
490 * ipsec/utun_ctl_disconnect.
492 socket_unlock(so
, 0);
493 (*kctl
->disconnect
)(kctl
->kctlref
, kcb
->unit
, kcb
->userdata
);
498 soisdisconnected(so
);
499 lck_mtx_lock(ctl_mtx
);
502 TAILQ_REMOVE(&kctl
->kcb_head
, kcb
, next
);
503 kctlstat
.kcs_pcbcount
--;
504 kctlstat
.kcs_gencnt
++;
505 kctlstat
.kcs_conn_fail
++;
506 lck_mtx_unlock(ctl_mtx
);
512 ctl_disconnect(struct socket
*so
)
514 struct ctl_cb
*kcb
= (struct ctl_cb
*)so
->so_pcb
;
516 if ((kcb
= (struct ctl_cb
*)so
->so_pcb
)) {
517 struct kctl
*kctl
= kcb
->kctl
;
519 if (kctl
&& kctl
->disconnect
) {
520 socket_unlock(so
, 0);
521 (*kctl
->disconnect
)(kctl
->kctlref
, kcb
->unit
,
526 soisdisconnected(so
);
528 socket_unlock(so
, 0);
529 lck_mtx_lock(ctl_mtx
);
532 while (kcb
->usecount
!= 0) {
533 msleep(&kcb
->usecount
, ctl_mtx
, 0, "kcb->usecount", 0);
535 TAILQ_REMOVE(&kctl
->kcb_head
, kcb
, next
);
536 kctlstat
.kcs_pcbcount
--;
537 kctlstat
.kcs_gencnt
++;
538 lck_mtx_unlock(ctl_mtx
);
545 ctl_peeraddr(struct socket
*so
, struct sockaddr
**nam
)
547 struct ctl_cb
*kcb
= (struct ctl_cb
*)so
->so_pcb
;
549 struct sockaddr_ctl sc
;
551 if (kcb
== NULL
) /* sanity check */
554 if ((kctl
= kcb
->kctl
) == NULL
)
557 bzero(&sc
, sizeof(struct sockaddr_ctl
));
558 sc
.sc_len
= sizeof(struct sockaddr_ctl
);
559 sc
.sc_family
= AF_SYSTEM
;
560 sc
.ss_sysaddr
= AF_SYS_CONTROL
;
562 sc
.sc_unit
= kcb
->unit
;
564 *nam
= dup_sockaddr((struct sockaddr
*)&sc
, 1);
570 ctl_sbrcv_trim(struct socket
*so
)
572 struct sockbuf
*sb
= &so
->so_rcv
;
574 if (sb
->sb_hiwat
> sb
->sb_idealsize
) {
579 * The difference between the ideal size and the
580 * current size is the upper bound of the trimage
582 diff
= sb
->sb_hiwat
- sb
->sb_idealsize
;
584 * We cannot trim below the outstanding data
586 trim
= sb
->sb_hiwat
- sb
->sb_cc
;
588 trim
= imin(trim
, (int32_t)diff
);
591 sbreserve(sb
, (sb
->sb_hiwat
- trim
));
594 printf("%s - shrunk to %d\n",
595 __func__
, sb
->sb_hiwat
);
601 ctl_usr_rcvd(struct socket
*so
, int flags
)
603 struct ctl_cb
*kcb
= (struct ctl_cb
*)so
->so_pcb
;
606 if ((kctl
= kcb
->kctl
) == NULL
) {
611 socket_unlock(so
, 0);
612 (*kctl
->rcvd
)(kctl
->kctlref
, kcb
->unit
, kcb
->userdata
, flags
);
622 ctl_send(struct socket
*so
, int flags
, struct mbuf
*m
,
623 struct sockaddr
*addr
, struct mbuf
*control
,
626 #pragma unused(addr, p)
628 struct ctl_cb
*kcb
= (struct ctl_cb
*)so
->so_pcb
;
634 if (kcb
== NULL
) /* sanity check */
637 if (error
== 0 && (kctl
= kcb
->kctl
) == NULL
)
640 if (error
== 0 && kctl
->send
) {
641 so_tc_update_stats(m
, so
, m_get_service_class(m
));
642 socket_unlock(so
, 0);
643 error
= (*kctl
->send
)(kctl
->kctlref
, kcb
->unit
, kcb
->userdata
,
652 OSIncrementAtomic64((SInt64
*)&kctlstat
.kcs_send_fail
);
657 ctl_send_list(struct socket
*so
, int flags
, struct mbuf
*m
,
658 __unused
struct sockaddr
*addr
, struct mbuf
*control
,
659 __unused
struct proc
*p
)
662 struct ctl_cb
*kcb
= (struct ctl_cb
*)so
->so_pcb
;
666 m_freem_list(control
);
668 if (kcb
== NULL
) /* sanity check */
671 if (error
== 0 && (kctl
= kcb
->kctl
) == NULL
)
674 if (error
== 0 && kctl
->send_list
) {
677 for (nxt
= m
; nxt
!= NULL
; nxt
= nxt
->m_nextpkt
)
678 so_tc_update_stats(nxt
, so
, m_get_service_class(nxt
));
680 socket_unlock(so
, 0);
681 error
= (*kctl
->send_list
)(kctl
->kctlref
, kcb
->unit
,
682 kcb
->userdata
, m
, flags
);
684 } else if (error
== 0 && kctl
->send
) {
685 while (m
!= NULL
&& error
== 0) {
686 struct mbuf
*nextpkt
= m
->m_nextpkt
;
689 so_tc_update_stats(m
, so
, m_get_service_class(m
));
690 socket_unlock(so
, 0);
691 error
= (*kctl
->send
)(kctl
->kctlref
, kcb
->unit
,
692 kcb
->userdata
, m
, flags
);
704 OSIncrementAtomic64((SInt64
*)&kctlstat
.kcs_send_list_fail
);
709 ctl_rcvbspace(struct socket
*so
, u_int32_t datasize
,
710 u_int32_t kctlflags
, u_int32_t flags
)
712 struct sockbuf
*sb
= &so
->so_rcv
;
713 u_int32_t space
= sbspace(sb
);
716 if ((kctlflags
& CTL_FLAG_REG_CRIT
) == 0) {
717 if ((u_int32_t
) space
>= datasize
)
721 } else if ((flags
& CTL_DATA_CRIT
) == 0) {
723 * Reserve 25% for critical messages
725 if (space
< (sb
->sb_hiwat
>> 2) ||
731 u_int32_t autorcvbuf_max
;
734 * Allow overcommit of 25%
736 autorcvbuf_max
= min(sb
->sb_idealsize
+ (sb
->sb_idealsize
>> 2),
739 if ((u_int32_t
) space
>= datasize
) {
741 } else if (tcp_cansbgrow(sb
) &&
742 sb
->sb_hiwat
< autorcvbuf_max
) {
744 * Grow with a little bit of leeway
746 u_int32_t grow
= datasize
- space
+ MSIZE
;
749 min((sb
->sb_hiwat
+ grow
), autorcvbuf_max
)) == 1) {
751 if (sb
->sb_hiwat
> ctl_autorcvbuf_high
)
752 ctl_autorcvbuf_high
= sb
->sb_hiwat
;
757 if ((u_int32_t
) sbspace(sb
) >= datasize
) {
764 printf("%s - grown to %d error %d\n",
765 __func__
, sb
->sb_hiwat
, error
);
777 ctl_enqueuembuf(kern_ctl_ref kctlref
, u_int32_t unit
, struct mbuf
*m
,
782 int len
= m
->m_pkthdr
.len
;
785 so
= kcb_find_socket(kctlref
, unit
, &kctlflags
);
790 if (ctl_rcvbspace(so
, len
, kctlflags
, flags
) != 0) {
792 OSIncrementAtomic64((SInt64
*)&kctlstat
.kcs_enqueue_fullsock
);
795 if ((flags
& CTL_DATA_EOR
))
798 so_recv_data_stat(so
, m
, 0);
799 if (sbappend(&so
->so_rcv
, m
) != 0) {
800 if ((flags
& CTL_DATA_NOWAKEUP
) == 0)
804 OSIncrementAtomic64((SInt64
*)&kctlstat
.kcs_enqueue_fullsock
);
807 if (ctl_debug
&& error
!= 0 && (flags
& CTL_DATA_CRIT
))
808 printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
809 __func__
, error
, len
,
810 so
->so_rcv
.sb_hiwat
, so
->so_rcv
.sb_cc
);
812 socket_unlock(so
, 1);
814 OSIncrementAtomic64((SInt64
*)&kctlstat
.kcs_enqueue_fail
);
820 * Compute space occupied by mbuf like sbappendrecord
823 m_space(struct mbuf
*m
)
828 for (nxt
= m
; nxt
!= NULL
; nxt
= nxt
->m_next
)
835 ctl_enqueuembuf_list(void *kctlref
, u_int32_t unit
, struct mbuf
*m_list
,
836 u_int32_t flags
, struct mbuf
**m_remain
)
838 struct socket
*so
= NULL
;
840 struct mbuf
*m
, *nextpkt
;
846 * Need to point the beginning of the list in case of early exit
851 * kcb_find_socket takes the socket lock with a reference
853 so
= kcb_find_socket(kctlref
, unit
, &kctlflags
);
859 if (kctlflags
& CTL_FLAG_REG_SOCK_STREAM
) {
863 if (flags
& CTL_DATA_EOR
) {
868 for (m
= m_list
; m
!= NULL
; m
= nextpkt
) {
869 nextpkt
= m
->m_nextpkt
;
871 if (m
->m_pkthdr
.len
== 0 && ctl_debug
)
872 printf("%s: %llx m_pkthdr.len is 0",
873 __func__
, (uint64_t)VM_KERNEL_ADDRPERM(m
));
876 * The mbuf is either appended or freed by sbappendrecord()
877 * so it's not reliable from a data standpoint
880 if (ctl_rcvbspace(so
, len
, kctlflags
, flags
) != 0) {
883 (SInt64
*)&kctlstat
.kcs_enqueue_fullsock
);
887 * Unlink from the list, m is on its own
890 so_recv_data_stat(so
, m
, 0);
891 if (sbappendrecord(&so
->so_rcv
, m
) != 0) {
895 * We free or return the remaining
901 (SInt64
*)&kctlstat
.kcs_enqueue_fullsock
);
906 if (needwakeup
&& (flags
& CTL_DATA_NOWAKEUP
) == 0)
911 if (ctl_debug
&& error
!= 0 && (flags
& CTL_DATA_CRIT
))
912 printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
913 __func__
, error
, len
,
914 so
->so_rcv
.sb_hiwat
, so
->so_rcv
.sb_cc
);
916 socket_unlock(so
, 1);
921 if (m
!= NULL
&& socket_debug
&& so
!= NULL
&&
922 (so
->so_options
& SO_DEBUG
)) {
925 printf("%s m_list %llx\n", __func__
,
926 (uint64_t) VM_KERNEL_ADDRPERM(m_list
));
927 for (n
= m
; n
!= NULL
; n
= n
->m_nextpkt
)
928 printf(" remain %llx m_next %llx\n",
929 (uint64_t) VM_KERNEL_ADDRPERM(n
),
930 (uint64_t) VM_KERNEL_ADDRPERM(n
->m_next
));
937 OSIncrementAtomic64((SInt64
*)&kctlstat
.kcs_enqueue_fail
);
942 ctl_enqueuedata(void *kctlref
, u_int32_t unit
, void *data
, size_t len
,
948 unsigned int num_needed
;
953 so
= kcb_find_socket(kctlref
, unit
, &kctlflags
);
958 if (ctl_rcvbspace(so
, len
, kctlflags
, flags
) != 0) {
960 OSIncrementAtomic64((SInt64
*)&kctlstat
.kcs_enqueue_fullsock
);
965 m
= m_allocpacket_internal(&num_needed
, len
, NULL
, M_NOWAIT
, 1, 0);
967 kctlstat
.kcs_enqdata_mb_alloc_fail
++;
969 printf("%s: m_allocpacket_internal(%lu) failed\n",
975 for (n
= m
; n
!= NULL
; n
= n
->m_next
) {
976 size_t mlen
= mbuf_maxlen(n
);
978 if (mlen
+ curlen
> len
)
981 bcopy((char *)data
+ curlen
, n
->m_data
, mlen
);
984 mbuf_pkthdr_setlen(m
, curlen
);
986 if ((flags
& CTL_DATA_EOR
))
988 so_recv_data_stat(so
, m
, 0);
989 if (sbappend(&so
->so_rcv
, m
) != 0) {
990 if ((flags
& CTL_DATA_NOWAKEUP
) == 0)
993 kctlstat
.kcs_enqdata_sbappend_fail
++;
995 OSIncrementAtomic64((SInt64
*)&kctlstat
.kcs_enqueue_fullsock
);
999 if (ctl_debug
&& error
!= 0 && (flags
& CTL_DATA_CRIT
))
1000 printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
1001 __func__
, error
, (int)len
,
1002 so
->so_rcv
.sb_hiwat
, so
->so_rcv
.sb_cc
);
1004 socket_unlock(so
, 1);
1006 OSIncrementAtomic64((SInt64
*)&kctlstat
.kcs_enqueue_fail
);
1011 ctl_getenqueuepacketcount(kern_ctl_ref kctlref
, u_int32_t unit
, u_int32_t
*pcnt
)
1020 so
= kcb_find_socket(kctlref
, unit
, NULL
);
1026 m1
= so
->so_rcv
.sb_mb
;
1027 while (m1
!= NULL
) {
1028 if (m1
->m_type
== MT_DATA
||
1029 m1
->m_type
== MT_HEADER
||
1030 m1
->m_type
== MT_OOBDATA
)
1036 socket_unlock(so
, 1);
1042 ctl_getenqueuespace(kern_ctl_ref kctlref
, u_int32_t unit
, size_t *space
)
1050 so
= kcb_find_socket(kctlref
, unit
, NULL
);
1055 avail
= sbspace(&so
->so_rcv
);
1056 *space
= (avail
< 0) ? 0 : avail
;
1057 socket_unlock(so
, 1);
1063 ctl_getenqueuereadable(kern_ctl_ref kctlref
, u_int32_t unit
,
1064 u_int32_t
*difference
)
1068 if (difference
== NULL
)
1071 so
= kcb_find_socket(kctlref
, unit
, NULL
);
1076 if (so
->so_rcv
.sb_cc
>= so
->so_rcv
.sb_lowat
) {
1079 *difference
= (so
->so_rcv
.sb_lowat
- so
->so_rcv
.sb_cc
);
1081 socket_unlock(so
, 1);
1087 ctl_ctloutput(struct socket
*so
, struct sockopt
*sopt
)
1089 struct ctl_cb
*kcb
= (struct ctl_cb
*)so
->so_pcb
;
1095 if (sopt
->sopt_level
!= SYSPROTO_CONTROL
) {
1099 if (kcb
== NULL
) /* sanity check */
1102 if ((kctl
= kcb
->kctl
) == NULL
)
1105 switch (sopt
->sopt_dir
) {
1107 if (kctl
->setopt
== NULL
)
1109 if (sopt
->sopt_valsize
== 0) {
1112 MALLOC(data
, void *, sopt
->sopt_valsize
, M_TEMP
,
1116 error
= sooptcopyin(sopt
, data
,
1117 sopt
->sopt_valsize
, sopt
->sopt_valsize
);
1120 socket_unlock(so
, 0);
1121 error
= (*kctl
->setopt
)(kctl
->kctlref
,
1122 kcb
->unit
, kcb
->userdata
, sopt
->sopt_name
,
1123 data
, sopt
->sopt_valsize
);
1130 if (kctl
->getopt
== NULL
)
1133 if (sopt
->sopt_valsize
&& sopt
->sopt_val
) {
1134 MALLOC(data
, void *, sopt
->sopt_valsize
, M_TEMP
,
1139 * 4108337 - copy user data in case the
1140 * kernel control needs it
1142 error
= sooptcopyin(sopt
, data
,
1143 sopt
->sopt_valsize
, sopt
->sopt_valsize
);
1145 len
= sopt
->sopt_valsize
;
1146 socket_unlock(so
, 0);
1147 error
= (*kctl
->getopt
)(kctl
->kctlref
, kcb
->unit
,
1148 kcb
->userdata
, sopt
->sopt_name
,
1150 if (data
!= NULL
&& len
> sopt
->sopt_valsize
)
1151 panic_plain("ctl_ctloutput: ctl %s returned "
1152 "len (%lu) > sopt_valsize (%lu)\n",
1153 kcb
->kctl
->name
, len
,
1154 sopt
->sopt_valsize
);
1158 error
= sooptcopyout(sopt
, data
, len
);
1160 sopt
->sopt_valsize
= len
;
1170 ctl_ioctl(struct socket
*so
, u_long cmd
, caddr_t data
,
1171 struct ifnet
*ifp
, struct proc
*p
)
1173 #pragma unused(so, ifp, p)
1174 int error
= ENOTSUP
;
1177 /* get the number of controllers */
1178 case CTLIOCGCOUNT
: {
1182 lck_mtx_lock(ctl_mtx
);
1183 TAILQ_FOREACH(kctl
, &ctl_head
, next
)
1185 lck_mtx_unlock(ctl_mtx
);
1187 bcopy(&n
, data
, sizeof (n
));
1192 struct ctl_info ctl_info
;
1193 struct kctl
*kctl
= 0;
1196 bcopy(data
, &ctl_info
, sizeof (ctl_info
));
1197 name_len
= strnlen(ctl_info
.ctl_name
, MAX_KCTL_NAME
);
1199 if (name_len
== 0 || name_len
+ 1 > MAX_KCTL_NAME
) {
1203 lck_mtx_lock(ctl_mtx
);
1204 kctl
= ctl_find_by_name(ctl_info
.ctl_name
);
1205 lck_mtx_unlock(ctl_mtx
);
1210 ctl_info
.ctl_id
= kctl
->id
;
1211 bcopy(&ctl_info
, data
, sizeof (ctl_info
));
1216 /* add controls to get list of NKEs */
1226 struct kctl
**new_table
;
1229 lck_mtx_assert(ctl_mtx
, LCK_MTX_ASSERT_OWNED
);
1231 if (kctl_tbl_growing
) {
1232 /* Another thread is allocating */
1233 kctl_tbl_growing_waiting
++;
1236 (void) msleep((caddr_t
) &kctl_tbl_growing
, ctl_mtx
,
1237 PSOCK
| PCATCH
, "kctl_tbl_growing", 0);
1238 } while (kctl_tbl_growing
);
1239 kctl_tbl_growing_waiting
--;
1241 /* Another thread grew the table */
1242 if (kctl_table
!= NULL
&& kctl_tbl_count
< kctl_tbl_size
)
1245 /* Verify we have a sane size */
1246 if (kctl_tbl_size
+ KCTL_TBL_INC
>= UINT16_MAX
) {
1247 kctlstat
.kcs_tbl_size_too_big
++;
1249 printf("%s kctl_tbl_size %lu too big\n",
1250 __func__
, kctl_tbl_size
);
1253 kctl_tbl_growing
= 1;
1255 new_size
= kctl_tbl_size
+ KCTL_TBL_INC
;
1257 lck_mtx_unlock(ctl_mtx
);
1258 new_table
= _MALLOC(sizeof(struct kctl
*) * new_size
,
1259 M_TEMP
, M_WAIT
| M_ZERO
);
1260 lck_mtx_lock(ctl_mtx
);
1262 if (new_table
!= NULL
) {
1263 if (kctl_table
!= NULL
) {
1264 bcopy(kctl_table
, new_table
,
1265 kctl_tbl_size
* sizeof(struct kctl
*));
1267 _FREE(kctl_table
, M_TEMP
);
1269 kctl_table
= new_table
;
1270 kctl_tbl_size
= new_size
;
1273 kctl_tbl_growing
= 0;
1275 if (kctl_tbl_growing_waiting
) {
1276 wakeup(&kctl_tbl_growing
);
1280 #define KCTLREF_INDEX_MASK 0x0000FFFF
1281 #define KCTLREF_GENCNT_MASK 0xFFFF0000
1282 #define KCTLREF_GENCNT_SHIFT 16
1285 kctl_make_ref(struct kctl
*kctl
)
1289 lck_mtx_assert(ctl_mtx
, LCK_MTX_ASSERT_OWNED
);
1291 if (kctl_tbl_count
>= kctl_tbl_size
)
1294 kctl
->kctlref
= NULL
;
1295 for (i
= 0; i
< kctl_tbl_size
; i
++) {
1296 if (kctl_table
[i
] == NULL
) {
1300 * Reference is index plus one
1302 kctl_ref_gencnt
+= 1;
1305 * Add generation count as salt to reference to prevent
1306 * use after deregister
1308 ref
= ((kctl_ref_gencnt
<< KCTLREF_GENCNT_SHIFT
) &
1309 KCTLREF_GENCNT_MASK
) +
1310 ((i
+ 1) & KCTLREF_INDEX_MASK
);
1312 kctl
->kctlref
= (void *)(ref
);
1313 kctl_table
[i
] = kctl
;
1319 if (kctl
->kctlref
== NULL
)
1320 panic("%s no space in table", __func__
);
1323 printf("%s %p for %p\n",
1324 __func__
, kctl
->kctlref
, kctl
);
1326 return (kctl
->kctlref
);
1330 kctl_delete_ref(kern_ctl_ref kctlref
)
1333 * Reference is index plus one
1335 uintptr_t i
= (((uintptr_t)kctlref
) & KCTLREF_INDEX_MASK
) - 1;
1337 lck_mtx_assert(ctl_mtx
, LCK_MTX_ASSERT_OWNED
);
1339 if (i
< kctl_tbl_size
) {
1340 struct kctl
*kctl
= kctl_table
[i
];
1342 if (kctl
->kctlref
== kctlref
) {
1343 kctl_table
[i
] = NULL
;
1346 kctlstat
.kcs_bad_kctlref
++;
1349 kctlstat
.kcs_bad_kctlref
++;
1353 static struct kctl
*
1354 kctl_from_ref(kern_ctl_ref kctlref
)
1357 * Reference is index plus one
1359 uintptr_t i
= (((uintptr_t)kctlref
) & KCTLREF_INDEX_MASK
) - 1;
1360 struct kctl
*kctl
= NULL
;
1362 lck_mtx_assert(ctl_mtx
, LCK_MTX_ASSERT_OWNED
);
1364 if (i
>= kctl_tbl_size
) {
1365 kctlstat
.kcs_bad_kctlref
++;
1368 kctl
= kctl_table
[i
];
1369 if (kctl
->kctlref
!= kctlref
) {
1370 kctlstat
.kcs_bad_kctlref
++;
1377 * Register/unregister a NKE
1380 ctl_register(struct kern_ctl_reg
*userkctl
, kern_ctl_ref
*kctlref
)
1382 struct kctl
*kctl
= NULL
;
1383 struct kctl
*kctl_next
= NULL
;
1386 int is_extended
= 0;
1388 if (userkctl
== NULL
) /* sanity check */
1390 if (userkctl
->ctl_connect
== NULL
)
1392 name_len
= strlen(userkctl
->ctl_name
);
1393 if (name_len
== 0 || name_len
+ 1 > MAX_KCTL_NAME
)
1396 MALLOC(kctl
, struct kctl
*, sizeof(*kctl
), M_TEMP
, M_WAITOK
);
1399 bzero((char *)kctl
, sizeof(*kctl
));
1401 lck_mtx_lock(ctl_mtx
);
1403 if (kctl_make_ref(kctl
) == NULL
) {
1404 lck_mtx_unlock(ctl_mtx
);
1410 * Kernel Control IDs
1412 * CTL_FLAG_REG_ID_UNIT indicates the control ID and unit number are
1413 * static. If they do not exist, add them to the list in order. If the
1414 * flag is not set, we must find a new unique value. We assume the
1415 * list is in order. We find the last item in the list and add one. If
1416 * this leads to wrapping the id around, we start at the front of the
1417 * list and look for a gap.
1420 if ((userkctl
->ctl_flags
& CTL_FLAG_REG_ID_UNIT
) == 0) {
1421 /* Must dynamically assign an unused ID */
1423 /* Verify the same name isn't already registered */
1424 if (ctl_find_by_name(userkctl
->ctl_name
) != NULL
) {
1425 kctl_delete_ref(kctl
->kctlref
);
1426 lck_mtx_unlock(ctl_mtx
);
1431 /* Start with 1 in case the list is empty */
1433 kctl_next
= TAILQ_LAST(&ctl_head
, kctl_list
);
1435 if (kctl_next
!= NULL
) {
1436 /* List was not empty, add one to the last item */
1437 id
= kctl_next
->id
+ 1;
1441 * If this wrapped the id number, start looking at
1442 * the front of the list for an unused id.
1445 /* Find the next unused ID */
1448 TAILQ_FOREACH(kctl_next
, &ctl_head
, next
) {
1449 if (kctl_next
->id
> id
) {
1450 /* We found a gap */
1454 id
= kctl_next
->id
+ 1;
1459 userkctl
->ctl_id
= id
;
1461 kctl
->reg_unit
= -1;
1463 TAILQ_FOREACH(kctl_next
, &ctl_head
, next
) {
1464 if (kctl_next
->id
> userkctl
->ctl_id
)
1468 if (ctl_find_by_id_unit(userkctl
->ctl_id
, userkctl
->ctl_unit
)) {
1469 kctl_delete_ref(kctl
->kctlref
);
1470 lck_mtx_unlock(ctl_mtx
);
1474 kctl
->id
= userkctl
->ctl_id
;
1475 kctl
->reg_unit
= userkctl
->ctl_unit
;
1478 is_extended
= (userkctl
->ctl_flags
& CTL_FLAG_REG_EXTENDED
);
1480 strlcpy(kctl
->name
, userkctl
->ctl_name
, MAX_KCTL_NAME
);
1481 kctl
->flags
= userkctl
->ctl_flags
;
1484 * Let the caller know the default send and receive sizes
1486 if (userkctl
->ctl_sendsize
== 0) {
1487 kctl
->sendbufsize
= CTL_SENDSIZE
;
1488 userkctl
->ctl_sendsize
= kctl
->sendbufsize
;
1490 kctl
->sendbufsize
= userkctl
->ctl_sendsize
;
1492 if (userkctl
->ctl_recvsize
== 0) {
1493 kctl
->recvbufsize
= CTL_RECVSIZE
;
1494 userkctl
->ctl_recvsize
= kctl
->recvbufsize
;
1496 kctl
->recvbufsize
= userkctl
->ctl_recvsize
;
1499 kctl
->connect
= userkctl
->ctl_connect
;
1500 kctl
->disconnect
= userkctl
->ctl_disconnect
;
1501 kctl
->send
= userkctl
->ctl_send
;
1502 kctl
->setopt
= userkctl
->ctl_setopt
;
1503 kctl
->getopt
= userkctl
->ctl_getopt
;
1505 kctl
->rcvd
= userkctl
->ctl_rcvd
;
1506 kctl
->send_list
= userkctl
->ctl_send_list
;
1509 TAILQ_INIT(&kctl
->kcb_head
);
1512 TAILQ_INSERT_BEFORE(kctl_next
, kctl
, next
);
1514 TAILQ_INSERT_TAIL(&ctl_head
, kctl
, next
);
1516 kctlstat
.kcs_reg_count
++;
1517 kctlstat
.kcs_gencnt
++;
1519 lck_mtx_unlock(ctl_mtx
);
1521 *kctlref
= kctl
->kctlref
;
1523 ctl_post_msg(KEV_CTL_REGISTERED
, kctl
->id
);
1528 ctl_deregister(void *kctlref
)
1532 lck_mtx_lock(ctl_mtx
);
1533 if ((kctl
= kctl_from_ref(kctlref
)) == NULL
) {
1534 kctlstat
.kcs_bad_kctlref
++;
1535 lck_mtx_unlock(ctl_mtx
);
1537 printf("%s invalid kctlref %p\n",
1542 if (!TAILQ_EMPTY(&kctl
->kcb_head
)) {
1543 lck_mtx_unlock(ctl_mtx
);
1547 TAILQ_REMOVE(&ctl_head
, kctl
, next
);
1549 kctlstat
.kcs_reg_count
--;
1550 kctlstat
.kcs_gencnt
++;
1552 kctl_delete_ref(kctl
->kctlref
);
1553 lck_mtx_unlock(ctl_mtx
);
1555 ctl_post_msg(KEV_CTL_DEREGISTERED
, kctl
->id
);
1561 * Must be called with global ctl_mtx lock taken
1563 static struct kctl
*
1564 ctl_find_by_name(const char *name
)
1568 lck_mtx_assert(ctl_mtx
, LCK_MTX_ASSERT_OWNED
);
1570 TAILQ_FOREACH(kctl
, &ctl_head
, next
)
1571 if (strncmp(kctl
->name
, name
, sizeof(kctl
->name
)) == 0)
1578 ctl_id_by_name(const char *name
)
1580 u_int32_t ctl_id
= 0;
1583 lck_mtx_lock(ctl_mtx
);
1584 kctl
= ctl_find_by_name(name
);
1587 lck_mtx_unlock(ctl_mtx
);
1593 ctl_name_by_id(u_int32_t id
, char *out_name
, size_t maxsize
)
1598 lck_mtx_lock(ctl_mtx
);
1599 TAILQ_FOREACH(kctl
, &ctl_head
, next
) {
1605 if (maxsize
> MAX_KCTL_NAME
)
1606 maxsize
= MAX_KCTL_NAME
;
1607 strlcpy(out_name
, kctl
->name
, maxsize
);
1610 lck_mtx_unlock(ctl_mtx
);
1612 return (found
? 0 : ENOENT
);
1616 * Must be called with global ctl_mtx lock taked
1619 static struct kctl
*
1620 ctl_find_by_id_unit(u_int32_t id
, u_int32_t unit
)
1624 lck_mtx_assert(ctl_mtx
, LCK_MTX_ASSERT_OWNED
);
1626 TAILQ_FOREACH(kctl
, &ctl_head
, next
) {
1627 if (kctl
->id
== id
&& (kctl
->flags
& CTL_FLAG_REG_ID_UNIT
) == 0)
1629 else if (kctl
->id
== id
&& kctl
->reg_unit
== unit
)
1636 * Must be called with kernel controller lock taken
1638 static struct ctl_cb
*
1639 kcb_find(struct kctl
*kctl
, u_int32_t unit
)
1643 lck_mtx_assert(ctl_mtx
, LCK_MTX_ASSERT_OWNED
);
1645 TAILQ_FOREACH(kcb
, &kctl
->kcb_head
, next
)
1646 if (kcb
->unit
== unit
)
1652 static struct socket
*
1653 kcb_find_socket(kern_ctl_ref kctlref
, u_int32_t unit
, u_int32_t
*kctlflags
)
1655 struct socket
*so
= NULL
;
1661 lr_saved
= __builtin_return_address(0);
1663 lck_mtx_lock(ctl_mtx
);
1665 * First validate the kctlref
1667 if ((kctl
= kctl_from_ref(kctlref
)) == NULL
) {
1668 kctlstat
.kcs_bad_kctlref
++;
1669 lck_mtx_unlock(ctl_mtx
);
1671 printf("%s invalid kctlref %p\n",
1676 kcb
= kcb_find(kctl
, unit
);
1677 if (kcb
== NULL
|| kcb
->kctl
!= kctl
|| (so
= kcb
->so
) == NULL
) {
1678 lck_mtx_unlock(ctl_mtx
);
1682 * This prevents the socket from being closed
1686 * Respect lock ordering: socket before ctl_mtx
1688 lck_mtx_unlock(ctl_mtx
);
1692 * The socket lock history is more useful if we store
1693 * the address of the caller.
1695 i
= (so
->next_lock_lr
+ SO_LCKDBG_MAX
- 1) % SO_LCKDBG_MAX
;
1696 so
->lock_lr
[i
] = lr_saved
;
1698 lck_mtx_lock(ctl_mtx
);
1700 if ((kctl
= kctl_from_ref(kctlref
)) == NULL
|| kcb
->kctl
== NULL
) {
1701 lck_mtx_unlock(ctl_mtx
);
1702 socket_unlock(so
, 1);
1704 lck_mtx_lock(ctl_mtx
);
1705 } else if (kctlflags
!= NULL
) {
1706 *kctlflags
= kctl
->flags
;
1710 if (kcb
->usecount
== 0)
1711 wakeup((event_t
)&kcb
->usecount
);
1713 lck_mtx_unlock(ctl_mtx
);
1719 ctl_post_msg(u_int32_t event_code
, u_int32_t id
)
1721 struct ctl_event_data ctl_ev_data
;
1722 struct kev_msg ev_msg
;
1724 lck_mtx_assert(ctl_mtx
, LCK_MTX_ASSERT_NOTOWNED
);
1726 bzero(&ev_msg
, sizeof(struct kev_msg
));
1727 ev_msg
.vendor_code
= KEV_VENDOR_APPLE
;
1729 ev_msg
.kev_class
= KEV_SYSTEM_CLASS
;
1730 ev_msg
.kev_subclass
= KEV_CTL_SUBCLASS
;
1731 ev_msg
.event_code
= event_code
;
1733 /* common nke subclass data */
1734 bzero(&ctl_ev_data
, sizeof(ctl_ev_data
));
1735 ctl_ev_data
.ctl_id
= id
;
1736 ev_msg
.dv
[0].data_ptr
= &ctl_ev_data
;
1737 ev_msg
.dv
[0].data_length
= sizeof(ctl_ev_data
);
1739 ev_msg
.dv
[1].data_length
= 0;
1741 kev_post_msg(&ev_msg
);
1745 ctl_lock(struct socket
*so
, int refcount
, void *lr
)
1750 lr_saved
= __builtin_return_address(0);
1754 if (so
->so_pcb
!= NULL
) {
1755 lck_mtx_lock(((struct ctl_cb
*)so
->so_pcb
)->mtx
);
1757 panic("ctl_lock: so=%p NO PCB! lr=%p lrh= %s\n",
1758 so
, lr_saved
, solockhistory_nr(so
));
1762 if (so
->so_usecount
< 0) {
1763 panic("ctl_lock: so=%p so_pcb=%p lr=%p ref=%x lrh= %s\n",
1764 so
, so
->so_pcb
, lr_saved
, so
->so_usecount
,
1765 solockhistory_nr(so
));
1772 so
->lock_lr
[so
->next_lock_lr
] = lr_saved
;
1773 so
->next_lock_lr
= (so
->next_lock_lr
+1) % SO_LCKDBG_MAX
;
1778 ctl_unlock(struct socket
*so
, int refcount
, void *lr
)
1781 lck_mtx_t
*mutex_held
;
1784 lr_saved
= __builtin_return_address(0);
1788 #if (MORE_KCTLLOCK_DEBUG && (DEVELOPMENT || DEBUG))
1789 printf("ctl_unlock: so=%llx sopcb=%x lock=%llx ref=%u lr=%llx\n",
1790 (uint64_t)VM_KERNEL_ADDRPERM(so
),
1791 (uint64_t)VM_KERNEL_ADDRPERM(so
->so_pcb
,
1792 (uint64_t)VM_KERNEL_ADDRPERM(((struct ctl_cb
*)so
->so_pcb
)->mtx
),
1793 so
->so_usecount
, (uint64_t)VM_KERNEL_ADDRPERM(lr_saved
));
1794 #endif /* (MORE_KCTLLOCK_DEBUG && (DEVELOPMENT || DEBUG)) */
1798 if (so
->so_usecount
< 0) {
1799 panic("ctl_unlock: so=%p usecount=%x lrh= %s\n",
1800 so
, so
->so_usecount
, solockhistory_nr(so
));
1803 if (so
->so_pcb
== NULL
) {
1804 panic("ctl_unlock: so=%p NO PCB usecount=%x lr=%p lrh= %s\n",
1805 so
, so
->so_usecount
, (void *)lr_saved
,
1806 solockhistory_nr(so
));
1809 mutex_held
= ((struct ctl_cb
*)so
->so_pcb
)->mtx
;
1811 lck_mtx_assert(mutex_held
, LCK_MTX_ASSERT_OWNED
);
1812 so
->unlock_lr
[so
->next_unlock_lr
] = lr_saved
;
1813 so
->next_unlock_lr
= (so
->next_unlock_lr
+1) % SO_LCKDBG_MAX
;
1814 lck_mtx_unlock(mutex_held
);
1816 if (so
->so_usecount
== 0)
1817 ctl_sofreelastref(so
);
1823 ctl_getlock(struct socket
*so
, int locktype
)
1825 #pragma unused(locktype)
1826 struct ctl_cb
*kcb
= (struct ctl_cb
*)so
->so_pcb
;
1829 if (so
->so_usecount
< 0)
1830 panic("ctl_getlock: so=%p usecount=%x lrh= %s\n",
1831 so
, so
->so_usecount
, solockhistory_nr(so
));
1834 panic("ctl_getlock: so=%p NULL NO so_pcb %s\n",
1835 so
, solockhistory_nr(so
));
1836 return (so
->so_proto
->pr_domain
->dom_mtx
);
1840 __private_extern__
int
1841 kctl_reg_list SYSCTL_HANDLER_ARGS
1843 #pragma unused(oidp, arg1, arg2)
1846 struct xsystmgen xsg
;
1849 size_t item_size
= ROUNDUP64(sizeof (struct xkctl_reg
));
1851 buf
= _MALLOC(item_size
, M_TEMP
, M_WAITOK
| M_ZERO
);
1855 lck_mtx_lock(ctl_mtx
);
1857 n
= kctlstat
.kcs_reg_count
;
1859 if (req
->oldptr
== USER_ADDR_NULL
) {
1860 req
->oldidx
= (n
+ n
/8) * sizeof(struct xkctl_reg
);
1863 if (req
->newptr
!= USER_ADDR_NULL
) {
1867 bzero(&xsg
, sizeof (xsg
));
1868 xsg
.xg_len
= sizeof (xsg
);
1870 xsg
.xg_gen
= kctlstat
.kcs_gencnt
;
1871 xsg
.xg_sogen
= so_gencnt
;
1872 error
= SYSCTL_OUT(req
, &xsg
, sizeof (xsg
));
1877 * We are done if there is no pcb
1884 for (i
= 0, kctl
= TAILQ_FIRST(&ctl_head
);
1885 i
< n
&& kctl
!= NULL
;
1886 i
++, kctl
= TAILQ_NEXT(kctl
, next
)) {
1887 struct xkctl_reg
*xkr
= (struct xkctl_reg
*)buf
;
1889 u_int32_t pcbcount
= 0;
1891 TAILQ_FOREACH(kcb
, &kctl
->kcb_head
, next
)
1894 bzero(buf
, item_size
);
1896 xkr
->xkr_len
= sizeof(struct xkctl_reg
);
1897 xkr
->xkr_kind
= XSO_KCREG
;
1898 xkr
->xkr_id
= kctl
->id
;
1899 xkr
->xkr_reg_unit
= kctl
->reg_unit
;
1900 xkr
->xkr_flags
= kctl
->flags
;
1901 xkr
->xkr_kctlref
= (uint64_t)(kctl
->kctlref
);
1902 xkr
->xkr_recvbufsize
= kctl
->recvbufsize
;
1903 xkr
->xkr_sendbufsize
= kctl
->sendbufsize
;
1904 xkr
->xkr_lastunit
= kctl
->lastunit
;
1905 xkr
->xkr_pcbcount
= pcbcount
;
1906 xkr
->xkr_connect
= (uint64_t)VM_KERNEL_ADDRPERM(kctl
->connect
);
1907 xkr
->xkr_disconnect
=
1908 (uint64_t)VM_KERNEL_ADDRPERM(kctl
->disconnect
);
1909 xkr
->xkr_send
= (uint64_t)VM_KERNEL_ADDRPERM(kctl
->send
);
1910 xkr
->xkr_send_list
=
1911 (uint64_t)VM_KERNEL_ADDRPERM(kctl
->send_list
);
1912 xkr
->xkr_setopt
= (uint64_t)VM_KERNEL_ADDRPERM(kctl
->setopt
);
1913 xkr
->xkr_getopt
= (uint64_t)VM_KERNEL_ADDRPERM(kctl
->getopt
);
1914 xkr
->xkr_rcvd
= (uint64_t)VM_KERNEL_ADDRPERM(kctl
->rcvd
);
1915 strlcpy(xkr
->xkr_name
, kctl
->name
, sizeof(xkr
->xkr_name
));
1917 error
= SYSCTL_OUT(req
, buf
, item_size
);
1922 * Give the user an updated idea of our state.
1923 * If the generation differs from what we told
1924 * her before, she knows that something happened
1925 * while we were processing this request, and it
1926 * might be necessary to retry.
1928 bzero(&xsg
, sizeof (xsg
));
1929 xsg
.xg_len
= sizeof (xsg
);
1931 xsg
.xg_gen
= kctlstat
.kcs_gencnt
;
1932 xsg
.xg_sogen
= so_gencnt
;
1933 error
= SYSCTL_OUT(req
, &xsg
, sizeof (xsg
));
1940 lck_mtx_unlock(ctl_mtx
);
1948 __private_extern__
int
1949 kctl_pcblist SYSCTL_HANDLER_ARGS
1951 #pragma unused(oidp, arg1, arg2)
1954 struct xsystmgen xsg
;
1957 size_t item_size
= ROUNDUP64(sizeof (struct xkctlpcb
)) +
1958 ROUNDUP64(sizeof (struct xsocket_n
)) +
1959 2 * ROUNDUP64(sizeof (struct xsockbuf_n
)) +
1960 ROUNDUP64(sizeof (struct xsockstat_n
));
1962 buf
= _MALLOC(item_size
, M_TEMP
, M_WAITOK
| M_ZERO
);
1966 lck_mtx_lock(ctl_mtx
);
1968 n
= kctlstat
.kcs_pcbcount
;
1970 if (req
->oldptr
== USER_ADDR_NULL
) {
1971 req
->oldidx
= (n
+ n
/8) * item_size
;
1974 if (req
->newptr
!= USER_ADDR_NULL
) {
1978 bzero(&xsg
, sizeof (xsg
));
1979 xsg
.xg_len
= sizeof (xsg
);
1981 xsg
.xg_gen
= kctlstat
.kcs_gencnt
;
1982 xsg
.xg_sogen
= so_gencnt
;
1983 error
= SYSCTL_OUT(req
, &xsg
, sizeof (xsg
));
1988 * We are done if there is no pcb
1995 for (i
= 0, kctl
= TAILQ_FIRST(&ctl_head
);
1996 i
< n
&& kctl
!= NULL
;
1997 kctl
= TAILQ_NEXT(kctl
, next
)) {
2000 for (kcb
= TAILQ_FIRST(&kctl
->kcb_head
);
2001 i
< n
&& kcb
!= NULL
;
2002 i
++, kcb
= TAILQ_NEXT(kcb
, next
)) {
2003 struct xkctlpcb
*xk
= (struct xkctlpcb
*)buf
;
2004 struct xsocket_n
*xso
= (struct xsocket_n
*)
2005 ADVANCE64(xk
, sizeof (*xk
));
2006 struct xsockbuf_n
*xsbrcv
= (struct xsockbuf_n
*)
2007 ADVANCE64(xso
, sizeof (*xso
));
2008 struct xsockbuf_n
*xsbsnd
= (struct xsockbuf_n
*)
2009 ADVANCE64(xsbrcv
, sizeof (*xsbrcv
));
2010 struct xsockstat_n
*xsostats
= (struct xsockstat_n
*)
2011 ADVANCE64(xsbsnd
, sizeof (*xsbsnd
));
2013 bzero(buf
, item_size
);
2015 xk
->xkp_len
= sizeof(struct xkctlpcb
);
2016 xk
->xkp_kind
= XSO_KCB
;
2017 xk
->xkp_unit
= kcb
->unit
;
2018 xk
->xkp_kctpcb
= (uint64_t)VM_KERNEL_ADDRPERM(kcb
);
2019 xk
->xkp_kctlref
= (uint64_t)VM_KERNEL_ADDRPERM(kctl
);
2020 xk
->xkp_kctlid
= kctl
->id
;
2021 strlcpy(xk
->xkp_kctlname
, kctl
->name
,
2022 sizeof(xk
->xkp_kctlname
));
2024 sotoxsocket_n(kcb
->so
, xso
);
2025 sbtoxsockbuf_n(kcb
->so
?
2026 &kcb
->so
->so_rcv
: NULL
, xsbrcv
);
2027 sbtoxsockbuf_n(kcb
->so
?
2028 &kcb
->so
->so_snd
: NULL
, xsbsnd
);
2029 sbtoxsockstat_n(kcb
->so
, xsostats
);
2031 error
= SYSCTL_OUT(req
, buf
, item_size
);
2037 * Give the user an updated idea of our state.
2038 * If the generation differs from what we told
2039 * her before, she knows that something happened
2040 * while we were processing this request, and it
2041 * might be necessary to retry.
2043 bzero(&xsg
, sizeof (xsg
));
2044 xsg
.xg_len
= sizeof (xsg
);
2046 xsg
.xg_gen
= kctlstat
.kcs_gencnt
;
2047 xsg
.xg_sogen
= so_gencnt
;
2048 error
= SYSCTL_OUT(req
, &xsg
, sizeof (xsg
));
2055 lck_mtx_unlock(ctl_mtx
);
2061 kctl_getstat SYSCTL_HANDLER_ARGS
2063 #pragma unused(oidp, arg1, arg2)
2066 lck_mtx_lock(ctl_mtx
);
2068 if (req
->newptr
!= USER_ADDR_NULL
) {
2072 if (req
->oldptr
== USER_ADDR_NULL
) {
2073 req
->oldidx
= sizeof(struct kctlstat
);
2077 error
= SYSCTL_OUT(req
, &kctlstat
,
2078 MIN(sizeof(struct kctlstat
), req
->oldlen
));
2080 lck_mtx_unlock(ctl_mtx
);
2085 kctl_fill_socketinfo(struct socket
*so
, struct socket_info
*si
)
2087 struct ctl_cb
*kcb
= (struct ctl_cb
*)so
->so_pcb
;
2088 struct kern_ctl_info
*kcsi
=
2089 &si
->soi_proto
.pri_kern_ctl
;
2090 struct kctl
*kctl
= kcb
->kctl
;
2092 si
->soi_kind
= SOCKINFO_KERN_CTL
;
2097 kcsi
->kcsi_id
= kctl
->id
;
2098 kcsi
->kcsi_reg_unit
= kctl
->reg_unit
;
2099 kcsi
->kcsi_flags
= kctl
->flags
;
2100 kcsi
->kcsi_recvbufsize
= kctl
->recvbufsize
;
2101 kcsi
->kcsi_sendbufsize
= kctl
->sendbufsize
;
2102 kcsi
->kcsi_unit
= kcb
->unit
;
2103 strlcpy(kcsi
->kcsi_name
, kctl
->name
, MAX_KCTL_NAME
);