/*
 * Copyright (c) 1999-2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Kernel Control domain - allows control connections to
 * kernel controllers and to read/write data.
 *
 * Vincent Lubet, 040506
 * Christophe Allie, 010928
 * Justin C. Walker, 990319
 */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/sys_domain.h>
#include <sys/kern_event.h>
#include <sys/kern_control.h>
#include <sys/kauth.h>
#include <sys/sysctl.h>
#include <net/if_var.h>

#include <mach/vm_types.h>

#include <kern/thread.h>
#define	ROUNDUP64(x) P2ROUNDUP((x), sizeof (u_int64_t))
#define	ADVANCE64(p, n) (void*)((char *)(p) + ROUNDUP64(n))
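
/*
 * Example (illustrative sketch, not part of the original source):
 * ROUNDUP64 rounds a size up to the next multiple of 8 bytes, and
 * ADVANCE64 steps a pointer past an object by that rounded size, so
 * successive records packed into one buffer all start 64-bit aligned.
 * kctl_pcblist below relies on this layout:
 *
 *	size_t sz = ROUNDUP64(12);		// 12 -> 16
 *	struct xkctlpcb *xk = (struct xkctlpcb *)buf;
 *	struct xsocket_n *xso = (struct xsocket_n *)
 *	    ADVANCE64(xk, sizeof (*xk));	// next aligned record
 */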
/*
 * Definitions and vars for the controls we support
 */

#define	CTL_SENDSIZE	(2 * 1024)	/* default buffer size */
#define	CTL_RECVSIZE	(8 * 1024)	/* default buffer size */
/*
 * Definitions and vars for the kernel control subsystem itself
 */

static u_int32_t	ctl_maxunit = 65536;
static lck_grp_attr_t	*ctl_lck_grp_attr = 0;
static lck_attr_t	*ctl_lck_attr = 0;
static lck_grp_t	*ctl_lck_grp = 0;
static lck_mtx_t	*ctl_mtx;
/* all the controllers are chained */
TAILQ_HEAD(kctl_list, kctl)	ctl_head;
static int ctl_attach(struct socket *, int, struct proc *);
static int ctl_detach(struct socket *);
static int ctl_sofreelastref(struct socket *so);
static int ctl_connect(struct socket *, struct sockaddr *, struct proc *);
static int ctl_disconnect(struct socket *);
static int ctl_ioctl(struct socket *so, u_long cmd, caddr_t data,
    struct ifnet *ifp, struct proc *p);
static int ctl_send(struct socket *, int, struct mbuf *,
    struct sockaddr *, struct mbuf *, struct proc *);
static int ctl_send_list(struct socket *, int, struct mbuf *,
    struct sockaddr *, struct mbuf *, struct proc *);
static int ctl_ctloutput(struct socket *, struct sockopt *);
static int ctl_peeraddr(struct socket *so, struct sockaddr **nam);
static int ctl_usr_rcvd(struct socket *so, int flags);

static struct kctl *ctl_find_by_name(const char *);
static struct kctl *ctl_find_by_id_unit(u_int32_t id, u_int32_t unit);

static struct socket *kcb_find_socket(struct kctl *, u_int32_t unit);
static struct ctl_cb *kcb_find(struct kctl *, u_int32_t unit);
static void ctl_post_msg(u_int32_t event_code, u_int32_t id);

static int ctl_lock(struct socket *, int, void *);
static int ctl_unlock(struct socket *, int, void *);
static lck_mtx_t * ctl_getlock(struct socket *, int);
static struct pr_usrreqs ctl_usrreqs = {
	.pru_attach =		ctl_attach,
	.pru_connect =		ctl_connect,
	.pru_control =		ctl_ioctl,
	.pru_detach =		ctl_detach,
	.pru_disconnect =	ctl_disconnect,
	.pru_peeraddr =		ctl_peeraddr,
	.pru_rcvd =		ctl_usr_rcvd,
	.pru_send =		ctl_send,
	.pru_send_list =	ctl_send_list,
	.pru_sosend =		sosend,
	.pru_sosend_list =	sosend_list,
	.pru_soreceive =	soreceive,
	.pru_soreceive_list =	soreceive_list,
};
static struct protosw kctlsw[] = {
{
	.pr_type =	SOCK_DGRAM,
	.pr_protocol =	SYSPROTO_CONTROL,
	.pr_flags =	PR_ATOMIC|PR_CONNREQUIRED|PR_PCBLOCK|PR_WANTRCVD,
	.pr_ctloutput =	ctl_ctloutput,
	.pr_usrreqs =	&ctl_usrreqs,
	.pr_lock =	ctl_lock,
	.pr_unlock =	ctl_unlock,
	.pr_getlock =	ctl_getlock,
},
{
	.pr_type =	SOCK_STREAM,
	.pr_protocol =	SYSPROTO_CONTROL,
	.pr_flags =	PR_CONNREQUIRED|PR_PCBLOCK|PR_WANTRCVD,
	.pr_ctloutput =	ctl_ctloutput,
	.pr_usrreqs =	&ctl_usrreqs,
	.pr_lock =	ctl_lock,
	.pr_unlock =	ctl_unlock,
	.pr_getlock =	ctl_getlock,
}
};
__private_extern__ int kctl_reg_list SYSCTL_HANDLER_ARGS;
__private_extern__ int kctl_pcblist SYSCTL_HANDLER_ARGS;
__private_extern__ int kctl_getstat SYSCTL_HANDLER_ARGS;
static int kctl_proto_count = (sizeof (kctlsw) / sizeof (struct protosw));
SYSCTL_NODE(_net_systm, OID_AUTO, kctl,
	CTLFLAG_RW|CTLFLAG_LOCKED, 0, "Kernel control family");

struct kctlstat kctlstat;
SYSCTL_PROC(_net_systm_kctl, OID_AUTO, stats,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
    kctl_getstat, "S,kctlstat", "");

SYSCTL_PROC(_net_systm_kctl, OID_AUTO, reg_list,
	CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
	kctl_reg_list, "S,xkctl_reg", "");

SYSCTL_PROC(_net_systm_kctl, OID_AUTO, pcblist,
	CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
	kctl_pcblist, "S,xkctlpcb", "");

u_int32_t ctl_autorcvbuf_max = 256 * 1024;
SYSCTL_INT(_net_systm_kctl, OID_AUTO, autorcvbufmax,
	CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_autorcvbuf_max, 0, "");

u_int32_t ctl_autorcvbuf_high = 0;
SYSCTL_INT(_net_systm_kctl, OID_AUTO, autorcvbufhigh,
	CTLFLAG_RD | CTLFLAG_LOCKED, &ctl_autorcvbuf_high, 0, "");

u_int32_t ctl_debug = 0;
SYSCTL_INT(_net_systm_kctl, OID_AUTO, debug,
	CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_debug, 0, "");
/*
 * Install the protosw's for the Kernel Control manager.
 */
__private_extern__ void
kern_control_init(struct domain *dp)
{
	struct protosw *pr;
	int i;

	VERIFY(!(dp->dom_flags & DOM_INITIALIZED));
	VERIFY(dp == systemdomain);

	ctl_lck_grp_attr = lck_grp_attr_alloc_init();
	if (ctl_lck_grp_attr == NULL) {
		panic("%s: lck_grp_attr_alloc_init failed\n", __func__);
		/* NOTREACHED */
	}

	ctl_lck_grp = lck_grp_alloc_init("Kernel Control Protocol",
	    ctl_lck_grp_attr);
	if (ctl_lck_grp == NULL) {
		panic("%s: lck_grp_alloc_init failed\n", __func__);
		/* NOTREACHED */
	}

	ctl_lck_attr = lck_attr_alloc_init();
	if (ctl_lck_attr == NULL) {
		panic("%s: lck_attr_alloc_init failed\n", __func__);
		/* NOTREACHED */
	}

	ctl_mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
	if (ctl_mtx == NULL) {
		panic("%s: lck_mtx_alloc_init failed\n", __func__);
		/* NOTREACHED */
	}
	TAILQ_INIT(&ctl_head);

	for (i = 0, pr = &kctlsw[0]; i < kctl_proto_count; i++, pr++)
		net_add_proto(pr, dp, 1);
}
static void
kcb_delete(struct ctl_cb *kcb)
{
	if (kcb != 0) {
		if (kcb->mtx != 0)
			lck_mtx_free(kcb->mtx, ctl_lck_grp);
		FREE(kcb, M_TEMP);
	}
}
/*
 * Kernel Controller user-request functions:
 * - the attach function must exist and succeed
 * - detach is not necessary
 * - we need a pcb for the per-socket mutex
 */
static int
ctl_attach(struct socket *so, int proto, struct proc *p)
{
#pragma unused(proto, p)
	int error = 0;
	struct ctl_cb *kcb = 0;

	MALLOC(kcb, struct ctl_cb *, sizeof(struct ctl_cb), M_TEMP, M_WAITOK);
	if (kcb == NULL) {
		error = ENOMEM;
		goto quit;
	}
	bzero(kcb, sizeof(struct ctl_cb));

	kcb->mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
	if (kcb->mtx == NULL) {
		error = ENOMEM;
		goto quit;
	}
	kcb->so = so;
	so->so_pcb = (caddr_t)kcb;

quit:
	if (error != 0) {
		kcb_delete(kcb);
		kcb = 0;
	}
	return (error);
}
static int
ctl_sofreelastref(struct socket *so)
{
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

	so->so_pcb = 0;

	if (kcb != 0) {
		struct kctl *kctl;

		if ((kctl = kcb->kctl) != 0) {
			lck_mtx_lock(ctl_mtx);
			TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
			kctlstat.kcs_pcbcount--;
			kctlstat.kcs_gencnt++;
			lck_mtx_unlock(ctl_mtx);
		}
		kcb_delete(kcb);
	}
	sofreelastref(so, 1);
	return (0);
}
static int
ctl_detach(struct socket *so)
{
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

	if (kcb == 0)
		return (0);

	soisdisconnected(so);
	so->so_flags |= SOF_PCBCLEARING;
	return (0);
}
static int
ctl_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
{
#pragma unused(p)
	struct kctl *kctl;
	int error = 0;
	struct sockaddr_ctl sa;
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
	struct ctl_cb *kcb_next = NULL;
	u_quad_t sbmaxsize;
	u_int32_t recvbufsize, sendbufsize;

	if (kcb == 0)
		panic("ctl_connect so_pcb null\n");

	if (nam->sa_len != sizeof(struct sockaddr_ctl))
		return (EINVAL);

	bcopy(nam, &sa, sizeof(struct sockaddr_ctl));

	lck_mtx_lock(ctl_mtx);
	kctl = ctl_find_by_id_unit(sa.sc_id, sa.sc_unit);
	if (kctl == NULL) {
		lck_mtx_unlock(ctl_mtx);
		return (ENOENT);
	}

	if (((kctl->flags & CTL_FLAG_REG_SOCK_STREAM) &&
	    (so->so_type != SOCK_STREAM)) ||
	    (!(kctl->flags & CTL_FLAG_REG_SOCK_STREAM) &&
	    (so->so_type != SOCK_DGRAM))) {
		lck_mtx_unlock(ctl_mtx);
		return (EPROTOTYPE);
	}

	if (kctl->flags & CTL_FLAG_PRIVILEGED) {
		if (p == 0) {
			lck_mtx_unlock(ctl_mtx);
			return (EINVAL);
		}
		if (kauth_cred_issuser(kauth_cred_get()) == 0) {
			lck_mtx_unlock(ctl_mtx);
			return (EPERM);
		}
	}

	if ((kctl->flags & CTL_FLAG_REG_ID_UNIT) || sa.sc_unit != 0) {
		if (kcb_find(kctl, sa.sc_unit) != NULL) {
			lck_mtx_unlock(ctl_mtx);
			return (EBUSY);
		}
	} else {
		/* Find an unused ID, assumes control IDs are in order */
		u_int32_t unit = 1;

		TAILQ_FOREACH(kcb_next, &kctl->kcb_head, next) {
			if (kcb_next->unit > unit) {
				/* Found a gap, let's fill it in */
				break;
			}
			unit = kcb_next->unit + 1;
			if (unit == ctl_maxunit)
				break;
		}

		if (unit == ctl_maxunit) {
			lck_mtx_unlock(ctl_mtx);
			return (EBUSY);
		}

		sa.sc_unit = unit;
	}

	kcb->unit = sa.sc_unit;
	kcb->kctl = kctl;
	if (kcb_next != NULL) {
		TAILQ_INSERT_BEFORE(kcb_next, kcb, next);
	} else {
		TAILQ_INSERT_TAIL(&kctl->kcb_head, kcb, next);
	}
	kctlstat.kcs_pcbcount++;
	kctlstat.kcs_gencnt++;
	kctlstat.kcs_connections++;
	lck_mtx_unlock(ctl_mtx);

	/*
	 * rdar://15526688: Limit the send and receive sizes to sb_max
	 * by using the same scaling as sbreserve()
	 */
	sbmaxsize = (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES);

	if (kctl->sendbufsize > sbmaxsize)
		sendbufsize = sbmaxsize;
	else
		sendbufsize = kctl->sendbufsize;

	if (kctl->recvbufsize > sbmaxsize)
		recvbufsize = sbmaxsize;
	else
		recvbufsize = kctl->recvbufsize;

	error = soreserve(so, sendbufsize, recvbufsize);
	if (error) {
		printf("%s - soreserve(%llx, %u, %u) error %d\n", __func__,
		    (uint64_t)VM_KERNEL_ADDRPERM(so),
		    sendbufsize, recvbufsize, error);
		goto done;
	}
	soisconnecting(so);

	socket_unlock(so, 0);
	error = (*kctl->connect)(kctl, &sa, &kcb->userdata);
	socket_lock(so, 0);
	if (error)
		goto end;

	soisconnected(so);

end:
	if (error && kctl->disconnect) {
		socket_unlock(so, 0);
		(*kctl->disconnect)(kctl, kcb->unit, kcb->userdata);
		socket_lock(so, 0);
	}
done:
	if (error) {
		soisdisconnected(so);
		lck_mtx_lock(ctl_mtx);
		kcb->kctl = 0;
		kcb->unit = 0;
		TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
		kctlstat.kcs_pcbcount--;
		kctlstat.kcs_gencnt++;
		kctlstat.kcs_conn_fail++;
		lck_mtx_unlock(ctl_mtx);
	}
	return (error);
}
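
/*
 * Example (user-space sketch, not part of the original file): the connect
 * path above is driven from user space roughly as follows; the control
 * name "com.example.mykext" is a placeholder.
 *
 *	#include <sys/socket.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/sys_domain.h>
 *	#include <sys/kern_control.h>
 *	#include <string.h>
 *
 *	int fd = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);
 *	struct ctl_info info;
 *	bzero(&info, sizeof(info));
 *	strlcpy(info.ctl_name, "com.example.mykext", sizeof(info.ctl_name));
 *	ioctl(fd, CTLIOCGINFO, &info);	// name -> ctl_id, see ctl_ioctl
 *
 *	struct sockaddr_ctl sc;
 *	bzero(&sc, sizeof(sc));
 *	sc.sc_len = sizeof(sc);
 *	sc.sc_family = AF_SYSTEM;
 *	sc.ss_sysaddr = AF_SYS_CONTROL;
 *	sc.sc_id = info.ctl_id;
 *	sc.sc_unit = 0;		// 0 lets ctl_connect pick a free unit
 *	connect(fd, (struct sockaddr *)&sc, sizeof(sc));
 */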
static int
ctl_disconnect(struct socket *so)
{
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

	if (kcb != NULL) {
		struct kctl *kctl = kcb->kctl;

		if (kctl && kctl->disconnect) {
			socket_unlock(so, 0);
			(*kctl->disconnect)(kctl, kcb->unit, kcb->userdata);
			socket_lock(so, 0);
		}

		soisdisconnected(so);

		socket_unlock(so, 0);
		lck_mtx_lock(ctl_mtx);
		kcb->kctl = 0;
		kcb->unit = 0;
		while (kcb->usecount != 0) {
			msleep(&kcb->usecount, ctl_mtx, 0, "kcb->usecount", 0);
		}
		TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
		kctlstat.kcs_pcbcount--;
		kctlstat.kcs_gencnt++;
		lck_mtx_unlock(ctl_mtx);
		socket_lock(so, 0);
	}
	return (0);
}
static int
ctl_peeraddr(struct socket *so, struct sockaddr **nam)
{
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
	struct kctl *kctl;
	struct sockaddr_ctl sc;

	if (kcb == NULL)	/* sanity check */
		return (ENOTCONN);

	if ((kctl = kcb->kctl) == NULL)
		return (EINVAL);

	bzero(&sc, sizeof(struct sockaddr_ctl));
	sc.sc_len = sizeof(struct sockaddr_ctl);
	sc.sc_family = AF_SYSTEM;
	sc.ss_sysaddr = AF_SYS_CONTROL;
	sc.sc_id = kctl->id;
	sc.sc_unit = kcb->unit;

	*nam = dup_sockaddr((struct sockaddr *)&sc, 1);

	return (0);
}
static void
ctl_sbrcv_trim(struct socket *so)
{
	struct sockbuf *sb = &so->so_rcv;

	if (sb->sb_hiwat > sb->sb_idealsize) {
		u_int32_t diff;
		int32_t trim;

		/*
		 * The difference between the ideal size and the
		 * current size is the upper bound of the trimming
		 */
		diff = sb->sb_hiwat - sb->sb_idealsize;
		/*
		 * We cannot trim below the outstanding data
		 */
		trim = sb->sb_hiwat - sb->sb_cc;

		trim = imin(trim, (int32_t)diff);

		if (trim > 0) {
			sbreserve(sb, (sb->sb_hiwat - trim));

			if (ctl_debug)
				printf("%s - shrunk to %d\n",
				    __func__, sb->sb_hiwat);
		}
	}
}
static int
ctl_usr_rcvd(struct socket *so, int flags)
{
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
	struct kctl *kctl;

	if ((kctl = kcb->kctl) == NULL) {
		return (EINVAL);
	}

	if (kctl->rcvd) {
		socket_unlock(so, 0);
		(*kctl->rcvd)(kctl, kcb->unit, kcb->userdata, flags);
		socket_lock(so, 0);
	}

	ctl_sbrcv_trim(so);

	return (0);
}
static int
ctl_send(struct socket *so, int flags, struct mbuf *m,
    struct sockaddr *addr, struct mbuf *control,
    struct proc *p)
{
#pragma unused(addr, p)
	int error = 0;
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
	struct kctl *kctl;

	if (control)
		m_freem(control);

	if (kcb == NULL)	/* sanity check */
		error = ENOTCONN;

	if (error == 0 && (kctl = kcb->kctl) == NULL)
		error = EINVAL;

	if (error == 0 && kctl->send) {
		so_tc_update_stats(m, so, m_get_service_class(m));
		socket_unlock(so, 0);
		error = (*kctl->send)(kctl, kcb->unit, kcb->userdata, m, flags);
		socket_lock(so, 0);
	} else {
		m_freem(m);
		if (error == 0)
			error = ENOTSUP;
	}
	if (error != 0)
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_send_fail);
	return (error);
}
static int
ctl_send_list(struct socket *so, int flags, struct mbuf *m,
    __unused struct sockaddr *addr, struct mbuf *control,
    __unused struct proc *p)
{
	int error = 0;
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
	struct kctl *kctl;

	if (control)
		m_freem_list(control);

	if (kcb == NULL)	/* sanity check */
		error = ENOTCONN;

	if (error == 0 && (kctl = kcb->kctl) == NULL)
		error = EINVAL;

	if (error == 0 && kctl->send_list) {
		struct mbuf *nxt;

		for (nxt = m; nxt != NULL; nxt = nxt->m_nextpkt)
			so_tc_update_stats(nxt, so, m_get_service_class(nxt));

		socket_unlock(so, 0);
		error = (*kctl->send_list)(kctl, kcb->unit, kcb->userdata, m,
		    flags);
		socket_lock(so, 0);
	} else if (error == 0 && kctl->send) {
		while (m != NULL && error == 0) {
			struct mbuf *nextpkt = m->m_nextpkt;

			m->m_nextpkt = NULL;
			so_tc_update_stats(m, so, m_get_service_class(m));
			socket_unlock(so, 0);
			error = (*kctl->send)(kctl, kcb->unit, kcb->userdata, m,
			    flags);
			socket_lock(so, 0);
			m = nextpkt;
		}
		if (m != NULL)
			m_freem_list(m);
	} else {
		m_freem_list(m);
		if (error == 0)
			error = ENOTSUP;
	}
	if (error != 0)
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_send_list_fail);
	return (error);
}
static errno_t
ctl_rcvbspace(struct kctl *kctl, struct socket *so, u_int32_t datasize,
    u_int32_t flags)
{
	struct sockbuf *sb = &so->so_rcv;
	u_int32_t space = sbspace(sb);
	errno_t error;

	if ((kctl->flags & CTL_FLAG_REG_CRIT) == 0) {
		if ((u_int32_t) space >= datasize)
			error = 0;
		else
			error = ENOBUFS;
	} else if ((flags & CTL_DATA_CRIT) == 0) {
		/*
		 * Reserve 25% for critical messages
		 */
		if (space < (sb->sb_hiwat >> 2) ||
		    space < datasize)
			error = ENOBUFS;
		else
			error = 0;
	} else {
		u_int32_t autorcvbuf_max;

		/*
		 * Allow overcommit of 25%
		 */
		autorcvbuf_max = min(sb->sb_idealsize + (sb->sb_idealsize >> 2),
		    ctl_autorcvbuf_max);

		if ((u_int32_t) space >= datasize) {
			error = 0;
		} else if (tcp_cansbgrow(sb) &&
		    sb->sb_hiwat < autorcvbuf_max) {
			/*
			 * Grow with a little bit of leeway
			 */
			u_int32_t grow = datasize - space + MSIZE;

			if (sbreserve(sb,
			    min((sb->sb_hiwat + grow), autorcvbuf_max)) == 1) {
				if (sb->sb_hiwat > ctl_autorcvbuf_high)
					ctl_autorcvbuf_high = sb->sb_hiwat;

				if (ctl_debug)
					printf("%s - grown to %d\n",
					    __func__, sb->sb_hiwat);
				error = 0;
			} else {
				error = ENOBUFS;
			}
		} else {
			error = ENOBUFS;
		}
	}
	return (error);
}
errno_t
ctl_enqueuembuf(void *kctlref, u_int32_t unit, struct mbuf *m, u_int32_t flags)
{
	struct socket *so;
	errno_t error = 0;
	struct kctl *kctl = (struct kctl *)kctlref;
	int len = m->m_pkthdr.len;

	if (kctl == NULL)
		return (EINVAL);

	so = kcb_find_socket(kctl, unit);
	if (so == NULL)
		return (EINVAL);

	if (ctl_rcvbspace(kctl, so, len, flags) != 0) {
		error = ENOBUFS;
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
		goto bye;
	}
	if ((flags & CTL_DATA_EOR))
		m->m_flags |= M_EOR;

	so_recv_data_stat(so, m, 0);
	if (sbappend(&so->so_rcv, m) != 0) {
		if ((flags & CTL_DATA_NOWAKEUP) == 0)
			sorwakeup(so);
	} else {
		error = ENOBUFS;
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
	}
bye:
	if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT))
		printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
		    __func__, error, len,
		    so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);

	socket_unlock(so, 1);
	if (error != 0)
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);

	return (error);
}
/*
 * Compute space occupied by mbuf like sbappendrecord
 */
static int
m_space(struct mbuf *m)
{
	int space = 0;
	struct mbuf *nxt;

	for (nxt = m; nxt != NULL; nxt = nxt->m_next)
		space += nxt->m_len;

	return (space);
}
errno_t
ctl_enqueuembuf_list(void *kctlref, u_int32_t unit, struct mbuf *m_list,
    u_int32_t flags, struct mbuf **m_remain)
{
	struct socket *so = NULL;
	errno_t error = 0;
	struct kctl *kctl = (struct kctl *)kctlref;
	struct mbuf *m, *nextpkt;
	int needwakeup = 0;
	int len = 0;

	/*
	 * Need to point the beginning of the list in case of early exit
	 */
	m = m_list;

	if (kctl == NULL) {
		error = EINVAL;
		goto done;
	}
	if (kctl->flags & CTL_FLAG_REG_SOCK_STREAM) {
		error = EOPNOTSUPP;
		goto done;
	}
	if (flags & CTL_DATA_EOR) {
		error = EINVAL;
		goto done;
	}
	/*
	 * kcb_find_socket takes the socket lock with a reference
	 */
	so = kcb_find_socket(kctl, unit);
	if (so == NULL) {
		error = EINVAL;
		goto done;
	}

	for (m = m_list; m != NULL; m = nextpkt) {
		nextpkt = m->m_nextpkt;

		if (m->m_pkthdr.len == 0)
			printf("%s: %llx m_pkthdr.len is 0",
			    __func__, (uint64_t)VM_KERNEL_ADDRPERM(m));

		/*
		 * The mbuf is either appended or freed by sbappendrecord()
		 * so it's not reliable from a data standpoint
		 */
		len = m_space(m);
		if (ctl_rcvbspace(kctl, so, len, flags) != 0) {
			error = ENOBUFS;
			OSIncrementAtomic64(
			    (SInt64 *)&kctlstat.kcs_enqueue_fullsock);
			break;
		} else {
			/*
			 * Unlink from the list, m is on its own
			 */
			m->m_nextpkt = NULL;
			so_recv_data_stat(so, m, 0);
			if (sbappendrecord(&so->so_rcv, m) != 0) {
				needwakeup = 1;
			} else {
				/*
				 * We free or return the remaining
				 * mbufs in the list
				 */
				error = ENOBUFS;
				OSIncrementAtomic64(
				    (SInt64 *)&kctlstat.kcs_enqueue_fullsock);
				break;
			}
		}
	}
	if (needwakeup && (flags & CTL_DATA_NOWAKEUP) == 0)
		sorwakeup(so);

done:
	if (so != NULL) {
		if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT))
			printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
			    __func__, error, len,
			    so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);

		socket_unlock(so, 1);
	}
	if (m_remain) {
		*m_remain = m;

		if (m != NULL && socket_debug && so != NULL &&
		    (so->so_options & SO_DEBUG)) {
			struct mbuf *n;

			printf("%s m_list %llx\n", __func__,
			    (uint64_t) VM_KERNEL_ADDRPERM(m_list));
			for (n = m; n != NULL; n = n->m_nextpkt)
				printf(" remain %llx m_next %llx\n",
				    (uint64_t) VM_KERNEL_ADDRPERM(n),
				    (uint64_t) VM_KERNEL_ADDRPERM(n->m_next));
		}
	} else {
		if (m != NULL)
			m_freem_list(m);
	}
	if (error != 0)
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
	return (error);
}
errno_t
ctl_enqueuedata(void *kctlref, u_int32_t unit, void *data, size_t len,
    u_int32_t flags)
{
	struct socket *so;
	struct mbuf *m;
	errno_t error = 0;
	struct kctl *kctl = (struct kctl *)kctlref;
	unsigned int num_needed;
	struct mbuf *n;
	size_t curlen = 0;

	if (kctlref == NULL)
		return (EINVAL);

	so = kcb_find_socket(kctl, unit);
	if (so == NULL)
		return (EINVAL);

	if (ctl_rcvbspace(kctl, so, len, flags) != 0) {
		error = ENOBUFS;
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
		goto bye;
	}

	num_needed = 1;
	m = m_allocpacket_internal(&num_needed, len, NULL, M_NOWAIT, 1, 0);
	if (m == NULL) {
		printf("ctl_enqueuedata: m_allocpacket_internal(%lu) failed\n",
		    len);
		error = ENOMEM;
		goto bye;
	}

	for (n = m; n != NULL; n = n->m_next) {
		size_t mlen = mbuf_maxlen(n);

		if (mlen + curlen > len)
			mlen = len - curlen;
		n->m_len = mlen;
		bcopy((char *)data + curlen, n->m_data, mlen);
		curlen += mlen;
	}
	mbuf_pkthdr_setlen(m, curlen);

	if ((flags & CTL_DATA_EOR))
		m->m_flags |= M_EOR;
	so_recv_data_stat(so, m, 0);
	if (sbappend(&so->so_rcv, m) != 0) {
		if ((flags & CTL_DATA_NOWAKEUP) == 0)
			sorwakeup(so);
	} else {
		error = ENOBUFS;
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
	}

bye:
	if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT))
		printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
		    __func__, error, (int)len,
		    so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);

	socket_unlock(so, 1);
	if (error != 0)
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
	return (error);
}
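
/*
 * Example (kext-side sketch, not part of the original file): a controller
 * typically pushes data to its client with ctl_enqueuedata from one of
 * its callbacks; kctlref comes from ctl_register and unit from the
 * connect callback. my_kext_emit is a hypothetical helper.
 *
 *	static void
 *	my_kext_emit(kern_ctl_ref kctlref, u_int32_t unit)
 *	{
 *		char msg[] = "hello";
 *		errno_t err;
 *
 *		err = ctl_enqueuedata(kctlref, unit, msg, sizeof(msg),
 *		    CTL_DATA_EOR);
 *		if (err == ENOBUFS) {
 *			// receive buffer full: retry later, e.g. from the
 *			// ctl_rcvd callback once the client has read data
 *		}
 *	}
 */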
errno_t
ctl_getenqueuespace(kern_ctl_ref kctlref, u_int32_t unit, size_t *space)
{
	struct kctl *kctl = (struct kctl *)kctlref;
	struct socket *so;
	long avail;

	if (kctlref == NULL || space == NULL)
		return (EINVAL);

	so = kcb_find_socket(kctl, unit);
	if (so == NULL)
		return (EINVAL);

	avail = sbspace(&so->so_rcv);
	*space = (avail < 0) ? 0 : avail;
	socket_unlock(so, 1);

	return (0);
}
errno_t
ctl_getenqueuereadable(kern_ctl_ref kctlref, u_int32_t unit,
    u_int32_t *difference)
{
	struct kctl *kctl = (struct kctl *)kctlref;
	struct socket *so;

	if (kctlref == NULL || difference == NULL)
		return (EINVAL);

	so = kcb_find_socket(kctl, unit);
	if (so == NULL)
		return (EINVAL);

	if (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat) {
		*difference = 0;
	} else {
		*difference = (so->so_rcv.sb_lowat - so->so_rcv.sb_cc);
	}
	socket_unlock(so, 1);

	return (0);
}
static int
ctl_ctloutput(struct socket *so, struct sockopt *sopt)
{
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
	struct kctl *kctl;
	int error = 0;
	void *data = NULL;
	size_t len;

	if (sopt->sopt_level != SYSPROTO_CONTROL) {
		return (EINVAL);
	}

	if (kcb == NULL)	/* sanity check */
		return (ENOTCONN);

	if ((kctl = kcb->kctl) == NULL)
		return (EINVAL);

	switch (sopt->sopt_dir) {
	case SOPT_SET:
		if (kctl->setopt == NULL)
			return (ENOTSUP);
		if (sopt->sopt_valsize == 0) {
			data = NULL;
		} else {
			MALLOC(data, void *, sopt->sopt_valsize, M_TEMP,
			    M_WAITOK);
			if (data == NULL)
				return (ENOMEM);
			error = sooptcopyin(sopt, data,
			    sopt->sopt_valsize, sopt->sopt_valsize);
		}
		if (error == 0) {
			socket_unlock(so, 0);
			error = (*kctl->setopt)(kcb->kctl, kcb->unit,
			    kcb->userdata, sopt->sopt_name, data,
			    sopt->sopt_valsize);
			socket_lock(so, 0);
		}
		if (data != NULL)
			FREE(data, M_TEMP);
		break;

	case SOPT_GET:
		if (kctl->getopt == NULL)
			return (ENOTSUP);

		if (sopt->sopt_valsize && sopt->sopt_val) {
			MALLOC(data, void *, sopt->sopt_valsize, M_TEMP,
			    M_WAITOK);
			if (data == NULL)
				return (ENOMEM);
			/*
			 * 4108337 - copy user data in case the
			 * kernel control needs it
			 */
			error = sooptcopyin(sopt, data,
			    sopt->sopt_valsize, sopt->sopt_valsize);
		}
		len = sopt->sopt_valsize;
		socket_unlock(so, 0);
		error = (*kctl->getopt)(kcb->kctl, kcb->unit,
		    kcb->userdata, sopt->sopt_name,
		    data, &len);
		if (data != NULL && len > sopt->sopt_valsize)
			panic_plain("ctl_ctloutput: ctl %s returned "
			    "len (%lu) > sopt_valsize (%lu)\n",
			    kcb->kctl->name, len,
			    sopt->sopt_valsize);
		socket_lock(so, 0);
		if (error == 0) {
			if (data != NULL)
				error = sooptcopyout(sopt, data, len);
			else
				sopt->sopt_valsize = len;
		}
		if (data != NULL)
			FREE(data, M_TEMP);
		break;
	}
	return (error);
}
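
/*
 * Example (user-space sketch, not part of the original file): on a
 * connected control socket, get/setsockopt at level SYSPROTO_CONTROL is
 * routed by ctl_ctloutput to the controller's setopt/getopt callbacks.
 * MY_OPT is a placeholder for a controller-defined option name.
 *
 *	int val = 1;
 *	setsockopt(fd, SYSPROTO_CONTROL, MY_OPT, &val, sizeof(val));
 *
 *	socklen_t len = sizeof(val);
 *	getsockopt(fd, SYSPROTO_CONTROL, MY_OPT, &val, &len);
 */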
static int
ctl_ioctl(struct socket *so, u_long cmd, caddr_t data,
    struct ifnet *ifp, struct proc *p)
{
#pragma unused(so, ifp, p)
	int error = ENOTSUP;

	switch (cmd) {
	/* get the number of controllers */
	case CTLIOCGCOUNT: {
		struct kctl *kctl;
		u_int32_t n = 0;

		lck_mtx_lock(ctl_mtx);
		TAILQ_FOREACH(kctl, &ctl_head, next)
			n++;
		lck_mtx_unlock(ctl_mtx);

		bcopy(&n, data, sizeof (n));
		error = 0;
		break;
	}
	case CTLIOCGINFO: {
		struct ctl_info ctl_info;
		struct kctl *kctl = 0;
		size_t name_len;

		bcopy(data, &ctl_info, sizeof (ctl_info));
		name_len = strnlen(ctl_info.ctl_name, MAX_KCTL_NAME);

		if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) {
			error = EINVAL;
			break;
		}
		lck_mtx_lock(ctl_mtx);
		kctl = ctl_find_by_name(ctl_info.ctl_name);
		lck_mtx_unlock(ctl_mtx);
		if (kctl == 0) {
			error = ENOENT;
			break;
		}
		ctl_info.ctl_id = kctl->id;
		bcopy(&ctl_info, data, sizeof (ctl_info));
		error = 0;
		break;
	}

	/* add controls to get list of NKEs */
	}

	return (error);
}
/*
 * Register/unregister an NKE
 */
errno_t
ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref)
{
	struct kctl *kctl = NULL;
	struct kctl *kctl_next = NULL;
	u_int32_t id = 1;
	size_t name_len;
	int is_extended = 0;

	if (userkctl == NULL)	/* sanity check */
		return (EINVAL);
	if (userkctl->ctl_connect == NULL)
		return (EINVAL);
	name_len = strlen(userkctl->ctl_name);
	if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME)
		return (EINVAL);

	MALLOC(kctl, struct kctl *, sizeof(*kctl), M_TEMP, M_WAITOK);
	if (kctl == NULL)
		return (ENOMEM);
	bzero((char *)kctl, sizeof(*kctl));

	lck_mtx_lock(ctl_mtx);

	/*
	 * Kernel Control IDs
	 *
	 * CTL_FLAG_REG_ID_UNIT indicates the control ID and unit number are
	 * static. If they do not exist, add them to the list in order. If the
	 * flag is not set, we must find a new unique value. We assume the
	 * list is in order. We find the last item in the list and add one. If
	 * this leads to wrapping the id around, we start at the front of the
	 * list and look for a gap.
	 */

	if ((userkctl->ctl_flags & CTL_FLAG_REG_ID_UNIT) == 0) {
		/* Must dynamically assign an unused ID */

		/* Verify the same name isn't already registered */
		if (ctl_find_by_name(userkctl->ctl_name) != NULL) {
			lck_mtx_unlock(ctl_mtx);
			FREE(kctl, M_TEMP);
			return (EEXIST);
		}

		/* Start with 1 in case the list is empty */
		id = 1;
		kctl_next = TAILQ_LAST(&ctl_head, kctl_list);

		if (kctl_next != NULL) {
			/* List was not empty, add one to the last item */
			id = kctl_next->id + 1;
			kctl_next = NULL;

			/*
			 * If this wrapped the id number, start looking at
			 * the front of the list for an unused id.
			 */
			if (id == 0) {
				/* Find the next unused ID */
				id = 1;

				TAILQ_FOREACH(kctl_next, &ctl_head, next) {
					if (kctl_next->id > id) {
						/* We found a gap */
						break;
					}

					id = kctl_next->id + 1;
				}
			}
		}

		userkctl->ctl_id = id;
		kctl->id = id;
		kctl->reg_unit = -1;
	} else {
		TAILQ_FOREACH(kctl_next, &ctl_head, next) {
			if (kctl_next->id > userkctl->ctl_id)
				break;
		}

		if (ctl_find_by_id_unit(userkctl->ctl_id, userkctl->ctl_unit)) {
			lck_mtx_unlock(ctl_mtx);
			FREE(kctl, M_TEMP);
			return (EEXIST);
		}
		kctl->id = userkctl->ctl_id;
		kctl->reg_unit = userkctl->ctl_unit;
	}

	is_extended = (userkctl->ctl_flags & CTL_FLAG_REG_EXTENDED);

	strlcpy(kctl->name, userkctl->ctl_name, MAX_KCTL_NAME);
	kctl->flags = userkctl->ctl_flags;

	/*
	 * Let the caller know the default send and receive sizes
	 */
	if (userkctl->ctl_sendsize == 0) {
		kctl->sendbufsize = CTL_SENDSIZE;
		userkctl->ctl_sendsize = kctl->sendbufsize;
	} else {
		kctl->sendbufsize = userkctl->ctl_sendsize;
	}
	if (userkctl->ctl_recvsize == 0) {
		kctl->recvbufsize = CTL_RECVSIZE;
		userkctl->ctl_recvsize = kctl->recvbufsize;
	} else {
		kctl->recvbufsize = userkctl->ctl_recvsize;
	}

	kctl->connect = userkctl->ctl_connect;
	kctl->disconnect = userkctl->ctl_disconnect;
	kctl->send = userkctl->ctl_send;
	kctl->setopt = userkctl->ctl_setopt;
	kctl->getopt = userkctl->ctl_getopt;
	if (is_extended) {
		kctl->rcvd = userkctl->ctl_rcvd;
		kctl->send_list = userkctl->ctl_send_list;
	}

	TAILQ_INIT(&kctl->kcb_head);

	if (kctl_next)
		TAILQ_INSERT_BEFORE(kctl_next, kctl, next);
	else
		TAILQ_INSERT_TAIL(&ctl_head, kctl, next);

	kctlstat.kcs_reg_count++;
	kctlstat.kcs_gencnt++;

	lck_mtx_unlock(ctl_mtx);

	*kctlref = kctl;

	ctl_post_msg(KEV_CTL_REGISTERED, kctl->id);
	return (0);
}
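
/*
 * Example (kext-side sketch, not part of the original file): minimal
 * registration with a dynamically assigned control ID; the my_* callbacks
 * are hypothetical functions a kext would supply.
 *
 *	struct kern_ctl_reg reg;
 *	kern_ctl_ref ref;
 *	errno_t err;
 *
 *	bzero(&reg, sizeof(reg));
 *	strlcpy(reg.ctl_name, "com.example.mykext", sizeof(reg.ctl_name));
 *	reg.ctl_connect = my_connect;		// required, see check above
 *	reg.ctl_disconnect = my_disconnect;
 *	reg.ctl_send = my_send;
 *	// ctl_sendsize/ctl_recvsize left 0: the defaults CTL_SENDSIZE and
 *	// CTL_RECVSIZE are filled back in by ctl_register
 *	err = ctl_register(&reg, &ref);
 */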
errno_t
ctl_deregister(void *kctlref)
{
	struct kctl *kctl;

	if (kctlref == NULL)	/* sanity check */
		return (EINVAL);

	lck_mtx_lock(ctl_mtx);
	TAILQ_FOREACH(kctl, &ctl_head, next) {
		if (kctl == (struct kctl *)kctlref)
			break;
	}
	if (kctl != (struct kctl *)kctlref) {
		lck_mtx_unlock(ctl_mtx);
		return (EINVAL);
	}
	if (!TAILQ_EMPTY(&kctl->kcb_head)) {
		lck_mtx_unlock(ctl_mtx);
		return (EBUSY);
	}

	TAILQ_REMOVE(&ctl_head, kctl, next);

	kctlstat.kcs_reg_count--;
	kctlstat.kcs_gencnt++;

	lck_mtx_unlock(ctl_mtx);

	ctl_post_msg(KEV_CTL_DEREGISTERED, kctl->id);
	FREE(kctl, M_TEMP);
	return (0);
}
/*
 * Must be called with global ctl_mtx lock taken
 */
static struct kctl *
ctl_find_by_name(const char *name)
{
	struct kctl *kctl;

	lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);

	TAILQ_FOREACH(kctl, &ctl_head, next)
		if (strncmp(kctl->name, name, sizeof(kctl->name)) == 0)
			return (kctl);

	return (NULL);
}
u_int32_t
ctl_id_by_name(const char *name)
{
	u_int32_t ctl_id = 0;
	struct kctl *kctl;

	lck_mtx_lock(ctl_mtx);
	kctl = ctl_find_by_name(name);
	if (kctl)
		ctl_id = kctl->id;
	lck_mtx_unlock(ctl_mtx);

	return (ctl_id);
}
errno_t
ctl_name_by_id(u_int32_t id, char *out_name, size_t maxsize)
{
	int found = 0;
	struct kctl *kctl;

	lck_mtx_lock(ctl_mtx);
	TAILQ_FOREACH(kctl, &ctl_head, next) {
		if (kctl->id == id)
			break;
	}

	if (kctl && kctl->name) {
		if (maxsize > MAX_KCTL_NAME)
			maxsize = MAX_KCTL_NAME;
		strlcpy(out_name, kctl->name, maxsize);
		found = 1;
	}
	lck_mtx_unlock(ctl_mtx);

	return (found ? 0 : ENOENT);
}
/*
 * Must be called with global ctl_mtx lock taken
 */
static struct kctl *
ctl_find_by_id_unit(u_int32_t id, u_int32_t unit)
{
	struct kctl *kctl;

	lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);

	TAILQ_FOREACH(kctl, &ctl_head, next) {
		if (kctl->id == id && (kctl->flags & CTL_FLAG_REG_ID_UNIT) == 0)
			return (kctl);
		else if (kctl->id == id && kctl->reg_unit == unit)
			return (kctl);
	}
	return (NULL);
}
/*
 * Must be called with kernel controller lock taken
 */
static struct ctl_cb *
kcb_find(struct kctl *kctl, u_int32_t unit)
{
	struct ctl_cb *kcb;

	lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);

	TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
		if (kcb->unit == unit)
			return (kcb);

	return (NULL);
}
static struct socket *
kcb_find_socket(struct kctl *kctl, u_int32_t unit)
{
	struct socket *so = NULL;
	struct ctl_cb *kcb;
	void *lr_saved;

	lr_saved = __builtin_return_address(0);

	lck_mtx_lock(ctl_mtx);
	kcb = kcb_find(kctl, unit);
	if (kcb && kcb->kctl == kctl) {
		so = kcb->so;
		if (so)
			kcb->usecount++;
	}
	lck_mtx_unlock(ctl_mtx);

	if (so == NULL)
		return (NULL);

	socket_lock(so, 1);

	lck_mtx_lock(ctl_mtx);
	if (kcb->kctl == NULL) {
		lck_mtx_unlock(ctl_mtx);
		socket_unlock(so, 1);
		so = NULL;
		lck_mtx_lock(ctl_mtx);
	} else {
		/*
		 * The socket lock history is more useful if we store
		 * the address of the caller.
		 */
		int i = (so->next_lock_lr + SO_LCKDBG_MAX - 1) % SO_LCKDBG_MAX;

		so->lock_lr[i] = lr_saved;
	}
	kcb->usecount--;
	if (kcb->usecount == 0)
		wakeup((event_t)&kcb->usecount);
	lck_mtx_unlock(ctl_mtx);

	return (so);
}
static void
ctl_post_msg(u_int32_t event_code, u_int32_t id)
{
	struct ctl_event_data ctl_ev_data;
	struct kev_msg ev_msg;

	lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_NOTOWNED);

	bzero(&ev_msg, sizeof(struct kev_msg));
	ev_msg.vendor_code = KEV_VENDOR_APPLE;

	ev_msg.kev_class = KEV_SYSTEM_CLASS;
	ev_msg.kev_subclass = KEV_CTL_SUBCLASS;
	ev_msg.event_code = event_code;

	/* common nke subclass data */
	bzero(&ctl_ev_data, sizeof(ctl_ev_data));
	ctl_ev_data.ctl_id = id;
	ev_msg.dv[0].data_ptr = &ctl_ev_data;
	ev_msg.dv[0].data_length = sizeof(ctl_ev_data);

	ev_msg.dv[1].data_length = 0;

	kev_post_msg(&ev_msg);
}
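
/*
 * Example (user-space sketch, not part of the original file): the
 * KEV_CTL_REGISTERED/KEV_CTL_DEREGISTERED messages posted above can be
 * observed on a kernel event socket filtered to KEV_CTL_SUBCLASS.
 *
 *	int evfd = socket(PF_SYSTEM, SOCK_RAW, SYSPROTO_EVENT);
 *	struct kev_request req = {
 *		.vendor_code = KEV_VENDOR_APPLE,
 *		.kev_class = KEV_SYSTEM_CLASS,
 *		.kev_subclass = KEV_CTL_SUBCLASS,
 *	};
 *	ioctl(evfd, SIOCSKEVFILT, &req);
 *	// reads on evfd then yield struct kern_event_msg records
 */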
static int
ctl_lock(struct socket *so, int refcount, void *lr)
{
	void *lr_saved;

	if (lr == NULL)
		lr_saved = __builtin_return_address(0);
	else
		lr_saved = lr;

	if (so->so_pcb != NULL) {
		lck_mtx_lock(((struct ctl_cb *)so->so_pcb)->mtx);
	} else {
		panic("ctl_lock: so=%p NO PCB! lr=%p lrh= %s\n",
		    so, lr_saved, solockhistory_nr(so));
		/* NOTREACHED */
	}

	if (so->so_usecount < 0) {
		panic("ctl_lock: so=%p so_pcb=%p lr=%p ref=%x lrh= %s\n",
		    so, so->so_pcb, lr_saved, so->so_usecount,
		    solockhistory_nr(so));
		/* NOTREACHED */
	}

	if (refcount)
		so->so_usecount++;

	so->lock_lr[so->next_lock_lr] = lr_saved;
	so->next_lock_lr = (so->next_lock_lr+1) % SO_LCKDBG_MAX;
	return (0);
}
static int
ctl_unlock(struct socket *so, int refcount, void *lr)
{
	void *lr_saved;
	lck_mtx_t *mutex_held;

	if (lr == NULL)
		lr_saved = __builtin_return_address(0);
	else
		lr_saved = lr;

#ifdef MORE_KCTLLOCK_DEBUG
	printf("ctl_unlock: so=%llx sopcb=%llx lock=%llx ref=%u lr=%llx\n",
	    (uint64_t)VM_KERNEL_ADDRPERM(so),
	    (uint64_t)VM_KERNEL_ADDRPERM(so->so_pcb),
	    (uint64_t)VM_KERNEL_ADDRPERM(((struct ctl_cb *)so->so_pcb)->mtx),
	    so->so_usecount, (uint64_t)VM_KERNEL_ADDRPERM(lr_saved));
#endif
	if (refcount)
		so->so_usecount--;

	if (so->so_usecount < 0) {
		panic("ctl_unlock: so=%p usecount=%x lrh= %s\n",
		    so, so->so_usecount, solockhistory_nr(so));
		/* NOTREACHED */
	}
	if (so->so_pcb == NULL) {
		panic("ctl_unlock: so=%p NO PCB usecount=%x lr=%p lrh= %s\n",
		    so, so->so_usecount, (void *)lr_saved,
		    solockhistory_nr(so));
		/* NOTREACHED */
	}
	mutex_held = ((struct ctl_cb *)so->so_pcb)->mtx;

	lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
	so->unlock_lr[so->next_unlock_lr] = lr_saved;
	so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX;
	lck_mtx_unlock(mutex_held);

	if (so->so_usecount == 0)
		ctl_sofreelastref(so);

	return (0);
}
static lck_mtx_t *
ctl_getlock(struct socket *so, int locktype)
{
#pragma unused(locktype)
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

	if (so->so_pcb) {
		if (so->so_usecount < 0)
			panic("ctl_getlock: so=%p usecount=%x lrh= %s\n",
			    so, so->so_usecount, solockhistory_nr(so));
		return (kcb->mtx);
	} else {
		panic("ctl_getlock: so=%p NULL NO so_pcb %s\n",
		    so, solockhistory_nr(so));
		return (so->so_proto->pr_domain->dom_mtx);
	}
}
__private_extern__ int
kctl_reg_list SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error = 0;
	int n, i;
	struct xsystmgen xsg;
	void *buf = NULL;
	struct kctl *kctl;
	size_t item_size = ROUNDUP64(sizeof (struct xkctl_reg));

	buf = _MALLOC(item_size, M_TEMP, M_WAITOK | M_ZERO);
	if (buf == NULL)
		return (ENOMEM);

	lck_mtx_lock(ctl_mtx);

	n = kctlstat.kcs_reg_count;

	if (req->oldptr == USER_ADDR_NULL) {
		req->oldidx = (n + n/8) * sizeof(struct xkctl_reg);
		goto done;
	}
	if (req->newptr != USER_ADDR_NULL) {
		error = EPERM;
		goto done;
	}
	bzero(&xsg, sizeof (xsg));
	xsg.xg_len = sizeof (xsg);
	xsg.xg_count = n;
	xsg.xg_gen = kctlstat.kcs_gencnt;
	xsg.xg_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xsg, sizeof (xsg));
	if (error != 0)
		goto done;
	/*
	 * We are done if there is no pcb
	 */
	if (n == 0)
		goto done;

	for (i = 0, kctl = TAILQ_FIRST(&ctl_head);
	    i < n && kctl != NULL;
	    i++, kctl = TAILQ_NEXT(kctl, next)) {
		struct xkctl_reg *xkr = (struct xkctl_reg *)buf;
		struct ctl_cb *kcb;
		u_int32_t pcbcount = 0;

		TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
			pcbcount++;

		bzero(buf, item_size);

		xkr->xkr_len = sizeof(struct xkctl_reg);
		xkr->xkr_kind = XSO_KCREG;
		xkr->xkr_id = kctl->id;
		xkr->xkr_reg_unit = kctl->reg_unit;
		xkr->xkr_flags = kctl->flags;
		xkr->xkr_kctlref = (uint64_t)VM_KERNEL_ADDRPERM(kctl);
		xkr->xkr_recvbufsize = kctl->recvbufsize;
		xkr->xkr_sendbufsize = kctl->sendbufsize;
		xkr->xkr_lastunit = kctl->lastunit;
		xkr->xkr_pcbcount = pcbcount;
		xkr->xkr_connect = (uint64_t)VM_KERNEL_ADDRPERM(kctl->connect);
		xkr->xkr_disconnect =
		    (uint64_t)VM_KERNEL_ADDRPERM(kctl->disconnect);
		xkr->xkr_send = (uint64_t)VM_KERNEL_ADDRPERM(kctl->send);
		xkr->xkr_send_list =
		    (uint64_t)VM_KERNEL_ADDRPERM(kctl->send_list);
		xkr->xkr_setopt = (uint64_t)VM_KERNEL_ADDRPERM(kctl->setopt);
		xkr->xkr_getopt = (uint64_t)VM_KERNEL_ADDRPERM(kctl->getopt);
		xkr->xkr_rcvd = (uint64_t)VM_KERNEL_ADDRPERM(kctl->rcvd);
		strlcpy(xkr->xkr_name, kctl->name, sizeof(xkr->xkr_name));

		error = SYSCTL_OUT(req, buf, item_size);
	}

	if (error == 0) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		bzero(&xsg, sizeof (xsg));
		xsg.xg_len = sizeof (xsg);
		xsg.xg_count = n;
		xsg.xg_gen = kctlstat.kcs_gencnt;
		xsg.xg_sogen = so_gencnt;
		error = SYSCTL_OUT(req, &xsg, sizeof (xsg));
	}

done:
	lck_mtx_unlock(ctl_mtx);

	if (buf != NULL)
		FREE(buf, M_TEMP);

	return (error);
}
__private_extern__ int
kctl_pcblist SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error = 0;
	int n, i;
	struct xsystmgen xsg;
	void *buf = NULL;
	struct kctl *kctl;
	size_t item_size = ROUNDUP64(sizeof (struct xkctlpcb)) +
	    ROUNDUP64(sizeof (struct xsocket_n)) +
	    2 * ROUNDUP64(sizeof (struct xsockbuf_n)) +
	    ROUNDUP64(sizeof (struct xsockstat_n));

	buf = _MALLOC(item_size, M_TEMP, M_WAITOK | M_ZERO);
	if (buf == NULL)
		return (ENOMEM);

	lck_mtx_lock(ctl_mtx);

	n = kctlstat.kcs_pcbcount;

	if (req->oldptr == USER_ADDR_NULL) {
		req->oldidx = (n + n/8) * item_size;
		goto done;
	}
	if (req->newptr != USER_ADDR_NULL) {
		error = EPERM;
		goto done;
	}
	bzero(&xsg, sizeof (xsg));
	xsg.xg_len = sizeof (xsg);
	xsg.xg_count = n;
	xsg.xg_gen = kctlstat.kcs_gencnt;
	xsg.xg_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xsg, sizeof (xsg));
	if (error != 0)
		goto done;
	/*
	 * We are done if there is no pcb
	 */
	if (n == 0)
		goto done;

	for (i = 0, kctl = TAILQ_FIRST(&ctl_head);
	    i < n && kctl != NULL;
	    kctl = TAILQ_NEXT(kctl, next)) {
		struct ctl_cb *kcb;

		for (kcb = TAILQ_FIRST(&kctl->kcb_head);
		    i < n && kcb != NULL;
		    i++, kcb = TAILQ_NEXT(kcb, next)) {
			struct xkctlpcb *xk = (struct xkctlpcb *)buf;
			struct xsocket_n *xso = (struct xsocket_n *)
			    ADVANCE64(xk, sizeof (*xk));
			struct xsockbuf_n *xsbrcv = (struct xsockbuf_n *)
			    ADVANCE64(xso, sizeof (*xso));
			struct xsockbuf_n *xsbsnd = (struct xsockbuf_n *)
			    ADVANCE64(xsbrcv, sizeof (*xsbrcv));
			struct xsockstat_n *xsostats = (struct xsockstat_n *)
			    ADVANCE64(xsbsnd, sizeof (*xsbsnd));

			bzero(buf, item_size);

			xk->xkp_len = sizeof(struct xkctlpcb);
			xk->xkp_kind = XSO_KCB;
			xk->xkp_unit = kcb->unit;
			xk->xkp_kctpcb = (uint64_t)VM_KERNEL_ADDRPERM(kcb);
			xk->xkp_kctlref = (uint64_t)VM_KERNEL_ADDRPERM(kctl);
			xk->xkp_kctlid = kctl->id;
			strlcpy(xk->xkp_kctlname, kctl->name,
			    sizeof(xk->xkp_kctlname));

			sotoxsocket_n(kcb->so, xso);
			sbtoxsockbuf_n(kcb->so ?
			    &kcb->so->so_rcv : NULL, xsbrcv);
			sbtoxsockbuf_n(kcb->so ?
			    &kcb->so->so_snd : NULL, xsbsnd);
			sbtoxsockstat_n(kcb->so, xsostats);

			error = SYSCTL_OUT(req, buf, item_size);
		}
	}

	if (error == 0) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		bzero(&xsg, sizeof (xsg));
		xsg.xg_len = sizeof (xsg);
		xsg.xg_count = n;
		xsg.xg_gen = kctlstat.kcs_gencnt;
		xsg.xg_sogen = so_gencnt;
		error = SYSCTL_OUT(req, &xsg, sizeof (xsg));
	}

done:
	lck_mtx_unlock(ctl_mtx);

	if (buf != NULL)
		FREE(buf, M_TEMP);

	return (error);
}
__private_extern__ int
kctl_getstat SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error = 0;

	lck_mtx_lock(ctl_mtx);

	if (req->newptr != USER_ADDR_NULL) {
		error = EPERM;
		goto done;
	}
	if (req->oldptr == USER_ADDR_NULL) {
		req->oldidx = sizeof(struct kctlstat);
		goto done;
	}

	error = SYSCTL_OUT(req, &kctlstat,
	    MIN(sizeof(struct kctlstat), req->oldlen));
done:
	lck_mtx_unlock(ctl_mtx);
	return (error);
}
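
/*
 * Example (user-space sketch, not part of the original file): the handler
 * above backs the net.systm.kctl.stats sysctl, readable as:
 *
 *	struct kctlstat st;
 *	size_t len = sizeof(st);
 *	if (sysctlbyname("net.systm.kctl.stats", &st, &len, NULL, 0) == 0)
 *		printf("%llu kctl(s) registered\n", st.kcs_reg_count);
 */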