/*
 * Copyright (c) 1999-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Kernel Control domain - allows control connections to kernel modules
 * and to read/write data.
 *
 * Vincent Lubet, 040506
 * Christophe Allie, 010928
 * Justin C. Walker, 990319
 */
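/*
 * For orientation, a user process typically reaches a kernel control by
 * resolving its name to an ID and then connecting a PF_SYSTEM socket.
 * Illustrative sketch only (the control name is hypothetical, error
 * handling omitted; userland needs <sys/socket.h>, <sys/sys_domain.h>
 * and <sys/kern_control.h>):
 *
 *    struct ctl_info info;
 *    struct sockaddr_ctl addr;
 *    int fd = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);
 *
 *    bzero(&info, sizeof (info));
 *    strlcpy(info.ctl_name, "com.example.mycontrol", sizeof (info.ctl_name));
 *    ioctl(fd, CTLIOCGINFO, &info);        // fills in info.ctl_id
 *
 *    bzero(&addr, sizeof (addr));
 *    addr.sc_len = sizeof (addr);
 *    addr.sc_family = AF_SYSTEM;
 *    addr.ss_sysaddr = AF_SYS_CONTROL;
 *    addr.sc_id = info.ctl_id;
 *    addr.sc_unit = 0;                     // 0: let ctl_connect() pick a unit
 *    connect(fd, (struct sockaddr *)&addr, sizeof (addr));
 */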
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/malloc.h>
#include <sys/sys_domain.h>
#include <sys/kern_event.h>
#include <sys/kern_control.h>
#include <sys/kauth.h>
#include <net/if_var.h>

#include <mach/vm_types.h>

#include <kern/thread.h>
/*
 * Definitions and vars for the controls we support
 */

#define CTL_SENDSIZE    (2 * 1024)    /* default buffer size */
#define CTL_RECVSIZE    (8 * 1024)    /* default buffer size */
/*
 * Definitions and vars for the controls we support
 */

static u_int32_t        ctl_maxunit = 65536;
static lck_grp_attr_t   *ctl_lck_grp_attr = 0;
static lck_attr_t       *ctl_lck_attr = 0;
static lck_grp_t        *ctl_lck_grp = 0;
static lck_mtx_t        *ctl_mtx;

/* all the controllers are chained */
TAILQ_HEAD(kctl_list, kctl) ctl_head;
static int ctl_attach(struct socket *, int, struct proc *);
static int ctl_detach(struct socket *);
static int ctl_sofreelastref(struct socket *so);
static int ctl_connect(struct socket *, struct sockaddr *, struct proc *);
static int ctl_disconnect(struct socket *);
static int ctl_ioctl(struct socket *so, u_long cmd, caddr_t data,
    struct ifnet *ifp, struct proc *p);
static int ctl_send(struct socket *, int, struct mbuf *,
    struct sockaddr *, struct mbuf *, struct proc *);
static int ctl_ctloutput(struct socket *, struct sockopt *);
static int ctl_peeraddr(struct socket *so, struct sockaddr **nam);
static int ctl_usr_rcvd(struct socket *so, int flags);

static struct kctl *ctl_find_by_name(const char *);
static struct kctl *ctl_find_by_id_unit(u_int32_t id, u_int32_t unit);

static struct socket *kcb_find_socket(struct kctl *, u_int32_t unit);
static struct ctl_cb *kcb_find(struct kctl *, u_int32_t unit);
static void ctl_post_msg(u_int32_t event_code, u_int32_t id);

static int ctl_lock(struct socket *, int, void *);
static int ctl_unlock(struct socket *, int, void *);
static lck_mtx_t * ctl_getlock(struct socket *, int);
static struct pr_usrreqs ctl_usrreqs = {
    .pru_attach =       ctl_attach,
    .pru_connect =      ctl_connect,
    .pru_control =      ctl_ioctl,
    .pru_detach =       ctl_detach,
    .pru_disconnect =   ctl_disconnect,
    .pru_peeraddr =     ctl_peeraddr,
    .pru_rcvd =         ctl_usr_rcvd,
    .pru_send =         ctl_send,
    .pru_sosend =       sosend,
    .pru_soreceive =    soreceive,
};
static struct protosw kctlsw[] = {
{
    .pr_type =          SOCK_DGRAM,
    .pr_protocol =      SYSPROTO_CONTROL,
    .pr_flags =         PR_ATOMIC|PR_CONNREQUIRED|PR_PCBLOCK|PR_WANTRCVD,
    .pr_ctloutput =     ctl_ctloutput,
    .pr_usrreqs =       &ctl_usrreqs,
    .pr_lock =          ctl_lock,
    .pr_unlock =        ctl_unlock,
    .pr_getlock =       ctl_getlock,
},
{
    .pr_type =          SOCK_STREAM,
    .pr_protocol =      SYSPROTO_CONTROL,
    .pr_flags =         PR_CONNREQUIRED|PR_PCBLOCK|PR_WANTRCVD,
    .pr_ctloutput =     ctl_ctloutput,
    .pr_usrreqs =       &ctl_usrreqs,
    .pr_lock =          ctl_lock,
    .pr_unlock =        ctl_unlock,
    .pr_getlock =       ctl_getlock,
}
};

static int kctl_proto_count = (sizeof (kctlsw) / sizeof (struct protosw));
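/*
 * Which of the two protosw entries a client lands on is decided by the
 * socket type passed to socket(2); the matching check against the
 * control's CTL_FLAG_REG_SOCK_STREAM flag is in ctl_connect() below.
 * Illustrative userland call for a stream-style control:
 *
 *    int fd = socket(PF_SYSTEM, SOCK_STREAM, SYSPROTO_CONTROL);
 */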
/*
 * Install the protosw's for the Kernel Control manager.
 */
__private_extern__ void
kern_control_init(struct domain *dp)
{
    struct protosw *pr;
    int i;

    VERIFY(!(dp->dom_flags & DOM_INITIALIZED));
    VERIFY(dp == systemdomain);

    ctl_lck_grp_attr = lck_grp_attr_alloc_init();
    if (ctl_lck_grp_attr == NULL) {
        panic("%s: lck_grp_attr_alloc_init failed\n", __func__);
        /* NOTREACHED */
    }

    ctl_lck_grp = lck_grp_alloc_init("Kernel Control Protocol",
        ctl_lck_grp_attr);
    if (ctl_lck_grp == NULL) {
        panic("%s: lck_grp_alloc_init failed\n", __func__);
        /* NOTREACHED */
    }

    ctl_lck_attr = lck_attr_alloc_init();
    if (ctl_lck_attr == NULL) {
        panic("%s: lck_attr_alloc_init failed\n", __func__);
        /* NOTREACHED */
    }

    ctl_mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
    if (ctl_mtx == NULL) {
        panic("%s: lck_mtx_alloc_init failed\n", __func__);
        /* NOTREACHED */
    }
    TAILQ_INIT(&ctl_head);

    for (i = 0, pr = &kctlsw[0]; i < kctl_proto_count; i++, pr++)
        net_add_proto(pr, dp, 1);
}
static void
kcb_delete(struct ctl_cb *kcb)
{
    if (kcb != 0) {
        if (kcb->mtx != 0)
            lck_mtx_free(kcb->mtx, ctl_lck_grp);
        FREE(kcb, M_TEMP);
    }
}
/*
 * Kernel Controller user-request functions:
 * the attach function must exist and succeed;
 * detach is not necessary;
 * we need a pcb for the per-socket mutex.
 */
static int
ctl_attach(__unused struct socket *so, __unused int proto, __unused struct proc *p)
{
    int error = 0;
    struct ctl_cb *kcb = 0;

    MALLOC(kcb, struct ctl_cb *, sizeof(struct ctl_cb), M_TEMP, M_WAITOK);
    if (kcb == NULL) {
        error = ENOMEM;
        goto quit;
    }
    bzero(kcb, sizeof(struct ctl_cb));

    kcb->mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
    if (kcb->mtx == NULL) {
        error = ENOMEM;
        kcb_delete(kcb);
        goto quit;
    }
    kcb->so = so;
    so->so_pcb = (caddr_t)kcb;

quit:
    return (error);
}
static int
ctl_sofreelastref(struct socket *so)
{
    struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

    so->so_pcb = 0;

    if (kcb != 0) {
        struct kctl *kctl;

        if ((kctl = kcb->kctl) != 0) {
            lck_mtx_lock(ctl_mtx);
            TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
            lck_mtx_unlock(ctl_mtx);
        }
        kcb_delete(kcb);
    }
    sofreelastref(so, 1);
    return (0);
}
static int
ctl_detach(struct socket *so)
{
    struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

    if (kcb == 0)
        return (0);

    soisdisconnected(so);
    so->so_flags |= SOF_PCBCLEARING;
    return (0);
}
static int
ctl_connect(struct socket *so, struct sockaddr *nam, __unused struct proc *p)
{
    int error = 0;
    struct kctl *kctl;
    struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
    struct sockaddr_ctl sa;
    struct ctl_cb *kcb_next = NULL;

    if (kcb == 0)
        panic("ctl_connect so_pcb null\n");

    if (nam->sa_len != sizeof(struct sockaddr_ctl))
        return (EINVAL);

    bcopy(nam, &sa, sizeof(struct sockaddr_ctl));

    lck_mtx_lock(ctl_mtx);
    kctl = ctl_find_by_id_unit(sa.sc_id, sa.sc_unit);
    if (kctl == NULL) {
        lck_mtx_unlock(ctl_mtx);
        return (ENOENT);
    }

    if (((kctl->flags & CTL_FLAG_REG_SOCK_STREAM) && (so->so_type != SOCK_STREAM)) ||
        (!(kctl->flags & CTL_FLAG_REG_SOCK_STREAM) && (so->so_type != SOCK_DGRAM))) {
        lck_mtx_unlock(ctl_mtx);
        return (EPROTOTYPE);
    }

    if (kctl->flags & CTL_FLAG_PRIVILEGED) {
        if (p == 0) {
            lck_mtx_unlock(ctl_mtx);
            return (EINVAL);
        }
        if (kauth_cred_issuser(kauth_cred_get()) == 0) {
            lck_mtx_unlock(ctl_mtx);
            return (EPERM);
        }
    }

    if ((kctl->flags & CTL_FLAG_REG_ID_UNIT) || sa.sc_unit != 0) {
        if (kcb_find(kctl, sa.sc_unit) != NULL) {
            lck_mtx_unlock(ctl_mtx);
            return (EBUSY);
        }
    } else {
        /* Find an unused unit, assumes control units are listed in order */
        u_int32_t unit = 1;

        TAILQ_FOREACH(kcb_next, &kctl->kcb_head, next) {
            if (kcb_next->unit > unit) {
                /* Found a gap, lets fill it in */
                break;
            }
            unit = kcb_next->unit + 1;
            if (unit == ctl_maxunit)
                break;
        }

        if (unit == ctl_maxunit) {
            lck_mtx_unlock(ctl_mtx);
            return (EBUSY);
        }

        sa.sc_unit = unit;
    }

    kcb->unit = sa.sc_unit;
    kcb->kctl = kctl;
    if (kcb_next != NULL) {
        TAILQ_INSERT_BEFORE(kcb_next, kcb, next);
    } else {
        TAILQ_INSERT_TAIL(&kctl->kcb_head, kcb, next);
    }
    lck_mtx_unlock(ctl_mtx);

    error = soreserve(so, kctl->sendbufsize, kctl->recvbufsize);
    if (error)
        goto done;
    soisconnecting(so);

    socket_unlock(so, 0);
    error = (*kctl->connect)(kctl, &sa, &kcb->userdata);
    socket_lock(so, 0);
    if (error)
        goto end;

    soisconnected(so);

end:
    if (error && kctl->disconnect) {
        socket_unlock(so, 0);
        (*kctl->disconnect)(kctl, kcb->unit, kcb->userdata);
        socket_lock(so, 0);
    }
done:
    if (error) {
        soisdisconnected(so);
        lck_mtx_lock(ctl_mtx);
        kcb->kctl = 0;
        kcb->unit = 0;
        TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
        lck_mtx_unlock(ctl_mtx);
    }
    return (error);
}
static int
ctl_disconnect(struct socket *so)
{
    struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

    if ((kcb = (struct ctl_cb *)so->so_pcb)) {
        struct kctl *kctl = kcb->kctl;

        if (kctl && kctl->disconnect) {
            socket_unlock(so, 0);
            (*kctl->disconnect)(kctl, kcb->unit, kcb->userdata);
            socket_lock(so, 0);
        }

        soisdisconnected(so);

        socket_unlock(so, 0);
        lck_mtx_lock(ctl_mtx);
        kcb->kctl = 0;
        kcb->unit = 0;
        while (kcb->usecount != 0) {
            msleep(&kcb->usecount, ctl_mtx, 0, "kcb->usecount", 0);
        }
        TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
        lck_mtx_unlock(ctl_mtx);
        socket_lock(so, 0);
    }
    return (0);
}
static int
ctl_peeraddr(struct socket *so, struct sockaddr **nam)
{
    struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
    struct kctl *kctl;
    struct sockaddr_ctl sc;

    if (kcb == NULL)    /* sanity check */
        return (ENOTCONN);

    if ((kctl = kcb->kctl) == NULL)
        return (EINVAL);

    bzero(&sc, sizeof(struct sockaddr_ctl));
    sc.sc_len = sizeof(struct sockaddr_ctl);
    sc.sc_family = AF_SYSTEM;
    sc.ss_sysaddr = AF_SYS_CONTROL;
    sc.sc_id = kctl->id;
    sc.sc_unit = kcb->unit;

    *nam = dup_sockaddr((struct sockaddr *)&sc, 1);
    return (0);
}
static int
ctl_usr_rcvd(struct socket *so, int flags)
{
    struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
    struct kctl *kctl;

    if ((kctl = kcb->kctl) == NULL) {
        return (EINVAL);
    }

    if (kctl->rcvd) {
        socket_unlock(so, 0);
        (*kctl->rcvd)(kctl, kcb->unit, kcb->userdata, flags);
        socket_lock(so, 0);
    }
    return (0);
}
static int
ctl_send(struct socket *so, int flags, struct mbuf *m,
    __unused struct sockaddr *addr, struct mbuf *control,
    __unused struct proc *p)
{
    int error = 0;
    struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
    struct kctl *kctl;

    if (control)
        m_freem(control);

    if (kcb == NULL)    /* sanity check */
        error = ENOTCONN;

    if (error == 0 && (kctl = kcb->kctl) == NULL)
        error = EINVAL;

    if (error == 0 && kctl->send) {
        socket_unlock(so, 0);
        error = (*kctl->send)(kctl, kcb->unit, kcb->userdata, m, flags);
        socket_lock(so, 0);
    } else {
        m_freem(m);
        if (error == 0)
            error = ENOTSUP;
    }
    return (error);
}
errno_t
ctl_enqueuembuf(void *kctlref, u_int32_t unit, struct mbuf *m, u_int32_t flags)
{
    struct socket *so;
    errno_t error = 0;
    struct kctl *kctl = (struct kctl *)kctlref;

    if (kctl == NULL)
        return (EINVAL);

    so = kcb_find_socket(kctl, unit);
    if (so == NULL)
        return (EINVAL);

    if (sbspace(&so->so_rcv) < m->m_pkthdr.len) {
        error = ENOBUFS;
        goto bye;
    }
    if ((flags & CTL_DATA_EOR))
        m->m_flags |= M_EOR;
    if (sbappend(&so->so_rcv, m) && (flags & CTL_DATA_NOWAKEUP) == 0)
        sorwakeup(so);
bye:
    socket_unlock(so, 1);
    return (error);
}
errno_t
ctl_enqueuedata(void *kctlref, u_int32_t unit, void *data, size_t len, u_int32_t flags)
{
    struct socket *so;
    struct mbuf *m;
    struct mbuf *n;
    errno_t error = 0;
    struct kctl *kctl = (struct kctl *)kctlref;
    unsigned int num_needed;
    size_t curlen = 0;

    if (kctlref == NULL)
        return (EINVAL);

    so = kcb_find_socket(kctl, unit);
    if (so == NULL)
        return (EINVAL);

    if (sbspace(&so->so_rcv) < (int)len) {
        error = ENOBUFS;
        goto bye;
    }

    num_needed = 1;
    m = m_allocpacket_internal(&num_needed, len, NULL, M_NOWAIT, 1, 0);
    if (m == NULL) {
        printf("ctl_enqueuedata: m_allocpacket_internal(%lu) failed\n", len);
        error = ENOMEM;
        goto bye;
    }

    for (n = m; n != NULL; n = n->m_next) {
        size_t mlen = mbuf_maxlen(n);

        if (mlen + curlen > len)
            mlen = len - curlen;
        n->m_len = mlen;
        bcopy((char *)data + curlen, n->m_data, mlen);
        curlen += mlen;
    }
    mbuf_pkthdr_setlen(m, curlen);

    if ((flags & CTL_DATA_EOR))
        m->m_flags |= M_EOR;
    if (sbappend(&so->so_rcv, m) && (flags & CTL_DATA_NOWAKEUP) == 0)
        sorwakeup(so);
bye:
    socket_unlock(so, 1);
    return (error);
}
errno_t
ctl_getenqueuespace(kern_ctl_ref kctlref, u_int32_t unit, size_t *space)
{
    struct kctl *kctl = (struct kctl *)kctlref;
    struct socket *so;
    long avail;

    if (kctlref == NULL || space == NULL)
        return (EINVAL);

    so = kcb_find_socket(kctl, unit);
    if (so == NULL)
        return (EINVAL);

    avail = sbspace(&so->so_rcv);
    *space = (avail < 0) ? 0 : avail;
    socket_unlock(so, 1);

    return (0);
}
static int
ctl_ctloutput(struct socket *so, struct sockopt *sopt)
{
    struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
    struct kctl *kctl;
    int error = 0;
    void *data;
    size_t len;

    if (sopt->sopt_level != SYSPROTO_CONTROL) {
        return (EINVAL);
    }

    if (kcb == NULL)    /* sanity check */
        return (ENOTCONN);

    if ((kctl = kcb->kctl) == NULL)
        return (EINVAL);

    switch (sopt->sopt_dir) {
    case SOPT_SET:
        if (kctl->setopt == NULL)
            return (ENOTSUP);
        if (sopt->sopt_valsize == 0) {
            data = NULL;
        } else {
            MALLOC(data, void *, sopt->sopt_valsize, M_TEMP, M_WAITOK);
            if (data == NULL)
                return (ENOMEM);
            error = sooptcopyin(sopt, data, sopt->sopt_valsize, sopt->sopt_valsize);
        }
        if (error == 0) {
            socket_unlock(so, 0);
            error = (*kctl->setopt)(kcb->kctl, kcb->unit, kcb->userdata, sopt->sopt_name,
                data, sopt->sopt_valsize);
            socket_lock(so, 0);
        }
        FREE(data, M_TEMP);
        break;

    case SOPT_GET:
        if (kctl->getopt == NULL)
            return (ENOTSUP);
        data = NULL;
        if (sopt->sopt_valsize && sopt->sopt_val) {
            MALLOC(data, void *, sopt->sopt_valsize, M_TEMP, M_WAITOK);
            if (data == NULL)
                return (ENOMEM);
            /* 4108337 - copy in data for get socket option */
            error = sooptcopyin(sopt, data, sopt->sopt_valsize, sopt->sopt_valsize);
        }
        len = sopt->sopt_valsize;
        socket_unlock(so, 0);
        error = (*kctl->getopt)(kcb->kctl, kcb->unit, kcb->userdata, sopt->sopt_name,
            data, &len);
        if (data != NULL && len > sopt->sopt_valsize)
            panic_plain("ctl_ctloutput: ctl %s returned len (%lu) > sopt_valsize (%lu)\n",
                kcb->kctl->name, len, sopt->sopt_valsize);
        socket_lock(so, 0);
        if (error == 0) {
            if (data != NULL)
                error = sooptcopyout(sopt, data, len);
            else
                sopt->sopt_valsize = len;
        }
        if (data != NULL)
            FREE(data, M_TEMP);
        break;
    }
    return (error);
}
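/*
 * Illustrative sketch of the kext side these paths call into (hypothetical
 * names): setopt receives a value already copied into kernel space by
 * sooptcopyin() above, while getopt reports its result length through *len,
 * which also serves as a length probe when data is NULL:
 *
 *    static errno_t
 *    my_ctl_getopt(kern_ctl_ref ref, u_int32_t unit, void *unitinfo,
 *        int opt, void *data, size_t *len)
 *    {
 *        static const char banner[] = "hello";
 *
 *        if (data != NULL && *len >= sizeof (banner))
 *            memcpy(data, banner, sizeof (banner));
 *        *len = sizeof (banner);    // reported back via sopt_valsize
 *        return 0;
 *    }
 */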
static int
ctl_ioctl(__unused struct socket *so, u_long cmd, caddr_t data,
    __unused struct ifnet *ifp, __unused struct proc *p)
{
    int error = ENOTSUP;

    switch (cmd) {
    /* get the number of controllers */
    case CTLIOCGCOUNT: {
        struct kctl *kctl;
        u_int32_t n = 0;

        lck_mtx_lock(ctl_mtx);
        TAILQ_FOREACH(kctl, &ctl_head, next)
            n++;
        lck_mtx_unlock(ctl_mtx);

        bcopy(&n, data, sizeof (n));
        error = 0;
        break;
    }
    case CTLIOCGINFO: {
        struct ctl_info ctl_info;
        struct kctl *kctl = 0;
        size_t name_len;

        bcopy(data, &ctl_info, sizeof (ctl_info));
        name_len = strnlen(ctl_info.ctl_name, MAX_KCTL_NAME);
        if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) {
            error = EINVAL;
            break;
        }
        lck_mtx_lock(ctl_mtx);
        kctl = ctl_find_by_name(ctl_info.ctl_name);
        lck_mtx_unlock(ctl_mtx);
        if (kctl == 0) {
            error = ENOENT;
            break;
        }
        ctl_info.ctl_id = kctl->id;
        bcopy(&ctl_info, data, sizeof (ctl_info));
        error = 0;
        break;
    }

    /* add controls to get list of NKEs */
    }

    return (error);
}
/*
 * Register/unregister a NKE
 */
errno_t
ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref)
{
    struct kctl *kctl = NULL;
    struct kctl *kctl_next = NULL;
    u_int32_t id = 1;
    size_t name_len;
    int is_extended = 0;

    if (userkctl == NULL)    /* sanity check */
        return (EINVAL);
    if (userkctl->ctl_connect == NULL)
        return (EINVAL);
    name_len = strlen(userkctl->ctl_name);
    if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME)
        return (EINVAL);

    MALLOC(kctl, struct kctl *, sizeof(*kctl), M_TEMP, M_WAITOK);
    if (kctl == NULL)
        return (ENOMEM);
    bzero((char *)kctl, sizeof(*kctl));

    lck_mtx_lock(ctl_mtx);

    /*
     * Kernel Control IDs
     *
     * CTL_FLAG_REG_ID_UNIT indicates the control ID and unit number are
     * static. If they do not exist, add them to the list in order. If the
     * flag is not set, we must find a new unique value. We assume the
     * list is in order. We find the last item in the list and add one. If
     * this leads to wrapping the id around, we start at the front of the
     * list and look for a gap.
     */

    if ((userkctl->ctl_flags & CTL_FLAG_REG_ID_UNIT) == 0) {
        /* Must dynamically assign an unused ID */

        /* Verify the same name isn't already registered */
        if (ctl_find_by_name(userkctl->ctl_name) != NULL) {
            lck_mtx_unlock(ctl_mtx);
            FREE(kctl, M_TEMP);
            return (EEXIST);
        }

        /* Start with 1 in case the list is empty */
        id = 1;
        kctl_next = TAILQ_LAST(&ctl_head, kctl_list);

        if (kctl_next != NULL) {
            /* List was not empty, add one to the last item in the list */
            id = kctl_next->id + 1;
            kctl_next = NULL;

            /*
             * If this wrapped the id number, start looking at the front
             * of the list for an unused id.
             */
            if (id == 0) {
                /* Find the next unused ID */
                id = 1;

                TAILQ_FOREACH(kctl_next, &ctl_head, next) {
                    if (kctl_next->id > id) {
                        /* We found a gap */
                        break;
                    }
                    id = kctl_next->id + 1;
                }
            }
        }

        userkctl->ctl_id = id;
        kctl->id = id;
        kctl->reg_unit = -1;
    } else {
        TAILQ_FOREACH(kctl_next, &ctl_head, next) {
            if (kctl_next->id > userkctl->ctl_id)
                break;
        }

        if (ctl_find_by_id_unit(userkctl->ctl_id, userkctl->ctl_unit) != NULL) {
            lck_mtx_unlock(ctl_mtx);
            FREE(kctl, M_TEMP);
            return (EEXIST);
        }
        kctl->id = userkctl->ctl_id;
        kctl->reg_unit = userkctl->ctl_unit;
    }

    is_extended = (userkctl->ctl_flags & CTL_FLAG_REG_EXTENDED);

    strlcpy(kctl->name, userkctl->ctl_name, MAX_KCTL_NAME);
    kctl->flags = userkctl->ctl_flags;

    /* Let the caller know the default send and receive sizes */
    if (userkctl->ctl_sendsize == 0)
        userkctl->ctl_sendsize = CTL_SENDSIZE;
    kctl->sendbufsize = userkctl->ctl_sendsize;

    if (userkctl->ctl_recvsize == 0)
        userkctl->ctl_recvsize = CTL_RECVSIZE;
    kctl->recvbufsize = userkctl->ctl_recvsize;

    kctl->connect = userkctl->ctl_connect;
    kctl->disconnect = userkctl->ctl_disconnect;
    kctl->send = userkctl->ctl_send;
    kctl->setopt = userkctl->ctl_setopt;
    kctl->getopt = userkctl->ctl_getopt;
    if (is_extended) {
        kctl->rcvd = userkctl->ctl_rcvd;
    }

    TAILQ_INIT(&kctl->kcb_head);

    if (kctl_next)
        TAILQ_INSERT_BEFORE(kctl_next, kctl, next);
    else
        TAILQ_INSERT_TAIL(&ctl_head, kctl, next);

    lck_mtx_unlock(ctl_mtx);

    *kctlref = kctl;

    ctl_post_msg(KEV_CTL_REGISTERED, kctl->id);
    return (0);
}
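/*
 * Illustrative sketch of a typical caller (hypothetical control name and
 * callback symbols; callbacks themselves not shown):
 *
 *    struct kern_ctl_reg reg;
 *    kern_ctl_ref ref;
 *
 *    bzero(&reg, sizeof (reg));
 *    strlcpy(reg.ctl_name, "com.example.mycontrol", sizeof (reg.ctl_name));
 *    reg.ctl_flags = 0;                     // dynamic id/unit assignment
 *    reg.ctl_connect = my_ctl_connect;      // mandatory, checked above
 *    reg.ctl_send = my_ctl_send;
 *    reg.ctl_disconnect = my_ctl_disconnect;
 *    errno_t err = ctl_register(&reg, &ref);
 */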
errno_t
ctl_deregister(void *kctlref)
{
    struct kctl *kctl;

    if (kctlref == NULL)    /* sanity check */
        return (EINVAL);

    lck_mtx_lock(ctl_mtx);
    TAILQ_FOREACH(kctl, &ctl_head, next) {
        if (kctl == (struct kctl *)kctlref)
            break;
    }
    if (kctl != (struct kctl *)kctlref) {
        lck_mtx_unlock(ctl_mtx);
        return (EINVAL);
    }
    if (!TAILQ_EMPTY(&kctl->kcb_head)) {
        lck_mtx_unlock(ctl_mtx);
        return (EBUSY);
    }

    TAILQ_REMOVE(&ctl_head, kctl, next);

    lck_mtx_unlock(ctl_mtx);

    ctl_post_msg(KEV_CTL_DEREGISTERED, kctl->id);
    FREE(kctl, M_TEMP);
    return (0);
}
/*
 * Must be called with global ctl_mtx lock taken
 */
static struct kctl *
ctl_find_by_name(const char *name)
{
    struct kctl *kctl;

    TAILQ_FOREACH(kctl, &ctl_head, next)
        if (strncmp(kctl->name, name, sizeof(kctl->name)) == 0)
            return (kctl);

    return (NULL);
}
u_int32_t
ctl_id_by_name(const char *name)
{
    u_int32_t ctl_id = 0;

    lck_mtx_lock(ctl_mtx);
    struct kctl *kctl = ctl_find_by_name(name);
    if (kctl)
        ctl_id = kctl->id;
    lck_mtx_unlock(ctl_mtx);

    return (ctl_id);
}
errno_t
ctl_name_by_id(u_int32_t id, char *out_name, size_t maxsize)
{
    int found = 0;

    lck_mtx_lock(ctl_mtx);
    struct kctl *kctl;
    TAILQ_FOREACH(kctl, &ctl_head, next) {
        if (kctl->id == id)
            break;
    }

    if (kctl && kctl->name) {
        if (maxsize > MAX_KCTL_NAME)
            maxsize = MAX_KCTL_NAME;
        strlcpy(out_name, kctl->name, maxsize);
        found = 1;
    }
    lck_mtx_unlock(ctl_mtx);

    return found ? 0 : ENOENT;
}
/*
 * Must be called with global ctl_mtx lock taken
 */
static struct kctl *
ctl_find_by_id_unit(u_int32_t id, u_int32_t unit)
{
    struct kctl *kctl;

    TAILQ_FOREACH(kctl, &ctl_head, next) {
        if (kctl->id == id && (kctl->flags & CTL_FLAG_REG_ID_UNIT) == 0)
            return (kctl);
        else if (kctl->id == id && kctl->reg_unit == unit)
            return (kctl);
    }
    return (NULL);
}
/*
 * Must be called with kernel controller lock taken
 */
static struct ctl_cb *
kcb_find(struct kctl *kctl, u_int32_t unit)
{
    struct ctl_cb *kcb;

    TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
        if (kcb->unit == unit)
            return (kcb);

    return (NULL);
}
static struct socket *
kcb_find_socket(struct kctl *kctl, u_int32_t unit)
{
    struct socket *so = NULL;

    lck_mtx_lock(ctl_mtx);
    struct ctl_cb *kcb = kcb_find(kctl, unit);
    if (kcb && kcb->kctl == kctl) {
        so = kcb->so;
        if (so) {
            kcb->usecount++;
        }
    }
    lck_mtx_unlock(ctl_mtx);

    if (so == NULL) {
        return (NULL);
    }

    socket_lock(so, 1);

    lck_mtx_lock(ctl_mtx);
    if (kcb->kctl == NULL) {
        lck_mtx_unlock(ctl_mtx);
        socket_unlock(so, 1);
        so = NULL;
        lck_mtx_lock(ctl_mtx);
    }
    kcb->usecount--;
    if (kcb->usecount == 0)
        wakeup((event_t)&kcb->usecount);
    lck_mtx_unlock(ctl_mtx);

    return (so);
}
static void
ctl_post_msg(u_int32_t event_code, u_int32_t id)
{
    struct ctl_event_data ctl_ev_data;
    struct kev_msg ev_msg;

    lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_NOTOWNED);

    bzero(&ev_msg, sizeof(struct kev_msg));
    ev_msg.vendor_code = KEV_VENDOR_APPLE;

    ev_msg.kev_class = KEV_SYSTEM_CLASS;
    ev_msg.kev_subclass = KEV_CTL_SUBCLASS;
    ev_msg.event_code = event_code;

    /* common nke subclass data */
    bzero(&ctl_ev_data, sizeof(ctl_ev_data));
    ctl_ev_data.ctl_id = id;
    ev_msg.dv[0].data_ptr = &ctl_ev_data;
    ev_msg.dv[0].data_length = sizeof(ctl_ev_data);

    ev_msg.dv[1].data_length = 0;

    kev_post_msg(&ev_msg);
}
static int
ctl_lock(struct socket *so, int refcount, void *lr)
{
    void *lr_saved;

    if (lr == NULL)
        lr_saved = __builtin_return_address(0);
    else
        lr_saved = lr;

    if (so->so_pcb != NULL) {
        lck_mtx_lock(((struct ctl_cb *)so->so_pcb)->mtx);
    } else {
        panic("ctl_lock: so=%p NO PCB! lr=%p lrh= %s\n",
            so, lr_saved, solockhistory_nr(so));
        /* NOTREACHED */
    }

    if (so->so_usecount < 0) {
        panic("ctl_lock: so=%p so_pcb=%p lr=%p ref=%x lrh= %s\n",
            so, so->so_pcb, lr_saved, so->so_usecount, solockhistory_nr(so));
        /* NOTREACHED */
    }

    if (refcount)
        so->so_usecount++;

    so->lock_lr[so->next_lock_lr] = lr_saved;
    so->next_lock_lr = (so->next_lock_lr+1) % SO_LCKDBG_MAX;
    return (0);
}
static int
ctl_unlock(struct socket *so, int refcount, void *lr)
{
    void *lr_saved;
    lck_mtx_t *mutex_held;

    if (lr == NULL)
        lr_saved = __builtin_return_address(0);
    else
        lr_saved = lr;

#ifdef MORE_KCTLLOCK_DEBUG
    printf("ctl_unlock: so=%x sopcb=%x lock=%x ref=%x lr=%p\n",
        so, so->so_pcb, ((struct ctl_cb *)so->so_pcb)->mtx,
        so->so_usecount, lr_saved);
#endif
    if (refcount)
        so->so_usecount--;

    if (so->so_usecount < 0) {
        panic("ctl_unlock: so=%p usecount=%x lrh= %s\n",
            so, so->so_usecount, solockhistory_nr(so));
        /* NOTREACHED */
    }
    if (so->so_pcb == NULL) {
        panic("ctl_unlock: so=%p NO PCB usecount=%x lr=%p lrh= %s\n",
            so, so->so_usecount, (void *)lr_saved, solockhistory_nr(so));
        /* NOTREACHED */
    }
    mutex_held = ((struct ctl_cb *)so->so_pcb)->mtx;

    lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
    so->unlock_lr[so->next_unlock_lr] = lr_saved;
    so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX;
    lck_mtx_unlock(mutex_held);

    if (so->so_usecount == 0)
        ctl_sofreelastref(so);

    return (0);
}
static lck_mtx_t *
ctl_getlock(struct socket *so, __unused int locktype)
{
    struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

    if (so->so_pcb) {
        if (so->so_usecount < 0)
            panic("ctl_getlock: so=%p usecount=%x lrh= %s\n",
                so, so->so_usecount, solockhistory_nr(so));
        return (kcb->mtx);
    } else {
        panic("ctl_getlock: so=%p NULL NO so_pcb %s\n",
            so, solockhistory_nr(so));
        return (so->so_proto->pr_domain->dom_mtx);
    }
}