/*
 * Copyright (c) 1999-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Kernel Control domain - allows kernel modules to register control
 * interfaces that user processes can connect to and read/write data over.
 *
 * Vincent Lubet, 040506
 * Christophe Allie, 010928
 * Justin C. Walker, 990319
 */
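/*
 * Illustrative sketch (not part of the original file): how a userland
 * process reaches this code path.  The client opens a PF_SYSTEM socket,
 * resolves a control name to its dynamically assigned ID with CTLIOCGINFO
 * (handled by ctl_ioctl below), then connects (handled by ctl_connect).
 * The control name "com.example.kext.ctl" is a hypothetical placeholder.
 * Kept under #if 0 so the kernel build ignores it.
 */
#if 0
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/sys_domain.h>
#include <sys/kern_control.h>
#include <string.h>
#include <unistd.h>

static int
connect_to_control(const char *name)
{
    struct ctl_info info;
    struct sockaddr_ctl addr;
    int fd = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);

    if (fd < 0)
        return -1;

    /* Resolve the registered control name to its control ID */
    memset(&info, 0, sizeof(info));
    strlcpy(info.ctl_name, name, sizeof(info.ctl_name));
    if (ioctl(fd, CTLIOCGINFO, &info) == -1) {
        close(fd);
        return -1;
    }

    /* sc_unit == 0 asks ctl_connect() to pick the first free unit */
    memset(&addr, 0, sizeof(addr));
    addr.sc_len = sizeof(addr);
    addr.sc_family = AF_SYSTEM;
    addr.ss_sysaddr = AF_SYS_CONTROL;
    addr.sc_id = info.ctl_id;
    addr.sc_unit = 0;
    if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) == -1) {
        close(fd);
        return -1;
    }
    return fd;  /* read()/write() now reach ctl_send()/ctl_enqueue*() */
}
#endif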
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/malloc.h>
#include <sys/sys_domain.h>
#include <sys/kern_event.h>
#include <sys/kern_control.h>
#include <sys/kauth.h>
#include <net/if_var.h>

#include <mach/vm_types.h>
#include <mach/kmod.h>

#include <kern/thread.h>
/*
 * Definitions and variables for the default buffer sizes
 */
#define CTL_SENDSIZE    (2 * 1024)      /* default buffer size */
#define CTL_RECVSIZE    (8 * 1024)      /* default buffer size */

/*
 * Definitions and variables for the controls we support
 */
static u_int32_t        ctl_maxunit = 65536;
static lck_grp_attr_t   *ctl_lck_grp_attr = 0;
static lck_attr_t       *ctl_lck_attr = 0;
static lck_grp_t        *ctl_lck_grp = 0;
static lck_mtx_t        *ctl_mtx;

/* all the controllers are chained */
TAILQ_HEAD(kctl_list, kctl)     ctl_head;
static int ctl_attach(struct socket *, int, struct proc *);
static int ctl_detach(struct socket *);
static int ctl_sofreelastref(struct socket *so);
static int ctl_connect(struct socket *, struct sockaddr *, struct proc *);
static int ctl_disconnect(struct socket *);
static int ctl_ioctl(struct socket *so, u_long cmd, caddr_t data,
            struct ifnet *ifp, struct proc *p);
static int ctl_send(struct socket *, int, struct mbuf *,
            struct sockaddr *, struct mbuf *, struct proc *);
static int ctl_ctloutput(struct socket *, struct sockopt *);
static int ctl_peeraddr(struct socket *so, struct sockaddr **nam);

static struct kctl *ctl_find_by_name(const char *);
static struct kctl *ctl_find_by_id_unit(u_int32_t id, u_int32_t unit);

static struct ctl_cb *kcb_find(struct kctl *, u_int32_t unit);
static void ctl_post_msg(u_long event_code, u_int32_t id);

static int ctl_lock(struct socket *, int, int);
static int ctl_unlock(struct socket *, int, int);
static lck_mtx_t * ctl_getlock(struct socket *, int);
static struct pr_usrreqs ctl_usrreqs =
{
    pru_abort_notsupp, pru_accept_notsupp, ctl_attach, pru_bind_notsupp,
    ctl_connect, pru_connect2_notsupp, ctl_ioctl, ctl_detach,
    ctl_disconnect, pru_listen_notsupp, ctl_peeraddr,
    pru_rcvd_notsupp, pru_rcvoob_notsupp, ctl_send,
    pru_sense_null, pru_shutdown_notsupp, pru_sockaddr_notsupp,
    sosend, soreceive, pru_sopoll_notsupp
};

static struct protosw kctlswk_dgram =
{
    SOCK_DGRAM, &systemdomain, SYSPROTO_CONTROL,
    PR_ATOMIC|PR_CONNREQUIRED|PR_PCBLOCK,
    NULL, NULL, NULL, ctl_ctloutput,
    NULL, NULL,
    NULL, NULL, NULL, NULL, &ctl_usrreqs,
    ctl_lock, ctl_unlock, ctl_getlock, { 0, 0 }, 0, { 0 }
};

static struct protosw kctlswk_stream =
{
    SOCK_STREAM, &systemdomain, SYSPROTO_CONTROL,
    PR_CONNREQUIRED|PR_PCBLOCK,
    NULL, NULL, NULL, ctl_ctloutput,
    NULL, NULL,
    NULL, NULL, NULL, NULL, &ctl_usrreqs,
    ctl_lock, ctl_unlock, ctl_getlock, { 0, 0 }, 0, { 0 }
};
/*
 * Install the protosw's for the Kernel Control manager.
 */
__private_extern__ int
kern_control_init(void)
{
    int error = 0;

    ctl_lck_grp_attr = lck_grp_attr_alloc_init();
    if (ctl_lck_grp_attr == 0) {
        printf("kern_control_init: lck_grp_attr_alloc_init failed\n");
        error = ENOMEM;
        goto done;
    }

    ctl_lck_grp = lck_grp_alloc_init("Kernel Control Protocol", ctl_lck_grp_attr);
    if (ctl_lck_grp == 0) {
        printf("kern_control_init: lck_grp_alloc_init failed\n");
        error = ENOMEM;
        goto done;
    }

    ctl_lck_attr = lck_attr_alloc_init();
    if (ctl_lck_attr == 0) {
        printf("kern_control_init: lck_attr_alloc_init failed\n");
        error = ENOMEM;
        goto done;
    }

    ctl_mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
    if (ctl_mtx == 0) {
        printf("kern_control_init: lck_mtx_alloc_init failed\n");
        error = ENOMEM;
        goto done;
    }
    TAILQ_INIT(&ctl_head);

    error = net_add_proto(&kctlswk_dgram, &systemdomain);
    if (error) {
        log(LOG_WARNING, "kern_control_init: net_add_proto dgram failed (%d)\n", error);
    }
    error = net_add_proto(&kctlswk_stream, &systemdomain);
    if (error) {
        log(LOG_WARNING, "kern_control_init: net_add_proto stream failed (%d)\n", error);
    }

done:
    if (error != 0) {
        if (ctl_mtx) {
            lck_mtx_free(ctl_mtx, ctl_lck_grp);
            ctl_mtx = 0;
        }
        if (ctl_lck_grp) {
            lck_grp_free(ctl_lck_grp);
            ctl_lck_grp = 0;
        }
        if (ctl_lck_grp_attr) {
            lck_grp_attr_free(ctl_lck_grp_attr);
            ctl_lck_grp_attr = 0;
        }
        if (ctl_lck_attr) {
            lck_attr_free(ctl_lck_attr);
            ctl_lck_attr = 0;
        }
    }
    return error;
}
static void
kcb_delete(struct ctl_cb *kcb)
{
    if (kcb != 0) {
        if (kcb->mtx != 0)
            lck_mtx_free(kcb->mtx, ctl_lck_grp);
        FREE(kcb, M_TEMP);
    }
}
/*
 * Kernel Controller user-request functions
 * attach function must exist and succeed
 * detach not necessary
 * we need a pcb for the per socket mutex
 */
static int
ctl_attach(__unused struct socket *so, __unused int proto, __unused struct proc *p)
{
    int             error = 0;
    struct ctl_cb   *kcb = 0;

    MALLOC(kcb, struct ctl_cb *, sizeof(struct ctl_cb), M_TEMP, M_WAITOK);
    if (kcb == NULL) {
        error = ENOMEM;
        goto quit;
    }
    bzero(kcb, sizeof(struct ctl_cb));

    kcb->mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
    if (kcb->mtx == NULL) {
        error = ENOMEM;
        goto quit;
    }
    kcb->so = so;
    so->so_pcb = (caddr_t)kcb;

quit:
    if (error != 0) {
        kcb_delete(kcb);
        kcb = 0;
    }
    return error;
}
static int
ctl_sofreelastref(struct socket *so)
{
    struct ctl_cb   *kcb = (struct ctl_cb *)so->so_pcb;

    so->so_pcb = 0;

    if (kcb != 0) {
        struct kctl *kctl;

        if ((kctl = kcb->kctl) != 0) {
            lck_mtx_lock(ctl_mtx);
            TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
            lck_mtx_unlock(ctl_mtx);
        }
        kcb_delete(kcb);
    }
    sofreelastref(so, 1);
    return 0;
}
static int
ctl_detach(struct socket *so)
{
    struct ctl_cb   *kcb = (struct ctl_cb *)so->so_pcb;

    if (kcb == 0)
        return 0;

    soisdisconnected(so);
    so->so_flags |= SOF_PCBCLEARING;
    return 0;
}
static int
ctl_connect(struct socket *so, struct sockaddr *nam, __unused struct proc *p)
{
    struct kctl         *kctl;
    int                 error = 0;
    struct sockaddr_ctl sa;
    struct ctl_cb       *kcb = (struct ctl_cb *)so->so_pcb;
    struct ctl_cb       *kcb_next = NULL;

    if (kcb == 0)
        panic("ctl_connect so_pcb null\n");

    if (nam->sa_len != sizeof(struct sockaddr_ctl))
        return (EINVAL);

    bcopy(nam, &sa, sizeof(struct sockaddr_ctl));

    lck_mtx_lock(ctl_mtx);
    kctl = ctl_find_by_id_unit(sa.sc_id, sa.sc_unit);
    if (kctl == NULL) {
        lck_mtx_unlock(ctl_mtx);
        return (ENOENT);
    }

    /* The socket type must match the type the control was registered with */
    if (((kctl->flags & CTL_FLAG_REG_SOCK_STREAM) && (so->so_type != SOCK_STREAM)) ||
        (!(kctl->flags & CTL_FLAG_REG_SOCK_STREAM) && (so->so_type != SOCK_DGRAM))) {
        lck_mtx_unlock(ctl_mtx);
        return (EPROTOTYPE);
    }

    if (kctl->flags & CTL_FLAG_PRIVILEGED) {
        if (p == 0) {
            lck_mtx_unlock(ctl_mtx);
            return (EINVAL);
        }
        if (kauth_cred_issuser(kauth_cred_get()) == 0) {
            lck_mtx_unlock(ctl_mtx);
            return (EPERM);
        }
    }

    if ((kctl->flags & CTL_FLAG_REG_ID_UNIT) || sa.sc_unit != 0) {
        if (kcb_find(kctl, sa.sc_unit) != NULL) {
            lck_mtx_unlock(ctl_mtx);
            return (EBUSY);
        }
    } else {
        /* Find an unused unit number; assumes the kcb list is kept in unit order */
        u_int32_t unit = 1;

        TAILQ_FOREACH(kcb_next, &kctl->kcb_head, next) {
            if (kcb_next->unit > unit) {
                /* Found a gap, lets fill it in */
                break;
            }
            unit = kcb_next->unit + 1;
            if (unit == ctl_maxunit)
                break;
        }

        if (unit == ctl_maxunit) {
            lck_mtx_unlock(ctl_mtx);
            return (EBUSY);
        }

        sa.sc_unit = unit;
    }

    kcb->unit = sa.sc_unit;
    kcb->kctl = kctl;
    if (kcb_next != NULL) {
        TAILQ_INSERT_BEFORE(kcb_next, kcb, next);
    } else {
        TAILQ_INSERT_TAIL(&kctl->kcb_head, kcb, next);
    }
    lck_mtx_unlock(ctl_mtx);

    error = soreserve(so, kctl->sendbufsize, kctl->recvbufsize);
    if (error)
        goto done;
    soisconnecting(so);

    /* Call the registered connect function with the socket unlocked */
    socket_unlock(so, 0);
    error = (*kctl->connect)(kctl, &sa, &kcb->userdata);
    socket_lock(so, 0);
    if (error)
        goto done;

    soisconnected(so);

done:
    if (error) {
        soisdisconnected(so);
        lck_mtx_lock(ctl_mtx);
        kcb->kctl = 0;
        kcb->unit = 0;
        TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
        lck_mtx_unlock(ctl_mtx);
    }
    return error;
}
static int
ctl_disconnect(struct socket *so)
{
    struct ctl_cb   *kcb = (struct ctl_cb *)so->so_pcb;

    if (kcb != NULL) {
        struct kctl *kctl = kcb->kctl;

        if (kctl && kctl->disconnect) {
            socket_unlock(so, 0);
            (*kctl->disconnect)(kctl, kcb->unit, kcb->userdata);
            socket_lock(so, 0);
        }
        lck_mtx_lock(ctl_mtx);
        kcb->kctl = 0;
        kcb->unit = 0;
        TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
        soisdisconnected(so);
        lck_mtx_unlock(ctl_mtx);
    }
    return 0;
}
static int
ctl_peeraddr(struct socket *so, struct sockaddr **nam)
{
    struct ctl_cb       *kcb = (struct ctl_cb *)so->so_pcb;
    struct kctl         *kctl;
    struct sockaddr_ctl sc;

    if (kcb == NULL)    /* sanity check */
        return (ENOTCONN);

    if ((kctl = kcb->kctl) == NULL)
        return (EINVAL);

    bzero(&sc, sizeof(struct sockaddr_ctl));
    sc.sc_len = sizeof(struct sockaddr_ctl);
    sc.sc_family = AF_SYSTEM;
    sc.ss_sysaddr = AF_SYS_CONTROL;
    sc.sc_id = kctl->id;
    sc.sc_unit = kcb->unit;

    *nam = dup_sockaddr((struct sockaddr *)&sc, 1);

    return 0;
}
static int
ctl_send(struct socket *so, int flags, struct mbuf *m,
            __unused struct sockaddr *addr, __unused struct mbuf *control,
            __unused struct proc *p)
{
    int             error = 0;
    struct ctl_cb   *kcb = (struct ctl_cb *)so->so_pcb;
    struct kctl     *kctl;

    if (kcb == NULL)    /* sanity check */
        return (ENOTCONN);

    if ((kctl = kcb->kctl) == NULL)
        return (EINVAL);

    if (kctl->send) {
        socket_unlock(so, 0);
        error = (*kctl->send)(kctl, kcb->unit, kcb->userdata, m, flags);
        socket_lock(so, 0);
    }
    return error;
}
errno_t
ctl_enqueuembuf(void *kctlref, u_int32_t unit, struct mbuf *m, u_int32_t flags)
{
    struct ctl_cb   *kcb;
    struct socket   *so;
    errno_t         error = 0;
    struct kctl     *kctl = (struct kctl *)kctlref;

    if (kctl == NULL)
        return (EINVAL);

    kcb = kcb_find(kctl, unit);
    if (kcb == NULL)
        return (EINVAL);

    so = (struct socket *)kcb->so;
    if (so == NULL)
        return (EINVAL);

    socket_lock(so, 1);
    if (sbspace(&so->so_rcv) < m->m_pkthdr.len) {
        error = ENOBUFS;
        goto bye;
    }
    if ((flags & CTL_DATA_EOR))
        m->m_flags |= M_EOR;
    if (sbappend(&so->so_rcv, m) && (flags & CTL_DATA_NOWAKEUP) == 0)
        sorwakeup(so);
bye:
    socket_unlock(so, 1);
    return error;
}
errno_t
ctl_enqueuedata(void *kctlref, u_int32_t unit, void *data, size_t len, u_int32_t flags)
{
    struct ctl_cb   *kcb;
    struct socket   *so;
    struct mbuf     *m;
    struct mbuf     *n;
    errno_t         error = 0;
    struct kctl     *kctl = (struct kctl *)kctlref;
    unsigned int    num_needed;
    size_t          curlen = 0;

    if (kctlref == NULL)
        return (EINVAL);

    kcb = kcb_find(kctl, unit);
    if (kcb == NULL)
        return (EINVAL);

    so = (struct socket *)kcb->so;
    if (so == NULL)
        return (EINVAL);

    socket_lock(so, 1);
    if (sbspace(&so->so_rcv) < (long)len) {
        error = ENOBUFS;
        goto bye;
    }

    num_needed = 1;
    m = m_allocpacket_internal(&num_needed, len, NULL, M_NOWAIT, 1, 0);
    if (m == NULL) {
        printf("ctl_enqueuedata: m_allocpacket_internal(%lu) failed\n", len);
        error = ENOBUFS;
        goto bye;
    }

    /* Copy the data into the mbuf chain, filling each mbuf to capacity */
    for (n = m; n != NULL; n = n->m_next) {
        size_t mlen = mbuf_maxlen(n);

        if (mlen + curlen > len)
            mlen = len - curlen;
        n->m_len = mlen;
        bcopy((char *)data + curlen, n->m_data, mlen);
        curlen += mlen;
    }
    mbuf_pkthdr_setlen(m, curlen);

    if ((flags & CTL_DATA_EOR))
        m->m_flags |= M_EOR;
    if (sbappend(&so->so_rcv, m) && (flags & CTL_DATA_NOWAKEUP) == 0)
        sorwakeup(so);
bye:
    socket_unlock(so, 1);
    return error;
}
errno_t
ctl_getenqueuespace(kern_ctl_ref kctlref, u_int32_t unit, size_t *space)
{
    struct ctl_cb   *kcb;
    struct socket   *so;
    struct kctl     *kctl = (struct kctl *)kctlref;
    long            avail;

    if (kctlref == NULL || space == NULL)
        return (EINVAL);

    kcb = kcb_find(kctl, unit);
    if (kcb == NULL)
        return (EINVAL);

    so = (struct socket *)kcb->so;
    if (so == NULL)
        return (EINVAL);

    socket_lock(so, 1);
    avail = sbspace(&so->so_rcv);
    *space = (avail < 0) ? 0 : avail;
    socket_unlock(so, 1);

    return 0;
}
static int
ctl_ctloutput(struct socket *so, struct sockopt *sopt)
{
    struct ctl_cb   *kcb = (struct ctl_cb *)so->so_pcb;
    struct kctl     *kctl;
    int             error = 0;
    void            *data;
    size_t          len;

    if (sopt->sopt_level != SYSPROTO_CONTROL) {
        return (EINVAL);
    }

    if (kcb == NULL)    /* sanity check */
        return (ENOTCONN);

    if ((kctl = kcb->kctl) == NULL)
        return (EINVAL);

    switch (sopt->sopt_dir) {
        case SOPT_SET:
            if (kctl->setopt == NULL)
                return (ENOTSUP);
            if (sopt->sopt_valsize == 0) {
                data = NULL;
            } else {
                MALLOC(data, void *, sopt->sopt_valsize, M_TEMP, M_WAITOK);
                if (data == NULL)
                    return (ENOMEM);
                error = sooptcopyin(sopt, data, sopt->sopt_valsize, sopt->sopt_valsize);
            }
            if (error == 0) {
                socket_unlock(so, 0);
                error = (*kctl->setopt)(kcb->kctl, kcb->unit, kcb->userdata, sopt->sopt_name,
                            data, sopt->sopt_valsize);
                socket_lock(so, 0);
            }
            if (data != NULL)
                FREE(data, M_TEMP);
            break;

        case SOPT_GET:
            if (kctl->getopt == NULL)
                return (ENOTSUP);
            data = NULL;
            if (sopt->sopt_valsize && sopt->sopt_val) {
                MALLOC(data, void *, sopt->sopt_valsize, M_TEMP, M_WAITOK);
                if (data == NULL)
                    return (ENOMEM);
                /* 4108337 - copy in data for get socket option */
                error = sooptcopyin(sopt, data, sopt->sopt_valsize, sopt->sopt_valsize);
            }
            len = sopt->sopt_valsize;
            socket_unlock(so, 0);
            error = (*kctl->getopt)(kcb->kctl, kcb->unit, kcb->userdata, sopt->sopt_name,
                        data, &len);
            socket_lock(so, 0);
            if (error == 0) {
                if (data != NULL)
                    error = sooptcopyout(sopt, data, len);
                else
                    sopt->sopt_valsize = len;
            }
            if (data != NULL)
                FREE(data, M_TEMP);
            break;
    }
    return error;
}
static int
ctl_ioctl(__unused struct socket *so, u_long cmd, caddr_t data,
            __unused struct ifnet *ifp, __unused struct proc *p)
{
    int error = ENOTSUP;

    switch (cmd) {
        /* get the number of controllers */
        case CTLIOCGCOUNT: {
            struct kctl *kctl;
            int n = 0;

            lck_mtx_lock(ctl_mtx);
            TAILQ_FOREACH(kctl, &ctl_head, next)
                n++;
            lck_mtx_unlock(ctl_mtx);

            *(u_int32_t *)data = n;
            error = 0;
            break;
        }

        /* resolve a control name to its ID */
        case CTLIOCGINFO: {
            struct ctl_info *ctl_info = (struct ctl_info *)data;
            struct kctl     *kctl = 0;
            size_t          name_len = strlen(ctl_info->ctl_name);

            if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) {
                error = EINVAL;
                break;
            }
            lck_mtx_lock(ctl_mtx);
            kctl = ctl_find_by_name(ctl_info->ctl_name);
            lck_mtx_unlock(ctl_mtx);
            if (kctl == 0) {
                error = ENOENT;
                break;
            }
            ctl_info->ctl_id = kctl->id;
            error = 0;
            break;
        }

        /* add controls to get list of NKEs */
    }

    return error;
}
/*
 * Register/unregister a NKE
 */
errno_t
ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref)
{
    struct kctl     *kctl = NULL;
    struct kctl     *kctl_next = NULL;
    u_int32_t       id = 1;
    size_t          name_len;

    if (userkctl == NULL)   /* sanity check */
        return (EINVAL);
    if (userkctl->ctl_connect == NULL)
        return (EINVAL);
    name_len = strlen(userkctl->ctl_name);
    if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME)
        return (EINVAL);

    MALLOC(kctl, struct kctl *, sizeof(*kctl), M_TEMP, M_WAITOK);
    if (kctl == NULL)
        return (ENOMEM);
    bzero((char *)kctl, sizeof(*kctl));

    lck_mtx_lock(ctl_mtx);

    /*
     * CTL_FLAG_REG_ID_UNIT indicates the control ID and unit number are
     * static. If they do not exist, add them to the list in order. If the
     * flag is not set, we must find a new unique value. We assume the
     * list is in order. We find the last item in the list and add one. If
     * this leads to wrapping the id around, we start at the front of the
     * list and look for a gap.
     */

    if ((userkctl->ctl_flags & CTL_FLAG_REG_ID_UNIT) == 0) {
        /* Must dynamically assign an unused ID */

        /* Verify the same name isn't already registered */
        if (ctl_find_by_name(userkctl->ctl_name) != NULL) {
            lck_mtx_unlock(ctl_mtx);
            FREE(kctl, M_TEMP);
            return (EEXIST);
        }

        /* Start with 1 in case the list is empty */
        id = 1;
        kctl_next = TAILQ_LAST(&ctl_head, kctl_list);

        if (kctl_next != NULL) {
            /* List was not empty, add one to the last item in the list */
            id = kctl_next->id + 1;
            kctl_next = NULL;

            /*
             * If this wrapped the id number, start looking at the front
             * of the list for an unused id.
             */
            if (id == 0) {
                /* Find the next unused ID */
                id = 1;

                TAILQ_FOREACH(kctl_next, &ctl_head, next) {
                    if (kctl_next->id > id) {
                        /* We found a gap */
                        break;
                    }
                    id = kctl_next->id + 1;
                }
            }
        }

        userkctl->ctl_id = id;
        kctl->id = id;
        kctl->reg_unit = -1;
    } else {
        TAILQ_FOREACH(kctl_next, &ctl_head, next) {
            if (kctl_next->id > userkctl->ctl_id)
                break;
        }

        if (ctl_find_by_id_unit(userkctl->ctl_id, userkctl->ctl_unit) != NULL) {
            lck_mtx_unlock(ctl_mtx);
            FREE(kctl, M_TEMP);
            return (EEXIST);
        }
        kctl->id = userkctl->ctl_id;
        kctl->reg_unit = userkctl->ctl_unit;
    }
    strlcpy(kctl->name, userkctl->ctl_name, MAX_KCTL_NAME);
    kctl->flags = userkctl->ctl_flags;

    /* Let the caller know the default send and receive sizes */
    if (userkctl->ctl_sendsize == 0)
        userkctl->ctl_sendsize = CTL_SENDSIZE;
    kctl->sendbufsize = userkctl->ctl_sendsize;

    if (userkctl->ctl_recvsize == 0)
        userkctl->ctl_recvsize = CTL_RECVSIZE;
    kctl->recvbufsize = userkctl->ctl_recvsize;

    kctl->connect = userkctl->ctl_connect;
    kctl->disconnect = userkctl->ctl_disconnect;
    kctl->send = userkctl->ctl_send;
    kctl->setopt = userkctl->ctl_setopt;
    kctl->getopt = userkctl->ctl_getopt;

    TAILQ_INIT(&kctl->kcb_head);

    if (kctl_next)
        TAILQ_INSERT_BEFORE(kctl_next, kctl, next);
    else
        TAILQ_INSERT_TAIL(&ctl_head, kctl, next);

    lck_mtx_unlock(ctl_mtx);

    *kctlref = kctl;

    ctl_post_msg(KEV_CTL_REGISTERED, kctl->id);
    return (0);
}
errno_t
ctl_deregister(void *kctlref)
{
    struct kctl *kctl;

    if (kctlref == NULL)    /* sanity check */
        return (EINVAL);

    lck_mtx_lock(ctl_mtx);
    TAILQ_FOREACH(kctl, &ctl_head, next) {
        if (kctl == (struct kctl *)kctlref)
            break;
    }
    if (kctl != (struct kctl *)kctlref) {
        lck_mtx_unlock(ctl_mtx);
        return (EINVAL);
    }
    if (!TAILQ_EMPTY(&kctl->kcb_head)) {
        lck_mtx_unlock(ctl_mtx);
        return (EBUSY);
    }

    TAILQ_REMOVE(&ctl_head, kctl, next);

    lck_mtx_unlock(ctl_mtx);

    ctl_post_msg(KEV_CTL_DEREGISTERED, kctl->id);
    FREE(kctl, M_TEMP);
    return (0);
}
/*
 * Must be called with global ctl_mtx lock taken
 */
static struct kctl *
ctl_find_by_name(const char *name)
{
    struct kctl *kctl;

    TAILQ_FOREACH(kctl, &ctl_head, next)
        if (strncmp(kctl->name, name, sizeof(kctl->name)) == 0)
            return kctl;

    return NULL;
}
/*
 * Must be called with global ctl_mtx lock taken.
 * A control registered without CTL_FLAG_REG_ID_UNIT matches on ID alone;
 * otherwise both the ID and the registered unit must match.
 */
static struct kctl *
ctl_find_by_id_unit(u_int32_t id, u_int32_t unit)
{
    struct kctl *kctl;

    TAILQ_FOREACH(kctl, &ctl_head, next) {
        if (kctl->id == id && (kctl->flags & CTL_FLAG_REG_ID_UNIT) == 0)
            return kctl;
        else if (kctl->id == id && kctl->reg_unit == unit)
            return kctl;
    }
    return NULL;
}
/*
 * Must be called with kernel controller lock taken
 */
static struct ctl_cb *
kcb_find(struct kctl *kctl, u_int32_t unit)
{
    struct ctl_cb *kcb;

    TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
        if (kcb->unit == unit)
            return kcb;

    return NULL;
}
/*
 * Must be called without lock
 */
static void
ctl_post_msg(u_long event_code, u_int32_t id)
{
    struct ctl_event_data   ctl_ev_data;
    struct kev_msg          ev_msg;

    ev_msg.vendor_code = KEV_VENDOR_APPLE;

    ev_msg.kev_class = KEV_SYSTEM_CLASS;
    ev_msg.kev_subclass = KEV_CTL_SUBCLASS;
    ev_msg.event_code = event_code;

    /* common nke subclass data */
    bzero(&ctl_ev_data, sizeof(ctl_ev_data));
    ctl_ev_data.ctl_id = id;
    ev_msg.dv[0].data_ptr = &ctl_ev_data;
    ev_msg.dv[0].data_length = sizeof(ctl_ev_data);

    ev_msg.dv[1].data_length = 0;

    kev_post_msg(&ev_msg);
}
static int
ctl_lock(struct socket *so, int refcount, int lr)
{
    int lr_saved;

    if (lr == 0)
        lr_saved = (unsigned int) __builtin_return_address(0);
    else
        lr_saved = lr;

    if (so->so_pcb) {
        lck_mtx_lock(((struct ctl_cb *)so->so_pcb)->mtx);
    } else {
        panic("ctl_lock: so=%p NO PCB! lr=%x\n", so, lr_saved);
        lck_mtx_lock(so->so_proto->pr_domain->dom_mtx);
    }

    if (so->so_usecount < 0)
        panic("ctl_lock: so=%p so_pcb=%p lr=%x ref=%x\n",
            so, so->so_pcb, lr_saved, so->so_usecount);

    if (refcount)
        so->so_usecount++;

    so->lock_lr[so->next_lock_lr] = lr_saved;
    so->next_lock_lr = (so->next_lock_lr + 1) % SO_LCKDBG_MAX;
    return (0);
}
static int
ctl_unlock(struct socket *so, int refcount, int lr)
{
    int lr_saved;
    lck_mtx_t *mutex_held;

    if (lr == 0)
        lr_saved = (unsigned int) __builtin_return_address(0);
    else
        lr_saved = lr;

#ifdef MORE_KCTLLOCK_DEBUG
    printf("ctl_unlock: so=%x sopcb=%x lock=%x ref=%x lr=%x\n",
        so, so->so_pcb, ((struct ctl_cb *)so->so_pcb)->mtx, so->so_usecount, lr_saved);
#endif
    if (refcount)
        so->so_usecount--;

    if (so->so_usecount < 0)
        panic("ctl_unlock: so=%p usecount=%x\n", so, so->so_usecount);
    if (so->so_pcb == NULL) {
        panic("ctl_unlock: so=%p NO PCB usecount=%x lr=%x\n", so, so->so_usecount, lr_saved);
        mutex_held = so->so_proto->pr_domain->dom_mtx;
    } else {
        mutex_held = ((struct ctl_cb *)so->so_pcb)->mtx;
    }
    lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
    so->unlock_lr[so->next_unlock_lr] = lr_saved;
    so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX;
    lck_mtx_unlock(mutex_held);

    if (so->so_usecount == 0)
        ctl_sofreelastref(so);

    return (0);
}
static lck_mtx_t *
ctl_getlock(struct socket *so, __unused int locktype)
{
    struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

    if (so->so_pcb) {
        if (so->so_usecount < 0)
            panic("ctl_getlock: so=%p usecount=%x\n", so, so->so_usecount);
        return (kcb->mtx);
    } else {
        panic("ctl_getlock: so=%p NULL so_pcb\n", so);
        return (so->so_proto->pr_domain->dom_mtx);
    }
}