/*
 * Copyright (c) 1999-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */

/*
 * Kernel Control domain - allows control connections to kernel extensions
 * and to read/write data.
 *
 * Vincent Lubet, 040506
 * Christophe Allie, 010928
 * Justin C. Walker, 990319
 */
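
/*
 * Usage sketch (illustrative only, not part of the original file): a user
 * process reaches a registered kernel control through a PF_SYSTEM socket.
 * It resolves the control name to an id with the CTLIOCGINFO ioctl (handled
 * by ctl_ioctl below) and then connects with a struct sockaddr_ctl (handled
 * by ctl_connect below).  The control name "com.example.kctl" is a
 * hypothetical placeholder.
 */
#if 0	/* user-space example, never compiled into the kernel */
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/kern_control.h>
#include <sys/sys_domain.h>
#include <string.h>

static int
example_connect_to_control(void)
{
	struct ctl_info info;
	struct sockaddr_ctl addr;
	int fd = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);

	if (fd < 0)
		return -1;

	/* look up the dynamically assigned control id by name */
	memset(&info, 0, sizeof(info));
	strncpy(info.ctl_name, "com.example.kctl", sizeof(info.ctl_name));
	if (ioctl(fd, CTLIOCGINFO, &info) == -1)
		return -1;

	/* connect with unit 0: ctl_connect picks the next free unit */
	memset(&addr, 0, sizeof(addr));
	addr.sc_len = sizeof(addr);
	addr.sc_family = AF_SYSTEM;
	addr.ss_sysaddr = AF_SYS_CONTROL;
	addr.sc_id = info.ctl_id;
	addr.sc_unit = 0;
	return connect(fd, (struct sockaddr *)&addr, sizeof(addr));
}
#endif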

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/malloc.h>
#include <sys/sys_domain.h>
#include <sys/kern_event.h>
#include <sys/kern_control.h>
#include <net/if_var.h>

#include <mach/vm_types.h>
#include <mach/kmod.h>

#include <kern/thread.h>

/*
 * Definitions and vars for the default send and receive buffer sizes.
 */
#define CTL_SENDSIZE	(2 * 1024)	/* default buffer size */
#define CTL_RECVSIZE	(8 * 1024)	/* default buffer size */

/*
 * Definitions and vars for the registered kernel controllers.
 */
static u_int32_t	ctl_last_id = 0;
static u_int32_t	ctl_max = 256;
static u_int32_t	ctl_maxunit = 65536;
static lck_grp_attr_t	*ctl_lck_grp_attr = 0;
static lck_attr_t	*ctl_lck_attr = 0;
static lck_grp_t	*ctl_lck_grp = 0;
static lck_mtx_t	*ctl_mtx;

/*
 * Internal structure maintained for each registered controller.
 */
struct ctl_cb;

struct kctl
{
	TAILQ_ENTRY(kctl)	next;		/* controller chain */

	/* controller information provided when registering */
	char			name[MAX_KCTL_NAME];	/* unique nke identifier, provided by DTS */
	u_int32_t		id;
	u_int32_t		reg_unit;

	/* misc communication information */
	u_int32_t		flags;		/* support flags */
	u_int32_t		recvbufsize;	/* request more than the default buffer size */
	u_int32_t		sendbufsize;	/* request more than the default buffer size */

	/* Dispatch functions */
	ctl_connect_func	connect;	/* Make contact */
	ctl_disconnect_func	disconnect;	/* Break contact */
	ctl_send_func		send;		/* Send data to nke */
	ctl_setopt_func		setopt;		/* set kctl configuration */
	ctl_getopt_func		getopt;		/* get kctl configuration */

	TAILQ_HEAD(, ctl_cb)	kcb_head;
	u_int32_t		lastunit;
};

struct ctl_cb {
	TAILQ_ENTRY(ctl_cb)	next;		/* controller chain */
	lck_mtx_t		*mtx;
	struct socket		*so;		/* controlling socket */
	struct kctl		*kctl;		/* back pointer to controller */
	void			*userdata;
	u_int32_t		unit;
};

/* all the controllers are chained */
TAILQ_HEAD(, kctl)	ctl_head;

static int ctl_attach(struct socket *, int, struct proc *);
static int ctl_detach(struct socket *);
static int ctl_sofreelastref(struct socket *so);
static int ctl_connect(struct socket *, struct sockaddr *, struct proc *);
static int ctl_disconnect(struct socket *);
static int ctl_ioctl(struct socket *so, u_long cmd, caddr_t data,
			struct ifnet *ifp, struct proc *p);
static int ctl_send(struct socket *, int, struct mbuf *,
			struct sockaddr *, struct mbuf *, struct proc *);
static int ctl_ctloutput(struct socket *, struct sockopt *);
static int ctl_peeraddr(struct socket *so, struct sockaddr **nam);

static struct kctl *ctl_find_by_id(u_int32_t);
static struct kctl *ctl_find_by_name(const char *);
static struct kctl *ctl_find_by_id_unit(u_int32_t id, u_int32_t unit);

static struct ctl_cb *kcb_find(struct kctl *, u_int32_t unit);
static void ctl_post_msg(u_long event_code, u_int32_t id);

static int ctl_lock(struct socket *, int, int);
static int ctl_unlock(struct socket *, int, int);
static lck_mtx_t * ctl_getlock(struct socket *, int);

static struct pr_usrreqs ctl_usrreqs =
{
	pru_abort_notsupp, pru_accept_notsupp, ctl_attach, pru_bind_notsupp,
	ctl_connect, pru_connect2_notsupp, ctl_ioctl, ctl_detach,
	ctl_disconnect, pru_listen_notsupp, ctl_peeraddr,
	pru_rcvd_notsupp, pru_rcvoob_notsupp, ctl_send,
	pru_sense_null, pru_shutdown_notsupp, pru_sockaddr_notsupp,
	sosend, soreceive, pru_sopoll_notsupp
};

static struct protosw kctlswk_dgram =
{
	SOCK_DGRAM, &systemdomain, SYSPROTO_CONTROL,
	PR_ATOMIC|PR_CONNREQUIRED|PR_PCBLOCK,
	NULL, NULL, NULL, ctl_ctloutput,
	NULL, NULL,
	NULL, NULL, NULL, NULL, &ctl_usrreqs,
	ctl_lock, ctl_unlock, ctl_getlock, { 0, 0 } , 0, { 0 }
};

static struct protosw kctlswk_stream =
{
	SOCK_STREAM, &systemdomain, SYSPROTO_CONTROL,
	PR_CONNREQUIRED|PR_PCBLOCK,
	NULL, NULL, NULL, ctl_ctloutput,
	NULL, NULL,
	NULL, NULL, NULL, NULL, &ctl_usrreqs,
	ctl_lock, ctl_unlock, ctl_getlock, { 0, 0 } , 0, { 0 }
};

/*
 * Install the protosw's for the Kernel Control manager.
 */
__private_extern__ int
kern_control_init(void)
{
	int error = 0;

	ctl_lck_grp_attr = lck_grp_attr_alloc_init();
	if (ctl_lck_grp_attr == 0) {
		printf("kern_control_init: lck_grp_attr_alloc_init failed\n");
		error = ENOMEM;
		goto done;
	}
	lck_grp_attr_setdefault(ctl_lck_grp_attr);

	ctl_lck_grp = lck_grp_alloc_init("Kernel Control Protocol", ctl_lck_grp_attr);
	if (ctl_lck_grp == 0) {
		printf("kern_control_init: lck_grp_alloc_init failed\n");
		error = ENOMEM;
		goto done;
	}

	ctl_lck_attr = lck_attr_alloc_init();
	if (ctl_lck_attr == 0) {
		printf("kern_control_init: lck_attr_alloc_init failed\n");
		error = ENOMEM;
		goto done;
	}
	lck_attr_setdefault(ctl_lck_attr);

	ctl_mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
	if (ctl_mtx == NULL) {
		printf("kern_control_init: lck_mtx_alloc_init failed\n");
		error = ENOMEM;
		goto done;
	}
	TAILQ_INIT(&ctl_head);

	error = net_add_proto(&kctlswk_dgram, &systemdomain);
	if (error) {
		log(LOG_WARNING, "kern_control_init: net_add_proto dgram failed (%d)\n", error);
		goto done;
	}
	error = net_add_proto(&kctlswk_stream, &systemdomain);
	if (error) {
		log(LOG_WARNING, "kern_control_init: net_add_proto stream failed (%d)\n", error);
		goto done;
	}

done:
	if (error != 0) {
		if (ctl_mtx) {
			lck_mtx_free(ctl_mtx, ctl_lck_grp);
			ctl_mtx = 0;
		}
		if (ctl_lck_grp) {
			lck_grp_free(ctl_lck_grp);
			ctl_lck_grp = 0;
		}
		if (ctl_lck_grp_attr) {
			lck_grp_attr_free(ctl_lck_grp_attr);
			ctl_lck_grp_attr = 0;
		}
		if (ctl_lck_attr) {
			lck_attr_free(ctl_lck_attr);
			ctl_lck_attr = 0;
		}
	}
	return (error);
}

static void
kcb_delete(struct ctl_cb *kcb)
{
	if (kcb != 0) {
		if (kcb->mtx != 0)
			lck_mtx_free(kcb->mtx, ctl_lck_grp);
		FREE(kcb, M_TEMP);
	}
}

/*
 * Kernel Controller user-request functions:
 * - the attach function must exist and succeed;
 * - a detach function is not necessary;
 * - we need a pcb for the per socket mutex.
 */
static int
ctl_attach(__unused struct socket *so, __unused int proto, __unused struct proc *p)
{
	int		error = 0;
	struct ctl_cb	*kcb = 0;

	MALLOC(kcb, struct ctl_cb *, sizeof(struct ctl_cb), M_TEMP, M_WAITOK);
	if (kcb == NULL) {
		error = ENOMEM;
		goto quit;
	}
	bzero(kcb, sizeof(struct ctl_cb));

	kcb->mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
	if (kcb->mtx == NULL) {
		error = ENOMEM;
		goto quit;
	}
	kcb->so = so;
	so->so_pcb = (caddr_t)kcb;

quit:
	if (error != 0) {
		kcb_delete(kcb);
		kcb = 0;
	}
	return error;
}

static int
ctl_sofreelastref(struct socket *so)
{
	struct ctl_cb	*kcb = (struct ctl_cb *)so->so_pcb;

	so->so_pcb = 0;

	if (kcb != 0) {
		struct kctl	*kctl;

		if ((kctl = kcb->kctl) != 0) {
			lck_mtx_lock(ctl_mtx);
			TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
			lck_mtx_unlock(ctl_mtx);
		}
		kcb_delete(kcb);
	}
	return 0;
}

static int
ctl_detach(struct socket *so)
{
	struct ctl_cb	*kcb = (struct ctl_cb *)so->so_pcb;

	if (kcb == 0)
		return 0;

	soisdisconnected(so);
	so->so_flags |= SOF_PCBCLEARING;
	return 0;
}

static int
ctl_connect(struct socket *so, struct sockaddr *nam, __unused struct proc *p)
{
	struct kctl	*kctl;
	int		error = 0;
	struct sockaddr_ctl	sa;
	struct ctl_cb	*kcb = (struct ctl_cb *)so->so_pcb;

	if (kcb == 0)
		panic("ctl_connect so_pcb null\n");

	if (nam->sa_len != sizeof(struct sockaddr_ctl))
		return (EINVAL);

	bcopy(nam, &sa, sizeof(struct sockaddr_ctl));

	lck_mtx_lock(ctl_mtx);
	kctl = ctl_find_by_id_unit(sa.sc_id, sa.sc_unit);
	if (kctl == NULL) {
		lck_mtx_unlock(ctl_mtx);
		return (ENOENT);
	}

	if (((kctl->flags & CTL_FLAG_REG_SOCK_STREAM) && (so->so_type != SOCK_STREAM)) ||
	    (!(kctl->flags & CTL_FLAG_REG_SOCK_STREAM) && (so->so_type != SOCK_DGRAM))) {
		lck_mtx_unlock(ctl_mtx);
		return (EPROTOTYPE);
	}

	if (kctl->flags & CTL_FLAG_PRIVILEGED) {
		if (p == 0) {
			lck_mtx_unlock(ctl_mtx);
			return (EINVAL);
		}
		if ((error = proc_suser(p))) {
			lck_mtx_unlock(ctl_mtx);
			return error;
		}
	}

	if ((kctl->flags & CTL_FLAG_REG_ID_UNIT) || sa.sc_unit != 0) {
		if (kcb_find(kctl, sa.sc_unit) != NULL) {
			lck_mtx_unlock(ctl_mtx);
			return (EBUSY);
		}
	} else {
		u_int32_t	unit = kctl->lastunit + 1;

		while (1) {
			if (unit == ctl_maxunit)
				unit = 1;
			if (kcb_find(kctl, unit) == NULL) {
				kctl->lastunit = sa.sc_unit = unit;
				break;
			}
			if (unit++ == kctl->lastunit) {
				lck_mtx_unlock(ctl_mtx);
				return (EBUSY);
			}
		}
	}

	kcb->unit = sa.sc_unit;
	kcb->kctl = kctl;
	TAILQ_INSERT_TAIL(&kctl->kcb_head, kcb, next);
	lck_mtx_unlock(ctl_mtx);

	error = soreserve(so, kctl->sendbufsize, kctl->recvbufsize);
	if (error)
		goto done;
	soisconnecting(so);

	socket_unlock(so, 0);
	error = (*kctl->connect)(kctl, &sa, &kcb->userdata);
	socket_lock(so, 0);
	if (error)
		goto done;

	soisconnected(so);

done:
	if (error) {
		soisdisconnected(so);
		lck_mtx_lock(ctl_mtx);
		kcb->kctl = 0;
		kcb->unit = 0;
		TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
		lck_mtx_unlock(ctl_mtx);
	}
	return error;
}

static int
ctl_disconnect(struct socket *so)
{
	struct ctl_cb	*kcb = (struct ctl_cb *)so->so_pcb;

	if ((kcb = (struct ctl_cb *)so->so_pcb)) {
		struct kctl	*kctl = kcb->kctl;

		if (kctl && kctl->disconnect) {
			socket_unlock(so, 0);
			(*kctl->disconnect)(kctl, kcb->unit, kcb->userdata);
			socket_lock(so, 0);
		}
		lck_mtx_lock(ctl_mtx);
		kcb->kctl = 0;
		kcb->unit = 0;
		TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
		soisdisconnected(so);
		lck_mtx_unlock(ctl_mtx);
	}
	return 0;
}

static int
ctl_peeraddr(struct socket *so, struct sockaddr **nam)
{
	struct ctl_cb	*kcb = (struct ctl_cb *)so->so_pcb;
	struct kctl	*kctl;
	struct sockaddr_ctl	sc;

	if (kcb == NULL)	/* sanity check */
		return (ENOTCONN);

	if ((kctl = kcb->kctl) == NULL)
		return (EINVAL);

	bzero(&sc, sizeof(struct sockaddr_ctl));
	sc.sc_len = sizeof(struct sockaddr_ctl);
	sc.sc_family = AF_SYSTEM;
	sc.ss_sysaddr = AF_SYS_CONTROL;
	sc.sc_id = kctl->id;
	sc.sc_unit = kcb->unit;

	*nam = dup_sockaddr((struct sockaddr *)&sc, 1);

	return 0;
}

static int
ctl_send(struct socket *so, int flags, struct mbuf *m,
	 __unused struct sockaddr *addr, __unused struct mbuf *control,
	 __unused struct proc *p)
{
	int		error = 0;
	struct ctl_cb	*kcb = (struct ctl_cb *)so->so_pcb;
	struct kctl	*kctl;

	if (kcb == NULL)	/* sanity check */
		return (ENOTCONN);

	if ((kctl = kcb->kctl) == NULL)
		return (EINVAL);

	if (kctl->send) {
		socket_unlock(so, 0);
		error = (*kctl->send)(kctl, kcb->unit, kcb->userdata, m, flags);
		socket_lock(so, 0);
	}
	return error;
}

errno_t
ctl_enqueuembuf(void *kctlref, u_int32_t unit, struct mbuf *m, u_int32_t flags)
{
	struct ctl_cb	*kcb;
	struct socket	*so;
	errno_t		error = 0;
	struct kctl	*kctl = (struct kctl *)kctlref;

	if (kctl == NULL)
		return EINVAL;

	kcb = kcb_find(kctl, unit);
	if (kcb == NULL)
		return EINVAL;

	so = (struct socket *)kcb->so;
	if (so == NULL)
		return EINVAL;

	socket_lock(so, 1);
	if (sbspace(&so->so_rcv) < m->m_pkthdr.len) {
		error = ENOBUFS;
		goto bye;
	}
	if ((flags & CTL_DATA_EOR))
		m->m_flags |= M_EOR;
	if (sbappend(&so->so_rcv, m) && (flags & CTL_DATA_NOWAKEUP) == 0)
		sorwakeup(so);
bye:
	socket_unlock(so, 1);
	return error;
}

errno_t
ctl_enqueuedata(void *kctlref, u_int32_t unit, void *data, size_t len, u_int32_t flags)
{
	struct ctl_cb	*kcb;
	struct socket	*so;
	struct mbuf	*m;
	errno_t		error = 0;
	struct kctl	*kctl = (struct kctl *)kctlref;
	unsigned int	num_needed;
	struct mbuf	*n;
	size_t		curlen = 0;

	if (kctlref == NULL)
		return EINVAL;

	kcb = kcb_find(kctl, unit);
	if (kcb == NULL)
		return EINVAL;

	so = (struct socket *)kcb->so;
	if (so == NULL)
		return EINVAL;

	socket_lock(so, 1);
	if ((size_t)sbspace(&so->so_rcv) < len) {
		error = ENOBUFS;
		goto bye;
	}

	num_needed = 1;
	m = m_allocpacket_internal(&num_needed, len, NULL, M_NOWAIT, 1, 0);
	if (m == NULL) {
		printf("ctl_enqueuedata: m_allocpacket_internal(%lu) failed\n", len);
		error = ENOMEM;
		goto bye;
	}

	for (n = m; n != NULL; n = n->m_next) {
		size_t mlen = mbuf_maxlen(n);

		if (mlen + curlen > len)
			mlen = len - curlen;
		n->m_len = mlen;
		bcopy((char *)data + curlen, n->m_data, mlen);
		curlen += mlen;
	}
	mbuf_pkthdr_setlen(m, curlen);

	if ((flags & CTL_DATA_EOR))
		m->m_flags |= M_EOR;
	if (sbappend(&so->so_rcv, m) && (flags & CTL_DATA_NOWAKEUP) == 0)
		sorwakeup(so);
bye:
	socket_unlock(so, 1);
	return error;
}

errno_t
ctl_getenqueuespace(kern_ctl_ref kctlref, u_int32_t unit, size_t *space)
{
	struct ctl_cb	*kcb;
	struct socket	*so;
	struct kctl	*kctl = (struct kctl *)kctlref;

	if (kctlref == NULL || space == NULL)
		return EINVAL;

	kcb = kcb_find(kctl, unit);
	if (kcb == NULL)
		return EINVAL;

	so = (struct socket *)kcb->so;
	if (so == NULL)
		return EINVAL;

	socket_lock(so, 1);
	*space = sbspace(&so->so_rcv);
	socket_unlock(so, 1);

	return 0;
}

static int
ctl_ctloutput(struct socket *so, struct sockopt *sopt)
{
	struct ctl_cb	*kcb = (struct ctl_cb *)so->so_pcb;
	struct kctl	*kctl;
	int		error = 0;
	void		*data;
	size_t		len;

	if (sopt->sopt_level != SYSPROTO_CONTROL) {
		return (EINVAL);
	}

	if (kcb == NULL)	/* sanity check */
		return (ENOTCONN);

	if ((kctl = kcb->kctl) == NULL)
		return (EINVAL);

	switch (sopt->sopt_dir) {
		case SOPT_SET:
			if (kctl->setopt == NULL)
				return (ENOTSUP);
			MALLOC(data, void *, sopt->sopt_valsize, M_TEMP, M_WAITOK);
			if (data == NULL)
				return (ENOMEM);
			error = sooptcopyin(sopt, data, sopt->sopt_valsize, sopt->sopt_valsize);
			if (error == 0) {
				socket_unlock(so, 0);
				error = (*kctl->setopt)(kcb->kctl, kcb->unit, kcb->userdata, sopt->sopt_name,
							data, sopt->sopt_valsize);
				socket_lock(so, 0);
			}
			FREE(data, M_TEMP);
			break;

		case SOPT_GET:
			if (kctl->getopt == NULL)
				return (ENOTSUP);
			data = NULL;
			if (sopt->sopt_valsize && sopt->sopt_val) {
				MALLOC(data, void *, sopt->sopt_valsize, M_TEMP, M_WAITOK);
				if (data == NULL)
					return (ENOMEM);
				/* 4108337 - copy in data for get socket option */
				error = sooptcopyin(sopt, data, sopt->sopt_valsize, sopt->sopt_valsize);
			}
			len = sopt->sopt_valsize;
			socket_unlock(so, 0);
			error = (*kctl->getopt)(kcb->kctl, kcb->unit, kcb->userdata, sopt->sopt_name,
						data, &len);
			socket_lock(so, 0);
			if (error == 0) {
				if (data != NULL)
					error = sooptcopyout(sopt, data, len);
				else
					sopt->sopt_valsize = len;
			}
			if (data != NULL)
				FREE(data, M_TEMP);
			break;
	}
	return error;
}

static int
ctl_ioctl(__unused struct socket *so, u_long cmd, caddr_t data,
	  __unused struct ifnet *ifp, __unused struct proc *p)
{
	int	error = ENOTSUP;

	switch (cmd) {
		/* get the number of controllers */
		case CTLIOCGCOUNT: {
			struct kctl	*kctl;
			int n = 0;

			lck_mtx_lock(ctl_mtx);
			TAILQ_FOREACH(kctl, &ctl_head, next)
				n++;
			lck_mtx_unlock(ctl_mtx);

			*(u_int32_t *)data = n;
			error = 0;
			break;
		}
		case CTLIOCGINFO: {
			struct ctl_info *ctl_info = (struct ctl_info *)data;
			struct kctl	*kctl = 0;
			size_t name_len = strlen(ctl_info->ctl_name);

			if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) {
				error = EINVAL;
				break;
			}
			lck_mtx_lock(ctl_mtx);
			kctl = ctl_find_by_name(ctl_info->ctl_name);
			lck_mtx_unlock(ctl_mtx);
			if (kctl == 0) {
				error = ENOENT;
				break;
			}
			ctl_info->ctl_id = kctl->id;
			error = 0;
			break;
		}

		/* add controls to get list of NKEs */
	}

	return error;
}

/*
 * Register/unregister an NKE
 */
errno_t
ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref)
{
	struct kctl	*kctl = 0;
	u_int32_t	id = -1;
	u_int32_t	n;
	size_t		name_len;

	if (userkctl == NULL)	/* sanity check */
		return (EINVAL);
	if (userkctl->ctl_connect == NULL)
		return (EINVAL);
	name_len = strlen(userkctl->ctl_name);
	if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME)
		return (EINVAL);

	MALLOC(kctl, struct kctl *, sizeof(*kctl), M_TEMP, M_WAITOK);
	if (kctl == NULL)
		return (ENOMEM);
	bzero((char *)kctl, sizeof(*kctl));

	lck_mtx_lock(ctl_mtx);

	if ((userkctl->ctl_flags & CTL_FLAG_REG_ID_UNIT) == 0) {
		if (ctl_find_by_name(userkctl->ctl_name) != NULL) {
			lck_mtx_unlock(ctl_mtx);
			FREE(kctl, M_TEMP);
			return (EEXIST);
		}
		for (n = 0, id = ctl_last_id + 1; n < ctl_max; id++, n++) {
			if (id == 0) {
				n--;
				continue;
			}
			if (ctl_find_by_id(id) == 0)
				break;
		}
		if (n == ctl_max) {
			lck_mtx_unlock(ctl_mtx);
			FREE(kctl, M_TEMP);
			return (ENOBUFS);
		}
		userkctl->ctl_id = id;
		kctl->id = id;
		kctl->reg_unit = -1;
	} else {
		if (ctl_find_by_id_unit(userkctl->ctl_id, userkctl->ctl_unit) != NULL) {
			lck_mtx_unlock(ctl_mtx);
			FREE(kctl, M_TEMP);
			return (EEXIST);
		}
		kctl->id = userkctl->ctl_id;
		kctl->reg_unit = userkctl->ctl_unit;
	}
	strcpy(kctl->name, userkctl->ctl_name);
	kctl->flags = userkctl->ctl_flags;

	/* Let the caller know the default send and receive sizes */
	if (userkctl->ctl_sendsize == 0)
		userkctl->ctl_sendsize = CTL_SENDSIZE;
	kctl->sendbufsize = userkctl->ctl_sendsize;

	if (userkctl->ctl_recvsize == 0)
		userkctl->ctl_recvsize = CTL_RECVSIZE;
	kctl->recvbufsize = userkctl->ctl_recvsize;

	kctl->connect = userkctl->ctl_connect;
	kctl->disconnect = userkctl->ctl_disconnect;
	kctl->send = userkctl->ctl_send;
	kctl->setopt = userkctl->ctl_setopt;
	kctl->getopt = userkctl->ctl_getopt;

	TAILQ_INIT(&kctl->kcb_head);

	TAILQ_INSERT_TAIL(&ctl_head, kctl, next);

	lck_mtx_unlock(ctl_mtx);

	*kctlref = kctl;

	ctl_post_msg(KEV_CTL_REGISTERED, kctl->id);
	return (0);
}

errno_t
ctl_deregister(void *kctlref)
{
	struct kctl	*kctl;

	if (kctlref == NULL)	/* sanity check */
		return (EINVAL);

	lck_mtx_lock(ctl_mtx);
	TAILQ_FOREACH(kctl, &ctl_head, next) {
		if (kctl == (struct kctl *)kctlref)
			break;
	}
	if (kctl != (struct kctl *)kctlref) {
		lck_mtx_unlock(ctl_mtx);
		return EINVAL;
	}
	if (!TAILQ_EMPTY(&kctl->kcb_head)) {
		lck_mtx_unlock(ctl_mtx);
		return EBUSY;
	}

	TAILQ_REMOVE(&ctl_head, kctl, next);

	lck_mtx_unlock(ctl_mtx);

	ctl_post_msg(KEV_CTL_DEREGISTERED, kctl->id);
	FREE(kctl, M_TEMP);
	return (0);
}

/*
 * Must be called with global ctl_mtx lock taken
 */
static struct kctl *
ctl_find_by_id(u_int32_t id)
{
	struct kctl	*kctl;

	TAILQ_FOREACH(kctl, &ctl_head, next)
		if (kctl->id == id)
			return kctl;
	return NULL;
}

/*
 * Must be called with global ctl_mtx lock taken
 */
static struct kctl *
ctl_find_by_name(const char *name)
{
	struct kctl	*kctl;

	TAILQ_FOREACH(kctl, &ctl_head, next)
		if (strcmp(kctl->name, name) == 0)
			return kctl;
	return NULL;
}

/*
 * Must be called with global ctl_mtx lock taken
 */
static struct kctl *
ctl_find_by_id_unit(u_int32_t id, u_int32_t unit)
{
	struct kctl	*kctl;

	TAILQ_FOREACH(kctl, &ctl_head, next) {
		if (kctl->id == id && (kctl->flags & CTL_FLAG_REG_ID_UNIT) == 0)
			return kctl;
		else if (kctl->id == id && kctl->reg_unit == unit)
			return kctl;
	}
	return NULL;
}

/*
 * Must be called with kernel controller lock taken
 */
static struct ctl_cb *
kcb_find(struct kctl *kctl, u_int32_t unit)
{
	struct ctl_cb	*kcb;

	TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
		if (kcb->unit == unit)
			return kcb;
	return NULL;
}

/*
 * Must be called without lock held
 */
static void
ctl_post_msg(u_long event_code, u_int32_t id)
{
	struct ctl_event_data	ctl_ev_data;
	struct kev_msg		ev_msg;

	ev_msg.vendor_code = KEV_VENDOR_APPLE;

	ev_msg.kev_class = KEV_SYSTEM_CLASS;
	ev_msg.kev_subclass = KEV_CTL_SUBCLASS;
	ev_msg.event_code = event_code;

	/* common nke subclass data */
	bzero(&ctl_ev_data, sizeof(ctl_ev_data));
	ctl_ev_data.ctl_id = id;
	ev_msg.dv[0].data_ptr = &ctl_ev_data;
	ev_msg.dv[0].data_length = sizeof(ctl_ev_data);

	ev_msg.dv[1].data_length = 0;

	kev_post_msg(&ev_msg);
}

static int
ctl_lock(struct socket *so, int refcount, int lr)
{
	int lr_saved;
#ifdef __ppc__
	if (lr == 0) {
		__asm__ volatile("mflr %0" : "=r" (lr_saved));
	}
	else lr_saved = lr;
#endif

	if (so->so_pcb) {
		lck_mtx_lock(((struct ctl_cb *)so->so_pcb)->mtx);
	} else {
		panic("ctl_lock: so=%x NO PCB! lr=%x\n", so, lr_saved);
		lck_mtx_lock(so->so_proto->pr_domain->dom_mtx);
	}

	if (so->so_usecount < 0)
		panic("ctl_lock: so=%x so_pcb=%x lr=%x ref=%x\n",
			so, so->so_pcb, lr_saved, so->so_usecount);

	if (refcount)
		so->so_usecount++;
	so->reserved3 = (void *)lr_saved;
	return (0);
}

static int
ctl_unlock(struct socket *so, int refcount, int lr)
{
	int lr_saved;
	lck_mtx_t * mutex_held;

#ifdef __ppc__
	if (lr == 0) {
		__asm__ volatile("mflr %0" : "=r" (lr_saved));
	}
	else lr_saved = lr;
#endif

#ifdef MORE_KCTLLOCK_DEBUG
	printf("ctl_unlock: so=%x sopcb=%x lock=%x ref=%x lr=%x\n",
		so, so->so_pcb, ((struct ctl_cb *)so->so_pcb)->mtx, so->so_usecount, lr_saved);
#endif
	if (refcount)
		so->so_usecount--;

	if (so->so_usecount < 0)
		panic("ctl_unlock: so=%x usecount=%x\n", so, so->so_usecount);
	if (so->so_pcb == NULL) {
		panic("ctl_unlock: so=%x NO PCB usecount=%x lr=%x\n", so, so->so_usecount, lr_saved);
		mutex_held = so->so_proto->pr_domain->dom_mtx;
	} else {
		mutex_held = ((struct ctl_cb *)so->so_pcb)->mtx;
	}
	lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
	lck_mtx_unlock(mutex_held);
	so->reserved4 = (void *)lr_saved;

	if (so->so_usecount == 0)
		ctl_sofreelastref(so);

	return (0);
}

static lck_mtx_t *
ctl_getlock(struct socket *so, __unused int locktype)
{
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

	if (so->so_pcb) {
		if (so->so_usecount < 0)
			panic("ctl_getlock: so=%x usecount=%x\n", so, so->so_usecount);
		return (kcb->mtx);
	} else {
		panic("ctl_getlock: so=%x NULL so_pcb\n", so);
		return (so->so_proto->pr_domain->dom_mtx);
	}
}