/*
 * Copyright (c) 1999-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * Kernel Control domain - allows control connections to kernel controllers
 * and to read/write data over them.
 *
 * Vincent Lubet, 040506
 * Christophe Allie, 010928
 * Justin C. Walker, 990319
 */

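/*
 * Illustrative sketch (not part of the build): a user-space client typically
 * reaches one of these controllers by opening a PF_SYSTEM/SYSPROTO_CONTROL
 * socket, resolving the controller name to an ID with CTLIOCGINFO, and then
 * connecting with a sockaddr_ctl.  The control name below is a placeholder.
 *
 *	int fd = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);
 *	struct ctl_info info;
 *	bzero(&info, sizeof(info));
 *	strlcpy(info.ctl_name, "com.example.kctl", sizeof(info.ctl_name));
 *	ioctl(fd, CTLIOCGINFO, &info);		// fills in info.ctl_id
 *
 *	struct sockaddr_ctl addr;
 *	bzero(&addr, sizeof(addr));
 *	addr.sc_len = sizeof(addr);
 *	addr.sc_family = AF_SYSTEM;
 *	addr.ss_sysaddr = AF_SYS_CONTROL;
 *	addr.sc_id = info.ctl_id;
 *	addr.sc_unit = 0;	// 0 requests a dynamically assigned unit (see ctl_connect)
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 */
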
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/sys_domain.h>
#include <sys/kern_event.h>
#include <sys/kern_control.h>
#include <sys/kauth.h>
#include <net/if_var.h>

#include <mach/vm_types.h>
#include <mach/kmod.h>

#include <kern/thread.h>

/*
 * Definitions and vars for the controls we support
 */

#define CTL_SENDSIZE	(2 * 1024)	/* default buffer size */
#define CTL_RECVSIZE	(8 * 1024)	/* default buffer size */

/*
 * Definitions and vars for the controls we support
 */

static u_int32_t ctl_maxunit = 65536;
static lck_grp_attr_t *ctl_lck_grp_attr = 0;
static lck_attr_t *ctl_lck_attr = 0;
static lck_grp_t *ctl_lck_grp = 0;
static lck_mtx_t *ctl_mtx;


/* all the controllers are chained */
TAILQ_HEAD(kctl_list, kctl) ctl_head;

static int ctl_attach(struct socket *, int, struct proc *);
static int ctl_detach(struct socket *);
static int ctl_sofreelastref(struct socket *so);
static int ctl_connect(struct socket *, struct sockaddr *, struct proc *);
static int ctl_disconnect(struct socket *);
static int ctl_ioctl(struct socket *so, u_long cmd, caddr_t data,
			struct ifnet *ifp, struct proc *p);
static int ctl_send(struct socket *, int, struct mbuf *,
			struct sockaddr *, struct mbuf *, struct proc *);
static int ctl_ctloutput(struct socket *, struct sockopt *);
static int ctl_peeraddr(struct socket *so, struct sockaddr **nam);

static struct kctl *ctl_find_by_name(const char *);
static struct kctl *ctl_find_by_id_unit(u_int32_t id, u_int32_t unit);

static struct ctl_cb *kcb_find(struct kctl *, u_int32_t unit);
static void ctl_post_msg(u_long event_code, u_int32_t id);

static int ctl_lock(struct socket *, int, int);
static int ctl_unlock(struct socket *, int, int);
static lck_mtx_t * ctl_getlock(struct socket *, int);

static struct pr_usrreqs ctl_usrreqs =
{
	pru_abort_notsupp, pru_accept_notsupp, ctl_attach, pru_bind_notsupp,
	ctl_connect, pru_connect2_notsupp, ctl_ioctl, ctl_detach,
	ctl_disconnect, pru_listen_notsupp, ctl_peeraddr,
	pru_rcvd_notsupp, pru_rcvoob_notsupp, ctl_send,
	pru_sense_null, pru_shutdown_notsupp, pru_sockaddr_notsupp,
	sosend, soreceive, pru_sopoll_notsupp
};

static struct protosw kctlswk_dgram =
{
	SOCK_DGRAM, &systemdomain, SYSPROTO_CONTROL,
	PR_ATOMIC|PR_CONNREQUIRED|PR_PCBLOCK,
	NULL, NULL, NULL, ctl_ctloutput,
	NULL, NULL,
	NULL, NULL, NULL, NULL, &ctl_usrreqs,
	ctl_lock, ctl_unlock, ctl_getlock, { 0, 0 }, 0, { 0 }
};

static struct protosw kctlswk_stream =
{
	SOCK_STREAM, &systemdomain, SYSPROTO_CONTROL,
	PR_CONNREQUIRED|PR_PCBLOCK,
	NULL, NULL, NULL, ctl_ctloutput,
	NULL, NULL,
	NULL, NULL, NULL, NULL, &ctl_usrreqs,
	ctl_lock, ctl_unlock, ctl_getlock, { 0, 0 }, 0, { 0 }
};


/*
 * Install the protosw's for the Kernel Control manager.
 */
__private_extern__ int
kern_control_init(void)
{
	int error = 0;

	ctl_lck_grp_attr = lck_grp_attr_alloc_init();
	if (ctl_lck_grp_attr == 0) {
		printf("kern_control_init: lck_grp_attr_alloc_init failed\n");
		error = ENOMEM;
		goto done;
	}

	ctl_lck_grp = lck_grp_alloc_init("Kernel Control Protocol", ctl_lck_grp_attr);
	if (ctl_lck_grp == 0) {
		printf("kern_control_init: lck_grp_alloc_init failed\n");
		error = ENOMEM;
		goto done;
	}

	ctl_lck_attr = lck_attr_alloc_init();
	if (ctl_lck_attr == 0) {
		printf("kern_control_init: lck_attr_alloc_init failed\n");
		error = ENOMEM;
		goto done;
	}

	ctl_mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
	if (ctl_mtx == 0) {
		printf("kern_control_init: lck_mtx_alloc_init failed\n");
		error = ENOMEM;
		goto done;
	}
	TAILQ_INIT(&ctl_head);

	error = net_add_proto(&kctlswk_dgram, &systemdomain);
	if (error) {
		log(LOG_WARNING, "kern_control_init: net_add_proto dgram failed (%d)\n", error);
	}
	error = net_add_proto(&kctlswk_stream, &systemdomain);
	if (error) {
		log(LOG_WARNING, "kern_control_init: net_add_proto stream failed (%d)\n", error);
	}

done:
	if (error != 0) {
		if (ctl_mtx) {
			lck_mtx_free(ctl_mtx, ctl_lck_grp);
			ctl_mtx = 0;
		}
		if (ctl_lck_grp) {
			lck_grp_free(ctl_lck_grp);
			ctl_lck_grp = 0;
		}
		if (ctl_lck_grp_attr) {
			lck_grp_attr_free(ctl_lck_grp_attr);
			ctl_lck_grp_attr = 0;
		}
		if (ctl_lck_attr) {
			lck_attr_free(ctl_lck_attr);
			ctl_lck_attr = 0;
		}
	}
	return error;
}

static void
kcb_delete(struct ctl_cb *kcb)
{
	if (kcb != 0) {
		if (kcb->mtx != 0)
			lck_mtx_free(kcb->mtx, ctl_lck_grp);
		FREE(kcb, M_TEMP);
	}
}


/*
 * Kernel Controller user-request functions
 * attach function must exist and succeed
 * detach not necessary
 * we need a pcb for the per socket mutex
 */
static int
ctl_attach(struct socket *so, __unused int proto, __unused struct proc *p)
{
	int error = 0;
	struct ctl_cb *kcb = 0;

	MALLOC(kcb, struct ctl_cb *, sizeof(struct ctl_cb), M_TEMP, M_WAITOK);
	if (kcb == NULL) {
		error = ENOMEM;
		goto quit;
	}
	bzero(kcb, sizeof(struct ctl_cb));

	kcb->mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
	if (kcb->mtx == NULL) {
		error = ENOMEM;
		goto quit;
	}
	kcb->so = so;
	so->so_pcb = (caddr_t)kcb;

quit:
	if (error != 0) {
		kcb_delete(kcb);
		kcb = 0;
	}
	return error;
}

static int
ctl_sofreelastref(struct socket *so)
{
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

	so->so_pcb = 0;

	if (kcb != 0) {
		struct kctl *kctl;
		if ((kctl = kcb->kctl) != 0) {
			lck_mtx_lock(ctl_mtx);
			TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
			lck_mtx_unlock(ctl_mtx);
		}
		kcb_delete(kcb);
	}
	sofreelastref(so, 1);
	return 0;
}

static int
ctl_detach(struct socket *so)
{
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

	if (kcb == 0)
		return 0;

	soisdisconnected(so);
	so->so_flags |= SOF_PCBCLEARING;
	return 0;
}


static int
ctl_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
{
	struct kctl *kctl;
	int error = 0;
	struct sockaddr_ctl sa;
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
	struct ctl_cb *kcb_next = NULL;

	if (kcb == 0)
		panic("ctl_connect so_pcb null\n");

	if (nam->sa_len != sizeof(struct sockaddr_ctl))
		return(EINVAL);

	bcopy(nam, &sa, sizeof(struct sockaddr_ctl));

	lck_mtx_lock(ctl_mtx);
	kctl = ctl_find_by_id_unit(sa.sc_id, sa.sc_unit);
	if (kctl == NULL) {
		lck_mtx_unlock(ctl_mtx);
		return ENOENT;
	}

	if (((kctl->flags & CTL_FLAG_REG_SOCK_STREAM) && (so->so_type != SOCK_STREAM)) ||
	    (!(kctl->flags & CTL_FLAG_REG_SOCK_STREAM) && (so->so_type != SOCK_DGRAM))) {
		lck_mtx_unlock(ctl_mtx);
		return EPROTOTYPE;
	}

	if (kctl->flags & CTL_FLAG_PRIVILEGED) {
		if (p == 0) {
			lck_mtx_unlock(ctl_mtx);
			return(EINVAL);
		}
		if (kauth_cred_issuser(kauth_cred_get()) == 0) {
			lck_mtx_unlock(ctl_mtx);
			return EPERM;
		}
	}

	if ((kctl->flags & CTL_FLAG_REG_ID_UNIT) || sa.sc_unit != 0) {
		if (kcb_find(kctl, sa.sc_unit) != NULL) {
			lck_mtx_unlock(ctl_mtx);
			return EBUSY;
		}
	} else {
		/* Find an unused unit, assumes control units are listed in order */
		u_int32_t unit = 1;

		TAILQ_FOREACH(kcb_next, &kctl->kcb_head, next) {
			if (kcb_next->unit > unit) {
				/* Found a gap, let's fill it in */
				break;
			}
			unit = kcb_next->unit + 1;
			if (unit == ctl_maxunit)
				break;
		}

		if (unit == ctl_maxunit) {
			lck_mtx_unlock(ctl_mtx);
			return EBUSY;
		}

		sa.sc_unit = unit;
	}

	kcb->unit = sa.sc_unit;
	kcb->kctl = kctl;
	if (kcb_next != NULL) {
		TAILQ_INSERT_BEFORE(kcb_next, kcb, next);
	}
	else {
		TAILQ_INSERT_TAIL(&kctl->kcb_head, kcb, next);
	}
	lck_mtx_unlock(ctl_mtx);

	error = soreserve(so, kctl->sendbufsize, kctl->recvbufsize);
	if (error)
		goto done;
	soisconnecting(so);

	socket_unlock(so, 0);
	error = (*kctl->connect)(kctl, &sa, &kcb->userdata);
	socket_lock(so, 0);
	if (error)
		goto done;

	soisconnected(so);

done:
	if (error) {
		soisdisconnected(so);
		lck_mtx_lock(ctl_mtx);
		kcb->kctl = 0;
		kcb->unit = 0;
		TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
		lck_mtx_unlock(ctl_mtx);
	}
	return error;
}

static int
ctl_disconnect(struct socket *so)
{
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

	if ((kcb = (struct ctl_cb *)so->so_pcb)) {
		struct kctl *kctl = kcb->kctl;

		if (kctl && kctl->disconnect) {
			socket_unlock(so, 0);
			(*kctl->disconnect)(kctl, kcb->unit, kcb->userdata);
			socket_lock(so, 0);
		}
		lck_mtx_lock(ctl_mtx);
		kcb->kctl = 0;
		kcb->unit = 0;
		TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
		soisdisconnected(so);
		lck_mtx_unlock(ctl_mtx);
	}
	return 0;
}

static int
ctl_peeraddr(struct socket *so, struct sockaddr **nam)
{
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
	struct kctl *kctl;
	struct sockaddr_ctl sc;

	if (kcb == NULL)	/* sanity check */
		return(ENOTCONN);

	if ((kctl = kcb->kctl) == NULL)
		return(EINVAL);

	bzero(&sc, sizeof(struct sockaddr_ctl));
	sc.sc_len = sizeof(struct sockaddr_ctl);
	sc.sc_family = AF_SYSTEM;
	sc.ss_sysaddr = AF_SYS_CONTROL;
	sc.sc_id = kctl->id;
	sc.sc_unit = kcb->unit;

	*nam = dup_sockaddr((struct sockaddr *)&sc, 1);

	return 0;
}

static int
ctl_send(struct socket *so, int flags, struct mbuf *m,
	 __unused struct sockaddr *addr, __unused struct mbuf *control,
	 __unused struct proc *p)
{
	int error = 0;
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
	struct kctl *kctl;

	if (kcb == NULL)	/* sanity check */
		return(ENOTCONN);

	if ((kctl = kcb->kctl) == NULL)
		return(EINVAL);

	if (kctl->send) {
		socket_unlock(so, 0);
		error = (*kctl->send)(kctl, kcb->unit, kcb->userdata, m, flags);
		socket_lock(so, 0);
	}
	return error;
}

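/*
 * Sketch of the controller side of the send path above (assumed usage, not
 * part of this file; "my_ctl_send" is a hypothetical name): the ctl_send
 * callback registered in kern_ctl_reg receives the client's data with the
 * socket unlocked and, per the ctl_send_func documentation in
 * <sys/kern_control.h>, is responsible for releasing the mbuf chain.
 *
 *	static errno_t
 *	my_ctl_send(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo,
 *		    mbuf_t m, int flags)
 *	{
 *		// ... consume the payload ...
 *		mbuf_freem(m);		// callback owns the chain
 *		return 0;
 *	}
 */
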
errno_t
ctl_enqueuembuf(void *kctlref, u_int32_t unit, struct mbuf *m, u_int32_t flags)
{
	struct ctl_cb *kcb;
	struct socket *so;
	errno_t error = 0;
	struct kctl *kctl = (struct kctl *)kctlref;

	if (kctl == NULL)
		return EINVAL;

	kcb = kcb_find(kctl, unit);
	if (kcb == NULL)
		return EINVAL;

	so = (struct socket *)kcb->so;
	if (so == NULL)
		return EINVAL;

	socket_lock(so, 1);
	if (sbspace(&so->so_rcv) < m->m_pkthdr.len) {
		error = ENOBUFS;
		goto bye;
	}
	if ((flags & CTL_DATA_EOR))
		m->m_flags |= M_EOR;
	if (sbappend(&so->so_rcv, m) && (flags & CTL_DATA_NOWAKEUP) == 0)
		sorwakeup(so);
bye:
	socket_unlock(so, 1);
	return error;
}

errno_t
ctl_enqueuedata(void *kctlref, u_int32_t unit, void *data, size_t len, u_int32_t flags)
{
	struct ctl_cb *kcb;
	struct socket *so;
	struct mbuf *m;
	errno_t error = 0;
	struct kctl *kctl = (struct kctl *)kctlref;
	unsigned int num_needed;
	struct mbuf *n;
	size_t curlen = 0;

	if (kctlref == NULL)
		return EINVAL;

	kcb = kcb_find(kctl, unit);
	if (kcb == NULL)
		return EINVAL;

	so = (struct socket *)kcb->so;
	if (so == NULL)
		return EINVAL;

	socket_lock(so, 1);
	if (sbspace(&so->so_rcv) < (long)len) {
		error = ENOBUFS;
		goto bye;
	}

	num_needed = 1;
	m = m_allocpacket_internal(&num_needed, len, NULL, M_NOWAIT, 1, 0);
	if (m == NULL) {
		printf("ctl_enqueuedata: m_allocpacket_internal(%lu) failed\n", len);
		error = ENOBUFS;
		goto bye;
	}

	for (n = m; n != NULL; n = n->m_next) {
		size_t mlen = mbuf_maxlen(n);

		if (mlen + curlen > len)
			mlen = len - curlen;
		n->m_len = mlen;
		bcopy((char *)data + curlen, n->m_data, mlen);
		curlen += mlen;
	}
	mbuf_pkthdr_setlen(m, curlen);

	if ((flags & CTL_DATA_EOR))
		m->m_flags |= M_EOR;
	if (sbappend(&so->so_rcv, m) && (flags & CTL_DATA_NOWAKEUP) == 0)
		sorwakeup(so);
bye:
	socket_unlock(so, 1);
	return error;
}


errno_t
ctl_getenqueuespace(kern_ctl_ref kctlref, u_int32_t unit, size_t *space)
{
	struct ctl_cb *kcb;
	struct kctl *kctl = (struct kctl *)kctlref;
	struct socket *so;
	long avail;

	if (kctlref == NULL || space == NULL)
		return EINVAL;

	kcb = kcb_find(kctl, unit);
	if (kcb == NULL)
		return EINVAL;

	so = (struct socket *)kcb->so;
	if (so == NULL)
		return EINVAL;

	socket_lock(so, 1);
	avail = sbspace(&so->so_rcv);
	*space = (avail < 0) ? 0 : avail;
	socket_unlock(so, 1);

	return 0;
}

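/*
 * Illustrative producer-side pattern (assumed usage, not from this file;
 * "ref", "unit", "buf" and "buflen" are hypothetical): a controller that
 * wants to avoid the ENOBUFS path above can check the space available on the
 * client's receive buffer before enqueueing.  The check is advisory only,
 * since the space can change before the enqueue, so the return value of
 * ctl_enqueuedata() still has to be honored.
 *
 *	size_t space = 0;
 *	if (ctl_getenqueuespace(ref, unit, &space) == 0 && space >= buflen)
 *		(void) ctl_enqueuedata(ref, unit, buf, buflen, CTL_DATA_EOR);
 */
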
static int
ctl_ctloutput(struct socket *so, struct sockopt *sopt)
{
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
	struct kctl *kctl;
	int error = 0;
	void *data;
	size_t len;

	if (sopt->sopt_level != SYSPROTO_CONTROL) {
		return(EINVAL);
	}

	if (kcb == NULL)	/* sanity check */
		return(ENOTCONN);

	if ((kctl = kcb->kctl) == NULL)
		return(EINVAL);

	switch (sopt->sopt_dir) {
	case SOPT_SET:
		if (kctl->setopt == NULL)
			return(ENOTSUP);
		if (sopt->sopt_valsize == 0) {
			data = NULL;
		} else {
			MALLOC(data, void *, sopt->sopt_valsize, M_TEMP, M_WAITOK);
			if (data == NULL)
				return(ENOMEM);
			error = sooptcopyin(sopt, data, sopt->sopt_valsize, sopt->sopt_valsize);
		}
		if (error == 0) {
			socket_unlock(so, 0);
			error = (*kctl->setopt)(kcb->kctl, kcb->unit, kcb->userdata, sopt->sopt_name,
						data, sopt->sopt_valsize);
			socket_lock(so, 0);
		}
		FREE(data, M_TEMP);
		break;

	case SOPT_GET:
		if (kctl->getopt == NULL)
			return(ENOTSUP);
		data = NULL;
		if (sopt->sopt_valsize && sopt->sopt_val) {
			MALLOC(data, void *, sopt->sopt_valsize, M_TEMP, M_WAITOK);
			if (data == NULL)
				return(ENOMEM);
			/* 4108337 - copy in data for get socket option */
			error = sooptcopyin(sopt, data, sopt->sopt_valsize, sopt->sopt_valsize);
		}
		len = sopt->sopt_valsize;
		socket_unlock(so, 0);
		error = (*kctl->getopt)(kcb->kctl, kcb->unit, kcb->userdata, sopt->sopt_name,
					data, &len);
		socket_lock(so, 0);
		if (error == 0) {
			if (data != NULL)
				error = sooptcopyout(sopt, data, len);
			else
				sopt->sopt_valsize = len;
		}
		if (data != NULL)
			FREE(data, M_TEMP);
		break;
	}
	return error;
}

static int
ctl_ioctl(__unused struct socket *so, u_long cmd, caddr_t data,
	  __unused struct ifnet *ifp, __unused struct proc *p)
{
	int error = ENOTSUP;

	switch (cmd) {
	/* get the number of controllers */
	case CTLIOCGCOUNT: {
		struct kctl *kctl;
		int n = 0;

		lck_mtx_lock(ctl_mtx);
		TAILQ_FOREACH(kctl, &ctl_head, next)
			n++;
		lck_mtx_unlock(ctl_mtx);

		*(u_int32_t *)data = n;
		error = 0;
		break;
	}
	case CTLIOCGINFO: {
		struct ctl_info *ctl_info = (struct ctl_info *)data;
		struct kctl *kctl = 0;
		size_t name_len = strlen(ctl_info->ctl_name);

		if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) {
			error = EINVAL;
			break;
		}
		lck_mtx_lock(ctl_mtx);
		kctl = ctl_find_by_name(ctl_info->ctl_name);
		lck_mtx_unlock(ctl_mtx);
		if (kctl == 0) {
			error = ENOENT;
			break;
		}
		ctl_info->ctl_id = kctl->id;
		error = 0;
		break;
	}

	/* add controls to get list of NKEs */

	}

	return error;
}

/*
 * Register/unregister a NKE
 */
errno_t
ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref)
{
	struct kctl *kctl = NULL;
	struct kctl *kctl_next = NULL;
	u_int32_t id = 1;
	size_t name_len;

	if (userkctl == NULL)	/* sanity check */
		return(EINVAL);
	if (userkctl->ctl_connect == NULL)
		return(EINVAL);
	name_len = strlen(userkctl->ctl_name);
	if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME)
		return(EINVAL);

	MALLOC(kctl, struct kctl *, sizeof(*kctl), M_TEMP, M_WAITOK);
	if (kctl == NULL)
		return(ENOMEM);
	bzero((char *)kctl, sizeof(*kctl));

	lck_mtx_lock(ctl_mtx);

	/*
	 * Kernel Control IDs
	 *
	 * CTL_FLAG_REG_ID_UNIT indicates the control ID and unit number are
	 * static. If they do not exist, add them to the list in order. If the
	 * flag is not set, we must find a new unique value. We assume the
	 * list is in order. We find the last item in the list and add one. If
	 * this leads to wrapping the id around, we start at the front of the
	 * list and look for a gap.
	 */

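	/*
	 * Worked example (hypothetical IDs, for illustration only): with
	 * controls { 1, 2, 5 } already registered, TAILQ_LAST() yields 5 and
	 * the next dynamic ID becomes 6.  Had that increment wrapped to 0,
	 * the forward scan below would restart at 1 and stop at the first
	 * gap, assigning 3.
	 */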
	if ((userkctl->ctl_flags & CTL_FLAG_REG_ID_UNIT) == 0) {
		/* Must dynamically assign an unused ID */

		/* Verify the same name isn't already registered */
		if (ctl_find_by_name(userkctl->ctl_name) != NULL) {
			lck_mtx_unlock(ctl_mtx);
			FREE(kctl, M_TEMP);
			return(EEXIST);
		}

		/* Start with 1 in case the list is empty */
		id = 1;
		kctl_next = TAILQ_LAST(&ctl_head, kctl_list);

		if (kctl_next != NULL) {
			/* List was not empty, add one to the last item in the list */
			id = kctl_next->id + 1;
			kctl_next = NULL;

			/*
			 * If this wrapped the id number, start looking at the front
			 * of the list for an unused id.
			 */
			if (id == 0) {
				/* Find the next unused ID */
				id = 1;

				TAILQ_FOREACH(kctl_next, &ctl_head, next) {
					if (kctl_next->id > id) {
						/* We found a gap */
						break;
					}

					id = kctl_next->id + 1;
				}
			}
		}

		userkctl->ctl_id = id;
		kctl->id = id;
		kctl->reg_unit = -1;
	} else {
		TAILQ_FOREACH(kctl_next, &ctl_head, next) {
			if (kctl_next->id > userkctl->ctl_id)
				break;
		}

		if (ctl_find_by_id_unit(userkctl->ctl_id, userkctl->ctl_unit) != NULL) {
			lck_mtx_unlock(ctl_mtx);
			FREE(kctl, M_TEMP);
			return(EEXIST);
		}
		kctl->id = userkctl->ctl_id;
		kctl->reg_unit = userkctl->ctl_unit;
	}
	strlcpy(kctl->name, userkctl->ctl_name, MAX_KCTL_NAME);
	kctl->flags = userkctl->ctl_flags;

	/* Let the caller know the default send and receive sizes */
	if (userkctl->ctl_sendsize == 0)
		userkctl->ctl_sendsize = CTL_SENDSIZE;
	kctl->sendbufsize = userkctl->ctl_sendsize;

	if (userkctl->ctl_recvsize == 0)
		userkctl->ctl_recvsize = CTL_RECVSIZE;
	kctl->recvbufsize = userkctl->ctl_recvsize;

	kctl->connect = userkctl->ctl_connect;
	kctl->disconnect = userkctl->ctl_disconnect;
	kctl->send = userkctl->ctl_send;
	kctl->setopt = userkctl->ctl_setopt;
	kctl->getopt = userkctl->ctl_getopt;

	TAILQ_INIT(&kctl->kcb_head);

	if (kctl_next)
		TAILQ_INSERT_BEFORE(kctl_next, kctl, next);
	else
		TAILQ_INSERT_TAIL(&ctl_head, kctl, next);

	lck_mtx_unlock(ctl_mtx);

	*kctlref = kctl;

	ctl_post_msg(KEV_CTL_REGISTERED, kctl->id);
	return(0);
}

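/*
 * Registration sketch (assumed usage by a kext, not part of this file; names
 * prefixed "my_" and the reverse-DNS control name are placeholders):
 *
 *	struct kern_ctl_reg reg;
 *	kern_ctl_ref ref;
 *
 *	bzero(&reg, sizeof(reg));
 *	strlcpy(reg.ctl_name, "com.example.kctl", sizeof(reg.ctl_name));
 *	reg.ctl_connect = my_ctl_connect;	// required, see the check above
 *	reg.ctl_disconnect = my_ctl_disconnect;
 *	reg.ctl_send = my_ctl_send;
 *	// reg.ctl_sendsize / reg.ctl_recvsize left 0: defaults are filled in
 *	errno_t err = ctl_register(&reg, &ref);
 *	// ... on unload, once every client has disconnected: ctl_deregister(ref);
 */
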
errno_t
ctl_deregister(void *kctlref)
{
	struct kctl *kctl;

	if (kctlref == NULL)	/* sanity check */
		return(EINVAL);

	lck_mtx_lock(ctl_mtx);
	TAILQ_FOREACH(kctl, &ctl_head, next) {
		if (kctl == (struct kctl *)kctlref)
			break;
	}
	if (kctl != (struct kctl *)kctlref) {
		lck_mtx_unlock(ctl_mtx);
		return EINVAL;
	}
	if (!TAILQ_EMPTY(&kctl->kcb_head)) {
		lck_mtx_unlock(ctl_mtx);
		return EBUSY;
	}

	TAILQ_REMOVE(&ctl_head, kctl, next);

	lck_mtx_unlock(ctl_mtx);

	ctl_post_msg(KEV_CTL_DEREGISTERED, kctl->id);
	FREE(kctl, M_TEMP);
	return(0);
}

/*
 * Must be called with global ctl_mtx lock taken
 */
static struct kctl *
ctl_find_by_name(const char *name)
{
	struct kctl *kctl;

	TAILQ_FOREACH(kctl, &ctl_head, next)
		if (strncmp(kctl->name, name, sizeof(kctl->name)) == 0)
			return kctl;

	return NULL;
}

/*
 * Must be called with global ctl_mtx lock taken
 */
static struct kctl *
ctl_find_by_id_unit(u_int32_t id, u_int32_t unit)
{
	struct kctl *kctl;

	TAILQ_FOREACH(kctl, &ctl_head, next) {
		if (kctl->id == id && (kctl->flags & CTL_FLAG_REG_ID_UNIT) == 0)
			return kctl;
		else if (kctl->id == id && kctl->reg_unit == unit)
			return kctl;
	}
	return NULL;
}

/*
 * Must be called with kernel controller lock taken
 */
static struct ctl_cb *
kcb_find(struct kctl *kctl, u_int32_t unit)
{
	struct ctl_cb *kcb;

	TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
		if (kcb->unit == unit)
			return kcb;

	return NULL;
}

/*
 * Must be called without lock
 */
static void
ctl_post_msg(u_long event_code, u_int32_t id)
{
	struct ctl_event_data ctl_ev_data;
	struct kev_msg ev_msg;

	ev_msg.vendor_code = KEV_VENDOR_APPLE;

	ev_msg.kev_class = KEV_SYSTEM_CLASS;
	ev_msg.kev_subclass = KEV_CTL_SUBCLASS;
	ev_msg.event_code = event_code;

	/* common nke subclass data */
	bzero(&ctl_ev_data, sizeof(ctl_ev_data));
	ctl_ev_data.ctl_id = id;
	ev_msg.dv[0].data_ptr = &ctl_ev_data;
	ev_msg.dv[0].data_length = sizeof(ctl_ev_data);

	ev_msg.dv[1].data_length = 0;

	kev_post_msg(&ev_msg);
}

static int
ctl_lock(struct socket *so, int refcount, int lr)
{
	uint32_t lr_saved;

	if (lr == 0)
		lr_saved = (unsigned int) __builtin_return_address(0);
	else
		lr_saved = lr;

	if (so->so_pcb) {
		lck_mtx_lock(((struct ctl_cb *)so->so_pcb)->mtx);
	} else {
		panic("ctl_lock: so=%p NO PCB! lr=%x\n", so, lr_saved);
		lck_mtx_lock(so->so_proto->pr_domain->dom_mtx);
	}

	if (so->so_usecount < 0)
		panic("ctl_lock: so=%p so_pcb=%p lr=%x ref=%x\n",
		      so, so->so_pcb, lr_saved, so->so_usecount);

	if (refcount)
		so->so_usecount++;

	so->lock_lr[so->next_lock_lr] = lr_saved;
	so->next_lock_lr = (so->next_lock_lr+1) % SO_LCKDBG_MAX;
	return (0);
}

static int
ctl_unlock(struct socket *so, int refcount, int lr)
{
	uint32_t lr_saved;
	lck_mtx_t * mutex_held;

	if (lr == 0)
		lr_saved = (unsigned int) __builtin_return_address(0);
	else
		lr_saved = lr;

#ifdef MORE_KCTLLOCK_DEBUG
	printf("ctl_unlock: so=%x sopcb=%x lock=%x ref=%x lr=%x\n",
	       so, so->so_pcb, ((struct ctl_cb *)so->so_pcb)->mtx, so->so_usecount, lr_saved);
#endif
	if (refcount)
		so->so_usecount--;

	if (so->so_usecount < 0)
		panic("ctl_unlock: so=%p usecount=%x\n", so, so->so_usecount);
	if (so->so_pcb == NULL) {
		panic("ctl_unlock: so=%p NO PCB usecount=%x lr=%x\n", so, so->so_usecount, lr_saved);
		mutex_held = so->so_proto->pr_domain->dom_mtx;
	} else {
		mutex_held = ((struct ctl_cb *)so->so_pcb)->mtx;
	}
	lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
	so->unlock_lr[so->next_unlock_lr] = lr_saved;
	so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX;
	lck_mtx_unlock(mutex_held);

	if (so->so_usecount == 0)
		ctl_sofreelastref(so);

	return (0);
}

static lck_mtx_t *
ctl_getlock(struct socket *so, __unused int locktype)
{
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

	if (so->so_pcb) {
		if (so->so_usecount < 0)
			panic("ctl_getlock: so=%p usecount=%x\n", so, so->so_usecount);
		return(kcb->mtx);
	} else {
		panic("ctl_getlock: so=%p NULL so_pcb\n", so);
		return (so->so_proto->pr_domain->dom_mtx);
	}
}