]> git.saurik.com Git - apple/xnu.git/blame - bsd/kern/kern_control.c
xnu-2782.40.9.tar.gz
[apple/xnu.git] / bsd / kern / kern_control.c
CommitLineData
9bccf70c 1/*
04b8595b 2 * Copyright (c) 1999-2015 Apple Inc. All rights reserved.
9bccf70c 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
fe8ab488 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
fe8ab488 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
fe8ab488 17 *
2d21ac55
A
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
fe8ab488 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
9bccf70c 27 */
9bccf70c
A
28
29/*
91447636
A
30 * Kernel Control domain - allows control connections to
31 * and to read/write data.
9bccf70c 32 *
91447636 33 * Vincent Lubet, 040506
9bccf70c
A
34 * Christophe Allie, 010928
35 * Justin C. Walker, 990319
36 */
37
38#include <sys/types.h>
39#include <sys/param.h>
40#include <sys/systm.h>
41#include <sys/syslog.h>
42#include <sys/socket.h>
43#include <sys/socketvar.h>
44#include <sys/protosw.h>
45#include <sys/domain.h>
46#include <sys/malloc.h>
47#include <sys/mbuf.h>
9bccf70c
A
48#include <sys/sys_domain.h>
49#include <sys/kern_event.h>
50#include <sys/kern_control.h>
2d21ac55 51#include <sys/kauth.h>
fe8ab488 52#include <sys/sysctl.h>
9bccf70c
A
53#include <net/if_var.h>
54
55#include <mach/vm_types.h>
9bccf70c
A
56
57#include <kern/thread.h>
58
fe8ab488
A
59#ifndef ROUNDUP64
60#define ROUNDUP64(x) P2ROUNDUP((x), sizeof (u_int64_t))
61#endif
62
63#ifndef ADVANCE64
64#define ADVANCE64(p, n) (void*)((char *)(p) + ROUNDUP64(n))
65#endif
66
9bccf70c
A
/*
 * Default buffer sizes for the kernel controls we support
 */
70
fe8ab488
A
71#define CTL_SENDSIZE (2 * 1024) /* default buffer size */
72#define CTL_RECVSIZE (8 * 1024) /* default buffer size */
9bccf70c
A
73
/*
 * Global state (locks and control list) for the kernel controls we support
 */
9bccf70c 77
fe8ab488 78static u_int32_t ctl_maxunit = 65536;
91447636 79static lck_grp_attr_t *ctl_lck_grp_attr = 0;
fe8ab488
A
80static lck_attr_t *ctl_lck_attr = 0;
81static lck_grp_t *ctl_lck_grp = 0;
82static lck_mtx_t *ctl_mtx;
9bccf70c
A
83
84/* all the controllers are chained */
2d21ac55 85TAILQ_HEAD(kctl_list, kctl) ctl_head;
91447636 86
fe8ab488 87
91447636
A
88static int ctl_attach(struct socket *, int, struct proc *);
89static int ctl_detach(struct socket *);
90static int ctl_sofreelastref(struct socket *so);
91static int ctl_connect(struct socket *, struct sockaddr *, struct proc *);
92static int ctl_disconnect(struct socket *);
93static int ctl_ioctl(struct socket *so, u_long cmd, caddr_t data,
fe8ab488 94 struct ifnet *ifp, struct proc *p);
91447636 95static int ctl_send(struct socket *, int, struct mbuf *,
fe8ab488
A
96 struct sockaddr *, struct mbuf *, struct proc *);
97static int ctl_send_list(struct socket *, int, struct mbuf *,
98 struct sockaddr *, struct mbuf *, struct proc *);
91447636
A
99static int ctl_ctloutput(struct socket *, struct sockopt *);
100static int ctl_peeraddr(struct socket *so, struct sockaddr **nam);
39236c6e 101static int ctl_usr_rcvd(struct socket *so, int flags);
91447636 102
91447636
A
103static struct kctl *ctl_find_by_name(const char *);
104static struct kctl *ctl_find_by_id_unit(u_int32_t id, u_int32_t unit);
9bccf70c 105
6d2010ae 106static struct socket *kcb_find_socket(struct kctl *, u_int32_t unit);
91447636 107static struct ctl_cb *kcb_find(struct kctl *, u_int32_t unit);
b0d623f7 108static void ctl_post_msg(u_int32_t event_code, u_int32_t id);
9bccf70c 109
b0d623f7
A
110static int ctl_lock(struct socket *, int, void *);
111static int ctl_unlock(struct socket *, int, void *);
91447636 112static lck_mtx_t * ctl_getlock(struct socket *, int);
9bccf70c 113
39236c6e
A
114static struct pr_usrreqs ctl_usrreqs = {
115 .pru_attach = ctl_attach,
116 .pru_connect = ctl_connect,
117 .pru_control = ctl_ioctl,
118 .pru_detach = ctl_detach,
119 .pru_disconnect = ctl_disconnect,
120 .pru_peeraddr = ctl_peeraddr,
121 .pru_rcvd = ctl_usr_rcvd,
122 .pru_send = ctl_send,
fe8ab488 123 .pru_send_list = ctl_send_list,
39236c6e 124 .pru_sosend = sosend,
fe8ab488 125 .pru_sosend_list = sosend_list,
39236c6e 126 .pru_soreceive = soreceive,
fe8ab488 127 .pru_soreceive_list = soreceive_list,
91447636
A
128};
129
39236c6e 130static struct protosw kctlsw[] = {
91447636 131{
fe8ab488
A
132 .pr_type = SOCK_DGRAM,
133 .pr_protocol = SYSPROTO_CONTROL,
134 .pr_flags = PR_ATOMIC|PR_CONNREQUIRED|PR_PCBLOCK|PR_WANTRCVD,
135 .pr_ctloutput = ctl_ctloutput,
136 .pr_usrreqs = &ctl_usrreqs,
137 .pr_lock = ctl_lock,
138 .pr_unlock = ctl_unlock,
139 .pr_getlock = ctl_getlock,
39236c6e 140},
9bccf70c 141{
fe8ab488
A
142 .pr_type = SOCK_STREAM,
143 .pr_protocol = SYSPROTO_CONTROL,
144 .pr_flags = PR_CONNREQUIRED|PR_PCBLOCK|PR_WANTRCVD,
145 .pr_ctloutput = ctl_ctloutput,
146 .pr_usrreqs = &ctl_usrreqs,
147 .pr_lock = ctl_lock,
148 .pr_unlock = ctl_unlock,
149 .pr_getlock = ctl_getlock,
39236c6e 150}
9bccf70c
A
151};
152
fe8ab488
A
153__private_extern__ int kctl_reg_list SYSCTL_HANDLER_ARGS;
154__private_extern__ int kctl_pcblist SYSCTL_HANDLER_ARGS;
155__private_extern__ int kctl_getstat SYSCTL_HANDLER_ARGS;
156
39236c6e 157static int kctl_proto_count = (sizeof (kctlsw) / sizeof (struct protosw));
91447636 158
fe8ab488
A
159SYSCTL_NODE(_net_systm, OID_AUTO, kctl,
160 CTLFLAG_RW|CTLFLAG_LOCKED, 0, "Kernel control family");
161
162struct kctlstat kctlstat;
163SYSCTL_PROC(_net_systm_kctl, OID_AUTO, stats,
164 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
165 kctl_getstat, "S,kctlstat", "");
166
167SYSCTL_PROC(_net_systm_kctl, OID_AUTO, reg_list,
168 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
169 kctl_reg_list, "S,xkctl_reg", "");
170
171SYSCTL_PROC(_net_systm_kctl, OID_AUTO, pcblist,
172 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
173 kctl_pcblist, "S,xkctlpcb", "");
174
175u_int32_t ctl_autorcvbuf_max = 256 * 1024;
176SYSCTL_INT(_net_systm_kctl, OID_AUTO, autorcvbufmax,
177 CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_autorcvbuf_max, 0, "");
178
179u_int32_t ctl_autorcvbuf_high = 0;
180SYSCTL_INT(_net_systm_kctl, OID_AUTO, autorcvbufhigh,
181 CTLFLAG_RD | CTLFLAG_LOCKED, &ctl_autorcvbuf_high, 0, "");
182
183u_int32_t ctl_debug = 0;
184SYSCTL_INT(_net_systm_kctl, OID_AUTO, debug,
185 CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_debug, 0, "");
186
9bccf70c 187/*
91447636 188 * Install the protosw's for the Kernel Control manager.
9bccf70c 189 */
39236c6e
A
190__private_extern__ void
191kern_control_init(struct domain *dp)
9bccf70c 192{
39236c6e
A
193 struct protosw *pr;
194 int i;
195
196 VERIFY(!(dp->dom_flags & DOM_INITIALIZED));
197 VERIFY(dp == systemdomain);
198
91447636 199 ctl_lck_grp_attr = lck_grp_attr_alloc_init();
39236c6e
A
200 if (ctl_lck_grp_attr == NULL) {
201 panic("%s: lck_grp_attr_alloc_init failed\n", __func__);
202 /* NOTREACHED */
91447636 203 }
39236c6e
A
204
205 ctl_lck_grp = lck_grp_alloc_init("Kernel Control Protocol",
206 ctl_lck_grp_attr);
207 if (ctl_lck_grp == NULL) {
208 panic("%s: lck_grp_alloc_init failed\n", __func__);
209 /* NOTREACHED */
91447636 210 }
39236c6e 211
91447636 212 ctl_lck_attr = lck_attr_alloc_init();
39236c6e
A
213 if (ctl_lck_attr == NULL) {
214 panic("%s: lck_attr_alloc_init failed\n", __func__);
215 /* NOTREACHED */
91447636 216 }
39236c6e 217
91447636 218 ctl_mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
39236c6e
A
219 if (ctl_mtx == NULL) {
220 panic("%s: lck_mtx_alloc_init failed\n", __func__);
221 /* NOTREACHED */
91447636
A
222 }
223 TAILQ_INIT(&ctl_head);
39236c6e
A
224
225 for (i = 0, pr = &kctlsw[0]; i < kctl_proto_count; i++, pr++)
226 net_add_proto(pr, dp, 1);
91447636 227}
9bccf70c 228
91447636
A
229static void
230kcb_delete(struct ctl_cb *kcb)
231{
232 if (kcb != 0) {
233 if (kcb->mtx != 0)
234 lck_mtx_free(kcb->mtx, ctl_lck_grp);
235 FREE(kcb, M_TEMP);
236 }
9bccf70c
A
237}
238
9bccf70c
A
239/*
240 * Kernel Controller user-request functions
fe8ab488
A
241 * attach function must exist and succeed
242 * detach not necessary
91447636 243 * we need a pcb for the per socket mutex
9bccf70c 244 */
91447636 245static int
fe8ab488
A
246ctl_attach(struct socket *so, int proto, struct proc *p)
247{
248#pragma unused(proto, p)
91447636
A
249 int error = 0;
250 struct ctl_cb *kcb = 0;
251
252 MALLOC(kcb, struct ctl_cb *, sizeof(struct ctl_cb), M_TEMP, M_WAITOK);
253 if (kcb == NULL) {
254 error = ENOMEM;
255 goto quit;
256 }
257 bzero(kcb, sizeof(struct ctl_cb));
fe8ab488 258
91447636
A
259 kcb->mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
260 if (kcb->mtx == NULL) {
261 error = ENOMEM;
262 goto quit;
263 }
264 kcb->so = so;
265 so->so_pcb = (caddr_t)kcb;
fe8ab488 266
91447636
A
267quit:
268 if (error != 0) {
269 kcb_delete(kcb);
270 kcb = 0;
271 }
fe8ab488 272 return (error);
91447636
A
273}
274
275static int
276ctl_sofreelastref(struct socket *so)
277{
fe8ab488
A
278 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
279
280 so->so_pcb = 0;
281
282 if (kcb != 0) {
283 struct kctl *kctl;
284 if ((kctl = kcb->kctl) != 0) {
285 lck_mtx_lock(ctl_mtx);
286 TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
287 kctlstat.kcs_pcbcount--;
288 kctlstat.kcs_gencnt++;
289 lck_mtx_unlock(ctl_mtx);
290 }
291 kcb_delete(kcb);
292 }
293 sofreelastref(so, 1);
294 return (0);
91447636
A
295}
296
297static int
298ctl_detach(struct socket *so)
299{
fe8ab488
A
300 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
301
302 if (kcb == 0)
303 return (0);
304
305 soisdisconnected(so);
306 so->so_flags |= SOF_PCBCLEARING;
307 return (0);
9bccf70c
A
308}
309
91447636
A
310
311static int
fe8ab488
A
312ctl_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
313{
314#pragma unused(p)
315 struct kctl *kctl;
316 int error = 0;
317 struct sockaddr_ctl sa;
318 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
319 struct ctl_cb *kcb_next = NULL;
04b8595b
A
320 u_quad_t sbmaxsize;
321 u_int32_t recvbufsize, sendbufsize;
fe8ab488
A
322
323 if (kcb == 0)
324 panic("ctl_connect so_pcb null\n");
325
326 if (nam->sa_len != sizeof(struct sockaddr_ctl))
327 return (EINVAL);
328
329 bcopy(nam, &sa, sizeof(struct sockaddr_ctl));
330
331 lck_mtx_lock(ctl_mtx);
332 kctl = ctl_find_by_id_unit(sa.sc_id, sa.sc_unit);
333 if (kctl == NULL) {
334 lck_mtx_unlock(ctl_mtx);
335 return (ENOENT);
336 }
337
338 if (((kctl->flags & CTL_FLAG_REG_SOCK_STREAM) &&
339 (so->so_type != SOCK_STREAM)) ||
340 (!(kctl->flags & CTL_FLAG_REG_SOCK_STREAM) &&
341 (so->so_type != SOCK_DGRAM))) {
342 lck_mtx_unlock(ctl_mtx);
343 return (EPROTOTYPE);
344 }
345
346 if (kctl->flags & CTL_FLAG_PRIVILEGED) {
347 if (p == 0) {
348 lck_mtx_unlock(ctl_mtx);
349 return (EINVAL);
350 }
351 if (kauth_cred_issuser(kauth_cred_get()) == 0) {
352 lck_mtx_unlock(ctl_mtx);
353 return (EPERM);
354 }
355 }
91447636
A
356
357 if ((kctl->flags & CTL_FLAG_REG_ID_UNIT) || sa.sc_unit != 0) {
358 if (kcb_find(kctl, sa.sc_unit) != NULL) {
359 lck_mtx_unlock(ctl_mtx);
fe8ab488 360 return (EBUSY);
91447636
A
361 }
362 } else {
fe8ab488
A
363 /* Find an unused ID, assumes control IDs are in order */
364 u_int32_t unit = 1;
365
366 TAILQ_FOREACH(kcb_next, &kctl->kcb_head, next) {
367 if (kcb_next->unit > unit) {
368 /* Found a gap, lets fill it in */
369 break;
370 }
371 unit = kcb_next->unit + 1;
372 if (unit == ctl_maxunit)
373 break;
374 }
375
2d21ac55
A
376 if (unit == ctl_maxunit) {
377 lck_mtx_unlock(ctl_mtx);
fe8ab488 378 return (EBUSY);
2d21ac55 379 }
fe8ab488 380
2d21ac55 381 sa.sc_unit = unit;
fe8ab488 382 }
55e303ae 383
91447636 384 kcb->unit = sa.sc_unit;
fe8ab488
A
385 kcb->kctl = kctl;
386 if (kcb_next != NULL) {
387 TAILQ_INSERT_BEFORE(kcb_next, kcb, next);
388 } else {
2d21ac55
A
389 TAILQ_INSERT_TAIL(&kctl->kcb_head, kcb, next);
390 }
fe8ab488
A
391 kctlstat.kcs_pcbcount++;
392 kctlstat.kcs_gencnt++;
393 kctlstat.kcs_connections++;
394 lck_mtx_unlock(ctl_mtx);
9bccf70c 395
04b8595b
A
396 /*
397 * rdar://15526688: Limit the send and receive sizes to sb_max
398 * by using the same scaling as sbreserve()
399 */
400 sbmaxsize = (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES);
401
402 if (kctl->sendbufsize > sbmaxsize)
403 sendbufsize = sbmaxsize;
404 else
405 sendbufsize = kctl->sendbufsize;
406
407 if (kctl->recvbufsize > sbmaxsize)
408 recvbufsize = sbmaxsize;
409 else
410 recvbufsize = kctl->recvbufsize;
411
412 error = soreserve(so, sendbufsize, recvbufsize);
fe8ab488
A
413 if (error) {
414 printf("%s - soreserve(%llx, %u, %u) error %d\n", __func__,
415 (uint64_t)VM_KERNEL_ADDRPERM(so),
04b8595b 416 sendbufsize, recvbufsize, error);
91447636 417 goto done;
fe8ab488
A
418 }
419 soisconnecting(so);
420
91447636 421 socket_unlock(so, 0);
fe8ab488 422 error = (*kctl->connect)(kctl, &sa, &kcb->userdata);
91447636 423 socket_lock(so, 0);
fe8ab488 424 if (error)
6d2010ae 425 goto end;
fe8ab488
A
426
427 soisconnected(so);
91447636 428
6d2010ae
A
429end:
430 if (error && kctl->disconnect) {
431 socket_unlock(so, 0);
432 (*kctl->disconnect)(kctl, kcb->unit, kcb->userdata);
433 socket_lock(so, 0);
434 }
91447636 435done:
fe8ab488
A
436 if (error) {
437 soisdisconnected(so);
438 lck_mtx_lock(ctl_mtx);
439 kcb->kctl = 0;
440 kcb->unit = 0;
441 TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
442 kctlstat.kcs_pcbcount--;
443 kctlstat.kcs_gencnt++;
444 kctlstat.kcs_conn_fail++;
445 lck_mtx_unlock(ctl_mtx);
446 }
447 return (error);
9bccf70c
A
448}
449
91447636 450static int
9bccf70c
A
451ctl_disconnect(struct socket *so)
452{
fe8ab488
A
453 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
454
455 if ((kcb = (struct ctl_cb *)so->so_pcb)) {
456 struct kctl *kctl = kcb->kctl;
457
458 if (kctl && kctl->disconnect) {
459 socket_unlock(so, 0);
460 (*kctl->disconnect)(kctl, kcb->unit, kcb->userdata);
461 socket_lock(so, 0);
462 }
463
464 soisdisconnected(so);
465
6d2010ae 466 socket_unlock(so, 0);
fe8ab488
A
467 lck_mtx_lock(ctl_mtx);
468 kcb->kctl = 0;
469 kcb->unit = 0;
470 while (kcb->usecount != 0) {
471 msleep(&kcb->usecount, ctl_mtx, 0, "kcb->usecount", 0);
472 }
473 TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
474 kctlstat.kcs_pcbcount--;
475 kctlstat.kcs_gencnt++;
476 lck_mtx_unlock(ctl_mtx);
6d2010ae 477 socket_lock(so, 0);
fe8ab488
A
478 }
479 return (0);
9bccf70c
A
480}
481
91447636
A
482static int
483ctl_peeraddr(struct socket *so, struct sockaddr **nam)
9bccf70c 484{
91447636
A
485 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
486 struct kctl *kctl;
487 struct sockaddr_ctl sc;
fe8ab488 488
91447636 489 if (kcb == NULL) /* sanity check */
fe8ab488
A
490 return (ENOTCONN);
491
91447636 492 if ((kctl = kcb->kctl) == NULL)
fe8ab488
A
493 return (EINVAL);
494
91447636
A
495 bzero(&sc, sizeof(struct sockaddr_ctl));
496 sc.sc_len = sizeof(struct sockaddr_ctl);
497 sc.sc_family = AF_SYSTEM;
498 sc.ss_sysaddr = AF_SYS_CONTROL;
499 sc.sc_id = kctl->id;
500 sc.sc_unit = kcb->unit;
fe8ab488 501
91447636 502 *nam = dup_sockaddr((struct sockaddr *)&sc, 1);
fe8ab488
A
503
504 return (0);
505}
506
507static void
508ctl_sbrcv_trim(struct socket *so)
509{
510 struct sockbuf *sb = &so->so_rcv;
511
512 if (sb->sb_hiwat > sb->sb_idealsize) {
513 u_int32_t diff;
514 int32_t trim;
515
516 /*
517 * The difference between the ideal size and the
518 * current size is the upper bound of the trimage
519 */
520 diff = sb->sb_hiwat - sb->sb_idealsize;
521 /*
522 * We cannot trim below the outstanding data
523 */
524 trim = sb->sb_hiwat - sb->sb_cc;
525
526 trim = imin(trim, (int32_t)diff);
527
528 if (trim > 0) {
529 sbreserve(sb, (sb->sb_hiwat - trim));
530
531 if (ctl_debug)
532 printf("%s - shrunk to %d\n",
533 __func__, sb->sb_hiwat);
534 }
535 }
9bccf70c
A
536}
537
39236c6e
A
538static int
539ctl_usr_rcvd(struct socket *so, int flags)
540{
541 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
542 struct kctl *kctl;
543
544 if ((kctl = kcb->kctl) == NULL) {
fe8ab488 545 return (EINVAL);
39236c6e
A
546 }
547
548 if (kctl->rcvd) {
549 socket_unlock(so, 0);
550 (*kctl->rcvd)(kctl, kcb->unit, kcb->userdata, flags);
551 socket_lock(so, 0);
552 }
553
fe8ab488
A
554 ctl_sbrcv_trim(so);
555
556 return (0);
39236c6e
A
557}
558
91447636
A
559static int
560ctl_send(struct socket *so, int flags, struct mbuf *m,
fe8ab488
A
561 struct sockaddr *addr, struct mbuf *control,
562 struct proc *p)
9bccf70c 563{
fe8ab488
A
564#pragma unused(addr, p)
565 int error = 0;
91447636 566 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
fe8ab488
A
567 struct kctl *kctl;
568
569 if (control)
570 m_freem(control);
571
91447636 572 if (kcb == NULL) /* sanity check */
6d2010ae 573 error = ENOTCONN;
fe8ab488 574
6d2010ae
A
575 if (error == 0 && (kctl = kcb->kctl) == NULL)
576 error = EINVAL;
fe8ab488 577
6d2010ae 578 if (error == 0 && kctl->send) {
fe8ab488 579 so_tc_update_stats(m, so, m_get_service_class(m));
91447636
A
580 socket_unlock(so, 0);
581 error = (*kctl->send)(kctl, kcb->unit, kcb->userdata, m, flags);
582 socket_lock(so, 0);
6d2010ae
A
583 } else {
584 m_freem(m);
585 if (error == 0)
586 error = ENOTSUP;
91447636 587 }
fe8ab488
A
588 if (error != 0)
589 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_send_fail);
590 return (error);
591}
592
593static int
594ctl_send_list(struct socket *so, int flags, struct mbuf *m,
595 __unused struct sockaddr *addr, struct mbuf *control,
596 __unused struct proc *p)
597{
598 int error = 0;
599 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
600 struct kctl *kctl;
601
602 if (control)
603 m_freem_list(control);
604
605 if (kcb == NULL) /* sanity check */
606 error = ENOTCONN;
607
608 if (error == 0 && (kctl = kcb->kctl) == NULL)
609 error = EINVAL;
610
611 if (error == 0 && kctl->send_list) {
612 struct mbuf *nxt;
613
614 for (nxt = m; nxt != NULL; nxt = nxt->m_nextpkt)
615 so_tc_update_stats(nxt, so, m_get_service_class(nxt));
616
617 socket_unlock(so, 0);
618 error = (*kctl->send_list)(kctl, kcb->unit, kcb->userdata, m,
619 flags);
620 socket_lock(so, 0);
621 } else if (error == 0 && kctl->send) {
622 while (m != NULL && error == 0) {
623 struct mbuf *nextpkt = m->m_nextpkt;
624
625 m->m_nextpkt = NULL;
626 so_tc_update_stats(m, so, m_get_service_class(m));
627 socket_unlock(so, 0);
628 error = (*kctl->send)(kctl, kcb->unit, kcb->userdata, m,
629 flags);
630 socket_lock(so, 0);
631 m = nextpkt;
632 }
633 if (m != NULL)
634 m_freem_list(m);
635 } else {
636 m_freem_list(m);
637 if (error == 0)
638 error = ENOTSUP;
639 }
640 if (error != 0)
641 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_send_list_fail);
642 return (error);
643}
644
645static errno_t
646ctl_rcvbspace(struct kctl *kctl, struct socket *so, u_int32_t datasize,
647 u_int32_t flags)
648{
649 struct sockbuf *sb = &so->so_rcv;
650 u_int32_t space = sbspace(sb);
651 errno_t error;
04b8595b 652
fe8ab488
A
653 if ((kctl->flags & CTL_FLAG_REG_CRIT) == 0) {
654 if ((u_int32_t) space >= datasize)
655 error = 0;
656 else
657 error = ENOBUFS;
658 } else if ((flags & CTL_DATA_CRIT) == 0) {
659 /*
660 * Reserve 25% for critical messages
661 */
662 if (space < (sb->sb_hiwat >> 2) ||
663 space < datasize)
664 error = ENOBUFS;
665 else
666 error = 0;
667 } else {
668 u_int32_t autorcvbuf_max;
669
670 /*
671 * Allow overcommit of 25%
672 */
673 autorcvbuf_max = min(sb->sb_idealsize + (sb->sb_idealsize >> 2),
674 ctl_autorcvbuf_max);
675
676 if ((u_int32_t) space >= datasize) {
677 error = 0;
678 } else if (tcp_cansbgrow(sb) &&
679 sb->sb_hiwat < autorcvbuf_max) {
680 /*
681 * Grow with a little bit of leeway
682 */
683 u_int32_t grow = datasize - space + MSIZE;
684
685 if (sbreserve(sb,
686 min((sb->sb_hiwat + grow), autorcvbuf_max)) == 1) {
687
688 if (sb->sb_hiwat > ctl_autorcvbuf_high)
689 ctl_autorcvbuf_high = sb->sb_hiwat;
690
691 if (ctl_debug)
692 printf("%s - grown to %d\n",
693 __func__, sb->sb_hiwat);
694 error = 0;
695 } else {
696 error = ENOBUFS;
697 }
698 } else {
699 error = ENOBUFS;
700 }
701 }
702 return (error);
9bccf70c
A
703}
704
91447636
A
705errno_t
706ctl_enqueuembuf(void *kctlref, u_int32_t unit, struct mbuf *m, u_int32_t flags)
9bccf70c 707{
91447636 708 struct socket *so;
fe8ab488
A
709 errno_t error = 0;
710 struct kctl *kctl = (struct kctl *)kctlref;
711 int len = m->m_pkthdr.len;
712
91447636 713 if (kctl == NULL)
fe8ab488
A
714 return (EINVAL);
715
6d2010ae 716 so = kcb_find_socket(kctl, unit);
fe8ab488 717
6d2010ae 718 if (so == NULL)
fe8ab488
A
719 return (EINVAL);
720
721 if (ctl_rcvbspace(kctl, so, len, flags) != 0) {
91447636 722 error = ENOBUFS;
fe8ab488 723 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
91447636
A
724 goto bye;
725 }
726 if ((flags & CTL_DATA_EOR))
727 m->m_flags |= M_EOR;
fe8ab488
A
728
729 so_recv_data_stat(so, m, 0);
730 if (sbappend(&so->so_rcv, m) != 0) {
731 if ((flags & CTL_DATA_NOWAKEUP) == 0)
732 sorwakeup(so);
733 } else {
734 error = ENOBUFS;
735 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
736 }
91447636 737bye:
fe8ab488
A
738 if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT))
739 printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
740 __func__, error, len,
741 so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);
742
91447636 743 socket_unlock(so, 1);
fe8ab488
A
744 if (error != 0)
745 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
746
747 return (error);
748}
749
750/*
751 * Compute space occupied by mbuf like sbappendrecord
752 */
753static int
754m_space(struct mbuf *m)
755{
756 int space = 0;
757 struct mbuf *nxt;
758
759 for (nxt = m; nxt != NULL; nxt = nxt->m_next)
760 space += nxt->m_len;
761
762 return (space);
763}
764
765errno_t
766ctl_enqueuembuf_list(void *kctlref, u_int32_t unit, struct mbuf *m_list,
767 u_int32_t flags, struct mbuf **m_remain)
768{
769 struct socket *so = NULL;
770 errno_t error = 0;
771 struct kctl *kctl = (struct kctl *)kctlref;
772 struct mbuf *m, *nextpkt;
773 int needwakeup = 0;
774 int len;
775
776 /*
777 * Need to point the beginning of the list in case of early exit
778 */
779 m = m_list;
780
781 if (kctl == NULL) {
782 error = EINVAL;
783 goto done;
784 }
785 if (kctl->flags & CTL_FLAG_REG_SOCK_STREAM) {
786 error = EOPNOTSUPP;
787 goto done;
788 }
789 if (flags & CTL_DATA_EOR) {
790 error = EINVAL;
791 goto done;
792 }
793 /*
794 * kcb_find_socket takes the socket lock with a reference
795 */
796 so = kcb_find_socket(kctl, unit);
797 if (so == NULL) {
798 error = EINVAL;
799 goto done;
800 }
801
802 for (m = m_list; m != NULL; m = nextpkt) {
803 nextpkt = m->m_nextpkt;
804
805 if (m->m_pkthdr.len == 0)
806 printf("%s: %llx m_pkthdr.len is 0",
807 __func__, (uint64_t)VM_KERNEL_ADDRPERM(m));
808
809 /*
810 * The mbuf is either appended or freed by sbappendrecord()
811 * so it's not reliable from a data standpoint
812 */
813 len = m_space(m);
814 if (ctl_rcvbspace(kctl, so, len, flags) != 0) {
815 error = ENOBUFS;
816 OSIncrementAtomic64(
817 (SInt64 *)&kctlstat.kcs_enqueue_fullsock);
818 break;
819 } else {
820 /*
821 * Unlink from the list, m is on its own
822 */
823 m->m_nextpkt = NULL;
824 so_recv_data_stat(so, m, 0);
825 if (sbappendrecord(&so->so_rcv, m) != 0) {
826 needwakeup = 1;
827 } else {
828 /*
829 * We free or return the remaining
830 * mbufs in the list
831 */
832 m = nextpkt;
833 error = ENOBUFS;
834 OSIncrementAtomic64(
835 (SInt64 *)&kctlstat.kcs_enqueue_fullsock);
836 break;
837 }
838 }
839 }
840 if (needwakeup && (flags & CTL_DATA_NOWAKEUP) == 0)
841 sorwakeup(so);
842
843done:
844 if (so != NULL) {
845 if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT))
846 printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
847 __func__, error, len,
848 so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);
849
850 socket_unlock(so, 1);
851 }
852 if (m_remain) {
853 *m_remain = m;
854
855 if (m != NULL && socket_debug && so != NULL &&
856 (so->so_options & SO_DEBUG)) {
857 struct mbuf *n;
858
859 printf("%s m_list %llx\n", __func__,
860 (uint64_t) VM_KERNEL_ADDRPERM(m_list));
861 for (n = m; n != NULL; n = n->m_nextpkt)
862 printf(" remain %llx m_next %llx\n",
863 (uint64_t) VM_KERNEL_ADDRPERM(n),
864 (uint64_t) VM_KERNEL_ADDRPERM(n->m_next));
865 }
866 } else {
867 if (m != NULL)
868 m_freem_list(m);
869 }
870 if (error != 0)
871 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
872 return (error);
91447636 873}
9bccf70c 874
91447636 875errno_t
fe8ab488
A
876ctl_enqueuedata(void *kctlref, u_int32_t unit, void *data, size_t len,
877 u_int32_t flags)
91447636 878{
91447636
A
879 struct socket *so;
880 struct mbuf *m;
fe8ab488
A
881 errno_t error = 0;
882 struct kctl *kctl = (struct kctl *)kctlref;
91447636
A
883 unsigned int num_needed;
884 struct mbuf *n;
fe8ab488
A
885 size_t curlen = 0;
886
91447636 887 if (kctlref == NULL)
fe8ab488
A
888 return (EINVAL);
889
6d2010ae
A
890 so = kcb_find_socket(kctl, unit);
891 if (so == NULL)
fe8ab488
A
892 return (EINVAL);
893
894 if (ctl_rcvbspace(kctl, so, len, flags) != 0) {
91447636 895 error = ENOBUFS;
fe8ab488 896 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
91447636
A
897 goto bye;
898 }
899
900 num_needed = 1;
901 m = m_allocpacket_internal(&num_needed, len, NULL, M_NOWAIT, 1, 0);
902 if (m == NULL) {
fe8ab488
A
903 printf("ctl_enqueuedata: m_allocpacket_internal(%lu) failed\n",
904 len);
905 error = ENOMEM;
91447636
A
906 goto bye;
907 }
fe8ab488 908
91447636
A
909 for (n = m; n != NULL; n = n->m_next) {
910 size_t mlen = mbuf_maxlen(n);
fe8ab488 911
91447636
A
912 if (mlen + curlen > len)
913 mlen = len - curlen;
914 n->m_len = mlen;
915 bcopy((char *)data + curlen, n->m_data, mlen);
916 curlen += mlen;
917 }
918 mbuf_pkthdr_setlen(m, curlen);
919
920 if ((flags & CTL_DATA_EOR))
921 m->m_flags |= M_EOR;
fe8ab488
A
922 so_recv_data_stat(so, m, 0);
923 if (sbappend(&so->so_rcv, m) != 0) {
924 if ((flags & CTL_DATA_NOWAKEUP) == 0)
925 sorwakeup(so);
926 } else {
927 error = ENOBUFS;
928 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
929 }
930
91447636 931bye:
fe8ab488
A
932 if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT))
933 printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
934 __func__, error, (int)len,
935 so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);
936
91447636 937 socket_unlock(so, 1);
fe8ab488
A
938 if (error != 0)
939 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
940 return (error);
91447636 941}
9bccf70c 942
55e303ae 943
fe8ab488 944errno_t
91447636
A
945ctl_getenqueuespace(kern_ctl_ref kctlref, u_int32_t unit, size_t *space)
946{
91447636
A
947 struct kctl *kctl = (struct kctl *)kctlref;
948 struct socket *so;
2d21ac55 949 long avail;
fe8ab488 950
91447636 951 if (kctlref == NULL || space == NULL)
fe8ab488
A
952 return (EINVAL);
953
6d2010ae
A
954 so = kcb_find_socket(kctl, unit);
955 if (so == NULL)
fe8ab488
A
956 return (EINVAL);
957
2d21ac55
A
958 avail = sbspace(&so->so_rcv);
959 *space = (avail < 0) ? 0 : avail;
91447636 960 socket_unlock(so, 1);
fe8ab488
A
961
962 return (0);
963}
964
965errno_t
966ctl_getenqueuereadable(kern_ctl_ref kctlref, u_int32_t unit,
967 u_int32_t *difference)
968{
969 struct kctl *kctl = (struct kctl *)kctlref;
970 struct socket *so;
971
972 if (kctlref == NULL || difference == NULL)
973 return (EINVAL);
974
975 so = kcb_find_socket(kctl, unit);
976 if (so == NULL)
977 return (EINVAL);
978
979 if (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat) {
980 *difference = 0;
981 } else {
982 *difference = (so->so_rcv.sb_lowat - so->so_rcv.sb_cc);
983 }
984 socket_unlock(so, 1);
985
986 return (0);
9bccf70c
A
987}
988
91447636 989static int
9bccf70c
A
990ctl_ctloutput(struct socket *so, struct sockopt *sopt)
991{
91447636
A
992 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
993 struct kctl *kctl;
994 int error = 0;
995 void *data;
996 size_t len;
fe8ab488 997
91447636 998 if (sopt->sopt_level != SYSPROTO_CONTROL) {
fe8ab488 999 return (EINVAL);
91447636 1000 }
fe8ab488 1001
91447636 1002 if (kcb == NULL) /* sanity check */
fe8ab488
A
1003 return (ENOTCONN);
1004
91447636 1005 if ((kctl = kcb->kctl) == NULL)
fe8ab488
A
1006 return (EINVAL);
1007
91447636
A
1008 switch (sopt->sopt_dir) {
1009 case SOPT_SET:
1010 if (kctl->setopt == NULL)
fe8ab488 1011 return (ENOTSUP);
2d21ac55
A
1012 if (sopt->sopt_valsize == 0) {
1013 data = NULL;
1014 } else {
fe8ab488
A
1015 MALLOC(data, void *, sopt->sopt_valsize, M_TEMP,
1016 M_WAITOK);
2d21ac55 1017 if (data == NULL)
fe8ab488
A
1018 return (ENOMEM);
1019 error = sooptcopyin(sopt, data,
1020 sopt->sopt_valsize,
1021 sopt->sopt_valsize);
2d21ac55 1022 }
91447636
A
1023 if (error == 0) {
1024 socket_unlock(so, 0);
fe8ab488
A
1025 error = (*kctl->setopt)(kcb->kctl, kcb->unit,
1026 kcb->userdata,
1027 sopt->sopt_name,
1028 data,
1029 sopt->sopt_valsize);
91447636
A
1030 socket_lock(so, 0);
1031 }
1032 FREE(data, M_TEMP);
1033 break;
fe8ab488 1034
91447636
A
1035 case SOPT_GET:
1036 if (kctl->getopt == NULL)
fe8ab488 1037 return (ENOTSUP);
91447636
A
1038 data = NULL;
1039 if (sopt->sopt_valsize && sopt->sopt_val) {
fe8ab488
A
1040 MALLOC(data, void *, sopt->sopt_valsize, M_TEMP,
1041 M_WAITOK);
91447636 1042 if (data == NULL)
fe8ab488
A
1043 return (ENOMEM);
1044 /*
1045 * 4108337 - copy user data in case the
1046 * kernel control needs it
1047 */
1048 error = sooptcopyin(sopt, data,
1049 sopt->sopt_valsize, sopt->sopt_valsize);
91447636
A
1050 }
1051 len = sopt->sopt_valsize;
1052 socket_unlock(so, 0);
fe8ab488
A
1053 error = (*kctl->getopt)(kcb->kctl, kcb->unit,
1054 kcb->userdata, sopt->sopt_name,
91447636 1055 data, &len);
6d2010ae 1056 if (data != NULL && len > sopt->sopt_valsize)
fe8ab488
A
1057 panic_plain("ctl_ctloutput: ctl %s returned "
1058 "len (%lu) > sopt_valsize (%lu)\n",
1059 kcb->kctl->name, len,
1060 sopt->sopt_valsize);
1061 socket_lock(so, 0);
91447636
A
1062 if (error == 0) {
1063 if (data != NULL)
1064 error = sooptcopyout(sopt, data, len);
fe8ab488 1065 else
91447636
A
1066 sopt->sopt_valsize = len;
1067 }
1068 if (data != NULL)
fe8ab488 1069 FREE(data, M_TEMP);
91447636
A
1070 break;
1071 }
fe8ab488 1072 return (error);
91447636 1073}
9bccf70c 1074
fe8ab488
A
/*
 * ioctl entry point for kernel control sockets.
 *
 * Supported commands:
 *   CTLIOCGCOUNT - return the number of registered kernel controls.
 *   CTLIOCGINFO  - look up a control by name and return its control ID.
 * Any other command returns ENOTSUP.
 *
 * so, ifp and p are unused; data is the ioctl argument buffer.
 */
static int
ctl_ioctl(struct socket *so, u_long cmd, caddr_t data,
    struct ifnet *ifp, struct proc *p)
{
#pragma unused(so, ifp, p)
	int	error = ENOTSUP;

	switch (cmd) {
	/* get the number of controllers */
	case CTLIOCGCOUNT: {
		struct kctl	*kctl;
		u_int32_t n = 0;

		/* Walk the global list under ctl_mtx and count entries */
		lck_mtx_lock(ctl_mtx);
		TAILQ_FOREACH(kctl, &ctl_head, next)
			n++;
		lck_mtx_unlock(ctl_mtx);

		bcopy(&n, data, sizeof (n));
		error = 0;
		break;
	}
	case CTLIOCGINFO: {
		struct ctl_info ctl_info;
		struct kctl	*kctl = 0;
		size_t name_len;

		/* Copy the request in; ctl_name may not be NUL terminated */
		bcopy(data, &ctl_info, sizeof (ctl_info));
		name_len = strnlen(ctl_info.ctl_name, MAX_KCTL_NAME);

		/* Reject empty names and names that fill the whole buffer */
		if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) {
			error = EINVAL;
			break;
		}
		lck_mtx_lock(ctl_mtx);
		kctl = ctl_find_by_name(ctl_info.ctl_name);
		lck_mtx_unlock(ctl_mtx);
		if (kctl == 0) {
			error = ENOENT;
			break;
		}
		/* Return the resolved control ID to the caller */
		ctl_info.ctl_id = kctl->id;
		bcopy(&ctl_info, data, sizeof (ctl_info));
		error = 0;
		break;
	}

	/* add controls to get list of NKEs */

	}

	return (error);
}
9bccf70c 1128
91447636
A
1129/*
1130 * Register/unregister a NKE
1131 */
/*
 * Register a kernel control (NKE).
 *
 * Validates the registration request, allocates a kctl entry, assigns a
 * control ID (dynamically, or statically when CTL_FLAG_REG_ID_UNIT is
 * set), inserts it into the global ctl_head list kept sorted by ID, and
 * posts a KEV_CTL_REGISTERED kernel event.
 *
 * userkctl - caller-supplied registration; ctl_id/ctl_sendsize/
 *            ctl_recvsize may be written back with assigned/default values.
 * kctlref  - out: opaque reference to the registered control.
 *
 * Returns 0 on success, EINVAL for bad arguments, ENOMEM on allocation
 * failure, EEXIST when the name or id/unit pair is already registered.
 */
errno_t
ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref)
{
	struct kctl	*kctl = NULL;
	struct kctl	*kctl_next = NULL;
	u_int32_t	id = 1;
	size_t		name_len;
	int		is_extended = 0;

	if (userkctl == NULL)	/* sanity check */
		return (EINVAL);
	if (userkctl->ctl_connect == NULL)
		return (EINVAL);
	name_len = strlen(userkctl->ctl_name);
	if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME)
		return (EINVAL);

	MALLOC(kctl, struct kctl *, sizeof(*kctl), M_TEMP, M_WAITOK);
	if (kctl == NULL)
		return (ENOMEM);
	bzero((char *)kctl, sizeof(*kctl));

	lck_mtx_lock(ctl_mtx);

	/*
	 * Kernel Control IDs
	 *
	 * CTL_FLAG_REG_ID_UNIT indicates the control ID and unit number are
	 * static. If they do not exist, add them to the list in order. If the
	 * flag is not set, we must find a new unique value. We assume the
	 * list is in order. We find the last item in the list and add one. If
	 * this leads to wrapping the id around, we start at the front of the
	 * list and look for a gap.
	 */

	if ((userkctl->ctl_flags & CTL_FLAG_REG_ID_UNIT) == 0) {
		/* Must dynamically assign an unused ID */

		/* Verify the same name isn't already registered */
		if (ctl_find_by_name(userkctl->ctl_name) != NULL) {
			lck_mtx_unlock(ctl_mtx);
			FREE(kctl, M_TEMP);
			return (EEXIST);
		}

		/* Start with 1 in case the list is empty */
		id = 1;
		kctl_next = TAILQ_LAST(&ctl_head, kctl_list);

		if (kctl_next != NULL) {
			/* List was not empty, add one to the last item */
			id = kctl_next->id + 1;
			kctl_next = NULL;

			/*
			 * If this wrapped the id number, start looking at
			 * the front of the list for an unused id.
			 */
			if (id == 0) {
				/* Find the next unused ID */
				id = 1;

				TAILQ_FOREACH(kctl_next, &ctl_head, next) {
					if (kctl_next->id > id) {
						/* We found a gap */
						break;
					}

					id = kctl_next->id + 1;
				}
			}
		}

		userkctl->ctl_id = id;
		kctl->id = id;
		kctl->reg_unit = -1;	/* dynamic controls have no fixed unit */
	} else {
		/*
		 * Static id/unit: find the sorted insertion point, then make
		 * sure the requested id/unit pair is not already taken.
		 */
		TAILQ_FOREACH(kctl_next, &ctl_head, next) {
			if (kctl_next->id > userkctl->ctl_id)
				break;
		}

		if (ctl_find_by_id_unit(userkctl->ctl_id, userkctl->ctl_unit)) {
			lck_mtx_unlock(ctl_mtx);
			FREE(kctl, M_TEMP);
			return (EEXIST);
		}
		kctl->id = userkctl->ctl_id;
		kctl->reg_unit = userkctl->ctl_unit;
	}

	is_extended = (userkctl->ctl_flags & CTL_FLAG_REG_EXTENDED);

	strlcpy(kctl->name, userkctl->ctl_name, MAX_KCTL_NAME);
	kctl->flags = userkctl->ctl_flags;

	/*
	 * Let the caller know the default send and receive sizes
	 */
	if (userkctl->ctl_sendsize == 0) {
		kctl->sendbufsize = CTL_SENDSIZE;
		userkctl->ctl_sendsize = kctl->sendbufsize;
	} else {
		kctl->sendbufsize = userkctl->ctl_sendsize;
	}
	if (userkctl->ctl_recvsize == 0) {
		kctl->recvbufsize = CTL_RECVSIZE;
		userkctl->ctl_recvsize = kctl->recvbufsize;
	} else {
		kctl->recvbufsize = userkctl->ctl_recvsize;
	}

	kctl->connect = userkctl->ctl_connect;
	kctl->disconnect = userkctl->ctl_disconnect;
	kctl->send = userkctl->ctl_send;
	kctl->setopt = userkctl->ctl_setopt;
	kctl->getopt = userkctl->ctl_getopt;
	/* Extended callbacks are only honored when the flag is set */
	if (is_extended) {
		kctl->rcvd = userkctl->ctl_rcvd;
		kctl->send_list = userkctl->ctl_send_list;
	}

	TAILQ_INIT(&kctl->kcb_head);

	/* kctl_next is non-NULL only on the static path: insert sorted */
	if (kctl_next)
		TAILQ_INSERT_BEFORE(kctl_next, kctl, next);
	else
		TAILQ_INSERT_TAIL(&ctl_head, kctl, next);

	kctlstat.kcs_reg_count++;
	kctlstat.kcs_gencnt++;

	lck_mtx_unlock(ctl_mtx);

	*kctlref = kctl;

	/* Event must be posted without ctl_mtx held (see ctl_post_msg) */
	ctl_post_msg(KEV_CTL_REGISTERED, kctl->id);
	return (0);
}
1271
91447636
A
/*
 * Deregister a kernel control previously registered with ctl_register().
 *
 * kctlref must be the reference returned by ctl_register().  Fails with
 * EINVAL when the reference is NULL or not found in the global list, and
 * with EBUSY while any control block (client connection) is still
 * attached.  On success the kctl is removed, freed, and a
 * KEV_CTL_DEREGISTERED event is posted.
 */
errno_t
ctl_deregister(void *kctlref)
{
	struct kctl		*kctl;

	if (kctlref == NULL)	/* sanity check */
		return (EINVAL);

	lck_mtx_lock(ctl_mtx);
	/* Validate the reference by searching the registered list */
	TAILQ_FOREACH(kctl, &ctl_head, next) {
		if (kctl == (struct kctl *)kctlref)
			break;
	}
	if (kctl != (struct kctl *)kctlref) {
		lck_mtx_unlock(ctl_mtx);
		return (EINVAL);
	}
	/* Refuse to deregister while clients are still connected */
	if (!TAILQ_EMPTY(&kctl->kcb_head)) {
		lck_mtx_unlock(ctl_mtx);
		return (EBUSY);
	}

	TAILQ_REMOVE(&ctl_head, kctl, next);

	kctlstat.kcs_reg_count--;
	kctlstat.kcs_gencnt++;

	lck_mtx_unlock(ctl_mtx);

	/* Post the event after dropping ctl_mtx, then free the entry */
	ctl_post_msg(KEV_CTL_DEREGISTERED, kctl->id);
	FREE(kctl, M_TEMP);
	return (0);
}
1305
91447636
A
1306/*
1307 * Must be called with global ctl_mtx lock taked
1308 */
1309static struct kctl *
1310ctl_find_by_name(const char *name)
fe8ab488
A
1311{
1312 struct kctl *kctl;
1313
1314 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
9bccf70c 1315
fe8ab488
A
1316 TAILQ_FOREACH(kctl, &ctl_head, next)
1317 if (strncmp(kctl->name, name, sizeof(kctl->name)) == 0)
1318 return (kctl);
9bccf70c 1319
fe8ab488 1320 return (NULL);
91447636 1321}
9bccf70c 1322
6d2010ae
A
1323u_int32_t
1324ctl_id_by_name(const char *name)
1325{
1326 u_int32_t ctl_id = 0;
fe8ab488
A
1327 struct kctl *kctl;
1328
6d2010ae 1329 lck_mtx_lock(ctl_mtx);
fe8ab488
A
1330 kctl = ctl_find_by_name(name);
1331 if (kctl)
1332 ctl_id = kctl->id;
6d2010ae 1333 lck_mtx_unlock(ctl_mtx);
fe8ab488
A
1334
1335 return (ctl_id);
6d2010ae
A
1336}
1337
1338errno_t
fe8ab488 1339ctl_name_by_id(u_int32_t id, char *out_name, size_t maxsize)
6d2010ae
A
1340{
1341 int found = 0;
6d2010ae 1342 struct kctl *kctl;
fe8ab488
A
1343
1344 lck_mtx_lock(ctl_mtx);
1345 TAILQ_FOREACH(kctl, &ctl_head, next) {
1346 if (kctl->id == id)
1347 break;
1348 }
1349
1350 if (kctl && kctl->name) {
1351 if (maxsize > MAX_KCTL_NAME)
1352 maxsize = MAX_KCTL_NAME;
1353 strlcpy(out_name, kctl->name, maxsize);
1354 found = 1;
1355 }
6d2010ae 1356 lck_mtx_unlock(ctl_mtx);
fe8ab488
A
1357
1358 return (found ? 0 : ENOENT);
6d2010ae
A
1359}
1360
91447636
A
/*
 * Look up a registered kernel control by id and unit.
 * Must be called with the global ctl_mtx lock taken.
 *
 * A control registered with a dynamic ID (CTL_FLAG_REG_ID_UNIT clear)
 * matches on id alone; a statically registered control must match both
 * id and reg_unit.  Returns NULL when no control matches.
 */
static struct kctl *
ctl_find_by_id_unit(u_int32_t id, u_int32_t unit)
{
	struct kctl	*kctl;

	lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);

	TAILQ_FOREACH(kctl, &ctl_head, next) {
		if (kctl->id == id && (kctl->flags & CTL_FLAG_REG_ID_UNIT) == 0)
			return (kctl);
		else if (kctl->id == id && kctl->reg_unit == unit)
			return (kctl);
	}
	return (NULL);
}
1380
1381/*
91447636 1382 * Must be called with kernel controller lock taken
9bccf70c 1383 */
91447636
A
1384static struct ctl_cb *
1385kcb_find(struct kctl *kctl, u_int32_t unit)
fe8ab488
A
1386{
1387 struct ctl_cb *kcb;
9bccf70c 1388
fe8ab488 1389 lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
9bccf70c 1390
fe8ab488
A
1391 TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
1392 if (kcb->unit == unit)
1393 return (kcb);
1394
1395 return (NULL);
9bccf70c
A
1396}
1397
6d2010ae
A
/*
 * Find the socket for the (kctl, unit) connection and return it locked.
 *
 * The lookup is done under ctl_mtx; kcb->usecount is bumped so the kcb
 * cannot disappear while we drop ctl_mtx to take the socket lock (the
 * socket lock may not be acquired while holding ctl_mtx).  After
 * re-taking ctl_mtx, a cleared kcb->kctl means the connection was torn
 * down in the window, so the socket is unlocked and NULL is returned.
 *
 * Returns the socket locked (socket_lock(so, 1)) or NULL.  Callers are
 * responsible for socket_unlock().
 */
static struct socket *
kcb_find_socket(struct kctl *kctl, u_int32_t unit)
{
	struct socket *so = NULL;
	struct ctl_cb	*kcb;
	void *lr_saved;

	/* Remember our caller for the socket lock history below */
	lr_saved = __builtin_return_address(0);

	lck_mtx_lock(ctl_mtx);
	kcb = kcb_find(kctl, unit);
	if (kcb && kcb->kctl == kctl) {
		so = kcb->so;
		if (so) {
			/* Pin the kcb while we drop ctl_mtx */
			kcb->usecount++;
		}
	}
	lck_mtx_unlock(ctl_mtx);

	if (so == NULL) {
		return (NULL);
	}

	socket_lock(so, 1);

	lck_mtx_lock(ctl_mtx);
	if (kcb->kctl == NULL) {
		/* Connection went away while we held only the socket lock */
		lck_mtx_unlock(ctl_mtx);
		socket_unlock(so, 1);
		so = NULL;
		lck_mtx_lock(ctl_mtx);
	} else {
		/*
		 * The socket lock history is more useful if we store
		 * the address of the caller.
		 */
		int i = (so->next_lock_lr + SO_LCKDBG_MAX - 1) % SO_LCKDBG_MAX;

		so->lock_lr[i] = lr_saved;
	}
	/* Drop our pin and wake anyone waiting for the kcb to go idle */
	kcb->usecount--;
	if (kcb->usecount == 0)
		wakeup((event_t)&kcb->usecount);
	lck_mtx_unlock(ctl_mtx);

	return (so);
}
1445
fe8ab488
A
/*
 * Post a kernel event (KEV_SYSTEM_CLASS / KEV_CTL_SUBCLASS) announcing a
 * kernel control state change (e.g. KEV_CTL_REGISTERED or
 * KEV_CTL_DEREGISTERED) for the given control ID.
 *
 * Must be called WITHOUT ctl_mtx held; kev_post_msg may block and the
 * assert below enforces the lock-ordering rule.
 */
static void
ctl_post_msg(u_int32_t event_code, u_int32_t id)
{
	struct ctl_event_data	ctl_ev_data;
	struct kev_msg		ev_msg;

	lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_NOTOWNED);

	bzero(&ev_msg, sizeof(struct kev_msg));
	ev_msg.vendor_code = KEV_VENDOR_APPLE;

	ev_msg.kev_class = KEV_SYSTEM_CLASS;
	ev_msg.kev_subclass = KEV_CTL_SUBCLASS;
	ev_msg.event_code = event_code;

	/* common nke subclass data */
	bzero(&ctl_ev_data, sizeof(ctl_ev_data));
	ctl_ev_data.ctl_id = id;
	ev_msg.dv[0].data_ptr = &ctl_ev_data;
	ev_msg.dv[0].data_length = sizeof(ctl_ev_data);

	/* Terminate the data vector */
	ev_msg.dv[1].data_length = 0;

	kev_post_msg(&ev_msg);
}
1471
/*
 * Per-socket lock function for kernel control sockets (pr_lock).
 *
 * Locks the per-connection mutex stored in the ctl_cb hanging off
 * so->so_pcb, optionally takes a use-count reference, and records the
 * caller's return address in the socket's lock history for debugging.
 * Panics if the socket has no PCB or a negative use count.
 *
 * lr is the caller-supplied return address; when NULL the immediate
 * caller of ctl_lock is recorded instead.  Always returns 0.
 */
static int
ctl_lock(struct socket *so, int refcount, void *lr)
{
	void *lr_saved;

	if (lr == NULL)
		lr_saved = __builtin_return_address(0);
	else
		lr_saved = lr;

	if (so->so_pcb != NULL) {
		lck_mtx_lock(((struct ctl_cb *)so->so_pcb)->mtx);
	} else {
		panic("ctl_lock: so=%p NO PCB! lr=%p lrh= %s\n",
		    so, lr_saved, solockhistory_nr(so));
		/* NOTREACHED */
	}

	if (so->so_usecount < 0) {
		panic("ctl_lock: so=%p so_pcb=%p lr=%p ref=%x lrh= %s\n",
		    so, so->so_pcb, lr_saved, so->so_usecount,
		    solockhistory_nr(so));
		/* NOTREACHED */
	}

	if (refcount)
		so->so_usecount++;

	/* Record this acquisition in the circular lock history */
	so->lock_lr[so->next_lock_lr] = lr_saved;
	so->next_lock_lr = (so->next_lock_lr+1) % SO_LCKDBG_MAX;
	return (0);
}
1504
1505static int
b0d623f7 1506ctl_unlock(struct socket *so, int refcount, void *lr)
91447636 1507{
b0d623f7
A
1508 void *lr_saved;
1509 lck_mtx_t *mutex_held;
1510
1511 if (lr == NULL)
1512 lr_saved = __builtin_return_address(0);
1513 else
1514 lr_saved = lr;
1515
91447636 1516#ifdef MORE_KCTLLOCK_DEBUG
fe8ab488
A
1517 printf("ctl_unlock: so=%llx sopcb=%x lock=%llx ref=%u lr=%llx\n",
1518 (uint64_t)VM_KERNEL_ADDRPERM(so),
1519 (uint64_t)VM_KERNEL_ADDRPERM(so->so_pcb,
1520 (uint64_t)VM_KERNEL_ADDRPERM(((struct ctl_cb *)so->so_pcb)->mtx),
1521 so->so_usecount, (uint64_t)VM_KERNEL_ADDRPERM(lr_saved));
91447636
A
1522#endif
1523 if (refcount)
1524 so->so_usecount--;
b0d623f7
A
1525
1526 if (so->so_usecount < 0) {
fe8ab488 1527 panic("ctl_unlock: so=%p usecount=%x lrh= %s\n",
b0d623f7
A
1528 so, so->so_usecount, solockhistory_nr(so));
1529 /* NOTREACHED */
1530 }
91447636 1531 if (so->so_pcb == NULL) {
fe8ab488
A
1532 panic("ctl_unlock: so=%p NO PCB usecount=%x lr=%p lrh= %s\n",
1533 so, so->so_usecount, (void *)lr_saved,
1534 solockhistory_nr(so));
b0d623f7 1535 /* NOTREACHED */
91447636 1536 }
b0d623f7
A
1537 mutex_held = ((struct ctl_cb *)so->so_pcb)->mtx;
1538
91447636 1539 lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
2d21ac55 1540 so->unlock_lr[so->next_unlock_lr] = lr_saved;
0c530ab8 1541 so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX;
91447636 1542 lck_mtx_unlock(mutex_held);
b0d623f7 1543
91447636
A
1544 if (so->so_usecount == 0)
1545 ctl_sofreelastref(so);
b0d623f7 1546
91447636
A
1547 return (0);
1548}
1549
/*
 * Return the mutex protecting this kernel control socket (pr_getlock).
 *
 * Normally returns the per-connection mutex stored in the ctl_cb; if
 * the PCB is already gone this panics and (unreachably) falls back to
 * the domain mutex.  locktype is unused.
 */
static lck_mtx_t *
ctl_getlock(struct socket *so, int locktype)
{
#pragma unused(locktype)
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

	if (so->so_pcb) {
		if (so->so_usecount < 0)
			panic("ctl_getlock: so=%p usecount=%x lrh= %s\n",
			    so, so->so_usecount, solockhistory_nr(so));
		return (kcb->mtx);
	} else {
		panic("ctl_getlock: so=%p NULL NO so_pcb %s\n",
		    so, solockhistory_nr(so));
		return (so->so_proto->pr_domain->dom_mtx);
	}
}
fe8ab488
A
1567
/*
 * sysctl handler that exports the list of registered kernel controls as
 * an array of struct xkctl_reg, bracketed by two struct xsystmgen
 * generation records (standard two-pass sysctl protocol):
 *
 *  - sizing probe (oldptr == NULL): report an estimated length only;
 *  - writes (newptr != NULL) are rejected with EPERM;
 *  - otherwise, emit a leading xsystmgen, one xkctl_reg per control,
 *    and a trailing xsystmgen so the caller can detect concurrent
 *    changes via the generation count.
 *
 * The whole walk runs under ctl_mtx.  Pointers are scrubbed with
 * VM_KERNEL_ADDRPERM before export.
 */
__private_extern__ int
kctl_reg_list SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error = 0;
	int n, i;
	struct xsystmgen xsg;
	void *buf = NULL;
	struct kctl *kctl;
	size_t item_size = ROUNDUP64(sizeof (struct xkctl_reg));

	buf = _MALLOC(item_size, M_TEMP, M_WAITOK | M_ZERO);
	if (buf == NULL)
		return (ENOMEM);

	lck_mtx_lock(ctl_mtx);

	n = kctlstat.kcs_reg_count;

	if (req->oldptr == USER_ADDR_NULL) {
		/* Sizing probe: pad the estimate by 12.5% for growth */
		req->oldidx = (n + n/8) * sizeof(struct xkctl_reg);
		goto done;
	}
	if (req->newptr != USER_ADDR_NULL) {
		error = EPERM;
		goto done;
	}
	/* Leading generation record */
	bzero(&xsg, sizeof (xsg));
	xsg.xg_len = sizeof (xsg);
	xsg.xg_count = n;
	xsg.xg_gen = kctlstat.kcs_gencnt;
	xsg.xg_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xsg, sizeof (xsg));
	if (error) {
		goto done;
	}
	/*
	 * We are done if there is no pcb
	 */
	if (n == 0) {
		goto done;
	}

	i = 0;
	for (i = 0, kctl = TAILQ_FIRST(&ctl_head);
	    i < n && kctl != NULL;
	    i++, kctl = TAILQ_NEXT(kctl, next)) {
		struct xkctl_reg *xkr = (struct xkctl_reg *)buf;
		struct ctl_cb *kcb;
		u_int32_t pcbcount = 0;

		/* Count the connections attached to this control */
		TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
			pcbcount++;

		bzero(buf, item_size);

		xkr->xkr_len = sizeof(struct xkctl_reg);
		xkr->xkr_kind = XSO_KCREG;
		xkr->xkr_id = kctl->id;
		xkr->xkr_reg_unit = kctl->reg_unit;
		xkr->xkr_flags = kctl->flags;
		xkr->xkr_kctlref = (uint64_t)VM_KERNEL_ADDRPERM(kctl);
		xkr->xkr_recvbufsize = kctl->recvbufsize;
		xkr->xkr_sendbufsize = kctl->sendbufsize;
		xkr->xkr_lastunit = kctl->lastunit;
		xkr->xkr_pcbcount = pcbcount;
		xkr->xkr_connect = (uint64_t)VM_KERNEL_ADDRPERM(kctl->connect);
		xkr->xkr_disconnect =
		    (uint64_t)VM_KERNEL_ADDRPERM(kctl->disconnect);
		xkr->xkr_send = (uint64_t)VM_KERNEL_ADDRPERM(kctl->send);
		xkr->xkr_send_list =
		    (uint64_t)VM_KERNEL_ADDRPERM(kctl->send_list);
		xkr->xkr_setopt = (uint64_t)VM_KERNEL_ADDRPERM(kctl->setopt);
		xkr->xkr_getopt = (uint64_t)VM_KERNEL_ADDRPERM(kctl->getopt);
		xkr->xkr_rcvd = (uint64_t)VM_KERNEL_ADDRPERM(kctl->rcvd);
		strlcpy(xkr->xkr_name, kctl->name, sizeof(xkr->xkr_name));

		error = SYSCTL_OUT(req, buf, item_size);
	}

	if (error == 0) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		bzero(&xsg, sizeof (xsg));
		xsg.xg_len = sizeof (xsg);
		xsg.xg_count = n;
		xsg.xg_gen = kctlstat.kcs_gencnt;
		xsg.xg_sogen = so_gencnt;
		error = SYSCTL_OUT(req, &xsg, sizeof (xsg));
		if (error) {
			goto done;
		}
	}

done:
	lck_mtx_unlock(ctl_mtx);

	if (buf != NULL)
		FREE(buf, M_TEMP);

	return (error);
}
1675
/*
 * sysctl handler that exports every kernel control connection (pcb).
 * For each ctl_cb it emits one record consisting of an xkctlpcb
 * followed by xsocket_n, two xsockbuf_n (receive then send) and an
 * xsockstat_n, each 64-bit aligned within a single item_size buffer.
 * Uses the same two-pass generation-count protocol as kctl_reg_list;
 * the whole walk runs under ctl_mtx.
 */
__private_extern__ int
kctl_pcblist SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error = 0;
	int n, i;
	struct xsystmgen xsg;
	void *buf = NULL;
	struct kctl *kctl;
	/* One aligned record: pcb + socket + rcv/snd sockbufs + stats */
	size_t item_size = ROUNDUP64(sizeof (struct xkctlpcb)) +
	    ROUNDUP64(sizeof (struct xsocket_n)) +
	    2 * ROUNDUP64(sizeof (struct xsockbuf_n)) +
	    ROUNDUP64(sizeof (struct xsockstat_n));

	buf = _MALLOC(item_size, M_TEMP, M_WAITOK | M_ZERO);
	if (buf == NULL)
		return (ENOMEM);

	lck_mtx_lock(ctl_mtx);

	n = kctlstat.kcs_pcbcount;

	if (req->oldptr == USER_ADDR_NULL) {
		/* Sizing probe: pad the estimate by 12.5% for growth */
		req->oldidx = (n + n/8) * item_size;
		goto done;
	}
	if (req->newptr != USER_ADDR_NULL) {
		error = EPERM;
		goto done;
	}
	/* Leading generation record */
	bzero(&xsg, sizeof (xsg));
	xsg.xg_len = sizeof (xsg);
	xsg.xg_count = n;
	xsg.xg_gen = kctlstat.kcs_gencnt;
	xsg.xg_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xsg, sizeof (xsg));
	if (error) {
		goto done;
	}
	/*
	 * We are done if there is no pcb
	 */
	if (n == 0) {
		goto done;
	}

	i = 0;
	for (i = 0, kctl = TAILQ_FIRST(&ctl_head);
	    i < n && kctl != NULL;
	    kctl = TAILQ_NEXT(kctl, next)) {
		struct ctl_cb *kcb;

		/* i counts pcbs across all controls, bounded by n */
		for (kcb = TAILQ_FIRST(&kctl->kcb_head);
		    i < n && kcb != NULL;
		    i++, kcb = TAILQ_NEXT(kcb, next)) {
			struct xkctlpcb *xk = (struct xkctlpcb *)buf;
			struct xsocket_n *xso = (struct xsocket_n *)
			    ADVANCE64(xk, sizeof (*xk));
			struct xsockbuf_n *xsbrcv = (struct xsockbuf_n *)
			    ADVANCE64(xso, sizeof (*xso));
			struct xsockbuf_n *xsbsnd = (struct xsockbuf_n *)
			    ADVANCE64(xsbrcv, sizeof (*xsbrcv));
			struct xsockstat_n *xsostats = (struct xsockstat_n *)
			    ADVANCE64(xsbsnd, sizeof (*xsbsnd));

			bzero(buf, item_size);

			xk->xkp_len = sizeof(struct xkctlpcb);
			xk->xkp_kind = XSO_KCB;
			xk->xkp_unit = kcb->unit;
			xk->xkp_kctpcb = (uint64_t)VM_KERNEL_ADDRPERM(kcb);
			xk->xkp_kctlref = (uint64_t)VM_KERNEL_ADDRPERM(kctl);
			xk->xkp_kctlid = kctl->id;
			strlcpy(xk->xkp_kctlname, kctl->name,
			    sizeof(xk->xkp_kctlname));

			/* kcb->so may be NULL; the sbto* helpers take NULL */
			sotoxsocket_n(kcb->so, xso);
			sbtoxsockbuf_n(kcb->so ?
			    &kcb->so->so_rcv : NULL, xsbrcv);
			sbtoxsockbuf_n(kcb->so ?
			    &kcb->so->so_snd : NULL, xsbsnd);
			sbtoxsockstat_n(kcb->so, xsostats);

			error = SYSCTL_OUT(req, buf, item_size);
		}
	}

	if (error == 0) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		bzero(&xsg, sizeof (xsg));
		xsg.xg_len = sizeof (xsg);
		xsg.xg_count = n;
		xsg.xg_gen = kctlstat.kcs_gencnt;
		xsg.xg_sogen = so_gencnt;
		error = SYSCTL_OUT(req, &xsg, sizeof (xsg));
		if (error) {
			goto done;
		}
	}

done:
	lck_mtx_unlock(ctl_mtx);

	return (error);
}
1787
/*
 * sysctl handler that exports the global kernel control statistics
 * (struct kctlstat).  Read-only: writes fail with EPERM.  A sizing
 * probe (oldptr == NULL) reports the structure size; otherwise at most
 * req->oldlen bytes of kctlstat are copied out under ctl_mtx.
 */
int
kctl_getstat SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error = 0;

	lck_mtx_lock(ctl_mtx);

	if (req->newptr != USER_ADDR_NULL) {
		error = EPERM;
		goto done;
	}
	if (req->oldptr == USER_ADDR_NULL) {
		req->oldidx = sizeof(struct kctlstat);
		goto done;
	}

	/* Cap the copy at the caller's buffer length */
	error = SYSCTL_OUT(req, &kctlstat,
	    MIN(sizeof(struct kctlstat), req->oldlen));
done:
	lck_mtx_unlock(ctl_mtx);
	return (error);
}