/* bsd/net/ntstat.c */
/*
 * Copyright (c) 2010-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/kpi_mbuf.h>
#include <sys/socket.h>
#include <sys/kern_control.h>
#include <sys/mcache.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/priv.h>
#include <sys/protosw.h>

#include <kern/clock.h>
#include <kern/debug.h>

#include <libkern/libkern.h>
#include <libkern/OSMalloc.h>
#include <libkern/OSAtomic.h>
#include <libkern/locks.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/route.h>
#include <net/ntstat.h>

#include <netinet/ip_var.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_cc.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/in6_var.h>

__private_extern__ int nstat_collect = 1;
SYSCTL_INT(_net, OID_AUTO, statistics, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_collect, 0, "Collect detailed statistics");

static int nstat_privcheck = 0;
SYSCTL_INT(_net, OID_AUTO, statistics_privcheck, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_privcheck, 0, "Entitlement check");

SYSCTL_NODE(_net, OID_AUTO, stats,
    CTLFLAG_RW|CTLFLAG_LOCKED, 0, "network statistics");

static int nstat_debug = 0;
SYSCTL_INT(_net_stats, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_debug, 0, "");

static int nstat_sendspace = 2048;
SYSCTL_INT(_net_stats, OID_AUTO, sendspace, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_sendspace, 0, "");

static int nstat_recvspace = 8192;
SYSCTL_INT(_net_stats, OID_AUTO, recvspace, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_recvspace, 0, "");

static struct nstat_stats nstat_stats;
SYSCTL_STRUCT(_net_stats, OID_AUTO, stats, CTLFLAG_RD | CTLFLAG_LOCKED,
    &nstat_stats, nstat_stats, "");

enum
{
    NSTAT_FLAG_CLEANUP = (1 << 0),
    NSTAT_FLAG_REQCOUNTS = (1 << 1),
    NSTAT_FLAG_SUPPORTS_UPDATES = (1 << 2),
    NSTAT_FLAG_SYSINFO_SUBSCRIBED = (1 << 3),
};

#define QUERY_CONTINUATION_SRC_COUNT 100
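// Explanatory note (inferred from usage): bounds the number of sources
// walked per query response; larger result sets are resumed through the
// partial-query state (ncs_context/ncs_seq) kept in nstat_control_state
// below.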

typedef struct nstat_provider_filter
{
    u_int64_t npf_flags;
    u_int64_t npf_events;
    pid_t npf_pid;
    uuid_t npf_uuid;
} nstat_provider_filter;

typedef struct nstat_control_state
{
    struct nstat_control_state *ncs_next;
    u_int32_t ncs_watching;
    decl_lck_mtx_data(, mtx);
    kern_ctl_ref ncs_kctl;
    u_int32_t ncs_unit;
    nstat_src_ref_t ncs_next_srcref;
    struct nstat_src *ncs_srcs;
    mbuf_t ncs_accumulated;
    u_int32_t ncs_flags;
    nstat_provider_filter ncs_provider_filters[NSTAT_PROVIDER_COUNT];
    /* state maintained for partial query requests */
    u_int64_t ncs_context;
    u_int64_t ncs_seq;
} nstat_control_state;

typedef struct nstat_provider
{
    struct nstat_provider *next;
    nstat_provider_id_t nstat_provider_id;
    size_t nstat_descriptor_length;
    errno_t (*nstat_lookup)(const void *data, u_int32_t length, nstat_provider_cookie_t *out_cookie);
    int (*nstat_gone)(nstat_provider_cookie_t cookie);
    errno_t (*nstat_counts)(nstat_provider_cookie_t cookie, struct nstat_counts *out_counts, int *out_gone);
    errno_t (*nstat_watcher_add)(nstat_control_state *state);
    void (*nstat_watcher_remove)(nstat_control_state *state);
    errno_t (*nstat_copy_descriptor)(nstat_provider_cookie_t cookie, void *data, u_int32_t len);
    void (*nstat_release)(nstat_provider_cookie_t cookie, boolean_t locked);
    bool (*nstat_reporting_allowed)(nstat_provider_cookie_t cookie, nstat_provider_filter *filter);
} nstat_provider;

typedef STAILQ_HEAD(, nstat_src) stailq_head_nstat_src;
typedef STAILQ_ENTRY(nstat_src) stailq_entry_nstat_src;

typedef TAILQ_HEAD(, nstat_tu_shadow) tailq_head_tu_shadow;
typedef TAILQ_ENTRY(nstat_tu_shadow) tailq_entry_tu_shadow;

typedef struct nstat_src
{
    struct nstat_src *next;
    nstat_src_ref_t srcref;
    nstat_provider *provider;
    nstat_provider_cookie_t cookie;
    uint32_t filter;
    uint64_t seq;
} nstat_src;

static errno_t nstat_control_send_counts(nstat_control_state *,
    nstat_src *, unsigned long long, u_int16_t, int *);
static int nstat_control_send_description(nstat_control_state *state, nstat_src *src, u_int64_t context, u_int16_t hdr_flags);
static int nstat_control_send_update(nstat_control_state *state, nstat_src *src, u_int64_t context, u_int16_t hdr_flags, int *gone);
static errno_t nstat_control_send_removed(nstat_control_state *, nstat_src *);
static errno_t nstat_control_send_goodbye(nstat_control_state *state, nstat_src *src);
static void nstat_control_cleanup_source(nstat_control_state *state, nstat_src *src, boolean_t);
static bool nstat_control_reporting_allowed(nstat_control_state *state, nstat_src *src);
static boolean_t nstat_control_begin_query(nstat_control_state *state, const nstat_msg_hdr *hdrp);
static u_int16_t nstat_control_end_query(nstat_control_state *state, nstat_src *last_src, boolean_t partial);
static void nstat_ifnet_report_ecn_stats(void);

static u_int32_t nstat_udp_watchers = 0;
static u_int32_t nstat_userland_udp_watchers = 0;
static u_int32_t nstat_tcp_watchers = 0;
static u_int32_t nstat_userland_tcp_watchers = 0;

static void nstat_control_register(void);

/*
 * The lock order is as follows:
 *
 * socket_lock (inpcb)
 *     nstat_mtx
 *         state->mtx
 */
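/*
 * For example, nstat_pcb_detach() acquires nstat_mtx and then each
 * state->mtx in turn; conversely, code that holds a state->mtx must not
 * attempt to take nstat_mtx or a socket lock.
 */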
static volatile OSMallocTag nstat_malloc_tag = NULL;
static nstat_control_state *nstat_controls = NULL;
static uint64_t nstat_idle_time = 0;
static decl_lck_mtx_data(, nstat_mtx);

/* some extern definitions */
extern void mbuf_report_peak_usage(void);
extern void tcp_report_stats(void);

static void
nstat_copy_sa_out(
    const struct sockaddr *src,
    struct sockaddr *dst,
    int maxlen)
{
    if (src->sa_len > maxlen) return;

    bcopy(src, dst, src->sa_len);
    if (src->sa_family == AF_INET6 &&
        src->sa_len >= sizeof(struct sockaddr_in6))
    {
        struct sockaddr_in6 *sin6 = (struct sockaddr_in6*)(void *)dst;
        if (IN6_IS_SCOPE_EMBED(&sin6->sin6_addr))
        {
            if (sin6->sin6_scope_id == 0)
                sin6->sin6_scope_id = ntohs(sin6->sin6_addr.s6_addr16[1]);
            sin6->sin6_addr.s6_addr16[1] = 0;
        }
    }
}
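/*
 * The scope handling above undoes KAME-style embedding, where the scope
 * of a link-local address is stored in s6_addr16[1]: the embedded scope
 * is moved into sin6_scope_id (if one is not already set) and the
 * embedded field is zeroed, so consumers see a conventional sockaddr_in6.
 */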

static void
nstat_ip_to_sockaddr(
    const struct in_addr *ip,
    u_int16_t port,
    struct sockaddr_in *sin,
    u_int32_t maxlen)
{
    if (maxlen < sizeof(struct sockaddr_in))
        return;

    sin->sin_family = AF_INET;
    sin->sin_len = sizeof(*sin);
    sin->sin_port = port;
    sin->sin_addr = *ip;
}

static void
nstat_ip6_to_sockaddr(
    const struct in6_addr *ip6,
    u_int16_t port,
    struct sockaddr_in6 *sin6,
    u_int32_t maxlen)
{
    if (maxlen < sizeof(struct sockaddr_in6))
        return;

    sin6->sin6_family = AF_INET6;
    sin6->sin6_len = sizeof(*sin6);
    sin6->sin6_port = port;
    sin6->sin6_addr = *ip6;
    if (IN6_IS_SCOPE_EMBED(&sin6->sin6_addr))
    {
        sin6->sin6_scope_id = ntohs(sin6->sin6_addr.s6_addr16[1]);
        sin6->sin6_addr.s6_addr16[1] = 0;
    }
}

static u_int16_t
nstat_ifnet_to_flags(
    struct ifnet *ifp)
{
    u_int16_t flags = 0;
    u_int32_t functional_type = if_functional_type(ifp, FALSE);

    /* Panic if someone adds a functional type without updating ntstat. */
    VERIFY(0 <= functional_type && functional_type <= IFRTYPE_FUNCTIONAL_LAST);

    switch (functional_type)
    {
    case IFRTYPE_FUNCTIONAL_UNKNOWN:
        flags |= NSTAT_IFNET_IS_UNKNOWN_TYPE;
        break;
    case IFRTYPE_FUNCTIONAL_LOOPBACK:
        flags |= NSTAT_IFNET_IS_LOOPBACK;
        break;
    case IFRTYPE_FUNCTIONAL_WIRED:
    case IFRTYPE_FUNCTIONAL_INTCOPROC:
        flags |= NSTAT_IFNET_IS_WIRED;
        break;
    case IFRTYPE_FUNCTIONAL_WIFI_INFRA:
        flags |= NSTAT_IFNET_IS_WIFI;
        break;
    case IFRTYPE_FUNCTIONAL_WIFI_AWDL:
        flags |= NSTAT_IFNET_IS_WIFI;
        flags |= NSTAT_IFNET_IS_AWDL;
        break;
    case IFRTYPE_FUNCTIONAL_CELLULAR:
        flags |= NSTAT_IFNET_IS_CELLULAR;
        break;
    }

    if (IFNET_IS_EXPENSIVE(ifp))
    {
        flags |= NSTAT_IFNET_IS_EXPENSIVE;
    }

    return flags;
}

static u_int16_t
nstat_inpcb_to_flags(
    const struct inpcb *inp)
{
    u_int16_t flags = 0;

    if ((inp != NULL) && (inp->inp_last_outifp != NULL))
    {
        struct ifnet *ifp = inp->inp_last_outifp;
        flags = nstat_ifnet_to_flags(ifp);

        if (flags & NSTAT_IFNET_IS_CELLULAR)
        {
            if (inp->inp_socket != NULL &&
                (inp->inp_socket->so_flags1 & SOF1_CELLFALLBACK))
                flags |= NSTAT_IFNET_VIA_CELLFALLBACK;
        }
    }
    else
    {
        flags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
    }

    return flags;
}

#pragma mark -- Network Statistic Providers --

static errno_t nstat_control_source_add(u_int64_t context, nstat_control_state *state, nstat_provider *provider, nstat_provider_cookie_t cookie);
struct nstat_provider *nstat_providers = NULL;

static struct nstat_provider*
nstat_find_provider_by_id(
    nstat_provider_id_t id)
{
    struct nstat_provider *provider;

    for (provider = nstat_providers; provider != NULL; provider = provider->next)
    {
        if (provider->nstat_provider_id == id)
            break;
    }

    return provider;
}

static errno_t
nstat_lookup_entry(
    nstat_provider_id_t id,
    const void *data,
    u_int32_t length,
    nstat_provider **out_provider,
    nstat_provider_cookie_t *out_cookie)
{
    *out_provider = nstat_find_provider_by_id(id);
    if (*out_provider == NULL)
    {
        return ENOENT;
    }

    return (*out_provider)->nstat_lookup(data, length, out_cookie);
}

static void nstat_init_route_provider(void);
static void nstat_init_tcp_provider(void);
static void nstat_init_userland_tcp_provider(void);
static void nstat_init_udp_provider(void);
static void nstat_init_userland_udp_provider(void);
static void nstat_init_ifnet_provider(void);
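/*
 * One-time initialization is arbitrated with a compare-and-swap on
 * nstat_malloc_tag below: the caller that wins the swap registers all of
 * the providers, while a racing caller simply frees its duplicate tag.
 */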
__private_extern__ void
nstat_init(void)
{
    if (nstat_malloc_tag != NULL) return;

    OSMallocTag tag = OSMalloc_Tagalloc(NET_STAT_CONTROL_NAME, OSMT_DEFAULT);
    if (!OSCompareAndSwapPtr(NULL, tag, &nstat_malloc_tag))
    {
        OSMalloc_Tagfree(tag);
        tag = nstat_malloc_tag;
    }
    else
    {
        // We need to initialize other things; we do it here because this
        // code path will only be hit once.
        nstat_init_route_provider();
        nstat_init_tcp_provider();
        nstat_init_userland_tcp_provider();
        nstat_init_udp_provider();
        nstat_init_userland_udp_provider();
        nstat_init_ifnet_provider();
        nstat_control_register();
    }
}

#pragma mark -- Aligned Buffer Allocation --

struct align_header
{
    u_int32_t offset;
    u_int32_t length;
};

static void*
nstat_malloc_aligned(
    u_int32_t length,
    u_int8_t alignment,
    OSMallocTag tag)
{
    struct align_header *hdr = NULL;
    u_int32_t size = length + sizeof(*hdr) + alignment - 1;

    u_int8_t *buffer = OSMalloc(size, tag);
    if (buffer == NULL) return NULL;

    u_int8_t *aligned = buffer + sizeof(*hdr);
    aligned = (u_int8_t*)P2ROUNDUP(aligned, alignment);

    hdr = (struct align_header*)(void *)(aligned - sizeof(*hdr));
    hdr->offset = aligned - buffer;
    hdr->length = size;

    return aligned;
}

static void
nstat_free_aligned(
    void *buffer,
    OSMallocTag tag)
{
    struct align_header *hdr = (struct align_header*)(void *)((u_int8_t*)buffer - sizeof(*hdr));
    OSFree(((char*)buffer) - hdr->offset, hdr->length, tag);
}
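/*
 * Resulting layout (illustrative):
 *
 *     base                           aligned
 *     |<-------- hdr->offset ------->|
 *     [ padding ... ][ align_header ][ caller's data ... ]
 *
 * The align_header always sits immediately before the pointer handed to
 * the caller, so nstat_free_aligned can recover both the base address
 * (aligned - offset) and the total allocation size.
 */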

#pragma mark -- Route Provider --

static nstat_provider nstat_route_provider;

static errno_t
nstat_route_lookup(
    const void *data,
    u_int32_t length,
    nstat_provider_cookie_t *out_cookie)
{
    // rt_lookup doesn't take const params but it doesn't modify the parameters for
    // the lookup. So...we use a union to eliminate the warning.
    union
    {
        struct sockaddr *sa;
        const struct sockaddr *const_sa;
    } dst, mask;

    const nstat_route_add_param *param = (const nstat_route_add_param*)data;
    *out_cookie = NULL;

    if (length < sizeof(*param))
    {
        return EINVAL;
    }

    if (param->dst.v4.sin_family == 0 ||
        param->dst.v4.sin_family > AF_MAX ||
        (param->mask.v4.sin_family != 0 && param->mask.v4.sin_family != param->dst.v4.sin_family))
    {
        return EINVAL;
    }

    if (param->dst.v4.sin_len > sizeof(param->dst) ||
        (param->mask.v4.sin_family && param->mask.v4.sin_len > sizeof(param->mask.v4.sin_len)))
    {
        return EINVAL;
    }
    if ((param->dst.v4.sin_family == AF_INET &&
         param->dst.v4.sin_len < sizeof(struct sockaddr_in)) ||
        (param->dst.v6.sin6_family == AF_INET6 &&
         param->dst.v6.sin6_len < sizeof(struct sockaddr_in6)))
    {
        return EINVAL;
    }

    dst.const_sa = (const struct sockaddr*)&param->dst;
    mask.const_sa = param->mask.v4.sin_family ? (const struct sockaddr*)&param->mask : NULL;

    struct radix_node_head *rnh = rt_tables[dst.sa->sa_family];
    if (rnh == NULL) return EAFNOSUPPORT;

    lck_mtx_lock(rnh_lock);
    struct rtentry *rt = rt_lookup(TRUE, dst.sa, mask.sa, rnh, param->ifindex);
    lck_mtx_unlock(rnh_lock);

    if (rt) *out_cookie = (nstat_provider_cookie_t)rt;

    return rt ? 0 : ENOENT;
}

static int
nstat_route_gone(
    nstat_provider_cookie_t cookie)
{
    struct rtentry *rt = (struct rtentry*)cookie;
    return ((rt->rt_flags & RTF_UP) == 0) ? 1 : 0;
}

static errno_t
nstat_route_counts(
    nstat_provider_cookie_t cookie,
    struct nstat_counts *out_counts,
    int *out_gone)
{
    struct rtentry *rt = (struct rtentry*)cookie;
    struct nstat_counts *rt_stats = rt->rt_stats;

    if (out_gone) *out_gone = 0;

    if (out_gone && (rt->rt_flags & RTF_UP) == 0) *out_gone = 1;

    if (rt_stats)
    {
        atomic_get_64(out_counts->nstat_rxpackets, &rt_stats->nstat_rxpackets);
        atomic_get_64(out_counts->nstat_rxbytes, &rt_stats->nstat_rxbytes);
        atomic_get_64(out_counts->nstat_txpackets, &rt_stats->nstat_txpackets);
        atomic_get_64(out_counts->nstat_txbytes, &rt_stats->nstat_txbytes);
        out_counts->nstat_rxduplicatebytes = rt_stats->nstat_rxduplicatebytes;
        out_counts->nstat_rxoutoforderbytes = rt_stats->nstat_rxoutoforderbytes;
        out_counts->nstat_txretransmit = rt_stats->nstat_txretransmit;
        out_counts->nstat_connectattempts = rt_stats->nstat_connectattempts;
        out_counts->nstat_connectsuccesses = rt_stats->nstat_connectsuccesses;
        out_counts->nstat_min_rtt = rt_stats->nstat_min_rtt;
        out_counts->nstat_avg_rtt = rt_stats->nstat_avg_rtt;
        out_counts->nstat_var_rtt = rt_stats->nstat_var_rtt;
        out_counts->nstat_cell_rxbytes = out_counts->nstat_cell_txbytes = 0;
    }
    else
    {
        bzero(out_counts, sizeof(*out_counts));
    }

    return 0;
}

static void
nstat_route_release(
    nstat_provider_cookie_t cookie,
    __unused int locked)
{
    rtfree((struct rtentry*)cookie);
}

static u_int32_t nstat_route_watchers = 0;

static int
nstat_route_walktree_add(
    struct radix_node *rn,
    void *context)
{
    errno_t result = 0;
    struct rtentry *rt = (struct rtentry *)rn;
    nstat_control_state *state = (nstat_control_state*)context;

    lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);

    /* RTF_UP can't change while rnh_lock is held */
    if ((rt->rt_flags & RTF_UP) != 0)
    {
        /* Clear RTPRF_OURS if the route is still usable */
        RT_LOCK(rt);
        if (rt_validate(rt)) {
            RT_ADDREF_LOCKED(rt);
            RT_UNLOCK(rt);
        } else {
            RT_UNLOCK(rt);
            rt = NULL;
        }

        /* Otherwise if RTF_CONDEMNED, treat it as if it were down */
        if (rt == NULL)
            return (0);

        result = nstat_control_source_add(0, state, &nstat_route_provider, rt);
        if (result != 0)
            rtfree_locked(rt);
    }

    return result;
}

static errno_t
nstat_route_add_watcher(
    nstat_control_state *state)
{
    int i;
    errno_t result = 0;
    OSIncrementAtomic(&nstat_route_watchers);

    lck_mtx_lock(rnh_lock);
    for (i = 1; i < AF_MAX; i++)
    {
        struct radix_node_head *rnh;
        rnh = rt_tables[i];
        if (!rnh) continue;

        result = rnh->rnh_walktree(rnh, nstat_route_walktree_add, state);
        if (result != 0)
        {
            break;
        }
    }
    lck_mtx_unlock(rnh_lock);

    return result;
}

__private_extern__ void
nstat_route_new_entry(
    struct rtentry *rt)
{
    if (nstat_route_watchers == 0)
        return;

    lck_mtx_lock(&nstat_mtx);
    if ((rt->rt_flags & RTF_UP) != 0)
    {
        nstat_control_state *state;
        for (state = nstat_controls; state; state = state->ncs_next)
        {
            if ((state->ncs_watching & (1 << NSTAT_PROVIDER_ROUTE)) != 0)
            {
                // this client is watching routes
                // acquire a reference for the route
                RT_ADDREF(rt);

                // add the source, if that fails, release the reference
                if (nstat_control_source_add(0, state, &nstat_route_provider, rt) != 0)
                    RT_REMREF(rt);
            }
        }
    }
    lck_mtx_unlock(&nstat_mtx);
}

static void
nstat_route_remove_watcher(
    __unused nstat_control_state *state)
{
    OSDecrementAtomic(&nstat_route_watchers);
}

static errno_t
nstat_route_copy_descriptor(
    nstat_provider_cookie_t cookie,
    void *data,
    u_int32_t len)
{
    nstat_route_descriptor *desc = (nstat_route_descriptor*)data;
    if (len < sizeof(*desc))
    {
        return EINVAL;
    }
    bzero(desc, sizeof(*desc));

    struct rtentry *rt = (struct rtentry*)cookie;
    desc->id = (uint64_t)VM_KERNEL_ADDRPERM(rt);
    desc->parent_id = (uint64_t)VM_KERNEL_ADDRPERM(rt->rt_parent);
    desc->gateway_id = (uint64_t)VM_KERNEL_ADDRPERM(rt->rt_gwroute);

    // key/dest
    struct sockaddr *sa;
    if ((sa = rt_key(rt)))
        nstat_copy_sa_out(sa, &desc->dst.sa, sizeof(desc->dst));

    // mask
    if ((sa = rt_mask(rt)) && sa->sa_len <= sizeof(desc->mask))
        memcpy(&desc->mask, sa, sa->sa_len);

    // gateway
    if ((sa = rt->rt_gateway))
        nstat_copy_sa_out(sa, &desc->gateway.sa, sizeof(desc->gateway));

    if (rt->rt_ifp)
        desc->ifindex = rt->rt_ifp->if_index;

    desc->flags = rt->rt_flags;

    return 0;
}

static bool
nstat_route_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter)
{
    bool retval = true;

    if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0)
    {
        struct rtentry *rt = (struct rtentry*)cookie;
        struct ifnet *ifp = rt->rt_ifp;

        if (ifp)
        {
            uint16_t interface_properties = nstat_ifnet_to_flags(ifp);

            if ((filter->npf_flags & interface_properties) == 0)
            {
                retval = false;
            }
        }
    }
    return retval;
}

static void
nstat_init_route_provider(void)
{
    bzero(&nstat_route_provider, sizeof(nstat_route_provider));
    nstat_route_provider.nstat_descriptor_length = sizeof(nstat_route_descriptor);
    nstat_route_provider.nstat_provider_id = NSTAT_PROVIDER_ROUTE;
    nstat_route_provider.nstat_lookup = nstat_route_lookup;
    nstat_route_provider.nstat_gone = nstat_route_gone;
    nstat_route_provider.nstat_counts = nstat_route_counts;
    nstat_route_provider.nstat_release = nstat_route_release;
    nstat_route_provider.nstat_watcher_add = nstat_route_add_watcher;
    nstat_route_provider.nstat_watcher_remove = nstat_route_remove_watcher;
    nstat_route_provider.nstat_copy_descriptor = nstat_route_copy_descriptor;
    nstat_route_provider.nstat_reporting_allowed = nstat_route_reporting_allowed;
    nstat_route_provider.next = nstat_providers;
    nstat_providers = &nstat_route_provider;
}

#pragma mark -- Route Collection --

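/*
 * nstat_route_attach lazily allocates the per-route counter block.  The
 * compare-and-swap below resolves the race where two threads attach
 * concurrently: the loser frees its allocation and adopts the winner's.
 */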
static struct nstat_counts*
nstat_route_attach(
    struct rtentry *rte)
{
    struct nstat_counts *result = rte->rt_stats;
    if (result) return result;

    if (nstat_malloc_tag == NULL) nstat_init();

    result = nstat_malloc_aligned(sizeof(*result), sizeof(u_int64_t), nstat_malloc_tag);
    if (!result) return result;

    bzero(result, sizeof(*result));

    if (!OSCompareAndSwapPtr(NULL, result, &rte->rt_stats))
    {
        nstat_free_aligned(result, nstat_malloc_tag);
        result = rte->rt_stats;
    }

    return result;
}

__private_extern__ void
nstat_route_detach(
    struct rtentry *rte)
{
    if (rte->rt_stats)
    {
        nstat_free_aligned(rte->rt_stats, nstat_malloc_tag);
        rte->rt_stats = NULL;
    }
}

__private_extern__ void
nstat_route_connect_attempt(
    struct rtentry *rte)
{
    while (rte)
    {
        struct nstat_counts* stats = nstat_route_attach(rte);
        if (stats)
        {
            OSIncrementAtomic(&stats->nstat_connectattempts);
        }

        rte = rte->rt_parent;
    }
}

__private_extern__ void
nstat_route_connect_success(
    struct rtentry *rte)
{
    // This route
    while (rte)
    {
        struct nstat_counts* stats = nstat_route_attach(rte);
        if (stats)
        {
            OSIncrementAtomic(&stats->nstat_connectsuccesses);
        }

        rte = rte->rt_parent;
    }
}

__private_extern__ void
nstat_route_tx(
    struct rtentry *rte,
    u_int32_t packets,
    u_int32_t bytes,
    u_int32_t flags)
{
    while (rte)
    {
        struct nstat_counts* stats = nstat_route_attach(rte);
        if (stats)
        {
            if ((flags & NSTAT_TX_FLAG_RETRANSMIT) != 0)
            {
                OSAddAtomic(bytes, &stats->nstat_txretransmit);
            }
            else
            {
                OSAddAtomic64((SInt64)packets, (SInt64*)&stats->nstat_txpackets);
                OSAddAtomic64((SInt64)bytes, (SInt64*)&stats->nstat_txbytes);
            }
        }

        rte = rte->rt_parent;
    }
}

__private_extern__ void
nstat_route_rx(
    struct rtentry *rte,
    u_int32_t packets,
    u_int32_t bytes,
    u_int32_t flags)
{
    while (rte)
    {
        struct nstat_counts* stats = nstat_route_attach(rte);
        if (stats)
        {
            if (flags == 0)
            {
                OSAddAtomic64((SInt64)packets, (SInt64*)&stats->nstat_rxpackets);
                OSAddAtomic64((SInt64)bytes, (SInt64*)&stats->nstat_rxbytes);
            }
            else
            {
                if (flags & NSTAT_RX_FLAG_OUT_OF_ORDER)
                    OSAddAtomic(bytes, &stats->nstat_rxoutoforderbytes);
                if (flags & NSTAT_RX_FLAG_DUPLICATE)
                    OSAddAtomic(bytes, &stats->nstat_rxduplicatebytes);
            }
        }

        rte = rte->rt_parent;
    }
}

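/*
 * The average and variance kept below are exponentially weighted moving
 * averages with a smoothing factor of 1/8, i.e. avg += (sample - avg) / 8,
 * analogous to classic TCP srtt smoothing.  Each compare-and-swap loop
 * simply retries if another CPU updated the value in the meantime.
 */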
__private_extern__ void
nstat_route_rtt(
    struct rtentry *rte,
    u_int32_t rtt,
    u_int32_t rtt_var)
{
    const int32_t factor = 8;

    while (rte)
    {
        struct nstat_counts* stats = nstat_route_attach(rte);
        if (stats)
        {
            int32_t oldrtt;
            int32_t newrtt;

            // average
            do
            {
                oldrtt = stats->nstat_avg_rtt;
                if (oldrtt == 0)
                {
                    newrtt = rtt;
                }
                else
                {
                    newrtt = oldrtt - (oldrtt - (int32_t)rtt) / factor;
                }
                if (oldrtt == newrtt) break;
            } while (!OSCompareAndSwap(oldrtt, newrtt, &stats->nstat_avg_rtt));

            // minimum
            do
            {
                oldrtt = stats->nstat_min_rtt;
                if (oldrtt != 0 && oldrtt < (int32_t)rtt)
                {
                    break;
                }
            } while (!OSCompareAndSwap(oldrtt, rtt, &stats->nstat_min_rtt));

            // variance
            do
            {
                oldrtt = stats->nstat_var_rtt;
                if (oldrtt == 0)
                {
                    newrtt = rtt_var;
                }
                else
                {
                    newrtt = oldrtt - (oldrtt - (int32_t)rtt_var) / factor;
                }
                if (oldrtt == newrtt) break;
            } while (!OSCompareAndSwap(oldrtt, newrtt, &stats->nstat_var_rtt));
        }

        rte = rte->rt_parent;
    }
}

#pragma mark -- TCP Kernel Provider --

/*
 * Due to the way the kernel deallocates a process (the process structure
 * might be gone by the time we get the PCB detach notification),
 * we need to cache the process name. Without this, proc_name() would
 * return null and the process name would never be sent to userland.
 *
 * For UDP sockets, we also cache the connection tuples along with
 * the interface index. This is necessary because when UDP sockets are
 * disconnected, the connection tuples are forever lost from the inpcb, thus
 * we need to keep track of the last call to connect() in ntstat.
 */
struct nstat_tucookie {
    struct inpcb *inp;
    char pname[MAXCOMLEN+1];
    bool cached;
    union
    {
        struct sockaddr_in v4;
        struct sockaddr_in6 v6;
    } local;
    union
    {
        struct sockaddr_in v4;
        struct sockaddr_in6 v6;
    } remote;
    unsigned int if_index;
    uint16_t ifnet_properties;
};

static struct nstat_tucookie *
nstat_tucookie_alloc_internal(
    struct inpcb *inp,
    bool ref,
    bool locked)
{
    struct nstat_tucookie *cookie;

    cookie = OSMalloc(sizeof(*cookie), nstat_malloc_tag);
    if (cookie == NULL)
        return NULL;
    if (!locked)
        lck_mtx_assert(&nstat_mtx, LCK_MTX_ASSERT_NOTOWNED);
    if (ref && in_pcb_checkstate(inp, WNT_ACQUIRE, locked) == WNT_STOPUSING)
    {
        OSFree(cookie, sizeof(*cookie), nstat_malloc_tag);
        return NULL;
    }
    bzero(cookie, sizeof(*cookie));
    cookie->inp = inp;
    proc_name(inp->inp_socket->last_pid, cookie->pname,
        sizeof(cookie->pname));
    /*
     * We only increment the reference count for UDP sockets because we
     * only cache UDP socket tuples.
     */
    if (SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP)
        OSIncrementAtomic(&inp->inp_nstat_refcnt);

    return cookie;
}

static struct nstat_tucookie *
nstat_tucookie_alloc(
    struct inpcb *inp)
{
    return nstat_tucookie_alloc_internal(inp, false, false);
}

static struct nstat_tucookie *
nstat_tucookie_alloc_ref(
    struct inpcb *inp)
{
    return nstat_tucookie_alloc_internal(inp, true, false);
}

static struct nstat_tucookie *
nstat_tucookie_alloc_ref_locked(
    struct inpcb *inp)
{
    return nstat_tucookie_alloc_internal(inp, true, true);
}

static void
nstat_tucookie_release_internal(
    struct nstat_tucookie *cookie,
    int inplock)
{
    if (SOCK_PROTO(cookie->inp->inp_socket) == IPPROTO_UDP)
        OSDecrementAtomic(&cookie->inp->inp_nstat_refcnt);
    in_pcb_checkstate(cookie->inp, WNT_RELEASE, inplock);
    OSFree(cookie, sizeof(*cookie), nstat_malloc_tag);
}

static void
nstat_tucookie_release(
    struct nstat_tucookie *cookie)
{
    nstat_tucookie_release_internal(cookie, false);
}

static void
nstat_tucookie_release_locked(
    struct nstat_tucookie *cookie)
{
    nstat_tucookie_release_internal(cookie, true);
}

static nstat_provider nstat_tcp_provider;

static errno_t
nstat_tcpudp_lookup(
    struct inpcbinfo *inpinfo,
    const void *data,
    u_int32_t length,
    nstat_provider_cookie_t *out_cookie)
{
    struct inpcb *inp = NULL;

    // parameter validation
    const nstat_tcp_add_param *param = (const nstat_tcp_add_param*)data;
    if (length < sizeof(*param))
    {
        return EINVAL;
    }

    // src and dst must match
    if (param->remote.v4.sin_family != 0 &&
        param->remote.v4.sin_family != param->local.v4.sin_family)
    {
        return EINVAL;
    }

    switch (param->local.v4.sin_family)
    {
    case AF_INET:
    {
        if (param->local.v4.sin_len != sizeof(param->local.v4) ||
            (param->remote.v4.sin_family != 0 &&
             param->remote.v4.sin_len != sizeof(param->remote.v4)))
        {
            return EINVAL;
        }

        inp = in_pcblookup_hash(inpinfo, param->remote.v4.sin_addr, param->remote.v4.sin_port,
            param->local.v4.sin_addr, param->local.v4.sin_port, 1, NULL);
    }
    break;

#if INET6
    case AF_INET6:
    {
        union
        {
            const struct in6_addr *in6c;
            struct in6_addr *in6;
        } local, remote;

        if (param->local.v6.sin6_len != sizeof(param->local.v6) ||
            (param->remote.v6.sin6_family != 0 &&
             param->remote.v6.sin6_len != sizeof(param->remote.v6)))
        {
            return EINVAL;
        }

        local.in6c = &param->local.v6.sin6_addr;
        remote.in6c = &param->remote.v6.sin6_addr;

        inp = in6_pcblookup_hash(inpinfo, remote.in6, param->remote.v6.sin6_port,
            local.in6, param->local.v6.sin6_port, 1, NULL);
    }
    break;
#endif

    default:
        return EINVAL;
    }

    if (inp == NULL)
        return ENOENT;

    // At this point we have a ref to the inpcb
    *out_cookie = nstat_tucookie_alloc(inp);
    if (*out_cookie == NULL)
        in_pcb_checkstate(inp, WNT_RELEASE, 0);

    return 0;
}

static errno_t
nstat_tcp_lookup(
    const void *data,
    u_int32_t length,
    nstat_provider_cookie_t *out_cookie)
{
    return nstat_tcpudp_lookup(&tcbinfo, data, length, out_cookie);
}

static int
nstat_tcp_gone(
    nstat_provider_cookie_t cookie)
{
    struct nstat_tucookie *tucookie =
        (struct nstat_tucookie *)cookie;
    struct inpcb *inp;
    struct tcpcb *tp;

    return (!(inp = tucookie->inp) ||
        !(tp = intotcpcb(inp)) ||
        inp->inp_state == INPCB_STATE_DEAD) ? 1 : 0;
}

static errno_t
nstat_tcp_counts(
    nstat_provider_cookie_t cookie,
    struct nstat_counts *out_counts,
    int *out_gone)
{
    struct nstat_tucookie *tucookie =
        (struct nstat_tucookie *)cookie;
    struct inpcb *inp;

    bzero(out_counts, sizeof(*out_counts));

    if (out_gone) *out_gone = 0;

    // if the pcb is in the dead state, we should stop using it
    if (nstat_tcp_gone(cookie))
    {
        if (out_gone) *out_gone = 1;
        if (!(inp = tucookie->inp) || !intotcpcb(inp))
            return EINVAL;
    }
    inp = tucookie->inp;
    struct tcpcb *tp = intotcpcb(inp);

    atomic_get_64(out_counts->nstat_rxpackets, &inp->inp_stat->rxpackets);
    atomic_get_64(out_counts->nstat_rxbytes, &inp->inp_stat->rxbytes);
    atomic_get_64(out_counts->nstat_txpackets, &inp->inp_stat->txpackets);
    atomic_get_64(out_counts->nstat_txbytes, &inp->inp_stat->txbytes);
    out_counts->nstat_rxduplicatebytes = tp->t_stat.rxduplicatebytes;
    out_counts->nstat_rxoutoforderbytes = tp->t_stat.rxoutoforderbytes;
    out_counts->nstat_txretransmit = tp->t_stat.txretransmitbytes;
    out_counts->nstat_connectattempts = tp->t_state >= TCPS_SYN_SENT ? 1 : 0;
    out_counts->nstat_connectsuccesses = tp->t_state >= TCPS_ESTABLISHED ? 1 : 0;
    out_counts->nstat_avg_rtt = tp->t_srtt;
    out_counts->nstat_min_rtt = tp->t_rttbest;
    out_counts->nstat_var_rtt = tp->t_rttvar;
    if (out_counts->nstat_avg_rtt < out_counts->nstat_min_rtt)
        out_counts->nstat_min_rtt = out_counts->nstat_avg_rtt;
    atomic_get_64(out_counts->nstat_cell_rxbytes, &inp->inp_cstat->rxbytes);
    atomic_get_64(out_counts->nstat_cell_txbytes, &inp->inp_cstat->txbytes);
    atomic_get_64(out_counts->nstat_wifi_rxbytes, &inp->inp_wstat->rxbytes);
    atomic_get_64(out_counts->nstat_wifi_txbytes, &inp->inp_wstat->txbytes);
    atomic_get_64(out_counts->nstat_wired_rxbytes, &inp->inp_Wstat->rxbytes);
    atomic_get_64(out_counts->nstat_wired_txbytes, &inp->inp_Wstat->txbytes);

    return 0;
}

static void
nstat_tcp_release(
    nstat_provider_cookie_t cookie,
    int locked)
{
    struct nstat_tucookie *tucookie =
        (struct nstat_tucookie *)cookie;

    nstat_tucookie_release_internal(tucookie, locked);
}

static errno_t
nstat_tcp_add_watcher(
    nstat_control_state *state)
{
    OSIncrementAtomic(&nstat_tcp_watchers);

    lck_rw_lock_shared(tcbinfo.ipi_lock);

    // Add all current tcp inpcbs. Ignore those in timewait
    struct inpcb *inp;
    struct nstat_tucookie *cookie;
    LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list)
    {
        cookie = nstat_tucookie_alloc_ref(inp);
        if (cookie == NULL)
            continue;
        if (nstat_control_source_add(0, state, &nstat_tcp_provider,
            cookie) != 0)
        {
            nstat_tucookie_release(cookie);
            break;
        }
    }

    lck_rw_done(tcbinfo.ipi_lock);

    return 0;
}

static void
nstat_tcp_remove_watcher(
    __unused nstat_control_state *state)
{
    OSDecrementAtomic(&nstat_tcp_watchers);
}

__private_extern__ void
nstat_tcp_new_pcb(
    struct inpcb *inp)
{
    struct nstat_tucookie *cookie;

    if (nstat_tcp_watchers == 0)
        return;

    socket_lock(inp->inp_socket, 0);
    lck_mtx_lock(&nstat_mtx);
    nstat_control_state *state;
    for (state = nstat_controls; state; state = state->ncs_next)
    {
        if ((state->ncs_watching & (1 << NSTAT_PROVIDER_TCP_KERNEL)) != 0)
        {
            // this client is watching tcp
            // acquire a reference for it
            cookie = nstat_tucookie_alloc_ref_locked(inp);
            if (cookie == NULL)
                continue;
            // add the source, if that fails, release the reference
            if (nstat_control_source_add(0, state,
                &nstat_tcp_provider, cookie) != 0)
            {
                nstat_tucookie_release_locked(cookie);
                break;
            }
        }
    }
    lck_mtx_unlock(&nstat_mtx);
    socket_unlock(inp->inp_socket, 0);
}

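/*
 * On PCB detach, matching sources are unlinked while the locks are held
 * and collected on a local dead_list; the cleanup itself (which may call
 * back into the provider) runs only after every lock has been dropped,
 * in keeping with the lock ordering described near the top of this file.
 */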
__private_extern__ void
nstat_pcb_detach(struct inpcb *inp)
{
    nstat_control_state *state;
    nstat_src *src, *prevsrc;
    nstat_src *dead_list = NULL;
    struct nstat_tucookie *tucookie;
    errno_t result;

    if (inp == NULL || (nstat_tcp_watchers == 0 && nstat_udp_watchers == 0))
        return;

    lck_mtx_lock(&nstat_mtx);
    for (state = nstat_controls; state; state = state->ncs_next)
    {
        lck_mtx_lock(&state->mtx);
        for (prevsrc = NULL, src = state->ncs_srcs; src;
            prevsrc = src, src = src->next)
        {
            tucookie = (struct nstat_tucookie *)src->cookie;
            if (tucookie->inp == inp)
                break;
        }

        if (src)
        {
            result = nstat_control_send_goodbye(state, src);

            if (prevsrc)
                prevsrc->next = src->next;
            else
                state->ncs_srcs = src->next;

            src->next = dead_list;
            dead_list = src;
        }
        lck_mtx_unlock(&state->mtx);
    }
    lck_mtx_unlock(&nstat_mtx);

    while (dead_list) {
        src = dead_list;
        dead_list = src->next;

        nstat_control_cleanup_source(NULL, src, TRUE);
    }
}

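/*
 * Called as a UDP socket disconnects: snapshot the connection tuples and
 * the last output interface into the cookie before the inpcb forgets
 * them, so later descriptor requests can still report them (see the
 * nstat_tucookie comment above).
 */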
__private_extern__ void
nstat_pcb_cache(struct inpcb *inp)
{
    nstat_control_state *state;
    nstat_src *src;
    struct nstat_tucookie *tucookie;

    if (inp == NULL || nstat_udp_watchers == 0 ||
        inp->inp_nstat_refcnt == 0)
        return;
    VERIFY(SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP);
    lck_mtx_lock(&nstat_mtx);
    for (state = nstat_controls; state; state = state->ncs_next) {
        lck_mtx_lock(&state->mtx);
        for (src = state->ncs_srcs; src; src = src->next)
        {
            tucookie = (struct nstat_tucookie *)src->cookie;
            if (tucookie->inp == inp)
            {
                if (inp->inp_vflag & INP_IPV6)
                {
                    nstat_ip6_to_sockaddr(&inp->in6p_laddr,
                        inp->inp_lport,
                        &tucookie->local.v6,
                        sizeof(tucookie->local));
                    nstat_ip6_to_sockaddr(&inp->in6p_faddr,
                        inp->inp_fport,
                        &tucookie->remote.v6,
                        sizeof(tucookie->remote));
                }
                else if (inp->inp_vflag & INP_IPV4)
                {
                    nstat_ip_to_sockaddr(&inp->inp_laddr,
                        inp->inp_lport,
                        &tucookie->local.v4,
                        sizeof(tucookie->local));
                    nstat_ip_to_sockaddr(&inp->inp_faddr,
                        inp->inp_fport,
                        &tucookie->remote.v4,
                        sizeof(tucookie->remote));
                }
                if (inp->inp_last_outifp)
                    tucookie->if_index =
                        inp->inp_last_outifp->if_index;

                tucookie->ifnet_properties = nstat_inpcb_to_flags(inp);
                tucookie->cached = true;
                break;
            }
        }
        lck_mtx_unlock(&state->mtx);
    }
    lck_mtx_unlock(&nstat_mtx);
}

__private_extern__ void
nstat_pcb_invalidate_cache(struct inpcb *inp)
{
    nstat_control_state *state;
    nstat_src *src;
    struct nstat_tucookie *tucookie;

    if (inp == NULL || nstat_udp_watchers == 0 ||
        inp->inp_nstat_refcnt == 0)
        return;
    VERIFY(SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP);
    lck_mtx_lock(&nstat_mtx);
    for (state = nstat_controls; state; state = state->ncs_next) {
        lck_mtx_lock(&state->mtx);
        for (src = state->ncs_srcs; src; src = src->next)
        {
            tucookie = (struct nstat_tucookie *)src->cookie;
            if (tucookie->inp == inp)
            {
                tucookie->cached = false;
                break;
            }
        }
        lck_mtx_unlock(&state->mtx);
    }
    lck_mtx_unlock(&nstat_mtx);
}

static errno_t
nstat_tcp_copy_descriptor(
    nstat_provider_cookie_t cookie,
    void *data,
    u_int32_t len)
{
    if (len < sizeof(nstat_tcp_descriptor))
    {
        return EINVAL;
    }

    if (nstat_tcp_gone(cookie))
        return EINVAL;

    nstat_tcp_descriptor *desc = (nstat_tcp_descriptor*)data;
    struct nstat_tucookie *tucookie =
        (struct nstat_tucookie *)cookie;
    struct inpcb *inp = tucookie->inp;
    struct tcpcb *tp = intotcpcb(inp);
    bzero(desc, sizeof(*desc));

    if (inp->inp_vflag & INP_IPV6)
    {
        nstat_ip6_to_sockaddr(&inp->in6p_laddr, inp->inp_lport,
            &desc->local.v6, sizeof(desc->local));
        nstat_ip6_to_sockaddr(&inp->in6p_faddr, inp->inp_fport,
            &desc->remote.v6, sizeof(desc->remote));
    }
    else if (inp->inp_vflag & INP_IPV4)
    {
        nstat_ip_to_sockaddr(&inp->inp_laddr, inp->inp_lport,
            &desc->local.v4, sizeof(desc->local));
        nstat_ip_to_sockaddr(&inp->inp_faddr, inp->inp_fport,
            &desc->remote.v4, sizeof(desc->remote));
    }

    desc->state = intotcpcb(inp)->t_state;
    desc->ifindex = (inp->inp_last_outifp == NULL) ? 0 :
        inp->inp_last_outifp->if_index;

    // danger - not locked, values could be bogus
    desc->txunacked = tp->snd_max - tp->snd_una;
    desc->txwindow = tp->snd_wnd;
    desc->txcwindow = tp->snd_cwnd;

    if (CC_ALGO(tp)->name != NULL) {
        strlcpy(desc->cc_algo, CC_ALGO(tp)->name,
            sizeof(desc->cc_algo));
    }

    struct socket *so = inp->inp_socket;
    if (so)
    {
        // TBD - take the socket lock around these to make sure
        // they're in sync?
        desc->upid = so->last_upid;
        desc->pid = so->last_pid;
        desc->traffic_class = so->so_traffic_class;
        if ((so->so_flags1 & SOF1_TRAFFIC_MGT_SO_BACKGROUND))
            desc->traffic_mgt_flags |= TRAFFIC_MGT_SO_BACKGROUND;
        if ((so->so_flags1 & SOF1_TRAFFIC_MGT_TCP_RECVBG))
            desc->traffic_mgt_flags |= TRAFFIC_MGT_TCP_RECVBG;
        proc_name(desc->pid, desc->pname, sizeof(desc->pname));
        if (desc->pname[0] == 0)
        {
            strlcpy(desc->pname, tucookie->pname,
                sizeof(desc->pname));
        }
        else
        {
            desc->pname[sizeof(desc->pname) - 1] = 0;
            strlcpy(tucookie->pname, desc->pname,
                sizeof(tucookie->pname));
        }
        memcpy(desc->uuid, so->last_uuid, sizeof(so->last_uuid));
        memcpy(desc->vuuid, so->so_vuuid, sizeof(so->so_vuuid));
        if (so->so_flags & SOF_DELEGATED) {
            desc->eupid = so->e_upid;
            desc->epid = so->e_pid;
            memcpy(desc->euuid, so->e_uuid, sizeof(so->e_uuid));
        } else {
            desc->eupid = desc->upid;
            desc->epid = desc->pid;
            memcpy(desc->euuid, desc->uuid, sizeof(desc->uuid));
        }
        desc->sndbufsize = so->so_snd.sb_hiwat;
        desc->sndbufused = so->so_snd.sb_cc;
        desc->rcvbufsize = so->so_rcv.sb_hiwat;
        desc->rcvbufused = so->so_rcv.sb_cc;
    }

    tcp_get_connectivity_status(tp, &desc->connstatus);
    desc->ifnet_properties = nstat_inpcb_to_flags(inp);
    return 0;
}

static bool
nstat_tcpudp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter, bool is_UDP)
{
    bool retval = true;

    if ((filter->npf_flags & (NSTAT_FILTER_IFNET_FLAGS|NSTAT_FILTER_SPECIFIC_USER)) != 0)
    {
        struct nstat_tucookie *tucookie = (struct nstat_tucookie *)cookie;
        struct inpcb *inp = tucookie->inp;

        /* Only apply interface filter if at least one is allowed. */
        if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0)
        {
            uint16_t interface_properties = nstat_inpcb_to_flags(inp);

            if ((filter->npf_flags & interface_properties) == 0)
            {
                // For UDP, we could have an undefined interface and yet transfers may have occurred.
                // We allow reporting if there have been transfers of the requested kind.
                // This is imperfect as we cannot account for the expensive attribute over wifi.
                // We also assume that cellular is expensive and we have no way to select for AWDL
                if (is_UDP)
                {
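                    // The do { } while (0) below exists only so that
                    // "break" can act as a structured early-accept once
                    // one of the byte-count checks passes.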
                    do
                    {
                        if ((filter->npf_flags & (NSTAT_FILTER_ACCEPT_CELLULAR|NSTAT_FILTER_ACCEPT_EXPENSIVE)) &&
                            (inp->inp_cstat->rxbytes || inp->inp_cstat->txbytes))
                        {
                            break;
                        }
                        if ((filter->npf_flags & NSTAT_FILTER_ACCEPT_WIFI) &&
                            (inp->inp_wstat->rxbytes || inp->inp_wstat->txbytes))
                        {
                            break;
                        }
                        if ((filter->npf_flags & NSTAT_FILTER_ACCEPT_WIRED) &&
                            (inp->inp_Wstat->rxbytes || inp->inp_Wstat->txbytes))
                        {
                            break;
                        }
                        return false;
                    } while (0);
                }
                else
                {
                    return false;
                }
            }
        }

        if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0) && (retval))
        {
            struct socket *so = inp->inp_socket;
            retval = false;

            if (so)
            {
                if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_PID) != 0) &&
                    (filter->npf_pid == so->last_pid))
                {
                    retval = true;
                }
                else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EPID) != 0) &&
                    (filter->npf_pid == (so->so_flags & SOF_DELEGATED)? so->e_upid : so->last_pid))
                {
                    retval = true;
                }
                else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_UUID) != 0) &&
                    (memcmp(filter->npf_uuid, so->last_uuid, sizeof(so->last_uuid)) == 0))
                {
                    retval = true;
                }
                else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EUUID) != 0) &&
                    (memcmp(filter->npf_uuid, (so->so_flags & SOF_DELEGATED)? so->e_uuid : so->last_uuid,
                    sizeof(so->last_uuid)) == 0))
                {
                    retval = true;
                }
            }
        }
    }
    return retval;
}

static bool
nstat_tcp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter)
{
    return nstat_tcpudp_reporting_allowed(cookie, filter, FALSE);
}

static void
nstat_init_tcp_provider(void)
{
    bzero(&nstat_tcp_provider, sizeof(nstat_tcp_provider));
    nstat_tcp_provider.nstat_descriptor_length = sizeof(nstat_tcp_descriptor);
    nstat_tcp_provider.nstat_provider_id = NSTAT_PROVIDER_TCP_KERNEL;
    nstat_tcp_provider.nstat_lookup = nstat_tcp_lookup;
    nstat_tcp_provider.nstat_gone = nstat_tcp_gone;
    nstat_tcp_provider.nstat_counts = nstat_tcp_counts;
    nstat_tcp_provider.nstat_release = nstat_tcp_release;
    nstat_tcp_provider.nstat_watcher_add = nstat_tcp_add_watcher;
    nstat_tcp_provider.nstat_watcher_remove = nstat_tcp_remove_watcher;
    nstat_tcp_provider.nstat_copy_descriptor = nstat_tcp_copy_descriptor;
    nstat_tcp_provider.nstat_reporting_allowed = nstat_tcp_reporting_allowed;
    nstat_tcp_provider.next = nstat_providers;
    nstat_providers = &nstat_tcp_provider;
}

#pragma mark -- UDP Provider --

static nstat_provider nstat_udp_provider;

static errno_t
nstat_udp_lookup(
    const void *data,
    u_int32_t length,
    nstat_provider_cookie_t *out_cookie)
{
    return nstat_tcpudp_lookup(&udbinfo, data, length, out_cookie);
}

static int
nstat_udp_gone(
    nstat_provider_cookie_t cookie)
{
    struct nstat_tucookie *tucookie =
        (struct nstat_tucookie *)cookie;
    struct inpcb *inp;

    return (!(inp = tucookie->inp) ||
        inp->inp_state == INPCB_STATE_DEAD) ? 1 : 0;
}

static errno_t
nstat_udp_counts(
    nstat_provider_cookie_t cookie,
    struct nstat_counts *out_counts,
    int *out_gone)
{
    struct nstat_tucookie *tucookie =
        (struct nstat_tucookie *)cookie;

    if (out_gone) *out_gone = 0;

    // if the pcb is in the dead state, we should stop using it
    if (nstat_udp_gone(cookie))
    {
        if (out_gone) *out_gone = 1;
        if (!tucookie->inp)
            return EINVAL;
    }
    struct inpcb *inp = tucookie->inp;

    atomic_get_64(out_counts->nstat_rxpackets, &inp->inp_stat->rxpackets);
    atomic_get_64(out_counts->nstat_rxbytes, &inp->inp_stat->rxbytes);
    atomic_get_64(out_counts->nstat_txpackets, &inp->inp_stat->txpackets);
    atomic_get_64(out_counts->nstat_txbytes, &inp->inp_stat->txbytes);
    atomic_get_64(out_counts->nstat_cell_rxbytes, &inp->inp_cstat->rxbytes);
    atomic_get_64(out_counts->nstat_cell_txbytes, &inp->inp_cstat->txbytes);
    atomic_get_64(out_counts->nstat_wifi_rxbytes, &inp->inp_wstat->rxbytes);
    atomic_get_64(out_counts->nstat_wifi_txbytes, &inp->inp_wstat->txbytes);
    atomic_get_64(out_counts->nstat_wired_rxbytes, &inp->inp_Wstat->rxbytes);
    atomic_get_64(out_counts->nstat_wired_txbytes, &inp->inp_Wstat->txbytes);

    return 0;
}

static void
nstat_udp_release(
    nstat_provider_cookie_t cookie,
    int locked)
{
    struct nstat_tucookie *tucookie =
        (struct nstat_tucookie *)cookie;

    nstat_tucookie_release_internal(tucookie, locked);
}

static errno_t
nstat_udp_add_watcher(
    nstat_control_state *state)
{
    struct inpcb *inp;
    struct nstat_tucookie *cookie;

    OSIncrementAtomic(&nstat_udp_watchers);

    lck_rw_lock_shared(udbinfo.ipi_lock);
    // Add all current UDP inpcbs.
    LIST_FOREACH(inp, udbinfo.ipi_listhead, inp_list)
    {
        cookie = nstat_tucookie_alloc_ref(inp);
        if (cookie == NULL)
            continue;
        if (nstat_control_source_add(0, state, &nstat_udp_provider,
            cookie) != 0)
        {
            nstat_tucookie_release(cookie);
            break;
        }
    }

    lck_rw_done(udbinfo.ipi_lock);

    return 0;
}

static void
nstat_udp_remove_watcher(
    __unused nstat_control_state *state)
{
    OSDecrementAtomic(&nstat_udp_watchers);
}

__private_extern__ void
nstat_udp_new_pcb(
    struct inpcb *inp)
{
    struct nstat_tucookie *cookie;

    if (nstat_udp_watchers == 0)
        return;

    socket_lock(inp->inp_socket, 0);
    lck_mtx_lock(&nstat_mtx);
    nstat_control_state *state;
    for (state = nstat_controls; state; state = state->ncs_next)
    {
        if ((state->ncs_watching & (1 << NSTAT_PROVIDER_UDP_KERNEL)) != 0)
        {
            // this client is watching udp
            // acquire a reference for it
            cookie = nstat_tucookie_alloc_ref_locked(inp);
            if (cookie == NULL)
                continue;
            // add the source, if that fails, release the reference
            if (nstat_control_source_add(0, state,
                &nstat_udp_provider, cookie) != 0)
            {
                nstat_tucookie_release_locked(cookie);
                break;
            }
        }
    }
    lck_mtx_unlock(&nstat_mtx);
    socket_unlock(inp->inp_socket, 0);
}

static errno_t
nstat_udp_copy_descriptor(
    nstat_provider_cookie_t cookie,
    void *data,
    u_int32_t len)
{
    if (len < sizeof(nstat_udp_descriptor))
    {
        return EINVAL;
    }

    if (nstat_udp_gone(cookie))
        return EINVAL;

    struct nstat_tucookie *tucookie =
        (struct nstat_tucookie *)cookie;
    nstat_udp_descriptor *desc = (nstat_udp_descriptor*)data;
    struct inpcb *inp = tucookie->inp;

    bzero(desc, sizeof(*desc));

    if (tucookie->cached == false) {
        if (inp->inp_vflag & INP_IPV6)
        {
            nstat_ip6_to_sockaddr(&inp->in6p_laddr, inp->inp_lport,
                &desc->local.v6, sizeof(desc->local.v6));
            nstat_ip6_to_sockaddr(&inp->in6p_faddr, inp->inp_fport,
                &desc->remote.v6, sizeof(desc->remote.v6));
        }
        else if (inp->inp_vflag & INP_IPV4)
        {
            nstat_ip_to_sockaddr(&inp->inp_laddr, inp->inp_lport,
                &desc->local.v4, sizeof(desc->local.v4));
            nstat_ip_to_sockaddr(&inp->inp_faddr, inp->inp_fport,
                &desc->remote.v4, sizeof(desc->remote.v4));
        }
        desc->ifnet_properties = nstat_inpcb_to_flags(inp);
    }
    else
    {
        if (inp->inp_vflag & INP_IPV6)
        {
            memcpy(&desc->local.v6, &tucookie->local.v6,
                sizeof(desc->local.v6));
            memcpy(&desc->remote.v6, &tucookie->remote.v6,
                sizeof(desc->remote.v6));
        }
        else if (inp->inp_vflag & INP_IPV4)
        {
            memcpy(&desc->local.v4, &tucookie->local.v4,
                sizeof(desc->local.v4));
            memcpy(&desc->remote.v4, &tucookie->remote.v4,
                sizeof(desc->remote.v4));
        }
        desc->ifnet_properties = tucookie->ifnet_properties;
    }

    if (inp->inp_last_outifp)
        desc->ifindex = inp->inp_last_outifp->if_index;
    else
        desc->ifindex = tucookie->if_index;

    struct socket *so = inp->inp_socket;
    if (so)
    {
        // TBD - take the socket lock around these to make sure
        // they're in sync?
        desc->upid = so->last_upid;
        desc->pid = so->last_pid;
        proc_name(desc->pid, desc->pname, sizeof(desc->pname));
        if (desc->pname[0] == 0)
        {
            strlcpy(desc->pname, tucookie->pname,
                sizeof(desc->pname));
        }
        else
        {
            desc->pname[sizeof(desc->pname) - 1] = 0;
            strlcpy(tucookie->pname, desc->pname,
                sizeof(tucookie->pname));
        }
        memcpy(desc->uuid, so->last_uuid, sizeof(so->last_uuid));
        memcpy(desc->vuuid, so->so_vuuid, sizeof(so->so_vuuid));
        if (so->so_flags & SOF_DELEGATED) {
            desc->eupid = so->e_upid;
            desc->epid = so->e_pid;
            memcpy(desc->euuid, so->e_uuid, sizeof(so->e_uuid));
        } else {
            desc->eupid = desc->upid;
            desc->epid = desc->pid;
            memcpy(desc->euuid, desc->uuid, sizeof(desc->uuid));
        }
        desc->rcvbufsize = so->so_rcv.sb_hiwat;
        desc->rcvbufused = so->so_rcv.sb_cc;
        desc->traffic_class = so->so_traffic_class;
    }

    return 0;
}

static bool
nstat_udp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter)
{
    return nstat_tcpudp_reporting_allowed(cookie, filter, TRUE);
}

static void
nstat_init_udp_provider(void)
{
    bzero(&nstat_udp_provider, sizeof(nstat_udp_provider));
    nstat_udp_provider.nstat_provider_id = NSTAT_PROVIDER_UDP_KERNEL;
    nstat_udp_provider.nstat_descriptor_length = sizeof(nstat_udp_descriptor);
    nstat_udp_provider.nstat_lookup = nstat_udp_lookup;
    nstat_udp_provider.nstat_gone = nstat_udp_gone;
    nstat_udp_provider.nstat_counts = nstat_udp_counts;
    nstat_udp_provider.nstat_watcher_add = nstat_udp_add_watcher;
    nstat_udp_provider.nstat_watcher_remove = nstat_udp_remove_watcher;
    nstat_udp_provider.nstat_copy_descriptor = nstat_udp_copy_descriptor;
    nstat_udp_provider.nstat_release = nstat_udp_release;
    nstat_udp_provider.nstat_reporting_allowed = nstat_udp_reporting_allowed;
    nstat_udp_provider.next = nstat_providers;
    nstat_providers = &nstat_udp_provider;
}

#pragma mark -- TCP/UDP Userland

// Almost all of this infrastructure is common to both TCP and UDP

static nstat_provider nstat_userland_tcp_provider;
static nstat_provider nstat_userland_udp_provider;

struct nstat_tu_shadow {
    tailq_entry_tu_shadow shad_link;
    userland_stats_request_vals_fn *shad_getvals_fn;
    userland_stats_provider_context *shad_provider_context;
    u_int64_t shad_properties;
    int shad_provider;
    uint32_t shad_magic;
};
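/*
 * A shadow stands in for a socket owned by a userland networking stack
 * (the NSTAT_PROVIDER_*_USERLAND providers).  shad_getvals_fn is supplied
 * by that stack and fills in counts and/or a descriptor on demand;
 * passing NULL for either output skips that portion of the request.
 */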

// Magic number checking should remain in place until the userland provider has been fully proven
#define TU_SHADOW_MAGIC 0xfeedf00d
#define TU_SHADOW_UNMAGIC 0xdeaddeed

static tailq_head_tu_shadow nstat_userprot_shad_head = TAILQ_HEAD_INITIALIZER(nstat_userprot_shad_head);

static errno_t
nstat_userland_tu_lookup(
    __unused const void *data,
    __unused u_int32_t length,
    __unused nstat_provider_cookie_t *out_cookie)
{
    // Looking up a specific connection is not supported
    return ENOTSUP;
}

static int
nstat_userland_tu_gone(
    __unused nstat_provider_cookie_t cookie)
{
    // Returns non-zero if the source has gone.
    // We don't keep a source hanging around, so the answer is always 0
    return 0;
}

static errno_t
nstat_userland_tu_counts(
    nstat_provider_cookie_t cookie,
    struct nstat_counts *out_counts,
    int *out_gone)
{
    struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie;
    assert(shad->shad_magic == TU_SHADOW_MAGIC);

    bool result = (*shad->shad_getvals_fn)(shad->shad_provider_context, out_counts, NULL);

    if (out_gone) *out_gone = 0;

    return (result)? 0 : EIO;
}

static errno_t
nstat_userland_tu_copy_descriptor(
    nstat_provider_cookie_t cookie,
    void *data,
    __unused u_int32_t len)
{
    struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie;
    assert(shad->shad_magic == TU_SHADOW_MAGIC);

    bool result = (*shad->shad_getvals_fn)(shad->shad_provider_context, NULL, data);

    return (result)? 0 : EIO;
}

static void
nstat_userland_tu_release(
    __unused nstat_provider_cookie_t cookie,
    __unused int locked)
{
    // Called when a nstat_src is detached.
    // We don't reference count or ask for delayed release so nothing to do here.
}

static bool
check_reporting_for_user(nstat_provider_filter *filter, pid_t pid, pid_t epid, uuid_t *uuid, uuid_t *euuid)
{
    bool retval = true;

    if ((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0)
    {
        retval = false;

        if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_PID) != 0) &&
            (filter->npf_pid == pid))
        {
            retval = true;
        }
        else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EPID) != 0) &&
            (filter->npf_pid == epid))
        {
            retval = true;
        }
        else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_UUID) != 0) &&
            (memcmp(filter->npf_uuid, uuid, sizeof(*uuid)) == 0))
        {
            retval = true;
        }
        else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EUUID) != 0) &&
            (memcmp(filter->npf_uuid, euuid, sizeof(*euuid)) == 0))
        {
            retval = true;
        }
    }
    return retval;
}

static bool
nstat_userland_tcp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter)
{
    bool retval = true;

    if ((filter->npf_flags & (NSTAT_FILTER_IFNET_FLAGS|NSTAT_FILTER_SPECIFIC_USER)) != 0)
    {
        nstat_tcp_descriptor tcp_desc; // Stack allocation - OK or pushing the limits too far?
        struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie;

        assert(shad->shad_magic == TU_SHADOW_MAGIC);

        if ((*shad->shad_getvals_fn)(shad->shad_provider_context, NULL, &tcp_desc))
        {
            if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0)
            {
                if ((filter->npf_flags & tcp_desc.ifnet_properties) == 0)
                {
                    return false;
                }
            }
            if ((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0)
            {
                retval = check_reporting_for_user(filter, (pid_t)tcp_desc.pid, (pid_t)tcp_desc.epid,
                    &tcp_desc.uuid, &tcp_desc.euuid);
            }
        }
        else
        {
            retval = false; // No further information, so might as well give up now.
        }
    }
    return retval;
}

static bool
nstat_userland_udp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter)
{
    bool retval = true;

    if ((filter->npf_flags & (NSTAT_FILTER_IFNET_FLAGS|NSTAT_FILTER_SPECIFIC_USER)) != 0)
    {
        nstat_udp_descriptor udp_desc; // Stack allocation - OK or pushing the limits too far?
        struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie;

        assert(shad->shad_magic == TU_SHADOW_MAGIC);

        if ((*shad->shad_getvals_fn)(shad->shad_provider_context, NULL, &udp_desc))
        {
            if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0)
            {
                if ((filter->npf_flags & udp_desc.ifnet_properties) == 0)
                {
                    return false;
                }
            }
            if ((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0)
            {
                retval = check_reporting_for_user(filter, (pid_t)udp_desc.pid, (pid_t)udp_desc.epid,
                    &udp_desc.uuid, &udp_desc.euuid);
            }
        }
        else
        {
            retval = false; // No further information, so might as well give up now.
        }
    }
    return retval;
}

static errno_t
nstat_userland_tcp_add_watcher(
    nstat_control_state *state)
{
    struct nstat_tu_shadow *shad;

    OSIncrementAtomic(&nstat_userland_tcp_watchers);

    lck_mtx_lock(&nstat_mtx);

    TAILQ_FOREACH(shad, &nstat_userprot_shad_head, shad_link) {
        assert(shad->shad_magic == TU_SHADOW_MAGIC);

        if (shad->shad_provider == NSTAT_PROVIDER_TCP_USERLAND)
        {
            int result = nstat_control_source_add(0, state, &nstat_userland_tcp_provider, shad);
            if (result != 0)
            {
                printf("%s - nstat_control_source_add returned %d\n", __func__, result);
            }
        }
    }
    lck_mtx_unlock(&nstat_mtx);

    return 0;
}

static errno_t
nstat_userland_udp_add_watcher(
    nstat_control_state *state)
{
    struct nstat_tu_shadow *shad;

    OSIncrementAtomic(&nstat_userland_udp_watchers);

    lck_mtx_lock(&nstat_mtx);

    TAILQ_FOREACH(shad, &nstat_userprot_shad_head, shad_link) {
        assert(shad->shad_magic == TU_SHADOW_MAGIC);

        if (shad->shad_provider == NSTAT_PROVIDER_UDP_USERLAND)
        {
            int result = nstat_control_source_add(0, state, &nstat_userland_udp_provider, shad);
            if (result != 0)
            {
                printf("%s - nstat_control_source_add returned %d\n", __func__, result);
            }
        }
    }
    lck_mtx_unlock(&nstat_mtx);

    return 0;
}

static void
nstat_userland_tcp_remove_watcher(
    __unused nstat_control_state *state)
2102 OSDecrementAtomic(&nstat_userland_tcp_watchers);
2103 }
2104
2105 static void
2106 nstat_userland_udp_remove_watcher(
2107 __unused nstat_control_state *state)
2108 {
2109 OSDecrementAtomic(&nstat_userland_udp_watchers);
2110 }
2111
2112 static void
2113 nstat_init_userland_tcp_provider(void)
2114 {
2115 bzero(&nstat_userland_tcp_provider, sizeof(nstat_userland_tcp_provider));
2116 nstat_userland_tcp_provider.nstat_descriptor_length = sizeof(nstat_tcp_descriptor);
2117 nstat_userland_tcp_provider.nstat_provider_id = NSTAT_PROVIDER_TCP_USERLAND;
2118 nstat_userland_tcp_provider.nstat_lookup = nstat_userland_tu_lookup;
2119 nstat_userland_tcp_provider.nstat_gone = nstat_userland_tu_gone;
2120 nstat_userland_tcp_provider.nstat_counts = nstat_userland_tu_counts;
2121 nstat_userland_tcp_provider.nstat_release = nstat_userland_tu_release;
2122 nstat_userland_tcp_provider.nstat_watcher_add = nstat_userland_tcp_add_watcher;
2123 nstat_userland_tcp_provider.nstat_watcher_remove = nstat_userland_tcp_remove_watcher;
2124 nstat_userland_tcp_provider.nstat_copy_descriptor = nstat_userland_tu_copy_descriptor;
2125 nstat_userland_tcp_provider.nstat_reporting_allowed = nstat_userland_tcp_reporting_allowed;
2126 nstat_userland_tcp_provider.next = nstat_providers;
2127 nstat_providers = &nstat_userland_tcp_provider;
2128 }
2129
2130
2131 static void
2132 nstat_init_userland_udp_provider(void)
2133 {
2134 bzero(&nstat_userland_udp_provider, sizeof(nstat_userland_udp_provider));
2135 nstat_userland_udp_provider.nstat_descriptor_length = sizeof(nstat_udp_descriptor);
2136 nstat_userland_udp_provider.nstat_provider_id = NSTAT_PROVIDER_UDP_USERLAND;
2137 nstat_userland_udp_provider.nstat_lookup = nstat_userland_tu_lookup;
2138 nstat_userland_udp_provider.nstat_gone = nstat_userland_tu_gone;
2139 nstat_userland_udp_provider.nstat_counts = nstat_userland_tu_counts;
2140 nstat_userland_udp_provider.nstat_release = nstat_userland_tu_release;
2141 nstat_userland_udp_provider.nstat_watcher_add = nstat_userland_udp_add_watcher;
2142 nstat_userland_udp_provider.nstat_watcher_remove = nstat_userland_udp_remove_watcher;
2143 nstat_userland_udp_provider.nstat_copy_descriptor = nstat_userland_tu_copy_descriptor;
2144 nstat_userland_udp_provider.nstat_reporting_allowed = nstat_userland_udp_reporting_allowed;
2145 nstat_userland_udp_provider.next = nstat_providers;
2146 nstat_providers = &nstat_userland_udp_provider;
2147 }
2148
2149
2150
2151 // Things get started with a call into ntstat to say that there's a new connection:
2152 __private_extern__ nstat_userland_context
2153 ntstat_userland_stats_open(userland_stats_provider_context *ctx,
2154 int provider_id,
2155 u_int64_t properties,
2156 userland_stats_request_vals_fn req_fn)
2157 {
2158 struct nstat_tu_shadow *shad;
2159
2160 if ((provider_id != NSTAT_PROVIDER_TCP_USERLAND) && (provider_id != NSTAT_PROVIDER_UDP_USERLAND))
2161 {
2162 printf("%s - incorrect provider is supplied, %d\n", __func__, provider_id);
2163 return NULL;
2164 }
2165
2166 shad = OSMalloc(sizeof(*shad), nstat_malloc_tag);
2167 if (shad == NULL)
2168 return NULL;
2169
2170 shad->shad_getvals_fn = req_fn;
2171 shad->shad_provider_context = ctx;
2172 shad->shad_provider = provider_id;
2173 shad->shad_properties = properties;
2174 shad->shad_magic = TU_SHADOW_MAGIC;
2175
2176 lck_mtx_lock(&nstat_mtx);
2177 nstat_control_state *state;
2178
2179 // Even if there are no watchers, we save the shadow structure
2180 TAILQ_INSERT_HEAD(&nstat_userprot_shad_head, shad, shad_link);
2181
2182 for (state = nstat_controls; state; state = state->ncs_next)
2183 {
2184 if ((state->ncs_watching & (1 << provider_id)) != 0)
2185 {
2186 // this client is watching tcp/udp userland
2187 // Link to it via the provider matching provider_id.
2188 int result = nstat_control_source_add(0, state, (provider_id == NSTAT_PROVIDER_TCP_USERLAND) ? &nstat_userland_tcp_provider : &nstat_userland_udp_provider, shad);
2189 if (result != 0)
2190 {
2191 printf("%s - nstat_control_source_add returned %d\n", __func__, result);
2192 }
2193 }
2194 }
2195 lck_mtx_unlock(&nstat_mtx);
2196
2197 return (nstat_userland_context)shad;
2198 }
2199
2200
2201 __private_extern__ void
2202 ntstat_userland_stats_close(nstat_userland_context nstat_ctx)
2203 {
2204 struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)nstat_ctx;
2205 nstat_src *dead_list = NULL;
2206
2207 if (shad == NULL)
2208 return;
2209
2210 assert(shad->shad_magic == TU_SHADOW_MAGIC);
2211
2212 lck_mtx_lock(&nstat_mtx);
2213 if (nstat_userland_udp_watchers != 0 || nstat_userland_tcp_watchers != 0)
2214 {
2215 nstat_control_state *state;
2216 nstat_src *src, *prevsrc;
2217 errno_t result;
2218
2219 for (state = nstat_controls; state; state = state->ncs_next)
2220 {
2221 lck_mtx_lock(&state->mtx);
2222 for (prevsrc = NULL, src = state->ncs_srcs; src;
2223 prevsrc = src, src = src->next)
2224 {
2225 if (shad == (struct nstat_tu_shadow *)src->cookie)
2226 break;
2227 }
2228
2229 if (src)
2230 {
2231 result = nstat_control_send_goodbye(state, src);
2232
2233 if (prevsrc)
2234 prevsrc->next = src->next;
2235 else
2236 state->ncs_srcs = src->next;
2237
2238 src->next = dead_list;
2239 dead_list = src;
2240 }
2241 lck_mtx_unlock(&state->mtx);
2242 }
2243 }
2244 TAILQ_REMOVE(&nstat_userprot_shad_head, shad, shad_link);
2245
2246 lck_mtx_unlock(&nstat_mtx);
2247
2248 while (dead_list)
2249 {
2250 nstat_src *src;
2251 src = dead_list;
2252 dead_list = src->next;
2253
2254 nstat_control_cleanup_source(NULL, src, TRUE);
2255 }
2256
2257 shad->shad_magic = TU_SHADOW_UNMAGIC;
2258
2259 OSFree(shad, sizeof(*shad), nstat_malloc_tag);
2260 }
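/*
 * Illustrative lifecycle sketch (not part of the build).  A userland stack
 * shim registers each connection when it is created and tears it down when
 * the connection dies; my_ctx and my_getvals below are hypothetical names.
 * The callback has the shape used by the calls above: it fills in whichever
 * of the counts/metadata pointers is non-NULL and returns true on success.
 *
 *	static bool
 *	my_getvals(userland_stats_provider_context *ctx,
 *	    struct nstat_counts *countsp, void *metadatap)
 *	{
 *		// Copy the stack's per-connection counters into *countsp
 *		// and/or its nstat_tcp_descriptor into metadatap.
 *		return true;
 *	}
 *
 *	nstat_userland_context h;
 *	h = ntstat_userland_stats_open(my_ctx, NSTAT_PROVIDER_TCP_USERLAND,
 *	    properties, my_getvals);
 *	// ... connection lives ...
 *	ntstat_userland_stats_close(h);
 */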
2261
2262
2263 __private_extern__ void
2264 ntstat_userland_stats_event(
2265 __unused nstat_userland_context context,
2266 __unused userland_stats_event_t event)
2267 {
2268 // This is a dummy for when we hook up event reporting to NetworkStatistics.
2269 // See <rdar://problem/23022832> NetworkStatistics should provide opt-in notifications
2270 }
2271
2272
2273
2274
2275 #pragma mark -- ifnet Provider --
2276
2277 static nstat_provider nstat_ifnet_provider;
2278
2279 /*
2280 * We store a pointer to the ifnet and the original threshold
2281 * requested by the client.
2282 */
2283 struct nstat_ifnet_cookie
2284 {
2285 struct ifnet *ifp;
2286 uint64_t threshold;
2287 };
2288
2289 static errno_t
2290 nstat_ifnet_lookup(
2291 const void *data,
2292 u_int32_t length,
2293 nstat_provider_cookie_t *out_cookie)
2294 {
2295 const nstat_ifnet_add_param *param = (const nstat_ifnet_add_param *)data;
2296 struct ifnet *ifp;
2297 boolean_t changed = FALSE;
2298 nstat_control_state *state;
2299 nstat_src *src;
2300 struct nstat_ifnet_cookie *cookie;
2301
2302 if (length < sizeof(*param) || param->threshold < 1024*1024)
2303 return EINVAL;
2304 if (nstat_privcheck != 0) {
2305 errno_t result = priv_check_cred(kauth_cred_get(),
2306 PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0);
2307 if (result != 0)
2308 return result;
2309 }
2310 cookie = OSMalloc(sizeof(*cookie), nstat_malloc_tag);
2311 if (cookie == NULL)
2312 return ENOMEM;
2313 bzero(cookie, sizeof(*cookie));
2314
2315 ifnet_head_lock_shared();
2316 TAILQ_FOREACH(ifp, &ifnet_head, if_link)
2317 {
2318 ifnet_lock_exclusive(ifp);
2319 if (ifp->if_index == param->ifindex)
2320 {
2321 cookie->ifp = ifp;
2322 cookie->threshold = param->threshold;
2323 *out_cookie = cookie;
2324 if (!ifp->if_data_threshold ||
2325 ifp->if_data_threshold > param->threshold)
2326 {
2327 changed = TRUE;
2328 ifp->if_data_threshold = param->threshold;
2329 }
2330 ifnet_lock_done(ifp);
2331 ifnet_reference(ifp);
2332 break;
2333 }
2334 ifnet_lock_done(ifp);
2335 }
2336 ifnet_head_done();
2337
2338 /*
2339 * When we change the threshold to something smaller, we notify
2340 * all of our clients with a description message.
2341 * We won't send a message to the client we are currently serving
2342 * because it has no `ifnet source' yet.
2343 */
2344 if (changed)
2345 {
2346 lck_mtx_lock(&nstat_mtx);
2347 for (state = nstat_controls; state; state = state->ncs_next)
2348 {
2349 lck_mtx_lock(&state->mtx);
2350 for (src = state->ncs_srcs; src; src = src->next)
2351 {
2352 if (src->provider != &nstat_ifnet_provider)
2353 continue;
2354 nstat_control_send_description(state, src, 0, 0);
2355 }
2356 lck_mtx_unlock(&state->mtx);
2357 }
2358 lck_mtx_unlock(&nstat_mtx);
2359 }
2360 if (cookie->ifp == NULL)
2361 OSFree(cookie, sizeof(*cookie), nstat_malloc_tag);
2362
2363 return ifp ? 0 : EINVAL;
2364 }
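/*
 * Illustrative sketch (not part of the build): the lookup above expects an
 * nstat_ifnet_add_param carrying the interface index and a byte threshold
 * of at least 1MB; smaller thresholds are rejected with EINVAL.
 *
 *	nstat_ifnet_add_param p = { 0 };
 *	p.ifindex = 4;			// hypothetical index, e.g. en0
 *	p.threshold = 2 * 1024 * 1024;	// notify after ~2MB of traffic
 */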
2365
2366 static int
2367 nstat_ifnet_gone(
2368 nstat_provider_cookie_t cookie)
2369 {
2370 struct ifnet *ifp;
2371 struct nstat_ifnet_cookie *ifcookie =
2372 (struct nstat_ifnet_cookie *)cookie;
2373
2374 ifnet_head_lock_shared();
2375 TAILQ_FOREACH(ifp, &ifnet_head, if_link)
2376 {
2377 if (ifp == ifcookie->ifp)
2378 break;
2379 }
2380 ifnet_head_done();
2381
2382 return ifp ? 0 : 1;
2383 }
2384
2385 static errno_t
2386 nstat_ifnet_counts(
2387 nstat_provider_cookie_t cookie,
2388 struct nstat_counts *out_counts,
2389 int *out_gone)
2390 {
2391 struct nstat_ifnet_cookie *ifcookie =
2392 (struct nstat_ifnet_cookie *)cookie;
2393 struct ifnet *ifp = ifcookie->ifp;
2394
2395 if (out_gone) *out_gone = 0;
2396
2397 // if the ifnet is gone, we should stop using it
2398 if (nstat_ifnet_gone(cookie))
2399 {
2400 if (out_gone) *out_gone = 1;
2401 return EINVAL;
2402 }
2403
2404 bzero(out_counts, sizeof(*out_counts));
2405 out_counts->nstat_rxpackets = ifp->if_ipackets;
2406 out_counts->nstat_rxbytes = ifp->if_ibytes;
2407 out_counts->nstat_txpackets = ifp->if_opackets;
2408 out_counts->nstat_txbytes = ifp->if_obytes;
2409 out_counts->nstat_cell_rxbytes = out_counts->nstat_cell_txbytes = 0;
2410 return 0;
2411 }
2412
2413 static void
2414 nstat_ifnet_release(
2415 nstat_provider_cookie_t cookie,
2416 __unused int locked)
2417 {
2418 struct nstat_ifnet_cookie *ifcookie;
2419 struct ifnet *ifp;
2420 nstat_control_state *state;
2421 nstat_src *src;
2422 uint64_t minthreshold = UINT64_MAX;
2423
2424 /*
2425 * Find all the clients that requested a threshold
2426 * for this ifnet and re-calculate if_data_threshold.
2427 */
2428 lck_mtx_lock(&nstat_mtx);
2429 for (state = nstat_controls; state; state = state->ncs_next)
2430 {
2431 lck_mtx_lock(&state->mtx);
2432 for (src = state->ncs_srcs; src; src = src->next)
2433 {
2434 /* Skip the provider we are about to detach. */
2435 if (src->provider != &nstat_ifnet_provider ||
2436 src->cookie == cookie)
2437 continue;
2438 ifcookie = (struct nstat_ifnet_cookie *)src->cookie;
2439 if (ifcookie->threshold < minthreshold)
2440 minthreshold = ifcookie->threshold;
2441 }
2442 lck_mtx_unlock(&state->mtx);
2443 }
2444 lck_mtx_unlock(&nstat_mtx);
2445 /*
2446 * Reset if_data_threshold or disable it.
2447 */
2448 ifcookie = (struct nstat_ifnet_cookie *)cookie;
2449 ifp = ifcookie->ifp;
2450 if (ifnet_is_attached(ifp, 1)) {
2451 ifnet_lock_exclusive(ifp);
2452 if (minthreshold == UINT64_MAX)
2453 ifp->if_data_threshold = 0;
2454 else
2455 ifp->if_data_threshold = minthreshold;
2456 ifnet_lock_done(ifp);
2457 ifnet_decr_iorefcnt(ifp);
2458 }
2459 ifnet_release(ifp);
2460 OSFree(ifcookie, sizeof(*ifcookie), nstat_malloc_tag);
2461 }
2462
2463 static void
2464 nstat_ifnet_copy_link_status(
2465 struct ifnet *ifp,
2466 struct nstat_ifnet_descriptor *desc)
2467 {
2468 struct if_link_status *ifsr = ifp->if_link_status;
2469 nstat_ifnet_desc_link_status *link_status = &desc->link_status;
2470
2471 link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_NONE;
2472 if (ifsr == NULL)
2473 return;
2474
2475 lck_rw_lock_shared(&ifp->if_link_status_lock);
2476
2477 if (ifp->if_type == IFT_CELLULAR) {
2478
2479 nstat_ifnet_desc_cellular_status *cell_status = &link_status->u.cellular;
2480 struct if_cellular_status_v1 *if_cell_sr =
2481 &ifsr->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
2482
2483 if (ifsr->ifsr_version != IF_CELLULAR_STATUS_REPORT_VERSION_1)
2484 goto done;
2485
2486 link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_CELLULAR;
2487
2488 if (if_cell_sr->valid_bitmask & IF_CELL_LINK_QUALITY_METRIC_VALID) {
2489 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_LINK_QUALITY_METRIC_VALID;
2490 cell_status->link_quality_metric = if_cell_sr->link_quality_metric;
2491 }
2492 if (if_cell_sr->valid_bitmask & IF_CELL_UL_EFFECTIVE_BANDWIDTH_VALID) {
2493 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_EFFECTIVE_BANDWIDTH_VALID;
2494 cell_status->ul_effective_bandwidth = if_cell_sr->ul_effective_bandwidth;
2495 }
2496 if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_BANDWIDTH_VALID) {
2497 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_BANDWIDTH_VALID;
2498 cell_status->ul_max_bandwidth = if_cell_sr->ul_max_bandwidth;
2499 }
2500 if (if_cell_sr->valid_bitmask & IF_CELL_UL_MIN_LATENCY_VALID) {
2501 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MIN_LATENCY_VALID;
2502 cell_status->ul_min_latency = if_cell_sr->ul_min_latency;
2503 }
2504 if (if_cell_sr->valid_bitmask & IF_CELL_UL_EFFECTIVE_LATENCY_VALID) {
2505 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_EFFECTIVE_LATENCY_VALID;
2506 cell_status->ul_effective_latency = if_cell_sr->ul_effective_latency;
2507 }
2508 if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_LATENCY_VALID) {
2509 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_LATENCY_VALID;
2510 cell_status->ul_max_latency = if_cell_sr->ul_max_latency;
2511 }
2512 if (if_cell_sr->valid_bitmask & IF_CELL_UL_RETXT_LEVEL_VALID) {
2513 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_VALID;
2514 if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_NONE)
2515 cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_NONE;
2516 else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_LOW)
2517 cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_LOW;
2518 else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_MEDIUM)
2519 cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_MEDIUM;
2520 else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_HIGH)
2521 cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_HIGH;
2522 else
2523 cell_status->valid_bitmask &= ~NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_VALID;
2524 }
2525 if (if_cell_sr->valid_bitmask & IF_CELL_UL_BYTES_LOST_VALID) {
2526 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_BYTES_LOST_VALID;
2527 cell_status->ul_bytes_lost = if_cell_sr->ul_bytes_lost;
2528 }
2529 if (if_cell_sr->valid_bitmask & IF_CELL_UL_MIN_QUEUE_SIZE_VALID) {
2530 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MIN_QUEUE_SIZE_VALID;
2531 cell_status->ul_min_queue_size = if_cell_sr->ul_min_queue_size;
2532 }
2533 if (if_cell_sr->valid_bitmask & IF_CELL_UL_AVG_QUEUE_SIZE_VALID) {
2534 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_AVG_QUEUE_SIZE_VALID;
2535 cell_status->ul_avg_queue_size = if_cell_sr->ul_avg_queue_size;
2536 }
2537 if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_QUEUE_SIZE_VALID) {
2538 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_QUEUE_SIZE_VALID;
2539 cell_status->ul_max_queue_size = if_cell_sr->ul_max_queue_size;
2540 }
2541 if (if_cell_sr->valid_bitmask & IF_CELL_DL_EFFECTIVE_BANDWIDTH_VALID) {
2542 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_DL_EFFECTIVE_BANDWIDTH_VALID;
2543 cell_status->dl_effective_bandwidth = if_cell_sr->dl_effective_bandwidth;
2544 }
2545 if (if_cell_sr->valid_bitmask & IF_CELL_DL_MAX_BANDWIDTH_VALID) {
2546 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_DL_MAX_BANDWIDTH_VALID;
2547 cell_status->dl_max_bandwidth = if_cell_sr->dl_max_bandwidth;
2548 }
2549 if (if_cell_sr->valid_bitmask & IF_CELL_CONFIG_INACTIVITY_TIME_VALID) {
2550 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_CONFIG_INACTIVITY_TIME_VALID;
2551 cell_status->config_inactivity_time = if_cell_sr->config_inactivity_time;
2552 }
2553 if (if_cell_sr->valid_bitmask & IF_CELL_CONFIG_BACKOFF_TIME_VALID) {
2554 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_CONFIG_BACKOFF_TIME_VALID;
2555 cell_status->config_backoff_time = if_cell_sr->config_backoff_time;
2556 }
2557 if (if_cell_sr->valid_bitmask & IF_CELL_UL_MSS_RECOMMENDED_VALID) {
2558 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_MSS_RECOMMENDED_VALID;
2559 cell_status->mss_recommended = if_cell_sr->mss_recommended;
2560 }
2561 } else if (ifp->if_subfamily == IFNET_SUBFAMILY_WIFI) {
2562
2563 nstat_ifnet_desc_wifi_status *wifi_status = &link_status->u.wifi;
2564 struct if_wifi_status_v1 *if_wifi_sr =
2565 &ifsr->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;
2566
2567 if (ifsr->ifsr_version != IF_WIFI_STATUS_REPORT_VERSION_1)
2568 goto done;
2569
2570 link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_WIFI;
2571
2572 if (if_wifi_sr->valid_bitmask & IF_WIFI_LINK_QUALITY_METRIC_VALID) {
2573 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_LINK_QUALITY_METRIC_VALID;
2574 wifi_status->link_quality_metric = if_wifi_sr->link_quality_metric;
2575 }
2576 if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID) {
2577 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID;
2578 wifi_status->ul_effective_bandwidth = if_wifi_sr->ul_effective_bandwidth;
2579 }
2580 if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MAX_BANDWIDTH_VALID) {
2581 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MAX_BANDWIDTH_VALID;
2582 wifi_status->ul_max_bandwidth = if_wifi_sr->ul_max_bandwidth;
2583 }
2584 if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MIN_LATENCY_VALID) {
2585 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MIN_LATENCY_VALID;
2586 wifi_status->ul_min_latency = if_wifi_sr->ul_min_latency;
2587 }
2588 if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_EFFECTIVE_LATENCY_VALID) {
2589 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_EFFECTIVE_LATENCY_VALID;
2590 wifi_status->ul_effective_latency = if_wifi_sr->ul_effective_latency;
2591 }
2592 if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MAX_LATENCY_VALID) {
2593 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MAX_LATENCY_VALID;
2594 wifi_status->ul_max_latency = if_wifi_sr->ul_max_latency;
2595 }
2596 if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_RETXT_LEVEL_VALID) {
2597 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_VALID;
2598 if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_NONE)
2599 wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_NONE;
2600 else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_LOW)
2601 wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_LOW;
2602 else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_MEDIUM)
2603 wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_MEDIUM;
2604 else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_HIGH)
2605 wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_HIGH;
2606 else
2607 wifi_status->valid_bitmask &= ~NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_VALID;
2608 }
2609 if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_BYTES_LOST_VALID) {
2610 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_BYTES_LOST_VALID;
2611 wifi_status->ul_bytes_lost = if_wifi_sr->ul_bytes_lost;
2612 }
2613 if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_ERROR_RATE_VALID) {
2614 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_ERROR_RATE_VALID;
2615 wifi_status->ul_error_rate = if_wifi_sr->ul_error_rate;
2616 }
2617 if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID) {
2618 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID;
2619 wifi_status->dl_effective_bandwidth = if_wifi_sr->dl_effective_bandwidth;
2620 }
2621 if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MAX_BANDWIDTH_VALID) {
2622 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MAX_BANDWIDTH_VALID;
2623 wifi_status->dl_max_bandwidth = if_wifi_sr->dl_max_bandwidth;
2624 }
2625 if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MIN_LATENCY_VALID) {
2626 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MIN_LATENCY_VALID;
2627 wifi_status->dl_min_latency = if_wifi_sr->dl_min_latency;
2628 }
2629 if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_EFFECTIVE_LATENCY_VALID) {
2630 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_EFFECTIVE_LATENCY_VALID;
2631 wifi_status->dl_effective_latency = if_wifi_sr->dl_effective_latency;
2632 }
2633 if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MAX_LATENCY_VALID) {
2634 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MAX_LATENCY_VALID;
2635 wifi_status->dl_max_latency = if_wifi_sr->dl_max_latency;
2636 }
2637 if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_ERROR_RATE_VALID) {
2638 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_ERROR_RATE_VALID;
2639 wifi_status->dl_error_rate = if_wifi_sr->dl_error_rate;
2640 }
2641 if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_FREQUENCY_VALID) {
2642 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_VALID;
2643 if (if_wifi_sr->config_frequency == IF_WIFI_CONFIG_FREQUENCY_2_4_GHZ)
2644 wifi_status->config_frequency = NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_2_4_GHZ;
2645 else if (if_wifi_sr->config_frequency == IF_WIFI_CONFIG_FREQUENCY_5_0_GHZ)
2646 wifi_status->config_frequency = NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_5_0_GHZ;
2647 else
2648 wifi_status->valid_bitmask &= ~NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_VALID;
2649 }
2650 if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_MULTICAST_RATE_VALID) {
2651 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_MULTICAST_RATE_VALID;
2652 wifi_status->config_multicast_rate = if_wifi_sr->config_multicast_rate;
2653 }
2654 if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_SCAN_COUNT_VALID) {
2655 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_SCAN_COUNT_VALID;
2656 wifi_status->scan_count = if_wifi_sr->scan_count;
2657 }
2658 if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_SCAN_DURATION_VALID) {
2659 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_SCAN_DURATION_VALID;
2660 wifi_status->scan_duration = if_wifi_sr->scan_duration;
2661 }
2662 }
2663
2664 done:
2665 lck_rw_done(&ifp->if_link_status_lock);
2666 }
2667
2668 static u_int64_t nstat_ifnet_last_report_time = 0;
2669 extern int tcp_report_stats_interval;
2670
2671 static void
2672 nstat_ifnet_compute_percentages(struct if_tcp_ecn_perf_stat *ifst)
2673 {
2674 /* Retransmit percentage */
2675 if (ifst->total_rxmitpkts > 0 && ifst->total_txpkts > 0) {
2676 /* shift by 10 for precision */
2677 ifst->rxmit_percent =
2678 ((ifst->total_rxmitpkts << 10) * 100) / ifst->total_txpkts;
2679 } else {
2680 ifst->rxmit_percent = 0;
2681 }
2682
2683 /* Out-of-order percentage */
2684 if (ifst->total_oopkts > 0 && ifst->total_rxpkts > 0) {
2685 /* shift by 10 for precision */
2686 ifst->oo_percent =
2687 ((ifst->total_oopkts << 10) * 100) / ifst->total_rxpkts;
2688 } else {
2689 ifst->oo_percent = 0;
2690 }
2691
2692 /* Reorder percentage */
2693 if (ifst->total_reorderpkts > 0 &&
2694 (ifst->total_txpkts + ifst->total_rxpkts) > 0) {
2695 /* shift by 10 for precision */
2696 ifst->reorder_percent =
2697 ((ifst->total_reorderpkts << 10) * 100) /
2698 (ifst->total_txpkts + ifst->total_rxpkts);
2699 } else {
2700 ifst->reorder_percent = 0;
2701 }
2702 }
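/*
 * The percentages above are fixed point, scaled by << 10 (Q10), not
 * floating point.  Worked example: 5 retransmitted packets out of 1000
 * transmitted gives rxmit_percent = ((5 << 10) * 100) / 1000 = 512, and
 * 512 / 1024 = 0.5%.  Consumers divide by 1024 to recover the percentage.
 */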
2703
2704 static void
2705 nstat_ifnet_normalize_counter(struct if_tcp_ecn_stat *if_st)
2706 {
2707 u_int64_t ecn_on_conn, ecn_off_conn;
2708
2709 if (if_st == NULL)
2710 return;
2711 ecn_on_conn = if_st->ecn_client_success +
2712 if_st->ecn_server_success;
2713 ecn_off_conn = if_st->ecn_off_conn +
2714 (if_st->ecn_client_setup - if_st->ecn_client_success) +
2715 (if_st->ecn_server_setup - if_st->ecn_server_success);
2716
2717 /*
2718 * report sack episodes, rst_drop and rxmit_drop
2719 * as a ratio per connection, shift by 10 for precision
2720 */
2721 if (ecn_on_conn > 0) {
2722 if_st->ecn_on.sack_episodes =
2723 (if_st->ecn_on.sack_episodes << 10) / ecn_on_conn;
2724 if_st->ecn_on.rst_drop =
2725 (if_st->ecn_on.rst_drop << 10) * 100 / ecn_on_conn;
2726 if_st->ecn_on.rxmit_drop =
2727 (if_st->ecn_on.rxmit_drop << 10) * 100 / ecn_on_conn;
2728 } else {
2729 /* set to zero, just in case */
2730 if_st->ecn_on.sack_episodes = 0;
2731 if_st->ecn_on.rst_drop = 0;
2732 if_st->ecn_on.rxmit_drop = 0;
2733 }
2734
2735 if (ecn_off_conn > 0) {
2736 if_st->ecn_off.sack_episodes =
2737 (if_st->ecn_off.sack_episodes << 10) / ecn_off_conn;
2738 if_st->ecn_off.rst_drop =
2739 (if_st->ecn_off.rst_drop << 10) * 100 / ecn_off_conn;
2740 if_st->ecn_off.rxmit_drop =
2741 (if_st->ecn_off.rxmit_drop << 10) * 100 / ecn_off_conn;
2742 } else {
2743 if_st->ecn_off.sack_episodes = 0;
2744 if_st->ecn_off.rst_drop = 0;
2745 if_st->ecn_off.rxmit_drop = 0;
2746 }
2747 if_st->ecn_total_conn = ecn_off_conn + ecn_on_conn;
2748 }
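/*
 * Same Q10 scaling as the percentages above.  Worked example: 10 ECN-on
 * connections with 4 RST drops gives ecn_on.rst_drop =
 * (4 << 10) * 100 / 10 = 40960, i.e. 40960 / 1024 = 40 drops per 100
 * connections.  sack_episodes is a plain per-connection ratio (no * 100).
 */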
2749
2750 static void
2751 nstat_ifnet_report_ecn_stats(void)
2752 {
2753 u_int64_t uptime, last_report_time;
2754 struct nstat_sysinfo_data data;
2755 struct nstat_sysinfo_ifnet_ecn_stats *st;
2756 struct ifnet *ifp;
2757
2758 uptime = net_uptime();
2759
2760 if ((int)(uptime - nstat_ifnet_last_report_time) <
2761 tcp_report_stats_interval)
2762 return;
2763
2764 last_report_time = nstat_ifnet_last_report_time;
2765 nstat_ifnet_last_report_time = uptime;
2766 data.flags = NSTAT_SYSINFO_IFNET_ECN_STATS;
2767 st = &data.u.ifnet_ecn_stats;
2768
2769 ifnet_head_lock_shared();
2770 TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
2771 if (ifp->if_ipv4_stat == NULL || ifp->if_ipv6_stat == NULL)
2772 continue;
2773
2774 if ((ifp->if_refflags & (IFRF_ATTACHED | IFRF_DETACHING)) !=
2775 IFRF_ATTACHED)
2776 continue;
2777
2778 /* Limit reporting to Wi-Fi, Ethernet and cellular (Wi-Fi is part of the Ethernet family). */
2779 if (!(IFNET_IS_ETHERNET(ifp) || IFNET_IS_CELLULAR(ifp)))
2780 continue;
2781
2782 bzero(st, sizeof(*st));
2783 if (IFNET_IS_CELLULAR(ifp)) {
2784 st->ifnet_type = NSTAT_IFNET_ECN_TYPE_CELLULAR;
2785 } else if (IFNET_IS_WIFI(ifp)) {
2786 st->ifnet_type = NSTAT_IFNET_ECN_TYPE_WIFI;
2787 } else {
2788 st->ifnet_type = NSTAT_IFNET_ECN_TYPE_ETHERNET;
2789 }
2790 data.unsent_data_cnt = ifp->if_unsent_data_cnt;
2791 /* skip if there was no update since last report */
2792 if (ifp->if_ipv4_stat->timestamp <= 0 ||
2793 ifp->if_ipv4_stat->timestamp < last_report_time)
2794 goto v6;
2795 st->ifnet_proto = NSTAT_IFNET_ECN_PROTO_IPV4;
2796 /* compute percentages using packet counts */
2797 nstat_ifnet_compute_percentages(&ifp->if_ipv4_stat->ecn_on);
2798 nstat_ifnet_compute_percentages(&ifp->if_ipv4_stat->ecn_off);
2799 nstat_ifnet_normalize_counter(ifp->if_ipv4_stat);
2800 bcopy(ifp->if_ipv4_stat, &st->ecn_stat,
2801 sizeof(st->ecn_stat));
2802 nstat_sysinfo_send_data(&data);
2803 bzero(ifp->if_ipv4_stat, sizeof(*ifp->if_ipv4_stat));
2804
2805 v6:
2806 /* skip if there was no update since last report */
2807 if (ifp->if_ipv6_stat->timestamp <= 0 ||
2808 ifp->if_ipv6_stat->timestamp < last_report_time)
2809 continue;
2810 st->ifnet_proto = NSTAT_IFNET_ECN_PROTO_IPV6;
2811
2812 /* compute percentages using packet counts */
2813 nstat_ifnet_compute_percentages(&ifp->if_ipv6_stat->ecn_on);
2814 nstat_ifnet_compute_percentages(&ifp->if_ipv6_stat->ecn_off);
2815 nstat_ifnet_normalize_counter(ifp->if_ipv6_stat);
2816 bcopy(ifp->if_ipv6_stat, &st->ecn_stat,
2817 sizeof(st->ecn_stat));
2818 nstat_sysinfo_send_data(&data);
2819
2820 /* Zero the stats in ifp */
2821 bzero(ifp->if_ipv6_stat, sizeof(*ifp->if_ipv6_stat));
2822 }
2823 ifnet_head_done();
2824
2825 }
2826
2827 static errno_t
2828 nstat_ifnet_copy_descriptor(
2829 nstat_provider_cookie_t cookie,
2830 void *data,
2831 u_int32_t len)
2832 {
2833 nstat_ifnet_descriptor *desc = (nstat_ifnet_descriptor *)data;
2834 struct nstat_ifnet_cookie *ifcookie =
2835 (struct nstat_ifnet_cookie *)cookie;
2836 struct ifnet *ifp = ifcookie->ifp;
2837
2838 if (len < sizeof(nstat_ifnet_descriptor))
2839 return EINVAL;
2840
2841 if (nstat_ifnet_gone(cookie))
2842 return EINVAL;
2843
2844 bzero(desc, sizeof(*desc));
2845 ifnet_lock_shared(ifp);
2846 strlcpy(desc->name, ifp->if_xname, sizeof(desc->name));
2847 desc->ifindex = ifp->if_index;
2848 desc->threshold = ifp->if_data_threshold;
2849 desc->type = ifp->if_type;
2850 if (ifp->if_desc.ifd_len < sizeof(desc->description))
2851 memcpy(desc->description, ifp->if_desc.ifd_desc,
2852 sizeof(desc->description));
2853 nstat_ifnet_copy_link_status(ifp, desc);
2854 ifnet_lock_done(ifp);
2855 return 0;
2856 }
2857
2858 static void
2859 nstat_init_ifnet_provider(void)
2860 {
2861 bzero(&nstat_ifnet_provider, sizeof(nstat_ifnet_provider));
2862 nstat_ifnet_provider.nstat_provider_id = NSTAT_PROVIDER_IFNET;
2863 nstat_ifnet_provider.nstat_descriptor_length = sizeof(nstat_ifnet_descriptor);
2864 nstat_ifnet_provider.nstat_lookup = nstat_ifnet_lookup;
2865 nstat_ifnet_provider.nstat_gone = nstat_ifnet_gone;
2866 nstat_ifnet_provider.nstat_counts = nstat_ifnet_counts;
2867 nstat_ifnet_provider.nstat_watcher_add = NULL;
2868 nstat_ifnet_provider.nstat_watcher_remove = NULL;
2869 nstat_ifnet_provider.nstat_copy_descriptor = nstat_ifnet_copy_descriptor;
2870 nstat_ifnet_provider.nstat_release = nstat_ifnet_release;
2871 nstat_ifnet_provider.next = nstat_providers;
2872 nstat_providers = &nstat_ifnet_provider;
2873 }
2874
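/*
 * Invoked from the interface layer once roughly if_data_threshold bytes
 * have accumulated since the last notification; every client holding an
 * ifnet source for that interface is sent a fresh counts message.
 */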
2875 __private_extern__ void
2876 nstat_ifnet_threshold_reached(unsigned int ifindex)
2877 {
2878 nstat_control_state *state;
2879 nstat_src *src;
2880 struct ifnet *ifp;
2881 struct nstat_ifnet_cookie *ifcookie;
2882
2883 lck_mtx_lock(&nstat_mtx);
2884 for (state = nstat_controls; state; state = state->ncs_next)
2885 {
2886 lck_mtx_lock(&state->mtx);
2887 for (src = state->ncs_srcs; src; src = src->next)
2888 {
2889 if (src->provider != &nstat_ifnet_provider)
2890 continue;
2891 ifcookie = (struct nstat_ifnet_cookie *)src->cookie;
2892 ifp = ifcookie->ifp;
2893 if (ifp->if_index != ifindex)
2894 continue;
2895 nstat_control_send_counts(state, src, 0, 0, NULL);
2896 }
2897 lck_mtx_unlock(&state->mtx);
2898 }
2899 lck_mtx_unlock(&nstat_mtx);
2900 }
2901
2902 #pragma mark -- Sysinfo --
2903 static void
2904 nstat_set_keyval_scalar(nstat_sysinfo_keyval *kv, int key, u_int32_t val)
2905 {
2906 kv->nstat_sysinfo_key = key;
2907 kv->nstat_sysinfo_flags = NSTAT_SYSINFO_FLAG_SCALAR;
2908 kv->u.nstat_sysinfo_scalar = val;
2909 }
2910
2911 static void
2912 nstat_sysinfo_send_data_internal(
2913 nstat_control_state *control,
2914 nstat_sysinfo_data *data)
2915 {
2916 nstat_msg_sysinfo_counts *syscnt = NULL;
2917 size_t allocsize = 0, countsize = 0, nkeyvals = 0, finalsize = 0;
2918 nstat_sysinfo_keyval *kv;
2919 errno_t result = 0;
2920 size_t i = 0;
2921
2922 allocsize = offsetof(nstat_msg_sysinfo_counts, counts);
2923 countsize = offsetof(nstat_sysinfo_counts, nstat_sysinfo_keyvals);
2924 finalsize = allocsize;
2925
2926 /* get number of key-vals for each kind of stat */
2927 switch (data->flags)
2928 {
2929 case NSTAT_SYSINFO_MBUF_STATS:
2930 nkeyvals = sizeof(struct nstat_sysinfo_mbuf_stats) /
2931 sizeof(u_int32_t);
2932 break;
2933 case NSTAT_SYSINFO_TCP_STATS:
2934 nkeyvals = sizeof(struct nstat_sysinfo_tcp_stats) /
2935 sizeof(u_int32_t);
2936 break;
2937 case NSTAT_SYSINFO_IFNET_ECN_STATS:
2938 nkeyvals = (sizeof(struct if_tcp_ecn_stat) /
2939 sizeof(u_int64_t));
2940
2941 /* Two more keys for ifnet type and proto */
2942 nkeyvals += 2;
2943
2944 /* One key for unsent data. */
2945 nkeyvals++;
2946 break;
2947 default:
2948 return;
2949 }
2950 countsize += sizeof(nstat_sysinfo_keyval) * nkeyvals;
2951 allocsize += countsize;
2952
2953 syscnt = OSMalloc(allocsize, nstat_malloc_tag);
2954 if (syscnt == NULL)
2955 return;
2956 bzero(syscnt, allocsize);
2957
2958 kv = (nstat_sysinfo_keyval *) &syscnt->counts.nstat_sysinfo_keyvals;
2959 switch (data->flags)
2960 {
2961 case NSTAT_SYSINFO_MBUF_STATS:
2962 {
2963 nstat_set_keyval_scalar(&kv[i++],
2964 NSTAT_SYSINFO_KEY_MBUF_256B_TOTAL,
2965 data->u.mb_stats.total_256b);
2966 nstat_set_keyval_scalar(&kv[i++],
2967 NSTAT_SYSINFO_KEY_MBUF_2KB_TOTAL,
2968 data->u.mb_stats.total_2kb);
2969 nstat_set_keyval_scalar(&kv[i++],
2970 NSTAT_SYSINFO_KEY_MBUF_4KB_TOTAL,
2971 data->u.mb_stats.total_4kb);
2972 nstat_set_keyval_scalar(&kv[i++],
2973 NSTAT_SYSINFO_MBUF_16KB_TOTAL,
2974 data->u.mb_stats.total_16kb);
2975 nstat_set_keyval_scalar(&kv[i++],
2976 NSTAT_SYSINFO_KEY_SOCK_MBCNT,
2977 data->u.mb_stats.sbmb_total);
2978 nstat_set_keyval_scalar(&kv[i++],
2979 NSTAT_SYSINFO_KEY_SOCK_ATMBLIMIT,
2980 data->u.mb_stats.sb_atmbuflimit);
2981 nstat_set_keyval_scalar(&kv[i++],
2982 NSTAT_SYSINFO_MBUF_DRAIN_CNT,
2983 data->u.mb_stats.draincnt);
2984 nstat_set_keyval_scalar(&kv[i++],
2985 NSTAT_SYSINFO_MBUF_MEM_RELEASED,
2986 data->u.mb_stats.memreleased);
2987 nstat_set_keyval_scalar(&kv[i++],
2988 NSTAT_SYSINFO_KEY_SOCK_MBFLOOR,
2989 data->u.mb_stats.sbmb_floor);
2990 VERIFY(i == nkeyvals);
2991 break;
2992 }
2993 case NSTAT_SYSINFO_TCP_STATS:
2994 {
2995 nstat_set_keyval_scalar(&kv[i++],
2996 NSTAT_SYSINFO_KEY_IPV4_AVGRTT,
2997 data->u.tcp_stats.ipv4_avgrtt);
2998 nstat_set_keyval_scalar(&kv[i++],
2999 NSTAT_SYSINFO_KEY_IPV6_AVGRTT,
3000 data->u.tcp_stats.ipv6_avgrtt);
3001 nstat_set_keyval_scalar(&kv[i++],
3002 NSTAT_SYSINFO_KEY_SEND_PLR,
3003 data->u.tcp_stats.send_plr);
3004 nstat_set_keyval_scalar(&kv[i++],
3005 NSTAT_SYSINFO_KEY_RECV_PLR,
3006 data->u.tcp_stats.recv_plr);
3007 nstat_set_keyval_scalar(&kv[i++],
3008 NSTAT_SYSINFO_KEY_SEND_TLRTO,
3009 data->u.tcp_stats.send_tlrto_rate);
3010 nstat_set_keyval_scalar(&kv[i++],
3011 NSTAT_SYSINFO_KEY_SEND_REORDERRATE,
3012 data->u.tcp_stats.send_reorder_rate);
3013 nstat_set_keyval_scalar(&kv[i++],
3014 NSTAT_SYSINFO_CONNECTION_ATTEMPTS,
3015 data->u.tcp_stats.connection_attempts);
3016 nstat_set_keyval_scalar(&kv[i++],
3017 NSTAT_SYSINFO_CONNECTION_ACCEPTS,
3018 data->u.tcp_stats.connection_accepts);
3019 nstat_set_keyval_scalar(&kv[i++],
3020 NSTAT_SYSINFO_ECN_CLIENT_ENABLED,
3021 data->u.tcp_stats.ecn_client_enabled);
3022 nstat_set_keyval_scalar(&kv[i++],
3023 NSTAT_SYSINFO_ECN_SERVER_ENABLED,
3024 data->u.tcp_stats.ecn_server_enabled);
3025 nstat_set_keyval_scalar(&kv[i++],
3026 NSTAT_SYSINFO_ECN_CLIENT_SETUP,
3027 data->u.tcp_stats.ecn_client_setup);
3028 nstat_set_keyval_scalar(&kv[i++],
3029 NSTAT_SYSINFO_ECN_SERVER_SETUP,
3030 data->u.tcp_stats.ecn_server_setup);
3031 nstat_set_keyval_scalar(&kv[i++],
3032 NSTAT_SYSINFO_ECN_CLIENT_SUCCESS,
3033 data->u.tcp_stats.ecn_client_success);
3034 nstat_set_keyval_scalar(&kv[i++],
3035 NSTAT_SYSINFO_ECN_SERVER_SUCCESS,
3036 data->u.tcp_stats.ecn_server_success);
3037 nstat_set_keyval_scalar(&kv[i++],
3038 NSTAT_SYSINFO_ECN_NOT_SUPPORTED,
3039 data->u.tcp_stats.ecn_not_supported);
3040 nstat_set_keyval_scalar(&kv[i++],
3041 NSTAT_SYSINFO_ECN_LOST_SYN,
3042 data->u.tcp_stats.ecn_lost_syn);
3043 nstat_set_keyval_scalar(&kv[i++],
3044 NSTAT_SYSINFO_ECN_LOST_SYNACK,
3045 data->u.tcp_stats.ecn_lost_synack);
3046 nstat_set_keyval_scalar(&kv[i++],
3047 NSTAT_SYSINFO_ECN_RECV_CE,
3048 data->u.tcp_stats.ecn_recv_ce);
3049 nstat_set_keyval_scalar(&kv[i++],
3050 NSTAT_SYSINFO_ECN_RECV_ECE,
3051 data->u.tcp_stats.ecn_recv_ece);
3052 nstat_set_keyval_scalar(&kv[i++],
3053 NSTAT_SYSINFO_ECN_SENT_ECE,
3054 data->u.tcp_stats.ecn_sent_ece);
3055 nstat_set_keyval_scalar(&kv[i++],
3056 NSTAT_SYSINFO_ECN_CONN_RECV_CE,
3057 data->u.tcp_stats.ecn_conn_recv_ce);
3058 nstat_set_keyval_scalar(&kv[i++],
3059 NSTAT_SYSINFO_ECN_CONN_RECV_ECE,
3060 data->u.tcp_stats.ecn_conn_recv_ece);
3061 nstat_set_keyval_scalar(&kv[i++],
3062 NSTAT_SYSINFO_ECN_CONN_PLNOCE,
3063 data->u.tcp_stats.ecn_conn_plnoce);
3064 nstat_set_keyval_scalar(&kv[i++],
3065 NSTAT_SYSINFO_ECN_CONN_PL_CE,
3066 data->u.tcp_stats.ecn_conn_pl_ce);
3067 nstat_set_keyval_scalar(&kv[i++],
3068 NSTAT_SYSINFO_ECN_CONN_NOPL_CE,
3069 data->u.tcp_stats.ecn_conn_nopl_ce);
3070 nstat_set_keyval_scalar(&kv[i++],
3071 NSTAT_SYSINFO_ECN_FALLBACK_SYNLOSS,
3072 data->u.tcp_stats.ecn_fallback_synloss);
3073 nstat_set_keyval_scalar(&kv[i++],
3074 NSTAT_SYSINFO_ECN_FALLBACK_REORDER,
3075 data->u.tcp_stats.ecn_fallback_reorder);
3076 nstat_set_keyval_scalar(&kv[i++],
3077 NSTAT_SYSINFO_ECN_FALLBACK_CE,
3078 data->u.tcp_stats.ecn_fallback_ce);
3079 nstat_set_keyval_scalar(&kv[i++],
3080 NSTAT_SYSINFO_TFO_SYN_DATA_RCV,
3081 data->u.tcp_stats.tfo_syn_data_rcv);
3082 nstat_set_keyval_scalar(&kv[i++],
3083 NSTAT_SYSINFO_TFO_COOKIE_REQ_RCV,
3084 data->u.tcp_stats.tfo_cookie_req_rcv);
3085 nstat_set_keyval_scalar(&kv[i++],
3086 NSTAT_SYSINFO_TFO_COOKIE_SENT,
3087 data->u.tcp_stats.tfo_cookie_sent);
3088 nstat_set_keyval_scalar(&kv[i++],
3089 NSTAT_SYSINFO_TFO_COOKIE_INVALID,
3090 data->u.tcp_stats.tfo_cookie_invalid);
3091 nstat_set_keyval_scalar(&kv[i++],
3092 NSTAT_SYSINFO_TFO_COOKIE_REQ,
3093 data->u.tcp_stats.tfo_cookie_req);
3094 nstat_set_keyval_scalar(&kv[i++],
3095 NSTAT_SYSINFO_TFO_COOKIE_RCV,
3096 data->u.tcp_stats.tfo_cookie_rcv);
3097 nstat_set_keyval_scalar(&kv[i++],
3098 NSTAT_SYSINFO_TFO_SYN_DATA_SENT,
3099 data->u.tcp_stats.tfo_syn_data_sent);
3100 nstat_set_keyval_scalar(&kv[i++],
3101 NSTAT_SYSINFO_TFO_SYN_DATA_ACKED,
3102 data->u.tcp_stats.tfo_syn_data_acked);
3103 nstat_set_keyval_scalar(&kv[i++],
3104 NSTAT_SYSINFO_TFO_SYN_LOSS,
3105 data->u.tcp_stats.tfo_syn_loss);
3106 nstat_set_keyval_scalar(&kv[i++],
3107 NSTAT_SYSINFO_TFO_BLACKHOLE,
3108 data->u.tcp_stats.tfo_blackhole);
3109 nstat_set_keyval_scalar(&kv[i++],
3110 NSTAT_SYSINFO_TFO_COOKIE_WRONG,
3111 data->u.tcp_stats.tfo_cookie_wrong);
3112 nstat_set_keyval_scalar(&kv[i++],
3113 NSTAT_SYSINFO_TFO_NO_COOKIE_RCV,
3114 data->u.tcp_stats.tfo_no_cookie_rcv);
3115 nstat_set_keyval_scalar(&kv[i++],
3116 NSTAT_SYSINFO_TFO_HEURISTICS_DISABLE,
3117 data->u.tcp_stats.tfo_heuristics_disable);
3118 nstat_set_keyval_scalar(&kv[i++],
3119 NSTAT_SYSINFO_TFO_SEND_BLACKHOLE,
3120 data->u.tcp_stats.tfo_sndblackhole);
3121 VERIFY(i == nkeyvals);
3122 break;
3123 }
3124 case NSTAT_SYSINFO_IFNET_ECN_STATS:
3125 {
3126 nstat_set_keyval_scalar(&kv[i++],
3127 NSTAT_SYSINFO_ECN_IFNET_TYPE,
3128 data->u.ifnet_ecn_stats.ifnet_type);
3129 nstat_set_keyval_scalar(&kv[i++],
3130 NSTAT_SYSINFO_ECN_IFNET_PROTO,
3131 data->u.ifnet_ecn_stats.ifnet_proto);
3132 nstat_set_keyval_scalar(&kv[i++],
3133 NSTAT_SYSINFO_ECN_IFNET_CLIENT_SETUP,
3134 data->u.ifnet_ecn_stats.ecn_stat.ecn_client_setup);
3135 nstat_set_keyval_scalar(&kv[i++],
3136 NSTAT_SYSINFO_ECN_IFNET_SERVER_SETUP,
3137 data->u.ifnet_ecn_stats.ecn_stat.ecn_server_setup);
3138 nstat_set_keyval_scalar(&kv[i++],
3139 NSTAT_SYSINFO_ECN_IFNET_CLIENT_SUCCESS,
3140 data->u.ifnet_ecn_stats.ecn_stat.ecn_client_success);
3141 nstat_set_keyval_scalar(&kv[i++],
3142 NSTAT_SYSINFO_ECN_IFNET_SERVER_SUCCESS,
3143 data->u.ifnet_ecn_stats.ecn_stat.ecn_server_success);
3144 nstat_set_keyval_scalar(&kv[i++],
3145 NSTAT_SYSINFO_ECN_IFNET_PEER_NOSUPPORT,
3146 data->u.ifnet_ecn_stats.ecn_stat.ecn_peer_nosupport);
3147 nstat_set_keyval_scalar(&kv[i++],
3148 NSTAT_SYSINFO_ECN_IFNET_SYN_LOST,
3149 data->u.ifnet_ecn_stats.ecn_stat.ecn_syn_lost);
3150 nstat_set_keyval_scalar(&kv[i++],
3151 NSTAT_SYSINFO_ECN_IFNET_SYNACK_LOST,
3152 data->u.ifnet_ecn_stats.ecn_stat.ecn_synack_lost);
3153 nstat_set_keyval_scalar(&kv[i++],
3154 NSTAT_SYSINFO_ECN_IFNET_RECV_CE,
3155 data->u.ifnet_ecn_stats.ecn_stat.ecn_recv_ce);
3156 nstat_set_keyval_scalar(&kv[i++],
3157 NSTAT_SYSINFO_ECN_IFNET_RECV_ECE,
3158 data->u.ifnet_ecn_stats.ecn_stat.ecn_recv_ece);
3159 nstat_set_keyval_scalar(&kv[i++],
3160 NSTAT_SYSINFO_ECN_IFNET_CONN_RECV_CE,
3161 data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_recv_ce);
3162 nstat_set_keyval_scalar(&kv[i++],
3163 NSTAT_SYSINFO_ECN_IFNET_CONN_RECV_ECE,
3164 data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_recv_ece);
3165 nstat_set_keyval_scalar(&kv[i++],
3166 NSTAT_SYSINFO_ECN_IFNET_CONN_PLNOCE,
3167 data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_plnoce);
3168 nstat_set_keyval_scalar(&kv[i++],
3169 NSTAT_SYSINFO_ECN_IFNET_CONN_PLCE,
3170 data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_plce);
3171 nstat_set_keyval_scalar(&kv[i++],
3172 NSTAT_SYSINFO_ECN_IFNET_CONN_NOPLCE,
3173 data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_noplce);
3174 nstat_set_keyval_scalar(&kv[i++],
3175 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_SYNLOSS,
3176 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_synloss);
3177 nstat_set_keyval_scalar(&kv[i++],
3178 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_REORDER,
3179 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_reorder);
3180 nstat_set_keyval_scalar(&kv[i++],
3181 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_CE,
3182 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_ce);
3183 nstat_set_keyval_scalar(&kv[i++],
3184 NSTAT_SYSINFO_ECN_IFNET_ON_RTT_AVG,
3185 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rtt_avg);
3186 nstat_set_keyval_scalar(&kv[i++],
3187 NSTAT_SYSINFO_ECN_IFNET_ON_RTT_VAR,
3188 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rtt_var);
3189 nstat_set_keyval_scalar(&kv[i++],
3190 NSTAT_SYSINFO_ECN_IFNET_ON_OOPERCENT,
3191 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.oo_percent);
3192 nstat_set_keyval_scalar(&kv[i++],
3193 NSTAT_SYSINFO_ECN_IFNET_ON_SACK_EPISODE,
3194 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.sack_episodes);
3195 nstat_set_keyval_scalar(&kv[i++],
3196 NSTAT_SYSINFO_ECN_IFNET_ON_REORDER_PERCENT,
3197 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.reorder_percent);
3198 nstat_set_keyval_scalar(&kv[i++],
3199 NSTAT_SYSINFO_ECN_IFNET_ON_RXMIT_PERCENT,
3200 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rxmit_percent);
3201 nstat_set_keyval_scalar(&kv[i++],
3202 NSTAT_SYSINFO_ECN_IFNET_ON_RXMIT_DROP,
3203 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rxmit_drop);
3204 nstat_set_keyval_scalar(&kv[i++],
3205 NSTAT_SYSINFO_ECN_IFNET_OFF_RTT_AVG,
3206 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rtt_avg);
3207 nstat_set_keyval_scalar(&kv[i++],
3208 NSTAT_SYSINFO_ECN_IFNET_OFF_RTT_VAR,
3209 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rtt_var);
3210 nstat_set_keyval_scalar(&kv[i++],
3211 NSTAT_SYSINFO_ECN_IFNET_OFF_OOPERCENT,
3212 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.oo_percent);
3213 nstat_set_keyval_scalar(&kv[i++],
3214 NSTAT_SYSINFO_ECN_IFNET_OFF_SACK_EPISODE,
3215 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.sack_episodes);
3216 nstat_set_keyval_scalar(&kv[i++],
3217 NSTAT_SYSINFO_ECN_IFNET_OFF_REORDER_PERCENT,
3218 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.reorder_percent);
3219 nstat_set_keyval_scalar(&kv[i++],
3220 NSTAT_SYSINFO_ECN_IFNET_OFF_RXMIT_PERCENT,
3221 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rxmit_percent);
3222 nstat_set_keyval_scalar(&kv[i++],
3223 NSTAT_SYSINFO_ECN_IFNET_OFF_RXMIT_DROP,
3224 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rxmit_drop);
3225 nstat_set_keyval_scalar(&kv[i++],
3226 NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_TXPKTS,
3227 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_txpkts);
3228 nstat_set_keyval_scalar(&kv[i++],
3229 NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_RXMTPKTS,
3230 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_rxmitpkts);
3231 nstat_set_keyval_scalar(&kv[i++],
3232 NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_RXPKTS,
3233 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_rxpkts);
3234 nstat_set_keyval_scalar(&kv[i++],
3235 NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_OOPKTS,
3236 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_oopkts);
3237 nstat_set_keyval_scalar(&kv[i++],
3238 NSTAT_SYSINFO_ECN_IFNET_ON_DROP_RST,
3239 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rst_drop);
3240 nstat_set_keyval_scalar(&kv[i++],
3241 NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_TXPKTS,
3242 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_txpkts);
3243 nstat_set_keyval_scalar(&kv[i++],
3244 NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_RXMTPKTS,
3245 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_rxmitpkts);
3246 nstat_set_keyval_scalar(&kv[i++],
3247 NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_RXPKTS,
3248 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_rxpkts);
3249 nstat_set_keyval_scalar(&kv[i++],
3250 NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_OOPKTS,
3251 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_oopkts);
3252 nstat_set_keyval_scalar(&kv[i++],
3253 NSTAT_SYSINFO_ECN_IFNET_OFF_DROP_RST,
3254 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rst_drop);
3255 nstat_set_keyval_scalar(&kv[i++],
3256 NSTAT_SYSINFO_ECN_IFNET_TOTAL_CONN,
3257 data->u.ifnet_ecn_stats.ecn_stat.ecn_total_conn);
3258 nstat_set_keyval_scalar(&kv[i++],
3259 NSTAT_SYSINFO_IFNET_UNSENT_DATA,
3260 data->unsent_data_cnt);
3261 nstat_set_keyval_scalar(&kv[i++],
3262 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_DROPRST,
3263 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_droprst);
3264 nstat_set_keyval_scalar(&kv[i++],
3265 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_DROPRXMT,
3266 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_droprxmt);
3267 break;
3268 }
3269 }
3270 if (syscnt != NULL)
3271 {
3272 VERIFY(i > 0 && i <= nkeyvals);
3273 countsize = offsetof(nstat_sysinfo_counts,
3274 nstat_sysinfo_keyvals) +
3275 sizeof(nstat_sysinfo_keyval) * i;
3276 finalsize += countsize;
3277 syscnt->hdr.type = NSTAT_MSG_TYPE_SYSINFO_COUNTS;
3278 syscnt->hdr.length = finalsize;
3279 syscnt->counts.nstat_sysinfo_len = countsize;
3280
3281 result = ctl_enqueuedata(control->ncs_kctl,
3282 control->ncs_unit, syscnt, finalsize, CTL_DATA_EOR);
3283 if (result != 0)
3284 {
3285 nstat_stats.nstat_sysinfofailures += 1;
3286 }
3287 OSFree(syscnt, allocsize, nstat_malloc_tag);
3288 }
3289 return;
3290 }
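/*
 * Wire layout of the message assembled above, for reference:
 *
 *	nstat_msg_hdr hdr;		// NSTAT_MSG_TYPE_SYSINFO_COUNTS, length == finalsize
 *	nstat_sysinfo_counts counts;	// nstat_sysinfo_len == countsize
 *	nstat_sysinfo_keyval kv[i];	// i scalar key/value pairs
 *
 * finalsize is trimmed down to the i keyvals actually written, which may
 * be fewer than the nkeyvals upper bound derived from the struct sizes.
 */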
3291
3292 __private_extern__ void
3293 nstat_sysinfo_send_data(
3294 nstat_sysinfo_data *data)
3295 {
3296 nstat_control_state *control;
3297
3298 lck_mtx_lock(&nstat_mtx);
3299 for (control = nstat_controls; control; control = control->ncs_next)
3300 {
3301 lck_mtx_lock(&control->mtx);
3302 if ((control->ncs_flags & NSTAT_FLAG_SYSINFO_SUBSCRIBED) != 0)
3303 {
3304 nstat_sysinfo_send_data_internal(control, data);
3305 }
3306 lck_mtx_unlock(&control->mtx);
3307 }
3308 lck_mtx_unlock(&nstat_mtx);
3309 }
3310
3311 static void
3312 nstat_sysinfo_generate_report(void)
3313 {
3314 mbuf_report_peak_usage();
3315 tcp_report_stats();
3316 nstat_ifnet_report_ecn_stats();
3317 }
3318
3319 #pragma mark -- Kernel Control Socket --
3320
3321 static kern_ctl_ref nstat_ctlref = NULL;
3322 static lck_grp_t *nstat_lck_grp = NULL;
3323
3324 static errno_t nstat_control_connect(kern_ctl_ref kctl, struct sockaddr_ctl *sac, void **uinfo);
3325 static errno_t nstat_control_disconnect(kern_ctl_ref kctl, u_int32_t unit, void *uinfo);
3326 static errno_t nstat_control_send(kern_ctl_ref kctl, u_int32_t unit, void *uinfo, mbuf_t m, int flags);
3327
3328 static errno_t
3329 nstat_enqueue_success(
3330 uint64_t context,
3331 nstat_control_state *state,
3332 u_int16_t flags)
3333 {
3334 nstat_msg_hdr success;
3335 errno_t result;
3336
3337 bzero(&success, sizeof(success));
3338 success.context = context;
3339 success.type = NSTAT_MSG_TYPE_SUCCESS;
3340 success.length = sizeof(success);
3341 success.flags = flags;
3342 result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &success,
3343 sizeof(success), CTL_DATA_EOR | CTL_DATA_CRIT);
3344 if (result != 0) {
3345 if (nstat_debug != 0)
3346 printf("%s: could not enqueue success message %d\n",
3347 __func__, result);
3348 nstat_stats.nstat_successmsgfailures += 1;
3349 }
3350 return result;
3351 }
3352
3353 static errno_t
3354 nstat_control_send_goodbye(
3355 nstat_control_state *state,
3356 nstat_src *src)
3357 {
3358 errno_t result = 0;
3359 int failed = 0;
3360
3361 if (nstat_control_reporting_allowed(state, src))
3362 {
3363 if ((state->ncs_flags & NSTAT_FLAG_SUPPORTS_UPDATES) != 0)
3364 {
3365 result = nstat_control_send_update(state, src, 0, NSTAT_MSG_HDR_FLAG_CLOSING, NULL);
3366 if (result != 0)
3367 {
3368 failed = 1;
3369 if (nstat_debug != 0)
3370 printf("%s - nstat_control_send_update() %d\n", __func__, result);
3371 }
3372 }
3373 else
3374 {
3375 // send one last counts notification
3376 result = nstat_control_send_counts(state, src, 0, NSTAT_MSG_HDR_FLAG_CLOSING, NULL);
3377 if (result != 0)
3378 {
3379 failed = 1;
3380 if (nstat_debug != 0)
3381 printf("%s - nstat_control_send_counts() %d\n", __func__, result);
3382 }
3383
3384 // send a last description
3385 result = nstat_control_send_description(state, src, 0, NSTAT_MSG_HDR_FLAG_CLOSING);
3386 if (result != 0)
3387 {
3388 failed = 1;
3389 if (nstat_debug != 0)
3390 printf("%s - nstat_control_send_description() %d\n", __func__, result);
3391 }
3392 }
3393 }
3394
3395 // send the source removed notification
3396 result = nstat_control_send_removed(state, src);
3397 if (result != 0)
3398 {
3399 failed = 1;
3400 if (nstat_debug != 0)
3401 printf("%s - nstat_control_send_removed() %d\n", __func__, result);
3402 }
3403
3404 if (failed != 0)
3405 nstat_stats.nstat_control_send_goodbye_failures++;
3406
3407
3408 return result;
3409 }
3410
3411 static errno_t
3412 nstat_flush_accumulated_msgs(
3413 nstat_control_state *state)
3414 {
3415 errno_t result = 0;
3416 if (state->ncs_accumulated != NULL && mbuf_len(state->ncs_accumulated) > 0)
3417 {
3418 mbuf_pkthdr_setlen(state->ncs_accumulated, mbuf_len(state->ncs_accumulated));
3419 result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, state->ncs_accumulated, CTL_DATA_EOR);
3420 if (result != 0)
3421 {
3422 nstat_stats.nstat_flush_accumulated_msgs_failures++;
3423 if (nstat_debug != 0)
3424 printf("%s - ctl_enqueuembuf failed: %d\n", __func__, result);
3425 mbuf_freem(state->ncs_accumulated);
3426 }
3427 state->ncs_accumulated = NULL;
3428 }
3429 return result;
3430 }
3431
3432 static errno_t
3433 nstat_accumulate_msg(
3434 nstat_control_state *state,
3435 nstat_msg_hdr *hdr,
3436 size_t length)
3437 {
3438 if (state->ncs_accumulated && mbuf_trailingspace(state->ncs_accumulated) < length)
3439 {
3440 // Flush the current mbuf to make room for this message
3441 nstat_flush_accumulated_msgs(state);
3442 }
3443
3444 errno_t result = 0;
3445
3446 if (state->ncs_accumulated == NULL)
3447 {
3448 unsigned int one = 1;
3449 if (mbuf_allocpacket(MBUF_DONTWAIT, NSTAT_MAX_MSG_SIZE, &one, &state->ncs_accumulated) != 0)
3450 {
3451 if (nstat_debug != 0)
3452 printf("%s - mbuf_allocpacket failed\n", __func__);
3453 result = ENOMEM;
3454 }
3455 else
3456 {
3457 mbuf_setlen(state->ncs_accumulated, 0);
3458 }
3459 }
3460
3461 if (result == 0)
3462 {
3463 hdr->length = length;
3464 result = mbuf_copyback(state->ncs_accumulated, mbuf_len(state->ncs_accumulated),
3465 length, hdr, MBUF_DONTWAIT);
3466 }
3467
3468 if (result != 0)
3469 {
3470 nstat_flush_accumulated_msgs(state);
3471 if (nstat_debug != 0)
3472 printf("%s - resorting to ctl_enqueuedata\n", __func__);
3473 result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, hdr, length, CTL_DATA_EOR);
3474 }
3475
3476 if (result != 0)
3477 nstat_stats.nstat_accumulate_msg_failures++;
3478
3479 return result;
3480 }
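/*
 * The accumulate/flush pair above batches many small messages into a
 * single mbuf of up to NSTAT_MAX_MSG_SIZE so that dumping hundreds of
 * sources costs a handful of enqueues rather than one per message;
 * callers end a batch with nstat_flush_accumulated_msgs().
 */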
3481
3482 static void*
3483 nstat_idle_check(
3484 __unused thread_call_param_t p0,
3485 __unused thread_call_param_t p1)
3486 {
3487 lck_mtx_lock(&nstat_mtx);
3488
3489 nstat_idle_time = 0;
3490
3491 nstat_control_state *control;
3492 nstat_src *dead = NULL;
3493 nstat_src *dead_list = NULL;
3494 for (control = nstat_controls; control; control = control->ncs_next)
3495 {
3496 lck_mtx_lock(&control->mtx);
3497 nstat_src **srcpp = &control->ncs_srcs;
3498
3499 if (!(control->ncs_flags & NSTAT_FLAG_REQCOUNTS))
3500 {
3501 while(*srcpp != NULL)
3502 {
3503 if ((*srcpp)->provider->nstat_gone((*srcpp)->cookie))
3504 {
3505 errno_t result;
3506
3507 // Pull it off the list
3508 dead = *srcpp;
3509 *srcpp = (*srcpp)->next;
3510
3511 result = nstat_control_send_goodbye(control, dead);
3512
3513 // Put this on the list to release later
3514 dead->next = dead_list;
3515 dead_list = dead;
3516 }
3517 else
3518 {
3519 srcpp = &(*srcpp)->next;
3520 }
3521 }
3522 }
3523 control->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS;
3524 lck_mtx_unlock(&control->mtx);
3525 }
3526
3527 if (nstat_controls)
3528 {
3529 clock_interval_to_deadline(60, NSEC_PER_SEC, &nstat_idle_time);
3530 thread_call_func_delayed((thread_call_func_t)nstat_idle_check, NULL, nstat_idle_time);
3531 }
3532
3533 lck_mtx_unlock(&nstat_mtx);
3534
3535 /* Generate any system level reports, if needed */
3536 nstat_sysinfo_generate_report();
3537
3538 // Release the sources now that we aren't holding lots of locks
3539 while (dead_list)
3540 {
3541 dead = dead_list;
3542 dead_list = dead->next;
3543
3544 nstat_control_cleanup_source(NULL, dead, FALSE);
3545 }
3546
3547 return NULL;
3548 }
3549
3550 static void
3551 nstat_control_register(void)
3552 {
3553 // Create our lock group first
3554 lck_grp_attr_t *grp_attr = lck_grp_attr_alloc_init();
3555 lck_grp_attr_setdefault(grp_attr);
3556 nstat_lck_grp = lck_grp_alloc_init("network statistics kctl", grp_attr);
3557 lck_grp_attr_free(grp_attr);
3558
3559 lck_mtx_init(&nstat_mtx, nstat_lck_grp, NULL);
3560
3561 // Register the control
3562 struct kern_ctl_reg nstat_control;
3563 bzero(&nstat_control, sizeof(nstat_control));
3564 strlcpy(nstat_control.ctl_name, NET_STAT_CONTROL_NAME, sizeof(nstat_control.ctl_name));
3565 nstat_control.ctl_flags = CTL_FLAG_REG_EXTENDED | CTL_FLAG_REG_CRIT;
3566 nstat_control.ctl_sendsize = nstat_sendspace;
3567 nstat_control.ctl_recvsize = nstat_recvspace;
3568 nstat_control.ctl_connect = nstat_control_connect;
3569 nstat_control.ctl_disconnect = nstat_control_disconnect;
3570 nstat_control.ctl_send = nstat_control_send;
3571
3572 ctl_register(&nstat_control, &nstat_ctlref);
3573 }
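/*
 * Illustrative userland sketch (not part of the build): clients reach this
 * control like any other kernel control, by resolving NET_STAT_CONTROL_NAME
 * to a control id and connecting a PF_SYSTEM socket to it:
 *
 *	#include <sys/socket.h>
 *	#include <sys/sys_domain.h>
 *	#include <sys/kern_control.h>
 *	#include <sys/ioctl.h>
 *	#include <string.h>
 *
 *	int fd = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);
 *	struct ctl_info info = { 0 };
 *	strlcpy(info.ctl_name, NET_STAT_CONTROL_NAME, sizeof(info.ctl_name));
 *	ioctl(fd, CTLIOCGINFO, &info);		// name -> ctl_id
 *	struct sockaddr_ctl sc = { 0 };
 *	sc.sc_len = sizeof(sc);
 *	sc.sc_family = AF_SYSTEM;
 *	sc.ss_sysaddr = AF_SYS_CONTROL;
 *	sc.sc_id = info.ctl_id;
 *	sc.sc_unit = 0;				// let the kernel pick a unit
 *	connect(fd, (struct sockaddr *)&sc, sizeof(sc));
 */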
3574
3575 static void
3576 nstat_control_cleanup_source(
3577 nstat_control_state *state,
3578 struct nstat_src *src,
3579 boolean_t locked)
3580 {
3581 errno_t result;
3582
3583 if (state)
3584 {
3585 result = nstat_control_send_removed(state, src);
3586 if (result != 0)
3587 {
3588 nstat_stats.nstat_control_cleanup_source_failures++;
3589 if (nstat_debug != 0)
3590 printf("%s - nstat_control_send_removed() %d\n",
3591 __func__, result);
3592 }
3593 }
3594 // Clean up the source if we found it.
3595 src->provider->nstat_release(src->cookie, locked);
3596 OSFree(src, sizeof(*src), nstat_malloc_tag);
3597 }
3598
3599
3600 static bool
3601 nstat_control_reporting_allowed(
3602 nstat_control_state *state,
3603 nstat_src *src)
3604 {
3605 if (src->provider->nstat_reporting_allowed == NULL)
3606 return TRUE;
3607
3608 return (
3609 src->provider->nstat_reporting_allowed(src->cookie,
3610 &state->ncs_provider_filters[src->provider->nstat_provider_id])
3611 );
3612 }
3613
3614
3615 static errno_t
3616 nstat_control_connect(
3617 kern_ctl_ref kctl,
3618 struct sockaddr_ctl *sac,
3619 void **uinfo)
3620 {
3621 nstat_control_state *state = OSMalloc(sizeof(*state), nstat_malloc_tag);
3622 if (state == NULL) return ENOMEM;
3623
3624 bzero(state, sizeof(*state));
3625 lck_mtx_init(&state->mtx, nstat_lck_grp, NULL);
3626 state->ncs_kctl = kctl;
3627 state->ncs_unit = sac->sc_unit;
3628 state->ncs_flags = NSTAT_FLAG_REQCOUNTS;
3629 *uinfo = state;
3630
3631 lck_mtx_lock(&nstat_mtx);
3632 state->ncs_next = nstat_controls;
3633 nstat_controls = state;
3634
3635 if (nstat_idle_time == 0)
3636 {
3637 clock_interval_to_deadline(60, NSEC_PER_SEC, &nstat_idle_time);
3638 thread_call_func_delayed((thread_call_func_t)nstat_idle_check, NULL, nstat_idle_time);
3639 }
3640
3641 lck_mtx_unlock(&nstat_mtx);
3642
3643 return 0;
3644 }
3645
3646 static errno_t
3647 nstat_control_disconnect(
3648 __unused kern_ctl_ref kctl,
3649 __unused u_int32_t unit,
3650 void *uinfo)
3651 {
3652 u_int32_t watching;
3653 nstat_control_state *state = (nstat_control_state*)uinfo;
3654
3655 // pull it out of the global list of states
3656 lck_mtx_lock(&nstat_mtx);
3657 nstat_control_state **statepp;
3658 for (statepp = &nstat_controls; *statepp; statepp = &(*statepp)->ncs_next)
3659 {
3660 if (*statepp == state)
3661 {
3662 *statepp = state->ncs_next;
3663 break;
3664 }
3665 }
3666 lck_mtx_unlock(&nstat_mtx);
3667
3668 lck_mtx_lock(&state->mtx);
3669 // Stop watching for sources
3670 nstat_provider *provider;
3671 watching = state->ncs_watching;
3672 state->ncs_watching = 0;
3673 for (provider = nstat_providers; provider && watching; provider = provider->next)
3674 {
3675 if ((watching & (1 << provider->nstat_provider_id)) != 0)
3676 {
3677 watching &= ~(1 << provider->nstat_provider_id);
3678 provider->nstat_watcher_remove(state);
3679 }
3680 }
3681
3682 // set cleanup flags
3683 state->ncs_flags |= NSTAT_FLAG_CLEANUP;
3684
3685 if (state->ncs_accumulated)
3686 {
3687 mbuf_freem(state->ncs_accumulated);
3688 state->ncs_accumulated = NULL;
3689 }
3690
3691 // Copy out the list of sources
3692 nstat_src *srcs = state->ncs_srcs;
3693 state->ncs_srcs = NULL;
3694 lck_mtx_unlock(&state->mtx);
3695
3696 while (srcs)
3697 {
3698 nstat_src *src;
3699
3700 // pull it out of the list
3701 src = srcs;
3702 srcs = src->next;
3703
3704 // clean it up
3705 nstat_control_cleanup_source(NULL, src, FALSE);
3706 }
3707 lck_mtx_destroy(&state->mtx, nstat_lck_grp);
3708 OSFree(state, sizeof(*state), nstat_malloc_tag);
3709
3710 return 0;
3711 }
3712
3713 static nstat_src_ref_t
3714 nstat_control_next_src_ref(
3715 nstat_control_state *state)
3716 {
3717 return ++state->ncs_next_srcref;
3718 }
3719
3720 static errno_t
3721 nstat_control_send_counts(
3722 nstat_control_state *state,
3723 nstat_src *src,
3724 unsigned long long context,
3725 u_int16_t hdr_flags,
3726 int *gone)
3727 {
3728 nstat_msg_src_counts counts;
3729 errno_t result = 0;
3730
3731 /* Some providers may not have any counts to send */
3732 if (src->provider->nstat_counts == NULL)
3733 return (0);
3734
3735 bzero(&counts, sizeof(counts));
3736 counts.hdr.type = NSTAT_MSG_TYPE_SRC_COUNTS;
3737 counts.hdr.length = sizeof(counts);
3738 counts.hdr.flags = hdr_flags;
3739 counts.hdr.context = context;
3740 counts.srcref = src->srcref;
3741 counts.event_flags = 0;
3742
3743 if (src->provider->nstat_counts(src->cookie, &counts.counts, gone) == 0)
3744 {
3745 if ((src->filter & NSTAT_FILTER_NOZEROBYTES) &&
3746 counts.counts.nstat_rxbytes == 0 &&
3747 counts.counts.nstat_txbytes == 0)
3748 {
3749 result = EAGAIN;
3750 }
3751 else
3752 {
3753 result = ctl_enqueuedata(state->ncs_kctl,
3754 state->ncs_unit, &counts, sizeof(counts),
3755 CTL_DATA_EOR);
3756 if (result != 0)
3757 nstat_stats.nstat_sendcountfailures += 1;
3758 }
3759 }
3760 return result;
3761 }
3762
3763 static errno_t
3764 nstat_control_append_counts(
3765 nstat_control_state *state,
3766 nstat_src *src,
3767 int *gone)
3768 {
3769 /* Some providers may not have any counts to send */
3770 if (!src->provider->nstat_counts) return 0;
3771
3772 nstat_msg_src_counts counts;
3773 bzero(&counts, sizeof(counts));
3774 counts.hdr.type = NSTAT_MSG_TYPE_SRC_COUNTS;
3775 counts.hdr.length = sizeof(counts);
3776 counts.srcref = src->srcref;
3777 counts.event_flags = 0;
3778
3779 errno_t result = 0;
3780 result = src->provider->nstat_counts(src->cookie, &counts.counts, gone);
3781 if (result != 0)
3782 {
3783 return result;
3784 }
3785
3786 if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
3787 counts.counts.nstat_rxbytes == 0 && counts.counts.nstat_txbytes == 0)
3788 {
3789 return EAGAIN;
3790 }
3791
3792 return nstat_accumulate_msg(state, &counts.hdr, counts.hdr.length);
3793 }
3794
3795 static int
3796 nstat_control_send_description(
3797 nstat_control_state *state,
3798 nstat_src *src,
3799 u_int64_t context,
3800 u_int16_t hdr_flags)
3801 {
3802 // Provider doesn't support getting the descriptor? Done.
3803 if (src->provider->nstat_descriptor_length == 0 ||
3804 src->provider->nstat_copy_descriptor == NULL)
3805 {
3806 return EOPNOTSUPP;
3807 }
3808
3809 // Allocate storage for the descriptor message
3810 mbuf_t msg;
3811 unsigned int one = 1;
3812 u_int32_t size = offsetof(nstat_msg_src_description, data) + src->provider->nstat_descriptor_length;
3813 if (mbuf_allocpacket(MBUF_DONTWAIT, size, &one, &msg) != 0)
3814 {
3815 return ENOMEM;
3816 }
3817
3818 nstat_msg_src_description *desc = (nstat_msg_src_description*)mbuf_data(msg);
3819 bzero(desc, size);
3820 mbuf_setlen(msg, size);
3821 mbuf_pkthdr_setlen(msg, mbuf_len(msg));
3822
3823 // Query the provider for the provider-specific bits
3824 errno_t result = src->provider->nstat_copy_descriptor(src->cookie, desc->data, src->provider->nstat_descriptor_length);
3825
3826 if (result != 0)
3827 {
3828 mbuf_freem(msg);
3829 return result;
3830 }
3831
3832 desc->hdr.context = context;
3833 desc->hdr.type = NSTAT_MSG_TYPE_SRC_DESC;
3834 desc->hdr.length = size;
3835 desc->hdr.flags = hdr_flags;
3836 desc->srcref = src->srcref;
3837 desc->event_flags = 0;
3838 desc->provider = src->provider->nstat_provider_id;
3839
3840 result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg, CTL_DATA_EOR);
3841 if (result != 0)
3842 {
3843 nstat_stats.nstat_descriptionfailures += 1;
3844 mbuf_freem(msg);
3845 }
3846
3847 return result;
3848 }
3849
3850 static errno_t
3851 nstat_control_append_description(
3852 nstat_control_state *state,
3853 nstat_src *src)
3854 {
3855 size_t size = offsetof(nstat_msg_src_description, data) + src->provider->nstat_descriptor_length;
3856 if (size > 512 || src->provider->nstat_descriptor_length == 0 ||
3857 src->provider->nstat_copy_descriptor == NULL)
3858 {
3859 return EOPNOTSUPP;
3860 }
3861
3862 // Fill out a buffer on the stack; we will copy it to the mbuf later
3863 u_int64_t buffer[size/sizeof(u_int64_t) + 1]; // u_int64_t to ensure alignment
3864 bzero(buffer, size);
3865
3866 nstat_msg_src_description *desc = (nstat_msg_src_description*)buffer;
3867 desc->hdr.type = NSTAT_MSG_TYPE_SRC_DESC;
3868 desc->hdr.length = size;
3869 desc->srcref = src->srcref;
3870 desc->event_flags = 0;
3871 desc->provider = src->provider->nstat_provider_id;
3872
3873 errno_t result = 0;
3874 // Fill in the description:
3875 // query the provider for the provider-specific bits
3876 result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
3877 src->provider->nstat_descriptor_length);
3878 if (result != 0)
3879 {
3880 return result;
3881 }
3882
3883 return nstat_accumulate_msg(state, &desc->hdr, size);
3884 }
3885
3886 static int
3887 nstat_control_send_update(
3888 nstat_control_state *state,
3889 nstat_src *src,
3890 u_int64_t context,
3891 u_int16_t hdr_flags,
3892 int *gone)
3893 {
3894 // Provider doesn't support getting the descriptor or counts? Done.
3895 if ((src->provider->nstat_descriptor_length == 0 ||
3896 src->provider->nstat_copy_descriptor == NULL) &&
3897 src->provider->nstat_counts == NULL)
3898 {
3899 return EOPNOTSUPP;
3900 }
3901
3902 // Allocate storage for the descriptor message
3903 mbuf_t msg;
3904 unsigned int one = 1;
3905 u_int32_t size = offsetof(nstat_msg_src_update, data) +
3906 src->provider->nstat_descriptor_length;
3907 if (mbuf_allocpacket(MBUF_DONTWAIT, size, &one, &msg) != 0)
3908 {
3909 return ENOMEM;
3910 }
3911
3912 nstat_msg_src_update *desc = (nstat_msg_src_update*)mbuf_data(msg);
3913 bzero(desc, size);
3914 desc->hdr.context = context;
3915 desc->hdr.type = NSTAT_MSG_TYPE_SRC_UPDATE;
3916 desc->hdr.length = size;
3917 desc->hdr.flags = hdr_flags;
3918 desc->srcref = src->srcref;
3919 desc->event_flags = 0;
3920 desc->provider = src->provider->nstat_provider_id;
3921
3922 mbuf_setlen(msg, size);
3923 mbuf_pkthdr_setlen(msg, mbuf_len(msg));
3924
3925 errno_t result = 0;
3926 if (src->provider->nstat_descriptor_length != 0 && src->provider->nstat_copy_descriptor)
3927 {
3928 // Query the provider for the provider-specific bits
3929 result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
3930 src->provider->nstat_descriptor_length);
3931 if (result != 0)
3932 {
3933 mbuf_freem(msg);
3934 return result;
3935 }
3936 }
3937
3938 if (src->provider->nstat_counts)
3939 {
3940 result = src->provider->nstat_counts(src->cookie, &desc->counts, gone);
3941 if (result == 0)
3942 {
3943 if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
3944 desc->counts.nstat_rxbytes == 0 && desc->counts.nstat_txbytes == 0)
3945 {
3946 result = EAGAIN;
3947 }
3948 else
3949 {
3950 result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg, CTL_DATA_EOR);
3951 }
3952 }
3953 }
3954
3955 if (result != 0)
3956 {
3957 nstat_stats.nstat_srcupatefailures += 1;
3958 mbuf_freem(msg);
3959 }
3960
3961 return result;
3962 }
3963
3964 static errno_t
3965 nstat_control_append_update(
3966 nstat_control_state *state,
3967 nstat_src *src,
3968 int *gone)
3969 {
3970 size_t size = offsetof(nstat_msg_src_update, data) + src->provider->nstat_descriptor_length;
3971 if (size > 512 || ((src->provider->nstat_descriptor_length == 0 ||
3972 src->provider->nstat_copy_descriptor == NULL) &&
3973 src->provider->nstat_counts == NULL))
3974 {
3975 return EOPNOTSUPP;
3976 }
3977
3978 // Fill out a buffer on the stack; we will copy it to the mbuf later
3979 u_int64_t buffer[size/sizeof(u_int64_t) + 1]; // u_int64_t to ensure alignment
3980 bzero(buffer, size);
3981
3982 nstat_msg_src_update *desc = (nstat_msg_src_update*)buffer;
3983 desc->hdr.type = NSTAT_MSG_TYPE_SRC_UPDATE;
3984 desc->hdr.length = size;
3985 desc->srcref = src->srcref;
3986 desc->event_flags = 0;
3987 desc->provider = src->provider->nstat_provider_id;
3988
3989 errno_t result = 0;
3990 // Fill in the description
3991 if (src->provider->nstat_descriptor_length != 0 && src->provider->nstat_copy_descriptor)
3992 {
3993 // Query the provider for the provider-specific bits
3994 result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
3995 src->provider->nstat_descriptor_length);
3996 if (result != 0)
3997 {
3998 nstat_stats.nstat_copy_descriptor_failures++;
3999 if (nstat_debug != 0)
4000 printf("%s: src->provider->nstat_copy_descriptor: %d\n", __func__, result);
4001 return result;
4002 }
4003 }
4004
4005 if (src->provider->nstat_counts)
4006 {
4007 result = src->provider->nstat_counts(src->cookie, &desc->counts, gone);
4008 if (result != 0)
4009 {
4010 nstat_stats.nstat_provider_counts_failures++;
4011 if (nstat_debug != 0)
4012 printf("%s: src->provider->nstat_counts: %d\n", __func__, result);
4013 return result;
4014 }
4015
4016 if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
4017 desc->counts.nstat_rxbytes == 0 && desc->counts.nstat_txbytes == 0)
4018 {
4019 return EAGAIN;
4020 }
4021 }
4022
4023 return nstat_accumulate_msg(state, &desc->hdr, size);
4024 }
4025
4026 static errno_t
4027 nstat_control_send_removed(
4028 nstat_control_state *state,
4029 nstat_src *src)
4030 {
4031 nstat_msg_src_removed removed;
4032 errno_t result;
4033
4034 bzero(&removed, sizeof(removed));
4035 removed.hdr.type = NSTAT_MSG_TYPE_SRC_REMOVED;
4036 removed.hdr.length = sizeof(removed);
4037 removed.hdr.context = 0;
4038 removed.srcref = src->srcref;
4039 result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &removed,
4040 sizeof(removed), CTL_DATA_EOR | CTL_DATA_CRIT);
4041 if (result != 0)
4042 nstat_stats.nstat_msgremovedfailures += 1;
4043
4044 return result;
4045 }
4046
4047 static errno_t
4048 nstat_control_handle_add_request(
4049 nstat_control_state *state,
4050 mbuf_t m)
4051 {
4052 errno_t result;
4053
4054 // Verify the header fits in the first mbuf
4055 if (mbuf_len(m) < offsetof(nstat_msg_add_src_req, param))
4056 {
4057 return EINVAL;
4058 }
4059
4060 // Calculate the length of the parameter field
4061 int32_t paramlength = mbuf_pkthdr_len(m) - offsetof(nstat_msg_add_src_req, param);
4062 if (paramlength < 0 || paramlength > 2 * 1024)
4063 {
4064 return EINVAL;
4065 }
4066
4067 nstat_provider *provider;
4068 nstat_provider_cookie_t cookie;
4069 nstat_msg_add_src_req *req = mbuf_data(m);
4070 if (mbuf_pkthdr_len(m) > mbuf_len(m))
4071 {
4072 // The parameter spans more than one mbuf, so make a contiguous copy
4073 void *data = OSMalloc(paramlength, nstat_malloc_tag);
4074
4075 if (!data) return ENOMEM;
4076 result = mbuf_copydata(m, offsetof(nstat_msg_add_src_req, param), paramlength, data);
4077 if (result == 0)
4078 result = nstat_lookup_entry(req->provider, data, paramlength, &provider, &cookie);
4079 OSFree(data, paramlength, nstat_malloc_tag);
4080 }
4081 else
4082 {
4083 result = nstat_lookup_entry(req->provider, (void*)&req->param, paramlength, &provider, &cookie);
4084 }
4085
4086 if (result != 0)
4087 {
4088 return result;
4089 }
4090
4091 result = nstat_control_source_add(req->hdr.context, state, provider, cookie);
4092 if (result != 0)
4093 provider->nstat_release(cookie, 0);
4094
4095 return result;
4096 }
4097
4098 static errno_t
4099 nstat_control_handle_add_all(
4100 nstat_control_state *state,
4101 mbuf_t m)
4102 {
4103 errno_t result = 0;
4104
4105 // Verify the header fits in the first mbuf
4106 if (mbuf_len(m) < sizeof(nstat_msg_add_all_srcs))
4107 {
4108 return EINVAL;
4109 }
4110
4111
4112 nstat_msg_add_all_srcs *req = mbuf_data(m);
4113 if (req->provider > NSTAT_PROVIDER_LAST) return ENOENT;
4114
4115 nstat_provider *provider = nstat_find_provider_by_id(req->provider);
4116
4117 if (!provider) return ENOENT;
4118 if (provider->nstat_watcher_add == NULL) return ENOTSUP;
4119
4120 if (nstat_privcheck != 0) {
4121 result = priv_check_cred(kauth_cred_get(),
4122 PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0);
4123 if (result != 0)
4124 return result;
4125 }
4126
4127 // Make sure we don't add the provider twice
4128 lck_mtx_lock(&state->mtx);
4129 if ((state->ncs_watching & (1 << provider->nstat_provider_id)) != 0)
4130 result = EALREADY;
4131 state->ncs_watching |= (1 << provider->nstat_provider_id);
4132 lck_mtx_unlock(&state->mtx);
4133 if (result != 0) return result;
4134
4135 state->ncs_provider_filters[req->provider].npf_flags = req->filter;
4136 state->ncs_provider_filters[req->provider].npf_events = req->events;
4137 state->ncs_provider_filters[req->provider].npf_pid = req->target_pid;
4138 memcpy(state->ncs_provider_filters[req->provider].npf_uuid, req->target_uuid,
4139 sizeof(state->ncs_provider_filters[req->provider].npf_uuid));
4140
4141 result = provider->nstat_watcher_add(state);
4142 if (result != 0)
4143 {
4144 state->ncs_provider_filters[req->provider].npf_flags = 0;
4145 state->ncs_provider_filters[req->provider].npf_events = 0;
4146 state->ncs_provider_filters[req->provider].npf_pid = 0;
4147 bzero(state->ncs_provider_filters[req->provider].npf_uuid,
4148 sizeof(state->ncs_provider_filters[req->provider].npf_uuid));
4149
4150 lck_mtx_lock(&state->mtx);
4151 state->ncs_watching &= ~(1 << provider->nstat_provider_id);
4152 lck_mtx_unlock(&state->mtx);
4153 }
4154 if (result == 0)
4155 nstat_enqueue_success(req->hdr.context, state, 0);
4156
4157 return result;
4158 }
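/*
 * Usage note (an illustrative sketch): a client subscribes to every source
 * of one provider by sending a request like the following on the control
 * socket; `fd` and the field values are examples only:
 *
 *     struct nstat_msg_add_all_srcs req;
 *     bzero(&req, sizeof(req));
 *     req.hdr.type = NSTAT_MSG_TYPE_ADD_ALL_SRCS;
 *     req.hdr.length = sizeof(req);
 *     req.hdr.context = 1;       // echoed back in replies
 *     req.provider = NSTAT_PROVIDER_TCP_KERNEL; // provider ids: ntstat.h
 *     req.filter = 0;            // e.g. NSTAT_FILTER_SUPPRESS_SRC_ADDED
 *     req.events = 0;
 *     req.target_pid = 0;        // 0: don't restrict to one pid
 *     send(fd, &req, sizeof(req), 0);
 *
 * One NSTAT_MSG_TYPE_SRC_ADDED message then arrives per matching source,
 * unless suppressed by the filter.
 */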
4159
4160 static errno_t
4161 nstat_control_source_add(
4162 u_int64_t context,
4163 nstat_control_state *state,
4164 nstat_provider *provider,
4165 nstat_provider_cookie_t cookie)
4166 {
4167 // Fill out source added message if appropriate
4168 mbuf_t msg = NULL;
4169 nstat_src_ref_t *srcrefp = NULL;
4170
4171 u_int64_t provider_filter_flags =
4172 state->ncs_provider_filters[provider->nstat_provider_id].npf_flags;
4173 boolean_t tell_user =
4174 ((provider_filter_flags & NSTAT_FILTER_SUPPRESS_SRC_ADDED) == 0);
4175 u_int32_t src_filter =
4176 (provider_filter_flags & NSTAT_FILTER_PROVIDER_NOZEROBYTES)
4177 ? NSTAT_FILTER_NOZEROBYTES : 0;
4178
4179 if (tell_user)
4180 {
4181 unsigned int one = 1;
4182
4183 if (mbuf_allocpacket(MBUF_DONTWAIT, sizeof(nstat_msg_src_added),
4184 &one, &msg) != 0)
4185 return ENOMEM;
4186
4187 mbuf_setlen(msg, sizeof(nstat_msg_src_added));
4188 mbuf_pkthdr_setlen(msg, mbuf_len(msg));
4189 nstat_msg_src_added *add = mbuf_data(msg);
4190 bzero(add, sizeof(*add));
4191 add->hdr.type = NSTAT_MSG_TYPE_SRC_ADDED;
4192 add->hdr.length = mbuf_len(msg);
4193 add->hdr.context = context;
4194 add->provider = provider->nstat_provider_id;
4195 srcrefp = &add->srcref;
4196 }
4197
4198 // Allocate storage for the source
4199 nstat_src *src = OSMalloc(sizeof(*src), nstat_malloc_tag);
4200 if (src == NULL)
4201 {
4202 if (msg) mbuf_freem(msg);
4203 return ENOMEM;
4204 }
4205
4206 // Fill in the source, including picking an unused source ref
4207 lck_mtx_lock(&state->mtx);
4208
4209 src->srcref = nstat_control_next_src_ref(state);
4210 if (srcrefp)
4211 *srcrefp = src->srcref;
4212
4213 if (state->ncs_flags & NSTAT_FLAG_CLEANUP || src->srcref == NSTAT_SRC_REF_INVALID)
4214 {
4215 lck_mtx_unlock(&state->mtx);
4216 OSFree(src, sizeof(*src), nstat_malloc_tag);
4217 if (msg) mbuf_freem(msg);
4218 return EINVAL;
4219 }
4220 src->provider = provider;
4221 src->cookie = cookie;
4222 src->filter = src_filter;
4223
4224 if (msg)
4225 {
4226 // send the source added message if appropriate
4227 errno_t result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg,
4228 CTL_DATA_EOR);
4229 if (result != 0)
4230 {
4231 nstat_stats.nstat_srcaddedfailures += 1;
4232 lck_mtx_unlock(&state->mtx);
4233 OSFree(src, sizeof(*src), nstat_malloc_tag);
4234 mbuf_freem(msg);
4235 return result;
4236 }
4237 }
4238 // Put the source in the list
4239 src->next = state->ncs_srcs;
4240 state->ncs_srcs = src;
4241
4242 lck_mtx_unlock(&state->mtx);
4243
4244 return 0;
4245 }
4246
4247 static errno_t
4248 nstat_control_handle_remove_request(
4249 nstat_control_state *state,
4250 mbuf_t m)
4251 {
4252 nstat_src_ref_t srcref = NSTAT_SRC_REF_INVALID;
4253
4254 if (mbuf_copydata(m, offsetof(nstat_msg_rem_src_req, srcref), sizeof(srcref), &srcref) != 0)
4255 {
4256 return EINVAL;
4257 }
4258
4259 lck_mtx_lock(&state->mtx);
4260
4261 // Remove this source as we look for it
4262 nstat_src **nextp;
4263 nstat_src *src = NULL;
4264 for (nextp = &state->ncs_srcs; *nextp; nextp = &(*nextp)->next)
4265 {
4266 if ((*nextp)->srcref == srcref)
4267 {
4268 src = *nextp;
4269 *nextp = src->next;
4270 break;
4271 }
4272 }
4273
4274 lck_mtx_unlock(&state->mtx);
4275
4276 if (src) nstat_control_cleanup_source(state, src, FALSE);
4277
4278 return src ? 0 : ENOENT;
4279 }
4280
4281 static errno_t
4282 nstat_control_handle_query_request(
4283 nstat_control_state *state,
4284 mbuf_t m)
4285 {
4286 // TBD: handle this from another thread so we can enqueue a lot of data
4287 // As written, if a client requests "query all", this function will be
4288 // called from that client's send of the request message. We will attempt
4289 // to write responses, succeeding until the buffer fills up. Since the
4290 // client's thread is blocked on send, it won't be reading unless the
4291 // client uses two threads on this socket, one to read and one to write.
4292 // Two threads probably won't work with this code anyhow, since we don't
4293 // have proper locking in place yet.
4294 nstat_src *dead_srcs = NULL;
4295 errno_t result = ENOENT;
4296 nstat_msg_query_src_req req;
4297
4298 if (mbuf_copydata(m, 0, sizeof(req), &req) != 0)
4299 {
4300 return EINVAL;
4301 }
4302
4303 const boolean_t all_srcs = (req.srcref == NSTAT_SRC_REF_ALL);
4304
4305 lck_mtx_lock(&state->mtx);
4306
4307 if (all_srcs)
4308 {
4309 state->ncs_flags |= NSTAT_FLAG_REQCOUNTS;
4310 }
4311 nstat_src **srcpp = &state->ncs_srcs;
4312 u_int64_t src_count = 0;
4313 boolean_t partial = FALSE;
4314
4315 /*
4316 * Error handling policy and sequence number generation are folded into
4317 * nstat_control_begin_query.
4318 */
4319 partial = nstat_control_begin_query(state, &req.hdr);
4320
4321 while (*srcpp != NULL
4322 && (!partial || src_count < QUERY_CONTINUATION_SRC_COUNT))
4323 {
4324 nstat_src *src = NULL;
4325 int gone;
4326
4327 src = *srcpp;
4328 gone = 0;
4329 // XXX ignore IFACE types?
4330 if (all_srcs || src->srcref == req.srcref)
4331 {
4332 if (nstat_control_reporting_allowed(state, src)
4333 && (!partial || !all_srcs || src->seq != state->ncs_seq))
4334 {
4335 if (all_srcs &&
4336 (req.hdr.flags & NSTAT_MSG_HDR_FLAG_SUPPORTS_AGGREGATE) != 0)
4337 {
4338 result = nstat_control_append_counts(state, src, &gone);
4339 }
4340 else
4341 {
4342 result = nstat_control_send_counts(state, src, req.hdr.context, 0, &gone);
4343 }
4344
4345 if (ENOMEM == result || ENOBUFS == result)
4346 {
4347 /*
4348 * If the counts message failed to
4349 * enqueue then we should clear our flag so
4350 * that a client doesn't miss anything on
4351 * idle cleanup. We skip the "gone"
4352 * processing in the hope that we may
4353 * catch it another time.
4354 */
4355 state->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS;
4356 break;
4357 }
4358 if (partial)
4359 {
4360 /*
4361 * We skip over hard errors and
4362 * filtered sources.
4363 */
4364 src->seq = state->ncs_seq;
4365 src_count++;
4366 }
4367 }
4368 }
4369
4370 if (gone)
4371 {
4372 // Send one last descriptor message so the client may see the final
4373 // state. If we can't send the notification now, it will be sent
4374 // during idle cleanup.
4375 result = nstat_control_send_description(state, *srcpp, 0, 0);
4376 if (result != 0)
4377 {
4378 nstat_stats.nstat_control_send_description_failures++;
4379 if (nstat_debug != 0)
4380 printf("%s - nstat_control_send_description() %d\n", __func__, result);
4381 state->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS;
4382 break;
4383 }
4384
4385 // pull src out of the list
4386 *srcpp = src->next;
4387
4388 src->next = dead_srcs;
4389 dead_srcs = src;
4390 }
4391 else
4392 {
4393 srcpp = &(*srcpp)->next;
4394 }
4395
4396 if (!all_srcs && req.srcref == src->srcref)
4397 {
4398 break;
4399 }
4400 }
4401 nstat_flush_accumulated_msgs(state);
4402
4403 u_int16_t flags = 0;
4404 if (req.srcref == NSTAT_SRC_REF_ALL)
4405 flags = nstat_control_end_query(state, *srcpp, partial);
4406
4407 lck_mtx_unlock(&state->mtx);
4408
4409 /*
4410 * If an error occurred enqueueing data, then allow the error to
4411 * propagate to nstat_control_send. This way, the error is sent to
4412 * user-level.
4413 */
4414 if (all_srcs && ENOMEM != result && ENOBUFS != result)
4415 {
4416 nstat_enqueue_success(req.hdr.context, state, flags);
4417 result = 0;
4418 }
4419
4420 while (dead_srcs)
4421 {
4422 nstat_src *src;
4423
4424 src = dead_srcs;
4425 dead_srcs = src->next;
4426
4427 // release src and send notification
4428 nstat_control_cleanup_source(state, src, FALSE);
4429 }
4430
4431 return result;
4432 }
4433
4434 static errno_t
4435 nstat_control_handle_get_src_description(
4436 nstat_control_state *state,
4437 mbuf_t m)
4438 {
4439 nstat_msg_get_src_description req;
4440 errno_t result = ENOENT;
4441 nstat_src *src;
4442
4443 if (mbuf_copydata(m, 0, sizeof(req), &req) != 0)
4444 {
4445 return EINVAL;
4446 }
4447
4448 lck_mtx_lock(&state->mtx);
4449 u_int64_t src_count = 0;
4450 boolean_t partial = FALSE;
4451 const boolean_t all_srcs = (req.srcref == NSTAT_SRC_REF_ALL);
4452
4453 /*
4454 * Error handling policy and sequence number generation are folded into
4455 * nstat_control_begin_query.
4456 */
4457 partial = nstat_control_begin_query(state, &req.hdr);
4458
4459 for (src = state->ncs_srcs;
4460 src && (!partial || src_count < QUERY_CONTINUATION_SRC_COUNT);
4461 src = src->next)
4462 {
4463 if (all_srcs || src->srcref == req.srcref)
4464 {
4465 if (nstat_control_reporting_allowed(state, src)
4466 && (!all_srcs || !partial || src->seq != state->ncs_seq))
4467 {
4468 if ((req.hdr.flags & NSTAT_MSG_HDR_FLAG_SUPPORTS_AGGREGATE) != 0 && all_srcs)
4469 {
4470 result = nstat_control_append_description(state, src);
4471 }
4472 else
4473 {
4474 result = nstat_control_send_description(state, src, req.hdr.context, 0);
4475 }
4476
4477 if (ENOMEM == result || ENOBUFS == result)
4478 {
4479 /*
4480 * If the description message failed to
4481 * enqueue then we give up for now.
4482 */
4483 break;
4484 }
4485 if (partial)
4486 {
4487 /*
4488 * Note, we skip over hard errors and
4489 * filtered sources.
4490 */
4491 src->seq = state->ncs_seq;
4492 src_count++;
4493 }
4494 }
4495
4496 if (!all_srcs)
4497 {
4498 break;
4499 }
4500 }
4501 }
4502 nstat_flush_accumulated_msgs(state);
4503
4504 u_int16_t flags = 0;
4505 if (req.srcref == NSTAT_SRC_REF_ALL)
4506 flags = nstat_control_end_query(state, src, partial);
4507
4508 lck_mtx_unlock(&state->mtx);
4509 /*
4510 * If an error occurred enqueueing data, then allow the error to
4511 * propagate to nstat_control_send. This way, the error is sent to
4512 * user-level.
4513 */
4514 if (all_srcs && ENOMEM != result && ENOBUFS != result)
4515 {
4516 nstat_enqueue_success(req.hdr.context, state, flags);
4517 result = 0;
4518 }
4519
4520 return result;
4521 }
4522
4523 static errno_t
4524 nstat_control_handle_set_filter(
4525 nstat_control_state *state,
4526 mbuf_t m)
4527 {
4528 nstat_msg_set_filter req;
4529 nstat_src *src;
4530
4531 if (mbuf_copydata(m, 0, sizeof(req), &req) != 0)
4532 return EINVAL;
4533 if (req.srcref == NSTAT_SRC_REF_ALL ||
4534 req.srcref == NSTAT_SRC_REF_INVALID)
4535 return EINVAL;
4536
4537 lck_mtx_lock(&state->mtx);
4538 for (src = state->ncs_srcs; src; src = src->next)
4539 if (req.srcref == src->srcref)
4540 {
4541 src->filter = req.filter;
4542 break;
4543 }
4544 lck_mtx_unlock(&state->mtx);
4545 if (src == NULL)
4546 return ENOENT;
4547
4548 return 0;
4549 }
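/*
 * Usage note (an illustrative sketch): once a source has been added, a
 * client can mute reports from idle sources with a per-source filter;
 * `fd` and `srcref` here stand for the control socket and the reference
 * from the SRC_ADDED message:
 *
 *     struct nstat_msg_set_filter req;
 *     bzero(&req, sizeof(req));
 *     req.hdr.type = NSTAT_MSG_TYPE_SET_FILTER;
 *     req.hdr.length = sizeof(req);
 *     req.srcref = srcref;
 *     req.filter = NSTAT_FILTER_NOZEROBYTES; // drop all-zero count reports
 *     send(fd, &req, sizeof(req), 0);
 */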
4550
4551 static void
4552 nstat_send_error(
4553 nstat_control_state *state,
4554 u_int64_t context,
4555 u_int32_t error)
4556 {
4557 errno_t result;
4558 struct nstat_msg_error err;
4559
4560 bzero(&err, sizeof(err));
4561 err.hdr.type = NSTAT_MSG_TYPE_ERROR;
4562 err.hdr.length = sizeof(err);
4563 err.hdr.context = context;
4564 err.error = error;
4565
4566 result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &err,
4567 sizeof(err), CTL_DATA_EOR | CTL_DATA_CRIT);
4568 if (result != 0)
4569 nstat_stats.nstat_msgerrorfailures++;
4570 }
4571
4572 static boolean_t
4573 nstat_control_begin_query(
4574 nstat_control_state *state,
4575 const nstat_msg_hdr *hdrp)
4576 {
4577 boolean_t partial = FALSE;
4578
4579 if (hdrp->flags & NSTAT_MSG_HDR_FLAG_CONTINUATION)
4580 {
4581 /* A partial query all has been requested. */
4582 partial = TRUE;
4583
4584 if (state->ncs_context != hdrp->context)
4585 {
4586 if (state->ncs_context != 0)
4587 nstat_send_error(state, state->ncs_context, EAGAIN);
4588
4589 /* Initialize state for a partial query all. */
4590 state->ncs_context = hdrp->context;
4591 state->ncs_seq++;
4592 }
4593 }
4594 else if (state->ncs_context != 0)
4595 {
4596 /*
4597 * A continuation of a paced-query was in progress. Send that
4598 * context an error and reset the state. If the same context
4599 * has changed its mind, just send the full query results.
4600 */
4601 if (state->ncs_context != hdrp->context)
4602 nstat_send_error(state, state->ncs_context, EAGAIN);
4603 }
4604
4605 return partial;
4606 }
4607
4608 static u_int16_t
4609 nstat_control_end_query(
4610 nstat_control_state *state,
4611 nstat_src *last_src,
4612 boolean_t partial)
4613 {
4614 u_int16_t flags = 0;
4615
4616 if (last_src == NULL || !partial)
4617 {
4618 /*
4619 * Either we iterated through the entire source list, or we
4620 * bailed out early from a query that was not partial (e.g.
4621 * an error occurred). Clear the context to indicate
4622 * internally that the query is finished.
4623 */
4624 state->ncs_context = 0;
4625 }
4626 else
4627 {
4628 /*
4629 * Indicate to userlevel to make another partial request as
4630 * there are still sources left to be reported.
4631 */
4632 flags |= NSTAT_MSG_HDR_FLAG_CONTINUATION;
4633 }
4634
4635 return flags;
4636 }
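/*
 * Usage note (an illustrative sketch): a paced "query all" drives
 * nstat_control_begin_query/end_query above. The client keeps its context
 * constant and re-sends the request for as long as the success reply
 * carries the continuation flag. `fd` is assumed connected, and
 * `success_flags` stands for the flags field of the
 * NSTAT_MSG_TYPE_SUCCESS header read back:
 *
 *     nstat_msg_query_src_req req;
 *     bzero(&req, sizeof(req));
 *     req.hdr.type = NSTAT_MSG_TYPE_QUERY_SRC;
 *     req.hdr.length = sizeof(req);
 *     req.hdr.context = 7;       // any stable, nonzero value
 *     req.hdr.flags = NSTAT_MSG_HDR_FLAG_CONTINUATION;
 *     req.srcref = NSTAT_SRC_REF_ALL;
 *     do {
 *         send(fd, &req, sizeof(req), 0);
 *         // ... read SRC_COUNTS messages until NSTAT_MSG_TYPE_SUCCESS ...
 *     } while (success_flags & NSTAT_MSG_HDR_FLAG_CONTINUATION);
 */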
4637
4638 static errno_t
4639 nstat_control_handle_get_update(
4640 nstat_control_state *state,
4641 mbuf_t m)
4642 {
4643 nstat_msg_query_src_req req;
4644
4645 if (mbuf_copydata(m, 0, sizeof(req), &req) != 0)
4646 {
4647 return EINVAL;
4648 }
4649
4650 lck_mtx_lock(&state->mtx);
4651
4652 state->ncs_flags |= NSTAT_FLAG_SUPPORTS_UPDATES;
4653
4654 errno_t result = ENOENT;
4655 nstat_src *src;
4656 nstat_src *dead_srcs = NULL;
4657 nstat_src **srcpp = &state->ncs_srcs;
4658 u_int64_t src_count = 0;
4659 boolean_t partial = FALSE;
4660
4661 /*
4662 * Error handling policy and sequence number generation are folded into
4663 * nstat_control_begin_query.
4664 */
4665 partial = nstat_control_begin_query(state, &req.hdr);
4666
4667 while (*srcpp != NULL
4668 && (FALSE == partial
4669 || src_count < QUERY_CONTINUATION_SRC_COUNT))
4670 {
4671 int gone;
4672
4673 gone = 0;
4674 src = *srcpp;
4675 if (nstat_control_reporting_allowed(state, src))
4676 {
4677 /* Skip this source if it has the current state
4678 * sequence number, as it has already been reported in
4679 * this partial query-all sequence. */
4680 if (req.srcref == NSTAT_SRC_REF_ALL
4681 && (FALSE == partial || src->seq != state->ncs_seq))
4682 {
4683 result = nstat_control_append_update(state, src, &gone);
4684 if (ENOMEM == result || ENOBUFS == result)
4685 {
4686 /*
4687 * If the update message failed to
4688 * enqueue then give up.
4689 */
4690 break;
4691 }
4692 if (partial)
4693 {
4694 /*
4695 * We skip over hard errors and
4696 * filtered sources.
4697 */
4698 src->seq = state->ncs_seq;
4699 src_count++;
4700 }
4701 }
4702 else if (src->srcref == req.srcref)
4703 {
4704 result = nstat_control_send_update(state, src, req.hdr.context, 0, &gone);
4705 }
4706 }
4707
4708 if (gone)
4709 {
4710 // pull src out of the list
4711 *srcpp = src->next;
4712
4713 src->next = dead_srcs;
4714 dead_srcs = src;
4715 }
4716 else
4717 {
4718 srcpp = &(*srcpp)->next;
4719 }
4720
4721 if (req.srcref != NSTAT_SRC_REF_ALL && req.srcref == src->srcref)
4722 {
4723 break;
4724 }
4725 }
4726
4727 nstat_flush_accumulated_msgs(state);
4728
4729
4730 u_int16_t flags = 0;
4731 if (req.srcref == NSTAT_SRC_REF_ALL)
4732 flags = nstat_control_end_query(state, *srcpp, partial);
4733
4734 lck_mtx_unlock(&state->mtx);
4735 /*
4736 * If an error occurred enqueueing data, then allow the error to
4737 * propagate to nstat_control_send. This way, the error is sent to
4738 * user-level.
4739 */
4740 if (req.srcref == NSTAT_SRC_REF_ALL && ENOMEM != result && ENOBUFS != result)
4741 {
4742 nstat_enqueue_success(req.hdr.context, state, flags);
4743 result = 0;
4744 }
4745
4746 while (dead_srcs)
4747 {
4748 src = dead_srcs;
4749 dead_srcs = src->next;
4750
4751 // release src and send notification
4752 nstat_control_cleanup_source(state, src, FALSE);
4753 }
4754
4755 return result;
4756 }
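/*
 * Usage note (illustrative): NSTAT_MSG_TYPE_GET_UPDATE is paced exactly
 * like the query sketched earlier, but each reply carries the descriptor
 * and the counts together in one NSTAT_MSG_TYPE_SRC_UPDATE message, e.g.:
 *
 *     req.hdr.type = NSTAT_MSG_TYPE_GET_UPDATE;
 *     req.srcref = NSTAT_SRC_REF_ALL;  // or a single srcref
 */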
4757
4758 static errno_t
4759 nstat_control_handle_subscribe_sysinfo(
4760 nstat_control_state *state)
4761 {
4762 errno_t result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0);
4763
4764 if (result != 0)
4765 {
4766 return result;
4767 }
4768
4769 lck_mtx_lock(&state->mtx);
4770 state->ncs_flags |= NSTAT_FLAG_SYSINFO_SUBSCRIBED;
4771 lck_mtx_unlock(&state->mtx);
4772
4773 return 0;
4774 }
4775
4776 static errno_t
4777 nstat_control_send(
4778 kern_ctl_ref kctl,
4779 u_int32_t unit,
4780 void *uinfo,
4781 mbuf_t m,
4782 __unused int flags)
4783 {
4784 nstat_control_state *state = (nstat_control_state*)uinfo;
4785 struct nstat_msg_hdr *hdr;
4786 struct nstat_msg_hdr storage;
4787 errno_t result = 0;
4788
4789 if (mbuf_pkthdr_len(m) < sizeof(*hdr))
4790 {
4791 // Is this the right thing to do?
4792 mbuf_freem(m);
4793 return EINVAL;
4794 }
4795
4796 if (mbuf_len(m) >= sizeof(*hdr))
4797 {
4798 hdr = mbuf_data(m);
4799 }
4800 else
4801 {
4802 mbuf_copydata(m, 0, sizeof(storage), &storage);
4803 hdr = &storage;
4804 }
4805
4806 // Legacy clients may not set the length
4807 // Those clients are likely not setting the flags either
4808 // Fix everything up so old clients continue to work
4809 if (hdr->length != mbuf_pkthdr_len(m))
4810 {
4811 hdr->flags = 0;
4812 hdr->length = mbuf_pkthdr_len(m);
4813 if (hdr == &storage)
4814 {
4815 mbuf_copyback(m, 0, sizeof(*hdr), hdr, MBUF_DONTWAIT);
4816 }
4817 }
4818
4819 switch (hdr->type)
4820 {
4821 case NSTAT_MSG_TYPE_ADD_SRC:
4822 result = nstat_control_handle_add_request(state, m);
4823 break;
4824
4825 case NSTAT_MSG_TYPE_ADD_ALL_SRCS:
4826 result = nstat_control_handle_add_all(state, m);
4827 break;
4828
4829 case NSTAT_MSG_TYPE_REM_SRC:
4830 result = nstat_control_handle_remove_request(state, m);
4831 break;
4832
4833 case NSTAT_MSG_TYPE_QUERY_SRC:
4834 result = nstat_control_handle_query_request(state, m);
4835 break;
4836
4837 case NSTAT_MSG_TYPE_GET_SRC_DESC:
4838 result = nstat_control_handle_get_src_description(state, m);
4839 break;
4840
4841 case NSTAT_MSG_TYPE_SET_FILTER:
4842 result = nstat_control_handle_set_filter(state, m);
4843 break;
4844
4845 case NSTAT_MSG_TYPE_GET_UPDATE:
4846 result = nstat_control_handle_get_update(state, m);
4847 break;
4848
4849 case NSTAT_MSG_TYPE_SUBSCRIBE_SYSINFO:
4850 result = nstat_control_handle_subscribe_sysinfo(state);
4851 break;
4852
4853 default:
4854 result = EINVAL;
4855 break;
4856 }
4857
4858 if (result != 0)
4859 {
4860 struct nstat_msg_error err;
4861
4862 bzero(&err, sizeof(err));
4863 err.hdr.type = NSTAT_MSG_TYPE_ERROR;
4864 err.hdr.length = sizeof(err) + mbuf_pkthdr_len(m);
4865 err.hdr.context = hdr->context;
4866 err.error = result;
4867
4868 if (mbuf_prepend(&m, sizeof(err), MBUF_DONTWAIT) == 0 &&
4869 mbuf_copyback(m, 0, sizeof(err), &err, MBUF_DONTWAIT) == 0)
4870 {
4871 result = ctl_enqueuembuf(kctl, unit, m, CTL_DATA_EOR | CTL_DATA_CRIT);
4872 if (result != 0)
4873 {
4874 mbuf_freem(m);
4875 }
4876 m = NULL;
4877 }
4878
4879 if (result != 0)
4880 {
4881 // Unable to prepend the error to the request - just send the error
4882 err.hdr.length = sizeof(err);
4883 result = ctl_enqueuedata(kctl, unit, &err, sizeof(err),
4884 CTL_DATA_EOR | CTL_DATA_CRIT);
4885 if (result != 0)
4886 nstat_stats.nstat_msgerrorfailures += 1;
4887 }
4888 nstat_stats.nstat_handle_msg_failures += 1;
4889 }
4890
4891 if (m) mbuf_freem(m);
4892
4893 return result;
4894 }
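/*
 * Client-side note (illustrative): when any of the requests above fails,
 * the reply is an NSTAT_MSG_TYPE_ERROR whose header context matches the
 * request and whose length may exceed sizeof(struct nstat_msg_error),
 * because the original request is echoed back after the error header
 * whenever it can be prepended above. A tolerant reader checks for that
 * rather than treating oversized errors as malformed:
 *
 *     if (hdr->type == NSTAT_MSG_TYPE_ERROR &&
 *         hdr->length > sizeof(struct nstat_msg_error))
 *         // bytes past the error header are the echoed request
 */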