1 /*
2 * Copyright (c) 2010-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <sys/param.h>
30 #include <sys/types.h>
31 #include <sys/kpi_mbuf.h>
32 #include <sys/socket.h>
33 #include <sys/kern_control.h>
34 #include <sys/mcache.h>
35 #include <sys/socketvar.h>
36 #include <sys/sysctl.h>
37 #include <sys/queue.h>
38 #include <sys/priv.h>
39 #include <sys/protosw.h>
40
41 #include <kern/clock.h>
42 #include <kern/debug.h>
43
44 #include <libkern/libkern.h>
45 #include <libkern/OSMalloc.h>
46 #include <libkern/OSAtomic.h>
47 #include <libkern/locks.h>
48
49 #include <net/if.h>
50 #include <net/if_var.h>
51 #include <net/if_types.h>
52 #include <net/route.h>
53
54 // These includes appear in ntstat.h but we include them here first so they won't trigger
55 // any clang diagnostic errors.
56 #include <netinet/in.h>
57 #include <netinet/in_stat.h>
58 #include <netinet/tcp.h>
59
60 #pragma clang diagnostic push
61 #pragma clang diagnostic error "-Wpadded"
62 #pragma clang diagnostic error "-Wpacked"
63 // This header defines structures shared with user space, so we need to ensure there is
64 // no compiler inserted padding in case the user space process isn't using the same
65 // architecture as the kernel (example: i386 process with x86_64 kernel).
66 #include <net/ntstat.h>
67 #pragma clang diagnostic pop
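/*
 * Illustrative sketch (not part of the original source): with -Wpadded and
 * -Wpacked promoted to errors, a layout like bad_layout below would fail to
 * compile because the compiler has to insert 4 bytes of padding before the
 * 8-byte member. Structures shared with user space avoid this by ordering
 * members carefully and spelling out any padding explicitly.
 */
#if 0
struct bad_layout {
	u_int32_t count;        // 4 bytes, then 4 bytes of implicit padding
	u_int64_t bytes;        // 8-byte alignment forces the padding above
};

struct good_layout {
	u_int32_t count;
	u_int32_t reserved;     // explicit padding; identical layout on i386 and x86_64
	u_int64_t bytes;
};
#endif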
68
69 #include <netinet/ip_var.h>
70 #include <netinet/in_pcb.h>
71 #include <netinet/in_var.h>
72 #include <netinet/tcp_var.h>
73 #include <netinet/tcp_fsm.h>
74 #include <netinet/tcp_cc.h>
75 #include <netinet/udp.h>
76 #include <netinet/udp_var.h>
77 #include <netinet6/in6_pcb.h>
78 #include <netinet6/in6_var.h>
79
80 __private_extern__ int nstat_collect = 1;
81
82 #if (DEBUG || DEVELOPMENT)
83 SYSCTL_INT(_net, OID_AUTO, statistics, CTLFLAG_RW | CTLFLAG_LOCKED,
84 &nstat_collect, 0, "Collect detailed statistics");
85 #endif /* (DEBUG || DEVELOPMENT) */
86
87 #if CONFIG_EMBEDDED
88 static int nstat_privcheck = 1;
89 #else
90 static int nstat_privcheck = 0;
91 #endif
92 SYSCTL_INT(_net, OID_AUTO, statistics_privcheck, CTLFLAG_RW | CTLFLAG_LOCKED,
93 &nstat_privcheck, 0, "Entitlement check");
94
95 SYSCTL_NODE(_net, OID_AUTO, stats,
96 CTLFLAG_RW | CTLFLAG_LOCKED, 0, "network statistics");
97
98 static int nstat_debug = 0;
99 SYSCTL_INT(_net_stats, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED,
100 &nstat_debug, 0, "");
101
102 static int nstat_sendspace = 2048;
103 SYSCTL_INT(_net_stats, OID_AUTO, sendspace, CTLFLAG_RW | CTLFLAG_LOCKED,
104 &nstat_sendspace, 0, "");
105
106 static int nstat_recvspace = 8192;
107 SYSCTL_INT(_net_stats, OID_AUTO, recvspace, CTLFLAG_RW | CTLFLAG_LOCKED,
108 &nstat_recvspace, 0, "");
109
110 static struct nstat_stats nstat_stats;
111 SYSCTL_STRUCT(_net_stats, OID_AUTO, stats, CTLFLAG_RD | CTLFLAG_LOCKED,
112 &nstat_stats, nstat_stats, "");
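/*
 * Illustrative user-space sketch (not part of the original source): the knobs
 * registered above live under the "net.stats" sysctl node and can be read
 * with sysctlbyname(3). A minimal reader, assuming the names above:
 */
#if 0
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int value = 0;
	size_t len = sizeof(value);

	// "net.stats.recvspace" corresponds to nstat_recvspace above
	if (sysctlbyname("net.stats.recvspace", &value, &len, NULL, 0) == 0) {
		printf("ntstat receive buffer: %d bytes\n", value);
	}
	return 0;
}
#endif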
113
114 static u_int32_t nstat_lim_interval = 30 * 60; /* Report interval, seconds */
115 static u_int32_t nstat_lim_min_tx_pkts = 100;
116 static u_int32_t nstat_lim_min_rx_pkts = 100;
117 #if (DEBUG || DEVELOPMENT)
118 SYSCTL_INT(_net_stats, OID_AUTO, lim_report_interval,
119 CTLFLAG_RW | CTLFLAG_LOCKED, &nstat_lim_interval, 0,
120 "Low internet stat report interval");
121
122 SYSCTL_INT(_net_stats, OID_AUTO, lim_min_tx_pkts,
123 CTLFLAG_RW | CTLFLAG_LOCKED, &nstat_lim_min_tx_pkts, 0,
124 "Low Internet, min transmit packets threshold");
125
126 SYSCTL_INT(_net_stats, OID_AUTO, lim_min_rx_pkts,
127 CTLFLAG_RW | CTLFLAG_LOCKED, &nstat_lim_min_rx_pkts, 0,
128 "Low Internet, min receive packets threshold");
129 #endif /* DEBUG || DEVELOPMENT */
130
131 static struct net_api_stats net_api_stats_before;
132 static u_int64_t net_api_stats_last_report_time;
133 #define NET_API_STATS_REPORT_INTERVAL (12 * 60 * 60) /* 12 hours, in seconds */
134 static u_int32_t net_api_stats_report_interval = NET_API_STATS_REPORT_INTERVAL;
135
136 #if (DEBUG || DEVELOPMENT)
137 SYSCTL_UINT(_net_stats, OID_AUTO, api_report_interval,
138 CTLFLAG_RW | CTLFLAG_LOCKED, &net_api_stats_report_interval, 0, "");
139 #endif /* DEBUG || DEVELOPMENT */
140
141 enum {
142 NSTAT_FLAG_CLEANUP = (1 << 0),
143 NSTAT_FLAG_REQCOUNTS = (1 << 1),
144 NSTAT_FLAG_SUPPORTS_UPDATES = (1 << 2),
145 NSTAT_FLAG_SYSINFO_SUBSCRIBED = (1 << 3),
146 };
147
148 #if CONFIG_EMBEDDED
149 #define QUERY_CONTINUATION_SRC_COUNT 50
150 #else
151 #define QUERY_CONTINUATION_SRC_COUNT 100
152 #endif
153
154 typedef TAILQ_HEAD(, nstat_src) tailq_head_nstat_src;
155 typedef TAILQ_ENTRY(nstat_src) tailq_entry_nstat_src;
156
157 typedef struct nstat_provider_filter {
158 u_int64_t npf_flags;
159 u_int64_t npf_events;
160 pid_t npf_pid;
161 uuid_t npf_uuid;
162 } nstat_provider_filter;
163
164
165 typedef struct nstat_control_state {
166 struct nstat_control_state *ncs_next;
167 u_int32_t ncs_watching;
168 decl_lck_mtx_data(, ncs_mtx);
169 kern_ctl_ref ncs_kctl;
170 u_int32_t ncs_unit;
171 nstat_src_ref_t ncs_next_srcref;
172 tailq_head_nstat_src ncs_src_queue;
173 mbuf_t ncs_accumulated;
174 u_int32_t ncs_flags;
175 nstat_provider_filter ncs_provider_filters[NSTAT_PROVIDER_COUNT];
176 /* state maintained for partial query requests */
177 u_int64_t ncs_context;
178 u_int64_t ncs_seq;
179 } nstat_control_state;
180
181 typedef struct nstat_provider {
182 struct nstat_provider *next;
183 nstat_provider_id_t nstat_provider_id;
184 size_t nstat_descriptor_length;
185 errno_t (*nstat_lookup)(const void *data, u_int32_t length, nstat_provider_cookie_t *out_cookie);
186 int (*nstat_gone)(nstat_provider_cookie_t cookie);
187 errno_t (*nstat_counts)(nstat_provider_cookie_t cookie, struct nstat_counts *out_counts, int *out_gone);
188 errno_t (*nstat_watcher_add)(nstat_control_state *state, nstat_msg_add_all_srcs *req);
189 void (*nstat_watcher_remove)(nstat_control_state *state);
190 errno_t (*nstat_copy_descriptor)(nstat_provider_cookie_t cookie, void *data, u_int32_t len);
191 void (*nstat_release)(nstat_provider_cookie_t cookie, boolean_t locked);
192 bool (*nstat_reporting_allowed)(nstat_provider_cookie_t cookie, nstat_provider_filter *filter);
193 } nstat_provider;
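/*
 * Illustrative sketch (not part of the original source): a provider is
 * registered by filling in this callback table and pushing it onto the
 * singly linked nstat_providers list, exactly as the concrete
 * nstat_init_*_provider() routines later in this file do. The example
 * provider and callback names here are hypothetical.
 */
#if 0
static nstat_provider nstat_example_provider;

static void
nstat_init_example_provider(void)
{
	bzero(&nstat_example_provider, sizeof(nstat_example_provider));
	nstat_example_provider.nstat_provider_id = NSTAT_PROVIDER_NONE;  // hypothetical id choice
	nstat_example_provider.nstat_descriptor_length = 0;
	nstat_example_provider.nstat_lookup = example_lookup;            // hypothetical callbacks
	nstat_example_provider.nstat_gone = example_gone;
	nstat_example_provider.nstat_counts = example_counts;
	nstat_example_provider.nstat_release = example_release;
	nstat_example_provider.next = nstat_providers;                   // push onto the list head
	nstat_providers = &nstat_example_provider;
}
#endif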
194
195 typedef STAILQ_HEAD(, nstat_src) stailq_head_nstat_src;
196 typedef STAILQ_ENTRY(nstat_src) stailq_entry_nstat_src;
197
198 typedef TAILQ_HEAD(, nstat_tu_shadow) tailq_head_tu_shadow;
199 typedef TAILQ_ENTRY(nstat_tu_shadow) tailq_entry_tu_shadow;
200
201 typedef TAILQ_HEAD(, nstat_procdetails) tailq_head_procdetails;
202 typedef TAILQ_ENTRY(nstat_procdetails) tailq_entry_procdetails;
203
204 typedef struct nstat_src {
205 tailq_entry_nstat_src ns_control_link; // All sources for the nstat_control_state, for iterating over.
206 nstat_control_state *ns_control; // The nstat_control_state that this is a source for
207 nstat_src_ref_t srcref;
208 nstat_provider *provider;
209 nstat_provider_cookie_t cookie;
210 uint32_t filter;
211 uint64_t seq;
212 } nstat_src;
213
214 static errno_t nstat_control_send_counts(nstat_control_state *, nstat_src *, unsigned long long, u_int16_t, int *);
215 static int nstat_control_send_description(nstat_control_state *state, nstat_src *src, u_int64_t context, u_int16_t hdr_flags);
216 static int nstat_control_send_update(nstat_control_state *state, nstat_src *src, u_int64_t context, u_int64_t event, u_int16_t hdr_flags, int *gone);
217 static errno_t nstat_control_send_removed(nstat_control_state *, nstat_src *);
218 static errno_t nstat_control_send_goodbye(nstat_control_state *state, nstat_src *src);
219 static void nstat_control_cleanup_source(nstat_control_state *state, nstat_src *src, boolean_t);
220 static bool nstat_control_reporting_allowed(nstat_control_state *state, nstat_src *src);
221 static boolean_t nstat_control_begin_query(nstat_control_state *state, const nstat_msg_hdr *hdrp);
222 static u_int16_t nstat_control_end_query(nstat_control_state *state, nstat_src *last_src, boolean_t partial);
223 static void nstat_ifnet_report_ecn_stats(void);
224 static void nstat_ifnet_report_lim_stats(void);
225 static void nstat_net_api_report_stats(void);
226 static errno_t nstat_set_provider_filter(nstat_control_state *state, nstat_msg_add_all_srcs *req);
227 static errno_t nstat_control_send_event(nstat_control_state *state, nstat_src *src, u_int64_t event);
228
229 static u_int32_t nstat_udp_watchers = 0;
230 static u_int32_t nstat_tcp_watchers = 0;
231
232 static void nstat_control_register(void);
233
234 /*
235 * The lock order is as follows:
236 *
237 * socket_lock (inpcb)
238 * nstat_mtx
239 * state->ncs_mtx
240 */
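/*
 * Illustrative sketch (not part of the original source): any path that needs
 * more than one of these locks must take them in the order documented above
 * and release them in reverse. nstat_tcp_new_pcb() below is a concrete
 * instance of taking the outer two:
 */
#if 0
	socket_lock(inp->inp_socket, 0);        // 1. socket lock (inpcb)
	lck_mtx_lock(&nstat_mtx);               // 2. global control-state list lock
	lck_mtx_lock(&state->ncs_mtx);          // 3. per-client state lock
	// ... work requiring all three ...
	lck_mtx_unlock(&state->ncs_mtx);
	lck_mtx_unlock(&nstat_mtx);
	socket_unlock(inp->inp_socket, 0);
#endif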
241 static volatile OSMallocTag nstat_malloc_tag = NULL;
242 static nstat_control_state *nstat_controls = NULL;
243 static uint64_t nstat_idle_time = 0;
244 static decl_lck_mtx_data(, nstat_mtx);
245
246 /* some extern definitions */
247 extern void mbuf_report_peak_usage(void);
248 extern void tcp_report_stats(void);
249
250 static void
251 nstat_copy_sa_out(
252 const struct sockaddr *src,
253 struct sockaddr *dst,
254 int maxlen)
255 {
256 if (src->sa_len > maxlen) {
257 return;
258 }
259
260 bcopy(src, dst, src->sa_len);
261 if (src->sa_family == AF_INET6 &&
262 src->sa_len >= sizeof(struct sockaddr_in6)) {
263 struct sockaddr_in6 *sin6 = (struct sockaddr_in6*)(void *)dst;
264 if (IN6_IS_SCOPE_EMBED(&sin6->sin6_addr)) {
265 if (sin6->sin6_scope_id == 0) {
266 sin6->sin6_scope_id = ntohs(sin6->sin6_addr.s6_addr16[1]);
267 }
268 sin6->sin6_addr.s6_addr16[1] = 0;
269 }
270 }
271 }
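/*
 * Illustrative sketch (not part of the original source): KAME-derived stacks
 * embed the scope id of a link-local address in the second 16-bit word of
 * the address itself. The copy above extracts it, so an in-kernel
 * fe80:4::1 with sin6_scope_id == 0 becomes a clean fe80::1 with
 * sin6_scope_id == 4 on the way out:
 */
#if 0
	struct sockaddr_in6 kernel_form;        // setup elided: fe80:4::1, sin6_scope_id == 0
	struct sockaddr_in6 user_form;

	nstat_copy_sa_out((const struct sockaddr *)&kernel_form,
	    (struct sockaddr *)&user_form, sizeof(user_form));
	// user_form now holds fe80::1 with sin6_scope_id == 4
#endif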
272
273 static void
274 nstat_ip_to_sockaddr(
275 const struct in_addr *ip,
276 u_int16_t port,
277 struct sockaddr_in *sin,
278 u_int32_t maxlen)
279 {
280 if (maxlen < sizeof(struct sockaddr_in)) {
281 return;
282 }
283
284 sin->sin_family = AF_INET;
285 sin->sin_len = sizeof(*sin);
286 sin->sin_port = port;
287 sin->sin_addr = *ip;
288 }
289
290 u_int16_t
291 nstat_ifnet_to_flags(
292 struct ifnet *ifp)
293 {
294 u_int16_t flags = 0;
295 u_int32_t functional_type = if_functional_type(ifp, FALSE);
296
297 /* Panic if someone adds a functional type without updating ntstat. */
298 VERIFY(0 <= functional_type && functional_type <= IFRTYPE_FUNCTIONAL_LAST);
299
300 switch (functional_type) {
301 case IFRTYPE_FUNCTIONAL_UNKNOWN:
302 flags |= NSTAT_IFNET_IS_UNKNOWN_TYPE;
303 break;
304 case IFRTYPE_FUNCTIONAL_LOOPBACK:
305 flags |= NSTAT_IFNET_IS_LOOPBACK;
306 break;
307 case IFRTYPE_FUNCTIONAL_WIRED:
308 case IFRTYPE_FUNCTIONAL_INTCOPROC:
309 flags |= NSTAT_IFNET_IS_WIRED;
310 break;
311 case IFRTYPE_FUNCTIONAL_WIFI_INFRA:
312 flags |= NSTAT_IFNET_IS_WIFI;
313 break;
314 case IFRTYPE_FUNCTIONAL_WIFI_AWDL:
315 flags |= NSTAT_IFNET_IS_WIFI;
316 flags |= NSTAT_IFNET_IS_AWDL;
317 break;
318 case IFRTYPE_FUNCTIONAL_CELLULAR:
319 flags |= NSTAT_IFNET_IS_CELLULAR;
320 break;
321 case IFRTYPE_FUNCTIONAL_COMPANIONLINK:
322 flags |= NSTAT_IFNET_IS_COMPANIONLINK;
323 break;
324 }
325
326 if (IFNET_IS_EXPENSIVE(ifp)) {
327 flags |= NSTAT_IFNET_IS_EXPENSIVE;
328 }
329 if (IFNET_IS_CONSTRAINED(ifp)) {
330 flags |= NSTAT_IFNET_IS_CONSTRAINED;
331 }
332
333 return flags;
334 }
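/*
 * Illustrative sketch (not part of the original source): the returned mask
 * can carry several properties at once -- an AWDL link reports both
 * NSTAT_IFNET_IS_WIFI and NSTAT_IFNET_IS_AWDL -- so consumers should test
 * bits individually rather than compare for equality:
 */
#if 0
	u_int16_t flags = nstat_ifnet_to_flags(ifp);

	if (flags & NSTAT_IFNET_IS_CELLULAR) {
		printf("cellular ");
	}
	if (flags & NSTAT_IFNET_IS_WIFI) {
		printf("wifi ");
	}
	if (flags & NSTAT_IFNET_IS_AWDL) {
		printf("awdl ");
	}
	if (flags & NSTAT_IFNET_IS_EXPENSIVE) {
		printf("expensive ");
	}
#endif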
335
336 static u_int16_t
337 nstat_inpcb_to_flags(
338 const struct inpcb *inp)
339 {
340 u_int16_t flags = 0;
341
342 if (inp != NULL) {
343 if (inp->inp_last_outifp != NULL) {
344 struct ifnet *ifp = inp->inp_last_outifp;
345 flags = nstat_ifnet_to_flags(ifp);
346
347 struct tcpcb *tp = intotcpcb(inp);
348 if (tp) {
349 if (tp->t_flags & TF_LOCAL) {
350 flags |= NSTAT_IFNET_IS_LOCAL;
351 } else {
352 flags |= NSTAT_IFNET_IS_NON_LOCAL;
353 }
354 }
355 } else {
356 flags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
357 }
358 if (inp->inp_socket != NULL &&
359 (inp->inp_socket->so_flags1 & SOF1_CELLFALLBACK)) {
360 flags |= NSTAT_IFNET_VIA_CELLFALLBACK;
361 }
362 }
363 return flags;
364 }
365
366 #pragma mark -- Network Statistic Providers --
367
368 static errno_t nstat_control_source_add(u_int64_t context, nstat_control_state *state, nstat_provider *provider, nstat_provider_cookie_t cookie);
369 struct nstat_provider *nstat_providers = NULL;
370
371 static struct nstat_provider*
372 nstat_find_provider_by_id(
373 nstat_provider_id_t id)
374 {
375 struct nstat_provider *provider;
376
377 for (provider = nstat_providers; provider != NULL; provider = provider->next) {
378 if (provider->nstat_provider_id == id) {
379 break;
380 }
381 }
382
383 return provider;
384 }
385
386 static errno_t
387 nstat_lookup_entry(
388 nstat_provider_id_t id,
389 const void *data,
390 u_int32_t length,
391 nstat_provider **out_provider,
392 nstat_provider_cookie_t *out_cookie)
393 {
394 *out_provider = nstat_find_provider_by_id(id);
395 if (*out_provider == NULL) {
396 return ENOENT;
397 }
398
399 return (*out_provider)->nstat_lookup(data, length, out_cookie);
400 }
401
402 static void nstat_init_route_provider(void);
403 static void nstat_init_tcp_provider(void);
404 static void nstat_init_udp_provider(void);
405 static void nstat_init_ifnet_provider(void);
406
407 __private_extern__ void
408 nstat_init(void)
409 {
410 if (nstat_malloc_tag != NULL) {
411 return;
412 }
413
414 OSMallocTag tag = OSMalloc_Tagalloc(NET_STAT_CONTROL_NAME, OSMT_DEFAULT);
415 if (!OSCompareAndSwapPtr(NULL, tag, &nstat_malloc_tag)) {
416 OSMalloc_Tagfree(tag);
417 tag = nstat_malloc_tag;
418 } else {
419 // We need to initialize other things; we do it here because this code path will only be hit once.
420 nstat_init_route_provider();
421 nstat_init_tcp_provider();
422 nstat_init_udp_provider();
423 nstat_init_ifnet_provider();
424 nstat_control_register();
425 }
426 }
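/*
 * Illustrative user-space sketch (not part of the original source): clients
 * reach this module through the kernel control that nstat_control_register()
 * publishes under NET_STAT_CONTROL_NAME (this assumes access to the private
 * net/ntstat.h header). The usual connection dance is to resolve the control
 * name to an id with CTLIOCGINFO and then connect:
 */
#if 0
#include <sys/socket.h>
#include <sys/sys_domain.h>
#include <sys/kern_control.h>
#include <sys/ioctl.h>
#include <string.h>
#include <unistd.h>
#include <net/ntstat.h>

static int
open_ntstat(void)
{
	struct ctl_info info;
	struct sockaddr_ctl addr;
	int fd = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);

	if (fd < 0) {
		return -1;
	}
	memset(&info, 0, sizeof(info));
	strlcpy(info.ctl_name, NET_STAT_CONTROL_NAME, sizeof(info.ctl_name));
	if (ioctl(fd, CTLIOCGINFO, &info) < 0) {        // resolve name -> control id
		close(fd);
		return -1;
	}
	memset(&addr, 0, sizeof(addr));
	addr.sc_len = sizeof(addr);
	addr.sc_family = AF_SYSTEM;
	addr.ss_sysaddr = AF_SYS_CONTROL;
	addr.sc_id = info.ctl_id;
	addr.sc_unit = 0;                               // let the kernel pick a unit
	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}
#endif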
427
428 #pragma mark -- Aligned Buffer Allocation --
429
430 struct align_header {
431 u_int32_t offset;
432 u_int32_t length;
433 };
434
435 static void*
436 nstat_malloc_aligned(
437 u_int32_t length,
438 u_int8_t alignment,
439 OSMallocTag tag)
440 {
441 struct align_header *hdr = NULL;
442 u_int32_t size = length + sizeof(*hdr) + alignment - 1;
443
444 u_int8_t *buffer = OSMalloc(size, tag);
445 if (buffer == NULL) {
446 return NULL;
447 }
448
449 u_int8_t *aligned = buffer + sizeof(*hdr);
450 aligned = (u_int8_t*)P2ROUNDUP(aligned, alignment);
451
452 hdr = (struct align_header*)(void *)(aligned - sizeof(*hdr));
453 hdr->offset = aligned - buffer;
454 hdr->length = size;
455
456 return aligned;
457 }
458
459 static void
460 nstat_free_aligned(
461 void *buffer,
462 OSMallocTag tag)
463 {
464 struct align_header *hdr = (struct align_header*)(void *)((u_int8_t*)buffer - sizeof(*hdr));
465 OSFree(((char*)buffer) - hdr->offset, hdr->length, tag);
466 }
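/*
 * Illustrative sketch (not part of the original source): the pointer handed
 * back by nstat_malloc_aligned() is preceded by an align_header recording
 * how to find the raw allocation again. With hypothetical numbers -- an
 * 8-byte header, 8-byte alignment, raw buffer at address 100:
 */
#if 0
	u_int8_t *buffer = OSMalloc(size, tag);                     // buffer == 100
	u_int8_t *aligned = buffer + sizeof(struct align_header);   // == 108
	aligned = (u_int8_t*)P2ROUNDUP(aligned, 8);                 // rounds up to 112
	// the header lives at 112 - 8 == 104; hdr->offset == 112 - 100 == 12
	// nstat_free_aligned() later frees at aligned - hdr->offset == 100
#endif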
467
468 #pragma mark -- Route Provider --
469
470 static nstat_provider nstat_route_provider;
471
472 static errno_t
473 nstat_route_lookup(
474 const void *data,
475 u_int32_t length,
476 nstat_provider_cookie_t *out_cookie)
477 {
478 // rt_lookup doesn't take const params, but it doesn't modify the parameters
479 // for the lookup either, so we use a union to eliminate the const-cast warning.
480 union {
481 struct sockaddr *sa;
482 const struct sockaddr *const_sa;
483 } dst, mask;
484
485 const nstat_route_add_param *param = (const nstat_route_add_param*)data;
486 *out_cookie = NULL;
487
488 if (length < sizeof(*param)) {
489 return EINVAL;
490 }
491
492 if (param->dst.v4.sin_family == 0 ||
493 param->dst.v4.sin_family > AF_MAX ||
494 (param->mask.v4.sin_family != 0 && param->mask.v4.sin_family != param->dst.v4.sin_family)) {
495 return EINVAL;
496 }
497
498 if (param->dst.v4.sin_len > sizeof(param->dst) ||
499 (param->mask.v4.sin_family && param->mask.v4.sin_len > sizeof(param->mask))) {
500 return EINVAL;
501 }
502 if ((param->dst.v4.sin_family == AF_INET &&
503 param->dst.v4.sin_len < sizeof(struct sockaddr_in)) ||
504 (param->dst.v6.sin6_family == AF_INET6 &&
505 param->dst.v6.sin6_len < sizeof(struct sockaddr_in6))) {
506 return EINVAL;
507 }
508
509 dst.const_sa = (const struct sockaddr*)&param->dst;
510 mask.const_sa = param->mask.v4.sin_family ? (const struct sockaddr*)&param->mask : NULL;
511
512 struct radix_node_head *rnh = rt_tables[dst.sa->sa_family];
513 if (rnh == NULL) {
514 return EAFNOSUPPORT;
515 }
516
517 lck_mtx_lock(rnh_lock);
518 struct rtentry *rt = rt_lookup(TRUE, dst.sa, mask.sa, rnh, param->ifindex);
519 lck_mtx_unlock(rnh_lock);
520
521 if (rt) {
522 *out_cookie = (nstat_provider_cookie_t)rt;
523 }
524
525 return rt ? 0 : ENOENT;
526 }
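/*
 * Illustrative sketch (not part of the original source): a lookup request is
 * phrased as an nstat_route_add_param carrying the destination (plus an
 * optional mask and interface index), e.g. to find the route toward a given
 * IPv4 destination. The address used here is hypothetical:
 */
#if 0
	nstat_route_add_param param;
	bzero(&param, sizeof(param));
	param.dst.v4.sin_family = AF_INET;
	param.dst.v4.sin_len = sizeof(struct sockaddr_in);
	param.dst.v4.sin_addr.s_addr = htonl(0x0a000001);   // 10.0.0.1 (hypothetical)
	param.ifindex = 0;                                  // no interface constraint

	nstat_provider_cookie_t cookie = NULL;
	errno_t err = nstat_route_lookup(&param, sizeof(param), &cookie);
#endif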
527
528 static int
529 nstat_route_gone(
530 nstat_provider_cookie_t cookie)
531 {
532 struct rtentry *rt = (struct rtentry*)cookie;
533 return ((rt->rt_flags & RTF_UP) == 0) ? 1 : 0;
534 }
535
536 static errno_t
537 nstat_route_counts(
538 nstat_provider_cookie_t cookie,
539 struct nstat_counts *out_counts,
540 int *out_gone)
541 {
542 struct rtentry *rt = (struct rtentry*)cookie;
543 struct nstat_counts *rt_stats = rt->rt_stats;
544
545 if (out_gone) {
546 *out_gone = 0;
547 }
548
549 if (out_gone && (rt->rt_flags & RTF_UP) == 0) {
550 *out_gone = 1;
551 }
552
553 if (rt_stats) {
554 atomic_get_64(out_counts->nstat_rxpackets, &rt_stats->nstat_rxpackets);
555 atomic_get_64(out_counts->nstat_rxbytes, &rt_stats->nstat_rxbytes);
556 atomic_get_64(out_counts->nstat_txpackets, &rt_stats->nstat_txpackets);
557 atomic_get_64(out_counts->nstat_txbytes, &rt_stats->nstat_txbytes);
558 out_counts->nstat_rxduplicatebytes = rt_stats->nstat_rxduplicatebytes;
559 out_counts->nstat_rxoutoforderbytes = rt_stats->nstat_rxoutoforderbytes;
560 out_counts->nstat_txretransmit = rt_stats->nstat_txretransmit;
561 out_counts->nstat_connectattempts = rt_stats->nstat_connectattempts;
562 out_counts->nstat_connectsuccesses = rt_stats->nstat_connectsuccesses;
563 out_counts->nstat_min_rtt = rt_stats->nstat_min_rtt;
564 out_counts->nstat_avg_rtt = rt_stats->nstat_avg_rtt;
565 out_counts->nstat_var_rtt = rt_stats->nstat_var_rtt;
566 out_counts->nstat_cell_rxbytes = out_counts->nstat_cell_txbytes = 0;
567 } else {
568 bzero(out_counts, sizeof(*out_counts));
569 }
570
571 return 0;
572 }
573
574 static void
575 nstat_route_release(
576 nstat_provider_cookie_t cookie,
577 __unused int locked)
578 {
579 rtfree((struct rtentry*)cookie);
580 }
581
582 static u_int32_t nstat_route_watchers = 0;
583
584 static int
585 nstat_route_walktree_add(
586 struct radix_node *rn,
587 void *context)
588 {
589 errno_t result = 0;
590 struct rtentry *rt = (struct rtentry *)rn;
591 nstat_control_state *state = (nstat_control_state*)context;
592
593 LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
594
595 /* RTF_UP can't change while rnh_lock is held */
596 if ((rt->rt_flags & RTF_UP) != 0) {
597 /* Clear RTPRF_OURS if the route is still usable */
598 RT_LOCK(rt);
599 if (rt_validate(rt)) {
600 RT_ADDREF_LOCKED(rt);
601 RT_UNLOCK(rt);
602 } else {
603 RT_UNLOCK(rt);
604 rt = NULL;
605 }
606
607 /* Otherwise if RTF_CONDEMNED, treat it as if it were down */
608 if (rt == NULL) {
609 return 0;
610 }
611
612 result = nstat_control_source_add(0, state, &nstat_route_provider, rt);
613 if (result != 0) {
614 rtfree_locked(rt);
615 }
616 }
617
618 return result;
619 }
620
621 static errno_t
622 nstat_route_add_watcher(
623 nstat_control_state *state,
624 nstat_msg_add_all_srcs *req)
625 {
626 int i;
627 errno_t result = 0;
628
629 lck_mtx_lock(rnh_lock);
630
631 result = nstat_set_provider_filter(state, req);
632 if (result == 0) {
633 OSIncrementAtomic(&nstat_route_watchers);
634
635 for (i = 1; i < AF_MAX; i++) {
636 struct radix_node_head *rnh;
637 rnh = rt_tables[i];
638 if (!rnh) {
639 continue;
640 }
641
642 result = rnh->rnh_walktree(rnh, nstat_route_walktree_add, state);
643 if (result != 0) {
644 // This is probably resource exhaustion.
645 // There currently isn't a good way to recover from this.
646 // Least bad seems to be to give up on the add-all but leave
647 // the watcher in place.
648 break;
649 }
650 }
651 }
652 lck_mtx_unlock(rnh_lock);
653
654 return result;
655 }
656
657 __private_extern__ void
658 nstat_route_new_entry(
659 struct rtentry *rt)
660 {
661 if (nstat_route_watchers == 0) {
662 return;
663 }
664
665 lck_mtx_lock(&nstat_mtx);
666 if ((rt->rt_flags & RTF_UP) != 0) {
667 nstat_control_state *state;
668 for (state = nstat_controls; state; state = state->ncs_next) {
669 if ((state->ncs_watching & (1 << NSTAT_PROVIDER_ROUTE)) != 0) {
670 // this client is watching routes
671 // acquire a reference for the route
672 RT_ADDREF(rt);
673
674 // add the source, if that fails, release the reference
675 if (nstat_control_source_add(0, state, &nstat_route_provider, rt) != 0) {
676 RT_REMREF(rt);
677 }
678 }
679 }
680 }
681 lck_mtx_unlock(&nstat_mtx);
682 }
683
684 static void
685 nstat_route_remove_watcher(
686 __unused nstat_control_state *state)
687 {
688 OSDecrementAtomic(&nstat_route_watchers);
689 }
690
691 static errno_t
692 nstat_route_copy_descriptor(
693 nstat_provider_cookie_t cookie,
694 void *data,
695 u_int32_t len)
696 {
697 nstat_route_descriptor *desc = (nstat_route_descriptor*)data;
698 if (len < sizeof(*desc)) {
699 return EINVAL;
700 }
701 bzero(desc, sizeof(*desc));
702
703 struct rtentry *rt = (struct rtentry*)cookie;
704 desc->id = (uint64_t)VM_KERNEL_ADDRPERM(rt);
705 desc->parent_id = (uint64_t)VM_KERNEL_ADDRPERM(rt->rt_parent);
706 desc->gateway_id = (uint64_t)VM_KERNEL_ADDRPERM(rt->rt_gwroute);
707
708
709 // key/dest
710 struct sockaddr *sa;
711 if ((sa = rt_key(rt))) {
712 nstat_copy_sa_out(sa, &desc->dst.sa, sizeof(desc->dst));
713 }
714
715 // mask
716 if ((sa = rt_mask(rt)) && sa->sa_len <= sizeof(desc->mask)) {
717 memcpy(&desc->mask, sa, sa->sa_len);
718 }
719
720 // gateway
721 if ((sa = rt->rt_gateway)) {
722 nstat_copy_sa_out(sa, &desc->gateway.sa, sizeof(desc->gateway));
723 }
724
725 if (rt->rt_ifp) {
726 desc->ifindex = rt->rt_ifp->if_index;
727 }
728
729 desc->flags = rt->rt_flags;
730
731 return 0;
732 }
733
734 static bool
735 nstat_route_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter)
736 {
737 bool retval = true;
738
739 if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0) {
740 struct rtentry *rt = (struct rtentry*)cookie;
741 struct ifnet *ifp = rt->rt_ifp;
742
743 if (ifp) {
744 uint16_t interface_properties = nstat_ifnet_to_flags(ifp);
745
746 if ((filter->npf_flags & interface_properties) == 0) {
747 retval = false;
748 }
749 }
750 }
751 return retval;
752 }
753
754 static void
755 nstat_init_route_provider(void)
756 {
757 bzero(&nstat_route_provider, sizeof(nstat_route_provider));
758 nstat_route_provider.nstat_descriptor_length = sizeof(nstat_route_descriptor);
759 nstat_route_provider.nstat_provider_id = NSTAT_PROVIDER_ROUTE;
760 nstat_route_provider.nstat_lookup = nstat_route_lookup;
761 nstat_route_provider.nstat_gone = nstat_route_gone;
762 nstat_route_provider.nstat_counts = nstat_route_counts;
763 nstat_route_provider.nstat_release = nstat_route_release;
764 nstat_route_provider.nstat_watcher_add = nstat_route_add_watcher;
765 nstat_route_provider.nstat_watcher_remove = nstat_route_remove_watcher;
766 nstat_route_provider.nstat_copy_descriptor = nstat_route_copy_descriptor;
767 nstat_route_provider.nstat_reporting_allowed = nstat_route_reporting_allowed;
768 nstat_route_provider.next = nstat_providers;
769 nstat_providers = &nstat_route_provider;
770 }
771
772 #pragma mark -- Route Collection --
773
774 __private_extern__ struct nstat_counts*
775 nstat_route_attach(
776 struct rtentry *rte)
777 {
778 struct nstat_counts *result = rte->rt_stats;
779 if (result) {
780 return result;
781 }
782
783 if (nstat_malloc_tag == NULL) {
784 nstat_init();
785 }
786
787 result = nstat_malloc_aligned(sizeof(*result), sizeof(u_int64_t), nstat_malloc_tag);
788 if (!result) {
789 return result;
790 }
791
792 bzero(result, sizeof(*result));
793
794 if (!OSCompareAndSwapPtr(NULL, result, &rte->rt_stats)) {
795 nstat_free_aligned(result, nstat_malloc_tag);
796 result = rte->rt_stats;
797 }
798
799 return result;
800 }
801
802 __private_extern__ void
803 nstat_route_detach(
804 struct rtentry *rte)
805 {
806 if (rte->rt_stats) {
807 nstat_free_aligned(rte->rt_stats, nstat_malloc_tag);
808 rte->rt_stats = NULL;
809 }
810 }
811
812 __private_extern__ void
813 nstat_route_connect_attempt(
814 struct rtentry *rte)
815 {
816 while (rte) {
817 struct nstat_counts* stats = nstat_route_attach(rte);
818 if (stats) {
819 OSIncrementAtomic(&stats->nstat_connectattempts);
820 }
821
822 rte = rte->rt_parent;
823 }
824 }
825
826 __private_extern__ void
827 nstat_route_connect_success(
828 struct rtentry *rte)
829 {
830 // Credit the success to this route and each of its parents
831 while (rte) {
832 struct nstat_counts* stats = nstat_route_attach(rte);
833 if (stats) {
834 OSIncrementAtomic(&stats->nstat_connectsuccesses);
835 }
836
837 rte = rte->rt_parent;
838 }
839 }
840
841 __private_extern__ void
842 nstat_route_tx(
843 struct rtentry *rte,
844 u_int32_t packets,
845 u_int32_t bytes,
846 u_int32_t flags)
847 {
848 while (rte) {
849 struct nstat_counts* stats = nstat_route_attach(rte);
850 if (stats) {
851 if ((flags & NSTAT_TX_FLAG_RETRANSMIT) != 0) {
852 OSAddAtomic(bytes, &stats->nstat_txretransmit);
853 } else {
854 OSAddAtomic64((SInt64)packets, (SInt64*)&stats->nstat_txpackets);
855 OSAddAtomic64((SInt64)bytes, (SInt64*)&stats->nstat_txbytes);
856 }
857 }
858
859 rte = rte->rt_parent;
860 }
861 }
862
863 __private_extern__ void
864 nstat_route_rx(
865 struct rtentry *rte,
866 u_int32_t packets,
867 u_int32_t bytes,
868 u_int32_t flags)
869 {
870 while (rte) {
871 struct nstat_counts* stats = nstat_route_attach(rte);
872 if (stats) {
873 if (flags == 0) {
874 OSAddAtomic64((SInt64)packets, (SInt64*)&stats->nstat_rxpackets);
875 OSAddAtomic64((SInt64)bytes, (SInt64*)&stats->nstat_rxbytes);
876 } else {
877 if (flags & NSTAT_RX_FLAG_OUT_OF_ORDER) {
878 OSAddAtomic(bytes, &stats->nstat_rxoutoforderbytes);
879 }
880 if (flags & NSTAT_RX_FLAG_DUPLICATE) {
881 OSAddAtomic(bytes, &stats->nstat_rxduplicatebytes);
882 }
883 }
884 }
885
886 rte = rte->rt_parent;
887 }
888 }
889
890 /* atomically average current value at _val_addr with _new_val and store */
891 #define NSTAT_EWMA_ATOMIC(_val_addr, _new_val, _decay) do { \
892 volatile uint32_t _old_val; \
893 volatile uint32_t _avg; \
894 do { \
895 _old_val = *_val_addr; \
896 if (_old_val == 0) \
897 { \
898 _avg = _new_val; \
899 } \
900 else \
901 { \
902 _avg = _old_val - (_old_val >> _decay) + (_new_val >> _decay); \
903 } \
904 if (_old_val == _avg) break; \
905 } while (!OSCompareAndSwap(_old_val, _avg, _val_addr)); \
906 } while (0)
907
908 /* atomically compute minimum of current value at _val_addr with _new_val and store */
909 #define NSTAT_MIN_ATOMIC(_val_addr, _new_val) do { \
910 volatile uint32_t _old_val; \
911 do { \
912 _old_val = *_val_addr; \
913 if (_old_val != 0 && _old_val < _new_val) \
914 { \
915 break; \
916 } \
917 } while (!OSCompareAndSwap(_old_val, _new_val, _val_addr)); \
918 } while (0)
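/*
 * Illustrative sketch (not part of the original source): with _decay == 3
 * the EWMA keeps 7/8 of the old value and folds in 1/8 of the new sample
 * using nothing but shifts. A plain, non-atomic equivalent of one step:
 */
#if 0
static uint32_t
ewma_step(uint32_t avg, uint32_t sample, uint32_t decay)
{
	if (avg == 0) {
		return sample;          // the first sample seeds the average
	}
	return avg - (avg >> decay) + (sample >> decay);
}

// ewma_step(800, 1600, 3) == 800 - 100 + 200 == 900
#endif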
919
920 __private_extern__ void
921 nstat_route_rtt(
922 struct rtentry *rte,
923 u_int32_t rtt,
924 u_int32_t rtt_var)
925 {
926 const uint32_t decay = 3;
927
928 while (rte) {
929 struct nstat_counts* stats = nstat_route_attach(rte);
930 if (stats) {
931 NSTAT_EWMA_ATOMIC(&stats->nstat_avg_rtt, rtt, decay);
932 NSTAT_MIN_ATOMIC(&stats->nstat_min_rtt, rtt);
933 NSTAT_EWMA_ATOMIC(&stats->nstat_var_rtt, rtt_var, decay);
934 }
935 rte = rte->rt_parent;
936 }
937 }
938
939 __private_extern__ void
940 nstat_route_update(
941 struct rtentry *rte,
942 uint32_t connect_attempts,
943 uint32_t connect_successes,
944 uint32_t rx_packets,
945 uint32_t rx_bytes,
946 uint32_t rx_duplicatebytes,
947 uint32_t rx_outoforderbytes,
948 uint32_t tx_packets,
949 uint32_t tx_bytes,
950 uint32_t tx_retransmit,
951 uint32_t rtt,
952 uint32_t rtt_var)
953 {
954 const uint32_t decay = 3;
955
956 while (rte) {
957 struct nstat_counts* stats = nstat_route_attach(rte);
958 if (stats) {
959 OSAddAtomic(connect_attempts, &stats->nstat_connectattempts);
960 OSAddAtomic(connect_successes, &stats->nstat_connectsuccesses);
961 OSAddAtomic64((SInt64)tx_packets, (SInt64*)&stats->nstat_txpackets);
962 OSAddAtomic64((SInt64)tx_bytes, (SInt64*)&stats->nstat_txbytes);
963 OSAddAtomic(tx_retransmit, &stats->nstat_txretransmit);
964 OSAddAtomic64((SInt64)rx_packets, (SInt64*)&stats->nstat_rxpackets);
965 OSAddAtomic64((SInt64)rx_bytes, (SInt64*)&stats->nstat_rxbytes);
966 OSAddAtomic(rx_outoforderbytes, &stats->nstat_rxoutoforderbytes);
967 OSAddAtomic(rx_duplicatebytes, &stats->nstat_rxduplicatebytes);
968
969 if (rtt != 0) {
970 NSTAT_EWMA_ATOMIC(&stats->nstat_avg_rtt, rtt, decay);
971 NSTAT_MIN_ATOMIC(&stats->nstat_min_rtt, rtt);
972 NSTAT_EWMA_ATOMIC(&stats->nstat_var_rtt, rtt_var, decay);
973 }
974 }
975 rte = rte->rt_parent;
976 }
977 }
978
979 #pragma mark -- TCP Kernel Provider --
980
981 /*
982 * Due to the way the kernel deallocates a process (the process structure
983 * might be gone by the time we get the PCB detach notification),
984 * we need to cache the process name. Without this, proc_name() would
985 * come back empty and the process name would never be sent to userland.
986 *
987 * For UDP sockets, we also cache the connection tuples along with
988 * the interface index. This is necessary because when UDP sockets are
989 * disconnected, the connection tuples are forever lost from the inpcb, thus
990 * we need to keep track of the last call to connect() in ntstat.
991 */
992 struct nstat_tucookie {
993 struct inpcb *inp;
994 char pname[MAXCOMLEN + 1];
995 bool cached;
996 union {
997 struct sockaddr_in v4;
998 struct sockaddr_in6 v6;
999 } local;
1000 union {
1001 struct sockaddr_in v4;
1002 struct sockaddr_in6 v6;
1003 } remote;
1004 unsigned int if_index;
1005 uint16_t ifnet_properties;
1006 };
1007
1008 static struct nstat_tucookie *
1009 nstat_tucookie_alloc_internal(
1010 struct inpcb *inp,
1011 bool ref,
1012 bool locked)
1013 {
1014 struct nstat_tucookie *cookie;
1015
1016 cookie = OSMalloc(sizeof(*cookie), nstat_malloc_tag);
1017 if (cookie == NULL) {
1018 return NULL;
1019 }
1020 if (!locked) {
1021 LCK_MTX_ASSERT(&nstat_mtx, LCK_MTX_ASSERT_NOTOWNED);
1022 }
1023 if (ref && in_pcb_checkstate(inp, WNT_ACQUIRE, locked) == WNT_STOPUSING) {
1024 OSFree(cookie, sizeof(*cookie), nstat_malloc_tag);
1025 return NULL;
1026 }
1027 bzero(cookie, sizeof(*cookie));
1028 cookie->inp = inp;
1029 proc_name(inp->inp_socket->last_pid, cookie->pname,
1030 sizeof(cookie->pname));
1031 /*
1032 * We only increment the reference count for UDP sockets because we
1033 * only cache UDP socket tuples.
1034 */
1035 if (SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP) {
1036 OSIncrementAtomic(&inp->inp_nstat_refcnt);
1037 }
1038
1039 return cookie;
1040 }
1041
1042 static struct nstat_tucookie *
1043 nstat_tucookie_alloc(
1044 struct inpcb *inp)
1045 {
1046 return nstat_tucookie_alloc_internal(inp, false, false);
1047 }
1048
1049 static struct nstat_tucookie *
1050 nstat_tucookie_alloc_ref(
1051 struct inpcb *inp)
1052 {
1053 return nstat_tucookie_alloc_internal(inp, true, false);
1054 }
1055
1056 static struct nstat_tucookie *
1057 nstat_tucookie_alloc_ref_locked(
1058 struct inpcb *inp)
1059 {
1060 return nstat_tucookie_alloc_internal(inp, true, true);
1061 }
1062
1063 static void
1064 nstat_tucookie_release_internal(
1065 struct nstat_tucookie *cookie,
1066 int inplock)
1067 {
1068 if (SOCK_PROTO(cookie->inp->inp_socket) == IPPROTO_UDP) {
1069 OSDecrementAtomic(&cookie->inp->inp_nstat_refcnt);
1070 }
1071 in_pcb_checkstate(cookie->inp, WNT_RELEASE, inplock);
1072 OSFree(cookie, sizeof(*cookie), nstat_malloc_tag);
1073 }
1074
1075 static void
1076 nstat_tucookie_release(
1077 struct nstat_tucookie *cookie)
1078 {
1079 nstat_tucookie_release_internal(cookie, false);
1080 }
1081
1082 static void
1083 nstat_tucookie_release_locked(
1084 struct nstat_tucookie *cookie)
1085 {
1086 nstat_tucookie_release_internal(cookie, true);
1087 }
1088
1089
1090 static nstat_provider nstat_tcp_provider;
1091
1092 static errno_t
1093 nstat_tcpudp_lookup(
1094 struct inpcbinfo *inpinfo,
1095 const void *data,
1096 u_int32_t length,
1097 nstat_provider_cookie_t *out_cookie)
1098 {
1099 struct inpcb *inp = NULL;
1100
1101 // parameter validation
1102 const nstat_tcp_add_param *param = (const nstat_tcp_add_param*)data;
1103 if (length < sizeof(*param)) {
1104 return EINVAL;
1105 }
1106
1107 // src and dst must match
1108 if (param->remote.v4.sin_family != 0 &&
1109 param->remote.v4.sin_family != param->local.v4.sin_family) {
1110 return EINVAL;
1111 }
1112
1113
1114 switch (param->local.v4.sin_family) {
1115 case AF_INET:
1116 {
1117 if (param->local.v4.sin_len != sizeof(param->local.v4) ||
1118 (param->remote.v4.sin_family != 0 &&
1119 param->remote.v4.sin_len != sizeof(param->remote.v4))) {
1120 return EINVAL;
1121 }
1122
1123 inp = in_pcblookup_hash(inpinfo, param->remote.v4.sin_addr, param->remote.v4.sin_port,
1124 param->local.v4.sin_addr, param->local.v4.sin_port, 1, NULL);
1125 }
1126 break;
1127
1128 #if INET6
1129 case AF_INET6:
1130 {
1131 union {
1132 const struct in6_addr *in6c;
1133 struct in6_addr *in6;
1134 } local, remote;
1135
1136 if (param->local.v6.sin6_len != sizeof(param->local.v6) ||
1137 (param->remote.v6.sin6_family != 0 &&
1138 param->remote.v6.sin6_len != sizeof(param->remote.v6))) {
1139 return EINVAL;
1140 }
1141
1142 local.in6c = &param->local.v6.sin6_addr;
1143 remote.in6c = &param->remote.v6.sin6_addr;
1144
1145 inp = in6_pcblookup_hash(inpinfo, remote.in6, param->remote.v6.sin6_port,
1146 local.in6, param->local.v6.sin6_port, 1, NULL);
1147 }
1148 break;
1149 #endif
1150
1151 default:
1152 return EINVAL;
1153 }
1154
1155 if (inp == NULL) {
1156 return ENOENT;
1157 }
1158
1159 // At this point we have a ref to the inpcb
1160 *out_cookie = nstat_tucookie_alloc(inp);
1161 if (*out_cookie == NULL) {
1162 in_pcb_checkstate(inp, WNT_RELEASE, 0);
1163 }
1164
1165 return 0;
1166 }
1167
1168 static errno_t
1169 nstat_tcp_lookup(
1170 const void *data,
1171 u_int32_t length,
1172 nstat_provider_cookie_t *out_cookie)
1173 {
1174 return nstat_tcpudp_lookup(&tcbinfo, data, length, out_cookie);
1175 }
1176
1177 static int
1178 nstat_tcp_gone(
1179 nstat_provider_cookie_t cookie)
1180 {
1181 struct nstat_tucookie *tucookie =
1182 (struct nstat_tucookie *)cookie;
1183 struct inpcb *inp;
1184 struct tcpcb *tp;
1185
1186 return (!(inp = tucookie->inp) ||
1187 !(tp = intotcpcb(inp)) ||
1188 inp->inp_state == INPCB_STATE_DEAD) ? 1 : 0;
1189 }
1190
1191 static errno_t
1192 nstat_tcp_counts(
1193 nstat_provider_cookie_t cookie,
1194 struct nstat_counts *out_counts,
1195 int *out_gone)
1196 {
1197 struct nstat_tucookie *tucookie =
1198 (struct nstat_tucookie *)cookie;
1199 struct inpcb *inp;
1200
1201 bzero(out_counts, sizeof(*out_counts));
1202
1203 if (out_gone) {
1204 *out_gone = 0;
1205 }
1206
1207 // if the pcb is in the dead state, we should stop using it
1208 if (nstat_tcp_gone(cookie)) {
1209 if (out_gone) {
1210 *out_gone = 1;
1211 }
1212 if (!(inp = tucookie->inp) || !intotcpcb(inp)) {
1213 return EINVAL;
1214 }
1215 }
1216 inp = tucookie->inp;
1217 struct tcpcb *tp = intotcpcb(inp);
1218
1219 atomic_get_64(out_counts->nstat_rxpackets, &inp->inp_stat->rxpackets);
1220 atomic_get_64(out_counts->nstat_rxbytes, &inp->inp_stat->rxbytes);
1221 atomic_get_64(out_counts->nstat_txpackets, &inp->inp_stat->txpackets);
1222 atomic_get_64(out_counts->nstat_txbytes, &inp->inp_stat->txbytes);
1223 out_counts->nstat_rxduplicatebytes = tp->t_stat.rxduplicatebytes;
1224 out_counts->nstat_rxoutoforderbytes = tp->t_stat.rxoutoforderbytes;
1225 out_counts->nstat_txretransmit = tp->t_stat.txretransmitbytes;
1226 out_counts->nstat_connectattempts = tp->t_state >= TCPS_SYN_SENT ? 1 : 0;
1227 out_counts->nstat_connectsuccesses = tp->t_state >= TCPS_ESTABLISHED ? 1 : 0;
1228 out_counts->nstat_avg_rtt = tp->t_srtt;
1229 out_counts->nstat_min_rtt = tp->t_rttbest;
1230 out_counts->nstat_var_rtt = tp->t_rttvar;
1231 if (out_counts->nstat_avg_rtt < out_counts->nstat_min_rtt) {
1232 out_counts->nstat_min_rtt = out_counts->nstat_avg_rtt;
1233 }
1234 atomic_get_64(out_counts->nstat_cell_rxbytes, &inp->inp_cstat->rxbytes);
1235 atomic_get_64(out_counts->nstat_cell_txbytes, &inp->inp_cstat->txbytes);
1236 atomic_get_64(out_counts->nstat_wifi_rxbytes, &inp->inp_wstat->rxbytes);
1237 atomic_get_64(out_counts->nstat_wifi_txbytes, &inp->inp_wstat->txbytes);
1238 atomic_get_64(out_counts->nstat_wired_rxbytes, &inp->inp_Wstat->rxbytes);
1239 atomic_get_64(out_counts->nstat_wired_txbytes, &inp->inp_Wstat->txbytes);
1240
1241 return 0;
1242 }
1243
1244 static void
1245 nstat_tcp_release(
1246 nstat_provider_cookie_t cookie,
1247 int locked)
1248 {
1249 struct nstat_tucookie *tucookie =
1250 (struct nstat_tucookie *)cookie;
1251
1252 nstat_tucookie_release_internal(tucookie, locked);
1253 }
1254
1255 static errno_t
1256 nstat_tcp_add_watcher(
1257 nstat_control_state *state,
1258 nstat_msg_add_all_srcs *req)
1259 {
1260 // There is a tricky issue around getting all TCP sockets added once
1261 // and only once. nstat_tcp_new_pcb() is called prior to the new item
1262 // being placed on any lists where it might be found.
1263 // By locking the tcbinfo.ipi_lock prior to marking the state as a watcher,
1264 // it should be impossible for a new socket to be added twice.
1265 // On the other hand, there is still a timing issue where a new socket
1266 // results in a call to nstat_tcp_new_pcb() before this watcher
1267 // is instantiated and yet the socket doesn't make it into ipi_listhead
1268 // prior to the scan. <rdar://problem/30361716>
1269
1270 errno_t result;
1271
1272 lck_rw_lock_shared(tcbinfo.ipi_lock);
1273 result = nstat_set_provider_filter(state, req);
1274 if (result == 0) {
1275 OSIncrementAtomic(&nstat_tcp_watchers);
1276
1277 // Add all current tcp inpcbs. Ignore those in timewait
1278 struct inpcb *inp;
1279 struct nstat_tucookie *cookie;
1280 LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list)
1281 {
1282 cookie = nstat_tucookie_alloc_ref(inp);
1283 if (cookie == NULL) {
1284 continue;
1285 }
1286 if (nstat_control_source_add(0, state, &nstat_tcp_provider,
1287 cookie) != 0) {
1288 nstat_tucookie_release(cookie);
1289 break;
1290 }
1291 }
1292 }
1293
1294 lck_rw_done(tcbinfo.ipi_lock);
1295
1296 return result;
1297 }
1298
1299 static void
1300 nstat_tcp_remove_watcher(
1301 __unused nstat_control_state *state)
1302 {
1303 OSDecrementAtomic(&nstat_tcp_watchers);
1304 }
1305
1306 __private_extern__ void
1307 nstat_tcp_new_pcb(
1308 struct inpcb *inp)
1309 {
1310 struct nstat_tucookie *cookie;
1311
1312 inp->inp_start_timestamp = mach_continuous_time();
1313
1314 if (nstat_tcp_watchers == 0) {
1315 return;
1316 }
1317
1318 socket_lock(inp->inp_socket, 0);
1319 lck_mtx_lock(&nstat_mtx);
1320 nstat_control_state *state;
1321 for (state = nstat_controls; state; state = state->ncs_next) {
1322 if ((state->ncs_watching & (1 << NSTAT_PROVIDER_TCP_KERNEL)) != 0) {
1323 // this client is watching tcp
1324 // acquire a reference for it
1325 cookie = nstat_tucookie_alloc_ref_locked(inp);
1326 if (cookie == NULL) {
1327 continue;
1328 }
1329 // add the source, if that fails, release the reference
1330 if (nstat_control_source_add(0, state,
1331 &nstat_tcp_provider, cookie) != 0) {
1332 nstat_tucookie_release_locked(cookie);
1333 break;
1334 }
1335 }
1336 }
1337 lck_mtx_unlock(&nstat_mtx);
1338 socket_unlock(inp->inp_socket, 0);
1339 }
1340
1341 __private_extern__ void
1342 nstat_pcb_detach(struct inpcb *inp)
1343 {
1344 nstat_control_state *state;
1345 nstat_src *src;
1346 tailq_head_nstat_src dead_list;
1347 struct nstat_tucookie *tucookie;
1348 errno_t result;
1349
1350 if (inp == NULL || (nstat_tcp_watchers == 0 && nstat_udp_watchers == 0)) {
1351 return;
1352 }
1353
1354 TAILQ_INIT(&dead_list);
1355 lck_mtx_lock(&nstat_mtx);
1356 for (state = nstat_controls; state; state = state->ncs_next) {
1357 lck_mtx_lock(&state->ncs_mtx);
1358 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
1359 {
1360 nstat_provider_id_t provider_id = src->provider->nstat_provider_id;
1361 if (provider_id == NSTAT_PROVIDER_TCP_KERNEL || provider_id == NSTAT_PROVIDER_UDP_KERNEL) {
1362 tucookie = (struct nstat_tucookie *)src->cookie;
1363 if (tucookie->inp == inp) {
1364 break;
1365 }
1366 }
1367 }
1368
1369 if (src) {
1370 result = nstat_control_send_goodbye(state, src);
1371
1372 TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
1373 TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
1374 }
1375 lck_mtx_unlock(&state->ncs_mtx);
1376 }
1377 lck_mtx_unlock(&nstat_mtx);
1378
1379 while ((src = TAILQ_FIRST(&dead_list))) {
1380 TAILQ_REMOVE(&dead_list, src, ns_control_link);
1381 nstat_control_cleanup_source(NULL, src, TRUE);
1382 }
1383 }
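/*
 * Illustrative sketch (not part of the original source): nstat_pcb_detach()
 * above uses a common two-phase teardown. Matching sources are unlinked onto
 * a local "dead list" while the locks are held; the cleanup -- which may
 * call back into the provider -- runs only after every lock is dropped:
 */
#if 0
	tailq_head_nstat_src dead_list;
	TAILQ_INIT(&dead_list);

	// phase 1: under nstat_mtx and ncs_mtx, move the source aside
	TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
	TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);

	// phase 2: all locks dropped, now safe to clean up
	while ((src = TAILQ_FIRST(&dead_list))) {
		TAILQ_REMOVE(&dead_list, src, ns_control_link);
		nstat_control_cleanup_source(NULL, src, TRUE);
	}
#endif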
1384
1385 __private_extern__ void
1386 nstat_pcb_event(struct inpcb *inp, u_int64_t event)
1387 {
1388 nstat_control_state *state;
1389 nstat_src *src;
1390 struct nstat_tucookie *tucookie;
1391 errno_t result;
1392 nstat_provider_id_t provider_id;
1393
1394 if (inp == NULL || (nstat_tcp_watchers == 0 && nstat_udp_watchers == 0)) {
1395 return;
1396 }
1397
1398 lck_mtx_lock(&nstat_mtx);
1399 for (state = nstat_controls; state; state = state->ncs_next) {
1400 if (((state->ncs_provider_filters[NSTAT_PROVIDER_TCP_KERNEL].npf_events & event) == 0) &&
1401 ((state->ncs_provider_filters[NSTAT_PROVIDER_UDP_KERNEL].npf_events & event) == 0)) {
1402 continue;
1403 }
1404 lck_mtx_lock(&state->ncs_mtx);
1405 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
1406 {
1407 provider_id = src->provider->nstat_provider_id;
1408 if (provider_id == NSTAT_PROVIDER_TCP_KERNEL || provider_id == NSTAT_PROVIDER_UDP_KERNEL) {
1409 tucookie = (struct nstat_tucookie *)src->cookie;
1410 if (tucookie->inp == inp) {
1411 break;
1412 }
1413 }
1414 }
1415
1416 if (src && ((state->ncs_provider_filters[provider_id].npf_events & event) != 0)) {
1417 result = nstat_control_send_event(state, src, event);
1418 }
1419 lck_mtx_unlock(&state->ncs_mtx);
1420 }
1421 lck_mtx_unlock(&nstat_mtx);
1422 }
1423
1424
1425 __private_extern__ void
1426 nstat_pcb_cache(struct inpcb *inp)
1427 {
1428 nstat_control_state *state;
1429 nstat_src *src;
1430 struct nstat_tucookie *tucookie;
1431
1432 if (inp == NULL || nstat_udp_watchers == 0 ||
1433 inp->inp_nstat_refcnt == 0) {
1434 return;
1435 }
1436 VERIFY(SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP);
1437 lck_mtx_lock(&nstat_mtx);
1438 for (state = nstat_controls; state; state = state->ncs_next) {
1439 lck_mtx_lock(&state->ncs_mtx);
1440 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
1441 {
1442 tucookie = (struct nstat_tucookie *)src->cookie;
1443 if (tucookie->inp == inp) {
1444 if (inp->inp_vflag & INP_IPV6) {
1445 in6_ip6_to_sockaddr(&inp->in6p_laddr,
1446 inp->inp_lport,
1447 &tucookie->local.v6,
1448 sizeof(tucookie->local));
1449 in6_ip6_to_sockaddr(&inp->in6p_faddr,
1450 inp->inp_fport,
1451 &tucookie->remote.v6,
1452 sizeof(tucookie->remote));
1453 } else if (inp->inp_vflag & INP_IPV4) {
1454 nstat_ip_to_sockaddr(&inp->inp_laddr,
1455 inp->inp_lport,
1456 &tucookie->local.v4,
1457 sizeof(tucookie->local));
1458 nstat_ip_to_sockaddr(&inp->inp_faddr,
1459 inp->inp_fport,
1460 &tucookie->remote.v4,
1461 sizeof(tucookie->remote));
1462 }
1463 if (inp->inp_last_outifp) {
1464 tucookie->if_index =
1465 inp->inp_last_outifp->if_index;
1466 }
1467
1468 tucookie->ifnet_properties = nstat_inpcb_to_flags(inp);
1469 tucookie->cached = true;
1470 break;
1471 }
1472 }
1473 lck_mtx_unlock(&state->ncs_mtx);
1474 }
1475 lck_mtx_unlock(&nstat_mtx);
1476 }
1477
1478 __private_extern__ void
1479 nstat_pcb_invalidate_cache(struct inpcb *inp)
1480 {
1481 nstat_control_state *state;
1482 nstat_src *src;
1483 struct nstat_tucookie *tucookie;
1484
1485 if (inp == NULL || nstat_udp_watchers == 0 ||
1486 inp->inp_nstat_refcnt == 0) {
1487 return;
1488 }
1489 VERIFY(SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP);
1490 lck_mtx_lock(&nstat_mtx);
1491 for (state = nstat_controls; state; state = state->ncs_next) {
1492 lck_mtx_lock(&state->ncs_mtx);
1493 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
1494 {
1495 tucookie = (struct nstat_tucookie *)src->cookie;
1496 if (tucookie->inp == inp) {
1497 tucookie->cached = false;
1498 break;
1499 }
1500 }
1501 lck_mtx_unlock(&state->ncs_mtx);
1502 }
1503 lck_mtx_unlock(&nstat_mtx);
1504 }
1505
1506 static errno_t
1507 nstat_tcp_copy_descriptor(
1508 nstat_provider_cookie_t cookie,
1509 void *data,
1510 u_int32_t len)
1511 {
1512 if (len < sizeof(nstat_tcp_descriptor)) {
1513 return EINVAL;
1514 }
1515
1516 if (nstat_tcp_gone(cookie)) {
1517 return EINVAL;
1518 }
1519
1520 nstat_tcp_descriptor *desc = (nstat_tcp_descriptor*)data;
1521 struct nstat_tucookie *tucookie =
1522 (struct nstat_tucookie *)cookie;
1523 struct inpcb *inp = tucookie->inp;
1524 struct tcpcb *tp = intotcpcb(inp);
1525 bzero(desc, sizeof(*desc));
1526
1527 if (inp->inp_vflag & INP_IPV6) {
1528 in6_ip6_to_sockaddr(&inp->in6p_laddr, inp->inp_lport,
1529 &desc->local.v6, sizeof(desc->local));
1530 in6_ip6_to_sockaddr(&inp->in6p_faddr, inp->inp_fport,
1531 &desc->remote.v6, sizeof(desc->remote));
1532 } else if (inp->inp_vflag & INP_IPV4) {
1533 nstat_ip_to_sockaddr(&inp->inp_laddr, inp->inp_lport,
1534 &desc->local.v4, sizeof(desc->local));
1535 nstat_ip_to_sockaddr(&inp->inp_faddr, inp->inp_fport,
1536 &desc->remote.v4, sizeof(desc->remote));
1537 }
1538
1539 desc->state = intotcpcb(inp)->t_state;
1540 desc->ifindex = (inp->inp_last_outifp == NULL) ? 0 :
1541 inp->inp_last_outifp->if_index;
1542
1543 // danger - not locked, values could be bogus
1544 desc->txunacked = tp->snd_max - tp->snd_una;
1545 desc->txwindow = tp->snd_wnd;
1546 desc->txcwindow = tp->snd_cwnd;
1547
1548 if (CC_ALGO(tp)->name != NULL) {
1549 strlcpy(desc->cc_algo, CC_ALGO(tp)->name,
1550 sizeof(desc->cc_algo));
1551 }
1552
1553 struct socket *so = inp->inp_socket;
1554 if (so) {
1555 // TBD - take the socket lock around these to make sure
1556 // they're in sync?
1557 desc->upid = so->last_upid;
1558 desc->pid = so->last_pid;
1559 desc->traffic_class = so->so_traffic_class;
1560 if ((so->so_flags1 & SOF1_TRAFFIC_MGT_SO_BACKGROUND)) {
1561 desc->traffic_mgt_flags |= TRAFFIC_MGT_SO_BACKGROUND;
1562 }
1563 if ((so->so_flags1 & SOF1_TRAFFIC_MGT_TCP_RECVBG)) {
1564 desc->traffic_mgt_flags |= TRAFFIC_MGT_TCP_RECVBG;
1565 }
1566 proc_name(desc->pid, desc->pname, sizeof(desc->pname));
1567 if (desc->pname[0] == 0) {
1568 strlcpy(desc->pname, tucookie->pname,
1569 sizeof(desc->pname));
1570 } else {
1571 desc->pname[sizeof(desc->pname) - 1] = 0;
1572 strlcpy(tucookie->pname, desc->pname,
1573 sizeof(tucookie->pname));
1574 }
1575 memcpy(desc->uuid, so->last_uuid, sizeof(so->last_uuid));
1576 memcpy(desc->vuuid, so->so_vuuid, sizeof(so->so_vuuid));
1577 if (so->so_flags & SOF_DELEGATED) {
1578 desc->eupid = so->e_upid;
1579 desc->epid = so->e_pid;
1580 memcpy(desc->euuid, so->e_uuid, sizeof(so->e_uuid));
1581 } else {
1582 desc->eupid = desc->upid;
1583 desc->epid = desc->pid;
1584 memcpy(desc->euuid, desc->uuid, sizeof(desc->uuid));
1585 }
1586 desc->sndbufsize = so->so_snd.sb_hiwat;
1587 desc->sndbufused = so->so_snd.sb_cc;
1588 desc->rcvbufsize = so->so_rcv.sb_hiwat;
1589 desc->rcvbufused = so->so_rcv.sb_cc;
1590 }
1591
1592 tcp_get_connectivity_status(tp, &desc->connstatus);
1593 desc->ifnet_properties = nstat_inpcb_to_flags(inp);
1594 inp_get_activity_bitmap(inp, &desc->activity_bitmap);
1595 desc->start_timestamp = inp->inp_start_timestamp;
1596 desc->timestamp = mach_continuous_time();
1597 return 0;
1598 }
1599
1600 static bool
1601 nstat_tcpudp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter, bool is_UDP)
1602 {
1603 bool retval = true;
1604
1605 if ((filter->npf_flags & (NSTAT_FILTER_IFNET_FLAGS | NSTAT_FILTER_SPECIFIC_USER)) != 0) {
1606 struct nstat_tucookie *tucookie = (struct nstat_tucookie *)cookie;
1607 struct inpcb *inp = tucookie->inp;
1608
1609 /* Only apply interface filter if at least one is allowed. */
1610 if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0) {
1611 uint16_t interface_properties = nstat_inpcb_to_flags(inp);
1612
1613 if ((filter->npf_flags & interface_properties) == 0) {
1614 // For UDP, we could have an undefined interface and yet transfers may have occurred.
1615 // We allow reporting if there have been transfers of the requested kind.
1616 // This is imperfect as we cannot account for the expensive attribute over wifi.
1617 // We also assume that cellular is expensive and we have no way to select for AWDL.
1618 if (is_UDP) {
1619 do {
1620 if ((filter->npf_flags & (NSTAT_FILTER_ACCEPT_CELLULAR | NSTAT_FILTER_ACCEPT_EXPENSIVE)) &&
1621 (inp->inp_cstat->rxbytes || inp->inp_cstat->txbytes)) {
1622 break;
1623 }
1624 if ((filter->npf_flags & NSTAT_FILTER_ACCEPT_WIFI) &&
1625 (inp->inp_wstat->rxbytes || inp->inp_wstat->txbytes)) {
1626 break;
1627 }
1628 if ((filter->npf_flags & NSTAT_FILTER_ACCEPT_WIRED) &&
1629 (inp->inp_Wstat->rxbytes || inp->inp_Wstat->txbytes)) {
1630 break;
1631 }
1632 return false;
1633 } while (0);
1634 } else {
1635 return false;
1636 }
1637 }
1638 }
1639
1640 if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0) && (retval)) {
1641 struct socket *so = inp->inp_socket;
1642 retval = false;
1643
1644 if (so) {
1645 if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_PID) != 0) &&
1646 (filter->npf_pid == so->last_pid)) {
1647 retval = true;
1648 } else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EPID) != 0) &&
1649 (filter->npf_pid == ((so->so_flags & SOF_DELEGATED) ? so->e_pid : so->last_pid))) {
1650 retval = true;
1651 } else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_UUID) != 0) &&
1652 (memcmp(filter->npf_uuid, so->last_uuid, sizeof(so->last_uuid)) == 0)) {
1653 retval = true;
1654 } else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EUUID) != 0) &&
1655 (memcmp(filter->npf_uuid, (so->so_flags & SOF_DELEGATED)? so->e_uuid : so->last_uuid,
1656 sizeof(so->last_uuid)) == 0)) {
1657 retval = true;
1658 }
1659 }
1660 }
1661 }
1662 return retval;
1663 }
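/*
 * Illustrative sketch (not part of the original source): a client that only
 * wants sockets belonging to a single process would arrange for its provider
 * filter to carry that pid, after which the checks above gate every report.
 * The pid value here is hypothetical:
 */
#if 0
	nstat_provider_filter filter;
	bzero(&filter, sizeof(filter));
	filter.npf_flags = NSTAT_FILTER_SPECIFIC_USER_BY_PID;
	filter.npf_pid = 123;   // hypothetical pid of interest

	// nstat_tcp_reporting_allowed() now passes only sources whose
	// socket's last_pid matches filter.npf_pid
#endif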
1664
1665 static bool
1666 nstat_tcp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter)
1667 {
1668 return nstat_tcpudp_reporting_allowed(cookie, filter, FALSE);
1669 }
1670
1671 static void
1672 nstat_init_tcp_provider(void)
1673 {
1674 bzero(&nstat_tcp_provider, sizeof(nstat_tcp_provider));
1675 nstat_tcp_provider.nstat_descriptor_length = sizeof(nstat_tcp_descriptor);
1676 nstat_tcp_provider.nstat_provider_id = NSTAT_PROVIDER_TCP_KERNEL;
1677 nstat_tcp_provider.nstat_lookup = nstat_tcp_lookup;
1678 nstat_tcp_provider.nstat_gone = nstat_tcp_gone;
1679 nstat_tcp_provider.nstat_counts = nstat_tcp_counts;
1680 nstat_tcp_provider.nstat_release = nstat_tcp_release;
1681 nstat_tcp_provider.nstat_watcher_add = nstat_tcp_add_watcher;
1682 nstat_tcp_provider.nstat_watcher_remove = nstat_tcp_remove_watcher;
1683 nstat_tcp_provider.nstat_copy_descriptor = nstat_tcp_copy_descriptor;
1684 nstat_tcp_provider.nstat_reporting_allowed = nstat_tcp_reporting_allowed;
1685 nstat_tcp_provider.next = nstat_providers;
1686 nstat_providers = &nstat_tcp_provider;
1687 }
1688
1689 #pragma mark -- UDP Provider --
1690
1691 static nstat_provider nstat_udp_provider;
1692
1693 static errno_t
1694 nstat_udp_lookup(
1695 const void *data,
1696 u_int32_t length,
1697 nstat_provider_cookie_t *out_cookie)
1698 {
1699 return nstat_tcpudp_lookup(&udbinfo, data, length, out_cookie);
1700 }
1701
1702 static int
1703 nstat_udp_gone(
1704 nstat_provider_cookie_t cookie)
1705 {
1706 struct nstat_tucookie *tucookie =
1707 (struct nstat_tucookie *)cookie;
1708 struct inpcb *inp;
1709
1710 return (!(inp = tucookie->inp) ||
1711 inp->inp_state == INPCB_STATE_DEAD) ? 1 : 0;
1712 }
1713
1714 static errno_t
1715 nstat_udp_counts(
1716 nstat_provider_cookie_t cookie,
1717 struct nstat_counts *out_counts,
1718 int *out_gone)
1719 {
1720 struct nstat_tucookie *tucookie =
1721 (struct nstat_tucookie *)cookie;
1722
1723 if (out_gone) {
1724 *out_gone = 0;
1725 }
1726
1727 // if the pcb is in the dead state, we should stop using it
1728 if (nstat_udp_gone(cookie)) {
1729 if (out_gone) {
1730 *out_gone = 1;
1731 }
1732 if (!tucookie->inp) {
1733 return EINVAL;
1734 }
1735 }
1736 struct inpcb *inp = tucookie->inp;
1737
1738 atomic_get_64(out_counts->nstat_rxpackets, &inp->inp_stat->rxpackets);
1739 atomic_get_64(out_counts->nstat_rxbytes, &inp->inp_stat->rxbytes);
1740 atomic_get_64(out_counts->nstat_txpackets, &inp->inp_stat->txpackets);
1741 atomic_get_64(out_counts->nstat_txbytes, &inp->inp_stat->txbytes);
1742 atomic_get_64(out_counts->nstat_cell_rxbytes, &inp->inp_cstat->rxbytes);
1743 atomic_get_64(out_counts->nstat_cell_txbytes, &inp->inp_cstat->txbytes);
1744 atomic_get_64(out_counts->nstat_wifi_rxbytes, &inp->inp_wstat->rxbytes);
1745 atomic_get_64(out_counts->nstat_wifi_txbytes, &inp->inp_wstat->txbytes);
1746 atomic_get_64(out_counts->nstat_wired_rxbytes, &inp->inp_Wstat->rxbytes);
1747 atomic_get_64(out_counts->nstat_wired_txbytes, &inp->inp_Wstat->txbytes);
1748
1749 return 0;
1750 }
1751
1752 static void
1753 nstat_udp_release(
1754 nstat_provider_cookie_t cookie,
1755 int locked)
1756 {
1757 struct nstat_tucookie *tucookie =
1758 (struct nstat_tucookie *)cookie;
1759
1760 nstat_tucookie_release_internal(tucookie, locked);
1761 }
1762
1763 static errno_t
1764 nstat_udp_add_watcher(
1765 nstat_control_state *state,
1766 nstat_msg_add_all_srcs *req)
1767 {
1768 // There is a tricky issue around getting all UDP sockets added once
1769 // and only once. nstat_udp_new_pcb() is called prior to the new item
1770 // being placed on any lists where it might be found.
1771 // By locking the udpinfo.ipi_lock prior to marking the state as a watcher,
1772 // it should be impossible for a new socket to be added twice.
1773 // On the other hand, there is still a timing issue where a new socket
1774 // results in a call to nstat_udp_new_pcb() before this watcher
1775 // is instantiated and yet the socket doesn't make it into ipi_listhead
1776 // prior to the scan. <rdar://problem/30361716>
1777
1778 errno_t result;
1779
1780 lck_rw_lock_shared(udbinfo.ipi_lock);
1781 result = nstat_set_provider_filter(state, req);
1782
1783 if (result == 0) {
1784 struct inpcb *inp;
1785 struct nstat_tucookie *cookie;
1786
1787 OSIncrementAtomic(&nstat_udp_watchers);
1788
1789 // Add all current UDP inpcbs.
1790 LIST_FOREACH(inp, udbinfo.ipi_listhead, inp_list)
1791 {
1792 cookie = nstat_tucookie_alloc_ref(inp);
1793 if (cookie == NULL) {
1794 continue;
1795 }
1796 if (nstat_control_source_add(0, state, &nstat_udp_provider,
1797 cookie) != 0) {
1798 nstat_tucookie_release(cookie);
1799 break;
1800 }
1801 }
1802 }
1803
1804 lck_rw_done(udbinfo.ipi_lock);
1805
1806 return result;
1807 }
1808
1809 static void
1810 nstat_udp_remove_watcher(
1811 __unused nstat_control_state *state)
1812 {
1813 OSDecrementAtomic(&nstat_udp_watchers);
1814 }
1815
1816 __private_extern__ void
1817 nstat_udp_new_pcb(
1818 struct inpcb *inp)
1819 {
1820 struct nstat_tucookie *cookie;
1821
1822 inp->inp_start_timestamp = mach_continuous_time();
1823
1824 if (nstat_udp_watchers == 0) {
1825 return;
1826 }
1827
1828 socket_lock(inp->inp_socket, 0);
1829 lck_mtx_lock(&nstat_mtx);
1830 nstat_control_state *state;
1831 for (state = nstat_controls; state; state = state->ncs_next) {
1832 if ((state->ncs_watching & (1 << NSTAT_PROVIDER_UDP_KERNEL)) != 0) {
1833 // this client is watching udp
1834 // acquire a reference for it
1835 cookie = nstat_tucookie_alloc_ref_locked(inp);
1836 if (cookie == NULL) {
1837 continue;
1838 }
1839 // add the source, if that fails, release the reference
1840 if (nstat_control_source_add(0, state,
1841 &nstat_udp_provider, cookie) != 0) {
1842 nstat_tucookie_release_locked(cookie);
1843 break;
1844 }
1845 }
1846 }
1847 lck_mtx_unlock(&nstat_mtx);
1848 socket_unlock(inp->inp_socket, 0);
1849 }
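/*
 * The ncs_watching test above is a per-client provider bitmask: one bit
 * per provider ID, so a membership test is a single AND. A small sketch
 * with made-up provider numbers (the real IDs live in ntstat.h):
 */
#include <stdbool.h>
#include <stdint.h>

enum { PROV_ROUTE = 1, PROV_TCP = 2, PROV_UDP = 3 };    /* illustrative */

static inline void
watch(uint32_t *mask, int provider)
{
    *mask |= (1u << provider);
}

static inline bool
is_watching(uint32_t mask, int provider)
{
    return (mask & (1u << provider)) != 0;
}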
1850
1851 static errno_t
1852 nstat_udp_copy_descriptor(
1853 nstat_provider_cookie_t cookie,
1854 void *data,
1855 u_int32_t len)
1856 {
1857 if (len < sizeof(nstat_udp_descriptor)) {
1858 return EINVAL;
1859 }
1860
1861 if (nstat_udp_gone(cookie)) {
1862 return EINVAL;
1863 }
1864
1865 struct nstat_tucookie *tucookie =
1866 (struct nstat_tucookie *)cookie;
1867 nstat_udp_descriptor *desc = (nstat_udp_descriptor*)data;
1868 struct inpcb *inp = tucookie->inp;
1869
1870 bzero(desc, sizeof(*desc));
1871
1872 if (tucookie->cached == false) {
1873 if (inp->inp_vflag & INP_IPV6) {
1874 in6_ip6_to_sockaddr(&inp->in6p_laddr, inp->inp_lport,
1875 &desc->local.v6, sizeof(desc->local.v6));
1876 in6_ip6_to_sockaddr(&inp->in6p_faddr, inp->inp_fport,
1877 &desc->remote.v6, sizeof(desc->remote.v6));
1878 } else if (inp->inp_vflag & INP_IPV4) {
1879 nstat_ip_to_sockaddr(&inp->inp_laddr, inp->inp_lport,
1880 &desc->local.v4, sizeof(desc->local.v4));
1881 nstat_ip_to_sockaddr(&inp->inp_faddr, inp->inp_fport,
1882 &desc->remote.v4, sizeof(desc->remote.v4));
1883 }
1884 desc->ifnet_properties = nstat_inpcb_to_flags(inp);
1885 } else {
1886 if (inp->inp_vflag & INP_IPV6) {
1887 memcpy(&desc->local.v6, &tucookie->local.v6,
1888 sizeof(desc->local.v6));
1889 memcpy(&desc->remote.v6, &tucookie->remote.v6,
1890 sizeof(desc->remote.v6));
1891 } else if (inp->inp_vflag & INP_IPV4) {
1892 memcpy(&desc->local.v4, &tucookie->local.v4,
1893 sizeof(desc->local.v4));
1894 memcpy(&desc->remote.v4, &tucookie->remote.v4,
1895 sizeof(desc->remote.v4));
1896 }
1897 desc->ifnet_properties = tucookie->ifnet_properties;
1898 }
1899
1900 if (inp->inp_last_outifp) {
1901 desc->ifindex = inp->inp_last_outifp->if_index;
1902 } else {
1903 desc->ifindex = tucookie->if_index;
1904 }
1905
1906 struct socket *so = inp->inp_socket;
1907 if (so) {
1908 // TBD - take the socket lock around these to make sure
1909 // they're in sync?
1910 desc->upid = so->last_upid;
1911 desc->pid = so->last_pid;
1912 proc_name(desc->pid, desc->pname, sizeof(desc->pname));
1913 if (desc->pname[0] == 0) {
1914 strlcpy(desc->pname, tucookie->pname,
1915 sizeof(desc->pname));
1916 } else {
1917 desc->pname[sizeof(desc->pname) - 1] = 0;
1918 strlcpy(tucookie->pname, desc->pname,
1919 sizeof(tucookie->pname));
1920 }
1921 memcpy(desc->uuid, so->last_uuid, sizeof(so->last_uuid));
1922 memcpy(desc->vuuid, so->so_vuuid, sizeof(so->so_vuuid));
1923 if (so->so_flags & SOF_DELEGATED) {
1924 desc->eupid = so->e_upid;
1925 desc->epid = so->e_pid;
1926 memcpy(desc->euuid, so->e_uuid, sizeof(so->e_uuid));
1927 } else {
1928 desc->eupid = desc->upid;
1929 desc->epid = desc->pid;
1930 memcpy(desc->euuid, desc->uuid, sizeof(desc->uuid));
1931 }
1932 desc->rcvbufsize = so->so_rcv.sb_hiwat;
1933 desc->rcvbufused = so->so_rcv.sb_cc;
1934 desc->traffic_class = so->so_traffic_class;
1935 inp_get_activity_bitmap(inp, &desc->activity_bitmap);
1936 desc->start_timestamp = inp->inp_start_timestamp;
1937 desc->timestamp = mach_continuous_time();
1938 }
1939
1940 return 0;
1941 }
1942
1943 static bool
1944 nstat_udp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter)
1945 {
1946 return nstat_tcpudp_reporting_allowed(cookie, filter, TRUE);
1947 }
1948
1949
1950 static void
1951 nstat_init_udp_provider(void)
1952 {
1953 bzero(&nstat_udp_provider, sizeof(nstat_udp_provider));
1954 nstat_udp_provider.nstat_provider_id = NSTAT_PROVIDER_UDP_KERNEL;
1955 nstat_udp_provider.nstat_descriptor_length = sizeof(nstat_udp_descriptor);
1956 nstat_udp_provider.nstat_lookup = nstat_udp_lookup;
1957 nstat_udp_provider.nstat_gone = nstat_udp_gone;
1958 nstat_udp_provider.nstat_counts = nstat_udp_counts;
1959 nstat_udp_provider.nstat_watcher_add = nstat_udp_add_watcher;
1960 nstat_udp_provider.nstat_watcher_remove = nstat_udp_remove_watcher;
1961 nstat_udp_provider.nstat_copy_descriptor = nstat_udp_copy_descriptor;
1962 nstat_udp_provider.nstat_release = nstat_udp_release;
1963 nstat_udp_provider.nstat_reporting_allowed = nstat_udp_reporting_allowed;
1964 nstat_udp_provider.next = nstat_providers;
1965 nstat_providers = &nstat_udp_provider;
1966 }
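/*
 * Provider registration is a push onto a global singly-linked list, as in
 * nstat_init_udp_provider above. A sketch with a pared-down provider
 * struct (the real nstat_provider carries the whole callback table):
 */
#include <stddef.h>

struct provider {
    int id;
    struct provider *next;
};

static struct provider *providers;

static void
provider_register(struct provider *p)
{
    p->next = providers;        /* push-front; no lock needed at init time */
    providers = p;
}

static struct provider *
provider_lookup(int id)
{
    struct provider *p;

    for (p = providers; p != NULL; p = p->next) {
        if (p->id == id) {
            return p;
        }
    }
    return NULL;
}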
1967
1968
1969
1970 #pragma mark -- ifnet Provider --
1971
1972 static nstat_provider nstat_ifnet_provider;
1973
1974 /*
1975 * We store a pointer to the ifnet and the original threshold
1976 * requested by the client.
1977 */
1978 struct nstat_ifnet_cookie {
1979 struct ifnet *ifp;
1980 uint64_t threshold;
1981 };
1982
1983 static errno_t
1984 nstat_ifnet_lookup(
1985 const void *data,
1986 u_int32_t length,
1987 nstat_provider_cookie_t *out_cookie)
1988 {
1989 const nstat_ifnet_add_param *param = (const nstat_ifnet_add_param *)data;
1990 struct ifnet *ifp;
1991 boolean_t changed = FALSE;
1992 nstat_control_state *state;
1993 nstat_src *src;
1994 struct nstat_ifnet_cookie *cookie;
1995
1996 if (length < sizeof(*param) || param->threshold < 1024 * 1024) {
1997 return EINVAL;
1998 }
1999 if (nstat_privcheck != 0) {
2000 errno_t result = priv_check_cred(kauth_cred_get(),
2001 PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0);
2002 if (result != 0) {
2003 return result;
2004 }
2005 }
2006 cookie = OSMalloc(sizeof(*cookie), nstat_malloc_tag);
2007 if (cookie == NULL) {
2008 return ENOMEM;
2009 }
2010 bzero(cookie, sizeof(*cookie));
2011
2012 ifnet_head_lock_shared();
2013 TAILQ_FOREACH(ifp, &ifnet_head, if_link)
2014 {
2015 ifnet_lock_exclusive(ifp);
2016 if (ifp->if_index == param->ifindex) {
2017 cookie->ifp = ifp;
2018 cookie->threshold = param->threshold;
2019 *out_cookie = cookie;
2020 if (!ifp->if_data_threshold ||
2021 ifp->if_data_threshold > param->threshold) {
2022 changed = TRUE;
2023 ifp->if_data_threshold = param->threshold;
2024 }
2025 ifnet_lock_done(ifp);
2026 ifnet_reference(ifp);
2027 break;
2028 }
2029 ifnet_lock_done(ifp);
2030 }
2031 ifnet_head_done();
2032
2033 /*
2034 * When we lower the threshold, we notify all of our clients
2035 * with a description message.
2036 * We won't send a message to the client we are currently serving,
2037 * because it has no 'ifnet source' yet.
2038 */
2039 if (changed) {
2040 lck_mtx_lock(&nstat_mtx);
2041 for (state = nstat_controls; state; state = state->ncs_next) {
2042 lck_mtx_lock(&state->ncs_mtx);
2043 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
2044 {
2045 if (src->provider != &nstat_ifnet_provider) {
2046 continue;
2047 }
2048 nstat_control_send_description(state, src, 0, 0);
2049 }
2050 lck_mtx_unlock(&state->ncs_mtx);
2051 }
2052 lck_mtx_unlock(&nstat_mtx);
2053 }
2054 if (cookie->ifp == NULL) {
2055 OSFree(cookie, sizeof(*cookie), nstat_malloc_tag);
2056 }
2057
2058 return ifp ? 0 : EINVAL;
2059 }
2060
2061 static int
2062 nstat_ifnet_gone(
2063 nstat_provider_cookie_t cookie)
2064 {
2065 struct ifnet *ifp;
2066 struct nstat_ifnet_cookie *ifcookie =
2067 (struct nstat_ifnet_cookie *)cookie;
2068
2069 ifnet_head_lock_shared();
2070 TAILQ_FOREACH(ifp, &ifnet_head, if_link)
2071 {
2072 if (ifp == ifcookie->ifp) {
2073 break;
2074 }
2075 }
2076 ifnet_head_done();
2077
2078 return ifp ? 0 : 1;
2079 }
2080
2081 static errno_t
2082 nstat_ifnet_counts(
2083 nstat_provider_cookie_t cookie,
2084 struct nstat_counts *out_counts,
2085 int *out_gone)
2086 {
2087 struct nstat_ifnet_cookie *ifcookie =
2088 (struct nstat_ifnet_cookie *)cookie;
2089 struct ifnet *ifp = ifcookie->ifp;
2090
2091 if (out_gone) {
2092 *out_gone = 0;
2093 }
2094
2095 // if the ifnet is gone, we should stop using it
2096 if (nstat_ifnet_gone(cookie)) {
2097 if (out_gone) {
2098 *out_gone = 1;
2099 }
2100 return EINVAL;
2101 }
2102
2103 bzero(out_counts, sizeof(*out_counts));
2104 out_counts->nstat_rxpackets = ifp->if_ipackets;
2105 out_counts->nstat_rxbytes = ifp->if_ibytes;
2106 out_counts->nstat_txpackets = ifp->if_opackets;
2107 out_counts->nstat_txbytes = ifp->if_obytes;
2108 out_counts->nstat_cell_rxbytes = out_counts->nstat_cell_txbytes = 0;
2109 return 0;
2110 }
2111
2112 static void
2113 nstat_ifnet_release(
2114 nstat_provider_cookie_t cookie,
2115 __unused int locked)
2116 {
2117 struct nstat_ifnet_cookie *ifcookie;
2118 struct ifnet *ifp;
2119 nstat_control_state *state;
2120 nstat_src *src;
2121 uint64_t minthreshold = UINT64_MAX;
2122
2123 /*
2124 * Find all the clients that requested a threshold
2125 * for this ifnet and re-calculate if_data_threshold.
2126 */
2127 lck_mtx_lock(&nstat_mtx);
2128 for (state = nstat_controls; state; state = state->ncs_next) {
2129 lck_mtx_lock(&state->ncs_mtx);
2130 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
2131 {
2132 /* Skip the provider we are about to detach. */
2133 if (src->provider != &nstat_ifnet_provider ||
2134 src->cookie == cookie) {
2135 continue;
2136 }
2137 ifcookie = (struct nstat_ifnet_cookie *)src->cookie;
2138 if (ifcookie->threshold < minthreshold) {
2139 minthreshold = ifcookie->threshold;
2140 }
2141 }
2142 lck_mtx_unlock(&state->ncs_mtx);
2143 }
2144 lck_mtx_unlock(&nstat_mtx);
2145 /*
2146 * Reset if_data_threshold or disable it.
2147 */
2148 ifcookie = (struct nstat_ifnet_cookie *)cookie;
2149 ifp = ifcookie->ifp;
2150 if (ifnet_is_attached(ifp, 1)) {
2151 ifnet_lock_exclusive(ifp);
2152 if (minthreshold == UINT64_MAX) {
2153 ifp->if_data_threshold = 0;
2154 } else {
2155 ifp->if_data_threshold = minthreshold;
2156 }
2157 ifnet_lock_done(ifp);
2158 ifnet_decr_iorefcnt(ifp);
2159 }
2160 ifnet_release(ifp);
2161 OSFree(ifcookie, sizeof(*ifcookie), nstat_malloc_tag);
2162 }
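/*
 * The release path above recomputes the interface threshold as the
 * minimum over all remaining watchers, or disables it when none remain.
 * The same reduction in isolation (illustrative fixed-size array in
 * place of the control/source lists):
 */
#include <stdint.h>

#define NWATCHERS 8

uint64_t
recompute_threshold(const uint64_t thresholds[NWATCHERS], int detaching)
{
    uint64_t minthreshold = UINT64_MAX;
    int i;

    for (i = 0; i < NWATCHERS; i++) {
        if (i == detaching || thresholds[i] == 0) {
            continue;                   /* skip the detaching watcher */
        }
        if (thresholds[i] < minthreshold) {
            minthreshold = thresholds[i];
        }
    }
    return (minthreshold == UINT64_MAX) ? 0 : minthreshold;    /* 0 == off */
}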
2163
2164 static void
2165 nstat_ifnet_copy_link_status(
2166 struct ifnet *ifp,
2167 struct nstat_ifnet_descriptor *desc)
2168 {
2169 struct if_link_status *ifsr = ifp->if_link_status;
2170 nstat_ifnet_desc_link_status *link_status = &desc->link_status;
2171
2172 link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_NONE;
2173 if (ifsr == NULL) {
2174 return;
2175 }
2176
2177 lck_rw_lock_shared(&ifp->if_link_status_lock);
2178
2179 if (ifp->if_type == IFT_CELLULAR) {
2180 nstat_ifnet_desc_cellular_status *cell_status = &link_status->u.cellular;
2181 struct if_cellular_status_v1 *if_cell_sr =
2182 &ifsr->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
2183
2184 if (ifsr->ifsr_version != IF_CELLULAR_STATUS_REPORT_VERSION_1) {
2185 goto done;
2186 }
2187
2188 link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_CELLULAR;
2189
2190 if (if_cell_sr->valid_bitmask & IF_CELL_LINK_QUALITY_METRIC_VALID) {
2191 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_LINK_QUALITY_METRIC_VALID;
2192 cell_status->link_quality_metric = if_cell_sr->link_quality_metric;
2193 }
2194 if (if_cell_sr->valid_bitmask & IF_CELL_UL_EFFECTIVE_BANDWIDTH_VALID) {
2195 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_EFFECTIVE_BANDWIDTH_VALID;
2196 cell_status->ul_effective_bandwidth = if_cell_sr->ul_effective_bandwidth;
2197 }
2198 if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_BANDWIDTH_VALID) {
2199 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_BANDWIDTH_VALID;
2200 cell_status->ul_max_bandwidth = if_cell_sr->ul_max_bandwidth;
2201 }
2202 if (if_cell_sr->valid_bitmask & IF_CELL_UL_MIN_LATENCY_VALID) {
2203 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MIN_LATENCY_VALID;
2204 cell_status->ul_min_latency = if_cell_sr->ul_min_latency;
2205 }
2206 if (if_cell_sr->valid_bitmask & IF_CELL_UL_EFFECTIVE_LATENCY_VALID) {
2207 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_EFFECTIVE_LATENCY_VALID;
2208 cell_status->ul_effective_latency = if_cell_sr->ul_effective_latency;
2209 }
2210 if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_LATENCY_VALID) {
2211 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_LATENCY_VALID;
2212 cell_status->ul_max_latency = if_cell_sr->ul_max_latency;
2213 }
2214 if (if_cell_sr->valid_bitmask & IF_CELL_UL_RETXT_LEVEL_VALID) {
2215 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_VALID;
2216 if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_NONE) {
2217 cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_NONE;
2218 } else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_LOW) {
2219 cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_LOW;
2220 } else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_MEDIUM) {
2221 cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_MEDIUM;
2222 } else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_HIGH) {
2223 cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_HIGH;
2224 } else {
2225 cell_status->valid_bitmask &= ~NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_VALID;
2226 }
2227 }
2228 if (if_cell_sr->valid_bitmask & IF_CELL_UL_BYTES_LOST_VALID) {
2229 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_BYTES_LOST_VALID;
2230 cell_status->ul_bytes_lost = if_cell_sr->ul_bytes_lost;
2231 }
2232 if (if_cell_sr->valid_bitmask & IF_CELL_UL_MIN_QUEUE_SIZE_VALID) {
2233 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MIN_QUEUE_SIZE_VALID;
2234 cell_status->ul_min_queue_size = if_cell_sr->ul_min_queue_size;
2235 }
2236 if (if_cell_sr->valid_bitmask & IF_CELL_UL_AVG_QUEUE_SIZE_VALID) {
2237 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_AVG_QUEUE_SIZE_VALID;
2238 cell_status->ul_avg_queue_size = if_cell_sr->ul_avg_queue_size;
2239 }
2240 if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_QUEUE_SIZE_VALID) {
2241 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_QUEUE_SIZE_VALID;
2242 cell_status->ul_max_queue_size = if_cell_sr->ul_max_queue_size;
2243 }
2244 if (if_cell_sr->valid_bitmask & IF_CELL_DL_EFFECTIVE_BANDWIDTH_VALID) {
2245 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_DL_EFFECTIVE_BANDWIDTH_VALID;
2246 cell_status->dl_effective_bandwidth = if_cell_sr->dl_effective_bandwidth;
2247 }
2248 if (if_cell_sr->valid_bitmask & IF_CELL_DL_MAX_BANDWIDTH_VALID) {
2249 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_DL_MAX_BANDWIDTH_VALID;
2250 cell_status->dl_max_bandwidth = if_cell_sr->dl_max_bandwidth;
2251 }
2252 if (if_cell_sr->valid_bitmask & IF_CELL_CONFIG_INACTIVITY_TIME_VALID) {
2253 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_CONFIG_INACTIVITY_TIME_VALID;
2254 cell_status->config_inactivity_time = if_cell_sr->config_inactivity_time;
2255 }
2256 if (if_cell_sr->valid_bitmask & IF_CELL_CONFIG_BACKOFF_TIME_VALID) {
2257 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_CONFIG_BACKOFF_TIME_VALID;
2258 cell_status->config_backoff_time = if_cell_sr->config_backoff_time;
2259 }
2260 if (if_cell_sr->valid_bitmask & IF_CELL_UL_MSS_RECOMMENDED_VALID) {
2261 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_MSS_RECOMMENDED_VALID;
2262 cell_status->mss_recommended = if_cell_sr->mss_recommended;
2263 }
2264 } else if (IFNET_IS_WIFI(ifp)) {
2265 nstat_ifnet_desc_wifi_status *wifi_status = &link_status->u.wifi;
2266 struct if_wifi_status_v1 *if_wifi_sr =
2267 &ifsr->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;
2268
2269 if (ifsr->ifsr_version != IF_WIFI_STATUS_REPORT_VERSION_1) {
2270 goto done;
2271 }
2272
2273 link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_WIFI;
2274
2275 if (if_wifi_sr->valid_bitmask & IF_WIFI_LINK_QUALITY_METRIC_VALID) {
2276 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_LINK_QUALITY_METRIC_VALID;
2277 wifi_status->link_quality_metric = if_wifi_sr->link_quality_metric;
2278 }
2279 if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID) {
2280 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID;
2281 wifi_status->ul_effective_bandwidth = if_wifi_sr->ul_effective_bandwidth;
2282 }
2283 if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MAX_BANDWIDTH_VALID) {
2284 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MAX_BANDWIDTH_VALID;
2285 wifi_status->ul_max_bandwidth = if_wifi_sr->ul_max_bandwidth;
2286 }
2287 if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MIN_LATENCY_VALID) {
2288 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MIN_LATENCY_VALID;
2289 wifi_status->ul_min_latency = if_wifi_sr->ul_min_latency;
2290 }
2291 if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_EFFECTIVE_LATENCY_VALID) {
2292 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_EFFECTIVE_LATENCY_VALID;
2293 wifi_status->ul_effective_latency = if_wifi_sr->ul_effective_latency;
2294 }
2295 if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MAX_LATENCY_VALID) {
2296 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MAX_LATENCY_VALID;
2297 wifi_status->ul_max_latency = if_wifi_sr->ul_max_latency;
2298 }
2299 if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_RETXT_LEVEL_VALID) {
2300 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_VALID;
2301 if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_NONE) {
2302 wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_NONE;
2303 } else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_LOW) {
2304 wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_LOW;
2305 } else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_MEDIUM) {
2306 wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_MEDIUM;
2307 } else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_HIGH) {
2308 wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_HIGH;
2309 } else {
2310 wifi_status->valid_bitmask &= ~NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_VALID;
2311 }
2312 }
2313 if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_BYTES_LOST_VALID) {
2314 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_BYTES_LOST_VALID;
2315 wifi_status->ul_bytes_lost = if_wifi_sr->ul_bytes_lost;
2316 }
2317 if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_ERROR_RATE_VALID) {
2318 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_ERROR_RATE_VALID;
2319 wifi_status->ul_error_rate = if_wifi_sr->ul_error_rate;
2320 }
2321 if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID) {
2322 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID;
2323 wifi_status->dl_effective_bandwidth = if_wifi_sr->dl_effective_bandwidth;
2324 }
2325 if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MAX_BANDWIDTH_VALID) {
2326 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MAX_BANDWIDTH_VALID;
2327 wifi_status->dl_max_bandwidth = if_wifi_sr->dl_max_bandwidth;
2328 }
2329 if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MIN_LATENCY_VALID) {
2330 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MIN_LATENCY_VALID;
2331 wifi_status->dl_min_latency = if_wifi_sr->dl_min_latency;
2332 }
2333 if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_EFFECTIVE_LATENCY_VALID) {
2334 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_EFFECTIVE_LATENCY_VALID;
2335 wifi_status->dl_effective_latency = if_wifi_sr->dl_effective_latency;
2336 }
2337 if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MAX_LATENCY_VALID) {
2338 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MAX_LATENCY_VALID;
2339 wifi_status->dl_max_latency = if_wifi_sr->dl_max_latency;
2340 }
2341 if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_ERROR_RATE_VALID) {
2342 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_ERROR_RATE_VALID;
2343 wifi_status->dl_error_rate = if_wifi_sr->dl_error_rate;
2344 }
2345 if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_FREQUENCY_VALID) {
2346 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_VALID;
2347 if (if_wifi_sr->config_frequency == IF_WIFI_CONFIG_FREQUENCY_2_4_GHZ) {
2348 wifi_status->config_frequency = NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_2_4_GHZ;
2349 } else if (if_wifi_sr->config_frequency == IF_WIFI_CONFIG_FREQUENCY_5_0_GHZ) {
2350 wifi_status->config_frequency = NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_5_0_GHZ;
2351 } else {
2352 wifi_status->valid_bitmask &= ~NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_VALID;
2353 }
2354 }
2355 if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_MULTICAST_RATE_VALID) {
2356 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_MULTICAST_RATE_VALID;
2357 wifi_status->config_multicast_rate = if_wifi_sr->config_multicast_rate;
2358 }
2359 if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_SCAN_COUNT_VALID) {
2360 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_SCAN_COUNT_VALID;
2361 wifi_status->scan_count = if_wifi_sr->scan_count;
2362 }
2363 if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_SCAN_DURATION_VALID) {
2364 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_SCAN_DURATION_VALID;
2365 wifi_status->scan_duration = if_wifi_sr->scan_duration;
2366 }
2367 }
2368
2369 done:
2370 lck_rw_done(&ifp->if_link_status_lock);
2371 }
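/*
 * The long chains above all follow one pattern: if the source report
 * marks a field valid, copy the field and set the corresponding bit in
 * the destination mask. A table-driven sketch of that translation
 * (offsets and bit values here are illustrative):
 */
#include <stddef.h>
#include <stdint.h>

struct xlat {
    uint64_t src_bit;   /* validity bit in the source report */
    uint64_t dst_bit;   /* validity bit exported to clients */
    size_t src_off;     /* offset of the field in the source */
    size_t dst_off;     /* offset of the field in the destination */
};

static void
copy_valid_fields(const void *src, uint64_t src_mask,
    void *dst, uint64_t *dst_mask, const struct xlat *tbl, size_t n)
{
    size_t i;

    for (i = 0; i < n; i++) {
        if ((src_mask & tbl[i].src_bit) == 0) {
            continue;                   /* field was not reported */
        }
        *(uint32_t *)((char *)dst + tbl[i].dst_off) =
            *(const uint32_t *)((const char *)src + tbl[i].src_off);
        *dst_mask |= tbl[i].dst_bit;
    }
}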
2372
2373 static u_int64_t nstat_ifnet_last_report_time = 0;
2374 extern int tcp_report_stats_interval;
2375
2376 static void
2377 nstat_ifnet_compute_percentages(struct if_tcp_ecn_perf_stat *ifst)
2378 {
2379 /* Retransmit percentage */
2380 if (ifst->total_rxmitpkts > 0 && ifst->total_txpkts > 0) {
2381 /* shift by 10 for precision */
2382 ifst->rxmit_percent =
2383 ((ifst->total_rxmitpkts << 10) * 100) / ifst->total_txpkts;
2384 } else {
2385 ifst->rxmit_percent = 0;
2386 }
2387
2388 /* Out-of-order percentage */
2389 if (ifst->total_oopkts > 0 && ifst->total_rxpkts > 0) {
2390 /* shift by 10 for precision */
2391 ifst->oo_percent =
2392 ((ifst->total_oopkts << 10) * 100) / ifst->total_rxpkts;
2393 } else {
2394 ifst->oo_percent = 0;
2395 }
2396
2397 /* Reorder percentage */
2398 if (ifst->total_reorderpkts > 0 &&
2399 (ifst->total_txpkts + ifst->total_rxpkts) > 0) {
2400 /* shift by 10 for precision */
2401 ifst->reorder_percent =
2402 ((ifst->total_reorderpkts << 10) * 100) /
2403 (ifst->total_txpkts + ifst->total_rxpkts);
2404 } else {
2405 ifst->reorder_percent = 0;
2406 }
2407 }
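/*
 * The "shift by 10" above is Q10 fixed point: the stored value is
 * percent * 1024, which keeps roughly three decimal digits of precision
 * in pure integer math. A worked example as a standalone program:
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t
pct_q10(uint64_t part, uint64_t total)
{
    return total > 0 ? ((part << 10) * 100) / total : 0;
}

int
main(void)
{
    /* 25 retransmits out of 1000 packets:
     * (25 << 10) * 100 / 1000 = 2560, i.e. 2560 / 1024.0 = 2.5%. */
    uint64_t q = pct_q10(25, 1000);
    printf("raw=%llu percent=%.3f\n",
        (unsigned long long)q, (double)q / 1024.0);
    return 0;
}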
2408
2409 static void
2410 nstat_ifnet_normalize_counter(struct if_tcp_ecn_stat *if_st)
2411 {
2412 u_int64_t ecn_on_conn, ecn_off_conn;
2413
2414 if (if_st == NULL) {
2415 return;
2416 }
2417 ecn_on_conn = if_st->ecn_client_success +
2418 if_st->ecn_server_success;
2419 ecn_off_conn = if_st->ecn_off_conn +
2420 (if_st->ecn_client_setup - if_st->ecn_client_success) +
2421 (if_st->ecn_server_setup - if_st->ecn_server_success);
2422
2423 /*
2424 * report sack episodes, rst_drop and rxmit_drop
2425 * as a ratio per connection, shift by 10 for precision
2426 */
2427 if (ecn_on_conn > 0) {
2428 if_st->ecn_on.sack_episodes =
2429 (if_st->ecn_on.sack_episodes << 10) / ecn_on_conn;
2430 if_st->ecn_on.rst_drop =
2431 (if_st->ecn_on.rst_drop << 10) * 100 / ecn_on_conn;
2432 if_st->ecn_on.rxmit_drop =
2433 (if_st->ecn_on.rxmit_drop << 10) * 100 / ecn_on_conn;
2434 } else {
2435 /* set to zero, just in case */
2436 if_st->ecn_on.sack_episodes = 0;
2437 if_st->ecn_on.rst_drop = 0;
2438 if_st->ecn_on.rxmit_drop = 0;
2439 }
2440
2441 if (ecn_off_conn > 0) {
2442 if_st->ecn_off.sack_episodes =
2443 (if_st->ecn_off.sack_episodes << 10) / ecn_off_conn;
2444 if_st->ecn_off.rst_drop =
2445 (if_st->ecn_off.rst_drop << 10) * 100 / ecn_off_conn;
2446 if_st->ecn_off.rxmit_drop =
2447 (if_st->ecn_off.rxmit_drop << 10) * 100 / ecn_off_conn;
2448 } else {
2449 if_st->ecn_off.sack_episodes = 0;
2450 if_st->ecn_off.rst_drop = 0;
2451 if_st->ecn_off.rxmit_drop = 0;
2452 }
2453 if_st->ecn_total_conn = ecn_off_conn + ecn_on_conn;
2454 }
2455
2456 static void
2457 nstat_ifnet_report_ecn_stats(void)
2458 {
2459 u_int64_t uptime, last_report_time;
2460 struct nstat_sysinfo_data data;
2461 struct nstat_sysinfo_ifnet_ecn_stats *st;
2462 struct ifnet *ifp;
2463
2464 uptime = net_uptime();
2465
2466 if ((int)(uptime - nstat_ifnet_last_report_time) <
2467 tcp_report_stats_interval) {
2468 return;
2469 }
2470
2471 last_report_time = nstat_ifnet_last_report_time;
2472 nstat_ifnet_last_report_time = uptime;
2473 data.flags = NSTAT_SYSINFO_IFNET_ECN_STATS;
2474 st = &data.u.ifnet_ecn_stats;
2475
2476 ifnet_head_lock_shared();
2477 TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
2478 if (ifp->if_ipv4_stat == NULL || ifp->if_ipv6_stat == NULL) {
2479 continue;
2480 }
2481
2482 if (!IF_FULLY_ATTACHED(ifp)) {
2483 continue;
2484 }
2485
2486 /* Limit reporting to Wifi, Ethernet and cellular. */
2487 if (!(IFNET_IS_ETHERNET(ifp) || IFNET_IS_CELLULAR(ifp))) {
2488 continue;
2489 }
2490
2491 bzero(st, sizeof(*st));
2492 if (IFNET_IS_CELLULAR(ifp)) {
2493 st->ifnet_type = NSTAT_IFNET_ECN_TYPE_CELLULAR;
2494 } else if (IFNET_IS_WIFI(ifp)) {
2495 st->ifnet_type = NSTAT_IFNET_ECN_TYPE_WIFI;
2496 } else {
2497 st->ifnet_type = NSTAT_IFNET_ECN_TYPE_ETHERNET;
2498 }
2499 data.unsent_data_cnt = ifp->if_unsent_data_cnt;
2500 /* skip if there was no update since last report */
2501 if (ifp->if_ipv4_stat->timestamp <= 0 ||
2502 ifp->if_ipv4_stat->timestamp < last_report_time) {
2503 goto v6;
2504 }
2505 st->ifnet_proto = NSTAT_IFNET_ECN_PROTO_IPV4;
2506 /* compute percentages using packet counts */
2507 nstat_ifnet_compute_percentages(&ifp->if_ipv4_stat->ecn_on);
2508 nstat_ifnet_compute_percentages(&ifp->if_ipv4_stat->ecn_off);
2509 nstat_ifnet_normalize_counter(ifp->if_ipv4_stat);
2510 bcopy(ifp->if_ipv4_stat, &st->ecn_stat,
2511 sizeof(st->ecn_stat));
2512 nstat_sysinfo_send_data(&data);
2513 bzero(ifp->if_ipv4_stat, sizeof(*ifp->if_ipv4_stat));
2514
2515 v6:
2516 /* skip if there was no update since last report */
2517 if (ifp->if_ipv6_stat->timestamp <= 0 ||
2518 ifp->if_ipv6_stat->timestamp < last_report_time) {
2519 continue;
2520 }
2521 st->ifnet_proto = NSTAT_IFNET_ECN_PROTO_IPV6;
2522
2523 /* compute percentages using packet counts */
2524 nstat_ifnet_compute_percentages(&ifp->if_ipv6_stat->ecn_on);
2525 nstat_ifnet_compute_percentages(&ifp->if_ipv6_stat->ecn_off);
2526 nstat_ifnet_normalize_counter(ifp->if_ipv6_stat);
2527 bcopy(ifp->if_ipv6_stat, &st->ecn_stat,
2528 sizeof(st->ecn_stat));
2529 nstat_sysinfo_send_data(&data);
2530
2531 /* Zero the stats in ifp */
2532 bzero(ifp->if_ipv6_stat, sizeof(*ifp->if_ipv6_stat));
2533 }
2534 ifnet_head_done();
2535 }
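/*
 * Both the ECN and the LIM reports are rate limited the same way: record
 * the uptime of the last report and return early until a full interval
 * has elapsed. The pattern in isolation (caller supplies a monotonic
 * uptime in seconds):
 */
#include <stdint.h>

static uint64_t last_report_time;

int
should_report(uint64_t uptime, uint64_t interval)
{
    if (uptime - last_report_time < interval) {
        return 0;                       /* too soon; skip this pass */
    }
    last_report_time = uptime;          /* claim this reporting slot */
    return 1;
}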
2536
2537 /* Some thresholds to determine Low Internet mode */
2538 #define NSTAT_LIM_DL_MAX_BANDWIDTH_THRESHOLD 1000000 /* 1 Mbps */
2539 #define NSTAT_LIM_UL_MAX_BANDWIDTH_THRESHOLD 500000 /* 500 Kbps */
2540 #define NSTAT_LIM_UL_MIN_RTT_THRESHOLD 1000 /* 1 second */
2541 #define NSTAT_LIM_CONN_TIMEOUT_PERCENT_THRESHOLD (10 << 10) /* 10 percent connection timeouts */
2542 #define NSTAT_LIM_PACKET_LOSS_PERCENT_THRESHOLD (2 << 10) /* 2 percent packet loss rate */
2543
2544 static boolean_t
2545 nstat_lim_activity_check(struct if_lim_perf_stat *st)
2546 {
2547 /* check that the current activity is enough to report stats */
2548 if (st->lim_total_txpkts < nstat_lim_min_tx_pkts ||
2549 st->lim_total_rxpkts < nstat_lim_min_rx_pkts ||
2550 st->lim_conn_attempts == 0) {
2551 return FALSE;
2552 }
2553
2554 /*
2555 * Compute percentages if there was enough activity. Use
2556 * shift-left by 10 to preserve precision.
2557 */
2558 st->lim_packet_loss_percent = ((st->lim_total_retxpkts << 10) /
2559 st->lim_total_txpkts) * 100;
2560
2561 st->lim_packet_ooo_percent = ((st->lim_total_oopkts << 10) /
2562 st->lim_total_rxpkts) * 100;
2563
2564 st->lim_conn_timeout_percent = ((st->lim_conn_timeouts << 10) /
2565 st->lim_conn_attempts) * 100;
2566
2567 /*
2568 * Is Low Internet detected? First order metrics are bandwidth
2569 * and RTT. If these metrics are below the minimum thresholds
2570 * defined then the network attachment can be classified as
2571 * having Low Internet capacity.
2572 *
2573 * High connection timeout rate also indicates Low Internet
2574 * capacity.
2575 */
2576 if (st->lim_dl_max_bandwidth > 0 &&
2577 st->lim_dl_max_bandwidth <= NSTAT_LIM_DL_MAX_BANDWIDTH_THRESHOLD) {
2578 st->lim_dl_detected = 1;
2579 }
2580
2581 if ((st->lim_ul_max_bandwidth > 0 &&
2582 st->lim_ul_max_bandwidth <= NSTAT_LIM_UL_MAX_BANDWIDTH_THRESHOLD) ||
2583 st->lim_rtt_min >= NSTAT_LIM_UL_MIN_RTT_THRESHOLD) {
2584 st->lim_ul_detected = 1;
2585 }
2586
2587 if (st->lim_conn_attempts > 20 &&
2588 st->lim_conn_timeout_percent >=
2589 NSTAT_LIM_CONN_TIMEOUT_PERCENT_THRESHOLD) {
2590 st->lim_ul_detected = 1;
2591 }
2592 /*
2593 * Second-order metric: if packet loss remains high even for
2594 * background traffic, which uses delay-based congestion control,
2595 * classify the attachment as Low Internet as well.
2596 */
2597 if (st->lim_bk_txpkts >= nstat_lim_min_tx_pkts &&
2598 st->lim_packet_loss_percent >=
2599 NSTAT_LIM_PACKET_LOSS_PERCENT_THRESHOLD) {
2600 st->lim_ul_detected = 1;
2601 }
2602 return TRUE;
2603 }
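/*
 * Note that the Q10 convention carries into the thresholds: "10 percent"
 * is written (10 << 10) so it compares directly against the Q10
 * percentages computed above. A condensed sketch of the first-order
 * bandwidth/RTT classification (constants mirror the #defines above):
 */
#include <stdbool.h>
#include <stdint.h>

#define DL_MAX_BW_THRESHOLD   1000000ULL    /* 1 Mbps */
#define UL_MAX_BW_THRESHOLD   500000ULL     /* 500 Kbps */
#define UL_MIN_RTT_THRESHOLD  1000ULL       /* 1 second */

bool
low_internet_downlink(uint64_t dl_max_bw)
{
    return dl_max_bw > 0 && dl_max_bw <= DL_MAX_BW_THRESHOLD;
}

bool
low_internet_uplink(uint64_t ul_max_bw, uint64_t rtt_min)
{
    return (ul_max_bw > 0 && ul_max_bw <= UL_MAX_BW_THRESHOLD) ||
           rtt_min >= UL_MIN_RTT_THRESHOLD;
}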
2604
2605 static u_int64_t nstat_lim_last_report_time = 0;
2606 static void
2607 nstat_ifnet_report_lim_stats(void)
2608 {
2609 u_int64_t uptime;
2610 struct nstat_sysinfo_data data;
2611 struct nstat_sysinfo_lim_stats *st;
2612 struct ifnet *ifp;
2613 int err;
2614
2615 uptime = net_uptime();
2616
2617 if ((u_int32_t)(uptime - nstat_lim_last_report_time) <
2618 nstat_lim_interval) {
2619 return;
2620 }
2621
2622 nstat_lim_last_report_time = uptime;
2623 data.flags = NSTAT_SYSINFO_LIM_STATS;
2624 st = &data.u.lim_stats;
2625 data.unsent_data_cnt = 0;
2626
2627 ifnet_head_lock_shared();
2628 TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
2629 if (!IF_FULLY_ATTACHED(ifp)) {
2630 continue;
2631 }
2632
2633 /* Limit reporting to Wifi, Ethernet and cellular */
2634 if (!(IFNET_IS_ETHERNET(ifp) || IFNET_IS_CELLULAR(ifp))) {
2635 continue;
2636 }
2637
2638 if (!nstat_lim_activity_check(&ifp->if_lim_stat)) {
2639 continue;
2640 }
2641
2642 bzero(st, sizeof(*st));
2643 st->ifnet_siglen = sizeof(st->ifnet_signature);
2644 err = ifnet_get_netsignature(ifp, AF_INET,
2645 (u_int8_t *)&st->ifnet_siglen, NULL,
2646 st->ifnet_signature);
2647 if (err != 0) {
2648 err = ifnet_get_netsignature(ifp, AF_INET6,
2649 (u_int8_t *)&st->ifnet_siglen, NULL,
2650 st->ifnet_signature);
2651 if (err != 0) {
2652 continue;
2653 }
2654 }
2655 ifnet_lock_shared(ifp);
2656 if (IFNET_IS_CELLULAR(ifp)) {
2657 st->ifnet_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_CELLULAR;
2658 } else if (IFNET_IS_WIFI(ifp)) {
2659 st->ifnet_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_WIFI;
2660 } else {
2661 st->ifnet_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_ETHERNET;
2662 }
2663 bcopy(&ifp->if_lim_stat, &st->lim_stat,
2664 sizeof(st->lim_stat));
2665
2666 /* Zero the stats in ifp */
2667 bzero(&ifp->if_lim_stat, sizeof(ifp->if_lim_stat));
2668 ifnet_lock_done(ifp);
2669 nstat_sysinfo_send_data(&data);
2670 }
2671 ifnet_head_done();
2672 }
2673
2674 static errno_t
2675 nstat_ifnet_copy_descriptor(
2676 nstat_provider_cookie_t cookie,
2677 void *data,
2678 u_int32_t len)
2679 {
2680 nstat_ifnet_descriptor *desc = (nstat_ifnet_descriptor *)data;
2681 struct nstat_ifnet_cookie *ifcookie =
2682 (struct nstat_ifnet_cookie *)cookie;
2683 struct ifnet *ifp = ifcookie->ifp;
2684
2685 if (len < sizeof(nstat_ifnet_descriptor)) {
2686 return EINVAL;
2687 }
2688
2689 if (nstat_ifnet_gone(cookie)) {
2690 return EINVAL;
2691 }
2692
2693 bzero(desc, sizeof(*desc));
2694 ifnet_lock_shared(ifp);
2695 strlcpy(desc->name, ifp->if_xname, sizeof(desc->name));
2696 desc->ifindex = ifp->if_index;
2697 desc->threshold = ifp->if_data_threshold;
2698 desc->type = ifp->if_type;
2699 if (ifp->if_desc.ifd_len < sizeof(desc->description)) {
2700 memcpy(desc->description, ifp->if_desc.ifd_desc,
2701 sizeof(desc->description));
2702 }
2703 nstat_ifnet_copy_link_status(ifp, desc);
2704 ifnet_lock_done(ifp);
2705 return 0;
2706 }
2707
2708 static void
2709 nstat_init_ifnet_provider(void)
2710 {
2711 bzero(&nstat_ifnet_provider, sizeof(nstat_ifnet_provider));
2712 nstat_ifnet_provider.nstat_provider_id = NSTAT_PROVIDER_IFNET;
2713 nstat_ifnet_provider.nstat_descriptor_length = sizeof(nstat_ifnet_descriptor);
2714 nstat_ifnet_provider.nstat_lookup = nstat_ifnet_lookup;
2715 nstat_ifnet_provider.nstat_gone = nstat_ifnet_gone;
2716 nstat_ifnet_provider.nstat_counts = nstat_ifnet_counts;
2717 nstat_ifnet_provider.nstat_watcher_add = NULL;
2718 nstat_ifnet_provider.nstat_watcher_remove = NULL;
2719 nstat_ifnet_provider.nstat_copy_descriptor = nstat_ifnet_copy_descriptor;
2720 nstat_ifnet_provider.nstat_release = nstat_ifnet_release;
2721 nstat_ifnet_provider.next = nstat_providers;
2722 nstat_providers = &nstat_ifnet_provider;
2723 }
2724
2725 __private_extern__ void
2726 nstat_ifnet_threshold_reached(unsigned int ifindex)
2727 {
2728 nstat_control_state *state;
2729 nstat_src *src;
2730 struct ifnet *ifp;
2731 struct nstat_ifnet_cookie *ifcookie;
2732
2733 lck_mtx_lock(&nstat_mtx);
2734 for (state = nstat_controls; state; state = state->ncs_next) {
2735 lck_mtx_lock(&state->ncs_mtx);
2736 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
2737 {
2738 if (src->provider != &nstat_ifnet_provider) {
2739 continue;
2740 }
2741 ifcookie = (struct nstat_ifnet_cookie *)src->cookie;
2742 ifp = ifcookie->ifp;
2743 if (ifp->if_index != ifindex) {
2744 continue;
2745 }
2746 nstat_control_send_counts(state, src, 0, 0, NULL);
2747 }
2748 lck_mtx_unlock(&state->ncs_mtx);
2749 }
2750 lck_mtx_unlock(&nstat_mtx);
2751 }
2752
2753 #pragma mark -- Sysinfo --
2754 static void
2755 nstat_set_keyval_scalar(nstat_sysinfo_keyval *kv, int key, u_int32_t val)
2756 {
2757 kv->nstat_sysinfo_key = key;
2758 kv->nstat_sysinfo_flags = NSTAT_SYSINFO_FLAG_SCALAR;
2759 kv->u.nstat_sysinfo_scalar = val;
2760 kv->nstat_sysinfo_valsize = sizeof(kv->u.nstat_sysinfo_scalar);
2761 }
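/*
 * Sysinfo counters are marshaled as a flat array of key/value records;
 * the writer bumps one index per record, and the VERIFY(i == nkeyvals)
 * checks further down confirm the allocation was sized exactly. The
 * fill-and-check pattern with an illustrative record type (the real
 * nstat_sysinfo_keyval layout is defined in ntstat.h):
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct keyval {
    int key;
    uint32_t valsize;
    uint64_t scalar;
};

static void
set_scalar(struct keyval *kv, int key, uint64_t val)
{
    kv->key = key;
    kv->scalar = val;
    kv->valsize = sizeof(kv->scalar);
}

static size_t
fill(struct keyval *kv, size_t nkeyvals)
{
    size_t i = 0;

    set_scalar(&kv[i++], 1, 42);        /* hypothetical keys and values */
    set_scalar(&kv[i++], 2, 7);
    assert(i == nkeyvals);              /* allocation must match records */
    return i;
}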
2762
2763 static void
2764 nstat_set_keyval_string(nstat_sysinfo_keyval *kv, int key, u_int8_t *buf,
2765 u_int32_t len)
2766 {
2767 kv->nstat_sysinfo_key = key;
2768 kv->nstat_sysinfo_flags = NSTAT_SYSINFO_FLAG_STRING;
2769 kv->nstat_sysinfo_valsize = min(len,
2770 NSTAT_SYSINFO_KEYVAL_STRING_MAXSIZE);
2771 bcopy(buf, kv->u.nstat_sysinfo_string, kv->nstat_sysinfo_valsize);
2772 }
2773
2774 static void
2775 nstat_sysinfo_send_data_internal(
2776 nstat_control_state *control,
2777 nstat_sysinfo_data *data)
2778 {
2779 nstat_msg_sysinfo_counts *syscnt = NULL;
2780 size_t allocsize = 0, countsize = 0, nkeyvals = 0, finalsize = 0;
2781 nstat_sysinfo_keyval *kv;
2782 errno_t result = 0;
2783 size_t i = 0;
2784
2785 allocsize = offsetof(nstat_msg_sysinfo_counts, counts);
2786 countsize = offsetof(nstat_sysinfo_counts, nstat_sysinfo_keyvals);
2787 finalsize = allocsize;
2788
2789 /* get number of key-vals for each kind of stat */
2790 switch (data->flags) {
2791 case NSTAT_SYSINFO_MBUF_STATS:
2792 nkeyvals = sizeof(struct nstat_sysinfo_mbuf_stats) /
2793 sizeof(u_int32_t);
2794 break;
2795 case NSTAT_SYSINFO_TCP_STATS:
2796 nkeyvals = NSTAT_SYSINFO_TCP_STATS_COUNT;
2797 break;
2798 case NSTAT_SYSINFO_IFNET_ECN_STATS:
2799 nkeyvals = (sizeof(struct if_tcp_ecn_stat) /
2800 sizeof(u_int64_t));
2801
2802 /* Two more keys for ifnet type and proto */
2803 nkeyvals += 2;
2804
2805 /* One key for unsent data. */
2806 nkeyvals++;
2807 break;
2808 case NSTAT_SYSINFO_LIM_STATS:
2809 nkeyvals = NSTAT_LIM_STAT_KEYVAL_COUNT;
2810 break;
2811 case NSTAT_SYSINFO_NET_API_STATS:
2812 nkeyvals = NSTAT_NET_API_STAT_KEYVAL_COUNT;
2813 break;
2814 default:
2815 return;
2816 }
2817 countsize += sizeof(nstat_sysinfo_keyval) * nkeyvals;
2818 allocsize += countsize;
2819
2820 syscnt = OSMalloc(allocsize, nstat_malloc_tag);
2821 if (syscnt == NULL) {
2822 return;
2823 }
2824 bzero(syscnt, allocsize);
2825
2826 kv = (nstat_sysinfo_keyval *) &syscnt->counts.nstat_sysinfo_keyvals;
2827 switch (data->flags) {
2828 case NSTAT_SYSINFO_MBUF_STATS:
2829 {
2830 nstat_set_keyval_scalar(&kv[i++],
2831 NSTAT_SYSINFO_KEY_MBUF_256B_TOTAL,
2832 data->u.mb_stats.total_256b);
2833 nstat_set_keyval_scalar(&kv[i++],
2834 NSTAT_SYSINFO_KEY_MBUF_2KB_TOTAL,
2835 data->u.mb_stats.total_2kb);
2836 nstat_set_keyval_scalar(&kv[i++],
2837 NSTAT_SYSINFO_KEY_MBUF_4KB_TOTAL,
2838 data->u.mb_stats.total_4kb);
2839 nstat_set_keyval_scalar(&kv[i++],
2840 NSTAT_SYSINFO_MBUF_16KB_TOTAL,
2841 data->u.mb_stats.total_16kb);
2842 nstat_set_keyval_scalar(&kv[i++],
2843 NSTAT_SYSINFO_KEY_SOCK_MBCNT,
2844 data->u.mb_stats.sbmb_total);
2845 nstat_set_keyval_scalar(&kv[i++],
2846 NSTAT_SYSINFO_KEY_SOCK_ATMBLIMIT,
2847 data->u.mb_stats.sb_atmbuflimit);
2848 nstat_set_keyval_scalar(&kv[i++],
2849 NSTAT_SYSINFO_MBUF_DRAIN_CNT,
2850 data->u.mb_stats.draincnt);
2851 nstat_set_keyval_scalar(&kv[i++],
2852 NSTAT_SYSINFO_MBUF_MEM_RELEASED,
2853 data->u.mb_stats.memreleased);
2854 nstat_set_keyval_scalar(&kv[i++],
2855 NSTAT_SYSINFO_KEY_SOCK_MBFLOOR,
2856 data->u.mb_stats.sbmb_floor);
2857 VERIFY(i == nkeyvals);
2858 break;
2859 }
2860 case NSTAT_SYSINFO_TCP_STATS:
2861 {
2862 nstat_set_keyval_scalar(&kv[i++],
2863 NSTAT_SYSINFO_KEY_IPV4_AVGRTT,
2864 data->u.tcp_stats.ipv4_avgrtt);
2865 nstat_set_keyval_scalar(&kv[i++],
2866 NSTAT_SYSINFO_KEY_IPV6_AVGRTT,
2867 data->u.tcp_stats.ipv6_avgrtt);
2868 nstat_set_keyval_scalar(&kv[i++],
2869 NSTAT_SYSINFO_KEY_SEND_PLR,
2870 data->u.tcp_stats.send_plr);
2871 nstat_set_keyval_scalar(&kv[i++],
2872 NSTAT_SYSINFO_KEY_RECV_PLR,
2873 data->u.tcp_stats.recv_plr);
2874 nstat_set_keyval_scalar(&kv[i++],
2875 NSTAT_SYSINFO_KEY_SEND_TLRTO,
2876 data->u.tcp_stats.send_tlrto_rate);
2877 nstat_set_keyval_scalar(&kv[i++],
2878 NSTAT_SYSINFO_KEY_SEND_REORDERRATE,
2879 data->u.tcp_stats.send_reorder_rate);
2880 nstat_set_keyval_scalar(&kv[i++],
2881 NSTAT_SYSINFO_CONNECTION_ATTEMPTS,
2882 data->u.tcp_stats.connection_attempts);
2883 nstat_set_keyval_scalar(&kv[i++],
2884 NSTAT_SYSINFO_CONNECTION_ACCEPTS,
2885 data->u.tcp_stats.connection_accepts);
2886 nstat_set_keyval_scalar(&kv[i++],
2887 NSTAT_SYSINFO_ECN_CLIENT_ENABLED,
2888 data->u.tcp_stats.ecn_client_enabled);
2889 nstat_set_keyval_scalar(&kv[i++],
2890 NSTAT_SYSINFO_ECN_SERVER_ENABLED,
2891 data->u.tcp_stats.ecn_server_enabled);
2892 nstat_set_keyval_scalar(&kv[i++],
2893 NSTAT_SYSINFO_ECN_CLIENT_SETUP,
2894 data->u.tcp_stats.ecn_client_setup);
2895 nstat_set_keyval_scalar(&kv[i++],
2896 NSTAT_SYSINFO_ECN_SERVER_SETUP,
2897 data->u.tcp_stats.ecn_server_setup);
2898 nstat_set_keyval_scalar(&kv[i++],
2899 NSTAT_SYSINFO_ECN_CLIENT_SUCCESS,
2900 data->u.tcp_stats.ecn_client_success);
2901 nstat_set_keyval_scalar(&kv[i++],
2902 NSTAT_SYSINFO_ECN_SERVER_SUCCESS,
2903 data->u.tcp_stats.ecn_server_success);
2904 nstat_set_keyval_scalar(&kv[i++],
2905 NSTAT_SYSINFO_ECN_NOT_SUPPORTED,
2906 data->u.tcp_stats.ecn_not_supported);
2907 nstat_set_keyval_scalar(&kv[i++],
2908 NSTAT_SYSINFO_ECN_LOST_SYN,
2909 data->u.tcp_stats.ecn_lost_syn);
2910 nstat_set_keyval_scalar(&kv[i++],
2911 NSTAT_SYSINFO_ECN_LOST_SYNACK,
2912 data->u.tcp_stats.ecn_lost_synack);
2913 nstat_set_keyval_scalar(&kv[i++],
2914 NSTAT_SYSINFO_ECN_RECV_CE,
2915 data->u.tcp_stats.ecn_recv_ce);
2916 nstat_set_keyval_scalar(&kv[i++],
2917 NSTAT_SYSINFO_ECN_RECV_ECE,
2918 data->u.tcp_stats.ecn_recv_ece);
2919 nstat_set_keyval_scalar(&kv[i++],
2920 NSTAT_SYSINFO_ECN_SENT_ECE,
2921 data->u.tcp_stats.ecn_sent_ece);
2922 nstat_set_keyval_scalar(&kv[i++],
2923 NSTAT_SYSINFO_ECN_CONN_RECV_CE,
2924 data->u.tcp_stats.ecn_conn_recv_ce);
2925 nstat_set_keyval_scalar(&kv[i++],
2926 NSTAT_SYSINFO_ECN_CONN_RECV_ECE,
2927 data->u.tcp_stats.ecn_conn_recv_ece);
2928 nstat_set_keyval_scalar(&kv[i++],
2929 NSTAT_SYSINFO_ECN_CONN_PLNOCE,
2930 data->u.tcp_stats.ecn_conn_plnoce);
2931 nstat_set_keyval_scalar(&kv[i++],
2932 NSTAT_SYSINFO_ECN_CONN_PL_CE,
2933 data->u.tcp_stats.ecn_conn_pl_ce);
2934 nstat_set_keyval_scalar(&kv[i++],
2935 NSTAT_SYSINFO_ECN_CONN_NOPL_CE,
2936 data->u.tcp_stats.ecn_conn_nopl_ce);
2937 nstat_set_keyval_scalar(&kv[i++],
2938 NSTAT_SYSINFO_ECN_FALLBACK_SYNLOSS,
2939 data->u.tcp_stats.ecn_fallback_synloss);
2940 nstat_set_keyval_scalar(&kv[i++],
2941 NSTAT_SYSINFO_ECN_FALLBACK_REORDER,
2942 data->u.tcp_stats.ecn_fallback_reorder);
2943 nstat_set_keyval_scalar(&kv[i++],
2944 NSTAT_SYSINFO_ECN_FALLBACK_CE,
2945 data->u.tcp_stats.ecn_fallback_ce);
2946 nstat_set_keyval_scalar(&kv[i++],
2947 NSTAT_SYSINFO_TFO_SYN_DATA_RCV,
2948 data->u.tcp_stats.tfo_syn_data_rcv);
2949 nstat_set_keyval_scalar(&kv[i++],
2950 NSTAT_SYSINFO_TFO_COOKIE_REQ_RCV,
2951 data->u.tcp_stats.tfo_cookie_req_rcv);
2952 nstat_set_keyval_scalar(&kv[i++],
2953 NSTAT_SYSINFO_TFO_COOKIE_SENT,
2954 data->u.tcp_stats.tfo_cookie_sent);
2955 nstat_set_keyval_scalar(&kv[i++],
2956 NSTAT_SYSINFO_TFO_COOKIE_INVALID,
2957 data->u.tcp_stats.tfo_cookie_invalid);
2958 nstat_set_keyval_scalar(&kv[i++],
2959 NSTAT_SYSINFO_TFO_COOKIE_REQ,
2960 data->u.tcp_stats.tfo_cookie_req);
2961 nstat_set_keyval_scalar(&kv[i++],
2962 NSTAT_SYSINFO_TFO_COOKIE_RCV,
2963 data->u.tcp_stats.tfo_cookie_rcv);
2964 nstat_set_keyval_scalar(&kv[i++],
2965 NSTAT_SYSINFO_TFO_SYN_DATA_SENT,
2966 data->u.tcp_stats.tfo_syn_data_sent);
2967 nstat_set_keyval_scalar(&kv[i++],
2968 NSTAT_SYSINFO_TFO_SYN_DATA_ACKED,
2969 data->u.tcp_stats.tfo_syn_data_acked);
2970 nstat_set_keyval_scalar(&kv[i++],
2971 NSTAT_SYSINFO_TFO_SYN_LOSS,
2972 data->u.tcp_stats.tfo_syn_loss);
2973 nstat_set_keyval_scalar(&kv[i++],
2974 NSTAT_SYSINFO_TFO_BLACKHOLE,
2975 data->u.tcp_stats.tfo_blackhole);
2976 nstat_set_keyval_scalar(&kv[i++],
2977 NSTAT_SYSINFO_TFO_COOKIE_WRONG,
2978 data->u.tcp_stats.tfo_cookie_wrong);
2979 nstat_set_keyval_scalar(&kv[i++],
2980 NSTAT_SYSINFO_TFO_NO_COOKIE_RCV,
2981 data->u.tcp_stats.tfo_no_cookie_rcv);
2982 nstat_set_keyval_scalar(&kv[i++],
2983 NSTAT_SYSINFO_TFO_HEURISTICS_DISABLE,
2984 data->u.tcp_stats.tfo_heuristics_disable);
2985 nstat_set_keyval_scalar(&kv[i++],
2986 NSTAT_SYSINFO_TFO_SEND_BLACKHOLE,
2987 data->u.tcp_stats.tfo_sndblackhole);
2988 nstat_set_keyval_scalar(&kv[i++],
2989 NSTAT_SYSINFO_MPTCP_HANDOVER_ATTEMPT,
2990 data->u.tcp_stats.mptcp_handover_attempt);
2991 nstat_set_keyval_scalar(&kv[i++],
2992 NSTAT_SYSINFO_MPTCP_INTERACTIVE_ATTEMPT,
2993 data->u.tcp_stats.mptcp_interactive_attempt);
2994 nstat_set_keyval_scalar(&kv[i++],
2995 NSTAT_SYSINFO_MPTCP_AGGREGATE_ATTEMPT,
2996 data->u.tcp_stats.mptcp_aggregate_attempt);
2997 nstat_set_keyval_scalar(&kv[i++],
2998 NSTAT_SYSINFO_MPTCP_FP_HANDOVER_ATTEMPT,
2999 data->u.tcp_stats.mptcp_fp_handover_attempt);
3000 nstat_set_keyval_scalar(&kv[i++],
3001 NSTAT_SYSINFO_MPTCP_FP_INTERACTIVE_ATTEMPT,
3002 data->u.tcp_stats.mptcp_fp_interactive_attempt);
3003 nstat_set_keyval_scalar(&kv[i++],
3004 NSTAT_SYSINFO_MPTCP_FP_AGGREGATE_ATTEMPT,
3005 data->u.tcp_stats.mptcp_fp_aggregate_attempt);
3006 nstat_set_keyval_scalar(&kv[i++],
3007 NSTAT_SYSINFO_MPTCP_HEURISTIC_FALLBACK,
3008 data->u.tcp_stats.mptcp_heuristic_fallback);
3009 nstat_set_keyval_scalar(&kv[i++],
3010 NSTAT_SYSINFO_MPTCP_FP_HEURISTIC_FALLBACK,
3011 data->u.tcp_stats.mptcp_fp_heuristic_fallback);
3012 nstat_set_keyval_scalar(&kv[i++],
3013 NSTAT_SYSINFO_MPTCP_HANDOVER_SUCCESS_WIFI,
3014 data->u.tcp_stats.mptcp_handover_success_wifi);
3015 nstat_set_keyval_scalar(&kv[i++],
3016 NSTAT_SYSINFO_MPTCP_HANDOVER_SUCCESS_CELL,
3017 data->u.tcp_stats.mptcp_handover_success_cell);
3018 nstat_set_keyval_scalar(&kv[i++],
3019 NSTAT_SYSINFO_MPTCP_INTERACTIVE_SUCCESS,
3020 data->u.tcp_stats.mptcp_interactive_success);
3021 nstat_set_keyval_scalar(&kv[i++],
3022 NSTAT_SYSINFO_MPTCP_AGGREGATE_SUCCESS,
3023 data->u.tcp_stats.mptcp_aggregate_success);
3024 nstat_set_keyval_scalar(&kv[i++],
3025 NSTAT_SYSINFO_MPTCP_FP_HANDOVER_SUCCESS_WIFI,
3026 data->u.tcp_stats.mptcp_fp_handover_success_wifi);
3027 nstat_set_keyval_scalar(&kv[i++],
3028 NSTAT_SYSINFO_MPTCP_FP_HANDOVER_SUCCESS_CELL,
3029 data->u.tcp_stats.mptcp_fp_handover_success_cell);
3030 nstat_set_keyval_scalar(&kv[i++],
3031 NSTAT_SYSINFO_MPTCP_FP_INTERACTIVE_SUCCESS,
3032 data->u.tcp_stats.mptcp_fp_interactive_success);
3033 nstat_set_keyval_scalar(&kv[i++],
3034 NSTAT_SYSINFO_MPTCP_FP_AGGREGATE_SUCCESS,
3035 data->u.tcp_stats.mptcp_fp_aggregate_success);
3036 nstat_set_keyval_scalar(&kv[i++],
3037 NSTAT_SYSINFO_MPTCP_HANDOVER_CELL_FROM_WIFI,
3038 data->u.tcp_stats.mptcp_handover_cell_from_wifi);
3039 nstat_set_keyval_scalar(&kv[i++],
3040 NSTAT_SYSINFO_MPTCP_HANDOVER_WIFI_FROM_CELL,
3041 data->u.tcp_stats.mptcp_handover_wifi_from_cell);
3042 nstat_set_keyval_scalar(&kv[i++],
3043 NSTAT_SYSINFO_MPTCP_INTERACTIVE_CELL_FROM_WIFI,
3044 data->u.tcp_stats.mptcp_interactive_cell_from_wifi);
3045 nstat_set_keyval_scalar(&kv[i++],
3046 NSTAT_SYSINFO_MPTCP_HANDOVER_CELL_BYTES,
3047 data->u.tcp_stats.mptcp_handover_cell_bytes);
3048 nstat_set_keyval_scalar(&kv[i++],
3049 NSTAT_SYSINFO_MPTCP_INTERACTIVE_CELL_BYTES,
3050 data->u.tcp_stats.mptcp_interactive_cell_bytes);
3051 nstat_set_keyval_scalar(&kv[i++],
3052 NSTAT_SYSINFO_MPTCP_AGGREGATE_CELL_BYTES,
3053 data->u.tcp_stats.mptcp_aggregate_cell_bytes);
3054 nstat_set_keyval_scalar(&kv[i++],
3055 NSTAT_SYSINFO_MPTCP_HANDOVER_ALL_BYTES,
3056 data->u.tcp_stats.mptcp_handover_all_bytes);
3057 nstat_set_keyval_scalar(&kv[i++],
3058 NSTAT_SYSINFO_MPTCP_INTERACTIVE_ALL_BYTES,
3059 data->u.tcp_stats.mptcp_interactive_all_bytes);
3060 nstat_set_keyval_scalar(&kv[i++],
3061 NSTAT_SYSINFO_MPTCP_AGGREGATE_ALL_BYTES,
3062 data->u.tcp_stats.mptcp_aggregate_all_bytes);
3063 nstat_set_keyval_scalar(&kv[i++],
3064 NSTAT_SYSINFO_MPTCP_BACK_TO_WIFI,
3065 data->u.tcp_stats.mptcp_back_to_wifi);
3066 nstat_set_keyval_scalar(&kv[i++],
3067 NSTAT_SYSINFO_MPTCP_WIFI_PROXY,
3068 data->u.tcp_stats.mptcp_wifi_proxy);
3069 nstat_set_keyval_scalar(&kv[i++],
3070 NSTAT_SYSINFO_MPTCP_CELL_PROXY,
3071 data->u.tcp_stats.mptcp_cell_proxy);
3072 nstat_set_keyval_scalar(&kv[i++],
3073 NSTAT_SYSINFO_MPTCP_TRIGGERED_CELL,
3074 data->u.tcp_stats.mptcp_triggered_cell);
3075 VERIFY(i == nkeyvals);
3076 break;
3077 }
3078 case NSTAT_SYSINFO_IFNET_ECN_STATS:
3079 {
3080 nstat_set_keyval_scalar(&kv[i++],
3081 NSTAT_SYSINFO_ECN_IFNET_TYPE,
3082 data->u.ifnet_ecn_stats.ifnet_type);
3083 nstat_set_keyval_scalar(&kv[i++],
3084 NSTAT_SYSINFO_ECN_IFNET_PROTO,
3085 data->u.ifnet_ecn_stats.ifnet_proto);
3086 nstat_set_keyval_scalar(&kv[i++],
3087 NSTAT_SYSINFO_ECN_IFNET_CLIENT_SETUP,
3088 data->u.ifnet_ecn_stats.ecn_stat.ecn_client_setup);
3089 nstat_set_keyval_scalar(&kv[i++],
3090 NSTAT_SYSINFO_ECN_IFNET_SERVER_SETUP,
3091 data->u.ifnet_ecn_stats.ecn_stat.ecn_server_setup);
3092 nstat_set_keyval_scalar(&kv[i++],
3093 NSTAT_SYSINFO_ECN_IFNET_CLIENT_SUCCESS,
3094 data->u.ifnet_ecn_stats.ecn_stat.ecn_client_success);
3095 nstat_set_keyval_scalar(&kv[i++],
3096 NSTAT_SYSINFO_ECN_IFNET_SERVER_SUCCESS,
3097 data->u.ifnet_ecn_stats.ecn_stat.ecn_server_success);
3098 nstat_set_keyval_scalar(&kv[i++],
3099 NSTAT_SYSINFO_ECN_IFNET_PEER_NOSUPPORT,
3100 data->u.ifnet_ecn_stats.ecn_stat.ecn_peer_nosupport);
3101 nstat_set_keyval_scalar(&kv[i++],
3102 NSTAT_SYSINFO_ECN_IFNET_SYN_LOST,
3103 data->u.ifnet_ecn_stats.ecn_stat.ecn_syn_lost);
3104 nstat_set_keyval_scalar(&kv[i++],
3105 NSTAT_SYSINFO_ECN_IFNET_SYNACK_LOST,
3106 data->u.ifnet_ecn_stats.ecn_stat.ecn_synack_lost);
3107 nstat_set_keyval_scalar(&kv[i++],
3108 NSTAT_SYSINFO_ECN_IFNET_RECV_CE,
3109 data->u.ifnet_ecn_stats.ecn_stat.ecn_recv_ce);
3110 nstat_set_keyval_scalar(&kv[i++],
3111 NSTAT_SYSINFO_ECN_IFNET_RECV_ECE,
3112 data->u.ifnet_ecn_stats.ecn_stat.ecn_recv_ece);
3113 nstat_set_keyval_scalar(&kv[i++],
3114 NSTAT_SYSINFO_ECN_IFNET_CONN_RECV_CE,
3115 data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_recv_ce);
3116 nstat_set_keyval_scalar(&kv[i++],
3117 NSTAT_SYSINFO_ECN_IFNET_CONN_RECV_ECE,
3118 data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_recv_ece);
3119 nstat_set_keyval_scalar(&kv[i++],
3120 NSTAT_SYSINFO_ECN_IFNET_CONN_PLNOCE,
3121 data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_plnoce);
3122 nstat_set_keyval_scalar(&kv[i++],
3123 NSTAT_SYSINFO_ECN_IFNET_CONN_PLCE,
3124 data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_plce);
3125 nstat_set_keyval_scalar(&kv[i++],
3126 NSTAT_SYSINFO_ECN_IFNET_CONN_NOPLCE,
3127 data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_noplce);
3128 nstat_set_keyval_scalar(&kv[i++],
3129 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_SYNLOSS,
3130 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_synloss);
3131 nstat_set_keyval_scalar(&kv[i++],
3132 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_REORDER,
3133 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_reorder);
3134 nstat_set_keyval_scalar(&kv[i++],
3135 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_CE,
3136 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_ce);
3137 nstat_set_keyval_scalar(&kv[i++],
3138 NSTAT_SYSINFO_ECN_IFNET_ON_RTT_AVG,
3139 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rtt_avg);
3140 nstat_set_keyval_scalar(&kv[i++],
3141 NSTAT_SYSINFO_ECN_IFNET_ON_RTT_VAR,
3142 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rtt_var);
3143 nstat_set_keyval_scalar(&kv[i++],
3144 NSTAT_SYSINFO_ECN_IFNET_ON_OOPERCENT,
3145 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.oo_percent);
3146 nstat_set_keyval_scalar(&kv[i++],
3147 NSTAT_SYSINFO_ECN_IFNET_ON_SACK_EPISODE,
3148 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.sack_episodes);
3149 nstat_set_keyval_scalar(&kv[i++],
3150 NSTAT_SYSINFO_ECN_IFNET_ON_REORDER_PERCENT,
3151 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.reorder_percent);
3152 nstat_set_keyval_scalar(&kv[i++],
3153 NSTAT_SYSINFO_ECN_IFNET_ON_RXMIT_PERCENT,
3154 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rxmit_percent);
3155 nstat_set_keyval_scalar(&kv[i++],
3156 NSTAT_SYSINFO_ECN_IFNET_ON_RXMIT_DROP,
3157 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rxmit_drop);
3158 nstat_set_keyval_scalar(&kv[i++],
3159 NSTAT_SYSINFO_ECN_IFNET_OFF_RTT_AVG,
3160 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rtt_avg);
3161 nstat_set_keyval_scalar(&kv[i++],
3162 NSTAT_SYSINFO_ECN_IFNET_OFF_RTT_VAR,
3163 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rtt_var);
3164 nstat_set_keyval_scalar(&kv[i++],
3165 NSTAT_SYSINFO_ECN_IFNET_OFF_OOPERCENT,
3166 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.oo_percent);
3167 nstat_set_keyval_scalar(&kv[i++],
3168 NSTAT_SYSINFO_ECN_IFNET_OFF_SACK_EPISODE,
3169 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.sack_episodes);
3170 nstat_set_keyval_scalar(&kv[i++],
3171 NSTAT_SYSINFO_ECN_IFNET_OFF_REORDER_PERCENT,
3172 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.reorder_percent);
3173 nstat_set_keyval_scalar(&kv[i++],
3174 NSTAT_SYSINFO_ECN_IFNET_OFF_RXMIT_PERCENT,
3175 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rxmit_percent);
3176 nstat_set_keyval_scalar(&kv[i++],
3177 NSTAT_SYSINFO_ECN_IFNET_OFF_RXMIT_DROP,
3178 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rxmit_drop);
3179 nstat_set_keyval_scalar(&kv[i++],
3180 NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_TXPKTS,
3181 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_txpkts);
3182 nstat_set_keyval_scalar(&kv[i++],
3183 NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_RXMTPKTS,
3184 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_rxmitpkts);
3185 nstat_set_keyval_scalar(&kv[i++],
3186 NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_RXPKTS,
3187 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_rxpkts);
3188 nstat_set_keyval_scalar(&kv[i++],
3189 NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_OOPKTS,
3190 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_oopkts);
3191 nstat_set_keyval_scalar(&kv[i++],
3192 NSTAT_SYSINFO_ECN_IFNET_ON_DROP_RST,
3193 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rst_drop);
3194 nstat_set_keyval_scalar(&kv[i++],
3195 NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_TXPKTS,
3196 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_txpkts);
3197 nstat_set_keyval_scalar(&kv[i++],
3198 NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_RXMTPKTS,
3199 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_rxmitpkts);
3200 nstat_set_keyval_scalar(&kv[i++],
3201 NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_RXPKTS,
3202 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_rxpkts);
3203 nstat_set_keyval_scalar(&kv[i++],
3204 NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_OOPKTS,
3205 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_oopkts);
3206 nstat_set_keyval_scalar(&kv[i++],
3207 NSTAT_SYSINFO_ECN_IFNET_OFF_DROP_RST,
3208 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rst_drop);
3209 nstat_set_keyval_scalar(&kv[i++],
3210 NSTAT_SYSINFO_ECN_IFNET_TOTAL_CONN,
3211 data->u.ifnet_ecn_stats.ecn_stat.ecn_total_conn);
3212 nstat_set_keyval_scalar(&kv[i++],
3213 NSTAT_SYSINFO_IFNET_UNSENT_DATA,
3214 data->unsent_data_cnt);
3215 nstat_set_keyval_scalar(&kv[i++],
3216 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_DROPRST,
3217 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_droprst);
3218 nstat_set_keyval_scalar(&kv[i++],
3219 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_DROPRXMT,
3220 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_droprxmt);
3221 nstat_set_keyval_scalar(&kv[i++],
3222 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_SYNRST,
3223 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_synrst);
3224 break;
3225 }
3226 case NSTAT_SYSINFO_LIM_STATS:
3227 {
3228 nstat_set_keyval_string(&kv[i++],
3229 NSTAT_SYSINFO_LIM_IFNET_SIGNATURE,
3230 data->u.lim_stats.ifnet_signature,
3231 data->u.lim_stats.ifnet_siglen);
3232 nstat_set_keyval_scalar(&kv[i++],
3233 NSTAT_SYSINFO_LIM_IFNET_DL_MAX_BANDWIDTH,
3234 data->u.lim_stats.lim_stat.lim_dl_max_bandwidth);
3235 nstat_set_keyval_scalar(&kv[i++],
3236 NSTAT_SYSINFO_LIM_IFNET_UL_MAX_BANDWIDTH,
3237 data->u.lim_stats.lim_stat.lim_ul_max_bandwidth);
3238 nstat_set_keyval_scalar(&kv[i++],
3239 NSTAT_SYSINFO_LIM_IFNET_PACKET_LOSS_PERCENT,
3240 data->u.lim_stats.lim_stat.lim_packet_loss_percent);
3241 nstat_set_keyval_scalar(&kv[i++],
3242 NSTAT_SYSINFO_LIM_IFNET_PACKET_OOO_PERCENT,
3243 data->u.lim_stats.lim_stat.lim_packet_ooo_percent);
3244 nstat_set_keyval_scalar(&kv[i++],
3245 NSTAT_SYSINFO_LIM_IFNET_RTT_VARIANCE,
3246 data->u.lim_stats.lim_stat.lim_rtt_variance);
3247 nstat_set_keyval_scalar(&kv[i++],
3248 NSTAT_SYSINFO_LIM_IFNET_RTT_MIN,
3249 data->u.lim_stats.lim_stat.lim_rtt_min);
3250 nstat_set_keyval_scalar(&kv[i++],
3251 NSTAT_SYSINFO_LIM_IFNET_RTT_AVG,
3252 data->u.lim_stats.lim_stat.lim_rtt_average);
3253 nstat_set_keyval_scalar(&kv[i++],
3254 NSTAT_SYSINFO_LIM_IFNET_CONN_TIMEOUT_PERCENT,
3255 data->u.lim_stats.lim_stat.lim_conn_timeout_percent);
3256 nstat_set_keyval_scalar(&kv[i++],
3257 NSTAT_SYSINFO_LIM_IFNET_DL_DETECTED,
3258 data->u.lim_stats.lim_stat.lim_dl_detected);
3259 nstat_set_keyval_scalar(&kv[i++],
3260 NSTAT_SYSINFO_LIM_IFNET_UL_DETECTED,
3261 data->u.lim_stats.lim_stat.lim_ul_detected);
3262 nstat_set_keyval_scalar(&kv[i++],
3263 NSTAT_SYSINFO_LIM_IFNET_TYPE,
3264 data->u.lim_stats.ifnet_type);
3265 break;
3266 }
3267 case NSTAT_SYSINFO_NET_API_STATS:
3268 {
3269 nstat_set_keyval_scalar(&kv[i++],
3270 NSTAT_SYSINFO_API_IF_FLTR_ATTACH,
3271 data->u.net_api_stats.net_api_stats.nas_iflt_attach_total);
3272 nstat_set_keyval_scalar(&kv[i++],
3273 NSTAT_SYSINFO_API_IF_FLTR_ATTACH_OS,
3274 data->u.net_api_stats.net_api_stats.nas_iflt_attach_os_total);
3275 nstat_set_keyval_scalar(&kv[i++],
3276 NSTAT_SYSINFO_API_IP_FLTR_ADD,
3277 data->u.net_api_stats.net_api_stats.nas_ipf_add_total);
3278 nstat_set_keyval_scalar(&kv[i++],
3279 NSTAT_SYSINFO_API_IP_FLTR_ADD_OS,
3280 data->u.net_api_stats.net_api_stats.nas_ipf_add_os_total);
3281 nstat_set_keyval_scalar(&kv[i++],
3282 NSTAT_SYSINFO_API_SOCK_FLTR_ATTACH,
3283 data->u.net_api_stats.net_api_stats.nas_sfltr_register_total);
3284 nstat_set_keyval_scalar(&kv[i++],
3285 NSTAT_SYSINFO_API_SOCK_FLTR_ATTACH_OS,
3286 data->u.net_api_stats.net_api_stats.nas_sfltr_register_os_total);
3287
3288
3289 nstat_set_keyval_scalar(&kv[i++],
3290 NSTAT_SYSINFO_API_SOCK_ALLOC_TOTAL,
3291 data->u.net_api_stats.net_api_stats.nas_socket_alloc_total);
3292 nstat_set_keyval_scalar(&kv[i++],
3293 NSTAT_SYSINFO_API_SOCK_ALLOC_KERNEL,
3294 data->u.net_api_stats.net_api_stats.nas_socket_in_kernel_total);
3295 nstat_set_keyval_scalar(&kv[i++],
3296 NSTAT_SYSINFO_API_SOCK_ALLOC_KERNEL_OS,
3297 data->u.net_api_stats.net_api_stats.nas_socket_in_kernel_os_total);
3298 nstat_set_keyval_scalar(&kv[i++],
3299 NSTAT_SYSINFO_API_SOCK_NECP_CLIENTUUID,
3300 data->u.net_api_stats.net_api_stats.nas_socket_necp_clientuuid_total);
3301
3302 nstat_set_keyval_scalar(&kv[i++],
3303 NSTAT_SYSINFO_API_SOCK_DOMAIN_LOCAL,
3304 data->u.net_api_stats.net_api_stats.nas_socket_domain_local_total);
3305 nstat_set_keyval_scalar(&kv[i++],
3306 NSTAT_SYSINFO_API_SOCK_DOMAIN_ROUTE,
3307 data->u.net_api_stats.net_api_stats.nas_socket_domain_route_total);
3308 nstat_set_keyval_scalar(&kv[i++],
3309 NSTAT_SYSINFO_API_SOCK_DOMAIN_INET,
3310 data->u.net_api_stats.net_api_stats.nas_socket_domain_inet_total);
3311 nstat_set_keyval_scalar(&kv[i++],
3312 NSTAT_SYSINFO_API_SOCK_DOMAIN_INET6,
3313 data->u.net_api_stats.net_api_stats.nas_socket_domain_inet6_total);
3314 nstat_set_keyval_scalar(&kv[i++],
3315 NSTAT_SYSINFO_API_SOCK_DOMAIN_SYSTEM,
3316 data->u.net_api_stats.net_api_stats.nas_socket_domain_system_total);
3317 nstat_set_keyval_scalar(&kv[i++],
3318 NSTAT_SYSINFO_API_SOCK_DOMAIN_MULTIPATH,
3319 data->u.net_api_stats.net_api_stats.nas_socket_domain_multipath_total);
3320 nstat_set_keyval_scalar(&kv[i++],
3321 NSTAT_SYSINFO_API_SOCK_DOMAIN_KEY,
3322 data->u.net_api_stats.net_api_stats.nas_socket_domain_key_total);
3323 nstat_set_keyval_scalar(&kv[i++],
3324 NSTAT_SYSINFO_API_SOCK_DOMAIN_NDRV,
3325 data->u.net_api_stats.net_api_stats.nas_socket_domain_ndrv_total);
3326 nstat_set_keyval_scalar(&kv[i++],
3327 NSTAT_SYSINFO_API_SOCK_DOMAIN_OTHER,
3328 data->u.net_api_stats.net_api_stats.nas_socket_domain_other_total);
3329
3330 nstat_set_keyval_scalar(&kv[i++],
3331 NSTAT_SYSINFO_API_SOCK_INET_STREAM,
3332 data->u.net_api_stats.net_api_stats.nas_socket_inet_stream_total);
3333 nstat_set_keyval_scalar(&kv[i++],
3334 NSTAT_SYSINFO_API_SOCK_INET_DGRAM,
3335 data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_total);
3336 nstat_set_keyval_scalar(&kv[i++],
3337 NSTAT_SYSINFO_API_SOCK_INET_DGRAM_CONNECTED,
3338 data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_connected);
3339 nstat_set_keyval_scalar(&kv[i++],
3340 NSTAT_SYSINFO_API_SOCK_INET_DGRAM_DNS,
3341 data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_dns);
3342 nstat_set_keyval_scalar(&kv[i++],
3343 NSTAT_SYSINFO_API_SOCK_INET_DGRAM_NO_DATA,
3344 data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_no_data);
3345
3346 nstat_set_keyval_scalar(&kv[i++],
3347 NSTAT_SYSINFO_API_SOCK_INET6_STREAM,
3348 data->u.net_api_stats.net_api_stats.nas_socket_inet6_stream_total);
3349 nstat_set_keyval_scalar(&kv[i++],
3350 NSTAT_SYSINFO_API_SOCK_INET6_DGRAM,
3351 data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_total);
3352 nstat_set_keyval_scalar(&kv[i++],
3353 NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_CONNECTED,
3354 data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_connected);
3355 nstat_set_keyval_scalar(&kv[i++],
3356 NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_DNS,
3357 data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_dns);
3358 nstat_set_keyval_scalar(&kv[i++],
3359 NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_NO_DATA,
3360 data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_no_data);
3361
3362 nstat_set_keyval_scalar(&kv[i++],
3363 NSTAT_SYSINFO_API_SOCK_INET_MCAST_JOIN,
3364 data->u.net_api_stats.net_api_stats.nas_socket_mcast_join_total);
3365 nstat_set_keyval_scalar(&kv[i++],
3366 NSTAT_SYSINFO_API_SOCK_INET_MCAST_JOIN_OS,
3367 data->u.net_api_stats.net_api_stats.nas_socket_mcast_join_os_total);
3368
3369 nstat_set_keyval_scalar(&kv[i++],
3370 NSTAT_SYSINFO_API_NEXUS_FLOW_INET_STREAM,
3371 data->u.net_api_stats.net_api_stats.nas_nx_flow_inet_stream_total);
3372 nstat_set_keyval_scalar(&kv[i++],
3373 NSTAT_SYSINFO_API_NEXUS_FLOW_INET_DATAGRAM,
3374 data->u.net_api_stats.net_api_stats.nas_nx_flow_inet_dgram_total);
3375
3376 nstat_set_keyval_scalar(&kv[i++],
3377 NSTAT_SYSINFO_API_NEXUS_FLOW_INET6_STREAM,
3378 data->u.net_api_stats.net_api_stats.nas_nx_flow_inet6_stream_total);
3379 nstat_set_keyval_scalar(&kv[i++],
3380 NSTAT_SYSINFO_API_NEXUS_FLOW_INET6_DATAGRAM,
3381 data->u.net_api_stats.net_api_stats.nas_nx_flow_inet6_dgram_total);
3382
3383 nstat_set_keyval_scalar(&kv[i++],
3384 NSTAT_SYSINFO_API_IFNET_ALLOC,
3385 data->u.net_api_stats.net_api_stats.nas_ifnet_alloc_total);
3386 nstat_set_keyval_scalar(&kv[i++],
3387 NSTAT_SYSINFO_API_IFNET_ALLOC_OS,
3388 data->u.net_api_stats.net_api_stats.nas_ifnet_alloc_os_total);
3389
3390 nstat_set_keyval_scalar(&kv[i++],
3391 NSTAT_SYSINFO_API_PF_ADDRULE,
3392 data->u.net_api_stats.net_api_stats.nas_pf_addrule_total);
3393 nstat_set_keyval_scalar(&kv[i++],
3394 NSTAT_SYSINFO_API_PF_ADDRULE_OS,
3395 data->u.net_api_stats.net_api_stats.nas_pf_addrule_os);
3396
3397 nstat_set_keyval_scalar(&kv[i++],
3398 NSTAT_SYSINFO_API_VMNET_START,
3399 data->u.net_api_stats.net_api_stats.nas_vmnet_total);
3400
3401
3402 nstat_set_keyval_scalar(&kv[i++],
3403 NSTAT_SYSINFO_API_REPORT_INTERVAL,
3404 data->u.net_api_stats.report_interval);
3405
3406 break;
3407 }
3408 }
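/*
 * All requested keyvals have been populated; size the message from the
 * number of keyvals actually written, enqueue it on the client's kernel
 * control socket, and release the allocation.
 */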
3409 if (syscnt != NULL) {
3410 VERIFY(i > 0 && i <= nkeyvals);
3411 countsize = offsetof(nstat_sysinfo_counts,
3412 nstat_sysinfo_keyvals) +
3413 sizeof(nstat_sysinfo_keyval) * i;
3414 finalsize += countsize;
3415 syscnt->hdr.type = NSTAT_MSG_TYPE_SYSINFO_COUNTS;
3416 syscnt->hdr.length = finalsize;
3417 syscnt->counts.nstat_sysinfo_len = countsize;
3418
3419 result = ctl_enqueuedata(control->ncs_kctl,
3420 control->ncs_unit, syscnt, finalsize, CTL_DATA_EOR);
3421 if (result != 0) {
3422 nstat_stats.nstat_sysinfofailures += 1;
3423 }
3424 OSFree(syscnt, allocsize, nstat_malloc_tag);
3425 }
3426 return;
3427 }
3428
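/*
 * Walk the global list of client control states and hand the sysinfo
 * data to every client that subscribed via
 * NSTAT_MSG_TYPE_SUBSCRIBE_SYSINFO (NSTAT_FLAG_SYSINFO_SUBSCRIBED).
 */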
3429 __private_extern__ void
3430 nstat_sysinfo_send_data(
3431 nstat_sysinfo_data *data)
3432 {
3433 nstat_control_state *control;
3434
3435 lck_mtx_lock(&nstat_mtx);
3436 for (control = nstat_controls; control; control = control->ncs_next) {
3437 lck_mtx_lock(&control->ncs_mtx);
3438 if ((control->ncs_flags & NSTAT_FLAG_SYSINFO_SUBSCRIBED) != 0) {
3439 nstat_sysinfo_send_data_internal(control, data);
3440 }
3441 lck_mtx_unlock(&control->ncs_mtx);
3442 }
3443 lck_mtx_unlock(&nstat_mtx);
3444 }
3445
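/*
 * Trigger the periodic system-level reports.  This is driven by the idle
 * timer (see nstat_idle_check below), so each report path is expected to
 * rate-limit itself, as nstat_net_api_report_stats does with
 * net_api_stats_report_interval.
 */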
3446 static void
3447 nstat_sysinfo_generate_report(void)
3448 {
3449 mbuf_report_peak_usage();
3450 tcp_report_stats();
3451 nstat_ifnet_report_ecn_stats();
3452 nstat_ifnet_report_lim_stats();
3453 nstat_net_api_report_stats();
3454 }
3455
3456 #pragma mark -- net_api --
3457
3458 static struct net_api_stats net_api_stats_before;
3459 static u_int64_t net_api_stats_last_report_time;
3460
3461 static void
3462 nstat_net_api_report_stats(void)
3463 {
3464 struct nstat_sysinfo_data data;
3465 struct nstat_sysinfo_net_api_stats *st = &data.u.net_api_stats;
3466 u_int64_t uptime;
3467
3468 uptime = net_uptime();
3469
3470 if ((u_int32_t)(uptime - net_api_stats_last_report_time) <
3471 net_api_stats_report_interval) {
3472 return;
3473 }
3474
3475 st->report_interval = uptime - net_api_stats_last_report_time;
3476 net_api_stats_last_report_time = uptime;
3477
3478 data.flags = NSTAT_SYSINFO_NET_API_STATS;
3479 data.unsent_data_cnt = 0;
3480
3481 /*
3482 * Some fields in the report carry the current value while others
3483 * carry the delta since the last report:
3484 * - Report the difference for per-flow counters, as they increase
3485 * monotonically over time
3486 * - Report the current value for the other counters, as they tend not
3487 * to change much over time
3488 */
3489 #define STATCOPY(f) \
3490 (st->net_api_stats.f = net_api_stats.f)
3491 #define STATDIFF(f) \
3492 (st->net_api_stats.f = net_api_stats.f - net_api_stats_before.f)
3493
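/*
 * For example: if nas_socket_alloc_total was 100 at the previous report
 * and is 140 now, STATDIFF reports 40 for this interval, while STATCOPY
 * would report the current value, 140.
 */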
3494 STATCOPY(nas_iflt_attach_count);
3495 STATCOPY(nas_iflt_attach_total);
3496 STATCOPY(nas_iflt_attach_os_total);
3497
3498 STATCOPY(nas_ipf_add_count);
3499 STATCOPY(nas_ipf_add_total);
3500 STATCOPY(nas_ipf_add_os_total);
3501
3502 STATCOPY(nas_sfltr_register_count);
3503 STATCOPY(nas_sfltr_register_total);
3504 STATCOPY(nas_sfltr_register_os_total);
3505
3506 STATDIFF(nas_socket_alloc_total);
3507 STATDIFF(nas_socket_in_kernel_total);
3508 STATDIFF(nas_socket_in_kernel_os_total);
3509 STATDIFF(nas_socket_necp_clientuuid_total);
3510
3511 STATDIFF(nas_socket_domain_local_total);
3512 STATDIFF(nas_socket_domain_route_total);
3513 STATDIFF(nas_socket_domain_inet_total);
3514 STATDIFF(nas_socket_domain_inet6_total);
3515 STATDIFF(nas_socket_domain_system_total);
3516 STATDIFF(nas_socket_domain_multipath_total);
3517 STATDIFF(nas_socket_domain_key_total);
3518 STATDIFF(nas_socket_domain_ndrv_total);
3519 STATDIFF(nas_socket_domain_other_total);
3520
3521 STATDIFF(nas_socket_inet_stream_total);
3522 STATDIFF(nas_socket_inet_dgram_total);
3523 STATDIFF(nas_socket_inet_dgram_connected);
3524 STATDIFF(nas_socket_inet_dgram_dns);
3525 STATDIFF(nas_socket_inet_dgram_no_data);
3526
3527 STATDIFF(nas_socket_inet6_stream_total);
3528 STATDIFF(nas_socket_inet6_dgram_total);
3529 STATDIFF(nas_socket_inet6_dgram_connected);
3530 STATDIFF(nas_socket_inet6_dgram_dns);
3531 STATDIFF(nas_socket_inet6_dgram_no_data);
3532
3533 STATDIFF(nas_socket_mcast_join_total);
3534 STATDIFF(nas_socket_mcast_join_os_total);
3535
3536 STATDIFF(nas_sock_inet6_stream_exthdr_in);
3537 STATDIFF(nas_sock_inet6_stream_exthdr_out);
3538 STATDIFF(nas_sock_inet6_dgram_exthdr_in);
3539 STATDIFF(nas_sock_inet6_dgram_exthdr_out);
3540
3541 STATDIFF(nas_nx_flow_inet_stream_total);
3542 STATDIFF(nas_nx_flow_inet_dgram_total);
3543
3544 STATDIFF(nas_nx_flow_inet6_stream_total);
3545 STATDIFF(nas_nx_flow_inet6_dgram_total);
3546
3547 STATCOPY(nas_ifnet_alloc_count);
3548 STATCOPY(nas_ifnet_alloc_total);
3549 STATCOPY(nas_ifnet_alloc_os_count);
3550 STATCOPY(nas_ifnet_alloc_os_total);
3551
3552 STATCOPY(nas_pf_addrule_total);
3553 STATCOPY(nas_pf_addrule_os);
3554
3555 STATCOPY(nas_vmnet_total);
3556
3557 #undef STATCOPY
3558 #undef STATDIFF
3559
3560 nstat_sysinfo_send_data(&data);
3561
3562 /*
3563 * Save a copy of the current fields so we can diff them the next time
3564 */
3565 memcpy(&net_api_stats_before, &net_api_stats,
3566 sizeof(struct net_api_stats));
3567 _CASSERT(sizeof(net_api_stats_before) == sizeof(net_api_stats));
3568 }
3569
3570
3571 #pragma mark -- Kernel Control Socket --
3572
3573 static kern_ctl_ref nstat_ctlref = NULL;
3574 static lck_grp_t *nstat_lck_grp = NULL;
3575
3576 static errno_t nstat_control_connect(kern_ctl_ref kctl, struct sockaddr_ctl *sac, void **uinfo);
3577 static errno_t nstat_control_disconnect(kern_ctl_ref kctl, u_int32_t unit, void *uinfo);
3578 static errno_t nstat_control_send(kern_ctl_ref kctl, u_int32_t unit, void *uinfo, mbuf_t m, int flags);
3579
3580 static errno_t
3581 nstat_enqueue_success(
3582 uint64_t context,
3583 nstat_control_state *state,
3584 u_int16_t flags)
3585 {
3586 nstat_msg_hdr success;
3587 errno_t result;
3588
3589 bzero(&success, sizeof(success));
3590 success.context = context;
3591 success.type = NSTAT_MSG_TYPE_SUCCESS;
3592 success.length = sizeof(success);
3593 success.flags = flags;
3594 result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &success,
3595 sizeof(success), CTL_DATA_EOR | CTL_DATA_CRIT);
3596 if (result != 0) {
3597 if (nstat_debug != 0) {
3598 printf("%s: could not enqueue success message %d\n",
3599 __func__, result);
3600 }
3601 nstat_stats.nstat_successmsgfailures += 1;
3602 }
3603 return result;
3604 }
3605
3606 static errno_t
3607 nstat_control_send_event(
3608 nstat_control_state *state,
3609 nstat_src *src,
3610 u_int64_t event)
3611 {
3612 errno_t result = 0;
3613 int failed = 0;
3614
3615 if (nstat_control_reporting_allowed(state, src)) {
3616 if ((state->ncs_flags & NSTAT_FLAG_SUPPORTS_UPDATES) != 0) {
3617 result = nstat_control_send_update(state, src, 0, event, 0, NULL);
3618 if (result != 0) {
3619 failed = 1;
3620 if (nstat_debug != 0) {
3621 printf("%s - nstat_control_send_event() %d\n", __func__, result);
3622 }
3623 }
3624 } else {
3625 if (nstat_debug != 0) {
3626 printf("%s - nstat_control_send_event() used when updates not supported\n", __func__);
3627 }
3628 }
3629 }
3630 return result;
3631 }
3632
3633 static errno_t
3634 nstat_control_send_goodbye(
3635 nstat_control_state *state,
3636 nstat_src *src)
3637 {
3638 errno_t result = 0;
3639 int failed = 0;
3640
3641 if (nstat_control_reporting_allowed(state, src)) {
3642 if ((state->ncs_flags & NSTAT_FLAG_SUPPORTS_UPDATES) != 0) {
3643 result = nstat_control_send_update(state, src, 0, 0, NSTAT_MSG_HDR_FLAG_CLOSING, NULL);
3644 if (result != 0) {
3645 failed = 1;
3646 if (nstat_debug != 0) {
3647 printf("%s - nstat_control_send_update() %d\n", __func__, result);
3648 }
3649 }
3650 } else {
3651 // send one last counts notification
3652 result = nstat_control_send_counts(state, src, 0, NSTAT_MSG_HDR_FLAG_CLOSING, NULL);
3653 if (result != 0) {
3654 failed = 1;
3655 if (nstat_debug != 0) {
3656 printf("%s - nstat_control_send_counts() %d\n", __func__, result);
3657 }
3658 }
3659
3660 // send a last description
3661 result = nstat_control_send_description(state, src, 0, NSTAT_MSG_HDR_FLAG_CLOSING);
3662 if (result != 0) {
3663 failed = 1;
3664 if (nstat_debug != 0) {
3665 printf("%s - nstat_control_send_description() %d\n", __func__, result);
3666 }
3667 }
3668 }
3669 }
3670
3671 // send the source removed notification
3672 result = nstat_control_send_removed(state, src);
3673 if (result != 0 && nstat_debug) {
3674 failed = 1;
3675 if (nstat_debug != 0) {
3676 printf("%s - nstat_control_send_removed() %d\n", __func__, result);
3677 }
3678 }
3679
3680 if (failed != 0) {
3681 nstat_stats.nstat_control_send_goodbye_failures++;
3682 }
3683
3684
3685 return result;
3686 }
3687
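/*
 * Push any accumulated messages to the client as a single kernel control
 * datagram.  On enqueue failure the mbuf is freed and the loss counted;
 * in either case the accumulation buffer is reset.
 */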
3688 static errno_t
3689 nstat_flush_accumulated_msgs(
3690 nstat_control_state *state)
3691 {
3692 errno_t result = 0;
3693 if (state->ncs_accumulated != NULL && mbuf_len(state->ncs_accumulated) > 0) {
3694 mbuf_pkthdr_setlen(state->ncs_accumulated, mbuf_len(state->ncs_accumulated));
3695 result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, state->ncs_accumulated, CTL_DATA_EOR);
3696 if (result != 0) {
3697 nstat_stats.nstat_flush_accumulated_msgs_failures++;
3698 if (nstat_debug != 0) {
3699 printf("%s - ctl_enqueuembuf failed: %d\n", __func__, result);
3700 }
3701 mbuf_freem(state->ncs_accumulated);
3702 }
3703 state->ncs_accumulated = NULL;
3704 }
3705 return result;
3706 }
3707
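/*
 * Append a message to the per-client accumulation mbuf so that several
 * small messages can be delivered in one datagram.  If the current mbuf
 * lacks trailing space it is flushed first; if allocation or copyback
 * fails, fall back to enqueueing the message directly.
 */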
3708 static errno_t
3709 nstat_accumulate_msg(
3710 nstat_control_state *state,
3711 nstat_msg_hdr *hdr,
3712 size_t length)
3713 {
3714 if (state->ncs_accumulated && mbuf_trailingspace(state->ncs_accumulated) < length) {
3715 // Will send the current mbuf
3716 nstat_flush_accumulated_msgs(state);
3717 }
3718
3719 errno_t result = 0;
3720
3721 if (state->ncs_accumulated == NULL) {
3722 unsigned int one = 1;
3723 if (mbuf_allocpacket(MBUF_DONTWAIT, NSTAT_MAX_MSG_SIZE, &one, &state->ncs_accumulated) != 0) {
3724 if (nstat_debug != 0) {
3725 printf("%s - mbuf_allocpacket failed\n", __func__);
3726 }
3727 result = ENOMEM;
3728 } else {
3729 mbuf_setlen(state->ncs_accumulated, 0);
3730 }
3731 }
3732
3733 if (result == 0) {
3734 hdr->length = length;
3735 result = mbuf_copyback(state->ncs_accumulated, mbuf_len(state->ncs_accumulated),
3736 length, hdr, MBUF_DONTWAIT);
3737 }
3738
3739 if (result != 0) {
3740 nstat_flush_accumulated_msgs(state);
3741 if (nstat_debug != 0) {
3742 printf("%s - resorting to ctl_enqueuedata\n", __func__);
3743 }
3744 result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, hdr, length, CTL_DATA_EOR);
3745 }
3746
3747 if (result != 0) {
3748 nstat_stats.nstat_accumulate_msg_failures++;
3749 }
3750
3751 return result;
3752 }
3753
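/*
 * Idle timer: with nstat_mtx held, sweep each control state for sources
 * whose provider reports them gone, sending goodbyes and deferring the
 * actual release until all locks are dropped.  Clients that requested
 * counts since the last sweep (NSTAT_FLAG_REQCOUNTS) are skipped for one
 * round.  The timer reschedules itself every 60 seconds while any client
 * remains connected.
 */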
3754 static void*
3755 nstat_idle_check(
3756 __unused thread_call_param_t p0,
3757 __unused thread_call_param_t p1)
3758 {
3759 nstat_control_state *control;
3760 nstat_src *src, *tmpsrc;
3761 tailq_head_nstat_src dead_list;
3762 TAILQ_INIT(&dead_list);
3763
3764 lck_mtx_lock(&nstat_mtx);
3765
3766 nstat_idle_time = 0;
3767
3768 for (control = nstat_controls; control; control = control->ncs_next) {
3769 lck_mtx_lock(&control->ncs_mtx);
3770 if (!(control->ncs_flags & NSTAT_FLAG_REQCOUNTS)) {
3771 TAILQ_FOREACH_SAFE(src, &control->ncs_src_queue, ns_control_link, tmpsrc)
3772 {
3773 if (src->provider->nstat_gone(src->cookie)) {
3774 errno_t result;
3775
3776 // Pull it off the list
3777 TAILQ_REMOVE(&control->ncs_src_queue, src, ns_control_link);
3778
3779 result = nstat_control_send_goodbye(control, src);
3780
3781 // Put this on the list to release later
3782 TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
3783 }
3784 }
3785 }
3786 control->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS;
3787 lck_mtx_unlock(&control->ncs_mtx);
3788 }
3789
3790 if (nstat_controls) {
3791 clock_interval_to_deadline(60, NSEC_PER_SEC, &nstat_idle_time);
3792 thread_call_func_delayed((thread_call_func_t)nstat_idle_check, NULL, nstat_idle_time);
3793 }
3794
3795 lck_mtx_unlock(&nstat_mtx);
3796
3797 /* Generate any system level reports, if needed */
3798 nstat_sysinfo_generate_report();
3799
3800 // Release the sources now that we aren't holding lots of locks
3801 while ((src = TAILQ_FIRST(&dead_list))) {
3802 TAILQ_REMOVE(&dead_list, src, ns_control_link);
3803 nstat_control_cleanup_source(NULL, src, FALSE);
3804 }
3805
3806
3807 return NULL;
3808 }
3809
3810 static void
3811 nstat_control_register(void)
3812 {
3813 // Create our lock group first
3814 lck_grp_attr_t *grp_attr = lck_grp_attr_alloc_init();
3815 lck_grp_attr_setdefault(grp_attr);
3816 nstat_lck_grp = lck_grp_alloc_init("network statistics kctl", grp_attr);
3817 lck_grp_attr_free(grp_attr);
3818
3819 lck_mtx_init(&nstat_mtx, nstat_lck_grp, NULL);
3820
3821 // Register the control
3822 struct kern_ctl_reg nstat_control;
3823 bzero(&nstat_control, sizeof(nstat_control));
3824 strlcpy(nstat_control.ctl_name, NET_STAT_CONTROL_NAME, sizeof(nstat_control.ctl_name));
3825 nstat_control.ctl_flags = CTL_FLAG_REG_EXTENDED | CTL_FLAG_REG_CRIT;
3826 nstat_control.ctl_sendsize = nstat_sendspace;
3827 nstat_control.ctl_recvsize = nstat_recvspace;
3828 nstat_control.ctl_connect = nstat_control_connect;
3829 nstat_control.ctl_disconnect = nstat_control_disconnect;
3830 nstat_control.ctl_send = nstat_control_send;
3831
3832 ctl_register(&nstat_control, &nstat_ctlref);
3833 }
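/*
 * A minimal user-space sketch of connecting to this kernel control,
 * assuming the usual PF_SYSTEM/SYSPROTO_CONTROL pattern (error handling
 * omitted; NET_STAT_CONTROL_NAME comes from a private header, so clients
 * without it would use the literal control name instead):
 *
 *	int fd = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);
 *
 *	struct ctl_info info;
 *	memset(&info, 0, sizeof(info));
 *	strlcpy(info.ctl_name, NET_STAT_CONTROL_NAME, sizeof(info.ctl_name));
 *	ioctl(fd, CTLIOCGINFO, &info);          // resolve name -> ctl_id
 *
 *	struct sockaddr_ctl sc;
 *	memset(&sc, 0, sizeof(sc));
 *	sc.sc_len = sizeof(sc);
 *	sc.sc_family = AF_SYSTEM;
 *	sc.ss_sysaddr = AF_SYS_CONTROL;
 *	sc.sc_id = info.ctl_id;
 *	sc.sc_unit = 0;                         // let the stack pick a unit
 *	connect(fd, (struct sockaddr *)&sc, sizeof(sc));
 *
 * After this, nstat_msg_* requests are written to fd and replies come
 * back as CTL_DATA_EOR-delimited datagrams via recv/read.
 */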
3834
3835 static void
3836 nstat_control_cleanup_source(
3837 nstat_control_state *state,
3838 struct nstat_src *src,
3839 boolean_t locked)
3840 {
3841 errno_t result;
3842
3843 if (state) {
3844 result = nstat_control_send_removed(state, src);
3845 if (result != 0) {
3846 nstat_stats.nstat_control_cleanup_source_failures++;
3847 if (nstat_debug != 0) {
3848 printf("%s - nstat_control_send_removed() %d\n",
3849 __func__, result);
3850 }
3851 }
3852 }
3853 // Cleanup the source if we found it.
3854 src->provider->nstat_release(src->cookie, locked);
3855 OSFree(src, sizeof(*src), nstat_malloc_tag);
3856 }
3857
3858
3859 static bool
3860 nstat_control_reporting_allowed(
3861 nstat_control_state *state,
3862 nstat_src *src)
3863 {
3864 if (src->provider->nstat_reporting_allowed == NULL) {
3865 return TRUE;
3866 }
3867
3868 return src->provider->nstat_reporting_allowed(src->cookie,
3869 &state->ncs_provider_filters[src->provider->nstat_provider_id]);
3870 }
3871
3872
3873 static errno_t
3874 nstat_control_connect(
3875 kern_ctl_ref kctl,
3876 struct sockaddr_ctl *sac,
3877 void **uinfo)
3878 {
3879 nstat_control_state *state = OSMalloc(sizeof(*state), nstat_malloc_tag);
3880 if (state == NULL) {
3881 return ENOMEM;
3882 }
3883
3884 bzero(state, sizeof(*state));
3885 lck_mtx_init(&state->ncs_mtx, nstat_lck_grp, NULL);
3886 state->ncs_kctl = kctl;
3887 state->ncs_unit = sac->sc_unit;
3888 state->ncs_flags = NSTAT_FLAG_REQCOUNTS;
3889 *uinfo = state;
3890
3891 lck_mtx_lock(&nstat_mtx);
3892 state->ncs_next = nstat_controls;
3893 nstat_controls = state;
3894
3895 if (nstat_idle_time == 0) {
3896 clock_interval_to_deadline(60, NSEC_PER_SEC, &nstat_idle_time);
3897 thread_call_func_delayed((thread_call_func_t)nstat_idle_check, NULL, nstat_idle_time);
3898 }
3899
3900 lck_mtx_unlock(&nstat_mtx);
3901
3902 return 0;
3903 }
3904
3905 static errno_t
3906 nstat_control_disconnect(
3907 __unused kern_ctl_ref kctl,
3908 __unused u_int32_t unit,
3909 void *uinfo)
3910 {
3911 u_int32_t watching;
3912 nstat_control_state *state = (nstat_control_state*)uinfo;
3913 tailq_head_nstat_src cleanup_list;
3914 nstat_src *src;
3915
3916 TAILQ_INIT(&cleanup_list);
3917
3918 // pull it out of the global list of states
3919 lck_mtx_lock(&nstat_mtx);
3920 nstat_control_state **statepp;
3921 for (statepp = &nstat_controls; *statepp; statepp = &(*statepp)->ncs_next) {
3922 if (*statepp == state) {
3923 *statepp = state->ncs_next;
3924 break;
3925 }
3926 }
3927 lck_mtx_unlock(&nstat_mtx);
3928
3929 lck_mtx_lock(&state->ncs_mtx);
3930 // Stop watching for sources
3931 nstat_provider *provider;
3932 watching = state->ncs_watching;
3933 state->ncs_watching = 0;
3934 for (provider = nstat_providers; provider && watching; provider = provider->next) {
3935 if ((watching & (1 << provider->nstat_provider_id)) != 0) {
3936 watching &= ~(1 << provider->nstat_provider_id);
3937 provider->nstat_watcher_remove(state);
3938 }
3939 }
3940
3941 // set cleanup flags
3942 state->ncs_flags |= NSTAT_FLAG_CLEANUP;
3943
3944 if (state->ncs_accumulated) {
3945 mbuf_freem(state->ncs_accumulated);
3946 state->ncs_accumulated = NULL;
3947 }
3948
3949 // Copy out the list of sources
3950 TAILQ_CONCAT(&cleanup_list, &state->ncs_src_queue, ns_control_link);
3951 lck_mtx_unlock(&state->ncs_mtx);
3952
3953 while ((src = TAILQ_FIRST(&cleanup_list))) {
3954 TAILQ_REMOVE(&cleanup_list, src, ns_control_link);
3955 nstat_control_cleanup_source(NULL, src, FALSE);
3956 }
3957
3958 lck_mtx_destroy(&state->ncs_mtx, nstat_lck_grp);
3959 OSFree(state, sizeof(*state), nstat_malloc_tag);
3960
3961 return 0;
3962 }
3963
3964 static nstat_src_ref_t
3965 nstat_control_next_src_ref(
3966 nstat_control_state *state)
3967 {
3968 return ++state->ncs_next_srcref;
3969 }
3970
3971 static errno_t
3972 nstat_control_send_counts(
3973 nstat_control_state *state,
3974 nstat_src *src,
3975 unsigned long long context,
3976 u_int16_t hdr_flags,
3977 int *gone)
3978 {
3979 nstat_msg_src_counts counts;
3980 errno_t result = 0;
3981
3982 /* Some providers may not have any counts to send */
3983 if (src->provider->nstat_counts == NULL) {
3984 return 0;
3985 }
3986
3987 bzero(&counts, sizeof(counts));
3988 counts.hdr.type = NSTAT_MSG_TYPE_SRC_COUNTS;
3989 counts.hdr.length = sizeof(counts);
3990 counts.hdr.flags = hdr_flags;
3991 counts.hdr.context = context;
3992 counts.srcref = src->srcref;
3993 counts.event_flags = 0;
3994
3995 if (src->provider->nstat_counts(src->cookie, &counts.counts, gone) == 0) {
3996 if ((src->filter & NSTAT_FILTER_NOZEROBYTES) &&
3997 counts.counts.nstat_rxbytes == 0 &&
3998 counts.counts.nstat_txbytes == 0) {
3999 result = EAGAIN;
4000 } else {
4001 result = ctl_enqueuedata(state->ncs_kctl,
4002 state->ncs_unit, &counts, sizeof(counts),
4003 CTL_DATA_EOR);
4004 if (result != 0) {
4005 nstat_stats.nstat_sendcountfailures += 1;
4006 }
4007 }
4008 }
4009 return result;
4010 }
4011
4012 static errno_t
4013 nstat_control_append_counts(
4014 nstat_control_state *state,
4015 nstat_src *src,
4016 int *gone)
4017 {
4018 /* Some providers may not have any counts to send */
4019 if (!src->provider->nstat_counts) {
4020 return 0;
4021 }
4022
4023 nstat_msg_src_counts counts;
4024 bzero(&counts, sizeof(counts));
4025 counts.hdr.type = NSTAT_MSG_TYPE_SRC_COUNTS;
4026 counts.hdr.length = sizeof(counts);
4027 counts.srcref = src->srcref;
4028 counts.event_flags = 0;
4029
4030 errno_t result = 0;
4031 result = src->provider->nstat_counts(src->cookie, &counts.counts, gone);
4032 if (result != 0) {
4033 return result;
4034 }
4035
4036 if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
4037 counts.counts.nstat_rxbytes == 0 && counts.counts.nstat_txbytes == 0) {
4038 return EAGAIN;
4039 }
4040
4041 return nstat_accumulate_msg(state, &counts.hdr, counts.hdr.length);
4042 }
4043
4044 static int
4045 nstat_control_send_description(
4046 nstat_control_state *state,
4047 nstat_src *src,
4048 u_int64_t context,
4049 u_int16_t hdr_flags)
4050 {
4051 // Provider doesn't support getting the descriptor? Done.
4052 if (src->provider->nstat_descriptor_length == 0 ||
4053 src->provider->nstat_copy_descriptor == NULL) {
4054 return EOPNOTSUPP;
4055 }
4056
4057 // Allocate storage for the descriptor message
4058 mbuf_t msg;
4059 unsigned int one = 1;
4060 u_int32_t size = offsetof(nstat_msg_src_description, data) + src->provider->nstat_descriptor_length;
4061 if (mbuf_allocpacket(MBUF_DONTWAIT, size, &one, &msg) != 0) {
4062 return ENOMEM;
4063 }
4064
4065 nstat_msg_src_description *desc = (nstat_msg_src_description*)mbuf_data(msg);
4066 bzero(desc, size);
4067 mbuf_setlen(msg, size);
4068 mbuf_pkthdr_setlen(msg, mbuf_len(msg));
4069
4070 // Query the provider for the provider specific bits
4071 errno_t result = src->provider->nstat_copy_descriptor(src->cookie, desc->data, src->provider->nstat_descriptor_length);
4072
4073 if (result != 0) {
4074 mbuf_freem(msg);
4075 return result;
4076 }
4077
4078 desc->hdr.context = context;
4079 desc->hdr.type = NSTAT_MSG_TYPE_SRC_DESC;
4080 desc->hdr.length = size;
4081 desc->hdr.flags = hdr_flags;
4082 desc->srcref = src->srcref;
4083 desc->event_flags = 0;
4084 desc->provider = src->provider->nstat_provider_id;
4085
4086 result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg, CTL_DATA_EOR);
4087 if (result != 0) {
4088 nstat_stats.nstat_descriptionfailures += 1;
4089 mbuf_freem(msg);
4090 }
4091
4092 return result;
4093 }
4094
4095 static errno_t
4096 nstat_control_append_description(
4097 nstat_control_state *state,
4098 nstat_src *src)
4099 {
4100 size_t size = offsetof(nstat_msg_src_description, data) + src->provider->nstat_descriptor_length;
4101 if (size > 512 || src->provider->nstat_descriptor_length == 0 ||
4102 src->provider->nstat_copy_descriptor == NULL) {
4103 return EOPNOTSUPP;
4104 }
4105
4106 // Fill out a buffer on the stack; we will copy it to the mbuf later
4107 u_int64_t buffer[size / sizeof(u_int64_t) + 1]; // u_int64_t to ensure alignment
4108 bzero(buffer, size);
4109
4110 nstat_msg_src_description *desc = (nstat_msg_src_description*)buffer;
4111 desc->hdr.type = NSTAT_MSG_TYPE_SRC_DESC;
4112 desc->hdr.length = size;
4113 desc->srcref = src->srcref;
4114 desc->event_flags = 0;
4115 desc->provider = src->provider->nstat_provider_id;
4116
4117 errno_t result = 0;
4118 // Fill in the description
4119 // Query the provider for the provider specific bits
4120 result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
4121 src->provider->nstat_descriptor_length);
4122 if (result != 0) {
4123 return result;
4124 }
4125
4126 return nstat_accumulate_msg(state, &desc->hdr, size);
4127 }
4128
4129 static int
4130 nstat_control_send_update(
4131 nstat_control_state *state,
4132 nstat_src *src,
4133 u_int64_t context,
4134 u_int64_t event,
4135 u_int16_t hdr_flags,
4136 int *gone)
4137 {
4138 // Provider doesn't support getting the descriptor or counts? Done.
4139 if ((src->provider->nstat_descriptor_length == 0 ||
4140 src->provider->nstat_copy_descriptor == NULL) &&
4141 src->provider->nstat_counts == NULL) {
4142 return EOPNOTSUPP;
4143 }
4144
4145 // Allocate storage for the descriptor message
4146 mbuf_t msg;
4147 unsigned int one = 1;
4148 u_int32_t size = offsetof(nstat_msg_src_update, data) +
4149 src->provider->nstat_descriptor_length;
4150 if (mbuf_allocpacket(MBUF_DONTWAIT, size, &one, &msg) != 0) {
4151 return ENOMEM;
4152 }
4153
4154 nstat_msg_src_update *desc = (nstat_msg_src_update*)mbuf_data(msg);
4155 bzero(desc, size);
4156 desc->hdr.context = context;
4157 desc->hdr.type = NSTAT_MSG_TYPE_SRC_UPDATE;
4158 desc->hdr.length = size;
4159 desc->hdr.flags = hdr_flags;
4160 desc->srcref = src->srcref;
4161 desc->event_flags = event;
4162 desc->provider = src->provider->nstat_provider_id;
4163
4164 mbuf_setlen(msg, size);
4165 mbuf_pkthdr_setlen(msg, mbuf_len(msg));
4166
4167 errno_t result = 0;
4168 if (src->provider->nstat_descriptor_length != 0 && src->provider->nstat_copy_descriptor) {
4169 // Query the provider for the provider specific bits
4170 result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
4171 src->provider->nstat_descriptor_length);
4172 if (result != 0) {
4173 mbuf_freem(msg);
4174 return result;
4175 }
4176 }
4177
4178 if (src->provider->nstat_counts) {
4179 result = src->provider->nstat_counts(src->cookie, &desc->counts, gone);
4180 if (result == 0) {
4181 if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
4182 desc->counts.nstat_rxbytes == 0 && desc->counts.nstat_txbytes == 0) {
4183 result = EAGAIN;
4184 } else {
4185 result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg, CTL_DATA_EOR);
4186 }
4187 }
4188 }
4189
4190 if (result != 0) {
4191 nstat_stats.nstat_srcupatefailures += 1;
4192 mbuf_freem(msg);
4193 }
4194
4195 return result;
4196 }
4197
4198 static errno_t
4199 nstat_control_append_update(
4200 nstat_control_state *state,
4201 nstat_src *src,
4202 int *gone)
4203 {
4204 size_t size = offsetof(nstat_msg_src_update, data) + src->provider->nstat_descriptor_length;
4205 if (size > 512 || ((src->provider->nstat_descriptor_length == 0 ||
4206 src->provider->nstat_copy_descriptor == NULL) &&
4207 src->provider->nstat_counts == NULL)) {
4208 return EOPNOTSUPP;
4209 }
4210
4211 // Fill out a buffer on the stack; we will copy it to the mbuf later
4212 u_int64_t buffer[size / sizeof(u_int64_t) + 1]; // u_int64_t to ensure alignment
4213 bzero(buffer, size);
4214
4215 nstat_msg_src_update *desc = (nstat_msg_src_update*)buffer;
4216 desc->hdr.type = NSTAT_MSG_TYPE_SRC_UPDATE;
4217 desc->hdr.length = size;
4218 desc->srcref = src->srcref;
4219 desc->event_flags = 0;
4220 desc->provider = src->provider->nstat_provider_id;
4221
4222 errno_t result = 0;
4223 // Fill in the description
4224 if (src->provider->nstat_descriptor_length != 0 && src->provider->nstat_copy_descriptor) {
4225 // Query the provider for the provider specific bits
4226 result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
4227 src->provider->nstat_descriptor_length);
4228 if (result != 0) {
4229 nstat_stats.nstat_copy_descriptor_failures++;
4230 if (nstat_debug != 0) {
4231 printf("%s: src->provider->nstat_copy_descriptor: %d\n", __func__, result);
4232 }
4233 return result;
4234 }
4235 }
4236
4237 if (src->provider->nstat_counts) {
4238 result = src->provider->nstat_counts(src->cookie, &desc->counts, gone);
4239 if (result != 0) {
4240 nstat_stats.nstat_provider_counts_failures++;
4241 if (nstat_debug != 0) {
4242 printf("%s: src->provider->nstat_counts: %d\n", __func__, result);
4243 }
4244 return result;
4245 }
4246
4247 if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
4248 desc->counts.nstat_rxbytes == 0 && desc->counts.nstat_txbytes == 0) {
4249 return EAGAIN;
4250 }
4251 }
4252
4253 return nstat_accumulate_msg(state, &desc->hdr, size);
4254 }
4255
4256 static errno_t
4257 nstat_control_send_removed(
4258 nstat_control_state *state,
4259 nstat_src *src)
4260 {
4261 nstat_msg_src_removed removed;
4262 errno_t result;
4263
4264 bzero(&removed, sizeof(removed));
4265 removed.hdr.type = NSTAT_MSG_TYPE_SRC_REMOVED;
4266 removed.hdr.length = sizeof(removed);
4267 removed.hdr.context = 0;
4268 removed.srcref = src->srcref;
4269 result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &removed,
4270 sizeof(removed), CTL_DATA_EOR | CTL_DATA_CRIT);
4271 if (result != 0) {
4272 nstat_stats.nstat_msgremovedfailures += 1;
4273 }
4274
4275 return result;
4276 }
4277
4278 static errno_t
4279 nstat_control_handle_add_request(
4280 nstat_control_state *state,
4281 mbuf_t m)
4282 {
4283 errno_t result;
4284
4285 // Verify the header fits in the first mbuf
4286 if (mbuf_len(m) < offsetof(nstat_msg_add_src_req, param)) {
4287 return EINVAL;
4288 }
4289
4290 // Calculate the length of the parameter field
4291 int32_t paramlength = mbuf_pkthdr_len(m) - offsetof(nstat_msg_add_src_req, param);
4292 if (paramlength < 0 || paramlength > 2 * 1024) {
4293 return EINVAL;
4294 }
4295
4296 nstat_provider *provider = NULL;
4297 nstat_provider_cookie_t cookie = NULL;
4298 nstat_msg_add_src_req *req = mbuf_data(m);
4299 if (mbuf_pkthdr_len(m) > mbuf_len(m)) {
4300 // parameter is too large, we need to make a contiguous copy
4301 void *data = OSMalloc(paramlength, nstat_malloc_tag);
4302
4303 if (!data) {
4304 return ENOMEM;
4305 }
4306 result = mbuf_copydata(m, offsetof(nstat_msg_add_src_req, param), paramlength, data);
4307 if (result == 0) {
4308 result = nstat_lookup_entry(req->provider, data, paramlength, &provider, &cookie);
4309 }
4310 OSFree(data, paramlength, nstat_malloc_tag);
4311 } else {
4312 result = nstat_lookup_entry(req->provider, (void*)&req->param, paramlength, &provider, &cookie);
4313 }
4314
4315 if (result != 0) {
4316 return result;
4317 }
4318
4319 result = nstat_control_source_add(req->hdr.context, state, provider, cookie);
4320 if (result != 0) {
4321 provider->nstat_release(cookie, 0);
4322 }
4323
4324 return result;
4325 }
4326
4327 static errno_t
4328 nstat_set_provider_filter(
4329 nstat_control_state *state,
4330 nstat_msg_add_all_srcs *req)
4331 {
4332 nstat_provider_id_t provider_id = req->provider;
4333
4334 u_int32_t prev_ncs_watching = atomic_or_32_ov(&state->ncs_watching, (1 << provider_id));
4335
4336 if ((prev_ncs_watching & (1 << provider_id)) != 0) {
4337 return EALREADY;
4338 }
4339
4340 state->ncs_watching |= (1 << provider_id);
4341 state->ncs_provider_filters[provider_id].npf_flags = req->filter;
4342 state->ncs_provider_filters[provider_id].npf_events = req->events;
4343 state->ncs_provider_filters[provider_id].npf_pid = req->target_pid;
4344 uuid_copy(state->ncs_provider_filters[provider_id].npf_uuid, req->target_uuid);
4345 return 0;
4346 }
4347
4348 static errno_t
4349 nstat_control_handle_add_all(
4350 nstat_control_state *state,
4351 mbuf_t m)
4352 {
4353 errno_t result = 0;
4354
4355 // Verify the header fits in the first mbuf
4356 if (mbuf_len(m) < sizeof(nstat_msg_add_all_srcs)) {
4357 return EINVAL;
4358 }
4359
4360 nstat_msg_add_all_srcs *req = mbuf_data(m);
4361 if (req->provider > NSTAT_PROVIDER_LAST) {
4362 return ENOENT;
4363 }
4364
4365 nstat_provider *provider = nstat_find_provider_by_id(req->provider);
4366
4367 if (!provider) {
4368 return ENOENT;
4369 }
4370 if (provider->nstat_watcher_add == NULL) {
4371 return ENOTSUP;
4372 }
4373
4374 if (nstat_privcheck != 0) {
4375 result = priv_check_cred(kauth_cred_get(),
4376 PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0);
4377 if (result != 0) {
4378 return result;
4379 }
4380 }
4381
4382 lck_mtx_lock(&state->ncs_mtx);
4383 if (req->filter & NSTAT_FILTER_SUPPRESS_SRC_ADDED) {
4384 // Suppression of source messages implicitly requires the use of update messages
4385 state->ncs_flags |= NSTAT_FLAG_SUPPORTS_UPDATES;
4386 }
4387 lck_mtx_unlock(&state->ncs_mtx);
4388
4389 // rdar://problem/30301300 Different providers require different synchronization
4390 // to ensure that a new entry does not get double counted due to being added prior
4391 // to all current provider entries being added. Hence pass the provider the details
4392 // in the original request for this to be applied atomically
4393
4394 result = provider->nstat_watcher_add(state, req);
4395
4396 if (result == 0) {
4397 nstat_enqueue_success(req->hdr.context, state, 0);
4398 }
4399
4400 return result;
4401 }
4402
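/*
 * Create a client-side source for a provider cookie: announce it with
 * NSTAT_MSG_TYPE_SRC_ADDED unless the provider filter set
 * NSTAT_FILTER_SUPPRESS_SRC_ADDED, assign a fresh source ref, and link
 * it onto the control state's source queue.
 */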
4403 static errno_t
4404 nstat_control_source_add(
4405 u_int64_t context,
4406 nstat_control_state *state,
4407 nstat_provider *provider,
4408 nstat_provider_cookie_t cookie)
4409 {
4410 // Fill out source added message if appropriate
4411 mbuf_t msg = NULL;
4412 nstat_src_ref_t *srcrefp = NULL;
4413
4414 u_int64_t provider_filter_flags =
4415 state->ncs_provider_filters[provider->nstat_provider_id].npf_flags;
4416 boolean_t tell_user =
4417 ((provider_filter_flags & NSTAT_FILTER_SUPPRESS_SRC_ADDED) == 0);
4418 u_int32_t src_filter =
4419 (provider_filter_flags & NSTAT_FILTER_PROVIDER_NOZEROBYTES)
4420 ? NSTAT_FILTER_NOZEROBYTES : 0;
4421
4422 if (provider_filter_flags & NSTAT_FILTER_TCP_NO_EARLY_CLOSE) {
4423 src_filter |= NSTAT_FILTER_TCP_NO_EARLY_CLOSE;
4424 }
4425
4426 if (tell_user) {
4427 unsigned int one = 1;
4428
4429 if (mbuf_allocpacket(MBUF_DONTWAIT, sizeof(nstat_msg_src_added),
4430 &one, &msg) != 0) {
4431 return ENOMEM;
4432 }
4433
4434 mbuf_setlen(msg, sizeof(nstat_msg_src_added));
4435 mbuf_pkthdr_setlen(msg, mbuf_len(msg));
4436 nstat_msg_src_added *add = mbuf_data(msg);
4437 bzero(add, sizeof(*add));
4438 add->hdr.type = NSTAT_MSG_TYPE_SRC_ADDED;
4439 add->hdr.length = mbuf_len(msg);
4440 add->hdr.context = context;
4441 add->provider = provider->nstat_provider_id;
4442 srcrefp = &add->srcref;
4443 }
4444
4445 // Allocate storage for the source
4446 nstat_src *src = OSMalloc(sizeof(*src), nstat_malloc_tag);
4447 if (src == NULL) {
4448 if (msg) {
4449 mbuf_freem(msg);
4450 }
4451 return ENOMEM;
4452 }
4453
4454 // Fill in the source, including picking an unused source ref
4455 lck_mtx_lock(&state->ncs_mtx);
4456
4457 src->srcref = nstat_control_next_src_ref(state);
4458 if (srcrefp) {
4459 *srcrefp = src->srcref;
4460 }
4461
4462 if (state->ncs_flags & NSTAT_FLAG_CLEANUP || src->srcref == NSTAT_SRC_REF_INVALID) {
4463 lck_mtx_unlock(&state->ncs_mtx);
4464 OSFree(src, sizeof(*src), nstat_malloc_tag);
4465 if (msg) {
4466 mbuf_freem(msg);
4467 }
4468 return EINVAL;
4469 }
4470 src->provider = provider;
4471 src->cookie = cookie;
4472 src->filter = src_filter;
4473 src->seq = 0;
4474
4475 if (msg) {
4476 // send the source added message if appropriate
4477 errno_t result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg,
4478 CTL_DATA_EOR);
4479 if (result != 0) {
4480 nstat_stats.nstat_srcaddedfailures += 1;
4481 lck_mtx_unlock(&state->ncs_mtx);
4482 OSFree(src, sizeof(*src), nstat_malloc_tag);
4483 mbuf_freem(msg);
4484 return result;
4485 }
4486 }
4487 // Put the source in the list
4488 TAILQ_INSERT_HEAD(&state->ncs_src_queue, src, ns_control_link);
4489 src->ns_control = state;
4490
4491 lck_mtx_unlock(&state->ncs_mtx);
4492
4493 return 0;
4494 }
4495
4496 static errno_t
4497 nstat_control_handle_remove_request(
4498 nstat_control_state *state,
4499 mbuf_t m)
4500 {
4501 nstat_src_ref_t srcref = NSTAT_SRC_REF_INVALID;
4502 nstat_src *src;
4503
4504 if (mbuf_copydata(m, offsetof(nstat_msg_rem_src_req, srcref), sizeof(srcref), &srcref) != 0) {
4505 return EINVAL;
4506 }
4507
4508 lck_mtx_lock(&state->ncs_mtx);
4509
4510 // Remove this source as we look for it
4511 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
4512 {
4513 if (src->srcref == srcref) {
4514 break;
4515 }
4516 }
4517 if (src) {
4518 TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
4519 }
4520
4521 lck_mtx_unlock(&state->ncs_mtx);
4522
4523 if (src) {
4524 nstat_control_cleanup_source(state, src, FALSE);
4525 }
4526
4527 return src ? 0 : ENOENT;
4528 }
4529
4530 static errno_t
4531 nstat_control_handle_query_request(
4532 nstat_control_state *state,
4533 mbuf_t m)
4534 {
4535 // TBD: handle this from another thread so we can enqueue a lot of data
4536 // As written, if a client requests query all, this function will be
4537 // called from its send of the request message. We will attempt to write
4538 // responses and succeed until the buffer fills up. Since the client's thread
4539 // is blocked on send, it won't be reading unless the client has two threads
4540 // using this socket, one for read and one for write. Two threads probably
4541 // won't work with this code anyhow since we don't have proper locking in
4542 // place yet.
4543 tailq_head_nstat_src dead_list;
4544 errno_t result = ENOENT;
4545 nstat_msg_query_src_req req;
4546
4547 if (mbuf_copydata(m, 0, sizeof(req), &req) != 0) {
4548 return EINVAL;
4549 }
4550
4551 const boolean_t all_srcs = (req.srcref == NSTAT_SRC_REF_ALL);
4552 TAILQ_INIT(&dead_list);
4553
4554 lck_mtx_lock(&state->ncs_mtx);
4555
4556 if (all_srcs) {
4557 state->ncs_flags |= NSTAT_FLAG_REQCOUNTS;
4558 }
4559 nstat_src *src, *tmpsrc;
4560 u_int64_t src_count = 0;
4561 boolean_t partial = FALSE;
4562
4563 /*
4564 * Error handling policy and sequence number generation is folded into
4565 * nstat_control_begin_query.
4566 */
4567 partial = nstat_control_begin_query(state, &req.hdr);
4568
4569
4570 TAILQ_FOREACH_SAFE(src, &state->ncs_src_queue, ns_control_link, tmpsrc)
4571 {
4572 int gone = 0;
4573
4574 // XXX ignore IFACE types?
4575 if (all_srcs || src->srcref == req.srcref) {
4576 if (nstat_control_reporting_allowed(state, src)
4577 && (!partial || !all_srcs || src->seq != state->ncs_seq)) {
4578 if (all_srcs &&
4579 (req.hdr.flags & NSTAT_MSG_HDR_FLAG_SUPPORTS_AGGREGATE) != 0) {
4580 result = nstat_control_append_counts(state, src, &gone);
4581 } else {
4582 result = nstat_control_send_counts(state, src, req.hdr.context, 0, &gone);
4583 }
4584
4585 if (ENOMEM == result || ENOBUFS == result) {
4586 /*
4587 * If the counts message failed to
4588 * enqueue then we should clear our flag so
4589 * that a client doesn't miss anything on
4590 * idle cleanup. We skip the "gone"
4591 * processing in the hope that we may
4592 * catch it another time.
4593 */
4594 state->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS;
4595 break;
4596 }
4597 if (partial) {
4598 /*
4599 * We skip over hard errors and
4600 * filtered sources.
4601 */
4602 src->seq = state->ncs_seq;
4603 src_count++;
4604 }
4605 }
4606 }
4607
4608 if (gone) {
4609 // send one last descriptor message so client may see last state
4610 // If we can't send the notification now, it
4611 // will be sent in the idle cleanup.
4612 result = nstat_control_send_description(state, src, 0, 0);
4613 if (result != 0) {
4614 nstat_stats.nstat_control_send_description_failures++;
4615 if (nstat_debug != 0) {
4616 printf("%s - nstat_control_send_description() %d\n", __func__, result);
4617 }
4618 state->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS;
4619 break;
4620 }
4621
4622 // pull src out of the list
4623 TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
4624 TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
4625 }
4626
4627 if (all_srcs) {
4628 if (src_count >= QUERY_CONTINUATION_SRC_COUNT) {
4629 break;
4630 }
4631 } else if (req.srcref == src->srcref) {
4632 break;
4633 }
4634 }
4635
4636 nstat_flush_accumulated_msgs(state);
4637
4638 u_int16_t flags = 0;
4639 if (req.srcref == NSTAT_SRC_REF_ALL) {
4640 flags = nstat_control_end_query(state, src, partial);
4641 }
4642
4643 lck_mtx_unlock(&state->ncs_mtx);
4644
4645 /*
4646 * If an error occurred enqueueing data, then allow the error to
4647 * propagate to nstat_control_send. This way, the error is sent to
4648 * user-level.
4649 */
4650 if (all_srcs && ENOMEM != result && ENOBUFS != result) {
4651 nstat_enqueue_success(req.hdr.context, state, flags);
4652 result = 0;
4653 }
4654
4655 while ((src = TAILQ_FIRST(&dead_list))) {
4656 TAILQ_REMOVE(&dead_list, src, ns_control_link);
4657 nstat_control_cleanup_source(state, src, FALSE);
4658 }
4659
4660 return result;
4661 }
4662
4663 static errno_t
4664 nstat_control_handle_get_src_description(
4665 nstat_control_state *state,
4666 mbuf_t m)
4667 {
4668 nstat_msg_get_src_description req;
4669 errno_t result = ENOENT;
4670 nstat_src *src;
4671
4672 if (mbuf_copydata(m, 0, sizeof(req), &req) != 0) {
4673 return EINVAL;
4674 }
4675
4676 lck_mtx_lock(&state->ncs_mtx);
4677 u_int64_t src_count = 0;
4678 boolean_t partial = FALSE;
4679 const boolean_t all_srcs = (req.srcref == NSTAT_SRC_REF_ALL);
4680
4681 /*
4682 * Error handling policy and sequence number generation is folded into
4683 * nstat_control_begin_query.
4684 */
4685 partial = nstat_control_begin_query(state, &req.hdr);
4686
4687 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
4688 {
4689 if (all_srcs || src->srcref == req.srcref) {
4690 if (nstat_control_reporting_allowed(state, src)
4691 && (!all_srcs || !partial || src->seq != state->ncs_seq)) {
4692 if ((req.hdr.flags & NSTAT_MSG_HDR_FLAG_SUPPORTS_AGGREGATE) != 0 && all_srcs) {
4693 result = nstat_control_append_description(state, src);
4694 } else {
4695 result = nstat_control_send_description(state, src, req.hdr.context, 0);
4696 }
4697
4698 if (ENOMEM == result || ENOBUFS == result) {
4699 /*
4700 * If the description message failed to
4701 * enqueue then we give up for now.
4702 */
4703 break;
4704 }
4705 if (partial) {
4706 /*
4707 * Note, we skip over hard errors and
4708 * filtered sources.
4709 */
4710 src->seq = state->ncs_seq;
4711 src_count++;
4712 if (src_count >= QUERY_CONTINUATION_SRC_COUNT) {
4713 break;
4714 }
4715 }
4716 }
4717
4718 if (!all_srcs) {
4719 break;
4720 }
4721 }
4722 }
4723 nstat_flush_accumulated_msgs(state);
4724
4725 u_int16_t flags = 0;
4726 if (req.srcref == NSTAT_SRC_REF_ALL) {
4727 flags = nstat_control_end_query(state, src, partial);
4728 }
4729
4730 lck_mtx_unlock(&state->ncs_mtx);
4731 /*
4732 * If an error occurred enqueueing data, then allow the error to
4733 * propagate to nstat_control_send. This way, the error is sent to
4734 * user-level.
4735 */
4736 if (all_srcs && ENOMEM != result && ENOBUFS != result) {
4737 nstat_enqueue_success(req.hdr.context, state, flags);
4738 result = 0;
4739 }
4740
4741 return result;
4742 }
4743
4744 static errno_t
4745 nstat_control_handle_set_filter(
4746 nstat_control_state *state,
4747 mbuf_t m)
4748 {
4749 nstat_msg_set_filter req;
4750 nstat_src *src;
4751
4752 if (mbuf_copydata(m, 0, sizeof(req), &req) != 0) {
4753 return EINVAL;
4754 }
4755 if (req.srcref == NSTAT_SRC_REF_ALL ||
4756 req.srcref == NSTAT_SRC_REF_INVALID) {
4757 return EINVAL;
4758 }
4759
4760 lck_mtx_lock(&state->ncs_mtx);
4761 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
4762 {
4763 if (req.srcref == src->srcref) {
4764 src->filter = req.filter;
4765 break;
4766 }
4767 }
4768 lck_mtx_unlock(&state->ncs_mtx);
4769 if (src == NULL) {
4770 return ENOENT;
4771 }
4772
4773 return 0;
4774 }
4775
4776 static void
4777 nstat_send_error(
4778 nstat_control_state *state,
4779 u_int64_t context,
4780 u_int32_t error)
4781 {
4782 errno_t result;
4783 struct nstat_msg_error err;
4784
4785 bzero(&err, sizeof(err));
4786 err.hdr.type = NSTAT_MSG_TYPE_ERROR;
4787 err.hdr.length = sizeof(err);
4788 err.hdr.context = context;
4789 err.error = error;
4790
4791 result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &err,
4792 sizeof(err), CTL_DATA_EOR | CTL_DATA_CRIT);
4793 if (result != 0) {
4794 nstat_stats.nstat_msgerrorfailures++;
4795 }
4796 }
4797
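/*
 * Partial "query all" protocol: a client that sets
 * NSTAT_MSG_HDR_FLAG_CONTINUATION reissues the request with the same
 * context until no continuation flag comes back.  The per-state sequence
 * number lets the handlers skip sources already reported under this
 * context; a new context arriving while another is in flight aborts the
 * old query with EAGAIN.
 */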
4798 static boolean_t
4799 nstat_control_begin_query(
4800 nstat_control_state *state,
4801 const nstat_msg_hdr *hdrp)
4802 {
4803 boolean_t partial = FALSE;
4804
4805 if (hdrp->flags & NSTAT_MSG_HDR_FLAG_CONTINUATION) {
4806 /* A partial query all has been requested. */
4807 partial = TRUE;
4808
4809 if (state->ncs_context != hdrp->context) {
4810 if (state->ncs_context != 0) {
4811 nstat_send_error(state, state->ncs_context, EAGAIN);
4812 }
4813
4814 /* Initialize state for a partial query all. */
4815 state->ncs_context = hdrp->context;
4816 state->ncs_seq++;
4817 }
4818 }
4819
4820 return partial;
4821 }
4822
4823 static u_int16_t
4824 nstat_control_end_query(
4825 nstat_control_state *state,
4826 nstat_src *last_src,
4827 boolean_t partial)
4828 {
4829 u_int16_t flags = 0;
4830
4831 if (last_src == NULL || !partial) {
4832 /*
4833 * We iterated through the entire srcs list or exited early
4834 * from the loop when a partial update was not requested (an
4835 * error occurred), so clear context to indicate internally
4836 * that the query is finished.
4837 */
4838 state->ncs_context = 0;
4839 } else {
4840 /*
4841 * Indicate to userlevel to make another partial request as
4842 * there are still sources left to be reported.
4843 */
4844 flags |= NSTAT_MSG_HDR_FLAG_CONTINUATION;
4845 }
4846
4847 return flags;
4848 }
4849
4850 static errno_t
4851 nstat_control_handle_get_update(
4852 nstat_control_state *state,
4853 mbuf_t m)
4854 {
4855 nstat_msg_query_src_req req;
4856
4857 if (mbuf_copydata(m, 0, sizeof(req), &req) != 0) {
4858 return EINVAL;
4859 }
4860
4861 lck_mtx_lock(&state->ncs_mtx);
4862
4863 state->ncs_flags |= NSTAT_FLAG_SUPPORTS_UPDATES;
4864
4865 errno_t result = ENOENT;
4866 nstat_src *src, *tmpsrc;
4867 tailq_head_nstat_src dead_list;
4868 u_int64_t src_count = 0;
4869 boolean_t partial = FALSE;
4870 TAILQ_INIT(&dead_list);
4871
4872 /*
4873 * Error handling policy and sequence number generation is folded into
4874 * nstat_control_begin_query.
4875 */
4876 partial = nstat_control_begin_query(state, &req.hdr);
4877
4878 TAILQ_FOREACH_SAFE(src, &state->ncs_src_queue, ns_control_link, tmpsrc)
4879 {
4880 int gone;
4881
4882 gone = 0;
4883 if (nstat_control_reporting_allowed(state, src)) {
4884 /* skip this source if it has the current state
4885 * sequence number as it's already been reported in
4886 * this query-all partial sequence. */
4887 if (req.srcref == NSTAT_SRC_REF_ALL
4888 && (FALSE == partial || src->seq != state->ncs_seq)) {
4889 result = nstat_control_append_update(state, src, &gone);
4890 if (ENOMEM == result || ENOBUFS == result) {
4891 /*
4892 * If the update message failed to
4893 * enqueue then give up.
4894 */
4895 break;
4896 }
4897 if (partial) {
4898 /*
4899 * We skip over hard errors and
4900 * filtered sources.
4901 */
4902 src->seq = state->ncs_seq;
4903 src_count++;
4904 }
4905 } else if (src->srcref == req.srcref) {
4906 result = nstat_control_send_update(state, src, req.hdr.context, 0, 0, &gone);
4907 }
4908 }
4909
4910 if (gone) {
4911 // pull src out of the list
4912 TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
4913 TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
4914 }
4915
4916 if (req.srcref != NSTAT_SRC_REF_ALL && req.srcref == src->srcref) {
4917 break;
4918 }
4919 if (src_count >= QUERY_CONTINUATION_SRC_COUNT) {
4920 break;
4921 }
4922 }
4923
4924 nstat_flush_accumulated_msgs(state);
4925
4926
4927 u_int16_t flags = 0;
4928 if (req.srcref == NSTAT_SRC_REF_ALL) {
4929 flags = nstat_control_end_query(state, src, partial);
4930 }
4931
4932 lck_mtx_unlock(&state->ncs_mtx);
4933 /*
4934 * If an error occurred enqueueing data, then allow the error to
4935 * propagate to nstat_control_send. This way, the error is sent to
4936 * user-level.
4937 */
4938 if (req.srcref == NSTAT_SRC_REF_ALL && ENOMEM != result && ENOBUFS != result) {
4939 nstat_enqueue_success(req.hdr.context, state, flags);
4940 result = 0;
4941 }
4942
4943 while ((src = TAILQ_FIRST(&dead_list))) {
4944 TAILQ_REMOVE(&dead_list, src, ns_control_link);
4945 // release src and send notification
4946 nstat_control_cleanup_source(state, src, FALSE);
4947 }
4948
4949 return result;
4950 }
4951
4952 static errno_t
4953 nstat_control_handle_subscribe_sysinfo(
4954 nstat_control_state *state)
4955 {
4956 errno_t result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0);
4957
4958 if (result != 0) {
4959 return result;
4960 }
4961
4962 lck_mtx_lock(&state->ncs_mtx);
4963 state->ncs_flags |= NSTAT_FLAG_SYSINFO_SUBSCRIBED;
4964 lck_mtx_unlock(&state->ncs_mtx);
4965
4966 return 0;
4967 }
4968
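/*
 * Kernel control send handler: every request from user space lands here.
 * The header's message type selects the handler; on failure an
 * NSTAT_MSG_TYPE_ERROR reply is generated, with the original request
 * appended when it can be prepended into the same mbuf.
 */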
4969 static errno_t
4970 nstat_control_send(
4971 kern_ctl_ref kctl,
4972 u_int32_t unit,
4973 void *uinfo,
4974 mbuf_t m,
4975 __unused int flags)
4976 {
4977 nstat_control_state *state = (nstat_control_state*)uinfo;
4978 struct nstat_msg_hdr *hdr;
4979 struct nstat_msg_hdr storage;
4980 errno_t result = 0;
4981
4982 if (mbuf_pkthdr_len(m) < sizeof(*hdr)) {
4983 // Is this the right thing to do?
4984 mbuf_freem(m);
4985 return EINVAL;
4986 }
4987
4988 if (mbuf_len(m) >= sizeof(*hdr)) {
4989 hdr = mbuf_data(m);
4990 } else {
4991 mbuf_copydata(m, 0, sizeof(storage), &storage);
4992 hdr = &storage;
4993 }
4994
4995 // Legacy clients may not set the length
4996 // Those clients are likely not setting the flags either
4997 // Fix everything up so old clients continue to work
4998 if (hdr->length != mbuf_pkthdr_len(m)) {
4999 hdr->flags = 0;
5000 hdr->length = mbuf_pkthdr_len(m);
5001 if (hdr == &storage) {
5002 mbuf_copyback(m, 0, sizeof(*hdr), hdr, MBUF_DONTWAIT);
5003 }
5004 }
5005
5006 switch (hdr->type) {
5007 case NSTAT_MSG_TYPE_ADD_SRC:
5008 result = nstat_control_handle_add_request(state, m);
5009 break;
5010
5011 case NSTAT_MSG_TYPE_ADD_ALL_SRCS:
5012 result = nstat_control_handle_add_all(state, m);
5013 break;
5014
5015 case NSTAT_MSG_TYPE_REM_SRC:
5016 result = nstat_control_handle_remove_request(state, m);
5017 break;
5018
5019 case NSTAT_MSG_TYPE_QUERY_SRC:
5020 result = nstat_control_handle_query_request(state, m);
5021 break;
5022
5023 case NSTAT_MSG_TYPE_GET_SRC_DESC:
5024 result = nstat_control_handle_get_src_description(state, m);
5025 break;
5026
5027 case NSTAT_MSG_TYPE_SET_FILTER:
5028 result = nstat_control_handle_set_filter(state, m);
5029 break;
5030
5031 case NSTAT_MSG_TYPE_GET_UPDATE:
5032 result = nstat_control_handle_get_update(state, m);
5033 break;
5034
5035 case NSTAT_MSG_TYPE_SUBSCRIBE_SYSINFO:
5036 result = nstat_control_handle_subscribe_sysinfo(state);
5037 break;
5038
5039 default:
5040 result = EINVAL;
5041 break;
5042 }
5043
5044 if (result != 0) {
5045 struct nstat_msg_error err;
5046
5047 bzero(&err, sizeof(err));
5048 err.hdr.type = NSTAT_MSG_TYPE_ERROR;
5049 err.hdr.length = sizeof(err) + mbuf_pkthdr_len(m);
5050 err.hdr.context = hdr->context;
5051 err.error = result;
5052
5053 if (mbuf_prepend(&m, sizeof(err), MBUF_DONTWAIT) == 0 &&
5054 mbuf_copyback(m, 0, sizeof(err), &err, MBUF_DONTWAIT) == 0) {
5055 result = ctl_enqueuembuf(kctl, unit, m, CTL_DATA_EOR | CTL_DATA_CRIT);
5056 if (result != 0) {
5057 mbuf_freem(m);
5058 }
5059 m = NULL;
5060 }
5061
5062 if (result != 0) {
5063 // Unable to prepend the error to the request - just send the error
5064 err.hdr.length = sizeof(err);
5065 result = ctl_enqueuedata(kctl, unit, &err, sizeof(err),
5066 CTL_DATA_EOR | CTL_DATA_CRIT);
5067 if (result != 0) {
5068 nstat_stats.nstat_msgerrorfailures += 1;
5069 }
5070 }
5071 nstat_stats.nstat_handle_msg_failures += 1;
5072 }
5073
5074 if (m) {
5075 mbuf_freem(m);
5076 }
5077
5078 return result;
5079 }
5080
5081
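/*
 * Aggregate TCP progress indicators for one interface: walk the TCP pcb
 * list under a shared lock, counting connectivity-probe failures across
 * all matching flows and accumulating byte counts for "recent" flows,
 * i.e. those started within recentflow_maxduration of now (in mach
 * continuous time).  filter_flags may restrict the walk to local or
 * non-local flows.
 */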
5082 static int
5083 tcp_progress_indicators_for_interface(unsigned int ifindex, uint64_t recentflow_maxduration, uint16_t filter_flags, struct xtcpprogress_indicators *indicators)
5084 {
5085 int error = 0;
5086 struct inpcb *inp;
5087 uint64_t min_recent_start_time;
5088
5089 min_recent_start_time = mach_continuous_time() - recentflow_maxduration;
5090 bzero(indicators, sizeof(*indicators));
5091
5092 lck_rw_lock_shared(tcbinfo.ipi_lock);
5093 /*
5094 * For progress indicators we don't need to special case TCP to collect time wait connections
5095 */
5096 LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list)
5097 {
5098 struct tcpcb *tp = intotcpcb(inp);
5099 if (tp && inp->inp_last_outifp &&
5100 inp->inp_last_outifp->if_index == ifindex &&
5101 inp->inp_state != INPCB_STATE_DEAD &&
5102 ((filter_flags == 0) ||
5103 ((filter_flags & NSTAT_IFNET_IS_NON_LOCAL) && !(tp->t_flags & TF_LOCAL)) ||
5104 ((filter_flags & NSTAT_IFNET_IS_LOCAL) && (tp->t_flags & TF_LOCAL)))) {
5105 struct tcp_conn_status connstatus;
5106 indicators->xp_numflows++;
5107 tcp_get_connectivity_status(tp, &connstatus);
5108 if (connstatus.write_probe_failed) {
5109 indicators->xp_write_probe_fails++;
5110 }
5111 if (connstatus.read_probe_failed) {
5112 indicators->xp_read_probe_fails++;
5113 }
5114 if (connstatus.conn_probe_failed) {
5115 indicators->xp_conn_probe_fails++;
5116 }
5117 if (inp->inp_start_timestamp > min_recent_start_time) {
5118 uint64_t flow_count;
5119
5120 indicators->xp_recentflows++;
5121 atomic_get_64(flow_count, &inp->inp_stat->rxbytes);
5122 indicators->xp_recentflows_rxbytes += flow_count;
5123 atomic_get_64(flow_count, &inp->inp_stat->txbytes);
5124 indicators->xp_recentflows_txbytes += flow_count;
5125
5126 indicators->xp_recentflows_rxooo += tp->t_stat.rxoutoforderbytes;
5127 indicators->xp_recentflows_rxdup += tp->t_stat.rxduplicatebytes;
5128 indicators->xp_recentflows_retx += tp->t_stat.txretransmitbytes;
5129 if (tp->snd_max - tp->snd_una) {
5130 indicators->xp_recentflows_unacked++;
5131 }
5132 }
5133 }
5134 }
5135 lck_rw_done(tcbinfo.ipi_lock);
5136
5137 return error;
5138 }
5139
5140
5141 __private_extern__ int
5142 ntstat_tcp_progress_indicators(struct sysctl_req *req)
5143 {
5144 struct xtcpprogress_indicators indicators = {};
5145 int error = 0;
5146 struct tcpprogressreq requested;
5147
5148 if (priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0) != 0) {
5149 return EACCES;
5150 }
5151 if (req->newptr == USER_ADDR_NULL) {
5152 return EINVAL;
5153 }
5154 if (req->newlen < sizeof(requested)) {
5155 return EINVAL;
5156 }
5157 error = SYSCTL_IN(req, &requested, sizeof(requested));
5158 if (error != 0) {
5159 return error;
5160 }
5161 error = tcp_progress_indicators_for_interface(requested.ifindex, requested.recentflow_maxduration, (uint16_t)requested.filter_flags, &indicators);
5162 if (error != 0) {
5163 return error;
5164 }
5165 error = SYSCTL_OUT(req, &indicators, sizeof(indicators));
5166
5167 return error;
5168 }
5169
5170
5171
5172