/*
 * Copyright (c) 2010-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/kpi_mbuf.h>
#include <sys/socket.h>
#include <sys/kern_control.h>
#include <sys/mcache.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/priv.h>
#include <sys/protosw.h>

#include <kern/clock.h>
#include <kern/debug.h>

#include <libkern/libkern.h>
#include <libkern/OSMalloc.h>
#include <libkern/OSAtomic.h>
#include <libkern/locks.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/route.h>

// These includes appear in ntstat.h but we include them here first so they won't trigger
// any clang diagnostic errors.
#include <netinet/in.h>
#include <netinet/in_stat.h>
#include <netinet/tcp.h>

#pragma clang diagnostic push
#pragma clang diagnostic error "-Wpadded"
#pragma clang diagnostic error "-Wpacked"
// This header defines structures shared with user space, so we need to ensure there is
// no compiler inserted padding in case the user space process isn't using the same
// architecture as the kernel (example: i386 process with x86_64 kernel).
#include <net/ntstat.h>
#pragma clang diagnostic pop
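/*
 * Editorial sketch (not part of the original source): why -Wpadded matters
 * above.  If a structure shared with user space were declared as
 *
 *	struct example { u_int32_t a; u_int64_t b; };
 *
 * an x86_64 kernel would insert 4 bytes of padding after 'a' to 8-byte-align
 * 'b', while an i386 user process would not, so the two sides would disagree
 * about field offsets and sizeof().  Promoting -Wpadded/-Wpacked to errors
 * makes any such layout mismatch in ntstat.h fail the build instead of
 * silently corrupting messages.
 */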

#include <netinet/ip_var.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_cc.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/in6_var.h>

extern unsigned int if_enable_netagent;

__private_extern__ int nstat_collect = 1;

#if (DEBUG || DEVELOPMENT)
SYSCTL_INT(_net, OID_AUTO, statistics, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_collect, 0, "Collect detailed statistics");
#endif /* (DEBUG || DEVELOPMENT) */

#if CONFIG_EMBEDDED
static int nstat_privcheck = 1;
#else
static int nstat_privcheck = 0;
#endif
SYSCTL_INT(_net, OID_AUTO, statistics_privcheck, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_privcheck, 0, "Entitlement check");

SYSCTL_NODE(_net, OID_AUTO, stats,
    CTLFLAG_RW|CTLFLAG_LOCKED, 0, "network statistics");

static int nstat_debug = 0;
SYSCTL_INT(_net_stats, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_debug, 0, "");

static int nstat_sendspace = 2048;
SYSCTL_INT(_net_stats, OID_AUTO, sendspace, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_sendspace, 0, "");

static int nstat_recvspace = 8192;
SYSCTL_INT(_net_stats, OID_AUTO, recvspace, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_recvspace, 0, "");

static struct nstat_stats nstat_stats;
SYSCTL_STRUCT(_net_stats, OID_AUTO, stats, CTLFLAG_RD | CTLFLAG_LOCKED,
    &nstat_stats, nstat_stats, "");

static u_int32_t nstat_lim_interval = 30 * 60; /* Report interval, seconds */
static u_int32_t nstat_lim_min_tx_pkts = 100;
static u_int32_t nstat_lim_min_rx_pkts = 100;
#if (DEBUG || DEVELOPMENT)
SYSCTL_INT(_net_stats, OID_AUTO, lim_report_interval,
    CTLFLAG_RW | CTLFLAG_LOCKED, &nstat_lim_interval, 0,
    "Low internet stat report interval");

SYSCTL_INT(_net_stats, OID_AUTO, lim_min_tx_pkts,
    CTLFLAG_RW | CTLFLAG_LOCKED, &nstat_lim_min_tx_pkts, 0,
    "Low Internet, min transmit packets threshold");

SYSCTL_INT(_net_stats, OID_AUTO, lim_min_rx_pkts,
    CTLFLAG_RW | CTLFLAG_LOCKED, &nstat_lim_min_rx_pkts, 0,
    "Low Internet, min receive packets threshold");
#endif /* DEBUG || DEVELOPMENT */

static struct net_api_stats net_api_stats_before;
static u_int64_t net_api_stats_last_report_time;
#define NET_API_STATS_REPORT_INTERVAL (12 * 60 * 60) /* 12 hours, in seconds */
static u_int32_t net_api_stats_report_interval = NET_API_STATS_REPORT_INTERVAL;

#if (DEBUG || DEVELOPMENT)
SYSCTL_UINT(_net_stats, OID_AUTO, api_report_interval,
    CTLFLAG_RW | CTLFLAG_LOCKED, &net_api_stats_report_interval, 0, "");
#endif /* DEBUG || DEVELOPMENT */

enum
{
	NSTAT_FLAG_CLEANUP = (1 << 0),
	NSTAT_FLAG_REQCOUNTS = (1 << 1),
	NSTAT_FLAG_SUPPORTS_UPDATES = (1 << 2),
	NSTAT_FLAG_SYSINFO_SUBSCRIBED = (1 << 3),
};

#if CONFIG_EMBEDDED
#define QUERY_CONTINUATION_SRC_COUNT 50
#else
#define QUERY_CONTINUATION_SRC_COUNT 100
#endif

typedef TAILQ_HEAD(, nstat_src) tailq_head_nstat_src;
typedef TAILQ_ENTRY(nstat_src) tailq_entry_nstat_src;

typedef struct nstat_provider_filter
{
	u_int64_t npf_flags;
	u_int64_t npf_events;
	pid_t npf_pid;
	uuid_t npf_uuid;
} nstat_provider_filter;


typedef struct nstat_control_state
{
	struct nstat_control_state *ncs_next;
	u_int32_t ncs_watching;
	decl_lck_mtx_data(, ncs_mtx);
	kern_ctl_ref ncs_kctl;
	u_int32_t ncs_unit;
	nstat_src_ref_t ncs_next_srcref;
	tailq_head_nstat_src ncs_src_queue;
	mbuf_t ncs_accumulated;
	u_int32_t ncs_flags;
	nstat_provider_filter ncs_provider_filters[NSTAT_PROVIDER_COUNT];
	/* state maintained for partial query requests */
	u_int64_t ncs_context;
	u_int64_t ncs_seq;
} nstat_control_state;

typedef struct nstat_provider
{
	struct nstat_provider *next;
	nstat_provider_id_t nstat_provider_id;
	size_t nstat_descriptor_length;
	errno_t (*nstat_lookup)(const void *data, u_int32_t length, nstat_provider_cookie_t *out_cookie);
	int (*nstat_gone)(nstat_provider_cookie_t cookie);
	errno_t (*nstat_counts)(nstat_provider_cookie_t cookie, struct nstat_counts *out_counts, int *out_gone);
	errno_t (*nstat_watcher_add)(nstat_control_state *state, nstat_msg_add_all_srcs *req);
	void (*nstat_watcher_remove)(nstat_control_state *state);
	errno_t (*nstat_copy_descriptor)(nstat_provider_cookie_t cookie, void *data, u_int32_t len);
	void (*nstat_release)(nstat_provider_cookie_t cookie, boolean_t locked);
	bool (*nstat_reporting_allowed)(nstat_provider_cookie_t cookie, nstat_provider_filter *filter);
} nstat_provider;

typedef STAILQ_HEAD(, nstat_src) stailq_head_nstat_src;
typedef STAILQ_ENTRY(nstat_src) stailq_entry_nstat_src;

typedef TAILQ_HEAD(, nstat_tu_shadow) tailq_head_tu_shadow;
typedef TAILQ_ENTRY(nstat_tu_shadow) tailq_entry_tu_shadow;

typedef TAILQ_HEAD(, nstat_procdetails) tailq_head_procdetails;
typedef TAILQ_ENTRY(nstat_procdetails) tailq_entry_procdetails;

typedef struct nstat_src
{
	tailq_entry_nstat_src ns_control_link;	// All sources for the nstat_control_state, for iterating over.
	nstat_control_state *ns_control;	// The nstat_control_state that this is a source for
	nstat_src_ref_t srcref;
	nstat_provider *provider;
	nstat_provider_cookie_t cookie;
	uint32_t filter;
	uint64_t seq;
} nstat_src;

static errno_t nstat_control_send_counts(nstat_control_state *,
    nstat_src *, unsigned long long, u_int16_t, int *);
static int nstat_control_send_description(nstat_control_state *state, nstat_src *src, u_int64_t context, u_int16_t hdr_flags);
static int nstat_control_send_update(nstat_control_state *state, nstat_src *src, u_int64_t context, u_int16_t hdr_flags, int *gone);
static errno_t nstat_control_send_removed(nstat_control_state *, nstat_src *);
static errno_t nstat_control_send_goodbye(nstat_control_state *state, nstat_src *src);
static void nstat_control_cleanup_source(nstat_control_state *state, nstat_src *src, boolean_t);
static bool nstat_control_reporting_allowed(nstat_control_state *state, nstat_src *src);
static boolean_t nstat_control_begin_query(nstat_control_state *state, const nstat_msg_hdr *hdrp);
static u_int16_t nstat_control_end_query(nstat_control_state *state, nstat_src *last_src, boolean_t partial);
static void nstat_ifnet_report_ecn_stats(void);
static void nstat_ifnet_report_lim_stats(void);
static void nstat_net_api_report_stats(void);
static errno_t nstat_set_provider_filter(nstat_control_state *state, nstat_msg_add_all_srcs *req);

static u_int32_t nstat_udp_watchers = 0;
static u_int32_t nstat_tcp_watchers = 0;

static void nstat_control_register(void);

/*
 * The lock order is as follows:
 *
 * socket_lock (inpcb)
 * nstat_mtx
 * state->ncs_mtx
 */
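/*
 * Editorial sketch (not part of the original source): any path that needs
 * more than one of these locks must take them top-down, e.g.
 *
 *	socket_lock(inp->inp_socket, 0);
 *	lck_mtx_lock(&nstat_mtx);
 *	...
 *	lck_mtx_unlock(&nstat_mtx);
 *	socket_unlock(inp->inp_socket, 0);
 *
 * cf. nstat_tcp_new_pcb() below (socket_lock before nstat_mtx) and
 * nstat_pcb_detach() below (nstat_mtx before ncs_mtx); taking the locks in
 * any other order could deadlock against those paths.
 */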
static volatile OSMallocTag nstat_malloc_tag = NULL;
static nstat_control_state *nstat_controls = NULL;
static uint64_t nstat_idle_time = 0;
static decl_lck_mtx_data(, nstat_mtx);

/* some extern definitions */
extern void mbuf_report_peak_usage(void);
extern void tcp_report_stats(void);

static void
nstat_copy_sa_out(
	const struct sockaddr *src,
	struct sockaddr *dst,
	int maxlen)
{
	if (src->sa_len > maxlen) return;

	bcopy(src, dst, src->sa_len);
	if (src->sa_family == AF_INET6 &&
	    src->sa_len >= sizeof(struct sockaddr_in6))
	{
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6*)(void *)dst;
		if (IN6_IS_SCOPE_EMBED(&sin6->sin6_addr))
		{
			if (sin6->sin6_scope_id == 0)
				sin6->sin6_scope_id = ntohs(sin6->sin6_addr.s6_addr16[1]);
			sin6->sin6_addr.s6_addr16[1] = 0;
		}
	}
}
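/*
 * Editorial note (not part of the original source): the kernel stores the
 * scope of link-local IPv6 addresses using the KAME convention, embedding
 * the interface index in the second 16-bit word of the address.  For
 * example, fe80::1 scoped to interface 4 is held internally as fe80:4::1;
 * the code above recovers sin6_scope_id = 4 from that word and then clears
 * it, so user space sees the standard fe80::1 plus an explicit scope id.
 */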

static void
nstat_ip_to_sockaddr(
	const struct in_addr *ip,
	u_int16_t port,
	struct sockaddr_in *sin,
	u_int32_t maxlen)
{
	if (maxlen < sizeof(struct sockaddr_in))
		return;

	sin->sin_family = AF_INET;
	sin->sin_len = sizeof(*sin);
	sin->sin_port = port;
	sin->sin_addr = *ip;
}

u_int16_t
nstat_ifnet_to_flags(
	struct ifnet *ifp)
{
	u_int16_t flags = 0;
	u_int32_t functional_type = if_functional_type(ifp, FALSE);

	/* Panic if someone adds a functional type without updating ntstat. */
	VERIFY(0 <= functional_type && functional_type <= IFRTYPE_FUNCTIONAL_LAST);

	switch (functional_type)
	{
	case IFRTYPE_FUNCTIONAL_UNKNOWN:
		flags |= NSTAT_IFNET_IS_UNKNOWN_TYPE;
		break;
	case IFRTYPE_FUNCTIONAL_LOOPBACK:
		flags |= NSTAT_IFNET_IS_LOOPBACK;
		break;
	case IFRTYPE_FUNCTIONAL_WIRED:
	case IFRTYPE_FUNCTIONAL_INTCOPROC:
		flags |= NSTAT_IFNET_IS_WIRED;
		break;
	case IFRTYPE_FUNCTIONAL_WIFI_INFRA:
		flags |= NSTAT_IFNET_IS_WIFI;
		break;
	case IFRTYPE_FUNCTIONAL_WIFI_AWDL:
		flags |= NSTAT_IFNET_IS_WIFI;
		flags |= NSTAT_IFNET_IS_AWDL;
		break;
	case IFRTYPE_FUNCTIONAL_CELLULAR:
		flags |= NSTAT_IFNET_IS_CELLULAR;
		break;
	}

	if (IFNET_IS_EXPENSIVE(ifp))
	{
		flags |= NSTAT_IFNET_IS_EXPENSIVE;
	}

	return flags;
}

static u_int16_t
nstat_inpcb_to_flags(
	const struct inpcb *inp)
{
	u_int16_t flags = 0;

	if ((inp != NULL) && (inp->inp_last_outifp != NULL))
	{
		struct ifnet *ifp = inp->inp_last_outifp;
		flags = nstat_ifnet_to_flags(ifp);

		if (flags & NSTAT_IFNET_IS_CELLULAR)
		{
			if (inp->inp_socket != NULL &&
			    (inp->inp_socket->so_flags1 & SOF1_CELLFALLBACK))
				flags |= NSTAT_IFNET_VIA_CELLFALLBACK;
		}
	}
	else
	{
		flags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
	}

	return flags;
}

#pragma mark -- Network Statistic Providers --

static errno_t nstat_control_source_add(u_int64_t context, nstat_control_state *state, nstat_provider *provider, nstat_provider_cookie_t cookie);
struct nstat_provider *nstat_providers = NULL;

static struct nstat_provider*
nstat_find_provider_by_id(
	nstat_provider_id_t id)
{
	struct nstat_provider *provider;

	for (provider = nstat_providers; provider != NULL; provider = provider->next)
	{
		if (provider->nstat_provider_id == id)
			break;
	}

	return provider;
}

static errno_t
nstat_lookup_entry(
	nstat_provider_id_t id,
	const void *data,
	u_int32_t length,
	nstat_provider **out_provider,
	nstat_provider_cookie_t *out_cookie)
{
	*out_provider = nstat_find_provider_by_id(id);
	if (*out_provider == NULL)
	{
		return ENOENT;
	}

	return (*out_provider)->nstat_lookup(data, length, out_cookie);
}

static void nstat_init_route_provider(void);
static void nstat_init_tcp_provider(void);
static void nstat_init_udp_provider(void);
static void nstat_init_ifnet_provider(void);

__private_extern__ void
nstat_init(void)
{
	if (nstat_malloc_tag != NULL) return;

	OSMallocTag tag = OSMalloc_Tagalloc(NET_STAT_CONTROL_NAME, OSMT_DEFAULT);
	if (!OSCompareAndSwapPtr(NULL, tag, &nstat_malloc_tag))
	{
		OSMalloc_Tagfree(tag);
		tag = nstat_malloc_tag;
	}
	else
	{
		// We need to initialize other things; we do it here because this code path is only hit once.
		nstat_init_route_provider();
		nstat_init_tcp_provider();
		nstat_init_udp_provider();
		nstat_init_ifnet_provider();
		nstat_control_register();
	}
}

#pragma mark -- Aligned Buffer Allocation --

struct align_header
{
	u_int32_t offset;
	u_int32_t length;
};

static void*
nstat_malloc_aligned(
	u_int32_t length,
	u_int8_t alignment,
	OSMallocTag tag)
{
	struct align_header *hdr = NULL;
	u_int32_t size = length + sizeof(*hdr) + alignment - 1;

	u_int8_t *buffer = OSMalloc(size, tag);
	if (buffer == NULL) return NULL;

	u_int8_t *aligned = buffer + sizeof(*hdr);
	aligned = (u_int8_t*)P2ROUNDUP(aligned, alignment);

	hdr = (struct align_header*)(void *)(aligned - sizeof(*hdr));
	hdr->offset = aligned - buffer;
	hdr->length = size;

	return aligned;
}

static void
nstat_free_aligned(
	void *buffer,
	OSMallocTag tag)
{
	struct align_header *hdr = (struct align_header*)(void *)((u_int8_t*)buffer - sizeof(*hdr));
	OSFree(((char*)buffer) - hdr->offset, hdr->length, tag);
}
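/*
 * Worked example (editorial, not part of the original source): a request for
 * length 16 with alignment 8 allocates 16 + 8 + 7 = 31 bytes.  Suppose
 * OSMalloc() returns an address ending in 0x04: buffer + sizeof(*hdr) ends
 * in 0x0c, which P2ROUNDUP rounds up to 0x10.  The align_header is written
 * immediately below the aligned pointer, with offset = 0x10 - 0x04 = 12 and
 * length = 31, so nstat_free_aligned() can recover both the original base
 * pointer and the original allocation size from the header alone.
 */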

#pragma mark -- Route Provider --

static nstat_provider nstat_route_provider;

static errno_t
nstat_route_lookup(
	const void *data,
	u_int32_t length,
	nstat_provider_cookie_t *out_cookie)
{
	// rt_lookup doesn't take const params but it doesn't modify the parameters for
	// the lookup. So...we use a union to eliminate the warning.
	union
	{
		struct sockaddr *sa;
		const struct sockaddr *const_sa;
	} dst, mask;

	const nstat_route_add_param *param = (const nstat_route_add_param*)data;
	*out_cookie = NULL;

	if (length < sizeof(*param))
	{
		return EINVAL;
	}

	if (param->dst.v4.sin_family == 0 ||
	    param->dst.v4.sin_family > AF_MAX ||
	    (param->mask.v4.sin_family != 0 && param->mask.v4.sin_family != param->dst.v4.sin_family))
	{
		return EINVAL;
	}

	if (param->dst.v4.sin_len > sizeof(param->dst) ||
	    (param->mask.v4.sin_family && param->mask.v4.sin_len > sizeof(param->mask)))
	{
		return EINVAL;
	}
	if ((param->dst.v4.sin_family == AF_INET &&
	    param->dst.v4.sin_len < sizeof(struct sockaddr_in)) ||
	    (param->dst.v6.sin6_family == AF_INET6 &&
	    param->dst.v6.sin6_len < sizeof(struct sockaddr_in6)))
	{
		return EINVAL;
	}

	dst.const_sa = (const struct sockaddr*)&param->dst;
	mask.const_sa = param->mask.v4.sin_family ? (const struct sockaddr*)&param->mask : NULL;

	struct radix_node_head *rnh = rt_tables[dst.sa->sa_family];
	if (rnh == NULL) return EAFNOSUPPORT;

	lck_mtx_lock(rnh_lock);
	struct rtentry *rt = rt_lookup(TRUE, dst.sa, mask.sa, rnh, param->ifindex);
	lck_mtx_unlock(rnh_lock);

	if (rt) *out_cookie = (nstat_provider_cookie_t)rt;

	return rt ? 0 : ENOENT;
}

static int
nstat_route_gone(
	nstat_provider_cookie_t cookie)
{
	struct rtentry *rt = (struct rtentry*)cookie;
	return ((rt->rt_flags & RTF_UP) == 0) ? 1 : 0;
}

static errno_t
nstat_route_counts(
	nstat_provider_cookie_t cookie,
	struct nstat_counts *out_counts,
	int *out_gone)
{
	struct rtentry *rt = (struct rtentry*)cookie;
	struct nstat_counts *rt_stats = rt->rt_stats;

	if (out_gone) *out_gone = 0;

	if (out_gone && (rt->rt_flags & RTF_UP) == 0) *out_gone = 1;

	if (rt_stats)
	{
		atomic_get_64(out_counts->nstat_rxpackets, &rt_stats->nstat_rxpackets);
		atomic_get_64(out_counts->nstat_rxbytes, &rt_stats->nstat_rxbytes);
		atomic_get_64(out_counts->nstat_txpackets, &rt_stats->nstat_txpackets);
		atomic_get_64(out_counts->nstat_txbytes, &rt_stats->nstat_txbytes);
		out_counts->nstat_rxduplicatebytes = rt_stats->nstat_rxduplicatebytes;
		out_counts->nstat_rxoutoforderbytes = rt_stats->nstat_rxoutoforderbytes;
		out_counts->nstat_txretransmit = rt_stats->nstat_txretransmit;
		out_counts->nstat_connectattempts = rt_stats->nstat_connectattempts;
		out_counts->nstat_connectsuccesses = rt_stats->nstat_connectsuccesses;
		out_counts->nstat_min_rtt = rt_stats->nstat_min_rtt;
		out_counts->nstat_avg_rtt = rt_stats->nstat_avg_rtt;
		out_counts->nstat_var_rtt = rt_stats->nstat_var_rtt;
		out_counts->nstat_cell_rxbytes = out_counts->nstat_cell_txbytes = 0;
	}
	else
	{
		bzero(out_counts, sizeof(*out_counts));
	}

	return 0;
}

static void
nstat_route_release(
	nstat_provider_cookie_t cookie,
	__unused int locked)
{
	rtfree((struct rtentry*)cookie);
}

static u_int32_t nstat_route_watchers = 0;

static int
nstat_route_walktree_add(
	struct radix_node *rn,
	void *context)
{
	errno_t result = 0;
	struct rtentry *rt = (struct rtentry *)rn;
	nstat_control_state *state = (nstat_control_state*)context;

	LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);

	/* RTF_UP can't change while rnh_lock is held */
	if ((rt->rt_flags & RTF_UP) != 0)
	{
		/* Clear RTPRF_OURS if the route is still usable */
		RT_LOCK(rt);
		if (rt_validate(rt)) {
			RT_ADDREF_LOCKED(rt);
			RT_UNLOCK(rt);
		} else {
			RT_UNLOCK(rt);
			rt = NULL;
		}

		/* Otherwise if RTF_CONDEMNED, treat it as if it were down */
		if (rt == NULL)
			return (0);

		result = nstat_control_source_add(0, state, &nstat_route_provider, rt);
		if (result != 0)
			rtfree_locked(rt);
	}

	return result;
}

static errno_t
nstat_route_add_watcher(
	nstat_control_state *state,
	nstat_msg_add_all_srcs *req)
{
	int i;
	errno_t result = 0;

	lck_mtx_lock(rnh_lock);

	result = nstat_set_provider_filter(state, req);
	if (result == 0)
	{
		OSIncrementAtomic(&nstat_route_watchers);

		for (i = 1; i < AF_MAX; i++)
		{
			struct radix_node_head *rnh;
			rnh = rt_tables[i];
			if (!rnh) continue;

			result = rnh->rnh_walktree(rnh, nstat_route_walktree_add, state);
			if (result != 0)
			{
				// This is probably resource exhaustion.
				// There currently isn't a good way to recover from this.
				// Least bad seems to be to give up on the add-all but leave
				// the watcher in place.
				break;
			}
		}
	}
	lck_mtx_unlock(rnh_lock);

	return result;
}

__private_extern__ void
nstat_route_new_entry(
	struct rtentry *rt)
{
	if (nstat_route_watchers == 0)
		return;

	lck_mtx_lock(&nstat_mtx);
	if ((rt->rt_flags & RTF_UP) != 0)
	{
		nstat_control_state *state;
		for (state = nstat_controls; state; state = state->ncs_next)
		{
			if ((state->ncs_watching & (1 << NSTAT_PROVIDER_ROUTE)) != 0)
			{
				// this client is watching routes
				// acquire a reference for the route
				RT_ADDREF(rt);

				// add the source, if that fails, release the reference
				if (nstat_control_source_add(0, state, &nstat_route_provider, rt) != 0)
					RT_REMREF(rt);
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);
}

static void
nstat_route_remove_watcher(
	__unused nstat_control_state *state)
{
	OSDecrementAtomic(&nstat_route_watchers);
}

static errno_t
nstat_route_copy_descriptor(
	nstat_provider_cookie_t cookie,
	void *data,
	u_int32_t len)
{
	nstat_route_descriptor *desc = (nstat_route_descriptor*)data;
	if (len < sizeof(*desc))
	{
		return EINVAL;
	}
	bzero(desc, sizeof(*desc));

	struct rtentry *rt = (struct rtentry*)cookie;
	desc->id = (uint64_t)VM_KERNEL_ADDRPERM(rt);
	desc->parent_id = (uint64_t)VM_KERNEL_ADDRPERM(rt->rt_parent);
	desc->gateway_id = (uint64_t)VM_KERNEL_ADDRPERM(rt->rt_gwroute);


	// key/dest
	struct sockaddr *sa;
	if ((sa = rt_key(rt)))
		nstat_copy_sa_out(sa, &desc->dst.sa, sizeof(desc->dst));

	// mask
	if ((sa = rt_mask(rt)) && sa->sa_len <= sizeof(desc->mask))
		memcpy(&desc->mask, sa, sa->sa_len);

	// gateway
	if ((sa = rt->rt_gateway))
		nstat_copy_sa_out(sa, &desc->gateway.sa, sizeof(desc->gateway));

	if (rt->rt_ifp)
		desc->ifindex = rt->rt_ifp->if_index;

	desc->flags = rt->rt_flags;

	return 0;
}

static bool
nstat_route_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter)
{
	bool retval = true;

	if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0)
	{
		struct rtentry *rt = (struct rtentry*)cookie;
		struct ifnet *ifp = rt->rt_ifp;

		if (ifp)
		{
			uint16_t interface_properties = nstat_ifnet_to_flags(ifp);

			if ((filter->npf_flags & interface_properties) == 0)
			{
				retval = false;
			}
		}
	}
	return retval;
}

static void
nstat_init_route_provider(void)
{
	bzero(&nstat_route_provider, sizeof(nstat_route_provider));
	nstat_route_provider.nstat_descriptor_length = sizeof(nstat_route_descriptor);
	nstat_route_provider.nstat_provider_id = NSTAT_PROVIDER_ROUTE;
	nstat_route_provider.nstat_lookup = nstat_route_lookup;
	nstat_route_provider.nstat_gone = nstat_route_gone;
	nstat_route_provider.nstat_counts = nstat_route_counts;
	nstat_route_provider.nstat_release = nstat_route_release;
	nstat_route_provider.nstat_watcher_add = nstat_route_add_watcher;
	nstat_route_provider.nstat_watcher_remove = nstat_route_remove_watcher;
	nstat_route_provider.nstat_copy_descriptor = nstat_route_copy_descriptor;
	nstat_route_provider.nstat_reporting_allowed = nstat_route_reporting_allowed;
	nstat_route_provider.next = nstat_providers;
	nstat_providers = &nstat_route_provider;
}

#pragma mark -- Route Collection --

__private_extern__ struct nstat_counts*
nstat_route_attach(
	struct rtentry *rte)
{
	struct nstat_counts *result = rte->rt_stats;
	if (result) return result;

	if (nstat_malloc_tag == NULL) nstat_init();

	result = nstat_malloc_aligned(sizeof(*result), sizeof(u_int64_t), nstat_malloc_tag);
	if (!result) return result;

	bzero(result, sizeof(*result));

	if (!OSCompareAndSwapPtr(NULL, result, &rte->rt_stats))
	{
		nstat_free_aligned(result, nstat_malloc_tag);
		result = rte->rt_stats;
	}

	return result;
}

__private_extern__ void
nstat_route_detach(
	struct rtentry *rte)
{
	if (rte->rt_stats)
	{
		nstat_free_aligned(rte->rt_stats, nstat_malloc_tag);
		rte->rt_stats = NULL;
	}
}

__private_extern__ void
nstat_route_connect_attempt(
	struct rtentry *rte)
{
	while (rte)
	{
		struct nstat_counts* stats = nstat_route_attach(rte);
		if (stats)
		{
			OSIncrementAtomic(&stats->nstat_connectattempts);
		}

		rte = rte->rt_parent;
	}
}

__private_extern__ void
nstat_route_connect_success(
	struct rtentry *rte)
{
	// Update this route and all of its parents
	while (rte)
	{
		struct nstat_counts* stats = nstat_route_attach(rte);
		if (stats)
		{
			OSIncrementAtomic(&stats->nstat_connectsuccesses);
		}

		rte = rte->rt_parent;
	}
}

__private_extern__ void
nstat_route_tx(
	struct rtentry *rte,
	u_int32_t packets,
	u_int32_t bytes,
	u_int32_t flags)
{
	while (rte)
	{
		struct nstat_counts* stats = nstat_route_attach(rte);
		if (stats)
		{
			if ((flags & NSTAT_TX_FLAG_RETRANSMIT) != 0)
			{
				OSAddAtomic(bytes, &stats->nstat_txretransmit);
			}
			else
			{
				OSAddAtomic64((SInt64)packets, (SInt64*)&stats->nstat_txpackets);
				OSAddAtomic64((SInt64)bytes, (SInt64*)&stats->nstat_txbytes);
			}
		}

		rte = rte->rt_parent;
	}
}

__private_extern__ void
nstat_route_rx(
	struct rtentry *rte,
	u_int32_t packets,
	u_int32_t bytes,
	u_int32_t flags)
{
	while (rte)
	{
		struct nstat_counts* stats = nstat_route_attach(rte);
		if (stats)
		{
			if (flags == 0)
			{
				OSAddAtomic64((SInt64)packets, (SInt64*)&stats->nstat_rxpackets);
				OSAddAtomic64((SInt64)bytes, (SInt64*)&stats->nstat_rxbytes);
			}
			else
			{
				if (flags & NSTAT_RX_FLAG_OUT_OF_ORDER)
					OSAddAtomic(bytes, &stats->nstat_rxoutoforderbytes);
				if (flags & NSTAT_RX_FLAG_DUPLICATE)
					OSAddAtomic(bytes, &stats->nstat_rxduplicatebytes);
			}
		}

		rte = rte->rt_parent;
	}
}

/* atomically average current value at _val_addr with _new_val and store */
#define NSTAT_EWMA_ATOMIC(_val_addr, _new_val, _decay) do { \
	volatile uint32_t _old_val; \
	volatile uint32_t _avg; \
	do { \
		_old_val = *_val_addr; \
		if (_old_val == 0) \
		{ \
			_avg = _new_val; \
		} \
		else \
		{ \
			_avg = _old_val - (_old_val >> _decay) + (_new_val >> _decay); \
		} \
		if (_old_val == _avg) break; \
	} while (!OSCompareAndSwap(_old_val, _avg, _val_addr)); \
} while (0);
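/*
 * Worked example (editorial, not part of the original source): with
 * _decay == 3 the update is avg' = avg - avg/8 + new/8, i.e. an
 * exponentially weighted moving average with weight 1/8 on the new sample.
 * For avg == 800 and new == 160: avg' = 800 - 100 + 20 = 720.  The
 * compare-and-swap loop retries only if another CPU changed the value
 * between the read and the swap, so concurrent updaters never lose a write.
 */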

/* atomically compute minimum of current value at _val_addr with _new_val and store */
#define NSTAT_MIN_ATOMIC(_val_addr, _new_val) do { \
	volatile uint32_t _old_val; \
	do { \
		_old_val = *_val_addr; \
		if (_old_val != 0 && _old_val < _new_val) \
		{ \
			break; \
		} \
	} while (!OSCompareAndSwap(_old_val, _new_val, _val_addr)); \
} while (0);

__private_extern__ void
nstat_route_rtt(
	struct rtentry *rte,
	u_int32_t rtt,
	u_int32_t rtt_var)
{
	const uint32_t decay = 3;

	while (rte)
	{
		struct nstat_counts* stats = nstat_route_attach(rte);
		if (stats)
		{
			NSTAT_EWMA_ATOMIC(&stats->nstat_avg_rtt, rtt, decay);
			NSTAT_MIN_ATOMIC(&stats->nstat_min_rtt, rtt);
			NSTAT_EWMA_ATOMIC(&stats->nstat_var_rtt, rtt_var, decay);
		}
		rte = rte->rt_parent;
	}
}

__private_extern__ void
nstat_route_update(
	struct rtentry *rte,
	uint32_t connect_attempts,
	uint32_t connect_successes,
	uint32_t rx_packets,
	uint32_t rx_bytes,
	uint32_t rx_duplicatebytes,
	uint32_t rx_outoforderbytes,
	uint32_t tx_packets,
	uint32_t tx_bytes,
	uint32_t tx_retransmit,
	uint32_t rtt,
	uint32_t rtt_var)
{
	const uint32_t decay = 3;

	while (rte)
	{
		struct nstat_counts* stats = nstat_route_attach(rte);
		if (stats)
		{
			OSAddAtomic(connect_attempts, &stats->nstat_connectattempts);
			OSAddAtomic(connect_successes, &stats->nstat_connectsuccesses);
			OSAddAtomic64((SInt64)tx_packets, (SInt64*)&stats->nstat_txpackets);
			OSAddAtomic64((SInt64)tx_bytes, (SInt64*)&stats->nstat_txbytes);
			OSAddAtomic(tx_retransmit, &stats->nstat_txretransmit);
			OSAddAtomic64((SInt64)rx_packets, (SInt64*)&stats->nstat_rxpackets);
			OSAddAtomic64((SInt64)rx_bytes, (SInt64*)&stats->nstat_rxbytes);
			OSAddAtomic(rx_outoforderbytes, &stats->nstat_rxoutoforderbytes);
			OSAddAtomic(rx_duplicatebytes, &stats->nstat_rxduplicatebytes);

			if (rtt != 0) {
				NSTAT_EWMA_ATOMIC(&stats->nstat_avg_rtt, rtt, decay);
				NSTAT_MIN_ATOMIC(&stats->nstat_min_rtt, rtt);
				NSTAT_EWMA_ATOMIC(&stats->nstat_var_rtt, rtt_var, decay);
			}
		}
		rte = rte->rt_parent;
	}
}

#pragma mark -- TCP Kernel Provider --

/*
 * Due to the way the kernel deallocates a process (the process structure
 * might be gone by the time we get the PCB detach notification),
 * we need to cache the process name. Without this, proc_name() would
 * return null and the process name would never be sent to userland.
 *
 * For UDP sockets, we also cache the connection tuples along with
 * the interface index. This is necessary because when UDP sockets are
 * disconnected, the connection tuples are forever lost from the inpcb, thus
 * we need to keep track of the last call to connect() in ntstat.
 */
struct nstat_tucookie {
	struct inpcb *inp;
	char pname[MAXCOMLEN+1];
	bool cached;
	union
	{
		struct sockaddr_in v4;
		struct sockaddr_in6 v6;
	} local;
	union
	{
		struct sockaddr_in v4;
		struct sockaddr_in6 v6;
	} remote;
	unsigned int if_index;
	uint16_t ifnet_properties;
};
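/*
 * Lifecycle sketch (editorial, not part of the original source): when a
 * watched UDP socket disconnects, nstat_pcb_cache() below copies the
 * local/remote tuples, interface index, and interface properties from the
 * inpcb into this cookie and sets 'cached'; nstat_udp_copy_descriptor() then
 * serves the tuples from the cookie instead of the (now cleared) inpcb.  A
 * later connect() clears 'cached' via nstat_pcb_invalidate_cache(), so fresh
 * inpcb state is reported again.
 */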

static struct nstat_tucookie *
nstat_tucookie_alloc_internal(
	struct inpcb *inp,
	bool ref,
	bool locked)
{
	struct nstat_tucookie *cookie;

	cookie = OSMalloc(sizeof(*cookie), nstat_malloc_tag);
	if (cookie == NULL)
		return NULL;
	if (!locked)
		LCK_MTX_ASSERT(&nstat_mtx, LCK_MTX_ASSERT_NOTOWNED);
	if (ref && in_pcb_checkstate(inp, WNT_ACQUIRE, locked) == WNT_STOPUSING)
	{
		OSFree(cookie, sizeof(*cookie), nstat_malloc_tag);
		return NULL;
	}
	bzero(cookie, sizeof(*cookie));
	cookie->inp = inp;
	proc_name(inp->inp_socket->last_pid, cookie->pname,
	    sizeof(cookie->pname));
	/*
	 * We only increment the reference count for UDP sockets because we
	 * only cache UDP socket tuples.
	 */
	if (SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP)
		OSIncrementAtomic(&inp->inp_nstat_refcnt);

	return cookie;
}

static struct nstat_tucookie *
nstat_tucookie_alloc(
	struct inpcb *inp)
{
	return nstat_tucookie_alloc_internal(inp, false, false);
}

static struct nstat_tucookie *
nstat_tucookie_alloc_ref(
	struct inpcb *inp)
{
	return nstat_tucookie_alloc_internal(inp, true, false);
}

static struct nstat_tucookie *
nstat_tucookie_alloc_ref_locked(
	struct inpcb *inp)
{
	return nstat_tucookie_alloc_internal(inp, true, true);
}

static void
nstat_tucookie_release_internal(
	struct nstat_tucookie *cookie,
	int inplock)
{
	if (SOCK_PROTO(cookie->inp->inp_socket) == IPPROTO_UDP)
		OSDecrementAtomic(&cookie->inp->inp_nstat_refcnt);
	in_pcb_checkstate(cookie->inp, WNT_RELEASE, inplock);
	OSFree(cookie, sizeof(*cookie), nstat_malloc_tag);
}

static void
nstat_tucookie_release(
	struct nstat_tucookie *cookie)
{
	nstat_tucookie_release_internal(cookie, false);
}

static void
nstat_tucookie_release_locked(
	struct nstat_tucookie *cookie)
{
	nstat_tucookie_release_internal(cookie, true);
}


static nstat_provider nstat_tcp_provider;

static errno_t
nstat_tcpudp_lookup(
	struct inpcbinfo *inpinfo,
	const void *data,
	u_int32_t length,
	nstat_provider_cookie_t *out_cookie)
{
	struct inpcb *inp = NULL;

	// parameter validation
	const nstat_tcp_add_param *param = (const nstat_tcp_add_param*)data;
	if (length < sizeof(*param))
	{
		return EINVAL;
	}

	// src and dst must match
	if (param->remote.v4.sin_family != 0 &&
	    param->remote.v4.sin_family != param->local.v4.sin_family)
	{
		return EINVAL;
	}


	switch (param->local.v4.sin_family)
	{
	case AF_INET:
	{
		if (param->local.v4.sin_len != sizeof(param->local.v4) ||
		    (param->remote.v4.sin_family != 0 &&
		    param->remote.v4.sin_len != sizeof(param->remote.v4)))
		{
			return EINVAL;
		}

		inp = in_pcblookup_hash(inpinfo, param->remote.v4.sin_addr, param->remote.v4.sin_port,
		    param->local.v4.sin_addr, param->local.v4.sin_port, 1, NULL);
	}
	break;

#if INET6
	case AF_INET6:
	{
		union
		{
			const struct in6_addr *in6c;
			struct in6_addr *in6;
		} local, remote;

		if (param->local.v6.sin6_len != sizeof(param->local.v6) ||
		    (param->remote.v6.sin6_family != 0 &&
		    param->remote.v6.sin6_len != sizeof(param->remote.v6)))
		{
			return EINVAL;
		}

		local.in6c = &param->local.v6.sin6_addr;
		remote.in6c = &param->remote.v6.sin6_addr;

		inp = in6_pcblookup_hash(inpinfo, remote.in6, param->remote.v6.sin6_port,
		    local.in6, param->local.v6.sin6_port, 1, NULL);
	}
	break;
#endif

	default:
		return EINVAL;
	}

	if (inp == NULL)
		return ENOENT;

	// At this point we have a ref to the inpcb
	*out_cookie = nstat_tucookie_alloc(inp);
	if (*out_cookie == NULL)
		in_pcb_checkstate(inp, WNT_RELEASE, 0);

	return 0;
}

static errno_t
nstat_tcp_lookup(
	const void *data,
	u_int32_t length,
	nstat_provider_cookie_t *out_cookie)
{
	return nstat_tcpudp_lookup(&tcbinfo, data, length, out_cookie);
}

static int
nstat_tcp_gone(
	nstat_provider_cookie_t cookie)
{
	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;
	struct inpcb *inp;
	struct tcpcb *tp;

	return (!(inp = tucookie->inp) ||
	    !(tp = intotcpcb(inp)) ||
	    inp->inp_state == INPCB_STATE_DEAD) ? 1 : 0;
}

static errno_t
nstat_tcp_counts(
	nstat_provider_cookie_t cookie,
	struct nstat_counts *out_counts,
	int *out_gone)
{
	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;
	struct inpcb *inp;

	bzero(out_counts, sizeof(*out_counts));

	if (out_gone) *out_gone = 0;

	// if the pcb is in the dead state, we should stop using it
	if (nstat_tcp_gone(cookie))
	{
		if (out_gone) *out_gone = 1;
		if (!(inp = tucookie->inp) || !intotcpcb(inp))
			return EINVAL;
	}
	inp = tucookie->inp;
	struct tcpcb *tp = intotcpcb(inp);

	atomic_get_64(out_counts->nstat_rxpackets, &inp->inp_stat->rxpackets);
	atomic_get_64(out_counts->nstat_rxbytes, &inp->inp_stat->rxbytes);
	atomic_get_64(out_counts->nstat_txpackets, &inp->inp_stat->txpackets);
	atomic_get_64(out_counts->nstat_txbytes, &inp->inp_stat->txbytes);
	out_counts->nstat_rxduplicatebytes = tp->t_stat.rxduplicatebytes;
	out_counts->nstat_rxoutoforderbytes = tp->t_stat.rxoutoforderbytes;
	out_counts->nstat_txretransmit = tp->t_stat.txretransmitbytes;
	out_counts->nstat_connectattempts = tp->t_state >= TCPS_SYN_SENT ? 1 : 0;
	out_counts->nstat_connectsuccesses = tp->t_state >= TCPS_ESTABLISHED ? 1 : 0;
	out_counts->nstat_avg_rtt = tp->t_srtt;
	out_counts->nstat_min_rtt = tp->t_rttbest;
	out_counts->nstat_var_rtt = tp->t_rttvar;
	if (out_counts->nstat_avg_rtt < out_counts->nstat_min_rtt)
		out_counts->nstat_min_rtt = out_counts->nstat_avg_rtt;
	atomic_get_64(out_counts->nstat_cell_rxbytes, &inp->inp_cstat->rxbytes);
	atomic_get_64(out_counts->nstat_cell_txbytes, &inp->inp_cstat->txbytes);
	atomic_get_64(out_counts->nstat_wifi_rxbytes, &inp->inp_wstat->rxbytes);
	atomic_get_64(out_counts->nstat_wifi_txbytes, &inp->inp_wstat->txbytes);
	atomic_get_64(out_counts->nstat_wired_rxbytes, &inp->inp_Wstat->rxbytes);
	atomic_get_64(out_counts->nstat_wired_txbytes, &inp->inp_Wstat->txbytes);

	return 0;
}

static void
nstat_tcp_release(
	nstat_provider_cookie_t cookie,
	int locked)
{
	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;

	nstat_tucookie_release_internal(tucookie, locked);
}

static errno_t
nstat_tcp_add_watcher(
	nstat_control_state *state,
	nstat_msg_add_all_srcs *req)
{
	// There is a tricky issue around getting all TCP sockets added once
	// and only once. nstat_tcp_new_pcb() is called prior to the new item
	// being placed on any lists where it might be found.
	// By locking the tcbinfo.ipi_lock prior to marking the state as a watcher,
	// it should be impossible for a new socket to be added twice.
	// On the other hand, there is still a timing issue where a new socket
	// results in a call to nstat_tcp_new_pcb() before this watcher
	// is instantiated and yet the socket doesn't make it into ipi_listhead
	// prior to the scan. <rdar://problem/30361716>

	errno_t result;

	lck_rw_lock_shared(tcbinfo.ipi_lock);
	result = nstat_set_provider_filter(state, req);
	if (result == 0) {
		OSIncrementAtomic(&nstat_tcp_watchers);

		// Add all current tcp inpcbs. Ignore those in timewait
		struct inpcb *inp;
		struct nstat_tucookie *cookie;
		LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list)
		{
			cookie = nstat_tucookie_alloc_ref(inp);
			if (cookie == NULL)
				continue;
			if (nstat_control_source_add(0, state, &nstat_tcp_provider,
			    cookie) != 0)
			{
				nstat_tucookie_release(cookie);
				break;
			}
		}
	}

	lck_rw_done(tcbinfo.ipi_lock);

	return result;
}

static void
nstat_tcp_remove_watcher(
	__unused nstat_control_state *state)
{
	OSDecrementAtomic(&nstat_tcp_watchers);
}

__private_extern__ void
nstat_tcp_new_pcb(
	struct inpcb *inp)
{
	struct nstat_tucookie *cookie;

	inp->inp_start_timestamp = mach_continuous_time();

	if (nstat_tcp_watchers == 0)
		return;

	socket_lock(inp->inp_socket, 0);
	lck_mtx_lock(&nstat_mtx);
	nstat_control_state *state;
	for (state = nstat_controls; state; state = state->ncs_next)
	{
		if ((state->ncs_watching & (1 << NSTAT_PROVIDER_TCP_KERNEL)) != 0)
		{
			// this client is watching tcp
			// acquire a reference for it
			cookie = nstat_tucookie_alloc_ref_locked(inp);
			if (cookie == NULL)
				continue;
			// add the source, if that fails, release the reference
			if (nstat_control_source_add(0, state,
			    &nstat_tcp_provider, cookie) != 0)
			{
				nstat_tucookie_release_locked(cookie);
				break;
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);
	socket_unlock(inp->inp_socket, 0);
}

__private_extern__ void
nstat_pcb_detach(struct inpcb *inp)
{
	nstat_control_state *state;
	nstat_src *src;
	tailq_head_nstat_src dead_list;
	struct nstat_tucookie *tucookie;
	errno_t result;

	if (inp == NULL || (nstat_tcp_watchers == 0 && nstat_udp_watchers == 0))
		return;

	TAILQ_INIT(&dead_list);
	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next)
	{
		lck_mtx_lock(&state->ncs_mtx);
		TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
		{
			nstat_provider_id_t provider_id = src->provider->nstat_provider_id;
			if (provider_id == NSTAT_PROVIDER_TCP_KERNEL || provider_id == NSTAT_PROVIDER_UDP_KERNEL)
			{
				tucookie = (struct nstat_tucookie *)src->cookie;
				if (tucookie->inp == inp)
					break;
			}
		}

		if (src)
		{
			result = nstat_control_send_goodbye(state, src);

			TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
			TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
		}
		lck_mtx_unlock(&state->ncs_mtx);
	}
	lck_mtx_unlock(&nstat_mtx);

	while ((src = TAILQ_FIRST(&dead_list)))
	{
		TAILQ_REMOVE(&dead_list, src, ns_control_link);
		nstat_control_cleanup_source(NULL, src, TRUE);
	}
}
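/*
 * Editorial note (not part of the original source): nstat_pcb_detach() uses
 * a two-phase teardown.  Matching sources are unlinked onto the local
 * dead_list while nstat_mtx and each ncs_mtx are held, but
 * nstat_control_cleanup_source() runs only after those locks are dropped;
 * cleanup reaches the provider release path (for TCP/UDP sources,
 * in_pcb_checkstate()), which is safer to enter without the nstat locks
 * held given the lock ordering documented at the top of this file.
 */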

__private_extern__ void
nstat_pcb_cache(struct inpcb *inp)
{
	nstat_control_state *state;
	nstat_src *src;
	struct nstat_tucookie *tucookie;

	if (inp == NULL || nstat_udp_watchers == 0 ||
	    inp->inp_nstat_refcnt == 0)
		return;
	VERIFY(SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP);
	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next) {
		lck_mtx_lock(&state->ncs_mtx);
		TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
		{
			tucookie = (struct nstat_tucookie *)src->cookie;
			if (tucookie->inp == inp)
			{
				if (inp->inp_vflag & INP_IPV6)
				{
					in6_ip6_to_sockaddr(&inp->in6p_laddr,
					    inp->inp_lport,
					    &tucookie->local.v6,
					    sizeof(tucookie->local));
					in6_ip6_to_sockaddr(&inp->in6p_faddr,
					    inp->inp_fport,
					    &tucookie->remote.v6,
					    sizeof(tucookie->remote));
				}
				else if (inp->inp_vflag & INP_IPV4)
				{
					nstat_ip_to_sockaddr(&inp->inp_laddr,
					    inp->inp_lport,
					    &tucookie->local.v4,
					    sizeof(tucookie->local));
					nstat_ip_to_sockaddr(&inp->inp_faddr,
					    inp->inp_fport,
					    &tucookie->remote.v4,
					    sizeof(tucookie->remote));
				}
				if (inp->inp_last_outifp)
					tucookie->if_index =
					    inp->inp_last_outifp->if_index;

				tucookie->ifnet_properties = nstat_inpcb_to_flags(inp);
				tucookie->cached = true;
				break;
			}
		}
		lck_mtx_unlock(&state->ncs_mtx);
	}
	lck_mtx_unlock(&nstat_mtx);
}

__private_extern__ void
nstat_pcb_invalidate_cache(struct inpcb *inp)
{
	nstat_control_state *state;
	nstat_src *src;
	struct nstat_tucookie *tucookie;

	if (inp == NULL || nstat_udp_watchers == 0 ||
	    inp->inp_nstat_refcnt == 0)
		return;
	VERIFY(SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP);
	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next) {
		lck_mtx_lock(&state->ncs_mtx);
		TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
		{
			tucookie = (struct nstat_tucookie *)src->cookie;
			if (tucookie->inp == inp)
			{
				tucookie->cached = false;
				break;
			}
		}
		lck_mtx_unlock(&state->ncs_mtx);
	}
	lck_mtx_unlock(&nstat_mtx);
}

static errno_t
nstat_tcp_copy_descriptor(
	nstat_provider_cookie_t cookie,
	void *data,
	u_int32_t len)
{
	if (len < sizeof(nstat_tcp_descriptor))
	{
		return EINVAL;
	}

	if (nstat_tcp_gone(cookie))
		return EINVAL;

	nstat_tcp_descriptor *desc = (nstat_tcp_descriptor*)data;
	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;
	struct inpcb *inp = tucookie->inp;
	struct tcpcb *tp = intotcpcb(inp);
	bzero(desc, sizeof(*desc));

	if (inp->inp_vflag & INP_IPV6)
	{
		in6_ip6_to_sockaddr(&inp->in6p_laddr, inp->inp_lport,
		    &desc->local.v6, sizeof(desc->local));
		in6_ip6_to_sockaddr(&inp->in6p_faddr, inp->inp_fport,
		    &desc->remote.v6, sizeof(desc->remote));
	}
	else if (inp->inp_vflag & INP_IPV4)
	{
		nstat_ip_to_sockaddr(&inp->inp_laddr, inp->inp_lport,
		    &desc->local.v4, sizeof(desc->local));
		nstat_ip_to_sockaddr(&inp->inp_faddr, inp->inp_fport,
		    &desc->remote.v4, sizeof(desc->remote));
	}

	desc->state = intotcpcb(inp)->t_state;
	desc->ifindex = (inp->inp_last_outifp == NULL) ? 0 :
	    inp->inp_last_outifp->if_index;

	// danger - not locked, values could be bogus
	desc->txunacked = tp->snd_max - tp->snd_una;
	desc->txwindow = tp->snd_wnd;
	desc->txcwindow = tp->snd_cwnd;

	if (CC_ALGO(tp)->name != NULL) {
		strlcpy(desc->cc_algo, CC_ALGO(tp)->name,
		    sizeof(desc->cc_algo));
	}

	struct socket *so = inp->inp_socket;
	if (so)
	{
		// TBD - take the socket lock around these to make sure
		// they're in sync?
		desc->upid = so->last_upid;
		desc->pid = so->last_pid;
		desc->traffic_class = so->so_traffic_class;
		if ((so->so_flags1 & SOF1_TRAFFIC_MGT_SO_BACKGROUND))
			desc->traffic_mgt_flags |= TRAFFIC_MGT_SO_BACKGROUND;
		if ((so->so_flags1 & SOF1_TRAFFIC_MGT_TCP_RECVBG))
			desc->traffic_mgt_flags |= TRAFFIC_MGT_TCP_RECVBG;
		proc_name(desc->pid, desc->pname, sizeof(desc->pname));
		if (desc->pname[0] == 0)
		{
			strlcpy(desc->pname, tucookie->pname,
			    sizeof(desc->pname));
		}
		else
		{
			desc->pname[sizeof(desc->pname) - 1] = 0;
			strlcpy(tucookie->pname, desc->pname,
			    sizeof(tucookie->pname));
		}
		memcpy(desc->uuid, so->last_uuid, sizeof(so->last_uuid));
		memcpy(desc->vuuid, so->so_vuuid, sizeof(so->so_vuuid));
		if (so->so_flags & SOF_DELEGATED) {
			desc->eupid = so->e_upid;
			desc->epid = so->e_pid;
			memcpy(desc->euuid, so->e_uuid, sizeof(so->e_uuid));
		} else {
			desc->eupid = desc->upid;
			desc->epid = desc->pid;
			memcpy(desc->euuid, desc->uuid, sizeof(desc->uuid));
		}
		desc->sndbufsize = so->so_snd.sb_hiwat;
		desc->sndbufused = so->so_snd.sb_cc;
		desc->rcvbufsize = so->so_rcv.sb_hiwat;
		desc->rcvbufused = so->so_rcv.sb_cc;
	}

	tcp_get_connectivity_status(tp, &desc->connstatus);
	desc->ifnet_properties = nstat_inpcb_to_flags(inp);
	inp_get_activity_bitmap(inp, &desc->activity_bitmap);
	desc->start_timestamp = inp->inp_start_timestamp;
	desc->timestamp = mach_continuous_time();
	return 0;
}

static bool
nstat_tcpudp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter, bool is_UDP)
{
	bool retval = true;

	if ((filter->npf_flags & (NSTAT_FILTER_IFNET_FLAGS|NSTAT_FILTER_SPECIFIC_USER)) != 0)
	{
		struct nstat_tucookie *tucookie = (struct nstat_tucookie *)cookie;
		struct inpcb *inp = tucookie->inp;

		/* Only apply interface filter if at least one is allowed. */
		if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0)
		{
			uint16_t interface_properties = nstat_inpcb_to_flags(inp);

			if ((filter->npf_flags & interface_properties) == 0)
			{
				// For UDP, we could have an undefined interface and yet transfers may have occurred.
				// We allow reporting if there have been transfers of the requested kind.
				// This is imperfect as we cannot account for the expensive attribute over wifi.
				// We also assume that cellular is expensive and we have no way to select for AWDL.
				if (is_UDP)
				{
					do
					{
						if ((filter->npf_flags & (NSTAT_FILTER_ACCEPT_CELLULAR|NSTAT_FILTER_ACCEPT_EXPENSIVE)) &&
						    (inp->inp_cstat->rxbytes || inp->inp_cstat->txbytes))
						{
							break;
						}
						if ((filter->npf_flags & NSTAT_FILTER_ACCEPT_WIFI) &&
						    (inp->inp_wstat->rxbytes || inp->inp_wstat->txbytes))
						{
							break;
						}
						if ((filter->npf_flags & NSTAT_FILTER_ACCEPT_WIRED) &&
						    (inp->inp_Wstat->rxbytes || inp->inp_Wstat->txbytes))
						{
							break;
						}
						return false;
					} while (0);
				}
				else
				{
					return false;
				}
			}
		}

		if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0) && (retval))
		{
			struct socket *so = inp->inp_socket;
			retval = false;

			if (so)
			{
				if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_PID) != 0) &&
				    (filter->npf_pid == so->last_pid))
				{
					retval = true;
				}
				else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EPID) != 0) &&
				    (filter->npf_pid == ((so->so_flags & SOF_DELEGATED) ? so->e_pid : so->last_pid)))
				{
					retval = true;
				}
				else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_UUID) != 0) &&
				    (memcmp(filter->npf_uuid, so->last_uuid, sizeof(so->last_uuid)) == 0))
				{
					retval = true;
				}
				else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EUUID) != 0) &&
				    (memcmp(filter->npf_uuid, (so->so_flags & SOF_DELEGATED)? so->e_uuid : so->last_uuid,
				    sizeof(so->last_uuid)) == 0))
				{
					retval = true;
				}
			}
		}
	}
	return retval;
}

static bool
nstat_tcp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter)
{
	return nstat_tcpudp_reporting_allowed(cookie, filter, FALSE);
}

static void
nstat_init_tcp_provider(void)
{
	bzero(&nstat_tcp_provider, sizeof(nstat_tcp_provider));
	nstat_tcp_provider.nstat_descriptor_length = sizeof(nstat_tcp_descriptor);
	nstat_tcp_provider.nstat_provider_id = NSTAT_PROVIDER_TCP_KERNEL;
	nstat_tcp_provider.nstat_lookup = nstat_tcp_lookup;
	nstat_tcp_provider.nstat_gone = nstat_tcp_gone;
	nstat_tcp_provider.nstat_counts = nstat_tcp_counts;
	nstat_tcp_provider.nstat_release = nstat_tcp_release;
	nstat_tcp_provider.nstat_watcher_add = nstat_tcp_add_watcher;
	nstat_tcp_provider.nstat_watcher_remove = nstat_tcp_remove_watcher;
	nstat_tcp_provider.nstat_copy_descriptor = nstat_tcp_copy_descriptor;
	nstat_tcp_provider.nstat_reporting_allowed = nstat_tcp_reporting_allowed;
	nstat_tcp_provider.next = nstat_providers;
	nstat_providers = &nstat_tcp_provider;
}

#pragma mark -- UDP Provider --

static nstat_provider nstat_udp_provider;

static errno_t
nstat_udp_lookup(
	const void *data,
	u_int32_t length,
	nstat_provider_cookie_t *out_cookie)
{
	return nstat_tcpudp_lookup(&udbinfo, data, length, out_cookie);
}

static int
nstat_udp_gone(
	nstat_provider_cookie_t cookie)
{
	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;
	struct inpcb *inp;

	return (!(inp = tucookie->inp) ||
	    inp->inp_state == INPCB_STATE_DEAD) ? 1 : 0;
}

static errno_t
nstat_udp_counts(
	nstat_provider_cookie_t cookie,
	struct nstat_counts *out_counts,
	int *out_gone)
{
	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;

	if (out_gone) *out_gone = 0;

	// if the pcb is in the dead state, we should stop using it
	if (nstat_udp_gone(cookie))
	{
		if (out_gone) *out_gone = 1;
		if (!tucookie->inp)
			return EINVAL;
	}
	struct inpcb *inp = tucookie->inp;

	atomic_get_64(out_counts->nstat_rxpackets, &inp->inp_stat->rxpackets);
	atomic_get_64(out_counts->nstat_rxbytes, &inp->inp_stat->rxbytes);
	atomic_get_64(out_counts->nstat_txpackets, &inp->inp_stat->txpackets);
	atomic_get_64(out_counts->nstat_txbytes, &inp->inp_stat->txbytes);
	atomic_get_64(out_counts->nstat_cell_rxbytes, &inp->inp_cstat->rxbytes);
	atomic_get_64(out_counts->nstat_cell_txbytes, &inp->inp_cstat->txbytes);
	atomic_get_64(out_counts->nstat_wifi_rxbytes, &inp->inp_wstat->rxbytes);
	atomic_get_64(out_counts->nstat_wifi_txbytes, &inp->inp_wstat->txbytes);
	atomic_get_64(out_counts->nstat_wired_rxbytes, &inp->inp_Wstat->rxbytes);
	atomic_get_64(out_counts->nstat_wired_txbytes, &inp->inp_Wstat->txbytes);

	return 0;
}

static void
nstat_udp_release(
	nstat_provider_cookie_t cookie,
	int locked)
{
	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;

	nstat_tucookie_release_internal(tucookie, locked);
}

static errno_t
nstat_udp_add_watcher(
	nstat_control_state *state,
	nstat_msg_add_all_srcs *req)
{
	// There is a tricky issue around getting all UDP sockets added once
	// and only once. nstat_udp_new_pcb() is called prior to the new item
	// being placed on any lists where it might be found.
	// By locking the udpinfo.ipi_lock prior to marking the state as a watcher,
	// it should be impossible for a new socket to be added twice.
	// On the other hand, there is still a timing issue where a new socket
	// results in a call to nstat_udp_new_pcb() before this watcher
	// is instantiated and yet the socket doesn't make it into ipi_listhead
	// prior to the scan. <rdar://problem/30361716>

	errno_t result;

	lck_rw_lock_shared(udbinfo.ipi_lock);
	result = nstat_set_provider_filter(state, req);

	if (result == 0) {
		struct inpcb *inp;
		struct nstat_tucookie *cookie;

		OSIncrementAtomic(&nstat_udp_watchers);

		// Add all current UDP inpcbs.
		LIST_FOREACH(inp, udbinfo.ipi_listhead, inp_list)
		{
			cookie = nstat_tucookie_alloc_ref(inp);
			if (cookie == NULL)
				continue;
			if (nstat_control_source_add(0, state, &nstat_udp_provider,
			    cookie) != 0)
			{
				nstat_tucookie_release(cookie);
				break;
			}
		}
	}

	lck_rw_done(udbinfo.ipi_lock);

	return result;
}

static void
nstat_udp_remove_watcher(
	__unused nstat_control_state *state)
{
	OSDecrementAtomic(&nstat_udp_watchers);
}

__private_extern__ void
nstat_udp_new_pcb(
	struct inpcb *inp)
{
	struct nstat_tucookie *cookie;

	inp->inp_start_timestamp = mach_continuous_time();

	if (nstat_udp_watchers == 0)
		return;

	socket_lock(inp->inp_socket, 0);
	lck_mtx_lock(&nstat_mtx);
	nstat_control_state *state;
	for (state = nstat_controls; state; state = state->ncs_next)
	{
		if ((state->ncs_watching & (1 << NSTAT_PROVIDER_UDP_KERNEL)) != 0)
		{
			// this client is watching udp
			// acquire a reference for it
			cookie = nstat_tucookie_alloc_ref_locked(inp);
			if (cookie == NULL)
				continue;
			// add the source, if that fails, release the reference
			if (nstat_control_source_add(0, state,
			    &nstat_udp_provider, cookie) != 0)
			{
				nstat_tucookie_release_locked(cookie);
				break;
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);
	socket_unlock(inp->inp_socket, 0);
}

static errno_t
nstat_udp_copy_descriptor(
	nstat_provider_cookie_t cookie,
	void *data,
	u_int32_t len)
{
	if (len < sizeof(nstat_udp_descriptor))
	{
		return EINVAL;
	}

	if (nstat_udp_gone(cookie))
		return EINVAL;

	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;
	nstat_udp_descriptor *desc = (nstat_udp_descriptor*)data;
	struct inpcb *inp = tucookie->inp;

	bzero(desc, sizeof(*desc));

	if (tucookie->cached == false) {
		if (inp->inp_vflag & INP_IPV6)
		{
			in6_ip6_to_sockaddr(&inp->in6p_laddr, inp->inp_lport,
			    &desc->local.v6, sizeof(desc->local.v6));
			in6_ip6_to_sockaddr(&inp->in6p_faddr, inp->inp_fport,
			    &desc->remote.v6, sizeof(desc->remote.v6));
		}
		else if (inp->inp_vflag & INP_IPV4)
		{
			nstat_ip_to_sockaddr(&inp->inp_laddr, inp->inp_lport,
			    &desc->local.v4, sizeof(desc->local.v4));
			nstat_ip_to_sockaddr(&inp->inp_faddr, inp->inp_fport,
			    &desc->remote.v4, sizeof(desc->remote.v4));
		}
		desc->ifnet_properties = nstat_inpcb_to_flags(inp);
	}
	else
	{
		if (inp->inp_vflag & INP_IPV6)
		{
			memcpy(&desc->local.v6, &tucookie->local.v6,
			    sizeof(desc->local.v6));
			memcpy(&desc->remote.v6, &tucookie->remote.v6,
			    sizeof(desc->remote.v6));
		}
		else if (inp->inp_vflag & INP_IPV4)
		{
			memcpy(&desc->local.v4, &tucookie->local.v4,
			    sizeof(desc->local.v4));
			memcpy(&desc->remote.v4, &tucookie->remote.v4,
			    sizeof(desc->remote.v4));
		}
		desc->ifnet_properties = tucookie->ifnet_properties;
	}

	if (inp->inp_last_outifp)
		desc->ifindex = inp->inp_last_outifp->if_index;
	else
		desc->ifindex = tucookie->if_index;

	struct socket *so = inp->inp_socket;
	if (so)
	{
		// TBD - take the socket lock around these to make sure
		// they're in sync?
		desc->upid = so->last_upid;
		desc->pid = so->last_pid;
		proc_name(desc->pid, desc->pname, sizeof(desc->pname));
		if (desc->pname[0] == 0)
		{
			strlcpy(desc->pname, tucookie->pname,
			    sizeof(desc->pname));
		}
		else
		{
			desc->pname[sizeof(desc->pname) - 1] = 0;
			strlcpy(tucookie->pname, desc->pname,
			    sizeof(tucookie->pname));
		}
		memcpy(desc->uuid, so->last_uuid, sizeof(so->last_uuid));
		memcpy(desc->vuuid, so->so_vuuid, sizeof(so->so_vuuid));
		if (so->so_flags & SOF_DELEGATED) {
			desc->eupid = so->e_upid;
			desc->epid = so->e_pid;
			memcpy(desc->euuid, so->e_uuid, sizeof(so->e_uuid));
		} else {
			desc->eupid = desc->upid;
			desc->epid = desc->pid;
			memcpy(desc->euuid, desc->uuid, sizeof(desc->uuid));
		}
		desc->rcvbufsize = so->so_rcv.sb_hiwat;
		desc->rcvbufused = so->so_rcv.sb_cc;
		desc->traffic_class = so->so_traffic_class;
		inp_get_activity_bitmap(inp, &desc->activity_bitmap);
		desc->start_timestamp = inp->inp_start_timestamp;
		desc->timestamp = mach_continuous_time();
	}

	return 0;
}

static bool
nstat_udp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter)
{
	return nstat_tcpudp_reporting_allowed(cookie, filter, TRUE);
}


static void
nstat_init_udp_provider(void)
{
	bzero(&nstat_udp_provider, sizeof(nstat_udp_provider));
	nstat_udp_provider.nstat_provider_id = NSTAT_PROVIDER_UDP_KERNEL;
	nstat_udp_provider.nstat_descriptor_length = sizeof(nstat_udp_descriptor);
	nstat_udp_provider.nstat_lookup = nstat_udp_lookup;
	nstat_udp_provider.nstat_gone = nstat_udp_gone;
	nstat_udp_provider.nstat_counts = nstat_udp_counts;
	nstat_udp_provider.nstat_watcher_add = nstat_udp_add_watcher;
	nstat_udp_provider.nstat_watcher_remove = nstat_udp_remove_watcher;
	nstat_udp_provider.nstat_copy_descriptor = nstat_udp_copy_descriptor;
	nstat_udp_provider.nstat_release = nstat_udp_release;
	nstat_udp_provider.nstat_reporting_allowed = nstat_udp_reporting_allowed;
	nstat_udp_provider.next = nstat_providers;
	nstat_providers = &nstat_udp_provider;
}



#pragma mark -- ifnet Provider --

static nstat_provider nstat_ifnet_provider;

/*
 * We store a pointer to the ifnet and the original threshold
 * requested by the client.
 */
struct nstat_ifnet_cookie
{
	struct ifnet *ifp;
	uint64_t threshold;
};

static errno_t
nstat_ifnet_lookup(
	const void *data,
	u_int32_t length,
	nstat_provider_cookie_t *out_cookie)
{
	const nstat_ifnet_add_param *param = (const nstat_ifnet_add_param *)data;
	struct ifnet *ifp;
	boolean_t changed = FALSE;
	nstat_control_state *state;
	nstat_src *src;
	struct nstat_ifnet_cookie *cookie;

	if (length < sizeof(*param) || param->threshold < 1024*1024)
		return EINVAL;
	if (nstat_privcheck != 0) {
		errno_t result = priv_check_cred(kauth_cred_get(),
		    PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0);
2008 if (result != 0)
2009 return result;
2010 }
2011 cookie = OSMalloc(sizeof(*cookie), nstat_malloc_tag);
2012 if (cookie == NULL)
2013 return ENOMEM;
2014 bzero(cookie, sizeof(*cookie));
2015
2016 ifnet_head_lock_shared();
2017 TAILQ_FOREACH(ifp, &ifnet_head, if_link)
2018 {
2019 ifnet_lock_exclusive(ifp);
2020 if (ifp->if_index == param->ifindex)
2021 {
2022 cookie->ifp = ifp;
2023 cookie->threshold = param->threshold;
2024 *out_cookie = cookie;
2025 if (!ifp->if_data_threshold ||
2026 ifp->if_data_threshold > param->threshold)
2027 {
2028 changed = TRUE;
2029 ifp->if_data_threshold = param->threshold;
2030 }
2031 ifnet_lock_done(ifp);
2032 ifnet_reference(ifp);
2033 break;
2034 }
2035 ifnet_lock_done(ifp);
2036 }
2037 ifnet_head_done();
2038
2039 /*
2040 * When we change the threshold to something smaller, we notify
2041 * all of our clients with a description message.
2042 * We won't send a message to the client we are currently serving
2043 * because it has no `ifnet source' yet.
2044 */
2045 if (changed)
2046 {
2047 lck_mtx_lock(&nstat_mtx);
2048 for (state = nstat_controls; state; state = state->ncs_next)
2049 {
2050 lck_mtx_lock(&state->ncs_mtx);
2051 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
2052 {
2053 if (src->provider != &nstat_ifnet_provider)
2054 continue;
2055 nstat_control_send_description(state, src, 0, 0);
2056 }
2057 lck_mtx_unlock(&state->ncs_mtx);
2058 }
2059 lck_mtx_unlock(&nstat_mtx);
2060 }
2061 if (cookie->ifp == NULL)
2062 OSFree(cookie, sizeof(*cookie), nstat_malloc_tag);
2063
2064 return ifp ? 0 : EINVAL;
2065 }
2066
2067 static int
2068 nstat_ifnet_gone(
2069 nstat_provider_cookie_t cookie)
2070 {
2071 struct ifnet *ifp;
2072 struct nstat_ifnet_cookie *ifcookie =
2073 (struct nstat_ifnet_cookie *)cookie;
2074
2075 ifnet_head_lock_shared();
2076 TAILQ_FOREACH(ifp, &ifnet_head, if_link)
2077 {
2078 if (ifp == ifcookie->ifp)
2079 break;
2080 }
2081 ifnet_head_done();
2082
2083 return ifp ? 0 : 1;
2084 }
2085
2086 static errno_t
2087 nstat_ifnet_counts(
2088 nstat_provider_cookie_t cookie,
2089 struct nstat_counts *out_counts,
2090 int *out_gone)
2091 {
2092 struct nstat_ifnet_cookie *ifcookie =
2093 (struct nstat_ifnet_cookie *)cookie;
2094 struct ifnet *ifp = ifcookie->ifp;
2095
2096 if (out_gone) *out_gone = 0;
2097
2098 // if the ifnet is gone, we should stop using it
2099 if (nstat_ifnet_gone(cookie))
2100 {
2101 if (out_gone) *out_gone = 1;
2102 return EINVAL;
2103 }
2104
2105 bzero(out_counts, sizeof(*out_counts));
2106 out_counts->nstat_rxpackets = ifp->if_ipackets;
2107 out_counts->nstat_rxbytes = ifp->if_ibytes;
2108 out_counts->nstat_txpackets = ifp->if_opackets;
2109 out_counts->nstat_txbytes = ifp->if_obytes;
2110 out_counts->nstat_cell_rxbytes = out_counts->nstat_cell_txbytes = 0;
2111 return 0;
2112 }
2113
2114 static void
2115 nstat_ifnet_release(
2116 nstat_provider_cookie_t cookie,
2117 __unused int locked)
2118 {
2119 struct nstat_ifnet_cookie *ifcookie;
2120 struct ifnet *ifp;
2121 nstat_control_state *state;
2122 nstat_src *src;
2123 uint64_t minthreshold = UINT64_MAX;
2124
2125 /*
2126 * Find all the clients that requested a threshold
2127 * for this ifnet and re-calculate if_data_threshold.
2128 */
2129 lck_mtx_lock(&nstat_mtx);
2130 for (state = nstat_controls; state; state = state->ncs_next)
2131 {
2132 lck_mtx_lock(&state->ncs_mtx);
2133 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
2134 {
2135 /* Skip the provider we are about to detach. */
2136 if (src->provider != &nstat_ifnet_provider ||
2137 src->cookie == cookie)
2138 continue;
2139 ifcookie = (struct nstat_ifnet_cookie *)src->cookie;
2140 if (ifcookie->threshold < minthreshold)
2141 minthreshold = ifcookie->threshold;
2142 }
2143 lck_mtx_unlock(&state->ncs_mtx);
2144 }
2145 lck_mtx_unlock(&nstat_mtx);
2146 /*
2147 * Reset if_data_threshold or disable it.
2148 */
2149 ifcookie = (struct nstat_ifnet_cookie *)cookie;
2150 ifp = ifcookie->ifp;
2151 if (ifnet_is_attached(ifp, 1)) {
2152 ifnet_lock_exclusive(ifp);
2153 if (minthreshold == UINT64_MAX)
2154 ifp->if_data_threshold = 0;
2155 else
2156 ifp->if_data_threshold = minthreshold;
2157 ifnet_lock_done(ifp);
2158 ifnet_decr_iorefcnt(ifp);
2159 }
2160 ifnet_release(ifp);
2161 OSFree(ifcookie, sizeof(*ifcookie), nstat_malloc_tag);
2162 }
2163
2164 static void
2165 nstat_ifnet_copy_link_status(
2166 struct ifnet *ifp,
2167 struct nstat_ifnet_descriptor *desc)
2168 {
2169 struct if_link_status *ifsr = ifp->if_link_status;
2170 nstat_ifnet_desc_link_status *link_status = &desc->link_status;
2171
2172 link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_NONE;
2173 if (ifsr == NULL)
2174 return;
2175
2176 lck_rw_lock_shared(&ifp->if_link_status_lock);
2177
2178 if (ifp->if_type == IFT_CELLULAR) {
2179
2180 nstat_ifnet_desc_cellular_status *cell_status = &link_status->u.cellular;
2181 struct if_cellular_status_v1 *if_cell_sr =
2182 &ifsr->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
2183
2184 if (ifsr->ifsr_version != IF_CELLULAR_STATUS_REPORT_VERSION_1)
2185 goto done;
2186
2187 link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_CELLULAR;
2188
2189 if (if_cell_sr->valid_bitmask & IF_CELL_LINK_QUALITY_METRIC_VALID) {
2190 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_LINK_QUALITY_METRIC_VALID;
2191 cell_status->link_quality_metric = if_cell_sr->link_quality_metric;
2192 }
2193 if (if_cell_sr->valid_bitmask & IF_CELL_UL_EFFECTIVE_BANDWIDTH_VALID) {
2194 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_EFFECTIVE_BANDWIDTH_VALID;
2195 cell_status->ul_effective_bandwidth = if_cell_sr->ul_effective_bandwidth;
2196 }
2197 if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_BANDWIDTH_VALID) {
2198 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_BANDWIDTH_VALID;
2199 cell_status->ul_max_bandwidth = if_cell_sr->ul_max_bandwidth;
2200 }
2201 if (if_cell_sr->valid_bitmask & IF_CELL_UL_MIN_LATENCY_VALID) {
2202 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MIN_LATENCY_VALID;
2203 cell_status->ul_min_latency = if_cell_sr->ul_min_latency;
2204 }
2205 if (if_cell_sr->valid_bitmask & IF_CELL_UL_EFFECTIVE_LATENCY_VALID) {
2206 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_EFFECTIVE_LATENCY_VALID;
2207 cell_status->ul_effective_latency = if_cell_sr->ul_effective_latency;
2208 }
2209 if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_LATENCY_VALID) {
2210 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_LATENCY_VALID;
2211 cell_status->ul_max_latency = if_cell_sr->ul_max_latency;
2212 }
2213 if (if_cell_sr->valid_bitmask & IF_CELL_UL_RETXT_LEVEL_VALID) {
2214 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_VALID;
2215 if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_NONE)
2216 cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_NONE;
2217 else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_LOW)
2218 cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_LOW;
2219 else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_MEDIUM)
2220 cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_MEDIUM;
2221 else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_HIGH)
2222 cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_HIGH;
2223 else
2224 cell_status->valid_bitmask &= ~NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_VALID;
2225 }
2226 if (if_cell_sr->valid_bitmask & IF_CELL_UL_BYTES_LOST_VALID) {
2227 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_BYTES_LOST_VALID;
2228 cell_status->ul_bytes_lost = if_cell_sr->ul_bytes_lost;
2229 }
2230 if (if_cell_sr->valid_bitmask & IF_CELL_UL_MIN_QUEUE_SIZE_VALID) {
2231 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MIN_QUEUE_SIZE_VALID;
2232 cell_status->ul_min_queue_size = if_cell_sr->ul_min_queue_size;
2233 }
2234 if (if_cell_sr->valid_bitmask & IF_CELL_UL_AVG_QUEUE_SIZE_VALID) {
2235 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_AVG_QUEUE_SIZE_VALID;
2236 cell_status->ul_avg_queue_size = if_cell_sr->ul_avg_queue_size;
2237 }
2238 if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_QUEUE_SIZE_VALID) {
2239 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_QUEUE_SIZE_VALID;
2240 cell_status->ul_max_queue_size = if_cell_sr->ul_max_queue_size;
2241 }
2242 if (if_cell_sr->valid_bitmask & IF_CELL_DL_EFFECTIVE_BANDWIDTH_VALID) {
2243 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_DL_EFFECTIVE_BANDWIDTH_VALID;
2244 cell_status->dl_effective_bandwidth = if_cell_sr->dl_effective_bandwidth;
2245 }
2246 if (if_cell_sr->valid_bitmask & IF_CELL_DL_MAX_BANDWIDTH_VALID) {
2247 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_DL_MAX_BANDWIDTH_VALID;
2248 cell_status->dl_max_bandwidth = if_cell_sr->dl_max_bandwidth;
2249 }
2250 if (if_cell_sr->valid_bitmask & IF_CELL_CONFIG_INACTIVITY_TIME_VALID) {
2251 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_CONFIG_INACTIVITY_TIME_VALID;
2252 cell_status->config_inactivity_time = if_cell_sr->config_inactivity_time;
2253 }
2254 if (if_cell_sr->valid_bitmask & IF_CELL_CONFIG_BACKOFF_TIME_VALID) {
2255 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_CONFIG_BACKOFF_TIME_VALID;
2256 cell_status->config_backoff_time = if_cell_sr->config_backoff_time;
2257 }
2258 if (if_cell_sr->valid_bitmask & IF_CELL_UL_MSS_RECOMMENDED_VALID) {
2259 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_MSS_RECOMMENDED_VALID;
2260 cell_status->mss_recommended = if_cell_sr->mss_recommended;
2261 }
2262 } else if (ifp->if_subfamily == IFNET_SUBFAMILY_WIFI) {
2263
2264 nstat_ifnet_desc_wifi_status *wifi_status = &link_status->u.wifi;
2265 struct if_wifi_status_v1 *if_wifi_sr =
2266 &ifsr->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;
2267
2268 if (ifsr->ifsr_version != IF_WIFI_STATUS_REPORT_VERSION_1)
2269 goto done;
2270
2271 link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_WIFI;
2272
2273 if (if_wifi_sr->valid_bitmask & IF_WIFI_LINK_QUALITY_METRIC_VALID) {
2274 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_LINK_QUALITY_METRIC_VALID;
2275 wifi_status->link_quality_metric = if_wifi_sr->link_quality_metric;
2276 }
2277 if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID) {
2278 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID;
2279 wifi_status->ul_effective_bandwidth = if_wifi_sr->ul_effective_bandwidth;
2280 }
2281 if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MAX_BANDWIDTH_VALID) {
2282 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MAX_BANDWIDTH_VALID;
2283 wifi_status->ul_max_bandwidth = if_wifi_sr->ul_max_bandwidth;
2284 }
2285 if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MIN_LATENCY_VALID) {
2286 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MIN_LATENCY_VALID;
2287 wifi_status->ul_min_latency = if_wifi_sr->ul_min_latency;
2288 }
2289 if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_EFFECTIVE_LATENCY_VALID) {
2290 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_EFFECTIVE_LATENCY_VALID;
2291 wifi_status->ul_effective_latency = if_wifi_sr->ul_effective_latency;
2292 }
2293 if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MAX_LATENCY_VALID) {
2294 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MAX_LATENCY_VALID;
2295 wifi_status->ul_max_latency = if_wifi_sr->ul_max_latency;
2296 }
2297 if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_RETXT_LEVEL_VALID) {
2298 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_VALID;
2299 if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_NONE)
2300 wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_NONE;
2301 else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_LOW)
2302 wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_LOW;
2303 else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_MEDIUM)
2304 wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_MEDIUM;
2305 else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_HIGH)
2306 wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_HIGH;
2307 else
2308 wifi_status->valid_bitmask &= ~NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_VALID;
2309 }
2310 if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_BYTES_LOST_VALID) {
2311 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_BYTES_LOST_VALID;
2312 wifi_status->ul_bytes_lost = if_wifi_sr->ul_bytes_lost;
2313 }
2314 if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_ERROR_RATE_VALID) {
2315 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_ERROR_RATE_VALID;
2316 wifi_status->ul_error_rate = if_wifi_sr->ul_error_rate;
2317 }
2318 if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID) {
2319 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID;
2320 wifi_status->dl_effective_bandwidth = if_wifi_sr->dl_effective_bandwidth;
2321 }
2322 if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MAX_BANDWIDTH_VALID) {
2323 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MAX_BANDWIDTH_VALID;
2324 wifi_status->dl_max_bandwidth = if_wifi_sr->dl_max_bandwidth;
2325 }
2326 if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MIN_LATENCY_VALID) {
2327 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MIN_LATENCY_VALID;
2328 wifi_status->dl_min_latency = if_wifi_sr->dl_min_latency;
2329 }
2330 if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_EFFECTIVE_LATENCY_VALID) {
2331 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_EFFECTIVE_LATENCY_VALID;
2332 wifi_status->dl_effective_latency = if_wifi_sr->dl_effective_latency;
2333 }
2334 if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MAX_LATENCY_VALID) {
2335 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MAX_LATENCY_VALID;
2336 wifi_status->dl_max_latency = if_wifi_sr->dl_max_latency;
2337 }
2338 if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_ERROR_RATE_VALID) {
2339 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_ERROR_RATE_VALID;
2340 wifi_status->dl_error_rate = if_wifi_sr->dl_error_rate;
2341 }
2342 if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_FREQUENCY_VALID) {
2343 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_VALID;
2344 if (if_wifi_sr->config_frequency == IF_WIFI_CONFIG_FREQUENCY_2_4_GHZ)
2345 wifi_status->config_frequency = NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_2_4_GHZ;
2346 else if (if_wifi_sr->config_frequency == IF_WIFI_CONFIG_FREQUENCY_5_0_GHZ)
2347 wifi_status->config_frequency = NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_5_0_GHZ;
2348 else
2349 wifi_status->valid_bitmask &= ~NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_VALID;
2350 }
2351 if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_MULTICAST_RATE_VALID) {
2352 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_MULTICAST_RATE_VALID;
2353 wifi_status->config_multicast_rate = if_wifi_sr->config_multicast_rate;
2354 }
2355 if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_SCAN_COUNT_VALID) {
2356 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_SCAN_COUNT_VALID;
2357 wifi_status->scan_count = if_wifi_sr->scan_count;
2358 }
2359 if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_SCAN_DURATION_VALID) {
2360 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_SCAN_DURATION_VALID;
2361 wifi_status->scan_duration = if_wifi_sr->scan_duration;
2362 }
2363 }
2364
2365 done:
2366 lck_rw_done(&ifp->if_link_status_lock);
2367 }
2368
2369 static u_int64_t nstat_ifnet_last_report_time = 0;
2370 extern int tcp_report_stats_interval;
2371
2372 static void
2373 nstat_ifnet_compute_percentages(struct if_tcp_ecn_perf_stat *ifst)
2374 {
2375 /* Retransmit percentage */
2376 if (ifst->total_rxmitpkts > 0 && ifst->total_txpkts > 0) {
2377 /* shift by 10 for precision */
2378 ifst->rxmit_percent =
2379 ((ifst->total_rxmitpkts << 10) * 100) / ifst->total_txpkts;
2380 } else {
2381 ifst->rxmit_percent = 0;
2382 }
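/*
 * Worked example (editor's sketch of the encoding used here): 50
 * retransmits out of 1000 transmitted packets gives
 * ((50 << 10) * 100) / 1000 = 5120 = 5 << 10, i.e. 5% carried with
 * 10 bits of fraction; consumers divide by 1024 to read it back.
 */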
2383
2384 /* Out-of-order percentage */
2385 if (ifst->total_oopkts > 0 && ifst->total_rxpkts > 0) {
2386 /* shift by 10 for precision */
2387 ifst->oo_percent =
2388 ((ifst->total_oopkts << 10) * 100) / ifst->total_rxpkts;
2389 } else {
2390 ifst->oo_percent = 0;
2391 }
2392
2393 /* Reorder percentage */
2394 if (ifst->total_reorderpkts > 0 &&
2395 (ifst->total_txpkts + ifst->total_rxpkts) > 0) {
2396 /* shift by 10 for precision */
2397 ifst->reorder_percent =
2398 ((ifst->total_reorderpkts << 10) * 100) /
2399 (ifst->total_txpkts + ifst->total_rxpkts);
2400 } else {
2401 ifst->reorder_percent = 0;
2402 }
2403 }
2404
2405 static void
2406 nstat_ifnet_normalize_counter(struct if_tcp_ecn_stat *if_st)
2407 {
2408 u_int64_t ecn_on_conn, ecn_off_conn;
2409
2410 if (if_st == NULL)
2411 return;
2412 ecn_on_conn = if_st->ecn_client_success +
2413 if_st->ecn_server_success;
2414 ecn_off_conn = if_st->ecn_off_conn +
2415 (if_st->ecn_client_setup - if_st->ecn_client_success) +
2416 (if_st->ecn_server_setup - if_st->ecn_server_success);
2417
2418 /*
2419 * report sack episodes as a per-connection ratio, and rst_drop and
2420 * rxmit_drop as per-connection percentages; shift by 10 for precision
2421 */
2422 if (ecn_on_conn > 0) {
2423 if_st->ecn_on.sack_episodes =
2424 (if_st->ecn_on.sack_episodes << 10) / ecn_on_conn;
2425 if_st->ecn_on.rst_drop =
2426 (if_st->ecn_on.rst_drop << 10) * 100 / ecn_on_conn;
2427 if_st->ecn_on.rxmit_drop =
2428 (if_st->ecn_on.rxmit_drop << 10) * 100 / ecn_on_conn;
2429 } else {
2430 /* set to zero, just in case */
2431 if_st->ecn_on.sack_episodes = 0;
2432 if_st->ecn_on.rst_drop = 0;
2433 if_st->ecn_on.rxmit_drop = 0;
2434 }
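/*
 * E.g. 8 SACK episodes across 4 ECN connections encodes as
 * (8 << 10) / 4 = 2048, a per-connection ratio of 2 in Q10;
 * rst_drop and rxmit_drop carry an extra factor of 100 and
 * therefore decode as percentages.
 */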
2435
2436 if (ecn_off_conn > 0) {
2437 if_st->ecn_off.sack_episodes =
2438 (if_st->ecn_off.sack_episodes << 10) / ecn_off_conn;
2439 if_st->ecn_off.rst_drop =
2440 (if_st->ecn_off.rst_drop << 10) * 100 / ecn_off_conn;
2441 if_st->ecn_off.rxmit_drop =
2442 (if_st->ecn_off.rxmit_drop << 10) * 100 / ecn_off_conn;
2443 } else {
2444 if_st->ecn_off.sack_episodes = 0;
2445 if_st->ecn_off.rst_drop = 0;
2446 if_st->ecn_off.rxmit_drop = 0;
2447 }
2448 if_st->ecn_total_conn = ecn_off_conn + ecn_on_conn;
2449 }
2450
2451 static void
2452 nstat_ifnet_report_ecn_stats(void)
2453 {
2454 u_int64_t uptime, last_report_time;
2455 struct nstat_sysinfo_data data;
2456 struct nstat_sysinfo_ifnet_ecn_stats *st;
2457 struct ifnet *ifp;
2458
2459 uptime = net_uptime();
2460
2461 if ((int)(uptime - nstat_ifnet_last_report_time) <
2462 tcp_report_stats_interval)
2463 return;
2464
2465 last_report_time = nstat_ifnet_last_report_time;
2466 nstat_ifnet_last_report_time = uptime;
2467 data.flags = NSTAT_SYSINFO_IFNET_ECN_STATS;
2468 st = &data.u.ifnet_ecn_stats;
2469
2470 ifnet_head_lock_shared();
2471 TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
2472 if (ifp->if_ipv4_stat == NULL || ifp->if_ipv6_stat == NULL)
2473 continue;
2474
2475 if (!IF_FULLY_ATTACHED(ifp))
2476 continue;
2477
2478 /* Limit reporting to Wi-Fi, Ethernet and cellular (Wi-Fi is an Ethernet subfamily). */
2479 if (!(IFNET_IS_ETHERNET(ifp) || IFNET_IS_CELLULAR(ifp)))
2480 continue;
2481
2482 bzero(st, sizeof(*st));
2483 if (IFNET_IS_CELLULAR(ifp)) {
2484 st->ifnet_type = NSTAT_IFNET_ECN_TYPE_CELLULAR;
2485 } else if (IFNET_IS_WIFI(ifp)) {
2486 st->ifnet_type = NSTAT_IFNET_ECN_TYPE_WIFI;
2487 } else {
2488 st->ifnet_type = NSTAT_IFNET_ECN_TYPE_ETHERNET;
2489 }
2490 data.unsent_data_cnt = ifp->if_unsent_data_cnt;
2491 /* skip if there was no update since last report */
2492 if (ifp->if_ipv4_stat->timestamp <= 0 ||
2493 ifp->if_ipv4_stat->timestamp < last_report_time)
2494 goto v6;
2495 st->ifnet_proto = NSTAT_IFNET_ECN_PROTO_IPV4;
2496 /* compute percentages using packet counts */
2497 nstat_ifnet_compute_percentages(&ifp->if_ipv4_stat->ecn_on);
2498 nstat_ifnet_compute_percentages(&ifp->if_ipv4_stat->ecn_off);
2499 nstat_ifnet_normalize_counter(ifp->if_ipv4_stat);
2500 bcopy(ifp->if_ipv4_stat, &st->ecn_stat,
2501 sizeof(st->ecn_stat));
2502 nstat_sysinfo_send_data(&data);
2503 bzero(ifp->if_ipv4_stat, sizeof(*ifp->if_ipv4_stat));
2504
2505 v6:
2506 /* skip if there was no update since last report */
2507 if (ifp->if_ipv6_stat->timestamp <= 0 ||
2508 ifp->if_ipv6_stat->timestamp < last_report_time)
2509 continue;
2510 st->ifnet_proto = NSTAT_IFNET_ECN_PROTO_IPV6;
2511
2512 /* compute percentages using packet counts */
2513 nstat_ifnet_compute_percentages(&ifp->if_ipv6_stat->ecn_on);
2514 nstat_ifnet_compute_percentages(&ifp->if_ipv6_stat->ecn_off);
2515 nstat_ifnet_normalize_counter(ifp->if_ipv6_stat);
2516 bcopy(ifp->if_ipv6_stat, &st->ecn_stat,
2517 sizeof(st->ecn_stat));
2518 nstat_sysinfo_send_data(&data);
2519
2520 /* Zero the stats in ifp */
2521 bzero(ifp->if_ipv6_stat, sizeof(*ifp->if_ipv6_stat));
2522 }
2523 ifnet_head_done();
2524
2525 }
2526
2527 /* Some thresholds to determine Low Internet mode */
2528 #define NSTAT_LIM_DL_MAX_BANDWIDTH_THRESHOLD 1000000 /* 1 Mbps */
2529 #define NSTAT_LIM_UL_MAX_BANDWIDTH_THRESHOLD 500000 /* 500 Kbps */
2530 #define NSTAT_LIM_UL_MIN_RTT_THRESHOLD 1000 /* 1 second */
2531 #define NSTAT_LIM_CONN_TIMEOUT_PERCENT_THRESHOLD (10 << 10) /* 10 percent connection timeouts */
2532 #define NSTAT_LIM_PACKET_LOSS_PERCENT_THRESHOLD (2 << 10) /* 2 percent packet loss rate */
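/*
 * The percentage thresholds use the same Q10 fixed point as the
 * computed statistics: (10 << 10) is 10240, i.e. 10% with 10 bits
 * of fraction, so they compare directly against the shifted values
 * produced by nstat_lim_activity_check() below.
 */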
2533
2534 static boolean_t
2535 nstat_lim_activity_check(struct if_lim_perf_stat *st)
2536 {
2537 /* check that the current activity is enough to report stats */
2538 if (st->lim_total_txpkts < nstat_lim_min_tx_pkts ||
2539 st->lim_total_rxpkts < nstat_lim_min_rx_pkts ||
2540 st->lim_conn_attempts == 0)
2541 return (FALSE);
2542
2543 /*
2544 * Compute percentages if there was enough activity. Use
2545 * shift-left by 10 to preserve precision.
2546 */
2547 st->lim_packet_loss_percent = ((st->lim_total_retxpkts << 10) /
2548 st->lim_total_txpkts) * 100;
2549
2550 st->lim_packet_ooo_percent = ((st->lim_total_oopkts << 10) /
2551 st->lim_total_rxpkts) * 100;
2552
2553 st->lim_conn_timeout_percent = ((st->lim_conn_timeouts << 10) /
2554 st->lim_conn_attempts) * 100;
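/*
 * These divide before multiplying by 100, so part of the Q10
 * fraction rounds away: 50 retransmits of 1000 packets yields
 * ((50 << 10) / 1000) * 100 = 5100 (~4.98%) rather than the exact
 * 5120 (5%), which is close enough for the coarse thresholds above.
 */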
2555
2556 /*
2557 * Is Low Internet detected? First order metrics are bandwidth
2558 * and RTT. If bandwidth falls below its threshold, or the
2559 * minimum RTT rises above its threshold, the network attachment
2560 * can be classified as having Low Internet capacity.
2561 *
2562 * A high connection timeout rate also indicates Low Internet
2563 * capacity.
2564 */
2565 if (st->lim_dl_max_bandwidth > 0 &&
2566 st->lim_dl_max_bandwidth <= NSTAT_LIM_DL_MAX_BANDWIDTH_THRESHOLD)
2567 st->lim_dl_detected = 1;
2568
2569 if ((st->lim_ul_max_bandwidth > 0 &&
2570 st->lim_ul_max_bandwidth <= NSTAT_LIM_UL_MAX_BANDWIDTH_THRESHOLD) ||
2571 st->lim_rtt_min >= NSTAT_LIM_UL_MIN_RTT_THRESHOLD)
2572 st->lim_ul_detected = 1;
2573
2574 if (st->lim_conn_attempts > 20 &&
2575 st->lim_conn_timeout_percent >=
2576 NSTAT_LIM_CONN_TIMEOUT_PERCENT_THRESHOLD)
2577 st->lim_ul_detected = 1;
2578 /*
2579 * Second order metric: if packet loss remains high even for
2580 * traffic using delay-based congestion algorithms (background
2581 * class), the attachment is likewise classified as Low Internet.
2582 */
2583 if (st->lim_bk_txpkts >= nstat_lim_min_tx_pkts &&
2584 st->lim_packet_loss_percent >=
2585 NSTAT_LIM_PACKET_LOSS_PERCENT_THRESHOLD)
2586 st->lim_ul_detected = 1;
2587 return (TRUE);
2588 }
2589
2590 static u_int64_t nstat_lim_last_report_time = 0;
2591 static void
2592 nstat_ifnet_report_lim_stats(void)
2593 {
2594 u_int64_t uptime;
2595 struct nstat_sysinfo_data data;
2596 struct nstat_sysinfo_lim_stats *st;
2597 struct ifnet *ifp;
2598 int err;
2599
2600 uptime = net_uptime();
2601
2602 if ((u_int32_t)(uptime - nstat_lim_last_report_time) <
2603 nstat_lim_interval)
2604 return;
2605
2606 nstat_lim_last_report_time = uptime;
2607 data.flags = NSTAT_SYSINFO_LIM_STATS;
2608 st = &data.u.lim_stats;
2609 data.unsent_data_cnt = 0;
2610
2611 ifnet_head_lock_shared();
2612 TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
2613 if (!IF_FULLY_ATTACHED(ifp))
2614 continue;
2615
2616 /* Limit reporting to Wi-Fi, Ethernet and cellular (Wi-Fi is an Ethernet subfamily) */
2617 if (!(IFNET_IS_ETHERNET(ifp) || IFNET_IS_CELLULAR(ifp)))
2618 continue;
2619
2620 if (!nstat_lim_activity_check(&ifp->if_lim_stat))
2621 continue;
2622
2623 bzero(st, sizeof(*st));
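/*
 * Prefer the interface's IPv4 network signature; fall back to the
 * IPv6 signature, and skip the interface if neither is available.
 */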
2624 st->ifnet_siglen = sizeof (st->ifnet_signature);
2625 err = ifnet_get_netsignature(ifp, AF_INET,
2626 (u_int8_t *)&st->ifnet_siglen, NULL,
2627 st->ifnet_signature);
2628 if (err != 0) {
2629 err = ifnet_get_netsignature(ifp, AF_INET6,
2630 (u_int8_t *)&st->ifnet_siglen, NULL,
2631 st->ifnet_signature);
2632 if (err != 0)
2633 continue;
2634 }
2635 ifnet_lock_shared(ifp);
2636 if (IFNET_IS_CELLULAR(ifp)) {
2637 st->ifnet_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_CELLULAR;
2638 } else if (IFNET_IS_WIFI(ifp)) {
2639 st->ifnet_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_WIFI;
2640 } else {
2641 st->ifnet_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_ETHERNET;
2642 }
2643 bcopy(&ifp->if_lim_stat, &st->lim_stat,
2644 sizeof(st->lim_stat));
2645
2646 /* Zero the stats in ifp */
2647 bzero(&ifp->if_lim_stat, sizeof(ifp->if_lim_stat));
2648 ifnet_lock_done(ifp);
2649 nstat_sysinfo_send_data(&data);
2650 }
2651 ifnet_head_done();
2652 }
2653
2654 static errno_t
2655 nstat_ifnet_copy_descriptor(
2656 nstat_provider_cookie_t cookie,
2657 void *data,
2658 u_int32_t len)
2659 {
2660 nstat_ifnet_descriptor *desc = (nstat_ifnet_descriptor *)data;
2661 struct nstat_ifnet_cookie *ifcookie =
2662 (struct nstat_ifnet_cookie *)cookie;
2663 struct ifnet *ifp = ifcookie->ifp;
2664
2665 if (len < sizeof(nstat_ifnet_descriptor))
2666 return EINVAL;
2667
2668 if (nstat_ifnet_gone(cookie))
2669 return EINVAL;
2670
2671 bzero(desc, sizeof(*desc));
2672 ifnet_lock_shared(ifp);
2673 strlcpy(desc->name, ifp->if_xname, sizeof(desc->name));
2674 desc->ifindex = ifp->if_index;
2675 desc->threshold = ifp->if_data_threshold;
2676 desc->type = ifp->if_type;
2677 if (ifp->if_desc.ifd_len < sizeof(desc->description))
2678 memcpy(desc->description, ifp->if_desc.ifd_desc,
2679 sizeof(desc->description));
2680 nstat_ifnet_copy_link_status(ifp, desc);
2681 ifnet_lock_done(ifp);
2682 return 0;
2683 }
2684
2685 static void
2686 nstat_init_ifnet_provider(void)
2687 {
2688 bzero(&nstat_ifnet_provider, sizeof(nstat_ifnet_provider));
2689 nstat_ifnet_provider.nstat_provider_id = NSTAT_PROVIDER_IFNET;
2690 nstat_ifnet_provider.nstat_descriptor_length = sizeof(nstat_ifnet_descriptor);
2691 nstat_ifnet_provider.nstat_lookup = nstat_ifnet_lookup;
2692 nstat_ifnet_provider.nstat_gone = nstat_ifnet_gone;
2693 nstat_ifnet_provider.nstat_counts = nstat_ifnet_counts;
2694 nstat_ifnet_provider.nstat_watcher_add = NULL;
2695 nstat_ifnet_provider.nstat_watcher_remove = NULL;
2696 nstat_ifnet_provider.nstat_copy_descriptor = nstat_ifnet_copy_descriptor;
2697 nstat_ifnet_provider.nstat_release = nstat_ifnet_release;
2698 nstat_ifnet_provider.next = nstat_providers;
2699 nstat_providers = &nstat_ifnet_provider;
2700 }
2701
2702 __private_extern__ void
2703 nstat_ifnet_threshold_reached(unsigned int ifindex)
2704 {
2705 nstat_control_state *state;
2706 nstat_src *src;
2707 struct ifnet *ifp;
2708 struct nstat_ifnet_cookie *ifcookie;
2709
2710 lck_mtx_lock(&nstat_mtx);
2711 for (state = nstat_controls; state; state = state->ncs_next)
2712 {
2713 lck_mtx_lock(&state->ncs_mtx);
2714 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
2715 {
2716 if (src->provider != &nstat_ifnet_provider)
2717 continue;
2718 ifcookie = (struct nstat_ifnet_cookie *)src->cookie;
2719 ifp = ifcookie->ifp;
2720 if (ifp->if_index != ifindex)
2721 continue;
2722 nstat_control_send_counts(state, src, 0, 0, NULL);
2723 }
2724 lck_mtx_unlock(&state->ncs_mtx);
2725 }
2726 lck_mtx_unlock(&nstat_mtx);
2727 }
2728
2729 #pragma mark -- Sysinfo --
2730 static void
2731 nstat_set_keyval_scalar(nstat_sysinfo_keyval *kv, int key, u_int32_t val)
2732 {
2733 kv->nstat_sysinfo_key = key;
2734 kv->nstat_sysinfo_flags = NSTAT_SYSINFO_FLAG_SCALAR;
2735 kv->u.nstat_sysinfo_scalar = val;
2736 kv->nstat_sysinfo_valsize = sizeof(kv->u.nstat_sysinfo_scalar);
2737 }
2738
2739 static void
2740 nstat_set_keyval_string(nstat_sysinfo_keyval *kv, int key, u_int8_t *buf,
2741 u_int32_t len)
2742 {
2743 kv->nstat_sysinfo_key = key;
2744 kv->nstat_sysinfo_flags = NSTAT_SYSINFO_FLAG_STRING;
2745 kv->nstat_sysinfo_valsize = min(len,
2746 NSTAT_SYSINFO_KEYVAL_STRING_MAXSIZE);
2747 bcopy(buf, kv->u.nstat_sysinfo_string, kv->nstat_sysinfo_valsize);
2748 }
2749
2750 static void
2751 nstat_sysinfo_send_data_internal(
2752 nstat_control_state *control,
2753 nstat_sysinfo_data *data)
2754 {
2755 nstat_msg_sysinfo_counts *syscnt = NULL;
2756 size_t allocsize = 0, countsize = 0, nkeyvals = 0, finalsize = 0;
2757 nstat_sysinfo_keyval *kv;
2758 errno_t result = 0;
2759 size_t i = 0;
2760
2761 allocsize = offsetof(nstat_msg_sysinfo_counts, counts);
2762 countsize = offsetof(nstat_sysinfo_counts, nstat_sysinfo_keyvals);
2763 finalsize = allocsize;
2764
2765 /* get number of key-vals for each kind of stat */
2766 switch (data->flags)
2767 {
2768 case NSTAT_SYSINFO_MBUF_STATS:
2769 nkeyvals = sizeof(struct nstat_sysinfo_mbuf_stats) /
2770 sizeof(u_int32_t);
2771 break;
2772 case NSTAT_SYSINFO_TCP_STATS:
2773 nkeyvals = NSTAT_SYSINFO_TCP_STATS_COUNT;
2774 break;
2775 case NSTAT_SYSINFO_IFNET_ECN_STATS:
2776 nkeyvals = (sizeof(struct if_tcp_ecn_stat) /
2777 sizeof(u_int64_t));
2778
2779 /* Two more keys for ifnet type and proto */
2780 nkeyvals += 2;
2781
2782 /* One key for unsent data. */
2783 nkeyvals++;
2784 break;
2785 case NSTAT_SYSINFO_LIM_STATS:
2786 nkeyvals = NSTAT_LIM_STAT_KEYVAL_COUNT;
2787 break;
2788 case NSTAT_SYSINFO_NET_API_STATS:
2789 nkeyvals = NSTAT_NET_API_STAT_KEYVAL_COUNT;
2790 break;
2791 default:
2792 return;
2793 }
2794 countsize += sizeof(nstat_sysinfo_keyval) * nkeyvals;
2795 allocsize += countsize;
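/*
 * Wire layout: the nstat_msg_sysinfo_counts header, then the
 * nstat_sysinfo_counts header, then nkeyvals variable-length
 * nstat_sysinfo_keyval entries -- hence the two offsetof()-based
 * sizes above.
 */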
2796
2797 syscnt = OSMalloc(allocsize, nstat_malloc_tag);
2798 if (syscnt == NULL)
2799 return;
2800 bzero(syscnt, allocsize);
2801
2802 kv = (nstat_sysinfo_keyval *) &syscnt->counts.nstat_sysinfo_keyvals;
2803 switch (data->flags)
2804 {
2805 case NSTAT_SYSINFO_MBUF_STATS:
2806 {
2807 nstat_set_keyval_scalar(&kv[i++],
2808 NSTAT_SYSINFO_KEY_MBUF_256B_TOTAL,
2809 data->u.mb_stats.total_256b);
2810 nstat_set_keyval_scalar(&kv[i++],
2811 NSTAT_SYSINFO_KEY_MBUF_2KB_TOTAL,
2812 data->u.mb_stats.total_2kb);
2813 nstat_set_keyval_scalar(&kv[i++],
2814 NSTAT_SYSINFO_KEY_MBUF_4KB_TOTAL,
2815 data->u.mb_stats.total_4kb);
2816 nstat_set_keyval_scalar(&kv[i++],
2817 NSTAT_SYSINFO_MBUF_16KB_TOTAL,
2818 data->u.mb_stats.total_16kb);
2819 nstat_set_keyval_scalar(&kv[i++],
2820 NSTAT_SYSINFO_KEY_SOCK_MBCNT,
2821 data->u.mb_stats.sbmb_total);
2822 nstat_set_keyval_scalar(&kv[i++],
2823 NSTAT_SYSINFO_KEY_SOCK_ATMBLIMIT,
2824 data->u.mb_stats.sb_atmbuflimit);
2825 nstat_set_keyval_scalar(&kv[i++],
2826 NSTAT_SYSINFO_MBUF_DRAIN_CNT,
2827 data->u.mb_stats.draincnt);
2828 nstat_set_keyval_scalar(&kv[i++],
2829 NSTAT_SYSINFO_MBUF_MEM_RELEASED,
2830 data->u.mb_stats.memreleased);
2831 nstat_set_keyval_scalar(&kv[i++],
2832 NSTAT_SYSINFO_KEY_SOCK_MBFLOOR,
2833 data->u.mb_stats.sbmb_floor);
2834 VERIFY(i == nkeyvals);
2835 break;
2836 }
2837 case NSTAT_SYSINFO_TCP_STATS:
2838 {
2839 nstat_set_keyval_scalar(&kv[i++],
2840 NSTAT_SYSINFO_KEY_IPV4_AVGRTT,
2841 data->u.tcp_stats.ipv4_avgrtt);
2842 nstat_set_keyval_scalar(&kv[i++],
2843 NSTAT_SYSINFO_KEY_IPV6_AVGRTT,
2844 data->u.tcp_stats.ipv6_avgrtt);
2845 nstat_set_keyval_scalar(&kv[i++],
2846 NSTAT_SYSINFO_KEY_SEND_PLR,
2847 data->u.tcp_stats.send_plr);
2848 nstat_set_keyval_scalar(&kv[i++],
2849 NSTAT_SYSINFO_KEY_RECV_PLR,
2850 data->u.tcp_stats.recv_plr);
2851 nstat_set_keyval_scalar(&kv[i++],
2852 NSTAT_SYSINFO_KEY_SEND_TLRTO,
2853 data->u.tcp_stats.send_tlrto_rate);
2854 nstat_set_keyval_scalar(&kv[i++],
2855 NSTAT_SYSINFO_KEY_SEND_REORDERRATE,
2856 data->u.tcp_stats.send_reorder_rate);
2857 nstat_set_keyval_scalar(&kv[i++],
2858 NSTAT_SYSINFO_CONNECTION_ATTEMPTS,
2859 data->u.tcp_stats.connection_attempts);
2860 nstat_set_keyval_scalar(&kv[i++],
2861 NSTAT_SYSINFO_CONNECTION_ACCEPTS,
2862 data->u.tcp_stats.connection_accepts);
2863 nstat_set_keyval_scalar(&kv[i++],
2864 NSTAT_SYSINFO_ECN_CLIENT_ENABLED,
2865 data->u.tcp_stats.ecn_client_enabled);
2866 nstat_set_keyval_scalar(&kv[i++],
2867 NSTAT_SYSINFO_ECN_SERVER_ENABLED,
2868 data->u.tcp_stats.ecn_server_enabled);
2869 nstat_set_keyval_scalar(&kv[i++],
2870 NSTAT_SYSINFO_ECN_CLIENT_SETUP,
2871 data->u.tcp_stats.ecn_client_setup);
2872 nstat_set_keyval_scalar(&kv[i++],
2873 NSTAT_SYSINFO_ECN_SERVER_SETUP,
2874 data->u.tcp_stats.ecn_server_setup);
2875 nstat_set_keyval_scalar(&kv[i++],
2876 NSTAT_SYSINFO_ECN_CLIENT_SUCCESS,
2877 data->u.tcp_stats.ecn_client_success);
2878 nstat_set_keyval_scalar(&kv[i++],
2879 NSTAT_SYSINFO_ECN_SERVER_SUCCESS,
2880 data->u.tcp_stats.ecn_server_success);
2881 nstat_set_keyval_scalar(&kv[i++],
2882 NSTAT_SYSINFO_ECN_NOT_SUPPORTED,
2883 data->u.tcp_stats.ecn_not_supported);
2884 nstat_set_keyval_scalar(&kv[i++],
2885 NSTAT_SYSINFO_ECN_LOST_SYN,
2886 data->u.tcp_stats.ecn_lost_syn);
2887 nstat_set_keyval_scalar(&kv[i++],
2888 NSTAT_SYSINFO_ECN_LOST_SYNACK,
2889 data->u.tcp_stats.ecn_lost_synack);
2890 nstat_set_keyval_scalar(&kv[i++],
2891 NSTAT_SYSINFO_ECN_RECV_CE,
2892 data->u.tcp_stats.ecn_recv_ce);
2893 nstat_set_keyval_scalar(&kv[i++],
2894 NSTAT_SYSINFO_ECN_RECV_ECE,
2895 data->u.tcp_stats.ecn_recv_ece);
2896 nstat_set_keyval_scalar(&kv[i++],
2897 NSTAT_SYSINFO_ECN_SENT_ECE,
2898 data->u.tcp_stats.ecn_sent_ece);
2899 nstat_set_keyval_scalar(&kv[i++],
2900 NSTAT_SYSINFO_ECN_CONN_RECV_CE,
2901 data->u.tcp_stats.ecn_conn_recv_ce);
2902 nstat_set_keyval_scalar(&kv[i++],
2903 NSTAT_SYSINFO_ECN_CONN_RECV_ECE,
2904 data->u.tcp_stats.ecn_conn_recv_ece);
2905 nstat_set_keyval_scalar(&kv[i++],
2906 NSTAT_SYSINFO_ECN_CONN_PLNOCE,
2907 data->u.tcp_stats.ecn_conn_plnoce);
2908 nstat_set_keyval_scalar(&kv[i++],
2909 NSTAT_SYSINFO_ECN_CONN_PL_CE,
2910 data->u.tcp_stats.ecn_conn_pl_ce);
2911 nstat_set_keyval_scalar(&kv[i++],
2912 NSTAT_SYSINFO_ECN_CONN_NOPL_CE,
2913 data->u.tcp_stats.ecn_conn_nopl_ce);
2914 nstat_set_keyval_scalar(&kv[i++],
2915 NSTAT_SYSINFO_ECN_FALLBACK_SYNLOSS,
2916 data->u.tcp_stats.ecn_fallback_synloss);
2917 nstat_set_keyval_scalar(&kv[i++],
2918 NSTAT_SYSINFO_ECN_FALLBACK_REORDER,
2919 data->u.tcp_stats.ecn_fallback_reorder);
2920 nstat_set_keyval_scalar(&kv[i++],
2921 NSTAT_SYSINFO_ECN_FALLBACK_CE,
2922 data->u.tcp_stats.ecn_fallback_ce);
2923 nstat_set_keyval_scalar(&kv[i++],
2924 NSTAT_SYSINFO_TFO_SYN_DATA_RCV,
2925 data->u.tcp_stats.tfo_syn_data_rcv);
2926 nstat_set_keyval_scalar(&kv[i++],
2927 NSTAT_SYSINFO_TFO_COOKIE_REQ_RCV,
2928 data->u.tcp_stats.tfo_cookie_req_rcv);
2929 nstat_set_keyval_scalar(&kv[i++],
2930 NSTAT_SYSINFO_TFO_COOKIE_SENT,
2931 data->u.tcp_stats.tfo_cookie_sent);
2932 nstat_set_keyval_scalar(&kv[i++],
2933 NSTAT_SYSINFO_TFO_COOKIE_INVALID,
2934 data->u.tcp_stats.tfo_cookie_invalid);
2935 nstat_set_keyval_scalar(&kv[i++],
2936 NSTAT_SYSINFO_TFO_COOKIE_REQ,
2937 data->u.tcp_stats.tfo_cookie_req);
2938 nstat_set_keyval_scalar(&kv[i++],
2939 NSTAT_SYSINFO_TFO_COOKIE_RCV,
2940 data->u.tcp_stats.tfo_cookie_rcv);
2941 nstat_set_keyval_scalar(&kv[i++],
2942 NSTAT_SYSINFO_TFO_SYN_DATA_SENT,
2943 data->u.tcp_stats.tfo_syn_data_sent);
2944 nstat_set_keyval_scalar(&kv[i++],
2945 NSTAT_SYSINFO_TFO_SYN_DATA_ACKED,
2946 data->u.tcp_stats.tfo_syn_data_acked);
2947 nstat_set_keyval_scalar(&kv[i++],
2948 NSTAT_SYSINFO_TFO_SYN_LOSS,
2949 data->u.tcp_stats.tfo_syn_loss);
2950 nstat_set_keyval_scalar(&kv[i++],
2951 NSTAT_SYSINFO_TFO_BLACKHOLE,
2952 data->u.tcp_stats.tfo_blackhole);
2953 nstat_set_keyval_scalar(&kv[i++],
2954 NSTAT_SYSINFO_TFO_COOKIE_WRONG,
2955 data->u.tcp_stats.tfo_cookie_wrong);
2956 nstat_set_keyval_scalar(&kv[i++],
2957 NSTAT_SYSINFO_TFO_NO_COOKIE_RCV,
2958 data->u.tcp_stats.tfo_no_cookie_rcv);
2959 nstat_set_keyval_scalar(&kv[i++],
2960 NSTAT_SYSINFO_TFO_HEURISTICS_DISABLE,
2961 data->u.tcp_stats.tfo_heuristics_disable);
2962 nstat_set_keyval_scalar(&kv[i++],
2963 NSTAT_SYSINFO_TFO_SEND_BLACKHOLE,
2964 data->u.tcp_stats.tfo_sndblackhole);
2965 nstat_set_keyval_scalar(&kv[i++],
2966 NSTAT_SYSINFO_MPTCP_HANDOVER_ATTEMPT,
2967 data->u.tcp_stats.mptcp_handover_attempt);
2968 nstat_set_keyval_scalar(&kv[i++],
2969 NSTAT_SYSINFO_MPTCP_INTERACTIVE_ATTEMPT,
2970 data->u.tcp_stats.mptcp_interactive_attempt);
2971 nstat_set_keyval_scalar(&kv[i++],
2972 NSTAT_SYSINFO_MPTCP_AGGREGATE_ATTEMPT,
2973 data->u.tcp_stats.mptcp_aggregate_attempt);
2974 nstat_set_keyval_scalar(&kv[i++],
2975 NSTAT_SYSINFO_MPTCP_FP_HANDOVER_ATTEMPT,
2976 data->u.tcp_stats.mptcp_fp_handover_attempt);
2977 nstat_set_keyval_scalar(&kv[i++],
2978 NSTAT_SYSINFO_MPTCP_FP_INTERACTIVE_ATTEMPT,
2979 data->u.tcp_stats.mptcp_fp_interactive_attempt);
2980 nstat_set_keyval_scalar(&kv[i++],
2981 NSTAT_SYSINFO_MPTCP_FP_AGGREGATE_ATTEMPT,
2982 data->u.tcp_stats.mptcp_fp_aggregate_attempt);
2983 nstat_set_keyval_scalar(&kv[i++],
2984 NSTAT_SYSINFO_MPTCP_HEURISTIC_FALLBACK,
2985 data->u.tcp_stats.mptcp_heuristic_fallback);
2986 nstat_set_keyval_scalar(&kv[i++],
2987 NSTAT_SYSINFO_MPTCP_FP_HEURISTIC_FALLBACK,
2988 data->u.tcp_stats.mptcp_fp_heuristic_fallback);
2989 nstat_set_keyval_scalar(&kv[i++],
2990 NSTAT_SYSINFO_MPTCP_HANDOVER_SUCCESS_WIFI,
2991 data->u.tcp_stats.mptcp_handover_success_wifi);
2992 nstat_set_keyval_scalar(&kv[i++],
2993 NSTAT_SYSINFO_MPTCP_HANDOVER_SUCCESS_CELL,
2994 data->u.tcp_stats.mptcp_handover_success_cell);
2995 nstat_set_keyval_scalar(&kv[i++],
2996 NSTAT_SYSINFO_MPTCP_INTERACTIVE_SUCCESS,
2997 data->u.tcp_stats.mptcp_interactive_success);
2998 nstat_set_keyval_scalar(&kv[i++],
2999 NSTAT_SYSINFO_MPTCP_AGGREGATE_SUCCESS,
3000 data->u.tcp_stats.mptcp_aggregate_success);
3001 nstat_set_keyval_scalar(&kv[i++],
3002 NSTAT_SYSINFO_MPTCP_FP_HANDOVER_SUCCESS_WIFI,
3003 data->u.tcp_stats.mptcp_fp_handover_success_wifi);
3004 nstat_set_keyval_scalar(&kv[i++],
3005 NSTAT_SYSINFO_MPTCP_FP_HANDOVER_SUCCESS_CELL,
3006 data->u.tcp_stats.mptcp_fp_handover_success_cell);
3007 nstat_set_keyval_scalar(&kv[i++],
3008 NSTAT_SYSINFO_MPTCP_FP_INTERACTIVE_SUCCESS,
3009 data->u.tcp_stats.mptcp_fp_interactive_success);
3010 nstat_set_keyval_scalar(&kv[i++],
3011 NSTAT_SYSINFO_MPTCP_FP_AGGREGATE_SUCCESS,
3012 data->u.tcp_stats.mptcp_fp_aggregate_success);
3013 nstat_set_keyval_scalar(&kv[i++],
3014 NSTAT_SYSINFO_MPTCP_HANDOVER_CELL_FROM_WIFI,
3015 data->u.tcp_stats.mptcp_handover_cell_from_wifi);
3016 nstat_set_keyval_scalar(&kv[i++],
3017 NSTAT_SYSINFO_MPTCP_HANDOVER_WIFI_FROM_CELL,
3018 data->u.tcp_stats.mptcp_handover_wifi_from_cell);
3019 nstat_set_keyval_scalar(&kv[i++],
3020 NSTAT_SYSINFO_MPTCP_INTERACTIVE_CELL_FROM_WIFI,
3021 data->u.tcp_stats.mptcp_interactive_cell_from_wifi);
3022 nstat_set_keyval_scalar(&kv[i++],
3023 NSTAT_SYSINFO_MPTCP_HANDOVER_CELL_BYTES,
3024 data->u.tcp_stats.mptcp_handover_cell_bytes);
3025 nstat_set_keyval_scalar(&kv[i++],
3026 NSTAT_SYSINFO_MPTCP_INTERACTIVE_CELL_BYTES,
3027 data->u.tcp_stats.mptcp_interactive_cell_bytes);
3028 nstat_set_keyval_scalar(&kv[i++],
3029 NSTAT_SYSINFO_MPTCP_AGGREGATE_CELL_BYTES,
3030 data->u.tcp_stats.mptcp_aggregate_cell_bytes);
3031 nstat_set_keyval_scalar(&kv[i++],
3032 NSTAT_SYSINFO_MPTCP_HANDOVER_ALL_BYTES,
3033 data->u.tcp_stats.mptcp_handover_all_bytes);
3034 nstat_set_keyval_scalar(&kv[i++],
3035 NSTAT_SYSINFO_MPTCP_INTERACTIVE_ALL_BYTES,
3036 data->u.tcp_stats.mptcp_interactive_all_bytes);
3037 nstat_set_keyval_scalar(&kv[i++],
3038 NSTAT_SYSINFO_MPTCP_AGGREGATE_ALL_BYTES,
3039 data->u.tcp_stats.mptcp_aggregate_all_bytes);
3040 nstat_set_keyval_scalar(&kv[i++],
3041 NSTAT_SYSINFO_MPTCP_BACK_TO_WIFI,
3042 data->u.tcp_stats.mptcp_back_to_wifi);
3043 nstat_set_keyval_scalar(&kv[i++],
3044 NSTAT_SYSINFO_MPTCP_WIFI_PROXY,
3045 data->u.tcp_stats.mptcp_wifi_proxy);
3046 nstat_set_keyval_scalar(&kv[i++],
3047 NSTAT_SYSINFO_MPTCP_CELL_PROXY,
3048 data->u.tcp_stats.mptcp_cell_proxy);
3049 VERIFY(i == nkeyvals);
3050 break;
3051 }
3052 case NSTAT_SYSINFO_IFNET_ECN_STATS:
3053 {
3054 nstat_set_keyval_scalar(&kv[i++],
3055 NSTAT_SYSINFO_ECN_IFNET_TYPE,
3056 data->u.ifnet_ecn_stats.ifnet_type);
3057 nstat_set_keyval_scalar(&kv[i++],
3058 NSTAT_SYSINFO_ECN_IFNET_PROTO,
3059 data->u.ifnet_ecn_stats.ifnet_proto);
3060 nstat_set_keyval_scalar(&kv[i++],
3061 NSTAT_SYSINFO_ECN_IFNET_CLIENT_SETUP,
3062 data->u.ifnet_ecn_stats.ecn_stat.ecn_client_setup);
3063 nstat_set_keyval_scalar(&kv[i++],
3064 NSTAT_SYSINFO_ECN_IFNET_SERVER_SETUP,
3065 data->u.ifnet_ecn_stats.ecn_stat.ecn_server_setup);
3066 nstat_set_keyval_scalar(&kv[i++],
3067 NSTAT_SYSINFO_ECN_IFNET_CLIENT_SUCCESS,
3068 data->u.ifnet_ecn_stats.ecn_stat.ecn_client_success);
3069 nstat_set_keyval_scalar(&kv[i++],
3070 NSTAT_SYSINFO_ECN_IFNET_SERVER_SUCCESS,
3071 data->u.ifnet_ecn_stats.ecn_stat.ecn_server_success);
3072 nstat_set_keyval_scalar(&kv[i++],
3073 NSTAT_SYSINFO_ECN_IFNET_PEER_NOSUPPORT,
3074 data->u.ifnet_ecn_stats.ecn_stat.ecn_peer_nosupport);
3075 nstat_set_keyval_scalar(&kv[i++],
3076 NSTAT_SYSINFO_ECN_IFNET_SYN_LOST,
3077 data->u.ifnet_ecn_stats.ecn_stat.ecn_syn_lost);
3078 nstat_set_keyval_scalar(&kv[i++],
3079 NSTAT_SYSINFO_ECN_IFNET_SYNACK_LOST,
3080 data->u.ifnet_ecn_stats.ecn_stat.ecn_synack_lost);
3081 nstat_set_keyval_scalar(&kv[i++],
3082 NSTAT_SYSINFO_ECN_IFNET_RECV_CE,
3083 data->u.ifnet_ecn_stats.ecn_stat.ecn_recv_ce);
3084 nstat_set_keyval_scalar(&kv[i++],
3085 NSTAT_SYSINFO_ECN_IFNET_RECV_ECE,
3086 data->u.ifnet_ecn_stats.ecn_stat.ecn_recv_ece);
3087 nstat_set_keyval_scalar(&kv[i++],
3088 NSTAT_SYSINFO_ECN_IFNET_CONN_RECV_CE,
3089 data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_recv_ce);
3090 nstat_set_keyval_scalar(&kv[i++],
3091 NSTAT_SYSINFO_ECN_IFNET_CONN_RECV_ECE,
3092 data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_recv_ece);
3093 nstat_set_keyval_scalar(&kv[i++],
3094 NSTAT_SYSINFO_ECN_IFNET_CONN_PLNOCE,
3095 data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_plnoce);
3096 nstat_set_keyval_scalar(&kv[i++],
3097 NSTAT_SYSINFO_ECN_IFNET_CONN_PLCE,
3098 data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_plce);
3099 nstat_set_keyval_scalar(&kv[i++],
3100 NSTAT_SYSINFO_ECN_IFNET_CONN_NOPLCE,
3101 data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_noplce);
3102 nstat_set_keyval_scalar(&kv[i++],
3103 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_SYNLOSS,
3104 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_synloss);
3105 nstat_set_keyval_scalar(&kv[i++],
3106 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_REORDER,
3107 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_reorder);
3108 nstat_set_keyval_scalar(&kv[i++],
3109 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_CE,
3110 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_ce);
3111 nstat_set_keyval_scalar(&kv[i++],
3112 NSTAT_SYSINFO_ECN_IFNET_ON_RTT_AVG,
3113 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rtt_avg);
3114 nstat_set_keyval_scalar(&kv[i++],
3115 NSTAT_SYSINFO_ECN_IFNET_ON_RTT_VAR,
3116 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rtt_var);
3117 nstat_set_keyval_scalar(&kv[i++],
3118 NSTAT_SYSINFO_ECN_IFNET_ON_OOPERCENT,
3119 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.oo_percent);
3120 nstat_set_keyval_scalar(&kv[i++],
3121 NSTAT_SYSINFO_ECN_IFNET_ON_SACK_EPISODE,
3122 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.sack_episodes);
3123 nstat_set_keyval_scalar(&kv[i++],
3124 NSTAT_SYSINFO_ECN_IFNET_ON_REORDER_PERCENT,
3125 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.reorder_percent);
3126 nstat_set_keyval_scalar(&kv[i++],
3127 NSTAT_SYSINFO_ECN_IFNET_ON_RXMIT_PERCENT,
3128 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rxmit_percent);
3129 nstat_set_keyval_scalar(&kv[i++],
3130 NSTAT_SYSINFO_ECN_IFNET_ON_RXMIT_DROP,
3131 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rxmit_drop);
3132 nstat_set_keyval_scalar(&kv[i++],
3133 NSTAT_SYSINFO_ECN_IFNET_OFF_RTT_AVG,
3134 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rtt_avg);
3135 nstat_set_keyval_scalar(&kv[i++],
3136 NSTAT_SYSINFO_ECN_IFNET_OFF_RTT_VAR,
3137 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rtt_var);
3138 nstat_set_keyval_scalar(&kv[i++],
3139 NSTAT_SYSINFO_ECN_IFNET_OFF_OOPERCENT,
3140 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.oo_percent);
3141 nstat_set_keyval_scalar(&kv[i++],
3142 NSTAT_SYSINFO_ECN_IFNET_OFF_SACK_EPISODE,
3143 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.sack_episodes);
3144 nstat_set_keyval_scalar(&kv[i++],
3145 NSTAT_SYSINFO_ECN_IFNET_OFF_REORDER_PERCENT,
3146 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.reorder_percent);
3147 nstat_set_keyval_scalar(&kv[i++],
3148 NSTAT_SYSINFO_ECN_IFNET_OFF_RXMIT_PERCENT,
3149 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rxmit_percent);
3150 nstat_set_keyval_scalar(&kv[i++],
3151 NSTAT_SYSINFO_ECN_IFNET_OFF_RXMIT_DROP,
3152 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rxmit_drop);
3153 nstat_set_keyval_scalar(&kv[i++],
3154 NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_TXPKTS,
3155 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_txpkts);
3156 nstat_set_keyval_scalar(&kv[i++],
3157 NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_RXMTPKTS,
3158 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_rxmitpkts);
3159 nstat_set_keyval_scalar(&kv[i++],
3160 NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_RXPKTS,
3161 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_rxpkts);
3162 nstat_set_keyval_scalar(&kv[i++],
3163 NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_OOPKTS,
3164 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_oopkts);
3165 nstat_set_keyval_scalar(&kv[i++],
3166 NSTAT_SYSINFO_ECN_IFNET_ON_DROP_RST,
3167 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rst_drop);
3168 nstat_set_keyval_scalar(&kv[i++],
3169 NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_TXPKTS,
3170 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_txpkts);
3171 nstat_set_keyval_scalar(&kv[i++],
3172 NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_RXMTPKTS,
3173 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_rxmitpkts);
3174 nstat_set_keyval_scalar(&kv[i++],
3175 NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_RXPKTS,
3176 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_rxpkts);
3177 nstat_set_keyval_scalar(&kv[i++],
3178 NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_OOPKTS,
3179 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_oopkts);
3180 nstat_set_keyval_scalar(&kv[i++],
3181 NSTAT_SYSINFO_ECN_IFNET_OFF_DROP_RST,
3182 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rst_drop);
3183 nstat_set_keyval_scalar(&kv[i++],
3184 NSTAT_SYSINFO_ECN_IFNET_TOTAL_CONN,
3185 data->u.ifnet_ecn_stats.ecn_stat.ecn_total_conn);
3186 nstat_set_keyval_scalar(&kv[i++],
3187 NSTAT_SYSINFO_IFNET_UNSENT_DATA,
3188 data->unsent_data_cnt);
3189 nstat_set_keyval_scalar(&kv[i++],
3190 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_DROPRST,
3191 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_droprst);
3192 nstat_set_keyval_scalar(&kv[i++],
3193 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_DROPRXMT,
3194 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_droprxmt);
3195 nstat_set_keyval_scalar(&kv[i++],
3196 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_SYNRST,
3197 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_synrst);
3198 break;
3199 }
3200 case NSTAT_SYSINFO_LIM_STATS:
3201 {
3202 nstat_set_keyval_string(&kv[i++],
3203 NSTAT_SYSINFO_LIM_IFNET_SIGNATURE,
3204 data->u.lim_stats.ifnet_signature,
3205 data->u.lim_stats.ifnet_siglen);
3206 nstat_set_keyval_scalar(&kv[i++],
3207 NSTAT_SYSINFO_LIM_IFNET_DL_MAX_BANDWIDTH,
3208 data->u.lim_stats.lim_stat.lim_dl_max_bandwidth);
3209 nstat_set_keyval_scalar(&kv[i++],
3210 NSTAT_SYSINFO_LIM_IFNET_UL_MAX_BANDWIDTH,
3211 data->u.lim_stats.lim_stat.lim_ul_max_bandwidth);
3212 nstat_set_keyval_scalar(&kv[i++],
3213 NSTAT_SYSINFO_LIM_IFNET_PACKET_LOSS_PERCENT,
3214 data->u.lim_stats.lim_stat.lim_packet_loss_percent);
3215 nstat_set_keyval_scalar(&kv[i++],
3216 NSTAT_SYSINFO_LIM_IFNET_PACKET_OOO_PERCENT,
3217 data->u.lim_stats.lim_stat.lim_packet_ooo_percent);
3218 nstat_set_keyval_scalar(&kv[i++],
3219 NSTAT_SYSINFO_LIM_IFNET_RTT_VARIANCE,
3220 data->u.lim_stats.lim_stat.lim_rtt_variance);
3221 nstat_set_keyval_scalar(&kv[i++],
3222 NSTAT_SYSINFO_LIM_IFNET_RTT_MIN,
3223 data->u.lim_stats.lim_stat.lim_rtt_min);
3224 nstat_set_keyval_scalar(&kv[i++],
3225 NSTAT_SYSINFO_LIM_IFNET_RTT_AVG,
3226 data->u.lim_stats.lim_stat.lim_rtt_average);
3227 nstat_set_keyval_scalar(&kv[i++],
3228 NSTAT_SYSINFO_LIM_IFNET_CONN_TIMEOUT_PERCENT,
3229 data->u.lim_stats.lim_stat.lim_conn_timeout_percent);
3230 nstat_set_keyval_scalar(&kv[i++],
3231 NSTAT_SYSINFO_LIM_IFNET_DL_DETECTED,
3232 data->u.lim_stats.lim_stat.lim_dl_detected);
3233 nstat_set_keyval_scalar(&kv[i++],
3234 NSTAT_SYSINFO_LIM_IFNET_UL_DETECTED,
3235 data->u.lim_stats.lim_stat.lim_ul_detected);
3236 nstat_set_keyval_scalar(&kv[i++],
3237 NSTAT_SYSINFO_LIM_IFNET_TYPE,
3238 data->u.lim_stats.ifnet_type);
3239 break;
3240 }
3241 case NSTAT_SYSINFO_NET_API_STATS:
3242 {
3243 nstat_set_keyval_scalar(&kv[i++],
3244 NSTAT_SYSINFO_API_IF_FLTR_ATTACH,
3245 data->u.net_api_stats.net_api_stats.nas_iflt_attach_total);
3246 nstat_set_keyval_scalar(&kv[i++],
3247 NSTAT_SYSINFO_API_IF_FLTR_ATTACH_OS,
3248 data->u.net_api_stats.net_api_stats.nas_iflt_attach_os_total);
3249 nstat_set_keyval_scalar(&kv[i++],
3250 NSTAT_SYSINFO_API_IP_FLTR_ADD,
3251 data->u.net_api_stats.net_api_stats.nas_ipf_add_total);
3252 nstat_set_keyval_scalar(&kv[i++],
3253 NSTAT_SYSINFO_API_IP_FLTR_ADD_OS,
3254 data->u.net_api_stats.net_api_stats.nas_ipf_add_os_total);
3255 nstat_set_keyval_scalar(&kv[i++],
3256 NSTAT_SYSINFO_API_SOCK_FLTR_ATTACH,
3257 data->u.net_api_stats.net_api_stats.nas_sfltr_register_total);
3258 nstat_set_keyval_scalar(&kv[i++],
3259 NSTAT_SYSINFO_API_SOCK_FLTR_ATTACH_OS,
3260 data->u.net_api_stats.net_api_stats.nas_sfltr_register_os_total);
3261
3262
3263 nstat_set_keyval_scalar(&kv[i++],
3264 NSTAT_SYSINFO_API_SOCK_ALLOC_TOTAL,
3265 data->u.net_api_stats.net_api_stats.nas_socket_alloc_total);
3266 nstat_set_keyval_scalar(&kv[i++],
3267 NSTAT_SYSINFO_API_SOCK_ALLOC_KERNEL,
3268 data->u.net_api_stats.net_api_stats.nas_socket_in_kernel_total);
3269 nstat_set_keyval_scalar(&kv[i++],
3270 NSTAT_SYSINFO_API_SOCK_ALLOC_KERNEL_OS,
3271 data->u.net_api_stats.net_api_stats.nas_socket_in_kernel_os_total);
3272 nstat_set_keyval_scalar(&kv[i++],
3273 NSTAT_SYSINFO_API_SOCK_NECP_CLIENTUUID,
3274 data->u.net_api_stats.net_api_stats.nas_socket_necp_clientuuid_total);
3275
3276 nstat_set_keyval_scalar(&kv[i++],
3277 NSTAT_SYSINFO_API_SOCK_DOMAIN_LOCAL,
3278 data->u.net_api_stats.net_api_stats.nas_socket_domain_local_total);
3279 nstat_set_keyval_scalar(&kv[i++],
3280 NSTAT_SYSINFO_API_SOCK_DOMAIN_ROUTE,
3281 data->u.net_api_stats.net_api_stats.nas_socket_domain_route_total);
3282 nstat_set_keyval_scalar(&kv[i++],
3283 NSTAT_SYSINFO_API_SOCK_DOMAIN_INET,
3284 data->u.net_api_stats.net_api_stats.nas_socket_domain_inet_total);
3285 nstat_set_keyval_scalar(&kv[i++],
3286 NSTAT_SYSINFO_API_SOCK_DOMAIN_INET6,
3287 data->u.net_api_stats.net_api_stats.nas_socket_domain_inet6_total);
3288 nstat_set_keyval_scalar(&kv[i++],
3289 NSTAT_SYSINFO_API_SOCK_DOMAIN_SYSTEM,
3290 data->u.net_api_stats.net_api_stats.nas_socket_domain_system_total);
3291 nstat_set_keyval_scalar(&kv[i++],
3292 NSTAT_SYSINFO_API_SOCK_DOMAIN_MULTIPATH,
3293 data->u.net_api_stats.net_api_stats.nas_socket_domain_multipath_total);
3294 nstat_set_keyval_scalar(&kv[i++],
3295 NSTAT_SYSINFO_API_SOCK_DOMAIN_KEY,
3296 data->u.net_api_stats.net_api_stats.nas_socket_domain_key_total);
3297 nstat_set_keyval_scalar(&kv[i++],
3298 NSTAT_SYSINFO_API_SOCK_DOMAIN_NDRV,
3299 data->u.net_api_stats.net_api_stats.nas_socket_domain_ndrv_total);
3300 nstat_set_keyval_scalar(&kv[i++],
3301 NSTAT_SYSINFO_API_SOCK_DOMAIN_OTHER,
3302 data->u.net_api_stats.net_api_stats.nas_socket_domain_other_total);
3303
3304 nstat_set_keyval_scalar(&kv[i++],
3305 NSTAT_SYSINFO_API_SOCK_INET_STREAM,
3306 data->u.net_api_stats.net_api_stats.nas_socket_inet_stream_total);
3307 nstat_set_keyval_scalar(&kv[i++],
3308 NSTAT_SYSINFO_API_SOCK_INET_DGRAM,
3309 data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_total);
3310 nstat_set_keyval_scalar(&kv[i++],
3311 NSTAT_SYSINFO_API_SOCK_INET_DGRAM_CONNECTED,
3312 data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_connected);
3313 nstat_set_keyval_scalar(&kv[i++],
3314 NSTAT_SYSINFO_API_SOCK_INET_DGRAM_DNS,
3315 data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_dns);
3316 nstat_set_keyval_scalar(&kv[i++],
3317 NSTAT_SYSINFO_API_SOCK_INET_DGRAM_NO_DATA,
3318 data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_no_data);
3319
3320 nstat_set_keyval_scalar(&kv[i++],
3321 NSTAT_SYSINFO_API_SOCK_INET6_STREAM,
3322 data->u.net_api_stats.net_api_stats.nas_socket_inet6_stream_total);
3323 nstat_set_keyval_scalar(&kv[i++],
3324 NSTAT_SYSINFO_API_SOCK_INET6_DGRAM,
3325 data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_total);
3326 nstat_set_keyval_scalar(&kv[i++],
3327 NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_CONNECTED,
3328 data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_connected);
3329 nstat_set_keyval_scalar(&kv[i++],
3330 NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_DNS,
3331 data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_dns);
3332 nstat_set_keyval_scalar(&kv[i++],
3333 NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_NO_DATA,
3334 data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_no_data);
3335
3336 nstat_set_keyval_scalar(&kv[i++],
3337 NSTAT_SYSINFO_API_SOCK_INET_MCAST_JOIN,
3338 data->u.net_api_stats.net_api_stats.nas_socket_mcast_join_total);
3339 nstat_set_keyval_scalar(&kv[i++],
3340 NSTAT_SYSINFO_API_SOCK_INET_MCAST_JOIN_OS,
3341 data->u.net_api_stats.net_api_stats.nas_socket_mcast_join_os_total);
3342
3343 nstat_set_keyval_scalar(&kv[i++],
3344 NSTAT_SYSINFO_API_NEXUS_FLOW_INET_STREAM,
3345 data->u.net_api_stats.net_api_stats.nas_nx_flow_inet_stream_total);
3346 nstat_set_keyval_scalar(&kv[i++],
3347 NSTAT_SYSINFO_API_NEXUS_FLOW_INET_DATAGRAM,
3348 data->u.net_api_stats.net_api_stats.nas_nx_flow_inet_dgram_total);
3349
3350 nstat_set_keyval_scalar(&kv[i++],
3351 NSTAT_SYSINFO_API_NEXUS_FLOW_INET6_STREAM,
3352 data->u.net_api_stats.net_api_stats.nas_nx_flow_inet6_stream_total);
3353 nstat_set_keyval_scalar(&kv[i++],
3354 NSTAT_SYSINFO_API_NEXUS_FLOW_INET6_DATAGRAM,
3355 data->u.net_api_stats.net_api_stats.nas_nx_flow_inet6_dgram_total);
3356
3357 nstat_set_keyval_scalar(&kv[i++],
3358 NSTAT_SYSINFO_API_IFNET_ALLOC,
3359 data->u.net_api_stats.net_api_stats.nas_ifnet_alloc_total);
3360 nstat_set_keyval_scalar(&kv[i++],
3361 NSTAT_SYSINFO_API_IFNET_ALLOC_OS,
3362 data->u.net_api_stats.net_api_stats.nas_ifnet_alloc_os_total);
3363
3364 nstat_set_keyval_scalar(&kv[i++],
3365 NSTAT_SYSINFO_API_PF_ADDRULE,
3366 data->u.net_api_stats.net_api_stats.nas_pf_addrule_total);
3367 nstat_set_keyval_scalar(&kv[i++],
3368 NSTAT_SYSINFO_API_PF_ADDRULE_OS,
3369 data->u.net_api_stats.net_api_stats.nas_pf_addrule_os);
3370
3371 nstat_set_keyval_scalar(&kv[i++],
3372 NSTAT_SYSINFO_API_VMNET_START,
3373 data->u.net_api_stats.net_api_stats.nas_vmnet_total);
3374
3375
3376 nstat_set_keyval_scalar(&kv[i++],
3377 NSTAT_SYSINFO_API_REPORT_INTERVAL,
3378 data->u.net_api_stats.report_interval);
3379
3380 break;
3381 }
3382 }
3383 if (syscnt != NULL)
3384 {
3385 VERIFY(i > 0 && i <= nkeyvals);
3386 countsize = offsetof(nstat_sysinfo_counts,
3387 nstat_sysinfo_keyvals) +
3388 sizeof(nstat_sysinfo_keyval) * i;
3389 finalsize += countsize;
3390 syscnt->hdr.type = NSTAT_MSG_TYPE_SYSINFO_COUNTS;
3391 syscnt->hdr.length = finalsize;
3392 syscnt->counts.nstat_sysinfo_len = countsize;
3393
3394 result = ctl_enqueuedata(control->ncs_kctl,
3395 control->ncs_unit, syscnt, finalsize, CTL_DATA_EOR);
3396 if (result != 0)
3397 {
3398 nstat_stats.nstat_sysinfofailures += 1;
3399 }
3400 OSFree(syscnt, allocsize, nstat_malloc_tag);
3401 }
3402 return;
3403 }
3404
3405 __private_extern__ void
3406 nstat_sysinfo_send_data(
3407 nstat_sysinfo_data *data)
3408 {
3409 nstat_control_state *control;
3410
3411 lck_mtx_lock(&nstat_mtx);
3412 for (control = nstat_controls; control; control = control->ncs_next) {
3413 lck_mtx_lock(&control->ncs_mtx);
3414 if ((control->ncs_flags & NSTAT_FLAG_SYSINFO_SUBSCRIBED) != 0) {
3415 nstat_sysinfo_send_data_internal(control, data);
3416 }
3417 lck_mtx_unlock(&control->ncs_mtx);
3418 }
3419 lck_mtx_unlock(&nstat_mtx);
3420 }
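
/*
 * Only clients that have subscribed via NSTAT_MSG_TYPE_SUBSCRIBE_SYSINFO
 * (which sets NSTAT_FLAG_SYSINFO_SUBSCRIBED; see
 * nstat_control_handle_subscribe_sysinfo() below) receive these reports.
 */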
3421
3422 static void
3423 nstat_sysinfo_generate_report(void)
3424 {
3425 mbuf_report_peak_usage();
3426 tcp_report_stats();
3427 nstat_ifnet_report_ecn_stats();
3428 nstat_ifnet_report_lim_stats();
3429 nstat_net_api_report_stats();
3430 }
3431
3432 #pragma mark -- net_api --
3433
3434 static struct net_api_stats net_api_stats_before;
3435 static u_int64_t net_api_stats_last_report_time;
3436
3437 static void
3438 nstat_net_api_report_stats(void)
3439 {
3440 struct nstat_sysinfo_data data;
3441 struct nstat_sysinfo_net_api_stats *st = &data.u.net_api_stats;
3442 u_int64_t uptime;
3443
3444 uptime = net_uptime();
3445
3446 if ((u_int32_t)(uptime - net_api_stats_last_report_time) <
3447 net_api_stats_report_interval)
3448 return;
3449
3450 st->report_interval = uptime - net_api_stats_last_report_time;
3451 net_api_stats_last_report_time = uptime;
3452
3453 data.flags = NSTAT_SYSINFO_NET_API_STATS;
3454 data.unsent_data_cnt = 0;
3455
3456 /*
3457 * Some of the fields in the report are the current value and
3458 * other fields are the delta from the last report:
3459 * - Report difference for the per flow counters as they increase
3460 * with time
3461 * - Report current value for other counters as they tend not to change
3462 * much with time
3463 */
3464 #define STATCOPY(f) \
3465 (st->net_api_stats.f = net_api_stats.f)
3466 #define STATDIFF(f) \
3467 (st->net_api_stats.f = net_api_stats.f - net_api_stats_before.f)
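
/*
 * For example, STATDIFF(nas_socket_alloc_total) expands to
 *
 *   st->net_api_stats.nas_socket_alloc_total =
 *       net_api_stats.nas_socket_alloc_total -
 *       net_api_stats_before.nas_socket_alloc_total;
 *
 * i.e. the number of sockets allocated since the previous report, whereas
 * STATCOPY carries the current running value across unchanged.
 */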
3468
3469 STATCOPY(nas_iflt_attach_count);
3470 STATCOPY(nas_iflt_attach_total);
3471 STATCOPY(nas_iflt_attach_os_total);
3472
3473 STATCOPY(nas_ipf_add_count);
3474 STATCOPY(nas_ipf_add_total);
3475 STATCOPY(nas_ipf_add_os_total);
3476
3477 STATCOPY(nas_sfltr_register_count);
3478 STATCOPY(nas_sfltr_register_total);
3479 STATCOPY(nas_sfltr_register_os_total);
3480
3481 STATDIFF(nas_socket_alloc_total);
3482 STATDIFF(nas_socket_in_kernel_total);
3483 STATDIFF(nas_socket_in_kernel_os_total);
3484 STATDIFF(nas_socket_necp_clientuuid_total);
3485
3486 STATDIFF(nas_socket_domain_local_total);
3487 STATDIFF(nas_socket_domain_route_total);
3488 STATDIFF(nas_socket_domain_inet_total);
3489 STATDIFF(nas_socket_domain_inet6_total);
3490 STATDIFF(nas_socket_domain_system_total);
3491 STATDIFF(nas_socket_domain_multipath_total);
3492 STATDIFF(nas_socket_domain_key_total);
3493 STATDIFF(nas_socket_domain_ndrv_total);
3494 STATDIFF(nas_socket_domain_other_total);
3495
3496 STATDIFF(nas_socket_inet_stream_total);
3497 STATDIFF(nas_socket_inet_dgram_total);
3498 STATDIFF(nas_socket_inet_dgram_connected);
3499 STATDIFF(nas_socket_inet_dgram_dns);
3500 STATDIFF(nas_socket_inet_dgram_no_data);
3501
3502 STATDIFF(nas_socket_inet6_stream_total);
3503 STATDIFF(nas_socket_inet6_dgram_total);
3504 STATDIFF(nas_socket_inet6_dgram_connected);
3505 STATDIFF(nas_socket_inet6_dgram_dns);
3506 STATDIFF(nas_socket_inet6_dgram_no_data);
3507
3508 STATDIFF(nas_socket_mcast_join_total);
3509 STATDIFF(nas_socket_mcast_join_os_total);
3510
3511 STATDIFF(nas_sock_inet6_stream_exthdr_in);
3512 STATDIFF(nas_sock_inet6_stream_exthdr_out);
3513 STATDIFF(nas_sock_inet6_dgram_exthdr_in);
3514 STATDIFF(nas_sock_inet6_dgram_exthdr_out);
3515
3516 STATDIFF(nas_nx_flow_inet_stream_total);
3517 STATDIFF(nas_nx_flow_inet_dgram_total);
3518
3519 STATDIFF(nas_nx_flow_inet6_stream_total);
3520 STATDIFF(nas_nx_flow_inet6_dgram_total);
3521
3522 STATCOPY(nas_ifnet_alloc_count);
3523 STATCOPY(nas_ifnet_alloc_total);
3524 STATCOPY(nas_ifnet_alloc_os_count);
3525 STATCOPY(nas_ifnet_alloc_os_total);
3526
3527 STATCOPY(nas_pf_addrule_total);
3528 STATCOPY(nas_pf_addrule_os);
3529
3530 STATCOPY(nas_vmnet_total);
3531
3532 #undef STATCOPY
3533 #undef STATDIFF
3534
3535 nstat_sysinfo_send_data(&data);
3536
3537 /*
3538 * Save a copy of the current fields so we can diff them the next time
3539 */
3540 memcpy(&net_api_stats_before, &net_api_stats,
3541 sizeof(struct net_api_stats));
3542 _CASSERT(sizeof (net_api_stats_before) == sizeof (net_api_stats));
3543 }
3544
3545
3546 #pragma mark -- Kernel Control Socket --
3547
3548 static kern_ctl_ref nstat_ctlref = NULL;
3549 static lck_grp_t *nstat_lck_grp = NULL;
3550
3551 static errno_t nstat_control_connect(kern_ctl_ref kctl, struct sockaddr_ctl *sac, void **uinfo);
3552 static errno_t nstat_control_disconnect(kern_ctl_ref kctl, u_int32_t unit, void *uinfo);
3553 static errno_t nstat_control_send(kern_ctl_ref kctl, u_int32_t unit, void *uinfo, mbuf_t m, int flags);
3554
3555 static errno_t
3556 nstat_enqueue_success(
3557 uint64_t context,
3558 nstat_control_state *state,
3559 u_int16_t flags)
3560 {
3561 nstat_msg_hdr success;
3562 errno_t result;
3563
3564 bzero(&success, sizeof(success));
3565 success.context = context;
3566 success.type = NSTAT_MSG_TYPE_SUCCESS;
3567 success.length = sizeof(success);
3568 success.flags = flags;
3569 result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &success,
3570 sizeof(success), CTL_DATA_EOR | CTL_DATA_CRIT);
3571 if (result != 0) {
3572 if (nstat_debug != 0)
3573 printf("%s: could not enqueue success message %d\n",
3574 __func__, result);
3575 nstat_stats.nstat_successmsgfailures += 1;
3576 }
3577 return result;
3578 }
3579
3580 static errno_t
3581 nstat_control_send_goodbye(
3582 nstat_control_state *state,
3583 nstat_src *src)
3584 {
3585 errno_t result = 0;
3586 int failed = 0;
3587
3588 if (nstat_control_reporting_allowed(state, src))
3589 {
3590 if ((state->ncs_flags & NSTAT_FLAG_SUPPORTS_UPDATES) != 0)
3591 {
3592 result = nstat_control_send_update(state, src, 0, NSTAT_MSG_HDR_FLAG_CLOSING, NULL);
3593 if (result != 0)
3594 {
3595 failed = 1;
3596 if (nstat_debug != 0)
3597 printf("%s - nstat_control_send_update() %d\n", __func__, result);
3598 }
3599 }
3600 else
3601 {
3602 // send one last counts notification
3603 result = nstat_control_send_counts(state, src, 0, NSTAT_MSG_HDR_FLAG_CLOSING, NULL);
3604 if (result != 0)
3605 {
3606 failed = 1;
3607 if (nstat_debug != 0)
3608 printf("%s - nstat_control_send_counts() %d\n", __func__, result);
3609 }
3610
3611 // send a last description
3612 result = nstat_control_send_description(state, src, 0, NSTAT_MSG_HDR_FLAG_CLOSING);
3613 if (result != 0)
3614 {
3615 failed = 1;
3616 if (nstat_debug != 0)
3617 printf("%s - nstat_control_send_description() %d\n", __func__, result);
3618 }
3619 }
3620 }
3621
3622 // send the source removed notification
3623 result = nstat_control_send_removed(state, src);
3624 if (result != 0)
3625 {
3626 failed = 1;
3627 if (nstat_debug != 0)
3628 printf("%s - nstat_control_send_removed() %d\n", __func__, result);
3629 }
3630
3631 if (failed != 0)
3632 nstat_stats.nstat_control_send_goodbye_failures++;
3633
3634
3635 return result;
3636 }
3637
3638 static errno_t
3639 nstat_flush_accumulated_msgs(
3640 nstat_control_state *state)
3641 {
3642 errno_t result = 0;
3643 if (state->ncs_accumulated != NULL && mbuf_len(state->ncs_accumulated) > 0)
3644 {
3645 mbuf_pkthdr_setlen(state->ncs_accumulated, mbuf_len(state->ncs_accumulated));
3646 result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, state->ncs_accumulated, CTL_DATA_EOR);
3647 if (result != 0)
3648 {
3649 nstat_stats.nstat_flush_accumulated_msgs_failures++;
3650 if (nstat_debug != 0)
3651 printf("%s - ctl_enqueuembuf failed: %d\n", __func__, result);
3652 mbuf_freem(state->ncs_accumulated);
3653 }
3654 state->ncs_accumulated = NULL;
3655 }
3656 return result;
3657 }
3658
3659 static errno_t
3660 nstat_accumulate_msg(
3661 nstat_control_state *state,
3662 nstat_msg_hdr *hdr,
3663 size_t length)
3664 {
3665 if (state->ncs_accumulated && mbuf_trailingspace(state->ncs_accumulated) < length)
3666 {
3667 // Flush the current mbuf; the next message will not fit
3668 nstat_flush_accumulated_msgs(state);
3669 }
3670
3671 errno_t result = 0;
3672
3673 if (state->ncs_accumulated == NULL)
3674 {
3675 unsigned int one = 1;
3676 if (mbuf_allocpacket(MBUF_DONTWAIT, NSTAT_MAX_MSG_SIZE, &one, &state->ncs_accumulated) != 0)
3677 {
3678 if (nstat_debug != 0)
3679 printf("%s - mbuf_allocpacket failed\n", __func__);
3680 result = ENOMEM;
3681 }
3682 else
3683 {
3684 mbuf_setlen(state->ncs_accumulated, 0);
3685 }
3686 }
3687
3688 if (result == 0)
3689 {
3690 hdr->length = length;
3691 result = mbuf_copyback(state->ncs_accumulated, mbuf_len(state->ncs_accumulated),
3692 length, hdr, MBUF_DONTWAIT);
3693 }
3694
3695 if (result != 0)
3696 {
3697 nstat_flush_accumulated_msgs(state);
3698 if (nstat_debug != 0)
3699 printf("%s - resorting to ctl_enqueuedata\n", __func__);
3700 result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, hdr, length, CTL_DATA_EOR);
3701 }
3702
3703 if (result != 0)
3704 nstat_stats.nstat_accumulate_msg_failures++;
3705
3706 return result;
3707 }
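
/*
 * Usage note: during a query-all pass the append-style handlers
 * (nstat_control_append_counts() and friends) call nstat_accumulate_msg()
 * repeatedly so that many small replies share one NSTAT_MAX_MSG_SIZE mbuf;
 * the request handler then calls nstat_flush_accumulated_msgs() once at
 * the end.
 */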
3708
3709 static void*
3710 nstat_idle_check(
3711 __unused thread_call_param_t p0,
3712 __unused thread_call_param_t p1)
3713 {
3714 nstat_control_state *control;
3715 nstat_src *src, *tmpsrc;
3716 tailq_head_nstat_src dead_list;
3717 TAILQ_INIT(&dead_list);
3718
3719 lck_mtx_lock(&nstat_mtx);
3720
3721 nstat_idle_time = 0;
3722
3723 for (control = nstat_controls; control; control = control->ncs_next)
3724 {
3725 lck_mtx_lock(&control->ncs_mtx);
3726 if (!(control->ncs_flags & NSTAT_FLAG_REQCOUNTS))
3727 {
3728 TAILQ_FOREACH_SAFE(src, &control->ncs_src_queue, ns_control_link, tmpsrc)
3729 {
3730 if (src->provider->nstat_gone(src->cookie))
3731 {
3732 errno_t result;
3733
3734 // Pull it off the list
3735 TAILQ_REMOVE(&control->ncs_src_queue, src, ns_control_link);
3736
3737 result = nstat_control_send_goodbye(control, src);
3738
3739 // Put this on the list to release later
3740 TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
3741 }
3742 }
3743 }
3744 control->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS;
3745 lck_mtx_unlock(&control->ncs_mtx);
3746 }
3747
3748 if (nstat_controls)
3749 {
3750 clock_interval_to_deadline(60, NSEC_PER_SEC, &nstat_idle_time);
3751 thread_call_func_delayed((thread_call_func_t)nstat_idle_check, NULL, nstat_idle_time);
3752 }
3753
3754 lck_mtx_unlock(&nstat_mtx);
3755
3756 /* Generate any system-level reports, if needed */
3757 nstat_sysinfo_generate_report();
3758
3759 // Release the sources now that we aren't holding lots of locks
3760 while ((src = TAILQ_FIRST(&dead_list)))
3761 {
3762 TAILQ_REMOVE(&dead_list, src, ns_control_link);
3763 nstat_control_cleanup_source(NULL, src, FALSE);
3764 }
3765
3766
3767 return NULL;
3768 }
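
/*
 * Note: nstat_idle_check() unlinks dead sources while the per-control mutex
 * is held, but defers nstat_control_cleanup_source() until every lock has
 * been dropped, presumably so that the provider's nstat_release() callback
 * never runs with nstat locks held.
 */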
3769
3770 static void
3771 nstat_control_register(void)
3772 {
3773 // Create our lock group first
3774 lck_grp_attr_t *grp_attr = lck_grp_attr_alloc_init();
3775 lck_grp_attr_setdefault(grp_attr);
3776 nstat_lck_grp = lck_grp_alloc_init("network statistics kctl", grp_attr);
3777 lck_grp_attr_free(grp_attr);
3778
3779 lck_mtx_init(&nstat_mtx, nstat_lck_grp, NULL);
3780
3781 // Register the control
3782 struct kern_ctl_reg nstat_control;
3783 bzero(&nstat_control, sizeof(nstat_control));
3784 strlcpy(nstat_control.ctl_name, NET_STAT_CONTROL_NAME, sizeof(nstat_control.ctl_name));
3785 nstat_control.ctl_flags = CTL_FLAG_REG_EXTENDED | CTL_FLAG_REG_CRIT;
3786 nstat_control.ctl_sendsize = nstat_sendspace;
3787 nstat_control.ctl_recvsize = nstat_recvspace;
3788 nstat_control.ctl_connect = nstat_control_connect;
3789 nstat_control.ctl_disconnect = nstat_control_disconnect;
3790 nstat_control.ctl_send = nstat_control_send;
3791
3792 ctl_register(&nstat_control, &nstat_ctlref);
3793 }
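
/*
 * For reference, a minimal user-space sketch (not compiled here) of
 * connecting to the control registered above. This is the standard
 * PF_SYSTEM/SYSPROTO_CONTROL sequence; error handling is omitted and the
 * function name is illustrative only.
 */
#if 0
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/sys_domain.h>
#include <sys/kern_control.h>
#include <net/ntstat.h>
#include <string.h>

static int
nstat_client_open(void)
{
	struct ctl_info info;
	struct sockaddr_ctl sc;
	int fd = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);

	// Translate the registered control name into a control id
	memset(&info, 0, sizeof(info));
	strlcpy(info.ctl_name, NET_STAT_CONTROL_NAME, sizeof(info.ctl_name));
	ioctl(fd, CTLIOCGINFO, &info);

	// Connect; sc_unit 0 lets the kernel assign a unit
	memset(&sc, 0, sizeof(sc));
	sc.sc_len = sizeof(sc);
	sc.sc_family = AF_SYSTEM;
	sc.ss_sysaddr = AF_SYS_CONTROL;
	sc.sc_id = info.ctl_id;
	sc.sc_unit = 0;
	connect(fd, (struct sockaddr *)&sc, sizeof(sc));
	return fd;
}
#endif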
3794
3795 static void
3796 nstat_control_cleanup_source(
3797 nstat_control_state *state,
3798 struct nstat_src *src,
3799 boolean_t locked)
3800 {
3801 errno_t result;
3802
3803 if (state)
3804 {
3805 result = nstat_control_send_removed(state, src);
3806 if (result != 0)
3807 {
3808 nstat_stats.nstat_control_cleanup_source_failures++;
3809 if (nstat_debug != 0)
3810 printf("%s - nstat_control_send_removed() %d\n",
3811 __func__, result);
3812 }
3813 }
3814 // Clean up the source.
3815 src->provider->nstat_release(src->cookie, locked);
3816 OSFree(src, sizeof(*src), nstat_malloc_tag);
3817 }
3818
3819
3820 static bool
3821 nstat_control_reporting_allowed(
3822 nstat_control_state *state,
3823 nstat_src *src)
3824 {
3825 if (src->provider->nstat_reporting_allowed == NULL)
3826 return TRUE;
3827
3828 return (
3829 src->provider->nstat_reporting_allowed(src->cookie,
3830 &state->ncs_provider_filters[src->provider->nstat_provider_id])
3831 );
3832 }
3833
3834
3835 static errno_t
3836 nstat_control_connect(
3837 kern_ctl_ref kctl,
3838 struct sockaddr_ctl *sac,
3839 void **uinfo)
3840 {
3841 nstat_control_state *state = OSMalloc(sizeof(*state), nstat_malloc_tag);
3842 if (state == NULL) return ENOMEM;
3843
3844 bzero(state, sizeof(*state));
3845 lck_mtx_init(&state->ncs_mtx, nstat_lck_grp, NULL);
3846 state->ncs_kctl = kctl;
3847 state->ncs_unit = sac->sc_unit;
3848 state->ncs_flags = NSTAT_FLAG_REQCOUNTS;
3849 *uinfo = state;
3850
3851 lck_mtx_lock(&nstat_mtx);
3852 state->ncs_next = nstat_controls;
3853 nstat_controls = state;
3854
3855 if (nstat_idle_time == 0)
3856 {
3857 clock_interval_to_deadline(60, NSEC_PER_SEC, &nstat_idle_time);
3858 thread_call_func_delayed((thread_call_func_t)nstat_idle_check, NULL, nstat_idle_time);
3859 }
3860
3861 lck_mtx_unlock(&nstat_mtx);
3862
3863 return 0;
3864 }
3865
3866 static errno_t
3867 nstat_control_disconnect(
3868 __unused kern_ctl_ref kctl,
3869 __unused u_int32_t unit,
3870 void *uinfo)
3871 {
3872 u_int32_t watching;
3873 nstat_control_state *state = (nstat_control_state*)uinfo;
3874 tailq_head_nstat_src cleanup_list;
3875 nstat_src *src;
3876
3877 TAILQ_INIT(&cleanup_list);
3878
3879 // pull it out of the global list of states
3880 lck_mtx_lock(&nstat_mtx);
3881 nstat_control_state **statepp;
3882 for (statepp = &nstat_controls; *statepp; statepp = &(*statepp)->ncs_next)
3883 {
3884 if (*statepp == state)
3885 {
3886 *statepp = state->ncs_next;
3887 break;
3888 }
3889 }
3890 lck_mtx_unlock(&nstat_mtx);
3891
3892 lck_mtx_lock(&state->ncs_mtx);
3893 // Stop watching for sources
3894 nstat_provider *provider;
3895 watching = state->ncs_watching;
3896 state->ncs_watching = 0;
3897 for (provider = nstat_providers; provider && watching; provider = provider->next)
3898 {
3899 if ((watching & (1 << provider->nstat_provider_id)) != 0)
3900 {
3901 watching &= ~(1 << provider->nstat_provider_id);
3902 provider->nstat_watcher_remove(state);
3903 }
3904 }
3905
3906 // set cleanup flags
3907 state->ncs_flags |= NSTAT_FLAG_CLEANUP;
3908
3909 if (state->ncs_accumulated)
3910 {
3911 mbuf_freem(state->ncs_accumulated);
3912 state->ncs_accumulated = NULL;
3913 }
3914
3915 // Copy out the list of sources
3916 TAILQ_CONCAT(&cleanup_list, &state->ncs_src_queue, ns_control_link);
3917 lck_mtx_unlock(&state->ncs_mtx);
3918
3919 while ((src = TAILQ_FIRST(&cleanup_list)))
3920 {
3921 TAILQ_REMOVE(&cleanup_list, src, ns_control_link);
3922 nstat_control_cleanup_source(NULL, src, FALSE);
3923 }
3924
3925 lck_mtx_destroy(&state->ncs_mtx, nstat_lck_grp);
3926 OSFree(state, sizeof(*state), nstat_malloc_tag);
3927
3928 return 0;
3929 }
3930
3931 static nstat_src_ref_t
3932 nstat_control_next_src_ref(
3933 nstat_control_state *state)
3934 {
3935 return ++state->ncs_next_srcref;
3936 }
3937
3938 static errno_t
3939 nstat_control_send_counts(
3940 nstat_control_state *state,
3941 nstat_src *src,
3942 u_int64_t context,
3943 u_int16_t hdr_flags,
3944 int *gone)
3945 {
3946 nstat_msg_src_counts counts;
3947 errno_t result = 0;
3948
3949 /* Some providers may not have any counts to send */
3950 if (src->provider->nstat_counts == NULL)
3951 return (0);
3952
3953 bzero(&counts, sizeof(counts));
3954 counts.hdr.type = NSTAT_MSG_TYPE_SRC_COUNTS;
3955 counts.hdr.length = sizeof(counts);
3956 counts.hdr.flags = hdr_flags;
3957 counts.hdr.context = context;
3958 counts.srcref = src->srcref;
3959 counts.event_flags = 0;
3960
3961 if (src->provider->nstat_counts(src->cookie, &counts.counts, gone) == 0)
3962 {
3963 if ((src->filter & NSTAT_FILTER_NOZEROBYTES) &&
3964 counts.counts.nstat_rxbytes == 0 &&
3965 counts.counts.nstat_txbytes == 0)
3966 {
3967 result = EAGAIN;
3968 }
3969 else
3970 {
3971 result = ctl_enqueuedata(state->ncs_kctl,
3972 state->ncs_unit, &counts, sizeof(counts),
3973 CTL_DATA_EOR);
3974 if (result != 0)
3975 nstat_stats.nstat_sendcountfailures += 1;
3976 }
3977 }
3978 return result;
3979 }
3980
3981 static errno_t
3982 nstat_control_append_counts(
3983 nstat_control_state *state,
3984 nstat_src *src,
3985 int *gone)
3986 {
3987 /* Some providers may not have any counts to send */
3988 if (!src->provider->nstat_counts) return 0;
3989
3990 nstat_msg_src_counts counts;
3991 bzero(&counts, sizeof(counts));
3992 counts.hdr.type = NSTAT_MSG_TYPE_SRC_COUNTS;
3993 counts.hdr.length = sizeof(counts);
3994 counts.srcref = src->srcref;
3995 counts.event_flags = 0;
3996
3997 errno_t result = 0;
3998 result = src->provider->nstat_counts(src->cookie, &counts.counts, gone);
3999 if (result != 0)
4000 {
4001 return result;
4002 }
4003
4004 if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
4005 counts.counts.nstat_rxbytes == 0 && counts.counts.nstat_txbytes == 0)
4006 {
4007 return EAGAIN;
4008 }
4009
4010 return nstat_accumulate_msg(state, &counts.hdr, counts.hdr.length);
4011 }
4012
4013 static int
4014 nstat_control_send_description(
4015 nstat_control_state *state,
4016 nstat_src *src,
4017 u_int64_t context,
4018 u_int16_t hdr_flags)
4019 {
4020 // Provider doesn't support getting the descriptor? Done.
4021 if (src->provider->nstat_descriptor_length == 0 ||
4022 src->provider->nstat_copy_descriptor == NULL)
4023 {
4024 return EOPNOTSUPP;
4025 }
4026
4027 // Allocate storage for the descriptor message
4028 mbuf_t msg;
4029 unsigned int one = 1;
4030 u_int32_t size = offsetof(nstat_msg_src_description, data) + src->provider->nstat_descriptor_length;
4031 if (mbuf_allocpacket(MBUF_DONTWAIT, size, &one, &msg) != 0)
4032 {
4033 return ENOMEM;
4034 }
4035
4036 nstat_msg_src_description *desc = (nstat_msg_src_description*)mbuf_data(msg);
4037 bzero(desc, size);
4038 mbuf_setlen(msg, size);
4039 mbuf_pkthdr_setlen(msg, mbuf_len(msg));
4040
4041 // Query the provider for the provider-specific bits
4042 errno_t result = src->provider->nstat_copy_descriptor(src->cookie, desc->data, src->provider->nstat_descriptor_length);
4043
4044 if (result != 0)
4045 {
4046 mbuf_freem(msg);
4047 return result;
4048 }
4049
4050 desc->hdr.context = context;
4051 desc->hdr.type = NSTAT_MSG_TYPE_SRC_DESC;
4052 desc->hdr.length = size;
4053 desc->hdr.flags = hdr_flags;
4054 desc->srcref = src->srcref;
4055 desc->event_flags = 0;
4056 desc->provider = src->provider->nstat_provider_id;
4057
4058 result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg, CTL_DATA_EOR);
4059 if (result != 0)
4060 {
4061 nstat_stats.nstat_descriptionfailures += 1;
4062 mbuf_freem(msg);
4063 }
4064
4065 return result;
4066 }
4067
4068 static errno_t
4069 nstat_control_append_description(
4070 nstat_control_state *state,
4071 nstat_src *src)
4072 {
4073 size_t size = offsetof(nstat_msg_src_description, data) + src->provider->nstat_descriptor_length;
4074 if (size > 512 || src->provider->nstat_descriptor_length == 0 ||
4075 src->provider->nstat_copy_descriptor == NULL)
4076 {
4077 return EOPNOTSUPP;
4078 }
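
	// The 512-byte cap bounds the variable-length stack buffer declared below.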
4079
4080 // Fill out a buffer on the stack; we will copy it to the mbuf later
4081 u_int64_t buffer[size/sizeof(u_int64_t) + 1]; // u_int64_t to ensure alignment
4082 bzero(buffer, size);
4083
4084 nstat_msg_src_description *desc = (nstat_msg_src_description*)buffer;
4085 desc->hdr.type = NSTAT_MSG_TYPE_SRC_DESC;
4086 desc->hdr.length = size;
4087 desc->srcref = src->srcref;
4088 desc->event_flags = 0;
4089 desc->provider = src->provider->nstat_provider_id;
4090
4091 errno_t result = 0;
4092 // Fill in the description
4093 // Query the provider for the provider-specific bits
4094 result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
4095 src->provider->nstat_descriptor_length);
4096 if (result != 0)
4097 {
4098 return result;
4099 }
4100
4101 return nstat_accumulate_msg(state, &desc->hdr, size);
4102 }
4103
4104 static int
4105 nstat_control_send_update(
4106 nstat_control_state *state,
4107 nstat_src *src,
4108 u_int64_t context,
4109 u_int16_t hdr_flags,
4110 int *gone)
4111 {
4112 // Provider doesn't support getting the descriptor or counts? Done.
4113 if ((src->provider->nstat_descriptor_length == 0 ||
4114 src->provider->nstat_copy_descriptor == NULL) &&
4115 src->provider->nstat_counts == NULL)
4116 {
4117 return EOPNOTSUPP;
4118 }
4119
4120 // Allocate storage for the descriptor message
4121 mbuf_t msg;
4122 unsigned int one = 1;
4123 u_int32_t size = offsetof(nstat_msg_src_update, data) +
4124 src->provider->nstat_descriptor_length;
4125 if (mbuf_allocpacket(MBUF_DONTWAIT, size, &one, &msg) != 0)
4126 {
4127 return ENOMEM;
4128 }
4129
4130 nstat_msg_src_update *desc = (nstat_msg_src_update*)mbuf_data(msg);
4131 bzero(desc, size);
4132 desc->hdr.context = context;
4133 desc->hdr.type = NSTAT_MSG_TYPE_SRC_UPDATE;
4134 desc->hdr.length = size;
4135 desc->hdr.flags = hdr_flags;
4136 desc->srcref = src->srcref;
4137 desc->event_flags = 0;
4138 desc->provider = src->provider->nstat_provider_id;
4139
4140 mbuf_setlen(msg, size);
4141 mbuf_pkthdr_setlen(msg, mbuf_len(msg));
4142
4143 errno_t result = 0;
4144 if (src->provider->nstat_descriptor_length != 0 && src->provider->nstat_copy_descriptor)
4145 {
4146 // Query the provider for the provider-specific bits
4147 result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
4148 src->provider->nstat_descriptor_length);
4149 if (result != 0)
4150 {
4151 mbuf_freem(msg);
4152 return result;
4153 }
4154 }
4155
4156 if (src->provider->nstat_counts)
4157 {
4158 result = src->provider->nstat_counts(src->cookie, &desc->counts, gone);
4159 if (result == 0)
4160 {
4161 if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
4162 desc->counts.nstat_rxbytes == 0 && desc->counts.nstat_txbytes == 0)
4163 {
4164 result = EAGAIN;
4165 }
4166 else
4167 {
4168 result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg, CTL_DATA_EOR);
4169 }
4170 }
4171 }
4172
4173 if (result != 0)
4174 {
4175 nstat_stats.nstat_srcupatefailures += 1;
4176 mbuf_freem(msg);
4177 }
4178
4179 return result;
4180 }
4181
4182 static errno_t
4183 nstat_control_append_update(
4184 nstat_control_state *state,
4185 nstat_src *src,
4186 int *gone)
4187 {
4188 size_t size = offsetof(nstat_msg_src_update, data) + src->provider->nstat_descriptor_length;
4189 if (size > 512 || ((src->provider->nstat_descriptor_length == 0 ||
4190 src->provider->nstat_copy_descriptor == NULL) &&
4191 src->provider->nstat_counts == NULL))
4192 {
4193 return EOPNOTSUPP;
4194 }
4195
4196 // Fill out a buffer on the stack; we will copy it to the mbuf later
4197 u_int64_t buffer[size/sizeof(u_int64_t) + 1]; // u_int64_t to ensure alignment
4198 bzero(buffer, size);
4199
4200 nstat_msg_src_update *desc = (nstat_msg_src_update*)buffer;
4201 desc->hdr.type = NSTAT_MSG_TYPE_SRC_UPDATE;
4202 desc->hdr.length = size;
4203 desc->srcref = src->srcref;
4204 desc->event_flags = 0;
4205 desc->provider = src->provider->nstat_provider_id;
4206
4207 errno_t result = 0;
4208 // Fill in the description
4209 if (src->provider->nstat_descriptor_length != 0 && src->provider->nstat_copy_descriptor)
4210 {
4211 // Query the provider for the provider-specific bits
4212 result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
4213 src->provider->nstat_descriptor_length);
4214 if (result != 0)
4215 {
4216 nstat_stats.nstat_copy_descriptor_failures++;
4217 if (nstat_debug != 0)
4218 printf("%s: src->provider->nstat_copy_descriptor: %d\n", __func__, result);
4219 return result;
4220 }
4221 }
4222
4223 if (src->provider->nstat_counts)
4224 {
4225 result = src->provider->nstat_counts(src->cookie, &desc->counts, gone);
4226 if (result != 0)
4227 {
4228 nstat_stats.nstat_provider_counts_failures++;
4229 if (nstat_debug != 0)
4230 printf("%s: src->provider->nstat_counts: %d\n", __func__, result);
4231 return result;
4232 }
4233
4234 if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
4235 desc->counts.nstat_rxbytes == 0 && desc->counts.nstat_txbytes == 0)
4236 {
4237 return EAGAIN;
4238 }
4239 }
4240
4241 return nstat_accumulate_msg(state, &desc->hdr, size);
4242 }
4243
4244 static errno_t
4245 nstat_control_send_removed(
4246 nstat_control_state *state,
4247 nstat_src *src)
4248 {
4249 nstat_msg_src_removed removed;
4250 errno_t result;
4251
4252 bzero(&removed, sizeof(removed));
4253 removed.hdr.type = NSTAT_MSG_TYPE_SRC_REMOVED;
4254 removed.hdr.length = sizeof(removed);
4255 removed.hdr.context = 0;
4256 removed.srcref = src->srcref;
4257 result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &removed,
4258 sizeof(removed), CTL_DATA_EOR | CTL_DATA_CRIT);
4259 if (result != 0)
4260 nstat_stats.nstat_msgremovedfailures += 1;
4261
4262 return result;
4263 }
4264
4265 static errno_t
4266 nstat_control_handle_add_request(
4267 nstat_control_state *state,
4268 mbuf_t m)
4269 {
4270 errno_t result;
4271
4272 // Verify the header fits in the first mbuf
4273 if (mbuf_len(m) < offsetof(nstat_msg_add_src_req, param))
4274 {
4275 return EINVAL;
4276 }
4277
4278 // Calculate the length of the parameter field
4279 int32_t paramlength = mbuf_pkthdr_len(m) - offsetof(nstat_msg_add_src_req, param);
4280 if (paramlength < 0 || paramlength > 2 * 1024)
4281 {
4282 return EINVAL;
4283 }
4284
4285 nstat_provider *provider = NULL;
4286 nstat_provider_cookie_t cookie = NULL;
4287 nstat_msg_add_src_req *req = mbuf_data(m);
4288 if (mbuf_pkthdr_len(m) > mbuf_len(m))
4289 {
4290 // The parameter spans multiple mbufs; make a contiguous copy
4291 void *data = OSMalloc(paramlength, nstat_malloc_tag);
4292
4293 if (!data) return ENOMEM;
4294 result = mbuf_copydata(m, offsetof(nstat_msg_add_src_req, param), paramlength, data);
4295 if (result == 0)
4296 result = nstat_lookup_entry(req->provider, data, paramlength, &provider, &cookie);
4297 OSFree(data, paramlength, nstat_malloc_tag);
4298 }
4299 else
4300 {
4301 result = nstat_lookup_entry(req->provider, (void*)&req->param, paramlength, &provider, &cookie);
4302 }
4303
4304 if (result != 0)
4305 {
4306 return result;
4307 }
4308
4309 result = nstat_control_source_add(req->hdr.context, state, provider, cookie);
4310 if (result != 0)
4311 provider->nstat_release(cookie, 0);
4312
4313 return result;
4314 }
4315
4316 static errno_t
4317 nstat_set_provider_filter(
4318 nstat_control_state *state,
4319 nstat_msg_add_all_srcs *req)
4320 {
4321 nstat_provider_id_t provider_id = req->provider;
4322
4323 u_int32_t prev_ncs_watching = atomic_or_32_ov(&state->ncs_watching, (1 << provider_id));
4324
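	// atomic_or_32_ov() returns the previous bitmask; if this provider's bit
	// was already set, a watcher registration is already in place.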
4325 if ((prev_ncs_watching & (1 << provider_id)) != 0)
4326 return EALREADY;
4327
4328 state->ncs_watching |= (1 << provider_id);
4329 state->ncs_provider_filters[provider_id].npf_flags = req->filter;
4330 state->ncs_provider_filters[provider_id].npf_events = req->events;
4331 state->ncs_provider_filters[provider_id].npf_pid = req->target_pid;
4332 uuid_copy(state->ncs_provider_filters[provider_id].npf_uuid, req->target_uuid);
4333 return 0;
4334 }
4335
4336 static errno_t
4337 nstat_control_handle_add_all(
4338 nstat_control_state *state,
4339 mbuf_t m)
4340 {
4341 errno_t result = 0;
4342
4343 // Verify the header fits in the first mbuf
4344 if (mbuf_len(m) < sizeof(nstat_msg_add_all_srcs))
4345 {
4346 return EINVAL;
4347 }
4348
4349 nstat_msg_add_all_srcs *req = mbuf_data(m);
4350 if (req->provider > NSTAT_PROVIDER_LAST) return ENOENT;
4351
4352 nstat_provider *provider = nstat_find_provider_by_id(req->provider);
4353
4354 if (!provider) return ENOENT;
4355 if (provider->nstat_watcher_add == NULL) return ENOTSUP;
4356
4357 if (nstat_privcheck != 0) {
4358 result = priv_check_cred(kauth_cred_get(),
4359 PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0);
4360 if (result != 0)
4361 return result;
4362 }
4363
4364 lck_mtx_lock(&state->ncs_mtx);
4365 if (req->filter & NSTAT_FILTER_SUPPRESS_SRC_ADDED)
4366 {
4367 // Suppression of source messages implicitly requires the use of update messages
4368 state->ncs_flags |= NSTAT_FLAG_SUPPORTS_UPDATES;
4369 }
4370 lck_mtx_unlock(&state->ncs_mtx);
4371
4372 // rdar://problem/30301300 Different providers require different synchronization
4373 // to ensure that a new entry does not get double counted due to being added prior
4374 // to all current provider entries being added. Hence pass the provider the details
4375 // in the original request for this to be applied atomically
4376
4377 result = provider->nstat_watcher_add(state, req);
4378
4379 if (result == 0)
4380 nstat_enqueue_success(req->hdr.context, state, 0);
4381
4382 return result;
4383 }
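
/*
 * A user-space sketch (not compiled here) of the corresponding request.
 * Field names follow nstat_msg_add_all_srcs as handled above;
 * NSTAT_PROVIDER_TCP_KERNEL stands in for whichever provider id the client
 * wants, and `fd` is a connected control socket as in the earlier sketch.
 */
#if 0
nstat_msg_add_all_srcs req;

memset(&req, 0, sizeof(req));
req.hdr.type = NSTAT_MSG_TYPE_ADD_ALL_SRCS;
req.hdr.length = sizeof(req);
req.hdr.context = 1;                           // echoed back in the reply
req.provider = NSTAT_PROVIDER_TCP_KERNEL;
req.filter = NSTAT_FILTER_SUPPRESS_SRC_ADDED;  // implies update messages
req.events = 0;
req.target_pid = 0;                            // 0: no pid-based filtering
uuid_clear(req.target_uuid);                   // no uuid-based filtering
send(fd, &req, sizeof(req), 0);
#endif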
4384
4385 static errno_t
4386 nstat_control_source_add(
4387 u_int64_t context,
4388 nstat_control_state *state,
4389 nstat_provider *provider,
4390 nstat_provider_cookie_t cookie)
4391 {
4392 // Fill out source added message if appropriate
4393 mbuf_t msg = NULL;
4394 nstat_src_ref_t *srcrefp = NULL;
4395
4396 u_int64_t provider_filter_flags =
4397 state->ncs_provider_filters[provider->nstat_provider_id].npf_flags;
4398 boolean_t tell_user =
4399 ((provider_filter_flags & NSTAT_FILTER_SUPPRESS_SRC_ADDED) == 0);
4400 u_int32_t src_filter =
4401 (provider_filter_flags & NSTAT_FILTER_PROVIDER_NOZEROBYTES)
4402 ? NSTAT_FILTER_NOZEROBYTES : 0;
4403
4404 if (provider_filter_flags & NSTAT_FILTER_TCP_NO_EARLY_CLOSE)
4405 {
4406 src_filter |= NSTAT_FILTER_TCP_NO_EARLY_CLOSE;
4407 }
4408
4409 if (tell_user)
4410 {
4411 unsigned int one = 1;
4412
4413 if (mbuf_allocpacket(MBUF_DONTWAIT, sizeof(nstat_msg_src_added),
4414 &one, &msg) != 0)
4415 return ENOMEM;
4416
4417 mbuf_setlen(msg, sizeof(nstat_msg_src_added));
4418 mbuf_pkthdr_setlen(msg, mbuf_len(msg));
4419 nstat_msg_src_added *add = mbuf_data(msg);
4420 bzero(add, sizeof(*add));
4421 add->hdr.type = NSTAT_MSG_TYPE_SRC_ADDED;
4422 add->hdr.length = mbuf_len(msg);
4423 add->hdr.context = context;
4424 add->provider = provider->nstat_provider_id;
4425 srcrefp = &add->srcref;
4426 }
4427
4428 // Allocate storage for the source
4429 nstat_src *src = OSMalloc(sizeof(*src), nstat_malloc_tag);
4430 if (src == NULL)
4431 {
4432 if (msg) mbuf_freem(msg);
4433 return ENOMEM;
4434 }
4435
4436 // Fill in the source, including picking an unused source ref
4437 lck_mtx_lock(&state->ncs_mtx);
4438
4439 src->srcref = nstat_control_next_src_ref(state);
4440 if (srcrefp)
4441 *srcrefp = src->srcref;
4442
4443 if (state->ncs_flags & NSTAT_FLAG_CLEANUP || src->srcref == NSTAT_SRC_REF_INVALID)
4444 {
4445 lck_mtx_unlock(&state->ncs_mtx);
4446 OSFree(src, sizeof(*src), nstat_malloc_tag);
4447 if (msg) mbuf_freem(msg);
4448 return EINVAL;
4449 }
4450 src->provider = provider;
4451 src->cookie = cookie;
4452 src->filter = src_filter;
4453 src->seq = 0;
4454
4455 if (msg)
4456 {
4457 // send the source added message if appropriate
4458 errno_t result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg,
4459 CTL_DATA_EOR);
4460 if (result != 0)
4461 {
4462 nstat_stats.nstat_srcaddedfailures += 1;
4463 lck_mtx_unlock(&state->ncs_mtx);
4464 OSFree(src, sizeof(*src), nstat_malloc_tag);
4465 mbuf_freem(msg);
4466 return result;
4467 }
4468 }
4469 // Put the source in the list
4470 TAILQ_INSERT_HEAD(&state->ncs_src_queue, src, ns_control_link);
4471 src->ns_control = state;
4472
4473 lck_mtx_unlock(&state->ncs_mtx);
4474
4475 return 0;
4476 }
4477
4478 static errno_t
4479 nstat_control_handle_remove_request(
4480 nstat_control_state *state,
4481 mbuf_t m)
4482 {
4483 nstat_src_ref_t srcref = NSTAT_SRC_REF_INVALID;
4484 nstat_src *src;
4485
4486 if (mbuf_copydata(m, offsetof(nstat_msg_rem_src_req, srcref), sizeof(srcref), &srcref) != 0)
4487 {
4488 return EINVAL;
4489 }
4490
4491 lck_mtx_lock(&state->ncs_mtx);
4492
4493 // Look for the source; it is unlinked below if found
4494 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
4495 {
4496 if (src->srcref == srcref)
4497 {
4498 break;
4499 }
4500 }
4501 if (src)
4502 {
4503 TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
4504 }
4505
4506 lck_mtx_unlock(&state->ncs_mtx);
4507
4508 if (src) nstat_control_cleanup_source(state, src, FALSE);
4509
4510 return src ? 0 : ENOENT;
4511 }
4512
4513 static errno_t
4514 nstat_control_handle_query_request(
4515 nstat_control_state *state,
4516 mbuf_t m)
4517 {
4518 // TBD: handle this from another thread so we can enqueue a lot of data
4519 // As written, if a client requests query all, this function will be
4520 // called from their send of the request message. We will attempt to write
4521 // responses and succeed until the buffer fills up. Since the client's thread
4522 // is blocked on send, it won't be reading unless the client has two threads
4523 // using this socket, one for read and one for write. Two threads probably
4524 // won't work with this code anyhow since we don't have proper locking in
4525 // place yet.
4526 tailq_head_nstat_src dead_list;
4527 errno_t result = ENOENT;
4528 nstat_msg_query_src_req req;
4529
4530 if (mbuf_copydata(m, 0, sizeof(req), &req) != 0)
4531 {
4532 return EINVAL;
4533 }
4534
4535 const boolean_t all_srcs = (req.srcref == NSTAT_SRC_REF_ALL);
4536 TAILQ_INIT(&dead_list);
4537
4538 lck_mtx_lock(&state->ncs_mtx);
4539
4540 if (all_srcs)
4541 {
4542 state->ncs_flags |= NSTAT_FLAG_REQCOUNTS;
4543 }
4544 nstat_src *src, *tmpsrc;
4545 u_int64_t src_count = 0;
4546 boolean_t partial = FALSE;
4547
4548 /*
4549 * Error handling policy and sequence number generation is folded into
4550 * nstat_control_begin_query.
4551 */
4552 partial = nstat_control_begin_query(state, &req.hdr);
4553
4554
4555 TAILQ_FOREACH_SAFE(src, &state->ncs_src_queue, ns_control_link, tmpsrc)
4556 {
4557 int gone = 0;
4558
4559 // XXX ignore IFACE types?
4560 if (all_srcs || src->srcref == req.srcref)
4561 {
4562 if (nstat_control_reporting_allowed(state, src)
4563 && (!partial || !all_srcs || src->seq != state->ncs_seq))
4564 {
4565 if (all_srcs &&
4566 (req.hdr.flags & NSTAT_MSG_HDR_FLAG_SUPPORTS_AGGREGATE) != 0)
4567 {
4568 result = nstat_control_append_counts(state, src, &gone);
4569 }
4570 else
4571 {
4572 result = nstat_control_send_counts(state, src, req.hdr.context, 0, &gone);
4573 }
4574
4575 if (ENOMEM == result || ENOBUFS == result)
4576 {
4577 /*
4578 * If the counts message failed to
4579 * enqueue then we should clear our flag so
4580 * that a client doesn't miss anything on
4581 * idle cleanup. We skip the "gone"
4582 * processing in the hope that we may
4583 * catch it another time.
4584 */
4585 state->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS;
4586 break;
4587 }
4588 if (partial)
4589 {
4590 /*
4591 * We skip over hard errors and
4592 * filtered sources.
4593 */
4594 src->seq = state->ncs_seq;
4595 src_count++;
4596 }
4597 }
4598 }
4599
4600 if (gone)
4601 {
4602 // Send one last descriptor message so the client may see the final
4603 // state. If we can't send the notification now, it will be sent
4604 // during the idle cleanup.
4605 result = nstat_control_send_description(state, src, 0, 0);
4606 if (result != 0)
4607 {
4608 nstat_stats.nstat_control_send_description_failures++;
4609 if (nstat_debug != 0)
4610 printf("%s - nstat_control_send_description() %d\n", __func__, result);
4611 state->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS;
4612 break;
4613 }
4614
4615 // pull src out of the list
4616 TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
4617 TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
4618 }
4619
4620 if (all_srcs)
4621 {
4622 if (src_count >= QUERY_CONTINUATION_SRC_COUNT)
4623 {
4624 break;
4625 }
4626 }
4627 else if (req.srcref == src->srcref)
4628 {
4629 break;
4630 }
4631 }
4632
4633 nstat_flush_accumulated_msgs(state);
4634
4635 u_int16_t flags = 0;
4636 if (req.srcref == NSTAT_SRC_REF_ALL)
4637 flags = nstat_control_end_query(state, src, partial);
4638
4639 lck_mtx_unlock(&state->ncs_mtx);
4640
4641 /*
4642 * If an error occurred enqueueing data, then allow the error to
4643 * propagate to nstat_control_send. This way, the error is sent to
4644 * user-level.
4645 */
4646 if (all_srcs && ENOMEM != result && ENOBUFS != result)
4647 {
4648 nstat_enqueue_success(req.hdr.context, state, flags);
4649 result = 0;
4650 }
4651
4652 while ((src = TAILQ_FIRST(&dead_list)))
4653 {
4654 TAILQ_REMOVE(&dead_list, src, ns_control_link);
4655 nstat_control_cleanup_source(state, src, FALSE);
4656 }
4657
4658 return result;
4659 }
4660
4661 static errno_t
4662 nstat_control_handle_get_src_description(
4663 nstat_control_state *state,
4664 mbuf_t m)
4665 {
4666 nstat_msg_get_src_description req;
4667 errno_t result = ENOENT;
4668 nstat_src *src;
4669
4670 if (mbuf_copydata(m, 0, sizeof(req), &req) != 0)
4671 {
4672 return EINVAL;
4673 }
4674
4675 lck_mtx_lock(&state->ncs_mtx);
4676 u_int64_t src_count = 0;
4677 boolean_t partial = FALSE;
4678 const boolean_t all_srcs = (req.srcref == NSTAT_SRC_REF_ALL);
4679
4680 /*
4681 * Error handling policy and sequence number generation is folded into
4682 * nstat_control_begin_query.
4683 */
4684 partial = nstat_control_begin_query(state, &req.hdr);
4685
4686 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
4687 {
4688 if (all_srcs || src->srcref == req.srcref)
4689 {
4690 if (nstat_control_reporting_allowed(state, src)
4691 && (!all_srcs || !partial || src->seq != state->ncs_seq))
4692 {
4693 if ((req.hdr.flags & NSTAT_MSG_HDR_FLAG_SUPPORTS_AGGREGATE) != 0 && all_srcs)
4694 {
4695 result = nstat_control_append_description(state, src);
4696 }
4697 else
4698 {
4699 result = nstat_control_send_description(state, src, req.hdr.context, 0);
4700 }
4701
4702 if (ENOMEM == result || ENOBUFS == result)
4703 {
4704 /*
4705 * If the description message failed to
4706 * enqueue then we give up for now.
4707 */
4708 break;
4709 }
4710 if (partial)
4711 {
4712 /*
4713 * Note, we skip over hard errors and
4714 * filtered sources.
4715 */
4716 src->seq = state->ncs_seq;
4717 src_count++;
4718 if (src_count >= QUERY_CONTINUATION_SRC_COUNT)
4719 {
4720 break;
4721 }
4722 }
4723 }
4724
4725 if (!all_srcs)
4726 {
4727 break;
4728 }
4729 }
4730 }
4731 nstat_flush_accumulated_msgs(state);
4732
4733 u_int16_t flags = 0;
4734 if (req.srcref == NSTAT_SRC_REF_ALL)
4735 flags = nstat_control_end_query(state, src, partial);
4736
4737 lck_mtx_unlock(&state->ncs_mtx);
4738 /*
4739 * If an error occurred enqueueing data, then allow the error to
4740 * propagate to nstat_control_send. This way, the error is sent to
4741 * user-level.
4742 */
4743 if (all_srcs && ENOMEM != result && ENOBUFS != result)
4744 {
4745 nstat_enqueue_success(req.hdr.context, state, flags);
4746 result = 0;
4747 }
4748
4749 return result;
4750 }
4751
4752 static errno_t
4753 nstat_control_handle_set_filter(
4754 nstat_control_state *state,
4755 mbuf_t m)
4756 {
4757 nstat_msg_set_filter req;
4758 nstat_src *src;
4759
4760 if (mbuf_copydata(m, 0, sizeof(req), &req) != 0)
4761 return EINVAL;
4762 if (req.srcref == NSTAT_SRC_REF_ALL ||
4763 req.srcref == NSTAT_SRC_REF_INVALID)
4764 return EINVAL;
4765
4766 lck_mtx_lock(&state->ncs_mtx);
4767 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
4768 {
4769 if (req.srcref == src->srcref)
4770 {
4771 src->filter = req.filter;
4772 break;
4773 }
4774 }
4775 lck_mtx_unlock(&state->ncs_mtx);
4776 if (src == NULL)
4777 return ENOENT;
4778
4779 return 0;
4780 }
4781
4782 static void
4783 nstat_send_error(
4784 nstat_control_state *state,
4785 u_int64_t context,
4786 u_int32_t error)
4787 {
4788 errno_t result;
4789 struct nstat_msg_error err;
4790
4791 bzero(&err, sizeof(err));
4792 err.hdr.type = NSTAT_MSG_TYPE_ERROR;
4793 err.hdr.length = sizeof(err);
4794 err.hdr.context = context;
4795 err.error = error;
4796
4797 result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &err,
4798 sizeof(err), CTL_DATA_EOR | CTL_DATA_CRIT);
4799 if (result != 0)
4800 nstat_stats.nstat_msgerrorfailures++;
4801 }
4802
4803 static boolean_t
4804 nstat_control_begin_query(
4805 nstat_control_state *state,
4806 const nstat_msg_hdr *hdrp)
4807 {
4808 boolean_t partial = FALSE;
4809
4810 if (hdrp->flags & NSTAT_MSG_HDR_FLAG_CONTINUATION)
4811 {
4812 /* A partial query all has been requested. */
4813 partial = TRUE;
4814
4815 if (state->ncs_context != hdrp->context)
4816 {
4817 if (state->ncs_context != 0)
4818 nstat_send_error(state, state->ncs_context, EAGAIN);
4819
4820 /* Initialize state for a partial query all. */
4821 state->ncs_context = hdrp->context;
4822 state->ncs_seq++;
4823 }
4824 }
4825
4826 return partial;
4827 }
4828
4829 static u_int16_t
4830 nstat_control_end_query(
4831 nstat_control_state *state,
4832 nstat_src *last_src,
4833 boolean_t partial)
4834 {
4835 u_int16_t flags = 0;
4836
4837 if (last_src == NULL || !partial)
4838 {
4839 /*
4840 * We iterated through the entire srcs list or exited early
4841 * from the loop when a partial update was not requested (an
4842 * error occurred), so clear context to indicate internally
4843 * that the query is finished.
4844 */
4845 state->ncs_context = 0;
4846 }
4847 else
4848 {
4849 /*
4850 * Indicate to userlevel to make another partial request as
4851 * there are still sources left to be reported.
4852 */
4853 flags |= NSTAT_MSG_HDR_FLAG_CONTINUATION;
4854 }
4855
4856 return flags;
4857 }
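
/*
 * Client-side view of the continuation protocol (illustrative sketch, not
 * compiled here): keep hdr.context constant, set the continuation flag on
 * each request, and stop once the SUCCESS reply arrives without
 * NSTAT_MSG_HDR_FLAG_CONTINUATION. drain_replies_until_success() is a
 * hypothetical helper that reads replies until the SUCCESS message for this
 * context and returns that message's header flags.
 */
#if 0
nstat_msg_query_src_req q;

memset(&q, 0, sizeof(q));
q.hdr.type = NSTAT_MSG_TYPE_QUERY_SRC;
q.hdr.length = sizeof(q);
q.hdr.flags = NSTAT_MSG_HDR_FLAG_CONTINUATION; // request chunked delivery
q.hdr.context = 42;                            // constant for the whole query
q.srcref = NSTAT_SRC_REF_ALL;

do {
	send(fd, &q, sizeof(q), 0);
} while (drain_replies_until_success(fd, q.hdr.context) &
    NSTAT_MSG_HDR_FLAG_CONTINUATION);
#endif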
4858
4859 static errno_t
4860 nstat_control_handle_get_update(
4861 nstat_control_state *state,
4862 mbuf_t m)
4863 {
4864 nstat_msg_query_src_req req;
4865
4866 if (mbuf_copydata(m, 0, sizeof(req), &req) != 0)
4867 {
4868 return EINVAL;
4869 }
4870
4871 lck_mtx_lock(&state->ncs_mtx);
4872
4873 state->ncs_flags |= NSTAT_FLAG_SUPPORTS_UPDATES;
4874
4875 errno_t result = ENOENT;
4876 nstat_src *src, *tmpsrc;
4877 tailq_head_nstat_src dead_list;
4878 u_int64_t src_count = 0;
4879 boolean_t partial = FALSE;
4880 TAILQ_INIT(&dead_list);
4881
4882 /*
4883 * Error handling policy and sequence number generation is folded into
4884 * nstat_control_begin_query.
4885 */
4886 partial = nstat_control_begin_query(state, &req.hdr);
4887
4888 TAILQ_FOREACH_SAFE(src, &state->ncs_src_queue, ns_control_link, tmpsrc)
4889 {
4890 int gone;
4891
4892 gone = 0;
4893 if (nstat_control_reporting_allowed(state, src))
4894 {
4895 /* skip this source if it has the current state
4896 * sequence number as it's already been reported in
4897 * this query-all partial sequence. */
4898 if (req.srcref == NSTAT_SRC_REF_ALL
4899 && (FALSE == partial || src->seq != state->ncs_seq))
4900 {
4901 result = nstat_control_append_update(state, src, &gone);
4902 if (ENOMEM == result || ENOBUFS == result)
4903 {
4904 /*
4905 * If the update message failed to
4906 * enqueue then give up.
4907 */
4908 break;
4909 }
4910 if (partial)
4911 {
4912 /*
4913 * We skip over hard errors and
4914 * filtered sources.
4915 */
4916 src->seq = state->ncs_seq;
4917 src_count++;
4918 }
4919 }
4920 else if (src->srcref == req.srcref)
4921 {
4922 result = nstat_control_send_update(state, src, req.hdr.context, 0, &gone);
4923 }
4924 }
4925
4926 if (gone)
4927 {
4928 // pull src out of the list
4929 TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
4930 TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
4931 }
4932
4933 if (req.srcref != NSTAT_SRC_REF_ALL && req.srcref == src->srcref)
4934 {
4935 break;
4936 }
4937 if (src_count >= QUERY_CONTINUATION_SRC_COUNT)
4938 {
4939 break;
4940 }
4941 }
4942
4943 nstat_flush_accumulated_msgs(state);
4944
4945
4946 u_int16_t flags = 0;
4947 if (req.srcref == NSTAT_SRC_REF_ALL)
4948 flags = nstat_control_end_query(state, src, partial);
4949
4950 lck_mtx_unlock(&state->ncs_mtx);
4951 /*
4952 * If an error occurred enqueueing data, then allow the error to
4953 * propagate to nstat_control_send. This way, the error is sent to
4954 * user-level.
4955 */
4956 if (req.srcref == NSTAT_SRC_REF_ALL && ENOMEM != result && ENOBUFS != result)
4957 {
4958 nstat_enqueue_success(req.hdr.context, state, flags);
4959 result = 0;
4960 }
4961
4962 while ((src = TAILQ_FIRST(&dead_list)))
4963 {
4964 TAILQ_REMOVE(&dead_list, src, ns_control_link);
4965 // release src and send notification
4966 nstat_control_cleanup_source(state, src, FALSE);
4967 }
4968
4969 return result;
4970 }
4971
4972 static errno_t
4973 nstat_control_handle_subscribe_sysinfo(
4974 nstat_control_state *state)
4975 {
4976 errno_t result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0);
4977
4978 if (result != 0)
4979 {
4980 return result;
4981 }
4982
4983 lck_mtx_lock(&state->ncs_mtx);
4984 state->ncs_flags |= NSTAT_FLAG_SYSINFO_SUBSCRIBED;
4985 lck_mtx_unlock(&state->ncs_mtx);
4986
4987 return 0;
4988 }
4989
4990 static errno_t
4991 nstat_control_send(
4992 kern_ctl_ref kctl,
4993 u_int32_t unit,
4994 void *uinfo,
4995 mbuf_t m,
4996 __unused int flags)
4997 {
4998 nstat_control_state *state = (nstat_control_state*)uinfo;
4999 struct nstat_msg_hdr *hdr;
5000 struct nstat_msg_hdr storage;
5001 errno_t result = 0;
5002
5003 if (mbuf_pkthdr_len(m) < sizeof(*hdr))
5004 {
5005 // Is this the right thing to do?
5006 mbuf_freem(m);
5007 return EINVAL;
5008 }
5009
5010 if (mbuf_len(m) >= sizeof(*hdr))
5011 {
5012 hdr = mbuf_data(m);
5013 }
5014 else
5015 {
5016 mbuf_copydata(m, 0, sizeof(storage), &storage);
5017 hdr = &storage;
5018 }
5019
5020 // Legacy clients may not set the length
5021 // Those clients are likely not setting the flags either
5022 // Fix everything up so old clients continue to work
5023 if (hdr->length != mbuf_pkthdr_len(m))
5024 {
5025 hdr->flags = 0;
5026 hdr->length = mbuf_pkthdr_len(m);
5027 if (hdr == &storage)
5028 {
5029 mbuf_copyback(m, 0, sizeof(*hdr), hdr, MBUF_DONTWAIT);
5030 }
5031 }
5032
5033 switch (hdr->type)
5034 {
5035 case NSTAT_MSG_TYPE_ADD_SRC:
5036 result = nstat_control_handle_add_request(state, m);
5037 break;
5038
5039 case NSTAT_MSG_TYPE_ADD_ALL_SRCS:
5040 result = nstat_control_handle_add_all(state, m);
5041 break;
5042
5043 case NSTAT_MSG_TYPE_REM_SRC:
5044 result = nstat_control_handle_remove_request(state, m);
5045 break;
5046
5047 case NSTAT_MSG_TYPE_QUERY_SRC:
5048 result = nstat_control_handle_query_request(state, m);
5049 break;
5050
5051 case NSTAT_MSG_TYPE_GET_SRC_DESC:
5052 result = nstat_control_handle_get_src_description(state, m);
5053 break;
5054
5055 case NSTAT_MSG_TYPE_SET_FILTER:
5056 result = nstat_control_handle_set_filter(state, m);
5057 break;
5058
5059 case NSTAT_MSG_TYPE_GET_UPDATE:
5060 result = nstat_control_handle_get_update(state, m);
5061 break;
5062
5063 case NSTAT_MSG_TYPE_SUBSCRIBE_SYSINFO:
5064 result = nstat_control_handle_subscribe_sysinfo(state);
5065 break;
5066
5067 default:
5068 result = EINVAL;
5069 break;
5070 }
5071
5072 if (result != 0)
5073 {
5074 struct nstat_msg_error err;
5075
5076 bzero(&err, sizeof(err));
5077 err.hdr.type = NSTAT_MSG_TYPE_ERROR;
5078 err.hdr.length = sizeof(err) + mbuf_pkthdr_len(m);
5079 err.hdr.context = hdr->context;
5080 err.error = result;
5081
5082 if (mbuf_prepend(&m, sizeof(err), MBUF_DONTWAIT) == 0 &&
5083 mbuf_copyback(m, 0, sizeof(err), &err, MBUF_DONTWAIT) == 0)
5084 {
5085 result = ctl_enqueuembuf(kctl, unit, m, CTL_DATA_EOR | CTL_DATA_CRIT);
5086 if (result != 0)
5087 {
5088 mbuf_freem(m);
5089 }
5090 m = NULL;
5091 }
5092
5093 if (result != 0)
5094 {
5095 // Unable to prepend the error to the request - just send the error
5096 err.hdr.length = sizeof(err);
5097 result = ctl_enqueuedata(kctl, unit, &err, sizeof(err),
5098 CTL_DATA_EOR | CTL_DATA_CRIT);
5099 if (result != 0)
5100 nstat_stats.nstat_msgerrorfailures += 1;
5101 }
5102 nstat_stats.nstat_handle_msg_failures += 1;
5103 }
5104
5105 if (m) mbuf_freem(m);
5106
5107 return result;
5108 }
5109
5110