/*
 * Copyright (c) 2010-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/kpi_mbuf.h>
#include <sys/socket.h>
#include <sys/kern_control.h>
#include <sys/mcache.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/priv.h>
#include <sys/protosw.h>

#include <kern/clock.h>
#include <kern/debug.h>

#include <libkern/libkern.h>
#include <libkern/OSMalloc.h>
#include <libkern/OSAtomic.h>
#include <libkern/locks.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/route.h>

// These includes appear in ntstat.h, but we include them here first so they
// won't trigger any clang diagnostic errors.
#include <netinet/in.h>
#include <netinet/in_stat.h>
#include <netinet/tcp.h>

#pragma clang diagnostic push
#pragma clang diagnostic error "-Wpadded"
#pragma clang diagnostic error "-Wpacked"
// This header defines structures shared with user space, so we need to ensure
// there is no compiler-inserted padding in case the user space process isn't
// using the same architecture as the kernel (example: i386 process with
// x86_64 kernel).
#include <net/ntstat.h>
#pragma clang diagnostic pop

#include <netinet/ip_var.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_cc.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/in6_var.h>

__private_extern__ int nstat_collect = 1;

#if (DEBUG || DEVELOPMENT)
SYSCTL_INT(_net, OID_AUTO, statistics, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_collect, 0, "Collect detailed statistics");
#endif /* (DEBUG || DEVELOPMENT) */

#if CONFIG_EMBEDDED
static int nstat_privcheck = 1;
#else
static int nstat_privcheck = 0;
#endif
SYSCTL_INT(_net, OID_AUTO, statistics_privcheck, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_privcheck, 0, "Entitlement check");

SYSCTL_NODE(_net, OID_AUTO, stats,
    CTLFLAG_RW | CTLFLAG_LOCKED, 0, "network statistics");

static int nstat_debug = 0;
SYSCTL_INT(_net_stats, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_debug, 0, "");

static int nstat_sendspace = 2048;
SYSCTL_INT(_net_stats, OID_AUTO, sendspace, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_sendspace, 0, "");

static int nstat_recvspace = 8192;
SYSCTL_INT(_net_stats, OID_AUTO, recvspace, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_recvspace, 0, "");

static struct nstat_stats nstat_stats;
SYSCTL_STRUCT(_net_stats, OID_AUTO, stats, CTLFLAG_RD | CTLFLAG_LOCKED,
    &nstat_stats, nstat_stats, "");

static u_int32_t nstat_lim_interval = 30 * 60; /* Report interval, seconds */
static u_int32_t nstat_lim_min_tx_pkts = 100;
static u_int32_t nstat_lim_min_rx_pkts = 100;
#if (DEBUG || DEVELOPMENT)
SYSCTL_INT(_net_stats, OID_AUTO, lim_report_interval,
    CTLFLAG_RW | CTLFLAG_LOCKED, &nstat_lim_interval, 0,
    "Low Internet stat report interval");

SYSCTL_INT(_net_stats, OID_AUTO, lim_min_tx_pkts,
    CTLFLAG_RW | CTLFLAG_LOCKED, &nstat_lim_min_tx_pkts, 0,
    "Low Internet, min transmit packets threshold");

SYSCTL_INT(_net_stats, OID_AUTO, lim_min_rx_pkts,
    CTLFLAG_RW | CTLFLAG_LOCKED, &nstat_lim_min_rx_pkts, 0,
    "Low Internet, min receive packets threshold");
#endif /* DEBUG || DEVELOPMENT */

static struct net_api_stats net_api_stats_before;
static u_int64_t net_api_stats_last_report_time;
#define NET_API_STATS_REPORT_INTERVAL (12 * 60 * 60) /* 12 hours, in seconds */
static u_int32_t net_api_stats_report_interval = NET_API_STATS_REPORT_INTERVAL;

#if (DEBUG || DEVELOPMENT)
SYSCTL_UINT(_net_stats, OID_AUTO, api_report_interval,
    CTLFLAG_RW | CTLFLAG_LOCKED, &net_api_stats_report_interval, 0, "");
#endif /* DEBUG || DEVELOPMENT */

enum {
	NSTAT_FLAG_CLEANUP = (1 << 0),
	NSTAT_FLAG_REQCOUNTS = (1 << 1),
	NSTAT_FLAG_SUPPORTS_UPDATES = (1 << 2),
	NSTAT_FLAG_SYSINFO_SUBSCRIBED = (1 << 3),
};

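// Cap on the number of sources reported in a single pass of a partial query;
// embedded configurations use a smaller batch to bound the work done per
// continuation request (see the ncs_context/ncs_seq partial-query state below).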
#if CONFIG_EMBEDDED
#define QUERY_CONTINUATION_SRC_COUNT 50
#else
#define QUERY_CONTINUATION_SRC_COUNT 100
#endif

typedef TAILQ_HEAD(, nstat_src) tailq_head_nstat_src;
typedef TAILQ_ENTRY(nstat_src) tailq_entry_nstat_src;

typedef struct nstat_provider_filter {
	u_int64_t npf_flags;
	u_int64_t npf_events;
	pid_t npf_pid;
	uuid_t npf_uuid;
} nstat_provider_filter;


typedef struct nstat_control_state {
	struct nstat_control_state *ncs_next;
	u_int32_t ncs_watching;
	decl_lck_mtx_data(, ncs_mtx);
	kern_ctl_ref ncs_kctl;
	u_int32_t ncs_unit;
	nstat_src_ref_t ncs_next_srcref;
	tailq_head_nstat_src ncs_src_queue;
	mbuf_t ncs_accumulated;
	u_int32_t ncs_flags;
	nstat_provider_filter ncs_provider_filters[NSTAT_PROVIDER_COUNT];
	/* state maintained for partial query requests */
	u_int64_t ncs_context;
	u_int64_t ncs_seq;
} nstat_control_state;

typedef struct nstat_provider {
	struct nstat_provider *next;
	nstat_provider_id_t nstat_provider_id;
	size_t nstat_descriptor_length;
	errno_t (*nstat_lookup)(const void *data, u_int32_t length, nstat_provider_cookie_t *out_cookie);
	int (*nstat_gone)(nstat_provider_cookie_t cookie);
	errno_t (*nstat_counts)(nstat_provider_cookie_t cookie, struct nstat_counts *out_counts, int *out_gone);
	errno_t (*nstat_watcher_add)(nstat_control_state *state, nstat_msg_add_all_srcs *req);
	void (*nstat_watcher_remove)(nstat_control_state *state);
	errno_t (*nstat_copy_descriptor)(nstat_provider_cookie_t cookie, void *data, u_int32_t len);
	void (*nstat_release)(nstat_provider_cookie_t cookie, boolean_t locked);
	bool (*nstat_reporting_allowed)(nstat_provider_cookie_t cookie, nstat_provider_filter *filter);
} nstat_provider;

typedef STAILQ_HEAD(, nstat_src) stailq_head_nstat_src;
typedef STAILQ_ENTRY(nstat_src) stailq_entry_nstat_src;

typedef TAILQ_HEAD(, nstat_tu_shadow) tailq_head_tu_shadow;
typedef TAILQ_ENTRY(nstat_tu_shadow) tailq_entry_tu_shadow;

typedef TAILQ_HEAD(, nstat_procdetails) tailq_head_procdetails;
typedef TAILQ_ENTRY(nstat_procdetails) tailq_entry_procdetails;

typedef struct nstat_src {
	tailq_entry_nstat_src ns_control_link;  // All sources for the nstat_control_state, for iterating over.
	nstat_control_state *ns_control;        // The nstat_control_state that this is a source for
	nstat_src_ref_t srcref;
	nstat_provider *provider;
	nstat_provider_cookie_t cookie;
	uint32_t filter;
	uint64_t seq;
} nstat_src;

static errno_t nstat_control_send_counts(nstat_control_state *,
    nstat_src *, unsigned long long, u_int16_t, int *);
static int nstat_control_send_description(nstat_control_state *state, nstat_src *src, u_int64_t context, u_int16_t hdr_flags);
static int nstat_control_send_update(nstat_control_state *state, nstat_src *src, u_int64_t context, u_int16_t hdr_flags, int *gone);
static errno_t nstat_control_send_removed(nstat_control_state *, nstat_src *);
static errno_t nstat_control_send_goodbye(nstat_control_state *state, nstat_src *src);
static void nstat_control_cleanup_source(nstat_control_state *state, nstat_src *src, boolean_t);
static bool nstat_control_reporting_allowed(nstat_control_state *state, nstat_src *src);
static boolean_t nstat_control_begin_query(nstat_control_state *state, const nstat_msg_hdr *hdrp);
static u_int16_t nstat_control_end_query(nstat_control_state *state, nstat_src *last_src, boolean_t partial);
static void nstat_ifnet_report_ecn_stats(void);
static void nstat_ifnet_report_lim_stats(void);
static void nstat_net_api_report_stats(void);
static errno_t nstat_set_provider_filter(nstat_control_state *state, nstat_msg_add_all_srcs *req);

static u_int32_t nstat_udp_watchers = 0;
static u_int32_t nstat_tcp_watchers = 0;

static void nstat_control_register(void);

/*
 * The lock order is as follows:
 *
 *     socket_lock (inpcb)
 *         nstat_mtx
 *             state->ncs_mtx
 */
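// For example, nstat_tcp_new_pcb() takes the socket lock before taking
// nstat_mtx, and nstat_pcb_detach() takes nstat_mtx before taking each
// state->ncs_mtx.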
static volatile OSMallocTag nstat_malloc_tag = NULL;
static nstat_control_state *nstat_controls = NULL;
static uint64_t nstat_idle_time = 0;
static decl_lck_mtx_data(, nstat_mtx);

/* some extern definitions */
extern void mbuf_report_peak_usage(void);
extern void tcp_report_stats(void);

static void
nstat_copy_sa_out(
	const struct sockaddr *src,
	struct sockaddr *dst,
	int maxlen)
{
	if (src->sa_len > maxlen) {
		return;
	}

	bcopy(src, dst, src->sa_len);
	if (src->sa_family == AF_INET6 &&
	    src->sa_len >= sizeof(struct sockaddr_in6)) {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6*)(void *)dst;
		if (IN6_IS_SCOPE_EMBED(&sin6->sin6_addr)) {
			if (sin6->sin6_scope_id == 0) {
				sin6->sin6_scope_id = ntohs(sin6->sin6_addr.s6_addr16[1]);
			}
			sin6->sin6_addr.s6_addr16[1] = 0;
		}
	}
}

static void
nstat_ip_to_sockaddr(
	const struct in_addr *ip,
	u_int16_t port,
	struct sockaddr_in *sin,
	u_int32_t maxlen)
{
	if (maxlen < sizeof(struct sockaddr_in)) {
		return;
	}

	sin->sin_family = AF_INET;
	sin->sin_len = sizeof(*sin);
	sin->sin_port = port;
	sin->sin_addr = *ip;
}

u_int16_t
nstat_ifnet_to_flags(
	struct ifnet *ifp)
{
	u_int16_t flags = 0;
	u_int32_t functional_type = if_functional_type(ifp, FALSE);

	/* Panic if someone adds a functional type without updating ntstat. */
	VERIFY(0 <= functional_type && functional_type <= IFRTYPE_FUNCTIONAL_LAST);

	switch (functional_type) {
	case IFRTYPE_FUNCTIONAL_UNKNOWN:
		flags |= NSTAT_IFNET_IS_UNKNOWN_TYPE;
		break;
	case IFRTYPE_FUNCTIONAL_LOOPBACK:
		flags |= NSTAT_IFNET_IS_LOOPBACK;
		break;
	case IFRTYPE_FUNCTIONAL_WIRED:
	case IFRTYPE_FUNCTIONAL_INTCOPROC:
		flags |= NSTAT_IFNET_IS_WIRED;
		break;
	case IFRTYPE_FUNCTIONAL_WIFI_INFRA:
		flags |= NSTAT_IFNET_IS_WIFI;
		break;
	case IFRTYPE_FUNCTIONAL_WIFI_AWDL:
		flags |= NSTAT_IFNET_IS_WIFI;
		flags |= NSTAT_IFNET_IS_AWDL;
		break;
	case IFRTYPE_FUNCTIONAL_CELLULAR:
		flags |= NSTAT_IFNET_IS_CELLULAR;
		break;
	}

	if (IFNET_IS_EXPENSIVE(ifp)) {
		flags |= NSTAT_IFNET_IS_EXPENSIVE;
	}

	return flags;
}

static u_int16_t
nstat_inpcb_to_flags(
	const struct inpcb *inp)
{
	u_int16_t flags = 0;

	if ((inp != NULL) && (inp->inp_last_outifp != NULL)) {
		struct ifnet *ifp = inp->inp_last_outifp;
		flags = nstat_ifnet_to_flags(ifp);

		if (flags & NSTAT_IFNET_IS_CELLULAR) {
			if (inp->inp_socket != NULL &&
			    (inp->inp_socket->so_flags1 & SOF1_CELLFALLBACK)) {
				flags |= NSTAT_IFNET_VIA_CELLFALLBACK;
			}
		}
	} else {
		flags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
	}

	return flags;
}

#pragma mark -- Network Statistic Providers --

static errno_t nstat_control_source_add(u_int64_t context, nstat_control_state *state, nstat_provider *provider, nstat_provider_cookie_t cookie);
struct nstat_provider *nstat_providers = NULL;

static struct nstat_provider*
nstat_find_provider_by_id(
	nstat_provider_id_t id)
{
	struct nstat_provider *provider;

	for (provider = nstat_providers; provider != NULL; provider = provider->next) {
		if (provider->nstat_provider_id == id) {
			break;
		}
	}

	return provider;
}

static errno_t
nstat_lookup_entry(
	nstat_provider_id_t id,
	const void *data,
	u_int32_t length,
	nstat_provider **out_provider,
	nstat_provider_cookie_t *out_cookie)
{
	*out_provider = nstat_find_provider_by_id(id);
	if (*out_provider == NULL) {
		return ENOENT;
	}

	return (*out_provider)->nstat_lookup(data, length, out_cookie);
}

static void nstat_init_route_provider(void);
static void nstat_init_tcp_provider(void);
static void nstat_init_udp_provider(void);
static void nstat_init_ifnet_provider(void);

__private_extern__ void
nstat_init(void)
{
	if (nstat_malloc_tag != NULL) {
		return;
	}

	OSMallocTag tag = OSMalloc_Tagalloc(NET_STAT_CONTROL_NAME, OSMT_DEFAULT);
	if (!OSCompareAndSwapPtr(NULL, tag, &nstat_malloc_tag)) {
		OSMalloc_Tagfree(tag);
		tag = nstat_malloc_tag;
	} else {
		// We won the race to set the tag, so do the rest of the
		// one-time initialization here; this path is hit exactly once.
		nstat_init_route_provider();
		nstat_init_tcp_provider();
		nstat_init_udp_provider();
		nstat_init_ifnet_provider();
		nstat_control_register();
	}
}

#pragma mark -- Aligned Buffer Allocation --

struct align_header {
	u_int32_t offset;
	u_int32_t length;
};
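// Layout produced by nstat_malloc_aligned():
//
//     [ padding ][ struct align_header ][ aligned buffer of `length` bytes ]
//     ^-- start of the OSMalloc block   ^-- pointer returned to the caller
//
// The header sits immediately before the aligned pointer; hdr->offset is the
// distance back to the start of the OSMalloc block and hdr->length is the
// total allocation size, which is all nstat_free_aligned() needs for OSFree().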

static void*
nstat_malloc_aligned(
	u_int32_t length,
	u_int8_t alignment,
	OSMallocTag tag)
{
	struct align_header *hdr = NULL;
	u_int32_t size = length + sizeof(*hdr) + alignment - 1;

	u_int8_t *buffer = OSMalloc(size, tag);
	if (buffer == NULL) {
		return NULL;
	}

	u_int8_t *aligned = buffer + sizeof(*hdr);
	aligned = (u_int8_t*)P2ROUNDUP(aligned, alignment);

	hdr = (struct align_header*)(void *)(aligned - sizeof(*hdr));
	hdr->offset = aligned - buffer;
	hdr->length = size;

	return aligned;
}

static void
nstat_free_aligned(
	void *buffer,
	OSMallocTag tag)
{
	struct align_header *hdr = (struct align_header*)(void *)((u_int8_t*)buffer - sizeof(*hdr));
	OSFree(((char*)buffer) - hdr->offset, hdr->length, tag);
}

#pragma mark -- Route Provider --

static nstat_provider nstat_route_provider;

static errno_t
nstat_route_lookup(
	const void *data,
	u_int32_t length,
	nstat_provider_cookie_t *out_cookie)
{
	// rt_lookup doesn't take const params, but it doesn't modify them
	// either, so we use a union to cast away the const without a warning.
	union {
		struct sockaddr *sa;
		const struct sockaddr *const_sa;
	} dst, mask;

	const nstat_route_add_param *param = (const nstat_route_add_param*)data;
	*out_cookie = NULL;

	if (length < sizeof(*param)) {
		return EINVAL;
	}

	if (param->dst.v4.sin_family == 0 ||
	    param->dst.v4.sin_family > AF_MAX ||
	    (param->mask.v4.sin_family != 0 && param->mask.v4.sin_family != param->dst.v4.sin_family)) {
		return EINVAL;
	}

	if (param->dst.v4.sin_len > sizeof(param->dst) ||
	    (param->mask.v4.sin_family && param->mask.v4.sin_len > sizeof(param->mask))) {
		return EINVAL;
	}
	if ((param->dst.v4.sin_family == AF_INET &&
	    param->dst.v4.sin_len < sizeof(struct sockaddr_in)) ||
	    (param->dst.v6.sin6_family == AF_INET6 &&
	    param->dst.v6.sin6_len < sizeof(struct sockaddr_in6))) {
		return EINVAL;
	}

	dst.const_sa = (const struct sockaddr*)&param->dst;
	mask.const_sa = param->mask.v4.sin_family ? (const struct sockaddr*)&param->mask : NULL;

	struct radix_node_head *rnh = rt_tables[dst.sa->sa_family];
	if (rnh == NULL) {
		return EAFNOSUPPORT;
	}

	lck_mtx_lock(rnh_lock);
	struct rtentry *rt = rt_lookup(TRUE, dst.sa, mask.sa, rnh, param->ifindex);
	lck_mtx_unlock(rnh_lock);

	if (rt) {
		*out_cookie = (nstat_provider_cookie_t)rt;
	}

	return rt ? 0 : ENOENT;
}

static int
nstat_route_gone(
	nstat_provider_cookie_t cookie)
{
	struct rtentry *rt = (struct rtentry*)cookie;
	return ((rt->rt_flags & RTF_UP) == 0) ? 1 : 0;
}

static errno_t
nstat_route_counts(
	nstat_provider_cookie_t cookie,
	struct nstat_counts *out_counts,
	int *out_gone)
{
	struct rtentry *rt = (struct rtentry*)cookie;
	struct nstat_counts *rt_stats = rt->rt_stats;

	if (out_gone) {
		*out_gone = 0;
	}

	if (out_gone && (rt->rt_flags & RTF_UP) == 0) {
		*out_gone = 1;
	}

	if (rt_stats) {
		atomic_get_64(out_counts->nstat_rxpackets, &rt_stats->nstat_rxpackets);
		atomic_get_64(out_counts->nstat_rxbytes, &rt_stats->nstat_rxbytes);
		atomic_get_64(out_counts->nstat_txpackets, &rt_stats->nstat_txpackets);
		atomic_get_64(out_counts->nstat_txbytes, &rt_stats->nstat_txbytes);
		out_counts->nstat_rxduplicatebytes = rt_stats->nstat_rxduplicatebytes;
		out_counts->nstat_rxoutoforderbytes = rt_stats->nstat_rxoutoforderbytes;
		out_counts->nstat_txretransmit = rt_stats->nstat_txretransmit;
		out_counts->nstat_connectattempts = rt_stats->nstat_connectattempts;
		out_counts->nstat_connectsuccesses = rt_stats->nstat_connectsuccesses;
		out_counts->nstat_min_rtt = rt_stats->nstat_min_rtt;
		out_counts->nstat_avg_rtt = rt_stats->nstat_avg_rtt;
		out_counts->nstat_var_rtt = rt_stats->nstat_var_rtt;
		out_counts->nstat_cell_rxbytes = out_counts->nstat_cell_txbytes = 0;
	} else {
		bzero(out_counts, sizeof(*out_counts));
	}

	return 0;
}

static void
nstat_route_release(
	nstat_provider_cookie_t cookie,
	__unused int locked)
{
	rtfree((struct rtentry*)cookie);
}

static u_int32_t nstat_route_watchers = 0;

static int
nstat_route_walktree_add(
	struct radix_node *rn,
	void *context)
{
	errno_t result = 0;
	struct rtentry *rt = (struct rtentry *)rn;
	nstat_control_state *state = (nstat_control_state*)context;

	LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);

	/* RTF_UP can't change while rnh_lock is held */
	if ((rt->rt_flags & RTF_UP) != 0) {
		/* Clear RTPRF_OURS if the route is still usable */
		RT_LOCK(rt);
		if (rt_validate(rt)) {
			RT_ADDREF_LOCKED(rt);
			RT_UNLOCK(rt);
		} else {
			RT_UNLOCK(rt);
			rt = NULL;
		}

		/* Otherwise if RTF_CONDEMNED, treat it as if it were down */
		if (rt == NULL) {
			return 0;
		}

		result = nstat_control_source_add(0, state, &nstat_route_provider, rt);
		if (result != 0) {
			rtfree_locked(rt);
		}
	}

	return result;
}

static errno_t
nstat_route_add_watcher(
	nstat_control_state *state,
	nstat_msg_add_all_srcs *req)
{
	int i;
	errno_t result = 0;

	lck_mtx_lock(rnh_lock);

	result = nstat_set_provider_filter(state, req);
	if (result == 0) {
		OSIncrementAtomic(&nstat_route_watchers);

		for (i = 1; i < AF_MAX; i++) {
			struct radix_node_head *rnh;
			rnh = rt_tables[i];
			if (!rnh) {
				continue;
			}

			result = rnh->rnh_walktree(rnh, nstat_route_walktree_add, state);
			if (result != 0) {
				// This is probably resource exhaustion.
				// There currently isn't a good way to recover from this.
				// Least bad seems to be to give up on the add-all but
				// leave the watcher in place.
				break;
			}
		}
	}
	lck_mtx_unlock(rnh_lock);

	return result;
}

__private_extern__ void
nstat_route_new_entry(
	struct rtentry *rt)
{
	if (nstat_route_watchers == 0) {
		return;
	}

	lck_mtx_lock(&nstat_mtx);
	if ((rt->rt_flags & RTF_UP) != 0) {
		nstat_control_state *state;
		for (state = nstat_controls; state; state = state->ncs_next) {
			if ((state->ncs_watching & (1 << NSTAT_PROVIDER_ROUTE)) != 0) {
				// this client is watching routes
				// acquire a reference for the route
				RT_ADDREF(rt);

				// add the source, if that fails, release the reference
				if (nstat_control_source_add(0, state, &nstat_route_provider, rt) != 0) {
					RT_REMREF(rt);
				}
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);
}

static void
nstat_route_remove_watcher(
	__unused nstat_control_state *state)
{
	OSDecrementAtomic(&nstat_route_watchers);
}

static errno_t
nstat_route_copy_descriptor(
	nstat_provider_cookie_t cookie,
	void *data,
	u_int32_t len)
{
	nstat_route_descriptor *desc = (nstat_route_descriptor*)data;
	if (len < sizeof(*desc)) {
		return EINVAL;
	}
	bzero(desc, sizeof(*desc));

	struct rtentry *rt = (struct rtentry*)cookie;
	desc->id = (uint64_t)VM_KERNEL_ADDRPERM(rt);
	desc->parent_id = (uint64_t)VM_KERNEL_ADDRPERM(rt->rt_parent);
	desc->gateway_id = (uint64_t)VM_KERNEL_ADDRPERM(rt->rt_gwroute);

	// key/dest
	struct sockaddr *sa;
	if ((sa = rt_key(rt))) {
		nstat_copy_sa_out(sa, &desc->dst.sa, sizeof(desc->dst));
	}

	// mask
	if ((sa = rt_mask(rt)) && sa->sa_len <= sizeof(desc->mask)) {
		memcpy(&desc->mask, sa, sa->sa_len);
	}

	// gateway
	if ((sa = rt->rt_gateway)) {
		nstat_copy_sa_out(sa, &desc->gateway.sa, sizeof(desc->gateway));
	}

	if (rt->rt_ifp) {
		desc->ifindex = rt->rt_ifp->if_index;
	}

	desc->flags = rt->rt_flags;

	return 0;
}

static bool
nstat_route_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter)
{
	bool retval = true;

	if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0) {
		struct rtentry *rt = (struct rtentry*)cookie;
		struct ifnet *ifp = rt->rt_ifp;

		if (ifp) {
			uint16_t interface_properties = nstat_ifnet_to_flags(ifp);

			if ((filter->npf_flags & interface_properties) == 0) {
				retval = false;
			}
		}
	}
	return retval;
}

static void
nstat_init_route_provider(void)
{
	bzero(&nstat_route_provider, sizeof(nstat_route_provider));
	nstat_route_provider.nstat_descriptor_length = sizeof(nstat_route_descriptor);
	nstat_route_provider.nstat_provider_id = NSTAT_PROVIDER_ROUTE;
	nstat_route_provider.nstat_lookup = nstat_route_lookup;
	nstat_route_provider.nstat_gone = nstat_route_gone;
	nstat_route_provider.nstat_counts = nstat_route_counts;
	nstat_route_provider.nstat_release = nstat_route_release;
	nstat_route_provider.nstat_watcher_add = nstat_route_add_watcher;
	nstat_route_provider.nstat_watcher_remove = nstat_route_remove_watcher;
	nstat_route_provider.nstat_copy_descriptor = nstat_route_copy_descriptor;
	nstat_route_provider.nstat_reporting_allowed = nstat_route_reporting_allowed;
	nstat_route_provider.next = nstat_providers;
	nstat_providers = &nstat_route_provider;
}

#pragma mark -- Route Collection --

__private_extern__ struct nstat_counts*
nstat_route_attach(
	struct rtentry *rte)
{
	struct nstat_counts *result = rte->rt_stats;
	if (result) {
		return result;
	}

	if (nstat_malloc_tag == NULL) {
		nstat_init();
	}

	result = nstat_malloc_aligned(sizeof(*result), sizeof(u_int64_t), nstat_malloc_tag);
	if (!result) {
		return result;
	}

	bzero(result, sizeof(*result));

	if (!OSCompareAndSwapPtr(NULL, result, &rte->rt_stats)) {
		nstat_free_aligned(result, nstat_malloc_tag);
		result = rte->rt_stats;
	}

	return result;
}

__private_extern__ void
nstat_route_detach(
	struct rtentry *rte)
{
	if (rte->rt_stats) {
		nstat_free_aligned(rte->rt_stats, nstat_malloc_tag);
		rte->rt_stats = NULL;
	}
}

__private_extern__ void
nstat_route_connect_attempt(
	struct rtentry *rte)
{
	while (rte) {
		struct nstat_counts* stats = nstat_route_attach(rte);
		if (stats) {
			OSIncrementAtomic(&stats->nstat_connectattempts);
		}

		rte = rte->rt_parent;
	}
}

__private_extern__ void
nstat_route_connect_success(
	struct rtentry *rte)
{
	// Credit this route and all of its parents.
	while (rte) {
		struct nstat_counts* stats = nstat_route_attach(rte);
		if (stats) {
			OSIncrementAtomic(&stats->nstat_connectsuccesses);
		}

		rte = rte->rt_parent;
	}
}

__private_extern__ void
nstat_route_tx(
	struct rtentry *rte,
	u_int32_t packets,
	u_int32_t bytes,
	u_int32_t flags)
{
	while (rte) {
		struct nstat_counts* stats = nstat_route_attach(rte);
		if (stats) {
			if ((flags & NSTAT_TX_FLAG_RETRANSMIT) != 0) {
				OSAddAtomic(bytes, &stats->nstat_txretransmit);
			} else {
				OSAddAtomic64((SInt64)packets, (SInt64*)&stats->nstat_txpackets);
				OSAddAtomic64((SInt64)bytes, (SInt64*)&stats->nstat_txbytes);
			}
		}

		rte = rte->rt_parent;
	}
}

__private_extern__ void
nstat_route_rx(
	struct rtentry *rte,
	u_int32_t packets,
	u_int32_t bytes,
	u_int32_t flags)
{
	while (rte) {
		struct nstat_counts* stats = nstat_route_attach(rte);
		if (stats) {
			if (flags == 0) {
				OSAddAtomic64((SInt64)packets, (SInt64*)&stats->nstat_rxpackets);
				OSAddAtomic64((SInt64)bytes, (SInt64*)&stats->nstat_rxbytes);
			} else {
				if (flags & NSTAT_RX_FLAG_OUT_OF_ORDER) {
					OSAddAtomic(bytes, &stats->nstat_rxoutoforderbytes);
				}
				if (flags & NSTAT_RX_FLAG_DUPLICATE) {
					OSAddAtomic(bytes, &stats->nstat_rxduplicatebytes);
				}
			}
		}

		rte = rte->rt_parent;
	}
}

/* atomically average current value at _val_addr with _new_val and store */
#define NSTAT_EWMA_ATOMIC(_val_addr, _new_val, _decay) do { \
	volatile uint32_t _old_val; \
	volatile uint32_t _avg; \
	do { \
		_old_val = *_val_addr; \
		if (_old_val == 0) { \
			_avg = _new_val; \
		} else { \
			_avg = _old_val - (_old_val >> _decay) + (_new_val >> _decay); \
		} \
		if (_old_val == _avg) break; \
	} while (!OSCompareAndSwap(_old_val, _avg, _val_addr)); \
} while (0)
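/*
 * With _decay == 3 (the value used below), this computes
 * avg' = avg - avg/8 + sample/8: an exponentially weighted moving average
 * that gives each new sample a weight of 1/2^_decay.
 */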

/* atomically compute minimum of current value at _val_addr with _new_val and store */
#define NSTAT_MIN_ATOMIC(_val_addr, _new_val) do { \
	volatile uint32_t _old_val; \
	do { \
		_old_val = *_val_addr; \
		if (_old_val != 0 && _old_val < _new_val) { \
			break; \
		} \
	} while (!OSCompareAndSwap(_old_val, _new_val, _val_addr)); \
} while (0)
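/*
 * A stored value of zero is treated as "no sample yet", so the first
 * observation always passes the test and is recorded by the compare-and-swap.
 */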

__private_extern__ void
nstat_route_rtt(
	struct rtentry *rte,
	u_int32_t rtt,
	u_int32_t rtt_var)
{
	const uint32_t decay = 3;

	while (rte) {
		struct nstat_counts* stats = nstat_route_attach(rte);
		if (stats) {
			NSTAT_EWMA_ATOMIC(&stats->nstat_avg_rtt, rtt, decay);
			NSTAT_MIN_ATOMIC(&stats->nstat_min_rtt, rtt);
			NSTAT_EWMA_ATOMIC(&stats->nstat_var_rtt, rtt_var, decay);
		}
		rte = rte->rt_parent;
	}
}

__private_extern__ void
nstat_route_update(
	struct rtentry *rte,
	uint32_t connect_attempts,
	uint32_t connect_successes,
	uint32_t rx_packets,
	uint32_t rx_bytes,
	uint32_t rx_duplicatebytes,
	uint32_t rx_outoforderbytes,
	uint32_t tx_packets,
	uint32_t tx_bytes,
	uint32_t tx_retransmit,
	uint32_t rtt,
	uint32_t rtt_var)
{
	const uint32_t decay = 3;

	while (rte) {
		struct nstat_counts* stats = nstat_route_attach(rte);
		if (stats) {
			OSAddAtomic(connect_attempts, &stats->nstat_connectattempts);
			OSAddAtomic(connect_successes, &stats->nstat_connectsuccesses);
			OSAddAtomic64((SInt64)tx_packets, (SInt64*)&stats->nstat_txpackets);
			OSAddAtomic64((SInt64)tx_bytes, (SInt64*)&stats->nstat_txbytes);
			OSAddAtomic(tx_retransmit, &stats->nstat_txretransmit);
			OSAddAtomic64((SInt64)rx_packets, (SInt64*)&stats->nstat_rxpackets);
			OSAddAtomic64((SInt64)rx_bytes, (SInt64*)&stats->nstat_rxbytes);
			OSAddAtomic(rx_outoforderbytes, &stats->nstat_rxoutoforderbytes);
			OSAddAtomic(rx_duplicatebytes, &stats->nstat_rxduplicatebytes);

			if (rtt != 0) {
				NSTAT_EWMA_ATOMIC(&stats->nstat_avg_rtt, rtt, decay);
				NSTAT_MIN_ATOMIC(&stats->nstat_min_rtt, rtt);
				NSTAT_EWMA_ATOMIC(&stats->nstat_var_rtt, rtt_var, decay);
			}
		}
		rte = rte->rt_parent;
	}
}

#pragma mark -- TCP Kernel Provider --

/*
 * Due to the way the kernel deallocates a process (the process structure
 * might be gone by the time we get the PCB detach notification),
 * we need to cache the process name. Without this, proc_name() would
 * return null and the process name would never be sent to userland.
 *
 * For UDP sockets, we also cache the connection tuples along with the
 * interface index. This is necessary because when UDP sockets are
 * disconnected, the connection tuples are forever lost from the inpcb, thus
 * we need to keep track of the last call to connect() in ntstat.
 */
struct nstat_tucookie {
	struct inpcb *inp;
	char pname[MAXCOMLEN + 1];
	bool cached;
	union {
		struct sockaddr_in v4;
		struct sockaddr_in6 v6;
	} local;
	union {
		struct sockaddr_in v4;
		struct sockaddr_in6 v6;
	} remote;
	unsigned int if_index;
	uint16_t ifnet_properties;
};

static struct nstat_tucookie *
nstat_tucookie_alloc_internal(
	struct inpcb *inp,
	bool ref,
	bool locked)
{
	struct nstat_tucookie *cookie;

	cookie = OSMalloc(sizeof(*cookie), nstat_malloc_tag);
	if (cookie == NULL) {
		return NULL;
	}
	if (!locked) {
		LCK_MTX_ASSERT(&nstat_mtx, LCK_MTX_ASSERT_NOTOWNED);
	}
	if (ref && in_pcb_checkstate(inp, WNT_ACQUIRE, locked) == WNT_STOPUSING) {
		OSFree(cookie, sizeof(*cookie), nstat_malloc_tag);
		return NULL;
	}
	bzero(cookie, sizeof(*cookie));
	cookie->inp = inp;
	proc_name(inp->inp_socket->last_pid, cookie->pname,
	    sizeof(cookie->pname));
	/*
	 * We only increment the reference count for UDP sockets because we
	 * only cache UDP socket tuples.
	 */
	if (SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP) {
		OSIncrementAtomic(&inp->inp_nstat_refcnt);
	}

	return cookie;
}

static struct nstat_tucookie *
nstat_tucookie_alloc(
	struct inpcb *inp)
{
	return nstat_tucookie_alloc_internal(inp, false, false);
}

static struct nstat_tucookie *
nstat_tucookie_alloc_ref(
	struct inpcb *inp)
{
	return nstat_tucookie_alloc_internal(inp, true, false);
}

static struct nstat_tucookie *
nstat_tucookie_alloc_ref_locked(
	struct inpcb *inp)
{
	return nstat_tucookie_alloc_internal(inp, true, true);
}

static void
nstat_tucookie_release_internal(
	struct nstat_tucookie *cookie,
	int inplock)
{
	if (SOCK_PROTO(cookie->inp->inp_socket) == IPPROTO_UDP) {
		OSDecrementAtomic(&cookie->inp->inp_nstat_refcnt);
	}
	in_pcb_checkstate(cookie->inp, WNT_RELEASE, inplock);
	OSFree(cookie, sizeof(*cookie), nstat_malloc_tag);
}

static void
nstat_tucookie_release(
	struct nstat_tucookie *cookie)
{
	nstat_tucookie_release_internal(cookie, false);
}

static void
nstat_tucookie_release_locked(
	struct nstat_tucookie *cookie)
{
	nstat_tucookie_release_internal(cookie, true);
}


static nstat_provider nstat_tcp_provider;

static errno_t
nstat_tcpudp_lookup(
	struct inpcbinfo *inpinfo,
	const void *data,
	u_int32_t length,
	nstat_provider_cookie_t *out_cookie)
{
	struct inpcb *inp = NULL;

	// parameter validation
	const nstat_tcp_add_param *param = (const nstat_tcp_add_param*)data;
	if (length < sizeof(*param)) {
		return EINVAL;
	}

	// src and dst must match
	if (param->remote.v4.sin_family != 0 &&
	    param->remote.v4.sin_family != param->local.v4.sin_family) {
		return EINVAL;
	}

	switch (param->local.v4.sin_family) {
	case AF_INET:
	{
		if (param->local.v4.sin_len != sizeof(param->local.v4) ||
		    (param->remote.v4.sin_family != 0 &&
		    param->remote.v4.sin_len != sizeof(param->remote.v4))) {
			return EINVAL;
		}

		inp = in_pcblookup_hash(inpinfo, param->remote.v4.sin_addr, param->remote.v4.sin_port,
		    param->local.v4.sin_addr, param->local.v4.sin_port, 1, NULL);
	}
	break;

#if INET6
	case AF_INET6:
	{
		union {
			const struct in6_addr *in6c;
			struct in6_addr *in6;
		} local, remote;

		if (param->local.v6.sin6_len != sizeof(param->local.v6) ||
		    (param->remote.v6.sin6_family != 0 &&
		    param->remote.v6.sin6_len != sizeof(param->remote.v6))) {
			return EINVAL;
		}

		local.in6c = &param->local.v6.sin6_addr;
		remote.in6c = &param->remote.v6.sin6_addr;

		inp = in6_pcblookup_hash(inpinfo, remote.in6, param->remote.v6.sin6_port,
		    local.in6, param->local.v6.sin6_port, 1, NULL);
	}
	break;
#endif

	default:
		return EINVAL;
	}

	if (inp == NULL) {
		return ENOENT;
	}

	// At this point we have a ref to the inpcb
	*out_cookie = nstat_tucookie_alloc(inp);
	if (*out_cookie == NULL) {
		in_pcb_checkstate(inp, WNT_RELEASE, 0);
	}

	return 0;
}

static errno_t
nstat_tcp_lookup(
	const void *data,
	u_int32_t length,
	nstat_provider_cookie_t *out_cookie)
{
	return nstat_tcpudp_lookup(&tcbinfo, data, length, out_cookie);
}

static int
nstat_tcp_gone(
	nstat_provider_cookie_t cookie)
{
	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;
	struct inpcb *inp;
	struct tcpcb *tp;

	return (!(inp = tucookie->inp) ||
	       !(tp = intotcpcb(inp)) ||
	       inp->inp_state == INPCB_STATE_DEAD) ? 1 : 0;
}

static errno_t
nstat_tcp_counts(
	nstat_provider_cookie_t cookie,
	struct nstat_counts *out_counts,
	int *out_gone)
{
	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;
	struct inpcb *inp;

	bzero(out_counts, sizeof(*out_counts));

	if (out_gone) {
		*out_gone = 0;
	}

	// if the pcb is in the dead state, we should stop using it
	if (nstat_tcp_gone(cookie)) {
		if (out_gone) {
			*out_gone = 1;
		}
		if (!(inp = tucookie->inp) || !intotcpcb(inp)) {
			return EINVAL;
		}
	}
	inp = tucookie->inp;
	struct tcpcb *tp = intotcpcb(inp);

	atomic_get_64(out_counts->nstat_rxpackets, &inp->inp_stat->rxpackets);
	atomic_get_64(out_counts->nstat_rxbytes, &inp->inp_stat->rxbytes);
	atomic_get_64(out_counts->nstat_txpackets, &inp->inp_stat->txpackets);
	atomic_get_64(out_counts->nstat_txbytes, &inp->inp_stat->txbytes);
	out_counts->nstat_rxduplicatebytes = tp->t_stat.rxduplicatebytes;
	out_counts->nstat_rxoutoforderbytes = tp->t_stat.rxoutoforderbytes;
	out_counts->nstat_txretransmit = tp->t_stat.txretransmitbytes;
	out_counts->nstat_connectattempts = tp->t_state >= TCPS_SYN_SENT ? 1 : 0;
	out_counts->nstat_connectsuccesses = tp->t_state >= TCPS_ESTABLISHED ? 1 : 0;
	out_counts->nstat_avg_rtt = tp->t_srtt;
	out_counts->nstat_min_rtt = tp->t_rttbest;
	out_counts->nstat_var_rtt = tp->t_rttvar;
	if (out_counts->nstat_avg_rtt < out_counts->nstat_min_rtt) {
		out_counts->nstat_min_rtt = out_counts->nstat_avg_rtt;
	}
	atomic_get_64(out_counts->nstat_cell_rxbytes, &inp->inp_cstat->rxbytes);
	atomic_get_64(out_counts->nstat_cell_txbytes, &inp->inp_cstat->txbytes);
	atomic_get_64(out_counts->nstat_wifi_rxbytes, &inp->inp_wstat->rxbytes);
	atomic_get_64(out_counts->nstat_wifi_txbytes, &inp->inp_wstat->txbytes);
	atomic_get_64(out_counts->nstat_wired_rxbytes, &inp->inp_Wstat->rxbytes);
	atomic_get_64(out_counts->nstat_wired_txbytes, &inp->inp_Wstat->txbytes);

	return 0;
}

static void
nstat_tcp_release(
	nstat_provider_cookie_t cookie,
	int locked)
{
	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;

	nstat_tucookie_release_internal(tucookie, locked);
}

static errno_t
nstat_tcp_add_watcher(
	nstat_control_state *state,
	nstat_msg_add_all_srcs *req)
{
	// There is a tricky issue around getting all TCP sockets added once
	// and only once. nstat_tcp_new_pcb() is called prior to the new item
	// being placed on any lists where it might be found.
	// By locking the tcbinfo.ipi_lock prior to marking the state as a watcher,
	// it should be impossible for a new socket to be added twice.
	// On the other hand, there is still a timing issue where a new socket
	// results in a call to nstat_tcp_new_pcb() before this watcher
	// is instantiated and yet the socket doesn't make it into ipi_listhead
	// prior to the scan. <rdar://problem/30361716>

	errno_t result;

	lck_rw_lock_shared(tcbinfo.ipi_lock);
	result = nstat_set_provider_filter(state, req);
	if (result == 0) {
		OSIncrementAtomic(&nstat_tcp_watchers);

		// Add all current tcp inpcbs. Ignore those in timewait
		struct inpcb *inp;
		struct nstat_tucookie *cookie;
		LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list)
		{
			cookie = nstat_tucookie_alloc_ref(inp);
			if (cookie == NULL) {
				continue;
			}
			if (nstat_control_source_add(0, state, &nstat_tcp_provider,
			    cookie) != 0) {
				nstat_tucookie_release(cookie);
				break;
			}
		}
	}

	lck_rw_done(tcbinfo.ipi_lock);

	return result;
}

static void
nstat_tcp_remove_watcher(
	__unused nstat_control_state *state)
{
	OSDecrementAtomic(&nstat_tcp_watchers);
}

__private_extern__ void
nstat_tcp_new_pcb(
	struct inpcb *inp)
{
	struct nstat_tucookie *cookie;

	inp->inp_start_timestamp = mach_continuous_time();

	if (nstat_tcp_watchers == 0) {
		return;
	}

	socket_lock(inp->inp_socket, 0);
	lck_mtx_lock(&nstat_mtx);
	nstat_control_state *state;
	for (state = nstat_controls; state; state = state->ncs_next) {
		if ((state->ncs_watching & (1 << NSTAT_PROVIDER_TCP_KERNEL)) != 0) {
			// this client is watching tcp
			// acquire a reference for it
			cookie = nstat_tucookie_alloc_ref_locked(inp);
			if (cookie == NULL) {
				continue;
			}
			// add the source, if that fails, release the reference
			if (nstat_control_source_add(0, state,
			    &nstat_tcp_provider, cookie) != 0) {
				nstat_tucookie_release_locked(cookie);
				break;
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);
	socket_unlock(inp->inp_socket, 0);
}

__private_extern__ void
nstat_pcb_detach(struct inpcb *inp)
{
	nstat_control_state *state;
	nstat_src *src;
	tailq_head_nstat_src dead_list;
	struct nstat_tucookie *tucookie;
	errno_t result;

	if (inp == NULL || (nstat_tcp_watchers == 0 && nstat_udp_watchers == 0)) {
		return;
	}

	TAILQ_INIT(&dead_list);
	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next) {
		lck_mtx_lock(&state->ncs_mtx);
		TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
		{
			nstat_provider_id_t provider_id = src->provider->nstat_provider_id;
			if (provider_id == NSTAT_PROVIDER_TCP_KERNEL || provider_id == NSTAT_PROVIDER_UDP_KERNEL) {
				tucookie = (struct nstat_tucookie *)src->cookie;
				if (tucookie->inp == inp) {
					break;
				}
			}
		}

		if (src) {
			result = nstat_control_send_goodbye(state, src);

			TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
			TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
		}
		lck_mtx_unlock(&state->ncs_mtx);
	}
	lck_mtx_unlock(&nstat_mtx);

	while ((src = TAILQ_FIRST(&dead_list))) {
		TAILQ_REMOVE(&dead_list, src, ns_control_link);
		nstat_control_cleanup_source(NULL, src, TRUE);
	}
}

__private_extern__ void
nstat_pcb_cache(struct inpcb *inp)
{
	nstat_control_state *state;
	nstat_src *src;
	struct nstat_tucookie *tucookie;

	if (inp == NULL || nstat_udp_watchers == 0 ||
	    inp->inp_nstat_refcnt == 0) {
		return;
	}
	VERIFY(SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP);
	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next) {
		lck_mtx_lock(&state->ncs_mtx);
		TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
		{
			tucookie = (struct nstat_tucookie *)src->cookie;
			if (tucookie->inp == inp) {
				if (inp->inp_vflag & INP_IPV6) {
					in6_ip6_to_sockaddr(&inp->in6p_laddr,
					    inp->inp_lport,
					    &tucookie->local.v6,
					    sizeof(tucookie->local));
					in6_ip6_to_sockaddr(&inp->in6p_faddr,
					    inp->inp_fport,
					    &tucookie->remote.v6,
					    sizeof(tucookie->remote));
				} else if (inp->inp_vflag & INP_IPV4) {
					nstat_ip_to_sockaddr(&inp->inp_laddr,
					    inp->inp_lport,
					    &tucookie->local.v4,
					    sizeof(tucookie->local));
					nstat_ip_to_sockaddr(&inp->inp_faddr,
					    inp->inp_fport,
					    &tucookie->remote.v4,
					    sizeof(tucookie->remote));
				}
				if (inp->inp_last_outifp) {
					tucookie->if_index =
					    inp->inp_last_outifp->if_index;
				}

				tucookie->ifnet_properties = nstat_inpcb_to_flags(inp);
				tucookie->cached = true;
				break;
			}
		}
		lck_mtx_unlock(&state->ncs_mtx);
	}
	lck_mtx_unlock(&nstat_mtx);
}

__private_extern__ void
nstat_pcb_invalidate_cache(struct inpcb *inp)
{
	nstat_control_state *state;
	nstat_src *src;
	struct nstat_tucookie *tucookie;

	if (inp == NULL || nstat_udp_watchers == 0 ||
	    inp->inp_nstat_refcnt == 0) {
		return;
	}
	VERIFY(SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP);
	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next) {
		lck_mtx_lock(&state->ncs_mtx);
		TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
		{
			tucookie = (struct nstat_tucookie *)src->cookie;
			if (tucookie->inp == inp) {
				tucookie->cached = false;
				break;
			}
		}
		lck_mtx_unlock(&state->ncs_mtx);
	}
	lck_mtx_unlock(&nstat_mtx);
}

static errno_t
nstat_tcp_copy_descriptor(
	nstat_provider_cookie_t cookie,
	void *data,
	u_int32_t len)
{
	if (len < sizeof(nstat_tcp_descriptor)) {
		return EINVAL;
	}

	if (nstat_tcp_gone(cookie)) {
		return EINVAL;
	}

	nstat_tcp_descriptor *desc = (nstat_tcp_descriptor*)data;
	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;
	struct inpcb *inp = tucookie->inp;
	struct tcpcb *tp = intotcpcb(inp);
	bzero(desc, sizeof(*desc));

	if (inp->inp_vflag & INP_IPV6) {
		in6_ip6_to_sockaddr(&inp->in6p_laddr, inp->inp_lport,
		    &desc->local.v6, sizeof(desc->local));
		in6_ip6_to_sockaddr(&inp->in6p_faddr, inp->inp_fport,
		    &desc->remote.v6, sizeof(desc->remote));
	} else if (inp->inp_vflag & INP_IPV4) {
		nstat_ip_to_sockaddr(&inp->inp_laddr, inp->inp_lport,
		    &desc->local.v4, sizeof(desc->local));
		nstat_ip_to_sockaddr(&inp->inp_faddr, inp->inp_fport,
		    &desc->remote.v4, sizeof(desc->remote));
	}

	desc->state = intotcpcb(inp)->t_state;
	desc->ifindex = (inp->inp_last_outifp == NULL) ? 0 :
	    inp->inp_last_outifp->if_index;

	// danger - not locked, values could be bogus
	desc->txunacked = tp->snd_max - tp->snd_una;
	desc->txwindow = tp->snd_wnd;
	desc->txcwindow = tp->snd_cwnd;

	if (CC_ALGO(tp)->name != NULL) {
		strlcpy(desc->cc_algo, CC_ALGO(tp)->name,
		    sizeof(desc->cc_algo));
	}

	struct socket *so = inp->inp_socket;
	if (so) {
		// TBD - take the socket lock around these to make sure
		// they're in sync?
		desc->upid = so->last_upid;
		desc->pid = so->last_pid;
		desc->traffic_class = so->so_traffic_class;
		if ((so->so_flags1 & SOF1_TRAFFIC_MGT_SO_BACKGROUND)) {
			desc->traffic_mgt_flags |= TRAFFIC_MGT_SO_BACKGROUND;
		}
		if ((so->so_flags1 & SOF1_TRAFFIC_MGT_TCP_RECVBG)) {
			desc->traffic_mgt_flags |= TRAFFIC_MGT_TCP_RECVBG;
		}
		proc_name(desc->pid, desc->pname, sizeof(desc->pname));
		if (desc->pname[0] == 0) {
			strlcpy(desc->pname, tucookie->pname,
			    sizeof(desc->pname));
		} else {
			desc->pname[sizeof(desc->pname) - 1] = 0;
			strlcpy(tucookie->pname, desc->pname,
			    sizeof(tucookie->pname));
		}
		memcpy(desc->uuid, so->last_uuid, sizeof(so->last_uuid));
		memcpy(desc->vuuid, so->so_vuuid, sizeof(so->so_vuuid));
		if (so->so_flags & SOF_DELEGATED) {
			desc->eupid = so->e_upid;
			desc->epid = so->e_pid;
			memcpy(desc->euuid, so->e_uuid, sizeof(so->e_uuid));
		} else {
			desc->eupid = desc->upid;
			desc->epid = desc->pid;
			memcpy(desc->euuid, desc->uuid, sizeof(desc->uuid));
		}
		desc->sndbufsize = so->so_snd.sb_hiwat;
		desc->sndbufused = so->so_snd.sb_cc;
		desc->rcvbufsize = so->so_rcv.sb_hiwat;
		desc->rcvbufused = so->so_rcv.sb_cc;
	}

	tcp_get_connectivity_status(tp, &desc->connstatus);
	desc->ifnet_properties = nstat_inpcb_to_flags(inp);
	inp_get_activity_bitmap(inp, &desc->activity_bitmap);
	desc->start_timestamp = inp->inp_start_timestamp;
	desc->timestamp = mach_continuous_time();
	return 0;
}

static bool
nstat_tcpudp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter, bool is_UDP)
{
	bool retval = true;

	if ((filter->npf_flags & (NSTAT_FILTER_IFNET_FLAGS | NSTAT_FILTER_SPECIFIC_USER)) != 0) {
		struct nstat_tucookie *tucookie = (struct nstat_tucookie *)cookie;
		struct inpcb *inp = tucookie->inp;

		/* Only apply interface filter if at least one is allowed. */
		if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0) {
			uint16_t interface_properties = nstat_inpcb_to_flags(inp);

			if ((filter->npf_flags & interface_properties) == 0) {
				// For UDP, we could have an undefined interface and yet transfers may have occurred.
				// We allow reporting if there have been transfers of the requested kind.
				// This is imperfect as we cannot account for the expensive attribute over wifi.
				// We also assume that cellular is expensive and we have no way to select for AWDL.
				if (is_UDP) {
					do {
						if ((filter->npf_flags & (NSTAT_FILTER_ACCEPT_CELLULAR | NSTAT_FILTER_ACCEPT_EXPENSIVE)) &&
						    (inp->inp_cstat->rxbytes || inp->inp_cstat->txbytes)) {
							break;
						}
						if ((filter->npf_flags & NSTAT_FILTER_ACCEPT_WIFI) &&
						    (inp->inp_wstat->rxbytes || inp->inp_wstat->txbytes)) {
							break;
						}
						if ((filter->npf_flags & NSTAT_FILTER_ACCEPT_WIRED) &&
						    (inp->inp_Wstat->rxbytes || inp->inp_Wstat->txbytes)) {
							break;
						}
						return false;
					} while (0);
				} else {
					return false;
				}
			}
		}

		if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0) && (retval)) {
			struct socket *so = inp->inp_socket;
			retval = false;

			if (so) {
				if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_PID) != 0) &&
				    (filter->npf_pid == so->last_pid)) {
					retval = true;
				} else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EPID) != 0) &&
				    (filter->npf_pid == ((so->so_flags & SOF_DELEGATED) ? so->e_pid : so->last_pid))) {
					retval = true;
				} else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_UUID) != 0) &&
				    (memcmp(filter->npf_uuid, so->last_uuid, sizeof(so->last_uuid)) == 0)) {
					retval = true;
				} else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EUUID) != 0) &&
				    (memcmp(filter->npf_uuid, (so->so_flags & SOF_DELEGATED) ? so->e_uuid : so->last_uuid,
				    sizeof(so->last_uuid)) == 0)) {
					retval = true;
				}
			}
		}
	}
	return retval;
}

static bool
nstat_tcp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter)
{
	return nstat_tcpudp_reporting_allowed(cookie, filter, FALSE);
}

static void
nstat_init_tcp_provider(void)
{
	bzero(&nstat_tcp_provider, sizeof(nstat_tcp_provider));
	nstat_tcp_provider.nstat_descriptor_length = sizeof(nstat_tcp_descriptor);
	nstat_tcp_provider.nstat_provider_id = NSTAT_PROVIDER_TCP_KERNEL;
	nstat_tcp_provider.nstat_lookup = nstat_tcp_lookup;
	nstat_tcp_provider.nstat_gone = nstat_tcp_gone;
	nstat_tcp_provider.nstat_counts = nstat_tcp_counts;
	nstat_tcp_provider.nstat_release = nstat_tcp_release;
	nstat_tcp_provider.nstat_watcher_add = nstat_tcp_add_watcher;
	nstat_tcp_provider.nstat_watcher_remove = nstat_tcp_remove_watcher;
	nstat_tcp_provider.nstat_copy_descriptor = nstat_tcp_copy_descriptor;
	nstat_tcp_provider.nstat_reporting_allowed = nstat_tcp_reporting_allowed;
	nstat_tcp_provider.next = nstat_providers;
	nstat_providers = &nstat_tcp_provider;
}

#pragma mark -- UDP Provider --

static nstat_provider nstat_udp_provider;

static errno_t
nstat_udp_lookup(
	const void *data,
	u_int32_t length,
	nstat_provider_cookie_t *out_cookie)
{
	return nstat_tcpudp_lookup(&udbinfo, data, length, out_cookie);
}

static int
nstat_udp_gone(
	nstat_provider_cookie_t cookie)
{
	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;
	struct inpcb *inp;

	return (!(inp = tucookie->inp) ||
	       inp->inp_state == INPCB_STATE_DEAD) ? 1 : 0;
}

static errno_t
nstat_udp_counts(
	nstat_provider_cookie_t cookie,
	struct nstat_counts *out_counts,
	int *out_gone)
{
	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;

	if (out_gone) {
		*out_gone = 0;
	}

	// if the pcb is in the dead state, we should stop using it
	if (nstat_udp_gone(cookie)) {
		if (out_gone) {
			*out_gone = 1;
		}
		if (!tucookie->inp) {
			return EINVAL;
		}
	}
	struct inpcb *inp = tucookie->inp;

	atomic_get_64(out_counts->nstat_rxpackets, &inp->inp_stat->rxpackets);
	atomic_get_64(out_counts->nstat_rxbytes, &inp->inp_stat->rxbytes);
	atomic_get_64(out_counts->nstat_txpackets, &inp->inp_stat->txpackets);
	atomic_get_64(out_counts->nstat_txbytes, &inp->inp_stat->txbytes);
	atomic_get_64(out_counts->nstat_cell_rxbytes, &inp->inp_cstat->rxbytes);
	atomic_get_64(out_counts->nstat_cell_txbytes, &inp->inp_cstat->txbytes);
	atomic_get_64(out_counts->nstat_wifi_rxbytes, &inp->inp_wstat->rxbytes);
	atomic_get_64(out_counts->nstat_wifi_txbytes, &inp->inp_wstat->txbytes);
	atomic_get_64(out_counts->nstat_wired_rxbytes, &inp->inp_Wstat->rxbytes);
	atomic_get_64(out_counts->nstat_wired_txbytes, &inp->inp_Wstat->txbytes);

	return 0;
}

static void
nstat_udp_release(
	nstat_provider_cookie_t cookie,
	int locked)
{
	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;

	nstat_tucookie_release_internal(tucookie, locked);
}

static errno_t
nstat_udp_add_watcher(
	nstat_control_state *state,
	nstat_msg_add_all_srcs *req)
{
	// There is a tricky issue around getting all UDP sockets added once
	// and only once. nstat_udp_new_pcb() is called prior to the new item
	// being placed on any lists where it might be found.
	// By locking the udbinfo.ipi_lock prior to marking the state as a watcher,
	// it should be impossible for a new socket to be added twice.
	// On the other hand, there is still a timing issue where a new socket
	// results in a call to nstat_udp_new_pcb() before this watcher
	// is instantiated and yet the socket doesn't make it into ipi_listhead
	// prior to the scan. <rdar://problem/30361716>

	errno_t result;

	lck_rw_lock_shared(udbinfo.ipi_lock);
	result = nstat_set_provider_filter(state, req);

	if (result == 0) {
		struct inpcb *inp;
		struct nstat_tucookie *cookie;

		OSIncrementAtomic(&nstat_udp_watchers);

		// Add all current UDP inpcbs.
		LIST_FOREACH(inp, udbinfo.ipi_listhead, inp_list)
		{
			cookie = nstat_tucookie_alloc_ref(inp);
			if (cookie == NULL) {
				continue;
			}
			if (nstat_control_source_add(0, state, &nstat_udp_provider,
			    cookie) != 0) {
				nstat_tucookie_release(cookie);
				break;
			}
		}
	}

	lck_rw_done(udbinfo.ipi_lock);

	return result;
}

static void
nstat_udp_remove_watcher(
	__unused nstat_control_state *state)
{
	OSDecrementAtomic(&nstat_udp_watchers);
}

__private_extern__ void
nstat_udp_new_pcb(
	struct inpcb *inp)
{
	struct nstat_tucookie *cookie;

	inp->inp_start_timestamp = mach_continuous_time();

	if (nstat_udp_watchers == 0) {
		return;
	}

	socket_lock(inp->inp_socket, 0);
	lck_mtx_lock(&nstat_mtx);
	nstat_control_state *state;
	for (state = nstat_controls; state; state = state->ncs_next) {
		if ((state->ncs_watching & (1 << NSTAT_PROVIDER_UDP_KERNEL)) != 0) {
			// this client is watching udp
			// acquire a reference for it
			cookie = nstat_tucookie_alloc_ref_locked(inp);
			if (cookie == NULL) {
				continue;
			}
			// add the source, if that fails, release the reference
			if (nstat_control_source_add(0, state,
			    &nstat_udp_provider, cookie) != 0) {
				nstat_tucookie_release_locked(cookie);
				break;
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);
	socket_unlock(inp->inp_socket, 0);
}

static errno_t
nstat_udp_copy_descriptor(
	nstat_provider_cookie_t cookie,
	void *data,
	u_int32_t len)
{
	if (len < sizeof(nstat_udp_descriptor)) {
		return EINVAL;
	}

	if (nstat_udp_gone(cookie)) {
		return EINVAL;
	}

	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;
	nstat_udp_descriptor *desc = (nstat_udp_descriptor*)data;
	struct inpcb *inp = tucookie->inp;

	bzero(desc, sizeof(*desc));

	if (tucookie->cached == false) {
		if (inp->inp_vflag & INP_IPV6) {
			in6_ip6_to_sockaddr(&inp->in6p_laddr, inp->inp_lport,
			    &desc->local.v6, sizeof(desc->local.v6));
			in6_ip6_to_sockaddr(&inp->in6p_faddr, inp->inp_fport,
			    &desc->remote.v6, sizeof(desc->remote.v6));
		} else if (inp->inp_vflag & INP_IPV4) {
			nstat_ip_to_sockaddr(&inp->inp_laddr, inp->inp_lport,
			    &desc->local.v4, sizeof(desc->local.v4));
			nstat_ip_to_sockaddr(&inp->inp_faddr, inp->inp_fport,
			    &desc->remote.v4, sizeof(desc->remote.v4));
		}
		desc->ifnet_properties = nstat_inpcb_to_flags(inp);
	} else {
		if (inp->inp_vflag & INP_IPV6) {
			memcpy(&desc->local.v6, &tucookie->local.v6,
			    sizeof(desc->local.v6));
			memcpy(&desc->remote.v6, &tucookie->remote.v6,
			    sizeof(desc->remote.v6));
		} else if (inp->inp_vflag & INP_IPV4) {
			memcpy(&desc->local.v4, &tucookie->local.v4,
			    sizeof(desc->local.v4));
			memcpy(&desc->remote.v4, &tucookie->remote.v4,
			    sizeof(desc->remote.v4));
		}
		desc->ifnet_properties = tucookie->ifnet_properties;
	}

	if (inp->inp_last_outifp) {
		desc->ifindex = inp->inp_last_outifp->if_index;
	} else {
		desc->ifindex = tucookie->if_index;
	}

	struct socket *so = inp->inp_socket;
	if (so) {
		// TBD - take the socket lock around these to make sure
		// they're in sync?
		desc->upid = so->last_upid;
		desc->pid = so->last_pid;
		proc_name(desc->pid, desc->pname, sizeof(desc->pname));
		if (desc->pname[0] == 0) {
			strlcpy(desc->pname, tucookie->pname,
			    sizeof(desc->pname));
		} else {
			desc->pname[sizeof(desc->pname) - 1] = 0;
			strlcpy(tucookie->pname, desc->pname,
			    sizeof(tucookie->pname));
		}
		memcpy(desc->uuid, so->last_uuid, sizeof(so->last_uuid));
		memcpy(desc->vuuid, so->so_vuuid, sizeof(so->so_vuuid));
		if (so->so_flags & SOF_DELEGATED) {
			desc->eupid = so->e_upid;
			desc->epid = so->e_pid;
			memcpy(desc->euuid, so->e_uuid, sizeof(so->e_uuid));
		} else {
			desc->eupid = desc->upid;
			desc->epid = desc->pid;
			memcpy(desc->euuid, desc->uuid, sizeof(desc->uuid));
		}
		desc->rcvbufsize = so->so_rcv.sb_hiwat;
		desc->rcvbufused = so->so_rcv.sb_cc;
		desc->traffic_class = so->so_traffic_class;
		inp_get_activity_bitmap(inp, &desc->activity_bitmap);
		desc->start_timestamp = inp->inp_start_timestamp;
		desc->timestamp = mach_continuous_time();
	}

	return 0;
}

static bool
nstat_udp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter)
{
	return nstat_tcpudp_reporting_allowed(cookie, filter, TRUE);
}


static void
nstat_init_udp_provider(void)
{
	bzero(&nstat_udp_provider, sizeof(nstat_udp_provider));
	nstat_udp_provider.nstat_provider_id = NSTAT_PROVIDER_UDP_KERNEL;
	nstat_udp_provider.nstat_descriptor_length = sizeof(nstat_udp_descriptor);
	nstat_udp_provider.nstat_lookup = nstat_udp_lookup;
	nstat_udp_provider.nstat_gone = nstat_udp_gone;
	nstat_udp_provider.nstat_counts = nstat_udp_counts;
	nstat_udp_provider.nstat_watcher_add = nstat_udp_add_watcher;
	nstat_udp_provider.nstat_watcher_remove = nstat_udp_remove_watcher;
	nstat_udp_provider.nstat_copy_descriptor = nstat_udp_copy_descriptor;
	nstat_udp_provider.nstat_release = nstat_udp_release;
	nstat_udp_provider.nstat_reporting_allowed = nstat_udp_reporting_allowed;
	nstat_udp_provider.next = nstat_providers;
	nstat_providers = &nstat_udp_provider;
}
1914
1915
1916
1917 #pragma mark -- ifnet Provider --
1918
1919 static nstat_provider nstat_ifnet_provider;
1920
1921 /*
1922 * We store a pointer to the ifnet and the original threshold
1923 * requested by the client.
1924 */
1925 struct nstat_ifnet_cookie {
1926 struct ifnet *ifp;
1927 uint64_t threshold;
1928 };
1929
1930 static errno_t
1931 nstat_ifnet_lookup(
1932 const void *data,
1933 u_int32_t length,
1934 nstat_provider_cookie_t *out_cookie)
1935 {
1936 const nstat_ifnet_add_param *param = (const nstat_ifnet_add_param *)data;
1937 struct ifnet *ifp;
1938 boolean_t changed = FALSE;
1939 nstat_control_state *state;
1940 nstat_src *src;
1941 struct nstat_ifnet_cookie *cookie;
1942
1943 if (length < sizeof(*param) || param->threshold < 1024 * 1024) {
1944 return EINVAL;
1945 }
1946 if (nstat_privcheck != 0) {
1947 errno_t result = priv_check_cred(kauth_cred_get(),
1948 PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0);
1949 if (result != 0) {
1950 return result;
1951 }
1952 }
1953 cookie = OSMalloc(sizeof(*cookie), nstat_malloc_tag);
1954 if (cookie == NULL) {
1955 return ENOMEM;
1956 }
1957 bzero(cookie, sizeof(*cookie));
1958
1959 ifnet_head_lock_shared();
1960 TAILQ_FOREACH(ifp, &ifnet_head, if_link)
1961 {
1962 ifnet_lock_exclusive(ifp);
1963 if (ifp->if_index == param->ifindex) {
1964 cookie->ifp = ifp;
1965 cookie->threshold = param->threshold;
1966 *out_cookie = cookie;
1967 if (!ifp->if_data_threshold ||
1968 ifp->if_data_threshold > param->threshold) {
1969 changed = TRUE;
1970 ifp->if_data_threshold = param->threshold;
1971 }
1972 ifnet_lock_done(ifp);
1973 ifnet_reference(ifp);
1974 break;
1975 }
1976 ifnet_lock_done(ifp);
1977 }
1978 ifnet_head_done();
1979
1980 /*
1981 * When we change the threshold to something smaller, we notify
1982 * all of our clients with a description message.
1983 * We won't send a message to the client we are currently serving
1984 * because it has no `ifnet source' yet.
1985 */
1986 if (changed) {
1987 lck_mtx_lock(&nstat_mtx);
1988 for (state = nstat_controls; state; state = state->ncs_next) {
1989 lck_mtx_lock(&state->ncs_mtx);
1990 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
1991 {
1992 if (src->provider != &nstat_ifnet_provider) {
1993 continue;
1994 }
1995 nstat_control_send_description(state, src, 0, 0);
1996 }
1997 lck_mtx_unlock(&state->ncs_mtx);
1998 }
1999 lck_mtx_unlock(&nstat_mtx);
2000 }
2001 if (cookie->ifp == NULL) {
2002 OSFree(cookie, sizeof(*cookie), nstat_malloc_tag);
2003 }
2004
2005 return ifp ? 0 : EINVAL;
2006 }
2007
2008 static int
2009 nstat_ifnet_gone(
2010 nstat_provider_cookie_t cookie)
2011 {
2012 struct ifnet *ifp;
2013 struct nstat_ifnet_cookie *ifcookie =
2014 (struct nstat_ifnet_cookie *)cookie;
2015
2016 ifnet_head_lock_shared();
2017 TAILQ_FOREACH(ifp, &ifnet_head, if_link)
2018 {
2019 if (ifp == ifcookie->ifp) {
2020 break;
2021 }
2022 }
2023 ifnet_head_done();
2024
2025 return ifp ? 0 : 1;
2026 }
2027
2028 static errno_t
2029 nstat_ifnet_counts(
2030 nstat_provider_cookie_t cookie,
2031 struct nstat_counts *out_counts,
2032 int *out_gone)
2033 {
2034 struct nstat_ifnet_cookie *ifcookie =
2035 (struct nstat_ifnet_cookie *)cookie;
2036 struct ifnet *ifp = ifcookie->ifp;
2037
2038 if (out_gone) {
2039 *out_gone = 0;
2040 }
2041
2042 // if the ifnet is gone, we should stop using it
2043 if (nstat_ifnet_gone(cookie)) {
2044 if (out_gone) {
2045 *out_gone = 1;
2046 }
2047 return EINVAL;
2048 }
2049
2050 bzero(out_counts, sizeof(*out_counts));
2051 out_counts->nstat_rxpackets = ifp->if_ipackets;
2052 out_counts->nstat_rxbytes = ifp->if_ibytes;
2053 out_counts->nstat_txpackets = ifp->if_opackets;
2054 out_counts->nstat_txbytes = ifp->if_obytes;
2055 out_counts->nstat_cell_rxbytes = out_counts->nstat_cell_txbytes = 0;
2056 return 0;
2057 }
2058
2059 static void
2060 nstat_ifnet_release(
2061 nstat_provider_cookie_t cookie,
2062 __unused int locked)
2063 {
2064 struct nstat_ifnet_cookie *ifcookie;
2065 struct ifnet *ifp;
2066 nstat_control_state *state;
2067 nstat_src *src;
2068 uint64_t minthreshold = UINT64_MAX;
2069
2070 /*
2071 * Find all the clients that requested a threshold
2072 * for this ifnet and re-calculate if_data_threshold.
2073 */
2074 lck_mtx_lock(&nstat_mtx);
2075 for (state = nstat_controls; state; state = state->ncs_next) {
2076 lck_mtx_lock(&state->ncs_mtx);
2077 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
2078 {
2079 /* Skip the provider we are about to detach. */
2080 if (src->provider != &nstat_ifnet_provider ||
2081 src->cookie == cookie) {
2082 continue;
2083 }
2084 ifcookie = (struct nstat_ifnet_cookie *)src->cookie;
2085 if (ifcookie->threshold < minthreshold) {
2086 minthreshold = ifcookie->threshold;
2087 }
2088 }
2089 lck_mtx_unlock(&state->ncs_mtx);
2090 }
2091 lck_mtx_unlock(&nstat_mtx);
2092 /*
2093 * Reset if_data_threshold or disable it.
2094 */
2095 ifcookie = (struct nstat_ifnet_cookie *)cookie;
2096 ifp = ifcookie->ifp;
2097 if (ifnet_is_attached(ifp, 1)) {
2098 ifnet_lock_exclusive(ifp);
2099 if (minthreshold == UINT64_MAX) {
2100 ifp->if_data_threshold = 0;
2101 } else {
2102 ifp->if_data_threshold = minthreshold;
2103 }
2104 ifnet_lock_done(ifp);
2105 ifnet_decr_iorefcnt(ifp);
2106 }
2107 ifnet_release(ifp);
2108 OSFree(ifcookie, sizeof(*ifcookie), nstat_malloc_tag);
2109 }
2110
2111 static void
2112 nstat_ifnet_copy_link_status(
2113 struct ifnet *ifp,
2114 struct nstat_ifnet_descriptor *desc)
2115 {
2116 struct if_link_status *ifsr = ifp->if_link_status;
2117 nstat_ifnet_desc_link_status *link_status = &desc->link_status;
2118
2119 link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_NONE;
2120 if (ifsr == NULL) {
2121 return;
2122 }
2123
2124 lck_rw_lock_shared(&ifp->if_link_status_lock);
2125
2126 if (ifp->if_type == IFT_CELLULAR) {
2127 nstat_ifnet_desc_cellular_status *cell_status = &link_status->u.cellular;
2128 struct if_cellular_status_v1 *if_cell_sr =
2129 &ifsr->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
2130
2131 if (ifsr->ifsr_version != IF_CELLULAR_STATUS_REPORT_VERSION_1) {
2132 goto done;
2133 }
2134
2135 link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_CELLULAR;
2136
2137 if (if_cell_sr->valid_bitmask & IF_CELL_LINK_QUALITY_METRIC_VALID) {
2138 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_LINK_QUALITY_METRIC_VALID;
2139 cell_status->link_quality_metric = if_cell_sr->link_quality_metric;
2140 }
2141 if (if_cell_sr->valid_bitmask & IF_CELL_UL_EFFECTIVE_BANDWIDTH_VALID) {
2142 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_EFFECTIVE_BANDWIDTH_VALID;
2143 cell_status->ul_effective_bandwidth = if_cell_sr->ul_effective_bandwidth;
2144 }
2145 if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_BANDWIDTH_VALID) {
2146 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_BANDWIDTH_VALID;
2147 cell_status->ul_max_bandwidth = if_cell_sr->ul_max_bandwidth;
2148 }
2149 if (if_cell_sr->valid_bitmask & IF_CELL_UL_MIN_LATENCY_VALID) {
2150 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MIN_LATENCY_VALID;
2151 cell_status->ul_min_latency = if_cell_sr->ul_min_latency;
2152 }
2153 if (if_cell_sr->valid_bitmask & IF_CELL_UL_EFFECTIVE_LATENCY_VALID) {
2154 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_EFFECTIVE_LATENCY_VALID;
2155 cell_status->ul_effective_latency = if_cell_sr->ul_effective_latency;
2156 }
2157 if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_LATENCY_VALID) {
2158 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_LATENCY_VALID;
2159 cell_status->ul_max_latency = if_cell_sr->ul_max_latency;
2160 }
2161 if (if_cell_sr->valid_bitmask & IF_CELL_UL_RETXT_LEVEL_VALID) {
2162 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_VALID;
2163 if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_NONE) {
2164 cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_NONE;
2165 } else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_LOW) {
2166 cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_LOW;
2167 } else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_MEDIUM) {
2168 cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_MEDIUM;
2169 } else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_HIGH) {
2170 cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_HIGH;
2171 } else {
2172 cell_status->valid_bitmask &= ~NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_VALID;
2173 }
2174 }
2175 if (if_cell_sr->valid_bitmask & IF_CELL_UL_BYTES_LOST_VALID) {
2176 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_BYTES_LOST_VALID;
2177 cell_status->ul_bytes_lost = if_cell_sr->ul_bytes_lost;
2178 }
2179 if (if_cell_sr->valid_bitmask & IF_CELL_UL_MIN_QUEUE_SIZE_VALID) {
2180 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MIN_QUEUE_SIZE_VALID;
2181 cell_status->ul_min_queue_size = if_cell_sr->ul_min_queue_size;
2182 }
2183 if (if_cell_sr->valid_bitmask & IF_CELL_UL_AVG_QUEUE_SIZE_VALID) {
2184 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_AVG_QUEUE_SIZE_VALID;
2185 cell_status->ul_avg_queue_size = if_cell_sr->ul_avg_queue_size;
2186 }
2187 if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_QUEUE_SIZE_VALID) {
2188 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_QUEUE_SIZE_VALID;
2189 cell_status->ul_max_queue_size = if_cell_sr->ul_max_queue_size;
2190 }
2191 if (if_cell_sr->valid_bitmask & IF_CELL_DL_EFFECTIVE_BANDWIDTH_VALID) {
2192 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_DL_EFFECTIVE_BANDWIDTH_VALID;
2193 cell_status->dl_effective_bandwidth = if_cell_sr->dl_effective_bandwidth;
2194 }
2195 if (if_cell_sr->valid_bitmask & IF_CELL_DL_MAX_BANDWIDTH_VALID) {
2196 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_DL_MAX_BANDWIDTH_VALID;
2197 cell_status->dl_max_bandwidth = if_cell_sr->dl_max_bandwidth;
2198 }
2199 if (if_cell_sr->valid_bitmask & IF_CELL_CONFIG_INACTIVITY_TIME_VALID) {
2200 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_CONFIG_INACTIVITY_TIME_VALID;
2201 cell_status->config_inactivity_time = if_cell_sr->config_inactivity_time;
2202 }
2203 if (if_cell_sr->valid_bitmask & IF_CELL_CONFIG_BACKOFF_TIME_VALID) {
2204 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_CONFIG_BACKOFF_TIME_VALID;
2205 cell_status->config_backoff_time = if_cell_sr->config_backoff_time;
2206 }
2207 if (if_cell_sr->valid_bitmask & IF_CELL_UL_MSS_RECOMMENDED_VALID) {
2208 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_MSS_RECOMMENDED_VALID;
2209 cell_status->mss_recommended = if_cell_sr->mss_recommended;
2210 }
2211 } else if (ifp->if_subfamily == IFNET_SUBFAMILY_WIFI) {
2212 nstat_ifnet_desc_wifi_status *wifi_status = &link_status->u.wifi;
2213 struct if_wifi_status_v1 *if_wifi_sr =
2214 &ifsr->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;
2215
2216 if (ifsr->ifsr_version != IF_WIFI_STATUS_REPORT_VERSION_1) {
2217 goto done;
2218 }
2219
2220 link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_WIFI;
2221
2222 if (if_wifi_sr->valid_bitmask & IF_WIFI_LINK_QUALITY_METRIC_VALID) {
2223 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_LINK_QUALITY_METRIC_VALID;
2224 wifi_status->link_quality_metric = if_wifi_sr->link_quality_metric;
2225 }
2226 if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID) {
2227 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID;
2228 wifi_status->ul_effective_bandwidth = if_wifi_sr->ul_effective_bandwidth;
2229 }
2230 if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MAX_BANDWIDTH_VALID) {
2231 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MAX_BANDWIDTH_VALID;
2232 wifi_status->ul_max_bandwidth = if_wifi_sr->ul_max_bandwidth;
2233 }
2234 if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MIN_LATENCY_VALID) {
2235 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MIN_LATENCY_VALID;
2236 wifi_status->ul_min_latency = if_wifi_sr->ul_min_latency;
2237 }
2238 if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_EFFECTIVE_LATENCY_VALID) {
2239 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_EFFECTIVE_LATENCY_VALID;
2240 wifi_status->ul_effective_latency = if_wifi_sr->ul_effective_latency;
2241 }
2242 if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MAX_LATENCY_VALID) {
2243 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MAX_LATENCY_VALID;
2244 wifi_status->ul_max_latency = if_wifi_sr->ul_max_latency;
2245 }
2246 if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_RETXT_LEVEL_VALID) {
2247 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_VALID;
2248 if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_NONE) {
2249 wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_NONE;
2250 } else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_LOW) {
2251 wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_LOW;
2252 } else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_MEDIUM) {
2253 wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_MEDIUM;
2254 } else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_HIGH) {
2255 wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_HIGH;
2256 } else {
2257 wifi_status->valid_bitmask &= ~NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_VALID;
2258 }
2259 }
2260 if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_BYTES_LOST_VALID) {
2261 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_BYTES_LOST_VALID;
2262 wifi_status->ul_bytes_lost = if_wifi_sr->ul_bytes_lost;
2263 }
2264 if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_ERROR_RATE_VALID) {
2265 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_ERROR_RATE_VALID;
2266 wifi_status->ul_error_rate = if_wifi_sr->ul_error_rate;
2267 }
2268 if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID) {
2269 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID;
2270 wifi_status->dl_effective_bandwidth = if_wifi_sr->dl_effective_bandwidth;
2271 }
2272 if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MAX_BANDWIDTH_VALID) {
2273 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MAX_BANDWIDTH_VALID;
2274 wifi_status->dl_max_bandwidth = if_wifi_sr->dl_max_bandwidth;
2275 }
2276 if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MIN_LATENCY_VALID) {
2277 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MIN_LATENCY_VALID;
2278 wifi_status->dl_min_latency = if_wifi_sr->dl_min_latency;
2279 }
2280 if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_EFFECTIVE_LATENCY_VALID) {
2281 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_EFFECTIVE_LATENCY_VALID;
2282 wifi_status->dl_effective_latency = if_wifi_sr->dl_effective_latency;
2283 }
2284 if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MAX_LATENCY_VALID) {
2285 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MAX_LATENCY_VALID;
2286 wifi_status->dl_max_latency = if_wifi_sr->dl_max_latency;
2287 }
2288 if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_ERROR_RATE_VALID) {
2289 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_ERROR_RATE_VALID;
2290 wifi_status->dl_error_rate = if_wifi_sr->dl_error_rate;
2291 }
2292 if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_FREQUENCY_VALID) {
2293 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_VALID;
2294 if (if_wifi_sr->config_frequency == IF_WIFI_CONFIG_FREQUENCY_2_4_GHZ) {
2295 wifi_status->config_frequency = NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_2_4_GHZ;
2296 } else if (if_wifi_sr->config_frequency == IF_WIFI_CONFIG_FREQUENCY_5_0_GHZ) {
2297 wifi_status->config_frequency = NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_5_0_GHZ;
2298 } else {
2299 wifi_status->valid_bitmask &= ~NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_VALID;
2300 }
2301 }
2302 if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_MULTICAST_RATE_VALID) {
2303 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_MULTICAST_RATE_VALID;
2304 wifi_status->config_multicast_rate = if_wifi_sr->config_multicast_rate;
2305 }
2306 if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_SCAN_COUNT_VALID) {
2307 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_SCAN_COUNT_VALID;
2308 wifi_status->scan_count = if_wifi_sr->scan_count;
2309 }
2310 if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_SCAN_DURATION_VALID) {
2311 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_SCAN_DURATION_VALID;
2312 wifi_status->scan_duration = if_wifi_sr->scan_duration;
2313 }
2314 }
2315
2316 done:
2317 lck_rw_done(&ifp->if_link_status_lock);
2318 }
2319
2320 static u_int64_t nstat_ifnet_last_report_time = 0;
2321 extern int tcp_report_stats_interval;
2322
2323 static void
2324 nstat_ifnet_compute_percentages(struct if_tcp_ecn_perf_stat *ifst)
2325 {
2326 /* Retransmit percentage */
2327 if (ifst->total_rxmitpkts > 0 && ifst->total_txpkts > 0) {
2328 /* shift by 10 for precision */
2329 ifst->rxmit_percent =
2330 ((ifst->total_rxmitpkts << 10) * 100) / ifst->total_txpkts;
2331 } else {
2332 ifst->rxmit_percent = 0;
2333 }
2334
2335 /* Out-of-order percentage */
2336 if (ifst->total_oopkts > 0 && ifst->total_rxpkts > 0) {
2337 /* shift by 10 for precision */
2338 ifst->oo_percent =
2339 ((ifst->total_oopkts << 10) * 100) / ifst->total_rxpkts;
2340 } else {
2341 ifst->oo_percent = 0;
2342 }
2343
2344 /* Reorder percentage */
2345 if (ifst->total_reorderpkts > 0 &&
2346 (ifst->total_txpkts + ifst->total_rxpkts) > 0) {
2347 /* shift by 10 for precision */
2348 ifst->reorder_percent =
2349 ((ifst->total_reorderpkts << 10) * 100) /
2350 (ifst->total_txpkts + ifst->total_rxpkts);
2351 } else {
2352 ifst->reorder_percent = 0;
2353 }
2354 }
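/*
 * Worked example: the *_percent fields above are percentages scaled by
 * 1 << 10. With 50 retransmits out of 1000 transmitted packets,
 * rxmit_percent = (50 << 10) * 100 / 1000 = 5120, which a consumer can
 * read back as 5120 / 1024.0 = 5.0 percent.
 */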
2355
2356 static void
2357 nstat_ifnet_normalize_counter(struct if_tcp_ecn_stat *if_st)
2358 {
2359 u_int64_t ecn_on_conn, ecn_off_conn;
2360
2361 if (if_st == NULL) {
2362 return;
2363 }
2364 ecn_on_conn = if_st->ecn_client_success +
2365 if_st->ecn_server_success;
2366 ecn_off_conn = if_st->ecn_off_conn +
2367 (if_st->ecn_client_setup - if_st->ecn_client_success) +
2368 (if_st->ecn_server_setup - if_st->ecn_server_success);
2369
2370 /*
2371 * Report sack episodes as a per-connection ratio, and rst_drop and
2372 * rxmit_drop as per-connection percentages, shifted by 10 for precision.
2373 */
2374 if (ecn_on_conn > 0) {
2375 if_st->ecn_on.sack_episodes =
2376 (if_st->ecn_on.sack_episodes << 10) / ecn_on_conn;
2377 if_st->ecn_on.rst_drop =
2378 (if_st->ecn_on.rst_drop << 10) * 100 / ecn_on_conn;
2379 if_st->ecn_on.rxmit_drop =
2380 (if_st->ecn_on.rxmit_drop << 10) * 100 / ecn_on_conn;
2381 } else {
2382 /* set to zero, just in case */
2383 if_st->ecn_on.sack_episodes = 0;
2384 if_st->ecn_on.rst_drop = 0;
2385 if_st->ecn_on.rxmit_drop = 0;
2386 }
2387
2388 if (ecn_off_conn > 0) {
2389 if_st->ecn_off.sack_episodes =
2390 (if_st->ecn_off.sack_episodes << 10) / ecn_off_conn;
2391 if_st->ecn_off.rst_drop =
2392 (if_st->ecn_off.rst_drop << 10) * 100 / ecn_off_conn;
2393 if_st->ecn_off.rxmit_drop =
2394 (if_st->ecn_off.rxmit_drop << 10) * 100 / ecn_off_conn;
2395 } else {
2396 if_st->ecn_off.sack_episodes = 0;
2397 if_st->ecn_off.rst_drop = 0;
2398 if_st->ecn_off.rxmit_drop = 0;
2399 }
2400 if_st->ecn_total_conn = ecn_off_conn + ecn_on_conn;
2401 }
2402
2403 static void
2404 nstat_ifnet_report_ecn_stats(void)
2405 {
2406 u_int64_t uptime, last_report_time;
2407 struct nstat_sysinfo_data data;
2408 struct nstat_sysinfo_ifnet_ecn_stats *st;
2409 struct ifnet *ifp;
2410
2411 uptime = net_uptime();
2412
2413 if ((int)(uptime - nstat_ifnet_last_report_time) <
2414 tcp_report_stats_interval) {
2415 return;
2416 }
2417
2418 last_report_time = nstat_ifnet_last_report_time;
2419 nstat_ifnet_last_report_time = uptime;
2420 data.flags = NSTAT_SYSINFO_IFNET_ECN_STATS;
2421 st = &data.u.ifnet_ecn_stats;
2422
2423 ifnet_head_lock_shared();
2424 TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
2425 if (ifp->if_ipv4_stat == NULL || ifp->if_ipv6_stat == NULL) {
2426 continue;
2427 }
2428
2429 if (!IF_FULLY_ATTACHED(ifp)) {
2430 continue;
2431 }
2432
2433 /* Limit reporting to Wifi, Ethernet and cellular. */
2434 if (!(IFNET_IS_ETHERNET(ifp) || IFNET_IS_CELLULAR(ifp))) {
2435 continue;
2436 }
2437
2438 bzero(st, sizeof(*st));
2439 if (IFNET_IS_CELLULAR(ifp)) {
2440 st->ifnet_type = NSTAT_IFNET_ECN_TYPE_CELLULAR;
2441 } else if (IFNET_IS_WIFI(ifp)) {
2442 st->ifnet_type = NSTAT_IFNET_ECN_TYPE_WIFI;
2443 } else {
2444 st->ifnet_type = NSTAT_IFNET_ECN_TYPE_ETHERNET;
2445 }
2446 data.unsent_data_cnt = ifp->if_unsent_data_cnt;
2447 /* skip if there was no update since last report */
2448 if (ifp->if_ipv4_stat->timestamp <= 0 ||
2449 ifp->if_ipv4_stat->timestamp < last_report_time) {
2450 goto v6;
2451 }
2452 st->ifnet_proto = NSTAT_IFNET_ECN_PROTO_IPV4;
2453 /* compute percentages using packet counts */
2454 nstat_ifnet_compute_percentages(&ifp->if_ipv4_stat->ecn_on);
2455 nstat_ifnet_compute_percentages(&ifp->if_ipv4_stat->ecn_off);
2456 nstat_ifnet_normalize_counter(ifp->if_ipv4_stat);
2457 bcopy(ifp->if_ipv4_stat, &st->ecn_stat,
2458 sizeof(st->ecn_stat));
2459 nstat_sysinfo_send_data(&data);
2460 bzero(ifp->if_ipv4_stat, sizeof(*ifp->if_ipv4_stat));
2461
2462 v6:
2463 /* skip if there was no update since last report */
2464 if (ifp->if_ipv6_stat->timestamp <= 0 ||
2465 ifp->if_ipv6_stat->timestamp < last_report_time) {
2466 continue;
2467 }
2468 st->ifnet_proto = NSTAT_IFNET_ECN_PROTO_IPV6;
2469
2470 /* compute percentages using packet counts */
2471 nstat_ifnet_compute_percentages(&ifp->if_ipv6_stat->ecn_on);
2472 nstat_ifnet_compute_percentages(&ifp->if_ipv6_stat->ecn_off);
2473 nstat_ifnet_normalize_counter(ifp->if_ipv6_stat);
2474 bcopy(ifp->if_ipv6_stat, &st->ecn_stat,
2475 sizeof(st->ecn_stat));
2476 nstat_sysinfo_send_data(&data);
2477
2478 /* Zero the stats in ifp */
2479 bzero(ifp->if_ipv6_stat, sizeof(*ifp->if_ipv6_stat));
2480 }
2481 ifnet_head_done();
2482 }
2483
2484 /* Some thresholds to determine Low Internet mode */
2485 #define NSTAT_LIM_DL_MAX_BANDWIDTH_THRESHOLD 1000000 /* 1 Mbps */
2486 #define NSTAT_LIM_UL_MAX_BANDWIDTH_THRESHOLD 500000 /* 500 Kbps */
2487 #define NSTAT_LIM_UL_MIN_RTT_THRESHOLD 1000 /* 1 second */
2488 #define NSTAT_LIM_CONN_TIMEOUT_PERCENT_THRESHOLD (10 << 10) /* 10 percent connection timeouts */
2489 #define NSTAT_LIM_PACKET_LOSS_PERCENT_THRESHOLD (2 << 10) /* 2 percent packet loss rate */
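/*
 * The two percentage thresholds above are in the same <<10 fixed-point
 * domain used by nstat_lim_activity_check() below: (10 << 10) == 10240
 * stands for 10 percent and (2 << 10) == 2048 for 2 percent.
 */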
2490
2491 static boolean_t
2492 nstat_lim_activity_check(struct if_lim_perf_stat *st)
2493 {
2494 /* check that the current activity is enough to report stats */
2495 if (st->lim_total_txpkts < nstat_lim_min_tx_pkts ||
2496 st->lim_total_rxpkts < nstat_lim_min_rx_pkts ||
2497 st->lim_conn_attempts == 0) {
2498 return FALSE;
2499 }
2500
2501 /*
2502 * Compute percentages if there was enough activity. Use
2503 * shift-left by 10 to preserve precision.
2504 */
2505 st->lim_packet_loss_percent = ((st->lim_total_retxpkts << 10) /
2506 st->lim_total_txpkts) * 100;
2507
2508 st->lim_packet_ooo_percent = ((st->lim_total_oopkts << 10) /
2509 st->lim_total_rxpkts) * 100;
2510
2511 st->lim_conn_timeout_percent = ((st->lim_conn_timeouts << 10) /
2512 st->lim_conn_attempts) * 100;
2513
2514 /*
2515 * Is Low Internet detected? First order metrics are bandwidth
2516 * and RTT. If these metrics are below the minimum thresholds
2517 * defined then the network attachment can be classified as
2518 * having Low Internet capacity.
2519 *
2520 * High connection timeout rate also indicates Low Internet
2521 * capacity.
2522 */
2523 if (st->lim_dl_max_bandwidth > 0 &&
2524 st->lim_dl_max_bandwidth <= NSTAT_LIM_DL_MAX_BANDWIDTH_THRESHOLD) {
2525 st->lim_dl_detected = 1;
2526 }
2527
2528 if ((st->lim_ul_max_bandwidth > 0 &&
2529 st->lim_ul_max_bandwidth <= NSTAT_LIM_UL_MAX_BANDWIDTH_THRESHOLD) ||
2530 st->lim_rtt_min >= NSTAT_LIM_UL_MIN_RTT_THRESHOLD) {
2531 st->lim_ul_detected = 1;
2532 }
2533
2534 if (st->lim_conn_attempts > 20 &&
2535 st->lim_conn_timeout_percent >=
2536 NSTAT_LIM_CONN_TIMEOUT_PERCENT_THRESHOLD) {
2537 st->lim_ul_detected = 1;
2538 }
2539 /*
2540 * Second-order metric: if there is high packet loss even after using
2541 * delay-based congestion control algorithms, classify the attachment
2542 * as Low Internet as well.
2543 */
2544 if (st->lim_bk_txpkts >= nstat_lim_min_tx_pkts &&
2545 st->lim_packet_loss_percent >=
2546 NSTAT_LIM_PACKET_LOSS_PERCENT_THRESHOLD) {
2547 st->lim_ul_detected = 1;
2548 }
2549 return TRUE;
2550 }
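/*
 * Worked example: with 1000 transmitted packets and 25 retransmits,
 * lim_packet_loss_percent = ((25 << 10) / 1000) * 100 = 2500, i.e.
 * roughly 2.44 percent, which exceeds
 * NSTAT_LIM_PACKET_LOSS_PERCENT_THRESHOLD (2 << 10 == 2048).
 */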
2551
2552 static u_int64_t nstat_lim_last_report_time = 0;
2553 static void
2554 nstat_ifnet_report_lim_stats(void)
2555 {
2556 u_int64_t uptime;
2557 struct nstat_sysinfo_data data;
2558 struct nstat_sysinfo_lim_stats *st;
2559 struct ifnet *ifp;
2560 int err;
2561
2562 uptime = net_uptime();
2563
2564 if ((u_int32_t)(uptime - nstat_lim_last_report_time) <
2565 nstat_lim_interval) {
2566 return;
2567 }
2568
2569 nstat_lim_last_report_time = uptime;
2570 data.flags = NSTAT_SYSINFO_LIM_STATS;
2571 st = &data.u.lim_stats;
2572 data.unsent_data_cnt = 0;
2573
2574 ifnet_head_lock_shared();
2575 TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
2576 if (!IF_FULLY_ATTACHED(ifp)) {
2577 continue;
2578 }
2579
2580 /* Limit reporting to Wifi, Ethernet and cellular */
2581 if (!(IFNET_IS_ETHERNET(ifp) || IFNET_IS_CELLULAR(ifp))) {
2582 continue;
2583 }
2584
2585 if (!nstat_lim_activity_check(&ifp->if_lim_stat)) {
2586 continue;
2587 }
2588
2589 bzero(st, sizeof(*st));
2590 st->ifnet_siglen = sizeof(st->ifnet_signature);
2591 err = ifnet_get_netsignature(ifp, AF_INET,
2592 (u_int8_t *)&st->ifnet_siglen, NULL,
2593 st->ifnet_signature);
2594 if (err != 0) {
2595 err = ifnet_get_netsignature(ifp, AF_INET6,
2596 (u_int8_t *)&st->ifnet_siglen, NULL,
2597 st->ifnet_signature);
2598 if (err != 0) {
2599 continue;
2600 }
2601 }
2602 ifnet_lock_shared(ifp);
2603 if (IFNET_IS_CELLULAR(ifp)) {
2604 st->ifnet_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_CELLULAR;
2605 } else if (IFNET_IS_WIFI(ifp)) {
2606 st->ifnet_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_WIFI;
2607 } else {
2608 st->ifnet_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_ETHERNET;
2609 }
2610 bcopy(&ifp->if_lim_stat, &st->lim_stat,
2611 sizeof(st->lim_stat));
2612
2613 /* Zero the stats in ifp */
2614 bzero(&ifp->if_lim_stat, sizeof(ifp->if_lim_stat));
2615 ifnet_lock_done(ifp);
2616 nstat_sysinfo_send_data(&data);
2617 }
2618 ifnet_head_done();
2619 }
2620
2621 static errno_t
2622 nstat_ifnet_copy_descriptor(
2623 nstat_provider_cookie_t cookie,
2624 void *data,
2625 u_int32_t len)
2626 {
2627 nstat_ifnet_descriptor *desc = (nstat_ifnet_descriptor *)data;
2628 struct nstat_ifnet_cookie *ifcookie =
2629 (struct nstat_ifnet_cookie *)cookie;
2630 struct ifnet *ifp = ifcookie->ifp;
2631
2632 if (len < sizeof(nstat_ifnet_descriptor)) {
2633 return EINVAL;
2634 }
2635
2636 if (nstat_ifnet_gone(cookie)) {
2637 return EINVAL;
2638 }
2639
2640 bzero(desc, sizeof(*desc));
2641 ifnet_lock_shared(ifp);
2642 strlcpy(desc->name, ifp->if_xname, sizeof(desc->name));
2643 desc->ifindex = ifp->if_index;
2644 desc->threshold = ifp->if_data_threshold;
2645 desc->type = ifp->if_type;
2646 if (ifp->if_desc.ifd_len < sizeof(desc->description)) {
2647 memcpy(desc->description, ifp->if_desc.ifd_desc,
2648 sizeof(desc->description));
2649 }
2650 nstat_ifnet_copy_link_status(ifp, desc);
2651 ifnet_lock_done(ifp);
2652 return 0;
2653 }
2654
2655 static void
2656 nstat_init_ifnet_provider(void)
2657 {
2658 bzero(&nstat_ifnet_provider, sizeof(nstat_ifnet_provider));
2659 nstat_ifnet_provider.nstat_provider_id = NSTAT_PROVIDER_IFNET;
2660 nstat_ifnet_provider.nstat_descriptor_length = sizeof(nstat_ifnet_descriptor);
2661 nstat_ifnet_provider.nstat_lookup = nstat_ifnet_lookup;
2662 nstat_ifnet_provider.nstat_gone = nstat_ifnet_gone;
2663 nstat_ifnet_provider.nstat_counts = nstat_ifnet_counts;
2664 nstat_ifnet_provider.nstat_watcher_add = NULL;
2665 nstat_ifnet_provider.nstat_watcher_remove = NULL;
2666 nstat_ifnet_provider.nstat_copy_descriptor = nstat_ifnet_copy_descriptor;
2667 nstat_ifnet_provider.nstat_release = nstat_ifnet_release;
2668 nstat_ifnet_provider.next = nstat_providers;
2669 nstat_providers = &nstat_ifnet_provider;
2670 }
2671
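/*
 * Invoked when an interface has accumulated at least if_data_threshold
 * bytes of traffic; pushes fresh counts to every client holding a
 * source for that interface index.
 */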
2672 __private_extern__ void
2673 nstat_ifnet_threshold_reached(unsigned int ifindex)
2674 {
2675 nstat_control_state *state;
2676 nstat_src *src;
2677 struct ifnet *ifp;
2678 struct nstat_ifnet_cookie *ifcookie;
2679
2680 lck_mtx_lock(&nstat_mtx);
2681 for (state = nstat_controls; state; state = state->ncs_next) {
2682 lck_mtx_lock(&state->ncs_mtx);
2683 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
2684 {
2685 if (src->provider != &nstat_ifnet_provider) {
2686 continue;
2687 }
2688 ifcookie = (struct nstat_ifnet_cookie *)src->cookie;
2689 ifp = ifcookie->ifp;
2690 if (ifp->if_index != ifindex) {
2691 continue;
2692 }
2693 nstat_control_send_counts(state, src, 0, 0, NULL);
2694 }
2695 lck_mtx_unlock(&state->ncs_mtx);
2696 }
2697 lck_mtx_unlock(&nstat_mtx);
2698 }
2699
2700 #pragma mark -- Sysinfo --
2701 static void
2702 nstat_set_keyval_scalar(nstat_sysinfo_keyval *kv, int key, u_int32_t val)
2703 {
2704 kv->nstat_sysinfo_key = key;
2705 kv->nstat_sysinfo_flags = NSTAT_SYSINFO_FLAG_SCALAR;
2706 kv->u.nstat_sysinfo_scalar = val;
2707 kv->nstat_sysinfo_valsize = sizeof(kv->u.nstat_sysinfo_scalar);
2708 }
2709
2710 static void
2711 nstat_set_keyval_string(nstat_sysinfo_keyval *kv, int key, u_int8_t *buf,
2712 u_int32_t len)
2713 {
2714 kv->nstat_sysinfo_key = key;
2715 kv->nstat_sysinfo_flags = NSTAT_SYSINFO_FLAG_STRING;
2716 kv->nstat_sysinfo_valsize = min(len,
2717 NSTAT_SYSINFO_KEYVAL_STRING_MAXSIZE);
2718 bcopy(buf, kv->u.nstat_sysinfo_string, kv->nstat_sysinfo_valsize);
2719 }
2720
2721 static void
2722 nstat_sysinfo_send_data_internal(
2723 nstat_control_state *control,
2724 nstat_sysinfo_data *data)
2725 {
2726 nstat_msg_sysinfo_counts *syscnt = NULL;
2727 size_t allocsize = 0, countsize = 0, nkeyvals = 0, finalsize = 0;
2728 nstat_sysinfo_keyval *kv;
2729 errno_t result = 0;
2730 size_t i = 0;
2731
2732 allocsize = offsetof(nstat_msg_sysinfo_counts, counts);
2733 countsize = offsetof(nstat_sysinfo_counts, nstat_sysinfo_keyvals);
2734 finalsize = allocsize;
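/*
 * allocsize reserves room for the maximum number of key-values for this
 * kind of stat; finalsize is trimmed to the number actually emitted
 * before the message is enqueued.
 */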
2735
2736 /* get number of key-vals for each kind of stat */
2737 switch (data->flags) {
2738 case NSTAT_SYSINFO_MBUF_STATS:
2739 nkeyvals = sizeof(struct nstat_sysinfo_mbuf_stats) /
2740 sizeof(u_int32_t);
2741 break;
2742 case NSTAT_SYSINFO_TCP_STATS:
2743 nkeyvals = NSTAT_SYSINFO_TCP_STATS_COUNT;
2744 break;
2745 case NSTAT_SYSINFO_IFNET_ECN_STATS:
2746 nkeyvals = (sizeof(struct if_tcp_ecn_stat) /
2747 sizeof(u_int64_t));
2748
2749 /* Two more keys for ifnet type and proto */
2750 nkeyvals += 2;
2751
2752 /* One key for unsent data. */
2753 nkeyvals++;
2754 break;
2755 case NSTAT_SYSINFO_LIM_STATS:
2756 nkeyvals = NSTAT_LIM_STAT_KEYVAL_COUNT;
2757 break;
2758 case NSTAT_SYSINFO_NET_API_STATS:
2759 nkeyvals = NSTAT_NET_API_STAT_KEYVAL_COUNT;
2760 break;
2761 default:
2762 return;
2763 }
2764 countsize += sizeof(nstat_sysinfo_keyval) * nkeyvals;
2765 allocsize += countsize;
2766
2767 syscnt = OSMalloc(allocsize, nstat_malloc_tag);
2768 if (syscnt == NULL) {
2769 return;
2770 }
2771 bzero(syscnt, allocsize);
2772
2773 kv = (nstat_sysinfo_keyval *) &syscnt->counts.nstat_sysinfo_keyvals;
2774 switch (data->flags) {
2775 case NSTAT_SYSINFO_MBUF_STATS:
2776 {
2777 nstat_set_keyval_scalar(&kv[i++],
2778 NSTAT_SYSINFO_KEY_MBUF_256B_TOTAL,
2779 data->u.mb_stats.total_256b);
2780 nstat_set_keyval_scalar(&kv[i++],
2781 NSTAT_SYSINFO_KEY_MBUF_2KB_TOTAL,
2782 data->u.mb_stats.total_2kb);
2783 nstat_set_keyval_scalar(&kv[i++],
2784 NSTAT_SYSINFO_KEY_MBUF_4KB_TOTAL,
2785 data->u.mb_stats.total_4kb);
2786 nstat_set_keyval_scalar(&kv[i++],
2787 NSTAT_SYSINFO_MBUF_16KB_TOTAL,
2788 data->u.mb_stats.total_16kb);
2789 nstat_set_keyval_scalar(&kv[i++],
2790 NSTAT_SYSINFO_KEY_SOCK_MBCNT,
2791 data->u.mb_stats.sbmb_total);
2792 nstat_set_keyval_scalar(&kv[i++],
2793 NSTAT_SYSINFO_KEY_SOCK_ATMBLIMIT,
2794 data->u.mb_stats.sb_atmbuflimit);
2795 nstat_set_keyval_scalar(&kv[i++],
2796 NSTAT_SYSINFO_MBUF_DRAIN_CNT,
2797 data->u.mb_stats.draincnt);
2798 nstat_set_keyval_scalar(&kv[i++],
2799 NSTAT_SYSINFO_MBUF_MEM_RELEASED,
2800 data->u.mb_stats.memreleased);
2801 nstat_set_keyval_scalar(&kv[i++],
2802 NSTAT_SYSINFO_KEY_SOCK_MBFLOOR,
2803 data->u.mb_stats.sbmb_floor);
2804 VERIFY(i == nkeyvals);
2805 break;
2806 }
2807 case NSTAT_SYSINFO_TCP_STATS:
2808 {
2809 nstat_set_keyval_scalar(&kv[i++],
2810 NSTAT_SYSINFO_KEY_IPV4_AVGRTT,
2811 data->u.tcp_stats.ipv4_avgrtt);
2812 nstat_set_keyval_scalar(&kv[i++],
2813 NSTAT_SYSINFO_KEY_IPV6_AVGRTT,
2814 data->u.tcp_stats.ipv6_avgrtt);
2815 nstat_set_keyval_scalar(&kv[i++],
2816 NSTAT_SYSINFO_KEY_SEND_PLR,
2817 data->u.tcp_stats.send_plr);
2818 nstat_set_keyval_scalar(&kv[i++],
2819 NSTAT_SYSINFO_KEY_RECV_PLR,
2820 data->u.tcp_stats.recv_plr);
2821 nstat_set_keyval_scalar(&kv[i++],
2822 NSTAT_SYSINFO_KEY_SEND_TLRTO,
2823 data->u.tcp_stats.send_tlrto_rate);
2824 nstat_set_keyval_scalar(&kv[i++],
2825 NSTAT_SYSINFO_KEY_SEND_REORDERRATE,
2826 data->u.tcp_stats.send_reorder_rate);
2827 nstat_set_keyval_scalar(&kv[i++],
2828 NSTAT_SYSINFO_CONNECTION_ATTEMPTS,
2829 data->u.tcp_stats.connection_attempts);
2830 nstat_set_keyval_scalar(&kv[i++],
2831 NSTAT_SYSINFO_CONNECTION_ACCEPTS,
2832 data->u.tcp_stats.connection_accepts);
2833 nstat_set_keyval_scalar(&kv[i++],
2834 NSTAT_SYSINFO_ECN_CLIENT_ENABLED,
2835 data->u.tcp_stats.ecn_client_enabled);
2836 nstat_set_keyval_scalar(&kv[i++],
2837 NSTAT_SYSINFO_ECN_SERVER_ENABLED,
2838 data->u.tcp_stats.ecn_server_enabled);
2839 nstat_set_keyval_scalar(&kv[i++],
2840 NSTAT_SYSINFO_ECN_CLIENT_SETUP,
2841 data->u.tcp_stats.ecn_client_setup);
2842 nstat_set_keyval_scalar(&kv[i++],
2843 NSTAT_SYSINFO_ECN_SERVER_SETUP,
2844 data->u.tcp_stats.ecn_server_setup);
2845 nstat_set_keyval_scalar(&kv[i++],
2846 NSTAT_SYSINFO_ECN_CLIENT_SUCCESS,
2847 data->u.tcp_stats.ecn_client_success);
2848 nstat_set_keyval_scalar(&kv[i++],
2849 NSTAT_SYSINFO_ECN_SERVER_SUCCESS,
2850 data->u.tcp_stats.ecn_server_success);
2851 nstat_set_keyval_scalar(&kv[i++],
2852 NSTAT_SYSINFO_ECN_NOT_SUPPORTED,
2853 data->u.tcp_stats.ecn_not_supported);
2854 nstat_set_keyval_scalar(&kv[i++],
2855 NSTAT_SYSINFO_ECN_LOST_SYN,
2856 data->u.tcp_stats.ecn_lost_syn);
2857 nstat_set_keyval_scalar(&kv[i++],
2858 NSTAT_SYSINFO_ECN_LOST_SYNACK,
2859 data->u.tcp_stats.ecn_lost_synack);
2860 nstat_set_keyval_scalar(&kv[i++],
2861 NSTAT_SYSINFO_ECN_RECV_CE,
2862 data->u.tcp_stats.ecn_recv_ce);
2863 nstat_set_keyval_scalar(&kv[i++],
2864 NSTAT_SYSINFO_ECN_RECV_ECE,
2865 data->u.tcp_stats.ecn_recv_ece);
2866 nstat_set_keyval_scalar(&kv[i++],
2867 NSTAT_SYSINFO_ECN_SENT_ECE,
2868 data->u.tcp_stats.ecn_sent_ece);
2869 nstat_set_keyval_scalar(&kv[i++],
2870 NSTAT_SYSINFO_ECN_CONN_RECV_CE,
2871 data->u.tcp_stats.ecn_conn_recv_ce);
2872 nstat_set_keyval_scalar(&kv[i++],
2873 NSTAT_SYSINFO_ECN_CONN_RECV_ECE,
2874 data->u.tcp_stats.ecn_conn_recv_ece);
2875 nstat_set_keyval_scalar(&kv[i++],
2876 NSTAT_SYSINFO_ECN_CONN_PLNOCE,
2877 data->u.tcp_stats.ecn_conn_plnoce);
2878 nstat_set_keyval_scalar(&kv[i++],
2879 NSTAT_SYSINFO_ECN_CONN_PL_CE,
2880 data->u.tcp_stats.ecn_conn_pl_ce);
2881 nstat_set_keyval_scalar(&kv[i++],
2882 NSTAT_SYSINFO_ECN_CONN_NOPL_CE,
2883 data->u.tcp_stats.ecn_conn_nopl_ce);
2884 nstat_set_keyval_scalar(&kv[i++],
2885 NSTAT_SYSINFO_ECN_FALLBACK_SYNLOSS,
2886 data->u.tcp_stats.ecn_fallback_synloss);
2887 nstat_set_keyval_scalar(&kv[i++],
2888 NSTAT_SYSINFO_ECN_FALLBACK_REORDER,
2889 data->u.tcp_stats.ecn_fallback_reorder);
2890 nstat_set_keyval_scalar(&kv[i++],
2891 NSTAT_SYSINFO_ECN_FALLBACK_CE,
2892 data->u.tcp_stats.ecn_fallback_ce);
2893 nstat_set_keyval_scalar(&kv[i++],
2894 NSTAT_SYSINFO_TFO_SYN_DATA_RCV,
2895 data->u.tcp_stats.tfo_syn_data_rcv);
2896 nstat_set_keyval_scalar(&kv[i++],
2897 NSTAT_SYSINFO_TFO_COOKIE_REQ_RCV,
2898 data->u.tcp_stats.tfo_cookie_req_rcv);
2899 nstat_set_keyval_scalar(&kv[i++],
2900 NSTAT_SYSINFO_TFO_COOKIE_SENT,
2901 data->u.tcp_stats.tfo_cookie_sent);
2902 nstat_set_keyval_scalar(&kv[i++],
2903 NSTAT_SYSINFO_TFO_COOKIE_INVALID,
2904 data->u.tcp_stats.tfo_cookie_invalid);
2905 nstat_set_keyval_scalar(&kv[i++],
2906 NSTAT_SYSINFO_TFO_COOKIE_REQ,
2907 data->u.tcp_stats.tfo_cookie_req);
2908 nstat_set_keyval_scalar(&kv[i++],
2909 NSTAT_SYSINFO_TFO_COOKIE_RCV,
2910 data->u.tcp_stats.tfo_cookie_rcv);
2911 nstat_set_keyval_scalar(&kv[i++],
2912 NSTAT_SYSINFO_TFO_SYN_DATA_SENT,
2913 data->u.tcp_stats.tfo_syn_data_sent);
2914 nstat_set_keyval_scalar(&kv[i++],
2915 NSTAT_SYSINFO_TFO_SYN_DATA_ACKED,
2916 data->u.tcp_stats.tfo_syn_data_acked);
2917 nstat_set_keyval_scalar(&kv[i++],
2918 NSTAT_SYSINFO_TFO_SYN_LOSS,
2919 data->u.tcp_stats.tfo_syn_loss);
2920 nstat_set_keyval_scalar(&kv[i++],
2921 NSTAT_SYSINFO_TFO_BLACKHOLE,
2922 data->u.tcp_stats.tfo_blackhole);
2923 nstat_set_keyval_scalar(&kv[i++],
2924 NSTAT_SYSINFO_TFO_COOKIE_WRONG,
2925 data->u.tcp_stats.tfo_cookie_wrong);
2926 nstat_set_keyval_scalar(&kv[i++],
2927 NSTAT_SYSINFO_TFO_NO_COOKIE_RCV,
2928 data->u.tcp_stats.tfo_no_cookie_rcv);
2929 nstat_set_keyval_scalar(&kv[i++],
2930 NSTAT_SYSINFO_TFO_HEURISTICS_DISABLE,
2931 data->u.tcp_stats.tfo_heuristics_disable);
2932 nstat_set_keyval_scalar(&kv[i++],
2933 NSTAT_SYSINFO_TFO_SEND_BLACKHOLE,
2934 data->u.tcp_stats.tfo_sndblackhole);
2935 nstat_set_keyval_scalar(&kv[i++],
2936 NSTAT_SYSINFO_MPTCP_HANDOVER_ATTEMPT,
2937 data->u.tcp_stats.mptcp_handover_attempt);
2938 nstat_set_keyval_scalar(&kv[i++],
2939 NSTAT_SYSINFO_MPTCP_INTERACTIVE_ATTEMPT,
2940 data->u.tcp_stats.mptcp_interactive_attempt);
2941 nstat_set_keyval_scalar(&kv[i++],
2942 NSTAT_SYSINFO_MPTCP_AGGREGATE_ATTEMPT,
2943 data->u.tcp_stats.mptcp_aggregate_attempt);
2944 nstat_set_keyval_scalar(&kv[i++],
2945 NSTAT_SYSINFO_MPTCP_FP_HANDOVER_ATTEMPT,
2946 data->u.tcp_stats.mptcp_fp_handover_attempt);
2947 nstat_set_keyval_scalar(&kv[i++],
2948 NSTAT_SYSINFO_MPTCP_FP_INTERACTIVE_ATTEMPT,
2949 data->u.tcp_stats.mptcp_fp_interactive_attempt);
2950 nstat_set_keyval_scalar(&kv[i++],
2951 NSTAT_SYSINFO_MPTCP_FP_AGGREGATE_ATTEMPT,
2952 data->u.tcp_stats.mptcp_fp_aggregate_attempt);
2953 nstat_set_keyval_scalar(&kv[i++],
2954 NSTAT_SYSINFO_MPTCP_HEURISTIC_FALLBACK,
2955 data->u.tcp_stats.mptcp_heuristic_fallback);
2956 nstat_set_keyval_scalar(&kv[i++],
2957 NSTAT_SYSINFO_MPTCP_FP_HEURISTIC_FALLBACK,
2958 data->u.tcp_stats.mptcp_fp_heuristic_fallback);
2959 nstat_set_keyval_scalar(&kv[i++],
2960 NSTAT_SYSINFO_MPTCP_HANDOVER_SUCCESS_WIFI,
2961 data->u.tcp_stats.mptcp_handover_success_wifi);
2962 nstat_set_keyval_scalar(&kv[i++],
2963 NSTAT_SYSINFO_MPTCP_HANDOVER_SUCCESS_CELL,
2964 data->u.tcp_stats.mptcp_handover_success_cell);
2965 nstat_set_keyval_scalar(&kv[i++],
2966 NSTAT_SYSINFO_MPTCP_INTERACTIVE_SUCCESS,
2967 data->u.tcp_stats.mptcp_interactive_success);
2968 nstat_set_keyval_scalar(&kv[i++],
2969 NSTAT_SYSINFO_MPTCP_AGGREGATE_SUCCESS,
2970 data->u.tcp_stats.mptcp_aggregate_success);
2971 nstat_set_keyval_scalar(&kv[i++],
2972 NSTAT_SYSINFO_MPTCP_FP_HANDOVER_SUCCESS_WIFI,
2973 data->u.tcp_stats.mptcp_fp_handover_success_wifi);
2974 nstat_set_keyval_scalar(&kv[i++],
2975 NSTAT_SYSINFO_MPTCP_FP_HANDOVER_SUCCESS_CELL,
2976 data->u.tcp_stats.mptcp_fp_handover_success_cell);
2977 nstat_set_keyval_scalar(&kv[i++],
2978 NSTAT_SYSINFO_MPTCP_FP_INTERACTIVE_SUCCESS,
2979 data->u.tcp_stats.mptcp_fp_interactive_success);
2980 nstat_set_keyval_scalar(&kv[i++],
2981 NSTAT_SYSINFO_MPTCP_FP_AGGREGATE_SUCCESS,
2982 data->u.tcp_stats.mptcp_fp_aggregate_success);
2983 nstat_set_keyval_scalar(&kv[i++],
2984 NSTAT_SYSINFO_MPTCP_HANDOVER_CELL_FROM_WIFI,
2985 data->u.tcp_stats.mptcp_handover_cell_from_wifi);
2986 nstat_set_keyval_scalar(&kv[i++],
2987 NSTAT_SYSINFO_MPTCP_HANDOVER_WIFI_FROM_CELL,
2988 data->u.tcp_stats.mptcp_handover_wifi_from_cell);
2989 nstat_set_keyval_scalar(&kv[i++],
2990 NSTAT_SYSINFO_MPTCP_INTERACTIVE_CELL_FROM_WIFI,
2991 data->u.tcp_stats.mptcp_interactive_cell_from_wifi);
2992 nstat_set_keyval_scalar(&kv[i++],
2993 NSTAT_SYSINFO_MPTCP_HANDOVER_CELL_BYTES,
2994 data->u.tcp_stats.mptcp_handover_cell_bytes);
2995 nstat_set_keyval_scalar(&kv[i++],
2996 NSTAT_SYSINFO_MPTCP_INTERACTIVE_CELL_BYTES,
2997 data->u.tcp_stats.mptcp_interactive_cell_bytes);
2998 nstat_set_keyval_scalar(&kv[i++],
2999 NSTAT_SYSINFO_MPTCP_AGGREGATE_CELL_BYTES,
3000 data->u.tcp_stats.mptcp_aggregate_cell_bytes);
3001 nstat_set_keyval_scalar(&kv[i++],
3002 NSTAT_SYSINFO_MPTCP_HANDOVER_ALL_BYTES,
3003 data->u.tcp_stats.mptcp_handover_all_bytes);
3004 nstat_set_keyval_scalar(&kv[i++],
3005 NSTAT_SYSINFO_MPTCP_INTERACTIVE_ALL_BYTES,
3006 data->u.tcp_stats.mptcp_interactive_all_bytes);
3007 nstat_set_keyval_scalar(&kv[i++],
3008 NSTAT_SYSINFO_MPTCP_AGGREGATE_ALL_BYTES,
3009 data->u.tcp_stats.mptcp_aggregate_all_bytes);
3010 nstat_set_keyval_scalar(&kv[i++],
3011 NSTAT_SYSINFO_MPTCP_BACK_TO_WIFI,
3012 data->u.tcp_stats.mptcp_back_to_wifi);
3013 nstat_set_keyval_scalar(&kv[i++],
3014 NSTAT_SYSINFO_MPTCP_WIFI_PROXY,
3015 data->u.tcp_stats.mptcp_wifi_proxy);
3016 nstat_set_keyval_scalar(&kv[i++],
3017 NSTAT_SYSINFO_MPTCP_CELL_PROXY,
3018 data->u.tcp_stats.mptcp_cell_proxy);
3019 nstat_set_keyval_scalar(&kv[i++],
3020 NSTAT_SYSINFO_MPTCP_TRIGGERED_CELL,
3021 data->u.tcp_stats.mptcp_triggered_cell);
3022 VERIFY(i == nkeyvals);
3023 break;
3024 }
3025 case NSTAT_SYSINFO_IFNET_ECN_STATS:
3026 {
3027 nstat_set_keyval_scalar(&kv[i++],
3028 NSTAT_SYSINFO_ECN_IFNET_TYPE,
3029 data->u.ifnet_ecn_stats.ifnet_type);
3030 nstat_set_keyval_scalar(&kv[i++],
3031 NSTAT_SYSINFO_ECN_IFNET_PROTO,
3032 data->u.ifnet_ecn_stats.ifnet_proto);
3033 nstat_set_keyval_scalar(&kv[i++],
3034 NSTAT_SYSINFO_ECN_IFNET_CLIENT_SETUP,
3035 data->u.ifnet_ecn_stats.ecn_stat.ecn_client_setup);
3036 nstat_set_keyval_scalar(&kv[i++],
3037 NSTAT_SYSINFO_ECN_IFNET_SERVER_SETUP,
3038 data->u.ifnet_ecn_stats.ecn_stat.ecn_server_setup);
3039 nstat_set_keyval_scalar(&kv[i++],
3040 NSTAT_SYSINFO_ECN_IFNET_CLIENT_SUCCESS,
3041 data->u.ifnet_ecn_stats.ecn_stat.ecn_client_success);
3042 nstat_set_keyval_scalar(&kv[i++],
3043 NSTAT_SYSINFO_ECN_IFNET_SERVER_SUCCESS,
3044 data->u.ifnet_ecn_stats.ecn_stat.ecn_server_success);
3045 nstat_set_keyval_scalar(&kv[i++],
3046 NSTAT_SYSINFO_ECN_IFNET_PEER_NOSUPPORT,
3047 data->u.ifnet_ecn_stats.ecn_stat.ecn_peer_nosupport);
3048 nstat_set_keyval_scalar(&kv[i++],
3049 NSTAT_SYSINFO_ECN_IFNET_SYN_LOST,
3050 data->u.ifnet_ecn_stats.ecn_stat.ecn_syn_lost);
3051 nstat_set_keyval_scalar(&kv[i++],
3052 NSTAT_SYSINFO_ECN_IFNET_SYNACK_LOST,
3053 data->u.ifnet_ecn_stats.ecn_stat.ecn_synack_lost);
3054 nstat_set_keyval_scalar(&kv[i++],
3055 NSTAT_SYSINFO_ECN_IFNET_RECV_CE,
3056 data->u.ifnet_ecn_stats.ecn_stat.ecn_recv_ce);
3057 nstat_set_keyval_scalar(&kv[i++],
3058 NSTAT_SYSINFO_ECN_IFNET_RECV_ECE,
3059 data->u.ifnet_ecn_stats.ecn_stat.ecn_recv_ece);
3060 nstat_set_keyval_scalar(&kv[i++],
3061 NSTAT_SYSINFO_ECN_IFNET_CONN_RECV_CE,
3062 data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_recv_ce);
3063 nstat_set_keyval_scalar(&kv[i++],
3064 NSTAT_SYSINFO_ECN_IFNET_CONN_RECV_ECE,
3065 data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_recv_ece);
3066 nstat_set_keyval_scalar(&kv[i++],
3067 NSTAT_SYSINFO_ECN_IFNET_CONN_PLNOCE,
3068 data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_plnoce);
3069 nstat_set_keyval_scalar(&kv[i++],
3070 NSTAT_SYSINFO_ECN_IFNET_CONN_PLCE,
3071 data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_plce);
3072 nstat_set_keyval_scalar(&kv[i++],
3073 NSTAT_SYSINFO_ECN_IFNET_CONN_NOPLCE,
3074 data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_noplce);
3075 nstat_set_keyval_scalar(&kv[i++],
3076 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_SYNLOSS,
3077 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_synloss);
3078 nstat_set_keyval_scalar(&kv[i++],
3079 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_REORDER,
3080 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_reorder);
3081 nstat_set_keyval_scalar(&kv[i++],
3082 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_CE,
3083 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_ce);
3084 nstat_set_keyval_scalar(&kv[i++],
3085 NSTAT_SYSINFO_ECN_IFNET_ON_RTT_AVG,
3086 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rtt_avg);
3087 nstat_set_keyval_scalar(&kv[i++],
3088 NSTAT_SYSINFO_ECN_IFNET_ON_RTT_VAR,
3089 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rtt_var);
3090 nstat_set_keyval_scalar(&kv[i++],
3091 NSTAT_SYSINFO_ECN_IFNET_ON_OOPERCENT,
3092 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.oo_percent);
3093 nstat_set_keyval_scalar(&kv[i++],
3094 NSTAT_SYSINFO_ECN_IFNET_ON_SACK_EPISODE,
3095 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.sack_episodes);
3096 nstat_set_keyval_scalar(&kv[i++],
3097 NSTAT_SYSINFO_ECN_IFNET_ON_REORDER_PERCENT,
3098 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.reorder_percent);
3099 nstat_set_keyval_scalar(&kv[i++],
3100 NSTAT_SYSINFO_ECN_IFNET_ON_RXMIT_PERCENT,
3101 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rxmit_percent);
3102 nstat_set_keyval_scalar(&kv[i++],
3103 NSTAT_SYSINFO_ECN_IFNET_ON_RXMIT_DROP,
3104 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rxmit_drop);
3105 nstat_set_keyval_scalar(&kv[i++],
3106 NSTAT_SYSINFO_ECN_IFNET_OFF_RTT_AVG,
3107 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rtt_avg);
3108 nstat_set_keyval_scalar(&kv[i++],
3109 NSTAT_SYSINFO_ECN_IFNET_OFF_RTT_VAR,
3110 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rtt_var);
3111 nstat_set_keyval_scalar(&kv[i++],
3112 NSTAT_SYSINFO_ECN_IFNET_OFF_OOPERCENT,
3113 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.oo_percent);
3114 nstat_set_keyval_scalar(&kv[i++],
3115 NSTAT_SYSINFO_ECN_IFNET_OFF_SACK_EPISODE,
3116 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.sack_episodes);
3117 nstat_set_keyval_scalar(&kv[i++],
3118 NSTAT_SYSINFO_ECN_IFNET_OFF_REORDER_PERCENT,
3119 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.reorder_percent);
3120 nstat_set_keyval_scalar(&kv[i++],
3121 NSTAT_SYSINFO_ECN_IFNET_OFF_RXMIT_PERCENT,
3122 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rxmit_percent);
3123 nstat_set_keyval_scalar(&kv[i++],
3124 NSTAT_SYSINFO_ECN_IFNET_OFF_RXMIT_DROP,
3125 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rxmit_drop);
3126 nstat_set_keyval_scalar(&kv[i++],
3127 NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_TXPKTS,
3128 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_txpkts);
3129 nstat_set_keyval_scalar(&kv[i++],
3130 NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_RXMTPKTS,
3131 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_rxmitpkts);
3132 nstat_set_keyval_scalar(&kv[i++],
3133 NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_RXPKTS,
3134 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_rxpkts);
3135 nstat_set_keyval_scalar(&kv[i++],
3136 NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_OOPKTS,
3137 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_oopkts);
3138 nstat_set_keyval_scalar(&kv[i++],
3139 NSTAT_SYSINFO_ECN_IFNET_ON_DROP_RST,
3140 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rst_drop);
3141 nstat_set_keyval_scalar(&kv[i++],
3142 NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_TXPKTS,
3143 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_txpkts);
3144 nstat_set_keyval_scalar(&kv[i++],
3145 NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_RXMTPKTS,
3146 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_rxmitpkts);
3147 nstat_set_keyval_scalar(&kv[i++],
3148 NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_RXPKTS,
3149 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_rxpkts);
3150 nstat_set_keyval_scalar(&kv[i++],
3151 NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_OOPKTS,
3152 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_oopkts);
3153 nstat_set_keyval_scalar(&kv[i++],
3154 NSTAT_SYSINFO_ECN_IFNET_OFF_DROP_RST,
3155 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rst_drop);
3156 nstat_set_keyval_scalar(&kv[i++],
3157 NSTAT_SYSINFO_ECN_IFNET_TOTAL_CONN,
3158 data->u.ifnet_ecn_stats.ecn_stat.ecn_total_conn);
3159 nstat_set_keyval_scalar(&kv[i++],
3160 NSTAT_SYSINFO_IFNET_UNSENT_DATA,
3161 data->unsent_data_cnt);
3162 nstat_set_keyval_scalar(&kv[i++],
3163 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_DROPRST,
3164 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_droprst);
3165 nstat_set_keyval_scalar(&kv[i++],
3166 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_DROPRXMT,
3167 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_droprxmt);
3168 nstat_set_keyval_scalar(&kv[i++],
3169 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_SYNRST,
3170 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_synrst);
3171 break;
3172 }
3173 case NSTAT_SYSINFO_LIM_STATS:
3174 {
3175 nstat_set_keyval_string(&kv[i++],
3176 NSTAT_SYSINFO_LIM_IFNET_SIGNATURE,
3177 data->u.lim_stats.ifnet_signature,
3178 data->u.lim_stats.ifnet_siglen);
3179 nstat_set_keyval_scalar(&kv[i++],
3180 NSTAT_SYSINFO_LIM_IFNET_DL_MAX_BANDWIDTH,
3181 data->u.lim_stats.lim_stat.lim_dl_max_bandwidth);
3182 nstat_set_keyval_scalar(&kv[i++],
3183 NSTAT_SYSINFO_LIM_IFNET_UL_MAX_BANDWIDTH,
3184 data->u.lim_stats.lim_stat.lim_ul_max_bandwidth);
3185 nstat_set_keyval_scalar(&kv[i++],
3186 NSTAT_SYSINFO_LIM_IFNET_PACKET_LOSS_PERCENT,
3187 data->u.lim_stats.lim_stat.lim_packet_loss_percent);
3188 nstat_set_keyval_scalar(&kv[i++],
3189 NSTAT_SYSINFO_LIM_IFNET_PACKET_OOO_PERCENT,
3190 data->u.lim_stats.lim_stat.lim_packet_ooo_percent);
3191 nstat_set_keyval_scalar(&kv[i++],
3192 NSTAT_SYSINFO_LIM_IFNET_RTT_VARIANCE,
3193 data->u.lim_stats.lim_stat.lim_rtt_variance);
3194 nstat_set_keyval_scalar(&kv[i++],
3195 NSTAT_SYSINFO_LIM_IFNET_RTT_MIN,
3196 data->u.lim_stats.lim_stat.lim_rtt_min);
3197 nstat_set_keyval_scalar(&kv[i++],
3198 NSTAT_SYSINFO_LIM_IFNET_RTT_AVG,
3199 data->u.lim_stats.lim_stat.lim_rtt_average);
3200 nstat_set_keyval_scalar(&kv[i++],
3201 NSTAT_SYSINFO_LIM_IFNET_CONN_TIMEOUT_PERCENT,
3202 data->u.lim_stats.lim_stat.lim_conn_timeout_percent);
3203 nstat_set_keyval_scalar(&kv[i++],
3204 NSTAT_SYSINFO_LIM_IFNET_DL_DETECTED,
3205 data->u.lim_stats.lim_stat.lim_dl_detected);
3206 nstat_set_keyval_scalar(&kv[i++],
3207 NSTAT_SYSINFO_LIM_IFNET_UL_DETECTED,
3208 data->u.lim_stats.lim_stat.lim_ul_detected);
3209 nstat_set_keyval_scalar(&kv[i++],
3210 NSTAT_SYSINFO_LIM_IFNET_TYPE,
3211 data->u.lim_stats.ifnet_type);
3212 break;
3213 }
3214 case NSTAT_SYSINFO_NET_API_STATS:
3215 {
3216 nstat_set_keyval_scalar(&kv[i++],
3217 NSTAT_SYSINFO_API_IF_FLTR_ATTACH,
3218 data->u.net_api_stats.net_api_stats.nas_iflt_attach_total);
3219 nstat_set_keyval_scalar(&kv[i++],
3220 NSTAT_SYSINFO_API_IF_FLTR_ATTACH_OS,
3221 data->u.net_api_stats.net_api_stats.nas_iflt_attach_os_total);
3222 nstat_set_keyval_scalar(&kv[i++],
3223 NSTAT_SYSINFO_API_IP_FLTR_ADD,
3224 data->u.net_api_stats.net_api_stats.nas_ipf_add_total);
3225 nstat_set_keyval_scalar(&kv[i++],
3226 NSTAT_SYSINFO_API_IP_FLTR_ADD_OS,
3227 data->u.net_api_stats.net_api_stats.nas_ipf_add_os_total);
3228 nstat_set_keyval_scalar(&kv[i++],
3229 NSTAT_SYSINFO_API_SOCK_FLTR_ATTACH,
3230 data->u.net_api_stats.net_api_stats.nas_sfltr_register_total);
3231 nstat_set_keyval_scalar(&kv[i++],
3232 NSTAT_SYSINFO_API_SOCK_FLTR_ATTACH_OS,
3233 data->u.net_api_stats.net_api_stats.nas_sfltr_register_os_total);
3234
3235
3236 nstat_set_keyval_scalar(&kv[i++],
3237 NSTAT_SYSINFO_API_SOCK_ALLOC_TOTAL,
3238 data->u.net_api_stats.net_api_stats.nas_socket_alloc_total);
3239 nstat_set_keyval_scalar(&kv[i++],
3240 NSTAT_SYSINFO_API_SOCK_ALLOC_KERNEL,
3241 data->u.net_api_stats.net_api_stats.nas_socket_in_kernel_total);
3242 nstat_set_keyval_scalar(&kv[i++],
3243 NSTAT_SYSINFO_API_SOCK_ALLOC_KERNEL_OS,
3244 data->u.net_api_stats.net_api_stats.nas_socket_in_kernel_os_total);
3245 nstat_set_keyval_scalar(&kv[i++],
3246 NSTAT_SYSINFO_API_SOCK_NECP_CLIENTUUID,
3247 data->u.net_api_stats.net_api_stats.nas_socket_necp_clientuuid_total);
3248
3249 nstat_set_keyval_scalar(&kv[i++],
3250 NSTAT_SYSINFO_API_SOCK_DOMAIN_LOCAL,
3251 data->u.net_api_stats.net_api_stats.nas_socket_domain_local_total);
3252 nstat_set_keyval_scalar(&kv[i++],
3253 NSTAT_SYSINFO_API_SOCK_DOMAIN_ROUTE,
3254 data->u.net_api_stats.net_api_stats.nas_socket_domain_route_total);
3255 nstat_set_keyval_scalar(&kv[i++],
3256 NSTAT_SYSINFO_API_SOCK_DOMAIN_INET,
3257 data->u.net_api_stats.net_api_stats.nas_socket_domain_inet_total);
3258 nstat_set_keyval_scalar(&kv[i++],
3259 NSTAT_SYSINFO_API_SOCK_DOMAIN_INET6,
3260 data->u.net_api_stats.net_api_stats.nas_socket_domain_inet6_total);
3261 nstat_set_keyval_scalar(&kv[i++],
3262 NSTAT_SYSINFO_API_SOCK_DOMAIN_SYSTEM,
3263 data->u.net_api_stats.net_api_stats.nas_socket_domain_system_total);
3264 nstat_set_keyval_scalar(&kv[i++],
3265 NSTAT_SYSINFO_API_SOCK_DOMAIN_MULTIPATH,
3266 data->u.net_api_stats.net_api_stats.nas_socket_domain_multipath_total);
3267 nstat_set_keyval_scalar(&kv[i++],
3268 NSTAT_SYSINFO_API_SOCK_DOMAIN_KEY,
3269 data->u.net_api_stats.net_api_stats.nas_socket_domain_key_total);
3270 nstat_set_keyval_scalar(&kv[i++],
3271 NSTAT_SYSINFO_API_SOCK_DOMAIN_NDRV,
3272 data->u.net_api_stats.net_api_stats.nas_socket_domain_ndrv_total);
3273 nstat_set_keyval_scalar(&kv[i++],
3274 NSTAT_SYSINFO_API_SOCK_DOMAIN_OTHER,
3275 data->u.net_api_stats.net_api_stats.nas_socket_domain_other_total);
3276
3277 nstat_set_keyval_scalar(&kv[i++],
3278 NSTAT_SYSINFO_API_SOCK_INET_STREAM,
3279 data->u.net_api_stats.net_api_stats.nas_socket_inet_stream_total);
3280 nstat_set_keyval_scalar(&kv[i++],
3281 NSTAT_SYSINFO_API_SOCK_INET_DGRAM,
3282 data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_total);
3283 nstat_set_keyval_scalar(&kv[i++],
3284 NSTAT_SYSINFO_API_SOCK_INET_DGRAM_CONNECTED,
3285 data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_connected);
3286 nstat_set_keyval_scalar(&kv[i++],
3287 NSTAT_SYSINFO_API_SOCK_INET_DGRAM_DNS,
3288 data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_dns);
3289 nstat_set_keyval_scalar(&kv[i++],
3290 NSTAT_SYSINFO_API_SOCK_INET_DGRAM_NO_DATA,
3291 data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_no_data);
3292
3293 nstat_set_keyval_scalar(&kv[i++],
3294 NSTAT_SYSINFO_API_SOCK_INET6_STREAM,
3295 data->u.net_api_stats.net_api_stats.nas_socket_inet6_stream_total);
3296 nstat_set_keyval_scalar(&kv[i++],
3297 NSTAT_SYSINFO_API_SOCK_INET6_DGRAM,
3298 data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_total);
3299 nstat_set_keyval_scalar(&kv[i++],
3300 NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_CONNECTED,
3301 data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_connected);
3302 nstat_set_keyval_scalar(&kv[i++],
3303 NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_DNS,
3304 data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_dns);
3305 nstat_set_keyval_scalar(&kv[i++],
3306 NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_NO_DATA,
3307 data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_no_data);
3308
3309 nstat_set_keyval_scalar(&kv[i++],
3310 NSTAT_SYSINFO_API_SOCK_INET_MCAST_JOIN,
3311 data->u.net_api_stats.net_api_stats.nas_socket_mcast_join_total);
3312 nstat_set_keyval_scalar(&kv[i++],
3313 NSTAT_SYSINFO_API_SOCK_INET_MCAST_JOIN_OS,
3314 data->u.net_api_stats.net_api_stats.nas_socket_mcast_join_os_total);
3315
3316 nstat_set_keyval_scalar(&kv[i++],
3317 NSTAT_SYSINFO_API_NEXUS_FLOW_INET_STREAM,
3318 data->u.net_api_stats.net_api_stats.nas_nx_flow_inet_stream_total);
3319 nstat_set_keyval_scalar(&kv[i++],
3320 NSTAT_SYSINFO_API_NEXUS_FLOW_INET_DATAGRAM,
3321 data->u.net_api_stats.net_api_stats.nas_nx_flow_inet_dgram_total);
3322
3323 nstat_set_keyval_scalar(&kv[i++],
3324 NSTAT_SYSINFO_API_NEXUS_FLOW_INET6_STREAM,
3325 data->u.net_api_stats.net_api_stats.nas_nx_flow_inet6_stream_total);
3326 nstat_set_keyval_scalar(&kv[i++],
3327 NSTAT_SYSINFO_API_NEXUS_FLOW_INET6_DATAGRAM,
3328 data->u.net_api_stats.net_api_stats.nas_nx_flow_inet6_dgram_total);
3329
3330 nstat_set_keyval_scalar(&kv[i++],
3331 NSTAT_SYSINFO_API_IFNET_ALLOC,
3332 data->u.net_api_stats.net_api_stats.nas_ifnet_alloc_total);
3333 nstat_set_keyval_scalar(&kv[i++],
3334 NSTAT_SYSINFO_API_IFNET_ALLOC_OS,
3335 data->u.net_api_stats.net_api_stats.nas_ifnet_alloc_os_total);
3336
3337 nstat_set_keyval_scalar(&kv[i++],
3338 NSTAT_SYSINFO_API_PF_ADDRULE,
3339 data->u.net_api_stats.net_api_stats.nas_pf_addrule_total);
3340 nstat_set_keyval_scalar(&kv[i++],
3341 NSTAT_SYSINFO_API_PF_ADDRULE_OS,
3342 data->u.net_api_stats.net_api_stats.nas_pf_addrule_os);
3343
3344 nstat_set_keyval_scalar(&kv[i++],
3345 NSTAT_SYSINFO_API_VMNET_START,
3346 data->u.net_api_stats.net_api_stats.nas_vmnet_total);
3347
3348
3349 nstat_set_keyval_scalar(&kv[i++],
3350 NSTAT_SYSINFO_API_REPORT_INTERVAL,
3351 data->u.net_api_stats.report_interval);
3352
3353 break;
3354 }
3355 }
3356 if (syscnt != NULL) {
3357 VERIFY(i > 0 && i <= nkeyvals);
3358 countsize = offsetof(nstat_sysinfo_counts,
3359 nstat_sysinfo_keyvals) +
3360 sizeof(nstat_sysinfo_keyval) * i;
3361 finalsize += countsize;
3362 syscnt->hdr.type = NSTAT_MSG_TYPE_SYSINFO_COUNTS;
3363 syscnt->hdr.length = finalsize;
3364 syscnt->counts.nstat_sysinfo_len = countsize;
3365
3366 result = ctl_enqueuedata(control->ncs_kctl,
3367 control->ncs_unit, syscnt, finalsize, CTL_DATA_EOR);
3368 if (result != 0) {
3369 nstat_stats.nstat_sysinfofailures += 1;
3370 }
3371 OSFree(syscnt, allocsize, nstat_malloc_tag);
3372 }
3373 return;
3374 }
3375
3376 __private_extern__ void
3377 nstat_sysinfo_send_data(
3378 nstat_sysinfo_data *data)
3379 {
3380 nstat_control_state *control;
3381
3382 lck_mtx_lock(&nstat_mtx);
3383 for (control = nstat_controls; control; control = control->ncs_next) {
3384 lck_mtx_lock(&control->ncs_mtx);
3385 if ((control->ncs_flags & NSTAT_FLAG_SYSINFO_SUBSCRIBED) != 0) {
3386 nstat_sysinfo_send_data_internal(control, data);
3387 }
3388 lck_mtx_unlock(&control->ncs_mtx);
3389 }
3390 lck_mtx_unlock(&nstat_mtx);
3391 }
3392
3393 static void
3394 nstat_sysinfo_generate_report(void)
3395 {
3396 mbuf_report_peak_usage();
3397 tcp_report_stats();
3398 nstat_ifnet_report_ecn_stats();
3399 nstat_ifnet_report_lim_stats();
3400 nstat_net_api_report_stats();
3401 }
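/*
 * Invoked from nstat_idle_check()'s periodic thread call below; each helper
 * rate-limits itself (see the report-interval check in
 * nstat_net_api_report_stats()), so calling this once per idle-check cycle
 * is sufficient.
 */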
3402
3403 #pragma mark -- net_api --
3404
3405 static struct net_api_stats net_api_stats_before;
3406 static u_int64_t net_api_stats_last_report_time;
3407
3408 static void
3409 nstat_net_api_report_stats(void)
3410 {
3411 struct nstat_sysinfo_data data;
3412 struct nstat_sysinfo_net_api_stats *st = &data.u.net_api_stats;
3413 u_int64_t uptime;
3414
3415 uptime = net_uptime();
3416
3417 if ((u_int32_t)(uptime - net_api_stats_last_report_time) <
3418 net_api_stats_report_interval) {
3419 return;
3420 }
3421
3422 st->report_interval = uptime - net_api_stats_last_report_time;
3423 net_api_stats_last_report_time = uptime;
3424
3425 data.flags = NSTAT_SYSINFO_NET_API_STATS;
3426 data.unsent_data_cnt = 0;
3427
3428 /*
3429 * Some of the fields in the report are the current value and
3430 * other fields are the delta from the last report:
3431 * - Report difference for the per flow counters as they increase
3432 * with time
3433 * - Report current value for other counters as they tend not to change
3434 * much with time
3435 */
3436 #define STATCOPY(f) \
3437 (st->net_api_stats.f = net_api_stats.f)
3438 #define STATDIFF(f) \
3439 (st->net_api_stats.f = net_api_stats.f - net_api_stats_before.f)
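/*
 * As a worked example, STATDIFF(nas_socket_alloc_total) expands to
 *
 *   (st->net_api_stats.nas_socket_alloc_total =
 *       net_api_stats.nas_socket_alloc_total -
 *       net_api_stats_before.nas_socket_alloc_total)
 *
 * so the report carries only the sockets allocated since the previous
 * report, while STATCOPY simply snapshots the current value.
 */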
3440
3441 STATCOPY(nas_iflt_attach_count);
3442 STATCOPY(nas_iflt_attach_total);
3443 STATCOPY(nas_iflt_attach_os_total);
3444
3445 STATCOPY(nas_ipf_add_count);
3446 STATCOPY(nas_ipf_add_total);
3447 STATCOPY(nas_ipf_add_os_total);
3448
3449 STATCOPY(nas_sfltr_register_count);
3450 STATCOPY(nas_sfltr_register_total);
3451 STATCOPY(nas_sfltr_register_os_total);
3452
3453 STATDIFF(nas_socket_alloc_total);
3454 STATDIFF(nas_socket_in_kernel_total);
3455 STATDIFF(nas_socket_in_kernel_os_total);
3456 STATDIFF(nas_socket_necp_clientuuid_total);
3457
3458 STATDIFF(nas_socket_domain_local_total);
3459 STATDIFF(nas_socket_domain_route_total);
3460 STATDIFF(nas_socket_domain_inet_total);
3461 STATDIFF(nas_socket_domain_inet6_total);
3462 STATDIFF(nas_socket_domain_system_total);
3463 STATDIFF(nas_socket_domain_multipath_total);
3464 STATDIFF(nas_socket_domain_key_total);
3465 STATDIFF(nas_socket_domain_ndrv_total);
3466 STATDIFF(nas_socket_domain_other_total);
3467
3468 STATDIFF(nas_socket_inet_stream_total);
3469 STATDIFF(nas_socket_inet_dgram_total);
3470 STATDIFF(nas_socket_inet_dgram_connected);
3471 STATDIFF(nas_socket_inet_dgram_dns);
3472 STATDIFF(nas_socket_inet_dgram_no_data);
3473
3474 STATDIFF(nas_socket_inet6_stream_total);
3475 STATDIFF(nas_socket_inet6_dgram_total);
3476 STATDIFF(nas_socket_inet6_dgram_connected);
3477 STATDIFF(nas_socket_inet6_dgram_dns);
3478 STATDIFF(nas_socket_inet6_dgram_no_data);
3479
3480 STATDIFF(nas_socket_mcast_join_total);
3481 STATDIFF(nas_socket_mcast_join_os_total);
3482
3483 STATDIFF(nas_sock_inet6_stream_exthdr_in);
3484 STATDIFF(nas_sock_inet6_stream_exthdr_out);
3485 STATDIFF(nas_sock_inet6_dgram_exthdr_in);
3486 STATDIFF(nas_sock_inet6_dgram_exthdr_out);
3487
3488 STATDIFF(nas_nx_flow_inet_stream_total);
3489 STATDIFF(nas_nx_flow_inet_dgram_total);
3490
3491 STATDIFF(nas_nx_flow_inet6_stream_total);
3492 STATDIFF(nas_nx_flow_inet6_dgram_total);
3493
3494 STATCOPY(nas_ifnet_alloc_count);
3495 STATCOPY(nas_ifnet_alloc_total);
3496 STATCOPY(nas_ifnet_alloc_os_count);
3497 STATCOPY(nas_ifnet_alloc_os_total);
3498
3499 STATCOPY(nas_pf_addrule_total);
3500 STATCOPY(nas_pf_addrule_os);
3501
3502 STATCOPY(nas_vmnet_total);
3503
3504 #undef STATCOPY
3505 #undef STATDIFF
3506
3507 nstat_sysinfo_send_data(&data);
3508
3509 /*
3510 * Save a copy of the current fields so we can diff them the next time
3511 */
3512 memcpy(&net_api_stats_before, &net_api_stats,
3513 sizeof(struct net_api_stats));
3514 _CASSERT(sizeof(net_api_stats_before) == sizeof(net_api_stats));
3515 }
3516
3517
3518 #pragma mark -- Kernel Control Socket --
3519
3520 static kern_ctl_ref nstat_ctlref = NULL;
3521 static lck_grp_t *nstat_lck_grp = NULL;
3522
3523 static errno_t nstat_control_connect(kern_ctl_ref kctl, struct sockaddr_ctl *sac, void **uinfo);
3524 static errno_t nstat_control_disconnect(kern_ctl_ref kctl, u_int32_t unit, void *uinfo);
3525 static errno_t nstat_control_send(kern_ctl_ref kctl, u_int32_t unit, void *uinfo, mbuf_t m, int flags);
3526
3527 static errno_t
3528 nstat_enqueue_success(
3529 uint64_t context,
3530 nstat_control_state *state,
3531 u_int16_t flags)
3532 {
3533 nstat_msg_hdr success;
3534 errno_t result;
3535
3536 bzero(&success, sizeof(success));
3537 success.context = context;
3538 success.type = NSTAT_MSG_TYPE_SUCCESS;
3539 success.length = sizeof(success);
3540 success.flags = flags;
3541 result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &success,
3542 sizeof(success), CTL_DATA_EOR | CTL_DATA_CRIT);
3543 if (result != 0) {
3544 if (nstat_debug != 0) {
3545 printf("%s: could not enqueue success message %d\n",
3546 __func__, result);
3547 }
3548 nstat_stats.nstat_successmsgfailures += 1;
3549 }
3550 return result;
3551 }
3552
3553 static errno_t
3554 nstat_control_send_goodbye(
3555 nstat_control_state *state,
3556 nstat_src *src)
3557 {
3558 errno_t result = 0;
3559 int failed = 0;
3560
3561 if (nstat_control_reporting_allowed(state, src)) {
3562 if ((state->ncs_flags & NSTAT_FLAG_SUPPORTS_UPDATES) != 0) {
3563 result = nstat_control_send_update(state, src, 0, NSTAT_MSG_HDR_FLAG_CLOSING, NULL);
3564 if (result != 0) {
3565 failed = 1;
3566 if (nstat_debug != 0) {
3567 printf("%s - nstat_control_send_update() %d\n", __func__, result);
3568 }
3569 }
3570 } else {
3571 // send one last counts notification
3572 result = nstat_control_send_counts(state, src, 0, NSTAT_MSG_HDR_FLAG_CLOSING, NULL);
3573 if (result != 0) {
3574 failed = 1;
3575 if (nstat_debug != 0) {
3576 printf("%s - nstat_control_send_counts() %d\n", __func__, result);
3577 }
3578 }
3579
3580 // send a last description
3581 result = nstat_control_send_description(state, src, 0, NSTAT_MSG_HDR_FLAG_CLOSING);
3582 if (result != 0) {
3583 failed = 1;
3584 if (nstat_debug != 0) {
3585 printf("%s - nstat_control_send_description() %d\n", __func__, result);
3586 }
3587 }
3588 }
3589 }
3590
3591 // send the source removed notification
3592 result = nstat_control_send_removed(state, src);
3593 if (result != 0) {
3594 failed = 1;
3595 if (nstat_debug != 0) {
3596 printf("%s - nstat_control_send_removed() %d\n", __func__, result);
3597 }
3598 }
3599
3600 if (failed != 0) {
3601 nstat_stats.nstat_control_send_goodbye_failures++;
3602 }
3603
3604
3605 return result;
3606 }
3607
3608 static errno_t
3609 nstat_flush_accumulated_msgs(
3610 nstat_control_state *state)
3611 {
3612 errno_t result = 0;
3613 if (state->ncs_accumulated != NULL && mbuf_len(state->ncs_accumulated) > 0) {
3614 mbuf_pkthdr_setlen(state->ncs_accumulated, mbuf_len(state->ncs_accumulated));
3615 result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, state->ncs_accumulated, CTL_DATA_EOR);
3616 if (result != 0) {
3617 nstat_stats.nstat_flush_accumulated_msgs_failures++;
3618 if (nstat_debug != 0) {
3619 printf("%s - ctl_enqueuembuf failed: %d\n", __func__, result);
3620 }
3621 mbuf_freem(state->ncs_accumulated);
3622 }
3623 state->ncs_accumulated = NULL;
3624 }
3625 return result;
3626 }
3627
3628 static errno_t
3629 nstat_accumulate_msg(
3630 nstat_control_state *state,
3631 nstat_msg_hdr *hdr,
3632 size_t length)
3633 {
3634 if (state->ncs_accumulated && mbuf_trailingspace(state->ncs_accumulated) < length) {
3635 // The new message won't fit; flush the current mbuf first
3636 nstat_flush_accumulated_msgs(state);
3637 }
3638
3639 errno_t result = 0;
3640
3641 if (state->ncs_accumulated == NULL) {
3642 unsigned int one = 1;
3643 if (mbuf_allocpacket(MBUF_DONTWAIT, NSTAT_MAX_MSG_SIZE, &one, &state->ncs_accumulated) != 0) {
3644 if (nstat_debug != 0) {
3645 printf("%s - mbuf_allocpacket failed\n", __func__);
3646 }
3647 result = ENOMEM;
3648 } else {
3649 mbuf_setlen(state->ncs_accumulated, 0);
3650 }
3651 }
3652
3653 if (result == 0) {
3654 hdr->length = length;
3655 result = mbuf_copyback(state->ncs_accumulated, mbuf_len(state->ncs_accumulated),
3656 length, hdr, MBUF_DONTWAIT);
3657 }
3658
3659 if (result != 0) {
3660 nstat_flush_accumulated_msgs(state);
3661 if (nstat_debug != 0) {
3662 printf("%s - resorting to ctl_enqueuedata\n", __func__);
3663 }
3664 result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, hdr, length, CTL_DATA_EOR);
3665 }
3666
3667 if (result != 0) {
3668 nstat_stats.nstat_accumulate_msg_failures++;
3669 }
3670
3671 return result;
3672 }
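/*
 * nstat_accumulate_msg() and nstat_flush_accumulated_msgs() together
 * implement simple message coalescing: messages are appended to a single
 * NSTAT_MAX_MSG_SIZE mbuf that is later enqueued as one CTL_DATA_EOR
 * datagram. A typical caller (sketch; see
 * nstat_control_handle_query_request() below) looks like:
 *
 *   TAILQ_FOREACH_SAFE(src, &state->ncs_src_queue, ns_control_link, tmpsrc) {
 *       result = nstat_control_append_counts(state, src, &gone);
 *       // ... error and continuation handling ...
 *   }
 *   nstat_flush_accumulated_msgs(state); // push out whatever accumulated
 *
 * If a message doesn't fit, the pending mbuf is flushed first; if appending
 * still fails, the message falls back to a direct ctl_enqueuedata().
 */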
3673
3674 static void*
3675 nstat_idle_check(
3676 __unused thread_call_param_t p0,
3677 __unused thread_call_param_t p1)
3678 {
3679 nstat_control_state *control;
3680 nstat_src *src, *tmpsrc;
3681 tailq_head_nstat_src dead_list;
3682 TAILQ_INIT(&dead_list);
3683
3684 lck_mtx_lock(&nstat_mtx);
3685
3686 nstat_idle_time = 0;
3687
3688 for (control = nstat_controls; control; control = control->ncs_next) {
3689 lck_mtx_lock(&control->ncs_mtx);
3690 if (!(control->ncs_flags & NSTAT_FLAG_REQCOUNTS)) {
3691 TAILQ_FOREACH_SAFE(src, &control->ncs_src_queue, ns_control_link, tmpsrc)
3692 {
3693 if (src->provider->nstat_gone(src->cookie)) {
3694 errno_t result;
3695
3696 // Pull it off the list
3697 TAILQ_REMOVE(&control->ncs_src_queue, src, ns_control_link);
3698
3699 result = nstat_control_send_goodbye(control, src);
3700
3701 // Put this on the list to release later
3702 TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
3703 }
3704 }
3705 }
3706 control->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS;
3707 lck_mtx_unlock(&control->ncs_mtx);
3708 }
3709
3710 if (nstat_controls) {
3711 clock_interval_to_deadline(60, NSEC_PER_SEC, &nstat_idle_time);
3712 thread_call_func_delayed((thread_call_func_t)nstat_idle_check, NULL, nstat_idle_time);
3713 }
3714
3715 lck_mtx_unlock(&nstat_mtx);
3716
3717 /* Generate any system level reports, if needed */
3718 nstat_sysinfo_generate_report();
3719
3720 // Release the sources now that we aren't holding lots of locks
3721 while ((src = TAILQ_FIRST(&dead_list))) {
3722 TAILQ_REMOVE(&dead_list, src, ns_control_link);
3723 nstat_control_cleanup_source(NULL, src, FALSE);
3724 }
3725
3726
3727 return NULL;
3728 }
3729
3730 static void
3731 nstat_control_register(void)
3732 {
3733 // Create our lock group first
3734 lck_grp_attr_t *grp_attr = lck_grp_attr_alloc_init();
3735 lck_grp_attr_setdefault(grp_attr);
3736 nstat_lck_grp = lck_grp_alloc_init("network statistics kctl", grp_attr);
3737 lck_grp_attr_free(grp_attr);
3738
3739 lck_mtx_init(&nstat_mtx, nstat_lck_grp, NULL);
3740
3741 // Register the control
3742 struct kern_ctl_reg nstat_control;
3743 bzero(&nstat_control, sizeof(nstat_control));
3744 strlcpy(nstat_control.ctl_name, NET_STAT_CONTROL_NAME, sizeof(nstat_control.ctl_name));
3745 nstat_control.ctl_flags = CTL_FLAG_REG_EXTENDED | CTL_FLAG_REG_CRIT;
3746 nstat_control.ctl_sendsize = nstat_sendspace;
3747 nstat_control.ctl_recvsize = nstat_recvspace;
3748 nstat_control.ctl_connect = nstat_control_connect;
3749 nstat_control.ctl_disconnect = nstat_control_disconnect;
3750 nstat_control.ctl_send = nstat_control_send;
3751
3752 ctl_register(&nstat_control, &nstat_ctlref);
3753 }
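/*
 * A minimal userspace sketch (standard kern_control APIs, error handling
 * omitted) of reaching this control:
 *
 *   #include <sys/socket.h>
 *   #include <sys/sys_domain.h>
 *   #include <sys/kern_control.h>
 *   #include <sys/ioctl.h>
 *   #include <string.h>
 *   // NET_STAT_CONTROL_NAME comes from the private net/ntstat.h header
 *
 *   int fd = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);
 *   struct ctl_info info;
 *   memset(&info, 0, sizeof(info));
 *   strlcpy(info.ctl_name, NET_STAT_CONTROL_NAME, sizeof(info.ctl_name));
 *   ioctl(fd, CTLIOCGINFO, &info);       // resolve name -> ctl_id
 *   struct sockaddr_ctl sc;
 *   memset(&sc, 0, sizeof(sc));
 *   sc.sc_len = sizeof(sc);
 *   sc.sc_family = AF_SYSTEM;
 *   sc.ss_sysaddr = AF_SYS_CONTROL;
 *   sc.sc_id = info.ctl_id;
 *   sc.sc_unit = 0;                      // let the kernel pick a unit
 *   connect(fd, (struct sockaddr *)&sc, sizeof(sc));
 *
 * connect() lands in nstat_control_connect() below; messages written to fd
 * are dispatched by nstat_control_send().
 */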
3754
3755 static void
3756 nstat_control_cleanup_source(
3757 nstat_control_state *state,
3758 struct nstat_src *src,
3759 boolean_t locked)
3760 {
3761 errno_t result;
3762
3763 if (state) {
3764 result = nstat_control_send_removed(state, src);
3765 if (result != 0) {
3766 nstat_stats.nstat_control_cleanup_source_failures++;
3767 if (nstat_debug != 0) {
3768 printf("%s - nstat_control_send_removed() %d\n",
3769 __func__, result);
3770 }
3771 }
3772 }
3773 // Clean up the source.
3774 src->provider->nstat_release(src->cookie, locked);
3775 OSFree(src, sizeof(*src), nstat_malloc_tag);
3776 }
3777
3778
3779 static bool
3780 nstat_control_reporting_allowed(
3781 nstat_control_state *state,
3782 nstat_src *src)
3783 {
3784 if (src->provider->nstat_reporting_allowed == NULL) {
3785 return TRUE;
3786 }
3787
3788 return src->provider->nstat_reporting_allowed(src->cookie,
3789 &state->ncs_provider_filters[src->provider->nstat_provider_id]);
3792 }
3793
3794
3795 static errno_t
3796 nstat_control_connect(
3797 kern_ctl_ref kctl,
3798 struct sockaddr_ctl *sac,
3799 void **uinfo)
3800 {
3801 nstat_control_state *state = OSMalloc(sizeof(*state), nstat_malloc_tag);
3802 if (state == NULL) {
3803 return ENOMEM;
3804 }
3805
3806 bzero(state, sizeof(*state));
3807 lck_mtx_init(&state->ncs_mtx, nstat_lck_grp, NULL);
3808 state->ncs_kctl = kctl;
3809 state->ncs_unit = sac->sc_unit;
3810 state->ncs_flags = NSTAT_FLAG_REQCOUNTS;
3811 *uinfo = state;
3812
3813 lck_mtx_lock(&nstat_mtx);
3814 state->ncs_next = nstat_controls;
3815 nstat_controls = state;
3816
3817 if (nstat_idle_time == 0) {
3818 clock_interval_to_deadline(60, NSEC_PER_SEC, &nstat_idle_time);
3819 thread_call_func_delayed((thread_call_func_t)nstat_idle_check, NULL, nstat_idle_time);
3820 }
3821
3822 lck_mtx_unlock(&nstat_mtx);
3823
3824 return 0;
3825 }
3826
3827 static errno_t
3828 nstat_control_disconnect(
3829 __unused kern_ctl_ref kctl,
3830 __unused u_int32_t unit,
3831 void *uinfo)
3832 {
3833 u_int32_t watching;
3834 nstat_control_state *state = (nstat_control_state*)uinfo;
3835 tailq_head_nstat_src cleanup_list;
3836 nstat_src *src;
3837
3838 TAILQ_INIT(&cleanup_list);
3839
3840 // pull it out of the global list of states
3841 lck_mtx_lock(&nstat_mtx);
3842 nstat_control_state **statepp;
3843 for (statepp = &nstat_controls; *statepp; statepp = &(*statepp)->ncs_next) {
3844 if (*statepp == state) {
3845 *statepp = state->ncs_next;
3846 break;
3847 }
3848 }
3849 lck_mtx_unlock(&nstat_mtx);
3850
3851 lck_mtx_lock(&state->ncs_mtx);
3852 // Stop watching for sources
3853 nstat_provider *provider;
3854 watching = state->ncs_watching;
3855 state->ncs_watching = 0;
3856 for (provider = nstat_providers; provider && watching; provider = provider->next) {
3857 if ((watching & (1 << provider->nstat_provider_id)) != 0) {
3858 watching &= ~(1 << provider->nstat_provider_id);
3859 provider->nstat_watcher_remove(state);
3860 }
3861 }
3862
3863 // set cleanup flags
3864 state->ncs_flags |= NSTAT_FLAG_CLEANUP;
3865
3866 if (state->ncs_accumulated) {
3867 mbuf_freem(state->ncs_accumulated);
3868 state->ncs_accumulated = NULL;
3869 }
3870
3871 // Copy out the list of sources
3872 TAILQ_CONCAT(&cleanup_list, &state->ncs_src_queue, ns_control_link);
3873 lck_mtx_unlock(&state->ncs_mtx);
3874
3875 while ((src = TAILQ_FIRST(&cleanup_list))) {
3876 TAILQ_REMOVE(&cleanup_list, src, ns_control_link);
3877 nstat_control_cleanup_source(NULL, src, FALSE);
3878 }
3879
3880 lck_mtx_destroy(&state->ncs_mtx, nstat_lck_grp);
3881 OSFree(state, sizeof(*state), nstat_malloc_tag);
3882
3883 return 0;
3884 }
3885
3886 static nstat_src_ref_t
3887 nstat_control_next_src_ref(
3888 nstat_control_state *state)
3889 {
3890 return ++state->ncs_next_srcref;
3891 }
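/*
 * Source refs are handed out from a simple per-control incrementing counter;
 * nstat_control_source_add() below refuses to hand out NSTAT_SRC_REF_INVALID,
 * so a client never sees an invalid ref even if the counter wraps.
 */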
3892
3893 static errno_t
3894 nstat_control_send_counts(
3895 nstat_control_state *state,
3896 nstat_src *src,
3897 u_int64_t context,
3898 u_int16_t hdr_flags,
3899 int *gone)
3900 {
3901 nstat_msg_src_counts counts;
3902 errno_t result = 0;
3903
3904 /* Some providers may not have any counts to send */
3905 if (src->provider->nstat_counts == NULL) {
3906 return 0;
3907 }
3908
3909 bzero(&counts, sizeof(counts));
3910 counts.hdr.type = NSTAT_MSG_TYPE_SRC_COUNTS;
3911 counts.hdr.length = sizeof(counts);
3912 counts.hdr.flags = hdr_flags;
3913 counts.hdr.context = context;
3914 counts.srcref = src->srcref;
3915 counts.event_flags = 0;
3916
3917 if (src->provider->nstat_counts(src->cookie, &counts.counts, gone) == 0) {
3918 if ((src->filter & NSTAT_FILTER_NOZEROBYTES) &&
3919 counts.counts.nstat_rxbytes == 0 &&
3920 counts.counts.nstat_txbytes == 0) {
3921 result = EAGAIN;
3922 } else {
3923 result = ctl_enqueuedata(state->ncs_kctl,
3924 state->ncs_unit, &counts, sizeof(counts),
3925 CTL_DATA_EOR);
3926 if (result != 0) {
3927 nstat_stats.nstat_sendcountfailures += 1;
3928 }
3929 }
3930 }
3931 return result;
3932 }
3933
3934 static errno_t
3935 nstat_control_append_counts(
3936 nstat_control_state *state,
3937 nstat_src *src,
3938 int *gone)
3939 {
3940 /* Some providers may not have any counts to send */
3941 if (!src->provider->nstat_counts) {
3942 return 0;
3943 }
3944
3945 nstat_msg_src_counts counts;
3946 bzero(&counts, sizeof(counts));
3947 counts.hdr.type = NSTAT_MSG_TYPE_SRC_COUNTS;
3948 counts.hdr.length = sizeof(counts);
3949 counts.srcref = src->srcref;
3950 counts.event_flags = 0;
3951
3952 errno_t result = 0;
3953 result = src->provider->nstat_counts(src->cookie, &counts.counts, gone);
3954 if (result != 0) {
3955 return result;
3956 }
3957
3958 if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
3959 counts.counts.nstat_rxbytes == 0 && counts.counts.nstat_txbytes == 0) {
3960 return EAGAIN;
3961 }
3962
3963 return nstat_accumulate_msg(state, &counts.hdr, counts.hdr.length);
3964 }
3965
3966 static int
3967 nstat_control_send_description(
3968 nstat_control_state *state,
3969 nstat_src *src,
3970 u_int64_t context,
3971 u_int16_t hdr_flags)
3972 {
3973 // Provider doesn't support getting the descriptor? Done.
3974 if (src->provider->nstat_descriptor_length == 0 ||
3975 src->provider->nstat_copy_descriptor == NULL) {
3976 return EOPNOTSUPP;
3977 }
3978
3979 // Allocate storage for the descriptor message
3980 mbuf_t msg;
3981 unsigned int one = 1;
3982 u_int32_t size = offsetof(nstat_msg_src_description, data) + src->provider->nstat_descriptor_length;
3983 if (mbuf_allocpacket(MBUF_DONTWAIT, size, &one, &msg) != 0) {
3984 return ENOMEM;
3985 }
3986
3987 nstat_msg_src_description *desc = (nstat_msg_src_description*)mbuf_data(msg);
3988 bzero(desc, size);
3989 mbuf_setlen(msg, size);
3990 mbuf_pkthdr_setlen(msg, mbuf_len(msg));
3991
3992 // Query the provider for the provider specific bits
3993 errno_t result = src->provider->nstat_copy_descriptor(src->cookie, desc->data, src->provider->nstat_descriptor_length);
3994
3995 if (result != 0) {
3996 mbuf_freem(msg);
3997 return result;
3998 }
3999
4000 desc->hdr.context = context;
4001 desc->hdr.type = NSTAT_MSG_TYPE_SRC_DESC;
4002 desc->hdr.length = size;
4003 desc->hdr.flags = hdr_flags;
4004 desc->srcref = src->srcref;
4005 desc->event_flags = 0;
4006 desc->provider = src->provider->nstat_provider_id;
4007
4008 result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg, CTL_DATA_EOR);
4009 if (result != 0) {
4010 nstat_stats.nstat_descriptionfailures += 1;
4011 mbuf_freem(msg);
4012 }
4013
4014 return result;
4015 }
4016
4017 static errno_t
4018 nstat_control_append_description(
4019 nstat_control_state *state,
4020 nstat_src *src)
4021 {
4022 size_t size = offsetof(nstat_msg_src_description, data) + src->provider->nstat_descriptor_length;
4023 if (size > 512 || src->provider->nstat_descriptor_length == 0 ||
4024 src->provider->nstat_copy_descriptor == NULL) {
4025 return EOPNOTSUPP;
4026 }
4027
4028 // Fill out a buffer on the stack; it will be copied into an mbuf later
4029 u_int64_t buffer[size / sizeof(u_int64_t) + 1]; // u_int64_t to ensure alignment
4030 bzero(buffer, size);
4031
4032 nstat_msg_src_description *desc = (nstat_msg_src_description*)buffer;
4033 desc->hdr.type = NSTAT_MSG_TYPE_SRC_DESC;
4034 desc->hdr.length = size;
4035 desc->srcref = src->srcref;
4036 desc->event_flags = 0;
4037 desc->provider = src->provider->nstat_provider_id;
4038
4039 errno_t result = 0;
4040 // Fill in the description
4041 // Query the provider for the provider specific bits
4042 result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
4043 src->provider->nstat_descriptor_length);
4044 if (result != 0) {
4045 return result;
4046 }
4047
4048 return nstat_accumulate_msg(state, &desc->hdr, size);
4049 }
4050
4051 static int
4052 nstat_control_send_update(
4053 nstat_control_state *state,
4054 nstat_src *src,
4055 u_int64_t context,
4056 u_int16_t hdr_flags,
4057 int *gone)
4058 {
4059 // Provider doesn't support getting the descriptor or counts? Done.
4060 if ((src->provider->nstat_descriptor_length == 0 ||
4061 src->provider->nstat_copy_descriptor == NULL) &&
4062 src->provider->nstat_counts == NULL) {
4063 return EOPNOTSUPP;
4064 }
4065
4066 // Allocate storage for the descriptor message
4067 mbuf_t msg;
4068 unsigned int one = 1;
4069 u_int32_t size = offsetof(nstat_msg_src_update, data) +
4070 src->provider->nstat_descriptor_length;
4071 if (mbuf_allocpacket(MBUF_DONTWAIT, size, &one, &msg) != 0) {
4072 return ENOMEM;
4073 }
4074
4075 nstat_msg_src_update *desc = (nstat_msg_src_update*)mbuf_data(msg);
4076 bzero(desc, size);
4077 desc->hdr.context = context;
4078 desc->hdr.type = NSTAT_MSG_TYPE_SRC_UPDATE;
4079 desc->hdr.length = size;
4080 desc->hdr.flags = hdr_flags;
4081 desc->srcref = src->srcref;
4082 desc->event_flags = 0;
4083 desc->provider = src->provider->nstat_provider_id;
4084
4085 mbuf_setlen(msg, size);
4086 mbuf_pkthdr_setlen(msg, mbuf_len(msg));
4087
4088 errno_t result = 0;
4089 if (src->provider->nstat_descriptor_length != 0 && src->provider->nstat_copy_descriptor) {
4090 // Query the provider for the provider specific bits
4091 result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
4092 src->provider->nstat_descriptor_length);
4093 if (result != 0) {
4094 mbuf_freem(msg);
4095 return result;
4096 }
4097 }
4098
4099 if (src->provider->nstat_counts) {
4100 result = src->provider->nstat_counts(src->cookie, &desc->counts, gone);
4101 if (result == 0) {
4102 if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
4103 desc->counts.nstat_rxbytes == 0 && desc->counts.nstat_txbytes == 0) {
4104 result = EAGAIN;
4105 } else {
4106 result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg, CTL_DATA_EOR);
4107 }
4108 }
4109 }
4110
4111 if (result != 0) {
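// "srcupate" (sic): the counter name carries a historical typo; it is kept
// as-is because the nstat_stats layout is shared via net/ntstat.h.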
4112 nstat_stats.nstat_srcupatefailures += 1;
4113 mbuf_freem(msg);
4114 }
4115
4116 return result;
4117 }
4118
4119 static errno_t
4120 nstat_control_append_update(
4121 nstat_control_state *state,
4122 nstat_src *src,
4123 int *gone)
4124 {
4125 size_t size = offsetof(nstat_msg_src_update, data) + src->provider->nstat_descriptor_length;
4126 if (size > 512 || ((src->provider->nstat_descriptor_length == 0 ||
4127 src->provider->nstat_copy_descriptor == NULL) &&
4128 src->provider->nstat_counts == NULL)) {
4129 return EOPNOTSUPP;
4130 }
4131
4132 // Fill out a buffer on the stack; it will be copied into an mbuf later
4133 u_int64_t buffer[size / sizeof(u_int64_t) + 1]; // u_int64_t to ensure alignment
4134 bzero(buffer, size);
4135
4136 nstat_msg_src_update *desc = (nstat_msg_src_update*)buffer;
4137 desc->hdr.type = NSTAT_MSG_TYPE_SRC_UPDATE;
4138 desc->hdr.length = size;
4139 desc->srcref = src->srcref;
4140 desc->event_flags = 0;
4141 desc->provider = src->provider->nstat_provider_id;
4142
4143 errno_t result = 0;
4144 // Fill in the description
4145 if (src->provider->nstat_descriptor_length != 0 && src->provider->nstat_copy_descriptor) {
4146 // Query the provider for the provider specific bits
4147 result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
4148 src->provider->nstat_descriptor_length);
4149 if (result != 0) {
4150 nstat_stats.nstat_copy_descriptor_failures++;
4151 if (nstat_debug != 0) {
4152 printf("%s: src->provider->nstat_copy_descriptor: %d\n", __func__, result);
4153 }
4154 return result;
4155 }
4156 }
4157
4158 if (src->provider->nstat_counts) {
4159 result = src->provider->nstat_counts(src->cookie, &desc->counts, gone);
4160 if (result != 0) {
4161 nstat_stats.nstat_provider_counts_failures++;
4162 if (nstat_debug != 0) {
4163 printf("%s: src->provider->nstat_counts: %d\n", __func__, result);
4164 }
4165 return result;
4166 }
4167
4168 if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
4169 desc->counts.nstat_rxbytes == 0 && desc->counts.nstat_txbytes == 0) {
4170 return EAGAIN;
4171 }
4172 }
4173
4174 return nstat_accumulate_msg(state, &desc->hdr, size);
4175 }
4176
4177 static errno_t
4178 nstat_control_send_removed(
4179 nstat_control_state *state,
4180 nstat_src *src)
4181 {
4182 nstat_msg_src_removed removed;
4183 errno_t result;
4184
4185 bzero(&removed, sizeof(removed));
4186 removed.hdr.type = NSTAT_MSG_TYPE_SRC_REMOVED;
4187 removed.hdr.length = sizeof(removed);
4188 removed.hdr.context = 0;
4189 removed.srcref = src->srcref;
4190 result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &removed,
4191 sizeof(removed), CTL_DATA_EOR | CTL_DATA_CRIT);
4192 if (result != 0) {
4193 nstat_stats.nstat_msgremovedfailures += 1;
4194 }
4195
4196 return result;
4197 }
4198
4199 static errno_t
4200 nstat_control_handle_add_request(
4201 nstat_control_state *state,
4202 mbuf_t m)
4203 {
4204 errno_t result;
4205
4206 // Verify the header fits in the first mbuf
4207 if (mbuf_len(m) < offsetof(nstat_msg_add_src_req, param)) {
4208 return EINVAL;
4209 }
4210
4211 // Calculate the length of the parameter field
4212 int32_t paramlength = mbuf_pkthdr_len(m) - offsetof(nstat_msg_add_src_req, param);
4213 if (paramlength < 0 || paramlength > 2 * 1024) {
4214 return EINVAL;
4215 }
4216
4217 nstat_provider *provider = NULL;
4218 nstat_provider_cookie_t cookie = NULL;
4219 nstat_msg_add_src_req *req = mbuf_data(m);
4220 if (mbuf_pkthdr_len(m) > mbuf_len(m)) {
4221 // the parameter spans more than the first mbuf, so make a contiguous copy
4222 void *data = OSMalloc(paramlength, nstat_malloc_tag);
4223
4224 if (!data) {
4225 return ENOMEM;
4226 }
4227 result = mbuf_copydata(m, offsetof(nstat_msg_add_src_req, param), paramlength, data);
4228 if (result == 0) {
4229 result = nstat_lookup_entry(req->provider, data, paramlength, &provider, &cookie);
4230 }
4231 OSFree(data, paramlength, nstat_malloc_tag);
4232 } else {
4233 result = nstat_lookup_entry(req->provider, (void*)&req->param, paramlength, &provider, &cookie);
4234 }
4235
4236 if (result != 0) {
4237 return result;
4238 }
4239
4240 result = nstat_control_source_add(req->hdr.context, state, provider, cookie);
4241 if (result != 0) {
4242 provider->nstat_release(cookie, 0);
4243 }
4244
4245 return result;
4246 }
4247
4248 static errno_t
4249 nstat_set_provider_filter(
4250 nstat_control_state *state,
4251 nstat_msg_add_all_srcs *req)
4252 {
4253 nstat_provider_id_t provider_id = req->provider;
4254
4255 u_int32_t prev_ncs_watching = atomic_or_32_ov(&state->ncs_watching, (1 << provider_id));
4256
4257 if ((prev_ncs_watching & (1 << provider_id)) != 0) {
4258 return EALREADY;
4259 }
4260
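// Note: the atomic OR above has already set this bit; the assignment below
// is redundant but harmless.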
4261 state->ncs_watching |= (1 << provider_id);
4262 state->ncs_provider_filters[provider_id].npf_flags = req->filter;
4263 state->ncs_provider_filters[provider_id].npf_events = req->events;
4264 state->ncs_provider_filters[provider_id].npf_pid = req->target_pid;
4265 uuid_copy(state->ncs_provider_filters[provider_id].npf_uuid, req->target_uuid);
4266 return 0;
4267 }
4268
4269 static errno_t
4270 nstat_control_handle_add_all(
4271 nstat_control_state *state,
4272 mbuf_t m)
4273 {
4274 errno_t result = 0;
4275
4276 // Verify the header fits in the first mbuf
4277 if (mbuf_len(m) < sizeof(nstat_msg_add_all_srcs)) {
4278 return EINVAL;
4279 }
4280
4281 nstat_msg_add_all_srcs *req = mbuf_data(m);
4282 if (req->provider > NSTAT_PROVIDER_LAST) {
4283 return ENOENT;
4284 }
4285
4286 nstat_provider *provider = nstat_find_provider_by_id(req->provider);
4287
4288 if (!provider) {
4289 return ENOENT;
4290 }
4291 if (provider->nstat_watcher_add == NULL) {
4292 return ENOTSUP;
4293 }
4294
4295 if (nstat_privcheck != 0) {
4296 result = priv_check_cred(kauth_cred_get(),
4297 PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0);
4298 if (result != 0) {
4299 return result;
4300 }
4301 }
4302
4303 lck_mtx_lock(&state->ncs_mtx);
4304 if (req->filter & NSTAT_FILTER_SUPPRESS_SRC_ADDED) {
4305 // Suppression of source messages implicitly requires the use of update messages
4306 state->ncs_flags |= NSTAT_FLAG_SUPPORTS_UPDATES;
4307 }
4308 lck_mtx_unlock(&state->ncs_mtx);
4309
4310 // rdar://problem/30301300 Different providers require different synchronization
4311 // to ensure that a new entry does not get double counted due to being added prior
4312 // to all current provider entries being added. Hence pass the provider the details
4313 // in the original request for this to be applied atomically
4314
4315 result = provider->nstat_watcher_add(state, req);
4316
4317 if (result == 0) {
4318 nstat_enqueue_success(req->hdr.context, state, 0);
4319 }
4320
4321 return result;
4322 }
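/*
 * A hedged sketch of the request a client sends to reach this handler
 * (field names from nstat_msg_add_all_srcs; the provider constant is
 * illustrative):
 *
 *   nstat_msg_add_all_srcs req;
 *   memset(&req, 0, sizeof(req));
 *   req.hdr.type = NSTAT_MSG_TYPE_ADD_ALL_SRCS;
 *   req.hdr.length = sizeof(req);
 *   req.hdr.context = 1;                       // echoed back in the reply
 *   req.provider = NSTAT_PROVIDER_TCP_KERNEL;  // assumed provider id
 *   req.filter = 0;
 *   req.events = 0;
 *   req.target_pid = 0;                        // 0: no pid filter
 *   uuid_clear(req.target_uuid);
 *   send(fd, &req, sizeof(req), 0);
 *
 * On success the client receives NSTAT_MSG_TYPE_SUCCESS carrying the same
 * context, followed by NSTAT_MSG_TYPE_SRC_ADDED messages as sources appear
 * (unless NSTAT_FILTER_SUPPRESS_SRC_ADDED was set in req.filter).
 */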
4323
4324 static errno_t
4325 nstat_control_source_add(
4326 u_int64_t context,
4327 nstat_control_state *state,
4328 nstat_provider *provider,
4329 nstat_provider_cookie_t cookie)
4330 {
4331 // Fill out source added message if appropriate
4332 mbuf_t msg = NULL;
4333 nstat_src_ref_t *srcrefp = NULL;
4334
4335 u_int64_t provider_filter_flags =
4336 state->ncs_provider_filters[provider->nstat_provider_id].npf_flags;
4337 boolean_t tell_user =
4338 ((provider_filter_flags & NSTAT_FILTER_SUPPRESS_SRC_ADDED) == 0);
4339 u_int32_t src_filter =
4340 (provider_filter_flags & NSTAT_FILTER_PROVIDER_NOZEROBYTES)
4341 ? NSTAT_FILTER_NOZEROBYTES : 0;
4342
4343 if (provider_filter_flags & NSTAT_FILTER_TCP_NO_EARLY_CLOSE) {
4344 src_filter |= NSTAT_FILTER_TCP_NO_EARLY_CLOSE;
4345 }
4346
4347 if (tell_user) {
4348 unsigned int one = 1;
4349
4350 if (mbuf_allocpacket(MBUF_DONTWAIT, sizeof(nstat_msg_src_added),
4351 &one, &msg) != 0) {
4352 return ENOMEM;
4353 }
4354
4355 mbuf_setlen(msg, sizeof(nstat_msg_src_added));
4356 mbuf_pkthdr_setlen(msg, mbuf_len(msg));
4357 nstat_msg_src_added *add = mbuf_data(msg);
4358 bzero(add, sizeof(*add));
4359 add->hdr.type = NSTAT_MSG_TYPE_SRC_ADDED;
4360 add->hdr.length = mbuf_len(msg);
4361 add->hdr.context = context;
4362 add->provider = provider->nstat_provider_id;
4363 srcrefp = &add->srcref;
4364 }
4365
4366 // Allocate storage for the source
4367 nstat_src *src = OSMalloc(sizeof(*src), nstat_malloc_tag);
4368 if (src == NULL) {
4369 if (msg) {
4370 mbuf_freem(msg);
4371 }
4372 return ENOMEM;
4373 }
4374
4375 // Fill in the source, including picking an unused source ref
4376 lck_mtx_lock(&state->ncs_mtx);
4377
4378 src->srcref = nstat_control_next_src_ref(state);
4379 if (srcrefp) {
4380 *srcrefp = src->srcref;
4381 }
4382
4383 if (state->ncs_flags & NSTAT_FLAG_CLEANUP || src->srcref == NSTAT_SRC_REF_INVALID) {
4384 lck_mtx_unlock(&state->ncs_mtx);
4385 OSFree(src, sizeof(*src), nstat_malloc_tag);
4386 if (msg) {
4387 mbuf_freem(msg);
4388 }
4389 return EINVAL;
4390 }
4391 src->provider = provider;
4392 src->cookie = cookie;
4393 src->filter = src_filter;
4394 src->seq = 0;
4395
4396 if (msg) {
4397 // send the source added message if appropriate
4398 errno_t result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg,
4399 CTL_DATA_EOR);
4400 if (result != 0) {
4401 nstat_stats.nstat_srcaddedfailures += 1;
4402 lck_mtx_unlock(&state->ncs_mtx);
4403 OSFree(src, sizeof(*src), nstat_malloc_tag);
4404 mbuf_freem(msg);
4405 return result;
4406 }
4407 }
4408 // Put the source in the list
4409 TAILQ_INSERT_HEAD(&state->ncs_src_queue, src, ns_control_link);
4410 src->ns_control = state;
4411
4412 lck_mtx_unlock(&state->ncs_mtx);
4413
4414 return 0;
4415 }
4416
4417 static errno_t
4418 nstat_control_handle_remove_request(
4419 nstat_control_state *state,
4420 mbuf_t m)
4421 {
4422 nstat_src_ref_t srcref = NSTAT_SRC_REF_INVALID;
4423 nstat_src *src;
4424
4425 if (mbuf_copydata(m, offsetof(nstat_msg_rem_src_req, srcref), sizeof(srcref), &srcref) != 0) {
4426 return EINVAL;
4427 }
4428
4429 lck_mtx_lock(&state->ncs_mtx);
4430
4431 // Remove this source as we look for it
4432 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
4433 {
4434 if (src->srcref == srcref) {
4435 break;
4436 }
4437 }
4438 if (src) {
4439 TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
4440 }
4441
4442 lck_mtx_unlock(&state->ncs_mtx);
4443
4444 if (src) {
4445 nstat_control_cleanup_source(state, src, FALSE);
4446 }
4447
4448 return src ? 0 : ENOENT;
4449 }
4450
4451 static errno_t
4452 nstat_control_handle_query_request(
4453 nstat_control_state *state,
4454 mbuf_t m)
4455 {
4456 // TBD: handle this from another thread so we can enqueue a lot of data
4457 // As written, if a client requests a query of all sources, this function
4458 // will be called from the client's send of the request message. We will
4459 // attempt to write responses until the buffer fills up. Since the client's
4460 // thread is blocked on send, it won't be reading unless the client uses two
4461 // threads on this socket, one for read and one for write. Two threads
4462 // probably won't work with this code anyhow since we don't have proper
4463 // locking in place yet.
4464 tailq_head_nstat_src dead_list;
4465 errno_t result = ENOENT;
4466 nstat_msg_query_src_req req;
4467
4468 if (mbuf_copydata(m, 0, sizeof(req), &req) != 0) {
4469 return EINVAL;
4470 }
4471
4472 const boolean_t all_srcs = (req.srcref == NSTAT_SRC_REF_ALL);
4473 TAILQ_INIT(&dead_list);
4474
4475 lck_mtx_lock(&state->ncs_mtx);
4476
4477 if (all_srcs) {
4478 state->ncs_flags |= NSTAT_FLAG_REQCOUNTS;
4479 }
4480 nstat_src *src, *tmpsrc;
4481 u_int64_t src_count = 0;
4482 boolean_t partial = FALSE;
4483
4484 /*
4485 * Error handling policy and sequence number generation is folded into
4486 * nstat_control_begin_query.
4487 */
4488 partial = nstat_control_begin_query(state, &req.hdr);
4489
4490
4491 TAILQ_FOREACH_SAFE(src, &state->ncs_src_queue, ns_control_link, tmpsrc)
4492 {
4493 int gone = 0;
4494
4495 // XXX ignore IFACE types?
4496 if (all_srcs || src->srcref == req.srcref) {
4497 if (nstat_control_reporting_allowed(state, src)
4498 && (!partial || !all_srcs || src->seq != state->ncs_seq)) {
4499 if (all_srcs &&
4500 (req.hdr.flags & NSTAT_MSG_HDR_FLAG_SUPPORTS_AGGREGATE) != 0) {
4501 result = nstat_control_append_counts(state, src, &gone);
4502 } else {
4503 result = nstat_control_send_counts(state, src, req.hdr.context, 0, &gone);
4504 }
4505
4506 if (ENOMEM == result || ENOBUFS == result) {
4507 /*
4508 * If the counts message failed to
4509 * enqueue then we should clear our flag so
4510 * that a client doesn't miss anything on
4511 * idle cleanup. We skip the "gone"
4512 * processing in the hope that we may
4513 * catch it another time.
4514 */
4515 state->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS;
4516 break;
4517 }
4518 if (partial) {
4519 /*
4520 * We skip over hard errors and
4521 * filtered sources.
4522 */
4523 src->seq = state->ncs_seq;
4524 src_count++;
4525 }
4526 }
4527 }
4528
4529 if (gone) {
4530 // Send one last descriptor message so the client may see the final state.
4531 // If we can't send the notification now, it will be sent in the idle
4532 // cleanup.
4533 result = nstat_control_send_description(state, src, 0, 0);
4534 if (result != 0) {
4535 nstat_stats.nstat_control_send_description_failures++;
4536 if (nstat_debug != 0) {
4537 printf("%s - nstat_control_send_description() %d\n", __func__, result);
4538 }
4539 state->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS;
4540 break;
4541 }
4542
4543 // pull src out of the list
4544 TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
4545 TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
4546 }
4547
4548 if (all_srcs) {
4549 if (src_count >= QUERY_CONTINUATION_SRC_COUNT) {
4550 break;
4551 }
4552 } else if (req.srcref == src->srcref) {
4553 break;
4554 }
4555 }
4556
4557 nstat_flush_accumulated_msgs(state);
4558
4559 u_int16_t flags = 0;
4560 if (req.srcref == NSTAT_SRC_REF_ALL) {
4561 flags = nstat_control_end_query(state, src, partial);
4562 }
4563
4564 lck_mtx_unlock(&state->ncs_mtx);
4565
4566 /*
4567 * If an error occurred enqueueing data, then allow the error to
4568 * propagate to nstat_control_send. This way, the error is sent to
4569 * user-level.
4570 */
4571 if (all_srcs && ENOMEM != result && ENOBUFS != result) {
4572 nstat_enqueue_success(req.hdr.context, state, flags);
4573 result = 0;
4574 }
4575
4576 while ((src = TAILQ_FIRST(&dead_list))) {
4577 TAILQ_REMOVE(&dead_list, src, ns_control_link);
4578 nstat_control_cleanup_source(state, src, FALSE);
4579 }
4580
4581 return result;
4582 }
4583
4584 static errno_t
4585 nstat_control_handle_get_src_description(
4586 nstat_control_state *state,
4587 mbuf_t m)
4588 {
4589 nstat_msg_get_src_description req;
4590 errno_t result = ENOENT;
4591 nstat_src *src;
4592
4593 if (mbuf_copydata(m, 0, sizeof(req), &req) != 0) {
4594 return EINVAL;
4595 }
4596
4597 lck_mtx_lock(&state->ncs_mtx);
4598 u_int64_t src_count = 0;
4599 boolean_t partial = FALSE;
4600 const boolean_t all_srcs = (req.srcref == NSTAT_SRC_REF_ALL);
4601
4602 /*
4603 * Error handling policy and sequence number generation is folded into
4604 * nstat_control_begin_query.
4605 */
4606 partial = nstat_control_begin_query(state, &req.hdr);
4607
4608 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
4609 {
4610 if (all_srcs || src->srcref == req.srcref) {
4611 if (nstat_control_reporting_allowed(state, src)
4612 && (!all_srcs || !partial || src->seq != state->ncs_seq)) {
4613 if ((req.hdr.flags & NSTAT_MSG_HDR_FLAG_SUPPORTS_AGGREGATE) != 0 && all_srcs) {
4614 result = nstat_control_append_description(state, src);
4615 } else {
4616 result = nstat_control_send_description(state, src, req.hdr.context, 0);
4617 }
4618
4619 if (ENOMEM == result || ENOBUFS == result) {
4620 /*
4621 * If the description message failed to
4622 * enqueue then we give up for now.
4623 */
4624 break;
4625 }
4626 if (partial) {
4627 /*
4628 * Note, we skip over hard errors and
4629 * filtered sources.
4630 */
4631 src->seq = state->ncs_seq;
4632 src_count++;
4633 if (src_count >= QUERY_CONTINUATION_SRC_COUNT) {
4634 break;
4635 }
4636 }
4637 }
4638
4639 if (!all_srcs) {
4640 break;
4641 }
4642 }
4643 }
4644 nstat_flush_accumulated_msgs(state);
4645
4646 u_int16_t flags = 0;
4647 if (req.srcref == NSTAT_SRC_REF_ALL) {
4648 flags = nstat_control_end_query(state, src, partial);
4649 }
4650
4651 lck_mtx_unlock(&state->ncs_mtx);
4652 /*
4653 * If an error occurred enqueueing data, then allow the error to
4654 * propagate to nstat_control_send. This way, the error is sent to
4655 * user-level.
4656 */
4657 if (all_srcs && ENOMEM != result && ENOBUFS != result) {
4658 nstat_enqueue_success(req.hdr.context, state, flags);
4659 result = 0;
4660 }
4661
4662 return result;
4663 }
4664
4665 static errno_t
4666 nstat_control_handle_set_filter(
4667 nstat_control_state *state,
4668 mbuf_t m)
4669 {
4670 nstat_msg_set_filter req;
4671 nstat_src *src;
4672
4673 if (mbuf_copydata(m, 0, sizeof(req), &req) != 0) {
4674 return EINVAL;
4675 }
4676 if (req.srcref == NSTAT_SRC_REF_ALL ||
4677 req.srcref == NSTAT_SRC_REF_INVALID) {
4678 return EINVAL;
4679 }
4680
4681 lck_mtx_lock(&state->ncs_mtx);
4682 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
4683 {
4684 if (req.srcref == src->srcref) {
4685 src->filter = req.filter;
4686 break;
4687 }
4688 }
4689 lck_mtx_unlock(&state->ncs_mtx);
4690 if (src == NULL) {
4691 return ENOENT;
4692 }
4693
4694 return 0;
4695 }
4696
4697 static void
4698 nstat_send_error(
4699 nstat_control_state *state,
4700 u_int64_t context,
4701 u_int32_t error)
4702 {
4703 errno_t result;
4704 struct nstat_msg_error err;
4705
4706 bzero(&err, sizeof(err));
4707 err.hdr.type = NSTAT_MSG_TYPE_ERROR;
4708 err.hdr.length = sizeof(err);
4709 err.hdr.context = context;
4710 err.error = error;
4711
4712 result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &err,
4713 sizeof(err), CTL_DATA_EOR | CTL_DATA_CRIT);
4714 if (result != 0) {
4715 nstat_stats.nstat_msgerrorfailures++;
4716 }
4717 }
4718
4719 static boolean_t
4720 nstat_control_begin_query(
4721 nstat_control_state *state,
4722 const nstat_msg_hdr *hdrp)
4723 {
4724 boolean_t partial = FALSE;
4725
4726 if (hdrp->flags & NSTAT_MSG_HDR_FLAG_CONTINUATION) {
4727 /* A partial query all has been requested. */
4728 partial = TRUE;
4729
4730 if (state->ncs_context != hdrp->context) {
4731 if (state->ncs_context != 0) {
4732 nstat_send_error(state, state->ncs_context, EAGAIN);
4733 }
4734
4735 /* Initialize state for a partial query all. */
4736 state->ncs_context = hdrp->context;
4737 state->ncs_seq++;
4738 }
4739 }
4740
4741 return partial;
4742 }
4743
4744 static u_int16_t
4745 nstat_control_end_query(
4746 nstat_control_state *state,
4747 nstat_src *last_src,
4748 boolean_t partial)
4749 {
4750 u_int16_t flags = 0;
4751
4752 if (last_src == NULL || !partial) {
4753 /*
4754 * We iterated through the entire srcs list or exited early
4755 * from the loop when a partial update was not requested (an
4756 * error occurred), so clear context to indicate internally
4757 * that the query is finished.
4758 */
4759 state->ncs_context = 0;
4760 } else {
4761 /*
4762 * Indicate to userlevel to make another partial request as
4763 * there are still sources left to be reported.
4764 */
4765 flags |= NSTAT_MSG_HDR_FLAG_CONTINUATION;
4766 }
4767
4768 return flags;
4769 }
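/*
 * nstat_control_begin_query() and nstat_control_end_query() implement the
 * partial "query all" protocol: a client that sets
 * NSTAT_MSG_HDR_FLAG_CONTINUATION keeps a stable context value and re-issues
 * the request until the SUCCESS reply no longer carries the continuation
 * flag. A hedged client-side sketch:
 *
 *   nstat_msg_query_src_req req;
 *   memset(&req, 0, sizeof(req));
 *   req.hdr.type = NSTAT_MSG_TYPE_QUERY_SRC;
 *   req.hdr.length = sizeof(req);
 *   req.hdr.context = my_context;              // constant across passes
 *   req.hdr.flags = NSTAT_MSG_HDR_FLAG_CONTINUATION;
 *   req.srcref = NSTAT_SRC_REF_ALL;
 *   do {
 *       send(fd, &req, sizeof(req), 0);
 *       // ... read SRC_COUNTS messages until the SUCCESS reply arrives ...
 *   } while (success_hdr.flags & NSTAT_MSG_HDR_FLAG_CONTINUATION);
 *
 * Each pass reports at most QUERY_CONTINUATION_SRC_COUNT sources; ncs_seq
 * marks sources already reported so later passes don't repeat them.
 */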
4770
4771 static errno_t
4772 nstat_control_handle_get_update(
4773 nstat_control_state *state,
4774 mbuf_t m)
4775 {
4776 nstat_msg_query_src_req req;
4777
4778 if (mbuf_copydata(m, 0, sizeof(req), &req) != 0) {
4779 return EINVAL;
4780 }
4781
4782 lck_mtx_lock(&state->ncs_mtx);
4783
4784 state->ncs_flags |= NSTAT_FLAG_SUPPORTS_UPDATES;
4785
4786 errno_t result = ENOENT;
4787 nstat_src *src, *tmpsrc;
4788 tailq_head_nstat_src dead_list;
4789 u_int64_t src_count = 0;
4790 boolean_t partial = FALSE;
4791 TAILQ_INIT(&dead_list);
4792
4793 /*
4794 * Error handling policy and sequence number generation is folded into
4795 * nstat_control_begin_query.
4796 */
4797 partial = nstat_control_begin_query(state, &req.hdr);
4798
4799 TAILQ_FOREACH_SAFE(src, &state->ncs_src_queue, ns_control_link, tmpsrc)
4800 {
4801 int gone;
4802
4803 gone = 0;
4804 if (nstat_control_reporting_allowed(state, src)) {
4805 /* Skip this source if it carries the current state
4806 * sequence number; it has already been reported in
4807 * this partial query-all sequence. */
4808 if (req.srcref == NSTAT_SRC_REF_ALL
4809 && (FALSE == partial || src->seq != state->ncs_seq)) {
4810 result = nstat_control_append_update(state, src, &gone);
4811 if (ENOMEM == result || ENOBUFS == result) {
4812 /*
4813 * If the update message failed to
4814 * enqueue then give up.
4815 */
4816 break;
4817 }
4818 if (partial) {
4819 /*
4820 * We skip over hard errors and
4821 * filtered sources.
4822 */
4823 src->seq = state->ncs_seq;
4824 src_count++;
4825 }
4826 } else if (src->srcref == req.srcref) {
4827 result = nstat_control_send_update(state, src, req.hdr.context, 0, &gone);
4828 }
4829 }
4830
4831 if (gone) {
4832 // pull src out of the list
4833 TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
4834 TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
4835 }
4836
4837 if (req.srcref != NSTAT_SRC_REF_ALL && req.srcref == src->srcref) {
4838 break;
4839 }
4840 if (src_count >= QUERY_CONTINUATION_SRC_COUNT) {
4841 break;
4842 }
4843 }
4844
4845 nstat_flush_accumulated_msgs(state);
4846
4847
4848 u_int16_t flags = 0;
4849 if (req.srcref == NSTAT_SRC_REF_ALL) {
4850 flags = nstat_control_end_query(state, src, partial);
4851 }
4852
4853 lck_mtx_unlock(&state->ncs_mtx);
4854 /*
4855 * If an error occurred enqueueing data, then allow the error to
4856 * propagate to nstat_control_send. This way, the error is sent to
4857 * user-level.
4858 */
4859 if (req.srcref == NSTAT_SRC_REF_ALL && ENOMEM != result && ENOBUFS != result) {
4860 nstat_enqueue_success(req.hdr.context, state, flags);
4861 result = 0;
4862 }
4863
4864 while ((src = TAILQ_FIRST(&dead_list))) {
4865 TAILQ_REMOVE(&dead_list, src, ns_control_link);
4866 // release src and send notification
4867 nstat_control_cleanup_source(state, src, FALSE);
4868 }
4869
4870 return result;
4871 }
4872
4873 static errno_t
4874 nstat_control_handle_subscribe_sysinfo(
4875 nstat_control_state *state)
4876 {
4877 errno_t result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0);
4878
4879 if (result != 0) {
4880 return result;
4881 }
4882
4883 lck_mtx_lock(&state->ncs_mtx);
4884 state->ncs_flags |= NSTAT_FLAG_SYSINFO_SUBSCRIBED;
4885 lck_mtx_unlock(&state->ncs_mtx);
4886
4887 return 0;
4888 }
4889
4890 static errno_t
4891 nstat_control_send(
4892 kern_ctl_ref kctl,
4893 u_int32_t unit,
4894 void *uinfo,
4895 mbuf_t m,
4896 __unused int flags)
4897 {
4898 nstat_control_state *state = (nstat_control_state*)uinfo;
4899 struct nstat_msg_hdr *hdr;
4900 struct nstat_msg_hdr storage;
4901 errno_t result = 0;
4902
4903 if (mbuf_pkthdr_len(m) < sizeof(*hdr)) {
4904 // Is this the right thing to do?
4905 mbuf_freem(m);
4906 return EINVAL;
4907 }
4908
4909 if (mbuf_len(m) >= sizeof(*hdr)) {
4910 hdr = mbuf_data(m);
4911 } else {
4912 mbuf_copydata(m, 0, sizeof(storage), &storage);
4913 hdr = &storage;
4914 }
4915
4916 // Legacy clients may not set the length
4917 // Those clients are likely not setting the flags either
4918 // Fix everything up so old clients continue to work
4919 if (hdr->length != mbuf_pkthdr_len(m)) {
4920 hdr->flags = 0;
4921 hdr->length = mbuf_pkthdr_len(m);
4922 if (hdr == &storage) {
4923 mbuf_copyback(m, 0, sizeof(*hdr), hdr, MBUF_DONTWAIT);
4924 }
4925 }
4926
4927 switch (hdr->type) {
4928 case NSTAT_MSG_TYPE_ADD_SRC:
4929 result = nstat_control_handle_add_request(state, m);
4930 break;
4931
4932 case NSTAT_MSG_TYPE_ADD_ALL_SRCS:
4933 result = nstat_control_handle_add_all(state, m);
4934 break;
4935
4936 case NSTAT_MSG_TYPE_REM_SRC:
4937 result = nstat_control_handle_remove_request(state, m);
4938 break;
4939
4940 case NSTAT_MSG_TYPE_QUERY_SRC:
4941 result = nstat_control_handle_query_request(state, m);
4942 break;
4943
4944 case NSTAT_MSG_TYPE_GET_SRC_DESC:
4945 result = nstat_control_handle_get_src_description(state, m);
4946 break;
4947
4948 case NSTAT_MSG_TYPE_SET_FILTER:
4949 result = nstat_control_handle_set_filter(state, m);
4950 break;
4951
4952 case NSTAT_MSG_TYPE_GET_UPDATE:
4953 result = nstat_control_handle_get_update(state, m);
4954 break;
4955
4956 case NSTAT_MSG_TYPE_SUBSCRIBE_SYSINFO:
4957 result = nstat_control_handle_subscribe_sysinfo(state);
4958 break;
4959
4960 default:
4961 result = EINVAL;
4962 break;
4963 }
4964
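/*
 * On failure, the error reply echoes the client's original request: an
 * nstat_msg_error header is prepended to the request mbuf so the client can
 * correlate the failure even without a context. If the prepend fails, a
 * bare error message is sent instead.
 */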
4965 if (result != 0) {
4966 struct nstat_msg_error err;
4967
4968 bzero(&err, sizeof(err));
4969 err.hdr.type = NSTAT_MSG_TYPE_ERROR;
4970 err.hdr.length = sizeof(err) + mbuf_pkthdr_len(m);
4971 err.hdr.context = hdr->context;
4972 err.error = result;
4973
4974 if (mbuf_prepend(&m, sizeof(err), MBUF_DONTWAIT) == 0 &&
4975 mbuf_copyback(m, 0, sizeof(err), &err, MBUF_DONTWAIT) == 0) {
4976 result = ctl_enqueuembuf(kctl, unit, m, CTL_DATA_EOR | CTL_DATA_CRIT);
4977 if (result != 0) {
4978 mbuf_freem(m);
4979 }
4980 m = NULL;
4981 }
4982
4983 if (result != 0) {
4984 // Unable to prepend the error to the request - just send the error
4985 err.hdr.length = sizeof(err);
4986 result = ctl_enqueuedata(kctl, unit, &err, sizeof(err),
4987 CTL_DATA_EOR | CTL_DATA_CRIT);
4988 if (result != 0) {
4989 nstat_stats.nstat_msgerrorfailures += 1;
4990 }
4991 }
4992 nstat_stats.nstat_handle_msg_failures += 1;
4993 }
4994
4995 if (m) {
4996 mbuf_freem(m);
4997 }
4998
4999 return result;
5000 }
5001
5002
5003 static int
5004 tcp_progress_indicators_for_interface(unsigned int ifindex, uint64_t recentflow_maxduration, struct xtcpprogress_indicators *indicators)
5005 {
5006 int error = 0;
5007 struct inpcb *inp;
5008 uint64_t min_recent_start_time;
5009
5010 min_recent_start_time = mach_continuous_time() - recentflow_maxduration;
5011 bzero(indicators, sizeof(*indicators));
5012
5013 lck_rw_lock_shared(tcbinfo.ipi_lock);
5014 /*
5015 * For progress indicators we don't need to special-case TCP time-wait connections
5016 */
5017 LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list)
5018 {
5019 struct tcpcb *tp = intotcpcb(inp);
5020 if (tp && inp->inp_last_outifp &&
5021 inp->inp_last_outifp->if_index == ifindex &&
5022 inp->inp_state != INPCB_STATE_DEAD &&
5023 !(tp->t_flags & TF_LOCAL)) {
5024 struct tcp_conn_status connstatus;
5025 indicators->xp_numflows++;
5026 tcp_get_connectivity_status(tp, &connstatus);
5027 if (connstatus.write_probe_failed) {
5028 indicators->xp_write_probe_fails++;
5029 }
5030 if (connstatus.read_probe_failed) {
5031 indicators->xp_read_probe_fails++;
5032 }
5033 if (connstatus.conn_probe_failed) {
5034 indicators->xp_conn_probe_fails++;
5035 }
5036 if (inp->inp_start_timestamp > min_recent_start_time) {
5037 uint64_t flow_count;
5038
5039 indicators->xp_recentflows++;
5040 atomic_get_64(flow_count, &inp->inp_stat->rxbytes);
5041 indicators->xp_recentflows_rxbytes += flow_count;
5042 atomic_get_64(flow_count, &inp->inp_stat->txbytes);
5043 indicators->xp_recentflows_txbytes += flow_count;
5044
5045 indicators->xp_recentflows_rxooo += tp->t_stat.rxoutoforderbytes;
5046 indicators->xp_recentflows_rxdup += tp->t_stat.rxduplicatebytes;
5047 indicators->xp_recentflows_retx += tp->t_stat.txretransmitbytes;
5048 if (tp->snd_max - tp->snd_una) {
5049 indicators->xp_recentflows_unacked++;
5050 }
5051 }
5052 }
5053 }
5054 lck_rw_done(tcbinfo.ipi_lock);
5055
5056 return error;
5057 }
5058
5059
5060 __private_extern__ int
5061 ntstat_tcp_progress_indicators(struct sysctl_req *req)
5062 {
5063 struct xtcpprogress_indicators indicators = {};
5064 int error = 0;
5065 struct tcpprogressreq requested;
5066
5067 if (priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0) != 0) {
5068 return EACCES;
5069 }
5070 if (req->newptr == USER_ADDR_NULL) {
5071 return EINVAL;
5072 }
5073 if (req->newlen < sizeof(requested)) {
5074 return EINVAL;
5075 }
5076 error = SYSCTL_IN(req, &requested, sizeof(requested));
5077 if (error != 0) {
5078 return error;
5079 }
5080 error = tcp_progress_indicators_for_interface(requested.ifindex, requested.recentflow_maxduration, &indicators);
5081 if (error != 0) {
5082 return error;
5083 }
5084 error = SYSCTL_OUT(req, &indicators, sizeof(indicators));
5085
5086 return error;
5087 }
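/*
 * A hedged userspace sketch of driving this handler, assuming it is wired
 * to a sysctl node (the OID registration lives outside this file; the name
 * "net.inet.tcp.progress" is illustrative):
 *
 *   struct tcpprogressreq treq;
 *   struct xtcpprogress_indicators ind;
 *   size_t len = sizeof(ind);
 *   memset(&treq, 0, sizeof(treq));
 *   treq.ifindex = if_nametoindex("en0");
 *   treq.recentflow_maxduration = duration_in_mach_ticks; // mach_continuous_time units
 *   sysctlbyname("net.inet.tcp.progress", &ind, &len, &treq, sizeof(treq));
 *
 * The handler requires PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, so
 * unentitled callers get EACCES.
 */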
5088
5089
5090
5091