1 /*
2 * Copyright (c) 2010-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <sys/param.h>
30 #include <sys/types.h>
31 #include <sys/kpi_mbuf.h>
32 #include <sys/socket.h>
33 #include <sys/kern_control.h>
34 #include <sys/mcache.h>
35 #include <sys/socketvar.h>
36 #include <sys/sysctl.h>
37 #include <sys/queue.h>
38 #include <sys/priv.h>
39 #include <sys/protosw.h>
40
41 #include <kern/clock.h>
42 #include <kern/debug.h>
43
44 #include <libkern/libkern.h>
45 #include <libkern/OSAtomic.h>
46 #include <libkern/locks.h>
47
48 #include <net/if.h>
49 #include <net/if_var.h>
50 #include <net/if_types.h>
51 #include <net/route.h>
52
53 // These includes appear in ntstat.h, but we pull them in here first so they
54 // aren't subject to the strict clang diagnostics enabled below for ntstat.h.
55 #include <netinet/in.h>
56 #include <netinet/in_stat.h>
57 #include <netinet/tcp.h>
58
59 #pragma clang diagnostic push
60 #pragma clang diagnostic error "-Wpadded"
61 #pragma clang diagnostic error "-Wpacked"
62 // This header defines structures shared with user space, so we need to ensure there is
63 // no compiler inserted padding in case the user space process isn't using the same
64 // architecture as the kernel (example: i386 process with x86_64 kernel).
65 #include <net/ntstat.h>
66 #pragma clang diagnostic pop
67
68 #include <netinet/ip_var.h>
69 #include <netinet/in_pcb.h>
70 #include <netinet/in_var.h>
71 #include <netinet/tcp_var.h>
72 #include <netinet/tcp_fsm.h>
73 #include <netinet/tcp_cc.h>
74 #include <netinet/udp.h>
75 #include <netinet/udp_var.h>
76 #include <netinet6/in6_pcb.h>
77 #include <netinet6/in6_var.h>
78
79 __private_extern__ int nstat_collect = 1;
80
81 #if (DEBUG || DEVELOPMENT)
82 SYSCTL_INT(_net, OID_AUTO, statistics, CTLFLAG_RW | CTLFLAG_LOCKED,
83 &nstat_collect, 0, "Collect detailed statistics");
84 #endif /* (DEBUG || DEVELOPMENT) */
85
86 #if !XNU_TARGET_OS_OSX
87 static int nstat_privcheck = 1;
88 #else /* XNU_TARGET_OS_OSX */
89 static int nstat_privcheck = 0;
90 #endif /* XNU_TARGET_OS_OSX */
91 SYSCTL_INT(_net, OID_AUTO, statistics_privcheck, CTLFLAG_RW | CTLFLAG_LOCKED,
92 &nstat_privcheck, 0, "Entitlement check");
93
94 SYSCTL_NODE(_net, OID_AUTO, stats,
95 CTLFLAG_RW | CTLFLAG_LOCKED, 0, "network statistics");
96
97 static int nstat_debug = 0;
98 SYSCTL_INT(_net_stats, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED,
99 &nstat_debug, 0, "");
100
101 static int nstat_sendspace = 2048;
102 SYSCTL_INT(_net_stats, OID_AUTO, sendspace, CTLFLAG_RW | CTLFLAG_LOCKED,
103 &nstat_sendspace, 0, "");
104
105 static int nstat_recvspace = 8192;
106 SYSCTL_INT(_net_stats, OID_AUTO, recvspace, CTLFLAG_RW | CTLFLAG_LOCKED,
107 &nstat_recvspace, 0, "");
108
109 static struct nstat_stats nstat_stats;
110 SYSCTL_STRUCT(_net_stats, OID_AUTO, stats, CTLFLAG_RD | CTLFLAG_LOCKED,
111 &nstat_stats, nstat_stats, "");
112
113 static u_int32_t nstat_lim_interval = 30 * 60; /* Report interval, seconds */
114 static u_int32_t nstat_lim_min_tx_pkts = 100;
115 static u_int32_t nstat_lim_min_rx_pkts = 100;
116 #if (DEBUG || DEVELOPMENT)
117 SYSCTL_INT(_net_stats, OID_AUTO, lim_report_interval,
118 CTLFLAG_RW | CTLFLAG_LOCKED, &nstat_lim_interval, 0,
119 "Low internet stat report interval");
120
121 SYSCTL_INT(_net_stats, OID_AUTO, lim_min_tx_pkts,
122 CTLFLAG_RW | CTLFLAG_LOCKED, &nstat_lim_min_tx_pkts, 0,
123 "Low Internet, min transmit packets threshold");
124
125 SYSCTL_INT(_net_stats, OID_AUTO, lim_min_rx_pkts,
126 CTLFLAG_RW | CTLFLAG_LOCKED, &nstat_lim_min_rx_pkts, 0,
127 "Low Internet, min receive packets threshold");
128 #endif /* DEBUG || DEVELOPMENT */
129
130 static struct net_api_stats net_api_stats_before;
131 static u_int64_t net_api_stats_last_report_time;
132 #define NET_API_STATS_REPORT_INTERVAL (12 * 60 * 60) /* 12 hours, in seconds */
133 static u_int32_t net_api_stats_report_interval = NET_API_STATS_REPORT_INTERVAL;
134
135 #if (DEBUG || DEVELOPMENT)
136 SYSCTL_UINT(_net_stats, OID_AUTO, api_report_interval,
137 CTLFLAG_RW | CTLFLAG_LOCKED, &net_api_stats_report_interval, 0, "");
138 #endif /* DEBUG || DEVELOPMENT */
139
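// Bits for the ncs_flags field of nstat_control_state below.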
140 enum {
141 NSTAT_FLAG_CLEANUP = (1 << 0),
142 NSTAT_FLAG_REQCOUNTS = (1 << 1),
143 NSTAT_FLAG_SUPPORTS_UPDATES = (1 << 2),
144 NSTAT_FLAG_SYSINFO_SUBSCRIBED = (1 << 3),
145 };
146
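// Number of sources reported per batch when a query result spans multiple
// messages; smaller on embedded targets, presumably to bound the size of
// each kernel-control message burst.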
147 #if !XNU_TARGET_OS_OSX
148 #define QUERY_CONTINUATION_SRC_COUNT 50
149 #else /* XNU_TARGET_OS_OSX */
150 #define QUERY_CONTINUATION_SRC_COUNT 100
151 #endif /* XNU_TARGET_OS_OSX */
152
153 typedef TAILQ_HEAD(, nstat_src) tailq_head_nstat_src;
154 typedef TAILQ_ENTRY(nstat_src) tailq_entry_nstat_src;
155
156 typedef struct nstat_provider_filter {
157 u_int64_t npf_flags;
158 u_int64_t npf_events;
159 pid_t npf_pid;
160 uuid_t npf_uuid;
161 } nstat_provider_filter;
162
163
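// Per-client state, one for each open unit of the statistics kernel control.
// All instances are chained through ncs_next on the global nstat_controls list.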
164 typedef struct nstat_control_state {
165 struct nstat_control_state *ncs_next;
166 u_int32_t ncs_watching;
167 decl_lck_mtx_data(, ncs_mtx);
168 kern_ctl_ref ncs_kctl;
169 u_int32_t ncs_unit;
170 nstat_src_ref_t ncs_next_srcref;
171 tailq_head_nstat_src ncs_src_queue;
172 mbuf_t ncs_accumulated;
173 u_int32_t ncs_flags;
174 nstat_provider_filter ncs_provider_filters[NSTAT_PROVIDER_COUNT];
175 /* state maintained for partial query requests */
176 u_int64_t ncs_context;
177 u_int64_t ncs_seq;
178 } nstat_control_state;
179
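// Callback table registered by each statistics source type (route, TCP, UDP,
// ifnet, ...). Providers are kept on the singly linked nstat_providers list
// and found by ID via nstat_find_provider_by_id().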
180 typedef struct nstat_provider {
181 struct nstat_provider *next;
182 nstat_provider_id_t nstat_provider_id;
183 size_t nstat_descriptor_length;
184 errno_t (*nstat_lookup)(const void *data, u_int32_t length, nstat_provider_cookie_t *out_cookie);
185 int (*nstat_gone)(nstat_provider_cookie_t cookie);
186 errno_t (*nstat_counts)(nstat_provider_cookie_t cookie, struct nstat_counts *out_counts, int *out_gone);
187 errno_t (*nstat_watcher_add)(nstat_control_state *state, nstat_msg_add_all_srcs *req);
188 void (*nstat_watcher_remove)(nstat_control_state *state);
189 errno_t (*nstat_copy_descriptor)(nstat_provider_cookie_t cookie, void *data, size_t len);
190 void (*nstat_release)(nstat_provider_cookie_t cookie, boolean_t locked);
191 bool (*nstat_reporting_allowed)(nstat_provider_cookie_t cookie, nstat_provider_filter *filter);
192 } nstat_provider;
193
194 typedef STAILQ_HEAD(, nstat_src) stailq_head_nstat_src;
195 typedef STAILQ_ENTRY(nstat_src) stailq_entry_nstat_src;
196
197 typedef TAILQ_HEAD(, nstat_tu_shadow) tailq_head_tu_shadow;
198 typedef TAILQ_ENTRY(nstat_tu_shadow) tailq_entry_tu_shadow;
199
200 typedef TAILQ_HEAD(, nstat_procdetails) tailq_head_procdetails;
201 typedef TAILQ_ENTRY(nstat_procdetails) tailq_entry_procdetails;
202
203 typedef struct nstat_src {
204 tailq_entry_nstat_src ns_control_link; // All sources for the nstat_control_state, for iterating over.
205 nstat_control_state *ns_control; // The nstat_control_state that this is a source for
206 nstat_src_ref_t srcref;
207 nstat_provider *provider;
208 nstat_provider_cookie_t cookie;
209 uint32_t filter;
210 uint64_t seq;
211 } nstat_src;
212
213 static errno_t nstat_control_send_counts(nstat_control_state *, nstat_src *, u_int64_t, u_int16_t, int *);
214 static int nstat_control_send_description(nstat_control_state *state, nstat_src *src, u_int64_t context, u_int16_t hdr_flags);
215 static int nstat_control_send_update(nstat_control_state *state, nstat_src *src, u_int64_t context, u_int64_t event, u_int16_t hdr_flags, int *gone);
216 static errno_t nstat_control_send_removed(nstat_control_state *, nstat_src *);
217 static errno_t nstat_control_send_goodbye(nstat_control_state *state, nstat_src *src);
218 static void nstat_control_cleanup_source(nstat_control_state *state, nstat_src *src, boolean_t);
219 static bool nstat_control_reporting_allowed(nstat_control_state *state, nstat_src *src);
220 static boolean_t nstat_control_begin_query(nstat_control_state *state, const nstat_msg_hdr *hdrp);
221 static u_int16_t nstat_control_end_query(nstat_control_state *state, nstat_src *last_src, boolean_t partial);
222 static void nstat_ifnet_report_ecn_stats(void);
223 static void nstat_ifnet_report_lim_stats(void);
224 static void nstat_net_api_report_stats(void);
225 static errno_t nstat_set_provider_filter(nstat_control_state *state, nstat_msg_add_all_srcs *req);
226 static errno_t nstat_control_send_event(nstat_control_state *state, nstat_src *src, u_int64_t event);
227
228 static u_int32_t nstat_udp_watchers = 0;
229 static u_int32_t nstat_tcp_watchers = 0;
230
231 static void nstat_control_register(void);
232
233 /*
234 * The lock order is as follows:
235 *
236 * socket_lock (inpcb)
237 * nstat_mtx
238 * state->ncs_mtx
239 */
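// For example, nstat_pcb_detach() below takes nstat_mtx, then each
// state->ncs_mtx in turn, and defers the actual source cleanup until
// both locks have been dropped.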
240 static KALLOC_HEAP_DEFINE(KHEAP_NET_STAT, NET_STAT_CONTROL_NAME,
241 KHEAP_ID_DEFAULT);
242 static nstat_control_state *nstat_controls = NULL;
243 static uint64_t nstat_idle_time = 0;
244 static decl_lck_mtx_data(, nstat_mtx);
245
246 /* some extern definitions */
247 extern void mbuf_report_peak_usage(void);
248 extern void tcp_report_stats(void);
249
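// Copy a sockaddr for export. For scoped IPv6 addresses, the KAME-style
// scope ID embedded in the second 16-bit word of the address is moved into
// sin6_scope_id and scrubbed from the address itself.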
250 static void
251 nstat_copy_sa_out(
252 const struct sockaddr *src,
253 struct sockaddr *dst,
254 int maxlen)
255 {
256 if (src->sa_len > maxlen) {
257 return;
258 }
259
260 bcopy(src, dst, src->sa_len);
261 if (src->sa_family == AF_INET6 &&
262 src->sa_len >= sizeof(struct sockaddr_in6)) {
263 struct sockaddr_in6 *sin6 = (struct sockaddr_in6*)(void *)dst;
264 if (IN6_IS_SCOPE_EMBED(&sin6->sin6_addr)) {
265 if (sin6->sin6_scope_id == 0) {
266 sin6->sin6_scope_id = ntohs(sin6->sin6_addr.s6_addr16[1]);
267 }
268 sin6->sin6_addr.s6_addr16[1] = 0;
269 }
270 }
271 }
272
273 static void
274 nstat_ip_to_sockaddr(
275 const struct in_addr *ip,
276 u_int16_t port,
277 struct sockaddr_in *sin,
278 u_int32_t maxlen)
279 {
280 if (maxlen < sizeof(struct sockaddr_in)) {
281 return;
282 }
283
284 sin->sin_family = AF_INET;
285 sin->sin_len = sizeof(*sin);
286 sin->sin_port = port;
287 sin->sin_addr = *ip;
288 }
289
290 u_int16_t
291 nstat_ifnet_to_flags(
292 struct ifnet *ifp)
293 {
294 u_int16_t flags = 0;
295 u_int32_t functional_type = if_functional_type(ifp, FALSE);
296
297 /* Panic if someone adds a functional type without updating ntstat. */
298 VERIFY(0 <= functional_type && functional_type <= IFRTYPE_FUNCTIONAL_LAST);
299
300 switch (functional_type) {
301 case IFRTYPE_FUNCTIONAL_UNKNOWN:
302 flags |= NSTAT_IFNET_IS_UNKNOWN_TYPE;
303 break;
304 case IFRTYPE_FUNCTIONAL_LOOPBACK:
305 flags |= NSTAT_IFNET_IS_LOOPBACK;
306 break;
307 case IFRTYPE_FUNCTIONAL_WIRED:
308 case IFRTYPE_FUNCTIONAL_INTCOPROC:
309 flags |= NSTAT_IFNET_IS_WIRED;
310 break;
311 case IFRTYPE_FUNCTIONAL_WIFI_INFRA:
312 flags |= NSTAT_IFNET_IS_WIFI;
313 break;
314 case IFRTYPE_FUNCTIONAL_WIFI_AWDL:
315 flags |= NSTAT_IFNET_IS_WIFI;
316 flags |= NSTAT_IFNET_IS_AWDL;
317 break;
318 case IFRTYPE_FUNCTIONAL_CELLULAR:
319 flags |= NSTAT_IFNET_IS_CELLULAR;
320 break;
321 case IFRTYPE_FUNCTIONAL_COMPANIONLINK:
322 flags |= NSTAT_IFNET_IS_COMPANIONLINK;
323 break;
324 }
325
326 if (IFNET_IS_EXPENSIVE(ifp)) {
327 flags |= NSTAT_IFNET_IS_EXPENSIVE;
328 }
329 if (IFNET_IS_CONSTRAINED(ifp)) {
330 flags |= NSTAT_IFNET_IS_CONSTRAINED;
331 }
332
333 return flags;
334 }
335
336 static u_int16_t
337 nstat_inpcb_to_flags(
338 const struct inpcb *inp)
339 {
340 u_int16_t flags = 0;
341
342 if (inp != NULL) {
343 if (inp->inp_last_outifp != NULL) {
344 struct ifnet *ifp = inp->inp_last_outifp;
345 flags = nstat_ifnet_to_flags(ifp);
346
347 struct tcpcb *tp = intotcpcb(inp);
348 if (tp) {
349 if (tp->t_flags & TF_LOCAL) {
350 flags |= NSTAT_IFNET_IS_LOCAL;
351 } else {
352 flags |= NSTAT_IFNET_IS_NON_LOCAL;
353 }
354 }
355 } else {
356 flags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
357 }
358 if (inp->inp_socket != NULL &&
359 (inp->inp_socket->so_flags1 & SOF1_CELLFALLBACK)) {
360 flags |= NSTAT_IFNET_VIA_CELLFALLBACK;
361 }
362 }
363 return flags;
364 }
365
366 #pragma mark -- Network Statistic Providers --
367
368 static errno_t nstat_control_source_add(u_int64_t context, nstat_control_state *state, nstat_provider *provider, nstat_provider_cookie_t cookie);
369 struct nstat_provider *nstat_providers = NULL;
370
371 static struct nstat_provider*
372 nstat_find_provider_by_id(
373 nstat_provider_id_t id)
374 {
375 struct nstat_provider *provider;
376
377 for (provider = nstat_providers; provider != NULL; provider = provider->next) {
378 if (provider->nstat_provider_id == id) {
379 break;
380 }
381 }
382
383 return provider;
384 }
385
386 static errno_t
387 nstat_lookup_entry(
388 nstat_provider_id_t id,
389 const void *data,
390 u_int32_t length,
391 nstat_provider **out_provider,
392 nstat_provider_cookie_t *out_cookie)
393 {
394 *out_provider = nstat_find_provider_by_id(id);
395 if (*out_provider == NULL) {
396 return ENOENT;
397 }
398
399 return (*out_provider)->nstat_lookup(data, length, out_cookie);
400 }
401
402 static void nstat_init_route_provider(void);
403 static void nstat_init_tcp_provider(void);
404 static void nstat_init_udp_provider(void);
405 static void nstat_init_ifnet_provider(void);
406
407 __private_extern__ void
408 nstat_init(void)
409 {
410 nstat_init_route_provider();
411 nstat_init_tcp_provider();
412 nstat_init_udp_provider();
413 nstat_init_ifnet_provider();
414 nstat_control_register();
415 }
416
417 #pragma mark -- Aligned Buffer Allocation --
418
419 struct align_header {
420 u_int32_t offset;
421 u_int32_t length;
422 };
423
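// Layout of an aligned allocation:
//
//   raw buffer: [ pad ][ struct align_header ][ aligned payload ... ]
//
// The header is stored immediately before the pointer returned to the
// caller so that nstat_free_aligned() can recover the raw buffer (via
// hdr->offset) and its total size (hdr->length).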
424 static void*
425 nstat_malloc_aligned(
426 size_t length,
427 u_int8_t alignment,
428 zalloc_flags_t flags)
429 {
430 struct align_header *hdr = NULL;
431 size_t size = length + sizeof(*hdr) + alignment - 1;
432
433 // Arbitrary limit to prevent abuse
434 if (length > (64 * 1024)) {
435 return NULL;
436 }
437 u_int8_t *buffer = kheap_alloc(KHEAP_NET_STAT, size, flags);
438 if (buffer == NULL) {
439 return NULL;
440 }
441
442 u_int8_t *aligned = buffer + sizeof(*hdr);
443 aligned = (u_int8_t*)P2ROUNDUP(aligned, alignment);
444
445 hdr = (struct align_header*)(void *)(aligned - sizeof(*hdr));
446 hdr->offset = aligned - buffer;
447 hdr->length = size;
448
449 return aligned;
450 }
451
452 static void
453 nstat_free_aligned(
454 void *buffer)
455 {
456 struct align_header *hdr = (struct align_header*)(void *)((u_int8_t*)buffer - sizeof(*hdr));
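// Parenthesizing (kheap_free) suppresses the kheap_free() convenience
// macro and calls the underlying function directly.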
457 (kheap_free)(KHEAP_NET_STAT, (char *)buffer - hdr->offset, hdr->length);
458 }
459
460 #pragma mark -- Route Provider --
461
462 static nstat_provider nstat_route_provider;
463
464 static errno_t
465 nstat_route_lookup(
466 const void *data,
467 u_int32_t length,
468 nstat_provider_cookie_t *out_cookie)
469 {
470 // rt_lookup() doesn't take const parameters, but it doesn't modify them
471 // during the lookup, so we use a union to cast away const without a warning.
472 union {
473 struct sockaddr *sa;
474 const struct sockaddr *const_sa;
475 } dst, mask;
476
477 const nstat_route_add_param *param = (const nstat_route_add_param*)data;
478 *out_cookie = NULL;
479
480 if (length < sizeof(*param)) {
481 return EINVAL;
482 }
483
484 if (param->dst.v4.sin_family == 0 ||
485 param->dst.v4.sin_family > AF_MAX ||
486 (param->mask.v4.sin_family != 0 && param->mask.v4.sin_family != param->dst.v4.sin_family)) {
487 return EINVAL;
488 }
489
490 if (param->dst.v4.sin_len > sizeof(param->dst) ||
491 (param->mask.v4.sin_family && param->mask.v4.sin_len > sizeof(param->mask))) {
492 return EINVAL;
493 }
494 if ((param->dst.v4.sin_family == AF_INET &&
495 param->dst.v4.sin_len < sizeof(struct sockaddr_in)) ||
496 (param->dst.v6.sin6_family == AF_INET6 &&
497 param->dst.v6.sin6_len < sizeof(struct sockaddr_in6))) {
498 return EINVAL;
499 }
500
501 dst.const_sa = (const struct sockaddr*)&param->dst;
502 mask.const_sa = param->mask.v4.sin_family ? (const struct sockaddr*)&param->mask : NULL;
503
504 struct radix_node_head *rnh = rt_tables[dst.sa->sa_family];
505 if (rnh == NULL) {
506 return EAFNOSUPPORT;
507 }
508
509 lck_mtx_lock(rnh_lock);
510 struct rtentry *rt = rt_lookup(TRUE, dst.sa, mask.sa, rnh, param->ifindex);
511 lck_mtx_unlock(rnh_lock);
512
513 if (rt) {
514 *out_cookie = (nstat_provider_cookie_t)rt;
515 }
516
517 return rt ? 0 : ENOENT;
518 }
519
520 static int
521 nstat_route_gone(
522 nstat_provider_cookie_t cookie)
523 {
524 struct rtentry *rt = (struct rtentry*)cookie;
525 return ((rt->rt_flags & RTF_UP) == 0) ? 1 : 0;
526 }
527
528 static errno_t
529 nstat_route_counts(
530 nstat_provider_cookie_t cookie,
531 struct nstat_counts *out_counts,
532 int *out_gone)
533 {
534 struct rtentry *rt = (struct rtentry*)cookie;
535 struct nstat_counts *rt_stats = rt->rt_stats;
536
537 if (out_gone) {
538 *out_gone = 0;
539 }
540
541 if (out_gone && (rt->rt_flags & RTF_UP) == 0) {
542 *out_gone = 1;
543 }
544
545 if (rt_stats) {
546 atomic_get_64(out_counts->nstat_rxpackets, &rt_stats->nstat_rxpackets);
547 atomic_get_64(out_counts->nstat_rxbytes, &rt_stats->nstat_rxbytes);
548 atomic_get_64(out_counts->nstat_txpackets, &rt_stats->nstat_txpackets);
549 atomic_get_64(out_counts->nstat_txbytes, &rt_stats->nstat_txbytes);
550 out_counts->nstat_rxduplicatebytes = rt_stats->nstat_rxduplicatebytes;
551 out_counts->nstat_rxoutoforderbytes = rt_stats->nstat_rxoutoforderbytes;
552 out_counts->nstat_txretransmit = rt_stats->nstat_txretransmit;
553 out_counts->nstat_connectattempts = rt_stats->nstat_connectattempts;
554 out_counts->nstat_connectsuccesses = rt_stats->nstat_connectsuccesses;
555 out_counts->nstat_min_rtt = rt_stats->nstat_min_rtt;
556 out_counts->nstat_avg_rtt = rt_stats->nstat_avg_rtt;
557 out_counts->nstat_var_rtt = rt_stats->nstat_var_rtt;
558 out_counts->nstat_cell_rxbytes = out_counts->nstat_cell_txbytes = 0;
559 } else {
560 bzero(out_counts, sizeof(*out_counts));
561 }
562
563 return 0;
564 }
565
566 static void
567 nstat_route_release(
568 nstat_provider_cookie_t cookie,
569 __unused int locked)
570 {
571 rtfree((struct rtentry*)cookie);
572 }
573
574 static u_int32_t nstat_route_watchers = 0;
575
576 static int
577 nstat_route_walktree_add(
578 struct radix_node *rn,
579 void *context)
580 {
581 errno_t result = 0;
582 struct rtentry *rt = (struct rtentry *)rn;
583 nstat_control_state *state = (nstat_control_state*)context;
584
585 LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
586
587 /* RTF_UP can't change while rnh_lock is held */
588 if ((rt->rt_flags & RTF_UP) != 0) {
589 /* Clear RTPRF_OURS if the route is still usable */
590 RT_LOCK(rt);
591 if (rt_validate(rt)) {
592 RT_ADDREF_LOCKED(rt);
593 RT_UNLOCK(rt);
594 } else {
595 RT_UNLOCK(rt);
596 rt = NULL;
597 }
598
599 /* Otherwise if RTF_CONDEMNED, treat it as if it were down */
600 if (rt == NULL) {
601 return 0;
602 }
603
604 result = nstat_control_source_add(0, state, &nstat_route_provider, rt);
605 if (result != 0) {
606 rtfree_locked(rt);
607 }
608 }
609
610 return result;
611 }
612
613 static errno_t
614 nstat_route_add_watcher(
615 nstat_control_state *state,
616 nstat_msg_add_all_srcs *req)
617 {
618 int i;
619 errno_t result = 0;
620
621 lck_mtx_lock(rnh_lock);
622
623 result = nstat_set_provider_filter(state, req);
624 if (result == 0) {
625 OSIncrementAtomic(&nstat_route_watchers);
626
627 for (i = 1; i < AF_MAX; i++) {
628 struct radix_node_head *rnh;
629 rnh = rt_tables[i];
630 if (!rnh) {
631 continue;
632 }
633
634 result = rnh->rnh_walktree(rnh, nstat_route_walktree_add, state);
635 if (result != 0) {
636 // This is probably resource exhaustion.
637 // There currently isn't a good way to recover from this.
638 // Least bad seems to be to give up on the add-all but leave
639 // the watcher in place.
640 break;
641 }
642 }
643 }
644 lck_mtx_unlock(rnh_lock);
645
646 return result;
647 }
648
649 __private_extern__ void
650 nstat_route_new_entry(
651 struct rtentry *rt)
652 {
653 if (nstat_route_watchers == 0) {
654 return;
655 }
656
657 lck_mtx_lock(&nstat_mtx);
658 if ((rt->rt_flags & RTF_UP) != 0) {
659 nstat_control_state *state;
660 for (state = nstat_controls; state; state = state->ncs_next) {
661 if ((state->ncs_watching & (1 << NSTAT_PROVIDER_ROUTE)) != 0) {
662 // this client is watching routes
663 // acquire a reference for the route
664 RT_ADDREF(rt);
665
666 // add the source, if that fails, release the reference
667 if (nstat_control_source_add(0, state, &nstat_route_provider, rt) != 0) {
668 RT_REMREF(rt);
669 }
670 }
671 }
672 }
673 lck_mtx_unlock(&nstat_mtx);
674 }
675
676 static void
677 nstat_route_remove_watcher(
678 __unused nstat_control_state *state)
679 {
680 OSDecrementAtomic(&nstat_route_watchers);
681 }
682
683 static errno_t
684 nstat_route_copy_descriptor(
685 nstat_provider_cookie_t cookie,
686 void *data,
687 size_t len)
688 {
689 nstat_route_descriptor *desc = (nstat_route_descriptor*)data;
690 if (len < sizeof(*desc)) {
691 return EINVAL;
692 }
693 bzero(desc, sizeof(*desc));
694
695 struct rtentry *rt = (struct rtentry*)cookie;
696 desc->id = (uint64_t)VM_KERNEL_ADDRPERM(rt);
697 desc->parent_id = (uint64_t)VM_KERNEL_ADDRPERM(rt->rt_parent);
698 desc->gateway_id = (uint64_t)VM_KERNEL_ADDRPERM(rt->rt_gwroute);
699
700
701 // key/dest
702 struct sockaddr *sa;
703 if ((sa = rt_key(rt))) {
704 nstat_copy_sa_out(sa, &desc->dst.sa, sizeof(desc->dst));
705 }
706
707 // mask
708 if ((sa = rt_mask(rt)) && sa->sa_len <= sizeof(desc->mask)) {
709 memcpy(&desc->mask, sa, sa->sa_len);
710 }
711
712 // gateway
713 if ((sa = rt->rt_gateway)) {
714 nstat_copy_sa_out(sa, &desc->gateway.sa, sizeof(desc->gateway));
715 }
716
717 if (rt->rt_ifp) {
718 desc->ifindex = rt->rt_ifp->if_index;
719 }
720
721 desc->flags = rt->rt_flags;
722
723 return 0;
724 }
725
726 static bool
727 nstat_route_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter)
728 {
729 bool retval = true;
730
731 if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0) {
732 struct rtentry *rt = (struct rtentry*)cookie;
733 struct ifnet *ifp = rt->rt_ifp;
734
735 if (ifp) {
736 uint16_t interface_properties = nstat_ifnet_to_flags(ifp);
737
738 if ((filter->npf_flags & interface_properties) == 0) {
739 retval = false;
740 }
741 }
742 }
743 return retval;
744 }
745
746 static void
747 nstat_init_route_provider(void)
748 {
749 bzero(&nstat_route_provider, sizeof(nstat_route_provider));
750 nstat_route_provider.nstat_descriptor_length = sizeof(nstat_route_descriptor);
751 nstat_route_provider.nstat_provider_id = NSTAT_PROVIDER_ROUTE;
752 nstat_route_provider.nstat_lookup = nstat_route_lookup;
753 nstat_route_provider.nstat_gone = nstat_route_gone;
754 nstat_route_provider.nstat_counts = nstat_route_counts;
755 nstat_route_provider.nstat_release = nstat_route_release;
756 nstat_route_provider.nstat_watcher_add = nstat_route_add_watcher;
757 nstat_route_provider.nstat_watcher_remove = nstat_route_remove_watcher;
758 nstat_route_provider.nstat_copy_descriptor = nstat_route_copy_descriptor;
759 nstat_route_provider.nstat_reporting_allowed = nstat_route_reporting_allowed;
760 nstat_route_provider.next = nstat_providers;
761 nstat_providers = &nstat_route_provider;
762 }
763
764 #pragma mark -- Route Collection --
765
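// Lazily attach a stats block to a route. If another thread wins the
// compare-and-swap race below, free our block and use the winner's.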
766 __private_extern__ struct nstat_counts*
767 nstat_route_attach(
768 struct rtentry *rte)
769 {
770 struct nstat_counts *result = rte->rt_stats;
771 if (result) {
772 return result;
773 }
774
775 result = nstat_malloc_aligned(sizeof(*result), sizeof(u_int64_t),
776 Z_WAITOK | Z_ZERO);
777 if (!result) {
778 return result;
779 }
780
781 if (!OSCompareAndSwapPtr(NULL, result, &rte->rt_stats)) {
782 nstat_free_aligned(result);
783 result = rte->rt_stats;
784 }
785
786 return result;
787 }
788
789 __private_extern__ void
790 nstat_route_detach(
791 struct rtentry *rte)
792 {
793 if (rte->rt_stats) {
794 nstat_free_aligned(rte->rt_stats);
795 rte->rt_stats = NULL;
796 }
797 }
798
799 __private_extern__ void
800 nstat_route_connect_attempt(
801 struct rtentry *rte)
802 {
803 while (rte) {
804 struct nstat_counts* stats = nstat_route_attach(rte);
805 if (stats) {
806 OSIncrementAtomic(&stats->nstat_connectattempts);
807 }
808
809 rte = rte->rt_parent;
810 }
811 }
812
813 __private_extern__ void
814 nstat_route_connect_success(
815 struct rtentry *rte)
816 {
817 // Update this route and all of its parents
818 while (rte) {
819 struct nstat_counts* stats = nstat_route_attach(rte);
820 if (stats) {
821 OSIncrementAtomic(&stats->nstat_connectsuccesses);
822 }
823
824 rte = rte->rt_parent;
825 }
826 }
827
828 __private_extern__ void
829 nstat_route_tx(
830 struct rtentry *rte,
831 u_int32_t packets,
832 u_int32_t bytes,
833 u_int32_t flags)
834 {
835 while (rte) {
836 struct nstat_counts* stats = nstat_route_attach(rte);
837 if (stats) {
838 if ((flags & NSTAT_TX_FLAG_RETRANSMIT) != 0) {
839 OSAddAtomic(bytes, &stats->nstat_txretransmit);
840 } else {
841 OSAddAtomic64((SInt64)packets, (SInt64*)&stats->nstat_txpackets);
842 OSAddAtomic64((SInt64)bytes, (SInt64*)&stats->nstat_txbytes);
843 }
844 }
845
846 rte = rte->rt_parent;
847 }
848 }
849
850 __private_extern__ void
851 nstat_route_rx(
852 struct rtentry *rte,
853 u_int32_t packets,
854 u_int32_t bytes,
855 u_int32_t flags)
856 {
857 while (rte) {
858 struct nstat_counts* stats = nstat_route_attach(rte);
859 if (stats) {
860 if (flags == 0) {
861 OSAddAtomic64((SInt64)packets, (SInt64*)&stats->nstat_rxpackets);
862 OSAddAtomic64((SInt64)bytes, (SInt64*)&stats->nstat_rxbytes);
863 } else {
864 if (flags & NSTAT_RX_FLAG_OUT_OF_ORDER) {
865 OSAddAtomic(bytes, &stats->nstat_rxoutoforderbytes);
866 }
867 if (flags & NSTAT_RX_FLAG_DUPLICATE) {
868 OSAddAtomic(bytes, &stats->nstat_rxduplicatebytes);
869 }
870 }
871 }
872
873 rte = rte->rt_parent;
874 }
875 }
876
877 /* atomically average current value at _val_addr with _new_val and store */
878 #define NSTAT_EWMA_ATOMIC(_val_addr, _new_val, _decay) do { \
879 volatile uint32_t _old_val; \
880 volatile uint32_t _avg; \
881 do { \
882 _old_val = *_val_addr; \
883 if (_old_val == 0) \
884 { \
885 _avg = _new_val; \
886 } \
887 else \
888 { \
889 _avg = _old_val - (_old_val >> _decay) + (_new_val >> _decay); \
890 } \
891 if (_old_val == _avg) break; \
892 } while (!OSCompareAndSwap(_old_val, _avg, _val_addr)); \
893 } while (0)
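// With _decay == 3 the update above is, in fixed point,
//   avg' = avg - avg/8 + new/8 ~= (7/8)*avg + (1/8)*new,
// i.e. an exponentially weighted moving average giving each new sample a
// weight of 1/2^_decay. A stored value of 0 means "no samples yet".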
894
895 /* atomically compute minimum of current value at _val_addr with _new_val and store */
896 #define NSTAT_MIN_ATOMIC(_val_addr, _new_val) do { \
897 volatile uint32_t _old_val; \
898 do { \
899 _old_val = *_val_addr; \
900 if (_old_val != 0 && _old_val < _new_val) \
901 { \
902 break; \
903 } \
904 } while (!OSCompareAndSwap(_old_val, _new_val, _val_addr)); \
905 } while (0)
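// As above, 0 means "no samples yet": the first sample is always stored,
// after which only values no larger than the current minimum replace it.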
906
907 __private_extern__ void
908 nstat_route_rtt(
909 struct rtentry *rte,
910 u_int32_t rtt,
911 u_int32_t rtt_var)
912 {
913 const uint32_t decay = 3;
914
915 while (rte) {
916 struct nstat_counts* stats = nstat_route_attach(rte);
917 if (stats) {
918 NSTAT_EWMA_ATOMIC(&stats->nstat_avg_rtt, rtt, decay);
919 NSTAT_MIN_ATOMIC(&stats->nstat_min_rtt, rtt);
920 NSTAT_EWMA_ATOMIC(&stats->nstat_var_rtt, rtt_var, decay);
921 }
922 rte = rte->rt_parent;
923 }
924 }
925
926 __private_extern__ void
927 nstat_route_update(
928 struct rtentry *rte,
929 uint32_t connect_attempts,
930 uint32_t connect_successes,
931 uint32_t rx_packets,
932 uint32_t rx_bytes,
933 uint32_t rx_duplicatebytes,
934 uint32_t rx_outoforderbytes,
935 uint32_t tx_packets,
936 uint32_t tx_bytes,
937 uint32_t tx_retransmit,
938 uint32_t rtt,
939 uint32_t rtt_var)
940 {
941 const uint32_t decay = 3;
942
943 while (rte) {
944 struct nstat_counts* stats = nstat_route_attach(rte);
945 if (stats) {
946 OSAddAtomic(connect_attempts, &stats->nstat_connectattempts);
947 OSAddAtomic(connect_successes, &stats->nstat_connectsuccesses);
948 OSAddAtomic64((SInt64)tx_packets, (SInt64*)&stats->nstat_txpackets);
949 OSAddAtomic64((SInt64)tx_bytes, (SInt64*)&stats->nstat_txbytes);
950 OSAddAtomic(tx_retransmit, &stats->nstat_txretransmit);
951 OSAddAtomic64((SInt64)rx_packets, (SInt64*)&stats->nstat_rxpackets);
952 OSAddAtomic64((SInt64)rx_bytes, (SInt64*)&stats->nstat_rxbytes);
953 OSAddAtomic(rx_outoforderbytes, &stats->nstat_rxoutoforderbytes);
954 OSAddAtomic(rx_duplicatebytes, &stats->nstat_rxduplicatebytes);
955
956 if (rtt != 0) {
957 NSTAT_EWMA_ATOMIC(&stats->nstat_avg_rtt, rtt, decay);
958 NSTAT_MIN_ATOMIC(&stats->nstat_min_rtt, rtt);
959 NSTAT_EWMA_ATOMIC(&stats->nstat_var_rtt, rtt_var, decay);
960 }
961 }
962 rte = rte->rt_parent;
963 }
964 }
965
966 #pragma mark -- TCP Kernel Provider --
967
968 /*
969 * Due to the way the kernel deallocates a process (the process structure
970 * might be gone by the time we get the PCB detach notification),
971 * we need to cache the process name. Without this, proc_name() would
972 * return null and the process name would never be sent to userland.
973 *
974 * For UDP sockets, we also cache the connection tuples along with the
975 * interface index. This is necessary because when UDP sockets are
976 * disconnected, the connection tuples are lost from the inpcb for good,
977 * so we need to keep track of the last call to connect() in ntstat.
978 */
979 struct nstat_tucookie {
980 struct inpcb *inp;
981 char pname[MAXCOMLEN + 1];
982 bool cached;
983 union {
984 struct sockaddr_in v4;
985 struct sockaddr_in6 v6;
986 } local;
987 union {
988 struct sockaddr_in v4;
989 struct sockaddr_in6 v6;
990 } remote;
991 unsigned int if_index;
992 uint16_t ifnet_properties;
993 };
994
995 static struct nstat_tucookie *
996 nstat_tucookie_alloc_internal(
997 struct inpcb *inp,
998 bool ref,
999 bool locked)
1000 {
1001 struct nstat_tucookie *cookie;
1002
1003 cookie = kheap_alloc(KHEAP_NET_STAT, sizeof(*cookie), Z_WAITOK);
1004 if (cookie == NULL) {
1005 return NULL;
1006 }
1007 if (!locked) {
1008 LCK_MTX_ASSERT(&nstat_mtx, LCK_MTX_ASSERT_NOTOWNED);
1009 }
1010 if (ref && in_pcb_checkstate(inp, WNT_ACQUIRE, locked) == WNT_STOPUSING) {
1011 kheap_free(KHEAP_NET_STAT, cookie, sizeof(*cookie));
1012 return NULL;
1013 }
1014 bzero(cookie, sizeof(*cookie));
1015 cookie->inp = inp;
1016 proc_name(inp->inp_socket->last_pid, cookie->pname,
1017 sizeof(cookie->pname));
1018 /*
1019 * We only increment the reference count for UDP sockets because we
1020 * only cache UDP socket tuples.
1021 */
1022 if (SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP) {
1023 OSIncrementAtomic(&inp->inp_nstat_refcnt);
1024 }
1025
1026 return cookie;
1027 }
1028
1029 static struct nstat_tucookie *
1030 nstat_tucookie_alloc(
1031 struct inpcb *inp)
1032 {
1033 return nstat_tucookie_alloc_internal(inp, false, false);
1034 }
1035
1036 static struct nstat_tucookie *
1037 nstat_tucookie_alloc_ref(
1038 struct inpcb *inp)
1039 {
1040 return nstat_tucookie_alloc_internal(inp, true, false);
1041 }
1042
1043 static struct nstat_tucookie *
1044 nstat_tucookie_alloc_ref_locked(
1045 struct inpcb *inp)
1046 {
1047 return nstat_tucookie_alloc_internal(inp, true, true);
1048 }
1049
1050 static void
1051 nstat_tucookie_release_internal(
1052 struct nstat_tucookie *cookie,
1053 int inplock)
1054 {
1055 if (SOCK_PROTO(cookie->inp->inp_socket) == IPPROTO_UDP) {
1056 OSDecrementAtomic(&cookie->inp->inp_nstat_refcnt);
1057 }
1058 in_pcb_checkstate(cookie->inp, WNT_RELEASE, inplock);
1059 kheap_free(KHEAP_NET_STAT, cookie, sizeof(*cookie));
1060 }
1061
1062 static void
1063 nstat_tucookie_release(
1064 struct nstat_tucookie *cookie)
1065 {
1066 nstat_tucookie_release_internal(cookie, false);
1067 }
1068
1069 static void
1070 nstat_tucookie_release_locked(
1071 struct nstat_tucookie *cookie)
1072 {
1073 nstat_tucookie_release_internal(cookie, true);
1074 }
1075
1076
1077 static nstat_provider nstat_tcp_provider;
1078
1079 static errno_t
1080 nstat_tcpudp_lookup(
1081 struct inpcbinfo *inpinfo,
1082 const void *data,
1083 u_int32_t length,
1084 nstat_provider_cookie_t *out_cookie)
1085 {
1086 struct inpcb *inp = NULL;
1087
1088 // parameter validation
1089 const nstat_tcp_add_param *param = (const nstat_tcp_add_param*)data;
1090 if (length < sizeof(*param)) {
1091 return EINVAL;
1092 }
1093
1094 // src and dst must match
1095 if (param->remote.v4.sin_family != 0 &&
1096 param->remote.v4.sin_family != param->local.v4.sin_family) {
1097 return EINVAL;
1098 }
1099
1100
1101 switch (param->local.v4.sin_family) {
1102 case AF_INET:
1103 {
1104 if (param->local.v4.sin_len != sizeof(param->local.v4) ||
1105 (param->remote.v4.sin_family != 0 &&
1106 param->remote.v4.sin_len != sizeof(param->remote.v4))) {
1107 return EINVAL;
1108 }
1109
1110 inp = in_pcblookup_hash(inpinfo, param->remote.v4.sin_addr, param->remote.v4.sin_port,
1111 param->local.v4.sin_addr, param->local.v4.sin_port, 1, NULL);
1112 }
1113 break;
1114
1115 case AF_INET6:
1116 {
1117 union {
1118 const struct in6_addr *in6c;
1119 struct in6_addr *in6;
1120 } local, remote;
1121
1122 if (param->local.v6.sin6_len != sizeof(param->local.v6) ||
1123 (param->remote.v6.sin6_family != 0 &&
1124 param->remote.v6.sin6_len != sizeof(param->remote.v6))) {
1125 return EINVAL;
1126 }
1127
1128 local.in6c = &param->local.v6.sin6_addr;
1129 remote.in6c = &param->remote.v6.sin6_addr;
1130
1131 inp = in6_pcblookup_hash(inpinfo, remote.in6, param->remote.v6.sin6_port,
1132 local.in6, param->local.v6.sin6_port, 1, NULL);
1133 }
1134 break;
1135
1136 default:
1137 return EINVAL;
1138 }
1139
1140 if (inp == NULL) {
1141 return ENOENT;
1142 }
1143
1144 // At this point we have a ref to the inpcb
1145 *out_cookie = nstat_tucookie_alloc(inp);
1146 if (*out_cookie == NULL) {
1147 in_pcb_checkstate(inp, WNT_RELEASE, 0);
1148 }
1149
1150 return 0;
1151 }
1152
1153 static errno_t
1154 nstat_tcp_lookup(
1155 const void *data,
1156 u_int32_t length,
1157 nstat_provider_cookie_t *out_cookie)
1158 {
1159 return nstat_tcpudp_lookup(&tcbinfo, data, length, out_cookie);
1160 }
1161
1162 static int
1163 nstat_tcp_gone(
1164 nstat_provider_cookie_t cookie)
1165 {
1166 struct nstat_tucookie *tucookie =
1167 (struct nstat_tucookie *)cookie;
1168 struct inpcb *inp;
1169 struct tcpcb *tp;
1170
1171 return (!(inp = tucookie->inp) ||
1172 !(tp = intotcpcb(inp)) ||
1173 inp->inp_state == INPCB_STATE_DEAD) ? 1 : 0;
1174 }
1175
1176 static errno_t
1177 nstat_tcp_counts(
1178 nstat_provider_cookie_t cookie,
1179 struct nstat_counts *out_counts,
1180 int *out_gone)
1181 {
1182 struct nstat_tucookie *tucookie =
1183 (struct nstat_tucookie *)cookie;
1184 struct inpcb *inp;
1185
1186 bzero(out_counts, sizeof(*out_counts));
1187
1188 if (out_gone) {
1189 *out_gone = 0;
1190 }
1191
1192 // if the pcb is in the dead state, we should stop using it
1193 if (nstat_tcp_gone(cookie)) {
1194 if (out_gone) {
1195 *out_gone = 1;
1196 }
1197 if (!(inp = tucookie->inp) || !intotcpcb(inp)) {
1198 return EINVAL;
1199 }
1200 }
1201 inp = tucookie->inp;
1202 struct tcpcb *tp = intotcpcb(inp);
1203
1204 atomic_get_64(out_counts->nstat_rxpackets, &inp->inp_stat->rxpackets);
1205 atomic_get_64(out_counts->nstat_rxbytes, &inp->inp_stat->rxbytes);
1206 atomic_get_64(out_counts->nstat_txpackets, &inp->inp_stat->txpackets);
1207 atomic_get_64(out_counts->nstat_txbytes, &inp->inp_stat->txbytes);
1208 out_counts->nstat_rxduplicatebytes = tp->t_stat.rxduplicatebytes;
1209 out_counts->nstat_rxoutoforderbytes = tp->t_stat.rxoutoforderbytes;
1210 out_counts->nstat_txretransmit = tp->t_stat.txretransmitbytes;
1211 out_counts->nstat_connectattempts = tp->t_state >= TCPS_SYN_SENT ? 1 : 0;
1212 out_counts->nstat_connectsuccesses = tp->t_state >= TCPS_ESTABLISHED ? 1 : 0;
1213 out_counts->nstat_avg_rtt = tp->t_srtt;
1214 out_counts->nstat_min_rtt = tp->t_rttbest;
1215 out_counts->nstat_var_rtt = tp->t_rttvar;
1216 if (out_counts->nstat_avg_rtt < out_counts->nstat_min_rtt) {
1217 out_counts->nstat_min_rtt = out_counts->nstat_avg_rtt;
1218 }
1219 atomic_get_64(out_counts->nstat_cell_rxbytes, &inp->inp_cstat->rxbytes);
1220 atomic_get_64(out_counts->nstat_cell_txbytes, &inp->inp_cstat->txbytes);
1221 atomic_get_64(out_counts->nstat_wifi_rxbytes, &inp->inp_wstat->rxbytes);
1222 atomic_get_64(out_counts->nstat_wifi_txbytes, &inp->inp_wstat->txbytes);
1223 atomic_get_64(out_counts->nstat_wired_rxbytes, &inp->inp_Wstat->rxbytes);
1224 atomic_get_64(out_counts->nstat_wired_txbytes, &inp->inp_Wstat->txbytes);
1225
1226 return 0;
1227 }
1228
1229 static void
1230 nstat_tcp_release(
1231 nstat_provider_cookie_t cookie,
1232 int locked)
1233 {
1234 struct nstat_tucookie *tucookie =
1235 (struct nstat_tucookie *)cookie;
1236
1237 nstat_tucookie_release_internal(tucookie, locked);
1238 }
1239
1240 static errno_t
1241 nstat_tcp_add_watcher(
1242 nstat_control_state *state,
1243 nstat_msg_add_all_srcs *req)
1244 {
1245 // There is a tricky issue around getting all TCP sockets added once
1246 // and only once. nstat_tcp_new_pcb() is called prior to the new item
1247 // being placed on any lists where it might be found.
1248 // By locking the tcbinfo.ipi_lock prior to marking the state as a watcher,
1249 // it should be impossible for a new socket to be added twice.
1250 // On the other hand, there is still a timing issue where a new socket
1251 // results in a call to nstat_tcp_new_pcb() before this watcher
1252 // is instantiated and yet the socket doesn't make it into ipi_listhead
1253 // prior to the scan. <rdar://problem/30361716>
1254
1255 errno_t result;
1256
1257 lck_rw_lock_shared(tcbinfo.ipi_lock);
1258 result = nstat_set_provider_filter(state, req);
1259 if (result == 0) {
1260 OSIncrementAtomic(&nstat_tcp_watchers);
1261
1262 // Add all current tcp inpcbs. Ignore those in timewait
1263 struct inpcb *inp;
1264 struct nstat_tucookie *cookie;
1265 LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list)
1266 {
1267 cookie = nstat_tucookie_alloc_ref(inp);
1268 if (cookie == NULL) {
1269 continue;
1270 }
1271 if (nstat_control_source_add(0, state, &nstat_tcp_provider,
1272 cookie) != 0) {
1273 nstat_tucookie_release(cookie);
1274 break;
1275 }
1276 }
1277 }
1278
1279 lck_rw_done(tcbinfo.ipi_lock);
1280
1281 return result;
1282 }
1283
1284 static void
1285 nstat_tcp_remove_watcher(
1286 __unused nstat_control_state *state)
1287 {
1288 OSDecrementAtomic(&nstat_tcp_watchers);
1289 }
1290
1291 __private_extern__ void
1292 nstat_tcp_new_pcb(
1293 struct inpcb *inp)
1294 {
1295 struct nstat_tucookie *cookie;
1296
1297 inp->inp_start_timestamp = mach_continuous_time();
1298
1299 if (nstat_tcp_watchers == 0) {
1300 return;
1301 }
1302
1303 socket_lock(inp->inp_socket, 0);
1304 lck_mtx_lock(&nstat_mtx);
1305 nstat_control_state *state;
1306 for (state = nstat_controls; state; state = state->ncs_next) {
1307 if ((state->ncs_watching & (1 << NSTAT_PROVIDER_TCP_KERNEL)) != 0) {
1308 // this client is watching tcp
1309 // acquire a reference for it
1310 cookie = nstat_tucookie_alloc_ref_locked(inp);
1311 if (cookie == NULL) {
1312 continue;
1313 }
1314 // add the source, if that fails, release the reference
1315 if (nstat_control_source_add(0, state,
1316 &nstat_tcp_provider, cookie) != 0) {
1317 nstat_tucookie_release_locked(cookie);
1318 break;
1319 }
1320 }
1321 }
1322 lck_mtx_unlock(&nstat_mtx);
1323 socket_unlock(inp->inp_socket, 0);
1324 }
1325
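// Called when an inpcb is detached. For every client watching this PCB,
// send a goodbye message, unlink the matching source onto a local dead
// list, and only clean the sources up once nstat_mtx and the per-state
// ncs_mtx have been dropped, per the lock ordering documented above.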
1326 __private_extern__ void
1327 nstat_pcb_detach(struct inpcb *inp)
1328 {
1329 nstat_control_state *state;
1330 nstat_src *src;
1331 tailq_head_nstat_src dead_list;
1332 struct nstat_tucookie *tucookie;
1333 errno_t result;
1334
1335 if (inp == NULL || (nstat_tcp_watchers == 0 && nstat_udp_watchers == 0)) {
1336 return;
1337 }
1338
1339 TAILQ_INIT(&dead_list);
1340 lck_mtx_lock(&nstat_mtx);
1341 for (state = nstat_controls; state; state = state->ncs_next) {
1342 lck_mtx_lock(&state->ncs_mtx);
1343 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
1344 {
1345 nstat_provider_id_t provider_id = src->provider->nstat_provider_id;
1346 if (provider_id == NSTAT_PROVIDER_TCP_KERNEL || provider_id == NSTAT_PROVIDER_UDP_KERNEL) {
1347 tucookie = (struct nstat_tucookie *)src->cookie;
1348 if (tucookie->inp == inp) {
1349 break;
1350 }
1351 }
1352 }
1353
1354 if (src) {
1355 result = nstat_control_send_goodbye(state, src);
1356
1357 TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
1358 TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
1359 }
1360 lck_mtx_unlock(&state->ncs_mtx);
1361 }
1362 lck_mtx_unlock(&nstat_mtx);
1363
1364 while ((src = TAILQ_FIRST(&dead_list))) {
1365 TAILQ_REMOVE(&dead_list, src, ns_control_link);
1366 nstat_control_cleanup_source(NULL, src, TRUE);
1367 }
1368 }
1369
1370 __private_extern__ void
1371 nstat_pcb_event(struct inpcb *inp, u_int64_t event)
1372 {
1373 nstat_control_state *state;
1374 nstat_src *src;
1375 struct nstat_tucookie *tucookie;
1376 errno_t result;
1377 nstat_provider_id_t provider_id;
1378
1379 if (inp == NULL || (nstat_tcp_watchers == 0 && nstat_udp_watchers == 0)) {
1380 return;
1381 }
1382
1383 lck_mtx_lock(&nstat_mtx);
1384 for (state = nstat_controls; state; state = state->ncs_next) {
1385 if (((state->ncs_provider_filters[NSTAT_PROVIDER_TCP_KERNEL].npf_events & event) == 0) &&
1386 ((state->ncs_provider_filters[NSTAT_PROVIDER_UDP_KERNEL].npf_events & event) == 0)) {
1387 continue;
1388 }
1389 lck_mtx_lock(&state->ncs_mtx);
1390 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
1391 {
1392 provider_id = src->provider->nstat_provider_id;
1393 if (provider_id == NSTAT_PROVIDER_TCP_KERNEL || provider_id == NSTAT_PROVIDER_UDP_KERNEL) {
1394 tucookie = (struct nstat_tucookie *)src->cookie;
1395 if (tucookie->inp == inp) {
1396 break;
1397 }
1398 }
1399 }
1400
1401 if (src && ((state->ncs_provider_filters[provider_id].npf_events & event) != 0)) {
1402 result = nstat_control_send_event(state, src, event);
1403 }
1404 lck_mtx_unlock(&state->ncs_mtx);
1405 }
1406 lck_mtx_unlock(&nstat_mtx);
1407 }
1408
1409
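// Snapshot a UDP socket's connection tuple, interface index and interface
// properties into each watcher's tucookie before the inpcb loses them
// (see the nstat_tucookie comment above regarding disconnected UDP sockets).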
1410 __private_extern__ void
1411 nstat_pcb_cache(struct inpcb *inp)
1412 {
1413 nstat_control_state *state;
1414 nstat_src *src;
1415 struct nstat_tucookie *tucookie;
1416
1417 if (inp == NULL || nstat_udp_watchers == 0 ||
1418 inp->inp_nstat_refcnt == 0) {
1419 return;
1420 }
1421 VERIFY(SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP);
1422 lck_mtx_lock(&nstat_mtx);
1423 for (state = nstat_controls; state; state = state->ncs_next) {
1424 lck_mtx_lock(&state->ncs_mtx);
1425 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
1426 {
1427 tucookie = (struct nstat_tucookie *)src->cookie;
1428 if (tucookie->inp == inp) {
1429 if (inp->inp_vflag & INP_IPV6) {
1430 in6_ip6_to_sockaddr(&inp->in6p_laddr,
1431 inp->inp_lport,
1432 &tucookie->local.v6,
1433 sizeof(tucookie->local));
1434 in6_ip6_to_sockaddr(&inp->in6p_faddr,
1435 inp->inp_fport,
1436 &tucookie->remote.v6,
1437 sizeof(tucookie->remote));
1438 } else if (inp->inp_vflag & INP_IPV4) {
1439 nstat_ip_to_sockaddr(&inp->inp_laddr,
1440 inp->inp_lport,
1441 &tucookie->local.v4,
1442 sizeof(tucookie->local));
1443 nstat_ip_to_sockaddr(&inp->inp_faddr,
1444 inp->inp_fport,
1445 &tucookie->remote.v4,
1446 sizeof(tucookie->remote));
1447 }
1448 if (inp->inp_last_outifp) {
1449 tucookie->if_index =
1450 inp->inp_last_outifp->if_index;
1451 }
1452
1453 tucookie->ifnet_properties = nstat_inpcb_to_flags(inp);
1454 tucookie->cached = true;
1455 break;
1456 }
1457 }
1458 lck_mtx_unlock(&state->ncs_mtx);
1459 }
1460 lck_mtx_unlock(&nstat_mtx);
1461 }
1462
1463 __private_extern__ void
1464 nstat_pcb_invalidate_cache(struct inpcb *inp)
1465 {
1466 nstat_control_state *state;
1467 nstat_src *src;
1468 struct nstat_tucookie *tucookie;
1469
1470 if (inp == NULL || nstat_udp_watchers == 0 ||
1471 inp->inp_nstat_refcnt == 0) {
1472 return;
1473 }
1474 VERIFY(SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP);
1475 lck_mtx_lock(&nstat_mtx);
1476 for (state = nstat_controls; state; state = state->ncs_next) {
1477 lck_mtx_lock(&state->ncs_mtx);
1478 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
1479 {
1480 tucookie = (struct nstat_tucookie *)src->cookie;
1481 if (tucookie->inp == inp) {
1482 tucookie->cached = false;
1483 break;
1484 }
1485 }
1486 lck_mtx_unlock(&state->ncs_mtx);
1487 }
1488 lck_mtx_unlock(&nstat_mtx);
1489 }
1490
1491 static errno_t
1492 nstat_tcp_copy_descriptor(
1493 nstat_provider_cookie_t cookie,
1494 void *data,
1495 size_t len)
1496 {
1497 if (len < sizeof(nstat_tcp_descriptor)) {
1498 return EINVAL;
1499 }
1500
1501 if (nstat_tcp_gone(cookie)) {
1502 return EINVAL;
1503 }
1504
1505 nstat_tcp_descriptor *desc = (nstat_tcp_descriptor*)data;
1506 struct nstat_tucookie *tucookie =
1507 (struct nstat_tucookie *)cookie;
1508 struct inpcb *inp = tucookie->inp;
1509 struct tcpcb *tp = intotcpcb(inp);
1510 bzero(desc, sizeof(*desc));
1511
1512 if (inp->inp_vflag & INP_IPV6) {
1513 in6_ip6_to_sockaddr(&inp->in6p_laddr, inp->inp_lport,
1514 &desc->local.v6, sizeof(desc->local));
1515 in6_ip6_to_sockaddr(&inp->in6p_faddr, inp->inp_fport,
1516 &desc->remote.v6, sizeof(desc->remote));
1517 } else if (inp->inp_vflag & INP_IPV4) {
1518 nstat_ip_to_sockaddr(&inp->inp_laddr, inp->inp_lport,
1519 &desc->local.v4, sizeof(desc->local));
1520 nstat_ip_to_sockaddr(&inp->inp_faddr, inp->inp_fport,
1521 &desc->remote.v4, sizeof(desc->remote));
1522 }
1523
1524 desc->state = tp->t_state;
1525 desc->ifindex = (inp->inp_last_outifp == NULL) ? 0 :
1526 inp->inp_last_outifp->if_index;
1527
1528 // danger - not locked, values could be bogus
1529 desc->txunacked = tp->snd_max - tp->snd_una;
1530 desc->txwindow = tp->snd_wnd;
1531 desc->txcwindow = tp->snd_cwnd;
1532
1533 if (CC_ALGO(tp)->name != NULL) {
1534 strlcpy(desc->cc_algo, CC_ALGO(tp)->name,
1535 sizeof(desc->cc_algo));
1536 }
1537
1538 struct socket *so = inp->inp_socket;
1539 if (so) {
1540 // TBD - take the socket lock around these to make sure
1541 // they're in sync?
1542 desc->upid = so->last_upid;
1543 desc->pid = so->last_pid;
1544 desc->traffic_class = so->so_traffic_class;
1545 if ((so->so_flags1 & SOF1_TRAFFIC_MGT_SO_BACKGROUND)) {
1546 desc->traffic_mgt_flags |= TRAFFIC_MGT_SO_BACKGROUND;
1547 }
1548 if ((so->so_flags1 & SOF1_TRAFFIC_MGT_TCP_RECVBG)) {
1549 desc->traffic_mgt_flags |= TRAFFIC_MGT_TCP_RECVBG;
1550 }
1551 proc_name(desc->pid, desc->pname, sizeof(desc->pname));
1552 if (desc->pname[0] == 0) {
1553 strlcpy(desc->pname, tucookie->pname,
1554 sizeof(desc->pname));
1555 } else {
1556 desc->pname[sizeof(desc->pname) - 1] = 0;
1557 strlcpy(tucookie->pname, desc->pname,
1558 sizeof(tucookie->pname));
1559 }
1560 memcpy(desc->uuid, so->last_uuid, sizeof(so->last_uuid));
1561 memcpy(desc->vuuid, so->so_vuuid, sizeof(so->so_vuuid));
1562 if (so->so_flags & SOF_DELEGATED) {
1563 desc->eupid = so->e_upid;
1564 desc->epid = so->e_pid;
1565 memcpy(desc->euuid, so->e_uuid, sizeof(so->e_uuid));
1566 } else {
1567 desc->eupid = desc->upid;
1568 desc->epid = desc->pid;
1569 memcpy(desc->euuid, desc->uuid, sizeof(desc->uuid));
1570 }
1571 uuid_copy(desc->fuuid, inp->necp_client_uuid);
1572 desc->sndbufsize = so->so_snd.sb_hiwat;
1573 desc->sndbufused = so->so_snd.sb_cc;
1574 desc->rcvbufsize = so->so_rcv.sb_hiwat;
1575 desc->rcvbufused = so->so_rcv.sb_cc;
1576 }
1577
1578 tcp_get_connectivity_status(tp, &desc->connstatus);
1579 desc->ifnet_properties = nstat_inpcb_to_flags(inp);
1580 inp_get_activity_bitmap(inp, &desc->activity_bitmap);
1581 desc->start_timestamp = inp->inp_start_timestamp;
1582 desc->timestamp = mach_continuous_time();
1583 return 0;
1584 }
1585
1586 static bool
1587 nstat_tcpudp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter, bool is_UDP)
1588 {
1589 bool retval = true;
1590
1591 if ((filter->npf_flags & (NSTAT_FILTER_IFNET_FLAGS | NSTAT_FILTER_SPECIFIC_USER)) != 0) {
1592 struct nstat_tucookie *tucookie = (struct nstat_tucookie *)cookie;
1593 struct inpcb *inp = tucookie->inp;
1594
1595 /* Only apply interface filter if at least one is allowed. */
1596 if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0) {
1597 uint16_t interface_properties = nstat_inpcb_to_flags(inp);
1598
1599 if ((filter->npf_flags & interface_properties) == 0) {
1600 // For UDP, we could have an undefined interface and yet transfers may have occurred.
1601 // We allow reporting if there have been transfers of the requested kind.
1602 // This is imperfect as we cannot account for the expensive attribute over wifi.
1603 // We also assume that cellular is expensive and we have no way to select for AWDL
1604 if (is_UDP) {
1605 do {
1606 if ((filter->npf_flags & (NSTAT_FILTER_ACCEPT_CELLULAR | NSTAT_FILTER_ACCEPT_EXPENSIVE)) &&
1607 (inp->inp_cstat->rxbytes || inp->inp_cstat->txbytes)) {
1608 break;
1609 }
1610 if ((filter->npf_flags & NSTAT_FILTER_ACCEPT_WIFI) &&
1611 (inp->inp_wstat->rxbytes || inp->inp_wstat->txbytes)) {
1612 break;
1613 }
1614 if ((filter->npf_flags & NSTAT_FILTER_ACCEPT_WIRED) &&
1615 (inp->inp_Wstat->rxbytes || inp->inp_Wstat->txbytes)) {
1616 break;
1617 }
1618 return false;
1619 } while (0);
1620 } else {
1621 return false;
1622 }
1623 }
1624 }
1625
1626 if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0) && (retval)) {
1627 struct socket *so = inp->inp_socket;
1628 retval = false;
1629
1630 if (so) {
1631 if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_PID) != 0) &&
1632 (filter->npf_pid == so->last_pid)) {
1633 retval = true;
1634 } else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EPID) != 0) &&
1635 (filter->npf_pid == ((so->so_flags & SOF_DELEGATED) ? so->e_pid : so->last_pid))) {
1636 retval = true;
1637 } else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_UUID) != 0) &&
1638 (memcmp(filter->npf_uuid, so->last_uuid, sizeof(so->last_uuid)) == 0)) {
1639 retval = true;
1640 } else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EUUID) != 0) &&
1641 (memcmp(filter->npf_uuid, (so->so_flags & SOF_DELEGATED)? so->e_uuid : so->last_uuid,
1642 sizeof(so->last_uuid)) == 0)) {
1643 retval = true;
1644 }
1645 }
1646 }
1647 }
1648 return retval;
1649 }
1650
1651 static bool
1652 nstat_tcp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter)
1653 {
1654 return nstat_tcpudp_reporting_allowed(cookie, filter, FALSE);
1655 }
1656
1657 static void
1658 nstat_init_tcp_provider(void)
1659 {
1660 bzero(&nstat_tcp_provider, sizeof(nstat_tcp_provider));
1661 nstat_tcp_provider.nstat_descriptor_length = sizeof(nstat_tcp_descriptor);
1662 nstat_tcp_provider.nstat_provider_id = NSTAT_PROVIDER_TCP_KERNEL;
1663 nstat_tcp_provider.nstat_lookup = nstat_tcp_lookup;
1664 nstat_tcp_provider.nstat_gone = nstat_tcp_gone;
1665 nstat_tcp_provider.nstat_counts = nstat_tcp_counts;
1666 nstat_tcp_provider.nstat_release = nstat_tcp_release;
1667 nstat_tcp_provider.nstat_watcher_add = nstat_tcp_add_watcher;
1668 nstat_tcp_provider.nstat_watcher_remove = nstat_tcp_remove_watcher;
1669 nstat_tcp_provider.nstat_copy_descriptor = nstat_tcp_copy_descriptor;
1670 nstat_tcp_provider.nstat_reporting_allowed = nstat_tcp_reporting_allowed;
1671 nstat_tcp_provider.next = nstat_providers;
1672 nstat_providers = &nstat_tcp_provider;
1673 }
1674
1675 #pragma mark -- UDP Provider --
1676
1677 static nstat_provider nstat_udp_provider;
1678
1679 static errno_t
1680 nstat_udp_lookup(
1681 const void *data,
1682 u_int32_t length,
1683 nstat_provider_cookie_t *out_cookie)
1684 {
1685 return nstat_tcpudp_lookup(&udbinfo, data, length, out_cookie);
1686 }
1687
1688 static int
1689 nstat_udp_gone(
1690 nstat_provider_cookie_t cookie)
1691 {
1692 struct nstat_tucookie *tucookie =
1693 (struct nstat_tucookie *)cookie;
1694 struct inpcb *inp;
1695
1696 return (!(inp = tucookie->inp) ||
1697 inp->inp_state == INPCB_STATE_DEAD) ? 1 : 0;
1698 }
1699
1700 static errno_t
1701 nstat_udp_counts(
1702 nstat_provider_cookie_t cookie,
1703 struct nstat_counts *out_counts,
1704 int *out_gone)
1705 {
1706 struct nstat_tucookie *tucookie =
1707 (struct nstat_tucookie *)cookie;
1708
1709 if (out_gone) {
1710 *out_gone = 0;
1711 }
1712
1713 // if the pcb is in the dead state, we should stop using it
1714 if (nstat_udp_gone(cookie)) {
1715 if (out_gone) {
1716 *out_gone = 1;
1717 }
1718 if (!tucookie->inp) {
1719 return EINVAL;
1720 }
1721 }
1722 struct inpcb *inp = tucookie->inp;
1723
1724 atomic_get_64(out_counts->nstat_rxpackets, &inp->inp_stat->rxpackets);
1725 atomic_get_64(out_counts->nstat_rxbytes, &inp->inp_stat->rxbytes);
1726 atomic_get_64(out_counts->nstat_txpackets, &inp->inp_stat->txpackets);
1727 atomic_get_64(out_counts->nstat_txbytes, &inp->inp_stat->txbytes);
1728 atomic_get_64(out_counts->nstat_cell_rxbytes, &inp->inp_cstat->rxbytes);
1729 atomic_get_64(out_counts->nstat_cell_txbytes, &inp->inp_cstat->txbytes);
1730 atomic_get_64(out_counts->nstat_wifi_rxbytes, &inp->inp_wstat->rxbytes);
1731 atomic_get_64(out_counts->nstat_wifi_txbytes, &inp->inp_wstat->txbytes);
1732 atomic_get_64(out_counts->nstat_wired_rxbytes, &inp->inp_Wstat->rxbytes);
1733 atomic_get_64(out_counts->nstat_wired_txbytes, &inp->inp_Wstat->txbytes);
1734
1735 return 0;
1736 }
1737
1738 static void
1739 nstat_udp_release(
1740 nstat_provider_cookie_t cookie,
1741 int locked)
1742 {
1743 struct nstat_tucookie *tucookie =
1744 (struct nstat_tucookie *)cookie;
1745
1746 nstat_tucookie_release_internal(tucookie, locked);
1747 }
1748
1749 static errno_t
1750 nstat_udp_add_watcher(
1751 nstat_control_state *state,
1752 nstat_msg_add_all_srcs *req)
1753 {
1754 // There is a tricky issue around getting all UDP sockets added once
1755 // and only once. nstat_udp_new_pcb() is called prior to the new item
1756 // being placed on any lists where it might be found.
1757 // By locking the udpinfo.ipi_lock prior to marking the state as a watcher,
1758 // it should be impossible for a new socket to be added twice.
1759 // On the other hand, there is still a timing issue where a new socket
1760 // results in a call to nstat_udp_new_pcb() before this watcher
1761 // is instantiated and yet the socket doesn't make it into ipi_listhead
1762 // prior to the scan. <rdar://problem/30361716>
1763
1764 errno_t result;
1765
1766 lck_rw_lock_shared(udbinfo.ipi_lock);
1767 result = nstat_set_provider_filter(state, req);
1768
1769 if (result == 0) {
1770 struct inpcb *inp;
1771 struct nstat_tucookie *cookie;
1772
1773 OSIncrementAtomic(&nstat_udp_watchers);
1774
1775 // Add all current UDP inpcbs.
1776 LIST_FOREACH(inp, udbinfo.ipi_listhead, inp_list)
1777 {
1778 cookie = nstat_tucookie_alloc_ref(inp);
1779 if (cookie == NULL) {
1780 continue;
1781 }
1782 if (nstat_control_source_add(0, state, &nstat_udp_provider,
1783 cookie) != 0) {
1784 nstat_tucookie_release(cookie);
1785 break;
1786 }
1787 }
1788 }
1789
1790 lck_rw_done(udbinfo.ipi_lock);
1791
1792 return result;
1793 }
1794
1795 static void
1796 nstat_udp_remove_watcher(
1797 __unused nstat_control_state *state)
1798 {
1799 OSDecrementAtomic(&nstat_udp_watchers);
1800 }
1801
1802 __private_extern__ void
1803 nstat_udp_new_pcb(
1804 struct inpcb *inp)
1805 {
1806 struct nstat_tucookie *cookie;
1807
1808 inp->inp_start_timestamp = mach_continuous_time();
1809
1810 if (nstat_udp_watchers == 0) {
1811 return;
1812 }
1813
1814 socket_lock(inp->inp_socket, 0);
1815 lck_mtx_lock(&nstat_mtx);
1816 nstat_control_state *state;
1817 for (state = nstat_controls; state; state = state->ncs_next) {
1818 if ((state->ncs_watching & (1 << NSTAT_PROVIDER_UDP_KERNEL)) != 0) {
1819 // this client is watching udp
1820 // acquire a reference for it
1821 cookie = nstat_tucookie_alloc_ref_locked(inp);
1822 if (cookie == NULL) {
1823 continue;
1824 }
1825 // add the source, if that fails, release the reference
1826 if (nstat_control_source_add(0, state,
1827 &nstat_udp_provider, cookie) != 0) {
1828 nstat_tucookie_release_locked(cookie);
1829 break;
1830 }
1831 }
1832 }
1833 lck_mtx_unlock(&nstat_mtx);
1834 socket_unlock(inp->inp_socket, 0);
1835 }
1836
1837 static errno_t
1838 nstat_udp_copy_descriptor(
1839 nstat_provider_cookie_t cookie,
1840 void *data,
1841 size_t len)
1842 {
1843 if (len < sizeof(nstat_udp_descriptor)) {
1844 return EINVAL;
1845 }
1846
1847 if (nstat_udp_gone(cookie)) {
1848 return EINVAL;
1849 }
1850
1851 struct nstat_tucookie *tucookie =
1852 (struct nstat_tucookie *)cookie;
1853 nstat_udp_descriptor *desc = (nstat_udp_descriptor*)data;
1854 struct inpcb *inp = tucookie->inp;
1855
1856 bzero(desc, sizeof(*desc));
1857
1858 if (tucookie->cached == false) {
1859 if (inp->inp_vflag & INP_IPV6) {
1860 in6_ip6_to_sockaddr(&inp->in6p_laddr, inp->inp_lport,
1861 &desc->local.v6, sizeof(desc->local.v6));
1862 in6_ip6_to_sockaddr(&inp->in6p_faddr, inp->inp_fport,
1863 &desc->remote.v6, sizeof(desc->remote.v6));
1864 } else if (inp->inp_vflag & INP_IPV4) {
1865 nstat_ip_to_sockaddr(&inp->inp_laddr, inp->inp_lport,
1866 &desc->local.v4, sizeof(desc->local.v4));
1867 nstat_ip_to_sockaddr(&inp->inp_faddr, inp->inp_fport,
1868 &desc->remote.v4, sizeof(desc->remote.v4));
1869 }
1870 desc->ifnet_properties = nstat_inpcb_to_flags(inp);
1871 } else {
1872 if (inp->inp_vflag & INP_IPV6) {
1873 memcpy(&desc->local.v6, &tucookie->local.v6,
1874 sizeof(desc->local.v6));
1875 memcpy(&desc->remote.v6, &tucookie->remote.v6,
1876 sizeof(desc->remote.v6));
1877 } else if (inp->inp_vflag & INP_IPV4) {
1878 memcpy(&desc->local.v4, &tucookie->local.v4,
1879 sizeof(desc->local.v4));
1880 memcpy(&desc->remote.v4, &tucookie->remote.v4,
1881 sizeof(desc->remote.v4));
1882 }
1883 desc->ifnet_properties = tucookie->ifnet_properties;
1884 }
1885
1886 if (inp->inp_last_outifp) {
1887 desc->ifindex = inp->inp_last_outifp->if_index;
1888 } else {
1889 desc->ifindex = tucookie->if_index;
1890 }
1891
1892 struct socket *so = inp->inp_socket;
1893 if (so) {
1894 // TBD - take the socket lock around these to make sure
1895 // they're in sync?
1896 desc->upid = so->last_upid;
1897 desc->pid = so->last_pid;
1898 proc_name(desc->pid, desc->pname, sizeof(desc->pname));
1899 if (desc->pname[0] == 0) {
1900 strlcpy(desc->pname, tucookie->pname,
1901 sizeof(desc->pname));
1902 } else {
1903 desc->pname[sizeof(desc->pname) - 1] = 0;
1904 strlcpy(tucookie->pname, desc->pname,
1905 sizeof(tucookie->pname));
1906 }
1907 memcpy(desc->uuid, so->last_uuid, sizeof(so->last_uuid));
1908 memcpy(desc->vuuid, so->so_vuuid, sizeof(so->so_vuuid));
1909 if (so->so_flags & SOF_DELEGATED) {
1910 desc->eupid = so->e_upid;
1911 desc->epid = so->e_pid;
1912 memcpy(desc->euuid, so->e_uuid, sizeof(so->e_uuid));
1913 } else {
1914 desc->eupid = desc->upid;
1915 desc->epid = desc->pid;
1916 memcpy(desc->euuid, desc->uuid, sizeof(desc->uuid));
1917 }
1918 uuid_copy(desc->fuuid, inp->necp_client_uuid);
1919 desc->rcvbufsize = so->so_rcv.sb_hiwat;
1920 desc->rcvbufused = so->so_rcv.sb_cc;
1921 desc->traffic_class = so->so_traffic_class;
1922 inp_get_activity_bitmap(inp, &desc->activity_bitmap);
1923 desc->start_timestamp = inp->inp_start_timestamp;
1924 desc->timestamp = mach_continuous_time();
1925 }
1926
1927 return 0;
1928 }
1929
1930 static bool
1931 nstat_udp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter)
1932 {
1933 return nstat_tcpudp_reporting_allowed(cookie, filter, TRUE);
1934 }
1935
1936
1937 static void
1938 nstat_init_udp_provider(void)
1939 {
1940 bzero(&nstat_udp_provider, sizeof(nstat_udp_provider));
1941 nstat_udp_provider.nstat_provider_id = NSTAT_PROVIDER_UDP_KERNEL;
1942 nstat_udp_provider.nstat_descriptor_length = sizeof(nstat_udp_descriptor);
1943 nstat_udp_provider.nstat_lookup = nstat_udp_lookup;
1944 nstat_udp_provider.nstat_gone = nstat_udp_gone;
1945 nstat_udp_provider.nstat_counts = nstat_udp_counts;
1946 nstat_udp_provider.nstat_watcher_add = nstat_udp_add_watcher;
1947 nstat_udp_provider.nstat_watcher_remove = nstat_udp_remove_watcher;
1948 nstat_udp_provider.nstat_copy_descriptor = nstat_udp_copy_descriptor;
1949 nstat_udp_provider.nstat_release = nstat_udp_release;
1950 nstat_udp_provider.nstat_reporting_allowed = nstat_udp_reporting_allowed;
1951 nstat_udp_provider.next = nstat_providers;
1952 nstat_providers = &nstat_udp_provider;
1953 }
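/*
 * Like the other nstat_init_*_provider() routines in this file (see the
 * ifnet provider below), this pushes the provider onto the head of the
 * global nstat_providers singly linked list via the 'next' pointer.
 */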
1954
1955
1956
1957 #pragma mark -- ifnet Provider --
1958
1959 static nstat_provider nstat_ifnet_provider;
1960
1961 /*
1962 * We store a pointer to the ifnet and the original threshold
1963 * requested by the client.
1964 */
1965 struct nstat_ifnet_cookie {
1966 struct ifnet *ifp;
1967 uint64_t threshold;
1968 };
1969
1970 static errno_t
1971 nstat_ifnet_lookup(
1972 const void *data,
1973 u_int32_t length,
1974 nstat_provider_cookie_t *out_cookie)
1975 {
1976 const nstat_ifnet_add_param *param = (const nstat_ifnet_add_param *)data;
1977 struct ifnet *ifp;
1978 boolean_t changed = FALSE;
1979 nstat_control_state *state;
1980 nstat_src *src;
1981 struct nstat_ifnet_cookie *cookie;
1982
1983 if (length < sizeof(*param) || param->threshold < 1024 * 1024) {
1984 return EINVAL;
1985 }
1986 if (nstat_privcheck != 0) {
1987 errno_t result = priv_check_cred(kauth_cred_get(),
1988 PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0);
1989 if (result != 0) {
1990 return result;
1991 }
1992 }
1993 cookie = kheap_alloc(KHEAP_NET_STAT, sizeof(*cookie), Z_WAITOK | Z_ZERO);
1994 if (cookie == NULL) {
1995 return ENOMEM;
1996 }
1997
1998 ifnet_head_lock_shared();
1999 TAILQ_FOREACH(ifp, &ifnet_head, if_link)
2000 {
2001 ifnet_lock_exclusive(ifp);
2002 if (ifp->if_index == param->ifindex) {
2003 cookie->ifp = ifp;
2004 cookie->threshold = param->threshold;
2005 *out_cookie = cookie;
2006 if (!ifp->if_data_threshold ||
2007 ifp->if_data_threshold > param->threshold) {
2008 changed = TRUE;
2009 ifp->if_data_threshold = param->threshold;
2010 }
2011 ifnet_lock_done(ifp);
2012 ifnet_reference(ifp);
2013 break;
2014 }
2015 ifnet_lock_done(ifp);
2016 }
2017 ifnet_head_done();
2018
2019 /*
2020 * When we change the threshold to something smaller, we notify
2021 * all of our clients with a description message.
2022 * We won't send a message to the client we are currently serving
2023 * because it has no `ifnet source' yet.
2024 */
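/*
 * The re-sent description matters because nstat_ifnet_copy_descriptor()
 * reports if_data_threshold in the descriptor, so any descriptor the
 * other clients received before this point is now stale.
 */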
2025 if (changed) {
2026 lck_mtx_lock(&nstat_mtx);
2027 for (state = nstat_controls; state; state = state->ncs_next) {
2028 lck_mtx_lock(&state->ncs_mtx);
2029 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
2030 {
2031 if (src->provider != &nstat_ifnet_provider) {
2032 continue;
2033 }
2034 nstat_control_send_description(state, src, 0, 0);
2035 }
2036 lck_mtx_unlock(&state->ncs_mtx);
2037 }
2038 lck_mtx_unlock(&nstat_mtx);
2039 }
2040 if (cookie->ifp == NULL) {
2041 kheap_free(KHEAP_NET_STAT, cookie, sizeof(*cookie));
2042 }
2043
2044 return ifp ? 0 : EINVAL;
2045 }
2046
2047 static int
2048 nstat_ifnet_gone(
2049 nstat_provider_cookie_t cookie)
2050 {
2051 struct ifnet *ifp;
2052 struct nstat_ifnet_cookie *ifcookie =
2053 (struct nstat_ifnet_cookie *)cookie;
2054
2055 ifnet_head_lock_shared();
2056 TAILQ_FOREACH(ifp, &ifnet_head, if_link)
2057 {
2058 if (ifp == ifcookie->ifp) {
2059 break;
2060 }
2061 }
2062 ifnet_head_done();
2063
2064 return ifp ? 0 : 1;
2065 }
2066
2067 static errno_t
2068 nstat_ifnet_counts(
2069 nstat_provider_cookie_t cookie,
2070 struct nstat_counts *out_counts,
2071 int *out_gone)
2072 {
2073 struct nstat_ifnet_cookie *ifcookie =
2074 (struct nstat_ifnet_cookie *)cookie;
2075 struct ifnet *ifp = ifcookie->ifp;
2076
2077 if (out_gone) {
2078 *out_gone = 0;
2079 }
2080
2081 // if the ifnet is gone, we should stop using it
2082 if (nstat_ifnet_gone(cookie)) {
2083 if (out_gone) {
2084 *out_gone = 1;
2085 }
2086 return EINVAL;
2087 }
2088
2089 bzero(out_counts, sizeof(*out_counts));
2090 out_counts->nstat_rxpackets = ifp->if_ipackets;
2091 out_counts->nstat_rxbytes = ifp->if_ibytes;
2092 out_counts->nstat_txpackets = ifp->if_opackets;
2093 out_counts->nstat_txbytes = ifp->if_obytes;
2094 out_counts->nstat_cell_rxbytes = out_counts->nstat_cell_txbytes = 0;
2095 return 0;
2096 }
2097
2098 static void
2099 nstat_ifnet_release(
2100 nstat_provider_cookie_t cookie,
2101 __unused int locked)
2102 {
2103 struct nstat_ifnet_cookie *ifcookie;
2104 struct ifnet *ifp;
2105 nstat_control_state *state;
2106 nstat_src *src;
2107 uint64_t minthreshold = UINT64_MAX;
2108
2109 /*
2110 * Find all the clients that requested a threshold
2111 * for this ifnet and re-calculate if_data_threshold.
2112 */
2113 lck_mtx_lock(&nstat_mtx);
2114 for (state = nstat_controls; state; state = state->ncs_next) {
2115 lck_mtx_lock(&state->ncs_mtx);
2116 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
2117 {
2118 /* Skip the provider we are about to detach. */
2119 if (src->provider != &nstat_ifnet_provider ||
2120 src->cookie == cookie) {
2121 continue;
2122 }
2123 ifcookie = (struct nstat_ifnet_cookie *)src->cookie;
2124 if (ifcookie->threshold < minthreshold) {
2125 minthreshold = ifcookie->threshold;
2126 }
2127 }
2128 lck_mtx_unlock(&state->ncs_mtx);
2129 }
2130 lck_mtx_unlock(&nstat_mtx);
2131 /*
2132 * Reset if_data_threshold or disable it.
2133 */
2134 ifcookie = (struct nstat_ifnet_cookie *)cookie;
2135 ifp = ifcookie->ifp;
2136 if (ifnet_is_attached(ifp, 1)) {
2137 ifnet_lock_exclusive(ifp);
2138 if (minthreshold == UINT64_MAX) {
2139 ifp->if_data_threshold = 0;
2140 } else {
2141 ifp->if_data_threshold = minthreshold;
2142 }
2143 ifnet_lock_done(ifp);
2144 ifnet_decr_iorefcnt(ifp);
2145 }
2146 ifnet_release(ifp);
2147 kheap_free(KHEAP_NET_STAT, ifcookie, sizeof(*ifcookie));
2148 }
2149
2150 static void
2151 nstat_ifnet_copy_link_status(
2152 struct ifnet *ifp,
2153 struct nstat_ifnet_descriptor *desc)
2154 {
2155 struct if_link_status *ifsr = ifp->if_link_status;
2156 nstat_ifnet_desc_link_status *link_status = &desc->link_status;
2157
2158 link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_NONE;
2159 if (ifsr == NULL) {
2160 return;
2161 }
2162
2163 lck_rw_lock_shared(&ifp->if_link_status_lock);
2164
2165 if (ifp->if_type == IFT_CELLULAR) {
2166 nstat_ifnet_desc_cellular_status *cell_status = &link_status->u.cellular;
2167 struct if_cellular_status_v1 *if_cell_sr =
2168 &ifsr->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
2169
2170 if (ifsr->ifsr_version != IF_CELLULAR_STATUS_REPORT_VERSION_1) {
2171 goto done;
2172 }
2173
2174 link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_CELLULAR;
2175
2176 if (if_cell_sr->valid_bitmask & IF_CELL_LINK_QUALITY_METRIC_VALID) {
2177 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_LINK_QUALITY_METRIC_VALID;
2178 cell_status->link_quality_metric = if_cell_sr->link_quality_metric;
2179 }
2180 if (if_cell_sr->valid_bitmask & IF_CELL_UL_EFFECTIVE_BANDWIDTH_VALID) {
2181 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_EFFECTIVE_BANDWIDTH_VALID;
2182 cell_status->ul_effective_bandwidth = if_cell_sr->ul_effective_bandwidth;
2183 }
2184 if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_BANDWIDTH_VALID) {
2185 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_BANDWIDTH_VALID;
2186 cell_status->ul_max_bandwidth = if_cell_sr->ul_max_bandwidth;
2187 }
2188 if (if_cell_sr->valid_bitmask & IF_CELL_UL_MIN_LATENCY_VALID) {
2189 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MIN_LATENCY_VALID;
2190 cell_status->ul_min_latency = if_cell_sr->ul_min_latency;
2191 }
2192 if (if_cell_sr->valid_bitmask & IF_CELL_UL_EFFECTIVE_LATENCY_VALID) {
2193 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_EFFECTIVE_LATENCY_VALID;
2194 cell_status->ul_effective_latency = if_cell_sr->ul_effective_latency;
2195 }
2196 if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_LATENCY_VALID) {
2197 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_LATENCY_VALID;
2198 cell_status->ul_max_latency = if_cell_sr->ul_max_latency;
2199 }
2200 if (if_cell_sr->valid_bitmask & IF_CELL_UL_RETXT_LEVEL_VALID) {
2201 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_VALID;
2202 if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_NONE) {
2203 cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_NONE;
2204 } else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_LOW) {
2205 cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_LOW;
2206 } else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_MEDIUM) {
2207 cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_MEDIUM;
2208 } else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_HIGH) {
2209 cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_HIGH;
2210 } else {
2211 cell_status->valid_bitmask &= ~NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_VALID;
2212 }
2213 }
2214 if (if_cell_sr->valid_bitmask & IF_CELL_UL_BYTES_LOST_VALID) {
2215 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_BYTES_LOST_VALID;
2216 cell_status->ul_bytes_lost = if_cell_sr->ul_bytes_lost;
2217 }
2218 if (if_cell_sr->valid_bitmask & IF_CELL_UL_MIN_QUEUE_SIZE_VALID) {
2219 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MIN_QUEUE_SIZE_VALID;
2220 cell_status->ul_min_queue_size = if_cell_sr->ul_min_queue_size;
2221 }
2222 if (if_cell_sr->valid_bitmask & IF_CELL_UL_AVG_QUEUE_SIZE_VALID) {
2223 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_AVG_QUEUE_SIZE_VALID;
2224 cell_status->ul_avg_queue_size = if_cell_sr->ul_avg_queue_size;
2225 }
2226 if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_QUEUE_SIZE_VALID) {
2227 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_QUEUE_SIZE_VALID;
2228 cell_status->ul_max_queue_size = if_cell_sr->ul_max_queue_size;
2229 }
2230 if (if_cell_sr->valid_bitmask & IF_CELL_DL_EFFECTIVE_BANDWIDTH_VALID) {
2231 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_DL_EFFECTIVE_BANDWIDTH_VALID;
2232 cell_status->dl_effective_bandwidth = if_cell_sr->dl_effective_bandwidth;
2233 }
2234 if (if_cell_sr->valid_bitmask & IF_CELL_DL_MAX_BANDWIDTH_VALID) {
2235 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_DL_MAX_BANDWIDTH_VALID;
2236 cell_status->dl_max_bandwidth = if_cell_sr->dl_max_bandwidth;
2237 }
2238 if (if_cell_sr->valid_bitmask & IF_CELL_CONFIG_INACTIVITY_TIME_VALID) {
2239 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_CONFIG_INACTIVITY_TIME_VALID;
2240 cell_status->config_inactivity_time = if_cell_sr->config_inactivity_time;
2241 }
2242 if (if_cell_sr->valid_bitmask & IF_CELL_CONFIG_BACKOFF_TIME_VALID) {
2243 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_CONFIG_BACKOFF_TIME_VALID;
2244 cell_status->config_backoff_time = if_cell_sr->config_backoff_time;
2245 }
2246 if (if_cell_sr->valid_bitmask & IF_CELL_UL_MSS_RECOMMENDED_VALID) {
2247 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_MSS_RECOMMENDED_VALID;
2248 cell_status->mss_recommended = if_cell_sr->mss_recommended;
2249 }
2250 } else if (IFNET_IS_WIFI(ifp)) {
2251 nstat_ifnet_desc_wifi_status *wifi_status = &link_status->u.wifi;
2252 struct if_wifi_status_v1 *if_wifi_sr =
2253 &ifsr->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;
2254
2255 if (ifsr->ifsr_version != IF_WIFI_STATUS_REPORT_VERSION_1) {
2256 goto done;
2257 }
2258
2259 link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_WIFI;
2260
2261 if (if_wifi_sr->valid_bitmask & IF_WIFI_LINK_QUALITY_METRIC_VALID) {
2262 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_LINK_QUALITY_METRIC_VALID;
2263 wifi_status->link_quality_metric = if_wifi_sr->link_quality_metric;
2264 }
2265 if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID) {
2266 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID;
2267 wifi_status->ul_effective_bandwidth = if_wifi_sr->ul_effective_bandwidth;
2268 }
2269 if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MAX_BANDWIDTH_VALID) {
2270 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MAX_BANDWIDTH_VALID;
2271 wifi_status->ul_max_bandwidth = if_wifi_sr->ul_max_bandwidth;
2272 }
2273 if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MIN_LATENCY_VALID) {
2274 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MIN_LATENCY_VALID;
2275 wifi_status->ul_min_latency = if_wifi_sr->ul_min_latency;
2276 }
2277 if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_EFFECTIVE_LATENCY_VALID) {
2278 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_EFFECTIVE_LATENCY_VALID;
2279 wifi_status->ul_effective_latency = if_wifi_sr->ul_effective_latency;
2280 }
2281 if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MAX_LATENCY_VALID) {
2282 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MAX_LATENCY_VALID;
2283 wifi_status->ul_max_latency = if_wifi_sr->ul_max_latency;
2284 }
2285 if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_RETXT_LEVEL_VALID) {
2286 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_VALID;
2287 if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_NONE) {
2288 wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_NONE;
2289 } else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_LOW) {
2290 wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_LOW;
2291 } else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_MEDIUM) {
2292 wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_MEDIUM;
2293 } else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_HIGH) {
2294 wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_HIGH;
2295 } else {
2296 wifi_status->valid_bitmask &= ~NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_VALID;
2297 }
2298 }
2299 if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_BYTES_LOST_VALID) {
2300 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_BYTES_LOST_VALID;
2301 wifi_status->ul_bytes_lost = if_wifi_sr->ul_bytes_lost;
2302 }
2303 if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_ERROR_RATE_VALID) {
2304 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_ERROR_RATE_VALID;
2305 wifi_status->ul_error_rate = if_wifi_sr->ul_error_rate;
2306 }
2307 if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID) {
2308 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID;
2309 wifi_status->dl_effective_bandwidth = if_wifi_sr->dl_effective_bandwidth;
2310 }
2311 if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MAX_BANDWIDTH_VALID) {
2312 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MAX_BANDWIDTH_VALID;
2313 wifi_status->dl_max_bandwidth = if_wifi_sr->dl_max_bandwidth;
2314 }
2315 if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MIN_LATENCY_VALID) {
2316 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MIN_LATENCY_VALID;
2317 wifi_status->dl_min_latency = if_wifi_sr->dl_min_latency;
2318 }
2319 if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_EFFECTIVE_LATENCY_VALID) {
2320 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_EFFECTIVE_LATENCY_VALID;
2321 wifi_status->dl_effective_latency = if_wifi_sr->dl_effective_latency;
2322 }
2323 if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MAX_LATENCY_VALID) {
2324 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MAX_LATENCY_VALID;
2325 wifi_status->dl_max_latency = if_wifi_sr->dl_max_latency;
2326 }
2327 if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_ERROR_RATE_VALID) {
2328 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_ERROR_RATE_VALID;
2329 wifi_status->dl_error_rate = if_wifi_sr->dl_error_rate;
2330 }
2331 if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_FREQUENCY_VALID) {
2332 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_VALID;
2333 if (if_wifi_sr->config_frequency == IF_WIFI_CONFIG_FREQUENCY_2_4_GHZ) {
2334 wifi_status->config_frequency = NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_2_4_GHZ;
2335 } else if (if_wifi_sr->config_frequency == IF_WIFI_CONFIG_FREQUENCY_5_0_GHZ) {
2336 wifi_status->config_frequency = NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_5_0_GHZ;
2337 } else {
2338 wifi_status->valid_bitmask &= ~NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_VALID;
2339 }
2340 }
2341 if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_MULTICAST_RATE_VALID) {
2342 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_MULTICAST_RATE_VALID;
2343 wifi_status->config_multicast_rate = if_wifi_sr->config_multicast_rate;
2344 }
2345 if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_SCAN_COUNT_VALID) {
2346 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_SCAN_COUNT_VALID;
2347 wifi_status->scan_count = if_wifi_sr->scan_count;
2348 }
2349 if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_SCAN_DURATION_VALID) {
2350 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_SCAN_DURATION_VALID;
2351 wifi_status->scan_duration = if_wifi_sr->scan_duration;
2352 }
2353 }
2354
2355 done:
2356 lck_rw_done(&ifp->if_link_status_lock);
2357 }
2358
2359 static u_int64_t nstat_ifnet_last_report_time = 0;
2360 extern int tcp_report_stats_interval;
2361
2362 static void
2363 nstat_ifnet_compute_percentages(struct if_tcp_ecn_perf_stat *ifst)
2364 {
2365 /* Retransmit percentage */
2366 if (ifst->total_rxmitpkts > 0 && ifst->total_txpkts > 0) {
2367 /* shift by 10 for precision */
2368 ifst->rxmit_percent =
2369 ((ifst->total_rxmitpkts << 10) * 100) / ifst->total_txpkts;
2370 } else {
2371 ifst->rxmit_percent = 0;
2372 }
2373
2374 /* Out-of-order percentage */
2375 if (ifst->total_oopkts > 0 && ifst->total_rxpkts > 0) {
2376 /* shift by 10 for precision */
2377 ifst->oo_percent =
2378 ((ifst->total_oopkts << 10) * 100) / ifst->total_rxpkts;
2379 } else {
2380 ifst->oo_percent = 0;
2381 }
2382
2383 /* Reorder percentage */
2384 if (ifst->total_reorderpkts > 0 &&
2385 (ifst->total_txpkts + ifst->total_rxpkts) > 0) {
2386 /* shift by 10 for precision */
2387 ifst->reorder_percent =
2388 ((ifst->total_reorderpkts << 10) * 100) /
2389 (ifst->total_txpkts + ifst->total_rxpkts);
2390 } else {
2391 ifst->reorder_percent = 0;
2392 }
2393 }
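/*
 * Worked example of the <<10 fixed point above: 50 retransmits out of
 * 1000 transmitted packets gives rxmit_percent =
 * ((50 << 10) * 100) / 1000 = 5120, which is 5 * 1024, i.e. 5 percent;
 * a consumer shifts right by 10 to recover the integer percentage.
 */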
2394
2395 static void
2396 nstat_ifnet_normalize_counter(struct if_tcp_ecn_stat *if_st)
2397 {
2398 u_int64_t ecn_on_conn, ecn_off_conn;
2399
2400 if (if_st == NULL) {
2401 return;
2402 }
2403 ecn_on_conn = if_st->ecn_client_success +
2404 if_st->ecn_server_success;
2405 ecn_off_conn = if_st->ecn_off_conn +
2406 (if_st->ecn_client_setup - if_st->ecn_client_success) +
2407 (if_st->ecn_server_setup - if_st->ecn_server_success);
2408
2409 /*
2410 * report sack episodes as a per-connection ratio; rst_drop and rxmit_drop
2411 * as per-connection percentages (note the extra * 100); shift by 10 for precision
2412 */
2413 if (ecn_on_conn > 0) {
2414 if_st->ecn_on.sack_episodes =
2415 (if_st->ecn_on.sack_episodes << 10) / ecn_on_conn;
2416 if_st->ecn_on.rst_drop =
2417 (if_st->ecn_on.rst_drop << 10) * 100 / ecn_on_conn;
2418 if_st->ecn_on.rxmit_drop =
2419 (if_st->ecn_on.rxmit_drop << 10) * 100 / ecn_on_conn;
2420 } else {
2421 /* set to zero, just in case */
2422 if_st->ecn_on.sack_episodes = 0;
2423 if_st->ecn_on.rst_drop = 0;
2424 if_st->ecn_on.rxmit_drop = 0;
2425 }
2426
2427 if (ecn_off_conn > 0) {
2428 if_st->ecn_off.sack_episodes =
2429 (if_st->ecn_off.sack_episodes << 10) / ecn_off_conn;
2430 if_st->ecn_off.rst_drop =
2431 (if_st->ecn_off.rst_drop << 10) * 100 / ecn_off_conn;
2432 if_st->ecn_off.rxmit_drop =
2433 (if_st->ecn_off.rxmit_drop << 10) * 100 / ecn_off_conn;
2434 } else {
2435 if_st->ecn_off.sack_episodes = 0;
2436 if_st->ecn_off.rst_drop = 0;
2437 if_st->ecn_off.rxmit_drop = 0;
2438 }
2439 if_st->ecn_total_conn = ecn_off_conn + ecn_on_conn;
2440 }
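/*
 * Example: with ecn_on_conn == 4 and 2 SACK episodes, sack_episodes
 * becomes (2 << 10) / 4 = 512, i.e. 0.5 episodes per connection in
 * <<10 fixed point; rst_drop and rxmit_drop carry the extra * 100
 * factor and are therefore percentages in the same format.
 */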
2441
2442 static void
2443 nstat_ifnet_report_ecn_stats(void)
2444 {
2445 u_int64_t uptime, last_report_time;
2446 struct nstat_sysinfo_data data;
2447 struct nstat_sysinfo_ifnet_ecn_stats *st;
2448 struct ifnet *ifp;
2449
2450 uptime = net_uptime();
2451
2452 if ((int)(uptime - nstat_ifnet_last_report_time) <
2453 tcp_report_stats_interval) {
2454 return;
2455 }
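/*
 * Rate limit: bail unless tcp_report_stats_interval seconds of
 * net_uptime() have elapsed since the previous report. The previous
 * report time is snapshotted in last_report_time so per-ifnet stats
 * whose timestamp predates it can be skipped below.
 */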
2456
2457 last_report_time = nstat_ifnet_last_report_time;
2458 nstat_ifnet_last_report_time = uptime;
2459 data.flags = NSTAT_SYSINFO_IFNET_ECN_STATS;
2460 st = &data.u.ifnet_ecn_stats;
2461
2462 ifnet_head_lock_shared();
2463 TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
2464 if (ifp->if_ipv4_stat == NULL || ifp->if_ipv6_stat == NULL) {
2465 continue;
2466 }
2467
2468 if (!IF_FULLY_ATTACHED(ifp)) {
2469 continue;
2470 }
2471
2472 /* Limit reporting to Wifi, Ethernet and cellular. */
2473 if (!(IFNET_IS_ETHERNET(ifp) || IFNET_IS_CELLULAR(ifp))) {
2474 continue;
2475 }
2476
2477 bzero(st, sizeof(*st));
2478 if (IFNET_IS_CELLULAR(ifp)) {
2479 st->ifnet_type = NSTAT_IFNET_ECN_TYPE_CELLULAR;
2480 } else if (IFNET_IS_WIFI(ifp)) {
2481 st->ifnet_type = NSTAT_IFNET_ECN_TYPE_WIFI;
2482 } else {
2483 st->ifnet_type = NSTAT_IFNET_ECN_TYPE_ETHERNET;
2484 }
2485 data.unsent_data_cnt = ifp->if_unsent_data_cnt;
2486 /* skip if there was no update since last report */
2487 if (ifp->if_ipv4_stat->timestamp <= 0 ||
2488 ifp->if_ipv4_stat->timestamp < last_report_time) {
2489 goto v6;
2490 }
2491 st->ifnet_proto = NSTAT_IFNET_ECN_PROTO_IPV4;
2492 /* compute percentages using packet counts */
2493 nstat_ifnet_compute_percentages(&ifp->if_ipv4_stat->ecn_on);
2494 nstat_ifnet_compute_percentages(&ifp->if_ipv4_stat->ecn_off);
2495 nstat_ifnet_normalize_counter(ifp->if_ipv4_stat);
2496 bcopy(ifp->if_ipv4_stat, &st->ecn_stat,
2497 sizeof(st->ecn_stat));
2498 nstat_sysinfo_send_data(&data);
2499 bzero(ifp->if_ipv4_stat, sizeof(*ifp->if_ipv4_stat));
2500
2501 v6:
2502 /* skip if there was no update since last report */
2503 if (ifp->if_ipv6_stat->timestamp <= 0 ||
2504 ifp->if_ipv6_stat->timestamp < last_report_time) {
2505 continue;
2506 }
2507 st->ifnet_proto = NSTAT_IFNET_ECN_PROTO_IPV6;
2508
2509 /* compute percentages using packet counts */
2510 nstat_ifnet_compute_percentages(&ifp->if_ipv6_stat->ecn_on);
2511 nstat_ifnet_compute_percentages(&ifp->if_ipv6_stat->ecn_off);
2512 nstat_ifnet_normalize_counter(ifp->if_ipv6_stat);
2513 bcopy(ifp->if_ipv6_stat, &st->ecn_stat,
2514 sizeof(st->ecn_stat));
2515 nstat_sysinfo_send_data(&data);
2516
2517 /* Zero the stats in ifp */
2518 bzero(ifp->if_ipv6_stat, sizeof(*ifp->if_ipv6_stat));
2519 }
2520 ifnet_head_done();
2521 }
2522
2523 /* Some thresholds to determine Low Internet mode */
2524 #define NSTAT_LIM_DL_MAX_BANDWIDTH_THRESHOLD 1000000 /* 1 Mbps */
2525 #define NSTAT_LIM_UL_MAX_BANDWIDTH_THRESHOLD 500000 /* 500 Kbps */
2526 #define NSTAT_LIM_UL_MIN_RTT_THRESHOLD 1000 /* 1 second */
2527 #define NSTAT_LIM_CONN_TIMEOUT_PERCENT_THRESHOLD (10 << 10) /* 10 percent connection timeouts */
2528 #define NSTAT_LIM_PACKET_LOSS_PERCENT_THRESHOLD (2 << 10) /* 2 percent packet loss rate */
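/*
 * The two percentage thresholds are pre-shifted by 10 so they compare
 * directly against the <<10 fixed-point percentages computed in
 * nstat_lim_activity_check() below: (10 << 10) == 10240 is 10 percent,
 * (2 << 10) == 2048 is 2 percent.
 */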
2529
2530 static boolean_t
2531 nstat_lim_activity_check(struct if_lim_perf_stat *st)
2532 {
2533 /* check that the current activity is enough to report stats */
2534 if (st->lim_total_txpkts < nstat_lim_min_tx_pkts ||
2535 st->lim_total_rxpkts < nstat_lim_min_rx_pkts ||
2536 st->lim_conn_attempts == 0) {
2537 return FALSE;
2538 }
2539
2540 /*
2541 * Compute percentages if there was enough activity. Use
2542 * shift-left by 10 to preserve precision.
2543 */
2544 st->lim_packet_loss_percent = ((st->lim_total_retxpkts << 10) /
2545 st->lim_total_txpkts) * 100;
2546
2547 st->lim_packet_ooo_percent = ((st->lim_total_oopkts << 10) /
2548 st->lim_total_rxpkts) * 100;
2549
2550 st->lim_conn_timeout_percent = ((st->lim_conn_timeouts << 10) /
2551 st->lim_conn_attempts) * 100;
2552
2553 /*
2554 * Is Low Internet detected? First order metrics are bandwidth
2555 * and RTT. If bandwidth falls below the defined thresholds, or the
2556 * minimum RTT rises above its threshold, the network attachment
2557 * can be classified as having Low Internet capacity.
2558 *
2559 * A high connection timeout rate also indicates Low Internet
2560 * capacity.
2561 */
2562 if (st->lim_dl_max_bandwidth > 0 &&
2563 st->lim_dl_max_bandwidth <= NSTAT_LIM_DL_MAX_BANDWIDTH_THRESHOLD) {
2564 st->lim_dl_detected = 1;
2565 }
2566
2567 if ((st->lim_ul_max_bandwidth > 0 &&
2568 st->lim_ul_max_bandwidth <= NSTAT_LIM_UL_MAX_BANDWIDTH_THRESHOLD) ||
2569 st->lim_rtt_min >= NSTAT_LIM_UL_MIN_RTT_THRESHOLD) {
2570 st->lim_ul_detected = 1;
2571 }
2572
2573 if (st->lim_conn_attempts > 20 &&
2574 st->lim_conn_timeout_percent >=
2575 NSTAT_LIM_CONN_TIMEOUT_PERCENT_THRESHOLD) {
2576 st->lim_ul_detected = 1;
2577 }
2578 /*
2579 * Second order metric: if there is high packet loss even after
2580 * using delay-based congestion control algorithms, classify the
2581 * attachment as Low Internet as well.
2582 */
2583 if (st->lim_bk_txpkts >= nstat_lim_min_tx_pkts &&
2584 st->lim_packet_loss_percent >=
2585 NSTAT_LIM_PACKET_LOSS_PERCENT_THRESHOLD) {
2586 st->lim_ul_detected = 1;
2587 }
2588 return TRUE;
2589 }
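/*
 * Worked example: 30 retransmits out of 1000 transmitted packets gives
 * lim_packet_loss_percent = ((30 << 10) / 1000) * 100 = 3000, roughly
 * 2.9 percent in <<10 fixed point, which exceeds the 2 percent
 * threshold (2 << 10 == 2048) and sets lim_ul_detected, provided the
 * background-traffic minimum lim_bk_txpkts is also met.
 */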
2590
2591 static u_int64_t nstat_lim_last_report_time = 0;
2592 static void
2593 nstat_ifnet_report_lim_stats(void)
2594 {
2595 u_int64_t uptime;
2596 struct nstat_sysinfo_data data;
2597 struct nstat_sysinfo_lim_stats *st;
2598 struct ifnet *ifp;
2599 int err;
2600
2601 uptime = net_uptime();
2602
2603 if ((u_int32_t)(uptime - nstat_lim_last_report_time) <
2604 nstat_lim_interval) {
2605 return;
2606 }
2607
2608 nstat_lim_last_report_time = uptime;
2609 data.flags = NSTAT_SYSINFO_LIM_STATS;
2610 st = &data.u.lim_stats;
2611 data.unsent_data_cnt = 0;
2612
2613 ifnet_head_lock_shared();
2614 TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
2615 if (!IF_FULLY_ATTACHED(ifp)) {
2616 continue;
2617 }
2618
2619 /* Limit reporting to Wifi, Ethernet and cellular */
2620 if (!(IFNET_IS_ETHERNET(ifp) || IFNET_IS_CELLULAR(ifp))) {
2621 continue;
2622 }
2623
2624 if (!nstat_lim_activity_check(&ifp->if_lim_stat)) {
2625 continue;
2626 }
2627
2628 bzero(st, sizeof(*st));
2629 st->ifnet_siglen = sizeof(st->ifnet_signature);
2630 err = ifnet_get_netsignature(ifp, AF_INET,
2631 (u_int8_t *)&st->ifnet_siglen, NULL,
2632 st->ifnet_signature);
2633 if (err != 0) {
2634 err = ifnet_get_netsignature(ifp, AF_INET6,
2635 (u_int8_t *)&st->ifnet_siglen, NULL,
2636 st->ifnet_signature);
2637 if (err != 0) {
2638 continue;
2639 }
2640 }
2641 ifnet_lock_shared(ifp);
2642 if (IFNET_IS_CELLULAR(ifp)) {
2643 st->ifnet_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_CELLULAR;
2644 } else if (IFNET_IS_WIFI(ifp)) {
2645 st->ifnet_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_WIFI;
2646 } else {
2647 st->ifnet_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_ETHERNET;
2648 }
2649 bcopy(&ifp->if_lim_stat, &st->lim_stat,
2650 sizeof(st->lim_stat));
2651
2652 /* Zero the stats in ifp */
2653 bzero(&ifp->if_lim_stat, sizeof(ifp->if_lim_stat));
2654 ifnet_lock_done(ifp);
2655 nstat_sysinfo_send_data(&data);
2656 }
2657 ifnet_head_done();
2658 }
2659
2660 static errno_t
2661 nstat_ifnet_copy_descriptor(
2662 nstat_provider_cookie_t cookie,
2663 void *data,
2664 size_t len)
2665 {
2666 nstat_ifnet_descriptor *desc = (nstat_ifnet_descriptor *)data;
2667 struct nstat_ifnet_cookie *ifcookie =
2668 (struct nstat_ifnet_cookie *)cookie;
2669 struct ifnet *ifp = ifcookie->ifp;
2670
2671 if (len < sizeof(nstat_ifnet_descriptor)) {
2672 return EINVAL;
2673 }
2674
2675 if (nstat_ifnet_gone(cookie)) {
2676 return EINVAL;
2677 }
2678
2679 bzero(desc, sizeof(*desc));
2680 ifnet_lock_shared(ifp);
2681 strlcpy(desc->name, ifp->if_xname, sizeof(desc->name));
2682 desc->ifindex = ifp->if_index;
2683 desc->threshold = ifp->if_data_threshold;
2684 desc->type = ifp->if_type;
2685 if (ifp->if_desc.ifd_len < sizeof(desc->description)) {
2686 memcpy(desc->description, ifp->if_desc.ifd_desc,
2687 sizeof(desc->description));
2688 }
2689 nstat_ifnet_copy_link_status(ifp, desc);
2690 ifnet_lock_done(ifp);
2691 return 0;
2692 }
2693
2694 static void
2695 nstat_init_ifnet_provider(void)
2696 {
2697 bzero(&nstat_ifnet_provider, sizeof(nstat_ifnet_provider));
2698 nstat_ifnet_provider.nstat_provider_id = NSTAT_PROVIDER_IFNET;
2699 nstat_ifnet_provider.nstat_descriptor_length = sizeof(nstat_ifnet_descriptor);
2700 nstat_ifnet_provider.nstat_lookup = nstat_ifnet_lookup;
2701 nstat_ifnet_provider.nstat_gone = nstat_ifnet_gone;
2702 nstat_ifnet_provider.nstat_counts = nstat_ifnet_counts;
2703 nstat_ifnet_provider.nstat_watcher_add = NULL;
2704 nstat_ifnet_provider.nstat_watcher_remove = NULL;
2705 nstat_ifnet_provider.nstat_copy_descriptor = nstat_ifnet_copy_descriptor;
2706 nstat_ifnet_provider.nstat_release = nstat_ifnet_release;
2707 nstat_ifnet_provider.next = nstat_providers;
2708 nstat_providers = &nstat_ifnet_provider;
2709 }
2710
2711 __private_extern__ void
2712 nstat_ifnet_threshold_reached(unsigned int ifindex)
2713 {
2714 nstat_control_state *state;
2715 nstat_src *src;
2716 struct ifnet *ifp;
2717 struct nstat_ifnet_cookie *ifcookie;
2718
2719 lck_mtx_lock(&nstat_mtx);
2720 for (state = nstat_controls; state; state = state->ncs_next) {
2721 lck_mtx_lock(&state->ncs_mtx);
2722 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
2723 {
2724 if (src->provider != &nstat_ifnet_provider) {
2725 continue;
2726 }
2727 ifcookie = (struct nstat_ifnet_cookie *)src->cookie;
2728 ifp = ifcookie->ifp;
2729 if (ifp->if_index != ifindex) {
2730 continue;
2731 }
2732 nstat_control_send_counts(state, src, 0, 0, NULL);
2733 }
2734 lck_mtx_unlock(&state->ncs_mtx);
2735 }
2736 lck_mtx_unlock(&nstat_mtx);
2737 }
2738
2739 #pragma mark -- Sysinfo --
2740 static void
2741 nstat_set_keyval_scalar(nstat_sysinfo_keyval *kv, int key, u_int32_t val)
2742 {
2743 kv->nstat_sysinfo_key = key;
2744 kv->nstat_sysinfo_flags = NSTAT_SYSINFO_FLAG_SCALAR;
2745 kv->u.nstat_sysinfo_scalar = val;
2746 kv->nstat_sysinfo_valsize = sizeof(kv->u.nstat_sysinfo_scalar);
2747 }
2748
2749 static void
2750 nstat_set_keyval_u64_scalar(nstat_sysinfo_keyval *kv, int key, u_int64_t val)
2751 {
2752 kv->nstat_sysinfo_key = key;
2753 kv->nstat_sysinfo_flags = NSTAT_SYSINFO_FLAG_SCALAR;
2754 kv->u.nstat_sysinfo_scalar = val;
2755 kv->nstat_sysinfo_valsize = sizeof(kv->u.nstat_sysinfo_scalar);
2756 }
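/*
 * The two scalar setters above differ only in the width of 'val'; both
 * store into the same u.nstat_sysinfo_scalar union field (a 64-bit
 * field, judging by the u64 variant), so the u64 flavor simply avoids
 * truncating 64-bit counters at the call site.
 */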
2757
2758 static void
2759 nstat_set_keyval_string(nstat_sysinfo_keyval *kv, int key, u_int8_t *buf,
2760 u_int32_t len)
2761 {
2762 kv->nstat_sysinfo_key = key;
2763 kv->nstat_sysinfo_flags = NSTAT_SYSINFO_FLAG_STRING;
2764 kv->nstat_sysinfo_valsize = min(len,
2765 NSTAT_SYSINFO_KEYVAL_STRING_MAXSIZE);
2766 bcopy(buf, kv->u.nstat_sysinfo_string, kv->nstat_sysinfo_valsize);
2767 }
2768
2769 static void
2770 nstat_sysinfo_send_data_internal(
2771 nstat_control_state *control,
2772 nstat_sysinfo_data *data)
2773 {
2774 nstat_msg_sysinfo_counts *syscnt = NULL;
2775 size_t allocsize = 0, countsize = 0, nkeyvals = 0, finalsize = 0;
2776 nstat_sysinfo_keyval *kv;
2777 errno_t result = 0;
2778 size_t i = 0;
2779
2780 allocsize = offsetof(nstat_msg_sysinfo_counts, counts);
2781 countsize = offsetof(nstat_sysinfo_counts, nstat_sysinfo_keyvals);
2782 finalsize = allocsize;
2783
2784 /* get number of key-vals for each kind of stat */
2785 switch (data->flags) {
2786 case NSTAT_SYSINFO_MBUF_STATS:
2787 nkeyvals = sizeof(struct nstat_sysinfo_mbuf_stats) /
2788 sizeof(u_int32_t);
2789 break;
2790 case NSTAT_SYSINFO_TCP_STATS:
2791 nkeyvals = NSTAT_SYSINFO_TCP_STATS_COUNT;
2792 break;
2793 case NSTAT_SYSINFO_IFNET_ECN_STATS:
2794 nkeyvals = (sizeof(struct if_tcp_ecn_stat) /
2795 sizeof(u_int64_t));
2796
2797 /* Two more keys for ifnet type and proto */
2798 nkeyvals += 2;
2799
2800 /* One key for unsent data. */
2801 nkeyvals++;
2802 break;
2803 case NSTAT_SYSINFO_LIM_STATS:
2804 nkeyvals = NSTAT_LIM_STAT_KEYVAL_COUNT;
2805 break;
2806 case NSTAT_SYSINFO_NET_API_STATS:
2807 nkeyvals = NSTAT_NET_API_STAT_KEYVAL_COUNT;
2808 break;
2809 default:
2810 return;
2811 }
2812 countsize += sizeof(nstat_sysinfo_keyval) * nkeyvals;
2813 allocsize += countsize;
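/*
 * allocsize now equals offsetof(nstat_msg_sysinfo_counts, counts) +
 * offsetof(nstat_sysinfo_counts, nstat_sysinfo_keyvals) +
 * nkeyvals * sizeof(nstat_sysinfo_keyval): a message header, a counts
 * header, then the fixed-size keyval array filled in below.
 */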
2814
2815 syscnt = kheap_alloc(KHEAP_TEMP, allocsize, Z_WAITOK | Z_ZERO);
2816 if (syscnt == NULL) {
2817 return;
2818 }
2819
2820 kv = (nstat_sysinfo_keyval *) &syscnt->counts.nstat_sysinfo_keyvals;
2821 switch (data->flags) {
2822 case NSTAT_SYSINFO_MBUF_STATS:
2823 {
2824 nstat_set_keyval_scalar(&kv[i++],
2825 NSTAT_SYSINFO_KEY_MBUF_256B_TOTAL,
2826 data->u.mb_stats.total_256b);
2827 nstat_set_keyval_scalar(&kv[i++],
2828 NSTAT_SYSINFO_KEY_MBUF_2KB_TOTAL,
2829 data->u.mb_stats.total_2kb);
2830 nstat_set_keyval_scalar(&kv[i++],
2831 NSTAT_SYSINFO_KEY_MBUF_4KB_TOTAL,
2832 data->u.mb_stats.total_4kb);
2833 nstat_set_keyval_scalar(&kv[i++],
2834 NSTAT_SYSINFO_MBUF_16KB_TOTAL,
2835 data->u.mb_stats.total_16kb);
2836 nstat_set_keyval_scalar(&kv[i++],
2837 NSTAT_SYSINFO_KEY_SOCK_MBCNT,
2838 data->u.mb_stats.sbmb_total);
2839 nstat_set_keyval_scalar(&kv[i++],
2840 NSTAT_SYSINFO_KEY_SOCK_ATMBLIMIT,
2841 data->u.mb_stats.sb_atmbuflimit);
2842 nstat_set_keyval_scalar(&kv[i++],
2843 NSTAT_SYSINFO_MBUF_DRAIN_CNT,
2844 data->u.mb_stats.draincnt);
2845 nstat_set_keyval_scalar(&kv[i++],
2846 NSTAT_SYSINFO_MBUF_MEM_RELEASED,
2847 data->u.mb_stats.memreleased);
2848 nstat_set_keyval_scalar(&kv[i++],
2849 NSTAT_SYSINFO_KEY_SOCK_MBFLOOR,
2850 data->u.mb_stats.sbmb_floor);
2851 VERIFY(i == nkeyvals);
2852 break;
2853 }
2854 case NSTAT_SYSINFO_TCP_STATS:
2855 {
2856 nstat_set_keyval_scalar(&kv[i++],
2857 NSTAT_SYSINFO_KEY_IPV4_AVGRTT,
2858 data->u.tcp_stats.ipv4_avgrtt);
2859 nstat_set_keyval_scalar(&kv[i++],
2860 NSTAT_SYSINFO_KEY_IPV6_AVGRTT,
2861 data->u.tcp_stats.ipv6_avgrtt);
2862 nstat_set_keyval_scalar(&kv[i++],
2863 NSTAT_SYSINFO_KEY_SEND_PLR,
2864 data->u.tcp_stats.send_plr);
2865 nstat_set_keyval_scalar(&kv[i++],
2866 NSTAT_SYSINFO_KEY_RECV_PLR,
2867 data->u.tcp_stats.recv_plr);
2868 nstat_set_keyval_scalar(&kv[i++],
2869 NSTAT_SYSINFO_KEY_SEND_TLRTO,
2870 data->u.tcp_stats.send_tlrto_rate);
2871 nstat_set_keyval_scalar(&kv[i++],
2872 NSTAT_SYSINFO_KEY_SEND_REORDERRATE,
2873 data->u.tcp_stats.send_reorder_rate);
2874 nstat_set_keyval_scalar(&kv[i++],
2875 NSTAT_SYSINFO_CONNECTION_ATTEMPTS,
2876 data->u.tcp_stats.connection_attempts);
2877 nstat_set_keyval_scalar(&kv[i++],
2878 NSTAT_SYSINFO_CONNECTION_ACCEPTS,
2879 data->u.tcp_stats.connection_accepts);
2880 nstat_set_keyval_scalar(&kv[i++],
2881 NSTAT_SYSINFO_ECN_CLIENT_ENABLED,
2882 data->u.tcp_stats.ecn_client_enabled);
2883 nstat_set_keyval_scalar(&kv[i++],
2884 NSTAT_SYSINFO_ECN_SERVER_ENABLED,
2885 data->u.tcp_stats.ecn_server_enabled);
2886 nstat_set_keyval_scalar(&kv[i++],
2887 NSTAT_SYSINFO_ECN_CLIENT_SETUP,
2888 data->u.tcp_stats.ecn_client_setup);
2889 nstat_set_keyval_scalar(&kv[i++],
2890 NSTAT_SYSINFO_ECN_SERVER_SETUP,
2891 data->u.tcp_stats.ecn_server_setup);
2892 nstat_set_keyval_scalar(&kv[i++],
2893 NSTAT_SYSINFO_ECN_CLIENT_SUCCESS,
2894 data->u.tcp_stats.ecn_client_success);
2895 nstat_set_keyval_scalar(&kv[i++],
2896 NSTAT_SYSINFO_ECN_SERVER_SUCCESS,
2897 data->u.tcp_stats.ecn_server_success);
2898 nstat_set_keyval_scalar(&kv[i++],
2899 NSTAT_SYSINFO_ECN_NOT_SUPPORTED,
2900 data->u.tcp_stats.ecn_not_supported);
2901 nstat_set_keyval_scalar(&kv[i++],
2902 NSTAT_SYSINFO_ECN_LOST_SYN,
2903 data->u.tcp_stats.ecn_lost_syn);
2904 nstat_set_keyval_scalar(&kv[i++],
2905 NSTAT_SYSINFO_ECN_LOST_SYNACK,
2906 data->u.tcp_stats.ecn_lost_synack);
2907 nstat_set_keyval_scalar(&kv[i++],
2908 NSTAT_SYSINFO_ECN_RECV_CE,
2909 data->u.tcp_stats.ecn_recv_ce);
2910 nstat_set_keyval_scalar(&kv[i++],
2911 NSTAT_SYSINFO_ECN_RECV_ECE,
2912 data->u.tcp_stats.ecn_recv_ece);
2913 nstat_set_keyval_scalar(&kv[i++],
2914 NSTAT_SYSINFO_ECN_SENT_ECE,
2915 data->u.tcp_stats.ecn_sent_ece);
2916 nstat_set_keyval_scalar(&kv[i++],
2917 NSTAT_SYSINFO_ECN_CONN_RECV_CE,
2918 data->u.tcp_stats.ecn_conn_recv_ce);
2919 nstat_set_keyval_scalar(&kv[i++],
2920 NSTAT_SYSINFO_ECN_CONN_RECV_ECE,
2921 data->u.tcp_stats.ecn_conn_recv_ece);
2922 nstat_set_keyval_scalar(&kv[i++],
2923 NSTAT_SYSINFO_ECN_CONN_PLNOCE,
2924 data->u.tcp_stats.ecn_conn_plnoce);
2925 nstat_set_keyval_scalar(&kv[i++],
2926 NSTAT_SYSINFO_ECN_CONN_PL_CE,
2927 data->u.tcp_stats.ecn_conn_pl_ce);
2928 nstat_set_keyval_scalar(&kv[i++],
2929 NSTAT_SYSINFO_ECN_CONN_NOPL_CE,
2930 data->u.tcp_stats.ecn_conn_nopl_ce);
2931 nstat_set_keyval_scalar(&kv[i++],
2932 NSTAT_SYSINFO_ECN_FALLBACK_SYNLOSS,
2933 data->u.tcp_stats.ecn_fallback_synloss);
2934 nstat_set_keyval_scalar(&kv[i++],
2935 NSTAT_SYSINFO_ECN_FALLBACK_REORDER,
2936 data->u.tcp_stats.ecn_fallback_reorder);
2937 nstat_set_keyval_scalar(&kv[i++],
2938 NSTAT_SYSINFO_ECN_FALLBACK_CE,
2939 data->u.tcp_stats.ecn_fallback_ce);
2940 nstat_set_keyval_scalar(&kv[i++],
2941 NSTAT_SYSINFO_TFO_SYN_DATA_RCV,
2942 data->u.tcp_stats.tfo_syn_data_rcv);
2943 nstat_set_keyval_scalar(&kv[i++],
2944 NSTAT_SYSINFO_TFO_COOKIE_REQ_RCV,
2945 data->u.tcp_stats.tfo_cookie_req_rcv);
2946 nstat_set_keyval_scalar(&kv[i++],
2947 NSTAT_SYSINFO_TFO_COOKIE_SENT,
2948 data->u.tcp_stats.tfo_cookie_sent);
2949 nstat_set_keyval_scalar(&kv[i++],
2950 NSTAT_SYSINFO_TFO_COOKIE_INVALID,
2951 data->u.tcp_stats.tfo_cookie_invalid);
2952 nstat_set_keyval_scalar(&kv[i++],
2953 NSTAT_SYSINFO_TFO_COOKIE_REQ,
2954 data->u.tcp_stats.tfo_cookie_req);
2955 nstat_set_keyval_scalar(&kv[i++],
2956 NSTAT_SYSINFO_TFO_COOKIE_RCV,
2957 data->u.tcp_stats.tfo_cookie_rcv);
2958 nstat_set_keyval_scalar(&kv[i++],
2959 NSTAT_SYSINFO_TFO_SYN_DATA_SENT,
2960 data->u.tcp_stats.tfo_syn_data_sent);
2961 nstat_set_keyval_scalar(&kv[i++],
2962 NSTAT_SYSINFO_TFO_SYN_DATA_ACKED,
2963 data->u.tcp_stats.tfo_syn_data_acked);
2964 nstat_set_keyval_scalar(&kv[i++],
2965 NSTAT_SYSINFO_TFO_SYN_LOSS,
2966 data->u.tcp_stats.tfo_syn_loss);
2967 nstat_set_keyval_scalar(&kv[i++],
2968 NSTAT_SYSINFO_TFO_BLACKHOLE,
2969 data->u.tcp_stats.tfo_blackhole);
2970 nstat_set_keyval_scalar(&kv[i++],
2971 NSTAT_SYSINFO_TFO_COOKIE_WRONG,
2972 data->u.tcp_stats.tfo_cookie_wrong);
2973 nstat_set_keyval_scalar(&kv[i++],
2974 NSTAT_SYSINFO_TFO_NO_COOKIE_RCV,
2975 data->u.tcp_stats.tfo_no_cookie_rcv);
2976 nstat_set_keyval_scalar(&kv[i++],
2977 NSTAT_SYSINFO_TFO_HEURISTICS_DISABLE,
2978 data->u.tcp_stats.tfo_heuristics_disable);
2979 nstat_set_keyval_scalar(&kv[i++],
2980 NSTAT_SYSINFO_TFO_SEND_BLACKHOLE,
2981 data->u.tcp_stats.tfo_sndblackhole);
2982 nstat_set_keyval_scalar(&kv[i++],
2983 NSTAT_SYSINFO_MPTCP_HANDOVER_ATTEMPT,
2984 data->u.tcp_stats.mptcp_handover_attempt);
2985 nstat_set_keyval_scalar(&kv[i++],
2986 NSTAT_SYSINFO_MPTCP_INTERACTIVE_ATTEMPT,
2987 data->u.tcp_stats.mptcp_interactive_attempt);
2988 nstat_set_keyval_scalar(&kv[i++],
2989 NSTAT_SYSINFO_MPTCP_AGGREGATE_ATTEMPT,
2990 data->u.tcp_stats.mptcp_aggregate_attempt);
2991 nstat_set_keyval_scalar(&kv[i++],
2992 NSTAT_SYSINFO_MPTCP_FP_HANDOVER_ATTEMPT,
2993 data->u.tcp_stats.mptcp_fp_handover_attempt);
2994 nstat_set_keyval_scalar(&kv[i++],
2995 NSTAT_SYSINFO_MPTCP_FP_INTERACTIVE_ATTEMPT,
2996 data->u.tcp_stats.mptcp_fp_interactive_attempt);
2997 nstat_set_keyval_scalar(&kv[i++],
2998 NSTAT_SYSINFO_MPTCP_FP_AGGREGATE_ATTEMPT,
2999 data->u.tcp_stats.mptcp_fp_aggregate_attempt);
3000 nstat_set_keyval_scalar(&kv[i++],
3001 NSTAT_SYSINFO_MPTCP_HEURISTIC_FALLBACK,
3002 data->u.tcp_stats.mptcp_heuristic_fallback);
3003 nstat_set_keyval_scalar(&kv[i++],
3004 NSTAT_SYSINFO_MPTCP_FP_HEURISTIC_FALLBACK,
3005 data->u.tcp_stats.mptcp_fp_heuristic_fallback);
3006 nstat_set_keyval_scalar(&kv[i++],
3007 NSTAT_SYSINFO_MPTCP_HANDOVER_SUCCESS_WIFI,
3008 data->u.tcp_stats.mptcp_handover_success_wifi);
3009 nstat_set_keyval_scalar(&kv[i++],
3010 NSTAT_SYSINFO_MPTCP_HANDOVER_SUCCESS_CELL,
3011 data->u.tcp_stats.mptcp_handover_success_cell);
3012 nstat_set_keyval_scalar(&kv[i++],
3013 NSTAT_SYSINFO_MPTCP_INTERACTIVE_SUCCESS,
3014 data->u.tcp_stats.mptcp_interactive_success);
3015 nstat_set_keyval_scalar(&kv[i++],
3016 NSTAT_SYSINFO_MPTCP_AGGREGATE_SUCCESS,
3017 data->u.tcp_stats.mptcp_aggregate_success);
3018 nstat_set_keyval_scalar(&kv[i++],
3019 NSTAT_SYSINFO_MPTCP_FP_HANDOVER_SUCCESS_WIFI,
3020 data->u.tcp_stats.mptcp_fp_handover_success_wifi);
3021 nstat_set_keyval_scalar(&kv[i++],
3022 NSTAT_SYSINFO_MPTCP_FP_HANDOVER_SUCCESS_CELL,
3023 data->u.tcp_stats.mptcp_fp_handover_success_cell);
3024 nstat_set_keyval_scalar(&kv[i++],
3025 NSTAT_SYSINFO_MPTCP_FP_INTERACTIVE_SUCCESS,
3026 data->u.tcp_stats.mptcp_fp_interactive_success);
3027 nstat_set_keyval_scalar(&kv[i++],
3028 NSTAT_SYSINFO_MPTCP_FP_AGGREGATE_SUCCESS,
3029 data->u.tcp_stats.mptcp_fp_aggregate_success);
3030 nstat_set_keyval_scalar(&kv[i++],
3031 NSTAT_SYSINFO_MPTCP_HANDOVER_CELL_FROM_WIFI,
3032 data->u.tcp_stats.mptcp_handover_cell_from_wifi);
3033 nstat_set_keyval_scalar(&kv[i++],
3034 NSTAT_SYSINFO_MPTCP_HANDOVER_WIFI_FROM_CELL,
3035 data->u.tcp_stats.mptcp_handover_wifi_from_cell);
3036 nstat_set_keyval_scalar(&kv[i++],
3037 NSTAT_SYSINFO_MPTCP_INTERACTIVE_CELL_FROM_WIFI,
3038 data->u.tcp_stats.mptcp_interactive_cell_from_wifi);
3039 nstat_set_keyval_u64_scalar(&kv[i++],
3040 NSTAT_SYSINFO_MPTCP_HANDOVER_CELL_BYTES,
3041 data->u.tcp_stats.mptcp_handover_cell_bytes);
3042 nstat_set_keyval_u64_scalar(&kv[i++],
3043 NSTAT_SYSINFO_MPTCP_INTERACTIVE_CELL_BYTES,
3044 data->u.tcp_stats.mptcp_interactive_cell_bytes);
3045 nstat_set_keyval_u64_scalar(&kv[i++],
3046 NSTAT_SYSINFO_MPTCP_AGGREGATE_CELL_BYTES,
3047 data->u.tcp_stats.mptcp_aggregate_cell_bytes);
3048 nstat_set_keyval_u64_scalar(&kv[i++],
3049 NSTAT_SYSINFO_MPTCP_HANDOVER_ALL_BYTES,
3050 data->u.tcp_stats.mptcp_handover_all_bytes);
3051 nstat_set_keyval_u64_scalar(&kv[i++],
3052 NSTAT_SYSINFO_MPTCP_INTERACTIVE_ALL_BYTES,
3053 data->u.tcp_stats.mptcp_interactive_all_bytes);
3054 nstat_set_keyval_u64_scalar(&kv[i++],
3055 NSTAT_SYSINFO_MPTCP_AGGREGATE_ALL_BYTES,
3056 data->u.tcp_stats.mptcp_aggregate_all_bytes);
3057 nstat_set_keyval_scalar(&kv[i++],
3058 NSTAT_SYSINFO_MPTCP_BACK_TO_WIFI,
3059 data->u.tcp_stats.mptcp_back_to_wifi);
3060 nstat_set_keyval_scalar(&kv[i++],
3061 NSTAT_SYSINFO_MPTCP_WIFI_PROXY,
3062 data->u.tcp_stats.mptcp_wifi_proxy);
3063 nstat_set_keyval_scalar(&kv[i++],
3064 NSTAT_SYSINFO_MPTCP_CELL_PROXY,
3065 data->u.tcp_stats.mptcp_cell_proxy);
3066 nstat_set_keyval_scalar(&kv[i++],
3067 NSTAT_SYSINFO_MPTCP_TRIGGERED_CELL,
3068 data->u.tcp_stats.mptcp_triggered_cell);
3069 VERIFY(i == nkeyvals);
3070 break;
3071 }
3072 case NSTAT_SYSINFO_IFNET_ECN_STATS:
3073 {
3074 nstat_set_keyval_scalar(&kv[i++],
3075 NSTAT_SYSINFO_ECN_IFNET_TYPE,
3076 data->u.ifnet_ecn_stats.ifnet_type);
3077 nstat_set_keyval_scalar(&kv[i++],
3078 NSTAT_SYSINFO_ECN_IFNET_PROTO,
3079 data->u.ifnet_ecn_stats.ifnet_proto);
3080 nstat_set_keyval_u64_scalar(&kv[i++],
3081 NSTAT_SYSINFO_ECN_IFNET_CLIENT_SETUP,
3082 data->u.ifnet_ecn_stats.ecn_stat.ecn_client_setup);
3083 nstat_set_keyval_u64_scalar(&kv[i++],
3084 NSTAT_SYSINFO_ECN_IFNET_SERVER_SETUP,
3085 data->u.ifnet_ecn_stats.ecn_stat.ecn_server_setup);
3086 nstat_set_keyval_u64_scalar(&kv[i++],
3087 NSTAT_SYSINFO_ECN_IFNET_CLIENT_SUCCESS,
3088 data->u.ifnet_ecn_stats.ecn_stat.ecn_client_success);
3089 nstat_set_keyval_u64_scalar(&kv[i++],
3090 NSTAT_SYSINFO_ECN_IFNET_SERVER_SUCCESS,
3091 data->u.ifnet_ecn_stats.ecn_stat.ecn_server_success);
3092 nstat_set_keyval_u64_scalar(&kv[i++],
3093 NSTAT_SYSINFO_ECN_IFNET_PEER_NOSUPPORT,
3094 data->u.ifnet_ecn_stats.ecn_stat.ecn_peer_nosupport);
3095 nstat_set_keyval_u64_scalar(&kv[i++],
3096 NSTAT_SYSINFO_ECN_IFNET_SYN_LOST,
3097 data->u.ifnet_ecn_stats.ecn_stat.ecn_syn_lost);
3098 nstat_set_keyval_u64_scalar(&kv[i++],
3099 NSTAT_SYSINFO_ECN_IFNET_SYNACK_LOST,
3100 data->u.ifnet_ecn_stats.ecn_stat.ecn_synack_lost);
3101 nstat_set_keyval_u64_scalar(&kv[i++],
3102 NSTAT_SYSINFO_ECN_IFNET_RECV_CE,
3103 data->u.ifnet_ecn_stats.ecn_stat.ecn_recv_ce);
3104 nstat_set_keyval_u64_scalar(&kv[i++],
3105 NSTAT_SYSINFO_ECN_IFNET_RECV_ECE,
3106 data->u.ifnet_ecn_stats.ecn_stat.ecn_recv_ece);
3107 nstat_set_keyval_u64_scalar(&kv[i++],
3108 NSTAT_SYSINFO_ECN_IFNET_CONN_RECV_CE,
3109 data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_recv_ce);
3110 nstat_set_keyval_u64_scalar(&kv[i++],
3111 NSTAT_SYSINFO_ECN_IFNET_CONN_RECV_ECE,
3112 data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_recv_ece);
3113 nstat_set_keyval_u64_scalar(&kv[i++],
3114 NSTAT_SYSINFO_ECN_IFNET_CONN_PLNOCE,
3115 data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_plnoce);
3116 nstat_set_keyval_u64_scalar(&kv[i++],
3117 NSTAT_SYSINFO_ECN_IFNET_CONN_PLCE,
3118 data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_plce);
3119 nstat_set_keyval_u64_scalar(&kv[i++],
3120 NSTAT_SYSINFO_ECN_IFNET_CONN_NOPLCE,
3121 data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_noplce);
3122 nstat_set_keyval_u64_scalar(&kv[i++],
3123 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_SYNLOSS,
3124 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_synloss);
3125 nstat_set_keyval_u64_scalar(&kv[i++],
3126 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_REORDER,
3127 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_reorder);
3128 nstat_set_keyval_u64_scalar(&kv[i++],
3129 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_CE,
3130 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_ce);
3131 nstat_set_keyval_u64_scalar(&kv[i++],
3132 NSTAT_SYSINFO_ECN_IFNET_ON_RTT_AVG,
3133 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rtt_avg);
3134 nstat_set_keyval_u64_scalar(&kv[i++],
3135 NSTAT_SYSINFO_ECN_IFNET_ON_RTT_VAR,
3136 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rtt_var);
3137 nstat_set_keyval_u64_scalar(&kv[i++],
3138 NSTAT_SYSINFO_ECN_IFNET_ON_OOPERCENT,
3139 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.oo_percent);
3140 nstat_set_keyval_u64_scalar(&kv[i++],
3141 NSTAT_SYSINFO_ECN_IFNET_ON_SACK_EPISODE,
3142 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.sack_episodes);
3143 nstat_set_keyval_u64_scalar(&kv[i++],
3144 NSTAT_SYSINFO_ECN_IFNET_ON_REORDER_PERCENT,
3145 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.reorder_percent);
3146 nstat_set_keyval_u64_scalar(&kv[i++],
3147 NSTAT_SYSINFO_ECN_IFNET_ON_RXMIT_PERCENT,
3148 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rxmit_percent);
3149 nstat_set_keyval_u64_scalar(&kv[i++],
3150 NSTAT_SYSINFO_ECN_IFNET_ON_RXMIT_DROP,
3151 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rxmit_drop);
3152 nstat_set_keyval_u64_scalar(&kv[i++],
3153 NSTAT_SYSINFO_ECN_IFNET_OFF_RTT_AVG,
3154 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rtt_avg);
3155 nstat_set_keyval_u64_scalar(&kv[i++],
3156 NSTAT_SYSINFO_ECN_IFNET_OFF_RTT_VAR,
3157 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rtt_var);
3158 nstat_set_keyval_u64_scalar(&kv[i++],
3159 NSTAT_SYSINFO_ECN_IFNET_OFF_OOPERCENT,
3160 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.oo_percent);
3161 nstat_set_keyval_u64_scalar(&kv[i++],
3162 NSTAT_SYSINFO_ECN_IFNET_OFF_SACK_EPISODE,
3163 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.sack_episodes);
3164 nstat_set_keyval_u64_scalar(&kv[i++],
3165 NSTAT_SYSINFO_ECN_IFNET_OFF_REORDER_PERCENT,
3166 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.reorder_percent);
3167 nstat_set_keyval_u64_scalar(&kv[i++],
3168 NSTAT_SYSINFO_ECN_IFNET_OFF_RXMIT_PERCENT,
3169 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rxmit_percent);
3170 nstat_set_keyval_u64_scalar(&kv[i++],
3171 NSTAT_SYSINFO_ECN_IFNET_OFF_RXMIT_DROP,
3172 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rxmit_drop);
3173 nstat_set_keyval_u64_scalar(&kv[i++],
3174 NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_TXPKTS,
3175 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_txpkts);
3176 nstat_set_keyval_u64_scalar(&kv[i++],
3177 NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_RXMTPKTS,
3178 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_rxmitpkts);
3179 nstat_set_keyval_u64_scalar(&kv[i++],
3180 NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_RXPKTS,
3181 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_rxpkts);
3182 nstat_set_keyval_u64_scalar(&kv[i++],
3183 NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_OOPKTS,
3184 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_oopkts);
3185 nstat_set_keyval_u64_scalar(&kv[i++],
3186 NSTAT_SYSINFO_ECN_IFNET_ON_DROP_RST,
3187 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rst_drop);
3188 nstat_set_keyval_u64_scalar(&kv[i++],
3189 NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_TXPKTS,
3190 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_txpkts);
3191 nstat_set_keyval_u64_scalar(&kv[i++],
3192 NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_RXMTPKTS,
3193 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_rxmitpkts);
3194 nstat_set_keyval_u64_scalar(&kv[i++],
3195 NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_RXPKTS,
3196 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_rxpkts);
3197 nstat_set_keyval_u64_scalar(&kv[i++],
3198 NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_OOPKTS,
3199 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_oopkts);
3200 nstat_set_keyval_u64_scalar(&kv[i++],
3201 NSTAT_SYSINFO_ECN_IFNET_OFF_DROP_RST,
3202 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rst_drop);
3203 nstat_set_keyval_u64_scalar(&kv[i++],
3204 NSTAT_SYSINFO_ECN_IFNET_TOTAL_CONN,
3205 data->u.ifnet_ecn_stats.ecn_stat.ecn_total_conn);
3206 nstat_set_keyval_scalar(&kv[i++],
3207 NSTAT_SYSINFO_IFNET_UNSENT_DATA,
3208 data->unsent_data_cnt);
3209 nstat_set_keyval_u64_scalar(&kv[i++],
3210 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_DROPRST,
3211 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_droprst);
3212 nstat_set_keyval_u64_scalar(&kv[i++],
3213 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_DROPRXMT,
3214 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_droprxmt);
3215 nstat_set_keyval_u64_scalar(&kv[i++],
3216 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_SYNRST,
3217 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_synrst);
3218 break;
3219 }
3220 case NSTAT_SYSINFO_LIM_STATS:
3221 {
3222 nstat_set_keyval_string(&kv[i++],
3223 NSTAT_SYSINFO_LIM_IFNET_SIGNATURE,
3224 data->u.lim_stats.ifnet_signature,
3225 data->u.lim_stats.ifnet_siglen);
3226 nstat_set_keyval_u64_scalar(&kv[i++],
3227 NSTAT_SYSINFO_LIM_IFNET_DL_MAX_BANDWIDTH,
3228 data->u.lim_stats.lim_stat.lim_dl_max_bandwidth);
3229 nstat_set_keyval_u64_scalar(&kv[i++],
3230 NSTAT_SYSINFO_LIM_IFNET_UL_MAX_BANDWIDTH,
3231 data->u.lim_stats.lim_stat.lim_ul_max_bandwidth);
3232 nstat_set_keyval_u64_scalar(&kv[i++],
3233 NSTAT_SYSINFO_LIM_IFNET_PACKET_LOSS_PERCENT,
3234 data->u.lim_stats.lim_stat.lim_packet_loss_percent);
3235 nstat_set_keyval_u64_scalar(&kv[i++],
3236 NSTAT_SYSINFO_LIM_IFNET_PACKET_OOO_PERCENT,
3237 data->u.lim_stats.lim_stat.lim_packet_ooo_percent);
3238 nstat_set_keyval_u64_scalar(&kv[i++],
3239 NSTAT_SYSINFO_LIM_IFNET_RTT_VARIANCE,
3240 data->u.lim_stats.lim_stat.lim_rtt_variance);
3241 nstat_set_keyval_u64_scalar(&kv[i++],
3242 NSTAT_SYSINFO_LIM_IFNET_RTT_MIN,
3243 data->u.lim_stats.lim_stat.lim_rtt_min);
3244 nstat_set_keyval_u64_scalar(&kv[i++],
3245 NSTAT_SYSINFO_LIM_IFNET_RTT_AVG,
3246 data->u.lim_stats.lim_stat.lim_rtt_average);
3247 nstat_set_keyval_u64_scalar(&kv[i++],
3248 NSTAT_SYSINFO_LIM_IFNET_CONN_TIMEOUT_PERCENT,
3249 data->u.lim_stats.lim_stat.lim_conn_timeout_percent);
3250 nstat_set_keyval_scalar(&kv[i++],
3251 NSTAT_SYSINFO_LIM_IFNET_DL_DETECTED,
3252 data->u.lim_stats.lim_stat.lim_dl_detected);
3253 nstat_set_keyval_scalar(&kv[i++],
3254 NSTAT_SYSINFO_LIM_IFNET_UL_DETECTED,
3255 data->u.lim_stats.lim_stat.lim_ul_detected);
3256 nstat_set_keyval_scalar(&kv[i++],
3257 NSTAT_SYSINFO_LIM_IFNET_TYPE,
3258 data->u.lim_stats.ifnet_type);
3259 break;
3260 }
3261 case NSTAT_SYSINFO_NET_API_STATS:
3262 {
3263 nstat_set_keyval_u64_scalar(&kv[i++],
3264 NSTAT_SYSINFO_API_IF_FLTR_ATTACH,
3265 data->u.net_api_stats.net_api_stats.nas_iflt_attach_total);
3266 nstat_set_keyval_u64_scalar(&kv[i++],
3267 NSTAT_SYSINFO_API_IF_FLTR_ATTACH_OS,
3268 data->u.net_api_stats.net_api_stats.nas_iflt_attach_os_total);
3269 nstat_set_keyval_u64_scalar(&kv[i++],
3270 NSTAT_SYSINFO_API_IP_FLTR_ADD,
3271 data->u.net_api_stats.net_api_stats.nas_ipf_add_total);
3272 nstat_set_keyval_u64_scalar(&kv[i++],
3273 NSTAT_SYSINFO_API_IP_FLTR_ADD_OS,
3274 data->u.net_api_stats.net_api_stats.nas_ipf_add_os_total);
3275 nstat_set_keyval_u64_scalar(&kv[i++],
3276 NSTAT_SYSINFO_API_SOCK_FLTR_ATTACH,
3277 data->u.net_api_stats.net_api_stats.nas_sfltr_register_total);
3278 nstat_set_keyval_u64_scalar(&kv[i++],
3279 NSTAT_SYSINFO_API_SOCK_FLTR_ATTACH_OS,
3280 data->u.net_api_stats.net_api_stats.nas_sfltr_register_os_total);
3281
3282
3283 nstat_set_keyval_u64_scalar(&kv[i++],
3284 NSTAT_SYSINFO_API_SOCK_ALLOC_TOTAL,
3285 data->u.net_api_stats.net_api_stats.nas_socket_alloc_total);
3286 nstat_set_keyval_u64_scalar(&kv[i++],
3287 NSTAT_SYSINFO_API_SOCK_ALLOC_KERNEL,
3288 data->u.net_api_stats.net_api_stats.nas_socket_in_kernel_total);
3289 nstat_set_keyval_u64_scalar(&kv[i++],
3290 NSTAT_SYSINFO_API_SOCK_ALLOC_KERNEL_OS,
3291 data->u.net_api_stats.net_api_stats.nas_socket_in_kernel_os_total);
3292 nstat_set_keyval_u64_scalar(&kv[i++],
3293 NSTAT_SYSINFO_API_SOCK_NECP_CLIENTUUID,
3294 data->u.net_api_stats.net_api_stats.nas_socket_necp_clientuuid_total);
3295
3296 nstat_set_keyval_u64_scalar(&kv[i++],
3297 NSTAT_SYSINFO_API_SOCK_DOMAIN_LOCAL,
3298 data->u.net_api_stats.net_api_stats.nas_socket_domain_local_total);
3299 nstat_set_keyval_u64_scalar(&kv[i++],
3300 NSTAT_SYSINFO_API_SOCK_DOMAIN_ROUTE,
3301 data->u.net_api_stats.net_api_stats.nas_socket_domain_route_total);
3302 nstat_set_keyval_u64_scalar(&kv[i++],
3303 NSTAT_SYSINFO_API_SOCK_DOMAIN_INET,
3304 data->u.net_api_stats.net_api_stats.nas_socket_domain_inet_total);
3305 nstat_set_keyval_u64_scalar(&kv[i++],
3306 NSTAT_SYSINFO_API_SOCK_DOMAIN_INET6,
3307 data->u.net_api_stats.net_api_stats.nas_socket_domain_inet6_total);
3308 nstat_set_keyval_u64_scalar(&kv[i++],
3309 NSTAT_SYSINFO_API_SOCK_DOMAIN_SYSTEM,
3310 data->u.net_api_stats.net_api_stats.nas_socket_domain_system_total);
3311 nstat_set_keyval_u64_scalar(&kv[i++],
3312 NSTAT_SYSINFO_API_SOCK_DOMAIN_MULTIPATH,
3313 data->u.net_api_stats.net_api_stats.nas_socket_domain_multipath_total);
3314 nstat_set_keyval_u64_scalar(&kv[i++],
3315 NSTAT_SYSINFO_API_SOCK_DOMAIN_KEY,
3316 data->u.net_api_stats.net_api_stats.nas_socket_domain_key_total);
3317 nstat_set_keyval_u64_scalar(&kv[i++],
3318 NSTAT_SYSINFO_API_SOCK_DOMAIN_NDRV,
3319 data->u.net_api_stats.net_api_stats.nas_socket_domain_ndrv_total);
3320 nstat_set_keyval_u64_scalar(&kv[i++],
3321 NSTAT_SYSINFO_API_SOCK_DOMAIN_OTHER,
3322 data->u.net_api_stats.net_api_stats.nas_socket_domain_other_total);
3323
3324 nstat_set_keyval_u64_scalar(&kv[i++],
3325 NSTAT_SYSINFO_API_SOCK_INET_STREAM,
3326 data->u.net_api_stats.net_api_stats.nas_socket_inet_stream_total);
3327 nstat_set_keyval_u64_scalar(&kv[i++],
3328 NSTAT_SYSINFO_API_SOCK_INET_DGRAM,
3329 data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_total);
3330 nstat_set_keyval_u64_scalar(&kv[i++],
3331 NSTAT_SYSINFO_API_SOCK_INET_DGRAM_CONNECTED,
3332 data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_connected);
3333 nstat_set_keyval_u64_scalar(&kv[i++],
3334 NSTAT_SYSINFO_API_SOCK_INET_DGRAM_DNS,
3335 data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_dns);
3336 nstat_set_keyval_u64_scalar(&kv[i++],
3337 NSTAT_SYSINFO_API_SOCK_INET_DGRAM_NO_DATA,
3338 data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_no_data);
3339
3340 nstat_set_keyval_u64_scalar(&kv[i++],
3341 NSTAT_SYSINFO_API_SOCK_INET6_STREAM,
3342 data->u.net_api_stats.net_api_stats.nas_socket_inet6_stream_total);
3343 nstat_set_keyval_u64_scalar(&kv[i++],
3344 NSTAT_SYSINFO_API_SOCK_INET6_DGRAM,
3345 data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_total);
3346 nstat_set_keyval_u64_scalar(&kv[i++],
3347 NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_CONNECTED,
3348 data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_connected);
3349 nstat_set_keyval_u64_scalar(&kv[i++],
3350 NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_DNS,
3351 data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_dns);
3352 nstat_set_keyval_u64_scalar(&kv[i++],
3353 NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_NO_DATA,
3354 data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_no_data);
3355
3356 nstat_set_keyval_u64_scalar(&kv[i++],
3357 NSTAT_SYSINFO_API_SOCK_INET_MCAST_JOIN,
3358 data->u.net_api_stats.net_api_stats.nas_socket_mcast_join_total);
3359 nstat_set_keyval_u64_scalar(&kv[i++],
3360 NSTAT_SYSINFO_API_SOCK_INET_MCAST_JOIN_OS,
3361 data->u.net_api_stats.net_api_stats.nas_socket_mcast_join_os_total);
3362
3363 nstat_set_keyval_u64_scalar(&kv[i++],
3364 NSTAT_SYSINFO_API_NEXUS_FLOW_INET_STREAM,
3365 data->u.net_api_stats.net_api_stats.nas_nx_flow_inet_stream_total);
3366 nstat_set_keyval_u64_scalar(&kv[i++],
3367 NSTAT_SYSINFO_API_NEXUS_FLOW_INET_DATAGRAM,
3368 data->u.net_api_stats.net_api_stats.nas_nx_flow_inet_dgram_total);
3369
3370 nstat_set_keyval_u64_scalar(&kv[i++],
3371 NSTAT_SYSINFO_API_NEXUS_FLOW_INET6_STREAM,
3372 data->u.net_api_stats.net_api_stats.nas_nx_flow_inet6_stream_total);
3373 nstat_set_keyval_u64_scalar(&kv[i++],
3374 NSTAT_SYSINFO_API_NEXUS_FLOW_INET6_DATAGRAM,
3375 data->u.net_api_stats.net_api_stats.nas_nx_flow_inet6_dgram_total);
3376
3377 nstat_set_keyval_u64_scalar(&kv[i++],
3378 NSTAT_SYSINFO_API_IFNET_ALLOC,
3379 data->u.net_api_stats.net_api_stats.nas_ifnet_alloc_total);
3380 nstat_set_keyval_u64_scalar(&kv[i++],
3381 NSTAT_SYSINFO_API_IFNET_ALLOC_OS,
3382 data->u.net_api_stats.net_api_stats.nas_ifnet_alloc_os_total);
3383
3384 nstat_set_keyval_u64_scalar(&kv[i++],
3385 NSTAT_SYSINFO_API_PF_ADDRULE,
3386 data->u.net_api_stats.net_api_stats.nas_pf_addrule_total);
3387 nstat_set_keyval_u64_scalar(&kv[i++],
3388 NSTAT_SYSINFO_API_PF_ADDRULE_OS,
3389 data->u.net_api_stats.net_api_stats.nas_pf_addrule_os);
3390
3391 nstat_set_keyval_u64_scalar(&kv[i++],
3392 NSTAT_SYSINFO_API_VMNET_START,
3393 data->u.net_api_stats.net_api_stats.nas_vmnet_total);
3394
3395
3396 nstat_set_keyval_scalar(&kv[i++],
3397 NSTAT_SYSINFO_API_REPORT_INTERVAL,
3398 data->u.net_api_stats.report_interval);
3399
3400 break;
3401 }
3402 }
3403 if (syscnt != NULL) {
3404 VERIFY(i > 0 && i <= nkeyvals);
3405 countsize = offsetof(nstat_sysinfo_counts,
3406 nstat_sysinfo_keyvals) +
3407 sizeof(nstat_sysinfo_keyval) * i;
3408 finalsize += countsize;
3409 syscnt->hdr.type = NSTAT_MSG_TYPE_SYSINFO_COUNTS;
3410 assert(finalsize <= MAX_NSTAT_MSG_HDR_LENGTH);
3411 syscnt->hdr.length = (u_int16_t)finalsize;
3412 syscnt->counts.nstat_sysinfo_len = (u_int32_t)countsize;
3413
3414 result = ctl_enqueuedata(control->ncs_kctl,
3415 control->ncs_unit, syscnt, finalsize, CTL_DATA_EOR);
3416 if (result != 0) {
3417 nstat_stats.nstat_sysinfofailures += 1;
3418 }
3419 kheap_free(KHEAP_TEMP, syscnt, allocsize);
3420 }
3421 return;
3422 }
3423
3424 __private_extern__ void
3425 nstat_sysinfo_send_data(
3426 nstat_sysinfo_data *data)
3427 {
3428 nstat_control_state *control;
3429
3430 lck_mtx_lock(&nstat_mtx);
3431 for (control = nstat_controls; control; control = control->ncs_next) {
3432 lck_mtx_lock(&control->ncs_mtx);
3433 if ((control->ncs_flags & NSTAT_FLAG_SYSINFO_SUBSCRIBED) != 0) {
3434 nstat_sysinfo_send_data_internal(control, data);
3435 }
3436 lck_mtx_unlock(&control->ncs_mtx);
3437 }
3438 lck_mtx_unlock(&nstat_mtx);
3439 }
3440
3441 static void
3442 nstat_sysinfo_generate_report(void)
3443 {
3444 mbuf_report_peak_usage();
3445 tcp_report_stats();
3446 nstat_ifnet_report_ecn_stats();
3447 nstat_ifnet_report_lim_stats();
3448 nstat_net_api_report_stats();
3449 }
3450
3451 #pragma mark -- net_api --
3452
3453 static struct net_api_stats net_api_stats_before;
3454 static u_int64_t net_api_stats_last_report_time;
3455
3456 static void
3457 nstat_net_api_report_stats(void)
3458 {
3459 struct nstat_sysinfo_data data;
3460 struct nstat_sysinfo_net_api_stats *st = &data.u.net_api_stats;
3461 u_int64_t uptime;
3462
3463 uptime = net_uptime();
3464
3465 if ((u_int32_t)(uptime - net_api_stats_last_report_time) <
3466 net_api_stats_report_interval) {
3467 return;
3468 }
3469
3470 st->report_interval = (u_int32_t)(uptime - net_api_stats_last_report_time);
3471 net_api_stats_last_report_time = uptime;
3472
3473 data.flags = NSTAT_SYSINFO_NET_API_STATS;
3474 data.unsent_data_cnt = 0;
3475
3476 /*
3477 * Some of the fields in the report are the current value and
3478 * other fields are the delta from the last report:
3479 * - Report the difference for the per-flow counters as they increase
3480 * with time
3481 * - Report the current value for other counters as they tend not to
3482 * change much with time
3483 */
3484 #define STATCOPY(f) \
3485 (st->net_api_stats.f = net_api_stats.f)
3486 #define STATDIFF(f) \
3487 (st->net_api_stats.f = net_api_stats.f - net_api_stats_before.f)
3488
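/*
 * For example, STATDIFF(nas_socket_alloc_total) expands to:
 *
 *	st->net_api_stats.nas_socket_alloc_total =
 *	    net_api_stats.nas_socket_alloc_total -
 *	    net_api_stats_before.nas_socket_alloc_total;
 *
 * i.e. the reported value is the growth since the previous report,
 * measured against the snapshot saved at the bottom of this function.
 */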
3489 STATCOPY(nas_iflt_attach_count);
3490 STATCOPY(nas_iflt_attach_total);
3491 STATCOPY(nas_iflt_attach_os_total);
3492
3493 STATCOPY(nas_ipf_add_count);
3494 STATCOPY(nas_ipf_add_total);
3495 STATCOPY(nas_ipf_add_os_total);
3496
3497 STATCOPY(nas_sfltr_register_count);
3498 STATCOPY(nas_sfltr_register_total);
3499 STATCOPY(nas_sfltr_register_os_total);
3500
3501 STATDIFF(nas_socket_alloc_total);
3502 STATDIFF(nas_socket_in_kernel_total);
3503 STATDIFF(nas_socket_in_kernel_os_total);
3504 STATDIFF(nas_socket_necp_clientuuid_total);
3505
3506 STATDIFF(nas_socket_domain_local_total);
3507 STATDIFF(nas_socket_domain_route_total);
3508 STATDIFF(nas_socket_domain_inet_total);
3509 STATDIFF(nas_socket_domain_inet6_total);
3510 STATDIFF(nas_socket_domain_system_total);
3511 STATDIFF(nas_socket_domain_multipath_total);
3512 STATDIFF(nas_socket_domain_key_total);
3513 STATDIFF(nas_socket_domain_ndrv_total);
3514 STATDIFF(nas_socket_domain_other_total);
3515
3516 STATDIFF(nas_socket_inet_stream_total);
3517 STATDIFF(nas_socket_inet_dgram_total);
3518 STATDIFF(nas_socket_inet_dgram_connected);
3519 STATDIFF(nas_socket_inet_dgram_dns);
3520 STATDIFF(nas_socket_inet_dgram_no_data);
3521
3522 STATDIFF(nas_socket_inet6_stream_total);
3523 STATDIFF(nas_socket_inet6_dgram_total);
3524 STATDIFF(nas_socket_inet6_dgram_connected);
3525 STATDIFF(nas_socket_inet6_dgram_dns);
3526 STATDIFF(nas_socket_inet6_dgram_no_data);
3527
3528 STATDIFF(nas_socket_mcast_join_total);
3529 STATDIFF(nas_socket_mcast_join_os_total);
3530
3531 STATDIFF(nas_sock_inet6_stream_exthdr_in);
3532 STATDIFF(nas_sock_inet6_stream_exthdr_out);
3533 STATDIFF(nas_sock_inet6_dgram_exthdr_in);
3534 STATDIFF(nas_sock_inet6_dgram_exthdr_out);
3535
3536 STATDIFF(nas_nx_flow_inet_stream_total);
3537 STATDIFF(nas_nx_flow_inet_dgram_total);
3538
3539 STATDIFF(nas_nx_flow_inet6_stream_total);
3540 STATDIFF(nas_nx_flow_inet6_dgram_total);
3541
3542 STATCOPY(nas_ifnet_alloc_count);
3543 STATCOPY(nas_ifnet_alloc_total);
3544 STATCOPY(nas_ifnet_alloc_os_count);
3545 STATCOPY(nas_ifnet_alloc_os_total);
3546
3547 STATCOPY(nas_pf_addrule_total);
3548 STATCOPY(nas_pf_addrule_os);
3549
3550 STATCOPY(nas_vmnet_total);
3551
3552 #undef STATCOPY
3553 #undef STATDIFF
3554
3555 nstat_sysinfo_send_data(&data);
3556
3557 /*
3558 * Save a copy of the current fields so we can diff them the next time
3559 */
3560 memcpy(&net_api_stats_before, &net_api_stats,
3561 sizeof(struct net_api_stats));
3562 _CASSERT(sizeof(net_api_stats_before) == sizeof(net_api_stats));
3563 }
3564
3565
3566 #pragma mark -- Kernel Control Socket --
3567
3568 static kern_ctl_ref nstat_ctlref = NULL;
3569 static lck_grp_t *nstat_lck_grp = NULL;
3570
3571 static errno_t nstat_control_connect(kern_ctl_ref kctl, struct sockaddr_ctl *sac, void **uinfo);
3572 static errno_t nstat_control_disconnect(kern_ctl_ref kctl, u_int32_t unit, void *uinfo);
3573 static errno_t nstat_control_send(kern_ctl_ref kctl, u_int32_t unit, void *uinfo, mbuf_t m, int flags);
3574
3575 static errno_t
3576 nstat_enqueue_success(
3577 uint64_t context,
3578 nstat_control_state *state,
3579 u_int16_t flags)
3580 {
3581 nstat_msg_hdr success;
3582 errno_t result;
3583
3584 bzero(&success, sizeof(success));
3585 success.context = context;
3586 success.type = NSTAT_MSG_TYPE_SUCCESS;
3587 success.length = sizeof(success);
3588 success.flags = flags;
3589 result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &success,
3590 sizeof(success), CTL_DATA_EOR | CTL_DATA_CRIT);
3591 if (result != 0) {
3592 if (nstat_debug != 0) {
3593 printf("%s: could not enqueue success message %d\n",
3594 __func__, result);
3595 }
3596 nstat_stats.nstat_successmsgfailures += 1;
3597 }
3598 return result;
3599 }
3600
3601 static errno_t
3602 nstat_control_send_event(
3603 nstat_control_state *state,
3604 nstat_src *src,
3605 u_int64_t event)
3606 {
3607 errno_t result = 0;
3608 int failed = 0;
3609
3610 if (nstat_control_reporting_allowed(state, src)) {
3611 if ((state->ncs_flags & NSTAT_FLAG_SUPPORTS_UPDATES) != 0) {
3612 result = nstat_control_send_update(state, src, 0, event, 0, NULL);
3613 if (result != 0) {
3614 failed = 1;
3615 if (nstat_debug != 0) {
3616 printf("%s - nstat_control_send_event() %d\n", __func__, result);
3617 }
3618 }
3619 } else {
3620 if (nstat_debug != 0) {
3621 printf("%s - nstat_control_send_event() used when updates not supported\n", __func__);
3622 }
3623 }
3624 }
3625 return result;
3626 }
3627
3628 static errno_t
3629 nstat_control_send_goodbye(
3630 nstat_control_state *state,
3631 nstat_src *src)
3632 {
3633 errno_t result = 0;
3634 int failed = 0;
3635
3636 if (nstat_control_reporting_allowed(state, src)) {
3637 if ((state->ncs_flags & NSTAT_FLAG_SUPPORTS_UPDATES) != 0) {
3638 result = nstat_control_send_update(state, src, 0, 0, NSTAT_MSG_HDR_FLAG_CLOSING, NULL);
3639 if (result != 0) {
3640 failed = 1;
3641 if (nstat_debug != 0) {
3642 printf("%s - nstat_control_send_update() %d\n", __func__, result);
3643 }
3644 }
3645 } else {
3646 // send one last counts notification
3647 result = nstat_control_send_counts(state, src, 0, NSTAT_MSG_HDR_FLAG_CLOSING, NULL);
3648 if (result != 0) {
3649 failed = 1;
3650 if (nstat_debug != 0) {
3651 printf("%s - nstat_control_send_counts() %d\n", __func__, result);
3652 }
3653 }
3654
3655 // send a last description
3656 result = nstat_control_send_description(state, src, 0, NSTAT_MSG_HDR_FLAG_CLOSING);
3657 if (result != 0) {
3658 failed = 1;
3659 if (nstat_debug != 0) {
3660 printf("%s - nstat_control_send_description() %d\n", __func__, result);
3661 }
3662 }
3663 }
3664 }
3665
3666 // send the source removed notification
3667 result = nstat_control_send_removed(state, src);
3668 if (result != 0) {
3669 failed = 1;
3670 if (nstat_debug != 0) {
3671 printf("%s - nstat_control_send_removed() %d\n", __func__, result);
3672 }
3673 }
3674
3675 if (failed != 0) {
3676 nstat_stats.nstat_control_send_goodbye_failures++;
3677 }
3678
3679
3680 return result;
3681 }
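
/*
 * Summary of the goodbye sequence above: clients that support update
 * messages get one final update stamped NSTAT_MSG_HDR_FLAG_CLOSING;
 * legacy clients get a final counts message followed by a final
 * description; in both cases the source-removed notification is sent
 * last.
 */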
3682
3683 static errno_t
3684 nstat_flush_accumulated_msgs(
3685 nstat_control_state *state)
3686 {
3687 errno_t result = 0;
3688 if (state->ncs_accumulated != NULL && mbuf_len(state->ncs_accumulated) > 0) {
3689 mbuf_pkthdr_setlen(state->ncs_accumulated, mbuf_len(state->ncs_accumulated));
3690 result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, state->ncs_accumulated, CTL_DATA_EOR);
3691 if (result != 0) {
3692 nstat_stats.nstat_flush_accumulated_msgs_failures++;
3693 if (nstat_debug != 0) {
3694 printf("%s - ctl_enqueuembuf failed: %d\n", __func__, result);
3695 }
3696 mbuf_freem(state->ncs_accumulated);
3697 }
3698 state->ncs_accumulated = NULL;
3699 }
3700 return result;
3701 }
3702
3703 static errno_t
3704 nstat_accumulate_msg(
3705 nstat_control_state *state,
3706 nstat_msg_hdr *hdr,
3707 size_t length)
3708 {
3709 assert(length <= MAX_NSTAT_MSG_HDR_LENGTH);
3710
3711 if (state->ncs_accumulated && mbuf_trailingspace(state->ncs_accumulated) < length) {
3712 // Will send the current mbuf
3713 nstat_flush_accumulated_msgs(state);
3714 }
3715
3716 errno_t result = 0;
3717
3718 if (state->ncs_accumulated == NULL) {
3719 unsigned int one = 1;
3720 if (mbuf_allocpacket(MBUF_DONTWAIT, NSTAT_MAX_MSG_SIZE, &one, &state->ncs_accumulated) != 0) {
3721 if (nstat_debug != 0) {
3722 printf("%s - mbuf_allocpacket failed\n", __func__);
3723 }
3724 result = ENOMEM;
3725 } else {
3726 mbuf_setlen(state->ncs_accumulated, 0);
3727 }
3728 }
3729
3730 if (result == 0) {
3731 hdr->length = (u_int16_t)length;
3732 result = mbuf_copyback(state->ncs_accumulated, mbuf_len(state->ncs_accumulated),
3733 length, hdr, MBUF_DONTWAIT);
3734 }
3735
3736 if (result != 0) {
3737 nstat_flush_accumulated_msgs(state);
3738 if (nstat_debug != 0) {
3739 printf("%s - resorting to ctl_enqueuedata\n", __func__);
3740 }
3741 result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, hdr, length, CTL_DATA_EOR);
3742 }
3743
3744 if (result != 0) {
3745 nstat_stats.nstat_accumulate_msg_failures++;
3746 }
3747
3748 return result;
3749 }
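
/*
 * Typical use of the accumulation path, as in the query handlers below:
 * per-source messages are batched into a single mbuf through
 * nstat_accumulate_msg() (e.g. via nstat_control_append_counts()), and
 * one nstat_flush_accumulated_msgs() call at the end of the scan
 * enqueues the whole batch to the client as a single CTL_DATA_EOR
 * record.
 */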
3750
3751 static void*
3752 nstat_idle_check(
3753 __unused thread_call_param_t p0,
3754 __unused thread_call_param_t p1)
3755 {
3756 nstat_control_state *control;
3757 nstat_src *src, *tmpsrc;
3758 tailq_head_nstat_src dead_list;
3759 TAILQ_INIT(&dead_list);
3760
3761 lck_mtx_lock(&nstat_mtx);
3762
3763 nstat_idle_time = 0;
3764
3765 for (control = nstat_controls; control; control = control->ncs_next) {
3766 lck_mtx_lock(&control->ncs_mtx);
3767 if (!(control->ncs_flags & NSTAT_FLAG_REQCOUNTS)) {
3768 TAILQ_FOREACH_SAFE(src, &control->ncs_src_queue, ns_control_link, tmpsrc)
3769 {
3770 if (src->provider->nstat_gone(src->cookie)) {
3771 errno_t result;
3772
3773 // Pull it off the list
3774 TAILQ_REMOVE(&control->ncs_src_queue, src, ns_control_link);
3775
3776 result = nstat_control_send_goodbye(control, src);
3777
3778 // Put this on the list to release later
3779 TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
3780 }
3781 }
3782 }
3783 control->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS;
3784 lck_mtx_unlock(&control->ncs_mtx);
3785 }
3786
3787 if (nstat_controls) {
3788 clock_interval_to_deadline(60, NSEC_PER_SEC, &nstat_idle_time);
3789 thread_call_func_delayed((thread_call_func_t)nstat_idle_check, NULL, nstat_idle_time);
3790 }
3791
3792 lck_mtx_unlock(&nstat_mtx);
3793
3794 /* Generate any system level reports, if needed */
3795 nstat_sysinfo_generate_report();
3796
3797 // Release the sources now that we aren't holding lots of locks
3798 while ((src = TAILQ_FIRST(&dead_list))) {
3799 TAILQ_REMOVE(&dead_list, src, ns_control_link);
3800 nstat_control_cleanup_source(NULL, src, FALSE);
3801 }
3802
3803
3804 return NULL;
3805 }
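
/*
 * Note that the idle check only re-arms itself (with a 60 second delay)
 * while clients remain connected, and that dead sources are released
 * only after nstat_mtx and the per-control mutexes have been dropped,
 * since nstat_control_cleanup_source() calls back into the provider's
 * nstat_release.
 */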
3806
3807 static void
3808 nstat_control_register(void)
3809 {
3810 // Create our lock group first
3811 lck_grp_attr_t *grp_attr = lck_grp_attr_alloc_init();
3812 lck_grp_attr_setdefault(grp_attr);
3813 nstat_lck_grp = lck_grp_alloc_init("network statistics kctl", grp_attr);
3814 lck_grp_attr_free(grp_attr);
3815
3816 lck_mtx_init(&nstat_mtx, nstat_lck_grp, NULL);
3817
3818 // Register the control
3819 struct kern_ctl_reg nstat_control;
3820 bzero(&nstat_control, sizeof(nstat_control));
3821 strlcpy(nstat_control.ctl_name, NET_STAT_CONTROL_NAME, sizeof(nstat_control.ctl_name));
3822 nstat_control.ctl_flags = CTL_FLAG_REG_EXTENDED | CTL_FLAG_REG_CRIT;
3823 nstat_control.ctl_sendsize = nstat_sendspace;
3824 nstat_control.ctl_recvsize = nstat_recvspace;
3825 nstat_control.ctl_connect = nstat_control_connect;
3826 nstat_control.ctl_disconnect = nstat_control_disconnect;
3827 nstat_control.ctl_send = nstat_control_send;
3828
3829 ctl_register(&nstat_control, &nstat_ctlref);
3830 }
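
/*
 * Illustrative user-space sketch (not part of this file) of how a client
 * reaches the control registered above.  The calls come from
 * <sys/kern_control.h>, <sys/sys_domain.h> and <sys/socket.h>; whether
 * the caller also needs PRIV_NET_PRIVILEGED_NETWORK_STATISTICS depends
 * on nstat_privcheck and the provider filters.
 */
#if 0
#include <sys/socket.h>
#include <sys/sys_domain.h>
#include <sys/kern_control.h>
#include <sys/ioctl.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>

static int
nstat_open_control_socket(void)
{
	struct ctl_info info;
	struct sockaddr_ctl sc;
	int fd = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);

	if (fd < 0) {
		return -1;
	}
	bzero(&info, sizeof(info));
	// NET_STAT_CONTROL_NAME is "com.apple.network.statistics"
	strlcpy(info.ctl_name, "com.apple.network.statistics", sizeof(info.ctl_name));
	if (ioctl(fd, CTLIOCGINFO, &info) != 0) {   // resolve name to ctl_id
		close(fd);
		return -1;
	}
	bzero(&sc, sizeof(sc));
	sc.sc_len = sizeof(sc);
	sc.sc_family = AF_SYSTEM;
	sc.ss_sysaddr = AF_SYS_CONTROL;
	sc.sc_id = info.ctl_id;
	sc.sc_unit = 0;                             // let the kernel assign a unit
	if (connect(fd, (struct sockaddr *)&sc, sizeof(sc)) != 0) {
		close(fd);
		return -1;
	}
	return fd;
}
#endif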
3831
3832 static void
3833 nstat_control_cleanup_source(
3834 nstat_control_state *state,
3835 struct nstat_src *src,
3836 boolean_t locked)
3837 {
3838 errno_t result;
3839
3840 if (state) {
3841 result = nstat_control_send_removed(state, src);
3842 if (result != 0) {
3843 nstat_stats.nstat_control_cleanup_source_failures++;
3844 if (nstat_debug != 0) {
3845 printf("%s - nstat_control_send_removed() %d\n",
3846 __func__, result);
3847 }
3848 }
3849 }
3850 // Clean up the source.
3851 src->provider->nstat_release(src->cookie, locked);
3852 kheap_free(KHEAP_NET_STAT, src, sizeof(*src));
3853 }
3854
3855
3856 static bool
3857 nstat_control_reporting_allowed(
3858 nstat_control_state *state,
3859 nstat_src *src)
3860 {
3861 if (src->provider->nstat_reporting_allowed == NULL) {
3862 return TRUE;
3863 }
3864
3865 return src->provider->nstat_reporting_allowed(src->cookie,
3866 &state->ncs_provider_filters[src->provider->nstat_provider_id]);
3867 }
3868
3869
3870 static errno_t
3871 nstat_control_connect(
3872 kern_ctl_ref kctl,
3873 struct sockaddr_ctl *sac,
3874 void **uinfo)
3875 {
3876 nstat_control_state *state = kheap_alloc(KHEAP_NET_STAT,
3877 sizeof(*state), Z_WAITOK | Z_ZERO);
3878 if (state == NULL) {
3879 return ENOMEM;
3880 }
3881
3882 lck_mtx_init(&state->ncs_mtx, nstat_lck_grp, NULL);
3883 state->ncs_kctl = kctl;
3884 state->ncs_unit = sac->sc_unit;
3885 state->ncs_flags = NSTAT_FLAG_REQCOUNTS;
3886 *uinfo = state;
3887
3888 lck_mtx_lock(&nstat_mtx);
3889 state->ncs_next = nstat_controls;
3890 nstat_controls = state;
3891
3892 if (nstat_idle_time == 0) {
3893 clock_interval_to_deadline(60, NSEC_PER_SEC, &nstat_idle_time);
3894 thread_call_func_delayed((thread_call_func_t)nstat_idle_check, NULL, nstat_idle_time);
3895 }
3896
3897 lck_mtx_unlock(&nstat_mtx);
3898
3899 return 0;
3900 }
3901
3902 static errno_t
3903 nstat_control_disconnect(
3904 __unused kern_ctl_ref kctl,
3905 __unused u_int32_t unit,
3906 void *uinfo)
3907 {
3908 u_int32_t watching;
3909 nstat_control_state *state = (nstat_control_state*)uinfo;
3910 tailq_head_nstat_src cleanup_list;
3911 nstat_src *src;
3912
3913 TAILQ_INIT(&cleanup_list);
3914
3915 // pull it out of the global list of states
3916 lck_mtx_lock(&nstat_mtx);
3917 nstat_control_state **statepp;
3918 for (statepp = &nstat_controls; *statepp; statepp = &(*statepp)->ncs_next) {
3919 if (*statepp == state) {
3920 *statepp = state->ncs_next;
3921 break;
3922 }
3923 }
3924 lck_mtx_unlock(&nstat_mtx);
3925
3926 lck_mtx_lock(&state->ncs_mtx);
3927 // Stop watching for sources
3928 nstat_provider *provider;
3929 watching = state->ncs_watching;
3930 state->ncs_watching = 0;
3931 for (provider = nstat_providers; provider && watching; provider = provider->next) {
3932 if ((watching & (1 << provider->nstat_provider_id)) != 0) {
3933 watching &= ~(1 << provider->nstat_provider_id);
3934 provider->nstat_watcher_remove(state);
3935 }
3936 }
3937
3938 // set cleanup flags
3939 state->ncs_flags |= NSTAT_FLAG_CLEANUP;
3940
3941 if (state->ncs_accumulated) {
3942 mbuf_freem(state->ncs_accumulated);
3943 state->ncs_accumulated = NULL;
3944 }
3945
3946 // Copy out the list of sources
3947 TAILQ_CONCAT(&cleanup_list, &state->ncs_src_queue, ns_control_link);
3948 lck_mtx_unlock(&state->ncs_mtx);
3949
3950 while ((src = TAILQ_FIRST(&cleanup_list))) {
3951 TAILQ_REMOVE(&cleanup_list, src, ns_control_link);
3952 nstat_control_cleanup_source(NULL, src, FALSE);
3953 }
3954
3955 lck_mtx_destroy(&state->ncs_mtx, nstat_lck_grp);
3956 kheap_free(KHEAP_NET_STAT, state, sizeof(*state));
3957
3958 return 0;
3959 }
3960
3961 static nstat_src_ref_t
3962 nstat_control_next_src_ref(
3963 nstat_control_state *state)
3964 {
3965 return ++state->ncs_next_srcref;
3966 }
3967
3968 static errno_t
3969 nstat_control_send_counts(
3970 nstat_control_state *state,
3971 nstat_src *src,
3972 u_int64_t context,
3973 u_int16_t hdr_flags,
3974 int *gone)
3975 {
3976 nstat_msg_src_counts counts;
3977 errno_t result = 0;
3978
3979 /* Some providers may not have any counts to send */
3980 if (src->provider->nstat_counts == NULL) {
3981 return 0;
3982 }
3983
3984 bzero(&counts, sizeof(counts));
3985 counts.hdr.type = NSTAT_MSG_TYPE_SRC_COUNTS;
3986 counts.hdr.length = sizeof(counts);
3987 counts.hdr.flags = hdr_flags;
3988 counts.hdr.context = context;
3989 counts.srcref = src->srcref;
3990 counts.event_flags = 0;
3991
3992 if (src->provider->nstat_counts(src->cookie, &counts.counts, gone) == 0) {
3993 if ((src->filter & NSTAT_FILTER_NOZEROBYTES) &&
3994 counts.counts.nstat_rxbytes == 0 &&
3995 counts.counts.nstat_txbytes == 0) {
3996 result = EAGAIN;
3997 } else {
3998 result = ctl_enqueuedata(state->ncs_kctl,
3999 state->ncs_unit, &counts, sizeof(counts),
4000 CTL_DATA_EOR);
4001 if (result != 0) {
4002 nstat_stats.nstat_sendcountfailures += 1;
4003 }
4004 }
4005 }
4006 return result;
4007 }
4008
4009 static errno_t
4010 nstat_control_append_counts(
4011 nstat_control_state *state,
4012 nstat_src *src,
4013 int *gone)
4014 {
4015 /* Some providers may not have any counts to send */
4016 if (!src->provider->nstat_counts) {
4017 return 0;
4018 }
4019
4020 nstat_msg_src_counts counts;
4021 bzero(&counts, sizeof(counts));
4022 counts.hdr.type = NSTAT_MSG_TYPE_SRC_COUNTS;
4023 counts.hdr.length = sizeof(counts);
4024 counts.srcref = src->srcref;
4025 counts.event_flags = 0;
4026
4027 errno_t result = 0;
4028 result = src->provider->nstat_counts(src->cookie, &counts.counts, gone);
4029 if (result != 0) {
4030 return result;
4031 }
4032
4033 if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
4034 counts.counts.nstat_rxbytes == 0 && counts.counts.nstat_txbytes == 0) {
4035 return EAGAIN;
4036 }
4037
4038 return nstat_accumulate_msg(state, &counts.hdr, counts.hdr.length);
4039 }
4040
4041 static int
4042 nstat_control_send_description(
4043 nstat_control_state *state,
4044 nstat_src *src,
4045 u_int64_t context,
4046 u_int16_t hdr_flags)
4047 {
4048 // Provider doesn't support getting the descriptor? Done.
4049 if (src->provider->nstat_descriptor_length == 0 ||
4050 src->provider->nstat_copy_descriptor == NULL) {
4051 return EOPNOTSUPP;
4052 }
4053
4054 // Allocate storage for the descriptor message
4055 mbuf_t msg;
4056 unsigned int one = 1;
4057 size_t size = offsetof(nstat_msg_src_description, data) + src->provider->nstat_descriptor_length;
4058 assert(size <= MAX_NSTAT_MSG_HDR_LENGTH);
4059
4060 if (mbuf_allocpacket(MBUF_DONTWAIT, size, &one, &msg) != 0) {
4061 return ENOMEM;
4062 }
4063
4064 nstat_msg_src_description *desc = (nstat_msg_src_description*)mbuf_data(msg);
4065 bzero(desc, size);
4066 mbuf_setlen(msg, size);
4067 mbuf_pkthdr_setlen(msg, mbuf_len(msg));
4068
4069 // Query the provider for the provider specific bits
4070 errno_t result = src->provider->nstat_copy_descriptor(src->cookie, desc->data, src->provider->nstat_descriptor_length);
4071
4072 if (result != 0) {
4073 mbuf_freem(msg);
4074 return result;
4075 }
4076
4077 desc->hdr.context = context;
4078 desc->hdr.type = NSTAT_MSG_TYPE_SRC_DESC;
4079 desc->hdr.length = (u_int16_t)size;
4080 desc->hdr.flags = hdr_flags;
4081 desc->srcref = src->srcref;
4082 desc->event_flags = 0;
4083 desc->provider = src->provider->nstat_provider_id;
4084
4085 result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg, CTL_DATA_EOR);
4086 if (result != 0) {
4087 nstat_stats.nstat_descriptionfailures += 1;
4088 mbuf_freem(msg);
4089 }
4090
4091 return result;
4092 }
4093
4094 static errno_t
4095 nstat_control_append_description(
4096 nstat_control_state *state,
4097 nstat_src *src)
4098 {
4099 size_t size = offsetof(nstat_msg_src_description, data) + src->provider->nstat_descriptor_length;
4100 if (size > 512 || src->provider->nstat_descriptor_length == 0 ||
4101 src->provider->nstat_copy_descriptor == NULL) {
4102 return EOPNOTSUPP;
4103 }
4104
4105 // Fill out a buffer on the stack; we will copy it to the mbuf later
4106 u_int64_t buffer[size / sizeof(u_int64_t) + 1]; // u_int64_t to ensure alignment
4107 bzero(buffer, size);
4108
4109 nstat_msg_src_description *desc = (nstat_msg_src_description*)buffer;
4110 desc->hdr.type = NSTAT_MSG_TYPE_SRC_DESC;
4111 desc->hdr.length = (u_int16_t)size;
4112 desc->srcref = src->srcref;
4113 desc->event_flags = 0;
4114 desc->provider = src->provider->nstat_provider_id;
4115
4116 errno_t result = 0;
4117 // Fill in the description
4118 // Query the provider for the provider specific bits
4119 result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
4120 src->provider->nstat_descriptor_length);
4121 if (result != 0) {
4122 return result;
4123 }
4124
4125 return nstat_accumulate_msg(state, &desc->hdr, size);
4126 }
4127
4128 static int
4129 nstat_control_send_update(
4130 nstat_control_state *state,
4131 nstat_src *src,
4132 u_int64_t context,
4133 u_int64_t event,
4134 u_int16_t hdr_flags,
4135 int *gone)
4136 {
4137 // Provider doesn't support getting the descriptor or counts? Done.
4138 if ((src->provider->nstat_descriptor_length == 0 ||
4139 src->provider->nstat_copy_descriptor == NULL) &&
4140 src->provider->nstat_counts == NULL) {
4141 return EOPNOTSUPP;
4142 }
4143
4144 // Allocate storage for the descriptor message
4145 mbuf_t msg;
4146 unsigned int one = 1;
4147 size_t size = offsetof(nstat_msg_src_update, data) +
4148 src->provider->nstat_descriptor_length;
4149 assert(size <= MAX_NSTAT_MSG_HDR_LENGTH);
4150
4151 if (mbuf_allocpacket(MBUF_DONTWAIT, size, &one, &msg) != 0) {
4152 return ENOMEM;
4153 }
4154
4155 nstat_msg_src_update *desc = (nstat_msg_src_update*)mbuf_data(msg);
4156 bzero(desc, size);
4157 desc->hdr.context = context;
4158 desc->hdr.type = NSTAT_MSG_TYPE_SRC_UPDATE;
4159 desc->hdr.length = (u_int16_t)size;
4160 desc->hdr.flags = hdr_flags;
4161 desc->srcref = src->srcref;
4162 desc->event_flags = event;
4163 desc->provider = src->provider->nstat_provider_id;
4164
4165 mbuf_setlen(msg, size);
4166 mbuf_pkthdr_setlen(msg, mbuf_len(msg));
4167
4168 errno_t result = 0;
4169 if (src->provider->nstat_descriptor_length != 0 && src->provider->nstat_copy_descriptor) {
4170 // Query the provider for the provider specific bits
4171 result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
4172 src->provider->nstat_descriptor_length);
4173 if (result != 0) {
4174 mbuf_freem(msg);
4175 return result;
4176 }
4177 }
4178
4179 if (src->provider->nstat_counts) {
4180 result = src->provider->nstat_counts(src->cookie, &desc->counts, gone);
4181 if (result == 0) {
4182 if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
4183 desc->counts.nstat_rxbytes == 0 && desc->counts.nstat_txbytes == 0) {
4184 result = EAGAIN;
4185 } else {
4186 result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg, CTL_DATA_EOR);
4187 }
4188 }
4189 }
4190
4191 if (result != 0) {
4192 nstat_stats.nstat_srcupatefailures += 1;
4193 mbuf_freem(msg);
4194 }
4195
4196 return result;
4197 }
4198
4199 static errno_t
4200 nstat_control_append_update(
4201 nstat_control_state *state,
4202 nstat_src *src,
4203 int *gone)
4204 {
4205 size_t size = offsetof(nstat_msg_src_update, data) + src->provider->nstat_descriptor_length;
4206 if (size > 512 || ((src->provider->nstat_descriptor_length == 0 ||
4207 src->provider->nstat_copy_descriptor == NULL) &&
4208 src->provider->nstat_counts == NULL)) {
4209 return EOPNOTSUPP;
4210 }
4211
4212 // Fill out a buffer on the stack; we will copy it to the mbuf later
4213 u_int64_t buffer[size / sizeof(u_int64_t) + 1]; // u_int64_t to ensure alignment
4214 bzero(buffer, size);
4215
4216 nstat_msg_src_update *desc = (nstat_msg_src_update*)buffer;
4217 desc->hdr.type = NSTAT_MSG_TYPE_SRC_UPDATE;
4218 desc->hdr.length = (u_int16_t)size;
4219 desc->srcref = src->srcref;
4220 desc->event_flags = 0;
4221 desc->provider = src->provider->nstat_provider_id;
4222
4223 errno_t result = 0;
4224 // Fill in the description
4225 if (src->provider->nstat_descriptor_length != 0 && src->provider->nstat_copy_descriptor) {
4226 // Query the provider for the provider specific bits
4227 result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
4228 src->provider->nstat_descriptor_length);
4229 if (result != 0) {
4230 nstat_stats.nstat_copy_descriptor_failures++;
4231 if (nstat_debug != 0) {
4232 printf("%s: src->provider->nstat_copy_descriptor: %d\n", __func__, result);
4233 }
4234 return result;
4235 }
4236 }
4237
4238 if (src->provider->nstat_counts) {
4239 result = src->provider->nstat_counts(src->cookie, &desc->counts, gone);
4240 if (result != 0) {
4241 nstat_stats.nstat_provider_counts_failures++;
4242 if (nstat_debug != 0) {
4243 printf("%s: src->provider->nstat_counts: %d\n", __func__, result);
4244 }
4245 return result;
4246 }
4247
4248 if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
4249 desc->counts.nstat_rxbytes == 0 && desc->counts.nstat_txbytes == 0) {
4250 return EAGAIN;
4251 }
4252 }
4253
4254 return nstat_accumulate_msg(state, &desc->hdr, size);
4255 }
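
/*
 * The append_* variants above build each message in a bounded (at most
 * 512 byte) stack buffer and batch it via nstat_accumulate_msg();
 * providers whose descriptors would exceed that bound can only be
 * served by the unbatched send_* paths.
 */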
4256
4257 static errno_t
4258 nstat_control_send_removed(
4259 nstat_control_state *state,
4260 nstat_src *src)
4261 {
4262 nstat_msg_src_removed removed;
4263 errno_t result;
4264
4265 bzero(&removed, sizeof(removed));
4266 removed.hdr.type = NSTAT_MSG_TYPE_SRC_REMOVED;
4267 removed.hdr.length = sizeof(removed);
4268 removed.hdr.context = 0;
4269 removed.srcref = src->srcref;
4270 result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &removed,
4271 sizeof(removed), CTL_DATA_EOR | CTL_DATA_CRIT);
4272 if (result != 0) {
4273 nstat_stats.nstat_msgremovedfailures += 1;
4274 }
4275
4276 return result;
4277 }
4278
4279 static errno_t
4280 nstat_control_handle_add_request(
4281 nstat_control_state *state,
4282 mbuf_t m)
4283 {
4284 errno_t result;
4285
4286 // Verify the header fits in the first mbuf
4287 if (mbuf_len(m) < offsetof(nstat_msg_add_src_req, param)) {
4288 return EINVAL;
4289 }
4290
4291 // Calculate the length of the parameter field
4292 ssize_t paramlength = mbuf_pkthdr_len(m) - offsetof(nstat_msg_add_src_req, param);
4293 if (paramlength < 0 || paramlength > 2 * 1024) {
4294 return EINVAL;
4295 }
4296
4297 nstat_provider *provider = NULL;
4298 nstat_provider_cookie_t cookie = NULL;
4299 nstat_msg_add_src_req *req = mbuf_data(m);
4300 if (mbuf_pkthdr_len(m) > mbuf_len(m)) {
4301 // parameter is too large, we need to make a contiguous copy
4302 void *data = kheap_alloc(KHEAP_TEMP, paramlength, Z_WAITOK);
4303
4304 if (!data) {
4305 return ENOMEM;
4306 }
4307 result = mbuf_copydata(m, offsetof(nstat_msg_add_src_req, param), paramlength, data);
4308 if (result == 0) {
4309 result = nstat_lookup_entry(req->provider, data, paramlength, &provider, &cookie);
4310 }
4311 kheap_free(KHEAP_TEMP, data, paramlength);
4312 } else {
4313 result = nstat_lookup_entry(req->provider, (void*)&req->param, paramlength, &provider, &cookie);
4314 }
4315
4316 if (result != 0) {
4317 return result;
4318 }
4319
4320 result = nstat_control_source_add(req->hdr.context, state, provider, cookie);
4321 if (result != 0) {
4322 provider->nstat_release(cookie, 0);
4323 }
4324
4325 return result;
4326 }
4327
4328 static errno_t
4329 nstat_set_provider_filter(
4330 nstat_control_state *state,
4331 nstat_msg_add_all_srcs *req)
4332 {
4333 nstat_provider_id_t provider_id = req->provider;
4334
4335 u_int32_t prev_ncs_watching = atomic_or_32_ov(&state->ncs_watching, (1 << provider_id));
4336
4337 if ((prev_ncs_watching & (1 << provider_id)) != 0) {
4338 return EALREADY;
4339 }
4340
4341 state->ncs_watching |= (1 << provider_id);
4342 state->ncs_provider_filters[provider_id].npf_flags = req->filter;
4343 state->ncs_provider_filters[provider_id].npf_events = req->events;
4344 state->ncs_provider_filters[provider_id].npf_pid = req->target_pid;
4345 uuid_copy(state->ncs_provider_filters[provider_id].npf_uuid, req->target_uuid);
4346 return 0;
4347 }
4348
4349 static errno_t
4350 nstat_control_handle_add_all(
4351 nstat_control_state *state,
4352 mbuf_t m)
4353 {
4354 errno_t result = 0;
4355
4356 // Verify the header fits in the first mbuf
4357 if (mbuf_len(m) < sizeof(nstat_msg_add_all_srcs)) {
4358 return EINVAL;
4359 }
4360
4361 nstat_msg_add_all_srcs *req = mbuf_data(m);
4362 if (req->provider > NSTAT_PROVIDER_LAST) {
4363 return ENOENT;
4364 }
4365
4366 nstat_provider *provider = nstat_find_provider_by_id(req->provider);
4367
4368 if (!provider) {
4369 return ENOENT;
4370 }
4371 if (provider->nstat_watcher_add == NULL) {
4372 return ENOTSUP;
4373 }
4374
4375 if (nstat_privcheck != 0) {
4376 result = priv_check_cred(kauth_cred_get(),
4377 PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0);
4378 if (result != 0) {
4379 return result;
4380 }
4381 }
4382
4383 lck_mtx_lock(&state->ncs_mtx);
4384 if (req->filter & NSTAT_FILTER_SUPPRESS_SRC_ADDED) {
4385 // Suppression of source messages implicitly requires the use of update messages
4386 state->ncs_flags |= NSTAT_FLAG_SUPPORTS_UPDATES;
4387 }
4388 lck_mtx_unlock(&state->ncs_mtx);
4389
4390 // rdar://problem/30301300 Different providers require different synchronization
4391 // to ensure that a new entry does not get double counted due to being added prior
4392 // to all current provider entries being added. Hence we pass the provider the
4393 // details of the original request so that it can apply them atomically.
4394
4395 result = provider->nstat_watcher_add(state, req);
4396
4397 if (result == 0) {
4398 nstat_enqueue_success(req->hdr.context, state, 0);
4399 }
4400
4401 return result;
4402 }
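
/*
 * Illustrative user-space sketch of the matching request (not compiled
 * here).  The fields mirror what nstat_set_provider_filter() consumes;
 * NSTAT_PROVIDER_TCP_KERNEL is an assumed provider id and fd an already
 * connected control socket (see the sketch after nstat_control_register()).
 */
#if 0
nstat_msg_add_all_srcs aas;

bzero(&aas, sizeof(aas));
aas.hdr.type = NSTAT_MSG_TYPE_ADD_ALL_SRCS;
aas.hdr.length = sizeof(aas);
aas.hdr.context = 1;                          // echoed back in SUCCESS/ERROR
aas.provider = NSTAT_PROVIDER_TCP_KERNEL;     // assumption: TCP provider id
aas.filter = NSTAT_FILTER_SUPPRESS_SRC_ADDED; // implies update messages
aas.events = 0;
aas.target_pid = 0;                           // assumption: 0 means no pid filter
uuid_clear(aas.target_uuid);
(void)send(fd, &aas, sizeof(aas), 0);
#endif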
4403
4404 static errno_t
4405 nstat_control_source_add(
4406 u_int64_t context,
4407 nstat_control_state *state,
4408 nstat_provider *provider,
4409 nstat_provider_cookie_t cookie)
4410 {
4411 // Fill out source added message if appropriate
4412 mbuf_t msg = NULL;
4413 nstat_src_ref_t *srcrefp = NULL;
4414
4415 u_int64_t provider_filter_flags =
4416 state->ncs_provider_filters[provider->nstat_provider_id].npf_flags;
4417 boolean_t tell_user =
4418 ((provider_filter_flags & NSTAT_FILTER_SUPPRESS_SRC_ADDED) == 0);
4419 u_int32_t src_filter =
4420 (provider_filter_flags & NSTAT_FILTER_PROVIDER_NOZEROBYTES)
4421 ? NSTAT_FILTER_NOZEROBYTES : 0;
4422
4423 if (provider_filter_flags & NSTAT_FILTER_TCP_NO_EARLY_CLOSE) {
4424 src_filter |= NSTAT_FILTER_TCP_NO_EARLY_CLOSE;
4425 }
4426
4427 if (tell_user) {
4428 unsigned int one = 1;
4429
4430 if (mbuf_allocpacket(MBUF_DONTWAIT, sizeof(nstat_msg_src_added),
4431 &one, &msg) != 0) {
4432 return ENOMEM;
4433 }
4434
4435 mbuf_setlen(msg, sizeof(nstat_msg_src_added));
4436 mbuf_pkthdr_setlen(msg, mbuf_len(msg));
4437 nstat_msg_src_added *add = mbuf_data(msg);
4438 bzero(add, sizeof(*add));
4439 add->hdr.type = NSTAT_MSG_TYPE_SRC_ADDED;
4440 assert(mbuf_len(msg) <= MAX_NSTAT_MSG_HDR_LENGTH);
4441 add->hdr.length = (u_int16_t)mbuf_len(msg);
4442 add->hdr.context = context;
4443 add->provider = provider->nstat_provider_id;
4444 srcrefp = &add->srcref;
4445 }
4446
4447 // Allocate storage for the source
4448 nstat_src *src = kheap_alloc(KHEAP_NET_STAT, sizeof(*src), Z_WAITOK);
4449 if (src == NULL) {
4450 if (msg) {
4451 mbuf_freem(msg);
4452 }
4453 return ENOMEM;
4454 }
4455
4456 // Fill in the source, including picking an unused source ref
4457 lck_mtx_lock(&state->ncs_mtx);
4458
4459 src->srcref = nstat_control_next_src_ref(state);
4460 if (srcrefp) {
4461 *srcrefp = src->srcref;
4462 }
4463
4464 if (state->ncs_flags & NSTAT_FLAG_CLEANUP || src->srcref == NSTAT_SRC_REF_INVALID) {
4465 lck_mtx_unlock(&state->ncs_mtx);
4466 kheap_free(KHEAP_NET_STAT, src, sizeof(*src));
4467 if (msg) {
4468 mbuf_freem(msg);
4469 }
4470 return EINVAL;
4471 }
4472 src->provider = provider;
4473 src->cookie = cookie;
4474 src->filter = src_filter;
4475 src->seq = 0;
4476
4477 if (msg) {
4478 // send the source added message if appropriate
4479 errno_t result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg,
4480 CTL_DATA_EOR);
4481 if (result != 0) {
4482 nstat_stats.nstat_srcaddedfailures += 1;
4483 lck_mtx_unlock(&state->ncs_mtx);
4484 kheap_free(KHEAP_NET_STAT, src, sizeof(*src));
4485 mbuf_freem(msg);
4486 return result;
4487 }
4488 }
4489 // Put the source in the list
4490 TAILQ_INSERT_HEAD(&state->ncs_src_queue, src, ns_control_link);
4491 src->ns_control = state;
4492
4493 lck_mtx_unlock(&state->ncs_mtx);
4494
4495 return 0;
4496 }
4497
4498 static errno_t
4499 nstat_control_handle_remove_request(
4500 nstat_control_state *state,
4501 mbuf_t m)
4502 {
4503 nstat_src_ref_t srcref = NSTAT_SRC_REF_INVALID;
4504 nstat_src *src;
4505
4506 if (mbuf_copydata(m, offsetof(nstat_msg_rem_src_req, srcref), sizeof(srcref), &srcref) != 0) {
4507 return EINVAL;
4508 }
4509
4510 lck_mtx_lock(&state->ncs_mtx);
4511
4512 // Remove this source as we look for it
4513 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
4514 {
4515 if (src->srcref == srcref) {
4516 break;
4517 }
4518 }
4519 if (src) {
4520 TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
4521 }
4522
4523 lck_mtx_unlock(&state->ncs_mtx);
4524
4525 if (src) {
4526 nstat_control_cleanup_source(state, src, FALSE);
4527 }
4528
4529 return src ? 0 : ENOENT;
4530 }
4531
4532 static errno_t
4533 nstat_control_handle_query_request(
4534 nstat_control_state *state,
4535 mbuf_t m)
4536 {
4537 // TBD: handle this from another thread so we can enqueue a lot of data
4538 // As written, if a client requests query all, this function will be
4539 // called from the client's send of the request message. We will attempt
4540 // to write responses and succeed until the buffer fills up. Since the
4541 // client's thread is blocked on send, it won't be reading unless the
4542 // client has two threads using this socket, one for read and one for
4543 // write. Two threads probably won't work with this code anyhow since we
4544 // don't have proper locking in place yet.
4545 tailq_head_nstat_src dead_list;
4546 errno_t result = ENOENT;
4547 nstat_msg_query_src_req req;
4548
4549 if (mbuf_copydata(m, 0, sizeof(req), &req) != 0) {
4550 return EINVAL;
4551 }
4552
4553 const boolean_t all_srcs = (req.srcref == NSTAT_SRC_REF_ALL);
4554 TAILQ_INIT(&dead_list);
4555
4556 lck_mtx_lock(&state->ncs_mtx);
4557
4558 if (all_srcs) {
4559 state->ncs_flags |= NSTAT_FLAG_REQCOUNTS;
4560 }
4561 nstat_src *src, *tmpsrc;
4562 u_int64_t src_count = 0;
4563 boolean_t partial = FALSE;
4564
4565 /*
4566 * Error handling policy and sequence number generation is folded into
4567 * nstat_control_begin_query.
4568 */
4569 partial = nstat_control_begin_query(state, &req.hdr);
4570
4571
4572 TAILQ_FOREACH_SAFE(src, &state->ncs_src_queue, ns_control_link, tmpsrc)
4573 {
4574 int gone = 0;
4575
4576 // XXX ignore IFACE types?
4577 if (all_srcs || src->srcref == req.srcref) {
4578 if (nstat_control_reporting_allowed(state, src)
4579 && (!partial || !all_srcs || src->seq != state->ncs_seq)) {
4580 if (all_srcs &&
4581 (req.hdr.flags & NSTAT_MSG_HDR_FLAG_SUPPORTS_AGGREGATE) != 0) {
4582 result = nstat_control_append_counts(state, src, &gone);
4583 } else {
4584 result = nstat_control_send_counts(state, src, req.hdr.context, 0, &gone);
4585 }
4586
4587 if (ENOMEM == result || ENOBUFS == result) {
4588 /*
4589 * If the counts message failed to
4590 * enqueue then we should clear our flag so
4591 * that a client doesn't miss anything on
4592 * idle cleanup. We skip the "gone"
4593 * processing in the hope that we may
4594 * catch it another time.
4595 */
4596 state->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS;
4597 break;
4598 }
4599 if (partial) {
4600 /*
4601 * We skip over hard errors and
4602 * filtered sources.
4603 */
4604 src->seq = state->ncs_seq;
4605 src_count++;
4606 }
4607 }
4608 }
4609
4610 if (gone) {
4611 // send one last descriptor message so the client may see the final state
4612 // If we can't send the notification now, it
4613 // will be sent in the idle cleanup.
4614 result = nstat_control_send_description(state, src, 0, 0);
4615 if (result != 0) {
4616 nstat_stats.nstat_control_send_description_failures++;
4617 if (nstat_debug != 0) {
4618 printf("%s - nstat_control_send_description() %d\n", __func__, result);
4619 }
4620 state->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS;
4621 break;
4622 }
4623
4624 // pull src out of the list
4625 TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
4626 TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
4627 }
4628
4629 if (all_srcs) {
4630 if (src_count >= QUERY_CONTINUATION_SRC_COUNT) {
4631 break;
4632 }
4633 } else if (req.srcref == src->srcref) {
4634 break;
4635 }
4636 }
4637
4638 nstat_flush_accumulated_msgs(state);
4639
4640 u_int16_t flags = 0;
4641 if (req.srcref == NSTAT_SRC_REF_ALL) {
4642 flags = nstat_control_end_query(state, src, partial);
4643 }
4644
4645 lck_mtx_unlock(&state->ncs_mtx);
4646
4647 /*
4648 * If an error occurred enqueueing data, then allow the error to
4649 * propagate to nstat_control_send. This way, the error is sent to
4650 * user-level.
4651 */
4652 if (all_srcs && ENOMEM != result && ENOBUFS != result) {
4653 nstat_enqueue_success(req.hdr.context, state, flags);
4654 result = 0;
4655 }
4656
4657 while ((src = TAILQ_FIRST(&dead_list))) {
4658 TAILQ_REMOVE(&dead_list, src, ns_control_link);
4659 nstat_control_cleanup_source(state, src, FALSE);
4660 }
4661
4662 return result;
4663 }
4664
4665 static errno_t
4666 nstat_control_handle_get_src_description(
4667 nstat_control_state *state,
4668 mbuf_t m)
4669 {
4670 nstat_msg_get_src_description req;
4671 errno_t result = ENOENT;
4672 nstat_src *src;
4673
4674 if (mbuf_copydata(m, 0, sizeof(req), &req) != 0) {
4675 return EINVAL;
4676 }
4677
4678 lck_mtx_lock(&state->ncs_mtx);
4679 u_int64_t src_count = 0;
4680 boolean_t partial = FALSE;
4681 const boolean_t all_srcs = (req.srcref == NSTAT_SRC_REF_ALL);
4682
4683 /*
4684 * Error handling policy and sequence number generation is folded into
4685 * nstat_control_begin_query.
4686 */
4687 partial = nstat_control_begin_query(state, &req.hdr);
4688
4689 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
4690 {
4691 if (all_srcs || src->srcref == req.srcref) {
4692 if (nstat_control_reporting_allowed(state, src)
4693 && (!all_srcs || !partial || src->seq != state->ncs_seq)) {
4694 if ((req.hdr.flags & NSTAT_MSG_HDR_FLAG_SUPPORTS_AGGREGATE) != 0 && all_srcs) {
4695 result = nstat_control_append_description(state, src);
4696 } else {
4697 result = nstat_control_send_description(state, src, req.hdr.context, 0);
4698 }
4699
4700 if (ENOMEM == result || ENOBUFS == result) {
4701 /*
4702 * If the description message failed to
4703 * enqueue then we give up for now.
4704 */
4705 break;
4706 }
4707 if (partial) {
4708 /*
4709 * Note, we skip over hard errors and
4710 * filtered sources.
4711 */
4712 src->seq = state->ncs_seq;
4713 src_count++;
4714 if (src_count >= QUERY_CONTINUATION_SRC_COUNT) {
4715 break;
4716 }
4717 }
4718 }
4719
4720 if (!all_srcs) {
4721 break;
4722 }
4723 }
4724 }
4725 nstat_flush_accumulated_msgs(state);
4726
4727 u_int16_t flags = 0;
4728 if (req.srcref == NSTAT_SRC_REF_ALL) {
4729 flags = nstat_control_end_query(state, src, partial);
4730 }
4731
4732 lck_mtx_unlock(&state->ncs_mtx);
4733 /*
4734 * If an error occurred enqueueing data, then allow the error to
4735 * propagate to nstat_control_send. This way, the error is sent to
4736 * user-level.
4737 */
4738 if (all_srcs && ENOMEM != result && ENOBUFS != result) {
4739 nstat_enqueue_success(req.hdr.context, state, flags);
4740 result = 0;
4741 }
4742
4743 return result;
4744 }
4745
4746 static errno_t
4747 nstat_control_handle_set_filter(
4748 nstat_control_state *state,
4749 mbuf_t m)
4750 {
4751 nstat_msg_set_filter req;
4752 nstat_src *src;
4753
4754 if (mbuf_copydata(m, 0, sizeof(req), &req) != 0) {
4755 return EINVAL;
4756 }
4757 if (req.srcref == NSTAT_SRC_REF_ALL ||
4758 req.srcref == NSTAT_SRC_REF_INVALID) {
4759 return EINVAL;
4760 }
4761
4762 lck_mtx_lock(&state->ncs_mtx);
4763 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
4764 {
4765 if (req.srcref == src->srcref) {
4766 src->filter = req.filter;
4767 break;
4768 }
4769 }
4770 lck_mtx_unlock(&state->ncs_mtx);
4771 if (src == NULL) {
4772 return ENOENT;
4773 }
4774
4775 return 0;
4776 }
4777
4778 static void
4779 nstat_send_error(
4780 nstat_control_state *state,
4781 u_int64_t context,
4782 u_int32_t error)
4783 {
4784 errno_t result;
4785 struct nstat_msg_error err;
4786
4787 bzero(&err, sizeof(err));
4788 err.hdr.type = NSTAT_MSG_TYPE_ERROR;
4789 err.hdr.length = sizeof(err);
4790 err.hdr.context = context;
4791 err.error = error;
4792
4793 result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &err,
4794 sizeof(err), CTL_DATA_EOR | CTL_DATA_CRIT);
4795 if (result != 0) {
4796 nstat_stats.nstat_msgerrorfailures++;
4797 }
4798 }
4799
4800 static boolean_t
4801 nstat_control_begin_query(
4802 nstat_control_state *state,
4803 const nstat_msg_hdr *hdrp)
4804 {
4805 boolean_t partial = FALSE;
4806
4807 if (hdrp->flags & NSTAT_MSG_HDR_FLAG_CONTINUATION) {
4808 /* A partial query all has been requested. */
4809 partial = TRUE;
4810
4811 if (state->ncs_context != hdrp->context) {
4812 if (state->ncs_context != 0) {
4813 nstat_send_error(state, state->ncs_context, EAGAIN);
4814 }
4815
4816 /* Initialize state for a partial query all. */
4817 state->ncs_context = hdrp->context;
4818 state->ncs_seq++;
4819 }
4820 }
4821
4822 return partial;
4823 }
4824
4825 static u_int16_t
4826 nstat_control_end_query(
4827 nstat_control_state *state,
4828 nstat_src *last_src,
4829 boolean_t partial)
4830 {
4831 u_int16_t flags = 0;
4832
4833 if (last_src == NULL || !partial) {
4834 /*
4835 * We iterated through the entire srcs list or exited early
4836 * from the loop when a partial update was not requested (an
4837 * error occurred), so clear context to indicate internally
4838 * that the query is finished.
4839 */
4840 state->ncs_context = 0;
4841 } else {
4842 /*
4843 * Indicate to userlevel to make another partial request as
4844 * there are still sources left to be reported.
4845 */
4846 flags |= NSTAT_MSG_HDR_FLAG_CONTINUATION;
4847 }
4848
4849 return flags;
4850 }
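
/*
 * Illustrative user-space sketch (not compiled here) of the continuation
 * protocol implemented by nstat_control_begin_query()/end_query(): keep
 * hdr.context constant across the sequence and resend while the SUCCESS
 * reply still carries NSTAT_MSG_HDR_FLAG_CONTINUATION.
 */
#if 0
nstat_msg_query_src_req qreq;

bzero(&qreq, sizeof(qreq));
qreq.hdr.type = NSTAT_MSG_TYPE_QUERY_SRC;
qreq.hdr.length = sizeof(qreq);
qreq.hdr.context = 42;                        // must stay constant per sequence
qreq.hdr.flags = NSTAT_MSG_HDR_FLAG_CONTINUATION;
qreq.srcref = NSTAT_SRC_REF_ALL;
// Loop: send(fd, &qreq, sizeof(qreq), 0), read replies until the
// NSTAT_MSG_TYPE_SUCCESS message arrives, and repeat while that reply's
// hdr.flags still includes NSTAT_MSG_HDR_FLAG_CONTINUATION.
#endif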
4851
4852 static errno_t
4853 nstat_control_handle_get_update(
4854 nstat_control_state *state,
4855 mbuf_t m)
4856 {
4857 nstat_msg_query_src_req req;
4858
4859 if (mbuf_copydata(m, 0, sizeof(req), &req) != 0) {
4860 return EINVAL;
4861 }
4862
4863 lck_mtx_lock(&state->ncs_mtx);
4864
4865 state->ncs_flags |= NSTAT_FLAG_SUPPORTS_UPDATES;
4866
4867 errno_t result = ENOENT;
4868 nstat_src *src, *tmpsrc;
4869 tailq_head_nstat_src dead_list;
4870 u_int64_t src_count = 0;
4871 boolean_t partial = FALSE;
4872 TAILQ_INIT(&dead_list);
4873
4874 /*
4875 * Error handling policy and sequence number generation is folded into
4876 * nstat_control_begin_query.
4877 */
4878 partial = nstat_control_begin_query(state, &req.hdr);
4879
4880 TAILQ_FOREACH_SAFE(src, &state->ncs_src_queue, ns_control_link, tmpsrc)
4881 {
4882 int gone;
4883
4884 gone = 0;
4885 if (nstat_control_reporting_allowed(state, src)) {
4886 /* skip this source if it has the current state
4887 * sequence number as it's already been reported in
4888 * this query-all partial sequence. */
4889 if (req.srcref == NSTAT_SRC_REF_ALL
4890 && (FALSE == partial || src->seq != state->ncs_seq)) {
4891 result = nstat_control_append_update(state, src, &gone);
4892 if (ENOMEM == result || ENOBUFS == result) {
4893 /*
4894 * If the update message failed to
4895 * enqueue then give up.
4896 */
4897 break;
4898 }
4899 if (partial) {
4900 /*
4901 * We skip over hard errors and
4902 * filtered sources.
4903 */
4904 src->seq = state->ncs_seq;
4905 src_count++;
4906 }
4907 } else if (src->srcref == req.srcref) {
4908 result = nstat_control_send_update(state, src, req.hdr.context, 0, 0, &gone);
4909 }
4910 }
4911
4912 if (gone) {
4913 // pull src out of the list
4914 TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
4915 TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
4916 }
4917
4918 if (req.srcref != NSTAT_SRC_REF_ALL && req.srcref == src->srcref) {
4919 break;
4920 }
4921 if (src_count >= QUERY_CONTINUATION_SRC_COUNT) {
4922 break;
4923 }
4924 }
4925
4926 nstat_flush_accumulated_msgs(state);
4927
4928
4929 u_int16_t flags = 0;
4930 if (req.srcref == NSTAT_SRC_REF_ALL) {
4931 flags = nstat_control_end_query(state, src, partial);
4932 }
4933
4934 lck_mtx_unlock(&state->ncs_mtx);
4935 /*
4936 * If an error occurred enqueueing data, then allow the error to
4937 * propagate to nstat_control_send. This way, the error is sent to
4938 * user-level.
4939 */
4940 if (req.srcref == NSTAT_SRC_REF_ALL && ENOMEM != result && ENOBUFS != result) {
4941 nstat_enqueue_success(req.hdr.context, state, flags);
4942 result = 0;
4943 }
4944
4945 while ((src = TAILQ_FIRST(&dead_list))) {
4946 TAILQ_REMOVE(&dead_list, src, ns_control_link);
4947 // release src and send notification
4948 nstat_control_cleanup_source(state, src, FALSE);
4949 }
4950
4951 return result;
4952 }
4953
4954 static errno_t
4955 nstat_control_handle_subscribe_sysinfo(
4956 nstat_control_state *state)
4957 {
4958 errno_t result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0);
4959
4960 if (result != 0) {
4961 return result;
4962 }
4963
4964 lck_mtx_lock(&state->ncs_mtx);
4965 state->ncs_flags |= NSTAT_FLAG_SYSINFO_SUBSCRIBED;
4966 lck_mtx_unlock(&state->ncs_mtx);
4967
4968 return 0;
4969 }
4970
4971 static errno_t
4972 nstat_control_send(
4973 kern_ctl_ref kctl,
4974 u_int32_t unit,
4975 void *uinfo,
4976 mbuf_t m,
4977 __unused int flags)
4978 {
4979 nstat_control_state *state = (nstat_control_state*)uinfo;
4980 struct nstat_msg_hdr *hdr;
4981 struct nstat_msg_hdr storage;
4982 errno_t result = 0;
4983
4984 if (mbuf_pkthdr_len(m) < sizeof(*hdr)) {
4985 // Is this the right thing to do?
4986 mbuf_freem(m);
4987 return EINVAL;
4988 }
4989
4990 if (mbuf_len(m) >= sizeof(*hdr)) {
4991 hdr = mbuf_data(m);
4992 } else {
4993 mbuf_copydata(m, 0, sizeof(storage), &storage);
4994 hdr = &storage;
4995 }
4996
4997 // Legacy clients may not set the length
4998 // Those clients are likely not setting the flags either
4999 // Fix everything up so old clients continue to work
5000 if (hdr->length != mbuf_pkthdr_len(m)) {
5001 hdr->flags = 0;
5002 assert(mbuf_pkthdr_len(m) <= MAX_NSTAT_MSG_HDR_LENGTH);
5003 hdr->length = (u_int16_t)mbuf_pkthdr_len(m);
5004 if (hdr == &storage) {
5005 mbuf_copyback(m, 0, sizeof(*hdr), hdr, MBUF_DONTWAIT);
5006 }
5007 }
5008
5009 switch (hdr->type) {
5010 case NSTAT_MSG_TYPE_ADD_SRC:
5011 result = nstat_control_handle_add_request(state, m);
5012 break;
5013
5014 case NSTAT_MSG_TYPE_ADD_ALL_SRCS:
5015 result = nstat_control_handle_add_all(state, m);
5016 break;
5017
5018 case NSTAT_MSG_TYPE_REM_SRC:
5019 result = nstat_control_handle_remove_request(state, m);
5020 break;
5021
5022 case NSTAT_MSG_TYPE_QUERY_SRC:
5023 result = nstat_control_handle_query_request(state, m);
5024 break;
5025
5026 case NSTAT_MSG_TYPE_GET_SRC_DESC:
5027 result = nstat_control_handle_get_src_description(state, m);
5028 break;
5029
5030 case NSTAT_MSG_TYPE_SET_FILTER:
5031 result = nstat_control_handle_set_filter(state, m);
5032 break;
5033
5034 case NSTAT_MSG_TYPE_GET_UPDATE:
5035 result = nstat_control_handle_get_update(state, m);
5036 break;
5037
5038 case NSTAT_MSG_TYPE_SUBSCRIBE_SYSINFO:
5039 result = nstat_control_handle_subscribe_sysinfo(state);
5040 break;
5041
5042 default:
5043 result = EINVAL;
5044 break;
5045 }
5046
5047 if (result != 0) {
5048 struct nstat_msg_error err;
5049
5050 bzero(&err, sizeof(err));
5051 err.hdr.type = NSTAT_MSG_TYPE_ERROR;
5052 err.hdr.length = (u_int16_t)(sizeof(err) + mbuf_pkthdr_len(m));
5053 err.hdr.context = hdr->context;
5054 err.error = result;
5055
5056 if (mbuf_prepend(&m, sizeof(err), MBUF_DONTWAIT) == 0 &&
5057 mbuf_copyback(m, 0, sizeof(err), &err, MBUF_DONTWAIT) == 0) {
5058 result = ctl_enqueuembuf(kctl, unit, m, CTL_DATA_EOR | CTL_DATA_CRIT);
5059 if (result != 0) {
5060 mbuf_freem(m);
5061 }
5062 m = NULL;
5063 }
5064
5065 if (result != 0) {
5066 // Unable to prepend the error to the request - just send the error
5067 err.hdr.length = sizeof(err);
5068 result = ctl_enqueuedata(kctl, unit, &err, sizeof(err),
5069 CTL_DATA_EOR | CTL_DATA_CRIT);
5070 if (result != 0) {
5071 nstat_stats.nstat_msgerrorfailures += 1;
5072 }
5073 }
5074 nstat_stats.nstat_handle_msg_failures += 1;
5075 }
5076
5077 if (m) {
5078 mbuf_freem(m);
5079 }
5080
5081 return result;
5082 }
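
/*
 * Note on the error path above: when possible the original request is
 * echoed back to the client behind the nstat_msg_error header (hence
 * hdr.length covering both), so the client can tell which request
 * failed; if the prepend fails, a bare error message is sent instead.
 */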
5083
5084
5085 static int
5086 tcp_progress_indicators_for_interface(unsigned int ifindex, uint64_t recentflow_maxduration, uint16_t filter_flags, struct xtcpprogress_indicators *indicators)
5087 {
5088 int error = 0;
5089 struct inpcb *inp;
5090 uint64_t min_recent_start_time;
5091
5092 min_recent_start_time = mach_continuous_time() - recentflow_maxduration;
5093 bzero(indicators, sizeof(*indicators));
5094
5095 lck_rw_lock_shared(tcbinfo.ipi_lock);
5096 /*
5097 * For progress indicators we don't need the special-case TCP handling that collects time-wait connections
5098 */
5099 LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list)
5100 {
5101 struct tcpcb *tp = intotcpcb(inp);
5102 if (tp && inp->inp_last_outifp &&
5103 inp->inp_last_outifp->if_index == ifindex &&
5104 inp->inp_state != INPCB_STATE_DEAD &&
5105 ((filter_flags == 0) ||
5106 ((filter_flags & NSTAT_IFNET_IS_NON_LOCAL) && !(tp->t_flags & TF_LOCAL)) ||
5107 ((filter_flags & NSTAT_IFNET_IS_LOCAL) && (tp->t_flags & TF_LOCAL)))) {
5108 struct tcp_conn_status connstatus;
5109 indicators->xp_numflows++;
5110 tcp_get_connectivity_status(tp, &connstatus);
5111 if (connstatus.write_probe_failed) {
5112 indicators->xp_write_probe_fails++;
5113 }
5114 if (connstatus.read_probe_failed) {
5115 indicators->xp_read_probe_fails++;
5116 }
5117 if (connstatus.conn_probe_failed) {
5118 indicators->xp_conn_probe_fails++;
5119 }
5120 if (inp->inp_start_timestamp > min_recent_start_time) {
5121 uint64_t flow_count;
5122
5123 indicators->xp_recentflows++;
5124 atomic_get_64(flow_count, &inp->inp_stat->rxbytes);
5125 indicators->xp_recentflows_rxbytes += flow_count;
5126 atomic_get_64(flow_count, &inp->inp_stat->txbytes);
5127 indicators->xp_recentflows_txbytes += flow_count;
5128
5129 indicators->xp_recentflows_rxooo += tp->t_stat.rxoutoforderbytes;
5130 indicators->xp_recentflows_rxdup += tp->t_stat.rxduplicatebytes;
5131 indicators->xp_recentflows_retx += tp->t_stat.txretransmitbytes;
5132 if (tp->snd_max - tp->snd_una) {
5133 indicators->xp_recentflows_unacked++;
5134 }
5135 }
5136 }
5137 }
5138 lck_rw_done(tcbinfo.ipi_lock);
5139
5140 return error;
5141 }
5142
5143
5144 __private_extern__ int
5145 ntstat_tcp_progress_indicators(struct sysctl_req *req)
5146 {
5147 struct xtcpprogress_indicators indicators = {};
5148 int error = 0;
5149 struct tcpprogressreq requested;
5150
5151 if (priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0) != 0) {
5152 return EACCES;
5153 }
5154 if (req->newptr == USER_ADDR_NULL) {
5155 return EINVAL;
5156 }
5157 if (req->newlen < sizeof(requested)) {
5158 return EINVAL;
5159 }
5160 error = SYSCTL_IN(req, &requested, sizeof(requested));
5161 if (error != 0) {
5162 return error;
5163 }
5164 error = tcp_progress_indicators_for_interface((unsigned int)requested.ifindex, requested.recentflow_maxduration, (uint16_t)requested.filter_flags, &indicators);
5165 if (error != 0) {
5166 return error;
5167 }
5168 error = SYSCTL_OUT(req, &indicators, sizeof(indicators));
5169
5170 return error;
5171 }
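
/*
 * Illustrative user-space sketch (not compiled here).  The request and
 * reply layouts follow the SYSCTL_IN/SYSCTL_OUT usage above; the MIB name
 * passed to sysctlbyname() is an assumption -- use whatever name this
 * handler is registered under in the sysctl tree.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <net/if.h>
#include <strings.h>

struct tcpprogressreq treq;
struct xtcpprogress_indicators ind;
size_t len = sizeof(ind);
// recentflow_maxduration is compared against mach_continuous_time(), so it
// is in timebase ticks; convert from seconds with mach_timebase_info().
uint64_t recent_window_ticks = 0;  // assumption: caller computes this

bzero(&treq, sizeof(treq));
treq.ifindex = if_nametoindex("en0");
treq.filter_flags = 0;             // 0: consider every flow on the interface
treq.recentflow_maxduration = recent_window_ticks;
if (sysctlbyname("net.inet.tcp.progress" /* assumed MIB name */,
    &ind, &len, &treq, sizeof(treq)) == 0) {
	// ind.xp_numflows, ind.xp_recentflows_rxbytes, ... are now populated
}
#endif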
5172
5173
5174
5175