2 * Copyright (c) 2010-2017 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
29 #include <sys/param.h>
30 #include <sys/types.h>
31 #include <sys/kpi_mbuf.h>
32 #include <sys/socket.h>
33 #include <sys/kern_control.h>
34 #include <sys/mcache.h>
35 #include <sys/socketvar.h>
36 #include <sys/sysctl.h>
37 #include <sys/queue.h>
39 #include <sys/protosw.h>
41 #include <kern/clock.h>
42 #include <kern/debug.h>
44 #include <libkern/libkern.h>
45 #include <libkern/OSMalloc.h>
46 #include <libkern/OSAtomic.h>
47 #include <libkern/locks.h>
50 #include <net/if_var.h>
51 #include <net/if_types.h>
52 #include <net/route.h>
54 // These includes appear in ntstat.h but we include them here first so they won't trigger
55 // any clang diagnostic errors.
56 #include <netinet/in.h>
57 #include <netinet/in_stat.h>
58 #include <netinet/tcp.h>
60 #pragma clang diagnostic push
61 #pragma clang diagnostic error "-Wpadded"
62 #pragma clang diagnostic error "-Wpacked"
63 // This header defines structures shared with user space, so we need to ensure there is
64 // no compiler inserted padding in case the user space process isn't using the same
65 // architecture as the kernel (example: i386 process with x86_64 kernel).
66 #include <net/ntstat.h>
67 #pragma clang diagnostic pop
69 #include <netinet/ip_var.h>
70 #include <netinet/in_pcb.h>
71 #include <netinet/in_var.h>
72 #include <netinet/tcp_var.h>
73 #include <netinet/tcp_fsm.h>
74 #include <netinet/tcp_cc.h>
75 #include <netinet/udp.h>
76 #include <netinet/udp_var.h>
77 #include <netinet6/in6_pcb.h>
78 #include <netinet6/in6_var.h>
80 __private_extern__
int nstat_collect
= 1;
82 #if (DEBUG || DEVELOPMENT)
83 SYSCTL_INT(_net
, OID_AUTO
, statistics
, CTLFLAG_RW
| CTLFLAG_LOCKED
,
84 &nstat_collect
, 0, "Collect detailed statistics");
85 #endif /* (DEBUG || DEVELOPMENT) */
88 static int nstat_privcheck
= 1;
90 static int nstat_privcheck
= 0;
92 SYSCTL_INT(_net
, OID_AUTO
, statistics_privcheck
, CTLFLAG_RW
| CTLFLAG_LOCKED
,
93 &nstat_privcheck
, 0, "Entitlement check");
95 SYSCTL_NODE(_net
, OID_AUTO
, stats
,
96 CTLFLAG_RW
|CTLFLAG_LOCKED
, 0, "network statistics");
98 static int nstat_debug
= 0;
99 SYSCTL_INT(_net_stats
, OID_AUTO
, debug
, CTLFLAG_RW
| CTLFLAG_LOCKED
,
100 &nstat_debug
, 0, "");
102 static int nstat_sendspace
= 2048;
103 SYSCTL_INT(_net_stats
, OID_AUTO
, sendspace
, CTLFLAG_RW
| CTLFLAG_LOCKED
,
104 &nstat_sendspace
, 0, "");
106 static int nstat_recvspace
= 8192;
107 SYSCTL_INT(_net_stats
, OID_AUTO
, recvspace
, CTLFLAG_RW
| CTLFLAG_LOCKED
,
108 &nstat_recvspace
, 0, "");
110 static struct nstat_stats nstat_stats
;
111 SYSCTL_STRUCT(_net_stats
, OID_AUTO
, stats
, CTLFLAG_RD
| CTLFLAG_LOCKED
,
112 &nstat_stats
, nstat_stats
, "");
114 static u_int32_t nstat_lim_interval
= 30 * 60; /* Report interval, seconds */
115 static u_int32_t nstat_lim_min_tx_pkts
= 100;
116 static u_int32_t nstat_lim_min_rx_pkts
= 100;
117 #if (DEBUG || DEVELOPMENT)
118 SYSCTL_INT(_net_stats
, OID_AUTO
, lim_report_interval
,
119 CTLFLAG_RW
| CTLFLAG_LOCKED
, &nstat_lim_interval
, 0,
120 "Low internet stat report interval");
122 SYSCTL_INT(_net_stats
, OID_AUTO
, lim_min_tx_pkts
,
123 CTLFLAG_RW
| CTLFLAG_LOCKED
, &nstat_lim_min_tx_pkts
, 0,
124 "Low Internet, min transmit packets threshold");
126 SYSCTL_INT(_net_stats
, OID_AUTO
, lim_min_rx_pkts
,
127 CTLFLAG_RW
| CTLFLAG_LOCKED
, &nstat_lim_min_rx_pkts
, 0,
128 "Low Internet, min receive packets threshold");
129 #endif /* DEBUG || DEVELOPMENT */
131 static struct net_api_stats net_api_stats_before
;
132 static u_int64_t net_api_stats_last_report_time
;
133 #define NET_API_STATS_REPORT_INTERVAL (12 * 60 * 60) /* 12 hours, in seconds */
134 static u_int32_t net_api_stats_report_interval
= NET_API_STATS_REPORT_INTERVAL
;
136 #if (DEBUG || DEVELOPMENT)
137 SYSCTL_UINT(_net_stats
, OID_AUTO
, api_report_interval
,
138 CTLFLAG_RW
| CTLFLAG_LOCKED
, &net_api_stats_report_interval
, 0, "");
139 #endif /* DEBUG || DEVELOPMENT */
143 NSTAT_FLAG_CLEANUP
= (1 << 0),
144 NSTAT_FLAG_REQCOUNTS
= (1 << 1),
145 NSTAT_FLAG_SUPPORTS_UPDATES
= (1 << 2),
146 NSTAT_FLAG_SYSINFO_SUBSCRIBED
= (1 << 3),
150 #define QUERY_CONTINUATION_SRC_COUNT 50
152 #define QUERY_CONTINUATION_SRC_COUNT 100
155 typedef TAILQ_HEAD(, nstat_src
) tailq_head_nstat_src
;
156 typedef TAILQ_ENTRY(nstat_src
) tailq_entry_nstat_src
;
158 typedef struct nstat_provider_filter
161 u_int64_t npf_events
;
164 } nstat_provider_filter
;
167 typedef struct nstat_control_state
169 struct nstat_control_state
*ncs_next
;
170 u_int32_t ncs_watching
;
171 decl_lck_mtx_data(, ncs_mtx
);
172 kern_ctl_ref ncs_kctl
;
174 nstat_src_ref_t ncs_next_srcref
;
175 tailq_head_nstat_src ncs_src_queue
;
176 mbuf_t ncs_accumulated
;
178 nstat_provider_filter ncs_provider_filters
[NSTAT_PROVIDER_COUNT
];
179 /* state maintained for partial query requests */
180 u_int64_t ncs_context
;
182 } nstat_control_state
;
184 typedef struct nstat_provider
186 struct nstat_provider
*next
;
187 nstat_provider_id_t nstat_provider_id
;
188 size_t nstat_descriptor_length
;
189 errno_t (*nstat_lookup
)(const void *data
, u_int32_t length
, nstat_provider_cookie_t
*out_cookie
);
190 int (*nstat_gone
)(nstat_provider_cookie_t cookie
);
191 errno_t (*nstat_counts
)(nstat_provider_cookie_t cookie
, struct nstat_counts
*out_counts
, int *out_gone
);
192 errno_t (*nstat_watcher_add
)(nstat_control_state
*state
, nstat_msg_add_all_srcs
*req
);
193 void (*nstat_watcher_remove
)(nstat_control_state
*state
);
194 errno_t (*nstat_copy_descriptor
)(nstat_provider_cookie_t cookie
, void *data
, u_int32_t len
);
195 void (*nstat_release
)(nstat_provider_cookie_t cookie
, boolean_t locked
);
196 bool (*nstat_reporting_allowed
)(nstat_provider_cookie_t cookie
, nstat_provider_filter
*filter
);
199 typedef STAILQ_HEAD(, nstat_src
) stailq_head_nstat_src
;
200 typedef STAILQ_ENTRY(nstat_src
) stailq_entry_nstat_src
;
202 typedef TAILQ_HEAD(, nstat_tu_shadow
) tailq_head_tu_shadow
;
203 typedef TAILQ_ENTRY(nstat_tu_shadow
) tailq_entry_tu_shadow
;
205 typedef TAILQ_HEAD(, nstat_procdetails
) tailq_head_procdetails
;
206 typedef TAILQ_ENTRY(nstat_procdetails
) tailq_entry_procdetails
;
208 typedef struct nstat_src
210 tailq_entry_nstat_src ns_control_link
; // All sources for the nstat_control_state, for iterating over.
211 nstat_control_state
*ns_control
; // The nstat_control_state that this is a source for
212 nstat_src_ref_t srcref
;
213 nstat_provider
*provider
;
214 nstat_provider_cookie_t cookie
;
219 static errno_t
nstat_control_send_counts(nstat_control_state
*,
220 nstat_src
*, unsigned long long, u_int16_t
, int *);
221 static int nstat_control_send_description(nstat_control_state
*state
, nstat_src
*src
, u_int64_t context
, u_int16_t hdr_flags
);
222 static int nstat_control_send_update(nstat_control_state
*state
, nstat_src
*src
, u_int64_t context
, u_int16_t hdr_flags
, int *gone
);
223 static errno_t
nstat_control_send_removed(nstat_control_state
*, nstat_src
*);
224 static errno_t
nstat_control_send_goodbye(nstat_control_state
*state
, nstat_src
*src
);
225 static void nstat_control_cleanup_source(nstat_control_state
*state
, nstat_src
*src
, boolean_t
);
226 static bool nstat_control_reporting_allowed(nstat_control_state
*state
, nstat_src
*src
);
227 static boolean_t
nstat_control_begin_query(nstat_control_state
*state
, const nstat_msg_hdr
*hdrp
);
228 static u_int16_t
nstat_control_end_query(nstat_control_state
*state
, nstat_src
*last_src
, boolean_t partial
);
229 static void nstat_ifnet_report_ecn_stats(void);
230 static void nstat_ifnet_report_lim_stats(void);
231 static void nstat_net_api_report_stats(void);
232 static errno_t
nstat_set_provider_filter( nstat_control_state
*state
, nstat_msg_add_all_srcs
*req
);
234 static u_int32_t nstat_udp_watchers
= 0;
235 static u_int32_t nstat_tcp_watchers
= 0;
237 static void nstat_control_register(void);
240 * The lock order is as follows:
242 * socket_lock (inpcb)
246 static volatile OSMallocTag nstat_malloc_tag
= NULL
;
247 static nstat_control_state
*nstat_controls
= NULL
;
248 static uint64_t nstat_idle_time
= 0;
249 static decl_lck_mtx_data(, nstat_mtx
);
251 /* some extern definitions */
252 extern void mbuf_report_peak_usage(void);
253 extern void tcp_report_stats(void);
257 const struct sockaddr
*src
,
258 struct sockaddr
*dst
,
/*
 * Body fragment of nstat_copy_sa_out(): bounded copy of a sockaddr into a
 * caller-supplied buffer.  NOTE(review): this extraction is missing the
 * function's header and brace lines; only the visible tokens are documented.
 */
// Refuse the copy when the source sockaddr reports a length larger than the
// destination buffer (sa_len is the sockaddr's own length field).
261 if (src
->sa_len
> maxlen
) return;
263 bcopy(src
, dst
, src
->sa_len
);
// For scoped IPv6 addresses the kernel embeds the interface scope in the
// second 16-bit word of the address (KAME convention).
264 if (src
->sa_family
== AF_INET6
&&
265 src
->sa_len
>= sizeof(struct sockaddr_in6
))
267 struct sockaddr_in6
*sin6
= (struct sockaddr_in6
*)(void *)dst
;
268 if (IN6_IS_SCOPE_EMBED(&sin6
->sin6_addr
))
// Recover sin6_scope_id from the embedded word if the caller left it 0...
270 if (sin6
->sin6_scope_id
== 0)
271 sin6
->sin6_scope_id
= ntohs(sin6
->sin6_addr
.s6_addr16
[1]);
// ...then clear the embedded word so user space sees a clean address.
272 sin6
->sin6_addr
.s6_addr16
[1] = 0;
/*
 * Fragment of nstat_ip_to_sockaddr(): fills a sockaddr_in from an IPv4
 * address and port.  NOTE(review): the extraction drops the brace/return
 * lines and the address copy that presumably follows — confirm upstream.
 */
278 nstat_ip_to_sockaddr(
// Source IPv4 address (read-only).
279 const struct in_addr
*ip
,
// Destination sockaddr to populate.
281 struct sockaddr_in
*sin
,
// Bail out when the caller's buffer cannot hold a full sockaddr_in.
284 if (maxlen
< sizeof(struct sockaddr_in
))
287 sin
->sin_family
= AF_INET
;
288 sin
->sin_len
= sizeof(*sin
);
// Port is stored as given — presumably already network byte order;
// TODO(review): confirm against callers.
289 sin
->sin_port
= port
;
/*
 * Fragment of nstat_ifnet_to_flags(): maps an ifnet's functional type to
 * the NSTAT_IFNET_* property bits reported to user space.
 * NOTE(review): the embedded line numbers skip between cases, which
 * suggests per-case `break;` lines (and braces) were dropped by this
 * extraction — do not read the cases as deliberate fall-through; confirm
 * against upstream.
 */
294 nstat_ifnet_to_flags(
298 u_int32_t functional_type
= if_functional_type(ifp
, FALSE
);
300 /* Panic if someone adds a functional type without updating ntstat. */
301 VERIFY(0 <= functional_type
&& functional_type
<= IFRTYPE_FUNCTIONAL_LAST
);
303 switch (functional_type
)
305 case IFRTYPE_FUNCTIONAL_UNKNOWN
:
306 flags
|= NSTAT_IFNET_IS_UNKNOWN_TYPE
;
308 case IFRTYPE_FUNCTIONAL_LOOPBACK
:
309 flags
|= NSTAT_IFNET_IS_LOOPBACK
;
// Wired and the internal co-processor link are both reported as "wired".
311 case IFRTYPE_FUNCTIONAL_WIRED
:
312 case IFRTYPE_FUNCTIONAL_INTCOPROC
:
313 flags
|= NSTAT_IFNET_IS_WIRED
;
315 case IFRTYPE_FUNCTIONAL_WIFI_INFRA
:
316 flags
|= NSTAT_IFNET_IS_WIFI
;
// AWDL is reported as Wi-Fi plus the AWDL bit.
318 case IFRTYPE_FUNCTIONAL_WIFI_AWDL
:
319 flags
|= NSTAT_IFNET_IS_WIFI
;
320 flags
|= NSTAT_IFNET_IS_AWDL
;
322 case IFRTYPE_FUNCTIONAL_CELLULAR
:
323 flags
|= NSTAT_IFNET_IS_CELLULAR
;
// "Expensive" is orthogonal to the functional type and OR'd in separately.
327 if (IFNET_IS_EXPENSIVE(ifp
))
329 flags
|= NSTAT_IFNET_IS_EXPENSIVE
;
/*
 * Fragment of nstat_inpcb_to_flags(): derives NSTAT_IFNET_* property bits
 * for a socket from the interface its last packet was sent on.
 * NOTE(review): brace and return lines are missing from this extraction.
 */
336 nstat_inpcb_to_flags(
337 const struct inpcb
*inp
)
// Only sockets that have actually sent traffic have a last output ifnet.
341 if ((inp
!= NULL
) && (inp
->inp_last_outifp
!= NULL
))
343 struct ifnet
*ifp
= inp
->inp_last_outifp
;
344 flags
= nstat_ifnet_to_flags(ifp
);
// A socket may be on cellular only because Wi-Fi fell back; surface that
// via a distinct bit so user space can tell it from native cellular use.
346 if (flags
& NSTAT_IFNET_IS_CELLULAR
)
348 if (inp
->inp_socket
!= NULL
&&
349 (inp
->inp_socket
->so_flags1
& SOF1_CELLFALLBACK
))
350 flags
|= NSTAT_IFNET_VIA_CELLFALLBACK
;
// No known output interface: report an unknown interface type.
355 flags
= NSTAT_IFNET_IS_UNKNOWN_TYPE
;
361 #pragma mark -- Network Statistic Providers --
363 static errno_t
nstat_control_source_add(u_int64_t context
, nstat_control_state
*state
, nstat_provider
*provider
, nstat_provider_cookie_t cookie
);
364 struct nstat_provider
*nstat_providers
= NULL
;
/*
 * Fragment of nstat_find_provider_by_id(): linear scan of the global
 * singly-linked nstat_providers list for a matching provider id.
 * NOTE(review): the return statements are not visible in this extraction.
 */
366 static struct nstat_provider
*
367 nstat_find_provider_by_id(
368 nstat_provider_id_t id
)
370 struct nstat_provider
*provider
;
// The provider list is only appended to during init (see the
// nstat_init_*_provider registrations), so an unlocked walk is presumably
// safe — TODO(review): confirm.
372 for (provider
= nstat_providers
; provider
!= NULL
; provider
= provider
->next
)
374 if (provider
->nstat_provider_id
== id
)
383 nstat_provider_id_t id
,
386 nstat_provider
**out_provider
,
387 nstat_provider_cookie_t
*out_cookie
)
389 *out_provider
= nstat_find_provider_by_id(id
);
390 if (*out_provider
== NULL
)
395 return (*out_provider
)->nstat_lookup(data
, length
, out_cookie
);
398 static void nstat_init_route_provider(void);
399 static void nstat_init_tcp_provider(void);
400 static void nstat_init_udp_provider(void);
401 static void nstat_init_ifnet_provider(void);
403 __private_extern__
void
/*
 * nstat_init(): one-time module initialization.  The atomic CAS on
 * nstat_malloc_tag doubles as the "already initialized" latch, so
 * concurrent first callers race safely on tag creation.
 * NOTE(review): the brace structure is lost in this extraction; upstream
 * gates the provider registration so it runs exactly once — confirm.
 */
406 if (nstat_malloc_tag
!= NULL
) return;
408 OSMallocTag tag
= OSMalloc_Tagalloc(NET_STAT_CONTROL_NAME
, OSMT_DEFAULT
);
// If another thread won the race, free our tag and adopt the winner's.
409 if (!OSCompareAndSwapPtr(NULL
, tag
, &nstat_malloc_tag
))
411 OSMalloc_Tagfree(tag
);
412 tag
= nstat_malloc_tag
;
416 // we need to initialize other things, we do it here as this code path will only be hit once;
// Register the built-in statistics providers, then the kernel control.
417 nstat_init_route_provider();
418 nstat_init_tcp_provider();
419 nstat_init_udp_provider();
420 nstat_init_ifnet_provider();
421 nstat_control_register();
425 #pragma mark -- Aligned Buffer Allocation --
/*
 * Fragment of nstat_malloc_aligned(): over-allocates, rounds the returned
 * pointer up to the requested alignment, and stashes an align_header just
 * below the aligned region so nstat_free_aligned() can recover the raw
 * allocation.  NOTE(review): the hdr->length assignment and the return of
 * the aligned pointer are missing from this extraction — confirm upstream.
 */
434 nstat_malloc_aligned(
// Worst case: payload + header + (alignment - 1) slack for rounding up.
439 struct align_header
*hdr
= NULL
;
440 u_int32_t size
= length
+ sizeof(*hdr
) + alignment
- 1;
442 u_int8_t
*buffer
= OSMalloc(size
, tag
);
443 if (buffer
== NULL
) return NULL
;
// Reserve room for the header, then round up to the requested alignment.
445 u_int8_t
*aligned
= buffer
+ sizeof(*hdr
);
446 aligned
= (u_int8_t
*)P2ROUNDUP(aligned
, alignment
);
// The header sits immediately below the aligned pointer; offset records
// how far the aligned pointer is from the start of the raw allocation.
448 hdr
= (struct align_header
*)(void *)(aligned
- sizeof(*hdr
));
449 hdr
->offset
= aligned
- buffer
;
460 struct align_header
*hdr
= (struct align_header
*)(void *)((u_int8_t
*)buffer
- sizeof(*hdr
));
461 OSFree(((char*)buffer
) - hdr
->offset
, hdr
->length
, tag
);
464 #pragma mark -- Route Provider --
466 static nstat_provider nstat_route_provider
;
472 nstat_provider_cookie_t
*out_cookie
)
474 // rt_lookup doesn't take const params but it doesn't modify the parameters for
475 // the lookup. So...we use a union to eliminate the warning.
479 const struct sockaddr
*const_sa
;
482 const nstat_route_add_param
*param
= (const nstat_route_add_param
*)data
;
485 if (length
< sizeof(*param
))
490 if (param
->dst
.v4
.sin_family
== 0 ||
491 param
->dst
.v4
.sin_family
> AF_MAX
||
492 (param
->mask
.v4
.sin_family
!= 0 && param
->mask
.v4
.sin_family
!= param
->dst
.v4
.sin_family
))
497 if (param
->dst
.v4
.sin_len
> sizeof(param
->dst
) ||
498 (param
->mask
.v4
.sin_family
&& param
->mask
.v4
.sin_len
> sizeof(param
->mask
.v4
.sin_len
)))
502 if ((param
->dst
.v4
.sin_family
== AF_INET
&&
503 param
->dst
.v4
.sin_len
< sizeof(struct sockaddr_in
)) ||
504 (param
->dst
.v6
.sin6_family
== AF_INET6
&&
505 param
->dst
.v6
.sin6_len
< sizeof(struct sockaddr_in6
)))
510 dst
.const_sa
= (const struct sockaddr
*)¶m
->dst
;
511 mask
.const_sa
= param
->mask
.v4
.sin_family
? (const struct sockaddr
*)¶m
->mask
: NULL
;
513 struct radix_node_head
*rnh
= rt_tables
[dst
.sa
->sa_family
];
514 if (rnh
== NULL
) return EAFNOSUPPORT
;
516 lck_mtx_lock(rnh_lock
);
517 struct rtentry
*rt
= rt_lookup(TRUE
, dst
.sa
, mask
.sa
, rnh
, param
->ifindex
);
518 lck_mtx_unlock(rnh_lock
);
520 if (rt
) *out_cookie
= (nstat_provider_cookie_t
)rt
;
522 return rt
? 0 : ENOENT
;
/*
 * Fragment of nstat_route_gone(): a route source is considered "gone"
 * once RTF_UP has been cleared on its rtentry.  Returns 1 when gone,
 * 0 otherwise.  The cookie is the rtentry itself (see nstat_route_lookup).
 */
527 nstat_provider_cookie_t cookie
)
529 struct rtentry
*rt
= (struct rtentry
*)cookie
;
530 return ((rt
->rt_flags
& RTF_UP
) == 0) ? 1 : 0;
535 nstat_provider_cookie_t cookie
,
536 struct nstat_counts
*out_counts
,
539 struct rtentry
*rt
= (struct rtentry
*)cookie
;
540 struct nstat_counts
*rt_stats
= rt
->rt_stats
;
542 if (out_gone
) *out_gone
= 0;
544 if (out_gone
&& (rt
->rt_flags
& RTF_UP
) == 0) *out_gone
= 1;
548 atomic_get_64(out_counts
->nstat_rxpackets
, &rt_stats
->nstat_rxpackets
);
549 atomic_get_64(out_counts
->nstat_rxbytes
, &rt_stats
->nstat_rxbytes
);
550 atomic_get_64(out_counts
->nstat_txpackets
, &rt_stats
->nstat_txpackets
);
551 atomic_get_64(out_counts
->nstat_txbytes
, &rt_stats
->nstat_txbytes
);
552 out_counts
->nstat_rxduplicatebytes
= rt_stats
->nstat_rxduplicatebytes
;
553 out_counts
->nstat_rxoutoforderbytes
= rt_stats
->nstat_rxoutoforderbytes
;
554 out_counts
->nstat_txretransmit
= rt_stats
->nstat_txretransmit
;
555 out_counts
->nstat_connectattempts
= rt_stats
->nstat_connectattempts
;
556 out_counts
->nstat_connectsuccesses
= rt_stats
->nstat_connectsuccesses
;
557 out_counts
->nstat_min_rtt
= rt_stats
->nstat_min_rtt
;
558 out_counts
->nstat_avg_rtt
= rt_stats
->nstat_avg_rtt
;
559 out_counts
->nstat_var_rtt
= rt_stats
->nstat_var_rtt
;
560 out_counts
->nstat_cell_rxbytes
= out_counts
->nstat_cell_txbytes
= 0;
564 bzero(out_counts
, sizeof(*out_counts
));
572 nstat_provider_cookie_t cookie
,
575 rtfree((struct rtentry
*)cookie
);
578 static u_int32_t nstat_route_watchers
= 0;
581 nstat_route_walktree_add(
582 struct radix_node
*rn
,
586 struct rtentry
*rt
= (struct rtentry
*)rn
;
587 nstat_control_state
*state
= (nstat_control_state
*)context
;
589 LCK_MTX_ASSERT(rnh_lock
, LCK_MTX_ASSERT_OWNED
);
591 /* RTF_UP can't change while rnh_lock is held */
592 if ((rt
->rt_flags
& RTF_UP
) != 0)
594 /* Clear RTPRF_OURS if the route is still usable */
596 if (rt_validate(rt
)) {
597 RT_ADDREF_LOCKED(rt
);
604 /* Otherwise if RTF_CONDEMNED, treat it as if it were down */
608 result
= nstat_control_source_add(0, state
, &nstat_route_provider
, rt
);
617 nstat_route_add_watcher(
618 nstat_control_state
*state
,
619 nstat_msg_add_all_srcs
*req
)
624 lck_mtx_lock(rnh_lock
);
626 result
= nstat_set_provider_filter(state
, req
);
629 OSIncrementAtomic(&nstat_route_watchers
);
631 for (i
= 1; i
< AF_MAX
; i
++)
633 struct radix_node_head
*rnh
;
637 result
= rnh
->rnh_walktree(rnh
, nstat_route_walktree_add
, state
);
640 // This is probably resource exhaustion.
641 // There currently isn't a good way to recover from this.
642 // Least bad seems to be to give up on the add-all but leave
643 // the watcher in place.
648 lck_mtx_unlock(rnh_lock
);
653 __private_extern__
void
654 nstat_route_new_entry(
657 if (nstat_route_watchers
== 0)
660 lck_mtx_lock(&nstat_mtx
);
661 if ((rt
->rt_flags
& RTF_UP
) != 0)
663 nstat_control_state
*state
;
664 for (state
= nstat_controls
; state
; state
= state
->ncs_next
)
666 if ((state
->ncs_watching
& (1 << NSTAT_PROVIDER_ROUTE
)) != 0)
668 // this client is watching routes
669 // acquire a reference for the route
672 // add the source, if that fails, release the reference
673 if (nstat_control_source_add(0, state
, &nstat_route_provider
, rt
) != 0)
678 lck_mtx_unlock(&nstat_mtx
);
682 nstat_route_remove_watcher(
683 __unused nstat_control_state
*state
)
685 OSDecrementAtomic(&nstat_route_watchers
);
689 nstat_route_copy_descriptor(
690 nstat_provider_cookie_t cookie
,
694 nstat_route_descriptor
*desc
= (nstat_route_descriptor
*)data
;
695 if (len
< sizeof(*desc
))
699 bzero(desc
, sizeof(*desc
));
701 struct rtentry
*rt
= (struct rtentry
*)cookie
;
702 desc
->id
= (uint64_t)VM_KERNEL_ADDRPERM(rt
);
703 desc
->parent_id
= (uint64_t)VM_KERNEL_ADDRPERM(rt
->rt_parent
);
704 desc
->gateway_id
= (uint64_t)VM_KERNEL_ADDRPERM(rt
->rt_gwroute
);
709 if ((sa
= rt_key(rt
)))
710 nstat_copy_sa_out(sa
, &desc
->dst
.sa
, sizeof(desc
->dst
));
713 if ((sa
= rt_mask(rt
)) && sa
->sa_len
<= sizeof(desc
->mask
))
714 memcpy(&desc
->mask
, sa
, sa
->sa_len
);
717 if ((sa
= rt
->rt_gateway
))
718 nstat_copy_sa_out(sa
, &desc
->gateway
.sa
, sizeof(desc
->gateway
));
721 desc
->ifindex
= rt
->rt_ifp
->if_index
;
723 desc
->flags
= rt
->rt_flags
;
729 nstat_route_reporting_allowed(nstat_provider_cookie_t cookie
, nstat_provider_filter
*filter
)
733 if ((filter
->npf_flags
& NSTAT_FILTER_IFNET_FLAGS
) != 0)
735 struct rtentry
*rt
= (struct rtentry
*)cookie
;
736 struct ifnet
*ifp
= rt
->rt_ifp
;
740 uint16_t interface_properties
= nstat_ifnet_to_flags(ifp
);
742 if ((filter
->npf_flags
& interface_properties
) == 0)
752 nstat_init_route_provider(void)
754 bzero(&nstat_route_provider
, sizeof(nstat_route_provider
));
755 nstat_route_provider
.nstat_descriptor_length
= sizeof(nstat_route_descriptor
);
756 nstat_route_provider
.nstat_provider_id
= NSTAT_PROVIDER_ROUTE
;
757 nstat_route_provider
.nstat_lookup
= nstat_route_lookup
;
758 nstat_route_provider
.nstat_gone
= nstat_route_gone
;
759 nstat_route_provider
.nstat_counts
= nstat_route_counts
;
760 nstat_route_provider
.nstat_release
= nstat_route_release
;
761 nstat_route_provider
.nstat_watcher_add
= nstat_route_add_watcher
;
762 nstat_route_provider
.nstat_watcher_remove
= nstat_route_remove_watcher
;
763 nstat_route_provider
.nstat_copy_descriptor
= nstat_route_copy_descriptor
;
764 nstat_route_provider
.nstat_reporting_allowed
= nstat_route_reporting_allowed
;
765 nstat_route_provider
.next
= nstat_providers
;
766 nstat_providers
= &nstat_route_provider
;
769 #pragma mark -- Route Collection --
771 __private_extern__
struct nstat_counts
*
775 struct nstat_counts
*result
= rte
->rt_stats
;
776 if (result
) return result
;
778 if (nstat_malloc_tag
== NULL
) nstat_init();
780 result
= nstat_malloc_aligned(sizeof(*result
), sizeof(u_int64_t
), nstat_malloc_tag
);
781 if (!result
) return result
;
783 bzero(result
, sizeof(*result
));
785 if (!OSCompareAndSwapPtr(NULL
, result
, &rte
->rt_stats
))
787 nstat_free_aligned(result
, nstat_malloc_tag
);
788 result
= rte
->rt_stats
;
794 __private_extern__
void
800 nstat_free_aligned(rte
->rt_stats
, nstat_malloc_tag
);
801 rte
->rt_stats
= NULL
;
805 __private_extern__
void
806 nstat_route_connect_attempt(
811 struct nstat_counts
* stats
= nstat_route_attach(rte
);
814 OSIncrementAtomic(&stats
->nstat_connectattempts
);
817 rte
= rte
->rt_parent
;
821 __private_extern__
void
822 nstat_route_connect_success(
828 struct nstat_counts
* stats
= nstat_route_attach(rte
);
831 OSIncrementAtomic(&stats
->nstat_connectsuccesses
);
834 rte
= rte
->rt_parent
;
838 __private_extern__
void
847 struct nstat_counts
* stats
= nstat_route_attach(rte
);
850 if ((flags
& NSTAT_TX_FLAG_RETRANSMIT
) != 0)
852 OSAddAtomic(bytes
, &stats
->nstat_txretransmit
);
856 OSAddAtomic64((SInt64
)packets
, (SInt64
*)&stats
->nstat_txpackets
);
857 OSAddAtomic64((SInt64
)bytes
, (SInt64
*)&stats
->nstat_txbytes
);
861 rte
= rte
->rt_parent
;
865 __private_extern__
void
874 struct nstat_counts
* stats
= nstat_route_attach(rte
);
879 OSAddAtomic64((SInt64
)packets
, (SInt64
*)&stats
->nstat_rxpackets
);
880 OSAddAtomic64((SInt64
)bytes
, (SInt64
*)&stats
->nstat_rxbytes
);
884 if (flags
& NSTAT_RX_FLAG_OUT_OF_ORDER
)
885 OSAddAtomic(bytes
, &stats
->nstat_rxoutoforderbytes
);
886 if (flags
& NSTAT_RX_FLAG_DUPLICATE
)
887 OSAddAtomic(bytes
, &stats
->nstat_rxduplicatebytes
);
891 rte
= rte
->rt_parent
;
895 /* atomically average current value at _val_addr with _new_val and store */
896 #define NSTAT_EWMA_ATOMIC(_val_addr, _new_val, _decay) do { \
897 volatile uint32_t _old_val; \
898 volatile uint32_t _avg; \
900 _old_val = *_val_addr; \
907 _avg = _old_val - (_old_val >> _decay) + (_new_val >> _decay); \
909 if (_old_val == _avg) break; \
910 } while (!OSCompareAndSwap(_old_val, _avg, _val_addr)); \
913 /* atomically compute minimum of current value at _val_addr with _new_val and store */
914 #define NSTAT_MIN_ATOMIC(_val_addr, _new_val) do { \
915 volatile uint32_t _old_val; \
917 _old_val = *_val_addr; \
918 if (_old_val != 0 && _old_val < _new_val) \
922 } while (!OSCompareAndSwap(_old_val, _new_val, _val_addr)); \
925 __private_extern__
void
931 const uint32_t decay
= 3;
935 struct nstat_counts
* stats
= nstat_route_attach(rte
);
938 NSTAT_EWMA_ATOMIC(&stats
->nstat_avg_rtt
, rtt
, decay
);
939 NSTAT_MIN_ATOMIC(&stats
->nstat_min_rtt
, rtt
);
940 NSTAT_EWMA_ATOMIC(&stats
->nstat_var_rtt
, rtt_var
, decay
);
942 rte
= rte
->rt_parent
;
946 __private_extern__
void
949 uint32_t connect_attempts
,
950 uint32_t connect_successes
,
953 uint32_t rx_duplicatebytes
,
954 uint32_t rx_outoforderbytes
,
957 uint32_t tx_retransmit
,
961 const uint32_t decay
= 3;
965 struct nstat_counts
* stats
= nstat_route_attach(rte
);
968 OSAddAtomic(connect_attempts
, &stats
->nstat_connectattempts
);
969 OSAddAtomic(connect_successes
, &stats
->nstat_connectsuccesses
);
970 OSAddAtomic64((SInt64
)tx_packets
, (SInt64
*)&stats
->nstat_txpackets
);
971 OSAddAtomic64((SInt64
)tx_bytes
, (SInt64
*)&stats
->nstat_txbytes
);
972 OSAddAtomic(tx_retransmit
, &stats
->nstat_txretransmit
);
973 OSAddAtomic64((SInt64
)rx_packets
, (SInt64
*)&stats
->nstat_rxpackets
);
974 OSAddAtomic64((SInt64
)rx_bytes
, (SInt64
*)&stats
->nstat_rxbytes
);
975 OSAddAtomic(rx_outoforderbytes
, &stats
->nstat_rxoutoforderbytes
);
976 OSAddAtomic(rx_duplicatebytes
, &stats
->nstat_rxduplicatebytes
);
979 NSTAT_EWMA_ATOMIC(&stats
->nstat_avg_rtt
, rtt
, decay
);
980 NSTAT_MIN_ATOMIC(&stats
->nstat_min_rtt
, rtt
);
981 NSTAT_EWMA_ATOMIC(&stats
->nstat_var_rtt
, rtt_var
, decay
);
984 rte
= rte
->rt_parent
;
988 #pragma mark -- TCP Kernel Provider --
991 * Due to the way the kernel deallocates a process (the process structure
992 * might be gone by the time we get the PCB detach notification),
993 * we need to cache the process name. Without this, proc_name() would
994 * return null and the process name would never be sent to userland.
996 * For UDP sockets, we also store the cached the connection tuples along with
997 * the interface index. This is necessary because when UDP sockets are
998 * disconnected, the connection tuples are forever lost from the inpcb, thus
999 * we need to keep track of the last call to connect() in ntstat.
1001 struct nstat_tucookie
{
1003 char pname
[MAXCOMLEN
+1];
1007 struct sockaddr_in v4
;
1008 struct sockaddr_in6 v6
;
1012 struct sockaddr_in v4
;
1013 struct sockaddr_in6 v6
;
1015 unsigned int if_index
;
1016 uint16_t ifnet_properties
;
1019 static struct nstat_tucookie
*
1020 nstat_tucookie_alloc_internal(
1025 struct nstat_tucookie
*cookie
;
1027 cookie
= OSMalloc(sizeof(*cookie
), nstat_malloc_tag
);
1031 LCK_MTX_ASSERT(&nstat_mtx
, LCK_MTX_ASSERT_NOTOWNED
);
1032 if (ref
&& in_pcb_checkstate(inp
, WNT_ACQUIRE
, locked
) == WNT_STOPUSING
)
1034 OSFree(cookie
, sizeof(*cookie
), nstat_malloc_tag
);
1037 bzero(cookie
, sizeof(*cookie
));
1039 proc_name(inp
->inp_socket
->last_pid
, cookie
->pname
,
1040 sizeof(cookie
->pname
));
1042 * We only increment the reference count for UDP sockets because we
1043 * only cache UDP socket tuples.
1045 if (SOCK_PROTO(inp
->inp_socket
) == IPPROTO_UDP
)
1046 OSIncrementAtomic(&inp
->inp_nstat_refcnt
);
1051 static struct nstat_tucookie
*
1052 nstat_tucookie_alloc(
1055 return nstat_tucookie_alloc_internal(inp
, false, false);
1058 static struct nstat_tucookie
*
1059 nstat_tucookie_alloc_ref(
1062 return nstat_tucookie_alloc_internal(inp
, true, false);
1065 static struct nstat_tucookie
*
1066 nstat_tucookie_alloc_ref_locked(
1069 return nstat_tucookie_alloc_internal(inp
, true, true);
1073 nstat_tucookie_release_internal(
1074 struct nstat_tucookie
*cookie
,
1077 if (SOCK_PROTO(cookie
->inp
->inp_socket
) == IPPROTO_UDP
)
1078 OSDecrementAtomic(&cookie
->inp
->inp_nstat_refcnt
);
1079 in_pcb_checkstate(cookie
->inp
, WNT_RELEASE
, inplock
);
1080 OSFree(cookie
, sizeof(*cookie
), nstat_malloc_tag
);
1084 nstat_tucookie_release(
1085 struct nstat_tucookie
*cookie
)
1087 nstat_tucookie_release_internal(cookie
, false);
1091 nstat_tucookie_release_locked(
1092 struct nstat_tucookie
*cookie
)
1094 nstat_tucookie_release_internal(cookie
, true);
1098 static nstat_provider nstat_tcp_provider
;
1101 nstat_tcpudp_lookup(
1102 struct inpcbinfo
*inpinfo
,
1105 nstat_provider_cookie_t
*out_cookie
)
1107 struct inpcb
*inp
= NULL
;
1109 // parameter validation
1110 const nstat_tcp_add_param
*param
= (const nstat_tcp_add_param
*)data
;
1111 if (length
< sizeof(*param
))
1116 // src and dst must match
1117 if (param
->remote
.v4
.sin_family
!= 0 &&
1118 param
->remote
.v4
.sin_family
!= param
->local
.v4
.sin_family
)
1124 switch (param
->local
.v4
.sin_family
)
1128 if (param
->local
.v4
.sin_len
!= sizeof(param
->local
.v4
) ||
1129 (param
->remote
.v4
.sin_family
!= 0 &&
1130 param
->remote
.v4
.sin_len
!= sizeof(param
->remote
.v4
)))
1135 inp
= in_pcblookup_hash(inpinfo
, param
->remote
.v4
.sin_addr
, param
->remote
.v4
.sin_port
,
1136 param
->local
.v4
.sin_addr
, param
->local
.v4
.sin_port
, 1, NULL
);
1145 const struct in6_addr
*in6c
;
1146 struct in6_addr
*in6
;
1149 if (param
->local
.v6
.sin6_len
!= sizeof(param
->local
.v6
) ||
1150 (param
->remote
.v6
.sin6_family
!= 0 &&
1151 param
->remote
.v6
.sin6_len
!= sizeof(param
->remote
.v6
)))
1156 local
.in6c
= ¶m
->local
.v6
.sin6_addr
;
1157 remote
.in6c
= ¶m
->remote
.v6
.sin6_addr
;
1159 inp
= in6_pcblookup_hash(inpinfo
, remote
.in6
, param
->remote
.v6
.sin6_port
,
1160 local
.in6
, param
->local
.v6
.sin6_port
, 1, NULL
);
1172 // At this point we have a ref to the inpcb
1173 *out_cookie
= nstat_tucookie_alloc(inp
);
1174 if (*out_cookie
== NULL
)
1175 in_pcb_checkstate(inp
, WNT_RELEASE
, 0);
1184 nstat_provider_cookie_t
*out_cookie
)
1186 return nstat_tcpudp_lookup(&tcbinfo
, data
, length
, out_cookie
);
1191 nstat_provider_cookie_t cookie
)
1193 struct nstat_tucookie
*tucookie
=
1194 (struct nstat_tucookie
*)cookie
;
1198 return (!(inp
= tucookie
->inp
) ||
1199 !(tp
= intotcpcb(inp
)) ||
1200 inp
->inp_state
== INPCB_STATE_DEAD
) ? 1 : 0;
1205 nstat_provider_cookie_t cookie
,
1206 struct nstat_counts
*out_counts
,
1209 struct nstat_tucookie
*tucookie
=
1210 (struct nstat_tucookie
*)cookie
;
1213 bzero(out_counts
, sizeof(*out_counts
));
1215 if (out_gone
) *out_gone
= 0;
1217 // if the pcb is in the dead state, we should stop using it
1218 if (nstat_tcp_gone(cookie
))
1220 if (out_gone
) *out_gone
= 1;
1221 if (!(inp
= tucookie
->inp
) || !intotcpcb(inp
))
1224 inp
= tucookie
->inp
;
1225 struct tcpcb
*tp
= intotcpcb(inp
);
1227 atomic_get_64(out_counts
->nstat_rxpackets
, &inp
->inp_stat
->rxpackets
);
1228 atomic_get_64(out_counts
->nstat_rxbytes
, &inp
->inp_stat
->rxbytes
);
1229 atomic_get_64(out_counts
->nstat_txpackets
, &inp
->inp_stat
->txpackets
);
1230 atomic_get_64(out_counts
->nstat_txbytes
, &inp
->inp_stat
->txbytes
);
1231 out_counts
->nstat_rxduplicatebytes
= tp
->t_stat
.rxduplicatebytes
;
1232 out_counts
->nstat_rxoutoforderbytes
= tp
->t_stat
.rxoutoforderbytes
;
1233 out_counts
->nstat_txretransmit
= tp
->t_stat
.txretransmitbytes
;
1234 out_counts
->nstat_connectattempts
= tp
->t_state
>= TCPS_SYN_SENT
? 1 : 0;
1235 out_counts
->nstat_connectsuccesses
= tp
->t_state
>= TCPS_ESTABLISHED
? 1 : 0;
1236 out_counts
->nstat_avg_rtt
= tp
->t_srtt
;
1237 out_counts
->nstat_min_rtt
= tp
->t_rttbest
;
1238 out_counts
->nstat_var_rtt
= tp
->t_rttvar
;
1239 if (out_counts
->nstat_avg_rtt
< out_counts
->nstat_min_rtt
)
1240 out_counts
->nstat_min_rtt
= out_counts
->nstat_avg_rtt
;
1241 atomic_get_64(out_counts
->nstat_cell_rxbytes
, &inp
->inp_cstat
->rxbytes
);
1242 atomic_get_64(out_counts
->nstat_cell_txbytes
, &inp
->inp_cstat
->txbytes
);
1243 atomic_get_64(out_counts
->nstat_wifi_rxbytes
, &inp
->inp_wstat
->rxbytes
);
1244 atomic_get_64(out_counts
->nstat_wifi_txbytes
, &inp
->inp_wstat
->txbytes
);
1245 atomic_get_64(out_counts
->nstat_wired_rxbytes
, &inp
->inp_Wstat
->rxbytes
);
1246 atomic_get_64(out_counts
->nstat_wired_txbytes
, &inp
->inp_Wstat
->txbytes
);
1253 nstat_provider_cookie_t cookie
,
1256 struct nstat_tucookie
*tucookie
=
1257 (struct nstat_tucookie
*)cookie
;
1259 nstat_tucookie_release_internal(tucookie
, locked
);
// Watcher-add callback for the TCP kernel provider: applies the client's
// filter, marks this control state as a TCP watcher, and enumerates every
// existing TCP inpcb as a source.
// NOTE(review): several original lines (result declaration, error handling,
// braces, cookie-allocation failure path) are missing from this chunk; this
// fragment is not the complete function.
1263 nstat_tcp_add_watcher(
1264 nstat_control_state
*state
,
1265 nstat_msg_add_all_srcs
*req
)
1267 // There is a tricky issue around getting all TCP sockets added once
1268 // and only once. nstat_tcp_new_pcb() is called prior to the new item
1269 // being placed on any lists where it might be found.
1270 // By locking the tcbinfo.ipi_lock prior to marking the state as a watcher,
1271 // it should be impossible for a new socket to be added twice.
1272 // On the other hand, there is still a timing issue where a new socket
1273 // results in a call to nstat_tcp_new_pcb() before this watcher
1274 // is instantiated and yet the socket doesn't make it into ipi_listhead
1275 // prior to the scan. <rdar://problem/30361716>
// Hold the pcb-info lock shared for the whole scan (see comment above).
1279 lck_rw_lock_shared(tcbinfo
.ipi_lock
)
;
// Record the client's provider filter before enumerating sources.
1280 result
= nstat_set_provider_filter(state
, req
);
// Publish the watcher count that nstat_tcp_new_pcb() consults.
1282 OSIncrementAtomic(&nstat_tcp_watchers
);
1284 // Add all current tcp inpcbs. Ignore those in timewait
1286 struct nstat_tucookie
*cookie
;
// Walk every current TCP pcb and register it as a source for this client.
1287 LIST_FOREACH(inp
, tcbinfo
.ipi_listhead
, inp_list
)
// Take a reference on the pcb via a freshly allocated tucookie.
1289 cookie
= nstat_tucookie_alloc_ref(inp
);
// If the source cannot be added, release the cookie reference again
// (the dropped lines in between presumably skip pcbs whose cookie
// allocation failed — confirm against the full file).
1292 if (nstat_control_source_add(0, state
, &nstat_tcp_provider
,
1295 nstat_tucookie_release(cookie
);
// Scan complete; release the pcb-info lock.
1301 lck_rw_done(tcbinfo
.ipi_lock
);
1307 nstat_tcp_remove_watcher(
1308 __unused nstat_control_state
*state
)
1310 OSDecrementAtomic(&nstat_tcp_watchers
);
1313 __private_extern__
void
1317 struct nstat_tucookie
*cookie
;
1319 inp
->inp_start_timestamp
= mach_continuous_time();
1321 if (nstat_tcp_watchers
== 0)
1324 socket_lock(inp
->inp_socket
, 0);
1325 lck_mtx_lock(&nstat_mtx
);
1326 nstat_control_state
*state
;
1327 for (state
= nstat_controls
; state
; state
= state
->ncs_next
)
1329 if ((state
->ncs_watching
& (1 << NSTAT_PROVIDER_TCP_KERNEL
)) != 0)
1331 // this client is watching tcp
1332 // acquire a reference for it
1333 cookie
= nstat_tucookie_alloc_ref_locked(inp
);
1336 // add the source, if that fails, release the reference
1337 if (nstat_control_source_add(0, state
,
1338 &nstat_tcp_provider
, cookie
) != 0)
1340 nstat_tucookie_release_locked(cookie
);
1345 lck_mtx_unlock(&nstat_mtx
);
1346 socket_unlock(inp
->inp_socket
, 0);
1349 __private_extern__
void
1350 nstat_pcb_detach(struct inpcb
*inp
)
1352 nstat_control_state
*state
;
1354 tailq_head_nstat_src dead_list
;
1355 struct nstat_tucookie
*tucookie
;
1358 if (inp
== NULL
|| (nstat_tcp_watchers
== 0 && nstat_udp_watchers
== 0))
1361 TAILQ_INIT(&dead_list
);
1362 lck_mtx_lock(&nstat_mtx
);
1363 for (state
= nstat_controls
; state
; state
= state
->ncs_next
)
1365 lck_mtx_lock(&state
->ncs_mtx
);
1366 TAILQ_FOREACH(src
, &state
->ncs_src_queue
, ns_control_link
)
1368 nstat_provider_id_t provider_id
= src
->provider
->nstat_provider_id
;
1369 if (provider_id
== NSTAT_PROVIDER_TCP_KERNEL
|| provider_id
== NSTAT_PROVIDER_UDP_KERNEL
)
1371 tucookie
= (struct nstat_tucookie
*)src
->cookie
;
1372 if (tucookie
->inp
== inp
)
1379 result
= nstat_control_send_goodbye(state
, src
);
1381 TAILQ_REMOVE(&state
->ncs_src_queue
, src
, ns_control_link
);
1382 TAILQ_INSERT_TAIL(&dead_list
, src
, ns_control_link
);
1384 lck_mtx_unlock(&state
->ncs_mtx
);
1386 lck_mtx_unlock(&nstat_mtx
);
1388 while ((src
= TAILQ_FIRST(&dead_list
)))
1390 TAILQ_REMOVE(&dead_list
, src
, ns_control_link
);
1391 nstat_control_cleanup_source(NULL
, src
, TRUE
);
1395 __private_extern__
void
1396 nstat_pcb_cache(struct inpcb
*inp
)
1398 nstat_control_state
*state
;
1400 struct nstat_tucookie
*tucookie
;
1402 if (inp
== NULL
|| nstat_udp_watchers
== 0 ||
1403 inp
->inp_nstat_refcnt
== 0)
1405 VERIFY(SOCK_PROTO(inp
->inp_socket
) == IPPROTO_UDP
);
1406 lck_mtx_lock(&nstat_mtx
);
1407 for (state
= nstat_controls
; state
; state
= state
->ncs_next
) {
1408 lck_mtx_lock(&state
->ncs_mtx
);
1409 TAILQ_FOREACH(src
, &state
->ncs_src_queue
, ns_control_link
)
1411 tucookie
= (struct nstat_tucookie
*)src
->cookie
;
1412 if (tucookie
->inp
== inp
)
1414 if (inp
->inp_vflag
& INP_IPV6
)
1416 in6_ip6_to_sockaddr(&inp
->in6p_laddr
,
1418 &tucookie
->local
.v6
,
1419 sizeof(tucookie
->local
));
1420 in6_ip6_to_sockaddr(&inp
->in6p_faddr
,
1422 &tucookie
->remote
.v6
,
1423 sizeof(tucookie
->remote
));
1425 else if (inp
->inp_vflag
& INP_IPV4
)
1427 nstat_ip_to_sockaddr(&inp
->inp_laddr
,
1429 &tucookie
->local
.v4
,
1430 sizeof(tucookie
->local
));
1431 nstat_ip_to_sockaddr(&inp
->inp_faddr
,
1433 &tucookie
->remote
.v4
,
1434 sizeof(tucookie
->remote
));
1436 if (inp
->inp_last_outifp
)
1437 tucookie
->if_index
=
1438 inp
->inp_last_outifp
->if_index
;
1440 tucookie
->ifnet_properties
= nstat_inpcb_to_flags(inp
);
1441 tucookie
->cached
= true;
1445 lck_mtx_unlock(&state
->ncs_mtx
);
1447 lck_mtx_unlock(&nstat_mtx
);
1450 __private_extern__
void
1451 nstat_pcb_invalidate_cache(struct inpcb
*inp
)
1453 nstat_control_state
*state
;
1455 struct nstat_tucookie
*tucookie
;
1457 if (inp
== NULL
|| nstat_udp_watchers
== 0 ||
1458 inp
->inp_nstat_refcnt
== 0)
1460 VERIFY(SOCK_PROTO(inp
->inp_socket
) == IPPROTO_UDP
);
1461 lck_mtx_lock(&nstat_mtx
);
1462 for (state
= nstat_controls
; state
; state
= state
->ncs_next
) {
1463 lck_mtx_lock(&state
->ncs_mtx
);
1464 TAILQ_FOREACH(src
, &state
->ncs_src_queue
, ns_control_link
)
1466 tucookie
= (struct nstat_tucookie
*)src
->cookie
;
1467 if (tucookie
->inp
== inp
)
1469 tucookie
->cached
= false;
1473 lck_mtx_unlock(&state
->ncs_mtx
);
1475 lck_mtx_unlock(&nstat_mtx
);
1479 nstat_tcp_copy_descriptor(
1480 nstat_provider_cookie_t cookie
,
1484 if (len
< sizeof(nstat_tcp_descriptor
))
1489 if (nstat_tcp_gone(cookie
))
1492 nstat_tcp_descriptor
*desc
= (nstat_tcp_descriptor
*)data
;
1493 struct nstat_tucookie
*tucookie
=
1494 (struct nstat_tucookie
*)cookie
;
1495 struct inpcb
*inp
= tucookie
->inp
;
1496 struct tcpcb
*tp
= intotcpcb(inp
);
1497 bzero(desc
, sizeof(*desc
));
1499 if (inp
->inp_vflag
& INP_IPV6
)
1501 in6_ip6_to_sockaddr(&inp
->in6p_laddr
, inp
->inp_lport
,
1502 &desc
->local
.v6
, sizeof(desc
->local
));
1503 in6_ip6_to_sockaddr(&inp
->in6p_faddr
, inp
->inp_fport
,
1504 &desc
->remote
.v6
, sizeof(desc
->remote
));
1506 else if (inp
->inp_vflag
& INP_IPV4
)
1508 nstat_ip_to_sockaddr(&inp
->inp_laddr
, inp
->inp_lport
,
1509 &desc
->local
.v4
, sizeof(desc
->local
));
1510 nstat_ip_to_sockaddr(&inp
->inp_faddr
, inp
->inp_fport
,
1511 &desc
->remote
.v4
, sizeof(desc
->remote
));
1514 desc
->state
= intotcpcb(inp
)->t_state
;
1515 desc
->ifindex
= (inp
->inp_last_outifp
== NULL
) ? 0 :
1516 inp
->inp_last_outifp
->if_index
;
1518 // danger - not locked, values could be bogus
1519 desc
->txunacked
= tp
->snd_max
- tp
->snd_una
;
1520 desc
->txwindow
= tp
->snd_wnd
;
1521 desc
->txcwindow
= tp
->snd_cwnd
;
1523 if (CC_ALGO(tp
)->name
!= NULL
) {
1524 strlcpy(desc
->cc_algo
, CC_ALGO(tp
)->name
,
1525 sizeof(desc
->cc_algo
));
1528 struct socket
*so
= inp
->inp_socket
;
1531 // TBD - take the socket lock around these to make sure
1533 desc
->upid
= so
->last_upid
;
1534 desc
->pid
= so
->last_pid
;
1535 desc
->traffic_class
= so
->so_traffic_class
;
1536 if ((so
->so_flags1
& SOF1_TRAFFIC_MGT_SO_BACKGROUND
))
1537 desc
->traffic_mgt_flags
|= TRAFFIC_MGT_SO_BACKGROUND
;
1538 if ((so
->so_flags1
& SOF1_TRAFFIC_MGT_TCP_RECVBG
))
1539 desc
->traffic_mgt_flags
|= TRAFFIC_MGT_TCP_RECVBG
;
1540 proc_name(desc
->pid
, desc
->pname
, sizeof(desc
->pname
));
1541 if (desc
->pname
[0] == 0)
1543 strlcpy(desc
->pname
, tucookie
->pname
,
1544 sizeof(desc
->pname
));
1548 desc
->pname
[sizeof(desc
->pname
) - 1] = 0;
1549 strlcpy(tucookie
->pname
, desc
->pname
,
1550 sizeof(tucookie
->pname
));
1552 memcpy(desc
->uuid
, so
->last_uuid
, sizeof(so
->last_uuid
));
1553 memcpy(desc
->vuuid
, so
->so_vuuid
, sizeof(so
->so_vuuid
));
1554 if (so
->so_flags
& SOF_DELEGATED
) {
1555 desc
->eupid
= so
->e_upid
;
1556 desc
->epid
= so
->e_pid
;
1557 memcpy(desc
->euuid
, so
->e_uuid
, sizeof(so
->e_uuid
));
1559 desc
->eupid
= desc
->upid
;
1560 desc
->epid
= desc
->pid
;
1561 memcpy(desc
->euuid
, desc
->uuid
, sizeof(desc
->uuid
));
1563 desc
->sndbufsize
= so
->so_snd
.sb_hiwat
;
1564 desc
->sndbufused
= so
->so_snd
.sb_cc
;
1565 desc
->rcvbufsize
= so
->so_rcv
.sb_hiwat
;
1566 desc
->rcvbufused
= so
->so_rcv
.sb_cc
;
1569 tcp_get_connectivity_status(tp
, &desc
->connstatus
);
1570 desc
->ifnet_properties
= nstat_inpcb_to_flags(inp
);
1571 inp_get_activity_bitmap(inp
, &desc
->activity_bitmap
);
1572 desc
->start_timestamp
= inp
->inp_start_timestamp
;
1573 desc
->timestamp
= mach_continuous_time();
1578 nstat_tcpudp_reporting_allowed(nstat_provider_cookie_t cookie
, nstat_provider_filter
*filter
, bool is_UDP
)
1582 if ((filter
->npf_flags
& (NSTAT_FILTER_IFNET_FLAGS
|NSTAT_FILTER_SPECIFIC_USER
)) != 0)
1584 struct nstat_tucookie
*tucookie
= (struct nstat_tucookie
*)cookie
;
1585 struct inpcb
*inp
= tucookie
->inp
;
1587 /* Only apply interface filter if at least one is allowed. */
1588 if ((filter
->npf_flags
& NSTAT_FILTER_IFNET_FLAGS
) != 0)
1590 uint16_t interface_properties
= nstat_inpcb_to_flags(inp
);
1592 if ((filter
->npf_flags
& interface_properties
) == 0)
1594 // For UDP, we could have an undefined interface and yet transfers may have occurred.
1595 // We allow reporting if there have been transfers of the requested kind.
1596 // This is imperfect as we cannot account for the expensive attribute over wifi.
1597 // We also assume that cellular is expensive and we have no way to select for AWDL
1602 if ((filter
->npf_flags
& (NSTAT_FILTER_ACCEPT_CELLULAR
|NSTAT_FILTER_ACCEPT_EXPENSIVE
)) &&
1603 (inp
->inp_cstat
->rxbytes
|| inp
->inp_cstat
->txbytes
))
1607 if ((filter
->npf_flags
& NSTAT_FILTER_ACCEPT_WIFI
) &&
1608 (inp
->inp_wstat
->rxbytes
|| inp
->inp_wstat
->txbytes
))
1612 if ((filter
->npf_flags
& NSTAT_FILTER_ACCEPT_WIRED
) &&
1613 (inp
->inp_Wstat
->rxbytes
|| inp
->inp_Wstat
->txbytes
))
1627 if (((filter
->npf_flags
& NSTAT_FILTER_SPECIFIC_USER
) != 0) && (retval
))
1629 struct socket
*so
= inp
->inp_socket
;
1634 if (((filter
->npf_flags
& NSTAT_FILTER_SPECIFIC_USER_BY_PID
) != 0) &&
1635 (filter
->npf_pid
== so
->last_pid
))
1639 else if (((filter
->npf_flags
& NSTAT_FILTER_SPECIFIC_USER_BY_EPID
) != 0) &&
1640 (filter
->npf_pid
== (so
->so_flags
& SOF_DELEGATED
)? so
->e_upid
: so
->last_pid
))
1644 else if (((filter
->npf_flags
& NSTAT_FILTER_SPECIFIC_USER_BY_UUID
) != 0) &&
1645 (memcmp(filter
->npf_uuid
, so
->last_uuid
, sizeof(so
->last_uuid
)) == 0))
1649 else if (((filter
->npf_flags
& NSTAT_FILTER_SPECIFIC_USER_BY_EUUID
) != 0) &&
1650 (memcmp(filter
->npf_uuid
, (so
->so_flags
& SOF_DELEGATED
)? so
->e_uuid
: so
->last_uuid
,
1651 sizeof(so
->last_uuid
)) == 0))
1662 nstat_tcp_reporting_allowed(nstat_provider_cookie_t cookie
, nstat_provider_filter
*filter
)
1664 return nstat_tcpudp_reporting_allowed(cookie
, filter
, FALSE
);
1668 nstat_init_tcp_provider(void)
1670 bzero(&nstat_tcp_provider
, sizeof(nstat_tcp_provider
));
1671 nstat_tcp_provider
.nstat_descriptor_length
= sizeof(nstat_tcp_descriptor
);
1672 nstat_tcp_provider
.nstat_provider_id
= NSTAT_PROVIDER_TCP_KERNEL
;
1673 nstat_tcp_provider
.nstat_lookup
= nstat_tcp_lookup
;
1674 nstat_tcp_provider
.nstat_gone
= nstat_tcp_gone
;
1675 nstat_tcp_provider
.nstat_counts
= nstat_tcp_counts
;
1676 nstat_tcp_provider
.nstat_release
= nstat_tcp_release
;
1677 nstat_tcp_provider
.nstat_watcher_add
= nstat_tcp_add_watcher
;
1678 nstat_tcp_provider
.nstat_watcher_remove
= nstat_tcp_remove_watcher
;
1679 nstat_tcp_provider
.nstat_copy_descriptor
= nstat_tcp_copy_descriptor
;
1680 nstat_tcp_provider
.nstat_reporting_allowed
= nstat_tcp_reporting_allowed
;
1681 nstat_tcp_provider
.next
= nstat_providers
;
1682 nstat_providers
= &nstat_tcp_provider
;
1685 #pragma mark -- UDP Provider --
1687 static nstat_provider nstat_udp_provider
;
1693 nstat_provider_cookie_t
*out_cookie
)
1695 return nstat_tcpudp_lookup(&udbinfo
, data
, length
, out_cookie
);
1700 nstat_provider_cookie_t cookie
)
1702 struct nstat_tucookie
*tucookie
=
1703 (struct nstat_tucookie
*)cookie
;
1706 return (!(inp
= tucookie
->inp
) ||
1707 inp
->inp_state
== INPCB_STATE_DEAD
) ? 1 : 0;
1712 nstat_provider_cookie_t cookie
,
1713 struct nstat_counts
*out_counts
,
1716 struct nstat_tucookie
*tucookie
=
1717 (struct nstat_tucookie
*)cookie
;
1719 if (out_gone
) *out_gone
= 0;
1721 // if the pcb is in the dead state, we should stop using it
1722 if (nstat_udp_gone(cookie
))
1724 if (out_gone
) *out_gone
= 1;
1728 struct inpcb
*inp
= tucookie
->inp
;
1730 atomic_get_64(out_counts
->nstat_rxpackets
, &inp
->inp_stat
->rxpackets
);
1731 atomic_get_64(out_counts
->nstat_rxbytes
, &inp
->inp_stat
->rxbytes
);
1732 atomic_get_64(out_counts
->nstat_txpackets
, &inp
->inp_stat
->txpackets
);
1733 atomic_get_64(out_counts
->nstat_txbytes
, &inp
->inp_stat
->txbytes
);
1734 atomic_get_64(out_counts
->nstat_cell_rxbytes
, &inp
->inp_cstat
->rxbytes
);
1735 atomic_get_64(out_counts
->nstat_cell_txbytes
, &inp
->inp_cstat
->txbytes
);
1736 atomic_get_64(out_counts
->nstat_wifi_rxbytes
, &inp
->inp_wstat
->rxbytes
);
1737 atomic_get_64(out_counts
->nstat_wifi_txbytes
, &inp
->inp_wstat
->txbytes
);
1738 atomic_get_64(out_counts
->nstat_wired_rxbytes
, &inp
->inp_Wstat
->rxbytes
);
1739 atomic_get_64(out_counts
->nstat_wired_txbytes
, &inp
->inp_Wstat
->txbytes
);
// NOTE(review): mangled fragment — the return type, function name and the
// `locked` parameter line were dropped from this chunk. From the body and the
// later `nstat_udp_provider.nstat_release` assignment this is presumably
// nstat_udp_release, the UDP provider's release callback — confirm against
// the complete file.
1746 nstat_provider_cookie_t cookie
,
// Recover the per-socket tucookie wrapper from the opaque provider cookie.
1749 struct nstat_tucookie
*tucookie
=
1750 (struct nstat_tucookie
*)cookie
;
// Drop the reference; `locked` is declared on a line missing from this chunk.
1752 nstat_tucookie_release_internal(tucookie
, locked
);
// Watcher-add callback for the UDP kernel provider: applies the client's
// filter, marks this control state as a UDP watcher, and enumerates every
// existing UDP inpcb as a source. Mirrors nstat_tcp_add_watcher().
// NOTE(review): several original lines (result declaration, error handling,
// braces, cookie-allocation failure path) are missing from this chunk; this
// fragment is not the complete function.
1756 nstat_udp_add_watcher(
1757 nstat_control_state
*state
,
1758 nstat_msg_add_all_srcs
*req
)
1760 // There is a tricky issue around getting all UDP sockets added once
1761 // and only once. nstat_udp_new_pcb() is called prior to the new item
1762 // being placed on any lists where it might be found.
1763 // By locking the udpinfo.ipi_lock prior to marking the state as a watcher,
1764 // it should be impossible for a new socket to be added twice.
1765 // On the other hand, there is still a timing issue where a new socket
1766 // results in a call to nstat_udp_new_pcb() before this watcher
1767 // is instantiated and yet the socket doesn't make it into ipi_listhead
1768 // prior to the scan. <rdar://problem/30361716>
// Hold the pcb-info lock shared for the whole scan (see comment above).
1772 lck_rw_lock_shared(udbinfo
.ipi_lock
);
// Record the client's provider filter before enumerating sources.
1773 result
= nstat_set_provider_filter(state
, req
);
1777 struct nstat_tucookie
*cookie
;
// Publish the watcher count that nstat_udp_new_pcb() consults.
1779 OSIncrementAtomic(&nstat_udp_watchers
);
1781 // Add all current UDP inpcbs.
// Walk every current UDP pcb and register it as a source for this client.
1782 LIST_FOREACH(inp
, udbinfo
.ipi_listhead
, inp_list
)
// Take a reference on the pcb via a freshly allocated tucookie.
1784 cookie
= nstat_tucookie_alloc_ref(inp
);
// If the source cannot be added, release the cookie reference again
// (the dropped lines in between presumably skip pcbs whose cookie
// allocation failed — confirm against the full file).
1787 if (nstat_control_source_add(0, state
, &nstat_udp_provider
,
1790 nstat_tucookie_release(cookie
);
// Scan complete; release the pcb-info lock.
1796 lck_rw_done(udbinfo
.ipi_lock
);
1802 nstat_udp_remove_watcher(
1803 __unused nstat_control_state
*state
)
1805 OSDecrementAtomic(&nstat_udp_watchers
);
1808 __private_extern__
void
1812 struct nstat_tucookie
*cookie
;
1814 inp
->inp_start_timestamp
= mach_continuous_time();
1816 if (nstat_udp_watchers
== 0)
1819 socket_lock(inp
->inp_socket
, 0);
1820 lck_mtx_lock(&nstat_mtx
);
1821 nstat_control_state
*state
;
1822 for (state
= nstat_controls
; state
; state
= state
->ncs_next
)
1824 if ((state
->ncs_watching
& (1 << NSTAT_PROVIDER_UDP_KERNEL
)) != 0)
1826 // this client is watching tcp
1827 // acquire a reference for it
1828 cookie
= nstat_tucookie_alloc_ref_locked(inp
);
1831 // add the source, if that fails, release the reference
1832 if (nstat_control_source_add(0, state
,
1833 &nstat_udp_provider
, cookie
) != 0)
1835 nstat_tucookie_release_locked(cookie
);
1840 lck_mtx_unlock(&nstat_mtx
);
1841 socket_unlock(inp
->inp_socket
, 0);
1845 nstat_udp_copy_descriptor(
1846 nstat_provider_cookie_t cookie
,
1850 if (len
< sizeof(nstat_udp_descriptor
))
1855 if (nstat_udp_gone(cookie
))
1858 struct nstat_tucookie
*tucookie
=
1859 (struct nstat_tucookie
*)cookie
;
1860 nstat_udp_descriptor
*desc
= (nstat_udp_descriptor
*)data
;
1861 struct inpcb
*inp
= tucookie
->inp
;
1863 bzero(desc
, sizeof(*desc
));
1865 if (tucookie
->cached
== false) {
1866 if (inp
->inp_vflag
& INP_IPV6
)
1868 in6_ip6_to_sockaddr(&inp
->in6p_laddr
, inp
->inp_lport
,
1869 &desc
->local
.v6
, sizeof(desc
->local
.v6
));
1870 in6_ip6_to_sockaddr(&inp
->in6p_faddr
, inp
->inp_fport
,
1871 &desc
->remote
.v6
, sizeof(desc
->remote
.v6
));
1873 else if (inp
->inp_vflag
& INP_IPV4
)
1875 nstat_ip_to_sockaddr(&inp
->inp_laddr
, inp
->inp_lport
,
1876 &desc
->local
.v4
, sizeof(desc
->local
.v4
));
1877 nstat_ip_to_sockaddr(&inp
->inp_faddr
, inp
->inp_fport
,
1878 &desc
->remote
.v4
, sizeof(desc
->remote
.v4
));
1880 desc
->ifnet_properties
= nstat_inpcb_to_flags(inp
);
1884 if (inp
->inp_vflag
& INP_IPV6
)
1886 memcpy(&desc
->local
.v6
, &tucookie
->local
.v6
,
1887 sizeof(desc
->local
.v6
));
1888 memcpy(&desc
->remote
.v6
, &tucookie
->remote
.v6
,
1889 sizeof(desc
->remote
.v6
));
1891 else if (inp
->inp_vflag
& INP_IPV4
)
1893 memcpy(&desc
->local
.v4
, &tucookie
->local
.v4
,
1894 sizeof(desc
->local
.v4
));
1895 memcpy(&desc
->remote
.v4
, &tucookie
->remote
.v4
,
1896 sizeof(desc
->remote
.v4
));
1898 desc
->ifnet_properties
= tucookie
->ifnet_properties
;
1901 if (inp
->inp_last_outifp
)
1902 desc
->ifindex
= inp
->inp_last_outifp
->if_index
;
1904 desc
->ifindex
= tucookie
->if_index
;
1906 struct socket
*so
= inp
->inp_socket
;
1909 // TBD - take the socket lock around these to make sure
1911 desc
->upid
= so
->last_upid
;
1912 desc
->pid
= so
->last_pid
;
1913 proc_name(desc
->pid
, desc
->pname
, sizeof(desc
->pname
));
1914 if (desc
->pname
[0] == 0)
1916 strlcpy(desc
->pname
, tucookie
->pname
,
1917 sizeof(desc
->pname
));
1921 desc
->pname
[sizeof(desc
->pname
) - 1] = 0;
1922 strlcpy(tucookie
->pname
, desc
->pname
,
1923 sizeof(tucookie
->pname
));
1925 memcpy(desc
->uuid
, so
->last_uuid
, sizeof(so
->last_uuid
));
1926 memcpy(desc
->vuuid
, so
->so_vuuid
, sizeof(so
->so_vuuid
));
1927 if (so
->so_flags
& SOF_DELEGATED
) {
1928 desc
->eupid
= so
->e_upid
;
1929 desc
->epid
= so
->e_pid
;
1930 memcpy(desc
->euuid
, so
->e_uuid
, sizeof(so
->e_uuid
));
1932 desc
->eupid
= desc
->upid
;
1933 desc
->epid
= desc
->pid
;
1934 memcpy(desc
->euuid
, desc
->uuid
, sizeof(desc
->uuid
));
1936 desc
->rcvbufsize
= so
->so_rcv
.sb_hiwat
;
1937 desc
->rcvbufused
= so
->so_rcv
.sb_cc
;
1938 desc
->traffic_class
= so
->so_traffic_class
;
1939 inp_get_activity_bitmap(inp
, &desc
->activity_bitmap
);
1940 desc
->start_timestamp
= inp
->inp_start_timestamp
;
1941 desc
->timestamp
= mach_continuous_time();
1948 nstat_udp_reporting_allowed(nstat_provider_cookie_t cookie
, nstat_provider_filter
*filter
)
1950 return nstat_tcpudp_reporting_allowed(cookie
, filter
, TRUE
);
1955 nstat_init_udp_provider(void)
1957 bzero(&nstat_udp_provider
, sizeof(nstat_udp_provider
));
1958 nstat_udp_provider
.nstat_provider_id
= NSTAT_PROVIDER_UDP_KERNEL
;
1959 nstat_udp_provider
.nstat_descriptor_length
= sizeof(nstat_udp_descriptor
);
1960 nstat_udp_provider
.nstat_lookup
= nstat_udp_lookup
;
1961 nstat_udp_provider
.nstat_gone
= nstat_udp_gone
;
1962 nstat_udp_provider
.nstat_counts
= nstat_udp_counts
;
1963 nstat_udp_provider
.nstat_watcher_add
= nstat_udp_add_watcher
;
1964 nstat_udp_provider
.nstat_watcher_remove
= nstat_udp_remove_watcher
;
1965 nstat_udp_provider
.nstat_copy_descriptor
= nstat_udp_copy_descriptor
;
1966 nstat_udp_provider
.nstat_release
= nstat_udp_release
;
1967 nstat_udp_provider
.nstat_reporting_allowed
= nstat_udp_reporting_allowed
;
1968 nstat_udp_provider
.next
= nstat_providers
;
1969 nstat_providers
= &nstat_udp_provider
;
1974 #pragma mark -- ifnet Provider --
1976 static nstat_provider nstat_ifnet_provider
;
1979 * We store a pointer to the ifnet and the original threshold
1980 * requested by the client.
1982 struct nstat_ifnet_cookie
1992 nstat_provider_cookie_t
*out_cookie
)
1994 const nstat_ifnet_add_param
*param
= (const nstat_ifnet_add_param
*)data
;
1996 boolean_t changed
= FALSE
;
1997 nstat_control_state
*state
;
1999 struct nstat_ifnet_cookie
*cookie
;
2001 if (length
< sizeof(*param
) || param
->threshold
< 1024*1024)
2003 if (nstat_privcheck
!= 0) {
2004 errno_t result
= priv_check_cred(kauth_cred_get(),
2005 PRIV_NET_PRIVILEGED_NETWORK_STATISTICS
, 0);
2009 cookie
= OSMalloc(sizeof(*cookie
), nstat_malloc_tag
);
2012 bzero(cookie
, sizeof(*cookie
));
2014 ifnet_head_lock_shared();
2015 TAILQ_FOREACH(ifp
, &ifnet_head
, if_link
)
2017 ifnet_lock_exclusive(ifp
);
2018 if (ifp
->if_index
== param
->ifindex
)
2021 cookie
->threshold
= param
->threshold
;
2022 *out_cookie
= cookie
;
2023 if (!ifp
->if_data_threshold
||
2024 ifp
->if_data_threshold
> param
->threshold
)
2027 ifp
->if_data_threshold
= param
->threshold
;
2029 ifnet_lock_done(ifp
);
2030 ifnet_reference(ifp
);
2033 ifnet_lock_done(ifp
);
2038 * When we change the threshold to something smaller, we notify
2039 * all of our clients with a description message.
2040 * We won't send a message to the client we are currently serving
2041 * because it has no `ifnet source' yet.
2045 lck_mtx_lock(&nstat_mtx
);
2046 for (state
= nstat_controls
; state
; state
= state
->ncs_next
)
2048 lck_mtx_lock(&state
->ncs_mtx
);
2049 TAILQ_FOREACH(src
, &state
->ncs_src_queue
, ns_control_link
)
2051 if (src
->provider
!= &nstat_ifnet_provider
)
2053 nstat_control_send_description(state
, src
, 0, 0);
2055 lck_mtx_unlock(&state
->ncs_mtx
);
2057 lck_mtx_unlock(&nstat_mtx
);
2059 if (cookie
->ifp
== NULL
)
2060 OSFree(cookie
, sizeof(*cookie
), nstat_malloc_tag
);
2062 return ifp
? 0 : EINVAL
;
2067 nstat_provider_cookie_t cookie
)
2070 struct nstat_ifnet_cookie
*ifcookie
=
2071 (struct nstat_ifnet_cookie
*)cookie
;
2073 ifnet_head_lock_shared();
2074 TAILQ_FOREACH(ifp
, &ifnet_head
, if_link
)
2076 if (ifp
== ifcookie
->ifp
)
2086 nstat_provider_cookie_t cookie
,
2087 struct nstat_counts
*out_counts
,
2090 struct nstat_ifnet_cookie
*ifcookie
=
2091 (struct nstat_ifnet_cookie
*)cookie
;
2092 struct ifnet
*ifp
= ifcookie
->ifp
;
2094 if (out_gone
) *out_gone
= 0;
2096 // if the ifnet is gone, we should stop using it
2097 if (nstat_ifnet_gone(cookie
))
2099 if (out_gone
) *out_gone
= 1;
2103 bzero(out_counts
, sizeof(*out_counts
));
2104 out_counts
->nstat_rxpackets
= ifp
->if_ipackets
;
2105 out_counts
->nstat_rxbytes
= ifp
->if_ibytes
;
2106 out_counts
->nstat_txpackets
= ifp
->if_opackets
;
2107 out_counts
->nstat_txbytes
= ifp
->if_obytes
;
2108 out_counts
->nstat_cell_rxbytes
= out_counts
->nstat_cell_txbytes
= 0;
2113 nstat_ifnet_release(
2114 nstat_provider_cookie_t cookie
,
2115 __unused
int locked
)
2117 struct nstat_ifnet_cookie
*ifcookie
;
2119 nstat_control_state
*state
;
2121 uint64_t minthreshold
= UINT64_MAX
;
2124 * Find all the clients that requested a threshold
2125 * for this ifnet and re-calculate if_data_threshold.
2127 lck_mtx_lock(&nstat_mtx
);
2128 for (state
= nstat_controls
; state
; state
= state
->ncs_next
)
2130 lck_mtx_lock(&state
->ncs_mtx
);
2131 TAILQ_FOREACH(src
, &state
->ncs_src_queue
, ns_control_link
)
2133 /* Skip the provider we are about to detach. */
2134 if (src
->provider
!= &nstat_ifnet_provider
||
2135 src
->cookie
== cookie
)
2137 ifcookie
= (struct nstat_ifnet_cookie
*)src
->cookie
;
2138 if (ifcookie
->threshold
< minthreshold
)
2139 minthreshold
= ifcookie
->threshold
;
2141 lck_mtx_unlock(&state
->ncs_mtx
);
2143 lck_mtx_unlock(&nstat_mtx
);
2145 * Reset if_data_threshold or disable it.
2147 ifcookie
= (struct nstat_ifnet_cookie
*)cookie
;
2148 ifp
= ifcookie
->ifp
;
2149 if (ifnet_is_attached(ifp
, 1)) {
2150 ifnet_lock_exclusive(ifp
);
2151 if (minthreshold
== UINT64_MAX
)
2152 ifp
->if_data_threshold
= 0;
2154 ifp
->if_data_threshold
= minthreshold
;
2155 ifnet_lock_done(ifp
);
2156 ifnet_decr_iorefcnt(ifp
);
2159 OSFree(ifcookie
, sizeof(*ifcookie
), nstat_malloc_tag
);
2163 nstat_ifnet_copy_link_status(
2165 struct nstat_ifnet_descriptor
*desc
)
2167 struct if_link_status
*ifsr
= ifp
->if_link_status
;
2168 nstat_ifnet_desc_link_status
*link_status
= &desc
->link_status
;
2170 link_status
->link_status_type
= NSTAT_IFNET_DESC_LINK_STATUS_TYPE_NONE
;
2174 lck_rw_lock_shared(&ifp
->if_link_status_lock
);
2176 if (ifp
->if_type
== IFT_CELLULAR
) {
2178 nstat_ifnet_desc_cellular_status
*cell_status
= &link_status
->u
.cellular
;
2179 struct if_cellular_status_v1
*if_cell_sr
=
2180 &ifsr
->ifsr_u
.ifsr_cell
.if_cell_u
.if_status_v1
;
2182 if (ifsr
->ifsr_version
!= IF_CELLULAR_STATUS_REPORT_VERSION_1
)
2185 link_status
->link_status_type
= NSTAT_IFNET_DESC_LINK_STATUS_TYPE_CELLULAR
;
2187 if (if_cell_sr
->valid_bitmask
& IF_CELL_LINK_QUALITY_METRIC_VALID
) {
2188 cell_status
->valid_bitmask
|= NSTAT_IFNET_DESC_CELL_LINK_QUALITY_METRIC_VALID
;
2189 cell_status
->link_quality_metric
= if_cell_sr
->link_quality_metric
;
2191 if (if_cell_sr
->valid_bitmask
& IF_CELL_UL_EFFECTIVE_BANDWIDTH_VALID
) {
2192 cell_status
->valid_bitmask
|= NSTAT_IFNET_DESC_CELL_UL_EFFECTIVE_BANDWIDTH_VALID
;
2193 cell_status
->ul_effective_bandwidth
= if_cell_sr
->ul_effective_bandwidth
;
2195 if (if_cell_sr
->valid_bitmask
& IF_CELL_UL_MAX_BANDWIDTH_VALID
) {
2196 cell_status
->valid_bitmask
|= NSTAT_IFNET_DESC_CELL_UL_MAX_BANDWIDTH_VALID
;
2197 cell_status
->ul_max_bandwidth
= if_cell_sr
->ul_max_bandwidth
;
2199 if (if_cell_sr
->valid_bitmask
& IF_CELL_UL_MIN_LATENCY_VALID
) {
2200 cell_status
->valid_bitmask
|= NSTAT_IFNET_DESC_CELL_UL_MIN_LATENCY_VALID
;
2201 cell_status
->ul_min_latency
= if_cell_sr
->ul_min_latency
;
2203 if (if_cell_sr
->valid_bitmask
& IF_CELL_UL_EFFECTIVE_LATENCY_VALID
) {
2204 cell_status
->valid_bitmask
|= NSTAT_IFNET_DESC_CELL_UL_EFFECTIVE_LATENCY_VALID
;
2205 cell_status
->ul_effective_latency
= if_cell_sr
->ul_effective_latency
;
2207 if (if_cell_sr
->valid_bitmask
& IF_CELL_UL_MAX_LATENCY_VALID
) {
2208 cell_status
->valid_bitmask
|= NSTAT_IFNET_DESC_CELL_UL_MAX_LATENCY_VALID
;
2209 cell_status
->ul_max_latency
= if_cell_sr
->ul_max_latency
;
2211 if (if_cell_sr
->valid_bitmask
& IF_CELL_UL_RETXT_LEVEL_VALID
) {
2212 cell_status
->valid_bitmask
|= NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_VALID
;
2213 if (if_cell_sr
->ul_retxt_level
== IF_CELL_UL_RETXT_LEVEL_NONE
)
2214 cell_status
->ul_retxt_level
= NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_NONE
;
2215 else if (if_cell_sr
->ul_retxt_level
== IF_CELL_UL_RETXT_LEVEL_LOW
)
2216 cell_status
->ul_retxt_level
= NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_LOW
;
2217 else if (if_cell_sr
->ul_retxt_level
== IF_CELL_UL_RETXT_LEVEL_MEDIUM
)
2218 cell_status
->ul_retxt_level
= NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_MEDIUM
;
2219 else if (if_cell_sr
->ul_retxt_level
== IF_CELL_UL_RETXT_LEVEL_HIGH
)
2220 cell_status
->ul_retxt_level
= NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_HIGH
;
2222 cell_status
->valid_bitmask
&= ~NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_VALID
;
2224 if (if_cell_sr
->valid_bitmask
& IF_CELL_UL_BYTES_LOST_VALID
) {
2225 cell_status
->valid_bitmask
|= NSTAT_IFNET_DESC_CELL_UL_BYTES_LOST_VALID
;
2226 cell_status
->ul_bytes_lost
= if_cell_sr
->ul_bytes_lost
;
2228 if (if_cell_sr
->valid_bitmask
& IF_CELL_UL_MIN_QUEUE_SIZE_VALID
) {
2229 cell_status
->valid_bitmask
|= NSTAT_IFNET_DESC_CELL_UL_MIN_QUEUE_SIZE_VALID
;
2230 cell_status
->ul_min_queue_size
= if_cell_sr
->ul_min_queue_size
;
2232 if (if_cell_sr
->valid_bitmask
& IF_CELL_UL_AVG_QUEUE_SIZE_VALID
) {
2233 cell_status
->valid_bitmask
|= NSTAT_IFNET_DESC_CELL_UL_AVG_QUEUE_SIZE_VALID
;
2234 cell_status
->ul_avg_queue_size
= if_cell_sr
->ul_avg_queue_size
;
2236 if (if_cell_sr
->valid_bitmask
& IF_CELL_UL_MAX_QUEUE_SIZE_VALID
) {
2237 cell_status
->valid_bitmask
|= NSTAT_IFNET_DESC_CELL_UL_MAX_QUEUE_SIZE_VALID
;
2238 cell_status
->ul_max_queue_size
= if_cell_sr
->ul_max_queue_size
;
2240 if (if_cell_sr
->valid_bitmask
& IF_CELL_DL_EFFECTIVE_BANDWIDTH_VALID
) {
2241 cell_status
->valid_bitmask
|= NSTAT_IFNET_DESC_CELL_DL_EFFECTIVE_BANDWIDTH_VALID
;
2242 cell_status
->dl_effective_bandwidth
= if_cell_sr
->dl_effective_bandwidth
;
2244 if (if_cell_sr
->valid_bitmask
& IF_CELL_DL_MAX_BANDWIDTH_VALID
) {
2245 cell_status
->valid_bitmask
|= NSTAT_IFNET_DESC_CELL_DL_MAX_BANDWIDTH_VALID
;
2246 cell_status
->dl_max_bandwidth
= if_cell_sr
->dl_max_bandwidth
;
2248 if (if_cell_sr
->valid_bitmask
& IF_CELL_CONFIG_INACTIVITY_TIME_VALID
) {
2249 cell_status
->valid_bitmask
|= NSTAT_IFNET_DESC_CELL_CONFIG_INACTIVITY_TIME_VALID
;
2250 cell_status
->config_inactivity_time
= if_cell_sr
->config_inactivity_time
;
2252 if (if_cell_sr
->valid_bitmask
& IF_CELL_CONFIG_BACKOFF_TIME_VALID
) {
2253 cell_status
->valid_bitmask
|= NSTAT_IFNET_DESC_CELL_CONFIG_BACKOFF_TIME_VALID
;
2254 cell_status
->config_backoff_time
= if_cell_sr
->config_backoff_time
;
2256 if (if_cell_sr
->valid_bitmask
& IF_CELL_UL_MSS_RECOMMENDED_VALID
) {
2257 cell_status
->valid_bitmask
|= NSTAT_IFNET_DESC_CELL_MSS_RECOMMENDED_VALID
;
2258 cell_status
->mss_recommended
= if_cell_sr
->mss_recommended
;
2260 } else if (ifp
->if_subfamily
== IFNET_SUBFAMILY_WIFI
) {
2262 nstat_ifnet_desc_wifi_status
*wifi_status
= &link_status
->u
.wifi
;
2263 struct if_wifi_status_v1
*if_wifi_sr
=
2264 &ifsr
->ifsr_u
.ifsr_wifi
.if_wifi_u
.if_status_v1
;
2266 if (ifsr
->ifsr_version
!= IF_WIFI_STATUS_REPORT_VERSION_1
)
2269 link_status
->link_status_type
= NSTAT_IFNET_DESC_LINK_STATUS_TYPE_WIFI
;
2271 if (if_wifi_sr
->valid_bitmask
& IF_WIFI_LINK_QUALITY_METRIC_VALID
) {
2272 wifi_status
->valid_bitmask
|= NSTAT_IFNET_DESC_WIFI_LINK_QUALITY_METRIC_VALID
;
2273 wifi_status
->link_quality_metric
= if_wifi_sr
->link_quality_metric
;
2275 if (if_wifi_sr
->valid_bitmask
& IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID
) {
2276 wifi_status
->valid_bitmask
|= NSTAT_IFNET_DESC_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID
;
2277 wifi_status
->ul_effective_bandwidth
= if_wifi_sr
->ul_effective_bandwidth
;
2279 if (if_wifi_sr
->valid_bitmask
& IF_WIFI_UL_MAX_BANDWIDTH_VALID
) {
2280 wifi_status
->valid_bitmask
|= NSTAT_IFNET_DESC_WIFI_UL_MAX_BANDWIDTH_VALID
;
2281 wifi_status
->ul_max_bandwidth
= if_wifi_sr
->ul_max_bandwidth
;
2283 if (if_wifi_sr
->valid_bitmask
& IF_WIFI_UL_MIN_LATENCY_VALID
) {
2284 wifi_status
->valid_bitmask
|= NSTAT_IFNET_DESC_WIFI_UL_MIN_LATENCY_VALID
;
2285 wifi_status
->ul_min_latency
= if_wifi_sr
->ul_min_latency
;
2287 if (if_wifi_sr
->valid_bitmask
& IF_WIFI_UL_EFFECTIVE_LATENCY_VALID
) {
2288 wifi_status
->valid_bitmask
|= NSTAT_IFNET_DESC_WIFI_UL_EFFECTIVE_LATENCY_VALID
;
2289 wifi_status
->ul_effective_latency
= if_wifi_sr
->ul_effective_latency
;
2291 if (if_wifi_sr
->valid_bitmask
& IF_WIFI_UL_MAX_LATENCY_VALID
) {
2292 wifi_status
->valid_bitmask
|= NSTAT_IFNET_DESC_WIFI_UL_MAX_LATENCY_VALID
;
2293 wifi_status
->ul_max_latency
= if_wifi_sr
->ul_max_latency
;
2295 if (if_wifi_sr
->valid_bitmask
& IF_WIFI_UL_RETXT_LEVEL_VALID
) {
2296 wifi_status
->valid_bitmask
|= NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_VALID
;
2297 if (if_wifi_sr
->ul_retxt_level
== IF_WIFI_UL_RETXT_LEVEL_NONE
)
2298 wifi_status
->ul_retxt_level
= NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_NONE
;
2299 else if (if_wifi_sr
->ul_retxt_level
== IF_WIFI_UL_RETXT_LEVEL_LOW
)
2300 wifi_status
->ul_retxt_level
= NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_LOW
;
2301 else if (if_wifi_sr
->ul_retxt_level
== IF_WIFI_UL_RETXT_LEVEL_MEDIUM
)
2302 wifi_status
->ul_retxt_level
= NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_MEDIUM
;
2303 else if (if_wifi_sr
->ul_retxt_level
== IF_WIFI_UL_RETXT_LEVEL_HIGH
)
2304 wifi_status
->ul_retxt_level
= NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_HIGH
;
2306 wifi_status
->valid_bitmask
&= ~NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_VALID
;
2308 if (if_wifi_sr
->valid_bitmask
& IF_WIFI_UL_BYTES_LOST_VALID
) {
2309 wifi_status
->valid_bitmask
|= NSTAT_IFNET_DESC_WIFI_UL_BYTES_LOST_VALID
;
2310 wifi_status
->ul_bytes_lost
= if_wifi_sr
->ul_bytes_lost
;
2312 if (if_wifi_sr
->valid_bitmask
& IF_WIFI_UL_ERROR_RATE_VALID
) {
2313 wifi_status
->valid_bitmask
|= NSTAT_IFNET_DESC_WIFI_UL_ERROR_RATE_VALID
;
2314 wifi_status
->ul_error_rate
= if_wifi_sr
->ul_error_rate
;
2316 if (if_wifi_sr
->valid_bitmask
& IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID
) {
2317 wifi_status
->valid_bitmask
|= NSTAT_IFNET_DESC_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID
;
2318 wifi_status
->dl_effective_bandwidth
= if_wifi_sr
->dl_effective_bandwidth
;
2320 if (if_wifi_sr
->valid_bitmask
& IF_WIFI_DL_MAX_BANDWIDTH_VALID
) {
2321 wifi_status
->valid_bitmask
|= NSTAT_IFNET_DESC_WIFI_DL_MAX_BANDWIDTH_VALID
;
2322 wifi_status
->dl_max_bandwidth
= if_wifi_sr
->dl_max_bandwidth
;
2324 if (if_wifi_sr
->valid_bitmask
& IF_WIFI_DL_MIN_LATENCY_VALID
) {
2325 wifi_status
->valid_bitmask
|= NSTAT_IFNET_DESC_WIFI_DL_MIN_LATENCY_VALID
;
2326 wifi_status
->dl_min_latency
= if_wifi_sr
->dl_min_latency
;
2328 if (if_wifi_sr
->valid_bitmask
& IF_WIFI_DL_EFFECTIVE_LATENCY_VALID
) {
2329 wifi_status
->valid_bitmask
|= NSTAT_IFNET_DESC_WIFI_DL_EFFECTIVE_LATENCY_VALID
;
2330 wifi_status
->dl_effective_latency
= if_wifi_sr
->dl_effective_latency
;
2332 if (if_wifi_sr
->valid_bitmask
& IF_WIFI_DL_MAX_LATENCY_VALID
) {
2333 wifi_status
->valid_bitmask
|= NSTAT_IFNET_DESC_WIFI_DL_MAX_LATENCY_VALID
;
2334 wifi_status
->dl_max_latency
= if_wifi_sr
->dl_max_latency
;
2336 if (if_wifi_sr
->valid_bitmask
& IF_WIFI_DL_ERROR_RATE_VALID
) {
2337 wifi_status
->valid_bitmask
|= NSTAT_IFNET_DESC_WIFI_DL_ERROR_RATE_VALID
;
2338 wifi_status
->dl_error_rate
= if_wifi_sr
->dl_error_rate
;
2340 if (if_wifi_sr
->valid_bitmask
& IF_WIFI_CONFIG_FREQUENCY_VALID
) {
2341 wifi_status
->valid_bitmask
|= NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_VALID
;
2342 if (if_wifi_sr
->config_frequency
== IF_WIFI_CONFIG_FREQUENCY_2_4_GHZ
)
2343 wifi_status
->config_frequency
= NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_2_4_GHZ
;
2344 else if (if_wifi_sr
->config_frequency
== IF_WIFI_CONFIG_FREQUENCY_5_0_GHZ
)
2345 wifi_status
->config_frequency
= NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_5_0_GHZ
;
2347 wifi_status
->valid_bitmask
&= ~NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_VALID
;
2349 if (if_wifi_sr
->valid_bitmask
& IF_WIFI_CONFIG_MULTICAST_RATE_VALID
) {
2350 wifi_status
->valid_bitmask
|= NSTAT_IFNET_DESC_WIFI_CONFIG_MULTICAST_RATE_VALID
;
2351 wifi_status
->config_multicast_rate
= if_wifi_sr
->config_multicast_rate
;
2353 if (if_wifi_sr
->valid_bitmask
& IF_WIFI_CONFIG_SCAN_COUNT_VALID
) {
2354 wifi_status
->valid_bitmask
|= NSTAT_IFNET_DESC_WIFI_CONFIG_SCAN_COUNT_VALID
;
2355 wifi_status
->scan_count
= if_wifi_sr
->scan_count
;
2357 if (if_wifi_sr
->valid_bitmask
& IF_WIFI_CONFIG_SCAN_DURATION_VALID
) {
2358 wifi_status
->valid_bitmask
|= NSTAT_IFNET_DESC_WIFI_CONFIG_SCAN_DURATION_VALID
;
2359 wifi_status
->scan_duration
= if_wifi_sr
->scan_duration
;
2364 lck_rw_done(&ifp
->if_link_status_lock
);
/* net_uptime() value at the last ECN stats report; used to rate-limit reporting. */
2367 static u_int64_t nstat_ifnet_last_report_time
= 0;
/* Minimum interval (in uptime units) between ECN reports; defined in the TCP subsystem. */
2368 extern int tcp_report_stats_interval
;
2371 nstat_ifnet_compute_percentages(struct if_tcp_ecn_perf_stat
*ifst
)
2373 /* Retransmit percentage */
2374 if (ifst
->total_rxmitpkts
> 0 && ifst
->total_txpkts
> 0) {
2375 /* shift by 10 for precision */
2376 ifst
->rxmit_percent
=
2377 ((ifst
->total_rxmitpkts
<< 10) * 100) / ifst
->total_txpkts
;
2379 ifst
->rxmit_percent
= 0;
2382 /* Out-of-order percentage */
2383 if (ifst
->total_oopkts
> 0 && ifst
->total_rxpkts
> 0) {
2384 /* shift by 10 for precision */
2386 ((ifst
->total_oopkts
<< 10) * 100) / ifst
->total_rxpkts
;
2388 ifst
->oo_percent
= 0;
2391 /* Reorder percentage */
2392 if (ifst
->total_reorderpkts
> 0 &&
2393 (ifst
->total_txpkts
+ ifst
->total_rxpkts
) > 0) {
2394 /* shift by 10 for precision */
2395 ifst
->reorder_percent
=
2396 ((ifst
->total_reorderpkts
<< 10) * 100) /
2397 (ifst
->total_txpkts
+ ifst
->total_rxpkts
);
2399 ifst
->reorder_percent
= 0;
2404 nstat_ifnet_normalize_counter(struct if_tcp_ecn_stat
*if_st
)
2406 u_int64_t ecn_on_conn
, ecn_off_conn
;
2410 ecn_on_conn
= if_st
->ecn_client_success
+
2411 if_st
->ecn_server_success
;
2412 ecn_off_conn
= if_st
->ecn_off_conn
+
2413 (if_st
->ecn_client_setup
- if_st
->ecn_client_success
) +
2414 (if_st
->ecn_server_setup
- if_st
->ecn_server_success
);
2417 * report sack episodes, rst_drop and rxmit_drop
2418 * as a ratio per connection, shift by 10 for precision
2420 if (ecn_on_conn
> 0) {
2421 if_st
->ecn_on
.sack_episodes
=
2422 (if_st
->ecn_on
.sack_episodes
<< 10) / ecn_on_conn
;
2423 if_st
->ecn_on
.rst_drop
=
2424 (if_st
->ecn_on
.rst_drop
<< 10) * 100 / ecn_on_conn
;
2425 if_st
->ecn_on
.rxmit_drop
=
2426 (if_st
->ecn_on
.rxmit_drop
<< 10) * 100 / ecn_on_conn
;
2428 /* set to zero, just in case */
2429 if_st
->ecn_on
.sack_episodes
= 0;
2430 if_st
->ecn_on
.rst_drop
= 0;
2431 if_st
->ecn_on
.rxmit_drop
= 0;
2434 if (ecn_off_conn
> 0) {
2435 if_st
->ecn_off
.sack_episodes
=
2436 (if_st
->ecn_off
.sack_episodes
<< 10) / ecn_off_conn
;
2437 if_st
->ecn_off
.rst_drop
=
2438 (if_st
->ecn_off
.rst_drop
<< 10) * 100 / ecn_off_conn
;
2439 if_st
->ecn_off
.rxmit_drop
=
2440 (if_st
->ecn_off
.rxmit_drop
<< 10) * 100 / ecn_off_conn
;
2442 if_st
->ecn_off
.sack_episodes
= 0;
2443 if_st
->ecn_off
.rst_drop
= 0;
2444 if_st
->ecn_off
.rxmit_drop
= 0;
2446 if_st
->ecn_total_conn
= ecn_off_conn
+ ecn_on_conn
;
2450 nstat_ifnet_report_ecn_stats(void)
2452 u_int64_t uptime
, last_report_time
;
2453 struct nstat_sysinfo_data data
;
2454 struct nstat_sysinfo_ifnet_ecn_stats
*st
;
2457 uptime
= net_uptime();
2459 if ((int)(uptime
- nstat_ifnet_last_report_time
) <
2460 tcp_report_stats_interval
)
2463 last_report_time
= nstat_ifnet_last_report_time
;
2464 nstat_ifnet_last_report_time
= uptime
;
2465 data
.flags
= NSTAT_SYSINFO_IFNET_ECN_STATS
;
2466 st
= &data
.u
.ifnet_ecn_stats
;
2468 ifnet_head_lock_shared();
2469 TAILQ_FOREACH(ifp
, &ifnet_head
, if_link
) {
2470 if (ifp
->if_ipv4_stat
== NULL
|| ifp
->if_ipv6_stat
== NULL
)
2473 if (!IF_FULLY_ATTACHED(ifp
))
2476 /* Limit reporting to Wifi, Ethernet and cellular. */
2477 if (!(IFNET_IS_ETHERNET(ifp
) || IFNET_IS_CELLULAR(ifp
)))
2480 bzero(st
, sizeof(*st
));
2481 if (IFNET_IS_CELLULAR(ifp
)) {
2482 st
->ifnet_type
= NSTAT_IFNET_ECN_TYPE_CELLULAR
;
2483 } else if (IFNET_IS_WIFI(ifp
)) {
2484 st
->ifnet_type
= NSTAT_IFNET_ECN_TYPE_WIFI
;
2486 st
->ifnet_type
= NSTAT_IFNET_ECN_TYPE_ETHERNET
;
2488 data
.unsent_data_cnt
= ifp
->if_unsent_data_cnt
;
2489 /* skip if there was no update since last report */
2490 if (ifp
->if_ipv4_stat
->timestamp
<= 0 ||
2491 ifp
->if_ipv4_stat
->timestamp
< last_report_time
)
2493 st
->ifnet_proto
= NSTAT_IFNET_ECN_PROTO_IPV4
;
2494 /* compute percentages using packet counts */
2495 nstat_ifnet_compute_percentages(&ifp
->if_ipv4_stat
->ecn_on
);
2496 nstat_ifnet_compute_percentages(&ifp
->if_ipv4_stat
->ecn_off
);
2497 nstat_ifnet_normalize_counter(ifp
->if_ipv4_stat
);
2498 bcopy(ifp
->if_ipv4_stat
, &st
->ecn_stat
,
2499 sizeof(st
->ecn_stat
));
2500 nstat_sysinfo_send_data(&data
);
2501 bzero(ifp
->if_ipv4_stat
, sizeof(*ifp
->if_ipv4_stat
));
2504 /* skip if there was no update since last report */
2505 if (ifp
->if_ipv6_stat
->timestamp
<= 0 ||
2506 ifp
->if_ipv6_stat
->timestamp
< last_report_time
)
2508 st
->ifnet_proto
= NSTAT_IFNET_ECN_PROTO_IPV6
;
2510 /* compute percentages using packet counts */
2511 nstat_ifnet_compute_percentages(&ifp
->if_ipv6_stat
->ecn_on
);
2512 nstat_ifnet_compute_percentages(&ifp
->if_ipv6_stat
->ecn_off
);
2513 nstat_ifnet_normalize_counter(ifp
->if_ipv6_stat
);
2514 bcopy(ifp
->if_ipv6_stat
, &st
->ecn_stat
,
2515 sizeof(st
->ecn_stat
));
2516 nstat_sysinfo_send_data(&data
);
2518 /* Zero the stats in ifp */
2519 bzero(ifp
->if_ipv6_stat
, sizeof(*ifp
->if_ipv6_stat
));
2525 /* Some thresholds to determine Low Internet mode */
2526 #define NSTAT_LIM_DL_MAX_BANDWIDTH_THRESHOLD 1000000 /* 1 Mbps */
2527 #define NSTAT_LIM_UL_MAX_BANDWIDTH_THRESHOLD 500000 /* 500 Kbps */
2528 #define NSTAT_LIM_UL_MIN_RTT_THRESHOLD 1000 /* 1 second */
2529 #define NSTAT_LIM_CONN_TIMEOUT_PERCENT_THRESHOLD (10 << 10) /* 10 percent connection timeouts */
2530 #define NSTAT_LIM_PACKET_LOSS_PERCENT_THRESHOLD (2 << 10) /* 2 percent packet loss rate */
2533 nstat_lim_activity_check(struct if_lim_perf_stat
*st
)
2535 /* check that the current activity is enough to report stats */
2536 if (st
->lim_total_txpkts
< nstat_lim_min_tx_pkts
||
2537 st
->lim_total_rxpkts
< nstat_lim_min_rx_pkts
||
2538 st
->lim_conn_attempts
== 0)
2542 * Compute percentages if there was enough activity. Use
2543 * shift-left by 10 to preserve precision.
2545 st
->lim_packet_loss_percent
= ((st
->lim_total_retxpkts
<< 10) /
2546 st
->lim_total_txpkts
) * 100;
2548 st
->lim_packet_ooo_percent
= ((st
->lim_total_oopkts
<< 10) /
2549 st
->lim_total_rxpkts
) * 100;
2551 st
->lim_conn_timeout_percent
= ((st
->lim_conn_timeouts
<< 10) /
2552 st
->lim_conn_attempts
) * 100;
2555 * Is Low Internet detected? First order metrics are bandwidth
2556 * and RTT. If these metrics are below the minimum thresholds
2557 * defined then the network attachment can be classified as
2558 * having Low Internet capacity.
2560 * High connection timeout rate also indicates Low Internet
2563 if (st
->lim_dl_max_bandwidth
> 0 &&
2564 st
->lim_dl_max_bandwidth
<= NSTAT_LIM_DL_MAX_BANDWIDTH_THRESHOLD
)
2565 st
->lim_dl_detected
= 1;
2567 if ((st
->lim_ul_max_bandwidth
> 0 &&
2568 st
->lim_ul_max_bandwidth
<= NSTAT_LIM_UL_MAX_BANDWIDTH_THRESHOLD
) ||
2569 st
->lim_rtt_min
>= NSTAT_LIM_UL_MIN_RTT_THRESHOLD
)
2570 st
->lim_ul_detected
= 1;
2572 if (st
->lim_conn_attempts
> 20 &&
2573 st
->lim_conn_timeout_percent
>=
2574 NSTAT_LIM_CONN_TIMEOUT_PERCENT_THRESHOLD
)
2575 st
->lim_ul_detected
= 1;
2577 * Second order metrics: If there was high packet loss even after
2578 * using delay based algorithms then we classify it as Low Internet
2581 if (st
->lim_bk_txpkts
>= nstat_lim_min_tx_pkts
&&
2582 st
->lim_packet_loss_percent
>=
2583 NSTAT_LIM_PACKET_LOSS_PERCENT_THRESHOLD
)
2584 st
->lim_ul_detected
= 1;
/* net_uptime() value at the last LIM stats report; used to rate-limit reporting. */
2588 static u_int64_t nstat_lim_last_report_time
= 0;
2590 nstat_ifnet_report_lim_stats(void)
2593 struct nstat_sysinfo_data data
;
2594 struct nstat_sysinfo_lim_stats
*st
;
2598 uptime
= net_uptime();
2600 if ((u_int32_t
)(uptime
- nstat_lim_last_report_time
) <
2604 nstat_lim_last_report_time
= uptime
;
2605 data
.flags
= NSTAT_SYSINFO_LIM_STATS
;
2606 st
= &data
.u
.lim_stats
;
2607 data
.unsent_data_cnt
= 0;
2609 ifnet_head_lock_shared();
2610 TAILQ_FOREACH(ifp
, &ifnet_head
, if_link
) {
2611 if (!IF_FULLY_ATTACHED(ifp
))
2614 /* Limit reporting to Wifi, Ethernet and cellular */
2615 if (!(IFNET_IS_ETHERNET(ifp
) || IFNET_IS_CELLULAR(ifp
)))
2618 if (!nstat_lim_activity_check(&ifp
->if_lim_stat
))
2621 bzero(st
, sizeof(*st
));
2622 st
->ifnet_siglen
= sizeof (st
->ifnet_signature
);
2623 err
= ifnet_get_netsignature(ifp
, AF_INET
,
2624 (u_int8_t
*)&st
->ifnet_siglen
, NULL
,
2625 st
->ifnet_signature
);
2627 err
= ifnet_get_netsignature(ifp
, AF_INET6
,
2628 (u_int8_t
*)&st
->ifnet_siglen
, NULL
,
2629 st
->ifnet_signature
);
2633 ifnet_lock_shared(ifp
);
2634 if (IFNET_IS_CELLULAR(ifp
)) {
2635 st
->ifnet_type
= NSTAT_IFNET_DESC_LINK_STATUS_TYPE_CELLULAR
;
2636 } else if (IFNET_IS_WIFI(ifp
)) {
2637 st
->ifnet_type
= NSTAT_IFNET_DESC_LINK_STATUS_TYPE_WIFI
;
2639 st
->ifnet_type
= NSTAT_IFNET_DESC_LINK_STATUS_TYPE_ETHERNET
;
2641 bcopy(&ifp
->if_lim_stat
, &st
->lim_stat
,
2642 sizeof(st
->lim_stat
));
2644 /* Zero the stats in ifp */
2645 bzero(&ifp
->if_lim_stat
, sizeof(ifp
->if_lim_stat
));
2646 ifnet_lock_done(ifp
);
2647 nstat_sysinfo_send_data(&data
);
2653 nstat_ifnet_copy_descriptor(
2654 nstat_provider_cookie_t cookie
,
2658 nstat_ifnet_descriptor
*desc
= (nstat_ifnet_descriptor
*)data
;
2659 struct nstat_ifnet_cookie
*ifcookie
=
2660 (struct nstat_ifnet_cookie
*)cookie
;
2661 struct ifnet
*ifp
= ifcookie
->ifp
;
2663 if (len
< sizeof(nstat_ifnet_descriptor
))
2666 if (nstat_ifnet_gone(cookie
))
2669 bzero(desc
, sizeof(*desc
));
2670 ifnet_lock_shared(ifp
);
2671 strlcpy(desc
->name
, ifp
->if_xname
, sizeof(desc
->name
));
2672 desc
->ifindex
= ifp
->if_index
;
2673 desc
->threshold
= ifp
->if_data_threshold
;
2674 desc
->type
= ifp
->if_type
;
2675 if (ifp
->if_desc
.ifd_len
< sizeof(desc
->description
))
2676 memcpy(desc
->description
, ifp
->if_desc
.ifd_desc
,
2677 sizeof(desc
->description
));
2678 nstat_ifnet_copy_link_status(ifp
, desc
);
2679 ifnet_lock_done(ifp
);
2684 nstat_init_ifnet_provider(void)
2686 bzero(&nstat_ifnet_provider
, sizeof(nstat_ifnet_provider
));
2687 nstat_ifnet_provider
.nstat_provider_id
= NSTAT_PROVIDER_IFNET
;
2688 nstat_ifnet_provider
.nstat_descriptor_length
= sizeof(nstat_ifnet_descriptor
);
2689 nstat_ifnet_provider
.nstat_lookup
= nstat_ifnet_lookup
;
2690 nstat_ifnet_provider
.nstat_gone
= nstat_ifnet_gone
;
2691 nstat_ifnet_provider
.nstat_counts
= nstat_ifnet_counts
;
2692 nstat_ifnet_provider
.nstat_watcher_add
= NULL
;
2693 nstat_ifnet_provider
.nstat_watcher_remove
= NULL
;
2694 nstat_ifnet_provider
.nstat_copy_descriptor
= nstat_ifnet_copy_descriptor
;
2695 nstat_ifnet_provider
.nstat_release
= nstat_ifnet_release
;
2696 nstat_ifnet_provider
.next
= nstat_providers
;
2697 nstat_providers
= &nstat_ifnet_provider
;
2700 __private_extern__
void
2701 nstat_ifnet_threshold_reached(unsigned int ifindex
)
2703 nstat_control_state
*state
;
2706 struct nstat_ifnet_cookie
*ifcookie
;
2708 lck_mtx_lock(&nstat_mtx
);
2709 for (state
= nstat_controls
; state
; state
= state
->ncs_next
)
2711 lck_mtx_lock(&state
->ncs_mtx
);
2712 TAILQ_FOREACH(src
, &state
->ncs_src_queue
, ns_control_link
)
2714 if (src
->provider
!= &nstat_ifnet_provider
)
2716 ifcookie
= (struct nstat_ifnet_cookie
*)src
->cookie
;
2717 ifp
= ifcookie
->ifp
;
2718 if (ifp
->if_index
!= ifindex
)
2720 nstat_control_send_counts(state
, src
, 0, 0, NULL
);
2722 lck_mtx_unlock(&state
->ncs_mtx
);
2724 lck_mtx_unlock(&nstat_mtx
);
2727 #pragma mark -- Sysinfo --
2729 nstat_set_keyval_scalar(nstat_sysinfo_keyval
*kv
, int key
, u_int32_t val
)
2731 kv
->nstat_sysinfo_key
= key
;
2732 kv
->nstat_sysinfo_flags
= NSTAT_SYSINFO_FLAG_SCALAR
;
2733 kv
->u
.nstat_sysinfo_scalar
= val
;
2734 kv
->nstat_sysinfo_valsize
= sizeof(kv
->u
.nstat_sysinfo_scalar
);
2738 nstat_set_keyval_string(nstat_sysinfo_keyval
*kv
, int key
, u_int8_t
*buf
,
2741 kv
->nstat_sysinfo_key
= key
;
2742 kv
->nstat_sysinfo_flags
= NSTAT_SYSINFO_FLAG_STRING
;
2743 kv
->nstat_sysinfo_valsize
= min(len
,
2744 NSTAT_SYSINFO_KEYVAL_STRING_MAXSIZE
);
2745 bcopy(buf
, kv
->u
.nstat_sysinfo_string
, kv
->nstat_sysinfo_valsize
);
2749 nstat_sysinfo_send_data_internal(
2750 nstat_control_state
*control
,
2751 nstat_sysinfo_data
*data
)
2753 nstat_msg_sysinfo_counts
*syscnt
= NULL
;
2754 size_t allocsize
= 0, countsize
= 0, nkeyvals
= 0, finalsize
= 0;
2755 nstat_sysinfo_keyval
*kv
;
2759 allocsize
= offsetof(nstat_msg_sysinfo_counts
, counts
);
2760 countsize
= offsetof(nstat_sysinfo_counts
, nstat_sysinfo_keyvals
);
2761 finalsize
= allocsize
;
2763 /* get number of key-vals for each kind of stat */
2764 switch (data
->flags
)
2766 case NSTAT_SYSINFO_MBUF_STATS
:
2767 nkeyvals
= sizeof(struct nstat_sysinfo_mbuf_stats
) /
2770 case NSTAT_SYSINFO_TCP_STATS
:
2771 nkeyvals
= NSTAT_SYSINFO_TCP_STATS_COUNT
;
2773 case NSTAT_SYSINFO_IFNET_ECN_STATS
:
2774 nkeyvals
= (sizeof(struct if_tcp_ecn_stat
) /
2777 /* Two more keys for ifnet type and proto */
2780 /* One key for unsent data. */
2783 case NSTAT_SYSINFO_LIM_STATS
:
2784 nkeyvals
= NSTAT_LIM_STAT_KEYVAL_COUNT
;
2786 case NSTAT_SYSINFO_NET_API_STATS
:
2787 nkeyvals
= NSTAT_NET_API_STAT_KEYVAL_COUNT
;
2792 countsize
+= sizeof(nstat_sysinfo_keyval
) * nkeyvals
;
2793 allocsize
+= countsize
;
2795 syscnt
= OSMalloc(allocsize
, nstat_malloc_tag
);
2798 bzero(syscnt
, allocsize
);
2800 kv
= (nstat_sysinfo_keyval
*) &syscnt
->counts
.nstat_sysinfo_keyvals
;
2801 switch (data
->flags
)
2803 case NSTAT_SYSINFO_MBUF_STATS
:
2805 nstat_set_keyval_scalar(&kv
[i
++],
2806 NSTAT_SYSINFO_KEY_MBUF_256B_TOTAL
,
2807 data
->u
.mb_stats
.total_256b
);
2808 nstat_set_keyval_scalar(&kv
[i
++],
2809 NSTAT_SYSINFO_KEY_MBUF_2KB_TOTAL
,
2810 data
->u
.mb_stats
.total_2kb
);
2811 nstat_set_keyval_scalar(&kv
[i
++],
2812 NSTAT_SYSINFO_KEY_MBUF_4KB_TOTAL
,
2813 data
->u
.mb_stats
.total_4kb
);
2814 nstat_set_keyval_scalar(&kv
[i
++],
2815 NSTAT_SYSINFO_MBUF_16KB_TOTAL
,
2816 data
->u
.mb_stats
.total_16kb
);
2817 nstat_set_keyval_scalar(&kv
[i
++],
2818 NSTAT_SYSINFO_KEY_SOCK_MBCNT
,
2819 data
->u
.mb_stats
.sbmb_total
);
2820 nstat_set_keyval_scalar(&kv
[i
++],
2821 NSTAT_SYSINFO_KEY_SOCK_ATMBLIMIT
,
2822 data
->u
.mb_stats
.sb_atmbuflimit
);
2823 nstat_set_keyval_scalar(&kv
[i
++],
2824 NSTAT_SYSINFO_MBUF_DRAIN_CNT
,
2825 data
->u
.mb_stats
.draincnt
);
2826 nstat_set_keyval_scalar(&kv
[i
++],
2827 NSTAT_SYSINFO_MBUF_MEM_RELEASED
,
2828 data
->u
.mb_stats
.memreleased
);
2829 nstat_set_keyval_scalar(&kv
[i
++],
2830 NSTAT_SYSINFO_KEY_SOCK_MBFLOOR
,
2831 data
->u
.mb_stats
.sbmb_floor
);
2832 VERIFY(i
== nkeyvals
);
2835 case NSTAT_SYSINFO_TCP_STATS
:
2837 nstat_set_keyval_scalar(&kv
[i
++],
2838 NSTAT_SYSINFO_KEY_IPV4_AVGRTT
,
2839 data
->u
.tcp_stats
.ipv4_avgrtt
);
2840 nstat_set_keyval_scalar(&kv
[i
++],
2841 NSTAT_SYSINFO_KEY_IPV6_AVGRTT
,
2842 data
->u
.tcp_stats
.ipv6_avgrtt
);
2843 nstat_set_keyval_scalar(&kv
[i
++],
2844 NSTAT_SYSINFO_KEY_SEND_PLR
,
2845 data
->u
.tcp_stats
.send_plr
);
2846 nstat_set_keyval_scalar(&kv
[i
++],
2847 NSTAT_SYSINFO_KEY_RECV_PLR
,
2848 data
->u
.tcp_stats
.recv_plr
);
2849 nstat_set_keyval_scalar(&kv
[i
++],
2850 NSTAT_SYSINFO_KEY_SEND_TLRTO
,
2851 data
->u
.tcp_stats
.send_tlrto_rate
);
2852 nstat_set_keyval_scalar(&kv
[i
++],
2853 NSTAT_SYSINFO_KEY_SEND_REORDERRATE
,
2854 data
->u
.tcp_stats
.send_reorder_rate
);
2855 nstat_set_keyval_scalar(&kv
[i
++],
2856 NSTAT_SYSINFO_CONNECTION_ATTEMPTS
,
2857 data
->u
.tcp_stats
.connection_attempts
);
2858 nstat_set_keyval_scalar(&kv
[i
++],
2859 NSTAT_SYSINFO_CONNECTION_ACCEPTS
,
2860 data
->u
.tcp_stats
.connection_accepts
);
2861 nstat_set_keyval_scalar(&kv
[i
++],
2862 NSTAT_SYSINFO_ECN_CLIENT_ENABLED
,
2863 data
->u
.tcp_stats
.ecn_client_enabled
);
2864 nstat_set_keyval_scalar(&kv
[i
++],
2865 NSTAT_SYSINFO_ECN_SERVER_ENABLED
,
2866 data
->u
.tcp_stats
.ecn_server_enabled
);
2867 nstat_set_keyval_scalar(&kv
[i
++],
2868 NSTAT_SYSINFO_ECN_CLIENT_SETUP
,
2869 data
->u
.tcp_stats
.ecn_client_setup
);
2870 nstat_set_keyval_scalar(&kv
[i
++],
2871 NSTAT_SYSINFO_ECN_SERVER_SETUP
,
2872 data
->u
.tcp_stats
.ecn_server_setup
);
2873 nstat_set_keyval_scalar(&kv
[i
++],
2874 NSTAT_SYSINFO_ECN_CLIENT_SUCCESS
,
2875 data
->u
.tcp_stats
.ecn_client_success
);
2876 nstat_set_keyval_scalar(&kv
[i
++],
2877 NSTAT_SYSINFO_ECN_SERVER_SUCCESS
,
2878 data
->u
.tcp_stats
.ecn_server_success
);
2879 nstat_set_keyval_scalar(&kv
[i
++],
2880 NSTAT_SYSINFO_ECN_NOT_SUPPORTED
,
2881 data
->u
.tcp_stats
.ecn_not_supported
);
2882 nstat_set_keyval_scalar(&kv
[i
++],
2883 NSTAT_SYSINFO_ECN_LOST_SYN
,
2884 data
->u
.tcp_stats
.ecn_lost_syn
);
2885 nstat_set_keyval_scalar(&kv
[i
++],
2886 NSTAT_SYSINFO_ECN_LOST_SYNACK
,
2887 data
->u
.tcp_stats
.ecn_lost_synack
);
2888 nstat_set_keyval_scalar(&kv
[i
++],
2889 NSTAT_SYSINFO_ECN_RECV_CE
,
2890 data
->u
.tcp_stats
.ecn_recv_ce
);
2891 nstat_set_keyval_scalar(&kv
[i
++],
2892 NSTAT_SYSINFO_ECN_RECV_ECE
,
2893 data
->u
.tcp_stats
.ecn_recv_ece
);
2894 nstat_set_keyval_scalar(&kv
[i
++],
2895 NSTAT_SYSINFO_ECN_SENT_ECE
,
2896 data
->u
.tcp_stats
.ecn_sent_ece
);
2897 nstat_set_keyval_scalar(&kv
[i
++],
2898 NSTAT_SYSINFO_ECN_CONN_RECV_CE
,
2899 data
->u
.tcp_stats
.ecn_conn_recv_ce
);
2900 nstat_set_keyval_scalar(&kv
[i
++],
2901 NSTAT_SYSINFO_ECN_CONN_RECV_ECE
,
2902 data
->u
.tcp_stats
.ecn_conn_recv_ece
);
2903 nstat_set_keyval_scalar(&kv
[i
++],
2904 NSTAT_SYSINFO_ECN_CONN_PLNOCE
,
2905 data
->u
.tcp_stats
.ecn_conn_plnoce
);
2906 nstat_set_keyval_scalar(&kv
[i
++],
2907 NSTAT_SYSINFO_ECN_CONN_PL_CE
,
2908 data
->u
.tcp_stats
.ecn_conn_pl_ce
);
2909 nstat_set_keyval_scalar(&kv
[i
++],
2910 NSTAT_SYSINFO_ECN_CONN_NOPL_CE
,
2911 data
->u
.tcp_stats
.ecn_conn_nopl_ce
);
2912 nstat_set_keyval_scalar(&kv
[i
++],
2913 NSTAT_SYSINFO_ECN_FALLBACK_SYNLOSS
,
2914 data
->u
.tcp_stats
.ecn_fallback_synloss
);
2915 nstat_set_keyval_scalar(&kv
[i
++],
2916 NSTAT_SYSINFO_ECN_FALLBACK_REORDER
,
2917 data
->u
.tcp_stats
.ecn_fallback_reorder
);
2918 nstat_set_keyval_scalar(&kv
[i
++],
2919 NSTAT_SYSINFO_ECN_FALLBACK_CE
,
2920 data
->u
.tcp_stats
.ecn_fallback_ce
);
2921 nstat_set_keyval_scalar(&kv
[i
++],
2922 NSTAT_SYSINFO_TFO_SYN_DATA_RCV
,
2923 data
->u
.tcp_stats
.tfo_syn_data_rcv
);
2924 nstat_set_keyval_scalar(&kv
[i
++],
2925 NSTAT_SYSINFO_TFO_COOKIE_REQ_RCV
,
2926 data
->u
.tcp_stats
.tfo_cookie_req_rcv
);
2927 nstat_set_keyval_scalar(&kv
[i
++],
2928 NSTAT_SYSINFO_TFO_COOKIE_SENT
,
2929 data
->u
.tcp_stats
.tfo_cookie_sent
);
2930 nstat_set_keyval_scalar(&kv
[i
++],
2931 NSTAT_SYSINFO_TFO_COOKIE_INVALID
,
2932 data
->u
.tcp_stats
.tfo_cookie_invalid
);
2933 nstat_set_keyval_scalar(&kv
[i
++],
2934 NSTAT_SYSINFO_TFO_COOKIE_REQ
,
2935 data
->u
.tcp_stats
.tfo_cookie_req
);
2936 nstat_set_keyval_scalar(&kv
[i
++],
2937 NSTAT_SYSINFO_TFO_COOKIE_RCV
,
2938 data
->u
.tcp_stats
.tfo_cookie_rcv
);
2939 nstat_set_keyval_scalar(&kv
[i
++],
2940 NSTAT_SYSINFO_TFO_SYN_DATA_SENT
,
2941 data
->u
.tcp_stats
.tfo_syn_data_sent
);
2942 nstat_set_keyval_scalar(&kv
[i
++],
2943 NSTAT_SYSINFO_TFO_SYN_DATA_ACKED
,
2944 data
->u
.tcp_stats
.tfo_syn_data_acked
);
2945 nstat_set_keyval_scalar(&kv
[i
++],
2946 NSTAT_SYSINFO_TFO_SYN_LOSS
,
2947 data
->u
.tcp_stats
.tfo_syn_loss
);
2948 nstat_set_keyval_scalar(&kv
[i
++],
2949 NSTAT_SYSINFO_TFO_BLACKHOLE
,
2950 data
->u
.tcp_stats
.tfo_blackhole
);
2951 nstat_set_keyval_scalar(&kv
[i
++],
2952 NSTAT_SYSINFO_TFO_COOKIE_WRONG
,
2953 data
->u
.tcp_stats
.tfo_cookie_wrong
);
2954 nstat_set_keyval_scalar(&kv
[i
++],
2955 NSTAT_SYSINFO_TFO_NO_COOKIE_RCV
,
2956 data
->u
.tcp_stats
.tfo_no_cookie_rcv
);
2957 nstat_set_keyval_scalar(&kv
[i
++],
2958 NSTAT_SYSINFO_TFO_HEURISTICS_DISABLE
,
2959 data
->u
.tcp_stats
.tfo_heuristics_disable
);
2960 nstat_set_keyval_scalar(&kv
[i
++],
2961 NSTAT_SYSINFO_TFO_SEND_BLACKHOLE
,
2962 data
->u
.tcp_stats
.tfo_sndblackhole
);
2963 nstat_set_keyval_scalar(&kv
[i
++],
2964 NSTAT_SYSINFO_MPTCP_HANDOVER_ATTEMPT
,
2965 data
->u
.tcp_stats
.mptcp_handover_attempt
);
2966 nstat_set_keyval_scalar(&kv
[i
++],
2967 NSTAT_SYSINFO_MPTCP_INTERACTIVE_ATTEMPT
,
2968 data
->u
.tcp_stats
.mptcp_interactive_attempt
);
2969 nstat_set_keyval_scalar(&kv
[i
++],
2970 NSTAT_SYSINFO_MPTCP_AGGREGATE_ATTEMPT
,
2971 data
->u
.tcp_stats
.mptcp_aggregate_attempt
);
2972 nstat_set_keyval_scalar(&kv
[i
++],
2973 NSTAT_SYSINFO_MPTCP_FP_HANDOVER_ATTEMPT
,
2974 data
->u
.tcp_stats
.mptcp_fp_handover_attempt
);
2975 nstat_set_keyval_scalar(&kv
[i
++],
2976 NSTAT_SYSINFO_MPTCP_FP_INTERACTIVE_ATTEMPT
,
2977 data
->u
.tcp_stats
.mptcp_fp_interactive_attempt
);
2978 nstat_set_keyval_scalar(&kv
[i
++],
2979 NSTAT_SYSINFO_MPTCP_FP_AGGREGATE_ATTEMPT
,
2980 data
->u
.tcp_stats
.mptcp_fp_aggregate_attempt
);
2981 nstat_set_keyval_scalar(&kv
[i
++],
2982 NSTAT_SYSINFO_MPTCP_HEURISTIC_FALLBACK
,
2983 data
->u
.tcp_stats
.mptcp_heuristic_fallback
);
2984 nstat_set_keyval_scalar(&kv
[i
++],
2985 NSTAT_SYSINFO_MPTCP_FP_HEURISTIC_FALLBACK
,
2986 data
->u
.tcp_stats
.mptcp_fp_heuristic_fallback
);
2987 nstat_set_keyval_scalar(&kv
[i
++],
2988 NSTAT_SYSINFO_MPTCP_HANDOVER_SUCCESS_WIFI
,
2989 data
->u
.tcp_stats
.mptcp_handover_success_wifi
);
2990 nstat_set_keyval_scalar(&kv
[i
++],
2991 NSTAT_SYSINFO_MPTCP_HANDOVER_SUCCESS_CELL
,
2992 data
->u
.tcp_stats
.mptcp_handover_success_cell
);
2993 nstat_set_keyval_scalar(&kv
[i
++],
2994 NSTAT_SYSINFO_MPTCP_INTERACTIVE_SUCCESS
,
2995 data
->u
.tcp_stats
.mptcp_interactive_success
);
2996 nstat_set_keyval_scalar(&kv
[i
++],
2997 NSTAT_SYSINFO_MPTCP_AGGREGATE_SUCCESS
,
2998 data
->u
.tcp_stats
.mptcp_aggregate_success
);
2999 nstat_set_keyval_scalar(&kv
[i
++],
3000 NSTAT_SYSINFO_MPTCP_FP_HANDOVER_SUCCESS_WIFI
,
3001 data
->u
.tcp_stats
.mptcp_fp_handover_success_wifi
);
3002 nstat_set_keyval_scalar(&kv
[i
++],
3003 NSTAT_SYSINFO_MPTCP_FP_HANDOVER_SUCCESS_CELL
,
3004 data
->u
.tcp_stats
.mptcp_fp_handover_success_cell
);
3005 nstat_set_keyval_scalar(&kv
[i
++],
3006 NSTAT_SYSINFO_MPTCP_FP_INTERACTIVE_SUCCESS
,
3007 data
->u
.tcp_stats
.mptcp_fp_interactive_success
);
3008 nstat_set_keyval_scalar(&kv
[i
++],
3009 NSTAT_SYSINFO_MPTCP_FP_AGGREGATE_SUCCESS
,
3010 data
->u
.tcp_stats
.mptcp_fp_aggregate_success
);
3011 nstat_set_keyval_scalar(&kv
[i
++],
3012 NSTAT_SYSINFO_MPTCP_HANDOVER_CELL_FROM_WIFI
,
3013 data
->u
.tcp_stats
.mptcp_handover_cell_from_wifi
);
3014 nstat_set_keyval_scalar(&kv
[i
++],
3015 NSTAT_SYSINFO_MPTCP_HANDOVER_WIFI_FROM_CELL
,
3016 data
->u
.tcp_stats
.mptcp_handover_wifi_from_cell
);
3017 nstat_set_keyval_scalar(&kv
[i
++],
3018 NSTAT_SYSINFO_MPTCP_INTERACTIVE_CELL_FROM_WIFI
,
3019 data
->u
.tcp_stats
.mptcp_interactive_cell_from_wifi
);
3020 nstat_set_keyval_scalar(&kv
[i
++],
3021 NSTAT_SYSINFO_MPTCP_HANDOVER_CELL_BYTES
,
3022 data
->u
.tcp_stats
.mptcp_handover_cell_bytes
);
3023 nstat_set_keyval_scalar(&kv
[i
++],
3024 NSTAT_SYSINFO_MPTCP_INTERACTIVE_CELL_BYTES
,
3025 data
->u
.tcp_stats
.mptcp_interactive_cell_bytes
);
3026 nstat_set_keyval_scalar(&kv
[i
++],
3027 NSTAT_SYSINFO_MPTCP_AGGREGATE_CELL_BYTES
,
3028 data
->u
.tcp_stats
.mptcp_aggregate_cell_bytes
);
3029 nstat_set_keyval_scalar(&kv
[i
++],
3030 NSTAT_SYSINFO_MPTCP_HANDOVER_ALL_BYTES
,
3031 data
->u
.tcp_stats
.mptcp_handover_all_bytes
);
3032 nstat_set_keyval_scalar(&kv
[i
++],
3033 NSTAT_SYSINFO_MPTCP_INTERACTIVE_ALL_BYTES
,
3034 data
->u
.tcp_stats
.mptcp_interactive_all_bytes
);
3035 nstat_set_keyval_scalar(&kv
[i
++],
3036 NSTAT_SYSINFO_MPTCP_AGGREGATE_ALL_BYTES
,
3037 data
->u
.tcp_stats
.mptcp_aggregate_all_bytes
);
3038 nstat_set_keyval_scalar(&kv
[i
++],
3039 NSTAT_SYSINFO_MPTCP_BACK_TO_WIFI
,
3040 data
->u
.tcp_stats
.mptcp_back_to_wifi
);
3041 nstat_set_keyval_scalar(&kv
[i
++],
3042 NSTAT_SYSINFO_MPTCP_WIFI_PROXY
,
3043 data
->u
.tcp_stats
.mptcp_wifi_proxy
);
3044 nstat_set_keyval_scalar(&kv
[i
++],
3045 NSTAT_SYSINFO_MPTCP_CELL_PROXY
,
3046 data
->u
.tcp_stats
.mptcp_cell_proxy
);
3047 VERIFY(i
== nkeyvals
);
3050 case NSTAT_SYSINFO_IFNET_ECN_STATS
:
3052 nstat_set_keyval_scalar(&kv
[i
++],
3053 NSTAT_SYSINFO_ECN_IFNET_TYPE
,
3054 data
->u
.ifnet_ecn_stats
.ifnet_type
);
3055 nstat_set_keyval_scalar(&kv
[i
++],
3056 NSTAT_SYSINFO_ECN_IFNET_PROTO
,
3057 data
->u
.ifnet_ecn_stats
.ifnet_proto
);
3058 nstat_set_keyval_scalar(&kv
[i
++],
3059 NSTAT_SYSINFO_ECN_IFNET_CLIENT_SETUP
,
3060 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_client_setup
);
3061 nstat_set_keyval_scalar(&kv
[i
++],
3062 NSTAT_SYSINFO_ECN_IFNET_SERVER_SETUP
,
3063 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_server_setup
);
3064 nstat_set_keyval_scalar(&kv
[i
++],
3065 NSTAT_SYSINFO_ECN_IFNET_CLIENT_SUCCESS
,
3066 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_client_success
);
3067 nstat_set_keyval_scalar(&kv
[i
++],
3068 NSTAT_SYSINFO_ECN_IFNET_SERVER_SUCCESS
,
3069 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_server_success
);
3070 nstat_set_keyval_scalar(&kv
[i
++],
3071 NSTAT_SYSINFO_ECN_IFNET_PEER_NOSUPPORT
,
3072 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_peer_nosupport
);
3073 nstat_set_keyval_scalar(&kv
[i
++],
3074 NSTAT_SYSINFO_ECN_IFNET_SYN_LOST
,
3075 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_syn_lost
);
3076 nstat_set_keyval_scalar(&kv
[i
++],
3077 NSTAT_SYSINFO_ECN_IFNET_SYNACK_LOST
,
3078 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_synack_lost
);
3079 nstat_set_keyval_scalar(&kv
[i
++],
3080 NSTAT_SYSINFO_ECN_IFNET_RECV_CE
,
3081 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_recv_ce
);
3082 nstat_set_keyval_scalar(&kv
[i
++],
3083 NSTAT_SYSINFO_ECN_IFNET_RECV_ECE
,
3084 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_recv_ece
);
3085 nstat_set_keyval_scalar(&kv
[i
++],
3086 NSTAT_SYSINFO_ECN_IFNET_CONN_RECV_CE
,
3087 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_conn_recv_ce
);
3088 nstat_set_keyval_scalar(&kv
[i
++],
3089 NSTAT_SYSINFO_ECN_IFNET_CONN_RECV_ECE
,
3090 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_conn_recv_ece
);
3091 nstat_set_keyval_scalar(&kv
[i
++],
3092 NSTAT_SYSINFO_ECN_IFNET_CONN_PLNOCE
,
3093 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_conn_plnoce
);
3094 nstat_set_keyval_scalar(&kv
[i
++],
3095 NSTAT_SYSINFO_ECN_IFNET_CONN_PLCE
,
3096 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_conn_plce
);
3097 nstat_set_keyval_scalar(&kv
[i
++],
3098 NSTAT_SYSINFO_ECN_IFNET_CONN_NOPLCE
,
3099 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_conn_noplce
);
3100 nstat_set_keyval_scalar(&kv
[i
++],
3101 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_SYNLOSS
,
3102 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_fallback_synloss
);
3103 nstat_set_keyval_scalar(&kv
[i
++],
3104 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_REORDER
,
3105 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_fallback_reorder
);
3106 nstat_set_keyval_scalar(&kv
[i
++],
3107 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_CE
,
3108 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_fallback_ce
);
3109 nstat_set_keyval_scalar(&kv
[i
++],
3110 NSTAT_SYSINFO_ECN_IFNET_ON_RTT_AVG
,
3111 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_on
.rtt_avg
);
3112 nstat_set_keyval_scalar(&kv
[i
++],
3113 NSTAT_SYSINFO_ECN_IFNET_ON_RTT_VAR
,
3114 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_on
.rtt_var
);
3115 nstat_set_keyval_scalar(&kv
[i
++],
3116 NSTAT_SYSINFO_ECN_IFNET_ON_OOPERCENT
,
3117 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_on
.oo_percent
);
3118 nstat_set_keyval_scalar(&kv
[i
++],
3119 NSTAT_SYSINFO_ECN_IFNET_ON_SACK_EPISODE
,
3120 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_on
.sack_episodes
);
3121 nstat_set_keyval_scalar(&kv
[i
++],
3122 NSTAT_SYSINFO_ECN_IFNET_ON_REORDER_PERCENT
,
3123 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_on
.reorder_percent
);
3124 nstat_set_keyval_scalar(&kv
[i
++],
3125 NSTAT_SYSINFO_ECN_IFNET_ON_RXMIT_PERCENT
,
3126 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_on
.rxmit_percent
);
3127 nstat_set_keyval_scalar(&kv
[i
++],
3128 NSTAT_SYSINFO_ECN_IFNET_ON_RXMIT_DROP
,
3129 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_on
.rxmit_drop
);
3130 nstat_set_keyval_scalar(&kv
[i
++],
3131 NSTAT_SYSINFO_ECN_IFNET_OFF_RTT_AVG
,
3132 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_off
.rtt_avg
);
3133 nstat_set_keyval_scalar(&kv
[i
++],
3134 NSTAT_SYSINFO_ECN_IFNET_OFF_RTT_VAR
,
3135 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_off
.rtt_var
);
3136 nstat_set_keyval_scalar(&kv
[i
++],
3137 NSTAT_SYSINFO_ECN_IFNET_OFF_OOPERCENT
,
3138 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_off
.oo_percent
);
3139 nstat_set_keyval_scalar(&kv
[i
++],
3140 NSTAT_SYSINFO_ECN_IFNET_OFF_SACK_EPISODE
,
3141 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_off
.sack_episodes
);
3142 nstat_set_keyval_scalar(&kv
[i
++],
3143 NSTAT_SYSINFO_ECN_IFNET_OFF_REORDER_PERCENT
,
3144 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_off
.reorder_percent
);
3145 nstat_set_keyval_scalar(&kv
[i
++],
3146 NSTAT_SYSINFO_ECN_IFNET_OFF_RXMIT_PERCENT
,
3147 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_off
.rxmit_percent
);
3148 nstat_set_keyval_scalar(&kv
[i
++],
3149 NSTAT_SYSINFO_ECN_IFNET_OFF_RXMIT_DROP
,
3150 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_off
.rxmit_drop
);
3151 nstat_set_keyval_scalar(&kv
[i
++],
3152 NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_TXPKTS
,
3153 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_on
.total_txpkts
);
3154 nstat_set_keyval_scalar(&kv
[i
++],
3155 NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_RXMTPKTS
,
3156 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_on
.total_rxmitpkts
);
3157 nstat_set_keyval_scalar(&kv
[i
++],
3158 NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_RXPKTS
,
3159 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_on
.total_rxpkts
);
3160 nstat_set_keyval_scalar(&kv
[i
++],
3161 NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_OOPKTS
,
3162 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_on
.total_oopkts
);
3163 nstat_set_keyval_scalar(&kv
[i
++],
3164 NSTAT_SYSINFO_ECN_IFNET_ON_DROP_RST
,
3165 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_on
.rst_drop
);
3166 nstat_set_keyval_scalar(&kv
[i
++],
3167 NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_TXPKTS
,
3168 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_off
.total_txpkts
);
3169 nstat_set_keyval_scalar(&kv
[i
++],
3170 NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_RXMTPKTS
,
3171 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_off
.total_rxmitpkts
);
3172 nstat_set_keyval_scalar(&kv
[i
++],
3173 NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_RXPKTS
,
3174 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_off
.total_rxpkts
);
3175 nstat_set_keyval_scalar(&kv
[i
++],
3176 NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_OOPKTS
,
3177 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_off
.total_oopkts
);
3178 nstat_set_keyval_scalar(&kv
[i
++],
3179 NSTAT_SYSINFO_ECN_IFNET_OFF_DROP_RST
,
3180 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_off
.rst_drop
);
3181 nstat_set_keyval_scalar(&kv
[i
++],
3182 NSTAT_SYSINFO_ECN_IFNET_TOTAL_CONN
,
3183 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_total_conn
);
3184 nstat_set_keyval_scalar(&kv
[i
++],
3185 NSTAT_SYSINFO_IFNET_UNSENT_DATA
,
3186 data
->unsent_data_cnt
);
3187 nstat_set_keyval_scalar(&kv
[i
++],
3188 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_DROPRST
,
3189 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_fallback_droprst
);
3190 nstat_set_keyval_scalar(&kv
[i
++],
3191 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_DROPRXMT
,
3192 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_fallback_droprxmt
);
3193 nstat_set_keyval_scalar(&kv
[i
++],
3194 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_SYNRST
,
3195 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_fallback_synrst
);
3198 case NSTAT_SYSINFO_LIM_STATS
:
3200 nstat_set_keyval_string(&kv
[i
++],
3201 NSTAT_SYSINFO_LIM_IFNET_SIGNATURE
,
3202 data
->u
.lim_stats
.ifnet_signature
,
3203 data
->u
.lim_stats
.ifnet_siglen
);
3204 nstat_set_keyval_scalar(&kv
[i
++],
3205 NSTAT_SYSINFO_LIM_IFNET_DL_MAX_BANDWIDTH
,
3206 data
->u
.lim_stats
.lim_stat
.lim_dl_max_bandwidth
);
3207 nstat_set_keyval_scalar(&kv
[i
++],
3208 NSTAT_SYSINFO_LIM_IFNET_UL_MAX_BANDWIDTH
,
3209 data
->u
.lim_stats
.lim_stat
.lim_ul_max_bandwidth
);
3210 nstat_set_keyval_scalar(&kv
[i
++],
3211 NSTAT_SYSINFO_LIM_IFNET_PACKET_LOSS_PERCENT
,
3212 data
->u
.lim_stats
.lim_stat
.lim_packet_loss_percent
);
3213 nstat_set_keyval_scalar(&kv
[i
++],
3214 NSTAT_SYSINFO_LIM_IFNET_PACKET_OOO_PERCENT
,
3215 data
->u
.lim_stats
.lim_stat
.lim_packet_ooo_percent
);
3216 nstat_set_keyval_scalar(&kv
[i
++],
3217 NSTAT_SYSINFO_LIM_IFNET_RTT_VARIANCE
,
3218 data
->u
.lim_stats
.lim_stat
.lim_rtt_variance
);
3219 nstat_set_keyval_scalar(&kv
[i
++],
3220 NSTAT_SYSINFO_LIM_IFNET_RTT_MIN
,
3221 data
->u
.lim_stats
.lim_stat
.lim_rtt_min
);
3222 nstat_set_keyval_scalar(&kv
[i
++],
3223 NSTAT_SYSINFO_LIM_IFNET_RTT_AVG
,
3224 data
->u
.lim_stats
.lim_stat
.lim_rtt_average
);
3225 nstat_set_keyval_scalar(&kv
[i
++],
3226 NSTAT_SYSINFO_LIM_IFNET_CONN_TIMEOUT_PERCENT
,
3227 data
->u
.lim_stats
.lim_stat
.lim_conn_timeout_percent
);
3228 nstat_set_keyval_scalar(&kv
[i
++],
3229 NSTAT_SYSINFO_LIM_IFNET_DL_DETECTED
,
3230 data
->u
.lim_stats
.lim_stat
.lim_dl_detected
);
3231 nstat_set_keyval_scalar(&kv
[i
++],
3232 NSTAT_SYSINFO_LIM_IFNET_UL_DETECTED
,
3233 data
->u
.lim_stats
.lim_stat
.lim_ul_detected
);
3234 nstat_set_keyval_scalar(&kv
[i
++],
3235 NSTAT_SYSINFO_LIM_IFNET_TYPE
,
3236 data
->u
.lim_stats
.ifnet_type
);
3239 case NSTAT_SYSINFO_NET_API_STATS
:
3241 nstat_set_keyval_scalar(&kv
[i
++],
3242 NSTAT_SYSINFO_API_IF_FLTR_ATTACH
,
3243 data
->u
.net_api_stats
.net_api_stats
.nas_iflt_attach_total
);
3244 nstat_set_keyval_scalar(&kv
[i
++],
3245 NSTAT_SYSINFO_API_IF_FLTR_ATTACH_OS
,
3246 data
->u
.net_api_stats
.net_api_stats
.nas_iflt_attach_os_total
);
3247 nstat_set_keyval_scalar(&kv
[i
++],
3248 NSTAT_SYSINFO_API_IP_FLTR_ADD
,
3249 data
->u
.net_api_stats
.net_api_stats
.nas_ipf_add_total
);
3250 nstat_set_keyval_scalar(&kv
[i
++],
3251 NSTAT_SYSINFO_API_IP_FLTR_ADD_OS
,
3252 data
->u
.net_api_stats
.net_api_stats
.nas_ipf_add_os_total
);
3253 nstat_set_keyval_scalar(&kv
[i
++],
3254 NSTAT_SYSINFO_API_SOCK_FLTR_ATTACH
,
3255 data
->u
.net_api_stats
.net_api_stats
.nas_sfltr_register_total
);
3256 nstat_set_keyval_scalar(&kv
[i
++],
3257 NSTAT_SYSINFO_API_SOCK_FLTR_ATTACH_OS
,
3258 data
->u
.net_api_stats
.net_api_stats
.nas_sfltr_register_os_total
);
3261 nstat_set_keyval_scalar(&kv
[i
++],
3262 NSTAT_SYSINFO_API_SOCK_ALLOC_TOTAL
,
3263 data
->u
.net_api_stats
.net_api_stats
.nas_socket_alloc_total
);
3264 nstat_set_keyval_scalar(&kv
[i
++],
3265 NSTAT_SYSINFO_API_SOCK_ALLOC_KERNEL
,
3266 data
->u
.net_api_stats
.net_api_stats
.nas_socket_in_kernel_total
);
3267 nstat_set_keyval_scalar(&kv
[i
++],
3268 NSTAT_SYSINFO_API_SOCK_ALLOC_KERNEL_OS
,
3269 data
->u
.net_api_stats
.net_api_stats
.nas_socket_in_kernel_os_total
);
3270 nstat_set_keyval_scalar(&kv
[i
++],
3271 NSTAT_SYSINFO_API_SOCK_NECP_CLIENTUUID
,
3272 data
->u
.net_api_stats
.net_api_stats
.nas_socket_necp_clientuuid_total
);
3274 nstat_set_keyval_scalar(&kv
[i
++],
3275 NSTAT_SYSINFO_API_SOCK_DOMAIN_LOCAL
,
3276 data
->u
.net_api_stats
.net_api_stats
.nas_socket_domain_local_total
);
3277 nstat_set_keyval_scalar(&kv
[i
++],
3278 NSTAT_SYSINFO_API_SOCK_DOMAIN_ROUTE
,
3279 data
->u
.net_api_stats
.net_api_stats
.nas_socket_domain_route_total
);
3280 nstat_set_keyval_scalar(&kv
[i
++],
3281 NSTAT_SYSINFO_API_SOCK_DOMAIN_INET
,
3282 data
->u
.net_api_stats
.net_api_stats
.nas_socket_domain_inet_total
);
3283 nstat_set_keyval_scalar(&kv
[i
++],
3284 NSTAT_SYSINFO_API_SOCK_DOMAIN_INET6
,
3285 data
->u
.net_api_stats
.net_api_stats
.nas_socket_domain_inet6_total
);
3286 nstat_set_keyval_scalar(&kv
[i
++],
3287 NSTAT_SYSINFO_API_SOCK_DOMAIN_SYSTEM
,
3288 data
->u
.net_api_stats
.net_api_stats
.nas_socket_domain_system_total
);
3289 nstat_set_keyval_scalar(&kv
[i
++],
3290 NSTAT_SYSINFO_API_SOCK_DOMAIN_MULTIPATH
,
3291 data
->u
.net_api_stats
.net_api_stats
.nas_socket_domain_multipath_total
);
3292 nstat_set_keyval_scalar(&kv
[i
++],
3293 NSTAT_SYSINFO_API_SOCK_DOMAIN_KEY
,
3294 data
->u
.net_api_stats
.net_api_stats
.nas_socket_domain_key_total
);
3295 nstat_set_keyval_scalar(&kv
[i
++],
3296 NSTAT_SYSINFO_API_SOCK_DOMAIN_NDRV
,
3297 data
->u
.net_api_stats
.net_api_stats
.nas_socket_domain_ndrv_total
);
3298 nstat_set_keyval_scalar(&kv
[i
++],
3299 NSTAT_SYSINFO_API_SOCK_DOMAIN_OTHER
,
3300 data
->u
.net_api_stats
.net_api_stats
.nas_socket_domain_other_total
);
3302 nstat_set_keyval_scalar(&kv
[i
++],
3303 NSTAT_SYSINFO_API_SOCK_INET_STREAM
,
3304 data
->u
.net_api_stats
.net_api_stats
.nas_socket_inet_stream_total
);
3305 nstat_set_keyval_scalar(&kv
[i
++],
3306 NSTAT_SYSINFO_API_SOCK_INET_DGRAM
,
3307 data
->u
.net_api_stats
.net_api_stats
.nas_socket_inet_dgram_total
);
3308 nstat_set_keyval_scalar(&kv
[i
++],
3309 NSTAT_SYSINFO_API_SOCK_INET_DGRAM_CONNECTED
,
3310 data
->u
.net_api_stats
.net_api_stats
.nas_socket_inet_dgram_connected
);
3311 nstat_set_keyval_scalar(&kv
[i
++],
3312 NSTAT_SYSINFO_API_SOCK_INET_DGRAM_DNS
,
3313 data
->u
.net_api_stats
.net_api_stats
.nas_socket_inet_dgram_dns
);
3314 nstat_set_keyval_scalar(&kv
[i
++],
3315 NSTAT_SYSINFO_API_SOCK_INET_DGRAM_NO_DATA
,
3316 data
->u
.net_api_stats
.net_api_stats
.nas_socket_inet_dgram_no_data
);
3318 nstat_set_keyval_scalar(&kv
[i
++],
3319 NSTAT_SYSINFO_API_SOCK_INET6_STREAM
,
3320 data
->u
.net_api_stats
.net_api_stats
.nas_socket_inet6_stream_total
);
3321 nstat_set_keyval_scalar(&kv
[i
++],
3322 NSTAT_SYSINFO_API_SOCK_INET6_DGRAM
,
3323 data
->u
.net_api_stats
.net_api_stats
.nas_socket_inet6_dgram_total
);
3324 nstat_set_keyval_scalar(&kv
[i
++],
3325 NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_CONNECTED
,
3326 data
->u
.net_api_stats
.net_api_stats
.nas_socket_inet6_dgram_connected
);
3327 nstat_set_keyval_scalar(&kv
[i
++],
3328 NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_DNS
,
3329 data
->u
.net_api_stats
.net_api_stats
.nas_socket_inet6_dgram_dns
);
3330 nstat_set_keyval_scalar(&kv
[i
++],
3331 NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_NO_DATA
,
3332 data
->u
.net_api_stats
.net_api_stats
.nas_socket_inet6_dgram_no_data
);
3334 nstat_set_keyval_scalar(&kv
[i
++],
3335 NSTAT_SYSINFO_API_SOCK_INET_MCAST_JOIN
,
3336 data
->u
.net_api_stats
.net_api_stats
.nas_socket_mcast_join_total
);
3337 nstat_set_keyval_scalar(&kv
[i
++],
3338 NSTAT_SYSINFO_API_SOCK_INET_MCAST_JOIN_OS
,
3339 data
->u
.net_api_stats
.net_api_stats
.nas_socket_mcast_join_os_total
);
3341 nstat_set_keyval_scalar(&kv
[i
++],
3342 NSTAT_SYSINFO_API_NEXUS_FLOW_INET_STREAM
,
3343 data
->u
.net_api_stats
.net_api_stats
.nas_nx_flow_inet_stream_total
);
3344 nstat_set_keyval_scalar(&kv
[i
++],
3345 NSTAT_SYSINFO_API_NEXUS_FLOW_INET_DATAGRAM
,
3346 data
->u
.net_api_stats
.net_api_stats
.nas_nx_flow_inet_dgram_total
);
3348 nstat_set_keyval_scalar(&kv
[i
++],
3349 NSTAT_SYSINFO_API_NEXUS_FLOW_INET6_STREAM
,
3350 data
->u
.net_api_stats
.net_api_stats
.nas_nx_flow_inet6_stream_total
);
3351 nstat_set_keyval_scalar(&kv
[i
++],
3352 NSTAT_SYSINFO_API_NEXUS_FLOW_INET6_DATAGRAM
,
3353 data
->u
.net_api_stats
.net_api_stats
.nas_nx_flow_inet6_dgram_total
);
3355 nstat_set_keyval_scalar(&kv
[i
++],
3356 NSTAT_SYSINFO_API_IFNET_ALLOC
,
3357 data
->u
.net_api_stats
.net_api_stats
.nas_ifnet_alloc_total
);
3358 nstat_set_keyval_scalar(&kv
[i
++],
3359 NSTAT_SYSINFO_API_IFNET_ALLOC_OS
,
3360 data
->u
.net_api_stats
.net_api_stats
.nas_ifnet_alloc_os_total
);
3362 nstat_set_keyval_scalar(&kv
[i
++],
3363 NSTAT_SYSINFO_API_PF_ADDRULE
,
3364 data
->u
.net_api_stats
.net_api_stats
.nas_pf_addrule_total
);
3365 nstat_set_keyval_scalar(&kv
[i
++],
3366 NSTAT_SYSINFO_API_PF_ADDRULE_OS
,
3367 data
->u
.net_api_stats
.net_api_stats
.nas_pf_addrule_os
);
3369 nstat_set_keyval_scalar(&kv
[i
++],
3370 NSTAT_SYSINFO_API_VMNET_START
,
3371 data
->u
.net_api_stats
.net_api_stats
.nas_vmnet_total
);
3374 nstat_set_keyval_scalar(&kv
[i
++],
3375 NSTAT_SYSINFO_API_REPORT_INTERVAL
,
3376 data
->u
.net_api_stats
.report_interval
);
3383 VERIFY(i
> 0 && i
<= nkeyvals
);
3384 countsize
= offsetof(nstat_sysinfo_counts
,
3385 nstat_sysinfo_keyvals
) +
3386 sizeof(nstat_sysinfo_keyval
) * i
;
3387 finalsize
+= countsize
;
3388 syscnt
->hdr
.type
= NSTAT_MSG_TYPE_SYSINFO_COUNTS
;
3389 syscnt
->hdr
.length
= finalsize
;
3390 syscnt
->counts
.nstat_sysinfo_len
= countsize
;
3392 result
= ctl_enqueuedata(control
->ncs_kctl
,
3393 control
->ncs_unit
, syscnt
, finalsize
, CTL_DATA_EOR
);
3396 nstat_stats
.nstat_sysinfofailures
+= 1;
3398 OSFree(syscnt
, allocsize
, nstat_malloc_tag
);
/*
 * Broadcast a sysinfo report to every client that subscribed to
 * system-level statistics (NSTAT_FLAG_SYSINFO_SUBSCRIBED).
 *
 * Locking: takes the global nstat_mtx to walk the control list, and the
 * per-control ncs_mtx around the delivery to each client; the per-control
 * lock is acquired while the global lock is held.
 */
__private_extern__ void
nstat_sysinfo_send_data(
	nstat_sysinfo_data	*data)
{
	nstat_control_state	*control;

	lck_mtx_lock(&nstat_mtx);
	for (control = nstat_controls; control; control = control->ncs_next) {
		lck_mtx_lock(&control->ncs_mtx);
		if ((control->ncs_flags & NSTAT_FLAG_SYSINFO_SUBSCRIBED) != 0) {
			/* Deliver the report to this subscriber */
			nstat_sysinfo_send_data_internal(control, data);
		}
		lck_mtx_unlock(&control->ncs_mtx);
	}
	lck_mtx_unlock(&nstat_mtx);
}
/*
 * Trigger each subsystem's periodic statistics report; each callee decides
 * internally whether a report is actually due.
 */
static void
nstat_sysinfo_generate_report(void)
{
	mbuf_report_peak_usage();
	tcp_report_stats();
	nstat_ifnet_report_ecn_stats();
	nstat_ifnet_report_lim_stats();
	nstat_net_api_report_stats();
}
#pragma mark -- net_api --

/* Snapshot of net_api_stats taken at the last report; used to compute
 * per-interval deltas in nstat_net_api_report_stats(). */
static struct net_api_stats net_api_stats_before;
/* net_uptime() timestamp of the last report, for interval gating. */
static u_int64_t net_api_stats_last_report_time;
3436 nstat_net_api_report_stats(void)
3438 struct nstat_sysinfo_data data
;
3439 struct nstat_sysinfo_net_api_stats
*st
= &data
.u
.net_api_stats
;
3442 uptime
= net_uptime();
3444 if ((u_int32_t
)(uptime
- net_api_stats_last_report_time
) <
3445 net_api_stats_report_interval
)
3448 st
->report_interval
= uptime
- net_api_stats_last_report_time
;
3449 net_api_stats_last_report_time
= uptime
;
3451 data
.flags
= NSTAT_SYSINFO_NET_API_STATS
;
3452 data
.unsent_data_cnt
= 0;
3455 * Some of the fields in the report are the current value and
3456 * other fields are the delta from the last report:
3457 * - Report difference for the per flow counters as they increase
3459 * - Report current value for other counters as they tend not to change
3462 #define STATCOPY(f) \
3463 (st->net_api_stats.f = net_api_stats.f)
3464 #define STATDIFF(f) \
3465 (st->net_api_stats.f = net_api_stats.f - net_api_stats_before.f)
3467 STATCOPY(nas_iflt_attach_count
);
3468 STATCOPY(nas_iflt_attach_total
);
3469 STATCOPY(nas_iflt_attach_os_total
);
3471 STATCOPY(nas_ipf_add_count
);
3472 STATCOPY(nas_ipf_add_total
);
3473 STATCOPY(nas_ipf_add_os_total
);
3475 STATCOPY(nas_sfltr_register_count
);
3476 STATCOPY(nas_sfltr_register_total
);
3477 STATCOPY(nas_sfltr_register_os_total
);
3479 STATDIFF(nas_socket_alloc_total
);
3480 STATDIFF(nas_socket_in_kernel_total
);
3481 STATDIFF(nas_socket_in_kernel_os_total
);
3482 STATDIFF(nas_socket_necp_clientuuid_total
);
3484 STATDIFF(nas_socket_domain_local_total
);
3485 STATDIFF(nas_socket_domain_route_total
);
3486 STATDIFF(nas_socket_domain_inet_total
);
3487 STATDIFF(nas_socket_domain_inet6_total
);
3488 STATDIFF(nas_socket_domain_system_total
);
3489 STATDIFF(nas_socket_domain_multipath_total
);
3490 STATDIFF(nas_socket_domain_key_total
);
3491 STATDIFF(nas_socket_domain_ndrv_total
);
3492 STATDIFF(nas_socket_domain_other_total
);
3494 STATDIFF(nas_socket_inet_stream_total
);
3495 STATDIFF(nas_socket_inet_dgram_total
);
3496 STATDIFF(nas_socket_inet_dgram_connected
);
3497 STATDIFF(nas_socket_inet_dgram_dns
);
3498 STATDIFF(nas_socket_inet_dgram_no_data
);
3500 STATDIFF(nas_socket_inet6_stream_total
);
3501 STATDIFF(nas_socket_inet6_dgram_total
);
3502 STATDIFF(nas_socket_inet6_dgram_connected
);
3503 STATDIFF(nas_socket_inet6_dgram_dns
);
3504 STATDIFF(nas_socket_inet6_dgram_no_data
);
3506 STATDIFF(nas_socket_mcast_join_total
);
3507 STATDIFF(nas_socket_mcast_join_os_total
);
3509 STATDIFF(nas_sock_inet6_stream_exthdr_in
);
3510 STATDIFF(nas_sock_inet6_stream_exthdr_out
);
3511 STATDIFF(nas_sock_inet6_dgram_exthdr_in
);
3512 STATDIFF(nas_sock_inet6_dgram_exthdr_out
);
3514 STATDIFF(nas_nx_flow_inet_stream_total
);
3515 STATDIFF(nas_nx_flow_inet_dgram_total
);
3517 STATDIFF(nas_nx_flow_inet6_stream_total
);
3518 STATDIFF(nas_nx_flow_inet6_dgram_total
);
3520 STATCOPY(nas_ifnet_alloc_count
);
3521 STATCOPY(nas_ifnet_alloc_total
);
3522 STATCOPY(nas_ifnet_alloc_os_count
);
3523 STATCOPY(nas_ifnet_alloc_os_total
);
3525 STATCOPY(nas_pf_addrule_total
);
3526 STATCOPY(nas_pf_addrule_os
);
3528 STATCOPY(nas_vmnet_total
);
3533 nstat_sysinfo_send_data(&data
);
3536 * Save a copy of the current fields so we can diff them the next time
3538 memcpy(&net_api_stats_before
, &net_api_stats
,
3539 sizeof(struct net_api_stats
));
3540 _CASSERT(sizeof (net_api_stats_before
) == sizeof (net_api_stats
));
#pragma mark -- Kernel Control Socket --

/* Handle returned by ctl_register() for the com.apple.network.statistics
 * kernel control; NULL until nstat_control_register() runs. */
static kern_ctl_ref	nstat_ctlref = NULL;
/* Lock group used for nstat_mtx and every per-control ncs_mtx. */
static lck_grp_t	*nstat_lck_grp = NULL;
/* Kernel-control callbacks (installed in nstat_control_register):
 * connect / disconnect lifecycle and inbound message handling. */
static errno_t	nstat_control_connect(kern_ctl_ref kctl, struct sockaddr_ctl *sac, void **uinfo);
static errno_t	nstat_control_disconnect(kern_ctl_ref kctl, u_int32_t unit, void *uinfo);
static errno_t	nstat_control_send(kern_ctl_ref kctl, u_int32_t unit, void *uinfo, mbuf_t m, int flags);
/*
 * Send a NSTAT_MSG_TYPE_SUCCESS acknowledgement to the client.
 *
 * context: request context echoed back so the client can match the reply.
 * flags:   header flags to attach to the success message.
 * Failures are counted in nstat_stats.nstat_successmsgfailures and logged
 * when nstat_debug is enabled; the error is not propagated.
 */
static void
nstat_enqueue_success(
    uint64_t context,
    nstat_control_state	*state,
    u_int16_t flags)
{
	nstat_msg_hdr success;
	errno_t result;

	bzero(&success, sizeof(success));
	success.context = context;
	success.type = NSTAT_MSG_TYPE_SUCCESS;
	success.length = sizeof(success);
	success.flags = flags;
	/* CTL_DATA_CRIT: acknowledgements should not be dropped under
	 * socket-buffer pressure. */
	result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &success,
	    sizeof(success), CTL_DATA_EOR | CTL_DATA_CRIT);
	if (result != 0) {
		if (nstat_debug != 0)
			printf("%s: could not enqueue success message %d\n",
			    __func__, result);
		nstat_stats.nstat_successmsgfailures += 1;
	}
}
/*
 * Send the final set of messages for a source that is going away:
 * a last update (or counts + description for clients that don't support
 * updates), then the source-removed notification.  All messages carry
 * NSTAT_MSG_HDR_FLAG_CLOSING.  Returns the result of the last send;
 * any failure bumps nstat_control_send_goodbye_failures.
 */
static errno_t
nstat_control_send_goodbye(
	nstat_control_state	*state,
	nstat_src		*src)
{
	errno_t result = 0;
	int failed = 0;

	if (nstat_control_reporting_allowed(state, src))
	{
		if ((state->ncs_flags & NSTAT_FLAG_SUPPORTS_UPDATES) != 0)
		{
			result = nstat_control_send_update(state, src, 0, NSTAT_MSG_HDR_FLAG_CLOSING, NULL);
			if (result != 0)
			{
				failed = 1;
				if (nstat_debug != 0)
					printf("%s - nstat_control_send_update() %d\n", __func__, result);
			}
		}
		else
		{
			// send one last counts notification
			result = nstat_control_send_counts(state, src, 0, NSTAT_MSG_HDR_FLAG_CLOSING, NULL);
			if (result != 0)
			{
				failed = 1;
				if (nstat_debug != 0)
					printf("%s - nstat_control_send_counts() %d\n", __func__, result);
			}

			// send a last description
			result = nstat_control_send_description(state, src, 0, NSTAT_MSG_HDR_FLAG_CLOSING);
			if (result != 0)
			{
				failed = 1;
				if (nstat_debug != 0)
					printf("%s - nstat_control_send_description() %d\n", __func__, result);
			}
		}
	}

	// send the source removed notification
	result = nstat_control_send_removed(state, src);
	if (result != 0 && nstat_debug)
	{
		failed = 1;
		if (nstat_debug != 0)
			printf("%s - nstat_control_send_removed() %d\n", __func__, result);
	}

	if (failed != 0)
		nstat_stats.nstat_control_send_goodbye_failures++;

	return result;
}
/*
 * Flush the per-control accumulation mbuf to the client, if non-empty.
 * On enqueue failure the mbuf is freed (messages are lost) and the
 * failure is counted.  In all cases ncs_accumulated ends up NULL so the
 * next nstat_accumulate_msg() starts a fresh packet.
 */
static void
nstat_flush_accumulated_msgs(
	nstat_control_state	*state)
{
	errno_t	result;
	if (state->ncs_accumulated != NULL && mbuf_len(state->ncs_accumulated) > 0)
	{
		/* Packet header length must match the data length before enqueue. */
		mbuf_pkthdr_setlen(state->ncs_accumulated, mbuf_len(state->ncs_accumulated));
		result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, state->ncs_accumulated, CTL_DATA_EOR);
		if (result != 0)
		{
			nstat_stats.nstat_flush_accumulated_msgs_failures++;
			if (nstat_debug != 0)
				printf("%s - ctl_enqueuembuf failed: %d\n", __func__, result);
			/* ctl_enqueuembuf did not take ownership on failure. */
			mbuf_freem(state->ncs_accumulated);
		}
		state->ncs_accumulated = NULL;
	}
}
/*
 * Append one message to the control's accumulation mbuf, batching small
 * messages into one packet.  If the current mbuf lacks room it is flushed
 * first; if accumulation fails entirely, fall back to sending the message
 * directly with ctl_enqueuedata.  hdr->length is stamped with `length`
 * before copying.  Returns 0 on success or the last errno_t failure.
 */
static errno_t
nstat_accumulate_msg(
	nstat_control_state	*state,
	nstat_msg_hdr		*hdr,
	size_t			length)
{
	if (state->ncs_accumulated && mbuf_trailingspace(state->ncs_accumulated) < length)
	{
		// Will send the current mbuf
		nstat_flush_accumulated_msgs(state);
	}

	errno_t result = 0;

	if (state->ncs_accumulated == NULL)
	{
		unsigned int one = 1;
		if (mbuf_allocpacket(MBUF_DONTWAIT, NSTAT_MAX_MSG_SIZE, &one, &state->ncs_accumulated) != 0)
		{
			if (nstat_debug != 0)
				printf("%s - mbuf_allocpacket failed\n", __func__);
			result = ENOMEM;
		}
		else
		{
			/* Fresh packet starts empty. */
			mbuf_setlen(state->ncs_accumulated, 0);
		}
	}

	if (result == 0)
	{
		hdr->length = length;
		result = mbuf_copyback(state->ncs_accumulated, mbuf_len(state->ncs_accumulated),
							   length, hdr, MBUF_DONTWAIT);
	}

	if (result != 0)
	{
		/* Accumulation failed: flush what we have and try a direct send. */
		nstat_flush_accumulated_msgs(state);
		if (nstat_debug != 0)
			printf("%s - resorting to ctl_enqueuedata\n", __func__);
		result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, hdr, length, CTL_DATA_EOR);
	}

	if (result != 0)
	{
		nstat_stats.nstat_accumulate_msg_failures++;
	}

	return result;
}
/*
 * Periodic (60s) thread-call: scan every control for sources whose
 * provider reports them gone, send goodbyes, and collect them on a local
 * dead list which is released after all locks are dropped.  Also
 * re-arms itself while any control exists, and triggers the system-level
 * report generation.  Controls that still have NSTAT_FLAG_REQCOUNTS set
 * are skipped this cycle; the flag is cleared either way.
 */
static void
nstat_idle_check(
	__unused thread_call_param_t p0,
	__unused thread_call_param_t p1)
{
	nstat_control_state *control;
	nstat_src	*src, *tmpsrc;
	tailq_head_nstat_src dead_list;
	TAILQ_INIT(&dead_list);

	lck_mtx_lock(&nstat_mtx);

	/* Mark the timer as not armed; re-armed below if controls remain. */
	nstat_idle_time = 0;

	for (control = nstat_controls; control; control = control->ncs_next)
	{
		lck_mtx_lock(&control->ncs_mtx);
		if (!(control->ncs_flags & NSTAT_FLAG_REQCOUNTS))
		{
			/* _SAFE variant: we remove entries while iterating. */
			TAILQ_FOREACH_SAFE(src, &control->ncs_src_queue, ns_control_link, tmpsrc)
			{
				if (src->provider->nstat_gone(src->cookie))
				{
					errno_t result;

					// Pull it off the list
					TAILQ_REMOVE(&control->ncs_src_queue, src, ns_control_link);

					result = nstat_control_send_goodbye(control, src);

					// Put this on the list to release later
					TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
				}
			}
		}
		control->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS;
		lck_mtx_unlock(&control->ncs_mtx);
	}

	if (nstat_controls)
	{
		clock_interval_to_deadline(60, NSEC_PER_SEC, &nstat_idle_time);
		thread_call_func_delayed((thread_call_func_t)nstat_idle_check, NULL, nstat_idle_time);
	}

	lck_mtx_unlock(&nstat_mtx);

	/* Generate any system level reports, if needed */
	nstat_sysinfo_generate_report();

	// Release the sources now that we aren't holding lots of locks
	while ((src = TAILQ_FIRST(&dead_list)))
	{
		TAILQ_REMOVE(&dead_list, src, ns_control_link);
		nstat_control_cleanup_source(NULL, src, FALSE);
	}
}
/*
 * One-time setup: create the lock group, initialize the global mutex, and
 * register the network-statistics kernel control with its callbacks.
 * NOTE(review): the ctl_register() result appears to be ignored here, so
 * a registration failure would go unnoticed — confirm against callers.
 */
__private_extern__ void
nstat_control_register(void)
{
	// Create our lock group first
	lck_grp_attr_t	*grp_attr = lck_grp_attr_alloc_init();
	lck_grp_attr_setdefault(grp_attr);
	nstat_lck_grp = lck_grp_alloc_init("network statistics kctl", grp_attr);
	lck_grp_attr_free(grp_attr);

	lck_mtx_init(&nstat_mtx, nstat_lck_grp, NULL);

	// Register the control
	struct kern_ctl_reg	nstat_control;
	bzero(&nstat_control, sizeof(nstat_control));
	strlcpy(nstat_control.ctl_name, NET_STAT_CONTROL_NAME, sizeof(nstat_control.ctl_name));
	nstat_control.ctl_flags = CTL_FLAG_REG_EXTENDED | CTL_FLAG_REG_CRIT;
	nstat_control.ctl_sendsize = nstat_sendspace;
	nstat_control.ctl_recvsize = nstat_recvspace;
	nstat_control.ctl_connect = nstat_control_connect;
	nstat_control.ctl_disconnect = nstat_control_disconnect;
	nstat_control.ctl_send = nstat_control_send;

	ctl_register(&nstat_control, &nstat_ctlref);
}
/*
 * Release a source: optionally notify the client (when `state` is
 * non-NULL), then hand the provider cookie back to the provider and free
 * the source structure.  `locked` is passed through to the provider's
 * nstat_release so it knows what locks the caller already holds.
 */
static void
nstat_control_cleanup_source(
	nstat_control_state	*state,
	struct nstat_src	*src,
	boolean_t		locked)
{
	errno_t result;

	if (state)
	{
		result = nstat_control_send_removed(state, src);
		if (result != 0)
		{
			nstat_stats.nstat_control_cleanup_source_failures++;
			if (nstat_debug != 0)
				printf("%s - nstat_control_send_removed() %d\n",
				    __func__, result);
		}
	}
	// Cleanup the source if we found it.
	src->provider->nstat_release(src->cookie, locked);
	OSFree(src, sizeof(*src), nstat_malloc_tag);
}
/*
 * Ask the provider whether this source may be reported to this client,
 * applying the client's per-provider filter.  Providers without a
 * reporting_allowed hook always allow reporting.
 */
static bool
nstat_control_reporting_allowed(
	nstat_control_state	*state,
	nstat_src		*src)
{
	if (src->provider->nstat_reporting_allowed == NULL)
		return TRUE;

	return (
	    src->provider->nstat_reporting_allowed(src->cookie,
		&state->ncs_provider_filters[src->provider->nstat_provider_id])
	);
}
/*
 * Kernel-control connect callback: allocate and zero the per-client
 * state, link it at the head of the global control list, and arm the
 * idle-check timer if it is not already running (nstat_idle_time == 0).
 * The state pointer is returned to the kctl layer via *uinfo.
 */
static errno_t
nstat_control_connect(
	kern_ctl_ref		kctl,
	struct sockaddr_ctl	*sac,
	void			**uinfo)
{
	nstat_control_state	*state = OSMalloc(sizeof(*state), nstat_malloc_tag);
	if (state == NULL) return ENOMEM;

	bzero(state, sizeof(*state));
	lck_mtx_init(&state->ncs_mtx, nstat_lck_grp, NULL);
	state->ncs_kctl = kctl;
	state->ncs_unit = sac->sc_unit;
	/* New clients start with a counts request pending. */
	state->ncs_flags = NSTAT_FLAG_REQCOUNTS;
	*uinfo = state;

	lck_mtx_lock(&nstat_mtx);
	state->ncs_next = nstat_controls;
	nstat_controls = state;

	if (nstat_idle_time == 0)
	{
		clock_interval_to_deadline(60, NSEC_PER_SEC, &nstat_idle_time);
		thread_call_func_delayed((thread_call_func_t)nstat_idle_check, NULL, nstat_idle_time);
	}

	lck_mtx_unlock(&nstat_mtx);

	return 0;
}
/*
 * Kernel-control disconnect callback: unlink the state from the global
 * list, detach from every provider being watched, free any pending
 * accumulation mbuf, move all sources to a local list, and release them
 * after dropping the per-control lock.  Finally destroy the mutex and
 * free the state.
 */
static errno_t
nstat_control_disconnect(
	__unused kern_ctl_ref	kctl,
	__unused u_int32_t	unit,
	void			*uinfo)
{
	u_int32_t	watching;
	nstat_control_state	*state = (nstat_control_state*)uinfo;
	tailq_head_nstat_src cleanup_list;
	nstat_src *src;

	TAILQ_INIT(&cleanup_list);

	// pull it out of the global list of states
	lck_mtx_lock(&nstat_mtx);
	nstat_control_state	**statepp;
	for (statepp = &nstat_controls; *statepp; statepp = &(*statepp)->ncs_next)
	{
		if (*statepp == state)
		{
			*statepp = state->ncs_next;
			break;
		}
	}
	lck_mtx_unlock(&nstat_mtx);

	lck_mtx_lock(&state->ncs_mtx);
	// Stop watching for sources
	nstat_provider	*provider;
	watching = state->ncs_watching;
	state->ncs_watching = 0;
	for (provider = nstat_providers; provider && watching; provider = provider->next)
	{
		if ((watching & (1 << provider->nstat_provider_id)) != 0)
		{
			/* Clear each bit as handled so the loop can stop early. */
			watching &= ~(1 << provider->nstat_provider_id);
			provider->nstat_watcher_remove(state);
		}
	}

	// set cleanup flags
	state->ncs_flags |= NSTAT_FLAG_CLEANUP;

	if (state->ncs_accumulated)
	{
		mbuf_freem(state->ncs_accumulated);
		state->ncs_accumulated = NULL;
	}

	// Copy out the list of sources
	TAILQ_CONCAT(&cleanup_list, &state->ncs_src_queue, ns_control_link);
	lck_mtx_unlock(&state->ncs_mtx);

	/* Release sources without holding any locks. */
	while ((src = TAILQ_FIRST(&cleanup_list)))
	{
		TAILQ_REMOVE(&cleanup_list, src, ns_control_link);
		nstat_control_cleanup_source(NULL, src, FALSE);
	}

	lck_mtx_destroy(&state->ncs_mtx, nstat_lck_grp);
	OSFree(state, sizeof(*state), nstat_malloc_tag);

	return 0;
}
3929 static nstat_src_ref_t
3930 nstat_control_next_src_ref(
3931 nstat_control_state
*state
)
3933 return ++state
->ncs_next_srcref
;
/*
 * Send a NSTAT_MSG_TYPE_SRC_COUNTS message for one source directly via
 * ctl_enqueuedata.  Returns 0 when the provider has no counts hook or on
 * success; EAGAIN when the NOZEROBYTES filter suppresses an all-zero
 * sample; otherwise the enqueue error.  *gone is set by the provider's
 * counts callback when the source has disappeared.
 */
static errno_t
nstat_control_send_counts(
	nstat_control_state	*state,
	nstat_src		*src,
	unsigned long long	context,
	u_int16_t		hdr_flags,
	int			*gone)
{
	nstat_msg_src_counts counts;
	errno_t result = 0;

	/* Some providers may not have any counts to send */
	if (src->provider->nstat_counts == NULL)
		return (0);

	bzero(&counts, sizeof(counts));
	counts.hdr.type = NSTAT_MSG_TYPE_SRC_COUNTS;
	counts.hdr.length = sizeof(counts);
	counts.hdr.flags = hdr_flags;
	counts.hdr.context = context;
	counts.srcref = src->srcref;
	counts.event_flags = 0;

	if (src->provider->nstat_counts(src->cookie, &counts.counts, gone) == 0)
	{
		if ((src->filter & NSTAT_FILTER_NOZEROBYTES) &&
		    counts.counts.nstat_rxbytes == 0 &&
		    counts.counts.nstat_txbytes == 0)
		{
			/* Client asked to skip sources with no traffic. */
			result = EAGAIN;
		}
		else
		{
			result = ctl_enqueuedata(state->ncs_kctl,
			    state->ncs_unit, &counts, sizeof(counts),
			    CTL_DATA_EOR);
			if (result != 0)
				nstat_stats.nstat_sendcountfailures += 1;
		}
	}
	return (result);
}
/*
 * Like nstat_control_send_counts, but appends the counts message to the
 * control's accumulation mbuf (batched delivery) instead of sending it
 * immediately.  Returns 0 on success or when the provider has no counts
 * hook; EAGAIN when the NOZEROBYTES filter suppresses an all-zero sample;
 * otherwise the provider or accumulation error.
 */
static errno_t
nstat_control_append_counts(
	nstat_control_state	*state,
	nstat_src		*src,
	int			*gone)
{
	/* Some providers may not have any counts to send */
	if (!src->provider->nstat_counts) return 0;

	nstat_msg_src_counts counts;
	bzero(&counts, sizeof(counts));
	counts.hdr.type = NSTAT_MSG_TYPE_SRC_COUNTS;
	counts.hdr.length = sizeof(counts);
	counts.srcref = src->srcref;
	counts.event_flags = 0;

	errno_t result = 0;
	result = src->provider->nstat_counts(src->cookie, &counts.counts, gone);
	if (result != 0)
	{
		return result;
	}

	if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
	    counts.counts.nstat_rxbytes == 0 && counts.counts.nstat_txbytes == 0)
	{
		/* Client asked to skip sources with no traffic. */
		return EAGAIN;
	}

	return nstat_accumulate_msg(state, &counts.hdr, counts.hdr.length);
}
/*
 * Build a NSTAT_MSG_TYPE_SRC_DESC message in a freshly allocated mbuf —
 * header plus provider-specific descriptor bytes — and enqueue it to the
 * client.  Returns EOPNOTSUPP when the provider cannot produce a
 * descriptor, ENOMEM on allocation failure, or the provider/enqueue
 * error (the mbuf is freed on every failure path).
 */
static errno_t
nstat_control_send_description(
	nstat_control_state	*state,
	nstat_src		*src,
	u_int64_t		context,
	u_int16_t		hdr_flags)
{
	// Provider doesn't support getting the descriptor? Done.
	if (src->provider->nstat_descriptor_length == 0 ||
	    src->provider->nstat_copy_descriptor == NULL)
	{
		return EOPNOTSUPP;
	}

	// Allocate storage for the descriptor message
	mbuf_t			msg;
	unsigned int	one = 1;
	u_int32_t		size = offsetof(nstat_msg_src_description, data) + src->provider->nstat_descriptor_length;
	if (mbuf_allocpacket(MBUF_DONTWAIT, size, &one, &msg) != 0)
	{
		return ENOMEM;
	}

	nstat_msg_src_description	*desc = (nstat_msg_src_description*)mbuf_data(msg);
	bzero(desc, size);
	mbuf_setlen(msg, size);
	mbuf_pkthdr_setlen(msg, mbuf_len(msg));

	// Query the provider for the provider specific bits
	errno_t result = src->provider->nstat_copy_descriptor(src->cookie, desc->data, src->provider->nstat_descriptor_length);

	if (result != 0)
	{
		mbuf_freem(msg);
		return result;
	}

	desc->hdr.context = context;
	desc->hdr.type = NSTAT_MSG_TYPE_SRC_DESC;
	desc->hdr.length = size;
	desc->hdr.flags = hdr_flags;
	desc->srcref = src->srcref;
	desc->event_flags = 0;
	desc->provider = src->provider->nstat_provider_id;

	result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg, CTL_DATA_EOR);
	if (result != 0)
	{
		nstat_stats.nstat_descriptionfailures += 1;
		/* ctl_enqueuembuf did not take ownership on failure. */
		mbuf_freem(msg);
	}

	return result;
}
/*
 * Batched variant of nstat_control_send_description: build the SRC_DESC
 * message in a stack buffer and append it to the accumulation mbuf.
 * Limited to 512-byte messages (stack VLA); larger descriptors, or
 * providers without a descriptor hook, get EOPNOTSUPP.
 */
static errno_t
nstat_control_append_description(
	nstat_control_state	*state,
	nstat_src		*src)
{
	size_t	size = offsetof(nstat_msg_src_description, data) + src->provider->nstat_descriptor_length;
	if (size > 512 || src->provider->nstat_descriptor_length == 0 ||
		src->provider->nstat_copy_descriptor == NULL)
	{
		return EOPNOTSUPP;
	}

	// Fill out a buffer on the stack, we will copy to the mbuf later
	u_int64_t buffer[size/sizeof(u_int64_t) + 1]; // u_int64_t to ensure alignment
	bzero(buffer, size);

	nstat_msg_src_description	*desc = (nstat_msg_src_description*)buffer;
	desc->hdr.type = NSTAT_MSG_TYPE_SRC_DESC;
	desc->hdr.length = size;
	desc->srcref = src->srcref;
	desc->event_flags = 0;
	desc->provider = src->provider->nstat_provider_id;

	errno_t	result = 0;
	// Fill in the description
	// Query the provider for the provider specific bits
	result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
				src->provider->nstat_descriptor_length);
	if (result != 0)
	{
		return result;
	}

	return nstat_accumulate_msg(state, &desc->hdr, size);
}
/*
 * Send a combined NSTAT_MSG_TYPE_SRC_UPDATE (descriptor + counts) for one
 * source in a freshly allocated mbuf.  Requires the provider to support
 * at least one of descriptor copying or counts; otherwise EOPNOTSUPP.
 * EAGAIN when the NOZEROBYTES filter suppresses an all-zero sample; on
 * any failure the mbuf is freed and nstat_srcupatefailures is bumped.
 * *gone is set by the provider's counts callback if the source vanished.
 */
static errno_t
nstat_control_send_update(
	nstat_control_state	*state,
	nstat_src		*src,
	u_int64_t		context,
	u_int16_t		hdr_flags,
	int			*gone)
{
	// Provider doesn't support getting the descriptor or counts? Done.
	if ((src->provider->nstat_descriptor_length == 0 ||
		 src->provider->nstat_copy_descriptor == NULL) &&
		src->provider->nstat_counts == NULL)
	{
		return EOPNOTSUPP;
	}

	// Allocate storage for the descriptor message
	mbuf_t			msg;
	unsigned int	one = 1;
	u_int32_t		size = offsetof(nstat_msg_src_update, data) +
						   src->provider->nstat_descriptor_length;
	if (mbuf_allocpacket(MBUF_DONTWAIT, size, &one, &msg) != 0)
	{
		return ENOMEM;
	}

	nstat_msg_src_update	*desc = (nstat_msg_src_update*)mbuf_data(msg);
	bzero(desc, size);
	desc->hdr.context = context;
	desc->hdr.type = NSTAT_MSG_TYPE_SRC_UPDATE;
	desc->hdr.length = size;
	desc->hdr.flags = hdr_flags;
	desc->srcref = src->srcref;
	desc->event_flags = 0;
	desc->provider = src->provider->nstat_provider_id;

	mbuf_setlen(msg, size);
	mbuf_pkthdr_setlen(msg, mbuf_len(msg));

	errno_t	result = 0;
	if (src->provider->nstat_descriptor_length != 0 && src->provider->nstat_copy_descriptor)
	{
		// Query the provider for the provider specific bits
		result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
					src->provider->nstat_descriptor_length);
		if (result != 0)
		{
			mbuf_freem(msg);
			return result;
		}
	}

	if (src->provider->nstat_counts)
	{
		result = src->provider->nstat_counts(src->cookie, &desc->counts, gone);
		if (result == 0)
		{
			if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
				desc->counts.nstat_rxbytes == 0 && desc->counts.nstat_txbytes == 0)
			{
				/* Client asked to skip sources with no traffic. */
				result = EAGAIN;
			}
			else
			{
				result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg, CTL_DATA_EOR);
			}
		}
	}

	if (result != 0)
	{
		nstat_stats.nstat_srcupatefailures += 1;
		/* Not enqueued: we still own the mbuf. */
		mbuf_freem(msg);
	}

	return result;
}
/*
 * Batched variant of nstat_control_send_update: build the SRC_UPDATE
 * message (descriptor + counts) in a stack buffer and append it to the
 * accumulation mbuf.  Limited to 512-byte messages; providers must
 * support descriptor copying or counts, else EOPNOTSUPP.  EAGAIN when
 * the NOZEROBYTES filter suppresses an all-zero sample.
 */
static errno_t
nstat_control_append_update(
	nstat_control_state	*state,
	nstat_src		*src,
	int			*gone)
{
	size_t	size = offsetof(nstat_msg_src_update, data) + src->provider->nstat_descriptor_length;
	if (size > 512 || ((src->provider->nstat_descriptor_length == 0 ||
						src->provider->nstat_copy_descriptor == NULL) &&
					   src->provider->nstat_counts == NULL))
	{
		return EOPNOTSUPP;
	}

	// Fill out a buffer on the stack, we will copy to the mbuf later
	u_int64_t buffer[size/sizeof(u_int64_t) + 1]; // u_int64_t to ensure alignment
	bzero(buffer, size);

	nstat_msg_src_update	*desc = (nstat_msg_src_update*)buffer;
	desc->hdr.type = NSTAT_MSG_TYPE_SRC_UPDATE;
	desc->hdr.length = size;
	desc->srcref = src->srcref;
	desc->event_flags = 0;
	desc->provider = src->provider->nstat_provider_id;

	errno_t	result = 0;
	// Fill in the description
	if (src->provider->nstat_descriptor_length != 0 && src->provider->nstat_copy_descriptor)
	{
		// Query the provider for the provider specific bits
		result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
					src->provider->nstat_descriptor_length);
		if (result != 0)
		{
			nstat_stats.nstat_copy_descriptor_failures++;
			if (nstat_debug != 0)
				printf("%s: src->provider->nstat_copy_descriptor: %d\n", __func__, result);
			return result;
		}
	}

	if (src->provider->nstat_counts)
	{
		result = src->provider->nstat_counts(src->cookie, &desc->counts, gone);
		if (result != 0)
		{
			nstat_stats.nstat_provider_counts_failures++;
			if (nstat_debug != 0)
				printf("%s: src->provider->nstat_counts: %d\n", __func__, result);
			return result;
		}

		if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
			desc->counts.nstat_rxbytes == 0 && desc->counts.nstat_txbytes == 0)
		{
			/* Client asked to skip sources with no traffic. */
			return EAGAIN;
		}
	}

	return nstat_accumulate_msg(state, &desc->hdr, size);
}
/*
 * Notify the client that a source has been removed
 * (NSTAT_MSG_TYPE_SRC_REMOVED, sent with CTL_DATA_CRIT so it is not
 * dropped under buffer pressure).  Failures bump
 * nstat_msgremovedfailures; the enqueue result is returned.
 */
static errno_t
nstat_control_send_removed(
	nstat_control_state	*state,
	nstat_src		*src)
{
	nstat_msg_src_removed removed;
	errno_t result;

	bzero(&removed, sizeof(removed));
	removed.hdr.type = NSTAT_MSG_TYPE_SRC_REMOVED;
	removed.hdr.length = sizeof(removed);
	removed.hdr.context = 0;
	removed.srcref = src->srcref;
	result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &removed,
	    sizeof(removed), CTL_DATA_EOR | CTL_DATA_CRIT);
	if (result != 0)
		nstat_stats.nstat_msgremovedfailures += 1;

	return result;
}
/*
 * Handle a client "add source" request.  Validates that the fixed header
 * fits in the first mbuf and that the variable-length parameter is within
 * [0, 2KB]; if the parameter spans mbufs it is copied into a contiguous
 * temporary buffer before being passed to nstat_lookup_entry().  On a
 * successful lookup the source is added to this control; if adding fails,
 * the provider cookie is released.
 */
static errno_t
nstat_control_handle_add_request(
	nstat_control_state	*state,
	mbuf_t			m)
{
	errno_t	result;

	// Verify the header fits in the first mbuf
	if (mbuf_len(m) < offsetof(nstat_msg_add_src_req, param))
	{
		return EINVAL;
	}

	// Calculate the length of the parameter field
	int32_t	paramlength = mbuf_pkthdr_len(m) - offsetof(nstat_msg_add_src_req, param);
	if (paramlength < 0 || paramlength > 2 * 1024)
	{
		return EINVAL;
	}

	nstat_provider			*provider = NULL;
	nstat_provider_cookie_t	cookie = NULL;
	nstat_msg_add_src_req	*req = mbuf_data(m);
	if (mbuf_pkthdr_len(m) > mbuf_len(m))
	{
		// parameter is too large, we need to make a contiguous copy
		void	*data = OSMalloc(paramlength, nstat_malloc_tag);

		if (!data) return ENOMEM;
		result = mbuf_copydata(m, offsetof(nstat_msg_add_src_req, param), paramlength, data);
		if (result == 0)
			result = nstat_lookup_entry(req->provider, data, paramlength, &provider, &cookie);
		OSFree(data, paramlength, nstat_malloc_tag);
	}
	else
	{
		/* Parameter is contiguous in the first mbuf; use it in place. */
		result = nstat_lookup_entry(req->provider, (void*)&req->param, paramlength, &provider, &cookie);
	}

	if (result != 0)
	{
		return result;
	}

	result = nstat_control_source_add(req->hdr.context, state, provider, cookie);
	if (result != 0)
		provider->nstat_release(cookie, 0);

	return result;
}
4315 nstat_set_provider_filter(
4316 nstat_control_state
*state
,
4317 nstat_msg_add_all_srcs
*req
)
4319 nstat_provider_id_t provider_id
= req
->provider
;
4321 u_int32_t prev_ncs_watching
= atomic_or_32_ov(&state
->ncs_watching
, (1 << provider_id
));
4323 if ((prev_ncs_watching
& (1 << provider_id
)) != 0)
4326 state
->ncs_watching
|= (1 << provider_id
);
4327 state
->ncs_provider_filters
[provider_id
].npf_flags
= req
->filter
;
4328 state
->ncs_provider_filters
[provider_id
].npf_events
= req
->events
;
4329 state
->ncs_provider_filters
[provider_id
].npf_pid
= req
->target_pid
;
4330 uuid_copy(state
->ncs_provider_filters
[provider_id
].npf_uuid
, req
->target_uuid
);
4335 nstat_control_handle_add_all(
4336 nstat_control_state
*state
,
4341 // Verify the header fits in the first mbuf
4342 if (mbuf_len(m
) < sizeof(nstat_msg_add_all_srcs
))
4347 nstat_msg_add_all_srcs
*req
= mbuf_data(m
);
4348 if (req
->provider
> NSTAT_PROVIDER_LAST
) return ENOENT
;
4350 nstat_provider
*provider
= nstat_find_provider_by_id(req
->provider
);
4352 if (!provider
) return ENOENT
;
4353 if (provider
->nstat_watcher_add
== NULL
) return ENOTSUP
;
4355 if (nstat_privcheck
!= 0) {
4356 result
= priv_check_cred(kauth_cred_get(),
4357 PRIV_NET_PRIVILEGED_NETWORK_STATISTICS
, 0);
4362 lck_mtx_lock(&state
->ncs_mtx
);
4363 if (req
->filter
& NSTAT_FILTER_SUPPRESS_SRC_ADDED
)
4365 // Suppression of source messages implicitly requires the use of update messages
4366 state
->ncs_flags
|= NSTAT_FLAG_SUPPORTS_UPDATES
;
4368 lck_mtx_unlock(&state
->ncs_mtx
);
4370 // rdar://problem/30301300 Different providers require different synchronization
4371 // to ensure that a new entry does not get double counted due to being added prior
4372 // to all current provider entries being added. Hence pass the provider the details
4373 // in the original request for this to be applied atomically
4375 result
= provider
->nstat_watcher_add(state
, req
);
4378 nstat_enqueue_success(req
->hdr
.context
, state
, 0);
4384 nstat_control_source_add(
4386 nstat_control_state
*state
,
4387 nstat_provider
*provider
,
4388 nstat_provider_cookie_t cookie
)
4390 // Fill out source added message if appropriate
4392 nstat_src_ref_t
*srcrefp
= NULL
;
4394 u_int64_t provider_filter_flagss
=
4395 state
->ncs_provider_filters
[provider
->nstat_provider_id
].npf_flags
;
4396 boolean_t tell_user
=
4397 ((provider_filter_flagss
& NSTAT_FILTER_SUPPRESS_SRC_ADDED
) == 0);
4398 u_int32_t src_filter
=
4399 (provider_filter_flagss
& NSTAT_FILTER_PROVIDER_NOZEROBYTES
)
4400 ? NSTAT_FILTER_NOZEROBYTES
: 0;
4402 if (provider_filter_flagss
& NSTAT_FILTER_TCP_NO_EARLY_CLOSE
)
4404 src_filter
|= NSTAT_FILTER_TCP_NO_EARLY_CLOSE
;
4409 unsigned int one
= 1;
4411 if (mbuf_allocpacket(MBUF_DONTWAIT
, sizeof(nstat_msg_src_added
),
4415 mbuf_setlen(msg
, sizeof(nstat_msg_src_added
));
4416 mbuf_pkthdr_setlen(msg
, mbuf_len(msg
));
4417 nstat_msg_src_added
*add
= mbuf_data(msg
);
4418 bzero(add
, sizeof(*add
));
4419 add
->hdr
.type
= NSTAT_MSG_TYPE_SRC_ADDED
;
4420 add
->hdr
.length
= mbuf_len(msg
);
4421 add
->hdr
.context
= context
;
4422 add
->provider
= provider
->nstat_provider_id
;
4423 srcrefp
= &add
->srcref
;
4426 // Allocate storage for the source
4427 nstat_src
*src
= OSMalloc(sizeof(*src
), nstat_malloc_tag
);
4430 if (msg
) mbuf_freem(msg
);
4434 // Fill in the source, including picking an unused source ref
4435 lck_mtx_lock(&state
->ncs_mtx
);
4437 src
->srcref
= nstat_control_next_src_ref(state
);
4439 *srcrefp
= src
->srcref
;
4441 if (state
->ncs_flags
& NSTAT_FLAG_CLEANUP
|| src
->srcref
== NSTAT_SRC_REF_INVALID
)
4443 lck_mtx_unlock(&state
->ncs_mtx
);
4444 OSFree(src
, sizeof(*src
), nstat_malloc_tag
);
4445 if (msg
) mbuf_freem(msg
);
4448 src
->provider
= provider
;
4449 src
->cookie
= cookie
;
4450 src
->filter
= src_filter
;
4455 // send the source added message if appropriate
4456 errno_t result
= ctl_enqueuembuf(state
->ncs_kctl
, state
->ncs_unit
, msg
,
4460 nstat_stats
.nstat_srcaddedfailures
+= 1;
4461 lck_mtx_unlock(&state
->ncs_mtx
);
4462 OSFree(src
, sizeof(*src
), nstat_malloc_tag
);
4467 // Put the source in the list
4468 TAILQ_INSERT_HEAD(&state
->ncs_src_queue
, src
, ns_control_link
);
4469 src
->ns_control
= state
;
4471 lck_mtx_unlock(&state
->ncs_mtx
);
4477 nstat_control_handle_remove_request(
4478 nstat_control_state
*state
,
4481 nstat_src_ref_t srcref
= NSTAT_SRC_REF_INVALID
;
4484 if (mbuf_copydata(m
, offsetof(nstat_msg_rem_src_req
, srcref
), sizeof(srcref
), &srcref
) != 0)
4489 lck_mtx_lock(&state
->ncs_mtx
);
4491 // Remove this source as we look for it
4492 TAILQ_FOREACH(src
, &state
->ncs_src_queue
, ns_control_link
)
4494 if (src
->srcref
== srcref
)
4501 TAILQ_REMOVE(&state
->ncs_src_queue
, src
, ns_control_link
);
4504 lck_mtx_unlock(&state
->ncs_mtx
);
4506 if (src
) nstat_control_cleanup_source(state
, src
, FALSE
);
4508 return src
? 0 : ENOENT
;
4512 nstat_control_handle_query_request(
4513 nstat_control_state
*state
,
4516 // TBD: handle this from another thread so we can enqueue a lot of data
4517 // As written, if a client requests query all, this function will be
4518 // called from their send of the request message. We will attempt to write
4519 // responses and succeed until the buffer fills up. Since the clients thread
4520 // is blocked on send, it won't be reading unless the client has two threads
4521 // using this socket, one for read and one for write. Two threads probably
4522 // won't work with this code anyhow since we don't have proper locking in
4524 tailq_head_nstat_src dead_list
;
4525 errno_t result
= ENOENT
;
4526 nstat_msg_query_src_req req
;
4528 if (mbuf_copydata(m
, 0, sizeof(req
), &req
) != 0)
4533 const boolean_t all_srcs
= (req
.srcref
== NSTAT_SRC_REF_ALL
);
4534 TAILQ_INIT(&dead_list
);
4536 lck_mtx_lock(&state
->ncs_mtx
);
4540 state
->ncs_flags
|= NSTAT_FLAG_REQCOUNTS
;
4542 nstat_src
*src
, *tmpsrc
;
4543 u_int64_t src_count
= 0;
4544 boolean_t partial
= FALSE
;
4547 * Error handling policy and sequence number generation is folded into
4548 * nstat_control_begin_query.
4550 partial
= nstat_control_begin_query(state
, &req
.hdr
);
4553 TAILQ_FOREACH_SAFE(src
, &state
->ncs_src_queue
, ns_control_link
, tmpsrc
)
4557 // XXX ignore IFACE types?
4558 if (all_srcs
|| src
->srcref
== req
.srcref
)
4560 if (nstat_control_reporting_allowed(state
, src
)
4561 && (!partial
|| !all_srcs
|| src
->seq
!= state
->ncs_seq
))
4564 (req
.hdr
.flags
& NSTAT_MSG_HDR_FLAG_SUPPORTS_AGGREGATE
) != 0)
4566 result
= nstat_control_append_counts(state
, src
, &gone
);
4570 result
= nstat_control_send_counts(state
, src
, req
.hdr
.context
, 0, &gone
);
4573 if (ENOMEM
== result
|| ENOBUFS
== result
)
4576 * If the counts message failed to
4577 * enqueue then we should clear our flag so
4578 * that a client doesn't miss anything on
4579 * idle cleanup. We skip the "gone"
4580 * processing in the hope that we may
4581 * catch it another time.
4583 state
->ncs_flags
&= ~NSTAT_FLAG_REQCOUNTS
;
4589 * We skip over hard errors and
4592 src
->seq
= state
->ncs_seq
;
4600 // send one last descriptor message so client may see last state
4601 // If we can't send the notification now, it
4602 // will be sent in the idle cleanup.
4603 result
= nstat_control_send_description(state
, src
, 0, 0);
4606 nstat_stats
.nstat_control_send_description_failures
++;
4607 if (nstat_debug
!= 0)
4608 printf("%s - nstat_control_send_description() %d\n", __func__
, result
);
4609 state
->ncs_flags
&= ~NSTAT_FLAG_REQCOUNTS
;
4613 // pull src out of the list
4614 TAILQ_REMOVE(&state
->ncs_src_queue
, src
, ns_control_link
);
4615 TAILQ_INSERT_TAIL(&dead_list
, src
, ns_control_link
);
4620 if (src_count
>= QUERY_CONTINUATION_SRC_COUNT
)
4625 else if (req
.srcref
== src
->srcref
)
4631 nstat_flush_accumulated_msgs(state
);
4633 u_int16_t flags
= 0;
4634 if (req
.srcref
== NSTAT_SRC_REF_ALL
)
4635 flags
= nstat_control_end_query(state
, src
, partial
);
4637 lck_mtx_unlock(&state
->ncs_mtx
);
4640 * If an error occurred enqueueing data, then allow the error to
4641 * propagate to nstat_control_send. This way, the error is sent to
4644 if (all_srcs
&& ENOMEM
!= result
&& ENOBUFS
!= result
)
4646 nstat_enqueue_success(req
.hdr
.context
, state
, flags
);
4650 while ((src
= TAILQ_FIRST(&dead_list
)))
4652 TAILQ_REMOVE(&dead_list
, src
, ns_control_link
);
4653 nstat_control_cleanup_source(state
, src
, FALSE
);
4660 nstat_control_handle_get_src_description(
4661 nstat_control_state
*state
,
4664 nstat_msg_get_src_description req
;
4665 errno_t result
= ENOENT
;
4668 if (mbuf_copydata(m
, 0, sizeof(req
), &req
) != 0)
4673 lck_mtx_lock(&state
->ncs_mtx
);
4674 u_int64_t src_count
= 0;
4675 boolean_t partial
= FALSE
;
4676 const boolean_t all_srcs
= (req
.srcref
== NSTAT_SRC_REF_ALL
);
4679 * Error handling policy and sequence number generation is folded into
4680 * nstat_control_begin_query.
4682 partial
= nstat_control_begin_query(state
, &req
.hdr
);
4684 TAILQ_FOREACH(src
, &state
->ncs_src_queue
, ns_control_link
)
4686 if (all_srcs
|| src
->srcref
== req
.srcref
)
4688 if (nstat_control_reporting_allowed(state
, src
)
4689 && (!all_srcs
|| !partial
|| src
->seq
!= state
->ncs_seq
))
4691 if ((req
.hdr
.flags
& NSTAT_MSG_HDR_FLAG_SUPPORTS_AGGREGATE
) != 0 && all_srcs
)
4693 result
= nstat_control_append_description(state
, src
);
4697 result
= nstat_control_send_description(state
, src
, req
.hdr
.context
, 0);
4700 if (ENOMEM
== result
|| ENOBUFS
== result
)
4703 * If the description message failed to
4704 * enqueue then we give up for now.
4711 * Note, we skip over hard errors and
4714 src
->seq
= state
->ncs_seq
;
4716 if (src_count
>= QUERY_CONTINUATION_SRC_COUNT
)
4729 nstat_flush_accumulated_msgs(state
);
4731 u_int16_t flags
= 0;
4732 if (req
.srcref
== NSTAT_SRC_REF_ALL
)
4733 flags
= nstat_control_end_query(state
, src
, partial
);
4735 lck_mtx_unlock(&state
->ncs_mtx
);
4737 * If an error occurred enqueueing data, then allow the error to
4738 * propagate to nstat_control_send. This way, the error is sent to
4741 if (all_srcs
&& ENOMEM
!= result
&& ENOBUFS
!= result
)
4743 nstat_enqueue_success(req
.hdr
.context
, state
, flags
);
4751 nstat_control_handle_set_filter(
4752 nstat_control_state
*state
,
4755 nstat_msg_set_filter req
;
4758 if (mbuf_copydata(m
, 0, sizeof(req
), &req
) != 0)
4760 if (req
.srcref
== NSTAT_SRC_REF_ALL
||
4761 req
.srcref
== NSTAT_SRC_REF_INVALID
)
4764 lck_mtx_lock(&state
->ncs_mtx
);
4765 TAILQ_FOREACH(src
, &state
->ncs_src_queue
, ns_control_link
)
4767 if (req
.srcref
== src
->srcref
)
4769 src
->filter
= req
.filter
;
4773 lck_mtx_unlock(&state
->ncs_mtx
);
4782 nstat_control_state
*state
,
4787 struct nstat_msg_error err
;
4789 bzero(&err
, sizeof(err
));
4790 err
.hdr
.type
= NSTAT_MSG_TYPE_ERROR
;
4791 err
.hdr
.length
= sizeof(err
);
4792 err
.hdr
.context
= context
;
4795 result
= ctl_enqueuedata(state
->ncs_kctl
, state
->ncs_unit
, &err
,
4796 sizeof(err
), CTL_DATA_EOR
| CTL_DATA_CRIT
);
4798 nstat_stats
.nstat_msgerrorfailures
++;
4802 nstat_control_begin_query(
4803 nstat_control_state
*state
,
4804 const nstat_msg_hdr
*hdrp
)
4806 boolean_t partial
= FALSE
;
4808 if (hdrp
->flags
& NSTAT_MSG_HDR_FLAG_CONTINUATION
)
4810 /* A partial query all has been requested. */
4813 if (state
->ncs_context
!= hdrp
->context
)
4815 if (state
->ncs_context
!= 0)
4816 nstat_send_error(state
, state
->ncs_context
, EAGAIN
);
4818 /* Initialize state for a partial query all. */
4819 state
->ncs_context
= hdrp
->context
;
4828 nstat_control_end_query(
4829 nstat_control_state
*state
,
4830 nstat_src
*last_src
,
4833 u_int16_t flags
= 0;
4835 if (last_src
== NULL
|| !partial
)
4838 * We iterated through the entire srcs list or exited early
4839 * from the loop when a partial update was not requested (an
4840 * error occurred), so clear context to indicate internally
4841 * that the query is finished.
4843 state
->ncs_context
= 0;
4848 * Indicate to userlevel to make another partial request as
4849 * there are still sources left to be reported.
4851 flags
|= NSTAT_MSG_HDR_FLAG_CONTINUATION
;
4858 nstat_control_handle_get_update(
4859 nstat_control_state
*state
,
4862 nstat_msg_query_src_req req
;
4864 if (mbuf_copydata(m
, 0, sizeof(req
), &req
) != 0)
4869 lck_mtx_lock(&state
->ncs_mtx
);
4871 state
->ncs_flags
|= NSTAT_FLAG_SUPPORTS_UPDATES
;
4873 errno_t result
= ENOENT
;
4874 nstat_src
*src
, *tmpsrc
;
4875 tailq_head_nstat_src dead_list
;
4876 u_int64_t src_count
= 0;
4877 boolean_t partial
= FALSE
;
4878 TAILQ_INIT(&dead_list
);
4881 * Error handling policy and sequence number generation is folded into
4882 * nstat_control_begin_query.
4884 partial
= nstat_control_begin_query(state
, &req
.hdr
);
4886 TAILQ_FOREACH_SAFE(src
, &state
->ncs_src_queue
, ns_control_link
, tmpsrc
)
4891 if (nstat_control_reporting_allowed(state
, src
))
4893 /* skip this source if it has the current state
4894 * sequence number as it's already been reported in
4895 * this query-all partial sequence. */
4896 if (req
.srcref
== NSTAT_SRC_REF_ALL
4897 && (FALSE
== partial
|| src
->seq
!= state
->ncs_seq
))
4899 result
= nstat_control_append_update(state
, src
, &gone
);
4900 if (ENOMEM
== result
|| ENOBUFS
== result
)
4903 * If the update message failed to
4904 * enqueue then give up.
4911 * We skip over hard errors and
4914 src
->seq
= state
->ncs_seq
;
4918 else if (src
->srcref
== req
.srcref
)
4920 result
= nstat_control_send_update(state
, src
, req
.hdr
.context
, 0, &gone
);
4926 // pull src out of the list
4927 TAILQ_REMOVE(&state
->ncs_src_queue
, src
, ns_control_link
);
4928 TAILQ_INSERT_TAIL(&dead_list
, src
, ns_control_link
);
4931 if (req
.srcref
!= NSTAT_SRC_REF_ALL
&& req
.srcref
== src
->srcref
)
4935 if (src_count
>= QUERY_CONTINUATION_SRC_COUNT
)
4941 nstat_flush_accumulated_msgs(state
);
4944 u_int16_t flags
= 0;
4945 if (req
.srcref
== NSTAT_SRC_REF_ALL
)
4946 flags
= nstat_control_end_query(state
, src
, partial
);
4948 lck_mtx_unlock(&state
->ncs_mtx
);
4950 * If an error occurred enqueueing data, then allow the error to
4951 * propagate to nstat_control_send. This way, the error is sent to
4954 if (req
.srcref
== NSTAT_SRC_REF_ALL
&& ENOMEM
!= result
&& ENOBUFS
!= result
)
4956 nstat_enqueue_success(req
.hdr
.context
, state
, flags
);
4960 while ((src
= TAILQ_FIRST(&dead_list
)))
4962 TAILQ_REMOVE(&dead_list
, src
, ns_control_link
);
4963 // release src and send notification
4964 nstat_control_cleanup_source(state
, src
, FALSE
);
4971 nstat_control_handle_subscribe_sysinfo(
4972 nstat_control_state
*state
)
4974 errno_t result
= priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NETWORK_STATISTICS
, 0);
4981 lck_mtx_lock(&state
->ncs_mtx
);
4982 state
->ncs_flags
|= NSTAT_FLAG_SYSINFO_SUBSCRIBED
;
4983 lck_mtx_unlock(&state
->ncs_mtx
);
4996 nstat_control_state
*state
= (nstat_control_state
*)uinfo
;
4997 struct nstat_msg_hdr
*hdr
;
4998 struct nstat_msg_hdr storage
;
5001 if (mbuf_pkthdr_len(m
) < sizeof(*hdr
))
5003 // Is this the right thing to do?
5008 if (mbuf_len(m
) >= sizeof(*hdr
))
5014 mbuf_copydata(m
, 0, sizeof(storage
), &storage
);
5018 // Legacy clients may not set the length
5019 // Those clients are likely not setting the flags either
5020 // Fix everything up so old clients continue to work
5021 if (hdr
->length
!= mbuf_pkthdr_len(m
))
5024 hdr
->length
= mbuf_pkthdr_len(m
);
5025 if (hdr
== &storage
)
5027 mbuf_copyback(m
, 0, sizeof(*hdr
), hdr
, MBUF_DONTWAIT
);
5033 case NSTAT_MSG_TYPE_ADD_SRC
:
5034 result
= nstat_control_handle_add_request(state
, m
);
5037 case NSTAT_MSG_TYPE_ADD_ALL_SRCS
:
5038 result
= nstat_control_handle_add_all(state
, m
);
5041 case NSTAT_MSG_TYPE_REM_SRC
:
5042 result
= nstat_control_handle_remove_request(state
, m
);
5045 case NSTAT_MSG_TYPE_QUERY_SRC
:
5046 result
= nstat_control_handle_query_request(state
, m
);
5049 case NSTAT_MSG_TYPE_GET_SRC_DESC
:
5050 result
= nstat_control_handle_get_src_description(state
, m
);
5053 case NSTAT_MSG_TYPE_SET_FILTER
:
5054 result
= nstat_control_handle_set_filter(state
, m
);
5057 case NSTAT_MSG_TYPE_GET_UPDATE
:
5058 result
= nstat_control_handle_get_update(state
, m
);
5061 case NSTAT_MSG_TYPE_SUBSCRIBE_SYSINFO
:
5062 result
= nstat_control_handle_subscribe_sysinfo(state
);
5072 struct nstat_msg_error err
;
5074 bzero(&err
, sizeof(err
));
5075 err
.hdr
.type
= NSTAT_MSG_TYPE_ERROR
;
5076 err
.hdr
.length
= sizeof(err
) + mbuf_pkthdr_len(m
);
5077 err
.hdr
.context
= hdr
->context
;
5080 if (mbuf_prepend(&m
, sizeof(err
), MBUF_DONTWAIT
) == 0 &&
5081 mbuf_copyback(m
, 0, sizeof(err
), &err
, MBUF_DONTWAIT
) == 0)
5083 result
= ctl_enqueuembuf(kctl
, unit
, m
, CTL_DATA_EOR
| CTL_DATA_CRIT
);
5093 // Unable to prepend the error to the request - just send the error
5094 err
.hdr
.length
= sizeof(err
);
5095 result
= ctl_enqueuedata(kctl
, unit
, &err
, sizeof(err
),
5096 CTL_DATA_EOR
| CTL_DATA_CRIT
);
5098 nstat_stats
.nstat_msgerrorfailures
+= 1;
5100 nstat_stats
.nstat_handle_msg_failures
+= 1;
5103 if (m
) mbuf_freem(m
);