/*
 * Copyright (c) 2010-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/param.h>
#include <sys/types.h>
#include <sys/kpi_mbuf.h>
#include <sys/socket.h>
#include <sys/kern_control.h>
#include <sys/mcache.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/priv.h>
#include <sys/protosw.h>

#include <kern/clock.h>
#include <kern/debug.h>

#include <libkern/libkern.h>
#include <libkern/OSAtomic.h>
#include <libkern/locks.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/route.h>

// These includes appear in ntstat.h but we include them here first so they won't trigger
// any clang diagnostic errors.
#include <netinet/in.h>
#include <netinet/in_stat.h>
#include <netinet/tcp.h>

#pragma clang diagnostic push
#pragma clang diagnostic error "-Wpadded"
#pragma clang diagnostic error "-Wpacked"
// This header defines structures shared with user space, so we need to ensure there is
// no compiler inserted padding in case the user space process isn't using the same
// architecture as the kernel (example: i386 process with x86_64 kernel).
#include <net/ntstat.h>
#pragma clang diagnostic pop
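/*
 * Illustrative note (not in the original source): since these structures
 * cross the kernel/user boundary, a hypothetical compile-time check such as
 *
 *   _Static_assert(sizeof(nstat_msg_hdr) % sizeof(u_int64_t) == 0,
 *       "nstat_msg_hdr must not rely on implicit padding");
 *
 * would catch layout drift the same way the -Wpadded/-Wpacked errors above
 * do at compile time.
 */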
#include <netinet/ip_var.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_cc.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/in6_var.h>
__private_extern__ int nstat_collect = 1;

#if (DEBUG || DEVELOPMENT)
SYSCTL_INT(_net, OID_AUTO, statistics, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_collect, 0, "Collect detailed statistics");
#endif /* (DEBUG || DEVELOPMENT) */
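/*
 * Illustrative usage (not in the original source): on DEBUG/DEVELOPMENT
 * kernels the knob above is exposed to user space as net.statistics, e.g.
 *
 *   sysctl net.statistics        # read whether detailed collection is on
 *   sysctl -w net.statistics=0   # turn detailed collection off
 */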
#if !XNU_TARGET_OS_OSX
static int nstat_privcheck = 1;
#else /* XNU_TARGET_OS_OSX */
static int nstat_privcheck = 0;
#endif /* XNU_TARGET_OS_OSX */
SYSCTL_INT(_net, OID_AUTO, statistics_privcheck, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_privcheck, 0, "Entitlement check");

SYSCTL_NODE(_net, OID_AUTO, stats,
    CTLFLAG_RW | CTLFLAG_LOCKED, 0, "network statistics");
static int nstat_debug = 0;
SYSCTL_INT(_net_stats, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_debug, 0, "");
static int nstat_sendspace = 2048;
SYSCTL_INT(_net_stats, OID_AUTO, sendspace, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_sendspace, 0, "");

static int nstat_recvspace = 8192;
SYSCTL_INT(_net_stats, OID_AUTO, recvspace, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_recvspace, 0, "");
static struct nstat_stats nstat_stats;
SYSCTL_STRUCT(_net_stats, OID_AUTO, stats, CTLFLAG_RD | CTLFLAG_LOCKED,
    &nstat_stats, nstat_stats, "");
static u_int32_t nstat_lim_interval = 30 * 60; /* Report interval, seconds */
static u_int32_t nstat_lim_min_tx_pkts = 100;
static u_int32_t nstat_lim_min_rx_pkts = 100;
#if (DEBUG || DEVELOPMENT)
SYSCTL_INT(_net_stats, OID_AUTO, lim_report_interval,
    CTLFLAG_RW | CTLFLAG_LOCKED, &nstat_lim_interval, 0,
    "Low internet stat report interval");

SYSCTL_INT(_net_stats, OID_AUTO, lim_min_tx_pkts,
    CTLFLAG_RW | CTLFLAG_LOCKED, &nstat_lim_min_tx_pkts, 0,
    "Low Internet, min transmit packets threshold");

SYSCTL_INT(_net_stats, OID_AUTO, lim_min_rx_pkts,
    CTLFLAG_RW | CTLFLAG_LOCKED, &nstat_lim_min_rx_pkts, 0,
    "Low Internet, min receive packets threshold");
#endif /* DEBUG || DEVELOPMENT */
static struct net_api_stats net_api_stats_before;
static u_int64_t net_api_stats_last_report_time;
#define NET_API_STATS_REPORT_INTERVAL (12 * 60 * 60) /* 12 hours, in seconds */
static u_int32_t net_api_stats_report_interval = NET_API_STATS_REPORT_INTERVAL;

#if (DEBUG || DEVELOPMENT)
SYSCTL_UINT(_net_stats, OID_AUTO, api_report_interval,
    CTLFLAG_RW | CTLFLAG_LOCKED, &net_api_stats_report_interval, 0, "");
#endif /* DEBUG || DEVELOPMENT */
enum {
	NSTAT_FLAG_CLEANUP              = (1 << 0),
	NSTAT_FLAG_REQCOUNTS            = (1 << 1),
	NSTAT_FLAG_SUPPORTS_UPDATES     = (1 << 2),
	NSTAT_FLAG_SYSINFO_SUBSCRIBED   = (1 << 3),
};
#if !XNU_TARGET_OS_OSX
#define QUERY_CONTINUATION_SRC_COUNT 50
#else /* XNU_TARGET_OS_OSX */
#define QUERY_CONTINUATION_SRC_COUNT 100
#endif /* XNU_TARGET_OS_OSX */
typedef TAILQ_HEAD(, nstat_src) tailq_head_nstat_src;
typedef TAILQ_ENTRY(nstat_src) tailq_entry_nstat_src;

typedef struct nstat_provider_filter {
	u_int64_t       npf_flags;
	u_int64_t       npf_events;
	pid_t           npf_pid;
	uuid_t          npf_uuid;
} nstat_provider_filter;
typedef struct nstat_control_state {
	struct nstat_control_state      *ncs_next;
	u_int32_t                       ncs_watching;
	decl_lck_mtx_data(, ncs_mtx);
	kern_ctl_ref                    ncs_kctl;
	nstat_src_ref_t                 ncs_next_srcref;
	tailq_head_nstat_src            ncs_src_queue;
	mbuf_t                          ncs_accumulated;
	nstat_provider_filter           ncs_provider_filters[NSTAT_PROVIDER_COUNT];
	/* state maintained for partial query requests */
	u_int64_t                       ncs_context;
} nstat_control_state;
typedef struct nstat_provider {
	struct nstat_provider   *next;
	nstat_provider_id_t     nstat_provider_id;
	size_t                  nstat_descriptor_length;
	errno_t                 (*nstat_lookup)(const void *data, u_int32_t length, nstat_provider_cookie_t *out_cookie);
	int                     (*nstat_gone)(nstat_provider_cookie_t cookie);
	errno_t                 (*nstat_counts)(nstat_provider_cookie_t cookie, struct nstat_counts *out_counts, int *out_gone);
	errno_t                 (*nstat_watcher_add)(nstat_control_state *state, nstat_msg_add_all_srcs *req);
	void                    (*nstat_watcher_remove)(nstat_control_state *state);
	errno_t                 (*nstat_copy_descriptor)(nstat_provider_cookie_t cookie, void *data, size_t len);
	void                    (*nstat_release)(nstat_provider_cookie_t cookie, boolean_t locked);
	bool                    (*nstat_reporting_allowed)(nstat_provider_cookie_t cookie, nstat_provider_filter *filter);
} nstat_provider;

typedef STAILQ_HEAD(, nstat_src) stailq_head_nstat_src;
typedef STAILQ_ENTRY(nstat_src) stailq_entry_nstat_src;

typedef TAILQ_HEAD(, nstat_tu_shadow) tailq_head_tu_shadow;
typedef TAILQ_ENTRY(nstat_tu_shadow) tailq_entry_tu_shadow;

typedef TAILQ_HEAD(, nstat_procdetails) tailq_head_procdetails;
typedef TAILQ_ENTRY(nstat_procdetails) tailq_entry_procdetails;
typedef struct nstat_src {
	tailq_entry_nstat_src   ns_control_link;        // All sources for the nstat_control_state, for iterating over.
	nstat_control_state     *ns_control;            // The nstat_control_state that this is a source for
	nstat_src_ref_t         srcref;
	nstat_provider          *provider;
	nstat_provider_cookie_t cookie;
} nstat_src;
static errno_t nstat_control_send_counts(nstat_control_state *, nstat_src *, unsigned long long, u_int16_t, int *);
static int nstat_control_send_description(nstat_control_state *state, nstat_src *src, u_int64_t context, u_int16_t hdr_flags);
static int nstat_control_send_update(nstat_control_state *state, nstat_src *src, u_int64_t context, u_int64_t event, u_int16_t hdr_flags, int *gone);
static errno_t nstat_control_send_removed(nstat_control_state *, nstat_src *);
static errno_t nstat_control_send_goodbye(nstat_control_state *state, nstat_src *src);
static void nstat_control_cleanup_source(nstat_control_state *state, nstat_src *src, boolean_t);
static bool nstat_control_reporting_allowed(nstat_control_state *state, nstat_src *src);
static boolean_t nstat_control_begin_query(nstat_control_state *state, const nstat_msg_hdr *hdrp);
static u_int16_t nstat_control_end_query(nstat_control_state *state, nstat_src *last_src, boolean_t partial);
static void nstat_ifnet_report_ecn_stats(void);
static void nstat_ifnet_report_lim_stats(void);
static void nstat_net_api_report_stats(void);
static errno_t nstat_set_provider_filter(nstat_control_state *state, nstat_msg_add_all_srcs *req);
static errno_t nstat_control_send_event(nstat_control_state *state, nstat_src *src, u_int64_t event);

static u_int32_t nstat_udp_watchers = 0;
static u_int32_t nstat_tcp_watchers = 0;

static void nstat_control_register(void);
/*
 * The lock order is as follows:
 *
 * socket_lock (inpcb)
 *     nstat_mtx
 *         state->ncs_mtx
 */
static KALLOC_HEAP_DEFINE(KHEAP_NET_STAT, NET_STAT_CONTROL_NAME,
    KHEAP_ID_DEFAULT);
static nstat_control_state *nstat_controls = NULL;
static uint64_t nstat_idle_time = 0;
static decl_lck_mtx_data(, nstat_mtx);

/* some extern definitions */
extern void mbuf_report_peak_usage(void);
extern void tcp_report_stats(void);
static void
nstat_copy_sa_out(
	const struct sockaddr   *src,
	struct sockaddr         *dst,
	int                     maxlen)
{
	if (src->sa_len > maxlen) {
		return;
	}

	bcopy(src, dst, src->sa_len);
	if (src->sa_family == AF_INET6 &&
	    src->sa_len >= sizeof(struct sockaddr_in6)) {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)(void *)dst;
		if (IN6_IS_SCOPE_EMBED(&sin6->sin6_addr)) {
			if (sin6->sin6_scope_id == 0) {
				sin6->sin6_scope_id = ntohs(sin6->sin6_addr.s6_addr16[1]);
			}
			sin6->sin6_addr.s6_addr16[1] = 0;
		}
	}
}
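/*
 * Illustrative example (not in the original source): KAME-derived stacks
 * embed the scope of a link-local address in s6_addr16[1], so an in-kernel
 * address of fe80:4::1 is rewritten above to fe80::1 with
 * sin6_scope_id = 4 before being exported to user space.
 */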
static void
nstat_ip_to_sockaddr(
	const struct in_addr    *ip,
	u_int16_t               port,
	struct sockaddr_in      *sin,
	u_int32_t               maxlen)
{
	if (maxlen < sizeof(struct sockaddr_in)) {
		return;
	}

	sin->sin_family = AF_INET;
	sin->sin_len = sizeof(*sin);
	sin->sin_port = port;
	sin->sin_addr = *ip;
}
static u_int16_t
nstat_ifnet_to_flags(
	struct ifnet *ifp)
{
	u_int16_t flags = 0;
	u_int32_t functional_type = if_functional_type(ifp, FALSE);

	/* Panic if someone adds a functional type without updating ntstat. */
	VERIFY(0 <= functional_type && functional_type <= IFRTYPE_FUNCTIONAL_LAST);

	switch (functional_type) {
	case IFRTYPE_FUNCTIONAL_UNKNOWN:
		flags |= NSTAT_IFNET_IS_UNKNOWN_TYPE;
		break;
	case IFRTYPE_FUNCTIONAL_LOOPBACK:
		flags |= NSTAT_IFNET_IS_LOOPBACK;
		break;
	case IFRTYPE_FUNCTIONAL_WIRED:
	case IFRTYPE_FUNCTIONAL_INTCOPROC:
		flags |= NSTAT_IFNET_IS_WIRED;
		break;
	case IFRTYPE_FUNCTIONAL_WIFI_INFRA:
		flags |= NSTAT_IFNET_IS_WIFI;
		break;
	case IFRTYPE_FUNCTIONAL_WIFI_AWDL:
		flags |= NSTAT_IFNET_IS_WIFI;
		flags |= NSTAT_IFNET_IS_AWDL;
		break;
	case IFRTYPE_FUNCTIONAL_CELLULAR:
		flags |= NSTAT_IFNET_IS_CELLULAR;
		break;
	case IFRTYPE_FUNCTIONAL_COMPANIONLINK:
		flags |= NSTAT_IFNET_IS_COMPANIONLINK;
		break;
	}

	if (IFNET_IS_EXPENSIVE(ifp)) {
		flags |= NSTAT_IFNET_IS_EXPENSIVE;
	}
	if (IFNET_IS_CONSTRAINED(ifp)) {
		flags |= NSTAT_IFNET_IS_CONSTRAINED;
	}

	return flags;
}
static u_int16_t
nstat_inpcb_to_flags(
	const struct inpcb *inp)
{
	u_int16_t flags = 0;

	if (inp != NULL) {
		if (inp->inp_last_outifp != NULL) {
			struct ifnet *ifp = inp->inp_last_outifp;
			flags = nstat_ifnet_to_flags(ifp);

			struct tcpcb *tp = intotcpcb(inp);
			if (tp) {
				if (tp->t_flags & TF_LOCAL) {
					flags |= NSTAT_IFNET_IS_LOCAL;
				} else {
					flags |= NSTAT_IFNET_IS_NON_LOCAL;
				}
			}
		} else {
			flags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
		}
		if (inp->inp_socket != NULL &&
		    (inp->inp_socket->so_flags1 & SOF1_CELLFALLBACK)) {
			flags |= NSTAT_IFNET_VIA_CELLFALLBACK;
		}
	}
	return flags;
}
#pragma mark -- Network Statistic Providers --

static errno_t nstat_control_source_add(u_int64_t context, nstat_control_state *state, nstat_provider *provider, nstat_provider_cookie_t cookie);
struct nstat_provider *nstat_providers = NULL;
static struct nstat_provider *
nstat_find_provider_by_id(
	nstat_provider_id_t id)
{
	struct nstat_provider *provider;

	for (provider = nstat_providers; provider != NULL; provider = provider->next) {
		if (provider->nstat_provider_id == id) {
			break;
		}
	}

	return provider;
}

static errno_t
nstat_lookup_entry(
	nstat_provider_id_t     id,
	const void              *data,
	u_int32_t               length,
	nstat_provider          **out_provider,
	nstat_provider_cookie_t *out_cookie)
{
	*out_provider = nstat_find_provider_by_id(id);
	if (*out_provider == NULL) {
		return ENOENT;
	}

	return (*out_provider)->nstat_lookup(data, length, out_cookie);
}
static void nstat_init_route_provider(void);
static void nstat_init_tcp_provider(void);
static void nstat_init_udp_provider(void);
static void nstat_init_ifnet_provider(void);

__private_extern__ void
nstat_init(void)
{
	nstat_init_route_provider();
	nstat_init_tcp_provider();
	nstat_init_udp_provider();
	nstat_init_ifnet_provider();
	nstat_control_register();
}
#pragma mark -- Aligned Buffer Allocation --

struct align_header {
	u_int32_t       offset;
	u_int32_t       length;
};

static void *
nstat_malloc_aligned(
	size_t          length,
	u_int8_t        alignment,
	zalloc_flags_t  flags)
{
	struct align_header *hdr = NULL;
	size_t size = length + sizeof(*hdr) + alignment - 1;

	// Arbitrary limit to prevent abuse
	if (length > (64 * 1024)) {
		return NULL;
	}

	u_int8_t *buffer = kheap_alloc(KHEAP_NET_STAT, size, flags);
	if (buffer == NULL) {
		return NULL;
	}

	u_int8_t *aligned = buffer + sizeof(*hdr);
	aligned = (u_int8_t *)P2ROUNDUP(aligned, alignment);

	hdr = (struct align_header *)(void *)(aligned - sizeof(*hdr));
	hdr->offset = aligned - buffer;
	hdr->length = size;

	return aligned;
}

static void
nstat_free_aligned(
	void *buffer)
{
	struct align_header *hdr = (struct align_header *)(void *)((u_int8_t *)buffer - sizeof(*hdr));
	(kheap_free)(KHEAP_NET_STAT, (char *)buffer - hdr->offset, hdr->length);
}
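/*
 * Layout sketch (not in the original source): the pointer handed back by
 * nstat_malloc_aligned() is preceded by a struct align_header recording the
 * distance to the raw kheap allocation, which nstat_free_aligned() uses to
 * recover the original address and size:
 *
 *   [ raw buffer ... pad ][ align_header ][ aligned payload ... ]
 *   |<-------- hdr->offset ------------->|
 */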
#pragma mark -- Route Provider --

static nstat_provider nstat_route_provider;

static errno_t
nstat_route_lookup(
	const void              *data,
	u_int32_t               length,
	nstat_provider_cookie_t *out_cookie)
{
	// rt_lookup doesn't take const params but it doesn't modify the parameters for
	// the lookup. So...we use a union to eliminate the warning.
	union {
		struct sockaddr         *sa;
		const struct sockaddr   *const_sa;
	} dst, mask;

	const nstat_route_add_param *param = (const nstat_route_add_param *)data;

	if (length < sizeof(*param)) {
		return EINVAL;
	}

	if (param->dst.v4.sin_family == 0 ||
	    param->dst.v4.sin_family > AF_MAX ||
	    (param->mask.v4.sin_family != 0 && param->mask.v4.sin_family != param->dst.v4.sin_family)) {
		return EINVAL;
	}

	if (param->dst.v4.sin_len > sizeof(param->dst) ||
	    (param->mask.v4.sin_family && param->mask.v4.sin_len > sizeof(param->mask.v4.sin_len))) {
		return EINVAL;
	}
	if ((param->dst.v4.sin_family == AF_INET &&
	    param->dst.v4.sin_len < sizeof(struct sockaddr_in)) ||
	    (param->dst.v6.sin6_family == AF_INET6 &&
	    param->dst.v6.sin6_len < sizeof(struct sockaddr_in6))) {
		return EINVAL;
	}

	dst.const_sa = (const struct sockaddr *)&param->dst;
	mask.const_sa = param->mask.v4.sin_family ? (const struct sockaddr *)&param->mask : NULL;

	struct radix_node_head *rnh = rt_tables[dst.sa->sa_family];
	if (rnh == NULL) {
		return EAFNOSUPPORT;
	}

	lck_mtx_lock(rnh_lock);
	struct rtentry *rt = rt_lookup(TRUE, dst.sa, mask.sa, rnh, param->ifindex);
	lck_mtx_unlock(rnh_lock);

	if (rt) {
		*out_cookie = (nstat_provider_cookie_t)rt;
	}

	return rt ? 0 : ENOENT;
}
static int
nstat_route_gone(
	nstat_provider_cookie_t cookie)
{
	struct rtentry *rt = (struct rtentry *)cookie;
	return ((rt->rt_flags & RTF_UP) == 0) ? 1 : 0;
}
static errno_t
nstat_route_counts(
	nstat_provider_cookie_t cookie,
	struct nstat_counts     *out_counts,
	int                     *out_gone)
{
	struct rtentry *rt = (struct rtentry *)cookie;
	struct nstat_counts *rt_stats = rt->rt_stats;

	if (out_gone) {
		*out_gone = 0;
	}

	if (out_gone && (rt->rt_flags & RTF_UP) == 0) {
		*out_gone = 1;
	}

	if (rt_stats) {
		atomic_get_64(out_counts->nstat_rxpackets, &rt_stats->nstat_rxpackets);
		atomic_get_64(out_counts->nstat_rxbytes, &rt_stats->nstat_rxbytes);
		atomic_get_64(out_counts->nstat_txpackets, &rt_stats->nstat_txpackets);
		atomic_get_64(out_counts->nstat_txbytes, &rt_stats->nstat_txbytes);
		out_counts->nstat_rxduplicatebytes = rt_stats->nstat_rxduplicatebytes;
		out_counts->nstat_rxoutoforderbytes = rt_stats->nstat_rxoutoforderbytes;
		out_counts->nstat_txretransmit = rt_stats->nstat_txretransmit;
		out_counts->nstat_connectattempts = rt_stats->nstat_connectattempts;
		out_counts->nstat_connectsuccesses = rt_stats->nstat_connectsuccesses;
		out_counts->nstat_min_rtt = rt_stats->nstat_min_rtt;
		out_counts->nstat_avg_rtt = rt_stats->nstat_avg_rtt;
		out_counts->nstat_var_rtt = rt_stats->nstat_var_rtt;
		out_counts->nstat_cell_rxbytes = out_counts->nstat_cell_txbytes = 0;
	} else {
		bzero(out_counts, sizeof(*out_counts));
	}

	return 0;
}
static void
nstat_route_release(
	nstat_provider_cookie_t cookie,
	__unused int            locked)
{
	rtfree((struct rtentry *)cookie);
}

static u_int32_t nstat_route_watchers = 0;
static int
nstat_route_walktree_add(
	struct radix_node       *rn,
	void                    *context)
{
	errno_t result = 0;
	struct rtentry *rt = (struct rtentry *)rn;
	nstat_control_state *state = (nstat_control_state *)context;

	LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);

	/* RTF_UP can't change while rnh_lock is held */
	if ((rt->rt_flags & RTF_UP) != 0) {
		/* Clear RTPRF_OURS if the route is still usable */
		RT_LOCK(rt);
		if (rt_validate(rt)) {
			RT_ADDREF_LOCKED(rt);
			RT_UNLOCK(rt);
		} else {
			RT_UNLOCK(rt);
			rt = NULL;
		}

		/* Otherwise if RTF_CONDEMNED, treat it as if it were down */
		if (rt == NULL) {
			return 0;
		}

		result = nstat_control_source_add(0, state, &nstat_route_provider, rt);
		if (result != 0) {
			rtfree_locked(rt);
		}
	}

	return result;
}

static errno_t
nstat_route_add_watcher(
	nstat_control_state     *state,
	nstat_msg_add_all_srcs  *req)
{
	int i;
	errno_t result = 0;

	lck_mtx_lock(rnh_lock);

	result = nstat_set_provider_filter(state, req);
	if (result == 0) {
		OSIncrementAtomic(&nstat_route_watchers);

		for (i = 1; i < AF_MAX; i++) {
			struct radix_node_head *rnh;
			rnh = rt_tables[i];
			if (!rnh) {
				continue;
			}

			result = rnh->rnh_walktree(rnh, nstat_route_walktree_add, state);
			if (result != 0) {
				// This is probably resource exhaustion.
				// There currently isn't a good way to recover from this.
				// Least bad seems to be to give up on the add-all but leave
				// the watcher in place.
				break;
			}
		}
	}
	lck_mtx_unlock(rnh_lock);

	return result;
}
__private_extern__ void
nstat_route_new_entry(
	struct rtentry *rt)
{
	if (nstat_route_watchers == 0) {
		return;
	}

	lck_mtx_lock(&nstat_mtx);
	if ((rt->rt_flags & RTF_UP) != 0) {
		nstat_control_state *state;
		for (state = nstat_controls; state; state = state->ncs_next) {
			if ((state->ncs_watching & (1 << NSTAT_PROVIDER_ROUTE)) != 0) {
				// this client is watching routes
				// acquire a reference for the route
				RT_ADDREF(rt);

				// add the source, if that fails, release the reference
				if (nstat_control_source_add(0, state, &nstat_route_provider, rt) != 0) {
					RT_REMREF(rt);
				}
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);
}
static void
nstat_route_remove_watcher(
	__unused nstat_control_state *state)
{
	OSDecrementAtomic(&nstat_route_watchers);
}
static errno_t
nstat_route_copy_descriptor(
	nstat_provider_cookie_t cookie,
	void                    *data,
	size_t                  len)
{
	nstat_route_descriptor *desc = (nstat_route_descriptor *)data;
	if (len < sizeof(*desc)) {
		return EINVAL;
	}
	bzero(desc, sizeof(*desc));

	struct rtentry *rt = (struct rtentry *)cookie;
	desc->id = (uint64_t)VM_KERNEL_ADDRPERM(rt);
	desc->parent_id = (uint64_t)VM_KERNEL_ADDRPERM(rt->rt_parent);
	desc->gateway_id = (uint64_t)VM_KERNEL_ADDRPERM(rt->rt_gwroute);

	// destination
	struct sockaddr *sa;
	if ((sa = rt_key(rt))) {
		nstat_copy_sa_out(sa, &desc->dst.sa, sizeof(desc->dst));
	}

	// mask
	if ((sa = rt_mask(rt)) && sa->sa_len <= sizeof(desc->mask)) {
		memcpy(&desc->mask, sa, sa->sa_len);
	}

	// gateway
	if ((sa = rt->rt_gateway)) {
		nstat_copy_sa_out(sa, &desc->gateway.sa, sizeof(desc->gateway));
	}

	if (rt->rt_ifp) {
		desc->ifindex = rt->rt_ifp->if_index;
	}

	desc->flags = rt->rt_flags;

	return 0;
}
static bool
nstat_route_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter)
{
	bool retval = true;

	if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0) {
		struct rtentry *rt = (struct rtentry *)cookie;
		struct ifnet *ifp = rt->rt_ifp;

		if (ifp) {
			uint16_t interface_properties = nstat_ifnet_to_flags(ifp);

			if ((filter->npf_flags & interface_properties) == 0) {
				retval = false;
			}
		}
	}
	return retval;
}
static void
nstat_init_route_provider(void)
{
	bzero(&nstat_route_provider, sizeof(nstat_route_provider));
	nstat_route_provider.nstat_descriptor_length = sizeof(nstat_route_descriptor);
	nstat_route_provider.nstat_provider_id = NSTAT_PROVIDER_ROUTE;
	nstat_route_provider.nstat_lookup = nstat_route_lookup;
	nstat_route_provider.nstat_gone = nstat_route_gone;
	nstat_route_provider.nstat_counts = nstat_route_counts;
	nstat_route_provider.nstat_release = nstat_route_release;
	nstat_route_provider.nstat_watcher_add = nstat_route_add_watcher;
	nstat_route_provider.nstat_watcher_remove = nstat_route_remove_watcher;
	nstat_route_provider.nstat_copy_descriptor = nstat_route_copy_descriptor;
	nstat_route_provider.nstat_reporting_allowed = nstat_route_reporting_allowed;
	nstat_route_provider.next = nstat_providers;
	nstat_providers = &nstat_route_provider;
}
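/*
 * Illustrative note (not in the original source): each nstat_init_*_provider()
 * pushes its provider onto the head of the nstat_providers singly linked
 * list, so nstat_find_provider_by_id() is a linear scan over a list that is
 * fixed once nstat_init() has run.
 */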
#pragma mark -- Route Collection --

__private_extern__ struct nstat_counts *
nstat_route_attach(
	struct rtentry *rte)
{
	struct nstat_counts *result = rte->rt_stats;
	if (result) {
		return result;
	}

	result = nstat_malloc_aligned(sizeof(*result), sizeof(u_int64_t),
	    Z_WAITOK | Z_ZERO);
	if (!result) {
		return result;
	}

	if (!OSCompareAndSwapPtr(NULL, result, &rte->rt_stats)) {
		nstat_free_aligned(result);
		result = rte->rt_stats;
	}

	return result;
}
__private_extern__ void
nstat_route_detach(
	struct rtentry *rte)
{
	if (rte->rt_stats) {
		nstat_free_aligned(rte->rt_stats);
		rte->rt_stats = NULL;
	}
}
__private_extern__ void
nstat_route_connect_attempt(
	struct rtentry *rte)
{
	while (rte) {
		struct nstat_counts *stats = nstat_route_attach(rte);
		if (stats) {
			OSIncrementAtomic(&stats->nstat_connectattempts);
		}

		rte = rte->rt_parent;
	}
}
__private_extern__ void
nstat_route_connect_success(
	struct rtentry *rte)
{
	while (rte) {
		struct nstat_counts *stats = nstat_route_attach(rte);
		if (stats) {
			OSIncrementAtomic(&stats->nstat_connectsuccesses);
		}

		rte = rte->rt_parent;
	}
}
__private_extern__ void
nstat_route_tx(
	struct rtentry  *rte,
	u_int32_t       packets,
	u_int32_t       bytes,
	u_int32_t       flags)
{
	while (rte) {
		struct nstat_counts *stats = nstat_route_attach(rte);
		if (stats) {
			if ((flags & NSTAT_TX_FLAG_RETRANSMIT) != 0) {
				OSAddAtomic(bytes, &stats->nstat_txretransmit);
			} else {
				OSAddAtomic64((SInt64)packets, (SInt64 *)&stats->nstat_txpackets);
				OSAddAtomic64((SInt64)bytes, (SInt64 *)&stats->nstat_txbytes);
			}
		}

		rte = rte->rt_parent;
	}
}
__private_extern__ void
nstat_route_rx(
	struct rtentry  *rte,
	u_int32_t       packets,
	u_int32_t       bytes,
	u_int32_t       flags)
{
	while (rte) {
		struct nstat_counts *stats = nstat_route_attach(rte);
		if (stats) {
			if (flags == 0) {
				OSAddAtomic64((SInt64)packets, (SInt64 *)&stats->nstat_rxpackets);
				OSAddAtomic64((SInt64)bytes, (SInt64 *)&stats->nstat_rxbytes);
			} else {
				if (flags & NSTAT_RX_FLAG_OUT_OF_ORDER) {
					OSAddAtomic(bytes, &stats->nstat_rxoutoforderbytes);
				}
				if (flags & NSTAT_RX_FLAG_DUPLICATE) {
					OSAddAtomic(bytes, &stats->nstat_rxduplicatebytes);
				}
			}
		}

		rte = rte->rt_parent;
	}
}
/* atomically average current value at _val_addr with _new_val and store */
#define NSTAT_EWMA_ATOMIC(_val_addr, _new_val, _decay) do {                             \
	volatile uint32_t _old_val;                                                     \
	volatile uint32_t _avg;                                                         \
	do {                                                                            \
	        _old_val = *_val_addr;                                                  \
	        if (_old_val == 0) {                                                    \
	                _avg = _new_val;                                                \
	        } else {                                                                \
	                _avg = _old_val - (_old_val >> _decay) + (_new_val >> _decay);  \
	        }                                                                       \
	        if (_old_val == _avg) break;                                            \
	} while (!OSCompareAndSwap(_old_val, _avg, _val_addr));                         \
} while (0);

/* atomically compute minimum of current value at _val_addr with _new_val and store */
#define NSTAT_MIN_ATOMIC(_val_addr, _new_val) do {                      \
	volatile uint32_t _old_val;                                     \
	do {                                                            \
	        _old_val = *_val_addr;                                  \
	        if (_old_val != 0 && _old_val < _new_val)               \
	                break;                                          \
	} while (!OSCompareAndSwap(_old_val, _new_val, _val_addr));     \
} while (0);
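/*
 * Illustrative math (not in the original source): with _decay = 3 the
 * NSTAT_EWMA_ATOMIC update computes
 *
 *   avg' = avg - avg/8 + sample/8  ==  avg + (sample - avg)/8
 *
 * i.e. an exponentially weighted moving average giving each new sample a
 * weight of 1/2^_decay, committed via a compare-and-swap retry loop so that
 * concurrent updaters never lose a write.
 */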
__private_extern__ void
nstat_route_rtt(
	struct rtentry  *rte,
	u_int32_t       rtt,
	u_int32_t       rtt_var)
{
	const uint32_t decay = 3;

	while (rte) {
		struct nstat_counts *stats = nstat_route_attach(rte);
		if (stats) {
			NSTAT_EWMA_ATOMIC(&stats->nstat_avg_rtt, rtt, decay);
			NSTAT_MIN_ATOMIC(&stats->nstat_min_rtt, rtt);
			NSTAT_EWMA_ATOMIC(&stats->nstat_var_rtt, rtt_var, decay);
		}

		rte = rte->rt_parent;
	}
}
__private_extern__ void
nstat_route_update(
	struct rtentry  *rte,
	uint32_t        connect_attempts,
	uint32_t        connect_successes,
	uint32_t        rx_packets,
	uint32_t        rx_bytes,
	uint32_t        rx_duplicatebytes,
	uint32_t        rx_outoforderbytes,
	uint32_t        tx_packets,
	uint32_t        tx_bytes,
	uint32_t        tx_retransmit,
	uint32_t        rtt,
	uint32_t        rtt_var)
{
	const uint32_t decay = 3;

	while (rte) {
		struct nstat_counts *stats = nstat_route_attach(rte);
		if (stats) {
			OSAddAtomic(connect_attempts, &stats->nstat_connectattempts);
			OSAddAtomic(connect_successes, &stats->nstat_connectsuccesses);
			OSAddAtomic64((SInt64)tx_packets, (SInt64 *)&stats->nstat_txpackets);
			OSAddAtomic64((SInt64)tx_bytes, (SInt64 *)&stats->nstat_txbytes);
			OSAddAtomic(tx_retransmit, &stats->nstat_txretransmit);
			OSAddAtomic64((SInt64)rx_packets, (SInt64 *)&stats->nstat_rxpackets);
			OSAddAtomic64((SInt64)rx_bytes, (SInt64 *)&stats->nstat_rxbytes);
			OSAddAtomic(rx_outoforderbytes, &stats->nstat_rxoutoforderbytes);
			OSAddAtomic(rx_duplicatebytes, &stats->nstat_rxduplicatebytes);

			if (rtt != 0) {
				NSTAT_EWMA_ATOMIC(&stats->nstat_avg_rtt, rtt, decay);
				NSTAT_MIN_ATOMIC(&stats->nstat_min_rtt, rtt);
				NSTAT_EWMA_ATOMIC(&stats->nstat_var_rtt, rtt_var, decay);
			}
		}

		rte = rte->rt_parent;
	}
}
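/*
 * Illustrative note (not in the original source): all of the nstat_route_*
 * update helpers above walk rte->rt_parent, so counts recorded against a
 * cloned per-destination route also aggregate into the covering parent
 * route (e.g. a host route's bytes roll up into its subnet route).
 */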
#pragma mark -- TCP Kernel Provider --

/*
 * Due to the way the kernel deallocates a process (the process structure
 * might be gone by the time we get the PCB detach notification),
 * we need to cache the process name. Without this, proc_name() would
 * return null and the process name would never be sent to userland.
 *
 * For UDP sockets, we also cache the connection tuples along with the
 * interface index. This is necessary because when UDP sockets are
 * disconnected, the connection tuples are forever lost from the inpcb, thus
 * we need to keep track of the last call to connect() in ntstat.
 */
struct nstat_tucookie {
	struct inpcb    *inp;
	char            pname[MAXCOMLEN + 1];
	bool            cached;
	union {
		struct sockaddr_in      v4;
		struct sockaddr_in6     v6;
	} local;
	union {
		struct sockaddr_in      v4;
		struct sockaddr_in6     v6;
	} remote;
	unsigned int    if_index;
	uint16_t        ifnet_properties;
};
static struct nstat_tucookie *
nstat_tucookie_alloc_internal(
	struct inpcb    *inp,
	bool            ref,
	bool            locked)
{
	struct nstat_tucookie *cookie;

	cookie = kheap_alloc(KHEAP_NET_STAT, sizeof(*cookie), Z_WAITOK);
	if (cookie == NULL) {
		return NULL;
	}
	if (!locked) {
		LCK_MTX_ASSERT(&nstat_mtx, LCK_MTX_ASSERT_NOTOWNED);
	}
	if (ref && in_pcb_checkstate(inp, WNT_ACQUIRE, locked) == WNT_STOPUSING) {
		kheap_free(KHEAP_NET_STAT, cookie, sizeof(*cookie));
		return NULL;
	}
	bzero(cookie, sizeof(*cookie));
	cookie->inp = inp;
	proc_name(inp->inp_socket->last_pid, cookie->pname,
	    sizeof(cookie->pname));
	/*
	 * We only increment the reference count for UDP sockets because we
	 * only cache UDP socket tuples.
	 */
	if (SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP) {
		OSIncrementAtomic(&inp->inp_nstat_refcnt);
	}

	return cookie;
}
static struct nstat_tucookie *
nstat_tucookie_alloc(
	struct inpcb *inp)
{
	return nstat_tucookie_alloc_internal(inp, false, false);
}

static struct nstat_tucookie *
nstat_tucookie_alloc_ref(
	struct inpcb *inp)
{
	return nstat_tucookie_alloc_internal(inp, true, false);
}

static struct nstat_tucookie *
nstat_tucookie_alloc_ref_locked(
	struct inpcb *inp)
{
	return nstat_tucookie_alloc_internal(inp, true, true);
}
static void
nstat_tucookie_release_internal(
	struct nstat_tucookie   *cookie,
	int                     inplock)
{
	if (SOCK_PROTO(cookie->inp->inp_socket) == IPPROTO_UDP) {
		OSDecrementAtomic(&cookie->inp->inp_nstat_refcnt);
	}
	in_pcb_checkstate(cookie->inp, WNT_RELEASE, inplock);
	kheap_free(KHEAP_NET_STAT, cookie, sizeof(*cookie));
}

static void
nstat_tucookie_release(
	struct nstat_tucookie *cookie)
{
	nstat_tucookie_release_internal(cookie, false);
}

static void
nstat_tucookie_release_locked(
	struct nstat_tucookie *cookie)
{
	nstat_tucookie_release_internal(cookie, true);
}
static nstat_provider nstat_tcp_provider;

static errno_t
nstat_tcpudp_lookup(
	struct inpcbinfo        *inpinfo,
	const void              *data,
	u_int32_t               length,
	nstat_provider_cookie_t *out_cookie)
{
	struct inpcb *inp = NULL;

	// parameter validation
	const nstat_tcp_add_param *param = (const nstat_tcp_add_param *)data;
	if (length < sizeof(*param)) {
		return EINVAL;
	}

	// src and dst must match
	if (param->remote.v4.sin_family != 0 &&
	    param->remote.v4.sin_family != param->local.v4.sin_family) {
		return EINVAL;
	}

	switch (param->local.v4.sin_family) {
	case AF_INET:
	{
		if (param->local.v4.sin_len != sizeof(param->local.v4) ||
		    (param->remote.v4.sin_family != 0 &&
		    param->remote.v4.sin_len != sizeof(param->remote.v4))) {
			return EINVAL;
		}

		inp = in_pcblookup_hash(inpinfo, param->remote.v4.sin_addr, param->remote.v4.sin_port,
		    param->local.v4.sin_addr, param->local.v4.sin_port, 1, NULL);
	}
	break;

	case AF_INET6:
	{
		union {
			const struct in6_addr   *in6c;
			struct in6_addr         *in6;
		} local, remote;

		if (param->local.v6.sin6_len != sizeof(param->local.v6) ||
		    (param->remote.v6.sin6_family != 0 &&
		    param->remote.v6.sin6_len != sizeof(param->remote.v6))) {
			return EINVAL;
		}

		local.in6c = &param->local.v6.sin6_addr;
		remote.in6c = &param->remote.v6.sin6_addr;

		inp = in6_pcblookup_hash(inpinfo, remote.in6, param->remote.v6.sin6_port,
		    local.in6, param->local.v6.sin6_port, 1, NULL);
	}
	break;

	default:
		return EINVAL;
	}

	if (inp == NULL) {
		return ENOENT;
	}

	// At this point we have a ref to the inpcb
	*out_cookie = nstat_tucookie_alloc(inp);
	if (*out_cookie == NULL) {
		in_pcb_checkstate(inp, WNT_RELEASE, 0);
		return ENOMEM;
	}

	return 0;
}

static errno_t
nstat_tcp_lookup(
	const void              *data,
	u_int32_t               length,
	nstat_provider_cookie_t *out_cookie)
{
	return nstat_tcpudp_lookup(&tcbinfo, data, length, out_cookie);
}
static int
nstat_tcp_gone(
	nstat_provider_cookie_t cookie)
{
	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;
	struct inpcb *inp;
	struct tcpcb *tp;

	return (!(inp = tucookie->inp) ||
	       !(tp = intotcpcb(inp)) ||
	       inp->inp_state == INPCB_STATE_DEAD) ? 1 : 0;
}
static errno_t
nstat_tcp_counts(
	nstat_provider_cookie_t cookie,
	struct nstat_counts     *out_counts,
	int                     *out_gone)
{
	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;
	struct inpcb *inp;

	bzero(out_counts, sizeof(*out_counts));

	if (out_gone) {
		*out_gone = 0;
	}

	// if the pcb is in the dead state, we should stop using it
	if (nstat_tcp_gone(cookie)) {
		if (out_gone) {
			*out_gone = 1;
		}
		if (!(inp = tucookie->inp) || !intotcpcb(inp)) {
			return EINVAL;
		}
	}
	inp = tucookie->inp;
	struct tcpcb *tp = intotcpcb(inp);

	atomic_get_64(out_counts->nstat_rxpackets, &inp->inp_stat->rxpackets);
	atomic_get_64(out_counts->nstat_rxbytes, &inp->inp_stat->rxbytes);
	atomic_get_64(out_counts->nstat_txpackets, &inp->inp_stat->txpackets);
	atomic_get_64(out_counts->nstat_txbytes, &inp->inp_stat->txbytes);
	out_counts->nstat_rxduplicatebytes = tp->t_stat.rxduplicatebytes;
	out_counts->nstat_rxoutoforderbytes = tp->t_stat.rxoutoforderbytes;
	out_counts->nstat_txretransmit = tp->t_stat.txretransmitbytes;
	out_counts->nstat_connectattempts = tp->t_state >= TCPS_SYN_SENT ? 1 : 0;
	out_counts->nstat_connectsuccesses = tp->t_state >= TCPS_ESTABLISHED ? 1 : 0;
	out_counts->nstat_avg_rtt = tp->t_srtt;
	out_counts->nstat_min_rtt = tp->t_rttbest;
	out_counts->nstat_var_rtt = tp->t_rttvar;
	if (out_counts->nstat_avg_rtt < out_counts->nstat_min_rtt) {
		out_counts->nstat_min_rtt = out_counts->nstat_avg_rtt;
	}
	atomic_get_64(out_counts->nstat_cell_rxbytes, &inp->inp_cstat->rxbytes);
	atomic_get_64(out_counts->nstat_cell_txbytes, &inp->inp_cstat->txbytes);
	atomic_get_64(out_counts->nstat_wifi_rxbytes, &inp->inp_wstat->rxbytes);
	atomic_get_64(out_counts->nstat_wifi_txbytes, &inp->inp_wstat->txbytes);
	atomic_get_64(out_counts->nstat_wired_rxbytes, &inp->inp_Wstat->rxbytes);
	atomic_get_64(out_counts->nstat_wired_txbytes, &inp->inp_Wstat->txbytes);

	return 0;
}
static void
nstat_tcp_release(
	nstat_provider_cookie_t cookie,
	int                     locked)
{
	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;

	nstat_tucookie_release_internal(tucookie, locked);
}
static errno_t
nstat_tcp_add_watcher(
	nstat_control_state     *state,
	nstat_msg_add_all_srcs  *req)
{
	// There is a tricky issue around getting all TCP sockets added once
	// and only once. nstat_tcp_new_pcb() is called prior to the new item
	// being placed on any lists where it might be found.
	// By locking the tcbinfo.ipi_lock prior to marking the state as a watcher,
	// it should be impossible for a new socket to be added twice.
	// On the other hand, there is still a timing issue where a new socket
	// results in a call to nstat_tcp_new_pcb() before this watcher
	// is instantiated and yet the socket doesn't make it into ipi_listhead
	// prior to the scan. <rdar://problem/30361716>

	errno_t result;

	lck_rw_lock_shared(tcbinfo.ipi_lock);
	result = nstat_set_provider_filter(state, req);
	if (result == 0) {
		OSIncrementAtomic(&nstat_tcp_watchers);

		// Add all current tcp inpcbs. Ignore those in timewait
		struct inpcb *inp;
		struct nstat_tucookie *cookie;
		LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list)
		{
			cookie = nstat_tucookie_alloc_ref(inp);
			if (cookie == NULL) {
				continue;
			}
			if (nstat_control_source_add(0, state, &nstat_tcp_provider,
			    cookie) != 0) {
				nstat_tucookie_release(cookie);
				break;
			}
		}
	}

	lck_rw_done(tcbinfo.ipi_lock);

	return result;
}
static void
nstat_tcp_remove_watcher(
	__unused nstat_control_state *state)
{
	OSDecrementAtomic(&nstat_tcp_watchers);
}
__private_extern__ void
nstat_tcp_new_pcb(
	struct inpcb *inp)
{
	struct nstat_tucookie *cookie;

	inp->inp_start_timestamp = mach_continuous_time();

	if (nstat_tcp_watchers == 0) {
		return;
	}

	socket_lock(inp->inp_socket, 0);
	lck_mtx_lock(&nstat_mtx);
	nstat_control_state *state;
	for (state = nstat_controls; state; state = state->ncs_next) {
		if ((state->ncs_watching & (1 << NSTAT_PROVIDER_TCP_KERNEL)) != 0) {
			// this client is watching tcp
			// acquire a reference for it
			cookie = nstat_tucookie_alloc_ref_locked(inp);
			if (cookie == NULL) {
				continue;
			}
			// add the source, if that fails, release the reference
			if (nstat_control_source_add(0, state,
			    &nstat_tcp_provider, cookie) != 0) {
				nstat_tucookie_release_locked(cookie);
				break;
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);
	socket_unlock(inp->inp_socket, 0);
}
__private_extern__ void
nstat_pcb_detach(struct inpcb *inp)
{
	nstat_control_state *state;
	nstat_src *src;
	tailq_head_nstat_src dead_list;
	struct nstat_tucookie *tucookie;
	errno_t result;

	if (inp == NULL || (nstat_tcp_watchers == 0 && nstat_udp_watchers == 0)) {
		return;
	}

	TAILQ_INIT(&dead_list);
	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next) {
		lck_mtx_lock(&state->ncs_mtx);
		TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
		{
			nstat_provider_id_t provider_id = src->provider->nstat_provider_id;
			if (provider_id == NSTAT_PROVIDER_TCP_KERNEL || provider_id == NSTAT_PROVIDER_UDP_KERNEL) {
				tucookie = (struct nstat_tucookie *)src->cookie;
				if (tucookie->inp == inp) {
					break;
				}
			}
		}

		if (src) {
			result = nstat_control_send_goodbye(state, src);

			TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
			TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
		}
		lck_mtx_unlock(&state->ncs_mtx);
	}
	lck_mtx_unlock(&nstat_mtx);

	while ((src = TAILQ_FIRST(&dead_list))) {
		TAILQ_REMOVE(&dead_list, src, ns_control_link);
		nstat_control_cleanup_source(NULL, src, TRUE);
	}
}
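/*
 * Illustrative note (not in the original source): nstat_pcb_detach() unlinks
 * matching sources onto a local dead_list while the locks are held and only
 * calls nstat_control_cleanup_source() after dropping nstat_mtx, presumably
 * because cleanup can call back into the provider's nstat_release(), which
 * may take other locks.
 */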
__private_extern__ void
nstat_pcb_event(struct inpcb *inp, u_int64_t event)
{
	nstat_control_state *state;
	nstat_src *src;
	struct nstat_tucookie *tucookie;
	errno_t result;
	nstat_provider_id_t provider_id;

	if (inp == NULL || (nstat_tcp_watchers == 0 && nstat_udp_watchers == 0)) {
		return;
	}

	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next) {
		if (((state->ncs_provider_filters[NSTAT_PROVIDER_TCP_KERNEL].npf_events & event) == 0) &&
		    ((state->ncs_provider_filters[NSTAT_PROVIDER_UDP_KERNEL].npf_events & event) == 0)) {
			continue;
		}
		lck_mtx_lock(&state->ncs_mtx);
		TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
		{
			provider_id = src->provider->nstat_provider_id;
			if (provider_id == NSTAT_PROVIDER_TCP_KERNEL || provider_id == NSTAT_PROVIDER_UDP_KERNEL) {
				tucookie = (struct nstat_tucookie *)src->cookie;
				if (tucookie->inp == inp) {
					break;
				}
			}
		}

		if (src && ((state->ncs_provider_filters[provider_id].npf_events & event) != 0)) {
			result = nstat_control_send_event(state, src, event);
		}
		lck_mtx_unlock(&state->ncs_mtx);
	}
	lck_mtx_unlock(&nstat_mtx);
}
__private_extern__ void
nstat_pcb_cache(struct inpcb *inp)
{
	nstat_control_state *state;
	nstat_src *src;
	struct nstat_tucookie *tucookie;

	if (inp == NULL || nstat_udp_watchers == 0 ||
	    inp->inp_nstat_refcnt == 0) {
		return;
	}
	VERIFY(SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP);
	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next) {
		lck_mtx_lock(&state->ncs_mtx);
		TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
		{
			tucookie = (struct nstat_tucookie *)src->cookie;
			if (tucookie->inp == inp) {
				if (inp->inp_vflag & INP_IPV6) {
					in6_ip6_to_sockaddr(&inp->in6p_laddr,
					    inp->inp_lport,
					    &tucookie->local.v6,
					    sizeof(tucookie->local));
					in6_ip6_to_sockaddr(&inp->in6p_faddr,
					    inp->inp_fport,
					    &tucookie->remote.v6,
					    sizeof(tucookie->remote));
				} else if (inp->inp_vflag & INP_IPV4) {
					nstat_ip_to_sockaddr(&inp->inp_laddr,
					    inp->inp_lport,
					    &tucookie->local.v4,
					    sizeof(tucookie->local));
					nstat_ip_to_sockaddr(&inp->inp_faddr,
					    inp->inp_fport,
					    &tucookie->remote.v4,
					    sizeof(tucookie->remote));
				}
				if (inp->inp_last_outifp) {
					tucookie->if_index =
					    inp->inp_last_outifp->if_index;
				}

				tucookie->ifnet_properties = nstat_inpcb_to_flags(inp);
				tucookie->cached = true;
				break;
			}
		}
		lck_mtx_unlock(&state->ncs_mtx);
	}
	lck_mtx_unlock(&nstat_mtx);
}
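/*
 * Illustrative note (not in the original source): the tuples captured above
 * are what nstat_udp_copy_descriptor() falls back to once tucookie->cached
 * is set, since a disconnected UDP inpcb no longer holds its old
 * local/foreign addresses.
 */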
__private_extern__ void
nstat_pcb_invalidate_cache(struct inpcb *inp)
{
	nstat_control_state *state;
	nstat_src *src;
	struct nstat_tucookie *tucookie;

	if (inp == NULL || nstat_udp_watchers == 0 ||
	    inp->inp_nstat_refcnt == 0) {
		return;
	}
	VERIFY(SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP);
	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next) {
		lck_mtx_lock(&state->ncs_mtx);
		TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
		{
			tucookie = (struct nstat_tucookie *)src->cookie;
			if (tucookie->inp == inp) {
				tucookie->cached = false;
				break;
			}
		}
		lck_mtx_unlock(&state->ncs_mtx);
	}
	lck_mtx_unlock(&nstat_mtx);
}
static errno_t
nstat_tcp_copy_descriptor(
	nstat_provider_cookie_t cookie,
	void                    *data,
	size_t                  len)
{
	if (len < sizeof(nstat_tcp_descriptor)) {
		return EINVAL;
	}

	if (nstat_tcp_gone(cookie)) {
		return EINVAL;
	}

	nstat_tcp_descriptor *desc = (nstat_tcp_descriptor *)data;
	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;
	struct inpcb *inp = tucookie->inp;
	struct tcpcb *tp = intotcpcb(inp);
	bzero(desc, sizeof(*desc));

	if (inp->inp_vflag & INP_IPV6) {
		in6_ip6_to_sockaddr(&inp->in6p_laddr, inp->inp_lport,
		    &desc->local.v6, sizeof(desc->local));
		in6_ip6_to_sockaddr(&inp->in6p_faddr, inp->inp_fport,
		    &desc->remote.v6, sizeof(desc->remote));
	} else if (inp->inp_vflag & INP_IPV4) {
		nstat_ip_to_sockaddr(&inp->inp_laddr, inp->inp_lport,
		    &desc->local.v4, sizeof(desc->local));
		nstat_ip_to_sockaddr(&inp->inp_faddr, inp->inp_fport,
		    &desc->remote.v4, sizeof(desc->remote));
	}

	desc->state = intotcpcb(inp)->t_state;
	desc->ifindex = (inp->inp_last_outifp == NULL) ? 0 :
	    inp->inp_last_outifp->if_index;

	// danger - not locked, values could be bogus
	desc->txunacked = tp->snd_max - tp->snd_una;
	desc->txwindow = tp->snd_wnd;
	desc->txcwindow = tp->snd_cwnd;

	if (CC_ALGO(tp)->name != NULL) {
		strlcpy(desc->cc_algo, CC_ALGO(tp)->name,
		    sizeof(desc->cc_algo));
	}

	struct socket *so = inp->inp_socket;
	if (so) {
		// TBD - take the socket lock around these to make sure
		// they're in sync
		desc->upid = so->last_upid;
		desc->pid = so->last_pid;
		desc->traffic_class = so->so_traffic_class;
		if ((so->so_flags1 & SOF1_TRAFFIC_MGT_SO_BACKGROUND)) {
			desc->traffic_mgt_flags |= TRAFFIC_MGT_SO_BACKGROUND;
		}
		if ((so->so_flags1 & SOF1_TRAFFIC_MGT_TCP_RECVBG)) {
			desc->traffic_mgt_flags |= TRAFFIC_MGT_TCP_RECVBG;
		}
		proc_name(desc->pid, desc->pname, sizeof(desc->pname));
		if (desc->pname[0] == 0) {
			strlcpy(desc->pname, tucookie->pname,
			    sizeof(desc->pname));
		} else {
			desc->pname[sizeof(desc->pname) - 1] = 0;
			strlcpy(tucookie->pname, desc->pname,
			    sizeof(tucookie->pname));
		}
		memcpy(desc->uuid, so->last_uuid, sizeof(so->last_uuid));
		memcpy(desc->vuuid, so->so_vuuid, sizeof(so->so_vuuid));
		if (so->so_flags & SOF_DELEGATED) {
			desc->eupid = so->e_upid;
			desc->epid = so->e_pid;
			memcpy(desc->euuid, so->e_uuid, sizeof(so->e_uuid));
		} else {
			desc->eupid = desc->upid;
			desc->epid = desc->pid;
			memcpy(desc->euuid, desc->uuid, sizeof(desc->uuid));
		}
		uuid_copy(desc->fuuid, inp->necp_client_uuid);
		desc->sndbufsize = so->so_snd.sb_hiwat;
		desc->sndbufused = so->so_snd.sb_cc;
		desc->rcvbufsize = so->so_rcv.sb_hiwat;
		desc->rcvbufused = so->so_rcv.sb_cc;
	}

	tcp_get_connectivity_status(tp, &desc->connstatus);
	desc->ifnet_properties = nstat_inpcb_to_flags(inp);
	inp_get_activity_bitmap(inp, &desc->activity_bitmap);
	desc->start_timestamp = inp->inp_start_timestamp;
	desc->timestamp = mach_continuous_time();

	return 0;
}
static bool
nstat_tcpudp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter, bool is_UDP)
{
	bool retval = true;

	if ((filter->npf_flags & (NSTAT_FILTER_IFNET_FLAGS | NSTAT_FILTER_SPECIFIC_USER)) != 0) {
		struct nstat_tucookie *tucookie = (struct nstat_tucookie *)cookie;
		struct inpcb *inp = tucookie->inp;

		/* Only apply interface filter if at least one is allowed. */
		if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0) {
			uint16_t interface_properties = nstat_inpcb_to_flags(inp);

			if ((filter->npf_flags & interface_properties) == 0) {
				// For UDP, we could have an undefined interface and yet transfers may have occurred.
				// We allow reporting if there have been transfers of the requested kind.
				// This is imperfect as we cannot account for the expensive attribute over wifi.
				// We also assume that cellular is expensive and we have no way to select for AWDL
				if (is_UDP) {
					do {
						if ((filter->npf_flags & (NSTAT_FILTER_ACCEPT_CELLULAR | NSTAT_FILTER_ACCEPT_EXPENSIVE)) &&
						    (inp->inp_cstat->rxbytes || inp->inp_cstat->txbytes)) {
							break;
						}
						if ((filter->npf_flags & NSTAT_FILTER_ACCEPT_WIFI) &&
						    (inp->inp_wstat->rxbytes || inp->inp_wstat->txbytes)) {
							break;
						}
						if ((filter->npf_flags & NSTAT_FILTER_ACCEPT_WIRED) &&
						    (inp->inp_Wstat->rxbytes || inp->inp_Wstat->txbytes)) {
							break;
						}
						retval = false;
					} while (0);
				} else {
					retval = false;
				}
			}
		}

		if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0) && (retval)) {
			struct socket *so = inp->inp_socket;
			retval = false;

			if (so) {
				if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_PID) != 0) &&
				    (filter->npf_pid == so->last_pid)) {
					retval = true;
				} else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EPID) != 0) &&
				    (filter->npf_pid == (so->so_flags & SOF_DELEGATED)? so->e_upid : so->last_pid)) {
					retval = true;
				} else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_UUID) != 0) &&
				    (memcmp(filter->npf_uuid, so->last_uuid, sizeof(so->last_uuid)) == 0)) {
					retval = true;
				} else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EUUID) != 0) &&
				    (memcmp(filter->npf_uuid, (so->so_flags & SOF_DELEGATED)? so->e_uuid : so->last_uuid,
				    sizeof(so->last_uuid)) == 0)) {
					retval = true;
				}
			}
		}
	}
	return retval;
}
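/*
 * Illustrative example (not in the original source): a client that sets
 * npf_flags = NSTAT_FILTER_ACCEPT_CELLULAR | NSTAT_FILTER_SPECIFIC_USER_BY_PID
 * and npf_pid = 123 only receives reports for sockets whose last-known
 * interface is cellular and whose owning process has pid 123.
 */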
static bool
nstat_tcp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter)
{
	return nstat_tcpudp_reporting_allowed(cookie, filter, FALSE);
}
static void
nstat_init_tcp_provider(void)
{
	bzero(&nstat_tcp_provider, sizeof(nstat_tcp_provider));
	nstat_tcp_provider.nstat_descriptor_length = sizeof(nstat_tcp_descriptor);
	nstat_tcp_provider.nstat_provider_id = NSTAT_PROVIDER_TCP_KERNEL;
	nstat_tcp_provider.nstat_lookup = nstat_tcp_lookup;
	nstat_tcp_provider.nstat_gone = nstat_tcp_gone;
	nstat_tcp_provider.nstat_counts = nstat_tcp_counts;
	nstat_tcp_provider.nstat_release = nstat_tcp_release;
	nstat_tcp_provider.nstat_watcher_add = nstat_tcp_add_watcher;
	nstat_tcp_provider.nstat_watcher_remove = nstat_tcp_remove_watcher;
	nstat_tcp_provider.nstat_copy_descriptor = nstat_tcp_copy_descriptor;
	nstat_tcp_provider.nstat_reporting_allowed = nstat_tcp_reporting_allowed;
	nstat_tcp_provider.next = nstat_providers;
	nstat_providers = &nstat_tcp_provider;
}
#pragma mark -- UDP Provider --

static nstat_provider nstat_udp_provider;

static errno_t
nstat_udp_lookup(
	const void              *data,
	u_int32_t               length,
	nstat_provider_cookie_t *out_cookie)
{
	return nstat_tcpudp_lookup(&udbinfo, data, length, out_cookie);
}

static int
nstat_udp_gone(
	nstat_provider_cookie_t cookie)
{
	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;
	struct inpcb *inp;

	return (!(inp = tucookie->inp) ||
	       inp->inp_state == INPCB_STATE_DEAD) ? 1 : 0;
}
static errno_t
nstat_udp_counts(
	nstat_provider_cookie_t cookie,
	struct nstat_counts     *out_counts,
	int                     *out_gone)
{
	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;

	if (out_gone) {
		*out_gone = 0;
	}

	// if the pcb is in the dead state, we should stop using it
	if (nstat_udp_gone(cookie)) {
		if (out_gone) {
			*out_gone = 1;
		}
		if (!tucookie->inp) {
			return EINVAL;
		}
	}
	struct inpcb *inp = tucookie->inp;

	atomic_get_64(out_counts->nstat_rxpackets, &inp->inp_stat->rxpackets);
	atomic_get_64(out_counts->nstat_rxbytes, &inp->inp_stat->rxbytes);
	atomic_get_64(out_counts->nstat_txpackets, &inp->inp_stat->txpackets);
	atomic_get_64(out_counts->nstat_txbytes, &inp->inp_stat->txbytes);
	atomic_get_64(out_counts->nstat_cell_rxbytes, &inp->inp_cstat->rxbytes);
	atomic_get_64(out_counts->nstat_cell_txbytes, &inp->inp_cstat->txbytes);
	atomic_get_64(out_counts->nstat_wifi_rxbytes, &inp->inp_wstat->rxbytes);
	atomic_get_64(out_counts->nstat_wifi_txbytes, &inp->inp_wstat->txbytes);
	atomic_get_64(out_counts->nstat_wired_rxbytes, &inp->inp_Wstat->rxbytes);
	atomic_get_64(out_counts->nstat_wired_txbytes, &inp->inp_Wstat->txbytes);

	return 0;
}
static void
nstat_udp_release(
	nstat_provider_cookie_t cookie,
	int                     locked)
{
	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;

	nstat_tucookie_release_internal(tucookie, locked);
}
static errno_t
nstat_udp_add_watcher(
	nstat_control_state     *state,
	nstat_msg_add_all_srcs  *req)
{
	// There is a tricky issue around getting all UDP sockets added once
	// and only once. nstat_udp_new_pcb() is called prior to the new item
	// being placed on any lists where it might be found.
	// By locking the udpinfo.ipi_lock prior to marking the state as a watcher,
	// it should be impossible for a new socket to be added twice.
	// On the other hand, there is still a timing issue where a new socket
	// results in a call to nstat_udp_new_pcb() before this watcher
	// is instantiated and yet the socket doesn't make it into ipi_listhead
	// prior to the scan. <rdar://problem/30361716>

	errno_t result;

	lck_rw_lock_shared(udbinfo.ipi_lock);
	result = nstat_set_provider_filter(state, req);

	if (result == 0) {
		struct inpcb *inp;
		struct nstat_tucookie *cookie;

		OSIncrementAtomic(&nstat_udp_watchers);

		// Add all current UDP inpcbs.
		LIST_FOREACH(inp, udbinfo.ipi_listhead, inp_list)
		{
			cookie = nstat_tucookie_alloc_ref(inp);
			if (cookie == NULL) {
				continue;
			}
			if (nstat_control_source_add(0, state, &nstat_udp_provider,
			    cookie) != 0) {
				nstat_tucookie_release(cookie);
				break;
			}
		}
	}

	lck_rw_done(udbinfo.ipi_lock);

	return result;
}
static void
nstat_udp_remove_watcher(
	__unused nstat_control_state *state)
{
	OSDecrementAtomic(&nstat_udp_watchers);
}
__private_extern__ void
nstat_udp_new_pcb(
	struct inpcb *inp)
{
	struct nstat_tucookie *cookie;

	inp->inp_start_timestamp = mach_continuous_time();

	if (nstat_udp_watchers == 0) {
		return;
	}

	socket_lock(inp->inp_socket, 0);
	lck_mtx_lock(&nstat_mtx);
	nstat_control_state *state;
	for (state = nstat_controls; state; state = state->ncs_next) {
		if ((state->ncs_watching & (1 << NSTAT_PROVIDER_UDP_KERNEL)) != 0) {
			// this client is watching udp
			// acquire a reference for it
			cookie = nstat_tucookie_alloc_ref_locked(inp);
			if (cookie == NULL) {
				continue;
			}
			// add the source, if that fails, release the reference
			if (nstat_control_source_add(0, state,
			    &nstat_udp_provider, cookie) != 0) {
				nstat_tucookie_release_locked(cookie);
				break;
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);
	socket_unlock(inp->inp_socket, 0);
}
static errno_t
nstat_udp_copy_descriptor(
	nstat_provider_cookie_t cookie,
	void                    *data,
	size_t                  len)
{
	if (len < sizeof(nstat_udp_descriptor)) {
		return EINVAL;
	}

	if (nstat_udp_gone(cookie)) {
		return EINVAL;
	}

	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;
	nstat_udp_descriptor *desc = (nstat_udp_descriptor *)data;
	struct inpcb *inp = tucookie->inp;

	bzero(desc, sizeof(*desc));

	if (tucookie->cached == false) {
		if (inp->inp_vflag & INP_IPV6) {
			in6_ip6_to_sockaddr(&inp->in6p_laddr, inp->inp_lport,
			    &desc->local.v6, sizeof(desc->local.v6));
			in6_ip6_to_sockaddr(&inp->in6p_faddr, inp->inp_fport,
			    &desc->remote.v6, sizeof(desc->remote.v6));
		} else if (inp->inp_vflag & INP_IPV4) {
			nstat_ip_to_sockaddr(&inp->inp_laddr, inp->inp_lport,
			    &desc->local.v4, sizeof(desc->local.v4));
			nstat_ip_to_sockaddr(&inp->inp_faddr, inp->inp_fport,
			    &desc->remote.v4, sizeof(desc->remote.v4));
		}
		desc->ifnet_properties = nstat_inpcb_to_flags(inp);
	} else {
		if (inp->inp_vflag & INP_IPV6) {
			memcpy(&desc->local.v6, &tucookie->local.v6,
			    sizeof(desc->local.v6));
			memcpy(&desc->remote.v6, &tucookie->remote.v6,
			    sizeof(desc->remote.v6));
		} else if (inp->inp_vflag & INP_IPV4) {
			memcpy(&desc->local.v4, &tucookie->local.v4,
			    sizeof(desc->local.v4));
			memcpy(&desc->remote.v4, &tucookie->remote.v4,
			    sizeof(desc->remote.v4));
		}
		desc->ifnet_properties = tucookie->ifnet_properties;
	}

	if (inp->inp_last_outifp) {
		desc->ifindex = inp->inp_last_outifp->if_index;
	} else {
		desc->ifindex = tucookie->if_index;
	}

	struct socket *so = inp->inp_socket;
	if (so) {
		// TBD - take the socket lock around these to make sure
		// they're in sync
		desc->upid = so->last_upid;
		desc->pid = so->last_pid;
		proc_name(desc->pid, desc->pname, sizeof(desc->pname));
		if (desc->pname[0] == 0) {
			strlcpy(desc->pname, tucookie->pname,
			    sizeof(desc->pname));
		} else {
			desc->pname[sizeof(desc->pname) - 1] = 0;
			strlcpy(tucookie->pname, desc->pname,
			    sizeof(tucookie->pname));
		}
		memcpy(desc->uuid, so->last_uuid, sizeof(so->last_uuid));
		memcpy(desc->vuuid, so->so_vuuid, sizeof(so->so_vuuid));
		if (so->so_flags & SOF_DELEGATED) {
			desc->eupid = so->e_upid;
			desc->epid = so->e_pid;
			memcpy(desc->euuid, so->e_uuid, sizeof(so->e_uuid));
		} else {
			desc->eupid = desc->upid;
			desc->epid = desc->pid;
			memcpy(desc->euuid, desc->uuid, sizeof(desc->uuid));
		}
		uuid_copy(desc->fuuid, inp->necp_client_uuid);
		desc->rcvbufsize = so->so_rcv.sb_hiwat;
		desc->rcvbufused = so->so_rcv.sb_cc;
		desc->traffic_class = so->so_traffic_class;
		inp_get_activity_bitmap(inp, &desc->activity_bitmap);
		desc->start_timestamp = inp->inp_start_timestamp;
		desc->timestamp = mach_continuous_time();
	}

	return 0;
}
static bool
nstat_udp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter)
{
	return nstat_tcpudp_reporting_allowed(cookie, filter, TRUE);
}
static void
nstat_init_udp_provider(void)
{
	bzero(&nstat_udp_provider, sizeof(nstat_udp_provider));
	nstat_udp_provider.nstat_provider_id = NSTAT_PROVIDER_UDP_KERNEL;
	nstat_udp_provider.nstat_descriptor_length = sizeof(nstat_udp_descriptor);
	nstat_udp_provider.nstat_lookup = nstat_udp_lookup;
	nstat_udp_provider.nstat_gone = nstat_udp_gone;
	nstat_udp_provider.nstat_counts = nstat_udp_counts;
	nstat_udp_provider.nstat_watcher_add = nstat_udp_add_watcher;
	nstat_udp_provider.nstat_watcher_remove = nstat_udp_remove_watcher;
	nstat_udp_provider.nstat_copy_descriptor = nstat_udp_copy_descriptor;
	nstat_udp_provider.nstat_release = nstat_udp_release;
	nstat_udp_provider.nstat_reporting_allowed = nstat_udp_reporting_allowed;
	nstat_udp_provider.next = nstat_providers;
	nstat_providers = &nstat_udp_provider;
}
#pragma mark -- ifnet Provider --

static nstat_provider nstat_ifnet_provider;

/*
 * We store a pointer to the ifnet and the original threshold
 * requested by the client.
 */
struct nstat_ifnet_cookie {
	struct ifnet    *ifp;
	uint64_t        threshold;
};

static errno_t
nstat_ifnet_lookup(
	const void              *data,
	u_int32_t               length,
	nstat_provider_cookie_t *out_cookie)
{
	const nstat_ifnet_add_param *param = (const nstat_ifnet_add_param *)data;
	struct ifnet *ifp;
	boolean_t changed = FALSE;
	nstat_control_state *state;
	nstat_src *src;
	struct nstat_ifnet_cookie *cookie;

	if (length < sizeof(*param) || param->threshold < 1024 * 1024) {
		return EINVAL;
	}
	if (nstat_privcheck != 0) {
		errno_t result = priv_check_cred(kauth_cred_get(),
		    PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0);
		if (result != 0) {
			return result;
		}
	}
	cookie = kheap_alloc(KHEAP_NET_STAT, sizeof(*cookie), Z_WAITOK | Z_ZERO);
	if (cookie == NULL) {
		return ENOMEM;
	}

	ifnet_head_lock_shared();
	TAILQ_FOREACH(ifp, &ifnet_head, if_link)
	{
		ifnet_lock_exclusive(ifp);
		if (ifp->if_index == param->ifindex) {
			cookie->ifp = ifp;
			cookie->threshold = param->threshold;
			*out_cookie = cookie;
			if (!ifp->if_data_threshold ||
			    ifp->if_data_threshold > param->threshold) {
				changed = TRUE;
				ifp->if_data_threshold = param->threshold;
			}
			ifnet_lock_done(ifp);
			ifnet_reference(ifp);
			break;
		}
		ifnet_lock_done(ifp);
	}
	ifnet_head_done();

	/*
	 * When we change the threshold to something smaller, we notify
	 * all of our clients with a description message.
	 * We won't send a message to the client we are currently serving
	 * because it has no `ifnet source' yet.
	 */
	if (changed) {
		lck_mtx_lock(&nstat_mtx);
		for (state = nstat_controls; state; state = state->ncs_next) {
			lck_mtx_lock(&state->ncs_mtx);
			TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
			{
				if (src->provider != &nstat_ifnet_provider) {
					continue;
				}
				nstat_control_send_description(state, src, 0, 0);
			}
			lck_mtx_unlock(&state->ncs_mtx);
		}
		lck_mtx_unlock(&nstat_mtx);
	}
	if (cookie->ifp == NULL) {
		kheap_free(KHEAP_NET_STAT, cookie, sizeof(*cookie));
	}

	return ifp ? 0 : EINVAL;
}
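/*
 * Illustrative note (not in the original source): if_data_threshold acts as
 * a reporting trigger shared by every nstat client watching this ifnet. The
 * lookup above only ever lowers it, and nstat_ifnet_release() below
 * recomputes the minimum over the remaining clients when one detaches.
 */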
static int
nstat_ifnet_gone(
	nstat_provider_cookie_t cookie)
{
	struct ifnet *ifp;
	struct nstat_ifnet_cookie *ifcookie =
	    (struct nstat_ifnet_cookie *)cookie;

	ifnet_head_lock_shared();
	TAILQ_FOREACH(ifp, &ifnet_head, if_link)
	{
		if (ifp == ifcookie->ifp) {
			break;
		}
	}
	ifnet_head_done();

	return ifp ? 0 : 1;
}
static errno_t
nstat_ifnet_counts(
	nstat_provider_cookie_t cookie,
	struct nstat_counts     *out_counts,
	int                     *out_gone)
{
	struct nstat_ifnet_cookie *ifcookie =
	    (struct nstat_ifnet_cookie *)cookie;
	struct ifnet *ifp = ifcookie->ifp;

	if (out_gone) {
		*out_gone = 0;
	}

	// if the ifnet is gone, we should stop using it
	if (nstat_ifnet_gone(cookie)) {
		if (out_gone) {
			*out_gone = 1;
		}
		return EINVAL;
	}

	bzero(out_counts, sizeof(*out_counts));
	out_counts->nstat_rxpackets = ifp->if_ipackets;
	out_counts->nstat_rxbytes = ifp->if_ibytes;
	out_counts->nstat_txpackets = ifp->if_opackets;
	out_counts->nstat_txbytes = ifp->if_obytes;
	out_counts->nstat_cell_rxbytes = out_counts->nstat_cell_txbytes = 0;

	return 0;
}
static void
nstat_ifnet_release(
	nstat_provider_cookie_t cookie,
	__unused int            locked)
{
	struct nstat_ifnet_cookie *ifcookie;
	struct ifnet *ifp;
	nstat_control_state *state;
	nstat_src *src;
	uint64_t minthreshold = UINT64_MAX;

	/*
	 * Find all the clients that requested a threshold
	 * for this ifnet and re-calculate if_data_threshold.
	 */
	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next) {
		lck_mtx_lock(&state->ncs_mtx);
		TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
		{
			/* Skip the provider we are about to detach. */
			if (src->provider != &nstat_ifnet_provider ||
			    src->cookie == cookie) {
				continue;
			}
			ifcookie = (struct nstat_ifnet_cookie *)src->cookie;
			if (ifcookie->threshold < minthreshold) {
				minthreshold = ifcookie->threshold;
			}
		}
		lck_mtx_unlock(&state->ncs_mtx);
	}
	lck_mtx_unlock(&nstat_mtx);
	/*
	 * Reset if_data_threshold or disable it.
	 */
	ifcookie = (struct nstat_ifnet_cookie *)cookie;
	ifp = ifcookie->ifp;
	if (ifnet_is_attached(ifp, 1)) {
		ifnet_lock_exclusive(ifp);
		if (minthreshold == UINT64_MAX) {
			ifp->if_data_threshold = 0;
		} else {
			ifp->if_data_threshold = minthreshold;
		}
		ifnet_lock_done(ifp);
		ifnet_decr_iorefcnt(ifp);
	}
	ifnet_release(ifp);
	kheap_free(KHEAP_NET_STAT, ifcookie, sizeof(*ifcookie));
}
static void
nstat_ifnet_copy_link_status(
	struct ifnet *ifp,
	struct nstat_ifnet_descriptor *desc)
{
	struct if_link_status *ifsr = ifp->if_link_status;
	nstat_ifnet_desc_link_status *link_status = &desc->link_status;

	link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_NONE;
	if (ifsr == NULL) {
		return;
	}

	lck_rw_lock_shared(&ifp->if_link_status_lock);

	if (ifp->if_type == IFT_CELLULAR) {
		nstat_ifnet_desc_cellular_status *cell_status = &link_status->u.cellular;
		struct if_cellular_status_v1 *if_cell_sr =
		    &ifsr->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;

		if (ifsr->ifsr_version != IF_CELLULAR_STATUS_REPORT_VERSION_1) {
			goto done;
		}

		link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_CELLULAR;

		if (if_cell_sr->valid_bitmask & IF_CELL_LINK_QUALITY_METRIC_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_LINK_QUALITY_METRIC_VALID;
			cell_status->link_quality_metric = if_cell_sr->link_quality_metric;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_EFFECTIVE_BANDWIDTH_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_EFFECTIVE_BANDWIDTH_VALID;
			cell_status->ul_effective_bandwidth = if_cell_sr->ul_effective_bandwidth;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_BANDWIDTH_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_BANDWIDTH_VALID;
			cell_status->ul_max_bandwidth = if_cell_sr->ul_max_bandwidth;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_MIN_LATENCY_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MIN_LATENCY_VALID;
			cell_status->ul_min_latency = if_cell_sr->ul_min_latency;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_EFFECTIVE_LATENCY_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_EFFECTIVE_LATENCY_VALID;
			cell_status->ul_effective_latency = if_cell_sr->ul_effective_latency;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_LATENCY_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_LATENCY_VALID;
			cell_status->ul_max_latency = if_cell_sr->ul_max_latency;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_RETXT_LEVEL_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_VALID;
			if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_NONE) {
				cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_NONE;
			} else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_LOW) {
				cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_LOW;
			} else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_MEDIUM) {
				cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_MEDIUM;
			} else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_HIGH) {
				cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_HIGH;
			} else {
				cell_status->valid_bitmask &= ~NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_VALID;
			}
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_BYTES_LOST_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_BYTES_LOST_VALID;
			cell_status->ul_bytes_lost = if_cell_sr->ul_bytes_lost;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_MIN_QUEUE_SIZE_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MIN_QUEUE_SIZE_VALID;
			cell_status->ul_min_queue_size = if_cell_sr->ul_min_queue_size;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_AVG_QUEUE_SIZE_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_AVG_QUEUE_SIZE_VALID;
			cell_status->ul_avg_queue_size = if_cell_sr->ul_avg_queue_size;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_QUEUE_SIZE_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_QUEUE_SIZE_VALID;
			cell_status->ul_max_queue_size = if_cell_sr->ul_max_queue_size;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_DL_EFFECTIVE_BANDWIDTH_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_DL_EFFECTIVE_BANDWIDTH_VALID;
			cell_status->dl_effective_bandwidth = if_cell_sr->dl_effective_bandwidth;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_DL_MAX_BANDWIDTH_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_DL_MAX_BANDWIDTH_VALID;
			cell_status->dl_max_bandwidth = if_cell_sr->dl_max_bandwidth;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_CONFIG_INACTIVITY_TIME_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_CONFIG_INACTIVITY_TIME_VALID;
			cell_status->config_inactivity_time = if_cell_sr->config_inactivity_time;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_CONFIG_BACKOFF_TIME_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_CONFIG_BACKOFF_TIME_VALID;
			cell_status->config_backoff_time = if_cell_sr->config_backoff_time;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_MSS_RECOMMENDED_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_MSS_RECOMMENDED_VALID;
			cell_status->mss_recommended = if_cell_sr->mss_recommended;
		}
	} else if (IFNET_IS_WIFI(ifp)) {
		nstat_ifnet_desc_wifi_status *wifi_status = &link_status->u.wifi;
		struct if_wifi_status_v1 *if_wifi_sr =
		    &ifsr->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;

		if (ifsr->ifsr_version != IF_WIFI_STATUS_REPORT_VERSION_1) {
			goto done;
		}

		link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_WIFI;

		if (if_wifi_sr->valid_bitmask & IF_WIFI_LINK_QUALITY_METRIC_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_LINK_QUALITY_METRIC_VALID;
			wifi_status->link_quality_metric = if_wifi_sr->link_quality_metric;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID;
			wifi_status->ul_effective_bandwidth = if_wifi_sr->ul_effective_bandwidth;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MAX_BANDWIDTH_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MAX_BANDWIDTH_VALID;
			wifi_status->ul_max_bandwidth = if_wifi_sr->ul_max_bandwidth;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MIN_LATENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MIN_LATENCY_VALID;
			wifi_status->ul_min_latency = if_wifi_sr->ul_min_latency;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_EFFECTIVE_LATENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_EFFECTIVE_LATENCY_VALID;
			wifi_status->ul_effective_latency = if_wifi_sr->ul_effective_latency;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MAX_LATENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MAX_LATENCY_VALID;
			wifi_status->ul_max_latency = if_wifi_sr->ul_max_latency;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_RETXT_LEVEL_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_VALID;
			if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_NONE) {
				wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_NONE;
			} else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_LOW) {
				wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_LOW;
			} else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_MEDIUM) {
				wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_MEDIUM;
			} else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_HIGH) {
				wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_HIGH;
			} else {
				wifi_status->valid_bitmask &= ~NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_VALID;
			}
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_BYTES_LOST_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_BYTES_LOST_VALID;
			wifi_status->ul_bytes_lost = if_wifi_sr->ul_bytes_lost;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_ERROR_RATE_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_ERROR_RATE_VALID;
			wifi_status->ul_error_rate = if_wifi_sr->ul_error_rate;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID;
			wifi_status->dl_effective_bandwidth = if_wifi_sr->dl_effective_bandwidth;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MAX_BANDWIDTH_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MAX_BANDWIDTH_VALID;
			wifi_status->dl_max_bandwidth = if_wifi_sr->dl_max_bandwidth;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MIN_LATENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MIN_LATENCY_VALID;
			wifi_status->dl_min_latency = if_wifi_sr->dl_min_latency;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_EFFECTIVE_LATENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_EFFECTIVE_LATENCY_VALID;
			wifi_status->dl_effective_latency = if_wifi_sr->dl_effective_latency;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MAX_LATENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MAX_LATENCY_VALID;
			wifi_status->dl_max_latency = if_wifi_sr->dl_max_latency;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_ERROR_RATE_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_ERROR_RATE_VALID;
			wifi_status->dl_error_rate = if_wifi_sr->dl_error_rate;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_FREQUENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_VALID;
			if (if_wifi_sr->config_frequency == IF_WIFI_CONFIG_FREQUENCY_2_4_GHZ) {
				wifi_status->config_frequency = NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_2_4_GHZ;
			} else if (if_wifi_sr->config_frequency == IF_WIFI_CONFIG_FREQUENCY_5_0_GHZ) {
				wifi_status->config_frequency = NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_5_0_GHZ;
			} else {
				wifi_status->valid_bitmask &= ~NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_VALID;
			}
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_MULTICAST_RATE_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_MULTICAST_RATE_VALID;
			wifi_status->config_multicast_rate = if_wifi_sr->config_multicast_rate;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_SCAN_COUNT_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_SCAN_COUNT_VALID;
			wifi_status->scan_count = if_wifi_sr->scan_count;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_SCAN_DURATION_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_SCAN_DURATION_VALID;
			wifi_status->scan_duration = if_wifi_sr->scan_duration;
		}
	}

done:
	lck_rw_done(&ifp->if_link_status_lock);
}
static u_int64_t nstat_ifnet_last_report_time = 0;
extern int tcp_report_stats_interval;
static void
nstat_ifnet_compute_percentages(struct if_tcp_ecn_perf_stat *ifst)
{
	/* Retransmit percentage */
	if (ifst->total_rxmitpkts > 0 && ifst->total_txpkts > 0) {
		/* shift by 10 for precision */
		ifst->rxmit_percent =
		    ((ifst->total_rxmitpkts << 10) * 100) / ifst->total_txpkts;
	} else {
		ifst->rxmit_percent = 0;
	}

	/* Out-of-order percentage */
	if (ifst->total_oopkts > 0 && ifst->total_rxpkts > 0) {
		/* shift by 10 for precision */
		ifst->oo_percent =
		    ((ifst->total_oopkts << 10) * 100) / ifst->total_rxpkts;
	} else {
		ifst->oo_percent = 0;
	}

	/* Reorder percentage */
	if (ifst->total_reorderpkts > 0 &&
	    (ifst->total_txpkts + ifst->total_rxpkts) > 0) {
		/* shift by 10 for precision */
		ifst->reorder_percent =
		    ((ifst->total_reorderpkts << 10) * 100) /
		    (ifst->total_txpkts + ifst->total_rxpkts);
	} else {
		ifst->reorder_percent = 0;
	}
}
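/*
 * Note (illustrative): the percentages above are kept in a <<10
 * fixed-point encoding, i.e. the stored value is percent * 1024.
 * For example, 50 retransmitted packets out of 1000 transmitted gives
 * ((50 << 10) * 100) / 1000 = 5120, which decodes to 5120 / 1024 = 5%.
 * A consumer recovers the integer percentage with a >> 10.
 */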
static void
nstat_ifnet_normalize_counter(struct if_tcp_ecn_stat *if_st)
{
	u_int64_t ecn_on_conn, ecn_off_conn;

	if (if_st == NULL) {
		return;
	}
	ecn_on_conn = if_st->ecn_client_success +
	    if_st->ecn_server_success;
	ecn_off_conn = if_st->ecn_off_conn +
	    (if_st->ecn_client_setup - if_st->ecn_client_success) +
	    (if_st->ecn_server_setup - if_st->ecn_server_success);

	/*
	 * report sack episodes, rst_drop and rxmit_drop
	 * as a ratio per connection, shift by 10 for precision
	 */
	if (ecn_on_conn > 0) {
		if_st->ecn_on.sack_episodes =
		    (if_st->ecn_on.sack_episodes << 10) / ecn_on_conn;
		if_st->ecn_on.rst_drop =
		    (if_st->ecn_on.rst_drop << 10) * 100 / ecn_on_conn;
		if_st->ecn_on.rxmit_drop =
		    (if_st->ecn_on.rxmit_drop << 10) * 100 / ecn_on_conn;
	} else {
		/* set to zero, just in case */
		if_st->ecn_on.sack_episodes = 0;
		if_st->ecn_on.rst_drop = 0;
		if_st->ecn_on.rxmit_drop = 0;
	}

	if (ecn_off_conn > 0) {
		if_st->ecn_off.sack_episodes =
		    (if_st->ecn_off.sack_episodes << 10) / ecn_off_conn;
		if_st->ecn_off.rst_drop =
		    (if_st->ecn_off.rst_drop << 10) * 100 / ecn_off_conn;
		if_st->ecn_off.rxmit_drop =
		    (if_st->ecn_off.rxmit_drop << 10) * 100 / ecn_off_conn;
	} else {
		if_st->ecn_off.sack_episodes = 0;
		if_st->ecn_off.rst_drop = 0;
		if_st->ecn_off.rxmit_drop = 0;
	}
	if_st->ecn_total_conn = ecn_off_conn + ecn_on_conn;
}
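/*
 * Note (illustrative): after normalization the units differ slightly:
 * sack_episodes is episodes-per-connection in the same <<10 fixed point
 * (value / 1024 = episodes per connection), while rst_drop and
 * rxmit_drop carry an extra * 100 and therefore decode as percentages
 * of connections (value / 1024 = percent).
 */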
static void
nstat_ifnet_report_ecn_stats(void)
{
	u_int64_t uptime, last_report_time;
	struct nstat_sysinfo_data data;
	struct nstat_sysinfo_ifnet_ecn_stats *st;
	struct ifnet *ifp;

	uptime = net_uptime();

	if ((int)(uptime - nstat_ifnet_last_report_time) <
	    tcp_report_stats_interval) {
		return;
	}

	last_report_time = nstat_ifnet_last_report_time;
	nstat_ifnet_last_report_time = uptime;
	data.flags = NSTAT_SYSINFO_IFNET_ECN_STATS;
	st = &data.u.ifnet_ecn_stats;

	ifnet_head_lock_shared();
	TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
		if (ifp->if_ipv4_stat == NULL || ifp->if_ipv6_stat == NULL) {
			continue;
		}

		if (!IF_FULLY_ATTACHED(ifp)) {
			continue;
		}

		/* Limit reporting to Wifi, Ethernet and cellular. */
		if (!(IFNET_IS_ETHERNET(ifp) || IFNET_IS_CELLULAR(ifp))) {
			continue;
		}

		bzero(st, sizeof(*st));
		if (IFNET_IS_CELLULAR(ifp)) {
			st->ifnet_type = NSTAT_IFNET_ECN_TYPE_CELLULAR;
		} else if (IFNET_IS_WIFI(ifp)) {
			st->ifnet_type = NSTAT_IFNET_ECN_TYPE_WIFI;
		} else {
			st->ifnet_type = NSTAT_IFNET_ECN_TYPE_ETHERNET;
		}
		data.unsent_data_cnt = ifp->if_unsent_data_cnt;
		/* skip if there was no update since last report */
		if (ifp->if_ipv4_stat->timestamp <= 0 ||
		    ifp->if_ipv4_stat->timestamp < last_report_time) {
			goto v6;
		}
		st->ifnet_proto = NSTAT_IFNET_ECN_PROTO_IPV4;
		/* compute percentages using packet counts */
		nstat_ifnet_compute_percentages(&ifp->if_ipv4_stat->ecn_on);
		nstat_ifnet_compute_percentages(&ifp->if_ipv4_stat->ecn_off);
		nstat_ifnet_normalize_counter(ifp->if_ipv4_stat);
		bcopy(ifp->if_ipv4_stat, &st->ecn_stat,
		    sizeof(st->ecn_stat));
		nstat_sysinfo_send_data(&data);

		/* Zero the stats in ifp */
		bzero(ifp->if_ipv4_stat, sizeof(*ifp->if_ipv4_stat));
v6:
		/* skip if there was no update since last report */
		if (ifp->if_ipv6_stat->timestamp <= 0 ||
		    ifp->if_ipv6_stat->timestamp < last_report_time) {
			continue;
		}
		st->ifnet_proto = NSTAT_IFNET_ECN_PROTO_IPV6;

		/* compute percentages using packet counts */
		nstat_ifnet_compute_percentages(&ifp->if_ipv6_stat->ecn_on);
		nstat_ifnet_compute_percentages(&ifp->if_ipv6_stat->ecn_off);
		nstat_ifnet_normalize_counter(ifp->if_ipv6_stat);
		bcopy(ifp->if_ipv6_stat, &st->ecn_stat,
		    sizeof(st->ecn_stat));
		nstat_sysinfo_send_data(&data);

		/* Zero the stats in ifp */
		bzero(ifp->if_ipv6_stat, sizeof(*ifp->if_ipv6_stat));
	}
	ifnet_head_done();
}
/* Some thresholds to determine Low Internet mode */
#define NSTAT_LIM_DL_MAX_BANDWIDTH_THRESHOLD    1000000 /* 1 Mbps */
#define NSTAT_LIM_UL_MAX_BANDWIDTH_THRESHOLD    500000  /* 500 Kbps */
#define NSTAT_LIM_UL_MIN_RTT_THRESHOLD          1000    /* 1 second */
#define NSTAT_LIM_CONN_TIMEOUT_PERCENT_THRESHOLD (10 << 10) /* 10 percent connection timeouts */
#define NSTAT_LIM_PACKET_LOSS_PERCENT_THRESHOLD  (2 << 10)  /* 2 percent packet loss rate */
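/*
 * Note (illustrative): the two percent thresholds use the same <<10
 * fixed-point percent encoding computed by nstat_lim_activity_check()
 * below, so (10 << 10) == 10240 compares equal to an encoded 10%.
 */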
static boolean_t
nstat_lim_activity_check(struct if_lim_perf_stat *st)
{
	/* check that the current activity is enough to report stats */
	if (st->lim_total_txpkts < nstat_lim_min_tx_pkts ||
	    st->lim_total_rxpkts < nstat_lim_min_rx_pkts ||
	    st->lim_conn_attempts == 0) {
		return FALSE;
	}

	/*
	 * Compute percentages if there was enough activity. Use
	 * shift-left by 10 to preserve precision.
	 */
	st->lim_packet_loss_percent = ((st->lim_total_retxpkts << 10) /
	    st->lim_total_txpkts) * 100;

	st->lim_packet_ooo_percent = ((st->lim_total_oopkts << 10) /
	    st->lim_total_rxpkts) * 100;

	st->lim_conn_timeout_percent = ((st->lim_conn_timeouts << 10) /
	    st->lim_conn_attempts) * 100;

	/*
	 * Is Low Internet detected? First order metrics are bandwidth
	 * and RTT. If these metrics are below the minimum thresholds
	 * defined then the network attachment can be classified as
	 * having Low Internet capacity.
	 *
	 * High connection timeout rate also indicates Low Internet
	 * capacity.
	 */
	if (st->lim_dl_max_bandwidth > 0 &&
	    st->lim_dl_max_bandwidth <= NSTAT_LIM_DL_MAX_BANDWIDTH_THRESHOLD) {
		st->lim_dl_detected = 1;
	}

	if ((st->lim_ul_max_bandwidth > 0 &&
	    st->lim_ul_max_bandwidth <= NSTAT_LIM_UL_MAX_BANDWIDTH_THRESHOLD) ||
	    st->lim_rtt_min >= NSTAT_LIM_UL_MIN_RTT_THRESHOLD) {
		st->lim_ul_detected = 1;
	}

	if (st->lim_conn_attempts > 20 &&
	    st->lim_conn_timeout_percent >=
	    NSTAT_LIM_CONN_TIMEOUT_PERCENT_THRESHOLD) {
		st->lim_ul_detected = 1;
	}
	/*
	 * Second order metrics: If there was high packet loss even after
	 * using delay based algorithms then we classify it as Low Internet
	 * again
	 */
	if (st->lim_bk_txpkts >= nstat_lim_min_tx_pkts &&
	    st->lim_packet_loss_percent >=
	    NSTAT_LIM_PACKET_LOSS_PERCENT_THRESHOLD) {
		st->lim_ul_detected = 1;
	}

	return TRUE;
}
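/*
 * Illustrative example: an attachment reporting lim_dl_max_bandwidth of
 * 800000 (800 Kbps) trips the 1 Mbps download threshold and sets
 * lim_dl_detected, while a lim_rtt_min of 1200 ms alone is enough to
 * set lim_ul_detected via the RTT clause above.
 */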
static u_int64_t nstat_lim_last_report_time = 0;
static void
nstat_ifnet_report_lim_stats(void)
{
	u_int64_t uptime;
	struct nstat_sysinfo_data data;
	struct nstat_sysinfo_lim_stats *st;
	struct ifnet *ifp;
	int err;

	uptime = net_uptime();

	if ((u_int32_t)(uptime - nstat_lim_last_report_time) <
	    nstat_lim_interval) {
		return;
	}

	nstat_lim_last_report_time = uptime;
	data.flags = NSTAT_SYSINFO_LIM_STATS;
	st = &data.u.lim_stats;
	data.unsent_data_cnt = 0;

	ifnet_head_lock_shared();
	TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
		if (!IF_FULLY_ATTACHED(ifp)) {
			continue;
		}

		/* Limit reporting to Wifi, Ethernet and cellular */
		if (!(IFNET_IS_ETHERNET(ifp) || IFNET_IS_CELLULAR(ifp))) {
			continue;
		}

		if (!nstat_lim_activity_check(&ifp->if_lim_stat)) {
			continue;
		}

		bzero(st, sizeof(*st));
		st->ifnet_siglen = sizeof(st->ifnet_signature);
		err = ifnet_get_netsignature(ifp, AF_INET,
		    (u_int8_t *)&st->ifnet_siglen, NULL,
		    st->ifnet_signature);
		if (err != 0) {
			err = ifnet_get_netsignature(ifp, AF_INET6,
			    (u_int8_t *)&st->ifnet_siglen, NULL,
			    st->ifnet_signature);
			if (err != 0) {
				continue;
			}
		}
		ifnet_lock_shared(ifp);
		if (IFNET_IS_CELLULAR(ifp)) {
			st->ifnet_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_CELLULAR;
		} else if (IFNET_IS_WIFI(ifp)) {
			st->ifnet_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_WIFI;
		} else {
			st->ifnet_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_ETHERNET;
		}
		bcopy(&ifp->if_lim_stat, &st->lim_stat,
		    sizeof(st->lim_stat));

		/* Zero the stats in ifp */
		bzero(&ifp->if_lim_stat, sizeof(ifp->if_lim_stat));
		ifnet_lock_done(ifp);
		nstat_sysinfo_send_data(&data);
	}
	ifnet_head_done();
}
static errno_t
nstat_ifnet_copy_descriptor(
	nstat_provider_cookie_t cookie,
	void *data,
	size_t len)
{
	nstat_ifnet_descriptor *desc = (nstat_ifnet_descriptor *)data;
	struct nstat_ifnet_cookie *ifcookie =
	    (struct nstat_ifnet_cookie *)cookie;
	struct ifnet *ifp = ifcookie->ifp;

	if (len < sizeof(nstat_ifnet_descriptor)) {
		return EINVAL;
	}

	if (nstat_ifnet_gone(cookie)) {
		return EINVAL;
	}

	bzero(desc, sizeof(*desc));
	ifnet_lock_shared(ifp);
	strlcpy(desc->name, ifp->if_xname, sizeof(desc->name));
	desc->ifindex = ifp->if_index;
	desc->threshold = ifp->if_data_threshold;
	desc->type = ifp->if_type;
	if (ifp->if_desc.ifd_len < sizeof(desc->description)) {
		memcpy(desc->description, ifp->if_desc.ifd_desc,
		    sizeof(desc->description));
	}
	nstat_ifnet_copy_link_status(ifp, desc);
	ifnet_lock_done(ifp);
	return 0;
}
static void
nstat_init_ifnet_provider(void)
{
	bzero(&nstat_ifnet_provider, sizeof(nstat_ifnet_provider));
	nstat_ifnet_provider.nstat_provider_id = NSTAT_PROVIDER_IFNET;
	nstat_ifnet_provider.nstat_descriptor_length = sizeof(nstat_ifnet_descriptor);
	nstat_ifnet_provider.nstat_lookup = nstat_ifnet_lookup;
	nstat_ifnet_provider.nstat_gone = nstat_ifnet_gone;
	nstat_ifnet_provider.nstat_counts = nstat_ifnet_counts;
	nstat_ifnet_provider.nstat_watcher_add = NULL;
	nstat_ifnet_provider.nstat_watcher_remove = NULL;
	nstat_ifnet_provider.nstat_copy_descriptor = nstat_ifnet_copy_descriptor;
	nstat_ifnet_provider.nstat_release = nstat_ifnet_release;
	nstat_ifnet_provider.next = nstat_providers;
	nstat_providers = &nstat_ifnet_provider;
}
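/*
 * Note: registration simply prepends nstat_ifnet_provider to the global
 * singly-linked nstat_providers list; client add-source requests are
 * expected to walk that list to match a provider id with its callbacks.
 */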
__private_extern__ void
nstat_ifnet_threshold_reached(unsigned int ifindex)
{
	nstat_control_state *state;
	nstat_src *src;
	struct ifnet *ifp;
	struct nstat_ifnet_cookie *ifcookie;

	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next) {
		lck_mtx_lock(&state->ncs_mtx);
		TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
		{
			if (src->provider != &nstat_ifnet_provider) {
				continue;
			}
			ifcookie = (struct nstat_ifnet_cookie *)src->cookie;
			ifp = ifcookie->ifp;
			if (ifp->if_index != ifindex) {
				continue;
			}
			nstat_control_send_counts(state, src, 0, 0, NULL);
		}
		lck_mtx_unlock(&state->ncs_mtx);
	}
	lck_mtx_unlock(&nstat_mtx);
}
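/*
 * Note: this callback is expected to run from the ifnet data path once
 * roughly if_data_threshold bytes have moved on the interface since the
 * last report; it pushes a fresh counts message to every client that
 * added this ifnet as a source.
 */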
#pragma mark -- Sysinfo --
static void
nstat_set_keyval_scalar(nstat_sysinfo_keyval *kv, int key, u_int32_t val)
{
	kv->nstat_sysinfo_key = key;
	kv->nstat_sysinfo_flags = NSTAT_SYSINFO_FLAG_SCALAR;
	kv->u.nstat_sysinfo_scalar = val;
	kv->nstat_sysinfo_valsize = sizeof(kv->u.nstat_sysinfo_scalar);
}

static void
nstat_set_keyval_u64_scalar(nstat_sysinfo_keyval *kv, int key, u_int64_t val)
{
	kv->nstat_sysinfo_key = key;
	kv->nstat_sysinfo_flags = NSTAT_SYSINFO_FLAG_SCALAR;
	kv->u.nstat_sysinfo_scalar = val;
	kv->nstat_sysinfo_valsize = sizeof(kv->u.nstat_sysinfo_scalar);
}

static void
nstat_set_keyval_string(nstat_sysinfo_keyval *kv, int key, u_int8_t *buf,
    u_int32_t len)
{
	kv->nstat_sysinfo_key = key;
	kv->nstat_sysinfo_flags = NSTAT_SYSINFO_FLAG_STRING;
	kv->nstat_sysinfo_valsize = min(len,
	    NSTAT_SYSINFO_KEYVAL_STRING_MAXSIZE);
	bcopy(buf, kv->u.nstat_sysinfo_string, kv->nstat_sysinfo_valsize);
}
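/*
 * Note: string values are silently truncated to
 * NSTAT_SYSINFO_KEYVAL_STRING_MAXSIZE bytes by the min() above, so
 * nstat_sysinfo_valsize, not the caller-supplied length, is the
 * authoritative size on the wire.
 */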
static void
nstat_sysinfo_send_data_internal(
	nstat_control_state *control,
	nstat_sysinfo_data *data)
{
	nstat_msg_sysinfo_counts *syscnt = NULL;
	size_t allocsize = 0, countsize = 0, nkeyvals = 0, finalsize = 0;
	nstat_sysinfo_keyval *kv;
	errno_t result = 0;
	size_t i = 0;

	allocsize = offsetof(nstat_msg_sysinfo_counts, counts);
	countsize = offsetof(nstat_sysinfo_counts, nstat_sysinfo_keyvals);
	finalsize = allocsize;

	/* get number of key-vals for each kind of stat */
	switch (data->flags) {
	case NSTAT_SYSINFO_MBUF_STATS:
		nkeyvals = sizeof(struct nstat_sysinfo_mbuf_stats) /
		    sizeof(u_int32_t);
		break;
	case NSTAT_SYSINFO_TCP_STATS:
		nkeyvals = NSTAT_SYSINFO_TCP_STATS_COUNT;
		break;
	case NSTAT_SYSINFO_IFNET_ECN_STATS:
		nkeyvals = (sizeof(struct if_tcp_ecn_stat) /
		    sizeof(u_int64_t));

		/* Two more keys for ifnet type and proto */
		nkeyvals += 2;

		/* One key for unsent data. */
		nkeyvals++;
		break;
	case NSTAT_SYSINFO_LIM_STATS:
		nkeyvals = NSTAT_LIM_STAT_KEYVAL_COUNT;
		break;
	case NSTAT_SYSINFO_NET_API_STATS:
		nkeyvals = NSTAT_NET_API_STAT_KEYVAL_COUNT;
		break;
	default:
		return;
	}
	countsize += sizeof(nstat_sysinfo_keyval) * nkeyvals;
	allocsize += countsize;

	syscnt = kheap_alloc(KHEAP_TEMP, allocsize, Z_WAITOK | Z_ZERO);
	if (syscnt == NULL) {
		return;
	}

	kv = (nstat_sysinfo_keyval *) &syscnt->counts.nstat_sysinfo_keyvals;
	switch (data->flags) {
	case NSTAT_SYSINFO_MBUF_STATS:
	{
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_KEY_MBUF_256B_TOTAL, data->u.mb_stats.total_256b);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_KEY_MBUF_2KB_TOTAL, data->u.mb_stats.total_2kb);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_KEY_MBUF_4KB_TOTAL, data->u.mb_stats.total_4kb);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MBUF_16KB_TOTAL, data->u.mb_stats.total_16kb);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_KEY_SOCK_MBCNT, data->u.mb_stats.sbmb_total);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_KEY_SOCK_ATMBLIMIT, data->u.mb_stats.sb_atmbuflimit);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MBUF_DRAIN_CNT, data->u.mb_stats.draincnt);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MBUF_MEM_RELEASED, data->u.mb_stats.memreleased);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_KEY_SOCK_MBFLOOR, data->u.mb_stats.sbmb_floor);
		VERIFY(i == nkeyvals);
		break;
	}
	case NSTAT_SYSINFO_TCP_STATS:
	{
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_KEY_IPV4_AVGRTT, data->u.tcp_stats.ipv4_avgrtt);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_KEY_IPV6_AVGRTT, data->u.tcp_stats.ipv6_avgrtt);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_KEY_SEND_PLR, data->u.tcp_stats.send_plr);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_KEY_RECV_PLR, data->u.tcp_stats.recv_plr);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_KEY_SEND_TLRTO, data->u.tcp_stats.send_tlrto_rate);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_KEY_SEND_REORDERRATE, data->u.tcp_stats.send_reorder_rate);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_CONNECTION_ATTEMPTS, data->u.tcp_stats.connection_attempts);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_CONNECTION_ACCEPTS, data->u.tcp_stats.connection_accepts);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_CLIENT_ENABLED, data->u.tcp_stats.ecn_client_enabled);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_SERVER_ENABLED, data->u.tcp_stats.ecn_server_enabled);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_CLIENT_SETUP, data->u.tcp_stats.ecn_client_setup);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_SERVER_SETUP, data->u.tcp_stats.ecn_server_setup);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_CLIENT_SUCCESS, data->u.tcp_stats.ecn_client_success);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_SERVER_SUCCESS, data->u.tcp_stats.ecn_server_success);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_NOT_SUPPORTED, data->u.tcp_stats.ecn_not_supported);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_LOST_SYN, data->u.tcp_stats.ecn_lost_syn);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_LOST_SYNACK, data->u.tcp_stats.ecn_lost_synack);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_RECV_CE, data->u.tcp_stats.ecn_recv_ce);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_RECV_ECE, data->u.tcp_stats.ecn_recv_ece);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_SENT_ECE, data->u.tcp_stats.ecn_sent_ece);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_CONN_RECV_CE, data->u.tcp_stats.ecn_conn_recv_ce);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_CONN_RECV_ECE, data->u.tcp_stats.ecn_conn_recv_ece);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_CONN_PLNOCE, data->u.tcp_stats.ecn_conn_plnoce);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_CONN_PL_CE, data->u.tcp_stats.ecn_conn_pl_ce);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_CONN_NOPL_CE, data->u.tcp_stats.ecn_conn_nopl_ce);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_FALLBACK_SYNLOSS, data->u.tcp_stats.ecn_fallback_synloss);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_FALLBACK_REORDER, data->u.tcp_stats.ecn_fallback_reorder);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_FALLBACK_CE, data->u.tcp_stats.ecn_fallback_ce);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_TFO_SYN_DATA_RCV, data->u.tcp_stats.tfo_syn_data_rcv);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_TFO_COOKIE_REQ_RCV, data->u.tcp_stats.tfo_cookie_req_rcv);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_TFO_COOKIE_SENT, data->u.tcp_stats.tfo_cookie_sent);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_TFO_COOKIE_INVALID, data->u.tcp_stats.tfo_cookie_invalid);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_TFO_COOKIE_REQ, data->u.tcp_stats.tfo_cookie_req);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_TFO_COOKIE_RCV, data->u.tcp_stats.tfo_cookie_rcv);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_TFO_SYN_DATA_SENT, data->u.tcp_stats.tfo_syn_data_sent);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_TFO_SYN_DATA_ACKED, data->u.tcp_stats.tfo_syn_data_acked);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_TFO_SYN_LOSS, data->u.tcp_stats.tfo_syn_loss);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_TFO_BLACKHOLE, data->u.tcp_stats.tfo_blackhole);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_TFO_COOKIE_WRONG, data->u.tcp_stats.tfo_cookie_wrong);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_TFO_NO_COOKIE_RCV, data->u.tcp_stats.tfo_no_cookie_rcv);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_TFO_HEURISTICS_DISABLE, data->u.tcp_stats.tfo_heuristics_disable);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_TFO_SEND_BLACKHOLE, data->u.tcp_stats.tfo_sndblackhole);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_HANDOVER_ATTEMPT, data->u.tcp_stats.mptcp_handover_attempt);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_INTERACTIVE_ATTEMPT, data->u.tcp_stats.mptcp_interactive_attempt);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_AGGREGATE_ATTEMPT, data->u.tcp_stats.mptcp_aggregate_attempt);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_FP_HANDOVER_ATTEMPT, data->u.tcp_stats.mptcp_fp_handover_attempt);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_FP_INTERACTIVE_ATTEMPT, data->u.tcp_stats.mptcp_fp_interactive_attempt);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_FP_AGGREGATE_ATTEMPT, data->u.tcp_stats.mptcp_fp_aggregate_attempt);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_HEURISTIC_FALLBACK, data->u.tcp_stats.mptcp_heuristic_fallback);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_FP_HEURISTIC_FALLBACK, data->u.tcp_stats.mptcp_fp_heuristic_fallback);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_HANDOVER_SUCCESS_WIFI, data->u.tcp_stats.mptcp_handover_success_wifi);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_HANDOVER_SUCCESS_CELL, data->u.tcp_stats.mptcp_handover_success_cell);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_INTERACTIVE_SUCCESS, data->u.tcp_stats.mptcp_interactive_success);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_AGGREGATE_SUCCESS, data->u.tcp_stats.mptcp_aggregate_success);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_FP_HANDOVER_SUCCESS_WIFI, data->u.tcp_stats.mptcp_fp_handover_success_wifi);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_FP_HANDOVER_SUCCESS_CELL, data->u.tcp_stats.mptcp_fp_handover_success_cell);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_FP_INTERACTIVE_SUCCESS, data->u.tcp_stats.mptcp_fp_interactive_success);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_FP_AGGREGATE_SUCCESS, data->u.tcp_stats.mptcp_fp_aggregate_success);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_HANDOVER_CELL_FROM_WIFI, data->u.tcp_stats.mptcp_handover_cell_from_wifi);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_HANDOVER_WIFI_FROM_CELL, data->u.tcp_stats.mptcp_handover_wifi_from_cell);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_INTERACTIVE_CELL_FROM_WIFI, data->u.tcp_stats.mptcp_interactive_cell_from_wifi);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_HANDOVER_CELL_BYTES, data->u.tcp_stats.mptcp_handover_cell_bytes);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_INTERACTIVE_CELL_BYTES, data->u.tcp_stats.mptcp_interactive_cell_bytes);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_AGGREGATE_CELL_BYTES, data->u.tcp_stats.mptcp_aggregate_cell_bytes);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_HANDOVER_ALL_BYTES, data->u.tcp_stats.mptcp_handover_all_bytes);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_INTERACTIVE_ALL_BYTES, data->u.tcp_stats.mptcp_interactive_all_bytes);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_AGGREGATE_ALL_BYTES, data->u.tcp_stats.mptcp_aggregate_all_bytes);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_BACK_TO_WIFI, data->u.tcp_stats.mptcp_back_to_wifi);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_WIFI_PROXY, data->u.tcp_stats.mptcp_wifi_proxy);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_CELL_PROXY, data->u.tcp_stats.mptcp_cell_proxy);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_TRIGGERED_CELL, data->u.tcp_stats.mptcp_triggered_cell);
		VERIFY(i == nkeyvals);
		break;
	}
	case NSTAT_SYSINFO_IFNET_ECN_STATS:
	{
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_TYPE, data->u.ifnet_ecn_stats.ifnet_type);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_PROTO, data->u.ifnet_ecn_stats.ifnet_proto);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_CLIENT_SETUP, data->u.ifnet_ecn_stats.ecn_stat.ecn_client_setup);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_SERVER_SETUP, data->u.ifnet_ecn_stats.ecn_stat.ecn_server_setup);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_CLIENT_SUCCESS, data->u.ifnet_ecn_stats.ecn_stat.ecn_client_success);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_SERVER_SUCCESS, data->u.ifnet_ecn_stats.ecn_stat.ecn_server_success);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_PEER_NOSUPPORT, data->u.ifnet_ecn_stats.ecn_stat.ecn_peer_nosupport);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_SYN_LOST, data->u.ifnet_ecn_stats.ecn_stat.ecn_syn_lost);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_SYNACK_LOST, data->u.ifnet_ecn_stats.ecn_stat.ecn_synack_lost);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_RECV_CE, data->u.ifnet_ecn_stats.ecn_stat.ecn_recv_ce);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_RECV_ECE, data->u.ifnet_ecn_stats.ecn_stat.ecn_recv_ece);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_CONN_RECV_CE, data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_recv_ce);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_CONN_RECV_ECE, data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_recv_ece);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_CONN_PLNOCE, data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_plnoce);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_CONN_PLCE, data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_plce);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_CONN_NOPLCE, data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_noplce);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_FALLBACK_SYNLOSS, data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_synloss);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_FALLBACK_REORDER, data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_reorder);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_FALLBACK_CE, data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_ce);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_ON_RTT_AVG, data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rtt_avg);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_ON_RTT_VAR, data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rtt_var);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_ON_OOPERCENT, data->u.ifnet_ecn_stats.ecn_stat.ecn_on.oo_percent);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_ON_SACK_EPISODE, data->u.ifnet_ecn_stats.ecn_stat.ecn_on.sack_episodes);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_ON_REORDER_PERCENT, data->u.ifnet_ecn_stats.ecn_stat.ecn_on.reorder_percent);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_ON_RXMIT_PERCENT, data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rxmit_percent);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_ON_RXMIT_DROP, data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rxmit_drop);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_OFF_RTT_AVG, data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rtt_avg);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_OFF_RTT_VAR, data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rtt_var);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_OFF_OOPERCENT, data->u.ifnet_ecn_stats.ecn_stat.ecn_off.oo_percent);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_OFF_SACK_EPISODE, data->u.ifnet_ecn_stats.ecn_stat.ecn_off.sack_episodes);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_OFF_REORDER_PERCENT, data->u.ifnet_ecn_stats.ecn_stat.ecn_off.reorder_percent);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_OFF_RXMIT_PERCENT, data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rxmit_percent);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_OFF_RXMIT_DROP, data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rxmit_drop);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_TXPKTS, data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_txpkts);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_RXMTPKTS, data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_rxmitpkts);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_RXPKTS, data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_rxpkts);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_OOPKTS, data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_oopkts);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_ON_DROP_RST, data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rst_drop);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_TXPKTS, data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_txpkts);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_RXMTPKTS, data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_rxmitpkts);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_RXPKTS, data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_rxpkts);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_OOPKTS, data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_oopkts);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_OFF_DROP_RST, data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rst_drop);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_TOTAL_CONN, data->u.ifnet_ecn_stats.ecn_stat.ecn_total_conn);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_IFNET_UNSENT_DATA, data->unsent_data_cnt);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_FALLBACK_DROPRST, data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_droprst);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_FALLBACK_DROPRXMT, data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_droprxmt);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_FALLBACK_SYNRST, data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_synrst);
		break;
	}
	case NSTAT_SYSINFO_LIM_STATS:
	{
		nstat_set_keyval_string(&kv[i++], NSTAT_SYSINFO_LIM_IFNET_SIGNATURE,
		    data->u.lim_stats.ifnet_signature, data->u.lim_stats.ifnet_siglen);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_LIM_IFNET_DL_MAX_BANDWIDTH, data->u.lim_stats.lim_stat.lim_dl_max_bandwidth);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_LIM_IFNET_UL_MAX_BANDWIDTH, data->u.lim_stats.lim_stat.lim_ul_max_bandwidth);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_LIM_IFNET_PACKET_LOSS_PERCENT, data->u.lim_stats.lim_stat.lim_packet_loss_percent);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_LIM_IFNET_PACKET_OOO_PERCENT, data->u.lim_stats.lim_stat.lim_packet_ooo_percent);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_LIM_IFNET_RTT_VARIANCE, data->u.lim_stats.lim_stat.lim_rtt_variance);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_LIM_IFNET_RTT_MIN, data->u.lim_stats.lim_stat.lim_rtt_min);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_LIM_IFNET_RTT_AVG, data->u.lim_stats.lim_stat.lim_rtt_average);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_LIM_IFNET_CONN_TIMEOUT_PERCENT, data->u.lim_stats.lim_stat.lim_conn_timeout_percent);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_LIM_IFNET_DL_DETECTED, data->u.lim_stats.lim_stat.lim_dl_detected);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_LIM_IFNET_UL_DETECTED, data->u.lim_stats.lim_stat.lim_ul_detected);
		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_LIM_IFNET_TYPE, data->u.lim_stats.ifnet_type);
		break;
	}
	case NSTAT_SYSINFO_NET_API_STATS:
	{
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_IF_FLTR_ATTACH, data->u.net_api_stats.net_api_stats.nas_iflt_attach_total);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_IF_FLTR_ATTACH_OS, data->u.net_api_stats.net_api_stats.nas_iflt_attach_os_total);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_IP_FLTR_ADD, data->u.net_api_stats.net_api_stats.nas_ipf_add_total);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_IP_FLTR_ADD_OS, data->u.net_api_stats.net_api_stats.nas_ipf_add_os_total);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_FLTR_ATTACH, data->u.net_api_stats.net_api_stats.nas_sfltr_register_total);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_FLTR_ATTACH_OS, data->u.net_api_stats.net_api_stats.nas_sfltr_register_os_total);

		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_ALLOC_TOTAL, data->u.net_api_stats.net_api_stats.nas_socket_alloc_total);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_ALLOC_KERNEL, data->u.net_api_stats.net_api_stats.nas_socket_in_kernel_total);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_ALLOC_KERNEL_OS, data->u.net_api_stats.net_api_stats.nas_socket_in_kernel_os_total);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_NECP_CLIENTUUID, data->u.net_api_stats.net_api_stats.nas_socket_necp_clientuuid_total);

		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_DOMAIN_LOCAL, data->u.net_api_stats.net_api_stats.nas_socket_domain_local_total);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_DOMAIN_ROUTE, data->u.net_api_stats.net_api_stats.nas_socket_domain_route_total);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_DOMAIN_INET, data->u.net_api_stats.net_api_stats.nas_socket_domain_inet_total);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_DOMAIN_INET6, data->u.net_api_stats.net_api_stats.nas_socket_domain_inet6_total);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_DOMAIN_SYSTEM, data->u.net_api_stats.net_api_stats.nas_socket_domain_system_total);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_DOMAIN_MULTIPATH, data->u.net_api_stats.net_api_stats.nas_socket_domain_multipath_total);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_DOMAIN_KEY, data->u.net_api_stats.net_api_stats.nas_socket_domain_key_total);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_DOMAIN_NDRV, data->u.net_api_stats.net_api_stats.nas_socket_domain_ndrv_total);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_DOMAIN_OTHER, data->u.net_api_stats.net_api_stats.nas_socket_domain_other_total);

		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_INET_STREAM, data->u.net_api_stats.net_api_stats.nas_socket_inet_stream_total);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_INET_DGRAM, data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_total);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_INET_DGRAM_CONNECTED, data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_connected);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_INET_DGRAM_DNS, data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_dns);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_INET_DGRAM_NO_DATA, data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_no_data);

		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_INET6_STREAM, data->u.net_api_stats.net_api_stats.nas_socket_inet6_stream_total);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_INET6_DGRAM, data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_total);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_CONNECTED, data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_connected);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_DNS, data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_dns);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_NO_DATA, data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_no_data);

		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_INET_MCAST_JOIN, data->u.net_api_stats.net_api_stats.nas_socket_mcast_join_total);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_INET_MCAST_JOIN_OS, data->u.net_api_stats.net_api_stats.nas_socket_mcast_join_os_total);

		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_NEXUS_FLOW_INET_STREAM, data->u.net_api_stats.net_api_stats.nas_nx_flow_inet_stream_total);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_NEXUS_FLOW_INET_DATAGRAM, data->u.net_api_stats.net_api_stats.nas_nx_flow_inet_dgram_total);

		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_NEXUS_FLOW_INET6_STREAM, data->u.net_api_stats.net_api_stats.nas_nx_flow_inet6_stream_total);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_NEXUS_FLOW_INET6_DATAGRAM, data->u.net_api_stats.net_api_stats.nas_nx_flow_inet6_dgram_total);

		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_IFNET_ALLOC, data->u.net_api_stats.net_api_stats.nas_ifnet_alloc_total);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_IFNET_ALLOC_OS, data->u.net_api_stats.net_api_stats.nas_ifnet_alloc_os_total);

		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_PF_ADDRULE, data->u.net_api_stats.net_api_stats.nas_pf_addrule_total);
		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_PF_ADDRULE_OS, data->u.net_api_stats.net_api_stats.nas_pf_addrule_os);

		nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_VMNET_START, data->u.net_api_stats.net_api_stats.nas_vmnet_total);

		nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_API_REPORT_INTERVAL, data->u.net_api_stats.report_interval);
		break;
	}
	}
	if (syscnt != NULL) {
		VERIFY(i > 0 && i <= nkeyvals);
		countsize = offsetof(nstat_sysinfo_counts,
		    nstat_sysinfo_keyvals) +
		    sizeof(nstat_sysinfo_keyval) * i;
		finalsize += countsize;
		syscnt->hdr.type = NSTAT_MSG_TYPE_SYSINFO_COUNTS;
		assert(finalsize <= MAX_NSTAT_MSG_HDR_LENGTH);
		syscnt->hdr.length = (u_int16_t)finalsize;
		syscnt->counts.nstat_sysinfo_len = (u_int32_t)countsize;

		result = ctl_enqueuedata(control->ncs_kctl,
		    control->ncs_unit, syscnt, finalsize, CTL_DATA_EOR);
		if (result != 0) {
			nstat_stats.nstat_sysinfofailures += 1;
		}
		kheap_free(KHEAP_TEMP, syscnt, allocsize);
	}
	return;
}
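/*
 * Note: the enqueued message is sized by the number of key-values
 * actually written (i), not by the worst-case nkeyvals used for the
 * allocation, so receivers should trust hdr.length and
 * counts.nstat_sysinfo_len rather than assuming a fixed layout.
 */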
__private_extern__ void
nstat_sysinfo_send_data(
	nstat_sysinfo_data *data)
{
	nstat_control_state *control;

	lck_mtx_lock(&nstat_mtx);
	for (control = nstat_controls; control; control = control->ncs_next) {
		lck_mtx_lock(&control->ncs_mtx);
		if ((control->ncs_flags & NSTAT_FLAG_SYSINFO_SUBSCRIBED) != 0) {
			nstat_sysinfo_send_data_internal(control, data);
		}
		lck_mtx_unlock(&control->ncs_mtx);
	}
	lck_mtx_unlock(&nstat_mtx);
}
static void
nstat_sysinfo_generate_report(void)
{
	mbuf_report_peak_usage();
	tcp_report_stats();
	nstat_ifnet_report_ecn_stats();
	nstat_ifnet_report_lim_stats();
	nstat_net_api_report_stats();
}
#pragma mark -- net_api --

static struct net_api_stats net_api_stats_before;
static u_int64_t net_api_stats_last_report_time;

static void
nstat_net_api_report_stats(void)
{
	struct nstat_sysinfo_data data;
	struct nstat_sysinfo_net_api_stats *st = &data.u.net_api_stats;
	u_int64_t uptime;

	uptime = net_uptime();

	if ((u_int32_t)(uptime - net_api_stats_last_report_time) <
	    net_api_stats_report_interval) {
		return;
	}

	st->report_interval = (u_int32_t)(uptime - net_api_stats_last_report_time);
	net_api_stats_last_report_time = uptime;

	data.flags = NSTAT_SYSINFO_NET_API_STATS;
	data.unsent_data_cnt = 0;

	/*
	 * Some of the fields in the report are the current value and
	 * other fields are the delta from the last report:
	 * - Report difference for the per flow counters as they increase
	 *   with usage
	 * - Report current value for other counters as they tend not to change
	 *   much
	 */
#define STATCOPY(f) \
	(st->net_api_stats.f = net_api_stats.f)
#define STATDIFF(f) \
	(st->net_api_stats.f = net_api_stats.f - net_api_stats_before.f)
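/*
 * Illustrative expansion: STATDIFF(nas_socket_alloc_total) becomes
 *   st->net_api_stats.nas_socket_alloc_total =
 *       net_api_stats.nas_socket_alloc_total -
 *       net_api_stats_before.nas_socket_alloc_total;
 * i.e. the value reported is the growth since the previous report.
 */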
	STATCOPY(nas_iflt_attach_count);
	STATCOPY(nas_iflt_attach_total);
	STATCOPY(nas_iflt_attach_os_total);

	STATCOPY(nas_ipf_add_count);
	STATCOPY(nas_ipf_add_total);
	STATCOPY(nas_ipf_add_os_total);

	STATCOPY(nas_sfltr_register_count);
	STATCOPY(nas_sfltr_register_total);
	STATCOPY(nas_sfltr_register_os_total);

	STATDIFF(nas_socket_alloc_total);
	STATDIFF(nas_socket_in_kernel_total);
	STATDIFF(nas_socket_in_kernel_os_total);
	STATDIFF(nas_socket_necp_clientuuid_total);

	STATDIFF(nas_socket_domain_local_total);
	STATDIFF(nas_socket_domain_route_total);
	STATDIFF(nas_socket_domain_inet_total);
	STATDIFF(nas_socket_domain_inet6_total);
	STATDIFF(nas_socket_domain_system_total);
	STATDIFF(nas_socket_domain_multipath_total);
	STATDIFF(nas_socket_domain_key_total);
	STATDIFF(nas_socket_domain_ndrv_total);
	STATDIFF(nas_socket_domain_other_total);

	STATDIFF(nas_socket_inet_stream_total);
	STATDIFF(nas_socket_inet_dgram_total);
	STATDIFF(nas_socket_inet_dgram_connected);
	STATDIFF(nas_socket_inet_dgram_dns);
	STATDIFF(nas_socket_inet_dgram_no_data);

	STATDIFF(nas_socket_inet6_stream_total);
	STATDIFF(nas_socket_inet6_dgram_total);
	STATDIFF(nas_socket_inet6_dgram_connected);
	STATDIFF(nas_socket_inet6_dgram_dns);
	STATDIFF(nas_socket_inet6_dgram_no_data);

	STATDIFF(nas_socket_mcast_join_total);
	STATDIFF(nas_socket_mcast_join_os_total);

	STATDIFF(nas_sock_inet6_stream_exthdr_in);
	STATDIFF(nas_sock_inet6_stream_exthdr_out);
	STATDIFF(nas_sock_inet6_dgram_exthdr_in);
	STATDIFF(nas_sock_inet6_dgram_exthdr_out);

	STATDIFF(nas_nx_flow_inet_stream_total);
	STATDIFF(nas_nx_flow_inet_dgram_total);

	STATDIFF(nas_nx_flow_inet6_stream_total);
	STATDIFF(nas_nx_flow_inet6_dgram_total);

	STATCOPY(nas_ifnet_alloc_count);
	STATCOPY(nas_ifnet_alloc_total);
	STATCOPY(nas_ifnet_alloc_os_count);
	STATCOPY(nas_ifnet_alloc_os_total);

	STATCOPY(nas_pf_addrule_total);
	STATCOPY(nas_pf_addrule_os);

	STATCOPY(nas_vmnet_total);

#undef STATCOPY
#undef STATDIFF

	nstat_sysinfo_send_data(&data);

	/*
	 * Save a copy of the current fields so we can diff them the next time
	 */
	memcpy(&net_api_stats_before, &net_api_stats,
	    sizeof(struct net_api_stats));
	_CASSERT(sizeof(net_api_stats_before) == sizeof(net_api_stats));
}
#pragma mark -- Kernel Control Socket --

static kern_ctl_ref nstat_ctlref = NULL;
static lck_grp_t *nstat_lck_grp = NULL;

static errno_t nstat_control_connect(kern_ctl_ref kctl, struct sockaddr_ctl *sac, void **uinfo);
static errno_t nstat_control_disconnect(kern_ctl_ref kctl, u_int32_t unit, void *uinfo);
static errno_t nstat_control_send(kern_ctl_ref kctl, u_int32_t unit, void *uinfo, mbuf_t m, int flags);
static errno_t
nstat_enqueue_success(
	uint64_t context,
	nstat_control_state *state,
	u_int16_t flags)
{
	nstat_msg_hdr success;
	errno_t result;

	bzero(&success, sizeof(success));
	success.context = context;
	success.type = NSTAT_MSG_TYPE_SUCCESS;
	success.length = sizeof(success);
	success.flags = flags;
	result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &success,
	    sizeof(success), CTL_DATA_EOR | CTL_DATA_CRIT);
	if (result != 0) {
		if (nstat_debug != 0) {
			printf("%s: could not enqueue success message %d\n",
			    __func__, result);
		}
		nstat_stats.nstat_successmsgfailures += 1;
	}
	return result;
}
static errno_t
nstat_control_send_event(
	nstat_control_state *state,
	nstat_src *src,
	u_int64_t event)
{
	errno_t result = ENOTSUP;

	if (nstat_control_reporting_allowed(state, src)) {
		if ((state->ncs_flags & NSTAT_FLAG_SUPPORTS_UPDATES) != 0) {
			result = nstat_control_send_update(state, src, 0, event, 0, NULL);
			if (result != 0) {
				if (nstat_debug != 0) {
					printf("%s - nstat_control_send_event() %d\n", __func__, result);
				}
			}
		} else {
			if (nstat_debug != 0) {
				printf("%s - nstat_control_send_event() used when updates not supported\n", __func__);
			}
		}
	}
	return result;
}
static errno_t
nstat_control_send_goodbye(
	nstat_control_state *state,
	nstat_src *src)
{
	errno_t result = 0;
	int failed = 0;

	if (nstat_control_reporting_allowed(state, src)) {
		if ((state->ncs_flags & NSTAT_FLAG_SUPPORTS_UPDATES) != 0) {
			result = nstat_control_send_update(state, src, 0, 0, NSTAT_MSG_HDR_FLAG_CLOSING, NULL);
			if (result != 0) {
				failed = 1;
				if (nstat_debug != 0) {
					printf("%s - nstat_control_send_update() %d\n", __func__, result);
				}
			}
		} else {
			// send one last counts notification
			result = nstat_control_send_counts(state, src, 0, NSTAT_MSG_HDR_FLAG_CLOSING, NULL);
			if (result != 0) {
				failed = 1;
				if (nstat_debug != 0) {
					printf("%s - nstat_control_send_counts() %d\n", __func__, result);
				}
			}

			// send a last description
			result = nstat_control_send_description(state, src, 0, NSTAT_MSG_HDR_FLAG_CLOSING);
			if (result != 0) {
				failed = 1;
				if (nstat_debug != 0) {
					printf("%s - nstat_control_send_description() %d\n", __func__, result);
				}
			}
		}
	}

	// send the source removed notification
	result = nstat_control_send_removed(state, src);
	if (result != 0 && nstat_debug) {
		failed = 1;
		if (nstat_debug != 0) {
			printf("%s - nstat_control_send_removed() %d\n", __func__, result);
		}
	}

	if (failed != 0) {
		nstat_stats.nstat_control_send_goodbye_failures++;
	}

	return result;
}
static void
nstat_flush_accumulated_msgs(
	nstat_control_state *state)
{
	errno_t result;

	if (state->ncs_accumulated != NULL && mbuf_len(state->ncs_accumulated) > 0) {
		mbuf_pkthdr_setlen(state->ncs_accumulated, mbuf_len(state->ncs_accumulated));
		result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, state->ncs_accumulated, CTL_DATA_EOR);
		if (result != 0) {
			nstat_stats.nstat_flush_accumulated_msgs_failures++;
			if (nstat_debug != 0) {
				printf("%s - ctl_enqueuembuf failed: %d\n", __func__, result);
			}
			mbuf_freem(state->ncs_accumulated);
		}
		state->ncs_accumulated = NULL;
	}
}
static errno_t
nstat_accumulate_msg(
	nstat_control_state *state,
	nstat_msg_hdr *hdr,
	size_t length)
{
	assert(length <= MAX_NSTAT_MSG_HDR_LENGTH);
	errno_t result = 0;

	if (state->ncs_accumulated && mbuf_trailingspace(state->ncs_accumulated) < length) {
		// Will send the current mbuf
		nstat_flush_accumulated_msgs(state);
	}

	if (state->ncs_accumulated == NULL) {
		unsigned int one = 1;
		if (mbuf_allocpacket(MBUF_DONTWAIT, NSTAT_MAX_MSG_SIZE, &one, &state->ncs_accumulated) != 0) {
			if (nstat_debug != 0) {
				printf("%s - mbuf_allocpacket failed\n", __func__);
			}
			result = ENOMEM;
		} else {
			mbuf_setlen(state->ncs_accumulated, 0);
		}
	}

	if (result == 0) {
		hdr->length = (u_int16_t)length;
		result = mbuf_copyback(state->ncs_accumulated, mbuf_len(state->ncs_accumulated),
		    length, hdr, MBUF_DONTWAIT);
	}

	if (result != 0) {
		nstat_flush_accumulated_msgs(state);
		if (nstat_debug != 0) {
			printf("%s - resorting to ctl_enqueuedata\n", __func__);
		}
		result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, hdr, length, CTL_DATA_EOR);
		if (result != 0) {
			nstat_stats.nstat_accumulate_msg_failures++;
		}
	}

	return result;
}
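/*
 * Batching note: nstat_accumulate_msg packs successive messages back to back
 * into a single mbuf chain (up to NSTAT_MAX_MSG_SIZE), and
 * nstat_flush_accumulated_msgs delivers the whole batch with one
 * ctl_enqueuembuf call. Clients opt into this per request via
 * NSTAT_MSG_HDR_FLAG_SUPPORTS_AGGREGATE and must therefore be prepared to
 * parse multiple nstat_msg_hdr records, each hdr.length bytes long, out of a
 * single read from the control socket.
 */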
static void
nstat_idle_check(
	__unused thread_call_param_t p0,
	__unused thread_call_param_t p1)
{
	nstat_control_state *control;
	nstat_src *src, *tmpsrc;
	tailq_head_nstat_src dead_list;
	TAILQ_INIT(&dead_list);

	lck_mtx_lock(&nstat_mtx);

	nstat_idle_time = 0;

	for (control = nstat_controls; control; control = control->ncs_next) {
		lck_mtx_lock(&control->ncs_mtx);
		if (!(control->ncs_flags & NSTAT_FLAG_REQCOUNTS)) {
			TAILQ_FOREACH_SAFE(src, &control->ncs_src_queue, ns_control_link, tmpsrc)
			{
				if (src->provider->nstat_gone(src->cookie)) {
					errno_t result;

					// Pull it off the list
					TAILQ_REMOVE(&control->ncs_src_queue, src, ns_control_link);

					result = nstat_control_send_goodbye(control, src);

					// Put this on the list to release later
					TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
				}
			}
		}
		control->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS;
		lck_mtx_unlock(&control->ncs_mtx);
	}

	if (nstat_controls) {
		clock_interval_to_deadline(60, NSEC_PER_SEC, &nstat_idle_time);
		thread_call_func_delayed((thread_call_func_t)nstat_idle_check, NULL, nstat_idle_time);
	}

	lck_mtx_unlock(&nstat_mtx);

	/* Generate any system level reports, if needed */
	nstat_sysinfo_generate_report();

	// Release the sources now that we aren't holding lots of locks
	while ((src = TAILQ_FIRST(&dead_list))) {
		TAILQ_REMOVE(&dead_list, src, ns_control_link);
		nstat_control_cleanup_source(NULL, src, FALSE);
	}
}
static void
nstat_control_register(void)
{
	// Create our lock group first
	lck_grp_attr_t *grp_attr = lck_grp_attr_alloc_init();
	lck_grp_attr_setdefault(grp_attr);
	nstat_lck_grp = lck_grp_alloc_init("network statistics kctl", grp_attr);
	lck_grp_attr_free(grp_attr);

	lck_mtx_init(&nstat_mtx, nstat_lck_grp, NULL);

	// Register the control
	struct kern_ctl_reg nstat_control;
	bzero(&nstat_control, sizeof(nstat_control));
	strlcpy(nstat_control.ctl_name, NET_STAT_CONTROL_NAME, sizeof(nstat_control.ctl_name));
	nstat_control.ctl_flags = CTL_FLAG_REG_EXTENDED | CTL_FLAG_REG_CRIT;
	nstat_control.ctl_sendsize = nstat_sendspace;
	nstat_control.ctl_recvsize = nstat_recvspace;
	nstat_control.ctl_connect = nstat_control_connect;
	nstat_control.ctl_disconnect = nstat_control_disconnect;
	nstat_control.ctl_send = nstat_control_send;

	ctl_register(&nstat_control, &nstat_ctlref);
}
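/*
 * Illustrative sketch (not part of this file): a user-space client reaches
 * the control registered above by resolving NET_STAT_CONTROL_NAME and
 * connecting a PF_SYSTEM socket. This is the standard kern_control client
 * sequence; error handling is trimmed for brevity.
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <sys/sys_domain.h>
 *	#include <sys/kern_control.h>
 *	#include <net/ntstat.h>
 *
 *	int
 *	nstat_open_client(void)
 *	{
 *		struct ctl_info info;
 *		struct sockaddr_ctl sc;
 *		int fd = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);
 *
 *		if (fd < 0) {
 *			return -1;
 *		}
 *		bzero(&info, sizeof(info));
 *		strlcpy(info.ctl_name, NET_STAT_CONTROL_NAME, sizeof(info.ctl_name));
 *		if (ioctl(fd, CTLIOCGINFO, &info) != 0) {	// resolve name -> ctl_id
 *			return -1;
 *		}
 *		bzero(&sc, sizeof(sc));
 *		sc.sc_len = sizeof(sc);
 *		sc.sc_family = AF_SYSTEM;
 *		sc.ss_sysaddr = AF_SYS_CONTROL;
 *		sc.sc_id = info.ctl_id;
 *		sc.sc_unit = 0;		// let the kernel pick a unit
 *		if (connect(fd, (struct sockaddr *)&sc, sizeof(sc)) != 0) {
 *			return -1;	// triggers nstat_control_connect above
 *		}
 *		return fd;
 *	}
 */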
static void
nstat_control_cleanup_source(
	nstat_control_state *state,
	struct nstat_src *src,
	boolean_t locked)
{
	errno_t result;

	if (state) {
		result = nstat_control_send_removed(state, src);
		if (result != 0) {
			nstat_stats.nstat_control_cleanup_source_failures++;
			if (nstat_debug != 0) {
				printf("%s - nstat_control_send_removed() %d\n",
				    __func__, result);
			}
		}
	}
	// Cleanup the source if we found it.
	src->provider->nstat_release(src->cookie, locked);
	kheap_free(KHEAP_NET_STAT, src, sizeof(*src));
}
static bool
nstat_control_reporting_allowed(
	nstat_control_state *state,
	nstat_src *src)
{
	if (src->provider->nstat_reporting_allowed == NULL) {
		return TRUE;
	}

	return src->provider->nstat_reporting_allowed(src->cookie,
	           &state->ncs_provider_filters[src->provider->nstat_provider_id]);
}
static errno_t
nstat_control_connect(
	kern_ctl_ref kctl,
	struct sockaddr_ctl *sac,
	void **uinfo)
{
	nstat_control_state *state = kheap_alloc(KHEAP_NET_STAT,
	    sizeof(*state), Z_WAITOK | Z_ZERO);
	if (state == NULL) {
		return ENOMEM;
	}

	lck_mtx_init(&state->ncs_mtx, nstat_lck_grp, NULL);
	state->ncs_kctl = kctl;
	state->ncs_unit = sac->sc_unit;
	state->ncs_flags = NSTAT_FLAG_REQCOUNTS;
	*uinfo = state;

	lck_mtx_lock(&nstat_mtx);
	state->ncs_next = nstat_controls;
	nstat_controls = state;

	if (nstat_idle_time == 0) {
		clock_interval_to_deadline(60, NSEC_PER_SEC, &nstat_idle_time);
		thread_call_func_delayed((thread_call_func_t)nstat_idle_check, NULL, nstat_idle_time);
	}

	lck_mtx_unlock(&nstat_mtx);

	return 0;
}
static errno_t
nstat_control_disconnect(
	__unused kern_ctl_ref kctl,
	__unused u_int32_t unit,
	void *uinfo)
{
	u_int32_t watching;
	nstat_control_state *state = (nstat_control_state *)uinfo;
	tailq_head_nstat_src cleanup_list;
	nstat_src *src;

	TAILQ_INIT(&cleanup_list);

	// pull it out of the global list of states
	lck_mtx_lock(&nstat_mtx);
	nstat_control_state **statepp;
	for (statepp = &nstat_controls; *statepp; statepp = &(*statepp)->ncs_next) {
		if (*statepp == state) {
			*statepp = state->ncs_next;
			break;
		}
	}
	lck_mtx_unlock(&nstat_mtx);

	lck_mtx_lock(&state->ncs_mtx);
	// Stop watching for sources
	nstat_provider *provider;
	watching = state->ncs_watching;
	state->ncs_watching = 0;
	for (provider = nstat_providers; provider && watching; provider = provider->next) {
		if ((watching & (1 << provider->nstat_provider_id)) != 0) {
			watching &= ~(1 << provider->nstat_provider_id);
			provider->nstat_watcher_remove(state);
		}
	}

	// set cleanup flags
	state->ncs_flags |= NSTAT_FLAG_CLEANUP;

	if (state->ncs_accumulated) {
		mbuf_freem(state->ncs_accumulated);
		state->ncs_accumulated = NULL;
	}

	// Copy out the list of sources
	TAILQ_CONCAT(&cleanup_list, &state->ncs_src_queue, ns_control_link);
	lck_mtx_unlock(&state->ncs_mtx);

	while ((src = TAILQ_FIRST(&cleanup_list))) {
		TAILQ_REMOVE(&cleanup_list, src, ns_control_link);
		nstat_control_cleanup_source(NULL, src, FALSE);
	}

	lck_mtx_destroy(&state->ncs_mtx, nstat_lck_grp);
	kheap_free(KHEAP_NET_STAT, state, sizeof(*state));

	return 0;
}
static nstat_src_ref_t
nstat_control_next_src_ref(
	nstat_control_state *state)
{
	return ++state->ncs_next_srcref;
}
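/*
 * Source refs are handed out by simple increment. nstat_control_source_add
 * rejects a ref equal to NSTAT_SRC_REF_INVALID, which covers the corner case
 * of a very long-lived connection wrapping the counter back through the
 * reserved value, as well as adds racing a teardown (NSTAT_FLAG_CLEANUP).
 */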
static errno_t
nstat_control_send_counts(
	nstat_control_state *state,
	nstat_src *src,
	unsigned long long context,
	u_int16_t hdr_flags,
	int *gone)
{
	nstat_msg_src_counts counts;
	errno_t result = 0;

	/* Some providers may not have any counts to send */
	if (src->provider->nstat_counts == NULL) {
		return 0;
	}

	bzero(&counts, sizeof(counts));
	counts.hdr.type = NSTAT_MSG_TYPE_SRC_COUNTS;
	counts.hdr.length = sizeof(counts);
	counts.hdr.flags = hdr_flags;
	counts.hdr.context = context;
	counts.srcref = src->srcref;
	counts.event_flags = 0;

	if (src->provider->nstat_counts(src->cookie, &counts.counts, gone) == 0) {
		if ((src->filter & NSTAT_FILTER_NOZEROBYTES) &&
		    counts.counts.nstat_rxbytes == 0 &&
		    counts.counts.nstat_txbytes == 0) {
			result = EAGAIN;
		} else {
			result = ctl_enqueuedata(state->ncs_kctl,
			    state->ncs_unit, &counts, sizeof(counts),
			    CTL_DATA_EOR);
			if (result != 0) {
				nstat_stats.nstat_sendcountfailures += 1;
			}
		}
	}

	return result;
}
static errno_t
nstat_control_append_counts(
	nstat_control_state *state,
	nstat_src *src,
	int *gone)
{
	/* Some providers may not have any counts to send */
	if (!src->provider->nstat_counts) {
		return 0;
	}

	nstat_msg_src_counts counts;
	bzero(&counts, sizeof(counts));
	counts.hdr.type = NSTAT_MSG_TYPE_SRC_COUNTS;
	counts.hdr.length = sizeof(counts);
	counts.srcref = src->srcref;
	counts.event_flags = 0;

	errno_t result = src->provider->nstat_counts(src->cookie, &counts.counts, gone);
	if (result != 0) {
		return result;
	}

	if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
	    counts.counts.nstat_rxbytes == 0 && counts.counts.nstat_txbytes == 0) {
		return EAGAIN;
	}

	return nstat_accumulate_msg(state, &counts.hdr, counts.hdr.length);
}
static errno_t
nstat_control_send_description(
	nstat_control_state *state,
	nstat_src *src,
	u_int64_t context,
	u_int16_t hdr_flags)
{
	// Provider doesn't support getting the descriptor? Done.
	if (src->provider->nstat_descriptor_length == 0 ||
	    src->provider->nstat_copy_descriptor == NULL) {
		return EOPNOTSUPP;
	}

	// Allocate storage for the descriptor message
	mbuf_t msg;
	unsigned int one = 1;
	size_t size = offsetof(nstat_msg_src_description, data) + src->provider->nstat_descriptor_length;
	assert(size <= MAX_NSTAT_MSG_HDR_LENGTH);

	if (mbuf_allocpacket(MBUF_DONTWAIT, size, &one, &msg) != 0) {
		return ENOMEM;
	}

	nstat_msg_src_description *desc = (nstat_msg_src_description *)mbuf_data(msg);
	bzero(desc, size);
	mbuf_setlen(msg, size);
	mbuf_pkthdr_setlen(msg, mbuf_len(msg));

	// Query the provider for the provider specific bits
	errno_t result = src->provider->nstat_copy_descriptor(src->cookie, desc->data, src->provider->nstat_descriptor_length);
	if (result != 0) {
		mbuf_freem(msg);
		return result;
	}

	desc->hdr.context = context;
	desc->hdr.type = NSTAT_MSG_TYPE_SRC_DESC;
	desc->hdr.length = (u_int16_t)size;
	desc->hdr.flags = hdr_flags;
	desc->srcref = src->srcref;
	desc->event_flags = 0;
	desc->provider = src->provider->nstat_provider_id;

	result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg, CTL_DATA_EOR);
	if (result != 0) {
		nstat_stats.nstat_descriptionfailures += 1;
		mbuf_freem(msg);
	}

	return result;
}
static errno_t
nstat_control_append_description(
	nstat_control_state *state,
	nstat_src *src)
{
	size_t size = offsetof(nstat_msg_src_description, data) + src->provider->nstat_descriptor_length;
	if (size > 512 || src->provider->nstat_descriptor_length == 0 ||
	    src->provider->nstat_copy_descriptor == NULL) {
		return EOPNOTSUPP;
	}

	// Fill out a buffer on the stack, we will copy to the mbuf later
	u_int64_t buffer[size / sizeof(u_int64_t) + 1]; // u_int64_t to ensure alignment
	bzero(buffer, size);

	nstat_msg_src_description *desc = (nstat_msg_src_description *)buffer;
	desc->hdr.type = NSTAT_MSG_TYPE_SRC_DESC;
	desc->hdr.length = (u_int16_t)size;
	desc->srcref = src->srcref;
	desc->event_flags = 0;
	desc->provider = src->provider->nstat_provider_id;

	// Fill in the description
	// Query the provider for the provider specific bits
	errno_t result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
	    src->provider->nstat_descriptor_length);
	if (result != 0) {
		return result;
	}

	return nstat_accumulate_msg(state, &desc->hdr, size);
}
static errno_t
nstat_control_send_update(
	nstat_control_state *state,
	nstat_src *src,
	u_int64_t context,
	u_int64_t event,
	u_int16_t hdr_flags,
	int *gone)
{
	// Provider doesn't support getting the descriptor or counts? Done.
	if ((src->provider->nstat_descriptor_length == 0 ||
	    src->provider->nstat_copy_descriptor == NULL) &&
	    src->provider->nstat_counts == NULL) {
		return EOPNOTSUPP;
	}

	// Allocate storage for the descriptor message
	mbuf_t msg;
	unsigned int one = 1;
	size_t size = offsetof(nstat_msg_src_update, data) +
	    src->provider->nstat_descriptor_length;
	assert(size <= MAX_NSTAT_MSG_HDR_LENGTH);

	if (mbuf_allocpacket(MBUF_DONTWAIT, size, &one, &msg) != 0) {
		return ENOMEM;
	}

	nstat_msg_src_update *desc = (nstat_msg_src_update *)mbuf_data(msg);
	bzero(desc, size);
	desc->hdr.context = context;
	desc->hdr.type = NSTAT_MSG_TYPE_SRC_UPDATE;
	desc->hdr.length = (u_int16_t)size;
	desc->hdr.flags = hdr_flags;
	desc->srcref = src->srcref;
	desc->event_flags = event;
	desc->provider = src->provider->nstat_provider_id;

	mbuf_setlen(msg, size);
	mbuf_pkthdr_setlen(msg, mbuf_len(msg));

	errno_t result = 0;
	if (src->provider->nstat_descriptor_length != 0 && src->provider->nstat_copy_descriptor) {
		// Query the provider for the provider specific bits
		result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
		    src->provider->nstat_descriptor_length);
		if (result != 0) {
			mbuf_freem(msg);
			return result;
		}
	}

	if (src->provider->nstat_counts) {
		result = src->provider->nstat_counts(src->cookie, &desc->counts, gone);
		if (result == 0) {
			if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
			    desc->counts.nstat_rxbytes == 0 && desc->counts.nstat_txbytes == 0) {
				result = EAGAIN;
			} else {
				result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg, CTL_DATA_EOR);
			}
		}
	}

	if (result != 0) {
		nstat_stats.nstat_srcupatefailures += 1;
		mbuf_freem(msg);
	}

	return result;
}
static errno_t
nstat_control_append_update(
	nstat_control_state *state,
	nstat_src *src,
	int *gone)
{
	size_t size = offsetof(nstat_msg_src_update, data) + src->provider->nstat_descriptor_length;
	if (size > 512 || ((src->provider->nstat_descriptor_length == 0 ||
	    src->provider->nstat_copy_descriptor == NULL) &&
	    src->provider->nstat_counts == NULL)) {
		return EOPNOTSUPP;
	}

	// Fill out a buffer on the stack, we will copy to the mbuf later
	u_int64_t buffer[size / sizeof(u_int64_t) + 1]; // u_int64_t to ensure alignment
	bzero(buffer, size);

	nstat_msg_src_update *desc = (nstat_msg_src_update *)buffer;
	desc->hdr.type = NSTAT_MSG_TYPE_SRC_UPDATE;
	desc->hdr.length = (u_int16_t)size;
	desc->srcref = src->srcref;
	desc->event_flags = 0;
	desc->provider = src->provider->nstat_provider_id;

	errno_t result = 0;
	// Fill in the description
	if (src->provider->nstat_descriptor_length != 0 && src->provider->nstat_copy_descriptor) {
		// Query the provider for the provider specific bits
		result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
		    src->provider->nstat_descriptor_length);
		if (result != 0) {
			nstat_stats.nstat_copy_descriptor_failures++;
			if (nstat_debug != 0) {
				printf("%s: src->provider->nstat_copy_descriptor: %d\n", __func__, result);
			}
			return result;
		}
	}

	if (src->provider->nstat_counts) {
		result = src->provider->nstat_counts(src->cookie, &desc->counts, gone);
		if (result != 0) {
			nstat_stats.nstat_provider_counts_failures++;
			if (nstat_debug != 0) {
				printf("%s: src->provider->nstat_counts: %d\n", __func__, result);
			}
			return result;
		}

		if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
		    desc->counts.nstat_rxbytes == 0 && desc->counts.nstat_txbytes == 0) {
			return EAGAIN;
		}
	}

	return nstat_accumulate_msg(state, &desc->hdr, size);
}
static errno_t
nstat_control_send_removed(
	nstat_control_state *state,
	nstat_src *src)
{
	nstat_msg_src_removed removed;
	errno_t result;

	bzero(&removed, sizeof(removed));
	removed.hdr.type = NSTAT_MSG_TYPE_SRC_REMOVED;
	removed.hdr.length = sizeof(removed);
	removed.hdr.context = 0;
	removed.srcref = src->srcref;
	result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &removed,
	    sizeof(removed), CTL_DATA_EOR | CTL_DATA_CRIT);
	if (result != 0) {
		nstat_stats.nstat_msgremovedfailures += 1;
	}

	return result;
}
static errno_t
nstat_control_handle_add_request(
	nstat_control_state *state,
	mbuf_t m)
{
	errno_t result;

	// Verify the header fits in the first mbuf
	if (mbuf_len(m) < offsetof(nstat_msg_add_src_req, param)) {
		return EINVAL;
	}

	// Calculate the length of the parameter field
	ssize_t paramlength = mbuf_pkthdr_len(m) - offsetof(nstat_msg_add_src_req, param);
	if (paramlength < 0 || paramlength > 2 * 1024) {
		return EINVAL;
	}

	nstat_provider *provider = NULL;
	nstat_provider_cookie_t cookie = NULL;
	nstat_msg_add_src_req *req = mbuf_data(m);
	if (mbuf_pkthdr_len(m) > mbuf_len(m)) {
		// parameter is too large, we need to make a contiguous copy
		void *data = kheap_alloc(KHEAP_TEMP, paramlength, Z_WAITOK);

		if (!data) {
			return ENOMEM;
		}
		result = mbuf_copydata(m, offsetof(nstat_msg_add_src_req, param), paramlength, data);
		if (result == 0) {
			result = nstat_lookup_entry(req->provider, data, paramlength, &provider, &cookie);
		}
		kheap_free(KHEAP_TEMP, data, paramlength);
	} else {
		result = nstat_lookup_entry(req->provider, (void*)&req->param, paramlength, &provider, &cookie);
	}

	if (result != 0) {
		return result;
	}

	result = nstat_control_source_add(req->hdr.context, state, provider, cookie);
	if (result != 0) {
		provider->nstat_release(cookie, 0);
	}

	return result;
}
static errno_t
nstat_set_provider_filter(
	nstat_control_state *state,
	nstat_msg_add_all_srcs *req)
{
	nstat_provider_id_t provider_id = req->provider;

	u_int32_t prev_ncs_watching = atomic_or_32_ov(&state->ncs_watching, (1 << provider_id));

	if ((prev_ncs_watching & (1 << provider_id)) != 0) {
		return EALREADY;
	}

	state->ncs_watching |= (1 << provider_id);
	state->ncs_provider_filters[provider_id].npf_flags = req->filter;
	state->ncs_provider_filters[provider_id].npf_events = req->events;
	state->ncs_provider_filters[provider_id].npf_pid = req->target_pid;
	uuid_copy(state->ncs_provider_filters[provider_id].npf_uuid, req->target_uuid);
	return 0;
}
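/*
 * The atomic_or_32_ov of the provider bit doubles as an "already watching"
 * test: if the previous value of ncs_watching had the bit set, a second
 * ADD_ALL_SRCS for the same provider fails before any of the filter fields
 * above can be overwritten.
 */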
static errno_t
nstat_control_handle_add_all(
	nstat_control_state *state,
	mbuf_t m)
{
	errno_t result = 0;

	// Verify the header fits in the first mbuf
	if (mbuf_len(m) < sizeof(nstat_msg_add_all_srcs)) {
		return EINVAL;
	}

	nstat_msg_add_all_srcs *req = mbuf_data(m);
	if (req->provider > NSTAT_PROVIDER_LAST) {
		return ENOENT;
	}

	nstat_provider *provider = nstat_find_provider_by_id(req->provider);

	if (!provider) {
		return ENOENT;
	}
	if (provider->nstat_watcher_add == NULL) {
		return ENOTSUP;
	}

	if (nstat_privcheck != 0) {
		result = priv_check_cred(kauth_cred_get(),
		    PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0);
		if (result != 0) {
			return result;
		}
	}

	lck_mtx_lock(&state->ncs_mtx);
	if (req->filter & NSTAT_FILTER_SUPPRESS_SRC_ADDED) {
		// Suppression of source messages implicitly requires the use of update messages
		state->ncs_flags |= NSTAT_FLAG_SUPPORTS_UPDATES;
	}
	lck_mtx_unlock(&state->ncs_mtx);

	// rdar://problem/30301300 Different providers require different synchronization
	// to ensure that a new entry does not get double counted due to being added prior
	// to all current provider entries being added. Hence pass the provider the details
	// in the original request for this to be applied atomically

	result = provider->nstat_watcher_add(state, req);

	if (result == 0) {
		nstat_enqueue_success(req->hdr.context, state, 0);
	}

	return result;
}
static errno_t
nstat_control_source_add(
	u_int64_t context,
	nstat_control_state *state,
	nstat_provider *provider,
	nstat_provider_cookie_t cookie)
{
	// Fill out source added message if appropriate
	mbuf_t msg = NULL;
	nstat_src_ref_t *srcrefp = NULL;

	u_int64_t provider_filter_flagss =
	    state->ncs_provider_filters[provider->nstat_provider_id].npf_flags;
	boolean_t tell_user =
	    ((provider_filter_flagss & NSTAT_FILTER_SUPPRESS_SRC_ADDED) == 0);
	u_int32_t src_filter =
	    (provider_filter_flagss & NSTAT_FILTER_PROVIDER_NOZEROBYTES)
	    ? NSTAT_FILTER_NOZEROBYTES : 0;

	if (provider_filter_flagss & NSTAT_FILTER_TCP_NO_EARLY_CLOSE) {
		src_filter |= NSTAT_FILTER_TCP_NO_EARLY_CLOSE;
	}

	if (tell_user) {
		unsigned int one = 1;

		if (mbuf_allocpacket(MBUF_DONTWAIT, sizeof(nstat_msg_src_added),
		    &one, &msg) != 0) {
			return ENOMEM;
		}

		mbuf_setlen(msg, sizeof(nstat_msg_src_added));
		mbuf_pkthdr_setlen(msg, mbuf_len(msg));
		nstat_msg_src_added *add = mbuf_data(msg);
		bzero(add, sizeof(*add));
		add->hdr.type = NSTAT_MSG_TYPE_SRC_ADDED;
		assert(mbuf_len(msg) <= MAX_NSTAT_MSG_HDR_LENGTH);
		add->hdr.length = (u_int16_t)mbuf_len(msg);
		add->hdr.context = context;
		add->provider = provider->nstat_provider_id;
		srcrefp = &add->srcref;
	}

	// Allocate storage for the source
	nstat_src *src = kheap_alloc(KHEAP_NET_STAT, sizeof(*src), Z_WAITOK);
	if (src == NULL) {
		if (msg) {
			mbuf_freem(msg);
		}
		return ENOMEM;
	}

	// Fill in the source, including picking an unused source ref
	lck_mtx_lock(&state->ncs_mtx);

	src->srcref = nstat_control_next_src_ref(state);
	if (srcrefp) {
		*srcrefp = src->srcref;
	}

	if (state->ncs_flags & NSTAT_FLAG_CLEANUP || src->srcref == NSTAT_SRC_REF_INVALID) {
		lck_mtx_unlock(&state->ncs_mtx);
		kheap_free(KHEAP_NET_STAT, src, sizeof(*src));
		if (msg) {
			mbuf_freem(msg);
		}
		return EINVAL;
	}
	src->provider = provider;
	src->cookie = cookie;
	src->filter = src_filter;
	src->seq = 0;

	if (msg) {
		// send the source added message if appropriate
		errno_t result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg,
		    CTL_DATA_EOR);
		if (result != 0) {
			nstat_stats.nstat_srcaddedfailures += 1;
			lck_mtx_unlock(&state->ncs_mtx);
			kheap_free(KHEAP_NET_STAT, src, sizeof(*src));
			mbuf_freem(msg);
			return result;
		}
	}
	// Put the source in the list
	TAILQ_INSERT_HEAD(&state->ncs_src_queue, src, ns_control_link);
	src->ns_control = state;

	lck_mtx_unlock(&state->ncs_mtx);

	return 0;
}
static errno_t
nstat_control_handle_remove_request(
	nstat_control_state *state,
	mbuf_t m)
{
	nstat_src_ref_t srcref = NSTAT_SRC_REF_INVALID;
	nstat_src *src;

	if (mbuf_copydata(m, offsetof(nstat_msg_rem_src_req, srcref), sizeof(srcref), &srcref) != 0) {
		return EINVAL;
	}

	lck_mtx_lock(&state->ncs_mtx);

	// Remove this source as we look for it
	TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
	{
		if (src->srcref == srcref) {
			break;
		}
	}
	if (src) {
		TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
	}

	lck_mtx_unlock(&state->ncs_mtx);

	if (src) {
		nstat_control_cleanup_source(state, src, FALSE);
	}

	return src ? 0 : ENOENT;
}
static errno_t
nstat_control_handle_query_request(
	nstat_control_state *state,
	mbuf_t m)
{
	// TBD: handle this from another thread so we can enqueue a lot of data
	// As written, if a client requests query all, this function will be
	// called from their send of the request message. We will attempt to write
	// responses and succeed until the buffer fills up. Since the clients thread
	// is blocked on send, it won't be reading unless the client has two threads
	// using this socket, one for read and one for write. Two threads probably
	// won't work with this code anyhow since we don't have proper locking in
	// place yet.
	tailq_head_nstat_src dead_list;
	errno_t result = ENOENT;
	nstat_msg_query_src_req req;

	if (mbuf_copydata(m, 0, sizeof(req), &req) != 0) {
		return EINVAL;
	}

	const boolean_t all_srcs = (req.srcref == NSTAT_SRC_REF_ALL);
	TAILQ_INIT(&dead_list);

	lck_mtx_lock(&state->ncs_mtx);

	if (all_srcs) {
		state->ncs_flags |= NSTAT_FLAG_REQCOUNTS;
	}
	nstat_src *src, *tmpsrc;
	u_int64_t src_count = 0;
	boolean_t partial = FALSE;

	/*
	 * Error handling policy and sequence number generation is folded into
	 * nstat_control_begin_query.
	 */
	partial = nstat_control_begin_query(state, &req.hdr);

	TAILQ_FOREACH_SAFE(src, &state->ncs_src_queue, ns_control_link, tmpsrc)
	{
		int gone = 0;

		// XXX ignore IFACE types?
		if (all_srcs || src->srcref == req.srcref) {
			if (nstat_control_reporting_allowed(state, src)
			    && (!partial || !all_srcs || src->seq != state->ncs_seq)) {
				if (all_srcs &&
				    (req.hdr.flags & NSTAT_MSG_HDR_FLAG_SUPPORTS_AGGREGATE) != 0) {
					result = nstat_control_append_counts(state, src, &gone);
				} else {
					result = nstat_control_send_counts(state, src, req.hdr.context, 0, &gone);
				}

				if (ENOMEM == result || ENOBUFS == result) {
					/*
					 * If the counts message failed to
					 * enqueue then we should clear our flag so
					 * that a client doesn't miss anything on
					 * idle cleanup. We skip the "gone"
					 * processing in the hope that we may
					 * catch it another time.
					 */
					state->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS;
					break;
				}
				if (partial) {
					/*
					 * We skip over hard errors and
					 * filtered sources.
					 */
					src->seq = state->ncs_seq;
					src_count++;
				}
			}

			if (gone) {
				// send one last descriptor message so client may see last state
				// If we can't send the notification now, it
				// will be sent in the idle cleanup.
				result = nstat_control_send_description(state, src, 0, 0);
				if (result != 0) {
					nstat_stats.nstat_control_send_description_failures++;
					if (nstat_debug != 0) {
						printf("%s - nstat_control_send_description() %d\n", __func__, result);
					}
					state->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS;
					break;
				}

				// pull src out of the list
				TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
				TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
			}

			if (all_srcs) {
				if (src_count >= QUERY_CONTINUATION_SRC_COUNT) {
					break;
				}
			} else if (req.srcref == src->srcref) {
				break;
			}
		}
	}

	nstat_flush_accumulated_msgs(state);

	u_int16_t flags = 0;
	if (req.srcref == NSTAT_SRC_REF_ALL) {
		flags = nstat_control_end_query(state, src, partial);
	}

	lck_mtx_unlock(&state->ncs_mtx);

	/*
	 * If an error occurred enqueueing data, then allow the error to
	 * propagate to nstat_control_send. This way, the error is sent to
	 * user-level.
	 */
	if (all_srcs && ENOMEM != result && ENOBUFS != result) {
		nstat_enqueue_success(req.hdr.context, state, flags);
		result = 0;
	}

	while ((src = TAILQ_FIRST(&dead_list))) {
		TAILQ_REMOVE(&dead_list, src, ns_control_link);
		nstat_control_cleanup_source(state, src, FALSE);
	}

	return result;
}
static errno_t
nstat_control_handle_get_src_description(
	nstat_control_state *state,
	mbuf_t m)
{
	nstat_msg_get_src_description req;
	errno_t result = ENOENT;
	nstat_src *src;

	if (mbuf_copydata(m, 0, sizeof(req), &req) != 0) {
		return EINVAL;
	}

	lck_mtx_lock(&state->ncs_mtx);
	u_int64_t src_count = 0;
	boolean_t partial = FALSE;
	const boolean_t all_srcs = (req.srcref == NSTAT_SRC_REF_ALL);

	/*
	 * Error handling policy and sequence number generation is folded into
	 * nstat_control_begin_query.
	 */
	partial = nstat_control_begin_query(state, &req.hdr);

	TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
	{
		if (all_srcs || src->srcref == req.srcref) {
			if (nstat_control_reporting_allowed(state, src)
			    && (!all_srcs || !partial || src->seq != state->ncs_seq)) {
				if ((req.hdr.flags & NSTAT_MSG_HDR_FLAG_SUPPORTS_AGGREGATE) != 0 && all_srcs) {
					result = nstat_control_append_description(state, src);
				} else {
					result = nstat_control_send_description(state, src, req.hdr.context, 0);
				}

				if (ENOMEM == result || ENOBUFS == result) {
					/*
					 * If the description message failed to
					 * enqueue then we give up for now.
					 */
					break;
				}
				if (partial) {
					/*
					 * Note, we skip over hard errors and
					 * filtered sources.
					 */
					src->seq = state->ncs_seq;
					src_count++;
				}
			}

			if (!all_srcs) {
				break;
			}
		}
		if (src_count >= QUERY_CONTINUATION_SRC_COUNT) {
			break;
		}
	}
	nstat_flush_accumulated_msgs(state);

	u_int16_t flags = 0;
	if (req.srcref == NSTAT_SRC_REF_ALL) {
		flags = nstat_control_end_query(state, src, partial);
	}

	lck_mtx_unlock(&state->ncs_mtx);
	/*
	 * If an error occurred enqueueing data, then allow the error to
	 * propagate to nstat_control_send. This way, the error is sent to
	 * user-level.
	 */
	if (all_srcs && ENOMEM != result && ENOBUFS != result) {
		nstat_enqueue_success(req.hdr.context, state, flags);
		result = 0;
	}

	return result;
}
static errno_t
nstat_control_handle_set_filter(
	nstat_control_state *state,
	mbuf_t m)
{
	nstat_msg_set_filter req;
	nstat_src *src;

	if (mbuf_copydata(m, 0, sizeof(req), &req) != 0) {
		return EINVAL;
	}
	if (req.srcref == NSTAT_SRC_REF_ALL ||
	    req.srcref == NSTAT_SRC_REF_INVALID) {
		return EINVAL;
	}

	lck_mtx_lock(&state->ncs_mtx);
	TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
	{
		if (req.srcref == src->srcref) {
			src->filter = req.filter;
			break;
		}
	}
	lck_mtx_unlock(&state->ncs_mtx);
	if (src == NULL) {
		return ENOENT;
	}

	return 0;
}
static void
nstat_send_error(
	nstat_control_state *state,
	u_int64_t context,
	u_int32_t error)
{
	errno_t result;
	struct nstat_msg_error err;

	bzero(&err, sizeof(err));
	err.hdr.type = NSTAT_MSG_TYPE_ERROR;
	err.hdr.length = sizeof(err);
	err.hdr.context = context;
	err.error = error;

	result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &err,
	    sizeof(err), CTL_DATA_EOR | CTL_DATA_CRIT);
	if (result != 0) {
		nstat_stats.nstat_msgerrorfailures++;
	}
}
static boolean_t
nstat_control_begin_query(
	nstat_control_state *state,
	const nstat_msg_hdr *hdrp)
{
	boolean_t partial = FALSE;

	if (hdrp->flags & NSTAT_MSG_HDR_FLAG_CONTINUATION) {
		/* A partial query all has been requested. */
		partial = TRUE;

		if (state->ncs_context != hdrp->context) {
			if (state->ncs_context != 0) {
				nstat_send_error(state, state->ncs_context, EAGAIN);
			}

			/* Initialize state for a partial query all. */
			state->ncs_context = hdrp->context;
			state->ncs_seq++;
		}
	}

	return partial;
}
static u_int16_t
nstat_control_end_query(
	nstat_control_state *state,
	nstat_src *last_src,
	boolean_t partial)
{
	u_int16_t flags = 0;

	if (last_src == NULL || !partial) {
		/*
		 * We iterated through the entire srcs list or exited early
		 * from the loop when a partial update was not requested (an
		 * error occurred), so clear context to indicate internally
		 * that the query is finished.
		 */
		state->ncs_context = 0;
	} else {
		/*
		 * Indicate to userlevel to make another partial request as
		 * there are still sources left to be reported.
		 */
		flags |= NSTAT_MSG_HDR_FLAG_CONTINUATION;
	}

	return flags;
}
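/*
 * Illustrative sketch (not part of this file): how a client is expected to
 * drive the continuation protocol implemented by nstat_control_begin_query /
 * nstat_control_end_query. The client re-sends the same request, with the
 * same context and NSTAT_MSG_HDR_FLAG_CONTINUATION set, for as long as the
 * SUCCESS reply carries NSTAT_MSG_HDR_FLAG_CONTINUATION. send_query() and
 * await_success_flags() are hypothetical client-side helpers.
 *
 *	nstat_msg_query_src_req req;
 *	bzero(&req, sizeof(req));
 *	req.hdr.type = NSTAT_MSG_TYPE_QUERY_SRC;
 *	req.hdr.length = sizeof(req);
 *	req.hdr.context = my_context;
 *	req.srcref = NSTAT_SRC_REF_ALL;
 *	for (;;) {
 *		send_query(fd, &req);
 *		u_int16_t flags = await_success_flags(fd, my_context);
 *		if ((flags & NSTAT_MSG_HDR_FLAG_CONTINUATION) == 0) {
 *			break;		// all sources reported
 *		}
 *		req.hdr.flags |= NSTAT_MSG_HDR_FLAG_CONTINUATION;
 *	}
 */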
static errno_t
nstat_control_handle_get_update(
	nstat_control_state *state,
	mbuf_t m)
{
	nstat_msg_query_src_req req;

	if (mbuf_copydata(m, 0, sizeof(req), &req) != 0) {
		return EINVAL;
	}

	lck_mtx_lock(&state->ncs_mtx);

	state->ncs_flags |= NSTAT_FLAG_SUPPORTS_UPDATES;

	errno_t result = ENOENT;
	nstat_src *src, *tmpsrc;
	tailq_head_nstat_src dead_list;
	u_int64_t src_count = 0;
	boolean_t partial = FALSE;
	TAILQ_INIT(&dead_list);

	/*
	 * Error handling policy and sequence number generation is folded into
	 * nstat_control_begin_query.
	 */
	partial = nstat_control_begin_query(state, &req.hdr);

	TAILQ_FOREACH_SAFE(src, &state->ncs_src_queue, ns_control_link, tmpsrc)
	{
		int gone = 0;

		if (nstat_control_reporting_allowed(state, src)) {
			/* skip this source if it has the current state
			 * sequence number as it's already been reported in
			 * this query-all partial sequence. */
			if (req.srcref == NSTAT_SRC_REF_ALL
			    && (FALSE == partial || src->seq != state->ncs_seq)) {
				result = nstat_control_append_update(state, src, &gone);
				if (ENOMEM == result || ENOBUFS == result) {
					/*
					 * If the update message failed to
					 * enqueue then give up.
					 */
					break;
				}
				if (partial) {
					/*
					 * We skip over hard errors and
					 * filtered sources.
					 */
					src->seq = state->ncs_seq;
					src_count++;
				}
			} else if (src->srcref == req.srcref) {
				result = nstat_control_send_update(state, src, req.hdr.context, 0, 0, &gone);
			}
		}

		if (gone) {
			// pull src out of the list
			TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
			TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
		}

		if (req.srcref != NSTAT_SRC_REF_ALL && req.srcref == src->srcref) {
			break;
		}
		if (src_count >= QUERY_CONTINUATION_SRC_COUNT) {
			break;
		}
	}

	nstat_flush_accumulated_msgs(state);

	u_int16_t flags = 0;
	if (req.srcref == NSTAT_SRC_REF_ALL) {
		flags = nstat_control_end_query(state, src, partial);
	}

	lck_mtx_unlock(&state->ncs_mtx);
	/*
	 * If an error occurred enqueueing data, then allow the error to
	 * propagate to nstat_control_send. This way, the error is sent to
	 * user-level.
	 */
	if (req.srcref == NSTAT_SRC_REF_ALL && ENOMEM != result && ENOBUFS != result) {
		nstat_enqueue_success(req.hdr.context, state, flags);
		result = 0;
	}

	while ((src = TAILQ_FIRST(&dead_list))) {
		TAILQ_REMOVE(&dead_list, src, ns_control_link);
		// release src and send notification
		nstat_control_cleanup_source(state, src, FALSE);
	}

	return result;
}
static errno_t
nstat_control_handle_subscribe_sysinfo(
	nstat_control_state *state)
{
	errno_t result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0);

	if (result != 0) {
		return result;
	}

	lck_mtx_lock(&state->ncs_mtx);
	state->ncs_flags |= NSTAT_FLAG_SYSINFO_SUBSCRIBED;
	lck_mtx_unlock(&state->ncs_mtx);

	return 0;
}
static errno_t
nstat_control_send(
	kern_ctl_ref kctl,
	u_int32_t unit,
	void *uinfo,
	mbuf_t m,
	__unused int flags)
{
	nstat_control_state *state = (nstat_control_state *)uinfo;
	struct nstat_msg_hdr *hdr;
	struct nstat_msg_hdr storage;
	errno_t result = 0;

	if (mbuf_pkthdr_len(m) < sizeof(*hdr)) {
		// Is this the right thing to do?
		mbuf_freem(m);
		return EINVAL;
	}

	if (mbuf_len(m) >= sizeof(*hdr)) {
		hdr = mbuf_data(m);
	} else {
		mbuf_copydata(m, 0, sizeof(storage), &storage);
		hdr = &storage;
	}

	// Legacy clients may not set the length
	// Those clients are likely not setting the flags either
	// Fix everything up so old clients continue to work
	if (hdr->length != mbuf_pkthdr_len(m)) {
		hdr->flags = 0;
		assert(mbuf_pkthdr_len(m) <= MAX_NSTAT_MSG_HDR_LENGTH);
		hdr->length = (u_int16_t)mbuf_pkthdr_len(m);
		if (hdr == &storage) {
			mbuf_copyback(m, 0, sizeof(*hdr), hdr, MBUF_DONTWAIT);
		}
	}

	switch (hdr->type) {
	case NSTAT_MSG_TYPE_ADD_SRC:
		result = nstat_control_handle_add_request(state, m);
		break;

	case NSTAT_MSG_TYPE_ADD_ALL_SRCS:
		result = nstat_control_handle_add_all(state, m);
		break;

	case NSTAT_MSG_TYPE_REM_SRC:
		result = nstat_control_handle_remove_request(state, m);
		break;

	case NSTAT_MSG_TYPE_QUERY_SRC:
		result = nstat_control_handle_query_request(state, m);
		break;

	case NSTAT_MSG_TYPE_GET_SRC_DESC:
		result = nstat_control_handle_get_src_description(state, m);
		break;

	case NSTAT_MSG_TYPE_SET_FILTER:
		result = nstat_control_handle_set_filter(state, m);
		break;

	case NSTAT_MSG_TYPE_GET_UPDATE:
		result = nstat_control_handle_get_update(state, m);
		break;

	case NSTAT_MSG_TYPE_SUBSCRIBE_SYSINFO:
		result = nstat_control_handle_subscribe_sysinfo(state);
		break;

	default:
		result = EINVAL;
		break;
	}

	if (result != 0) {
		struct nstat_msg_error err;

		bzero(&err, sizeof(err));
		err.hdr.type = NSTAT_MSG_TYPE_ERROR;
		err.hdr.length = (u_int16_t)(sizeof(err) + mbuf_pkthdr_len(m));
		err.hdr.context = hdr->context;
		err.error = result;

		if (mbuf_prepend(&m, sizeof(err), MBUF_DONTWAIT) == 0 &&
		    mbuf_copyback(m, 0, sizeof(err), &err, MBUF_DONTWAIT) == 0) {
			result = ctl_enqueuembuf(kctl, unit, m, CTL_DATA_EOR | CTL_DATA_CRIT);
			if (result != 0) {
				mbuf_freem(m);
			}
			m = NULL;
		}

		if (result != 0) {
			// Unable to prepend the error to the request - just send the error
			err.hdr.length = sizeof(err);
			result = ctl_enqueuedata(kctl, unit, &err, sizeof(err),
			    CTL_DATA_EOR | CTL_DATA_CRIT);
			if (result != 0) {
				nstat_stats.nstat_msgerrorfailures += 1;
			}
		}
		nstat_stats.nstat_handle_msg_failures += 1;
	}

	if (m) {
		mbuf_freem(m);
	}

	return result;
}
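/*
 * Wire-format note: error replies to failed requests echo the request back.
 * The nstat_msg_error header is prepended to the original mbuf, so what the
 * client reads is [struct nstat_msg_error][original request bytes], with
 * hdr.length covering both parts. Only when that prepend fails is a bare
 * nstat_msg_error sent instead.
 */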
static int
tcp_progress_indicators_for_interface(unsigned int ifindex, uint64_t recentflow_maxduration, uint16_t filter_flags, struct xtcpprogress_indicators *indicators)
{
	int error = 0;
	struct inpcb *inp;
	uint64_t min_recent_start_time;

	min_recent_start_time = mach_continuous_time() - recentflow_maxduration;
	bzero(indicators, sizeof(*indicators));

	lck_rw_lock_shared(tcbinfo.ipi_lock);
	/*
	 * For progress indicators we don't need to special case TCP to collect time wait connections
	 */
	LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list)
	{
		struct tcpcb *tp = intotcpcb(inp);
		if (tp && inp->inp_last_outifp &&
		    inp->inp_last_outifp->if_index == ifindex &&
		    inp->inp_state != INPCB_STATE_DEAD &&
		    ((filter_flags == 0) ||
		    ((filter_flags & NSTAT_IFNET_IS_NON_LOCAL) && !(tp->t_flags & TF_LOCAL)) ||
		    ((filter_flags & NSTAT_IFNET_IS_LOCAL) && (tp->t_flags & TF_LOCAL)))) {
			struct tcp_conn_status connstatus;
			indicators->xp_numflows++;
			tcp_get_connectivity_status(tp, &connstatus);
			if (connstatus.write_probe_failed) {
				indicators->xp_write_probe_fails++;
			}
			if (connstatus.read_probe_failed) {
				indicators->xp_read_probe_fails++;
			}
			if (connstatus.conn_probe_failed) {
				indicators->xp_conn_probe_fails++;
			}
			if (inp->inp_start_timestamp > min_recent_start_time) {
				uint64_t flow_count;

				indicators->xp_recentflows++;
				atomic_get_64(flow_count, &inp->inp_stat->rxbytes);
				indicators->xp_recentflows_rxbytes += flow_count;
				atomic_get_64(flow_count, &inp->inp_stat->txbytes);
				indicators->xp_recentflows_txbytes += flow_count;

				indicators->xp_recentflows_rxooo += tp->t_stat.rxoutoforderbytes;
				indicators->xp_recentflows_rxdup += tp->t_stat.rxduplicatebytes;
				indicators->xp_recentflows_retx += tp->t_stat.txretransmitbytes;
				if (tp->snd_max - tp->snd_una) {
					indicators->xp_recentflows_unacked++;
				}
			}
		}
	}
	lck_rw_done(tcbinfo.ipi_lock);

	return error;
}
__private_extern__ int
ntstat_tcp_progress_indicators(struct sysctl_req *req)
{
	struct xtcpprogress_indicators indicators = {};
	int error = 0;
	struct tcpprogressreq requested;

	if (priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0) != 0) {
		return EACCES;
	}
	if (req->newptr == USER_ADDR_NULL) {
		return EINVAL;
	}
	// size check must be against the request structure, not the sysctl_req pointer
	if (req->newlen < sizeof(requested)) {
		return EINVAL;
	}
	error = SYSCTL_IN(req, &requested, sizeof(requested));
	if (error != 0) {
		return error;
	}
	error = tcp_progress_indicators_for_interface((unsigned int)requested.ifindex, requested.recentflow_maxduration, (uint16_t)requested.filter_flags, &indicators);
	if (error != 0) {
		return error;
	}
	error = SYSCTL_OUT(req, &indicators, sizeof(indicators));

	return error;
}