/*
 * Copyright (c) 2010-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/param.h>
#include <sys/types.h>
#include <sys/kpi_mbuf.h>
#include <sys/socket.h>
#include <sys/kern_control.h>
#include <sys/mcache.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/protosw.h>

#include <kern/clock.h>
#include <kern/debug.h>

#include <libkern/libkern.h>
#include <libkern/OSMalloc.h>
#include <libkern/OSAtomic.h>
#include <libkern/locks.h>

#include <net/if_var.h>
#include <net/if_types.h>
#include <net/route.h>
#include <net/ntstat.h>

#include <netinet/ip_var.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_cc.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/in6_var.h>
__private_extern__ int	nstat_collect = 1;

#if (DEBUG || DEVELOPMENT)
SYSCTL_INT(_net, OID_AUTO, statistics, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_collect, 0, "Collect detailed statistics");
#endif /* (DEBUG || DEVELOPMENT) */
static int nstat_privcheck = 0;
SYSCTL_INT(_net, OID_AUTO, statistics_privcheck, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_privcheck, 0, "Entitlement check");

SYSCTL_NODE(_net, OID_AUTO, stats,
    CTLFLAG_RW | CTLFLAG_LOCKED, 0, "network statistics");

static int nstat_debug = 0;
SYSCTL_INT(_net_stats, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_debug, 0, "");

static int nstat_sendspace = 2048;
SYSCTL_INT(_net_stats, OID_AUTO, sendspace, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_sendspace, 0, "");

static int nstat_recvspace = 8192;
SYSCTL_INT(_net_stats, OID_AUTO, recvspace, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_recvspace, 0, "");

static struct nstat_stats nstat_stats;
SYSCTL_STRUCT(_net_stats, OID_AUTO, stats, CTLFLAG_RD | CTLFLAG_LOCKED,
    &nstat_stats, nstat_stats, "");
enum
{
	NSTAT_FLAG_CLEANUP		= (1 << 0),
	NSTAT_FLAG_REQCOUNTS		= (1 << 1),
	NSTAT_FLAG_SUPPORTS_UPDATES	= (1 << 2),
	NSTAT_FLAG_SYSINFO_SUBSCRIBED	= (1 << 3),
};
#define QUERY_CONTINUATION_SRC_COUNT 100

typedef struct nstat_provider_filter
{
	u_int64_t	npf_flags;
	u_int64_t	npf_events;
	pid_t		npf_pid;
	uuid_t		npf_uuid;
} nstat_provider_filter;
typedef struct nstat_control_state
{
	struct nstat_control_state	*ncs_next;
	u_int32_t			ncs_watching;
	decl_lck_mtx_data(, mtx);
	kern_ctl_ref			ncs_kctl;
	nstat_src_ref_t			ncs_next_srcref;
	struct nstat_src		*ncs_srcs;
	mbuf_t				ncs_accumulated;
	nstat_provider_filter		ncs_provider_filters[NSTAT_PROVIDER_COUNT];
	/* state maintained for partial query requests */
	u_int64_t			ncs_context;
} nstat_control_state;
typedef struct nstat_provider
{
	struct nstat_provider	*next;
	nstat_provider_id_t	nstat_provider_id;
	size_t			nstat_descriptor_length;
	errno_t			(*nstat_lookup)(const void *data, u_int32_t length, nstat_provider_cookie_t *out_cookie);
	int			(*nstat_gone)(nstat_provider_cookie_t cookie);
	errno_t			(*nstat_counts)(nstat_provider_cookie_t cookie, struct nstat_counts *out_counts, int *out_gone);
	errno_t			(*nstat_watcher_add)(nstat_control_state *state);
	void			(*nstat_watcher_remove)(nstat_control_state *state);
	errno_t			(*nstat_copy_descriptor)(nstat_provider_cookie_t cookie, void *data, u_int32_t len);
	void			(*nstat_release)(nstat_provider_cookie_t cookie, boolean_t locked);
	bool			(*nstat_reporting_allowed)(nstat_provider_cookie_t cookie, nstat_provider_filter *filter);
} nstat_provider;
typedef STAILQ_HEAD(, nstat_src)	stailq_head_nstat_src;
typedef STAILQ_ENTRY(nstat_src)		stailq_entry_nstat_src;

typedef TAILQ_HEAD(, nstat_tu_shadow)	tailq_head_tu_shadow;
typedef TAILQ_ENTRY(nstat_tu_shadow)	tailq_entry_tu_shadow;
typedef struct nstat_src
{
	struct nstat_src	*next;
	nstat_src_ref_t		srcref;
	nstat_provider		*provider;
	nstat_provider_cookie_t	cookie;
} nstat_src;
static errno_t	nstat_control_send_counts(nstat_control_state *,
		    nstat_src *, unsigned long long, u_int16_t, int *);
static int	nstat_control_send_description(nstat_control_state *state, nstat_src *src, u_int64_t context, u_int16_t hdr_flags);
static int	nstat_control_send_update(nstat_control_state *state, nstat_src *src, u_int64_t context, u_int16_t hdr_flags, int *gone);
static errno_t	nstat_control_send_removed(nstat_control_state *, nstat_src *);
static errno_t	nstat_control_send_goodbye(nstat_control_state *state, nstat_src *src);
static void	nstat_control_cleanup_source(nstat_control_state *state, nstat_src *src, boolean_t);
static bool	nstat_control_reporting_allowed(nstat_control_state *state, nstat_src *src);
static boolean_t nstat_control_begin_query(nstat_control_state *state, const nstat_msg_hdr *hdrp);
static u_int16_t nstat_control_end_query(nstat_control_state *state, nstat_src *last_src, boolean_t partial);
static void	nstat_ifnet_report_ecn_stats(void);
static u_int32_t	nstat_udp_watchers = 0;
static u_int32_t	nstat_userland_udp_watchers = 0;
static u_int32_t	nstat_tcp_watchers = 0;
static u_int32_t	nstat_userland_tcp_watchers = 0;

static void nstat_control_register(void);
/*
 * The lock order is as follows:
 *
 *	socket_lock (inpcb)
 *		nstat_mtx
 *			state->mtx
 */
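/*
 * Illustrative sketch of the ordering above: a path that needs all three
 * locks must acquire them outermost-first, e.g.
 *
 *	socket_lock(inp->inp_socket, 0);
 *	lck_mtx_lock(&nstat_mtx);
 *	lck_mtx_lock(&state->mtx);
 *	...
 *	lck_mtx_unlock(&state->mtx);
 *	lck_mtx_unlock(&nstat_mtx);
 *	socket_unlock(inp->inp_socket, 0);
 *
 * nstat_tcp_new_pcb() below follows this pattern for the outer two locks.
 */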
static volatile OSMallocTag	nstat_malloc_tag = NULL;
static nstat_control_state	*nstat_controls = NULL;
static uint64_t			nstat_idle_time = 0;
static decl_lck_mtx_data(, nstat_mtx);
/* some extern definitions */
extern void mbuf_report_peak_usage(void);
extern void tcp_report_stats(void);
static void
nstat_copy_sa_out(
	const struct sockaddr	*src,
	struct sockaddr		*dst,
	int			maxlen)
{
	if (src->sa_len > maxlen) return;

	bcopy(src, dst, src->sa_len);
	if (src->sa_family == AF_INET6 &&
	    src->sa_len >= sizeof(struct sockaddr_in6))
	{
		struct sockaddr_in6	*sin6 = (struct sockaddr_in6 *)(void *)dst;
		if (IN6_IS_SCOPE_EMBED(&sin6->sin6_addr))
		{
			if (sin6->sin6_scope_id == 0)
				sin6->sin6_scope_id = ntohs(sin6->sin6_addr.s6_addr16[1]);
			sin6->sin6_addr.s6_addr16[1] = 0;
		}
	}
}
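
/*
 * Worked example of the KAME scope-id handling above: link-local addresses
 * carry their scope embedded in the second 16-bit word of the address, so a
 * stored address of fe80:0004::1 is copied out as the canonical fe80::1 with
 * sin6_scope_id == 4.
 */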
static void
nstat_ip_to_sockaddr(
	const struct in_addr	*ip,
	u_int16_t		port,
	struct sockaddr_in	*sin,
	u_int32_t		maxlen)
{
	if (maxlen < sizeof(struct sockaddr_in))
		return;

	sin->sin_family = AF_INET;
	sin->sin_len = sizeof(*sin);
	sin->sin_port = port;
	sin->sin_addr = *ip;
}
static void
nstat_ip6_to_sockaddr(
	const struct in6_addr	*ip6,
	u_int16_t		port,
	struct sockaddr_in6	*sin6,
	u_int32_t		maxlen)
{
	if (maxlen < sizeof(struct sockaddr_in6))
		return;

	sin6->sin6_family = AF_INET6;
	sin6->sin6_len = sizeof(*sin6);
	sin6->sin6_port = port;
	sin6->sin6_addr = *ip6;
	if (IN6_IS_SCOPE_EMBED(&sin6->sin6_addr))
	{
		sin6->sin6_scope_id = ntohs(sin6->sin6_addr.s6_addr16[1]);
		sin6->sin6_addr.s6_addr16[1] = 0;
	}
}
static u_int16_t
nstat_ifnet_to_flags(
	struct ifnet	*ifp)
{
	u_int16_t flags = 0;
	u_int32_t functional_type = if_functional_type(ifp, FALSE);

	/* Panic if someone adds a functional type without updating ntstat. */
	VERIFY(0 <= functional_type && functional_type <= IFRTYPE_FUNCTIONAL_LAST);

	switch (functional_type)
	{
	case IFRTYPE_FUNCTIONAL_UNKNOWN:
		flags |= NSTAT_IFNET_IS_UNKNOWN_TYPE;
		break;
	case IFRTYPE_FUNCTIONAL_LOOPBACK:
		flags |= NSTAT_IFNET_IS_LOOPBACK;
		break;
	case IFRTYPE_FUNCTIONAL_WIRED:
	case IFRTYPE_FUNCTIONAL_INTCOPROC:
		flags |= NSTAT_IFNET_IS_WIRED;
		break;
	case IFRTYPE_FUNCTIONAL_WIFI_INFRA:
		flags |= NSTAT_IFNET_IS_WIFI;
		break;
	case IFRTYPE_FUNCTIONAL_WIFI_AWDL:
		flags |= NSTAT_IFNET_IS_WIFI;
		flags |= NSTAT_IFNET_IS_AWDL;
		break;
	case IFRTYPE_FUNCTIONAL_CELLULAR:
		flags |= NSTAT_IFNET_IS_CELLULAR;
		break;
	}

	if (IFNET_IS_EXPENSIVE(ifp))
	{
		flags |= NSTAT_IFNET_IS_EXPENSIVE;
	}

	return flags;
}
static u_int16_t
nstat_inpcb_to_flags(
	const struct inpcb	*inp)
{
	u_int16_t flags = 0;

	if ((inp != NULL) && (inp->inp_last_outifp != NULL))
	{
		struct ifnet *ifp = inp->inp_last_outifp;
		flags = nstat_ifnet_to_flags(ifp);

		if (flags & NSTAT_IFNET_IS_CELLULAR)
		{
			if (inp->inp_socket != NULL &&
			    (inp->inp_socket->so_flags1 & SOF1_CELLFALLBACK))
				flags |= NSTAT_IFNET_VIA_CELLFALLBACK;
		}
	}
	else
	{
		flags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
	}

	return flags;
}
#pragma mark -- Network Statistic Providers --

static errno_t nstat_control_source_add(u_int64_t context, nstat_control_state *state, nstat_provider *provider, nstat_provider_cookie_t cookie);
struct nstat_provider	*nstat_providers = NULL;
static struct nstat_provider*
nstat_find_provider_by_id(
	nstat_provider_id_t	id)
{
	struct nstat_provider	*provider;

	for (provider = nstat_providers; provider != NULL; provider = provider->next)
	{
		if (provider->nstat_provider_id == id)
			break;
	}

	return provider;
}
static errno_t
nstat_lookup_entry(
	nstat_provider_id_t	id,
	const void		*data,
	u_int32_t		length,
	nstat_provider		**out_provider,
	nstat_provider_cookie_t	*out_cookie)
{
	*out_provider = nstat_find_provider_by_id(id);
	if (*out_provider == NULL)
	{
		return ENOENT;
	}

	return (*out_provider)->nstat_lookup(data, length, out_cookie);
}
static void nstat_init_route_provider(void);
static void nstat_init_tcp_provider(void);
static void nstat_init_userland_tcp_provider(void);
static void nstat_init_udp_provider(void);
static void nstat_init_userland_udp_provider(void);
static void nstat_init_ifnet_provider(void);
__private_extern__ void
nstat_init(void)
{
	if (nstat_malloc_tag != NULL) return;

	OSMallocTag tag = OSMalloc_Tagalloc(NET_STAT_CONTROL_NAME, OSMT_DEFAULT);
	if (!OSCompareAndSwapPtr(NULL, tag, &nstat_malloc_tag))
	{
		OSMalloc_Tagfree(tag);
		tag = nstat_malloc_tag;
	}
	else
	{
		// we need to initialize other things, we do it here as this code path will only be hit once;
		nstat_init_route_provider();
		nstat_init_tcp_provider();
		nstat_init_userland_tcp_provider();
		nstat_init_udp_provider();
		nstat_init_userland_udp_provider();
		nstat_init_ifnet_provider();
		nstat_control_register();
	}
}
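
/*
 * The OSCompareAndSwapPtr() above is a lock-free "init once".  In miniature
 * (release/one_time_setup are placeholders):
 *
 *	if (!OSCompareAndSwapPtr(NULL, mine, &global)) {
 *		release(mine);		// lost the race; adopt the winner's value
 *		mine = global;
 *	} else {
 *		one_time_setup();	// winner runs setup exactly once
 *	}
 *
 * This guarantees each provider is linked into nstat_providers exactly once.
 */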
#pragma mark -- Aligned Buffer Allocation --

struct align_header
{
	u_int32_t	offset;
	u_int32_t	length;
};

static void*
nstat_malloc_aligned(
	u_int32_t	length,
	u_int8_t	alignment,
	OSMallocTag	tag)
{
	struct align_header	*hdr = NULL;
	u_int32_t	size = length + sizeof(*hdr) + alignment - 1;

	u_int8_t	*buffer = OSMalloc(size, tag);
	if (buffer == NULL) return NULL;

	u_int8_t	*aligned = buffer + sizeof(*hdr);
	aligned = (u_int8_t *)P2ROUNDUP(aligned, alignment);

	hdr = (struct align_header *)(void *)(aligned - sizeof(*hdr));
	hdr->offset = aligned - buffer;
	hdr->length = size;

	return aligned;
}
static void
nstat_free_aligned(
	void		*buffer,
	OSMallocTag	tag)
{
	struct align_header	*hdr = (struct align_header *)(void *)((u_int8_t *)buffer - sizeof(*hdr));
	OSFree(((char *)buffer) - hdr->offset, hdr->length, tag);
}
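
/*
 * Usage sketch: this pair backs the per-route counters, e.g. in
 * nstat_route_attach() below:
 *
 *	struct nstat_counts *c = nstat_malloc_aligned(sizeof(*c),
 *	    sizeof(u_int64_t), nstat_malloc_tag);
 *	...
 *	nstat_free_aligned(c, nstat_malloc_tag);
 *
 * The align_header stashed just below the aligned pointer records the offset
 * back to the true allocation so the free can recover it.
 */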
#pragma mark -- Route Provider --

static nstat_provider	nstat_route_provider;
static errno_t
nstat_route_lookup(
	const void		*data,
	u_int32_t		length,
	nstat_provider_cookie_t	*out_cookie)
{
	// rt_lookup doesn't take const params but it doesn't modify the parameters for
	// the lookup. So...we use a union to eliminate the warning.
	union
	{
		struct sockaddr		*sa;
		const struct sockaddr	*const_sa;
	} dst, mask;

	const nstat_route_add_param	*param = (const nstat_route_add_param *)data;

	if (length < sizeof(*param))
	{
		return EINVAL;
	}

	if (param->dst.v4.sin_family == 0 ||
	    param->dst.v4.sin_family > AF_MAX ||
	    (param->mask.v4.sin_family != 0 && param->mask.v4.sin_family != param->dst.v4.sin_family))
	{
		return EINVAL;
	}

	if (param->dst.v4.sin_len > sizeof(param->dst) ||
	    (param->mask.v4.sin_family && param->mask.v4.sin_len > sizeof(param->mask.v4.sin_len)))
	{
		return EINVAL;
	}

	if ((param->dst.v4.sin_family == AF_INET &&
	     param->dst.v4.sin_len < sizeof(struct sockaddr_in)) ||
	    (param->dst.v6.sin6_family == AF_INET6 &&
	     param->dst.v6.sin6_len < sizeof(struct sockaddr_in6)))
	{
		return EINVAL;
	}

	dst.const_sa = (const struct sockaddr *)&param->dst;
	mask.const_sa = param->mask.v4.sin_family ? (const struct sockaddr *)&param->mask : NULL;

	struct radix_node_head	*rnh = rt_tables[dst.sa->sa_family];
	if (rnh == NULL) return EAFNOSUPPORT;

	lck_mtx_lock(rnh_lock);
	struct rtentry *rt = rt_lookup(TRUE, dst.sa, mask.sa, rnh, param->ifindex);
	lck_mtx_unlock(rnh_lock);

	if (rt) *out_cookie = (nstat_provider_cookie_t)rt;

	return rt ? 0 : ENOENT;
}
static int
nstat_route_gone(
	nstat_provider_cookie_t	cookie)
{
	struct rtentry	*rt = (struct rtentry *)cookie;
	return ((rt->rt_flags & RTF_UP) == 0) ? 1 : 0;
}
static errno_t
nstat_route_counts(
	nstat_provider_cookie_t	cookie,
	struct nstat_counts	*out_counts,
	int			*out_gone)
{
	struct rtentry		*rt = (struct rtentry *)cookie;
	struct nstat_counts	*rt_stats = rt->rt_stats;

	if (out_gone) *out_gone = 0;

	if (out_gone && (rt->rt_flags & RTF_UP) == 0) *out_gone = 1;

	if (rt_stats)
	{
		atomic_get_64(out_counts->nstat_rxpackets, &rt_stats->nstat_rxpackets);
		atomic_get_64(out_counts->nstat_rxbytes, &rt_stats->nstat_rxbytes);
		atomic_get_64(out_counts->nstat_txpackets, &rt_stats->nstat_txpackets);
		atomic_get_64(out_counts->nstat_txbytes, &rt_stats->nstat_txbytes);
		out_counts->nstat_rxduplicatebytes = rt_stats->nstat_rxduplicatebytes;
		out_counts->nstat_rxoutoforderbytes = rt_stats->nstat_rxoutoforderbytes;
		out_counts->nstat_txretransmit = rt_stats->nstat_txretransmit;
		out_counts->nstat_connectattempts = rt_stats->nstat_connectattempts;
		out_counts->nstat_connectsuccesses = rt_stats->nstat_connectsuccesses;
		out_counts->nstat_min_rtt = rt_stats->nstat_min_rtt;
		out_counts->nstat_avg_rtt = rt_stats->nstat_avg_rtt;
		out_counts->nstat_var_rtt = rt_stats->nstat_var_rtt;
		out_counts->nstat_cell_rxbytes = out_counts->nstat_cell_txbytes = 0;
	}
	else
	{
		bzero(out_counts, sizeof(*out_counts));
	}

	return 0;
}
static void
nstat_route_release(
	nstat_provider_cookie_t	cookie,
	__unused int		locked)
{
	rtfree((struct rtentry *)cookie);
}
static u_int32_t	nstat_route_watchers = 0;
static int
nstat_route_walktree_add(
	struct radix_node	*rn,
	void			*context)
{
	errno_t	result = 0;
	struct rtentry *rt = (struct rtentry *)rn;
	nstat_control_state	*state	= (nstat_control_state *)context;

	lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);

	/* RTF_UP can't change while rnh_lock is held */
	if ((rt->rt_flags & RTF_UP) != 0)
	{
		/* Clear RTPRF_OURS if the route is still usable */
		RT_LOCK(rt);
		if (rt_validate(rt)) {
			RT_ADDREF_LOCKED(rt);
			RT_UNLOCK(rt);
		} else {
			RT_UNLOCK(rt);
			rt = NULL;
		}

		/* Otherwise if RTF_CONDEMNED, treat it as if it were down */
		if (rt == NULL)
			return (0);

		result = nstat_control_source_add(0, state, &nstat_route_provider, rt);
		if (result != 0)
			rtfree_locked(rt);
	}

	return result;
}
static errno_t
nstat_route_add_watcher(
	nstat_control_state	*state)
{
	int i;
	errno_t result = 0;
	OSIncrementAtomic(&nstat_route_watchers);

	lck_mtx_lock(rnh_lock);
	for (i = 1; i < AF_MAX; i++)
	{
		struct radix_node_head *rnh;
		rnh = rt_tables[i];
		if (!rnh) continue;

		result = rnh->rnh_walktree(rnh, nstat_route_walktree_add, state);
		if (result != 0) break;
	}
	lck_mtx_unlock(rnh_lock);

	return result;
}
__private_extern__ void
nstat_route_new_entry(
	struct rtentry	*rt)
{
	if (nstat_route_watchers == 0)
		return;

	lck_mtx_lock(&nstat_mtx);
	if ((rt->rt_flags & RTF_UP) != 0)
	{
		nstat_control_state	*state;
		for (state = nstat_controls; state; state = state->ncs_next)
		{
			if ((state->ncs_watching & (1 << NSTAT_PROVIDER_ROUTE)) != 0)
			{
				// this client is watching routes
				// acquire a reference for the route
				RT_ADDREF(rt);

				// add the source, if that fails, release the reference
				if (nstat_control_source_add(0, state, &nstat_route_provider, rt) != 0)
					RT_REMREF(rt);
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);
}
static void
nstat_route_remove_watcher(
	__unused nstat_control_state	*state)
{
	OSDecrementAtomic(&nstat_route_watchers);
}
static errno_t
nstat_route_copy_descriptor(
	nstat_provider_cookie_t	cookie,
	void			*data,
	u_int32_t		len)
{
	nstat_route_descriptor	*desc = (nstat_route_descriptor *)data;
	if (len < sizeof(*desc))
	{
		return EINVAL;
	}
	bzero(desc, sizeof(*desc));

	struct rtentry	*rt = (struct rtentry *)cookie;
	desc->id = (uint64_t)VM_KERNEL_ADDRPERM(rt);
	desc->parent_id = (uint64_t)VM_KERNEL_ADDRPERM(rt->rt_parent);
	desc->gateway_id = (uint64_t)VM_KERNEL_ADDRPERM(rt->rt_gwroute);

	// key/dest
	struct sockaddr	*sa;
	if ((sa = rt_key(rt)))
		nstat_copy_sa_out(sa, &desc->dst.sa, sizeof(desc->dst));

	// mask
	if ((sa = rt_mask(rt)) && sa->sa_len <= sizeof(desc->mask))
		memcpy(&desc->mask, sa, sa->sa_len);

	// gateway
	if ((sa = rt->rt_gateway))
		nstat_copy_sa_out(sa, &desc->gateway.sa, sizeof(desc->gateway));

	desc->ifindex = rt->rt_ifp->if_index;

	desc->flags = rt->rt_flags;

	return 0;
}
static bool
nstat_route_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter)
{
	bool retval = true;

	if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0)
	{
		struct rtentry	*rt = (struct rtentry *)cookie;
		struct ifnet	*ifp = rt->rt_ifp;

		if (ifp)
		{
			uint16_t interface_properties = nstat_ifnet_to_flags(ifp);

			if ((filter->npf_flags & interface_properties) == 0)
			{
				retval = false;
			}
		}
	}
	return retval;
}
static void
nstat_init_route_provider(void)
{
	bzero(&nstat_route_provider, sizeof(nstat_route_provider));
	nstat_route_provider.nstat_descriptor_length = sizeof(nstat_route_descriptor);
	nstat_route_provider.nstat_provider_id = NSTAT_PROVIDER_ROUTE;
	nstat_route_provider.nstat_lookup = nstat_route_lookup;
	nstat_route_provider.nstat_gone = nstat_route_gone;
	nstat_route_provider.nstat_counts = nstat_route_counts;
	nstat_route_provider.nstat_release = nstat_route_release;
	nstat_route_provider.nstat_watcher_add = nstat_route_add_watcher;
	nstat_route_provider.nstat_watcher_remove = nstat_route_remove_watcher;
	nstat_route_provider.nstat_copy_descriptor = nstat_route_copy_descriptor;
	nstat_route_provider.nstat_reporting_allowed = nstat_route_reporting_allowed;
	nstat_route_provider.next = nstat_providers;
	nstat_providers = &nstat_route_provider;
}
729 static struct nstat_counts
*
733 struct nstat_counts
*result
= rte
->rt_stats
;
734 if (result
) return result
;
736 if (nstat_malloc_tag
== NULL
) nstat_init();
738 result
= nstat_malloc_aligned(sizeof(*result
), sizeof(u_int64_t
), nstat_malloc_tag
);
739 if (!result
) return result
;
741 bzero(result
, sizeof(*result
));
743 if (!OSCompareAndSwapPtr(NULL
, result
, &rte
->rt_stats
))
745 nstat_free_aligned(result
, nstat_malloc_tag
);
746 result
= rte
->rt_stats
;
__private_extern__ void
nstat_route_detach(
	struct rtentry	*rte)
{
	if (rte->rt_stats)
	{
		nstat_free_aligned(rte->rt_stats, nstat_malloc_tag);
		rte->rt_stats = NULL;
	}
}
__private_extern__ void
nstat_route_connect_attempt(
	struct rtentry	*rte)
{
	while (rte)
	{
		struct nstat_counts*	stats = nstat_route_attach(rte);
		if (stats)
		{
			OSIncrementAtomic(&stats->nstat_connectattempts);
		}

		rte = rte->rt_parent;
	}
}
__private_extern__ void
nstat_route_connect_success(
	struct rtentry	*rte)
{
	while (rte)
	{
		struct nstat_counts*	stats = nstat_route_attach(rte);
		if (stats)
		{
			OSIncrementAtomic(&stats->nstat_connectsuccesses);
		}

		rte = rte->rt_parent;
	}
}
__private_extern__ void
nstat_route_tx(
	struct rtentry	*rte,
	u_int32_t	packets,
	u_int32_t	bytes,
	u_int32_t	flags)
{
	while (rte)
	{
		struct nstat_counts*	stats = nstat_route_attach(rte);
		if (stats)
		{
			if ((flags & NSTAT_TX_FLAG_RETRANSMIT) != 0)
			{
				OSAddAtomic(bytes, &stats->nstat_txretransmit);
			}
			else
			{
				OSAddAtomic64((SInt64)packets, (SInt64 *)&stats->nstat_txpackets);
				OSAddAtomic64((SInt64)bytes, (SInt64 *)&stats->nstat_txbytes);
			}
		}

		rte = rte->rt_parent;
	}
}
__private_extern__ void
nstat_route_rx(
	struct rtentry	*rte,
	u_int32_t	packets,
	u_int32_t	bytes,
	u_int32_t	flags)
{
	while (rte)
	{
		struct nstat_counts*	stats = nstat_route_attach(rte);
		if (stats)
		{
			if (flags == 0)
			{
				OSAddAtomic64((SInt64)packets, (SInt64 *)&stats->nstat_rxpackets);
				OSAddAtomic64((SInt64)bytes, (SInt64 *)&stats->nstat_rxbytes);
			}
			else
			{
				if (flags & NSTAT_RX_FLAG_OUT_OF_ORDER)
					OSAddAtomic(bytes, &stats->nstat_rxoutoforderbytes);
				if (flags & NSTAT_RX_FLAG_DUPLICATE)
					OSAddAtomic(bytes, &stats->nstat_rxduplicatebytes);
			}
		}

		rte = rte->rt_parent;
	}
}
__private_extern__ void
nstat_route_rtt(
	struct rtentry	*rte,
	u_int32_t	rtt,
	u_int32_t	rtt_var)
{
	const int32_t	factor = 8;

	while (rte)
	{
		struct nstat_counts*	stats = nstat_route_attach(rte);
		if (stats)
		{
			int32_t	oldrtt;
			int32_t	newrtt;

			// average
			do
			{
				oldrtt = stats->nstat_avg_rtt;
				if (oldrtt == 0)
				{
					newrtt = rtt;
				}
				else
				{
					newrtt = oldrtt - (oldrtt - (int32_t)rtt) / factor;
				}
				if (oldrtt == newrtt) break;
			} while (!OSCompareAndSwap(oldrtt, newrtt, &stats->nstat_avg_rtt));

			// minimum
			do
			{
				oldrtt = stats->nstat_min_rtt;
				if (oldrtt != 0 && oldrtt < (int32_t)rtt)
				{
					break;
				}
			} while (!OSCompareAndSwap(oldrtt, rtt, &stats->nstat_min_rtt));

			// variance
			do
			{
				oldrtt = stats->nstat_var_rtt;
				if (oldrtt == 0)
				{
					newrtt = rtt_var;
				}
				else
				{
					newrtt = oldrtt - (oldrtt - (int32_t)rtt_var) / factor;
				}
				if (oldrtt == newrtt) break;
			} while (!OSCompareAndSwap(oldrtt, newrtt, &stats->nstat_var_rtt));
		}

		rte = rte->rt_parent;
	}
}
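
/*
 * Worked example: with factor = 8 the update above is an exponentially
 * weighted moving average, newrtt = oldrtt - (oldrtt - rtt) / 8, giving the
 * new sample a 1/8 weight.  For oldrtt = 100 and a sample rtt = 60:
 * newrtt = 100 - (100 - 60) / 8 = 95.  The OSCompareAndSwap() loop simply
 * retries if another CPU updated the field between the read and the swap.
 */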
#pragma mark -- TCP Kernel Provider --

/*
 * Due to the way the kernel deallocates a process (the process structure
 * might be gone by the time we get the PCB detach notification),
 * we need to cache the process name. Without this, proc_name() would
 * return null and the process name would never be sent to userland.
 *
 * For UDP sockets, we also cache the connection tuples along with
 * the interface index. This is necessary because when UDP sockets are
 * disconnected, the connection tuples are forever lost from the inpcb, thus
 * we need to keep track of the last call to connect() in ntstat.
 */
struct nstat_tucookie {
	struct inpcb	*inp;
	char		pname[MAXCOMLEN+1];
	bool		cached;
	union
	{
		struct sockaddr_in	v4;
		struct sockaddr_in6	v6;
	} local;
	union
	{
		struct sockaddr_in	v4;
		struct sockaddr_in6	v6;
	} remote;
	unsigned int	if_index;
	uint16_t	ifnet_properties;
};
static struct nstat_tucookie *
nstat_tucookie_alloc_internal(
	struct inpcb	*inp,
	bool		ref,
	bool		locked)
{
	struct nstat_tucookie *cookie;

	cookie = OSMalloc(sizeof(*cookie), nstat_malloc_tag);
	if (cookie == NULL)
		return NULL;
	if (!locked)
		lck_mtx_assert(&nstat_mtx, LCK_MTX_ASSERT_NOTOWNED);
	if (ref && in_pcb_checkstate(inp, WNT_ACQUIRE, locked) == WNT_STOPUSING)
	{
		OSFree(cookie, sizeof(*cookie), nstat_malloc_tag);
		return NULL;
	}
	bzero(cookie, sizeof(*cookie));
	cookie->inp = inp;
	proc_name(inp->inp_socket->last_pid, cookie->pname,
	    sizeof(cookie->pname));
	/*
	 * We only increment the reference count for UDP sockets because we
	 * only cache UDP socket tuples.
	 */
	if (SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP)
		OSIncrementAtomic(&inp->inp_nstat_refcnt);

	return cookie;
}
static struct nstat_tucookie *
nstat_tucookie_alloc(
	struct inpcb	*inp)
{
	return nstat_tucookie_alloc_internal(inp, false, false);
}

static struct nstat_tucookie *
nstat_tucookie_alloc_ref(
	struct inpcb	*inp)
{
	return nstat_tucookie_alloc_internal(inp, true, false);
}

static struct nstat_tucookie *
nstat_tucookie_alloc_ref_locked(
	struct inpcb	*inp)
{
	return nstat_tucookie_alloc_internal(inp, true, true);
}
static void
nstat_tucookie_release_internal(
	struct nstat_tucookie	*cookie,
	int			inplock)
{
	if (SOCK_PROTO(cookie->inp->inp_socket) == IPPROTO_UDP)
		OSDecrementAtomic(&cookie->inp->inp_nstat_refcnt);
	in_pcb_checkstate(cookie->inp, WNT_RELEASE, inplock);
	OSFree(cookie, sizeof(*cookie), nstat_malloc_tag);
}

static void
nstat_tucookie_release(
	struct nstat_tucookie	*cookie)
{
	nstat_tucookie_release_internal(cookie, false);
}

static void
nstat_tucookie_release_locked(
	struct nstat_tucookie	*cookie)
{
	nstat_tucookie_release_internal(cookie, true);
}
static nstat_provider	nstat_tcp_provider;
static errno_t
nstat_tcpudp_lookup(
	struct inpcbinfo	*inpinfo,
	const void		*data,
	u_int32_t		length,
	nstat_provider_cookie_t	*out_cookie)
{
	struct inpcb *inp = NULL;

	// parameter validation
	const nstat_tcp_add_param	*param = (const nstat_tcp_add_param *)data;
	if (length < sizeof(*param))
	{
		return EINVAL;
	}

	// src and dst must match
	if (param->remote.v4.sin_family != 0 &&
	    param->remote.v4.sin_family != param->local.v4.sin_family)
	{
		return EINVAL;
	}

	switch (param->local.v4.sin_family)
	{
		case AF_INET:
		{
			if (param->local.v4.sin_len != sizeof(param->local.v4) ||
			    (param->remote.v4.sin_family != 0 &&
			     param->remote.v4.sin_len != sizeof(param->remote.v4)))
			{
				return EINVAL;
			}

			inp = in_pcblookup_hash(inpinfo, param->remote.v4.sin_addr, param->remote.v4.sin_port,
			    param->local.v4.sin_addr, param->local.v4.sin_port, 1, NULL);
		}
		break;

		case AF_INET6:
		{
			union
			{
				const struct in6_addr	*in6c;
				struct in6_addr		*in6;
			} local, remote;

			if (param->local.v6.sin6_len != sizeof(param->local.v6) ||
			    (param->remote.v6.sin6_family != 0 &&
			     param->remote.v6.sin6_len != sizeof(param->remote.v6)))
			{
				return EINVAL;
			}

			local.in6c = &param->local.v6.sin6_addr;
			remote.in6c = &param->remote.v6.sin6_addr;

			inp = in6_pcblookup_hash(inpinfo, remote.in6, param->remote.v6.sin6_port,
			    local.in6, param->local.v6.sin6_port, 1, NULL);
		}
		break;

		default:
			return EINVAL;
	}

	if (inp == NULL)
		return ENOENT;

	// At this point we have a ref to the inpcb
	*out_cookie = nstat_tucookie_alloc(inp);
	if (*out_cookie == NULL)
		in_pcb_checkstate(inp, WNT_RELEASE, 0);

	return 0;
}
static errno_t
nstat_tcp_lookup(
	const void		*data,
	u_int32_t		length,
	nstat_provider_cookie_t	*out_cookie)
{
	return nstat_tcpudp_lookup(&tcbinfo, data, length, out_cookie);
}
static int
nstat_tcp_gone(
	nstat_provider_cookie_t	cookie)
{
	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;
	struct inpcb *inp;
	struct tcpcb *tp;

	return (!(inp = tucookie->inp) ||
	    !(tp = intotcpcb(inp)) ||
	    inp->inp_state == INPCB_STATE_DEAD) ? 1 : 0;
}
static errno_t
nstat_tcp_counts(
	nstat_provider_cookie_t	cookie,
	struct nstat_counts	*out_counts,
	int			*out_gone)
{
	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;
	struct inpcb *inp;

	bzero(out_counts, sizeof(*out_counts));

	if (out_gone) *out_gone = 0;

	// if the pcb is in the dead state, we should stop using it
	if (nstat_tcp_gone(cookie))
	{
		if (out_gone) *out_gone = 1;
		if (!(inp = tucookie->inp) || !intotcpcb(inp))
			return EINVAL;
	}
	inp = tucookie->inp;
	struct tcpcb *tp = intotcpcb(inp);

	atomic_get_64(out_counts->nstat_rxpackets, &inp->inp_stat->rxpackets);
	atomic_get_64(out_counts->nstat_rxbytes, &inp->inp_stat->rxbytes);
	atomic_get_64(out_counts->nstat_txpackets, &inp->inp_stat->txpackets);
	atomic_get_64(out_counts->nstat_txbytes, &inp->inp_stat->txbytes);
	out_counts->nstat_rxduplicatebytes = tp->t_stat.rxduplicatebytes;
	out_counts->nstat_rxoutoforderbytes = tp->t_stat.rxoutoforderbytes;
	out_counts->nstat_txretransmit = tp->t_stat.txretransmitbytes;
	out_counts->nstat_connectattempts = tp->t_state >= TCPS_SYN_SENT ? 1 : 0;
	out_counts->nstat_connectsuccesses = tp->t_state >= TCPS_ESTABLISHED ? 1 : 0;
	out_counts->nstat_avg_rtt = tp->t_srtt;
	out_counts->nstat_min_rtt = tp->t_rttbest;
	out_counts->nstat_var_rtt = tp->t_rttvar;
	if (out_counts->nstat_avg_rtt < out_counts->nstat_min_rtt)
		out_counts->nstat_min_rtt = out_counts->nstat_avg_rtt;
	atomic_get_64(out_counts->nstat_cell_rxbytes, &inp->inp_cstat->rxbytes);
	atomic_get_64(out_counts->nstat_cell_txbytes, &inp->inp_cstat->txbytes);
	atomic_get_64(out_counts->nstat_wifi_rxbytes, &inp->inp_wstat->rxbytes);
	atomic_get_64(out_counts->nstat_wifi_txbytes, &inp->inp_wstat->txbytes);
	atomic_get_64(out_counts->nstat_wired_rxbytes, &inp->inp_Wstat->rxbytes);
	atomic_get_64(out_counts->nstat_wired_txbytes, &inp->inp_Wstat->txbytes);

	return 0;
}
static void
nstat_tcp_release(
	nstat_provider_cookie_t	cookie,
	int			locked)
{
	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;

	nstat_tucookie_release_internal(tucookie, locked);
}
static errno_t
nstat_tcp_add_watcher(
	nstat_control_state	*state)
{
	OSIncrementAtomic(&nstat_tcp_watchers);

	lck_rw_lock_shared(tcbinfo.ipi_lock);

	// Add all current tcp inpcbs. Ignore those in timewait
	struct inpcb *inp;
	struct nstat_tucookie *cookie;
	LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list)
	{
		cookie = nstat_tucookie_alloc_ref(inp);
		if (cookie == NULL)
			continue;
		if (nstat_control_source_add(0, state, &nstat_tcp_provider,
		    cookie) != 0)
		{
			nstat_tucookie_release(cookie);
			break;
		}
	}

	lck_rw_done(tcbinfo.ipi_lock);

	return 0;
}
static void
nstat_tcp_remove_watcher(
	__unused nstat_control_state	*state)
{
	OSDecrementAtomic(&nstat_tcp_watchers);
}
__private_extern__ void
nstat_tcp_new_pcb(
	struct inpcb	*inp)
{
	struct nstat_tucookie *cookie;

	if (nstat_tcp_watchers == 0)
		return;

	socket_lock(inp->inp_socket, 0);
	lck_mtx_lock(&nstat_mtx);
	nstat_control_state	*state;
	for (state = nstat_controls; state; state = state->ncs_next)
	{
		if ((state->ncs_watching & (1 << NSTAT_PROVIDER_TCP_KERNEL)) != 0)
		{
			// this client is watching tcp
			// acquire a reference for it
			cookie = nstat_tucookie_alloc_ref_locked(inp);
			if (cookie == NULL)
				continue;
			// add the source, if that fails, release the reference
			if (nstat_control_source_add(0, state,
			    &nstat_tcp_provider, cookie) != 0)
			{
				nstat_tucookie_release_locked(cookie);
				break;
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);
	socket_unlock(inp->inp_socket, 0);
}
__private_extern__ void
nstat_pcb_detach(struct inpcb *inp)
{
	nstat_control_state	*state;
	nstat_src		*src, *prevsrc;
	nstat_src		*dead_list = NULL;
	struct nstat_tucookie	*tucookie;
	errno_t			result;

	if (inp == NULL || (nstat_tcp_watchers == 0 && nstat_udp_watchers == 0))
		return;

	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next)
	{
		lck_mtx_lock(&state->mtx);
		for (prevsrc = NULL, src = state->ncs_srcs; src;
		    prevsrc = src, src = src->next)
		{
			nstat_provider_id_t provider_id = src->provider->nstat_provider_id;
			if (provider_id == NSTAT_PROVIDER_TCP_KERNEL || provider_id == NSTAT_PROVIDER_UDP_KERNEL)
			{
				tucookie = (struct nstat_tucookie *)src->cookie;
				if (tucookie->inp == inp)
					break;
			}
		}

		if (src)
		{
			result = nstat_control_send_goodbye(state, src);

			if (prevsrc)
				prevsrc->next = src->next;
			else
				state->ncs_srcs = src->next;

			src->next = dead_list;
			dead_list = src;
		}
		lck_mtx_unlock(&state->mtx);
	}
	lck_mtx_unlock(&nstat_mtx);

	while (dead_list)
	{
		src = dead_list;
		dead_list = src->next;

		nstat_control_cleanup_source(NULL, src, TRUE);
	}
}
__private_extern__ void
nstat_pcb_cache(struct inpcb *inp)
{
	nstat_control_state	*state;
	nstat_src		*src;
	struct nstat_tucookie	*tucookie;

	if (inp == NULL || nstat_udp_watchers == 0 ||
	    inp->inp_nstat_refcnt == 0)
		return;
	VERIFY(SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP);
	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next) {
		lck_mtx_lock(&state->mtx);
		for (src = state->ncs_srcs; src; src = src->next)
		{
			tucookie = (struct nstat_tucookie *)src->cookie;
			if (tucookie->inp == inp)
			{
				if (inp->inp_vflag & INP_IPV6)
				{
					nstat_ip6_to_sockaddr(&inp->in6p_laddr,
					    inp->inp_lport,
					    &tucookie->local.v6,
					    sizeof(tucookie->local));
					nstat_ip6_to_sockaddr(&inp->in6p_faddr,
					    inp->inp_fport,
					    &tucookie->remote.v6,
					    sizeof(tucookie->remote));
				}
				else if (inp->inp_vflag & INP_IPV4)
				{
					nstat_ip_to_sockaddr(&inp->inp_laddr,
					    inp->inp_lport,
					    &tucookie->local.v4,
					    sizeof(tucookie->local));
					nstat_ip_to_sockaddr(&inp->inp_faddr,
					    inp->inp_fport,
					    &tucookie->remote.v4,
					    sizeof(tucookie->remote));
				}
				if (inp->inp_last_outifp)
					tucookie->if_index =
					    inp->inp_last_outifp->if_index;

				tucookie->ifnet_properties = nstat_inpcb_to_flags(inp);
				tucookie->cached = true;
				break;
			}
		}
		lck_mtx_unlock(&state->mtx);
	}
	lck_mtx_unlock(&nstat_mtx);
}
__private_extern__ void
nstat_pcb_invalidate_cache(struct inpcb *inp)
{
	nstat_control_state	*state;
	nstat_src		*src;
	struct nstat_tucookie	*tucookie;

	if (inp == NULL || nstat_udp_watchers == 0 ||
	    inp->inp_nstat_refcnt == 0)
		return;
	VERIFY(SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP);
	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next) {
		lck_mtx_lock(&state->mtx);
		for (src = state->ncs_srcs; src; src = src->next)
		{
			tucookie = (struct nstat_tucookie *)src->cookie;
			if (tucookie->inp == inp)
			{
				tucookie->cached = false;
				break;
			}
		}
		lck_mtx_unlock(&state->mtx);
	}
	lck_mtx_unlock(&nstat_mtx);
}
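
/*
 * Usage sketch (call sites live in the UDP layer, outside this file): the
 * disconnect path is expected to bracket the window in which the inpcb loses
 * its tuples, roughly
 *
 *	nstat_pcb_cache(inp);			// snapshot tuples
 *	... disconnect the inpcb ...
 *	nstat_pcb_invalidate_cache(inp);	// a later connect() makes live data current again
 *
 * so that nstat_udp_copy_descriptor() below can fall back to the cached
 * tuples whenever tucookie->cached is set.
 */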
static errno_t
nstat_tcp_copy_descriptor(
	nstat_provider_cookie_t	cookie,
	void			*data,
	u_int32_t		len)
{
	if (len < sizeof(nstat_tcp_descriptor))
	{
		return EINVAL;
	}

	if (nstat_tcp_gone(cookie))
		return EINVAL;

	nstat_tcp_descriptor	*desc = (nstat_tcp_descriptor *)data;
	struct nstat_tucookie	*tucookie =
	    (struct nstat_tucookie *)cookie;
	struct inpcb		*inp = tucookie->inp;
	struct tcpcb		*tp = intotcpcb(inp);
	bzero(desc, sizeof(*desc));

	if (inp->inp_vflag & INP_IPV6)
	{
		nstat_ip6_to_sockaddr(&inp->in6p_laddr, inp->inp_lport,
		    &desc->local.v6, sizeof(desc->local));
		nstat_ip6_to_sockaddr(&inp->in6p_faddr, inp->inp_fport,
		    &desc->remote.v6, sizeof(desc->remote));
	}
	else if (inp->inp_vflag & INP_IPV4)
	{
		nstat_ip_to_sockaddr(&inp->inp_laddr, inp->inp_lport,
		    &desc->local.v4, sizeof(desc->local));
		nstat_ip_to_sockaddr(&inp->inp_faddr, inp->inp_fport,
		    &desc->remote.v4, sizeof(desc->remote));
	}

	desc->state = intotcpcb(inp)->t_state;
	desc->ifindex = (inp->inp_last_outifp == NULL) ? 0 :
	    inp->inp_last_outifp->if_index;

	// danger - not locked, values could be bogus
	desc->txunacked = tp->snd_max - tp->snd_una;
	desc->txwindow = tp->snd_wnd;
	desc->txcwindow = tp->snd_cwnd;

	if (CC_ALGO(tp)->name != NULL) {
		strlcpy(desc->cc_algo, CC_ALGO(tp)->name,
		    sizeof(desc->cc_algo));
	}

	struct socket *so = inp->inp_socket;
	if (so)
	{
		// TBD - take the socket lock around these to make sure
		// they're in sync?
		desc->upid = so->last_upid;
		desc->pid = so->last_pid;
		desc->traffic_class = so->so_traffic_class;
		if ((so->so_flags1 & SOF1_TRAFFIC_MGT_SO_BACKGROUND))
			desc->traffic_mgt_flags |= TRAFFIC_MGT_SO_BACKGROUND;
		if ((so->so_flags1 & SOF1_TRAFFIC_MGT_TCP_RECVBG))
			desc->traffic_mgt_flags |= TRAFFIC_MGT_TCP_RECVBG;
		proc_name(desc->pid, desc->pname, sizeof(desc->pname));
		if (desc->pname[0] == 0)
		{
			strlcpy(desc->pname, tucookie->pname,
			    sizeof(desc->pname));
		}
		else
		{
			desc->pname[sizeof(desc->pname) - 1] = 0;
			strlcpy(tucookie->pname, desc->pname,
			    sizeof(tucookie->pname));
		}
		memcpy(desc->uuid, so->last_uuid, sizeof(so->last_uuid));
		memcpy(desc->vuuid, so->so_vuuid, sizeof(so->so_vuuid));
		if (so->so_flags & SOF_DELEGATED) {
			desc->eupid = so->e_upid;
			desc->epid = so->e_pid;
			memcpy(desc->euuid, so->e_uuid, sizeof(so->e_uuid));
		} else {
			desc->eupid = desc->upid;
			desc->epid = desc->pid;
			memcpy(desc->euuid, desc->uuid, sizeof(desc->uuid));
		}
		desc->sndbufsize = so->so_snd.sb_hiwat;
		desc->sndbufused = so->so_snd.sb_cc;
		desc->rcvbufsize = so->so_rcv.sb_hiwat;
		desc->rcvbufused = so->so_rcv.sb_cc;
	}

	tcp_get_connectivity_status(tp, &desc->connstatus);
	desc->ifnet_properties = nstat_inpcb_to_flags(inp);

	return 0;
}
static bool
nstat_tcpudp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter, bool is_UDP)
{
	bool retval = true;

	if ((filter->npf_flags & (NSTAT_FILTER_IFNET_FLAGS|NSTAT_FILTER_SPECIFIC_USER)) != 0)
	{
		struct nstat_tucookie *tucookie = (struct nstat_tucookie *)cookie;
		struct inpcb *inp = tucookie->inp;

		/* Only apply interface filter if at least one is allowed. */
		if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0)
		{
			uint16_t interface_properties = nstat_inpcb_to_flags(inp);

			if ((filter->npf_flags & interface_properties) == 0)
			{
				// For UDP, we could have an undefined interface and yet transfers may have occurred.
				// We allow reporting if there have been transfers of the requested kind.
				// This is imperfect as we cannot account for the expensive attribute over wifi.
				// We also assume that cellular is expensive and we have no way to select for AWDL
				if (is_UDP)
				{
					do
					{
						if ((filter->npf_flags & (NSTAT_FILTER_ACCEPT_CELLULAR|NSTAT_FILTER_ACCEPT_EXPENSIVE)) &&
						    (inp->inp_cstat->rxbytes || inp->inp_cstat->txbytes))
						{
							break;
						}
						if ((filter->npf_flags & NSTAT_FILTER_ACCEPT_WIFI) &&
						    (inp->inp_wstat->rxbytes || inp->inp_wstat->txbytes))
						{
							break;
						}
						if ((filter->npf_flags & NSTAT_FILTER_ACCEPT_WIRED) &&
						    (inp->inp_Wstat->rxbytes || inp->inp_Wstat->txbytes))
						{
							break;
						}
						return false;
					} while (0);
				}
				else
				{
					return false;
				}
			}
		}

		if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0) && (retval))
		{
			struct socket *so = inp->inp_socket;
			retval = false;

			if (so)
			{
				if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_PID) != 0) &&
				    (filter->npf_pid == so->last_pid))
				{
					retval = true;
				}
				else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EPID) != 0) &&
				    (filter->npf_pid == (so->so_flags & SOF_DELEGATED)? so->e_upid : so->last_pid))
				{
					retval = true;
				}
				else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_UUID) != 0) &&
				    (memcmp(filter->npf_uuid, so->last_uuid, sizeof(so->last_uuid)) == 0))
				{
					retval = true;
				}
				else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EUUID) != 0) &&
				    (memcmp(filter->npf_uuid, (so->so_flags & SOF_DELEGATED)? so->e_uuid : so->last_uuid,
				    sizeof(so->last_uuid)) == 0))
				{
					retval = true;
				}
			}
		}
	}
	return retval;
}
static bool
nstat_tcp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter)
{
	return nstat_tcpudp_reporting_allowed(cookie, filter, FALSE);
}
static void
nstat_init_tcp_provider(void)
{
	bzero(&nstat_tcp_provider, sizeof(nstat_tcp_provider));
	nstat_tcp_provider.nstat_descriptor_length = sizeof(nstat_tcp_descriptor);
	nstat_tcp_provider.nstat_provider_id = NSTAT_PROVIDER_TCP_KERNEL;
	nstat_tcp_provider.nstat_lookup = nstat_tcp_lookup;
	nstat_tcp_provider.nstat_gone = nstat_tcp_gone;
	nstat_tcp_provider.nstat_counts = nstat_tcp_counts;
	nstat_tcp_provider.nstat_release = nstat_tcp_release;
	nstat_tcp_provider.nstat_watcher_add = nstat_tcp_add_watcher;
	nstat_tcp_provider.nstat_watcher_remove = nstat_tcp_remove_watcher;
	nstat_tcp_provider.nstat_copy_descriptor = nstat_tcp_copy_descriptor;
	nstat_tcp_provider.nstat_reporting_allowed = nstat_tcp_reporting_allowed;
	nstat_tcp_provider.next = nstat_providers;
	nstat_providers = &nstat_tcp_provider;
}
#pragma mark -- UDP Provider --

static nstat_provider	nstat_udp_provider;
static errno_t
nstat_udp_lookup(
	const void		*data,
	u_int32_t		length,
	nstat_provider_cookie_t	*out_cookie)
{
	return nstat_tcpudp_lookup(&udbinfo, data, length, out_cookie);
}
static int
nstat_udp_gone(
	nstat_provider_cookie_t	cookie)
{
	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;
	struct inpcb *inp;

	return (!(inp = tucookie->inp) ||
	    inp->inp_state == INPCB_STATE_DEAD) ? 1 : 0;
}
static errno_t
nstat_udp_counts(
	nstat_provider_cookie_t	cookie,
	struct nstat_counts	*out_counts,
	int			*out_gone)
{
	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;

	if (out_gone) *out_gone = 0;

	// if the pcb is in the dead state, we should stop using it
	if (nstat_udp_gone(cookie))
	{
		if (out_gone) *out_gone = 1;
		if (!tucookie->inp)
			return EINVAL;
	}
	struct inpcb *inp = tucookie->inp;

	atomic_get_64(out_counts->nstat_rxpackets, &inp->inp_stat->rxpackets);
	atomic_get_64(out_counts->nstat_rxbytes, &inp->inp_stat->rxbytes);
	atomic_get_64(out_counts->nstat_txpackets, &inp->inp_stat->txpackets);
	atomic_get_64(out_counts->nstat_txbytes, &inp->inp_stat->txbytes);
	atomic_get_64(out_counts->nstat_cell_rxbytes, &inp->inp_cstat->rxbytes);
	atomic_get_64(out_counts->nstat_cell_txbytes, &inp->inp_cstat->txbytes);
	atomic_get_64(out_counts->nstat_wifi_rxbytes, &inp->inp_wstat->rxbytes);
	atomic_get_64(out_counts->nstat_wifi_txbytes, &inp->inp_wstat->txbytes);
	atomic_get_64(out_counts->nstat_wired_rxbytes, &inp->inp_Wstat->rxbytes);
	atomic_get_64(out_counts->nstat_wired_txbytes, &inp->inp_Wstat->txbytes);

	return 0;
}
static void
nstat_udp_release(
	nstat_provider_cookie_t	cookie,
	int			locked)
{
	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;

	nstat_tucookie_release_internal(tucookie, locked);
}
static errno_t
nstat_udp_add_watcher(
	nstat_control_state	*state)
{
	struct inpcb *inp;
	struct nstat_tucookie *cookie;

	OSIncrementAtomic(&nstat_udp_watchers);

	lck_rw_lock_shared(udbinfo.ipi_lock);
	// Add all current UDP inpcbs.
	LIST_FOREACH(inp, udbinfo.ipi_listhead, inp_list)
	{
		cookie = nstat_tucookie_alloc_ref(inp);
		if (cookie == NULL)
			continue;
		if (nstat_control_source_add(0, state, &nstat_udp_provider,
		    cookie) != 0)
		{
			nstat_tucookie_release(cookie);
			break;
		}
	}

	lck_rw_done(udbinfo.ipi_lock);

	return 0;
}
static void
nstat_udp_remove_watcher(
	__unused nstat_control_state	*state)
{
	OSDecrementAtomic(&nstat_udp_watchers);
}
__private_extern__ void
nstat_udp_new_pcb(
	struct inpcb	*inp)
{
	struct nstat_tucookie *cookie;

	if (nstat_udp_watchers == 0)
		return;

	socket_lock(inp->inp_socket, 0);
	lck_mtx_lock(&nstat_mtx);
	nstat_control_state	*state;
	for (state = nstat_controls; state; state = state->ncs_next)
	{
		if ((state->ncs_watching & (1 << NSTAT_PROVIDER_UDP_KERNEL)) != 0)
		{
			// this client is watching udp
			// acquire a reference for it
			cookie = nstat_tucookie_alloc_ref_locked(inp);
			if (cookie == NULL)
				continue;
			// add the source, if that fails, release the reference
			if (nstat_control_source_add(0, state,
			    &nstat_udp_provider, cookie) != 0)
			{
				nstat_tucookie_release_locked(cookie);
				break;
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);
	socket_unlock(inp->inp_socket, 0);
}
static errno_t
nstat_udp_copy_descriptor(
	nstat_provider_cookie_t	cookie,
	void			*data,
	u_int32_t		len)
{
	if (len < sizeof(nstat_udp_descriptor))
	{
		return EINVAL;
	}

	if (nstat_udp_gone(cookie))
		return EINVAL;

	struct nstat_tucookie	*tucookie =
	    (struct nstat_tucookie *)cookie;
	nstat_udp_descriptor	*desc = (nstat_udp_descriptor *)data;
	struct inpcb		*inp = tucookie->inp;

	bzero(desc, sizeof(*desc));

	if (tucookie->cached == false) {
		if (inp->inp_vflag & INP_IPV6)
		{
			nstat_ip6_to_sockaddr(&inp->in6p_laddr, inp->inp_lport,
			    &desc->local.v6, sizeof(desc->local.v6));
			nstat_ip6_to_sockaddr(&inp->in6p_faddr, inp->inp_fport,
			    &desc->remote.v6, sizeof(desc->remote.v6));
		}
		else if (inp->inp_vflag & INP_IPV4)
		{
			nstat_ip_to_sockaddr(&inp->inp_laddr, inp->inp_lport,
			    &desc->local.v4, sizeof(desc->local.v4));
			nstat_ip_to_sockaddr(&inp->inp_faddr, inp->inp_fport,
			    &desc->remote.v4, sizeof(desc->remote.v4));
		}
		desc->ifnet_properties = nstat_inpcb_to_flags(inp);
	}
	else
	{
		if (inp->inp_vflag & INP_IPV6)
		{
			memcpy(&desc->local.v6, &tucookie->local.v6,
			    sizeof(desc->local.v6));
			memcpy(&desc->remote.v6, &tucookie->remote.v6,
			    sizeof(desc->remote.v6));
		}
		else if (inp->inp_vflag & INP_IPV4)
		{
			memcpy(&desc->local.v4, &tucookie->local.v4,
			    sizeof(desc->local.v4));
			memcpy(&desc->remote.v4, &tucookie->remote.v4,
			    sizeof(desc->remote.v4));
		}
		desc->ifnet_properties = tucookie->ifnet_properties;
	}

	if (inp->inp_last_outifp)
		desc->ifindex = inp->inp_last_outifp->if_index;
	else
		desc->ifindex = tucookie->if_index;

	struct socket *so = inp->inp_socket;
	if (so)
	{
		// TBD - take the socket lock around these to make sure
		// they're in sync?
		desc->upid = so->last_upid;
		desc->pid = so->last_pid;
		proc_name(desc->pid, desc->pname, sizeof(desc->pname));
		if (desc->pname[0] == 0)
		{
			strlcpy(desc->pname, tucookie->pname,
			    sizeof(desc->pname));
		}
		else
		{
			desc->pname[sizeof(desc->pname) - 1] = 0;
			strlcpy(tucookie->pname, desc->pname,
			    sizeof(tucookie->pname));
		}
		memcpy(desc->uuid, so->last_uuid, sizeof(so->last_uuid));
		memcpy(desc->vuuid, so->so_vuuid, sizeof(so->so_vuuid));
		if (so->so_flags & SOF_DELEGATED) {
			desc->eupid = so->e_upid;
			desc->epid = so->e_pid;
			memcpy(desc->euuid, so->e_uuid, sizeof(so->e_uuid));
		} else {
			desc->eupid = desc->upid;
			desc->epid = desc->pid;
			memcpy(desc->euuid, desc->uuid, sizeof(desc->uuid));
		}
		desc->rcvbufsize = so->so_rcv.sb_hiwat;
		desc->rcvbufused = so->so_rcv.sb_cc;
		desc->traffic_class = so->so_traffic_class;
	}

	return 0;
}
static bool
nstat_udp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter)
{
	return nstat_tcpudp_reporting_allowed(cookie, filter, TRUE);
}
static void
nstat_init_udp_provider(void)
{
	bzero(&nstat_udp_provider, sizeof(nstat_udp_provider));
	nstat_udp_provider.nstat_provider_id = NSTAT_PROVIDER_UDP_KERNEL;
	nstat_udp_provider.nstat_descriptor_length = sizeof(nstat_udp_descriptor);
	nstat_udp_provider.nstat_lookup = nstat_udp_lookup;
	nstat_udp_provider.nstat_gone = nstat_udp_gone;
	nstat_udp_provider.nstat_counts = nstat_udp_counts;
	nstat_udp_provider.nstat_watcher_add = nstat_udp_add_watcher;
	nstat_udp_provider.nstat_watcher_remove = nstat_udp_remove_watcher;
	nstat_udp_provider.nstat_copy_descriptor = nstat_udp_copy_descriptor;
	nstat_udp_provider.nstat_release = nstat_udp_release;
	nstat_udp_provider.nstat_reporting_allowed = nstat_udp_reporting_allowed;
	nstat_udp_provider.next = nstat_providers;
	nstat_providers = &nstat_udp_provider;
}
#pragma mark -- TCP/UDP Userland

// Almost all of this infrastructure is common to both TCP and UDP

static nstat_provider	nstat_userland_tcp_provider;
static nstat_provider	nstat_userland_udp_provider;

struct nstat_tu_shadow {
	tailq_entry_tu_shadow			shad_link;
	userland_stats_request_vals_fn		*shad_getvals_fn;
	userland_stats_provider_context		*shad_provider_context;
	u_int64_t				shad_properties;
	nstat_provider_id_t			shad_provider;
	uint32_t				shad_magic;
};

// Magic number checking should remain in place until the userland provider has been fully proven
#define TU_SHADOW_MAGIC		0xfeedf00d
#define TU_SHADOW_UNMAGIC	0xdeaddeed

static tailq_head_tu_shadow nstat_userprot_shad_head = TAILQ_HEAD_INITIALIZER(nstat_userprot_shad_head);
static errno_t
nstat_userland_tu_lookup(
	__unused const void			*data,
	__unused u_int32_t			length,
	__unused nstat_provider_cookie_t	*out_cookie)
{
	// Looking up a specific connection is not supported
	return ENOTSUP;
}
static int
nstat_userland_tu_gone(
	__unused nstat_provider_cookie_t	cookie)
{
	// Returns non-zero if the source has gone.
	// We don't keep a source hanging around, so the answer is always 0
	return 0;
}
static errno_t
nstat_userland_tu_counts(
	nstat_provider_cookie_t	cookie,
	struct nstat_counts	*out_counts,
	int			*out_gone)
{
	struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie;
	assert(shad->shad_magic == TU_SHADOW_MAGIC);

	bool result = (*shad->shad_getvals_fn)(shad->shad_provider_context, out_counts, NULL);

	if (out_gone) *out_gone = 0;

	return (result)? 0 : EIO;
}
static errno_t
nstat_userland_tu_copy_descriptor(
	nstat_provider_cookie_t	cookie,
	void			*data,
	__unused u_int32_t	len)
{
	struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie;
	assert(shad->shad_magic == TU_SHADOW_MAGIC);

	bool result = (*shad->shad_getvals_fn)(shad->shad_provider_context, NULL, data);

	return (result)? 0 : EIO;
}
static void
nstat_userland_tu_release(
	__unused nstat_provider_cookie_t	cookie,
	__unused int				locked)
{
	// Called when a nstat_src is detached.
	// We don't reference count or ask for delayed release so nothing to do here.
}
static bool
check_reporting_for_user(nstat_provider_filter *filter, pid_t pid, pid_t epid, uuid_t *uuid, uuid_t *euuid)
{
	bool retval = true;

	if ((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0)
	{
		retval = false;

		if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_PID) != 0) &&
		    (filter->npf_pid == pid))
		{
			retval = true;
		}
		else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EPID) != 0) &&
		    (filter->npf_pid == epid))
		{
			retval = true;
		}
		else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_UUID) != 0) &&
		    (memcmp(filter->npf_uuid, uuid, sizeof(*uuid)) == 0))
		{
			retval = true;
		}
		else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EUUID) != 0) &&
		    (memcmp(filter->npf_uuid, euuid, sizeof(*euuid)) == 0))
		{
			retval = true;
		}
	}
	return retval;
}
static bool
nstat_userland_tcp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter)
{
	bool retval = true;

	if ((filter->npf_flags & (NSTAT_FILTER_IFNET_FLAGS|NSTAT_FILTER_SPECIFIC_USER)) != 0)
	{
		nstat_tcp_descriptor tcp_desc;	// Stack allocation - OK or pushing the limits too far?
		struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie;

		assert(shad->shad_magic == TU_SHADOW_MAGIC);

		if ((*shad->shad_getvals_fn)(shad->shad_provider_context, NULL, &tcp_desc))
		{
			if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0)
			{
				if ((filter->npf_flags & tcp_desc.ifnet_properties) == 0)
				{
					return false;
				}
			}
			if ((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0)
			{
				retval = check_reporting_for_user(filter, (pid_t)tcp_desc.pid, (pid_t)tcp_desc.epid,
				    &tcp_desc.uuid, &tcp_desc.euuid);
			}
		}
		else
		{
			retval = false; // No further information, so might as well give up now.
		}
	}
	return retval;
}
static bool
nstat_userland_udp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter)
{
	bool retval = true;

	if ((filter->npf_flags & (NSTAT_FILTER_IFNET_FLAGS|NSTAT_FILTER_SPECIFIC_USER)) != 0)
	{
		nstat_udp_descriptor udp_desc;	// Stack allocation - OK or pushing the limits too far?
		struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie;

		assert(shad->shad_magic == TU_SHADOW_MAGIC);

		if ((*shad->shad_getvals_fn)(shad->shad_provider_context, NULL, &udp_desc))
		{
			if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0)
			{
				if ((filter->npf_flags & udp_desc.ifnet_properties) == 0)
				{
					return false;
				}
			}
			if ((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0)
			{
				retval = check_reporting_for_user(filter, (pid_t)udp_desc.pid, (pid_t)udp_desc.epid,
				    &udp_desc.uuid, &udp_desc.euuid);
			}
		}
		else
		{
			retval = false; // No further information, so might as well give up now.
		}
	}
	return retval;
}
static errno_t
nstat_userland_tcp_add_watcher(
	nstat_control_state	*state)
{
	struct nstat_tu_shadow *shad;

	OSIncrementAtomic(&nstat_userland_tcp_watchers);

	lck_mtx_lock(&nstat_mtx);

	TAILQ_FOREACH(shad, &nstat_userprot_shad_head, shad_link) {
		assert(shad->shad_magic == TU_SHADOW_MAGIC);

		if (shad->shad_provider == NSTAT_PROVIDER_TCP_USERLAND)
		{
			int result = nstat_control_source_add(0, state, &nstat_userland_tcp_provider, shad);
			if (result != 0)
			{
				printf("%s - nstat_control_source_add returned %d\n", __func__, result);
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);

	return 0;
}
static errno_t
nstat_userland_udp_add_watcher(
	nstat_control_state	*state)
{
	struct nstat_tu_shadow *shad;

	OSIncrementAtomic(&nstat_userland_udp_watchers);

	lck_mtx_lock(&nstat_mtx);

	TAILQ_FOREACH(shad, &nstat_userprot_shad_head, shad_link) {
		assert(shad->shad_magic == TU_SHADOW_MAGIC);

		if (shad->shad_provider == NSTAT_PROVIDER_UDP_USERLAND)
		{
			int result = nstat_control_source_add(0, state, &nstat_userland_udp_provider, shad);
			if (result != 0)
			{
				printf("%s - nstat_control_source_add returned %d\n", __func__, result);
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);

	return 0;
}
static void
nstat_userland_tcp_remove_watcher(
	__unused nstat_control_state	*state)
{
	OSDecrementAtomic(&nstat_userland_tcp_watchers);
}

static void
nstat_userland_udp_remove_watcher(
	__unused nstat_control_state	*state)
{
	OSDecrementAtomic(&nstat_userland_udp_watchers);
}
static void
nstat_init_userland_tcp_provider(void)
{
	bzero(&nstat_userland_tcp_provider, sizeof(nstat_tcp_provider));
	nstat_userland_tcp_provider.nstat_descriptor_length = sizeof(nstat_tcp_descriptor);
	nstat_userland_tcp_provider.nstat_provider_id = NSTAT_PROVIDER_TCP_USERLAND;
	nstat_userland_tcp_provider.nstat_lookup = nstat_userland_tu_lookup;
	nstat_userland_tcp_provider.nstat_gone = nstat_userland_tu_gone;
	nstat_userland_tcp_provider.nstat_counts = nstat_userland_tu_counts;
	nstat_userland_tcp_provider.nstat_release = nstat_userland_tu_release;
	nstat_userland_tcp_provider.nstat_watcher_add = nstat_userland_tcp_add_watcher;
	nstat_userland_tcp_provider.nstat_watcher_remove = nstat_userland_tcp_remove_watcher;
	nstat_userland_tcp_provider.nstat_copy_descriptor = nstat_userland_tu_copy_descriptor;
	nstat_userland_tcp_provider.nstat_reporting_allowed = nstat_userland_tcp_reporting_allowed;
	nstat_userland_tcp_provider.next = nstat_providers;
	nstat_providers = &nstat_userland_tcp_provider;
}
static void
nstat_init_userland_udp_provider(void)
{
	bzero(&nstat_userland_udp_provider, sizeof(nstat_udp_provider));
	nstat_userland_udp_provider.nstat_descriptor_length = sizeof(nstat_udp_descriptor);
	nstat_userland_udp_provider.nstat_provider_id = NSTAT_PROVIDER_UDP_USERLAND;
	nstat_userland_udp_provider.nstat_lookup = nstat_userland_tu_lookup;
	nstat_userland_udp_provider.nstat_gone = nstat_userland_tu_gone;
	nstat_userland_udp_provider.nstat_counts = nstat_userland_tu_counts;
	nstat_userland_udp_provider.nstat_release = nstat_userland_tu_release;
	nstat_userland_udp_provider.nstat_watcher_add = nstat_userland_udp_add_watcher;
	nstat_userland_udp_provider.nstat_watcher_remove = nstat_userland_udp_remove_watcher;
	nstat_userland_udp_provider.nstat_copy_descriptor = nstat_userland_tu_copy_descriptor;
	nstat_userland_udp_provider.nstat_reporting_allowed = nstat_userland_udp_reporting_allowed;
	nstat_userland_udp_provider.next = nstat_providers;
	nstat_providers = &nstat_userland_udp_provider;
}
// Things get started with a call to netstats to say that there's a new connection:
__private_extern__ nstat_userland_context
ntstat_userland_stats_open(userland_stats_provider_context *ctx,
			   int provider_id,
			   u_int64_t properties,
			   userland_stats_request_vals_fn req_fn)
{
	struct nstat_tu_shadow *shad;

	if ((provider_id != NSTAT_PROVIDER_TCP_USERLAND) && (provider_id != NSTAT_PROVIDER_UDP_USERLAND))
	{
		printf("%s - incorrect provider is supplied, %d\n", __func__, provider_id);
		return NULL;
	}

	shad = OSMalloc(sizeof(*shad), nstat_malloc_tag);
	if (shad == NULL)
		return NULL;

	shad->shad_getvals_fn = req_fn;
	shad->shad_provider_context = ctx;
	shad->shad_provider = provider_id;
	shad->shad_properties = properties;
	shad->shad_magic = TU_SHADOW_MAGIC;

	lck_mtx_lock(&nstat_mtx);
	nstat_control_state	*state;

	// Even if there are no watchers, we save the shadow structure
	TAILQ_INSERT_HEAD(&nstat_userprot_shad_head, shad, shad_link);

	for (state = nstat_controls; state; state = state->ncs_next)
	{
		if ((state->ncs_watching & (1 << provider_id)) != 0)
		{
			// this client is watching tcp/udp userland
			int result = nstat_control_source_add(0, state, &nstat_userland_tcp_provider, shad);
			if (result != 0)
			{
				printf("%s - nstat_control_source_add returned %d\n", __func__, result);
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);

	return (nstat_userland_context)shad;
}
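
/*
 * Usage sketch (my_ctx, my_properties and my_req_fn are hypothetical): a
 * userland-stack shim registers a connection and later tears it down:
 *
 *	nstat_userland_context h = ntstat_userland_stats_open(&my_ctx,
 *	    NSTAT_PROVIDER_TCP_USERLAND, my_properties, my_req_fn);
 *	...
 *	ntstat_userland_stats_close(h);
 *
 * The request-values callback must be able to fill in counts and/or a
 * descriptor on demand (see nstat_userland_tu_counts() above), since
 * watchers may poll at any time until the close.
 */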
__private_extern__ void
ntstat_userland_stats_close(nstat_userland_context nstat_ctx)
{
	struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)nstat_ctx;
	nstat_src *dead_list = NULL;

	if (shad == NULL)
		return;

	assert(shad->shad_magic == TU_SHADOW_MAGIC);

	lck_mtx_lock(&nstat_mtx);
	if (nstat_userland_udp_watchers != 0 || nstat_userland_tcp_watchers != 0)
	{
		nstat_control_state	*state;
		nstat_src		*src, *prevsrc;
		errno_t			result;

		for (state = nstat_controls; state; state = state->ncs_next)
		{
			lck_mtx_lock(&state->mtx);
			for (prevsrc = NULL, src = state->ncs_srcs; src;
			    prevsrc = src, src = src->next)
			{
				if (shad == (struct nstat_tu_shadow *)src->cookie)
					break;
			}

			if (src)
			{
				result = nstat_control_send_goodbye(state, src);

				if (prevsrc)
					prevsrc->next = src->next;
				else
					state->ncs_srcs = src->next;

				src->next = dead_list;
				dead_list = src;
			}
			lck_mtx_unlock(&state->mtx);
		}
	}
	TAILQ_REMOVE(&nstat_userprot_shad_head, shad, shad_link);

	lck_mtx_unlock(&nstat_mtx);

	while (dead_list)
	{
		src = dead_list;
		dead_list = src->next;

		nstat_control_cleanup_source(NULL, src, TRUE);
	}

	shad->shad_magic = TU_SHADOW_UNMAGIC;

	OSFree(shad, sizeof(*shad), nstat_malloc_tag);
}
__private_extern__ void
ntstat_userland_stats_event(
	__unused nstat_userland_context context,
	__unused userland_stats_event_t event)
{
	// This is a dummy for when we hook up event reporting to NetworkStatistics.
	// See <rdar://problem/23022832> NetworkStatistics should provide opt-in notifications
}
#pragma mark -- ifnet Provider --

static nstat_provider	nstat_ifnet_provider;

/*
 * We store a pointer to the ifnet and the original threshold
 * requested by the client.
 */
struct nstat_ifnet_cookie
{
	struct ifnet	*ifp;
	uint64_t	threshold;
};
static errno_t
nstat_ifnet_lookup(
	const void		*data,
	u_int32_t		length,
	nstat_provider_cookie_t	*out_cookie)
{
	const nstat_ifnet_add_param *param = (const nstat_ifnet_add_param *)data;
	struct ifnet *ifp;
	boolean_t changed = FALSE;
	nstat_control_state *state;
	nstat_src *src;
	struct nstat_ifnet_cookie *cookie;

	if (length < sizeof(*param) || param->threshold < 1024*1024)
		return EINVAL;
	if (nstat_privcheck != 0) {
		errno_t result = priv_check_cred(kauth_cred_get(),
		    PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0);
		if (result != 0)
			return result;
	}
	cookie = OSMalloc(sizeof(*cookie), nstat_malloc_tag);
	if (cookie == NULL)
		return ENOMEM;
	bzero(cookie, sizeof(*cookie));

	ifnet_head_lock_shared();
	TAILQ_FOREACH(ifp, &ifnet_head, if_link)
	{
		ifnet_lock_exclusive(ifp);
		if (ifp->if_index == param->ifindex)
		{
			cookie->ifp = ifp;
			cookie->threshold = param->threshold;
			*out_cookie = cookie;
			if (!ifp->if_data_threshold ||
			    ifp->if_data_threshold > param->threshold)
			{
				changed = TRUE;
				ifp->if_data_threshold = param->threshold;
			}
			ifnet_lock_done(ifp);
			ifnet_reference(ifp);
			break;
		}
		ifnet_lock_done(ifp);
	}
	ifnet_head_done();

	/*
	 * When we change the threshold to something smaller, we notify
	 * all of our clients with a description message.
	 * We won't send a message to the client we are currently serving
	 * because it has no `ifnet source' yet.
	 */
	if (changed)
	{
		lck_mtx_lock(&nstat_mtx);
		for (state = nstat_controls; state; state = state->ncs_next)
		{
			lck_mtx_lock(&state->mtx);
			for (src = state->ncs_srcs; src; src = src->next)
			{
				if (src->provider != &nstat_ifnet_provider)
					continue;
				nstat_control_send_description(state, src, 0, 0);
			}
			lck_mtx_unlock(&state->mtx);
		}
		lck_mtx_unlock(&nstat_mtx);
	}
	if (cookie->ifp == NULL)
		OSFree(cookie, sizeof(*cookie), nstat_malloc_tag);

	return ifp ? 0 : EINVAL;
}
static int
nstat_ifnet_gone(
	nstat_provider_cookie_t	cookie)
{
	struct ifnet *ifp;
	struct nstat_ifnet_cookie *ifcookie =
	    (struct nstat_ifnet_cookie *)cookie;

	ifnet_head_lock_shared();
	TAILQ_FOREACH(ifp, &ifnet_head, if_link)
	{
		if (ifp == ifcookie->ifp)
			break;
	}
	ifnet_head_done();

	return ifp ? 0 : 1;
}
static errno_t
nstat_ifnet_counts(
	nstat_provider_cookie_t	cookie,
	struct nstat_counts	*out_counts,
	int			*out_gone)
{
	struct nstat_ifnet_cookie *ifcookie =
	    (struct nstat_ifnet_cookie *)cookie;
	struct ifnet *ifp = ifcookie->ifp;

	if (out_gone) *out_gone = 0;

	// if the ifnet is gone, we should stop using it
	if (nstat_ifnet_gone(cookie))
	{
		if (out_gone) *out_gone = 1;
		return EINVAL;
	}

	bzero(out_counts, sizeof(*out_counts));
	out_counts->nstat_rxpackets = ifp->if_ipackets;
	out_counts->nstat_rxbytes = ifp->if_ibytes;
	out_counts->nstat_txpackets = ifp->if_opackets;
	out_counts->nstat_txbytes = ifp->if_obytes;
	out_counts->nstat_cell_rxbytes = out_counts->nstat_cell_txbytes = 0;
	return 0;
}
static void
nstat_ifnet_release(
	nstat_provider_cookie_t	cookie,
	__unused int		locked)
{
	struct nstat_ifnet_cookie *ifcookie;
	struct ifnet *ifp;
	nstat_control_state *state;
	nstat_src *src;
	uint64_t minthreshold = UINT64_MAX;

	/*
	 * Find all the clients that requested a threshold
	 * for this ifnet and re-calculate if_data_threshold.
	 */
	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next)
	{
		lck_mtx_lock(&state->mtx);
		for (src = state->ncs_srcs; src; src = src->next)
		{
			/* Skip the provider we are about to detach. */
			if (src->provider != &nstat_ifnet_provider ||
			    src->cookie == cookie)
				continue;
			ifcookie = (struct nstat_ifnet_cookie *)src->cookie;
			if (ifcookie->threshold < minthreshold)
				minthreshold = ifcookie->threshold;
		}
		lck_mtx_unlock(&state->mtx);
	}
	lck_mtx_unlock(&nstat_mtx);
	/*
	 * Reset if_data_threshold or disable it.
	 */
	ifcookie = (struct nstat_ifnet_cookie *)cookie;
	ifp = ifcookie->ifp;
	if (ifnet_is_attached(ifp, 1)) {
		ifnet_lock_exclusive(ifp);
		if (minthreshold == UINT64_MAX)
			ifp->if_data_threshold = 0;
		else
			ifp->if_data_threshold = minthreshold;
		ifnet_lock_done(ifp);
		ifnet_decr_iorefcnt(ifp);
	}
	ifnet_release(ifp);
	OSFree(ifcookie, sizeof(*ifcookie), nstat_malloc_tag);
}
static void
nstat_ifnet_copy_link_status(
	struct ifnet			*ifp,
	struct nstat_ifnet_descriptor	*desc)
{
	struct if_link_status *ifsr = ifp->if_link_status;
	nstat_ifnet_desc_link_status *link_status = &desc->link_status;

	link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_NONE;
	if (ifsr == NULL)
		return;

	lck_rw_lock_shared(&ifp->if_link_status_lock);

	if (ifp->if_type == IFT_CELLULAR) {

		nstat_ifnet_desc_cellular_status *cell_status = &link_status->u.cellular;
		struct if_cellular_status_v1 *if_cell_sr =
		    &ifsr->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;

		if (ifsr->ifsr_version != IF_CELLULAR_STATUS_REPORT_VERSION_1)
			goto done;

		link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_CELLULAR;

		if (if_cell_sr->valid_bitmask & IF_CELL_LINK_QUALITY_METRIC_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_LINK_QUALITY_METRIC_VALID;
			cell_status->link_quality_metric = if_cell_sr->link_quality_metric;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_EFFECTIVE_BANDWIDTH_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_EFFECTIVE_BANDWIDTH_VALID;
			cell_status->ul_effective_bandwidth = if_cell_sr->ul_effective_bandwidth;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_BANDWIDTH_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_BANDWIDTH_VALID;
			cell_status->ul_max_bandwidth = if_cell_sr->ul_max_bandwidth;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_MIN_LATENCY_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MIN_LATENCY_VALID;
			cell_status->ul_min_latency = if_cell_sr->ul_min_latency;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_EFFECTIVE_LATENCY_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_EFFECTIVE_LATENCY_VALID;
			cell_status->ul_effective_latency = if_cell_sr->ul_effective_latency;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_LATENCY_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_LATENCY_VALID;
			cell_status->ul_max_latency = if_cell_sr->ul_max_latency;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_RETXT_LEVEL_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_VALID;
			if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_NONE)
				cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_NONE;
			else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_LOW)
				cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_LOW;
			else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_MEDIUM)
				cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_MEDIUM;
			else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_HIGH)
				cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_HIGH;
			else
				cell_status->valid_bitmask &= ~NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_VALID;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_BYTES_LOST_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_BYTES_LOST_VALID;
			cell_status->ul_bytes_lost = if_cell_sr->ul_bytes_lost;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_MIN_QUEUE_SIZE_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MIN_QUEUE_SIZE_VALID;
			cell_status->ul_min_queue_size = if_cell_sr->ul_min_queue_size;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_AVG_QUEUE_SIZE_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_AVG_QUEUE_SIZE_VALID;
			cell_status->ul_avg_queue_size = if_cell_sr->ul_avg_queue_size;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_QUEUE_SIZE_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_QUEUE_SIZE_VALID;
			cell_status->ul_max_queue_size = if_cell_sr->ul_max_queue_size;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_DL_EFFECTIVE_BANDWIDTH_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_DL_EFFECTIVE_BANDWIDTH_VALID;
			cell_status->dl_effective_bandwidth = if_cell_sr->dl_effective_bandwidth;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_DL_MAX_BANDWIDTH_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_DL_MAX_BANDWIDTH_VALID;
			cell_status->dl_max_bandwidth = if_cell_sr->dl_max_bandwidth;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_CONFIG_INACTIVITY_TIME_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_CONFIG_INACTIVITY_TIME_VALID;
			cell_status->config_inactivity_time = if_cell_sr->config_inactivity_time;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_CONFIG_BACKOFF_TIME_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_CONFIG_BACKOFF_TIME_VALID;
			cell_status->config_backoff_time = if_cell_sr->config_backoff_time;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_MSS_RECOMMENDED_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_MSS_RECOMMENDED_VALID;
			cell_status->mss_recommended = if_cell_sr->mss_recommended;
		}
	} else if (ifp->if_subfamily == IFNET_SUBFAMILY_WIFI) {

		nstat_ifnet_desc_wifi_status *wifi_status = &link_status->u.wifi;
		struct if_wifi_status_v1 *if_wifi_sr =
		    &ifsr->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;

		if (ifsr->ifsr_version != IF_WIFI_STATUS_REPORT_VERSION_1)
			goto done;

		link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_WIFI;

		if (if_wifi_sr->valid_bitmask & IF_WIFI_LINK_QUALITY_METRIC_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_LINK_QUALITY_METRIC_VALID;
			wifi_status->link_quality_metric = if_wifi_sr->link_quality_metric;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID;
			wifi_status->ul_effective_bandwidth = if_wifi_sr->ul_effective_bandwidth;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MAX_BANDWIDTH_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MAX_BANDWIDTH_VALID;
			wifi_status->ul_max_bandwidth = if_wifi_sr->ul_max_bandwidth;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MIN_LATENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MIN_LATENCY_VALID;
			wifi_status->ul_min_latency = if_wifi_sr->ul_min_latency;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_EFFECTIVE_LATENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_EFFECTIVE_LATENCY_VALID;
			wifi_status->ul_effective_latency = if_wifi_sr->ul_effective_latency;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MAX_LATENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MAX_LATENCY_VALID;
			wifi_status->ul_max_latency = if_wifi_sr->ul_max_latency;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_RETXT_LEVEL_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_VALID;
			if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_NONE)
				wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_NONE;
			else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_LOW)
				wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_LOW;
			else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_MEDIUM)
				wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_MEDIUM;
			else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_HIGH)
				wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_HIGH;
			else
				wifi_status->valid_bitmask &= ~NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_VALID;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_BYTES_LOST_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_BYTES_LOST_VALID;
			wifi_status->ul_bytes_lost = if_wifi_sr->ul_bytes_lost;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_ERROR_RATE_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_ERROR_RATE_VALID;
			wifi_status->ul_error_rate = if_wifi_sr->ul_error_rate;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID;
			wifi_status->dl_effective_bandwidth = if_wifi_sr->dl_effective_bandwidth;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MAX_BANDWIDTH_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MAX_BANDWIDTH_VALID;
			wifi_status->dl_max_bandwidth = if_wifi_sr->dl_max_bandwidth;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MIN_LATENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MIN_LATENCY_VALID;
			wifi_status->dl_min_latency = if_wifi_sr->dl_min_latency;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_EFFECTIVE_LATENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_EFFECTIVE_LATENCY_VALID;
			wifi_status->dl_effective_latency = if_wifi_sr->dl_effective_latency;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MAX_LATENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MAX_LATENCY_VALID;
			wifi_status->dl_max_latency = if_wifi_sr->dl_max_latency;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_ERROR_RATE_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_ERROR_RATE_VALID;
			wifi_status->dl_error_rate = if_wifi_sr->dl_error_rate;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_FREQUENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_VALID;
			if (if_wifi_sr->config_frequency == IF_WIFI_CONFIG_FREQUENCY_2_4_GHZ)
				wifi_status->config_frequency = NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_2_4_GHZ;
			else if (if_wifi_sr->config_frequency == IF_WIFI_CONFIG_FREQUENCY_5_0_GHZ)
				wifi_status->config_frequency = NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_5_0_GHZ;
			else
				wifi_status->valid_bitmask &= ~NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_VALID;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_MULTICAST_RATE_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_MULTICAST_RATE_VALID;
			wifi_status->config_multicast_rate = if_wifi_sr->config_multicast_rate;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_SCAN_COUNT_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_SCAN_COUNT_VALID;
			wifi_status->scan_count = if_wifi_sr->scan_count;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_SCAN_DURATION_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_SCAN_DURATION_VALID;
			wifi_status->scan_duration = if_wifi_sr->scan_duration;
		}
	}

done:
	lck_rw_done(&ifp->if_link_status_lock);
}
static u_int64_t nstat_ifnet_last_report_time = 0;
extern int tcp_report_stats_interval;
static void
nstat_ifnet_compute_percentages(struct if_tcp_ecn_perf_stat *ifst)
{
	/* Retransmit percentage */
	if (ifst->total_rxmitpkts > 0 && ifst->total_txpkts > 0) {
		/* shift by 10 for precision */
		ifst->rxmit_percent =
		    ((ifst->total_rxmitpkts << 10) * 100) / ifst->total_txpkts;
	} else {
		ifst->rxmit_percent = 0;
	}

	/* Out-of-order percentage */
	if (ifst->total_oopkts > 0 && ifst->total_rxpkts > 0) {
		/* shift by 10 for precision */
		ifst->oo_percent =
		    ((ifst->total_oopkts << 10) * 100) / ifst->total_rxpkts;
	} else {
		ifst->oo_percent = 0;
	}

	/* Reorder percentage */
	if (ifst->total_reorderpkts > 0 &&
	    (ifst->total_txpkts + ifst->total_rxpkts) > 0) {
		/* shift by 10 for precision */
		ifst->reorder_percent =
		    ((ifst->total_reorderpkts << 10) * 100) /
		    (ifst->total_txpkts + ifst->total_rxpkts);
	} else {
		ifst->reorder_percent = 0;
	}
}
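/*
 * Worked example of the shift-by-10 fixed point above (illustrative):
 * 5 retransmits out of 200 transmitted packets gives
 * ((5 << 10) * 100) / 200 = 512000 / 200 = 2560. Dividing out the 1024
 * scale factor yields 2.5, so the consumer recovers a fractional
 * percentage from pure integer arithmetic.
 */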
static void
nstat_ifnet_normalize_counter(struct if_tcp_ecn_stat *if_st)
{
	u_int64_t ecn_on_conn, ecn_off_conn;

	ecn_on_conn = if_st->ecn_client_success +
	    if_st->ecn_server_success;
	ecn_off_conn = if_st->ecn_off_conn +
	    (if_st->ecn_client_setup - if_st->ecn_client_success) +
	    (if_st->ecn_server_setup - if_st->ecn_server_success);

	/*
	 * report sack episodes, rst_drop and rxmit_drop
	 * as a ratio per connection, shift by 10 for precision
	 */
	if (ecn_on_conn > 0) {
		if_st->ecn_on.sack_episodes =
		    (if_st->ecn_on.sack_episodes << 10) / ecn_on_conn;
		if_st->ecn_on.rst_drop =
		    (if_st->ecn_on.rst_drop << 10) * 100 / ecn_on_conn;
		if_st->ecn_on.rxmit_drop =
		    (if_st->ecn_on.rxmit_drop << 10) * 100 / ecn_on_conn;
	} else {
		/* set to zero, just in case */
		if_st->ecn_on.sack_episodes = 0;
		if_st->ecn_on.rst_drop = 0;
		if_st->ecn_on.rxmit_drop = 0;
	}

	if (ecn_off_conn > 0) {
		if_st->ecn_off.sack_episodes =
		    (if_st->ecn_off.sack_episodes << 10) / ecn_off_conn;
		if_st->ecn_off.rst_drop =
		    (if_st->ecn_off.rst_drop << 10) * 100 / ecn_off_conn;
		if_st->ecn_off.rxmit_drop =
		    (if_st->ecn_off.rxmit_drop << 10) * 100 / ecn_off_conn;
	} else {
		if_st->ecn_off.sack_episodes = 0;
		if_st->ecn_off.rst_drop = 0;
		if_st->ecn_off.rxmit_drop = 0;
	}
	if_st->ecn_total_conn = ecn_off_conn + ecn_on_conn;
}
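/*
 * Worked example of the per-connection normalization above
 * (illustrative): 3 RST drops over 50 ECN-on connections gives
 * (3 << 10) * 100 / 50 = 307200 / 50 = 6144, i.e. 6 drops per hundred
 * connections once the consumer divides out the 1024 scale factor.
 */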
static void
nstat_ifnet_report_ecn_stats(void)
{
	u_int64_t uptime, last_report_time;
	struct nstat_sysinfo_data data;
	struct nstat_sysinfo_ifnet_ecn_stats *st;
	struct ifnet *ifp;

	uptime = net_uptime();

	if ((int)(uptime - nstat_ifnet_last_report_time) <
	    tcp_report_stats_interval)
		return;

	last_report_time = nstat_ifnet_last_report_time;
	nstat_ifnet_last_report_time = uptime;
	data.flags = NSTAT_SYSINFO_IFNET_ECN_STATS;
	st = &data.u.ifnet_ecn_stats;

	ifnet_head_lock_shared();
	TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
		if (ifp->if_ipv4_stat == NULL || ifp->if_ipv6_stat == NULL)
			continue;

		if ((ifp->if_refflags & (IFRF_ATTACHED | IFRF_DETACHING)) !=
		    IFRF_ATTACHED)
			continue;

		/* Limit reporting to Wifi, Ethernet and cellular. */
		if (!(IFNET_IS_ETHERNET(ifp) || IFNET_IS_CELLULAR(ifp)))
			continue;

		bzero(st, sizeof(*st));
		if (IFNET_IS_CELLULAR(ifp)) {
			st->ifnet_type = NSTAT_IFNET_ECN_TYPE_CELLULAR;
		} else if (IFNET_IS_WIFI(ifp)) {
			st->ifnet_type = NSTAT_IFNET_ECN_TYPE_WIFI;
		} else {
			st->ifnet_type = NSTAT_IFNET_ECN_TYPE_ETHERNET;
		}
		data.unsent_data_cnt = ifp->if_unsent_data_cnt;
		/* skip if there was no update since last report */
		if (ifp->if_ipv4_stat->timestamp <= 0 ||
		    ifp->if_ipv4_stat->timestamp < last_report_time)
			goto v6;
		st->ifnet_proto = NSTAT_IFNET_ECN_PROTO_IPV4;
		/* compute percentages using packet counts */
		nstat_ifnet_compute_percentages(&ifp->if_ipv4_stat->ecn_on);
		nstat_ifnet_compute_percentages(&ifp->if_ipv4_stat->ecn_off);
		nstat_ifnet_normalize_counter(ifp->if_ipv4_stat);
		bcopy(ifp->if_ipv4_stat, &st->ecn_stat,
		    sizeof(st->ecn_stat));
		nstat_sysinfo_send_data(&data);

		/* Zero the stats in ifp */
		bzero(ifp->if_ipv4_stat, sizeof(*ifp->if_ipv4_stat));
v6:
		/* skip if there was no update since last report */
		if (ifp->if_ipv6_stat->timestamp <= 0 ||
		    ifp->if_ipv6_stat->timestamp < last_report_time)
			continue;
		st->ifnet_proto = NSTAT_IFNET_ECN_PROTO_IPV6;

		/* compute percentages using packet counts */
		nstat_ifnet_compute_percentages(&ifp->if_ipv6_stat->ecn_on);
		nstat_ifnet_compute_percentages(&ifp->if_ipv6_stat->ecn_off);
		nstat_ifnet_normalize_counter(ifp->if_ipv6_stat);
		bcopy(ifp->if_ipv6_stat, &st->ecn_stat,
		    sizeof(st->ecn_stat));
		nstat_sysinfo_send_data(&data);

		/* Zero the stats in ifp */
		bzero(ifp->if_ipv6_stat, sizeof(*ifp->if_ipv6_stat));
	}
	ifnet_head_done();
}
static errno_t
nstat_ifnet_copy_descriptor(
	nstat_provider_cookie_t	cookie,
	void			*data,
	u_int32_t		len)
{
	nstat_ifnet_descriptor *desc = (nstat_ifnet_descriptor *)data;
	struct nstat_ifnet_cookie *ifcookie =
	    (struct nstat_ifnet_cookie *)cookie;
	struct ifnet *ifp = ifcookie->ifp;

	if (len < sizeof(nstat_ifnet_descriptor))
		return EINVAL;

	if (nstat_ifnet_gone(cookie))
		return EINVAL;

	bzero(desc, sizeof(*desc));
	ifnet_lock_shared(ifp);
	strlcpy(desc->name, ifp->if_xname, sizeof(desc->name));
	desc->ifindex = ifp->if_index;
	desc->threshold = ifp->if_data_threshold;
	desc->type = ifp->if_type;
	if (ifp->if_desc.ifd_len < sizeof(desc->description))
		memcpy(desc->description, ifp->if_desc.ifd_desc,
		    sizeof(desc->description));
	nstat_ifnet_copy_link_status(ifp, desc);
	ifnet_lock_done(ifp);
	return 0;
}
static void
nstat_init_ifnet_provider(void)
{
	bzero(&nstat_ifnet_provider, sizeof(nstat_ifnet_provider));
	nstat_ifnet_provider.nstat_provider_id = NSTAT_PROVIDER_IFNET;
	nstat_ifnet_provider.nstat_descriptor_length = sizeof(nstat_ifnet_descriptor);
	nstat_ifnet_provider.nstat_lookup = nstat_ifnet_lookup;
	nstat_ifnet_provider.nstat_gone = nstat_ifnet_gone;
	nstat_ifnet_provider.nstat_counts = nstat_ifnet_counts;
	nstat_ifnet_provider.nstat_watcher_add = NULL;
	nstat_ifnet_provider.nstat_watcher_remove = NULL;
	nstat_ifnet_provider.nstat_copy_descriptor = nstat_ifnet_copy_descriptor;
	nstat_ifnet_provider.nstat_release = nstat_ifnet_release;
	nstat_ifnet_provider.next = nstat_providers;
	nstat_providers = &nstat_ifnet_provider;
}
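/*
 * Illustrative sketch (assumed names flagged inline): every provider
 * registers the same way as nstat_init_ifnet_provider() above -- fill
 * in the nstat_provider callback table and push it onto the global
 * nstat_providers list, which the lookup and watcher machinery then
 * walk. A hypothetical provider "foo" would look like this; the
 * nstat_foo_* symbols and NSTAT_PROVIDER_FOO are assumptions, not
 * part of this file.
 */
#if 0
static nstat_provider	nstat_foo_provider;

static void
nstat_init_foo_provider(void)
{
	bzero(&nstat_foo_provider, sizeof(nstat_foo_provider));
	nstat_foo_provider.nstat_provider_id = NSTAT_PROVIDER_FOO;
	nstat_foo_provider.nstat_descriptor_length = 0;	// no descriptors
	nstat_foo_provider.nstat_lookup = nstat_foo_lookup;
	nstat_foo_provider.nstat_gone = nstat_foo_gone;
	nstat_foo_provider.nstat_counts = nstat_foo_counts;
	nstat_foo_provider.nstat_release = nstat_foo_release;
	nstat_foo_provider.next = nstat_providers;	// head insertion
	nstat_providers = &nstat_foo_provider;
}
#endif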
__private_extern__ void
nstat_ifnet_threshold_reached(unsigned int ifindex)
{
	nstat_control_state *state;
	nstat_src *src;
	struct ifnet *ifp;
	struct nstat_ifnet_cookie *ifcookie;

	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next)
	{
		lck_mtx_lock(&state->mtx);
		for (src = state->ncs_srcs; src; src = src->next)
		{
			if (src->provider != &nstat_ifnet_provider)
				continue;
			ifcookie = (struct nstat_ifnet_cookie *)src->cookie;
			ifp = ifcookie->ifp;
			if (ifp->if_index != ifindex)
				continue;
			nstat_control_send_counts(state, src, 0, 0, NULL);
		}
		lck_mtx_unlock(&state->mtx);
	}
	lck_mtx_unlock(&nstat_mtx);
}
#pragma mark -- Sysinfo --

static void
nstat_set_keyval_scalar(nstat_sysinfo_keyval *kv, int key, u_int32_t val)
{
	kv->nstat_sysinfo_key = key;
	kv->nstat_sysinfo_flags = NSTAT_SYSINFO_FLAG_SCALAR;
	kv->u.nstat_sysinfo_scalar = val;
}
static void
nstat_sysinfo_send_data_internal(
	nstat_control_state *control,
	nstat_sysinfo_data *data)
{
	nstat_msg_sysinfo_counts *syscnt = NULL;
	size_t allocsize = 0, countsize = 0, nkeyvals = 0, finalsize = 0;
	nstat_sysinfo_keyval *kv;
	errno_t result = 0;
	size_t i = 0;

	allocsize = offsetof(nstat_msg_sysinfo_counts, counts);
	countsize = offsetof(nstat_sysinfo_counts, nstat_sysinfo_keyvals);
	finalsize = allocsize;

	/* get number of key-vals for each kind of stat */
	switch (data->flags)
	{
		case NSTAT_SYSINFO_MBUF_STATS:
			nkeyvals = sizeof(struct nstat_sysinfo_mbuf_stats) /
			    sizeof(u_int32_t);
			break;
		case NSTAT_SYSINFO_TCP_STATS:
			nkeyvals = sizeof(struct nstat_sysinfo_tcp_stats) /
			    sizeof(u_int32_t);
			break;
		case NSTAT_SYSINFO_IFNET_ECN_STATS:
			nkeyvals = (sizeof(struct if_tcp_ecn_stat) /
			    sizeof(u_int64_t));
			/* Two more keys for ifnet type and proto */
			nkeyvals += 2;
			/* One key for unsent data. */
			nkeyvals++;
			break;
		default:
			return;
	}
	countsize += sizeof(nstat_sysinfo_keyval) * nkeyvals;
	allocsize += countsize;

	syscnt = OSMalloc(allocsize, nstat_malloc_tag);
	if (syscnt == NULL)
		return;
	bzero(syscnt, allocsize);

	kv = (nstat_sysinfo_keyval *) &syscnt->counts.nstat_sysinfo_keyvals;
	switch (data->flags)
	{
		case NSTAT_SYSINFO_MBUF_STATS:
		{
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_KEY_MBUF_256B_TOTAL,
			    data->u.mb_stats.total_256b);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_KEY_MBUF_2KB_TOTAL,
			    data->u.mb_stats.total_2kb);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_KEY_MBUF_4KB_TOTAL,
			    data->u.mb_stats.total_4kb);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_MBUF_16KB_TOTAL,
			    data->u.mb_stats.total_16kb);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_KEY_SOCK_MBCNT,
			    data->u.mb_stats.sbmb_total);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_KEY_SOCK_ATMBLIMIT,
			    data->u.mb_stats.sb_atmbuflimit);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_MBUF_DRAIN_CNT,
			    data->u.mb_stats.draincnt);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_MBUF_MEM_RELEASED,
			    data->u.mb_stats.memreleased);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_KEY_SOCK_MBFLOOR,
			    data->u.mb_stats.sbmb_floor);
			VERIFY(i == nkeyvals);
			break;
		}
		case NSTAT_SYSINFO_TCP_STATS:
		{
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_KEY_IPV4_AVGRTT,
			    data->u.tcp_stats.ipv4_avgrtt);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_KEY_IPV6_AVGRTT,
			    data->u.tcp_stats.ipv6_avgrtt);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_KEY_SEND_PLR,
			    data->u.tcp_stats.send_plr);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_KEY_RECV_PLR,
			    data->u.tcp_stats.recv_plr);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_KEY_SEND_TLRTO,
			    data->u.tcp_stats.send_tlrto_rate);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_KEY_SEND_REORDERRATE,
			    data->u.tcp_stats.send_reorder_rate);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_CONNECTION_ATTEMPTS,
			    data->u.tcp_stats.connection_attempts);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_CONNECTION_ACCEPTS,
			    data->u.tcp_stats.connection_accepts);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_CLIENT_ENABLED,
			    data->u.tcp_stats.ecn_client_enabled);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_SERVER_ENABLED,
			    data->u.tcp_stats.ecn_server_enabled);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_CLIENT_SETUP,
			    data->u.tcp_stats.ecn_client_setup);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_SERVER_SETUP,
			    data->u.tcp_stats.ecn_server_setup);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_CLIENT_SUCCESS,
			    data->u.tcp_stats.ecn_client_success);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_SERVER_SUCCESS,
			    data->u.tcp_stats.ecn_server_success);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_NOT_SUPPORTED,
			    data->u.tcp_stats.ecn_not_supported);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_LOST_SYN,
			    data->u.tcp_stats.ecn_lost_syn);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_LOST_SYNACK,
			    data->u.tcp_stats.ecn_lost_synack);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_RECV_CE,
			    data->u.tcp_stats.ecn_recv_ce);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_RECV_ECE,
			    data->u.tcp_stats.ecn_recv_ece);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_SENT_ECE,
			    data->u.tcp_stats.ecn_sent_ece);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_CONN_RECV_CE,
			    data->u.tcp_stats.ecn_conn_recv_ce);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_CONN_RECV_ECE,
			    data->u.tcp_stats.ecn_conn_recv_ece);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_CONN_PLNOCE,
			    data->u.tcp_stats.ecn_conn_plnoce);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_CONN_PL_CE,
			    data->u.tcp_stats.ecn_conn_pl_ce);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_CONN_NOPL_CE,
			    data->u.tcp_stats.ecn_conn_nopl_ce);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_FALLBACK_SYNLOSS,
			    data->u.tcp_stats.ecn_fallback_synloss);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_FALLBACK_REORDER,
			    data->u.tcp_stats.ecn_fallback_reorder);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_FALLBACK_CE,
			    data->u.tcp_stats.ecn_fallback_ce);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_TFO_SYN_DATA_RCV,
			    data->u.tcp_stats.tfo_syn_data_rcv);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_TFO_COOKIE_REQ_RCV,
			    data->u.tcp_stats.tfo_cookie_req_rcv);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_TFO_COOKIE_SENT,
			    data->u.tcp_stats.tfo_cookie_sent);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_TFO_COOKIE_INVALID,
			    data->u.tcp_stats.tfo_cookie_invalid);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_TFO_COOKIE_REQ,
			    data->u.tcp_stats.tfo_cookie_req);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_TFO_COOKIE_RCV,
			    data->u.tcp_stats.tfo_cookie_rcv);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_TFO_SYN_DATA_SENT,
			    data->u.tcp_stats.tfo_syn_data_sent);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_TFO_SYN_DATA_ACKED,
			    data->u.tcp_stats.tfo_syn_data_acked);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_TFO_SYN_LOSS,
			    data->u.tcp_stats.tfo_syn_loss);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_TFO_BLACKHOLE,
			    data->u.tcp_stats.tfo_blackhole);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_TFO_COOKIE_WRONG,
			    data->u.tcp_stats.tfo_cookie_wrong);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_TFO_NO_COOKIE_RCV,
			    data->u.tcp_stats.tfo_no_cookie_rcv);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_TFO_HEURISTICS_DISABLE,
			    data->u.tcp_stats.tfo_heuristics_disable);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_TFO_SEND_BLACKHOLE,
			    data->u.tcp_stats.tfo_sndblackhole);
			VERIFY(i == nkeyvals);
			break;
		}
		case NSTAT_SYSINFO_IFNET_ECN_STATS:
		{
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_TYPE,
			    data->u.ifnet_ecn_stats.ifnet_type);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_PROTO,
			    data->u.ifnet_ecn_stats.ifnet_proto);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_CLIENT_SETUP,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_client_setup);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_SERVER_SETUP,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_server_setup);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_CLIENT_SUCCESS,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_client_success);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_SERVER_SUCCESS,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_server_success);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_PEER_NOSUPPORT,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_peer_nosupport);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_SYN_LOST,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_syn_lost);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_SYNACK_LOST,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_synack_lost);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_RECV_CE,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_recv_ce);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_RECV_ECE,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_recv_ece);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_CONN_RECV_CE,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_recv_ce);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_CONN_RECV_ECE,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_recv_ece);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_CONN_PLNOCE,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_plnoce);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_CONN_PLCE,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_plce);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_CONN_NOPLCE,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_noplce);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_FALLBACK_SYNLOSS,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_synloss);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_FALLBACK_REORDER,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_reorder);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_FALLBACK_CE,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_ce);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_ON_RTT_AVG,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rtt_avg);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_ON_RTT_VAR,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rtt_var);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_ON_OOPERCENT,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_on.oo_percent);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_ON_SACK_EPISODE,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_on.sack_episodes);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_ON_REORDER_PERCENT,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_on.reorder_percent);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_ON_RXMIT_PERCENT,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rxmit_percent);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_ON_RXMIT_DROP,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rxmit_drop);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_OFF_RTT_AVG,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rtt_avg);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_OFF_RTT_VAR,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rtt_var);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_OFF_OOPERCENT,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_off.oo_percent);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_OFF_SACK_EPISODE,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_off.sack_episodes);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_OFF_REORDER_PERCENT,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_off.reorder_percent);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_OFF_RXMIT_PERCENT,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rxmit_percent);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_OFF_RXMIT_DROP,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rxmit_drop);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_TXPKTS,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_txpkts);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_RXMTPKTS,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_rxmitpkts);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_RXPKTS,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_rxpkts);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_OOPKTS,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_oopkts);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_ON_DROP_RST,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rst_drop);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_TXPKTS,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_txpkts);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_RXMTPKTS,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_rxmitpkts);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_RXPKTS,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_rxpkts);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_OOPKTS,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_oopkts);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_OFF_DROP_RST,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rst_drop);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_TOTAL_CONN,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_total_conn);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_IFNET_UNSENT_DATA,
			    data->unsent_data_cnt);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_FALLBACK_DROPRST,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_droprst);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_FALLBACK_DROPRXMT,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_droprxmt);
			break;
		}
	}

	VERIFY(i > 0 && i <= nkeyvals);
	countsize = offsetof(nstat_sysinfo_counts,
	    nstat_sysinfo_keyvals) +
	    sizeof(nstat_sysinfo_keyval) * i;
	finalsize += countsize;
	syscnt->hdr.type = NSTAT_MSG_TYPE_SYSINFO_COUNTS;
	syscnt->hdr.length = finalsize;
	syscnt->counts.nstat_sysinfo_len = countsize;

	result = ctl_enqueuedata(control->ncs_kctl,
	    control->ncs_unit, syscnt, finalsize, CTL_DATA_EOR);
	if (result != 0)
	{
		nstat_stats.nstat_sysinfofailures += 1;
	}
	OSFree(syscnt, allocsize, nstat_malloc_tag);
}
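/*
 * Worked sizing example for the message built above (illustrative):
 * for NSTAT_SYSINFO_MBUF_STATS, nkeyvals is 9, so
 *   allocsize = offsetof(nstat_msg_sysinfo_counts, counts)
 *             + offsetof(nstat_sysinfo_counts, nstat_sysinfo_keyvals)
 *             + 9 * sizeof(nstat_sysinfo_keyval).
 * finalsize is then recomputed from the number of keyvals actually
 * written (i), so a case that sets fewer keys than were allocated sends
 * a correspondingly shorter message; hdr.length and nstat_sysinfo_len
 * are what describe the truncated layout on the wire.
 */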
__private_extern__ void
nstat_sysinfo_send_data(
	nstat_sysinfo_data *data)
{
	nstat_control_state *control;

	lck_mtx_lock(&nstat_mtx);
	for (control = nstat_controls; control; control = control->ncs_next)
	{
		lck_mtx_lock(&control->mtx);
		if ((control->ncs_flags & NSTAT_FLAG_SYSINFO_SUBSCRIBED) != 0)
		{
			nstat_sysinfo_send_data_internal(control, data);
		}
		lck_mtx_unlock(&control->mtx);
	}
	lck_mtx_unlock(&nstat_mtx);
}
static void
nstat_sysinfo_generate_report(void)
{
	mbuf_report_peak_usage();
	nstat_ifnet_report_ecn_stats();
}
#pragma mark -- Kernel Control Socket --

static kern_ctl_ref	nstat_ctlref = NULL;
static lck_grp_t	*nstat_lck_grp = NULL;

static errno_t	nstat_control_connect(kern_ctl_ref kctl, struct sockaddr_ctl *sac, void **uinfo);
static errno_t	nstat_control_disconnect(kern_ctl_ref kctl, u_int32_t unit, void *uinfo);
static errno_t	nstat_control_send(kern_ctl_ref kctl, u_int32_t unit, void *uinfo, mbuf_t m, int flags);
static void
nstat_enqueue_success(
	uint64_t		context,
	nstat_control_state	*state,
	u_int16_t		flags)
{
	nstat_msg_hdr success;
	errno_t result;

	bzero(&success, sizeof(success));
	success.context = context;
	success.type = NSTAT_MSG_TYPE_SUCCESS;
	success.length = sizeof(success);
	success.flags = flags;
	result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &success,
	    sizeof(success), CTL_DATA_EOR | CTL_DATA_CRIT);
	if (result != 0) {
		if (nstat_debug != 0)
			printf("%s: could not enqueue success message %d\n",
			    __func__, result);
		nstat_stats.nstat_successmsgfailures += 1;
	}
}
static errno_t
nstat_control_send_goodbye(
	nstat_control_state	*state,
	nstat_src		*src)
{
	errno_t result = 0;
	int failed = 0;

	if (nstat_control_reporting_allowed(state, src))
	{
		if ((state->ncs_flags & NSTAT_FLAG_SUPPORTS_UPDATES) != 0)
		{
			result = nstat_control_send_update(state, src, 0, NSTAT_MSG_HDR_FLAG_CLOSING, NULL);
			if (result != 0)
			{
				failed = 1;
				if (nstat_debug != 0)
					printf("%s - nstat_control_send_update() %d\n", __func__, result);
			}
		}
		else
		{
			// send one last counts notification
			result = nstat_control_send_counts(state, src, 0, NSTAT_MSG_HDR_FLAG_CLOSING, NULL);
			if (result != 0)
			{
				failed = 1;
				if (nstat_debug != 0)
					printf("%s - nstat_control_send_counts() %d\n", __func__, result);
			}

			// send a last description
			result = nstat_control_send_description(state, src, 0, NSTAT_MSG_HDR_FLAG_CLOSING);
			if (result != 0)
			{
				failed = 1;
				if (nstat_debug != 0)
					printf("%s - nstat_control_send_description() %d\n", __func__, result);
			}
		}
	}

	// send the source removed notification
	result = nstat_control_send_removed(state, src);
	if (result != 0 && nstat_debug)
	{
		failed = 1;
		if (nstat_debug != 0)
			printf("%s - nstat_control_send_removed() %d\n", __func__, result);
	}

	if (failed != 0)
		nstat_stats.nstat_control_send_goodbye_failures++;

	return result;
}
static void
nstat_flush_accumulated_msgs(
	nstat_control_state	*state)
{
	errno_t result = 0;
	if (state->ncs_accumulated != NULL && mbuf_len(state->ncs_accumulated) > 0)
	{
		mbuf_pkthdr_setlen(state->ncs_accumulated, mbuf_len(state->ncs_accumulated));
		result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, state->ncs_accumulated, CTL_DATA_EOR);
		if (result != 0)
		{
			nstat_stats.nstat_flush_accumulated_msgs_failures++;
			if (nstat_debug != 0)
				printf("%s - ctl_enqueuembuf failed: %d\n", __func__, result);
			mbuf_freem(state->ncs_accumulated);
		}
		state->ncs_accumulated = NULL;
	}
}
static errno_t
nstat_accumulate_msg(
	nstat_control_state	*state,
	nstat_msg_hdr		*hdr,
	size_t			length)
{
	if (state->ncs_accumulated && mbuf_trailingspace(state->ncs_accumulated) < length)
	{
		// Will send the current mbuf
		nstat_flush_accumulated_msgs(state);
	}

	errno_t result = 0;

	if (state->ncs_accumulated == NULL)
	{
		unsigned int one = 1;
		if (mbuf_allocpacket(MBUF_DONTWAIT, NSTAT_MAX_MSG_SIZE, &one, &state->ncs_accumulated) != 0)
		{
			if (nstat_debug != 0)
				printf("%s - mbuf_allocpacket failed\n", __func__);
			result = ENOMEM;
		}
		else
		{
			mbuf_setlen(state->ncs_accumulated, 0);
		}
	}

	if (result == 0)
	{
		hdr->length = length;
		result = mbuf_copyback(state->ncs_accumulated, mbuf_len(state->ncs_accumulated),
		    length, hdr, MBUF_DONTWAIT);
	}

	if (result != 0)
	{
		nstat_flush_accumulated_msgs(state);
		if (nstat_debug != 0)
			printf("%s - resorting to ctl_enqueuedata\n", __func__);
		result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, hdr, length, CTL_DATA_EOR);
	}

	if (result != 0)
	{
		nstat_stats.nstat_accumulate_msg_failures++;
	}

	return result;
}
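/*
 * Illustrative note: nstat_accumulate_msg() exists so that a query over
 * many sources batches small records into one NSTAT_MAX_MSG_SIZE mbuf
 * rather than paying one enqueue per source. A caller loop would look
 * roughly like this sketch (error handling assumed; EAGAIN here being
 * the append functions' "record filtered out, keep going" result):
 */
#if 0
	for (src = state->ncs_srcs; src; src = src->next)
	{
		result = nstat_control_append_counts(state, src, &gone);
		if (result != 0 && result != EAGAIN)
			break;			// hard failure, stop the walk
	}
	nstat_flush_accumulated_msgs(state);	// push the final partial mbuf
#endif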
static void
nstat_idle_check(
	__unused thread_call_param_t p0,
	__unused thread_call_param_t p1)
{
	lck_mtx_lock(&nstat_mtx);

	nstat_idle_time = 0;

	nstat_control_state *control;
	nstat_src *dead = NULL;
	nstat_src *dead_list = NULL;
	for (control = nstat_controls; control; control = control->ncs_next)
	{
		lck_mtx_lock(&control->mtx);
		nstat_src **srcpp = &control->ncs_srcs;

		if (!(control->ncs_flags & NSTAT_FLAG_REQCOUNTS))
		{
			while(*srcpp != NULL)
			{
				if ((*srcpp)->provider->nstat_gone((*srcpp)->cookie))
				{
					errno_t result;

					// Pull it off the list
					dead = *srcpp;
					*srcpp = (*srcpp)->next;

					result = nstat_control_send_goodbye(control, dead);

					// Put this on the list to release later
					dead->next = dead_list;
					dead_list = dead;
				}
				else
				{
					srcpp = &(*srcpp)->next;
				}
			}
		}
		control->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS;
		lck_mtx_unlock(&control->mtx);
	}

	if (nstat_controls)
	{
		clock_interval_to_deadline(60, NSEC_PER_SEC, &nstat_idle_time);
		thread_call_func_delayed((thread_call_func_t)nstat_idle_check, NULL, nstat_idle_time);
	}

	lck_mtx_unlock(&nstat_mtx);

	/* Generate any system level reports, if needed */
	nstat_sysinfo_generate_report();

	// Release the sources now that we aren't holding lots of locks
	while (dead_list)
	{
		dead = dead_list;
		dead_list = dead->next;

		nstat_control_cleanup_source(NULL, dead, FALSE);
	}
}
static void
nstat_control_register(void)
{
	// Create our lock group first
	lck_grp_attr_t *grp_attr = lck_grp_attr_alloc_init();
	lck_grp_attr_setdefault(grp_attr);
	nstat_lck_grp = lck_grp_alloc_init("network statistics kctl", grp_attr);
	lck_grp_attr_free(grp_attr);

	lck_mtx_init(&nstat_mtx, nstat_lck_grp, NULL);

	// Register the control
	struct kern_ctl_reg nstat_control;
	bzero(&nstat_control, sizeof(nstat_control));
	strlcpy(nstat_control.ctl_name, NET_STAT_CONTROL_NAME, sizeof(nstat_control.ctl_name));
	nstat_control.ctl_flags = CTL_FLAG_REG_EXTENDED | CTL_FLAG_REG_CRIT;
	nstat_control.ctl_sendsize = nstat_sendspace;
	nstat_control.ctl_recvsize = nstat_recvspace;
	nstat_control.ctl_connect = nstat_control_connect;
	nstat_control.ctl_disconnect = nstat_control_disconnect;
	nstat_control.ctl_send = nstat_control_send;

	ctl_register(&nstat_control, &nstat_ctlref);
}
static void
nstat_control_cleanup_source(
	nstat_control_state	*state,
	struct nstat_src	*src,
	boolean_t		locked)
{
	errno_t result;

	if (state)
	{
		result = nstat_control_send_removed(state, src);
		if (result != 0)
		{
			nstat_stats.nstat_control_cleanup_source_failures++;
			if (nstat_debug != 0)
				printf("%s - nstat_control_send_removed() %d\n",
				    __func__, result);
		}
	}
	// Cleanup the source if we found it.
	src->provider->nstat_release(src->cookie, locked);
	OSFree(src, sizeof(*src), nstat_malloc_tag);
}
static bool
nstat_control_reporting_allowed(
	nstat_control_state	*state,
	nstat_src		*src)
{
	if (src->provider->nstat_reporting_allowed == NULL)
		return TRUE;

	return (
	    src->provider->nstat_reporting_allowed(src->cookie,
		&state->ncs_provider_filters[src->provider->nstat_provider_id])
	);
}
static errno_t
nstat_control_connect(
	kern_ctl_ref		kctl,
	struct sockaddr_ctl	*sac,
	void			**uinfo)
{
	nstat_control_state *state = OSMalloc(sizeof(*state), nstat_malloc_tag);
	if (state == NULL) return ENOMEM;

	bzero(state, sizeof(*state));
	lck_mtx_init(&state->mtx, nstat_lck_grp, NULL);
	state->ncs_kctl = kctl;
	state->ncs_unit = sac->sc_unit;
	state->ncs_flags = NSTAT_FLAG_REQCOUNTS;
	*uinfo = state;

	lck_mtx_lock(&nstat_mtx);
	state->ncs_next = nstat_controls;
	nstat_controls = state;

	if (nstat_idle_time == 0)
	{
		clock_interval_to_deadline(60, NSEC_PER_SEC, &nstat_idle_time);
		thread_call_func_delayed((thread_call_func_t)nstat_idle_check, NULL, nstat_idle_time);
	}

	lck_mtx_unlock(&nstat_mtx);

	return 0;
}
static errno_t
nstat_control_disconnect(
	__unused kern_ctl_ref	kctl,
	__unused u_int32_t	unit,
	void			*uinfo)
{
	u_int32_t watching;
	nstat_control_state *state = (nstat_control_state *)uinfo;

	// pull it out of the global list of states
	lck_mtx_lock(&nstat_mtx);
	nstat_control_state **statepp;
	for (statepp = &nstat_controls; *statepp; statepp = &(*statepp)->ncs_next)
	{
		if (*statepp == state)
		{
			*statepp = state->ncs_next;
			break;
		}
	}
	lck_mtx_unlock(&nstat_mtx);

	lck_mtx_lock(&state->mtx);
	// Stop watching for sources
	nstat_provider *provider;
	watching = state->ncs_watching;
	state->ncs_watching = 0;
	for (provider = nstat_providers; provider && watching; provider = provider->next)
	{
		if ((watching & (1 << provider->nstat_provider_id)) != 0)
		{
			watching &= ~(1 << provider->nstat_provider_id);
			provider->nstat_watcher_remove(state);
		}
	}

	// set cleanup flags
	state->ncs_flags |= NSTAT_FLAG_CLEANUP;

	if (state->ncs_accumulated)
	{
		mbuf_freem(state->ncs_accumulated);
		state->ncs_accumulated = NULL;
	}

	// Copy out the list of sources
	nstat_src *srcs = state->ncs_srcs;
	state->ncs_srcs = NULL;
	lck_mtx_unlock(&state->mtx);

	while (srcs)
	{
		nstat_src *src;

		// pull it out of the list
		src = srcs;
		srcs = src->next;

		nstat_control_cleanup_source(NULL, src, FALSE);
	}
	lck_mtx_destroy(&state->mtx, nstat_lck_grp);
	OSFree(state, sizeof(*state), nstat_malloc_tag);

	return 0;
}
static nstat_src_ref_t
nstat_control_next_src_ref(
	nstat_control_state	*state)
{
	return ++state->ncs_next_srcref;
}
static errno_t
nstat_control_send_counts(
	nstat_control_state	*state,
	nstat_src		*src,
	unsigned long long	context,
	u_int16_t		hdr_flags,
	int			*gone)
{
	nstat_msg_src_counts counts;
	errno_t result = 0;

	/* Some providers may not have any counts to send */
	if (src->provider->nstat_counts == NULL)
		return 0;

	bzero(&counts, sizeof(counts));
	counts.hdr.type = NSTAT_MSG_TYPE_SRC_COUNTS;
	counts.hdr.length = sizeof(counts);
	counts.hdr.flags = hdr_flags;
	counts.hdr.context = context;
	counts.srcref = src->srcref;
	counts.event_flags = 0;

	if (src->provider->nstat_counts(src->cookie, &counts.counts, gone) == 0)
	{
		if ((src->filter & NSTAT_FILTER_NOZEROBYTES) &&
		    counts.counts.nstat_rxbytes == 0 &&
		    counts.counts.nstat_txbytes == 0)
		{
			result = EAGAIN;
		}
		else
		{
			result = ctl_enqueuedata(state->ncs_kctl,
			    state->ncs_unit, &counts, sizeof(counts),
			    CTL_DATA_EOR);
			if (result != 0)
				nstat_stats.nstat_sendcountfailures += 1;
		}
	}
	return result;
}
static errno_t
nstat_control_append_counts(
	nstat_control_state	*state,
	nstat_src		*src,
	int			*gone)
{
	/* Some providers may not have any counts to send */
	if (!src->provider->nstat_counts) return 0;

	nstat_msg_src_counts counts;
	bzero(&counts, sizeof(counts));
	counts.hdr.type = NSTAT_MSG_TYPE_SRC_COUNTS;
	counts.hdr.length = sizeof(counts);
	counts.srcref = src->srcref;
	counts.event_flags = 0;

	errno_t result = 0;
	result = src->provider->nstat_counts(src->cookie, &counts.counts, gone);
	if (result != 0)
	{
		return result;
	}

	if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
	    counts.counts.nstat_rxbytes == 0 && counts.counts.nstat_txbytes == 0)
	{
		return EAGAIN;
	}

	return nstat_accumulate_msg(state, &counts.hdr, counts.hdr.length);
}
static errno_t
nstat_control_send_description(
	nstat_control_state	*state,
	nstat_src		*src,
	u_int64_t		context,
	u_int16_t		hdr_flags)
{
	// Provider doesn't support getting the descriptor? Done.
	if (src->provider->nstat_descriptor_length == 0 ||
	    src->provider->nstat_copy_descriptor == NULL)
	{
		return EOPNOTSUPP;
	}

	// Allocate storage for the descriptor message
	mbuf_t		msg;
	unsigned int	one = 1;
	u_int32_t	size = offsetof(nstat_msg_src_description, data) + src->provider->nstat_descriptor_length;
	if (mbuf_allocpacket(MBUF_DONTWAIT, size, &one, &msg) != 0)
	{
		return ENOMEM;
	}

	nstat_msg_src_description *desc = (nstat_msg_src_description *)mbuf_data(msg);
	bzero(desc, size);
	mbuf_setlen(msg, size);
	mbuf_pkthdr_setlen(msg, mbuf_len(msg));

	// Query the provider for the provider specific bits
	errno_t result = src->provider->nstat_copy_descriptor(src->cookie, desc->data, src->provider->nstat_descriptor_length);

	if (result != 0)
	{
		mbuf_freem(msg);
		return result;
	}

	desc->hdr.context = context;
	desc->hdr.type = NSTAT_MSG_TYPE_SRC_DESC;
	desc->hdr.length = size;
	desc->hdr.flags = hdr_flags;
	desc->srcref = src->srcref;
	desc->event_flags = 0;
	desc->provider = src->provider->nstat_provider_id;

	result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg, CTL_DATA_EOR);
	if (result != 0)
	{
		nstat_stats.nstat_descriptionfailures += 1;
		mbuf_freem(msg);
	}

	return result;
}
static errno_t
nstat_control_append_description(
	nstat_control_state	*state,
	nstat_src		*src)
{
	size_t size = offsetof(nstat_msg_src_description, data) + src->provider->nstat_descriptor_length;
	if (size > 512 || src->provider->nstat_descriptor_length == 0 ||
	    src->provider->nstat_copy_descriptor == NULL)
	{
		return EOPNOTSUPP;
	}

	// Fill out a buffer on the stack, we will copy to the mbuf later
	u_int64_t buffer[size/sizeof(u_int64_t) + 1]; // u_int64_t to ensure alignment
	bzero(buffer, size);

	nstat_msg_src_description *desc = (nstat_msg_src_description *)buffer;
	desc->hdr.type = NSTAT_MSG_TYPE_SRC_DESC;
	desc->hdr.length = size;
	desc->srcref = src->srcref;
	desc->event_flags = 0;
	desc->provider = src->provider->nstat_provider_id;

	errno_t result = 0;
	// Fill in the description
	// Query the provider for the provider specific bits
	result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
	    src->provider->nstat_descriptor_length);
	if (result != 0)
	{
		return result;
	}

	return nstat_accumulate_msg(state, &desc->hdr, size);
}
static errno_t
nstat_control_send_update(
	nstat_control_state	*state,
	nstat_src		*src,
	u_int64_t		context,
	u_int16_t		hdr_flags,
	int			*gone)
{
	// Provider doesn't support getting the descriptor or counts? Done.
	if ((src->provider->nstat_descriptor_length == 0 ||
	     src->provider->nstat_copy_descriptor == NULL) &&
	    src->provider->nstat_counts == NULL)
	{
		return EOPNOTSUPP;
	}

	// Allocate storage for the descriptor message
	mbuf_t		msg;
	unsigned int	one = 1;
	u_int32_t	size = offsetof(nstat_msg_src_update, data) +
			    src->provider->nstat_descriptor_length;
	if (mbuf_allocpacket(MBUF_DONTWAIT, size, &one, &msg) != 0)
	{
		return ENOMEM;
	}

	nstat_msg_src_update *desc = (nstat_msg_src_update *)mbuf_data(msg);
	bzero(desc, size);
	desc->hdr.context = context;
	desc->hdr.type = NSTAT_MSG_TYPE_SRC_UPDATE;
	desc->hdr.length = size;
	desc->hdr.flags = hdr_flags;
	desc->srcref = src->srcref;
	desc->event_flags = 0;
	desc->provider = src->provider->nstat_provider_id;

	mbuf_setlen(msg, size);
	mbuf_pkthdr_setlen(msg, mbuf_len(msg));

	errno_t result = 0;
	if (src->provider->nstat_descriptor_length != 0 && src->provider->nstat_copy_descriptor)
	{
		// Query the provider for the provider specific bits
		result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
		    src->provider->nstat_descriptor_length);
		if (result != 0)
		{
			mbuf_freem(msg);
			return result;
		}
	}

	if (src->provider->nstat_counts)
	{
		result = src->provider->nstat_counts(src->cookie, &desc->counts, gone);
		if (result == 0)
		{
			if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
			    desc->counts.nstat_rxbytes == 0 && desc->counts.nstat_txbytes == 0)
			{
				result = EAGAIN;
			}
			else
			{
				result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg, CTL_DATA_EOR);
			}
		}
	}

	if (result != 0)
	{
		nstat_stats.nstat_srcupatefailures += 1;
		mbuf_freem(msg);
	}

	return result;
}
static errno_t
nstat_control_append_update(
	nstat_control_state	*state,
	nstat_src		*src,
	int			*gone)
{
	size_t size = offsetof(nstat_msg_src_update, data) + src->provider->nstat_descriptor_length;
	if (size > 512 || ((src->provider->nstat_descriptor_length == 0 ||
	    src->provider->nstat_copy_descriptor == NULL) &&
	    src->provider->nstat_counts == NULL))
	{
		return EOPNOTSUPP;
	}

	// Fill out a buffer on the stack, we will copy to the mbuf later
	u_int64_t buffer[size/sizeof(u_int64_t) + 1]; // u_int64_t to ensure alignment
	bzero(buffer, size);

	nstat_msg_src_update *desc = (nstat_msg_src_update *)buffer;
	desc->hdr.type = NSTAT_MSG_TYPE_SRC_UPDATE;
	desc->hdr.length = size;
	desc->srcref = src->srcref;
	desc->event_flags = 0;
	desc->provider = src->provider->nstat_provider_id;

	errno_t result = 0;
	// Fill in the description
	if (src->provider->nstat_descriptor_length != 0 && src->provider->nstat_copy_descriptor)
	{
		// Query the provider for the provider specific bits
		result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
		    src->provider->nstat_descriptor_length);
		if (result != 0)
		{
			nstat_stats.nstat_copy_descriptor_failures++;
			if (nstat_debug != 0)
				printf("%s: src->provider->nstat_copy_descriptor: %d\n", __func__, result);
			return result;
		}
	}

	if (src->provider->nstat_counts)
	{
		result = src->provider->nstat_counts(src->cookie, &desc->counts, gone);
		if (result != 0)
		{
			nstat_stats.nstat_provider_counts_failures++;
			if (nstat_debug != 0)
				printf("%s: src->provider->nstat_counts: %d\n", __func__, result);
			return result;
		}

		if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
		    desc->counts.nstat_rxbytes == 0 && desc->counts.nstat_txbytes == 0)
		{
			return EAGAIN;
		}
	}

	return nstat_accumulate_msg(state, &desc->hdr, size);
}
static errno_t
nstat_control_send_removed(
	nstat_control_state	*state,
	nstat_src		*src)
{
	nstat_msg_src_removed removed;
	errno_t result;

	bzero(&removed, sizeof(removed));
	removed.hdr.type = NSTAT_MSG_TYPE_SRC_REMOVED;
	removed.hdr.length = sizeof(removed);
	removed.hdr.context = 0;
	removed.srcref = src->srcref;
	result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &removed,
	    sizeof(removed), CTL_DATA_EOR | CTL_DATA_CRIT);
	if (result != 0)
		nstat_stats.nstat_msgremovedfailures += 1;

	return result;
}
static errno_t
nstat_control_handle_add_request(
	nstat_control_state	*state,
	mbuf_t			m)
{
	errno_t result;

	// Verify the header fits in the first mbuf
	if (mbuf_len(m) < offsetof(nstat_msg_add_src_req, param))
	{
		return EINVAL;
	}

	// Calculate the length of the parameter field
	int32_t paramlength = mbuf_pkthdr_len(m) - offsetof(nstat_msg_add_src_req, param);
	if (paramlength < 0 || paramlength > 2 * 1024)
	{
		return EINVAL;
	}

	nstat_provider		*provider;
	nstat_provider_cookie_t	cookie;
	nstat_msg_add_src_req	*req = mbuf_data(m);
	if (mbuf_pkthdr_len(m) > mbuf_len(m))
	{
		// parameter is too large, we need to make a contiguous copy
		void *data = OSMalloc(paramlength, nstat_malloc_tag);

		if (!data) return ENOMEM;
		result = mbuf_copydata(m, offsetof(nstat_msg_add_src_req, param), paramlength, data);
		if (result == 0)
			result = nstat_lookup_entry(req->provider, data, paramlength, &provider, &cookie);
		OSFree(data, paramlength, nstat_malloc_tag);
	}
	else
	{
		result = nstat_lookup_entry(req->provider, (void*)&req->param, paramlength, &provider, &cookie);
	}

	if (result != 0)
	{
		return result;
	}

	result = nstat_control_source_add(req->hdr.context, state, provider, cookie);
	if (result != 0)
		provider->nstat_release(cookie, 0);

	return result;
}
static errno_t
nstat_control_handle_add_all(
	nstat_control_state	*state,
	mbuf_t			m)
{
	errno_t	result = 0;

	// Verify the header fits in the first mbuf
	if (mbuf_len(m) < sizeof(nstat_msg_add_all_srcs))
	{
		return EINVAL;
	}

	nstat_msg_add_all_srcs	*req = mbuf_data(m);
	if (req->provider > NSTAT_PROVIDER_LAST) return ENOENT;

	nstat_provider	*provider = nstat_find_provider_by_id(req->provider);

	if (!provider) return ENOENT;
	if (provider->nstat_watcher_add == NULL) return ENOTSUP;

	if (nstat_privcheck != 0) {
		result = priv_check_cred(kauth_cred_get(),
			PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0);
		if (result != 0)
			return result;
	}

	// Make sure we don't add the provider twice
	lck_mtx_lock(&state->mtx);
	if ((state->ncs_watching & (1 << provider->nstat_provider_id)) != 0)
		result = EALREADY;
	state->ncs_watching |= (1 << provider->nstat_provider_id);
	lck_mtx_unlock(&state->mtx);
	if (result != 0) return result;

	state->ncs_provider_filters[req->provider].npf_flags = req->filter;
	state->ncs_provider_filters[req->provider].npf_events = req->events;
	state->ncs_provider_filters[req->provider].npf_pid = req->target_pid;
	memcpy(state->ncs_provider_filters[req->provider].npf_uuid, req->target_uuid,
		sizeof(state->ncs_provider_filters[req->provider].npf_uuid));

	result = provider->nstat_watcher_add(state);
	if (result != 0)
	{
		// The watcher failed to attach; undo the filter and watching state
		state->ncs_provider_filters[req->provider].npf_flags = 0;
		state->ncs_provider_filters[req->provider].npf_events = 0;
		state->ncs_provider_filters[req->provider].npf_pid = 0;
		bzero(state->ncs_provider_filters[req->provider].npf_uuid,
			sizeof(state->ncs_provider_filters[req->provider].npf_uuid));

		lck_mtx_lock(&state->mtx);
		state->ncs_watching &= ~(1 << provider->nstat_provider_id);
		lck_mtx_unlock(&state->mtx);
	}
	else
	{
		nstat_enqueue_success(req->hdr.context, state, 0);
	}

	return result;
}
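
/*
 * Illustrative sketch, not part of this file: a privileged client watches
 * every source of one provider by sending NSTAT_MSG_TYPE_ADD_ALL_SRCS over
 * the com.apple.network.statistics kernel control. NSTAT_PROVIDER_TCP_KERNEL
 * is assumed here purely as an example provider id.
 *
 *	struct ctl_info info;
 *	struct sockaddr_ctl sc;
 *	int fd = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);
 *
 *	bzero(&info, sizeof(info));
 *	strlcpy(info.ctl_name, NET_STAT_CONTROL_NAME, sizeof(info.ctl_name));
 *	ioctl(fd, CTLIOCGINFO, &info);		// resolve the control id
 *
 *	bzero(&sc, sizeof(sc));
 *	sc.sc_len = sizeof(sc);
 *	sc.sc_family = AF_SYSTEM;
 *	sc.ss_sysaddr = AF_SYS_CONTROL;
 *	sc.sc_id = info.ctl_id;
 *	connect(fd, (struct sockaddr *)&sc, sizeof(sc));
 *
 *	nstat_msg_add_all_srcs req;
 *	bzero(&req, sizeof(req));
 *	req.hdr.type = NSTAT_MSG_TYPE_ADD_ALL_SRCS;
 *	req.hdr.length = sizeof(req);
 *	req.hdr.context = 1;			// echoed back in replies
 *	req.provider = NSTAT_PROVIDER_TCP_KERNEL;
 *	send(fd, &req, sizeof(req), 0);		// SRC_ADDED messages follow
 */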
static errno_t
nstat_control_source_add(
	u_int64_t		context,
	nstat_control_state	*state,
	nstat_provider		*provider,
	nstat_provider_cookie_t	cookie)
{
	// Fill out source added message if appropriate
	mbuf_t		msg = NULL;
	nstat_src_ref_t	*srcrefp = NULL;

	u_int64_t provider_filter_flags =
		state->ncs_provider_filters[provider->nstat_provider_id].npf_flags;
	boolean_t tell_user =
		((provider_filter_flags & NSTAT_FILTER_SUPPRESS_SRC_ADDED) == 0);
	u_int32_t src_filter =
		(provider_filter_flags & NSTAT_FILTER_PROVIDER_NOZEROBYTES)
			? NSTAT_FILTER_NOZEROBYTES : 0;

	if (tell_user)
	{
		unsigned int one = 1;

		if (mbuf_allocpacket(MBUF_DONTWAIT, sizeof(nstat_msg_src_added),
			&one, &msg) != 0)
			return ENOMEM;

		mbuf_setlen(msg, sizeof(nstat_msg_src_added));
		mbuf_pkthdr_setlen(msg, mbuf_len(msg));
		nstat_msg_src_added	*add = mbuf_data(msg);
		bzero(add, sizeof(*add));
		add->hdr.type = NSTAT_MSG_TYPE_SRC_ADDED;
		add->hdr.length = mbuf_len(msg);
		add->hdr.context = context;
		add->provider = provider->nstat_provider_id;
		srcrefp = &add->srcref;
	}

	// Allocate storage for the source
	nstat_src	*src = OSMalloc(sizeof(*src), nstat_malloc_tag);
	if (src == NULL)
	{
		if (msg) mbuf_freem(msg);
		return ENOMEM;
	}

	// Fill in the source, including picking an unused source ref
	lck_mtx_lock(&state->mtx);

	src->srcref = nstat_control_next_src_ref(state);
	if (srcrefp)
		*srcrefp = src->srcref;

	if (state->ncs_flags & NSTAT_FLAG_CLEANUP || src->srcref == NSTAT_SRC_REF_INVALID)
	{
		lck_mtx_unlock(&state->mtx);
		OSFree(src, sizeof(*src), nstat_malloc_tag);
		if (msg) mbuf_freem(msg);
		return EINVAL;
	}
	src->provider = provider;
	src->cookie = cookie;
	src->filter = src_filter;
	src->seq = 0;

	if (msg)
	{
		// send the source added message if appropriate
		errno_t result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg,
			CTL_DATA_EOR);
		if (result != 0)
		{
			nstat_stats.nstat_srcaddedfailures += 1;
			lck_mtx_unlock(&state->mtx);
			OSFree(src, sizeof(*src), nstat_malloc_tag);
			mbuf_freem(msg);
			return result;
		}
	}

	// Put the source in the list
	src->next = state->ncs_srcs;
	state->ncs_srcs = src;

	lck_mtx_unlock(&state->mtx);

	return 0;
}
static errno_t
nstat_control_handle_remove_request(
	nstat_control_state	*state,
	mbuf_t			m)
{
	nstat_src_ref_t	srcref = NSTAT_SRC_REF_INVALID;

	if (mbuf_copydata(m, offsetof(nstat_msg_rem_src_req, srcref), sizeof(srcref), &srcref) != 0)
	{
		return EINVAL;
	}

	lck_mtx_lock(&state->mtx);

	// Remove this source as we look for it
	nstat_src	**nextp;
	nstat_src	*src = NULL;
	for (nextp = &state->ncs_srcs; *nextp; nextp = &(*nextp)->next)
	{
		if ((*nextp)->srcref == srcref)
		{
			src = *nextp;
			*nextp = src->next;
			break;
		}
	}

	lck_mtx_unlock(&state->mtx);

	if (src) nstat_control_cleanup_source(state, src, FALSE);

	return src ? 0 : ENOENT;
}
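
/*
 * Illustrative sketch, not part of this file: removal is symmetric with
 * adding. The client echoes back the srcref it received in the SRC_ADDED
 * message; `fd` and `srcref` are assumed from an earlier exchange.
 *
 *	nstat_msg_rem_src_req rem;
 *	bzero(&rem, sizeof(rem));
 *	rem.hdr.type = NSTAT_MSG_TYPE_REM_SRC;
 *	rem.hdr.length = sizeof(rem);
 *	rem.srcref = srcref;
 *	send(fd, &rem, sizeof(rem), 0);		// kernel answers with SRC_REMOVED
 */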
static errno_t
nstat_control_handle_query_request(
	nstat_control_state	*state,
	mbuf_t			m)
{
	// TBD: handle this from another thread so we can enqueue a lot of data
	// As written, if a client requests query all, this function will be
	// called from their send of the request message. We will attempt to write
	// responses and succeed until the buffer fills up. Since the client's thread
	// is blocked on send, it won't be reading unless the client has two threads
	// using this socket, one for read and one for write. Two threads probably
	// won't work with this code anyhow since we don't have proper locking in
	// place yet.
	nstat_src		*dead_srcs = NULL;
	errno_t			result = ENOENT;
	nstat_msg_query_src_req	req;

	if (mbuf_copydata(m, 0, sizeof(req), &req) != 0)
	{
		return EINVAL;
	}

	const boolean_t all_srcs = (req.srcref == NSTAT_SRC_REF_ALL);

	lck_mtx_lock(&state->mtx);

	if (all_srcs)
	{
		state->ncs_flags |= NSTAT_FLAG_REQCOUNTS;
	}
	nstat_src	**srcpp = &state->ncs_srcs;
	u_int64_t	src_count = 0;
	boolean_t	partial = FALSE;

	/*
	 * Error handling policy and sequence number generation is folded into
	 * nstat_control_begin_query.
	 */
	partial = nstat_control_begin_query(state, &req.hdr);

	while (*srcpp != NULL
		&& (!partial || src_count < QUERY_CONTINUATION_SRC_COUNT))
	{
		nstat_src	*src = NULL;
		int		gone = 0;

		src = *srcpp;
		// XXX ignore IFACE types?
		if (all_srcs || src->srcref == req.srcref)
		{
			if (nstat_control_reporting_allowed(state, src)
				&& (!partial || !all_srcs || src->seq != state->ncs_seq))
			{
				if (all_srcs &&
					(req.hdr.flags & NSTAT_MSG_HDR_FLAG_SUPPORTS_AGGREGATE) != 0)
				{
					result = nstat_control_append_counts(state, src, &gone);
				}
				else
				{
					result = nstat_control_send_counts(state, src, req.hdr.context, 0, &gone);
				}

				if (ENOMEM == result || ENOBUFS == result)
				{
					/*
					 * If the counts message failed to
					 * enqueue then we should clear our flag so
					 * that a client doesn't miss anything on
					 * idle cleanup. We skip the "gone"
					 * processing in the hope that we may
					 * catch it another time.
					 */
					state->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS;
					break;
				}
				if (partial)
				{
					/*
					 * We skip over hard errors and
					 * filtered sources.
					 */
					src->seq = state->ncs_seq;
					src_count++;
				}
			}
		}

		if (gone)
		{
			// send one last descriptor message so the client may see the last state
			// If we can't send the notification now, it
			// will be sent in the idle cleanup.
			result = nstat_control_send_description(state, *srcpp, 0, 0);
			if (result != 0)
			{
				nstat_stats.nstat_control_send_description_failures++;
				if (nstat_debug != 0)
					printf("%s - nstat_control_send_description() %d\n", __func__, result);
				state->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS;
				break;
			}

			// pull src out of the list
			*srcpp = src->next;

			src->next = dead_srcs;
			dead_srcs = src;
		}
		else
		{
			srcpp = &(*srcpp)->next;
		}

		if (!all_srcs && req.srcref == src->srcref)
		{
			break;
		}
	}
	nstat_flush_accumulated_msgs(state);

	u_int16_t flags = 0;
	if (req.srcref == NSTAT_SRC_REF_ALL)
		flags = nstat_control_end_query(state, *srcpp, partial);

	lck_mtx_unlock(&state->mtx);

	/*
	 * If an error occurred enqueueing data, then allow the error to
	 * propagate to nstat_control_send. This way, the error is sent to
	 * user-level.
	 */
	if (all_srcs && ENOMEM != result && ENOBUFS != result)
	{
		nstat_enqueue_success(req.hdr.context, state, flags);
		result = 0;
	}

	while (dead_srcs)
	{
		nstat_src	*src;

		src = dead_srcs;
		dead_srcs = src->next;

		// release src and send notification
		nstat_control_cleanup_source(state, src, FALSE);
	}

	return result;
}
static errno_t
nstat_control_handle_get_src_description(
	nstat_control_state	*state,
	mbuf_t			m)
{
	nstat_msg_get_src_description	req;
	errno_t		result = ENOENT;
	nstat_src	*src;

	if (mbuf_copydata(m, 0, sizeof(req), &req) != 0)
	{
		return EINVAL;
	}

	lck_mtx_lock(&state->mtx);
	u_int64_t src_count = 0;
	boolean_t partial = FALSE;
	const boolean_t all_srcs = (req.srcref == NSTAT_SRC_REF_ALL);

	/*
	 * Error handling policy and sequence number generation is folded into
	 * nstat_control_begin_query.
	 */
	partial = nstat_control_begin_query(state, &req.hdr);

	for (src = state->ncs_srcs;
		src && (!partial || src_count < QUERY_CONTINUATION_SRC_COUNT);
		src = src->next)
	{
		if (all_srcs || src->srcref == req.srcref)
		{
			if (nstat_control_reporting_allowed(state, src)
				&& (!all_srcs || !partial || src->seq != state->ncs_seq))
			{
				if ((req.hdr.flags & NSTAT_MSG_HDR_FLAG_SUPPORTS_AGGREGATE) != 0 && all_srcs)
				{
					result = nstat_control_append_description(state, src);
				}
				else
				{
					result = nstat_control_send_description(state, src, req.hdr.context, 0);
				}

				if (ENOMEM == result || ENOBUFS == result)
				{
					/*
					 * If the description message failed to
					 * enqueue then we give up for now.
					 */
					break;
				}
				if (partial)
				{
					/*
					 * Note, we skip over hard errors and
					 * filtered sources.
					 */
					src->seq = state->ncs_seq;
					src_count++;
				}
			}

			if (!all_srcs)
			{
				break;
			}
		}
	}
	nstat_flush_accumulated_msgs(state);

	u_int16_t flags = 0;
	if (req.srcref == NSTAT_SRC_REF_ALL)
		flags = nstat_control_end_query(state, src, partial);

	lck_mtx_unlock(&state->mtx);

	/*
	 * If an error occurred enqueueing data, then allow the error to
	 * propagate to nstat_control_send. This way, the error is sent to
	 * user-level.
	 */
	if (all_srcs && ENOMEM != result && ENOBUFS != result)
	{
		nstat_enqueue_success(req.hdr.context, state, flags);
		result = 0;
	}

	return result;
}
static errno_t
nstat_control_handle_set_filter(
	nstat_control_state	*state,
	mbuf_t			m)
{
	nstat_msg_set_filter	req;
	nstat_src		*src;

	if (mbuf_copydata(m, 0, sizeof(req), &req) != 0)
		return EINVAL;
	if (req.srcref == NSTAT_SRC_REF_ALL ||
		req.srcref == NSTAT_SRC_REF_INVALID)
		return EINVAL;

	lck_mtx_lock(&state->mtx);
	for (src = state->ncs_srcs; src; src = src->next)
		if (req.srcref == src->srcref)
		{
			src->filter = req.filter;
			break;
		}
	lck_mtx_unlock(&state->mtx);
	if (src == NULL)
		return ENOENT;

	return 0;
}
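
/*
 * Illustrative sketch, not part of this file: installing
 * NSTAT_FILTER_NOZEROBYTES on one source so that counts queries skip it
 * while it carries no traffic. `fd` and `srcref` are assumed as above;
 * note the handler rejects NSTAT_SRC_REF_ALL, so a concrete srcref is
 * required.
 *
 *	nstat_msg_set_filter sf;
 *	bzero(&sf, sizeof(sf));
 *	sf.hdr.type = NSTAT_MSG_TYPE_SET_FILTER;
 *	sf.hdr.length = sizeof(sf);
 *	sf.srcref = srcref;
 *	sf.filter = NSTAT_FILTER_NOZEROBYTES;
 *	send(fd, &sf, sizeof(sf), 0);
 */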
static void
nstat_send_error(
	nstat_control_state	*state,
	u_int64_t		context,
	u_int32_t		error)
{
	errno_t			result;
	struct nstat_msg_error	err;

	bzero(&err, sizeof(err));
	err.hdr.type = NSTAT_MSG_TYPE_ERROR;
	err.hdr.length = sizeof(err);
	err.hdr.context = context;
	err.error = error;

	result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &err,
		sizeof(err), CTL_DATA_EOR | CTL_DATA_CRIT);
	if (result != 0)
		nstat_stats.nstat_msgerrorfailures++;
}
static boolean_t
nstat_control_begin_query(
	nstat_control_state	*state,
	const nstat_msg_hdr	*hdrp)
{
	boolean_t partial = FALSE;

	if (hdrp->flags & NSTAT_MSG_HDR_FLAG_CONTINUATION)
	{
		/* A partial query all has been requested. */
		partial = TRUE;

		if (state->ncs_context != hdrp->context)
		{
			if (state->ncs_context != 0)
				nstat_send_error(state, state->ncs_context, EAGAIN);

			/* Initialize state for a partial query all. */
			state->ncs_context = hdrp->context;
			state->ncs_seq++;
		}
	}

	return partial;
}
static u_int16_t
nstat_control_end_query(
	nstat_control_state	*state,
	nstat_src		*last_src,
	boolean_t		partial)
{
	u_int16_t flags = 0;

	if (last_src == NULL || !partial)
	{
		/*
		 * We iterated through the entire srcs list or exited early
		 * from the loop when a partial update was not requested (an
		 * error occurred), so clear context to indicate internally
		 * that the query is finished.
		 */
		state->ncs_context = 0;
	}
	else
	{
		/*
		 * Indicate to userlevel to make another partial request as
		 * there are still sources left to be reported.
		 */
		flags |= NSTAT_MSG_HDR_FLAG_CONTINUATION;
	}

	return flags;
}
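
/*
 * Illustrative sketch, not part of this file: the continuation handshake as
 * a client would drive it. A query-all is re-issued with the same context
 * and the CONTINUATION flag until the SUCCESS reply no longer carries
 * NSTAT_MSG_HDR_FLAG_CONTINUATION. read_until_success_reply() is a
 * hypothetical helper that drains SRC_COUNTS messages and returns the
 * hdr.flags of the terminating SUCCESS reply.
 *
 *	nstat_msg_query_src_req q;
 *	uint16_t flags = 0;
 *	bzero(&q, sizeof(q));
 *	q.hdr.type = NSTAT_MSG_TYPE_QUERY_SRC;
 *	q.hdr.length = sizeof(q);
 *	q.hdr.context = 7;		// must stay constant across the sequence
 *	q.srcref = NSTAT_SRC_REF_ALL;
 *	do {
 *		q.hdr.flags = flags;
 *		send(fd, &q, sizeof(q), 0);
 *		flags = read_until_success_reply(fd);	// hypothetical helper
 *	} while (flags & NSTAT_MSG_HDR_FLAG_CONTINUATION);
 */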
static errno_t
nstat_control_handle_get_update(
	nstat_control_state	*state,
	mbuf_t			m)
{
	nstat_msg_query_src_req	req;

	if (mbuf_copydata(m, 0, sizeof(req), &req) != 0)
	{
		return EINVAL;
	}

	lck_mtx_lock(&state->mtx);

	state->ncs_flags |= NSTAT_FLAG_SUPPORTS_UPDATES;

	errno_t		result = ENOENT;
	nstat_src	*src;
	nstat_src	*dead_srcs = NULL;
	nstat_src	**srcpp = &state->ncs_srcs;
	u_int64_t	src_count = 0;
	boolean_t	partial = FALSE;

	/*
	 * Error handling policy and sequence number generation is folded into
	 * nstat_control_begin_query.
	 */
	partial = nstat_control_begin_query(state, &req.hdr);

	while (*srcpp != NULL
		&& (FALSE == partial
			|| src_count < QUERY_CONTINUATION_SRC_COUNT))
	{
		int	gone = 0;

		src = *srcpp;
		if (nstat_control_reporting_allowed(state, src))
		{
			/* skip this source if it has the current state
			 * sequence number as it's already been reported in
			 * this query-all partial sequence. */
			if (req.srcref == NSTAT_SRC_REF_ALL
				&& (FALSE == partial || src->seq != state->ncs_seq))
			{
				result = nstat_control_append_update(state, src, &gone);
				if (ENOMEM == result || ENOBUFS == result)
				{
					/*
					 * If the update message failed to
					 * enqueue then give up.
					 */
					break;
				}
				if (partial)
				{
					/*
					 * We skip over hard errors and
					 * filtered sources.
					 */
					src->seq = state->ncs_seq;
					src_count++;
				}
			}
			else if (src->srcref == req.srcref)
			{
				result = nstat_control_send_update(state, src, req.hdr.context, 0, &gone);
			}
		}

		if (gone)
		{
			// pull src out of the list
			*srcpp = src->next;

			src->next = dead_srcs;
			dead_srcs = src;
		}
		else
		{
			srcpp = &(*srcpp)->next;
		}

		if (req.srcref != NSTAT_SRC_REF_ALL && req.srcref == src->srcref)
		{
			break;
		}
	}

	nstat_flush_accumulated_msgs(state);

	u_int16_t flags = 0;
	if (req.srcref == NSTAT_SRC_REF_ALL)
		flags = nstat_control_end_query(state, *srcpp, partial);

	lck_mtx_unlock(&state->mtx);

	/*
	 * If an error occurred enqueueing data, then allow the error to
	 * propagate to nstat_control_send. This way, the error is sent to
	 * user-level.
	 */
	if (req.srcref == NSTAT_SRC_REF_ALL && ENOMEM != result && ENOBUFS != result)
	{
		nstat_enqueue_success(req.hdr.context, state, flags);
		result = 0;
	}

	while (dead_srcs)
	{
		src = dead_srcs;
		dead_srcs = src->next;

		// release src and send notification
		nstat_control_cleanup_source(state, src, FALSE);
	}

	return result;
}
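
/*
 * Illustrative sketch, not part of this file: polling combined updates. One
 * NSTAT_MSG_TYPE_GET_UPDATE with NSTAT_SRC_REF_ALL yields a descriptor-plus-
 * counts update per source and marks this client as supporting updates.
 * `fd` is assumed to be the connected control socket.
 *
 *	nstat_msg_query_src_req q;
 *	bzero(&q, sizeof(q));
 *	q.hdr.type = NSTAT_MSG_TYPE_GET_UPDATE;
 *	q.hdr.length = sizeof(q);
 *	q.hdr.context = 9;
 *	q.srcref = NSTAT_SRC_REF_ALL;
 *	send(fd, &q, sizeof(q), 0);	// expect SRC_UPDATE messages, then SUCCESS
 */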
static errno_t
nstat_control_handle_subscribe_sysinfo(
	nstat_control_state	*state)
{
	errno_t result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0);

	if (result != 0)
	{
		return result;
	}

	lck_mtx_lock(&state->mtx);
	state->ncs_flags |= NSTAT_FLAG_SYSINFO_SUBSCRIBED;
	lck_mtx_unlock(&state->mtx);

	return 0;
}
static errno_t
nstat_control_send(
	kern_ctl_ref	kctl,
	u_int32_t	unit,
	void		*uinfo,
	mbuf_t		m,
	__unused int	flags)
{
	nstat_control_state	*state = (nstat_control_state *)uinfo;
	struct nstat_msg_hdr	*hdr;
	struct nstat_msg_hdr	storage;
	errno_t			result = 0;

	if (mbuf_pkthdr_len(m) < sizeof(*hdr))
	{
		// Is this the right thing to do?
		mbuf_freem(m);
		return EINVAL;
	}

	if (mbuf_len(m) >= sizeof(*hdr))
	{
		hdr = mbuf_data(m);
	}
	else
	{
		mbuf_copydata(m, 0, sizeof(storage), &storage);
		hdr = &storage;
	}

	// Legacy clients may not set the length
	// Those clients are likely not setting the flags either
	// Fix everything up so old clients continue to work
	if (hdr->length != mbuf_pkthdr_len(m))
	{
		hdr->flags = 0;
		hdr->length = mbuf_pkthdr_len(m);
		if (hdr == &storage)
		{
			mbuf_copyback(m, 0, sizeof(*hdr), hdr, MBUF_DONTWAIT);
		}
	}

	switch (hdr->type)
	{
		case NSTAT_MSG_TYPE_ADD_SRC:
			result = nstat_control_handle_add_request(state, m);
			break;

		case NSTAT_MSG_TYPE_ADD_ALL_SRCS:
			result = nstat_control_handle_add_all(state, m);
			break;

		case NSTAT_MSG_TYPE_REM_SRC:
			result = nstat_control_handle_remove_request(state, m);
			break;

		case NSTAT_MSG_TYPE_QUERY_SRC:
			result = nstat_control_handle_query_request(state, m);
			break;

		case NSTAT_MSG_TYPE_GET_SRC_DESC:
			result = nstat_control_handle_get_src_description(state, m);
			break;

		case NSTAT_MSG_TYPE_SET_FILTER:
			result = nstat_control_handle_set_filter(state, m);
			break;

		case NSTAT_MSG_TYPE_GET_UPDATE:
			result = nstat_control_handle_get_update(state, m);
			break;

		case NSTAT_MSG_TYPE_SUBSCRIBE_SYSINFO:
			result = nstat_control_handle_subscribe_sysinfo(state);
			break;

		default:
			result = EINVAL;
			break;
	}

	if (result != 0)
	{
		struct nstat_msg_error	err;

		bzero(&err, sizeof(err));
		err.hdr.type = NSTAT_MSG_TYPE_ERROR;
		err.hdr.length = sizeof(err) + mbuf_pkthdr_len(m);
		err.hdr.context = hdr->context;
		err.error = result;

		if (mbuf_prepend(&m, sizeof(err), MBUF_DONTWAIT) == 0 &&
			mbuf_copyback(m, 0, sizeof(err), &err, MBUF_DONTWAIT) == 0)
		{
			result = ctl_enqueuembuf(kctl, unit, m, CTL_DATA_EOR | CTL_DATA_CRIT);
			if (result != 0)
			{
				mbuf_freem(m);
			}
			m = NULL;
		}

		if (result != 0)
		{
			// Unable to prepend the error to the request - just send the error
			err.hdr.length = sizeof(err);
			result = ctl_enqueuedata(kctl, unit, &err, sizeof(err),
				CTL_DATA_EOR | CTL_DATA_CRIT);
			if (result != 0)
				nstat_stats.nstat_msgerrorfailures += 1;
		}
		nstat_stats.nstat_handle_msg_failures += 1;
	}

	if (m) mbuf_freem(m