/*
 * Copyright (c) 2010-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/param.h>
#include <sys/types.h>
#include <sys/kpi_mbuf.h>
#include <sys/socket.h>
#include <sys/kern_control.h>
#include <sys/mcache.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/protosw.h>

#include <kern/clock.h>
#include <kern/debug.h>

#include <libkern/libkern.h>
#include <libkern/OSMalloc.h>
#include <libkern/OSAtomic.h>
#include <libkern/locks.h>

#include <net/if_var.h>
#include <net/if_types.h>
#include <net/route.h>
#include <net/ntstat.h>

#include <netinet/ip_var.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_cc.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/in6_var.h>
__private_extern__ int nstat_collect = 1;
SYSCTL_INT(_net, OID_AUTO, statistics, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_collect, 0, "Collect detailed statistics");

static int nstat_privcheck = 0;
SYSCTL_INT(_net, OID_AUTO, statistics_privcheck, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_privcheck, 0, "Entitlement check");

SYSCTL_NODE(_net, OID_AUTO, stats,
    CTLFLAG_RW | CTLFLAG_LOCKED, 0, "network statistics");

static int nstat_debug = 0;
SYSCTL_INT(_net_stats, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_debug, 0, "");

static int nstat_sendspace = 2048;
SYSCTL_INT(_net_stats, OID_AUTO, sendspace, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_sendspace, 0, "");

static int nstat_recvspace = 8192;
SYSCTL_INT(_net_stats, OID_AUTO, recvspace, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_recvspace, 0, "");

static struct nstat_stats nstat_stats;
SYSCTL_STRUCT(_net_stats, OID_AUTO, stats, CTLFLAG_RD | CTLFLAG_LOCKED,
    &nstat_stats, nstat_stats, "");
enum
{
    NSTAT_FLAG_CLEANUP              = (1 << 0),
    NSTAT_FLAG_REQCOUNTS            = (1 << 1),
    NSTAT_FLAG_SUPPORTS_UPDATES     = (1 << 2),
    NSTAT_FLAG_SYSINFO_SUBSCRIBED   = (1 << 3),
};
#define QUERY_CONTINUATION_SRC_COUNT 100
typedef struct nstat_provider_filter
{
    u_int64_t   npf_flags;
    u_int64_t   npf_events;
    pid_t       npf_pid;
    uuid_t      npf_uuid;
} nstat_provider_filter;
typedef struct nstat_control_state
{
    struct nstat_control_state  *ncs_next;
    u_int32_t                   ncs_watching;
    decl_lck_mtx_data(, mtx);
    kern_ctl_ref                ncs_kctl;
    nstat_src_ref_t             ncs_next_srcref;
    struct nstat_src            *ncs_srcs;
    mbuf_t                      ncs_accumulated;
    nstat_provider_filter       ncs_provider_filters[NSTAT_PROVIDER_COUNT];
    /* state maintained for partial query requests */
    u_int64_t                   ncs_context;
} nstat_control_state;
typedef struct nstat_provider
{
    struct nstat_provider   *next;
    nstat_provider_id_t     nstat_provider_id;
    size_t                  nstat_descriptor_length;
    errno_t                 (*nstat_lookup)(const void *data, u_int32_t length, nstat_provider_cookie_t *out_cookie);
    int                     (*nstat_gone)(nstat_provider_cookie_t cookie);
    errno_t                 (*nstat_counts)(nstat_provider_cookie_t cookie, struct nstat_counts *out_counts, int *out_gone);
    errno_t                 (*nstat_watcher_add)(nstat_control_state *state);
    void                    (*nstat_watcher_remove)(nstat_control_state *state);
    errno_t                 (*nstat_copy_descriptor)(nstat_provider_cookie_t cookie, void *data, u_int32_t len);
    void                    (*nstat_release)(nstat_provider_cookie_t cookie, boolean_t locked);
    bool                    (*nstat_reporting_allowed)(nstat_provider_cookie_t cookie, nstat_provider_filter *filter);
} nstat_provider;
typedef STAILQ_HEAD(, nstat_src)        stailq_head_nstat_src;
typedef STAILQ_ENTRY(nstat_src)         stailq_entry_nstat_src;

typedef TAILQ_HEAD(, nstat_tu_shadow)   tailq_head_tu_shadow;
typedef TAILQ_ENTRY(nstat_tu_shadow)    tailq_entry_tu_shadow;
typedef struct nstat_src
{
    struct nstat_src        *next;
    nstat_src_ref_t         srcref;
    nstat_provider          *provider;
    nstat_provider_cookie_t cookie;
} nstat_src;
static errno_t  nstat_control_send_counts(nstat_control_state *,
                    nstat_src *, unsigned long long, u_int16_t, int *);
static int      nstat_control_send_description(nstat_control_state *state, nstat_src *src, u_int64_t context, u_int16_t hdr_flags);
static int      nstat_control_send_update(nstat_control_state *state, nstat_src *src, u_int64_t context, u_int16_t hdr_flags, int *gone);
static errno_t  nstat_control_send_removed(nstat_control_state *, nstat_src *);
static errno_t  nstat_control_send_goodbye(nstat_control_state *state, nstat_src *src);
static void     nstat_control_cleanup_source(nstat_control_state *state, nstat_src *src, boolean_t);
static bool     nstat_control_reporting_allowed(nstat_control_state *state, nstat_src *src);
static boolean_t nstat_control_begin_query(nstat_control_state *state, const nstat_msg_hdr *hdrp);
static u_int16_t nstat_control_end_query(nstat_control_state *state, nstat_src *last_src, boolean_t partial);
static void     nstat_ifnet_report_ecn_stats(void);

static u_int32_t    nstat_udp_watchers = 0;
static u_int32_t    nstat_userland_udp_watchers = 0;
static u_int32_t    nstat_tcp_watchers = 0;
static u_int32_t    nstat_userland_tcp_watchers = 0;

static void nstat_control_register(void);
/*
 * The lock order is as follows:
 *
 *     socket_lock (inpcb)
 *         nstat_mtx
 *             state->mtx
 */
static volatile OSMallocTag nstat_malloc_tag = NULL;
static nstat_control_state  *nstat_controls = NULL;
static uint64_t             nstat_idle_time = 0;
static decl_lck_mtx_data(, nstat_mtx);
/* some extern definitions */
extern void mbuf_report_peak_usage(void);
extern void tcp_report_stats(void);
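/*
 * Copy a sockaddr out to userland-visible storage. For scoped IPv6
 * addresses, the scope id embedded in s6_addr16[1] is moved out into
 * sin6_scope_id so clients see a clean address.
 */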
static void
nstat_copy_sa_out(
    const struct sockaddr   *src,
    struct sockaddr         *dst,
    int                     maxlen)
{
    if (src->sa_len > maxlen) return;

    bcopy(src, dst, src->sa_len);
    if (src->sa_family == AF_INET6 &&
        src->sa_len >= sizeof(struct sockaddr_in6))
    {
        struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)(void *)dst;
        if (IN6_IS_SCOPE_EMBED(&sin6->sin6_addr))
        {
            if (sin6->sin6_scope_id == 0)
                sin6->sin6_scope_id = ntohs(sin6->sin6_addr.s6_addr16[1]);
            sin6->sin6_addr.s6_addr16[1] = 0;
        }
    }
}
static void
nstat_ip_to_sockaddr(
    const struct in_addr    *ip,
    u_int16_t               port,
    struct sockaddr_in      *sin,
    u_int32_t               maxlen)
{
    if (maxlen < sizeof(struct sockaddr_in))
        return;

    sin->sin_family = AF_INET;
    sin->sin_len = sizeof(*sin);
    sin->sin_port = port;
    sin->sin_addr = *ip;
}
static void
nstat_ip6_to_sockaddr(
    const struct in6_addr   *ip6,
    u_int16_t               port,
    struct sockaddr_in6     *sin6,
    u_int32_t               maxlen)
{
    if (maxlen < sizeof(struct sockaddr_in6))
        return;

    sin6->sin6_family = AF_INET6;
    sin6->sin6_len = sizeof(*sin6);
    sin6->sin6_port = port;
    sin6->sin6_addr = *ip6;
    if (IN6_IS_SCOPE_EMBED(&sin6->sin6_addr))
    {
        sin6->sin6_scope_id = ntohs(sin6->sin6_addr.s6_addr16[1]);
        sin6->sin6_addr.s6_addr16[1] = 0;
    }
}
static u_int16_t
nstat_ifnet_to_flags(
    struct ifnet *ifp)
{
    u_int16_t flags = 0;
    u_int32_t functional_type = if_functional_type(ifp, FALSE);

    /* Panic if someone adds a functional type without updating ntstat. */
    VERIFY(0 <= functional_type &&
        functional_type <= IFRTYPE_FUNCTIONAL_LAST);

    switch (functional_type)
    {
    case IFRTYPE_FUNCTIONAL_UNKNOWN:
        flags |= NSTAT_IFNET_IS_UNKNOWN_TYPE;
        break;
    case IFRTYPE_FUNCTIONAL_LOOPBACK:
        flags |= NSTAT_IFNET_IS_LOOPBACK;
        break;
    case IFRTYPE_FUNCTIONAL_WIRED:
        flags |= NSTAT_IFNET_IS_WIRED;
        break;
    case IFRTYPE_FUNCTIONAL_WIFI_INFRA:
        flags |= NSTAT_IFNET_IS_WIFI;
        break;
    case IFRTYPE_FUNCTIONAL_WIFI_AWDL:
        flags |= NSTAT_IFNET_IS_WIFI;
        flags |= NSTAT_IFNET_IS_AWDL;
        break;
    case IFRTYPE_FUNCTIONAL_CELLULAR:
        flags |= NSTAT_IFNET_IS_CELLULAR;
        break;
    }

    if (IFNET_IS_EXPENSIVE(ifp))
    {
        flags |= NSTAT_IFNET_IS_EXPENSIVE;
    }

    return flags;
}
static u_int16_t
nstat_inpcb_to_flags(
    const struct inpcb *inp)
{
    u_int16_t flags = 0;

    if ((inp != NULL) && (inp->inp_last_outifp != NULL))
    {
        struct ifnet *ifp = inp->inp_last_outifp;
        flags = nstat_ifnet_to_flags(ifp);

        if (flags & NSTAT_IFNET_IS_CELLULAR)
        {
            if (inp->inp_socket != NULL &&
                (inp->inp_socket->so_flags1 & SOF1_CELLFALLBACK))
                flags |= NSTAT_IFNET_VIA_CELLFALLBACK;
        }
    }
    else
    {
        flags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
    }

    return flags;
}
#pragma mark -- Network Statistic Providers --
static errno_t nstat_control_source_add(u_int64_t context, nstat_control_state *state, nstat_provider *provider, nstat_provider_cookie_t cookie);
struct nstat_provider   *nstat_providers = NULL;
static struct nstat_provider*
nstat_find_provider_by_id(
    nstat_provider_id_t id)
{
    struct nstat_provider   *provider;

    for (provider = nstat_providers; provider != NULL; provider = provider->next)
    {
        if (provider->nstat_provider_id == id)
            break;
    }

    return provider;
}
static errno_t
nstat_lookup_entry(
    nstat_provider_id_t     id,
    const void              *data,
    u_int32_t               length,
    nstat_provider          **out_provider,
    nstat_provider_cookie_t *out_cookie)
{
    *out_provider = nstat_find_provider_by_id(id);
    if (*out_provider == NULL)
    {
        return ENOENT;
    }

    return (*out_provider)->nstat_lookup(data, length, out_cookie);
}
static void nstat_init_route_provider(void);
static void nstat_init_tcp_provider(void);
static void nstat_init_userland_tcp_provider(void);
static void nstat_init_udp_provider(void);
static void nstat_init_userland_udp_provider(void);
static void nstat_init_ifnet_provider(void);
__private_extern__ void
nstat_init(void)
{
    if (nstat_malloc_tag != NULL) return;

    OSMallocTag tag = OSMalloc_Tagalloc(NET_STAT_CONTROL_NAME, OSMT_DEFAULT);
    if (!OSCompareAndSwapPtr(NULL, tag, &nstat_malloc_tag))
    {
        OSMalloc_Tagfree(tag);
        tag = nstat_malloc_tag;
    }
    else
    {
        // we need to initialize other things, we do it here as this code path will only be hit once;
        nstat_init_route_provider();
        nstat_init_tcp_provider();
        nstat_init_userland_tcp_provider();
        nstat_init_udp_provider();
        nstat_init_userland_udp_provider();
        nstat_init_ifnet_provider();
        nstat_control_register();
    }
}
#pragma mark -- Aligned Buffer Allocation --
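/*
 * The aligned allocator over-allocates and stashes a small header just
 * before the aligned pointer it hands out; the header records the offset
 * back to the start of the raw OSMalloc block and the block's total length
 * so nstat_free_aligned can recover the original allocation.
 */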
struct align_header
{
    u_int32_t   offset;
    u_int32_t   length;
};

static void*
nstat_malloc_aligned(
    u_int32_t   length,
    u_int8_t    alignment,
    OSMallocTag tag)
{
    struct align_header *hdr = NULL;
    u_int32_t   size = length + sizeof(*hdr) + alignment - 1;

    u_int8_t    *buffer = OSMalloc(size, tag);
    if (buffer == NULL) return NULL;

    u_int8_t    *aligned = buffer + sizeof(*hdr);
    aligned = (u_int8_t*)P2ROUNDUP(aligned, alignment);

    hdr = (struct align_header*)(void *)(aligned - sizeof(*hdr));
    hdr->offset = aligned - buffer;
    hdr->length = size;

    return aligned;
}

static void
nstat_free_aligned(
    void        *buffer,
    OSMallocTag tag)
{
    struct align_header *hdr = (struct align_header*)(void *)((u_int8_t*)buffer - sizeof(*hdr));
    OSFree(((char*)buffer) - hdr->offset, hdr->length, tag);
}
#pragma mark -- Route Provider --

static nstat_provider   nstat_route_provider;
static errno_t
nstat_route_lookup(
    const void              *data,
    u_int32_t               length,
    nstat_provider_cookie_t *out_cookie)
{
    // rt_lookup doesn't take const params but it doesn't modify the parameters for
    // the lookup. So...we use a union to eliminate the warning.
    union
    {
        struct sockaddr         *sa;
        const struct sockaddr   *const_sa;
    } dst, mask;

    const nstat_route_add_param *param = (const nstat_route_add_param*)data;
    *out_cookie = NULL;

    if (length < sizeof(*param))
    {
        return EINVAL;
    }

    if (param->dst.v4.sin_family == 0 ||
        param->dst.v4.sin_family > AF_MAX ||
        (param->mask.v4.sin_family != 0 && param->mask.v4.sin_family != param->dst.v4.sin_family))
    {
        return EINVAL;
    }

    if (param->dst.v4.sin_len > sizeof(param->dst) ||
        (param->mask.v4.sin_family && param->mask.v4.sin_len > sizeof(param->mask.v4.sin_len)))
    {
        return EINVAL;
    }
    if ((param->dst.v4.sin_family == AF_INET &&
         param->dst.v4.sin_len < sizeof(struct sockaddr_in)) ||
        (param->dst.v6.sin6_family == AF_INET6 &&
         param->dst.v6.sin6_len < sizeof(struct sockaddr_in6)))
    {
        return EINVAL;
    }

    dst.const_sa = (const struct sockaddr*)&param->dst;
    mask.const_sa = param->mask.v4.sin_family ? (const struct sockaddr*)&param->mask : NULL;

    struct radix_node_head  *rnh = rt_tables[dst.sa->sa_family];
    if (rnh == NULL) return EAFNOSUPPORT;

    lck_mtx_lock(rnh_lock);
    struct rtentry *rt = rt_lookup(TRUE, dst.sa, mask.sa, rnh, param->ifindex);
    lck_mtx_unlock(rnh_lock);

    if (rt) *out_cookie = (nstat_provider_cookie_t)rt;

    return rt ? 0 : ENOENT;
}
static int
nstat_route_gone(
    nstat_provider_cookie_t cookie)
{
    struct rtentry  *rt = (struct rtentry*)cookie;
    return ((rt->rt_flags & RTF_UP) == 0) ? 1 : 0;
}
static errno_t
nstat_route_counts(
    nstat_provider_cookie_t cookie,
    struct nstat_counts     *out_counts,
    int                     *out_gone)
{
    struct rtentry      *rt = (struct rtentry*)cookie;
    struct nstat_counts *rt_stats = rt->rt_stats;

    if (out_gone) *out_gone = 0;

    if (out_gone && (rt->rt_flags & RTF_UP) == 0) *out_gone = 1;

    if (rt_stats)
    {
        atomic_get_64(out_counts->nstat_rxpackets, &rt_stats->nstat_rxpackets);
        atomic_get_64(out_counts->nstat_rxbytes, &rt_stats->nstat_rxbytes);
        atomic_get_64(out_counts->nstat_txpackets, &rt_stats->nstat_txpackets);
        atomic_get_64(out_counts->nstat_txbytes, &rt_stats->nstat_txbytes);
        out_counts->nstat_rxduplicatebytes = rt_stats->nstat_rxduplicatebytes;
        out_counts->nstat_rxoutoforderbytes = rt_stats->nstat_rxoutoforderbytes;
        out_counts->nstat_txretransmit = rt_stats->nstat_txretransmit;
        out_counts->nstat_connectattempts = rt_stats->nstat_connectattempts;
        out_counts->nstat_connectsuccesses = rt_stats->nstat_connectsuccesses;
        out_counts->nstat_min_rtt = rt_stats->nstat_min_rtt;
        out_counts->nstat_avg_rtt = rt_stats->nstat_avg_rtt;
        out_counts->nstat_var_rtt = rt_stats->nstat_var_rtt;
        out_counts->nstat_cell_rxbytes = out_counts->nstat_cell_txbytes = 0;
    }
    else
    {
        bzero(out_counts, sizeof(*out_counts));
    }

    return 0;
}
static void
nstat_route_release(
    nstat_provider_cookie_t cookie,
    __unused int locked)
{
    rtfree((struct rtentry*)cookie);
}
static u_int32_t    nstat_route_watchers = 0;
static int
nstat_route_walktree_add(
    struct radix_node   *rn,
    void                *context)
{
    errno_t result = 0;
    struct rtentry *rt = (struct rtentry *)rn;
    nstat_control_state *state = (nstat_control_state*)context;

    lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);

    /* RTF_UP can't change while rnh_lock is held */
    if ((rt->rt_flags & RTF_UP) != 0)
    {
        /* Clear RTPRF_OURS if the route is still usable */
        RT_LOCK(rt);
        if (rt_validate(rt)) {
            RT_ADDREF_LOCKED(rt);
            RT_UNLOCK(rt);
        } else {
            RT_UNLOCK(rt);
            rt = NULL;
        }

        /* Otherwise if RTF_CONDEMNED, treat it as if it were down */
        if (rt == NULL)
            return (0);

        result = nstat_control_source_add(0, state, &nstat_route_provider, rt);
        if (result != 0)
            rtfree_locked(rt);
    }

    return result;
}
static errno_t
nstat_route_add_watcher(
    nstat_control_state *state)
{
    int i;
    errno_t result = 0;
    OSIncrementAtomic(&nstat_route_watchers);

    lck_mtx_lock(rnh_lock);
    for (i = 1; i < AF_MAX; i++)
    {
        struct radix_node_head *rnh;
        rnh = rt_tables[i];
        if (!rnh) continue;

        result = rnh->rnh_walktree(rnh, nstat_route_walktree_add, state);
        if (result != 0) break;
    }
    lck_mtx_unlock(rnh_lock);

    return result;
}
__private_extern__ void
nstat_route_new_entry(
    struct rtentry  *rt)
{
    if (nstat_route_watchers == 0)
        return;

    lck_mtx_lock(&nstat_mtx);
    if ((rt->rt_flags & RTF_UP) != 0)
    {
        nstat_control_state *state;
        for (state = nstat_controls; state; state = state->ncs_next)
        {
            if ((state->ncs_watching & (1 << NSTAT_PROVIDER_ROUTE)) != 0)
            {
                // this client is watching routes
                // acquire a reference for the route
                RT_ADDREF(rt);

                // add the source, if that fails, release the reference
                if (nstat_control_source_add(0, state, &nstat_route_provider, rt) != 0)
                    RT_REMREF(rt);
            }
        }
    }
    lck_mtx_unlock(&nstat_mtx);
}
static void
nstat_route_remove_watcher(
    __unused nstat_control_state    *state)
{
    OSDecrementAtomic(&nstat_route_watchers);
}
static errno_t
nstat_route_copy_descriptor(
    nstat_provider_cookie_t cookie,
    void                    *data,
    u_int32_t               len)
{
    nstat_route_descriptor  *desc = (nstat_route_descriptor*)data;
    if (len < sizeof(*desc))
    {
        return EINVAL;
    }
    bzero(desc, sizeof(*desc));

    struct rtentry  *rt = (struct rtentry*)cookie;
    desc->id = (uint64_t)VM_KERNEL_ADDRPERM(rt);
    desc->parent_id = (uint64_t)VM_KERNEL_ADDRPERM(rt->rt_parent);
    desc->gateway_id = (uint64_t)VM_KERNEL_ADDRPERM(rt->rt_gwroute);

    // destination
    struct sockaddr *sa;
    if ((sa = rt_key(rt)))
        nstat_copy_sa_out(sa, &desc->dst.sa, sizeof(desc->dst));

    // mask
    if ((sa = rt_mask(rt)) && sa->sa_len <= sizeof(desc->mask))
        memcpy(&desc->mask, sa, sa->sa_len);

    // gateway
    if ((sa = rt->rt_gateway))
        nstat_copy_sa_out(sa, &desc->gateway.sa, sizeof(desc->gateway));

    if (rt->rt_ifp)
        desc->ifindex = rt->rt_ifp->if_index;

    desc->flags = rt->rt_flags;

    return 0;
}
static bool
nstat_route_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter)
{
    bool retval = true;

    if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0)
    {
        struct rtentry  *rt = (struct rtentry*)cookie;
        struct ifnet    *ifp = rt->rt_ifp;

        if (ifp)
        {
            uint16_t interface_properties = nstat_ifnet_to_flags(ifp);

            if ((filter->npf_flags & interface_properties) == 0)
            {
                retval = false;
            }
        }
    }
    return retval;
}
static void
nstat_init_route_provider(void)
{
    bzero(&nstat_route_provider, sizeof(nstat_route_provider));
    nstat_route_provider.nstat_descriptor_length = sizeof(nstat_route_descriptor);
    nstat_route_provider.nstat_provider_id = NSTAT_PROVIDER_ROUTE;
    nstat_route_provider.nstat_lookup = nstat_route_lookup;
    nstat_route_provider.nstat_gone = nstat_route_gone;
    nstat_route_provider.nstat_counts = nstat_route_counts;
    nstat_route_provider.nstat_release = nstat_route_release;
    nstat_route_provider.nstat_watcher_add = nstat_route_add_watcher;
    nstat_route_provider.nstat_watcher_remove = nstat_route_remove_watcher;
    nstat_route_provider.nstat_copy_descriptor = nstat_route_copy_descriptor;
    nstat_route_provider.nstat_reporting_allowed = nstat_route_reporting_allowed;
    nstat_route_provider.next = nstat_providers;
    nstat_providers = &nstat_route_provider;
}
#pragma mark -- Route Collection --
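/*
 * Per-route counters are attached lazily: the first event on a route
 * allocates an aligned nstat_counts block and publishes it with a
 * compare-and-swap, so a concurrent attacher that loses the race simply
 * frees its allocation and uses the winner's.
 */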
static struct nstat_counts*
nstat_route_attach(
    struct rtentry  *rte)
{
    struct nstat_counts *result = rte->rt_stats;
    if (result) return result;

    if (nstat_malloc_tag == NULL) nstat_init();

    result = nstat_malloc_aligned(sizeof(*result), sizeof(u_int64_t), nstat_malloc_tag);
    if (!result) return result;

    bzero(result, sizeof(*result));

    if (!OSCompareAndSwapPtr(NULL, result, &rte->rt_stats))
    {
        nstat_free_aligned(result, nstat_malloc_tag);
        result = rte->rt_stats;
    }

    return result;
}
__private_extern__ void
nstat_route_detach(
    struct rtentry  *rte)
{
    if (rte->rt_stats)
    {
        nstat_free_aligned(rte->rt_stats, nstat_malloc_tag);
        rte->rt_stats = NULL;
    }
}
__private_extern__ void
nstat_route_connect_attempt(
    struct rtentry  *rte)
{
    while (rte)
    {
        struct nstat_counts*    stats = nstat_route_attach(rte);
        if (stats)
        {
            OSIncrementAtomic(&stats->nstat_connectattempts);
        }

        rte = rte->rt_parent;
    }
}
__private_extern__ void
nstat_route_connect_success(
    struct rtentry  *rte)
{
    while (rte)
    {
        struct nstat_counts*    stats = nstat_route_attach(rte);
        if (stats)
        {
            OSIncrementAtomic(&stats->nstat_connectsuccesses);
        }

        rte = rte->rt_parent;
    }
}
__private_extern__ void
nstat_route_tx(
    struct rtentry  *rte,
    u_int32_t       packets,
    u_int32_t       bytes,
    u_int32_t       flags)
{
    while (rte)
    {
        struct nstat_counts*    stats = nstat_route_attach(rte);
        if (stats)
        {
            if ((flags & NSTAT_TX_FLAG_RETRANSMIT) != 0)
            {
                OSAddAtomic(bytes, &stats->nstat_txretransmit);
            }
            else
            {
                OSAddAtomic64((SInt64)packets, (SInt64*)&stats->nstat_txpackets);
                OSAddAtomic64((SInt64)bytes, (SInt64*)&stats->nstat_txbytes);
            }
        }

        rte = rte->rt_parent;
    }
}
__private_extern__ void
nstat_route_rx(
    struct rtentry  *rte,
    u_int32_t       packets,
    u_int32_t       bytes,
    u_int32_t       flags)
{
    while (rte)
    {
        struct nstat_counts*    stats = nstat_route_attach(rte);
        if (stats)
        {
            if (flags == 0)
            {
                OSAddAtomic64((SInt64)packets, (SInt64*)&stats->nstat_rxpackets);
                OSAddAtomic64((SInt64)bytes, (SInt64*)&stats->nstat_rxbytes);
            }
            else
            {
                if (flags & NSTAT_RX_FLAG_OUT_OF_ORDER)
                    OSAddAtomic(bytes, &stats->nstat_rxoutoforderbytes);
                if (flags & NSTAT_RX_FLAG_DUPLICATE)
                    OSAddAtomic(bytes, &stats->nstat_rxduplicatebytes);
            }
        }

        rte = rte->rt_parent;
    }
}
__private_extern__ void
nstat_route_rtt(
    struct rtentry  *rte,
    u_int32_t       rtt,
    u_int32_t       rtt_var)
{
    const int32_t   factor = 8;

    while (rte)
    {
        struct nstat_counts*    stats = nstat_route_attach(rte);
        if (stats)
        {
            int32_t oldrtt;
            int32_t newrtt;

            // average
            do
            {
                oldrtt = stats->nstat_avg_rtt;
                if (oldrtt == 0)
                {
                    newrtt = rtt;
                }
                else
                {
                    newrtt = oldrtt - (oldrtt - (int32_t)rtt) / factor;
                }
                if (oldrtt == newrtt) break;
            } while (!OSCompareAndSwap(oldrtt, newrtt, &stats->nstat_avg_rtt));

            // minimum
            do
            {
                oldrtt = stats->nstat_min_rtt;
                if (oldrtt != 0 && oldrtt < (int32_t)rtt)
                {
                    break;
                }
            } while (!OSCompareAndSwap(oldrtt, rtt, &stats->nstat_min_rtt));

            // variance
            do
            {
                oldrtt = stats->nstat_var_rtt;
                if (oldrtt == 0)
                {
                    newrtt = rtt_var;
                }
                else
                {
                    newrtt = oldrtt - (oldrtt - (int32_t)rtt_var) / factor;
                }
                if (oldrtt == newrtt) break;
            } while (!OSCompareAndSwap(oldrtt, newrtt, &stats->nstat_var_rtt));
        }

        rte = rte->rt_parent;
    }
}
#pragma mark -- TCP Kernel Provider --

/*
 * Due to the way the kernel deallocates a process (the process structure
 * might be gone by the time we get the PCB detach notification),
 * we need to cache the process name. Without this, proc_name() would
 * return null and the process name would never be sent to userland.
 *
 * For UDP sockets, we also cache the connection tuples along with
 * the interface index. This is necessary because when UDP sockets are
 * disconnected, the connection tuples are forever lost from the inpcb, thus
 * we need to keep track of the last call to connect() in ntstat.
 */
struct nstat_tucookie {
    struct inpcb    *inp;
    char            pname[MAXCOMLEN+1];
    bool            cached;
    union
    {
        struct sockaddr_in  v4;
        struct sockaddr_in6 v6;
    } local;
    union
    {
        struct sockaddr_in  v4;
        struct sockaddr_in6 v6;
    } remote;
    unsigned int    if_index;
    uint16_t        ifnet_properties;
};
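/*
 * Allocate a tucookie for an inpcb. When `ref' is set the inpcb is pinned
 * with a WNT_ACQUIRE so it cannot be reclaimed while the source exists;
 * for UDP, inp_nstat_refcnt is also bumped so the tuple cache above stays
 * populated across disconnect.
 */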
static struct nstat_tucookie *
nstat_tucookie_alloc_internal(
    struct inpcb *inp,
    bool         ref,
    bool         locked)
{
    struct nstat_tucookie *cookie;

    cookie = OSMalloc(sizeof(*cookie), nstat_malloc_tag);
    if (cookie == NULL)
        return NULL;
    if (!locked)
        lck_mtx_assert(&nstat_mtx, LCK_MTX_ASSERT_NOTOWNED);
    if (ref && in_pcb_checkstate(inp, WNT_ACQUIRE, locked) == WNT_STOPUSING)
    {
        OSFree(cookie, sizeof(*cookie), nstat_malloc_tag);
        return NULL;
    }
    bzero(cookie, sizeof(*cookie));
    cookie->inp = inp;
    proc_name(inp->inp_socket->last_pid, cookie->pname,
        sizeof(cookie->pname));
    /*
     * We only increment the reference count for UDP sockets because we
     * only cache UDP socket tuples.
     */
    if (SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP)
        OSIncrementAtomic(&inp->inp_nstat_refcnt);

    return cookie;
}
static struct nstat_tucookie *
nstat_tucookie_alloc(
    struct inpcb *inp)
{
    return nstat_tucookie_alloc_internal(inp, false, false);
}

static struct nstat_tucookie *
nstat_tucookie_alloc_ref(
    struct inpcb *inp)
{
    return nstat_tucookie_alloc_internal(inp, true, false);
}

static struct nstat_tucookie *
nstat_tucookie_alloc_ref_locked(
    struct inpcb *inp)
{
    return nstat_tucookie_alloc_internal(inp, true, true);
}
static void
nstat_tucookie_release_internal(
    struct nstat_tucookie *cookie,
    int inplock)
{
    if (SOCK_PROTO(cookie->inp->inp_socket) == IPPROTO_UDP)
        OSDecrementAtomic(&cookie->inp->inp_nstat_refcnt);
    in_pcb_checkstate(cookie->inp, WNT_RELEASE, inplock);
    OSFree(cookie, sizeof(*cookie), nstat_malloc_tag);
}
static void
nstat_tucookie_release(
    struct nstat_tucookie *cookie)
{
    nstat_tucookie_release_internal(cookie, false);
}

static void
nstat_tucookie_release_locked(
    struct nstat_tucookie *cookie)
{
    nstat_tucookie_release_internal(cookie, true);
}
static nstat_provider   nstat_tcp_provider;
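/*
 * TCP and UDP share one lookup path: validate the caller-supplied local and
 * remote sockaddrs, then resolve them to an inpcb via the protocol's pcb
 * hash (tcbinfo or udbinfo) and wrap the result in a tucookie.
 */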
static errno_t
nstat_tcpudp_lookup(
    struct inpcbinfo        *inpinfo,
    const void              *data,
    u_int32_t               length,
    nstat_provider_cookie_t *out_cookie)
{
    struct inpcb *inp = NULL;

    // parameter validation
    const nstat_tcp_add_param   *param = (const nstat_tcp_add_param*)data;
    if (length < sizeof(*param))
    {
        return EINVAL;
    }

    // src and dst must match
    if (param->remote.v4.sin_family != 0 &&
        param->remote.v4.sin_family != param->local.v4.sin_family)
    {
        return EINVAL;
    }

    switch (param->local.v4.sin_family)
    {
        case AF_INET:
        {
            if (param->local.v4.sin_len != sizeof(param->local.v4) ||
                (param->remote.v4.sin_family != 0 &&
                 param->remote.v4.sin_len != sizeof(param->remote.v4)))
            {
                return EINVAL;
            }

            inp = in_pcblookup_hash(inpinfo, param->remote.v4.sin_addr, param->remote.v4.sin_port,
                param->local.v4.sin_addr, param->local.v4.sin_port, 1, NULL);
        }
        break;

        case AF_INET6:
        {
            union
            {
                const struct in6_addr   *in6c;
                struct in6_addr         *in6;
            } local, remote;

            if (param->local.v6.sin6_len != sizeof(param->local.v6) ||
                (param->remote.v6.sin6_family != 0 &&
                 param->remote.v6.sin6_len != sizeof(param->remote.v6)))
            {
                return EINVAL;
            }

            local.in6c = &param->local.v6.sin6_addr;
            remote.in6c = &param->remote.v6.sin6_addr;

            inp = in6_pcblookup_hash(inpinfo, remote.in6, param->remote.v6.sin6_port,
                local.in6, param->local.v6.sin6_port, 1, NULL);
        }
        break;

        default:
            return EINVAL;
    }

    if (inp == NULL)
        return ENOENT;

    // At this point we have a ref to the inpcb
    *out_cookie = nstat_tucookie_alloc(inp);
    if (*out_cookie == NULL)
        in_pcb_checkstate(inp, WNT_RELEASE, 0);

    return 0;
}
static errno_t
nstat_tcp_lookup(
    const void              *data,
    u_int32_t               length,
    nstat_provider_cookie_t *out_cookie)
{
    return nstat_tcpudp_lookup(&tcbinfo, data, length, out_cookie);
}
static int
nstat_tcp_gone(
    nstat_provider_cookie_t cookie)
{
    struct nstat_tucookie *tucookie =
        (struct nstat_tucookie *)cookie;
    struct inpcb *inp;
    struct tcpcb *tp;

    return (!(inp = tucookie->inp) ||
        !(tp = intotcpcb(inp)) ||
        inp->inp_state == INPCB_STATE_DEAD) ? 1 : 0;
}
static errno_t
nstat_tcp_counts(
    nstat_provider_cookie_t cookie,
    struct nstat_counts     *out_counts,
    int                     *out_gone)
{
    struct nstat_tucookie *tucookie =
        (struct nstat_tucookie *)cookie;
    struct inpcb *inp;

    bzero(out_counts, sizeof(*out_counts));

    if (out_gone) *out_gone = 0;

    // if the pcb is in the dead state, we should stop using it
    if (nstat_tcp_gone(cookie))
    {
        if (out_gone) *out_gone = 1;
        if (!(inp = tucookie->inp) || !intotcpcb(inp))
            return EINVAL;
    }
    inp = tucookie->inp;
    struct tcpcb *tp = intotcpcb(inp);

    atomic_get_64(out_counts->nstat_rxpackets, &inp->inp_stat->rxpackets);
    atomic_get_64(out_counts->nstat_rxbytes, &inp->inp_stat->rxbytes);
    atomic_get_64(out_counts->nstat_txpackets, &inp->inp_stat->txpackets);
    atomic_get_64(out_counts->nstat_txbytes, &inp->inp_stat->txbytes);
    out_counts->nstat_rxduplicatebytes = tp->t_stat.rxduplicatebytes;
    out_counts->nstat_rxoutoforderbytes = tp->t_stat.rxoutoforderbytes;
    out_counts->nstat_txretransmit = tp->t_stat.txretransmitbytes;
    out_counts->nstat_connectattempts = tp->t_state >= TCPS_SYN_SENT ? 1 : 0;
    out_counts->nstat_connectsuccesses = tp->t_state >= TCPS_ESTABLISHED ? 1 : 0;
    out_counts->nstat_avg_rtt = tp->t_srtt;
    out_counts->nstat_min_rtt = tp->t_rttbest;
    out_counts->nstat_var_rtt = tp->t_rttvar;
    if (out_counts->nstat_avg_rtt < out_counts->nstat_min_rtt)
        out_counts->nstat_min_rtt = out_counts->nstat_avg_rtt;
    atomic_get_64(out_counts->nstat_cell_rxbytes, &inp->inp_cstat->rxbytes);
    atomic_get_64(out_counts->nstat_cell_txbytes, &inp->inp_cstat->txbytes);
    atomic_get_64(out_counts->nstat_wifi_rxbytes, &inp->inp_wstat->rxbytes);
    atomic_get_64(out_counts->nstat_wifi_txbytes, &inp->inp_wstat->txbytes);
    atomic_get_64(out_counts->nstat_wired_rxbytes, &inp->inp_Wstat->rxbytes);
    atomic_get_64(out_counts->nstat_wired_txbytes, &inp->inp_Wstat->txbytes);

    return 0;
}
static void
nstat_tcp_release(
    nstat_provider_cookie_t cookie,
    int locked)
{
    struct nstat_tucookie *tucookie =
        (struct nstat_tucookie *)cookie;

    nstat_tucookie_release_internal(tucookie, locked);
}
static errno_t
nstat_tcp_add_watcher(
    nstat_control_state *state)
{
    OSIncrementAtomic(&nstat_tcp_watchers);

    lck_rw_lock_shared(tcbinfo.ipi_lock);

    // Add all current tcp inpcbs. Ignore those in timewait
    struct inpcb *inp;
    struct nstat_tucookie *cookie;
    LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list)
    {
        cookie = nstat_tucookie_alloc_ref(inp);
        if (cookie == NULL)
            continue;
        if (nstat_control_source_add(0, state, &nstat_tcp_provider,
            cookie) != 0)
        {
            nstat_tucookie_release(cookie);
            break;
        }
    }

    lck_rw_done(tcbinfo.ipi_lock);

    return 0;
}
static void
nstat_tcp_remove_watcher(
    __unused nstat_control_state    *state)
{
    OSDecrementAtomic(&nstat_tcp_watchers);
}
__private_extern__ void
nstat_tcp_new_pcb(
    struct inpcb    *inp)
{
    struct nstat_tucookie *cookie;

    if (nstat_tcp_watchers == 0)
        return;

    socket_lock(inp->inp_socket, 0);
    lck_mtx_lock(&nstat_mtx);
    nstat_control_state *state;
    for (state = nstat_controls; state; state = state->ncs_next)
    {
        if ((state->ncs_watching & (1 << NSTAT_PROVIDER_TCP_KERNEL)) != 0)
        {
            // this client is watching tcp
            // acquire a reference for it
            cookie = nstat_tucookie_alloc_ref_locked(inp);
            if (cookie == NULL)
                continue;
            // add the source, if that fails, release the reference
            if (nstat_control_source_add(0, state,
                &nstat_tcp_provider, cookie) != 0)
            {
                nstat_tucookie_release_locked(cookie);
                break;
            }
        }
    }
    lck_mtx_unlock(&nstat_mtx);
    socket_unlock(inp->inp_socket, 0);
}
__private_extern__ void
nstat_pcb_detach(struct inpcb *inp)
{
    nstat_control_state *state;
    nstat_src *src, *prevsrc;
    nstat_src *dead_list = NULL;
    struct nstat_tucookie *tucookie;
    errno_t result;

    if (inp == NULL || (nstat_tcp_watchers == 0 && nstat_udp_watchers == 0))
        return;

    lck_mtx_lock(&nstat_mtx);
    for (state = nstat_controls; state; state = state->ncs_next)
    {
        lck_mtx_lock(&state->mtx);
        for (prevsrc = NULL, src = state->ncs_srcs; src;
            prevsrc = src, src = src->next)
        {
            tucookie = (struct nstat_tucookie *)src->cookie;
            if (tucookie->inp == inp)
                break;
        }

        if (src)
        {
            result = nstat_control_send_goodbye(state, src);

            if (prevsrc)
                prevsrc->next = src->next;
            else
                state->ncs_srcs = src->next;

            src->next = dead_list;
            dead_list = src;
        }
        lck_mtx_unlock(&state->mtx);
    }
    lck_mtx_unlock(&nstat_mtx);

    while (dead_list)
    {
        src = dead_list;
        dead_list = src->next;

        nstat_control_cleanup_source(NULL, src, TRUE);
    }
}
__private_extern__ void
nstat_pcb_cache(struct inpcb *inp)
{
    nstat_control_state *state;
    nstat_src *src;
    struct nstat_tucookie *tucookie;

    if (inp == NULL || nstat_udp_watchers == 0 ||
        inp->inp_nstat_refcnt == 0)
        return;
    VERIFY(SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP);
    lck_mtx_lock(&nstat_mtx);
    for (state = nstat_controls; state; state = state->ncs_next) {
        lck_mtx_lock(&state->mtx);
        for (src = state->ncs_srcs; src; src = src->next)
        {
            tucookie = (struct nstat_tucookie *)src->cookie;
            if (tucookie->inp == inp)
            {
                if (inp->inp_vflag & INP_IPV6)
                {
                    nstat_ip6_to_sockaddr(&inp->in6p_laddr,
                        inp->inp_lport,
                        &tucookie->local.v6,
                        sizeof(tucookie->local));
                    nstat_ip6_to_sockaddr(&inp->in6p_faddr,
                        inp->inp_fport,
                        &tucookie->remote.v6,
                        sizeof(tucookie->remote));
                }
                else if (inp->inp_vflag & INP_IPV4)
                {
                    nstat_ip_to_sockaddr(&inp->inp_laddr,
                        inp->inp_lport,
                        &tucookie->local.v4,
                        sizeof(tucookie->local));
                    nstat_ip_to_sockaddr(&inp->inp_faddr,
                        inp->inp_fport,
                        &tucookie->remote.v4,
                        sizeof(tucookie->remote));
                }
                if (inp->inp_last_outifp)
                    tucookie->if_index =
                        inp->inp_last_outifp->if_index;

                tucookie->ifnet_properties = nstat_inpcb_to_flags(inp);
                tucookie->cached = true;
                break;
            }
        }
        lck_mtx_unlock(&state->mtx);
    }
    lck_mtx_unlock(&nstat_mtx);
}
__private_extern__ void
nstat_pcb_invalidate_cache(struct inpcb *inp)
{
    nstat_control_state *state;
    nstat_src *src;
    struct nstat_tucookie *tucookie;

    if (inp == NULL || nstat_udp_watchers == 0 ||
        inp->inp_nstat_refcnt == 0)
        return;
    VERIFY(SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP);
    lck_mtx_lock(&nstat_mtx);
    for (state = nstat_controls; state; state = state->ncs_next) {
        lck_mtx_lock(&state->mtx);
        for (src = state->ncs_srcs; src; src = src->next)
        {
            tucookie = (struct nstat_tucookie *)src->cookie;
            if (tucookie->inp == inp)
            {
                tucookie->cached = false;
                break;
            }
        }
        lck_mtx_unlock(&state->mtx);
    }
    lck_mtx_unlock(&nstat_mtx);
}
static errno_t
nstat_tcp_copy_descriptor(
    nstat_provider_cookie_t cookie,
    void                    *data,
    u_int32_t               len)
{
    if (len < sizeof(nstat_tcp_descriptor))
    {
        return EINVAL;
    }

    if (nstat_tcp_gone(cookie))
        return EINVAL;

    nstat_tcp_descriptor    *desc = (nstat_tcp_descriptor*)data;
    struct nstat_tucookie   *tucookie =
        (struct nstat_tucookie *)cookie;
    struct inpcb            *inp = tucookie->inp;
    struct tcpcb            *tp = intotcpcb(inp);
    bzero(desc, sizeof(*desc));

    if (inp->inp_vflag & INP_IPV6)
    {
        nstat_ip6_to_sockaddr(&inp->in6p_laddr, inp->inp_lport,
            &desc->local.v6, sizeof(desc->local));
        nstat_ip6_to_sockaddr(&inp->in6p_faddr, inp->inp_fport,
            &desc->remote.v6, sizeof(desc->remote));
    }
    else if (inp->inp_vflag & INP_IPV4)
    {
        nstat_ip_to_sockaddr(&inp->inp_laddr, inp->inp_lport,
            &desc->local.v4, sizeof(desc->local));
        nstat_ip_to_sockaddr(&inp->inp_faddr, inp->inp_fport,
            &desc->remote.v4, sizeof(desc->remote));
    }

    desc->state = intotcpcb(inp)->t_state;
    desc->ifindex = (inp->inp_last_outifp == NULL) ? 0 :
        inp->inp_last_outifp->if_index;

    // danger - not locked, values could be bogus
    desc->txunacked = tp->snd_max - tp->snd_una;
    desc->txwindow = tp->snd_wnd;
    desc->txcwindow = tp->snd_cwnd;

    if (CC_ALGO(tp)->name != NULL) {
        strlcpy(desc->cc_algo, CC_ALGO(tp)->name,
            sizeof(desc->cc_algo));
    }

    struct socket *so = inp->inp_socket;
    if (so)
    {
        // TBD - take the socket lock around these to make sure
        // they're in sync?
        desc->upid = so->last_upid;
        desc->pid = so->last_pid;
        desc->traffic_class = so->so_traffic_class;
        if ((so->so_flags1 & SOF1_TRAFFIC_MGT_SO_BACKGROUND))
            desc->traffic_mgt_flags |= TRAFFIC_MGT_SO_BACKGROUND;
        if ((so->so_flags1 & SOF1_TRAFFIC_MGT_TCP_RECVBG))
            desc->traffic_mgt_flags |= TRAFFIC_MGT_TCP_RECVBG;
        proc_name(desc->pid, desc->pname, sizeof(desc->pname));
        if (desc->pname[0] == 0)
        {
            strlcpy(desc->pname, tucookie->pname,
                sizeof(desc->pname));
        }
        else
        {
            desc->pname[sizeof(desc->pname) - 1] = 0;
            strlcpy(tucookie->pname, desc->pname,
                sizeof(tucookie->pname));
        }
        memcpy(desc->uuid, so->last_uuid, sizeof(so->last_uuid));
        memcpy(desc->vuuid, so->so_vuuid, sizeof(so->so_vuuid));
        if (so->so_flags & SOF_DELEGATED) {
            desc->eupid = so->e_upid;
            desc->epid = so->e_pid;
            memcpy(desc->euuid, so->e_uuid, sizeof(so->e_uuid));
        } else {
            desc->eupid = desc->upid;
            desc->epid = desc->pid;
            memcpy(desc->euuid, desc->uuid, sizeof(desc->uuid));
        }
        desc->sndbufsize = so->so_snd.sb_hiwat;
        desc->sndbufused = so->so_snd.sb_cc;
        desc->rcvbufsize = so->so_rcv.sb_hiwat;
        desc->rcvbufused = so->so_rcv.sb_cc;
    }

    tcp_get_connectivity_status(tp, &desc->connstatus);
    desc->ifnet_properties = nstat_inpcb_to_flags(inp);

    return 0;
}
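/*
 * A source is reported only if it passes the client's provider filter:
 * interface-property bits must intersect the connection's interface flags,
 * and user-specific filters match on the pid, effective pid, uuid, or
 * effective uuid of the owning socket.
 */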
static bool
nstat_tcpudp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter, bool is_UDP)
{
    bool retval = true;

    if ((filter->npf_flags & (NSTAT_FILTER_IFNET_FLAGS|NSTAT_FILTER_SPECIFIC_USER)) != 0)
    {
        struct nstat_tucookie *tucookie = (struct nstat_tucookie *)cookie;
        struct inpcb *inp = tucookie->inp;

        /* Only apply interface filter if at least one is allowed. */
        if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0)
        {
            uint16_t interface_properties = nstat_inpcb_to_flags(inp);

            if ((filter->npf_flags & interface_properties) == 0)
            {
                // For UDP, we could have an undefined interface and yet transfers may have occurred.
                // We allow reporting if there have been transfers of the requested kind.
                // This is imperfect as we cannot account for the expensive attribute over wifi.
                // We also assume that cellular is expensive and we have no way to select for AWDL
                if (is_UDP)
                {
                    do
                    {
                        if ((filter->npf_flags & (NSTAT_FILTER_ACCEPT_CELLULAR|NSTAT_FILTER_ACCEPT_EXPENSIVE)) &&
                            (inp->inp_cstat->rxbytes || inp->inp_cstat->txbytes))
                        {
                            break;
                        }
                        if ((filter->npf_flags & NSTAT_FILTER_ACCEPT_WIFI) &&
                            (inp->inp_wstat->rxbytes || inp->inp_wstat->txbytes))
                        {
                            break;
                        }
                        if ((filter->npf_flags & NSTAT_FILTER_ACCEPT_WIRED) &&
                            (inp->inp_Wstat->rxbytes || inp->inp_Wstat->txbytes))
                        {
                            break;
                        }
                        return false;
                    } while (0);
                }
                else
                {
                    return false;
                }
            }
        }

        if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0) && (retval))
        {
            struct socket *so = inp->inp_socket;
            retval = false;

            if (so)
            {
                if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_PID) != 0) &&
                    (filter->npf_pid == so->last_pid))
                {
                    retval = true;
                }
                else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EPID) != 0) &&
                    (filter->npf_pid == ((so->so_flags & SOF_DELEGATED)? so->e_upid : so->last_pid)))
                {
                    retval = true;
                }
                else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_UUID) != 0) &&
                    (memcmp(filter->npf_uuid, so->last_uuid, sizeof(so->last_uuid)) == 0))
                {
                    retval = true;
                }
                else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EUUID) != 0) &&
                    (memcmp(filter->npf_uuid, (so->so_flags & SOF_DELEGATED)? so->e_uuid : so->last_uuid,
                    sizeof(so->last_uuid)) == 0))
                {
                    retval = true;
                }
            }
        }
    }
    return retval;
}
static bool
nstat_tcp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter)
{
    return nstat_tcpudp_reporting_allowed(cookie, filter, FALSE);
}
static void
nstat_init_tcp_provider(void)
{
    bzero(&nstat_tcp_provider, sizeof(nstat_tcp_provider));
    nstat_tcp_provider.nstat_descriptor_length = sizeof(nstat_tcp_descriptor);
    nstat_tcp_provider.nstat_provider_id = NSTAT_PROVIDER_TCP_KERNEL;
    nstat_tcp_provider.nstat_lookup = nstat_tcp_lookup;
    nstat_tcp_provider.nstat_gone = nstat_tcp_gone;
    nstat_tcp_provider.nstat_counts = nstat_tcp_counts;
    nstat_tcp_provider.nstat_release = nstat_tcp_release;
    nstat_tcp_provider.nstat_watcher_add = nstat_tcp_add_watcher;
    nstat_tcp_provider.nstat_watcher_remove = nstat_tcp_remove_watcher;
    nstat_tcp_provider.nstat_copy_descriptor = nstat_tcp_copy_descriptor;
    nstat_tcp_provider.nstat_reporting_allowed = nstat_tcp_reporting_allowed;
    nstat_tcp_provider.next = nstat_providers;
    nstat_providers = &nstat_tcp_provider;
}
#pragma mark -- UDP Provider --

static nstat_provider   nstat_udp_provider;
static errno_t
nstat_udp_lookup(
    const void              *data,
    u_int32_t               length,
    nstat_provider_cookie_t *out_cookie)
{
    return nstat_tcpudp_lookup(&udbinfo, data, length, out_cookie);
}
static int
nstat_udp_gone(
    nstat_provider_cookie_t cookie)
{
    struct nstat_tucookie *tucookie =
        (struct nstat_tucookie *)cookie;
    struct inpcb *inp;

    return (!(inp = tucookie->inp) ||
        inp->inp_state == INPCB_STATE_DEAD) ? 1 : 0;
}
static errno_t
nstat_udp_counts(
    nstat_provider_cookie_t cookie,
    struct nstat_counts     *out_counts,
    int                     *out_gone)
{
    struct nstat_tucookie *tucookie =
        (struct nstat_tucookie *)cookie;

    if (out_gone) *out_gone = 0;

    // if the pcb is in the dead state, we should stop using it
    if (nstat_udp_gone(cookie))
    {
        if (out_gone) *out_gone = 1;
        if (!tucookie->inp)
            return EINVAL;
    }
    struct inpcb *inp = tucookie->inp;

    atomic_get_64(out_counts->nstat_rxpackets, &inp->inp_stat->rxpackets);
    atomic_get_64(out_counts->nstat_rxbytes, &inp->inp_stat->rxbytes);
    atomic_get_64(out_counts->nstat_txpackets, &inp->inp_stat->txpackets);
    atomic_get_64(out_counts->nstat_txbytes, &inp->inp_stat->txbytes);
    atomic_get_64(out_counts->nstat_cell_rxbytes, &inp->inp_cstat->rxbytes);
    atomic_get_64(out_counts->nstat_cell_txbytes, &inp->inp_cstat->txbytes);
    atomic_get_64(out_counts->nstat_wifi_rxbytes, &inp->inp_wstat->rxbytes);
    atomic_get_64(out_counts->nstat_wifi_txbytes, &inp->inp_wstat->txbytes);
    atomic_get_64(out_counts->nstat_wired_rxbytes, &inp->inp_Wstat->rxbytes);
    atomic_get_64(out_counts->nstat_wired_txbytes, &inp->inp_Wstat->txbytes);

    return 0;
}
static void
nstat_udp_release(
    nstat_provider_cookie_t cookie,
    int locked)
{
    struct nstat_tucookie *tucookie =
        (struct nstat_tucookie *)cookie;

    nstat_tucookie_release_internal(tucookie, locked);
}
static errno_t
nstat_udp_add_watcher(
    nstat_control_state *state)
{
    struct inpcb *inp;
    struct nstat_tucookie *cookie;

    OSIncrementAtomic(&nstat_udp_watchers);

    lck_rw_lock_shared(udbinfo.ipi_lock);
    // Add all current UDP inpcbs.
    LIST_FOREACH(inp, udbinfo.ipi_listhead, inp_list)
    {
        cookie = nstat_tucookie_alloc_ref(inp);
        if (cookie == NULL)
            continue;
        if (nstat_control_source_add(0, state, &nstat_udp_provider,
            cookie) != 0)
        {
            nstat_tucookie_release(cookie);
            break;
        }
    }

    lck_rw_done(udbinfo.ipi_lock);

    return 0;
}
static void
nstat_udp_remove_watcher(
    __unused nstat_control_state    *state)
{
    OSDecrementAtomic(&nstat_udp_watchers);
}
__private_extern__ void
nstat_udp_new_pcb(
    struct inpcb    *inp)
{
    struct nstat_tucookie *cookie;

    if (nstat_udp_watchers == 0)
        return;

    socket_lock(inp->inp_socket, 0);
    lck_mtx_lock(&nstat_mtx);
    nstat_control_state *state;
    for (state = nstat_controls; state; state = state->ncs_next)
    {
        if ((state->ncs_watching & (1 << NSTAT_PROVIDER_UDP_KERNEL)) != 0)
        {
            // this client is watching udp
            // acquire a reference for it
            cookie = nstat_tucookie_alloc_ref_locked(inp);
            if (cookie == NULL)
                continue;
            // add the source, if that fails, release the reference
            if (nstat_control_source_add(0, state,
                &nstat_udp_provider, cookie) != 0)
            {
                nstat_tucookie_release_locked(cookie);
                break;
            }
        }
    }
    lck_mtx_unlock(&nstat_mtx);
    socket_unlock(inp->inp_socket, 0);
}
static errno_t
nstat_udp_copy_descriptor(
    nstat_provider_cookie_t cookie,
    void                    *data,
    u_int32_t               len)
{
    if (len < sizeof(nstat_udp_descriptor))
    {
        return EINVAL;
    }

    if (nstat_udp_gone(cookie))
        return EINVAL;

    struct nstat_tucookie   *tucookie =
        (struct nstat_tucookie *)cookie;
    nstat_udp_descriptor    *desc = (nstat_udp_descriptor*)data;
    struct inpcb            *inp = tucookie->inp;

    bzero(desc, sizeof(*desc));

    if (tucookie->cached == false) {
        if (inp->inp_vflag & INP_IPV6)
        {
            nstat_ip6_to_sockaddr(&inp->in6p_laddr, inp->inp_lport,
                &desc->local.v6, sizeof(desc->local.v6));
            nstat_ip6_to_sockaddr(&inp->in6p_faddr, inp->inp_fport,
                &desc->remote.v6, sizeof(desc->remote.v6));
        }
        else if (inp->inp_vflag & INP_IPV4)
        {
            nstat_ip_to_sockaddr(&inp->inp_laddr, inp->inp_lport,
                &desc->local.v4, sizeof(desc->local.v4));
            nstat_ip_to_sockaddr(&inp->inp_faddr, inp->inp_fport,
                &desc->remote.v4, sizeof(desc->remote.v4));
        }
        desc->ifnet_properties = nstat_inpcb_to_flags(inp);
    }
    else
    {
        if (inp->inp_vflag & INP_IPV6)
        {
            memcpy(&desc->local.v6, &tucookie->local.v6,
                sizeof(desc->local.v6));
            memcpy(&desc->remote.v6, &tucookie->remote.v6,
                sizeof(desc->remote.v6));
        }
        else if (inp->inp_vflag & INP_IPV4)
        {
            memcpy(&desc->local.v4, &tucookie->local.v4,
                sizeof(desc->local.v4));
            memcpy(&desc->remote.v4, &tucookie->remote.v4,
                sizeof(desc->remote.v4));
        }
        desc->ifnet_properties = tucookie->ifnet_properties;
    }

    if (inp->inp_last_outifp)
        desc->ifindex = inp->inp_last_outifp->if_index;
    else
        desc->ifindex = tucookie->if_index;

    struct socket *so = inp->inp_socket;
    if (so)
    {
        // TBD - take the socket lock around these to make sure
        // they're in sync?
        desc->upid = so->last_upid;
        desc->pid = so->last_pid;
        proc_name(desc->pid, desc->pname, sizeof(desc->pname));
        if (desc->pname[0] == 0)
        {
            strlcpy(desc->pname, tucookie->pname,
                sizeof(desc->pname));
        }
        else
        {
            desc->pname[sizeof(desc->pname) - 1] = 0;
            strlcpy(tucookie->pname, desc->pname,
                sizeof(tucookie->pname));
        }
        memcpy(desc->uuid, so->last_uuid, sizeof(so->last_uuid));
        memcpy(desc->vuuid, so->so_vuuid, sizeof(so->so_vuuid));
        if (so->so_flags & SOF_DELEGATED) {
            desc->eupid = so->e_upid;
            desc->epid = so->e_pid;
            memcpy(desc->euuid, so->e_uuid, sizeof(so->e_uuid));
        } else {
            desc->eupid = desc->upid;
            desc->epid = desc->pid;
            memcpy(desc->euuid, desc->uuid, sizeof(desc->uuid));
        }
        desc->rcvbufsize = so->so_rcv.sb_hiwat;
        desc->rcvbufused = so->so_rcv.sb_cc;
        desc->traffic_class = so->so_traffic_class;
    }

    return 0;
}
static bool
nstat_udp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter)
{
    return nstat_tcpudp_reporting_allowed(cookie, filter, TRUE);
}
static void
nstat_init_udp_provider(void)
{
    bzero(&nstat_udp_provider, sizeof(nstat_udp_provider));
    nstat_udp_provider.nstat_provider_id = NSTAT_PROVIDER_UDP_KERNEL;
    nstat_udp_provider.nstat_descriptor_length = sizeof(nstat_udp_descriptor);
    nstat_udp_provider.nstat_lookup = nstat_udp_lookup;
    nstat_udp_provider.nstat_gone = nstat_udp_gone;
    nstat_udp_provider.nstat_counts = nstat_udp_counts;
    nstat_udp_provider.nstat_watcher_add = nstat_udp_add_watcher;
    nstat_udp_provider.nstat_watcher_remove = nstat_udp_remove_watcher;
    nstat_udp_provider.nstat_copy_descriptor = nstat_udp_copy_descriptor;
    nstat_udp_provider.nstat_release = nstat_udp_release;
    nstat_udp_provider.nstat_reporting_allowed = nstat_udp_reporting_allowed;
    nstat_udp_provider.next = nstat_providers;
    nstat_providers = &nstat_udp_provider;
}
#pragma mark -- TCP/UDP Userland --

// Almost all of this infrastructure is common to both TCP and UDP

static nstat_provider   nstat_userland_tcp_provider;
static nstat_provider   nstat_userland_udp_provider;
struct nstat_tu_shadow {
    tailq_entry_tu_shadow               shad_link;
    userland_stats_request_vals_fn      *shad_getvals_fn;
    userland_stats_provider_context     *shad_provider_context;
    u_int64_t                           shad_properties;
    int                                 shad_provider;
    uint32_t                            shad_magic;
};
// Magic number checking should remain in place until the userland provider has been fully proven
#define TU_SHADOW_MAGIC         0xfeedf00d
#define TU_SHADOW_UNMAGIC       0xdeaddeed

static tailq_head_tu_shadow nstat_userprot_shad_head = TAILQ_HEAD_INITIALIZER(nstat_userprot_shad_head);
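/*
 * Every userland-stack socket registers one shadow structure on the list
 * above at open time, whether or not anyone is watching; watcher
 * registration then walks the list and adds a source per existing shadow.
 */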
static errno_t
nstat_userland_tu_lookup(
    __unused const void                 *data,
    __unused u_int32_t                  length,
    __unused nstat_provider_cookie_t    *out_cookie)
{
    // Looking up a specific connection is not supported
    return ENOTSUP;
}
static int
nstat_userland_tu_gone(
    __unused nstat_provider_cookie_t    cookie)
{
    // Returns non-zero if the source has gone.
    // We don't keep a source hanging around, so the answer is always 0
    return 0;
}
static errno_t
nstat_userland_tu_counts(
    nstat_provider_cookie_t cookie,
    struct nstat_counts     *out_counts,
    int                     *out_gone)
{
    struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie;
    assert(shad->shad_magic == TU_SHADOW_MAGIC);

    bool result = (*shad->shad_getvals_fn)(shad->shad_provider_context, out_counts, NULL);

    if (out_gone) *out_gone = 0;

    return (result)? 0 : EIO;
}
static errno_t
nstat_userland_tu_copy_descriptor(
    nstat_provider_cookie_t cookie,
    void                    *data,
    __unused u_int32_t      len)
{
    struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie;
    assert(shad->shad_magic == TU_SHADOW_MAGIC);

    bool result = (*shad->shad_getvals_fn)(shad->shad_provider_context, NULL, data);

    return (result)? 0 : EIO;
}
static void
nstat_userland_tu_release(
    __unused nstat_provider_cookie_t    cookie,
    __unused int locked)
{
    // Called when a nstat_src is detached.
    // We don't reference count or ask for delayed release so nothing to do here.
}
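/*
 * Shared filter check for userland sources: with no
 * NSTAT_FILTER_SPECIFIC_USER bits set everything passes; otherwise at least
 * one of the pid/epid/uuid/euuid criteria must match.
 */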
static bool
check_reporting_for_user(nstat_provider_filter *filter, pid_t pid, pid_t epid, uuid_t *uuid, uuid_t *euuid)
{
    bool retval = true;

    if ((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0)
    {
        retval = false;

        if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_PID) != 0) &&
            (filter->npf_pid == pid))
        {
            retval = true;
        }
        else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EPID) != 0) &&
            (filter->npf_pid == epid))
        {
            retval = true;
        }
        else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_UUID) != 0) &&
            (memcmp(filter->npf_uuid, uuid, sizeof(*uuid)) == 0))
        {
            retval = true;
        }
        else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EUUID) != 0) &&
            (memcmp(filter->npf_uuid, euuid, sizeof(*euuid)) == 0))
        {
            retval = true;
        }
    }
    return retval;
}
static bool
nstat_userland_tcp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter)
{
    bool retval = true;

    if ((filter->npf_flags & (NSTAT_FILTER_IFNET_FLAGS|NSTAT_FILTER_SPECIFIC_USER)) != 0)
    {
        nstat_tcp_descriptor tcp_desc;  // Stack allocation - OK or pushing the limits too far?
        struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie;

        assert(shad->shad_magic == TU_SHADOW_MAGIC);

        if ((*shad->shad_getvals_fn)(shad->shad_provider_context, NULL, &tcp_desc))
        {
            if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0)
            {
                if ((filter->npf_flags & tcp_desc.ifnet_properties) == 0)
                {
                    return false;
                }
            }
            if ((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0)
            {
                retval = check_reporting_for_user(filter, (pid_t)tcp_desc.pid, (pid_t)tcp_desc.epid,
                    &tcp_desc.uuid, &tcp_desc.euuid);
            }
        }
        else
        {
            retval = false; // No further information, so might as well give up now.
        }
    }
    return retval;
}
static bool
nstat_userland_udp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter)
{
    bool retval = true;

    if ((filter->npf_flags & (NSTAT_FILTER_IFNET_FLAGS|NSTAT_FILTER_SPECIFIC_USER)) != 0)
    {
        nstat_udp_descriptor udp_desc;  // Stack allocation - OK or pushing the limits too far?
        struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie;

        assert(shad->shad_magic == TU_SHADOW_MAGIC);

        if ((*shad->shad_getvals_fn)(shad->shad_provider_context, NULL, &udp_desc))
        {
            if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0)
            {
                if ((filter->npf_flags & udp_desc.ifnet_properties) == 0)
                {
                    return false;
                }
            }
            if ((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0)
            {
                retval = check_reporting_for_user(filter, (pid_t)udp_desc.pid, (pid_t)udp_desc.epid,
                    &udp_desc.uuid, &udp_desc.euuid);
            }
        }
        else
        {
            retval = false; // No further information, so might as well give up now.
        }
    }
    return retval;
}
static errno_t
nstat_userland_tcp_add_watcher(
    nstat_control_state *state)
{
    struct nstat_tu_shadow *shad;

    OSIncrementAtomic(&nstat_userland_tcp_watchers);

    lck_mtx_lock(&nstat_mtx);

    TAILQ_FOREACH(shad, &nstat_userprot_shad_head, shad_link) {
        assert(shad->shad_magic == TU_SHADOW_MAGIC);

        if (shad->shad_provider == NSTAT_PROVIDER_TCP_USERLAND)
        {
            int result = nstat_control_source_add(0, state, &nstat_userland_tcp_provider, shad);
            if (result != 0)
            {
                printf("%s - nstat_control_source_add returned %d\n", __func__, result);
            }
        }
    }
    lck_mtx_unlock(&nstat_mtx);

    return 0;
}
static errno_t
nstat_userland_udp_add_watcher(
    nstat_control_state *state)
{
    struct nstat_tu_shadow *shad;

    OSIncrementAtomic(&nstat_userland_udp_watchers);

    lck_mtx_lock(&nstat_mtx);

    TAILQ_FOREACH(shad, &nstat_userprot_shad_head, shad_link) {
        assert(shad->shad_magic == TU_SHADOW_MAGIC);

        if (shad->shad_provider == NSTAT_PROVIDER_UDP_USERLAND)
        {
            int result = nstat_control_source_add(0, state, &nstat_userland_udp_provider, shad);
            if (result != 0)
            {
                printf("%s - nstat_control_source_add returned %d\n", __func__, result);
            }
        }
    }
    lck_mtx_unlock(&nstat_mtx);

    return 0;
}
static void
nstat_userland_tcp_remove_watcher(
    __unused nstat_control_state    *state)
{
    OSDecrementAtomic(&nstat_userland_tcp_watchers);
}

static void
nstat_userland_udp_remove_watcher(
    __unused nstat_control_state    *state)
{
    OSDecrementAtomic(&nstat_userland_udp_watchers);
}
static void
nstat_init_userland_tcp_provider(void)
{
    bzero(&nstat_userland_tcp_provider, sizeof(nstat_userland_tcp_provider));
    nstat_userland_tcp_provider.nstat_descriptor_length = sizeof(nstat_tcp_descriptor);
    nstat_userland_tcp_provider.nstat_provider_id = NSTAT_PROVIDER_TCP_USERLAND;
    nstat_userland_tcp_provider.nstat_lookup = nstat_userland_tu_lookup;
    nstat_userland_tcp_provider.nstat_gone = nstat_userland_tu_gone;
    nstat_userland_tcp_provider.nstat_counts = nstat_userland_tu_counts;
    nstat_userland_tcp_provider.nstat_release = nstat_userland_tu_release;
    nstat_userland_tcp_provider.nstat_watcher_add = nstat_userland_tcp_add_watcher;
    nstat_userland_tcp_provider.nstat_watcher_remove = nstat_userland_tcp_remove_watcher;
    nstat_userland_tcp_provider.nstat_copy_descriptor = nstat_userland_tu_copy_descriptor;
    nstat_userland_tcp_provider.nstat_reporting_allowed = nstat_userland_tcp_reporting_allowed;
    nstat_userland_tcp_provider.next = nstat_providers;
    nstat_providers = &nstat_userland_tcp_provider;
}
static void
nstat_init_userland_udp_provider(void)
{
    bzero(&nstat_userland_udp_provider, sizeof(nstat_userland_udp_provider));
    nstat_userland_udp_provider.nstat_descriptor_length = sizeof(nstat_udp_descriptor);
    nstat_userland_udp_provider.nstat_provider_id = NSTAT_PROVIDER_UDP_USERLAND;
    nstat_userland_udp_provider.nstat_lookup = nstat_userland_tu_lookup;
    nstat_userland_udp_provider.nstat_gone = nstat_userland_tu_gone;
    nstat_userland_udp_provider.nstat_counts = nstat_userland_tu_counts;
    nstat_userland_udp_provider.nstat_release = nstat_userland_tu_release;
    nstat_userland_udp_provider.nstat_watcher_add = nstat_userland_udp_add_watcher;
    nstat_userland_udp_provider.nstat_watcher_remove = nstat_userland_udp_remove_watcher;
    nstat_userland_udp_provider.nstat_copy_descriptor = nstat_userland_tu_copy_descriptor;
    nstat_userland_udp_provider.nstat_reporting_allowed = nstat_userland_udp_reporting_allowed;
    nstat_userland_udp_provider.next = nstat_providers;
    nstat_providers = &nstat_userland_udp_provider;
}
// Things get started with a call to netstats to say that there's a new connection:
__private_extern__ nstat_userland_context
ntstat_userland_stats_open(userland_stats_provider_context *ctx,
    int provider_id,
    u_int64_t properties,
    userland_stats_request_vals_fn req_fn)
{
    struct nstat_tu_shadow *shad;

    if ((provider_id != NSTAT_PROVIDER_TCP_USERLAND) && (provider_id != NSTAT_PROVIDER_UDP_USERLAND))
    {
        printf("%s - incorrect provider is supplied, %d\n", __func__, provider_id);
        return NULL;
    }

    shad = OSMalloc(sizeof(*shad), nstat_malloc_tag);
    if (shad == NULL)
        return NULL;

    shad->shad_getvals_fn = req_fn;
    shad->shad_provider_context = ctx;
    shad->shad_provider = provider_id;
    shad->shad_properties = properties;
    shad->shad_magic = TU_SHADOW_MAGIC;

    lck_mtx_lock(&nstat_mtx);
    nstat_control_state *state;

    // Even if there are no watchers, we save the shadow structure
    TAILQ_INSERT_HEAD(&nstat_userprot_shad_head, shad, shad_link);

    for (state = nstat_controls; state; state = state->ncs_next)
    {
        if ((state->ncs_watching & (1 << provider_id)) != 0)
        {
            // this client is watching tcp/udp userland
            int result = nstat_control_source_add(0, state, &nstat_userland_tcp_provider, shad);
            if (result != 0)
            {
                printf("%s - nstat_control_source_add returned %d\n", __func__, result);
            }
        }
    }
    lck_mtx_unlock(&nstat_mtx);

    return (nstat_userland_context)shad;
}
__private_extern__ void
ntstat_userland_stats_close(nstat_userland_context nstat_ctx)
{
    struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)nstat_ctx;
    nstat_src *dead_list = NULL;

    if (shad == NULL)
        return;

    assert(shad->shad_magic == TU_SHADOW_MAGIC);

    lck_mtx_lock(&nstat_mtx);
    if (nstat_userland_udp_watchers != 0 || nstat_userland_tcp_watchers != 0)
    {
        nstat_control_state *state;
        nstat_src *src, *prevsrc;
        errno_t result;

        for (state = nstat_controls; state; state = state->ncs_next)
        {
            lck_mtx_lock(&state->mtx);
            for (prevsrc = NULL, src = state->ncs_srcs; src;
                prevsrc = src, src = src->next)
            {
                if (shad == (struct nstat_tu_shadow *)src->cookie)
                    break;
            }

            if (src)
            {
                result = nstat_control_send_goodbye(state, src);

                if (prevsrc)
                    prevsrc->next = src->next;
                else
                    state->ncs_srcs = src->next;

                src->next = dead_list;
                dead_list = src;
            }
            lck_mtx_unlock(&state->mtx);
        }
    }
    TAILQ_REMOVE(&nstat_userprot_shad_head, shad, shad_link);

    lck_mtx_unlock(&nstat_mtx);

    while (dead_list)
    {
        nstat_src *src = dead_list;
        dead_list = src->next;

        nstat_control_cleanup_source(NULL, src, TRUE);
    }

    shad->shad_magic = TU_SHADOW_UNMAGIC;

    OSFree(shad, sizeof(*shad), nstat_malloc_tag);
}
__private_extern__ void
ntstat_userland_stats_event(
    __unused nstat_userland_context context,
    __unused userland_stats_event_t event)
{
    // This is a dummy for when we hook up event reporting to NetworkStatistics.
    // See <rdar://problem/23022832> NetworkStatistics should provide opt-in notifications
}
#pragma mark -- ifnet Provider --

static nstat_provider   nstat_ifnet_provider;

/*
 * We store a pointer to the ifnet and the original threshold
 * requested by the client.
 */
struct nstat_ifnet_cookie
{
    struct ifnet    *ifp;
    uint64_t        threshold;
};
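/*
 * Adding an ifnet source doubles as a threshold request: the interface's
 * if_data_threshold is lowered to the smallest threshold any client asked
 * for, and is recomputed from the surviving clients when a source is
 * released.
 */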
2292 nstat_provider_cookie_t
*out_cookie
)
2294 const nstat_ifnet_add_param
*param
= (const nstat_ifnet_add_param
*)data
;
2296 boolean_t changed
= FALSE
;
2297 nstat_control_state
*state
;
2299 struct nstat_ifnet_cookie
*cookie
;
2301 if (length
< sizeof(*param
) || param
->threshold
< 1024*1024)
2303 if (nstat_privcheck
!= 0) {
2304 errno_t result
= priv_check_cred(kauth_cred_get(),
2305 PRIV_NET_PRIVILEGED_NETWORK_STATISTICS
, 0);
2309 cookie
= OSMalloc(sizeof(*cookie
), nstat_malloc_tag
);
2312 bzero(cookie
, sizeof(*cookie
));
2314 ifnet_head_lock_shared();
2315 TAILQ_FOREACH(ifp
, &ifnet_head
, if_link
)
2317 ifnet_lock_exclusive(ifp
);
2318 if (ifp
->if_index
== param
->ifindex
)
2321 cookie
->threshold
= param
->threshold
;
2322 *out_cookie
= cookie
;
2323 if (!ifp
->if_data_threshold
||
2324 ifp
->if_data_threshold
> param
->threshold
)
2327 ifp
->if_data_threshold
= param
->threshold
;
2329 ifnet_lock_done(ifp
);
2330 ifnet_reference(ifp
);
2333 ifnet_lock_done(ifp
);
2338 * When we change the threshold to something smaller, we notify
2339 * all of our clients with a description message.
2340 * We won't send a message to the client we are currently serving
2341 * because it has no `ifnet source' yet.
2345 lck_mtx_lock(&nstat_mtx
);
2346 for (state
= nstat_controls
; state
; state
= state
->ncs_next
)
2348 lck_mtx_lock(&state
->mtx
);
2349 for (src
= state
->ncs_srcs
; src
; src
= src
->next
)
2351 if (src
->provider
!= &nstat_ifnet_provider
)
2353 nstat_control_send_description(state
, src
, 0, 0);
2355 lck_mtx_unlock(&state
->mtx
);
2357 lck_mtx_unlock(&nstat_mtx
);
2359 if (cookie
->ifp
== NULL
)
2360 OSFree(cookie
, sizeof(*cookie
), nstat_malloc_tag
);
2362 return ifp
? 0 : EINVAL
;
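/*
 * Worked example of the threshold logic above: if_data_threshold tracks the
 * smallest threshold any client has asked for.  With no client attached the
 * field is 0 (disabled); a client asking for 5*1024*1024 bytes sets it to
 * 5MB; a second client asking for 2MB lowers it to 2MB and triggers the
 * description broadcast, while a third client asking for 8MB leaves it
 * untouched.  Requests below the 1MB floor are rejected with EINVAL before
 * any of this runs.
 */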
static int
nstat_ifnet_gone(
	nstat_provider_cookie_t	cookie)
{
	struct ifnet		*ifp;
	struct nstat_ifnet_cookie *ifcookie =
	    (struct nstat_ifnet_cookie *)cookie;

	ifnet_head_lock_shared();
	TAILQ_FOREACH(ifp, &ifnet_head, if_link)
	{
		if (ifp == ifcookie->ifp)
			break;
	}
	ifnet_head_done();

	return ifp ? 0 : 1;
}
static errno_t
nstat_ifnet_counts(
	nstat_provider_cookie_t	cookie,
	struct nstat_counts	*out_counts,
	int			*out_gone)
{
	struct nstat_ifnet_cookie *ifcookie =
	    (struct nstat_ifnet_cookie *)cookie;
	struct ifnet		*ifp = ifcookie->ifp;

	if (out_gone) *out_gone = 0;

	// if the ifnet is gone, we should stop using it
	if (nstat_ifnet_gone(cookie))
	{
		if (out_gone) *out_gone = 1;
		return EINVAL;
	}

	bzero(out_counts, sizeof(*out_counts));
	out_counts->nstat_rxpackets = ifp->if_ipackets;
	out_counts->nstat_rxbytes = ifp->if_ibytes;
	out_counts->nstat_txpackets = ifp->if_opackets;
	out_counts->nstat_txbytes = ifp->if_obytes;
	out_counts->nstat_cell_rxbytes = out_counts->nstat_cell_txbytes = 0;
	return 0;
}
static void
nstat_ifnet_release(
	nstat_provider_cookie_t	cookie,
	__unused int		locked)
{
	struct nstat_ifnet_cookie *ifcookie;
	struct ifnet		*ifp;
	nstat_control_state	*state;
	nstat_src		*src;
	uint64_t		minthreshold = UINT64_MAX;

	/*
	 * Find all the clients that requested a threshold
	 * for this ifnet and re-calculate if_data_threshold.
	 */
	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next)
	{
		lck_mtx_lock(&state->mtx);
		for (src = state->ncs_srcs; src; src = src->next)
		{
			/* Skip the provider we are about to detach. */
			if (src->provider != &nstat_ifnet_provider ||
			    src->cookie == cookie)
				continue;
			ifcookie = (struct nstat_ifnet_cookie *)src->cookie;
			if (ifcookie->threshold < minthreshold)
				minthreshold = ifcookie->threshold;
		}
		lck_mtx_unlock(&state->mtx);
	}
	lck_mtx_unlock(&nstat_mtx);
	/*
	 * Reset if_data_threshold or disable it.
	 */
	ifcookie = (struct nstat_ifnet_cookie *)cookie;
	ifp = ifcookie->ifp;
	if (ifnet_is_attached(ifp, 1)) {
		ifnet_lock_exclusive(ifp);
		if (minthreshold == UINT64_MAX)
			ifp->if_data_threshold = 0;
		else
			ifp->if_data_threshold = minthreshold;
		ifnet_lock_done(ifp);
		ifnet_decr_iorefcnt(ifp);
	}
	ifnet_release(ifp);
	OSFree(ifcookie, sizeof(*ifcookie), nstat_malloc_tag);
}
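/*
 * Note on the recalculation above: detaching one client forces a fresh
 * minimum over every remaining ifnet source.  For example, with clients
 * watching at 2MB and 5MB, releasing the 2MB source leaves minthreshold at
 * 5MB; releasing the last source leaves minthreshold at UINT64_MAX, which
 * is translated into if_data_threshold = 0, i.e. threshold reporting off.
 */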
static void
nstat_ifnet_copy_link_status(
	struct ifnet			*ifp,
	struct nstat_ifnet_descriptor	*desc)
{
	struct if_link_status *ifsr = ifp->if_link_status;
	nstat_ifnet_desc_link_status *link_status = &desc->link_status;

	link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_NONE;
	if (ifsr == NULL)
		return;

	lck_rw_lock_shared(&ifp->if_link_status_lock);

	if (ifp->if_type == IFT_CELLULAR) {

		nstat_ifnet_desc_cellular_status *cell_status = &link_status->u.cellular;
		struct if_cellular_status_v1 *if_cell_sr =
		    &ifsr->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;

		if (ifsr->ifsr_version != IF_CELLULAR_STATUS_REPORT_VERSION_1)
			goto done;

		link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_CELLULAR;

		if (if_cell_sr->valid_bitmask & IF_CELL_LINK_QUALITY_METRIC_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_LINK_QUALITY_METRIC_VALID;
			cell_status->link_quality_metric = if_cell_sr->link_quality_metric;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_EFFECTIVE_BANDWIDTH_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_EFFECTIVE_BANDWIDTH_VALID;
			cell_status->ul_effective_bandwidth = if_cell_sr->ul_effective_bandwidth;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_BANDWIDTH_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_BANDWIDTH_VALID;
			cell_status->ul_max_bandwidth = if_cell_sr->ul_max_bandwidth;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_MIN_LATENCY_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MIN_LATENCY_VALID;
			cell_status->ul_min_latency = if_cell_sr->ul_min_latency;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_EFFECTIVE_LATENCY_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_EFFECTIVE_LATENCY_VALID;
			cell_status->ul_effective_latency = if_cell_sr->ul_effective_latency;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_LATENCY_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_LATENCY_VALID;
			cell_status->ul_max_latency = if_cell_sr->ul_max_latency;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_RETXT_LEVEL_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_VALID;
			if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_NONE)
				cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_NONE;
			else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_LOW)
				cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_LOW;
			else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_MEDIUM)
				cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_MEDIUM;
			else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_HIGH)
				cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_HIGH;
			else
				cell_status->valid_bitmask &= ~NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_VALID;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_BYTES_LOST_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_BYTES_LOST_VALID;
			cell_status->ul_bytes_lost = if_cell_sr->ul_bytes_lost;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_MIN_QUEUE_SIZE_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MIN_QUEUE_SIZE_VALID;
			cell_status->ul_min_queue_size = if_cell_sr->ul_min_queue_size;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_AVG_QUEUE_SIZE_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_AVG_QUEUE_SIZE_VALID;
			cell_status->ul_avg_queue_size = if_cell_sr->ul_avg_queue_size;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_QUEUE_SIZE_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_QUEUE_SIZE_VALID;
			cell_status->ul_max_queue_size = if_cell_sr->ul_max_queue_size;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_DL_EFFECTIVE_BANDWIDTH_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_DL_EFFECTIVE_BANDWIDTH_VALID;
			cell_status->dl_effective_bandwidth = if_cell_sr->dl_effective_bandwidth;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_DL_MAX_BANDWIDTH_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_DL_MAX_BANDWIDTH_VALID;
			cell_status->dl_max_bandwidth = if_cell_sr->dl_max_bandwidth;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_CONFIG_INACTIVITY_TIME_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_CONFIG_INACTIVITY_TIME_VALID;
			cell_status->config_inactivity_time = if_cell_sr->config_inactivity_time;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_CONFIG_BACKOFF_TIME_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_CONFIG_BACKOFF_TIME_VALID;
			cell_status->config_backoff_time = if_cell_sr->config_backoff_time;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_MSS_RECOMMENDED_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_MSS_RECOMMENDED_VALID;
			cell_status->mss_recommended = if_cell_sr->mss_recommended;
		}
	} else if (ifp->if_subfamily == IFNET_SUBFAMILY_WIFI) {

		nstat_ifnet_desc_wifi_status *wifi_status = &link_status->u.wifi;
		struct if_wifi_status_v1 *if_wifi_sr =
		    &ifsr->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;

		if (ifsr->ifsr_version != IF_WIFI_STATUS_REPORT_VERSION_1)
			goto done;

		link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_WIFI;

		if (if_wifi_sr->valid_bitmask & IF_WIFI_LINK_QUALITY_METRIC_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_LINK_QUALITY_METRIC_VALID;
			wifi_status->link_quality_metric = if_wifi_sr->link_quality_metric;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID;
			wifi_status->ul_effective_bandwidth = if_wifi_sr->ul_effective_bandwidth;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MAX_BANDWIDTH_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MAX_BANDWIDTH_VALID;
			wifi_status->ul_max_bandwidth = if_wifi_sr->ul_max_bandwidth;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MIN_LATENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MIN_LATENCY_VALID;
			wifi_status->ul_min_latency = if_wifi_sr->ul_min_latency;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_EFFECTIVE_LATENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_EFFECTIVE_LATENCY_VALID;
			wifi_status->ul_effective_latency = if_wifi_sr->ul_effective_latency;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MAX_LATENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MAX_LATENCY_VALID;
			wifi_status->ul_max_latency = if_wifi_sr->ul_max_latency;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_RETXT_LEVEL_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_VALID;
			if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_NONE)
				wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_NONE;
			else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_LOW)
				wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_LOW;
			else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_MEDIUM)
				wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_MEDIUM;
			else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_HIGH)
				wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_HIGH;
			else
				wifi_status->valid_bitmask &= ~NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_VALID;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_BYTES_LOST_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_BYTES_LOST_VALID;
			wifi_status->ul_bytes_lost = if_wifi_sr->ul_bytes_lost;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_ERROR_RATE_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_ERROR_RATE_VALID;
			wifi_status->ul_error_rate = if_wifi_sr->ul_error_rate;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID;
			wifi_status->dl_effective_bandwidth = if_wifi_sr->dl_effective_bandwidth;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MAX_BANDWIDTH_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MAX_BANDWIDTH_VALID;
			wifi_status->dl_max_bandwidth = if_wifi_sr->dl_max_bandwidth;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MIN_LATENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MIN_LATENCY_VALID;
			wifi_status->dl_min_latency = if_wifi_sr->dl_min_latency;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_EFFECTIVE_LATENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_EFFECTIVE_LATENCY_VALID;
			wifi_status->dl_effective_latency = if_wifi_sr->dl_effective_latency;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MAX_LATENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MAX_LATENCY_VALID;
			wifi_status->dl_max_latency = if_wifi_sr->dl_max_latency;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_ERROR_RATE_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_ERROR_RATE_VALID;
			wifi_status->dl_error_rate = if_wifi_sr->dl_error_rate;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_FREQUENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_VALID;
			if (if_wifi_sr->config_frequency == IF_WIFI_CONFIG_FREQUENCY_2_4_GHZ)
				wifi_status->config_frequency = NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_2_4_GHZ;
			else if (if_wifi_sr->config_frequency == IF_WIFI_CONFIG_FREQUENCY_5_0_GHZ)
				wifi_status->config_frequency = NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_5_0_GHZ;
			else
				wifi_status->valid_bitmask &= ~NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_VALID;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_MULTICAST_RATE_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_MULTICAST_RATE_VALID;
			wifi_status->config_multicast_rate = if_wifi_sr->config_multicast_rate;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_SCAN_COUNT_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_SCAN_COUNT_VALID;
			wifi_status->scan_count = if_wifi_sr->scan_count;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_SCAN_DURATION_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_SCAN_DURATION_VALID;
			wifi_status->scan_duration = if_wifi_sr->scan_duration;
		}
	}

done:
	lck_rw_done(&ifp->if_link_status_lock);
}
static u_int64_t nstat_ifnet_last_report_time = 0;
extern int tcp_report_stats_interval;
static void
nstat_ifnet_compute_percentages(struct if_tcp_ecn_perf_stat *ifst)
{
	/* Retransmit percentage */
	if (ifst->total_rxmitpkts > 0 && ifst->total_txpkts > 0) {
		/* shift by 10 for precision */
		ifst->rxmit_percent =
		    ((ifst->total_rxmitpkts << 10) * 100) / ifst->total_txpkts;
	} else {
		ifst->rxmit_percent = 0;
	}

	/* Out-of-order percentage */
	if (ifst->total_oopkts > 0 && ifst->total_rxpkts > 0) {
		/* shift by 10 for precision */
		ifst->oo_percent =
		    ((ifst->total_oopkts << 10) * 100) / ifst->total_rxpkts;
	} else {
		ifst->oo_percent = 0;
	}

	/* Reorder percentage */
	if (ifst->total_reorderpkts > 0 &&
	    (ifst->total_txpkts + ifst->total_rxpkts) > 0) {
		/* shift by 10 for precision */
		ifst->reorder_percent =
		    ((ifst->total_reorderpkts << 10) * 100) /
		    (ifst->total_txpkts + ifst->total_rxpkts);
	} else {
		ifst->reorder_percent = 0;
	}
}
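/*
 * Worked example of the fixed-point arithmetic above: the results are
 * percentages scaled by 2^10, so 5 retransmits out of 100 transmitted
 * packets yields (5 << 10) * 100 / 100 = 5120, which a consumer converts
 * back with 5120 / 1024 = 5%.  The shift keeps fractional precision
 * without using floating point, which kernel code avoids.
 */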
static void
nstat_ifnet_normalize_counter(struct if_tcp_ecn_stat *if_st)
{
	u_int64_t ecn_on_conn, ecn_off_conn;

	if (if_st == NULL)
		return;
	ecn_on_conn = if_st->ecn_client_success +
	    if_st->ecn_server_success;
	ecn_off_conn = if_st->ecn_off_conn +
	    (if_st->ecn_client_setup - if_st->ecn_client_success) +
	    (if_st->ecn_server_setup - if_st->ecn_server_success);

	/*
	 * report sack episodes, rst_drop and rxmit_drop
	 * as a ratio per connection, shift by 10 for precision
	 */
	if (ecn_on_conn > 0) {
		if_st->ecn_on.sack_episodes =
		    (if_st->ecn_on.sack_episodes << 10) / ecn_on_conn;
		if_st->ecn_on.rst_drop =
		    (if_st->ecn_on.rst_drop << 10) * 100 / ecn_on_conn;
		if_st->ecn_on.rxmit_drop =
		    (if_st->ecn_on.rxmit_drop << 10) * 100 / ecn_on_conn;
	} else {
		/* set to zero, just in case */
		if_st->ecn_on.sack_episodes = 0;
		if_st->ecn_on.rst_drop = 0;
		if_st->ecn_on.rxmit_drop = 0;
	}

	if (ecn_off_conn > 0) {
		if_st->ecn_off.sack_episodes =
		    (if_st->ecn_off.sack_episodes << 10) / ecn_off_conn;
		if_st->ecn_off.rst_drop =
		    (if_st->ecn_off.rst_drop << 10) * 100 / ecn_off_conn;
		if_st->ecn_off.rxmit_drop =
		    (if_st->ecn_off.rxmit_drop << 10) * 100 / ecn_off_conn;
	} else {
		if_st->ecn_off.sack_episodes = 0;
		if_st->ecn_off.rst_drop = 0;
		if_st->ecn_off.rxmit_drop = 0;
	}
	if_st->ecn_total_conn = ecn_off_conn + ecn_on_conn;
}
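/*
 * Accounting note for the normalization above: a connection counts as
 * "ECN on" only when client or server negotiation succeeded; every setup
 * that was attempted but did not succeed is folded into the "ECN off"
 * bucket together with connections that never attempted ECN.  For example,
 * ecn_client_setup = 10, ecn_client_success = 7, ecn_server_setup = 4,
 * ecn_server_success = 2 and ecn_off_conn = 20 gives
 * ecn_on_conn = 7 + 2 = 9 and ecn_off_conn = 20 + 3 + 2 = 25.
 */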
static void
nstat_ifnet_report_ecn_stats(void)
{
	u_int64_t uptime, last_report_time;
	struct nstat_sysinfo_data data;
	struct nstat_sysinfo_ifnet_ecn_stats *st;
	struct ifnet *ifp;

	uptime = net_uptime();

	if ((int)(uptime - nstat_ifnet_last_report_time) <
	    tcp_report_stats_interval)
		return;

	last_report_time = nstat_ifnet_last_report_time;
	nstat_ifnet_last_report_time = uptime;
	data.flags = NSTAT_SYSINFO_IFNET_ECN_STATS;
	st = &data.u.ifnet_ecn_stats;

	ifnet_head_lock_shared();
	TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
		if (ifp->if_ipv4_stat == NULL || ifp->if_ipv6_stat == NULL)
			continue;

		if ((ifp->if_refflags & (IFRF_ATTACHED | IFRF_DETACHING)) !=
		    IFRF_ATTACHED)
			continue;

		/* Limit reporting to Wifi, Ethernet and cellular. */
		if (!(IFNET_IS_ETHERNET(ifp) || IFNET_IS_CELLULAR(ifp)))
			continue;

		bzero(st, sizeof(*st));
		if (IFNET_IS_CELLULAR(ifp)) {
			st->ifnet_type = NSTAT_IFNET_ECN_TYPE_CELLULAR;
		} else if (IFNET_IS_WIFI(ifp)) {
			st->ifnet_type = NSTAT_IFNET_ECN_TYPE_WIFI;
		} else {
			st->ifnet_type = NSTAT_IFNET_ECN_TYPE_ETHERNET;
		}
		data.unsent_data_cnt = ifp->if_unsent_data_cnt;
		/* skip if there was no update since last report */
		if (ifp->if_ipv4_stat->timestamp <= 0 ||
		    ifp->if_ipv4_stat->timestamp < last_report_time)
			goto v6;
		st->ifnet_proto = NSTAT_IFNET_ECN_PROTO_IPV4;
		/* compute percentages using packet counts */
		nstat_ifnet_compute_percentages(&ifp->if_ipv4_stat->ecn_on);
		nstat_ifnet_compute_percentages(&ifp->if_ipv4_stat->ecn_off);
		nstat_ifnet_normalize_counter(ifp->if_ipv4_stat);
		bcopy(ifp->if_ipv4_stat, &st->ecn_stat,
		    sizeof(st->ecn_stat));
		nstat_sysinfo_send_data(&data);

		/* Zero the stats in ifp */
		bzero(ifp->if_ipv4_stat, sizeof(*ifp->if_ipv4_stat));
v6:
		/* skip if there was no update since last report */
		if (ifp->if_ipv6_stat->timestamp <= 0 ||
		    ifp->if_ipv6_stat->timestamp < last_report_time)
			continue;
		st->ifnet_proto = NSTAT_IFNET_ECN_PROTO_IPV6;

		/* compute percentages using packet counts */
		nstat_ifnet_compute_percentages(&ifp->if_ipv6_stat->ecn_on);
		nstat_ifnet_compute_percentages(&ifp->if_ipv6_stat->ecn_off);
		nstat_ifnet_normalize_counter(ifp->if_ipv6_stat);
		bcopy(ifp->if_ipv6_stat, &st->ecn_stat,
		    sizeof(st->ecn_stat));
		nstat_sysinfo_send_data(&data);

		/* Zero the stats in ifp */
		bzero(ifp->if_ipv6_stat, sizeof(*ifp->if_ipv6_stat));
	}
	ifnet_head_done();
}
static errno_t
nstat_ifnet_copy_descriptor(
	nstat_provider_cookie_t	cookie,
	void			*data,
	u_int32_t		len)
{
	nstat_ifnet_descriptor *desc = (nstat_ifnet_descriptor *)data;
	struct nstat_ifnet_cookie *ifcookie =
	    (struct nstat_ifnet_cookie *)cookie;
	struct ifnet *ifp = ifcookie->ifp;

	if (len < sizeof(nstat_ifnet_descriptor))
		return EINVAL;

	if (nstat_ifnet_gone(cookie))
		return EINVAL;

	bzero(desc, sizeof(*desc));
	ifnet_lock_shared(ifp);
	strlcpy(desc->name, ifp->if_xname, sizeof(desc->name));
	desc->ifindex = ifp->if_index;
	desc->threshold = ifp->if_data_threshold;
	desc->type = ifp->if_type;
	if (ifp->if_desc.ifd_len < sizeof(desc->description))
		memcpy(desc->description, ifp->if_desc.ifd_desc,
		    sizeof(desc->description));
	nstat_ifnet_copy_link_status(ifp, desc);
	ifnet_lock_done(ifp);
	return 0;
}
static void
nstat_init_ifnet_provider(void)
{
	bzero(&nstat_ifnet_provider, sizeof(nstat_ifnet_provider));
	nstat_ifnet_provider.nstat_provider_id = NSTAT_PROVIDER_IFNET;
	nstat_ifnet_provider.nstat_descriptor_length = sizeof(nstat_ifnet_descriptor);
	nstat_ifnet_provider.nstat_lookup = nstat_ifnet_lookup;
	nstat_ifnet_provider.nstat_gone = nstat_ifnet_gone;
	nstat_ifnet_provider.nstat_counts = nstat_ifnet_counts;
	nstat_ifnet_provider.nstat_watcher_add = NULL;
	nstat_ifnet_provider.nstat_watcher_remove = NULL;
	nstat_ifnet_provider.nstat_copy_descriptor = nstat_ifnet_copy_descriptor;
	nstat_ifnet_provider.nstat_release = nstat_ifnet_release;
	nstat_ifnet_provider.next = nstat_providers;
	nstat_providers = &nstat_ifnet_provider;
}
__private_extern__ void
nstat_ifnet_threshold_reached(unsigned int ifindex)
{
	nstat_control_state	*state;
	nstat_src		*src;
	struct ifnet		*ifp;
	struct nstat_ifnet_cookie *ifcookie;

	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next)
	{
		lck_mtx_lock(&state->mtx);
		for (src = state->ncs_srcs; src; src = src->next)
		{
			if (src->provider != &nstat_ifnet_provider)
				continue;
			ifcookie = (struct nstat_ifnet_cookie *)src->cookie;
			ifp = ifcookie->ifp;
			if (ifp->if_index != ifindex)
				continue;
			nstat_control_send_counts(state, src, 0, 0, NULL);
		}
		lck_mtx_unlock(&state->mtx);
	}
	lck_mtx_unlock(&nstat_mtx);
}
#pragma mark -- Sysinfo --

static void
nstat_set_keyval_scalar(nstat_sysinfo_keyval *kv, int key, u_int32_t val)
{
	kv->nstat_sysinfo_key = key;
	kv->nstat_sysinfo_flags = NSTAT_SYSINFO_FLAG_SCALAR;
	kv->u.nstat_sysinfo_scalar = val;
}
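/*
 * Each sysinfo report below is a flat array of these key/value pairs.  The
 * helper stamps one slot and callers advance through the array with the
 * kv[i++] idiom, so the VERIFY checks at the end of each case catch a
 * mismatch between the declared key count and the slots actually filled.
 */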
static void
nstat_sysinfo_send_data_internal(
	nstat_control_state	*control,
	nstat_sysinfo_data	*data)
{
	nstat_msg_sysinfo_counts *syscnt = NULL;
	size_t allocsize = 0, countsize = 0, nkeyvals = 0, finalsize = 0;
	nstat_sysinfo_keyval *kv;
	errno_t result = 0;
	size_t i = 0;

	allocsize = offsetof(nstat_msg_sysinfo_counts, counts);
	countsize = offsetof(nstat_sysinfo_counts, nstat_sysinfo_keyvals);
	finalsize = allocsize;

	/* get number of key-vals for each kind of stat */
	switch (data->flags)
	{
		case NSTAT_SYSINFO_MBUF_STATS:
			nkeyvals = sizeof(struct nstat_sysinfo_mbuf_stats) /
			    sizeof(u_int32_t);
			break;
		case NSTAT_SYSINFO_TCP_STATS:
			nkeyvals = sizeof(struct nstat_sysinfo_tcp_stats) /
			    sizeof(u_int32_t);
			break;
		case NSTAT_SYSINFO_IFNET_ECN_STATS:
			nkeyvals = (sizeof(struct if_tcp_ecn_stat) /
			    sizeof(u_int64_t));

			/* Two more keys for ifnet type and proto */
			nkeyvals += 2;

			/* One key for unsent data. */
			nkeyvals++;
			break;
		default:
			return;
	}
	countsize += sizeof(nstat_sysinfo_keyval) * nkeyvals;
	allocsize += countsize;

	syscnt = OSMalloc(allocsize, nstat_malloc_tag);
	if (syscnt == NULL)
		return;
	bzero(syscnt, allocsize);

	kv = (nstat_sysinfo_keyval *) &syscnt->counts.nstat_sysinfo_keyvals;
	switch (data->flags)
	{
		case NSTAT_SYSINFO_MBUF_STATS:
		{
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_KEY_MBUF_256B_TOTAL,
			    data->u.mb_stats.total_256b);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_KEY_MBUF_2KB_TOTAL,
			    data->u.mb_stats.total_2kb);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_KEY_MBUF_4KB_TOTAL,
			    data->u.mb_stats.total_4kb);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_MBUF_16KB_TOTAL,
			    data->u.mb_stats.total_16kb);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_KEY_SOCK_MBCNT,
			    data->u.mb_stats.sbmb_total);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_KEY_SOCK_ATMBLIMIT,
			    data->u.mb_stats.sb_atmbuflimit);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_MBUF_DRAIN_CNT,
			    data->u.mb_stats.draincnt);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_MBUF_MEM_RELEASED,
			    data->u.mb_stats.memreleased);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_KEY_SOCK_MBFLOOR,
			    data->u.mb_stats.sbmb_floor);
			VERIFY(i == nkeyvals);
			break;
		}
		case NSTAT_SYSINFO_TCP_STATS:
		{
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_KEY_IPV4_AVGRTT,
			    data->u.tcp_stats.ipv4_avgrtt);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_KEY_IPV6_AVGRTT,
			    data->u.tcp_stats.ipv6_avgrtt);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_KEY_SEND_PLR,
			    data->u.tcp_stats.send_plr);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_KEY_RECV_PLR,
			    data->u.tcp_stats.recv_plr);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_KEY_SEND_TLRTO,
			    data->u.tcp_stats.send_tlrto_rate);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_KEY_SEND_REORDERRATE,
			    data->u.tcp_stats.send_reorder_rate);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_CONNECTION_ATTEMPTS,
			    data->u.tcp_stats.connection_attempts);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_CONNECTION_ACCEPTS,
			    data->u.tcp_stats.connection_accepts);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_CLIENT_ENABLED,
			    data->u.tcp_stats.ecn_client_enabled);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_SERVER_ENABLED,
			    data->u.tcp_stats.ecn_server_enabled);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_CLIENT_SETUP,
			    data->u.tcp_stats.ecn_client_setup);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_SERVER_SETUP,
			    data->u.tcp_stats.ecn_server_setup);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_CLIENT_SUCCESS,
			    data->u.tcp_stats.ecn_client_success);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_SERVER_SUCCESS,
			    data->u.tcp_stats.ecn_server_success);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_NOT_SUPPORTED,
			    data->u.tcp_stats.ecn_not_supported);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_LOST_SYN,
			    data->u.tcp_stats.ecn_lost_syn);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_LOST_SYNACK,
			    data->u.tcp_stats.ecn_lost_synack);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_RECV_CE,
			    data->u.tcp_stats.ecn_recv_ce);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_RECV_ECE,
			    data->u.tcp_stats.ecn_recv_ece);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_SENT_ECE,
			    data->u.tcp_stats.ecn_sent_ece);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_CONN_RECV_CE,
			    data->u.tcp_stats.ecn_conn_recv_ce);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_CONN_RECV_ECE,
			    data->u.tcp_stats.ecn_conn_recv_ece);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_CONN_PLNOCE,
			    data->u.tcp_stats.ecn_conn_plnoce);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_CONN_PL_CE,
			    data->u.tcp_stats.ecn_conn_pl_ce);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_CONN_NOPL_CE,
			    data->u.tcp_stats.ecn_conn_nopl_ce);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_FALLBACK_SYNLOSS,
			    data->u.tcp_stats.ecn_fallback_synloss);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_FALLBACK_REORDER,
			    data->u.tcp_stats.ecn_fallback_reorder);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_FALLBACK_CE,
			    data->u.tcp_stats.ecn_fallback_ce);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_TFO_SYN_DATA_RCV,
			    data->u.tcp_stats.tfo_syn_data_rcv);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_TFO_COOKIE_REQ_RCV,
			    data->u.tcp_stats.tfo_cookie_req_rcv);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_TFO_COOKIE_SENT,
			    data->u.tcp_stats.tfo_cookie_sent);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_TFO_COOKIE_INVALID,
			    data->u.tcp_stats.tfo_cookie_invalid);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_TFO_COOKIE_REQ,
			    data->u.tcp_stats.tfo_cookie_req);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_TFO_COOKIE_RCV,
			    data->u.tcp_stats.tfo_cookie_rcv);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_TFO_SYN_DATA_SENT,
			    data->u.tcp_stats.tfo_syn_data_sent);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_TFO_SYN_DATA_ACKED,
			    data->u.tcp_stats.tfo_syn_data_acked);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_TFO_SYN_LOSS,
			    data->u.tcp_stats.tfo_syn_loss);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_TFO_BLACKHOLE,
			    data->u.tcp_stats.tfo_blackhole);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_TFO_COOKIE_WRONG,
			    data->u.tcp_stats.tfo_cookie_wrong);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_TFO_NO_COOKIE_RCV,
			    data->u.tcp_stats.tfo_no_cookie_rcv);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_TFO_HEURISTICS_DISABLE,
			    data->u.tcp_stats.tfo_heuristics_disable);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_TFO_SEND_BLACKHOLE,
			    data->u.tcp_stats.tfo_sndblackhole);
			VERIFY(i == nkeyvals);
			break;
		}
		case NSTAT_SYSINFO_IFNET_ECN_STATS:
		{
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_TYPE,
			    data->u.ifnet_ecn_stats.ifnet_type);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_PROTO,
			    data->u.ifnet_ecn_stats.ifnet_proto);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_CLIENT_SETUP,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_client_setup);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_SERVER_SETUP,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_server_setup);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_CLIENT_SUCCESS,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_client_success);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_SERVER_SUCCESS,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_server_success);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_PEER_NOSUPPORT,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_peer_nosupport);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_SYN_LOST,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_syn_lost);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_SYNACK_LOST,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_synack_lost);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_RECV_CE,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_recv_ce);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_RECV_ECE,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_recv_ece);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_CONN_RECV_CE,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_recv_ce);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_CONN_RECV_ECE,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_recv_ece);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_CONN_PLNOCE,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_plnoce);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_CONN_PLCE,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_plce);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_CONN_NOPLCE,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_noplce);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_FALLBACK_SYNLOSS,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_synloss);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_FALLBACK_REORDER,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_reorder);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_FALLBACK_CE,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_ce);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_ON_RTT_AVG,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rtt_avg);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_ON_RTT_VAR,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rtt_var);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_ON_OOPERCENT,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_on.oo_percent);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_ON_SACK_EPISODE,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_on.sack_episodes);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_ON_REORDER_PERCENT,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_on.reorder_percent);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_ON_RXMIT_PERCENT,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rxmit_percent);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_ON_RXMIT_DROP,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rxmit_drop);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_OFF_RTT_AVG,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rtt_avg);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_OFF_RTT_VAR,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rtt_var);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_OFF_OOPERCENT,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_off.oo_percent);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_OFF_SACK_EPISODE,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_off.sack_episodes);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_OFF_REORDER_PERCENT,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_off.reorder_percent);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_OFF_RXMIT_PERCENT,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rxmit_percent);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_OFF_RXMIT_DROP,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rxmit_drop);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_TXPKTS,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_txpkts);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_RXMTPKTS,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_rxmitpkts);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_RXPKTS,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_rxpkts);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_OOPKTS,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_oopkts);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_ON_DROP_RST,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rst_drop);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_TXPKTS,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_txpkts);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_RXMTPKTS,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_rxmitpkts);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_RXPKTS,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_rxpkts);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_OOPKTS,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_oopkts);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_OFF_DROP_RST,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rst_drop);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_TOTAL_CONN,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_total_conn);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_IFNET_UNSENT_DATA,
			    data->unsent_data_cnt);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_FALLBACK_DROPRST,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_droprst);
			nstat_set_keyval_scalar(&kv[i++],
			    NSTAT_SYSINFO_ECN_IFNET_FALLBACK_DROPRXMT,
			    data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_droprxmt);
			break;
		}
	}

	VERIFY(i > 0 && i <= nkeyvals);
	countsize = offsetof(nstat_sysinfo_counts,
	    nstat_sysinfo_keyvals) +
	    sizeof(nstat_sysinfo_keyval) * i;
	finalsize += countsize;
	syscnt->hdr.type = NSTAT_MSG_TYPE_SYSINFO_COUNTS;
	syscnt->hdr.length = finalsize;
	syscnt->counts.nstat_sysinfo_len = countsize;

	result = ctl_enqueuedata(control->ncs_kctl,
	    control->ncs_unit, syscnt, finalsize, CTL_DATA_EOR);
	if (result != 0)
	{
		nstat_stats.nstat_sysinfofailures += 1;
	}
	OSFree(syscnt, allocsize, nstat_malloc_tag);
}
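/*
 * Sizing note for the function above: allocsize is the worst case (message
 * header plus nkeyvals slots), while finalsize is recomputed from the number
 * of slots actually filled, so the message enqueued to the client is trimmed
 * to i key/value pairs even though the full buffer was allocated up front.
 */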
__private_extern__ void
nstat_sysinfo_send_data(
	nstat_sysinfo_data	*data)
{
	nstat_control_state	*control;

	lck_mtx_lock(&nstat_mtx);
	for (control = nstat_controls; control; control = control->ncs_next)
	{
		lck_mtx_lock(&control->mtx);
		if ((control->ncs_flags & NSTAT_FLAG_SYSINFO_SUBSCRIBED) != 0)
		{
			nstat_sysinfo_send_data_internal(control, data);
		}
		lck_mtx_unlock(&control->mtx);
	}
	lck_mtx_unlock(&nstat_mtx);
}

static void
nstat_sysinfo_generate_report(void)
{
	mbuf_report_peak_usage();
	tcp_report_stats();
	nstat_ifnet_report_ecn_stats();
}
#pragma mark -- Kernel Control Socket --

static kern_ctl_ref	nstat_ctlref = NULL;
static lck_grp_t	*nstat_lck_grp = NULL;

static errno_t	nstat_control_connect(kern_ctl_ref kctl, struct sockaddr_ctl *sac, void **uinfo);
static errno_t	nstat_control_disconnect(kern_ctl_ref kctl, u_int32_t unit, void *uinfo);
static errno_t	nstat_control_send(kern_ctl_ref kctl, u_int32_t unit, void *uinfo, mbuf_t m, int flags);
static int
nstat_enqueue_success(
	uint64_t		context,
	nstat_control_state	*state,
	u_int16_t		flags)
{
	nstat_msg_hdr success;
	int result;

	bzero(&success, sizeof(success));
	success.context = context;
	success.type = NSTAT_MSG_TYPE_SUCCESS;
	success.length = sizeof(success);
	success.flags = flags;
	result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &success,
	    sizeof(success), CTL_DATA_EOR | CTL_DATA_CRIT);
	if (result != 0) {
		if (nstat_debug != 0)
			printf("%s: could not enqueue success message %d\n",
			    __func__, result);
		nstat_stats.nstat_successmsgfailures += 1;
	}
	return result;
}
static errno_t
nstat_control_send_goodbye(
	nstat_control_state	*state,
	nstat_src		*src)
{
	errno_t result = 0;
	int failed = 0;

	if (nstat_control_reporting_allowed(state, src))
	{
		if ((state->ncs_flags & NSTAT_FLAG_SUPPORTS_UPDATES) != 0)
		{
			result = nstat_control_send_update(state, src, 0, NSTAT_MSG_HDR_FLAG_CLOSING, NULL);
			if (result != 0)
			{
				failed = 1;
				if (nstat_debug != 0)
					printf("%s - nstat_control_send_update() %d\n", __func__, result);
			}
		}
		else
		{
			// send one last counts notification
			result = nstat_control_send_counts(state, src, 0, NSTAT_MSG_HDR_FLAG_CLOSING, NULL);
			if (result != 0)
			{
				failed = 1;
				if (nstat_debug != 0)
					printf("%s - nstat_control_send_counts() %d\n", __func__, result);
			}

			// send a last description
			result = nstat_control_send_description(state, src, 0, NSTAT_MSG_HDR_FLAG_CLOSING);
			if (result != 0)
			{
				failed = 1;
				if (nstat_debug != 0)
					printf("%s - nstat_control_send_description() %d\n", __func__, result);
			}
		}
	}

	// send the source removed notification
	result = nstat_control_send_removed(state, src);
	if (result != 0 && nstat_debug)
	{
		failed = 1;
		if (nstat_debug != 0)
			printf("%s - nstat_control_send_removed() %d\n", __func__, result);
	}

	if (failed != 0)
		nstat_stats.nstat_control_send_goodbye_failures++;

	return result;
}
static errno_t
nstat_flush_accumulated_msgs(
	nstat_control_state	*state)
{
	errno_t result = 0;
	if (state->ncs_accumulated != NULL && mbuf_len(state->ncs_accumulated) > 0)
	{
		mbuf_pkthdr_setlen(state->ncs_accumulated, mbuf_len(state->ncs_accumulated));
		result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, state->ncs_accumulated, CTL_DATA_EOR);
		if (result != 0)
		{
			nstat_stats.nstat_flush_accumulated_msgs_failures++;
			if (nstat_debug != 0)
				printf("%s - ctl_enqueuembuf failed: %d\n", __func__, result);
			mbuf_freem(state->ncs_accumulated);
		}
		state->ncs_accumulated = NULL;
	}
	return result;
}
static errno_t
nstat_accumulate_msg(
	nstat_control_state	*state,
	nstat_msg_hdr		*hdr,
	size_t			length)
{
	if (state->ncs_accumulated && mbuf_trailingspace(state->ncs_accumulated) < length)
	{
		// Will send the current mbuf
		nstat_flush_accumulated_msgs(state);
	}

	errno_t result = 0;

	if (state->ncs_accumulated == NULL)
	{
		unsigned int one = 1;
		if (mbuf_allocpacket(MBUF_DONTWAIT, NSTAT_MAX_MSG_SIZE, &one, &state->ncs_accumulated) != 0)
		{
			if (nstat_debug != 0)
				printf("%s - mbuf_allocpacket failed\n", __func__);
			result = ENOMEM;
		}
		else
		{
			mbuf_setlen(state->ncs_accumulated, 0);
		}
	}

	if (result == 0)
	{
		hdr->length = length;
		result = mbuf_copyback(state->ncs_accumulated, mbuf_len(state->ncs_accumulated),
		    length, hdr, MBUF_DONTWAIT);
	}

	if (result != 0)
	{
		nstat_flush_accumulated_msgs(state);
		if (nstat_debug != 0)
			printf("%s - resorting to ctl_enqueuedata\n", __func__);
		result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, hdr, length, CTL_DATA_EOR);
	}

	if (result != 0)
		nstat_stats.nstat_accumulate_msg_failures++;

	return result;
}
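/*
 * Batching note: the append-style senders funnel SRC_COUNTS, SRC_DESC and
 * SRC_UPDATE records through nstat_accumulate_msg() so that many small
 * messages share one NSTAT_MAX_MSG_SIZE mbuf and one enqueue on the kernel
 * control socket.  The pending mbuf is flushed when the next record no
 * longer fits, or as a side effect of the ctl_enqueuedata() fallback above.
 */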
static void
nstat_idle_check(
	__unused thread_call_param_t p0,
	__unused thread_call_param_t p1)
{
	lck_mtx_lock(&nstat_mtx);

	nstat_idle_time = 0;

	nstat_control_state	*control;
	nstat_src		*dead = NULL;
	nstat_src		*dead_list = NULL;
	for (control = nstat_controls; control; control = control->ncs_next)
	{
		lck_mtx_lock(&control->mtx);
		nstat_src	**srcpp = &control->ncs_srcs;

		if (!(control->ncs_flags & NSTAT_FLAG_REQCOUNTS))
		{
			while(*srcpp != NULL)
			{
				if ((*srcpp)->provider->nstat_gone((*srcpp)->cookie))
				{
					errno_t result;

					// Pull it off the list
					dead = *srcpp;
					*srcpp = (*srcpp)->next;

					result = nstat_control_send_goodbye(control, dead);

					// Put this on the list to release later
					dead->next = dead_list;
					dead_list = dead;
				}
				else
				{
					srcpp = &(*srcpp)->next;
				}
			}
		}
		control->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS;
		lck_mtx_unlock(&control->mtx);
	}

	if (nstat_controls)
	{
		clock_interval_to_deadline(60, NSEC_PER_SEC, &nstat_idle_time);
		thread_call_func_delayed((thread_call_func_t)nstat_idle_check, NULL, nstat_idle_time);
	}

	lck_mtx_unlock(&nstat_mtx);

	/* Generate any system level reports, if needed */
	nstat_sysinfo_generate_report();

	// Release the sources now that we aren't holding lots of locks
	while (dead_list)
	{
		dead = dead_list;
		dead_list = dead->next;

		nstat_control_cleanup_source(NULL, dead, FALSE);
	}
}
static void
nstat_control_register(void)
{
	// Create our lock group first
	lck_grp_attr_t	*grp_attr = lck_grp_attr_alloc_init();
	lck_grp_attr_setdefault(grp_attr);
	nstat_lck_grp = lck_grp_alloc_init("network statistics kctl", grp_attr);
	lck_grp_attr_free(grp_attr);

	lck_mtx_init(&nstat_mtx, nstat_lck_grp, NULL);

	// Register the control
	struct kern_ctl_reg	nstat_control;
	bzero(&nstat_control, sizeof(nstat_control));
	strlcpy(nstat_control.ctl_name, NET_STAT_CONTROL_NAME, sizeof(nstat_control.ctl_name));
	nstat_control.ctl_flags = CTL_FLAG_REG_EXTENDED | CTL_FLAG_REG_CRIT;
	nstat_control.ctl_sendsize = nstat_sendspace;
	nstat_control.ctl_recvsize = nstat_recvspace;
	nstat_control.ctl_connect = nstat_control_connect;
	nstat_control.ctl_disconnect = nstat_control_disconnect;
	nstat_control.ctl_send = nstat_control_send;

	ctl_register(&nstat_control, &nstat_ctlref);
}
static void
nstat_control_cleanup_source(
	nstat_control_state	*state,
	struct nstat_src	*src,
	boolean_t		locked)
{
	errno_t result;

	if (state)
	{
		result = nstat_control_send_removed(state, src);
		if (result != 0)
		{
			nstat_stats.nstat_control_cleanup_source_failures++;
			if (nstat_debug != 0)
				printf("%s - nstat_control_send_removed() %d\n",
				    __func__, result);
		}
	}
	// Cleanup the source if we found it.
	src->provider->nstat_release(src->cookie, locked);
	OSFree(src, sizeof(*src), nstat_malloc_tag);
}
static bool
nstat_control_reporting_allowed(
	nstat_control_state	*state,
	nstat_src		*src)
{
	if (src->provider->nstat_reporting_allowed == NULL)
		return TRUE;

	return (
	    src->provider->nstat_reporting_allowed(src->cookie,
		&state->ncs_provider_filters[src->provider->nstat_provider_id])
	);
}
static errno_t
nstat_control_connect(
	kern_ctl_ref		kctl,
	struct sockaddr_ctl	*sac,
	void			**uinfo)
{
	nstat_control_state	*state = OSMalloc(sizeof(*state), nstat_malloc_tag);
	if (state == NULL) return ENOMEM;

	bzero(state, sizeof(*state));
	lck_mtx_init(&state->mtx, nstat_lck_grp, NULL);
	state->ncs_kctl = kctl;
	state->ncs_unit = sac->sc_unit;
	state->ncs_flags = NSTAT_FLAG_REQCOUNTS;
	*uinfo = state;

	lck_mtx_lock(&nstat_mtx);
	state->ncs_next = nstat_controls;
	nstat_controls = state;

	if (nstat_idle_time == 0)
	{
		clock_interval_to_deadline(60, NSEC_PER_SEC, &nstat_idle_time);
		thread_call_func_delayed((thread_call_func_t)nstat_idle_check, NULL, nstat_idle_time);
	}

	lck_mtx_unlock(&nstat_mtx);

	return 0;
}
static errno_t
nstat_control_disconnect(
	__unused kern_ctl_ref	kctl,
	__unused u_int32_t	unit,
	void			*uinfo)
{
	u_int32_t		watching;
	nstat_control_state	*state = (nstat_control_state *)uinfo;

	// pull it out of the global list of states
	lck_mtx_lock(&nstat_mtx);
	nstat_control_state	**statepp;
	for (statepp = &nstat_controls; *statepp; statepp = &(*statepp)->ncs_next)
	{
		if (*statepp == state)
		{
			*statepp = state->ncs_next;
			break;
		}
	}
	lck_mtx_unlock(&nstat_mtx);

	lck_mtx_lock(&state->mtx);
	// Stop watching for sources
	nstat_provider	*provider;
	watching = state->ncs_watching;
	state->ncs_watching = 0;
	for (provider = nstat_providers; provider && watching; provider = provider->next)
	{
		if ((watching & (1 << provider->nstat_provider_id)) != 0)
		{
			watching &= ~(1 << provider->nstat_provider_id);
			provider->nstat_watcher_remove(state);
		}
	}

	// set cleanup flags
	state->ncs_flags |= NSTAT_FLAG_CLEANUP;

	if (state->ncs_accumulated)
	{
		mbuf_freem(state->ncs_accumulated);
		state->ncs_accumulated = NULL;
	}

	// Copy out the list of sources
	nstat_src	*srcs = state->ncs_srcs;
	state->ncs_srcs = NULL;
	lck_mtx_unlock(&state->mtx);

	while (srcs)
	{
		nstat_src	*src;

		// pull it out of the list
		src = srcs;
		srcs = src->next;

		// clean it up
		nstat_control_cleanup_source(NULL, src, FALSE);
	}
	lck_mtx_destroy(&state->mtx, nstat_lck_grp);
	OSFree(state, sizeof(*state), nstat_malloc_tag);

	return 0;
}
static nstat_src_ref_t
nstat_control_next_src_ref(
	nstat_control_state	*state)
{
	return ++state->ncs_next_srcref;
}
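/*
 * The source reference is a simple per-client incrementing counter and is
 * never recycled, so a sufficiently long-lived client could in principle
 * wrap it around to NSTAT_SRC_REF_INVALID; nstat_control_source_add()
 * checks for exactly that value and refuses to hand out such a reference.
 */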
static errno_t
nstat_control_send_counts(
	nstat_control_state	*state,
	nstat_src		*src,
	unsigned long long	context,
	u_int16_t		hdr_flags,
	int			*gone)
{
	nstat_msg_src_counts counts;
	errno_t result = 0;

	/* Some providers may not have any counts to send */
	if (src->provider->nstat_counts == NULL)
		return 0;

	bzero(&counts, sizeof(counts));
	counts.hdr.type = NSTAT_MSG_TYPE_SRC_COUNTS;
	counts.hdr.length = sizeof(counts);
	counts.hdr.flags = hdr_flags;
	counts.hdr.context = context;
	counts.srcref = src->srcref;
	counts.event_flags = 0;

	if (src->provider->nstat_counts(src->cookie, &counts.counts, gone) == 0)
	{
		if ((src->filter & NSTAT_FILTER_NOZEROBYTES) &&
		    counts.counts.nstat_rxbytes == 0 &&
		    counts.counts.nstat_txbytes == 0)
		{
			result = EAGAIN;
		}
		else
		{
			result = ctl_enqueuedata(state->ncs_kctl,
			    state->ncs_unit, &counts, sizeof(counts),
			    CTL_DATA_EOR);
			if (result != 0)
				nstat_stats.nstat_sendcountfailures += 1;
		}
	}
	return result;
}
static errno_t
nstat_control_append_counts(
	nstat_control_state	*state,
	nstat_src		*src,
	int			*gone)
{
	/* Some providers may not have any counts to send */
	if (!src->provider->nstat_counts) return 0;

	nstat_msg_src_counts counts;
	bzero(&counts, sizeof(counts));
	counts.hdr.type = NSTAT_MSG_TYPE_SRC_COUNTS;
	counts.hdr.length = sizeof(counts);
	counts.srcref = src->srcref;
	counts.event_flags = 0;

	errno_t result = src->provider->nstat_counts(src->cookie, &counts.counts, gone);
	if (result != 0)
	{
		return result;
	}

	if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
	    counts.counts.nstat_rxbytes == 0 && counts.counts.nstat_txbytes == 0)
	{
		return EAGAIN;
	}

	return nstat_accumulate_msg(state, &counts.hdr, counts.hdr.length);
}
static int
nstat_control_send_description(
	nstat_control_state	*state,
	nstat_src		*src,
	u_int64_t		context,
	u_int16_t		hdr_flags)
{
	// Provider doesn't support getting the descriptor? Done.
	if (src->provider->nstat_descriptor_length == 0 ||
	    src->provider->nstat_copy_descriptor == NULL)
	{
		return EOPNOTSUPP;
	}

	// Allocate storage for the descriptor message
	mbuf_t		msg;
	unsigned int	one = 1;
	u_int32_t	size = offsetof(nstat_msg_src_description, data) + src->provider->nstat_descriptor_length;
	if (mbuf_allocpacket(MBUF_DONTWAIT, size, &one, &msg) != 0)
	{
		return ENOMEM;
	}

	nstat_msg_src_description	*desc = (nstat_msg_src_description *)mbuf_data(msg);
	bzero(desc, size);
	mbuf_setlen(msg, size);
	mbuf_pkthdr_setlen(msg, mbuf_len(msg));

	// Query the provider for the provider specific bits
	errno_t result = src->provider->nstat_copy_descriptor(src->cookie, desc->data, src->provider->nstat_descriptor_length);

	if (result != 0)
	{
		mbuf_freem(msg);
		return result;
	}

	desc->hdr.context = context;
	desc->hdr.type = NSTAT_MSG_TYPE_SRC_DESC;
	desc->hdr.length = size;
	desc->hdr.flags = hdr_flags;
	desc->srcref = src->srcref;
	desc->event_flags = 0;
	desc->provider = src->provider->nstat_provider_id;

	result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg, CTL_DATA_EOR);
	if (result != 0)
	{
		nstat_stats.nstat_descriptionfailures += 1;
		mbuf_freem(msg);
	}

	return result;
}
static errno_t
nstat_control_append_description(
	nstat_control_state	*state,
	nstat_src		*src)
{
	size_t	size = offsetof(nstat_msg_src_description, data) + src->provider->nstat_descriptor_length;
	if (size > 512 || src->provider->nstat_descriptor_length == 0 ||
	    src->provider->nstat_copy_descriptor == NULL)
	{
		return EOPNOTSUPP;
	}

	// Fill out a buffer on the stack, we will copy to the mbuf later
	u_int64_t buffer[size/sizeof(u_int64_t) + 1]; // u_int64_t to ensure alignment
	bzero(buffer, size);

	nstat_msg_src_description	*desc = (nstat_msg_src_description *)buffer;
	desc->hdr.type = NSTAT_MSG_TYPE_SRC_DESC;
	desc->hdr.length = size;
	desc->srcref = src->srcref;
	desc->event_flags = 0;
	desc->provider = src->provider->nstat_provider_id;

	errno_t result = 0;
	// Fill in the description
	// Query the provider for the provider specific bits
	result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
	    src->provider->nstat_descriptor_length);
	if (result != 0)
	{
		return result;
	}

	return nstat_accumulate_msg(state, &desc->hdr, size);
}
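/*
 * Alignment note for the stack buffer above: declaring the scratch space as
 * an array of u_int64_t guarantees 8-byte alignment for the message header
 * that is overlaid on it, and the element count rounds up.  For the maximum
 * 512-byte message, 512 / 8 + 1 = 65 elements (520 bytes), so copying `size`
 * bytes into the accumulation mbuf always stays within the buffer.
 */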
static int
nstat_control_send_update(
	nstat_control_state	*state,
	nstat_src		*src,
	u_int64_t		context,
	u_int16_t		hdr_flags,
	int			*gone)
{
	// Provider doesn't support getting the descriptor or counts? Done.
	if ((src->provider->nstat_descriptor_length == 0 ||
	     src->provider->nstat_copy_descriptor == NULL) &&
	    src->provider->nstat_counts == NULL)
	{
		return EOPNOTSUPP;
	}

	// Allocate storage for the descriptor message
	mbuf_t		msg;
	unsigned int	one = 1;
	u_int32_t	size = offsetof(nstat_msg_src_update, data) +
	    src->provider->nstat_descriptor_length;
	if (mbuf_allocpacket(MBUF_DONTWAIT, size, &one, &msg) != 0)
	{
		return ENOMEM;
	}

	nstat_msg_src_update	*desc = (nstat_msg_src_update *)mbuf_data(msg);
	bzero(desc, size);
	desc->hdr.context = context;
	desc->hdr.type = NSTAT_MSG_TYPE_SRC_UPDATE;
	desc->hdr.length = size;
	desc->hdr.flags = hdr_flags;
	desc->srcref = src->srcref;
	desc->event_flags = 0;
	desc->provider = src->provider->nstat_provider_id;

	mbuf_setlen(msg, size);
	mbuf_pkthdr_setlen(msg, mbuf_len(msg));

	errno_t result = 0;
	if (src->provider->nstat_descriptor_length != 0 && src->provider->nstat_copy_descriptor)
	{
		// Query the provider for the provider specific bits
		result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
		    src->provider->nstat_descriptor_length);
		if (result != 0)
		{
			mbuf_freem(msg);
			return result;
		}
	}

	if (src->provider->nstat_counts)
	{
		result = src->provider->nstat_counts(src->cookie, &desc->counts, gone);
		if (result == 0)
		{
			if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
			    desc->counts.nstat_rxbytes == 0 && desc->counts.nstat_txbytes == 0)
			{
				result = EAGAIN;
			}
			else
			{
				result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg, CTL_DATA_EOR);
			}
		}
	}

	if (result != 0)
	{
		nstat_stats.nstat_srcupatefailures += 1;
		mbuf_freem(msg);
	}

	return result;
}
static errno_t
nstat_control_append_update(
	nstat_control_state	*state,
	nstat_src		*src,
	int			*gone)
{
	size_t	size = offsetof(nstat_msg_src_update, data) + src->provider->nstat_descriptor_length;
	if (size > 512 || ((src->provider->nstat_descriptor_length == 0 ||
	    src->provider->nstat_copy_descriptor == NULL) &&
	    src->provider->nstat_counts == NULL))
	{
		return EOPNOTSUPP;
	}

	// Fill out a buffer on the stack, we will copy to the mbuf later
	u_int64_t buffer[size/sizeof(u_int64_t) + 1]; // u_int64_t to ensure alignment
	bzero(buffer, size);

	nstat_msg_src_update	*desc = (nstat_msg_src_update *)buffer;
	desc->hdr.type = NSTAT_MSG_TYPE_SRC_UPDATE;
	desc->hdr.length = size;
	desc->srcref = src->srcref;
	desc->event_flags = 0;
	desc->provider = src->provider->nstat_provider_id;

	errno_t result = 0;
	// Fill in the description
	if (src->provider->nstat_descriptor_length != 0 && src->provider->nstat_copy_descriptor)
	{
		// Query the provider for the provider specific bits
		result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
		    src->provider->nstat_descriptor_length);
		if (result != 0)
		{
			nstat_stats.nstat_copy_descriptor_failures++;
			if (nstat_debug != 0)
				printf("%s: src->provider->nstat_copy_descriptor: %d\n", __func__, result);
			return result;
		}
	}

	if (src->provider->nstat_counts)
	{
		result = src->provider->nstat_counts(src->cookie, &desc->counts, gone);
		if (result != 0)
		{
			nstat_stats.nstat_provider_counts_failures++;
			if (nstat_debug != 0)
				printf("%s: src->provider->nstat_counts: %d\n", __func__, result);
			return result;
		}

		if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
		    desc->counts.nstat_rxbytes == 0 && desc->counts.nstat_txbytes == 0)
		{
			return EAGAIN;
		}
	}

	return nstat_accumulate_msg(state, &desc->hdr, size);
}
static errno_t
nstat_control_send_removed(
	nstat_control_state	*state,
	nstat_src		*src)
{
	nstat_msg_src_removed removed;
	errno_t result;

	bzero(&removed, sizeof(removed));
	removed.hdr.type = NSTAT_MSG_TYPE_SRC_REMOVED;
	removed.hdr.length = sizeof(removed);
	removed.hdr.context = 0;
	removed.srcref = src->srcref;
	result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &removed,
	    sizeof(removed), CTL_DATA_EOR | CTL_DATA_CRIT);
	if (result != 0)
		nstat_stats.nstat_msgremovedfailures += 1;

	return result;
}
static errno_t
nstat_control_handle_add_request(
	nstat_control_state	*state,
	mbuf_t			m)
{
	errno_t	result;

	// Verify the header fits in the first mbuf
	if (mbuf_len(m) < offsetof(nstat_msg_add_src_req, param))
	{
		return EINVAL;
	}

	// Calculate the length of the parameter field
	int32_t	paramlength = mbuf_pkthdr_len(m) - offsetof(nstat_msg_add_src_req, param);
	if (paramlength < 0 || paramlength > 2 * 1024)
	{
		return EINVAL;
	}

	nstat_provider		*provider;
	nstat_provider_cookie_t	cookie;
	nstat_msg_add_src_req	*req = mbuf_data(m);
	if (mbuf_pkthdr_len(m) > mbuf_len(m))
	{
		// parameter is too large, we need to make a contiguous copy
		void	*data = OSMalloc(paramlength, nstat_malloc_tag);

		if (!data) return ENOMEM;
		result = mbuf_copydata(m, offsetof(nstat_msg_add_src_req, param), paramlength, data);
		if (result == 0)
			result = nstat_lookup_entry(req->provider, data, paramlength, &provider, &cookie);
		OSFree(data, paramlength, nstat_malloc_tag);
	}
	else
	{
		result = nstat_lookup_entry(req->provider, (void *)&req->param, paramlength, &provider, &cookie);
	}

	if (result != 0)
	{
		return result;
	}

	result = nstat_control_source_add(req->hdr.context, state, provider, cookie);
	if (result != 0)
		provider->nstat_release(cookie, 0);

	return result;
}
static errno_t
nstat_control_handle_add_all(
	nstat_control_state	*state,
	mbuf_t			m)
{
	errno_t	result = 0;

	// Verify the header fits in the first mbuf
	if (mbuf_len(m) < sizeof(nstat_msg_add_all_srcs))
	{
		return EINVAL;
	}

	nstat_msg_add_all_srcs	*req = mbuf_data(m);
	if (req->provider > NSTAT_PROVIDER_LAST) return ENOENT;

	nstat_provider	*provider = nstat_find_provider_by_id(req->provider);

	if (!provider) return ENOENT;
	if (provider->nstat_watcher_add == NULL) return ENOTSUP;

	if (nstat_privcheck != 0) {
		result = priv_check_cred(kauth_cred_get(),
		    PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0);
		if (result != 0)
			return result;
	}

	// Make sure we don't add the provider twice
	lck_mtx_lock(&state->mtx);
	if ((state->ncs_watching & (1 << provider->nstat_provider_id)) != 0)
		result = EALREADY;
	state->ncs_watching |= (1 << provider->nstat_provider_id);
	lck_mtx_unlock(&state->mtx);
	if (result != 0) return result;

	state->ncs_provider_filters[req->provider].npf_flags = req->filter;
	state->ncs_provider_filters[req->provider].npf_events = req->events;
	state->ncs_provider_filters[req->provider].npf_pid = req->target_pid;
	memcpy(state->ncs_provider_filters[req->provider].npf_uuid, req->target_uuid,
	    sizeof(state->ncs_provider_filters[req->provider].npf_uuid));

	result = provider->nstat_watcher_add(state);
	if (result != 0)
	{
		// Adding the watcher failed, undo the filter and watch state
		state->ncs_provider_filters[req->provider].npf_flags = 0;
		state->ncs_provider_filters[req->provider].npf_events = 0;
		state->ncs_provider_filters[req->provider].npf_pid = 0;
		bzero(state->ncs_provider_filters[req->provider].npf_uuid,
		    sizeof(state->ncs_provider_filters[req->provider].npf_uuid));

		lck_mtx_lock(&state->mtx);
		state->ncs_watching &= ~(1 << provider->nstat_provider_id);
		lck_mtx_unlock(&state->mtx);
	}
	if (result == 0)
		nstat_enqueue_success(req->hdr.context, state, 0);

	return result;
}
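/*
 * Illustrative userspace sketch (not compiled into the kernel) of the
 * NSTAT_MSG_TYPE_ADD_ALL_SRCS request the handler above services. Field
 * names mirror the handler's use of the request; `fd` is assumed to be a
 * connected control socket and the helper name is hypothetical. When
 * net.statistics_privcheck is set, the caller must hold
 * PRIV_NET_PRIVILEGED_NETWORK_STATISTICS.
 */
#if 0
#include <strings.h>
#include <sys/socket.h>

static int
watch_provider(int fd, u_int32_t provider_id, pid_t target_pid)
{
	nstat_msg_add_all_srcs	req;

	bzero(&req, sizeof(req));
	req.hdr.type = NSTAT_MSG_TYPE_ADD_ALL_SRCS;
	req.hdr.length = sizeof(req);
	req.hdr.context = 2;		// echoed back on success or error
	req.provider = provider_id;	// rejected if > NSTAT_PROVIDER_LAST
	req.filter = 0;			// e.g. NSTAT_FILTER_SUPPRESS_SRC_ADDED
	req.events = 0;
	req.target_pid = target_pid;	// with target_uuid, scopes the watch
	return (send(fd, &req, sizeof(req), 0) == sizeof(req)) ? 0 : -1;
}
#endif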
static errno_t
nstat_control_source_add(
	u_int64_t		context,
	nstat_control_state	*state,
	nstat_provider		*provider,
	nstat_provider_cookie_t	cookie)
{
	// Fill out source added message if appropriate
	mbuf_t		msg = NULL;
	nstat_src_ref_t	*srcrefp = NULL;

	u_int64_t provider_filter_flags =
	    state->ncs_provider_filters[provider->nstat_provider_id].npf_flags;
	boolean_t tell_user =
	    ((provider_filter_flags & NSTAT_FILTER_SUPPRESS_SRC_ADDED) == 0);
	u_int32_t src_filter =
	    (provider_filter_flags & NSTAT_FILTER_PROVIDER_NOZEROBYTES)
	    ? NSTAT_FILTER_NOZEROBYTES : 0;

	if (tell_user)
	{
		unsigned int one = 1;

		if (mbuf_allocpacket(MBUF_DONTWAIT, sizeof(nstat_msg_src_added),
		    &one, &msg) != 0)
			return ENOMEM;

		mbuf_setlen(msg, sizeof(nstat_msg_src_added));
		mbuf_pkthdr_setlen(msg, mbuf_len(msg));
		nstat_msg_src_added	*add = mbuf_data(msg);
		bzero(add, sizeof(*add));
		add->hdr.type = NSTAT_MSG_TYPE_SRC_ADDED;
		add->hdr.length = mbuf_len(msg);
		add->hdr.context = context;
		add->provider = provider->nstat_provider_id;
		srcrefp = &add->srcref;
	}

	// Allocate storage for the source
	nstat_src	*src = OSMalloc(sizeof(*src), nstat_malloc_tag);
	if (src == NULL)
	{
		if (msg) mbuf_freem(msg);
		return ENOMEM;
	}

	// Fill in the source, including picking an unused source ref
	lck_mtx_lock(&state->mtx);

	src->srcref = nstat_control_next_src_ref(state);
	if (srcrefp)
		*srcrefp = src->srcref;

	if (state->ncs_flags & NSTAT_FLAG_CLEANUP || src->srcref == NSTAT_SRC_REF_INVALID)
	{
		lck_mtx_unlock(&state->mtx);
		OSFree(src, sizeof(*src), nstat_malloc_tag);
		if (msg) mbuf_freem(msg);
		return EINVAL;
	}
	src->provider = provider;
	src->cookie = cookie;
	src->filter = src_filter;
	src->seq = 0;

	if (msg)
	{
		// send the source added message if appropriate
		errno_t	result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg,
		    CTL_DATA_EOR);
		if (result != 0)
		{
			nstat_stats.nstat_srcaddedfailures += 1;
			lck_mtx_unlock(&state->mtx);
			OSFree(src, sizeof(*src), nstat_malloc_tag);
			mbuf_freem(msg);
			return result;
		}
	}

	// Put the source in the list
	src->next = state->ncs_srcs;
	state->ncs_srcs = src;

	lck_mtx_unlock(&state->mtx);

	return 0;
}
static errno_t
nstat_control_handle_remove_request(
	nstat_control_state	*state,
	mbuf_t			m)
{
	nstat_src_ref_t	srcref = NSTAT_SRC_REF_INVALID;

	if (mbuf_copydata(m, offsetof(nstat_msg_rem_src_req, srcref), sizeof(srcref), &srcref) != 0)
	{
		return EINVAL;
	}

	lck_mtx_lock(&state->mtx);

	// Remove this source as we look for it
	nstat_src	**nextp;
	nstat_src	*src = NULL;
	for (nextp = &state->ncs_srcs; *nextp; nextp = &(*nextp)->next)
	{
		if ((*nextp)->srcref == srcref)
		{
			src = *nextp;
			*nextp = src->next;
			break;
		}
	}

	lck_mtx_unlock(&state->mtx);

	if (src) nstat_control_cleanup_source(state, src, FALSE);

	return src ? 0 : ENOENT;
}
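/*
 * The loop above uses the classic pointer-to-pointer unlink idiom: walking
 * with a pointer to the link itself (the head pointer or a node's next field)
 * lets one pass both find and splice out a node, with no special case for
 * removing the list head. A minimal standalone illustration (not compiled):
 */
#if 0
struct node { int key; struct node *next; };

static struct node *
unlink_key(struct node **headp, int key)
{
	struct node **nextp;
	for (nextp = headp; *nextp; nextp = &(*nextp)->next)
	{
		if ((*nextp)->key == key)
		{
			struct node *found = *nextp;
			*nextp = found->next;	// works for head and interior nodes
			return found;
		}
	}
	return NULL;	// key not present
}
#endif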
static errno_t
nstat_control_handle_query_request(
	nstat_control_state	*state,
	mbuf_t			m)
{
	// TBD: handle this from another thread so we can enqueue a lot of data
	// As written, if a client requests query all, this function will be
	// called from their send of the request message. We will attempt to write
	// responses and succeed until the buffer fills up. Since the clients thread
	// is blocked on send, it won't be reading unless the client has two threads
	// using this socket, one for read and one for write. Two threads probably
	// won't work with this code anyhow since we don't have proper locking in
	// place yet.
	nstat_src		*dead_srcs = NULL;
	errno_t			result = ENOENT;
	nstat_msg_query_src_req	req;

	if (mbuf_copydata(m, 0, sizeof(req), &req) != 0)
	{
		return EINVAL;
	}

	const boolean_t	all_srcs = (req.srcref == NSTAT_SRC_REF_ALL);

	lck_mtx_lock(&state->mtx);

	if (all_srcs)
	{
		state->ncs_flags |= NSTAT_FLAG_REQCOUNTS;
	}
	nstat_src	**srcpp = &state->ncs_srcs;
	u_int64_t	src_count = 0;
	boolean_t	partial = FALSE;

	/*
	 * Error handling policy and sequence number generation is folded into
	 * nstat_control_begin_query.
	 */
	partial = nstat_control_begin_query(state, &req.hdr);

	while (*srcpp != NULL
	    && (!partial || src_count < QUERY_CONTINUATION_SRC_COUNT))
	{
		nstat_src	*src = *srcpp;
		int		gone = 0;

		// XXX ignore IFACE types?
		if (all_srcs || src->srcref == req.srcref)
		{
			if (nstat_control_reporting_allowed(state, src)
			    && (!partial || !all_srcs || src->seq != state->ncs_seq))
			{
				if (all_srcs &&
				    (req.hdr.flags & NSTAT_MSG_HDR_FLAG_SUPPORTS_AGGREGATE) != 0)
				{
					result = nstat_control_append_counts(state, src, &gone);
				}
				else
				{
					result = nstat_control_send_counts(state, src, req.hdr.context, 0, &gone);
				}

				if (ENOMEM == result || ENOBUFS == result)
				{
					/*
					 * If the counts message failed to
					 * enqueue then we should clear our flag so
					 * that a client doesn't miss anything on
					 * idle cleanup. We skip the "gone"
					 * processing in the hope that we may
					 * catch it another time.
					 */
					state->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS;
					break;
				}
				if (partial)
				{
					/*
					 * We skip over hard errors and
					 * filtered sources.
					 */
					src->seq = state->ncs_seq;
					src_count++;
				}
			}
		}

		if (gone)
		{
			// send one last descriptor message so client may see last state
			// If we can't send the notification now, it
			// will be sent in the idle cleanup.
			result = nstat_control_send_description(state, *srcpp, 0, 0);
			if (result != 0)
			{
				nstat_stats.nstat_control_send_description_failures++;
				if (nstat_debug != 0)
					printf("%s - nstat_control_send_description() %d\n", __func__, result);
				state->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS;
				break;
			}

			// pull src out of the list
			*srcpp = src->next;

			src->next = dead_srcs;
			dead_srcs = src;
		}
		else
		{
			srcpp = &(*srcpp)->next;
		}

		if (!all_srcs && req.srcref == src->srcref)
		{
			break;
		}
	}
	nstat_flush_accumulated_msgs(state);

	u_int16_t flags = 0;
	if (req.srcref == NSTAT_SRC_REF_ALL)
		flags = nstat_control_end_query(state, *srcpp, partial);

	lck_mtx_unlock(&state->mtx);

	/*
	 * If an error occurred enqueueing data, then allow the error to
	 * propagate to nstat_control_send. This way, the error is sent to
	 * user-level.
	 */
	if (all_srcs && ENOMEM != result && ENOBUFS != result)
	{
		nstat_enqueue_success(req.hdr.context, state, flags);
		result = 0;
	}

	while (dead_srcs)
	{
		nstat_src	*src = dead_srcs;

		dead_srcs = src->next;

		// release src and send notification
		nstat_control_cleanup_source(state, src, FALSE);
	}

	return result;
}
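/*
 * Illustrative userspace sketch (not compiled into the kernel). As the TBD
 * comment above notes, responses to a query-all are enqueued while the
 * requesting thread is still blocked in send(), so a single-threaded client
 * can wedge against a full socket buffer; draining from a second thread
 * avoids that. Helper names here are hypothetical; message layouts come from
 * the private ntstat.h header.
 */
#if 0
#include <pthread.h>
#include <stdint.h>
#include <strings.h>
#include <sys/socket.h>

static void *
run_reader(void *arg)
{
	int	fd = (int)(intptr_t)arg;
	char	buf[8192];

	// Each enqueued response is a complete nstat_msg_hdr-framed record
	while (recv(fd, buf, sizeof(buf), 0) > 0)
	{
		// parse NSTAT_MSG_TYPE_SRC_COUNTS etc. here
	}
	return NULL;
}

static int
issue_query_all(int fd)
{
	pthread_t		reader;
	nstat_msg_query_src_req	req;

	pthread_create(&reader, NULL, run_reader, (void *)(intptr_t)fd);

	bzero(&req, sizeof(req));
	req.hdr.type = NSTAT_MSG_TYPE_QUERY_SRC;
	req.hdr.length = sizeof(req);
	req.hdr.context = 3;	// echoed back in the terminating success/error
	req.srcref = NSTAT_SRC_REF_ALL;
	return (send(fd, &req, sizeof(req), 0) == sizeof(req)) ? 0 : -1;
}
#endif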
static errno_t
nstat_control_handle_get_src_description(
	nstat_control_state	*state,
	mbuf_t			m)
{
	nstat_msg_get_src_description	req;
	errno_t		result = ENOENT;
	nstat_src	*src = NULL;

	if (mbuf_copydata(m, 0, sizeof(req), &req) != 0)
	{
		return EINVAL;
	}

	lck_mtx_lock(&state->mtx);
	u_int64_t	src_count = 0;
	boolean_t	partial = FALSE;
	const boolean_t	all_srcs = (req.srcref == NSTAT_SRC_REF_ALL);

	/*
	 * Error handling policy and sequence number generation is folded into
	 * nstat_control_begin_query.
	 */
	partial = nstat_control_begin_query(state, &req.hdr);

	for (src = state->ncs_srcs;
	    src && (!partial || src_count < QUERY_CONTINUATION_SRC_COUNT);
	    src = src->next)
	{
		if (all_srcs || src->srcref == req.srcref)
		{
			if (nstat_control_reporting_allowed(state, src)
			    && (!all_srcs || !partial || src->seq != state->ncs_seq))
			{
				if ((req.hdr.flags & NSTAT_MSG_HDR_FLAG_SUPPORTS_AGGREGATE) != 0 && all_srcs)
				{
					result = nstat_control_append_description(state, src);
				}
				else
				{
					result = nstat_control_send_description(state, src, req.hdr.context, 0);
				}

				if (ENOMEM == result || ENOBUFS == result)
				{
					/*
					 * If the description message failed to
					 * enqueue then we give up for now.
					 */
					break;
				}
				if (partial)
				{
					/*
					 * Note, we skip over hard errors and
					 * filtered sources.
					 */
					src->seq = state->ncs_seq;
					src_count++;
				}
			}

			if (!all_srcs)
			{
				break;
			}
		}
	}
	nstat_flush_accumulated_msgs(state);

	u_int16_t flags = 0;
	if (req.srcref == NSTAT_SRC_REF_ALL)
		flags = nstat_control_end_query(state, src, partial);

	lck_mtx_unlock(&state->mtx);

	/*
	 * If an error occurred enqueueing data, then allow the error to
	 * propagate to nstat_control_send. This way, the error is sent to
	 * user-level.
	 */
	if (all_srcs && ENOMEM != result && ENOBUFS != result)
	{
		nstat_enqueue_success(req.hdr.context, state, flags);
		result = 0;
	}

	return result;
}
static errno_t
nstat_control_handle_set_filter(
	nstat_control_state	*state,
	mbuf_t			m)
{
	nstat_msg_set_filter	req;
	nstat_src		*src;

	if (mbuf_copydata(m, 0, sizeof(req), &req) != 0)
		return EINVAL;
	if (req.srcref == NSTAT_SRC_REF_ALL ||
	    req.srcref == NSTAT_SRC_REF_INVALID)
		return EINVAL;

	lck_mtx_lock(&state->mtx);
	for (src = state->ncs_srcs; src; src = src->next)
		if (req.srcref == src->srcref)
		{
			src->filter = req.filter;
			break;
		}
	lck_mtx_unlock(&state->mtx);
	if (src == NULL)
		return ENOENT;

	return 0;
}
static void
nstat_send_error(
	nstat_control_state	*state,
	u_int64_t		context,
	u_int32_t		error)
{
	errno_t			result;
	struct nstat_msg_error	err;

	bzero(&err, sizeof(err));
	err.hdr.type = NSTAT_MSG_TYPE_ERROR;
	err.hdr.length = sizeof(err);
	err.hdr.context = context;
	err.error = error;

	result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &err,
	    sizeof(err), CTL_DATA_EOR | CTL_DATA_CRIT);
	if (result != 0)
		nstat_stats.nstat_msgerrorfailures++;
}
static boolean_t
nstat_control_begin_query(
	nstat_control_state	*state,
	const nstat_msg_hdr	*hdrp)
{
	boolean_t	partial = FALSE;

	if (hdrp->flags & NSTAT_MSG_HDR_FLAG_CONTINUATION)
	{
		/* A partial query all has been requested. */
		partial = TRUE;

		if (state->ncs_context != hdrp->context)
		{
			if (state->ncs_context != 0)
				nstat_send_error(state, state->ncs_context, EAGAIN);

			/* Initialize state for a partial query all. */
			state->ncs_context = hdrp->context;
			state->ncs_seq++;
		}
	}
	else if (state->ncs_context != 0)
	{
		/*
		 * A continuation of a paced-query was in progress. Send that
		 * context an error and reset the state. If the same context
		 * has changed its mind, just send the full query results.
		 */
		if (state->ncs_context != hdrp->context)
			nstat_send_error(state, state->ncs_context, EAGAIN);

		/* Reset the context regardless. */
		state->ncs_context = 0;
	}

	return partial;
}
static u_int16_t
nstat_control_end_query(
	nstat_control_state	*state,
	nstat_src		*last_src,
	boolean_t		partial)
{
	u_int16_t	flags = 0;

	if (last_src == NULL || !partial)
	{
		/*
		 * We iterated through the entire srcs list or exited early
		 * from the loop when a partial update was not requested (an
		 * error occurred), so clear context to indicate internally
		 * that the query is finished.
		 */
		state->ncs_context = 0;
	}
	else
	{
		/*
		 * Indicate to userlevel to make another partial request as
		 * there are still sources left to be reported.
		 */
		flags |= NSTAT_MSG_HDR_FLAG_CONTINUATION;
	}

	return flags;
}
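/*
 * Illustrative userspace sketch (not compiled into the kernel) of driving the
 * paced-query protocol implemented by nstat_control_begin_query() and
 * nstat_control_end_query() above: keep the context constant, set the
 * continuation flag on follow-up requests, and stop once the terminating
 * success message no longer carries NSTAT_MSG_HDR_FLAG_CONTINUATION.
 * read_success_flags() is a hypothetical helper that drains responses and
 * returns the final success message's header flags.
 */
#if 0
#include <strings.h>
#include <sys/socket.h>

static int
paced_query_all(int fd)
{
	nstat_msg_query_src_req	req;
	u_int16_t		flags = 0;

	bzero(&req, sizeof(req));
	req.hdr.type = NSTAT_MSG_TYPE_QUERY_SRC;
	req.hdr.length = sizeof(req);
	req.hdr.context = 4;	// must stay constant across batches, or the
				// kernel answers the stale context with EAGAIN
	req.srcref = NSTAT_SRC_REF_ALL;

	do {
		if (send(fd, &req, sizeof(req), 0) != sizeof(req))
			return -1;
		if (read_success_flags(fd, req.hdr.context, &flags) != 0)
			return -1;
		req.hdr.flags = NSTAT_MSG_HDR_FLAG_CONTINUATION;
	} while ((flags & NSTAT_MSG_HDR_FLAG_CONTINUATION) != 0);

	return 0;
}
#endif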
static errno_t
nstat_control_handle_get_update(
	nstat_control_state	*state,
	mbuf_t			m)
{
	nstat_msg_query_src_req	req;

	if (mbuf_copydata(m, 0, sizeof(req), &req) != 0)
	{
		return EINVAL;
	}

	lck_mtx_lock(&state->mtx);

	state->ncs_flags |= NSTAT_FLAG_SUPPORTS_UPDATES;

	errno_t		result = ENOENT;
	nstat_src	*src = NULL;
	nstat_src	*dead_srcs = NULL;
	nstat_src	**srcpp = &state->ncs_srcs;
	u_int64_t	src_count = 0;
	boolean_t	partial = FALSE;

	/*
	 * Error handling policy and sequence number generation is folded into
	 * nstat_control_begin_query.
	 */
	partial = nstat_control_begin_query(state, &req.hdr);

	while (*srcpp != NULL
	    && (FALSE == partial
	        || src_count < QUERY_CONTINUATION_SRC_COUNT))
	{
		int	gone = 0;

		src = *srcpp;
		if (nstat_control_reporting_allowed(state, src))
		{
			/* skip this source if it has the current state
			 * sequence number as it's already been reported in
			 * this query-all partial sequence. */
			if (req.srcref == NSTAT_SRC_REF_ALL
			    && (FALSE == partial || src->seq != state->ncs_seq))
			{
				result = nstat_control_append_update(state, src, &gone);
				if (ENOMEM == result || ENOBUFS == result)
				{
					/*
					 * If the update message failed to
					 * enqueue then give up.
					 */
					break;
				}
				if (partial)
				{
					/*
					 * We skip over hard errors and
					 * filtered sources.
					 */
					src->seq = state->ncs_seq;
					src_count++;
				}
			}
			else if (src->srcref == req.srcref)
			{
				result = nstat_control_send_update(state, src, req.hdr.context, 0, &gone);
			}
		}

		if (gone)
		{
			// pull src out of the list
			*srcpp = src->next;

			src->next = dead_srcs;
			dead_srcs = src;
		}
		else
		{
			srcpp = &(*srcpp)->next;
		}

		if (req.srcref != NSTAT_SRC_REF_ALL && req.srcref == src->srcref)
		{
			break;
		}
	}

	nstat_flush_accumulated_msgs(state);

	u_int16_t flags = 0;
	if (req.srcref == NSTAT_SRC_REF_ALL)
		flags = nstat_control_end_query(state, *srcpp, partial);

	lck_mtx_unlock(&state->mtx);

	/*
	 * If an error occurred enqueueing data, then allow the error to
	 * propagate to nstat_control_send. This way, the error is sent to
	 * user-level.
	 */
	if (req.srcref == NSTAT_SRC_REF_ALL && ENOMEM != result && ENOBUFS != result)
	{
		nstat_enqueue_success(req.hdr.context, state, flags);
		result = 0;
	}

	while (dead_srcs)
	{
		src = dead_srcs;
		dead_srcs = src->next;

		// release src and send notification
		nstat_control_cleanup_source(state, src, FALSE);
	}

	return result;
}
static errno_t
nstat_control_handle_subscribe_sysinfo(
	nstat_control_state	*state)
{
	errno_t	result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0);

	if (result != 0)
	{
		return result;
	}

	lck_mtx_lock(&state->mtx);
	state->ncs_flags |= NSTAT_FLAG_SYSINFO_SUBSCRIBED;
	lck_mtx_unlock(&state->mtx);

	return 0;
}
static errno_t
nstat_control_send(
	kern_ctl_ref	kctl,
	u_int32_t	unit,
	void		*uinfo,
	mbuf_t		m,
	__unused int	flags)
{
	nstat_control_state	*state = (nstat_control_state*)uinfo;
	struct nstat_msg_hdr	*hdr;
	struct nstat_msg_hdr	storage;
	errno_t			result = 0;

	if (mbuf_pkthdr_len(m) < sizeof(*hdr))
	{
		// Is this the right thing to do?
		mbuf_freem(m);
		return EINVAL;
	}

	if (mbuf_len(m) >= sizeof(*hdr))
	{
		hdr = mbuf_data(m);
	}
	else
	{
		mbuf_copydata(m, 0, sizeof(storage), &storage);
		hdr = &storage;
	}

	// Legacy clients may not set the length
	// Those clients are likely not setting the flags either
	// Fix everything up so old clients continue to work
	if (hdr->length != mbuf_pkthdr_len(m))
	{
		hdr->flags = 0;
		hdr->length = mbuf_pkthdr_len(m);
		if (hdr == &storage)
		{
			mbuf_copyback(m, 0, sizeof(*hdr), hdr, MBUF_DONTWAIT);
		}
	}

	switch (hdr->type)
	{
		case NSTAT_MSG_TYPE_ADD_SRC:
			result = nstat_control_handle_add_request(state, m);
			break;

		case NSTAT_MSG_TYPE_ADD_ALL_SRCS:
			result = nstat_control_handle_add_all(state, m);
			break;

		case NSTAT_MSG_TYPE_REM_SRC:
			result = nstat_control_handle_remove_request(state, m);
			break;

		case NSTAT_MSG_TYPE_QUERY_SRC:
			result = nstat_control_handle_query_request(state, m);
			break;

		case NSTAT_MSG_TYPE_GET_SRC_DESC:
			result = nstat_control_handle_get_src_description(state, m);
			break;

		case NSTAT_MSG_TYPE_SET_FILTER:
			result = nstat_control_handle_set_filter(state, m);
			break;

		case NSTAT_MSG_TYPE_GET_UPDATE:
			result = nstat_control_handle_get_update(state, m);
			break;

		case NSTAT_MSG_TYPE_SUBSCRIBE_SYSINFO:
			result = nstat_control_handle_subscribe_sysinfo(state);
			break;

		default:
			result = EINVAL;
			break;
	}

	if (result != 0)
	{
		struct nstat_msg_error	err;

		bzero(&err, sizeof(err));
		err.hdr.type = NSTAT_MSG_TYPE_ERROR;
		err.hdr.length = sizeof(err) + mbuf_pkthdr_len(m);
		err.hdr.context = hdr->context;
		err.error = result;

		if (mbuf_prepend(&m, sizeof(err), MBUF_DONTWAIT) == 0 &&
		    mbuf_copyback(m, 0, sizeof(err), &err, MBUF_DONTWAIT) == 0)
		{
			result = ctl_enqueuembuf(kctl, unit, m, CTL_DATA_EOR | CTL_DATA_CRIT);
			if (result != 0)
			{
				mbuf_freem(m);
			}
			m = NULL;
		}

		if (result != 0)
		{
			// Unable to prepend the error to the request - just send the error
			err.hdr.length = sizeof(err);
			result = ctl_enqueuedata(kctl, unit, &err, sizeof(err),
			    CTL_DATA_EOR | CTL_DATA_CRIT);
			if (result != 0)
				nstat_stats.nstat_msgerrorfailures += 1;
		}
		nstat_stats.nstat_handle_msg_failures += 1;
	}

	if (m) mbuf_freem(m);

	return result;
}
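/*
 * Illustrative userspace sketch (not compiled into the kernel): connecting to
 * the kernel control serviced by nstat_control_send() above. The control name
 * is assumed to match NET_STAT_CONTROL_NAME from the private ntstat.h header;
 * everything else uses public kern_control interfaces. Error handling is
 * abbreviated.
 */
#if 0
#include <string.h>
#include <strings.h>
#include <sys/ioctl.h>
#include <sys/kern_control.h>
#include <sys/socket.h>
#include <sys/sys_domain.h>
#include <unistd.h>

static int
nstat_connect(void)
{
	struct ctl_info		info;
	struct sockaddr_ctl	addr;
	int fd = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);

	if (fd < 0) return -1;

	bzero(&info, sizeof(info));
	strlcpy(info.ctl_name, "com.apple.network.statistics", sizeof(info.ctl_name));
	if (ioctl(fd, CTLIOCGINFO, &info) != 0)	// resolve name to ctl_id
	{
		close(fd);
		return -1;
	}

	bzero(&addr, sizeof(addr));
	addr.sc_len = sizeof(addr);
	addr.sc_family = AF_SYSTEM;
	addr.ss_sysaddr = AF_SYS_CONTROL;
	addr.sc_id = info.ctl_id;
	addr.sc_unit = 0;	// let the kernel pick the unit
	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) != 0)
	{
		close(fd);
		return -1;
	}
	return fd;
}
#endif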