/*
 * Copyright (c) 2010-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/param.h>
#include <sys/types.h>
#include <sys/kpi_mbuf.h>
#include <sys/socket.h>
#include <sys/kern_control.h>
#include <sys/mcache.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/priv.h>
#include <sys/protosw.h>

#include <kern/clock.h>
#include <kern/debug.h>

#include <libkern/libkern.h>
#include <libkern/OSMalloc.h>
#include <libkern/OSAtomic.h>
#include <libkern/locks.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/route.h>
#include <net/ntstat.h>

#include <netinet/ip_var.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_cc.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/in6_var.h>
__private_extern__ int nstat_collect = 1;
SYSCTL_INT(_net, OID_AUTO, statistics, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_collect, 0, "Collect detailed statistics");

static int nstat_privcheck = 0;
SYSCTL_INT(_net, OID_AUTO, statistics_privcheck, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_privcheck, 0, "Entitlement check");

SYSCTL_NODE(_net, OID_AUTO, stats,
    CTLFLAG_RW|CTLFLAG_LOCKED, 0, "network statistics");

static int nstat_debug = 0;
SYSCTL_INT(_net_stats, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_debug, 0, "");

static int nstat_sendspace = 2048;
SYSCTL_INT(_net_stats, OID_AUTO, sendspace, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_sendspace, 0, "");

static int nstat_recvspace = 8192;
SYSCTL_INT(_net_stats, OID_AUTO, recvspace, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_recvspace, 0, "");

static struct nstat_stats nstat_stats;
SYSCTL_STRUCT(_net_stats, OID_AUTO, stats, CTLFLAG_RD | CTLFLAG_LOCKED,
    &nstat_stats, nstat_stats, "");
enum
{
	NSTAT_FLAG_CLEANUP              = (1 << 0),
	NSTAT_FLAG_REQCOUNTS            = (1 << 1),
	NSTAT_FLAG_SUPPORTS_UPDATES     = (1 << 2),
	NSTAT_FLAG_SYSINFO_SUBSCRIBED   = (1 << 3),
};

#define QUERY_CONTINUATION_SRC_COUNT 100

typedef struct nstat_provider_filter
{
	u_int64_t	npf_flags;
	u_int64_t	npf_events;
	pid_t		npf_pid;
	uuid_t		npf_uuid;
} nstat_provider_filter;
typedef struct nstat_control_state
{
	struct nstat_control_state	*ncs_next;
	u_int32_t			ncs_watching;
	decl_lck_mtx_data(, mtx);
	kern_ctl_ref			ncs_kctl;
	u_int32_t			ncs_unit;
	nstat_src_ref_t			ncs_next_srcref;
	struct nstat_src		*ncs_srcs;
	mbuf_t				ncs_accumulated;
	u_int32_t			ncs_flags;
	nstat_provider_filter		ncs_provider_filters[NSTAT_PROVIDER_COUNT];
	/* state maintained for partial query requests */
	u_int64_t			ncs_context;
	u_int64_t			ncs_seq;
} nstat_control_state;
typedef struct nstat_provider
{
	struct nstat_provider	*next;
	nstat_provider_id_t	nstat_provider_id;
	size_t			nstat_descriptor_length;
	errno_t			(*nstat_lookup)(const void *data, u_int32_t length, nstat_provider_cookie_t *out_cookie);
	int			(*nstat_gone)(nstat_provider_cookie_t cookie);
	errno_t			(*nstat_counts)(nstat_provider_cookie_t cookie, struct nstat_counts *out_counts, int *out_gone);
	errno_t			(*nstat_watcher_add)(nstat_control_state *state);
	void			(*nstat_watcher_remove)(nstat_control_state *state);
	errno_t			(*nstat_copy_descriptor)(nstat_provider_cookie_t cookie, void *data, u_int32_t len);
	void			(*nstat_release)(nstat_provider_cookie_t cookie, boolean_t locked);
	bool			(*nstat_reporting_allowed)(nstat_provider_cookie_t cookie, nstat_provider_filter *filter);
} nstat_provider;
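
/*
 * Added commentary: each statistics source type plugs in through this
 * vtable-style struct. A provider fills in its callbacks and splices
 * itself onto the global nstat_providers list (see the
 * nstat_init_*_provider() routines below); lookups then walk that list
 * matching on nstat_provider_id.
 */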
typedef STAILQ_HEAD(, nstat_src)	stailq_head_nstat_src;
typedef STAILQ_ENTRY(nstat_src)		stailq_entry_nstat_src;

typedef TAILQ_HEAD(, nstat_tu_shadow)	tailq_head_tu_shadow;
typedef TAILQ_ENTRY(nstat_tu_shadow)	tailq_entry_tu_shadow;
typedef struct nstat_src
{
	struct nstat_src	*next;
	nstat_src_ref_t		srcref;
	nstat_provider		*provider;
	nstat_provider_cookie_t	cookie;
	uint32_t		filter;
	uint64_t		seq;
} nstat_src;
static errno_t	nstat_control_send_counts(nstat_control_state *,
    nstat_src *, unsigned long long, u_int16_t, int *);
static int	nstat_control_send_description(nstat_control_state *state, nstat_src *src, u_int64_t context, u_int16_t hdr_flags);
static int	nstat_control_send_update(nstat_control_state *state, nstat_src *src, u_int64_t context, u_int16_t hdr_flags, int *gone);
static errno_t	nstat_control_send_removed(nstat_control_state *, nstat_src *);
static errno_t	nstat_control_send_goodbye(nstat_control_state *state, nstat_src *src);
static void	nstat_control_cleanup_source(nstat_control_state *state, nstat_src *src, boolean_t);
static bool	nstat_control_reporting_allowed(nstat_control_state *state, nstat_src *src);
static boolean_t nstat_control_begin_query(nstat_control_state *state, const nstat_msg_hdr *hdrp);
static u_int16_t nstat_control_end_query(nstat_control_state *state, nstat_src *last_src, boolean_t partial);
static void	nstat_ifnet_report_ecn_stats(void);

static u_int32_t	nstat_udp_watchers = 0;
static u_int32_t	nstat_userland_udp_watchers = 0;
static u_int32_t	nstat_tcp_watchers = 0;
static u_int32_t	nstat_userland_tcp_watchers = 0;

static void nstat_control_register(void);
/*
 * The lock order is as follows:
 *
 *	socket_lock (inpcb)
 *		nstat_mtx
 *			state->mtx
 */
static volatile OSMallocTag	nstat_malloc_tag = NULL;
static nstat_control_state	*nstat_controls = NULL;
static uint64_t			nstat_idle_time = 0;
static decl_lck_mtx_data(, nstat_mtx);

/* some extern definitions */
extern void mbuf_report_peak_usage(void);
extern void tcp_report_stats(void);
static void
nstat_copy_sa_out(
	const struct sockaddr	*src,
	struct sockaddr		*dst,
	int			maxlen)
{
	if (src->sa_len > maxlen) return;

	bcopy(src, dst, src->sa_len);
	if (src->sa_family == AF_INET6 &&
	    src->sa_len >= sizeof(struct sockaddr_in6))
	{
		struct sockaddr_in6	*sin6 = (struct sockaddr_in6 *)(void *)dst;
		if (IN6_IS_SCOPE_EMBED(&sin6->sin6_addr))
		{
			if (sin6->sin6_scope_id == 0)
				sin6->sin6_scope_id = ntohs(sin6->sin6_addr.s6_addr16[1]);
			sin6->sin6_addr.s6_addr16[1] = 0;
		}
	}
}
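
/*
 * Added commentary: scoped (e.g. link-local) IPv6 addresses use the KAME
 * convention of embedding the scope in the second 16-bit word of the
 * address, so an in-kernel fe80:0004::1 denotes fe80::1%4. The copy above
 * lifts that embedded word into sin6_scope_id and zeroes it, so userland
 * sees a clean address plus an explicit scope identifier.
 */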
static void
nstat_ip_to_sockaddr(
	const struct in_addr	*ip,
	u_int16_t		port,
	struct sockaddr_in	*sin,
	u_int32_t		maxlen)
{
	if (maxlen < sizeof(struct sockaddr_in))
		return;

	sin->sin_family = AF_INET;
	sin->sin_len = sizeof(*sin);
	sin->sin_port = port;
	sin->sin_addr = *ip;
}
static void
nstat_ip6_to_sockaddr(
	const struct in6_addr	*ip6,
	u_int16_t		port,
	struct sockaddr_in6	*sin6,
	u_int32_t		maxlen)
{
	if (maxlen < sizeof(struct sockaddr_in6))
		return;

	sin6->sin6_family = AF_INET6;
	sin6->sin6_len = sizeof(*sin6);
	sin6->sin6_port = port;
	sin6->sin6_addr = *ip6;
	if (IN6_IS_SCOPE_EMBED(&sin6->sin6_addr))
	{
		sin6->sin6_scope_id = ntohs(sin6->sin6_addr.s6_addr16[1]);
		sin6->sin6_addr.s6_addr16[1] = 0;
	}
}
static u_int16_t
nstat_ifnet_to_flags(
	struct ifnet	*ifp)
{
	u_int16_t flags = 0;
	u_int32_t functional_type = if_functional_type(ifp, FALSE);

	/* Panic if someone adds a functional type without updating ntstat. */
	VERIFY(0 <= functional_type && functional_type <= IFRTYPE_FUNCTIONAL_LAST);

	switch (functional_type)
	{
	case IFRTYPE_FUNCTIONAL_UNKNOWN:
		flags |= NSTAT_IFNET_IS_UNKNOWN_TYPE;
		break;
	case IFRTYPE_FUNCTIONAL_LOOPBACK:
		flags |= NSTAT_IFNET_IS_LOOPBACK;
		break;
	case IFRTYPE_FUNCTIONAL_WIRED:
	case IFRTYPE_FUNCTIONAL_INTCOPROC:
		flags |= NSTAT_IFNET_IS_WIRED;
		break;
	case IFRTYPE_FUNCTIONAL_WIFI_INFRA:
		flags |= NSTAT_IFNET_IS_WIFI;
		break;
	case IFRTYPE_FUNCTIONAL_WIFI_AWDL:
		flags |= NSTAT_IFNET_IS_WIFI;
		flags |= NSTAT_IFNET_IS_AWDL;
		break;
	case IFRTYPE_FUNCTIONAL_CELLULAR:
		flags |= NSTAT_IFNET_IS_CELLULAR;
		break;
	}

	if (IFNET_IS_EXPENSIVE(ifp))
	{
		flags |= NSTAT_IFNET_IS_EXPENSIVE;
	}

	return flags;
}
static u_int16_t
nstat_inpcb_to_flags(
	const struct inpcb	*inp)
{
	u_int16_t flags = 0;

	if ((inp != NULL) && (inp->inp_last_outifp != NULL))
	{
		struct ifnet *ifp = inp->inp_last_outifp;
		flags = nstat_ifnet_to_flags(ifp);

		if (flags & NSTAT_IFNET_IS_CELLULAR)
		{
			if (inp->inp_socket != NULL &&
			    (inp->inp_socket->so_flags1 & SOF1_CELLFALLBACK))
				flags |= NSTAT_IFNET_VIA_CELLFALLBACK;
		}
	}
	else
	{
		flags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
	}

	return flags;
}
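
/*
 * Added commentary, with a hypothetical example: a PCB whose last output
 * interface was an AWDL link would report
 * NSTAT_IFNET_IS_WIFI | NSTAT_IFNET_IS_AWDL (plus NSTAT_IFNET_IS_EXPENSIVE
 * if the interface is marked expensive), while a PCB that has never sent a
 * packet reports NSTAT_IFNET_IS_UNKNOWN_TYPE.
 */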
#pragma mark -- Network Statistic Providers --

static errno_t nstat_control_source_add(u_int64_t context, nstat_control_state *state, nstat_provider *provider, nstat_provider_cookie_t cookie);
struct nstat_provider	*nstat_providers = NULL;

static struct nstat_provider*
nstat_find_provider_by_id(
	nstat_provider_id_t	id)
{
	struct nstat_provider	*provider;

	for (provider = nstat_providers; provider != NULL; provider = provider->next)
	{
		if (provider->nstat_provider_id == id)
			break;
	}

	return provider;
}

static errno_t
nstat_lookup_entry(
	nstat_provider_id_t	id,
	const void		*data,
	u_int32_t		length,
	nstat_provider		**out_provider,
	nstat_provider_cookie_t	*out_cookie)
{
	*out_provider = nstat_find_provider_by_id(id);
	if (*out_provider == NULL)
	{
		return ENOENT;
	}

	return (*out_provider)->nstat_lookup(data, length, out_cookie);
}
static void nstat_init_route_provider(void);
static void nstat_init_tcp_provider(void);
static void nstat_init_userland_tcp_provider(void);
static void nstat_init_udp_provider(void);
static void nstat_init_userland_udp_provider(void);
static void nstat_init_ifnet_provider(void);

__private_extern__ void
nstat_init(void)
{
	if (nstat_malloc_tag != NULL) return;

	OSMallocTag tag = OSMalloc_Tagalloc(NET_STAT_CONTROL_NAME, OSMT_DEFAULT);
	if (!OSCompareAndSwapPtr(NULL, tag, &nstat_malloc_tag))
	{
		OSMalloc_Tagfree(tag);
		tag = nstat_malloc_tag;
	}
	else
	{
		// This code path is only hit once, so do the remaining initialization here.
		nstat_init_route_provider();
		nstat_init_tcp_provider();
		nstat_init_userland_tcp_provider();
		nstat_init_udp_provider();
		nstat_init_userland_udp_provider();
		nstat_init_ifnet_provider();
		nstat_control_register();
	}
}
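
/*
 * Illustrative userland sketch (added commentary, not part of this file):
 * clients reach the statistics kernel control registered under
 * NET_STAT_CONTROL_NAME through the standard kctl connect sequence,
 * roughly:
 *
 *	struct ctl_info info;
 *	struct sockaddr_ctl sc;
 *	int fd = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);
 *	bzero(&info, sizeof(info));
 *	strlcpy(info.ctl_name, NET_STAT_CONTROL_NAME, sizeof(info.ctl_name));
 *	ioctl(fd, CTLIOCGINFO, &info);
 *	bzero(&sc, sizeof(sc));
 *	sc.sc_len = sizeof(sc);
 *	sc.sc_family = AF_SYSTEM;
 *	sc.ss_sysaddr = AF_SYS_CONTROL;
 *	sc.sc_id = info.ctl_id;
 *	connect(fd, (struct sockaddr *)&sc, sizeof(sc));
 */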
#pragma mark -- Aligned Buffer Allocation --

struct align_header
{
	u_int32_t	offset;
	u_int32_t	length;
};

static void*
nstat_malloc_aligned(
	u_int32_t	length,
	u_int8_t	alignment,
	OSMallocTag	tag)
{
	struct align_header	*hdr = NULL;
	u_int32_t	size = length + sizeof(*hdr) + alignment - 1;

	u_int8_t	*buffer = OSMalloc(size, tag);
	if (buffer == NULL) return NULL;

	u_int8_t	*aligned = buffer + sizeof(*hdr);
	aligned = (u_int8_t *)P2ROUNDUP(aligned, alignment);

	hdr = (struct align_header *)(void *)(aligned - sizeof(*hdr));
	hdr->offset = aligned - buffer;
	hdr->length = size;

	return aligned;
}

static void
nstat_free_aligned(
	void		*buffer,
	OSMallocTag	tag)
{
	struct align_header	*hdr = (struct align_header *)(void *)((u_int8_t *)buffer - sizeof(*hdr));
	OSFree(((char *)buffer) - hdr->offset, hdr->length, tag);
}
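
/*
 * Worked example (added commentary): nstat_malloc_aligned(64, 8, tag)
 * requests 64 + sizeof(struct align_header) + 7 bytes from OSMalloc(),
 * rounds the address just past the header up to the next 8-byte boundary,
 * and stashes the distance back to the raw pointer (offset) plus the raw
 * size (length) in the header immediately below the returned pointer --
 * exactly what nstat_free_aligned() needs to reconstruct the OSFree() call.
 */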
#pragma mark -- Route Provider --

static nstat_provider	nstat_route_provider;

static errno_t
nstat_route_lookup(
	const void	*data,
	u_int32_t	length,
	nstat_provider_cookie_t	*out_cookie)
{
	// rt_lookup doesn't take const params but it doesn't modify the parameters for
	// the lookup. So...we use a union to eliminate the warning.
	union
	{
		struct sockaddr		*sa;
		const struct sockaddr	*const_sa;
	} dst, mask;

	const nstat_route_add_param	*param = (const nstat_route_add_param *)data;
	*out_cookie = NULL;

	if (length < sizeof(*param))
	{
		return EINVAL;
	}

	if (param->dst.v4.sin_family == 0 ||
	    param->dst.v4.sin_family > AF_MAX ||
	    (param->mask.v4.sin_family != 0 && param->mask.v4.sin_family != param->dst.v4.sin_family))
	{
		return EINVAL;
	}

	if (param->dst.v4.sin_len > sizeof(param->dst) ||
	    (param->mask.v4.sin_family && param->mask.v4.sin_len > sizeof(param->mask.v4.sin_len)))
	{
		return EINVAL;
	}
	if ((param->dst.v4.sin_family == AF_INET &&
	     param->dst.v4.sin_len < sizeof(struct sockaddr_in)) ||
	    (param->dst.v6.sin6_family == AF_INET6 &&
	     param->dst.v6.sin6_len < sizeof(struct sockaddr_in6)))
	{
		return EINVAL;
	}

	dst.const_sa = (const struct sockaddr *)&param->dst;
	mask.const_sa = param->mask.v4.sin_family ? (const struct sockaddr *)&param->mask : NULL;

	struct radix_node_head	*rnh = rt_tables[dst.sa->sa_family];
	if (rnh == NULL) return EAFNOSUPPORT;

	lck_mtx_lock(rnh_lock);
	struct rtentry *rt = rt_lookup(TRUE, dst.sa, mask.sa, rnh, param->ifindex);
	lck_mtx_unlock(rnh_lock);

	if (rt) *out_cookie = (nstat_provider_cookie_t)rt;

	return rt ? 0 : ENOENT;
}
static int
nstat_route_gone(
	nstat_provider_cookie_t	cookie)
{
	struct rtentry	*rt = (struct rtentry *)cookie;
	return ((rt->rt_flags & RTF_UP) == 0) ? 1 : 0;
}
static errno_t
nstat_route_counts(
	nstat_provider_cookie_t	cookie,
	struct nstat_counts	*out_counts,
	int			*out_gone)
{
	struct rtentry		*rt = (struct rtentry *)cookie;
	struct nstat_counts	*rt_stats = rt->rt_stats;

	if (out_gone) *out_gone = 0;

	if (out_gone && (rt->rt_flags & RTF_UP) == 0) *out_gone = 1;

	if (rt_stats)
	{
		atomic_get_64(out_counts->nstat_rxpackets, &rt_stats->nstat_rxpackets);
		atomic_get_64(out_counts->nstat_rxbytes, &rt_stats->nstat_rxbytes);
		atomic_get_64(out_counts->nstat_txpackets, &rt_stats->nstat_txpackets);
		atomic_get_64(out_counts->nstat_txbytes, &rt_stats->nstat_txbytes);
		out_counts->nstat_rxduplicatebytes = rt_stats->nstat_rxduplicatebytes;
		out_counts->nstat_rxoutoforderbytes = rt_stats->nstat_rxoutoforderbytes;
		out_counts->nstat_txretransmit = rt_stats->nstat_txretransmit;
		out_counts->nstat_connectattempts = rt_stats->nstat_connectattempts;
		out_counts->nstat_connectsuccesses = rt_stats->nstat_connectsuccesses;
		out_counts->nstat_min_rtt = rt_stats->nstat_min_rtt;
		out_counts->nstat_avg_rtt = rt_stats->nstat_avg_rtt;
		out_counts->nstat_var_rtt = rt_stats->nstat_var_rtt;
		out_counts->nstat_cell_rxbytes = out_counts->nstat_cell_txbytes = 0;
	}
	else
	{
		bzero(out_counts, sizeof(*out_counts));
	}

	return 0;
}
static void
nstat_route_release(
	nstat_provider_cookie_t	cookie,
	__unused int		locked)
{
	rtfree((struct rtentry *)cookie);
}

static u_int32_t	nstat_route_watchers = 0;
static int
nstat_route_walktree_add(
	struct radix_node	*rn,
	void			*context)
{
	errno_t	result = 0;
	struct rtentry *rt = (struct rtentry *)rn;
	nstat_control_state	*state	= (nstat_control_state *)context;

	lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);

	/* RTF_UP can't change while rnh_lock is held */
	if ((rt->rt_flags & RTF_UP) != 0)
	{
		/* Clear RTPRF_OURS if the route is still usable */
		RT_LOCK(rt);
		if (rt_validate(rt)) {
			RT_ADDREF_LOCKED(rt);
			RT_UNLOCK(rt);
		} else {
			RT_UNLOCK(rt);
			rt = NULL;
		}

		/* Otherwise if RTF_CONDEMNED, treat it as if it were down */
		if (rt == NULL)
			return (0);

		result = nstat_control_source_add(0, state, &nstat_route_provider, rt);
		if (result != 0)
			rtfree_locked(rt);
	}

	return result;
}
static errno_t
nstat_route_add_watcher(
	nstat_control_state	*state)
{
	int i;
	errno_t result = 0;
	OSIncrementAtomic(&nstat_route_watchers);

	lck_mtx_lock(rnh_lock);
	for (i = 1; i < AF_MAX; i++)
	{
		struct radix_node_head *rnh;
		rnh = rt_tables[i];
		if (!rnh) continue;

		result = rnh->rnh_walktree(rnh, nstat_route_walktree_add, state);
		if (result != 0) break;
	}
	lck_mtx_unlock(rnh_lock);

	return result;
}
__private_extern__ void
nstat_route_new_entry(
	struct rtentry	*rt)
{
	if (nstat_route_watchers == 0)
		return;

	lck_mtx_lock(&nstat_mtx);
	if ((rt->rt_flags & RTF_UP) != 0)
	{
		nstat_control_state	*state;
		for (state = nstat_controls; state; state = state->ncs_next)
		{
			if ((state->ncs_watching & (1 << NSTAT_PROVIDER_ROUTE)) != 0)
			{
				// this client is watching routes
				// acquire a reference for the route
				RT_ADDREF(rt);

				// add the source, if that fails, release the reference
				if (nstat_control_source_add(0, state, &nstat_route_provider, rt) != 0)
					RT_REMREF(rt);
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);
}
static void
nstat_route_remove_watcher(
	__unused nstat_control_state	*state)
{
	OSDecrementAtomic(&nstat_route_watchers);
}
static errno_t
nstat_route_copy_descriptor(
	nstat_provider_cookie_t	cookie,
	void			*data,
	u_int32_t		len)
{
	nstat_route_descriptor	*desc = (nstat_route_descriptor *)data;
	if (len < sizeof(*desc))
	{
		return EINVAL;
	}
	bzero(desc, sizeof(*desc));

	struct rtentry	*rt = (struct rtentry *)cookie;
	desc->id = (uint64_t)VM_KERNEL_ADDRPERM(rt);
	desc->parent_id = (uint64_t)VM_KERNEL_ADDRPERM(rt->rt_parent);
	desc->gateway_id = (uint64_t)VM_KERNEL_ADDRPERM(rt->rt_gwroute);

	// key/dest
	struct sockaddr	*sa;
	if ((sa = rt_key(rt)))
		nstat_copy_sa_out(sa, &desc->dst.sa, sizeof(desc->dst));

	// mask
	if ((sa = rt_mask(rt)) && sa->sa_len <= sizeof(desc->mask))
		memcpy(&desc->mask, sa, sa->sa_len);

	// gateway
	if ((sa = rt->rt_gateway))
		nstat_copy_sa_out(sa, &desc->gateway.sa, sizeof(desc->gateway));

	if (rt->rt_ifp)
		desc->ifindex = rt->rt_ifp->if_index;

	desc->flags = rt->rt_flags;

	return 0;
}
static bool
nstat_route_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter)
{
	bool retval = true;

	if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0)
	{
		struct rtentry	*rt = (struct rtentry *)cookie;
		struct ifnet	*ifp = rt->rt_ifp;

		if (ifp)
		{
			uint16_t interface_properties = nstat_ifnet_to_flags(ifp);

			if ((filter->npf_flags & interface_properties) == 0)
			{
				retval = false;
			}
		}
	}
	return retval;
}
709 bzero(&nstat_route_provider
, sizeof(nstat_route_provider
));
710 nstat_route_provider
.nstat_descriptor_length
= sizeof(nstat_route_descriptor
);
711 nstat_route_provider
.nstat_provider_id
= NSTAT_PROVIDER_ROUTE
;
712 nstat_route_provider
.nstat_lookup
= nstat_route_lookup
;
713 nstat_route_provider
.nstat_gone
= nstat_route_gone
;
714 nstat_route_provider
.nstat_counts
= nstat_route_counts
;
715 nstat_route_provider
.nstat_release
= nstat_route_release
;
716 nstat_route_provider
.nstat_watcher_add
= nstat_route_add_watcher
;
717 nstat_route_provider
.nstat_watcher_remove
= nstat_route_remove_watcher
;
718 nstat_route_provider
.nstat_copy_descriptor
= nstat_route_copy_descriptor
;
719 nstat_route_provider
.nstat_reporting_allowed
= nstat_route_reporting_allowed
;
720 nstat_route_provider
.next
= nstat_providers
;
721 nstat_providers
= &nstat_route_provider
;
#pragma mark -- Route Collection --

static struct nstat_counts*
nstat_route_attach(
	struct rtentry	*rte)
{
	struct nstat_counts *result = rte->rt_stats;
	if (result) return result;

	if (nstat_malloc_tag == NULL) nstat_init();

	result = nstat_malloc_aligned(sizeof(*result), sizeof(u_int64_t), nstat_malloc_tag);
	if (!result) return result;

	bzero(result, sizeof(*result));

	if (!OSCompareAndSwapPtr(NULL, result, &rte->rt_stats))
	{
		nstat_free_aligned(result, nstat_malloc_tag);
		result = rte->rt_stats;
	}

	return result;
}
__private_extern__ void
nstat_route_detach(
	struct rtentry	*rte)
{
	if (rte->rt_stats)
	{
		nstat_free_aligned(rte->rt_stats, nstat_malloc_tag);
		rte->rt_stats = NULL;
	}
}
__private_extern__ void
nstat_route_connect_attempt(
	struct rtentry	*rte)
{
	while (rte)
	{
		struct nstat_counts	*stats = nstat_route_attach(rte);
		if (stats)
		{
			OSIncrementAtomic(&stats->nstat_connectattempts);
		}

		rte = rte->rt_parent;
	}
}
__private_extern__ void
nstat_route_connect_success(
	struct rtentry	*rte)
{
	while (rte)
	{
		struct nstat_counts	*stats = nstat_route_attach(rte);
		if (stats)
		{
			OSIncrementAtomic(&stats->nstat_connectsuccesses);
		}

		rte = rte->rt_parent;
	}
}
__private_extern__ void
nstat_route_tx(
	struct rtentry	*rte,
	u_int32_t	packets,
	u_int32_t	bytes,
	u_int32_t	flags)
{
	while (rte)
	{
		struct nstat_counts	*stats = nstat_route_attach(rte);
		if (stats)
		{
			if ((flags & NSTAT_TX_FLAG_RETRANSMIT) != 0)
			{
				OSAddAtomic(bytes, &stats->nstat_txretransmit);
			}
			else
			{
				OSAddAtomic64((SInt64)packets, (SInt64 *)&stats->nstat_txpackets);
				OSAddAtomic64((SInt64)bytes, (SInt64 *)&stats->nstat_txbytes);
			}
		}

		rte = rte->rt_parent;
	}
}
__private_extern__ void
nstat_route_rx(
	struct rtentry	*rte,
	u_int32_t	packets,
	u_int32_t	bytes,
	u_int32_t	flags)
{
	while (rte)
	{
		struct nstat_counts	*stats = nstat_route_attach(rte);
		if (stats)
		{
			if (flags == 0)
			{
				OSAddAtomic64((SInt64)packets, (SInt64 *)&stats->nstat_rxpackets);
				OSAddAtomic64((SInt64)bytes, (SInt64 *)&stats->nstat_rxbytes);
			}
			else
			{
				if (flags & NSTAT_RX_FLAG_OUT_OF_ORDER)
					OSAddAtomic(bytes, &stats->nstat_rxoutoforderbytes);
				if (flags & NSTAT_RX_FLAG_DUPLICATE)
					OSAddAtomic(bytes, &stats->nstat_rxduplicatebytes);
			}
		}

		rte = rte->rt_parent;
	}
}
void
856 const int32_t factor
= 8;
860 struct nstat_counts
* stats
= nstat_route_attach(rte
);
869 oldrtt
= stats
->nstat_avg_rtt
;
876 newrtt
= oldrtt
- (oldrtt
- (int32_t)rtt
) / factor
;
878 if (oldrtt
== newrtt
) break;
879 } while (!OSCompareAndSwap(oldrtt
, newrtt
, &stats
->nstat_avg_rtt
));
884 oldrtt
= stats
->nstat_min_rtt
;
885 if (oldrtt
!= 0 && oldrtt
< (int32_t)rtt
)
889 } while (!OSCompareAndSwap(oldrtt
, rtt
, &stats
->nstat_min_rtt
));
894 oldrtt
= stats
->nstat_var_rtt
;
901 newrtt
= oldrtt
- (oldrtt
- (int32_t)rtt_var
) / factor
;
903 if (oldrtt
== newrtt
) break;
904 } while (!OSCompareAndSwap(oldrtt
, newrtt
, &stats
->nstat_var_rtt
));
907 rte
= rte
->rt_parent
;
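
/*
 * Added commentary: the averaging above is an exponentially weighted
 * moving average with gain 1/factor, i.e.
 *
 *	newrtt = oldrtt + (rtt - oldrtt) / 8
 *
 * the same alpha = 1/8 smoothing TCP applies to srtt (RFC 6298). The CAS
 * loops retry until a consistent value is published without taking a lock.
 */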
#pragma mark -- TCP Kernel Provider --

/*
 * Due to the way the kernel deallocates a process (the process structure
 * might be gone by the time we get the PCB detach notification),
 * we need to cache the process name. Without this, proc_name() would
 * return null and the process name would never be sent to userland.
 *
 * For UDP sockets, we also cache the connection tuples along with the
 * interface index. This is necessary because when UDP sockets are
 * disconnected, the connection tuples are forever lost from the inpcb, thus
 * we need to keep track of the last call to connect() in ntstat.
 */
struct nstat_tucookie {
	struct inpcb	*inp;
	char		pname[MAXCOMLEN+1];
	bool		cached;
	union
	{
		struct sockaddr_in	v4;
		struct sockaddr_in6	v6;
	} local;
	union
	{
		struct sockaddr_in	v4;
		struct sockaddr_in6	v6;
	} remote;
	unsigned int	if_index;
	uint16_t	ifnet_properties;
};
static struct nstat_tucookie *
nstat_tucookie_alloc_internal(
	struct inpcb	*inp,
	bool		ref,
	bool		locked)
{
	struct nstat_tucookie *cookie;

	cookie = OSMalloc(sizeof(*cookie), nstat_malloc_tag);
	if (cookie == NULL)
		return NULL;
	if (!locked)
		lck_mtx_assert(&nstat_mtx, LCK_MTX_ASSERT_NOTOWNED);
	if (ref && in_pcb_checkstate(inp, WNT_ACQUIRE, locked) == WNT_STOPUSING)
	{
		OSFree(cookie, sizeof(*cookie), nstat_malloc_tag);
		return NULL;
	}
	bzero(cookie, sizeof(*cookie));
	cookie->inp = inp;
	proc_name(inp->inp_socket->last_pid, cookie->pname,
	    sizeof(cookie->pname));
	/*
	 * We only increment the reference count for UDP sockets because we
	 * only cache UDP socket tuples.
	 */
	if (SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP)
		OSIncrementAtomic(&inp->inp_nstat_refcnt);

	return cookie;
}
static struct nstat_tucookie *
nstat_tucookie_alloc(
	struct inpcb	*inp)
{
	return nstat_tucookie_alloc_internal(inp, false, false);
}

static struct nstat_tucookie *
nstat_tucookie_alloc_ref(
	struct inpcb	*inp)
{
	return nstat_tucookie_alloc_internal(inp, true, false);
}

static struct nstat_tucookie *
nstat_tucookie_alloc_ref_locked(
	struct inpcb	*inp)
{
	return nstat_tucookie_alloc_internal(inp, true, true);
}
static void
nstat_tucookie_release_internal(
	struct nstat_tucookie	*cookie,
	int			inplock)
{
	if (SOCK_PROTO(cookie->inp->inp_socket) == IPPROTO_UDP)
		OSDecrementAtomic(&cookie->inp->inp_nstat_refcnt);
	in_pcb_checkstate(cookie->inp, WNT_RELEASE, inplock);
	OSFree(cookie, sizeof(*cookie), nstat_malloc_tag);
}

static void
nstat_tucookie_release(
	struct nstat_tucookie	*cookie)
{
	nstat_tucookie_release_internal(cookie, false);
}

static void
nstat_tucookie_release_locked(
	struct nstat_tucookie	*cookie)
{
	nstat_tucookie_release_internal(cookie, true);
}
static nstat_provider	nstat_tcp_provider;

static errno_t
nstat_tcpudp_lookup(
	struct inpcbinfo	*inpinfo,
	const void		*data,
	u_int32_t		length,
	nstat_provider_cookie_t	*out_cookie)
{
	struct inpcb *inp = NULL;

	// parameter validation
	const nstat_tcp_add_param	*param = (const nstat_tcp_add_param *)data;
	if (length < sizeof(*param))
	{
		return EINVAL;
	}

	// src and dst must match
	if (param->remote.v4.sin_family != 0 &&
	    param->remote.v4.sin_family != param->local.v4.sin_family)
	{
		return EINVAL;
	}

	switch (param->local.v4.sin_family)
	{
		case AF_INET:
		{
			if (param->local.v4.sin_len != sizeof(param->local.v4) ||
			    (param->remote.v4.sin_family != 0 &&
			     param->remote.v4.sin_len != sizeof(param->remote.v4)))
			{
				return EINVAL;
			}

			inp = in_pcblookup_hash(inpinfo, param->remote.v4.sin_addr, param->remote.v4.sin_port,
			    param->local.v4.sin_addr, param->local.v4.sin_port, 1, NULL);
		}
		break;

		case AF_INET6:
		{
			union
			{
				const struct in6_addr	*in6c;
				struct in6_addr		*in6;
			} local, remote;

			if (param->local.v6.sin6_len != sizeof(param->local.v6) ||
			    (param->remote.v6.sin6_family != 0 &&
			     param->remote.v6.sin6_len != sizeof(param->remote.v6)))
			{
				return EINVAL;
			}

			local.in6c = &param->local.v6.sin6_addr;
			remote.in6c = &param->remote.v6.sin6_addr;

			inp = in6_pcblookup_hash(inpinfo, remote.in6, param->remote.v6.sin6_port,
			    local.in6, param->local.v6.sin6_port, 1, NULL);
		}
		break;

		default:
			return EINVAL;
	}

	if (inp == NULL)
		return ENOENT;

	// At this point we have a ref to the inpcb
	*out_cookie = nstat_tucookie_alloc(inp);
	if (*out_cookie == NULL)
		in_pcb_checkstate(inp, WNT_RELEASE, 0);

	return 0;
}
static errno_t
nstat_tcp_lookup(
	const void		*data,
	u_int32_t		length,
	nstat_provider_cookie_t	*out_cookie)
{
	return nstat_tcpudp_lookup(&tcbinfo, data, length, out_cookie);
}
static int
nstat_tcp_gone(
	nstat_provider_cookie_t	cookie)
{
	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;
	struct inpcb *inp;
	struct tcpcb *tp;

	return (!(inp = tucookie->inp) ||
	    !(tp = intotcpcb(inp)) ||
	    inp->inp_state == INPCB_STATE_DEAD) ? 1 : 0;
}
static errno_t
nstat_tcp_counts(
	nstat_provider_cookie_t	cookie,
	struct nstat_counts	*out_counts,
	int			*out_gone)
{
	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;
	struct inpcb *inp;

	bzero(out_counts, sizeof(*out_counts));

	if (out_gone) *out_gone = 0;

	// if the pcb is in the dead state, we should stop using it
	if (nstat_tcp_gone(cookie))
	{
		if (out_gone) *out_gone = 1;
		if (!(inp = tucookie->inp) || !intotcpcb(inp))
			return EINVAL;
	}
	inp = tucookie->inp;
	struct tcpcb *tp = intotcpcb(inp);

	atomic_get_64(out_counts->nstat_rxpackets, &inp->inp_stat->rxpackets);
	atomic_get_64(out_counts->nstat_rxbytes, &inp->inp_stat->rxbytes);
	atomic_get_64(out_counts->nstat_txpackets, &inp->inp_stat->txpackets);
	atomic_get_64(out_counts->nstat_txbytes, &inp->inp_stat->txbytes);
	out_counts->nstat_rxduplicatebytes = tp->t_stat.rxduplicatebytes;
	out_counts->nstat_rxoutoforderbytes = tp->t_stat.rxoutoforderbytes;
	out_counts->nstat_txretransmit = tp->t_stat.txretransmitbytes;
	out_counts->nstat_connectattempts = tp->t_state >= TCPS_SYN_SENT ? 1 : 0;
	out_counts->nstat_connectsuccesses = tp->t_state >= TCPS_ESTABLISHED ? 1 : 0;
	out_counts->nstat_avg_rtt = tp->t_srtt;
	out_counts->nstat_min_rtt = tp->t_rttbest;
	out_counts->nstat_var_rtt = tp->t_rttvar;
	if (out_counts->nstat_avg_rtt < out_counts->nstat_min_rtt)
		out_counts->nstat_min_rtt = out_counts->nstat_avg_rtt;
	atomic_get_64(out_counts->nstat_cell_rxbytes, &inp->inp_cstat->rxbytes);
	atomic_get_64(out_counts->nstat_cell_txbytes, &inp->inp_cstat->txbytes);
	atomic_get_64(out_counts->nstat_wifi_rxbytes, &inp->inp_wstat->rxbytes);
	atomic_get_64(out_counts->nstat_wifi_txbytes, &inp->inp_wstat->txbytes);
	atomic_get_64(out_counts->nstat_wired_rxbytes, &inp->inp_Wstat->rxbytes);
	atomic_get_64(out_counts->nstat_wired_txbytes, &inp->inp_Wstat->txbytes);

	return 0;
}
static void
nstat_tcp_release(
	nstat_provider_cookie_t	cookie,
	int			locked)
{
	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;

	nstat_tucookie_release_internal(tucookie, locked);
}
static errno_t
nstat_tcp_add_watcher(
	nstat_control_state	*state)
{
	OSIncrementAtomic(&nstat_tcp_watchers);

	lck_rw_lock_shared(tcbinfo.ipi_lock);

	// Add all current tcp inpcbs. Ignore those in timewait
	struct inpcb *inp;
	struct nstat_tucookie *cookie;
	LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list)
	{
		cookie = nstat_tucookie_alloc_ref(inp);
		if (cookie == NULL)
			continue;
		if (nstat_control_source_add(0, state, &nstat_tcp_provider,
		    cookie) != 0)
		{
			nstat_tucookie_release(cookie);
			break;
		}
	}

	lck_rw_done(tcbinfo.ipi_lock);

	return 0;
}
static void
nstat_tcp_remove_watcher(
	__unused nstat_control_state	*state)
{
	OSDecrementAtomic(&nstat_tcp_watchers);
}
__private_extern__ void
nstat_tcp_new_pcb(
	struct inpcb	*inp)
{
	struct nstat_tucookie *cookie;

	if (nstat_tcp_watchers == 0)
		return;

	socket_lock(inp->inp_socket, 0);
	lck_mtx_lock(&nstat_mtx);
	nstat_control_state	*state;
	for (state = nstat_controls; state; state = state->ncs_next)
	{
		if ((state->ncs_watching & (1 << NSTAT_PROVIDER_TCP_KERNEL)) != 0)
		{
			// this client is watching tcp
			// acquire a reference for it
			cookie = nstat_tucookie_alloc_ref_locked(inp);
			if (cookie == NULL)
				continue;
			// add the source, if that fails, release the reference
			if (nstat_control_source_add(0, state,
			    &nstat_tcp_provider, cookie) != 0)
			{
				nstat_tucookie_release_locked(cookie);
				break;
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);
	socket_unlock(inp->inp_socket, 0);
}
__private_extern__ void
nstat_pcb_detach(struct inpcb *inp)
{
	nstat_control_state *state;
	nstat_src *src, *prevsrc;
	nstat_src *dead_list = NULL;
	struct nstat_tucookie *tucookie;
	errno_t result;

	if (inp == NULL || (nstat_tcp_watchers == 0 && nstat_udp_watchers == 0))
		return;

	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next)
	{
		lck_mtx_lock(&state->mtx);
		for (prevsrc = NULL, src = state->ncs_srcs; src;
		    prevsrc = src, src = src->next)
		{
			tucookie = (struct nstat_tucookie *)src->cookie;
			if (tucookie->inp == inp)
				break;
		}

		if (src)
		{
			result = nstat_control_send_goodbye(state, src);

			if (prevsrc)
				prevsrc->next = src->next;
			else
				state->ncs_srcs = src->next;

			src->next = dead_list;
			dead_list = src;
		}
		lck_mtx_unlock(&state->mtx);
	}
	lck_mtx_unlock(&nstat_mtx);

	while (dead_list) {
		src = dead_list;
		dead_list = src->next;

		nstat_control_cleanup_source(NULL, src, TRUE);
	}
}
__private_extern__ void
nstat_pcb_cache(struct inpcb *inp)
{
	nstat_control_state *state;
	nstat_src *src;
	struct nstat_tucookie *tucookie;

	if (inp == NULL || nstat_udp_watchers == 0 ||
	    inp->inp_nstat_refcnt == 0)
		return;
	VERIFY(SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP);
	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next) {
		lck_mtx_lock(&state->mtx);
		for (src = state->ncs_srcs; src; src = src->next)
		{
			tucookie = (struct nstat_tucookie *)src->cookie;
			if (tucookie->inp == inp)
			{
				if (inp->inp_vflag & INP_IPV6)
				{
					nstat_ip6_to_sockaddr(&inp->in6p_laddr,
					    inp->inp_lport,
					    &tucookie->local.v6,
					    sizeof(tucookie->local));
					nstat_ip6_to_sockaddr(&inp->in6p_faddr,
					    inp->inp_fport,
					    &tucookie->remote.v6,
					    sizeof(tucookie->remote));
				}
				else if (inp->inp_vflag & INP_IPV4)
				{
					nstat_ip_to_sockaddr(&inp->inp_laddr,
					    inp->inp_lport,
					    &tucookie->local.v4,
					    sizeof(tucookie->local));
					nstat_ip_to_sockaddr(&inp->inp_faddr,
					    inp->inp_fport,
					    &tucookie->remote.v4,
					    sizeof(tucookie->remote));
				}
				if (inp->inp_last_outifp)
					tucookie->if_index =
					    inp->inp_last_outifp->if_index;

				tucookie->ifnet_properties = nstat_inpcb_to_flags(inp);
				tucookie->cached = true;
				break;
			}
		}
		lck_mtx_unlock(&state->mtx);
	}
	lck_mtx_unlock(&nstat_mtx);
}
__private_extern__ void
nstat_pcb_invalidate_cache(struct inpcb *inp)
{
	nstat_control_state *state;
	nstat_src *src;
	struct nstat_tucookie *tucookie;

	if (inp == NULL || nstat_udp_watchers == 0 ||
	    inp->inp_nstat_refcnt == 0)
		return;
	VERIFY(SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP);
	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next) {
		lck_mtx_lock(&state->mtx);
		for (src = state->ncs_srcs; src; src = src->next)
		{
			tucookie = (struct nstat_tucookie *)src->cookie;
			if (tucookie->inp == inp)
			{
				tucookie->cached = false;
				break;
			}
		}
		lck_mtx_unlock(&state->mtx);
	}
	lck_mtx_unlock(&nstat_mtx);
}
static errno_t
nstat_tcp_copy_descriptor(
	nstat_provider_cookie_t	cookie,
	void			*data,
	u_int32_t		len)
{
	if (len < sizeof(nstat_tcp_descriptor))
	{
		return EINVAL;
	}

	if (nstat_tcp_gone(cookie))
		return EINVAL;

	nstat_tcp_descriptor	*desc = (nstat_tcp_descriptor *)data;
	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;
	struct inpcb		*inp = tucookie->inp;
	struct tcpcb		*tp = intotcpcb(inp);
	bzero(desc, sizeof(*desc));

	if (inp->inp_vflag & INP_IPV6)
	{
		nstat_ip6_to_sockaddr(&inp->in6p_laddr, inp->inp_lport,
		    &desc->local.v6, sizeof(desc->local));
		nstat_ip6_to_sockaddr(&inp->in6p_faddr, inp->inp_fport,
		    &desc->remote.v6, sizeof(desc->remote));
	}
	else if (inp->inp_vflag & INP_IPV4)
	{
		nstat_ip_to_sockaddr(&inp->inp_laddr, inp->inp_lport,
		    &desc->local.v4, sizeof(desc->local));
		nstat_ip_to_sockaddr(&inp->inp_faddr, inp->inp_fport,
		    &desc->remote.v4, sizeof(desc->remote));
	}

	desc->state = intotcpcb(inp)->t_state;
	desc->ifindex = (inp->inp_last_outifp == NULL) ? 0 :
	    inp->inp_last_outifp->if_index;

	// danger - not locked, values could be bogus
	desc->txunacked = tp->snd_max - tp->snd_una;
	desc->txwindow = tp->snd_wnd;
	desc->txcwindow = tp->snd_cwnd;

	if (CC_ALGO(tp)->name != NULL) {
		strlcpy(desc->cc_algo, CC_ALGO(tp)->name,
		    sizeof(desc->cc_algo));
	}

	struct socket *so = inp->inp_socket;
	if (so)
	{
		// TBD - take the socket lock around these to make sure
		// they're in sync
		desc->upid = so->last_upid;
		desc->pid = so->last_pid;
		desc->traffic_class = so->so_traffic_class;
		if ((so->so_flags1 & SOF1_TRAFFIC_MGT_SO_BACKGROUND))
			desc->traffic_mgt_flags |= TRAFFIC_MGT_SO_BACKGROUND;
		if ((so->so_flags1 & SOF1_TRAFFIC_MGT_TCP_RECVBG))
			desc->traffic_mgt_flags |= TRAFFIC_MGT_TCP_RECVBG;
		proc_name(desc->pid, desc->pname, sizeof(desc->pname));
		if (desc->pname[0] == 0)
		{
			strlcpy(desc->pname, tucookie->pname,
			    sizeof(desc->pname));
		}
		else
		{
			desc->pname[sizeof(desc->pname) - 1] = 0;
			strlcpy(tucookie->pname, desc->pname,
			    sizeof(tucookie->pname));
		}
		memcpy(desc->uuid, so->last_uuid, sizeof(so->last_uuid));
		memcpy(desc->vuuid, so->so_vuuid, sizeof(so->so_vuuid));
		if (so->so_flags & SOF_DELEGATED) {
			desc->eupid = so->e_upid;
			desc->epid = so->e_pid;
			memcpy(desc->euuid, so->e_uuid, sizeof(so->e_uuid));
		} else {
			desc->eupid = desc->upid;
			desc->epid = desc->pid;
			memcpy(desc->euuid, desc->uuid, sizeof(desc->uuid));
		}
		desc->sndbufsize = so->so_snd.sb_hiwat;
		desc->sndbufused = so->so_snd.sb_cc;
		desc->rcvbufsize = so->so_rcv.sb_hiwat;
		desc->rcvbufused = so->so_rcv.sb_cc;
	}

	tcp_get_connectivity_status(tp, &desc->connstatus);
	desc->ifnet_properties = nstat_inpcb_to_flags(inp);

	return 0;
}
static bool
nstat_tcpudp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter, bool is_UDP)
{
	bool retval = true;

	if ((filter->npf_flags & (NSTAT_FILTER_IFNET_FLAGS|NSTAT_FILTER_SPECIFIC_USER)) != 0)
	{
		struct nstat_tucookie *tucookie = (struct nstat_tucookie *)cookie;
		struct inpcb *inp = tucookie->inp;

		/* Only apply interface filter if at least one is allowed. */
		if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0)
		{
			uint16_t interface_properties = nstat_inpcb_to_flags(inp);

			if ((filter->npf_flags & interface_properties) == 0)
			{
				// For UDP, we could have an undefined interface and yet transfers may have occurred.
				// We allow reporting if there have been transfers of the requested kind.
				// This is imperfect as we cannot account for the expensive attribute over wifi.
				// We also assume that cellular is expensive and we have no way to select for AWDL
				if (is_UDP)
				{
					do
					{
						if ((filter->npf_flags & (NSTAT_FILTER_ACCEPT_CELLULAR|NSTAT_FILTER_ACCEPT_EXPENSIVE)) &&
						    (inp->inp_cstat->rxbytes || inp->inp_cstat->txbytes))
						{
							break;
						}
						if ((filter->npf_flags & NSTAT_FILTER_ACCEPT_WIFI) &&
						    (inp->inp_wstat->rxbytes || inp->inp_wstat->txbytes))
						{
							break;
						}
						if ((filter->npf_flags & NSTAT_FILTER_ACCEPT_WIRED) &&
						    (inp->inp_Wstat->rxbytes || inp->inp_Wstat->txbytes))
						{
							break;
						}
						return false;
					} while (0);
				}
				else
				{
					return false;
				}
			}
		}

		if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0) && (retval))
		{
			struct socket *so = inp->inp_socket;
			retval = false;

			if (so)
			{
				if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_PID) != 0) &&
				    (filter->npf_pid == so->last_pid))
				{
					retval = true;
				}
				else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EPID) != 0) &&
				    (filter->npf_pid == (so->so_flags & SOF_DELEGATED)? so->e_upid : so->last_pid))
				{
					retval = true;
				}
				else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_UUID) != 0) &&
				    (memcmp(filter->npf_uuid, so->last_uuid, sizeof(so->last_uuid)) == 0))
				{
					retval = true;
				}
				else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EUUID) != 0) &&
				    (memcmp(filter->npf_uuid, (so->so_flags & SOF_DELEGATED)? so->e_uuid : so->last_uuid,
				    sizeof(so->last_uuid)) == 0))
				{
					retval = true;
				}
			}
		}
	}
	return retval;
}
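
/*
 * Added commentary: in the EPID arm above, because == binds tighter than
 * ?:, the condition reads literally as
 * ((filter->npf_pid == (so->so_flags & SOF_DELEGATED)) ? so->e_upid : so->last_pid),
 * i.e. the comparison result selects which pid is then tested for
 * truthiness. If the intent is to compare npf_pid against the delegated or
 * last pid, extra parentheses around the ?: would make that grouping
 * explicit.
 */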
static bool
nstat_tcp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter)
{
	return nstat_tcpudp_reporting_allowed(cookie, filter, FALSE);
}
static void
nstat_init_tcp_provider(void)
{
	bzero(&nstat_tcp_provider, sizeof(nstat_tcp_provider));
	nstat_tcp_provider.nstat_descriptor_length = sizeof(nstat_tcp_descriptor);
	nstat_tcp_provider.nstat_provider_id = NSTAT_PROVIDER_TCP_KERNEL;
	nstat_tcp_provider.nstat_lookup = nstat_tcp_lookup;
	nstat_tcp_provider.nstat_gone = nstat_tcp_gone;
	nstat_tcp_provider.nstat_counts = nstat_tcp_counts;
	nstat_tcp_provider.nstat_release = nstat_tcp_release;
	nstat_tcp_provider.nstat_watcher_add = nstat_tcp_add_watcher;
	nstat_tcp_provider.nstat_watcher_remove = nstat_tcp_remove_watcher;
	nstat_tcp_provider.nstat_copy_descriptor = nstat_tcp_copy_descriptor;
	nstat_tcp_provider.nstat_reporting_allowed = nstat_tcp_reporting_allowed;
	nstat_tcp_provider.next = nstat_providers;
	nstat_providers = &nstat_tcp_provider;
}
#pragma mark -- UDP Provider --

static nstat_provider	nstat_udp_provider;

static errno_t
nstat_udp_lookup(
	const void		*data,
	u_int32_t		length,
	nstat_provider_cookie_t	*out_cookie)
{
	return nstat_tcpudp_lookup(&udbinfo, data, length, out_cookie);
}
static int
nstat_udp_gone(
	nstat_provider_cookie_t	cookie)
{
	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;
	struct inpcb *inp;

	return (!(inp = tucookie->inp) ||
	    inp->inp_state == INPCB_STATE_DEAD) ? 1 : 0;
}
static errno_t
nstat_udp_counts(
	nstat_provider_cookie_t	cookie,
	struct nstat_counts	*out_counts,
	int			*out_gone)
{
	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;

	if (out_gone) *out_gone = 0;

	// if the pcb is in the dead state, we should stop using it
	if (nstat_udp_gone(cookie))
	{
		if (out_gone) *out_gone = 1;
		if (!tucookie->inp)
			return EINVAL;
	}
	struct inpcb *inp = tucookie->inp;

	atomic_get_64(out_counts->nstat_rxpackets, &inp->inp_stat->rxpackets);
	atomic_get_64(out_counts->nstat_rxbytes, &inp->inp_stat->rxbytes);
	atomic_get_64(out_counts->nstat_txpackets, &inp->inp_stat->txpackets);
	atomic_get_64(out_counts->nstat_txbytes, &inp->inp_stat->txbytes);
	atomic_get_64(out_counts->nstat_cell_rxbytes, &inp->inp_cstat->rxbytes);
	atomic_get_64(out_counts->nstat_cell_txbytes, &inp->inp_cstat->txbytes);
	atomic_get_64(out_counts->nstat_wifi_rxbytes, &inp->inp_wstat->rxbytes);
	atomic_get_64(out_counts->nstat_wifi_txbytes, &inp->inp_wstat->txbytes);
	atomic_get_64(out_counts->nstat_wired_rxbytes, &inp->inp_Wstat->rxbytes);
	atomic_get_64(out_counts->nstat_wired_txbytes, &inp->inp_Wstat->txbytes);

	return 0;
}
static void
nstat_udp_release(
	nstat_provider_cookie_t	cookie,
	int			locked)
{
	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;

	nstat_tucookie_release_internal(tucookie, locked);
}
static errno_t
nstat_udp_add_watcher(
	nstat_control_state	*state)
{
	struct inpcb *inp;
	struct nstat_tucookie *cookie;

	OSIncrementAtomic(&nstat_udp_watchers);

	lck_rw_lock_shared(udbinfo.ipi_lock);
	// Add all current UDP inpcbs.
	LIST_FOREACH(inp, udbinfo.ipi_listhead, inp_list)
	{
		cookie = nstat_tucookie_alloc_ref(inp);
		if (cookie == NULL)
			continue;
		if (nstat_control_source_add(0, state, &nstat_udp_provider,
		    cookie) != 0)
		{
			nstat_tucookie_release(cookie);
			break;
		}
	}

	lck_rw_done(udbinfo.ipi_lock);

	return 0;
}
static void
nstat_udp_remove_watcher(
	__unused nstat_control_state	*state)
{
	OSDecrementAtomic(&nstat_udp_watchers);
}
__private_extern__ void
nstat_udp_new_pcb(
	struct inpcb	*inp)
{
	struct nstat_tucookie *cookie;

	if (nstat_udp_watchers == 0)
		return;

	socket_lock(inp->inp_socket, 0);
	lck_mtx_lock(&nstat_mtx);
	nstat_control_state	*state;
	for (state = nstat_controls; state; state = state->ncs_next)
	{
		if ((state->ncs_watching & (1 << NSTAT_PROVIDER_UDP_KERNEL)) != 0)
		{
			// this client is watching udp
			// acquire a reference for it
			cookie = nstat_tucookie_alloc_ref_locked(inp);
			if (cookie == NULL)
				continue;
			// add the source, if that fails, release the reference
			if (nstat_control_source_add(0, state,
			    &nstat_udp_provider, cookie) != 0)
			{
				nstat_tucookie_release_locked(cookie);
				break;
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);
	socket_unlock(inp->inp_socket, 0);
}
static errno_t
nstat_udp_copy_descriptor(
	nstat_provider_cookie_t	cookie,
	void			*data,
	u_int32_t		len)
{
	if (len < sizeof(nstat_udp_descriptor))
	{
		return EINVAL;
	}

	if (nstat_udp_gone(cookie))
		return EINVAL;

	struct nstat_tucookie	*tucookie =
	    (struct nstat_tucookie *)cookie;
	nstat_udp_descriptor	*desc = (nstat_udp_descriptor *)data;
	struct inpcb		*inp = tucookie->inp;

	bzero(desc, sizeof(*desc));

	if (tucookie->cached == false) {
		if (inp->inp_vflag & INP_IPV6)
		{
			nstat_ip6_to_sockaddr(&inp->in6p_laddr, inp->inp_lport,
			    &desc->local.v6, sizeof(desc->local.v6));
			nstat_ip6_to_sockaddr(&inp->in6p_faddr, inp->inp_fport,
			    &desc->remote.v6, sizeof(desc->remote.v6));
		}
		else if (inp->inp_vflag & INP_IPV4)
		{
			nstat_ip_to_sockaddr(&inp->inp_laddr, inp->inp_lport,
			    &desc->local.v4, sizeof(desc->local.v4));
			nstat_ip_to_sockaddr(&inp->inp_faddr, inp->inp_fport,
			    &desc->remote.v4, sizeof(desc->remote.v4));
		}
		desc->ifnet_properties = nstat_inpcb_to_flags(inp);
	}
	else
	{
		if (inp->inp_vflag & INP_IPV6)
		{
			memcpy(&desc->local.v6, &tucookie->local.v6,
			    sizeof(desc->local.v6));
			memcpy(&desc->remote.v6, &tucookie->remote.v6,
			    sizeof(desc->remote.v6));
		}
		else if (inp->inp_vflag & INP_IPV4)
		{
			memcpy(&desc->local.v4, &tucookie->local.v4,
			    sizeof(desc->local.v4));
			memcpy(&desc->remote.v4, &tucookie->remote.v4,
			    sizeof(desc->remote.v4));
		}
		desc->ifnet_properties = tucookie->ifnet_properties;
	}

	if (inp->inp_last_outifp)
		desc->ifindex = inp->inp_last_outifp->if_index;
	else
		desc->ifindex = tucookie->if_index;

	struct socket *so = inp->inp_socket;
	if (so)
	{
		// TBD - take the socket lock around these to make sure
		// they're in sync
		desc->upid = so->last_upid;
		desc->pid = so->last_pid;
		proc_name(desc->pid, desc->pname, sizeof(desc->pname));
		if (desc->pname[0] == 0)
		{
			strlcpy(desc->pname, tucookie->pname,
			    sizeof(desc->pname));
		}
		else
		{
			desc->pname[sizeof(desc->pname) - 1] = 0;
			strlcpy(tucookie->pname, desc->pname,
			    sizeof(tucookie->pname));
		}
		memcpy(desc->uuid, so->last_uuid, sizeof(so->last_uuid));
		memcpy(desc->vuuid, so->so_vuuid, sizeof(so->so_vuuid));
		if (so->so_flags & SOF_DELEGATED) {
			desc->eupid = so->e_upid;
			desc->epid = so->e_pid;
			memcpy(desc->euuid, so->e_uuid, sizeof(so->e_uuid));
		} else {
			desc->eupid = desc->upid;
			desc->epid = desc->pid;
			memcpy(desc->euuid, desc->uuid, sizeof(desc->uuid));
		}
		desc->rcvbufsize = so->so_rcv.sb_hiwat;
		desc->rcvbufused = so->so_rcv.sb_cc;
		desc->traffic_class = so->so_traffic_class;
	}

	return 0;
}
static bool
nstat_udp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter)
{
	return nstat_tcpudp_reporting_allowed(cookie, filter, TRUE);
}
static void
nstat_init_udp_provider(void)
{
	bzero(&nstat_udp_provider, sizeof(nstat_udp_provider));
	nstat_udp_provider.nstat_provider_id = NSTAT_PROVIDER_UDP_KERNEL;
	nstat_udp_provider.nstat_descriptor_length = sizeof(nstat_udp_descriptor);
	nstat_udp_provider.nstat_lookup = nstat_udp_lookup;
	nstat_udp_provider.nstat_gone = nstat_udp_gone;
	nstat_udp_provider.nstat_counts = nstat_udp_counts;
	nstat_udp_provider.nstat_watcher_add = nstat_udp_add_watcher;
	nstat_udp_provider.nstat_watcher_remove = nstat_udp_remove_watcher;
	nstat_udp_provider.nstat_copy_descriptor = nstat_udp_copy_descriptor;
	nstat_udp_provider.nstat_release = nstat_udp_release;
	nstat_udp_provider.nstat_reporting_allowed = nstat_udp_reporting_allowed;
	nstat_udp_provider.next = nstat_providers;
	nstat_providers = &nstat_udp_provider;
}
#pragma mark -- TCP/UDP Userland

// Almost all of this infrastructure is common to both TCP and UDP

static nstat_provider	nstat_userland_tcp_provider;
static nstat_provider	nstat_userland_udp_provider;

struct nstat_tu_shadow {
	tailq_entry_tu_shadow			shad_link;
	userland_stats_request_vals_fn		*shad_getvals_fn;
	userland_stats_provider_context		*shad_provider_context;
	u_int64_t				shad_properties;
	int					shad_provider;
	uint32_t				shad_magic;
};

// Magic number checking should remain in place until the userland provider has been fully proven
#define TU_SHADOW_MAGIC			0xfeedf00d
#define TU_SHADOW_UNMAGIC		0xdeaddeed

static tailq_head_tu_shadow nstat_userprot_shad_head = TAILQ_HEAD_INITIALIZER(nstat_userprot_shad_head);
static errno_t
nstat_userland_tu_lookup(
	__unused const void			*data,
	__unused u_int32_t			length,
	__unused nstat_provider_cookie_t	*out_cookie)
{
	// Looking up a specific connection is not supported
	return ENOTSUP;
}

static int
nstat_userland_tu_gone(
	__unused nstat_provider_cookie_t	cookie)
{
	// Returns non-zero if the source has gone.
	// We don't keep a source hanging around, so the answer is always 0
	return 0;
}
static errno_t
nstat_userland_tu_counts(
	nstat_provider_cookie_t	cookie,
	struct nstat_counts	*out_counts,
	int			*out_gone)
{
	struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie;
	assert(shad->shad_magic == TU_SHADOW_MAGIC);

	bool result = (*shad->shad_getvals_fn)(shad->shad_provider_context, out_counts, NULL);

	if (out_gone) *out_gone = 0;

	return (result)? 0 : EIO;
}
static errno_t
nstat_userland_tu_copy_descriptor(
	nstat_provider_cookie_t	cookie,
	void			*data,
	__unused u_int32_t	len)
{
	struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie;
	assert(shad->shad_magic == TU_SHADOW_MAGIC);

	bool result = (*shad->shad_getvals_fn)(shad->shad_provider_context, NULL, data);

	return (result)? 0 : EIO;
}
static void
nstat_userland_tu_release(
	__unused nstat_provider_cookie_t	cookie,
	__unused int				locked)
{
	// Called when a nstat_src is detached.
	// We don't reference count or ask for delayed release so nothing to do here.
}
static bool
check_reporting_for_user(nstat_provider_filter *filter, pid_t pid, pid_t epid, uuid_t *uuid, uuid_t *euuid)
{
	bool retval = true;

	if ((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0)
	{
		retval = false;

		if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_PID) != 0) &&
		    (filter->npf_pid == pid))
		{
			retval = true;
		}
		else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EPID) != 0) &&
		    (filter->npf_pid == epid))
		{
			retval = true;
		}
		else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_UUID) != 0) &&
		    (memcmp(filter->npf_uuid, uuid, sizeof(*uuid)) == 0))
		{
			retval = true;
		}
		else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EUUID) != 0) &&
		    (memcmp(filter->npf_uuid, euuid, sizeof(*euuid)) == 0))
		{
			retval = true;
		}
	}
	return retval;
}
static bool
nstat_userland_tcp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter)
{
	bool retval = true;

	if ((filter->npf_flags & (NSTAT_FILTER_IFNET_FLAGS|NSTAT_FILTER_SPECIFIC_USER)) != 0)
	{
		nstat_tcp_descriptor tcp_desc;	// Stack allocation - OK or pushing the limits too far?
		struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie;

		assert(shad->shad_magic == TU_SHADOW_MAGIC);

		if ((*shad->shad_getvals_fn)(shad->shad_provider_context, NULL, &tcp_desc))
		{
			if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0)
			{
				if ((filter->npf_flags & tcp_desc.ifnet_properties) == 0)
				{
					return false;
				}
			}
			if ((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0)
			{
				retval = check_reporting_for_user(filter, (pid_t)tcp_desc.pid, (pid_t)tcp_desc.epid,
				    &tcp_desc.uuid, &tcp_desc.euuid);
			}
		}
		else
		{
			retval = false; // No further information, so might as well give up now.
		}
	}
	return retval;
}
static bool
nstat_userland_udp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter)
{
	bool retval = true;

	if ((filter->npf_flags & (NSTAT_FILTER_IFNET_FLAGS|NSTAT_FILTER_SPECIFIC_USER)) != 0)
	{
		nstat_udp_descriptor udp_desc;	// Stack allocation - OK or pushing the limits too far?
		struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie;

		assert(shad->shad_magic == TU_SHADOW_MAGIC);

		if ((*shad->shad_getvals_fn)(shad->shad_provider_context, NULL, &udp_desc))
		{
			if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0)
			{
				if ((filter->npf_flags & udp_desc.ifnet_properties) == 0)
				{
					return false;
				}
			}
			if ((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0)
			{
				retval = check_reporting_for_user(filter, (pid_t)udp_desc.pid, (pid_t)udp_desc.epid,
				    &udp_desc.uuid, &udp_desc.euuid);
			}
		}
		else
		{
			retval = false; // No further information, so might as well give up now.
		}
	}
	return retval;
}
static errno_t
nstat_userland_tcp_add_watcher(
	nstat_control_state	*state)
{
	struct nstat_tu_shadow *shad;

	OSIncrementAtomic(&nstat_userland_tcp_watchers);

	lck_mtx_lock(&nstat_mtx);

	TAILQ_FOREACH(shad, &nstat_userprot_shad_head, shad_link) {
		assert(shad->shad_magic == TU_SHADOW_MAGIC);

		if (shad->shad_provider == NSTAT_PROVIDER_TCP_USERLAND)
		{
			int result = nstat_control_source_add(0, state, &nstat_userland_tcp_provider, shad);
			if (result != 0)
			{
				printf("%s - nstat_control_source_add returned %d\n", __func__, result);
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);

	return 0;
}
static errno_t
nstat_userland_udp_add_watcher(
	nstat_control_state	*state)
{
	struct nstat_tu_shadow *shad;

	OSIncrementAtomic(&nstat_userland_udp_watchers);

	lck_mtx_lock(&nstat_mtx);

	TAILQ_FOREACH(shad, &nstat_userprot_shad_head, shad_link) {
		assert(shad->shad_magic == TU_SHADOW_MAGIC);

		if (shad->shad_provider == NSTAT_PROVIDER_UDP_USERLAND)
		{
			int result = nstat_control_source_add(0, state, &nstat_userland_udp_provider, shad);
			if (result != 0)
			{
				printf("%s - nstat_control_source_add returned %d\n", __func__, result);
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);

	return 0;
}
static void
nstat_userland_tcp_remove_watcher(
	__unused nstat_control_state	*state)
{
	OSDecrementAtomic(&nstat_userland_tcp_watchers);
}

static void
nstat_userland_udp_remove_watcher(
	__unused nstat_control_state	*state)
{
	OSDecrementAtomic(&nstat_userland_udp_watchers);
}
static void
nstat_init_userland_tcp_provider(void)
{
	bzero(&nstat_userland_tcp_provider, sizeof(nstat_tcp_provider));
	nstat_userland_tcp_provider.nstat_descriptor_length = sizeof(nstat_tcp_descriptor);
	nstat_userland_tcp_provider.nstat_provider_id = NSTAT_PROVIDER_TCP_USERLAND;
	nstat_userland_tcp_provider.nstat_lookup = nstat_userland_tu_lookup;
	nstat_userland_tcp_provider.nstat_gone = nstat_userland_tu_gone;
	nstat_userland_tcp_provider.nstat_counts = nstat_userland_tu_counts;
	nstat_userland_tcp_provider.nstat_release = nstat_userland_tu_release;
	nstat_userland_tcp_provider.nstat_watcher_add = nstat_userland_tcp_add_watcher;
	nstat_userland_tcp_provider.nstat_watcher_remove = nstat_userland_tcp_remove_watcher;
	nstat_userland_tcp_provider.nstat_copy_descriptor = nstat_userland_tu_copy_descriptor;
	nstat_userland_tcp_provider.nstat_reporting_allowed = nstat_userland_tcp_reporting_allowed;
	nstat_userland_tcp_provider.next = nstat_providers;
	nstat_providers = &nstat_userland_tcp_provider;
}
static void
nstat_init_userland_udp_provider(void)
{
	bzero(&nstat_userland_udp_provider, sizeof(nstat_udp_provider));
	nstat_userland_udp_provider.nstat_descriptor_length = sizeof(nstat_udp_descriptor);
	nstat_userland_udp_provider.nstat_provider_id = NSTAT_PROVIDER_UDP_USERLAND;
	nstat_userland_udp_provider.nstat_lookup = nstat_userland_tu_lookup;
	nstat_userland_udp_provider.nstat_gone = nstat_userland_tu_gone;
	nstat_userland_udp_provider.nstat_counts = nstat_userland_tu_counts;
	nstat_userland_udp_provider.nstat_release = nstat_userland_tu_release;
	nstat_userland_udp_provider.nstat_watcher_add = nstat_userland_udp_add_watcher;
	nstat_userland_udp_provider.nstat_watcher_remove = nstat_userland_udp_remove_watcher;
	nstat_userland_udp_provider.nstat_copy_descriptor = nstat_userland_tu_copy_descriptor;
	nstat_userland_udp_provider.nstat_reporting_allowed = nstat_userland_udp_reporting_allowed;
	nstat_userland_udp_provider.next = nstat_providers;
	nstat_providers = &nstat_userland_udp_provider;
}
// Things get started with a call to netstats to say that there's a new connection:
__private_extern__ nstat_userland_context
ntstat_userland_stats_open(userland_stats_provider_context *ctx,
			   int provider_id,
			   u_int64_t properties,
			   userland_stats_request_vals_fn req_fn)
{
	struct nstat_tu_shadow *shad;

	if ((provider_id != NSTAT_PROVIDER_TCP_USERLAND) && (provider_id != NSTAT_PROVIDER_UDP_USERLAND))
	{
		printf("%s - incorrect provider is supplied, %d\n", __func__, provider_id);
		return NULL;
	}

	shad = OSMalloc(sizeof(*shad), nstat_malloc_tag);
	if (shad == NULL)
		return NULL;

	shad->shad_getvals_fn = req_fn;
	shad->shad_provider_context = ctx;
	shad->shad_provider = provider_id;
	shad->shad_properties = properties;
	shad->shad_magic = TU_SHADOW_MAGIC;

	lck_mtx_lock(&nstat_mtx);
	nstat_control_state	*state;

	// Even if there are no watchers, we save the shadow structure
	TAILQ_INSERT_HEAD(&nstat_userprot_shad_head, shad, shad_link);

	for (state = nstat_controls; state; state = state->ncs_next)
	{
		if ((state->ncs_watching & (1 << provider_id)) != 0)
		{
			// this client is watching tcp/udp userland
			int result = nstat_control_source_add(0, state, &nstat_userland_tcp_provider, shad);
			if (result != 0)
			{
				printf("%s - nstat_control_source_add returned %d\n", __func__, result);
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);

	return (nstat_userland_context)shad;
}
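
/*
 * Illustrative lifecycle (added commentary; the my_* names are
 * hypothetical): a userland protocol implementation registers each
 * connection and later tears it down:
 *
 *	nstat_userland_context ctx =
 *	    ntstat_userland_stats_open(my_provider_ctx,
 *	        NSTAT_PROVIDER_TCP_USERLAND, my_properties, my_request_fn);
 *	...
 *	ntstat_userland_stats_close(ctx);
 *
 * Between open and close, my_request_fn may be invoked at any time to fill
 * counts and/or a descriptor for the connection.
 */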
__private_extern__ void
ntstat_userland_stats_close(nstat_userland_context nstat_ctx)
{
	struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)nstat_ctx;
	nstat_src *dead_list = NULL;

	if (shad == NULL)
		return;

	assert(shad->shad_magic == TU_SHADOW_MAGIC);

	lck_mtx_lock(&nstat_mtx);
	if (nstat_userland_udp_watchers != 0 || nstat_userland_tcp_watchers != 0)
	{
		nstat_control_state	*state;
		nstat_src *src, *prevsrc;
		errno_t result;

		for (state = nstat_controls; state; state = state->ncs_next)
		{
			lck_mtx_lock(&state->mtx);
			for (prevsrc = NULL, src = state->ncs_srcs; src;
			    prevsrc = src, src = src->next)
			{
				if (shad == (struct nstat_tu_shadow *)src->cookie)
					break;
			}

			if (src)
			{
				result = nstat_control_send_goodbye(state, src);

				if (prevsrc)
					prevsrc->next = src->next;
				else
					state->ncs_srcs = src->next;

				src->next = dead_list;
				dead_list = src;
			}
			lck_mtx_unlock(&state->mtx);
		}
	}
	TAILQ_REMOVE(&nstat_userprot_shad_head, shad, shad_link);

	lck_mtx_unlock(&nstat_mtx);

	while (dead_list)
	{
		nstat_src *src = dead_list;
		dead_list = src->next;

		nstat_control_cleanup_source(NULL, src, TRUE);
	}

	shad->shad_magic = TU_SHADOW_UNMAGIC;

	OSFree(shad, sizeof(*shad), nstat_malloc_tag);
}
__private_extern__ void
ntstat_userland_stats_event(
	__unused nstat_userland_context context,
	__unused userland_stats_event_t event)
{
	// This is a dummy for when we hook up event reporting to NetworkStatistics.
	// See <rdar://problem/23022832> NetworkStatistics should provide opt-in notifications
}
2277 static nstat_provider nstat_ifnet_provider
;
2280 * We store a pointer to the ifnet and the original threshold
2281 * requested by the client.
2283 struct nstat_ifnet_cookie
2293 nstat_provider_cookie_t
*out_cookie
)
2295 const nstat_ifnet_add_param
*param
= (const nstat_ifnet_add_param
*)data
;
2297 boolean_t changed
= FALSE
;
2298 nstat_control_state
*state
;
2300 struct nstat_ifnet_cookie
*cookie
;
2302 if (length
< sizeof(*param
) || param
->threshold
< 1024*1024)
2304 if (nstat_privcheck
!= 0) {
2305 errno_t result
= priv_check_cred(kauth_cred_get(),
2306 PRIV_NET_PRIVILEGED_NETWORK_STATISTICS
, 0);
2310 cookie
= OSMalloc(sizeof(*cookie
), nstat_malloc_tag
);
2313 bzero(cookie
, sizeof(*cookie
));
2315 ifnet_head_lock_shared();
2316 TAILQ_FOREACH(ifp
, &ifnet_head
, if_link
)
2318 ifnet_lock_exclusive(ifp
);
2319 if (ifp
->if_index
== param
->ifindex
)
2322 cookie
->threshold
= param
->threshold
;
2323 *out_cookie
= cookie
;
2324 if (!ifp
->if_data_threshold
||
2325 ifp
->if_data_threshold
> param
->threshold
)
2328 ifp
->if_data_threshold
= param
->threshold
;
2330 ifnet_lock_done(ifp
);
2331 ifnet_reference(ifp
);
2334 ifnet_lock_done(ifp
);
2339 * When we change the threshold to something smaller, we notify
2340 * all of our clients with a description message.
2341 * We won't send a message to the client we are currently serving
2342 * because it has no `ifnet source' yet.
2346 lck_mtx_lock(&nstat_mtx
);
2347 for (state
= nstat_controls
; state
; state
= state
->ncs_next
)
2349 lck_mtx_lock(&state
->mtx
);
2350 for (src
= state
->ncs_srcs
; src
; src
= src
->next
)
2352 if (src
->provider
!= &nstat_ifnet_provider
)
2354 nstat_control_send_description(state
, src
, 0, 0);
2356 lck_mtx_unlock(&state
->mtx
);
2358 lck_mtx_unlock(&nstat_mtx
);
2360 if (cookie
->ifp
== NULL
)
2361 OSFree(cookie
, sizeof(*cookie
), nstat_malloc_tag
);
2363 return ifp
? 0 : EINVAL
;
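/*
 * Worked example of the threshold logic above (illustrative numbers): if a
 * client asks for a 2MB threshold on an ifnet whose if_data_threshold is
 * either 0 (disabled) or larger than 2MB, the ifnet is lowered to 2MB and
 * every other client watching ifnet sources receives a fresh description
 * message. A request below the 1MB floor is rejected with EINVAL up front.
 */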
static int
nstat_ifnet_gone(
    nstat_provider_cookie_t cookie)
{
    struct ifnet *ifp;
    struct nstat_ifnet_cookie *ifcookie =
        (struct nstat_ifnet_cookie *)cookie;

    ifnet_head_lock_shared();
    TAILQ_FOREACH(ifp, &ifnet_head, if_link)
    {
        if (ifp == ifcookie->ifp)
            break;
    }
    ifnet_head_done();

    return ifp ? 0 : 1;
}
static errno_t
nstat_ifnet_counts(
    nstat_provider_cookie_t cookie,
    struct nstat_counts     *out_counts,
    int                     *out_gone)
{
    struct nstat_ifnet_cookie *ifcookie =
        (struct nstat_ifnet_cookie *)cookie;
    struct ifnet *ifp = ifcookie->ifp;

    if (out_gone) *out_gone = 0;

    // if the ifnet is gone, we should stop using it
    if (nstat_ifnet_gone(cookie))
    {
        if (out_gone) *out_gone = 1;
        return EINVAL;
    }

    bzero(out_counts, sizeof(*out_counts));
    out_counts->nstat_rxpackets = ifp->if_ipackets;
    out_counts->nstat_rxbytes = ifp->if_ibytes;
    out_counts->nstat_txpackets = ifp->if_opackets;
    out_counts->nstat_txbytes = ifp->if_obytes;
    out_counts->nstat_cell_rxbytes = out_counts->nstat_cell_txbytes = 0;
    return 0;
}
static void
nstat_ifnet_release(
    nstat_provider_cookie_t cookie,
    __unused int            locked)
{
    struct nstat_ifnet_cookie *ifcookie;
    struct ifnet *ifp;
    nstat_control_state *state;
    nstat_src *src;
    uint64_t minthreshold = UINT64_MAX;

    /*
     * Find all the clients that requested a threshold
     * for this ifnet and re-calculate if_data_threshold.
     */
    lck_mtx_lock(&nstat_mtx);
    for (state = nstat_controls; state; state = state->ncs_next)
    {
        lck_mtx_lock(&state->mtx);
        for (src = state->ncs_srcs; src; src = src->next)
        {
            /* Skip the provider we are about to detach. */
            if (src->provider != &nstat_ifnet_provider ||
                src->cookie == cookie)
                continue;
            ifcookie = (struct nstat_ifnet_cookie *)src->cookie;
            if (ifcookie->threshold < minthreshold)
                minthreshold = ifcookie->threshold;
        }
        lck_mtx_unlock(&state->mtx);
    }
    lck_mtx_unlock(&nstat_mtx);
    /*
     * Reset if_data_threshold or disable it.
     */
    ifcookie = (struct nstat_ifnet_cookie *)cookie;
    ifp = ifcookie->ifp;
    if (ifnet_is_attached(ifp, 1)) {
        ifnet_lock_exclusive(ifp);
        if (minthreshold == UINT64_MAX)
            ifp->if_data_threshold = 0;
        else
            ifp->if_data_threshold = minthreshold;
        ifnet_lock_done(ifp);
        ifnet_decr_iorefcnt(ifp);
    }
    ifnet_release(ifp);
    OSFree(ifcookie, sizeof(*ifcookie), nstat_malloc_tag);
}
static void
nstat_ifnet_copy_link_status(
    struct ifnet                    *ifp,
    struct nstat_ifnet_descriptor   *desc)
{
    struct if_link_status *ifsr = ifp->if_link_status;
    nstat_ifnet_desc_link_status *link_status = &desc->link_status;

    link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_NONE;
    if (ifsr == NULL)
        return;

    lck_rw_lock_shared(&ifp->if_link_status_lock);

    if (ifp->if_type == IFT_CELLULAR) {

        nstat_ifnet_desc_cellular_status *cell_status = &link_status->u.cellular;
        struct if_cellular_status_v1 *if_cell_sr =
            &ifsr->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;

        if (ifsr->ifsr_version != IF_CELLULAR_STATUS_REPORT_VERSION_1)
            goto done;

        link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_CELLULAR;

        if (if_cell_sr->valid_bitmask & IF_CELL_LINK_QUALITY_METRIC_VALID) {
            cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_LINK_QUALITY_METRIC_VALID;
            cell_status->link_quality_metric = if_cell_sr->link_quality_metric;
        }
        if (if_cell_sr->valid_bitmask & IF_CELL_UL_EFFECTIVE_BANDWIDTH_VALID) {
            cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_EFFECTIVE_BANDWIDTH_VALID;
            cell_status->ul_effective_bandwidth = if_cell_sr->ul_effective_bandwidth;
        }
        if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_BANDWIDTH_VALID) {
            cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_BANDWIDTH_VALID;
            cell_status->ul_max_bandwidth = if_cell_sr->ul_max_bandwidth;
        }
        if (if_cell_sr->valid_bitmask & IF_CELL_UL_MIN_LATENCY_VALID) {
            cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MIN_LATENCY_VALID;
            cell_status->ul_min_latency = if_cell_sr->ul_min_latency;
        }
        if (if_cell_sr->valid_bitmask & IF_CELL_UL_EFFECTIVE_LATENCY_VALID) {
            cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_EFFECTIVE_LATENCY_VALID;
            cell_status->ul_effective_latency = if_cell_sr->ul_effective_latency;
        }
        if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_LATENCY_VALID) {
            cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_LATENCY_VALID;
            cell_status->ul_max_latency = if_cell_sr->ul_max_latency;
        }
        if (if_cell_sr->valid_bitmask & IF_CELL_UL_RETXT_LEVEL_VALID) {
            cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_VALID;
            if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_NONE)
                cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_NONE;
            else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_LOW)
                cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_LOW;
            else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_MEDIUM)
                cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_MEDIUM;
            else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_HIGH)
                cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_HIGH;
            else
                cell_status->valid_bitmask &= ~NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_VALID;
        }
        if (if_cell_sr->valid_bitmask & IF_CELL_UL_BYTES_LOST_VALID) {
            cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_BYTES_LOST_VALID;
            cell_status->ul_bytes_lost = if_cell_sr->ul_bytes_lost;
        }
        if (if_cell_sr->valid_bitmask & IF_CELL_UL_MIN_QUEUE_SIZE_VALID) {
            cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MIN_QUEUE_SIZE_VALID;
            cell_status->ul_min_queue_size = if_cell_sr->ul_min_queue_size;
        }
        if (if_cell_sr->valid_bitmask & IF_CELL_UL_AVG_QUEUE_SIZE_VALID) {
            cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_AVG_QUEUE_SIZE_VALID;
            cell_status->ul_avg_queue_size = if_cell_sr->ul_avg_queue_size;
        }
        if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_QUEUE_SIZE_VALID) {
            cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_QUEUE_SIZE_VALID;
            cell_status->ul_max_queue_size = if_cell_sr->ul_max_queue_size;
        }
        if (if_cell_sr->valid_bitmask & IF_CELL_DL_EFFECTIVE_BANDWIDTH_VALID) {
            cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_DL_EFFECTIVE_BANDWIDTH_VALID;
            cell_status->dl_effective_bandwidth = if_cell_sr->dl_effective_bandwidth;
        }
        if (if_cell_sr->valid_bitmask & IF_CELL_DL_MAX_BANDWIDTH_VALID) {
            cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_DL_MAX_BANDWIDTH_VALID;
            cell_status->dl_max_bandwidth = if_cell_sr->dl_max_bandwidth;
        }
        if (if_cell_sr->valid_bitmask & IF_CELL_CONFIG_INACTIVITY_TIME_VALID) {
            cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_CONFIG_INACTIVITY_TIME_VALID;
            cell_status->config_inactivity_time = if_cell_sr->config_inactivity_time;
        }
        if (if_cell_sr->valid_bitmask & IF_CELL_CONFIG_BACKOFF_TIME_VALID) {
            cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_CONFIG_BACKOFF_TIME_VALID;
            cell_status->config_backoff_time = if_cell_sr->config_backoff_time;
        }
        if (if_cell_sr->valid_bitmask & IF_CELL_UL_MSS_RECOMMENDED_VALID) {
            cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_MSS_RECOMMENDED_VALID;
            cell_status->mss_recommended = if_cell_sr->mss_recommended;
        }
    } else if (ifp->if_subfamily == IFNET_SUBFAMILY_WIFI) {

        nstat_ifnet_desc_wifi_status *wifi_status = &link_status->u.wifi;
        struct if_wifi_status_v1 *if_wifi_sr =
            &ifsr->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;

        if (ifsr->ifsr_version != IF_WIFI_STATUS_REPORT_VERSION_1)
            goto done;

        link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_WIFI;

        if (if_wifi_sr->valid_bitmask & IF_WIFI_LINK_QUALITY_METRIC_VALID) {
            wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_LINK_QUALITY_METRIC_VALID;
            wifi_status->link_quality_metric = if_wifi_sr->link_quality_metric;
        }
        if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID) {
            wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID;
            wifi_status->ul_effective_bandwidth = if_wifi_sr->ul_effective_bandwidth;
        }
        if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MAX_BANDWIDTH_VALID) {
            wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MAX_BANDWIDTH_VALID;
            wifi_status->ul_max_bandwidth = if_wifi_sr->ul_max_bandwidth;
        }
        if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MIN_LATENCY_VALID) {
            wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MIN_LATENCY_VALID;
            wifi_status->ul_min_latency = if_wifi_sr->ul_min_latency;
        }
        if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_EFFECTIVE_LATENCY_VALID) {
            wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_EFFECTIVE_LATENCY_VALID;
            wifi_status->ul_effective_latency = if_wifi_sr->ul_effective_latency;
        }
        if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MAX_LATENCY_VALID) {
            wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MAX_LATENCY_VALID;
            wifi_status->ul_max_latency = if_wifi_sr->ul_max_latency;
        }
        if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_RETXT_LEVEL_VALID) {
            wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_VALID;
            if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_NONE)
                wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_NONE;
            else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_LOW)
                wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_LOW;
            else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_MEDIUM)
                wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_MEDIUM;
            else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_HIGH)
                wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_HIGH;
            else
                wifi_status->valid_bitmask &= ~NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_VALID;
        }
        if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_BYTES_LOST_VALID) {
            wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_BYTES_LOST_VALID;
            wifi_status->ul_bytes_lost = if_wifi_sr->ul_bytes_lost;
        }
        if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_ERROR_RATE_VALID) {
            wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_ERROR_RATE_VALID;
            wifi_status->ul_error_rate = if_wifi_sr->ul_error_rate;
        }
        if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID) {
            wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID;
            wifi_status->dl_effective_bandwidth = if_wifi_sr->dl_effective_bandwidth;
        }
        if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MAX_BANDWIDTH_VALID) {
            wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MAX_BANDWIDTH_VALID;
            wifi_status->dl_max_bandwidth = if_wifi_sr->dl_max_bandwidth;
        }
        if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MIN_LATENCY_VALID) {
            wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MIN_LATENCY_VALID;
            wifi_status->dl_min_latency = if_wifi_sr->dl_min_latency;
        }
        if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_EFFECTIVE_LATENCY_VALID) {
            wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_EFFECTIVE_LATENCY_VALID;
            wifi_status->dl_effective_latency = if_wifi_sr->dl_effective_latency;
        }
        if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MAX_LATENCY_VALID) {
            wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MAX_LATENCY_VALID;
            wifi_status->dl_max_latency = if_wifi_sr->dl_max_latency;
        }
        if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_ERROR_RATE_VALID) {
            wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_ERROR_RATE_VALID;
            wifi_status->dl_error_rate = if_wifi_sr->dl_error_rate;
        }
        if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_FREQUENCY_VALID) {
            wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_VALID;
            if (if_wifi_sr->config_frequency == IF_WIFI_CONFIG_FREQUENCY_2_4_GHZ)
                wifi_status->config_frequency = NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_2_4_GHZ;
            else if (if_wifi_sr->config_frequency == IF_WIFI_CONFIG_FREQUENCY_5_0_GHZ)
                wifi_status->config_frequency = NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_5_0_GHZ;
            else
                wifi_status->valid_bitmask &= ~NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_VALID;
        }
        if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_MULTICAST_RATE_VALID) {
            wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_MULTICAST_RATE_VALID;
            wifi_status->config_multicast_rate = if_wifi_sr->config_multicast_rate;
        }
        if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_SCAN_COUNT_VALID) {
            wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_SCAN_COUNT_VALID;
            wifi_status->scan_count = if_wifi_sr->scan_count;
        }
        if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_SCAN_DURATION_VALID) {
            wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_SCAN_DURATION_VALID;
            wifi_status->scan_duration = if_wifi_sr->scan_duration;
        }
    }

done:
    lck_rw_done(&ifp->if_link_status_lock);
}
static u_int64_t nstat_ifnet_last_report_time = 0;
extern int tcp_report_stats_interval;
static void
nstat_ifnet_compute_percentages(struct if_tcp_ecn_perf_stat *ifst)
{
    /* Retransmit percentage */
    if (ifst->total_rxmitpkts > 0 && ifst->total_txpkts > 0) {
        /* shift by 10 for precision */
        ifst->rxmit_percent =
            ((ifst->total_rxmitpkts << 10) * 100) / ifst->total_txpkts;
    } else {
        ifst->rxmit_percent = 0;
    }

    /* Out-of-order percentage */
    if (ifst->total_oopkts > 0 && ifst->total_rxpkts > 0) {
        /* shift by 10 for precision */
        ifst->oo_percent =
            ((ifst->total_oopkts << 10) * 100) / ifst->total_rxpkts;
    } else {
        ifst->oo_percent = 0;
    }

    /* Reorder percentage */
    if (ifst->total_reorderpkts > 0 &&
        (ifst->total_txpkts + ifst->total_rxpkts) > 0) {
        /* shift by 10 for precision */
        ifst->reorder_percent =
            ((ifst->total_reorderpkts << 10) * 100) /
            (ifst->total_txpkts + ifst->total_rxpkts);
    } else {
        ifst->reorder_percent = 0;
    }
}
static void
nstat_ifnet_normalize_counter(struct if_tcp_ecn_stat *if_st)
{
    u_int64_t ecn_on_conn, ecn_off_conn;

    if (if_st == NULL)
        return;
    ecn_on_conn = if_st->ecn_client_success +
        if_st->ecn_server_success;
    ecn_off_conn = if_st->ecn_off_conn +
        (if_st->ecn_client_setup - if_st->ecn_client_success) +
        (if_st->ecn_server_setup - if_st->ecn_server_success);

    /*
     * report sack episodes, rst_drop and rxmit_drop
     * as a ratio per connection, shift by 10 for precision
     */
    if (ecn_on_conn > 0) {
        if_st->ecn_on.sack_episodes =
            (if_st->ecn_on.sack_episodes << 10) / ecn_on_conn;
        if_st->ecn_on.rst_drop =
            (if_st->ecn_on.rst_drop << 10) * 100 / ecn_on_conn;
        if_st->ecn_on.rxmit_drop =
            (if_st->ecn_on.rxmit_drop << 10) * 100 / ecn_on_conn;
    } else {
        /* set to zero, just in case */
        if_st->ecn_on.sack_episodes = 0;
        if_st->ecn_on.rst_drop = 0;
        if_st->ecn_on.rxmit_drop = 0;
    }

    if (ecn_off_conn > 0) {
        if_st->ecn_off.sack_episodes =
            (if_st->ecn_off.sack_episodes << 10) / ecn_off_conn;
        if_st->ecn_off.rst_drop =
            (if_st->ecn_off.rst_drop << 10) * 100 / ecn_off_conn;
        if_st->ecn_off.rxmit_drop =
            (if_st->ecn_off.rxmit_drop << 10) * 100 / ecn_off_conn;
    } else {
        if_st->ecn_off.sack_episodes = 0;
        if_st->ecn_off.rst_drop = 0;
        if_st->ecn_off.rxmit_drop = 0;
    }
    if_st->ecn_total_conn = ecn_off_conn + ecn_on_conn;
}
static void
nstat_ifnet_report_ecn_stats(void)
{
    u_int64_t uptime, last_report_time;
    struct nstat_sysinfo_data data;
    struct nstat_sysinfo_ifnet_ecn_stats *st;
    struct ifnet *ifp;

    uptime = net_uptime();

    if ((int)(uptime - nstat_ifnet_last_report_time) <
        tcp_report_stats_interval)
        return;

    last_report_time = nstat_ifnet_last_report_time;
    nstat_ifnet_last_report_time = uptime;
    data.flags = NSTAT_SYSINFO_IFNET_ECN_STATS;
    st = &data.u.ifnet_ecn_stats;

    ifnet_head_lock_shared();
    TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
        if (ifp->if_ipv4_stat == NULL || ifp->if_ipv6_stat == NULL)
            continue;

        if ((ifp->if_refflags & (IFRF_ATTACHED | IFRF_DETACHING)) !=
            IFRF_ATTACHED)
            continue;

        /* Limit reporting to Wifi, Ethernet and cellular. */
        if (!(IFNET_IS_ETHERNET(ifp) || IFNET_IS_CELLULAR(ifp)))
            continue;

        bzero(st, sizeof(*st));
        if (IFNET_IS_CELLULAR(ifp)) {
            st->ifnet_type = NSTAT_IFNET_ECN_TYPE_CELLULAR;
        } else if (IFNET_IS_WIFI(ifp)) {
            st->ifnet_type = NSTAT_IFNET_ECN_TYPE_WIFI;
        } else {
            st->ifnet_type = NSTAT_IFNET_ECN_TYPE_ETHERNET;
        }
        data.unsent_data_cnt = ifp->if_unsent_data_cnt;
        /* skip if there was no update since last report */
        if (ifp->if_ipv4_stat->timestamp <= 0 ||
            ifp->if_ipv4_stat->timestamp < last_report_time)
            goto v6;
        st->ifnet_proto = NSTAT_IFNET_ECN_PROTO_IPV4;
        /* compute percentages using packet counts */
        nstat_ifnet_compute_percentages(&ifp->if_ipv4_stat->ecn_on);
        nstat_ifnet_compute_percentages(&ifp->if_ipv4_stat->ecn_off);
        nstat_ifnet_normalize_counter(ifp->if_ipv4_stat);
        bcopy(ifp->if_ipv4_stat, &st->ecn_stat,
            sizeof(st->ecn_stat));
        nstat_sysinfo_send_data(&data);
        bzero(ifp->if_ipv4_stat, sizeof(*ifp->if_ipv4_stat));

v6:
        /* skip if there was no update since last report */
        if (ifp->if_ipv6_stat->timestamp <= 0 ||
            ifp->if_ipv6_stat->timestamp < last_report_time)
            continue;
        st->ifnet_proto = NSTAT_IFNET_ECN_PROTO_IPV6;

        /* compute percentages using packet counts */
        nstat_ifnet_compute_percentages(&ifp->if_ipv6_stat->ecn_on);
        nstat_ifnet_compute_percentages(&ifp->if_ipv6_stat->ecn_off);
        nstat_ifnet_normalize_counter(ifp->if_ipv6_stat);
        bcopy(ifp->if_ipv6_stat, &st->ecn_stat,
            sizeof(st->ecn_stat));
        nstat_sysinfo_send_data(&data);

        /* Zero the stats in ifp */
        bzero(ifp->if_ipv6_stat, sizeof(*ifp->if_ipv6_stat));
    }
    ifnet_head_done();
}
static errno_t
nstat_ifnet_copy_descriptor(
    nstat_provider_cookie_t cookie,
    void                    *data,
    u_int32_t               len)
{
    nstat_ifnet_descriptor *desc = (nstat_ifnet_descriptor *)data;
    struct nstat_ifnet_cookie *ifcookie =
        (struct nstat_ifnet_cookie *)cookie;
    struct ifnet *ifp = ifcookie->ifp;

    if (len < sizeof(nstat_ifnet_descriptor))
        return EINVAL;

    if (nstat_ifnet_gone(cookie))
        return EINVAL;

    bzero(desc, sizeof(*desc));
    ifnet_lock_shared(ifp);
    strlcpy(desc->name, ifp->if_xname, sizeof(desc->name));
    desc->ifindex = ifp->if_index;
    desc->threshold = ifp->if_data_threshold;
    desc->type = ifp->if_type;
    if (ifp->if_desc.ifd_len < sizeof(desc->description))
        memcpy(desc->description, ifp->if_desc.ifd_desc,
            sizeof(desc->description));
    nstat_ifnet_copy_link_status(ifp, desc);
    ifnet_lock_done(ifp);
    return 0;
}
static void
nstat_init_ifnet_provider(void)
{
    bzero(&nstat_ifnet_provider, sizeof(nstat_ifnet_provider));
    nstat_ifnet_provider.nstat_provider_id = NSTAT_PROVIDER_IFNET;
    nstat_ifnet_provider.nstat_descriptor_length = sizeof(nstat_ifnet_descriptor);
    nstat_ifnet_provider.nstat_lookup = nstat_ifnet_lookup;
    nstat_ifnet_provider.nstat_gone = nstat_ifnet_gone;
    nstat_ifnet_provider.nstat_counts = nstat_ifnet_counts;
    nstat_ifnet_provider.nstat_watcher_add = NULL;
    nstat_ifnet_provider.nstat_watcher_remove = NULL;
    nstat_ifnet_provider.nstat_copy_descriptor = nstat_ifnet_copy_descriptor;
    nstat_ifnet_provider.nstat_release = nstat_ifnet_release;
    nstat_ifnet_provider.next = nstat_providers;
    nstat_providers = &nstat_ifnet_provider;
}
__private_extern__ void
nstat_ifnet_threshold_reached(unsigned int ifindex)
{
    nstat_control_state *state;
    nstat_src *src;
    struct ifnet *ifp;
    struct nstat_ifnet_cookie *ifcookie;

    lck_mtx_lock(&nstat_mtx);
    for (state = nstat_controls; state; state = state->ncs_next)
    {
        lck_mtx_lock(&state->mtx);
        for (src = state->ncs_srcs; src; src = src->next)
        {
            if (src->provider != &nstat_ifnet_provider)
                continue;
            ifcookie = (struct nstat_ifnet_cookie *)src->cookie;
            ifp = ifcookie->ifp;
            if (ifp->if_index != ifindex)
                continue;
            nstat_control_send_counts(state, src, 0, 0, NULL);
        }
        lck_mtx_unlock(&state->mtx);
    }
    lck_mtx_unlock(&nstat_mtx);
}
#pragma mark -- Sysinfo --

static void
nstat_set_keyval_scalar(nstat_sysinfo_keyval *kv, int key, u_int32_t val)
{
    kv->nstat_sysinfo_key = key;
    kv->nstat_sysinfo_flags = NSTAT_SYSINFO_FLAG_SCALAR;
    kv->u.nstat_sysinfo_scalar = val;
}
static void
nstat_sysinfo_send_data_internal(
    nstat_control_state *control,
    nstat_sysinfo_data  *data)
{
    nstat_msg_sysinfo_counts *syscnt = NULL;
    size_t allocsize = 0, countsize = 0, nkeyvals = 0, finalsize = 0;
    nstat_sysinfo_keyval *kv;
    errno_t result = 0;
    size_t i = 0;

    allocsize = offsetof(nstat_msg_sysinfo_counts, counts);
    countsize = offsetof(nstat_sysinfo_counts, nstat_sysinfo_keyvals);
    finalsize = allocsize;

    /* get number of key-vals for each kind of stat */
    switch (data->flags)
    {
        case NSTAT_SYSINFO_MBUF_STATS:
            nkeyvals = sizeof(struct nstat_sysinfo_mbuf_stats) /
                sizeof(u_int32_t);
            break;
        case NSTAT_SYSINFO_TCP_STATS:
            nkeyvals = sizeof(struct nstat_sysinfo_tcp_stats) /
                sizeof(u_int32_t);
            break;
        case NSTAT_SYSINFO_IFNET_ECN_STATS:
            nkeyvals = (sizeof(struct if_tcp_ecn_stat) /
                sizeof(u_int64_t));
            /* Two more keys for ifnet type and proto */
            nkeyvals += 2;
            /* One key for unsent data. */
            nkeyvals++;
            break;
        default:
            return;
    }
    countsize += sizeof(nstat_sysinfo_keyval) * nkeyvals;
    allocsize += countsize;

    syscnt = OSMalloc(allocsize, nstat_malloc_tag);
    if (syscnt == NULL)
        return;
    bzero(syscnt, allocsize);

    kv = (nstat_sysinfo_keyval *) &syscnt->counts.nstat_sysinfo_keyvals;
    switch (data->flags)
    {
        case NSTAT_SYSINFO_MBUF_STATS:
        {
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_KEY_MBUF_256B_TOTAL, data->u.mb_stats.total_256b);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_KEY_MBUF_2KB_TOTAL, data->u.mb_stats.total_2kb);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_KEY_MBUF_4KB_TOTAL, data->u.mb_stats.total_4kb);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MBUF_16KB_TOTAL, data->u.mb_stats.total_16kb);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_KEY_SOCK_MBCNT, data->u.mb_stats.sbmb_total);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_KEY_SOCK_ATMBLIMIT, data->u.mb_stats.sb_atmbuflimit);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MBUF_DRAIN_CNT, data->u.mb_stats.draincnt);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MBUF_MEM_RELEASED, data->u.mb_stats.memreleased);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_KEY_SOCK_MBFLOOR, data->u.mb_stats.sbmb_floor);
            VERIFY(i == nkeyvals);
            break;
        }
        case NSTAT_SYSINFO_TCP_STATS:
        {
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_KEY_IPV4_AVGRTT, data->u.tcp_stats.ipv4_avgrtt);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_KEY_IPV6_AVGRTT, data->u.tcp_stats.ipv6_avgrtt);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_KEY_SEND_PLR, data->u.tcp_stats.send_plr);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_KEY_RECV_PLR, data->u.tcp_stats.recv_plr);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_KEY_SEND_TLRTO, data->u.tcp_stats.send_tlrto_rate);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_KEY_SEND_REORDERRATE, data->u.tcp_stats.send_reorder_rate);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_CONNECTION_ATTEMPTS, data->u.tcp_stats.connection_attempts);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_CONNECTION_ACCEPTS, data->u.tcp_stats.connection_accepts);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_CLIENT_ENABLED, data->u.tcp_stats.ecn_client_enabled);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_SERVER_ENABLED, data->u.tcp_stats.ecn_server_enabled);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_CLIENT_SETUP, data->u.tcp_stats.ecn_client_setup);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_SERVER_SETUP, data->u.tcp_stats.ecn_server_setup);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_CLIENT_SUCCESS, data->u.tcp_stats.ecn_client_success);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_SERVER_SUCCESS, data->u.tcp_stats.ecn_server_success);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_NOT_SUPPORTED, data->u.tcp_stats.ecn_not_supported);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_LOST_SYN, data->u.tcp_stats.ecn_lost_syn);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_LOST_SYNACK, data->u.tcp_stats.ecn_lost_synack);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_RECV_CE, data->u.tcp_stats.ecn_recv_ce);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_RECV_ECE, data->u.tcp_stats.ecn_recv_ece);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_SENT_ECE, data->u.tcp_stats.ecn_sent_ece);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_CONN_RECV_CE, data->u.tcp_stats.ecn_conn_recv_ce);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_CONN_RECV_ECE, data->u.tcp_stats.ecn_conn_recv_ece);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_CONN_PLNOCE, data->u.tcp_stats.ecn_conn_plnoce);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_CONN_PL_CE, data->u.tcp_stats.ecn_conn_pl_ce);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_CONN_NOPL_CE, data->u.tcp_stats.ecn_conn_nopl_ce);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_FALLBACK_SYNLOSS, data->u.tcp_stats.ecn_fallback_synloss);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_FALLBACK_REORDER, data->u.tcp_stats.ecn_fallback_reorder);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_FALLBACK_CE, data->u.tcp_stats.ecn_fallback_ce);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_TFO_SYN_DATA_RCV, data->u.tcp_stats.tfo_syn_data_rcv);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_TFO_COOKIE_REQ_RCV, data->u.tcp_stats.tfo_cookie_req_rcv);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_TFO_COOKIE_SENT, data->u.tcp_stats.tfo_cookie_sent);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_TFO_COOKIE_INVALID, data->u.tcp_stats.tfo_cookie_invalid);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_TFO_COOKIE_REQ, data->u.tcp_stats.tfo_cookie_req);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_TFO_COOKIE_RCV, data->u.tcp_stats.tfo_cookie_rcv);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_TFO_SYN_DATA_SENT, data->u.tcp_stats.tfo_syn_data_sent);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_TFO_SYN_DATA_ACKED, data->u.tcp_stats.tfo_syn_data_acked);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_TFO_SYN_LOSS, data->u.tcp_stats.tfo_syn_loss);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_TFO_BLACKHOLE, data->u.tcp_stats.tfo_blackhole);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_TFO_COOKIE_WRONG, data->u.tcp_stats.tfo_cookie_wrong);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_TFO_NO_COOKIE_RCV, data->u.tcp_stats.tfo_no_cookie_rcv);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_TFO_HEURISTICS_DISABLE, data->u.tcp_stats.tfo_heuristics_disable);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_TFO_SEND_BLACKHOLE, data->u.tcp_stats.tfo_sndblackhole);
            VERIFY(i == nkeyvals);
            break;
        }
        case NSTAT_SYSINFO_IFNET_ECN_STATS:
        {
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_TYPE, data->u.ifnet_ecn_stats.ifnet_type);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_PROTO, data->u.ifnet_ecn_stats.ifnet_proto);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_CLIENT_SETUP, data->u.ifnet_ecn_stats.ecn_stat.ecn_client_setup);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_SERVER_SETUP, data->u.ifnet_ecn_stats.ecn_stat.ecn_server_setup);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_CLIENT_SUCCESS, data->u.ifnet_ecn_stats.ecn_stat.ecn_client_success);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_SERVER_SUCCESS, data->u.ifnet_ecn_stats.ecn_stat.ecn_server_success);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_PEER_NOSUPPORT, data->u.ifnet_ecn_stats.ecn_stat.ecn_peer_nosupport);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_SYN_LOST, data->u.ifnet_ecn_stats.ecn_stat.ecn_syn_lost);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_SYNACK_LOST, data->u.ifnet_ecn_stats.ecn_stat.ecn_synack_lost);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_RECV_CE, data->u.ifnet_ecn_stats.ecn_stat.ecn_recv_ce);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_RECV_ECE, data->u.ifnet_ecn_stats.ecn_stat.ecn_recv_ece);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_CONN_RECV_CE, data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_recv_ce);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_CONN_RECV_ECE, data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_recv_ece);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_CONN_PLNOCE, data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_plnoce);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_CONN_PLCE, data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_plce);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_CONN_NOPLCE, data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_noplce);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_FALLBACK_SYNLOSS, data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_synloss);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_FALLBACK_REORDER, data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_reorder);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_FALLBACK_CE, data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_ce);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_ON_RTT_AVG, data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rtt_avg);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_ON_RTT_VAR, data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rtt_var);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_ON_OOPERCENT, data->u.ifnet_ecn_stats.ecn_stat.ecn_on.oo_percent);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_ON_SACK_EPISODE, data->u.ifnet_ecn_stats.ecn_stat.ecn_on.sack_episodes);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_ON_REORDER_PERCENT, data->u.ifnet_ecn_stats.ecn_stat.ecn_on.reorder_percent);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_ON_RXMIT_PERCENT, data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rxmit_percent);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_ON_RXMIT_DROP, data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rxmit_drop);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_OFF_RTT_AVG, data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rtt_avg);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_OFF_RTT_VAR, data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rtt_var);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_OFF_OOPERCENT, data->u.ifnet_ecn_stats.ecn_stat.ecn_off.oo_percent);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_OFF_SACK_EPISODE, data->u.ifnet_ecn_stats.ecn_stat.ecn_off.sack_episodes);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_OFF_REORDER_PERCENT, data->u.ifnet_ecn_stats.ecn_stat.ecn_off.reorder_percent);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_OFF_RXMIT_PERCENT, data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rxmit_percent);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_OFF_RXMIT_DROP, data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rxmit_drop);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_TXPKTS, data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_txpkts);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_RXMTPKTS, data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_rxmitpkts);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_RXPKTS, data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_rxpkts);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_OOPKTS, data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_oopkts);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_ON_DROP_RST, data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rst_drop);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_TXPKTS, data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_txpkts);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_RXMTPKTS, data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_rxmitpkts);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_RXPKTS, data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_rxpkts);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_OOPKTS, data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_oopkts);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_OFF_DROP_RST, data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rst_drop);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_TOTAL_CONN, data->u.ifnet_ecn_stats.ecn_stat.ecn_total_conn);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_IFNET_UNSENT_DATA, data->unsent_data_cnt);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_FALLBACK_DROPRST, data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_droprst);
            nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_FALLBACK_DROPRXMT, data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_droprxmt);
            break;
        }
    }
    if (syscnt != NULL)
    {
        VERIFY(i > 0 && i <= nkeyvals);
        countsize = offsetof(nstat_sysinfo_counts,
            nstat_sysinfo_keyvals) +
            sizeof(nstat_sysinfo_keyval) * i;
        finalsize += countsize;
        syscnt->hdr.type = NSTAT_MSG_TYPE_SYSINFO_COUNTS;
        syscnt->hdr.length = finalsize;
        syscnt->counts.nstat_sysinfo_len = countsize;

        result = ctl_enqueuedata(control->ncs_kctl,
            control->ncs_unit, syscnt, finalsize, CTL_DATA_EOR);
        if (result != 0)
        {
            nstat_stats.nstat_sysinfofailures += 1;
        }
        OSFree(syscnt, allocsize, nstat_malloc_tag);
    }
    return;
}
__private_extern__ void
nstat_sysinfo_send_data(
    nstat_sysinfo_data *data)
{
    nstat_control_state *control;

    lck_mtx_lock(&nstat_mtx);
    for (control = nstat_controls; control; control = control->ncs_next)
    {
        lck_mtx_lock(&control->mtx);
        if ((control->ncs_flags & NSTAT_FLAG_SYSINFO_SUBSCRIBED) != 0)
        {
            nstat_sysinfo_send_data_internal(control, data);
        }
        lck_mtx_unlock(&control->mtx);
    }
    lck_mtx_unlock(&nstat_mtx);
}
static void
nstat_sysinfo_generate_report(void)
{
    mbuf_report_peak_usage();
    tcp_report_stats();
    nstat_ifnet_report_ecn_stats();
}
#pragma mark -- Kernel Control Socket --

static kern_ctl_ref nstat_ctlref = NULL;
static lck_grp_t    *nstat_lck_grp = NULL;

static errno_t nstat_control_connect(kern_ctl_ref kctl, struct sockaddr_ctl *sac, void **uinfo);
static errno_t nstat_control_disconnect(kern_ctl_ref kctl, u_int32_t unit, void *uinfo);
static errno_t nstat_control_send(kern_ctl_ref kctl, u_int32_t unit, void *uinfo, mbuf_t m, int flags);
static void
nstat_enqueue_success(
    uint64_t            context,
    nstat_control_state *state,
    u_int16_t           flags)
{
    nstat_msg_hdr success;
    errno_t result;

    bzero(&success, sizeof(success));
    success.context = context;
    success.type = NSTAT_MSG_TYPE_SUCCESS;
    success.length = sizeof(success);
    success.flags = flags;
    result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &success,
        sizeof(success), CTL_DATA_EOR | CTL_DATA_CRIT);
    if (result != 0) {
        if (nstat_debug != 0)
            printf("%s: could not enqueue success message %d\n",
                __func__, result);
        nstat_stats.nstat_successmsgfailures += 1;
    }
}
static errno_t
nstat_control_send_goodbye(
    nstat_control_state *state,
    nstat_src           *src)
{
    errno_t result = 0;
    int failed = 0;

    if (nstat_control_reporting_allowed(state, src))
    {
        if ((state->ncs_flags & NSTAT_FLAG_SUPPORTS_UPDATES) != 0)
        {
            result = nstat_control_send_update(state, src, 0, NSTAT_MSG_HDR_FLAG_CLOSING, NULL);
            if (result != 0)
            {
                failed = 1;
                if (nstat_debug != 0)
                    printf("%s - nstat_control_send_update() %d\n", __func__, result);
            }
        }
        else
        {
            // send one last counts notification
            result = nstat_control_send_counts(state, src, 0, NSTAT_MSG_HDR_FLAG_CLOSING, NULL);
            if (result != 0)
            {
                failed = 1;
                if (nstat_debug != 0)
                    printf("%s - nstat_control_send_counts() %d\n", __func__, result);
            }

            // send a last description
            result = nstat_control_send_description(state, src, 0, NSTAT_MSG_HDR_FLAG_CLOSING);
            if (result != 0)
            {
                failed = 1;
                if (nstat_debug != 0)
                    printf("%s - nstat_control_send_description() %d\n", __func__, result);
            }
        }
    }

    // send the source removed notification
    result = nstat_control_send_removed(state, src);
    if (result != 0 && nstat_debug)
    {
        failed = 1;
        if (nstat_debug != 0)
            printf("%s - nstat_control_send_removed() %d\n", __func__, result);
    }

    if (failed != 0)
        nstat_stats.nstat_control_send_goodbye_failures++;

    return result;
}
static void
nstat_flush_accumulated_msgs(
    nstat_control_state *state)
{
    errno_t result = 0;
    if (state->ncs_accumulated != NULL && mbuf_len(state->ncs_accumulated) > 0)
    {
        mbuf_pkthdr_setlen(state->ncs_accumulated, mbuf_len(state->ncs_accumulated));
        result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, state->ncs_accumulated, CTL_DATA_EOR);
        if (result != 0)
        {
            nstat_stats.nstat_flush_accumulated_msgs_failures++;
            if (nstat_debug != 0)
                printf("%s - ctl_enqueuembuf failed: %d\n", __func__, result);
            mbuf_freem(state->ncs_accumulated);
        }
        state->ncs_accumulated = NULL;
    }
}
static errno_t
nstat_accumulate_msg(
    nstat_control_state *state,
    nstat_msg_hdr       *hdr,
    size_t              length)
{
    if (state->ncs_accumulated && mbuf_trailingspace(state->ncs_accumulated) < length)
    {
        // Will send the current mbuf
        nstat_flush_accumulated_msgs(state);
    }

    errno_t result = 0;

    if (state->ncs_accumulated == NULL)
    {
        unsigned int one = 1;
        if (mbuf_allocpacket(MBUF_DONTWAIT, NSTAT_MAX_MSG_SIZE, &one, &state->ncs_accumulated) != 0)
        {
            if (nstat_debug != 0)
                printf("%s - mbuf_allocpacket failed\n", __func__);
            result = ENOMEM;
        }
        else
        {
            mbuf_setlen(state->ncs_accumulated, 0);
        }
    }

    if (result == 0)
    {
        hdr->length = length;
        result = mbuf_copyback(state->ncs_accumulated, mbuf_len(state->ncs_accumulated),
            length, hdr, MBUF_DONTWAIT);
    }

    if (result != 0)
    {
        nstat_flush_accumulated_msgs(state);
        if (nstat_debug != 0)
            printf("%s - resorting to ctl_enqueuedata\n", __func__);
        result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, hdr, length, CTL_DATA_EOR);
        if (result != 0)
            nstat_stats.nstat_accumulate_msg_failures++;
    }

    return result;
}
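/*
 * The accumulate/flush pair above batches small messages into a single
 * NSTAT_MAX_MSG_SIZE mbuf so that one ctl_enqueuembuf() can carry many
 * records. Sketch of the pattern a caller follows (using names from this
 * file; not an excerpt of the original):
 */
#if 0
    // append several messages, then push whatever is buffered
    nstat_accumulate_msg(state, &counts.hdr, counts.hdr.length);
    nstat_accumulate_msg(state, &desc->hdr, size);
    nstat_flush_accumulated_msgs(state);
#endif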
static void
nstat_idle_check(
    __unused thread_call_param_t p0,
    __unused thread_call_param_t p1)
{
    lck_mtx_lock(&nstat_mtx);

    nstat_idle_time = 0;

    nstat_control_state *control;
    nstat_src *dead = NULL;
    nstat_src *dead_list = NULL;
    for (control = nstat_controls; control; control = control->ncs_next)
    {
        lck_mtx_lock(&control->mtx);
        nstat_src **srcpp = &control->ncs_srcs;

        if (!(control->ncs_flags & NSTAT_FLAG_REQCOUNTS))
        {
            while(*srcpp != NULL)
            {
                if ((*srcpp)->provider->nstat_gone((*srcpp)->cookie))
                {
                    errno_t result;

                    // Pull it off the list
                    dead = *srcpp;
                    *srcpp = (*srcpp)->next;

                    result = nstat_control_send_goodbye(control, dead);

                    // Put this on the list to release later
                    dead->next = dead_list;
                    dead_list = dead;
                }
                else
                {
                    srcpp = &(*srcpp)->next;
                }
            }
        }
        control->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS;
        lck_mtx_unlock(&control->mtx);
    }

    if (nstat_controls)
    {
        clock_interval_to_deadline(60, NSEC_PER_SEC, &nstat_idle_time);
        thread_call_func_delayed((thread_call_func_t)nstat_idle_check, NULL, nstat_idle_time);
    }

    lck_mtx_unlock(&nstat_mtx);

    /* Generate any system level reports, if needed */
    nstat_sysinfo_generate_report();

    // Release the sources now that we aren't holding lots of locks
    while (dead_list)
    {
        dead = dead_list;
        dead_list = dead->next;

        nstat_control_cleanup_source(NULL, dead, FALSE);
    }
}
static void
nstat_control_register(void)
{
    // Create our lock group first
    lck_grp_attr_t *grp_attr = lck_grp_attr_alloc_init();
    lck_grp_attr_setdefault(grp_attr);
    nstat_lck_grp = lck_grp_alloc_init("network statistics kctl", grp_attr);
    lck_grp_attr_free(grp_attr);

    lck_mtx_init(&nstat_mtx, nstat_lck_grp, NULL);

    // Register the control
    struct kern_ctl_reg nstat_control;
    bzero(&nstat_control, sizeof(nstat_control));
    strlcpy(nstat_control.ctl_name, NET_STAT_CONTROL_NAME, sizeof(nstat_control.ctl_name));
    nstat_control.ctl_flags = CTL_FLAG_REG_EXTENDED | CTL_FLAG_REG_CRIT;
    nstat_control.ctl_sendsize = nstat_sendspace;
    nstat_control.ctl_recvsize = nstat_recvspace;
    nstat_control.ctl_connect = nstat_control_connect;
    nstat_control.ctl_disconnect = nstat_control_disconnect;
    nstat_control.ctl_send = nstat_control_send;

    ctl_register(&nstat_control, &nstat_ctlref);
}
static void
nstat_control_cleanup_source(
    nstat_control_state *state,
    struct nstat_src    *src,
    boolean_t           locked)
{
    errno_t result;

    if (state)
    {
        result = nstat_control_send_removed(state, src);
        if (result != 0)
        {
            nstat_stats.nstat_control_cleanup_source_failures++;
            if (nstat_debug != 0)
                printf("%s - nstat_control_send_removed() %d\n",
                    __func__, result);
        }
    }
    // Cleanup the source if we found it.
    src->provider->nstat_release(src->cookie, locked);
    OSFree(src, sizeof(*src), nstat_malloc_tag);
}
static bool
nstat_control_reporting_allowed(
    nstat_control_state *state,
    nstat_src           *src)
{
    if (src->provider->nstat_reporting_allowed == NULL)
        return TRUE;

    return (
        src->provider->nstat_reporting_allowed(src->cookie,
            &state->ncs_provider_filters[src->provider->nstat_provider_id])
    );
}
static errno_t
nstat_control_connect(
    kern_ctl_ref        kctl,
    struct sockaddr_ctl *sac,
    void                **uinfo)
{
    nstat_control_state *state = OSMalloc(sizeof(*state), nstat_malloc_tag);
    if (state == NULL) return ENOMEM;

    bzero(state, sizeof(*state));
    lck_mtx_init(&state->mtx, nstat_lck_grp, NULL);
    state->ncs_kctl = kctl;
    state->ncs_unit = sac->sc_unit;
    state->ncs_flags = NSTAT_FLAG_REQCOUNTS;
    *uinfo = state;

    lck_mtx_lock(&nstat_mtx);
    state->ncs_next = nstat_controls;
    nstat_controls = state;

    if (nstat_idle_time == 0)
    {
        clock_interval_to_deadline(60, NSEC_PER_SEC, &nstat_idle_time);
        thread_call_func_delayed((thread_call_func_t)nstat_idle_check, NULL, nstat_idle_time);
    }

    lck_mtx_unlock(&nstat_mtx);

    return 0;
}
static errno_t
nstat_control_disconnect(
    __unused kern_ctl_ref   kctl,
    __unused u_int32_t      unit,
    void                    *uinfo)
{
    u_int32_t watching;
    nstat_control_state *state = (nstat_control_state *)uinfo;

    // pull it out of the global list of states
    lck_mtx_lock(&nstat_mtx);
    nstat_control_state **statepp;
    for (statepp = &nstat_controls; *statepp; statepp = &(*statepp)->ncs_next)
    {
        if (*statepp == state)
        {
            *statepp = state->ncs_next;
            break;
        }
    }
    lck_mtx_unlock(&nstat_mtx);

    lck_mtx_lock(&state->mtx);
    // Stop watching for sources
    nstat_provider *provider;
    watching = state->ncs_watching;
    state->ncs_watching = 0;
    for (provider = nstat_providers; provider && watching; provider = provider->next)
    {
        if ((watching & (1 << provider->nstat_provider_id)) != 0)
        {
            watching &= ~(1 << provider->nstat_provider_id);
            provider->nstat_watcher_remove(state);
        }
    }

    // set cleanup flags
    state->ncs_flags |= NSTAT_FLAG_CLEANUP;

    if (state->ncs_accumulated)
    {
        mbuf_freem(state->ncs_accumulated);
        state->ncs_accumulated = NULL;
    }

    // Copy out the list of sources
    nstat_src *srcs = state->ncs_srcs;
    state->ncs_srcs = NULL;
    lck_mtx_unlock(&state->mtx);

    while (srcs)
    {
        nstat_src *src;

        // pull it out of the list
        src = srcs;
        srcs = src->next;

        // clean it up
        nstat_control_cleanup_source(NULL, src, FALSE);
    }
    lck_mtx_destroy(&state->mtx, nstat_lck_grp);
    OSFree(state, sizeof(*state), nstat_malloc_tag);

    return 0;
}
static nstat_src_ref_t
nstat_control_next_src_ref(
    nstat_control_state *state)
{
    return ++state->ncs_next_srcref;
}
static errno_t
nstat_control_send_counts(
    nstat_control_state *state,
    nstat_src           *src,
    unsigned long long  context,
    u_int16_t           hdr_flags,
    int                 *gone)
{
    nstat_msg_src_counts counts;
    errno_t result = 0;

    /* Some providers may not have any counts to send */
    if (src->provider->nstat_counts == NULL)
        return (0);

    bzero(&counts, sizeof(counts));
    counts.hdr.type = NSTAT_MSG_TYPE_SRC_COUNTS;
    counts.hdr.length = sizeof(counts);
    counts.hdr.flags = hdr_flags;
    counts.hdr.context = context;
    counts.srcref = src->srcref;
    counts.event_flags = 0;

    if (src->provider->nstat_counts(src->cookie, &counts.counts, gone) == 0)
    {
        if ((src->filter & NSTAT_FILTER_NOZEROBYTES) &&
            counts.counts.nstat_rxbytes == 0 &&
            counts.counts.nstat_txbytes == 0)
        {
            result = EAGAIN;
        }
        else
        {
            result = ctl_enqueuedata(state->ncs_kctl,
                state->ncs_unit, &counts, sizeof(counts),
                CTL_DATA_EOR);
            if (result != 0)
                nstat_stats.nstat_sendcountfailures += 1;
        }
    }
    return result;
}
static errno_t
nstat_control_append_counts(
    nstat_control_state *state,
    nstat_src           *src,
    int                 *gone)
{
    /* Some providers may not have any counts to send */
    if (!src->provider->nstat_counts) return 0;

    nstat_msg_src_counts counts;
    bzero(&counts, sizeof(counts));
    counts.hdr.type = NSTAT_MSG_TYPE_SRC_COUNTS;
    counts.hdr.length = sizeof(counts);
    counts.srcref = src->srcref;
    counts.event_flags = 0;

    errno_t result = 0;
    result = src->provider->nstat_counts(src->cookie, &counts.counts, gone);
    if (result != 0)
    {
        return result;
    }

    if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
        counts.counts.nstat_rxbytes == 0 && counts.counts.nstat_txbytes == 0)
    {
        return EAGAIN;
    }

    return nstat_accumulate_msg(state, &counts.hdr, counts.hdr.length);
}
static errno_t
nstat_control_send_description(
    nstat_control_state *state,
    nstat_src           *src,
    u_int64_t           context,
    u_int16_t           hdr_flags)
{
    // Provider doesn't support getting the descriptor? Done.
    if (src->provider->nstat_descriptor_length == 0 ||
        src->provider->nstat_copy_descriptor == NULL)
    {
        return EOPNOTSUPP;
    }

    // Allocate storage for the descriptor message
    mbuf_t msg;
    unsigned int one = 1;
    u_int32_t size = offsetof(nstat_msg_src_description, data) + src->provider->nstat_descriptor_length;
    if (mbuf_allocpacket(MBUF_DONTWAIT, size, &one, &msg) != 0)
    {
        return ENOMEM;
    }

    nstat_msg_src_description *desc = (nstat_msg_src_description *)mbuf_data(msg);
    bzero(desc, size);
    mbuf_setlen(msg, size);
    mbuf_pkthdr_setlen(msg, mbuf_len(msg));

    // Query the provider for the provider specific bits
    errno_t result = src->provider->nstat_copy_descriptor(src->cookie, desc->data, src->provider->nstat_descriptor_length);

    if (result != 0)
    {
        mbuf_freem(msg);
        return result;
    }

    desc->hdr.context = context;
    desc->hdr.type = NSTAT_MSG_TYPE_SRC_DESC;
    desc->hdr.length = size;
    desc->hdr.flags = hdr_flags;
    desc->srcref = src->srcref;
    desc->event_flags = 0;
    desc->provider = src->provider->nstat_provider_id;

    result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg, CTL_DATA_EOR);
    if (result != 0)
    {
        nstat_stats.nstat_descriptionfailures += 1;
        mbuf_freem(msg);
    }

    return result;
}
static errno_t
nstat_control_append_description(
    nstat_control_state *state,
    nstat_src           *src)
{
    size_t size = offsetof(nstat_msg_src_description, data) + src->provider->nstat_descriptor_length;
    if (size > 512 || src->provider->nstat_descriptor_length == 0 ||
        src->provider->nstat_copy_descriptor == NULL)
    {
        return EOPNOTSUPP;
    }

    // Fill out a buffer on the stack, we will copy to the mbuf later
    u_int64_t buffer[size/sizeof(u_int64_t) + 1]; // u_int64_t to ensure alignment
    bzero(buffer, size);

    nstat_msg_src_description *desc = (nstat_msg_src_description *)buffer;
    desc->hdr.type = NSTAT_MSG_TYPE_SRC_DESC;
    desc->hdr.length = size;
    desc->srcref = src->srcref;
    desc->event_flags = 0;
    desc->provider = src->provider->nstat_provider_id;

    errno_t result = 0;
    // Fill in the description
    // Query the provider for the provider specific bits
    result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
        src->provider->nstat_descriptor_length);
    if (result != 0)
    {
        return result;
    }

    return nstat_accumulate_msg(state, &desc->hdr, size);
}
static errno_t
nstat_control_send_update(
    nstat_control_state *state,
    nstat_src           *src,
    u_int64_t           context,
    u_int16_t           hdr_flags,
    int                 *gone)
{
    // Provider doesn't support getting the descriptor or counts? Done.
    if ((src->provider->nstat_descriptor_length == 0 ||
         src->provider->nstat_copy_descriptor == NULL) &&
        src->provider->nstat_counts == NULL)
    {
        return EOPNOTSUPP;
    }

    // Allocate storage for the descriptor message
    mbuf_t msg;
    unsigned int one = 1;
    u_int32_t size = offsetof(nstat_msg_src_update, data) +
        src->provider->nstat_descriptor_length;
    if (mbuf_allocpacket(MBUF_DONTWAIT, size, &one, &msg) != 0)
    {
        return ENOMEM;
    }

    nstat_msg_src_update *desc = (nstat_msg_src_update *)mbuf_data(msg);
    bzero(desc, size);
    desc->hdr.context = context;
    desc->hdr.type = NSTAT_MSG_TYPE_SRC_UPDATE;
    desc->hdr.length = size;
    desc->hdr.flags = hdr_flags;
    desc->srcref = src->srcref;
    desc->event_flags = 0;
    desc->provider = src->provider->nstat_provider_id;

    mbuf_setlen(msg, size);
    mbuf_pkthdr_setlen(msg, mbuf_len(msg));

    errno_t result = 0;
    if (src->provider->nstat_descriptor_length != 0 && src->provider->nstat_copy_descriptor)
    {
        // Query the provider for the provider specific bits
        result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
            src->provider->nstat_descriptor_length);
        if (result != 0)
        {
            mbuf_freem(msg);
            return result;
        }
    }

    if (src->provider->nstat_counts)
    {
        result = src->provider->nstat_counts(src->cookie, &desc->counts, gone);
        if (result == 0)
        {
            if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
                desc->counts.nstat_rxbytes == 0 && desc->counts.nstat_txbytes == 0)
            {
                result = EAGAIN;
            }
            else
            {
                result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg, CTL_DATA_EOR);
            }
        }
    }

    if (result != 0)
    {
        nstat_stats.nstat_srcupatefailures += 1;
        mbuf_freem(msg);
    }

    return result;
}
static errno_t
nstat_control_append_update(
    nstat_control_state *state,
    nstat_src           *src,
    int                 *gone)
{
    size_t size = offsetof(nstat_msg_src_update, data) + src->provider->nstat_descriptor_length;
    if (size > 512 || ((src->provider->nstat_descriptor_length == 0 ||
         src->provider->nstat_copy_descriptor == NULL) &&
        src->provider->nstat_counts == NULL))
    {
        return EOPNOTSUPP;
    }

    // Fill out a buffer on the stack, we will copy to the mbuf later
    u_int64_t buffer[size/sizeof(u_int64_t) + 1]; // u_int64_t to ensure alignment
    bzero(buffer, size);

    nstat_msg_src_update *desc = (nstat_msg_src_update *)buffer;
    desc->hdr.type = NSTAT_MSG_TYPE_SRC_UPDATE;
    desc->hdr.length = size;
    desc->srcref = src->srcref;
    desc->event_flags = 0;
    desc->provider = src->provider->nstat_provider_id;

    errno_t result = 0;
    // Fill in the description
    if (src->provider->nstat_descriptor_length != 0 && src->provider->nstat_copy_descriptor)
    {
        // Query the provider for the provider specific bits
        result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
            src->provider->nstat_descriptor_length);
        if (result != 0)
        {
            nstat_stats.nstat_copy_descriptor_failures++;
            if (nstat_debug != 0)
                printf("%s: src->provider->nstat_copy_descriptor: %d\n", __func__, result);
            return result;
        }
    }

    if (src->provider->nstat_counts)
    {
        result = src->provider->nstat_counts(src->cookie, &desc->counts, gone);
        if (result != 0)
        {
            nstat_stats.nstat_provider_counts_failures++;
            if (nstat_debug != 0)
                printf("%s: src->provider->nstat_counts: %d\n", __func__, result);
            return result;
        }

        if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
            desc->counts.nstat_rxbytes == 0 && desc->counts.nstat_txbytes == 0)
        {
            return EAGAIN;
        }
    }

    return nstat_accumulate_msg(state, &desc->hdr, size);
}
static errno_t
nstat_control_send_removed(
    nstat_control_state *state,
    nstat_src           *src)
{
    nstat_msg_src_removed removed;
    errno_t result;

    bzero(&removed, sizeof(removed));
    removed.hdr.type = NSTAT_MSG_TYPE_SRC_REMOVED;
    removed.hdr.length = sizeof(removed);
    removed.hdr.context = 0;
    removed.srcref = src->srcref;
    result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &removed,
        sizeof(removed), CTL_DATA_EOR | CTL_DATA_CRIT);
    if (result != 0)
        nstat_stats.nstat_msgremovedfailures += 1;

    return result;
}
static errno_t
nstat_control_handle_add_request(
    nstat_control_state *state,
    mbuf_t              m)
{
    errno_t result;

    // Verify the header fits in the first mbuf
    if (mbuf_len(m) < offsetof(nstat_msg_add_src_req, param))
    {
        return EINVAL;
    }

    // Calculate the length of the parameter field
    int32_t paramlength = mbuf_pkthdr_len(m) - offsetof(nstat_msg_add_src_req, param);
    if (paramlength < 0 || paramlength > 2 * 1024)
    {
        return EINVAL;
    }

    nstat_provider          *provider;
    nstat_provider_cookie_t cookie;
    nstat_msg_add_src_req   *req = mbuf_data(m);
    if (mbuf_pkthdr_len(m) > mbuf_len(m))
    {
        // parameter is too large, we need to make a contiguous copy
        void *data = OSMalloc(paramlength, nstat_malloc_tag);

        if (!data) return ENOMEM;
        result = mbuf_copydata(m, offsetof(nstat_msg_add_src_req, param), paramlength, data);
        if (result == 0)
            result = nstat_lookup_entry(req->provider, data, paramlength, &provider, &cookie);
        OSFree(data, paramlength, nstat_malloc_tag);
    }
    else
    {
        result = nstat_lookup_entry(req->provider, (void*)&req->param, paramlength, &provider, &cookie);
    }

    if (result != 0)
    {
        return result;
    }

    result = nstat_control_source_add(req->hdr.context, state, provider, cookie);
    if (result != 0)
        provider->nstat_release(cookie, 0);

    return result;
}
static errno_t
nstat_control_handle_add_all(
	nstat_control_state	*state,
	mbuf_t			m)
{
	errno_t	result = 0;

	// Verify the header fits in the first mbuf
	if (mbuf_len(m) < sizeof(nstat_msg_add_all_srcs))
	{
		return EINVAL;
	}

	nstat_msg_add_all_srcs	*req = mbuf_data(m);
	if (req->provider > NSTAT_PROVIDER_LAST) return ENOENT;

	nstat_provider	*provider = nstat_find_provider_by_id(req->provider);

	if (!provider) return ENOENT;
	if (provider->nstat_watcher_add == NULL) return ENOTSUP;

	if (nstat_privcheck != 0) {
		result = priv_check_cred(kauth_cred_get(),
		    PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0);
		if (result != 0)
			return result;
	}

	// Make sure we don't add the provider twice
	lck_mtx_lock(&state->mtx);
	if ((state->ncs_watching & (1 << provider->nstat_provider_id)) != 0)
		result = EALREADY;
	state->ncs_watching |= (1 << provider->nstat_provider_id);
	lck_mtx_unlock(&state->mtx);
	if (result != 0) return result;

	state->ncs_provider_filters[req->provider].npf_flags = req->filter;
	state->ncs_provider_filters[req->provider].npf_events = req->events;
	state->ncs_provider_filters[req->provider].npf_pid = req->target_pid;
	memcpy(state->ncs_provider_filters[req->provider].npf_uuid, req->target_uuid,
	    sizeof(state->ncs_provider_filters[req->provider].npf_uuid));

	result = provider->nstat_watcher_add(state);
	if (result != 0)
	{
		state->ncs_provider_filters[req->provider].npf_flags = 0;
		state->ncs_provider_filters[req->provider].npf_events = 0;
		state->ncs_provider_filters[req->provider].npf_pid = 0;
		bzero(state->ncs_provider_filters[req->provider].npf_uuid,
		    sizeof(state->ncs_provider_filters[req->provider].npf_uuid));

		lck_mtx_lock(&state->mtx);
		state->ncs_watching &= ~(1 << provider->nstat_provider_id);
		lck_mtx_unlock(&state->mtx);
	}
	if (result == 0)
		nstat_enqueue_success(req->hdr.context, state, 0);

	return result;
}
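
/*
 * Allocate and link a new nstat_src for this control state, assigning an
 * unused source ref.  Unless the provider filter suppresses it, an
 * NSTAT_MSG_TYPE_SRC_ADDED message announcing the new ref is enqueued to
 * the client.
 */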
static errno_t
nstat_control_source_add(
	u_int64_t		context,
	nstat_control_state	*state,
	nstat_provider		*provider,
	nstat_provider_cookie_t	cookie)
{
	// Fill out source added message if appropriate
	mbuf_t			msg = NULL;
	nstat_src_ref_t		*srcrefp = NULL;

	u_int64_t		provider_filter_flags =
	    state->ncs_provider_filters[provider->nstat_provider_id].npf_flags;
	boolean_t		tell_user =
	    ((provider_filter_flags & NSTAT_FILTER_SUPPRESS_SRC_ADDED) == 0);
	u_int32_t		src_filter =
	    (provider_filter_flags & NSTAT_FILTER_PROVIDER_NOZEROBYTES)
		? NSTAT_FILTER_NOZEROBYTES : 0;

	if (tell_user)
	{
		unsigned int one = 1;

		if (mbuf_allocpacket(MBUF_DONTWAIT, sizeof(nstat_msg_src_added),
		    &one, &msg) != 0)
			return ENOMEM;

		mbuf_setlen(msg, sizeof(nstat_msg_src_added));
		mbuf_pkthdr_setlen(msg, mbuf_len(msg));
		nstat_msg_src_added	*add = mbuf_data(msg);
		bzero(add, sizeof(*add));
		add->hdr.type = NSTAT_MSG_TYPE_SRC_ADDED;
		add->hdr.length = mbuf_len(msg);
		add->hdr.context = context;
		add->provider = provider->nstat_provider_id;
		srcrefp = &add->srcref;
	}

	// Allocate storage for the source
	nstat_src	*src = OSMalloc(sizeof(*src), nstat_malloc_tag);
	if (src == NULL)
	{
		if (msg) mbuf_freem(msg);
		return ENOMEM;
	}

	// Fill in the source, including picking an unused source ref
	lck_mtx_lock(&state->mtx);

	src->srcref = nstat_control_next_src_ref(state);
	if (srcrefp)
		*srcrefp = src->srcref;

	if (state->ncs_flags & NSTAT_FLAG_CLEANUP || src->srcref == NSTAT_SRC_REF_INVALID)
	{
		lck_mtx_unlock(&state->mtx);
		OSFree(src, sizeof(*src), nstat_malloc_tag);
		if (msg) mbuf_freem(msg);
		return EINVAL;
	}
	src->provider = provider;
	src->cookie = cookie;
	src->filter = src_filter;

	if (msg)
	{
		// send the source added message if appropriate
		errno_t	result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg,
		    CTL_DATA_EOR);
		if (result != 0)
		{
			nstat_stats.nstat_srcaddedfailures += 1;
			lck_mtx_unlock(&state->mtx);
			OSFree(src, sizeof(*src), nstat_malloc_tag);
			mbuf_freem(msg);
			return result;
		}
	}

	// Put the source in the list
	src->next = state->ncs_srcs;
	state->ncs_srcs = src;

	lck_mtx_unlock(&state->mtx);

	return 0;
}
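
/*
 * NSTAT_MSG_TYPE_REM_SRC: unlink the source with the given ref from this
 * control state and clean it up.
 */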
static errno_t
nstat_control_handle_remove_request(
	nstat_control_state	*state,
	mbuf_t			m)
{
	nstat_src_ref_t	srcref = NSTAT_SRC_REF_INVALID;

	if (mbuf_copydata(m, offsetof(nstat_msg_rem_src_req, srcref), sizeof(srcref), &srcref) != 0)
	{
		return EINVAL;
	}

	lck_mtx_lock(&state->mtx);

	// Remove this source as we look for it
	nstat_src	**nextp;
	nstat_src	*src = NULL;
	for (nextp = &state->ncs_srcs; *nextp; nextp = &(*nextp)->next)
	{
		if ((*nextp)->srcref == srcref)
		{
			src = *nextp;
			*nextp = src->next;
			break;
		}
	}

	lck_mtx_unlock(&state->mtx);

	if (src) nstat_control_cleanup_source(state, src, FALSE);

	return src ? 0 : ENOENT;
}
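
/*
 * NSTAT_MSG_TYPE_QUERY_SRC: send counts for one source, or for all
 * sources when the request names NSTAT_SRC_REF_ALL.  Query-all responses
 * are paced: at most QUERY_CONTINUATION_SRC_COUNT sources are reported
 * per request, and the success reply tells the client whether to ask for
 * a continuation (see nstat_control_begin_query/nstat_control_end_query).
 */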
static errno_t
nstat_control_handle_query_request(
	nstat_control_state	*state,
	mbuf_t			m)
{
	// TBD: handle this from another thread so we can enqueue a lot of data
	// As written, if a client requests query all, this function will be
	// called from their send of the request message. We will attempt to write
	// responses and succeed until the buffer fills up. Since the clients thread
	// is blocked on send, it won't be reading unless the client has two threads
	// using this socket, one for read and one for write. Two threads probably
	// won't work with this code anyhow since we don't have proper locking in
	// place yet.
	nstat_src		*dead_srcs = NULL;
	errno_t			result = ENOENT;
	nstat_msg_query_src_req	req;

	if (mbuf_copydata(m, 0, sizeof(req), &req) != 0)
	{
		return EINVAL;
	}

	const boolean_t	all_srcs = (req.srcref == NSTAT_SRC_REF_ALL);

	lck_mtx_lock(&state->mtx);

	if (all_srcs)
	{
		state->ncs_flags |= NSTAT_FLAG_REQCOUNTS;
	}
	nstat_src	**srcpp = &state->ncs_srcs;
	u_int64_t	src_count = 0;
	boolean_t	partial = FALSE;

	/*
	 * Error handling policy and sequence number generation is folded into
	 * nstat_control_begin_query.
	 */
	partial = nstat_control_begin_query(state, &req.hdr);

	while (*srcpp != NULL
	    && (!partial || src_count < QUERY_CONTINUATION_SRC_COUNT))
	{
		nstat_src	*src = NULL;
		int		gone;

		src = *srcpp;
		gone = 0;
		// XXX ignore IFACE types?
		if (all_srcs || src->srcref == req.srcref)
		{
			if (nstat_control_reporting_allowed(state, src)
			    && (!partial || !all_srcs || src->seq != state->ncs_seq))
			{
				if (all_srcs &&
				    (req.hdr.flags & NSTAT_MSG_HDR_FLAG_SUPPORTS_AGGREGATE) != 0)
				{
					result = nstat_control_append_counts(state, src, &gone);
				}
				else
				{
					result = nstat_control_send_counts(state, src, req.hdr.context, 0, &gone);
				}

				if (ENOMEM == result || ENOBUFS == result)
				{
					/*
					 * If the counts message failed to
					 * enqueue then we should clear our flag so
					 * that a client doesn't miss anything on
					 * idle cleanup.  We skip the "gone"
					 * processing in the hope that we may
					 * catch it another time.
					 */
					state->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS;
					break;
				}
				if (partial)
				{
					/*
					 * We skip over hard errors and
					 * filtered sources.
					 */
					src->seq = state->ncs_seq;
					src_count++;
				}
			}
		}

		if (gone)
		{
			// send one last descriptor message so client may see last state
			// If we can't send the notification now, it
			// will be sent in the idle cleanup.
			result = nstat_control_send_description(state, *srcpp, 0, 0);
			if (result != 0)
			{
				nstat_stats.nstat_control_send_description_failures++;
				if (nstat_debug != 0)
					printf("%s - nstat_control_send_description() %d\n", __func__, result);
				state->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS;
				break;
			}

			// pull src out of the list
			*srcpp = src->next;

			src->next = dead_srcs;
			dead_srcs = src;
		}
		else
		{
			srcpp = &(*srcpp)->next;
		}

		if (!all_srcs && req.srcref == src->srcref)
		{
			break;
		}
	}
	nstat_flush_accumulated_msgs(state);

	u_int16_t flags = 0;
	if (req.srcref == NSTAT_SRC_REF_ALL)
		flags = nstat_control_end_query(state, *srcpp, partial);

	lck_mtx_unlock(&state->mtx);

	/*
	 * If an error occurred enqueueing data, then allow the error to
	 * propagate to nstat_control_send. This way, the error is sent to
	 * user-level.
	 */
	if (all_srcs && ENOMEM != result && ENOBUFS != result)
	{
		nstat_enqueue_success(req.hdr.context, state, flags);
		result = 0;
	}

	while (dead_srcs)
	{
		nstat_src	*src;

		src = dead_srcs;
		dead_srcs = src->next;

		// release src and send notification
		nstat_control_cleanup_source(state, src, FALSE);
	}

	return result;
}
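
/*
 * NSTAT_MSG_TYPE_GET_SRC_DESC: send the provider-specific descriptor for
 * one source or, paced as above, for all sources.
 */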
static errno_t
nstat_control_handle_get_src_description(
	nstat_control_state	*state,
	mbuf_t			m)
{
	nstat_msg_get_src_description	req;
	errno_t				result = ENOENT;
	nstat_src			*src = NULL;

	if (mbuf_copydata(m, 0, sizeof(req), &req) != 0)
	{
		return EINVAL;
	}

	lck_mtx_lock(&state->mtx);
	u_int64_t	src_count = 0;
	boolean_t	partial = FALSE;
	const boolean_t	all_srcs = (req.srcref == NSTAT_SRC_REF_ALL);

	/*
	 * Error handling policy and sequence number generation is folded into
	 * nstat_control_begin_query.
	 */
	partial = nstat_control_begin_query(state, &req.hdr);

	for (src = state->ncs_srcs;
	    src && (!partial || src_count < QUERY_CONTINUATION_SRC_COUNT);
	    src = src->next)
	{
		if (all_srcs || src->srcref == req.srcref)
		{
			if (nstat_control_reporting_allowed(state, src)
			    && (!all_srcs || !partial || src->seq != state->ncs_seq))
			{
				if ((req.hdr.flags & NSTAT_MSG_HDR_FLAG_SUPPORTS_AGGREGATE) != 0 && all_srcs)
				{
					result = nstat_control_append_description(state, src);
				}
				else
				{
					result = nstat_control_send_description(state, src, req.hdr.context, 0);
				}

				if (ENOMEM == result || ENOBUFS == result)
				{
					/*
					 * If the description message failed to
					 * enqueue then we give up for now.
					 */
					break;
				}
				if (partial)
				{
					/*
					 * Note, we skip over hard errors and
					 * filtered sources.
					 */
					src->seq = state->ncs_seq;
					src_count++;
				}
			}

			if (!all_srcs)
			{
				break;
			}
		}
	}
	nstat_flush_accumulated_msgs(state);

	u_int16_t flags = 0;
	if (req.srcref == NSTAT_SRC_REF_ALL)
		flags = nstat_control_end_query(state, src, partial);

	lck_mtx_unlock(&state->mtx);
	/*
	 * If an error occurred enqueueing data, then allow the error to
	 * propagate to nstat_control_send. This way, the error is sent to
	 * user-level.
	 */
	if (all_srcs && ENOMEM != result && ENOBUFS != result)
	{
		nstat_enqueue_success(req.hdr.context, state, flags);
		result = 0;
	}

	return result;
}
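
/*
 * NSTAT_MSG_TYPE_SET_FILTER: replace the report filter of a single
 * existing source; wildcard and invalid refs are rejected.
 */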
static errno_t
nstat_control_handle_set_filter(
	nstat_control_state	*state,
	mbuf_t			m)
{
	nstat_msg_set_filter	req;
	nstat_src		*src;

	if (mbuf_copydata(m, 0, sizeof(req), &req) != 0)
		return EINVAL;
	if (req.srcref == NSTAT_SRC_REF_ALL ||
	    req.srcref == NSTAT_SRC_REF_INVALID)
		return EINVAL;

	lck_mtx_lock(&state->mtx);
	for (src = state->ncs_srcs; src; src = src->next)
		if (req.srcref == src->srcref)
		{
			src->filter = req.filter;
			break;
		}
	lck_mtx_unlock(&state->mtx);
	if (src == NULL)
		return ENOENT;

	return 0;
}
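
/*
 * Enqueue an NSTAT_MSG_TYPE_ERROR message carrying the given context and
 * error code; used, for example, to abort a stale paced query.
 */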
static void
nstat_send_error(
	nstat_control_state	*state,
	u_int64_t		context,
	u_int32_t		error)
{
	errno_t			result;
	struct nstat_msg_error	err;

	bzero(&err, sizeof(err));
	err.hdr.type = NSTAT_MSG_TYPE_ERROR;
	err.hdr.length = sizeof(err);
	err.hdr.context = context;
	err.error = error;

	result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &err,
	    sizeof(err), CTL_DATA_EOR | CTL_DATA_CRIT);
	if (result != 0)
		nstat_stats.nstat_msgerrorfailures++;
}
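
/*
 * Set up sequence-number state for a (possibly paced) query.  The client
 * side of the continuation handshake, as an illustrative sketch only
 * (message and flag names as used in this file; the success reply is
 * assumed to be the one generated via nstat_enqueue_success):
 *
 *	req.hdr.context = my_context;
 *	req.hdr.flags = 0;
 *	for (;;) {
 *		send(fd, &req, sizeof(req), 0);
 *		// ...read responses until the success reply for my_context...
 *		if ((success_hdr.flags & NSTAT_MSG_HDR_FLAG_CONTINUATION) == 0)
 *			break;
 *		// more sources remain: re-issue the query as a continuation
 *		req.hdr.flags = NSTAT_MSG_HDR_FLAG_CONTINUATION;
 *	}
 */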
static boolean_t
nstat_control_begin_query(
	nstat_control_state	*state,
	const nstat_msg_hdr	*hdrp)
{
	boolean_t	partial = FALSE;

	if (hdrp->flags & NSTAT_MSG_HDR_FLAG_CONTINUATION)
	{
		/* A partial query all has been requested. */
		partial = TRUE;

		if (state->ncs_context != hdrp->context)
		{
			if (state->ncs_context != 0)
				nstat_send_error(state, state->ncs_context, EAGAIN);

			/* Initialize state for a partial query all. */
			state->ncs_context = hdrp->context;
			state->ncs_seq++;
		}
	}
	else if (state->ncs_context != 0)
	{
		/*
		 * A continuation of a paced-query was in progress. Send that
		 * context an error and reset the state.  If the same context
		 * has changed its mind, just send the full query results.
		 */
		if (state->ncs_context != hdrp->context)
			nstat_send_error(state, state->ncs_context, EAGAIN);

		/* Reset the state so that the full query results are sent. */
		state->ncs_context = 0;
		state->ncs_seq++;
	}

	return partial;
}
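
/*
 * Finish a (possibly paced) query: either clear the saved context because
 * the source list was exhausted, or tell userlevel to come back for the
 * remainder by setting the continuation flag in the reply header.
 */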
static u_int16_t
nstat_control_end_query(
	nstat_control_state	*state,
	nstat_src		*last_src,
	boolean_t		partial)
{
	u_int16_t	flags = 0;

	if (last_src == NULL || !partial)
	{
		/*
		 * We iterated through the entire srcs list or exited early
		 * from the loop when a partial update was not requested (an
		 * error occurred), so clear context to indicate internally
		 * that the query is finished.
		 */
		state->ncs_context = 0;
	}
	else
	{
		/*
		 * Indicate to userlevel to make another partial request as
		 * there are still sources left to be reported.
		 */
		flags |= NSTAT_MSG_HDR_FLAG_CONTINUATION;
	}

	return flags;
}
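
/*
 * NSTAT_MSG_TYPE_GET_UPDATE: like the counts query, but sends combined
 * descriptor+counts update messages, paced the same way for query-all.
 * Handling this message also marks the client as supporting updates.
 */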
static errno_t
nstat_control_handle_get_update(
	nstat_control_state	*state,
	mbuf_t			m)
{
	nstat_msg_query_src_req	req;

	if (mbuf_copydata(m, 0, sizeof(req), &req) != 0)
	{
		return EINVAL;
	}

	lck_mtx_lock(&state->mtx);

	state->ncs_flags |= NSTAT_FLAG_SUPPORTS_UPDATES;

	errno_t		result = ENOENT;
	nstat_src	*src;
	nstat_src	*dead_srcs = NULL;
	nstat_src	**srcpp = &state->ncs_srcs;
	u_int64_t	src_count = 0;
	boolean_t	partial = FALSE;

	/*
	 * Error handling policy and sequence number generation is folded into
	 * nstat_control_begin_query.
	 */
	partial = nstat_control_begin_query(state, &req.hdr);

	while (*srcpp != NULL
	    && (FALSE == partial
		|| src_count < QUERY_CONTINUATION_SRC_COUNT))
	{
		int	gone;

		gone = 0;
		src = *srcpp;
		if (nstat_control_reporting_allowed(state, src))
		{
			/* skip this source if it has the current state
			 * sequence number as it's already been reported in
			 * this query-all partial sequence. */
			if (req.srcref == NSTAT_SRC_REF_ALL
			    && (FALSE == partial || src->seq != state->ncs_seq))
			{
				result = nstat_control_append_update(state, src, &gone);
				if (ENOMEM == result || ENOBUFS == result)
				{
					/*
					 * If the update message failed to
					 * enqueue then give up.
					 */
					break;
				}
				if (partial)
				{
					/*
					 * We skip over hard errors and
					 * filtered sources.
					 */
					src->seq = state->ncs_seq;
					src_count++;
				}
			}
			else if (src->srcref == req.srcref)
			{
				result = nstat_control_send_update(state, src, req.hdr.context, 0, &gone);
			}
		}

		if (gone)
		{
			// pull src out of the list
			*srcpp = src->next;

			src->next = dead_srcs;
			dead_srcs = src;
		}
		else
		{
			srcpp = &(*srcpp)->next;
		}

		if (req.srcref != NSTAT_SRC_REF_ALL && req.srcref == src->srcref)
		{
			break;
		}
	}

	nstat_flush_accumulated_msgs(state);

	u_int16_t flags = 0;
	if (req.srcref == NSTAT_SRC_REF_ALL)
		flags = nstat_control_end_query(state, *srcpp, partial);

	lck_mtx_unlock(&state->mtx);
	/*
	 * If an error occurred enqueueing data, then allow the error to
	 * propagate to nstat_control_send. This way, the error is sent to
	 * user-level.
	 */
	if (req.srcref == NSTAT_SRC_REF_ALL && ENOMEM != result && ENOBUFS != result)
	{
		nstat_enqueue_success(req.hdr.context, state, flags);
		result = 0;
	}

	while (dead_srcs)
	{
		src = dead_srcs;
		dead_srcs = src->next;

		// release src and send notification
		nstat_control_cleanup_source(state, src, FALSE);
	}

	return result;
}
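
/*
 * NSTAT_MSG_TYPE_SUBSCRIBE_SYSINFO: privileged clients may subscribe to
 * system-level statistics reports.
 */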
static errno_t
nstat_control_handle_subscribe_sysinfo(
	nstat_control_state	*state)
{
	errno_t	result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0);

	if (result != 0)
	{
		return result;
	}

	lck_mtx_lock(&state->mtx);
	state->ncs_flags |= NSTAT_FLAG_SYSINFO_SUBSCRIBED;
	lck_mtx_unlock(&state->mtx);

	return 0;
}
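
/*
 * Kernel-control send handler: validate and patch up the message header,
 * then dispatch on the message type.  On failure, an NSTAT_MSG_TYPE_ERROR
 * reply (with the request appended when it can be prepended into the
 * mbuf) is enqueued back to the client.
 *
 * For reference, a minimal userspace sketch of reaching this handler.
 * This is illustrative only; it assumes the control name constant
 * NET_STAT_CONTROL_NAME ("com.apple.network.statistics") and the message
 * structs from ntstat.h, and a TCP provider id whose exact name varies
 * across XNU versions:
 *
 *	int fd = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);
 *	struct ctl_info info;
 *	bzero(&info, sizeof(info));
 *	strlcpy(info.ctl_name, NET_STAT_CONTROL_NAME, sizeof(info.ctl_name));
 *	ioctl(fd, CTLIOCGINFO, &info);		// resolve name -> ctl_id
 *
 *	struct sockaddr_ctl sc;
 *	bzero(&sc, sizeof(sc));
 *	sc.sc_len = sizeof(sc);
 *	sc.sc_family = AF_SYSTEM;
 *	sc.ss_sysaddr = AF_SYS_CONTROL;
 *	sc.sc_id = info.ctl_id;
 *	connect(fd, (struct sockaddr *)&sc, sizeof(sc));
 *
 *	nstat_msg_add_all_srcs req;
 *	bzero(&req, sizeof(req));
 *	req.hdr.type = NSTAT_MSG_TYPE_ADD_ALL_SRCS;
 *	req.hdr.length = sizeof(req);
 *	req.hdr.context = 1;
 *	req.provider = NSTAT_PROVIDER_TCP_KERNEL;	// assumed provider id
 *	send(fd, &req, sizeof(req), 0);		// arrives here via ctl_send
 */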
static errno_t
nstat_control_send(
	kern_ctl_ref	kctl,
	u_int32_t	unit,
	void		*uinfo,
	mbuf_t		m,
	__unused int	flags)
{
	nstat_control_state	*state = (nstat_control_state*)uinfo;
	struct nstat_msg_hdr	*hdr;
	struct nstat_msg_hdr	storage;
	errno_t			result = 0;

	if (mbuf_pkthdr_len(m) < sizeof(*hdr))
	{
		// Is this the right thing to do?
		mbuf_freem(m);
		return EINVAL;
	}

	if (mbuf_len(m) >= sizeof(*hdr))
	{
		hdr = mbuf_data(m);
	}
	else
	{
		mbuf_copydata(m, 0, sizeof(storage), &storage);
		hdr = &storage;
	}

	// Legacy clients may not set the length
	// Those clients are likely not setting the flags either
	// Fix everything up so old clients continue to work
	if (hdr->length != mbuf_pkthdr_len(m))
	{
		hdr->flags = 0;
		hdr->length = mbuf_pkthdr_len(m);
		if (hdr == &storage)
		{
			mbuf_copyback(m, 0, sizeof(*hdr), hdr, MBUF_DONTWAIT);
		}
	}

	switch (hdr->type)
	{
		case NSTAT_MSG_TYPE_ADD_SRC:
			result = nstat_control_handle_add_request(state, m);
			break;

		case NSTAT_MSG_TYPE_ADD_ALL_SRCS:
			result = nstat_control_handle_add_all(state, m);
			break;

		case NSTAT_MSG_TYPE_REM_SRC:
			result = nstat_control_handle_remove_request(state, m);
			break;

		case NSTAT_MSG_TYPE_QUERY_SRC:
			result = nstat_control_handle_query_request(state, m);
			break;

		case NSTAT_MSG_TYPE_GET_SRC_DESC:
			result = nstat_control_handle_get_src_description(state, m);
			break;

		case NSTAT_MSG_TYPE_SET_FILTER:
			result = nstat_control_handle_set_filter(state, m);
			break;

		case NSTAT_MSG_TYPE_GET_UPDATE:
			result = nstat_control_handle_get_update(state, m);
			break;

		case NSTAT_MSG_TYPE_SUBSCRIBE_SYSINFO:
			result = nstat_control_handle_subscribe_sysinfo(state);
			break;

		default:
			result = EINVAL;
			break;
	}

	if (result != 0)
	{
		struct nstat_msg_error	err;

		bzero(&err, sizeof(err));
		err.hdr.type = NSTAT_MSG_TYPE_ERROR;
		err.hdr.length = sizeof(err) + mbuf_pkthdr_len(m);
		err.hdr.context = hdr->context;
		err.error = result;

		if (mbuf_prepend(&m, sizeof(err), MBUF_DONTWAIT) == 0 &&
		    mbuf_copyback(m, 0, sizeof(err), &err, MBUF_DONTWAIT) == 0)
		{
			result = ctl_enqueuembuf(kctl, unit, m, CTL_DATA_EOR | CTL_DATA_CRIT);
			if (result != 0)
			{
				mbuf_freem(m);
			}
			m = NULL;
		}

		if (result != 0)
		{
			// Unable to prepend the error to the request - just send the error
			err.hdr.length = sizeof(err);
			result = ctl_enqueuedata(kctl, unit, &err, sizeof(err),
			    CTL_DATA_EOR | CTL_DATA_CRIT);
			if (result != 0)
				nstat_stats.nstat_msgerrorfailures += 1;
		}
		nstat_stats.nstat_handle_msg_failures += 1;
	}

	if (m) mbuf_freem(m);

	return result;
}