2 * Copyright (c) 2010-2015 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
29 #include <sys/param.h>
30 #include <sys/types.h>
31 #include <sys/kpi_mbuf.h>
32 #include <sys/socket.h>
33 #include <sys/kern_control.h>
34 #include <sys/mcache.h>
35 #include <sys/socketvar.h>
36 #include <sys/sysctl.h>
37 #include <sys/queue.h>
39 #include <sys/protosw.h>
41 #include <kern/clock.h>
42 #include <kern/debug.h>
44 #include <libkern/libkern.h>
45 #include <libkern/OSMalloc.h>
46 #include <libkern/OSAtomic.h>
47 #include <libkern/locks.h>
50 #include <net/if_var.h>
51 #include <net/if_types.h>
52 #include <net/route.h>
53 #include <net/ntstat.h>
55 #include <netinet/ip_var.h>
56 #include <netinet/in_pcb.h>
57 #include <netinet/in_var.h>
58 #include <netinet/tcp.h>
59 #include <netinet/tcp_var.h>
60 #include <netinet/tcp_fsm.h>
61 #include <netinet/tcp_cc.h>
62 #include <netinet/udp.h>
63 #include <netinet/udp_var.h>
64 #include <netinet6/in6_pcb.h>
65 #include <netinet6/in6_var.h>
67 __private_extern__
int nstat_collect
= 1;
68 SYSCTL_INT(_net
, OID_AUTO
, statistics
, CTLFLAG_RW
| CTLFLAG_LOCKED
,
69 &nstat_collect
, 0, "Collect detailed statistics");
71 static int nstat_privcheck
= 0;
72 SYSCTL_INT(_net
, OID_AUTO
, statistics_privcheck
, CTLFLAG_RW
| CTLFLAG_LOCKED
,
73 &nstat_privcheck
, 0, "Entitlement check");
75 SYSCTL_NODE(_net
, OID_AUTO
, stats
,
76 CTLFLAG_RW
|CTLFLAG_LOCKED
, 0, "network statistics");
78 static int nstat_debug
= 0;
79 SYSCTL_INT(_net_stats
, OID_AUTO
, debug
, CTLFLAG_RW
| CTLFLAG_LOCKED
,
82 static int nstat_sendspace
= 2048;
83 SYSCTL_INT(_net_stats
, OID_AUTO
, sendspace
, CTLFLAG_RW
| CTLFLAG_LOCKED
,
84 &nstat_sendspace
, 0, "");
86 static int nstat_recvspace
= 8192;
87 SYSCTL_INT(_net_stats
, OID_AUTO
, recvspace
, CTLFLAG_RW
| CTLFLAG_LOCKED
,
88 &nstat_recvspace
, 0, "");
90 static struct nstat_stats nstat_stats
;
91 SYSCTL_STRUCT(_net_stats
, OID_AUTO
, stats
, CTLFLAG_RD
| CTLFLAG_LOCKED
,
92 &nstat_stats
, nstat_stats
, "");
/* Per-client state flags kept in the control state. */
enum
{
    NSTAT_FLAG_CLEANUP              = (1 << 0),
    NSTAT_FLAG_REQCOUNTS            = (1 << 1),
    NSTAT_FLAG_SUPPORTS_UPDATES     = (1 << 2),
    NSTAT_FLAG_SYSINFO_SUBSCRIBED   = (1 << 3),
};

/* Number of sources reported per partial-query continuation batch. */
#define QUERY_CONTINUATION_SRC_COUNT 100
104 typedef struct nstat_control_state
106 struct nstat_control_state
*ncs_next
;
107 u_int32_t ncs_watching
;
108 decl_lck_mtx_data(, mtx
);
109 kern_ctl_ref ncs_kctl
;
111 nstat_src_ref_t ncs_next_srcref
;
112 struct nstat_src
*ncs_srcs
;
113 mbuf_t ncs_accumulated
;
115 u_int64_t ncs_provider_filters
[NSTAT_PROVIDER_COUNT
];
116 /* state maintained for partial query requests */
117 u_int64_t ncs_context
;
119 } nstat_control_state
;
121 typedef struct nstat_provider
123 struct nstat_provider
*next
;
124 nstat_provider_id_t nstat_provider_id
;
125 size_t nstat_descriptor_length
;
126 errno_t (*nstat_lookup
)(const void *data
, u_int32_t length
, nstat_provider_cookie_t
*out_cookie
);
127 int (*nstat_gone
)(nstat_provider_cookie_t cookie
);
128 errno_t (*nstat_counts
)(nstat_provider_cookie_t cookie
, struct nstat_counts
*out_counts
, int *out_gone
);
129 errno_t (*nstat_watcher_add
)(nstat_control_state
*state
);
130 void (*nstat_watcher_remove
)(nstat_control_state
*state
);
131 errno_t (*nstat_copy_descriptor
)(nstat_provider_cookie_t cookie
, void *data
, u_int32_t len
);
132 void (*nstat_release
)(nstat_provider_cookie_t cookie
, boolean_t locked
);
133 bool (*nstat_reporting_allowed
)(nstat_provider_cookie_t cookie
, uint64_t filter
);
137 typedef struct nstat_src
139 struct nstat_src
*next
;
140 nstat_src_ref_t srcref
;
141 nstat_provider
*provider
;
142 nstat_provider_cookie_t cookie
;
147 static errno_t
nstat_control_send_counts(nstat_control_state
*,
148 nstat_src
*, unsigned long long, u_int16_t
, int *);
149 static int nstat_control_send_description(nstat_control_state
*state
, nstat_src
*src
, u_int64_t context
, u_int16_t hdr_flags
);
150 static int nstat_control_send_update(nstat_control_state
*state
, nstat_src
*src
, u_int64_t context
, u_int16_t hdr_flags
, int *gone
);
151 static errno_t
nstat_control_send_removed(nstat_control_state
*, nstat_src
*);
152 static errno_t
nstat_control_send_goodbye(nstat_control_state
*state
, nstat_src
*src
);
153 static void nstat_control_cleanup_source(nstat_control_state
*state
, nstat_src
*src
, boolean_t
);
154 static bool nstat_control_reporting_allowed(nstat_control_state
*state
, nstat_src
*src
);
155 static boolean_t
nstat_control_begin_query(nstat_control_state
*state
, const nstat_msg_hdr
*hdrp
);
156 static u_int16_t
nstat_control_end_query(nstat_control_state
*state
, nstat_src
*last_src
, boolean_t partial
);
157 static void nstat_ifnet_report_ecn_stats(void);
159 static u_int32_t nstat_udp_watchers
= 0;
160 static u_int32_t nstat_tcp_watchers
= 0;
162 static void nstat_control_register(void);
165 * The lock order is as follows:
167 * socket_lock (inpcb)
171 static volatile OSMallocTag nstat_malloc_tag
= NULL
;
172 static nstat_control_state
*nstat_controls
= NULL
;
173 static uint64_t nstat_idle_time
= 0;
174 static decl_lck_mtx_data(, nstat_mtx
);
176 /* some extern definitions */
177 extern void mbuf_report_peak_usage(void);
178 extern void tcp_report_stats(void);
182 const struct sockaddr
*src
,
183 struct sockaddr
*dst
,
186 if (src
->sa_len
> maxlen
) return;
188 bcopy(src
, dst
, src
->sa_len
);
189 if (src
->sa_family
== AF_INET6
&&
190 src
->sa_len
>= sizeof(struct sockaddr_in6
))
192 struct sockaddr_in6
*sin6
= (struct sockaddr_in6
*)(void *)dst
;
193 if (IN6_IS_SCOPE_EMBED(&sin6
->sin6_addr
))
195 if (sin6
->sin6_scope_id
== 0)
196 sin6
->sin6_scope_id
= ntohs(sin6
->sin6_addr
.s6_addr16
[1]);
197 sin6
->sin6_addr
.s6_addr16
[1] = 0;
203 nstat_ip_to_sockaddr(
204 const struct in_addr
*ip
,
206 struct sockaddr_in
*sin
,
209 if (maxlen
< sizeof(struct sockaddr_in
))
212 sin
->sin_family
= AF_INET
;
213 sin
->sin_len
= sizeof(*sin
);
214 sin
->sin_port
= port
;
219 nstat_ip6_to_sockaddr(
220 const struct in6_addr
*ip6
,
222 struct sockaddr_in6
*sin6
,
225 if (maxlen
< sizeof(struct sockaddr_in6
))
228 sin6
->sin6_family
= AF_INET6
;
229 sin6
->sin6_len
= sizeof(*sin6
);
230 sin6
->sin6_port
= port
;
231 sin6
->sin6_addr
= *ip6
;
232 if (IN6_IS_SCOPE_EMBED(&sin6
->sin6_addr
))
234 sin6
->sin6_scope_id
= ntohs(sin6
->sin6_addr
.s6_addr16
[1]);
235 sin6
->sin6_addr
.s6_addr16
[1] = 0;
240 nstat_inpcb_to_flags(
241 const struct inpcb
*inp
)
245 if ((inp
!= NULL
) && (inp
->inp_last_outifp
!= NULL
))
247 struct ifnet
*ifp
= inp
->inp_last_outifp
;
249 u_int32_t functional_type
= if_functional_type(ifp
);
251 /* Panic if someone adds a functional type without updating ntstat. */
252 VERIFY(0 <= functional_type
&& functional_type
<= IFRTYPE_FUNCTIONAL_LAST
);
254 switch (functional_type
)
256 case IFRTYPE_FUNCTIONAL_UNKNOWN
:
257 flags
|= NSTAT_IFNET_IS_UNKNOWN_TYPE
;
259 case IFRTYPE_FUNCTIONAL_LOOPBACK
:
260 flags
|= NSTAT_IFNET_IS_LOOPBACK
;
262 case IFRTYPE_FUNCTIONAL_WIRED
:
263 flags
|= NSTAT_IFNET_IS_WIRED
;
265 case IFRTYPE_FUNCTIONAL_WIFI_INFRA
:
266 flags
|= NSTAT_IFNET_IS_WIFI
;
268 case IFRTYPE_FUNCTIONAL_WIFI_AWDL
:
269 flags
|= NSTAT_IFNET_IS_WIFI
;
270 flags
|= NSTAT_IFNET_IS_AWDL
;
272 case IFRTYPE_FUNCTIONAL_CELLULAR
:
273 flags
|= NSTAT_IFNET_IS_CELLULAR
;
274 if (inp
->inp_socket
!= NULL
&&
275 (inp
->inp_socket
->so_flags1
& SOF1_CELLFALLBACK
))
276 flags
|= NSTAT_IFNET_VIA_CELLFALLBACK
;
280 if (IFNET_IS_EXPENSIVE(ifp
))
282 flags
|= NSTAT_IFNET_IS_EXPENSIVE
;
287 flags
= NSTAT_IFNET_IS_UNKNOWN_TYPE
;
293 #pragma mark -- Network Statistic Providers --
295 static errno_t
nstat_control_source_add(u_int64_t context
, nstat_control_state
*state
, nstat_provider
*provider
, nstat_provider_cookie_t cookie
);
296 struct nstat_provider
*nstat_providers
= NULL
;
298 static struct nstat_provider
*
299 nstat_find_provider_by_id(
300 nstat_provider_id_t id
)
302 struct nstat_provider
*provider
;
304 for (provider
= nstat_providers
; provider
!= NULL
; provider
= provider
->next
)
306 if (provider
->nstat_provider_id
== id
)
315 nstat_provider_id_t id
,
318 nstat_provider
**out_provider
,
319 nstat_provider_cookie_t
*out_cookie
)
321 *out_provider
= nstat_find_provider_by_id(id
);
322 if (*out_provider
== NULL
)
327 return (*out_provider
)->nstat_lookup(data
, length
, out_cookie
);
330 static void nstat_init_route_provider(void);
331 static void nstat_init_tcp_provider(void);
332 static void nstat_init_udp_provider(void);
333 static void nstat_init_ifnet_provider(void);
335 __private_extern__
void
338 if (nstat_malloc_tag
!= NULL
) return;
340 OSMallocTag tag
= OSMalloc_Tagalloc(NET_STAT_CONTROL_NAME
, OSMT_DEFAULT
);
341 if (!OSCompareAndSwapPtr(NULL
, tag
, &nstat_malloc_tag
))
343 OSMalloc_Tagfree(tag
);
344 tag
= nstat_malloc_tag
;
348 // we need to initialize other things, we do it here as this code path will only be hit once;
349 nstat_init_route_provider();
350 nstat_init_tcp_provider();
351 nstat_init_udp_provider();
352 nstat_init_ifnet_provider();
353 nstat_control_register();
357 #pragma mark -- Aligned Buffer Allocation --
366 nstat_malloc_aligned(
371 struct align_header
*hdr
= NULL
;
372 u_int32_t size
= length
+ sizeof(*hdr
) + alignment
- 1;
374 u_int8_t
*buffer
= OSMalloc(size
, tag
);
375 if (buffer
== NULL
) return NULL
;
377 u_int8_t
*aligned
= buffer
+ sizeof(*hdr
);
378 aligned
= (u_int8_t
*)P2ROUNDUP(aligned
, alignment
);
380 hdr
= (struct align_header
*)(void *)(aligned
- sizeof(*hdr
));
381 hdr
->offset
= aligned
- buffer
;
392 struct align_header
*hdr
= (struct align_header
*)(void *)((u_int8_t
*)buffer
- sizeof(*hdr
));
393 OSFree(((char*)buffer
) - hdr
->offset
, hdr
->length
, tag
);
396 #pragma mark -- Route Provider --
398 static nstat_provider nstat_route_provider
;
404 nstat_provider_cookie_t
*out_cookie
)
406 // rt_lookup doesn't take const params but it doesn't modify the parameters for
407 // the lookup. So...we use a union to eliminate the warning.
411 const struct sockaddr
*const_sa
;
414 const nstat_route_add_param
*param
= (const nstat_route_add_param
*)data
;
417 if (length
< sizeof(*param
))
422 if (param
->dst
.v4
.sin_family
== 0 ||
423 param
->dst
.v4
.sin_family
> AF_MAX
||
424 (param
->mask
.v4
.sin_family
!= 0 && param
->mask
.v4
.sin_family
!= param
->dst
.v4
.sin_family
))
429 if (param
->dst
.v4
.sin_len
> sizeof(param
->dst
) ||
430 (param
->mask
.v4
.sin_family
&& param
->mask
.v4
.sin_len
> sizeof(param
->mask
.v4
.sin_len
)))
434 if ((param
->dst
.v4
.sin_family
== AF_INET
&&
435 param
->dst
.v4
.sin_len
< sizeof(struct sockaddr_in
)) ||
436 (param
->dst
.v6
.sin6_family
== AF_INET6
&&
437 param
->dst
.v6
.sin6_len
< sizeof(struct sockaddr_in6
)))
442 dst
.const_sa
= (const struct sockaddr
*)¶m
->dst
;
443 mask
.const_sa
= param
->mask
.v4
.sin_family
? (const struct sockaddr
*)¶m
->mask
: NULL
;
445 struct radix_node_head
*rnh
= rt_tables
[dst
.sa
->sa_family
];
446 if (rnh
== NULL
) return EAFNOSUPPORT
;
448 lck_mtx_lock(rnh_lock
);
449 struct rtentry
*rt
= rt_lookup(TRUE
, dst
.sa
, mask
.sa
, rnh
, param
->ifindex
);
450 lck_mtx_unlock(rnh_lock
);
452 if (rt
) *out_cookie
= (nstat_provider_cookie_t
)rt
;
454 return rt
? 0 : ENOENT
;
459 nstat_provider_cookie_t cookie
)
461 struct rtentry
*rt
= (struct rtentry
*)cookie
;
462 return ((rt
->rt_flags
& RTF_UP
) == 0) ? 1 : 0;
467 nstat_provider_cookie_t cookie
,
468 struct nstat_counts
*out_counts
,
471 struct rtentry
*rt
= (struct rtentry
*)cookie
;
472 struct nstat_counts
*rt_stats
= rt
->rt_stats
;
474 if (out_gone
) *out_gone
= 0;
476 if (out_gone
&& (rt
->rt_flags
& RTF_UP
) == 0) *out_gone
= 1;
480 atomic_get_64(out_counts
->nstat_rxpackets
, &rt_stats
->nstat_rxpackets
);
481 atomic_get_64(out_counts
->nstat_rxbytes
, &rt_stats
->nstat_rxbytes
);
482 atomic_get_64(out_counts
->nstat_txpackets
, &rt_stats
->nstat_txpackets
);
483 atomic_get_64(out_counts
->nstat_txbytes
, &rt_stats
->nstat_txbytes
);
484 out_counts
->nstat_rxduplicatebytes
= rt_stats
->nstat_rxduplicatebytes
;
485 out_counts
->nstat_rxoutoforderbytes
= rt_stats
->nstat_rxoutoforderbytes
;
486 out_counts
->nstat_txretransmit
= rt_stats
->nstat_txretransmit
;
487 out_counts
->nstat_connectattempts
= rt_stats
->nstat_connectattempts
;
488 out_counts
->nstat_connectsuccesses
= rt_stats
->nstat_connectsuccesses
;
489 out_counts
->nstat_min_rtt
= rt_stats
->nstat_min_rtt
;
490 out_counts
->nstat_avg_rtt
= rt_stats
->nstat_avg_rtt
;
491 out_counts
->nstat_var_rtt
= rt_stats
->nstat_var_rtt
;
492 out_counts
->nstat_cell_rxbytes
= out_counts
->nstat_cell_txbytes
= 0;
496 bzero(out_counts
, sizeof(*out_counts
));
504 nstat_provider_cookie_t cookie
,
507 rtfree((struct rtentry
*)cookie
);
510 static u_int32_t nstat_route_watchers
= 0;
513 nstat_route_walktree_add(
514 struct radix_node
*rn
,
518 struct rtentry
*rt
= (struct rtentry
*)rn
;
519 nstat_control_state
*state
= (nstat_control_state
*)context
;
521 lck_mtx_assert(rnh_lock
, LCK_MTX_ASSERT_OWNED
);
523 /* RTF_UP can't change while rnh_lock is held */
524 if ((rt
->rt_flags
& RTF_UP
) != 0)
526 /* Clear RTPRF_OURS if the route is still usable */
528 if (rt_validate(rt
)) {
529 RT_ADDREF_LOCKED(rt
);
536 /* Otherwise if RTF_CONDEMNED, treat it as if it were down */
540 result
= nstat_control_source_add(0, state
, &nstat_route_provider
, rt
);
549 nstat_route_add_watcher(
550 nstat_control_state
*state
)
554 OSIncrementAtomic(&nstat_route_watchers
);
556 lck_mtx_lock(rnh_lock
);
557 for (i
= 1; i
< AF_MAX
; i
++)
559 struct radix_node_head
*rnh
;
563 result
= rnh
->rnh_walktree(rnh
, nstat_route_walktree_add
, state
);
569 lck_mtx_unlock(rnh_lock
);
574 __private_extern__
void
575 nstat_route_new_entry(
578 if (nstat_route_watchers
== 0)
581 lck_mtx_lock(&nstat_mtx
);
582 if ((rt
->rt_flags
& RTF_UP
) != 0)
584 nstat_control_state
*state
;
585 for (state
= nstat_controls
; state
; state
= state
->ncs_next
)
587 if ((state
->ncs_watching
& (1 << NSTAT_PROVIDER_ROUTE
)) != 0)
589 // this client is watching routes
590 // acquire a reference for the route
593 // add the source, if that fails, release the reference
594 if (nstat_control_source_add(0, state
, &nstat_route_provider
, rt
) != 0)
599 lck_mtx_unlock(&nstat_mtx
);
603 nstat_route_remove_watcher(
604 __unused nstat_control_state
*state
)
606 OSDecrementAtomic(&nstat_route_watchers
);
610 nstat_route_copy_descriptor(
611 nstat_provider_cookie_t cookie
,
615 nstat_route_descriptor
*desc
= (nstat_route_descriptor
*)data
;
616 if (len
< sizeof(*desc
))
620 bzero(desc
, sizeof(*desc
));
622 struct rtentry
*rt
= (struct rtentry
*)cookie
;
623 desc
->id
= (uint64_t)VM_KERNEL_ADDRPERM(rt
);
624 desc
->parent_id
= (uint64_t)VM_KERNEL_ADDRPERM(rt
->rt_parent
);
625 desc
->gateway_id
= (uint64_t)VM_KERNEL_ADDRPERM(rt
->rt_gwroute
);
630 if ((sa
= rt_key(rt
)))
631 nstat_copy_sa_out(sa
, &desc
->dst
.sa
, sizeof(desc
->dst
));
634 if ((sa
= rt_mask(rt
)) && sa
->sa_len
<= sizeof(desc
->mask
))
635 memcpy(&desc
->mask
, sa
, sa
->sa_len
);
638 if ((sa
= rt
->rt_gateway
))
639 nstat_copy_sa_out(sa
, &desc
->gateway
.sa
, sizeof(desc
->gateway
));
642 desc
->ifindex
= rt
->rt_ifp
->if_index
;
644 desc
->flags
= rt
->rt_flags
;
650 nstat_init_route_provider(void)
652 bzero(&nstat_route_provider
, sizeof(nstat_route_provider
));
653 nstat_route_provider
.nstat_descriptor_length
= sizeof(nstat_route_descriptor
);
654 nstat_route_provider
.nstat_provider_id
= NSTAT_PROVIDER_ROUTE
;
655 nstat_route_provider
.nstat_lookup
= nstat_route_lookup
;
656 nstat_route_provider
.nstat_gone
= nstat_route_gone
;
657 nstat_route_provider
.nstat_counts
= nstat_route_counts
;
658 nstat_route_provider
.nstat_release
= nstat_route_release
;
659 nstat_route_provider
.nstat_watcher_add
= nstat_route_add_watcher
;
660 nstat_route_provider
.nstat_watcher_remove
= nstat_route_remove_watcher
;
661 nstat_route_provider
.nstat_copy_descriptor
= nstat_route_copy_descriptor
;
662 nstat_route_provider
.next
= nstat_providers
;
663 nstat_providers
= &nstat_route_provider
;
666 #pragma mark -- Route Collection --
668 static struct nstat_counts
*
672 struct nstat_counts
*result
= rte
->rt_stats
;
673 if (result
) return result
;
675 if (nstat_malloc_tag
== NULL
) nstat_init();
677 result
= nstat_malloc_aligned(sizeof(*result
), sizeof(u_int64_t
), nstat_malloc_tag
);
678 if (!result
) return result
;
680 bzero(result
, sizeof(*result
));
682 if (!OSCompareAndSwapPtr(NULL
, result
, &rte
->rt_stats
))
684 nstat_free_aligned(result
, nstat_malloc_tag
);
685 result
= rte
->rt_stats
;
691 __private_extern__
void
697 nstat_free_aligned(rte
->rt_stats
, nstat_malloc_tag
);
698 rte
->rt_stats
= NULL
;
702 __private_extern__
void
703 nstat_route_connect_attempt(
708 struct nstat_counts
* stats
= nstat_route_attach(rte
);
711 OSIncrementAtomic(&stats
->nstat_connectattempts
);
714 rte
= rte
->rt_parent
;
718 __private_extern__
void
719 nstat_route_connect_success(
725 struct nstat_counts
* stats
= nstat_route_attach(rte
);
728 OSIncrementAtomic(&stats
->nstat_connectsuccesses
);
731 rte
= rte
->rt_parent
;
735 __private_extern__
void
744 struct nstat_counts
* stats
= nstat_route_attach(rte
);
747 if ((flags
& NSTAT_TX_FLAG_RETRANSMIT
) != 0)
749 OSAddAtomic(bytes
, &stats
->nstat_txretransmit
);
753 OSAddAtomic64((SInt64
)packets
, (SInt64
*)&stats
->nstat_txpackets
);
754 OSAddAtomic64((SInt64
)bytes
, (SInt64
*)&stats
->nstat_txbytes
);
758 rte
= rte
->rt_parent
;
762 __private_extern__
void
771 struct nstat_counts
* stats
= nstat_route_attach(rte
);
776 OSAddAtomic64((SInt64
)packets
, (SInt64
*)&stats
->nstat_rxpackets
);
777 OSAddAtomic64((SInt64
)bytes
, (SInt64
*)&stats
->nstat_rxbytes
);
781 if (flags
& NSTAT_RX_FLAG_OUT_OF_ORDER
)
782 OSAddAtomic(bytes
, &stats
->nstat_rxoutoforderbytes
);
783 if (flags
& NSTAT_RX_FLAG_DUPLICATE
)
784 OSAddAtomic(bytes
, &stats
->nstat_rxduplicatebytes
);
788 rte
= rte
->rt_parent
;
792 __private_extern__
void
798 const int32_t factor
= 8;
802 struct nstat_counts
* stats
= nstat_route_attach(rte
);
811 oldrtt
= stats
->nstat_avg_rtt
;
818 newrtt
= oldrtt
- (oldrtt
- (int32_t)rtt
) / factor
;
820 if (oldrtt
== newrtt
) break;
821 } while (!OSCompareAndSwap(oldrtt
, newrtt
, &stats
->nstat_avg_rtt
));
826 oldrtt
= stats
->nstat_min_rtt
;
827 if (oldrtt
!= 0 && oldrtt
< (int32_t)rtt
)
831 } while (!OSCompareAndSwap(oldrtt
, rtt
, &stats
->nstat_min_rtt
));
836 oldrtt
= stats
->nstat_var_rtt
;
843 newrtt
= oldrtt
- (oldrtt
- (int32_t)rtt_var
) / factor
;
845 if (oldrtt
== newrtt
) break;
846 } while (!OSCompareAndSwap(oldrtt
, newrtt
, &stats
->nstat_var_rtt
));
849 rte
= rte
->rt_parent
;
854 #pragma mark -- TCP Provider --
857 * Due to the way the kernel deallocates a process (the process structure
858 * might be gone by the time we get the PCB detach notification),
859 * we need to cache the process name. Without this, proc_name() would
860 * return null and the process name would never be sent to userland.
862 * For UDP sockets, we also store the cached the connection tuples along with
863 * the interface index. This is necessary because when UDP sockets are
864 * disconnected, the connection tuples are forever lost from the inpcb, thus
865 * we need to keep track of the last call to connect() in ntstat.
867 struct nstat_tucookie
{
869 char pname
[MAXCOMLEN
+1];
873 struct sockaddr_in v4
;
874 struct sockaddr_in6 v6
;
878 struct sockaddr_in v4
;
879 struct sockaddr_in6 v6
;
881 unsigned int if_index
;
882 uint16_t ifnet_properties
;
885 static struct nstat_tucookie
*
886 nstat_tucookie_alloc_internal(
891 struct nstat_tucookie
*cookie
;
893 cookie
= OSMalloc(sizeof(*cookie
), nstat_malloc_tag
);
897 lck_mtx_assert(&nstat_mtx
, LCK_MTX_ASSERT_NOTOWNED
);
898 if (ref
&& in_pcb_checkstate(inp
, WNT_ACQUIRE
, locked
) == WNT_STOPUSING
)
900 OSFree(cookie
, sizeof(*cookie
), nstat_malloc_tag
);
903 bzero(cookie
, sizeof(*cookie
));
905 proc_name(inp
->inp_socket
->last_pid
, cookie
->pname
,
906 sizeof(cookie
->pname
));
908 * We only increment the reference count for UDP sockets because we
909 * only cache UDP socket tuples.
911 if (SOCK_PROTO(inp
->inp_socket
) == IPPROTO_UDP
)
912 OSIncrementAtomic(&inp
->inp_nstat_refcnt
);
917 static struct nstat_tucookie
*
918 nstat_tucookie_alloc(
921 return nstat_tucookie_alloc_internal(inp
, false, false);
924 static struct nstat_tucookie
*
925 nstat_tucookie_alloc_ref(
928 return nstat_tucookie_alloc_internal(inp
, true, false);
931 static struct nstat_tucookie
*
932 nstat_tucookie_alloc_ref_locked(
935 return nstat_tucookie_alloc_internal(inp
, true, true);
939 nstat_tucookie_release_internal(
940 struct nstat_tucookie
*cookie
,
943 if (SOCK_PROTO(cookie
->inp
->inp_socket
) == IPPROTO_UDP
)
944 OSDecrementAtomic(&cookie
->inp
->inp_nstat_refcnt
);
945 in_pcb_checkstate(cookie
->inp
, WNT_RELEASE
, inplock
);
946 OSFree(cookie
, sizeof(*cookie
), nstat_malloc_tag
);
950 nstat_tucookie_release(
951 struct nstat_tucookie
*cookie
)
953 nstat_tucookie_release_internal(cookie
, false);
957 nstat_tucookie_release_locked(
958 struct nstat_tucookie
*cookie
)
960 nstat_tucookie_release_internal(cookie
, true);
964 static nstat_provider nstat_tcp_provider
;
968 struct inpcbinfo
*inpinfo
,
971 nstat_provider_cookie_t
*out_cookie
)
973 struct inpcb
*inp
= NULL
;
975 // parameter validation
976 const nstat_tcp_add_param
*param
= (const nstat_tcp_add_param
*)data
;
977 if (length
< sizeof(*param
))
982 // src and dst must match
983 if (param
->remote
.v4
.sin_family
!= 0 &&
984 param
->remote
.v4
.sin_family
!= param
->local
.v4
.sin_family
)
990 switch (param
->local
.v4
.sin_family
)
994 if (param
->local
.v4
.sin_len
!= sizeof(param
->local
.v4
) ||
995 (param
->remote
.v4
.sin_family
!= 0 &&
996 param
->remote
.v4
.sin_len
!= sizeof(param
->remote
.v4
)))
1001 inp
= in_pcblookup_hash(inpinfo
, param
->remote
.v4
.sin_addr
, param
->remote
.v4
.sin_port
,
1002 param
->local
.v4
.sin_addr
, param
->local
.v4
.sin_port
, 1, NULL
);
1011 const struct in6_addr
*in6c
;
1012 struct in6_addr
*in6
;
1015 if (param
->local
.v6
.sin6_len
!= sizeof(param
->local
.v6
) ||
1016 (param
->remote
.v6
.sin6_family
!= 0 &&
1017 param
->remote
.v6
.sin6_len
!= sizeof(param
->remote
.v6
)))
1022 local
.in6c
= ¶m
->local
.v6
.sin6_addr
;
1023 remote
.in6c
= ¶m
->remote
.v6
.sin6_addr
;
1025 inp
= in6_pcblookup_hash(inpinfo
, remote
.in6
, param
->remote
.v6
.sin6_port
,
1026 local
.in6
, param
->local
.v6
.sin6_port
, 1, NULL
);
1038 // At this point we have a ref to the inpcb
1039 *out_cookie
= nstat_tucookie_alloc(inp
);
1040 if (*out_cookie
== NULL
)
1041 in_pcb_checkstate(inp
, WNT_RELEASE
, 0);
1050 nstat_provider_cookie_t
*out_cookie
)
1052 return nstat_tcpudp_lookup(&tcbinfo
, data
, length
, out_cookie
);
1057 nstat_provider_cookie_t cookie
)
1059 struct nstat_tucookie
*tucookie
=
1060 (struct nstat_tucookie
*)cookie
;
1064 return (!(inp
= tucookie
->inp
) ||
1065 !(tp
= intotcpcb(inp
)) ||
1066 inp
->inp_state
== INPCB_STATE_DEAD
) ? 1 : 0;
1071 nstat_provider_cookie_t cookie
,
1072 struct nstat_counts
*out_counts
,
1075 struct nstat_tucookie
*tucookie
=
1076 (struct nstat_tucookie
*)cookie
;
1079 bzero(out_counts
, sizeof(*out_counts
));
1081 if (out_gone
) *out_gone
= 0;
1083 // if the pcb is in the dead state, we should stop using it
1084 if (nstat_tcp_gone(cookie
))
1086 if (out_gone
) *out_gone
= 1;
1087 if (!(inp
= tucookie
->inp
) || !intotcpcb(inp
))
1090 inp
= tucookie
->inp
;
1091 struct tcpcb
*tp
= intotcpcb(inp
);
1093 atomic_get_64(out_counts
->nstat_rxpackets
, &inp
->inp_stat
->rxpackets
);
1094 atomic_get_64(out_counts
->nstat_rxbytes
, &inp
->inp_stat
->rxbytes
);
1095 atomic_get_64(out_counts
->nstat_txpackets
, &inp
->inp_stat
->txpackets
);
1096 atomic_get_64(out_counts
->nstat_txbytes
, &inp
->inp_stat
->txbytes
);
1097 out_counts
->nstat_rxduplicatebytes
= tp
->t_stat
.rxduplicatebytes
;
1098 out_counts
->nstat_rxoutoforderbytes
= tp
->t_stat
.rxoutoforderbytes
;
1099 out_counts
->nstat_txretransmit
= tp
->t_stat
.txretransmitbytes
;
1100 out_counts
->nstat_connectattempts
= tp
->t_state
>= TCPS_SYN_SENT
? 1 : 0;
1101 out_counts
->nstat_connectsuccesses
= tp
->t_state
>= TCPS_ESTABLISHED
? 1 : 0;
1102 out_counts
->nstat_avg_rtt
= tp
->t_srtt
;
1103 out_counts
->nstat_min_rtt
= tp
->t_rttbest
;
1104 out_counts
->nstat_var_rtt
= tp
->t_rttvar
;
1105 if (out_counts
->nstat_avg_rtt
< out_counts
->nstat_min_rtt
)
1106 out_counts
->nstat_min_rtt
= out_counts
->nstat_avg_rtt
;
1107 atomic_get_64(out_counts
->nstat_cell_rxbytes
, &inp
->inp_cstat
->rxbytes
);
1108 atomic_get_64(out_counts
->nstat_cell_txbytes
, &inp
->inp_cstat
->txbytes
);
1109 atomic_get_64(out_counts
->nstat_wifi_rxbytes
, &inp
->inp_wstat
->rxbytes
);
1110 atomic_get_64(out_counts
->nstat_wifi_txbytes
, &inp
->inp_wstat
->txbytes
);
1111 atomic_get_64(out_counts
->nstat_wired_rxbytes
, &inp
->inp_Wstat
->rxbytes
);
1112 atomic_get_64(out_counts
->nstat_wired_txbytes
, &inp
->inp_Wstat
->txbytes
);
1119 nstat_provider_cookie_t cookie
,
1122 struct nstat_tucookie
*tucookie
=
1123 (struct nstat_tucookie
*)cookie
;
1125 nstat_tucookie_release_internal(tucookie
, locked
);
1129 nstat_tcp_add_watcher(
1130 nstat_control_state
*state
)
1132 OSIncrementAtomic(&nstat_tcp_watchers
);
1134 lck_rw_lock_shared(tcbinfo
.ipi_lock
);
1136 // Add all current tcp inpcbs. Ignore those in timewait
1138 struct nstat_tucookie
*cookie
;
1139 LIST_FOREACH(inp
, tcbinfo
.ipi_listhead
, inp_list
)
1141 cookie
= nstat_tucookie_alloc_ref(inp
);
1144 if (nstat_control_source_add(0, state
, &nstat_tcp_provider
,
1147 nstat_tucookie_release(cookie
);
1152 lck_rw_done(tcbinfo
.ipi_lock
);
1158 nstat_tcp_remove_watcher(
1159 __unused nstat_control_state
*state
)
1161 OSDecrementAtomic(&nstat_tcp_watchers
);
1164 __private_extern__
void
1168 struct nstat_tucookie
*cookie
;
1170 if (nstat_tcp_watchers
== 0)
1173 socket_lock(inp
->inp_socket
, 0);
1174 lck_mtx_lock(&nstat_mtx
);
1175 nstat_control_state
*state
;
1176 for (state
= nstat_controls
; state
; state
= state
->ncs_next
)
1178 if ((state
->ncs_watching
& (1 << NSTAT_PROVIDER_TCP
)) != 0)
1180 // this client is watching tcp
1181 // acquire a reference for it
1182 cookie
= nstat_tucookie_alloc_ref_locked(inp
);
1185 // add the source, if that fails, release the reference
1186 if (nstat_control_source_add(0, state
,
1187 &nstat_tcp_provider
, cookie
) != 0)
1189 nstat_tucookie_release_locked(cookie
);
1194 lck_mtx_unlock(&nstat_mtx
);
1195 socket_unlock(inp
->inp_socket
, 0);
1198 __private_extern__
void
1199 nstat_pcb_detach(struct inpcb
*inp
)
1201 nstat_control_state
*state
;
1202 nstat_src
*src
, *prevsrc
;
1203 nstat_src
*dead_list
= NULL
;
1204 struct nstat_tucookie
*tucookie
;
1207 if (inp
== NULL
|| (nstat_tcp_watchers
== 0 && nstat_udp_watchers
== 0))
1210 lck_mtx_lock(&nstat_mtx
);
1211 for (state
= nstat_controls
; state
; state
= state
->ncs_next
)
1213 lck_mtx_lock(&state
->mtx
);
1214 for (prevsrc
= NULL
, src
= state
->ncs_srcs
; src
;
1215 prevsrc
= src
, src
= src
->next
)
1217 tucookie
= (struct nstat_tucookie
*)src
->cookie
;
1218 if (tucookie
->inp
== inp
)
1224 result
= nstat_control_send_goodbye(state
, src
);
1227 prevsrc
->next
= src
->next
;
1229 state
->ncs_srcs
= src
->next
;
1231 src
->next
= dead_list
;
1234 lck_mtx_unlock(&state
->mtx
);
1236 lck_mtx_unlock(&nstat_mtx
);
1240 dead_list
= src
->next
;
1242 nstat_control_cleanup_source(NULL
, src
, TRUE
);
1246 __private_extern__
void
1247 nstat_pcb_cache(struct inpcb
*inp
)
1249 nstat_control_state
*state
;
1251 struct nstat_tucookie
*tucookie
;
1253 if (inp
== NULL
|| nstat_udp_watchers
== 0 ||
1254 inp
->inp_nstat_refcnt
== 0)
1256 VERIFY(SOCK_PROTO(inp
->inp_socket
) == IPPROTO_UDP
);
1257 lck_mtx_lock(&nstat_mtx
);
1258 for (state
= nstat_controls
; state
; state
= state
->ncs_next
) {
1259 lck_mtx_lock(&state
->mtx
);
1260 for (src
= state
->ncs_srcs
; src
; src
= src
->next
)
1262 tucookie
= (struct nstat_tucookie
*)src
->cookie
;
1263 if (tucookie
->inp
== inp
)
1265 if (inp
->inp_vflag
& INP_IPV6
)
1267 nstat_ip6_to_sockaddr(&inp
->in6p_laddr
,
1269 &tucookie
->local
.v6
,
1270 sizeof(tucookie
->local
));
1271 nstat_ip6_to_sockaddr(&inp
->in6p_faddr
,
1273 &tucookie
->remote
.v6
,
1274 sizeof(tucookie
->remote
));
1276 else if (inp
->inp_vflag
& INP_IPV4
)
1278 nstat_ip_to_sockaddr(&inp
->inp_laddr
,
1280 &tucookie
->local
.v4
,
1281 sizeof(tucookie
->local
));
1282 nstat_ip_to_sockaddr(&inp
->inp_faddr
,
1284 &tucookie
->remote
.v4
,
1285 sizeof(tucookie
->remote
));
1287 if (inp
->inp_last_outifp
)
1288 tucookie
->if_index
=
1289 inp
->inp_last_outifp
->if_index
;
1291 tucookie
->ifnet_properties
= nstat_inpcb_to_flags(inp
);
1292 tucookie
->cached
= true;
1296 lck_mtx_unlock(&state
->mtx
);
1298 lck_mtx_unlock(&nstat_mtx
);
1301 __private_extern__
void
1302 nstat_pcb_invalidate_cache(struct inpcb
*inp
)
1304 nstat_control_state
*state
;
1306 struct nstat_tucookie
*tucookie
;
1308 if (inp
== NULL
|| nstat_udp_watchers
== 0 ||
1309 inp
->inp_nstat_refcnt
== 0)
1311 VERIFY(SOCK_PROTO(inp
->inp_socket
) == IPPROTO_UDP
);
1312 lck_mtx_lock(&nstat_mtx
);
1313 for (state
= nstat_controls
; state
; state
= state
->ncs_next
) {
1314 lck_mtx_lock(&state
->mtx
);
1315 for (src
= state
->ncs_srcs
; src
; src
= src
->next
)
1317 tucookie
= (struct nstat_tucookie
*)src
->cookie
;
1318 if (tucookie
->inp
== inp
)
1320 tucookie
->cached
= false;
1324 lck_mtx_unlock(&state
->mtx
);
1326 lck_mtx_unlock(&nstat_mtx
);
1330 nstat_tcp_copy_descriptor(
1331 nstat_provider_cookie_t cookie
,
1335 if (len
< sizeof(nstat_tcp_descriptor
))
1340 if (nstat_tcp_gone(cookie
))
1343 nstat_tcp_descriptor
*desc
= (nstat_tcp_descriptor
*)data
;
1344 struct nstat_tucookie
*tucookie
=
1345 (struct nstat_tucookie
*)cookie
;
1346 struct inpcb
*inp
= tucookie
->inp
;
1347 struct tcpcb
*tp
= intotcpcb(inp
);
1348 bzero(desc
, sizeof(*desc
));
1350 if (inp
->inp_vflag
& INP_IPV6
)
1352 nstat_ip6_to_sockaddr(&inp
->in6p_laddr
, inp
->inp_lport
,
1353 &desc
->local
.v6
, sizeof(desc
->local
));
1354 nstat_ip6_to_sockaddr(&inp
->in6p_faddr
, inp
->inp_fport
,
1355 &desc
->remote
.v6
, sizeof(desc
->remote
));
1357 else if (inp
->inp_vflag
& INP_IPV4
)
1359 nstat_ip_to_sockaddr(&inp
->inp_laddr
, inp
->inp_lport
,
1360 &desc
->local
.v4
, sizeof(desc
->local
));
1361 nstat_ip_to_sockaddr(&inp
->inp_faddr
, inp
->inp_fport
,
1362 &desc
->remote
.v4
, sizeof(desc
->remote
));
1365 desc
->state
= intotcpcb(inp
)->t_state
;
1366 desc
->ifindex
= (inp
->inp_last_outifp
== NULL
) ? 0 :
1367 inp
->inp_last_outifp
->if_index
;
1369 // danger - not locked, values could be bogus
1370 desc
->txunacked
= tp
->snd_max
- tp
->snd_una
;
1371 desc
->txwindow
= tp
->snd_wnd
;
1372 desc
->txcwindow
= tp
->snd_cwnd
;
1374 if (CC_ALGO(tp
)->name
!= NULL
) {
1375 strlcpy(desc
->cc_algo
, CC_ALGO(tp
)->name
,
1376 sizeof(desc
->cc_algo
));
1379 struct socket
*so
= inp
->inp_socket
;
1382 // TBD - take the socket lock around these to make sure
1384 desc
->upid
= so
->last_upid
;
1385 desc
->pid
= so
->last_pid
;
1386 desc
->traffic_class
= so
->so_traffic_class
;
1387 desc
->traffic_mgt_flags
= so
->so_traffic_mgt_flags
;
1388 proc_name(desc
->pid
, desc
->pname
, sizeof(desc
->pname
));
1389 if (desc
->pname
[0] == 0)
1391 strlcpy(desc
->pname
, tucookie
->pname
,
1392 sizeof(desc
->pname
));
1396 desc
->pname
[sizeof(desc
->pname
) - 1] = 0;
1397 strlcpy(tucookie
->pname
, desc
->pname
,
1398 sizeof(tucookie
->pname
));
1400 memcpy(desc
->uuid
, so
->last_uuid
, sizeof(so
->last_uuid
));
1401 memcpy(desc
->vuuid
, so
->so_vuuid
, sizeof(so
->so_vuuid
));
1402 if (so
->so_flags
& SOF_DELEGATED
) {
1403 desc
->eupid
= so
->e_upid
;
1404 desc
->epid
= so
->e_pid
;
1405 memcpy(desc
->euuid
, so
->e_uuid
, sizeof(so
->e_uuid
));
1407 desc
->eupid
= desc
->upid
;
1408 desc
->epid
= desc
->pid
;
1409 memcpy(desc
->euuid
, desc
->uuid
, sizeof(desc
->uuid
));
1411 desc
->sndbufsize
= so
->so_snd
.sb_hiwat
;
1412 desc
->sndbufused
= so
->so_snd
.sb_cc
;
1413 desc
->rcvbufsize
= so
->so_rcv
.sb_hiwat
;
1414 desc
->rcvbufused
= so
->so_rcv
.sb_cc
;
1417 tcp_get_connectivity_status(tp
, &desc
->connstatus
);
1418 desc
->ifnet_properties
= nstat_inpcb_to_flags(inp
);
1423 nstat_tcpudp_reporting_allowed(nstat_provider_cookie_t cookie
, uint64_t filter
)
1427 /* Only apply interface filter if at least one is allowed. */
1428 if ((filter
& NSTAT_FILTER_ACCEPT_ALL
) != 0)
1430 struct nstat_tucookie
*tucookie
= (struct nstat_tucookie
*)cookie
;
1431 struct inpcb
*inp
= tucookie
->inp
;
1433 uint16_t interface_properties
= nstat_inpcb_to_flags(inp
);
1435 /* For now, just check on interface type. */
1436 retval
= ((filter
& interface_properties
) != 0);
1442 nstat_init_tcp_provider(void)
1444 bzero(&nstat_tcp_provider
, sizeof(nstat_tcp_provider
));
1445 nstat_tcp_provider
.nstat_descriptor_length
= sizeof(nstat_tcp_descriptor
);
1446 nstat_tcp_provider
.nstat_provider_id
= NSTAT_PROVIDER_TCP
;
1447 nstat_tcp_provider
.nstat_lookup
= nstat_tcp_lookup
;
1448 nstat_tcp_provider
.nstat_gone
= nstat_tcp_gone
;
1449 nstat_tcp_provider
.nstat_counts
= nstat_tcp_counts
;
1450 nstat_tcp_provider
.nstat_release
= nstat_tcp_release
;
1451 nstat_tcp_provider
.nstat_watcher_add
= nstat_tcp_add_watcher
;
1452 nstat_tcp_provider
.nstat_watcher_remove
= nstat_tcp_remove_watcher
;
1453 nstat_tcp_provider
.nstat_copy_descriptor
= nstat_tcp_copy_descriptor
;
1454 nstat_tcp_provider
.nstat_reporting_allowed
= nstat_tcpudp_reporting_allowed
;
1455 nstat_tcp_provider
.next
= nstat_providers
;
1456 nstat_providers
= &nstat_tcp_provider
;
1459 #pragma mark -- UDP Provider --
1461 static nstat_provider nstat_udp_provider
;
1467 nstat_provider_cookie_t
*out_cookie
)
1469 return nstat_tcpudp_lookup(&udbinfo
, data
, length
, out_cookie
);
1474 nstat_provider_cookie_t cookie
)
1476 struct nstat_tucookie
*tucookie
=
1477 (struct nstat_tucookie
*)cookie
;
1480 return (!(inp
= tucookie
->inp
) ||
1481 inp
->inp_state
== INPCB_STATE_DEAD
) ? 1 : 0;
1486 nstat_provider_cookie_t cookie
,
1487 struct nstat_counts
*out_counts
,
1490 struct nstat_tucookie
*tucookie
=
1491 (struct nstat_tucookie
*)cookie
;
1493 if (out_gone
) *out_gone
= 0;
1495 // if the pcb is in the dead state, we should stop using it
1496 if (nstat_udp_gone(cookie
))
1498 if (out_gone
) *out_gone
= 1;
1502 struct inpcb
*inp
= tucookie
->inp
;
1504 atomic_get_64(out_counts
->nstat_rxpackets
, &inp
->inp_stat
->rxpackets
);
1505 atomic_get_64(out_counts
->nstat_rxbytes
, &inp
->inp_stat
->rxbytes
);
1506 atomic_get_64(out_counts
->nstat_txpackets
, &inp
->inp_stat
->txpackets
);
1507 atomic_get_64(out_counts
->nstat_txbytes
, &inp
->inp_stat
->txbytes
);
1508 atomic_get_64(out_counts
->nstat_cell_rxbytes
, &inp
->inp_cstat
->rxbytes
);
1509 atomic_get_64(out_counts
->nstat_cell_txbytes
, &inp
->inp_cstat
->txbytes
);
1510 atomic_get_64(out_counts
->nstat_wifi_rxbytes
, &inp
->inp_wstat
->rxbytes
);
1511 atomic_get_64(out_counts
->nstat_wifi_txbytes
, &inp
->inp_wstat
->txbytes
);
1512 atomic_get_64(out_counts
->nstat_wired_rxbytes
, &inp
->inp_Wstat
->rxbytes
);
1513 atomic_get_64(out_counts
->nstat_wired_txbytes
, &inp
->inp_Wstat
->txbytes
);
1520 nstat_provider_cookie_t cookie
,
1523 struct nstat_tucookie
*tucookie
=
1524 (struct nstat_tucookie
*)cookie
;
1526 nstat_tucookie_release_internal(tucookie
, locked
);
1530 nstat_udp_add_watcher(
1531 nstat_control_state
*state
)
1534 struct nstat_tucookie
*cookie
;
1536 OSIncrementAtomic(&nstat_udp_watchers
);
1538 lck_rw_lock_shared(udbinfo
.ipi_lock
);
1539 // Add all current UDP inpcbs.
1540 LIST_FOREACH(inp
, udbinfo
.ipi_listhead
, inp_list
)
1542 cookie
= nstat_tucookie_alloc_ref(inp
);
1545 if (nstat_control_source_add(0, state
, &nstat_udp_provider
,
1548 nstat_tucookie_release(cookie
);
1553 lck_rw_done(udbinfo
.ipi_lock
);
1559 nstat_udp_remove_watcher(
1560 __unused nstat_control_state
*state
)
1562 OSDecrementAtomic(&nstat_udp_watchers
);
1565 __private_extern__
void
1569 struct nstat_tucookie
*cookie
;
1571 if (nstat_udp_watchers
== 0)
1574 socket_lock(inp
->inp_socket
, 0);
1575 lck_mtx_lock(&nstat_mtx
);
1576 nstat_control_state
*state
;
1577 for (state
= nstat_controls
; state
; state
= state
->ncs_next
)
1579 if ((state
->ncs_watching
& (1 << NSTAT_PROVIDER_UDP
)) != 0)
1581 // this client is watching tcp
1582 // acquire a reference for it
1583 cookie
= nstat_tucookie_alloc_ref_locked(inp
);
1586 // add the source, if that fails, release the reference
1587 if (nstat_control_source_add(0, state
,
1588 &nstat_udp_provider
, cookie
) != 0)
1590 nstat_tucookie_release_locked(cookie
);
1595 lck_mtx_unlock(&nstat_mtx
);
1596 socket_unlock(inp
->inp_socket
, 0);
1600 nstat_udp_copy_descriptor(
1601 nstat_provider_cookie_t cookie
,
1605 if (len
< sizeof(nstat_udp_descriptor
))
1610 if (nstat_udp_gone(cookie
))
1613 struct nstat_tucookie
*tucookie
=
1614 (struct nstat_tucookie
*)cookie
;
1615 nstat_udp_descriptor
*desc
= (nstat_udp_descriptor
*)data
;
1616 struct inpcb
*inp
= tucookie
->inp
;
1618 bzero(desc
, sizeof(*desc
));
1620 if (tucookie
->cached
== false) {
1621 if (inp
->inp_vflag
& INP_IPV6
)
1623 nstat_ip6_to_sockaddr(&inp
->in6p_laddr
, inp
->inp_lport
,
1624 &desc
->local
.v6
, sizeof(desc
->local
.v6
));
1625 nstat_ip6_to_sockaddr(&inp
->in6p_faddr
, inp
->inp_fport
,
1626 &desc
->remote
.v6
, sizeof(desc
->remote
.v6
));
1628 else if (inp
->inp_vflag
& INP_IPV4
)
1630 nstat_ip_to_sockaddr(&inp
->inp_laddr
, inp
->inp_lport
,
1631 &desc
->local
.v4
, sizeof(desc
->local
.v4
));
1632 nstat_ip_to_sockaddr(&inp
->inp_faddr
, inp
->inp_fport
,
1633 &desc
->remote
.v4
, sizeof(desc
->remote
.v4
));
1635 desc
->ifnet_properties
= nstat_inpcb_to_flags(inp
);
1639 if (inp
->inp_vflag
& INP_IPV6
)
1641 memcpy(&desc
->local
.v6
, &tucookie
->local
.v6
,
1642 sizeof(desc
->local
.v6
));
1643 memcpy(&desc
->remote
.v6
, &tucookie
->remote
.v6
,
1644 sizeof(desc
->remote
.v6
));
1646 else if (inp
->inp_vflag
& INP_IPV4
)
1648 memcpy(&desc
->local
.v4
, &tucookie
->local
.v4
,
1649 sizeof(desc
->local
.v4
));
1650 memcpy(&desc
->remote
.v4
, &tucookie
->remote
.v4
,
1651 sizeof(desc
->remote
.v4
));
1653 desc
->ifnet_properties
= tucookie
->ifnet_properties
;
1656 if (inp
->inp_last_outifp
)
1657 desc
->ifindex
= inp
->inp_last_outifp
->if_index
;
1659 desc
->ifindex
= tucookie
->if_index
;
1661 struct socket
*so
= inp
->inp_socket
;
1664 // TBD - take the socket lock around these to make sure
1666 desc
->upid
= so
->last_upid
;
1667 desc
->pid
= so
->last_pid
;
1668 proc_name(desc
->pid
, desc
->pname
, sizeof(desc
->pname
));
1669 if (desc
->pname
[0] == 0)
1671 strlcpy(desc
->pname
, tucookie
->pname
,
1672 sizeof(desc
->pname
));
1676 desc
->pname
[sizeof(desc
->pname
) - 1] = 0;
1677 strlcpy(tucookie
->pname
, desc
->pname
,
1678 sizeof(tucookie
->pname
));
1680 memcpy(desc
->uuid
, so
->last_uuid
, sizeof(so
->last_uuid
));
1681 memcpy(desc
->vuuid
, so
->so_vuuid
, sizeof(so
->so_vuuid
));
1682 if (so
->so_flags
& SOF_DELEGATED
) {
1683 desc
->eupid
= so
->e_upid
;
1684 desc
->epid
= so
->e_pid
;
1685 memcpy(desc
->euuid
, so
->e_uuid
, sizeof(so
->e_uuid
));
1687 desc
->eupid
= desc
->upid
;
1688 desc
->epid
= desc
->pid
;
1689 memcpy(desc
->euuid
, desc
->uuid
, sizeof(desc
->uuid
));
1691 desc
->rcvbufsize
= so
->so_rcv
.sb_hiwat
;
1692 desc
->rcvbufused
= so
->so_rcv
.sb_cc
;
1693 desc
->traffic_class
= so
->so_traffic_class
;
1700 nstat_init_udp_provider(void)
1702 bzero(&nstat_udp_provider
, sizeof(nstat_udp_provider
));
1703 nstat_udp_provider
.nstat_provider_id
= NSTAT_PROVIDER_UDP
;
1704 nstat_udp_provider
.nstat_descriptor_length
= sizeof(nstat_udp_descriptor
);
1705 nstat_udp_provider
.nstat_lookup
= nstat_udp_lookup
;
1706 nstat_udp_provider
.nstat_gone
= nstat_udp_gone
;
1707 nstat_udp_provider
.nstat_counts
= nstat_udp_counts
;
1708 nstat_udp_provider
.nstat_watcher_add
= nstat_udp_add_watcher
;
1709 nstat_udp_provider
.nstat_watcher_remove
= nstat_udp_remove_watcher
;
1710 nstat_udp_provider
.nstat_copy_descriptor
= nstat_udp_copy_descriptor
;
1711 nstat_udp_provider
.nstat_release
= nstat_udp_release
;
1712 nstat_udp_provider
.nstat_reporting_allowed
= nstat_tcpudp_reporting_allowed
;
1713 nstat_udp_provider
.next
= nstat_providers
;
1714 nstat_providers
= &nstat_udp_provider
;
1717 #pragma mark -- ifnet Provider --
1719 static nstat_provider nstat_ifnet_provider
;
1722 * We store a pointer to the ifnet and the original threshold
1723 * requested by the client.
1725 struct nstat_ifnet_cookie
1735 nstat_provider_cookie_t
*out_cookie
)
1737 const nstat_ifnet_add_param
*param
= (const nstat_ifnet_add_param
*)data
;
1739 boolean_t changed
= FALSE
;
1740 nstat_control_state
*state
;
1742 struct nstat_ifnet_cookie
*cookie
;
1744 if (length
< sizeof(*param
) || param
->threshold
< 1024*1024)
1746 if (nstat_privcheck
!= 0) {
1747 errno_t result
= priv_check_cred(kauth_cred_get(),
1748 PRIV_NET_PRIVILEGED_NETWORK_STATISTICS
, 0);
1752 cookie
= OSMalloc(sizeof(*cookie
), nstat_malloc_tag
);
1755 bzero(cookie
, sizeof(*cookie
));
1757 ifnet_head_lock_shared();
1758 TAILQ_FOREACH(ifp
, &ifnet_head
, if_link
)
1760 ifnet_lock_exclusive(ifp
);
1761 if (ifp
->if_index
== param
->ifindex
)
1764 cookie
->threshold
= param
->threshold
;
1765 *out_cookie
= cookie
;
1766 if (!ifp
->if_data_threshold
||
1767 ifp
->if_data_threshold
> param
->threshold
)
1770 ifp
->if_data_threshold
= param
->threshold
;
1772 ifnet_lock_done(ifp
);
1773 ifnet_reference(ifp
);
1776 ifnet_lock_done(ifp
);
1781 * When we change the threshold to something smaller, we notify
1782 * all of our clients with a description message.
1783 * We won't send a message to the client we are currently serving
1784 * because it has no `ifnet source' yet.
1788 lck_mtx_lock(&nstat_mtx
);
1789 for (state
= nstat_controls
; state
; state
= state
->ncs_next
)
1791 lck_mtx_lock(&state
->mtx
);
1792 for (src
= state
->ncs_srcs
; src
; src
= src
->next
)
1794 if (src
->provider
!= &nstat_ifnet_provider
)
1796 nstat_control_send_description(state
, src
, 0, 0);
1798 lck_mtx_unlock(&state
->mtx
);
1800 lck_mtx_unlock(&nstat_mtx
);
1802 if (cookie
->ifp
== NULL
)
1803 OSFree(cookie
, sizeof(*cookie
), nstat_malloc_tag
);
1805 return ifp
? 0 : EINVAL
;
1810 nstat_provider_cookie_t cookie
)
1813 struct nstat_ifnet_cookie
*ifcookie
=
1814 (struct nstat_ifnet_cookie
*)cookie
;
1816 ifnet_head_lock_shared();
1817 TAILQ_FOREACH(ifp
, &ifnet_head
, if_link
)
1819 if (ifp
== ifcookie
->ifp
)
1829 nstat_provider_cookie_t cookie
,
1830 struct nstat_counts
*out_counts
,
1833 struct nstat_ifnet_cookie
*ifcookie
=
1834 (struct nstat_ifnet_cookie
*)cookie
;
1835 struct ifnet
*ifp
= ifcookie
->ifp
;
1837 if (out_gone
) *out_gone
= 0;
1839 // if the ifnet is gone, we should stop using it
1840 if (nstat_ifnet_gone(cookie
))
1842 if (out_gone
) *out_gone
= 1;
1846 bzero(out_counts
, sizeof(*out_counts
));
1847 out_counts
->nstat_rxpackets
= ifp
->if_ipackets
;
1848 out_counts
->nstat_rxbytes
= ifp
->if_ibytes
;
1849 out_counts
->nstat_txpackets
= ifp
->if_opackets
;
1850 out_counts
->nstat_txbytes
= ifp
->if_obytes
;
1851 out_counts
->nstat_cell_rxbytes
= out_counts
->nstat_cell_txbytes
= 0;
1856 nstat_ifnet_release(
1857 nstat_provider_cookie_t cookie
,
1858 __unused
int locked
)
1860 struct nstat_ifnet_cookie
*ifcookie
;
1862 nstat_control_state
*state
;
1864 uint64_t minthreshold
= UINT64_MAX
;
1867 * Find all the clients that requested a threshold
1868 * for this ifnet and re-calculate if_data_threshold.
1870 lck_mtx_lock(&nstat_mtx
);
1871 for (state
= nstat_controls
; state
; state
= state
->ncs_next
)
1873 lck_mtx_lock(&state
->mtx
);
1874 for (src
= state
->ncs_srcs
; src
; src
= src
->next
)
1876 /* Skip the provider we are about to detach. */
1877 if (src
->provider
!= &nstat_ifnet_provider
||
1878 src
->cookie
== cookie
)
1880 ifcookie
= (struct nstat_ifnet_cookie
*)src
->cookie
;
1881 if (ifcookie
->threshold
< minthreshold
)
1882 minthreshold
= ifcookie
->threshold
;
1884 lck_mtx_unlock(&state
->mtx
);
1886 lck_mtx_unlock(&nstat_mtx
);
1888 * Reset if_data_threshold or disable it.
1890 ifcookie
= (struct nstat_ifnet_cookie
*)cookie
;
1891 ifp
= ifcookie
->ifp
;
1892 if (ifnet_is_attached(ifp
, 1)) {
1893 ifnet_lock_exclusive(ifp
);
1894 if (minthreshold
== UINT64_MAX
)
1895 ifp
->if_data_threshold
= 0;
1897 ifp
->if_data_threshold
= minthreshold
;
1898 ifnet_lock_done(ifp
);
1899 ifnet_decr_iorefcnt(ifp
);
1902 OSFree(ifcookie
, sizeof(*ifcookie
), nstat_malloc_tag
);
1906 nstat_ifnet_copy_link_status(
1908 struct nstat_ifnet_descriptor
*desc
)
1910 struct if_link_status
*ifsr
= ifp
->if_link_status
;
1911 nstat_ifnet_desc_link_status
*link_status
= &desc
->link_status
;
1913 link_status
->link_status_type
= NSTAT_IFNET_DESC_LINK_STATUS_TYPE_NONE
;
1917 lck_rw_lock_shared(&ifp
->if_link_status_lock
);
1919 if (ifp
->if_type
== IFT_CELLULAR
) {
1921 nstat_ifnet_desc_cellular_status
*cell_status
= &link_status
->u
.cellular
;
1922 struct if_cellular_status_v1
*if_cell_sr
=
1923 &ifsr
->ifsr_u
.ifsr_cell
.if_cell_u
.if_status_v1
;
1925 if (ifsr
->ifsr_version
!= IF_CELLULAR_STATUS_REPORT_VERSION_1
)
1928 link_status
->link_status_type
= NSTAT_IFNET_DESC_LINK_STATUS_TYPE_CELLULAR
;
1930 if (if_cell_sr
->valid_bitmask
& IF_CELL_LINK_QUALITY_METRIC_VALID
) {
1931 cell_status
->valid_bitmask
|= NSTAT_IFNET_DESC_CELL_LINK_QUALITY_METRIC_VALID
;
1932 cell_status
->link_quality_metric
= if_cell_sr
->link_quality_metric
;
1934 if (if_cell_sr
->valid_bitmask
& IF_CELL_UL_EFFECTIVE_BANDWIDTH_VALID
) {
1935 cell_status
->valid_bitmask
|= NSTAT_IFNET_DESC_CELL_UL_EFFECTIVE_BANDWIDTH_VALID
;
1936 cell_status
->ul_effective_bandwidth
= if_cell_sr
->ul_effective_bandwidth
;
1938 if (if_cell_sr
->valid_bitmask
& IF_CELL_UL_MAX_BANDWIDTH_VALID
) {
1939 cell_status
->valid_bitmask
|= NSTAT_IFNET_DESC_CELL_UL_MAX_BANDWIDTH_VALID
;
1940 cell_status
->ul_max_bandwidth
= if_cell_sr
->ul_max_bandwidth
;
1942 if (if_cell_sr
->valid_bitmask
& IF_CELL_UL_MIN_LATENCY_VALID
) {
1943 cell_status
->valid_bitmask
|= NSTAT_IFNET_DESC_CELL_UL_MIN_LATENCY_VALID
;
1944 cell_status
->ul_min_latency
= if_cell_sr
->ul_min_latency
;
1946 if (if_cell_sr
->valid_bitmask
& IF_CELL_UL_EFFECTIVE_LATENCY_VALID
) {
1947 cell_status
->valid_bitmask
|= NSTAT_IFNET_DESC_CELL_UL_EFFECTIVE_LATENCY_VALID
;
1948 cell_status
->ul_effective_latency
= if_cell_sr
->ul_effective_latency
;
1950 if (if_cell_sr
->valid_bitmask
& IF_CELL_UL_MAX_LATENCY_VALID
) {
1951 cell_status
->valid_bitmask
|= NSTAT_IFNET_DESC_CELL_UL_MAX_LATENCY_VALID
;
1952 cell_status
->ul_max_latency
= if_cell_sr
->ul_max_latency
;
1954 if (if_cell_sr
->valid_bitmask
& IF_CELL_UL_RETXT_LEVEL_VALID
) {
1955 cell_status
->valid_bitmask
|= NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_VALID
;
1956 if (if_cell_sr
->ul_retxt_level
== IF_CELL_UL_RETXT_LEVEL_NONE
)
1957 cell_status
->ul_retxt_level
= NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_NONE
;
1958 else if (if_cell_sr
->ul_retxt_level
== IF_CELL_UL_RETXT_LEVEL_LOW
)
1959 cell_status
->ul_retxt_level
= NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_LOW
;
1960 else if (if_cell_sr
->ul_retxt_level
== IF_CELL_UL_RETXT_LEVEL_MEDIUM
)
1961 cell_status
->ul_retxt_level
= NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_MEDIUM
;
1962 else if (if_cell_sr
->ul_retxt_level
== IF_CELL_UL_RETXT_LEVEL_HIGH
)
1963 cell_status
->ul_retxt_level
= NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_HIGH
;
1965 cell_status
->valid_bitmask
&= ~NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_VALID
;
1967 if (if_cell_sr
->valid_bitmask
& IF_CELL_UL_BYTES_LOST_VALID
) {
1968 cell_status
->valid_bitmask
|= NSTAT_IFNET_DESC_CELL_UL_BYTES_LOST_VALID
;
1969 cell_status
->ul_bytes_lost
= if_cell_sr
->ul_bytes_lost
;
1971 if (if_cell_sr
->valid_bitmask
& IF_CELL_UL_MIN_QUEUE_SIZE_VALID
) {
1972 cell_status
->valid_bitmask
|= NSTAT_IFNET_DESC_CELL_UL_MIN_QUEUE_SIZE_VALID
;
1973 cell_status
->ul_min_queue_size
= if_cell_sr
->ul_min_queue_size
;
1975 if (if_cell_sr
->valid_bitmask
& IF_CELL_UL_AVG_QUEUE_SIZE_VALID
) {
1976 cell_status
->valid_bitmask
|= NSTAT_IFNET_DESC_CELL_UL_AVG_QUEUE_SIZE_VALID
;
1977 cell_status
->ul_avg_queue_size
= if_cell_sr
->ul_avg_queue_size
;
1979 if (if_cell_sr
->valid_bitmask
& IF_CELL_UL_MAX_QUEUE_SIZE_VALID
) {
1980 cell_status
->valid_bitmask
|= NSTAT_IFNET_DESC_CELL_UL_MAX_QUEUE_SIZE_VALID
;
1981 cell_status
->ul_max_queue_size
= if_cell_sr
->ul_max_queue_size
;
1983 if (if_cell_sr
->valid_bitmask
& IF_CELL_DL_EFFECTIVE_BANDWIDTH_VALID
) {
1984 cell_status
->valid_bitmask
|= NSTAT_IFNET_DESC_CELL_DL_EFFECTIVE_BANDWIDTH_VALID
;
1985 cell_status
->dl_effective_bandwidth
= if_cell_sr
->dl_effective_bandwidth
;
1987 if (if_cell_sr
->valid_bitmask
& IF_CELL_DL_MAX_BANDWIDTH_VALID
) {
1988 cell_status
->valid_bitmask
|= NSTAT_IFNET_DESC_CELL_DL_MAX_BANDWIDTH_VALID
;
1989 cell_status
->dl_max_bandwidth
= if_cell_sr
->dl_max_bandwidth
;
1991 if (if_cell_sr
->valid_bitmask
& IF_CELL_CONFIG_INACTIVITY_TIME_VALID
) {
1992 cell_status
->valid_bitmask
|= NSTAT_IFNET_DESC_CELL_CONFIG_INACTIVITY_TIME_VALID
;
1993 cell_status
->config_inactivity_time
= if_cell_sr
->config_inactivity_time
;
1995 if (if_cell_sr
->valid_bitmask
& IF_CELL_CONFIG_BACKOFF_TIME_VALID
) {
1996 cell_status
->valid_bitmask
|= NSTAT_IFNET_DESC_CELL_CONFIG_BACKOFF_TIME_VALID
;
1997 cell_status
->config_backoff_time
= if_cell_sr
->config_backoff_time
;
2000 } else if (ifp
->if_subfamily
== IFNET_SUBFAMILY_WIFI
) {
2002 nstat_ifnet_desc_wifi_status
*wifi_status
= &link_status
->u
.wifi
;
2003 struct if_wifi_status_v1
*if_wifi_sr
=
2004 &ifsr
->ifsr_u
.ifsr_wifi
.if_wifi_u
.if_status_v1
;
2006 if (ifsr
->ifsr_version
!= IF_WIFI_STATUS_REPORT_VERSION_1
)
2009 link_status
->link_status_type
= NSTAT_IFNET_DESC_LINK_STATUS_TYPE_WIFI
;
2011 if (if_wifi_sr
->valid_bitmask
& IF_WIFI_LINK_QUALITY_METRIC_VALID
) {
2012 wifi_status
->valid_bitmask
|= NSTAT_IFNET_DESC_WIFI_LINK_QUALITY_METRIC_VALID
;
2013 wifi_status
->link_quality_metric
= if_wifi_sr
->link_quality_metric
;
2015 if (if_wifi_sr
->valid_bitmask
& IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID
) {
2016 wifi_status
->valid_bitmask
|= NSTAT_IFNET_DESC_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID
;
2017 wifi_status
->ul_effective_bandwidth
= if_wifi_sr
->ul_effective_bandwidth
;
2019 if (if_wifi_sr
->valid_bitmask
& IF_WIFI_UL_MAX_BANDWIDTH_VALID
) {
2020 wifi_status
->valid_bitmask
|= NSTAT_IFNET_DESC_WIFI_UL_MAX_BANDWIDTH_VALID
;
2021 wifi_status
->ul_max_bandwidth
= if_wifi_sr
->ul_max_bandwidth
;
2023 if (if_wifi_sr
->valid_bitmask
& IF_WIFI_UL_MIN_LATENCY_VALID
) {
2024 wifi_status
->valid_bitmask
|= NSTAT_IFNET_DESC_WIFI_UL_MIN_LATENCY_VALID
;
2025 wifi_status
->ul_min_latency
= if_wifi_sr
->ul_min_latency
;
2027 if (if_wifi_sr
->valid_bitmask
& IF_WIFI_UL_EFFECTIVE_LATENCY_VALID
) {
2028 wifi_status
->valid_bitmask
|= NSTAT_IFNET_DESC_WIFI_UL_EFFECTIVE_LATENCY_VALID
;
2029 wifi_status
->ul_effective_latency
= if_wifi_sr
->ul_effective_latency
;
2031 if (if_wifi_sr
->valid_bitmask
& IF_WIFI_UL_MAX_LATENCY_VALID
) {
2032 wifi_status
->valid_bitmask
|= NSTAT_IFNET_DESC_WIFI_UL_MAX_LATENCY_VALID
;
2033 wifi_status
->ul_max_latency
= if_wifi_sr
->ul_max_latency
;
2035 if (if_wifi_sr
->valid_bitmask
& IF_WIFI_UL_RETXT_LEVEL_VALID
) {
2036 wifi_status
->valid_bitmask
|= NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_VALID
;
2037 if (if_wifi_sr
->ul_retxt_level
== IF_WIFI_UL_RETXT_LEVEL_NONE
)
2038 wifi_status
->ul_retxt_level
= NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_NONE
;
2039 else if (if_wifi_sr
->ul_retxt_level
== IF_WIFI_UL_RETXT_LEVEL_LOW
)
2040 wifi_status
->ul_retxt_level
= NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_LOW
;
2041 else if (if_wifi_sr
->ul_retxt_level
== IF_WIFI_UL_RETXT_LEVEL_MEDIUM
)
2042 wifi_status
->ul_retxt_level
= NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_MEDIUM
;
2043 else if (if_wifi_sr
->ul_retxt_level
== IF_WIFI_UL_RETXT_LEVEL_HIGH
)
2044 wifi_status
->ul_retxt_level
= NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_HIGH
;
2046 wifi_status
->valid_bitmask
&= ~NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_VALID
;
2048 if (if_wifi_sr
->valid_bitmask
& IF_WIFI_UL_BYTES_LOST_VALID
) {
2049 wifi_status
->valid_bitmask
|= NSTAT_IFNET_DESC_WIFI_UL_BYTES_LOST_VALID
;
2050 wifi_status
->ul_bytes_lost
= if_wifi_sr
->ul_bytes_lost
;
2052 if (if_wifi_sr
->valid_bitmask
& IF_WIFI_UL_ERROR_RATE_VALID
) {
2053 wifi_status
->valid_bitmask
|= NSTAT_IFNET_DESC_WIFI_UL_ERROR_RATE_VALID
;
2054 wifi_status
->ul_error_rate
= if_wifi_sr
->ul_error_rate
;
2056 if (if_wifi_sr
->valid_bitmask
& IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID
) {
2057 wifi_status
->valid_bitmask
|= NSTAT_IFNET_DESC_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID
;
2058 wifi_status
->dl_effective_bandwidth
= if_wifi_sr
->dl_effective_bandwidth
;
2060 if (if_wifi_sr
->valid_bitmask
& IF_WIFI_DL_MAX_BANDWIDTH_VALID
) {
2061 wifi_status
->valid_bitmask
|= NSTAT_IFNET_DESC_WIFI_DL_MAX_BANDWIDTH_VALID
;
2062 wifi_status
->dl_max_bandwidth
= if_wifi_sr
->dl_max_bandwidth
;
2064 if (if_wifi_sr
->valid_bitmask
& IF_WIFI_DL_MIN_LATENCY_VALID
) {
2065 wifi_status
->valid_bitmask
|= NSTAT_IFNET_DESC_WIFI_DL_MIN_LATENCY_VALID
;
2066 wifi_status
->dl_min_latency
= if_wifi_sr
->dl_min_latency
;
2068 if (if_wifi_sr
->valid_bitmask
& IF_WIFI_DL_EFFECTIVE_LATENCY_VALID
) {
2069 wifi_status
->valid_bitmask
|= NSTAT_IFNET_DESC_WIFI_DL_EFFECTIVE_LATENCY_VALID
;
2070 wifi_status
->dl_effective_latency
= if_wifi_sr
->dl_effective_latency
;
2072 if (if_wifi_sr
->valid_bitmask
& IF_WIFI_DL_MAX_LATENCY_VALID
) {
2073 wifi_status
->valid_bitmask
|= NSTAT_IFNET_DESC_WIFI_DL_MAX_LATENCY_VALID
;
2074 wifi_status
->dl_max_latency
= if_wifi_sr
->dl_max_latency
;
2076 if (if_wifi_sr
->valid_bitmask
& IF_WIFI_DL_ERROR_RATE_VALID
) {
2077 wifi_status
->valid_bitmask
|= NSTAT_IFNET_DESC_WIFI_DL_ERROR_RATE_VALID
;
2078 wifi_status
->dl_error_rate
= if_wifi_sr
->dl_error_rate
;
2080 if (if_wifi_sr
->valid_bitmask
& IF_WIFI_CONFIG_FREQUENCY_VALID
) {
2081 wifi_status
->valid_bitmask
|= NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_VALID
;
2082 if (if_wifi_sr
->config_frequency
== IF_WIFI_CONFIG_FREQUENCY_2_4_GHZ
)
2083 wifi_status
->config_frequency
= NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_2_4_GHZ
;
2084 else if (if_wifi_sr
->config_frequency
== IF_WIFI_CONFIG_FREQUENCY_5_0_GHZ
)
2085 wifi_status
->config_frequency
= NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_5_0_GHZ
;
2087 wifi_status
->valid_bitmask
&= ~NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_VALID
;
2089 if (if_wifi_sr
->valid_bitmask
& IF_WIFI_CONFIG_MULTICAST_RATE_VALID
) {
2090 wifi_status
->valid_bitmask
|= NSTAT_IFNET_DESC_WIFI_CONFIG_MULTICAST_RATE_VALID
;
2091 wifi_status
->config_multicast_rate
= if_wifi_sr
->config_multicast_rate
;
2093 if (if_wifi_sr
->valid_bitmask
& IF_WIFI_CONFIG_SCAN_COUNT_VALID
) {
2094 wifi_status
->valid_bitmask
|= NSTAT_IFNET_DESC_WIFI_CONFIG_SCAN_COUNT_VALID
;
2095 wifi_status
->scan_count
= if_wifi_sr
->scan_count
;
2097 if (if_wifi_sr
->valid_bitmask
& IF_WIFI_CONFIG_SCAN_DURATION_VALID
) {
2098 wifi_status
->valid_bitmask
|= NSTAT_IFNET_DESC_WIFI_CONFIG_SCAN_DURATION_VALID
;
2099 wifi_status
->scan_duration
= if_wifi_sr
->scan_duration
;
2104 lck_rw_done(&ifp
->if_link_status_lock
);
2107 static u_int64_t nstat_ifnet_last_report_time
= 0;
2108 extern int tcp_report_stats_interval
;
2111 nstat_ifnet_compute_percentages(struct if_tcp_ecn_perf_stat
*ifst
)
2113 /* Retransmit percentage */
2114 if (ifst
->total_rxmitpkts
> 0 && ifst
->total_txpkts
> 0) {
2115 /* shift by 10 for precision */
2116 ifst
->rxmit_percent
=
2117 ((ifst
->total_rxmitpkts
<< 10) * 100) / ifst
->total_txpkts
;
2119 ifst
->rxmit_percent
= 0;
2122 /* Out-of-order percentage */
2123 if (ifst
->total_oopkts
> 0 && ifst
->total_rxpkts
> 0) {
2124 /* shift by 10 for precision */
2126 ((ifst
->total_oopkts
<< 10) * 100) / ifst
->total_rxpkts
;
2128 ifst
->oo_percent
= 0;
2131 /* Reorder percentage */
2132 if (ifst
->total_reorderpkts
> 0 &&
2133 (ifst
->total_txpkts
+ ifst
->total_rxpkts
) > 0) {
2134 /* shift by 10 for precision */
2135 ifst
->reorder_percent
=
2136 ((ifst
->total_reorderpkts
<< 10) * 100) /
2137 (ifst
->total_txpkts
+ ifst
->total_rxpkts
);
2139 ifst
->reorder_percent
= 0;
2144 nstat_ifnet_normalize_counter(struct if_tcp_ecn_stat
*if_st
)
2146 u_int64_t ecn_on_conn
, ecn_off_conn
;
2150 ecn_on_conn
= if_st
->ecn_client_success
+
2151 if_st
->ecn_server_success
;
2152 ecn_off_conn
= if_st
->ecn_off_conn
+
2153 (if_st
->ecn_client_setup
- if_st
->ecn_client_success
) +
2154 (if_st
->ecn_server_setup
- if_st
->ecn_server_success
);
2157 * report sack episodes, rst_drop and rxmit_drop
2158 * as a ratio per connection, shift by 10 for precision
2160 if (ecn_on_conn
> 0) {
2161 if_st
->ecn_on
.sack_episodes
=
2162 (if_st
->ecn_on
.sack_episodes
<< 10) / ecn_on_conn
;
2163 if_st
->ecn_on
.rst_drop
=
2164 (if_st
->ecn_on
.rst_drop
<< 10) * 100 / ecn_on_conn
;
2165 if_st
->ecn_on
.rxmit_drop
=
2166 (if_st
->ecn_on
.rxmit_drop
<< 10) * 100 / ecn_on_conn
;
2168 /* set to zero, just in case */
2169 if_st
->ecn_on
.sack_episodes
= 0;
2170 if_st
->ecn_on
.rst_drop
= 0;
2171 if_st
->ecn_on
.rxmit_drop
= 0;
2174 if (ecn_off_conn
> 0) {
2175 if_st
->ecn_off
.sack_episodes
=
2176 (if_st
->ecn_off
.sack_episodes
<< 10) / ecn_off_conn
;
2177 if_st
->ecn_off
.rst_drop
=
2178 (if_st
->ecn_off
.rst_drop
<< 10) * 100 / ecn_off_conn
;
2179 if_st
->ecn_off
.rxmit_drop
=
2180 (if_st
->ecn_off
.rxmit_drop
<< 10) * 100 / ecn_off_conn
;
2182 if_st
->ecn_off
.sack_episodes
= 0;
2183 if_st
->ecn_off
.rst_drop
= 0;
2184 if_st
->ecn_off
.rxmit_drop
= 0;
2186 if_st
->ecn_total_conn
= ecn_off_conn
+ ecn_on_conn
;
2190 nstat_ifnet_report_ecn_stats(void)
2192 u_int64_t uptime
, last_report_time
;
2193 struct nstat_sysinfo_data data
;
2194 struct nstat_sysinfo_ifnet_ecn_stats
*st
;
2197 uptime
= net_uptime();
2199 if ((int)(uptime
- nstat_ifnet_last_report_time
) <
2200 tcp_report_stats_interval
)
2203 last_report_time
= nstat_ifnet_last_report_time
;
2204 nstat_ifnet_last_report_time
= uptime
;
2205 data
.flags
= NSTAT_SYSINFO_IFNET_ECN_STATS
;
2206 st
= &data
.u
.ifnet_ecn_stats
;
2208 ifnet_head_lock_shared();
2209 TAILQ_FOREACH(ifp
, &ifnet_head
, if_link
) {
2210 if (ifp
->if_ipv4_stat
== NULL
|| ifp
->if_ipv6_stat
== NULL
)
2213 if ((ifp
->if_refflags
& (IFRF_ATTACHED
| IFRF_DETACHING
)) !=
2217 /* Limit reporting to Wifi, Ethernet and cellular. */
2218 if (!(IFNET_IS_ETHERNET(ifp
) || IFNET_IS_CELLULAR(ifp
)))
2221 bzero(st
, sizeof(*st
));
2222 if (IFNET_IS_CELLULAR(ifp
)) {
2223 st
->ifnet_type
= NSTAT_IFNET_ECN_TYPE_CELLULAR
;
2224 } else if (IFNET_IS_WIFI(ifp
)) {
2225 st
->ifnet_type
= NSTAT_IFNET_ECN_TYPE_WIFI
;
2227 st
->ifnet_type
= NSTAT_IFNET_ECN_TYPE_ETHERNET
;
2230 /* skip if there was no update since last report */
2231 if (ifp
->if_ipv4_stat
->timestamp
<= 0 ||
2232 ifp
->if_ipv4_stat
->timestamp
< last_report_time
)
2234 st
->ifnet_proto
= NSTAT_IFNET_ECN_PROTO_IPV4
;
2235 /* compute percentages using packet counts */
2236 nstat_ifnet_compute_percentages(&ifp
->if_ipv4_stat
->ecn_on
);
2237 nstat_ifnet_compute_percentages(&ifp
->if_ipv4_stat
->ecn_off
);
2238 nstat_ifnet_normalize_counter(ifp
->if_ipv4_stat
);
2240 bcopy(ifp
->if_ipv4_stat
, &st
->ecn_stat
,
2241 sizeof(st
->ecn_stat
));
2242 nstat_sysinfo_send_data(&data
);
2243 bzero(ifp
->if_ipv4_stat
, sizeof(*ifp
->if_ipv4_stat
));
2246 /* skip if there was no update since last report */
2247 if (ifp
->if_ipv6_stat
->timestamp
<= 0 ||
2248 ifp
->if_ipv6_stat
->timestamp
< last_report_time
)
2250 st
->ifnet_proto
= NSTAT_IFNET_ECN_PROTO_IPV6
;
2252 /* compute percentages using packet counts */
2253 nstat_ifnet_compute_percentages(&ifp
->if_ipv6_stat
->ecn_on
);
2254 nstat_ifnet_compute_percentages(&ifp
->if_ipv6_stat
->ecn_off
);
2255 nstat_ifnet_normalize_counter(ifp
->if_ipv6_stat
);
2257 bcopy(ifp
->if_ipv6_stat
, &st
->ecn_stat
,
2258 sizeof(st
->ecn_stat
));
2259 nstat_sysinfo_send_data(&data
);
2261 /* Zero the stats in ifp */
2262 bzero(ifp
->if_ipv6_stat
, sizeof(*ifp
->if_ipv6_stat
));
2269 nstat_ifnet_copy_descriptor(
2270 nstat_provider_cookie_t cookie
,
2274 nstat_ifnet_descriptor
*desc
= (nstat_ifnet_descriptor
*)data
;
2275 struct nstat_ifnet_cookie
*ifcookie
=
2276 (struct nstat_ifnet_cookie
*)cookie
;
2277 struct ifnet
*ifp
= ifcookie
->ifp
;
2279 if (len
< sizeof(nstat_ifnet_descriptor
))
2282 if (nstat_ifnet_gone(cookie
))
2285 bzero(desc
, sizeof(*desc
));
2286 ifnet_lock_shared(ifp
);
2287 strlcpy(desc
->name
, ifp
->if_xname
, sizeof(desc
->name
));
2288 desc
->ifindex
= ifp
->if_index
;
2289 desc
->threshold
= ifp
->if_data_threshold
;
2290 desc
->type
= ifp
->if_type
;
2291 if (ifp
->if_desc
.ifd_len
< sizeof(desc
->description
))
2292 memcpy(desc
->description
, ifp
->if_desc
.ifd_desc
,
2293 sizeof(desc
->description
));
2294 nstat_ifnet_copy_link_status(ifp
, desc
);
2295 ifnet_lock_done(ifp
);
2300 nstat_init_ifnet_provider(void)
2302 bzero(&nstat_ifnet_provider
, sizeof(nstat_ifnet_provider
));
2303 nstat_ifnet_provider
.nstat_provider_id
= NSTAT_PROVIDER_IFNET
;
2304 nstat_ifnet_provider
.nstat_descriptor_length
= sizeof(nstat_ifnet_descriptor
);
2305 nstat_ifnet_provider
.nstat_lookup
= nstat_ifnet_lookup
;
2306 nstat_ifnet_provider
.nstat_gone
= nstat_ifnet_gone
;
2307 nstat_ifnet_provider
.nstat_counts
= nstat_ifnet_counts
;
2308 nstat_ifnet_provider
.nstat_watcher_add
= NULL
;
2309 nstat_ifnet_provider
.nstat_watcher_remove
= NULL
;
2310 nstat_ifnet_provider
.nstat_copy_descriptor
= nstat_ifnet_copy_descriptor
;
2311 nstat_ifnet_provider
.nstat_release
= nstat_ifnet_release
;
2312 nstat_ifnet_provider
.next
= nstat_providers
;
2313 nstat_providers
= &nstat_ifnet_provider
;
2316 __private_extern__
void
2317 nstat_ifnet_threshold_reached(unsigned int ifindex
)
2319 nstat_control_state
*state
;
2322 struct nstat_ifnet_cookie
*ifcookie
;
2324 lck_mtx_lock(&nstat_mtx
);
2325 for (state
= nstat_controls
; state
; state
= state
->ncs_next
)
2327 lck_mtx_lock(&state
->mtx
);
2328 for (src
= state
->ncs_srcs
; src
; src
= src
->next
)
2330 if (src
->provider
!= &nstat_ifnet_provider
)
2332 ifcookie
= (struct nstat_ifnet_cookie
*)src
->cookie
;
2333 ifp
= ifcookie
->ifp
;
2334 if (ifp
->if_index
!= ifindex
)
2336 nstat_control_send_counts(state
, src
, 0, 0, NULL
);
2338 lck_mtx_unlock(&state
->mtx
);
2340 lck_mtx_unlock(&nstat_mtx
);
2343 #pragma mark -- Sysinfo --
2345 nstat_set_keyval_scalar(nstat_sysinfo_keyval
*kv
, int key
, u_int32_t val
)
2347 kv
->nstat_sysinfo_key
= key
;
2348 kv
->nstat_sysinfo_flags
= NSTAT_SYSINFO_FLAG_SCALAR
;
2349 kv
->u
.nstat_sysinfo_scalar
= val
;
2353 nstat_sysinfo_send_data_internal(
2354 nstat_control_state
*control
,
2355 nstat_sysinfo_data
*data
)
2357 nstat_msg_sysinfo_counts
*syscnt
= NULL
;
2358 size_t allocsize
= 0, countsize
= 0, nkeyvals
= 0, finalsize
= 0;
2359 nstat_sysinfo_keyval
*kv
;
2363 allocsize
= offsetof(nstat_msg_sysinfo_counts
, counts
);
2364 countsize
= offsetof(nstat_sysinfo_counts
, nstat_sysinfo_keyvals
);
2365 finalsize
= allocsize
;
2367 /* get number of key-vals for each kind of stat */
2368 switch (data
->flags
)
2370 case NSTAT_SYSINFO_MBUF_STATS
:
2371 nkeyvals
= sizeof(struct nstat_sysinfo_mbuf_stats
) /
2374 case NSTAT_SYSINFO_TCP_STATS
:
2375 nkeyvals
= sizeof(struct nstat_sysinfo_tcp_stats
) /
2378 case NSTAT_SYSINFO_IFNET_ECN_STATS
:
2379 nkeyvals
= (sizeof(struct if_tcp_ecn_stat
) /
2382 /* Two more keys for ifnet type and proto */
2388 countsize
+= sizeof(nstat_sysinfo_keyval
) * nkeyvals
;
2389 allocsize
+= countsize
;
2391 syscnt
= OSMalloc(allocsize
, nstat_malloc_tag
);
2394 bzero(syscnt
, allocsize
);
2396 kv
= (nstat_sysinfo_keyval
*) &syscnt
->counts
.nstat_sysinfo_keyvals
;
2397 switch (data
->flags
)
2399 case NSTAT_SYSINFO_MBUF_STATS
:
2401 nstat_set_keyval_scalar(&kv
[i
++],
2402 NSTAT_SYSINFO_KEY_MBUF_256B_TOTAL
,
2403 data
->u
.mb_stats
.total_256b
);
2404 nstat_set_keyval_scalar(&kv
[i
++],
2405 NSTAT_SYSINFO_KEY_MBUF_2KB_TOTAL
,
2406 data
->u
.mb_stats
.total_2kb
);
2407 nstat_set_keyval_scalar(&kv
[i
++],
2408 NSTAT_SYSINFO_KEY_MBUF_4KB_TOTAL
,
2409 data
->u
.mb_stats
.total_4kb
);
2410 nstat_set_keyval_scalar(&kv
[i
++],
2411 NSTAT_SYSINFO_MBUF_16KB_TOTAL
,
2412 data
->u
.mb_stats
.total_16kb
);
2413 nstat_set_keyval_scalar(&kv
[i
++],
2414 NSTAT_SYSINFO_KEY_SOCK_MBCNT
,
2415 data
->u
.mb_stats
.sbmb_total
);
2416 nstat_set_keyval_scalar(&kv
[i
++],
2417 NSTAT_SYSINFO_KEY_SOCK_ATMBLIMIT
,
2418 data
->u
.mb_stats
.sb_atmbuflimit
);
2419 nstat_set_keyval_scalar(&kv
[i
++],
2420 NSTAT_SYSINFO_MBUF_DRAIN_CNT
,
2421 data
->u
.mb_stats
.draincnt
);
2422 nstat_set_keyval_scalar(&kv
[i
++],
2423 NSTAT_SYSINFO_MBUF_MEM_RELEASED
,
2424 data
->u
.mb_stats
.memreleased
);
2425 VERIFY(i
== nkeyvals
);
2428 case NSTAT_SYSINFO_TCP_STATS
:
2430 nstat_set_keyval_scalar(&kv
[i
++],
2431 NSTAT_SYSINFO_KEY_IPV4_AVGRTT
,
2432 data
->u
.tcp_stats
.ipv4_avgrtt
);
2433 nstat_set_keyval_scalar(&kv
[i
++],
2434 NSTAT_SYSINFO_KEY_IPV6_AVGRTT
,
2435 data
->u
.tcp_stats
.ipv6_avgrtt
);
2436 nstat_set_keyval_scalar(&kv
[i
++],
2437 NSTAT_SYSINFO_KEY_SEND_PLR
,
2438 data
->u
.tcp_stats
.send_plr
);
2439 nstat_set_keyval_scalar(&kv
[i
++],
2440 NSTAT_SYSINFO_KEY_RECV_PLR
,
2441 data
->u
.tcp_stats
.recv_plr
);
2442 nstat_set_keyval_scalar(&kv
[i
++],
2443 NSTAT_SYSINFO_KEY_SEND_TLRTO
,
2444 data
->u
.tcp_stats
.send_tlrto_rate
);
2445 nstat_set_keyval_scalar(&kv
[i
++],
2446 NSTAT_SYSINFO_KEY_SEND_REORDERRATE
,
2447 data
->u
.tcp_stats
.send_reorder_rate
);
2448 nstat_set_keyval_scalar(&kv
[i
++],
2449 NSTAT_SYSINFO_CONNECTION_ATTEMPTS
,
2450 data
->u
.tcp_stats
.connection_attempts
);
2451 nstat_set_keyval_scalar(&kv
[i
++],
2452 NSTAT_SYSINFO_CONNECTION_ACCEPTS
,
2453 data
->u
.tcp_stats
.connection_accepts
);
2454 nstat_set_keyval_scalar(&kv
[i
++],
2455 NSTAT_SYSINFO_ECN_CLIENT_ENABLED
,
2456 data
->u
.tcp_stats
.ecn_client_enabled
);
2457 nstat_set_keyval_scalar(&kv
[i
++],
2458 NSTAT_SYSINFO_ECN_SERVER_ENABLED
,
2459 data
->u
.tcp_stats
.ecn_server_enabled
);
2460 nstat_set_keyval_scalar(&kv
[i
++],
2461 NSTAT_SYSINFO_ECN_CLIENT_SETUP
,
2462 data
->u
.tcp_stats
.ecn_client_setup
);
2463 nstat_set_keyval_scalar(&kv
[i
++],
2464 NSTAT_SYSINFO_ECN_SERVER_SETUP
,
2465 data
->u
.tcp_stats
.ecn_server_setup
);
2466 nstat_set_keyval_scalar(&kv
[i
++],
2467 NSTAT_SYSINFO_ECN_CLIENT_SUCCESS
,
2468 data
->u
.tcp_stats
.ecn_client_success
);
2469 nstat_set_keyval_scalar(&kv
[i
++],
2470 NSTAT_SYSINFO_ECN_SERVER_SUCCESS
,
2471 data
->u
.tcp_stats
.ecn_server_success
);
2472 nstat_set_keyval_scalar(&kv
[i
++],
2473 NSTAT_SYSINFO_ECN_NOT_SUPPORTED
,
2474 data
->u
.tcp_stats
.ecn_not_supported
);
2475 nstat_set_keyval_scalar(&kv
[i
++],
2476 NSTAT_SYSINFO_ECN_LOST_SYN
,
2477 data
->u
.tcp_stats
.ecn_lost_syn
);
2478 nstat_set_keyval_scalar(&kv
[i
++],
2479 NSTAT_SYSINFO_ECN_LOST_SYNACK
,
2480 data
->u
.tcp_stats
.ecn_lost_synack
);
2481 nstat_set_keyval_scalar(&kv
[i
++],
2482 NSTAT_SYSINFO_ECN_RECV_CE
,
2483 data
->u
.tcp_stats
.ecn_recv_ce
);
2484 nstat_set_keyval_scalar(&kv
[i
++],
2485 NSTAT_SYSINFO_ECN_RECV_ECE
,
2486 data
->u
.tcp_stats
.ecn_recv_ece
);
2487 nstat_set_keyval_scalar(&kv
[i
++],
2488 NSTAT_SYSINFO_ECN_SENT_ECE
,
2489 data
->u
.tcp_stats
.ecn_sent_ece
);
2490 nstat_set_keyval_scalar(&kv
[i
++],
2491 NSTAT_SYSINFO_ECN_CONN_RECV_CE
,
2492 data
->u
.tcp_stats
.ecn_conn_recv_ce
);
2493 nstat_set_keyval_scalar(&kv
[i
++],
2494 NSTAT_SYSINFO_ECN_CONN_RECV_ECE
,
2495 data
->u
.tcp_stats
.ecn_conn_recv_ece
);
2496 nstat_set_keyval_scalar(&kv
[i
++],
2497 NSTAT_SYSINFO_ECN_CONN_PLNOCE
,
2498 data
->u
.tcp_stats
.ecn_conn_plnoce
);
2499 nstat_set_keyval_scalar(&kv
[i
++],
2500 NSTAT_SYSINFO_ECN_CONN_PL_CE
,
2501 data
->u
.tcp_stats
.ecn_conn_pl_ce
);
2502 nstat_set_keyval_scalar(&kv
[i
++],
2503 NSTAT_SYSINFO_ECN_CONN_NOPL_CE
,
2504 data
->u
.tcp_stats
.ecn_conn_nopl_ce
);
2505 nstat_set_keyval_scalar(&kv
[i
++],
2506 NSTAT_SYSINFO_ECN_FALLBACK_SYNLOSS
,
2507 data
->u
.tcp_stats
.ecn_fallback_synloss
);
2508 nstat_set_keyval_scalar(&kv
[i
++],
2509 NSTAT_SYSINFO_ECN_FALLBACK_REORDER
,
2510 data
->u
.tcp_stats
.ecn_fallback_reorder
);
2511 nstat_set_keyval_scalar(&kv
[i
++],
2512 NSTAT_SYSINFO_ECN_FALLBACK_CE
,
2513 data
->u
.tcp_stats
.ecn_fallback_ce
);
2514 nstat_set_keyval_scalar(&kv
[i
++],
2515 NSTAT_SYSINFO_TFO_SYN_DATA_RCV
,
2516 data
->u
.tcp_stats
.tfo_syn_data_rcv
);
2517 nstat_set_keyval_scalar(&kv
[i
++],
2518 NSTAT_SYSINFO_TFO_COOKIE_REQ_RCV
,
2519 data
->u
.tcp_stats
.tfo_cookie_req_rcv
);
2520 nstat_set_keyval_scalar(&kv
[i
++],
2521 NSTAT_SYSINFO_TFO_COOKIE_SENT
,
2522 data
->u
.tcp_stats
.tfo_cookie_sent
);
2523 nstat_set_keyval_scalar(&kv
[i
++],
2524 NSTAT_SYSINFO_TFO_COOKIE_INVALID
,
2525 data
->u
.tcp_stats
.tfo_cookie_invalid
);
2526 nstat_set_keyval_scalar(&kv
[i
++],
2527 NSTAT_SYSINFO_TFO_COOKIE_REQ
,
2528 data
->u
.tcp_stats
.tfo_cookie_req
);
2529 nstat_set_keyval_scalar(&kv
[i
++],
2530 NSTAT_SYSINFO_TFO_COOKIE_RCV
,
2531 data
->u
.tcp_stats
.tfo_cookie_rcv
);
2532 nstat_set_keyval_scalar(&kv
[i
++],
2533 NSTAT_SYSINFO_TFO_SYN_DATA_SENT
,
2534 data
->u
.tcp_stats
.tfo_syn_data_sent
);
2535 nstat_set_keyval_scalar(&kv
[i
++],
2536 NSTAT_SYSINFO_TFO_SYN_DATA_ACKED
,
2537 data
->u
.tcp_stats
.tfo_syn_data_acked
);
2538 nstat_set_keyval_scalar(&kv
[i
++],
2539 NSTAT_SYSINFO_TFO_SYN_LOSS
,
2540 data
->u
.tcp_stats
.tfo_syn_loss
);
2541 nstat_set_keyval_scalar(&kv
[i
++],
2542 NSTAT_SYSINFO_TFO_BLACKHOLE
,
2543 data
->u
.tcp_stats
.tfo_blackhole
);
2544 VERIFY(i
== nkeyvals
);
2547 case NSTAT_SYSINFO_IFNET_ECN_STATS
:
2549 nstat_set_keyval_scalar(&kv
[i
++],
2550 NSTAT_SYSINFO_ECN_IFNET_TYPE
,
2551 data
->u
.ifnet_ecn_stats
.ifnet_type
);
2552 nstat_set_keyval_scalar(&kv
[i
++],
2553 NSTAT_SYSINFO_ECN_IFNET_PROTO
,
2554 data
->u
.ifnet_ecn_stats
.ifnet_proto
);
2555 nstat_set_keyval_scalar(&kv
[i
++],
2556 NSTAT_SYSINFO_ECN_IFNET_CLIENT_SETUP
,
2557 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_client_setup
);
2558 nstat_set_keyval_scalar(&kv
[i
++],
2559 NSTAT_SYSINFO_ECN_IFNET_SERVER_SETUP
,
2560 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_server_setup
);
2561 nstat_set_keyval_scalar(&kv
[i
++],
2562 NSTAT_SYSINFO_ECN_IFNET_CLIENT_SUCCESS
,
2563 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_client_success
);
2564 nstat_set_keyval_scalar(&kv
[i
++],
2565 NSTAT_SYSINFO_ECN_IFNET_SERVER_SUCCESS
,
2566 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_server_success
);
2567 nstat_set_keyval_scalar(&kv
[i
++],
2568 NSTAT_SYSINFO_ECN_IFNET_PEER_NOSUPPORT
,
2569 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_peer_nosupport
);
2570 nstat_set_keyval_scalar(&kv
[i
++],
2571 NSTAT_SYSINFO_ECN_IFNET_SYN_LOST
,
2572 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_syn_lost
);
2573 nstat_set_keyval_scalar(&kv
[i
++],
2574 NSTAT_SYSINFO_ECN_IFNET_SYNACK_LOST
,
2575 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_synack_lost
);
2576 nstat_set_keyval_scalar(&kv
[i
++],
2577 NSTAT_SYSINFO_ECN_IFNET_RECV_CE
,
2578 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_recv_ce
);
2579 nstat_set_keyval_scalar(&kv
[i
++],
2580 NSTAT_SYSINFO_ECN_IFNET_RECV_ECE
,
2581 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_recv_ece
);
2582 nstat_set_keyval_scalar(&kv
[i
++],
2583 NSTAT_SYSINFO_ECN_IFNET_CONN_RECV_CE
,
2584 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_conn_recv_ce
);
2585 nstat_set_keyval_scalar(&kv
[i
++],
2586 NSTAT_SYSINFO_ECN_IFNET_CONN_RECV_ECE
,
2587 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_conn_recv_ece
);
2588 nstat_set_keyval_scalar(&kv
[i
++],
2589 NSTAT_SYSINFO_ECN_IFNET_CONN_PLNOCE
,
2590 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_conn_plnoce
);
2591 nstat_set_keyval_scalar(&kv
[i
++],
2592 NSTAT_SYSINFO_ECN_IFNET_CONN_PLCE
,
2593 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_conn_plce
);
2594 nstat_set_keyval_scalar(&kv
[i
++],
2595 NSTAT_SYSINFO_ECN_IFNET_CONN_NOPLCE
,
2596 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_conn_noplce
);
2597 nstat_set_keyval_scalar(&kv
[i
++],
2598 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_SYNLOSS
,
2599 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_fallback_synloss
);
2600 nstat_set_keyval_scalar(&kv
[i
++],
2601 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_REORDER
,
2602 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_fallback_reorder
);
2603 nstat_set_keyval_scalar(&kv
[i
++],
2604 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_CE
,
2605 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_fallback_ce
);
2606 nstat_set_keyval_scalar(&kv
[i
++],
2607 NSTAT_SYSINFO_ECN_IFNET_ON_RTT_AVG
,
2608 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_on
.rtt_avg
);
2609 nstat_set_keyval_scalar(&kv
[i
++],
2610 NSTAT_SYSINFO_ECN_IFNET_ON_RTT_VAR
,
2611 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_on
.rtt_var
);
2612 nstat_set_keyval_scalar(&kv
[i
++],
2613 NSTAT_SYSINFO_ECN_IFNET_ON_OOPERCENT
,
2614 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_on
.oo_percent
);
2615 nstat_set_keyval_scalar(&kv
[i
++],
2616 NSTAT_SYSINFO_ECN_IFNET_ON_SACK_EPISODE
,
2617 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_on
.sack_episodes
);
2618 nstat_set_keyval_scalar(&kv
[i
++],
2619 NSTAT_SYSINFO_ECN_IFNET_ON_REORDER_PERCENT
,
2620 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_on
.reorder_percent
);
2621 nstat_set_keyval_scalar(&kv
[i
++],
2622 NSTAT_SYSINFO_ECN_IFNET_ON_RXMIT_PERCENT
,
2623 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_on
.rxmit_percent
);
2624 nstat_set_keyval_scalar(&kv
[i
++],
2625 NSTAT_SYSINFO_ECN_IFNET_ON_RXMIT_DROP
,
2626 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_on
.rxmit_drop
);
2627 nstat_set_keyval_scalar(&kv
[i
++],
2628 NSTAT_SYSINFO_ECN_IFNET_OFF_RTT_AVG
,
2629 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_off
.rtt_avg
);
2630 nstat_set_keyval_scalar(&kv
[i
++],
2631 NSTAT_SYSINFO_ECN_IFNET_OFF_RTT_VAR
,
2632 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_off
.rtt_var
);
2633 nstat_set_keyval_scalar(&kv
[i
++],
2634 NSTAT_SYSINFO_ECN_IFNET_OFF_OOPERCENT
,
2635 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_off
.oo_percent
);
2636 nstat_set_keyval_scalar(&kv
[i
++],
2637 NSTAT_SYSINFO_ECN_IFNET_OFF_SACK_EPISODE
,
2638 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_off
.sack_episodes
);
2639 nstat_set_keyval_scalar(&kv
[i
++],
2640 NSTAT_SYSINFO_ECN_IFNET_OFF_REORDER_PERCENT
,
2641 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_off
.reorder_percent
);
2642 nstat_set_keyval_scalar(&kv
[i
++],
2643 NSTAT_SYSINFO_ECN_IFNET_OFF_RXMIT_PERCENT
,
2644 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_off
.rxmit_percent
);
2645 nstat_set_keyval_scalar(&kv
[i
++],
2646 NSTAT_SYSINFO_ECN_IFNET_OFF_RXMIT_DROP
,
2647 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_off
.rxmit_drop
);
2648 nstat_set_keyval_scalar(&kv
[i
++],
2649 NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_TXPKTS
,
2650 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_on
.total_txpkts
);
2651 nstat_set_keyval_scalar(&kv
[i
++],
2652 NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_RXMTPKTS
,
2653 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_on
.total_rxmitpkts
);
2654 nstat_set_keyval_scalar(&kv
[i
++],
2655 NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_RXPKTS
,
2656 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_on
.total_rxpkts
);
2657 nstat_set_keyval_scalar(&kv
[i
++],
2658 NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_OOPKTS
,
2659 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_on
.total_oopkts
);
2660 nstat_set_keyval_scalar(&kv
[i
++],
2661 NSTAT_SYSINFO_ECN_IFNET_ON_DROP_RST
,
2662 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_on
.rst_drop
);
2663 nstat_set_keyval_scalar(&kv
[i
++],
2664 NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_TXPKTS
,
2665 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_off
.total_txpkts
);
2666 nstat_set_keyval_scalar(&kv
[i
++],
2667 NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_RXMTPKTS
,
2668 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_off
.total_rxmitpkts
);
2669 nstat_set_keyval_scalar(&kv
[i
++],
2670 NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_RXPKTS
,
2671 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_off
.total_rxpkts
);
2672 nstat_set_keyval_scalar(&kv
[i
++],
2673 NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_OOPKTS
,
2674 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_off
.total_oopkts
);
2675 nstat_set_keyval_scalar(&kv
[i
++],
2676 NSTAT_SYSINFO_ECN_IFNET_OFF_DROP_RST
,
2677 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_off
.rst_drop
);
2678 nstat_set_keyval_scalar(&kv
[i
++],
2679 NSTAT_SYSINFO_ECN_IFNET_TOTAL_CONN
,
2680 data
->u
.ifnet_ecn_stats
.ecn_stat
.ecn_total_conn
);
2686 VERIFY(i
> 0 && i
<= nkeyvals
);
2687 countsize
= offsetof(nstat_sysinfo_counts
,
2688 nstat_sysinfo_keyvals
) +
2689 sizeof(nstat_sysinfo_keyval
) * i
;
2690 finalsize
+= countsize
;
2691 syscnt
->hdr
.type
= NSTAT_MSG_TYPE_SYSINFO_COUNTS
;
2692 syscnt
->hdr
.length
= finalsize
;
2693 syscnt
->counts
.nstat_sysinfo_len
= countsize
;
2695 result
= ctl_enqueuedata(control
->ncs_kctl
,
2696 control
->ncs_unit
, syscnt
, finalsize
, CTL_DATA_EOR
);
2699 nstat_stats
.nstat_sysinfofailures
+= 1;
2701 OSFree(syscnt
, allocsize
, nstat_malloc_tag
);
2706 __private_extern__
void
2707 nstat_sysinfo_send_data(
2708 nstat_sysinfo_data
*data
)
2710 nstat_control_state
*control
;
2712 lck_mtx_lock(&nstat_mtx
);
2713 for (control
= nstat_controls
; control
; control
= control
->ncs_next
)
2715 lck_mtx_lock(&control
->mtx
);
2716 if ((control
->ncs_flags
& NSTAT_FLAG_SYSINFO_SUBSCRIBED
) != 0)
2718 nstat_sysinfo_send_data_internal(control
, data
);
2720 lck_mtx_unlock(&control
->mtx
);
2722 lck_mtx_unlock(&nstat_mtx
);
/*
 * Trigger all periodic system-level stats reports.
 *
 * NOTE(review): the extraction dropped the line between
 * mbuf_report_peak_usage() and nstat_ifnet_report_ecn_stats()
 * (original line 2729); restored here as tcp_report_stats() —
 * verify against the canonical source before committing.
 */
static void
nstat_sysinfo_generate_report(void)
{
	mbuf_report_peak_usage();
	tcp_report_stats();
	nstat_ifnet_report_ecn_stats();
}
2733 #pragma mark -- Kernel Control Socket --
2735 static kern_ctl_ref nstat_ctlref
= NULL
;
2736 static lck_grp_t
*nstat_lck_grp
= NULL
;
2738 static errno_t
nstat_control_connect(kern_ctl_ref kctl
, struct sockaddr_ctl
*sac
, void **uinfo
);
2739 static errno_t
nstat_control_disconnect(kern_ctl_ref kctl
, u_int32_t unit
, void *uinfo
);
2740 static errno_t
nstat_control_send(kern_ctl_ref kctl
, u_int32_t unit
, void *uinfo
, mbuf_t m
, int flags
);
2743 nstat_enqueue_success(
2745 nstat_control_state
*state
,
2748 nstat_msg_hdr success
;
2751 bzero(&success
, sizeof(success
));
2752 success
.context
= context
;
2753 success
.type
= NSTAT_MSG_TYPE_SUCCESS
;
2754 success
.length
= sizeof(success
);
2755 success
.flags
= flags
;
2756 result
= ctl_enqueuedata(state
->ncs_kctl
, state
->ncs_unit
, &success
,
2757 sizeof(success
), CTL_DATA_EOR
| CTL_DATA_CRIT
);
2759 if (nstat_debug
!= 0)
2760 printf("%s: could not enqueue success message %d\n",
2762 nstat_stats
.nstat_successmsgfailures
+= 1;
2768 nstat_control_send_goodbye(
2769 nstat_control_state
*state
,
2775 if (nstat_control_reporting_allowed(state
, src
))
2777 if ((state
->ncs_flags
& NSTAT_FLAG_SUPPORTS_UPDATES
) != 0)
2779 result
= nstat_control_send_update(state
, src
, 0, NSTAT_MSG_HDR_FLAG_CLOSING
, NULL
);
2783 if (nstat_debug
!= 0)
2784 printf("%s - nstat_control_send_update() %d\n", __func__
, result
);
2789 // send one last counts notification
2790 result
= nstat_control_send_counts(state
, src
, 0, NSTAT_MSG_HDR_FLAG_CLOSING
, NULL
);
2794 if (nstat_debug
!= 0)
2795 printf("%s - nstat_control_send_counts() %d\n", __func__
, result
);
2798 // send a last description
2799 result
= nstat_control_send_description(state
, src
, 0, NSTAT_MSG_HDR_FLAG_CLOSING
);
2803 if (nstat_debug
!= 0)
2804 printf("%s - nstat_control_send_description() %d\n", __func__
, result
);
2809 // send the source removed notification
2810 result
= nstat_control_send_removed(state
, src
);
2811 if (result
!= 0 && nstat_debug
)
2814 if (nstat_debug
!= 0)
2815 printf("%s - nstat_control_send_removed() %d\n", __func__
, result
);
2819 nstat_stats
.nstat_control_send_goodbye_failures
++;
2826 nstat_flush_accumulated_msgs(
2827 nstat_control_state
*state
)
2830 if (state
->ncs_accumulated
!= NULL
&& mbuf_len(state
->ncs_accumulated
) > 0)
2832 mbuf_pkthdr_setlen(state
->ncs_accumulated
, mbuf_len(state
->ncs_accumulated
));
2833 result
= ctl_enqueuembuf(state
->ncs_kctl
, state
->ncs_unit
, state
->ncs_accumulated
, CTL_DATA_EOR
);
2836 nstat_stats
.nstat_flush_accumulated_msgs_failures
++;
2837 if (nstat_debug
!= 0)
2838 printf("%s - ctl_enqueuembuf failed: %d\n", __func__
, result
);
2839 mbuf_freem(state
->ncs_accumulated
);
2841 state
->ncs_accumulated
= NULL
;
2847 nstat_accumulate_msg(
2848 nstat_control_state
*state
,
2852 if (state
->ncs_accumulated
&& mbuf_trailingspace(state
->ncs_accumulated
) < length
)
2854 // Will send the current mbuf
2855 nstat_flush_accumulated_msgs(state
);
2860 if (state
->ncs_accumulated
== NULL
)
2862 unsigned int one
= 1;
2863 if (mbuf_allocpacket(MBUF_DONTWAIT
, NSTAT_MAX_MSG_SIZE
, &one
, &state
->ncs_accumulated
) != 0)
2865 if (nstat_debug
!= 0)
2866 printf("%s - mbuf_allocpacket failed\n", __func__
);
2871 mbuf_setlen(state
->ncs_accumulated
, 0);
2877 hdr
->length
= length
;
2878 result
= mbuf_copyback(state
->ncs_accumulated
, mbuf_len(state
->ncs_accumulated
),
2879 length
, hdr
, MBUF_DONTWAIT
);
2884 nstat_flush_accumulated_msgs(state
);
2885 if (nstat_debug
!= 0)
2886 printf("%s - resorting to ctl_enqueuedata\n", __func__
);
2887 result
= ctl_enqueuedata(state
->ncs_kctl
, state
->ncs_unit
, hdr
, length
, CTL_DATA_EOR
);
2891 nstat_stats
.nstat_accumulate_msg_failures
++;
2898 __unused thread_call_param_t p0
,
2899 __unused thread_call_param_t p1
)
2901 lck_mtx_lock(&nstat_mtx
);
2903 nstat_idle_time
= 0;
2905 nstat_control_state
*control
;
2906 nstat_src
*dead
= NULL
;
2907 nstat_src
*dead_list
= NULL
;
2908 for (control
= nstat_controls
; control
; control
= control
->ncs_next
)
2910 lck_mtx_lock(&control
->mtx
);
2911 nstat_src
**srcpp
= &control
->ncs_srcs
;
2913 if (!(control
->ncs_flags
& NSTAT_FLAG_REQCOUNTS
))
2915 while(*srcpp
!= NULL
)
2917 if ((*srcpp
)->provider
->nstat_gone((*srcpp
)->cookie
))
2921 // Pull it off the list
2923 *srcpp
= (*srcpp
)->next
;
2925 result
= nstat_control_send_goodbye(control
, dead
);
2927 // Put this on the list to release later
2928 dead
->next
= dead_list
;
2933 srcpp
= &(*srcpp
)->next
;
2937 control
->ncs_flags
&= ~NSTAT_FLAG_REQCOUNTS
;
2938 lck_mtx_unlock(&control
->mtx
);
2943 clock_interval_to_deadline(60, NSEC_PER_SEC
, &nstat_idle_time
);
2944 thread_call_func_delayed((thread_call_func_t
)nstat_idle_check
, NULL
, nstat_idle_time
);
2947 lck_mtx_unlock(&nstat_mtx
);
2949 /* Generate any system level reports, if needed */
2950 nstat_sysinfo_generate_report();
2952 // Release the sources now that we aren't holding lots of locks
2956 dead_list
= dead
->next
;
2958 nstat_control_cleanup_source(NULL
, dead
, FALSE
);
2965 nstat_control_register(void)
2967 // Create our lock group first
2968 lck_grp_attr_t
*grp_attr
= lck_grp_attr_alloc_init();
2969 lck_grp_attr_setdefault(grp_attr
);
2970 nstat_lck_grp
= lck_grp_alloc_init("network statistics kctl", grp_attr
);
2971 lck_grp_attr_free(grp_attr
);
2973 lck_mtx_init(&nstat_mtx
, nstat_lck_grp
, NULL
);
2975 // Register the control
2976 struct kern_ctl_reg nstat_control
;
2977 bzero(&nstat_control
, sizeof(nstat_control
));
2978 strlcpy(nstat_control
.ctl_name
, NET_STAT_CONTROL_NAME
, sizeof(nstat_control
.ctl_name
));
2979 nstat_control
.ctl_flags
= CTL_FLAG_REG_EXTENDED
| CTL_FLAG_REG_CRIT
;
2980 nstat_control
.ctl_sendsize
= nstat_sendspace
;
2981 nstat_control
.ctl_recvsize
= nstat_recvspace
;
2982 nstat_control
.ctl_connect
= nstat_control_connect
;
2983 nstat_control
.ctl_disconnect
= nstat_control_disconnect
;
2984 nstat_control
.ctl_send
= nstat_control_send
;
2986 ctl_register(&nstat_control
, &nstat_ctlref
);
2990 nstat_control_cleanup_source(
2991 nstat_control_state
*state
,
2992 struct nstat_src
*src
,
2999 result
= nstat_control_send_removed(state
, src
);
3002 nstat_stats
.nstat_control_cleanup_source_failures
++;
3003 if (nstat_debug
!= 0)
3004 printf("%s - nstat_control_send_removed() %d\n",
3008 // Cleanup the source if we found it.
3009 src
->provider
->nstat_release(src
->cookie
, locked
);
3010 OSFree(src
, sizeof(*src
), nstat_malloc_tag
);
3015 nstat_control_reporting_allowed(
3016 nstat_control_state
*state
,
3019 if (src
->provider
->nstat_reporting_allowed
== NULL
)
3023 src
->provider
->nstat_reporting_allowed( src
->cookie
,
3024 state
->ncs_provider_filters
[src
->provider
->nstat_provider_id
])
3030 nstat_control_connect(
3032 struct sockaddr_ctl
*sac
,
3035 nstat_control_state
*state
= OSMalloc(sizeof(*state
), nstat_malloc_tag
);
3036 if (state
== NULL
) return ENOMEM
;
3038 bzero(state
, sizeof(*state
));
3039 lck_mtx_init(&state
->mtx
, nstat_lck_grp
, NULL
);
3040 state
->ncs_kctl
= kctl
;
3041 state
->ncs_unit
= sac
->sc_unit
;
3042 state
->ncs_flags
= NSTAT_FLAG_REQCOUNTS
;
3045 lck_mtx_lock(&nstat_mtx
);
3046 state
->ncs_next
= nstat_controls
;
3047 nstat_controls
= state
;
3049 if (nstat_idle_time
== 0)
3051 clock_interval_to_deadline(60, NSEC_PER_SEC
, &nstat_idle_time
);
3052 thread_call_func_delayed((thread_call_func_t
)nstat_idle_check
, NULL
, nstat_idle_time
);
3055 lck_mtx_unlock(&nstat_mtx
);
3061 nstat_control_disconnect(
3062 __unused kern_ctl_ref kctl
,
3063 __unused u_int32_t unit
,
3067 nstat_control_state
*state
= (nstat_control_state
*)uinfo
;
3069 // pull it out of the global list of states
3070 lck_mtx_lock(&nstat_mtx
);
3071 nstat_control_state
**statepp
;
3072 for (statepp
= &nstat_controls
; *statepp
; statepp
= &(*statepp
)->ncs_next
)
3074 if (*statepp
== state
)
3076 *statepp
= state
->ncs_next
;
3080 lck_mtx_unlock(&nstat_mtx
);
3082 lck_mtx_lock(&state
->mtx
);
3083 // Stop watching for sources
3084 nstat_provider
*provider
;
3085 watching
= state
->ncs_watching
;
3086 state
->ncs_watching
= 0;
3087 for (provider
= nstat_providers
; provider
&& watching
; provider
= provider
->next
)
3089 if ((watching
& (1 << provider
->nstat_provider_id
)) != 0)
3091 watching
&= ~(1 << provider
->nstat_provider_id
);
3092 provider
->nstat_watcher_remove(state
);
3096 // set cleanup flags
3097 state
->ncs_flags
|= NSTAT_FLAG_CLEANUP
;
3099 if (state
->ncs_accumulated
)
3101 mbuf_freem(state
->ncs_accumulated
);
3102 state
->ncs_accumulated
= NULL
;
3105 // Copy out the list of sources
3106 nstat_src
*srcs
= state
->ncs_srcs
;
3107 state
->ncs_srcs
= NULL
;
3108 lck_mtx_unlock(&state
->mtx
);
3114 // pull it out of the list
3119 nstat_control_cleanup_source(NULL
, src
, FALSE
);
3121 lck_mtx_destroy(&state
->mtx
, nstat_lck_grp
);
3122 OSFree(state
, sizeof(*state
), nstat_malloc_tag
);
3127 static nstat_src_ref_t
3128 nstat_control_next_src_ref(
3129 nstat_control_state
*state
)
3132 nstat_src_ref_t toReturn
= NSTAT_SRC_REF_INVALID
;
3134 for (i
= 0; i
< 1000 && toReturn
== NSTAT_SRC_REF_INVALID
; i
++)
3136 if (state
->ncs_next_srcref
== NSTAT_SRC_REF_INVALID
||
3137 state
->ncs_next_srcref
== NSTAT_SRC_REF_ALL
)
3139 state
->ncs_next_srcref
= 1;
3143 for (src
= state
->ncs_srcs
; src
; src
= src
->next
)
3145 if (src
->srcref
== state
->ncs_next_srcref
)
3149 if (src
== NULL
) toReturn
= state
->ncs_next_srcref
;
3150 state
->ncs_next_srcref
++;
3157 nstat_control_send_counts(
3158 nstat_control_state
*state
,
3160 unsigned long long context
,
3161 u_int16_t hdr_flags
,
3164 nstat_msg_src_counts counts
;
3167 /* Some providers may not have any counts to send */
3168 if (src
->provider
->nstat_counts
== NULL
)
3171 bzero(&counts
, sizeof(counts
));
3172 counts
.hdr
.type
= NSTAT_MSG_TYPE_SRC_COUNTS
;
3173 counts
.hdr
.length
= sizeof(counts
);
3174 counts
.hdr
.flags
= hdr_flags
;
3175 counts
.hdr
.context
= context
;
3176 counts
.srcref
= src
->srcref
;
3178 if (src
->provider
->nstat_counts(src
->cookie
, &counts
.counts
, gone
) == 0)
3180 if ((src
->filter
& NSTAT_FILTER_NOZEROBYTES
) &&
3181 counts
.counts
.nstat_rxbytes
== 0 &&
3182 counts
.counts
.nstat_txbytes
== 0)
3188 result
= ctl_enqueuedata(state
->ncs_kctl
,
3189 state
->ncs_unit
, &counts
, sizeof(counts
),
3192 nstat_stats
.nstat_sendcountfailures
+= 1;
3199 nstat_control_append_counts(
3200 nstat_control_state
*state
,
3204 /* Some providers may not have any counts to send */
3205 if (!src
->provider
->nstat_counts
) return 0;
3207 nstat_msg_src_counts counts
;
3208 bzero(&counts
, sizeof(counts
));
3209 counts
.hdr
.type
= NSTAT_MSG_TYPE_SRC_COUNTS
;
3210 counts
.hdr
.length
= sizeof(counts
);
3211 counts
.srcref
= src
->srcref
;
3214 result
= src
->provider
->nstat_counts(src
->cookie
, &counts
.counts
, gone
);
3220 if ((src
->filter
& NSTAT_FILTER_NOZEROBYTES
) == NSTAT_FILTER_NOZEROBYTES
&&
3221 counts
.counts
.nstat_rxbytes
== 0 && counts
.counts
.nstat_txbytes
== 0)
3226 return nstat_accumulate_msg(state
, &counts
.hdr
, counts
.hdr
.length
);
3230 nstat_control_send_description(
3231 nstat_control_state
*state
,
3234 u_int16_t hdr_flags
)
3236 // Provider doesn't support getting the descriptor? Done.
3237 if (src
->provider
->nstat_descriptor_length
== 0 ||
3238 src
->provider
->nstat_copy_descriptor
== NULL
)
3243 // Allocate storage for the descriptor message
3245 unsigned int one
= 1;
3246 u_int32_t size
= offsetof(nstat_msg_src_description
, data
) + src
->provider
->nstat_descriptor_length
;
3247 if (mbuf_allocpacket(MBUF_DONTWAIT
, size
, &one
, &msg
) != 0)
3252 nstat_msg_src_description
*desc
= (nstat_msg_src_description
*)mbuf_data(msg
);
3254 mbuf_setlen(msg
, size
);
3255 mbuf_pkthdr_setlen(msg
, mbuf_len(msg
));
3257 // Query the provider for the provider specific bits
3258 errno_t result
= src
->provider
->nstat_copy_descriptor(src
->cookie
, desc
->data
, src
->provider
->nstat_descriptor_length
);
3266 desc
->hdr
.context
= context
;
3267 desc
->hdr
.type
= NSTAT_MSG_TYPE_SRC_DESC
;
3268 desc
->hdr
.length
= size
;
3269 desc
->hdr
.flags
= hdr_flags
;
3270 desc
->srcref
= src
->srcref
;
3271 desc
->provider
= src
->provider
->nstat_provider_id
;
3273 result
= ctl_enqueuembuf(state
->ncs_kctl
, state
->ncs_unit
, msg
, CTL_DATA_EOR
);
3276 nstat_stats
.nstat_descriptionfailures
+= 1;
3284 nstat_control_append_description(
3285 nstat_control_state
*state
,
3288 size_t size
= offsetof(nstat_msg_src_description
, data
) + src
->provider
->nstat_descriptor_length
;
3289 if (size
> 512 || src
->provider
->nstat_descriptor_length
== 0 ||
3290 src
->provider
->nstat_copy_descriptor
== NULL
)
3295 // Fill out a buffer on the stack, we will copy to the mbuf later
3296 u_int64_t buffer
[size
/sizeof(u_int64_t
) + 1]; // u_int64_t to ensure alignment
3297 bzero(buffer
, size
);
3299 nstat_msg_src_description
*desc
= (nstat_msg_src_description
*)buffer
;
3300 desc
->hdr
.type
= NSTAT_MSG_TYPE_SRC_DESC
;
3301 desc
->hdr
.length
= size
;
3302 desc
->srcref
= src
->srcref
;
3303 desc
->provider
= src
->provider
->nstat_provider_id
;
3306 // Fill in the description
3307 // Query the provider for the provider specific bits
3308 result
= src
->provider
->nstat_copy_descriptor(src
->cookie
, desc
->data
,
3309 src
->provider
->nstat_descriptor_length
);
3315 return nstat_accumulate_msg(state
, &desc
->hdr
, size
);
3319 nstat_control_send_update(
3320 nstat_control_state
*state
,
3323 u_int16_t hdr_flags
,
3326 // Provider doesn't support getting the descriptor or counts? Done.
3327 if ((src
->provider
->nstat_descriptor_length
== 0 ||
3328 src
->provider
->nstat_copy_descriptor
== NULL
) &&
3329 src
->provider
->nstat_counts
== NULL
)
3334 // Allocate storage for the descriptor message
3336 unsigned int one
= 1;
3337 u_int32_t size
= offsetof(nstat_msg_src_update
, data
) +
3338 src
->provider
->nstat_descriptor_length
;
3339 if (mbuf_allocpacket(MBUF_DONTWAIT
, size
, &one
, &msg
) != 0)
3344 nstat_msg_src_update
*desc
= (nstat_msg_src_update
*)mbuf_data(msg
);
3346 desc
->hdr
.context
= context
;
3347 desc
->hdr
.type
= NSTAT_MSG_TYPE_SRC_UPDATE
;
3348 desc
->hdr
.length
= size
;
3349 desc
->hdr
.flags
= hdr_flags
;
3350 desc
->srcref
= src
->srcref
;
3351 desc
->provider
= src
->provider
->nstat_provider_id
;
3353 mbuf_setlen(msg
, size
);
3354 mbuf_pkthdr_setlen(msg
, mbuf_len(msg
));
3357 if (src
->provider
->nstat_descriptor_length
!= 0 && src
->provider
->nstat_copy_descriptor
)
3359 // Query the provider for the provider specific bits
3360 result
= src
->provider
->nstat_copy_descriptor(src
->cookie
, desc
->data
,
3361 src
->provider
->nstat_descriptor_length
);
3369 if (src
->provider
->nstat_counts
)
3371 result
= src
->provider
->nstat_counts(src
->cookie
, &desc
->counts
, gone
);
3374 if ((src
->filter
& NSTAT_FILTER_NOZEROBYTES
) == NSTAT_FILTER_NOZEROBYTES
&&
3375 desc
->counts
.nstat_rxbytes
== 0 && desc
->counts
.nstat_txbytes
== 0)
3381 result
= ctl_enqueuembuf(state
->ncs_kctl
, state
->ncs_unit
, msg
, CTL_DATA_EOR
);
3388 nstat_stats
.nstat_srcupatefailures
+= 1;
3396 nstat_control_append_update(
3397 nstat_control_state
*state
,
3401 size_t size
= offsetof(nstat_msg_src_update
, data
) + src
->provider
->nstat_descriptor_length
;
3402 if (size
> 512 || ((src
->provider
->nstat_descriptor_length
== 0 ||
3403 src
->provider
->nstat_copy_descriptor
== NULL
) &&
3404 src
->provider
->nstat_counts
== NULL
))
3409 // Fill out a buffer on the stack, we will copy to the mbuf later
3410 u_int64_t buffer
[size
/sizeof(u_int64_t
) + 1]; // u_int64_t to ensure alignment
3411 bzero(buffer
, size
);
3413 nstat_msg_src_update
*desc
= (nstat_msg_src_update
*)buffer
;
3414 desc
->hdr
.type
= NSTAT_MSG_TYPE_SRC_UPDATE
;
3415 desc
->hdr
.length
= size
;
3416 desc
->srcref
= src
->srcref
;
3417 desc
->provider
= src
->provider
->nstat_provider_id
;
3420 // Fill in the description
3421 if (src
->provider
->nstat_descriptor_length
!= 0 && src
->provider
->nstat_copy_descriptor
)
3423 // Query the provider for the provider specific bits
3424 result
= src
->provider
->nstat_copy_descriptor(src
->cookie
, desc
->data
,
3425 src
->provider
->nstat_descriptor_length
);
3428 nstat_stats
.nstat_copy_descriptor_failures
++;
3429 if (nstat_debug
!= 0)
3430 printf("%s: src->provider->nstat_copy_descriptor: %d\n", __func__
, result
);
3435 if (src
->provider
->nstat_counts
)
3437 result
= src
->provider
->nstat_counts(src
->cookie
, &desc
->counts
, gone
);
3440 nstat_stats
.nstat_provider_counts_failures
++;
3441 if (nstat_debug
!= 0)
3442 printf("%s: src->provider->nstat_counts: %d\n", __func__
, result
);
3446 if ((src
->filter
& NSTAT_FILTER_NOZEROBYTES
) == NSTAT_FILTER_NOZEROBYTES
&&
3447 desc
->counts
.nstat_rxbytes
== 0 && desc
->counts
.nstat_txbytes
== 0)
3453 return nstat_accumulate_msg(state
, &desc
->hdr
, size
);
3457 nstat_control_send_removed(
3458 nstat_control_state
*state
,
3461 nstat_msg_src_removed removed
;
3464 bzero(&removed
, sizeof(removed
));
3465 removed
.hdr
.type
= NSTAT_MSG_TYPE_SRC_REMOVED
;
3466 removed
.hdr
.length
= sizeof(removed
);
3467 removed
.hdr
.context
= 0;
3468 removed
.srcref
= src
->srcref
;
3469 result
= ctl_enqueuedata(state
->ncs_kctl
, state
->ncs_unit
, &removed
,
3470 sizeof(removed
), CTL_DATA_EOR
| CTL_DATA_CRIT
);
3472 nstat_stats
.nstat_msgremovedfailures
+= 1;
3478 nstat_control_handle_add_request(
3479 nstat_control_state
*state
,
3484 // Verify the header fits in the first mbuf
3485 if (mbuf_len(m
) < offsetof(nstat_msg_add_src_req
, param
))
3490 // Calculate the length of the parameter field
3491 int32_t paramlength
= mbuf_pkthdr_len(m
) - offsetof(nstat_msg_add_src_req
, param
);
3492 if (paramlength
< 0 || paramlength
> 2 * 1024)
3497 nstat_provider
*provider
;
3498 nstat_provider_cookie_t cookie
;
3499 nstat_msg_add_src_req
*req
= mbuf_data(m
);
3500 if (mbuf_pkthdr_len(m
) > mbuf_len(m
))
3502 // parameter is too large, we need to make a contiguous copy
3503 void *data
= OSMalloc(paramlength
, nstat_malloc_tag
);
3505 if (!data
) return ENOMEM
;
3506 result
= mbuf_copydata(m
, offsetof(nstat_msg_add_src_req
, param
), paramlength
, data
);
3508 result
= nstat_lookup_entry(req
->provider
, data
, paramlength
, &provider
, &cookie
);
3509 OSFree(data
, paramlength
, nstat_malloc_tag
);
3513 result
= nstat_lookup_entry(req
->provider
, (void*)&req
->param
, paramlength
, &provider
, &cookie
);
3521 result
= nstat_control_source_add(req
->hdr
.context
, state
, provider
, cookie
);
3523 provider
->nstat_release(cookie
, 0);
3529 nstat_control_handle_add_all(
3530 nstat_control_state
*state
,
3535 // Verify the header fits in the first mbuf
3536 if (mbuf_len(m
) < sizeof(nstat_msg_add_all_srcs
))
3542 nstat_msg_add_all_srcs
*req
= mbuf_data(m
);
3543 if (req
->provider
> NSTAT_PROVIDER_LAST
) return ENOENT
;
3545 nstat_provider
*provider
= nstat_find_provider_by_id(req
->provider
);
3546 u_int64_t filter
= req
->filter
;
3548 if (!provider
) return ENOENT
;
3549 if (provider
->nstat_watcher_add
== NULL
) return ENOTSUP
;
3551 if (nstat_privcheck
!= 0) {
3552 result
= priv_check_cred(kauth_cred_get(),
3553 PRIV_NET_PRIVILEGED_NETWORK_STATISTICS
, 0);
3558 // Make sure we don't add the provider twice
3559 lck_mtx_lock(&state
->mtx
);
3560 if ((state
->ncs_watching
& (1 << provider
->nstat_provider_id
)) != 0)
3562 state
->ncs_watching
|= (1 << provider
->nstat_provider_id
);
3563 lck_mtx_unlock(&state
->mtx
);
3564 if (result
!= 0) return result
;
3566 state
->ncs_provider_filters
[req
->provider
] = filter
;
3568 result
= provider
->nstat_watcher_add(state
);
3571 state
->ncs_provider_filters
[req
->provider
] = 0;
3572 lck_mtx_lock(&state
->mtx
);
3573 state
->ncs_watching
&= ~(1 << provider
->nstat_provider_id
);
3574 lck_mtx_unlock(&state
->mtx
);
3577 nstat_enqueue_success(req
->hdr
.context
, state
, 0);
3583 nstat_control_source_add(
3585 nstat_control_state
*state
,
3586 nstat_provider
*provider
,
3587 nstat_provider_cookie_t cookie
)
3589 // Fill out source added message if appropriate
3591 nstat_src_ref_t
*srcrefp
= NULL
;
3593 u_int64_t provider_filters
=
3594 state
->ncs_provider_filters
[provider
->nstat_provider_id
];
3595 boolean_t tell_user
=
3596 ((provider_filters
& NSTAT_FILTER_SUPPRESS_SRC_ADDED
) == 0);
3597 u_int32_t src_filter
=
3598 (provider_filters
& NSTAT_FILTER_PROVIDER_NOZEROBYTES
)
3599 ? NSTAT_FILTER_NOZEROBYTES
: 0;
3603 unsigned int one
= 1;
3605 if (mbuf_allocpacket(MBUF_DONTWAIT
, sizeof(nstat_msg_src_added
),
3609 mbuf_setlen(msg
, sizeof(nstat_msg_src_added
));
3610 mbuf_pkthdr_setlen(msg
, mbuf_len(msg
));
3611 nstat_msg_src_added
*add
= mbuf_data(msg
);
3612 bzero(add
, sizeof(*add
));
3613 add
->hdr
.type
= NSTAT_MSG_TYPE_SRC_ADDED
;
3614 add
->hdr
.length
= mbuf_len(msg
);
3615 add
->hdr
.context
= context
;
3616 add
->provider
= provider
->nstat_provider_id
;
3617 srcrefp
= &add
->srcref
;
3620 // Allocate storage for the source
3621 nstat_src
*src
= OSMalloc(sizeof(*src
), nstat_malloc_tag
);
3624 if (msg
) mbuf_freem(msg
);
3628 // Fill in the source, including picking an unused source ref
3629 lck_mtx_lock(&state
->mtx
);
3631 src
->srcref
= nstat_control_next_src_ref(state
);
3633 *srcrefp
= src
->srcref
;
3635 if (state
->ncs_flags
& NSTAT_FLAG_CLEANUP
|| src
->srcref
== NSTAT_SRC_REF_INVALID
)
3637 lck_mtx_unlock(&state
->mtx
);
3638 OSFree(src
, sizeof(*src
), nstat_malloc_tag
);
3639 if (msg
) mbuf_freem(msg
);
3642 src
->provider
= provider
;
3643 src
->cookie
= cookie
;
3644 src
->filter
= src_filter
;
3648 // send the source added message if appropriate
3649 errno_t result
= ctl_enqueuembuf(state
->ncs_kctl
, state
->ncs_unit
, msg
,
3653 nstat_stats
.nstat_srcaddedfailures
+= 1;
3654 lck_mtx_unlock(&state
->mtx
);
3655 OSFree(src
, sizeof(*src
), nstat_malloc_tag
);
3660 // Put the source in the list
3661 src
->next
= state
->ncs_srcs
;
3662 state
->ncs_srcs
= src
;
3664 lck_mtx_unlock(&state
->mtx
);
3670 nstat_control_handle_remove_request(
3671 nstat_control_state
*state
,
3674 nstat_src_ref_t srcref
= NSTAT_SRC_REF_INVALID
;
3676 if (mbuf_copydata(m
, offsetof(nstat_msg_rem_src_req
, srcref
), sizeof(srcref
), &srcref
) != 0)
3681 lck_mtx_lock(&state
->mtx
);
3683 // Remove this source as we look for it
3685 nstat_src
*src
= NULL
;
3686 for (nextp
= &state
->ncs_srcs
; *nextp
; nextp
= &(*nextp
)->next
)
3688 if ((*nextp
)->srcref
== srcref
)
3696 lck_mtx_unlock(&state
->mtx
);
3698 if (src
) nstat_control_cleanup_source(state
, src
, FALSE
);
3700 return src
? 0 : ENOENT
;
3704 nstat_control_handle_query_request(
3705 nstat_control_state
*state
,
3708 // TBD: handle this from another thread so we can enqueue a lot of data
3709 // As written, if a client requests query all, this function will be
3710 // called from their send of the request message. We will attempt to write
3711 // responses and succeed until the buffer fills up. Since the clients thread
3712 // is blocked on send, it won't be reading unless the client has two threads
3713 // using this socket, one for read and one for write. Two threads probably
3714 // won't work with this code anyhow since we don't have proper locking in
3716 nstat_src
*dead_srcs
= NULL
;
3717 errno_t result
= ENOENT
;
3718 nstat_msg_query_src_req req
;
3720 if (mbuf_copydata(m
, 0, sizeof(req
), &req
) != 0)
3725 const boolean_t all_srcs
= (req
.srcref
== NSTAT_SRC_REF_ALL
);
3727 lck_mtx_lock(&state
->mtx
);
3731 state
->ncs_flags
|= NSTAT_FLAG_REQCOUNTS
;
3733 nstat_src
**srcpp
= &state
->ncs_srcs
;
3734 u_int64_t src_count
= 0;
3735 boolean_t partial
= FALSE
;
3738 * Error handling policy and sequence number generation is folded into
3739 * nstat_control_begin_query.
3741 partial
= nstat_control_begin_query(state
, &req
.hdr
);
3743 while (*srcpp
!= NULL
3744 && (!partial
|| src_count
< QUERY_CONTINUATION_SRC_COUNT
))
3746 nstat_src
*src
= NULL
;
3751 // XXX ignore IFACE types?
3752 if (all_srcs
|| src
->srcref
== req
.srcref
)
3754 if (nstat_control_reporting_allowed(state
, src
)
3755 && (!partial
|| !all_srcs
|| src
->seq
!= state
->ncs_seq
))
3758 (req
.hdr
.flags
& NSTAT_MSG_HDR_FLAG_SUPPORTS_AGGREGATE
) != 0)
3760 result
= nstat_control_append_counts(state
, src
, &gone
);
3764 result
= nstat_control_send_counts(state
, src
, req
.hdr
.context
, 0, &gone
);
3767 if (ENOMEM
== result
|| ENOBUFS
== result
)
3770 * If the counts message failed to
3771 * enqueue then we should clear our flag so
3772 * that a client doesn't miss anything on
3773 * idle cleanup. We skip the "gone"
3774 * processing in the hope that we may
3775 * catch it another time.
3777 state
->ncs_flags
&= ~NSTAT_FLAG_REQCOUNTS
;
3783 * We skip over hard errors and
3786 src
->seq
= state
->ncs_seq
;
3794 // send one last descriptor message so client may see last state
3795 // If we can't send the notification now, it
3796 // will be sent in the idle cleanup.
3797 result
= nstat_control_send_description(state
, *srcpp
, 0, 0);
3800 nstat_stats
.nstat_control_send_description_failures
++;
3801 if (nstat_debug
!= 0)
3802 printf("%s - nstat_control_send_description() %d\n", __func__
, result
);
3803 state
->ncs_flags
&= ~NSTAT_FLAG_REQCOUNTS
;
3807 // pull src out of the list
3810 src
->next
= dead_srcs
;
3815 srcpp
= &(*srcpp
)->next
;
3818 if (!all_srcs
&& req
.srcref
== src
->srcref
)
3823 nstat_flush_accumulated_msgs(state
);
3825 u_int16_t flags
= 0;
3826 if (req
.srcref
== NSTAT_SRC_REF_ALL
)
3827 flags
= nstat_control_end_query(state
, *srcpp
, partial
);
3829 lck_mtx_unlock(&state
->mtx
);
3832 * If an error occurred enqueueing data, then allow the error to
3833 * propagate to nstat_control_send. This way, the error is sent to
3836 if (all_srcs
&& ENOMEM
!= result
&& ENOBUFS
!= result
)
3838 nstat_enqueue_success(req
.hdr
.context
, state
, flags
);
3847 dead_srcs
= src
->next
;
3849 // release src and send notification
3850 nstat_control_cleanup_source(state
, src
, FALSE
);
3857 nstat_control_handle_get_src_description(
3858 nstat_control_state
*state
,
3861 nstat_msg_get_src_description req
;
3862 errno_t result
= ENOENT
;
3865 if (mbuf_copydata(m
, 0, sizeof(req
), &req
) != 0)
3870 lck_mtx_lock(&state
->mtx
);
3871 u_int64_t src_count
= 0;
3872 boolean_t partial
= FALSE
;
3873 const boolean_t all_srcs
= (req
.srcref
== NSTAT_SRC_REF_ALL
);
3876 * Error handling policy and sequence number generation is folded into
3877 * nstat_control_begin_query.
3879 partial
= nstat_control_begin_query(state
, &req
.hdr
);
3881 for (src
= state
->ncs_srcs
;
3882 src
&& (!partial
|| src_count
< QUERY_CONTINUATION_SRC_COUNT
);
3885 if (all_srcs
|| src
->srcref
== req
.srcref
)
3887 if (nstat_control_reporting_allowed(state
, src
)
3888 && (!all_srcs
|| !partial
|| src
->seq
!= state
->ncs_seq
))
3890 if ((req
.hdr
.flags
& NSTAT_MSG_HDR_FLAG_SUPPORTS_AGGREGATE
) != 0 && all_srcs
)
3892 result
= nstat_control_append_description(state
, src
);
3896 result
= nstat_control_send_description(state
, src
, req
.hdr
.context
, 0);
3899 if (ENOMEM
== result
|| ENOBUFS
== result
)
3902 * If the description message failed to
3903 * enqueue then we give up for now.
3910 * Note, we skip over hard errors and
3913 src
->seq
= state
->ncs_seq
;
3924 nstat_flush_accumulated_msgs(state
);
3926 u_int16_t flags
= 0;
3927 if (req
.srcref
== NSTAT_SRC_REF_ALL
)
3928 flags
= nstat_control_end_query(state
, src
, partial
);
3930 lck_mtx_unlock(&state
->mtx
);
3932 * If an error occurred enqueueing data, then allow the error to
3933 * propagate to nstat_control_send. This way, the error is sent to
3936 if (all_srcs
&& ENOMEM
!= result
&& ENOBUFS
!= result
)
3938 nstat_enqueue_success(req
.hdr
.context
, state
, flags
);
3946 nstat_control_handle_set_filter(
3947 nstat_control_state
*state
,
3950 nstat_msg_set_filter req
;
3953 if (mbuf_copydata(m
, 0, sizeof(req
), &req
) != 0)
3955 if (req
.srcref
== NSTAT_SRC_REF_ALL
||
3956 req
.srcref
== NSTAT_SRC_REF_INVALID
)
3959 lck_mtx_lock(&state
->mtx
);
3960 for (src
= state
->ncs_srcs
; src
; src
= src
->next
)
3961 if (req
.srcref
== src
->srcref
)
3963 src
->filter
= req
.filter
;
3966 lck_mtx_unlock(&state
->mtx
);
3975 nstat_control_state
*state
,
3980 struct nstat_msg_error err
;
3982 bzero(&err
, sizeof(err
));
3983 err
.hdr
.type
= NSTAT_MSG_TYPE_ERROR
;
3984 err
.hdr
.length
= sizeof(err
);
3985 err
.hdr
.context
= context
;
3988 result
= ctl_enqueuedata(state
->ncs_kctl
, state
->ncs_unit
, &err
,
3989 sizeof(err
), CTL_DATA_EOR
| CTL_DATA_CRIT
);
3991 nstat_stats
.nstat_msgerrorfailures
++;
3995 nstat_control_begin_query(
3996 nstat_control_state
*state
,
3997 const nstat_msg_hdr
*hdrp
)
3999 boolean_t partial
= FALSE
;
4001 if (hdrp
->flags
& NSTAT_MSG_HDR_FLAG_CONTINUATION
)
4003 /* A partial query all has been requested. */
4006 if (state
->ncs_context
!= hdrp
->context
)
4008 if (state
->ncs_context
!= 0)
4009 nstat_send_error(state
, state
->ncs_context
, EAGAIN
);
4011 /* Initialize state for a partial query all. */
4012 state
->ncs_context
= hdrp
->context
;
4016 else if (state
->ncs_context
!= 0)
4019 * A continuation of a paced-query was in progress. Send that
4020 * context an error and reset the state. If the same context
4021 * has changed its mind, just send the full query results.
4023 if (state
->ncs_context
!= hdrp
->context
)
4024 nstat_send_error(state
, state
->ncs_context
, EAGAIN
);
4031 nstat_control_end_query(
4032 nstat_control_state
*state
,
4033 nstat_src
*last_src
,
4036 u_int16_t flags
= 0;
4038 if (last_src
== NULL
|| !partial
)
4041 * We iterated through the entire srcs list or exited early
4042 * from the loop when a partial update was not requested (an
4043 * error occurred), so clear context to indicate internally
4044 * that the query is finished.
4046 state
->ncs_context
= 0;
4051 * Indicate to userlevel to make another partial request as
4052 * there are still sources left to be reported.
4054 flags
|= NSTAT_MSG_HDR_FLAG_CONTINUATION
;
4061 nstat_control_handle_get_update(
4062 nstat_control_state
*state
,
4065 nstat_msg_query_src_req req
;
4067 if (mbuf_copydata(m
, 0, sizeof(req
), &req
) != 0)
4072 lck_mtx_lock(&state
->mtx
);
4074 state
->ncs_flags
|= NSTAT_FLAG_SUPPORTS_UPDATES
;
4076 errno_t result
= ENOENT
;
4078 nstat_src
*dead_srcs
= NULL
;
4079 nstat_src
**srcpp
= &state
->ncs_srcs
;
4080 u_int64_t src_count
= 0;
4081 boolean_t partial
= FALSE
;
4084 * Error handling policy and sequence number generation is folded into
4085 * nstat_control_begin_query.
4087 partial
= nstat_control_begin_query(state
, &req
.hdr
);
4089 while (*srcpp
!= NULL
4090 && (FALSE
== partial
4091 || src_count
< QUERY_CONTINUATION_SRC_COUNT
))
4097 if (nstat_control_reporting_allowed(state
, src
))
4099 /* skip this source if it has the current state
4100 * sequence number as it's already been reported in
4101 * this query-all partial sequence. */
4102 if (req
.srcref
== NSTAT_SRC_REF_ALL
4103 && (FALSE
== partial
|| src
->seq
!= state
->ncs_seq
))
4105 result
= nstat_control_append_update(state
, src
, &gone
);
4106 if (ENOMEM
== result
|| ENOBUFS
== result
)
4109 * If the update message failed to
4110 * enqueue then give up.
4117 * We skip over hard errors and
4120 src
->seq
= state
->ncs_seq
;
4124 else if (src
->srcref
== req
.srcref
)
4126 result
= nstat_control_send_update(state
, src
, req
.hdr
.context
, 0, &gone
);
4132 // pull src out of the list
4135 src
->next
= dead_srcs
;
4140 srcpp
= &(*srcpp
)->next
;
4143 if (req
.srcref
!= NSTAT_SRC_REF_ALL
&& req
.srcref
== src
->srcref
)
4149 nstat_flush_accumulated_msgs(state
);
4152 u_int16_t flags
= 0;
4153 if (req
.srcref
== NSTAT_SRC_REF_ALL
)
4154 flags
= nstat_control_end_query(state
, *srcpp
, partial
);
4156 lck_mtx_unlock(&state
->mtx
);
4158 * If an error occurred enqueueing data, then allow the error to
4159 * propagate to nstat_control_send. This way, the error is sent to
4162 if (req
.srcref
== NSTAT_SRC_REF_ALL
&& ENOMEM
!= result
&& ENOBUFS
!= result
)
4164 nstat_enqueue_success(req
.hdr
.context
, state
, flags
);
4171 dead_srcs
= src
->next
;
4173 // release src and send notification
4174 nstat_control_cleanup_source(state
, src
, FALSE
);
4181 nstat_control_handle_subscribe_sysinfo(
4182 nstat_control_state
*state
)
4184 errno_t result
= priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NETWORK_STATISTICS
, 0);
4191 lck_mtx_lock(&state
->mtx
);
4192 state
->ncs_flags
|= NSTAT_FLAG_SYSINFO_SUBSCRIBED
;
4193 lck_mtx_unlock(&state
->mtx
);
4206 nstat_control_state
*state
= (nstat_control_state
*)uinfo
;
4207 struct nstat_msg_hdr
*hdr
;
4208 struct nstat_msg_hdr storage
;
4211 if (mbuf_pkthdr_len(m
) < sizeof(*hdr
))
4213 // Is this the right thing to do?
4218 if (mbuf_len(m
) >= sizeof(*hdr
))
4224 mbuf_copydata(m
, 0, sizeof(storage
), &storage
);
4228 // Legacy clients may not set the length
4229 // Those clients are likely not setting the flags either
4230 // Fix everything up so old clients continue to work
4231 if (hdr
->length
!= mbuf_pkthdr_len(m
))
4234 hdr
->length
= mbuf_pkthdr_len(m
);
4235 if (hdr
== &storage
)
4237 mbuf_copyback(m
, 0, sizeof(*hdr
), hdr
, MBUF_DONTWAIT
);
4243 case NSTAT_MSG_TYPE_ADD_SRC
:
4244 result
= nstat_control_handle_add_request(state
, m
);
4247 case NSTAT_MSG_TYPE_ADD_ALL_SRCS
:
4248 result
= nstat_control_handle_add_all(state
, m
);
4251 case NSTAT_MSG_TYPE_REM_SRC
:
4252 result
= nstat_control_handle_remove_request(state
, m
);
4255 case NSTAT_MSG_TYPE_QUERY_SRC
:
4256 result
= nstat_control_handle_query_request(state
, m
);
4259 case NSTAT_MSG_TYPE_GET_SRC_DESC
:
4260 result
= nstat_control_handle_get_src_description(state
, m
);
4263 case NSTAT_MSG_TYPE_SET_FILTER
:
4264 result
= nstat_control_handle_set_filter(state
, m
);
4267 case NSTAT_MSG_TYPE_GET_UPDATE
:
4268 result
= nstat_control_handle_get_update(state
, m
);
4271 case NSTAT_MSG_TYPE_SUBSCRIBE_SYSINFO
:
4272 result
= nstat_control_handle_subscribe_sysinfo(state
);
4282 struct nstat_msg_error err
;
4284 bzero(&err
, sizeof(err
));
4285 err
.hdr
.type
= NSTAT_MSG_TYPE_ERROR
;
4286 err
.hdr
.length
= sizeof(err
) + mbuf_pkthdr_len(m
);
4287 err
.hdr
.context
= hdr
->context
;
4290 if (mbuf_prepend(&m
, sizeof(err
), MBUF_DONTWAIT
) == 0 &&
4291 mbuf_copyback(m
, 0, sizeof(err
), &err
, MBUF_DONTWAIT
) == 0)
4293 result
= ctl_enqueuembuf(kctl
, unit
, m
, CTL_DATA_EOR
| CTL_DATA_CRIT
);
4303 // Unable to prepend the error to the request - just send the error
4304 err
.hdr
.length
= sizeof(err
);
4305 result
= ctl_enqueuedata(kctl
, unit
, &err
, sizeof(err
),
4306 CTL_DATA_EOR
| CTL_DATA_CRIT
);
4308 nstat_stats
.nstat_msgerrorfailures
+= 1;
4310 nstat_stats
.nstat_handle_msg_failures
+= 1;
4313 if (m
) mbuf_freem(m
);