/*
 * bsd/net/ntstat.c -- network statistics kernel control.
 * Source: Apple xnu-3247.1.106.
 */
1/*
2 * Copyright (c) 2010-2015 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include <sys/param.h>
30#include <sys/types.h>
31#include <sys/kpi_mbuf.h>
32#include <sys/socket.h>
33#include <sys/kern_control.h>
34#include <sys/mcache.h>
35#include <sys/socketvar.h>
36#include <sys/sysctl.h>
37#include <sys/queue.h>
38#include <sys/priv.h>
39#include <sys/protosw.h>
40
41#include <kern/clock.h>
42#include <kern/debug.h>
43
44#include <libkern/libkern.h>
45#include <libkern/OSMalloc.h>
46#include <libkern/OSAtomic.h>
47#include <libkern/locks.h>
48
49#include <net/if.h>
50#include <net/if_var.h>
51#include <net/if_types.h>
52#include <net/route.h>
53#include <net/ntstat.h>
54
55#include <netinet/ip_var.h>
56#include <netinet/in_pcb.h>
57#include <netinet/in_var.h>
58#include <netinet/tcp.h>
59#include <netinet/tcp_var.h>
60#include <netinet/tcp_fsm.h>
61#include <netinet/tcp_cc.h>
62#include <netinet/udp.h>
63#include <netinet/udp_var.h>
64#include <netinet6/in6_pcb.h>
65#include <netinet6/in6_var.h>
66
// net.statistics: master switch for detailed statistics collection.
__private_extern__ int	nstat_collect = 1;
SYSCTL_INT(_net, OID_AUTO, statistics, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_collect, 0, "Collect detailed statistics");

// net.statistics_privcheck: when nonzero, an entitlement check gates
// access to statistics (enforced outside this chunk).
static int nstat_privcheck = 0;
SYSCTL_INT(_net, OID_AUTO, statistics_privcheck, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_privcheck, 0, "Entitlement check");

// Parent node for the net.stats.* tunables below.
SYSCTL_NODE(_net, OID_AUTO, stats,
    CTLFLAG_RW|CTLFLAG_LOCKED, 0, "network statistics");

// net.stats.debug: verbose debug toggle (consumed outside this chunk).
static int nstat_debug = 0;
SYSCTL_INT(_net_stats, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_debug, 0, "");

// net.stats.sendspace / net.stats.recvspace: buffer sizes for the
// ntstat kernel-control socket -- presumably applied at control
// registration; the consumer is outside this chunk.
static int nstat_sendspace = 2048;
SYSCTL_INT(_net_stats, OID_AUTO, sendspace, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_sendspace, 0, "");

static int nstat_recvspace = 8192;
SYSCTL_INT(_net_stats, OID_AUTO, recvspace, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_recvspace, 0, "");

// net.stats.stats: module counters exported read-only to userland.
static struct nstat_stats nstat_stats;
SYSCTL_STRUCT(_net_stats, OID_AUTO, stats, CTLFLAG_RD | CTLFLAG_LOCKED,
    &nstat_stats, nstat_stats, "");
93
94
// Per-client state bits stored in nstat_control_state.ncs_flags.
// The bits are set/cleared by the control message handlers (outside
// this chunk).
enum
{
	NSTAT_FLAG_CLEANUP = (1 << 0),
	NSTAT_FLAG_REQCOUNTS = (1 << 1),
	NSTAT_FLAG_SUPPORTS_UPDATES = (1 << 2),
	NSTAT_FLAG_SYSINFO_SUBSCRIBED = (1 << 3),
};

// Batch size used when continuing a partial query -- presumably the
// number of sources reported per continuation; consumer is outside
// this chunk.
#define QUERY_CONTINUATION_SRC_COUNT 100
104
// Per-client state: one instance per open ntstat kernel-control
// socket.  Instances are linked through ncs_next into the global
// nstat_controls list, which is protected by nstat_mtx (see the lock
// order note above nstat_malloc_tag).
typedef struct nstat_control_state
{
	struct nstat_control_state	*ncs_next;	// next client in nstat_controls
	u_int32_t		ncs_watching;	// bitmask of (1 << NSTAT_PROVIDER_*) being watched
	decl_lck_mtx_data(, mtx);	// protects this client's source list
	kern_ctl_ref		ncs_kctl;	// kernel control ref for sending replies
	u_int32_t		ncs_unit;	// kernel control unit of this client
	nstat_src_ref_t		ncs_next_srcref;	// next source handle to hand out
	struct nstat_src	*ncs_srcs;	// singly linked list of watched sources
	mbuf_t			ncs_accumulated;	// pending message data -- flushed by the control layer (outside this chunk)
	u_int32_t		ncs_flags;	// NSTAT_FLAG_* bits
	u_int64_t		ncs_provider_filters[NSTAT_PROVIDER_COUNT];	// per-provider reporting filters
	/* state maintained for partial query requests */
	u_int64_t		ncs_context;
	u_int64_t		ncs_seq;
} nstat_control_state;
121
// A statistics provider: one registered instance per source type
// (route, TCP, UDP, ifnet).  Providers live on the global
// nstat_providers list and supply the callbacks the control layer
// uses to look up, poll, describe and release sources.
typedef struct nstat_provider
{
	struct nstat_provider	*next;	// next registered provider
	nstat_provider_id_t	nstat_provider_id;	// NSTAT_PROVIDER_* identifier
	size_t			nstat_descriptor_length;	// size of this provider's descriptor struct
	// Resolve a client "add source" request into a provider cookie.
	errno_t			(*nstat_lookup)(const void *data, u_int32_t length, nstat_provider_cookie_t *out_cookie);
	// Nonzero once the underlying object is no longer usable.
	int			(*nstat_gone)(nstat_provider_cookie_t cookie);
	// Snapshot traffic counters; may also flag the source as gone.
	errno_t			(*nstat_counts)(nstat_provider_cookie_t cookie, struct nstat_counts *out_counts, int *out_gone);
	// Register/unregister a client watching all sources of this type.
	errno_t			(*nstat_watcher_add)(nstat_control_state *state);
	void			(*nstat_watcher_remove)(nstat_control_state *state);
	// Fill a provider-specific descriptor for the source.
	errno_t			(*nstat_copy_descriptor)(nstat_provider_cookie_t cookie, void *data, u_int32_t len);
	// Drop the reference(s) held by the cookie.
	void			(*nstat_release)(nstat_provider_cookie_t cookie, boolean_t locked);
	// Policy hook: may this source be reported under the given filter?
	bool			(*nstat_reporting_allowed)(nstat_provider_cookie_t cookie, uint64_t filter);
} nstat_provider;
136
137
// One watched source: ties a client (via the nstat_control_state
// ncs_srcs list) to a provider cookie.
typedef struct nstat_src
{
	struct nstat_src	*next;	// next source belonging to the same client
	nstat_src_ref_t		srcref;	// client-visible handle
	nstat_provider		*provider;	// callbacks for this source's type
	nstat_provider_cookie_t	cookie;	// provider-private, refcounted state
	uint32_t		filter;	// per-source report filter bits
	uint64_t		seq;	// bookkeeping for partial query continuation
} nstat_src;
147
148static errno_t nstat_control_send_counts(nstat_control_state *,
149 nstat_src *, unsigned long long, u_int16_t, int *);
150static int nstat_control_send_description(nstat_control_state *state, nstat_src *src, u_int64_t context, u_int16_t hdr_flags);
151static int nstat_control_send_update(nstat_control_state *state, nstat_src *src, u_int64_t context, u_int16_t hdr_flags, int *gone);
152static errno_t nstat_control_send_removed(nstat_control_state *, nstat_src *);
153static errno_t nstat_control_send_goodbye(nstat_control_state *state, nstat_src *src);
154static void nstat_control_cleanup_source(nstat_control_state *state, nstat_src *src, boolean_t);
155static bool nstat_control_reporting_allowed(nstat_control_state *state, nstat_src *src);
156static boolean_t nstat_control_begin_query(nstat_control_state *state, const nstat_msg_hdr *hdrp);
157static u_int16_t nstat_control_end_query(nstat_control_state *state, nstat_src *last_src, boolean_t partial);
158
159static u_int32_t nstat_udp_watchers = 0;
160static u_int32_t nstat_tcp_watchers = 0;
161
162static void nstat_control_register(void);
163
164/*
165 * The lock order is as follows:
166 *
167 * socket_lock (inpcb)
168 * nstat_mtx
169 * state->mtx
170 */
171static volatile OSMallocTag nstat_malloc_tag = NULL;
172static nstat_control_state *nstat_controls = NULL;
173static uint64_t nstat_idle_time = 0;
174static decl_lck_mtx_data(, nstat_mtx);
175
176/* some extern definitions */
177extern void mbuf_report_peak_usage(void);
178extern void tcp_report_stats(void);
179
// Copy a sockaddr into caller-provided storage, de-embedding the IPv6
// scope id on the way out: in-kernel link-local addresses carry the
// scope id embedded in s6_addr16[1], while consumers expect it in
// sin6_scope_id with that address word zeroed.
static void
nstat_copy_sa_out(
	const struct sockaddr	*src,
	struct sockaddr		*dst,
	int			maxlen)
{
	// Silently skip addresses that don't fit the destination buffer.
	if (src->sa_len > maxlen) return;

	bcopy(src, dst, src->sa_len);
	if (src->sa_family == AF_INET6 &&
		src->sa_len >= sizeof(struct sockaddr_in6))
	{
		struct sockaddr_in6	*sin6 = (struct sockaddr_in6*)(void *)dst;
		if (IN6_IS_SCOPE_EMBED(&sin6->sin6_addr))
		{
			// Only fill in the scope id if it wasn't already set.
			if (sin6->sin6_scope_id == 0)
				sin6->sin6_scope_id = ntohs(sin6->sin6_addr.s6_addr16[1]);
			sin6->sin6_addr.s6_addr16[1] = 0;
		}
	}
}
201
202static void
203nstat_ip_to_sockaddr(
204 const struct in_addr *ip,
205 u_int16_t port,
206 struct sockaddr_in *sin,
207 u_int32_t maxlen)
208{
209 if (maxlen < sizeof(struct sockaddr_in))
210 return;
211
212 sin->sin_family = AF_INET;
213 sin->sin_len = sizeof(*sin);
214 sin->sin_port = port;
215 sin->sin_addr = *ip;
216}
217
// Populate a sockaddr_in6 from an IPv6 address/port pair, moving any
// embedded link-local scope id into sin6_scope_id (see
// nstat_copy_sa_out).  Does nothing when the buffer is too small.
static void
nstat_ip6_to_sockaddr(
	const struct in6_addr	*ip6,
	u_int16_t		port,
	struct sockaddr_in6	*sin6,
	u_int32_t		maxlen)
{
	if (maxlen < sizeof(struct sockaddr_in6))
		return;

	sin6->sin6_family = AF_INET6;
	sin6->sin6_len = sizeof(*sin6);
	sin6->sin6_port = port;	// stored as given -- no byte-order conversion here
	sin6->sin6_addr = *ip6;
	if (IN6_IS_SCOPE_EMBED(&sin6->sin6_addr))
	{
		// De-embed the scope id from the second 16-bit address word.
		sin6->sin6_scope_id = ntohs(sin6->sin6_addr.s6_addr16[1]);
		sin6->sin6_addr.s6_addr16[1] = 0;
	}
}
238
// Map an inpcb's last outbound interface to NSTAT_IFNET_* flag bits
// for reporting.  A NULL pcb or a pcb that has not yet sent out any
// interface is reported as unknown type.
static u_int16_t
nstat_inpcb_to_flags(
	const struct inpcb *inp)
{
	u_int16_t flags = 0;

	if ((inp != NULL ) && (inp->inp_last_outifp != NULL))
	{
		struct ifnet *ifp = inp->inp_last_outifp;

		u_int32_t functional_type = if_functional_type(ifp);

		/* Panic if someone adds a functional type without updating ntstat. */
		VERIFY(0 <= functional_type && functional_type <= IFRTYPE_FUNCTIONAL_LAST);

		switch (functional_type)
		{
		case IFRTYPE_FUNCTIONAL_UNKNOWN:
			flags |= NSTAT_IFNET_IS_UNKNOWN_TYPE;
			break;
		case IFRTYPE_FUNCTIONAL_LOOPBACK:
			flags |= NSTAT_IFNET_IS_LOOPBACK;
			break;
		case IFRTYPE_FUNCTIONAL_WIRED:
			flags |= NSTAT_IFNET_IS_WIRED;
			break;
		case IFRTYPE_FUNCTIONAL_WIFI_INFRA:
			flags |= NSTAT_IFNET_IS_WIFI;
			break;
		case IFRTYPE_FUNCTIONAL_WIFI_AWDL:
			// AWDL links report both the wifi and AWDL bits.
			flags |= NSTAT_IFNET_IS_WIFI;
			flags |= NSTAT_IFNET_IS_AWDL;
			break;
		case IFRTYPE_FUNCTIONAL_CELLULAR:
			flags |= NSTAT_IFNET_IS_CELLULAR;
			break;
		}

		// Expensive is orthogonal to the functional type.
		if (IFNET_IS_EXPENSIVE(ifp))
		{
			flags |= NSTAT_IFNET_IS_EXPENSIVE;
		}
	}
	else
	{
		flags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
	}

	return flags;
}
289
290#pragma mark -- Network Statistic Providers --
291
292static errno_t nstat_control_source_add(u_int64_t context, nstat_control_state *state, nstat_provider *provider, nstat_provider_cookie_t cookie);
293struct nstat_provider *nstat_providers = NULL;
294
295static struct nstat_provider*
296nstat_find_provider_by_id(
297 nstat_provider_id_t id)
298{
299 struct nstat_provider *provider;
300
301 for (provider = nstat_providers; provider != NULL; provider = provider->next)
302 {
303 if (provider->nstat_provider_id == id)
304 break;
305 }
306
307 return provider;
308}
309
310static errno_t
311nstat_lookup_entry(
312 nstat_provider_id_t id,
313 const void *data,
314 u_int32_t length,
315 nstat_provider **out_provider,
316 nstat_provider_cookie_t *out_cookie)
317{
318 *out_provider = nstat_find_provider_by_id(id);
319 if (*out_provider == NULL)
320 {
321 return ENOENT;
322 }
323
324 return (*out_provider)->nstat_lookup(data, length, out_cookie);
325}
326
327static void nstat_init_route_provider(void);
328static void nstat_init_tcp_provider(void);
329static void nstat_init_udp_provider(void);
330static void nstat_init_ifnet_provider(void);
331
// One-time module initialization: allocate the malloc tag and, on the
// thread that wins the CAS, register all providers and the kernel
// control.  Safe to call concurrently and repeatedly; only the CAS
// winner runs the registration path.
__private_extern__ void
nstat_init(void)
{
	if (nstat_malloc_tag != NULL) return;

	OSMallocTag tag = OSMalloc_Tagalloc(NET_STAT_CONTROL_NAME, OSMT_DEFAULT);
	if (!OSCompareAndSwapPtr(NULL, tag, &nstat_malloc_tag))
	{
		// Lost the race: discard our tag and adopt the winner's.
		OSMalloc_Tagfree(tag);
		tag = nstat_malloc_tag;
	}
	else
	{
		// we need to initialize other things, we do it here as this code path will only be hit once;
		nstat_init_route_provider();
		nstat_init_tcp_provider();
		nstat_init_udp_provider();
		nstat_init_ifnet_provider();
		nstat_control_register();
	}
}
353
354#pragma mark -- Aligned Buffer Allocation --
355
// Bookkeeping stored immediately before an aligned allocation so the
// raw OSMalloc pointer and size can be recovered at free time.
struct align_header
{
	u_int32_t	offset;	// bytes from the raw allocation to the aligned pointer
	u_int32_t	length;	// total raw allocation size, as passed to OSMalloc
};
361
// Allocate 'length' bytes aligned to 'alignment', over-allocating
// enough to place an align_header immediately before the returned
// pointer so nstat_free_aligned can find the raw allocation.
// Returns NULL on allocation failure.
static void*
nstat_malloc_aligned(
	u_int32_t	length,
	u_int8_t	alignment,
	OSMallocTag	tag)
{
	struct align_header	*hdr = NULL;
	// Worst case: header plus (alignment - 1) bytes of padding.
	u_int32_t	size = length + sizeof(*hdr) + alignment - 1;

	u_int8_t *buffer = OSMalloc(size, tag);
	if (buffer == NULL) return NULL;

	// Leave room for the header, then round up to the alignment.
	u_int8_t *aligned = buffer + sizeof(*hdr);
	aligned = (u_int8_t*)P2ROUNDUP(aligned, alignment);

	// The header lives in the bytes just below the aligned block.
	hdr = (struct align_header*)(void *)(aligned - sizeof(*hdr));
	hdr->offset = aligned - buffer;
	hdr->length = size;

	return aligned;
}
383
// Free a block returned by nstat_malloc_aligned by reading the
// align_header stored immediately before it.
static void
nstat_free_aligned(
	void		*buffer,
	OSMallocTag	tag)
{
	struct align_header *hdr = (struct align_header*)(void *)((u_int8_t*)buffer - sizeof(*hdr));
	OSFree(((char*)buffer) - hdr->offset, hdr->length, tag);
}
392
393#pragma mark -- Route Provider --
394
395static nstat_provider nstat_route_provider;
396
397static errno_t
398nstat_route_lookup(
399 const void *data,
400 u_int32_t length,
401 nstat_provider_cookie_t *out_cookie)
402{
403 // rt_lookup doesn't take const params but it doesn't modify the parameters for
404 // the lookup. So...we use a union to eliminate the warning.
405 union
406 {
407 struct sockaddr *sa;
408 const struct sockaddr *const_sa;
409 } dst, mask;
410
411 const nstat_route_add_param *param = (const nstat_route_add_param*)data;
412 *out_cookie = NULL;
413
414 if (length < sizeof(*param))
415 {
416 return EINVAL;
417 }
418
419 if (param->dst.v4.sin_family == 0 ||
420 param->dst.v4.sin_family > AF_MAX ||
421 (param->mask.v4.sin_family != 0 && param->mask.v4.sin_family != param->dst.v4.sin_family))
422 {
423 return EINVAL;
424 }
425
426 if (param->dst.v4.sin_len > sizeof(param->dst) ||
427 (param->mask.v4.sin_family && param->mask.v4.sin_len > sizeof(param->mask.v4.sin_len)))
428 {
429 return EINVAL;
430 }
431 if ((param->dst.v4.sin_family == AF_INET &&
432 param->dst.v4.sin_len < sizeof(struct sockaddr_in)) ||
433 (param->dst.v6.sin6_family == AF_INET6 &&
434 param->dst.v6.sin6_len < sizeof(struct sockaddr_in6)))
435 {
436 return EINVAL;
437 }
438
439 dst.const_sa = (const struct sockaddr*)&param->dst;
440 mask.const_sa = param->mask.v4.sin_family ? (const struct sockaddr*)&param->mask : NULL;
441
442 struct radix_node_head *rnh = rt_tables[dst.sa->sa_family];
443 if (rnh == NULL) return EAFNOSUPPORT;
444
445 lck_mtx_lock(rnh_lock);
446 struct rtentry *rt = rt_lookup(TRUE, dst.sa, mask.sa, rnh, param->ifindex);
447 lck_mtx_unlock(rnh_lock);
448
449 if (rt) *out_cookie = (nstat_provider_cookie_t)rt;
450
451 return rt ? 0 : ENOENT;
452}
453
454static int
455nstat_route_gone(
456 nstat_provider_cookie_t cookie)
457{
458 struct rtentry *rt = (struct rtentry*)cookie;
459 return ((rt->rt_flags & RTF_UP) == 0) ? 1 : 0;
460}
461
// Provider counts callback for routes: copy the per-route statistics
// block into out_counts.  Sets *out_gone once the route loses RTF_UP.
// Routes that never attached a stats block report all zeros.
static errno_t
nstat_route_counts(
	nstat_provider_cookie_t	cookie,
	struct nstat_counts	*out_counts,
	int			*out_gone)
{
	struct rtentry		*rt = (struct rtentry*)cookie;
	struct nstat_counts	*rt_stats = rt->rt_stats;

	if (out_gone) *out_gone = 0;

	if (out_gone && (rt->rt_flags & RTF_UP) == 0) *out_gone = 1;

	if (rt_stats)
	{
		// 64-bit counters are read through the atomic helper; the
		// remaining 32-bit fields are copied directly.
		atomic_get_64(out_counts->nstat_rxpackets, &rt_stats->nstat_rxpackets);
		atomic_get_64(out_counts->nstat_rxbytes, &rt_stats->nstat_rxbytes);
		atomic_get_64(out_counts->nstat_txpackets, &rt_stats->nstat_txpackets);
		atomic_get_64(out_counts->nstat_txbytes, &rt_stats->nstat_txbytes);
		out_counts->nstat_rxduplicatebytes = rt_stats->nstat_rxduplicatebytes;
		out_counts->nstat_rxoutoforderbytes = rt_stats->nstat_rxoutoforderbytes;
		out_counts->nstat_txretransmit = rt_stats->nstat_txretransmit;
		out_counts->nstat_connectattempts = rt_stats->nstat_connectattempts;
		out_counts->nstat_connectsuccesses = rt_stats->nstat_connectsuccesses;
		out_counts->nstat_min_rtt = rt_stats->nstat_min_rtt;
		out_counts->nstat_avg_rtt = rt_stats->nstat_avg_rtt;
		out_counts->nstat_var_rtt = rt_stats->nstat_var_rtt;
		// Routes do not break traffic out by interface type.
		out_counts->nstat_cell_rxbytes = out_counts->nstat_cell_txbytes = 0;
	}
	else
	{
		bzero(out_counts, sizeof(*out_counts));
	}

	return 0;
}
498
// Provider release callback for routes: drop the rtentry reference
// held by the source.  The 'locked' flag is unused for routes.
static void
nstat_route_release(
	nstat_provider_cookie_t cookie,
	__unused int locked)
{
	rtfree((struct rtentry*)cookie);
}
506
507static u_int32_t nstat_route_watchers = 0;
508
// rnh_walktree callback used when a client starts watching routes:
// for each live route, take a reference and register it as a source
// for the watching client.  Called with rnh_lock held.  Returning
// nonzero aborts the tree walk.
static int
nstat_route_walktree_add(
	struct radix_node	*rn,
	void			*context)
{
	errno_t	result = 0;
	struct rtentry *rt = (struct rtentry *)rn;
	nstat_control_state *state	= (nstat_control_state*)context;

	lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);

	/* RTF_UP can't change while rnh_lock is held */
	if ((rt->rt_flags & RTF_UP) != 0)
	{
		/* Clear RTPRF_OURS if the route is still usable */
		RT_LOCK(rt);
		if (rt_validate(rt)) {
			RT_ADDREF_LOCKED(rt);
			RT_UNLOCK(rt);
		} else {
			RT_UNLOCK(rt);
			rt = NULL;
		}

		/* Otherwise if RTF_CONDEMNED, treat it as if it were down */
		if (rt == NULL)
			return (0);

		// The source now owns the reference taken above; undo it if
		// the add fails.
		result = nstat_control_source_add(0, state, &nstat_route_provider, rt);
		if (result != 0)
			rtfree_locked(rt);
	}

	return result;
}
544
// Watcher-add callback for routes: bump the watcher count first (so
// concurrently created routes are noticed), then walk every routing
// table under rnh_lock and register each live route with this client.
static errno_t
nstat_route_add_watcher(
	nstat_control_state	*state)
{
	int i;
	errno_t result = 0;
	OSIncrementAtomic(&nstat_route_watchers);

	lck_mtx_lock(rnh_lock);
	// Table 0 is unused; families run 1..AF_MAX-1.
	for (i = 1; i < AF_MAX; i++)
	{
		struct radix_node_head *rnh;
		rnh = rt_tables[i];
		if (!rnh) continue;

		result = rnh->rnh_walktree(rnh, nstat_route_walktree_add, state);
		if (result != 0)
		{
			break;
		}
	}
	lck_mtx_unlock(rnh_lock);

	return result;
}
570
// Hook called when a route comes up: register the new route as a
// source with every client watching the route provider.  The
// unlocked watcher-count test makes the common no-watcher case cheap.
__private_extern__ void
nstat_route_new_entry(
	struct rtentry	*rt)
{
	if (nstat_route_watchers == 0)
		return;

	lck_mtx_lock(&nstat_mtx);
	if ((rt->rt_flags & RTF_UP) != 0)
	{
		nstat_control_state	*state;
		for (state = nstat_controls; state; state = state->ncs_next)
		{
			if ((state->ncs_watching & (1 << NSTAT_PROVIDER_ROUTE)) != 0)
			{
				// this client is watching routes
				// acquire a reference for the route
				RT_ADDREF(rt);

				// add the source, if that fails, release the reference
				if (nstat_control_source_add(0, state, &nstat_route_provider, rt) != 0)
					RT_REMREF(rt);
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);
}
598
// Watcher-remove callback for routes: drop the watcher count.  The
// client's individual sources are torn down elsewhere.
static void
nstat_route_remove_watcher(
	__unused nstat_control_state	*state)
{
	OSDecrementAtomic(&nstat_route_watchers);
}
605
// Provider descriptor callback for routes: fill an
// nstat_route_descriptor with the route's identity, addresses,
// interface index and flags.  EINVAL when the buffer is too small.
static errno_t
nstat_route_copy_descriptor(
	nstat_provider_cookie_t	cookie,
	void			*data,
	u_int32_t		len)
{
	nstat_route_descriptor	*desc = (nstat_route_descriptor*)data;
	if (len < sizeof(*desc))
	{
		return EINVAL;
	}
	bzero(desc, sizeof(*desc));

	struct rtentry	*rt = (struct rtentry*)cookie;
	// Kernel pointers are exposed through VM_KERNEL_ADDRPERM so
	// userland gets stable ids without learning real addresses.
	desc->id = (uint64_t)VM_KERNEL_ADDRPERM(rt);
	desc->parent_id = (uint64_t)VM_KERNEL_ADDRPERM(rt->rt_parent);
	desc->gateway_id = (uint64_t)VM_KERNEL_ADDRPERM(rt->rt_gwroute);


	// key/dest
	struct sockaddr	*sa;
	if ((sa = rt_key(rt)))
		nstat_copy_sa_out(sa, &desc->dst.sa, sizeof(desc->dst));

	// mask
	if ((sa = rt_mask(rt)) && sa->sa_len <= sizeof(desc->mask))
		memcpy(&desc->mask, sa, sa->sa_len);

	// gateway
	if ((sa = rt->rt_gateway))
		nstat_copy_sa_out(sa, &desc->gateway.sa, sizeof(desc->gateway));

	if (rt->rt_ifp)
		desc->ifindex = rt->rt_ifp->if_index;

	desc->flags = rt->rt_flags;

	return 0;
}
645
646static void
647nstat_init_route_provider(void)
648{
649 bzero(&nstat_route_provider, sizeof(nstat_route_provider));
650 nstat_route_provider.nstat_descriptor_length = sizeof(nstat_route_descriptor);
651 nstat_route_provider.nstat_provider_id = NSTAT_PROVIDER_ROUTE;
652 nstat_route_provider.nstat_lookup = nstat_route_lookup;
653 nstat_route_provider.nstat_gone = nstat_route_gone;
654 nstat_route_provider.nstat_counts = nstat_route_counts;
655 nstat_route_provider.nstat_release = nstat_route_release;
656 nstat_route_provider.nstat_watcher_add = nstat_route_add_watcher;
657 nstat_route_provider.nstat_watcher_remove = nstat_route_remove_watcher;
658 nstat_route_provider.nstat_copy_descriptor = nstat_route_copy_descriptor;
659 nstat_route_provider.next = nstat_providers;
660 nstat_providers = &nstat_route_provider;
661}
662
663#pragma mark -- Route Collection --
664
// Lazily attach a zeroed nstat_counts block to a route.  Callers may
// race; the CAS guarantees a single block survives and the loser's
// allocation is freed.  Returns NULL only on allocation failure.
static struct nstat_counts*
nstat_route_attach(
	struct rtentry	*rte)
{
	struct nstat_counts *result = rte->rt_stats;
	if (result) return result;

	if (nstat_malloc_tag == NULL) nstat_init();

	// 8-byte aligned so the 64-bit counters can be accessed atomically.
	result = nstat_malloc_aligned(sizeof(*result), sizeof(u_int64_t), nstat_malloc_tag);
	if (!result) return result;

	bzero(result, sizeof(*result));

	if (!OSCompareAndSwapPtr(NULL, result, &rte->rt_stats))
	{
		// Lost the race; free ours and adopt the winner's block.
		nstat_free_aligned(result, nstat_malloc_tag);
		result = rte->rt_stats;
	}

	return result;
}
687
688__private_extern__ void
689nstat_route_detach(
690 struct rtentry *rte)
691{
692 if (rte->rt_stats)
693 {
694 nstat_free_aligned(rte->rt_stats, nstat_malloc_tag);
695 rte->rt_stats = NULL;
696 }
697}
698
699__private_extern__ void
700nstat_route_connect_attempt(
701 struct rtentry *rte)
702{
703 while (rte)
704 {
705 struct nstat_counts* stats = nstat_route_attach(rte);
706 if (stats)
707 {
708 OSIncrementAtomic(&stats->nstat_connectattempts);
709 }
710
711 rte = rte->rt_parent;
712 }
713}
714
715__private_extern__ void
716nstat_route_connect_success(
717 struct rtentry *rte)
718{
719 // This route
720 while (rte)
721 {
722 struct nstat_counts* stats = nstat_route_attach(rte);
723 if (stats)
724 {
725 OSIncrementAtomic(&stats->nstat_connectsuccesses);
726 }
727
728 rte = rte->rt_parent;
729 }
730}
731
732__private_extern__ void
733nstat_route_tx(
734 struct rtentry *rte,
735 u_int32_t packets,
736 u_int32_t bytes,
737 u_int32_t flags)
738{
739 while (rte)
740 {
741 struct nstat_counts* stats = nstat_route_attach(rte);
742 if (stats)
743 {
744 if ((flags & NSTAT_TX_FLAG_RETRANSMIT) != 0)
745 {
746 OSAddAtomic(bytes, &stats->nstat_txretransmit);
747 }
748 else
749 {
750 OSAddAtomic64((SInt64)packets, (SInt64*)&stats->nstat_txpackets);
751 OSAddAtomic64((SInt64)bytes, (SInt64*)&stats->nstat_txbytes);
752 }
753 }
754
755 rte = rte->rt_parent;
756 }
757}
758
759__private_extern__ void
760nstat_route_rx(
761 struct rtentry *rte,
762 u_int32_t packets,
763 u_int32_t bytes,
764 u_int32_t flags)
765{
766 while (rte)
767 {
768 struct nstat_counts* stats = nstat_route_attach(rte);
769 if (stats)
770 {
771 if (flags == 0)
772 {
773 OSAddAtomic64((SInt64)packets, (SInt64*)&stats->nstat_rxpackets);
774 OSAddAtomic64((SInt64)bytes, (SInt64*)&stats->nstat_rxbytes);
775 }
776 else
777 {
778 if (flags & NSTAT_RX_FLAG_OUT_OF_ORDER)
779 OSAddAtomic(bytes, &stats->nstat_rxoutoforderbytes);
780 if (flags & NSTAT_RX_FLAG_DUPLICATE)
781 OSAddAtomic(bytes, &stats->nstat_rxduplicatebytes);
782 }
783 }
784
785 rte = rte->rt_parent;
786 }
787}
788
// Fold a new RTT sample into this route's statistics (and its
// parents').  Average and variance use an exponentially weighted
// moving average with weight 1/8; minimum keeps the smallest sample.
// Each counter is updated with a lock-free compare-and-swap loop.
__private_extern__ void
nstat_route_rtt(
	struct rtentry	*rte,
	u_int32_t	rtt,
	u_int32_t	rtt_var)
{
	const int32_t	factor = 8;	// EWMA weight: new = old - (old - sample)/8

	while (rte)
	{
		struct nstat_counts* stats = nstat_route_attach(rte);
		if (stats)
		{
			int32_t	oldrtt;
			int32_t	newrtt;

			// average
			do
			{
				oldrtt = stats->nstat_avg_rtt;
				if (oldrtt == 0)
				{
					// First sample seeds the average directly.
					newrtt = rtt;
				}
				else
				{
					newrtt = oldrtt - (oldrtt - (int32_t)rtt) / factor;
				}
				if (oldrtt == newrtt) break;	// no change -- skip the CAS
			} while (!OSCompareAndSwap(oldrtt, newrtt, &stats->nstat_avg_rtt));

			// minimum
			do
			{
				oldrtt = stats->nstat_min_rtt;
				if (oldrtt != 0 && oldrtt < (int32_t)rtt)
				{
					// Existing minimum is still smaller; keep it.
					break;
				}
			} while (!OSCompareAndSwap(oldrtt, rtt, &stats->nstat_min_rtt));

			// variance
			do
			{
				oldrtt = stats->nstat_var_rtt;
				if (oldrtt == 0)
				{
					newrtt = rtt_var;
				}
				else
				{
					newrtt = oldrtt - (oldrtt - (int32_t)rtt_var) / factor;
				}
				if (oldrtt == newrtt) break;
			} while (!OSCompareAndSwap(oldrtt, newrtt, &stats->nstat_var_rtt));
		}

		rte = rte->rt_parent;
	}
}
849
850
851#pragma mark -- TCP Provider --
852
853/*
854 * Due to the way the kernel deallocates a process (the process structure
855 * might be gone by the time we get the PCB detach notification),
856 * we need to cache the process name. Without this, proc_name() would
857 * return null and the process name would never be sent to userland.
858 *
859 * For UDP sockets, we also store the cached the connection tuples along with
860 * the interface index. This is necessary because when UDP sockets are
861 * disconnected, the connection tuples are forever lost from the inpcb, thus
862 * we need to keep track of the last call to connect() in ntstat.
863 */
struct nstat_tucookie {
	struct inpcb	*inp;		// underlying pcb; inp_nstat_refcnt taken for UDP
	char		pname[MAXCOMLEN+1];	// process name cached at alloc (proc may die first)
	bool		cached;		// set once the tuples below are captured -- TODO confirm (set outside this chunk)
	union
	{
		struct sockaddr_in	v4;
		struct sockaddr_in6	v6;
	} local;			// cached local tuple (UDP; see comment above)
	union
	{
		struct sockaddr_in	v4;
		struct sockaddr_in6	v6;
	} remote;			// cached remote tuple (UDP)
	unsigned int	if_index;	// cached interface index -- populated outside this chunk
	uint16_t	ifnet_properties;	// NSTAT_IFNET_IS_* flag bits
};
881
// Allocate and initialize a tucookie for inp.  When 'ref' is set, a
// WNT_ACQUIRE reference is taken on the pcb first and allocation
// fails if the pcb is stopping.  'locked' indicates the caller
// already holds the pcb lock.  Returns NULL on allocation failure or
// when the pcb is going away.
static struct nstat_tucookie *
nstat_tucookie_alloc_internal(
	struct inpcb	*inp,
	bool		ref,
	bool		locked)
{
	struct nstat_tucookie *cookie;

	cookie = OSMalloc(sizeof(*cookie), nstat_malloc_tag);
	if (cookie == NULL)
		return NULL;
	if (!locked)
		lck_mtx_assert(&nstat_mtx, LCK_MTX_ASSERT_NOTOWNED);
	if (ref && in_pcb_checkstate(inp, WNT_ACQUIRE, locked) == WNT_STOPUSING)
	{
		OSFree(cookie, sizeof(*cookie), nstat_malloc_tag);
		return NULL;
	}
	bzero(cookie, sizeof(*cookie));
	cookie->inp = inp;
	// Cache the process name now; the process structure may be gone by
	// the time the pcb detach notification arrives.
	proc_name(inp->inp_socket->last_pid, cookie->pname,
	    sizeof(cookie->pname));
	/*
	 * We only increment the reference count for UDP sockets because we
	 * only cache UDP socket tuples.
	 */
	if (SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP)
		OSIncrementAtomic(&inp->inp_nstat_refcnt);

	return cookie;
}
913
// Wrap an inpcb without taking an extra pcb reference (the caller
// already holds one, e.g. from a pcblookup).
static struct nstat_tucookie *
nstat_tucookie_alloc(
	struct inpcb	*inp)
{
	return nstat_tucookie_alloc_internal(inp, false, false);
}
920
// Wrap an inpcb, taking a pcb reference; caller does not hold the
// pcb lock.
static struct nstat_tucookie *
nstat_tucookie_alloc_ref(
	struct inpcb	*inp)
{
	return nstat_tucookie_alloc_internal(inp, true, false);
}
927
// Wrap an inpcb, taking a pcb reference; caller holds the pcb lock.
static struct nstat_tucookie *
nstat_tucookie_alloc_ref_locked(
	struct inpcb	*inp)
{
	return nstat_tucookie_alloc_internal(inp, true, true);
}
934
// Drop the pcb references held by a tucookie and free it.  'inplock'
// is forwarded to in_pcb_checkstate as its locked flag.
static void
nstat_tucookie_release_internal(
	struct nstat_tucookie *cookie,
	int inplock)
{
	// Balance the inp_nstat_refcnt taken at allocation for UDP.
	if (SOCK_PROTO(cookie->inp->inp_socket) == IPPROTO_UDP)
		OSDecrementAtomic(&cookie->inp->inp_nstat_refcnt);
	in_pcb_checkstate(cookie->inp, WNT_RELEASE, inplock);
	OSFree(cookie, sizeof(*cookie), nstat_malloc_tag);
}
945
// Release a tucookie; caller does not hold the pcb lock.
static void
nstat_tucookie_release(
	struct nstat_tucookie *cookie)
{
	nstat_tucookie_release_internal(cookie, false);
}
952
// Release a tucookie; caller holds the pcb lock.
static void
nstat_tucookie_release_locked(
	struct nstat_tucookie *cookie)
{
	nstat_tucookie_release_internal(cookie, true);
}
959
960
961static nstat_provider nstat_tcp_provider;
962
// Shared lookup for the TCP and UDP providers: validate the client's
// local/remote address pair, find the matching inpcb in the supplied
// pcbinfo hash, and wrap it in a tucookie.  On success the cookie
// owns the pcb reference taken by the pcblookup.
static errno_t
nstat_tcpudp_lookup(
	struct inpcbinfo	*inpinfo,
	const void		*data,
	u_int32_t		length,
	nstat_provider_cookie_t	*out_cookie)
{
	struct inpcb *inp = NULL;

	// parameter validation
	const nstat_tcp_add_param	*param = (const nstat_tcp_add_param*)data;
	if (length < sizeof(*param))
	{
		return EINVAL;
	}

	// src and dst must match
	if (param->remote.v4.sin_family != 0 &&
		param->remote.v4.sin_family != param->local.v4.sin_family)
	{
		return EINVAL;
	}


	switch (param->local.v4.sin_family)
	{
		case AF_INET:
		{
			// A zero remote family means "wildcard remote"; when
			// present, lengths must be exact.
			if (param->local.v4.sin_len != sizeof(param->local.v4) ||
				(param->remote.v4.sin_family != 0 &&
				 param->remote.v4.sin_len != sizeof(param->remote.v4)))
			{
				return EINVAL;
			}

			inp = in_pcblookup_hash(inpinfo, param->remote.v4.sin_addr, param->remote.v4.sin_port,
						param->local.v4.sin_addr, param->local.v4.sin_port, 1, NULL);
		}
		break;

#if INET6
		case AF_INET6:
		{
			// in6_pcblookup_hash takes non-const address pointers;
			// the union casts away const without a warning.
			union
			{
				const struct in6_addr 	*in6c;
				struct in6_addr		*in6;
			} local, remote;

			if (param->local.v6.sin6_len != sizeof(param->local.v6) ||
				(param->remote.v6.sin6_family != 0 &&
				 param->remote.v6.sin6_len != sizeof(param->remote.v6)))
			{
				return EINVAL;
			}

			local.in6c = &param->local.v6.sin6_addr;
			remote.in6c = &param->remote.v6.sin6_addr;

			inp = in6_pcblookup_hash(inpinfo, remote.in6, param->remote.v6.sin6_port,
						local.in6, param->local.v6.sin6_port, 1, NULL);
		}
		break;
#endif

		default:
			return EINVAL;
	}

	if (inp == NULL)
		return ENOENT;

	// At this point we have a ref to the inpcb
	*out_cookie = nstat_tucookie_alloc(inp);
	if (*out_cookie == NULL)
		in_pcb_checkstate(inp, WNT_RELEASE, 0);

	return 0;
}
1042
// Provider lookup callback for TCP: defer to the shared TCP/UDP
// lookup against the global TCP pcbinfo.
static errno_t
nstat_tcp_lookup(
	const void		*data,
	u_int32_t		length,
	nstat_provider_cookie_t	*out_cookie)
{
	return nstat_tcpudp_lookup(&tcbinfo, data, length, out_cookie);
}
1051
1052static int
1053nstat_tcp_gone(
1054 nstat_provider_cookie_t cookie)
1055{
1056 struct nstat_tucookie *tucookie =
1057 (struct nstat_tucookie *)cookie;
1058 struct inpcb *inp;
1059 struct tcpcb *tp;
1060
1061 return (!(inp = tucookie->inp) ||
1062 !(tp = intotcpcb(inp)) ||
1063 inp->inp_state == INPCB_STATE_DEAD) ? 1 : 0;
1064}
1065
// Provider counts callback for TCP: snapshot the pcb's traffic and
// RTT counters into out_counts.  Flags *out_gone when the pcb is
// dead; returns EINVAL if the inp or tcpcb is already missing so the
// stale pointers below are never dereferenced.
static errno_t
nstat_tcp_counts(
	nstat_provider_cookie_t	cookie,
	struct nstat_counts	*out_counts,
	int			*out_gone)
{
	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;
	struct inpcb *inp;

	bzero(out_counts, sizeof(*out_counts));

	if (out_gone) *out_gone = 0;

	// if the pcb is in the dead state, we should stop using it
	if (nstat_tcp_gone(cookie))
	{
		if (out_gone) *out_gone = 1;
		if (!(inp = tucookie->inp) || !intotcpcb(inp))
			return EINVAL;
	}
	inp = tucookie->inp;
	struct tcpcb *tp = intotcpcb(inp);

	// 64-bit counters go through the atomic read helper.
	atomic_get_64(out_counts->nstat_rxpackets, &inp->inp_stat->rxpackets);
	atomic_get_64(out_counts->nstat_rxbytes, &inp->inp_stat->rxbytes);
	atomic_get_64(out_counts->nstat_txpackets, &inp->inp_stat->txpackets);
	atomic_get_64(out_counts->nstat_txbytes, &inp->inp_stat->txbytes);
	out_counts->nstat_rxduplicatebytes = tp->t_stat.rxduplicatebytes;
	out_counts->nstat_rxoutoforderbytes = tp->t_stat.rxoutoforderbytes;
	out_counts->nstat_txretransmit = tp->t_stat.txretransmitbytes;
	// Attempt/success are synthesized from the TCP state machine.
	out_counts->nstat_connectattempts = tp->t_state >= TCPS_SYN_SENT ? 1 : 0;
	out_counts->nstat_connectsuccesses = tp->t_state >= TCPS_ESTABLISHED ? 1 : 0;
	out_counts->nstat_avg_rtt = tp->t_srtt;
	out_counts->nstat_min_rtt = tp->t_rttbest;
	out_counts->nstat_var_rtt = tp->t_rttvar;
	// Clamp: never report a minimum above the average.
	if (out_counts->nstat_avg_rtt < out_counts->nstat_min_rtt)
		out_counts->nstat_min_rtt = out_counts->nstat_avg_rtt;
	atomic_get_64(out_counts->nstat_cell_rxbytes, &inp->inp_cstat->rxbytes);
	atomic_get_64(out_counts->nstat_cell_txbytes, &inp->inp_cstat->txbytes);
	atomic_get_64(out_counts->nstat_wifi_rxbytes, &inp->inp_wstat->rxbytes);
	atomic_get_64(out_counts->nstat_wifi_txbytes, &inp->inp_wstat->txbytes);
	atomic_get_64(out_counts->nstat_wired_rxbytes, &inp->inp_Wstat->rxbytes);
	atomic_get_64(out_counts->nstat_wired_txbytes, &inp->inp_Wstat->txbytes);

	return 0;
}
1113
// Provider release callback for TCP: drop the tucookie (and the pcb
// references it holds).  'locked' is forwarded as the pcb-locked flag.
static void
nstat_tcp_release(
	nstat_provider_cookie_t	cookie,
	int locked)
{
	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;

	nstat_tucookie_release_internal(tucookie, locked);
}
1124
// Watcher-add callback for TCP: bump the watcher count first (so
// concurrently created pcbs are noticed), then register every
// existing TCP pcb with this client under the pcbinfo lock held
// shared.
static errno_t
nstat_tcp_add_watcher(
	nstat_control_state	*state)
{
	OSIncrementAtomic(&nstat_tcp_watchers);

	lck_rw_lock_shared(tcbinfo.ipi_lock);

	// Add all current tcp inpcbs. Ignore those in timewait
	struct inpcb *inp;
	struct nstat_tucookie *cookie;
	LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list)
	{
		cookie = nstat_tucookie_alloc_ref(inp);
		if (cookie == NULL)
			continue;	// pcb is going away; skip it
		if (nstat_control_source_add(0, state, &nstat_tcp_provider,
		    cookie) != 0)
		{
			// Add failed: drop the cookie (and its pcb ref) and stop.
			nstat_tucookie_release(cookie);
			break;
		}
	}

	lck_rw_done(tcbinfo.ipi_lock);

	return 0;
}
1153
// Drop the global TCP watcher count; once it reaches zero,
// nstat_tcp_new_pcb() stops offering newly created PCBs to clients.
static void
nstat_tcp_remove_watcher(
	__unused nstat_control_state	*state)
{
	OSDecrementAtomic(&nstat_tcp_watchers);
}
1160
/*
 * Called when a new TCP PCB is created.  If any client is watching the
 * TCP provider, allocate a cookie referencing the PCB and add it as a
 * source for each watching client.
 * Lock order: socket lock, then nstat_mtx.
 */
__private_extern__ void
nstat_tcp_new_pcb(
	struct inpcb	*inp)
{
	struct nstat_tucookie *cookie;

	// Cheap unlocked check: no watchers means nothing to do.
	if (nstat_tcp_watchers == 0)
		return;

	socket_lock(inp->inp_socket, 0);
	lck_mtx_lock(&nstat_mtx);
	nstat_control_state	*state;
	for (state = nstat_controls; state; state = state->ncs_next)
	{
		if ((state->ncs_watching & (1 << NSTAT_PROVIDER_TCP)) != 0)
		{
			// this client is watching tcp
			// acquire a reference for it
			cookie = nstat_tucookie_alloc_ref_locked(inp);
			if (cookie == NULL)
				continue;
			// add the source, if that fails, release the reference
			if (nstat_control_source_add(0, state,
			    &nstat_tcp_provider, cookie) != 0)
			{
				nstat_tucookie_release_locked(cookie);
				break;
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);
	socket_unlock(inp->inp_socket, 0);
}
1194
/*
 * Called when an inpcb (TCP or UDP) is detached.  Phase 1: under the
 * control locks, unlink each client's source that references this PCB
 * and send it a goodbye message.  Phase 2: after all locks are
 * dropped, clean up the unlinked sources.
 */
__private_extern__ void
nstat_pcb_detach(struct inpcb *inp)
{
	nstat_control_state *state;
	nstat_src *src, *prevsrc;
	nstat_src *dead_list = NULL;
	struct nstat_tucookie *tucookie;
	errno_t result;

	// Fast path: no PCB, or nobody watching either protocol.
	if (inp == NULL || (nstat_tcp_watchers == 0 && nstat_udp_watchers == 0))
		return;

	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next)
	{
		lck_mtx_lock(&state->mtx);
		// Find this client's source (if any) wrapping the PCB,
		// tracking the predecessor for the singly-linked unlink.
		for (prevsrc = NULL, src = state->ncs_srcs; src;
		    prevsrc = src, src = src->next)
		{
			tucookie = (struct nstat_tucookie *)src->cookie;
			if (tucookie->inp == inp)
				break;
		}

		if (src)
		{
			// Goodbye result is intentionally not acted upon here.
			result = nstat_control_send_goodbye(state, src);

			if (prevsrc)
				prevsrc->next = src->next;
			else
				state->ncs_srcs = src->next;

			// Park on a local list; destroy after locks drop.
			src->next = dead_list;
			dead_list = src;
		}
		lck_mtx_unlock(&state->mtx);
	}
	lck_mtx_unlock(&nstat_mtx);

	// Phase 2: tear down the unlinked sources lock-free.
	while (dead_list) {
		src = dead_list;
		dead_list = src->next;

		nstat_control_cleanup_source(NULL, src, TRUE);
	}
}
1242
/*
 * Snapshot a UDP PCB's addressing information (local/remote sockaddrs,
 * last output interface, interface properties) into every cookie that
 * references it, and mark those cookies cached.  UDP-only (VERIFY
 * below); descriptor copies then use the snapshot instead of the live
 * PCB -- presumably because the PCB's addresses are about to change;
 * see callers to confirm.
 */
__private_extern__ void
nstat_pcb_cache(struct inpcb *inp)
{
	nstat_control_state *state;
	nstat_src *src;
	struct nstat_tucookie *tucookie;

	if (inp == NULL || nstat_udp_watchers == 0 ||
	    inp->inp_nstat_refcnt == 0)
		return;
	VERIFY(SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP);
	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next) {
		lck_mtx_lock(&state->mtx);
		for (src = state->ncs_srcs; src; src = src->next)
		{
			tucookie = (struct nstat_tucookie *)src->cookie;
			if (tucookie->inp == inp)
			{
				// Capture addresses in the matching family.
				if (inp->inp_vflag & INP_IPV6)
				{
					nstat_ip6_to_sockaddr(&inp->in6p_laddr,
					    inp->inp_lport,
					    &tucookie->local.v6,
					    sizeof(tucookie->local));
					nstat_ip6_to_sockaddr(&inp->in6p_faddr,
					    inp->inp_fport,
					    &tucookie->remote.v6,
					    sizeof(tucookie->remote));
				}
				else if (inp->inp_vflag & INP_IPV4)
				{
					nstat_ip_to_sockaddr(&inp->inp_laddr,
					    inp->inp_lport,
					    &tucookie->local.v4,
					    sizeof(tucookie->local));
					nstat_ip_to_sockaddr(&inp->inp_faddr,
					    inp->inp_fport,
					    &tucookie->remote.v4,
					    sizeof(tucookie->remote));
				}
				if (inp->inp_last_outifp)
					tucookie->if_index =
					    inp->inp_last_outifp->if_index;

				tucookie->ifnet_properties = nstat_inpcb_to_flags(inp);
				tucookie->cached = true;
				// At most one source per client references
				// this PCB; move to the next client.
				break;
			}
		}
		lck_mtx_unlock(&state->mtx);
	}
	lck_mtx_unlock(&nstat_mtx);
}
1297
/*
 * Inverse of nstat_pcb_cache(): clear the `cached' flag on every
 * cookie referencing this UDP PCB so descriptor copies go back to
 * reading the live PCB state.
 */
__private_extern__ void
nstat_pcb_invalidate_cache(struct inpcb *inp)
{
	nstat_control_state *state;
	nstat_src *src;
	struct nstat_tucookie *tucookie;

	if (inp == NULL || nstat_udp_watchers == 0 ||
	    inp->inp_nstat_refcnt == 0)
		return;
	VERIFY(SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP);
	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next) {
		lck_mtx_lock(&state->mtx);
		for (src = state->ncs_srcs; src; src = src->next)
		{
			tucookie = (struct nstat_tucookie *)src->cookie;
			if (tucookie->inp == inp)
			{
				tucookie->cached = false;
				break;
			}
		}
		lck_mtx_unlock(&state->mtx);
	}
	lck_mtx_unlock(&nstat_mtx);
}
1325
/*
 * Fill a client-visible TCP descriptor: addresses, TCP state, send
 * window/cwnd, congestion-control algorithm, owning process identity,
 * and socket buffer usage.  Returns EINVAL if the buffer is too small
 * or the PCB is gone.  Several fields are read without holding the
 * socket/PCB locks -- see the inline notes.
 */
static errno_t
nstat_tcp_copy_descriptor(
	nstat_provider_cookie_t	cookie,
	void			*data,
	u_int32_t		len)
{
	if (len < sizeof(nstat_tcp_descriptor))
	{
		return EINVAL;
	}

	if (nstat_tcp_gone(cookie))
		return EINVAL;

	nstat_tcp_descriptor	*desc = (nstat_tcp_descriptor*)data;
	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;
	struct inpcb		*inp = tucookie->inp;
	struct tcpcb		*tp = intotcpcb(inp);
	bzero(desc, sizeof(*desc));

	// Report addresses in whichever family the PCB carries.
	if (inp->inp_vflag & INP_IPV6)
	{
		nstat_ip6_to_sockaddr(&inp->in6p_laddr, inp->inp_lport,
			&desc->local.v6, sizeof(desc->local));
		nstat_ip6_to_sockaddr(&inp->in6p_faddr, inp->inp_fport,
			&desc->remote.v6, sizeof(desc->remote));
	}
	else if (inp->inp_vflag & INP_IPV4)
	{
		nstat_ip_to_sockaddr(&inp->inp_laddr, inp->inp_lport,
			&desc->local.v4, sizeof(desc->local));
		nstat_ip_to_sockaddr(&inp->inp_faddr, inp->inp_fport,
			&desc->remote.v4, sizeof(desc->remote));
	}

	desc->state = intotcpcb(inp)->t_state;
	desc->ifindex = (inp->inp_last_outifp == NULL) ? 0 :
	    inp->inp_last_outifp->if_index;

	// danger - not locked, values could be bogus
	desc->txunacked = tp->snd_max - tp->snd_una;
	desc->txwindow = tp->snd_wnd;
	desc->txcwindow = tp->snd_cwnd;

	// Name of the active congestion-control algorithm, if any.
	if (CC_ALGO(tp)->name != NULL) {
		strlcpy(desc->cc_algo, CC_ALGO(tp)->name,
		    sizeof(desc->cc_algo));
	}

	struct socket *so = inp->inp_socket;
	if (so)
	{
		// TBD - take the socket lock around these to make sure
		// they're in sync?
		desc->upid = so->last_upid;
		desc->pid = so->last_pid;
		desc->traffic_class = so->so_traffic_class;
		desc->traffic_mgt_flags = so->so_traffic_mgt_flags;
		proc_name(desc->pid, desc->pname, sizeof(desc->pname));
		// If the process name can't be resolved, fall back to the
		// name remembered in the cookie; otherwise refresh it.
		if (desc->pname[0] == 0)
		{
			strlcpy(desc->pname, tucookie->pname,
			    sizeof(desc->pname));
		}
		else
		{
			desc->pname[sizeof(desc->pname) - 1] = 0;
			strlcpy(tucookie->pname, desc->pname,
			    sizeof(tucookie->pname));
		}
		memcpy(desc->uuid, so->last_uuid, sizeof(so->last_uuid));
		memcpy(desc->vuuid, so->so_vuuid, sizeof(so->so_vuuid));
		// Effective (delegated) identity falls back to the real one.
		if (so->so_flags & SOF_DELEGATED) {
			desc->eupid = so->e_upid;
			desc->epid = so->e_pid;
			memcpy(desc->euuid, so->e_uuid, sizeof(so->e_uuid));
		} else {
			desc->eupid = desc->upid;
			desc->epid = desc->pid;
			memcpy(desc->euuid, desc->uuid, sizeof(desc->uuid));
		}
		desc->sndbufsize = so->so_snd.sb_hiwat;
		desc->sndbufused = so->so_snd.sb_cc;
		desc->rcvbufsize = so->so_rcv.sb_hiwat;
		desc->rcvbufused = so->so_rcv.sb_cc;
	}

	tcp_get_connectivity_status(tp, &desc->connstatus);
	desc->ifnet_properties = nstat_inpcb_to_flags(inp);
	return 0;
}
1418
1419static bool
1420nstat_tcpudp_reporting_allowed(nstat_provider_cookie_t cookie, uint64_t filter)
1421{
1422 bool retval = true;
1423
1424 /* Only apply interface filter if at least one is allowed. */
1425 if ((filter & NSTAT_FILTER_ACCEPT_ALL) != 0)
1426 {
1427 struct nstat_tucookie *tucookie = (struct nstat_tucookie *)cookie;
1428 struct inpcb *inp = tucookie->inp;
1429
1430 uint16_t interface_properties = nstat_inpcb_to_flags(inp);
1431
1432 /* For now, just check on interface type. */
1433 retval = ((filter & interface_properties) != 0);
1434 }
1435 return retval;
1436}
1437
1438static void
1439nstat_init_tcp_provider(void)
1440{
1441 bzero(&nstat_tcp_provider, sizeof(nstat_tcp_provider));
1442 nstat_tcp_provider.nstat_descriptor_length = sizeof(nstat_tcp_descriptor);
1443 nstat_tcp_provider.nstat_provider_id = NSTAT_PROVIDER_TCP;
1444 nstat_tcp_provider.nstat_lookup = nstat_tcp_lookup;
1445 nstat_tcp_provider.nstat_gone = nstat_tcp_gone;
1446 nstat_tcp_provider.nstat_counts = nstat_tcp_counts;
1447 nstat_tcp_provider.nstat_release = nstat_tcp_release;
1448 nstat_tcp_provider.nstat_watcher_add = nstat_tcp_add_watcher;
1449 nstat_tcp_provider.nstat_watcher_remove = nstat_tcp_remove_watcher;
1450 nstat_tcp_provider.nstat_copy_descriptor = nstat_tcp_copy_descriptor;
1451 nstat_tcp_provider.nstat_reporting_allowed = nstat_tcpudp_reporting_allowed;
1452 nstat_tcp_provider.next = nstat_providers;
1453 nstat_providers = &nstat_tcp_provider;
1454}
1455
1456#pragma mark -- UDP Provider --
1457
1458static nstat_provider nstat_udp_provider;
1459
1460static errno_t
1461nstat_udp_lookup(
1462 const void *data,
1463 u_int32_t length,
1464 nstat_provider_cookie_t *out_cookie)
1465{
1466 return nstat_tcpudp_lookup(&udbinfo, data, length, out_cookie);
1467}
1468
1469static int
1470nstat_udp_gone(
1471 nstat_provider_cookie_t cookie)
1472{
1473 struct nstat_tucookie *tucookie =
1474 (struct nstat_tucookie *)cookie;
1475 struct inpcb *inp;
1476
1477 return (!(inp = tucookie->inp) ||
1478 inp->inp_state == INPCB_STATE_DEAD) ? 1 : 0;
1479}
1480
1481static errno_t
1482nstat_udp_counts(
1483 nstat_provider_cookie_t cookie,
1484 struct nstat_counts *out_counts,
1485 int *out_gone)
1486{
1487 struct nstat_tucookie *tucookie =
1488 (struct nstat_tucookie *)cookie;
1489
1490 if (out_gone) *out_gone = 0;
1491
1492 // if the pcb is in the dead state, we should stop using it
1493 if (nstat_udp_gone(cookie))
1494 {
1495 if (out_gone) *out_gone = 1;
1496 if (!tucookie->inp)
1497 return EINVAL;
1498 }
1499 struct inpcb *inp = tucookie->inp;
1500
1501 atomic_get_64(out_counts->nstat_rxpackets, &inp->inp_stat->rxpackets);
1502 atomic_get_64(out_counts->nstat_rxbytes, &inp->inp_stat->rxbytes);
1503 atomic_get_64(out_counts->nstat_txpackets, &inp->inp_stat->txpackets);
1504 atomic_get_64(out_counts->nstat_txbytes, &inp->inp_stat->txbytes);
1505 atomic_get_64(out_counts->nstat_cell_rxbytes, &inp->inp_cstat->rxbytes);
1506 atomic_get_64(out_counts->nstat_cell_txbytes, &inp->inp_cstat->txbytes);
1507 atomic_get_64(out_counts->nstat_wifi_rxbytes, &inp->inp_wstat->rxbytes);
1508 atomic_get_64(out_counts->nstat_wifi_txbytes, &inp->inp_wstat->txbytes);
1509 atomic_get_64(out_counts->nstat_wired_rxbytes, &inp->inp_Wstat->rxbytes);
1510 atomic_get_64(out_counts->nstat_wired_txbytes, &inp->inp_Wstat->txbytes);
1511
1512 return 0;
1513}
1514
1515static void
1516nstat_udp_release(
1517 nstat_provider_cookie_t cookie,
1518 int locked)
1519{
1520 struct nstat_tucookie *tucookie =
1521 (struct nstat_tucookie *)cookie;
1522
1523 nstat_tucookie_release_internal(tucookie, locked);
1524}
1525
1526static errno_t
1527nstat_udp_add_watcher(
1528 nstat_control_state *state)
1529{
1530 struct inpcb *inp;
1531 struct nstat_tucookie *cookie;
1532
1533 OSIncrementAtomic(&nstat_udp_watchers);
1534
1535 lck_rw_lock_shared(udbinfo.ipi_lock);
1536 // Add all current UDP inpcbs.
1537 LIST_FOREACH(inp, udbinfo.ipi_listhead, inp_list)
1538 {
1539 cookie = nstat_tucookie_alloc_ref(inp);
1540 if (cookie == NULL)
1541 continue;
1542 if (nstat_control_source_add(0, state, &nstat_udp_provider,
1543 cookie) != 0)
1544 {
1545 nstat_tucookie_release(cookie);
1546 break;
1547 }
1548 }
1549
1550 lck_rw_done(udbinfo.ipi_lock);
1551
1552 return 0;
1553}
1554
// Drop the global UDP watcher count; once it reaches zero,
// nstat_udp_new_pcb() stops offering newly created PCBs to clients.
static void
nstat_udp_remove_watcher(
	__unused nstat_control_state	*state)
{
	OSDecrementAtomic(&nstat_udp_watchers);
}
1561
/*
 * Called when a new UDP PCB is created.  If any client is watching the
 * UDP provider, allocate a cookie referencing the PCB and add it as a
 * source for each watching client.
 * Lock order: socket lock, then nstat_mtx.
 */
__private_extern__ void
nstat_udp_new_pcb(
	struct inpcb	*inp)
{
	struct nstat_tucookie *cookie;

	// Cheap unlocked check: no watchers means nothing to do.
	if (nstat_udp_watchers == 0)
		return;

	socket_lock(inp->inp_socket, 0);
	lck_mtx_lock(&nstat_mtx);
	nstat_control_state	*state;
	for (state = nstat_controls; state; state = state->ncs_next)
	{
		if ((state->ncs_watching & (1 << NSTAT_PROVIDER_UDP)) != 0)
		{
			// this client is watching UDP
			// acquire a reference for it
			cookie = nstat_tucookie_alloc_ref_locked(inp);
			if (cookie == NULL)
				continue;
			// add the source, if that fails, release the reference
			if (nstat_control_source_add(0, state,
			    &nstat_udp_provider, cookie) != 0)
			{
				nstat_tucookie_release_locked(cookie);
				break;
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);
	socket_unlock(inp->inp_socket, 0);
}
1595
/*
 * Fill a client-visible UDP descriptor.  Addresses and interface
 * properties come from the live PCB when the cookie isn't cached;
 * otherwise from the snapshot taken by nstat_pcb_cache().  Process
 * identity and receive-buffer info always come from the socket.
 * Returns EINVAL if the buffer is too small or the PCB is gone.
 */
static errno_t
nstat_udp_copy_descriptor(
	nstat_provider_cookie_t	cookie,
	void			*data,
	u_int32_t		len)
{
	if (len < sizeof(nstat_udp_descriptor))
	{
		return EINVAL;
	}

	if (nstat_udp_gone(cookie))
		return EINVAL;

	struct nstat_tucookie	*tucookie =
	    (struct nstat_tucookie *)cookie;
	nstat_udp_descriptor	*desc = (nstat_udp_descriptor*)data;
	struct inpcb		*inp = tucookie->inp;

	bzero(desc, sizeof(*desc));

	if (tucookie->cached == false) {
		// Live path: read addresses straight from the PCB.
		if (inp->inp_vflag & INP_IPV6)
		{
			nstat_ip6_to_sockaddr(&inp->in6p_laddr, inp->inp_lport,
				&desc->local.v6, sizeof(desc->local.v6));
			nstat_ip6_to_sockaddr(&inp->in6p_faddr, inp->inp_fport,
				&desc->remote.v6, sizeof(desc->remote.v6));
		}
		else if (inp->inp_vflag & INP_IPV4)
		{
			nstat_ip_to_sockaddr(&inp->inp_laddr, inp->inp_lport,
				&desc->local.v4, sizeof(desc->local.v4));
			nstat_ip_to_sockaddr(&inp->inp_faddr, inp->inp_fport,
				&desc->remote.v4, sizeof(desc->remote.v4));
		}
		desc->ifnet_properties = nstat_inpcb_to_flags(inp);
	}
	else
	{
		// Cached path: use the snapshot from nstat_pcb_cache().
		if (inp->inp_vflag & INP_IPV6)
		{
			memcpy(&desc->local.v6, &tucookie->local.v6,
			    sizeof(desc->local.v6));
			memcpy(&desc->remote.v6, &tucookie->remote.v6,
			    sizeof(desc->remote.v6));
		}
		else if (inp->inp_vflag & INP_IPV4)
		{
			memcpy(&desc->local.v4, &tucookie->local.v4,
			    sizeof(desc->local.v4));
			memcpy(&desc->remote.v4, &tucookie->remote.v4,
			    sizeof(desc->remote.v4));
		}
		desc->ifnet_properties = tucookie->ifnet_properties;
	}

	// Prefer the live last-output interface, falling back to the
	// interface index captured in the cookie.
	if (inp->inp_last_outifp)
		desc->ifindex = inp->inp_last_outifp->if_index;
	else
		desc->ifindex = tucookie->if_index;

	struct socket *so = inp->inp_socket;
	if (so)
	{
		// TBD - take the socket lock around these to make sure
		// they're in sync?
		desc->upid = so->last_upid;
		desc->pid = so->last_pid;
		proc_name(desc->pid, desc->pname, sizeof(desc->pname));
		// If the process name can't be resolved, fall back to the
		// name remembered in the cookie; otherwise refresh it.
		if (desc->pname[0] == 0)
		{
			strlcpy(desc->pname, tucookie->pname,
			    sizeof(desc->pname));
		}
		else
		{
			desc->pname[sizeof(desc->pname) - 1] = 0;
			strlcpy(tucookie->pname, desc->pname,
			    sizeof(tucookie->pname));
		}
		memcpy(desc->uuid, so->last_uuid, sizeof(so->last_uuid));
		memcpy(desc->vuuid, so->so_vuuid, sizeof(so->so_vuuid));
		// Effective (delegated) identity falls back to the real one.
		if (so->so_flags & SOF_DELEGATED) {
			desc->eupid = so->e_upid;
			desc->epid = so->e_pid;
			memcpy(desc->euuid, so->e_uuid, sizeof(so->e_uuid));
		} else {
			desc->eupid = desc->upid;
			desc->epid = desc->pid;
			memcpy(desc->euuid, desc->uuid, sizeof(desc->uuid));
		}
		desc->rcvbufsize = so->so_rcv.sb_hiwat;
		desc->rcvbufused = so->so_rcv.sb_cc;
		desc->traffic_class = so->so_traffic_class;
	}

	return 0;
}
1695
1696static void
1697nstat_init_udp_provider(void)
1698{
1699 bzero(&nstat_udp_provider, sizeof(nstat_udp_provider));
1700 nstat_udp_provider.nstat_provider_id = NSTAT_PROVIDER_UDP;
1701 nstat_udp_provider.nstat_descriptor_length = sizeof(nstat_udp_descriptor);
1702 nstat_udp_provider.nstat_lookup = nstat_udp_lookup;
1703 nstat_udp_provider.nstat_gone = nstat_udp_gone;
1704 nstat_udp_provider.nstat_counts = nstat_udp_counts;
1705 nstat_udp_provider.nstat_watcher_add = nstat_udp_add_watcher;
1706 nstat_udp_provider.nstat_watcher_remove = nstat_udp_remove_watcher;
1707 nstat_udp_provider.nstat_copy_descriptor = nstat_udp_copy_descriptor;
1708 nstat_udp_provider.nstat_release = nstat_udp_release;
1709 nstat_udp_provider.nstat_reporting_allowed = nstat_tcpudp_reporting_allowed;
1710 nstat_udp_provider.next = nstat_providers;
1711 nstat_providers = &nstat_udp_provider;
1712}
1713
1714#pragma mark -- ifnet Provider --
1715
1716static nstat_provider nstat_ifnet_provider;
1717
1718/*
1719 * We store a pointer to the ifnet and the original threshold
1720 * requested by the client.
1721 */
1722struct nstat_ifnet_cookie
1723{
1724 struct ifnet *ifp;
1725 uint64_t threshold;
1726};
1727
/*
 * Look up an ifnet source for a client request.  Validates the
 * parameter (threshold must be at least 1MB) and the caller's
 * privilege, finds the interface by index, takes an ifnet reference,
 * and lowers the interface's if_data_threshold if this client's
 * request is smaller than the current one.  When the threshold is
 * lowered, all existing ifnet sources are sent a fresh description.
 * Returns EINVAL if the interface index doesn't exist.
 */
static errno_t
nstat_ifnet_lookup(
	const void		*data,
	u_int32_t		length,
	nstat_provider_cookie_t	*out_cookie)
{
	const nstat_ifnet_add_param *param = (const nstat_ifnet_add_param *)data;
	struct ifnet *ifp;
	boolean_t changed = FALSE;
	nstat_control_state *state;
	nstat_src *src;
	struct nstat_ifnet_cookie *cookie;

	// Reject short parameters and thresholds below 1MB.
	if (length < sizeof(*param) || param->threshold < 1024*1024)
		return EINVAL;
	if (nstat_privcheck != 0) {
		errno_t result = priv_check_cred(kauth_cred_get(),
		    PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0);
		if (result != 0)
			return result;
	}
	cookie = OSMalloc(sizeof(*cookie), nstat_malloc_tag);
	if (cookie == NULL)
		return ENOMEM;
	bzero(cookie, sizeof(*cookie));

	ifnet_head_lock_shared();
	TAILQ_FOREACH(ifp, &ifnet_head, if_link)
	{
		ifnet_lock_exclusive(ifp);
		if (ifp->if_index == param->ifindex)
		{
			cookie->ifp = ifp;
			cookie->threshold = param->threshold;
			*out_cookie = cookie;
			// Lower the interface threshold if this client asked
			// for a smaller one (0 means "disabled").
			if (!ifp->if_data_threshold ||
			    ifp->if_data_threshold > param->threshold)
			{
				changed = TRUE;
				ifp->if_data_threshold = param->threshold;
			}
			ifnet_lock_done(ifp);
			ifnet_reference(ifp);
			break;
		}
		ifnet_lock_done(ifp);
	}
	ifnet_head_done();

	/*
	 * When we change the threshold to something smaller, we notify
	 * all of our clients with a description message.
	 * We won't send a message to the client we are currently serving
	 * because it has no `ifnet source' yet.
	 */
	if (changed)
	{
		lck_mtx_lock(&nstat_mtx);
		for (state = nstat_controls; state; state = state->ncs_next)
		{
			lck_mtx_lock(&state->mtx);
			for (src = state->ncs_srcs; src; src = src->next)
			{
				if (src->provider != &nstat_ifnet_provider)
					continue;
				nstat_control_send_description(state, src, 0, 0);
			}
			lck_mtx_unlock(&state->mtx);
		}
		lck_mtx_unlock(&nstat_mtx);
	}
	// No match: free the unused cookie before reporting failure.
	if (cookie->ifp == NULL)
		OSFree(cookie, sizeof(*cookie), nstat_malloc_tag);

	return ifp ? 0 : EINVAL;
}
1804
1805static int
1806nstat_ifnet_gone(
1807 nstat_provider_cookie_t cookie)
1808{
1809 struct ifnet *ifp;
1810 struct nstat_ifnet_cookie *ifcookie =
1811 (struct nstat_ifnet_cookie *)cookie;
1812
1813 ifnet_head_lock_shared();
1814 TAILQ_FOREACH(ifp, &ifnet_head, if_link)
1815 {
1816 if (ifp == ifcookie->ifp)
1817 break;
1818 }
1819 ifnet_head_done();
1820
1821 return ifp ? 0 : 1;
1822}
1823
1824static errno_t
1825nstat_ifnet_counts(
1826 nstat_provider_cookie_t cookie,
1827 struct nstat_counts *out_counts,
1828 int *out_gone)
1829{
1830 struct nstat_ifnet_cookie *ifcookie =
1831 (struct nstat_ifnet_cookie *)cookie;
1832 struct ifnet *ifp = ifcookie->ifp;
1833
1834 if (out_gone) *out_gone = 0;
1835
1836 // if the ifnet is gone, we should stop using it
1837 if (nstat_ifnet_gone(cookie))
1838 {
1839 if (out_gone) *out_gone = 1;
1840 return EINVAL;
1841 }
1842
1843 bzero(out_counts, sizeof(*out_counts));
1844 out_counts->nstat_rxpackets = ifp->if_ipackets;
1845 out_counts->nstat_rxbytes = ifp->if_ibytes;
1846 out_counts->nstat_txpackets = ifp->if_opackets;
1847 out_counts->nstat_txbytes = ifp->if_obytes;
1848 out_counts->nstat_cell_rxbytes = out_counts->nstat_cell_txbytes = 0;
1849 return 0;
1850}
1851
/*
 * Release an ifnet source.  Recomputes the interface's
 * if_data_threshold from the smallest threshold requested by any
 * remaining client (disabling it when none remain), then drops the
 * ifnet reference taken in nstat_ifnet_lookup() and frees the cookie.
 */
static void
nstat_ifnet_release(
	nstat_provider_cookie_t	cookie,
	__unused int		locked)
{
	struct nstat_ifnet_cookie *ifcookie;
	struct ifnet *ifp;
	nstat_control_state *state;
	nstat_src *src;
	uint64_t minthreshold = UINT64_MAX;

	/*
	 * Find all the clients that requested a threshold
	 * for this ifnet and re-calculate if_data_threshold.
	 */
	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next)
	{
		lck_mtx_lock(&state->mtx);
		for (src = state->ncs_srcs; src; src = src->next)
		{
			/* Skip the provider we are about to detach. */
			if (src->provider != &nstat_ifnet_provider ||
			    src->cookie == cookie)
				continue;
			ifcookie = (struct nstat_ifnet_cookie *)src->cookie;
			if (ifcookie->threshold < minthreshold)
				minthreshold = ifcookie->threshold;
		}
		lck_mtx_unlock(&state->mtx);
	}
	lck_mtx_unlock(&nstat_mtx);
	/*
	 * Reset if_data_threshold or disable it.
	 */
	ifcookie = (struct nstat_ifnet_cookie *)cookie;
	ifp = ifcookie->ifp;
	// Only touch the interface if it is still attached; the io ref
	// taken by ifnet_is_attached() is dropped after the update.
	if (ifnet_is_attached(ifp, 1)) {
		ifnet_lock_exclusive(ifp);
		if (minthreshold == UINT64_MAX)
			ifp->if_data_threshold = 0;
		else
			ifp->if_data_threshold = minthreshold;
		ifnet_lock_done(ifp);
		ifnet_decr_iorefcnt(ifp);
	}
	ifnet_release(ifp);
	OSFree(ifcookie, sizeof(*ifcookie), nstat_malloc_tag);
}
1901
/*
 * Fill in the link-status portion of an ifnet descriptor from the
 * interface's if_link_status report.  Mechanically translates the
 * kernel-internal cellular or wifi status bitmasks and fields into
 * their nstat descriptor equivalents.  The status type stays NONE when
 * the interface has no report, or when the report's version is not one
 * this code understands.  Reads are done under if_link_status_lock.
 */
static void
nstat_ifnet_copy_link_status(
	struct ifnet		*ifp,
	struct nstat_ifnet_descriptor	*desc)
{
	struct if_link_status *ifsr = ifp->if_link_status;
	nstat_ifnet_desc_link_status *link_status = &desc->link_status;

	link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_NONE;
	if (ifsr == NULL)
		return;

	lck_rw_lock_shared(&ifp->if_link_status_lock);

	if (ifp->if_type == IFT_CELLULAR) {

		// Cellular path: translate if_cellular_status_v1 fields.
		nstat_ifnet_desc_cellular_status *cell_status = &link_status->u.cellular;
		struct if_cellular_status_v1 *if_cell_sr =
			&ifsr->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;

		if (ifsr->ifsr_version != IF_CELLULAR_STATUS_REPORT_VERSION_1)
			goto done;

		link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_CELLULAR;

		if (if_cell_sr->valid_bitmask & IF_CELL_LINK_QUALITY_METRIC_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_LINK_QUALITY_METRIC_VALID;
			cell_status->link_quality_metric = if_cell_sr->link_quality_metric;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_EFFECTIVE_BANDWIDTH_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_EFFECTIVE_BANDWIDTH_VALID;
			cell_status->ul_effective_bandwidth = if_cell_sr->ul_effective_bandwidth;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_BANDWIDTH_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_BANDWIDTH_VALID;
			cell_status->ul_max_bandwidth = if_cell_sr->ul_max_bandwidth;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_MIN_LATENCY_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MIN_LATENCY_VALID;
			cell_status->ul_min_latency = if_cell_sr->ul_min_latency;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_EFFECTIVE_LATENCY_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_EFFECTIVE_LATENCY_VALID;
			cell_status->ul_effective_latency = if_cell_sr->ul_effective_latency;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_LATENCY_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_LATENCY_VALID;
			cell_status->ul_max_latency = if_cell_sr->ul_max_latency;
		}
		// Retransmit level is an enum; an unrecognized value clears
		// the valid bit rather than passing garbage through.
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_RETXT_LEVEL_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_VALID;
			if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_NONE)
				cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_NONE;
			else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_LOW)
				cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_LOW;
			else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_MEDIUM)
				cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_MEDIUM;
			else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_HIGH)
				cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_HIGH;
			else
				cell_status->valid_bitmask &= ~NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_VALID;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_BYTES_LOST_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_BYTES_LOST_VALID;
			cell_status->ul_bytes_lost = if_cell_sr->ul_bytes_lost;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_MIN_QUEUE_SIZE_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MIN_QUEUE_SIZE_VALID;
			cell_status->ul_min_queue_size = if_cell_sr->ul_min_queue_size;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_AVG_QUEUE_SIZE_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_AVG_QUEUE_SIZE_VALID;
			cell_status->ul_avg_queue_size = if_cell_sr->ul_avg_queue_size;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_QUEUE_SIZE_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_QUEUE_SIZE_VALID;
			cell_status->ul_max_queue_size = if_cell_sr->ul_max_queue_size;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_DL_EFFECTIVE_BANDWIDTH_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_DL_EFFECTIVE_BANDWIDTH_VALID;
			cell_status->dl_effective_bandwidth = if_cell_sr->dl_effective_bandwidth;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_DL_MAX_BANDWIDTH_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_DL_MAX_BANDWIDTH_VALID;
			cell_status->dl_max_bandwidth = if_cell_sr->dl_max_bandwidth;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_CONFIG_INACTIVITY_TIME_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_CONFIG_INACTIVITY_TIME_VALID;
			cell_status->config_inactivity_time = if_cell_sr->config_inactivity_time;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_CONFIG_BACKOFF_TIME_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_CONFIG_BACKOFF_TIME_VALID;
			cell_status->config_backoff_time = if_cell_sr->config_backoff_time;
		}

	} else if (ifp->if_subfamily == IFNET_SUBFAMILY_WIFI) {

		// Wifi path: translate if_wifi_status_v1 fields.
		nstat_ifnet_desc_wifi_status *wifi_status = &link_status->u.wifi;
		struct if_wifi_status_v1 *if_wifi_sr =
			&ifsr->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;

		if (ifsr->ifsr_version != IF_WIFI_STATUS_REPORT_VERSION_1)
			goto done;

		link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_WIFI;

		if (if_wifi_sr->valid_bitmask & IF_WIFI_LINK_QUALITY_METRIC_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_LINK_QUALITY_METRIC_VALID;
			wifi_status->link_quality_metric = if_wifi_sr->link_quality_metric;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID;
			wifi_status->ul_effective_bandwidth = if_wifi_sr->ul_effective_bandwidth;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MAX_BANDWIDTH_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MAX_BANDWIDTH_VALID;
			wifi_status->ul_max_bandwidth = if_wifi_sr->ul_max_bandwidth;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MIN_LATENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MIN_LATENCY_VALID;
			wifi_status->ul_min_latency = if_wifi_sr->ul_min_latency;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_EFFECTIVE_LATENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_EFFECTIVE_LATENCY_VALID;
			wifi_status->ul_effective_latency = if_wifi_sr->ul_effective_latency;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MAX_LATENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MAX_LATENCY_VALID;
			wifi_status->ul_max_latency = if_wifi_sr->ul_max_latency;
		}
		// Same enum-translation pattern as the cellular retxt level.
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_RETXT_LEVEL_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_VALID;
			if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_NONE)
				wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_NONE;
			else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_LOW)
				wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_LOW;
			else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_MEDIUM)
				wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_MEDIUM;
			else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_HIGH)
				wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_HIGH;
			else
				wifi_status->valid_bitmask &= ~NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_VALID;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_BYTES_LOST_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_BYTES_LOST_VALID;
			wifi_status->ul_bytes_lost = if_wifi_sr->ul_bytes_lost;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_ERROR_RATE_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_ERROR_RATE_VALID;
			wifi_status->ul_error_rate = if_wifi_sr->ul_error_rate;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID;
			wifi_status->dl_effective_bandwidth = if_wifi_sr->dl_effective_bandwidth;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MAX_BANDWIDTH_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MAX_BANDWIDTH_VALID;
			wifi_status->dl_max_bandwidth = if_wifi_sr->dl_max_bandwidth;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MIN_LATENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MIN_LATENCY_VALID;
			wifi_status->dl_min_latency = if_wifi_sr->dl_min_latency;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_EFFECTIVE_LATENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_EFFECTIVE_LATENCY_VALID;
			wifi_status->dl_effective_latency = if_wifi_sr->dl_effective_latency;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MAX_LATENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MAX_LATENCY_VALID;
			wifi_status->dl_max_latency = if_wifi_sr->dl_max_latency;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_ERROR_RATE_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_ERROR_RATE_VALID;
			wifi_status->dl_error_rate = if_wifi_sr->dl_error_rate;
		}
		// Frequency band is an enum; unrecognized values clear the bit.
		if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_FREQUENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_VALID;
			if (if_wifi_sr->config_frequency == IF_WIFI_CONFIG_FREQUENCY_2_4_GHZ)
				wifi_status->config_frequency = NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_2_4_GHZ;
			else if (if_wifi_sr->config_frequency == IF_WIFI_CONFIG_FREQUENCY_5_0_GHZ)
				wifi_status->config_frequency = NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_5_0_GHZ;
			else
				wifi_status->valid_bitmask &= ~NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_VALID;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_MULTICAST_RATE_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_MULTICAST_RATE_VALID;
			wifi_status->config_multicast_rate = if_wifi_sr->config_multicast_rate;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_SCAN_COUNT_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_SCAN_COUNT_VALID;
			wifi_status->scan_count = if_wifi_sr->scan_count;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_SCAN_DURATION_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_SCAN_DURATION_VALID;
			wifi_status->scan_duration = if_wifi_sr->scan_duration;
		}
	}

done:
	lck_rw_done(&ifp->if_link_status_lock);
}
2103
/*
 * Fill in an nstat_ifnet_descriptor for the interface behind `cookie`.
 * `data`/`len` is the caller-supplied destination buffer; returns EINVAL
 * if the buffer is too small or the interface has already gone away.
 * Reads ifnet fields under the shared (read) ifnet lock.
 */
static errno_t
nstat_ifnet_copy_descriptor(
	nstat_provider_cookie_t	cookie,
	void			*data,
	u_int32_t		len)
{
	nstat_ifnet_descriptor *desc = (nstat_ifnet_descriptor *)data;
	struct nstat_ifnet_cookie *ifcookie =
	    (struct nstat_ifnet_cookie *)cookie;
	struct ifnet *ifp = ifcookie->ifp;

	if (len < sizeof(nstat_ifnet_descriptor))
		return EINVAL;

	// Don't describe an interface that is detaching/detached.
	if (nstat_ifnet_gone(cookie))
		return EINVAL;

	bzero(desc, sizeof(*desc));
	ifnet_lock_shared(ifp);
	strlcpy(desc->name, ifp->if_xname, sizeof(desc->name));
	desc->ifindex = ifp->if_index;
	desc->threshold = ifp->if_data_threshold;
	desc->type = ifp->if_type;
	// NOTE(review): guarded by ifd_len < sizeof(description) but copies the
	// full sizeof(desc->description) bytes from ifd_desc — assumes ifd_desc
	// is at least that large (trailing bytes beyond ifd_len are then sent
	// as-is); confirm against the ifnet if_desc buffer sizing.
	if (ifp->if_desc.ifd_len < sizeof(desc->description))
		memcpy(desc->description, ifp->if_desc.ifd_desc,
		    sizeof(desc->description));
	// Link status (cellular/wifi) is copied under its own rwlock inside.
	nstat_ifnet_copy_link_status(ifp, desc);
	ifnet_lock_done(ifp);
	return 0;
}
2134
2135static void
2136nstat_init_ifnet_provider(void)
2137{
2138 bzero(&nstat_ifnet_provider, sizeof(nstat_ifnet_provider));
2139 nstat_ifnet_provider.nstat_provider_id = NSTAT_PROVIDER_IFNET;
2140 nstat_ifnet_provider.nstat_descriptor_length = sizeof(nstat_ifnet_descriptor);
2141 nstat_ifnet_provider.nstat_lookup = nstat_ifnet_lookup;
2142 nstat_ifnet_provider.nstat_gone = nstat_ifnet_gone;
2143 nstat_ifnet_provider.nstat_counts = nstat_ifnet_counts;
2144 nstat_ifnet_provider.nstat_watcher_add = NULL;
2145 nstat_ifnet_provider.nstat_watcher_remove = NULL;
2146 nstat_ifnet_provider.nstat_copy_descriptor = nstat_ifnet_copy_descriptor;
2147 nstat_ifnet_provider.nstat_release = nstat_ifnet_release;
2148 nstat_ifnet_provider.next = nstat_providers;
2149 nstat_providers = &nstat_ifnet_provider;
2150}
2151
/*
 * Called when an interface's byte-count threshold has been crossed.
 * Walks every control client and every ifnet source it holds, sending a
 * counts message for sources that reference the given interface index.
 * Lock order: nstat_mtx, then each state->mtx in turn.
 */
__private_extern__ void
nstat_ifnet_threshold_reached(unsigned int ifindex)
{
	nstat_control_state *state;
	nstat_src *src;
	struct ifnet *ifp;
	struct nstat_ifnet_cookie *ifcookie;

	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next)
	{
		lck_mtx_lock(&state->mtx);
		for (src = state->ncs_srcs; src; src = src->next)
		{
			// Only ifnet sources matching the signalled interface.
			if (src->provider != &nstat_ifnet_provider)
				continue;
			ifcookie = (struct nstat_ifnet_cookie *)src->cookie;
			ifp = ifcookie->ifp;
			if (ifp->if_index != ifindex)
				continue;
			nstat_control_send_counts(state, src, 0, 0, NULL);
		}
		lck_mtx_unlock(&state->mtx);
	}
	lck_mtx_unlock(&nstat_mtx);
}
2178
2179#pragma mark -- Sysinfo --
2180static void
2181nstat_set_keyval_scalar(nstat_sysinfo_keyval *kv, int key, u_int32_t val)
2182{
2183 kv->nstat_sysinfo_key = key;
2184 kv->nstat_sysinfo_flags = NSTAT_SYSINFO_FLAG_SCALAR;
2185 kv->u.nstat_sysinfo_scalar = val;
2186}
2187
2188static void
2189nstat_sysinfo_send_data_internal(
2190 nstat_control_state *control,
2191 nstat_sysinfo_data *data)
2192{
2193 nstat_msg_sysinfo_counts *syscnt = NULL;
2194 size_t allocsize = 0, countsize = 0, nkeyvals = 0;
2195 nstat_sysinfo_keyval *kv;
2196 errno_t result = 0;
2197 size_t i = 0;
2198
2199 allocsize = offsetof(nstat_msg_sysinfo_counts, counts);
2200 countsize = offsetof(nstat_sysinfo_counts, nstat_sysinfo_keyvals);
2201
2202 /* get number of key-vals for each kind of stat */
2203 switch (data->flags)
2204 {
2205 case NSTAT_SYSINFO_MBUF_STATS:
2206 nkeyvals = sizeof(struct nstat_sysinfo_mbuf_stats) /
2207 sizeof(u_int32_t);
2208 break;
2209 case NSTAT_SYSINFO_TCP_STATS:
2210 nkeyvals = sizeof(struct nstat_sysinfo_tcp_stats) /
2211 sizeof(u_int32_t);
2212 break;
2213 default:
2214 return;
2215 }
2216 countsize += sizeof(nstat_sysinfo_keyval) * nkeyvals;
2217 allocsize += countsize;
2218
2219 syscnt = OSMalloc(allocsize, nstat_malloc_tag);
2220 if (syscnt == NULL)
2221 return;
2222 bzero(syscnt, allocsize);
2223
2224 syscnt->hdr.type = NSTAT_MSG_TYPE_SYSINFO_COUNTS;
2225 syscnt->hdr.length = allocsize;
2226 syscnt->counts.nstat_sysinfo_len = countsize;
2227
2228 kv = (nstat_sysinfo_keyval *) &syscnt->counts.nstat_sysinfo_keyvals;
2229 switch (data->flags)
2230 {
2231 case NSTAT_SYSINFO_MBUF_STATS:
2232 {
2233 nstat_set_keyval_scalar(&kv[i++],
2234 NSTAT_SYSINFO_KEY_MBUF_256B_TOTAL,
2235 data->u.mb_stats.total_256b);
2236 nstat_set_keyval_scalar(&kv[i++],
2237 NSTAT_SYSINFO_KEY_MBUF_2KB_TOTAL,
2238 data->u.mb_stats.total_2kb);
2239 nstat_set_keyval_scalar(&kv[i++],
2240 NSTAT_SYSINFO_KEY_MBUF_4KB_TOTAL,
2241 data->u.mb_stats.total_4kb);
2242 nstat_set_keyval_scalar(&kv[i++],
2243 NSTAT_SYSINFO_MBUF_16KB_TOTAL,
2244 data->u.mb_stats.total_16kb);
2245 nstat_set_keyval_scalar(&kv[i++],
2246 NSTAT_SYSINFO_KEY_SOCK_MBCNT,
2247 data->u.mb_stats.sbmb_total);
2248 nstat_set_keyval_scalar(&kv[i++],
2249 NSTAT_SYSINFO_KEY_SOCK_ATMBLIMIT,
2250 data->u.mb_stats.sb_atmbuflimit);
2251 nstat_set_keyval_scalar(&kv[i++],
2252 NSTAT_SYSINFO_MBUF_DRAIN_CNT,
2253 data->u.mb_stats.draincnt);
2254 nstat_set_keyval_scalar(&kv[i++],
2255 NSTAT_SYSINFO_MBUF_MEM_RELEASED,
2256 data->u.mb_stats.memreleased);
2257 VERIFY(i == nkeyvals);
2258 break;
2259 }
2260 case NSTAT_SYSINFO_TCP_STATS:
2261 {
2262 nstat_set_keyval_scalar(&kv[i++],
2263 NSTAT_SYSINFO_KEY_IPV4_AVGRTT,
2264 data->u.tcp_stats.ipv4_avgrtt);
2265 nstat_set_keyval_scalar(&kv[i++],
2266 NSTAT_SYSINFO_KEY_IPV6_AVGRTT,
2267 data->u.tcp_stats.ipv6_avgrtt);
2268 nstat_set_keyval_scalar(&kv[i++],
2269 NSTAT_SYSINFO_KEY_SEND_PLR,
2270 data->u.tcp_stats.send_plr);
2271 nstat_set_keyval_scalar(&kv[i++],
2272 NSTAT_SYSINFO_KEY_RECV_PLR,
2273 data->u.tcp_stats.recv_plr);
2274 nstat_set_keyval_scalar(&kv[i++],
2275 NSTAT_SYSINFO_KEY_SEND_TLRTO,
2276 data->u.tcp_stats.send_tlrto_rate);
2277 nstat_set_keyval_scalar(&kv[i++],
2278 NSTAT_SYSINFO_KEY_SEND_REORDERRATE,
2279 data->u.tcp_stats.send_reorder_rate);
2280 nstat_set_keyval_scalar(&kv[i++],
2281 NSTAT_SYSINFO_CONNECTION_ATTEMPTS,
2282 data->u.tcp_stats.connection_attempts);
2283 nstat_set_keyval_scalar(&kv[i++],
2284 NSTAT_SYSINFO_CONNECTION_ACCEPTS,
2285 data->u.tcp_stats.connection_accepts);
2286 nstat_set_keyval_scalar(&kv[i++],
2287 NSTAT_SYSINFO_ECN_CLIENT_ENABLED,
2288 data->u.tcp_stats.ecn_client_enabled);
2289 nstat_set_keyval_scalar(&kv[i++],
2290 NSTAT_SYSINFO_ECN_SERVER_ENABLED,
2291 data->u.tcp_stats.ecn_server_enabled);
2292 nstat_set_keyval_scalar(&kv[i++],
2293 NSTAT_SYSINFO_ECN_CLIENT_SETUP,
2294 data->u.tcp_stats.ecn_client_setup);
2295 nstat_set_keyval_scalar(&kv[i++],
2296 NSTAT_SYSINFO_ECN_SERVER_SETUP,
2297 data->u.tcp_stats.ecn_server_setup);
2298 nstat_set_keyval_scalar(&kv[i++],
2299 NSTAT_SYSINFO_ECN_CLIENT_SUCCESS,
2300 data->u.tcp_stats.ecn_client_success);
2301 nstat_set_keyval_scalar(&kv[i++],
2302 NSTAT_SYSINFO_ECN_SERVER_SUCCESS,
2303 data->u.tcp_stats.ecn_server_success);
2304 nstat_set_keyval_scalar(&kv[i++],
2305 NSTAT_SYSINFO_ECN_NOT_SUPPORTED,
2306 data->u.tcp_stats.ecn_not_supported);
2307 nstat_set_keyval_scalar(&kv[i++],
2308 NSTAT_SYSINFO_ECN_LOST_SYN,
2309 data->u.tcp_stats.ecn_lost_syn);
2310 nstat_set_keyval_scalar(&kv[i++],
2311 NSTAT_SYSINFO_ECN_LOST_SYNACK,
2312 data->u.tcp_stats.ecn_lost_synack);
2313 nstat_set_keyval_scalar(&kv[i++],
2314 NSTAT_SYSINFO_ECN_RECV_CE,
2315 data->u.tcp_stats.ecn_recv_ce);
2316 nstat_set_keyval_scalar(&kv[i++],
2317 NSTAT_SYSINFO_ECN_RECV_ECE,
2318 data->u.tcp_stats.ecn_recv_ece);
2319 nstat_set_keyval_scalar(&kv[i++],
2320 NSTAT_SYSINFO_ECN_SENT_ECE,
2321 data->u.tcp_stats.ecn_sent_ece);
2322 nstat_set_keyval_scalar(&kv[i++],
2323 NSTAT_SYSINFO_ECN_CONN_RECV_CE,
2324 data->u.tcp_stats.ecn_conn_recv_ce);
2325 nstat_set_keyval_scalar(&kv[i++],
2326 NSTAT_SYSINFO_ECN_CONN_RECV_ECE,
2327 data->u.tcp_stats.ecn_conn_recv_ece);
2328 nstat_set_keyval_scalar(&kv[i++],
2329 NSTAT_SYSINFO_ECN_CONN_PLNOCE,
2330 data->u.tcp_stats.ecn_conn_plnoce);
2331 nstat_set_keyval_scalar(&kv[i++],
2332 NSTAT_SYSINFO_ECN_CONN_PL_CE,
2333 data->u.tcp_stats.ecn_conn_pl_ce);
2334 nstat_set_keyval_scalar(&kv[i++],
2335 NSTAT_SYSINFO_ECN_CONN_NOPL_CE,
2336 data->u.tcp_stats.ecn_conn_nopl_ce);
2337 nstat_set_keyval_scalar(&kv[i++],
2338 NSTAT_SYSINFO_TFO_SYN_DATA_RCV,
2339 data->u.tcp_stats.tfo_syn_data_rcv);
2340 nstat_set_keyval_scalar(&kv[i++],
2341 NSTAT_SYSINFO_TFO_COOKIE_REQ_RCV,
2342 data->u.tcp_stats.tfo_cookie_req_rcv);
2343 nstat_set_keyval_scalar(&kv[i++],
2344 NSTAT_SYSINFO_TFO_COOKIE_SENT,
2345 data->u.tcp_stats.tfo_cookie_sent);
2346 nstat_set_keyval_scalar(&kv[i++],
2347 NSTAT_SYSINFO_TFO_COOKIE_INVALID,
2348 data->u.tcp_stats.tfo_cookie_invalid);
2349 nstat_set_keyval_scalar(&kv[i++],
2350 NSTAT_SYSINFO_TFO_COOKIE_REQ,
2351 data->u.tcp_stats.tfo_cookie_req);
2352 nstat_set_keyval_scalar(&kv[i++],
2353 NSTAT_SYSINFO_TFO_COOKIE_RCV,
2354 data->u.tcp_stats.tfo_cookie_rcv);
2355 nstat_set_keyval_scalar(&kv[i++],
2356 NSTAT_SYSINFO_TFO_SYN_DATA_SENT,
2357 data->u.tcp_stats.tfo_syn_data_sent);
2358 nstat_set_keyval_scalar(&kv[i++],
2359 NSTAT_SYSINFO_TFO_SYN_DATA_ACKED,
2360 data->u.tcp_stats.tfo_syn_data_acked);
2361 nstat_set_keyval_scalar(&kv[i++],
2362 NSTAT_SYSINFO_TFO_SYN_LOSS,
2363 data->u.tcp_stats.tfo_syn_loss);
2364 nstat_set_keyval_scalar(&kv[i++],
2365 NSTAT_SYSINFO_TFO_BLACKHOLE,
2366 data->u.tcp_stats.tfo_blackhole);
2367
2368 VERIFY(i == nkeyvals);
2369 break;
2370 }
2371 }
2372
2373 if (syscnt != NULL)
2374 {
2375 result = ctl_enqueuedata(control->ncs_kctl,
2376 control->ncs_unit, syscnt, allocsize, CTL_DATA_EOR);
2377 if (result != 0)
2378 {
2379 nstat_stats.nstat_sysinfofailures += 1;
2380 }
2381 OSFree(syscnt, allocsize, nstat_malloc_tag);
2382 }
2383 return;
2384}
2385
2386__private_extern__ void
2387nstat_sysinfo_send_data(
2388 nstat_sysinfo_data *data)
2389{
2390 nstat_control_state *control;
2391
2392 lck_mtx_lock(&nstat_mtx);
2393 for (control = nstat_controls; control; control = control->ncs_next)
2394 {
2395 lck_mtx_lock(&control->mtx);
2396 if ((control->ncs_flags & NSTAT_FLAG_SYSINFO_SUBSCRIBED) != 0)
2397 {
2398 nstat_sysinfo_send_data_internal(control, data);
2399 }
2400 lck_mtx_unlock(&control->mtx);
2401 }
2402 lck_mtx_unlock(&nstat_mtx);
2403}
2404
/*
 * Trigger the periodic system-wide reports (called from the idle-check
 * timer): mbuf peak usage and global TCP statistics.  Each callee pushes
 * its data through nstat_sysinfo_send_data().
 */
static void
nstat_sysinfo_generate_report(void)
{
	mbuf_report_peak_usage();
	tcp_report_stats();
}
2411
#pragma mark -- Kernel Control Socket --

// Kernel control registration handle and the lock group shared by
// nstat_mtx and every per-client state mutex.
static kern_ctl_ref nstat_ctlref = NULL;
static lck_grp_t *nstat_lck_grp = NULL;

// Kernel control callbacks, registered in nstat_control_register().
static errno_t nstat_control_connect(kern_ctl_ref kctl, struct sockaddr_ctl *sac, void **uinfo);
static errno_t nstat_control_disconnect(kern_ctl_ref kctl, u_int32_t unit, void *uinfo);
static errno_t nstat_control_send(kern_ctl_ref kctl, u_int32_t unit, void *uinfo, mbuf_t m, int flags);
2420
2421static errno_t
2422nstat_enqueue_success(
2423 uint64_t context,
2424 nstat_control_state *state,
2425 u_int16_t flags)
2426{
2427 nstat_msg_hdr success;
2428 errno_t result;
2429
2430 bzero(&success, sizeof(success));
2431 success.context = context;
2432 success.type = NSTAT_MSG_TYPE_SUCCESS;
2433 success.length = sizeof(success);
2434 success.flags = flags;
2435 result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &success,
2436 sizeof(success), CTL_DATA_EOR | CTL_DATA_CRIT);
2437 if (result != 0) {
2438 if (nstat_debug != 0)
2439 printf("%s: could not enqueue success message %d\n",
2440 __func__, result);
2441 nstat_stats.nstat_successmsgfailures += 1;
2442 }
2443 return result;
2444}
2445
2446static errno_t
2447nstat_control_send_goodbye(
2448 nstat_control_state *state,
2449 nstat_src *src)
2450{
2451 errno_t result = 0;
2452 int failed = 0;
2453
2454 if (nstat_control_reporting_allowed(state, src))
2455 {
2456 if ((state->ncs_flags & NSTAT_FLAG_SUPPORTS_UPDATES) != 0)
2457 {
2458 result = nstat_control_send_update(state, src, 0, NSTAT_MSG_HDR_FLAG_CLOSING, NULL);
2459 if (result != 0)
2460 {
2461 failed = 1;
2462 if (nstat_debug != 0)
2463 printf("%s - nstat_control_send_update() %d\n", __func__, result);
2464 }
2465 }
2466 else
2467 {
2468 // send one last counts notification
2469 result = nstat_control_send_counts(state, src, 0, NSTAT_MSG_HDR_FLAG_CLOSING, NULL);
2470 if (result != 0)
2471 {
2472 failed = 1;
2473 if (nstat_debug != 0)
2474 printf("%s - nstat_control_send_counts() %d\n", __func__, result);
2475 }
2476
2477 // send a last description
2478 result = nstat_control_send_description(state, src, 0, NSTAT_MSG_HDR_FLAG_CLOSING);
2479 if (result != 0)
2480 {
2481 failed = 1;
2482 if (nstat_debug != 0)
2483 printf("%s - nstat_control_send_description() %d\n", __func__, result);
2484 }
2485 }
2486 }
2487
2488 // send the source removed notification
2489 result = nstat_control_send_removed(state, src);
2490 if (result != 0 && nstat_debug)
2491 {
2492 failed = 1;
2493 if (nstat_debug != 0)
2494 printf("%s - nstat_control_send_removed() %d\n", __func__, result);
2495 }
2496
2497 if (failed != 0)
2498 nstat_stats.nstat_control_send_goodbye_failures++;
2499
2500
2501 return result;
2502}
2503
2504static errno_t
2505nstat_flush_accumulated_msgs(
2506 nstat_control_state *state)
2507{
2508 errno_t result = 0;
2509 if (state->ncs_accumulated && mbuf_len(state->ncs_accumulated))
2510 {
2511 mbuf_pkthdr_setlen(state->ncs_accumulated, mbuf_len(state->ncs_accumulated));
2512 result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, state->ncs_accumulated, CTL_DATA_EOR);
2513 if (result != 0 && nstat_debug)
2514 {
2515 nstat_stats.nstat_flush_accumulated_msgs_failures++;
2516 if (nstat_debug != 0)
2517 printf("%s - ctl_enqueuembuf failed: %d\n", __func__, result);
2518 mbuf_freem(state->ncs_accumulated);
2519 }
2520 state->ncs_accumulated = NULL;
2521 }
2522 return result;
2523}
2524
/*
 * Append one message (hdr..hdr+length) to the per-client accumulation
 * mbuf, flushing first if the message won't fit.  On copy failure the
 * accumulator is flushed and the message is sent directly via
 * ctl_enqueuedata() as a fallback.  Note: hdr->length is overwritten
 * with the caller-supplied length before copying.
 */
static errno_t
nstat_accumulate_msg(
	nstat_control_state	*state,
	nstat_msg_hdr		*hdr,
	size_t			length)
{
	// Not enough room left for this message? Flush what we have first.
	if (state->ncs_accumulated && mbuf_trailingspace(state->ncs_accumulated) < length)
	{
		// Will send the current mbuf
		nstat_flush_accumulated_msgs(state);
	}

	errno_t result = 0;

	// Lazily (re)allocate the accumulation buffer as a single mbuf.
	if (state->ncs_accumulated == NULL)
	{
		unsigned int one = 1;
		if (mbuf_allocpacket(MBUF_DONTWAIT, NSTAT_MAX_MSG_SIZE, &one, &state->ncs_accumulated) != 0)
		{
			if (nstat_debug != 0)
				printf("%s - mbuf_allocpacket failed\n", __func__);
			result = ENOMEM;
		}
		else
		{
			mbuf_setlen(state->ncs_accumulated, 0);
		}
	}

	if (result == 0)
	{
		hdr->length = length;
		// Append at the current end of the accumulated data.
		result = mbuf_copyback(state->ncs_accumulated, mbuf_len(state->ncs_accumulated),
		    length, hdr, MBUF_DONTWAIT);
	}

	// Fallback: flush whatever accumulated and enqueue this message alone.
	if (result != 0)
	{
		nstat_flush_accumulated_msgs(state);
		if (nstat_debug != 0)
			printf("%s - resorting to ctl_enqueuedata\n", __func__);
		result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, hdr, length, CTL_DATA_EOR);
	}

	if (result != 0)
		nstat_stats.nstat_accumulate_msg_failures++;

	return result;
}
2574
/*
 * Periodic (60s) thread-call: reap sources whose underlying objects are
 * gone, re-arm the timer while clients remain, and kick off the system
 * reports.  Dead sources are unlinked under the locks but released only
 * after all locks are dropped, since nstat_release may be heavyweight.
 * Lock order: nstat_mtx, then each state->mtx.
 */
static void*
nstat_idle_check(
	__unused thread_call_param_t p0,
	__unused thread_call_param_t p1)
{
	lck_mtx_lock(&nstat_mtx);

	// Timer has fired; it is no longer armed.
	nstat_idle_time = 0;

	nstat_control_state *control;
	nstat_src	*dead = NULL;
	nstat_src	*dead_list = NULL;
	for (control = nstat_controls; control; control = control->ncs_next)
	{
		lck_mtx_lock(&control->mtx);
		nstat_src	**srcpp = &control->ncs_srcs;

		// Skip reaping for clients that asked for counts this interval.
		if (!(control->ncs_flags & NSTAT_FLAG_REQCOUNTS))
		{
			while(*srcpp != NULL)
			{
				if ((*srcpp)->provider->nstat_gone((*srcpp)->cookie))
				{
					errno_t result;

					// Pull it off the list
					dead = *srcpp;
					*srcpp = (*srcpp)->next;

					result = nstat_control_send_goodbye(control, dead);

					// Put this on the list to release later
					dead->next = dead_list;
					dead_list = dead;
				}
				else
				{
					srcpp = &(*srcpp)->next;
				}
			}
		}
		control->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS;
		lck_mtx_unlock(&control->mtx);
	}

	// Re-arm only while there is at least one connected client.
	if (nstat_controls)
	{
		clock_interval_to_deadline(60, NSEC_PER_SEC, &nstat_idle_time);
		thread_call_func_delayed((thread_call_func_t)nstat_idle_check, NULL, nstat_idle_time);
	}

	lck_mtx_unlock(&nstat_mtx);

	/* Generate any system level reports, if needed */
	nstat_sysinfo_generate_report();

	// Release the sources now that we aren't holding lots of locks
	while (dead_list)
	{
		dead = dead_list;
		dead_list = dead->next;

		nstat_control_cleanup_source(NULL, dead, FALSE);
	}

	return NULL;
}
2642
2643static void
2644nstat_control_register(void)
2645{
2646 // Create our lock group first
2647 lck_grp_attr_t *grp_attr = lck_grp_attr_alloc_init();
2648 lck_grp_attr_setdefault(grp_attr);
2649 nstat_lck_grp = lck_grp_alloc_init("network statistics kctl", grp_attr);
2650 lck_grp_attr_free(grp_attr);
2651
2652 lck_mtx_init(&nstat_mtx, nstat_lck_grp, NULL);
2653
2654 // Register the control
2655 struct kern_ctl_reg nstat_control;
2656 bzero(&nstat_control, sizeof(nstat_control));
2657 strlcpy(nstat_control.ctl_name, NET_STAT_CONTROL_NAME, sizeof(nstat_control.ctl_name));
2658 nstat_control.ctl_flags = CTL_FLAG_REG_EXTENDED | CTL_FLAG_REG_CRIT;
2659 nstat_control.ctl_sendsize = nstat_sendspace;
2660 nstat_control.ctl_recvsize = nstat_recvspace;
2661 nstat_control.ctl_connect = nstat_control_connect;
2662 nstat_control.ctl_disconnect = nstat_control_disconnect;
2663 nstat_control.ctl_send = nstat_control_send;
2664
2665 ctl_register(&nstat_control, &nstat_ctlref);
2666}
2667
2668static void
2669nstat_control_cleanup_source(
2670 nstat_control_state *state,
2671 struct nstat_src *src,
2672 boolean_t locked)
2673{
2674 errno_t result;
2675
2676 if (state)
2677 {
2678 result = nstat_control_send_removed(state, src);
2679 if (result != 0)
2680 {
2681 nstat_stats.nstat_control_cleanup_source_failures++;
2682 if (nstat_debug != 0)
2683 printf("%s - nstat_control_send_removed() %d\n",
2684 __func__, result);
2685 }
2686 }
2687 // Cleanup the source if we found it.
2688 src->provider->nstat_release(src->cookie, locked);
2689 OSFree(src, sizeof(*src), nstat_malloc_tag);
2690}
2691
2692
2693static bool
2694nstat_control_reporting_allowed(
2695 nstat_control_state *state,
2696 nstat_src *src)
2697{
2698 if (src->provider->nstat_reporting_allowed == NULL)
2699 return TRUE;
2700
2701 return (
2702 src->provider->nstat_reporting_allowed( src->cookie,
2703 state->ncs_provider_filters[src->provider->nstat_provider_id])
2704 );
2705}
2706
2707
/*
 * Kernel control connect callback: allocate and zero a per-client state,
 * link it onto the global client list under nstat_mtx, and arm the idle
 * reaper timer if it isn't already running.  The state pointer is handed
 * back through *uinfo for the other callbacks.
 */
static errno_t
nstat_control_connect(
	kern_ctl_ref	kctl,
	struct sockaddr_ctl	*sac,
	void	**uinfo)
{
	nstat_control_state	*state = OSMalloc(sizeof(*state), nstat_malloc_tag);
	if (state == NULL) return ENOMEM;

	bzero(state, sizeof(*state));
	lck_mtx_init(&state->mtx, nstat_lck_grp, NULL);
	state->ncs_kctl = kctl;
	state->ncs_unit = sac->sc_unit;
	// New clients start in the "counts requested" state so the idle
	// reaper skips them on its first pass.
	state->ncs_flags = NSTAT_FLAG_REQCOUNTS;
	*uinfo = state;

	lck_mtx_lock(&nstat_mtx);
	state->ncs_next = nstat_controls;
	nstat_controls = state;

	// Arm the 60s idle check if this is the first client.
	if (nstat_idle_time == 0)
	{
		clock_interval_to_deadline(60, NSEC_PER_SEC, &nstat_idle_time);
		thread_call_func_delayed((thread_call_func_t)nstat_idle_check, NULL, nstat_idle_time);
	}

	lck_mtx_unlock(&nstat_mtx);

	return 0;
}
2738
/*
 * Kernel control disconnect callback.  Teardown order matters:
 *  1) unlink the state from the global list (under nstat_mtx) so no new
 *     work is routed to it;
 *  2) under the state mutex: detach all watchers, mark CLEANUP, free any
 *     pending accumulation mbuf, and detach the source list;
 *  3) after dropping the mutex: clean up each source (provider release
 *     may be heavyweight), then destroy the mutex and free the state.
 */
static errno_t
nstat_control_disconnect(
	__unused kern_ctl_ref	kctl,
	__unused u_int32_t	unit,
	void	*uinfo)
{
	u_int32_t	watching;
	nstat_control_state	*state = (nstat_control_state*)uinfo;

	// pull it out of the global list of states
	lck_mtx_lock(&nstat_mtx);
	nstat_control_state	**statepp;
	for (statepp = &nstat_controls; *statepp; statepp = &(*statepp)->ncs_next)
	{
		if (*statepp == state)
		{
			*statepp = state->ncs_next;
			break;
		}
	}
	lck_mtx_unlock(&nstat_mtx);

	lck_mtx_lock(&state->mtx);
	// Stop watching for sources
	nstat_provider	*provider;
	watching = state->ncs_watching;
	state->ncs_watching = 0;
	for (provider = nstat_providers; provider && watching; provider = provider->next)
	{
		// Remove this client from each provider it was watching.
		if ((watching & (1 << provider->nstat_provider_id)) != 0)
		{
			watching &= ~(1 << provider->nstat_provider_id);
			provider->nstat_watcher_remove(state);
		}
	}

	// set cleanup flags
	state->ncs_flags |= NSTAT_FLAG_CLEANUP;

	if (state->ncs_accumulated)
	{
		mbuf_freem(state->ncs_accumulated);
		state->ncs_accumulated = NULL;
	}

	// Copy out the list of sources
	nstat_src	*srcs = state->ncs_srcs;
	state->ncs_srcs = NULL;
	lck_mtx_unlock(&state->mtx);

	while (srcs)
	{
		nstat_src	*src;

		// pull it out of the list
		src = srcs;
		srcs = src->next;

		// clean it up; state is NULL — the client is gone, so no
		// SRC_REMOVED notifications are sent.
		nstat_control_cleanup_source(NULL, src, FALSE);
	}
	lck_mtx_destroy(&state->mtx, nstat_lck_grp);
	OSFree(state, sizeof(*state), nstat_malloc_tag);

	return 0;
}
2805
2806static nstat_src_ref_t
2807nstat_control_next_src_ref(
2808 nstat_control_state *state)
2809{
2810 int i = 0;
2811 nstat_src_ref_t toReturn = NSTAT_SRC_REF_INVALID;
2812
2813 for (i = 0; i < 1000 && toReturn == NSTAT_SRC_REF_INVALID; i++)
2814 {
2815 if (state->ncs_next_srcref == NSTAT_SRC_REF_INVALID ||
2816 state->ncs_next_srcref == NSTAT_SRC_REF_ALL)
2817 {
2818 state->ncs_next_srcref = 1;
2819 }
2820
2821 nstat_src *src;
2822 for (src = state->ncs_srcs; src; src = src->next)
2823 {
2824 if (src->srcref == state->ncs_next_srcref)
2825 break;
2826 }
2827
2828 if (src == NULL) toReturn = state->ncs_next_srcref;
2829 state->ncs_next_srcref++;
2830 }
2831
2832 return toReturn;
2833}
2834
2835static errno_t
2836nstat_control_send_counts(
2837 nstat_control_state *state,
2838 nstat_src *src,
2839 unsigned long long context,
2840 u_int16_t hdr_flags,
2841 int *gone)
2842{
2843 nstat_msg_src_counts counts;
2844 errno_t result = 0;
2845
2846 /* Some providers may not have any counts to send */
2847 if (src->provider->nstat_counts == NULL)
2848 return (0);
2849
2850 bzero(&counts, sizeof(counts));
2851 counts.hdr.type = NSTAT_MSG_TYPE_SRC_COUNTS;
2852 counts.hdr.length = sizeof(counts);
2853 counts.hdr.flags = hdr_flags;
2854 counts.hdr.context = context;
2855 counts.srcref = src->srcref;
2856
2857 if (src->provider->nstat_counts(src->cookie, &counts.counts, gone) == 0)
2858 {
2859 if ((src->filter & NSTAT_FILTER_NOZEROBYTES) &&
2860 counts.counts.nstat_rxbytes == 0 &&
2861 counts.counts.nstat_txbytes == 0)
2862 {
2863 result = EAGAIN;
2864 }
2865 else
2866 {
2867 result = ctl_enqueuedata(state->ncs_kctl,
2868 state->ncs_unit, &counts, sizeof(counts),
2869 CTL_DATA_EOR);
2870 if (result != 0)
2871 nstat_stats.nstat_sendcountfailures += 1;
2872 }
2873 }
2874 return result;
2875}
2876
2877static errno_t
2878nstat_control_append_counts(
2879 nstat_control_state *state,
2880 nstat_src *src,
2881 int *gone)
2882{
2883 /* Some providers may not have any counts to send */
2884 if (!src->provider->nstat_counts) return 0;
2885
2886 nstat_msg_src_counts counts;
2887 bzero(&counts, sizeof(counts));
2888 counts.hdr.type = NSTAT_MSG_TYPE_SRC_COUNTS;
2889 counts.hdr.length = sizeof(counts);
2890 counts.srcref = src->srcref;
2891
2892 errno_t result = 0;
2893 result = src->provider->nstat_counts(src->cookie, &counts.counts, gone);
2894 if (result != 0)
2895 {
2896 return result;
2897 }
2898
2899 if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
2900 counts.counts.nstat_rxbytes == 0 && counts.counts.nstat_txbytes == 0)
2901 {
2902 return EAGAIN;
2903 }
2904
2905 return nstat_accumulate_msg(state, &counts.hdr, counts.hdr.length);
2906}
2907
/*
 * Build a SRC_DESC message in a freshly allocated mbuf, have the provider
 * fill in the provider-specific descriptor, and enqueue it.  The mbuf is
 * freed on every failure path; on success ctl_enqueuembuf() consumes it.
 * Returns EOPNOTSUPP when the provider has no descriptor support.
 */
static int
nstat_control_send_description(
	nstat_control_state	*state,
	nstat_src		*src,
	u_int64_t		context,
	u_int16_t		hdr_flags)
{
	// Provider doesn't support getting the descriptor? Done.
	if (src->provider->nstat_descriptor_length == 0 ||
		src->provider->nstat_copy_descriptor == NULL)
	{
		return EOPNOTSUPP;
	}

	// Allocate storage for the descriptor message
	mbuf_t			msg;
	unsigned int	one = 1;
	u_int32_t		size = offsetof(nstat_msg_src_description, data) + src->provider->nstat_descriptor_length;
	if (mbuf_allocpacket(MBUF_DONTWAIT, size, &one, &msg) != 0)
	{
		return ENOMEM;
	}

	nstat_msg_src_description *desc = (nstat_msg_src_description*)mbuf_data(msg);
	bzero(desc, size);
	mbuf_setlen(msg, size);
	mbuf_pkthdr_setlen(msg, mbuf_len(msg));

	// Query the provider for the provider specific bits
	errno_t result = src->provider->nstat_copy_descriptor(src->cookie, desc->data, src->provider->nstat_descriptor_length);

	if (result != 0)
	{
		// We still own the mbuf on provider failure.
		mbuf_freem(msg);
		return result;
	}

	desc->hdr.context = context;
	desc->hdr.type = NSTAT_MSG_TYPE_SRC_DESC;
	desc->hdr.length = size;
	desc->hdr.flags = hdr_flags;
	desc->srcref = src->srcref;
	desc->provider = src->provider->nstat_provider_id;

	result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg, CTL_DATA_EOR);
	if (result != 0)
	{
		// Enqueue failure does not consume the mbuf; free it here.
		nstat_stats.nstat_descriptionfailures += 1;
		mbuf_freem(msg);
	}

	return result;
}
2961
/*
 * Append a SRC_DESC message for one source to the client's accumulation
 * buffer.  The message is assembled in a stack VLA (u_int64_t-aligned),
 * which is safe because size is rejected above 512 bytes.  Returns
 * EOPNOTSUPP when the descriptor is too large or unsupported.
 */
static errno_t
nstat_control_append_description(
	nstat_control_state	*state,
	nstat_src		*src)
{
	size_t	size = offsetof(nstat_msg_src_description, data) + src->provider->nstat_descriptor_length;
	if (size > 512 || src->provider->nstat_descriptor_length == 0 ||
		src->provider->nstat_copy_descriptor == NULL)
	{
		return EOPNOTSUPP;
	}

	// Fill out a buffer on the stack, we will copy to the mbuf later
	u_int64_t buffer[size/sizeof(u_int64_t) + 1]; // u_int64_t to ensure alignment
	bzero(buffer, size);

	nstat_msg_src_description *desc = (nstat_msg_src_description*)buffer;
	desc->hdr.type = NSTAT_MSG_TYPE_SRC_DESC;
	desc->hdr.length = size;
	desc->srcref = src->srcref;
	desc->provider = src->provider->nstat_provider_id;

	errno_t result = 0;
	// Fill in the description
	// Query the provider for the provider specific bits
	result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
				src->provider->nstat_descriptor_length);
	if (result != 0)
	{
		return result;
	}

	return nstat_accumulate_msg(state, &desc->hdr, size);
}
2996
2997static int
2998nstat_control_send_update(
2999 nstat_control_state *state,
3000 nstat_src *src,
3001 u_int64_t context,
3002 u_int16_t hdr_flags,
3003 int *gone)
3004{
3005 // Provider doesn't support getting the descriptor or counts? Done.
3006 if ((src->provider->nstat_descriptor_length == 0 ||
3007 src->provider->nstat_copy_descriptor == NULL) &&
3008 src->provider->nstat_counts == NULL)
3009 {
3010 return EOPNOTSUPP;
3011 }
3012
3013 // Allocate storage for the descriptor message
3014 mbuf_t msg;
3015 unsigned int one = 1;
3016 u_int32_t size = offsetof(nstat_msg_src_update, data) +
3017 src->provider->nstat_descriptor_length;
3018 if (mbuf_allocpacket(MBUF_DONTWAIT, size, &one, &msg) != 0)
3019 {
3020 return ENOMEM;
3021 }
3022
3023 nstat_msg_src_update *desc = (nstat_msg_src_update*)mbuf_data(msg);
3024 bzero(desc, size);
3025 desc->hdr.context = context;
3026 desc->hdr.type = NSTAT_MSG_TYPE_SRC_UPDATE;
3027 desc->hdr.length = size;
3028 desc->hdr.flags = hdr_flags;
3029 desc->srcref = src->srcref;
3030 desc->provider = src->provider->nstat_provider_id;
3031
3032 mbuf_setlen(msg, size);
3033 mbuf_pkthdr_setlen(msg, mbuf_len(msg));
3034
3035 errno_t result = 0;
3036 if (src->provider->nstat_descriptor_length != 0 && src->provider->nstat_copy_descriptor)
3037 {
3038 // Query the provider for the provider specific bits
3039 result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
3040 src->provider->nstat_descriptor_length);
3041 if (result != 0)
3042 {
3043 mbuf_freem(msg);
3044 return result;
3045 }
3046 }
3047
3048 if (src->provider->nstat_counts)
3049 {
3050 result = src->provider->nstat_counts(src->cookie, &desc->counts, gone);
3051 if (result == 0)
3052 {
3053 if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
3054 desc->counts.nstat_rxbytes == 0 && desc->counts.nstat_txbytes == 0)
3055 {
3056 result = EAGAIN;
3057 }
3058 else
3059 {
3060 result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg, CTL_DATA_EOR);
3061 }
3062 }
3063 }
3064
3065 if (result != 0)
3066 {
3067 nstat_stats.nstat_srcupatefailures += 1;
3068 mbuf_freem(msg);
3069 }
3070
3071 return result;
3072}
3073
/*
 * Append a SRC_UPDATE message (descriptor + counts) for one source to
 * the client's accumulation buffer.  Assembled in a stack VLA
 * (u_int64_t-aligned, bounded by the size > 512 check).  Returns
 * EOPNOTSUPP for unsupported/oversized descriptors, EAGAIN when the
 * NOZEROBYTES filter suppresses an all-zero sample, else the provider
 * or accumulate result.
 */
static errno_t
nstat_control_append_update(
	nstat_control_state	*state,
	nstat_src		*src,
	int			*gone)
{
	size_t	size = offsetof(nstat_msg_src_update, data) + src->provider->nstat_descriptor_length;
	if (size > 512 || ((src->provider->nstat_descriptor_length == 0 ||
		 src->provider->nstat_copy_descriptor == NULL) &&
		src->provider->nstat_counts == NULL))
	{
		return EOPNOTSUPP;
	}

	// Fill out a buffer on the stack, we will copy to the mbuf later
	u_int64_t buffer[size/sizeof(u_int64_t) + 1]; // u_int64_t to ensure alignment
	bzero(buffer, size);

	nstat_msg_src_update	*desc = (nstat_msg_src_update*)buffer;
	desc->hdr.type = NSTAT_MSG_TYPE_SRC_UPDATE;
	desc->hdr.length = size;
	desc->srcref = src->srcref;
	desc->provider = src->provider->nstat_provider_id;

	errno_t	result = 0;
	// Fill in the description
	if (src->provider->nstat_descriptor_length != 0 && src->provider->nstat_copy_descriptor)
	{
		// Query the provider for the provider specific bits
		result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
			src->provider->nstat_descriptor_length);
		if (result != 0)
		{
			nstat_stats.nstat_copy_descriptor_failures++;
			if (nstat_debug != 0)
				printf("%s: src->provider->nstat_copy_descriptor: %d\n", __func__, result);
			return result;
		}
	}

	if (src->provider->nstat_counts)
	{
		result = src->provider->nstat_counts(src->cookie, &desc->counts, gone);
		if (result != 0)
		{
			nstat_stats.nstat_provider_counts_failures++;
			if (nstat_debug != 0)
				printf("%s: src->provider->nstat_counts: %d\n", __func__, result);
			return result;
		}

		// Optionally suppress samples with no traffic either way.
		if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
		    desc->counts.nstat_rxbytes == 0 && desc->counts.nstat_txbytes == 0)
		{
			return EAGAIN;
		}
	}

	return nstat_accumulate_msg(state, &desc->hdr, size);
}
3134
3135static errno_t
3136nstat_control_send_removed(
3137 nstat_control_state *state,
3138 nstat_src *src)
3139{
3140 nstat_msg_src_removed removed;
3141 errno_t result;
3142
3143 bzero(&removed, sizeof(removed));
3144 removed.hdr.type = NSTAT_MSG_TYPE_SRC_REMOVED;
3145 removed.hdr.length = sizeof(removed);
3146 removed.hdr.context = 0;
3147 removed.srcref = src->srcref;
3148 result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &removed,
3149 sizeof(removed), CTL_DATA_EOR | CTL_DATA_CRIT);
3150 if (result != 0)
3151 nstat_stats.nstat_msgremovedfailures += 1;
3152
3153 return result;
3154}
3155
/*
 * Handle a client ADD_SRC request carried in mbuf `m`: validate the
 * header and parameter length, look up the provider entry (copying the
 * parameter into contiguous memory when the packet is split across
 * mbufs), and register the resulting cookie as a new source.  On
 * source-add failure the provider reference is released here.
 */
static errno_t
nstat_control_handle_add_request(
	nstat_control_state	*state,
	mbuf_t				m)
{
	errno_t	result;

	// Verify the header fits in the first mbuf
	if (mbuf_len(m) < offsetof(nstat_msg_add_src_req, param))
	{
		return EINVAL;
	}

	// Calculate the length of the parameter field
	int32_t	paramlength = mbuf_pkthdr_len(m) - offsetof(nstat_msg_add_src_req, param);
	if (paramlength < 0 || paramlength > 2 * 1024)
	{
		return EINVAL;
	}

	nstat_provider			*provider;
	nstat_provider_cookie_t	cookie;
	nstat_msg_add_src_req	*req = mbuf_data(m);
	if (mbuf_pkthdr_len(m) > mbuf_len(m))
	{
		// parameter is too large, we need to make a contiguous copy
		// NOTE(review): a zero paramlength would hit OSMalloc(0) on this
		// path — assumed not to occur in practice; verify callers.
		void	*data = OSMalloc(paramlength, nstat_malloc_tag);

		if (!data) return ENOMEM;
		result = mbuf_copydata(m, offsetof(nstat_msg_add_src_req, param), paramlength, data);
		if (result == 0)
			result = nstat_lookup_entry(req->provider, data, paramlength, &provider, &cookie);
		OSFree(data, paramlength, nstat_malloc_tag);
	}
	else
	{
		// Parameter is contiguous in the first mbuf; use it in place.
		result = nstat_lookup_entry(req->provider, (void*)&req->param, paramlength, &provider, &cookie);
	}

	if (result != 0)
	{
		return result;
	}

	result = nstat_control_source_add(req->hdr.context, state, provider, cookie);
	if (result != 0)
		// Source bookkeeping failed; drop the provider's cookie ref.
		provider->nstat_release(cookie, 0);

	return result;
}
3206
/*
 * Handle a NSTAT_MSG_TYPE_ADD_ALL_SRCS request: register this client as
 * a watcher of every current and future source of the given provider,
 * recording an optional provider-wide filter.
 *
 * Returns 0 on success (a success message is also enqueued); EINVAL for
 * short requests, ENOENT for unknown providers, ENOTSUP when the
 * provider has no watcher support, EALREADY if this client already
 * watches the provider, EPERM-class errors from the privilege check,
 * or the provider's nstat_watcher_add() error.
 */
static errno_t
nstat_control_handle_add_all(
	nstat_control_state	*state,
	mbuf_t			m)
{
	errno_t result = 0;

	// Verify the header fits in the first mbuf
	if (mbuf_len(m) < sizeof(nstat_msg_add_all_srcs))
	{
		return EINVAL;
	}


	nstat_msg_add_all_srcs *req = mbuf_data(m);
	if (req->provider > NSTAT_PROVIDER_LAST) return ENOENT;

	nstat_provider *provider = nstat_find_provider_by_id(req->provider);
	u_int64_t filter = req->filter;

	if (!provider) return ENOENT;
	// Not every provider supports the watcher model
	if (provider->nstat_watcher_add == NULL) return ENOTSUP;

	// Watching all sources may be restricted to privileged clients
	if (nstat_privcheck != 0) {
		result = priv_check_cred(kauth_cred_get(),
		    PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0);
		if (result != 0)
			return result;
	}

	// Make sure we don't add the provider twice
	lck_mtx_lock(&state->mtx);
	if ((state->ncs_watching & (1 << provider->nstat_provider_id)) != 0)
		result = EALREADY;
	state->ncs_watching |= (1 << provider->nstat_provider_id);
	lck_mtx_unlock(&state->mtx);
	if (result != 0) return result;

	// Record the filter before the watcher goes live so early events
	// are filtered.  NOTE(review): this store happens outside the
	// mutex — confirm no concurrent reader can race with it.
	state->ncs_provider_filters[req->provider] = filter;

	result = provider->nstat_watcher_add(state);
	if (result != 0)
	{
		// Roll back both the filter and the watching bit on failure
		state->ncs_provider_filters[req->provider] = 0;
		lck_mtx_lock(&state->mtx);
		state->ncs_watching &= ~(1 << provider->nstat_provider_id);
		lck_mtx_unlock(&state->mtx);
	}
	if (result == 0)
		nstat_enqueue_success(req->hdr.context, state, 0);

	return result;
}
3260
/*
 * Register a new source (provider + cookie pair) with a control state.
 *
 * Unless the provider filter suppresses it, a NSTAT_MSG_TYPE_SRC_ADDED
 * message carrying the newly picked source reference is enqueued to the
 * client while the state lock is held, so the client never sees counts
 * for a srcref before the corresponding "added" message.
 *
 * On success the source owns the cookie (it is released later by
 * nstat_control_cleanup_source); on failure the caller keeps ownership
 * and must release the cookie itself.
 *
 * Returns 0 on success; ENOMEM if message or source allocation fails,
 * EINVAL if the state is being cleaned up or no srcref is available,
 * or the ctl_enqueuembuf() error.
 */
static errno_t
nstat_control_source_add(
	u_int64_t		context,
	nstat_control_state	*state,
	nstat_provider		*provider,
	nstat_provider_cookie_t	cookie)
{
	// Fill out source added message if appropriate
	mbuf_t msg = NULL;
	nstat_src_ref_t *srcrefp = NULL;

	u_int64_t provider_filters =
	    state->ncs_provider_filters[provider->nstat_provider_id];
	boolean_t tell_user =
	    ((provider_filters & NSTAT_FILTER_SUPPRESS_SRC_ADDED) == 0);
	u_int32_t src_filter =
	    (provider_filters & NSTAT_FILTER_PROVIDER_NOZEROBYTES)
		? NSTAT_FILTER_NOZEROBYTES : 0;

	if (tell_user)
	{
		unsigned int one = 1;

		// Allocate the "source added" message up front so we don't
		// have to unwind the source on allocation failure
		if (mbuf_allocpacket(MBUF_DONTWAIT, sizeof(nstat_msg_src_added),
		    &one, &msg) != 0)
			return ENOMEM;

		mbuf_setlen(msg, sizeof(nstat_msg_src_added));
		mbuf_pkthdr_setlen(msg, mbuf_len(msg));
		nstat_msg_src_added *add = mbuf_data(msg);
		bzero(add, sizeof(*add));
		add->hdr.type = NSTAT_MSG_TYPE_SRC_ADDED;
		add->hdr.length = mbuf_len(msg);
		add->hdr.context = context;
		add->provider = provider->nstat_provider_id;
		// The srcref is filled in later, once it has been picked
		// under the state lock
		srcrefp = &add->srcref;
	}

	// Allocate storage for the source
	nstat_src *src = OSMalloc(sizeof(*src), nstat_malloc_tag);
	if (src == NULL)
	{
		if (msg) mbuf_freem(msg);
		return ENOMEM;
	}

	// Fill in the source, including picking an unused source ref
	lck_mtx_lock(&state->mtx);

	src->srcref = nstat_control_next_src_ref(state);
	if (srcrefp)
		*srcrefp = src->srcref;

	// Bail if the control state is being torn down or refs ran out
	if (state->ncs_flags & NSTAT_FLAG_CLEANUP || src->srcref == NSTAT_SRC_REF_INVALID)
	{
		lck_mtx_unlock(&state->mtx);
		OSFree(src, sizeof(*src), nstat_malloc_tag);
		if (msg) mbuf_freem(msg);
		return EINVAL;
	}
	src->provider = provider;
	src->cookie = cookie;
	src->filter = src_filter;

	if (msg)
	{
		// send the source added message if appropriate
		errno_t result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg,
		    CTL_DATA_EOR);
		if (result != 0)
		{
			nstat_stats.nstat_srcaddedfailures += 1;
			lck_mtx_unlock(&state->mtx);
			OSFree(src, sizeof(*src), nstat_malloc_tag);
			mbuf_freem(msg);
			return result;
		}
	}
	// Put the source in the list
	src->next = state->ncs_srcs;
	state->ncs_srcs = src;

	lck_mtx_unlock(&state->mtx);

	return 0;
}
3347
3348static errno_t
3349nstat_control_handle_remove_request(
3350 nstat_control_state *state,
3351 mbuf_t m)
3352{
3353 nstat_src_ref_t srcref = NSTAT_SRC_REF_INVALID;
3354
3355 if (mbuf_copydata(m, offsetof(nstat_msg_rem_src_req, srcref), sizeof(srcref), &srcref) != 0)
3356 {
3357 return EINVAL;
3358 }
3359
3360 lck_mtx_lock(&state->mtx);
3361
3362 // Remove this source as we look for it
3363 nstat_src **nextp;
3364 nstat_src *src = NULL;
3365 for (nextp = &state->ncs_srcs; *nextp; nextp = &(*nextp)->next)
3366 {
3367 if ((*nextp)->srcref == srcref)
3368 {
3369 src = *nextp;
3370 *nextp = src->next;
3371 break;
3372 }
3373 }
3374
3375 lck_mtx_unlock(&state->mtx);
3376
3377 if (src) nstat_control_cleanup_source(state, src, FALSE);
3378
3379 return src ? 0 : ENOENT;
3380}
3381
/*
 * Handle a NSTAT_MSG_TYPE_QUERY_SRC request: send counts for one source
 * (req.srcref) or for all sources (NSTAT_SRC_REF_ALL).
 *
 * "Query all" requests may be paced: nstat_control_begin_query() decides
 * whether this is a continuation, and at most QUERY_CONTINUATION_SRC_COUNT
 * sources are reported per pass, with per-source sequence numbers
 * (src->seq vs state->ncs_seq) marking what has already been sent.
 * Sources whose provider reports them gone are unlinked, sent one final
 * description, and cleaned up after the lock is dropped.
 *
 * Returns 0 when a query-all completes (success is signalled to the
 * client via nstat_enqueue_success); otherwise ENOMEM/ENOBUFS when the
 * socket filled, EINVAL for short requests, or the last send error.
 */
static errno_t
nstat_control_handle_query_request(
	nstat_control_state	*state,
	mbuf_t			m)
{
	// TBD: handle this from another thread so we can enqueue a lot of data
	// As written, if a client requests query all, this function will be
	// called from their send of the request message. We will attempt to write
	// responses and succeed until the buffer fills up. Since the clients thread
	// is blocked on send, it won't be reading unless the client has two threads
	// using this socket, one for read and one for write. Two threads probably
	// won't work with this code anyhow since we don't have proper locking in
	// place yet.
	nstat_src	*dead_srcs = NULL;
	errno_t		result = ENOENT;
	nstat_msg_query_src_req	req;

	if (mbuf_copydata(m, 0, sizeof(req), &req) != 0)
	{
		return EINVAL;
	}

	const boolean_t all_srcs = (req.srcref == NSTAT_SRC_REF_ALL);

	lck_mtx_lock(&state->mtx);

	if (all_srcs)
	{
		// Remember that counts were requested so idle cleanup can
		// report anything we fail to enqueue here
		state->ncs_flags |= NSTAT_FLAG_REQCOUNTS;
	}
	nstat_src	**srcpp = &state->ncs_srcs;
	u_int64_t	src_count = 0;
	boolean_t	partial = FALSE;

	/*
	 * Error handling policy and sequence number generation is folded into
	 * nstat_control_begin_query.
	 */
	partial = nstat_control_begin_query(state, &req.hdr);

	while (*srcpp != NULL
	    && (!partial || src_count < QUERY_CONTINUATION_SRC_COUNT))
	{
		nstat_src	*src = NULL;
		int		gone;

		src = *srcpp;
		gone = 0;
		// XXX ignore IFACE types?
		if (all_srcs || src->srcref == req.srcref)
		{
			// Skip sources already reported in this partial
			// sequence (src->seq == state->ncs_seq)
			if (nstat_control_reporting_allowed(state, src)
			    && (!partial || !all_srcs || src->seq != state->ncs_seq))
			{
				if (all_srcs &&
				    (req.hdr.flags & NSTAT_MSG_HDR_FLAG_SUPPORTS_AGGREGATE) != 0)
				{
					// Aggregate-capable clients get counts
					// accumulated into batched messages
					result = nstat_control_append_counts(state, src, &gone);
				}
				else
				{
					result = nstat_control_send_counts(state, src, req.hdr.context, 0, &gone);
				}

				if (ENOMEM == result || ENOBUFS == result)
				{
					/*
					 * If the counts message failed to
					 * enqueue then we should clear our flag so
					 * that a client doesn't miss anything on
					 * idle cleanup. We skip the "gone"
					 * processing in the hope that we may
					 * catch it another time.
					 */
					state->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS;
					break;
				}
				if (partial)
				{
					/*
					 * We skip over hard errors and
					 * filtered sources.
					 */
					src->seq = state->ncs_seq;
					src_count++;
				}
			}
		}

		if (gone)
		{
			// send one last descriptor message so client may see last state
			// If we can't send the notification now, it
			// will be sent in the idle cleanup.
			result = nstat_control_send_description(state, *srcpp, 0, 0);
			if (result != 0)
			{
				nstat_stats.nstat_control_send_description_failures++;
				if (nstat_debug != 0)
					printf("%s - nstat_control_send_description() %d\n", __func__, result);
				state->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS;
				break;
			}

			// pull src out of the list
			*srcpp = src->next;

			src->next = dead_srcs;
			dead_srcs = src;
		}
		else
		{
			srcpp = &(*srcpp)->next;
		}

		// Single-source query: stop once the target was handled
		if (!all_srcs && req.srcref == src->srcref)
		{
			break;
		}
	}
	nstat_flush_accumulated_msgs(state);

	u_int16_t flags = 0;
	if (req.srcref == NSTAT_SRC_REF_ALL)
		flags = nstat_control_end_query(state, *srcpp, partial);

	lck_mtx_unlock(&state->mtx);

	/*
	 * If an error occurred enqueueing data, then allow the error to
	 * propagate to nstat_control_send. This way, the error is sent to
	 * user-level.
	 */
	if (all_srcs && ENOMEM != result && ENOBUFS != result)
	{
		nstat_enqueue_success(req.hdr.context, state, flags);
		result = 0;
	}

	// Release the dead sources now that the lock is dropped
	while (dead_srcs)
	{
		nstat_src	*src;

		src = dead_srcs;
		dead_srcs = src->next;

		// release src and send notification
		nstat_control_cleanup_source(state, src, FALSE);
	}

	return result;
}
3534
/*
 * Handle a NSTAT_MSG_TYPE_GET_SRC_DESC request: send the provider
 * descriptor for one source (req.srcref) or all sources
 * (NSTAT_SRC_REF_ALL).
 *
 * Like the counts query, a "describe all" may be paced into
 * continuations of at most QUERY_CONTINUATION_SRC_COUNT sources per
 * pass, using src->seq vs state->ncs_seq to skip already-reported
 * sources.  Unlike the counts query, no "gone" handling is done here.
 *
 * Returns 0 when a describe-all completes; ENOMEM/ENOBUFS when the
 * socket filled, EINVAL for short requests, or the last send error
 * (ENOENT if no source matched).
 */
static errno_t
nstat_control_handle_get_src_description(
	nstat_control_state	*state,
	mbuf_t			m)
{
	nstat_msg_get_src_description	req;
	errno_t result = ENOENT;
	nstat_src *src;

	if (mbuf_copydata(m, 0, sizeof(req), &req) != 0)
	{
		return EINVAL;
	}

	lck_mtx_lock(&state->mtx);
	u_int64_t src_count = 0;
	boolean_t partial = FALSE;
	const boolean_t all_srcs = (req.srcref == NSTAT_SRC_REF_ALL);

	/*
	 * Error handling policy and sequence number generation is folded into
	 * nstat_control_begin_query.
	 */
	partial = nstat_control_begin_query(state, &req.hdr);

	for (src = state->ncs_srcs;
	    src && (!partial || src_count < QUERY_CONTINUATION_SRC_COUNT);
	    src = src->next)
	{
		if (all_srcs || src->srcref == req.srcref)
		{
			// Skip sources already reported in this partial
			// sequence (src->seq == state->ncs_seq)
			if (nstat_control_reporting_allowed(state, src)
			    && (!all_srcs || !partial || src->seq != state->ncs_seq))
			{
				if ((req.hdr.flags & NSTAT_MSG_HDR_FLAG_SUPPORTS_AGGREGATE) != 0 && all_srcs)
				{
					// Aggregate-capable clients get
					// descriptions batched into one buffer
					result = nstat_control_append_description(state, src);
				}
				else
				{
					result = nstat_control_send_description(state, src, req.hdr.context, 0);
				}

				if (ENOMEM == result || ENOBUFS == result)
				{
					/*
					 * If the description message failed to
					 * enqueue then we give up for now.
					 */
					break;
				}
				if (partial)
				{
					/*
					 * Note, we skip over hard errors and
					 * filtered sources.
					 */
					src->seq = state->ncs_seq;
					src_count++;
				}
			}

			// Single-source request: stop after the target
			if (!all_srcs)
			{
				break;
			}
		}
	}
	nstat_flush_accumulated_msgs(state);

	u_int16_t flags = 0;
	if (req.srcref == NSTAT_SRC_REF_ALL)
		flags = nstat_control_end_query(state, src, partial);

	lck_mtx_unlock(&state->mtx);
	/*
	 * If an error occurred enqueueing data, then allow the error to
	 * propagate to nstat_control_send. This way, the error is sent to
	 * user-level.
	 */
	if (all_srcs && ENOMEM != result && ENOBUFS != result)
	{
		nstat_enqueue_success(req.hdr.context, state, flags);
		result = 0;
	}

	return result;
}
3623
3624static errno_t
3625nstat_control_handle_set_filter(
3626 nstat_control_state *state,
3627 mbuf_t m)
3628{
3629 nstat_msg_set_filter req;
3630 nstat_src *src;
3631
3632 if (mbuf_copydata(m, 0, sizeof(req), &req) != 0)
3633 return EINVAL;
3634 if (req.srcref == NSTAT_SRC_REF_ALL ||
3635 req.srcref == NSTAT_SRC_REF_INVALID)
3636 return EINVAL;
3637
3638 lck_mtx_lock(&state->mtx);
3639 for (src = state->ncs_srcs; src; src = src->next)
3640 if (req.srcref == src->srcref)
3641 {
3642 src->filter = req.filter;
3643 break;
3644 }
3645 lck_mtx_unlock(&state->mtx);
3646 if (src == NULL)
3647 return ENOENT;
3648
3649 return 0;
3650}
3651
3652static void
3653nstat_send_error(
3654 nstat_control_state *state,
3655 u_int64_t context,
3656 u_int32_t error)
3657{
3658 errno_t result;
3659 struct nstat_msg_error err;
3660
3661 bzero(&err, sizeof(err));
3662 err.hdr.type = NSTAT_MSG_TYPE_ERROR;
3663 err.hdr.length = sizeof(err);
3664 err.hdr.context = context;
3665 err.error = error;
3666
3667 result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &err,
3668 sizeof(err), CTL_DATA_EOR | CTL_DATA_CRIT);
3669 if (result != 0)
3670 nstat_stats.nstat_msgerrorfailures++;
3671}
3672
/*
 * Begin a (possibly paced) query.
 *
 * If the request header carries NSTAT_MSG_HDR_FLAG_CONTINUATION the
 * client wants a paced "query all": returns TRUE and, when this is a
 * new context rather than a continuation of the current one, bumps
 * state->ncs_seq so per-source sequence numbers restart, after telling
 * any previously in-flight context EAGAIN.
 *
 * Returns FALSE for a plain (non-paced) query; if a paced query was in
 * progress under a different context, that context is sent EAGAIN.
 */
static boolean_t
nstat_control_begin_query(
	nstat_control_state	*state,
	const nstat_msg_hdr	*hdrp)
{
	boolean_t partial = FALSE;

	if (hdrp->flags & NSTAT_MSG_HDR_FLAG_CONTINUATION)
	{
		/* A partial query all has been requested. */
		partial = TRUE;

		if (state->ncs_context != hdrp->context)
		{
			// A different paced query was still in flight;
			// notify its owner before taking over
			if (state->ncs_context != 0)
				nstat_send_error(state, state->ncs_context, EAGAIN);

			/* Initialize state for a partial query all. */
			state->ncs_context = hdrp->context;
			state->ncs_seq++;
		}
	}
	else if (state->ncs_context != 0)
	{
		/*
		 * A continuation of a paced-query was in progress. Send that
		 * context an error and reset the state. If the same context
		 * has changed its mind, just send the full query results.
		 */
		if (state->ncs_context != hdrp->context)
			nstat_send_error(state, state->ncs_context, EAGAIN);
	}

	return partial;
}
3708
3709static u_int16_t
3710nstat_control_end_query(
3711 nstat_control_state *state,
3712 nstat_src *last_src,
3713 boolean_t partial)
3714{
3715 u_int16_t flags = 0;
3716
3717 if (last_src == NULL || !partial)
3718 {
3719 /*
3720 * We iterated through the entire srcs list or exited early
3721 * from the loop when a partial update was not requested (an
3722 * error occurred), so clear context to indicate internally
3723 * that the query is finished.
3724 */
3725 state->ncs_context = 0;
3726 }
3727 else
3728 {
3729 /*
3730 * Indicate to userlevel to make another partial request as
3731 * there are still sources left to be reported.
3732 */
3733 flags |= NSTAT_MSG_HDR_FLAG_CONTINUATION;
3734 }
3735
3736 return flags;
3737}
3738
/*
 * Handle a NSTAT_MSG_TYPE_GET_UPDATE request: send combined
 * descriptor+counts "update" messages for all sources
 * (NSTAT_SRC_REF_ALL) or a single source.
 *
 * Marks the state as update-capable (NSTAT_FLAG_SUPPORTS_UPDATES).
 * "Update all" may be paced into continuations of at most
 * QUERY_CONTINUATION_SRC_COUNT sources, using src->seq vs
 * state->ncs_seq to skip sources already reported in this sequence.
 * Sources reported gone are unlinked and cleaned up after the lock is
 * dropped.
 *
 * Returns 0 when an update-all completes; ENOMEM/ENOBUFS when the
 * socket filled, EINVAL for short requests, or the last send error
 * (ENOENT if nothing matched).
 */
static errno_t
nstat_control_handle_get_update(
	nstat_control_state	*state,
	mbuf_t			m)
{
	nstat_msg_query_src_req	req;

	if (mbuf_copydata(m, 0, sizeof(req), &req) != 0)
	{
		return EINVAL;
	}

	lck_mtx_lock(&state->mtx);

	// Remember that this client understands update messages
	state->ncs_flags |= NSTAT_FLAG_SUPPORTS_UPDATES;

	errno_t		result = ENOENT;
	nstat_src	*src;
	nstat_src	*dead_srcs = NULL;
	nstat_src	**srcpp = &state->ncs_srcs;
	u_int64_t	src_count = 0;
	boolean_t	partial = FALSE;

	/*
	 * Error handling policy and sequence number generation is folded into
	 * nstat_control_begin_query.
	 */
	partial = nstat_control_begin_query(state, &req.hdr);

	while (*srcpp != NULL
	    && (FALSE == partial
	    || src_count < QUERY_CONTINUATION_SRC_COUNT))
	{
		int	gone;

		gone = 0;
		src = *srcpp;
		if (nstat_control_reporting_allowed(state, src))
		{
			/* skip this source if it has the current state
			 * sequence number as it's already been reported in
			 * this query-all partial sequence. */
			if (req.srcref == NSTAT_SRC_REF_ALL
			    && (FALSE == partial || src->seq != state->ncs_seq))
			{
				result = nstat_control_append_update(state, src, &gone);
				if (ENOMEM == result || ENOBUFS == result)
				{
					/*
					 * If the update message failed to
					 * enqueue then give up.
					 */
					break;
				}
				if (partial)
				{
					/*
					 * We skip over hard errors and
					 * filtered sources.
					 */
					src->seq = state->ncs_seq;
					src_count++;
				}
			}
			else if (src->srcref == req.srcref)
			{
				// Single-source update carries the request
				// context so the client can match it up
				result = nstat_control_send_update(state, src, req.hdr.context, 0, &gone);
			}
		}

		if (gone)
		{
			// pull src out of the list
			*srcpp = src->next;

			src->next = dead_srcs;
			dead_srcs = src;
		}
		else
		{
			srcpp = &(*srcpp)->next;
		}

		// Single-source request: stop once the target was handled
		if (req.srcref != NSTAT_SRC_REF_ALL && req.srcref == src->srcref)
		{
			break;
		}
	}

	nstat_flush_accumulated_msgs(state);


	u_int16_t flags = 0;
	if (req.srcref == NSTAT_SRC_REF_ALL)
		flags = nstat_control_end_query(state, *srcpp, partial);

	lck_mtx_unlock(&state->mtx);
	/*
	 * If an error occurred enqueueing data, then allow the error to
	 * propagate to nstat_control_send. This way, the error is sent to
	 * user-level.
	 */
	if (req.srcref == NSTAT_SRC_REF_ALL && ENOMEM != result && ENOBUFS != result)
	{
		nstat_enqueue_success(req.hdr.context, state, flags);
		result = 0;
	}

	// Release the dead sources now that the lock is dropped
	while (dead_srcs)
	{
		src = dead_srcs;
		dead_srcs = src->next;

		// release src and send notification
		nstat_control_cleanup_source(state, src, FALSE);
	}

	return result;
}
3858
3859static errno_t
3860nstat_control_handle_subscribe_sysinfo(
3861 nstat_control_state *state)
3862{
3863 errno_t result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0);
3864
3865 if (result != 0)
3866 {
3867 return result;
3868 }
3869
3870 lck_mtx_lock(&state->mtx);
3871 state->ncs_flags |= NSTAT_FLAG_SYSINFO_SUBSCRIBED;
3872 lck_mtx_unlock(&state->mtx);
3873
3874 return 0;
3875}
3876
3877static errno_t
3878nstat_control_send(
3879 kern_ctl_ref kctl,
3880 u_int32_t unit,
3881 void *uinfo,
3882 mbuf_t m,
3883 __unused int flags)
3884{
3885 nstat_control_state *state = (nstat_control_state*)uinfo;
3886 struct nstat_msg_hdr *hdr;
3887 struct nstat_msg_hdr storage;
3888 errno_t result = 0;
3889
3890 if (mbuf_pkthdr_len(m) < sizeof(*hdr))
3891 {
3892 // Is this the right thing to do?
3893 mbuf_freem(m);
3894 return EINVAL;
3895 }
3896
3897 if (mbuf_len(m) >= sizeof(*hdr))
3898 {
3899 hdr = mbuf_data(m);
3900 }
3901 else
3902 {
3903 mbuf_copydata(m, 0, sizeof(storage), &storage);
3904 hdr = &storage;
3905 }
3906
3907 // Legacy clients may not set the length
3908 // Those clients are likely not setting the flags either
3909 // Fix everything up so old clients continue to work
3910 if (hdr->length != mbuf_pkthdr_len(m))
3911 {
3912 hdr->flags = 0;
3913 hdr->length = mbuf_pkthdr_len(m);
3914 if (hdr == &storage)
3915 {
3916 mbuf_copyback(m, 0, sizeof(*hdr), hdr, MBUF_DONTWAIT);
3917 }
3918 }
3919
3920 switch (hdr->type)
3921 {
3922 case NSTAT_MSG_TYPE_ADD_SRC:
3923 result = nstat_control_handle_add_request(state, m);
3924 break;
3925
3926 case NSTAT_MSG_TYPE_ADD_ALL_SRCS:
3927 result = nstat_control_handle_add_all(state, m);
3928 break;
3929
3930 case NSTAT_MSG_TYPE_REM_SRC:
3931 result = nstat_control_handle_remove_request(state, m);
3932 break;
3933
3934 case NSTAT_MSG_TYPE_QUERY_SRC:
3935 result = nstat_control_handle_query_request(state, m);
3936 break;
3937
3938 case NSTAT_MSG_TYPE_GET_SRC_DESC:
3939 result = nstat_control_handle_get_src_description(state, m);
3940 break;
3941
3942 case NSTAT_MSG_TYPE_SET_FILTER:
3943 result = nstat_control_handle_set_filter(state, m);
3944 break;
3945
3946 case NSTAT_MSG_TYPE_GET_UPDATE:
3947 result = nstat_control_handle_get_update(state, m);
3948 break;
3949
3950 case NSTAT_MSG_TYPE_SUBSCRIBE_SYSINFO:
3951 result = nstat_control_handle_subscribe_sysinfo(state);
3952 break;
3953
3954 default:
3955 result = EINVAL;
3956 break;
3957 }
3958
3959 if (result != 0)
3960 {
3961 struct nstat_msg_error err;
3962
3963 bzero(&err, sizeof(err));
3964 err.hdr.type = NSTAT_MSG_TYPE_ERROR;
3965 err.hdr.length = sizeof(err) + mbuf_pkthdr_len(m);
3966 err.hdr.context = hdr->context;
3967 err.error = result;
3968
3969 if (mbuf_prepend(&m, sizeof(err), MBUF_DONTWAIT) == 0 &&
3970 mbuf_copyback(m, 0, sizeof(err), &err, MBUF_DONTWAIT) == 0)
3971 {
3972 result = ctl_enqueuembuf(kctl, unit, m, CTL_DATA_EOR | CTL_DATA_CRIT);
3973 if (result != 0)
3974 {
3975 mbuf_freem(m);
3976 }
3977 m = NULL;
3978 }
3979
3980 if (result != 0)
3981 {
3982 // Unable to prepend the error to the request - just send the error
3983 err.hdr.length = sizeof(err);
3984 result = ctl_enqueuedata(kctl, unit, &err, sizeof(err),
3985 CTL_DATA_EOR | CTL_DATA_CRIT);
3986 if (result != 0)
3987 nstat_stats.nstat_msgerrorfailures += 1;
3988 }
3989 nstat_stats.nstat_handle_msg_failures += 1;
3990 }
3991
3992 if (m) mbuf_freem(m);
3993
3994 return result;
3995}