apple/xnu: bsd/net/ntstat.c @ f742a560a052c05679212a27f75db1ab678d67fe
1 /*
2 * Copyright (c) 2010-2015 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <sys/param.h>
30 #include <sys/types.h>
31 #include <sys/kpi_mbuf.h>
32 #include <sys/socket.h>
33 #include <sys/kern_control.h>
34 #include <sys/mcache.h>
35 #include <sys/socketvar.h>
36 #include <sys/sysctl.h>
37 #include <sys/queue.h>
38 #include <sys/priv.h>
39 #include <sys/protosw.h>
40
41 #include <kern/clock.h>
42 #include <kern/debug.h>
43
44 #include <libkern/libkern.h>
45 #include <libkern/OSMalloc.h>
46 #include <libkern/OSAtomic.h>
47 #include <libkern/locks.h>
48
49 #include <net/if.h>
50 #include <net/if_var.h>
51 #include <net/if_types.h>
52 #include <net/route.h>
53 #include <net/ntstat.h>
54
55 #include <netinet/ip_var.h>
56 #include <netinet/in_pcb.h>
57 #include <netinet/in_var.h>
58 #include <netinet/tcp.h>
59 #include <netinet/tcp_var.h>
60 #include <netinet/tcp_fsm.h>
61 #include <netinet/tcp_cc.h>
62 #include <netinet/udp.h>
63 #include <netinet/udp_var.h>
64 #include <netinet6/in6_pcb.h>
65 #include <netinet6/in6_var.h>
66
67 __private_extern__ int nstat_collect = 1;
68 SYSCTL_INT(_net, OID_AUTO, statistics, CTLFLAG_RW | CTLFLAG_LOCKED,
69 &nstat_collect, 0, "Collect detailed statistics");
70
71 static int nstat_privcheck = 0;
72 SYSCTL_INT(_net, OID_AUTO, statistics_privcheck, CTLFLAG_RW | CTLFLAG_LOCKED,
73 &nstat_privcheck, 0, "Entitlement check");
74
75 SYSCTL_NODE(_net, OID_AUTO, stats,
76 CTLFLAG_RW|CTLFLAG_LOCKED, 0, "network statistics");
77
78 static int nstat_debug = 0;
79 SYSCTL_INT(_net_stats, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED,
80 &nstat_debug, 0, "");
81
82 static int nstat_sendspace = 2048;
83 SYSCTL_INT(_net_stats, OID_AUTO, sendspace, CTLFLAG_RW | CTLFLAG_LOCKED,
84 &nstat_sendspace, 0, "");
85
86 static int nstat_recvspace = 8192;
87 SYSCTL_INT(_net_stats, OID_AUTO, recvspace, CTLFLAG_RW | CTLFLAG_LOCKED,
88 &nstat_recvspace, 0, "");
89
90 static struct nstat_stats nstat_stats;
91 SYSCTL_STRUCT(_net_stats, OID_AUTO, stats, CTLFLAG_RD | CTLFLAG_LOCKED,
92 &nstat_stats, nstat_stats, "");
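/*
 * These knobs surface through sysctl(8) as net.statistics,
 * net.statistics_privcheck, and net.stats.{debug,sendspace,recvspace,stats};
 * e.g. `sysctl -w net.statistics=0` turns detailed collection off.
 */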
93
94 enum
95 {
96 NSTAT_FLAG_CLEANUP = (1 << 0),
97 NSTAT_FLAG_REQCOUNTS = (1 << 1),
98 NSTAT_FLAG_SUPPORTS_UPDATES = (1 << 2),
99 NSTAT_FLAG_SYSINFO_SUBSCRIBED = (1 << 3),
100 };
101
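/*
 * How many sources a query reports before yielding; a longer query is
 * resumed from the continuation state (ncs_context/ncs_seq) kept below.
 */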
102 #define QUERY_CONTINUATION_SRC_COUNT 100
103
104 typedef struct nstat_control_state
105 {
106 struct nstat_control_state *ncs_next;
107 u_int32_t ncs_watching;
108 decl_lck_mtx_data(, mtx);
109 kern_ctl_ref ncs_kctl;
110 u_int32_t ncs_unit;
111 nstat_src_ref_t ncs_next_srcref;
112 struct nstat_src *ncs_srcs;
113 mbuf_t ncs_accumulated;
114 u_int32_t ncs_flags;
115 u_int64_t ncs_provider_filters[NSTAT_PROVIDER_COUNT];
116 /* state maintained for partial query requests */
117 u_int64_t ncs_context;
118 u_int64_t ncs_seq;
119 } nstat_control_state;
120
121 typedef struct nstat_provider
122 {
123 struct nstat_provider *next;
124 nstat_provider_id_t nstat_provider_id;
125 size_t nstat_descriptor_length;
126 errno_t (*nstat_lookup)(const void *data, u_int32_t length, nstat_provider_cookie_t *out_cookie);
127 int (*nstat_gone)(nstat_provider_cookie_t cookie);
128 errno_t (*nstat_counts)(nstat_provider_cookie_t cookie, struct nstat_counts *out_counts, int *out_gone);
129 errno_t (*nstat_watcher_add)(nstat_control_state *state);
130 void (*nstat_watcher_remove)(nstat_control_state *state);
131 errno_t (*nstat_copy_descriptor)(nstat_provider_cookie_t cookie, void *data, u_int32_t len);
132 void (*nstat_release)(nstat_provider_cookie_t cookie, boolean_t locked);
133 bool (*nstat_reporting_allowed)(nstat_provider_cookie_t cookie, uint64_t filter);
134 } nstat_provider;
135
136
137 typedef struct nstat_src
138 {
139 struct nstat_src *next;
140 nstat_src_ref_t srcref;
141 nstat_provider *provider;
142 nstat_provider_cookie_t cookie;
143 uint32_t filter;
144 uint64_t seq;
145 } nstat_src;
146
147 static errno_t nstat_control_send_counts(nstat_control_state *,
148 nstat_src *, unsigned long long, u_int16_t, int *);
149 static int nstat_control_send_description(nstat_control_state *state, nstat_src *src, u_int64_t context, u_int16_t hdr_flags);
150 static int nstat_control_send_update(nstat_control_state *state, nstat_src *src, u_int64_t context, u_int16_t hdr_flags, int *gone);
151 static errno_t nstat_control_send_removed(nstat_control_state *, nstat_src *);
152 static errno_t nstat_control_send_goodbye(nstat_control_state *state, nstat_src *src);
153 static void nstat_control_cleanup_source(nstat_control_state *state, nstat_src *src, boolean_t);
154 static bool nstat_control_reporting_allowed(nstat_control_state *state, nstat_src *src);
155 static boolean_t nstat_control_begin_query(nstat_control_state *state, const nstat_msg_hdr *hdrp);
156 static u_int16_t nstat_control_end_query(nstat_control_state *state, nstat_src *last_src, boolean_t partial);
157 static void nstat_ifnet_report_ecn_stats(void);
158
159 static u_int32_t nstat_udp_watchers = 0;
160 static u_int32_t nstat_tcp_watchers = 0;
161
162 static void nstat_control_register(void);
163
164 /*
165 * The lock order is as follows:
166 *
167 * socket_lock (inpcb)
168 * nstat_mtx
169 * state->mtx
170 */
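/*
 * Concretely, a path that needs more than one of these takes them
 * outermost-first, as nstat_pcb_detach() below does:
 *
 *	lck_mtx_lock(&nstat_mtx);
 *	for (state = nstat_controls; state; state = state->ncs_next)
 *	{
 *		lck_mtx_lock(&state->mtx);
 *		...
 *		lck_mtx_unlock(&state->mtx);
 *	}
 *	lck_mtx_unlock(&nstat_mtx);
 */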
171 static volatile OSMallocTag nstat_malloc_tag = NULL;
172 static nstat_control_state *nstat_controls = NULL;
173 static uint64_t nstat_idle_time = 0;
174 static decl_lck_mtx_data(, nstat_mtx);
175
176 /* some extern definitions */
177 extern void mbuf_report_peak_usage(void);
178 extern void tcp_report_stats(void);
179
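/*
 * KAME-derived stacks embed the scope id of link-local addresses in the
 * second 16-bit word of the address itself, in network byte order. The
 * copy below lifts that embedded id out into sin6_scope_id and zeroes
 * the word, so consumers see a clean fe80::... address with an explicit
 * scope instead of the kernel-internal embedded form.
 */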
180 static void
181 nstat_copy_sa_out(
182 const struct sockaddr *src,
183 struct sockaddr *dst,
184 int maxlen)
185 {
186 if (src->sa_len > maxlen) return;
187
188 bcopy(src, dst, src->sa_len);
189 if (src->sa_family == AF_INET6 &&
190 src->sa_len >= sizeof(struct sockaddr_in6))
191 {
192 struct sockaddr_in6 *sin6 = (struct sockaddr_in6*)(void *)dst;
193 if (IN6_IS_SCOPE_EMBED(&sin6->sin6_addr))
194 {
195 if (sin6->sin6_scope_id == 0)
196 sin6->sin6_scope_id = ntohs(sin6->sin6_addr.s6_addr16[1]);
197 sin6->sin6_addr.s6_addr16[1] = 0;
198 }
199 }
200 }
201
202 static void
203 nstat_ip_to_sockaddr(
204 const struct in_addr *ip,
205 u_int16_t port,
206 struct sockaddr_in *sin,
207 u_int32_t maxlen)
208 {
209 if (maxlen < sizeof(struct sockaddr_in))
210 return;
211
212 sin->sin_family = AF_INET;
213 sin->sin_len = sizeof(*sin);
214 sin->sin_port = port;
215 sin->sin_addr = *ip;
216 }
217
218 static void
219 nstat_ip6_to_sockaddr(
220 const struct in6_addr *ip6,
221 u_int16_t port,
222 struct sockaddr_in6 *sin6,
223 u_int32_t maxlen)
224 {
225 if (maxlen < sizeof(struct sockaddr_in6))
226 return;
227
228 sin6->sin6_family = AF_INET6;
229 sin6->sin6_len = sizeof(*sin6);
230 sin6->sin6_port = port;
231 sin6->sin6_addr = *ip6;
232 if (IN6_IS_SCOPE_EMBED(&sin6->sin6_addr))
233 {
234 sin6->sin6_scope_id = ntohs(sin6->sin6_addr.s6_addr16[1]);
235 sin6->sin6_addr.s6_addr16[1] = 0;
236 }
237 }
238
239 static u_int16_t
240 nstat_inpcb_to_flags(
241 const struct inpcb *inp)
242 {
243 u_int16_t flags = 0;
244
 245 	if ((inp != NULL) && (inp->inp_last_outifp != NULL))
246 {
247 struct ifnet *ifp = inp->inp_last_outifp;
248
249 u_int32_t functional_type = if_functional_type(ifp);
250
251 /* Panic if someone adds a functional type without updating ntstat. */
252 VERIFY(0 <= functional_type && functional_type <= IFRTYPE_FUNCTIONAL_LAST);
253
254 switch (functional_type)
255 {
256 case IFRTYPE_FUNCTIONAL_UNKNOWN:
257 flags |= NSTAT_IFNET_IS_UNKNOWN_TYPE;
258 break;
259 case IFRTYPE_FUNCTIONAL_LOOPBACK:
260 flags |= NSTAT_IFNET_IS_LOOPBACK;
261 break;
262 case IFRTYPE_FUNCTIONAL_WIRED:
263 flags |= NSTAT_IFNET_IS_WIRED;
264 break;
265 case IFRTYPE_FUNCTIONAL_WIFI_INFRA:
266 flags |= NSTAT_IFNET_IS_WIFI;
267 break;
268 case IFRTYPE_FUNCTIONAL_WIFI_AWDL:
269 flags |= NSTAT_IFNET_IS_WIFI;
270 flags |= NSTAT_IFNET_IS_AWDL;
271 break;
272 case IFRTYPE_FUNCTIONAL_CELLULAR:
273 flags |= NSTAT_IFNET_IS_CELLULAR;
274 if (inp->inp_socket != NULL &&
275 (inp->inp_socket->so_flags1 & SOF1_CELLFALLBACK))
276 flags |= NSTAT_IFNET_VIA_CELLFALLBACK;
277 break;
278 }
279
280 if (IFNET_IS_EXPENSIVE(ifp))
281 {
282 flags |= NSTAT_IFNET_IS_EXPENSIVE;
283 }
284 }
285 else
286 {
287 flags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
288 }
289
290 return flags;
291 }
292
293 #pragma mark -- Network Statistic Providers --
294
295 static errno_t nstat_control_source_add(u_int64_t context, nstat_control_state *state, nstat_provider *provider, nstat_provider_cookie_t cookie);
296 struct nstat_provider *nstat_providers = NULL;
297
298 static struct nstat_provider*
299 nstat_find_provider_by_id(
300 nstat_provider_id_t id)
301 {
302 struct nstat_provider *provider;
303
304 for (provider = nstat_providers; provider != NULL; provider = provider->next)
305 {
306 if (provider->nstat_provider_id == id)
307 break;
308 }
309
310 return provider;
311 }
312
313 static errno_t
314 nstat_lookup_entry(
315 nstat_provider_id_t id,
316 const void *data,
317 u_int32_t length,
318 nstat_provider **out_provider,
319 nstat_provider_cookie_t *out_cookie)
320 {
321 *out_provider = nstat_find_provider_by_id(id);
322 if (*out_provider == NULL)
323 {
324 return ENOENT;
325 }
326
327 return (*out_provider)->nstat_lookup(data, length, out_cookie);
328 }
329
330 static void nstat_init_route_provider(void);
331 static void nstat_init_tcp_provider(void);
332 static void nstat_init_udp_provider(void);
333 static void nstat_init_ifnet_provider(void);
334
335 __private_extern__ void
336 nstat_init(void)
337 {
338 if (nstat_malloc_tag != NULL) return;
339
340 OSMallocTag tag = OSMalloc_Tagalloc(NET_STAT_CONTROL_NAME, OSMT_DEFAULT);
341 if (!OSCompareAndSwapPtr(NULL, tag, &nstat_malloc_tag))
342 {
343 OSMalloc_Tagfree(tag);
344 tag = nstat_malloc_tag;
345 }
346 else
347 {
 348 		// This code path is hit exactly once, so do the remaining one-time initialization here.
349 nstat_init_route_provider();
350 nstat_init_tcp_provider();
351 nstat_init_udp_provider();
352 nstat_init_ifnet_provider();
353 nstat_control_register();
354 }
355 }
356
357 #pragma mark -- Aligned Buffer Allocation --
358
359 struct align_header
360 {
361 u_int32_t offset;
362 u_int32_t length;
363 };
364
365 static void*
366 nstat_malloc_aligned(
367 u_int32_t length,
368 u_int8_t alignment,
369 OSMallocTag tag)
370 {
371 struct align_header *hdr = NULL;
372 u_int32_t size = length + sizeof(*hdr) + alignment - 1;
373
374 u_int8_t *buffer = OSMalloc(size, tag);
375 if (buffer == NULL) return NULL;
376
377 u_int8_t *aligned = buffer + sizeof(*hdr);
378 aligned = (u_int8_t*)P2ROUNDUP(aligned, alignment);
379
380 hdr = (struct align_header*)(void *)(aligned - sizeof(*hdr));
381 hdr->offset = aligned - buffer;
382 hdr->length = size;
383
384 return aligned;
385 }
386
387 static void
388 nstat_free_aligned(
389 void *buffer,
390 OSMallocTag tag)
391 {
392 struct align_header *hdr = (struct align_header*)(void *)((u_int8_t*)buffer - sizeof(*hdr));
393 OSFree(((char*)buffer) - hdr->offset, hdr->length, tag);
394 }
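/*
 * A minimal usage sketch for the helpers above (illustrative only; the
 * function name is hypothetical). nstat_route_attach() below performs
 * the same sequence for real against rte->rt_stats.
 */
#if 0
static void
nstat_aligned_alloc_example(void)
{
	struct nstat_counts *c;

	/* Request 8-byte alignment; an align_header sits just below the
	 * returned pointer so the free side can recover the raw buffer. */
	c = nstat_malloc_aligned(sizeof(*c), sizeof(u_int64_t), nstat_malloc_tag);
	if (c == NULL)
		return;
	bzero(c, sizeof(*c));
	/* ... accumulate counters ... */
	nstat_free_aligned(c, nstat_malloc_tag);
}
#endif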
395
396 #pragma mark -- Route Provider --
397
398 static nstat_provider nstat_route_provider;
399
400 static errno_t
401 nstat_route_lookup(
402 const void *data,
403 u_int32_t length,
404 nstat_provider_cookie_t *out_cookie)
405 {
 406 	// rt_lookup() doesn't take const parameters, but it doesn't modify them
 407 	// for the lookup either, so we use a union to shed the const and silence the warning.
408 union
409 {
410 struct sockaddr *sa;
411 const struct sockaddr *const_sa;
412 } dst, mask;
413
414 const nstat_route_add_param *param = (const nstat_route_add_param*)data;
415 *out_cookie = NULL;
416
417 if (length < sizeof(*param))
418 {
419 return EINVAL;
420 }
421
422 if (param->dst.v4.sin_family == 0 ||
423 param->dst.v4.sin_family > AF_MAX ||
424 (param->mask.v4.sin_family != 0 && param->mask.v4.sin_family != param->dst.v4.sin_family))
425 {
426 return EINVAL;
427 }
428
429 if (param->dst.v4.sin_len > sizeof(param->dst) ||
 430 	    (param->mask.v4.sin_family && param->mask.v4.sin_len > sizeof(param->mask)))
431 {
432 return EINVAL;
433 }
434 if ((param->dst.v4.sin_family == AF_INET &&
435 param->dst.v4.sin_len < sizeof(struct sockaddr_in)) ||
436 (param->dst.v6.sin6_family == AF_INET6 &&
437 param->dst.v6.sin6_len < sizeof(struct sockaddr_in6)))
438 {
439 return EINVAL;
440 }
441
442 dst.const_sa = (const struct sockaddr*)&param->dst;
443 mask.const_sa = param->mask.v4.sin_family ? (const struct sockaddr*)&param->mask : NULL;
444
445 struct radix_node_head *rnh = rt_tables[dst.sa->sa_family];
446 if (rnh == NULL) return EAFNOSUPPORT;
447
448 lck_mtx_lock(rnh_lock);
449 struct rtentry *rt = rt_lookup(TRUE, dst.sa, mask.sa, rnh, param->ifindex);
450 lck_mtx_unlock(rnh_lock);
451
452 if (rt) *out_cookie = (nstat_provider_cookie_t)rt;
453
454 return rt ? 0 : ENOENT;
455 }
456
457 static int
458 nstat_route_gone(
459 nstat_provider_cookie_t cookie)
460 {
461 struct rtentry *rt = (struct rtentry*)cookie;
462 return ((rt->rt_flags & RTF_UP) == 0) ? 1 : 0;
463 }
464
465 static errno_t
466 nstat_route_counts(
467 nstat_provider_cookie_t cookie,
468 struct nstat_counts *out_counts,
469 int *out_gone)
470 {
471 struct rtentry *rt = (struct rtentry*)cookie;
472 struct nstat_counts *rt_stats = rt->rt_stats;
473
474 if (out_gone) *out_gone = 0;
475
476 if (out_gone && (rt->rt_flags & RTF_UP) == 0) *out_gone = 1;
477
478 if (rt_stats)
479 {
480 atomic_get_64(out_counts->nstat_rxpackets, &rt_stats->nstat_rxpackets);
481 atomic_get_64(out_counts->nstat_rxbytes, &rt_stats->nstat_rxbytes);
482 atomic_get_64(out_counts->nstat_txpackets, &rt_stats->nstat_txpackets);
483 atomic_get_64(out_counts->nstat_txbytes, &rt_stats->nstat_txbytes);
484 out_counts->nstat_rxduplicatebytes = rt_stats->nstat_rxduplicatebytes;
485 out_counts->nstat_rxoutoforderbytes = rt_stats->nstat_rxoutoforderbytes;
486 out_counts->nstat_txretransmit = rt_stats->nstat_txretransmit;
487 out_counts->nstat_connectattempts = rt_stats->nstat_connectattempts;
488 out_counts->nstat_connectsuccesses = rt_stats->nstat_connectsuccesses;
489 out_counts->nstat_min_rtt = rt_stats->nstat_min_rtt;
490 out_counts->nstat_avg_rtt = rt_stats->nstat_avg_rtt;
491 out_counts->nstat_var_rtt = rt_stats->nstat_var_rtt;
492 out_counts->nstat_cell_rxbytes = out_counts->nstat_cell_txbytes = 0;
493 }
494 else
495 {
496 bzero(out_counts, sizeof(*out_counts));
497 }
498
499 return 0;
500 }
501
502 static void
503 nstat_route_release(
504 nstat_provider_cookie_t cookie,
505 __unused int locked)
506 {
507 rtfree((struct rtentry*)cookie);
508 }
509
510 static u_int32_t nstat_route_watchers = 0;
511
512 static int
513 nstat_route_walktree_add(
514 struct radix_node *rn,
515 void *context)
516 {
517 errno_t result = 0;
518 struct rtentry *rt = (struct rtentry *)rn;
519 nstat_control_state *state = (nstat_control_state*)context;
520
521 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
522
523 /* RTF_UP can't change while rnh_lock is held */
524 if ((rt->rt_flags & RTF_UP) != 0)
525 {
526 /* Clear RTPRF_OURS if the route is still usable */
527 RT_LOCK(rt);
528 if (rt_validate(rt)) {
529 RT_ADDREF_LOCKED(rt);
530 RT_UNLOCK(rt);
531 } else {
532 RT_UNLOCK(rt);
533 rt = NULL;
534 }
535
536 /* Otherwise if RTF_CONDEMNED, treat it as if it were down */
537 if (rt == NULL)
538 return (0);
539
540 result = nstat_control_source_add(0, state, &nstat_route_provider, rt);
541 if (result != 0)
542 rtfree_locked(rt);
543 }
544
545 return result;
546 }
547
548 static errno_t
549 nstat_route_add_watcher(
550 nstat_control_state *state)
551 {
552 int i;
553 errno_t result = 0;
554 OSIncrementAtomic(&nstat_route_watchers);
555
556 lck_mtx_lock(rnh_lock);
557 for (i = 1; i < AF_MAX; i++)
558 {
559 struct radix_node_head *rnh;
560 rnh = rt_tables[i];
561 if (!rnh) continue;
562
563 result = rnh->rnh_walktree(rnh, nstat_route_walktree_add, state);
564 if (result != 0)
565 {
566 break;
567 }
568 }
569 lck_mtx_unlock(rnh_lock);
570
571 return result;
572 }
573
574 __private_extern__ void
575 nstat_route_new_entry(
576 struct rtentry *rt)
577 {
578 if (nstat_route_watchers == 0)
579 return;
580
581 lck_mtx_lock(&nstat_mtx);
582 if ((rt->rt_flags & RTF_UP) != 0)
583 {
584 nstat_control_state *state;
585 for (state = nstat_controls; state; state = state->ncs_next)
586 {
587 if ((state->ncs_watching & (1 << NSTAT_PROVIDER_ROUTE)) != 0)
588 {
589 // this client is watching routes
590 // acquire a reference for the route
591 RT_ADDREF(rt);
592
593 // add the source, if that fails, release the reference
594 if (nstat_control_source_add(0, state, &nstat_route_provider, rt) != 0)
595 RT_REMREF(rt);
596 }
597 }
598 }
599 lck_mtx_unlock(&nstat_mtx);
600 }
601
602 static void
603 nstat_route_remove_watcher(
604 __unused nstat_control_state *state)
605 {
606 OSDecrementAtomic(&nstat_route_watchers);
607 }
608
609 static errno_t
610 nstat_route_copy_descriptor(
611 nstat_provider_cookie_t cookie,
612 void *data,
613 u_int32_t len)
614 {
615 nstat_route_descriptor *desc = (nstat_route_descriptor*)data;
616 if (len < sizeof(*desc))
617 {
618 return EINVAL;
619 }
620 bzero(desc, sizeof(*desc));
621
622 struct rtentry *rt = (struct rtentry*)cookie;
623 desc->id = (uint64_t)VM_KERNEL_ADDRPERM(rt);
624 desc->parent_id = (uint64_t)VM_KERNEL_ADDRPERM(rt->rt_parent);
625 desc->gateway_id = (uint64_t)VM_KERNEL_ADDRPERM(rt->rt_gwroute);
626
627
628 // key/dest
629 struct sockaddr *sa;
630 if ((sa = rt_key(rt)))
631 nstat_copy_sa_out(sa, &desc->dst.sa, sizeof(desc->dst));
632
633 // mask
634 if ((sa = rt_mask(rt)) && sa->sa_len <= sizeof(desc->mask))
635 memcpy(&desc->mask, sa, sa->sa_len);
636
637 // gateway
638 if ((sa = rt->rt_gateway))
639 nstat_copy_sa_out(sa, &desc->gateway.sa, sizeof(desc->gateway));
640
641 if (rt->rt_ifp)
642 desc->ifindex = rt->rt_ifp->if_index;
643
644 desc->flags = rt->rt_flags;
645
646 return 0;
647 }
648
649 static void
650 nstat_init_route_provider(void)
651 {
652 bzero(&nstat_route_provider, sizeof(nstat_route_provider));
653 nstat_route_provider.nstat_descriptor_length = sizeof(nstat_route_descriptor);
654 nstat_route_provider.nstat_provider_id = NSTAT_PROVIDER_ROUTE;
655 nstat_route_provider.nstat_lookup = nstat_route_lookup;
656 nstat_route_provider.nstat_gone = nstat_route_gone;
657 nstat_route_provider.nstat_counts = nstat_route_counts;
658 nstat_route_provider.nstat_release = nstat_route_release;
659 nstat_route_provider.nstat_watcher_add = nstat_route_add_watcher;
660 nstat_route_provider.nstat_watcher_remove = nstat_route_remove_watcher;
661 nstat_route_provider.nstat_copy_descriptor = nstat_route_copy_descriptor;
662 nstat_route_provider.next = nstat_providers;
663 nstat_providers = &nstat_route_provider;
664 }
665
666 #pragma mark -- Route Collection --
667
668 static struct nstat_counts*
669 nstat_route_attach(
670 struct rtentry *rte)
671 {
672 struct nstat_counts *result = rte->rt_stats;
673 if (result) return result;
674
675 if (nstat_malloc_tag == NULL) nstat_init();
676
677 result = nstat_malloc_aligned(sizeof(*result), sizeof(u_int64_t), nstat_malloc_tag);
678 if (!result) return result;
679
680 bzero(result, sizeof(*result));
681
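	// Publish the counter block with a compare-and-swap; if another
	// thread attached one first, free ours and adopt the winner's.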
682 if (!OSCompareAndSwapPtr(NULL, result, &rte->rt_stats))
683 {
684 nstat_free_aligned(result, nstat_malloc_tag);
685 result = rte->rt_stats;
686 }
687
688 return result;
689 }
690
691 __private_extern__ void
692 nstat_route_detach(
693 struct rtentry *rte)
694 {
695 if (rte->rt_stats)
696 {
697 nstat_free_aligned(rte->rt_stats, nstat_malloc_tag);
698 rte->rt_stats = NULL;
699 }
700 }
701
702 __private_extern__ void
703 nstat_route_connect_attempt(
704 struct rtentry *rte)
705 {
706 while (rte)
707 {
708 struct nstat_counts* stats = nstat_route_attach(rte);
709 if (stats)
710 {
711 OSIncrementAtomic(&stats->nstat_connectattempts);
712 }
713
714 rte = rte->rt_parent;
715 }
716 }
717
718 __private_extern__ void
719 nstat_route_connect_success(
720 struct rtentry *rte)
721 {
722 // This route
723 while (rte)
724 {
725 struct nstat_counts* stats = nstat_route_attach(rte);
726 if (stats)
727 {
728 OSIncrementAtomic(&stats->nstat_connectsuccesses);
729 }
730
731 rte = rte->rt_parent;
732 }
733 }
734
735 __private_extern__ void
736 nstat_route_tx(
737 struct rtentry *rte,
738 u_int32_t packets,
739 u_int32_t bytes,
740 u_int32_t flags)
741 {
742 while (rte)
743 {
744 struct nstat_counts* stats = nstat_route_attach(rte);
745 if (stats)
746 {
747 if ((flags & NSTAT_TX_FLAG_RETRANSMIT) != 0)
748 {
749 OSAddAtomic(bytes, &stats->nstat_txretransmit);
750 }
751 else
752 {
753 OSAddAtomic64((SInt64)packets, (SInt64*)&stats->nstat_txpackets);
754 OSAddAtomic64((SInt64)bytes, (SInt64*)&stats->nstat_txbytes);
755 }
756 }
757
758 rte = rte->rt_parent;
759 }
760 }
761
762 __private_extern__ void
763 nstat_route_rx(
764 struct rtentry *rte,
765 u_int32_t packets,
766 u_int32_t bytes,
767 u_int32_t flags)
768 {
769 while (rte)
770 {
771 struct nstat_counts* stats = nstat_route_attach(rte);
772 if (stats)
773 {
774 if (flags == 0)
775 {
776 OSAddAtomic64((SInt64)packets, (SInt64*)&stats->nstat_rxpackets);
777 OSAddAtomic64((SInt64)bytes, (SInt64*)&stats->nstat_rxbytes);
778 }
779 else
780 {
781 if (flags & NSTAT_RX_FLAG_OUT_OF_ORDER)
782 OSAddAtomic(bytes, &stats->nstat_rxoutoforderbytes);
783 if (flags & NSTAT_RX_FLAG_DUPLICATE)
784 OSAddAtomic(bytes, &stats->nstat_rxduplicatebytes);
785 }
786 }
787
788 rte = rte->rt_parent;
789 }
790 }
791
792 __private_extern__ void
793 nstat_route_rtt(
794 struct rtentry *rte,
795 u_int32_t rtt,
796 u_int32_t rtt_var)
797 {
798 const int32_t factor = 8;
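	// The smoothing below is an EWMA with gain 1/factor: each sample
	// moves the stored value 1/8 of the way toward the measurement,
	// i.e. new = old + (sample - old) / 8.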
799
800 while (rte)
801 {
802 struct nstat_counts* stats = nstat_route_attach(rte);
803 if (stats)
804 {
805 int32_t oldrtt;
806 int32_t newrtt;
807
808 // average
809 do
810 {
811 oldrtt = stats->nstat_avg_rtt;
812 if (oldrtt == 0)
813 {
814 newrtt = rtt;
815 }
816 else
817 {
818 newrtt = oldrtt - (oldrtt - (int32_t)rtt) / factor;
819 }
820 if (oldrtt == newrtt) break;
821 } while (!OSCompareAndSwap(oldrtt, newrtt, &stats->nstat_avg_rtt));
822
823 // minimum
824 do
825 {
826 oldrtt = stats->nstat_min_rtt;
827 if (oldrtt != 0 && oldrtt < (int32_t)rtt)
828 {
829 break;
830 }
831 } while (!OSCompareAndSwap(oldrtt, rtt, &stats->nstat_min_rtt));
832
833 // variance
834 do
835 {
836 oldrtt = stats->nstat_var_rtt;
837 if (oldrtt == 0)
838 {
839 newrtt = rtt_var;
840 }
841 else
842 {
843 newrtt = oldrtt - (oldrtt - (int32_t)rtt_var) / factor;
844 }
845 if (oldrtt == newrtt) break;
846 } while (!OSCompareAndSwap(oldrtt, newrtt, &stats->nstat_var_rtt));
847 }
848
849 rte = rte->rt_parent;
850 }
851 }
852
853
854 #pragma mark -- TCP Provider --
855
856 /*
857 * Due to the way the kernel deallocates a process (the process structure
858 * might be gone by the time we get the PCB detach notification),
859 * we need to cache the process name. Without this, proc_name() would
860 * return null and the process name would never be sent to userland.
861 *
 862  * For UDP sockets, we also cache the connection tuples along with
863 * the interface index. This is necessary because when UDP sockets are
864 * disconnected, the connection tuples are forever lost from the inpcb, thus
865 * we need to keep track of the last call to connect() in ntstat.
866 */
867 struct nstat_tucookie {
868 struct inpcb *inp;
869 char pname[MAXCOMLEN+1];
870 bool cached;
871 union
872 {
873 struct sockaddr_in v4;
874 struct sockaddr_in6 v6;
875 } local;
876 union
877 {
878 struct sockaddr_in v4;
879 struct sockaddr_in6 v6;
880 } remote;
881 unsigned int if_index;
882 uint16_t ifnet_properties;
883 };
884
885 static struct nstat_tucookie *
886 nstat_tucookie_alloc_internal(
887 struct inpcb *inp,
888 bool ref,
889 bool locked)
890 {
891 struct nstat_tucookie *cookie;
892
893 cookie = OSMalloc(sizeof(*cookie), nstat_malloc_tag);
894 if (cookie == NULL)
895 return NULL;
896 if (!locked)
897 lck_mtx_assert(&nstat_mtx, LCK_MTX_ASSERT_NOTOWNED);
898 if (ref && in_pcb_checkstate(inp, WNT_ACQUIRE, locked) == WNT_STOPUSING)
899 {
900 OSFree(cookie, sizeof(*cookie), nstat_malloc_tag);
901 return NULL;
902 }
903 bzero(cookie, sizeof(*cookie));
904 cookie->inp = inp;
905 proc_name(inp->inp_socket->last_pid, cookie->pname,
906 sizeof(cookie->pname));
907 /*
908 * We only increment the reference count for UDP sockets because we
909 * only cache UDP socket tuples.
910 */
911 if (SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP)
912 OSIncrementAtomic(&inp->inp_nstat_refcnt);
913
914 return cookie;
915 }
916
917 static struct nstat_tucookie *
918 nstat_tucookie_alloc(
919 struct inpcb *inp)
920 {
921 return nstat_tucookie_alloc_internal(inp, false, false);
922 }
923
924 static struct nstat_tucookie *
925 nstat_tucookie_alloc_ref(
926 struct inpcb *inp)
927 {
928 return nstat_tucookie_alloc_internal(inp, true, false);
929 }
930
931 static struct nstat_tucookie *
932 nstat_tucookie_alloc_ref_locked(
933 struct inpcb *inp)
934 {
935 return nstat_tucookie_alloc_internal(inp, true, true);
936 }
937
938 static void
939 nstat_tucookie_release_internal(
940 struct nstat_tucookie *cookie,
941 int inplock)
942 {
943 if (SOCK_PROTO(cookie->inp->inp_socket) == IPPROTO_UDP)
944 OSDecrementAtomic(&cookie->inp->inp_nstat_refcnt);
945 in_pcb_checkstate(cookie->inp, WNT_RELEASE, inplock);
946 OSFree(cookie, sizeof(*cookie), nstat_malloc_tag);
947 }
948
949 static void
950 nstat_tucookie_release(
951 struct nstat_tucookie *cookie)
952 {
953 nstat_tucookie_release_internal(cookie, false);
954 }
955
956 static void
957 nstat_tucookie_release_locked(
958 struct nstat_tucookie *cookie)
959 {
960 nstat_tucookie_release_internal(cookie, true);
961 }
962
963
964 static nstat_provider nstat_tcp_provider;
965
966 static errno_t
967 nstat_tcpudp_lookup(
968 struct inpcbinfo *inpinfo,
969 const void *data,
970 u_int32_t length,
971 nstat_provider_cookie_t *out_cookie)
972 {
973 struct inpcb *inp = NULL;
974
975 // parameter validation
976 const nstat_tcp_add_param *param = (const nstat_tcp_add_param*)data;
977 if (length < sizeof(*param))
978 {
979 return EINVAL;
980 }
981
982 // src and dst must match
983 if (param->remote.v4.sin_family != 0 &&
984 param->remote.v4.sin_family != param->local.v4.sin_family)
985 {
986 return EINVAL;
987 }
988
989
990 switch (param->local.v4.sin_family)
991 {
992 case AF_INET:
993 {
994 if (param->local.v4.sin_len != sizeof(param->local.v4) ||
995 (param->remote.v4.sin_family != 0 &&
996 param->remote.v4.sin_len != sizeof(param->remote.v4)))
997 {
998 return EINVAL;
999 }
1000
1001 inp = in_pcblookup_hash(inpinfo, param->remote.v4.sin_addr, param->remote.v4.sin_port,
1002 param->local.v4.sin_addr, param->local.v4.sin_port, 1, NULL);
1003 }
1004 break;
1005
1006 #if INET6
1007 case AF_INET6:
1008 {
1009 union
1010 {
1011 const struct in6_addr *in6c;
1012 struct in6_addr *in6;
1013 } local, remote;
1014
1015 if (param->local.v6.sin6_len != sizeof(param->local.v6) ||
1016 (param->remote.v6.sin6_family != 0 &&
1017 param->remote.v6.sin6_len != sizeof(param->remote.v6)))
1018 {
1019 return EINVAL;
1020 }
1021
1022 local.in6c = &param->local.v6.sin6_addr;
1023 remote.in6c = &param->remote.v6.sin6_addr;
1024
1025 inp = in6_pcblookup_hash(inpinfo, remote.in6, param->remote.v6.sin6_port,
1026 local.in6, param->local.v6.sin6_port, 1, NULL);
1027 }
1028 break;
1029 #endif
1030
1031 default:
1032 return EINVAL;
1033 }
1034
1035 if (inp == NULL)
1036 return ENOENT;
1037
1038 // At this point we have a ref to the inpcb
1039 *out_cookie = nstat_tucookie_alloc(inp);
1040 if (*out_cookie == NULL)
1041 in_pcb_checkstate(inp, WNT_RELEASE, 0);
1042
1043 return 0;
1044 }
1045
1046 static errno_t
1047 nstat_tcp_lookup(
1048 const void *data,
1049 u_int32_t length,
1050 nstat_provider_cookie_t *out_cookie)
1051 {
1052 return nstat_tcpudp_lookup(&tcbinfo, data, length, out_cookie);
1053 }
1054
1055 static int
1056 nstat_tcp_gone(
1057 nstat_provider_cookie_t cookie)
1058 {
1059 struct nstat_tucookie *tucookie =
1060 (struct nstat_tucookie *)cookie;
1061 struct inpcb *inp;
1062 struct tcpcb *tp;
1063
1064 return (!(inp = tucookie->inp) ||
1065 !(tp = intotcpcb(inp)) ||
1066 inp->inp_state == INPCB_STATE_DEAD) ? 1 : 0;
1067 }
1068
1069 static errno_t
1070 nstat_tcp_counts(
1071 nstat_provider_cookie_t cookie,
1072 struct nstat_counts *out_counts,
1073 int *out_gone)
1074 {
1075 struct nstat_tucookie *tucookie =
1076 (struct nstat_tucookie *)cookie;
1077 struct inpcb *inp;
1078
1079 bzero(out_counts, sizeof(*out_counts));
1080
1081 if (out_gone) *out_gone = 0;
1082
1083 // if the pcb is in the dead state, we should stop using it
1084 if (nstat_tcp_gone(cookie))
1085 {
1086 if (out_gone) *out_gone = 1;
1087 if (!(inp = tucookie->inp) || !intotcpcb(inp))
1088 return EINVAL;
1089 }
1090 inp = tucookie->inp;
1091 struct tcpcb *tp = intotcpcb(inp);
1092
1093 atomic_get_64(out_counts->nstat_rxpackets, &inp->inp_stat->rxpackets);
1094 atomic_get_64(out_counts->nstat_rxbytes, &inp->inp_stat->rxbytes);
1095 atomic_get_64(out_counts->nstat_txpackets, &inp->inp_stat->txpackets);
1096 atomic_get_64(out_counts->nstat_txbytes, &inp->inp_stat->txbytes);
1097 out_counts->nstat_rxduplicatebytes = tp->t_stat.rxduplicatebytes;
1098 out_counts->nstat_rxoutoforderbytes = tp->t_stat.rxoutoforderbytes;
1099 out_counts->nstat_txretransmit = tp->t_stat.txretransmitbytes;
1100 out_counts->nstat_connectattempts = tp->t_state >= TCPS_SYN_SENT ? 1 : 0;
1101 out_counts->nstat_connectsuccesses = tp->t_state >= TCPS_ESTABLISHED ? 1 : 0;
1102 out_counts->nstat_avg_rtt = tp->t_srtt;
1103 out_counts->nstat_min_rtt = tp->t_rttbest;
1104 out_counts->nstat_var_rtt = tp->t_rttvar;
1105 if (out_counts->nstat_avg_rtt < out_counts->nstat_min_rtt)
1106 out_counts->nstat_min_rtt = out_counts->nstat_avg_rtt;
1107 atomic_get_64(out_counts->nstat_cell_rxbytes, &inp->inp_cstat->rxbytes);
1108 atomic_get_64(out_counts->nstat_cell_txbytes, &inp->inp_cstat->txbytes);
1109 atomic_get_64(out_counts->nstat_wifi_rxbytes, &inp->inp_wstat->rxbytes);
1110 atomic_get_64(out_counts->nstat_wifi_txbytes, &inp->inp_wstat->txbytes);
1111 atomic_get_64(out_counts->nstat_wired_rxbytes, &inp->inp_Wstat->rxbytes);
1112 atomic_get_64(out_counts->nstat_wired_txbytes, &inp->inp_Wstat->txbytes);
1113
1114 return 0;
1115 }
1116
1117 static void
1118 nstat_tcp_release(
1119 nstat_provider_cookie_t cookie,
1120 int locked)
1121 {
1122 struct nstat_tucookie *tucookie =
1123 (struct nstat_tucookie *)cookie;
1124
1125 nstat_tucookie_release_internal(tucookie, locked);
1126 }
1127
1128 static errno_t
1129 nstat_tcp_add_watcher(
1130 nstat_control_state *state)
1131 {
1132 OSIncrementAtomic(&nstat_tcp_watchers);
1133
1134 lck_rw_lock_shared(tcbinfo.ipi_lock);
1135
1136 // Add all current tcp inpcbs. Ignore those in timewait
1137 struct inpcb *inp;
1138 struct nstat_tucookie *cookie;
1139 LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list)
1140 {
1141 cookie = nstat_tucookie_alloc_ref(inp);
1142 if (cookie == NULL)
1143 continue;
1144 if (nstat_control_source_add(0, state, &nstat_tcp_provider,
1145 cookie) != 0)
1146 {
1147 nstat_tucookie_release(cookie);
1148 break;
1149 }
1150 }
1151
1152 lck_rw_done(tcbinfo.ipi_lock);
1153
1154 return 0;
1155 }
1156
1157 static void
1158 nstat_tcp_remove_watcher(
1159 __unused nstat_control_state *state)
1160 {
1161 OSDecrementAtomic(&nstat_tcp_watchers);
1162 }
1163
1164 __private_extern__ void
1165 nstat_tcp_new_pcb(
1166 struct inpcb *inp)
1167 {
1168 struct nstat_tucookie *cookie;
1169
1170 if (nstat_tcp_watchers == 0)
1171 return;
1172
1173 socket_lock(inp->inp_socket, 0);
1174 lck_mtx_lock(&nstat_mtx);
1175 nstat_control_state *state;
1176 for (state = nstat_controls; state; state = state->ncs_next)
1177 {
1178 if ((state->ncs_watching & (1 << NSTAT_PROVIDER_TCP)) != 0)
1179 {
1180 // this client is watching tcp
1181 // acquire a reference for it
1182 cookie = nstat_tucookie_alloc_ref_locked(inp);
1183 if (cookie == NULL)
1184 continue;
1185 // add the source, if that fails, release the reference
1186 if (nstat_control_source_add(0, state,
1187 &nstat_tcp_provider, cookie) != 0)
1188 {
1189 nstat_tucookie_release_locked(cookie);
1190 break;
1191 }
1192 }
1193 }
1194 lck_mtx_unlock(&nstat_mtx);
1195 socket_unlock(inp->inp_socket, 0);
1196 }
1197
1198 __private_extern__ void
1199 nstat_pcb_detach(struct inpcb *inp)
1200 {
1201 nstat_control_state *state;
1202 nstat_src *src, *prevsrc;
1203 nstat_src *dead_list = NULL;
1204 struct nstat_tucookie *tucookie;
1205 errno_t result;
1206
1207 if (inp == NULL || (nstat_tcp_watchers == 0 && nstat_udp_watchers == 0))
1208 return;
1209
1210 lck_mtx_lock(&nstat_mtx);
1211 for (state = nstat_controls; state; state = state->ncs_next)
1212 {
1213 lck_mtx_lock(&state->mtx);
1214 for (prevsrc = NULL, src = state->ncs_srcs; src;
1215 prevsrc = src, src = src->next)
1216 {
1217 tucookie = (struct nstat_tucookie *)src->cookie;
1218 if (tucookie->inp == inp)
1219 break;
1220 }
1221
1222 if (src)
1223 {
1224 result = nstat_control_send_goodbye(state, src);
1225
1226 if (prevsrc)
1227 prevsrc->next = src->next;
1228 else
1229 state->ncs_srcs = src->next;
1230
1231 src->next = dead_list;
1232 dead_list = src;
1233 }
1234 lck_mtx_unlock(&state->mtx);
1235 }
1236 lck_mtx_unlock(&nstat_mtx);
1237
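	// Tear the unlinked sources down only after both locks are dropped,
	// presumably because cleanup reaches into the provider release path
	// and must respect the lock order noted at the top of this file.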
1238 while (dead_list) {
1239 src = dead_list;
1240 dead_list = src->next;
1241
1242 nstat_control_cleanup_source(NULL, src, TRUE);
1243 }
1244 }
1245
1246 __private_extern__ void
1247 nstat_pcb_cache(struct inpcb *inp)
1248 {
1249 nstat_control_state *state;
1250 nstat_src *src;
1251 struct nstat_tucookie *tucookie;
1252
1253 if (inp == NULL || nstat_udp_watchers == 0 ||
1254 inp->inp_nstat_refcnt == 0)
1255 return;
1256 VERIFY(SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP);
1257 lck_mtx_lock(&nstat_mtx);
1258 for (state = nstat_controls; state; state = state->ncs_next) {
1259 lck_mtx_lock(&state->mtx);
1260 for (src = state->ncs_srcs; src; src = src->next)
1261 {
1262 tucookie = (struct nstat_tucookie *)src->cookie;
1263 if (tucookie->inp == inp)
1264 {
1265 if (inp->inp_vflag & INP_IPV6)
1266 {
1267 nstat_ip6_to_sockaddr(&inp->in6p_laddr,
1268 inp->inp_lport,
1269 &tucookie->local.v6,
1270 sizeof(tucookie->local));
1271 nstat_ip6_to_sockaddr(&inp->in6p_faddr,
1272 inp->inp_fport,
1273 &tucookie->remote.v6,
1274 sizeof(tucookie->remote));
1275 }
1276 else if (inp->inp_vflag & INP_IPV4)
1277 {
1278 nstat_ip_to_sockaddr(&inp->inp_laddr,
1279 inp->inp_lport,
1280 &tucookie->local.v4,
1281 sizeof(tucookie->local));
1282 nstat_ip_to_sockaddr(&inp->inp_faddr,
1283 inp->inp_fport,
1284 &tucookie->remote.v4,
1285 sizeof(tucookie->remote));
1286 }
1287 if (inp->inp_last_outifp)
1288 tucookie->if_index =
1289 inp->inp_last_outifp->if_index;
1290
1291 tucookie->ifnet_properties = nstat_inpcb_to_flags(inp);
1292 tucookie->cached = true;
1293 break;
1294 }
1295 }
1296 lck_mtx_unlock(&state->mtx);
1297 }
1298 lck_mtx_unlock(&nstat_mtx);
1299 }
1300
1301 __private_extern__ void
1302 nstat_pcb_invalidate_cache(struct inpcb *inp)
1303 {
1304 nstat_control_state *state;
1305 nstat_src *src;
1306 struct nstat_tucookie *tucookie;
1307
1308 if (inp == NULL || nstat_udp_watchers == 0 ||
1309 inp->inp_nstat_refcnt == 0)
1310 return;
1311 VERIFY(SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP);
1312 lck_mtx_lock(&nstat_mtx);
1313 for (state = nstat_controls; state; state = state->ncs_next) {
1314 lck_mtx_lock(&state->mtx);
1315 for (src = state->ncs_srcs; src; src = src->next)
1316 {
1317 tucookie = (struct nstat_tucookie *)src->cookie;
1318 if (tucookie->inp == inp)
1319 {
1320 tucookie->cached = false;
1321 break;
1322 }
1323 }
1324 lck_mtx_unlock(&state->mtx);
1325 }
1326 lck_mtx_unlock(&nstat_mtx);
1327 }
1328
1329 static errno_t
1330 nstat_tcp_copy_descriptor(
1331 nstat_provider_cookie_t cookie,
1332 void *data,
1333 u_int32_t len)
1334 {
1335 if (len < sizeof(nstat_tcp_descriptor))
1336 {
1337 return EINVAL;
1338 }
1339
1340 if (nstat_tcp_gone(cookie))
1341 return EINVAL;
1342
1343 nstat_tcp_descriptor *desc = (nstat_tcp_descriptor*)data;
1344 struct nstat_tucookie *tucookie =
1345 (struct nstat_tucookie *)cookie;
1346 struct inpcb *inp = tucookie->inp;
1347 struct tcpcb *tp = intotcpcb(inp);
1348 bzero(desc, sizeof(*desc));
1349
1350 if (inp->inp_vflag & INP_IPV6)
1351 {
1352 nstat_ip6_to_sockaddr(&inp->in6p_laddr, inp->inp_lport,
1353 &desc->local.v6, sizeof(desc->local));
1354 nstat_ip6_to_sockaddr(&inp->in6p_faddr, inp->inp_fport,
1355 &desc->remote.v6, sizeof(desc->remote));
1356 }
1357 else if (inp->inp_vflag & INP_IPV4)
1358 {
1359 nstat_ip_to_sockaddr(&inp->inp_laddr, inp->inp_lport,
1360 &desc->local.v4, sizeof(desc->local));
1361 nstat_ip_to_sockaddr(&inp->inp_faddr, inp->inp_fport,
1362 &desc->remote.v4, sizeof(desc->remote));
1363 }
1364
1365 desc->state = intotcpcb(inp)->t_state;
1366 desc->ifindex = (inp->inp_last_outifp == NULL) ? 0 :
1367 inp->inp_last_outifp->if_index;
1368
1369 // danger - not locked, values could be bogus
1370 desc->txunacked = tp->snd_max - tp->snd_una;
1371 desc->txwindow = tp->snd_wnd;
1372 desc->txcwindow = tp->snd_cwnd;
1373
1374 if (CC_ALGO(tp)->name != NULL) {
1375 strlcpy(desc->cc_algo, CC_ALGO(tp)->name,
1376 sizeof(desc->cc_algo));
1377 }
1378
1379 struct socket *so = inp->inp_socket;
1380 if (so)
1381 {
1382 // TBD - take the socket lock around these to make sure
1383 // they're in sync?
1384 desc->upid = so->last_upid;
1385 desc->pid = so->last_pid;
1386 desc->traffic_class = so->so_traffic_class;
1387 desc->traffic_mgt_flags = so->so_traffic_mgt_flags;
1388 proc_name(desc->pid, desc->pname, sizeof(desc->pname));
1389 if (desc->pname[0] == 0)
1390 {
1391 strlcpy(desc->pname, tucookie->pname,
1392 sizeof(desc->pname));
1393 }
1394 else
1395 {
1396 desc->pname[sizeof(desc->pname) - 1] = 0;
1397 strlcpy(tucookie->pname, desc->pname,
1398 sizeof(tucookie->pname));
1399 }
1400 memcpy(desc->uuid, so->last_uuid, sizeof(so->last_uuid));
1401 memcpy(desc->vuuid, so->so_vuuid, sizeof(so->so_vuuid));
1402 if (so->so_flags & SOF_DELEGATED) {
1403 desc->eupid = so->e_upid;
1404 desc->epid = so->e_pid;
1405 memcpy(desc->euuid, so->e_uuid, sizeof(so->e_uuid));
1406 } else {
1407 desc->eupid = desc->upid;
1408 desc->epid = desc->pid;
1409 memcpy(desc->euuid, desc->uuid, sizeof(desc->uuid));
1410 }
1411 desc->sndbufsize = so->so_snd.sb_hiwat;
1412 desc->sndbufused = so->so_snd.sb_cc;
1413 desc->rcvbufsize = so->so_rcv.sb_hiwat;
1414 desc->rcvbufused = so->so_rcv.sb_cc;
1415 }
1416
1417 tcp_get_connectivity_status(tp, &desc->connstatus);
1418 desc->ifnet_properties = nstat_inpcb_to_flags(inp);
1419 return 0;
1420 }
1421
1422 static bool
1423 nstat_tcpudp_reporting_allowed(nstat_provider_cookie_t cookie, uint64_t filter)
1424 {
1425 bool retval = true;
1426
 1427 	/* Only apply the interface filter if at least one interface type is allowed. */
1428 if ((filter & NSTAT_FILTER_ACCEPT_ALL) != 0)
1429 {
1430 struct nstat_tucookie *tucookie = (struct nstat_tucookie *)cookie;
1431 struct inpcb *inp = tucookie->inp;
1432
1433 uint16_t interface_properties = nstat_inpcb_to_flags(inp);
1434
 1435 		/* For now, just check the interface type. */
1436 retval = ((filter & interface_properties) != 0);
1437 }
1438 return retval;
1439 }
1440
1441 static void
1442 nstat_init_tcp_provider(void)
1443 {
1444 bzero(&nstat_tcp_provider, sizeof(nstat_tcp_provider));
1445 nstat_tcp_provider.nstat_descriptor_length = sizeof(nstat_tcp_descriptor);
1446 nstat_tcp_provider.nstat_provider_id = NSTAT_PROVIDER_TCP;
1447 nstat_tcp_provider.nstat_lookup = nstat_tcp_lookup;
1448 nstat_tcp_provider.nstat_gone = nstat_tcp_gone;
1449 nstat_tcp_provider.nstat_counts = nstat_tcp_counts;
1450 nstat_tcp_provider.nstat_release = nstat_tcp_release;
1451 nstat_tcp_provider.nstat_watcher_add = nstat_tcp_add_watcher;
1452 nstat_tcp_provider.nstat_watcher_remove = nstat_tcp_remove_watcher;
1453 nstat_tcp_provider.nstat_copy_descriptor = nstat_tcp_copy_descriptor;
1454 nstat_tcp_provider.nstat_reporting_allowed = nstat_tcpudp_reporting_allowed;
1455 nstat_tcp_provider.next = nstat_providers;
1456 nstat_providers = &nstat_tcp_provider;
1457 }
1458
1459 #pragma mark -- UDP Provider --
1460
1461 static nstat_provider nstat_udp_provider;
1462
1463 static errno_t
1464 nstat_udp_lookup(
1465 const void *data,
1466 u_int32_t length,
1467 nstat_provider_cookie_t *out_cookie)
1468 {
1469 return nstat_tcpudp_lookup(&udbinfo, data, length, out_cookie);
1470 }
1471
1472 static int
1473 nstat_udp_gone(
1474 nstat_provider_cookie_t cookie)
1475 {
1476 struct nstat_tucookie *tucookie =
1477 (struct nstat_tucookie *)cookie;
1478 struct inpcb *inp;
1479
1480 return (!(inp = tucookie->inp) ||
1481 inp->inp_state == INPCB_STATE_DEAD) ? 1 : 0;
1482 }
1483
1484 static errno_t
1485 nstat_udp_counts(
1486 nstat_provider_cookie_t cookie,
1487 struct nstat_counts *out_counts,
1488 int *out_gone)
1489 {
1490 struct nstat_tucookie *tucookie =
1491 (struct nstat_tucookie *)cookie;
1492
1493 if (out_gone) *out_gone = 0;
1494
1495 // if the pcb is in the dead state, we should stop using it
1496 if (nstat_udp_gone(cookie))
1497 {
1498 if (out_gone) *out_gone = 1;
1499 if (!tucookie->inp)
1500 return EINVAL;
1501 }
1502 struct inpcb *inp = tucookie->inp;
1503
1504 atomic_get_64(out_counts->nstat_rxpackets, &inp->inp_stat->rxpackets);
1505 atomic_get_64(out_counts->nstat_rxbytes, &inp->inp_stat->rxbytes);
1506 atomic_get_64(out_counts->nstat_txpackets, &inp->inp_stat->txpackets);
1507 atomic_get_64(out_counts->nstat_txbytes, &inp->inp_stat->txbytes);
1508 atomic_get_64(out_counts->nstat_cell_rxbytes, &inp->inp_cstat->rxbytes);
1509 atomic_get_64(out_counts->nstat_cell_txbytes, &inp->inp_cstat->txbytes);
1510 atomic_get_64(out_counts->nstat_wifi_rxbytes, &inp->inp_wstat->rxbytes);
1511 atomic_get_64(out_counts->nstat_wifi_txbytes, &inp->inp_wstat->txbytes);
1512 atomic_get_64(out_counts->nstat_wired_rxbytes, &inp->inp_Wstat->rxbytes);
1513 atomic_get_64(out_counts->nstat_wired_txbytes, &inp->inp_Wstat->txbytes);
1514
1515 return 0;
1516 }
1517
1518 static void
1519 nstat_udp_release(
1520 nstat_provider_cookie_t cookie,
1521 int locked)
1522 {
1523 struct nstat_tucookie *tucookie =
1524 (struct nstat_tucookie *)cookie;
1525
1526 nstat_tucookie_release_internal(tucookie, locked);
1527 }
1528
1529 static errno_t
1530 nstat_udp_add_watcher(
1531 nstat_control_state *state)
1532 {
1533 struct inpcb *inp;
1534 struct nstat_tucookie *cookie;
1535
1536 OSIncrementAtomic(&nstat_udp_watchers);
1537
1538 lck_rw_lock_shared(udbinfo.ipi_lock);
1539 // Add all current UDP inpcbs.
1540 LIST_FOREACH(inp, udbinfo.ipi_listhead, inp_list)
1541 {
1542 cookie = nstat_tucookie_alloc_ref(inp);
1543 if (cookie == NULL)
1544 continue;
1545 if (nstat_control_source_add(0, state, &nstat_udp_provider,
1546 cookie) != 0)
1547 {
1548 nstat_tucookie_release(cookie);
1549 break;
1550 }
1551 }
1552
1553 lck_rw_done(udbinfo.ipi_lock);
1554
1555 return 0;
1556 }
1557
1558 static void
1559 nstat_udp_remove_watcher(
1560 __unused nstat_control_state *state)
1561 {
1562 OSDecrementAtomic(&nstat_udp_watchers);
1563 }
1564
1565 __private_extern__ void
1566 nstat_udp_new_pcb(
1567 struct inpcb *inp)
1568 {
1569 struct nstat_tucookie *cookie;
1570
1571 if (nstat_udp_watchers == 0)
1572 return;
1573
1574 socket_lock(inp->inp_socket, 0);
1575 lck_mtx_lock(&nstat_mtx);
1576 nstat_control_state *state;
1577 for (state = nstat_controls; state; state = state->ncs_next)
1578 {
1579 if ((state->ncs_watching & (1 << NSTAT_PROVIDER_UDP)) != 0)
1580 {
 1581 			// this client is watching udp
1582 // acquire a reference for it
1583 cookie = nstat_tucookie_alloc_ref_locked(inp);
1584 if (cookie == NULL)
1585 continue;
1586 // add the source, if that fails, release the reference
1587 if (nstat_control_source_add(0, state,
1588 &nstat_udp_provider, cookie) != 0)
1589 {
1590 nstat_tucookie_release_locked(cookie);
1591 break;
1592 }
1593 }
1594 }
1595 lck_mtx_unlock(&nstat_mtx);
1596 socket_unlock(inp->inp_socket, 0);
1597 }
1598
1599 static errno_t
1600 nstat_udp_copy_descriptor(
1601 nstat_provider_cookie_t cookie,
1602 void *data,
1603 u_int32_t len)
1604 {
1605 if (len < sizeof(nstat_udp_descriptor))
1606 {
1607 return EINVAL;
1608 }
1609
1610 if (nstat_udp_gone(cookie))
1611 return EINVAL;
1612
1613 struct nstat_tucookie *tucookie =
1614 (struct nstat_tucookie *)cookie;
1615 nstat_udp_descriptor *desc = (nstat_udp_descriptor*)data;
1616 struct inpcb *inp = tucookie->inp;
1617
1618 bzero(desc, sizeof(*desc));
1619
1620 if (tucookie->cached == false) {
1621 if (inp->inp_vflag & INP_IPV6)
1622 {
1623 nstat_ip6_to_sockaddr(&inp->in6p_laddr, inp->inp_lport,
1624 &desc->local.v6, sizeof(desc->local.v6));
1625 nstat_ip6_to_sockaddr(&inp->in6p_faddr, inp->inp_fport,
1626 &desc->remote.v6, sizeof(desc->remote.v6));
1627 }
1628 else if (inp->inp_vflag & INP_IPV4)
1629 {
1630 nstat_ip_to_sockaddr(&inp->inp_laddr, inp->inp_lport,
1631 &desc->local.v4, sizeof(desc->local.v4));
1632 nstat_ip_to_sockaddr(&inp->inp_faddr, inp->inp_fport,
1633 &desc->remote.v4, sizeof(desc->remote.v4));
1634 }
1635 desc->ifnet_properties = nstat_inpcb_to_flags(inp);
1636 }
1637 else
1638 {
1639 if (inp->inp_vflag & INP_IPV6)
1640 {
1641 memcpy(&desc->local.v6, &tucookie->local.v6,
1642 sizeof(desc->local.v6));
1643 memcpy(&desc->remote.v6, &tucookie->remote.v6,
1644 sizeof(desc->remote.v6));
1645 }
1646 else if (inp->inp_vflag & INP_IPV4)
1647 {
1648 memcpy(&desc->local.v4, &tucookie->local.v4,
1649 sizeof(desc->local.v4));
1650 memcpy(&desc->remote.v4, &tucookie->remote.v4,
1651 sizeof(desc->remote.v4));
1652 }
1653 desc->ifnet_properties = tucookie->ifnet_properties;
1654 }
1655
1656 if (inp->inp_last_outifp)
1657 desc->ifindex = inp->inp_last_outifp->if_index;
1658 else
1659 desc->ifindex = tucookie->if_index;
1660
1661 struct socket *so = inp->inp_socket;
1662 if (so)
1663 {
1664 // TBD - take the socket lock around these to make sure
1665 // they're in sync?
1666 desc->upid = so->last_upid;
1667 desc->pid = so->last_pid;
1668 proc_name(desc->pid, desc->pname, sizeof(desc->pname));
1669 if (desc->pname[0] == 0)
1670 {
1671 strlcpy(desc->pname, tucookie->pname,
1672 sizeof(desc->pname));
1673 }
1674 else
1675 {
1676 desc->pname[sizeof(desc->pname) - 1] = 0;
1677 strlcpy(tucookie->pname, desc->pname,
1678 sizeof(tucookie->pname));
1679 }
1680 memcpy(desc->uuid, so->last_uuid, sizeof(so->last_uuid));
1681 memcpy(desc->vuuid, so->so_vuuid, sizeof(so->so_vuuid));
1682 if (so->so_flags & SOF_DELEGATED) {
1683 desc->eupid = so->e_upid;
1684 desc->epid = so->e_pid;
1685 memcpy(desc->euuid, so->e_uuid, sizeof(so->e_uuid));
1686 } else {
1687 desc->eupid = desc->upid;
1688 desc->epid = desc->pid;
1689 memcpy(desc->euuid, desc->uuid, sizeof(desc->uuid));
1690 }
1691 desc->rcvbufsize = so->so_rcv.sb_hiwat;
1692 desc->rcvbufused = so->so_rcv.sb_cc;
1693 desc->traffic_class = so->so_traffic_class;
1694 }
1695
1696 return 0;
1697 }
1698
1699 static void
1700 nstat_init_udp_provider(void)
1701 {
1702 bzero(&nstat_udp_provider, sizeof(nstat_udp_provider));
1703 nstat_udp_provider.nstat_provider_id = NSTAT_PROVIDER_UDP;
1704 nstat_udp_provider.nstat_descriptor_length = sizeof(nstat_udp_descriptor);
1705 nstat_udp_provider.nstat_lookup = nstat_udp_lookup;
1706 nstat_udp_provider.nstat_gone = nstat_udp_gone;
1707 nstat_udp_provider.nstat_counts = nstat_udp_counts;
1708 nstat_udp_provider.nstat_watcher_add = nstat_udp_add_watcher;
1709 nstat_udp_provider.nstat_watcher_remove = nstat_udp_remove_watcher;
1710 nstat_udp_provider.nstat_copy_descriptor = nstat_udp_copy_descriptor;
1711 nstat_udp_provider.nstat_release = nstat_udp_release;
1712 nstat_udp_provider.nstat_reporting_allowed = nstat_tcpudp_reporting_allowed;
1713 nstat_udp_provider.next = nstat_providers;
1714 nstat_providers = &nstat_udp_provider;
1715 }
1716
1717 #pragma mark -- ifnet Provider --
1718
1719 static nstat_provider nstat_ifnet_provider;
1720
1721 /*
1722 * We store a pointer to the ifnet and the original threshold
1723 * requested by the client.
1724 */
1725 struct nstat_ifnet_cookie
1726 {
1727 struct ifnet *ifp;
1728 uint64_t threshold;
1729 };
1730
1731 static errno_t
1732 nstat_ifnet_lookup(
1733 const void *data,
1734 u_int32_t length,
1735 nstat_provider_cookie_t *out_cookie)
1736 {
1737 const nstat_ifnet_add_param *param = (const nstat_ifnet_add_param *)data;
1738 struct ifnet *ifp;
1739 boolean_t changed = FALSE;
1740 nstat_control_state *state;
1741 nstat_src *src;
1742 struct nstat_ifnet_cookie *cookie;
1743
1744 if (length < sizeof(*param) || param->threshold < 1024*1024)
1745 return EINVAL;
1746 if (nstat_privcheck != 0) {
1747 errno_t result = priv_check_cred(kauth_cred_get(),
1748 PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0);
1749 if (result != 0)
1750 return result;
1751 }
1752 cookie = OSMalloc(sizeof(*cookie), nstat_malloc_tag);
1753 if (cookie == NULL)
1754 return ENOMEM;
1755 bzero(cookie, sizeof(*cookie));
1756
1757 ifnet_head_lock_shared();
1758 TAILQ_FOREACH(ifp, &ifnet_head, if_link)
1759 {
1760 ifnet_lock_exclusive(ifp);
1761 if (ifp->if_index == param->ifindex)
1762 {
1763 cookie->ifp = ifp;
1764 cookie->threshold = param->threshold;
1765 *out_cookie = cookie;
1766 if (!ifp->if_data_threshold ||
1767 ifp->if_data_threshold > param->threshold)
1768 {
1769 changed = TRUE;
1770 ifp->if_data_threshold = param->threshold;
1771 }
1772 ifnet_lock_done(ifp);
1773 ifnet_reference(ifp);
1774 break;
1775 }
1776 ifnet_lock_done(ifp);
1777 }
1778 ifnet_head_done();
1779
1780 /*
1781 * When we change the threshold to something smaller, we notify
1782 * all of our clients with a description message.
1783 * We won't send a message to the client we are currently serving
1784 * because it has no `ifnet source' yet.
1785 */
1786 if (changed)
1787 {
1788 lck_mtx_lock(&nstat_mtx);
1789 for (state = nstat_controls; state; state = state->ncs_next)
1790 {
1791 lck_mtx_lock(&state->mtx);
1792 for (src = state->ncs_srcs; src; src = src->next)
1793 {
1794 if (src->provider != &nstat_ifnet_provider)
1795 continue;
1796 nstat_control_send_description(state, src, 0, 0);
1797 }
1798 lck_mtx_unlock(&state->mtx);
1799 }
1800 lck_mtx_unlock(&nstat_mtx);
1801 }
1802 if (cookie->ifp == NULL)
1803 OSFree(cookie, sizeof(*cookie), nstat_malloc_tag);
1804
1805 return ifp ? 0 : EINVAL;
1806 }
1807
1808 static int
1809 nstat_ifnet_gone(
1810 nstat_provider_cookie_t cookie)
1811 {
1812 struct ifnet *ifp;
1813 struct nstat_ifnet_cookie *ifcookie =
1814 (struct nstat_ifnet_cookie *)cookie;
1815
1816 ifnet_head_lock_shared();
1817 TAILQ_FOREACH(ifp, &ifnet_head, if_link)
1818 {
1819 if (ifp == ifcookie->ifp)
1820 break;
1821 }
1822 ifnet_head_done();
1823
1824 return ifp ? 0 : 1;
1825 }
1826
1827 static errno_t
1828 nstat_ifnet_counts(
1829 nstat_provider_cookie_t cookie,
1830 struct nstat_counts *out_counts,
1831 int *out_gone)
1832 {
1833 struct nstat_ifnet_cookie *ifcookie =
1834 (struct nstat_ifnet_cookie *)cookie;
1835 struct ifnet *ifp = ifcookie->ifp;
1836
1837 if (out_gone) *out_gone = 0;
1838
1839 // if the ifnet is gone, we should stop using it
1840 if (nstat_ifnet_gone(cookie))
1841 {
1842 if (out_gone) *out_gone = 1;
1843 return EINVAL;
1844 }
1845
1846 bzero(out_counts, sizeof(*out_counts));
1847 out_counts->nstat_rxpackets = ifp->if_ipackets;
1848 out_counts->nstat_rxbytes = ifp->if_ibytes;
1849 out_counts->nstat_txpackets = ifp->if_opackets;
1850 out_counts->nstat_txbytes = ifp->if_obytes;
1851 out_counts->nstat_cell_rxbytes = out_counts->nstat_cell_txbytes = 0;
1852 return 0;
1853 }
1854
1855 static void
1856 nstat_ifnet_release(
1857 nstat_provider_cookie_t cookie,
1858 __unused int locked)
1859 {
1860 struct nstat_ifnet_cookie *ifcookie;
1861 struct ifnet *ifp;
1862 nstat_control_state *state;
1863 nstat_src *src;
1864 uint64_t minthreshold = UINT64_MAX;
1865
1866 /*
1867 * Find all the clients that requested a threshold
1868 * for this ifnet and re-calculate if_data_threshold.
1869 */
1870 lck_mtx_lock(&nstat_mtx);
1871 for (state = nstat_controls; state; state = state->ncs_next)
1872 {
1873 lck_mtx_lock(&state->mtx);
1874 for (src = state->ncs_srcs; src; src = src->next)
1875 {
1876 /* Skip the provider we are about to detach. */
1877 if (src->provider != &nstat_ifnet_provider ||
1878 src->cookie == cookie)
1879 continue;
1880 ifcookie = (struct nstat_ifnet_cookie *)src->cookie;
1881 if (ifcookie->threshold < minthreshold)
1882 minthreshold = ifcookie->threshold;
1883 }
1884 lck_mtx_unlock(&state->mtx);
1885 }
1886 lck_mtx_unlock(&nstat_mtx);
1887 /*
1888 * Reset if_data_threshold or disable it.
1889 */
1890 ifcookie = (struct nstat_ifnet_cookie *)cookie;
1891 ifp = ifcookie->ifp;
1892 if (ifnet_is_attached(ifp, 1)) {
1893 ifnet_lock_exclusive(ifp);
1894 if (minthreshold == UINT64_MAX)
1895 ifp->if_data_threshold = 0;
1896 else
1897 ifp->if_data_threshold = minthreshold;
1898 ifnet_lock_done(ifp);
1899 ifnet_decr_iorefcnt(ifp);
1900 }
1901 ifnet_release(ifp);
1902 OSFree(ifcookie, sizeof(*ifcookie), nstat_malloc_tag);
1903 }
1904
1905 static void
1906 nstat_ifnet_copy_link_status(
1907 struct ifnet *ifp,
1908 struct nstat_ifnet_descriptor *desc)
1909 {
1910 struct if_link_status *ifsr = ifp->if_link_status;
1911 nstat_ifnet_desc_link_status *link_status = &desc->link_status;
1912
1913 link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_NONE;
1914 if (ifsr == NULL)
1915 return;
1916
1917 lck_rw_lock_shared(&ifp->if_link_status_lock);
1918
1919 if (ifp->if_type == IFT_CELLULAR) {
1920
1921 nstat_ifnet_desc_cellular_status *cell_status = &link_status->u.cellular;
1922 struct if_cellular_status_v1 *if_cell_sr =
1923 &ifsr->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
1924
1925 if (ifsr->ifsr_version != IF_CELLULAR_STATUS_REPORT_VERSION_1)
1926 goto done;
1927
1928 link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_CELLULAR;
1929
1930 if (if_cell_sr->valid_bitmask & IF_CELL_LINK_QUALITY_METRIC_VALID) {
1931 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_LINK_QUALITY_METRIC_VALID;
1932 cell_status->link_quality_metric = if_cell_sr->link_quality_metric;
1933 }
1934 if (if_cell_sr->valid_bitmask & IF_CELL_UL_EFFECTIVE_BANDWIDTH_VALID) {
1935 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_EFFECTIVE_BANDWIDTH_VALID;
1936 cell_status->ul_effective_bandwidth = if_cell_sr->ul_effective_bandwidth;
1937 }
1938 if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_BANDWIDTH_VALID) {
1939 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_BANDWIDTH_VALID;
1940 cell_status->ul_max_bandwidth = if_cell_sr->ul_max_bandwidth;
1941 }
1942 if (if_cell_sr->valid_bitmask & IF_CELL_UL_MIN_LATENCY_VALID) {
1943 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MIN_LATENCY_VALID;
1944 cell_status->ul_min_latency = if_cell_sr->ul_min_latency;
1945 }
1946 if (if_cell_sr->valid_bitmask & IF_CELL_UL_EFFECTIVE_LATENCY_VALID) {
1947 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_EFFECTIVE_LATENCY_VALID;
1948 cell_status->ul_effective_latency = if_cell_sr->ul_effective_latency;
1949 }
1950 if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_LATENCY_VALID) {
1951 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_LATENCY_VALID;
1952 cell_status->ul_max_latency = if_cell_sr->ul_max_latency;
1953 }
1954 if (if_cell_sr->valid_bitmask & IF_CELL_UL_RETXT_LEVEL_VALID) {
1955 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_VALID;
1956 if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_NONE)
1957 cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_NONE;
1958 else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_LOW)
1959 cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_LOW;
1960 else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_MEDIUM)
1961 cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_MEDIUM;
1962 else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_HIGH)
1963 cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_HIGH;
1964 else
1965 cell_status->valid_bitmask &= ~NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_VALID;
1966 }
1967 if (if_cell_sr->valid_bitmask & IF_CELL_UL_BYTES_LOST_VALID) {
1968 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_BYTES_LOST_VALID;
1969 cell_status->ul_bytes_lost = if_cell_sr->ul_bytes_lost;
1970 }
1971 if (if_cell_sr->valid_bitmask & IF_CELL_UL_MIN_QUEUE_SIZE_VALID) {
1972 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MIN_QUEUE_SIZE_VALID;
1973 cell_status->ul_min_queue_size = if_cell_sr->ul_min_queue_size;
1974 }
1975 if (if_cell_sr->valid_bitmask & IF_CELL_UL_AVG_QUEUE_SIZE_VALID) {
1976 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_AVG_QUEUE_SIZE_VALID;
1977 cell_status->ul_avg_queue_size = if_cell_sr->ul_avg_queue_size;
1978 }
1979 if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_QUEUE_SIZE_VALID) {
1980 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_QUEUE_SIZE_VALID;
1981 cell_status->ul_max_queue_size = if_cell_sr->ul_max_queue_size;
1982 }
1983 if (if_cell_sr->valid_bitmask & IF_CELL_DL_EFFECTIVE_BANDWIDTH_VALID) {
1984 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_DL_EFFECTIVE_BANDWIDTH_VALID;
1985 cell_status->dl_effective_bandwidth = if_cell_sr->dl_effective_bandwidth;
1986 }
1987 if (if_cell_sr->valid_bitmask & IF_CELL_DL_MAX_BANDWIDTH_VALID) {
1988 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_DL_MAX_BANDWIDTH_VALID;
1989 cell_status->dl_max_bandwidth = if_cell_sr->dl_max_bandwidth;
1990 }
1991 if (if_cell_sr->valid_bitmask & IF_CELL_CONFIG_INACTIVITY_TIME_VALID) {
1992 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_CONFIG_INACTIVITY_TIME_VALID;
1993 cell_status->config_inactivity_time = if_cell_sr->config_inactivity_time;
1994 }
1995 if (if_cell_sr->valid_bitmask & IF_CELL_CONFIG_BACKOFF_TIME_VALID) {
1996 cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_CONFIG_BACKOFF_TIME_VALID;
1997 cell_status->config_backoff_time = if_cell_sr->config_backoff_time;
1998 }
1999
2000 } else if (ifp->if_subfamily == IFNET_SUBFAMILY_WIFI) {
2001
2002 nstat_ifnet_desc_wifi_status *wifi_status = &link_status->u.wifi;
2003 struct if_wifi_status_v1 *if_wifi_sr =
2004 &ifsr->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;
2005
2006 if (ifsr->ifsr_version != IF_WIFI_STATUS_REPORT_VERSION_1)
2007 goto done;
2008
2009 link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_WIFI;
2010
2011 if (if_wifi_sr->valid_bitmask & IF_WIFI_LINK_QUALITY_METRIC_VALID) {
2012 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_LINK_QUALITY_METRIC_VALID;
2013 wifi_status->link_quality_metric = if_wifi_sr->link_quality_metric;
2014 }
2015 if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID) {
2016 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID;
2017 wifi_status->ul_effective_bandwidth = if_wifi_sr->ul_effective_bandwidth;
2018 }
2019 if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MAX_BANDWIDTH_VALID) {
2020 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MAX_BANDWIDTH_VALID;
2021 wifi_status->ul_max_bandwidth = if_wifi_sr->ul_max_bandwidth;
2022 }
2023 if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MIN_LATENCY_VALID) {
2024 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MIN_LATENCY_VALID;
2025 wifi_status->ul_min_latency = if_wifi_sr->ul_min_latency;
2026 }
2027 if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_EFFECTIVE_LATENCY_VALID) {
2028 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_EFFECTIVE_LATENCY_VALID;
2029 wifi_status->ul_effective_latency = if_wifi_sr->ul_effective_latency;
2030 }
2031 if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MAX_LATENCY_VALID) {
2032 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MAX_LATENCY_VALID;
2033 wifi_status->ul_max_latency = if_wifi_sr->ul_max_latency;
2034 }
2035 if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_RETXT_LEVEL_VALID) {
2036 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_VALID;
2037 if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_NONE)
2038 wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_NONE;
2039 else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_LOW)
2040 wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_LOW;
2041 else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_MEDIUM)
2042 wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_MEDIUM;
2043 else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_HIGH)
2044 wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_HIGH;
2045 else
2046 wifi_status->valid_bitmask &= ~NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_VALID;
2047 }
2048 if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_BYTES_LOST_VALID) {
2049 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_BYTES_LOST_VALID;
2050 wifi_status->ul_bytes_lost = if_wifi_sr->ul_bytes_lost;
2051 }
2052 if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_ERROR_RATE_VALID) {
2053 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_ERROR_RATE_VALID;
2054 wifi_status->ul_error_rate = if_wifi_sr->ul_error_rate;
2055 }
2056 if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID) {
2057 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID;
2058 wifi_status->dl_effective_bandwidth = if_wifi_sr->dl_effective_bandwidth;
2059 }
2060 if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MAX_BANDWIDTH_VALID) {
2061 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MAX_BANDWIDTH_VALID;
2062 wifi_status->dl_max_bandwidth = if_wifi_sr->dl_max_bandwidth;
2063 }
2064 if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MIN_LATENCY_VALID) {
2065 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MIN_LATENCY_VALID;
2066 wifi_status->dl_min_latency = if_wifi_sr->dl_min_latency;
2067 }
2068 if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_EFFECTIVE_LATENCY_VALID) {
2069 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_EFFECTIVE_LATENCY_VALID;
2070 wifi_status->dl_effective_latency = if_wifi_sr->dl_effective_latency;
2071 }
2072 if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MAX_LATENCY_VALID) {
2073 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MAX_LATENCY_VALID;
2074 wifi_status->dl_max_latency = if_wifi_sr->dl_max_latency;
2075 }
2076 if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_ERROR_RATE_VALID) {
2077 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_ERROR_RATE_VALID;
2078 wifi_status->dl_error_rate = if_wifi_sr->dl_error_rate;
2079 }
2080 if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_FREQUENCY_VALID) {
2081 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_VALID;
2082 if (if_wifi_sr->config_frequency == IF_WIFI_CONFIG_FREQUENCY_2_4_GHZ)
2083 wifi_status->config_frequency = NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_2_4_GHZ;
2084 else if (if_wifi_sr->config_frequency == IF_WIFI_CONFIG_FREQUENCY_5_0_GHZ)
2085 wifi_status->config_frequency = NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_5_0_GHZ;
2086 else
2087 wifi_status->valid_bitmask &= ~NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_VALID;
2088 }
2089 if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_MULTICAST_RATE_VALID) {
2090 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_MULTICAST_RATE_VALID;
2091 wifi_status->config_multicast_rate = if_wifi_sr->config_multicast_rate;
2092 }
2093 if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_SCAN_COUNT_VALID) {
2094 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_SCAN_COUNT_VALID;
2095 wifi_status->scan_count = if_wifi_sr->scan_count;
2096 }
2097 if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_SCAN_DURATION_VALID) {
2098 wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_SCAN_DURATION_VALID;
2099 wifi_status->scan_duration = if_wifi_sr->scan_duration;
2100 }
2101 }
2102
2103 done:
2104 lck_rw_done(&ifp->if_link_status_lock);
2105 }
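/*
 * Editorial note: a minimal consumer-side sketch of the valid_bitmask
 * convention used above. A reader of an nstat_ifnet_descriptor should
 * test the matching *_VALID bit before trusting a field.
 * cell_effective_ul_bw() is a hypothetical helper, not part of this file.
 */
#if 0	/* editorial sketch, not built */
static u_int64_t
cell_effective_ul_bw(const nstat_ifnet_descriptor *desc)
{
	const nstat_ifnet_desc_cellular_status *cell;

	if (desc->link_status.link_status_type !=
	    NSTAT_IFNET_DESC_LINK_STATUS_TYPE_CELLULAR)
		return 0;
	cell = &desc->link_status.u.cellular;
	/* Read the field only once the producer has marked it valid. */
	if (cell->valid_bitmask & NSTAT_IFNET_DESC_CELL_UL_EFFECTIVE_BANDWIDTH_VALID)
		return cell->ul_effective_bandwidth;
	return 0;
}
#endif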
2106
2107 static u_int64_t nstat_ifnet_last_report_time = 0;
2108 extern int tcp_report_stats_interval;
2109
2110 static void
2111 nstat_ifnet_compute_percentages(struct if_tcp_ecn_perf_stat *ifst)
2112 {
2113 /* Retransmit percentage */
2114 if (ifst->total_rxmitpkts > 0 && ifst->total_txpkts > 0) {
2115 /* shift by 10 for precision */
2116 ifst->rxmit_percent =
2117 ((ifst->total_rxmitpkts << 10) * 100) / ifst->total_txpkts;
2118 } else {
2119 ifst->rxmit_percent = 0;
2120 }
2121
2122 /* Out-of-order percentage */
2123 if (ifst->total_oopkts > 0 && ifst->total_rxpkts > 0) {
2124 /* shift by 10 for precision */
2125 ifst->oo_percent =
2126 ((ifst->total_oopkts << 10) * 100) / ifst->total_rxpkts;
2127 } else {
2128 ifst->oo_percent = 0;
2129 }
2130
2131 /* Reorder percentage */
2132 if (ifst->total_reorderpkts > 0 &&
2133 (ifst->total_txpkts + ifst->total_rxpkts) > 0) {
2134 /* shift by 10 for precision */
2135 ifst->reorder_percent =
2136 ((ifst->total_reorderpkts << 10) * 100) /
2137 (ifst->total_txpkts + ifst->total_rxpkts);
2138 } else {
2139 ifst->reorder_percent = 0;
2140 }
2141 }
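/*
 * Editorial note: a worked example of the fixed-point scheme above, with
 * hypothetical counter values. The << 10 shift scales by 1024, so a
 * consumer recovers the percentage by dividing the reported value by 1024:
 *
 *	total_rxmitpkts = 3, total_txpkts = 200
 *	rxmit_percent = ((3 << 10) * 100) / 200 = 1536
 *	1536 / 1024 = 1.5, i.e. 1.5% of transmitted packets were retransmits
 */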
2142
2143 static void
2144 nstat_ifnet_normalize_counter(struct if_tcp_ecn_stat *if_st)
2145 {
2146 u_int64_t ecn_on_conn, ecn_off_conn;
2147
2148 if (if_st == NULL)
2149 return;
2150 ecn_on_conn = if_st->ecn_client_success +
2151 if_st->ecn_server_success;
2152 ecn_off_conn = if_st->ecn_off_conn +
2153 (if_st->ecn_client_setup - if_st->ecn_client_success) +
2154 (if_st->ecn_server_setup - if_st->ecn_server_success);
2155
2156 /*
2157 * Report sack episodes as a per-connection ratio, and rst_drop and
2158 * rxmit_drop as per-connection percentages; shift by 10 for precision.
2159 */
2160 if (ecn_on_conn > 0) {
2161 if_st->ecn_on.sack_episodes =
2162 (if_st->ecn_on.sack_episodes << 10) / ecn_on_conn;
2163 if_st->ecn_on.rst_drop =
2164 (if_st->ecn_on.rst_drop << 10) * 100 / ecn_on_conn;
2165 if_st->ecn_on.rxmit_drop =
2166 (if_st->ecn_on.rxmit_drop << 10) * 100 / ecn_on_conn;
2167 } else {
2168 /* set to zero, just in case */
2169 if_st->ecn_on.sack_episodes = 0;
2170 if_st->ecn_on.rst_drop = 0;
2171 if_st->ecn_on.rxmit_drop = 0;
2172 }
2173
2174 if (ecn_off_conn > 0) {
2175 if_st->ecn_off.sack_episodes =
2176 (if_st->ecn_off.sack_episodes << 10) / ecn_off_conn;
2177 if_st->ecn_off.rst_drop =
2178 (if_st->ecn_off.rst_drop << 10) * 100 / ecn_off_conn;
2179 if_st->ecn_off.rxmit_drop =
2180 (if_st->ecn_off.rxmit_drop << 10) * 100 / ecn_off_conn;
2181 } else {
2182 if_st->ecn_off.sack_episodes = 0;
2183 if_st->ecn_off.rst_drop = 0;
2184 if_st->ecn_off.rxmit_drop = 0;
2185 }
2186 if_st->ecn_total_conn = ecn_off_conn + ecn_on_conn;
2187 }
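/*
 * Editorial note: a worked example of the normalization above, with
 * hypothetical counters. Note the asymmetry: sack_episodes is not
 * multiplied by 100, so it becomes a scaled per-connection ratio, while
 * rst_drop and rxmit_drop become scaled per-connection percentages:
 *
 *	ecn_on_conn = 4 (e.g. 3 client + 1 server successes)
 *	sack_episodes = (8 << 10) / 4 = 2048;  2048 / 1024 = 2.0 episodes/conn
 *	rst_drop = ((1 << 10) * 100) / 4 = 25600;  25600 / 1024 = 25(%)
 */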
2188
2189 void
2190 nstat_ifnet_report_ecn_stats(void)
2191 {
2192 u_int64_t uptime, last_report_time;
2193 struct nstat_sysinfo_data data;
2194 struct nstat_sysinfo_ifnet_ecn_stats *st;
2195 struct ifnet *ifp;
2196
2197 uptime = net_uptime();
2198
2199 if ((int)(uptime - nstat_ifnet_last_report_time) <
2200 tcp_report_stats_interval)
2201 return;
2202
2203 last_report_time = nstat_ifnet_last_report_time;
2204 nstat_ifnet_last_report_time = uptime;
2205 data.flags = NSTAT_SYSINFO_IFNET_ECN_STATS;
2206 st = &data.u.ifnet_ecn_stats;
2207
2208 ifnet_head_lock_shared();
2209 TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
2210 if (ifp->if_ipv4_stat == NULL || ifp->if_ipv6_stat == NULL)
2211 continue;
2212
2213 if ((ifp->if_refflags & (IFRF_ATTACHED | IFRF_DETACHING)) !=
2214 IFRF_ATTACHED)
2215 continue;
2216
2217 /* Limit reporting to Wi-Fi, Ethernet and cellular (Wi-Fi falls under the Ethernet family check). */
2218 if (!(IFNET_IS_ETHERNET(ifp) || IFNET_IS_CELLULAR(ifp)))
2219 continue;
2220
2221 bzero(st, sizeof(*st));
2222 if (IFNET_IS_CELLULAR(ifp)) {
2223 st->ifnet_type = NSTAT_IFNET_ECN_TYPE_CELLULAR;
2224 } else if (IFNET_IS_WIFI(ifp)) {
2225 st->ifnet_type = NSTAT_IFNET_ECN_TYPE_WIFI;
2226 } else {
2227 st->ifnet_type = NSTAT_IFNET_ECN_TYPE_ETHERNET;
2228 }
2229
2230 /* skip if there was no update since last report */
2231 if (ifp->if_ipv4_stat->timestamp <= 0 ||
2232 ifp->if_ipv4_stat->timestamp < last_report_time)
2233 goto v6;
2234 st->ifnet_proto = NSTAT_IFNET_ECN_PROTO_IPV4;
2235 /* compute percentages using packet counts */
2236 nstat_ifnet_compute_percentages(&ifp->if_ipv4_stat->ecn_on);
2237 nstat_ifnet_compute_percentages(&ifp->if_ipv4_stat->ecn_off);
2238 nstat_ifnet_normalize_counter(ifp->if_ipv4_stat);
2239
2240 bcopy(ifp->if_ipv4_stat, &st->ecn_stat,
2241 sizeof(st->ecn_stat));
2242 nstat_sysinfo_send_data(&data);
2243 bzero(ifp->if_ipv4_stat, sizeof(*ifp->if_ipv4_stat));
2244
2245 v6:
2246 /* skip if there was no update since last report */
2247 if (ifp->if_ipv6_stat->timestamp <= 0 ||
2248 ifp->if_ipv6_stat->timestamp < last_report_time)
2249 continue;
2250 st->ifnet_proto = NSTAT_IFNET_ECN_PROTO_IPV6;
2251
2252 /* compute percentages using packet counts */
2253 nstat_ifnet_compute_percentages(&ifp->if_ipv6_stat->ecn_on);
2254 nstat_ifnet_compute_percentages(&ifp->if_ipv6_stat->ecn_off);
2255 nstat_ifnet_normalize_counter(ifp->if_ipv6_stat);
2256
2257 bcopy(ifp->if_ipv6_stat, &st->ecn_stat,
2258 sizeof(st->ecn_stat));
2259 nstat_sysinfo_send_data(&data);
2260
2261 /* Zero the stats in ifp */
2262 bzero(ifp->if_ipv6_stat, sizeof(*ifp->if_ipv6_stat));
2263 }
2264 ifnet_head_done();
2265
2266 }
2267
2268 static errno_t
2269 nstat_ifnet_copy_descriptor(
2270 nstat_provider_cookie_t cookie,
2271 void *data,
2272 u_int32_t len)
2273 {
2274 nstat_ifnet_descriptor *desc = (nstat_ifnet_descriptor *)data;
2275 struct nstat_ifnet_cookie *ifcookie =
2276 (struct nstat_ifnet_cookie *)cookie;
2277 struct ifnet *ifp = ifcookie->ifp;
2278
2279 if (len < sizeof(nstat_ifnet_descriptor))
2280 return EINVAL;
2281
2282 if (nstat_ifnet_gone(cookie))
2283 return EINVAL;
2284
2285 bzero(desc, sizeof(*desc));
2286 ifnet_lock_shared(ifp);
2287 strlcpy(desc->name, ifp->if_xname, sizeof(desc->name));
2288 desc->ifindex = ifp->if_index;
2289 desc->threshold = ifp->if_data_threshold;
2290 desc->type = ifp->if_type;
2291 if (ifp->if_desc.ifd_len < sizeof(desc->description))
2292 memcpy(desc->description, ifp->if_desc.ifd_desc,
2293 sizeof(desc->description));
2294 nstat_ifnet_copy_link_status(ifp, desc);
2295 ifnet_lock_done(ifp);
2296 return 0;
2297 }
2298
2299 static void
2300 nstat_init_ifnet_provider(void)
2301 {
2302 bzero(&nstat_ifnet_provider, sizeof(nstat_ifnet_provider));
2303 nstat_ifnet_provider.nstat_provider_id = NSTAT_PROVIDER_IFNET;
2304 nstat_ifnet_provider.nstat_descriptor_length = sizeof(nstat_ifnet_descriptor);
2305 nstat_ifnet_provider.nstat_lookup = nstat_ifnet_lookup;
2306 nstat_ifnet_provider.nstat_gone = nstat_ifnet_gone;
2307 nstat_ifnet_provider.nstat_counts = nstat_ifnet_counts;
2308 nstat_ifnet_provider.nstat_watcher_add = NULL;
2309 nstat_ifnet_provider.nstat_watcher_remove = NULL;
2310 nstat_ifnet_provider.nstat_copy_descriptor = nstat_ifnet_copy_descriptor;
2311 nstat_ifnet_provider.nstat_release = nstat_ifnet_release;
2312 nstat_ifnet_provider.next = nstat_providers;
2313 nstat_providers = &nstat_ifnet_provider;
2314 }
2315
2316 __private_extern__ void
2317 nstat_ifnet_threshold_reached(unsigned int ifindex)
2318 {
2319 nstat_control_state *state;
2320 nstat_src *src;
2321 struct ifnet *ifp;
2322 struct nstat_ifnet_cookie *ifcookie;
2323
2324 lck_mtx_lock(&nstat_mtx);
2325 for (state = nstat_controls; state; state = state->ncs_next)
2326 {
2327 lck_mtx_lock(&state->mtx);
2328 for (src = state->ncs_srcs; src; src = src->next)
2329 {
2330 if (src->provider != &nstat_ifnet_provider)
2331 continue;
2332 ifcookie = (struct nstat_ifnet_cookie *)src->cookie;
2333 ifp = ifcookie->ifp;
2334 if (ifp->if_index != ifindex)
2335 continue;
2336 nstat_control_send_counts(state, src, 0, 0, NULL);
2337 }
2338 lck_mtx_unlock(&state->mtx);
2339 }
2340 lck_mtx_unlock(&nstat_mtx);
2341 }
2342
2343 #pragma mark -- Sysinfo --
2344 static void
2345 nstat_set_keyval_scalar(nstat_sysinfo_keyval *kv, int key, u_int32_t val)
2346 {
2347 kv->nstat_sysinfo_key = key;
2348 kv->nstat_sysinfo_flags = NSTAT_SYSINFO_FLAG_SCALAR;
2349 kv->u.nstat_sysinfo_scalar = val;
2350 }
2351
2352 static void
2353 nstat_sysinfo_send_data_internal(
2354 nstat_control_state *control,
2355 nstat_sysinfo_data *data)
2356 {
2357 nstat_msg_sysinfo_counts *syscnt = NULL;
2358 size_t allocsize = 0, countsize = 0, nkeyvals = 0, finalsize = 0;
2359 nstat_sysinfo_keyval *kv;
2360 errno_t result = 0;
2361 size_t i = 0;
2362
2363 allocsize = offsetof(nstat_msg_sysinfo_counts, counts);
2364 countsize = offsetof(nstat_sysinfo_counts, nstat_sysinfo_keyvals);
2365 finalsize = allocsize;
2366
2367 /* get number of key-vals for each kind of stat */
2368 switch (data->flags)
2369 {
2370 case NSTAT_SYSINFO_MBUF_STATS:
2371 nkeyvals = sizeof(struct nstat_sysinfo_mbuf_stats) /
2372 sizeof(u_int32_t);
2373 break;
2374 case NSTAT_SYSINFO_TCP_STATS:
2375 nkeyvals = sizeof(struct nstat_sysinfo_tcp_stats) /
2376 sizeof(u_int32_t);
2377 break;
2378 case NSTAT_SYSINFO_IFNET_ECN_STATS:
2379 nkeyvals = (sizeof(struct if_tcp_ecn_stat) /
2380 sizeof(u_int64_t));
2381
2382 /* Two more keys for ifnet type and proto */
2383 nkeyvals += 2;
2384 break;
2385 default:
2386 return;
2387 }
2388 countsize += sizeof(nstat_sysinfo_keyval) * nkeyvals;
2389 allocsize += countsize;
2390
2391 syscnt = OSMalloc(allocsize, nstat_malloc_tag);
2392 if (syscnt == NULL)
2393 return;
2394 bzero(syscnt, allocsize);
2395
2396 kv = (nstat_sysinfo_keyval *) &syscnt->counts.nstat_sysinfo_keyvals;
2397 switch (data->flags)
2398 {
2399 case NSTAT_SYSINFO_MBUF_STATS:
2400 {
2401 nstat_set_keyval_scalar(&kv[i++],
2402 NSTAT_SYSINFO_KEY_MBUF_256B_TOTAL,
2403 data->u.mb_stats.total_256b);
2404 nstat_set_keyval_scalar(&kv[i++],
2405 NSTAT_SYSINFO_KEY_MBUF_2KB_TOTAL,
2406 data->u.mb_stats.total_2kb);
2407 nstat_set_keyval_scalar(&kv[i++],
2408 NSTAT_SYSINFO_KEY_MBUF_4KB_TOTAL,
2409 data->u.mb_stats.total_4kb);
2410 nstat_set_keyval_scalar(&kv[i++],
2411 NSTAT_SYSINFO_MBUF_16KB_TOTAL,
2412 data->u.mb_stats.total_16kb);
2413 nstat_set_keyval_scalar(&kv[i++],
2414 NSTAT_SYSINFO_KEY_SOCK_MBCNT,
2415 data->u.mb_stats.sbmb_total);
2416 nstat_set_keyval_scalar(&kv[i++],
2417 NSTAT_SYSINFO_KEY_SOCK_ATMBLIMIT,
2418 data->u.mb_stats.sb_atmbuflimit);
2419 nstat_set_keyval_scalar(&kv[i++],
2420 NSTAT_SYSINFO_MBUF_DRAIN_CNT,
2421 data->u.mb_stats.draincnt);
2422 nstat_set_keyval_scalar(&kv[i++],
2423 NSTAT_SYSINFO_MBUF_MEM_RELEASED,
2424 data->u.mb_stats.memreleased);
2425 VERIFY(i == nkeyvals);
2426 break;
2427 }
2428 case NSTAT_SYSINFO_TCP_STATS:
2429 {
2430 nstat_set_keyval_scalar(&kv[i++],
2431 NSTAT_SYSINFO_KEY_IPV4_AVGRTT,
2432 data->u.tcp_stats.ipv4_avgrtt);
2433 nstat_set_keyval_scalar(&kv[i++],
2434 NSTAT_SYSINFO_KEY_IPV6_AVGRTT,
2435 data->u.tcp_stats.ipv6_avgrtt);
2436 nstat_set_keyval_scalar(&kv[i++],
2437 NSTAT_SYSINFO_KEY_SEND_PLR,
2438 data->u.tcp_stats.send_plr);
2439 nstat_set_keyval_scalar(&kv[i++],
2440 NSTAT_SYSINFO_KEY_RECV_PLR,
2441 data->u.tcp_stats.recv_plr);
2442 nstat_set_keyval_scalar(&kv[i++],
2443 NSTAT_SYSINFO_KEY_SEND_TLRTO,
2444 data->u.tcp_stats.send_tlrto_rate);
2445 nstat_set_keyval_scalar(&kv[i++],
2446 NSTAT_SYSINFO_KEY_SEND_REORDERRATE,
2447 data->u.tcp_stats.send_reorder_rate);
2448 nstat_set_keyval_scalar(&kv[i++],
2449 NSTAT_SYSINFO_CONNECTION_ATTEMPTS,
2450 data->u.tcp_stats.connection_attempts);
2451 nstat_set_keyval_scalar(&kv[i++],
2452 NSTAT_SYSINFO_CONNECTION_ACCEPTS,
2453 data->u.tcp_stats.connection_accepts);
2454 nstat_set_keyval_scalar(&kv[i++],
2455 NSTAT_SYSINFO_ECN_CLIENT_ENABLED,
2456 data->u.tcp_stats.ecn_client_enabled);
2457 nstat_set_keyval_scalar(&kv[i++],
2458 NSTAT_SYSINFO_ECN_SERVER_ENABLED,
2459 data->u.tcp_stats.ecn_server_enabled);
2460 nstat_set_keyval_scalar(&kv[i++],
2461 NSTAT_SYSINFO_ECN_CLIENT_SETUP,
2462 data->u.tcp_stats.ecn_client_setup);
2463 nstat_set_keyval_scalar(&kv[i++],
2464 NSTAT_SYSINFO_ECN_SERVER_SETUP,
2465 data->u.tcp_stats.ecn_server_setup);
2466 nstat_set_keyval_scalar(&kv[i++],
2467 NSTAT_SYSINFO_ECN_CLIENT_SUCCESS,
2468 data->u.tcp_stats.ecn_client_success);
2469 nstat_set_keyval_scalar(&kv[i++],
2470 NSTAT_SYSINFO_ECN_SERVER_SUCCESS,
2471 data->u.tcp_stats.ecn_server_success);
2472 nstat_set_keyval_scalar(&kv[i++],
2473 NSTAT_SYSINFO_ECN_NOT_SUPPORTED,
2474 data->u.tcp_stats.ecn_not_supported);
2475 nstat_set_keyval_scalar(&kv[i++],
2476 NSTAT_SYSINFO_ECN_LOST_SYN,
2477 data->u.tcp_stats.ecn_lost_syn);
2478 nstat_set_keyval_scalar(&kv[i++],
2479 NSTAT_SYSINFO_ECN_LOST_SYNACK,
2480 data->u.tcp_stats.ecn_lost_synack);
2481 nstat_set_keyval_scalar(&kv[i++],
2482 NSTAT_SYSINFO_ECN_RECV_CE,
2483 data->u.tcp_stats.ecn_recv_ce);
2484 nstat_set_keyval_scalar(&kv[i++],
2485 NSTAT_SYSINFO_ECN_RECV_ECE,
2486 data->u.tcp_stats.ecn_recv_ece);
2487 nstat_set_keyval_scalar(&kv[i++],
2488 NSTAT_SYSINFO_ECN_SENT_ECE,
2489 data->u.tcp_stats.ecn_sent_ece);
2490 nstat_set_keyval_scalar(&kv[i++],
2491 NSTAT_SYSINFO_ECN_CONN_RECV_CE,
2492 data->u.tcp_stats.ecn_conn_recv_ce);
2493 nstat_set_keyval_scalar(&kv[i++],
2494 NSTAT_SYSINFO_ECN_CONN_RECV_ECE,
2495 data->u.tcp_stats.ecn_conn_recv_ece);
2496 nstat_set_keyval_scalar(&kv[i++],
2497 NSTAT_SYSINFO_ECN_CONN_PLNOCE,
2498 data->u.tcp_stats.ecn_conn_plnoce);
2499 nstat_set_keyval_scalar(&kv[i++],
2500 NSTAT_SYSINFO_ECN_CONN_PL_CE,
2501 data->u.tcp_stats.ecn_conn_pl_ce);
2502 nstat_set_keyval_scalar(&kv[i++],
2503 NSTAT_SYSINFO_ECN_CONN_NOPL_CE,
2504 data->u.tcp_stats.ecn_conn_nopl_ce);
2505 nstat_set_keyval_scalar(&kv[i++],
2506 NSTAT_SYSINFO_ECN_FALLBACK_SYNLOSS,
2507 data->u.tcp_stats.ecn_fallback_synloss);
2508 nstat_set_keyval_scalar(&kv[i++],
2509 NSTAT_SYSINFO_ECN_FALLBACK_REORDER,
2510 data->u.tcp_stats.ecn_fallback_reorder);
2511 nstat_set_keyval_scalar(&kv[i++],
2512 NSTAT_SYSINFO_ECN_FALLBACK_CE,
2513 data->u.tcp_stats.ecn_fallback_ce);
2514 nstat_set_keyval_scalar(&kv[i++],
2515 NSTAT_SYSINFO_TFO_SYN_DATA_RCV,
2516 data->u.tcp_stats.tfo_syn_data_rcv);
2517 nstat_set_keyval_scalar(&kv[i++],
2518 NSTAT_SYSINFO_TFO_COOKIE_REQ_RCV,
2519 data->u.tcp_stats.tfo_cookie_req_rcv);
2520 nstat_set_keyval_scalar(&kv[i++],
2521 NSTAT_SYSINFO_TFO_COOKIE_SENT,
2522 data->u.tcp_stats.tfo_cookie_sent);
2523 nstat_set_keyval_scalar(&kv[i++],
2524 NSTAT_SYSINFO_TFO_COOKIE_INVALID,
2525 data->u.tcp_stats.tfo_cookie_invalid);
2526 nstat_set_keyval_scalar(&kv[i++],
2527 NSTAT_SYSINFO_TFO_COOKIE_REQ,
2528 data->u.tcp_stats.tfo_cookie_req);
2529 nstat_set_keyval_scalar(&kv[i++],
2530 NSTAT_SYSINFO_TFO_COOKIE_RCV,
2531 data->u.tcp_stats.tfo_cookie_rcv);
2532 nstat_set_keyval_scalar(&kv[i++],
2533 NSTAT_SYSINFO_TFO_SYN_DATA_SENT,
2534 data->u.tcp_stats.tfo_syn_data_sent);
2535 nstat_set_keyval_scalar(&kv[i++],
2536 NSTAT_SYSINFO_TFO_SYN_DATA_ACKED,
2537 data->u.tcp_stats.tfo_syn_data_acked);
2538 nstat_set_keyval_scalar(&kv[i++],
2539 NSTAT_SYSINFO_TFO_SYN_LOSS,
2540 data->u.tcp_stats.tfo_syn_loss);
2541 nstat_set_keyval_scalar(&kv[i++],
2542 NSTAT_SYSINFO_TFO_BLACKHOLE,
2543 data->u.tcp_stats.tfo_blackhole);
2544 VERIFY(i == nkeyvals);
2545 break;
2546 }
2547 case NSTAT_SYSINFO_IFNET_ECN_STATS:
2548 {
2549 nstat_set_keyval_scalar(&kv[i++],
2550 NSTAT_SYSINFO_ECN_IFNET_TYPE,
2551 data->u.ifnet_ecn_stats.ifnet_type);
2552 nstat_set_keyval_scalar(&kv[i++],
2553 NSTAT_SYSINFO_ECN_IFNET_PROTO,
2554 data->u.ifnet_ecn_stats.ifnet_proto);
2555 nstat_set_keyval_scalar(&kv[i++],
2556 NSTAT_SYSINFO_ECN_IFNET_CLIENT_SETUP,
2557 data->u.ifnet_ecn_stats.ecn_stat.ecn_client_setup);
2558 nstat_set_keyval_scalar(&kv[i++],
2559 NSTAT_SYSINFO_ECN_IFNET_SERVER_SETUP,
2560 data->u.ifnet_ecn_stats.ecn_stat.ecn_server_setup);
2561 nstat_set_keyval_scalar(&kv[i++],
2562 NSTAT_SYSINFO_ECN_IFNET_CLIENT_SUCCESS,
2563 data->u.ifnet_ecn_stats.ecn_stat.ecn_client_success);
2564 nstat_set_keyval_scalar(&kv[i++],
2565 NSTAT_SYSINFO_ECN_IFNET_SERVER_SUCCESS,
2566 data->u.ifnet_ecn_stats.ecn_stat.ecn_server_success);
2567 nstat_set_keyval_scalar(&kv[i++],
2568 NSTAT_SYSINFO_ECN_IFNET_PEER_NOSUPPORT,
2569 data->u.ifnet_ecn_stats.ecn_stat.ecn_peer_nosupport);
2570 nstat_set_keyval_scalar(&kv[i++],
2571 NSTAT_SYSINFO_ECN_IFNET_SYN_LOST,
2572 data->u.ifnet_ecn_stats.ecn_stat.ecn_syn_lost);
2573 nstat_set_keyval_scalar(&kv[i++],
2574 NSTAT_SYSINFO_ECN_IFNET_SYNACK_LOST,
2575 data->u.ifnet_ecn_stats.ecn_stat.ecn_synack_lost);
2576 nstat_set_keyval_scalar(&kv[i++],
2577 NSTAT_SYSINFO_ECN_IFNET_RECV_CE,
2578 data->u.ifnet_ecn_stats.ecn_stat.ecn_recv_ce);
2579 nstat_set_keyval_scalar(&kv[i++],
2580 NSTAT_SYSINFO_ECN_IFNET_RECV_ECE,
2581 data->u.ifnet_ecn_stats.ecn_stat.ecn_recv_ece);
2582 nstat_set_keyval_scalar(&kv[i++],
2583 NSTAT_SYSINFO_ECN_IFNET_CONN_RECV_CE,
2584 data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_recv_ce);
2585 nstat_set_keyval_scalar(&kv[i++],
2586 NSTAT_SYSINFO_ECN_IFNET_CONN_RECV_ECE,
2587 data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_recv_ece);
2588 nstat_set_keyval_scalar(&kv[i++],
2589 NSTAT_SYSINFO_ECN_IFNET_CONN_PLNOCE,
2590 data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_plnoce);
2591 nstat_set_keyval_scalar(&kv[i++],
2592 NSTAT_SYSINFO_ECN_IFNET_CONN_PLCE,
2593 data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_plce);
2594 nstat_set_keyval_scalar(&kv[i++],
2595 NSTAT_SYSINFO_ECN_IFNET_CONN_NOPLCE,
2596 data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_noplce);
2597 nstat_set_keyval_scalar(&kv[i++],
2598 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_SYNLOSS,
2599 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_synloss);
2600 nstat_set_keyval_scalar(&kv[i++],
2601 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_REORDER,
2602 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_reorder);
2603 nstat_set_keyval_scalar(&kv[i++],
2604 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_CE,
2605 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_ce);
2606 nstat_set_keyval_scalar(&kv[i++],
2607 NSTAT_SYSINFO_ECN_IFNET_ON_RTT_AVG,
2608 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rtt_avg);
2609 nstat_set_keyval_scalar(&kv[i++],
2610 NSTAT_SYSINFO_ECN_IFNET_ON_RTT_VAR,
2611 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rtt_var);
2612 nstat_set_keyval_scalar(&kv[i++],
2613 NSTAT_SYSINFO_ECN_IFNET_ON_OOPERCENT,
2614 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.oo_percent);
2615 nstat_set_keyval_scalar(&kv[i++],
2616 NSTAT_SYSINFO_ECN_IFNET_ON_SACK_EPISODE,
2617 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.sack_episodes);
2618 nstat_set_keyval_scalar(&kv[i++],
2619 NSTAT_SYSINFO_ECN_IFNET_ON_REORDER_PERCENT,
2620 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.reorder_percent);
2621 nstat_set_keyval_scalar(&kv[i++],
2622 NSTAT_SYSINFO_ECN_IFNET_ON_RXMIT_PERCENT,
2623 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rxmit_percent);
2624 nstat_set_keyval_scalar(&kv[i++],
2625 NSTAT_SYSINFO_ECN_IFNET_ON_RXMIT_DROP,
2626 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rxmit_drop);
2627 nstat_set_keyval_scalar(&kv[i++],
2628 NSTAT_SYSINFO_ECN_IFNET_OFF_RTT_AVG,
2629 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rtt_avg);
2630 nstat_set_keyval_scalar(&kv[i++],
2631 NSTAT_SYSINFO_ECN_IFNET_OFF_RTT_VAR,
2632 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rtt_var);
2633 nstat_set_keyval_scalar(&kv[i++],
2634 NSTAT_SYSINFO_ECN_IFNET_OFF_OOPERCENT,
2635 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.oo_percent);
2636 nstat_set_keyval_scalar(&kv[i++],
2637 NSTAT_SYSINFO_ECN_IFNET_OFF_SACK_EPISODE,
2638 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.sack_episodes);
2639 nstat_set_keyval_scalar(&kv[i++],
2640 NSTAT_SYSINFO_ECN_IFNET_OFF_REORDER_PERCENT,
2641 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.reorder_percent);
2642 nstat_set_keyval_scalar(&kv[i++],
2643 NSTAT_SYSINFO_ECN_IFNET_OFF_RXMIT_PERCENT,
2644 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rxmit_percent);
2645 nstat_set_keyval_scalar(&kv[i++],
2646 NSTAT_SYSINFO_ECN_IFNET_OFF_RXMIT_DROP,
2647 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rxmit_drop);
2648 nstat_set_keyval_scalar(&kv[i++],
2649 NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_TXPKTS,
2650 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_txpkts);
2651 nstat_set_keyval_scalar(&kv[i++],
2652 NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_RXMTPKTS,
2653 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_rxmitpkts);
2654 nstat_set_keyval_scalar(&kv[i++],
2655 NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_RXPKTS,
2656 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_rxpkts);
2657 nstat_set_keyval_scalar(&kv[i++],
2658 NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_OOPKTS,
2659 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_oopkts);
2660 nstat_set_keyval_scalar(&kv[i++],
2661 NSTAT_SYSINFO_ECN_IFNET_ON_DROP_RST,
2662 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rst_drop);
2663 nstat_set_keyval_scalar(&kv[i++],
2664 NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_TXPKTS,
2665 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_txpkts);
2666 nstat_set_keyval_scalar(&kv[i++],
2667 NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_RXMTPKTS,
2668 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_rxmitpkts);
2669 nstat_set_keyval_scalar(&kv[i++],
2670 NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_RXPKTS,
2671 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_rxpkts);
2672 nstat_set_keyval_scalar(&kv[i++],
2673 NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_OOPKTS,
2674 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_oopkts);
2675 nstat_set_keyval_scalar(&kv[i++],
2676 NSTAT_SYSINFO_ECN_IFNET_OFF_DROP_RST,
2677 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rst_drop);
2678 nstat_set_keyval_scalar(&kv[i++],
2679 NSTAT_SYSINFO_ECN_IFNET_TOTAL_CONN,
2680 data->u.ifnet_ecn_stats.ecn_stat.ecn_total_conn);
2681 break;
2682 }
2683 }
2684 if (syscnt != NULL)
2685 {
2686 VERIFY(i > 0 && i <= nkeyvals);
2687 countsize = offsetof(nstat_sysinfo_counts,
2688 nstat_sysinfo_keyvals) +
2689 sizeof(nstat_sysinfo_keyval) * i;
2690 finalsize += countsize;
2691 syscnt->hdr.type = NSTAT_MSG_TYPE_SYSINFO_COUNTS;
2692 syscnt->hdr.length = finalsize;
2693 syscnt->counts.nstat_sysinfo_len = countsize;
2694
2695 result = ctl_enqueuedata(control->ncs_kctl,
2696 control->ncs_unit, syscnt, finalsize, CTL_DATA_EOR);
2697 if (result != 0)
2698 {
2699 nstat_stats.nstat_sysinfofailures += 1;
2700 }
2701 OSFree(syscnt, allocsize, nstat_malloc_tag);
2702 }
2703 return;
2704 }
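/*
 * Editorial note: a minimal sketch of how a userspace subscriber might walk
 * the keyval array packed above, assuming the message layouts from
 * <net/ntstat.h>. parse_sysinfo() is a hypothetical helper; it assumes a
 * complete, intact message and elides error handling.
 */
#if 0	/* editorial sketch, not built */
#include <stddef.h>
#include <stdio.h>
#include <net/ntstat.h>

static void
parse_sysinfo(const nstat_msg_sysinfo_counts *msg)
{
	const nstat_sysinfo_keyval *kv =
	    (const nstat_sysinfo_keyval *)msg->counts.nstat_sysinfo_keyvals;
	size_t nkv = (msg->counts.nstat_sysinfo_len -
	    offsetof(nstat_sysinfo_counts, nstat_sysinfo_keyvals)) /
	    sizeof(nstat_sysinfo_keyval);
	size_t n;

	for (n = 0; n < nkv; n++) {
		/* This file only ever emits scalar key-values. */
		if (kv[n].nstat_sysinfo_flags & NSTAT_SYSINFO_FLAG_SCALAR)
			printf("key %u = %llu\n",
			    (unsigned int)kv[n].nstat_sysinfo_key,
			    (unsigned long long)kv[n].u.nstat_sysinfo_scalar);
	}
}
#endif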
2705
2706 __private_extern__ void
2707 nstat_sysinfo_send_data(
2708 nstat_sysinfo_data *data)
2709 {
2710 nstat_control_state *control;
2711
2712 lck_mtx_lock(&nstat_mtx);
2713 for (control = nstat_controls; control; control = control->ncs_next)
2714 {
2715 lck_mtx_lock(&control->mtx);
2716 if ((control->ncs_flags & NSTAT_FLAG_SYSINFO_SUBSCRIBED) != 0)
2717 {
2718 nstat_sysinfo_send_data_internal(control, data);
2719 }
2720 lck_mtx_unlock(&control->mtx);
2721 }
2722 lck_mtx_unlock(&nstat_mtx);
2723 }
2724
2725 static void
2726 nstat_sysinfo_generate_report(void)
2727 {
2728 mbuf_report_peak_usage();
2729 tcp_report_stats();
2730 nstat_ifnet_report_ecn_stats();
2731 }
2732
2733 #pragma mark -- Kernel Control Socket --
2734
2735 static kern_ctl_ref nstat_ctlref = NULL;
2736 static lck_grp_t *nstat_lck_grp = NULL;
2737
2738 static errno_t nstat_control_connect(kern_ctl_ref kctl, struct sockaddr_ctl *sac, void **uinfo);
2739 static errno_t nstat_control_disconnect(kern_ctl_ref kctl, u_int32_t unit, void *uinfo);
2740 static errno_t nstat_control_send(kern_ctl_ref kctl, u_int32_t unit, void *uinfo, mbuf_t m, int flags);
2741
2742 static errno_t
2743 nstat_enqueue_success(
2744 uint64_t context,
2745 nstat_control_state *state,
2746 u_int16_t flags)
2747 {
2748 nstat_msg_hdr success;
2749 errno_t result;
2750
2751 bzero(&success, sizeof(success));
2752 success.context = context;
2753 success.type = NSTAT_MSG_TYPE_SUCCESS;
2754 success.length = sizeof(success);
2755 success.flags = flags;
2756 result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &success,
2757 sizeof(success), CTL_DATA_EOR | CTL_DATA_CRIT);
2758 if (result != 0) {
2759 if (nstat_debug != 0)
2760 printf("%s: could not enqueue success message %d\n",
2761 __func__, result);
2762 nstat_stats.nstat_successmsgfailures += 1;
2763 }
2764 return result;
2765 }
2766
2767 static errno_t
2768 nstat_control_send_goodbye(
2769 nstat_control_state *state,
2770 nstat_src *src)
2771 {
2772 errno_t result = 0;
2773 int failed = 0;
2774
2775 if (nstat_control_reporting_allowed(state, src))
2776 {
2777 if ((state->ncs_flags & NSTAT_FLAG_SUPPORTS_UPDATES) != 0)
2778 {
2779 result = nstat_control_send_update(state, src, 0, NSTAT_MSG_HDR_FLAG_CLOSING, NULL);
2780 if (result != 0)
2781 {
2782 failed = 1;
2783 if (nstat_debug != 0)
2784 printf("%s - nstat_control_send_update() %d\n", __func__, result);
2785 }
2786 }
2787 else
2788 {
2789 // send one last counts notification
2790 result = nstat_control_send_counts(state, src, 0, NSTAT_MSG_HDR_FLAG_CLOSING, NULL);
2791 if (result != 0)
2792 {
2793 failed = 1;
2794 if (nstat_debug != 0)
2795 printf("%s - nstat_control_send_counts() %d\n", __func__, result);
2796 }
2797
2798 // send one last description
2799 result = nstat_control_send_description(state, src, 0, NSTAT_MSG_HDR_FLAG_CLOSING);
2800 if (result != 0)
2801 {
2802 failed = 1;
2803 if (nstat_debug != 0)
2804 printf("%s - nstat_control_send_description() %d\n", __func__, result);
2805 }
2806 }
2807 }
2808
2809 // send the source removed notification
2810 result = nstat_control_send_removed(state, src);
2811 if (result != 0)
2812 {
2813 failed = 1;
2814 if (nstat_debug != 0)
2815 printf("%s - nstat_control_send_removed() %d\n", __func__, result);
2816 }
2817
2818 if (failed != 0)
2819 nstat_stats.nstat_control_send_goodbye_failures++;
2820
2821
2822 return result;
2823 }
2824
2825 static errno_t
2826 nstat_flush_accumulated_msgs(
2827 nstat_control_state *state)
2828 {
2829 errno_t result = 0;
2830 if (state->ncs_accumulated != NULL && mbuf_len(state->ncs_accumulated) > 0)
2831 {
2832 mbuf_pkthdr_setlen(state->ncs_accumulated, mbuf_len(state->ncs_accumulated));
2833 result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, state->ncs_accumulated, CTL_DATA_EOR);
2834 if (result != 0)
2835 {
2836 nstat_stats.nstat_flush_accumulated_msgs_failures++;
2837 if (nstat_debug != 0)
2838 printf("%s - ctl_enqueuembuf failed: %d\n", __func__, result);
2839 mbuf_freem(state->ncs_accumulated);
2840 }
2841 state->ncs_accumulated = NULL;
2842 }
2843 return result;
2844 }
2845
2846 static errno_t
2847 nstat_accumulate_msg(
2848 nstat_control_state *state,
2849 nstat_msg_hdr *hdr,
2850 size_t length)
2851 {
2852 if (state->ncs_accumulated && mbuf_trailingspace(state->ncs_accumulated) < length)
2853 {
2854 // Not enough room; flush the currently accumulated mbuf first
2855 nstat_flush_accumulated_msgs(state);
2856 }
2857
2858 errno_t result = 0;
2859
2860 if (state->ncs_accumulated == NULL)
2861 {
2862 unsigned int one = 1;
2863 if (mbuf_allocpacket(MBUF_DONTWAIT, NSTAT_MAX_MSG_SIZE, &one, &state->ncs_accumulated) != 0)
2864 {
2865 if (nstat_debug != 0)
2866 printf("%s - mbuf_allocpacket failed\n", __func__);
2867 result = ENOMEM;
2868 }
2869 else
2870 {
2871 mbuf_setlen(state->ncs_accumulated, 0);
2872 }
2873 }
2874
2875 if (result == 0)
2876 {
2877 hdr->length = length;
2878 result = mbuf_copyback(state->ncs_accumulated, mbuf_len(state->ncs_accumulated),
2879 length, hdr, MBUF_DONTWAIT);
2880 }
2881
2882 if (result != 0)
2883 {
2884 nstat_flush_accumulated_msgs(state);
2885 if (nstat_debug != 0)
2886 printf("%s - resorting to ctl_enqueuedata\n", __func__);
2887 result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, hdr, length, CTL_DATA_EOR);
2888 }
2889
2890 if (result != 0)
2891 nstat_stats.nstat_accumulate_msg_failures++;
2892
2893 return result;
2894 }
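/*
 * Editorial note: nstat_accumulate_msg() packs several nstat_msg_hdr-framed
 * records into one mbuf, which nstat_flush_accumulated_msgs() then delivers
 * as a single CTL_DATA_EOR datagram. A hypothetical consumer-side walk over
 * such a datagram (walk_records() is not part of this file; buf/len are
 * assumed to hold one complete datagram):
 */
#if 0	/* editorial sketch, not built */
static void
walk_records(const u_int8_t *buf, size_t len)
{
	size_t off = 0;

	while (off + sizeof(nstat_msg_hdr) <= len) {
		const nstat_msg_hdr *hdr = (const nstat_msg_hdr *)(buf + off);
		if (hdr->length < sizeof(*hdr) || hdr->length > len - off)
			break;	/* malformed record; stop */
		/* dispatch on hdr->type (SRC_COUNTS, SRC_DESC, ...) here */
		off += hdr->length;
	}
}
#endif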
2895
2896 static void*
2897 nstat_idle_check(
2898 __unused thread_call_param_t p0,
2899 __unused thread_call_param_t p1)
2900 {
2901 lck_mtx_lock(&nstat_mtx);
2902
2903 nstat_idle_time = 0;
2904
2905 nstat_control_state *control;
2906 nstat_src *dead = NULL;
2907 nstat_src *dead_list = NULL;
2908 for (control = nstat_controls; control; control = control->ncs_next)
2909 {
2910 lck_mtx_lock(&control->mtx);
2911 nstat_src **srcpp = &control->ncs_srcs;
2912
2913 if (!(control->ncs_flags & NSTAT_FLAG_REQCOUNTS))
2914 {
2915 while (*srcpp != NULL)
2916 {
2917 if ((*srcpp)->provider->nstat_gone((*srcpp)->cookie))
2918 {
2919 errno_t result;
2920
2921 // Pull it off the list
2922 dead = *srcpp;
2923 *srcpp = (*srcpp)->next;
2924
2925 result = nstat_control_send_goodbye(control, dead);
2926
2927 // Put this on the list to release later
2928 dead->next = dead_list;
2929 dead_list = dead;
2930 }
2931 else
2932 {
2933 srcpp = &(*srcpp)->next;
2934 }
2935 }
2936 }
2937 control->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS;
2938 lck_mtx_unlock(&control->mtx);
2939 }
2940
2941 if (nstat_controls)
2942 {
2943 clock_interval_to_deadline(60, NSEC_PER_SEC, &nstat_idle_time);
2944 thread_call_func_delayed((thread_call_func_t)nstat_idle_check, NULL, nstat_idle_time);
2945 }
2946
2947 lck_mtx_unlock(&nstat_mtx);
2948
2949 /* Generate any system-level reports, if needed */
2950 nstat_sysinfo_generate_report();
2951
2952 // Release the sources now that we aren't holding lots of locks
2953 while (dead_list)
2954 {
2955 dead = dead_list;
2956 dead_list = dead->next;
2957
2958 nstat_control_cleanup_source(NULL, dead, FALSE);
2959 }
2960
2961 return NULL;
2962 }
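/*
 * Editorial note: the dead-list handling above is deliberate. Gone sources
 * are unlinked while nstat_mtx and the per-control mutex are held, but
 * nstat_control_cleanup_source() runs only after both are dropped, because a
 * provider's nstat_release() callback may itself need to take locks that
 * must not nest inside ours.
 */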
2963
2964 static void
2965 nstat_control_register(void)
2966 {
2967 // Create our lock group first
2968 lck_grp_attr_t *grp_attr = lck_grp_attr_alloc_init();
2969 lck_grp_attr_setdefault(grp_attr);
2970 nstat_lck_grp = lck_grp_alloc_init("network statistics kctl", grp_attr);
2971 lck_grp_attr_free(grp_attr);
2972
2973 lck_mtx_init(&nstat_mtx, nstat_lck_grp, NULL);
2974
2975 // Register the control
2976 struct kern_ctl_reg nstat_control;
2977 bzero(&nstat_control, sizeof(nstat_control));
2978 strlcpy(nstat_control.ctl_name, NET_STAT_CONTROL_NAME, sizeof(nstat_control.ctl_name));
2979 nstat_control.ctl_flags = CTL_FLAG_REG_EXTENDED | CTL_FLAG_REG_CRIT;
2980 nstat_control.ctl_sendsize = nstat_sendspace;
2981 nstat_control.ctl_recvsize = nstat_recvspace;
2982 nstat_control.ctl_connect = nstat_control_connect;
2983 nstat_control.ctl_disconnect = nstat_control_disconnect;
2984 nstat_control.ctl_send = nstat_control_send;
2985
2986 ctl_register(&nstat_control, &nstat_ctlref);
2987 }
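/*
 * Editorial note: a minimal userspace sketch of attaching to the control
 * registered above, using the standard kernel control socket pattern.
 * nstat_open() is a hypothetical helper; NET_STAT_CONTROL_NAME comes from
 * the private <net/ntstat.h> header, and error handling is elided.
 */
#if 0	/* userspace example, not kernel code */
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/sys_domain.h>
#include <sys/kern_control.h>
#include <net/ntstat.h>

static int
nstat_open(void)
{
	struct ctl_info info;
	struct sockaddr_ctl sc;
	int fd = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);

	if (fd < 0)
		return -1;
	/* Resolve the control name to a control id. */
	memset(&info, 0, sizeof(info));
	strlcpy(info.ctl_name, NET_STAT_CONTROL_NAME, sizeof(info.ctl_name));
	if (ioctl(fd, CTLIOCGINFO, &info) != 0) {
		close(fd);
		return -1;
	}
	memset(&sc, 0, sizeof(sc));
	sc.sc_len = sizeof(sc);
	sc.sc_family = AF_SYSTEM;
	sc.ss_sysaddr = AF_SYS_CONTROL;
	sc.sc_id = info.ctl_id;
	sc.sc_unit = 0;		/* let the kernel pick a unit */
	/* connect() lands in nstat_control_connect() below. */
	if (connect(fd, (struct sockaddr *)&sc, sizeof(sc)) != 0) {
		close(fd);
		return -1;
	}
	return fd;
}
#endif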
2988
2989 static void
2990 nstat_control_cleanup_source(
2991 nstat_control_state *state,
2992 struct nstat_src *src,
2993 boolean_t locked)
2994 {
2995 errno_t result;
2996
2997 if (state)
2998 {
2999 result = nstat_control_send_removed(state, src);
3000 if (result != 0)
3001 {
3002 nstat_stats.nstat_control_cleanup_source_failures++;
3003 if (nstat_debug != 0)
3004 printf("%s - nstat_control_send_removed() %d\n",
3005 __func__, result);
3006 }
3007 }
3008 // Have the provider release its cookie, then free the source.
3009 src->provider->nstat_release(src->cookie, locked);
3010 OSFree(src, sizeof(*src), nstat_malloc_tag);
3011 }
3012
3013
3014 static bool
3015 nstat_control_reporting_allowed(
3016 nstat_control_state *state,
3017 nstat_src *src)
3018 {
3019 if (src->provider->nstat_reporting_allowed == NULL)
3020 return TRUE;
3021
3022 return (
3023 src->provider->nstat_reporting_allowed(src->cookie,
3024 state->ncs_provider_filters[src->provider->nstat_provider_id])
3025 );
3026 }
3027
3028
3029 static errno_t
3030 nstat_control_connect(
3031 kern_ctl_ref kctl,
3032 struct sockaddr_ctl *sac,
3033 void **uinfo)
3034 {
3035 nstat_control_state *state = OSMalloc(sizeof(*state), nstat_malloc_tag);
3036 if (state == NULL) return ENOMEM;
3037
3038 bzero(state, sizeof(*state));
3039 lck_mtx_init(&state->mtx, nstat_lck_grp, NULL);
3040 state->ncs_kctl = kctl;
3041 state->ncs_unit = sac->sc_unit;
3042 state->ncs_flags = NSTAT_FLAG_REQCOUNTS;
3043 *uinfo = state;
3044
3045 lck_mtx_lock(&nstat_mtx);
3046 state->ncs_next = nstat_controls;
3047 nstat_controls = state;
3048
3049 if (nstat_idle_time == 0)
3050 {
3051 clock_interval_to_deadline(60, NSEC_PER_SEC, &nstat_idle_time);
3052 thread_call_func_delayed((thread_call_func_t)nstat_idle_check, NULL, nstat_idle_time);
3053 }
3054
3055 lck_mtx_unlock(&nstat_mtx);
3056
3057 return 0;
3058 }
3059
3060 static errno_t
3061 nstat_control_disconnect(
3062 __unused kern_ctl_ref kctl,
3063 __unused u_int32_t unit,
3064 void *uinfo)
3065 {
3066 u_int32_t watching;
3067 nstat_control_state *state = (nstat_control_state*)uinfo;
3068
3069 // pull it out of the global list of states
3070 lck_mtx_lock(&nstat_mtx);
3071 nstat_control_state **statepp;
3072 for (statepp = &nstat_controls; *statepp; statepp = &(*statepp)->ncs_next)
3073 {
3074 if (*statepp == state)
3075 {
3076 *statepp = state->ncs_next;
3077 break;
3078 }
3079 }
3080 lck_mtx_unlock(&nstat_mtx);
3081
3082 lck_mtx_lock(&state->mtx);
3083 // Stop watching for sources
3084 nstat_provider *provider;
3085 watching = state->ncs_watching;
3086 state->ncs_watching = 0;
3087 for (provider = nstat_providers; provider && watching; provider = provider->next)
3088 {
3089 if ((watching & (1 << provider->nstat_provider_id)) != 0)
3090 {
3091 watching &= ~(1 << provider->nstat_provider_id);
3092 provider->nstat_watcher_remove(state);
3093 }
3094 }
3095
3096 // set cleanup flags
3097 state->ncs_flags |= NSTAT_FLAG_CLEANUP;
3098
3099 if (state->ncs_accumulated)
3100 {
3101 mbuf_freem(state->ncs_accumulated);
3102 state->ncs_accumulated = NULL;
3103 }
3104
3105 // Copy out the list of sources
3106 nstat_src *srcs = state->ncs_srcs;
3107 state->ncs_srcs = NULL;
3108 lck_mtx_unlock(&state->mtx);
3109
3110 while (srcs)
3111 {
3112 nstat_src *src;
3113
3114 // pull it out of the list
3115 src = srcs;
3116 srcs = src->next;
3117
3118 // clean it up
3119 nstat_control_cleanup_source(NULL, src, FALSE);
3120 }
3121 lck_mtx_destroy(&state->mtx, nstat_lck_grp);
3122 OSFree(state, sizeof(*state), nstat_malloc_tag);
3123
3124 return 0;
3125 }
3126
3127 static nstat_src_ref_t
3128 nstat_control_next_src_ref(
3129 nstat_control_state *state)
3130 {
3131 int i = 0;
3132 nstat_src_ref_t toReturn = NSTAT_SRC_REF_INVALID;
3133
3134 for (i = 0; i < 1000 && toReturn == NSTAT_SRC_REF_INVALID; i++)
3135 {
3136 if (state->ncs_next_srcref == NSTAT_SRC_REF_INVALID ||
3137 state->ncs_next_srcref == NSTAT_SRC_REF_ALL)
3138 {
3139 state->ncs_next_srcref = 1;
3140 }
3141
3142 nstat_src *src;
3143 for (src = state->ncs_srcs; src; src = src->next)
3144 {
3145 if (src->srcref == state->ncs_next_srcref)
3146 break;
3147 }
3148
3149 if (src == NULL) toReturn = state->ncs_next_srcref;
3150 state->ncs_next_srcref++;
3151 }
3152
3153 return toReturn;
3154 }
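/*
 * Editorial note: srcref allocation above is a bounded linear probe. Up to
 * 1000 candidate refs are tried; the INVALID and ALL sentinel values are
 * skipped on wrap-around, and each candidate is checked against the
 * control's source list for collisions, so allocation costs O(n) per
 * candidate.
 */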
3155
3156 static errno_t
3157 nstat_control_send_counts(
3158 nstat_control_state *state,
3159 nstat_src *src,
3160 unsigned long long context,
3161 u_int16_t hdr_flags,
3162 int *gone)
3163 {
3164 nstat_msg_src_counts counts;
3165 errno_t result = 0;
3166
3167 /* Some providers may not have any counts to send */
3168 if (src->provider->nstat_counts == NULL)
3169 return (0);
3170
3171 bzero(&counts, sizeof(counts));
3172 counts.hdr.type = NSTAT_MSG_TYPE_SRC_COUNTS;
3173 counts.hdr.length = sizeof(counts);
3174 counts.hdr.flags = hdr_flags;
3175 counts.hdr.context = context;
3176 counts.srcref = src->srcref;
3177
3178 if (src->provider->nstat_counts(src->cookie, &counts.counts, gone) == 0)
3179 {
3180 if ((src->filter & NSTAT_FILTER_NOZEROBYTES) &&
3181 counts.counts.nstat_rxbytes == 0 &&
3182 counts.counts.nstat_txbytes == 0)
3183 {
3184 result = EAGAIN;
3185 }
3186 else
3187 {
3188 result = ctl_enqueuedata(state->ncs_kctl,
3189 state->ncs_unit, &counts, sizeof(counts),
3190 CTL_DATA_EOR);
3191 if (result != 0)
3192 nstat_stats.nstat_sendcountfailures += 1;
3193 }
3194 }
3195 return result;
3196 }
3197
3198 static errno_t
3199 nstat_control_append_counts(
3200 nstat_control_state *state,
3201 nstat_src *src,
3202 int *gone)
3203 {
3204 /* Some providers may not have any counts to send */
3205 if (!src->provider->nstat_counts) return 0;
3206
3207 nstat_msg_src_counts counts;
3208 bzero(&counts, sizeof(counts));
3209 counts.hdr.type = NSTAT_MSG_TYPE_SRC_COUNTS;
3210 counts.hdr.length = sizeof(counts);
3211 counts.srcref = src->srcref;
3212
3213 errno_t result = 0;
3214 result = src->provider->nstat_counts(src->cookie, &counts.counts, gone);
3215 if (result != 0)
3216 {
3217 return result;
3218 }
3219
3220 if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
3221 counts.counts.nstat_rxbytes == 0 && counts.counts.nstat_txbytes == 0)
3222 {
3223 return EAGAIN;
3224 }
3225
3226 return nstat_accumulate_msg(state, &counts.hdr, counts.hdr.length);
3227 }
3228
3229 static int
3230 nstat_control_send_description(
3231 nstat_control_state *state,
3232 nstat_src *src,
3233 u_int64_t context,
3234 u_int16_t hdr_flags)
3235 {
3236 // Provider doesn't support getting the descriptor? Done.
3237 if (src->provider->nstat_descriptor_length == 0 ||
3238 src->provider->nstat_copy_descriptor == NULL)
3239 {
3240 return EOPNOTSUPP;
3241 }
3242
3243 // Allocate storage for the descriptor message
3244 mbuf_t msg;
3245 unsigned int one = 1;
3246 u_int32_t size = offsetof(nstat_msg_src_description, data) + src->provider->nstat_descriptor_length;
3247 if (mbuf_allocpacket(MBUF_DONTWAIT, size, &one, &msg) != 0)
3248 {
3249 return ENOMEM;
3250 }
3251
3252 nstat_msg_src_description *desc = (nstat_msg_src_description*)mbuf_data(msg);
3253 bzero(desc, size);
3254 mbuf_setlen(msg, size);
3255 mbuf_pkthdr_setlen(msg, mbuf_len(msg));
3256
3257 // Query the provider for the provider specific bits
3258 errno_t result = src->provider->nstat_copy_descriptor(src->cookie, desc->data, src->provider->nstat_descriptor_length);
3259
3260 if (result != 0)
3261 {
3262 mbuf_freem(msg);
3263 return result;
3264 }
3265
3266 desc->hdr.context = context;
3267 desc->hdr.type = NSTAT_MSG_TYPE_SRC_DESC;
3268 desc->hdr.length = size;
3269 desc->hdr.flags = hdr_flags;
3270 desc->srcref = src->srcref;
3271 desc->provider = src->provider->nstat_provider_id;
3272
3273 result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg, CTL_DATA_EOR);
3274 if (result != 0)
3275 {
3276 nstat_stats.nstat_descriptionfailures += 1;
3277 mbuf_freem(msg);
3278 }
3279
3280 return result;
3281 }
3282
3283 static errno_t
3284 nstat_control_append_description(
3285 nstat_control_state *state,
3286 nstat_src *src)
3287 {
3288 size_t size = offsetof(nstat_msg_src_description, data) + src->provider->nstat_descriptor_length;
3289 if (size > 512 || src->provider->nstat_descriptor_length == 0 ||
3290 src->provider->nstat_copy_descriptor == NULL)
3291 {
3292 return EOPNOTSUPP;
3293 }
3294
3295 // Fill out a buffer on the stack; we will copy it to the mbuf later
3296 u_int64_t buffer[size/sizeof(u_int64_t) + 1]; // u_int64_t to ensure alignment
3297 bzero(buffer, size);
3298
3299 nstat_msg_src_description *desc = (nstat_msg_src_description*)buffer;
3300 desc->hdr.type = NSTAT_MSG_TYPE_SRC_DESC;
3301 desc->hdr.length = size;
3302 desc->srcref = src->srcref;
3303 desc->provider = src->provider->nstat_provider_id;
3304
3305 errno_t result = 0;
3306 // Fill in the description
3307 // Query the provider for the provider specific bits
3308 result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
3309 src->provider->nstat_descriptor_length);
3310 if (result != 0)
3311 {
3312 return result;
3313 }
3314
3315 return nstat_accumulate_msg(state, &desc->hdr, size);
3316 }
3317
3318 static int
3319 nstat_control_send_update(
3320 nstat_control_state *state,
3321 nstat_src *src,
3322 u_int64_t context,
3323 u_int16_t hdr_flags,
3324 int *gone)
3325 {
3326 // Provider doesn't support getting the descriptor or counts? Done.
3327 if ((src->provider->nstat_descriptor_length == 0 ||
3328 src->provider->nstat_copy_descriptor == NULL) &&
3329 src->provider->nstat_counts == NULL)
3330 {
3331 return EOPNOTSUPP;
3332 }
3333
3334 // Allocate storage for the descriptor message
3335 mbuf_t msg;
3336 unsigned int one = 1;
3337 u_int32_t size = offsetof(nstat_msg_src_update, data) +
3338 src->provider->nstat_descriptor_length;
3339 if (mbuf_allocpacket(MBUF_DONTWAIT, size, &one, &msg) != 0)
3340 {
3341 return ENOMEM;
3342 }
3343
3344 nstat_msg_src_update *desc = (nstat_msg_src_update*)mbuf_data(msg);
3345 bzero(desc, size);
3346 desc->hdr.context = context;
3347 desc->hdr.type = NSTAT_MSG_TYPE_SRC_UPDATE;
3348 desc->hdr.length = size;
3349 desc->hdr.flags = hdr_flags;
3350 desc->srcref = src->srcref;
3351 desc->provider = src->provider->nstat_provider_id;
3352
3353 mbuf_setlen(msg, size);
3354 mbuf_pkthdr_setlen(msg, mbuf_len(msg));
3355
3356 errno_t result = 0;
3357 if (src->provider->nstat_descriptor_length != 0 && src->provider->nstat_copy_descriptor)
3358 {
3359 // Query the provider for the provider specific bits
3360 result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
3361 src->provider->nstat_descriptor_length);
3362 if (result != 0)
3363 {
3364 mbuf_freem(msg);
3365 return result;
3366 }
3367 }
3368
3369 if (src->provider->nstat_counts)
3370 {
3371 result = src->provider->nstat_counts(src->cookie, &desc->counts, gone);
3372 if (result == 0)
3373 {
3374 if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
3375 desc->counts.nstat_rxbytes == 0 && desc->counts.nstat_txbytes == 0)
3376 {
3377 result = EAGAIN;
3378 }
3379 else
3380 {
3381 result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg, CTL_DATA_EOR);
3382 }
3383 }
3384 }
3385
3386 if (result != 0)
3387 {
3388 nstat_stats.nstat_srcupatefailures += 1;
3389 mbuf_freem(msg);
3390 }
3391
3392 return result;
3393 }
3394
3395 static errno_t
3396 nstat_control_append_update(
3397 nstat_control_state *state,
3398 nstat_src *src,
3399 int *gone)
3400 {
3401 size_t size = offsetof(nstat_msg_src_update, data) + src->provider->nstat_descriptor_length;
3402 if (size > 512 || ((src->provider->nstat_descriptor_length == 0 ||
3403 src->provider->nstat_copy_descriptor == NULL) &&
3404 src->provider->nstat_counts == NULL))
3405 {
3406 return EOPNOTSUPP;
3407 }
3408
3409 // Fill out a buffer on the stack; we will copy it to the mbuf later
3410 u_int64_t buffer[size/sizeof(u_int64_t) + 1]; // u_int64_t to ensure alignment
3411 bzero(buffer, size);
3412
3413 nstat_msg_src_update *desc = (nstat_msg_src_update*)buffer;
3414 desc->hdr.type = NSTAT_MSG_TYPE_SRC_UPDATE;
3415 desc->hdr.length = size;
3416 desc->srcref = src->srcref;
3417 desc->provider = src->provider->nstat_provider_id;
3418
3419 errno_t result = 0;
3420 // Fill in the description
3421 if (src->provider->nstat_descriptor_length != 0 && src->provider->nstat_copy_descriptor)
3422 {
3423 // Query the provider for the provider specific bits
3424 result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
3425 src->provider->nstat_descriptor_length);
3426 if (result != 0)
3427 {
3428 nstat_stats.nstat_copy_descriptor_failures++;
3429 if (nstat_debug != 0)
3430 printf("%s: src->provider->nstat_copy_descriptor: %d\n", __func__, result);
3431 return result;
3432 }
3433 }
3434
3435 if (src->provider->nstat_counts)
3436 {
3437 result = src->provider->nstat_counts(src->cookie, &desc->counts, gone);
3438 if (result != 0)
3439 {
3440 nstat_stats.nstat_provider_counts_failures++;
3441 if (nstat_debug != 0)
3442 printf("%s: src->provider->nstat_counts: %d\n", __func__, result);
3443 return result;
3444 }
3445
3446 if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
3447 desc->counts.nstat_rxbytes == 0 && desc->counts.nstat_txbytes == 0)
3448 {
3449 return EAGAIN;
3450 }
3451 }
3452
3453 return nstat_accumulate_msg(state, &desc->hdr, size);
3454 }
3455
3456 static errno_t
3457 nstat_control_send_removed(
3458 nstat_control_state *state,
3459 nstat_src *src)
3460 {
3461 nstat_msg_src_removed removed;
3462 errno_t result;
3463
3464 bzero(&removed, sizeof(removed));
3465 removed.hdr.type = NSTAT_MSG_TYPE_SRC_REMOVED;
3466 removed.hdr.length = sizeof(removed);
3467 removed.hdr.context = 0;
3468 removed.srcref = src->srcref;
3469 result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &removed,
3470 sizeof(removed), CTL_DATA_EOR | CTL_DATA_CRIT);
3471 if (result != 0)
3472 nstat_stats.nstat_msgremovedfailures += 1;
3473
3474 return result;
3475 }
3476
3477 static errno_t
3478 nstat_control_handle_add_request(
3479 nstat_control_state *state,
3480 mbuf_t m)
3481 {
3482 errno_t result;
3483
3484 // Verify the header fits in the first mbuf
3485 if (mbuf_len(m) < offsetof(nstat_msg_add_src_req, param))
3486 {
3487 return EINVAL;
3488 }
3489
3490 // Calculate the length of the parameter field
3491 int32_t paramlength = mbuf_pkthdr_len(m) - offsetof(nstat_msg_add_src_req, param);
3492 if (paramlength < 0 || paramlength > 2 * 1024)
3493 {
3494 return EINVAL;
3495 }
3496
3497 nstat_provider *provider;
3498 nstat_provider_cookie_t cookie;
3499 nstat_msg_add_src_req *req = mbuf_data(m);
3500 if (mbuf_pkthdr_len(m) > mbuf_len(m))
3501 {
3502 // parameter spans multiple mbufs; make a contiguous copy
3503 void *data = OSMalloc(paramlength, nstat_malloc_tag);
3504
3505 if (!data) return ENOMEM;
3506 result = mbuf_copydata(m, offsetof(nstat_msg_add_src_req, param), paramlength, data);
3507 if (result == 0)
3508 result = nstat_lookup_entry(req->provider, data, paramlength, &provider, &cookie);
3509 OSFree(data, paramlength, nstat_malloc_tag);
3510 }
3511 else
3512 {
3513 result = nstat_lookup_entry(req->provider, (void*)&req->param, paramlength, &provider, &cookie);
3514 }
3515
3516 if (result != 0)
3517 {
3518 return result;
3519 }
3520
3521 result = nstat_control_source_add(req->hdr.context, state, provider, cookie);
3522 if (result != 0)
3523 provider->nstat_release(cookie, 0);
3524
3525 return result;
3526 }
3527
3528 static errno_t
3529 nstat_control_handle_add_all(
3530 nstat_control_state *state,
3531 mbuf_t m)
3532 {
3533 errno_t result = 0;
3534
3535 // Verify the header fits in the first mbuf
3536 if (mbuf_len(m) < sizeof(nstat_msg_add_all_srcs))
3537 {
3538 return EINVAL;
3539 }
3540
3541
3542 nstat_msg_add_all_srcs *req = mbuf_data(m);
3543 if (req->provider > NSTAT_PROVIDER_LAST) return ENOENT;
3544
3545 nstat_provider *provider = nstat_find_provider_by_id(req->provider);
3546 u_int64_t filter = req->filter;
3547
3548 if (!provider) return ENOENT;
3549 if (provider->nstat_watcher_add == NULL) return ENOTSUP;
3550
3551 if (nstat_privcheck != 0) {
3552 result = priv_check_cred(kauth_cred_get(),
3553 PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0);
3554 if (result != 0)
3555 return result;
3556 }
3557
3558 // Make sure we don't add the provider twice
3559 lck_mtx_lock(&state->mtx);
3560 if ((state->ncs_watching & (1 << provider->nstat_provider_id)) != 0)
3561 result = EALREADY;
3562 state->ncs_watching |= (1 << provider->nstat_provider_id);
3563 lck_mtx_unlock(&state->mtx);
3564 if (result != 0) return result;
3565
3566 state->ncs_provider_filters[req->provider] = filter;
3567
3568 result = provider->nstat_watcher_add(state);
3569 if (result != 0)
3570 {
3571 state->ncs_provider_filters[req->provider] = 0;
3572 lck_mtx_lock(&state->mtx);
3573 state->ncs_watching &= ~(1 << provider->nstat_provider_id);
3574 lck_mtx_unlock(&state->mtx);
3575 }
3576 if (result == 0)
3577 nstat_enqueue_success(req->hdr.context, state, 0);
3578
3579 return result;
3580 }
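/*
 * Editorial note: a minimal sketch of the request a client would send to
 * reach the handler above, assuming the nstat_msg_add_all_srcs layout and
 * the NSTAT_MSG_TYPE_ADD_ALL_SRCS / NSTAT_PROVIDER_TCP constants from
 * <net/ntstat.h>, and a descriptor from the earlier nstat_open() sketch.
 * nstat_watch_tcp() is hypothetical; error handling is elided. On success,
 * the kernel replies with an NSTAT_MSG_TYPE_SUCCESS message and begins
 * sending NSTAT_MSG_TYPE_SRC_ADDED messages for matching sources.
 */
#if 0	/* userspace example, not kernel code */
static int
nstat_watch_tcp(int fd)
{
	nstat_msg_add_all_srcs req;

	memset(&req, 0, sizeof(req));
	req.hdr.type = NSTAT_MSG_TYPE_ADD_ALL_SRCS;
	req.hdr.length = sizeof(req);
	req.hdr.context = 1;	/* echoed back in the success reply */
	req.provider = NSTAT_PROVIDER_TCP;
	req.filter = 0;		/* no NSTAT_FILTER_* restrictions */
	return (int)send(fd, &req, sizeof(req), 0);
}
#endif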
3581
3582 static errno_t
3583 nstat_control_source_add(
3584 u_int64_t context,
3585 nstat_control_state *state,
3586 nstat_provider *provider,
3587 nstat_provider_cookie_t cookie)
3588 {
3589 // Fill out source added message if appropriate
3590 mbuf_t msg = NULL;
3591 nstat_src_ref_t *srcrefp = NULL;
3592
3593 u_int64_t provider_filters =
3594 state->ncs_provider_filters[provider->nstat_provider_id];
3595 boolean_t tell_user =
3596 ((provider_filters & NSTAT_FILTER_SUPPRESS_SRC_ADDED) == 0);
3597 u_int32_t src_filter =
3598 (provider_filters & NSTAT_FILTER_PROVIDER_NOZEROBYTES)
3599 ? NSTAT_FILTER_NOZEROBYTES : 0;
3600
3601 if (tell_user)
3602 {
3603 unsigned int one = 1;
3604
3605 if (mbuf_allocpacket(MBUF_DONTWAIT, sizeof(nstat_msg_src_added),
3606 &one, &msg) != 0)
3607 return ENOMEM;
3608
3609 mbuf_setlen(msg, sizeof(nstat_msg_src_added));
3610 mbuf_pkthdr_setlen(msg, mbuf_len(msg));
3611 nstat_msg_src_added *add = mbuf_data(msg);
3612 bzero(add, sizeof(*add));
3613 add->hdr.type = NSTAT_MSG_TYPE_SRC_ADDED;
3614 add->hdr.length = mbuf_len(msg);
3615 add->hdr.context = context;
3616 add->provider = provider->nstat_provider_id;
3617 srcrefp = &add->srcref;
3618 }
3619
3620 // Allocate storage for the source
3621 nstat_src *src = OSMalloc(sizeof(*src), nstat_malloc_tag);
3622 if (src == NULL)
3623 {
3624 if (msg) mbuf_freem(msg);
3625 return ENOMEM;
3626 }
3627
3628 // Fill in the source, including picking an unused source ref
3629 lck_mtx_lock(&state->mtx);
3630
3631 src->srcref = nstat_control_next_src_ref(state);
3632 if (srcrefp)
3633 *srcrefp = src->srcref;
3634
3635 if (state->ncs_flags & NSTAT_FLAG_CLEANUP || src->srcref == NSTAT_SRC_REF_INVALID)
3636 {
3637 lck_mtx_unlock(&state->mtx);
3638 OSFree(src, sizeof(*src), nstat_malloc_tag);
3639 if (msg) mbuf_freem(msg);
3640 return EINVAL;
3641 }
3642 src->provider = provider;
3643 src->cookie = cookie;
3644 src->filter = src_filter;
3645
3646 if (msg)
3647 {
3648 // send the source added message if appropriate
3649 errno_t result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg,
3650 CTL_DATA_EOR);
3651 if (result != 0)
3652 {
3653 nstat_stats.nstat_srcaddedfailures += 1;
3654 lck_mtx_unlock(&state->mtx);
3655 OSFree(src, sizeof(*src), nstat_malloc_tag);
3656 mbuf_freem(msg);
3657 return result;
3658 }
3659 }
3660 // Put the source in the list
3661 src->next = state->ncs_srcs;
3662 state->ncs_srcs = src;
3663
3664 lck_mtx_unlock(&state->mtx);
3665
3666 return 0;
3667 }
3668
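/*
 * nstat_control_handle_remove_request
 *
 * Handles NSTAT_MSG_TYPE_REM_SRC: unlinks the source with the requested
 * source reference from the state's list and releases it; returns ENOENT
 * if no matching source is found.
 */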
3669 static errno_t
3670 nstat_control_handle_remove_request(
3671 nstat_control_state *state,
3672 mbuf_t m)
3673 {
3674 nstat_src_ref_t srcref = NSTAT_SRC_REF_INVALID;
3675
3676 if (mbuf_copydata(m, offsetof(nstat_msg_rem_src_req, srcref), sizeof(srcref), &srcref) != 0)
3677 {
3678 return EINVAL;
3679 }
3680
3681 lck_mtx_lock(&state->mtx);
3682
3683 // Remove this source as we look for it
3684 nstat_src **nextp;
3685 nstat_src *src = NULL;
3686 for (nextp = &state->ncs_srcs; *nextp; nextp = &(*nextp)->next)
3687 {
3688 if ((*nextp)->srcref == srcref)
3689 {
3690 src = *nextp;
3691 *nextp = src->next;
3692 break;
3693 }
3694 }
3695
3696 lck_mtx_unlock(&state->mtx);
3697
3698 if (src) nstat_control_cleanup_source(state, src, FALSE);
3699
3700 return src ? 0 : ENOENT;
3701 }
3702
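/*
 * nstat_control_handle_query_request
 *
 * Handles NSTAT_MSG_TYPE_QUERY_SRC: sends counts for the requested source,
 * or for every source when srcref is NSTAT_SRC_REF_ALL. Query-all requests
 * may be paced across multiple partial queries (see
 * nstat_control_begin_query); sources found to be gone get one final
 * description message and are then cleaned up.
 */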
3703 static errno_t
3704 nstat_control_handle_query_request(
3705 nstat_control_state *state,
3706 mbuf_t m)
3707 {
3708 // TBD: handle this from another thread so we can enqueue a lot of data
3709 // As written, if a client requests "query all", this function is called
3710 // from the client's send of the request message. We will attempt to write
3711 // responses until the buffer fills up. Since the client's thread is blocked
3712 // on the send, it won't be reading unless the client uses two threads on
3713 // this socket, one to read and one to write. Two threads probably won't
3714 // work with this code anyhow, since we don't have proper locking in
3715 // place yet.
3716 nstat_src *dead_srcs = NULL;
3717 errno_t result = ENOENT;
3718 nstat_msg_query_src_req req;
3719
3720 if (mbuf_copydata(m, 0, sizeof(req), &req) != 0)
3721 {
3722 return EINVAL;
3723 }
3724
3725 const boolean_t all_srcs = (req.srcref == NSTAT_SRC_REF_ALL);
3726
3727 lck_mtx_lock(&state->mtx);
3728
3729 if (all_srcs)
3730 {
3731 state->ncs_flags |= NSTAT_FLAG_REQCOUNTS;
3732 }
3733 nstat_src **srcpp = &state->ncs_srcs;
3734 u_int64_t src_count = 0;
3735 boolean_t partial = FALSE;
3736
3737 /*
3738 * Error handling policy and sequence number generation are folded into
3739 * nstat_control_begin_query.
3740 */
3741 partial = nstat_control_begin_query(state, &req.hdr);
3742
3743 while (*srcpp != NULL
3744 && (!partial || src_count < QUERY_CONTINUATION_SRC_COUNT))
3745 {
3746 nstat_src *src = NULL;
3747 int gone;
3748
3749 src = *srcpp;
3750 gone = 0;
3751 // XXX ignore IFACE types?
3752 if (all_srcs || src->srcref == req.srcref)
3753 {
3754 if (nstat_control_reporting_allowed(state, src)
3755 && (!partial || !all_srcs || src->seq != state->ncs_seq))
3756 {
3757 if (all_srcs &&
3758 (req.hdr.flags & NSTAT_MSG_HDR_FLAG_SUPPORTS_AGGREGATE) != 0)
3759 {
3760 result = nstat_control_append_counts(state, src, &gone);
3761 }
3762 else
3763 {
3764 result = nstat_control_send_counts(state, src, req.hdr.context, 0, &gone);
3765 }
3766
3767 if (ENOMEM == result || ENOBUFS == result)
3768 {
3769 /*
3770 * If the counts message failed to
3771 * enqueue, clear our flag so that the
3772 * client doesn't miss anything during
3773 * idle cleanup. We skip the "gone"
3774 * processing in the hope that we may
3775 * catch it another time.
3776 */
3777 state->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS;
3778 break;
3779 }
3780 if (partial)
3781 {
3782 /*
3783 * We skip over hard errors and
3784 * filtered sources.
3785 */
3786 src->seq = state->ncs_seq;
3787 src_count++;
3788 }
3789 }
3790 }
3791
3792 if (gone)
3793 {
3794 // Send one last descriptor message so the client may see the final state.
3795 // If we can't send the notification now, it will be
3796 // sent during idle cleanup.
3797 result = nstat_control_send_description(state, *srcpp, 0, 0);
3798 if (result != 0)
3799 {
3800 nstat_stats.nstat_control_send_description_failures++;
3801 if (nstat_debug != 0)
3802 printf("%s - nstat_control_send_description() %d\n", __func__, result);
3803 state->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS;
3804 break;
3805 }
3806
3807 // pull src out of the list
3808 *srcpp = src->next;
3809
3810 src->next = dead_srcs;
3811 dead_srcs = src;
3812 }
3813 else
3814 {
3815 srcpp = &(*srcpp)->next;
3816 }
3817
3818 if (!all_srcs && req.srcref == src->srcref)
3819 {
3820 break;
3821 }
3822 }
3823 nstat_flush_accumulated_msgs(state);
3824
3825 u_int16_t flags = 0;
3826 if (req.srcref == NSTAT_SRC_REF_ALL)
3827 flags = nstat_control_end_query(state, *srcpp, partial);
3828
3829 lck_mtx_unlock(&state->mtx);
3830
3831 /*
3832 * If an error occurred enqueueing data, then allow the error to
3833 * propagate to nstat_control_send. This way, the error is sent to
3834 * user-level.
3835 */
3836 if (all_srcs && ENOMEM != result && ENOBUFS != result)
3837 {
3838 nstat_enqueue_success(req.hdr.context, state, flags);
3839 result = 0;
3840 }
3841
3842 while (dead_srcs)
3843 {
3844 nstat_src *src;
3845
3846 src = dead_srcs;
3847 dead_srcs = src->next;
3848
3849 // release src and send notification
3850 nstat_control_cleanup_source(state, src, FALSE);
3851 }
3852
3853 return result;
3854 }
3855
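/*
 * nstat_control_handle_get_src_description
 *
 * Handles NSTAT_MSG_TYPE_GET_SRC_DESC: sends a description message for the
 * requested source, or for every source when srcref is NSTAT_SRC_REF_ALL,
 * using the same pacing scheme as the query-all path.
 */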
3856 static errno_t
3857 nstat_control_handle_get_src_description(
3858 nstat_control_state *state,
3859 mbuf_t m)
3860 {
3861 nstat_msg_get_src_description req;
3862 errno_t result = ENOENT;
3863 nstat_src *src;
3864
3865 if (mbuf_copydata(m, 0, sizeof(req), &req) != 0)
3866 {
3867 return EINVAL;
3868 }
3869
3870 lck_mtx_lock(&state->mtx);
3871 u_int64_t src_count = 0;
3872 boolean_t partial = FALSE;
3873 const boolean_t all_srcs = (req.srcref == NSTAT_SRC_REF_ALL);
3874
3875 /*
3876 * Error handling policy and sequence number generation are folded into
3877 * nstat_control_begin_query.
3878 */
3879 partial = nstat_control_begin_query(state, &req.hdr);
3880
3881 for (src = state->ncs_srcs;
3882 src && (!partial || src_count < QUERY_CONTINUATION_SRC_COUNT);
3883 src = src->next)
3884 {
3885 if (all_srcs || src->srcref == req.srcref)
3886 {
3887 if (nstat_control_reporting_allowed(state, src)
3888 && (!all_srcs || !partial || src->seq != state->ncs_seq))
3889 {
3890 if ((req.hdr.flags & NSTAT_MSG_HDR_FLAG_SUPPORTS_AGGREGATE) != 0 && all_srcs)
3891 {
3892 result = nstat_control_append_description(state, src);
3893 }
3894 else
3895 {
3896 result = nstat_control_send_description(state, src, req.hdr.context, 0);
3897 }
3898
3899 if (ENOMEM == result || ENOBUFS == result)
3900 {
3901 /*
3902 * If the description message failed to
3903 * enqueue then we give up for now.
3904 */
3905 break;
3906 }
3907 if (partial)
3908 {
3909 /*
3910 * Note that we skip over hard errors and
3911 * filtered sources.
3912 */
3913 src->seq = state->ncs_seq;
3914 src_count++;
3915 }
3916 }
3917
3918 if (!all_srcs)
3919 {
3920 break;
3921 }
3922 }
3923 }
3924 nstat_flush_accumulated_msgs(state);
3925
3926 u_int16_t flags = 0;
3927 if (req.srcref == NSTAT_SRC_REF_ALL)
3928 flags = nstat_control_end_query(state, src, partial);
3929
3930 lck_mtx_unlock(&state->mtx);
3931 /*
3932 * If an error occurred enqueueing data, then allow the error to
3933 * propagate to nstat_control_send. This way, the error is sent to
3934 * user-level.
3935 */
3936 if (all_srcs && ENOMEM != result && ENOBUFS != result)
3937 {
3938 nstat_enqueue_success(req.hdr.context, state, flags);
3939 result = 0;
3940 }
3941
3942 return result;
3943 }
3944
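/*
 * nstat_control_handle_set_filter
 *
 * Handles NSTAT_MSG_TYPE_SET_FILTER: updates the per-source filter for a
 * single source reference. Wildcard and invalid references are rejected.
 */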
3945 static errno_t
3946 nstat_control_handle_set_filter(
3947 nstat_control_state *state,
3948 mbuf_t m)
3949 {
3950 nstat_msg_set_filter req;
3951 nstat_src *src;
3952
3953 if (mbuf_copydata(m, 0, sizeof(req), &req) != 0)
3954 return EINVAL;
3955 if (req.srcref == NSTAT_SRC_REF_ALL ||
3956 req.srcref == NSTAT_SRC_REF_INVALID)
3957 return EINVAL;
3958
3959 lck_mtx_lock(&state->mtx);
3960 for (src = state->ncs_srcs; src; src = src->next)
3961 if (req.srcref == src->srcref)
3962 {
3963 src->filter = req.filter;
3964 break;
3965 }
3966 lck_mtx_unlock(&state->mtx);
3967 if (src == NULL)
3968 return ENOENT;
3969
3970 return 0;
3971 }
3972
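/*
 * nstat_send_error
 *
 * Enqueues an NSTAT_MSG_TYPE_ERROR message for the given client context,
 * counting a statistic if even the error message cannot be enqueued.
 */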
3973 static void
3974 nstat_send_error(
3975 nstat_control_state *state,
3976 u_int64_t context,
3977 u_int32_t error)
3978 {
3979 errno_t result;
3980 struct nstat_msg_error err;
3981
3982 bzero(&err, sizeof(err));
3983 err.hdr.type = NSTAT_MSG_TYPE_ERROR;
3984 err.hdr.length = sizeof(err);
3985 err.hdr.context = context;
3986 err.error = error;
3987
3988 result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &err,
3989 sizeof(err), CTL_DATA_EOR | CTL_DATA_CRIT);
3990 if (result != 0)
3991 nstat_stats.nstat_msgerrorfailures++;
3992 }
3993
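/*
 * nstat_control_begin_query
 *
 * Common setup for paced queries. Returns TRUE if this request is a
 * continuation of a partial query-all, in which case the stored context and
 * sequence number are (re)initialized as needed; any other context with a
 * query still in progress is sent an EAGAIN error.
 */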
3994 static boolean_t
3995 nstat_control_begin_query(
3996 nstat_control_state *state,
3997 const nstat_msg_hdr *hdrp)
3998 {
3999 boolean_t partial = FALSE;
4000
4001 if (hdrp->flags & NSTAT_MSG_HDR_FLAG_CONTINUATION)
4002 {
4003 /* A partial query all has been requested. */
4004 partial = TRUE;
4005
4006 if (state->ncs_context != hdrp->context)
4007 {
4008 if (state->ncs_context != 0)
4009 nstat_send_error(state, state->ncs_context, EAGAIN);
4010
4011 /* Initialize state for a partial query all. */
4012 state->ncs_context = hdrp->context;
4013 state->ncs_seq++;
4014 }
4015 }
4016 else if (state->ncs_context != 0)
4017 {
4018 /*
4019 * A paced query was already in progress. If a different context sent this
4020 * request, send the old context an error and reset the state. If the same
4021 * context has changed its mind, just send the full query results.
4022 */
4023 if (state->ncs_context != hdrp->context)
4024 nstat_send_error(state, state->ncs_context, EAGAIN);
4025 }
4026
4027 return partial;
4028 }
4029
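/*
 * nstat_control_end_query
 *
 * Companion to nstat_control_begin_query: clears the stored context once a
 * query has run to completion, or returns the CONTINUATION header flag to
 * tell the client to request the next installment.
 */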
4030 static u_int16_t
4031 nstat_control_end_query(
4032 nstat_control_state *state,
4033 nstat_src *last_src,
4034 boolean_t partial)
4035 {
4036 u_int16_t flags = 0;
4037
4038 if (last_src == NULL || !partial)
4039 {
4040 /*
4041 * Either we iterated through the entire list of sources, or we
4042 * exited the loop early because a partial update was not
4043 * requested (an error occurred), so clear the context to
4044 * indicate internally that the query is finished.
4045 */
4046 state->ncs_context = 0;
4047 }
4048 else
4049 {
4050 /*
4051 * Indicate to user level that it should make another partial
4052 * request, as there are still sources left to be reported.
4053 */
4054 flags |= NSTAT_MSG_HDR_FLAG_CONTINUATION;
4055 }
4056
4057 return flags;
4058 }
4059
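/*
 * nstat_control_handle_get_update
 *
 * Handles NSTAT_MSG_TYPE_GET_UPDATE: marks the client as supporting update
 * messages, then sends updates for the requested source, or for all sources
 * with the same pacing scheme as the query-all path. Sources found to be
 * gone are unlinked and cleaned up afterwards.
 */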
4060 static errno_t
4061 nstat_control_handle_get_update(
4062 nstat_control_state *state,
4063 mbuf_t m)
4064 {
4065 nstat_msg_query_src_req req;
4066
4067 if (mbuf_copydata(m, 0, sizeof(req), &req) != 0)
4068 {
4069 return EINVAL;
4070 }
4071
4072 lck_mtx_lock(&state->mtx);
4073
4074 state->ncs_flags |= NSTAT_FLAG_SUPPORTS_UPDATES;
4075
4076 errno_t result = ENOENT;
4077 nstat_src *src;
4078 nstat_src *dead_srcs = NULL;
4079 nstat_src **srcpp = &state->ncs_srcs;
4080 u_int64_t src_count = 0;
4081 boolean_t partial = FALSE;
4082
4083 /*
4084 * Error handling policy and sequence number generation are folded into
4085 * nstat_control_begin_query.
4086 */
4087 partial = nstat_control_begin_query(state, &req.hdr);
4088
4089 while (*srcpp != NULL
4090 && (FALSE == partial
4091 || src_count < QUERY_CONTINUATION_SRC_COUNT))
4092 {
4093 int gone;
4094
4095 gone = 0;
4096 src = *srcpp;
4097 if (nstat_control_reporting_allowed(state, src))
4098 {
4099 /* Skip this source if it has the current state
4100 * sequence number, as it has already been reported in
4101 * this partial query-all sequence. */
4102 if (req.srcref == NSTAT_SRC_REF_ALL
4103 && (FALSE == partial || src->seq != state->ncs_seq))
4104 {
4105 result = nstat_control_append_update(state, src, &gone);
4106 if (ENOMEM == result || ENOBUFS == result)
4107 {
4108 /*
4109 * If the update message failed to
4110 * enqueue then give up.
4111 */
4112 break;
4113 }
4114 if (partial)
4115 {
4116 /*
4117 * We skip over hard errors and
4118 * filtered sources.
4119 */
4120 src->seq = state->ncs_seq;
4121 src_count++;
4122 }
4123 }
4124 else if (src->srcref == req.srcref)
4125 {
4126 result = nstat_control_send_update(state, src, req.hdr.context, 0, &gone);
4127 }
4128 }
4129
4130 if (gone)
4131 {
4132 // pull src out of the list
4133 *srcpp = src->next;
4134
4135 src->next = dead_srcs;
4136 dead_srcs = src;
4137 }
4138 else
4139 {
4140 srcpp = &(*srcpp)->next;
4141 }
4142
4143 if (req.srcref != NSTAT_SRC_REF_ALL && req.srcref == src->srcref)
4144 {
4145 break;
4146 }
4147 }
4148
4149 nstat_flush_accumulated_msgs(state);
4150
4151
4152 u_int16_t flags = 0;
4153 if (req.srcref == NSTAT_SRC_REF_ALL)
4154 flags = nstat_control_end_query(state, *srcpp, partial);
4155
4156 lck_mtx_unlock(&state->mtx);
4157 /*
4158 * If an error occurred enqueueing data, then allow the error to
4159 * propagate to nstat_control_send. This way, the error is sent to
4160 * user-level.
4161 */
4162 if (req.srcref == NSTAT_SRC_REF_ALL && ENOMEM != result && ENOBUFS != result)
4163 {
4164 nstat_enqueue_success(req.hdr.context, state, flags);
4165 result = 0;
4166 }
4167
4168 while (dead_srcs)
4169 {
4170 src = dead_srcs;
4171 dead_srcs = src->next;
4172
4173 // release src and send notification
4174 nstat_control_cleanup_source(state, src, FALSE);
4175 }
4176
4177 return result;
4178 }
4179
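/*
 * nstat_control_handle_subscribe_sysinfo
 *
 * Handles NSTAT_MSG_TYPE_SUBSCRIBE_SYSINFO: requires the
 * PRIV_NET_PRIVILEGED_NETWORK_STATISTICS privilege, then marks this control
 * state as subscribed to system information reports.
 */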
4180 static errno_t
4181 nstat_control_handle_subscribe_sysinfo(
4182 nstat_control_state *state)
4183 {
4184 errno_t result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0);
4185
4186 if (result != 0)
4187 {
4188 return result;
4189 }
4190
4191 lck_mtx_lock(&state->mtx);
4192 state->ncs_flags |= NSTAT_FLAG_SYSINFO_SUBSCRIBED;
4193 lck_mtx_unlock(&state->mtx);
4194
4195 return 0;
4196 }
4197
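/*
 * nstat_control_send
 *
 * Kernel control send handler for the statistics control socket: validates
 * the message header (patching up length and flags for legacy clients),
 * dispatches on the message type, and on failure reports the error back to
 * the client, prepended to the echoed request when possible.
 */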
4198 static errno_t
4199 nstat_control_send(
4200 kern_ctl_ref kctl,
4201 u_int32_t unit,
4202 void *uinfo,
4203 mbuf_t m,
4204 __unused int flags)
4205 {
4206 nstat_control_state *state = (nstat_control_state*)uinfo;
4207 struct nstat_msg_hdr *hdr;
4208 struct nstat_msg_hdr storage;
4209 errno_t result = 0;
4210
4211 if (mbuf_pkthdr_len(m) < sizeof(*hdr))
4212 {
4213 // Is this the right thing to do?
4214 mbuf_freem(m);
4215 return EINVAL;
4216 }
4217
4218 if (mbuf_len(m) >= sizeof(*hdr))
4219 {
4220 hdr = mbuf_data(m);
4221 }
4222 else
4223 {
4224 mbuf_copydata(m, 0, sizeof(storage), &storage);
4225 hdr = &storage;
4226 }
4227
4228 // Legacy clients may not set the length.
4229 // Those clients are likely not setting the flags either.
4230 // Fix everything up so old clients continue to work.
4231 if (hdr->length != mbuf_pkthdr_len(m))
4232 {
4233 hdr->flags = 0;
4234 hdr->length = mbuf_pkthdr_len(m);
4235 if (hdr == &storage)
4236 {
4237 mbuf_copyback(m, 0, sizeof(*hdr), hdr, MBUF_DONTWAIT);
4238 }
4239 }
4240
4241 switch (hdr->type)
4242 {
4243 case NSTAT_MSG_TYPE_ADD_SRC:
4244 result = nstat_control_handle_add_request(state, m);
4245 break;
4246
4247 case NSTAT_MSG_TYPE_ADD_ALL_SRCS:
4248 result = nstat_control_handle_add_all(state, m);
4249 break;
4250
4251 case NSTAT_MSG_TYPE_REM_SRC:
4252 result = nstat_control_handle_remove_request(state, m);
4253 break;
4254
4255 case NSTAT_MSG_TYPE_QUERY_SRC:
4256 result = nstat_control_handle_query_request(state, m);
4257 break;
4258
4259 case NSTAT_MSG_TYPE_GET_SRC_DESC:
4260 result = nstat_control_handle_get_src_description(state, m);
4261 break;
4262
4263 case NSTAT_MSG_TYPE_SET_FILTER:
4264 result = nstat_control_handle_set_filter(state, m);
4265 break;
4266
4267 case NSTAT_MSG_TYPE_GET_UPDATE:
4268 result = nstat_control_handle_get_update(state, m);
4269 break;
4270
4271 case NSTAT_MSG_TYPE_SUBSCRIBE_SYSINFO:
4272 result = nstat_control_handle_subscribe_sysinfo(state);
4273 break;
4274
4275 default:
4276 result = EINVAL;
4277 break;
4278 }
4279
4280 if (result != 0)
4281 {
4282 struct nstat_msg_error err;
4283
4284 bzero(&err, sizeof(err));
4285 err.hdr.type = NSTAT_MSG_TYPE_ERROR;
4286 err.hdr.length = sizeof(err) + mbuf_pkthdr_len(m);
4287 err.hdr.context = hdr->context;
4288 err.error = result;
4289
4290 if (mbuf_prepend(&m, sizeof(err), MBUF_DONTWAIT) == 0 &&
4291 mbuf_copyback(m, 0, sizeof(err), &err, MBUF_DONTWAIT) == 0)
4292 {
4293 result = ctl_enqueuembuf(kctl, unit, m, CTL_DATA_EOR | CTL_DATA_CRIT);
4294 if (result != 0)
4295 {
4296 mbuf_freem(m);
4297 }
4298 m = NULL;
4299 }
4300
4301 if (result != 0)
4302 {
4303 // Unable to prepend the error to the request; just send the error by itself
4304 err.hdr.length = sizeof(err);
4305 result = ctl_enqueuedata(kctl, unit, &err, sizeof(err),
4306 CTL_DATA_EOR | CTL_DATA_CRIT);
4307 if (result != 0)
4308 nstat_stats.nstat_msgerrorfailures += 1;
4309 }
4310 nstat_stats.nstat_handle_msg_failures += 1;
4311 }
4312
4313 if (m) mbuf_freem(m);
4314
4315 return result;
4316 }