]> git.saurik.com Git - apple/xnu.git/blob - bsd/net/ntstat.c
35a02c0b3e0b30b4ae79f29b0c8115cf4159bff7
[apple/xnu.git] / bsd / net / ntstat.c
1 /*
2 * Copyright (c) 2010-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <sys/param.h>
30 #include <sys/types.h>
31 #include <sys/kpi_mbuf.h>
32 #include <sys/socket.h>
33 #include <sys/kern_control.h>
34 #include <sys/mcache.h>
35 #include <sys/socketvar.h>
36 #include <sys/sysctl.h>
37 #include <sys/queue.h>
38 #include <sys/priv.h>
39 #include <sys/protosw.h>
40
41 #include <kern/clock.h>
42 #include <kern/debug.h>
43
44 #include <libkern/libkern.h>
45 #include <libkern/OSMalloc.h>
46 #include <libkern/OSAtomic.h>
47 #include <libkern/locks.h>
48
49 #include <net/if.h>
50 #include <net/if_var.h>
51 #include <net/if_types.h>
52 #include <net/route.h>
53 #include <net/ntstat.h>
54
55 #include <netinet/ip_var.h>
56 #include <netinet/in_pcb.h>
57 #include <netinet/in_var.h>
58 #include <netinet/tcp.h>
59 #include <netinet/tcp_var.h>
60 #include <netinet/tcp_fsm.h>
61 #include <netinet/tcp_cc.h>
62 #include <netinet/udp.h>
63 #include <netinet/udp_var.h>
64 #include <netinet6/in6_pcb.h>
65 #include <netinet6/in6_var.h>
66
// Master switch: when non-zero, detailed per-connection statistics are
// collected by the transport layers.
__private_extern__ int nstat_collect = 1;
SYSCTL_INT(_net, OID_AUTO, statistics, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_collect, 0, "Collect detailed statistics");

// When non-zero, clients must pass a privilege/entitlement check to attach.
static int nstat_privcheck = 0;
SYSCTL_INT(_net, OID_AUTO, statistics_privcheck, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_privcheck, 0, "Entitlement check");

SYSCTL_NODE(_net, OID_AUTO, stats,
    CTLFLAG_RW|CTLFLAG_LOCKED, 0, "network statistics");

// Debug logging toggle for this subsystem.
static int nstat_debug = 0;
SYSCTL_INT(_net_stats, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_debug, 0, "");

// Send/receive buffer sizes for the ntstat kernel control socket.
static int nstat_sendspace = 2048;
SYSCTL_INT(_net_stats, OID_AUTO, sendspace, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_sendspace, 0, "");

static int nstat_recvspace = 8192;
SYSCTL_INT(_net_stats, OID_AUTO, recvspace, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_recvspace, 0, "");

// Counters about the statistics subsystem itself, exported read-only.
static struct nstat_stats nstat_stats;
SYSCTL_STRUCT(_net_stats, OID_AUTO, stats, CTLFLAG_RD | CTLFLAG_LOCKED,
    &nstat_stats, nstat_stats, "");
93
// Per-client state bits kept in nstat_control_state.ncs_flags.
// NOTE(review): bit meanings inferred from names — confirm against the
// control-message handlers later in this file.
enum
{
    NSTAT_FLAG_CLEANUP = (1 << 0),              // client teardown in progress
    NSTAT_FLAG_REQCOUNTS = (1 << 1),            // counts requested with sources
    NSTAT_FLAG_SUPPORTS_UPDATES = (1 << 2),     // client accepts "update" messages
    NSTAT_FLAG_SYSINFO_SUBSCRIBED = (1 << 3),   // client subscribed to sysinfo
};

// Number of sources reported per batch before a partial query pauses and
// waits for a continuation request from the client.
#define QUERY_CONTINUATION_SRC_COUNT 100

// Filter a client can install per provider: flag/event masks, optionally
// restricted to a particular pid or uuid.
typedef struct nstat_provider_filter
{
    u_int64_t npf_flags;
    u_int64_t npf_events;
    pid_t npf_pid;
    uuid_t npf_uuid;
} nstat_provider_filter;


// State for one attached client (one kernel-control connection).
typedef struct nstat_control_state
{
    struct nstat_control_state *ncs_next;   // next client on the global nstat_controls list
    u_int32_t ncs_watching;                 // bitmask of provider ids being watched (1 << provider_id)
    decl_lck_mtx_data(, mtx);               // protects this client's state (see lock-order comment below)
    kern_ctl_ref ncs_kctl;                  // kernel control reference for sends
    u_int32_t ncs_unit;                     // kernel control unit of this client
    nstat_src_ref_t ncs_next_srcref;        // next source reference to hand out
    struct nstat_src *ncs_srcs;             // sources attached to this client
    mbuf_t ncs_accumulated;                 // accumulated outbound message data, if any
    u_int32_t ncs_flags;                    // NSTAT_FLAG_* above
    nstat_provider_filter ncs_provider_filters[NSTAT_PROVIDER_COUNT];
    /* state maintained for partial query requests */
    u_int64_t ncs_context;
    u_int64_t ncs_seq;
} nstat_control_state;
129
// A statistics provider — one per kind of source (routes, TCP, UDP, ...).
// Providers register themselves on the nstat_providers list and expose
// their behavior through the callbacks below.
typedef struct nstat_provider
{
    struct nstat_provider *next;            // next provider on the registration list
    nstat_provider_id_t nstat_provider_id;  // NSTAT_PROVIDER_* identity
    size_t nstat_descriptor_length;         // size of this provider's descriptor struct
    // Parse a client add-request payload and produce a provider cookie.
    errno_t (*nstat_lookup)(const void *data, u_int32_t length, nstat_provider_cookie_t *out_cookie);
    // Non-zero if the underlying object is dead and should stop being used.
    int (*nstat_gone)(nstat_provider_cookie_t cookie);
    // Fill counters for a source; sets *out_gone when the source has died.
    errno_t (*nstat_counts)(nstat_provider_cookie_t cookie, struct nstat_counts *out_counts, int *out_gone);
    // Start watching: add all of this provider's existing sources to the client.
    errno_t (*nstat_watcher_add)(nstat_control_state *state);
    // Stop watching for this client.
    void (*nstat_watcher_remove)(nstat_control_state *state);
    // Copy the provider-specific descriptor for a source into 'data'.
    errno_t (*nstat_copy_descriptor)(nstat_provider_cookie_t cookie, void *data, u_int32_t len);
    // Drop whatever reference the cookie holds on the underlying object.
    void (*nstat_release)(nstat_provider_cookie_t cookie, boolean_t locked);
    // Apply a client filter; returning false suppresses reports for this source.
    bool (*nstat_reporting_allowed)(nstat_provider_cookie_t cookie, nstat_provider_filter *filter);
} nstat_provider;

typedef STAILQ_HEAD(, nstat_src) stailq_head_nstat_src;
typedef STAILQ_ENTRY(nstat_src) stailq_entry_nstat_src;

typedef TAILQ_HEAD(, nstat_tu_shadow) tailq_head_tu_shadow;
typedef TAILQ_ENTRY(nstat_tu_shadow) tailq_entry_tu_shadow;

// One source as seen by one client: pairs a provider cookie with the
// client-visible source reference.
typedef struct nstat_src
{
    struct nstat_src *next;             // next source on the client's list
    nstat_src_ref_t srcref;             // client-visible handle for this source
    nstat_provider *provider;           // provider that owns 'cookie'
    nstat_provider_cookie_t cookie;     // provider-specific state / reference
    uint32_t filter;                    // per-source filter flags
    uint64_t seq;                       // sequence number for partial-query continuation
} nstat_src;
160
// Forward declarations for the control (client-facing) machinery defined
// later in this file.
static errno_t nstat_control_send_counts(nstat_control_state *,
    nstat_src *, unsigned long long, u_int16_t, int *);
static int nstat_control_send_description(nstat_control_state *state, nstat_src *src, u_int64_t context, u_int16_t hdr_flags);
static int nstat_control_send_update(nstat_control_state *state, nstat_src *src, u_int64_t context, u_int16_t hdr_flags, int *gone);
static errno_t nstat_control_send_removed(nstat_control_state *, nstat_src *);
static errno_t nstat_control_send_goodbye(nstat_control_state *state, nstat_src *src);
static void nstat_control_cleanup_source(nstat_control_state *state, nstat_src *src, boolean_t);
static bool nstat_control_reporting_allowed(nstat_control_state *state, nstat_src *src);
static boolean_t nstat_control_begin_query(nstat_control_state *state, const nstat_msg_hdr *hdrp);
static u_int16_t nstat_control_end_query(nstat_control_state *state, nstat_src *last_src, boolean_t partial);
static void nstat_ifnet_report_ecn_stats(void);

// Atomic counts of how many clients are currently watching each of these
// providers; used as fast-path checks by the event hooks.
static u_int32_t nstat_udp_watchers = 0;
static u_int32_t nstat_userland_udp_watchers = 0;
static u_int32_t nstat_tcp_watchers = 0;
static u_int32_t nstat_userland_tcp_watchers = 0;

static void nstat_control_register(void);

/*
 * The lock order is as follows:
 *
 * socket_lock (inpcb)
 * nstat_mtx
 * state->mtx
 */
// nstat_malloc_tag doubles as the "initialized" flag (see nstat_init).
static volatile OSMallocTag nstat_malloc_tag = NULL;
// Global list of attached clients, protected by nstat_mtx.
static nstat_control_state *nstat_controls = NULL;
static uint64_t nstat_idle_time = 0;
static decl_lck_mtx_data(, nstat_mtx);

/* some extern definitions */
extern void mbuf_report_peak_usage(void);
extern void tcp_report_stats(void);
195
196 static void
197 nstat_copy_sa_out(
198 const struct sockaddr *src,
199 struct sockaddr *dst,
200 int maxlen)
201 {
202 if (src->sa_len > maxlen) return;
203
204 bcopy(src, dst, src->sa_len);
205 if (src->sa_family == AF_INET6 &&
206 src->sa_len >= sizeof(struct sockaddr_in6))
207 {
208 struct sockaddr_in6 *sin6 = (struct sockaddr_in6*)(void *)dst;
209 if (IN6_IS_SCOPE_EMBED(&sin6->sin6_addr))
210 {
211 if (sin6->sin6_scope_id == 0)
212 sin6->sin6_scope_id = ntohs(sin6->sin6_addr.s6_addr16[1]);
213 sin6->sin6_addr.s6_addr16[1] = 0;
214 }
215 }
216 }
217
218 static void
219 nstat_ip_to_sockaddr(
220 const struct in_addr *ip,
221 u_int16_t port,
222 struct sockaddr_in *sin,
223 u_int32_t maxlen)
224 {
225 if (maxlen < sizeof(struct sockaddr_in))
226 return;
227
228 sin->sin_family = AF_INET;
229 sin->sin_len = sizeof(*sin);
230 sin->sin_port = port;
231 sin->sin_addr = *ip;
232 }
233
234 static void
235 nstat_ip6_to_sockaddr(
236 const struct in6_addr *ip6,
237 u_int16_t port,
238 struct sockaddr_in6 *sin6,
239 u_int32_t maxlen)
240 {
241 if (maxlen < sizeof(struct sockaddr_in6))
242 return;
243
244 sin6->sin6_family = AF_INET6;
245 sin6->sin6_len = sizeof(*sin6);
246 sin6->sin6_port = port;
247 sin6->sin6_addr = *ip6;
248 if (IN6_IS_SCOPE_EMBED(&sin6->sin6_addr))
249 {
250 sin6->sin6_scope_id = ntohs(sin6->sin6_addr.s6_addr16[1]);
251 sin6->sin6_addr.s6_addr16[1] = 0;
252 }
253 }
254
255 static u_int16_t
256 nstat_ifnet_to_flags(
257 struct ifnet *ifp)
258 {
259 u_int16_t flags = 0;
260 u_int32_t functional_type = if_functional_type(ifp, FALSE);
261
262 /* Panic if someone adds a functional type without updating ntstat. */
263 VERIFY(0 <= functional_type && functional_type <= IFRTYPE_FUNCTIONAL_LAST);
264
265 switch (functional_type)
266 {
267 case IFRTYPE_FUNCTIONAL_UNKNOWN:
268 flags |= NSTAT_IFNET_IS_UNKNOWN_TYPE;
269 break;
270 case IFRTYPE_FUNCTIONAL_LOOPBACK:
271 flags |= NSTAT_IFNET_IS_LOOPBACK;
272 break;
273 case IFRTYPE_FUNCTIONAL_WIRED:
274 flags |= NSTAT_IFNET_IS_WIRED;
275 break;
276 case IFRTYPE_FUNCTIONAL_WIFI_INFRA:
277 flags |= NSTAT_IFNET_IS_WIFI;
278 break;
279 case IFRTYPE_FUNCTIONAL_WIFI_AWDL:
280 flags |= NSTAT_IFNET_IS_WIFI;
281 flags |= NSTAT_IFNET_IS_AWDL;
282 break;
283 case IFRTYPE_FUNCTIONAL_CELLULAR:
284 flags |= NSTAT_IFNET_IS_CELLULAR;
285 break;
286 }
287
288 if (IFNET_IS_EXPENSIVE(ifp))
289 {
290 flags |= NSTAT_IFNET_IS_EXPENSIVE;
291 }
292
293 return flags;
294 }
295
296 static u_int16_t
297 nstat_inpcb_to_flags(
298 const struct inpcb *inp)
299 {
300 u_int16_t flags = 0;
301
302 if ((inp != NULL ) && (inp->inp_last_outifp != NULL))
303 {
304 struct ifnet *ifp = inp->inp_last_outifp;
305 flags = nstat_ifnet_to_flags(ifp);
306
307 if (flags & NSTAT_IFNET_IS_CELLULAR)
308 {
309 if (inp->inp_socket != NULL &&
310 (inp->inp_socket->so_flags1 & SOF1_CELLFALLBACK))
311 flags |= NSTAT_IFNET_VIA_CELLFALLBACK;
312 }
313 }
314 else
315 {
316 flags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
317 }
318
319 return flags;
320 }
321
#pragma mark -- Network Statistic Providers --

// Attach one source to a client; defined in the control section below.
static errno_t nstat_control_source_add(u_int64_t context, nstat_control_state *state, nstat_provider *provider, nstat_provider_cookie_t cookie);
// Head of the singly-linked list of registered providers.
struct nstat_provider *nstat_providers = NULL;
326
327 static struct nstat_provider*
328 nstat_find_provider_by_id(
329 nstat_provider_id_t id)
330 {
331 struct nstat_provider *provider;
332
333 for (provider = nstat_providers; provider != NULL; provider = provider->next)
334 {
335 if (provider->nstat_provider_id == id)
336 break;
337 }
338
339 return provider;
340 }
341
342 static errno_t
343 nstat_lookup_entry(
344 nstat_provider_id_t id,
345 const void *data,
346 u_int32_t length,
347 nstat_provider **out_provider,
348 nstat_provider_cookie_t *out_cookie)
349 {
350 *out_provider = nstat_find_provider_by_id(id);
351 if (*out_provider == NULL)
352 {
353 return ENOENT;
354 }
355
356 return (*out_provider)->nstat_lookup(data, length, out_cookie);
357 }
358
// Per-provider initialization routines, defined in their sections below.
static void nstat_init_route_provider(void);
static void nstat_init_tcp_provider(void);
static void nstat_init_userland_tcp_provider(void);
static void nstat_init_udp_provider(void);
static void nstat_init_userland_udp_provider(void);
static void nstat_init_ifnet_provider(void);

/*
 * One-time subsystem initialization. Safe to call from multiple threads:
 * nstat_malloc_tag doubles as the "initialized" flag, and the
 * compare-and-swap below guarantees exactly one caller runs the
 * provider/control registration.
 */
__private_extern__ void
nstat_init(void)
{
    // Already initialized — nothing to do.
    if (nstat_malloc_tag != NULL) return;

    OSMallocTag tag = OSMalloc_Tagalloc(NET_STAT_CONTROL_NAME, OSMT_DEFAULT);
    if (!OSCompareAndSwapPtr(NULL, tag, &nstat_malloc_tag))
    {
        // Lost the race: free our tag and adopt the winner's.
        OSMalloc_Tagfree(tag);
        tag = nstat_malloc_tag;
    }
    else
    {
        // we need to initialize other things, we do it here as this code path will only be hit once;
        nstat_init_route_provider();
        nstat_init_tcp_provider();
        nstat_init_userland_tcp_provider();
        nstat_init_udp_provider();
        nstat_init_userland_udp_provider();
        nstat_init_ifnet_provider();
        nstat_control_register();
    }
}
389
#pragma mark -- Aligned Buffer Allocation --

// Bookkeeping stored immediately before each aligned allocation:
// 'offset' is the distance back from the aligned pointer to the start of
// the raw OSMalloc block, and 'length' is the raw block's total size —
// both are needed to reconstruct the OSFree() call.
struct align_header
{
    u_int32_t offset;
    u_int32_t length;
};
397
398 static void*
399 nstat_malloc_aligned(
400 u_int32_t length,
401 u_int8_t alignment,
402 OSMallocTag tag)
403 {
404 struct align_header *hdr = NULL;
405 u_int32_t size = length + sizeof(*hdr) + alignment - 1;
406
407 u_int8_t *buffer = OSMalloc(size, tag);
408 if (buffer == NULL) return NULL;
409
410 u_int8_t *aligned = buffer + sizeof(*hdr);
411 aligned = (u_int8_t*)P2ROUNDUP(aligned, alignment);
412
413 hdr = (struct align_header*)(void *)(aligned - sizeof(*hdr));
414 hdr->offset = aligned - buffer;
415 hdr->length = size;
416
417 return aligned;
418 }
419
/*
 * Free a buffer previously returned by nstat_malloc_aligned().
 * The align_header stored just before 'buffer' tells us where the raw
 * OSMalloc block starts and how large it was.
 */
static void
nstat_free_aligned(
    void *buffer,
    OSMallocTag tag)
{
    struct align_header *hdr = (struct align_header*)(void *)((u_int8_t*)buffer - sizeof(*hdr));
    OSFree(((char*)buffer) - hdr->offset, hdr->length, tag);
}
428
#pragma mark -- Route Provider --

// Provider instance for route sources; populated in nstat_init_route_provider().
static nstat_provider nstat_route_provider;
432
433 static errno_t
434 nstat_route_lookup(
435 const void *data,
436 u_int32_t length,
437 nstat_provider_cookie_t *out_cookie)
438 {
439 // rt_lookup doesn't take const params but it doesn't modify the parameters for
440 // the lookup. So...we use a union to eliminate the warning.
441 union
442 {
443 struct sockaddr *sa;
444 const struct sockaddr *const_sa;
445 } dst, mask;
446
447 const nstat_route_add_param *param = (const nstat_route_add_param*)data;
448 *out_cookie = NULL;
449
450 if (length < sizeof(*param))
451 {
452 return EINVAL;
453 }
454
455 if (param->dst.v4.sin_family == 0 ||
456 param->dst.v4.sin_family > AF_MAX ||
457 (param->mask.v4.sin_family != 0 && param->mask.v4.sin_family != param->dst.v4.sin_family))
458 {
459 return EINVAL;
460 }
461
462 if (param->dst.v4.sin_len > sizeof(param->dst) ||
463 (param->mask.v4.sin_family && param->mask.v4.sin_len > sizeof(param->mask.v4.sin_len)))
464 {
465 return EINVAL;
466 }
467 if ((param->dst.v4.sin_family == AF_INET &&
468 param->dst.v4.sin_len < sizeof(struct sockaddr_in)) ||
469 (param->dst.v6.sin6_family == AF_INET6 &&
470 param->dst.v6.sin6_len < sizeof(struct sockaddr_in6)))
471 {
472 return EINVAL;
473 }
474
475 dst.const_sa = (const struct sockaddr*)&param->dst;
476 mask.const_sa = param->mask.v4.sin_family ? (const struct sockaddr*)&param->mask : NULL;
477
478 struct radix_node_head *rnh = rt_tables[dst.sa->sa_family];
479 if (rnh == NULL) return EAFNOSUPPORT;
480
481 lck_mtx_lock(rnh_lock);
482 struct rtentry *rt = rt_lookup(TRUE, dst.sa, mask.sa, rnh, param->ifindex);
483 lck_mtx_unlock(rnh_lock);
484
485 if (rt) *out_cookie = (nstat_provider_cookie_t)rt;
486
487 return rt ? 0 : ENOENT;
488 }
489
490 static int
491 nstat_route_gone(
492 nstat_provider_cookie_t cookie)
493 {
494 struct rtentry *rt = (struct rtentry*)cookie;
495 return ((rt->rt_flags & RTF_UP) == 0) ? 1 : 0;
496 }
497
498 static errno_t
499 nstat_route_counts(
500 nstat_provider_cookie_t cookie,
501 struct nstat_counts *out_counts,
502 int *out_gone)
503 {
504 struct rtentry *rt = (struct rtentry*)cookie;
505 struct nstat_counts *rt_stats = rt->rt_stats;
506
507 if (out_gone) *out_gone = 0;
508
509 if (out_gone && (rt->rt_flags & RTF_UP) == 0) *out_gone = 1;
510
511 if (rt_stats)
512 {
513 atomic_get_64(out_counts->nstat_rxpackets, &rt_stats->nstat_rxpackets);
514 atomic_get_64(out_counts->nstat_rxbytes, &rt_stats->nstat_rxbytes);
515 atomic_get_64(out_counts->nstat_txpackets, &rt_stats->nstat_txpackets);
516 atomic_get_64(out_counts->nstat_txbytes, &rt_stats->nstat_txbytes);
517 out_counts->nstat_rxduplicatebytes = rt_stats->nstat_rxduplicatebytes;
518 out_counts->nstat_rxoutoforderbytes = rt_stats->nstat_rxoutoforderbytes;
519 out_counts->nstat_txretransmit = rt_stats->nstat_txretransmit;
520 out_counts->nstat_connectattempts = rt_stats->nstat_connectattempts;
521 out_counts->nstat_connectsuccesses = rt_stats->nstat_connectsuccesses;
522 out_counts->nstat_min_rtt = rt_stats->nstat_min_rtt;
523 out_counts->nstat_avg_rtt = rt_stats->nstat_avg_rtt;
524 out_counts->nstat_var_rtt = rt_stats->nstat_var_rtt;
525 out_counts->nstat_cell_rxbytes = out_counts->nstat_cell_txbytes = 0;
526 }
527 else
528 {
529 bzero(out_counts, sizeof(*out_counts));
530 }
531
532 return 0;
533 }
534
/*
 * Provider release callback for routes: drop the rtentry reference the
 * cookie holds (taken by rt_lookup or RT_ADDREF at source-add time).
 * 'locked' is unused because rtfree() needs no caller-held lock here.
 */
static void
nstat_route_release(
    nstat_provider_cookie_t cookie,
    __unused int locked)
{
    rtfree((struct rtentry*)cookie);
}

// Number of clients currently watching the route provider.
static u_int32_t nstat_route_watchers = 0;
544
/*
 * rnh_walktree() callback used when a client starts watching routes:
 * adds every usable route in the tree as a source for the client passed
 * in 'context'. Called with rnh_lock held.
 */
static int
nstat_route_walktree_add(
    struct radix_node *rn,
    void *context)
{
    errno_t result = 0;
    struct rtentry *rt = (struct rtentry *)rn;
    nstat_control_state *state = (nstat_control_state*)context;

    lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);

    /* RTF_UP can't change while rnh_lock is held */
    if ((rt->rt_flags & RTF_UP) != 0)
    {
        /* Clear RTPRF_OURS if the route is still usable */
        RT_LOCK(rt);
        if (rt_validate(rt)) {
            // Take the reference the new source will own.
            RT_ADDREF_LOCKED(rt);
            RT_UNLOCK(rt);
        } else {
            RT_UNLOCK(rt);
            rt = NULL;
        }

        /* Otherwise if RTF_CONDEMNED, treat it as if it were down */
        if (rt == NULL)
            return (0);

        result = nstat_control_source_add(0, state, &nstat_route_provider, rt);
        if (result != 0)
            // Source add failed; give back the reference taken above.
            rtfree_locked(rt);
    }

    return result;
}
580
581 static errno_t
582 nstat_route_add_watcher(
583 nstat_control_state *state)
584 {
585 int i;
586 errno_t result = 0;
587 OSIncrementAtomic(&nstat_route_watchers);
588
589 lck_mtx_lock(rnh_lock);
590 for (i = 1; i < AF_MAX; i++)
591 {
592 struct radix_node_head *rnh;
593 rnh = rt_tables[i];
594 if (!rnh) continue;
595
596 result = rnh->rnh_walktree(rnh, nstat_route_walktree_add, state);
597 if (result != 0)
598 {
599 break;
600 }
601 }
602 lck_mtx_unlock(rnh_lock);
603
604 return result;
605 }
606
607 __private_extern__ void
608 nstat_route_new_entry(
609 struct rtentry *rt)
610 {
611 if (nstat_route_watchers == 0)
612 return;
613
614 lck_mtx_lock(&nstat_mtx);
615 if ((rt->rt_flags & RTF_UP) != 0)
616 {
617 nstat_control_state *state;
618 for (state = nstat_controls; state; state = state->ncs_next)
619 {
620 if ((state->ncs_watching & (1 << NSTAT_PROVIDER_ROUTE)) != 0)
621 {
622 // this client is watching routes
623 // acquire a reference for the route
624 RT_ADDREF(rt);
625
626 // add the source, if that fails, release the reference
627 if (nstat_control_source_add(0, state, &nstat_route_provider, rt) != 0)
628 RT_REMREF(rt);
629 }
630 }
631 }
632 lck_mtx_unlock(&nstat_mtx);
633 }
634
/*
 * Provider watcher-remove callback for routes: drop the global route
 * watcher count for this client.
 */
static void
nstat_route_remove_watcher(
    __unused nstat_control_state *state)
{
    OSDecrementAtomic(&nstat_route_watchers);
}
641
642 static errno_t
643 nstat_route_copy_descriptor(
644 nstat_provider_cookie_t cookie,
645 void *data,
646 u_int32_t len)
647 {
648 nstat_route_descriptor *desc = (nstat_route_descriptor*)data;
649 if (len < sizeof(*desc))
650 {
651 return EINVAL;
652 }
653 bzero(desc, sizeof(*desc));
654
655 struct rtentry *rt = (struct rtentry*)cookie;
656 desc->id = (uint64_t)VM_KERNEL_ADDRPERM(rt);
657 desc->parent_id = (uint64_t)VM_KERNEL_ADDRPERM(rt->rt_parent);
658 desc->gateway_id = (uint64_t)VM_KERNEL_ADDRPERM(rt->rt_gwroute);
659
660
661 // key/dest
662 struct sockaddr *sa;
663 if ((sa = rt_key(rt)))
664 nstat_copy_sa_out(sa, &desc->dst.sa, sizeof(desc->dst));
665
666 // mask
667 if ((sa = rt_mask(rt)) && sa->sa_len <= sizeof(desc->mask))
668 memcpy(&desc->mask, sa, sa->sa_len);
669
670 // gateway
671 if ((sa = rt->rt_gateway))
672 nstat_copy_sa_out(sa, &desc->gateway.sa, sizeof(desc->gateway));
673
674 if (rt->rt_ifp)
675 desc->ifindex = rt->rt_ifp->if_index;
676
677 desc->flags = rt->rt_flags;
678
679 return 0;
680 }
681
682 static bool
683 nstat_route_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter)
684 {
685 bool retval = true;
686
687 if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0)
688 {
689 struct rtentry *rt = (struct rtentry*)cookie;
690 struct ifnet *ifp = rt->rt_ifp;
691
692 if (ifp)
693 {
694 uint16_t interface_properties = nstat_ifnet_to_flags(ifp);
695
696 if ((filter->npf_flags & interface_properties) == 0)
697 {
698 retval = false;
699 }
700 }
701 }
702 return retval;
703 }
704
705 static void
706 nstat_init_route_provider(void)
707 {
708 bzero(&nstat_route_provider, sizeof(nstat_route_provider));
709 nstat_route_provider.nstat_descriptor_length = sizeof(nstat_route_descriptor);
710 nstat_route_provider.nstat_provider_id = NSTAT_PROVIDER_ROUTE;
711 nstat_route_provider.nstat_lookup = nstat_route_lookup;
712 nstat_route_provider.nstat_gone = nstat_route_gone;
713 nstat_route_provider.nstat_counts = nstat_route_counts;
714 nstat_route_provider.nstat_release = nstat_route_release;
715 nstat_route_provider.nstat_watcher_add = nstat_route_add_watcher;
716 nstat_route_provider.nstat_watcher_remove = nstat_route_remove_watcher;
717 nstat_route_provider.nstat_copy_descriptor = nstat_route_copy_descriptor;
718 nstat_route_provider.nstat_reporting_allowed = nstat_route_reporting_allowed;
719 nstat_route_provider.next = nstat_providers;
720 nstat_providers = &nstat_route_provider;
721 }
722
#pragma mark -- Route Collection --

/*
 * Return the route's stats block, lazily allocating it on first use.
 * Concurrent attachers race via compare-and-swap: the loser frees its
 * allocation and adopts the winner's. Returns NULL only if allocation
 * failed.
 */
static struct nstat_counts*
nstat_route_attach(
    struct rtentry *rte)
{
    struct nstat_counts *result = rte->rt_stats;
    if (result) return result;

    // Collection hooks can fire before nstat_init() has run.
    if (nstat_malloc_tag == NULL) nstat_init();

    // Aligned for the 64-bit counters inside.
    result = nstat_malloc_aligned(sizeof(*result), sizeof(u_int64_t), nstat_malloc_tag);
    if (!result) return result;

    bzero(result, sizeof(*result));

    if (!OSCompareAndSwapPtr(NULL, result, &rte->rt_stats))
    {
        // Another thread attached first; discard ours and use theirs.
        nstat_free_aligned(result, nstat_malloc_tag);
        result = rte->rt_stats;
    }

    return result;
}
747
748 __private_extern__ void
749 nstat_route_detach(
750 struct rtentry *rte)
751 {
752 if (rte->rt_stats)
753 {
754 nstat_free_aligned(rte->rt_stats, nstat_malloc_tag);
755 rte->rt_stats = NULL;
756 }
757 }
758
759 __private_extern__ void
760 nstat_route_connect_attempt(
761 struct rtentry *rte)
762 {
763 while (rte)
764 {
765 struct nstat_counts* stats = nstat_route_attach(rte);
766 if (stats)
767 {
768 OSIncrementAtomic(&stats->nstat_connectattempts);
769 }
770
771 rte = rte->rt_parent;
772 }
773 }
774
775 __private_extern__ void
776 nstat_route_connect_success(
777 struct rtentry *rte)
778 {
779 // This route
780 while (rte)
781 {
782 struct nstat_counts* stats = nstat_route_attach(rte);
783 if (stats)
784 {
785 OSIncrementAtomic(&stats->nstat_connectsuccesses);
786 }
787
788 rte = rte->rt_parent;
789 }
790 }
791
792 __private_extern__ void
793 nstat_route_tx(
794 struct rtentry *rte,
795 u_int32_t packets,
796 u_int32_t bytes,
797 u_int32_t flags)
798 {
799 while (rte)
800 {
801 struct nstat_counts* stats = nstat_route_attach(rte);
802 if (stats)
803 {
804 if ((flags & NSTAT_TX_FLAG_RETRANSMIT) != 0)
805 {
806 OSAddAtomic(bytes, &stats->nstat_txretransmit);
807 }
808 else
809 {
810 OSAddAtomic64((SInt64)packets, (SInt64*)&stats->nstat_txpackets);
811 OSAddAtomic64((SInt64)bytes, (SInt64*)&stats->nstat_txbytes);
812 }
813 }
814
815 rte = rte->rt_parent;
816 }
817 }
818
819 __private_extern__ void
820 nstat_route_rx(
821 struct rtentry *rte,
822 u_int32_t packets,
823 u_int32_t bytes,
824 u_int32_t flags)
825 {
826 while (rte)
827 {
828 struct nstat_counts* stats = nstat_route_attach(rte);
829 if (stats)
830 {
831 if (flags == 0)
832 {
833 OSAddAtomic64((SInt64)packets, (SInt64*)&stats->nstat_rxpackets);
834 OSAddAtomic64((SInt64)bytes, (SInt64*)&stats->nstat_rxbytes);
835 }
836 else
837 {
838 if (flags & NSTAT_RX_FLAG_OUT_OF_ORDER)
839 OSAddAtomic(bytes, &stats->nstat_rxoutoforderbytes);
840 if (flags & NSTAT_RX_FLAG_DUPLICATE)
841 OSAddAtomic(bytes, &stats->nstat_rxduplicatebytes);
842 }
843 }
844
845 rte = rte->rt_parent;
846 }
847 }
848
/*
 * Fold a new RTT sample into the stats of this route and all ancestors.
 * The average and variance are exponentially weighted moving averages
 * with weight 1/factor (1/8); the minimum is a simple floor. All three
 * are updated with lock-free compare-and-swap retry loops so concurrent
 * samplers never lose updates.
 */
__private_extern__ void
nstat_route_rtt(
    struct rtentry *rte,
    u_int32_t rtt,
    u_int32_t rtt_var)
{
    const int32_t factor = 8;

    while (rte)
    {
        struct nstat_counts* stats = nstat_route_attach(rte);
        if (stats)
        {
            int32_t oldrtt;
            int32_t newrtt;

            // average
            do
            {
                oldrtt = stats->nstat_avg_rtt;
                if (oldrtt == 0)
                {
                    // First sample seeds the average directly.
                    newrtt = rtt;
                }
                else
                {
                    // EWMA: new = old - (old - sample) / 8
                    newrtt = oldrtt - (oldrtt - (int32_t)rtt) / factor;
                }
                if (oldrtt == newrtt) break;
            } while (!OSCompareAndSwap(oldrtt, newrtt, &stats->nstat_avg_rtt));

            // minimum
            do
            {
                oldrtt = stats->nstat_min_rtt;
                if (oldrtt != 0 && oldrtt < (int32_t)rtt)
                {
                    // Current minimum already lower; nothing to do.
                    break;
                }
            } while (!OSCompareAndSwap(oldrtt, rtt, &stats->nstat_min_rtt));

            // variance
            do
            {
                oldrtt = stats->nstat_var_rtt;
                if (oldrtt == 0)
                {
                    newrtt = rtt_var;
                }
                else
                {
                    newrtt = oldrtt - (oldrtt - (int32_t)rtt_var) / factor;
                }
                if (oldrtt == newrtt) break;
            } while (!OSCompareAndSwap(oldrtt, newrtt, &stats->nstat_var_rtt));
        }

        rte = rte->rt_parent;
    }
}
909
910
#pragma mark -- TCP Kernel Provider --

/*
 * Due to the way the kernel deallocates a process (the process structure
 * might be gone by the time we get the PCB detach notification),
 * we need to cache the process name. Without this, proc_name() would
 * return null and the process name would never be sent to userland.
 *
 * For UDP sockets, we also store the cached the connection tuples along with
 * the interface index. This is necessary because when UDP sockets are
 * disconnected, the connection tuples are forever lost from the inpcb, thus
 * we need to keep track of the last call to connect() in ntstat.
 */
struct nstat_tucookie {
    struct inpcb *inp;          // backing pcb; refcounted via alloc/release below
    char pname[MAXCOMLEN+1];    // cached process name (see comment above)
    bool cached;                // NOTE(review): presumably set once the tuple below
                                // has been captured — confirm against UDP paths
    union
    {
        struct sockaddr_in v4;
        struct sockaddr_in6 v6;
    } local;                    // cached local address (UDP tuple caching)
    union
    {
        struct sockaddr_in v4;
        struct sockaddr_in6 v6;
    } remote;                   // cached remote address (UDP tuple caching)
    unsigned int if_index;      // cached interface index
    uint16_t ifnet_properties;  // cached NSTAT_IFNET_* flags
};
941
/*
 * Allocate a tuple cookie for an inpcb.
 * 'ref' requests a WNT_ACQUIRE hold on the pcb (fails if the pcb is
 * stopping); 'locked' is passed through to in_pcb_checkstate to indicate
 * the caller already holds the relevant pcb lock.
 * Returns NULL on allocation failure or if the pcb is going away.
 */
static struct nstat_tucookie *
nstat_tucookie_alloc_internal(
    struct inpcb *inp,
    bool ref,
    bool locked)
{
    struct nstat_tucookie *cookie;

    cookie = OSMalloc(sizeof(*cookie), nstat_malloc_tag);
    if (cookie == NULL)
        return NULL;
    // NOTE(review): presumably a lock-ordering guard — when not already
    // locked, we must not be holding nstat_mtx here; confirm.
    if (!locked)
        lck_mtx_assert(&nstat_mtx, LCK_MTX_ASSERT_NOTOWNED);
    if (ref && in_pcb_checkstate(inp, WNT_ACQUIRE, locked) == WNT_STOPUSING)
    {
        // pcb is being torn down; don't hand out a cookie for it.
        OSFree(cookie, sizeof(*cookie), nstat_malloc_tag);
        return NULL;
    }
    bzero(cookie, sizeof(*cookie));
    cookie->inp = inp;
    // Cache the owning process name now — it may be unavailable later
    // (see the struct comment above).
    proc_name(inp->inp_socket->last_pid, cookie->pname,
        sizeof(cookie->pname));
    /*
     * We only increment the reference count for UDP sockets because we
     * only cache UDP socket tuples.
     */
    if (SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP)
        OSIncrementAtomic(&inp->inp_nstat_refcnt);

    return cookie;
}
973
// Allocate a cookie without taking a pcb hold (caller already has one).
static struct nstat_tucookie *
nstat_tucookie_alloc(
    struct inpcb *inp)
{
    return nstat_tucookie_alloc_internal(inp, false, false);
}

// Allocate a cookie and take a WNT_ACQUIRE hold on the pcb; caller does
// not hold the pcb lock.
static struct nstat_tucookie *
nstat_tucookie_alloc_ref(
    struct inpcb *inp)
{
    return nstat_tucookie_alloc_internal(inp, true, false);
}

// As above, but for callers that already hold the relevant pcb lock.
static struct nstat_tucookie *
nstat_tucookie_alloc_ref_locked(
    struct inpcb *inp)
{
    return nstat_tucookie_alloc_internal(inp, true, true);
}
994
/*
 * Drop the cookie's holds on its inpcb and free the cookie.
 * 'inplock' indicates whether the caller already holds the pcb lock and
 * is passed through to in_pcb_checkstate().
 */
static void
nstat_tucookie_release_internal(
    struct nstat_tucookie *cookie,
    int inplock)
{
    // Undo the refcount taken at alloc time (UDP tuple caching only).
    if (SOCK_PROTO(cookie->inp->inp_socket) == IPPROTO_UDP)
        OSDecrementAtomic(&cookie->inp->inp_nstat_refcnt);
    in_pcb_checkstate(cookie->inp, WNT_RELEASE, inplock);
    OSFree(cookie, sizeof(*cookie), nstat_malloc_tag);
}

// Release without the pcb lock held.
static void
nstat_tucookie_release(
    struct nstat_tucookie *cookie)
{
    nstat_tucookie_release_internal(cookie, false);
}

// Release with the pcb lock already held by the caller.
static void
nstat_tucookie_release_locked(
    struct nstat_tucookie *cookie)
{
    nstat_tucookie_release_internal(cookie, true);
}


// Provider instance for kernel TCP sources; populated in
// nstat_init_tcp_provider().
static nstat_provider nstat_tcp_provider;
1022
/*
 * Common lookup for TCP and UDP sources: validate the client-supplied
 * local/remote address tuple and find the matching inpcb in 'inpinfo'.
 * On success the pcb reference taken by in_pcblookup_hash /
 * in6_pcblookup_hash is wrapped in a freshly allocated tucookie and
 * returned through *out_cookie.
 */
static errno_t
nstat_tcpudp_lookup(
    struct inpcbinfo *inpinfo,
    const void *data,
    u_int32_t length,
    nstat_provider_cookie_t *out_cookie)
{
    struct inpcb *inp = NULL;

    // parameter validation
    const nstat_tcp_add_param *param = (const nstat_tcp_add_param*)data;
    if (length < sizeof(*param))
    {
        return EINVAL;
    }

    // src and dst must match
    if (param->remote.v4.sin_family != 0 &&
        param->remote.v4.sin_family != param->local.v4.sin_family)
    {
        return EINVAL;
    }


    switch (param->local.v4.sin_family)
    {
        case AF_INET:
        {
            // Exact sockaddr_in lengths required (remote may be absent).
            if (param->local.v4.sin_len != sizeof(param->local.v4) ||
                (param->remote.v4.sin_family != 0 &&
                 param->remote.v4.sin_len != sizeof(param->remote.v4)))
            {
                return EINVAL;
            }

            inp = in_pcblookup_hash(inpinfo, param->remote.v4.sin_addr, param->remote.v4.sin_port,
                param->local.v4.sin_addr, param->local.v4.sin_port, 1, NULL);
        }
        break;

#if INET6
        case AF_INET6:
        {
            // The pcb lookup doesn't take const addresses; launder away
            // constness through a union instead of a cast.
            union
            {
                const struct in6_addr *in6c;
                struct in6_addr *in6;
            } local, remote;

            if (param->local.v6.sin6_len != sizeof(param->local.v6) ||
                (param->remote.v6.sin6_family != 0 &&
                 param->remote.v6.sin6_len != sizeof(param->remote.v6)))
            {
                return EINVAL;
            }

            local.in6c = &param->local.v6.sin6_addr;
            remote.in6c = &param->remote.v6.sin6_addr;

            inp = in6_pcblookup_hash(inpinfo, remote.in6, param->remote.v6.sin6_port,
                local.in6, param->local.v6.sin6_port, 1, NULL);
        }
        break;
#endif

        default:
            return EINVAL;
    }

    if (inp == NULL)
        return ENOENT;

    // At this point we have a ref to the inpcb
    *out_cookie = nstat_tucookie_alloc(inp);
    if (*out_cookie == NULL)
        // Cookie allocation failed; give the pcb reference back.
        in_pcb_checkstate(inp, WNT_RELEASE, 0);

    return 0;
}
1102
/*
 * Provider lookup callback for kernel TCP: resolve a client-supplied
 * tuple against the global TCP pcb table.
 */
static errno_t
nstat_tcp_lookup(
    const void *data,
    u_int32_t length,
    nstat_provider_cookie_t *out_cookie)
{
    return nstat_tcpudp_lookup(&tcbinfo, data, length, out_cookie);
}
1111
1112 static int
1113 nstat_tcp_gone(
1114 nstat_provider_cookie_t cookie)
1115 {
1116 struct nstat_tucookie *tucookie =
1117 (struct nstat_tucookie *)cookie;
1118 struct inpcb *inp;
1119 struct tcpcb *tp;
1120
1121 return (!(inp = tucookie->inp) ||
1122 !(tp = intotcpcb(inp)) ||
1123 inp->inp_state == INPCB_STATE_DEAD) ? 1 : 0;
1124 }
1125
/*
 * Provider counts callback for TCP: fill counters from the pcb/tcpcb.
 * If the source is gone, *out_gone is set; when the pcb or tcpcb is
 * already unreachable, returns EINVAL with the counters zeroed.
 */
static errno_t
nstat_tcp_counts(
    nstat_provider_cookie_t cookie,
    struct nstat_counts *out_counts,
    int *out_gone)
{
    struct nstat_tucookie *tucookie =
        (struct nstat_tucookie *)cookie;
    struct inpcb *inp;

    bzero(out_counts, sizeof(*out_counts));

    if (out_gone) *out_gone = 0;

    // if the pcb is in the dead state, we should stop using it
    if (nstat_tcp_gone(cookie))
    {
        if (out_gone) *out_gone = 1;
        // Still report counts if the pcb/tcpcb remain reachable.
        if (!(inp = tucookie->inp) || !intotcpcb(inp))
            return EINVAL;
    }
    inp = tucookie->inp;
    struct tcpcb *tp = intotcpcb(inp);

    // 64-bit counters are read via atomic_get_64.
    atomic_get_64(out_counts->nstat_rxpackets, &inp->inp_stat->rxpackets);
    atomic_get_64(out_counts->nstat_rxbytes, &inp->inp_stat->rxbytes);
    atomic_get_64(out_counts->nstat_txpackets, &inp->inp_stat->txpackets);
    atomic_get_64(out_counts->nstat_txbytes, &inp->inp_stat->txbytes);
    out_counts->nstat_rxduplicatebytes = tp->t_stat.rxduplicatebytes;
    out_counts->nstat_rxoutoforderbytes = tp->t_stat.rxoutoforderbytes;
    out_counts->nstat_txretransmit = tp->t_stat.txretransmitbytes;
    // Attempt/success are derived from the TCP state machine.
    out_counts->nstat_connectattempts = tp->t_state >= TCPS_SYN_SENT ? 1 : 0;
    out_counts->nstat_connectsuccesses = tp->t_state >= TCPS_ESTABLISHED ? 1 : 0;
    out_counts->nstat_avg_rtt = tp->t_srtt;
    out_counts->nstat_min_rtt = tp->t_rttbest;
    out_counts->nstat_var_rtt = tp->t_rttvar;
    // Clamp: never report a minimum above the average.
    if (out_counts->nstat_avg_rtt < out_counts->nstat_min_rtt)
        out_counts->nstat_min_rtt = out_counts->nstat_avg_rtt;
    // Per-interface-class byte counts (cellular / wifi / wired).
    atomic_get_64(out_counts->nstat_cell_rxbytes, &inp->inp_cstat->rxbytes);
    atomic_get_64(out_counts->nstat_cell_txbytes, &inp->inp_cstat->txbytes);
    atomic_get_64(out_counts->nstat_wifi_rxbytes, &inp->inp_wstat->rxbytes);
    atomic_get_64(out_counts->nstat_wifi_txbytes, &inp->inp_wstat->txbytes);
    atomic_get_64(out_counts->nstat_wired_rxbytes, &inp->inp_Wstat->rxbytes);
    atomic_get_64(out_counts->nstat_wired_txbytes, &inp->inp_Wstat->txbytes);

    return 0;
}
1173
1174 static void
1175 nstat_tcp_release(
1176 nstat_provider_cookie_t cookie,
1177 int locked)
1178 {
1179 struct nstat_tucookie *tucookie =
1180 (struct nstat_tucookie *)cookie;
1181
1182 nstat_tucookie_release_internal(tucookie, locked);
1183 }
1184
1185 static errno_t
1186 nstat_tcp_add_watcher(
1187 nstat_control_state *state)
1188 {
1189 OSIncrementAtomic(&nstat_tcp_watchers);
1190
1191 lck_rw_lock_shared(tcbinfo.ipi_lock);
1192
1193 // Add all current tcp inpcbs. Ignore those in timewait
1194 struct inpcb *inp;
1195 struct nstat_tucookie *cookie;
1196 LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list)
1197 {
1198 cookie = nstat_tucookie_alloc_ref(inp);
1199 if (cookie == NULL)
1200 continue;
1201 if (nstat_control_source_add(0, state, &nstat_tcp_provider,
1202 cookie) != 0)
1203 {
1204 nstat_tucookie_release(cookie);
1205 break;
1206 }
1207 }
1208
1209 lck_rw_done(tcbinfo.ipi_lock);
1210
1211 return 0;
1212 }
1213
1214 static void
1215 nstat_tcp_remove_watcher(
1216 __unused nstat_control_state *state)
1217 {
1218 OSDecrementAtomic(&nstat_tcp_watchers);
1219 }
1220
1221 __private_extern__ void
1222 nstat_tcp_new_pcb(
1223 struct inpcb *inp)
1224 {
1225 struct nstat_tucookie *cookie;
1226
1227 if (nstat_tcp_watchers == 0)
1228 return;
1229
1230 socket_lock(inp->inp_socket, 0);
1231 lck_mtx_lock(&nstat_mtx);
1232 nstat_control_state *state;
1233 for (state = nstat_controls; state; state = state->ncs_next)
1234 {
1235 if ((state->ncs_watching & (1 << NSTAT_PROVIDER_TCP_KERNEL)) != 0)
1236 {
1237 // this client is watching tcp
1238 // acquire a reference for it
1239 cookie = nstat_tucookie_alloc_ref_locked(inp);
1240 if (cookie == NULL)
1241 continue;
1242 // add the source, if that fails, release the reference
1243 if (nstat_control_source_add(0, state,
1244 &nstat_tcp_provider, cookie) != 0)
1245 {
1246 nstat_tucookie_release_locked(cookie);
1247 break;
1248 }
1249 }
1250 }
1251 lck_mtx_unlock(&nstat_mtx);
1252 socket_unlock(inp->inp_socket, 0);
1253 }
1254
/*
 * Called when an inpcb is being detached: unlink every nstat source that
 * references it.  Teardown is two-phase — sources are unlinked onto a
 * private dead_list while holding the nstat/state locks, then cleaned up
 * after all locks are dropped (nstat_control_cleanup_source may do work
 * that must not run under those locks).
 */
__private_extern__ void
nstat_pcb_detach(struct inpcb *inp)
{
	nstat_control_state *state;
	nstat_src *src, *prevsrc;
	nstat_src *dead_list = NULL;
	struct nstat_tucookie *tucookie;
	errno_t result;

	// Nothing to do if no one is watching kernel TCP or UDP.
	if (inp == NULL || (nstat_tcp_watchers == 0 && nstat_udp_watchers == 0))
		return;

	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next)
	{
		lck_mtx_lock(&state->mtx);
		// Each control state holds at most one source per pcb;
		// find it along with its predecessor for unlinking.
		for (prevsrc = NULL, src = state->ncs_srcs; src;
		    prevsrc = src, src = src->next)
		{
			tucookie = (struct nstat_tucookie *)src->cookie;
			if (tucookie->inp == inp)
				break;
		}

		if (src)
		{
			// Best-effort final report; the result is ignored
			// because the source is being removed regardless.
			result = nstat_control_send_goodbye(state, src);

			if (prevsrc)
				prevsrc->next = src->next;
			else
				state->ncs_srcs = src->next;

			// Defer the actual cleanup until the locks are dropped.
			src->next = dead_list;
			dead_list = src;
		}
		lck_mtx_unlock(&state->mtx);
	}
	lck_mtx_unlock(&nstat_mtx);

	while (dead_list) {
		src = dead_list;
		dead_list = src->next;

		nstat_control_cleanup_source(NULL, src, TRUE);
	}
}
1302
/*
 * Cache the current addresses, interface and properties of a UDP pcb
 * into every cookie that references it.  Called before the pcb's
 * addressing may be invalidated (e.g. disconnect), so descriptors can
 * still be produced from the cached copy afterwards.
 */
__private_extern__ void
nstat_pcb_cache(struct inpcb *inp)
{
	nstat_control_state *state;
	nstat_src *src;
	struct nstat_tucookie *tucookie;

	// Only UDP pcbs that some watcher still references need caching.
	if (inp == NULL || nstat_udp_watchers == 0 ||
	    inp->inp_nstat_refcnt == 0)
		return;
	VERIFY(SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP);
	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next) {
		lck_mtx_lock(&state->mtx);
		for (src = state->ncs_srcs; src; src = src->next)
		{
			tucookie = (struct nstat_tucookie *)src->cookie;
			if (tucookie->inp == inp)
			{
				// Snapshot local/remote addresses in whichever
				// family the pcb is using.
				if (inp->inp_vflag & INP_IPV6)
				{
					nstat_ip6_to_sockaddr(&inp->in6p_laddr,
					    inp->inp_lport,
					    &tucookie->local.v6,
					    sizeof(tucookie->local));
					nstat_ip6_to_sockaddr(&inp->in6p_faddr,
					    inp->inp_fport,
					    &tucookie->remote.v6,
					    sizeof(tucookie->remote));
				}
				else if (inp->inp_vflag & INP_IPV4)
				{
					nstat_ip_to_sockaddr(&inp->inp_laddr,
					    inp->inp_lport,
					    &tucookie->local.v4,
					    sizeof(tucookie->local));
					nstat_ip_to_sockaddr(&inp->inp_faddr,
					    inp->inp_fport,
					    &tucookie->remote.v4,
					    sizeof(tucookie->remote));
				}
				if (inp->inp_last_outifp)
					tucookie->if_index =
					    inp->inp_last_outifp->if_index;

				tucookie->ifnet_properties = nstat_inpcb_to_flags(inp);
				// Mark the snapshot valid; cleared again by
				// nstat_pcb_invalidate_cache().
				tucookie->cached = true;
				break;
			}
		}
		lck_mtx_unlock(&state->mtx);
	}
	lck_mtx_unlock(&nstat_mtx);
}
1357
1358 __private_extern__ void
1359 nstat_pcb_invalidate_cache(struct inpcb *inp)
1360 {
1361 nstat_control_state *state;
1362 nstat_src *src;
1363 struct nstat_tucookie *tucookie;
1364
1365 if (inp == NULL || nstat_udp_watchers == 0 ||
1366 inp->inp_nstat_refcnt == 0)
1367 return;
1368 VERIFY(SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP);
1369 lck_mtx_lock(&nstat_mtx);
1370 for (state = nstat_controls; state; state = state->ncs_next) {
1371 lck_mtx_lock(&state->mtx);
1372 for (src = state->ncs_srcs; src; src = src->next)
1373 {
1374 tucookie = (struct nstat_tucookie *)src->cookie;
1375 if (tucookie->inp == inp)
1376 {
1377 tucookie->cached = false;
1378 break;
1379 }
1380 }
1381 lck_mtx_unlock(&state->mtx);
1382 }
1383 lck_mtx_unlock(&nstat_mtx);
1384 }
1385
/*
 * Fill a caller-supplied nstat_tcp_descriptor for this source:
 * addresses, TCP state, window/cwnd, congestion-control name and the
 * owning process identity.  Returns EINVAL if the buffer is too small
 * or the pcb is already gone.  Fields are read without locks, so
 * values may be momentarily inconsistent (noted below).
 */
static errno_t
nstat_tcp_copy_descriptor(
	nstat_provider_cookie_t cookie,
	void *data,
	u_int32_t len)
{
	if (len < sizeof(nstat_tcp_descriptor))
	{
		return EINVAL;
	}

	if (nstat_tcp_gone(cookie))
		return EINVAL;

	nstat_tcp_descriptor *desc = (nstat_tcp_descriptor*)data;
	struct nstat_tucookie *tucookie =
		(struct nstat_tucookie *)cookie;
	struct inpcb *inp = tucookie->inp;
	struct tcpcb *tp = intotcpcb(inp);
	bzero(desc, sizeof(*desc));

	// Local/remote endpoints, in whichever family the pcb uses.
	if (inp->inp_vflag & INP_IPV6)
	{
		nstat_ip6_to_sockaddr(&inp->in6p_laddr, inp->inp_lport,
			&desc->local.v6, sizeof(desc->local));
		nstat_ip6_to_sockaddr(&inp->in6p_faddr, inp->inp_fport,
			&desc->remote.v6, sizeof(desc->remote));
	}
	else if (inp->inp_vflag & INP_IPV4)
	{
		nstat_ip_to_sockaddr(&inp->inp_laddr, inp->inp_lport,
			&desc->local.v4, sizeof(desc->local));
		nstat_ip_to_sockaddr(&inp->inp_faddr, inp->inp_fport,
			&desc->remote.v4, sizeof(desc->remote));
	}

	desc->state = intotcpcb(inp)->t_state;
	desc->ifindex = (inp->inp_last_outifp == NULL) ? 0 :
	    inp->inp_last_outifp->if_index;

	// danger - not locked, values could be bogus
	desc->txunacked = tp->snd_max - tp->snd_una;
	desc->txwindow = tp->snd_wnd;
	desc->txcwindow = tp->snd_cwnd;

	// Name of the congestion-control algorithm in use, if any.
	if (CC_ALGO(tp)->name != NULL) {
		strlcpy(desc->cc_algo, CC_ALGO(tp)->name,
		    sizeof(desc->cc_algo));
	}

	struct socket *so = inp->inp_socket;
	if (so)
	{
		// TBD - take the socket lock around these to make sure
		// they're in sync?
		desc->upid = so->last_upid;
		desc->pid = so->last_pid;
		desc->traffic_class = so->so_traffic_class;
		if ((so->so_flags1 & SOF1_TRAFFIC_MGT_SO_BACKGROUND))
			desc->traffic_mgt_flags |= TRAFFIC_MGT_SO_BACKGROUND;
		if ((so->so_flags1 & SOF1_TRAFFIC_MGT_TCP_RECVBG))
			desc->traffic_mgt_flags |= TRAFFIC_MGT_TCP_RECVBG;
		proc_name(desc->pid, desc->pname, sizeof(desc->pname));
		// If the process can no longer be named (e.g. it exited),
		// fall back to the name cached in the cookie; otherwise
		// refresh the cookie's cached name.
		if (desc->pname[0] == 0)
		{
			strlcpy(desc->pname, tucookie->pname,
			    sizeof(desc->pname));
		}
		else
		{
			desc->pname[sizeof(desc->pname) - 1] = 0;
			strlcpy(tucookie->pname, desc->pname,
			    sizeof(tucookie->pname));
		}
		memcpy(desc->uuid, so->last_uuid, sizeof(so->last_uuid));
		memcpy(desc->vuuid, so->so_vuuid, sizeof(so->so_vuuid));
		// For delegated sockets, report the effective (delegating)
		// process identity separately; otherwise mirror the owner.
		if (so->so_flags & SOF_DELEGATED) {
			desc->eupid = so->e_upid;
			desc->epid = so->e_pid;
			memcpy(desc->euuid, so->e_uuid, sizeof(so->e_uuid));
		} else {
			desc->eupid = desc->upid;
			desc->epid = desc->pid;
			memcpy(desc->euuid, desc->uuid, sizeof(desc->uuid));
		}
		desc->sndbufsize = so->so_snd.sb_hiwat;
		desc->sndbufused = so->so_snd.sb_cc;
		desc->rcvbufsize = so->so_rcv.sb_hiwat;
		desc->rcvbufused = so->so_rcv.sb_cc;
	}

	tcp_get_connectivity_status(tp, &desc->connstatus);
	desc->ifnet_properties = nstat_inpcb_to_flags(inp);
	return 0;
}
1481
1482 static bool
1483 nstat_tcpudp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter, bool is_UDP)
1484 {
1485 bool retval = true;
1486
1487 if ((filter->npf_flags & (NSTAT_FILTER_IFNET_FLAGS|NSTAT_FILTER_SPECIFIC_USER)) != 0)
1488 {
1489 struct nstat_tucookie *tucookie = (struct nstat_tucookie *)cookie;
1490 struct inpcb *inp = tucookie->inp;
1491
1492 /* Only apply interface filter if at least one is allowed. */
1493 if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0)
1494 {
1495 uint16_t interface_properties = nstat_inpcb_to_flags(inp);
1496
1497 if ((filter->npf_flags & interface_properties) == 0)
1498 {
1499 // For UDP, we could have an undefined interface and yet transfers may have occurred.
1500 // We allow reporting if there have been transfers of the requested kind.
1501 // This is imperfect as we cannot account for the expensive attribute over wifi.
1502 // We also assume that cellular is expensive and we have no way to select for AWDL
1503 if (is_UDP)
1504 {
1505 do
1506 {
1507 if ((filter->npf_flags & (NSTAT_FILTER_ACCEPT_CELLULAR|NSTAT_FILTER_ACCEPT_EXPENSIVE)) &&
1508 (inp->inp_cstat->rxbytes || inp->inp_cstat->txbytes))
1509 {
1510 break;
1511 }
1512 if ((filter->npf_flags & NSTAT_FILTER_ACCEPT_WIFI) &&
1513 (inp->inp_wstat->rxbytes || inp->inp_wstat->txbytes))
1514 {
1515 break;
1516 }
1517 if ((filter->npf_flags & NSTAT_FILTER_ACCEPT_WIRED) &&
1518 (inp->inp_Wstat->rxbytes || inp->inp_Wstat->txbytes))
1519 {
1520 break;
1521 }
1522 return false;
1523 } while (0);
1524 }
1525 else
1526 {
1527 return false;
1528 }
1529 }
1530 }
1531
1532 if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0) && (retval))
1533 {
1534 struct socket *so = inp->inp_socket;
1535 retval = false;
1536
1537 if (so)
1538 {
1539 if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_PID) != 0) &&
1540 (filter->npf_pid == so->last_pid))
1541 {
1542 retval = true;
1543 }
1544 else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EPID) != 0) &&
1545 (filter->npf_pid == (so->so_flags & SOF_DELEGATED)? so->e_upid : so->last_pid))
1546 {
1547 retval = true;
1548 }
1549 else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_UUID) != 0) &&
1550 (memcmp(filter->npf_uuid, so->last_uuid, sizeof(so->last_uuid)) == 0))
1551 {
1552 retval = true;
1553 }
1554 else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EUUID) != 0) &&
1555 (memcmp(filter->npf_uuid, (so->so_flags & SOF_DELEGATED)? so->e_uuid : so->last_uuid,
1556 sizeof(so->last_uuid)) == 0))
1557 {
1558 retval = true;
1559 }
1560 }
1561 }
1562 }
1563 return retval;
1564 }
1565
1566 static bool
1567 nstat_tcp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter)
1568 {
1569 return nstat_tcpudp_reporting_allowed(cookie, filter, FALSE);
1570 }
1571
1572 static void
1573 nstat_init_tcp_provider(void)
1574 {
1575 bzero(&nstat_tcp_provider, sizeof(nstat_tcp_provider));
1576 nstat_tcp_provider.nstat_descriptor_length = sizeof(nstat_tcp_descriptor);
1577 nstat_tcp_provider.nstat_provider_id = NSTAT_PROVIDER_TCP_KERNEL;
1578 nstat_tcp_provider.nstat_lookup = nstat_tcp_lookup;
1579 nstat_tcp_provider.nstat_gone = nstat_tcp_gone;
1580 nstat_tcp_provider.nstat_counts = nstat_tcp_counts;
1581 nstat_tcp_provider.nstat_release = nstat_tcp_release;
1582 nstat_tcp_provider.nstat_watcher_add = nstat_tcp_add_watcher;
1583 nstat_tcp_provider.nstat_watcher_remove = nstat_tcp_remove_watcher;
1584 nstat_tcp_provider.nstat_copy_descriptor = nstat_tcp_copy_descriptor;
1585 nstat_tcp_provider.nstat_reporting_allowed = nstat_tcp_reporting_allowed;
1586 nstat_tcp_provider.next = nstat_providers;
1587 nstat_providers = &nstat_tcp_provider;
1588 }
1589
#pragma mark -- UDP Provider --

// Kernel UDP provider descriptor; populated in nstat_init_udp_provider().
static nstat_provider nstat_udp_provider;
1593
1594 static errno_t
1595 nstat_udp_lookup(
1596 const void *data,
1597 u_int32_t length,
1598 nstat_provider_cookie_t *out_cookie)
1599 {
1600 return nstat_tcpudp_lookup(&udbinfo, data, length, out_cookie);
1601 }
1602
1603 static int
1604 nstat_udp_gone(
1605 nstat_provider_cookie_t cookie)
1606 {
1607 struct nstat_tucookie *tucookie =
1608 (struct nstat_tucookie *)cookie;
1609 struct inpcb *inp;
1610
1611 return (!(inp = tucookie->inp) ||
1612 inp->inp_state == INPCB_STATE_DEAD) ? 1 : 0;
1613 }
1614
1615 static errno_t
1616 nstat_udp_counts(
1617 nstat_provider_cookie_t cookie,
1618 struct nstat_counts *out_counts,
1619 int *out_gone)
1620 {
1621 struct nstat_tucookie *tucookie =
1622 (struct nstat_tucookie *)cookie;
1623
1624 if (out_gone) *out_gone = 0;
1625
1626 // if the pcb is in the dead state, we should stop using it
1627 if (nstat_udp_gone(cookie))
1628 {
1629 if (out_gone) *out_gone = 1;
1630 if (!tucookie->inp)
1631 return EINVAL;
1632 }
1633 struct inpcb *inp = tucookie->inp;
1634
1635 atomic_get_64(out_counts->nstat_rxpackets, &inp->inp_stat->rxpackets);
1636 atomic_get_64(out_counts->nstat_rxbytes, &inp->inp_stat->rxbytes);
1637 atomic_get_64(out_counts->nstat_txpackets, &inp->inp_stat->txpackets);
1638 atomic_get_64(out_counts->nstat_txbytes, &inp->inp_stat->txbytes);
1639 atomic_get_64(out_counts->nstat_cell_rxbytes, &inp->inp_cstat->rxbytes);
1640 atomic_get_64(out_counts->nstat_cell_txbytes, &inp->inp_cstat->txbytes);
1641 atomic_get_64(out_counts->nstat_wifi_rxbytes, &inp->inp_wstat->rxbytes);
1642 atomic_get_64(out_counts->nstat_wifi_txbytes, &inp->inp_wstat->txbytes);
1643 atomic_get_64(out_counts->nstat_wired_rxbytes, &inp->inp_Wstat->rxbytes);
1644 atomic_get_64(out_counts->nstat_wired_txbytes, &inp->inp_Wstat->txbytes);
1645
1646 return 0;
1647 }
1648
1649 static void
1650 nstat_udp_release(
1651 nstat_provider_cookie_t cookie,
1652 int locked)
1653 {
1654 struct nstat_tucookie *tucookie =
1655 (struct nstat_tucookie *)cookie;
1656
1657 nstat_tucookie_release_internal(tucookie, locked);
1658 }
1659
1660 static errno_t
1661 nstat_udp_add_watcher(
1662 nstat_control_state *state)
1663 {
1664 struct inpcb *inp;
1665 struct nstat_tucookie *cookie;
1666
1667 OSIncrementAtomic(&nstat_udp_watchers);
1668
1669 lck_rw_lock_shared(udbinfo.ipi_lock);
1670 // Add all current UDP inpcbs.
1671 LIST_FOREACH(inp, udbinfo.ipi_listhead, inp_list)
1672 {
1673 cookie = nstat_tucookie_alloc_ref(inp);
1674 if (cookie == NULL)
1675 continue;
1676 if (nstat_control_source_add(0, state, &nstat_udp_provider,
1677 cookie) != 0)
1678 {
1679 nstat_tucookie_release(cookie);
1680 break;
1681 }
1682 }
1683
1684 lck_rw_done(udbinfo.ipi_lock);
1685
1686 return 0;
1687 }
1688
1689 static void
1690 nstat_udp_remove_watcher(
1691 __unused nstat_control_state *state)
1692 {
1693 OSDecrementAtomic(&nstat_udp_watchers);
1694 }
1695
/*
 * Announce a freshly created UDP pcb to every client watching the
 * kernel UDP provider.  Lock order: socket lock, then nstat_mtx.
 */
__private_extern__ void
nstat_udp_new_pcb(
	struct inpcb *inp)
{
	struct nstat_tucookie *cookie;

	// Fast path: nobody is watching kernel UDP sources.
	if (nstat_udp_watchers == 0)
		return;

	socket_lock(inp->inp_socket, 0);
	lck_mtx_lock(&nstat_mtx);
	nstat_control_state *state;
	for (state = nstat_controls; state; state = state->ncs_next)
	{
		if ((state->ncs_watching & (1 << NSTAT_PROVIDER_UDP_KERNEL)) != 0)
		{
			// this client is watching udp
			// acquire a reference for it
			cookie = nstat_tucookie_alloc_ref_locked(inp);
			if (cookie == NULL)
				continue;
			// add the source, if that fails, release the reference
			if (nstat_control_source_add(0, state,
			    &nstat_udp_provider, cookie) != 0)
			{
				nstat_tucookie_release_locked(cookie);
				break;
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);
	socket_unlock(inp->inp_socket, 0);
}
1729
/*
 * Fill a caller-supplied nstat_udp_descriptor for this source.
 * Uses live pcb state normally, or the snapshot cached by
 * nstat_pcb_cache() when the pcb's addressing has been invalidated.
 * Returns EINVAL if the buffer is too small or the pcb is gone.
 */
static errno_t
nstat_udp_copy_descriptor(
	nstat_provider_cookie_t cookie,
	void *data,
	u_int32_t len)
{
	if (len < sizeof(nstat_udp_descriptor))
	{
		return EINVAL;
	}

	if (nstat_udp_gone(cookie))
		return EINVAL;

	struct nstat_tucookie *tucookie =
		(struct nstat_tucookie *)cookie;
	nstat_udp_descriptor *desc = (nstat_udp_descriptor*)data;
	struct inpcb *inp = tucookie->inp;

	bzero(desc, sizeof(*desc));

	if (tucookie->cached == false) {
		// Live pcb state is valid: read addresses directly.
		if (inp->inp_vflag & INP_IPV6)
		{
			nstat_ip6_to_sockaddr(&inp->in6p_laddr, inp->inp_lport,
				&desc->local.v6, sizeof(desc->local.v6));
			nstat_ip6_to_sockaddr(&inp->in6p_faddr, inp->inp_fport,
				&desc->remote.v6, sizeof(desc->remote.v6));
		}
		else if (inp->inp_vflag & INP_IPV4)
		{
			nstat_ip_to_sockaddr(&inp->inp_laddr, inp->inp_lport,
				&desc->local.v4, sizeof(desc->local.v4));
			nstat_ip_to_sockaddr(&inp->inp_faddr, inp->inp_fport,
				&desc->remote.v4, sizeof(desc->remote.v4));
		}
		desc->ifnet_properties = nstat_inpcb_to_flags(inp);
	}
	else
	{
		// Addressing was invalidated (e.g. disconnect); report the
		// snapshot taken by nstat_pcb_cache().
		if (inp->inp_vflag & INP_IPV6)
		{
			memcpy(&desc->local.v6, &tucookie->local.v6,
			    sizeof(desc->local.v6));
			memcpy(&desc->remote.v6, &tucookie->remote.v6,
			    sizeof(desc->remote.v6));
		}
		else if (inp->inp_vflag & INP_IPV4)
		{
			memcpy(&desc->local.v4, &tucookie->local.v4,
			    sizeof(desc->local.v4));
			memcpy(&desc->remote.v4, &tucookie->remote.v4,
			    sizeof(desc->remote.v4));
		}
		desc->ifnet_properties = tucookie->ifnet_properties;
	}

	// Prefer the live output interface, falling back to the cached one.
	if (inp->inp_last_outifp)
		desc->ifindex = inp->inp_last_outifp->if_index;
	else
		desc->ifindex = tucookie->if_index;

	struct socket *so = inp->inp_socket;
	if (so)
	{
		// TBD - take the socket lock around these to make sure
		// they're in sync?
		desc->upid = so->last_upid;
		desc->pid = so->last_pid;
		proc_name(desc->pid, desc->pname, sizeof(desc->pname));
		// If the process can no longer be named (e.g. it exited),
		// fall back to the name cached in the cookie; otherwise
		// refresh the cookie's cached name.
		if (desc->pname[0] == 0)
		{
			strlcpy(desc->pname, tucookie->pname,
			    sizeof(desc->pname));
		}
		else
		{
			desc->pname[sizeof(desc->pname) - 1] = 0;
			strlcpy(tucookie->pname, desc->pname,
			    sizeof(tucookie->pname));
		}
		memcpy(desc->uuid, so->last_uuid, sizeof(so->last_uuid));
		memcpy(desc->vuuid, so->so_vuuid, sizeof(so->so_vuuid));
		// For delegated sockets, report the effective (delegating)
		// process identity separately; otherwise mirror the owner.
		if (so->so_flags & SOF_DELEGATED) {
			desc->eupid = so->e_upid;
			desc->epid = so->e_pid;
			memcpy(desc->euuid, so->e_uuid, sizeof(so->e_uuid));
		} else {
			desc->eupid = desc->upid;
			desc->epid = desc->pid;
			memcpy(desc->euuid, desc->uuid, sizeof(desc->uuid));
		}
		desc->rcvbufsize = so->so_rcv.sb_hiwat;
		desc->rcvbufused = so->so_rcv.sb_cc;
		desc->traffic_class = so->so_traffic_class;
	}

	return 0;
}
1829
1830 static bool
1831 nstat_udp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter)
1832 {
1833 return nstat_tcpudp_reporting_allowed(cookie, filter, TRUE);
1834 }
1835
1836
1837 static void
1838 nstat_init_udp_provider(void)
1839 {
1840 bzero(&nstat_udp_provider, sizeof(nstat_udp_provider));
1841 nstat_udp_provider.nstat_provider_id = NSTAT_PROVIDER_UDP_KERNEL;
1842 nstat_udp_provider.nstat_descriptor_length = sizeof(nstat_udp_descriptor);
1843 nstat_udp_provider.nstat_lookup = nstat_udp_lookup;
1844 nstat_udp_provider.nstat_gone = nstat_udp_gone;
1845 nstat_udp_provider.nstat_counts = nstat_udp_counts;
1846 nstat_udp_provider.nstat_watcher_add = nstat_udp_add_watcher;
1847 nstat_udp_provider.nstat_watcher_remove = nstat_udp_remove_watcher;
1848 nstat_udp_provider.nstat_copy_descriptor = nstat_udp_copy_descriptor;
1849 nstat_udp_provider.nstat_release = nstat_udp_release;
1850 nstat_udp_provider.nstat_reporting_allowed = nstat_udp_reporting_allowed;
1851 nstat_udp_provider.next = nstat_providers;
1852 nstat_providers = &nstat_udp_provider;
1853 }
1854
#pragma mark -- TCP/UDP Userland

// Almost all of this infrastructure is common to both TCP and UDP

static nstat_provider nstat_userland_tcp_provider;
static nstat_provider nstat_userland_udp_provider;


// In-kernel shadow of a userland-tracked connection; one is created per
// ntstat_userland_stats_open() call and destroyed on close.
struct nstat_tu_shadow {
	tailq_entry_tu_shadow shad_link;
	userland_stats_request_vals_fn *shad_getvals_fn;	// owner callback that fills counts and/or descriptor
	userland_stats_provider_context *shad_provider_context;	// opaque owner context passed back on every request
	u_int64_t shad_properties;	// properties supplied at open time
	int shad_provider;	// NSTAT_PROVIDER_{TCP,UDP}_USERLAND
	uint32_t shad_magic;	// TU_SHADOW_MAGIC while live, TU_SHADOW_UNMAGIC once closed
};

// Magic number checking should remain in place until the userland provider has been fully proven
#define TU_SHADOW_MAGIC 0xfeedf00d
#define TU_SHADOW_UNMAGIC 0xdeaddeed

// All live shadows, protected by nstat_mtx.
static tailq_head_tu_shadow nstat_userprot_shad_head = TAILQ_HEAD_INITIALIZER(nstat_userprot_shad_head);
1877
1878 static errno_t
1879 nstat_userland_tu_lookup(
1880 __unused const void *data,
1881 __unused u_int32_t length,
1882 __unused nstat_provider_cookie_t *out_cookie)
1883 {
1884 // Looking up a specific connection is not supported
1885 return ENOTSUP;
1886 }
1887
1888 static int
1889 nstat_userland_tu_gone(
1890 __unused nstat_provider_cookie_t cookie)
1891 {
1892 // Returns non-zero if the source has gone.
1893 // We don't keep a source hanging around, so the answer is always 0
1894 return 0;
1895 }
1896
1897 static errno_t
1898 nstat_userland_tu_counts(
1899 nstat_provider_cookie_t cookie,
1900 struct nstat_counts *out_counts,
1901 int *out_gone)
1902 {
1903 struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie;
1904 assert(shad->shad_magic == TU_SHADOW_MAGIC);
1905
1906 bool result = (*shad->shad_getvals_fn)(shad->shad_provider_context, out_counts, NULL);
1907
1908 if (out_gone) *out_gone = 0;
1909
1910 return (result)? 0 : EIO;
1911 }
1912
1913
1914 static errno_t
1915 nstat_userland_tu_copy_descriptor(
1916 nstat_provider_cookie_t cookie,
1917 void *data,
1918 __unused u_int32_t len)
1919 {
1920 struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie;
1921 assert(shad->shad_magic == TU_SHADOW_MAGIC);
1922
1923 bool result = (*shad->shad_getvals_fn)(shad->shad_provider_context, NULL, data);
1924
1925 return (result)? 0 : EIO;
1926 }
1927
static void
nstat_userland_tu_release(
	__unused nstat_provider_cookie_t cookie,
	__unused int locked)
{
	// Called when a nstat_src is detached.
	// We don't reference count or ask for delayed release so nothing to do here.
	// The shadow itself is freed in ntstat_userland_stats_close().
}
1936
1937 static bool
1938 check_reporting_for_user(nstat_provider_filter *filter, pid_t pid, pid_t epid, uuid_t *uuid, uuid_t *euuid)
1939 {
1940 bool retval = true;
1941
1942 if ((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0)
1943 {
1944 retval = false;
1945
1946 if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_PID) != 0) &&
1947 (filter->npf_pid == pid))
1948 {
1949 retval = true;
1950 }
1951 else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EPID) != 0) &&
1952 (filter->npf_pid == epid))
1953 {
1954 retval = true;
1955 }
1956 else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_UUID) != 0) &&
1957 (memcmp(filter->npf_uuid, uuid, sizeof(*uuid)) == 0))
1958 {
1959 retval = true;
1960 }
1961 else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EUUID) != 0) &&
1962 (memcmp(filter->npf_uuid, euuid, sizeof(*euuid)) == 0))
1963 {
1964 retval = true;
1965 }
1966 }
1967 return retval;
1968 }
1969
/*
 * Decide whether a userland TCP source may be reported to a client.
 * Fetches a fresh descriptor from the owner and applies the client's
 * interface-class and per-user filters to it.
 */
static bool
nstat_userland_tcp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter)
{
	bool retval = true;

	if ((filter->npf_flags & (NSTAT_FILTER_IFNET_FLAGS|NSTAT_FILTER_SPECIFIC_USER)) != 0)
	{
		nstat_tcp_descriptor tcp_desc; // Stack allocation - OK or pushing the limits too far?
		struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie;

		assert(shad->shad_magic == TU_SHADOW_MAGIC);

		if ((*shad->shad_getvals_fn)(shad->shad_provider_context, NULL, &tcp_desc))
		{
			// Interface filter: at least one accepted class must match.
			if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0)
			{
				if ((filter->npf_flags & tcp_desc.ifnet_properties) == 0)
				{
					return false;
				}
			}
			if ((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0)
			{
				retval = check_reporting_for_user(filter, (pid_t)tcp_desc.pid, (pid_t)tcp_desc.epid,
				    &tcp_desc.uuid, &tcp_desc.euuid);
			}
		}
		else
		{
			retval = false; // No further information, so might as well give up now.
		}
	}
	return retval;
}
2004
/*
 * Decide whether a userland UDP source may be reported to a client.
 * Fetches a fresh descriptor from the owner and applies the client's
 * interface-class and per-user filters to it.
 */
static bool
nstat_userland_udp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter)
{
	bool retval = true;

	if ((filter->npf_flags & (NSTAT_FILTER_IFNET_FLAGS|NSTAT_FILTER_SPECIFIC_USER)) != 0)
	{
		nstat_udp_descriptor udp_desc; // Stack allocation - OK or pushing the limits too far?
		struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie;

		assert(shad->shad_magic == TU_SHADOW_MAGIC);

		if ((*shad->shad_getvals_fn)(shad->shad_provider_context, NULL, &udp_desc))
		{
			// Interface filter: at least one accepted class must match.
			if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0)
			{
				if ((filter->npf_flags & udp_desc.ifnet_properties) == 0)
				{
					return false;
				}
			}
			if ((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0)
			{
				retval = check_reporting_for_user(filter, (pid_t)udp_desc.pid, (pid_t)udp_desc.epid,
				    &udp_desc.uuid, &udp_desc.euuid);
			}
		}
		else
		{
			retval = false; // No further information, so might as well give up now.
		}
	}
	return retval;
}
2039
2040
2041
2042 static errno_t
2043 nstat_userland_tcp_add_watcher(
2044 nstat_control_state *state)
2045 {
2046 struct nstat_tu_shadow *shad;
2047
2048 OSIncrementAtomic(&nstat_userland_tcp_watchers);
2049
2050 lck_mtx_lock(&nstat_mtx);
2051
2052 TAILQ_FOREACH(shad, &nstat_userprot_shad_head, shad_link) {
2053 assert(shad->shad_magic == TU_SHADOW_MAGIC);
2054
2055 if (shad->shad_provider == NSTAT_PROVIDER_TCP_USERLAND)
2056 {
2057 int result = nstat_control_source_add(0, state, &nstat_userland_tcp_provider, shad);
2058 if (result != 0)
2059 {
2060 printf("%s - nstat_control_source_add returned %d\n", __func__, result);
2061 }
2062 }
2063 }
2064 lck_mtx_unlock(&nstat_mtx);
2065
2066 return 0;
2067 }
2068
2069 static errno_t
2070 nstat_userland_udp_add_watcher(
2071 nstat_control_state *state)
2072 {
2073 struct nstat_tu_shadow *shad;
2074
2075 OSIncrementAtomic(&nstat_userland_udp_watchers);
2076
2077 lck_mtx_lock(&nstat_mtx);
2078
2079 TAILQ_FOREACH(shad, &nstat_userprot_shad_head, shad_link) {
2080 assert(shad->shad_magic == TU_SHADOW_MAGIC);
2081
2082 if (shad->shad_provider == NSTAT_PROVIDER_UDP_USERLAND)
2083 {
2084 int result = nstat_control_source_add(0, state, &nstat_userland_udp_provider, shad);
2085 if (result != 0)
2086 {
2087 printf("%s - nstat_control_source_add returned %d\n", __func__, result);
2088 }
2089 }
2090 }
2091 lck_mtx_unlock(&nstat_mtx);
2092
2093 return 0;
2094 }
2095
2096
2097 static void
2098 nstat_userland_tcp_remove_watcher(
2099 __unused nstat_control_state *state)
2100 {
2101 OSDecrementAtomic(&nstat_userland_tcp_watchers);
2102 }
2103
2104 static void
2105 nstat_userland_udp_remove_watcher(
2106 __unused nstat_control_state *state)
2107 {
2108 OSDecrementAtomic(&nstat_userland_udp_watchers);
2109 }
2110
2111 static void
2112 nstat_init_userland_tcp_provider(void)
2113 {
2114 bzero(&nstat_userland_tcp_provider, sizeof(nstat_tcp_provider));
2115 nstat_userland_tcp_provider.nstat_descriptor_length = sizeof(nstat_tcp_descriptor);
2116 nstat_userland_tcp_provider.nstat_provider_id = NSTAT_PROVIDER_TCP_USERLAND;
2117 nstat_userland_tcp_provider.nstat_lookup = nstat_userland_tu_lookup;
2118 nstat_userland_tcp_provider.nstat_gone = nstat_userland_tu_gone;
2119 nstat_userland_tcp_provider.nstat_counts = nstat_userland_tu_counts;
2120 nstat_userland_tcp_provider.nstat_release = nstat_userland_tu_release;
2121 nstat_userland_tcp_provider.nstat_watcher_add = nstat_userland_tcp_add_watcher;
2122 nstat_userland_tcp_provider.nstat_watcher_remove = nstat_userland_tcp_remove_watcher;
2123 nstat_userland_tcp_provider.nstat_copy_descriptor = nstat_userland_tu_copy_descriptor;
2124 nstat_userland_tcp_provider.nstat_reporting_allowed = nstat_userland_tcp_reporting_allowed;
2125 nstat_userland_tcp_provider.next = nstat_providers;
2126 nstat_providers = &nstat_userland_tcp_provider;
2127 }
2128
2129
2130 static void
2131 nstat_init_userland_udp_provider(void)
2132 {
2133 bzero(&nstat_userland_udp_provider, sizeof(nstat_udp_provider));
2134 nstat_userland_udp_provider.nstat_descriptor_length = sizeof(nstat_udp_descriptor);
2135 nstat_userland_udp_provider.nstat_provider_id = NSTAT_PROVIDER_UDP_USERLAND;
2136 nstat_userland_udp_provider.nstat_lookup = nstat_userland_tu_lookup;
2137 nstat_userland_udp_provider.nstat_gone = nstat_userland_tu_gone;
2138 nstat_userland_udp_provider.nstat_counts = nstat_userland_tu_counts;
2139 nstat_userland_udp_provider.nstat_release = nstat_userland_tu_release;
2140 nstat_userland_udp_provider.nstat_watcher_add = nstat_userland_udp_add_watcher;
2141 nstat_userland_udp_provider.nstat_watcher_remove = nstat_userland_udp_remove_watcher;
2142 nstat_userland_udp_provider.nstat_copy_descriptor = nstat_userland_tu_copy_descriptor;
2143 nstat_userland_udp_provider.nstat_reporting_allowed = nstat_userland_udp_reporting_allowed;
2144 nstat_userland_udp_provider.next = nstat_providers;
2145 nstat_providers = &nstat_userland_udp_provider;
2146 }
2147
2148
2149
2150 // Things get started with a call to netstats to say that there’s a new connection:
2151 __private_extern__ nstat_userland_context
2152 ntstat_userland_stats_open(userland_stats_provider_context *ctx,
2153 int provider_id,
2154 u_int64_t properties,
2155 userland_stats_request_vals_fn req_fn)
2156 {
2157 struct nstat_tu_shadow *shad;
2158
2159 if ((provider_id != NSTAT_PROVIDER_TCP_USERLAND) && (provider_id != NSTAT_PROVIDER_UDP_USERLAND))
2160 {
2161 printf("%s - incorrect provider is supplied, %d\n", __func__, provider_id);
2162 return NULL;
2163 }
2164
2165 shad = OSMalloc(sizeof(*shad), nstat_malloc_tag);
2166 if (shad == NULL)
2167 return NULL;
2168
2169 shad->shad_getvals_fn = req_fn;
2170 shad->shad_provider_context = ctx;
2171 shad->shad_provider = provider_id;
2172 shad->shad_properties = properties;
2173 shad->shad_magic = TU_SHADOW_MAGIC;
2174
2175 lck_mtx_lock(&nstat_mtx);
2176 nstat_control_state *state;
2177
2178 // Even if there are no watchers, we save the shadow structure
2179 TAILQ_INSERT_HEAD(&nstat_userprot_shad_head, shad, shad_link);
2180
2181 for (state = nstat_controls; state; state = state->ncs_next)
2182 {
2183 if ((state->ncs_watching & (1 << provider_id)) != 0)
2184 {
2185 // this client is watching tcp/udp userland
2186 // Link to it.
2187 int result = nstat_control_source_add(0, state, &nstat_userland_tcp_provider, shad);
2188 if (result != 0)
2189 {
2190 printf("%s - nstat_control_source_add returned %d\n", __func__, result);
2191 }
2192 }
2193 }
2194 lck_mtx_unlock(&nstat_mtx);
2195
2196 return (nstat_userland_context)shad;
2197 }
2198
2199
// Tears down a userland flow previously created by ntstat_userland_stats_open.
// Unlinks the shadow's source from every watching client (sending a goodbye
// message first), removes the shadow from the global list, then frees both
// the collected sources and the shadow itself.
__private_extern__ void
ntstat_userland_stats_close(nstat_userland_context nstat_ctx)
{
	struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)nstat_ctx;
	nstat_src *dead_list = NULL;

	if (shad == NULL)
		return;

	assert(shad->shad_magic == TU_SHADOW_MAGIC);

	lck_mtx_lock(&nstat_mtx);
	// Only scan clients if anyone is actually watching either userland provider.
	if (nstat_userland_udp_watchers != 0 || nstat_userland_tcp_watchers != 0)
	{
		nstat_control_state *state;
		nstat_src *src, *prevsrc;
		errno_t result;

		for (state = nstat_controls; state; state = state->ncs_next)
		{
			// Lock order: nstat_mtx is held across all per-client state->mtx locks.
			lck_mtx_lock(&state->mtx);
			// Find this shadow's source in the client's singly-linked source list.
			for (prevsrc = NULL, src = state->ncs_srcs; src;
			    prevsrc = src, src = src->next)
			{
				if (shad == (struct nstat_tu_shadow *)src->cookie)
					break;
			}

			if (src)
			{
				// Tell the client the source is going away, then unlink it.
				result = nstat_control_send_goodbye(state, src);

				if (prevsrc)
					prevsrc->next = src->next;
				else
					state->ncs_srcs = src->next;

				// Defer cleanup until all locks are dropped; collect on dead_list.
				src->next = dead_list;
				dead_list = src;
			}
			lck_mtx_unlock(&state->mtx);
		}
	}
	TAILQ_REMOVE(&nstat_userprot_shad_head, shad, shad_link);

	lck_mtx_unlock(&nstat_mtx);

	// Clean up unlinked sources outside of nstat_mtx / state->mtx.
	while (dead_list)
	{
		nstat_src *src;
		src = dead_list;
		dead_list = src->next;

		nstat_control_cleanup_source(NULL, src, TRUE);
	}

	// Poison the magic so stale handles are caught by the assert above.
	shad->shad_magic = TU_SHADOW_UNMAGIC;

	OSFree(shad, sizeof(*shad), nstat_malloc_tag);
}
2260
2261
// Intentional no-op placeholder: event reporting for userland flows is not
// wired up yet, but the entry point is exported so callers can already link
// against it.
__private_extern__ void
ntstat_userland_stats_event(
	__unused nstat_userland_context context,
	__unused userland_stats_event_t event)
{
	// This is a dummy for when we hook up event reporting to NetworkStatistics.
	// See <rdar://problem/23022832> NetworkStatistics should provide opt-in notifications
}
2270
2271
2272
2273
2274 #pragma mark -- ifnet Provider --
2275
2276 static nstat_provider nstat_ifnet_provider;
2277
2278 /*
2279 * We store a pointer to the ifnet and the original threshold
2280 * requested by the client.
2281 */
struct nstat_ifnet_cookie
{
	struct ifnet *ifp;       // referenced interface (ifnet_reference taken in nstat_ifnet_lookup)
	uint64_t threshold;      // byte threshold this client asked for (min 1 MB)
};
2287
// Provider lookup for the ifnet provider.  Validates the add request,
// allocates a cookie, finds the interface by index (taking a reference),
// and lowers the interface's if_data_threshold if this client asked for a
// smaller one.  When the threshold shrinks, all existing ifnet sources are
// sent a fresh description so clients see the new value.
static errno_t
nstat_ifnet_lookup(
	const void *data,
	u_int32_t length,
	nstat_provider_cookie_t *out_cookie)
{
	const nstat_ifnet_add_param *param = (const nstat_ifnet_add_param *)data;
	struct ifnet *ifp;
	boolean_t changed = FALSE;
	nstat_control_state *state;
	nstat_src *src;
	struct nstat_ifnet_cookie *cookie;

	// Reject short requests and thresholds below 1 MB.
	if (length < sizeof(*param) || param->threshold < 1024*1024)
		return EINVAL;
	if (nstat_privcheck != 0) {
		errno_t result = priv_check_cred(kauth_cred_get(),
		    PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0);
		if (result != 0)
			return result;
	}
	cookie = OSMalloc(sizeof(*cookie), nstat_malloc_tag);
	if (cookie == NULL)
		return ENOMEM;
	bzero(cookie, sizeof(*cookie));

	ifnet_head_lock_shared();
	TAILQ_FOREACH(ifp, &ifnet_head, if_link)
	{
		ifnet_lock_exclusive(ifp);
		if (ifp->if_index == param->ifindex)
		{
			cookie->ifp = ifp;
			cookie->threshold = param->threshold;
			*out_cookie = cookie;
			// Lower the interface threshold if this client's is smaller
			// (or if none was set).
			if (!ifp->if_data_threshold ||
			    ifp->if_data_threshold > param->threshold)
			{
				changed = TRUE;
				ifp->if_data_threshold = param->threshold;
			}
			ifnet_lock_done(ifp);
			// Hold a reference for the lifetime of the cookie;
			// dropped in nstat_ifnet_release.
			ifnet_reference(ifp);
			break;
		}
		ifnet_lock_done(ifp);
	}
	ifnet_head_done();

	/*
	 * When we change the threshold to something smaller, we notify
	 * all of our clients with a description message.
	 * We won't send a message to the client we are currently serving
	 * because it has no `ifnet source' yet.
	 */
	if (changed)
	{
		lck_mtx_lock(&nstat_mtx);
		for (state = nstat_controls; state; state = state->ncs_next)
		{
			lck_mtx_lock(&state->mtx);
			for (src = state->ncs_srcs; src; src = src->next)
			{
				if (src->provider != &nstat_ifnet_provider)
					continue;
				nstat_control_send_description(state, src, 0, 0);
			}
			lck_mtx_unlock(&state->mtx);
		}
		lck_mtx_unlock(&nstat_mtx);
	}
	// No matching interface was found: free the unused cookie.
	if (cookie->ifp == NULL)
		OSFree(cookie, sizeof(*cookie), nstat_malloc_tag);

	// ifp is NULL iff the TAILQ_FOREACH ran to completion without a match.
	return ifp ? 0 : EINVAL;
}
2364
2365 static int
2366 nstat_ifnet_gone(
2367 nstat_provider_cookie_t cookie)
2368 {
2369 struct ifnet *ifp;
2370 struct nstat_ifnet_cookie *ifcookie =
2371 (struct nstat_ifnet_cookie *)cookie;
2372
2373 ifnet_head_lock_shared();
2374 TAILQ_FOREACH(ifp, &ifnet_head, if_link)
2375 {
2376 if (ifp == ifcookie->ifp)
2377 break;
2378 }
2379 ifnet_head_done();
2380
2381 return ifp ? 0 : 1;
2382 }
2383
2384 static errno_t
2385 nstat_ifnet_counts(
2386 nstat_provider_cookie_t cookie,
2387 struct nstat_counts *out_counts,
2388 int *out_gone)
2389 {
2390 struct nstat_ifnet_cookie *ifcookie =
2391 (struct nstat_ifnet_cookie *)cookie;
2392 struct ifnet *ifp = ifcookie->ifp;
2393
2394 if (out_gone) *out_gone = 0;
2395
2396 // if the ifnet is gone, we should stop using it
2397 if (nstat_ifnet_gone(cookie))
2398 {
2399 if (out_gone) *out_gone = 1;
2400 return EINVAL;
2401 }
2402
2403 bzero(out_counts, sizeof(*out_counts));
2404 out_counts->nstat_rxpackets = ifp->if_ipackets;
2405 out_counts->nstat_rxbytes = ifp->if_ibytes;
2406 out_counts->nstat_txpackets = ifp->if_opackets;
2407 out_counts->nstat_txbytes = ifp->if_obytes;
2408 out_counts->nstat_cell_rxbytes = out_counts->nstat_cell_txbytes = 0;
2409 return 0;
2410 }
2411
// Provider release for an ifnet source.  Recomputes the interface's
// if_data_threshold from the surviving clients' requests (or disables it),
// drops the ifnet reference taken in nstat_ifnet_lookup, and frees the cookie.
static void
nstat_ifnet_release(
	nstat_provider_cookie_t cookie,
	__unused int locked)
{
	struct nstat_ifnet_cookie *ifcookie;
	struct ifnet *ifp;
	nstat_control_state *state;
	nstat_src *src;
	uint64_t minthreshold = UINT64_MAX;

	/*
	 * Find all the clients that requested a threshold
	 * for this ifnet and re-calculate if_data_threshold.
	 */
	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next)
	{
		lck_mtx_lock(&state->mtx);
		for (src = state->ncs_srcs; src; src = src->next)
		{
			/* Skip the provider we are about to detach. */
			if (src->provider != &nstat_ifnet_provider ||
			    src->cookie == cookie)
				continue;
			ifcookie = (struct nstat_ifnet_cookie *)src->cookie;
			// NOTE: this considers every remaining ifnet source, not only
			// those on the same interface; the minimum across all of them
			// is applied below.
			if (ifcookie->threshold < minthreshold)
				minthreshold = ifcookie->threshold;
		}
		lck_mtx_unlock(&state->mtx);
	}
	lck_mtx_unlock(&nstat_mtx);
	/*
	 * Reset if_data_threshold or disable it.
	 */
	ifcookie = (struct nstat_ifnet_cookie *)cookie;
	ifp = ifcookie->ifp;
	// Only touch the interface if it is still attached; the extra io
	// refcount taken here is dropped right after the update.
	if (ifnet_is_attached(ifp, 1)) {
		ifnet_lock_exclusive(ifp);
		if (minthreshold == UINT64_MAX)
			ifp->if_data_threshold = 0;
		else
			ifp->if_data_threshold = minthreshold;
		ifnet_lock_done(ifp);
		ifnet_decr_iorefcnt(ifp);
	}
	// Balance the ifnet_reference taken in nstat_ifnet_lookup.
	ifnet_release(ifp);
	OSFree(ifcookie, sizeof(*ifcookie), nstat_malloc_tag);
}
2461
// Translate the interface's driver-reported link status (cellular v1 or
// wifi v1 report) into the nstat descriptor's link_status field, mapping
// each IF_* valid bit / value onto the corresponding NSTAT_IFNET_DESC_*
// bit.  Leaves link_status_type as NONE when no report is available or the
// report version is not understood.
static void
nstat_ifnet_copy_link_status(
	struct ifnet *ifp,
	struct nstat_ifnet_descriptor *desc)
{
	struct if_link_status *ifsr = ifp->if_link_status;
	nstat_ifnet_desc_link_status *link_status = &desc->link_status;

	link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_NONE;
	if (ifsr == NULL)
		return;

	// if_link_status contents are protected by this rw lock.
	lck_rw_lock_shared(&ifp->if_link_status_lock);

	if (ifp->if_type == IFT_CELLULAR) {

		// --- Cellular status report (version 1 only) ---
		nstat_ifnet_desc_cellular_status *cell_status = &link_status->u.cellular;
		struct if_cellular_status_v1 *if_cell_sr =
			&ifsr->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;

		if (ifsr->ifsr_version != IF_CELLULAR_STATUS_REPORT_VERSION_1)
			goto done;

		link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_CELLULAR;

		if (if_cell_sr->valid_bitmask & IF_CELL_LINK_QUALITY_METRIC_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_LINK_QUALITY_METRIC_VALID;
			cell_status->link_quality_metric = if_cell_sr->link_quality_metric;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_EFFECTIVE_BANDWIDTH_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_EFFECTIVE_BANDWIDTH_VALID;
			cell_status->ul_effective_bandwidth = if_cell_sr->ul_effective_bandwidth;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_BANDWIDTH_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_BANDWIDTH_VALID;
			cell_status->ul_max_bandwidth = if_cell_sr->ul_max_bandwidth;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_MIN_LATENCY_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MIN_LATENCY_VALID;
			cell_status->ul_min_latency = if_cell_sr->ul_min_latency;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_EFFECTIVE_LATENCY_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_EFFECTIVE_LATENCY_VALID;
			cell_status->ul_effective_latency = if_cell_sr->ul_effective_latency;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_LATENCY_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_LATENCY_VALID;
			cell_status->ul_max_latency = if_cell_sr->ul_max_latency;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_RETXT_LEVEL_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_VALID;
			// Enum-to-enum mapping; an unknown level clears the valid bit again.
			if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_NONE)
				cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_NONE;
			else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_LOW)
				cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_LOW;
			else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_MEDIUM)
				cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_MEDIUM;
			else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_HIGH)
				cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_HIGH;
			else
				cell_status->valid_bitmask &= ~NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_VALID;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_BYTES_LOST_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_BYTES_LOST_VALID;
			cell_status->ul_bytes_lost = if_cell_sr->ul_bytes_lost;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_MIN_QUEUE_SIZE_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MIN_QUEUE_SIZE_VALID;
			cell_status->ul_min_queue_size = if_cell_sr->ul_min_queue_size;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_AVG_QUEUE_SIZE_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_AVG_QUEUE_SIZE_VALID;
			cell_status->ul_avg_queue_size = if_cell_sr->ul_avg_queue_size;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_QUEUE_SIZE_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_QUEUE_SIZE_VALID;
			cell_status->ul_max_queue_size = if_cell_sr->ul_max_queue_size;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_DL_EFFECTIVE_BANDWIDTH_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_DL_EFFECTIVE_BANDWIDTH_VALID;
			cell_status->dl_effective_bandwidth = if_cell_sr->dl_effective_bandwidth;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_DL_MAX_BANDWIDTH_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_DL_MAX_BANDWIDTH_VALID;
			cell_status->dl_max_bandwidth = if_cell_sr->dl_max_bandwidth;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_CONFIG_INACTIVITY_TIME_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_CONFIG_INACTIVITY_TIME_VALID;
			cell_status->config_inactivity_time = if_cell_sr->config_inactivity_time;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_CONFIG_BACKOFF_TIME_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_CONFIG_BACKOFF_TIME_VALID;
			cell_status->config_backoff_time = if_cell_sr->config_backoff_time;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_MSS_RECOMMENDED_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_MSS_RECOMMENDED_VALID;
			cell_status->mss_recommended = if_cell_sr->mss_recommended;
		}
	} else if (ifp->if_subfamily == IFNET_SUBFAMILY_WIFI) {

		// --- Wifi status report (version 1 only) ---
		nstat_ifnet_desc_wifi_status *wifi_status = &link_status->u.wifi;
		struct if_wifi_status_v1 *if_wifi_sr =
			&ifsr->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;

		if (ifsr->ifsr_version != IF_WIFI_STATUS_REPORT_VERSION_1)
			goto done;

		link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_WIFI;

		if (if_wifi_sr->valid_bitmask & IF_WIFI_LINK_QUALITY_METRIC_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_LINK_QUALITY_METRIC_VALID;
			wifi_status->link_quality_metric = if_wifi_sr->link_quality_metric;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID;
			wifi_status->ul_effective_bandwidth = if_wifi_sr->ul_effective_bandwidth;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MAX_BANDWIDTH_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MAX_BANDWIDTH_VALID;
			wifi_status->ul_max_bandwidth = if_wifi_sr->ul_max_bandwidth;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MIN_LATENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MIN_LATENCY_VALID;
			wifi_status->ul_min_latency = if_wifi_sr->ul_min_latency;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_EFFECTIVE_LATENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_EFFECTIVE_LATENCY_VALID;
			wifi_status->ul_effective_latency = if_wifi_sr->ul_effective_latency;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MAX_LATENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MAX_LATENCY_VALID;
			wifi_status->ul_max_latency = if_wifi_sr->ul_max_latency;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_RETXT_LEVEL_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_VALID;
			// Enum-to-enum mapping; an unknown level clears the valid bit again.
			if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_NONE)
				wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_NONE;
			else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_LOW)
				wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_LOW;
			else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_MEDIUM)
				wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_MEDIUM;
			else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_HIGH)
				wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_HIGH;
			else
				wifi_status->valid_bitmask &= ~NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_VALID;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_BYTES_LOST_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_BYTES_LOST_VALID;
			wifi_status->ul_bytes_lost = if_wifi_sr->ul_bytes_lost;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_ERROR_RATE_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_ERROR_RATE_VALID;
			wifi_status->ul_error_rate = if_wifi_sr->ul_error_rate;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID;
			wifi_status->dl_effective_bandwidth = if_wifi_sr->dl_effective_bandwidth;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MAX_BANDWIDTH_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MAX_BANDWIDTH_VALID;
			wifi_status->dl_max_bandwidth = if_wifi_sr->dl_max_bandwidth;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MIN_LATENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MIN_LATENCY_VALID;
			wifi_status->dl_min_latency = if_wifi_sr->dl_min_latency;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_EFFECTIVE_LATENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_EFFECTIVE_LATENCY_VALID;
			wifi_status->dl_effective_latency = if_wifi_sr->dl_effective_latency;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MAX_LATENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MAX_LATENCY_VALID;
			wifi_status->dl_max_latency = if_wifi_sr->dl_max_latency;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_ERROR_RATE_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_ERROR_RATE_VALID;
			wifi_status->dl_error_rate = if_wifi_sr->dl_error_rate;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_FREQUENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_VALID;
			// Unknown frequency bands clear the valid bit again.
			if (if_wifi_sr->config_frequency == IF_WIFI_CONFIG_FREQUENCY_2_4_GHZ)
				wifi_status->config_frequency = NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_2_4_GHZ;
			else if (if_wifi_sr->config_frequency == IF_WIFI_CONFIG_FREQUENCY_5_0_GHZ)
				wifi_status->config_frequency = NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_5_0_GHZ;
			else
				wifi_status->valid_bitmask &= ~NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_VALID;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_MULTICAST_RATE_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_MULTICAST_RATE_VALID;
			wifi_status->config_multicast_rate = if_wifi_sr->config_multicast_rate;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_SCAN_COUNT_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_SCAN_COUNT_VALID;
			wifi_status->scan_count = if_wifi_sr->scan_count;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_SCAN_DURATION_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_SCAN_DURATION_VALID;
			wifi_status->scan_duration = if_wifi_sr->scan_duration;
		}
	}

done:
	lck_rw_done(&ifp->if_link_status_lock);
}
2666
2667 static u_int64_t nstat_ifnet_last_report_time = 0;
2668 extern int tcp_report_stats_interval;
2669
2670 static void
2671 nstat_ifnet_compute_percentages(struct if_tcp_ecn_perf_stat *ifst)
2672 {
2673 /* Retransmit percentage */
2674 if (ifst->total_rxmitpkts > 0 && ifst->total_txpkts > 0) {
2675 /* shift by 10 for precision */
2676 ifst->rxmit_percent =
2677 ((ifst->total_rxmitpkts << 10) * 100) / ifst->total_txpkts;
2678 } else {
2679 ifst->rxmit_percent = 0;
2680 }
2681
2682 /* Out-of-order percentage */
2683 if (ifst->total_oopkts > 0 && ifst->total_rxpkts > 0) {
2684 /* shift by 10 for precision */
2685 ifst->oo_percent =
2686 ((ifst->total_oopkts << 10) * 100) / ifst->total_rxpkts;
2687 } else {
2688 ifst->oo_percent = 0;
2689 }
2690
2691 /* Reorder percentage */
2692 if (ifst->total_reorderpkts > 0 &&
2693 (ifst->total_txpkts + ifst->total_rxpkts) > 0) {
2694 /* shift by 10 for precision */
2695 ifst->reorder_percent =
2696 ((ifst->total_reorderpkts << 10) * 100) /
2697 (ifst->total_txpkts + ifst->total_rxpkts);
2698 } else {
2699 ifst->reorder_percent = 0;
2700 }
2701 }
2702
2703 static void
2704 nstat_ifnet_normalize_counter(struct if_tcp_ecn_stat *if_st)
2705 {
2706 u_int64_t ecn_on_conn, ecn_off_conn;
2707
2708 if (if_st == NULL)
2709 return;
2710 ecn_on_conn = if_st->ecn_client_success +
2711 if_st->ecn_server_success;
2712 ecn_off_conn = if_st->ecn_off_conn +
2713 (if_st->ecn_client_setup - if_st->ecn_client_success) +
2714 (if_st->ecn_server_setup - if_st->ecn_server_success);
2715
2716 /*
2717 * report sack episodes, rst_drop and rxmit_drop
2718 * as a ratio per connection, shift by 10 for precision
2719 */
2720 if (ecn_on_conn > 0) {
2721 if_st->ecn_on.sack_episodes =
2722 (if_st->ecn_on.sack_episodes << 10) / ecn_on_conn;
2723 if_st->ecn_on.rst_drop =
2724 (if_st->ecn_on.rst_drop << 10) * 100 / ecn_on_conn;
2725 if_st->ecn_on.rxmit_drop =
2726 (if_st->ecn_on.rxmit_drop << 10) * 100 / ecn_on_conn;
2727 } else {
2728 /* set to zero, just in case */
2729 if_st->ecn_on.sack_episodes = 0;
2730 if_st->ecn_on.rst_drop = 0;
2731 if_st->ecn_on.rxmit_drop = 0;
2732 }
2733
2734 if (ecn_off_conn > 0) {
2735 if_st->ecn_off.sack_episodes =
2736 (if_st->ecn_off.sack_episodes << 10) / ecn_off_conn;
2737 if_st->ecn_off.rst_drop =
2738 (if_st->ecn_off.rst_drop << 10) * 100 / ecn_off_conn;
2739 if_st->ecn_off.rxmit_drop =
2740 (if_st->ecn_off.rxmit_drop << 10) * 100 / ecn_off_conn;
2741 } else {
2742 if_st->ecn_off.sack_episodes = 0;
2743 if_st->ecn_off.rst_drop = 0;
2744 if_st->ecn_off.rxmit_drop = 0;
2745 }
2746 if_st->ecn_total_conn = ecn_off_conn + ecn_on_conn;
2747 }
2748
// Periodically (at most once per tcp_report_stats_interval seconds of
// net_uptime) pushes per-interface ECN statistics for IPv4 and IPv6 to
// sysinfo listeners, then zeroes the interface counters so each report
// covers a fresh window.
// NOTE(review): nstat_ifnet_last_report_time is read and updated without a
// lock here — presumably callers are serialized; confirm at call sites.
static void
nstat_ifnet_report_ecn_stats(void)
{
	u_int64_t uptime, last_report_time;
	struct nstat_sysinfo_data data;
	struct nstat_sysinfo_ifnet_ecn_stats *st;
	struct ifnet *ifp;

	uptime = net_uptime();

	// Rate-limit: nothing to do until a full interval has elapsed.
	if ((int)(uptime - nstat_ifnet_last_report_time) <
	    tcp_report_stats_interval)
		return;

	last_report_time = nstat_ifnet_last_report_time;
	nstat_ifnet_last_report_time = uptime;
	data.flags = NSTAT_SYSINFO_IFNET_ECN_STATS;
	st = &data.u.ifnet_ecn_stats;

	ifnet_head_lock_shared();
	TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
		// Skip interfaces with no ECN stat blocks allocated.
		if (ifp->if_ipv4_stat == NULL || ifp->if_ipv6_stat == NULL)
			continue;

		// Skip interfaces not fully attached (or being detached).
		if ((ifp->if_refflags & (IFRF_ATTACHED | IFRF_DETACHING)) !=
		    IFRF_ATTACHED)
			continue;

		/* Limit reporting to Wifi, Ethernet and cellular. */
		if (!(IFNET_IS_ETHERNET(ifp) || IFNET_IS_CELLULAR(ifp)))
			continue;

		bzero(st, sizeof(*st));
		if (IFNET_IS_CELLULAR(ifp)) {
			st->ifnet_type = NSTAT_IFNET_ECN_TYPE_CELLULAR;
		} else if (IFNET_IS_WIFI(ifp)) {
			st->ifnet_type = NSTAT_IFNET_ECN_TYPE_WIFI;
		} else {
			st->ifnet_type = NSTAT_IFNET_ECN_TYPE_ETHERNET;
		}
		data.unsent_data_cnt = ifp->if_unsent_data_cnt;
		/* skip if there was no update since last report */
		if (ifp->if_ipv4_stat->timestamp <= 0 ||
		    ifp->if_ipv4_stat->timestamp < last_report_time)
			goto v6;
		st->ifnet_proto = NSTAT_IFNET_ECN_PROTO_IPV4;
		/* compute percentages using packet counts */
		nstat_ifnet_compute_percentages(&ifp->if_ipv4_stat->ecn_on);
		nstat_ifnet_compute_percentages(&ifp->if_ipv4_stat->ecn_off);
		nstat_ifnet_normalize_counter(ifp->if_ipv4_stat);
		bcopy(ifp->if_ipv4_stat, &st->ecn_stat,
		    sizeof(st->ecn_stat));
		nstat_sysinfo_send_data(&data);
		// Reset IPv4 counters so the next report starts a new window.
		bzero(ifp->if_ipv4_stat, sizeof(*ifp->if_ipv4_stat));

v6:
		/* skip if there was no update since last report */
		if (ifp->if_ipv6_stat->timestamp <= 0 ||
		    ifp->if_ipv6_stat->timestamp < last_report_time)
			continue;
		st->ifnet_proto = NSTAT_IFNET_ECN_PROTO_IPV6;

		/* compute percentages using packet counts */
		nstat_ifnet_compute_percentages(&ifp->if_ipv6_stat->ecn_on);
		nstat_ifnet_compute_percentages(&ifp->if_ipv6_stat->ecn_off);
		nstat_ifnet_normalize_counter(ifp->if_ipv6_stat);
		bcopy(ifp->if_ipv6_stat, &st->ecn_stat,
		    sizeof(st->ecn_stat));
		nstat_sysinfo_send_data(&data);

		/* Zero the stats in ifp */
		bzero(ifp->if_ipv6_stat, sizeof(*ifp->if_ipv6_stat));
	}
	ifnet_head_done();

}
2825
// Fill an nstat_ifnet_descriptor for the cookie's interface: name, index,
// current data threshold, type, optional description, and link status.
// Returns EINVAL if the output buffer is too small or the interface is gone.
static errno_t
nstat_ifnet_copy_descriptor(
	nstat_provider_cookie_t cookie,
	void *data,
	u_int32_t len)
{
	nstat_ifnet_descriptor *desc = (nstat_ifnet_descriptor *)data;
	struct nstat_ifnet_cookie *ifcookie =
	    (struct nstat_ifnet_cookie *)cookie;
	struct ifnet *ifp = ifcookie->ifp;

	if (len < sizeof(nstat_ifnet_descriptor))
		return EINVAL;

	if (nstat_ifnet_gone(cookie))
		return EINVAL;

	bzero(desc, sizeof(*desc));
	ifnet_lock_shared(ifp);
	strlcpy(desc->name, ifp->if_xname, sizeof(desc->name));
	desc->ifindex = ifp->if_index;
	desc->threshold = ifp->if_data_threshold;
	desc->type = ifp->if_type;
	// NOTE(review): this copies sizeof(desc->description) bytes even when
	// ifd_len is smaller — presumably ifd_desc is backed by a buffer at
	// least that large; verify against the if_desc allocation.
	if (ifp->if_desc.ifd_len < sizeof(desc->description))
		memcpy(desc->description, ifp->if_desc.ifd_desc,
		    sizeof(desc->description));
	nstat_ifnet_copy_link_status(ifp, desc);
	ifnet_lock_done(ifp);
	return 0;
}
2856
2857 static void
2858 nstat_init_ifnet_provider(void)
2859 {
2860 bzero(&nstat_ifnet_provider, sizeof(nstat_ifnet_provider));
2861 nstat_ifnet_provider.nstat_provider_id = NSTAT_PROVIDER_IFNET;
2862 nstat_ifnet_provider.nstat_descriptor_length = sizeof(nstat_ifnet_descriptor);
2863 nstat_ifnet_provider.nstat_lookup = nstat_ifnet_lookup;
2864 nstat_ifnet_provider.nstat_gone = nstat_ifnet_gone;
2865 nstat_ifnet_provider.nstat_counts = nstat_ifnet_counts;
2866 nstat_ifnet_provider.nstat_watcher_add = NULL;
2867 nstat_ifnet_provider.nstat_watcher_remove = NULL;
2868 nstat_ifnet_provider.nstat_copy_descriptor = nstat_ifnet_copy_descriptor;
2869 nstat_ifnet_provider.nstat_release = nstat_ifnet_release;
2870 nstat_ifnet_provider.next = nstat_providers;
2871 nstat_providers = &nstat_ifnet_provider;
2872 }
2873
2874 __private_extern__ void
2875 nstat_ifnet_threshold_reached(unsigned int ifindex)
2876 {
2877 nstat_control_state *state;
2878 nstat_src *src;
2879 struct ifnet *ifp;
2880 struct nstat_ifnet_cookie *ifcookie;
2881
2882 lck_mtx_lock(&nstat_mtx);
2883 for (state = nstat_controls; state; state = state->ncs_next)
2884 {
2885 lck_mtx_lock(&state->mtx);
2886 for (src = state->ncs_srcs; src; src = src->next)
2887 {
2888 if (src->provider != &nstat_ifnet_provider)
2889 continue;
2890 ifcookie = (struct nstat_ifnet_cookie *)src->cookie;
2891 ifp = ifcookie->ifp;
2892 if (ifp->if_index != ifindex)
2893 continue;
2894 nstat_control_send_counts(state, src, 0, 0, NULL);
2895 }
2896 lck_mtx_unlock(&state->mtx);
2897 }
2898 lck_mtx_unlock(&nstat_mtx);
2899 }
2900
2901 #pragma mark -- Sysinfo --
2902 static void
2903 nstat_set_keyval_scalar(nstat_sysinfo_keyval *kv, int key, u_int32_t val)
2904 {
2905 kv->nstat_sysinfo_key = key;
2906 kv->nstat_sysinfo_flags = NSTAT_SYSINFO_FLAG_SCALAR;
2907 kv->u.nstat_sysinfo_scalar = val;
2908 }
2909
2910 static void
2911 nstat_sysinfo_send_data_internal(
2912 nstat_control_state *control,
2913 nstat_sysinfo_data *data)
2914 {
2915 nstat_msg_sysinfo_counts *syscnt = NULL;
2916 size_t allocsize = 0, countsize = 0, nkeyvals = 0, finalsize = 0;
2917 nstat_sysinfo_keyval *kv;
2918 errno_t result = 0;
2919 size_t i = 0;
2920
2921 allocsize = offsetof(nstat_msg_sysinfo_counts, counts);
2922 countsize = offsetof(nstat_sysinfo_counts, nstat_sysinfo_keyvals);
2923 finalsize = allocsize;
2924
2925 /* get number of key-vals for each kind of stat */
2926 switch (data->flags)
2927 {
2928 case NSTAT_SYSINFO_MBUF_STATS:
2929 nkeyvals = sizeof(struct nstat_sysinfo_mbuf_stats) /
2930 sizeof(u_int32_t);
2931 break;
2932 case NSTAT_SYSINFO_TCP_STATS:
2933 nkeyvals = sizeof(struct nstat_sysinfo_tcp_stats) /
2934 sizeof(u_int32_t);
2935 break;
2936 case NSTAT_SYSINFO_IFNET_ECN_STATS:
2937 nkeyvals = (sizeof(struct if_tcp_ecn_stat) /
2938 sizeof(u_int64_t));
2939
2940 /* Two more keys for ifnet type and proto */
2941 nkeyvals += 2;
2942
2943 /* One key for unsent data. */
2944 nkeyvals++;
2945 break;
2946 default:
2947 return;
2948 }
2949 countsize += sizeof(nstat_sysinfo_keyval) * nkeyvals;
2950 allocsize += countsize;
2951
2952 syscnt = OSMalloc(allocsize, nstat_malloc_tag);
2953 if (syscnt == NULL)
2954 return;
2955 bzero(syscnt, allocsize);
2956
2957 kv = (nstat_sysinfo_keyval *) &syscnt->counts.nstat_sysinfo_keyvals;
2958 switch (data->flags)
2959 {
2960 case NSTAT_SYSINFO_MBUF_STATS:
2961 {
2962 nstat_set_keyval_scalar(&kv[i++],
2963 NSTAT_SYSINFO_KEY_MBUF_256B_TOTAL,
2964 data->u.mb_stats.total_256b);
2965 nstat_set_keyval_scalar(&kv[i++],
2966 NSTAT_SYSINFO_KEY_MBUF_2KB_TOTAL,
2967 data->u.mb_stats.total_2kb);
2968 nstat_set_keyval_scalar(&kv[i++],
2969 NSTAT_SYSINFO_KEY_MBUF_4KB_TOTAL,
2970 data->u.mb_stats.total_4kb);
2971 nstat_set_keyval_scalar(&kv[i++],
2972 NSTAT_SYSINFO_MBUF_16KB_TOTAL,
2973 data->u.mb_stats.total_16kb);
2974 nstat_set_keyval_scalar(&kv[i++],
2975 NSTAT_SYSINFO_KEY_SOCK_MBCNT,
2976 data->u.mb_stats.sbmb_total);
2977 nstat_set_keyval_scalar(&kv[i++],
2978 NSTAT_SYSINFO_KEY_SOCK_ATMBLIMIT,
2979 data->u.mb_stats.sb_atmbuflimit);
2980 nstat_set_keyval_scalar(&kv[i++],
2981 NSTAT_SYSINFO_MBUF_DRAIN_CNT,
2982 data->u.mb_stats.draincnt);
2983 nstat_set_keyval_scalar(&kv[i++],
2984 NSTAT_SYSINFO_MBUF_MEM_RELEASED,
2985 data->u.mb_stats.memreleased);
2986 nstat_set_keyval_scalar(&kv[i++],
2987 NSTAT_SYSINFO_KEY_SOCK_MBFLOOR,
2988 data->u.mb_stats.sbmb_floor);
2989 VERIFY(i == nkeyvals);
2990 break;
2991 }
2992 case NSTAT_SYSINFO_TCP_STATS:
2993 {
2994 nstat_set_keyval_scalar(&kv[i++],
2995 NSTAT_SYSINFO_KEY_IPV4_AVGRTT,
2996 data->u.tcp_stats.ipv4_avgrtt);
2997 nstat_set_keyval_scalar(&kv[i++],
2998 NSTAT_SYSINFO_KEY_IPV6_AVGRTT,
2999 data->u.tcp_stats.ipv6_avgrtt);
3000 nstat_set_keyval_scalar(&kv[i++],
3001 NSTAT_SYSINFO_KEY_SEND_PLR,
3002 data->u.tcp_stats.send_plr);
3003 nstat_set_keyval_scalar(&kv[i++],
3004 NSTAT_SYSINFO_KEY_RECV_PLR,
3005 data->u.tcp_stats.recv_plr);
3006 nstat_set_keyval_scalar(&kv[i++],
3007 NSTAT_SYSINFO_KEY_SEND_TLRTO,
3008 data->u.tcp_stats.send_tlrto_rate);
3009 nstat_set_keyval_scalar(&kv[i++],
3010 NSTAT_SYSINFO_KEY_SEND_REORDERRATE,
3011 data->u.tcp_stats.send_reorder_rate);
3012 nstat_set_keyval_scalar(&kv[i++],
3013 NSTAT_SYSINFO_CONNECTION_ATTEMPTS,
3014 data->u.tcp_stats.connection_attempts);
3015 nstat_set_keyval_scalar(&kv[i++],
3016 NSTAT_SYSINFO_CONNECTION_ACCEPTS,
3017 data->u.tcp_stats.connection_accepts);
3018 nstat_set_keyval_scalar(&kv[i++],
3019 NSTAT_SYSINFO_ECN_CLIENT_ENABLED,
3020 data->u.tcp_stats.ecn_client_enabled);
3021 nstat_set_keyval_scalar(&kv[i++],
3022 NSTAT_SYSINFO_ECN_SERVER_ENABLED,
3023 data->u.tcp_stats.ecn_server_enabled);
3024 nstat_set_keyval_scalar(&kv[i++],
3025 NSTAT_SYSINFO_ECN_CLIENT_SETUP,
3026 data->u.tcp_stats.ecn_client_setup);
3027 nstat_set_keyval_scalar(&kv[i++],
3028 NSTAT_SYSINFO_ECN_SERVER_SETUP,
3029 data->u.tcp_stats.ecn_server_setup);
3030 nstat_set_keyval_scalar(&kv[i++],
3031 NSTAT_SYSINFO_ECN_CLIENT_SUCCESS,
3032 data->u.tcp_stats.ecn_client_success);
3033 nstat_set_keyval_scalar(&kv[i++],
3034 NSTAT_SYSINFO_ECN_SERVER_SUCCESS,
3035 data->u.tcp_stats.ecn_server_success);
3036 nstat_set_keyval_scalar(&kv[i++],
3037 NSTAT_SYSINFO_ECN_NOT_SUPPORTED,
3038 data->u.tcp_stats.ecn_not_supported);
3039 nstat_set_keyval_scalar(&kv[i++],
3040 NSTAT_SYSINFO_ECN_LOST_SYN,
3041 data->u.tcp_stats.ecn_lost_syn);
3042 nstat_set_keyval_scalar(&kv[i++],
3043 NSTAT_SYSINFO_ECN_LOST_SYNACK,
3044 data->u.tcp_stats.ecn_lost_synack);
3045 nstat_set_keyval_scalar(&kv[i++],
3046 NSTAT_SYSINFO_ECN_RECV_CE,
3047 data->u.tcp_stats.ecn_recv_ce);
3048 nstat_set_keyval_scalar(&kv[i++],
3049 NSTAT_SYSINFO_ECN_RECV_ECE,
3050 data->u.tcp_stats.ecn_recv_ece);
3051 nstat_set_keyval_scalar(&kv[i++],
3052 NSTAT_SYSINFO_ECN_SENT_ECE,
3053 data->u.tcp_stats.ecn_sent_ece);
3054 nstat_set_keyval_scalar(&kv[i++],
3055 NSTAT_SYSINFO_ECN_CONN_RECV_CE,
3056 data->u.tcp_stats.ecn_conn_recv_ce);
3057 nstat_set_keyval_scalar(&kv[i++],
3058 NSTAT_SYSINFO_ECN_CONN_RECV_ECE,
3059 data->u.tcp_stats.ecn_conn_recv_ece);
3060 nstat_set_keyval_scalar(&kv[i++],
3061 NSTAT_SYSINFO_ECN_CONN_PLNOCE,
3062 data->u.tcp_stats.ecn_conn_plnoce);
3063 nstat_set_keyval_scalar(&kv[i++],
3064 NSTAT_SYSINFO_ECN_CONN_PL_CE,
3065 data->u.tcp_stats.ecn_conn_pl_ce);
3066 nstat_set_keyval_scalar(&kv[i++],
3067 NSTAT_SYSINFO_ECN_CONN_NOPL_CE,
3068 data->u.tcp_stats.ecn_conn_nopl_ce);
3069 nstat_set_keyval_scalar(&kv[i++],
3070 NSTAT_SYSINFO_ECN_FALLBACK_SYNLOSS,
3071 data->u.tcp_stats.ecn_fallback_synloss);
3072 nstat_set_keyval_scalar(&kv[i++],
3073 NSTAT_SYSINFO_ECN_FALLBACK_REORDER,
3074 data->u.tcp_stats.ecn_fallback_reorder);
3075 nstat_set_keyval_scalar(&kv[i++],
3076 NSTAT_SYSINFO_ECN_FALLBACK_CE,
3077 data->u.tcp_stats.ecn_fallback_ce);
3078 nstat_set_keyval_scalar(&kv[i++],
3079 NSTAT_SYSINFO_TFO_SYN_DATA_RCV,
3080 data->u.tcp_stats.tfo_syn_data_rcv);
3081 nstat_set_keyval_scalar(&kv[i++],
3082 NSTAT_SYSINFO_TFO_COOKIE_REQ_RCV,
3083 data->u.tcp_stats.tfo_cookie_req_rcv);
3084 nstat_set_keyval_scalar(&kv[i++],
3085 NSTAT_SYSINFO_TFO_COOKIE_SENT,
3086 data->u.tcp_stats.tfo_cookie_sent);
3087 nstat_set_keyval_scalar(&kv[i++],
3088 NSTAT_SYSINFO_TFO_COOKIE_INVALID,
3089 data->u.tcp_stats.tfo_cookie_invalid);
3090 nstat_set_keyval_scalar(&kv[i++],
3091 NSTAT_SYSINFO_TFO_COOKIE_REQ,
3092 data->u.tcp_stats.tfo_cookie_req);
3093 nstat_set_keyval_scalar(&kv[i++],
3094 NSTAT_SYSINFO_TFO_COOKIE_RCV,
3095 data->u.tcp_stats.tfo_cookie_rcv);
3096 nstat_set_keyval_scalar(&kv[i++],
3097 NSTAT_SYSINFO_TFO_SYN_DATA_SENT,
3098 data->u.tcp_stats.tfo_syn_data_sent);
3099 nstat_set_keyval_scalar(&kv[i++],
3100 NSTAT_SYSINFO_TFO_SYN_DATA_ACKED,
3101 data->u.tcp_stats.tfo_syn_data_acked);
3102 nstat_set_keyval_scalar(&kv[i++],
3103 NSTAT_SYSINFO_TFO_SYN_LOSS,
3104 data->u.tcp_stats.tfo_syn_loss);
3105 nstat_set_keyval_scalar(&kv[i++],
3106 NSTAT_SYSINFO_TFO_BLACKHOLE,
3107 data->u.tcp_stats.tfo_blackhole);
3108 nstat_set_keyval_scalar(&kv[i++],
3109 NSTAT_SYSINFO_TFO_COOKIE_WRONG,
3110 data->u.tcp_stats.tfo_cookie_wrong);
3111 nstat_set_keyval_scalar(&kv[i++],
3112 NSTAT_SYSINFO_TFO_NO_COOKIE_RCV,
3113 data->u.tcp_stats.tfo_no_cookie_rcv);
3114 nstat_set_keyval_scalar(&kv[i++],
3115 NSTAT_SYSINFO_TFO_HEURISTICS_DISABLE,
3116 data->u.tcp_stats.tfo_heuristics_disable);
3117 nstat_set_keyval_scalar(&kv[i++],
3118 NSTAT_SYSINFO_TFO_SEND_BLACKHOLE,
3119 data->u.tcp_stats.tfo_sndblackhole);
3120 VERIFY(i == nkeyvals);
3121 break;
3122 }
3123 case NSTAT_SYSINFO_IFNET_ECN_STATS:
3124 {
3125 nstat_set_keyval_scalar(&kv[i++],
3126 NSTAT_SYSINFO_ECN_IFNET_TYPE,
3127 data->u.ifnet_ecn_stats.ifnet_type);
3128 nstat_set_keyval_scalar(&kv[i++],
3129 NSTAT_SYSINFO_ECN_IFNET_PROTO,
3130 data->u.ifnet_ecn_stats.ifnet_proto);
3131 nstat_set_keyval_scalar(&kv[i++],
3132 NSTAT_SYSINFO_ECN_IFNET_CLIENT_SETUP,
3133 data->u.ifnet_ecn_stats.ecn_stat.ecn_client_setup);
3134 nstat_set_keyval_scalar(&kv[i++],
3135 NSTAT_SYSINFO_ECN_IFNET_SERVER_SETUP,
3136 data->u.ifnet_ecn_stats.ecn_stat.ecn_server_setup);
3137 nstat_set_keyval_scalar(&kv[i++],
3138 NSTAT_SYSINFO_ECN_IFNET_CLIENT_SUCCESS,
3139 data->u.ifnet_ecn_stats.ecn_stat.ecn_client_success);
3140 nstat_set_keyval_scalar(&kv[i++],
3141 NSTAT_SYSINFO_ECN_IFNET_SERVER_SUCCESS,
3142 data->u.ifnet_ecn_stats.ecn_stat.ecn_server_success);
3143 nstat_set_keyval_scalar(&kv[i++],
3144 NSTAT_SYSINFO_ECN_IFNET_PEER_NOSUPPORT,
3145 data->u.ifnet_ecn_stats.ecn_stat.ecn_peer_nosupport);
3146 nstat_set_keyval_scalar(&kv[i++],
3147 NSTAT_SYSINFO_ECN_IFNET_SYN_LOST,
3148 data->u.ifnet_ecn_stats.ecn_stat.ecn_syn_lost);
3149 nstat_set_keyval_scalar(&kv[i++],
3150 NSTAT_SYSINFO_ECN_IFNET_SYNACK_LOST,
3151 data->u.ifnet_ecn_stats.ecn_stat.ecn_synack_lost);
3152 nstat_set_keyval_scalar(&kv[i++],
3153 NSTAT_SYSINFO_ECN_IFNET_RECV_CE,
3154 data->u.ifnet_ecn_stats.ecn_stat.ecn_recv_ce);
3155 nstat_set_keyval_scalar(&kv[i++],
3156 NSTAT_SYSINFO_ECN_IFNET_RECV_ECE,
3157 data->u.ifnet_ecn_stats.ecn_stat.ecn_recv_ece);
3158 nstat_set_keyval_scalar(&kv[i++],
3159 NSTAT_SYSINFO_ECN_IFNET_CONN_RECV_CE,
3160 data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_recv_ce);
3161 nstat_set_keyval_scalar(&kv[i++],
3162 NSTAT_SYSINFO_ECN_IFNET_CONN_RECV_ECE,
3163 data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_recv_ece);
3164 nstat_set_keyval_scalar(&kv[i++],
3165 NSTAT_SYSINFO_ECN_IFNET_CONN_PLNOCE,
3166 data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_plnoce);
3167 nstat_set_keyval_scalar(&kv[i++],
3168 NSTAT_SYSINFO_ECN_IFNET_CONN_PLCE,
3169 data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_plce);
3170 nstat_set_keyval_scalar(&kv[i++],
3171 NSTAT_SYSINFO_ECN_IFNET_CONN_NOPLCE,
3172 data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_noplce);
3173 nstat_set_keyval_scalar(&kv[i++],
3174 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_SYNLOSS,
3175 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_synloss);
3176 nstat_set_keyval_scalar(&kv[i++],
3177 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_REORDER,
3178 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_reorder);
3179 nstat_set_keyval_scalar(&kv[i++],
3180 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_CE,
3181 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_ce);
3182 nstat_set_keyval_scalar(&kv[i++],
3183 NSTAT_SYSINFO_ECN_IFNET_ON_RTT_AVG,
3184 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rtt_avg);
3185 nstat_set_keyval_scalar(&kv[i++],
3186 NSTAT_SYSINFO_ECN_IFNET_ON_RTT_VAR,
3187 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rtt_var);
3188 nstat_set_keyval_scalar(&kv[i++],
3189 NSTAT_SYSINFO_ECN_IFNET_ON_OOPERCENT,
3190 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.oo_percent);
3191 nstat_set_keyval_scalar(&kv[i++],
3192 NSTAT_SYSINFO_ECN_IFNET_ON_SACK_EPISODE,
3193 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.sack_episodes);
3194 nstat_set_keyval_scalar(&kv[i++],
3195 NSTAT_SYSINFO_ECN_IFNET_ON_REORDER_PERCENT,
3196 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.reorder_percent);
3197 nstat_set_keyval_scalar(&kv[i++],
3198 NSTAT_SYSINFO_ECN_IFNET_ON_RXMIT_PERCENT,
3199 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rxmit_percent);
3200 nstat_set_keyval_scalar(&kv[i++],
3201 NSTAT_SYSINFO_ECN_IFNET_ON_RXMIT_DROP,
3202 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rxmit_drop);
3203 nstat_set_keyval_scalar(&kv[i++],
3204 NSTAT_SYSINFO_ECN_IFNET_OFF_RTT_AVG,
3205 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rtt_avg);
3206 nstat_set_keyval_scalar(&kv[i++],
3207 NSTAT_SYSINFO_ECN_IFNET_OFF_RTT_VAR,
3208 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rtt_var);
3209 nstat_set_keyval_scalar(&kv[i++],
3210 NSTAT_SYSINFO_ECN_IFNET_OFF_OOPERCENT,
3211 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.oo_percent);
3212 nstat_set_keyval_scalar(&kv[i++],
3213 NSTAT_SYSINFO_ECN_IFNET_OFF_SACK_EPISODE,
3214 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.sack_episodes);
3215 nstat_set_keyval_scalar(&kv[i++],
3216 NSTAT_SYSINFO_ECN_IFNET_OFF_REORDER_PERCENT,
3217 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.reorder_percent);
3218 nstat_set_keyval_scalar(&kv[i++],
3219 NSTAT_SYSINFO_ECN_IFNET_OFF_RXMIT_PERCENT,
3220 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rxmit_percent);
3221 nstat_set_keyval_scalar(&kv[i++],
3222 NSTAT_SYSINFO_ECN_IFNET_OFF_RXMIT_DROP,
3223 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rxmit_drop);
3224 nstat_set_keyval_scalar(&kv[i++],
3225 NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_TXPKTS,
3226 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_txpkts);
3227 nstat_set_keyval_scalar(&kv[i++],
3228 NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_RXMTPKTS,
3229 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_rxmitpkts);
3230 nstat_set_keyval_scalar(&kv[i++],
3231 NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_RXPKTS,
3232 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_rxpkts);
3233 nstat_set_keyval_scalar(&kv[i++],
3234 NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_OOPKTS,
3235 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_oopkts);
3236 nstat_set_keyval_scalar(&kv[i++],
3237 NSTAT_SYSINFO_ECN_IFNET_ON_DROP_RST,
3238 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rst_drop);
3239 nstat_set_keyval_scalar(&kv[i++],
3240 NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_TXPKTS,
3241 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_txpkts);
3242 nstat_set_keyval_scalar(&kv[i++],
3243 NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_RXMTPKTS,
3244 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_rxmitpkts);
3245 nstat_set_keyval_scalar(&kv[i++],
3246 NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_RXPKTS,
3247 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_rxpkts);
3248 nstat_set_keyval_scalar(&kv[i++],
3249 NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_OOPKTS,
3250 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_oopkts);
3251 nstat_set_keyval_scalar(&kv[i++],
3252 NSTAT_SYSINFO_ECN_IFNET_OFF_DROP_RST,
3253 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rst_drop);
3254 nstat_set_keyval_scalar(&kv[i++],
3255 NSTAT_SYSINFO_ECN_IFNET_TOTAL_CONN,
3256 data->u.ifnet_ecn_stats.ecn_stat.ecn_total_conn);
3257 nstat_set_keyval_scalar(&kv[i++],
3258 NSTAT_SYSINFO_IFNET_UNSENT_DATA,
3259 data->unsent_data_cnt);
3260 nstat_set_keyval_scalar(&kv[i++],
3261 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_DROPRST,
3262 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_droprst);
3263 nstat_set_keyval_scalar(&kv[i++],
3264 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_DROPRXMT,
3265 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_droprxmt);
3266 break;
3267 }
3268 }
3269 if (syscnt != NULL)
3270 {
3271 VERIFY(i > 0 && i <= nkeyvals);
3272 countsize = offsetof(nstat_sysinfo_counts,
3273 nstat_sysinfo_keyvals) +
3274 sizeof(nstat_sysinfo_keyval) * i;
3275 finalsize += countsize;
3276 syscnt->hdr.type = NSTAT_MSG_TYPE_SYSINFO_COUNTS;
3277 syscnt->hdr.length = finalsize;
3278 syscnt->counts.nstat_sysinfo_len = countsize;
3279
3280 result = ctl_enqueuedata(control->ncs_kctl,
3281 control->ncs_unit, syscnt, finalsize, CTL_DATA_EOR);
3282 if (result != 0)
3283 {
3284 nstat_stats.nstat_sysinfofailures += 1;
3285 }
3286 OSFree(syscnt, allocsize, nstat_malloc_tag);
3287 }
3288 return;
3289 }
3290
3291 __private_extern__ void
3292 nstat_sysinfo_send_data(
3293 nstat_sysinfo_data *data)
3294 {
3295 nstat_control_state *control;
3296
3297 lck_mtx_lock(&nstat_mtx);
3298 for (control = nstat_controls; control; control = control->ncs_next)
3299 {
3300 lck_mtx_lock(&control->mtx);
3301 if ((control->ncs_flags & NSTAT_FLAG_SYSINFO_SUBSCRIBED) != 0)
3302 {
3303 nstat_sysinfo_send_data_internal(control, data);
3304 }
3305 lck_mtx_unlock(&control->mtx);
3306 }
3307 lck_mtx_unlock(&nstat_mtx);
3308 }
3309
/*
 * Trigger generation of the periodic system-level statistics reports.
 * Each callee gathers its own counters and pushes them to subscribers
 * via nstat_sysinfo_send_data().  Invoked from nstat_idle_check() after
 * all nstat locks have been dropped.
 */
static void
nstat_sysinfo_generate_report(void)
{
	mbuf_report_peak_usage();
	tcp_report_stats();
	nstat_ifnet_report_ecn_stats();
}
3317
3318 #pragma mark -- Kernel Control Socket --
3319
3320 static kern_ctl_ref nstat_ctlref = NULL;
3321 static lck_grp_t *nstat_lck_grp = NULL;
3322
3323 static errno_t nstat_control_connect(kern_ctl_ref kctl, struct sockaddr_ctl *sac, void **uinfo);
3324 static errno_t nstat_control_disconnect(kern_ctl_ref kctl, u_int32_t unit, void *uinfo);
3325 static errno_t nstat_control_send(kern_ctl_ref kctl, u_int32_t unit, void *uinfo, mbuf_t m, int flags);
3326
3327 static errno_t
3328 nstat_enqueue_success(
3329 uint64_t context,
3330 nstat_control_state *state,
3331 u_int16_t flags)
3332 {
3333 nstat_msg_hdr success;
3334 errno_t result;
3335
3336 bzero(&success, sizeof(success));
3337 success.context = context;
3338 success.type = NSTAT_MSG_TYPE_SUCCESS;
3339 success.length = sizeof(success);
3340 success.flags = flags;
3341 result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &success,
3342 sizeof(success), CTL_DATA_EOR | CTL_DATA_CRIT);
3343 if (result != 0) {
3344 if (nstat_debug != 0)
3345 printf("%s: could not enqueue success message %d\n",
3346 __func__, result);
3347 nstat_stats.nstat_successmsgfailures += 1;
3348 }
3349 return result;
3350 }
3351
3352 static errno_t
3353 nstat_control_send_goodbye(
3354 nstat_control_state *state,
3355 nstat_src *src)
3356 {
3357 errno_t result = 0;
3358 int failed = 0;
3359
3360 if (nstat_control_reporting_allowed(state, src))
3361 {
3362 if ((state->ncs_flags & NSTAT_FLAG_SUPPORTS_UPDATES) != 0)
3363 {
3364 result = nstat_control_send_update(state, src, 0, NSTAT_MSG_HDR_FLAG_CLOSING, NULL);
3365 if (result != 0)
3366 {
3367 failed = 1;
3368 if (nstat_debug != 0)
3369 printf("%s - nstat_control_send_update() %d\n", __func__, result);
3370 }
3371 }
3372 else
3373 {
3374 // send one last counts notification
3375 result = nstat_control_send_counts(state, src, 0, NSTAT_MSG_HDR_FLAG_CLOSING, NULL);
3376 if (result != 0)
3377 {
3378 failed = 1;
3379 if (nstat_debug != 0)
3380 printf("%s - nstat_control_send_counts() %d\n", __func__, result);
3381 }
3382
3383 // send a last description
3384 result = nstat_control_send_description(state, src, 0, NSTAT_MSG_HDR_FLAG_CLOSING);
3385 if (result != 0)
3386 {
3387 failed = 1;
3388 if (nstat_debug != 0)
3389 printf("%s - nstat_control_send_description() %d\n", __func__, result);
3390 }
3391 }
3392 }
3393
3394 // send the source removed notification
3395 result = nstat_control_send_removed(state, src);
3396 if (result != 0 && nstat_debug)
3397 {
3398 failed = 1;
3399 if (nstat_debug != 0)
3400 printf("%s - nstat_control_send_removed() %d\n", __func__, result);
3401 }
3402
3403 if (failed != 0)
3404 nstat_stats.nstat_control_send_goodbye_failures++;
3405
3406
3407 return result;
3408 }
3409
3410 static errno_t
3411 nstat_flush_accumulated_msgs(
3412 nstat_control_state *state)
3413 {
3414 errno_t result = 0;
3415 if (state->ncs_accumulated != NULL && mbuf_len(state->ncs_accumulated) > 0)
3416 {
3417 mbuf_pkthdr_setlen(state->ncs_accumulated, mbuf_len(state->ncs_accumulated));
3418 result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, state->ncs_accumulated, CTL_DATA_EOR);
3419 if (result != 0)
3420 {
3421 nstat_stats.nstat_flush_accumulated_msgs_failures++;
3422 if (nstat_debug != 0)
3423 printf("%s - ctl_enqueuembuf failed: %d\n", __func__, result);
3424 mbuf_freem(state->ncs_accumulated);
3425 }
3426 state->ncs_accumulated = NULL;
3427 }
3428 return result;
3429 }
3430
/*
 * Append a fully-formed message to the control's batching mbuf,
 * flushing the batch first if the new message will not fit in the
 * remaining space.  If batching fails (allocation or copy failure),
 * the message is instead sent immediately via ctl_enqueuedata().
 *
 * hdr must point at a complete message of `length` bytes; hdr->length
 * is overwritten with `length` before the copy.
 */
static errno_t
nstat_accumulate_msg(
	nstat_control_state *state,
	nstat_msg_hdr *hdr,
	size_t length)
{
	if (state->ncs_accumulated && mbuf_trailingspace(state->ncs_accumulated) < length)
	{
		// Will send the current mbuf
		nstat_flush_accumulated_msgs(state);
	}

	errno_t result = 0;

	if (state->ncs_accumulated == NULL)
	{
		unsigned int one = 1;
		if (mbuf_allocpacket(MBUF_DONTWAIT, NSTAT_MAX_MSG_SIZE, &one, &state->ncs_accumulated) != 0)
		{
			if (nstat_debug != 0)
				printf("%s - mbuf_allocpacket failed\n", __func__);
			result = ENOMEM;
		}
		else
		{
			// Fresh packet: no payload yet.
			mbuf_setlen(state->ncs_accumulated, 0);
		}
	}

	if (result == 0)
	{
		hdr->length = length;
		// Append at the current end of the accumulated payload.
		// NOTE(review): if mbuf_copyback() fails after partially
		// extending the chain, the fallback flush below may transmit
		// the partial bytes -- confirm mbuf_copyback failure semantics.
		result = mbuf_copyback(state->ncs_accumulated, mbuf_len(state->ncs_accumulated),
		    length, hdr, MBUF_DONTWAIT);
	}

	if (result != 0)
	{
		// Batching failed; fall back to sending this message alone.
		nstat_flush_accumulated_msgs(state);
		if (nstat_debug != 0)
			printf("%s - resorting to ctl_enqueuedata\n", __func__);
		result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, hdr, length, CTL_DATA_EOR);
	}

	if (result != 0)
		nstat_stats.nstat_accumulate_msg_failures++;

	return result;
}
3480
/*
 * Periodic housekeeping pass, run as a delayed thread call.
 * For every control state that has not requested counts this interval
 * (NSTAT_FLAG_REQCOUNTS), unlink sources whose provider reports them
 * gone, sending each a goodbye.  Dead sources are collected on a local
 * list and released only after every lock has been dropped, since
 * cleanup calls back into the provider.  Re-arms itself while any
 * control sockets remain open.
 */
static void*
nstat_idle_check(
	__unused thread_call_param_t p0,
	__unused thread_call_param_t p1)
{
	lck_mtx_lock(&nstat_mtx);

	// Cleared so nstat_control_connect() knows the timer must be re-armed.
	nstat_idle_time = 0;

	nstat_control_state *control;
	nstat_src *dead = NULL;
	nstat_src *dead_list = NULL;
	for (control = nstat_controls; control; control = control->ncs_next)
	{
		lck_mtx_lock(&control->mtx);
		nstat_src **srcpp = &control->ncs_srcs;

		if (!(control->ncs_flags & NSTAT_FLAG_REQCOUNTS))
		{
			while(*srcpp != NULL)
			{
				if ((*srcpp)->provider->nstat_gone((*srcpp)->cookie))
				{
					errno_t result;

					// Pull it off the list
					dead = *srcpp;
					*srcpp = (*srcpp)->next;

					result = nstat_control_send_goodbye(control, dead);

					// Put this on the list to release later
					dead->next = dead_list;
					dead_list = dead;
				}
				else
				{
					srcpp = &(*srcpp)->next;
				}
			}
		}
		// One-shot flag: consumed each pass, set again on client activity.
		control->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS;
		lck_mtx_unlock(&control->mtx);
	}

	if (nstat_controls)
	{
		// Re-arm: run again in 60 seconds while clients remain.
		clock_interval_to_deadline(60, NSEC_PER_SEC, &nstat_idle_time);
		thread_call_func_delayed((thread_call_func_t)nstat_idle_check, NULL, nstat_idle_time);
	}

	lck_mtx_unlock(&nstat_mtx);

	/* Generate any system level reports, if needed */
	nstat_sysinfo_generate_report();

	// Release the sources now that we aren't holding lots of locks
	while (dead_list)
	{
		dead = dead_list;
		dead_list = dead->next;

		nstat_control_cleanup_source(NULL, dead, FALSE);
	}

	return NULL;
}
3548
3549 static void
3550 nstat_control_register(void)
3551 {
3552 // Create our lock group first
3553 lck_grp_attr_t *grp_attr = lck_grp_attr_alloc_init();
3554 lck_grp_attr_setdefault(grp_attr);
3555 nstat_lck_grp = lck_grp_alloc_init("network statistics kctl", grp_attr);
3556 lck_grp_attr_free(grp_attr);
3557
3558 lck_mtx_init(&nstat_mtx, nstat_lck_grp, NULL);
3559
3560 // Register the control
3561 struct kern_ctl_reg nstat_control;
3562 bzero(&nstat_control, sizeof(nstat_control));
3563 strlcpy(nstat_control.ctl_name, NET_STAT_CONTROL_NAME, sizeof(nstat_control.ctl_name));
3564 nstat_control.ctl_flags = CTL_FLAG_REG_EXTENDED | CTL_FLAG_REG_CRIT;
3565 nstat_control.ctl_sendsize = nstat_sendspace;
3566 nstat_control.ctl_recvsize = nstat_recvspace;
3567 nstat_control.ctl_connect = nstat_control_connect;
3568 nstat_control.ctl_disconnect = nstat_control_disconnect;
3569 nstat_control.ctl_send = nstat_control_send;
3570
3571 ctl_register(&nstat_control, &nstat_ctlref);
3572 }
3573
3574 static void
3575 nstat_control_cleanup_source(
3576 nstat_control_state *state,
3577 struct nstat_src *src,
3578 boolean_t locked)
3579 {
3580 errno_t result;
3581
3582 if (state)
3583 {
3584 result = nstat_control_send_removed(state, src);
3585 if (result != 0)
3586 {
3587 nstat_stats.nstat_control_cleanup_source_failures++;
3588 if (nstat_debug != 0)
3589 printf("%s - nstat_control_send_removed() %d\n",
3590 __func__, result);
3591 }
3592 }
3593 // Cleanup the source if we found it.
3594 src->provider->nstat_release(src->cookie, locked);
3595 OSFree(src, sizeof(*src), nstat_malloc_tag);
3596 }
3597
3598
3599 static bool
3600 nstat_control_reporting_allowed(
3601 nstat_control_state *state,
3602 nstat_src *src)
3603 {
3604 if (src->provider->nstat_reporting_allowed == NULL)
3605 return TRUE;
3606
3607 return (
3608 src->provider->nstat_reporting_allowed(src->cookie,
3609 &state->ncs_provider_filters[src->provider->nstat_provider_id])
3610 );
3611 }
3612
3613
3614 static errno_t
3615 nstat_control_connect(
3616 kern_ctl_ref kctl,
3617 struct sockaddr_ctl *sac,
3618 void **uinfo)
3619 {
3620 nstat_control_state *state = OSMalloc(sizeof(*state), nstat_malloc_tag);
3621 if (state == NULL) return ENOMEM;
3622
3623 bzero(state, sizeof(*state));
3624 lck_mtx_init(&state->mtx, nstat_lck_grp, NULL);
3625 state->ncs_kctl = kctl;
3626 state->ncs_unit = sac->sc_unit;
3627 state->ncs_flags = NSTAT_FLAG_REQCOUNTS;
3628 *uinfo = state;
3629
3630 lck_mtx_lock(&nstat_mtx);
3631 state->ncs_next = nstat_controls;
3632 nstat_controls = state;
3633
3634 if (nstat_idle_time == 0)
3635 {
3636 clock_interval_to_deadline(60, NSEC_PER_SEC, &nstat_idle_time);
3637 thread_call_func_delayed((thread_call_func_t)nstat_idle_check, NULL, nstat_idle_time);
3638 }
3639
3640 lck_mtx_unlock(&nstat_mtx);
3641
3642 return 0;
3643 }
3644
/*
 * Kernel control disconnect handler.  Unlinks the state from the
 * global list, detaches it from every provider it was watching,
 * releases any batched (unsent) messages, then cleans up all sources
 * and frees the state.  Sources are cleaned up after the state lock is
 * dropped so provider callbacks are not invoked with it held.
 */
static errno_t
nstat_control_disconnect(
	__unused kern_ctl_ref kctl,
	__unused u_int32_t unit,
	void *uinfo)
{
	u_int32_t watching;
	nstat_control_state *state = (nstat_control_state*)uinfo;

	// pull it out of the global list of states
	lck_mtx_lock(&nstat_mtx);
	nstat_control_state **statepp;
	for (statepp = &nstat_controls; *statepp; statepp = &(*statepp)->ncs_next)
	{
		if (*statepp == state)
		{
			*statepp = state->ncs_next;
			break;
		}
	}
	lck_mtx_unlock(&nstat_mtx);

	lck_mtx_lock(&state->mtx);
	// Stop watching for sources
	nstat_provider *provider;
	watching = state->ncs_watching;
	state->ncs_watching = 0;
	// ncs_watching is a bitmask indexed by provider id; clear each bit
	// as the corresponding watcher is removed so the loop can stop early.
	for (provider = nstat_providers; provider && watching; provider = provider->next)
	{
		if ((watching & (1 << provider->nstat_provider_id)) != 0)
		{
			watching &= ~(1 << provider->nstat_provider_id);
			provider->nstat_watcher_remove(state);
		}
	}

	// set cleanup flags
	state->ncs_flags |= NSTAT_FLAG_CLEANUP;

	// Drop any partially accumulated (unsent) messages.
	if (state->ncs_accumulated)
	{
		mbuf_freem(state->ncs_accumulated);
		state->ncs_accumulated = NULL;
	}

	// Copy out the list of sources
	nstat_src *srcs = state->ncs_srcs;
	state->ncs_srcs = NULL;
	lck_mtx_unlock(&state->mtx);

	while (srcs)
	{
		nstat_src *src;

		// pull it out of the list
		src = srcs;
		srcs = src->next;

		// clean it up
		nstat_control_cleanup_source(NULL, src, FALSE);
	}
	lck_mtx_destroy(&state->mtx, nstat_lck_grp);
	OSFree(state, sizeof(*state), nstat_malloc_tag);

	return 0;
}
3711
3712 static nstat_src_ref_t
3713 nstat_control_next_src_ref(
3714 nstat_control_state *state)
3715 {
3716 return ++state->ncs_next_srcref;
3717 }
3718
3719 static errno_t
3720 nstat_control_send_counts(
3721 nstat_control_state *state,
3722 nstat_src *src,
3723 unsigned long long context,
3724 u_int16_t hdr_flags,
3725 int *gone)
3726 {
3727 nstat_msg_src_counts counts;
3728 errno_t result = 0;
3729
3730 /* Some providers may not have any counts to send */
3731 if (src->provider->nstat_counts == NULL)
3732 return (0);
3733
3734 bzero(&counts, sizeof(counts));
3735 counts.hdr.type = NSTAT_MSG_TYPE_SRC_COUNTS;
3736 counts.hdr.length = sizeof(counts);
3737 counts.hdr.flags = hdr_flags;
3738 counts.hdr.context = context;
3739 counts.srcref = src->srcref;
3740 counts.event_flags = 0;
3741
3742 if (src->provider->nstat_counts(src->cookie, &counts.counts, gone) == 0)
3743 {
3744 if ((src->filter & NSTAT_FILTER_NOZEROBYTES) &&
3745 counts.counts.nstat_rxbytes == 0 &&
3746 counts.counts.nstat_txbytes == 0)
3747 {
3748 result = EAGAIN;
3749 }
3750 else
3751 {
3752 result = ctl_enqueuedata(state->ncs_kctl,
3753 state->ncs_unit, &counts, sizeof(counts),
3754 CTL_DATA_EOR);
3755 if (result != 0)
3756 nstat_stats.nstat_sendcountfailures += 1;
3757 }
3758 }
3759 return result;
3760 }
3761
3762 static errno_t
3763 nstat_control_append_counts(
3764 nstat_control_state *state,
3765 nstat_src *src,
3766 int *gone)
3767 {
3768 /* Some providers may not have any counts to send */
3769 if (!src->provider->nstat_counts) return 0;
3770
3771 nstat_msg_src_counts counts;
3772 bzero(&counts, sizeof(counts));
3773 counts.hdr.type = NSTAT_MSG_TYPE_SRC_COUNTS;
3774 counts.hdr.length = sizeof(counts);
3775 counts.srcref = src->srcref;
3776 counts.event_flags = 0;
3777
3778 errno_t result = 0;
3779 result = src->provider->nstat_counts(src->cookie, &counts.counts, gone);
3780 if (result != 0)
3781 {
3782 return result;
3783 }
3784
3785 if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
3786 counts.counts.nstat_rxbytes == 0 && counts.counts.nstat_txbytes == 0)
3787 {
3788 return EAGAIN;
3789 }
3790
3791 return nstat_accumulate_msg(state, &counts.hdr, counts.hdr.length);
3792 }
3793
/*
 * Send a single NSTAT_MSG_TYPE_SRC_DESC message for a source directly
 * to the client socket.  The message is built in a freshly allocated
 * mbuf: on successful enqueue the kctl layer owns the mbuf; on any
 * failure it is freed here before returning.
 */
static int
nstat_control_send_description(
	nstat_control_state *state,
	nstat_src *src,
	u_int64_t context,
	u_int16_t hdr_flags)
{
	// Provider doesn't support getting the descriptor? Done.
	if (src->provider->nstat_descriptor_length == 0 ||
	    src->provider->nstat_copy_descriptor == NULL)
	{
		return EOPNOTSUPP;
	}

	// Allocate storage for the descriptor message
	mbuf_t msg;
	unsigned int one = 1;
	u_int32_t size = offsetof(nstat_msg_src_description, data) + src->provider->nstat_descriptor_length;
	if (mbuf_allocpacket(MBUF_DONTWAIT, size, &one, &msg) != 0)
	{
		return ENOMEM;
	}

	nstat_msg_src_description *desc = (nstat_msg_src_description*)mbuf_data(msg);
	// Zero the whole message so no uninitialized kernel bytes (including
	// padding) reach userspace.
	bzero(desc, size);
	mbuf_setlen(msg, size);
	mbuf_pkthdr_setlen(msg, mbuf_len(msg));

	// Query the provider for the provider specific bits
	errno_t result = src->provider->nstat_copy_descriptor(src->cookie, desc->data, src->provider->nstat_descriptor_length);

	if (result != 0)
	{
		// Provider could not fill in its description; we still own msg.
		mbuf_freem(msg);
		return result;
	}

	desc->hdr.context = context;
	desc->hdr.type = NSTAT_MSG_TYPE_SRC_DESC;
	desc->hdr.length = size;
	desc->hdr.flags = hdr_flags;
	desc->srcref = src->srcref;
	desc->event_flags = 0;
	desc->provider = src->provider->nstat_provider_id;

	result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg, CTL_DATA_EOR);
	if (result != 0)
	{
		// Enqueue failed, so ownership was not transferred; free it.
		nstat_stats.nstat_descriptionfailures += 1;
		mbuf_freem(msg);
	}

	return result;
}
3848
3849 static errno_t
3850 nstat_control_append_description(
3851 nstat_control_state *state,
3852 nstat_src *src)
3853 {
3854 size_t size = offsetof(nstat_msg_src_description, data) + src->provider->nstat_descriptor_length;
3855 if (size > 512 || src->provider->nstat_descriptor_length == 0 ||
3856 src->provider->nstat_copy_descriptor == NULL)
3857 {
3858 return EOPNOTSUPP;
3859 }
3860
3861 // Fill out a buffer on the stack, we will copy to the mbuf later
3862 u_int64_t buffer[size/sizeof(u_int64_t) + 1]; // u_int64_t to ensure alignment
3863 bzero(buffer, size);
3864
3865 nstat_msg_src_description *desc = (nstat_msg_src_description*)buffer;
3866 desc->hdr.type = NSTAT_MSG_TYPE_SRC_DESC;
3867 desc->hdr.length = size;
3868 desc->srcref = src->srcref;
3869 desc->event_flags = 0;
3870 desc->provider = src->provider->nstat_provider_id;
3871
3872 errno_t result = 0;
3873 // Fill in the description
3874 // Query the provider for the provider specific bits
3875 result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
3876 src->provider->nstat_descriptor_length);
3877 if (result != 0)
3878 {
3879 return result;
3880 }
3881
3882 return nstat_accumulate_msg(state, &desc->hdr, size);
3883 }
3884
3885 static int
3886 nstat_control_send_update(
3887 nstat_control_state *state,
3888 nstat_src *src,
3889 u_int64_t context,
3890 u_int16_t hdr_flags,
3891 int *gone)
3892 {
3893 // Provider doesn't support getting the descriptor or counts? Done.
3894 if ((src->provider->nstat_descriptor_length == 0 ||
3895 src->provider->nstat_copy_descriptor == NULL) &&
3896 src->provider->nstat_counts == NULL)
3897 {
3898 return EOPNOTSUPP;
3899 }
3900
3901 // Allocate storage for the descriptor message
3902 mbuf_t msg;
3903 unsigned int one = 1;
3904 u_int32_t size = offsetof(nstat_msg_src_update, data) +
3905 src->provider->nstat_descriptor_length;
3906 if (mbuf_allocpacket(MBUF_DONTWAIT, size, &one, &msg) != 0)
3907 {
3908 return ENOMEM;
3909 }
3910
3911 nstat_msg_src_update *desc = (nstat_msg_src_update*)mbuf_data(msg);
3912 bzero(desc, size);
3913 desc->hdr.context = context;
3914 desc->hdr.type = NSTAT_MSG_TYPE_SRC_UPDATE;
3915 desc->hdr.length = size;
3916 desc->hdr.flags = hdr_flags;
3917 desc->srcref = src->srcref;
3918 desc->event_flags = 0;
3919 desc->provider = src->provider->nstat_provider_id;
3920
3921 mbuf_setlen(msg, size);
3922 mbuf_pkthdr_setlen(msg, mbuf_len(msg));
3923
3924 errno_t result = 0;
3925 if (src->provider->nstat_descriptor_length != 0 && src->provider->nstat_copy_descriptor)
3926 {
3927 // Query the provider for the provider specific bits
3928 result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
3929 src->provider->nstat_descriptor_length);
3930 if (result != 0)
3931 {
3932 mbuf_freem(msg);
3933 return result;
3934 }
3935 }
3936
3937 if (src->provider->nstat_counts)
3938 {
3939 result = src->provider->nstat_counts(src->cookie, &desc->counts, gone);
3940 if (result == 0)
3941 {
3942 if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
3943 desc->counts.nstat_rxbytes == 0 && desc->counts.nstat_txbytes == 0)
3944 {
3945 result = EAGAIN;
3946 }
3947 else
3948 {
3949 result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg, CTL_DATA_EOR);
3950 }
3951 }
3952 }
3953
3954 if (result != 0)
3955 {
3956 nstat_stats.nstat_srcupatefailures += 1;
3957 mbuf_freem(msg);
3958 }
3959
3960 return result;
3961 }
3962
3963 static errno_t
3964 nstat_control_append_update(
3965 nstat_control_state *state,
3966 nstat_src *src,
3967 int *gone)
3968 {
3969 size_t size = offsetof(nstat_msg_src_update, data) + src->provider->nstat_descriptor_length;
3970 if (size > 512 || ((src->provider->nstat_descriptor_length == 0 ||
3971 src->provider->nstat_copy_descriptor == NULL) &&
3972 src->provider->nstat_counts == NULL))
3973 {
3974 return EOPNOTSUPP;
3975 }
3976
3977 // Fill out a buffer on the stack, we will copy to the mbuf later
3978 u_int64_t buffer[size/sizeof(u_int64_t) + 1]; // u_int64_t to ensure alignment
3979 bzero(buffer, size);
3980
3981 nstat_msg_src_update *desc = (nstat_msg_src_update*)buffer;
3982 desc->hdr.type = NSTAT_MSG_TYPE_SRC_UPDATE;
3983 desc->hdr.length = size;
3984 desc->srcref = src->srcref;
3985 desc->event_flags = 0;
3986 desc->provider = src->provider->nstat_provider_id;
3987
3988 errno_t result = 0;
3989 // Fill in the description
3990 if (src->provider->nstat_descriptor_length != 0 && src->provider->nstat_copy_descriptor)
3991 {
3992 // Query the provider for the provider specific bits
3993 result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
3994 src->provider->nstat_descriptor_length);
3995 if (result != 0)
3996 {
3997 nstat_stats.nstat_copy_descriptor_failures++;
3998 if (nstat_debug != 0)
3999 printf("%s: src->provider->nstat_copy_descriptor: %d\n", __func__, result);
4000 return result;
4001 }
4002 }
4003
4004 if (src->provider->nstat_counts)
4005 {
4006 result = src->provider->nstat_counts(src->cookie, &desc->counts, gone);
4007 if (result != 0)
4008 {
4009 nstat_stats.nstat_provider_counts_failures++;
4010 if (nstat_debug != 0)
4011 printf("%s: src->provider->nstat_counts: %d\n", __func__, result);
4012 return result;
4013 }
4014
4015 if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
4016 desc->counts.nstat_rxbytes == 0 && desc->counts.nstat_txbytes == 0)
4017 {
4018 return EAGAIN;
4019 }
4020 }
4021
4022 return nstat_accumulate_msg(state, &desc->hdr, size);
4023 }
4024
4025 static errno_t
4026 nstat_control_send_removed(
4027 nstat_control_state *state,
4028 nstat_src *src)
4029 {
4030 nstat_msg_src_removed removed;
4031 errno_t result;
4032
4033 bzero(&removed, sizeof(removed));
4034 removed.hdr.type = NSTAT_MSG_TYPE_SRC_REMOVED;
4035 removed.hdr.length = sizeof(removed);
4036 removed.hdr.context = 0;
4037 removed.srcref = src->srcref;
4038 result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &removed,
4039 sizeof(removed), CTL_DATA_EOR | CTL_DATA_CRIT);
4040 if (result != 0)
4041 nstat_stats.nstat_msgremovedfailures += 1;
4042
4043 return result;
4044 }
4045
/*
 * Handle an add-source request from the client.  Validates the message
 * framing, extracts the provider-specific parameter (copying it into a
 * contiguous buffer when the request spans multiple mbufs), resolves it
 * to a provider/cookie pair, and registers the new source with this
 * control state.
 */
static errno_t
nstat_control_handle_add_request(
	nstat_control_state *state,
	mbuf_t m)
{
	errno_t result;

	// Verify the header fits in the first mbuf
	if (mbuf_len(m) < offsetof(nstat_msg_add_src_req, param))
	{
		return EINVAL;
	}

	// Calculate the length of the parameter field
	int32_t paramlength = mbuf_pkthdr_len(m) - offsetof(nstat_msg_add_src_req, param);
	// Bound the parameter size (2 KB) before allocating from it below.
	if (paramlength < 0 || paramlength > 2 * 1024)
	{
		return EINVAL;
	}

	nstat_provider *provider;
	nstat_provider_cookie_t cookie;
	// Safe: the header-fits check above guarantees req->hdr and
	// req->provider are contiguous in the first mbuf.
	nstat_msg_add_src_req *req = mbuf_data(m);
	if (mbuf_pkthdr_len(m) > mbuf_len(m))
	{
		// parameter is too large, we need to make a contiguous copy
		void *data = OSMalloc(paramlength, nstat_malloc_tag);

		if (!data) return ENOMEM;
		result = mbuf_copydata(m, offsetof(nstat_msg_add_src_req, param), paramlength, data);
		if (result == 0)
			result = nstat_lookup_entry(req->provider, data, paramlength, &provider, &cookie);
		OSFree(data, paramlength, nstat_malloc_tag);
	}
	else
	{
		// Parameter is contiguous in the first mbuf; use it in place.
		result = nstat_lookup_entry(req->provider, (void*)&req->param, paramlength, &provider, &cookie);
	}

	if (result != 0)
	{
		return result;
	}

	// On failure the cookie's reference must be released here, since the
	// source never took ownership of it.
	result = nstat_control_source_add(req->hdr.context, state, provider, cookie);
	if (result != 0)
		provider->nstat_release(cookie, 0);

	return result;
}
4096
/*
 * Handle a NSTAT_MSG_TYPE_ADD_ALL_SRCS request: subscribe this control
 * state as a watcher of every source belonging to a single provider,
 * recording the client's filter settings for that provider.  On success
 * a success message is enqueued to the client.
 */
static errno_t
nstat_control_handle_add_all(
	nstat_control_state	*state,
	mbuf_t			m)
{
	errno_t result = 0;

	// Verify the header fits in the first mbuf
	if (mbuf_len(m) < sizeof(nstat_msg_add_all_srcs))
	{
		return EINVAL;
	}


	nstat_msg_add_all_srcs *req = mbuf_data(m);
	if (req->provider > NSTAT_PROVIDER_LAST) return ENOENT;

	nstat_provider *provider = nstat_find_provider_by_id(req->provider);

	if (!provider) return ENOENT;
	if (provider->nstat_watcher_add == NULL) return ENOTSUP;

	// Watching all sources requires the privileged-network-statistics
	// entitlement when the nstat_privcheck sysctl is enabled.
	if (nstat_privcheck != 0) {
		result = priv_check_cred(kauth_cred_get(),
		    PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0);
		if (result != 0)
			return result;
	}

	// Make sure we don't add the provider twice
	lck_mtx_lock(&state->mtx);
	if ((state->ncs_watching & (1 << provider->nstat_provider_id)) != 0)
		result = EALREADY;
	state->ncs_watching |= (1 << provider->nstat_provider_id);
	lck_mtx_unlock(&state->mtx);
	if (result != 0) return result;

	// NOTE(review): filter fields are written after the mutex is
	// dropped; presumably nothing consults them until
	// nstat_watcher_add() registers the watcher - confirm.
	state->ncs_provider_filters[req->provider].npf_flags = req->filter;
	state->ncs_provider_filters[req->provider].npf_events = req->events;
	state->ncs_provider_filters[req->provider].npf_pid = req->target_pid;
	memcpy(state->ncs_provider_filters[req->provider].npf_uuid, req->target_uuid,
	    sizeof(state->ncs_provider_filters[req->provider].npf_uuid));

	result = provider->nstat_watcher_add(state);
	if (result != 0)
	{
		// Roll back the filter settings and the watching bit so a
		// later add-all attempt starts clean.
		state->ncs_provider_filters[req->provider].npf_flags = 0;
		state->ncs_provider_filters[req->provider].npf_events = 0;
		state->ncs_provider_filters[req->provider].npf_pid = 0;
		bzero(state->ncs_provider_filters[req->provider].npf_uuid,
		    sizeof(state->ncs_provider_filters[req->provider].npf_uuid));

		lck_mtx_lock(&state->mtx);
		state->ncs_watching &= ~(1 << provider->nstat_provider_id);
		lck_mtx_unlock(&state->mtx);
	}
	if (result == 0)
		nstat_enqueue_success(req->hdr.context, state, 0);

	return result;
}
4158
4159 static errno_t
4160 nstat_control_source_add(
4161 u_int64_t context,
4162 nstat_control_state *state,
4163 nstat_provider *provider,
4164 nstat_provider_cookie_t cookie)
4165 {
4166 // Fill out source added message if appropriate
4167 mbuf_t msg = NULL;
4168 nstat_src_ref_t *srcrefp = NULL;
4169
4170 u_int64_t provider_filter_flagss =
4171 state->ncs_provider_filters[provider->nstat_provider_id].npf_flags;
4172 boolean_t tell_user =
4173 ((provider_filter_flagss & NSTAT_FILTER_SUPPRESS_SRC_ADDED) == 0);
4174 u_int32_t src_filter =
4175 (provider_filter_flagss & NSTAT_FILTER_PROVIDER_NOZEROBYTES)
4176 ? NSTAT_FILTER_NOZEROBYTES : 0;
4177
4178 if (tell_user)
4179 {
4180 unsigned int one = 1;
4181
4182 if (mbuf_allocpacket(MBUF_DONTWAIT, sizeof(nstat_msg_src_added),
4183 &one, &msg) != 0)
4184 return ENOMEM;
4185
4186 mbuf_setlen(msg, sizeof(nstat_msg_src_added));
4187 mbuf_pkthdr_setlen(msg, mbuf_len(msg));
4188 nstat_msg_src_added *add = mbuf_data(msg);
4189 bzero(add, sizeof(*add));
4190 add->hdr.type = NSTAT_MSG_TYPE_SRC_ADDED;
4191 add->hdr.length = mbuf_len(msg);
4192 add->hdr.context = context;
4193 add->provider = provider->nstat_provider_id;
4194 srcrefp = &add->srcref;
4195 }
4196
4197 // Allocate storage for the source
4198 nstat_src *src = OSMalloc(sizeof(*src), nstat_malloc_tag);
4199 if (src == NULL)
4200 {
4201 if (msg) mbuf_freem(msg);
4202 return ENOMEM;
4203 }
4204
4205 // Fill in the source, including picking an unused source ref
4206 lck_mtx_lock(&state->mtx);
4207
4208 src->srcref = nstat_control_next_src_ref(state);
4209 if (srcrefp)
4210 *srcrefp = src->srcref;
4211
4212 if (state->ncs_flags & NSTAT_FLAG_CLEANUP || src->srcref == NSTAT_SRC_REF_INVALID)
4213 {
4214 lck_mtx_unlock(&state->mtx);
4215 OSFree(src, sizeof(*src), nstat_malloc_tag);
4216 if (msg) mbuf_freem(msg);
4217 return EINVAL;
4218 }
4219 src->provider = provider;
4220 src->cookie = cookie;
4221 src->filter = src_filter;
4222
4223 if (msg)
4224 {
4225 // send the source added message if appropriate
4226 errno_t result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg,
4227 CTL_DATA_EOR);
4228 if (result != 0)
4229 {
4230 nstat_stats.nstat_srcaddedfailures += 1;
4231 lck_mtx_unlock(&state->mtx);
4232 OSFree(src, sizeof(*src), nstat_malloc_tag);
4233 mbuf_freem(msg);
4234 return result;
4235 }
4236 }
4237 // Put the source in the list
4238 src->next = state->ncs_srcs;
4239 state->ncs_srcs = src;
4240
4241 lck_mtx_unlock(&state->mtx);
4242
4243 return 0;
4244 }
4245
4246 static errno_t
4247 nstat_control_handle_remove_request(
4248 nstat_control_state *state,
4249 mbuf_t m)
4250 {
4251 nstat_src_ref_t srcref = NSTAT_SRC_REF_INVALID;
4252
4253 if (mbuf_copydata(m, offsetof(nstat_msg_rem_src_req, srcref), sizeof(srcref), &srcref) != 0)
4254 {
4255 return EINVAL;
4256 }
4257
4258 lck_mtx_lock(&state->mtx);
4259
4260 // Remove this source as we look for it
4261 nstat_src **nextp;
4262 nstat_src *src = NULL;
4263 for (nextp = &state->ncs_srcs; *nextp; nextp = &(*nextp)->next)
4264 {
4265 if ((*nextp)->srcref == srcref)
4266 {
4267 src = *nextp;
4268 *nextp = src->next;
4269 break;
4270 }
4271 }
4272
4273 lck_mtx_unlock(&state->mtx);
4274
4275 if (src) nstat_control_cleanup_source(state, src, FALSE);
4276
4277 return src ? 0 : ENOENT;
4278 }
4279
/*
 * Handle a NSTAT_MSG_TYPE_QUERY_SRC request: send counts for a single
 * source, or for every source when srcref == NSTAT_SRC_REF_ALL.  Large
 * query-alls may be paced across several requests using the
 * CONTINUATION flag (see nstat_control_begin_query/end_query).
 */
static errno_t
nstat_control_handle_query_request(
	nstat_control_state	*state,
	mbuf_t			m)
{
	// TBD: handle this from another thread so we can enqueue a lot of data
	// As written, if a client requests query all, this function will be
	// called from their send of the request message. We will attempt to write
	// responses and succeed until the buffer fills up. Since the clients thread
	// is blocked on send, it won't be reading unless the client has two threads
	// using this socket, one for read and one for write. Two threads probably
	// won't work with this code anyhow since we don't have proper locking in
	// place yet.
	nstat_src *dead_srcs = NULL;
	errno_t result = ENOENT;
	nstat_msg_query_src_req req;

	if (mbuf_copydata(m, 0, sizeof(req), &req) != 0)
	{
		return EINVAL;
	}

	const boolean_t all_srcs = (req.srcref == NSTAT_SRC_REF_ALL);

	lck_mtx_lock(&state->mtx);

	if (all_srcs)
	{
		state->ncs_flags |= NSTAT_FLAG_REQCOUNTS;
	}
	nstat_src **srcpp = &state->ncs_srcs;
	u_int64_t src_count = 0;
	boolean_t partial = FALSE;

	/*
	 * Error handling policy and sequence number generation is folded into
	 * nstat_control_begin_query.
	 */
	partial = nstat_control_begin_query(state, &req.hdr);

	// Walk the source list; a paced (partial) query stops after
	// QUERY_CONTINUATION_SRC_COUNT sources and the client asks again.
	while (*srcpp != NULL
	    && (!partial || src_count < QUERY_CONTINUATION_SRC_COUNT))
	{
		nstat_src *src = NULL;
		int gone;

		src = *srcpp;
		gone = 0;
		// XXX ignore IFACE types?
		if (all_srcs || src->srcref == req.srcref)
		{
			// In a paced query-all, src->seq == ncs_seq marks a
			// source already reported this sequence; skip it.
			if (nstat_control_reporting_allowed(state, src)
			    && (!partial || !all_srcs || src->seq != state->ncs_seq))
			{
				if (all_srcs &&
				    (req.hdr.flags & NSTAT_MSG_HDR_FLAG_SUPPORTS_AGGREGATE) != 0)
				{
					// Client supports aggregated replies;
					// batch counts into one message.
					result = nstat_control_append_counts(state, src, &gone);
				}
				else
				{
					result = nstat_control_send_counts(state, src, req.hdr.context, 0, &gone);
				}

				if (ENOMEM == result || ENOBUFS == result)
				{
					/*
					 * If the counts message failed to
					 * enqueue then we should clear our flag so
					 * that a client doesn't miss anything on
					 * idle cleanup. We skip the "gone"
					 * processing in the hope that we may
					 * catch it another time.
					 */
					state->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS;
					break;
				}
				if (partial)
				{
					/*
					 * We skip over hard errors and
					 * filtered sources.
					 */
					src->seq = state->ncs_seq;
					src_count++;
				}
			}
		}

		if (gone)
		{
			// send one last descriptor message so client may see last state
			// If we can't send the notification now, it
			// will be sent in the idle cleanup.
			result = nstat_control_send_description(state, *srcpp, 0, 0);
			if (result != 0)
			{
				nstat_stats.nstat_control_send_description_failures++;
				if (nstat_debug != 0)
					printf("%s - nstat_control_send_description() %d\n", __func__, result);
				state->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS;
				break;
			}

			// pull src out of the list
			*srcpp = src->next;

			// Defer cleanup until the lock is dropped below.
			src->next = dead_srcs;
			dead_srcs = src;
		}
		else
		{
			srcpp = &(*srcpp)->next;
		}

		// Single-source query: stop once the requested source was handled.
		if (!all_srcs && req.srcref == src->srcref)
		{
			break;
		}
	}
	nstat_flush_accumulated_msgs(state);

	u_int16_t flags = 0;
	if (req.srcref == NSTAT_SRC_REF_ALL)
		flags = nstat_control_end_query(state, *srcpp, partial);

	lck_mtx_unlock(&state->mtx);

	/*
	 * If an error occurred enqueueing data, then allow the error to
	 * propagate to nstat_control_send. This way, the error is sent to
	 * user-level.
	 */
	if (all_srcs && ENOMEM != result && ENOBUFS != result)
	{
		nstat_enqueue_success(req.hdr.context, state, flags);
		result = 0;
	}

	// Release the sources whose providers reported them gone; done
	// outside the lock.
	while (dead_srcs)
	{
		nstat_src *src;

		src = dead_srcs;
		dead_srcs = src->next;

		// release src and send notification
		nstat_control_cleanup_source(state, src, FALSE);
	}

	return result;
}
4432
/*
 * Handle a NSTAT_MSG_TYPE_GET_SRC_DESC request: send the descriptor for
 * one source, or for every source when srcref == NSTAT_SRC_REF_ALL.
 * Query-alls may be paced across requests via the CONTINUATION flag.
 */
static errno_t
nstat_control_handle_get_src_description(
	nstat_control_state	*state,
	mbuf_t			m)
{
	nstat_msg_get_src_description req;
	errno_t result = ENOENT;
	nstat_src *src;

	if (mbuf_copydata(m, 0, sizeof(req), &req) != 0)
	{
		return EINVAL;
	}

	lck_mtx_lock(&state->mtx);
	u_int64_t src_count = 0;
	boolean_t partial = FALSE;
	const boolean_t all_srcs = (req.srcref == NSTAT_SRC_REF_ALL);

	/*
	 * Error handling policy and sequence number generation is folded into
	 * nstat_control_begin_query.
	 */
	partial = nstat_control_begin_query(state, &req.hdr);

	// A paced (partial) pass reports at most QUERY_CONTINUATION_SRC_COUNT
	// sources; the client repeats the request for the rest.
	for (src = state->ncs_srcs;
	    src && (!partial || src_count < QUERY_CONTINUATION_SRC_COUNT);
	    src = src->next)
	{
		if (all_srcs || src->srcref == req.srcref)
		{
			// src->seq == ncs_seq marks a source already reported
			// in this paced sequence; skip it.
			if (nstat_control_reporting_allowed(state, src)
			    && (!all_srcs || !partial || src->seq != state->ncs_seq))
			{
				if ((req.hdr.flags & NSTAT_MSG_HDR_FLAG_SUPPORTS_AGGREGATE) != 0 && all_srcs)
				{
					// Client supports aggregated replies;
					// batch descriptors into one message.
					result = nstat_control_append_description(state, src);
				}
				else
				{
					result = nstat_control_send_description(state, src, req.hdr.context, 0);
				}

				if (ENOMEM == result || ENOBUFS == result)
				{
					/*
					 * If the description message failed to
					 * enqueue then we give up for now.
					 */
					break;
				}
				if (partial)
				{
					/*
					 * Note, we skip over hard errors and
					 * filtered sources.
					 */
					src->seq = state->ncs_seq;
					src_count++;
				}
			}

			// Single-source request: done after the match.
			if (!all_srcs)
			{
				break;
			}
		}
	}
	nstat_flush_accumulated_msgs(state);

	u_int16_t flags = 0;
	if (req.srcref == NSTAT_SRC_REF_ALL)
		flags = nstat_control_end_query(state, src, partial);

	lck_mtx_unlock(&state->mtx);
	/*
	 * If an error occurred enqueueing data, then allow the error to
	 * propagate to nstat_control_send. This way, the error is sent to
	 * user-level.
	 */
	if (all_srcs && ENOMEM != result && ENOBUFS != result)
	{
		nstat_enqueue_success(req.hdr.context, state, flags);
		result = 0;
	}

	return result;
}
4521
4522 static errno_t
4523 nstat_control_handle_set_filter(
4524 nstat_control_state *state,
4525 mbuf_t m)
4526 {
4527 nstat_msg_set_filter req;
4528 nstat_src *src;
4529
4530 if (mbuf_copydata(m, 0, sizeof(req), &req) != 0)
4531 return EINVAL;
4532 if (req.srcref == NSTAT_SRC_REF_ALL ||
4533 req.srcref == NSTAT_SRC_REF_INVALID)
4534 return EINVAL;
4535
4536 lck_mtx_lock(&state->mtx);
4537 for (src = state->ncs_srcs; src; src = src->next)
4538 if (req.srcref == src->srcref)
4539 {
4540 src->filter = req.filter;
4541 break;
4542 }
4543 lck_mtx_unlock(&state->mtx);
4544 if (src == NULL)
4545 return ENOENT;
4546
4547 return 0;
4548 }
4549
4550 static void
4551 nstat_send_error(
4552 nstat_control_state *state,
4553 u_int64_t context,
4554 u_int32_t error)
4555 {
4556 errno_t result;
4557 struct nstat_msg_error err;
4558
4559 bzero(&err, sizeof(err));
4560 err.hdr.type = NSTAT_MSG_TYPE_ERROR;
4561 err.hdr.length = sizeof(err);
4562 err.hdr.context = context;
4563 err.error = error;
4564
4565 result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &err,
4566 sizeof(err), CTL_DATA_EOR | CTL_DATA_CRIT);
4567 if (result != 0)
4568 nstat_stats.nstat_msgerrorfailures++;
4569 }
4570
4571 static boolean_t
4572 nstat_control_begin_query(
4573 nstat_control_state *state,
4574 const nstat_msg_hdr *hdrp)
4575 {
4576 boolean_t partial = FALSE;
4577
4578 if (hdrp->flags & NSTAT_MSG_HDR_FLAG_CONTINUATION)
4579 {
4580 /* A partial query all has been requested. */
4581 partial = TRUE;
4582
4583 if (state->ncs_context != hdrp->context)
4584 {
4585 if (state->ncs_context != 0)
4586 nstat_send_error(state, state->ncs_context, EAGAIN);
4587
4588 /* Initialize state for a partial query all. */
4589 state->ncs_context = hdrp->context;
4590 state->ncs_seq++;
4591 }
4592 }
4593 else if (state->ncs_context != 0)
4594 {
4595 /*
4596 * A continuation of a paced-query was in progress. Send that
4597 * context an error and reset the state. If the same context
4598 * has changed its mind, just send the full query results.
4599 */
4600 if (state->ncs_context != hdrp->context)
4601 nstat_send_error(state, state->ncs_context, EAGAIN);
4602 }
4603
4604 return partial;
4605 }
4606
4607 static u_int16_t
4608 nstat_control_end_query(
4609 nstat_control_state *state,
4610 nstat_src *last_src,
4611 boolean_t partial)
4612 {
4613 u_int16_t flags = 0;
4614
4615 if (last_src == NULL || !partial)
4616 {
4617 /*
4618 * We iterated through the entire srcs list or exited early
4619 * from the loop when a partial update was not requested (an
4620 * error occurred), so clear context to indicate internally
4621 * that the query is finished.
4622 */
4623 state->ncs_context = 0;
4624 }
4625 else
4626 {
4627 /*
4628 * Indicate to userlevel to make another partial request as
4629 * there are still sources left to be reported.
4630 */
4631 flags |= NSTAT_MSG_HDR_FLAG_CONTINUATION;
4632 }
4633
4634 return flags;
4635 }
4636
/*
 * Handle a NSTAT_MSG_TYPE_GET_UPDATE request: send combined
 * counts+descriptor updates for one source, or for all sources when
 * srcref == NSTAT_SRC_REF_ALL.  Marks the client as update-capable and
 * supports pacing across requests via the CONTINUATION flag.
 */
static errno_t
nstat_control_handle_get_update(
	nstat_control_state	*state,
	mbuf_t			m)
{
	nstat_msg_query_src_req req;

	if (mbuf_copydata(m, 0, sizeof(req), &req) != 0)
	{
		return EINVAL;
	}

	lck_mtx_lock(&state->mtx);

	// Remember that this client understands update messages.
	state->ncs_flags |= NSTAT_FLAG_SUPPORTS_UPDATES;

	errno_t result = ENOENT;
	nstat_src *src;
	nstat_src *dead_srcs = NULL;
	nstat_src **srcpp = &state->ncs_srcs;
	u_int64_t src_count = 0;
	boolean_t partial = FALSE;

	/*
	 * Error handling policy and sequence number generation is folded into
	 * nstat_control_begin_query.
	 */
	partial = nstat_control_begin_query(state, &req.hdr);

	// A paced (partial) pass reports at most QUERY_CONTINUATION_SRC_COUNT
	// sources; the client repeats the request for the rest.
	while (*srcpp != NULL
	    && (FALSE == partial
	    || src_count < QUERY_CONTINUATION_SRC_COUNT))
	{
		int gone;

		gone = 0;
		src = *srcpp;
		if (nstat_control_reporting_allowed(state, src))
		{
			/* skip this source if it has the current state
			 * sequence number as it's already been reported in
			 * this query-all partial sequence. */
			if (req.srcref == NSTAT_SRC_REF_ALL
			    && (FALSE == partial || src->seq != state->ncs_seq))
			{
				result = nstat_control_append_update(state, src, &gone);
				if (ENOMEM == result || ENOBUFS == result)
				{
					/*
					 * If the update message failed to
					 * enqueue then give up.
					 */
					break;
				}
				if (partial)
				{
					/*
					 * We skip over hard errors and
					 * filtered sources.
					 */
					src->seq = state->ncs_seq;
					src_count++;
				}
			}
			else if (src->srcref == req.srcref)
			{
				// Single-source request: send a non-aggregated
				// update echoing the request context.
				result = nstat_control_send_update(state, src, req.hdr.context, 0, &gone);
			}
		}

		if (gone)
		{
			// pull src out of the list
			*srcpp = src->next;

			// Defer cleanup until the lock is dropped below.
			src->next = dead_srcs;
			dead_srcs = src;
		}
		else
		{
			srcpp = &(*srcpp)->next;
		}

		// Single-source request: stop once the match was handled.
		if (req.srcref != NSTAT_SRC_REF_ALL && req.srcref == src->srcref)
		{
			break;
		}
	}

	nstat_flush_accumulated_msgs(state);


	u_int16_t flags = 0;
	if (req.srcref == NSTAT_SRC_REF_ALL)
		flags = nstat_control_end_query(state, *srcpp, partial);

	lck_mtx_unlock(&state->mtx);
	/*
	 * If an error occurred enqueueing data, then allow the error to
	 * propagate to nstat_control_send. This way, the error is sent to
	 * user-level.
	 */
	if (req.srcref == NSTAT_SRC_REF_ALL && ENOMEM != result && ENOBUFS != result)
	{
		nstat_enqueue_success(req.hdr.context, state, flags);
		result = 0;
	}

	// Release the sources whose providers reported them gone; done
	// outside the lock.
	while (dead_srcs)
	{
		src = dead_srcs;
		dead_srcs = src->next;

		// release src and send notification
		nstat_control_cleanup_source(state, src, FALSE);
	}

	return result;
}
4756
4757 static errno_t
4758 nstat_control_handle_subscribe_sysinfo(
4759 nstat_control_state *state)
4760 {
4761 errno_t result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0);
4762
4763 if (result != 0)
4764 {
4765 return result;
4766 }
4767
4768 lck_mtx_lock(&state->mtx);
4769 state->ncs_flags |= NSTAT_FLAG_SYSINFO_SUBSCRIBED;
4770 lck_mtx_unlock(&state->mtx);
4771
4772 return 0;
4773 }
4774
/*
 * Kernel-control send handler: entry point for every request message a
 * client writes to the ntstat control socket.  Validates/normalizes the
 * message header, dispatches on hdr->type, and on failure replies with
 * an ERROR message that (when possible) carries the original request
 * appended after the error header.
 *
 * Ownership: m is consumed on all paths - either handed to
 * ctl_enqueuembuf or freed here.
 */
static errno_t
nstat_control_send(
	kern_ctl_ref	kctl,
	u_int32_t	unit,
	void		*uinfo,
	mbuf_t		m,
	__unused int	flags)
{
	nstat_control_state *state = (nstat_control_state*)uinfo;
	struct nstat_msg_hdr *hdr;
	struct nstat_msg_hdr storage;
	errno_t result = 0;

	if (mbuf_pkthdr_len(m) < sizeof(*hdr))
	{
		// Is this the right thing to do?
		mbuf_freem(m);
		return EINVAL;
	}

	if (mbuf_len(m) >= sizeof(*hdr))
	{
		// Header is contiguous in the first mbuf; read it in place.
		hdr = mbuf_data(m);
	}
	else
	{
		// Header spans mbufs; work from a stack copy.
		mbuf_copydata(m, 0, sizeof(storage), &storage);
		hdr = &storage;
	}

	// Legacy clients may not set the length
	// Those clients are likely not setting the flags either
	// Fix everything up so old clients continue to work
	if (hdr->length != mbuf_pkthdr_len(m))
	{
		hdr->flags = 0;
		hdr->length = mbuf_pkthdr_len(m);
		if (hdr == &storage)
		{
			// Write the normalized header back into the mbuf so
			// handlers that re-read it see the fixed-up values.
			mbuf_copyback(m, 0, sizeof(*hdr), hdr, MBUF_DONTWAIT);
		}
	}

	switch (hdr->type)
	{
		case NSTAT_MSG_TYPE_ADD_SRC:
			result = nstat_control_handle_add_request(state, m);
			break;

		case NSTAT_MSG_TYPE_ADD_ALL_SRCS:
			result = nstat_control_handle_add_all(state, m);
			break;

		case NSTAT_MSG_TYPE_REM_SRC:
			result = nstat_control_handle_remove_request(state, m);
			break;

		case NSTAT_MSG_TYPE_QUERY_SRC:
			result = nstat_control_handle_query_request(state, m);
			break;

		case NSTAT_MSG_TYPE_GET_SRC_DESC:
			result = nstat_control_handle_get_src_description(state, m);
			break;

		case NSTAT_MSG_TYPE_SET_FILTER:
			result = nstat_control_handle_set_filter(state, m);
			break;

		case NSTAT_MSG_TYPE_GET_UPDATE:
			result = nstat_control_handle_get_update(state, m);
			break;

		case NSTAT_MSG_TYPE_SUBSCRIBE_SYSINFO:
			result = nstat_control_handle_subscribe_sysinfo(state);
			break;

		default:
			result = EINVAL;
			break;
	}

	if (result != 0)
	{
		struct nstat_msg_error err;

		bzero(&err, sizeof(err));
		err.hdr.type = NSTAT_MSG_TYPE_ERROR;
		// NOTE(review): sizeof(err) + mbuf_pkthdr_len(m) is stored in
		// the header length field; presumably the 2KB request cap
		// keeps this within range - confirm field width.
		err.hdr.length = sizeof(err) + mbuf_pkthdr_len(m);
		err.hdr.context = hdr->context;
		err.error = result;

		// Preferred reply: prepend the error header so the client
		// sees the failing request echoed after it.
		if (mbuf_prepend(&m, sizeof(err), MBUF_DONTWAIT) == 0 &&
		    mbuf_copyback(m, 0, sizeof(err), &err, MBUF_DONTWAIT) == 0)
		{
			result = ctl_enqueuembuf(kctl, unit, m, CTL_DATA_EOR | CTL_DATA_CRIT);
			if (result != 0)
			{
				mbuf_freem(m);
			}
			// Either enqueued or freed; don't touch m again.
			m = NULL;
		}

		if (result != 0)
		{
			// Unable to prepend the error to the request - just send the error
			err.hdr.length = sizeof(err);
			result = ctl_enqueuedata(kctl, unit, &err, sizeof(err),
			    CTL_DATA_EOR | CTL_DATA_CRIT);
			if (result != 0)
				nstat_stats.nstat_msgerrorfailures += 1;
		}
		nstat_stats.nstat_handle_msg_failures += 1;
	}

	if (m) mbuf_freem(m);

	return result;
}