+
+ if (inp->inp_last_outifp)
+ desc->ifindex = inp->inp_last_outifp->if_index;
+ else
+ desc->ifindex = tucookie->if_index;
+
+ struct socket *so = inp->inp_socket;
+ if (so)
+ {
+ // TBD - take the socket lock around these to make sure
+ // they're in sync?
+ desc->upid = so->last_upid;
+ desc->pid = so->last_pid;
+ proc_name(desc->pid, desc->pname, sizeof(desc->pname));
+ if (desc->pname[0] == 0)
+ {
+ strlcpy(desc->pname, tucookie->pname,
+ sizeof(desc->pname));
+ }
+ else
+ {
+ desc->pname[sizeof(desc->pname) - 1] = 0;
+ strlcpy(tucookie->pname, desc->pname,
+ sizeof(tucookie->pname));
+ }
+ memcpy(desc->uuid, so->last_uuid, sizeof(so->last_uuid));
+ memcpy(desc->vuuid, so->so_vuuid, sizeof(so->so_vuuid));
+ if (so->so_flags & SOF_DELEGATED) {
+ desc->eupid = so->e_upid;
+ desc->epid = so->e_pid;
+ memcpy(desc->euuid, so->e_uuid, sizeof(so->e_uuid));
+ } else {
+ desc->eupid = desc->upid;
+ desc->epid = desc->pid;
+ memcpy(desc->euuid, desc->uuid, sizeof(desc->uuid));
+ }
+ desc->rcvbufsize = so->so_rcv.sb_hiwat;
+ desc->rcvbufused = so->so_rcv.sb_cc;
+ desc->traffic_class = so->so_traffic_class;
+ }
+
+ return 0;
+}
+
+static bool
+nstat_udp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter)
+{
+	// Kernel UDP sources share the common TCP/UDP filter logic;
+	// the final TRUE argument selects the UDP flavour of the check.
+	bool allowed = nstat_tcpudp_reporting_allowed(cookie, filter, TRUE);
+	return allowed;
+}
+
+
+static void
+nstat_init_udp_provider(void)
+{
+	// Register the kernel-socket UDP provider at the head of the
+	// global provider list.
+	nstat_provider *provider = &nstat_udp_provider;
+
+	bzero(provider, sizeof(*provider));
+	provider->nstat_provider_id = NSTAT_PROVIDER_UDP_KERNEL;
+	provider->nstat_descriptor_length = sizeof(nstat_udp_descriptor);
+	provider->nstat_lookup = nstat_udp_lookup;
+	provider->nstat_gone = nstat_udp_gone;
+	provider->nstat_counts = nstat_udp_counts;
+	provider->nstat_watcher_add = nstat_udp_add_watcher;
+	provider->nstat_watcher_remove = nstat_udp_remove_watcher;
+	provider->nstat_copy_descriptor = nstat_udp_copy_descriptor;
+	provider->nstat_release = nstat_udp_release;
+	provider->nstat_reporting_allowed = nstat_udp_reporting_allowed;
+	provider->next = nstat_providers;
+	nstat_providers = provider;
+}
+
+#pragma mark -- TCP/UDP Userland
+
+// Almost all of this infrastructure is common to both TCP and UDP
+
+static nstat_provider nstat_userland_tcp_provider;
+static nstat_provider nstat_userland_udp_provider;
+
+
+// Shadow record tracked for each userland TCP/UDP connection.  The kernel
+// keeps one of these per registered userland socket so watchers can
+// enumerate sources and pull stats on demand via the callback below.
+struct nstat_tu_shadow {
+	tailq_entry_tu_shadow shad_link;                 // linkage on nstat_userprot_shad_head
+	userland_stats_request_vals_fn *shad_getvals_fn; // callback into userland provider for counts/descriptor
+	userland_stats_provider_context *shad_provider_context; // opaque context passed back to the callback
+	u_int64_t shad_properties;                       // provider-supplied property flags
+	int shad_provider;                               // NSTAT_PROVIDER_TCP_USERLAND or NSTAT_PROVIDER_UDP_USERLAND
+	uint32_t shad_magic;                             // TU_SHADOW_MAGIC while live, TU_SHADOW_UNMAGIC after free
+};
+
+// Magic number checking should remain in place until the userland provider has been fully proven
+#define TU_SHADOW_MAGIC			0xfeedf00d
+#define TU_SHADOW_UNMAGIC		0xdeaddeed
+
+// Global list of all live shadow records; protected by nstat_mtx.
+static tailq_head_tu_shadow nstat_userprot_shad_head = TAILQ_HEAD_INITIALIZER(nstat_userprot_shad_head);
+
+static errno_t
+nstat_userland_tu_lookup(
+	__unused const void *data,
+	__unused u_int32_t length,
+	__unused nstat_provider_cookie_t *out_cookie)
+{
+	// Userland sources are only discovered through the watcher path;
+	// looking up a specific connection is deliberately unsupported.
+	errno_t result = ENOTSUP;
+	return result;
+}
+
+static int
+nstat_userland_tu_gone(
+	__unused nstat_provider_cookie_t cookie)
+{
+	// Returns non-zero if the source has gone.  Shadow records are
+	// removed as soon as the userland connection closes, so any live
+	// cookie refers to a live source and the answer is always "no".
+	int gone = 0;
+	return gone;
+}
+
+static errno_t
+nstat_userland_tu_counts(
+	nstat_provider_cookie_t cookie,
+	struct nstat_counts *out_counts,
+	int *out_gone)
+{
+	// Pull the current counters from the userland provider via its
+	// registered callback; the descriptor pointer is NULL so only the
+	// counts portion is filled in.
+	struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie;
+
+	assert(shad->shad_magic == TU_SHADOW_MAGIC);
+
+	if (out_gone)
+	{
+		// Userland sources never report as gone; see nstat_userland_tu_gone.
+		*out_gone = 0;
+	}
+
+	if (!(*shad->shad_getvals_fn)(shad->shad_provider_context, out_counts, NULL))
+	{
+		return EIO;
+	}
+	return 0;
+}
+
+
+static errno_t
+nstat_userland_tu_copy_descriptor(
+	nstat_provider_cookie_t cookie,
+	void *data,
+	__unused u_int32_t len)
+{
+	// Ask the userland provider to fill in the descriptor only (the
+	// counts pointer is NULL).  The callback is trusted to honour the
+	// descriptor size negotiated at registration time.
+	struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie;
+
+	assert(shad->shad_magic == TU_SHADOW_MAGIC);
+
+	if ((*shad->shad_getvals_fn)(shad->shad_provider_context, NULL, data))
+	{
+		return 0;
+	}
+	return EIO;
+}
+
+static void
+nstat_userland_tu_release(
+	__unused nstat_provider_cookie_t cookie,
+	__unused int locked)
+{
+	// Called when a nstat_src is detached.
+	// We don't reference count or ask for delayed release so nothing to do here.
+	// The shadow itself is freed on the close path, not here.
+}
+
+static bool
+check_reporting_for_user(nstat_provider_filter *filter, pid_t pid, pid_t epid, uuid_t *uuid, uuid_t *euuid)
+{
+	// Decide whether a source owned by (pid, epid, uuid, euuid) passes a
+	// user-specific filter.  With no user filter set, everything passes;
+	// otherwise any one matching criterion is sufficient.
+	if ((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) == 0)
+	{
+		return true;
+	}
+	if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_PID) != 0) &&
+	    (filter->npf_pid == pid))
+	{
+		return true;
+	}
+	if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EPID) != 0) &&
+	    (filter->npf_pid == epid))
+	{
+		return true;
+	}
+	if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_UUID) != 0) &&
+	    (memcmp(filter->npf_uuid, uuid, sizeof(*uuid)) == 0))
+	{
+		return true;
+	}
+	if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EUUID) != 0) &&
+	    (memcmp(filter->npf_uuid, euuid, sizeof(*euuid)) == 0))
+	{
+		return true;
+	}
+	return false;
+}
+
+static bool
+nstat_userland_tcp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter)
+{
+	// Apply interface-property and user-specific filters to a userland
+	// TCP source.  When no relevant filter bits are set, always report.
+	if ((filter->npf_flags & (NSTAT_FILTER_IFNET_FLAGS|NSTAT_FILTER_SPECIFIC_USER)) == 0)
+	{
+		return true;
+	}
+
+	struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie;
+	nstat_tcp_descriptor tcp_desc; // Stack allocation - OK or pushing the limits too far?
+
+	assert(shad->shad_magic == TU_SHADOW_MAGIC);
+
+	if (!(*shad->shad_getvals_fn)(shad->shad_provider_context, NULL, &tcp_desc))
+	{
+		return false; // No further information, so might as well give up now.
+	}
+	if (((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0) &&
+	    ((filter->npf_flags & tcp_desc.ifnet_properties) == 0))
+	{
+		// Source is on an interface whose properties the filter excludes.
+		return false;
+	}
+	if ((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0)
+	{
+		return check_reporting_for_user(filter, (pid_t)tcp_desc.pid, (pid_t)tcp_desc.epid,
+		    &tcp_desc.uuid, &tcp_desc.euuid);
+	}
+	return true;
+}
+
+static bool
+nstat_userland_udp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter)
+{
+	// Apply interface-property and user-specific filters to a userland
+	// UDP source.  When no relevant filter bits are set, always report.
+	if ((filter->npf_flags & (NSTAT_FILTER_IFNET_FLAGS|NSTAT_FILTER_SPECIFIC_USER)) == 0)
+	{
+		return true;
+	}
+
+	struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie;
+	nstat_udp_descriptor udp_desc; // Stack allocation - OK or pushing the limits too far?
+
+	assert(shad->shad_magic == TU_SHADOW_MAGIC);
+
+	if (!(*shad->shad_getvals_fn)(shad->shad_provider_context, NULL, &udp_desc))
+	{
+		return false; // No further information, so might as well give up now.
+	}
+	if (((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0) &&
+	    ((filter->npf_flags & udp_desc.ifnet_properties) == 0))
+	{
+		// Source is on an interface whose properties the filter excludes.
+		return false;
+	}
+	if ((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0)
+	{
+		return check_reporting_for_user(filter, (pid_t)udp_desc.pid, (pid_t)udp_desc.epid,
+		    &udp_desc.uuid, &udp_desc.euuid);
+	}
+	return true;
+}
+
+
+
+static errno_t
+nstat_userland_tcp_add_watcher(
+	nstat_control_state *state)
+{
+	struct nstat_tu_shadow *shad;
+
+	// Account for the new watcher, then hand it every existing userland
+	// TCP source.  nstat_mtx protects the shadow list during the walk.
+	OSIncrementAtomic(&nstat_userland_tcp_watchers);
+
+	lck_mtx_lock(&nstat_mtx);
+
+	TAILQ_FOREACH(shad, &nstat_userprot_shad_head, shad_link) {
+		assert(shad->shad_magic == TU_SHADOW_MAGIC);
+
+		if (shad->shad_provider != NSTAT_PROVIDER_TCP_USERLAND)
+		{
+			continue;
+		}
+		int result = nstat_control_source_add(0, state, &nstat_userland_tcp_provider, shad);
+		if (result != 0)
+		{
+			// Best effort: log and keep going with the remaining sources.
+			printf("%s - nstat_control_source_add returned %d\n", __func__, result);
+		}
+	}
+	lck_mtx_unlock(&nstat_mtx);
+
+	return 0;
+}
+
+static errno_t
+nstat_userland_udp_add_watcher(
+	nstat_control_state *state)
+{
+	struct nstat_tu_shadow *shad;
+
+	// Account for the new watcher, then hand it every existing userland
+	// UDP source.  nstat_mtx protects the shadow list during the walk.
+	OSIncrementAtomic(&nstat_userland_udp_watchers);
+
+	lck_mtx_lock(&nstat_mtx);
+
+	TAILQ_FOREACH(shad, &nstat_userprot_shad_head, shad_link) {
+		assert(shad->shad_magic == TU_SHADOW_MAGIC);
+
+		if (shad->shad_provider != NSTAT_PROVIDER_UDP_USERLAND)
+		{
+			continue;
+		}
+		int result = nstat_control_source_add(0, state, &nstat_userland_udp_provider, shad);
+		if (result != 0)
+		{
+			// Best effort: log and keep going with the remaining sources.
+			printf("%s - nstat_control_source_add returned %d\n", __func__, result);
+		}
+	}
+	lck_mtx_unlock(&nstat_mtx);
+
+	return 0;
+}
+
+
+static void
+nstat_userland_tcp_remove_watcher(
+	__unused nstat_control_state *state)
+{
+	// Drop the watcher count; per-source cleanup happens via nstat_release.
+	OSDecrementAtomic(&nstat_userland_tcp_watchers);
+}
+
+static void
+nstat_userland_udp_remove_watcher(
+	__unused nstat_control_state *state)
+{
+	// Drop the watcher count; per-source cleanup happens via nstat_release.
+	OSDecrementAtomic(&nstat_userland_udp_watchers);
+}
+
+static void
+nstat_init_userland_tcp_provider(void)
+{
+	// Register the userland TCP provider at the head of the provider list.
+	// Fix: bzero the object actually being initialized.  The original
+	// passed sizeof(nstat_tcp_provider) -- a different provider object --
+	// which only worked because every provider is the same nstat_provider
+	// type; sizing the zeroed object itself removes that fragility.
+	bzero(&nstat_userland_tcp_provider, sizeof(nstat_userland_tcp_provider));
+	nstat_userland_tcp_provider.nstat_descriptor_length = sizeof(nstat_tcp_descriptor);
+	nstat_userland_tcp_provider.nstat_provider_id = NSTAT_PROVIDER_TCP_USERLAND;
+	nstat_userland_tcp_provider.nstat_lookup = nstat_userland_tu_lookup;
+	nstat_userland_tcp_provider.nstat_gone = nstat_userland_tu_gone;
+	nstat_userland_tcp_provider.nstat_counts = nstat_userland_tu_counts;
+	nstat_userland_tcp_provider.nstat_release = nstat_userland_tu_release;
+	nstat_userland_tcp_provider.nstat_watcher_add = nstat_userland_tcp_add_watcher;
+	nstat_userland_tcp_provider.nstat_watcher_remove = nstat_userland_tcp_remove_watcher;
+	nstat_userland_tcp_provider.nstat_copy_descriptor = nstat_userland_tu_copy_descriptor;
+	nstat_userland_tcp_provider.nstat_reporting_allowed = nstat_userland_tcp_reporting_allowed;
+	nstat_userland_tcp_provider.next = nstat_providers;
+	nstat_providers = &nstat_userland_tcp_provider;
+}
+
+
+static void
+nstat_init_userland_udp_provider(void)
+{
+	// Register the userland UDP provider at the head of the provider list.
+	// Fix: bzero the object actually being initialized.  The original
+	// passed sizeof(nstat_udp_provider) -- a different provider object --
+	// which only worked because every provider is the same nstat_provider
+	// type; sizing the zeroed object itself removes that fragility.
+	bzero(&nstat_userland_udp_provider, sizeof(nstat_userland_udp_provider));
+	nstat_userland_udp_provider.nstat_descriptor_length = sizeof(nstat_udp_descriptor);
+	nstat_userland_udp_provider.nstat_provider_id = NSTAT_PROVIDER_UDP_USERLAND;
+	nstat_userland_udp_provider.nstat_lookup = nstat_userland_tu_lookup;
+	nstat_userland_udp_provider.nstat_gone = nstat_userland_tu_gone;
+	nstat_userland_udp_provider.nstat_counts = nstat_userland_tu_counts;
+	nstat_userland_udp_provider.nstat_release = nstat_userland_tu_release;
+	nstat_userland_udp_provider.nstat_watcher_add = nstat_userland_udp_add_watcher;
+	nstat_userland_udp_provider.nstat_watcher_remove = nstat_userland_udp_remove_watcher;
+	nstat_userland_udp_provider.nstat_copy_descriptor = nstat_userland_tu_copy_descriptor;
+	nstat_userland_udp_provider.nstat_reporting_allowed = nstat_userland_udp_reporting_allowed;
+	nstat_userland_udp_provider.next = nstat_providers;
+	nstat_providers = &nstat_userland_udp_provider;
+}
+
+
+
+// Things get started with a call to netstats to say that there’s a new connection:
+__private_extern__ nstat_userland_context
+ntstat_userland_stats_open(userland_stats_provider_context *ctx,
+ int provider_id,
+ u_int64_t properties,
+ userland_stats_request_vals_fn req_fn)
+{
+ struct nstat_tu_shadow *shad;
+
+ if ((provider_id != NSTAT_PROVIDER_TCP_USERLAND) && (provider_id != NSTAT_PROVIDER_UDP_USERLAND))
+ {
+ printf("%s - incorrect provider is supplied, %d\n", __func__, provider_id);
+ return NULL;
+ }
+
+ shad = OSMalloc(sizeof(*shad), nstat_malloc_tag);
+ if (shad == NULL)
+ return NULL;
+
+ shad->shad_getvals_fn = req_fn;
+ shad->shad_provider_context = ctx;
+ shad->shad_provider = provider_id;
+ shad->shad_properties = properties;
+ shad->shad_magic = TU_SHADOW_MAGIC;
+
+ lck_mtx_lock(&nstat_mtx);
+ nstat_control_state *state;
+
+ // Even if there are no watchers, we save the shadow structure
+ TAILQ_INSERT_HEAD(&nstat_userprot_shad_head, shad, shad_link);
+
+ for (state = nstat_controls; state; state = state->ncs_next)