+ struct necp_client_update *client_update = _MALLOC(sizeof(struct necp_client_update) + client->result_length,
+ M_NECP, M_WAITOK | M_ZERO);
+ if (client_update != NULL) {
+ client_update->update_length = sizeof(struct necp_client_observer_update) + client->result_length;
+ uuid_copy(client_update->client_id, client->client_id);
+ client_update->update.update_type = NECP_CLIENT_UPDATE_TYPE_RESULT;
+ memcpy(client_update->update.tlv_buffer, client->result, client->result_length);
+ TAILQ_INSERT_TAIL(&observer_fd->update_list, client_update, chain);
+ observer_fd->update_count++;
+
+ necp_fd_notify(observer_fd, true);
+ }
+
+ NECP_FD_UNLOCK(observer_fd);
+}
+
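+// Queue a REMOVE update for this client on a single observer fd and wake the
+// observer, unless the fd has already reached necp_observer_message_limit.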
+static void
+necp_client_update_observer_remove_internal(struct necp_fd_data *observer_fd, struct necp_client *client)
+{
+ NECP_FD_LOCK(observer_fd);
+
+ if (observer_fd->update_count >= necp_observer_message_limit) {
+ NECP_FD_UNLOCK(observer_fd);
+ return;
+ }
+
+ struct necp_client_update *client_update = _MALLOC(sizeof(struct necp_client_update),
+ M_NECP, M_WAITOK | M_ZERO);
+ if (client_update != NULL) {
+ client_update->update_length = sizeof(struct necp_client_observer_update);
+ uuid_copy(client_update->client_id, client->client_id);
+ client_update->update.update_type = NECP_CLIENT_UPDATE_TYPE_REMOVE;
+ TAILQ_INSERT_TAIL(&observer_fd->update_list, client_update, chain);
+ observer_fd->update_count++;
+
+ necp_fd_notify(observer_fd, true);
+ }
+
+ NECP_FD_UNLOCK(observer_fd);
+}
+
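+// Notify every registered observer fd that a client has been added. Takes the
+// observer list lock shared and bails out early if there are no observers.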
+static void
+necp_client_update_observer_add(struct necp_client *client)
+{
+ NECP_OBSERVER_LIST_LOCK_SHARED();
+
+ if (LIST_EMPTY(&necp_fd_observer_list)) {
+ // No observers, bail
+ NECP_OBSERVER_LIST_UNLOCK();
+ return;
+ }
+
+ struct necp_fd_data *observer_fd = NULL;
+ LIST_FOREACH(observer_fd, &necp_fd_observer_list, chain) {
+ necp_client_update_observer_add_internal(observer_fd, client);
+ }
+
+ NECP_OBSERVER_LIST_UNLOCK();
+}
+
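+// Push the client's updated result to every registered observer fd.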
+static void
+necp_client_update_observer_update(struct necp_client *client)
+{
+ NECP_OBSERVER_LIST_LOCK_SHARED();
+
+ if (LIST_EMPTY(&necp_fd_observer_list)) {
+ // No observers, bail
+ NECP_OBSERVER_LIST_UNLOCK();
+ return;
+ }
+
+ struct necp_fd_data *observer_fd = NULL;
+ LIST_FOREACH(observer_fd, &necp_fd_observer_list, chain) {
+ necp_client_update_observer_update_internal(observer_fd, client);
+ }
+
+ NECP_OBSERVER_LIST_UNLOCK();
+}
+
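+// Notify every registered observer fd that this client is being removed.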
+static void
+necp_client_update_observer_remove(struct necp_client *client)
+{
+ NECP_OBSERVER_LIST_LOCK_SHARED();
+
+ if (LIST_EMPTY(&necp_fd_observer_list)) {
+ // No observers, bail
+ NECP_OBSERVER_LIST_UNLOCK();
+ return;
+ }
+
+ struct necp_fd_data *observer_fd = NULL;
+ LIST_FOREACH(observer_fd, &necp_fd_observer_list, chain) {
+ necp_client_update_observer_remove_internal(observer_fd, client);
+ }
+
+ NECP_OBSERVER_LIST_UNLOCK();
+}
+
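+// Tear down a single flow registration: close (or abort) any attached nexus
+// agents, free assigned results, release each flow entry, and unlink the
+// registration from the client's flow tree. The client lock must be held.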
+static void
+necp_destroy_client_flow_registration(struct necp_client *client,
+ struct necp_client_flow_registration *flow_registration,
+ pid_t pid, bool abort)
+{
+ NECP_CLIENT_ASSERT_LOCKED(client);
+
+ struct necp_client_flow *search_flow = NULL;
+ struct necp_client_flow *temp_flow = NULL;
+ LIST_FOREACH_SAFE(search_flow, &flow_registration->flow_list, flow_chain, temp_flow) {
+ if (search_flow->nexus &&
+ !uuid_is_null(search_flow->u.nexus_agent)) {
+ // Note that if we had defuncted the client earlier, this would result in a harmless ENOENT
+ int netagent_error = netagent_client_message(search_flow->u.nexus_agent,
+ ((flow_registration->flags & NECP_CLIENT_FLOW_FLAGS_USE_CLIENT_ID) ?
+ client->client_id :
+ flow_registration->registration_id),
+ pid, client->agent_handle,
+ (abort ? NETAGENT_MESSAGE_TYPE_ABORT_NEXUS :
+ NETAGENT_MESSAGE_TYPE_CLOSE_NEXUS));
+ if (netagent_error != 0 && netagent_error != ENOENT) {
+ NECPLOG(LOG_ERR, "necp_client_remove close nexus error (%d)", netagent_error);
+ }
+ uuid_clear(search_flow->u.nexus_agent);
+ }
+ if (search_flow->assigned_results != NULL) {
+ FREE(search_flow->assigned_results, M_NETAGENT);
+ search_flow->assigned_results = NULL;
+ }
+ LIST_REMOVE(search_flow, flow_chain);
+ if (search_flow->socket) {
+ OSDecrementAtomic(&necp_socket_flow_count);
+ } else {
+ OSDecrementAtomic(&necp_if_flow_count);
+ }
+ mcache_free(necp_flow_cache, search_flow);
+ }
+
+ RB_REMOVE(_necp_client_flow_tree, &client->flow_registrations, flow_registration);
+ flow_registration->client = NULL;
+
+ mcache_free(necp_flow_registration_cache, flow_registration);
+}
+
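+// Final teardown of a client: tell observers it is going away, drop the cached
+// route, destroy all flow registrations, unassert any remaining netagents, and
+// release the client's reference. Expects the client to be unlocked on entry.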
+static void
+necp_destroy_client(struct necp_client *client, pid_t pid, bool abort)
+{
+ NECP_CLIENT_ASSERT_UNLOCKED(client);
+
+ necp_client_update_observer_remove(client);
+
+ NECP_CLIENT_LOCK(client);
+
+ // Free route
+ NECP_CLIENT_ROUTE_LOCK(client);
+ if (client->current_route != NULL) {
+ rtfree(client->current_route);
+ client->current_route = NULL;
+ }
+ NECP_CLIENT_ROUTE_UNLOCK(client);
+
+ // Remove flow assignments
+ struct necp_client_flow_registration *flow_registration = NULL;
+ struct necp_client_flow_registration *temp_flow_registration = NULL;
+ RB_FOREACH_SAFE(flow_registration, _necp_client_flow_tree, &client->flow_registrations, temp_flow_registration) {
+ necp_destroy_client_flow_registration(client, flow_registration, pid, abort);
+ }
+
+ // Remove agent assertions
+ struct necp_client_assertion *search_assertion = NULL;
+ struct necp_client_assertion *temp_assertion = NULL;
+ LIST_FOREACH_SAFE(search_assertion, &client->assertion_list, assertion_chain, temp_assertion) {
+ int netagent_error = netagent_client_message(search_assertion->asserted_netagent, client->client_id, pid,
+ client->agent_handle, NETAGENT_MESSAGE_TYPE_CLIENT_UNASSERT);
+ if (netagent_error != 0) {
+ NECPLOG((netagent_error == ENOENT ? LOG_DEBUG : LOG_ERR),
+ "necp_client_remove unassert agent error (%d)", netagent_error);
+ }
+ LIST_REMOVE(search_assertion, assertion_chain);
+ FREE(search_assertion, M_NECP);
+ }
+
+ if (!necp_client_release_locked(client)) {
+ NECP_CLIENT_UNLOCK(client);
+ }
+
+ OSDecrementAtomic(&necp_client_count);
+}
+
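+// Close handler for an NECP fd. Unlinks the fd from the client or observer
+// list, detaches its flow registrations and clients from the global trees,
+// flushes pending observer updates, frees the fd state, and finally destroys
+// each orphaned client outside of the fd lock.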
+static int
+necpop_close(struct fileglob *fg, vfs_context_t ctx)
+{
+#pragma unused(ctx)
+ struct necp_fd_data *fd_data = NULL;
+ int error = 0;
+
+ fd_data = (struct necp_fd_data *)fg->fg_data;
+ fg->fg_data = NULL;
+
+ if (fd_data != NULL) {
+ struct _necp_client_tree clients_to_close;
+ RB_INIT(&clients_to_close);
+
+ // Remove from list quickly
+ if (fd_data->flags & NECP_OPEN_FLAG_PUSH_OBSERVER) {
+ NECP_OBSERVER_LIST_LOCK_EXCLUSIVE();
+ LIST_REMOVE(fd_data, chain);
+ NECP_OBSERVER_LIST_UNLOCK();
+ } else {
+ NECP_FD_LIST_LOCK_EXCLUSIVE();
+ LIST_REMOVE(fd_data, chain);
+ NECP_FD_LIST_UNLOCK();
+ }
+
+ NECP_FD_LOCK(fd_data);
+ pid_t pid = fd_data->proc_pid;
+
+ struct necp_client_flow_registration *flow_registration = NULL;
+ struct necp_client_flow_registration *temp_flow_registration = NULL;
+ RB_FOREACH_SAFE(flow_registration, _necp_fd_flow_tree, &fd_data->flows, temp_flow_registration) {
+ NECP_FLOW_TREE_LOCK_EXCLUSIVE();
+ RB_REMOVE(_necp_client_flow_global_tree, &necp_client_flow_global_tree, flow_registration);
+ NECP_FLOW_TREE_UNLOCK();
+ RB_REMOVE(_necp_fd_flow_tree, &fd_data->flows, flow_registration);
+ }
+
+ struct necp_client *client = NULL;
+ struct necp_client *temp_client = NULL;
+ RB_FOREACH_SAFE(client, _necp_client_tree, &fd_data->clients, temp_client) {
+ NECP_CLIENT_TREE_LOCK_EXCLUSIVE();
+ RB_REMOVE(_necp_client_global_tree, &necp_client_global_tree, client);
+ NECP_CLIENT_TREE_UNLOCK();
+ RB_REMOVE(_necp_client_tree, &fd_data->clients, client);
+ RB_INSERT(_necp_client_tree, &clients_to_close, client);
+ }
+
+ struct necp_client_update *client_update = NULL;
+ struct necp_client_update *temp_update = NULL;
+ TAILQ_FOREACH_SAFE(client_update, &fd_data->update_list, chain, temp_update) {
+ // Flush pending updates
+ TAILQ_REMOVE(&fd_data->update_list, client_update, chain);
+ FREE(client_update, M_NECP);
+ }
+ fd_data->update_count = 0;
+
+
+ NECP_FD_UNLOCK(fd_data);
+
+ selthreadclear(&fd_data->si);
+
+ lck_mtx_destroy(&fd_data->fd_lock, necp_fd_mtx_grp);
+
+ if (fd_data->flags & NECP_OPEN_FLAG_PUSH_OBSERVER) {
+ OSDecrementAtomic(&necp_observer_fd_count);
+ } else {
+ OSDecrementAtomic(&necp_client_fd_count);
+ }
+
+ zfree(necp_client_fd_zone, fd_data);
+ fd_data = NULL;
+
+ RB_FOREACH_SAFE(client, _necp_client_tree, &clients_to_close, temp_client) {
+ RB_REMOVE(_necp_client_tree, &clients_to_close, client);
+ necp_destroy_client(client, pid, true);
+ }
+ }
+
+ return (error);
+}
+
+/// NECP client utilities
+
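+// Returns true if the address is the IPv4 or IPv6 unspecified (wildcard) address.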
+static inline bool
+necp_address_is_wildcard(const union necp_sockaddr_union * const addr)
+{
+ return ((addr->sa.sa_family == AF_INET && addr->sin.sin_addr.s_addr == INADDR_ANY) ||
+ (addr->sa.sa_family == AF_INET6 && IN6_IS_ADDR_UNSPECIFIED(&addr->sin6.sin6_addr)));
+}
+
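+// Look up the necp_fd_data backing a file descriptor, verifying that it is an
+// NECP client fd (DTYPE_NETPOLICY of type necp_fd_type_client).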
+static int
+necp_find_fd_data(int fd, struct necp_fd_data **fd_data)
+{
+ proc_t p = current_proc();
+ struct fileproc *fp = NULL;
+ int error = 0;
+
+ proc_fdlock_spin(p);
+ if ((error = fp_lookup(p, fd, &fp, 1)) != 0) {
+ goto done;
+ }
+ if (fp->f_fglob->fg_ops->fo_type != DTYPE_NETPOLICY) {
+ fp_drop(p, fd, fp, 1);
+ error = ENODEV;
+ goto done;
+ }
+ *fd_data = (struct necp_fd_data *)fp->f_fglob->fg_data;
+
+ if ((*fd_data)->necp_fd_type != necp_fd_type_client) {
+ // Not a client fd, ignore; drop the reference taken by fp_lookup
+ fp_drop(p, fd, fp, 1);
+ error = EINVAL;
+ goto done;
+ }
+
+done:
+ proc_fdunlock(p);
+ return (error);
+}
+
+
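+// Allocate a new interface-tracking flow (neither nexus nor socket) for the
+// registration, wired to the registration's interface handle and callback.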
+static struct necp_client_flow *
+necp_client_add_interface_flow(struct necp_client_flow_registration *flow_registration,
+ uint32_t interface_index)
+{
+ struct necp_client_flow *new_flow = mcache_alloc(necp_flow_cache, MCR_SLEEP);
+ if (new_flow == NULL) {
+ NECPLOG0(LOG_ERR, "Failed to allocate interface flow");
+ return NULL;
+ }
+
+ memset(new_flow, 0, sizeof(*new_flow));
+
+ // Neither nexus nor socket
+ new_flow->interface_index = interface_index;
+ new_flow->u.socket_handle = flow_registration->interface_handle;
+ new_flow->u.cb = flow_registration->interface_cb;
+
+ OSIncrementAtomic(&necp_if_flow_count);
+
+ LIST_INSERT_HEAD(&flow_registration->flow_list, new_flow, flow_chain);
+
+ return new_flow;
+}
+
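+// Add an interface flow for the given interface index unless the client does
+// not allow multiple flows or an equivalent flow already exists, in which case
+// the existing flow is revalidated instead of adding a new one.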
+static struct necp_client_flow *
+necp_client_add_interface_flow_if_needed(struct necp_client *client,
+ struct necp_client_flow_registration *flow_registration,
+ uint32_t interface_index)
+{
+ if (!client->allow_multiple_flows ||
+ interface_index == IFSCOPE_NONE) {
+ // Interface not set, or client not allowed to use this mode
+ return NULL;
+ }
+
+ struct necp_client_flow *flow = NULL;
+ LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
+ if (!flow->nexus && !flow->socket && flow->interface_index == interface_index) {
+ // Already have the flow
+ flow->invalid = FALSE;
+ flow->u.socket_handle = flow_registration->interface_handle;
+ flow->u.cb = flow_registration->interface_cb;
+ return NULL;
+ }
+ }
+ return necp_client_add_interface_flow(flow_registration, interface_index);
+}
+
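+// Record an (interface index, generation, nexus agent) option on the client,
+// de-duplicating against existing entries and spilling into the dynamically
+// allocated extra array once the static slots are full.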
+static void
+necp_client_add_interface_option_if_needed(struct necp_client *client,
+ uint32_t interface_index,
+ uint32_t interface_generation,
+ uuid_t *nexus_agent)
+{
+ if (interface_index == IFSCOPE_NONE ||
+ (client->interface_option_count != 0 && !client->allow_multiple_flows)) {
+ // Interface not set, or client not allowed to use this mode
+ return;
+ }
+
+ if (client->interface_option_count >= NECP_CLIENT_MAX_INTERFACE_OPTIONS) {
+ // Cannot take any more interface options
+ return;
+ }
+
+ // Check if already present
+ for (u_int32_t option_i = 0; option_i < client->interface_option_count; option_i++) {
+ if (option_i < NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT) {
+ struct necp_client_interface_option *option = &client->interface_options[option_i];
+ if (option->interface_index == interface_index) {
+ if (nexus_agent == NULL) {
+ return;
+ }
+ if (uuid_compare(option->nexus_agent, *nexus_agent) == 0) {
+ return;
+ }
+ if (uuid_is_null(option->nexus_agent)) {
+ uuid_copy(option->nexus_agent, *nexus_agent);
+ return;
+ }
+ // If we get to this point, this is a new nexus flow
+ }
+ } else {
+ struct necp_client_interface_option *option = &client->extra_interface_options[option_i - NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT];
+ if (option->interface_index == interface_index) {
+ if (nexus_agent == NULL) {
+ return;
+ }
+ if (uuid_compare(option->nexus_agent, *nexus_agent) == 0) {
+ return;
+ }
+ if (uuid_is_null(option->nexus_agent)) {
+ uuid_copy(option->nexus_agent, *nexus_agent);
+ return;
+ }
+ // If we get to this point, this is a new nexus flow
+ }
+ }
+ }
+
+ // Add a new entry
+ if (client->interface_option_count < NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT) {
+ // Add to static
+ struct necp_client_interface_option *option = &client->interface_options[client->interface_option_count];
+ option->interface_index = interface_index;
+ option->interface_generation = interface_generation;
+ if (nexus_agent != NULL) {
+ uuid_copy(option->nexus_agent, *nexus_agent);
+ }
+ client->interface_option_count++;
+ } else {
+ // Add to extra
+ if (client->extra_interface_options == NULL) {
+ client->extra_interface_options = _MALLOC(sizeof(struct necp_client_interface_option) * NECP_CLIENT_INTERFACE_OPTION_EXTRA_COUNT, M_NECP, M_WAITOK | M_ZERO);
+ }
+ if (client->extra_interface_options != NULL) {
+ struct necp_client_interface_option *option = &client->extra_interface_options[client->interface_option_count - NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT];
+ option->interface_index = interface_index;
+ option->interface_generation = interface_generation;
+ if (nexus_agent != NULL) {
+ uuid_copy(option->nexus_agent, *nexus_agent);
+ }
+ client->interface_option_count++;
+ }
+ }
+}
+
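+// Re-run the policy match for a single flow and report whether it is viable:
+// the match must succeed, resolve to a routed interface, and not result in DROP.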
+static bool
+necp_client_flow_is_viable(proc_t proc, struct necp_client *client,
+ struct necp_client_flow *flow)
+{
+ struct necp_aggregate_result result;
+ bool ignore_address = (client->allow_multiple_flows && !flow->nexus && !flow->socket);
+
+ flow->necp_flow_flags = 0;
+ int error = necp_application_find_policy_match_internal(proc, client->parameters,
+ (u_int32_t)client->parameters_length,
+ &result, &flow->necp_flow_flags,
+ flow->interface_index,
+ &flow->local_addr, &flow->remote_addr, NULL, ignore_address);
+
+ return (error == 0 &&
+ result.routed_interface_index != IFSCOPE_NONE &&
+ result.routing_result != NECP_KERNEL_POLICY_RESULT_DROP);
+}
+
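+// Walk the client's interface options and ensure each one has a tracking flow
+// on this registration, optionally issuing the initial viability callback.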
+static void
+necp_flow_add_interface_flows(proc_t proc,
+ struct necp_client *client,
+ struct necp_client_flow_registration *flow_registration,
+ bool send_initial)
+{
+ // Traverse all interfaces and add a tracking flow if needed
+ for (u_int32_t option_i = 0; option_i < client->interface_option_count; option_i++) {
+ if (option_i < NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT) {
+ struct necp_client_interface_option *option = &client->interface_options[option_i];
+ struct necp_client_flow *flow = necp_client_add_interface_flow_if_needed(client, flow_registration, option->interface_index);
+ if (flow != NULL && send_initial) {
+ flow->viable = necp_client_flow_is_viable(proc, client, flow);
+ if (flow->viable && flow->u.cb) {
+ bool viable = flow->viable;
+ flow->u.cb(flow_registration->interface_handle, NECP_CLIENT_CBACTION_INITIAL, flow->interface_index, flow->necp_flow_flags, &viable);
+ flow->viable = viable;
+ }
+ }
+ } else {
+ struct necp_client_interface_option *option = &client->extra_interface_options[option_i - NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT];
+ struct necp_client_flow *flow = necp_client_add_interface_flow_if_needed(client, flow_registration, option->interface_index);
+ if (flow != NULL && send_initial) {
+ flow->viable = necp_client_flow_is_viable(proc, client, flow);
+ if (flow->viable && flow->u.cb) {
+ bool viable = flow->viable;
+ flow->u.cb(flow_registration->interface_handle, NECP_CLIENT_CBACTION_INITIAL, flow->interface_index, flow->necp_flow_flags, &viable);
+ flow->viable = viable;
+ }
+ }
+ }
+ }
+}
+
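+// Recompute viability for every flow on each of the client's registrations,
+// issuing viability callbacks and pruning non-nexus flows that no longer match
+// and have no assigned data. Returns true if anything changed; the client lock
+// must be held.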
+static bool
+necp_client_update_flows(proc_t proc,
+ struct necp_client *client,
+ struct _necp_flow_defunct_list *defunct_list)
+{
+ NECP_CLIENT_ASSERT_LOCKED(client);
+
+ bool client_updated = FALSE;
+ struct necp_client_flow *flow = NULL;
+ struct necp_client_flow *temp_flow = NULL;
+ struct necp_client_flow_registration *flow_registration = NULL;
+ RB_FOREACH(flow_registration, _necp_client_flow_tree, &client->flow_registrations) {
+ if (flow_registration->interface_cb != NULL) {
+ // Add any interface flows that are not already tracked
+ necp_flow_add_interface_flows(proc, client, flow_registration, false);
+ }
+
+ LIST_FOREACH_SAFE(flow, &flow_registration->flow_list, flow_chain, temp_flow) {
+ // Check policy result for flow
+ int old_flags = flow->necp_flow_flags;
+ bool viable = necp_client_flow_is_viable(proc, client, flow);
+
+ // TODO: Defunct nexus flows that are blocked by policy
+
+ if (flow->viable != viable) {
+ flow->viable = viable;
+ client_updated = TRUE;
+ }
+
+ if ((old_flags & NECP_CLIENT_RESULT_FLAG_FORCE_UPDATE) !=
+ (flow->necp_flow_flags & NECP_CLIENT_RESULT_FLAG_FORCE_UPDATE)) {
+ client_updated = TRUE;
+ }
+
+ if (flow->viable && client_updated && (flow->socket || (!flow->socket && !flow->nexus)) && flow->u.cb) {
+ bool flow_viable = flow->viable;
+ flow->u.cb(flow->u.socket_handle, NECP_CLIENT_CBACTION_VIABLE, flow->interface_index, flow->necp_flow_flags, &flow_viable);
+ flow->viable = flow_viable;
+ }
+
+ if (!flow->viable || flow->invalid) {
+ if (client_updated && (flow->socket || (!flow->socket && !flow->nexus)) && flow->u.cb) {
+ bool flow_viable = flow->viable;
+ flow->u.cb(flow->u.socket_handle, NECP_CLIENT_CBACTION_NONVIABLE, flow->interface_index, flow->necp_flow_flags, &flow_viable);
+ flow->viable = flow_viable;
+ }
+ // The callback might change the viable-flag of the
+ // flow depending on its policy. Thus, we need to
+ // check the flags again after the callback.
+ }
+
+ (void)defunct_list;
+
+ // Handle flows that no longer match
+ if (!flow->viable || flow->invalid) {
+ // Drop them as long as they aren't assigned data
+ if (!flow->nexus && !flow->assigned) {
+ if (flow->assigned_results != NULL) {
+ FREE(flow->assigned_results, M_NETAGENT);
+ flow->assigned_results = NULL;
+ client_updated = TRUE;
+ }
+ LIST_REMOVE(flow, flow_chain);
+ if (flow->socket) {
+ OSDecrementAtomic(&necp_socket_flow_count);
+ } else {
+ OSDecrementAtomic(&necp_if_flow_count);
+ }
+ mcache_free(necp_flow_cache, flow);
+ }
+ }
+ }
+ }
+
+ return (client_updated);
+}
+
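+// Mark every non-socket flow as invalid ahead of a recalculation and reset the
+// client's interface option count; socket flows are left untouched.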
+static void
+necp_client_mark_all_nonsocket_flows_as_invalid(struct necp_client *client)
+{
+ struct necp_client_flow_registration *flow_registration = NULL;
+ struct necp_client_flow *flow = NULL;
+ RB_FOREACH(flow_registration, _necp_client_flow_tree, &client->flow_registrations) {
+ LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
+ if (!flow->socket) { // Socket flows are not marked as invalid
+ flow->invalid = TRUE;
+ }
+ }
+ }
+
+ // Reset option count every update
+ client->interface_option_count = 0;
+}
+
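+// Decide whether a netagent should be offered to this client: unregistered
+// agents are hidden, nexus providers are hidden unless explicitly allowed, and
+// an agent whose trigger previously failed stays hidden until its generation
+// changes.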
+static bool
+necp_netagent_applies_to_client(struct necp_client *client,
+ const struct necp_client_parsed_parameters *parameters,
+ uuid_t *netagent_uuid, bool allow_nexus,
+ uint32_t interface_index, uint32_t interface_generation)
+{
+#pragma unused(interface_index, interface_generation)
+ bool applies = FALSE;
+ u_int32_t flags = netagent_get_flags(*netagent_uuid);
+ if (!(flags & NETAGENT_FLAG_REGISTERED)) {
+ // Unregistered agents never apply
+ return (applies);
+ }
+
+ if (!allow_nexus &&
+ (flags & NETAGENT_FLAG_NEXUS_PROVIDER)) {
+ // Hide nexus providers unless allowed
+ // Direct interfaces and direct policies are allowed to use a nexus
+ // Delegate interfaces or re-scoped interfaces are not allowed
+ return (applies);
+ }
+
+ if (uuid_compare(client->failed_trigger_agent.netagent_uuid, *netagent_uuid) == 0) {
+ if (client->failed_trigger_agent.generation == netagent_get_generation(*netagent_uuid)) {
+ // If this agent was triggered, and failed, and hasn't changed, keep hiding it
+ return (applies);
+ } else {
+ // Mismatch generation, clear out old trigger
+ uuid_clear(client->failed_trigger_agent.netagent_uuid);
+ client->failed_trigger_agent.generation = 0;
+ }
+ }
+
+ if (flags & NETAGENT_FLAG_SPECIFIC_USE_ONLY) {