/*
 * Copyright (c) 2013-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <libkern/OSMalloc.h>
#include <sys/kernel.h>
#include <sys/kern_control.h>
#include <sys/kpi_mbuf.h>
#include <sys/proc_uuid_policy.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/coalition.h>
#include <sys/codesign.h>
#include <kern/cs_blobs.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_cache.h>
#include <netinet/udp.h>
#include <netinet/in_pcb.h>
#include <netinet/in_tclass.h>
#include <netinet6/esp.h>
#include <net/flowhash.h>
#include <net/if_var.h>
#include <sys/kauth.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/kern_event.h>
#include <sys/file_internal.h>
#include <IOKit/IOBSD.h>
#include <libkern/crypto/rand.h>
#include <corecrypto/cchmac.h>
#include <corecrypto/ccsha2.h>
#include <os/refcnt.h>
#include <net/network_agent.h>
/*
 * NECP - Network Extension Control Policy database
 * ------------------------------------------------
 * The goal of this module is to allow clients connecting via a
 * kernel control socket to create high-level policy sessions, which
 * are ingested into low-level kernel policies that control and tag
 * traffic at the application, socket, and IP layers.
 *
 * ------------------------------------------------
 * Sessions
 * ------------------------------------------------
 * Each session owns a list of session policies, each of which can
 * specify any combination of conditions and a single result. Each
 * session also has a priority level (such as High, Default, or Low)
 * which is requested by the client. Based on the requested level,
 * a session order value is assigned to the session, which will be used
 * to sort kernel policies generated by the session. The session client
 * can specify the sub-order for each policy it creates, which will be
 * used to further sort the kernel policies.
 *
 * Kernel Control Socket --> 1 necp_session --> list of necp_session_policy structs
 *
 * ------------------------------------------------
 * Kernel Policies
 * ------------------------------------------------
 * Whenever a session sends the Apply command, its policies are ingested
 * and generate kernel policies. There are two phases of kernel policy
 * ingestion:
 *
 * 1. The session policy is parsed to create kernel policies at the socket
 * and IP layers, when applicable. For example, a policy that requires
 * all traffic from App1 to Pass will generate a socket kernel policy to
 * match App1 and mark packets with ID1, and also an IP policy to match
 * ID1 and let the packet pass. This is handled in necp_apply_policy. The
 * resulting kernel policies are added to the global socket and IP layer
 * policy lists.
 * necp_session_policy --> necp_kernel_socket_policy and necp_kernel_ip_output_policy
 *                                     |                              |
 *                                     v                              v
 *                     necp_kernel_socket_policies    necp_kernel_ip_output_policies
 *
 * 2. Once the global lists of kernel policies have been filled out, each
 * list is traversed to create optimized sub-lists ("Maps") which are used during
 * data-path evaluation. IP policies are sent into necp_kernel_ip_output_policies_map,
 * which hashes incoming packets based on marked socket-layer policies, and removes
 * duplicate or overlapping policies. Socket policies are sent into two maps,
 * necp_kernel_socket_policies_map and necp_kernel_socket_policies_app_layer_map.
 * The app layer map is used for policy checks coming in from user space, and is one
 * list with duplicate and overlapping policies removed. The socket map hashes based
 * on app UUID, and removes duplicate and overlapping policies.
 * necp_kernel_socket_policy --> necp_kernel_socket_policies_app_layer_map
 *                           |-> necp_kernel_socket_policies_map
 *
 * necp_kernel_ip_output_policies --> necp_kernel_ip_output_policies_map
 *
 * ------------------------------------------------
 * Drop All Level
 * ------------------------------------------------
 * The Drop All Level is a sysctl that controls the level at which policies are allowed
 * to override a global drop rule. If the value is 0, no drop rule is applied. If the value
 * is 1, all traffic is dropped. If the value is greater than 1, all kernel policies created
 * by a session with a priority level better than (numerically less than) the
 * Drop All Level will allow matching traffic to not be dropped. The Drop All Level is
 * dynamically interpreted into necp_drop_all_order, which specifies the equivalent assigned
 * session orders to be dropped.
 */
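/*
 * Worked example (illustrative only, derived from the order-allocation scheme
 * in necp_allocate_new_session_order below, which reserves 1000 orders per
 * priority level): with the Drop All Level set to 3, necp_drop_all_order
 * becomes (3 - 1) * 1000 + 1 = 2001. Sessions at priority 1 or 2 receive
 * orders in the range 1..2000, which sort ahead of the drop rule, so their
 * policies can still match traffic; sessions at priority 3 or worse fall
 * under the drop.
 */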
u_int32_t necp_drop_all_order = 0;
u_int32_t necp_drop_all_level = 0;

u_int32_t necp_pass_loopback = 1; // 0=Off, 1=On
u_int32_t necp_pass_keepalives = 1; // 0=Off, 1=On
u_int32_t necp_pass_interpose = 1; // 0=Off, 1=On

u_int32_t necp_drop_unentitled_order = 0;
#ifdef XNU_TARGET_OS_WATCH
u_int32_t necp_drop_unentitled_level = NECP_SESSION_PRIORITY_CONTROL + 1; // Block all unentitled traffic from policies below control level
#else // XNU_TARGET_OS_WATCH
u_int32_t necp_drop_unentitled_level = 0;
#endif // XNU_TARGET_OS_WATCH

u_int32_t necp_debug = 0; // 0=None, 1=Basic, 2=EveryMatch

u_int32_t necp_session_count = 0;
#define LIST_INSERT_SORTED_ASCENDING(head, elm, field, sortfield, tmpelm) do { \
	if (LIST_EMPTY((head)) || (LIST_FIRST(head)->sortfield >= (elm)->sortfield)) { \
		LIST_INSERT_HEAD((head), elm, field); \
	} else { \
		LIST_FOREACH(tmpelm, head, field) { \
			if (LIST_NEXT(tmpelm, field) == NULL || LIST_NEXT(tmpelm, field)->sortfield >= (elm)->sortfield) { \
				LIST_INSERT_AFTER(tmpelm, elm, field); \
				break; \
			} \
		} \
	} \
} while (0)

#define LIST_INSERT_SORTED_TWICE_ASCENDING(head, elm, field, firstsortfield, secondsortfield, tmpelm) do { \
	if (LIST_EMPTY((head)) || (LIST_FIRST(head)->firstsortfield > (elm)->firstsortfield) || ((LIST_FIRST(head)->firstsortfield == (elm)->firstsortfield) && (LIST_FIRST(head)->secondsortfield >= (elm)->secondsortfield))) { \
		LIST_INSERT_HEAD((head), elm, field); \
	} else { \
		LIST_FOREACH(tmpelm, head, field) { \
			if (LIST_NEXT(tmpelm, field) == NULL || (LIST_NEXT(tmpelm, field)->firstsortfield > (elm)->firstsortfield) || ((LIST_NEXT(tmpelm, field)->firstsortfield == (elm)->firstsortfield) && (LIST_NEXT(tmpelm, field)->secondsortfield >= (elm)->secondsortfield))) { \
				LIST_INSERT_AFTER(tmpelm, elm, field); \
				break; \
			} \
		} \
	} \
} while (0)

#define LIST_INSERT_SORTED_THRICE_ASCENDING(head, elm, field, firstsortfield, secondsortfield, thirdsortfield, tmpelm) do { \
	if (LIST_EMPTY((head)) || (LIST_FIRST(head)->firstsortfield > (elm)->firstsortfield) || ((LIST_FIRST(head)->firstsortfield == (elm)->firstsortfield) && (LIST_FIRST(head)->secondsortfield >= (elm)->secondsortfield)) || ((LIST_FIRST(head)->firstsortfield == (elm)->firstsortfield) && (LIST_FIRST(head)->secondsortfield == (elm)->secondsortfield) && (LIST_FIRST(head)->thirdsortfield >= (elm)->thirdsortfield))) { \
		LIST_INSERT_HEAD((head), elm, field); \
	} else { \
		LIST_FOREACH(tmpelm, head, field) { \
			if (LIST_NEXT(tmpelm, field) == NULL || (LIST_NEXT(tmpelm, field)->firstsortfield > (elm)->firstsortfield) || ((LIST_NEXT(tmpelm, field)->firstsortfield == (elm)->firstsortfield) && (LIST_NEXT(tmpelm, field)->secondsortfield >= (elm)->secondsortfield)) || ((LIST_NEXT(tmpelm, field)->firstsortfield == (elm)->firstsortfield) && (LIST_NEXT(tmpelm, field)->secondsortfield == (elm)->secondsortfield) && (LIST_NEXT(tmpelm, field)->thirdsortfield >= (elm)->thirdsortfield))) { \
				LIST_INSERT_AFTER(tmpelm, elm, field); \
				break; \
			} \
		} \
	} \
} while (0)
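/*
 * Usage sketch (hypothetical element type, not taken from this file): the
 * macros above keep a BSD LIST ordered by one, two, or three keys while
 * inserting. For example, with a struct carrying an `order` field and a
 * `chain` LIST_ENTRY:
 *
 *     struct example_elem {
 *         LIST_ENTRY(example_elem) chain;
 *         u_int32_t order;
 *     };
 *     LIST_HEAD(, example_elem) example_list;
 *     struct example_elem *tmp_elem = NULL;
 *     LIST_INSERT_SORTED_ASCENDING(&example_list, new_elem, chain, order, tmp_elem);
 *
 * keeps example_list sorted by ascending `order`, which is how the kernel
 * policy lists below are kept sorted by session order and sub-order.
 */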
#define IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(x)     ((x) == NECP_ROUTE_RULE_DENY_INTERFACE || (x) == NECP_ROUTE_RULE_ALLOW_INTERFACE)

#define IS_NECP_DEST_IN_LOCAL_NETWORKS(rt) \
	((rt) != NULL && !((rt)->rt_flags & RTF_GATEWAY) && ((rt)->rt_ifa && (rt)->rt_ifa->ifa_ifp && !((rt)->rt_ifa->ifa_ifp->if_flags & IFF_POINTOPOINT)))

#define NECP_KERNEL_CONDITION_ALL_INTERFACES            0x000001
#define NECP_KERNEL_CONDITION_BOUND_INTERFACE           0x000002
#define NECP_KERNEL_CONDITION_PROTOCOL                  0x000004
#define NECP_KERNEL_CONDITION_LOCAL_START               0x000008
#define NECP_KERNEL_CONDITION_LOCAL_END                 0x000010
#define NECP_KERNEL_CONDITION_LOCAL_PREFIX              0x000020
#define NECP_KERNEL_CONDITION_REMOTE_START              0x000040
#define NECP_KERNEL_CONDITION_REMOTE_END                0x000080
#define NECP_KERNEL_CONDITION_REMOTE_PREFIX             0x000100
#define NECP_KERNEL_CONDITION_APP_ID                    0x000200
#define NECP_KERNEL_CONDITION_REAL_APP_ID               0x000400
#define NECP_KERNEL_CONDITION_DOMAIN                    0x000800
#define NECP_KERNEL_CONDITION_ACCOUNT_ID                0x001000
#define NECP_KERNEL_CONDITION_POLICY_ID                 0x002000
#define NECP_KERNEL_CONDITION_PID                       0x004000
#define NECP_KERNEL_CONDITION_UID                       0x008000
#define NECP_KERNEL_CONDITION_LAST_INTERFACE            0x010000 // Only set from packets looping between interfaces
#define NECP_KERNEL_CONDITION_TRAFFIC_CLASS             0x020000
#define NECP_KERNEL_CONDITION_ENTITLEMENT               0x040000
#define NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT        0x080000
#define NECP_KERNEL_CONDITION_AGENT_TYPE                0x100000
#define NECP_KERNEL_CONDITION_HAS_CLIENT                0x200000
#define NECP_KERNEL_CONDITION_LOCAL_NETWORKS            0x400000
#define NECP_KERNEL_CONDITION_CLIENT_FLAGS              0x800000
#define NECP_KERNEL_CONDITION_LOCAL_EMPTY               0x1000000
#define NECP_KERNEL_CONDITION_REMOTE_EMPTY              0x2000000
#define NECP_KERNEL_CONDITION_PLATFORM_BINARY           0x4000000

#define NECP_MAX_POLICY_RESULT_SIZE                     512
#define NECP_MAX_ROUTE_RULES_ARRAY_SIZE                 1024
#define NECP_MAX_CONDITIONS_ARRAY_SIZE                  4096
#define NECP_MAX_POLICY_LIST_COUNT                      1024

// Cap the policy size at the max result + conditions size, with room for extra TLVs
#define NECP_MAX_POLICY_SIZE                            (1024 + NECP_MAX_POLICY_RESULT_SIZE + NECP_MAX_CONDITIONS_ARRAY_SIZE)
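/*
 * For reference: with the values above, NECP_MAX_POLICY_SIZE works out to
 * 1024 + 512 + 4096 = 5632 bytes, which is the upper bound enforced on the
 * in_buffer_length of an add-policy request in necp_session_add_policy below.
 */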
struct necp_service_registration {
	LIST_ENTRY(necp_service_registration) session_chain;
	LIST_ENTRY(necp_service_registration) kernel_chain;
	u_int32_t service_id;
};

struct necp_session {
	u_int8_t necp_fd_type;
	u_int32_t control_unit;
	u_int32_t session_priority; // Descriptive priority rating
	u_int32_t session_order;

	necp_policy_id last_policy_id;

	decl_lck_mtx_data(, lock);

	bool proc_locked; // Messages must come from proc_uuid

	LIST_HEAD(_policies, necp_session_policy) policies;

	LIST_HEAD(_services, necp_service_registration) services;

	TAILQ_ENTRY(necp_session) chain;
};

#define NECP_SESSION_LOCK(_s) lck_mtx_lock(&_s->lock)
#define NECP_SESSION_UNLOCK(_s) lck_mtx_unlock(&_s->lock)

static TAILQ_HEAD(_necp_session_list, necp_session) necp_session_list;

struct necp_socket_info {
	union necp_sockaddr_union local_addr;
	union necp_sockaddr_union remote_addr;
	u_int32_t bound_interface_index;
	u_int32_t traffic_class;
	u_int32_t application_id;
	u_int32_t real_application_id;
	u_int32_t account_id;
	u_int32_t drop_order;
	u_int32_t client_flags;
	unsigned has_client : 1;
	unsigned is_platform_binary : 1;
	unsigned __pad_bits : 6;
};

static kern_ctl_ref necp_kctlref;
static u_int32_t necp_family;
static OSMallocTag necp_malloc_tag;
static lck_grp_attr_t *necp_kernel_policy_grp_attr = NULL;
static lck_attr_t *necp_kernel_policy_mtx_attr = NULL;
static lck_grp_t *necp_kernel_policy_mtx_grp = NULL;
decl_lck_rw_data(static, necp_kernel_policy_lock);

static lck_grp_attr_t *necp_route_rule_grp_attr = NULL;
static lck_attr_t *necp_route_rule_mtx_attr = NULL;
static lck_grp_t *necp_route_rule_mtx_grp = NULL;
decl_lck_rw_data(static, necp_route_rule_lock);

os_refgrp_decl(static, necp_refgrp, "NECPRefGroup", NULL);
/*
 * On modification, invalidate cached lookups by bumping the generation count.
 * Other calls will need to take the slowpath of taking
 * the subsystem lock.
 */
static volatile int32_t necp_kernel_socket_policies_gencount;
#define BUMP_KERNEL_SOCKET_POLICIES_GENERATION_COUNT() do { \
	if (OSIncrementAtomic(&necp_kernel_socket_policies_gencount) == (INT32_MAX - 1)) { \
		necp_kernel_socket_policies_gencount = 1; \
	} \
} while (0)

/*
 * Allow privileged processes to bypass the default drop-all
 * via entitlement check. For OSX, since entitlement check is
 * not supported for configd, the configd signing identity is checked
 * instead.
 */
#define SIGNING_ID_CONFIGD "com.apple.configd"
#define SIGNING_ID_CONFIGD_LEN (sizeof(SIGNING_ID_CONFIGD) - 1)

typedef enum {
	NECP_DROP_ALL_BYPASS_CHECK_RESULT_NONE = 0,
	NECP_DROP_ALL_BYPASS_CHECK_RESULT_TRUE = 1,
	NECP_DROP_ALL_BYPASS_CHECK_RESULT_FALSE = 2,
} necp_drop_all_bypass_check_result_t;
static u_int32_t necp_kernel_application_policies_condition_mask;
static size_t necp_kernel_application_policies_count;
static u_int32_t necp_kernel_socket_policies_condition_mask;
static size_t necp_kernel_socket_policies_count;
static size_t necp_kernel_socket_policies_non_app_count;
static LIST_HEAD(_necpkernelsocketconnectpolicies, necp_kernel_socket_policy) necp_kernel_socket_policies;
#define NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS 5
#define NECP_SOCKET_MAP_APP_ID_TO_BUCKET(appid) (appid ? (appid%(NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS - 1) + 1) : 0)
static struct necp_kernel_socket_policy **necp_kernel_socket_policies_map[NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS];
static struct necp_kernel_socket_policy **necp_kernel_socket_policies_app_layer_map;
/*
 * A note on policy 'maps': these are used for boosting efficiency when matching policies. For each dimension of the map,
 * such as an ID, the 0 bucket is reserved for sockets/packets that do not have this parameter, while the other
 * buckets lead to an array of policy pointers that form the list applicable when the (parameter%(NUM_BUCKETS - 1) + 1) == bucket_index.
 *
 * For example, a packet with policy ID of 7, when there are 4 ID buckets, will map to bucket (7%3 + 1) = 2.
 */
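/*
 * Concretely, for the socket app-ID map declared above (5 buckets, so 4
 * non-zero buckets): an application ID of 7 maps to bucket (7 % 4) + 1 = 4,
 * an application ID of 8 maps to bucket (8 % 4) + 1 = 1, and an application
 * ID of 0 always maps to bucket 0, the "no app ID" list.
 */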
static u_int32_t necp_kernel_ip_output_policies_condition_mask;
static size_t necp_kernel_ip_output_policies_count;
static size_t necp_kernel_ip_output_policies_non_id_count;
static LIST_HEAD(_necpkernelipoutputpolicies, necp_kernel_ip_output_policy) necp_kernel_ip_output_policies;
#define NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS 5
#define NECP_IP_OUTPUT_MAP_ID_TO_BUCKET(id) (id ? (id%(NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS - 1) + 1) : 0)
static struct necp_kernel_ip_output_policy **necp_kernel_ip_output_policies_map[NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS];

static struct necp_kernel_socket_policy pass_policy =
{
	.id = NECP_KERNEL_POLICY_ID_NO_MATCH,
	.result = NECP_KERNEL_POLICY_RESULT_PASS,
};
static struct necp_session *necp_create_session(void);
static void necp_delete_session(struct necp_session *session);

static necp_policy_id necp_handle_policy_add(struct necp_session *session, u_int32_t message_id, mbuf_t packet,
    u_int8_t *tlv_buffer, size_t tlv_buffer_length, int offset, int *error);
static void necp_handle_policy_get(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
static void necp_handle_policy_delete(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
static void necp_handle_policy_apply_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
static void necp_handle_policy_list_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
static void necp_handle_policy_delete_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
static int necp_handle_policy_dump_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet,
    user_addr_t out_buffer, size_t out_buffer_length, int offset);
static void necp_handle_set_session_priority(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
static void necp_handle_lock_session_to_proc(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
static void necp_handle_register_service(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
static void necp_handle_unregister_service(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);

#define MAX_RESULT_STRING_LEN 64
static inline const char * necp_get_result_description(char *result_string, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter);

static struct necp_session_policy *necp_policy_create(struct necp_session *session, necp_policy_order order, u_int8_t *conditions_array, u_int32_t conditions_array_size, u_int8_t *route_rules_array, u_int32_t route_rules_array_size, u_int8_t *result, u_int32_t result_size);
static struct necp_session_policy *necp_policy_find(struct necp_session *session, necp_policy_id policy_id);
static bool necp_policy_mark_for_deletion(struct necp_session *session, struct necp_session_policy *policy);
static bool necp_policy_mark_all_for_deletion(struct necp_session *session);
static bool necp_policy_delete(struct necp_session *session, struct necp_session_policy *policy);
static void necp_policy_apply_all(struct necp_session *session);

static necp_kernel_policy_id necp_kernel_socket_policy_add(necp_policy_order order, u_int32_t session_order, int session_pid, u_int32_t condition_mask, u_int32_t condition_negated_mask, necp_app_id cond_app_id, necp_app_id cond_real_app_id, char *cond_custom_entitlement, u_int32_t cond_account_id, char *domain, pid_t cond_pid, uid_t cond_uid, ifnet_t cond_bound_interface, struct necp_policy_condition_tc_range cond_traffic_class, u_int16_t cond_protocol, union necp_sockaddr_union *cond_local_start, union necp_sockaddr_union *cond_local_end, u_int8_t cond_local_prefix, union necp_sockaddr_union *cond_remote_start, union necp_sockaddr_union *cond_remote_end, u_int8_t cond_remote_prefix, struct necp_policy_condition_agent_type *cond_agent_type, u_int32_t cond_client_flags, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter);
static bool necp_kernel_socket_policy_delete(necp_kernel_policy_id policy_id);
static bool necp_kernel_socket_policies_reprocess(void);
static bool necp_kernel_socket_policies_update_uuid_table(void);
static inline struct necp_kernel_socket_policy *necp_socket_find_policy_match_with_info_locked(struct necp_kernel_socket_policy **policy_search_array, struct necp_socket_info *info, necp_kernel_policy_filter *return_filter, u_int32_t *return_route_rule_id_array, size_t *return_route_rule_id_array_count, size_t route_rule_id_array_count, necp_kernel_policy_result *return_service_action, necp_kernel_policy_service *return_service, u_int32_t *return_netagent_array, u_int32_t *return_netagent_use_flags_array, size_t netagent_array_count, struct necp_client_parameter_netagent_type *required_agent_types, u_int32_t num_required_agent_types, proc_t proc, necp_kernel_policy_id *skip_policy_id, struct rtentry *rt, necp_kernel_policy_result *return_drop_dest_policy_result, necp_drop_all_bypass_check_result_t *return_drop_all_bypass);

static necp_kernel_policy_id necp_kernel_ip_output_policy_add(necp_policy_order order, necp_policy_order suborder, u_int32_t session_order, int session_pid, u_int32_t condition_mask, u_int32_t condition_negated_mask, necp_kernel_policy_id cond_policy_id, ifnet_t cond_bound_interface, u_int32_t cond_last_interface_index, u_int16_t cond_protocol, union necp_sockaddr_union *cond_local_start, union necp_sockaddr_union *cond_local_end, u_int8_t cond_local_prefix, union necp_sockaddr_union *cond_remote_start, union necp_sockaddr_union *cond_remote_end, u_int8_t cond_remote_prefix, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter);
static bool necp_kernel_ip_output_policy_delete(necp_kernel_policy_id policy_id);
static bool necp_kernel_ip_output_policies_reprocess(void);

static bool necp_is_addr_in_range(struct sockaddr *addr, struct sockaddr *range_start, struct sockaddr *range_end);
static bool necp_is_range_in_range(struct sockaddr *inner_range_start, struct sockaddr *inner_range_end, struct sockaddr *range_start, struct sockaddr *range_end);
static bool necp_is_addr_in_subnet(struct sockaddr *addr, struct sockaddr *subnet_addr, u_int8_t subnet_prefix);
static int necp_addr_compare(struct sockaddr *sa1, struct sockaddr *sa2, int check_port);
static bool necp_buffer_compare_with_bit_prefix(u_int8_t *p1, u_int8_t *p2, u_int32_t bits);
static bool necp_addr_is_empty(struct sockaddr *addr);
static bool necp_is_loopback(struct sockaddr *local_addr, struct sockaddr *remote_addr, struct inpcb *inp, struct mbuf *packet);
static bool necp_is_intcoproc(struct inpcb *inp, struct mbuf *packet);

struct necp_uuid_id_mapping {
	LIST_ENTRY(necp_uuid_id_mapping) chain;
	os_refcnt_t refcount;
	u_int32_t table_usecount; // Add to UUID policy table count
};
static size_t necp_num_uuid_app_id_mappings;
static bool necp_uuid_app_id_mappings_dirty;
#define NECP_UUID_APP_ID_HASH_SIZE 64
static u_long necp_uuid_app_id_hash_mask;
static u_long necp_uuid_app_id_hash_num_buckets;
static LIST_HEAD(necp_uuid_id_mapping_head, necp_uuid_id_mapping) * necp_uuid_app_id_hashtbl, necp_uuid_service_id_list; // App map is real hash table, service map is just mapping
#define APPUUIDHASH(uuid) (&necp_uuid_app_id_hashtbl[uuid[0] & necp_uuid_app_id_hash_mask]) // Assume first byte of UUIDs are evenly distributed
static u_int32_t necp_create_uuid_app_id_mapping(uuid_t uuid, bool *allocated_mapping, bool uuid_policy_table);
static bool necp_remove_uuid_app_id_mapping(uuid_t uuid, bool *removed_mapping, bool uuid_policy_table);
static struct necp_uuid_id_mapping *necp_uuid_lookup_uuid_with_app_id_locked(u_int32_t local_id);

static struct necp_uuid_id_mapping *necp_uuid_lookup_service_id_locked(uuid_t uuid);
static struct necp_uuid_id_mapping *necp_uuid_lookup_uuid_with_service_id_locked(u_int32_t local_id);
static u_int32_t necp_create_uuid_service_id_mapping(uuid_t uuid);
static bool necp_remove_uuid_service_id_mapping(uuid_t uuid);

struct necp_string_id_mapping {
	LIST_ENTRY(necp_string_id_mapping) chain;
	os_refcnt_t refcount;
};
static LIST_HEAD(necp_string_id_mapping_list, necp_string_id_mapping) necp_account_id_list;
static u_int32_t necp_create_string_to_id_mapping(struct necp_string_id_mapping_list *list, char *domain);
static bool necp_remove_string_to_id_mapping(struct necp_string_id_mapping_list *list, char *domain);
static struct necp_string_id_mapping *necp_lookup_string_with_id_locked(struct necp_string_id_mapping_list *list, u_int32_t local_id);

static struct necp_kernel_socket_policy *necp_kernel_socket_policy_find(necp_kernel_policy_id policy_id);
static struct necp_kernel_ip_output_policy *necp_kernel_ip_output_policy_find(necp_kernel_policy_id policy_id);

static LIST_HEAD(_necp_kernel_service_list, necp_service_registration) necp_registered_service_list;

static char *necp_create_trimmed_domain(char *string, size_t length);
static inline int necp_count_dots(char *string, size_t length);

static char *necp_copy_string(char *string, size_t length);
static bool necp_update_qos_marking(struct ifnet *ifp, u_int32_t route_rule_id);

#define ROUTE_RULE_IS_AGGREGATE(ruleid) (ruleid > UINT16_MAX)

#define MAX_ROUTE_RULE_INTERFACES 10
struct necp_route_rule {
	LIST_ENTRY(necp_route_rule) chain;
	u_int32_t default_action;
	u_int8_t cellular_action;
	u_int8_t wifi_action;
	u_int8_t wired_action;
	u_int8_t expensive_action;
	u_int8_t constrained_action;
	u_int exception_if_indices[MAX_ROUTE_RULE_INTERFACES];
	u_int8_t exception_if_actions[MAX_ROUTE_RULE_INTERFACES];
	os_refcnt_t refcount;
};
static LIST_HEAD(necp_route_rule_list, necp_route_rule) necp_route_rules;
static u_int32_t necp_create_route_rule(struct necp_route_rule_list *list, u_int8_t *route_rules_array, u_int32_t route_rules_array_size);
static bool necp_remove_route_rule(struct necp_route_rule_list *list, u_int32_t route_rule_id);
static bool necp_route_is_allowed(struct rtentry *route, ifnet_t interface, u_int32_t route_rule_id, u_int32_t *interface_type_denied);
static struct necp_route_rule *necp_lookup_route_rule_locked(struct necp_route_rule_list *list, u_int32_t route_rule_id);
static inline void necp_get_parent_cred_result(proc_t proc, struct necp_socket_info *info);

#define MAX_AGGREGATE_ROUTE_RULES 16
struct necp_aggregate_route_rule {
	LIST_ENTRY(necp_aggregate_route_rule) chain;
	u_int32_t rule_ids[MAX_AGGREGATE_ROUTE_RULES];
};
static LIST_HEAD(necp_aggregate_route_rule_list, necp_aggregate_route_rule) necp_aggregate_route_rules;
static u_int32_t necp_create_aggregate_route_rule(u_int32_t *rule_ids);

// Sysctl definitions
static int sysctl_handle_necp_level SYSCTL_HANDLER_ARGS;
static int sysctl_handle_necp_unentitled_level SYSCTL_HANDLER_ARGS;

SYSCTL_NODE(_net, OID_AUTO, necp, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "NECP");
SYSCTL_INT(_net_necp, NECPCTL_PASS_LOOPBACK, pass_loopback, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_pass_loopback, 0, "");
SYSCTL_INT(_net_necp, NECPCTL_PASS_KEEPALIVES, pass_keepalives, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_pass_keepalives, 0, "");
SYSCTL_INT(_net_necp, NECPCTL_PASS_INTERPOSE, pass_interpose, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_pass_interpose, 0, "");
SYSCTL_INT(_net_necp, NECPCTL_DEBUG, debug, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_debug, 0, "");
SYSCTL_PROC(_net_necp, NECPCTL_DROP_UNENTITLED_LEVEL, drop_unentitled_level, CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW, &necp_drop_unentitled_level, 0, &sysctl_handle_necp_unentitled_level, "IU", "");
SYSCTL_PROC(_net_necp, NECPCTL_DROP_ALL_LEVEL, drop_all_level, CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW, &necp_drop_all_level, 0, &sysctl_handle_necp_level, "IU", "");
SYSCTL_LONG(_net_necp, NECPCTL_SOCKET_POLICY_COUNT, socket_policy_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_kernel_socket_policies_count, "");
SYSCTL_LONG(_net_necp, NECPCTL_SOCKET_NON_APP_POLICY_COUNT, socket_non_app_policy_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_kernel_socket_policies_non_app_count, "");
SYSCTL_LONG(_net_necp, NECPCTL_IP_POLICY_COUNT, ip_policy_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_kernel_ip_output_policies_count, "");
SYSCTL_INT(_net_necp, NECPCTL_SESSION_COUNT, session_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_session_count, 0, "");

static struct necp_drop_dest_policy necp_drop_dest_policy;
static int necp_drop_dest_debug = 0; // 0: off, 1: match, >1: every evaluation
SYSCTL_INT(_net_necp, OID_AUTO, drop_dest_debug, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_drop_dest_debug, 0, "");

static int sysctl_handle_necp_drop_dest_level SYSCTL_HANDLER_ARGS;
SYSCTL_PROC(_net_necp, OID_AUTO, drop_dest_level, CTLTYPE_STRUCT | CTLFLAG_LOCKED | CTLFLAG_ANYBODY | CTLFLAG_RW,
    0, 0, &sysctl_handle_necp_drop_dest_level, "S,necp_drop_dest_level", "");

static bool necp_address_matches_drop_dest_policy(union necp_sockaddr_union *, u_int32_t);
// Session order allocation
static u_int32_t
necp_allocate_new_session_order(u_int32_t priority, u_int32_t control_unit)
{
	u_int32_t new_order = 0;

	// For now, just allocate 1000 orders for each priority
	if (priority == NECP_SESSION_PRIORITY_UNKNOWN || priority > NECP_SESSION_NUM_PRIORITIES) {
		priority = NECP_SESSION_PRIORITY_DEFAULT;
	}

	// Use the control unit to decide the offset into the priority list
	new_order = (control_unit) + ((priority - 1) * 1000);

	return new_order;
}

static inline u_int32_t
necp_get_first_order_for_priority(u_int32_t priority)
{
	return ((priority - 1) * 1000) + 1;
}
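/*
 * Worked example of the order arithmetic above (values are illustrative, not
 * taken from the priority constants themselves): a session at priority level 2
 * using kernel-control unit 7 receives session order 7 + (2 - 1) * 1000 = 1007,
 * and the first order reserved for priority level 2 is (2 - 1) * 1000 + 1 = 1001.
 * Lower orders sort first, so lower-numbered priority levels always win ties
 * against higher ones.
 */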
static int
sysctl_handle_necp_level SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
	necp_drop_all_order = necp_get_first_order_for_priority(necp_drop_all_level);
	return error;
}

static int
sysctl_handle_necp_unentitled_level SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
	necp_drop_unentitled_order = necp_get_first_order_for_priority(necp_drop_unentitled_level);
	return error;
}

// Use a macro here to avoid computing the kauth_cred_t when necp_drop_unentitled_level is 0
static inline u_int32_t
_necp_process_drop_order_inner(kauth_cred_t cred)
{
	if (priv_check_cred(cred, PRIV_NET_PRIVILEGED_CLIENT_ACCESS, 0) != 0 &&
	    priv_check_cred(cred, PRIV_NET_PRIVILEGED_SERVER_ACCESS, 0) != 0) {
		return necp_drop_unentitled_order;
	}

	return 0;
}

#define necp_process_drop_order(_cred) (necp_drop_unentitled_order != 0 ? _necp_process_drop_order_inner(_cred) : necp_drop_unentitled_order)
#pragma GCC poison _necp_process_drop_order_inner
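/*
 * Intended usage sketch (hypothetical caller, shown only to illustrate the
 * macro above): callers evaluate a socket's credential once per lookup, e.g.
 *
 *     info->drop_order = necp_process_drop_order(so->so_cred);
 *
 * When necp_drop_unentitled_order is 0 (the unentitled drop level is unset),
 * the macro short-circuits to 0 and the privilege checks in
 * _necp_process_drop_order_inner are never invoked.
 */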
static int necp_session_op_close(struct fileglob *, vfs_context_t);

static const struct fileops necp_session_fd_ops = {
	.fo_type = DTYPE_NETPOLICY,
	.fo_read = fo_no_read,
	.fo_write = fo_no_write,
	.fo_ioctl = fo_no_ioctl,
	.fo_select = fo_no_select,
	.fo_close = necp_session_op_close,
	.fo_drain = fo_no_drain,
	.fo_kqfilter = fo_no_kqfilter,
};

static inline necp_drop_all_bypass_check_result_t
necp_check_drop_all_bypass_result(proc_t proc)
{
	if (proc == NULL) {
		proc = current_proc();
		if (proc == NULL) {
			return NECP_DROP_ALL_BYPASS_CHECK_RESULT_FALSE;
		}
	}

#if defined(XNU_TARGET_OS_OSX)
	const char *signing_id = NULL;
	const bool isConfigd = (csproc_get_platform_binary(proc) &&
	    (signing_id = cs_identity_get(proc)) &&
	    (strlen(signing_id) == SIGNING_ID_CONFIGD_LEN) &&
	    (memcmp(signing_id, SIGNING_ID_CONFIGD, SIGNING_ID_CONFIGD_LEN) == 0));
	if (isConfigd) {
		return NECP_DROP_ALL_BYPASS_CHECK_RESULT_TRUE;
	}
#endif

	const task_t task = proc_task(proc);
	if (task == NULL || !IOTaskHasEntitlement(task, "com.apple.private.necp.drop_all_bypass")) {
		return NECP_DROP_ALL_BYPASS_CHECK_RESULT_FALSE;
	}

	return NECP_DROP_ALL_BYPASS_CHECK_RESULT_TRUE;
}
int
necp_session_open(struct proc *p, struct necp_session_open_args *uap, int *retval)
{
	struct necp_session *session = NULL;
	struct fileproc *fp = NULL;

	uid_t uid = kauth_cred_getuid(proc_ucred(p));
	if (uid != 0 && priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES, 0) != 0) {
		NECPLOG0(LOG_ERR, "Process does not hold necessary entitlement to open NECP session");
	}

	error = falloc(p, &fp, &fd, vfs_context_current());

	session = necp_create_session();
	if (session == NULL) {
	}

	fp->f_fglob->fg_flag = 0;
	fp->f_fglob->fg_ops = &necp_session_fd_ops;
	fp->f_fglob->fg_data = session;

	FDFLAGS_SET(p, fd, (UF_EXCLOSE | UF_FORKCLOSE));
	procfdtbl_releasefd(p, fd, NULL);
	fp_drop(p, fd, fp, 1);
}
static int
necp_session_op_close(struct fileglob *fg, vfs_context_t ctx)
{
	struct necp_session *session = (struct necp_session *)fg->fg_data;

	if (session != NULL) {
		necp_policy_mark_all_for_deletion(session);
		necp_policy_apply_all(session);
		necp_delete_session(session);
	}
}
static int
necp_session_find_from_fd(int fd, struct necp_session **session)
{
	proc_t p = current_proc();
	struct fileproc *fp = NULL;

	if ((error = fp_lookup(p, fd, &fp, 1)) != 0) {
	}

	if (fp->f_fglob->fg_ops->fo_type != DTYPE_NETPOLICY) {
		fp_drop(p, fd, fp, 1);
	}

	*session = (struct necp_session *)fp->f_fglob->fg_data;

	if ((*session)->necp_fd_type != necp_fd_type_session) {
		// Not a client fd, ignore
		fp_drop(p, fd, fp, 1);
	}
}
static int
necp_session_add_policy(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
{
	int error = 0;
	u_int8_t *tlv_buffer = NULL;

	if (uap->in_buffer_length == 0 || uap->in_buffer_length > NECP_MAX_POLICY_SIZE || uap->in_buffer == 0) {
		NECPLOG(LOG_ERR, "necp_session_add_policy invalid input (%zu)", uap->in_buffer_length);
	}

	if (uap->out_buffer_length < sizeof(necp_policy_id) || uap->out_buffer == 0) {
		NECPLOG(LOG_ERR, "necp_session_add_policy invalid output buffer (%zu)", uap->out_buffer_length);
	}

	if ((tlv_buffer = _MALLOC(uap->in_buffer_length, M_NECP, M_WAITOK | M_ZERO)) == NULL) {
	}

	error = copyin(uap->in_buffer, tlv_buffer, uap->in_buffer_length);
	if (error != 0) {
		NECPLOG(LOG_ERR, "necp_session_add_policy tlv copyin error (%d)", error);
	}

	necp_policy_id new_policy_id = necp_handle_policy_add(session, 0, NULL, tlv_buffer, uap->in_buffer_length, 0, &error);
	if (error != 0) {
		NECPLOG(LOG_ERR, "necp_session_add_policy failed to add policy (%d)", error);
	}

	error = copyout(&new_policy_id, uap->out_buffer, sizeof(new_policy_id));
	if (error != 0) {
		NECPLOG(LOG_ERR, "necp_session_add_policy policy_id copyout error (%d)", error);
	}

	if (tlv_buffer != NULL) {
		FREE(tlv_buffer, M_NECP);
	}
}
static int
necp_session_get_policy(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
{
	int error = 0;
	u_int8_t *response = NULL;

	if (uap->in_buffer_length < sizeof(necp_policy_id) || uap->in_buffer == 0) {
		NECPLOG(LOG_ERR, "necp_session_get_policy invalid input (%zu)", uap->in_buffer_length);
	}

	necp_policy_id policy_id = 0;
	error = copyin(uap->in_buffer, &policy_id, sizeof(policy_id));
	if (error != 0) {
		NECPLOG(LOG_ERR, "necp_session_get_policy policy_id copyin error (%d)", error);
	}

	struct necp_session_policy *policy = necp_policy_find(session, policy_id);
	if (policy == NULL || policy->pending_deletion) {
		NECPLOG(LOG_ERR, "Failed to find policy with id %d", policy_id);
	}

	u_int32_t order_tlv_size = sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(necp_policy_order);
	u_int32_t result_tlv_size = (policy->result_size ? (sizeof(u_int8_t) + sizeof(u_int32_t) + policy->result_size) : 0);
	u_int32_t response_size = order_tlv_size + result_tlv_size + policy->conditions_size;

	if (uap->out_buffer_length < response_size || uap->out_buffer == 0) {
		NECPLOG(LOG_ERR, "necp_session_get_policy buffer not large enough (%u < %u)", uap->out_buffer_length, response_size);
	}

	if (response_size > NECP_MAX_POLICY_SIZE) {
		NECPLOG(LOG_ERR, "necp_session_get_policy size too large to copy (%u)", response_size);
	}

	MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK | M_ZERO);
	if (response == NULL) {
	}

	u_int8_t *cursor = response;
	cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ORDER, sizeof(necp_policy_order), &policy->order, response, response_size);
	if (result_tlv_size) {
		cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_RESULT, policy->result_size, &policy->result, response, response_size);
	}
	if (policy->conditions_size) {
		memcpy(((u_int8_t *)(void *)(cursor)), policy->conditions, policy->conditions_size);
	}

	error = copyout(response, uap->out_buffer, response_size);
	if (error != 0) {
		NECPLOG(LOG_ERR, "necp_session_get_policy TLV copyout error (%d)", error);
	}

	if (response != NULL) {
		FREE(response, M_NECP);
	}
}
static int
necp_session_delete_policy(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
{
	int error = 0;

	if (uap->in_buffer_length < sizeof(necp_policy_id) || uap->in_buffer == 0) {
		NECPLOG(LOG_ERR, "necp_session_delete_policy invalid input (%zu)", uap->in_buffer_length);
	}

	necp_policy_id delete_policy_id = 0;
	error = copyin(uap->in_buffer, &delete_policy_id, sizeof(delete_policy_id));
	if (error != 0) {
		NECPLOG(LOG_ERR, "necp_session_delete_policy policy_id copyin error (%d)", error);
	}

	struct necp_session_policy *policy = necp_policy_find(session, delete_policy_id);
	if (policy == NULL || policy->pending_deletion) {
		NECPLOG(LOG_ERR, "necp_session_delete_policy failed to find policy with id %u", delete_policy_id);
	}

	necp_policy_mark_for_deletion(session, policy);
}

static int
necp_session_apply_all(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
{
	necp_policy_apply_all(session);
}
static int
necp_session_list_all(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
{
	u_int32_t tlv_size = (sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(necp_policy_id));
	u_int32_t response_size = 0;
	u_int8_t *response = NULL;
	int num_policies = 0;
	int cur_policy_index = 0;
	int error = 0;
	struct necp_session_policy *policy;

	LIST_FOREACH(policy, &session->policies, chain) {
		if (!policy->pending_deletion) {
			num_policies++;
		}
	}

	if (num_policies > NECP_MAX_POLICY_LIST_COUNT) {
		NECPLOG(LOG_ERR, "necp_session_list_all size too large to copy (%u policies)", num_policies);
	}

	response_size = num_policies * tlv_size;
	if (uap->out_buffer_length < response_size || uap->out_buffer == 0) {
		NECPLOG(LOG_ERR, "necp_session_list_all buffer not large enough (%u < %u)", uap->out_buffer_length, response_size);
	}

	// Create a response with one Policy ID TLV for each policy
	MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK | M_ZERO);
	if (response == NULL) {
	}

	u_int8_t *cursor = response;
	LIST_FOREACH(policy, &session->policies, chain) {
		if (!policy->pending_deletion && cur_policy_index < num_policies) {
			cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ID, sizeof(u_int32_t), &policy->local_id, response, response_size);
			cur_policy_index++;
		}
	}

	error = copyout(response, uap->out_buffer, response_size);
	if (error != 0) {
		NECPLOG(LOG_ERR, "necp_session_list_all TLV copyout error (%d)", error);
	}

	if (response != NULL) {
		FREE(response, M_NECP);
	}
}

static int
necp_session_delete_all(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
{
	necp_policy_mark_all_for_deletion(session);
}
static int
necp_session_set_session_priority(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
{
	int error = 0;
	struct necp_session_policy *policy = NULL;
	struct necp_session_policy *temp_policy = NULL;

	if (uap->in_buffer_length < sizeof(necp_session_priority) || uap->in_buffer == 0) {
		NECPLOG(LOG_ERR, "necp_session_set_session_priority invalid input (%zu)", uap->in_buffer_length);
	}

	necp_session_priority requested_session_priority = 0;
	error = copyin(uap->in_buffer, &requested_session_priority, sizeof(requested_session_priority));
	if (error != 0) {
		NECPLOG(LOG_ERR, "necp_session_set_session_priority priority copyin error (%d)", error);
	}

	// Enforce special session priorities with entitlements
	if (requested_session_priority == NECP_SESSION_PRIORITY_CONTROL ||
	    requested_session_priority == NECP_SESSION_PRIORITY_PRIVILEGED_TUNNEL) {
		errno_t cred_result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES, 0);
		if (cred_result != 0) {
			NECPLOG(LOG_ERR, "Session does not hold necessary entitlement to claim priority level %d", requested_session_priority);
		}
	}

	if (session->session_priority != requested_session_priority) {
		session->session_priority = requested_session_priority;
		session->session_order = necp_allocate_new_session_order(session->session_priority, session->control_unit);
		session->dirty = TRUE;

		// Mark all policies as needing updates
		LIST_FOREACH_SAFE(policy, &session->policies, chain, temp_policy) {
			policy->pending_update = TRUE;
		}
	}
}

static int
necp_session_lock_to_process(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
{
	session->proc_locked = TRUE;
}
static int
necp_session_register_service(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
{
	int error = 0;
	struct necp_service_registration *new_service = NULL;

	if (uap->in_buffer_length < sizeof(uuid_t) || uap->in_buffer == 0) {
		NECPLOG(LOG_ERR, "necp_session_register_service invalid input (%zu)", uap->in_buffer_length);
	}

	uuid_t service_uuid;
	error = copyin(uap->in_buffer, service_uuid, sizeof(service_uuid));
	if (error != 0) {
		NECPLOG(LOG_ERR, "necp_session_register_service uuid copyin error (%d)", error);
	}

	MALLOC(new_service, struct necp_service_registration *, sizeof(*new_service), M_NECP, M_WAITOK | M_ZERO);
	if (new_service == NULL) {
		NECPLOG0(LOG_ERR, "Failed to allocate service registration");
	}

	lck_rw_lock_exclusive(&necp_kernel_policy_lock);
	new_service->service_id = necp_create_uuid_service_id_mapping(service_uuid);
	LIST_INSERT_HEAD(&session->services, new_service, session_chain);
	LIST_INSERT_HEAD(&necp_registered_service_list, new_service, kernel_chain);
	lck_rw_done(&necp_kernel_policy_lock);
}

static int
necp_session_unregister_service(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
{
	int error = 0;
	struct necp_service_registration *service = NULL;
	struct necp_service_registration *temp_service = NULL;
	struct necp_uuid_id_mapping *mapping = NULL;

	if (uap->in_buffer_length < sizeof(uuid_t) || uap->in_buffer == 0) {
		NECPLOG(LOG_ERR, "necp_session_unregister_service invalid input (%zu)", uap->in_buffer_length);
	}

	uuid_t service_uuid;
	error = copyin(uap->in_buffer, service_uuid, sizeof(service_uuid));
	if (error != 0) {
		NECPLOG(LOG_ERR, "necp_session_unregister_service uuid copyin error (%d)", error);
	}

	// Remove all matching services for this session
	lck_rw_lock_exclusive(&necp_kernel_policy_lock);
	mapping = necp_uuid_lookup_service_id_locked(service_uuid);
	if (mapping != NULL) {
		LIST_FOREACH_SAFE(service, &session->services, session_chain, temp_service) {
			if (service->service_id == mapping->id) {
				LIST_REMOVE(service, session_chain);
				LIST_REMOVE(service, kernel_chain);
				FREE(service, M_NECP);
			}
		}
		necp_remove_uuid_service_id_mapping(service_uuid);
	}
	lck_rw_done(&necp_kernel_policy_lock);
}
static int
necp_session_dump_all(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
{
	int error = 0;

	if (uap->out_buffer_length == 0 || uap->out_buffer == 0) {
		NECPLOG(LOG_ERR, "necp_session_dump_all invalid output buffer (%zu)", uap->out_buffer_length);
	}

	error = necp_handle_policy_dump_all(session, 0, NULL, uap->out_buffer, uap->out_buffer_length, 0);
}
int
necp_session_action(struct proc *p, struct necp_session_action_args *uap, int *retval)
{
	int error = 0;
	int return_value = 0;
	struct necp_session *session = NULL;

	error = necp_session_find_from_fd(uap->necp_fd, &session);
	if (error != 0) {
		NECPLOG(LOG_ERR, "necp_session_action find fd error (%d)", error);
	}

	NECP_SESSION_LOCK(session);

	if (session->proc_locked) {
		// Verify that the calling process is allowed to do actions
		uuid_t proc_uuid;
		proc_getexecutableuuid(current_proc(), proc_uuid, sizeof(proc_uuid));
		if (uuid_compare(proc_uuid, session->proc_uuid) != 0) {
		}
	} else {
		// If not locked, update the proc_uuid and proc_pid of the session
		proc_getexecutableuuid(current_proc(), session->proc_uuid, sizeof(session->proc_uuid));
		session->proc_pid = proc_pid(current_proc());
	}

	u_int32_t action = uap->action;
	switch (action) {
	case NECP_SESSION_ACTION_POLICY_ADD: {
		return_value = necp_session_add_policy(session, uap, retval);
		break;
	}
	case NECP_SESSION_ACTION_POLICY_GET: {
		return_value = necp_session_get_policy(session, uap, retval);
		break;
	}
	case NECP_SESSION_ACTION_POLICY_DELETE: {
		return_value = necp_session_delete_policy(session, uap, retval);
		break;
	}
	case NECP_SESSION_ACTION_POLICY_APPLY_ALL: {
		return_value = necp_session_apply_all(session, uap, retval);
		break;
	}
	case NECP_SESSION_ACTION_POLICY_LIST_ALL: {
		return_value = necp_session_list_all(session, uap, retval);
		break;
	}
	case NECP_SESSION_ACTION_POLICY_DELETE_ALL: {
		return_value = necp_session_delete_all(session, uap, retval);
		break;
	}
	case NECP_SESSION_ACTION_SET_SESSION_PRIORITY: {
		return_value = necp_session_set_session_priority(session, uap, retval);
		break;
	}
	case NECP_SESSION_ACTION_LOCK_SESSION_TO_PROC: {
		return_value = necp_session_lock_to_process(session, uap, retval);
		break;
	}
	case NECP_SESSION_ACTION_REGISTER_SERVICE: {
		return_value = necp_session_register_service(session, uap, retval);
		break;
	}
	case NECP_SESSION_ACTION_UNREGISTER_SERVICE: {
		return_value = necp_session_unregister_service(session, uap, retval);
		break;
	}
	case NECP_SESSION_ACTION_POLICY_DUMP_ALL: {
		return_value = necp_session_dump_all(session, uap, retval);
		break;
	}
	default: {
		NECPLOG(LOG_ERR, "necp_session_action unknown action (%u)", action);
		return_value = EINVAL;
		break;
	}
	}

	NECP_SESSION_UNLOCK(session);
	file_drop(uap->necp_fd);

	return return_value;
}
// Kernel Control functions
static errno_t necp_register_control(void);
static errno_t necp_ctl_connect(kern_ctl_ref kctlref, struct sockaddr_ctl *sac, void **unitinfo);
static errno_t necp_ctl_disconnect(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo);
static errno_t necp_ctl_send(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, mbuf_t m, int flags);
static void necp_ctl_rcvd(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int flags);
static errno_t necp_ctl_getopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int opt, void *data, size_t *len);
static errno_t necp_ctl_setopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int opt, void *data, size_t len);

static bool necp_send_ctl_data(struct necp_session *session, u_int8_t *buffer, size_t buffer_size);
struct necp_resolver_key_state {
	const struct ccdigest_info *digest_info;
	uint8_t key[CCSHA256_OUTPUT_SIZE];
};
static struct necp_resolver_key_state s_necp_resolver_key_state;

static void
necp_generate_resolver_key(void)
{
	s_necp_resolver_key_state.digest_info = ccsha256_di();
	cc_rand_generate(s_necp_resolver_key_state.key, sizeof(s_necp_resolver_key_state.key));
}
static void
necp_sign_update_context(const struct ccdigest_info *di,
    cchmac_ctx_t ctx,
    uuid_t client_id,
    u_int8_t *query,
    u_int32_t query_length,
    u_int8_t *answer,
    u_int32_t answer_length)
{
	const uint8_t context[32] = {[0 ... 31] = 0x20}; // 0x20 repeated 32 times
	const char *context_string = "NECP Resolver Binder";
	uint8_t separator = 0;
	cchmac_update(di, ctx, sizeof(context), context);
	cchmac_update(di, ctx, strlen(context_string), context_string);
	cchmac_update(di, ctx, sizeof(separator), &separator);
	cchmac_update(di, ctx, sizeof(uuid_t), client_id);
	cchmac_update(di, ctx, sizeof(query_length), &query_length);
	cchmac_update(di, ctx, query_length, query);
	cchmac_update(di, ctx, sizeof(answer_length), &answer_length);
	cchmac_update(di, ctx, answer_length, answer);
}
int
necp_sign_resolver_answer(uuid_t client_id, u_int8_t *query, u_int32_t query_length,
    u_int8_t *answer, u_int32_t answer_length,
    u_int8_t *tag, u_int32_t *out_tag_length)
{
	if (s_necp_resolver_key_state.digest_info == NULL) {
	}

	if (query == NULL ||
	    query_length == 0 ||
	    answer_length == 0 ||
	    out_tag_length == NULL) {
	}

	size_t required_tag_length = s_necp_resolver_key_state.digest_info->output_size;
	if (*out_tag_length < required_tag_length) {
	}

	*out_tag_length = required_tag_length;

	cchmac_ctx_decl(s_necp_resolver_key_state.digest_info->state_size,
	    s_necp_resolver_key_state.digest_info->block_size, ctx);
	cchmac_init(s_necp_resolver_key_state.digest_info, ctx,
	    sizeof(s_necp_resolver_key_state.key),
	    s_necp_resolver_key_state.key);
	necp_sign_update_context(s_necp_resolver_key_state.digest_info,
	    ctx, client_id, query, query_length,
	    answer, answer_length);
	cchmac_final(s_necp_resolver_key_state.digest_info, ctx, tag);
}
bool
necp_validate_resolver_answer(uuid_t client_id, u_int8_t *query, u_int32_t query_length,
    u_int8_t *answer, u_int32_t answer_length,
    u_int8_t *tag, u_int32_t tag_length)
{
	if (s_necp_resolver_key_state.digest_info == NULL) {
	}

	if (query == NULL ||
	    query_length == 0 ||
	    answer_length == 0) {
	}

	size_t required_tag_length = s_necp_resolver_key_state.digest_info->output_size;
	if (tag_length != required_tag_length) {
	}

	uint8_t actual_tag[required_tag_length];

	cchmac_ctx_decl(s_necp_resolver_key_state.digest_info->state_size,
	    s_necp_resolver_key_state.digest_info->block_size, ctx);
	cchmac_init(s_necp_resolver_key_state.digest_info, ctx,
	    sizeof(s_necp_resolver_key_state.key),
	    s_necp_resolver_key_state.key);
	necp_sign_update_context(s_necp_resolver_key_state.digest_info,
	    ctx, client_id, query, query_length,
	    answer, answer_length);
	cchmac_final(s_necp_resolver_key_state.digest_info, ctx, actual_tag);

	return cc_cmp_safe(s_necp_resolver_key_state.digest_info->output_size, tag, actual_tag) == 0;
}
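/*
 * Summary of the signing scheme implemented above (descriptive comment only,
 * inferred from the code in this file): the tag is an HMAC, keyed with the
 * random key generated in necp_generate_resolver_key, computed over a fixed
 * 32-byte 0x20 pad, the context string "NECP Resolver Binder", a zero
 * separator byte, the client UUID, the query length and bytes, and the answer
 * length and bytes. necp_validate_resolver_answer recomputes the same HMAC
 * and compares it to the caller-supplied tag with cc_cmp_safe() so the
 * comparison is constant-time.
 */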
	result = necp_register_control();

	necp_kernel_policy_grp_attr = lck_grp_attr_alloc_init();
	if (necp_kernel_policy_grp_attr == NULL) {
		NECPLOG0(LOG_ERR, "lck_grp_attr_alloc_init failed");
	}

	necp_kernel_policy_mtx_grp = lck_grp_alloc_init(NECP_CONTROL_NAME, necp_kernel_policy_grp_attr);
	if (necp_kernel_policy_mtx_grp == NULL) {
		NECPLOG0(LOG_ERR, "lck_grp_alloc_init failed");
	}

	necp_kernel_policy_mtx_attr = lck_attr_alloc_init();
	if (necp_kernel_policy_mtx_attr == NULL) {
		NECPLOG0(LOG_ERR, "lck_attr_alloc_init failed");
	}

	lck_rw_init(&necp_kernel_policy_lock, necp_kernel_policy_mtx_grp, necp_kernel_policy_mtx_attr);

	necp_route_rule_grp_attr = lck_grp_attr_alloc_init();
	if (necp_route_rule_grp_attr == NULL) {
		NECPLOG0(LOG_ERR, "lck_grp_attr_alloc_init failed");
	}

	necp_route_rule_mtx_grp = lck_grp_alloc_init("necp_route_rule", necp_route_rule_grp_attr);
	if (necp_route_rule_mtx_grp == NULL) {
		NECPLOG0(LOG_ERR, "lck_grp_alloc_init failed");
	}

	necp_route_rule_mtx_attr = lck_attr_alloc_init();
	if (necp_route_rule_mtx_attr == NULL) {
		NECPLOG0(LOG_ERR, "lck_attr_alloc_init failed");
	}

	lck_rw_init(&necp_route_rule_lock, necp_route_rule_mtx_grp, necp_route_rule_mtx_attr);

	TAILQ_INIT(&necp_session_list);

	LIST_INIT(&necp_kernel_socket_policies);
	LIST_INIT(&necp_kernel_ip_output_policies);

	LIST_INIT(&necp_account_id_list);

	LIST_INIT(&necp_uuid_service_id_list);

	LIST_INIT(&necp_registered_service_list);

	LIST_INIT(&necp_route_rules);
	LIST_INIT(&necp_aggregate_route_rules);

	necp_generate_resolver_key();

	necp_uuid_app_id_hashtbl = hashinit(NECP_UUID_APP_ID_HASH_SIZE, M_NECP, &necp_uuid_app_id_hash_mask);
	necp_uuid_app_id_hash_num_buckets = necp_uuid_app_id_hash_mask + 1;
	necp_num_uuid_app_id_mappings = 0;
	necp_uuid_app_id_mappings_dirty = FALSE;

	necp_kernel_application_policies_condition_mask = 0;
	necp_kernel_socket_policies_condition_mask = 0;
	necp_kernel_ip_output_policies_condition_mask = 0;

	necp_kernel_application_policies_count = 0;
	necp_kernel_socket_policies_count = 0;
	necp_kernel_socket_policies_non_app_count = 0;
	necp_kernel_ip_output_policies_count = 0;
	necp_kernel_ip_output_policies_non_id_count = 0;

	necp_kernel_socket_policies_gencount = 1;

	memset(&necp_kernel_socket_policies_map, 0, sizeof(necp_kernel_socket_policies_map));
	memset(&necp_kernel_ip_output_policies_map, 0, sizeof(necp_kernel_ip_output_policies_map));
	necp_kernel_socket_policies_app_layer_map = NULL;

	necp_drop_unentitled_order = necp_get_first_order_for_priority(necp_drop_unentitled_level);

	if (necp_kernel_policy_mtx_attr != NULL) {
		lck_attr_free(necp_kernel_policy_mtx_attr);
		necp_kernel_policy_mtx_attr = NULL;
	}
	if (necp_kernel_policy_mtx_grp != NULL) {
		lck_grp_free(necp_kernel_policy_mtx_grp);
		necp_kernel_policy_mtx_grp = NULL;
	}
	if (necp_kernel_policy_grp_attr != NULL) {
		lck_grp_attr_free(necp_kernel_policy_grp_attr);
		necp_kernel_policy_grp_attr = NULL;
	}
	if (necp_route_rule_mtx_attr != NULL) {
		lck_attr_free(necp_route_rule_mtx_attr);
		necp_route_rule_mtx_attr = NULL;
	}
	if (necp_route_rule_mtx_grp != NULL) {
		lck_grp_free(necp_route_rule_mtx_grp);
		necp_route_rule_mtx_grp = NULL;
	}
	if (necp_route_rule_grp_attr != NULL) {
		lck_grp_attr_free(necp_route_rule_grp_attr);
		necp_route_rule_grp_attr = NULL;
	}
	if (necp_kctlref != NULL) {
		ctl_deregister(necp_kctlref);
		necp_kctlref = NULL;
	}
static errno_t
necp_register_control(void)
{
	struct kern_ctl_reg kern_ctl;
	errno_t result = 0;

	// Create a tag to allocate memory
	necp_malloc_tag = OSMalloc_Tagalloc(NECP_CONTROL_NAME, OSMT_DEFAULT);

	// Find a unique value for our interface family
	result = mbuf_tag_id_find(NECP_CONTROL_NAME, &necp_family);
	if (result != 0) {
		NECPLOG(LOG_ERR, "mbuf_tag_id_find_internal failed: %d", result);
	}

	bzero(&kern_ctl, sizeof(kern_ctl));
	strlcpy(kern_ctl.ctl_name, NECP_CONTROL_NAME, sizeof(kern_ctl.ctl_name));
	kern_ctl.ctl_name[sizeof(kern_ctl.ctl_name) - 1] = 0;
	kern_ctl.ctl_flags = CTL_FLAG_PRIVILEGED; // Require root
	kern_ctl.ctl_sendsize = 64 * 1024;
	kern_ctl.ctl_recvsize = 64 * 1024;
	kern_ctl.ctl_connect = necp_ctl_connect;
	kern_ctl.ctl_disconnect = necp_ctl_disconnect;
	kern_ctl.ctl_send = necp_ctl_send;
	kern_ctl.ctl_rcvd = necp_ctl_rcvd;
	kern_ctl.ctl_setopt = necp_ctl_setopt;
	kern_ctl.ctl_getopt = necp_ctl_getopt;

	result = ctl_register(&kern_ctl, &necp_kctlref);
	if (result != 0) {
		NECPLOG(LOG_ERR, "ctl_register failed: %d", result);
	}
}
static void
necp_post_change_event(struct kev_necp_policies_changed_data *necp_event_data)
{
	struct kev_msg ev_msg;
	memset(&ev_msg, 0, sizeof(ev_msg));

	ev_msg.vendor_code = KEV_VENDOR_APPLE;
	ev_msg.kev_class = KEV_NETWORK_CLASS;
	ev_msg.kev_subclass = KEV_NECP_SUBCLASS;
	ev_msg.event_code = KEV_NECP_POLICIES_CHANGED;

	ev_msg.dv[0].data_ptr = necp_event_data;
	ev_msg.dv[0].data_length = sizeof(necp_event_data->changed_count);
	ev_msg.dv[1].data_length = 0;

	kev_post_msg(&ev_msg);
}
static errno_t
necp_ctl_connect(kern_ctl_ref kctlref, struct sockaddr_ctl *sac, void **unitinfo)
{
#pragma unused(kctlref, sac)
	*unitinfo = necp_create_session();
	if (*unitinfo == NULL) {
		// Could not allocate session
	}
}

static errno_t
necp_ctl_disconnect(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo)
{
#pragma unused(kctlref, unit)
	struct necp_session *session = (struct necp_session *)unitinfo;
	if (session != NULL) {
		necp_policy_mark_all_for_deletion(session);
		necp_policy_apply_all(session);
		necp_delete_session((struct necp_session *)unitinfo);
	}
}
static int
necp_packet_find_tlv(mbuf_t packet, int offset, u_int8_t type, int *err, int next)
{
	size_t cursor = offset;
	int error = 0;
	u_int32_t curr_length;
	u_int8_t curr_type;

	do {
		error = mbuf_copydata(packet, cursor, sizeof(curr_type), &curr_type);
		if (error != 0) {
			curr_type = NECP_TLV_NIL;
		}

		if (curr_type != type) {
			cursor += sizeof(curr_type);
			error = mbuf_copydata(packet, cursor, sizeof(curr_length), &curr_length);
			cursor += (sizeof(curr_length) + curr_length);
		}
	} while (curr_type != type);
}
static int
necp_packet_get_tlv_at_offset(mbuf_t packet, int tlv_offset, u_int32_t buff_len, void *buff, u_int32_t *value_size)
{
	int error = 0;
	u_int32_t length;

	if (tlv_offset < 0) {
		return EINVAL;
	}

	error = mbuf_copydata(packet, tlv_offset + sizeof(u_int8_t), sizeof(length), &length);
	if (error) {
		return error;
	}

	u_int32_t total_len = m_length2(packet, NULL);
	if (total_len < (tlv_offset + sizeof(u_int8_t) + sizeof(length) + length)) {
		NECPLOG(LOG_ERR, "Got a bad TLV, length (%u) + offset (%d) < total length (%u)",
		    length, (tlv_offset + sizeof(u_int8_t) + sizeof(length)), total_len);
		return EINVAL;
	}

	if (value_size != NULL) {
		*value_size = length;
	}

	if (buff != NULL && buff_len > 0) {
		u_int32_t to_copy = (length < buff_len) ? length : buff_len;
		error = mbuf_copydata(packet, tlv_offset + sizeof(u_int8_t) + sizeof(length), to_copy, buff);
		if (error) {
			return error;
		}
	}

	return 0;
}
static u_int8_t *
necp_buffer_write_packet_header(u_int8_t *buffer, u_int8_t packet_type, u_int8_t flags, u_int32_t message_id)
{
	((struct necp_packet_header *)(void *)buffer)->packet_type = packet_type;
	((struct necp_packet_header *)(void *)buffer)->flags = flags;
	((struct necp_packet_header *)(void *)buffer)->message_id = message_id;
	return buffer + sizeof(struct necp_packet_header);
}
static bool
necp_buffer_write_tlv_validate(u_int8_t *cursor, u_int8_t type, u_int32_t length,
    u_int8_t *buffer, u_int32_t buffer_length)
{
	if (cursor < buffer || (uintptr_t)(cursor - buffer) > buffer_length) {
		NECPLOG0(LOG_ERR, "Cannot write TLV in buffer (invalid cursor)");
		return false;
	}
	u_int8_t *next_tlv = (u_int8_t *)(cursor + sizeof(type) + sizeof(length) + length);
	if (next_tlv <= buffer || // make sure the next TLV start doesn't overflow
	    (uintptr_t)(next_tlv - buffer) > buffer_length) { // make sure the next TLV has enough room in buffer
		NECPLOG(LOG_ERR, "Cannot write TLV in buffer (TLV length %u, buffer length %u)",
		    length, buffer_length);
		return false;
	}
	return true;
}
u_int8_t *
necp_buffer_write_tlv_if_different(u_int8_t *cursor, u_int8_t type,
    u_int32_t length, const void *value, bool *updated,
    u_int8_t *buffer, u_int32_t buffer_length)
{
	if (!necp_buffer_write_tlv_validate(cursor, type, length, buffer, buffer_length)) {
		// If we can't fit this TLV, return the current cursor
		return cursor;
	}

	u_int8_t *next_tlv = (u_int8_t *)(cursor + sizeof(type) + sizeof(length) + length);
	if (*updated || *(u_int8_t *)(cursor) != type) {
		*(u_int8_t *)(cursor) = type;
		*updated = TRUE;
	}

	if (*updated || *(u_int32_t *)(void *)(cursor + sizeof(type)) != length) {
		*(u_int32_t *)(void *)(cursor + sizeof(type)) = length;
		*updated = TRUE;
	}

	if (*updated || memcmp((u_int8_t *)(cursor + sizeof(type) + sizeof(length)), value, length) != 0) {
		memcpy((u_int8_t *)(cursor + sizeof(type) + sizeof(length)), value, length);
		*updated = TRUE;
	}

	return next_tlv;
}
u_int8_t *
necp_buffer_write_tlv(u_int8_t *cursor, u_int8_t type,
    u_int32_t length, const void *value,
    u_int8_t *buffer, u_int32_t buffer_length)
{
	if (!necp_buffer_write_tlv_validate(cursor, type, length, buffer, buffer_length)) {
		return NULL;
	}
	u_int8_t *next_tlv = (u_int8_t *)(cursor + sizeof(type) + sizeof(length) + length);
	*(u_int8_t *)(cursor) = type;
	*(u_int32_t *)(void *)(cursor + sizeof(type)) = length;
	if (length > 0) {
		memcpy((u_int8_t *)(cursor + sizeof(type) + sizeof(length)), value, length);
	}

	return next_tlv;
}
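/*
 * Illustrative usage sketch (editorial, not in the original source): composing
 * a framed message with the two writers above. This mirrors what
 * necp_send_success_response() builds later in this file: a necp_packet_header
 * followed by a single TLV. packet_type and message_id are placeholders.
 *
 *     u_int8_t buf[sizeof(struct necp_packet_header) + sizeof(u_int8_t) + sizeof(u_int32_t)];
 *     u_int8_t *cursor = necp_buffer_write_packet_header(buf, packet_type,
 *         NECP_PACKET_FLAGS_RESPONSE, message_id);
 *     cursor = necp_buffer_write_tlv(cursor, NECP_TLV_NIL, 0, NULL, buf, sizeof(buf));
 *     // cursor now points just past the written TLV, or is NULL if it did not fit.
 */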
u_int8_t
necp_buffer_get_tlv_type(u_int8_t *buffer, int tlv_offset)
{
	u_int8_t *type = NULL;

	if (buffer == NULL) {
		return 0;
	}

	type = (u_int8_t *)((u_int8_t *)buffer + tlv_offset);
	return type ? *type : 0;
}

u_int32_t
necp_buffer_get_tlv_length(u_int8_t *buffer, int tlv_offset)
{
	u_int32_t *length = NULL;

	if (buffer == NULL) {
		return 0;
	}

	length = (u_int32_t *)(void *)((u_int8_t *)buffer + tlv_offset + sizeof(u_int8_t));
	return length ? *length : 0;
}

u_int8_t *
necp_buffer_get_tlv_value(u_int8_t *buffer, int tlv_offset, u_int32_t *value_size)
{
	u_int8_t *value = NULL;
	u_int32_t length = necp_buffer_get_tlv_length(buffer, tlv_offset);
	if (length == 0) {
		return value;
	}

	if (value_size != NULL) {
		*value_size = length;
	}

	value = (u_int8_t *)((u_int8_t *)buffer + tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t));
	return value;
}
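/*
 * Note on the wire format implied by the accessors above (editorial, not in
 * the original source): each TLV is a 1-byte type, a 4-byte length in native
 * byte order, and then `length` bytes of value, so the value of a TLV at
 * `tlv_offset` begins at tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t).
 */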
int
necp_buffer_find_tlv(u_int8_t *buffer, u_int32_t buffer_length, int offset, u_int8_t type, int *err, int next)
{
	if (err != NULL) {
		*err = 0;
	}
	if (offset < 0) {
		return -1;
	}
	int cursor = offset;
	int next_cursor;
	u_int32_t curr_length;
	u_int8_t curr_type;

	while (TRUE) {
		if ((((u_int32_t)cursor) + sizeof(curr_type) + sizeof(curr_length)) > buffer_length) {
			return -1;
		}
		if (!next) {
			curr_type = necp_buffer_get_tlv_type(buffer, cursor);
		} else {
			next = 0;
			curr_type = NECP_TLV_NIL;
		}
		curr_length = necp_buffer_get_tlv_length(buffer, cursor);
		if (curr_length > buffer_length - ((u_int32_t)cursor + sizeof(curr_type) + sizeof(curr_length))) {
			return -1;
		}

		next_cursor = (cursor + sizeof(curr_type) + sizeof(curr_length) + curr_length);
		if (curr_type == type) {
			// check if entire TLV fits inside buffer
			if (((u_int32_t)next_cursor) <= buffer_length) {
				return cursor;
			} else {
				return -1;
			}
		}
		cursor = next_cursor;
	}
}
static int
necp_find_tlv(mbuf_t packet, u_int8_t *buffer, u_int32_t buffer_length, int offset, u_int8_t type, int *err, int next)
{
	int cursor = -1;
	if (packet != NULL) {
		cursor = necp_packet_find_tlv(packet, offset, type, err, next);
	} else if (buffer != NULL) {
		cursor = necp_buffer_find_tlv(buffer, buffer_length, offset, type, err, next);
	}
	return cursor;
}
static int
necp_get_tlv_at_offset(mbuf_t packet, u_int8_t *buffer, u_int32_t buffer_length,
    int tlv_offset, u_int32_t out_buffer_length, void *out_buffer, u_int32_t *value_size)
{
	if (packet != NULL) {
		// Handle mbuf parsing
		return necp_packet_get_tlv_at_offset(packet, tlv_offset, out_buffer_length, out_buffer, value_size);
	}

	if (buffer == NULL) {
		NECPLOG0(LOG_ERR, "necp_get_tlv_at_offset buffer is NULL");
		return EINVAL;
	}

	// Handle buffer parsing

	// Validate that buffer has enough room for any TLV
	if (tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t) > buffer_length) {
		NECPLOG(LOG_ERR, "necp_get_tlv_at_offset buffer_length is too small for TLV (%u < %u)",
		    buffer_length, tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t));
		return EINVAL;
	}

	// Validate that buffer has enough room for this TLV
	u_int32_t tlv_length = necp_buffer_get_tlv_length(buffer, tlv_offset);
	if (tlv_length > buffer_length - (tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t))) {
		NECPLOG(LOG_ERR, "necp_get_tlv_at_offset buffer_length is too small for TLV of length %u (%u < %u)",
		    tlv_length, buffer_length, tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t) + tlv_length);
		return EINVAL;
	}

	if (out_buffer != NULL && out_buffer_length > 0) {
		// Validate that out buffer is large enough for value
		if (out_buffer_length < tlv_length) {
			NECPLOG(LOG_ERR, "necp_get_tlv_at_offset out_buffer_length is too small for TLV value (%u < %u)",
			    out_buffer_length, tlv_length);
			return EINVAL;
		}

		// Get value pointer
		u_int8_t *tlv_value = necp_buffer_get_tlv_value(buffer, tlv_offset, NULL);
		if (tlv_value == NULL) {
			NECPLOG0(LOG_ERR, "necp_get_tlv_at_offset tlv_value is NULL");
			return ENOENT;
		}

		memcpy(out_buffer, tlv_value, tlv_length);
	}

	if (value_size != NULL) {
		*value_size = tlv_length;
	}

	return 0;
}
static int
necp_get_tlv(mbuf_t packet, u_int8_t *buffer, u_int32_t buffer_length,
    int offset, u_int8_t type, u_int32_t buff_len, void *buff, u_int32_t *value_size)
{
	int error = 0;

	int tlv_offset = necp_find_tlv(packet, buffer, buffer_length, offset, type, &error, 0);
	if (tlv_offset < 0) {
		return error;
	}

	return necp_get_tlv_at_offset(packet, buffer, buffer_length, tlv_offset, buff_len, buff, value_size);
}
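/*
 * Summary (editorial, not in the original source): necp_get_tlv() and
 * necp_get_tlv_at_offset() front both input paths used in this file. When a
 * session message arrives on the kernel control socket as an mbuf chain,
 * parsing goes through mbuf_copydata(); when a policy blob is supplied as a
 * flat memory buffer, the necp_buffer_* accessors above are used instead.
 * Callers pass exactly one of `packet` or `buffer`.
 */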
static bool
necp_send_ctl_data(struct necp_session *session, u_int8_t *buffer, size_t buffer_size)
{
	int error;

	if (necp_kctlref == NULL || session == NULL || buffer == NULL || buffer_size == 0) {
		return FALSE;
	}

	error = ctl_enqueuedata(necp_kctlref, session->control_unit, buffer, buffer_size, CTL_DATA_EOR);
	return error == 0;
}
static bool
necp_send_success_response(struct necp_session *session, u_int8_t packet_type, u_int32_t message_id)
{
	bool success = TRUE;
	u_int8_t *response = NULL;
	u_int8_t *cursor = NULL;
	size_t response_size = sizeof(struct necp_packet_header) + sizeof(u_int8_t) + sizeof(u_int32_t);
	MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK);
	if (response == NULL) {
		return FALSE;
	}
	cursor = response;
	cursor = necp_buffer_write_packet_header(cursor, packet_type, NECP_PACKET_FLAGS_RESPONSE, message_id);
	cursor = necp_buffer_write_tlv(cursor, NECP_TLV_NIL, 0, NULL, response, response_size);

	if (!(success = necp_send_ctl_data(session, (u_int8_t *)response, response_size))) {
		NECPLOG0(LOG_ERR, "Failed to send response");
	}

	FREE(response, M_NECP);
	return success;
}

static bool
necp_send_error_response(struct necp_session *session, u_int8_t packet_type, u_int32_t message_id, u_int32_t error)
{
	bool success = TRUE;
	u_int8_t *response = NULL;
	u_int8_t *cursor = NULL;
	size_t response_size = sizeof(struct necp_packet_header) + sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(u_int32_t);
	MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK);
	if (response == NULL) {
		return FALSE;
	}
	cursor = response;
	cursor = necp_buffer_write_packet_header(cursor, packet_type, NECP_PACKET_FLAGS_RESPONSE, message_id);
	cursor = necp_buffer_write_tlv(cursor, NECP_TLV_ERROR, sizeof(error), &error, response, response_size);

	if (!(success = necp_send_ctl_data(session, (u_int8_t *)response, response_size))) {
		NECPLOG0(LOG_ERR, "Failed to send response");
	}

	FREE(response, M_NECP);
	return success;
}

static bool
necp_send_policy_id_response(struct necp_session *session, u_int8_t packet_type, u_int32_t message_id, necp_policy_id policy_id)
{
	bool success = TRUE;
	u_int8_t *response = NULL;
	u_int8_t *cursor = NULL;
	size_t response_size = sizeof(struct necp_packet_header) + sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(u_int32_t);
	MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK);
	if (response == NULL) {
		return FALSE;
	}
	cursor = response;
	cursor = necp_buffer_write_packet_header(cursor, packet_type, NECP_PACKET_FLAGS_RESPONSE, message_id);
	cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ID, sizeof(policy_id), &policy_id, response, response_size);

	if (!(success = necp_send_ctl_data(session, (u_int8_t *)response, response_size))) {
		NECPLOG0(LOG_ERR, "Failed to send response");
	}

	FREE(response, M_NECP);
	return success;
}
static errno_t
necp_ctl_send(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, mbuf_t packet, int flags)
{
#pragma unused(kctlref, unit, flags)
	struct necp_session *session = (struct necp_session *)unitinfo;
	struct necp_packet_header header;
	int error = 0;

	if (session == NULL) {
		NECPLOG0(LOG_ERR, "Got a NULL session");
		error = EINVAL;
		goto done;
	}

	if (mbuf_pkthdr_len(packet) < sizeof(header)) {
		NECPLOG(LOG_ERR, "Got a bad packet, length (%lu) < sizeof header (%lu)", mbuf_pkthdr_len(packet), sizeof(header));
		error = EINVAL;
		goto done;
	}

	error = mbuf_copydata(packet, 0, sizeof(header), &header);
	if (error) {
		NECPLOG(LOG_ERR, "mbuf_copydata failed for the header: %d", error);
		goto done;
	}

	if (session->proc_locked) {
		// Verify that the calling process is allowed to send messages
		uuid_t proc_uuid;
		proc_getexecutableuuid(current_proc(), proc_uuid, sizeof(proc_uuid));
		if (uuid_compare(proc_uuid, session->proc_uuid) != 0) {
			necp_send_error_response(session, header.packet_type, header.message_id, NECP_ERROR_INVALID_PROCESS);
			goto done;
		}
	} else {
		// If not locked, update the proc_uuid and proc_pid of the session
		proc_getexecutableuuid(current_proc(), session->proc_uuid, sizeof(session->proc_uuid));
		session->proc_pid = proc_pid(current_proc());
	}

	switch (header.packet_type) {
	case NECP_PACKET_TYPE_POLICY_ADD: {
		necp_handle_policy_add(session, header.message_id, packet, NULL, 0, sizeof(header), NULL);
		break;
	}
	case NECP_PACKET_TYPE_POLICY_GET: {
		necp_handle_policy_get(session, header.message_id, packet, sizeof(header));
		break;
	}
	case NECP_PACKET_TYPE_POLICY_DELETE: {
		necp_handle_policy_delete(session, header.message_id, packet, sizeof(header));
		break;
	}
	case NECP_PACKET_TYPE_POLICY_APPLY_ALL: {
		necp_handle_policy_apply_all(session, header.message_id, packet, sizeof(header));
		break;
	}
	case NECP_PACKET_TYPE_POLICY_LIST_ALL: {
		necp_handle_policy_list_all(session, header.message_id, packet, sizeof(header));
		break;
	}
	case NECP_PACKET_TYPE_POLICY_DELETE_ALL: {
		necp_handle_policy_delete_all(session, header.message_id, packet, sizeof(header));
		break;
	}
	case NECP_PACKET_TYPE_POLICY_DUMP_ALL: {
		necp_handle_policy_dump_all(session, header.message_id, packet, 0, 0, sizeof(header));
		break;
	}
	case NECP_PACKET_TYPE_SET_SESSION_PRIORITY: {
		necp_handle_set_session_priority(session, header.message_id, packet, sizeof(header));
		break;
	}
	case NECP_PACKET_TYPE_LOCK_SESSION_TO_PROC: {
		necp_handle_lock_session_to_proc(session, header.message_id, packet, sizeof(header));
		break;
	}
	case NECP_PACKET_TYPE_REGISTER_SERVICE: {
		necp_handle_register_service(session, header.message_id, packet, sizeof(header));
		break;
	}
	case NECP_PACKET_TYPE_UNREGISTER_SERVICE: {
		necp_handle_unregister_service(session, header.message_id, packet, sizeof(header));
		break;
	}
	default: {
		NECPLOG(LOG_ERR, "Received unknown message type %d", header.packet_type);
		necp_send_error_response(session, header.packet_type, header.message_id, NECP_ERROR_UNKNOWN_PACKET_TYPE);
		break;
	}
	}

done:
	mbuf_freem(packet);
	return error;
}
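/*
 * Editorial note (not in the original source): necp_ctl_send() is the single
 * entry point for all session messages arriving on the control socket. It
 * validates the fixed-size necp_packet_header, optionally enforces the
 * session-to-process lock, and then dispatches on header.packet_type to the
 * necp_handle_* helpers that follow.
 */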
static void
necp_ctl_rcvd(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int flags)
{
#pragma unused(kctlref, unit, unitinfo, flags)
	return;
}

static errno_t
necp_ctl_getopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int opt, void *data, size_t *len)
{
#pragma unused(kctlref, unit, unitinfo, opt, data, len)
	return 0;
}

static errno_t
necp_ctl_setopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int opt, void *data, size_t len)
{
#pragma unused(kctlref, unit, unitinfo, opt, data, len)
	return 0;
}
// Session Management

static struct necp_session *
necp_create_session(void)
{
	struct necp_session *new_session = NULL;

	MALLOC(new_session, struct necp_session *, sizeof(*new_session), M_NECP, M_WAITOK | M_ZERO);
	if (new_session == NULL) {
		goto done;
	}

	new_session->necp_fd_type = necp_fd_type_session;
	new_session->session_priority = NECP_SESSION_PRIORITY_UNKNOWN;
	new_session->dirty = FALSE;
	LIST_INIT(&new_session->policies);
	lck_mtx_init(&new_session->lock, necp_kernel_policy_mtx_grp, necp_kernel_policy_mtx_attr);

	lck_rw_lock_exclusive(&necp_kernel_policy_lock);

	// Find the next available control unit
	u_int32_t control_unit = 1;
	struct necp_session *next_session = NULL;
	TAILQ_FOREACH(next_session, &necp_session_list, chain) {
		if (next_session->control_unit > control_unit) {
			// Found a gap, grab this control unit
			break;
		}

		// Try the next control unit, loop around
		control_unit = next_session->control_unit + 1;
	}

	new_session->control_unit = control_unit;
	new_session->session_order = necp_allocate_new_session_order(new_session->session_priority, control_unit);

	if (next_session != NULL) {
		TAILQ_INSERT_BEFORE(next_session, new_session, chain);
	} else {
		TAILQ_INSERT_TAIL(&necp_session_list, new_session, chain);
	}

	necp_session_count++;
	lck_rw_done(&necp_kernel_policy_lock);

	if (necp_debug) {
		NECPLOG(LOG_DEBUG, "Created NECP session, control unit %d", control_unit);
	}

done:
	return new_session;
}
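/*
 * Illustrative example of the control-unit allocation above (editorial, not
 * in the original source): necp_session_list is kept sorted by control_unit,
 * so the scan grabs the first gap. If existing sessions hold units {1, 2, 4},
 * the loop stops at the session holding 4 (4 > 3) and the new session takes
 * control_unit 3, inserted before it; with units {1, 2, 3} the loop falls off
 * the end and the new session takes 4 via TAILQ_INSERT_TAIL.
 */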
static void
necp_delete_session(struct necp_session *session)
{
	if (session != NULL) {
		struct necp_service_registration *service = NULL;
		struct necp_service_registration *temp_service = NULL;
		LIST_FOREACH_SAFE(service, &session->services, session_chain, temp_service) {
			LIST_REMOVE(service, session_chain);
			lck_rw_lock_exclusive(&necp_kernel_policy_lock);
			LIST_REMOVE(service, kernel_chain);
			lck_rw_done(&necp_kernel_policy_lock);
			FREE(service, M_NECP);
		}
		if (necp_debug) {
			NECPLOG0(LOG_DEBUG, "Deleted NECP session");
		}

		lck_rw_lock_exclusive(&necp_kernel_policy_lock);
		TAILQ_REMOVE(&necp_session_list, session, chain);
		necp_session_count--;
		lck_rw_done(&necp_kernel_policy_lock);

		lck_mtx_destroy(&session->lock, necp_kernel_policy_mtx_grp);
		FREE(session, M_NECP);
	}
}
// Session Policy Management

static inline u_int8_t
necp_policy_result_get_type_from_buffer(u_int8_t *buffer, u_int32_t length)
{
	return (buffer && length >= sizeof(u_int8_t)) ? buffer[0] : 0;
}

static inline u_int32_t
necp_policy_result_get_parameter_length_from_buffer(u_int8_t *buffer, u_int32_t length)
{
	return (buffer && length > sizeof(u_int8_t)) ? (length - sizeof(u_int8_t)) : 0;
}

static inline u_int8_t *
necp_policy_result_get_parameter_pointer_from_buffer(u_int8_t *buffer, u_int32_t length)
{
	return (buffer && length > sizeof(u_int8_t)) ? (buffer + sizeof(u_int8_t)) : NULL;
}

static bool
necp_policy_result_requires_route_rules(u_int8_t *buffer, u_int32_t length)
{
	u_int8_t type = necp_policy_result_get_type_from_buffer(buffer, length);
	if (type == NECP_POLICY_RESULT_ROUTE_RULES) {
		return TRUE;
	}
	return FALSE;
}
static bool
necp_address_is_valid(struct sockaddr *address)
{
	if (address->sa_family == AF_INET) {
		return address->sa_len == sizeof(struct sockaddr_in);
	} else if (address->sa_family == AF_INET6) {
		return address->sa_len == sizeof(struct sockaddr_in6);
	} else {
		return FALSE;
	}
}
static bool
necp_policy_result_is_valid(u_int8_t *buffer, u_int32_t length)
{
	bool validated = FALSE;
	u_int8_t type = necp_policy_result_get_type_from_buffer(buffer, length);
	u_int32_t parameter_length = necp_policy_result_get_parameter_length_from_buffer(buffer, length);
	switch (type) {
	case NECP_POLICY_RESULT_PASS:
	case NECP_POLICY_RESULT_DROP:
	case NECP_POLICY_RESULT_ROUTE_RULES:
	case NECP_POLICY_RESULT_SCOPED_DIRECT:
	case NECP_POLICY_RESULT_ALLOW_UNENTITLED: {
		validated = TRUE;
		break;
	}
	case NECP_POLICY_RESULT_SKIP:
	case NECP_POLICY_RESULT_SOCKET_DIVERT:
	case NECP_POLICY_RESULT_SOCKET_FILTER: {
		if (parameter_length >= sizeof(u_int32_t)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_RESULT_IP_TUNNEL: {
		if (parameter_length > sizeof(u_int32_t)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_RESULT_SOCKET_SCOPED: {
		if (parameter_length > 0) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_RESULT_TRIGGER:
	case NECP_POLICY_RESULT_TRIGGER_IF_NEEDED:
	case NECP_POLICY_RESULT_TRIGGER_SCOPED:
	case NECP_POLICY_RESULT_NO_TRIGGER_SCOPED:
	case NECP_POLICY_RESULT_USE_NETAGENT:
	case NECP_POLICY_RESULT_NETAGENT_SCOPED: {
		if (parameter_length >= sizeof(uuid_t)) {
			validated = TRUE;
		}
		break;
	}
	default: {
		break;
	}
	}

	if (necp_debug) {
		NECPLOG(LOG_DEBUG, "Policy result type %d, valid %d", type, validated);
	}

	return validated;
}
static inline u_int8_t
necp_policy_condition_get_type_from_buffer(u_int8_t *buffer, u_int32_t length)
{
	return (buffer && length >= sizeof(u_int8_t)) ? buffer[0] : 0;
}

static inline u_int8_t
necp_policy_condition_get_flags_from_buffer(u_int8_t *buffer, u_int32_t length)
{
	return (buffer && length >= (2 * sizeof(u_int8_t))) ? buffer[1] : 0;
}

static inline u_int32_t
necp_policy_condition_get_value_length_from_buffer(u_int8_t *buffer, u_int32_t length)
{
	return (buffer && length >= (2 * sizeof(u_int8_t))) ? (length - (2 * sizeof(u_int8_t))) : 0;
}

static inline u_int8_t *
necp_policy_condition_get_value_pointer_from_buffer(u_int8_t *buffer, u_int32_t length)
{
	return (buffer && length > (2 * sizeof(u_int8_t))) ? (buffer + (2 * sizeof(u_int8_t))) : NULL;
}
static bool
necp_policy_condition_is_default(u_int8_t *buffer, u_int32_t length)
{
	return necp_policy_condition_get_type_from_buffer(buffer, length) == NECP_POLICY_CONDITION_DEFAULT;
}

static bool
necp_policy_condition_is_application(u_int8_t *buffer, u_int32_t length)
{
	return necp_policy_condition_get_type_from_buffer(buffer, length) == NECP_POLICY_CONDITION_APPLICATION;
}

static bool
necp_policy_condition_is_real_application(u_int8_t *buffer, u_int32_t length)
{
	return necp_policy_condition_get_type_from_buffer(buffer, length) == NECP_POLICY_CONDITION_REAL_APPLICATION;
}

static bool
necp_policy_condition_requires_application(u_int8_t *buffer, u_int32_t length)
{
	u_int8_t type = necp_policy_condition_get_type_from_buffer(buffer, length);
	return type == NECP_POLICY_CONDITION_REAL_APPLICATION;
}

static bool
necp_policy_condition_requires_real_application(u_int8_t *buffer, u_int32_t length)
{
	u_int8_t type = necp_policy_condition_get_type_from_buffer(buffer, length);
	return type == NECP_POLICY_CONDITION_ENTITLEMENT;
}
static bool
necp_policy_condition_is_valid(u_int8_t *buffer, u_int32_t length, u_int8_t policy_result_type)
{
	bool validated = FALSE;
	bool result_cannot_have_ip_layer = (policy_result_type == NECP_POLICY_RESULT_SOCKET_DIVERT ||
	    policy_result_type == NECP_POLICY_RESULT_SOCKET_FILTER ||
	    policy_result_type == NECP_POLICY_RESULT_TRIGGER ||
	    policy_result_type == NECP_POLICY_RESULT_TRIGGER_IF_NEEDED ||
	    policy_result_type == NECP_POLICY_RESULT_TRIGGER_SCOPED ||
	    policy_result_type == NECP_POLICY_RESULT_NO_TRIGGER_SCOPED ||
	    policy_result_type == NECP_POLICY_RESULT_SOCKET_SCOPED ||
	    policy_result_type == NECP_POLICY_RESULT_ROUTE_RULES ||
	    policy_result_type == NECP_POLICY_RESULT_USE_NETAGENT ||
	    policy_result_type == NECP_POLICY_RESULT_NETAGENT_SCOPED ||
	    policy_result_type == NECP_POLICY_RESULT_SCOPED_DIRECT ||
	    policy_result_type == NECP_POLICY_RESULT_ALLOW_UNENTITLED) ? TRUE : FALSE;
	u_int32_t condition_length = necp_policy_condition_get_value_length_from_buffer(buffer, length);
	u_int8_t *condition_value = necp_policy_condition_get_value_pointer_from_buffer(buffer, length);
	u_int8_t type = necp_policy_condition_get_type_from_buffer(buffer, length);
	u_int8_t flags = necp_policy_condition_get_flags_from_buffer(buffer, length);
	switch (type) {
	case NECP_POLICY_CONDITION_APPLICATION:
	case NECP_POLICY_CONDITION_REAL_APPLICATION: {
		if (!(flags & NECP_POLICY_CONDITION_FLAGS_NEGATIVE) &&
		    condition_length >= sizeof(uuid_t) &&
		    condition_value != NULL &&
		    !uuid_is_null(condition_value)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_DOMAIN:
	case NECP_POLICY_CONDITION_ACCOUNT:
	case NECP_POLICY_CONDITION_BOUND_INTERFACE: {
		if (condition_length > 0) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_TRAFFIC_CLASS: {
		if (condition_length >= sizeof(struct necp_policy_condition_tc_range)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_DEFAULT:
	case NECP_POLICY_CONDITION_ALL_INTERFACES:
	case NECP_POLICY_CONDITION_ENTITLEMENT:
	case NECP_POLICY_CONDITION_PLATFORM_BINARY:
	case NECP_POLICY_CONDITION_HAS_CLIENT:
	case NECP_POLICY_CONDITION_LOCAL_NETWORKS: {
		if (!(flags & NECP_POLICY_CONDITION_FLAGS_NEGATIVE)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_IP_PROTOCOL: {
		if (condition_length >= sizeof(u_int16_t)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_PID: {
		if (condition_length >= sizeof(pid_t) &&
		    condition_value != NULL &&
		    *((pid_t *)(void *)condition_value) != 0) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_UID: {
		if (condition_length >= sizeof(uid_t)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_LOCAL_ADDR:
	case NECP_POLICY_CONDITION_REMOTE_ADDR: {
		if (!result_cannot_have_ip_layer && condition_length >= sizeof(struct necp_policy_condition_addr) &&
		    necp_address_is_valid(&((struct necp_policy_condition_addr *)(void *)condition_value)->address.sa)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_LOCAL_ADDR_RANGE:
	case NECP_POLICY_CONDITION_REMOTE_ADDR_RANGE: {
		if (!result_cannot_have_ip_layer && condition_length >= sizeof(struct necp_policy_condition_addr_range) &&
		    necp_address_is_valid(&((struct necp_policy_condition_addr_range *)(void *)condition_value)->start_address.sa) &&
		    necp_address_is_valid(&((struct necp_policy_condition_addr_range *)(void *)condition_value)->end_address.sa)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_AGENT_TYPE: {
		if (!(flags & NECP_POLICY_CONDITION_FLAGS_NEGATIVE) &&
		    condition_length >= sizeof(struct necp_policy_condition_agent_type)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_FLOW_IP_PROTOCOL: {
		if (condition_length >= sizeof(u_int16_t)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_FLOW_LOCAL_ADDR:
	case NECP_POLICY_CONDITION_FLOW_REMOTE_ADDR: {
		if (condition_length >= sizeof(struct necp_policy_condition_addr) &&
		    necp_address_is_valid(&((struct necp_policy_condition_addr *)(void *)condition_value)->address.sa)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_FLOW_LOCAL_ADDR_RANGE:
	case NECP_POLICY_CONDITION_FLOW_REMOTE_ADDR_RANGE: {
		if (condition_length >= sizeof(struct necp_policy_condition_addr_range) &&
		    necp_address_is_valid(&((struct necp_policy_condition_addr_range *)(void *)condition_value)->start_address.sa) &&
		    necp_address_is_valid(&((struct necp_policy_condition_addr_range *)(void *)condition_value)->end_address.sa)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_CLIENT_FLAGS: {
		if (condition_length == 0 || condition_length >= sizeof(u_int32_t)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_FLOW_LOCAL_ADDR_EMPTY: {
		validated = TRUE;
		break;
	}
	case NECP_POLICY_CONDITION_FLOW_REMOTE_ADDR_EMPTY: {
		validated = TRUE;
		break;
	}
	default: {
		break;
	}
	}

	if (necp_debug) {
		NECPLOG(LOG_DEBUG, "Policy condition type %d, valid %d", type, validated);
	}

	return validated;
}
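/*
 * Layout note (editorial, not in the original source): as the
 * necp_policy_condition_get_* accessors above assume, a serialized condition
 * value is [1-byte condition type][1-byte flags][payload]. The flags byte
 * carries modifiers such as NECP_POLICY_CONDITION_FLAGS_NEGATIVE, which is why
 * several cases here reject negated conditions outright.
 */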
static bool
necp_policy_route_rule_is_default(u_int8_t *buffer, u_int32_t length)
{
	return necp_policy_condition_get_value_length_from_buffer(buffer, length) == 0 &&
	       necp_policy_condition_get_flags_from_buffer(buffer, length) == 0;
}

static bool
necp_policy_route_rule_is_valid(u_int8_t *buffer, u_int32_t length)
{
	bool validated = FALSE;
	u_int8_t type = necp_policy_condition_get_type_from_buffer(buffer, length);
	switch (type) {
	case NECP_ROUTE_RULE_ALLOW_INTERFACE: {
		validated = TRUE;
		break;
	}
	case NECP_ROUTE_RULE_DENY_INTERFACE: {
		validated = TRUE;
		break;
	}
	case NECP_ROUTE_RULE_QOS_MARKING: {
		validated = TRUE;
		break;
	}
	case NECP_ROUTE_RULE_DENY_LQM_ABORT: {
		validated = TRUE;
		break;
	}
	default: {
		break;
	}
	}

	if (necp_debug) {
		NECPLOG(LOG_DEBUG, "Policy route rule type %d, valid %d", type, validated);
	}

	return validated;
}
static int
necp_get_posix_error_for_necp_error(int response_error)
{
	switch (response_error) {
	case NECP_ERROR_UNKNOWN_PACKET_TYPE:
	case NECP_ERROR_INVALID_TLV:
	case NECP_ERROR_POLICY_RESULT_INVALID:
	case NECP_ERROR_POLICY_CONDITIONS_INVALID:
	case NECP_ERROR_ROUTE_RULES_INVALID: {
		return EINVAL;
	}
	case NECP_ERROR_POLICY_ID_NOT_FOUND: {
		return ENOENT;
	}
	case NECP_ERROR_INVALID_PROCESS: {
		return EPERM;
	}
	case NECP_ERROR_INTERNAL:
	default: {
		return ENOMEM;
	}
	}
}
static void
necp_handle_set_session_priority(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
{
	int error;
	struct necp_session_policy *policy = NULL;
	struct necp_session_policy *temp_policy = NULL;
	u_int32_t response_error = NECP_ERROR_INTERNAL;
	u_int32_t requested_session_priority = NECP_SESSION_PRIORITY_UNKNOWN;

	error = necp_get_tlv(packet, NULL, 0, offset, NECP_TLV_SESSION_PRIORITY, sizeof(requested_session_priority), &requested_session_priority, NULL);
	if (error != 0) {
		NECPLOG(LOG_ERR, "Failed to get session priority: %d", error);
		response_error = NECP_ERROR_INVALID_TLV;
		goto fail;
	}

	if (session == NULL) {
		NECPLOG0(LOG_ERR, "Failed to find session");
		response_error = NECP_ERROR_INTERNAL;
		goto fail;
	}

	// Enforce special session priorities with entitlements
	if (requested_session_priority == NECP_SESSION_PRIORITY_CONTROL ||
	    requested_session_priority == NECP_SESSION_PRIORITY_PRIVILEGED_TUNNEL) {
		errno_t cred_result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES, 0);
		if (cred_result != 0) {
			NECPLOG(LOG_ERR, "Session does not hold necessary entitlement to claim priority level %d", requested_session_priority);
			goto fail;
		}
	}

	if (session->session_priority != requested_session_priority) {
		session->session_priority = requested_session_priority;
		session->session_order = necp_allocate_new_session_order(session->session_priority, session->control_unit);
		session->dirty = TRUE;

		// Mark all policies as needing updates
		LIST_FOREACH_SAFE(policy, &session->policies, chain, temp_policy) {
			policy->pending_update = TRUE;
		}
	}

	necp_send_success_response(session, NECP_PACKET_TYPE_SET_SESSION_PRIORITY, message_id);
	return;

fail:
	necp_send_error_response(session, NECP_PACKET_TYPE_SET_SESSION_PRIORITY, message_id, response_error);
}
static void
necp_handle_lock_session_to_proc(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
{
#pragma unused(packet, offset)
	// proc_uuid already filled out
	session->proc_locked = TRUE;
	necp_send_success_response(session, NECP_PACKET_TYPE_LOCK_SESSION_TO_PROC, message_id);
}
static void
necp_handle_register_service(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
{
	int error;
	struct necp_service_registration *new_service = NULL;
	u_int32_t response_error = NECP_ERROR_INTERNAL;
	uuid_t service_uuid;
	uuid_clear(service_uuid);

	if (session == NULL) {
		NECPLOG0(LOG_ERR, "Failed to find session");
		response_error = NECP_ERROR_INTERNAL;
		goto fail;
	}

	// Enforce entitlements
	errno_t cred_result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES, 0);
	if (cred_result != 0) {
		NECPLOG0(LOG_ERR, "Session does not hold necessary entitlement to register service");
		goto fail;
	}

	// Read service uuid
	error = necp_get_tlv(packet, NULL, 0, offset, NECP_TLV_SERVICE_UUID, sizeof(uuid_t), service_uuid, NULL);
	if (error != 0) {
		NECPLOG(LOG_ERR, "Failed to get service UUID: %d", error);
		response_error = NECP_ERROR_INVALID_TLV;
		goto fail;
	}

	MALLOC(new_service, struct necp_service_registration *, sizeof(*new_service), M_NECP, M_WAITOK);
	if (new_service == NULL) {
		NECPLOG0(LOG_ERR, "Failed to allocate service registration");
		response_error = NECP_ERROR_INTERNAL;
		goto fail;
	}

	lck_rw_lock_exclusive(&necp_kernel_policy_lock);
	memset(new_service, 0, sizeof(*new_service));
	new_service->service_id = necp_create_uuid_service_id_mapping(service_uuid);
	LIST_INSERT_HEAD(&session->services, new_service, session_chain);
	LIST_INSERT_HEAD(&necp_registered_service_list, new_service, kernel_chain);
	lck_rw_done(&necp_kernel_policy_lock);

	necp_send_success_response(session, NECP_PACKET_TYPE_REGISTER_SERVICE, message_id);
	return;

fail:
	necp_send_error_response(session, NECP_PACKET_TYPE_REGISTER_SERVICE, message_id, response_error);
}
static void
necp_handle_unregister_service(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
{
	int error;
	struct necp_service_registration *service = NULL;
	struct necp_service_registration *temp_service = NULL;
	u_int32_t response_error = NECP_ERROR_INTERNAL;
	struct necp_uuid_id_mapping *mapping = NULL;
	uuid_t service_uuid;
	uuid_clear(service_uuid);

	if (session == NULL) {
		NECPLOG0(LOG_ERR, "Failed to find session");
		response_error = NECP_ERROR_INTERNAL;
		goto fail;
	}

	// Read service uuid
	error = necp_get_tlv(packet, NULL, 0, offset, NECP_TLV_SERVICE_UUID, sizeof(uuid_t), service_uuid, NULL);
	if (error != 0) {
		NECPLOG(LOG_ERR, "Failed to get service UUID: %d", error);
		response_error = NECP_ERROR_INVALID_TLV;
		goto fail;
	}

	// Mark remove all matching services for this session
	lck_rw_lock_exclusive(&necp_kernel_policy_lock);
	mapping = necp_uuid_lookup_service_id_locked(service_uuid);
	if (mapping != NULL) {
		LIST_FOREACH_SAFE(service, &session->services, session_chain, temp_service) {
			if (service->service_id == mapping->id) {
				LIST_REMOVE(service, session_chain);
				LIST_REMOVE(service, kernel_chain);
				FREE(service, M_NECP);
			}
		}
		necp_remove_uuid_service_id_mapping(service_uuid);
	}
	lck_rw_done(&necp_kernel_policy_lock);

	necp_send_success_response(session, NECP_PACKET_TYPE_UNREGISTER_SERVICE, message_id);
	return;

fail:
	necp_send_error_response(session, NECP_PACKET_TYPE_UNREGISTER_SERVICE, message_id, response_error);
}
static necp_policy_id
necp_handle_policy_add(struct necp_session *session, u_int32_t message_id, mbuf_t packet,
    u_int8_t *tlv_buffer, size_t tlv_buffer_length, int offset, int *return_error)
{
	bool has_default_condition = FALSE;
	bool has_non_default_condition = FALSE;
	bool has_application_condition = FALSE;
	bool has_real_application_condition = FALSE;
	bool requires_application_condition = FALSE;
	bool requires_real_application_condition = FALSE;
	u_int8_t *conditions_array = NULL;
	u_int32_t conditions_array_size = 0;
	int conditions_array_cursor;

	bool has_default_route_rule = FALSE;
	u_int8_t *route_rules_array = NULL;
	u_int32_t route_rules_array_size = 0;
	int route_rules_array_cursor;

	int cursor;
	int error = 0;
	u_int32_t response_error = NECP_ERROR_INTERNAL;

	necp_policy_order order = 0;
	struct necp_session_policy *policy = NULL;
	u_int8_t *policy_result = NULL;
	u_int32_t policy_result_size = 0;

	// Read policy order
	error = necp_get_tlv(packet, tlv_buffer, tlv_buffer_length, offset, NECP_TLV_POLICY_ORDER, sizeof(order), &order, NULL);
	if (error) {
		NECPLOG(LOG_ERR, "Failed to get policy order: %d", error);
		response_error = NECP_ERROR_INVALID_TLV;
		goto fail;
	}

	// Read policy result
	cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, offset, NECP_TLV_POLICY_RESULT, &error, 0);
	if (error || cursor < 0) {
		NECPLOG(LOG_ERR, "Failed to find policy result TLV: %d", error);
		response_error = NECP_ERROR_INVALID_TLV;
		goto fail;
	}
	error = necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &policy_result_size);
	if (error || policy_result_size == 0) {
		NECPLOG(LOG_ERR, "Failed to get policy result length: %d", error);
		response_error = NECP_ERROR_INVALID_TLV;
		goto fail;
	}
	if (policy_result_size > NECP_MAX_POLICY_RESULT_SIZE) {
		NECPLOG(LOG_ERR, "Policy result length too large: %u", policy_result_size);
		response_error = NECP_ERROR_INVALID_TLV;
		goto fail;
	}
	MALLOC(policy_result, u_int8_t *, policy_result_size, M_NECP, M_WAITOK);
	if (policy_result == NULL) {
		NECPLOG(LOG_ERR, "Failed to allocate a policy result buffer (size %d)", policy_result_size);
		response_error = NECP_ERROR_INTERNAL;
		goto fail;
	}
	error = necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, policy_result_size, policy_result, NULL);
	if (error) {
		NECPLOG(LOG_ERR, "Failed to get policy result: %d", error);
		response_error = NECP_ERROR_POLICY_RESULT_INVALID;
		goto fail;
	}
	if (!necp_policy_result_is_valid(policy_result, policy_result_size)) {
		NECPLOG0(LOG_ERR, "Failed to validate policy result");
		response_error = NECP_ERROR_POLICY_RESULT_INVALID;
		goto fail;
	}

	if (necp_policy_result_requires_route_rules(policy_result, policy_result_size)) {
		// Read route rules conditions
		for (cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, offset, NECP_TLV_ROUTE_RULE, &error, 0);
		    cursor >= 0;
		    cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, cursor, NECP_TLV_ROUTE_RULE, &error, 1)) {
			u_int32_t route_rule_size = 0;
			necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &route_rule_size);
			if (os_add_overflow(route_rules_array_size,
			    (sizeof(u_int8_t) + sizeof(u_int32_t) + route_rule_size),
			    &route_rules_array_size)) {
				NECPLOG0(LOG_ERR, "Route rules size overflowed, too large");
				response_error = NECP_ERROR_INVALID_TLV;
				goto fail;
			}
		}

		if (route_rules_array_size == 0) {
			NECPLOG0(LOG_ERR, "Failed to get policy route rules");
			response_error = NECP_ERROR_INVALID_TLV;
			goto fail;
		}
		if (route_rules_array_size > NECP_MAX_ROUTE_RULES_ARRAY_SIZE) {
			NECPLOG(LOG_ERR, "Route rules length too large: %u", route_rules_array_size);
			response_error = NECP_ERROR_INVALID_TLV;
			goto fail;
		}
		MALLOC(route_rules_array, u_int8_t *, route_rules_array_size, M_NECP, M_WAITOK);
		if (route_rules_array == NULL) {
			NECPLOG(LOG_ERR, "Failed to allocate a policy route rules array (size %d)", route_rules_array_size);
			response_error = NECP_ERROR_INTERNAL;
			goto fail;
		}

		route_rules_array_cursor = 0;
		for (cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, offset, NECP_TLV_ROUTE_RULE, &error, 0);
		    cursor >= 0;
		    cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, cursor, NECP_TLV_ROUTE_RULE, &error, 1)) {
			u_int8_t route_rule_type = NECP_TLV_ROUTE_RULE;
			u_int32_t route_rule_size = 0;
			necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &route_rule_size);
			if (route_rule_size > 0 &&
			    (sizeof(route_rule_type) + sizeof(route_rule_size) + route_rule_size) <= (route_rules_array_size - route_rules_array_cursor)) {
				memcpy((route_rules_array + route_rules_array_cursor), &route_rule_type, sizeof(route_rule_type));
				route_rules_array_cursor += sizeof(route_rule_type);

				memcpy((route_rules_array + route_rules_array_cursor), &route_rule_size, sizeof(route_rule_size));
				route_rules_array_cursor += sizeof(route_rule_size);

				necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, route_rule_size, (route_rules_array + route_rules_array_cursor), NULL);

				if (!necp_policy_route_rule_is_valid((route_rules_array + route_rules_array_cursor), route_rule_size)) {
					NECPLOG0(LOG_ERR, "Failed to validate policy route rule");
					response_error = NECP_ERROR_ROUTE_RULES_INVALID;
					goto fail;
				}

				if (necp_policy_route_rule_is_default((route_rules_array + route_rules_array_cursor), route_rule_size)) {
					if (has_default_route_rule) {
						NECPLOG0(LOG_ERR, "Failed to validate route rule; contained multiple default route rules");
						response_error = NECP_ERROR_ROUTE_RULES_INVALID;
						goto fail;
					}
					has_default_route_rule = TRUE;
				}

				route_rules_array_cursor += route_rule_size;
			}
		}
	}

	// Read policy conditions
	for (cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, offset, NECP_TLV_POLICY_CONDITION, &error, 0);
	    cursor >= 0;
	    cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, cursor, NECP_TLV_POLICY_CONDITION, &error, 1)) {
		u_int32_t condition_size = 0;
		necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &condition_size);

		if (condition_size > 0) {
			if (os_add_overflow(conditions_array_size,
			    (sizeof(u_int8_t) + sizeof(u_int32_t) + condition_size),
			    &conditions_array_size)) {
				NECPLOG0(LOG_ERR, "Conditions size overflowed, too large");
				response_error = NECP_ERROR_INVALID_TLV;
				goto fail;
			}
		}
	}

	if (conditions_array_size == 0) {
		NECPLOG0(LOG_ERR, "Failed to get policy conditions");
		response_error = NECP_ERROR_INVALID_TLV;
		goto fail;
	}
	if (conditions_array_size > NECP_MAX_CONDITIONS_ARRAY_SIZE) {
		NECPLOG(LOG_ERR, "Conditions length too large: %u", conditions_array_size);
		response_error = NECP_ERROR_INVALID_TLV;
		goto fail;
	}
	MALLOC(conditions_array, u_int8_t *, conditions_array_size, M_NECP, M_WAITOK);
	if (conditions_array == NULL) {
		NECPLOG(LOG_ERR, "Failed to allocate a policy conditions array (size %d)", conditions_array_size);
		response_error = NECP_ERROR_INTERNAL;
		goto fail;
	}

	conditions_array_cursor = 0;
	for (cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, offset, NECP_TLV_POLICY_CONDITION, &error, 0);
	    cursor >= 0;
	    cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, cursor, NECP_TLV_POLICY_CONDITION, &error, 1)) {
		u_int8_t condition_type = NECP_TLV_POLICY_CONDITION;
		u_int32_t condition_size = 0;
		necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &condition_size);
		if (condition_size > 0 &&
		    (sizeof(condition_type) + sizeof(condition_size) + condition_size) <= (conditions_array_size - conditions_array_cursor)) {
			memcpy((conditions_array + conditions_array_cursor), &condition_type, sizeof(condition_type));
			conditions_array_cursor += sizeof(condition_type);

			memcpy((conditions_array + conditions_array_cursor), &condition_size, sizeof(condition_size));
			conditions_array_cursor += sizeof(condition_size);

			necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, condition_size, (conditions_array + conditions_array_cursor), NULL);
			if (!necp_policy_condition_is_valid((conditions_array + conditions_array_cursor), condition_size, necp_policy_result_get_type_from_buffer(policy_result, policy_result_size))) {
				NECPLOG0(LOG_ERR, "Failed to validate policy condition");
				response_error = NECP_ERROR_POLICY_CONDITIONS_INVALID;
				goto fail;
			}

			if (necp_policy_condition_is_default((conditions_array + conditions_array_cursor), condition_size)) {
				has_default_condition = TRUE;
			} else {
				has_non_default_condition = TRUE;
			}
			if (has_default_condition && has_non_default_condition) {
				NECPLOG0(LOG_ERR, "Failed to validate conditions; contained default and non-default conditions");
				response_error = NECP_ERROR_POLICY_CONDITIONS_INVALID;
				goto fail;
			}

			if (necp_policy_condition_is_application((conditions_array + conditions_array_cursor), condition_size)) {
				has_application_condition = TRUE;
			}

			if (necp_policy_condition_is_real_application((conditions_array + conditions_array_cursor), condition_size)) {
				has_real_application_condition = TRUE;
			}

			if (necp_policy_condition_requires_application((conditions_array + conditions_array_cursor), condition_size)) {
				requires_application_condition = TRUE;
			}

			if (necp_policy_condition_requires_real_application((conditions_array + conditions_array_cursor), condition_size)) {
				requires_real_application_condition = TRUE;
			}

			conditions_array_cursor += condition_size;
		}
	}

	if (requires_application_condition && !has_application_condition) {
		NECPLOG0(LOG_ERR, "Failed to validate conditions; did not contain application condition");
		response_error = NECP_ERROR_POLICY_CONDITIONS_INVALID;
		goto fail;
	}

	if (requires_real_application_condition && !has_real_application_condition) {
		NECPLOG0(LOG_ERR, "Failed to validate conditions; did not contain real application condition");
		response_error = NECP_ERROR_POLICY_CONDITIONS_INVALID;
		goto fail;
	}

	if ((policy = necp_policy_create(session, order, conditions_array, conditions_array_size, route_rules_array, route_rules_array_size, policy_result, policy_result_size)) == NULL) {
		response_error = NECP_ERROR_INTERNAL;
		goto fail;
	}

	if (packet != NULL) {
		necp_send_policy_id_response(session, NECP_PACKET_TYPE_POLICY_ADD, message_id, policy->local_id);
	}
	return policy->local_id;

fail:
	if (policy_result != NULL) {
		FREE(policy_result, M_NECP);
	}
	if (conditions_array != NULL) {
		FREE(conditions_array, M_NECP);
	}
	if (route_rules_array != NULL) {
		FREE(route_rules_array, M_NECP);
	}

	if (packet != NULL) {
		necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_ADD, message_id, response_error);
	}
	if (return_error != NULL) {
		*return_error = necp_get_posix_error_for_necp_error(response_error);
	}
	return 0;
}
static void
necp_handle_policy_get(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
{
#pragma unused(offset)
	int error;
	u_int8_t *response = NULL;
	u_int8_t *cursor = NULL;
	u_int32_t response_error = NECP_ERROR_INTERNAL;
	necp_policy_id policy_id = 0;
	u_int32_t order_tlv_size = 0;
	u_int32_t result_tlv_size = 0;
	u_int32_t response_size = 0;

	struct necp_session_policy *policy = NULL;

	error = necp_get_tlv(packet, NULL, 0, offset, NECP_TLV_POLICY_ID, sizeof(policy_id), &policy_id, NULL);
	if (error) {
		NECPLOG(LOG_ERR, "Failed to get policy id: %d", error);
		response_error = NECP_ERROR_INVALID_TLV;
		goto fail;
	}

	policy = necp_policy_find(session, policy_id);
	if (policy == NULL || policy->pending_deletion) {
		NECPLOG(LOG_ERR, "Failed to find policy with id %d", policy_id);
		response_error = NECP_ERROR_POLICY_ID_NOT_FOUND;
		goto fail;
	}

	order_tlv_size = sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(necp_policy_order);
	result_tlv_size = (policy->result_size ? (sizeof(u_int8_t) + sizeof(u_int32_t) + policy->result_size) : 0);
	response_size = sizeof(struct necp_packet_header) + order_tlv_size + result_tlv_size + policy->conditions_size;
	MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK);
	if (response == NULL) {
		necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_GET, message_id, NECP_ERROR_INTERNAL);
		return;
	}

	cursor = response;
	cursor = necp_buffer_write_packet_header(cursor, NECP_PACKET_TYPE_POLICY_GET, NECP_PACKET_FLAGS_RESPONSE, message_id);
	cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ORDER, sizeof(necp_policy_order), &policy->order, response, response_size);

	if (result_tlv_size) {
		cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_RESULT, policy->result_size, &policy->result, response, response_size);
	}
	if (policy->conditions_size) {
		memcpy(((u_int8_t *)(void *)(cursor)), policy->conditions, policy->conditions_size);
	}

	if (!necp_send_ctl_data(session, (u_int8_t *)response, response_size)) {
		NECPLOG0(LOG_ERR, "Failed to send response");
	}

	FREE(response, M_NECP);
	return;

fail:
	necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_GET, message_id, response_error);
}

static void
necp_handle_policy_delete(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
{
	int error;
	u_int32_t response_error = NECP_ERROR_INTERNAL;
	necp_policy_id policy_id = 0;

	struct necp_session_policy *policy = NULL;

	error = necp_get_tlv(packet, NULL, 0, offset, NECP_TLV_POLICY_ID, sizeof(policy_id), &policy_id, NULL);
	if (error) {
		NECPLOG(LOG_ERR, "Failed to get policy id: %d", error);
		response_error = NECP_ERROR_INVALID_TLV;
		goto fail;
	}

	policy = necp_policy_find(session, policy_id);
	if (policy == NULL || policy->pending_deletion) {
		NECPLOG(LOG_ERR, "Failed to find policy with id %d", policy_id);
		response_error = NECP_ERROR_POLICY_ID_NOT_FOUND;
		goto fail;
	}

	necp_policy_mark_for_deletion(session, policy);

	necp_send_success_response(session, NECP_PACKET_TYPE_POLICY_DELETE, message_id);
	return;

fail:
	necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_DELETE, message_id, response_error);
}
static void
necp_handle_policy_apply_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
{
#pragma unused(packet, offset)
	necp_policy_apply_all(session);
	necp_send_success_response(session, NECP_PACKET_TYPE_POLICY_APPLY_ALL, message_id);
}
static void
necp_handle_policy_list_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
{
#pragma unused(packet, offset)
	u_int32_t tlv_size = (sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(u_int32_t));
	u_int32_t response_size = 0;
	u_int8_t *response = NULL;
	u_int8_t *cursor = NULL;
	int num_policies = 0;
	int cur_policy_index = 0;
	struct necp_session_policy *policy;

	LIST_FOREACH(policy, &session->policies, chain) {
		if (!policy->pending_deletion) {
			num_policies++;
		}
	}

	// Create a response with one Policy ID TLV for each policy
	response_size = sizeof(struct necp_packet_header) + num_policies * tlv_size;
	MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK);
	if (response == NULL) {
		necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_LIST_ALL, message_id, NECP_ERROR_INTERNAL);
		return;
	}

	cursor = response;
	cursor = necp_buffer_write_packet_header(cursor, NECP_PACKET_TYPE_POLICY_LIST_ALL, NECP_PACKET_FLAGS_RESPONSE, message_id);

	LIST_FOREACH(policy, &session->policies, chain) {
		if (!policy->pending_deletion && cur_policy_index < num_policies) {
			cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ID, sizeof(u_int32_t), &policy->local_id, response, response_size);
			cur_policy_index++;
		}
	}

	if (!necp_send_ctl_data(session, (u_int8_t *)response, response_size)) {
		NECPLOG0(LOG_ERR, "Failed to send response");
	}

	FREE(response, M_NECP);
}
static void
necp_handle_policy_delete_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
{
#pragma unused(packet, offset)
	necp_policy_mark_all_for_deletion(session);
	necp_send_success_response(session, NECP_PACKET_TYPE_POLICY_DELETE_ALL, message_id);
}
static necp_policy_id
necp_policy_get_new_id(struct necp_session *session)
{
	session->last_policy_id++;
	if (session->last_policy_id < 1) {
		session->last_policy_id = 1;
	}

	necp_policy_id newid = session->last_policy_id;
	if (newid == 0) {
		NECPLOG0(LOG_ERR, "Allocate policy id failed.\n");
		return 0;
	}

	return newid;
}
/*
 * For the policy dump response this is the structure:
 *
 * <NECP_PACKET_HEADER>
 * {
 *     type     : NECP_TLV_POLICY_DUMP
 *     length   : ...
 *     value    :
 *     {
 *         {
 *             type     : NECP_TLV_POLICY_ID
 *             ...
 *         }
 *         {
 *             type     : NECP_TLV_POLICY_ORDER
 *             ...
 *         }
 *         {
 *             type     : NECP_TLV_POLICY_RESULT_STRING
 *             ...
 *         }
 *         {
 *             type     : NECP_TLV_POLICY_OWNER
 *             ...
 *         }
 *         {
 *             type     : NECP_TLV_POLICY_CONDITION
 *             value    :
 *             {
 *                 {
 *                     type     : NECP_POLICY_CONDITION_ALL_INTERFACES
 *                     ...
 *                 }
 *                 {
 *                     type     : NECP_POLICY_CONDITION_BOUND_INTERFACES
 *                     ...
 *                 }
 *                 ...
 *             }
 *         }
 *     }
 * }
 * {
 *     type     : NECP_TLV_POLICY_DUMP
 *     ...      (same nested layout, repeated once per policy)
 * }
 */
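/*
 * Illustrative sketch (editorial, not in the original source): given a flat
 * buffer laid out as described above, the generic TLV walkers earlier in this
 * file are enough to enumerate the dump. Assuming `buffer`/`buffer_length`
 * hold one response body:
 *
 *     int err = 0;
 *     for (int off = necp_buffer_find_tlv(buffer, buffer_length, 0, NECP_TLV_POLICY_DUMP, &err, 0);
 *         off >= 0;
 *         off = necp_buffer_find_tlv(buffer, buffer_length, off, NECP_TLV_POLICY_DUMP, &err, 1)) {
 *         u_int32_t policy_tlv_size = 0;
 *         u_int8_t *policy_tlv = necp_buffer_get_tlv_value(buffer, off, &policy_tlv_size);
 *         // policy_tlv now spans the nested POLICY_ID / ORDER / RESULT_STRING /
 *         // OWNER / CONDITION TLVs for one policy.
 *     }
 */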
3206 necp_handle_policy_dump_all(struct necp_session
*session
, u_int32_t message_id
, mbuf_t packet
,
3207 user_addr_t out_buffer
, size_t out_buffer_length
, int offset
)
3209 #pragma unused(offset)
3210 struct necp_kernel_socket_policy
*policy
= NULL
;
3212 int policy_count
= 0;
3213 u_int8_t
**tlv_buffer_pointers
= NULL
;
3214 u_int32_t
*tlv_buffer_lengths
= NULL
;
3215 u_int32_t total_tlv_len
= 0;
3216 u_int8_t
*result_buf
= NULL
;
3217 u_int8_t
*result_buf_cursor
= result_buf
;
3218 char result_string
[MAX_RESULT_STRING_LEN
];
3219 char proc_name_string
[MAXCOMLEN
+ 1];
3222 bool error_occured
= false;
3223 u_int32_t response_error
= NECP_ERROR_INTERNAL
;
3225 #define REPORT_ERROR(error) error_occured = true; \
3226 response_error = error; \
3229 #define UNLOCK_AND_REPORT_ERROR(lock, error) lck_rw_done(lock); \
3232 errno_t cred_result
= priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES
, 0);
3233 if (cred_result
!= 0) {
3234 NECPLOG0(LOG_ERR
, "Session does not hold the necessary entitlement to get Network Extension Policy information");
3235 REPORT_ERROR(NECP_ERROR_INTERNAL
);
3239 lck_rw_lock_shared(&necp_kernel_policy_lock
);
3242 NECPLOG0(LOG_DEBUG
, "Gathering policies");
3245 policy_count
= necp_kernel_application_policies_count
;
3247 MALLOC(tlv_buffer_pointers
, u_int8_t
* *, sizeof(u_int8_t
*) * policy_count
, M_NECP
, M_NOWAIT
| M_ZERO
);
3248 if (tlv_buffer_pointers
== NULL
) {
3249 NECPLOG(LOG_DEBUG
, "Failed to allocate tlv_buffer_pointers (%u bytes)", sizeof(u_int8_t
*) * policy_count
);
3250 UNLOCK_AND_REPORT_ERROR(&necp_kernel_policy_lock
, NECP_ERROR_INTERNAL
);
3253 MALLOC(tlv_buffer_lengths
, u_int32_t
*, sizeof(u_int32_t
) * policy_count
, M_NECP
, M_NOWAIT
| M_ZERO
);
3254 if (tlv_buffer_lengths
== NULL
) {
3255 NECPLOG(LOG_DEBUG
, "Failed to allocate tlv_buffer_lengths (%u bytes)", sizeof(u_int32_t
) * policy_count
);
3256 UNLOCK_AND_REPORT_ERROR(&necp_kernel_policy_lock
, NECP_ERROR_INTERNAL
);
3259 for (policy_i
= 0; necp_kernel_socket_policies_app_layer_map
!= NULL
&& necp_kernel_socket_policies_app_layer_map
[policy_i
] != NULL
; policy_i
++) {
3260 policy
= necp_kernel_socket_policies_app_layer_map
[policy_i
];
3262 memset(result_string
, 0, MAX_RESULT_STRING_LEN
);
3263 memset(proc_name_string
, 0, MAXCOMLEN
+ 1);
3265 necp_get_result_description(result_string
, policy
->result
, policy
->result_parameter
);
3266 proc_name(policy
->session_pid
, proc_name_string
, MAXCOMLEN
);
3268 u_int16_t proc_name_len
= strlen(proc_name_string
) + 1;
3269 u_int16_t result_string_len
= strlen(result_string
) + 1;
3272 NECPLOG(LOG_DEBUG
, "Policy: process: %s, result: %s", proc_name_string
, result_string
);
3275 u_int32_t total_allocated_bytes
= sizeof(u_int8_t
) + sizeof(u_int32_t
) + sizeof(policy
->id
) + // NECP_TLV_POLICY_ID
3276 sizeof(u_int8_t
) + sizeof(u_int32_t
) + sizeof(policy
->order
) + // NECP_TLV_POLICY_ORDER
3277 sizeof(u_int8_t
) + sizeof(u_int32_t
) + sizeof(policy
->session_order
) + // NECP_TLV_POLICY_SESSION_ORDER
3278 sizeof(u_int8_t
) + sizeof(u_int32_t
) + result_string_len
+ // NECP_TLV_POLICY_RESULT_STRING
3279 sizeof(u_int8_t
) + sizeof(u_int32_t
) + proc_name_len
+ // NECP_TLV_POLICY_OWNER
3280 sizeof(u_int8_t
) + sizeof(u_int32_t
); // NECP_TLV_POLICY_CONDITION
3282 // We now traverse the condition_mask to see how much space we need to allocate
3283 u_int32_t condition_mask
= policy
->condition_mask
;
3284 u_int8_t num_conditions
= 0;
3285 struct necp_string_id_mapping
*account_id_entry
= NULL
;
3286 char if_name
[IFXNAMSIZ
];
3287 u_int32_t condition_tlv_length
= 0;
3288 memset(if_name
, 0, sizeof(if_name
));
3290 if (condition_mask
== NECP_POLICY_CONDITION_DEFAULT
) {
3293 if (condition_mask
& NECP_KERNEL_CONDITION_ALL_INTERFACES
) {
3296 if (condition_mask
& NECP_KERNEL_CONDITION_HAS_CLIENT
) {
3299 if (condition_mask
& NECP_KERNEL_CONDITION_BOUND_INTERFACE
) {
3300 snprintf(if_name
, IFXNAMSIZ
, "%s%d", ifnet_name(policy
->cond_bound_interface
), ifnet_unit(policy
->cond_bound_interface
));
3301 condition_tlv_length
+= strlen(if_name
) + 1;
3304 if (condition_mask
& NECP_KERNEL_CONDITION_PROTOCOL
) {
3305 condition_tlv_length
+= sizeof(policy
->cond_protocol
);
3308 if (condition_mask
& NECP_KERNEL_CONDITION_APP_ID
) {
3309 condition_tlv_length
+= sizeof(uuid_t
);
3312 if (condition_mask
& NECP_KERNEL_CONDITION_REAL_APP_ID
) {
3313 condition_tlv_length
+= sizeof(uuid_t
);
3316 if (condition_mask
& NECP_KERNEL_CONDITION_DOMAIN
) {
3317 u_int32_t domain_len
= strlen(policy
->cond_domain
) + 1;
3318 condition_tlv_length
+= domain_len
;
3321 if (condition_mask
& NECP_KERNEL_CONDITION_ACCOUNT_ID
) {
3322 account_id_entry
= necp_lookup_string_with_id_locked(&necp_account_id_list
, policy
->cond_account_id
);
3323 u_int32_t account_id_len
= 0;
3324 if (account_id_entry
) {
3325 account_id_len
= account_id_entry
->string
? strlen(account_id_entry
->string
) + 1 : 0;
3327 condition_tlv_length
+= account_id_len
;
3330 if (condition_mask
& NECP_KERNEL_CONDITION_PID
) {
3331 condition_tlv_length
+= sizeof(pid_t
);
3334 if (condition_mask
& NECP_KERNEL_CONDITION_UID
) {
3335 condition_tlv_length
+= sizeof(uid_t
);
3338 if (condition_mask
& NECP_KERNEL_CONDITION_TRAFFIC_CLASS
) {
3339 condition_tlv_length
+= sizeof(struct necp_policy_condition_tc_range
);
3342 if (condition_mask
& NECP_KERNEL_CONDITION_ENTITLEMENT
) {
3345 if (condition_mask
& NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT
) {
3346 u_int32_t entitlement_len
= strlen(policy
->cond_custom_entitlement
) + 1;
3347 condition_tlv_length
+= entitlement_len
;
3350 if (condition_mask
& NECP_KERNEL_CONDITION_PLATFORM_BINARY
) {
3353 if (condition_mask
& NECP_KERNEL_CONDITION_LOCAL_NETWORKS
) {
3356 if (condition_mask
& NECP_KERNEL_CONDITION_LOCAL_START
) {
3357 if (condition_mask
& NECP_KERNEL_CONDITION_LOCAL_END
) {
3358 condition_tlv_length
+= sizeof(struct necp_policy_condition_addr_range
);
3360 condition_tlv_length
+= sizeof(struct necp_policy_condition_addr
);
3364 if (condition_mask
& NECP_KERNEL_CONDITION_REMOTE_START
) {
3365 if (condition_mask
& NECP_KERNEL_CONDITION_REMOTE_END
) {
3366 condition_tlv_length
+= sizeof(struct necp_policy_condition_addr_range
);
3368 condition_tlv_length
+= sizeof(struct necp_policy_condition_addr
);
3372 if (condition_mask
& NECP_KERNEL_CONDITION_AGENT_TYPE
) {
3373 condition_tlv_length
+= sizeof(struct necp_policy_condition_agent_type
);
3376 if (condition_mask
& NECP_KERNEL_CONDITION_CLIENT_FLAGS
) {
3377 condition_tlv_length
+= sizeof(u_int32_t
);
3380 if (condition_mask
& NECP_KERNEL_CONDITION_LOCAL_EMPTY
) {
3383 if (condition_mask
& NECP_KERNEL_CONDITION_REMOTE_EMPTY
) {
3388 condition_tlv_length
+= num_conditions
* (sizeof(u_int8_t
) + sizeof(u_int32_t
)); // These are for the condition TLVs. The space for "value" is already accounted for above.
3389 total_allocated_bytes
+= condition_tlv_length
;
3391 u_int8_t
*tlv_buffer
;
3392 MALLOC(tlv_buffer
, u_int8_t
*, total_allocated_bytes
, M_NECP
, M_NOWAIT
| M_ZERO
);
3393 if (tlv_buffer
== NULL
) {
3394 NECPLOG(LOG_DEBUG
, "Failed to allocate tlv_buffer (%u bytes)", total_allocated_bytes
);
		u_int8_t *cursor = tlv_buffer;
		cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ID, sizeof(policy->id), &policy->id, tlv_buffer, total_allocated_bytes);
		cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ORDER, sizeof(necp_policy_order), &policy->order, tlv_buffer, total_allocated_bytes);
		cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_SESSION_ORDER, sizeof(policy->session_order), &policy->session_order, tlv_buffer, total_allocated_bytes);
		cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_RESULT_STRING, result_string_len, result_string, tlv_buffer, total_allocated_bytes);
		cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_OWNER, proc_name_len, proc_name_string, tlv_buffer, total_allocated_bytes);
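		// The conditions are serialized into a scratch buffer first so that the
		// single NECP_TLV_POLICY_CONDITION TLV written below can carry all of them
		// as one nested value of length (cond_buf_cursor - cond_buf).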
		u_int8_t q_cond_buf[N_QUICK]; // Minor optimization

		u_int8_t *cond_buf; // To be used for condition TLVs
		if (condition_tlv_length <= N_QUICK) {
			cond_buf = q_cond_buf;
		} else {
			MALLOC(cond_buf, u_int8_t *, condition_tlv_length, M_NECP, M_NOWAIT);
			if (cond_buf == NULL) {
				NECPLOG(LOG_DEBUG, "Failed to allocate cond_buffer (%u bytes)", condition_tlv_length);
				FREE(tlv_buffer, M_NECP);
				tlv_buffer = NULL;
				continue;
			}
		}

		memset(cond_buf, 0, condition_tlv_length);
		u_int8_t *cond_buf_cursor = cond_buf;
		if (condition_mask == NECP_POLICY_CONDITION_DEFAULT) {
			cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_DEFAULT, 0, "", cond_buf, condition_tlv_length);
		} else {
			if (condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_ALL_INTERFACES, 0, "", cond_buf, condition_tlv_length);
			}
			if (condition_mask & NECP_KERNEL_CONDITION_HAS_CLIENT) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_HAS_CLIENT, 0, "", cond_buf, condition_tlv_length);
			}
			if (condition_mask & NECP_KERNEL_CONDITION_LOCAL_NETWORKS) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_LOCAL_NETWORKS, 0, "", cond_buf, condition_tlv_length);
			}
			if (condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_BOUND_INTERFACE, strlen(if_name) + 1, if_name, cond_buf, condition_tlv_length);
			}
			if (condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_IP_PROTOCOL, sizeof(policy->cond_protocol), &policy->cond_protocol, cond_buf, condition_tlv_length);
			}
			if (condition_mask & NECP_KERNEL_CONDITION_APP_ID) {
				struct necp_uuid_id_mapping *entry = necp_uuid_lookup_uuid_with_app_id_locked(policy->cond_app_id);
				if (entry != NULL) {
					cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_APPLICATION, sizeof(entry->uuid), entry->uuid, cond_buf, condition_tlv_length);
				}
			}
			if (condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
				struct necp_uuid_id_mapping *entry = necp_uuid_lookup_uuid_with_app_id_locked(policy->cond_real_app_id);
				if (entry != NULL) {
					cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_REAL_APPLICATION, sizeof(entry->uuid), entry->uuid, cond_buf, condition_tlv_length);
				}
			}
			if (condition_mask & NECP_KERNEL_CONDITION_DOMAIN) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_DOMAIN, strlen(policy->cond_domain) + 1, policy->cond_domain, cond_buf, condition_tlv_length);
			}
			if (condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID) {
				if (account_id_entry != NULL) {
					cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_ACCOUNT, strlen(account_id_entry->string) + 1, account_id_entry->string, cond_buf, condition_tlv_length);
				}
			}
			if (condition_mask & NECP_KERNEL_CONDITION_PID) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_PID, sizeof(policy->cond_pid), &policy->cond_pid, cond_buf, condition_tlv_length);
			}
			if (condition_mask & NECP_KERNEL_CONDITION_UID) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_UID, sizeof(policy->cond_uid), &policy->cond_uid, cond_buf, condition_tlv_length);
			}
			if (condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_TRAFFIC_CLASS, sizeof(policy->cond_traffic_class), &policy->cond_traffic_class, cond_buf, condition_tlv_length);
			}
			if (condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_ENTITLEMENT, 0, "", cond_buf, condition_tlv_length);
			}
			if (condition_mask & NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_ENTITLEMENT, strlen(policy->cond_custom_entitlement) + 1, policy->cond_custom_entitlement, cond_buf, condition_tlv_length);
			}
			if (condition_mask & NECP_KERNEL_CONDITION_PLATFORM_BINARY) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_PLATFORM_BINARY, 0, "", cond_buf, condition_tlv_length);
			}
			if (condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
				if (condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
					struct necp_policy_condition_addr_range range;
					memcpy(&range.start_address, &policy->cond_local_start, sizeof(policy->cond_local_start));
					memcpy(&range.end_address, &policy->cond_local_end, sizeof(policy->cond_local_end));
					cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_LOCAL_ADDR_RANGE, sizeof(range), &range, cond_buf, condition_tlv_length);
				} else {
					struct necp_policy_condition_addr addr;
					addr.prefix = policy->cond_local_prefix;
					memcpy(&addr.address, &policy->cond_local_start, sizeof(policy->cond_local_start));
					cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_LOCAL_ADDR, sizeof(addr), &addr, cond_buf, condition_tlv_length);
				}
			}
			if (condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
				if (condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
					struct necp_policy_condition_addr_range range;
					memcpy(&range.start_address, &policy->cond_remote_start, sizeof(policy->cond_remote_start));
					memcpy(&range.end_address, &policy->cond_remote_end, sizeof(policy->cond_remote_end));
					cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_REMOTE_ADDR_RANGE, sizeof(range), &range, cond_buf, condition_tlv_length);
				} else {
					struct necp_policy_condition_addr addr;
					addr.prefix = policy->cond_remote_prefix;
					memcpy(&addr.address, &policy->cond_remote_start, sizeof(policy->cond_remote_start));
					cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_REMOTE_ADDR, sizeof(addr), &addr, cond_buf, condition_tlv_length);
				}
			}
			if (condition_mask & NECP_KERNEL_CONDITION_AGENT_TYPE) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_AGENT_TYPE, sizeof(policy->cond_agent_type), &policy->cond_agent_type, cond_buf, condition_tlv_length);
			}
			if (condition_mask & NECP_KERNEL_CONDITION_CLIENT_FLAGS) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_CLIENT_FLAGS, sizeof(policy->cond_client_flags), &policy->cond_client_flags, cond_buf, condition_tlv_length);
			}
			if (condition_mask & NECP_KERNEL_CONDITION_LOCAL_EMPTY) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_FLOW_LOCAL_ADDR_EMPTY, 0, "", cond_buf, condition_tlv_length);
			}
			if (condition_mask & NECP_KERNEL_CONDITION_REMOTE_EMPTY) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_FLOW_REMOTE_ADDR_EMPTY, 0, "", cond_buf, condition_tlv_length);
			}
		}

		cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_CONDITION, cond_buf_cursor - cond_buf, cond_buf, tlv_buffer, total_allocated_bytes);
		if (cond_buf != q_cond_buf) {
			FREE(cond_buf, M_NECP);
		}

		tlv_buffer_pointers[policy_i] = tlv_buffer;
		tlv_buffer_lengths[policy_i] = (cursor - tlv_buffer);

		// This is the length of the TLV for NECP_TLV_POLICY_DUMP
		total_tlv_len += sizeof(u_int8_t) + sizeof(u_int32_t) + (cursor - tlv_buffer);
	}
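	// Every policy now has its serialized TLV recorded in tlv_buffer_pointers and
	// tlv_buffer_lengths, so the kernel policy lock can be dropped before the
	// potentially large response buffer is allocated with M_WAITOK below.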
	lck_rw_done(&necp_kernel_policy_lock);

	if (packet != NULL) {
		u_int32_t total_result_length = sizeof(struct necp_packet_header) + total_tlv_len;

		// Allow malloc to wait, since the total buffer may be large and we are not holding any locks
		MALLOC(result_buf, u_int8_t *, total_result_length, M_NECP, M_WAITOK | M_ZERO);
		if (result_buf == NULL) {
			NECPLOG(LOG_DEBUG, "Failed to allocate result_buffer (%u bytes)", total_result_length);
			REPORT_ERROR(NECP_ERROR_INTERNAL);
		}

		result_buf_cursor = result_buf;
		result_buf_cursor = necp_buffer_write_packet_header(result_buf_cursor, NECP_PACKET_TYPE_POLICY_DUMP_ALL, NECP_PACKET_FLAGS_RESPONSE, message_id);

		for (int i = 0; i < policy_count; i++) {
			if (tlv_buffer_pointers[i] != NULL) {
				result_buf_cursor = necp_buffer_write_tlv(result_buf_cursor, NECP_TLV_POLICY_DUMP, tlv_buffer_lengths[i], tlv_buffer_pointers[i], result_buf, total_result_length);
			}
		}

		if (!necp_send_ctl_data(session, result_buf, result_buf_cursor - result_buf)) {
			NECPLOG(LOG_ERR, "Failed to send response (%u bytes)", result_buf_cursor - result_buf);
		} else {
			NECPLOG(LOG_ERR, "Sent data worth %u bytes. Total result buffer length was %u bytes", result_buf_cursor - result_buf, total_result_length);
		}
	}

	if (out_buffer != 0) {
		if (out_buffer_length < total_tlv_len + sizeof(u_int32_t)) {
			NECPLOG(LOG_DEBUG, "out_buffer_length too small (%u < %u)", out_buffer_length, total_tlv_len + sizeof(u_int32_t));
			REPORT_ERROR(NECP_ERROR_INVALID_TLV);
		}

		// Allow malloc to wait, since the total buffer may be large and we are not holding any locks
		MALLOC(result_buf, u_int8_t *, total_tlv_len + sizeof(u_int32_t), M_NECP, M_WAITOK | M_ZERO);
		if (result_buf == NULL) {
			NECPLOG(LOG_DEBUG, "Failed to allocate result_buffer (%u bytes)", total_tlv_len + sizeof(u_int32_t));
			REPORT_ERROR(NECP_ERROR_INTERNAL);
		}

		// Add four bytes for total length at the start
		memcpy(result_buf, &total_tlv_len, sizeof(u_int32_t));

		result_buf_cursor = result_buf + sizeof(u_int32_t);
		for (int i = 0; i < policy_count; i++) {
			if (tlv_buffer_pointers[i] != NULL) {
				result_buf_cursor = necp_buffer_write_tlv(result_buf_cursor, NECP_TLV_POLICY_DUMP, tlv_buffer_lengths[i], tlv_buffer_pointers[i], result_buf, total_tlv_len + sizeof(u_int32_t));
			}
		}

		int copy_error = copyout(result_buf, out_buffer, total_tlv_len + sizeof(u_int32_t));
		if (copy_error) {
			NECPLOG(LOG_DEBUG, "Failed to copy out result_buffer (%u bytes)", total_tlv_len + sizeof(u_int32_t));
			REPORT_ERROR(NECP_ERROR_INTERNAL);
		}
	}

	if (error_occured) {
		if (packet != NULL) {
			if (!necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_DUMP_ALL, message_id, response_error)) {
				NECPLOG0(LOG_ERR, "Failed to send error response");
			} else {
				NECPLOG0(LOG_ERR, "Sent error response");
			}
		}
		error_code = necp_get_posix_error_for_necp_error(response_error);
	}

	if (result_buf != NULL) {
		FREE(result_buf, M_NECP);
	}

	if (tlv_buffer_pointers != NULL) {
		for (int i = 0; i < policy_count; i++) {
			if (tlv_buffer_pointers[i] != NULL) {
				FREE(tlv_buffer_pointers[i], M_NECP);
				tlv_buffer_pointers[i] = NULL;
			}
		}
		FREE(tlv_buffer_pointers, M_NECP);
	}

	if (tlv_buffer_lengths != NULL) {
		FREE(tlv_buffer_lengths, M_NECP);
	}

#undef RESET_COND_BUF
#undef REPORT_ERROR
#undef UNLOCK_AND_REPORT_ERROR

	return error_code;
}
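// Session policy management
// -------------------------
// necp_policy_create takes ownership of the caller-provided conditions, route
// rules, and result buffers; they are released later by necp_policy_delete.
// The new policy is inserted into the session's list sorted by its order value,
// and the session is marked dirty so the next necp_policy_apply_all pass
// picks it up.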
static struct necp_session_policy *
necp_policy_create(struct necp_session *session, necp_policy_order order, u_int8_t *conditions_array, u_int32_t conditions_array_size, u_int8_t *route_rules_array, u_int32_t route_rules_array_size, u_int8_t *result, u_int32_t result_size)
{
	struct necp_session_policy *new_policy = NULL;
	struct necp_session_policy *tmp_policy = NULL;

	if (session == NULL || conditions_array == NULL || result == NULL || result_size == 0) {
		return NULL;
	}

	MALLOC_ZONE(new_policy, struct necp_session_policy *, sizeof(*new_policy), M_NECP_SESSION_POLICY, M_WAITOK);
	if (new_policy == NULL) {
		return NULL;
	}

	memset(new_policy, 0, sizeof(*new_policy)); // M_ZERO is not supported for MALLOC_ZONE
	new_policy->applied = FALSE;
	new_policy->pending_deletion = FALSE;
	new_policy->pending_update = FALSE;
	new_policy->order = order;
	new_policy->conditions = conditions_array;
	new_policy->conditions_size = conditions_array_size;
	new_policy->route_rules = route_rules_array;
	new_policy->route_rules_size = route_rules_array_size;
	new_policy->result = result;
	new_policy->result_size = result_size;
	new_policy->local_id = necp_policy_get_new_id(session);

	LIST_INSERT_SORTED_ASCENDING(&session->policies, new_policy, chain, order, tmp_policy);

	session->dirty = TRUE;

	if (necp_debug) {
		NECPLOG(LOG_DEBUG, "Created NECP policy, order %d", order);
	}
	return new_policy;
}
static struct necp_session_policy *
necp_policy_find(struct necp_session *session, necp_policy_id policy_id)
{
	struct necp_session_policy *policy = NULL;
	if (policy_id == 0) {
		return NULL;
	}

	LIST_FOREACH(policy, &session->policies, chain) {
		if (policy->local_id == policy_id) {
			return policy;
		}
	}

	return NULL;
}
static inline u_int8_t
necp_policy_get_result_type(struct necp_session_policy *policy)
{
	return policy ? necp_policy_result_get_type_from_buffer(policy->result, policy->result_size) : 0;
}

static inline u_int32_t
necp_policy_get_result_parameter_length(struct necp_session_policy *policy)
{
	return policy ? necp_policy_result_get_parameter_length_from_buffer(policy->result, policy->result_size) : 0;
}

static bool
necp_policy_get_result_parameter(struct necp_session_policy *policy, u_int8_t *parameter_buffer, u_int32_t parameter_buffer_length)
{
	if (policy) {
		u_int32_t parameter_length = necp_policy_result_get_parameter_length_from_buffer(policy->result, policy->result_size);
		if (parameter_buffer_length >= parameter_length) {
			u_int8_t *parameter = necp_policy_result_get_parameter_pointer_from_buffer(policy->result, policy->result_size);
			if (parameter && parameter_buffer) {
				memcpy(parameter_buffer, parameter, parameter_length);
				return TRUE;
			}
		}
	}

	return FALSE;
}
static bool
necp_policy_mark_for_deletion(struct necp_session *session, struct necp_session_policy *policy)
{
	if (session == NULL || policy == NULL) {
		return FALSE;
	}

	policy->pending_deletion = TRUE;
	session->dirty = TRUE;

	if (necp_debug) {
		NECPLOG0(LOG_DEBUG, "Marked NECP policy for removal");
	}
	return TRUE;
}

static bool
necp_policy_mark_all_for_deletion(struct necp_session *session)
{
	struct necp_session_policy *policy = NULL;
	struct necp_session_policy *temp_policy = NULL;

	LIST_FOREACH_SAFE(policy, &session->policies, chain, temp_policy) {
		necp_policy_mark_for_deletion(session, policy);
	}

	return TRUE;
}
static bool
necp_policy_delete(struct necp_session *session, struct necp_session_policy *policy)
{
	if (session == NULL || policy == NULL) {
		return FALSE;
	}

	LIST_REMOVE(policy, chain);

	if (policy->result) {
		FREE(policy->result, M_NECP);
		policy->result = NULL;
	}

	if (policy->conditions) {
		FREE(policy->conditions, M_NECP);
		policy->conditions = NULL;
	}

	if (policy->route_rules) {
		FREE(policy->route_rules, M_NECP);
		policy->route_rules = NULL;
	}

	FREE_ZONE(policy, sizeof(*policy), M_NECP_SESSION_POLICY);

	if (necp_debug) {
		NECPLOG0(LOG_DEBUG, "Removed NECP policy");
	}
	return TRUE;
}
static bool
necp_policy_unapply(struct necp_session_policy *policy)
{
	int i = 0;
	if (policy == NULL) {
		return FALSE;
	}

	LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	// Release local uuid mappings
	if (!uuid_is_null(policy->applied_app_uuid)) {
		bool removed_mapping = FALSE;
		if (necp_remove_uuid_app_id_mapping(policy->applied_app_uuid, &removed_mapping, TRUE) && removed_mapping) {
			necp_uuid_app_id_mappings_dirty = TRUE;
			necp_num_uuid_app_id_mappings--;
		}
		uuid_clear(policy->applied_app_uuid);
	}
	if (!uuid_is_null(policy->applied_real_app_uuid)) {
		necp_remove_uuid_app_id_mapping(policy->applied_real_app_uuid, NULL, FALSE);
		uuid_clear(policy->applied_real_app_uuid);
	}
	if (!uuid_is_null(policy->applied_result_uuid)) {
		necp_remove_uuid_service_id_mapping(policy->applied_result_uuid);
		uuid_clear(policy->applied_result_uuid);
	}

	// Release string mappings
	if (policy->applied_account != NULL) {
		necp_remove_string_to_id_mapping(&necp_account_id_list, policy->applied_account);
		FREE(policy->applied_account, M_NECP);
		policy->applied_account = NULL;
	}

	// Release route rule
	if (policy->applied_route_rules_id != 0) {
		necp_remove_route_rule(&necp_route_rules, policy->applied_route_rules_id);
		policy->applied_route_rules_id = 0;
	}

	// Remove socket policies
	for (i = 0; i < MAX_KERNEL_SOCKET_POLICIES; i++) {
		if (policy->kernel_socket_policies[i] != 0) {
			necp_kernel_socket_policy_delete(policy->kernel_socket_policies[i]);
			policy->kernel_socket_policies[i] = 0;
		}
	}

	// Remove IP output policies
	for (i = 0; i < MAX_KERNEL_IP_OUTPUT_POLICIES; i++) {
		if (policy->kernel_ip_output_policies[i] != 0) {
			necp_kernel_ip_output_policy_delete(policy->kernel_ip_output_policies[i]);
			policy->kernel_ip_output_policies[i] = 0;
		}
	}

	policy->applied = FALSE;

	return TRUE;
}
#define NECP_KERNEL_POLICY_SUBORDER_ID_TUNNEL_CONDITION         0
#define NECP_KERNEL_POLICY_SUBORDER_NON_ID_TUNNEL_CONDITION     1
#define NECP_KERNEL_POLICY_SUBORDER_ID_CONDITION                2
#define NECP_KERNEL_POLICY_SUBORDER_NON_ID_CONDITIONS           3

struct necp_policy_result_ip_tunnel {
	u_int32_t secondary_result;
	char interface_name[IFXNAMSIZ];
} __attribute__((__packed__));

struct necp_policy_result_service {
	uuid_t identifier;
	u_int32_t data;
} __attribute__((__packed__));
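// necp_policy_apply ingests a session policy: it walks the policy's condition
// TLVs to build the master condition masks, interprets the result TLV, and then
// installs the derived kernel-level socket and IP output policies. The
// NECP_KERNEL_POLICY_SUBORDER_* values above are the sub-orders used when the
// corresponding IP output policies are added.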
static bool
necp_policy_apply(struct necp_session *session, struct necp_session_policy *policy)
{
	bool socket_only_conditions = FALSE;
	bool socket_ip_conditions = FALSE;

	bool socket_layer_non_id_conditions = FALSE;
	bool ip_output_layer_non_id_conditions = FALSE;
	bool ip_output_layer_non_id_only = FALSE;
	bool ip_output_layer_id_condition = FALSE;
	bool ip_output_layer_tunnel_condition_from_id = FALSE;
	bool ip_output_layer_tunnel_condition_from_non_id = FALSE;
	necp_kernel_policy_id cond_ip_output_layer_id = NECP_KERNEL_POLICY_ID_NONE;

	u_int32_t master_condition_mask = 0;
	u_int32_t master_condition_negated_mask = 0;
	ifnet_t cond_bound_interface = NULL;
	u_int32_t cond_account_id = 0;
	char *cond_domain = NULL;
	char *cond_custom_entitlement = NULL;
	pid_t cond_pid = 0;
	uid_t cond_uid = 0;
	necp_app_id cond_app_id = 0;
	necp_app_id cond_real_app_id = 0;
	struct necp_policy_condition_tc_range cond_traffic_class;
	cond_traffic_class.start_tc = 0;
	cond_traffic_class.end_tc = 0;
	u_int16_t cond_protocol = 0;
	union necp_sockaddr_union cond_local_start;
	union necp_sockaddr_union cond_local_end;
	u_int8_t cond_local_prefix = 0;
	union necp_sockaddr_union cond_remote_start;
	union necp_sockaddr_union cond_remote_end;
	u_int8_t cond_remote_prefix = 0;
	u_int32_t cond_client_flags = 0;
	u_int32_t offset = 0;
	u_int8_t ultimate_result = 0;
	u_int32_t secondary_result = 0;
	struct necp_policy_condition_agent_type cond_agent_type = {};
	necp_kernel_policy_result_parameter secondary_result_parameter;
	memset(&secondary_result_parameter, 0, sizeof(secondary_result_parameter));
	u_int32_t cond_last_interface_index = 0;
	necp_kernel_policy_result_parameter ultimate_result_parameter;
	memset(&ultimate_result_parameter, 0, sizeof(ultimate_result_parameter));

	if (policy == NULL) {
		return FALSE;
	}

	LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	// Process conditions
	while (offset < policy->conditions_size) {
		u_int32_t length = 0;
		u_int8_t *value = necp_buffer_get_tlv_value(policy->conditions, offset, &length);

		u_int8_t condition_type = necp_policy_condition_get_type_from_buffer(value, length);
		u_int8_t condition_flags = necp_policy_condition_get_flags_from_buffer(value, length);
		bool condition_is_negative = condition_flags & NECP_POLICY_CONDITION_FLAGS_NEGATIVE;
		u_int32_t condition_length = necp_policy_condition_get_value_length_from_buffer(value, length);
		u_int8_t *condition_value = necp_policy_condition_get_value_pointer_from_buffer(value, length);
		switch (condition_type) {
		case NECP_POLICY_CONDITION_DEFAULT: {
			socket_ip_conditions = TRUE;
			break;
		}
		case NECP_POLICY_CONDITION_ALL_INTERFACES: {
			master_condition_mask |= NECP_KERNEL_CONDITION_ALL_INTERFACES;
			socket_ip_conditions = TRUE;
			break;
		}
		case NECP_POLICY_CONDITION_HAS_CLIENT: {
			master_condition_mask |= NECP_KERNEL_CONDITION_HAS_CLIENT;
			socket_only_conditions = TRUE;
			break;
		}
		case NECP_POLICY_CONDITION_ENTITLEMENT: {
			if (condition_length > 0) {
				if (cond_custom_entitlement == NULL) {
					cond_custom_entitlement = necp_copy_string((char *)condition_value, condition_length);
					if (cond_custom_entitlement != NULL) {
						master_condition_mask |= NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT;
						socket_only_conditions = TRUE;
					}
				}
			} else {
				master_condition_mask |= NECP_KERNEL_CONDITION_ENTITLEMENT;
				socket_only_conditions = TRUE;
			}
			break;
		}
		case NECP_POLICY_CONDITION_PLATFORM_BINARY: {
			master_condition_mask |= NECP_KERNEL_CONDITION_PLATFORM_BINARY;
			socket_only_conditions = TRUE;
			break;
		}
		case NECP_POLICY_CONDITION_DOMAIN: {
			// Make sure there is only one such rule
			if (condition_length > 0 && cond_domain == NULL) {
				cond_domain = necp_create_trimmed_domain((char *)condition_value, condition_length);
				if (cond_domain != NULL) {
					master_condition_mask |= NECP_KERNEL_CONDITION_DOMAIN;
					if (condition_is_negative) {
						master_condition_negated_mask |= NECP_KERNEL_CONDITION_DOMAIN;
					}
					socket_only_conditions = TRUE;
				}
			}
			break;
		}
		case NECP_POLICY_CONDITION_ACCOUNT: {
			// Make sure there is only one such rule
			if (condition_length > 0 && cond_account_id == 0 && policy->applied_account == NULL) {
				char *string = NULL;
				MALLOC(string, char *, condition_length + 1, M_NECP, M_WAITOK);
				if (string != NULL) {
					memcpy(string, condition_value, condition_length);
					string[condition_length] = 0;
					cond_account_id = necp_create_string_to_id_mapping(&necp_account_id_list, string);
					if (cond_account_id != 0) {
						policy->applied_account = string; // Save the string in parent policy
						master_condition_mask |= NECP_KERNEL_CONDITION_ACCOUNT_ID;
						if (condition_is_negative) {
							master_condition_negated_mask |= NECP_KERNEL_CONDITION_ACCOUNT_ID;
						}
						socket_only_conditions = TRUE;
					} else {
						FREE(string, M_NECP);
					}
				}
			}
			break;
		}
		case NECP_POLICY_CONDITION_APPLICATION: {
			// Make sure there is only one such rule, because we save the uuid in the policy
			if (condition_length >= sizeof(uuid_t) && cond_app_id == 0) {
				bool allocated_mapping = FALSE;
				uuid_t application_uuid;
				memcpy(application_uuid, condition_value, sizeof(uuid_t));
				cond_app_id = necp_create_uuid_app_id_mapping(application_uuid, &allocated_mapping, TRUE);
				if (cond_app_id != 0) {
					if (allocated_mapping) {
						necp_uuid_app_id_mappings_dirty = TRUE;
						necp_num_uuid_app_id_mappings++;
					}
					uuid_copy(policy->applied_app_uuid, application_uuid);
					master_condition_mask |= NECP_KERNEL_CONDITION_APP_ID;
					if (condition_is_negative) {
						master_condition_negated_mask |= NECP_KERNEL_CONDITION_APP_ID;
					}
					socket_only_conditions = TRUE;
				}
			}
			break;
		}
		case NECP_POLICY_CONDITION_REAL_APPLICATION: {
			// Make sure there is only one such rule, because we save the uuid in the policy
			if (condition_length >= sizeof(uuid_t) && cond_real_app_id == 0) {
				uuid_t real_application_uuid;
				memcpy(real_application_uuid, condition_value, sizeof(uuid_t));
				cond_real_app_id = necp_create_uuid_app_id_mapping(real_application_uuid, NULL, FALSE);
				if (cond_real_app_id != 0) {
					uuid_copy(policy->applied_real_app_uuid, real_application_uuid);
					master_condition_mask |= NECP_KERNEL_CONDITION_REAL_APP_ID;
					if (condition_is_negative) {
						master_condition_negated_mask |= NECP_KERNEL_CONDITION_REAL_APP_ID;
					}
					socket_only_conditions = TRUE;
				}
			}
			break;
		}
		case NECP_POLICY_CONDITION_PID: {
			if (condition_length >= sizeof(pid_t)) {
				master_condition_mask |= NECP_KERNEL_CONDITION_PID;
				if (condition_is_negative) {
					master_condition_negated_mask |= NECP_KERNEL_CONDITION_PID;
				}
				memcpy(&cond_pid, condition_value, sizeof(cond_pid));
				socket_only_conditions = TRUE;
			}
			break;
		}
		case NECP_POLICY_CONDITION_UID: {
			if (condition_length >= sizeof(uid_t)) {
				master_condition_mask |= NECP_KERNEL_CONDITION_UID;
				if (condition_is_negative) {
					master_condition_negated_mask |= NECP_KERNEL_CONDITION_UID;
				}
				memcpy(&cond_uid, condition_value, sizeof(cond_uid));
				socket_only_conditions = TRUE;
			}
			break;
		}
		case NECP_POLICY_CONDITION_TRAFFIC_CLASS: {
			if (condition_length >= sizeof(struct necp_policy_condition_tc_range)) {
				master_condition_mask |= NECP_KERNEL_CONDITION_TRAFFIC_CLASS;
				if (condition_is_negative) {
					master_condition_negated_mask |= NECP_KERNEL_CONDITION_TRAFFIC_CLASS;
				}
				memcpy(&cond_traffic_class, condition_value, sizeof(cond_traffic_class));
				socket_only_conditions = TRUE;
			}
			break;
		}
		case NECP_POLICY_CONDITION_BOUND_INTERFACE: {
			if (condition_length <= IFXNAMSIZ && condition_length > 0) {
				char interface_name[IFXNAMSIZ];
				memcpy(interface_name, condition_value, condition_length);
				interface_name[condition_length - 1] = 0; // Make sure the string is NULL terminated
				if (ifnet_find_by_name(interface_name, &cond_bound_interface) == 0) {
					master_condition_mask |= NECP_KERNEL_CONDITION_BOUND_INTERFACE;
					if (condition_is_negative) {
						master_condition_negated_mask |= NECP_KERNEL_CONDITION_BOUND_INTERFACE;
					}
				}
				socket_ip_conditions = TRUE;
			}
			break;
		}
		case NECP_POLICY_CONDITION_IP_PROTOCOL:
		case NECP_POLICY_CONDITION_FLOW_IP_PROTOCOL: {
			if (condition_length >= sizeof(u_int16_t)) {
				master_condition_mask |= NECP_KERNEL_CONDITION_PROTOCOL;
				if (condition_is_negative) {
					master_condition_negated_mask |= NECP_KERNEL_CONDITION_PROTOCOL;
				}
				memcpy(&cond_protocol, condition_value, sizeof(cond_protocol));
				if (condition_type == NECP_POLICY_CONDITION_FLOW_IP_PROTOCOL) {
					socket_only_conditions = TRUE;
				} else {
					socket_ip_conditions = TRUE;
				}
			}
			break;
		}
		case NECP_POLICY_CONDITION_LOCAL_NETWORKS: {
			master_condition_mask |= NECP_KERNEL_CONDITION_LOCAL_NETWORKS;
			socket_ip_conditions = TRUE;
			break;
		}
		case NECP_POLICY_CONDITION_LOCAL_ADDR:
		case NECP_POLICY_CONDITION_FLOW_LOCAL_ADDR: {
			struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)condition_value;
			if (!necp_address_is_valid(&address_struct->address.sa)) {
				break;
			}

			cond_local_prefix = address_struct->prefix;
			memcpy(&cond_local_start, &address_struct->address, sizeof(address_struct->address));
			master_condition_mask |= NECP_KERNEL_CONDITION_LOCAL_START;
			master_condition_mask |= NECP_KERNEL_CONDITION_LOCAL_PREFIX;
			if (condition_is_negative) {
				master_condition_negated_mask |= NECP_KERNEL_CONDITION_LOCAL_START;
				master_condition_negated_mask |= NECP_KERNEL_CONDITION_LOCAL_PREFIX;
			}
			if (condition_type == NECP_POLICY_CONDITION_FLOW_LOCAL_ADDR) {
				socket_only_conditions = TRUE;
			} else {
				socket_ip_conditions = TRUE;
			}
			break;
		}
		case NECP_POLICY_CONDITION_REMOTE_ADDR:
		case NECP_POLICY_CONDITION_FLOW_REMOTE_ADDR: {
			struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)condition_value;
			if (!necp_address_is_valid(&address_struct->address.sa)) {
				break;
			}

			cond_remote_prefix = address_struct->prefix;
			memcpy(&cond_remote_start, &address_struct->address, sizeof(address_struct->address));
			master_condition_mask |= NECP_KERNEL_CONDITION_REMOTE_START;
			master_condition_mask |= NECP_KERNEL_CONDITION_REMOTE_PREFIX;
			if (condition_is_negative) {
				master_condition_negated_mask |= NECP_KERNEL_CONDITION_REMOTE_START;
				master_condition_negated_mask |= NECP_KERNEL_CONDITION_REMOTE_PREFIX;
			}
			if (condition_type == NECP_POLICY_CONDITION_FLOW_REMOTE_ADDR) {
				socket_only_conditions = TRUE;
			} else {
				socket_ip_conditions = TRUE;
			}
			break;
		}
		case NECP_POLICY_CONDITION_LOCAL_ADDR_RANGE:
		case NECP_POLICY_CONDITION_FLOW_LOCAL_ADDR_RANGE: {
			struct necp_policy_condition_addr_range *address_struct = (struct necp_policy_condition_addr_range *)(void *)condition_value;
			if (!necp_address_is_valid(&address_struct->start_address.sa) ||
			    !necp_address_is_valid(&address_struct->end_address.sa)) {
				break;
			}

			memcpy(&cond_local_start, &address_struct->start_address, sizeof(address_struct->start_address));
			memcpy(&cond_local_end, &address_struct->end_address, sizeof(address_struct->end_address));
			master_condition_mask |= NECP_KERNEL_CONDITION_LOCAL_START;
			master_condition_mask |= NECP_KERNEL_CONDITION_LOCAL_END;
			if (condition_is_negative) {
				master_condition_negated_mask |= NECP_KERNEL_CONDITION_LOCAL_START;
				master_condition_negated_mask |= NECP_KERNEL_CONDITION_LOCAL_END;
			}
			if (condition_type == NECP_POLICY_CONDITION_FLOW_LOCAL_ADDR_RANGE) {
				socket_only_conditions = TRUE;
			} else {
				socket_ip_conditions = TRUE;
			}
			break;
		}
		case NECP_POLICY_CONDITION_REMOTE_ADDR_RANGE:
		case NECP_POLICY_CONDITION_FLOW_REMOTE_ADDR_RANGE: {
			struct necp_policy_condition_addr_range *address_struct = (struct necp_policy_condition_addr_range *)(void *)condition_value;
			if (!necp_address_is_valid(&address_struct->start_address.sa) ||
			    !necp_address_is_valid(&address_struct->end_address.sa)) {
				break;
			}

			memcpy(&cond_remote_start, &address_struct->start_address, sizeof(address_struct->start_address));
			memcpy(&cond_remote_end, &address_struct->end_address, sizeof(address_struct->end_address));
			master_condition_mask |= NECP_KERNEL_CONDITION_REMOTE_START;
			master_condition_mask |= NECP_KERNEL_CONDITION_REMOTE_END;
			if (condition_is_negative) {
				master_condition_negated_mask |= NECP_KERNEL_CONDITION_REMOTE_START;
				master_condition_negated_mask |= NECP_KERNEL_CONDITION_REMOTE_END;
			}
			if (condition_type == NECP_POLICY_CONDITION_FLOW_REMOTE_ADDR_RANGE) {
				socket_only_conditions = TRUE;
			} else {
				socket_ip_conditions = TRUE;
			}
			break;
		}
		case NECP_POLICY_CONDITION_AGENT_TYPE: {
			if (condition_length >= sizeof(cond_agent_type)) {
				master_condition_mask |= NECP_KERNEL_CONDITION_AGENT_TYPE;
				memcpy(&cond_agent_type, condition_value, sizeof(cond_agent_type));
				socket_only_conditions = TRUE;
			}
			break;
		}
		case NECP_POLICY_CONDITION_CLIENT_FLAGS: {
			if (condition_is_negative) {
				master_condition_negated_mask |= NECP_KERNEL_CONDITION_CLIENT_FLAGS;
			}
			master_condition_mask |= NECP_KERNEL_CONDITION_CLIENT_FLAGS;
			socket_only_conditions = TRUE;
			if (condition_length >= sizeof(u_int32_t)) {
				memcpy(&cond_client_flags, condition_value, sizeof(cond_client_flags));
			} else {
				// Empty means match on fallback traffic
				cond_client_flags = NECP_CLIENT_PARAMETER_FLAG_FALLBACK_TRAFFIC;
			}
			break;
		}
		case NECP_POLICY_CONDITION_FLOW_LOCAL_ADDR_EMPTY: {
			master_condition_mask |= NECP_KERNEL_CONDITION_LOCAL_EMPTY;
			if (condition_is_negative) {
				master_condition_negated_mask |= NECP_KERNEL_CONDITION_LOCAL_EMPTY;
			}
			socket_only_conditions = TRUE;
			break;
		}
		case NECP_POLICY_CONDITION_FLOW_REMOTE_ADDR_EMPTY: {
			master_condition_mask |= NECP_KERNEL_CONDITION_REMOTE_EMPTY;
			if (condition_is_negative) {
				master_condition_negated_mask |= NECP_KERNEL_CONDITION_REMOTE_EMPTY;
			}
			socket_only_conditions = TRUE;
			break;
		}
		default: {
			break;
		}
		}

		offset += sizeof(u_int8_t) + sizeof(u_int32_t) + length;
	}
	ultimate_result = necp_policy_get_result_type(policy);
	switch (ultimate_result) {
	case NECP_POLICY_RESULT_PASS: {
		if (socket_only_conditions) { // socket_ip_conditions can be TRUE or FALSE
			socket_layer_non_id_conditions = TRUE;
			ip_output_layer_id_condition = TRUE;
		} else if (socket_ip_conditions) {
			socket_layer_non_id_conditions = TRUE;
			ip_output_layer_id_condition = TRUE;
			ip_output_layer_non_id_conditions = TRUE;
		}
		break;
	}
	case NECP_POLICY_RESULT_DROP: {
		if (socket_only_conditions) { // socket_ip_conditions can be TRUE or FALSE
			socket_layer_non_id_conditions = TRUE;
		} else if (socket_ip_conditions) {
			socket_layer_non_id_conditions = TRUE;
			ip_output_layer_non_id_conditions = TRUE;
			ip_output_layer_non_id_only = TRUE; // Only apply drop to packets that didn't go through socket layer
		}
		break;
	}
	case NECP_POLICY_RESULT_SKIP: {
		u_int32_t skip_policy_order = 0;
		if (necp_policy_get_result_parameter(policy, (u_int8_t *)&skip_policy_order, sizeof(skip_policy_order))) {
			ultimate_result_parameter.skip_policy_order = skip_policy_order;
		}

		if (socket_only_conditions) { // socket_ip_conditions can be TRUE or FALSE
			socket_layer_non_id_conditions = TRUE;
			ip_output_layer_id_condition = TRUE;
		} else if (socket_ip_conditions) {
			socket_layer_non_id_conditions = TRUE;
			ip_output_layer_non_id_conditions = TRUE;
		}
		break;
	}
	case NECP_POLICY_RESULT_SOCKET_DIVERT:
	case NECP_POLICY_RESULT_SOCKET_FILTER: {
		u_int32_t control_unit = 0;
		if (necp_policy_get_result_parameter(policy, (u_int8_t *)&control_unit, sizeof(control_unit))) {
			ultimate_result_parameter.flow_divert_control_unit = control_unit;
		}
		socket_layer_non_id_conditions = TRUE;
		break;
	}
	case NECP_POLICY_RESULT_IP_TUNNEL: {
		struct necp_policy_result_ip_tunnel tunnel_parameters;
		u_int32_t tunnel_parameters_length = necp_policy_get_result_parameter_length(policy);
		if (tunnel_parameters_length > sizeof(u_int32_t) &&
		    tunnel_parameters_length <= sizeof(struct necp_policy_result_ip_tunnel) &&
		    necp_policy_get_result_parameter(policy, (u_int8_t *)&tunnel_parameters, sizeof(tunnel_parameters))) {
			ifnet_t tunnel_interface = NULL;
			tunnel_parameters.interface_name[tunnel_parameters_length - sizeof(u_int32_t) - 1] = 0; // Make sure the string is NULL terminated
			if (ifnet_find_by_name(tunnel_parameters.interface_name, &tunnel_interface) == 0) {
				ultimate_result_parameter.tunnel_interface_index = tunnel_interface->if_index;
				ifnet_release(tunnel_interface);
			}

			secondary_result = tunnel_parameters.secondary_result;
			if (secondary_result) {
				cond_last_interface_index = ultimate_result_parameter.tunnel_interface_index;
			}
		}

		if (socket_only_conditions) { // socket_ip_conditions can be TRUE or FALSE
			socket_layer_non_id_conditions = TRUE;
			ip_output_layer_id_condition = TRUE;
			if (secondary_result) {
				ip_output_layer_tunnel_condition_from_id = TRUE;
			}
		} else if (socket_ip_conditions) {
			socket_layer_non_id_conditions = TRUE;
			ip_output_layer_id_condition = TRUE;
			ip_output_layer_non_id_conditions = TRUE;
			if (secondary_result) {
				ip_output_layer_tunnel_condition_from_id = TRUE;
				ip_output_layer_tunnel_condition_from_non_id = TRUE;
			}
		}
		break;
	}
	case NECP_POLICY_RESULT_TRIGGER:
	case NECP_POLICY_RESULT_TRIGGER_IF_NEEDED:
	case NECP_POLICY_RESULT_TRIGGER_SCOPED:
	case NECP_POLICY_RESULT_NO_TRIGGER_SCOPED: {
		struct necp_policy_result_service service_parameters;
		u_int32_t service_result_length = necp_policy_get_result_parameter_length(policy);
		bool has_extra_service_data = FALSE;
		if (service_result_length >= (sizeof(service_parameters))) {
			has_extra_service_data = TRUE;
		}
		if (necp_policy_get_result_parameter(policy, (u_int8_t *)&service_parameters, sizeof(service_parameters))) {
			ultimate_result_parameter.service.identifier = necp_create_uuid_service_id_mapping(service_parameters.identifier);
			if (ultimate_result_parameter.service.identifier != 0) {
				uuid_copy(policy->applied_result_uuid, service_parameters.identifier);
				socket_layer_non_id_conditions = TRUE;
				if (has_extra_service_data) {
					ultimate_result_parameter.service.data = service_parameters.data;
				} else {
					ultimate_result_parameter.service.data = 0;
				}
			}
		}
		break;
	}
	case NECP_POLICY_RESULT_USE_NETAGENT:
	case NECP_POLICY_RESULT_NETAGENT_SCOPED: {
		uuid_t netagent_uuid;
		if (necp_policy_get_result_parameter(policy, (u_int8_t *)&netagent_uuid, sizeof(netagent_uuid))) {
			ultimate_result_parameter.netagent_id = necp_create_uuid_service_id_mapping(netagent_uuid);
			if (ultimate_result_parameter.netagent_id != 0) {
				uuid_copy(policy->applied_result_uuid, netagent_uuid);
				socket_layer_non_id_conditions = TRUE;
			}
		}
		break;
	}
	case NECP_POLICY_RESULT_SOCKET_SCOPED: {
		u_int32_t interface_name_length = necp_policy_get_result_parameter_length(policy);
		if (interface_name_length <= IFXNAMSIZ && interface_name_length > 0) {
			char interface_name[IFXNAMSIZ];
			ifnet_t scope_interface = NULL;
			necp_policy_get_result_parameter(policy, (u_int8_t *)interface_name, interface_name_length);
			interface_name[interface_name_length - 1] = 0; // Make sure the string is NULL terminated
			if (ifnet_find_by_name(interface_name, &scope_interface) == 0) {
				ultimate_result_parameter.scoped_interface_index = scope_interface->if_index;
				socket_layer_non_id_conditions = TRUE;
				ifnet_release(scope_interface);
			}
		}
		break;
	}
	case NECP_POLICY_RESULT_SCOPED_DIRECT: {
		socket_layer_non_id_conditions = TRUE;
		break;
	}
	case NECP_POLICY_RESULT_ALLOW_UNENTITLED: {
		socket_layer_non_id_conditions = TRUE;
		break;
	}
	case NECP_POLICY_RESULT_ROUTE_RULES: {
		if (policy->route_rules != NULL && policy->route_rules_size > 0) {
			u_int32_t route_rule_id = necp_create_route_rule(&necp_route_rules, policy->route_rules, policy->route_rules_size);
			if (route_rule_id > 0) {
				policy->applied_route_rules_id = route_rule_id;
				ultimate_result_parameter.route_rule_id = route_rule_id;
				if (socket_only_conditions) { // socket_ip_conditions can be TRUE or FALSE
					socket_layer_non_id_conditions = TRUE;
				} else if (socket_ip_conditions) {
					socket_layer_non_id_conditions = TRUE;
					ip_output_layer_non_id_conditions = TRUE;
					ip_output_layer_non_id_only = TRUE; // Only apply route rules to packets that didn't go through socket layer
				}
			}
		}
		break;
	}
	default: {
		break;
	}
	}
	if (socket_layer_non_id_conditions) {
		necp_kernel_policy_id policy_id = necp_kernel_socket_policy_add(policy->order, session->session_order, session->proc_pid, master_condition_mask, master_condition_negated_mask, cond_app_id, cond_real_app_id, cond_custom_entitlement, cond_account_id, cond_domain, cond_pid, cond_uid, cond_bound_interface, cond_traffic_class, cond_protocol, &cond_local_start, &cond_local_end, cond_local_prefix, &cond_remote_start, &cond_remote_end, cond_remote_prefix, &cond_agent_type, cond_client_flags, ultimate_result, ultimate_result_parameter);

		if (policy_id == 0) {
			NECPLOG0(LOG_DEBUG, "Error applying socket kernel policy");
			goto fail;
		}

		cond_ip_output_layer_id = policy_id;
		policy->kernel_socket_policies[0] = policy_id;
	}

	if (ip_output_layer_non_id_conditions) {
		u_int32_t condition_mask = master_condition_mask;
		if (ip_output_layer_non_id_only) {
			condition_mask |= NECP_KERNEL_CONDITION_POLICY_ID;
		}

		necp_kernel_policy_id policy_id = necp_kernel_ip_output_policy_add(policy->order, NECP_KERNEL_POLICY_SUBORDER_NON_ID_CONDITIONS, session->session_order, session->proc_pid, condition_mask, master_condition_negated_mask, NECP_KERNEL_POLICY_ID_NONE, cond_bound_interface, 0, cond_protocol, &cond_local_start, &cond_local_end, cond_local_prefix, &cond_remote_start, &cond_remote_end, cond_remote_prefix, ultimate_result, ultimate_result_parameter);

		if (policy_id == 0) {
			NECPLOG0(LOG_DEBUG, "Error applying IP output kernel policy");
			goto fail;
		}

		policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_NON_ID_CONDITIONS] = policy_id;
	}

	if (ip_output_layer_id_condition) {
		necp_kernel_policy_id policy_id = necp_kernel_ip_output_policy_add(policy->order, NECP_KERNEL_POLICY_SUBORDER_ID_CONDITION, session->session_order, session->proc_pid, NECP_KERNEL_CONDITION_POLICY_ID | NECP_KERNEL_CONDITION_ALL_INTERFACES, 0, cond_ip_output_layer_id, NULL, 0, 0, NULL, NULL, 0, NULL, NULL, 0, ultimate_result, ultimate_result_parameter);

		if (policy_id == 0) {
			NECPLOG0(LOG_DEBUG, "Error applying IP output kernel policy");
			goto fail;
		}

		policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_ID_CONDITION] = policy_id;
	}

	// Extra policies for IP Output tunnels for when packets loop back
	if (ip_output_layer_tunnel_condition_from_id) {
		necp_kernel_policy_id policy_id = necp_kernel_ip_output_policy_add(policy->order, NECP_KERNEL_POLICY_SUBORDER_NON_ID_TUNNEL_CONDITION, session->session_order, session->proc_pid, NECP_KERNEL_CONDITION_POLICY_ID | NECP_KERNEL_CONDITION_LAST_INTERFACE | NECP_KERNEL_CONDITION_ALL_INTERFACES, 0, policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_NON_ID_CONDITIONS], NULL, cond_last_interface_index, 0, NULL, NULL, 0, NULL, NULL, 0, secondary_result, secondary_result_parameter);

		if (policy_id == 0) {
			NECPLOG0(LOG_DEBUG, "Error applying IP output kernel policy");
			goto fail;
		}

		policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_NON_ID_TUNNEL_CONDITION] = policy_id;
	}

	if (ip_output_layer_tunnel_condition_from_id) {
		necp_kernel_policy_id policy_id = necp_kernel_ip_output_policy_add(policy->order, NECP_KERNEL_POLICY_SUBORDER_ID_TUNNEL_CONDITION, session->session_order, session->proc_pid, NECP_KERNEL_CONDITION_POLICY_ID | NECP_KERNEL_CONDITION_LAST_INTERFACE | NECP_KERNEL_CONDITION_ALL_INTERFACES, 0, policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_ID_CONDITION], NULL, cond_last_interface_index, 0, NULL, NULL, 0, NULL, NULL, 0, secondary_result, secondary_result_parameter);

		if (policy_id == 0) {
			NECPLOG0(LOG_DEBUG, "Error applying IP output kernel policy");
			goto fail;
		}

		policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_ID_TUNNEL_CONDITION] = policy_id;
	}

	policy->applied = TRUE;
	policy->pending_update = FALSE;
	return TRUE;

fail:
	return FALSE;
}
static bool
necp_policy_apply_all(struct necp_session *session)
{
	struct necp_session_policy *policy = NULL;
	struct necp_session_policy *temp_policy = NULL;
	struct kev_necp_policies_changed_data kev_data;
	kev_data.changed_count = 0;

	lck_rw_lock_exclusive(&necp_kernel_policy_lock);

	// Remove existing applied policies
	if (session->dirty) {
		LIST_FOREACH_SAFE(policy, &session->policies, chain, temp_policy) {
			if (policy->pending_deletion) {
				if (policy->applied) {
					necp_policy_unapply(policy);
				}
				// Delete the policy
				necp_policy_delete(session, policy);
			} else if (!policy->applied) {
				necp_policy_apply(session, policy);
			} else if (policy->pending_update) {
				// Must have been applied, but needs an update. Remove and re-add.
				necp_policy_unapply(policy);
				necp_policy_apply(session, policy);
			}
		}

		necp_kernel_socket_policies_update_uuid_table();
		necp_kernel_socket_policies_reprocess();
		necp_kernel_ip_output_policies_reprocess();

		// Clear dirty bit flags
		session->dirty = FALSE;
	}

	lck_rw_done(&necp_kernel_policy_lock);

	necp_update_all_clients();
	necp_post_change_event(&kev_data);

	if (necp_debug) {
		NECPLOG0(LOG_DEBUG, "Applied NECP policies");
	}

	return TRUE;
}
// Kernel Policy Management
// ---------------------
// Kernel policies are derived from session policies
static necp_kernel_policy_id
necp_kernel_policy_get_new_id(bool socket_level)
{
	static necp_kernel_policy_id necp_last_kernel_socket_policy_id = 0;
	static necp_kernel_policy_id necp_last_kernel_ip_policy_id = 0;

	necp_kernel_policy_id newid = NECP_KERNEL_POLICY_ID_NONE;

	LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	if (socket_level) {
		bool wrapped = FALSE;
		do {
			necp_last_kernel_socket_policy_id++;
			if (necp_last_kernel_socket_policy_id < NECP_KERNEL_POLICY_ID_FIRST_VALID_SOCKET ||
			    necp_last_kernel_socket_policy_id >= NECP_KERNEL_POLICY_ID_FIRST_VALID_IP) {
				if (wrapped) {
					// Already wrapped, give up
					NECPLOG0(LOG_ERR, "Failed to find a free socket kernel policy ID.\n");
					return NECP_KERNEL_POLICY_ID_NONE;
				}
				necp_last_kernel_socket_policy_id = NECP_KERNEL_POLICY_ID_FIRST_VALID_SOCKET;
				wrapped = TRUE;
			}
			newid = necp_last_kernel_socket_policy_id;
		} while (necp_kernel_socket_policy_find(newid) != NULL); // If already used, keep trying
	} else {
		bool wrapped = FALSE;
		do {
			necp_last_kernel_ip_policy_id++;
			if (necp_last_kernel_ip_policy_id < NECP_KERNEL_POLICY_ID_FIRST_VALID_IP) {
				if (wrapped) {
					// Already wrapped, give up
					NECPLOG0(LOG_ERR, "Failed to find a free IP kernel policy ID.\n");
					return NECP_KERNEL_POLICY_ID_NONE;
				}
				necp_last_kernel_ip_policy_id = NECP_KERNEL_POLICY_ID_FIRST_VALID_IP;
				wrapped = TRUE;
			}
			newid = necp_last_kernel_ip_policy_id;
		} while (necp_kernel_ip_output_policy_find(newid) != NULL); // If already used, keep trying
	}

	if (newid == NECP_KERNEL_POLICY_ID_NONE) {
		NECPLOG0(LOG_ERR, "Allocate kernel policy id failed.\n");
		return NECP_KERNEL_POLICY_ID_NONE;
	}

	return newid;
}

#define NECP_KERNEL_VALID_SOCKET_CONDITIONS (NECP_KERNEL_CONDITION_APP_ID | NECP_KERNEL_CONDITION_REAL_APP_ID | NECP_KERNEL_CONDITION_DOMAIN | NECP_KERNEL_CONDITION_ACCOUNT_ID | NECP_KERNEL_CONDITION_PID | NECP_KERNEL_CONDITION_UID | NECP_KERNEL_CONDITION_ALL_INTERFACES | NECP_KERNEL_CONDITION_BOUND_INTERFACE | NECP_KERNEL_CONDITION_TRAFFIC_CLASS | NECP_KERNEL_CONDITION_PROTOCOL | NECP_KERNEL_CONDITION_LOCAL_START | NECP_KERNEL_CONDITION_LOCAL_END | NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_REMOTE_START | NECP_KERNEL_CONDITION_REMOTE_END | NECP_KERNEL_CONDITION_REMOTE_PREFIX | NECP_KERNEL_CONDITION_ENTITLEMENT | NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT | NECP_KERNEL_CONDITION_AGENT_TYPE | NECP_KERNEL_CONDITION_HAS_CLIENT | NECP_KERNEL_CONDITION_LOCAL_NETWORKS | NECP_KERNEL_CONDITION_CLIENT_FLAGS | NECP_KERNEL_CONDITION_LOCAL_EMPTY | NECP_KERNEL_CONDITION_REMOTE_EMPTY | NECP_KERNEL_CONDITION_PLATFORM_BINARY)
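// Note: the cond_domain and cond_custom_entitlement strings passed in below are
// adopted by the new kernel policy and freed in necp_kernel_socket_policy_delete,
// while cond_bound_interface is retained here with ifnet_reference() and released
// with ifnet_release() on deletion.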
static necp_kernel_policy_id
necp_kernel_socket_policy_add(necp_policy_order order, u_int32_t session_order, int session_pid, u_int32_t condition_mask, u_int32_t condition_negated_mask, necp_app_id cond_app_id, necp_app_id cond_real_app_id, char *cond_custom_entitlement, u_int32_t cond_account_id, char *cond_domain, pid_t cond_pid, uid_t cond_uid, ifnet_t cond_bound_interface, struct necp_policy_condition_tc_range cond_traffic_class, u_int16_t cond_protocol, union necp_sockaddr_union *cond_local_start, union necp_sockaddr_union *cond_local_end, u_int8_t cond_local_prefix, union necp_sockaddr_union *cond_remote_start, union necp_sockaddr_union *cond_remote_end, u_int8_t cond_remote_prefix, struct necp_policy_condition_agent_type *cond_agent_type, u_int32_t cond_client_flags, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter)
{
	struct necp_kernel_socket_policy *new_kernel_policy = NULL;
	struct necp_kernel_socket_policy *tmp_kernel_policy = NULL;

	MALLOC_ZONE(new_kernel_policy, struct necp_kernel_socket_policy *, sizeof(*new_kernel_policy), M_NECP_SOCKET_POLICY, M_WAITOK);
	if (new_kernel_policy == NULL) {
		return 0;
	}

	memset(new_kernel_policy, 0, sizeof(*new_kernel_policy)); // M_ZERO is not supported for MALLOC_ZONE
	new_kernel_policy->id = necp_kernel_policy_get_new_id(true);
	new_kernel_policy->order = order;
	new_kernel_policy->session_order = session_order;
	new_kernel_policy->session_pid = session_pid;

	// Sanitize condition mask
	new_kernel_policy->condition_mask = (condition_mask & NECP_KERNEL_VALID_SOCKET_CONDITIONS);
	if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES) && (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE)) {
		new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_BOUND_INTERFACE;
	}
	if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) && !(new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID)) {
		new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_REAL_APP_ID;
	}
	if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT) && !(new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID)) {
		new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_ENTITLEMENT;
	}
	if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) && (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX)) {
		new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_LOCAL_PREFIX;
	}
	if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) && (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX)) {
		new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_REMOTE_PREFIX;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_EMPTY) {
		new_kernel_policy->condition_mask &= ~(NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_LOCAL_END);
	}
	if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_EMPTY)) {
		new_kernel_policy->condition_mask &= ~(NECP_KERNEL_CONDITION_REMOTE_PREFIX | NECP_KERNEL_CONDITION_REMOTE_END);
	}
	new_kernel_policy->condition_negated_mask = condition_negated_mask & new_kernel_policy->condition_mask;

	// Set condition values
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID) {
		new_kernel_policy->cond_app_id = cond_app_id;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
		new_kernel_policy->cond_real_app_id = cond_real_app_id;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT) {
		new_kernel_policy->cond_custom_entitlement = cond_custom_entitlement;
		new_kernel_policy->cond_custom_entitlement_matched = necp_boolean_state_unknown;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID) {
		new_kernel_policy->cond_account_id = cond_account_id;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_DOMAIN) {
		new_kernel_policy->cond_domain = cond_domain;
		new_kernel_policy->cond_domain_dot_count = necp_count_dots(cond_domain, strlen(cond_domain));
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PID) {
		new_kernel_policy->cond_pid = cond_pid;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_UID) {
		new_kernel_policy->cond_uid = cond_uid;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
		if (cond_bound_interface) {
			ifnet_reference(cond_bound_interface);
		}
		new_kernel_policy->cond_bound_interface = cond_bound_interface;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
		new_kernel_policy->cond_traffic_class = cond_traffic_class;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
		new_kernel_policy->cond_protocol = cond_protocol;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
		memcpy(&new_kernel_policy->cond_local_start, cond_local_start, cond_local_start->sa.sa_len);
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
		memcpy(&new_kernel_policy->cond_local_end, cond_local_end, cond_local_end->sa.sa_len);
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
		new_kernel_policy->cond_local_prefix = cond_local_prefix;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
		memcpy(&new_kernel_policy->cond_remote_start, cond_remote_start, cond_remote_start->sa.sa_len);
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
		memcpy(&new_kernel_policy->cond_remote_end, cond_remote_end, cond_remote_end->sa.sa_len);
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
		new_kernel_policy->cond_remote_prefix = cond_remote_prefix;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_AGENT_TYPE) {
		memcpy(&new_kernel_policy->cond_agent_type, cond_agent_type, sizeof(*cond_agent_type));
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_CLIENT_FLAGS) {
		new_kernel_policy->cond_client_flags = cond_client_flags;
	}

	new_kernel_policy->result = result;
	memcpy(&new_kernel_policy->result_parameter, &result_parameter, sizeof(result_parameter));

	if (necp_debug) {
		NECPLOG(LOG_DEBUG, "Added kernel policy: socket, id=%d, mask=%x\n", new_kernel_policy->id, new_kernel_policy->condition_mask);
	}
	LIST_INSERT_SORTED_TWICE_ASCENDING(&necp_kernel_socket_policies, new_kernel_policy, chain, session_order, order, tmp_kernel_policy);

	return new_kernel_policy ? new_kernel_policy->id : 0;
}
*
4699 necp_kernel_socket_policy_find(necp_kernel_policy_id policy_id
)
4701 struct necp_kernel_socket_policy
*kernel_policy
= NULL
;
4702 struct necp_kernel_socket_policy
*tmp_kernel_policy
= NULL
;
4704 if (policy_id
== 0) {
4708 LIST_FOREACH_SAFE(kernel_policy
, &necp_kernel_socket_policies
, chain
, tmp_kernel_policy
) {
4709 if (kernel_policy
->id
== policy_id
) {
4710 return kernel_policy
;
4718 necp_kernel_socket_policy_delete(necp_kernel_policy_id policy_id
)
4720 struct necp_kernel_socket_policy
*policy
= NULL
;
4722 LCK_RW_ASSERT(&necp_kernel_policy_lock
, LCK_RW_ASSERT_EXCLUSIVE
);
4724 policy
= necp_kernel_socket_policy_find(policy_id
);
4726 LIST_REMOVE(policy
, chain
);
4728 if (policy
->cond_bound_interface
) {
4729 ifnet_release(policy
->cond_bound_interface
);
4730 policy
->cond_bound_interface
= NULL
;
4733 if (policy
->cond_domain
) {
4734 FREE(policy
->cond_domain
, M_NECP
);
4735 policy
->cond_domain
= NULL
;
4738 if (policy
->cond_custom_entitlement
) {
4739 FREE(policy
->cond_custom_entitlement
, M_NECP
);
4740 policy
->cond_custom_entitlement
= NULL
;
4743 FREE_ZONE(policy
, sizeof(*policy
), M_NECP_SOCKET_POLICY
);
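// necp_get_result_description formats a human-readable summary of a kernel
// policy result (and its parameter) into result_string, which must hold at
// least MAX_RESULT_STRING_LEN bytes; it backs the result strings used for
// debug logging and the policy dump output above.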
static inline const char *
necp_get_result_description(char *result_string, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter)
{
	uuid_string_t uuid_string;
	switch (result) {
	case NECP_KERNEL_POLICY_RESULT_NONE: {
		snprintf(result_string, MAX_RESULT_STRING_LEN, "None");
		break;
	}
	case NECP_KERNEL_POLICY_RESULT_PASS: {
		snprintf(result_string, MAX_RESULT_STRING_LEN, "Pass");
		break;
	}
	case NECP_KERNEL_POLICY_RESULT_SKIP: {
		snprintf(result_string, MAX_RESULT_STRING_LEN, "Skip (%u)", result_parameter.skip_policy_order);
		break;
	}
	case NECP_KERNEL_POLICY_RESULT_DROP: {
		snprintf(result_string, MAX_RESULT_STRING_LEN, "Drop");
		break;
	}
	case NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT: {
		snprintf(result_string, MAX_RESULT_STRING_LEN, "SocketDivert (%d)", result_parameter.flow_divert_control_unit);
		break;
	}
	case NECP_KERNEL_POLICY_RESULT_SOCKET_FILTER: {
		snprintf(result_string, MAX_RESULT_STRING_LEN, "SocketFilter (%d)", result_parameter.filter_control_unit);
		break;
	}
	case NECP_KERNEL_POLICY_RESULT_IP_TUNNEL: {
		ifnet_t interface = ifindex2ifnet[result_parameter.tunnel_interface_index];
		snprintf(result_string, MAX_RESULT_STRING_LEN, "IPTunnel (%s%d)", ifnet_name(interface), ifnet_unit(interface));
		break;
	}
	case NECP_KERNEL_POLICY_RESULT_IP_FILTER: {
		snprintf(result_string, MAX_RESULT_STRING_LEN, "IPFilter");
		break;
	}
	case NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED: {
		ifnet_t interface = ifindex2ifnet[result_parameter.scoped_interface_index];
		snprintf(result_string, MAX_RESULT_STRING_LEN, "SocketScoped (%s%d)", ifnet_name(interface), ifnet_unit(interface));
		break;
	}
	case NECP_KERNEL_POLICY_RESULT_SCOPED_DIRECT: {
		snprintf(result_string, MAX_RESULT_STRING_LEN, "ScopedDirect");
		break;
	}
	case NECP_KERNEL_POLICY_RESULT_ALLOW_UNENTITLED: {
		snprintf(result_string, MAX_RESULT_STRING_LEN, "AllowUnentitled");
		break;
	}
	case NECP_KERNEL_POLICY_RESULT_ROUTE_RULES: {
		int index = 0;
		char interface_names[MAX_ROUTE_RULE_INTERFACES][IFXNAMSIZ];
		struct necp_route_rule *route_rule = necp_lookup_route_rule_locked(&necp_route_rules, result_parameter.route_rule_id);
		if (route_rule != NULL) {
			for (index = 0; index < MAX_ROUTE_RULE_INTERFACES; index++) {
				if (route_rule->exception_if_indices[index] != 0) {
					ifnet_t interface = ifindex2ifnet[route_rule->exception_if_indices[index]];
					snprintf(interface_names[index], IFXNAMSIZ, "%s%d", ifnet_name(interface), ifnet_unit(interface));
				} else {
					memset(interface_names[index], 0, IFXNAMSIZ);
				}
			}
			switch (route_rule->default_action) {
			case NECP_ROUTE_RULE_DENY_INTERFACE:
				snprintf(result_string, MAX_RESULT_STRING_LEN, "RouteRules (Only %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)",
				    (route_rule->cellular_action == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? "Cell " : "",
				    (route_rule->wifi_action == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? "WiFi " : "",
				    (route_rule->wired_action == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? "Wired " : "",
				    (route_rule->expensive_action == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? "Exp " : "",
				    (route_rule->constrained_action == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? "Constrained " : "",
				    (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[0] : "",
				    (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
				    (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[1] : "",
				    (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
				    (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[2] : "",
				    (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
				    (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[3] : "",
				    (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
				    (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[4] : "",
				    (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
				    (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[5] : "",
				    (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
				    (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[6] : "",
				    (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
				    (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[7] : "",
				    (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
				    (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[8] : "",
				    (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_ALLOW_INTERFACE
) ? " " : "",
4840 (route_rule
->exception_if_actions
[9] == NECP_ROUTE_RULE_ALLOW_INTERFACE
) ? interface_names
[9] : "");
4842 case NECP_ROUTE_RULE_ALLOW_INTERFACE
:
4843 snprintf(result_string
, MAX_RESULT_STRING_LEN
, "RouteRules (%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)",
4844 (route_rule
->cellular_action
== NECP_ROUTE_RULE_DENY_INTERFACE
) ? "!Cell " : "",
4845 (route_rule
->wifi_action
== NECP_ROUTE_RULE_DENY_INTERFACE
) ? "!WiFi " : "",
4846 (route_rule
->wired_action
== NECP_ROUTE_RULE_DENY_INTERFACE
) ? "!Wired " : "",
4847 (route_rule
->expensive_action
== NECP_ROUTE_RULE_DENY_INTERFACE
) ? "!Exp " : "",
4848 (route_rule
->constrained_action
== NECP_ROUTE_RULE_DENY_INTERFACE
) ? "!Constrained " : "",
4849 (route_rule
->exception_if_actions
[0] == NECP_ROUTE_RULE_DENY_INTERFACE
) ? "!" : "",
4850 (route_rule
->exception_if_actions
[0] == NECP_ROUTE_RULE_DENY_INTERFACE
) ? interface_names
[0] : "",
4851 (route_rule
->exception_if_actions
[1] == NECP_ROUTE_RULE_DENY_INTERFACE
) ? "!" : "",
4852 (route_rule
->exception_if_actions
[1] == NECP_ROUTE_RULE_DENY_INTERFACE
) ? interface_names
[1] : "",
4853 (route_rule
->exception_if_actions
[2] == NECP_ROUTE_RULE_DENY_INTERFACE
) ? "!" : "",
4854 (route_rule
->exception_if_actions
[2] == NECP_ROUTE_RULE_DENY_INTERFACE
) ? interface_names
[2] : "",
4855 (route_rule
->exception_if_actions
[3] == NECP_ROUTE_RULE_DENY_INTERFACE
) ? "!" : "",
4856 (route_rule
->exception_if_actions
[3] == NECP_ROUTE_RULE_DENY_INTERFACE
) ? interface_names
[3] : "",
4857 (route_rule
->exception_if_actions
[4] == NECP_ROUTE_RULE_DENY_INTERFACE
) ? "!" : "",
4858 (route_rule
->exception_if_actions
[4] == NECP_ROUTE_RULE_DENY_INTERFACE
) ? interface_names
[4] : "",
4859 (route_rule
->exception_if_actions
[5] == NECP_ROUTE_RULE_DENY_INTERFACE
) ? "!" : "",
4860 (route_rule
->exception_if_actions
[5] == NECP_ROUTE_RULE_DENY_INTERFACE
) ? interface_names
[5] : "",
4861 (route_rule
->exception_if_actions
[6] == NECP_ROUTE_RULE_DENY_INTERFACE
) ? "!" : "",
4862 (route_rule
->exception_if_actions
[6] == NECP_ROUTE_RULE_DENY_INTERFACE
) ? interface_names
[6] : "",
4863 (route_rule
->exception_if_actions
[7] == NECP_ROUTE_RULE_DENY_INTERFACE
) ? "!" : "",
4864 (route_rule
->exception_if_actions
[7] == NECP_ROUTE_RULE_DENY_INTERFACE
) ? interface_names
[7] : "",
4865 (route_rule
->exception_if_actions
[8] == NECP_ROUTE_RULE_DENY_INTERFACE
) ? "!" : "",
4866 (route_rule
->exception_if_actions
[8] == NECP_ROUTE_RULE_DENY_INTERFACE
) ? interface_names
[8] : "",
4867 (route_rule
->exception_if_actions
[9] == NECP_ROUTE_RULE_DENY_INTERFACE
) ? "!" : "",
4868 (route_rule
->exception_if_actions
[9] == NECP_ROUTE_RULE_DENY_INTERFACE
) ? interface_names
[9] : "");
4870 case NECP_ROUTE_RULE_QOS_MARKING
:
4871 snprintf(result_string
, MAX_RESULT_STRING_LEN
, "RouteRules (QoSMarking %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)",
4872 (route_rule
->cellular_action
== NECP_ROUTE_RULE_QOS_MARKING
) ? "Cell " : "",
4873 (route_rule
->wifi_action
== NECP_ROUTE_RULE_QOS_MARKING
) ? "WiFi " : "",
4874 (route_rule
->wired_action
== NECP_ROUTE_RULE_QOS_MARKING
) ? "Wired " : "",
4875 (route_rule
->expensive_action
== NECP_ROUTE_RULE_QOS_MARKING
) ? "Exp " : "",
4876 (route_rule
->constrained_action
== NECP_ROUTE_RULE_QOS_MARKING
) ? "Constrained " : "",
4877 (route_rule
->exception_if_actions
[0] == NECP_ROUTE_RULE_QOS_MARKING
) ? interface_names
[0] : "",
4878 (route_rule
->exception_if_actions
[0] == NECP_ROUTE_RULE_QOS_MARKING
) ? " " : "",
4879 (route_rule
->exception_if_actions
[1] == NECP_ROUTE_RULE_QOS_MARKING
) ? interface_names
[1] : "",
4880 (route_rule
->exception_if_actions
[1] == NECP_ROUTE_RULE_QOS_MARKING
) ? " " : "",
4881 (route_rule
->exception_if_actions
[2] == NECP_ROUTE_RULE_QOS_MARKING
) ? interface_names
[2] : "",
4882 (route_rule
->exception_if_actions
[2] == NECP_ROUTE_RULE_QOS_MARKING
) ? " " : "",
4883 (route_rule
->exception_if_actions
[3] == NECP_ROUTE_RULE_QOS_MARKING
) ? interface_names
[3] : "",
4884 (route_rule
->exception_if_actions
[3] == NECP_ROUTE_RULE_QOS_MARKING
) ? " " : "",
4885 (route_rule
->exception_if_actions
[4] == NECP_ROUTE_RULE_QOS_MARKING
) ? interface_names
[4] : "",
4886 (route_rule
->exception_if_actions
[4] == NECP_ROUTE_RULE_QOS_MARKING
) ? " " : "",
4887 (route_rule
->exception_if_actions
[5] == NECP_ROUTE_RULE_QOS_MARKING
) ? interface_names
[5] : "",
4888 (route_rule
->exception_if_actions
[5] == NECP_ROUTE_RULE_QOS_MARKING
) ? " " : "",
4889 (route_rule
->exception_if_actions
[6] == NECP_ROUTE_RULE_QOS_MARKING
) ? interface_names
[6] : "",
4890 (route_rule
->exception_if_actions
[6] == NECP_ROUTE_RULE_QOS_MARKING
) ? " " : "",
4891 (route_rule
->exception_if_actions
[7] == NECP_ROUTE_RULE_QOS_MARKING
) ? interface_names
[7] : "",
4892 (route_rule
->exception_if_actions
[7] == NECP_ROUTE_RULE_QOS_MARKING
) ? " " : "",
4893 (route_rule
->exception_if_actions
[8] == NECP_ROUTE_RULE_QOS_MARKING
) ? interface_names
[8] : "",
4894 (route_rule
->exception_if_actions
[8] == NECP_ROUTE_RULE_QOS_MARKING
) ? " " : "",
4895 (route_rule
->exception_if_actions
[9] == NECP_ROUTE_RULE_QOS_MARKING
) ? interface_names
[9] : "");
4898 snprintf(result_string
, MAX_RESULT_STRING_LEN
, "RouteRules (Unknown)");
4904 case NECP_KERNEL_POLICY_RESULT_USE_NETAGENT
: {
4905 bool found_mapping
= FALSE
;
4906 struct necp_uuid_id_mapping
*mapping
= necp_uuid_lookup_uuid_with_service_id_locked(result_parameter
.netagent_id
);
4907 if (mapping
!= NULL
) {
4908 uuid_unparse(mapping
->uuid
, uuid_string
);
4909 found_mapping
= TRUE
;
4911 snprintf(result_string
, MAX_RESULT_STRING_LEN
, "UseNetAgent (%s)", found_mapping
? uuid_string
: "Unknown");
4914 case NECP_KERNEL_POLICY_RESULT_NETAGENT_SCOPED
: {
4915 bool found_mapping
= FALSE
;
4916 struct necp_uuid_id_mapping
*mapping
= necp_uuid_lookup_uuid_with_service_id_locked(result_parameter
.netagent_id
);
4917 if (mapping
!= NULL
) {
4918 uuid_unparse(mapping
->uuid
, uuid_string
);
4919 found_mapping
= TRUE
;
4921 snprintf(result_string
, MAX_RESULT_STRING_LEN
, "NetAgentScoped (%s)", found_mapping
? uuid_string
: "Unknown");
4924 case NECP_POLICY_RESULT_TRIGGER
: {
4925 bool found_mapping
= FALSE
;
4926 struct necp_uuid_id_mapping
*mapping
= necp_uuid_lookup_uuid_with_service_id_locked(result_parameter
.service
.identifier
);
4927 if (mapping
!= NULL
) {
4928 uuid_unparse(mapping
->uuid
, uuid_string
);
4929 found_mapping
= TRUE
;
4931 snprintf(result_string
, MAX_RESULT_STRING_LEN
, "Trigger (%s.%d)", found_mapping
? uuid_string
: "Unknown", result_parameter
.service
.data
);
4934 case NECP_POLICY_RESULT_TRIGGER_IF_NEEDED
: {
4935 bool found_mapping
= FALSE
;
4936 struct necp_uuid_id_mapping
*mapping
= necp_uuid_lookup_uuid_with_service_id_locked(result_parameter
.service
.identifier
);
4937 if (mapping
!= NULL
) {
4938 uuid_unparse(mapping
->uuid
, uuid_string
);
4939 found_mapping
= TRUE
;
4941 snprintf(result_string
, MAX_RESULT_STRING_LEN
, "TriggerIfNeeded (%s.%d)", found_mapping
? uuid_string
: "Unknown", result_parameter
.service
.data
);
4944 case NECP_POLICY_RESULT_TRIGGER_SCOPED
: {
4945 bool found_mapping
= FALSE
;
4946 struct necp_uuid_id_mapping
*mapping
= necp_uuid_lookup_uuid_with_service_id_locked(result_parameter
.service
.identifier
);
4947 if (mapping
!= NULL
) {
4948 uuid_unparse(mapping
->uuid
, uuid_string
);
4949 found_mapping
= TRUE
;
4951 snprintf(result_string
, MAX_RESULT_STRING_LEN
, "TriggerScoped (%s.%d)", found_mapping
? uuid_string
: "Unknown", result_parameter
.service
.data
);
4954 case NECP_POLICY_RESULT_NO_TRIGGER_SCOPED
: {
4955 bool found_mapping
= FALSE
;
4956 struct necp_uuid_id_mapping
*mapping
= necp_uuid_lookup_uuid_with_service_id_locked(result_parameter
.service
.identifier
);
4957 if (mapping
!= NULL
) {
4958 uuid_unparse(mapping
->uuid
, uuid_string
);
4959 found_mapping
= TRUE
;
4961 snprintf(result_string
, MAX_RESULT_STRING_LEN
, "NoTriggerScoped (%s.%d)", found_mapping
? uuid_string
: "Unknown", result_parameter
.service
.data
);
4965 snprintf(result_string
, MAX_RESULT_STRING_LEN
, "Unknown %d (%d)", result
, result_parameter
.tunnel_interface_index
);
4969 return result_string
;
4973 necp_kernel_socket_policies_dump_all(void)
4976 struct necp_kernel_socket_policy
*policy
= NULL
;
4979 char result_string
[MAX_RESULT_STRING_LEN
];
4980 char proc_name_string
[MAXCOMLEN
+ 1];
4981 memset(result_string
, 0, MAX_RESULT_STRING_LEN
);
4982 memset(proc_name_string
, 0, MAXCOMLEN
+ 1);
4984 NECPLOG0(LOG_DEBUG
, "NECP Application Policies:\n");
4985 NECPLOG0(LOG_DEBUG
, "-----------\n");
4986 for (policy_i
= 0; necp_kernel_socket_policies_app_layer_map
!= NULL
&& necp_kernel_socket_policies_app_layer_map
[policy_i
] != NULL
; policy_i
++) {
4987 policy
= necp_kernel_socket_policies_app_layer_map
[policy_i
];
4988 proc_name(policy
->session_pid
, proc_name_string
, MAXCOMLEN
);
4989 NECPLOG(LOG_DEBUG
, "\t%3d. Policy ID: %5d\tProcess: %10.10s\tOrder: %04d.%04d\tMask: %5x\tResult: %s\n", policy_i
, policy
->id
, proc_name_string
, policy
->session_order
, policy
->order
, policy
->condition_mask
, necp_get_result_description(result_string
, policy
->result
, policy
->result_parameter
));
4991 if (necp_kernel_socket_policies_app_layer_map
[0] != NULL
) {
4992 NECPLOG0(LOG_DEBUG
, "-----------\n");
4995 NECPLOG0(LOG_DEBUG
, "NECP Socket Policies:\n");
4996 NECPLOG0(LOG_DEBUG
, "-----------\n");
4997 for (app_i
= 0; app_i
< NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS
; app_i
++) {
4998 NECPLOG(LOG_DEBUG
, "\tApp Bucket: %d\n", app_i
);
4999 for (policy_i
= 0; necp_kernel_socket_policies_map
[app_i
] != NULL
&& (necp_kernel_socket_policies_map
[app_i
])[policy_i
] != NULL
; policy_i
++) {
5000 policy
= (necp_kernel_socket_policies_map
[app_i
])[policy_i
];
5001 proc_name(policy
->session_pid
, proc_name_string
, MAXCOMLEN
);
5002 NECPLOG(LOG_DEBUG
, "\t%3d. Policy ID: %5d\tProcess: %10.10s\tOrder: %04d.%04d\tMask: %5x\tResult: %s\n", policy_i
, policy
->id
, proc_name_string
, policy
->session_order
, policy
->order
, policy
->condition_mask
, necp_get_result_description(result_string
, policy
->result
, policy
->result_parameter
));
5004 NECPLOG0(LOG_DEBUG
, "-----------\n");
5010 necp_kernel_socket_result_is_trigger_service_type(struct necp_kernel_socket_policy
*kernel_policy
)
5012 return kernel_policy
->result
>= NECP_KERNEL_POLICY_RESULT_TRIGGER
&& kernel_policy
->result
<= NECP_KERNEL_POLICY_RESULT_NO_TRIGGER_SCOPED
;
5016 necp_kernel_socket_policy_results_overlap(struct necp_kernel_socket_policy
*upper_policy
, struct necp_kernel_socket_policy
*lower_policy
)
5018 if (upper_policy
->result
== NECP_KERNEL_POLICY_RESULT_DROP
) {
5019 // Drop always cancels out lower policies
5021 } else if (upper_policy
->result
== NECP_KERNEL_POLICY_RESULT_SOCKET_FILTER
||
5022 upper_policy
->result
== NECP_KERNEL_POLICY_RESULT_ROUTE_RULES
||
5023 upper_policy
->result
== NECP_KERNEL_POLICY_RESULT_USE_NETAGENT
||
5024 upper_policy
->result
== NECP_KERNEL_POLICY_RESULT_NETAGENT_SCOPED
||
5025 upper_policy
->result
== NECP_KERNEL_POLICY_RESULT_ALLOW_UNENTITLED
) {
5026 // Filters and route rules never cancel out lower policies
5028 } else if (necp_kernel_socket_result_is_trigger_service_type(upper_policy
)) {
5029 // Trigger/Scoping policies can overlap one another, but not other results
5030 return necp_kernel_socket_result_is_trigger_service_type(lower_policy
);
5031 } else if (upper_policy
->result
== NECP_KERNEL_POLICY_RESULT_SKIP
) {
5032 if (upper_policy
->session_order
!= lower_policy
->session_order
) {
5033 // A skip cannot override a policy of a different session
5036 if (upper_policy
->result_parameter
.skip_policy_order
== 0 ||
5037 lower_policy
->order
>= upper_policy
->result_parameter
.skip_policy_order
) {
5038 // This policy is beyond the skip
5041 // This policy is inside the skip
5047 // A hard pass, flow divert, tunnel, or scope will currently block out lower policies
5052 necp_kernel_socket_policy_is_unnecessary(struct necp_kernel_socket_policy
*policy
, struct necp_kernel_socket_policy
**policy_array
, int valid_indices
)
5054 bool can_skip
= FALSE
;
5055 u_int32_t highest_skip_session_order
= 0;
5056 u_int32_t highest_skip_order
= 0;
5058 for (i
= 0; i
< valid_indices
; i
++) {
5059 struct necp_kernel_socket_policy
*compared_policy
= policy_array
[i
];
5061 // For policies in a skip window, we can't mark conflicting policies as unnecessary
5063 if (highest_skip_session_order
!= compared_policy
->session_order
||
5064 (highest_skip_order
!= 0 && compared_policy
->order
>= highest_skip_order
)) {
5065 // If we've moved on to the next session, or passed the skip window
5066 highest_skip_session_order
= 0;
5067 highest_skip_order
= 0;
5070 // If this policy is also a skip, in can increase the skip window
5071 if (compared_policy
->result
== NECP_KERNEL_POLICY_RESULT_SKIP
) {
5072 if (compared_policy
->result_parameter
.skip_policy_order
> highest_skip_order
) {
5073 highest_skip_order
= compared_policy
->result_parameter
.skip_policy_order
;
5080 if (compared_policy
->result
== NECP_KERNEL_POLICY_RESULT_SKIP
) {
5081 // This policy is a skip. Set the skip window accordingly
5083 highest_skip_session_order
= compared_policy
->session_order
;
5084 highest_skip_order
= compared_policy
->result_parameter
.skip_policy_order
;
5087 // The result of the compared policy must be able to block out this policy result
5088 if (!necp_kernel_socket_policy_results_overlap(compared_policy
, policy
)) {
5092 // If new policy matches All Interfaces, compared policy must also
5093 if ((policy
->condition_mask
& NECP_KERNEL_CONDITION_ALL_INTERFACES
) && !(compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_ALL_INTERFACES
)) {
5097 // If new policy matches Local Networks, compared policy must also
5098 if ((policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_NETWORKS
) && !(compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_NETWORKS
)) {
5102 // Default makes lower policies unecessary always
5103 if (compared_policy
->condition_mask
== 0) {
5107 // Compared must be more general than policy, and include only conditions within policy
5108 if ((policy
->condition_mask
& compared_policy
->condition_mask
) != compared_policy
->condition_mask
) {
5112 // Negative conditions must match for the overlapping conditions
5113 if ((policy
->condition_negated_mask
& compared_policy
->condition_mask
) != (compared_policy
->condition_negated_mask
& compared_policy
->condition_mask
)) {
5117 if (compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_DOMAIN
&&
5118 strcmp(compared_policy
->cond_domain
, policy
->cond_domain
) != 0) {
5122 if (compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT
&&
5123 strcmp(compared_policy
->cond_custom_entitlement
, policy
->cond_custom_entitlement
) != 0) {
5127 if (compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_ACCOUNT_ID
&&
5128 compared_policy
->cond_account_id
!= policy
->cond_account_id
) {
5132 if (compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_POLICY_ID
&&
5133 compared_policy
->cond_policy_id
!= policy
->cond_policy_id
) {
5137 if (compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_APP_ID
&&
5138 compared_policy
->cond_app_id
!= policy
->cond_app_id
) {
5142 if (compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_REAL_APP_ID
&&
5143 compared_policy
->cond_real_app_id
!= policy
->cond_real_app_id
) {
5147 if (compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_PID
&&
5148 compared_policy
->cond_pid
!= policy
->cond_pid
) {
5152 if (compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_UID
&&
5153 compared_policy
->cond_uid
!= policy
->cond_uid
) {
5157 if (compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_BOUND_INTERFACE
&&
5158 compared_policy
->cond_bound_interface
!= policy
->cond_bound_interface
) {
5162 if (compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_PROTOCOL
&&
5163 compared_policy
->cond_protocol
!= policy
->cond_protocol
) {
5167 if (compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_CLIENT_FLAGS
&&
5168 compared_policy
->cond_client_flags
!= policy
->cond_client_flags
) {
5172 if (compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_TRAFFIC_CLASS
&&
5173 !(compared_policy
->cond_traffic_class
.start_tc
<= policy
->cond_traffic_class
.start_tc
&&
5174 compared_policy
->cond_traffic_class
.end_tc
>= policy
->cond_traffic_class
.end_tc
)) {
5178 if (compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_START
) {
5179 if (compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_END
) {
5180 if (!necp_is_range_in_range((struct sockaddr
*)&policy
->cond_local_start
, (struct sockaddr
*)&policy
->cond_local_end
, (struct sockaddr
*)&compared_policy
->cond_local_start
, (struct sockaddr
*)&compared_policy
->cond_local_end
)) {
5183 } else if (compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_PREFIX
) {
5184 if (compared_policy
->cond_local_prefix
> policy
->cond_local_prefix
||
5185 !necp_is_addr_in_subnet((struct sockaddr
*)&policy
->cond_local_start
, (struct sockaddr
*)&compared_policy
->cond_local_start
, compared_policy
->cond_local_prefix
)) {
5191 if (compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_REMOTE_START
) {
5192 if (compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_REMOTE_END
) {
5193 if (!necp_is_range_in_range((struct sockaddr
*)&policy
->cond_remote_start
, (struct sockaddr
*)&policy
->cond_remote_end
, (struct sockaddr
*)&compared_policy
->cond_remote_start
, (struct sockaddr
*)&compared_policy
->cond_remote_end
)) {
5196 } else if (compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_REMOTE_PREFIX
) {
5197 if (compared_policy
->cond_remote_prefix
> policy
->cond_remote_prefix
||
5198 !necp_is_addr_in_subnet((struct sockaddr
*)&policy
->cond_remote_start
, (struct sockaddr
*)&compared_policy
->cond_remote_start
, compared_policy
->cond_remote_prefix
)) {
5204 if (compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_AGENT_TYPE
&&
5205 memcmp(&compared_policy
->cond_agent_type
, &policy
->cond_agent_type
, sizeof(policy
->cond_agent_type
)) == 0) {
5216 necp_kernel_socket_policies_reprocess(void)
5219 int bucket_allocation_counts
[NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS
];
5220 int bucket_current_free_index
[NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS
];
5221 int app_layer_allocation_count
= 0;
5222 int app_layer_current_free_index
= 0;
5223 struct necp_kernel_socket_policy
*kernel_policy
= NULL
;
5225 LCK_RW_ASSERT(&necp_kernel_policy_lock
, LCK_RW_ASSERT_EXCLUSIVE
);
5228 necp_kernel_application_policies_condition_mask
= 0;
5229 necp_kernel_socket_policies_condition_mask
= 0;
5230 necp_kernel_application_policies_count
= 0;
5231 necp_kernel_socket_policies_count
= 0;
5232 necp_kernel_socket_policies_non_app_count
= 0;
5234 // Reset all maps to NULL
5235 for (app_i
= 0; app_i
< NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS
; app_i
++) {
5236 if (necp_kernel_socket_policies_map
[app_i
] != NULL
) {
5237 FREE(necp_kernel_socket_policies_map
[app_i
], M_NECP
);
5238 necp_kernel_socket_policies_map
[app_i
] = NULL
;
5242 bucket_allocation_counts
[app_i
] = 0;
5244 if (necp_kernel_socket_policies_app_layer_map
!= NULL
) {
5245 FREE(necp_kernel_socket_policies_app_layer_map
, M_NECP
);
5246 necp_kernel_socket_policies_app_layer_map
= NULL
;
5249 // Create masks and counts
5250 LIST_FOREACH(kernel_policy
, &necp_kernel_socket_policies
, chain
) {
5251 // App layer mask/count
5252 necp_kernel_application_policies_condition_mask
|= kernel_policy
->condition_mask
;
5253 necp_kernel_application_policies_count
++;
5254 app_layer_allocation_count
++;
5256 if ((kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_AGENT_TYPE
)) {
5257 // Agent type conditions only apply to app layer
5261 // Update socket layer bucket mask/counts
5262 necp_kernel_socket_policies_condition_mask
|= kernel_policy
->condition_mask
;
5263 necp_kernel_socket_policies_count
++;
5265 if (!(kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_APP_ID
) ||
5266 kernel_policy
->condition_negated_mask
& NECP_KERNEL_CONDITION_APP_ID
) {
5267 necp_kernel_socket_policies_non_app_count
++;
5268 for (app_i
= 0; app_i
< NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS
; app_i
++) {
5269 bucket_allocation_counts
[app_i
]++;
5272 bucket_allocation_counts
[NECP_SOCKET_MAP_APP_ID_TO_BUCKET(kernel_policy
->cond_app_id
)]++;
5277 for (app_i
= 0; app_i
< NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS
; app_i
++) {
5278 if (bucket_allocation_counts
[app_i
] > 0) {
5279 // Allocate a NULL-terminated array of policy pointers for each bucket
5280 MALLOC(necp_kernel_socket_policies_map
[app_i
], struct necp_kernel_socket_policy
**, sizeof(struct necp_kernel_socket_policy
*) * (bucket_allocation_counts
[app_i
] + 1), M_NECP
, M_WAITOK
);
5281 if (necp_kernel_socket_policies_map
[app_i
] == NULL
) {
5285 // Initialize the first entry to NULL
5286 (necp_kernel_socket_policies_map
[app_i
])[0] = NULL
;
5288 bucket_current_free_index
[app_i
] = 0;
5290 MALLOC(necp_kernel_socket_policies_app_layer_map
, struct necp_kernel_socket_policy
**, sizeof(struct necp_kernel_socket_policy
*) * (app_layer_allocation_count
+ 1), M_NECP
, M_WAITOK
);
5291 if (necp_kernel_socket_policies_app_layer_map
== NULL
) {
5294 necp_kernel_socket_policies_app_layer_map
[0] = NULL
;
5297 LIST_FOREACH(kernel_policy
, &necp_kernel_socket_policies
, chain
) {
5298 // Add app layer policies
5299 if (!necp_kernel_socket_policy_is_unnecessary(kernel_policy
, necp_kernel_socket_policies_app_layer_map
, app_layer_current_free_index
)) {
5300 necp_kernel_socket_policies_app_layer_map
[app_layer_current_free_index
] = kernel_policy
;
5301 app_layer_current_free_index
++;
5302 necp_kernel_socket_policies_app_layer_map
[app_layer_current_free_index
] = NULL
;
5305 if ((kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_AGENT_TYPE
)) {
5306 // Agent type conditions only apply to app layer
5310 // Add socket policies
5311 if (!(kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_APP_ID
) ||
5312 kernel_policy
->condition_negated_mask
& NECP_KERNEL_CONDITION_APP_ID
) {
5313 for (app_i
= 0; app_i
< NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS
; app_i
++) {
5314 if (!necp_kernel_socket_policy_is_unnecessary(kernel_policy
, necp_kernel_socket_policies_map
[app_i
], bucket_current_free_index
[app_i
])) {
5315 (necp_kernel_socket_policies_map
[app_i
])[(bucket_current_free_index
[app_i
])] = kernel_policy
;
5316 bucket_current_free_index
[app_i
]++;
5317 (necp_kernel_socket_policies_map
[app_i
])[(bucket_current_free_index
[app_i
])] = NULL
;
5321 app_i
= NECP_SOCKET_MAP_APP_ID_TO_BUCKET(kernel_policy
->cond_app_id
);
5322 if (!necp_kernel_socket_policy_is_unnecessary(kernel_policy
, necp_kernel_socket_policies_map
[app_i
], bucket_current_free_index
[app_i
])) {
5323 (necp_kernel_socket_policies_map
[app_i
])[(bucket_current_free_index
[app_i
])] = kernel_policy
;
5324 bucket_current_free_index
[app_i
]++;
5325 (necp_kernel_socket_policies_map
[app_i
])[(bucket_current_free_index
[app_i
])] = NULL
;
5329 necp_kernel_socket_policies_dump_all();
5330 BUMP_KERNEL_SOCKET_POLICIES_GENERATION_COUNT();
5334 // Free memory, reset masks to 0
5335 necp_kernel_application_policies_condition_mask
= 0;
5336 necp_kernel_socket_policies_condition_mask
= 0;
5337 necp_kernel_application_policies_count
= 0;
5338 necp_kernel_socket_policies_count
= 0;
5339 necp_kernel_socket_policies_non_app_count
= 0;
5340 for (app_i
= 0; app_i
< NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS
; app_i
++) {
5341 if (necp_kernel_socket_policies_map
[app_i
] != NULL
) {
5342 FREE(necp_kernel_socket_policies_map
[app_i
], M_NECP
);
5343 necp_kernel_socket_policies_map
[app_i
] = NULL
;
5346 if (necp_kernel_socket_policies_app_layer_map
!= NULL
) {
5347 FREE(necp_kernel_socket_policies_app_layer_map
, M_NECP
);
5348 necp_kernel_socket_policies_app_layer_map
= NULL
;
5354 necp_get_new_string_id(void)
5356 static u_int32_t necp_last_string_id
= 0;
5358 u_int32_t newid
= 0;
5360 LCK_RW_ASSERT(&necp_kernel_policy_lock
, LCK_RW_ASSERT_EXCLUSIVE
);
5362 bool wrapped
= FALSE
;
5364 necp_last_string_id
++;
5365 if (necp_last_string_id
< 1) {
5367 // Already wrapped, give up
5368 NECPLOG0(LOG_ERR
, "Failed to find a free app UUID.\n");
5371 necp_last_string_id
= 1;
5374 newid
= necp_last_string_id
;
5375 } while (necp_lookup_string_with_id_locked(&necp_account_id_list
, newid
) != NULL
); // If already used, keep trying
5378 NECPLOG0(LOG_ERR
, "Allocate string id failed.\n");
5385 static struct necp_string_id_mapping
*
5386 necp_lookup_string_to_id_locked(struct necp_string_id_mapping_list
*list
, char *string
)
5388 struct necp_string_id_mapping
*searchentry
= NULL
;
5389 struct necp_string_id_mapping
*foundentry
= NULL
;
5391 LIST_FOREACH(searchentry
, list
, chain
) {
5392 if (strcmp(searchentry
->string
, string
) == 0) {
5393 foundentry
= searchentry
;
5401 static struct necp_string_id_mapping
*
5402 necp_lookup_string_with_id_locked(struct necp_string_id_mapping_list
*list
, u_int32_t local_id
)
5404 struct necp_string_id_mapping
*searchentry
= NULL
;
5405 struct necp_string_id_mapping
*foundentry
= NULL
;
5407 LIST_FOREACH(searchentry
, list
, chain
) {
5408 if (searchentry
->id
== local_id
) {
5409 foundentry
= searchentry
;
5418 necp_create_string_to_id_mapping(struct necp_string_id_mapping_list
*list
, char *string
)
5420 u_int32_t string_id
= 0;
5421 struct necp_string_id_mapping
*existing_mapping
= NULL
;
5423 LCK_RW_ASSERT(&necp_kernel_policy_lock
, LCK_RW_ASSERT_EXCLUSIVE
);
5425 existing_mapping
= necp_lookup_string_to_id_locked(list
, string
);
5426 if (existing_mapping
!= NULL
) {
5427 string_id
= existing_mapping
->id
;
5428 os_ref_retain_locked(&existing_mapping
->refcount
);
5430 struct necp_string_id_mapping
*new_mapping
= NULL
;
5431 MALLOC(new_mapping
, struct necp_string_id_mapping
*, sizeof(struct necp_string_id_mapping
), M_NECP
, M_WAITOK
);
5432 if (new_mapping
!= NULL
) {
5433 memset(new_mapping
, 0, sizeof(struct necp_string_id_mapping
));
5435 size_t length
= strlen(string
) + 1;
5436 MALLOC(new_mapping
->string
, char *, length
, M_NECP
, M_WAITOK
);
5437 if (new_mapping
->string
!= NULL
) {
5438 memcpy(new_mapping
->string
, string
, length
);
5439 new_mapping
->id
= necp_get_new_string_id();
5440 os_ref_init(&new_mapping
->refcount
, &necp_refgrp
);
5441 LIST_INSERT_HEAD(list
, new_mapping
, chain
);
5442 string_id
= new_mapping
->id
;
5444 FREE(new_mapping
, M_NECP
);
5453 necp_remove_string_to_id_mapping(struct necp_string_id_mapping_list
*list
, char *string
)
5455 struct necp_string_id_mapping
*existing_mapping
= NULL
;
5457 LCK_RW_ASSERT(&necp_kernel_policy_lock
, LCK_RW_ASSERT_EXCLUSIVE
);
5459 existing_mapping
= necp_lookup_string_to_id_locked(list
, string
);
5460 if (existing_mapping
!= NULL
) {
5461 if (os_ref_release_locked(&existing_mapping
->refcount
) == 0) {
5462 LIST_REMOVE(existing_mapping
, chain
);
5463 FREE(existing_mapping
->string
, M_NECP
);
5464 FREE(existing_mapping
, M_NECP
);
5472 #define NECP_FIRST_VALID_ROUTE_RULE_ID 1
5473 #define NECP_FIRST_VALID_AGGREGATE_ROUTE_RULE_ID UINT16_MAX
5475 necp_get_new_route_rule_id(bool aggregate
)
5477 static u_int32_t necp_last_route_rule_id
= 0;
5478 static u_int32_t necp_last_aggregate_route_rule_id
= 0;
5480 u_int32_t newid
= 0;
5483 // Main necp_kernel_policy_lock protects non-aggregate rule IDs
5484 LCK_RW_ASSERT(&necp_kernel_policy_lock
, LCK_RW_ASSERT_EXCLUSIVE
);
5486 bool wrapped
= FALSE
;
5488 necp_last_route_rule_id
++;
5489 if (necp_last_route_rule_id
< NECP_FIRST_VALID_ROUTE_RULE_ID
||
5490 necp_last_route_rule_id
>= NECP_FIRST_VALID_AGGREGATE_ROUTE_RULE_ID
) {
5492 // Already wrapped, give up
5493 NECPLOG0(LOG_ERR
, "Failed to find a free route rule id.\n");
5496 necp_last_route_rule_id
= NECP_FIRST_VALID_ROUTE_RULE_ID
;
5499 newid
= necp_last_route_rule_id
;
5500 } while (necp_lookup_route_rule_locked(&necp_route_rules
, newid
) != NULL
); // If already used, keep trying
5502 // necp_route_rule_lock protects aggregate rule IDs
5503 LCK_RW_ASSERT(&necp_route_rule_lock
, LCK_RW_ASSERT_EXCLUSIVE
);
5505 bool wrapped
= FALSE
;
5507 necp_last_aggregate_route_rule_id
++;
5508 if (necp_last_aggregate_route_rule_id
< NECP_FIRST_VALID_AGGREGATE_ROUTE_RULE_ID
) {
5510 // Already wrapped, give up
5511 NECPLOG0(LOG_ERR
, "Failed to find a free aggregate route rule id.\n");
5514 necp_last_aggregate_route_rule_id
= NECP_FIRST_VALID_AGGREGATE_ROUTE_RULE_ID
;
5517 newid
= necp_last_aggregate_route_rule_id
;
5518 } while (necp_lookup_route_rule_locked(&necp_route_rules
, newid
) != NULL
); // If already used, keep trying
5522 NECPLOG0(LOG_ERR
, "Allocate route rule ID failed.\n");
5529 static struct necp_route_rule
*
5530 necp_lookup_route_rule_locked(struct necp_route_rule_list
*list
, u_int32_t route_rule_id
)
5532 struct necp_route_rule
*searchentry
= NULL
;
5533 struct necp_route_rule
*foundentry
= NULL
;
5535 LIST_FOREACH(searchentry
, list
, chain
) {
5536 if (searchentry
->id
== route_rule_id
) {
5537 foundentry
= searchentry
;
5545 static struct necp_route_rule
*
5546 necp_lookup_route_rule_by_contents_locked(struct necp_route_rule_list
*list
, u_int32_t default_action
, u_int8_t cellular_action
, u_int8_t wifi_action
, u_int8_t wired_action
, u_int8_t expensive_action
, u_int8_t constrained_action
, u_int32_t
*if_indices
, u_int8_t
*if_actions
)
5548 struct necp_route_rule
*searchentry
= NULL
;
5549 struct necp_route_rule
*foundentry
= NULL
;
5551 LIST_FOREACH(searchentry
, list
, chain
) {
5552 if (searchentry
->default_action
== default_action
&&
5553 searchentry
->cellular_action
== cellular_action
&&
5554 searchentry
->wifi_action
== wifi_action
&&
5555 searchentry
->wired_action
== wired_action
&&
5556 searchentry
->expensive_action
== expensive_action
&&
5557 searchentry
->constrained_action
== constrained_action
) {
5558 bool match_failed
= FALSE
;
5563 for (index_a
= 0; index_a
< MAX_ROUTE_RULE_INTERFACES
; index_a
++) {
5564 bool found_index
= FALSE
;
5565 if (searchentry
->exception_if_indices
[index_a
] == 0) {
5569 for (index_b
= 0; index_b
< MAX_ROUTE_RULE_INTERFACES
; index_b
++) {
5570 if (if_indices
[index_b
] == 0) {
5573 if (index_b
>= count_b
) {
5574 count_b
= index_b
+ 1;
5576 if (searchentry
->exception_if_indices
[index_a
] == if_indices
[index_b
] &&
5577 searchentry
->exception_if_actions
[index_a
] == if_actions
[index_b
]) {
5583 match_failed
= TRUE
;
5587 if (!match_failed
&& count_a
== count_b
) {
5588 foundentry
= searchentry
;
5598 necp_create_route_rule(struct necp_route_rule_list
*list
, u_int8_t
*route_rules_array
, u_int32_t route_rules_array_size
)
5601 u_int32_t route_rule_id
= 0;
5602 struct necp_route_rule
*existing_rule
= NULL
;
5603 u_int32_t default_action
= NECP_ROUTE_RULE_ALLOW_INTERFACE
;
5604 u_int8_t cellular_action
= NECP_ROUTE_RULE_NONE
;
5605 u_int8_t wifi_action
= NECP_ROUTE_RULE_NONE
;
5606 u_int8_t wired_action
= NECP_ROUTE_RULE_NONE
;
5607 u_int8_t expensive_action
= NECP_ROUTE_RULE_NONE
;
5608 u_int8_t constrained_action
= NECP_ROUTE_RULE_NONE
;
5609 u_int32_t if_indices
[MAX_ROUTE_RULE_INTERFACES
];
5610 size_t num_valid_indices
= 0;
5611 memset(&if_indices
, 0, sizeof(if_indices
));
5612 u_int8_t if_actions
[MAX_ROUTE_RULE_INTERFACES
];
5613 memset(&if_actions
, 0, sizeof(if_actions
));
5615 LCK_RW_ASSERT(&necp_kernel_policy_lock
, LCK_RW_ASSERT_EXCLUSIVE
);
5617 if (route_rules_array
== NULL
|| route_rules_array_size
== 0) {
5622 while (offset
< route_rules_array_size
) {
5623 ifnet_t rule_interface
= NULL
;
5624 char interface_name
[IFXNAMSIZ
];
5625 u_int32_t length
= 0;
5626 u_int8_t
*value
= necp_buffer_get_tlv_value(route_rules_array
, offset
, &length
);
5628 u_int8_t rule_type
= necp_policy_condition_get_type_from_buffer(value
, length
);
5629 u_int8_t rule_flags
= necp_policy_condition_get_flags_from_buffer(value
, length
);
5630 u_int32_t rule_length
= necp_policy_condition_get_value_length_from_buffer(value
, length
);
5631 u_int8_t
*rule_value
= necp_policy_condition_get_value_pointer_from_buffer(value
, length
);
5633 if (rule_type
== NECP_ROUTE_RULE_NONE
) {
5634 // Don't allow an explicit rule to be None action
5638 if (rule_length
== 0) {
5639 if (rule_flags
& NECP_ROUTE_RULE_FLAG_CELLULAR
) {
5640 cellular_action
= rule_type
;
5642 if (rule_flags
& NECP_ROUTE_RULE_FLAG_WIFI
) {
5643 wifi_action
= rule_type
;
5645 if (rule_flags
& NECP_ROUTE_RULE_FLAG_WIRED
) {
5646 wired_action
= rule_type
;
5648 if (rule_flags
& NECP_ROUTE_RULE_FLAG_EXPENSIVE
) {
5649 expensive_action
= rule_type
;
5651 if (rule_flags
& NECP_ROUTE_RULE_FLAG_CONSTRAINED
) {
5652 constrained_action
= rule_type
;
5654 if (rule_flags
== 0) {
5655 default_action
= rule_type
;
5657 offset
+= sizeof(u_int8_t
) + sizeof(u_int32_t
) + length
;
5661 if (num_valid_indices
>= MAX_ROUTE_RULE_INTERFACES
) {
5662 offset
+= sizeof(u_int8_t
) + sizeof(u_int32_t
) + length
;
5666 if (rule_length
<= IFXNAMSIZ
) {
5667 memcpy(interface_name
, rule_value
, rule_length
);
5668 interface_name
[rule_length
- 1] = 0; // Make sure the string is NULL terminated
5669 if (ifnet_find_by_name(interface_name
, &rule_interface
) == 0) {
5670 if_actions
[num_valid_indices
] = rule_type
;
5671 if_indices
[num_valid_indices
++] = rule_interface
->if_index
;
5672 ifnet_release(rule_interface
);
5675 offset
+= sizeof(u_int8_t
) + sizeof(u_int32_t
) + length
;
5678 existing_rule
= necp_lookup_route_rule_by_contents_locked(list
, default_action
, cellular_action
, wifi_action
, wired_action
, expensive_action
, constrained_action
, if_indices
, if_actions
);
5679 if (existing_rule
!= NULL
) {
5680 route_rule_id
= existing_rule
->id
;
5681 os_ref_retain_locked(&existing_rule
->refcount
);
5683 struct necp_route_rule
*new_rule
= NULL
;
5684 MALLOC(new_rule
, struct necp_route_rule
*, sizeof(struct necp_route_rule
), M_NECP
, M_WAITOK
);
5685 if (new_rule
!= NULL
) {
5686 memset(new_rule
, 0, sizeof(struct necp_route_rule
));
5687 route_rule_id
= new_rule
->id
= necp_get_new_route_rule_id(false);
5688 new_rule
->default_action
= default_action
;
5689 new_rule
->cellular_action
= cellular_action
;
5690 new_rule
->wifi_action
= wifi_action
;
5691 new_rule
->wired_action
= wired_action
;
5692 new_rule
->expensive_action
= expensive_action
;
5693 new_rule
->constrained_action
= constrained_action
;
5694 memcpy(&new_rule
->exception_if_indices
, &if_indices
, sizeof(if_indices
));
5695 memcpy(&new_rule
->exception_if_actions
, &if_actions
, sizeof(if_actions
));
5696 os_ref_init(&new_rule
->refcount
, &necp_refgrp
);
5697 LIST_INSERT_HEAD(list
, new_rule
, chain
);
5700 return route_rule_id
;
5704 necp_remove_aggregate_route_rule_for_id(u_int32_t rule_id
)
5707 lck_rw_lock_exclusive(&necp_route_rule_lock
);
5709 struct necp_aggregate_route_rule
*existing_rule
= NULL
;
5710 struct necp_aggregate_route_rule
*tmp_rule
= NULL
;
5712 LIST_FOREACH_SAFE(existing_rule
, &necp_aggregate_route_rules
, chain
, tmp_rule
) {
5714 for (index
= 0; index
< MAX_AGGREGATE_ROUTE_RULES
; index
++) {
5715 u_int32_t route_rule_id
= existing_rule
->rule_ids
[index
];
5716 if (route_rule_id
== rule_id
) {
5717 LIST_REMOVE(existing_rule
, chain
);
5718 FREE(existing_rule
, M_NECP
);
5724 lck_rw_done(&necp_route_rule_lock
);
5729 necp_remove_route_rule(struct necp_route_rule_list
*list
, u_int32_t route_rule_id
)
5731 struct necp_route_rule
*existing_rule
= NULL
;
5733 LCK_RW_ASSERT(&necp_kernel_policy_lock
, LCK_RW_ASSERT_EXCLUSIVE
);
5735 existing_rule
= necp_lookup_route_rule_locked(list
, route_rule_id
);
5736 if (existing_rule
!= NULL
) {
5737 if (os_ref_release_locked(&existing_rule
->refcount
) == 0) {
5738 necp_remove_aggregate_route_rule_for_id(existing_rule
->id
);
5739 LIST_REMOVE(existing_rule
, chain
);
5740 FREE(existing_rule
, M_NECP
);
5748 static struct necp_aggregate_route_rule
*
5749 necp_lookup_aggregate_route_rule_locked(u_int32_t route_rule_id
)
5751 struct necp_aggregate_route_rule
*searchentry
= NULL
;
5752 struct necp_aggregate_route_rule
*foundentry
= NULL
;
5754 lck_rw_lock_shared(&necp_route_rule_lock
);
5756 LIST_FOREACH(searchentry
, &necp_aggregate_route_rules
, chain
) {
5757 if (searchentry
->id
== route_rule_id
) {
5758 foundentry
= searchentry
;
5763 lck_rw_done(&necp_route_rule_lock
);
5769 necp_create_aggregate_route_rule(u_int32_t
*rule_ids
)
5771 u_int32_t aggregate_route_rule_id
= 0;
5772 struct necp_aggregate_route_rule
*new_rule
= NULL
;
5773 struct necp_aggregate_route_rule
*existing_rule
= NULL
;
5775 lck_rw_lock_exclusive(&necp_route_rule_lock
);
5777 // Check if the rule already exists
5778 LIST_FOREACH(existing_rule
, &necp_aggregate_route_rules
, chain
) {
5779 if (memcmp(existing_rule
->rule_ids
, rule_ids
, (sizeof(u_int32_t
) * MAX_AGGREGATE_ROUTE_RULES
)) == 0) {
5780 lck_rw_done(&necp_route_rule_lock
);
5781 return existing_rule
->id
;
5785 MALLOC(new_rule
, struct necp_aggregate_route_rule
*, sizeof(struct necp_aggregate_route_rule
), M_NECP
, M_WAITOK
);
5786 if (new_rule
!= NULL
) {
5787 memset(new_rule
, 0, sizeof(struct necp_aggregate_route_rule
));
5788 aggregate_route_rule_id
= new_rule
->id
= necp_get_new_route_rule_id(true);
5789 new_rule
->id
= aggregate_route_rule_id
;
5790 memcpy(new_rule
->rule_ids
, rule_ids
, (sizeof(u_int32_t
) * MAX_AGGREGATE_ROUTE_RULES
));
5791 LIST_INSERT_HEAD(&necp_aggregate_route_rules
, new_rule
, chain
);
5793 lck_rw_done(&necp_route_rule_lock
);
5795 return aggregate_route_rule_id
;
5798 #define NECP_NULL_SERVICE_ID 1
5799 #define NECP_FIRST_VALID_SERVICE_ID 2
5800 #define NECP_FIRST_VALID_APP_ID UINT16_MAX
5802 necp_get_new_uuid_id(bool service
)
5804 static u_int32_t necp_last_service_uuid_id
= 0;
5805 static u_int32_t necp_last_app_uuid_id
= 0;
5807 u_int32_t newid
= 0;
5809 LCK_RW_ASSERT(&necp_kernel_policy_lock
, LCK_RW_ASSERT_EXCLUSIVE
);
5812 bool wrapped
= FALSE
;
5814 necp_last_service_uuid_id
++;
5815 if (necp_last_service_uuid_id
< NECP_FIRST_VALID_SERVICE_ID
||
5816 necp_last_service_uuid_id
>= NECP_FIRST_VALID_APP_ID
) {
5818 // Already wrapped, give up
5819 NECPLOG0(LOG_ERR
, "Failed to find a free service UUID.\n");
5820 return NECP_NULL_SERVICE_ID
;
5822 necp_last_service_uuid_id
= NECP_FIRST_VALID_SERVICE_ID
;
5825 newid
= necp_last_service_uuid_id
;
5826 } while (necp_uuid_lookup_uuid_with_service_id_locked(newid
) != NULL
); // If already used, keep trying
5828 bool wrapped
= FALSE
;
5830 necp_last_app_uuid_id
++;
5831 if (necp_last_app_uuid_id
< NECP_FIRST_VALID_APP_ID
) {
5833 // Already wrapped, give up
5834 NECPLOG0(LOG_ERR
, "Failed to find a free app UUID.\n");
5835 return NECP_NULL_SERVICE_ID
;
5837 necp_last_app_uuid_id
= NECP_FIRST_VALID_APP_ID
;
5840 newid
= necp_last_app_uuid_id
;
5841 } while (necp_uuid_lookup_uuid_with_app_id_locked(newid
) != NULL
); // If already used, keep trying
5844 if (newid
== NECP_NULL_SERVICE_ID
) {
5845 NECPLOG0(LOG_ERR
, "Allocate uuid ID failed.\n");
5846 return NECP_NULL_SERVICE_ID
;
5852 static struct necp_uuid_id_mapping
*
5853 necp_uuid_lookup_app_id_locked(uuid_t uuid
)
5855 struct necp_uuid_id_mapping
*searchentry
= NULL
;
5856 struct necp_uuid_id_mapping
*foundentry
= NULL
;
5858 LIST_FOREACH(searchentry
, APPUUIDHASH(uuid
), chain
) {
5859 if (uuid_compare(searchentry
->uuid
, uuid
) == 0) {
5860 foundentry
= searchentry
;
5868 static struct necp_uuid_id_mapping
*
5869 necp_uuid_lookup_uuid_with_app_id_locked(u_int32_t local_id
)
5871 struct necp_uuid_id_mapping
*searchentry
= NULL
;
5872 struct necp_uuid_id_mapping
*foundentry
= NULL
;
5874 struct necp_uuid_id_mapping_head
*uuid_list_head
= NULL
;
5875 for (uuid_list_head
= &necp_uuid_app_id_hashtbl
[necp_uuid_app_id_hash_num_buckets
- 1]; uuid_list_head
>= necp_uuid_app_id_hashtbl
; uuid_list_head
--) {
5876 LIST_FOREACH(searchentry
, uuid_list_head
, chain
) {
5877 if (searchentry
->id
== local_id
) {
5878 foundentry
= searchentry
;
5888 necp_create_uuid_app_id_mapping(uuid_t uuid
, bool *allocated_mapping
, bool uuid_policy_table
)
5890 u_int32_t local_id
= 0;
5891 struct necp_uuid_id_mapping
*existing_mapping
= NULL
;
5893 LCK_RW_ASSERT(&necp_kernel_policy_lock
, LCK_RW_ASSERT_EXCLUSIVE
);
5895 if (allocated_mapping
) {
5896 *allocated_mapping
= FALSE
;
5899 existing_mapping
= necp_uuid_lookup_app_id_locked(uuid
);
5900 if (existing_mapping
!= NULL
) {
5901 local_id
= existing_mapping
->id
;
5902 os_ref_retain_locked(&existing_mapping
->refcount
);
5903 if (uuid_policy_table
) {
5904 existing_mapping
->table_usecount
++;
5907 struct necp_uuid_id_mapping
*new_mapping
= NULL
;
5908 MALLOC(new_mapping
, struct necp_uuid_id_mapping
*, sizeof(*new_mapping
), M_NECP
, M_WAITOK
);
5909 if (new_mapping
!= NULL
) {
5910 uuid_copy(new_mapping
->uuid
, uuid
);
5911 new_mapping
->id
= necp_get_new_uuid_id(false);
5912 os_ref_init(&new_mapping
->refcount
, &necp_refgrp
);
5913 if (uuid_policy_table
) {
5914 new_mapping
->table_usecount
= 1;
5916 new_mapping
->table_usecount
= 0;
5919 LIST_INSERT_HEAD(APPUUIDHASH(uuid
), new_mapping
, chain
);
5921 if (allocated_mapping
) {
5922 *allocated_mapping
= TRUE
;
5925 local_id
= new_mapping
->id
;
5933 necp_remove_uuid_app_id_mapping(uuid_t uuid
, bool *removed_mapping
, bool uuid_policy_table
)
5935 struct necp_uuid_id_mapping
*existing_mapping
= NULL
;
5937 LCK_RW_ASSERT(&necp_kernel_policy_lock
, LCK_RW_ASSERT_EXCLUSIVE
);
5939 if (removed_mapping
) {
5940 *removed_mapping
= FALSE
;
5943 existing_mapping
= necp_uuid_lookup_app_id_locked(uuid
);
5944 if (existing_mapping
!= NULL
) {
5945 if (uuid_policy_table
) {
5946 existing_mapping
->table_usecount
--;
5948 if (os_ref_release_locked(&existing_mapping
->refcount
) == 0) {
5949 LIST_REMOVE(existing_mapping
, chain
);
5950 FREE(existing_mapping
, M_NECP
);
5951 if (removed_mapping
) {
5952 *removed_mapping
= TRUE
;
5961 static struct necp_uuid_id_mapping
*
5962 necp_uuid_get_null_service_id_mapping(void)
5964 static struct necp_uuid_id_mapping null_mapping
;
5965 uuid_clear(null_mapping
.uuid
);
5966 null_mapping
.id
= NECP_NULL_SERVICE_ID
;
5968 return &null_mapping
;
5971 static struct necp_uuid_id_mapping
*
5972 necp_uuid_lookup_service_id_locked(uuid_t uuid
)
5974 struct necp_uuid_id_mapping
*searchentry
= NULL
;
5975 struct necp_uuid_id_mapping
*foundentry
= NULL
;
5977 if (uuid_is_null(uuid
)) {
5978 return necp_uuid_get_null_service_id_mapping();
5981 LIST_FOREACH(searchentry
, &necp_uuid_service_id_list
, chain
) {
5982 if (uuid_compare(searchentry
->uuid
, uuid
) == 0) {
5983 foundentry
= searchentry
;
5991 static struct necp_uuid_id_mapping
*
5992 necp_uuid_lookup_uuid_with_service_id_locked(u_int32_t local_id
)
5994 struct necp_uuid_id_mapping
*searchentry
= NULL
;
5995 struct necp_uuid_id_mapping
*foundentry
= NULL
;
5997 if (local_id
== NECP_NULL_SERVICE_ID
) {
5998 return necp_uuid_get_null_service_id_mapping();
6001 LIST_FOREACH(searchentry
, &necp_uuid_service_id_list
, chain
) {
6002 if (searchentry
->id
== local_id
) {
6003 foundentry
= searchentry
;
6012 necp_create_uuid_service_id_mapping(uuid_t uuid
)
6014 u_int32_t local_id
= 0;
6015 struct necp_uuid_id_mapping
*existing_mapping
= NULL
;
6017 if (uuid_is_null(uuid
)) {
6018 return NECP_NULL_SERVICE_ID
;
6021 LCK_RW_ASSERT(&necp_kernel_policy_lock
, LCK_RW_ASSERT_EXCLUSIVE
);
6023 existing_mapping
= necp_uuid_lookup_service_id_locked(uuid
);
6024 if (existing_mapping
!= NULL
) {
6025 local_id
= existing_mapping
->id
;
6026 os_ref_retain_locked(&existing_mapping
->refcount
);
6028 struct necp_uuid_id_mapping
*new_mapping
= NULL
;
6029 MALLOC(new_mapping
, struct necp_uuid_id_mapping
*, sizeof(*new_mapping
), M_NECP
, M_WAITOK
);
6030 if (new_mapping
!= NULL
) {
6031 uuid_copy(new_mapping
->uuid
, uuid
);
6032 new_mapping
->id
= necp_get_new_uuid_id(true);
6033 os_ref_init(&new_mapping
->refcount
, &necp_refgrp
);
6035 LIST_INSERT_HEAD(&necp_uuid_service_id_list
, new_mapping
, chain
);
6037 local_id
= new_mapping
->id
;
6045 necp_remove_uuid_service_id_mapping(uuid_t uuid
)
6047 struct necp_uuid_id_mapping
*existing_mapping
= NULL
;
6049 if (uuid_is_null(uuid
)) {
6053 LCK_RW_ASSERT(&necp_kernel_policy_lock
, LCK_RW_ASSERT_EXCLUSIVE
);
6055 existing_mapping
= necp_uuid_lookup_service_id_locked(uuid
);
6056 if (existing_mapping
!= NULL
) {
6057 if (os_ref_release_locked(&existing_mapping
->refcount
) == 0) {
6058 LIST_REMOVE(existing_mapping
, chain
);
6059 FREE(existing_mapping
, M_NECP
);
6069 necp_kernel_socket_policies_update_uuid_table(void)
6071 LCK_RW_ASSERT(&necp_kernel_policy_lock
, LCK_RW_ASSERT_EXCLUSIVE
);
6073 if (necp_uuid_app_id_mappings_dirty
) {
6074 if (proc_uuid_policy_kernel(PROC_UUID_POLICY_OPERATION_CLEAR
, NULL
, PROC_UUID_NECP_APP_POLICY
) < 0) {
6075 NECPLOG0(LOG_DEBUG
, "Error clearing uuids from policy table\n");
6079 if (necp_num_uuid_app_id_mappings
> 0) {
6080 struct necp_uuid_id_mapping_head
*uuid_list_head
= NULL
;
6081 for (uuid_list_head
= &necp_uuid_app_id_hashtbl
[necp_uuid_app_id_hash_num_buckets
- 1]; uuid_list_head
>= necp_uuid_app_id_hashtbl
; uuid_list_head
--) {
6082 struct necp_uuid_id_mapping
*mapping
= NULL
;
6083 LIST_FOREACH(mapping
, uuid_list_head
, chain
) {
6084 if (mapping
->table_usecount
> 0 &&
6085 proc_uuid_policy_kernel(PROC_UUID_POLICY_OPERATION_ADD
, mapping
->uuid
, PROC_UUID_NECP_APP_POLICY
) < 0) {
6086 NECPLOG0(LOG_DEBUG
, "Error adding uuid to policy table\n");
6092 necp_uuid_app_id_mappings_dirty
= FALSE
;
6098 #define NECP_KERNEL_VALID_IP_OUTPUT_CONDITIONS (NECP_KERNEL_CONDITION_ALL_INTERFACES | NECP_KERNEL_CONDITION_BOUND_INTERFACE | NECP_KERNEL_CONDITION_PROTOCOL | NECP_KERNEL_CONDITION_LOCAL_START | NECP_KERNEL_CONDITION_LOCAL_END | NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_REMOTE_START | NECP_KERNEL_CONDITION_REMOTE_END | NECP_KERNEL_CONDITION_REMOTE_PREFIX | NECP_KERNEL_CONDITION_POLICY_ID | NECP_KERNEL_CONDITION_LAST_INTERFACE | NECP_KERNEL_CONDITION_LOCAL_NETWORKS)
6099 static necp_kernel_policy_id
6100 necp_kernel_ip_output_policy_add(necp_policy_order order
, necp_policy_order suborder
, u_int32_t session_order
, int session_pid
, u_int32_t condition_mask
, u_int32_t condition_negated_mask
, necp_kernel_policy_id cond_policy_id
, ifnet_t cond_bound_interface
, u_int32_t cond_last_interface_index
, u_int16_t cond_protocol
, union necp_sockaddr_union
*cond_local_start
, union necp_sockaddr_union
*cond_local_end
, u_int8_t cond_local_prefix
, union necp_sockaddr_union
*cond_remote_start
, union necp_sockaddr_union
*cond_remote_end
, u_int8_t cond_remote_prefix
, necp_kernel_policy_result result
, necp_kernel_policy_result_parameter result_parameter
)
6102 struct necp_kernel_ip_output_policy
*new_kernel_policy
= NULL
;
6103 struct necp_kernel_ip_output_policy
*tmp_kernel_policy
= NULL
;
6105 MALLOC_ZONE(new_kernel_policy
, struct necp_kernel_ip_output_policy
*, sizeof(*new_kernel_policy
), M_NECP_IP_POLICY
, M_WAITOK
);
6106 if (new_kernel_policy
== NULL
) {
6110 memset(new_kernel_policy
, 0, sizeof(*new_kernel_policy
)); // M_ZERO is not supported for MALLOC_ZONE
6111 new_kernel_policy
->id
= necp_kernel_policy_get_new_id(false);
6112 new_kernel_policy
->suborder
= suborder
;
6113 new_kernel_policy
->order
= order
;
6114 new_kernel_policy
->session_order
= session_order
;
6115 new_kernel_policy
->session_pid
= session_pid
;
6117 // Sanitize condition mask
6118 new_kernel_policy
->condition_mask
= (condition_mask
& NECP_KERNEL_VALID_IP_OUTPUT_CONDITIONS
);
6119 if ((new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_ALL_INTERFACES
) && (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_BOUND_INTERFACE
)) {
6120 new_kernel_policy
->condition_mask
&= ~NECP_KERNEL_CONDITION_BOUND_INTERFACE
;
6122 if ((new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_END
) && (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_PREFIX
)) {
6123 new_kernel_policy
->condition_mask
&= ~NECP_KERNEL_CONDITION_LOCAL_PREFIX
;
6125 if ((new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_REMOTE_END
) && (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_REMOTE_PREFIX
)) {
6126 new_kernel_policy
->condition_mask
&= ~NECP_KERNEL_CONDITION_REMOTE_PREFIX
;
6128 new_kernel_policy
->condition_negated_mask
= condition_negated_mask
& new_kernel_policy
->condition_mask
;
6130 // Set condition values
6131 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_POLICY_ID
) {
6132 new_kernel_policy
->cond_policy_id
= cond_policy_id
;
6134 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_BOUND_INTERFACE
) {
6135 if (cond_bound_interface
) {
6136 ifnet_reference(cond_bound_interface
);
6138 new_kernel_policy
->cond_bound_interface
= cond_bound_interface
;
6140 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_LAST_INTERFACE
) {
6141 new_kernel_policy
->cond_last_interface_index
= cond_last_interface_index
;
6143 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_PROTOCOL
) {
6144 new_kernel_policy
->cond_protocol
= cond_protocol
;
6146 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_START
) {
6147 memcpy(&new_kernel_policy
->cond_local_start
, cond_local_start
, cond_local_start
->sa
.sa_len
);
6149 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_END
) {
6150 memcpy(&new_kernel_policy
->cond_local_end
, cond_local_end
, cond_local_end
->sa
.sa_len
);
6152 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_PREFIX
) {
6153 new_kernel_policy
->cond_local_prefix
= cond_local_prefix
;
6155 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_REMOTE_START
) {
6156 memcpy(&new_kernel_policy
->cond_remote_start
, cond_remote_start
, cond_remote_start
->sa
.sa_len
);
6158 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_REMOTE_END
) {
6159 memcpy(&new_kernel_policy
->cond_remote_end
, cond_remote_end
, cond_remote_end
->sa
.sa_len
);
6161 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_REMOTE_PREFIX
) {
6162 new_kernel_policy
->cond_remote_prefix
= cond_remote_prefix
;
6165 new_kernel_policy
->result
= result
;
6166 memcpy(&new_kernel_policy
->result_parameter
, &result_parameter
, sizeof(result_parameter
));
6169 NECPLOG(LOG_DEBUG
, "Added kernel policy: ip output, id=%d, mask=%x\n", new_kernel_policy
->id
, new_kernel_policy
->condition_mask
);
6171 LIST_INSERT_SORTED_THRICE_ASCENDING(&necp_kernel_ip_output_policies
, new_kernel_policy
, chain
, session_order
, order
, suborder
, tmp_kernel_policy
);
6173 return new_kernel_policy
? new_kernel_policy
->id
: 0;
6176 static struct necp_kernel_ip_output_policy
*
6177 necp_kernel_ip_output_policy_find(necp_kernel_policy_id policy_id
)
6179 struct necp_kernel_ip_output_policy
*kernel_policy
= NULL
;
6180 struct necp_kernel_ip_output_policy
*tmp_kernel_policy
= NULL
;
6182 if (policy_id
== 0) {
6186 LIST_FOREACH_SAFE(kernel_policy
, &necp_kernel_ip_output_policies
, chain
, tmp_kernel_policy
) {
6187 if (kernel_policy
->id
== policy_id
) {
6188 return kernel_policy
;
6196 necp_kernel_ip_output_policy_delete(necp_kernel_policy_id policy_id
)
6198 struct necp_kernel_ip_output_policy
*policy
= NULL
;
6200 LCK_RW_ASSERT(&necp_kernel_policy_lock
, LCK_RW_ASSERT_EXCLUSIVE
);
6202 policy
= necp_kernel_ip_output_policy_find(policy_id
);
6204 LIST_REMOVE(policy
, chain
);
6206 if (policy
->cond_bound_interface
) {
6207 ifnet_release(policy
->cond_bound_interface
);
6208 policy
->cond_bound_interface
= NULL
;
6211 FREE_ZONE(policy
, sizeof(*policy
), M_NECP_IP_POLICY
);
6219 necp_kernel_ip_output_policies_dump_all(void)
6222 struct necp_kernel_ip_output_policy
*policy
= NULL
;
6225 char result_string
[MAX_RESULT_STRING_LEN
];
6226 char proc_name_string
[MAXCOMLEN
+ 1];
6227 memset(result_string
, 0, MAX_RESULT_STRING_LEN
);
6228 memset(proc_name_string
, 0, MAXCOMLEN
+ 1);
6230 NECPLOG0(LOG_DEBUG
, "NECP IP Output Policies:\n");
6231 NECPLOG0(LOG_DEBUG
, "-----------\n");
6232 for (id_i
= 0; id_i
< NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS
; id_i
++) {
6233 NECPLOG(LOG_DEBUG
, " ID Bucket: %d\n", id_i
);
6234 for (policy_i
= 0; necp_kernel_ip_output_policies_map
[id_i
] != NULL
&& (necp_kernel_ip_output_policies_map
[id_i
])[policy_i
] != NULL
; policy_i
++) {
6235 policy
= (necp_kernel_ip_output_policies_map
[id_i
])[policy_i
];
6236 proc_name(policy
->session_pid
, proc_name_string
, MAXCOMLEN
);
6237 NECPLOG(LOG_DEBUG
, "\t%3d. Policy ID: %5d\tProcess: %10.10s\tOrder: %04d.%04d.%d\tMask: %5x\tResult: %s\n", policy_i
, policy
->id
, proc_name_string
, policy
->session_order
, policy
->order
, policy
->suborder
, policy
->condition_mask
, necp_get_result_description(result_string
, policy
->result
, policy
->result_parameter
));
6239 NECPLOG0(LOG_DEBUG
, "-----------\n");
static bool
necp_kernel_ip_output_policy_results_overlap(struct necp_kernel_ip_output_policy *upper_policy, struct necp_kernel_ip_output_policy *lower_policy)
{
    if (upper_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
        if (upper_policy->session_order != lower_policy->session_order) {
            // A skip cannot override a policy of a different session
            return FALSE;
        } else {
            if (upper_policy->result_parameter.skip_policy_order == 0 ||
                lower_policy->order >= upper_policy->result_parameter.skip_policy_order) {
                // This policy is beyond the skip
                return FALSE;
            } else {
                // This policy is inside the skip
                return TRUE;
            }
        }
    }

    // All other IP Output policy results (drop, tunnel, hard pass) currently overlap
    return TRUE;
}
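/*
 * Illustrative note (not part of the original sources): a worked example of the
 * skip-overlap rule above, using hypothetical order values. If an upper policy in
 * session 100 has result SKIP with skip_policy_order = 5, a lower policy in the
 * same session with order 3 falls inside the skip window (overlap == TRUE), while
 * a lower policy with order 7 is beyond the window (overlap == FALSE). A lower
 * policy from a different session is never overlapped by the skip.
 */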
static bool
necp_kernel_ip_output_policy_is_unnecessary(struct necp_kernel_ip_output_policy *policy, struct necp_kernel_ip_output_policy **policy_array, int valid_indices)
{
    bool can_skip = FALSE;
    u_int32_t highest_skip_session_order = 0;
    u_int32_t highest_skip_order = 0;
    int i;
    for (i = 0; i < valid_indices; i++) {
        struct necp_kernel_ip_output_policy *compared_policy = policy_array[i];

        // For policies in a skip window, we can't mark conflicting policies as unnecessary
        if (can_skip) {
            if (highest_skip_session_order != compared_policy->session_order ||
                (highest_skip_order != 0 && compared_policy->order >= highest_skip_order)) {
                // If we've moved on to the next session, or passed the skip window
                highest_skip_session_order = 0;
                highest_skip_order = 0;
                can_skip = FALSE;
            } else {
                // If this policy is also a skip, it can increase the skip window
                if (compared_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
                    if (compared_policy->result_parameter.skip_policy_order > highest_skip_order) {
                        highest_skip_order = compared_policy->result_parameter.skip_policy_order;
                    }
                }
                continue;
            }
        }

        if (compared_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
            // This policy is a skip. Set the skip window accordingly
            can_skip = TRUE;
            highest_skip_session_order = compared_policy->session_order;
            highest_skip_order = compared_policy->result_parameter.skip_policy_order;
        }

        // The result of the compared policy must be able to block out this policy result
        if (!necp_kernel_ip_output_policy_results_overlap(compared_policy, policy)) {
            continue;
        }

        // If new policy matches All Interfaces, compared policy must also
        if ((policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES) && !(compared_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES)) {
            continue;
        }

        // If new policy matches Local Networks, compared policy must also
        if ((policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_NETWORKS) && !(compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_NETWORKS)) {
            continue;
        }

        // Default makes lower policies unnecessary always
        if (compared_policy->condition_mask == 0) {
            return TRUE;
        }

        // Compared must be more general than policy, and include only conditions within policy
        if ((policy->condition_mask & compared_policy->condition_mask) != compared_policy->condition_mask) {
            continue;
        }

        // Negative conditions must match for the overlapping conditions
        if ((policy->condition_negated_mask & compared_policy->condition_mask) != (compared_policy->condition_negated_mask & compared_policy->condition_mask)) {
            continue;
        }

        if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID &&
            compared_policy->cond_policy_id != policy->cond_policy_id) {
            continue;
        }

        if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE &&
            compared_policy->cond_bound_interface != policy->cond_bound_interface) {
            continue;
        }

        if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL &&
            compared_policy->cond_protocol != policy->cond_protocol) {
            continue;
        }

        if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
            if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
                if (!necp_is_range_in_range((struct sockaddr *)&policy->cond_local_start, (struct sockaddr *)&policy->cond_local_end, (struct sockaddr *)&compared_policy->cond_local_start, (struct sockaddr *)&compared_policy->cond_local_end)) {
                    continue;
                }
            } else if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
                if (compared_policy->cond_local_prefix > policy->cond_local_prefix ||
                    !necp_is_addr_in_subnet((struct sockaddr *)&policy->cond_local_start, (struct sockaddr *)&compared_policy->cond_local_start, compared_policy->cond_local_prefix)) {
                    continue;
                }
            }
        }

        if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
            if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
                if (!necp_is_range_in_range((struct sockaddr *)&policy->cond_remote_start, (struct sockaddr *)&policy->cond_remote_end, (struct sockaddr *)&compared_policy->cond_remote_start, (struct sockaddr *)&compared_policy->cond_remote_end)) {
                    continue;
                }
            } else if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
                if (compared_policy->cond_remote_prefix > policy->cond_remote_prefix ||
                    !necp_is_addr_in_subnet((struct sockaddr *)&policy->cond_remote_start, (struct sockaddr *)&compared_policy->cond_remote_start, compared_policy->cond_remote_prefix)) {
                    continue;
                }
            }
        }

        return TRUE;
    }

    return FALSE;
}
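/*
 * Illustrative note (not part of the original sources): a policy is only declared
 * unnecessary when an earlier, more general policy already decides the same
 * traffic. For example (hypothetical values), an earlier policy matching remote
 * prefix 10.0.0.0/8 with result DROP makes a later policy matching 10.1.2.0/24
 * with an overlapping result unnecessary, because every packet the narrower
 * policy could match is already claimed by the broader, higher-order one.
 */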
static bool
necp_kernel_ip_output_policies_reprocess(void)
{
    int i;
    int bucket_allocation_counts[NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS];
    int bucket_current_free_index[NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS];
    struct necp_kernel_ip_output_policy *kernel_policy = NULL;

    LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

    // Reset mask and counts
    necp_kernel_ip_output_policies_condition_mask = 0;
    necp_kernel_ip_output_policies_count = 0;
    necp_kernel_ip_output_policies_non_id_count = 0;

    for (i = 0; i < NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS; i++) {
        if (necp_kernel_ip_output_policies_map[i] != NULL) {
            FREE(necp_kernel_ip_output_policies_map[i], M_NECP);
            necp_kernel_ip_output_policies_map[i] = NULL;
        }

        bucket_allocation_counts[i] = 0;
    }

    LIST_FOREACH(kernel_policy, &necp_kernel_ip_output_policies, chain) {
        // Update mask and counts
        necp_kernel_ip_output_policies_condition_mask |= kernel_policy->condition_mask;
        necp_kernel_ip_output_policies_count++;

        /* Update bucket counts:
         * Non-id and SKIP policies will be added to all buckets
         * Add local networks policy to all buckets for incoming IP
         */
        if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID) ||
            (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_NETWORKS) ||
            kernel_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
            for (i = 0; i < NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS; i++) {
                bucket_allocation_counts[i]++;
            }
        }
        if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID)) {
            necp_kernel_ip_output_policies_non_id_count++;
        } else {
            bucket_allocation_counts[NECP_IP_OUTPUT_MAP_ID_TO_BUCKET(kernel_policy->cond_policy_id)]++;
        }
    }

    for (i = 0; i < NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS; i++) {
        if (bucket_allocation_counts[i] > 0) {
            // Allocate a NULL-terminated array of policy pointers for each bucket
            MALLOC(necp_kernel_ip_output_policies_map[i], struct necp_kernel_ip_output_policy **, sizeof(struct necp_kernel_ip_output_policy *) * (bucket_allocation_counts[i] + 1), M_NECP, M_WAITOK);
            if (necp_kernel_ip_output_policies_map[i] == NULL) {
                goto fail;
            }

            // Initialize the first entry to NULL
            (necp_kernel_ip_output_policies_map[i])[0] = NULL;
        }
        bucket_current_free_index[i] = 0;
    }

    LIST_FOREACH(kernel_policy, &necp_kernel_ip_output_policies, chain) {
        // Insert pointers into map
        if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID) ||
            (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_NETWORKS) ||
            kernel_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
            for (i = 0; i < NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS; i++) {
                if (!necp_kernel_ip_output_policy_is_unnecessary(kernel_policy, necp_kernel_ip_output_policies_map[i], bucket_current_free_index[i])) {
                    (necp_kernel_ip_output_policies_map[i])[(bucket_current_free_index[i])] = kernel_policy;
                    bucket_current_free_index[i]++;
                    (necp_kernel_ip_output_policies_map[i])[(bucket_current_free_index[i])] = NULL;
                }
            }
        } else {
            i = NECP_IP_OUTPUT_MAP_ID_TO_BUCKET(kernel_policy->cond_policy_id);
            if (!necp_kernel_ip_output_policy_is_unnecessary(kernel_policy, necp_kernel_ip_output_policies_map[i], bucket_current_free_index[i])) {
                (necp_kernel_ip_output_policies_map[i])[(bucket_current_free_index[i])] = kernel_policy;
                bucket_current_free_index[i]++;
                (necp_kernel_ip_output_policies_map[i])[(bucket_current_free_index[i])] = NULL;
            }
        }
    }
    necp_kernel_ip_output_policies_dump_all();
    return TRUE;

fail:
    // Free memory, reset mask to 0
    necp_kernel_ip_output_policies_condition_mask = 0;
    necp_kernel_ip_output_policies_count = 0;
    necp_kernel_ip_output_policies_non_id_count = 0;
    for (i = 0; i < NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS; i++) {
        if (necp_kernel_ip_output_policies_map[i] != NULL) {
            FREE(necp_kernel_ip_output_policies_map[i], M_NECP);
            necp_kernel_ip_output_policies_map[i] = NULL;
        }
    }
    return FALSE;
}
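/*
 * Illustrative sketch (not part of the original sources): how a lookup might walk
 * one bucket after reprocessing, assuming only the map and bucket macro above.
 * The helper below is hypothetical and kept out of the build.
 */
#if 0
static struct necp_kernel_ip_output_policy *
necp_example_first_policy_for_id(necp_kernel_policy_id policy_id)
{
    // Policies in each bucket are kept in a NULL-terminated array, so a matcher
    // simply walks entries until it hits NULL.
    struct necp_kernel_ip_output_policy **bucket =
        necp_kernel_ip_output_policies_map[NECP_IP_OUTPUT_MAP_ID_TO_BUCKET(policy_id)];
    for (int i = 0; bucket != NULL && bucket[i] != NULL; i++) {
        if (!(bucket[i]->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID) ||
            bucket[i]->cond_policy_id == policy_id) {
            return bucket[i];
        }
    }
    return NULL;
}
#endif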
// Outbound Policy Matching
// ---------------------
struct substring {
    char *string;
    size_t length;
};

static struct substring
necp_trim_dots_and_stars(char *string, size_t length)
{
    struct substring sub;
    sub.string = string;
    sub.length = string ? length : 0;

    while (sub.length && (sub.string[0] == '.' || sub.string[0] == '*')) {
        sub.string++;
        sub.length--;
    }

    while (sub.length && (sub.string[sub.length - 1] == '.' || sub.string[sub.length - 1] == '*')) {
        sub.length--;
    }

    return sub;
}
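/*
 * Illustrative note (not part of the original sources): the trimming above maps,
 * for example, the hypothetical input "*.example.com." (length 14) to the
 * substring "example.com" (length 11). Only leading and trailing '.' and '*'
 * characters are removed; interior dots are preserved.
 */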
static char *
necp_create_trimmed_domain(char *string, size_t length)
{
    char *trimmed_domain = NULL;
    struct substring sub = necp_trim_dots_and_stars(string, length);

    MALLOC(trimmed_domain, char *, sub.length + 1, M_NECP, M_WAITOK);
    if (trimmed_domain == NULL) {
        return NULL;
    }

    memcpy(trimmed_domain, sub.string, sub.length);
    trimmed_domain[sub.length] = 0;

    return trimmed_domain;
}
static inline int
necp_count_dots(char *string, size_t length)
{
    int dot_count = 0;
    size_t i = 0;

    for (i = 0; i < length; i++) {
        if (string[i] == '.') {
            dot_count++;
        }
    }

    return dot_count;
}
static bool
necp_check_suffix(struct substring parent, struct substring suffix, bool require_dot_before_suffix)
{
    if (parent.length <= suffix.length) {
        return FALSE;
    }

    size_t length_difference = (parent.length - suffix.length);

    if (require_dot_before_suffix) {
        if (((char *)(parent.string + length_difference - 1))[0] != '.') {
            return FALSE;
        }
    }

    // strncasecmp does case-insensitive check for all UTF-8 strings (ignores non-ASCII characters)
    return strncasecmp(parent.string + length_difference, suffix.string, suffix.length) == 0;
}
static bool
necp_hostname_matches_domain(struct substring hostname_substring, u_int8_t hostname_dot_count, char *domain, u_int8_t domain_dot_count)
{
    if (hostname_substring.string == NULL || domain == NULL) {
        return hostname_substring.string == domain;
    }

    struct substring domain_substring;
    domain_substring.string = domain;
    domain_substring.length = strlen(domain);

    if (hostname_dot_count == domain_dot_count) {
        // strncasecmp does case-insensitive check for all UTF-8 strings (ignores non-ASCII characters)
        if (hostname_substring.length == domain_substring.length &&
            strncasecmp(hostname_substring.string, domain_substring.string, hostname_substring.length) == 0) {
            return TRUE;
        }
    } else if (domain_dot_count < hostname_dot_count) {
        if (necp_check_suffix(hostname_substring, domain_substring, TRUE)) {
            return TRUE;
        }
    }

    return FALSE;
}
bool
net_domain_contains_hostname(char *hostname_string, char *domain_string)
{
    if (hostname_string == NULL ||
        domain_string == NULL) {
        return false;
    }

    struct substring hostname_substring;
    hostname_substring.string = hostname_string;
    hostname_substring.length = strlen(hostname_string);

    return necp_hostname_matches_domain(hostname_substring,
               necp_count_dots(hostname_string, hostname_substring.length),
               domain_string,
               necp_count_dots(domain_string, strlen(domain_string)));
}
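/*
 * Illustrative note (not part of the original sources): with the helpers above, a
 * hostname matches a domain either exactly (same dot count, case-insensitive
 * compare) or as a dot-delimited suffix when the domain has fewer dots. For
 * example (hypothetical names), "mail.example.com" matches "example.com" via the
 * suffix path, "example.com" matches "example.com" via the equal-dot-count path,
 * but "www.badexample.com" does not match "example.com" because the character
 * immediately before the suffix is 'd', not '.'.
 */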
#define NECP_MAX_STRING_LEN 1024

static char *
necp_copy_string(char *string, size_t length)
{
    char *copied_string = NULL;

    if (length > NECP_MAX_STRING_LEN) {
        return NULL;
    }

    MALLOC(copied_string, char *, length + 1, M_NECP, M_WAITOK);
    if (copied_string == NULL) {
        return NULL;
    }

    memcpy(copied_string, string, length);
    copied_string[length] = 0;

    return copied_string;
}
static u_int32_t
necp_get_primary_direct_interface_index(void)
{
    u_int32_t interface_index = IFSCOPE_NONE;

    ifnet_head_lock_shared();
    struct ifnet *ordered_interface = NULL;
    TAILQ_FOREACH(ordered_interface, &ifnet_ordered_head, if_ordered_link) {
        const u_int8_t functional_type = if_functional_type(ordered_interface, TRUE);
        if (functional_type != IFRTYPE_FUNCTIONAL_UNKNOWN &&
            functional_type != IFRTYPE_FUNCTIONAL_LOOPBACK) {
            // All known, non-loopback functional types represent direct physical interfaces (Wi-Fi, Cellular, Wired)
            interface_index = ordered_interface->if_index;
            break;
        }
    }
    ifnet_head_done();

    return interface_index;
}
static void
necp_get_parent_cred_result(proc_t proc, struct necp_socket_info *info)
{
    task_t task = proc_task(proc ? proc : current_proc());
    coalition_t coal = task_get_coalition(task, COALITION_TYPE_JETSAM);

    if (coal == COALITION_NULL || coalition_is_leader(task, coal)) {
        // No parent, nothing to do
        return;
    }

    task_t lead_task = coalition_get_leader(coal);
    if (lead_task != NULL) {
        proc_t lead_proc = get_bsdtask_info(lead_task);
        if (lead_proc != NULL) {
            kauth_cred_t lead_cred = kauth_cred_proc_ref(lead_proc);
            if (lead_cred != NULL) {
                errno_t cred_result = priv_check_cred(lead_cred, PRIV_NET_PRIVILEGED_NECP_MATCH, 0);
                kauth_cred_unref(&lead_cred);
                info->cred_result = cred_result;
            }
        }
        task_deallocate(lead_task);
    }
}
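/*
 * Illustrative note (not part of the original sources): the fallback above only
 * fires when the process belongs to a jetsam coalition it does not lead. For a
 * hypothetical app extension whose coalition leader is the containing app, the
 * PRIV_NET_PRIVILEGED_NECP_MATCH check is re-run against the leader's credential,
 * so an entitled parent can satisfy an entitlement condition on behalf of its
 * unentitled children.
 */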
#define NECP_KERNEL_ADDRESS_TYPE_CONDITIONS (NECP_KERNEL_CONDITION_LOCAL_START | NECP_KERNEL_CONDITION_LOCAL_END | NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_REMOTE_START | NECP_KERNEL_CONDITION_REMOTE_END | NECP_KERNEL_CONDITION_REMOTE_PREFIX | NECP_KERNEL_CONDITION_LOCAL_EMPTY | NECP_KERNEL_CONDITION_REMOTE_EMPTY | NECP_KERNEL_CONDITION_LOCAL_NETWORKS)
static void
necp_application_fillout_info_locked(uuid_t application_uuid, uuid_t real_application_uuid, char *account, char *domain, pid_t pid, uid_t uid, u_int16_t protocol, u_int32_t bound_interface_index, u_int32_t traffic_class, union necp_sockaddr_union *local_addr, union necp_sockaddr_union *remote_addr, u_int16_t local_port, u_int16_t remote_port, bool has_client, proc_t proc, u_int32_t drop_order, u_int32_t client_flags, struct necp_socket_info *info)
{
    memset(info, 0, sizeof(struct necp_socket_info));

    info->pid = pid;
    info->uid = uid;
    info->protocol = protocol;
    info->bound_interface_index = bound_interface_index;
    info->traffic_class = traffic_class;
    info->has_client = has_client;
    info->drop_order = drop_order;
    info->client_flags = client_flags;

    if (necp_kernel_application_policies_condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT && proc != NULL) {
        info->cred_result = priv_check_cred(proc_ucred(proc), PRIV_NET_PRIVILEGED_NECP_MATCH, 0);
        if (info->cred_result != 0) {
            // Process does not have entitlement, check the parent process
            necp_get_parent_cred_result(proc, info);
        }
    }

    if (necp_kernel_application_policies_condition_mask & NECP_KERNEL_CONDITION_PLATFORM_BINARY && proc != NULL) {
        info->is_platform_binary = csproc_get_platform_binary(proc) ? true : false;
    }

    if (necp_kernel_application_policies_condition_mask & NECP_KERNEL_CONDITION_APP_ID && !uuid_is_null(application_uuid)) {
        struct necp_uuid_id_mapping *existing_mapping = necp_uuid_lookup_app_id_locked(application_uuid);
        if (existing_mapping) {
            info->application_id = existing_mapping->id;
        }
    }

    if (necp_kernel_application_policies_condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID && !uuid_is_null(real_application_uuid)) {
        if (uuid_compare(application_uuid, real_application_uuid) == 0) {
            info->real_application_id = info->application_id;
        } else {
            struct necp_uuid_id_mapping *existing_mapping = necp_uuid_lookup_app_id_locked(real_application_uuid);
            if (existing_mapping) {
                info->real_application_id = existing_mapping->id;
            }
        }
    }

    if (necp_kernel_application_policies_condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID && account != NULL) {
        struct necp_string_id_mapping *existing_mapping = necp_lookup_string_to_id_locked(&necp_account_id_list, account);
        if (existing_mapping) {
            info->account_id = existing_mapping->id;
        }
    }

    if (necp_kernel_application_policies_condition_mask & NECP_KERNEL_CONDITION_DOMAIN) {
        info->domain = domain;
    }

    if (necp_kernel_application_policies_condition_mask & NECP_KERNEL_ADDRESS_TYPE_CONDITIONS) {
        if (local_addr && local_addr->sa.sa_len > 0) {
            memcpy(&info->local_addr, local_addr, local_addr->sa.sa_len);
            if (local_port != 0) {
                info->local_addr.sin6.sin6_port = local_port;
            }
        } else if (local_port != 0) {
            info->local_addr.sin6.sin6_len = sizeof(struct sockaddr_in6);
            info->local_addr.sin6.sin6_family = AF_INET6;
            info->local_addr.sin6.sin6_port = local_port;
        }
        if (remote_addr && remote_addr->sa.sa_len > 0) {
            memcpy(&info->remote_addr, remote_addr, remote_addr->sa.sa_len);
            if (remote_port != 0) {
                info->remote_addr.sin6.sin6_port = remote_port;
            }
        } else if (remote_port != 0) {
            info->remote_addr.sin6.sin6_len = sizeof(struct sockaddr_in6);
            info->remote_addr.sin6.sin6_family = AF_INET6;
            info->remote_addr.sin6.sin6_port = remote_port;
        }
    }
}
static void
necp_send_application_interface_denied_event(pid_t pid, uuid_t proc_uuid, u_int32_t if_functional_type)
{
    struct kev_netpolicy_ifdenied ev_ifdenied;

    bzero(&ev_ifdenied, sizeof(ev_ifdenied));

    ev_ifdenied.ev_data.epid = pid;
    uuid_copy(ev_ifdenied.ev_data.euuid, proc_uuid);
    ev_ifdenied.ev_if_functional_type = if_functional_type;

    netpolicy_post_msg(KEV_NETPOLICY_IFDENIED, &ev_ifdenied.ev_data, sizeof(ev_ifdenied));
}
extern char *proc_name_address(void *p);

#define NECP_VERIFY_DELEGATION_ENTITLEMENT(_p, _d) \
	if (!has_checked_delegation_entitlement) { \
	        has_delegation_entitlement = (priv_check_cred(proc_ucred(_p), PRIV_NET_PRIVILEGED_SOCKET_DELEGATE, 0) == 0); \
	        has_checked_delegation_entitlement = TRUE; \
	} \
	if (!has_delegation_entitlement) { \
	        NECPLOG(LOG_ERR, "%s(%d) does not hold the necessary entitlement to delegate network traffic for other processes by %s", \
	            proc_name_address(_p), proc_pid(_p), _d); \
	        break; \
	}
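/*
 * Illustrative sketch (not part of the original sources): the macro above is meant
 * to be expanded inside a switch case, so its trailing break both ends the case and
 * abandons the requested delegation when the entitlement check fails. A hypothetical
 * expansion for the "pid" parameter reads roughly:
 *
 *   if (!has_checked_delegation_entitlement) {
 *       has_delegation_entitlement =
 *           (priv_check_cred(proc_ucred(proc), PRIV_NET_PRIVILEGED_SOCKET_DELEGATE, 0) == 0);
 *       has_checked_delegation_entitlement = TRUE;
 *   }
 *   if (!has_delegation_entitlement) {
 *       NECPLOG(LOG_ERR, "...does not hold the necessary entitlement...by %s", ..., "pid");
 *       break;
 *   }
 */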
static int
necp_application_find_policy_match_internal(proc_t proc,
    u_int8_t *parameters,
    u_int32_t parameters_size,
    struct necp_aggregate_result *returned_result,
    u_int32_t *flags,
    u_int32_t *reason,
    u_int required_interface_index,
    const union necp_sockaddr_union *override_local_addr,
    const union necp_sockaddr_union *override_remote_addr,
    struct necp_client_endpoint *returned_v4_gateway,
    struct necp_client_endpoint *returned_v6_gateway,
    struct rtentry **returned_route, bool ignore_address,
    bool has_client)
{
    int error = 0;
    size_t offset = 0;

    struct necp_kernel_socket_policy *matched_policy = NULL;
    struct necp_socket_info info;
    necp_kernel_policy_filter filter_control_unit = 0;
    necp_kernel_policy_result service_action = 0;
    necp_kernel_policy_service service = { 0, 0 };

    u_int16_t protocol = 0;
    u_int32_t bound_interface_index = required_interface_index;
    u_int32_t traffic_class = 0;
    u_int32_t client_flags = 0;
    union necp_sockaddr_union local_addr;
    union necp_sockaddr_union remote_addr;
    bool no_remote_addr = FALSE;
    u_int8_t remote_family = 0;
    bool no_local_addr = FALSE;
    u_int16_t local_port = 0;
    u_int16_t remote_port = 0;
    necp_drop_all_bypass_check_result_t drop_all_bypass = NECP_DROP_ALL_BYPASS_CHECK_RESULT_NONE;

    if (override_local_addr) {
        memcpy(&local_addr, override_local_addr, sizeof(local_addr));
    } else {
        memset(&local_addr, 0, sizeof(local_addr));
    }
    if (override_remote_addr) {
        memcpy(&remote_addr, override_remote_addr, sizeof(remote_addr));
    } else {
        memset(&remote_addr, 0, sizeof(remote_addr));
    }

    // Initialize UID, PID, and UUIDs to the current process
    uid_t uid = kauth_cred_getuid(proc_ucred(proc));
    pid_t pid = proc_pid(proc);
    uuid_t application_uuid;
    uuid_clear(application_uuid);
    uuid_t real_application_uuid;
    uuid_clear(real_application_uuid);
    proc_getexecutableuuid(proc, real_application_uuid, sizeof(real_application_uuid));
    uuid_copy(application_uuid, real_application_uuid);

    char *domain = NULL;
    char *account = NULL;

#define NECP_MAX_REQUIRED_AGENTS 16
    u_int32_t num_required_agent_types = 0;
    struct necp_client_parameter_netagent_type required_agent_types[NECP_MAX_REQUIRED_AGENTS];
    memset(&required_agent_types, 0, sizeof(required_agent_types));

    u_int32_t netagent_ids[NECP_MAX_NETAGENTS];
    u_int32_t netagent_use_flags[NECP_MAX_NETAGENTS];
    memset(&netagent_ids, 0, sizeof(netagent_ids));
    memset(&netagent_use_flags, 0, sizeof(netagent_use_flags));
    int netagent_cursor;

    bool has_checked_delegation_entitlement = FALSE;
    bool has_delegation_entitlement = FALSE;

    if (returned_result == NULL) {
        return EINVAL;
    }

    if (returned_v4_gateway != NULL) {
        memset(returned_v4_gateway, 0, sizeof(struct necp_client_endpoint));
    }

    if (returned_v6_gateway != NULL) {
        memset(returned_v6_gateway, 0, sizeof(struct necp_client_endpoint));
    }

    memset(returned_result, 0, sizeof(struct necp_aggregate_result));

    u_int32_t drop_order = necp_process_drop_order(proc_ucred(proc));

    necp_kernel_policy_result drop_dest_policy_result = NECP_KERNEL_POLICY_RESULT_NONE;

    lck_rw_lock_shared(&necp_kernel_policy_lock);
    if (necp_kernel_application_policies_count == 0) {
        if (necp_drop_all_order > 0 || drop_order > 0) {
            returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_DROP;
            lck_rw_done(&necp_kernel_policy_lock);
            return 0;
        }
    }
    lck_rw_done(&necp_kernel_policy_lock);
    while ((offset + sizeof(u_int8_t) + sizeof(u_int32_t)) <= parameters_size) {
        u_int8_t type = necp_buffer_get_tlv_type(parameters, offset);
        u_int32_t length = necp_buffer_get_tlv_length(parameters, offset);

        if (length > (parameters_size - (offset + sizeof(u_int8_t) + sizeof(u_int32_t)))) {
            // If the length is larger than what can fit in the remaining parameters size, bail
            NECPLOG(LOG_ERR, "Invalid TLV length (%u)", length);
            break;
        }

        if (length > 0) {
            u_int8_t *value = necp_buffer_get_tlv_value(parameters, offset, NULL);
            if (value != NULL) {
                switch (type) {
                case NECP_CLIENT_PARAMETER_APPLICATION: {
                    if (length >= sizeof(uuid_t)) {
                        if (uuid_compare(application_uuid, value) == 0) {
                            // No delegation change, just break
                            break;
                        }

                        NECP_VERIFY_DELEGATION_ENTITLEMENT(proc, "euuid");

                        uuid_copy(application_uuid, value);
                    }
                    break;
                }
                case NECP_CLIENT_PARAMETER_REAL_APPLICATION: {
                    if (length >= sizeof(uuid_t)) {
                        if (uuid_compare(real_application_uuid, value) == 0) {
                            // No delegation change, just break
                            break;
                        }

                        NECP_VERIFY_DELEGATION_ENTITLEMENT(proc, "uuid");

                        uuid_copy(real_application_uuid, value);
                    }
                    break;
                }
                case NECP_CLIENT_PARAMETER_PID: {
                    if (length >= sizeof(pid_t)) {
                        if (memcmp(&pid, value, sizeof(pid_t)) == 0) {
                            // No delegation change, just break
                            break;
                        }

                        NECP_VERIFY_DELEGATION_ENTITLEMENT(proc, "pid");

                        memcpy(&pid, value, sizeof(pid_t));
                    }
                    break;
                }
                case NECP_CLIENT_PARAMETER_UID: {
                    if (length >= sizeof(uid_t)) {
                        if (memcmp(&uid, value, sizeof(uid_t)) == 0) {
                            // No delegation change, just break
                            break;
                        }

                        NECP_VERIFY_DELEGATION_ENTITLEMENT(proc, "uid");

                        memcpy(&uid, value, sizeof(uid_t));
                    }
                    break;
                }
                case NECP_CLIENT_PARAMETER_DOMAIN: {
                    domain = (char *)value;
                    domain[length - 1] = 0;
                    break;
                }
                case NECP_CLIENT_PARAMETER_ACCOUNT: {
                    account = (char *)value;
                    account[length - 1] = 0;
                    break;
                }
                case NECP_CLIENT_PARAMETER_TRAFFIC_CLASS: {
                    if (length >= sizeof(u_int32_t)) {
                        memcpy(&traffic_class, value, sizeof(u_int32_t));
                    }
                    break;
                }
                case NECP_CLIENT_PARAMETER_IP_PROTOCOL: {
                    if (length >= sizeof(u_int16_t)) {
                        memcpy(&protocol, value, sizeof(u_int16_t));
                    } else if (length >= sizeof(u_int8_t)) {
                        memcpy(&protocol, value, sizeof(u_int8_t));
                    }
                    break;
                }
                case NECP_CLIENT_PARAMETER_BOUND_INTERFACE: {
                    if (length <= IFXNAMSIZ && length > 0) {
                        ifnet_t bound_interface = NULL;
                        char interface_name[IFXNAMSIZ];
                        memcpy(interface_name, value, length);
                        interface_name[length - 1] = 0; // Make sure the string is NULL terminated
                        if (ifnet_find_by_name(interface_name, &bound_interface) == 0) {
                            bound_interface_index = bound_interface->if_index;
                            ifnet_release(bound_interface);
                        }
                    }
                    break;
                }
                case NECP_CLIENT_PARAMETER_LOCAL_ADDRESS: {
                    if (ignore_address || override_local_addr) {
                        break;
                    }

                    if (length >= sizeof(struct necp_policy_condition_addr)) {
                        struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value;
                        if (necp_address_is_valid(&address_struct->address.sa)) {
                            memcpy(&local_addr, &address_struct->address, sizeof(address_struct->address));
                        }
                    }
                    break;
                }
                case NECP_CLIENT_PARAMETER_REMOTE_ADDRESS: {
                    if (ignore_address || override_remote_addr) {
                        break;
                    }

                    if (length >= sizeof(struct necp_policy_condition_addr)) {
                        struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value;
                        if (necp_address_is_valid(&address_struct->address.sa)) {
                            memcpy(&remote_addr, &address_struct->address, sizeof(address_struct->address));
                        }
                    }
                    break;
                }
                case NECP_CLIENT_PARAMETER_LOCAL_ENDPOINT: {
                    if (ignore_address || override_local_addr) {
                        break;
                    }

                    if (length >= sizeof(struct necp_client_endpoint)) {
                        struct necp_client_endpoint *endpoint = (struct necp_client_endpoint *)(void *)value;
                        if (endpoint->u.endpoint.endpoint_family == AF_UNSPEC &&
                            endpoint->u.endpoint.endpoint_port != 0) {
                            // Save port
                            local_port = endpoint->u.endpoint.endpoint_port;
                        }
                    }
                    break;
                }
                case NECP_CLIENT_PARAMETER_REMOTE_ENDPOINT: {
                    if (ignore_address || override_remote_addr) {
                        break;
                    }

                    if (length >= sizeof(struct necp_client_endpoint)) {
                        struct necp_client_endpoint *endpoint = (struct necp_client_endpoint *)(void *)value;
                        if (endpoint->u.endpoint.endpoint_family == AF_UNSPEC &&
                            endpoint->u.endpoint.endpoint_port != 0) {
                            // Save port
                            remote_port = endpoint->u.endpoint.endpoint_port;
                        }
                    }
                    break;
                }
                case NECP_CLIENT_PARAMETER_FLAGS: {
                    if (length >= sizeof(client_flags)) {
                        memcpy(&client_flags, value, sizeof(client_flags));
                    }
                    break;
                }
                case NECP_CLIENT_PARAMETER_REQUIRE_AGENT_TYPE:
                case NECP_CLIENT_PARAMETER_PREFER_AGENT_TYPE: {
                    if (num_required_agent_types >= NECP_MAX_REQUIRED_AGENTS) {
                        break;
                    }
                    if (length >= sizeof(struct necp_client_parameter_netagent_type)) {
                        memcpy(&required_agent_types[num_required_agent_types], value, sizeof(struct necp_client_parameter_netagent_type));
                        num_required_agent_types++;
                    }
                    break;
                }
                default: {
                    break;
                }
                }
            }
        }

        offset += sizeof(u_int8_t) + sizeof(u_int32_t) + length;
    }
    // Lock
    lck_rw_lock_shared(&necp_kernel_policy_lock);

    u_int32_t route_rule_id_array[MAX_AGGREGATE_ROUTE_RULES];
    size_t route_rule_id_array_count = 0;
    necp_application_fillout_info_locked(application_uuid, real_application_uuid, account, domain, pid, uid, protocol, bound_interface_index, traffic_class, &local_addr, &remote_addr, local_port, remote_port, has_client, proc, drop_order, client_flags, &info);
    matched_policy = necp_socket_find_policy_match_with_info_locked(necp_kernel_socket_policies_app_layer_map, &info, &filter_control_unit, route_rule_id_array, &route_rule_id_array_count, MAX_AGGREGATE_ROUTE_RULES, &service_action, &service, netagent_ids, netagent_use_flags, NECP_MAX_NETAGENTS, required_agent_types, num_required_agent_types, proc, NULL, NULL, &drop_dest_policy_result, &drop_all_bypass);
    if (matched_policy) {
        returned_result->policy_id = matched_policy->id;
        returned_result->routing_result = matched_policy->result;
        memcpy(&returned_result->routing_result_parameter, &matched_policy->result_parameter, sizeof(returned_result->routing_result_parameter));
    } else {
        bool drop_all = false;
        if (necp_drop_all_order > 0 || info.drop_order > 0 || drop_dest_policy_result == NECP_KERNEL_POLICY_RESULT_DROP) {
            // Mark socket as a drop if drop_all is set
            drop_all = true;
            if (drop_all_bypass == NECP_DROP_ALL_BYPASS_CHECK_RESULT_NONE) {
                drop_all_bypass = necp_check_drop_all_bypass_result(proc);
            }
        }
        if (drop_all && drop_all_bypass == NECP_DROP_ALL_BYPASS_CHECK_RESULT_FALSE) {
            returned_result->policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
            returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_DROP;
        } else {
            returned_result->policy_id = 0;
            returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_NONE;
        }
    }
    if (filter_control_unit == NECP_FILTER_UNIT_NO_FILTER) {
        returned_result->filter_control_unit = 0;
    } else {
        returned_result->filter_control_unit = filter_control_unit;
    }
    returned_result->service_action = service_action;

    // Handle trigger service
    if (service.identifier != 0) {
        struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(service.identifier);
        if (mapping != NULL) {
            struct necp_service_registration *service_registration = NULL;
            uuid_copy(returned_result->service_uuid, mapping->uuid);
            returned_result->service_data = service.data;
            if (service.identifier == NECP_NULL_SERVICE_ID) {
                // NULL service is always 'registered'
                returned_result->service_flags |= NECP_SERVICE_FLAGS_REGISTERED;
            } else {
                LIST_FOREACH(service_registration, &necp_registered_service_list, kernel_chain) {
                    if (service.identifier == service_registration->service_id) {
                        returned_result->service_flags |= NECP_SERVICE_FLAGS_REGISTERED;
                        break;
                    }
                }
            }
        }
    }

    // Handle netagents
    for (netagent_cursor = 0; netagent_cursor < NECP_MAX_NETAGENTS; netagent_cursor++) {
        struct necp_uuid_id_mapping *mapping = NULL;
        u_int32_t netagent_id = netagent_ids[netagent_cursor];
        if (netagent_id == 0) {
            break;
        }
        mapping = necp_uuid_lookup_uuid_with_service_id_locked(netagent_id);
        if (mapping != NULL) {
            uuid_copy(returned_result->netagents[netagent_cursor], mapping->uuid);
            returned_result->netagent_use_flags[netagent_cursor] = netagent_use_flags[netagent_cursor];
        }
    }
    // Do routing evaluation
    u_int output_bound_interface = bound_interface_index;
    if (returned_result->routing_result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED) {
        output_bound_interface = returned_result->routing_result_parameter.scoped_interface_index;
    } else if (returned_result->routing_result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL) {
        output_bound_interface = returned_result->routing_result_parameter.tunnel_interface_index;
    } else if (returned_result->routing_result == NECP_KERNEL_POLICY_RESULT_SCOPED_DIRECT) {
        output_bound_interface = necp_get_primary_direct_interface_index();
        if (output_bound_interface == IFSCOPE_NONE) {
            returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_DROP;
        } else {
            returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED;
            returned_result->routing_result_parameter.scoped_interface_index = output_bound_interface;
        }
    }

    if (local_addr.sa.sa_len == 0 ||
        (local_addr.sa.sa_family == AF_INET && local_addr.sin.sin_addr.s_addr == 0) ||
        (local_addr.sa.sa_family == AF_INET6 && IN6_IS_ADDR_UNSPECIFIED(&local_addr.sin6.sin6_addr))) {
        no_local_addr = TRUE;
    }

    if (remote_addr.sa.sa_len == 0 ||
        (remote_addr.sa.sa_family == AF_INET && remote_addr.sin.sin_addr.s_addr == 0) ||
        (remote_addr.sa.sa_family == AF_INET6 && IN6_IS_ADDR_UNSPECIFIED(&remote_addr.sin6.sin6_addr))) {
        no_remote_addr = TRUE;
        remote_family = remote_addr.sa.sa_family;
    }

    returned_result->routed_interface_index = 0;
    struct rtentry *rt = NULL;
    if (!no_local_addr && (client_flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER) != 0) {
        // Treat the output bound interface as the routed interface for local address
        // validation later.
        returned_result->routed_interface_index = output_bound_interface;
    } else {
        if (no_remote_addr) {
            memset(&remote_addr, 0, sizeof(remote_addr));
            if (remote_family == AF_INET6) {
                // Reset address to ::
                remote_addr.sa.sa_family = AF_INET6;
                remote_addr.sa.sa_len = sizeof(struct sockaddr_in6);
            } else {
                // Reset address to 0.0.0.0
                remote_addr.sa.sa_family = AF_INET;
                remote_addr.sa.sa_len = sizeof(struct sockaddr_in);
            }
        }

        rt = rtalloc1_scoped((struct sockaddr *)&remote_addr, 0, 0,
            output_bound_interface);

        if (remote_addr.sa.sa_family == AF_INET && rt != NULL &&
            IS_INTF_CLAT46(rt->rt_ifp)) {
            rtfree(rt);
            rt = NULL;
            returned_result->routed_interface_index = 0;
        }

        if (no_remote_addr && remote_family == AF_UNSPEC &&
            (rt == NULL || rt->rt_ifp == NULL)) {
            // Route lookup for default IPv4 failed, try IPv6

            // Cleanup old route if necessary
            if (rt != NULL) {
                rtfree(rt);
                rt = NULL;
            }

            // Reset address to ::
            memset(&remote_addr, 0, sizeof(remote_addr));
            remote_addr.sa.sa_family = AF_INET6;
            remote_addr.sa.sa_len = sizeof(struct sockaddr_in6);

            // Get route
            rt = rtalloc1_scoped((struct sockaddr *)&remote_addr, 0, 0,
                output_bound_interface);
        }

        if (rt != NULL &&
            rt->rt_ifp != NULL) {
            returned_result->routed_interface_index = rt->rt_ifp->if_index;
            /*
             * For local addresses, we allow the interface scope to be
             * either the loopback interface or the interface hosting the
             * local address.
             */
            if (bound_interface_index != IFSCOPE_NONE &&
                rt->rt_ifa != NULL && rt->rt_ifa->ifa_ifp &&
                (output_bound_interface == lo_ifp->if_index ||
                rt->rt_ifp->if_index == lo_ifp->if_index ||
                rt->rt_ifa->ifa_ifp->if_index == bound_interface_index)) {
                struct sockaddr_storage dst;
                unsigned int ifscope = bound_interface_index;

                /*
                 * Transform dst into the internal routing table form
                 */
                (void) sa_copy((struct sockaddr *)&remote_addr,
                    &dst, &ifscope);

                if ((rt->rt_ifp->if_index == lo_ifp->if_index) ||
                    rt_ifa_is_dst((struct sockaddr *)&dst, rt->rt_ifa)) {
                    returned_result->routed_interface_index =
                        bound_interface_index;
                }
            }
        }
    }

    if (returned_result->routed_interface_index != 0 &&
        returned_result->routed_interface_index != lo_ifp->if_index && // Loopback can accept any local address
        !no_local_addr) {
        // Transform local_addr into the ifaddr form
        // IPv6 Scope IDs are always embedded in the ifaddr list
        struct sockaddr_storage local_address_sanitized;
        u_int ifscope = IFSCOPE_NONE;
        (void)sa_copy(&local_addr.sa, &local_address_sanitized, &ifscope);
        SIN(&local_address_sanitized)->sin_port = 0;
        if (local_address_sanitized.ss_family == AF_INET6) {
            SIN6(&local_address_sanitized)->sin6_scope_id = 0;
        }

        // Validate local address on routed interface
        struct ifaddr *ifa = ifa_ifwithaddr_scoped((struct sockaddr *)&local_address_sanitized, returned_result->routed_interface_index);
        if (ifa == NULL) {
            // Interface address not found, reject route
            returned_result->routed_interface_index = 0;
            if (rt != NULL) {
                rtfree(rt);
                rt = NULL;
            }
        } else {
            ifaddr_release(ifa);
            ifa = NULL;
        }
    }
    if (flags != NULL) {
        if ((client_flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER) == 0) {
            // Check for local/direct
            bool is_local = FALSE;
            if (rt != NULL && (rt->rt_flags & RTF_LOCAL)) {
                is_local = TRUE;
            } else if (returned_result->routed_interface_index != 0 &&
                !no_remote_addr) {
                // Clean up the address before comparison with interface addresses

                // Transform remote_addr into the ifaddr form
                // IPv6 Scope IDs are always embedded in the ifaddr list
                struct sockaddr_storage remote_address_sanitized;
                u_int ifscope = IFSCOPE_NONE;
                (void)sa_copy(&remote_addr.sa, &remote_address_sanitized, &ifscope);
                SIN(&remote_address_sanitized)->sin_port = 0;
                if (remote_address_sanitized.ss_family == AF_INET6) {
                    SIN6(&remote_address_sanitized)->sin6_scope_id = 0;
                }

                // Check if remote address is an interface address
                struct ifaddr *ifa = ifa_ifwithaddr((struct sockaddr *)&remote_address_sanitized);
                if (ifa != NULL && ifa->ifa_ifp != NULL) {
                    u_int if_index_for_remote_addr = ifa->ifa_ifp->if_index;
                    if (if_index_for_remote_addr == returned_result->routed_interface_index ||
                        if_index_for_remote_addr == lo_ifp->if_index) {
                        is_local = TRUE;
                    }
                }
                if (ifa != NULL) {
                    ifaddr_release(ifa);
                    ifa = NULL;
                }
            }

            if (is_local) {
                *flags |= (NECP_CLIENT_RESULT_FLAG_IS_LOCAL | NECP_CLIENT_RESULT_FLAG_IS_DIRECT);
            } else {
                if (rt != NULL &&
                    !(rt->rt_flags & RTF_GATEWAY) &&
                    (rt->rt_ifa && rt->rt_ifa->ifa_ifp && !(rt->rt_ifa->ifa_ifp->if_flags & IFF_POINTOPOINT))) {
                    // Route is directly accessible
                    *flags |= NECP_CLIENT_RESULT_FLAG_IS_DIRECT;
                }
            }

            if (rt != NULL &&
                rt->rt_ifp != NULL) {
                // Check probe status
                if (rt->rt_ifp->if_eflags & IFEF_PROBE_CONNECTIVITY) {
                    *flags |= NECP_CLIENT_RESULT_FLAG_PROBE_CONNECTIVITY;
                }

                if (rt->rt_ifp->if_type == IFT_CELLULAR) {
                    struct if_cellular_status_v1 *ifsr;

                    ifnet_lock_shared(rt->rt_ifp);
                    lck_rw_lock_exclusive(&rt->rt_ifp->if_link_status_lock);

                    if (rt->rt_ifp->if_link_status != NULL) {
                        ifsr = &rt->rt_ifp->if_link_status->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;

                        if (ifsr->valid_bitmask & IF_CELL_UL_MSS_RECOMMENDED_VALID) {
                            if (ifsr->mss_recommended == IF_CELL_UL_MSS_RECOMMENDED_NONE) {
                                returned_result->mss_recommended = NECP_CLIENT_RESULT_RECOMMENDED_MSS_NONE;
                            } else if (ifsr->mss_recommended == IF_CELL_UL_MSS_RECOMMENDED_MEDIUM) {
                                returned_result->mss_recommended = NECP_CLIENT_RESULT_RECOMMENDED_MSS_MEDIUM;
                            } else if (ifsr->mss_recommended == IF_CELL_UL_MSS_RECOMMENDED_LOW) {
                                returned_result->mss_recommended = NECP_CLIENT_RESULT_RECOMMENDED_MSS_LOW;
                            }
                        }
                    }
                    lck_rw_done(&rt->rt_ifp->if_link_status_lock);
                    ifnet_lock_done(rt->rt_ifp);
                }

                // Check link quality
                if ((client_flags & NECP_CLIENT_PARAMETER_FLAG_DISCRETIONARY) &&
                    (rt->rt_ifp->if_interface_state.valid_bitmask & IF_INTERFACE_STATE_LQM_STATE_VALID) &&
                    rt->rt_ifp->if_interface_state.lqm_state == IFNET_LQM_THRESH_ABORT) {
                    *flags |= NECP_CLIENT_RESULT_FLAG_LINK_QUALITY_ABORT;
                }

                // Check QoS marking (fastlane)
                for (size_t route_rule_index = 0; route_rule_index < route_rule_id_array_count; route_rule_index++) {
                    if (necp_update_qos_marking(rt->rt_ifp, route_rule_id_array[route_rule_index])) {
                        *flags |= NECP_CLIENT_RESULT_FLAG_ALLOW_QOS_MARKING;
                        // If the route can use QoS markings, stop iterating route rules
                        break;
                    }
                }

                if (IFNET_IS_LOW_POWER(rt->rt_ifp)) {
                    *flags |= NECP_CLIENT_RESULT_FLAG_INTERFACE_LOW_POWER;
                }

                if (traffic_class == SO_TC_BK_SYS) {
                    // Block BK_SYS traffic if interface is throttled
                    u_int32_t throttle_level = 0;
                    if (ifnet_get_throttle(rt->rt_ifp, &throttle_level) == 0) {
                        if (throttle_level == IFNET_THROTTLE_OPPORTUNISTIC) {
                            returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_DROP;
                            memset(&returned_result->routing_result_parameter, 0, sizeof(returned_result->routing_result_parameter));
                        }
                    }
                }
            }
        }

        if (returned_result->routed_interface_index != 0) {
            union necp_sockaddr_union default_address;
            struct rtentry *v4Route = NULL;
            struct rtentry *v6Route = NULL;

            memset(&default_address, 0, sizeof(default_address));

            // Reset address to 0.0.0.0
            default_address.sa.sa_family = AF_INET;
            default_address.sa.sa_len = sizeof(struct sockaddr_in);
            v4Route = rtalloc1_scoped((struct sockaddr *)&default_address, 0, 0,
                returned_result->routed_interface_index);

            // Reset address to ::
            default_address.sa.sa_family = AF_INET6;
            default_address.sa.sa_len = sizeof(struct sockaddr_in6);
            v6Route = rtalloc1_scoped((struct sockaddr *)&default_address, 0, 0,
                returned_result->routed_interface_index);

            if (v4Route != NULL) {
                if (v4Route->rt_ifp != NULL && !IS_INTF_CLAT46(v4Route->rt_ifp)) {
                    *flags |= NECP_CLIENT_RESULT_FLAG_HAS_IPV4;
                }
                if (returned_v4_gateway != NULL &&
                    v4Route->rt_gateway != NULL &&
                    v4Route->rt_gateway->sa_len == sizeof(returned_v4_gateway->u.sin)) {
                    memcpy(&returned_v4_gateway->u.sin, v4Route->rt_gateway, sizeof(returned_v4_gateway->u.sin));
                    memset(&returned_v4_gateway->u.sin.sin_zero, 0, sizeof(returned_v4_gateway->u.sin.sin_zero));
                }
                rtfree(v4Route);
                v4Route = NULL;
            }

            if (v6Route != NULL) {
                if (v6Route->rt_ifp != NULL) {
                    *flags |= NECP_CLIENT_RESULT_FLAG_HAS_IPV6;

                    if (ifnet_get_nat64prefix(v6Route->rt_ifp, NULL) == 0) {
                        *flags |= NECP_CLIENT_RESULT_FLAG_HAS_NAT64;
                    }
                }
                if (returned_v6_gateway != NULL &&
                    v6Route->rt_gateway != NULL &&
                    v6Route->rt_gateway->sa_len == sizeof(returned_v6_gateway->u.sin6)) {
                    memcpy(&returned_v6_gateway->u.sin6, v6Route->rt_gateway, sizeof(returned_v6_gateway->u.sin6));
                }
                rtfree(v6Route);
                v6Route = NULL;
            }
        }
    }
    for (size_t route_rule_index = 0; route_rule_index < route_rule_id_array_count; route_rule_index++) {
        u_int32_t interface_type_denied = IFRTYPE_FUNCTIONAL_UNKNOWN;
        bool route_is_allowed = necp_route_is_allowed(rt, NULL, route_rule_id_array[route_rule_index], &interface_type_denied);
        if (!route_is_allowed) {
            // If the route is blocked, treat the lookup as a drop
            returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_DROP;
            memset(&returned_result->routing_result_parameter, 0, sizeof(returned_result->routing_result_parameter));

            if (interface_type_denied != IFRTYPE_FUNCTIONAL_UNKNOWN) {
                if (reason != NULL) {
                    if (interface_type_denied == IFRTYPE_FUNCTIONAL_CELLULAR) {
                        *reason = NECP_CLIENT_RESULT_REASON_CELLULAR_DENIED;
                    } else if (interface_type_denied == IFRTYPE_FUNCTIONAL_WIFI_INFRA) {
                        *reason = NECP_CLIENT_RESULT_REASON_WIFI_DENIED;
                    }
                }
                necp_send_application_interface_denied_event(pid, application_uuid, interface_type_denied);
            }
            // If the route gets denied, stop matching rules
            break;
        }
    }
    if (rt != NULL && rt->rt_ifp != NULL) {
        const bool expensive_prohibited = ((client_flags & NECP_CLIENT_PARAMETER_FLAG_PROHIBIT_EXPENSIVE) &&
            IFNET_IS_EXPENSIVE(rt->rt_ifp));
        const bool constrained_prohibited = ((client_flags & NECP_CLIENT_PARAMETER_FLAG_PROHIBIT_CONSTRAINED) &&
            IFNET_IS_CONSTRAINED(rt->rt_ifp));
        if (reason != NULL) {
            if (expensive_prohibited) {
                *reason = NECP_CLIENT_RESULT_REASON_EXPENSIVE_PROHIBITED;
            } else if (constrained_prohibited) {
                *reason = NECP_CLIENT_RESULT_REASON_CONSTRAINED_PROHIBITED;
            }
        }
        if (expensive_prohibited || constrained_prohibited) {
            // If the client flags prohibited a property of the interface, treat it as a drop
            returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_DROP;
            memset(&returned_result->routing_result_parameter, 0, sizeof(returned_result->routing_result_parameter));
        }
    }

    if (returned_route != NULL) {
        *returned_route = rt;
    } else {
        if (rt != NULL) {
            rtfree(rt);
            rt = NULL;
        }
    }

    lck_rw_done(&necp_kernel_policy_lock);

    return error;
}
static bool
necp_is_route_local(union necp_sockaddr_union *remote_addr)
{
    bool no_remote_addr = FALSE;
    u_int8_t remote_family = 0;
    struct rtentry *rt = NULL;
    bool is_local = FALSE;

    if (remote_addr == NULL) {
        return is_local;
    }

    if (remote_addr->sa.sa_len == 0 ||
        (remote_addr->sa.sa_family == AF_INET && remote_addr->sin.sin_addr.s_addr == 0) ||
        (remote_addr->sa.sa_family == AF_INET6 && IN6_IS_ADDR_UNSPECIFIED(&remote_addr->sin6.sin6_addr))) {
        no_remote_addr = TRUE;
        remote_family = remote_addr->sa.sa_family;
    }

    if (no_remote_addr) {
        memset(remote_addr, 0, sizeof(union necp_sockaddr_union));
        if (remote_family == AF_INET6) {
            // Reset address to ::
            remote_addr->sa.sa_family = AF_INET6;
            remote_addr->sa.sa_len = sizeof(struct sockaddr_in6);
        } else {
            // Reset address to 0.0.0.0
            remote_addr->sa.sa_family = AF_INET;
            remote_addr->sa.sa_len = sizeof(struct sockaddr_in);
        }
    }

    // Lookup route regardless of the scoped interface to check if
    // remote address is in a local network.
    rt = rtalloc1_scoped((struct sockaddr *)remote_addr, 0, 0, 0);
    if (rt == NULL) {
        return is_local;
    }

    if (remote_addr->sa.sa_family == AF_INET && IS_INTF_CLAT46(rt->rt_ifp)) {
        rtfree(rt);
        return is_local;
    }

    is_local = IS_NECP_DEST_IN_LOCAL_NETWORKS(rt);

    rtfree(rt);
    return is_local;
}
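/*
 * Illustrative note (not part of the original sources): an unspecified remote
 * address (for example a hypothetical zero-length sockaddr or 0.0.0.0) is
 * rewritten to the default destination of its family before the route lookup, so
 * the check degrades to "is the default route a local network" rather than
 * failing outright; an IPv4 destination routed through a CLAT46 interface is
 * deliberately not treated as local.
 */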
static inline bool
necp_socket_check_policy(struct necp_kernel_socket_policy *kernel_policy, necp_app_id app_id, necp_app_id real_app_id, errno_t cred_result, u_int32_t account_id, struct substring domain, u_int8_t domain_dot_count, pid_t pid, uid_t uid, u_int32_t bound_interface_index, u_int32_t traffic_class, u_int16_t protocol, union necp_sockaddr_union *local, union necp_sockaddr_union *remote, struct necp_client_parameter_netagent_type *required_agent_types, u_int32_t num_required_agent_types, bool has_client, uint32_t client_flags, int is_platform_binary, proc_t proc, struct rtentry *rt)
{
    if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES)) {
        if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
            u_int32_t cond_bound_interface_index = kernel_policy->cond_bound_interface ? kernel_policy->cond_bound_interface->if_index : 0;
            if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
                if (bound_interface_index == cond_bound_interface_index) {
                    // No match, matches forbidden interface
                    return FALSE;
                }
            } else {
                if (bound_interface_index != cond_bound_interface_index) {
                    // No match, does not match required interface
                    return FALSE;
                }
            }
        } else {
            if (bound_interface_index != 0) {
                // No match, requires a non-bound packet
                return FALSE;
            }
        }
    }

    if (kernel_policy->condition_mask == 0) {
        return TRUE;
    }

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID) {
        if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_APP_ID) {
            if (app_id == kernel_policy->cond_app_id) {
                // No match, matches forbidden application
                return FALSE;
            }
        } else {
            if (app_id != kernel_policy->cond_app_id) {
                // No match, does not match required application
                return FALSE;
            }
        }
    }

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
        if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
            if (real_app_id == kernel_policy->cond_real_app_id) {
                // No match, matches forbidden application
                return FALSE;
            }
        } else {
            if (real_app_id != kernel_policy->cond_real_app_id) {
                // No match, does not match required application
                return FALSE;
            }
        }
    }

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_HAS_CLIENT) {
        if (!has_client) {
            return FALSE;
        }
    }

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT) {
        if (cred_result != 0) {
            // Process is missing entitlement
            return FALSE;
        }
    }

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PLATFORM_BINARY) {
        if (is_platform_binary == 0) {
            // Process is not platform binary
            return FALSE;
        }
    }

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT) {
        if (kernel_policy->cond_custom_entitlement_matched == necp_boolean_state_false) {
            // Process is missing entitlement based on previous check
            return FALSE;
        } else if (kernel_policy->cond_custom_entitlement_matched == necp_boolean_state_unknown) {
            if (kernel_policy->cond_custom_entitlement != NULL) {
                if (proc == NULL) {
                    // No process found, cannot check entitlement
                    return FALSE;
                }
                task_t task = proc_task(proc);
                if (task == NULL ||
                    !IOTaskHasEntitlement(task, kernel_policy->cond_custom_entitlement)) {
                    // Process is missing custom entitlement
                    kernel_policy->cond_custom_entitlement_matched = necp_boolean_state_false;
                    return FALSE;
                } else {
                    kernel_policy->cond_custom_entitlement_matched = necp_boolean_state_true;
                }
            }
        }
    }

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_DOMAIN) {
        bool domain_matches = necp_hostname_matches_domain(domain, domain_dot_count, kernel_policy->cond_domain, kernel_policy->cond_domain_dot_count);
        if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_DOMAIN) {
            if (domain_matches) {
                // No match, matches forbidden domain
                return FALSE;
            }
        } else {
            if (!domain_matches) {
                // No match, does not match required domain
                return FALSE;
            }
        }
    }

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID) {
        if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID) {
            if (account_id == kernel_policy->cond_account_id) {
                // No match, matches forbidden account
                return FALSE;
            }
        } else {
            if (account_id != kernel_policy->cond_account_id) {
                // No match, does not match required account
                return FALSE;
            }
        }
    }

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PID) {
        if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_PID) {
            if (pid == kernel_policy->cond_pid) {
                // No match, matches forbidden pid
                return FALSE;
            }
        } else {
            if (pid != kernel_policy->cond_pid) {
                // No match, does not match required pid
                return FALSE;
            }
        }
    }

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_UID) {
        if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_UID) {
            if (uid == kernel_policy->cond_uid) {
                // No match, matches forbidden uid
                return FALSE;
            }
        } else {
            if (uid != kernel_policy->cond_uid) {
                // No match, does not match required uid
                return FALSE;
            }
        }
    }

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
        if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
            if (traffic_class >= kernel_policy->cond_traffic_class.start_tc &&
                traffic_class <= kernel_policy->cond_traffic_class.end_tc) {
                // No match, matches forbidden traffic class
                return FALSE;
            }
        } else {
            if (traffic_class < kernel_policy->cond_traffic_class.start_tc ||
                traffic_class > kernel_policy->cond_traffic_class.end_tc) {
                // No match, does not match required traffic class
                return FALSE;
            }
        }
    }

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
        if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
            if (protocol == kernel_policy->cond_protocol) {
                // No match, matches forbidden protocol
                return FALSE;
            }
        } else {
            if (protocol != kernel_policy->cond_protocol) {
                // No match, does not match required protocol
                return FALSE;
            }
        }
    }

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_AGENT_TYPE) {
        bool matches_agent_type = FALSE;
        for (u_int32_t i = 0; i < num_required_agent_types; i++) {
            struct necp_client_parameter_netagent_type *required_agent_type = &required_agent_types[i];
            if ((strlen(kernel_policy->cond_agent_type.agent_domain) == 0 ||
                strncmp(required_agent_type->netagent_domain, kernel_policy->cond_agent_type.agent_domain, NETAGENT_DOMAINSIZE) == 0) &&
                (strlen(kernel_policy->cond_agent_type.agent_type) == 0 ||
                strncmp(required_agent_type->netagent_type, kernel_policy->cond_agent_type.agent_type, NETAGENT_TYPESIZE) == 0)) {
                // Found a required agent that matches
                matches_agent_type = TRUE;
                break;
            }
        }
        if (!matches_agent_type) {
            return FALSE;
        }
    }

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_NETWORKS) {
        bool is_local = FALSE;

        if (rt != NULL) {
            is_local = IS_NECP_DEST_IN_LOCAL_NETWORKS(rt);
        } else {
            is_local = necp_is_route_local(remote);
        }

        if (!is_local) {
            // Either no route to validate or no match for local networks
            return FALSE;
        }
    }

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
        if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
            bool inRange = necp_is_addr_in_range((struct sockaddr *)local, (struct sockaddr *)&kernel_policy->cond_local_start, (struct sockaddr *)&kernel_policy->cond_local_end);
            if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
                if (inRange) {
                    return FALSE;
                }
            } else {
                if (!inRange) {
                    return FALSE;
                }
            }
        } else if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
            bool inSubnet = necp_is_addr_in_subnet((struct sockaddr *)local, (struct sockaddr *)&kernel_policy->cond_local_start, kernel_policy->cond_local_prefix);
            if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
                if (inSubnet) {
                    return FALSE;
                }
            } else {
                if (!inSubnet) {
                    return FALSE;
                }
            }
        }
    }

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
        if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
            bool inRange = necp_is_addr_in_range((struct sockaddr *)remote, (struct sockaddr *)&kernel_policy->cond_remote_start, (struct sockaddr *)&kernel_policy->cond_remote_end);
            if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
                if (inRange) {
                    return FALSE;
                }
            } else {
                if (!inRange) {
                    return FALSE;
                }
            }
        } else if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
            bool inSubnet = necp_is_addr_in_subnet((struct sockaddr *)remote, (struct sockaddr *)&kernel_policy->cond_remote_start, kernel_policy->cond_remote_prefix);
            if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
                if (inSubnet) {
                    return FALSE;
                }
            } else {
                if (!inSubnet) {
                    return FALSE;
                }
            }
        }
    }

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_CLIENT_FLAGS) {
        if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_CLIENT_FLAGS) {
            if ((client_flags & kernel_policy->cond_client_flags) == kernel_policy->cond_client_flags) {
                // Flags do match, and condition is negative, fail.
                return FALSE;
            }
        } else {
            if ((client_flags & kernel_policy->cond_client_flags) != kernel_policy->cond_client_flags) {
                // Flags do not match, fail.
                return FALSE;
            }
        }
    }

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_EMPTY) {
        bool isEmpty = necp_addr_is_empty((struct sockaddr *)local);
        if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_LOCAL_EMPTY) {
            if (isEmpty) {
                return FALSE;
            }
        } else {
            if (!isEmpty) {
                return FALSE;
            }
        }
    }

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_EMPTY) {
        bool isEmpty = necp_addr_is_empty((struct sockaddr *)remote);
        if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REMOTE_EMPTY) {
            if (isEmpty) {
                return FALSE;
            }
        } else {
            if (!isEmpty) {
                return FALSE;
            }
        }
    }

    return TRUE;
}
static inline u_int32_t
necp_socket_calc_flowhash_locked(struct necp_socket_info *info)
{
    return net_flowhash(info, sizeof(*info), necp_kernel_socket_policies_gencount);
}
static void
necp_socket_fillout_info_locked(struct inpcb *inp, struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr, u_int32_t override_bound_interface, u_int32_t drop_order, struct necp_socket_info *info)
{
	struct socket *so = NULL;

	memset(info, 0, sizeof(struct necp_socket_info));

	so = inp->inp_socket;

	info->drop_order = drop_order;

	if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_PID) {
		info->pid = ((so->so_flags & SOF_DELEGATED) ? so->e_pid : so->last_pid);
	}

	if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_UID) {
		info->uid = kauth_cred_getuid(so->so_cred);
	}

	if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
		info->traffic_class = so->so_traffic_class;
	}

	if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_HAS_CLIENT) {
		info->has_client = !uuid_is_null(inp->necp_client_uuid);
	}

	if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_CLIENT_FLAGS) {
		info->client_flags = 0;
		if (INP_NO_CONSTRAINED(inp)) {
			info->client_flags |= NECP_CLIENT_PARAMETER_FLAG_PROHIBIT_CONSTRAINED;
		}
		if (INP_NO_EXPENSIVE(inp)) {
			info->client_flags |= NECP_CLIENT_PARAMETER_FLAG_PROHIBIT_EXPENSIVE;
		}
		if (inp->inp_socket->so_flags1 & SOF1_CELLFALLBACK) {
			info->client_flags |= NECP_CLIENT_PARAMETER_FLAG_FALLBACK_TRAFFIC;
		}
		if (inp->inp_socket->so_flags1 & SOF1_INBOUND) {
			info->client_flags |= NECP_CLIENT_PARAMETER_FLAG_INBOUND;
		}
		if (inp->inp_socket->so_options & SO_ACCEPTCONN ||
		    inp->inp_flags2 & INP2_EXTERNAL_PORT) {
			info->client_flags |= NECP_CLIENT_PARAMETER_FLAG_LISTENER;
		}
	}

	if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
		if (inp->inp_ip_p) {
			info->protocol = inp->inp_ip_p;
		} else {
			info->protocol = SOCK_PROTO(so);
		}
	}

	if (inp->inp_flags2 & INP2_WANT_APP_POLICY && necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_APP_ID) {
		struct necp_uuid_id_mapping *existing_mapping = necp_uuid_lookup_app_id_locked(((so->so_flags & SOF_DELEGATED) ? so->e_uuid : so->last_uuid));
		if (existing_mapping) {
			info->application_id = existing_mapping->id;
		}

		if (!(so->so_flags & SOF_DELEGATED)) {
			info->real_application_id = info->application_id;
		} else if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
			struct necp_uuid_id_mapping *real_existing_mapping = necp_uuid_lookup_app_id_locked(so->last_uuid);
			if (real_existing_mapping) {
				info->real_application_id = real_existing_mapping->id;
			}
		}

		if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT) {
			info->cred_result = priv_check_cred(so->so_cred, PRIV_NET_PRIVILEGED_NECP_MATCH, 0);
			if (info->cred_result != 0) {
				// Process does not have entitlement, check the parent process
				necp_get_parent_cred_result(NULL, info);
			}
		}
	}

	if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_PLATFORM_BINARY) {
		info->is_platform_binary = csproc_get_platform_binary(current_proc()) ? true : false;
	}

	if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID && inp->inp_necp_attributes.inp_account != NULL) {
		struct necp_string_id_mapping *existing_mapping = necp_lookup_string_to_id_locked(&necp_account_id_list, inp->inp_necp_attributes.inp_account);
		if (existing_mapping) {
			info->account_id = existing_mapping->id;
		}
	}

	if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_DOMAIN) {
		info->domain = inp->inp_necp_attributes.inp_domain;
	}

	if (override_bound_interface) {
		info->bound_interface_index = override_bound_interface;
	} else {
		if ((inp->inp_flags & INP_BOUND_IF) && inp->inp_boundifp) {
			info->bound_interface_index = inp->inp_boundifp->if_index;
		}
	}

	if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_ADDRESS_TYPE_CONDITIONS) {
		if (override_local_addr != NULL) {
			if (override_local_addr->sa_family == AF_INET6 && override_local_addr->sa_len <= sizeof(struct sockaddr_in6)) {
				memcpy(&info->local_addr, override_local_addr, override_local_addr->sa_len);
				if (IN6_IS_ADDR_V4MAPPED(&(info->local_addr.sin6.sin6_addr))) {
					struct sockaddr_in sin;
					in6_sin6_2_sin(&sin, &(info->local_addr.sin6));
					memset(&info->local_addr, 0, sizeof(union necp_sockaddr_union));
					memcpy(&info->local_addr, &sin, sin.sin_len);
				}
			} else if (override_local_addr->sa_family == AF_INET && override_local_addr->sa_len <= sizeof(struct sockaddr_in)) {
				memcpy(&info->local_addr, override_local_addr, override_local_addr->sa_len);
			}
		} else {
			if (inp->inp_vflag & INP_IPV4) {
				((struct sockaddr_in *)&info->local_addr)->sin_family = AF_INET;
				((struct sockaddr_in *)&info->local_addr)->sin_len = sizeof(struct sockaddr_in);
				((struct sockaddr_in *)&info->local_addr)->sin_port = inp->inp_lport;
				memcpy(&((struct sockaddr_in *)&info->local_addr)->sin_addr, &inp->inp_laddr, sizeof(struct in_addr));
			} else if (inp->inp_vflag & INP_IPV6) {
				((struct sockaddr_in6 *)&info->local_addr)->sin6_family = AF_INET6;
				((struct sockaddr_in6 *)&info->local_addr)->sin6_len = sizeof(struct sockaddr_in6);
				((struct sockaddr_in6 *)&info->local_addr)->sin6_port = inp->inp_lport;
				memcpy(&((struct sockaddr_in6 *)&info->local_addr)->sin6_addr, &inp->in6p_laddr, sizeof(struct in6_addr));
			}
		}

		if (override_remote_addr != NULL) {
			if (override_remote_addr->sa_family == AF_INET6 && override_remote_addr->sa_len <= sizeof(struct sockaddr_in6)) {
				memcpy(&info->remote_addr, override_remote_addr, override_remote_addr->sa_len);
				if (IN6_IS_ADDR_V4MAPPED(&(info->remote_addr.sin6.sin6_addr))) {
					struct sockaddr_in sin;
					in6_sin6_2_sin(&sin, &(info->remote_addr.sin6));
					memset(&info->remote_addr, 0, sizeof(union necp_sockaddr_union));
					memcpy(&info->remote_addr, &sin, sin.sin_len);
				}
			} else if (override_remote_addr->sa_family == AF_INET && override_remote_addr->sa_len <= sizeof(struct sockaddr_in)) {
				memcpy(&info->remote_addr, override_remote_addr, override_remote_addr->sa_len);
			}
		} else {
			if (inp->inp_vflag & INP_IPV4) {
				((struct sockaddr_in *)&info->remote_addr)->sin_family = AF_INET;
				((struct sockaddr_in *)&info->remote_addr)->sin_len = sizeof(struct sockaddr_in);
				((struct sockaddr_in *)&info->remote_addr)->sin_port = inp->inp_fport;
				memcpy(&((struct sockaddr_in *)&info->remote_addr)->sin_addr, &inp->inp_faddr, sizeof(struct in_addr));
			} else if (inp->inp_vflag & INP_IPV6) {
				((struct sockaddr_in6 *)&info->remote_addr)->sin6_family = AF_INET6;
				((struct sockaddr_in6 *)&info->remote_addr)->sin6_len = sizeof(struct sockaddr_in6);
				((struct sockaddr_in6 *)&info->remote_addr)->sin6_port = inp->inp_fport;
				memcpy(&((struct sockaddr_in6 *)&info->remote_addr)->sin6_addr, &inp->in6p_faddr, sizeof(struct in6_addr));
			}
		}
	}
}
static inline struct necp_kernel_socket_policy *
necp_socket_find_policy_match_with_info_locked(struct necp_kernel_socket_policy **policy_search_array, struct necp_socket_info *info,
    necp_kernel_policy_filter *return_filter,
    u_int32_t *return_route_rule_id_array, size_t *return_route_rule_id_array_count, size_t route_rule_id_array_count,
    necp_kernel_policy_result *return_service_action, necp_kernel_policy_service *return_service,
    u_int32_t *return_netagent_array, u_int32_t *return_netagent_use_flags_array, size_t netagent_array_count,
    struct necp_client_parameter_netagent_type *required_agent_types,
    u_int32_t num_required_agent_types, proc_t proc, necp_kernel_policy_id *skip_policy_id, struct rtentry *rt,
    necp_kernel_policy_result *return_drop_dest_policy_result, necp_drop_all_bypass_check_result_t *return_drop_all_bypass)
{
	struct necp_kernel_socket_policy *matched_policy = NULL;
	u_int32_t skip_order = 0;
	u_int32_t skip_session_order = 0;
	size_t route_rule_id_count = 0;
	int i;
	size_t netagent_cursor = 0;
	necp_drop_all_bypass_check_result_t drop_all_bypass = NECP_DROP_ALL_BYPASS_CHECK_RESULT_NONE;
	if (return_drop_all_bypass != NULL) {
		*return_drop_all_bypass = drop_all_bypass;
	}

	// Pre-process domain for quick matching
	struct substring domain_substring = necp_trim_dots_and_stars(info->domain, info->domain ? strlen(info->domain) : 0);
	u_int8_t domain_dot_count = necp_count_dots(domain_substring.string, domain_substring.length);

	if (return_filter != NULL) {
		*return_filter = 0;
	}

	if (return_route_rule_id_array_count != NULL) {
		*return_route_rule_id_array_count = 0;
	}

	if (return_service_action != NULL) {
		*return_service_action = 0;
	}

	if (return_service != NULL) {
		return_service->identifier = 0;
		return_service->data = 0;
	}

	// Do not subject layer-2 filter to NECP policies, return a PASS policy
	if (necp_pass_interpose > 0 && info->client_flags & NECP_CLIENT_PARAMETER_FLAG_INTERPOSE) {
		return &pass_policy;
	}

	*return_drop_dest_policy_result = NECP_KERNEL_POLICY_RESULT_NONE;

	if (policy_search_array != NULL) {
		for (i = 0; policy_search_array[i] != NULL; i++) {
			if (necp_drop_all_order != 0 && policy_search_array[i]->session_order >= necp_drop_all_order) {
				// We've hit a drop all rule
				if (drop_all_bypass == NECP_DROP_ALL_BYPASS_CHECK_RESULT_NONE) {
					drop_all_bypass = necp_check_drop_all_bypass_result(proc);
					if (return_drop_all_bypass != NULL) {
						*return_drop_all_bypass = drop_all_bypass;
					}
				}
				if (drop_all_bypass == NECP_DROP_ALL_BYPASS_CHECK_RESULT_FALSE) {
					break;
				}
			}
			if (necp_drop_dest_policy.entry_count != 0 &&
			    necp_address_matches_drop_dest_policy(&info->remote_addr, policy_search_array[i]->session_order)) {
				// We've hit a drop by destination address rule
				*return_drop_dest_policy_result = NECP_KERNEL_POLICY_RESULT_DROP;
			}
			if (info->drop_order != 0 && policy_search_array[i]->session_order >= info->drop_order) {
				// We've hit a drop order for this socket
				break;
			}
			if (skip_session_order && policy_search_array[i]->session_order >= skip_session_order) {
				// Done skipping
				skip_order = 0;
				skip_session_order = 0;
			}
			if (skip_order) {
				if (policy_search_array[i]->order < skip_order) {
					// Skip this policy
					continue;
				} else {
					// Done skipping
					skip_order = 0;
					skip_session_order = 0;
				}
			} else if (skip_session_order) {
				// Skip this policy
				continue;
			}
			if (necp_socket_check_policy(policy_search_array[i], info->application_id, info->real_application_id, info->cred_result, info->account_id, domain_substring, domain_dot_count, info->pid, info->uid, info->bound_interface_index, info->traffic_class, info->protocol, &info->local_addr, &info->remote_addr, required_agent_types, num_required_agent_types, info->has_client, info->client_flags, info->is_platform_binary, proc, rt)) {
				if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_SOCKET_FILTER) {
					if (return_filter && *return_filter != NECP_FILTER_UNIT_NO_FILTER) {
						necp_kernel_policy_filter control_unit = policy_search_array[i]->result_parameter.filter_control_unit;
						if (control_unit == NECP_FILTER_UNIT_NO_FILTER) {
							*return_filter = control_unit;
						} else {
							*return_filter |= control_unit;
						}
						if (necp_debug > 1) {
							NECPLOG(LOG_DEBUG, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) Filter %d", info->application_id, info->real_application_id, info->bound_interface_index, info->protocol, policy_search_array[i]->result_parameter.filter_control_unit);
						}
					}
					continue;
				} else if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_ROUTE_RULES) {
					if (return_route_rule_id_array && route_rule_id_count < route_rule_id_array_count) {
						return_route_rule_id_array[route_rule_id_count++] = policy_search_array[i]->result_parameter.route_rule_id;
					}
					if (necp_debug > 1) {
						NECPLOG(LOG_DEBUG, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) Route Rule %d", info->application_id, info->real_application_id, info->bound_interface_index, info->protocol, policy_search_array[i]->result_parameter.route_rule_id);
					}
					continue;
				} else if (necp_kernel_socket_result_is_trigger_service_type(policy_search_array[i])) {
					if (return_service_action && *return_service_action == 0) {
						*return_service_action = policy_search_array[i]->result;
						if (necp_debug > 1) {
							NECPLOG(LOG_DEBUG, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) Service Action %d", info->application_id, info->real_application_id, info->bound_interface_index, info->protocol, policy_search_array[i]->result);
						}
					}
					if (return_service && return_service->identifier == 0) {
						return_service->identifier = policy_search_array[i]->result_parameter.service.identifier;
						return_service->data = policy_search_array[i]->result_parameter.service.data;
						if (necp_debug > 1) {
							NECPLOG(LOG_DEBUG, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) Service ID %d Data %d", info->application_id, info->real_application_id, info->bound_interface_index, info->protocol, policy_search_array[i]->result_parameter.service.identifier, policy_search_array[i]->result_parameter.service.data);
						}
					}
					continue;
				} else if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_USE_NETAGENT ||
				    policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_NETAGENT_SCOPED) {
					if (return_netagent_array != NULL &&
					    netagent_cursor < netagent_array_count) {
						return_netagent_array[netagent_cursor] = policy_search_array[i]->result_parameter.netagent_id;
						if (return_netagent_use_flags_array != NULL &&
						    policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_NETAGENT_SCOPED) {
							return_netagent_use_flags_array[netagent_cursor] |= NECP_AGENT_USE_FLAG_SCOPE;
						}
						netagent_cursor++;
						if (necp_debug > 1) {
							NECPLOG(LOG_DEBUG, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) %s Netagent %d",
							    info->application_id, info->real_application_id, info->bound_interface_index, info->protocol,
							    policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_USE_NETAGENT ? "Use" : "Scope",
							    policy_search_array[i]->result_parameter.netagent_id);
						}
					}
					continue;
				}

				// Matched policy is a skip. Do skip and continue.
				if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
					skip_order = policy_search_array[i]->result_parameter.skip_policy_order;
					skip_session_order = policy_search_array[i]->session_order + 1;
					if (skip_policy_id) {
						*skip_policy_id = policy_search_array[i]->id;
					}
					continue;
				}

				// Matched an allow unentitled, which clears any drop order
				if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_ALLOW_UNENTITLED) {
					info->drop_order = 0;
					continue;
				}

				// Passed all tests, found a match
				matched_policy = policy_search_array[i];
				break;
			}
		}
	}

	if (return_route_rule_id_array_count != NULL) {
		*return_route_rule_id_array_count = route_rule_id_count;
	}
	return matched_policy;
}
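
/*
 * Illustrative sketch (not part of the kernel build): how a SKIP result prunes the
 * rest of a session's policies in the ordered search loop above. Policies are
 * sorted by (session_order, order); a SKIP jumps ahead to skip_policy_order within
 * the same session, and skipping ends as soon as the next session begins. All
 * names here (example_policy, example_first_match, ...) are hypothetical.
 */
#if 0
#include <stddef.h>
#include <stdbool.h>
#include <stdint.h>

struct example_policy {
	uint32_t session_order;
	uint32_t order;
	bool     is_skip;            // result == SKIP
	uint32_t skip_policy_order;  // valid when is_skip
	bool     matches;            // outcome of the condition checks
};

static const struct example_policy *
example_first_match(const struct example_policy *policies, size_t count)
{
	uint32_t skip_order = 0;
	uint32_t skip_session_order = 0;

	for (size_t i = 0; i < count; i++) {
		const struct example_policy *p = &policies[i];

		// Skipping never crosses into the next session
		if (skip_session_order && p->session_order >= skip_session_order) {
			skip_order = 0;
			skip_session_order = 0;
		}
		if (skip_order) {
			if (p->order < skip_order) {
				continue;       // still inside the skipped range
			}
			skip_order = 0;         // reached the skip target
			skip_session_order = 0;
		} else if (skip_session_order) {
			continue;
		}

		if (!p->matches) {
			continue;
		}
		if (p->is_skip) {
			skip_order = p->skip_policy_order;
			skip_session_order = p->session_order + 1;
			continue;
		}
		return p;                       // first real match wins
	}
	return NULL;
}
#endif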
static bool
necp_socket_uses_interface(struct inpcb *inp, u_int32_t interface_index)
{
	bool found_match = FALSE;
	int result = 0;
	ifaddr_t *addresses = NULL;
	union necp_sockaddr_union address_storage;
	int i;
	int family = AF_INET;
	ifnet_t interface = ifindex2ifnet[interface_index];

	if (inp == NULL || interface == NULL) {
		return FALSE;
	}

	if (inp->inp_vflag & INP_IPV4) {
		family = AF_INET;
	} else if (inp->inp_vflag & INP_IPV6) {
		family = AF_INET6;
	}

	result = ifnet_get_address_list_family(interface, &addresses, family);
	if (result != 0) {
		NECPLOG(LOG_ERR, "Failed to get address list for %s%d", ifnet_name(interface), ifnet_unit(interface));
		return FALSE;
	}

	for (i = 0; addresses[i] != NULL; i++) {
		if (ifaddr_address(addresses[i], &address_storage.sa, sizeof(address_storage)) == 0) {
			if (family == AF_INET) {
				if (memcmp(&address_storage.sin.sin_addr, &inp->inp_laddr, sizeof(inp->inp_laddr)) == 0) {
					found_match = TRUE;
					goto done;
				}
			} else if (family == AF_INET6) {
				if (memcmp(&address_storage.sin6.sin6_addr, &inp->in6p_laddr, sizeof(inp->in6p_laddr)) == 0) {
					found_match = TRUE;
					goto done;
				}
			}
		}
	}

done:
	ifnet_free_address_list(addresses);
	addresses = NULL;
	return found_match;
}
static inline bool
necp_socket_is_connected(struct inpcb *inp)
{
	return inp->inp_socket->so_state & (SS_ISCONNECTING | SS_ISCONNECTED | SS_ISDISCONNECTING);
}
static inline bool
necp_socket_bypass(struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr, struct inpcb *inp)
{
	if (necp_pass_loopback > 0 && necp_is_loopback(override_local_addr, override_remote_addr, inp, NULL)) {
		return true;
	} else if (necp_is_intcoproc(inp, NULL)) {
		return true;
	}

	return false;
}
necp_kernel_policy_id
necp_socket_find_policy_match(struct inpcb *inp, struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr, u_int32_t override_bound_interface)
{
	struct socket *so = NULL;
	necp_kernel_policy_filter filter_control_unit = 0;
	struct necp_kernel_socket_policy *matched_policy = NULL;
	necp_kernel_policy_id matched_policy_id = NECP_KERNEL_POLICY_ID_NONE;
	necp_kernel_policy_result service_action = 0;
	necp_kernel_policy_service service = { 0, 0 };
	u_int32_t drop_dest_policy_result = NECP_KERNEL_POLICY_RESULT_NONE;
	necp_drop_all_bypass_check_result_t drop_all_bypass = NECP_DROP_ALL_BYPASS_CHECK_RESULT_NONE;

	u_int32_t netagent_ids[NECP_MAX_NETAGENTS];
	memset(&netagent_ids, 0, sizeof(netagent_ids));
	int netagent_cursor;

	struct necp_socket_info info;

	if (inp == NULL) {
		return NECP_KERNEL_POLICY_ID_NONE;
	}

	// Ignore invalid addresses
	if (override_local_addr != NULL &&
	    !necp_address_is_valid(override_local_addr)) {
		override_local_addr = NULL;
	}
	if (override_remote_addr != NULL &&
	    !necp_address_is_valid(override_remote_addr)) {
		override_remote_addr = NULL;
	}

	so = inp->inp_socket;

	u_int32_t drop_order = necp_process_drop_order(so->so_cred);

	// Don't lock. Possible race condition, but we don't want the performance hit.
	if (necp_kernel_socket_policies_count == 0 ||
	    (!(inp->inp_flags2 & INP2_WANT_APP_POLICY) && necp_kernel_socket_policies_non_app_count == 0)) {
		if (necp_drop_all_order > 0 || drop_order > 0) {
			inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
			inp->inp_policyresult.skip_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
			inp->inp_policyresult.policy_gencount = 0;
			inp->inp_policyresult.app_id = 0;
			inp->inp_policyresult.flowhash = 0;
			inp->inp_policyresult.results.filter_control_unit = 0;
			inp->inp_policyresult.results.route_rule_id = 0;
			if (necp_socket_bypass(override_local_addr, override_remote_addr, inp)) {
				inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_PASS;
			} else {
				inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_DROP;
			}
		}
		return NECP_KERNEL_POLICY_ID_NONE;
	}

	// Check for loopback exception
	if (necp_socket_bypass(override_local_addr, override_remote_addr, inp)) {
		// Mark socket as a pass
		inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
		inp->inp_policyresult.skip_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
		inp->inp_policyresult.policy_gencount = 0;
		inp->inp_policyresult.app_id = 0;
		inp->inp_policyresult.flowhash = 0;
		inp->inp_policyresult.results.filter_control_unit = 0;
		inp->inp_policyresult.results.route_rule_id = 0;
		inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_PASS;
		return NECP_KERNEL_POLICY_ID_NONE;
	}

	// Lock
	lck_rw_lock_shared(&necp_kernel_policy_lock);

	necp_socket_fillout_info_locked(inp, override_local_addr, override_remote_addr, override_bound_interface, drop_order, &info);
	inp->inp_policyresult.app_id = info.application_id;

	// Check info
	u_int32_t flowhash = necp_socket_calc_flowhash_locked(&info);
	if (inp->inp_policyresult.policy_id != NECP_KERNEL_POLICY_ID_NONE &&
	    inp->inp_policyresult.policy_gencount == necp_kernel_socket_policies_gencount &&
	    inp->inp_policyresult.flowhash == flowhash) {
		// If already matched this socket on this generation of table, skip

		// Unlock
		lck_rw_done(&necp_kernel_policy_lock);

		return inp->inp_policyresult.policy_id;
	}

	// Match socket to policy
	necp_kernel_policy_id skip_policy_id;
	u_int32_t route_rule_id_array[MAX_AGGREGATE_ROUTE_RULES];
	size_t route_rule_id_array_count = 0;
	matched_policy = necp_socket_find_policy_match_with_info_locked(necp_kernel_socket_policies_map[NECP_SOCKET_MAP_APP_ID_TO_BUCKET(info.application_id)], &info, &filter_control_unit, route_rule_id_array, &route_rule_id_array_count, MAX_AGGREGATE_ROUTE_RULES, &service_action, &service, netagent_ids, NULL, NECP_MAX_NETAGENTS, NULL, 0, current_proc(), &skip_policy_id, inp->inp_route.ro_rt, &drop_dest_policy_result, &drop_all_bypass);

	// If the socket matched a scoped service policy, mark as Drop if not registered.
	// This covers the cases in which a service is required (on demand) but hasn't started yet.
	if ((service_action == NECP_KERNEL_POLICY_RESULT_TRIGGER_SCOPED ||
	    service_action == NECP_KERNEL_POLICY_RESULT_NO_TRIGGER_SCOPED) &&
	    service.identifier != 0 &&
	    service.identifier != NECP_NULL_SERVICE_ID) {
		bool service_is_registered = FALSE;
		struct necp_service_registration *service_registration = NULL;
		LIST_FOREACH(service_registration, &necp_registered_service_list, kernel_chain) {
			if (service.identifier == service_registration->service_id) {
				service_is_registered = TRUE;
				break;
			}
		}
		if (!service_is_registered) {
			// Mark socket as a drop if service is not registered
			inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
			inp->inp_policyresult.skip_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
			inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount;
			inp->inp_policyresult.flowhash = flowhash;
			inp->inp_policyresult.results.filter_control_unit = 0;
			inp->inp_policyresult.results.route_rule_id = 0;
			inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_DROP;

			if (necp_debug > 1) {
				NECPLOG(LOG_DEBUG, "Socket Policy: (BoundInterface %d Proto %d) Dropping packet because service is not registered", info.bound_interface_index, info.protocol);
			}

			// Unlock
			lck_rw_done(&necp_kernel_policy_lock);
			return NECP_KERNEL_POLICY_ID_NONE;
		}
	}

	for (netagent_cursor = 0; netagent_cursor < NECP_MAX_NETAGENTS; netagent_cursor++) {
		struct necp_uuid_id_mapping *mapping = NULL;
		u_int32_t netagent_id = netagent_ids[netagent_cursor];
		if (netagent_id == 0) {
			break;
		}
		mapping = necp_uuid_lookup_uuid_with_service_id_locked(netagent_id);
		if (mapping != NULL) {
			u_int32_t agent_flags = 0;
			agent_flags = netagent_get_flags(mapping->uuid);
			if (agent_flags & NETAGENT_FLAG_REGISTERED) {
				if (agent_flags & NETAGENT_FLAG_ACTIVE) {
					continue;
				} else if ((agent_flags & NETAGENT_FLAG_VOLUNTARY) == 0) {
					if (agent_flags & NETAGENT_FLAG_KERNEL_ACTIVATED) {
						int trigger_error = 0;
						trigger_error = netagent_kernel_trigger(mapping->uuid);
						if (necp_debug > 1) {
							NECPLOG(LOG_DEBUG, "Socket Policy: Triggering inactive agent, error %d", trigger_error);
						}
					}

					// Mark socket as a drop if required agent is not active
					inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
					inp->inp_policyresult.skip_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
					inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount;
					inp->inp_policyresult.flowhash = flowhash;
					inp->inp_policyresult.results.filter_control_unit = 0;
					inp->inp_policyresult.results.route_rule_id = 0;
					inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_DROP;

					if (necp_debug > 1) {
						NECPLOG(LOG_DEBUG, "Socket Policy: (BoundInterface %d Proto %d) Dropping packet because agent is not active", info.bound_interface_index, info.protocol);
					}

					// Unlock
					lck_rw_done(&necp_kernel_policy_lock);
					return NECP_KERNEL_POLICY_ID_NONE;
				}
			}
		}
	}

	u_int32_t route_rule_id = 0;
	if (route_rule_id_array_count == 1) {
		route_rule_id = route_rule_id_array[0];
	} else if (route_rule_id_array_count > 1) {
		route_rule_id = necp_create_aggregate_route_rule(route_rule_id_array);
	}

	bool reset_tcp_mss = false;
	if (matched_policy) {
		matched_policy_id = matched_policy->id;
		inp->inp_policyresult.policy_id = matched_policy->id;
		inp->inp_policyresult.skip_policy_id = skip_policy_id;
		inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount;
		inp->inp_policyresult.flowhash = flowhash;
		inp->inp_policyresult.results.filter_control_unit = filter_control_unit;
		inp->inp_policyresult.results.route_rule_id = route_rule_id;
		inp->inp_policyresult.results.result = matched_policy->result;
		memcpy(&inp->inp_policyresult.results.result_parameter, &matched_policy->result_parameter, sizeof(matched_policy->result_parameter));

		if (necp_socket_is_connected(inp) &&
		    (matched_policy->result == NECP_KERNEL_POLICY_RESULT_DROP ||
		    (matched_policy->result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL && !necp_socket_uses_interface(inp, matched_policy->result_parameter.tunnel_interface_index)))) {
			if (necp_debug) {
				NECPLOG(LOG_DEBUG, "Marking socket in state %d as defunct", so->so_state);
			}
			sosetdefunct(current_proc(), so, SHUTDOWN_SOCKET_LEVEL_NECP | SHUTDOWN_SOCKET_LEVEL_DISCONNECT_ALL, TRUE);
		} else if (necp_socket_is_connected(inp) &&
		    matched_policy->result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL &&
		    info.protocol == IPPROTO_TCP) {
			// Reset MSS on TCP socket if tunnel policy changes
			reset_tcp_mss = true;
		}

		if (necp_debug > 1) {
			NECPLOG(LOG_DEBUG, "Socket Policy: %p (BoundInterface %d Proto %d) Policy %d Result %d Parameter %d", inp->inp_socket, info.bound_interface_index, info.protocol, matched_policy->id, matched_policy->result, matched_policy->result_parameter.tunnel_interface_index);
		}
	} else {
		bool drop_all = false;
		if (necp_drop_all_order > 0 || info.drop_order > 0 || drop_dest_policy_result == NECP_KERNEL_POLICY_RESULT_DROP) {
			// Mark socket as a drop if set
			drop_all = true;
			if (drop_all_bypass == NECP_DROP_ALL_BYPASS_CHECK_RESULT_NONE) {
				drop_all_bypass = necp_check_drop_all_bypass_result(NULL);
			}
		}
		if (drop_all && drop_all_bypass == NECP_DROP_ALL_BYPASS_CHECK_RESULT_FALSE) {
			inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
			inp->inp_policyresult.skip_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
			inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount;
			inp->inp_policyresult.flowhash = flowhash;
			inp->inp_policyresult.results.filter_control_unit = 0;
			inp->inp_policyresult.results.route_rule_id = 0;
			inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_DROP;
		} else {
			// Mark non-matching socket so we don't re-check it
			inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
			inp->inp_policyresult.skip_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
			inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount;
			inp->inp_policyresult.flowhash = flowhash;
			inp->inp_policyresult.results.filter_control_unit = filter_control_unit; // We may have matched a filter, so mark it!
			inp->inp_policyresult.results.route_rule_id = route_rule_id; // We may have matched a route rule, so mark it!
			inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_NONE;
		}
	}

	// Unlock
	lck_rw_done(&necp_kernel_policy_lock);

	if (reset_tcp_mss) {
		// Update MSS when not holding the policy lock to avoid recursive locking
		tcp_mtudisc(inp, 0);
	}

	return matched_policy_id;
}
static bool
necp_ip_output_check_policy(struct necp_kernel_ip_output_policy *kernel_policy, necp_kernel_policy_id socket_policy_id, necp_kernel_policy_id socket_skip_policy_id, u_int32_t bound_interface_index, u_int32_t last_interface_index, u_int16_t protocol, union necp_sockaddr_union *local, union necp_sockaddr_union *remote, struct rtentry *rt)
{
	if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES)) {
		if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
			u_int32_t cond_bound_interface_index = kernel_policy->cond_bound_interface ? kernel_policy->cond_bound_interface->if_index : 0;
			if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
				if (bound_interface_index == cond_bound_interface_index) {
					// No match, matches forbidden interface
					return FALSE;
				}
			} else {
				if (bound_interface_index != cond_bound_interface_index) {
					// No match, does not match required interface
					return FALSE;
				}
			}
		} else {
			if (bound_interface_index != 0) {
				// No match, requires a non-bound packet
				return FALSE;
			}
		}
	}

	if (kernel_policy->condition_mask == 0) {
		return TRUE;
	}

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID) {
		necp_kernel_policy_id matched_policy_id =
		    kernel_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP ? socket_skip_policy_id : socket_policy_id;
		if (matched_policy_id != kernel_policy->cond_policy_id) {
			// No match, does not match required id
			return FALSE;
		}
	}

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LAST_INTERFACE) {
		if (last_interface_index != kernel_policy->cond_last_interface_index) {
			return FALSE;
		}
	}

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
		if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
			if (protocol == kernel_policy->cond_protocol) {
				// No match, matches forbidden protocol
				return FALSE;
			}
		} else {
			if (protocol != kernel_policy->cond_protocol) {
				// No match, does not match required protocol
				return FALSE;
			}
		}
	}

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_NETWORKS) {
		bool is_local = FALSE;

		if (rt != NULL) {
			is_local = IS_NECP_DEST_IN_LOCAL_NETWORKS(rt);
		} else {
			is_local = necp_is_route_local(remote);
		}

		if (!is_local) {
			// Either no route to validate or no match for local networks
			return FALSE;
		}
	}

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
		if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
			bool inRange = necp_is_addr_in_range((struct sockaddr *)local, (struct sockaddr *)&kernel_policy->cond_local_start, (struct sockaddr *)&kernel_policy->cond_local_end);
			if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
				if (inRange) {
					return FALSE;
				}
			} else {
				if (!inRange) {
					return FALSE;
				}
			}
		} else if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
			bool inSubnet = necp_is_addr_in_subnet((struct sockaddr *)local, (struct sockaddr *)&kernel_policy->cond_local_start, kernel_policy->cond_local_prefix);
			if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
				if (inSubnet) {
					return FALSE;
				}
			} else {
				if (!inSubnet) {
					return FALSE;
				}
			}
		}
	}

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
		if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
			bool inRange = necp_is_addr_in_range((struct sockaddr *)remote, (struct sockaddr *)&kernel_policy->cond_remote_start, (struct sockaddr *)&kernel_policy->cond_remote_end);
			if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
				if (inRange) {
					return FALSE;
				}
			} else {
				if (!inRange) {
					return FALSE;
				}
			}
		} else if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
			bool inSubnet = necp_is_addr_in_subnet((struct sockaddr *)remote, (struct sockaddr *)&kernel_policy->cond_remote_start, kernel_policy->cond_remote_prefix);
			if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
				if (inSubnet) {
					return FALSE;
				}
			} else {
				if (!inSubnet) {
					return FALSE;
				}
			}
		}
	}

	return TRUE;
}
static inline struct necp_kernel_ip_output_policy *
necp_ip_output_find_policy_match_locked(necp_kernel_policy_id socket_policy_id, necp_kernel_policy_id socket_skip_policy_id, u_int32_t bound_interface_index, u_int32_t last_interface_index, u_int16_t protocol, union necp_sockaddr_union *local_addr, union necp_sockaddr_union *remote_addr, struct rtentry *rt, u_int32_t *return_route_rule_id, necp_kernel_policy_result *return_drop_dest_policy_result, necp_drop_all_bypass_check_result_t *return_drop_all_bypass)
{
	u_int32_t skip_order = 0;
	u_int32_t skip_session_order = 0;
	struct necp_kernel_ip_output_policy *matched_policy = NULL;
	struct necp_kernel_ip_output_policy **policy_search_array = necp_kernel_ip_output_policies_map[NECP_IP_OUTPUT_MAP_ID_TO_BUCKET(socket_policy_id)];
	u_int32_t route_rule_id_array[MAX_AGGREGATE_ROUTE_RULES];
	size_t route_rule_id_count = 0;
	necp_drop_all_bypass_check_result_t drop_all_bypass = NECP_DROP_ALL_BYPASS_CHECK_RESULT_NONE;
	if (return_drop_all_bypass != NULL) {
		*return_drop_all_bypass = drop_all_bypass;
	}

	if (return_route_rule_id != NULL) {
		*return_route_rule_id = 0;
	}

	*return_drop_dest_policy_result = NECP_KERNEL_POLICY_RESULT_NONE;

	if (policy_search_array != NULL) {
		for (int i = 0; policy_search_array[i] != NULL; i++) {
			if (necp_drop_all_order != 0 && policy_search_array[i]->session_order >= necp_drop_all_order) {
				// We've hit a drop all rule
				if (drop_all_bypass == NECP_DROP_ALL_BYPASS_CHECK_RESULT_NONE) {
					drop_all_bypass = necp_check_drop_all_bypass_result(NULL);
					if (return_drop_all_bypass != NULL) {
						*return_drop_all_bypass = drop_all_bypass;
					}
				}
				if (drop_all_bypass == NECP_DROP_ALL_BYPASS_CHECK_RESULT_FALSE) {
					break;
				}
			}
			if (necp_drop_dest_policy.entry_count > 0 &&
			    necp_address_matches_drop_dest_policy(remote_addr, policy_search_array[i]->session_order)) {
				// We've hit a drop by destination address rule
				*return_drop_dest_policy_result = NECP_KERNEL_POLICY_RESULT_DROP;
			}
			if (skip_session_order && policy_search_array[i]->session_order >= skip_session_order) {
				// Done skipping
				skip_order = 0;
				skip_session_order = 0;
			}
			if (skip_order) {
				if (policy_search_array[i]->order < skip_order) {
					// Skip this policy
					continue;
				} else {
					// Done skipping
					skip_order = 0;
					skip_session_order = 0;
				}
			} else if (skip_session_order) {
				// Skip this policy
				continue;
			}
			if (necp_ip_output_check_policy(policy_search_array[i], socket_policy_id, socket_skip_policy_id, bound_interface_index, last_interface_index, protocol, local_addr, remote_addr, rt)) {
				if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_ROUTE_RULES) {
					if (return_route_rule_id != NULL && route_rule_id_count < MAX_AGGREGATE_ROUTE_RULES) {
						route_rule_id_array[route_rule_id_count++] = policy_search_array[i]->result_parameter.route_rule_id;
					}
					continue;
				} else if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
					skip_order = policy_search_array[i]->result_parameter.skip_policy_order;
					skip_session_order = policy_search_array[i]->session_order + 1;
					continue;
				}

				// Passed all tests, found a match
				matched_policy = policy_search_array[i];
				break;
			}
		}
	}

	if (route_rule_id_count == 1) {
		*return_route_rule_id = route_rule_id_array[0];
	} else if (route_rule_id_count > 1) {
		*return_route_rule_id = necp_create_aggregate_route_rule(route_rule_id_array);
	}

	return matched_policy;
}
static inline bool
necp_output_bypass(struct mbuf *packet)
{
	if (necp_pass_loopback > 0 && necp_is_loopback(NULL, NULL, NULL, packet)) {
		return true;
	}
	if (necp_pass_keepalives > 0 && necp_get_is_keepalive_from_packet(packet)) {
		return true;
	}
	if (necp_is_intcoproc(NULL, packet)) {
		return true;
	}
	return false;
}
necp_kernel_policy_id
necp_ip_output_find_policy_match(struct mbuf *packet, int flags, struct ip_out_args *ipoa, struct rtentry *rt,
    necp_kernel_policy_result *result, necp_kernel_policy_result_parameter *result_parameter)
{
	struct ip *ip = NULL;
	int hlen = sizeof(struct ip);
	necp_kernel_policy_id socket_policy_id = NECP_KERNEL_POLICY_ID_NONE;
	necp_kernel_policy_id socket_skip_policy_id = NECP_KERNEL_POLICY_ID_NONE;
	necp_kernel_policy_id matched_policy_id = NECP_KERNEL_POLICY_ID_NONE;
	struct necp_kernel_ip_output_policy *matched_policy = NULL;
	u_int16_t protocol = 0;
	u_int32_t bound_interface_index = 0;
	u_int32_t last_interface_index = 0;
	union necp_sockaddr_union local_addr;
	union necp_sockaddr_union remote_addr;
	u_int32_t drop_dest_policy_result = NECP_KERNEL_POLICY_RESULT_NONE;
	necp_drop_all_bypass_check_result_t drop_all_bypass = NECP_DROP_ALL_BYPASS_CHECK_RESULT_NONE;

	if (result) {
		*result = 0;
	}

	if (result_parameter) {
		memset(result_parameter, 0, sizeof(*result_parameter));
	}

	if (packet == NULL) {
		return NECP_KERNEL_POLICY_ID_NONE;
	}

	socket_policy_id = necp_get_policy_id_from_packet(packet);
	socket_skip_policy_id = necp_get_skip_policy_id_from_packet(packet);

	// Exit early for an empty list
	// Don't lock. Possible race condition, but we don't want the performance hit.
	if (necp_kernel_ip_output_policies_count == 0 ||
	    (socket_policy_id == NECP_KERNEL_POLICY_ID_NONE && necp_kernel_ip_output_policies_non_id_count == 0 && necp_drop_dest_policy.entry_count == 0)) {
		if (necp_drop_all_order > 0) {
			matched_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
			if (result) {
				if (necp_output_bypass(packet)) {
					*result = NECP_KERNEL_POLICY_RESULT_PASS;
				} else {
					*result = NECP_KERNEL_POLICY_RESULT_DROP;
				}
			}
		}

		return matched_policy_id;
	}

	// Check for loopback exception
	if (necp_output_bypass(packet)) {
		matched_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
		if (result) {
			*result = NECP_KERNEL_POLICY_RESULT_PASS;
		}
		return matched_policy_id;
	}

	last_interface_index = necp_get_last_interface_index_from_packet(packet);

	// Process packet to get relevant fields
	ip = mtod(packet, struct ip *);
#ifdef _IP_VHL
	hlen = _IP_VHL_HL(ip->ip_vhl) << 2;
#else
	hlen = ip->ip_hl << 2;
#endif

	protocol = ip->ip_p;

	if ((flags & IP_OUTARGS) && (ipoa != NULL) &&
	    (ipoa->ipoa_flags & IPOAF_BOUND_IF) &&
	    ipoa->ipoa_boundif != IFSCOPE_NONE) {
		bound_interface_index = ipoa->ipoa_boundif;
	}

	local_addr.sin.sin_family = AF_INET;
	local_addr.sin.sin_len = sizeof(struct sockaddr_in);
	memcpy(&local_addr.sin.sin_addr, &ip->ip_src, sizeof(ip->ip_src));

	remote_addr.sin.sin_family = AF_INET;
	remote_addr.sin.sin_len = sizeof(struct sockaddr_in);
	memcpy(&((struct sockaddr_in *)&remote_addr)->sin_addr, &ip->ip_dst, sizeof(ip->ip_dst));

	switch (protocol) {
	case IPPROTO_TCP: {
		struct tcphdr th;
		if ((int)(hlen + sizeof(th)) <= packet->m_pkthdr.len) {
			m_copydata(packet, hlen, sizeof(th), (u_int8_t *)&th);
			((struct sockaddr_in *)&local_addr)->sin_port = th.th_sport;
			((struct sockaddr_in *)&remote_addr)->sin_port = th.th_dport;
		}
		break;
	}
	case IPPROTO_UDP: {
		struct udphdr uh;
		if ((int)(hlen + sizeof(uh)) <= packet->m_pkthdr.len) {
			m_copydata(packet, hlen, sizeof(uh), (u_int8_t *)&uh);
			((struct sockaddr_in *)&local_addr)->sin_port = uh.uh_sport;
			((struct sockaddr_in *)&remote_addr)->sin_port = uh.uh_dport;
		}
		break;
	}
	default: {
		((struct sockaddr_in *)&local_addr)->sin_port = 0;
		((struct sockaddr_in *)&remote_addr)->sin_port = 0;
		break;
	}
	}

	// Match packet to policy
	lck_rw_lock_shared(&necp_kernel_policy_lock);
	u_int32_t route_rule_id = 0;
	matched_policy = necp_ip_output_find_policy_match_locked(socket_policy_id, socket_skip_policy_id, bound_interface_index, last_interface_index, protocol, &local_addr, &remote_addr, rt, &route_rule_id, &drop_dest_policy_result, &drop_all_bypass);
	if (matched_policy) {
		matched_policy_id = matched_policy->id;
		if (result) {
			*result = matched_policy->result;
		}

		if (result_parameter) {
			memcpy(result_parameter, &matched_policy->result_parameter, sizeof(matched_policy->result_parameter));
		}

		if (route_rule_id != 0 &&
		    packet->m_pkthdr.necp_mtag.necp_route_rule_id == 0) {
			packet->m_pkthdr.necp_mtag.necp_route_rule_id = route_rule_id;
		}

		if (necp_debug > 1) {
			NECPLOG(LOG_DEBUG, "IP Output: (ID %d BoundInterface %d LastInterface %d Proto %d) Policy %d Result %d Parameter %d Route Rule %u", socket_policy_id, bound_interface_index, last_interface_index, protocol, matched_policy->id, matched_policy->result, matched_policy->result_parameter.tunnel_interface_index, route_rule_id);
		}
	} else {
		bool drop_all = false;
		/*
		 * Apply drop-all only to packets which have never matched a primary policy (check
		 * if the packet saved policy id is none or falls within the socket policy id range).
		 */
		if (socket_policy_id < NECP_KERNEL_POLICY_ID_FIRST_VALID_IP &&
		    (necp_drop_all_order > 0 || drop_dest_policy_result == NECP_KERNEL_POLICY_RESULT_DROP)) {
			drop_all = true;
			if (drop_all_bypass == NECP_DROP_ALL_BYPASS_CHECK_RESULT_NONE) {
				drop_all_bypass = necp_check_drop_all_bypass_result(NULL);
			}
		}
		if (drop_all && drop_all_bypass == NECP_DROP_ALL_BYPASS_CHECK_RESULT_FALSE) {
			matched_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
			if (result) {
				*result = NECP_KERNEL_POLICY_RESULT_DROP;
			}
		} else if (route_rule_id != 0 &&
		    packet->m_pkthdr.necp_mtag.necp_route_rule_id == 0) {
			// If we matched a route rule, mark it
			packet->m_pkthdr.necp_mtag.necp_route_rule_id = route_rule_id;
		}
	}

	lck_rw_done(&necp_kernel_policy_lock);

	return matched_policy_id;
}
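
/*
 * Illustrative sketch (not part of the kernel build): the same header walk done
 * above with mtod()/m_copydata(), but over a flat byte buffer, showing how the
 * local/remote address pair and the TCP/UDP ports are recovered from an IPv4
 * packet. The struct and function names are hypothetical.
 */
#if 0
#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <stdbool.h>
#include <netinet/in.h>

struct example_v4_flow {
	struct in_addr src;
	struct in_addr dst;
	uint16_t sport;  // network byte order, 0 if not TCP/UDP
	uint16_t dport;
	uint8_t  proto;
};

static bool
example_parse_ipv4(const uint8_t *pkt, size_t len, struct example_v4_flow *flow)
{
	if (len < 20) {
		return false;
	}
	size_t hlen = (size_t)(pkt[0] & 0x0f) << 2;   // IHL is in 32-bit words
	if (hlen < 20 || len < hlen) {
		return false;
	}
	memset(flow, 0, sizeof(*flow));
	flow->proto = pkt[9];
	memcpy(&flow->src, pkt + 12, sizeof(flow->src));
	memcpy(&flow->dst, pkt + 16, sizeof(flow->dst));
	// TCP and UDP headers both begin with 16-bit source and destination ports
	if ((flow->proto == IPPROTO_TCP || flow->proto == IPPROTO_UDP) && len >= hlen + 4) {
		memcpy(&flow->sport, pkt + hlen, sizeof(flow->sport));
		memcpy(&flow->dport, pkt + hlen + 2, sizeof(flow->dport));
	}
	return true;
}
#endif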
necp_kernel_policy_id
necp_ip6_output_find_policy_match(struct mbuf *packet, int flags, struct ip6_out_args *ip6oa, struct rtentry *rt,
    necp_kernel_policy_result *result, necp_kernel_policy_result_parameter *result_parameter)
{
	struct ip6_hdr *ip6 = NULL;
	int next = -1;
	int offset = 0;
	necp_kernel_policy_id socket_policy_id = NECP_KERNEL_POLICY_ID_NONE;
	necp_kernel_policy_id socket_skip_policy_id = NECP_KERNEL_POLICY_ID_NONE;
	necp_kernel_policy_id matched_policy_id = NECP_KERNEL_POLICY_ID_NONE;
	struct necp_kernel_ip_output_policy *matched_policy = NULL;
	u_int16_t protocol = 0;
	u_int32_t bound_interface_index = 0;
	u_int32_t last_interface_index = 0;
	union necp_sockaddr_union local_addr;
	union necp_sockaddr_union remote_addr;
	u_int32_t drop_dest_policy_result = NECP_KERNEL_POLICY_RESULT_NONE;
	necp_drop_all_bypass_check_result_t drop_all_bypass = NECP_DROP_ALL_BYPASS_CHECK_RESULT_NONE;

	if (result) {
		*result = 0;
	}

	if (result_parameter) {
		memset(result_parameter, 0, sizeof(*result_parameter));
	}

	if (packet == NULL) {
		return NECP_KERNEL_POLICY_ID_NONE;
	}

	socket_policy_id = necp_get_policy_id_from_packet(packet);
	socket_skip_policy_id = necp_get_skip_policy_id_from_packet(packet);

	// Exit early for an empty list
	// Don't lock. Possible race condition, but we don't want the performance hit.
	if (necp_kernel_ip_output_policies_count == 0 ||
	    (socket_policy_id == NECP_KERNEL_POLICY_ID_NONE && necp_kernel_ip_output_policies_non_id_count == 0 && necp_drop_dest_policy.entry_count == 0)) {
		if (necp_drop_all_order > 0) {
			matched_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
			if (result) {
				if (necp_output_bypass(packet)) {
					*result = NECP_KERNEL_POLICY_RESULT_PASS;
				} else {
					*result = NECP_KERNEL_POLICY_RESULT_DROP;
				}
			}
		}

		return matched_policy_id;
	}

	// Check for loopback exception
	if (necp_output_bypass(packet)) {
		matched_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
		if (result) {
			*result = NECP_KERNEL_POLICY_RESULT_PASS;
		}
		return matched_policy_id;
	}

	last_interface_index = necp_get_last_interface_index_from_packet(packet);

	// Process packet to get relevant fields
	ip6 = mtod(packet, struct ip6_hdr *);

	if ((flags & IPV6_OUTARGS) && (ip6oa != NULL) &&
	    (ip6oa->ip6oa_flags & IP6OAF_BOUND_IF) &&
	    ip6oa->ip6oa_boundif != IFSCOPE_NONE) {
		bound_interface_index = ip6oa->ip6oa_boundif;
	}

	((struct sockaddr_in6 *)&local_addr)->sin6_family = AF_INET6;
	((struct sockaddr_in6 *)&local_addr)->sin6_len = sizeof(struct sockaddr_in6);
	memcpy(&((struct sockaddr_in6 *)&local_addr)->sin6_addr, &ip6->ip6_src, sizeof(ip6->ip6_src));

	((struct sockaddr_in6 *)&remote_addr)->sin6_family = AF_INET6;
	((struct sockaddr_in6 *)&remote_addr)->sin6_len = sizeof(struct sockaddr_in6);
	memcpy(&((struct sockaddr_in6 *)&remote_addr)->sin6_addr, &ip6->ip6_dst, sizeof(ip6->ip6_dst));

	offset = ip6_lasthdr(packet, 0, IPPROTO_IPV6, &next);
	if (offset >= 0 && packet->m_pkthdr.len >= offset) {
		protocol = next;
		switch (protocol) {
		case IPPROTO_TCP: {
			struct tcphdr th;
			if ((int)(offset + sizeof(th)) <= packet->m_pkthdr.len) {
				m_copydata(packet, offset, sizeof(th), (u_int8_t *)&th);
				((struct sockaddr_in6 *)&local_addr)->sin6_port = th.th_sport;
				((struct sockaddr_in6 *)&remote_addr)->sin6_port = th.th_dport;
			}
			break;
		}
		case IPPROTO_UDP: {
			struct udphdr uh;
			if ((int)(offset + sizeof(uh)) <= packet->m_pkthdr.len) {
				m_copydata(packet, offset, sizeof(uh), (u_int8_t *)&uh);
				((struct sockaddr_in6 *)&local_addr)->sin6_port = uh.uh_sport;
				((struct sockaddr_in6 *)&remote_addr)->sin6_port = uh.uh_dport;
			}
			break;
		}
		default: {
			((struct sockaddr_in6 *)&local_addr)->sin6_port = 0;
			((struct sockaddr_in6 *)&remote_addr)->sin6_port = 0;
			break;
		}
		}
	}

	// Match packet to policy
	lck_rw_lock_shared(&necp_kernel_policy_lock);
	u_int32_t route_rule_id = 0;
	matched_policy = necp_ip_output_find_policy_match_locked(socket_policy_id, socket_skip_policy_id, bound_interface_index, last_interface_index, protocol, &local_addr, &remote_addr, rt, &route_rule_id, &drop_dest_policy_result, &drop_all_bypass);
	if (matched_policy) {
		matched_policy_id = matched_policy->id;
		if (result) {
			*result = matched_policy->result;
		}

		if (result_parameter) {
			memcpy(result_parameter, &matched_policy->result_parameter, sizeof(matched_policy->result_parameter));
		}

		if (route_rule_id != 0 &&
		    packet->m_pkthdr.necp_mtag.necp_route_rule_id == 0) {
			packet->m_pkthdr.necp_mtag.necp_route_rule_id = route_rule_id;
		}

		if (necp_debug > 1) {
			NECPLOG(LOG_DEBUG, "IP6 Output: (ID %d BoundInterface %d LastInterface %d Proto %d) Policy %d Result %d Parameter %d Route Rule %u", socket_policy_id, bound_interface_index, last_interface_index, protocol, matched_policy->id, matched_policy->result, matched_policy->result_parameter.tunnel_interface_index, route_rule_id);
		}
	} else {
		bool drop_all = false;
		/*
		 * Apply drop-all only to packets which have never matched a primary policy (check
		 * if the packet saved policy id is none or falls within the socket policy id range).
		 */
		if (socket_policy_id < NECP_KERNEL_POLICY_ID_FIRST_VALID_IP &&
		    (necp_drop_all_order > 0 || drop_dest_policy_result == NECP_KERNEL_POLICY_RESULT_DROP)) {
			drop_all = true;
			if (drop_all_bypass == NECP_DROP_ALL_BYPASS_CHECK_RESULT_NONE) {
				drop_all_bypass = necp_check_drop_all_bypass_result(NULL);
			}
		}
		if (drop_all && drop_all_bypass == NECP_DROP_ALL_BYPASS_CHECK_RESULT_FALSE) {
			matched_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
			if (result) {
				*result = NECP_KERNEL_POLICY_RESULT_DROP;
			}
		} else if (route_rule_id != 0 &&
		    packet->m_pkthdr.necp_mtag.necp_route_rule_id == 0) {
			// If we matched a route rule, mark it
			packet->m_pkthdr.necp_mtag.necp_route_rule_id = route_rule_id;
		}
	}

	lck_rw_done(&necp_kernel_policy_lock);

	return matched_policy_id;
}
static bool
necp_is_addr_in_range(struct sockaddr *addr, struct sockaddr *range_start, struct sockaddr *range_end)
{
	int cmp = 0;

	if (addr == NULL || range_start == NULL || range_end == NULL) {
		return FALSE;
	}

	/* Must be greater than or equal to start */
	cmp = necp_addr_compare(addr, range_start, 1);
	if (cmp != 0 && cmp != 1) {
		return FALSE;
	}

	/* Must be less than or equal to end */
	cmp = necp_addr_compare(addr, range_end, 1);
	if (cmp != 0 && cmp != -1) {
		return FALSE;
	}

	return TRUE;
}
static bool
necp_is_range_in_range(struct sockaddr *inner_range_start, struct sockaddr *inner_range_end, struct sockaddr *range_start, struct sockaddr *range_end)
{
	int cmp = 0;

	if (inner_range_start == NULL || inner_range_end == NULL || range_start == NULL || range_end == NULL) {
		return FALSE;
	}

	/* Must be greater than or equal to start */
	cmp = necp_addr_compare(inner_range_start, range_start, 1);
	if (cmp != 0 && cmp != 1) {
		return FALSE;
	}

	/* Must be less than or equal to end */
	cmp = necp_addr_compare(inner_range_end, range_end, 1);
	if (cmp != 0 && cmp != -1) {
		return FALSE;
	}

	return TRUE;
}
static bool
necp_is_addr_in_subnet(struct sockaddr *addr, struct sockaddr *subnet_addr, u_int8_t subnet_prefix)
{
	if (addr == NULL || subnet_addr == NULL) {
		return FALSE;
	}

	if (addr->sa_family != subnet_addr->sa_family || addr->sa_len != subnet_addr->sa_len) {
		return FALSE;
	}

	switch (addr->sa_family) {
	case AF_INET: {
		if (satosin(subnet_addr)->sin_port != 0 &&
		    satosin(addr)->sin_port != satosin(subnet_addr)->sin_port) {
			return FALSE;
		}
		return necp_buffer_compare_with_bit_prefix((u_int8_t *)&satosin(addr)->sin_addr, (u_int8_t *)&satosin(subnet_addr)->sin_addr, subnet_prefix);
	}
	case AF_INET6: {
		if (satosin6(subnet_addr)->sin6_port != 0 &&
		    satosin6(addr)->sin6_port != satosin6(subnet_addr)->sin6_port) {
			return FALSE;
		}
		if (satosin6(addr)->sin6_scope_id &&
		    satosin6(subnet_addr)->sin6_scope_id &&
		    satosin6(addr)->sin6_scope_id != satosin6(subnet_addr)->sin6_scope_id) {
			return FALSE;
		}
		return necp_buffer_compare_with_bit_prefix((u_int8_t *)&satosin6(addr)->sin6_addr, (u_int8_t *)&satosin6(subnet_addr)->sin6_addr, subnet_prefix);
	}
	default: {
		return FALSE;
	}
	}

	return FALSE;
}
/*
 * Return values:
 * -1: sa1 < sa2
 *  0: sa1 == sa2
 *  1: sa1 > sa2
 *  2: Not comparable or error
 */
static int
necp_addr_compare(struct sockaddr *sa1, struct sockaddr *sa2, int check_port)
{
	int result = 0;
	int port_result = 0;

	if (sa1->sa_family != sa2->sa_family || sa1->sa_len != sa2->sa_len) {
		return 2;
	}

	if (sa1->sa_len == 0) {
		return 0;
	}

	switch (sa1->sa_family) {
	case AF_INET: {
		if (sa1->sa_len != sizeof(struct sockaddr_in)) {
			return 2;
		}

		result = memcmp(&satosin(sa1)->sin_addr.s_addr, &satosin(sa2)->sin_addr.s_addr, sizeof(satosin(sa1)->sin_addr.s_addr));

		if (check_port) {
			if (satosin(sa1)->sin_port < satosin(sa2)->sin_port) {
				port_result = -1;
			} else if (satosin(sa1)->sin_port > satosin(sa2)->sin_port) {
				port_result = 1;
			}

			if (result == 0) {
				result = port_result;
			} else if ((result > 0 && port_result < 0) || (result < 0 && port_result > 0)) {
				return 2;
			}
		}

		break;
	}
	case AF_INET6: {
		if (sa1->sa_len != sizeof(struct sockaddr_in6)) {
			return 2;
		}

		if (satosin6(sa1)->sin6_scope_id != satosin6(sa2)->sin6_scope_id) {
			return 2;
		}

		result = memcmp(&satosin6(sa1)->sin6_addr.s6_addr[0], &satosin6(sa2)->sin6_addr.s6_addr[0], sizeof(struct in6_addr));

		if (check_port) {
			if (satosin6(sa1)->sin6_port < satosin6(sa2)->sin6_port) {
				port_result = -1;
			} else if (satosin6(sa1)->sin6_port > satosin6(sa2)->sin6_port) {
				port_result = 1;
			}

			if (result == 0) {
				result = port_result;
			} else if ((result > 0 && port_result < 0) || (result < 0 && port_result > 0)) {
				return 2;
			}
		}

		break;
	}
	default: {
		result = memcmp(sa1, sa2, sa1->sa_len);
		break;
	}
	}

	if (result < 0) {
		result = (-1);
	} else if (result > 0) {
		result = (1);
	}

	return result;
}
static bool
necp_buffer_compare_with_bit_prefix(u_int8_t *p1, u_int8_t *p2, u_int32_t bits)
{
	u_int8_t mask;

	/* Handle null pointers */
	if (p1 == NULL || p2 == NULL) {
		return p1 == p2;
	}

	while (bits >= 8) {
		if (*p1++ != *p2++) {
			return FALSE;
		}
		bits -= 8;
	}

	if (bits > 0) {
		mask = ~((1 << (8 - bits)) - 1);
		if ((*p1 & mask) != (*p2 & mask)) {
			return FALSE;
		}
	}
	return TRUE;
}
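
/*
 * Illustrative sketch (not part of the kernel build): a worked example of the
 * bit-prefix comparison above. For a /20 prefix the first two bytes must match
 * exactly and only the top 4 bits of the third byte are compared, so 10.1.16.1
 * and 10.1.31.200 fall in the same 10.1.16.0/20 subnet while 10.1.32.1 does not.
 * The names here are hypothetical; only the masking scheme mirrors the helper
 * above.
 */
#if 0
#include <stdint.h>
#include <stdbool.h>
#include <assert.h>

static bool
example_prefix_match(const uint8_t *p1, const uint8_t *p2, uint32_t bits)
{
	while (bits >= 8) {
		if (*p1++ != *p2++) {
			return false;
		}
		bits -= 8;
	}
	if (bits > 0) {
		uint8_t mask = (uint8_t)~((1u << (8 - bits)) - 1);
		return (*p1 & mask) == (*p2 & mask);
	}
	return true;
}

static void
example_prefix_match_usage(void)
{
	const uint8_t a[4] = { 10, 1, 16, 1 };    // 10.1.16.1
	const uint8_t b[4] = { 10, 1, 31, 200 };  // 10.1.31.200
	const uint8_t c[4] = { 10, 1, 32, 1 };    // 10.1.32.1
	assert(example_prefix_match(a, b, 20));   // same /20
	assert(!example_prefix_match(a, c, 20));  // third byte differs in masked bits
}
#endif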
static bool
necp_addr_is_empty(struct sockaddr *addr)
{
	if (addr == NULL) {
		return TRUE;
	}

	if (addr->sa_len == 0) {
		return TRUE;
	}

	switch (addr->sa_family) {
	case AF_INET: {
		static struct sockaddr_in ipv4_empty_address = {
			.sin_len = sizeof(struct sockaddr_in),
			.sin_family = AF_INET,
			.sin_port = 0,
			.sin_addr = { .s_addr = 0 }, // 0.0.0.0
		};
		if (necp_addr_compare(addr, (struct sockaddr *)&ipv4_empty_address, 0) == 0) {
			return TRUE;
		} else {
			return FALSE;
		}
	}
	case AF_INET6: {
		static struct sockaddr_in6 ipv6_empty_address = {
			.sin6_len = sizeof(struct sockaddr_in6),
			.sin6_family = AF_INET6,
			.sin6_port = 0,
			.sin6_flowinfo = 0,
			.sin6_addr = IN6ADDR_ANY_INIT, // ::
			.sin6_scope_id = 0,
		};
		if (necp_addr_compare(addr, (struct sockaddr *)&ipv6_empty_address, 0) == 0) {
			return TRUE;
		} else {
			return FALSE;
		}
	}
	default:
		return FALSE;
	}

	return FALSE;
}
static bool
necp_update_qos_marking(struct ifnet *ifp, u_int32_t route_rule_id)
{
	bool qos_marking = FALSE;
	int exception_index = 0;
	struct necp_route_rule *route_rule = NULL;

	route_rule = necp_lookup_route_rule_locked(&necp_route_rules, route_rule_id);
	if (route_rule == NULL) {
		qos_marking = FALSE;
		goto done;
	}

	qos_marking = (route_rule->default_action == NECP_ROUTE_RULE_QOS_MARKING) ? TRUE : FALSE;

	if (ifp == NULL) {
		goto done;
	}

	for (exception_index = 0; exception_index < MAX_ROUTE_RULE_INTERFACES; exception_index++) {
		if (route_rule->exception_if_indices[exception_index] == 0) {
			break;
		}
		if (route_rule->exception_if_actions[exception_index] != NECP_ROUTE_RULE_QOS_MARKING) {
			continue;
		}
		if (route_rule->exception_if_indices[exception_index] == ifp->if_index) {
			qos_marking = TRUE;
			if (necp_debug > 2) {
				NECPLOG(LOG_DEBUG, "QoS Marking : Interface match %d for Rule %d Allowed %d",
				    route_rule->exception_if_indices[exception_index], route_rule_id, qos_marking);
			}
			goto done;
		}
	}

	if ((route_rule->cellular_action == NECP_ROUTE_RULE_QOS_MARKING && IFNET_IS_CELLULAR(ifp)) ||
	    (route_rule->wifi_action == NECP_ROUTE_RULE_QOS_MARKING && IFNET_IS_WIFI(ifp)) ||
	    (route_rule->wired_action == NECP_ROUTE_RULE_QOS_MARKING && IFNET_IS_WIRED(ifp)) ||
	    (route_rule->expensive_action == NECP_ROUTE_RULE_QOS_MARKING && IFNET_IS_EXPENSIVE(ifp)) ||
	    (route_rule->constrained_action == NECP_ROUTE_RULE_QOS_MARKING && IFNET_IS_CONSTRAINED(ifp))) {
		qos_marking = TRUE;
		if (necp_debug > 2) {
			NECPLOG(LOG_DEBUG, "QoS Marking: C:%d WF:%d W:%d E:%d Cn:%d for Rule %d Allowed %d",
			    route_rule->cellular_action, route_rule->wifi_action, route_rule->wired_action,
			    route_rule->expensive_action, route_rule->constrained_action, route_rule_id, qos_marking);
		}
		goto done;
	}
done:
	if (necp_debug > 1) {
		NECPLOG(LOG_DEBUG, "QoS Marking: Rule %d ifp %s Allowed %d",
		    route_rule_id, ifp ? ifp->if_xname : "", qos_marking);
	}
	return qos_marking;
}
void
necp_socket_update_qos_marking(struct inpcb *inp, struct rtentry *route, struct ifnet *interface, u_int32_t route_rule_id)
{
	bool qos_marking = FALSE;
	struct ifnet *ifp = interface = NULL;

	if (net_qos_policy_restricted == 0) {
		return;
	}
	if (inp->inp_socket == NULL) {
		return;
	}
	if ((inp->inp_socket->so_flags1 & SOF1_QOSMARKING_POLICY_OVERRIDE)) {
		return;
	}
	/*
	 * This is racy but we do not need the performance hit of taking necp_kernel_policy_lock
	 */
	if (inp->inp_policyresult.results.qos_marking_gencount == necp_kernel_socket_policies_gencount) {
		return;
	}

	lck_rw_lock_shared(&necp_kernel_policy_lock);

	if (ifp == NULL && route != NULL) {
		ifp = route->rt_ifp;
	}
	/*
	 * By default, until we have a interface, do not mark and reevaluate the Qos marking policy
	 */
	if (ifp == NULL || route_rule_id == 0) {
		qos_marking = FALSE;
		goto done;
	}

	if (ROUTE_RULE_IS_AGGREGATE(route_rule_id)) {
		struct necp_aggregate_route_rule *aggregate_route_rule = necp_lookup_aggregate_route_rule_locked(route_rule_id);
		if (aggregate_route_rule != NULL) {
			int index = 0;
			for (index = 0; index < MAX_AGGREGATE_ROUTE_RULES; index++) {
				u_int32_t sub_route_rule_id = aggregate_route_rule->rule_ids[index];
				if (sub_route_rule_id == 0) {
					break;
				}
				qos_marking = necp_update_qos_marking(ifp, sub_route_rule_id);
				if (qos_marking == TRUE) {
					break;
				}
			}
		}
	} else {
		qos_marking = necp_update_qos_marking(ifp, route_rule_id);
	}
	/*
	 * Now that we have an interface we remember the gencount
	 */
	inp->inp_policyresult.results.qos_marking_gencount = necp_kernel_socket_policies_gencount;

done:
	lck_rw_done(&necp_kernel_policy_lock);

	if (qos_marking == TRUE) {
		inp->inp_socket->so_flags1 |= SOF1_QOSMARKING_ALLOWED;
	} else {
		inp->inp_socket->so_flags1 &= ~SOF1_QOSMARKING_ALLOWED;
	}
}
static bool
necp_route_is_lqm_abort(struct ifnet *ifp, struct ifnet *delegated_ifp)
{
	if (ifp != NULL &&
	    (ifp->if_interface_state.valid_bitmask & IF_INTERFACE_STATE_LQM_STATE_VALID) &&
	    ifp->if_interface_state.lqm_state == IFNET_LQM_THRESH_ABORT) {
		return true;
	}
	if (delegated_ifp != NULL &&
	    (delegated_ifp->if_interface_state.valid_bitmask & IF_INTERFACE_STATE_LQM_STATE_VALID) &&
	    delegated_ifp->if_interface_state.lqm_state == IFNET_LQM_THRESH_ABORT) {
		return true;
	}
	return false;
}
static bool
necp_route_is_allowed_inner(struct rtentry *route, struct ifnet *ifp, u_int32_t route_rule_id, u_int32_t *interface_type_denied)
{
	bool default_is_allowed = TRUE;
	u_int8_t type_aggregate_action = NECP_ROUTE_RULE_NONE;
	int exception_index = 0;
	struct ifnet *delegated_ifp = NULL;
	struct necp_route_rule *route_rule = NULL;

	route_rule = necp_lookup_route_rule_locked(&necp_route_rules, route_rule_id);
	if (route_rule == NULL) {
		return TRUE;
	}

	default_is_allowed = (route_rule->default_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? FALSE : TRUE;
	if (ifp == NULL && route != NULL) {
		ifp = route->rt_ifp;
	}
	if (ifp == NULL) {
		if (necp_debug > 1 && !default_is_allowed) {
			NECPLOG(LOG_DEBUG, "Route Allowed: No interface for route, using default for Rule %d Allowed %d", route_rule_id, default_is_allowed);
		}
		return default_is_allowed;
	}

	delegated_ifp = ifp->if_delegated.ifp;
	for (exception_index = 0; exception_index < MAX_ROUTE_RULE_INTERFACES; exception_index++) {
		if (route_rule->exception_if_indices[exception_index] == 0) {
			break;
		}
		if (route_rule->exception_if_indices[exception_index] == ifp->if_index ||
		    (delegated_ifp != NULL && route_rule->exception_if_indices[exception_index] == delegated_ifp->if_index)) {
			if (route_rule->exception_if_actions[exception_index] == NECP_ROUTE_RULE_DENY_LQM_ABORT) {
				const bool lqm_abort = necp_route_is_lqm_abort(ifp, delegated_ifp);
				if (necp_debug > 1 && lqm_abort) {
					NECPLOG(LOG_DEBUG, "Route Allowed: Interface match %d for Rule %d Deny LQM Abort",
					    route_rule->exception_if_indices[exception_index], route_rule_id);
				}
				if (lqm_abort) {
					return FALSE;
				}
			} else if (IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(route_rule->exception_if_actions[exception_index])) {
				if (necp_debug > 1) {
					NECPLOG(LOG_DEBUG, "Route Allowed: Interface match %d for Rule %d Allowed %d", route_rule->exception_if_indices[exception_index], route_rule_id, ((route_rule->exception_if_actions[exception_index] == NECP_ROUTE_RULE_DENY_INTERFACE) ? FALSE : TRUE));
				}
				return (route_rule->exception_if_actions[exception_index] == NECP_ROUTE_RULE_DENY_INTERFACE) ? FALSE : TRUE;
			}
		}
	}

	if (IFNET_IS_CELLULAR(ifp)) {
		if (route_rule->cellular_action == NECP_ROUTE_RULE_DENY_LQM_ABORT) {
			if (necp_route_is_lqm_abort(ifp, delegated_ifp)) {
				if (interface_type_denied != NULL) {
					*interface_type_denied = IFRTYPE_FUNCTIONAL_CELLULAR;
				}
				// Mark aggregate action as deny
				type_aggregate_action = NECP_ROUTE_RULE_DENY_INTERFACE;
			}
		} else if (IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(route_rule->cellular_action)) {
			if (interface_type_denied != NULL) {
				*interface_type_denied = IFRTYPE_FUNCTIONAL_CELLULAR;
			}
			if (type_aggregate_action == NECP_ROUTE_RULE_NONE ||
			    (type_aggregate_action == NECP_ROUTE_RULE_ALLOW_INTERFACE &&
			    route_rule->cellular_action == NECP_ROUTE_RULE_DENY_INTERFACE)) {
				// Deny wins if there is a conflict
				type_aggregate_action = route_rule->cellular_action;
			}
		}
	}

	if (IFNET_IS_WIFI(ifp)) {
		if (route_rule->wifi_action == NECP_ROUTE_RULE_DENY_LQM_ABORT) {
			if (necp_route_is_lqm_abort(ifp, delegated_ifp)) {
				if (interface_type_denied != NULL) {
					*interface_type_denied = IFRTYPE_FUNCTIONAL_WIFI_INFRA;
				}
				// Mark aggregate action as deny
				type_aggregate_action = NECP_ROUTE_RULE_DENY_INTERFACE;
			}
		} else if (IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(route_rule->wifi_action)) {
			if (interface_type_denied != NULL) {
				*interface_type_denied = IFRTYPE_FUNCTIONAL_WIFI_INFRA;
			}
			if (type_aggregate_action == NECP_ROUTE_RULE_NONE ||
			    (type_aggregate_action == NECP_ROUTE_RULE_ALLOW_INTERFACE &&
			    route_rule->wifi_action == NECP_ROUTE_RULE_DENY_INTERFACE)) {
				// Deny wins if there is a conflict
				type_aggregate_action = route_rule->wifi_action;
			}
		}
	}

	if (IFNET_IS_WIRED(ifp)) {
		if (route_rule->wired_action == NECP_ROUTE_RULE_DENY_LQM_ABORT) {
			if (necp_route_is_lqm_abort(ifp, delegated_ifp)) {
				if (interface_type_denied != NULL) {
					*interface_type_denied = IFRTYPE_FUNCTIONAL_WIRED;
				}
				// Mark aggregate action as deny
				type_aggregate_action = NECP_ROUTE_RULE_DENY_INTERFACE;
			}
		} else if (IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(route_rule->wired_action)) {
			if (interface_type_denied != NULL) {
				*interface_type_denied = IFRTYPE_FUNCTIONAL_WIRED;
			}
			if (type_aggregate_action == NECP_ROUTE_RULE_NONE ||
			    (type_aggregate_action == NECP_ROUTE_RULE_ALLOW_INTERFACE &&
			    route_rule->wired_action == NECP_ROUTE_RULE_DENY_INTERFACE)) {
				// Deny wins if there is a conflict
				type_aggregate_action = route_rule->wired_action;
			}
		}
	}

	if (IFNET_IS_EXPENSIVE(ifp)) {
		if (route_rule->expensive_action == NECP_ROUTE_RULE_DENY_LQM_ABORT) {
			if (necp_route_is_lqm_abort(ifp, delegated_ifp)) {
				// Mark aggregate action as deny
				type_aggregate_action = NECP_ROUTE_RULE_DENY_INTERFACE;
			}
		} else if (IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(route_rule->expensive_action)) {
			if (type_aggregate_action == NECP_ROUTE_RULE_NONE ||
			    (type_aggregate_action == NECP_ROUTE_RULE_ALLOW_INTERFACE &&
			    route_rule->expensive_action == NECP_ROUTE_RULE_DENY_INTERFACE)) {
				// Deny wins if there is a conflict
				type_aggregate_action = route_rule->expensive_action;
			}
		}
	}

	if (IFNET_IS_CONSTRAINED(ifp)) {
		if (route_rule->constrained_action == NECP_ROUTE_RULE_DENY_LQM_ABORT) {
			if (necp_route_is_lqm_abort(ifp, delegated_ifp)) {
				// Mark aggregate action as deny
				type_aggregate_action = NECP_ROUTE_RULE_DENY_INTERFACE;
			}
		} else if (IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(route_rule->constrained_action)) {
			if (type_aggregate_action == NECP_ROUTE_RULE_NONE ||
			    (type_aggregate_action == NECP_ROUTE_RULE_ALLOW_INTERFACE &&
			    route_rule->constrained_action == NECP_ROUTE_RULE_DENY_INTERFACE)) {
				// Deny wins if there is a conflict
				type_aggregate_action = route_rule->constrained_action;
			}
		}
	}

	if (type_aggregate_action != NECP_ROUTE_RULE_NONE) {
		if (necp_debug > 1) {
			NECPLOG(LOG_DEBUG, "Route Allowed: C:%d WF:%d W:%d E:%d for Rule %d Allowed %d", route_rule->cellular_action, route_rule->wifi_action, route_rule->wired_action, route_rule->expensive_action, route_rule_id, ((type_aggregate_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? FALSE : TRUE));
		}
		return (type_aggregate_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? FALSE : TRUE;
	}

	if (necp_debug > 1 && !default_is_allowed) {
		NECPLOG(LOG_DEBUG, "Route Allowed: Using default for Rule %d Allowed %d", route_rule_id, default_is_allowed);
	}
	return default_is_allowed;
}
static bool
necp_route_is_allowed(struct rtentry *route, struct ifnet *interface, u_int32_t route_rule_id, u_int32_t *interface_type_denied)
{
	if ((route == NULL && interface == NULL) || route_rule_id == 0) {
		if (necp_debug > 1) {
			NECPLOG(LOG_DEBUG, "Route Allowed: no route or interface, Rule %d Allowed %d", route_rule_id, TRUE);
		}
		return TRUE;
	}

	if (ROUTE_RULE_IS_AGGREGATE(route_rule_id)) {
		struct necp_aggregate_route_rule *aggregate_route_rule = necp_lookup_aggregate_route_rule_locked(route_rule_id);
		if (aggregate_route_rule != NULL) {
			int index = 0;
			for (index = 0; index < MAX_AGGREGATE_ROUTE_RULES; index++) {
				u_int32_t sub_route_rule_id = aggregate_route_rule->rule_ids[index];
				if (sub_route_rule_id == 0) {
					break;
				}
				if (!necp_route_is_allowed_inner(route, interface, sub_route_rule_id, interface_type_denied)) {
					return FALSE;
				}
			}
		}
	} else {
		return necp_route_is_allowed_inner(route, interface, route_rule_id, interface_type_denied);
	}

	return TRUE;
}
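/*
 * Note on aggregate rule IDs: ROUTE_RULE_IS_AGGREGATE() identifies an ID that
 * was minted by necp_create_aggregate_route_rule() and expands to up to
 * MAX_AGGREGATE_ROUTE_RULES underlying rule IDs. The loop above is therefore
 * "deny if any sub-rule denies": a single FALSE from
 * necp_route_is_allowed_inner() fails the whole aggregate.
 */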
bool
necp_packet_is_allowed_over_interface(struct mbuf *packet, struct ifnet *interface)
{
	bool is_allowed = TRUE;
	u_int32_t route_rule_id = necp_get_route_rule_id_from_packet(packet);
	if (route_rule_id != 0 &&
	    interface != NULL) {
		lck_rw_lock_shared(&necp_kernel_policy_lock);
		is_allowed = necp_route_is_allowed(NULL, interface, necp_get_route_rule_id_from_packet(packet), NULL);
		lck_rw_done(&necp_kernel_policy_lock);
	}
	return is_allowed;
}
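/*
 * Usage sketch (hypothetical caller, for illustration only): an output path
 * that holds a marked mbuf and a candidate interface can gate delivery on the
 * packet's recorded route rule, for example:
 *
 *	if (!necp_packet_is_allowed_over_interface(m, ifp)) {
 *		m_freem(m);
 *		return EHOSTUNREACH;
 *	}
 *
 * Packets that carry no route rule (ID 0) are always allowed by this check.
 */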
static bool
necp_netagents_allow_traffic(u_int32_t *netagent_ids, size_t netagent_id_count)
{
	size_t netagent_cursor;
	for (netagent_cursor = 0; netagent_cursor < netagent_id_count; netagent_cursor++) {
		struct necp_uuid_id_mapping *mapping = NULL;
		u_int32_t netagent_id = netagent_ids[netagent_cursor];
		if (netagent_id == 0) {
			break;
		}
		mapping = necp_uuid_lookup_uuid_with_service_id_locked(netagent_id);
		if (mapping != NULL) {
			u_int32_t agent_flags = 0;
			agent_flags = netagent_get_flags(mapping->uuid);
			if (agent_flags & NETAGENT_FLAG_REGISTERED) {
				if (agent_flags & NETAGENT_FLAG_ACTIVE) {
					continue;
				} else if ((agent_flags & NETAGENT_FLAG_VOLUNTARY) == 0) {
					return FALSE;
				}
			}
		}
	}
	return TRUE;
}
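/*
 * Summary of the agent-flag logic above: a registered, active agent never
 * blocks traffic; a registered but inactive agent only blocks traffic when it
 * is not voluntary (NETAGENT_FLAG_VOLUNTARY unset). Unregistered or unknown
 * agent IDs are skipped.
 */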
static bool
necp_socket_is_allowed_to_send_recv_internal(struct inpcb *inp, struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr, ifnet_t interface, necp_kernel_policy_id *return_policy_id, u_int32_t *return_route_rule_id, necp_kernel_policy_id *return_skip_policy_id)
{
	u_int32_t verifyifindex = interface ? interface->if_index : 0;
	bool allowed_to_receive = TRUE;
	struct necp_socket_info info;
	u_int32_t flowhash = 0;
	necp_kernel_policy_result service_action = 0;
	necp_kernel_policy_service service = { 0, 0 };
	u_int32_t route_rule_id = 0;
	struct rtentry *route = NULL;
	u_int32_t interface_type_denied = IFRTYPE_FUNCTIONAL_UNKNOWN;
	necp_kernel_policy_result drop_dest_policy_result = NECP_KERNEL_POLICY_RESULT_NONE;
	necp_drop_all_bypass_check_result_t drop_all_bypass = NECP_DROP_ALL_BYPASS_CHECK_RESULT_NONE;
	u_int32_t netagent_ids[NECP_MAX_NETAGENTS];
	memset(&netagent_ids, 0, sizeof(netagent_ids));

	if (return_policy_id) {
		*return_policy_id = NECP_KERNEL_POLICY_ID_NONE;
	}
	if (return_skip_policy_id) {
		*return_skip_policy_id = NECP_KERNEL_POLICY_ID_NONE;
	}
	if (return_route_rule_id) {
		*return_route_rule_id = 0;
	}

	if (inp == NULL) {
		goto done;
	}

	route = inp->inp_route.ro_rt;

	struct socket *so = inp->inp_socket;

	u_int32_t drop_order = necp_process_drop_order(so->so_cred);

	// Don't lock. Possible race condition, but we don't want the performance hit.
	if (necp_kernel_socket_policies_count == 0 ||
	    (!(inp->inp_flags2 & INP2_WANT_APP_POLICY) && necp_kernel_socket_policies_non_app_count == 0)) {
		if (necp_drop_all_order > 0 || drop_order > 0) {
			if (necp_socket_bypass(override_local_addr, override_remote_addr, inp)) {
				allowed_to_receive = TRUE;
			} else {
				allowed_to_receive = FALSE;
			}
		}
		goto done;
	}

	// If this socket is connected, or we are not taking addresses into account, try to reuse last result
	if ((necp_socket_is_connected(inp) || (override_local_addr == NULL && override_remote_addr == NULL)) && inp->inp_policyresult.policy_id != NECP_KERNEL_POLICY_ID_NONE) {
		bool policies_have_changed = FALSE;
		bool route_allowed = TRUE;

		if (inp->inp_policyresult.policy_gencount != necp_kernel_socket_policies_gencount) {
			policies_have_changed = TRUE;
		} else {
			if (inp->inp_policyresult.results.route_rule_id != 0) {
				lck_rw_lock_shared(&necp_kernel_policy_lock);
				if (!necp_route_is_allowed(route, interface, inp->inp_policyresult.results.route_rule_id, &interface_type_denied)) {
					route_allowed = FALSE;
				}
				lck_rw_done(&necp_kernel_policy_lock);
			}
		}

		if (!policies_have_changed) {
			if (!route_allowed ||
			    inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_DROP ||
			    inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT ||
			    (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL && interface &&
			    inp->inp_policyresult.results.result_parameter.tunnel_interface_index != verifyifindex)) {
				allowed_to_receive = FALSE;
			} else {
				if (return_policy_id) {
					*return_policy_id = inp->inp_policyresult.policy_id;
				}
				if (return_skip_policy_id) {
					*return_skip_policy_id = inp->inp_policyresult.skip_policy_id;
				}
				if (return_route_rule_id) {
					*return_route_rule_id = inp->inp_policyresult.results.route_rule_id;
				}
			}
			goto done;
		}
	}

	// Check for loopback exception
	if (necp_socket_bypass(override_local_addr, override_remote_addr, inp)) {
		allowed_to_receive = TRUE;
		goto done;
	}

	// Actually calculate policy result
	lck_rw_lock_shared(&necp_kernel_policy_lock);
	necp_socket_fillout_info_locked(inp, override_local_addr, override_remote_addr, 0, drop_order, &info);

	flowhash = necp_socket_calc_flowhash_locked(&info);
	if (inp->inp_policyresult.policy_id != NECP_KERNEL_POLICY_ID_NONE &&
	    inp->inp_policyresult.policy_gencount == necp_kernel_socket_policies_gencount &&
	    inp->inp_policyresult.flowhash == flowhash) {
		if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_DROP ||
		    inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT ||
		    (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL && interface &&
		    inp->inp_policyresult.results.result_parameter.tunnel_interface_index != verifyifindex) ||
		    (inp->inp_policyresult.results.route_rule_id != 0 &&
		    !necp_route_is_allowed(route, interface, inp->inp_policyresult.results.route_rule_id, &interface_type_denied))) {
			allowed_to_receive = FALSE;
		} else {
			if (return_policy_id) {
				*return_policy_id = inp->inp_policyresult.policy_id;
			}
			if (return_route_rule_id) {
				*return_route_rule_id = inp->inp_policyresult.results.route_rule_id;
			}
			if (return_skip_policy_id) {
				*return_skip_policy_id = inp->inp_policyresult.skip_policy_id;
			}
		}
		lck_rw_done(&necp_kernel_policy_lock);
		goto done;
	}

	u_int32_t route_rule_id_array[MAX_AGGREGATE_ROUTE_RULES];
	size_t route_rule_id_array_count = 0;
	struct necp_kernel_socket_policy *matched_policy = necp_socket_find_policy_match_with_info_locked(necp_kernel_socket_policies_map[NECP_SOCKET_MAP_APP_ID_TO_BUCKET(info.application_id)], &info, NULL, route_rule_id_array, &route_rule_id_array_count, MAX_AGGREGATE_ROUTE_RULES, &service_action, &service, netagent_ids, NULL, NECP_MAX_NETAGENTS, NULL, 0, current_proc(), return_skip_policy_id, inp->inp_route.ro_rt, &drop_dest_policy_result, &drop_all_bypass);

	if (route_rule_id_array_count == 1) {
		route_rule_id = route_rule_id_array[0];
	} else if (route_rule_id_array_count > 1) {
		route_rule_id = necp_create_aggregate_route_rule(route_rule_id_array);
	}

	if (matched_policy != NULL) {
		if (matched_policy->result == NECP_KERNEL_POLICY_RESULT_DROP ||
		    matched_policy->result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT ||
		    (matched_policy->result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL && interface &&
		    matched_policy->result_parameter.tunnel_interface_index != verifyifindex) ||
		    ((service_action == NECP_KERNEL_POLICY_RESULT_TRIGGER_SCOPED ||
		    service_action == NECP_KERNEL_POLICY_RESULT_NO_TRIGGER_SCOPED) &&
		    service.identifier != 0 && service.identifier != NECP_NULL_SERVICE_ID) ||
		    (route_rule_id != 0 &&
		    !necp_route_is_allowed(route, interface, route_rule_id, &interface_type_denied)) ||
		    !necp_netagents_allow_traffic(netagent_ids, NECP_MAX_NETAGENTS)) {
			allowed_to_receive = FALSE;
		} else {
			if (return_policy_id) {
				*return_policy_id = matched_policy->id;
			}
			if (return_route_rule_id) {
				*return_route_rule_id = route_rule_id;
			}
		}
		lck_rw_done(&necp_kernel_policy_lock);

		if (necp_debug > 1 && matched_policy->id != inp->inp_policyresult.policy_id) {
			NECPLOG(LOG_DEBUG, "Socket Send/Recv Policy: Policy %d Allowed %d", return_policy_id ? *return_policy_id : 0, allowed_to_receive);
		}
		goto done;
	} else {
		bool drop_all = false;
		if (necp_drop_all_order > 0 || info.drop_order > 0 || drop_dest_policy_result == NECP_KERNEL_POLICY_RESULT_DROP) {
			drop_all = true;
			if (drop_all_bypass == NECP_DROP_ALL_BYPASS_CHECK_RESULT_NONE) {
				drop_all_bypass = necp_check_drop_all_bypass_result(NULL);
			}
		}
		if (drop_all && drop_all_bypass == NECP_DROP_ALL_BYPASS_CHECK_RESULT_FALSE) {
			allowed_to_receive = FALSE;
		} else {
			if (return_policy_id) {
				*return_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
			}
			if (return_route_rule_id) {
				*return_route_rule_id = route_rule_id;
			}
		}
	}

	lck_rw_done(&necp_kernel_policy_lock);

done:
	if (!allowed_to_receive && interface_type_denied != IFRTYPE_FUNCTIONAL_UNKNOWN) {
		soevent(inp->inp_socket, (SO_FILT_HINT_LOCKED | SO_FILT_HINT_IFDENIED));
	}

	return allowed_to_receive;
}
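/*
 * Ordering note for necp_socket_is_allowed_to_send_recv_internal(): the cached
 * result on the inpcb is reused as long as the policy generation count (and,
 * for the second cache check, the computed flow hash) still matches; only on a
 * cache miss does the function fall back to a full
 * necp_socket_find_policy_match_with_info_locked() evaluation. Connected
 * sockets that have already been evaluated therefore normally take the cheap path.
 */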
bool
necp_socket_is_allowed_to_send_recv_v4(struct inpcb *inp, u_int16_t local_port, u_int16_t remote_port, struct in_addr *local_addr, struct in_addr *remote_addr, ifnet_t interface, necp_kernel_policy_id *return_policy_id, u_int32_t *return_route_rule_id, necp_kernel_policy_id *return_skip_policy_id)
{
	struct sockaddr_in local = {};
	struct sockaddr_in remote = {};
	local.sin_family = remote.sin_family = AF_INET;
	local.sin_len = remote.sin_len = sizeof(struct sockaddr_in);
	local.sin_port = local_port;
	remote.sin_port = remote_port;
	memcpy(&local.sin_addr, local_addr, sizeof(local.sin_addr));
	memcpy(&remote.sin_addr, remote_addr, sizeof(remote.sin_addr));

	return necp_socket_is_allowed_to_send_recv_internal(inp, (struct sockaddr *)&local, (struct sockaddr *)&remote, interface,
	           return_policy_id, return_route_rule_id, return_skip_policy_id);
}
bool
necp_socket_is_allowed_to_send_recv_v6(struct inpcb *inp, u_int16_t local_port, u_int16_t remote_port, struct in6_addr *local_addr, struct in6_addr *remote_addr, ifnet_t interface, necp_kernel_policy_id *return_policy_id, u_int32_t *return_route_rule_id, necp_kernel_policy_id *return_skip_policy_id)
{
	struct sockaddr_in6 local = {};
	struct sockaddr_in6 remote = {};
	local.sin6_family = remote.sin6_family = AF_INET6;
	local.sin6_len = remote.sin6_len = sizeof(struct sockaddr_in6);
	local.sin6_port = local_port;
	remote.sin6_port = remote_port;
	memcpy(&local.sin6_addr, local_addr, sizeof(local.sin6_addr));
	memcpy(&remote.sin6_addr, remote_addr, sizeof(remote.sin6_addr));

	return necp_socket_is_allowed_to_send_recv_internal(inp, (struct sockaddr *)&local, (struct sockaddr *)&remote, interface,
	           return_policy_id, return_route_rule_id, return_skip_policy_id);
}
bool
necp_socket_is_allowed_to_send_recv(struct inpcb *inp, ifnet_t interface, necp_kernel_policy_id *return_policy_id,
    u_int32_t *return_route_rule_id,
    necp_kernel_policy_id *return_skip_policy_id)
{
	return necp_socket_is_allowed_to_send_recv_internal(inp, NULL, NULL, interface,
	           return_policy_id, return_route_rule_id,
	           return_skip_policy_id);
}
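/*
 * Usage sketch (hypothetical caller, parameter names reused for illustration):
 * the v4/v6 wrappers simply wrap the caller's ports and addresses in
 * sockaddr_in/sockaddr_in6 overrides before invoking the internal check, so a
 * transport layer holding raw fields can ask:
 *
 *	necp_kernel_policy_id policy_id = NECP_KERNEL_POLICY_ID_NONE;
 *	necp_kernel_policy_id skip_policy_id = NECP_KERNEL_POLICY_ID_NONE;
 *	u_int32_t route_rule_id = 0;
 *	if (!necp_socket_is_allowed_to_send_recv_v4(inp, local_port, remote_port,
 *	    &local_addr, &remote_addr, ifp, &policy_id, &route_rule_id, &skip_policy_id)) {
 *		// drop the packet for this socket
 *	}
 */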
int
necp_mark_packet_from_socket(struct mbuf *packet, struct inpcb *inp, necp_kernel_policy_id policy_id, u_int32_t route_rule_id,
    necp_kernel_policy_id skip_policy_id)
{
	if (packet == NULL || inp == NULL || !(packet->m_flags & M_PKTHDR)) {
		return EINVAL;
	}

	// Mark ID for Pass and IP Tunnel
	if (policy_id != NECP_KERNEL_POLICY_ID_NONE) {
		packet->m_pkthdr.necp_mtag.necp_policy_id = policy_id;
	} else if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_PASS ||
	    inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL) {
		packet->m_pkthdr.necp_mtag.necp_policy_id = inp->inp_policyresult.policy_id;
	} else {
		packet->m_pkthdr.necp_mtag.necp_policy_id = NECP_KERNEL_POLICY_ID_NONE;
	}
	packet->m_pkthdr.necp_mtag.necp_last_interface_index = 0;
	if (route_rule_id != 0) {
		packet->m_pkthdr.necp_mtag.necp_route_rule_id = route_rule_id;
	} else {
		packet->m_pkthdr.necp_mtag.necp_route_rule_id = inp->inp_policyresult.results.route_rule_id;
	}
	packet->m_pkthdr.necp_mtag.necp_app_id = inp->inp_policyresult.app_id;

	if (skip_policy_id != NECP_KERNEL_POLICY_ID_NONE &&
	    skip_policy_id != NECP_KERNEL_POLICY_ID_NO_MATCH) {
		// Only mark the skip policy if it is a valid policy ID
		packet->m_pkthdr.necp_mtag.necp_skip_policy_id = skip_policy_id;
	} else if (inp->inp_policyresult.results.filter_control_unit == NECP_FILTER_UNIT_NO_FILTER) {
		// Overload the meaning of "NECP_KERNEL_POLICY_ID_NO_MATCH"
		// to indicate that NECP_FILTER_UNIT_NO_FILTER was set
		// See necp_get_skip_policy_id_from_packet() and
		// necp_packet_should_skip_filters().
		packet->m_pkthdr.necp_mtag.necp_skip_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
	} else {
		packet->m_pkthdr.necp_mtag.necp_skip_policy_id = NECP_KERNEL_POLICY_ID_NONE;
	}

	return 0;
}
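/*
 * Note on the overloaded skip policy value above: when a socket's
 * filter_control_unit is NECP_FILTER_UNIT_NO_FILTER, the packet is tagged with
 * NECP_KERNEL_POLICY_ID_NO_MATCH so that necp_packet_should_skip_filters()
 * returns true for it, while necp_get_skip_policy_id_from_packet() still
 * reports "none" to callers that expect a real policy ID.
 */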
int
necp_mark_packet_from_ip(struct mbuf *packet, necp_kernel_policy_id policy_id)
{
	if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
		return EINVAL;
	}

	// Mark ID for Pass and IP Tunnel
	if (policy_id != NECP_KERNEL_POLICY_ID_NONE) {
		packet->m_pkthdr.necp_mtag.necp_policy_id = policy_id;
	} else {
		packet->m_pkthdr.necp_mtag.necp_policy_id = NECP_KERNEL_POLICY_ID_NONE;
	}

	return 0;
}

int
necp_mark_packet_from_interface(struct mbuf *packet, ifnet_t interface)
{
	if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
		return EINVAL;
	}

	// Mark ID for Pass and IP Tunnel
	if (interface != NULL) {
		packet->m_pkthdr.necp_mtag.necp_last_interface_index = interface->if_index;
	}

	return 0;
}

int
necp_mark_packet_as_keepalive(struct mbuf *packet, bool is_keepalive)
{
	if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
		return EINVAL;
	}

	if (is_keepalive) {
		packet->m_pkthdr.pkt_flags |= PKTF_KEEPALIVE;
	} else {
		packet->m_pkthdr.pkt_flags &= ~PKTF_KEEPALIVE;
	}

	return 0;
}
necp_kernel_policy_id
necp_get_policy_id_from_packet(struct mbuf *packet)
{
	if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
		return NECP_KERNEL_POLICY_ID_NONE;
	}

	return packet->m_pkthdr.necp_mtag.necp_policy_id;
}

necp_kernel_policy_id
necp_get_skip_policy_id_from_packet(struct mbuf *packet)
{
	if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
		return NECP_KERNEL_POLICY_ID_NONE;
	}

	// Check for overloaded value. See necp_mark_packet_from_socket().
	if (packet->m_pkthdr.necp_mtag.necp_skip_policy_id == NECP_KERNEL_POLICY_ID_NO_MATCH) {
		return NECP_KERNEL_POLICY_ID_NONE;
	}

	return packet->m_pkthdr.necp_mtag.necp_skip_policy_id;
}

bool
necp_packet_should_skip_filters(struct mbuf *packet)
{
	if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
		return false;
	}

	// Check for overloaded value. See necp_mark_packet_from_socket().
	return packet->m_pkthdr.necp_mtag.necp_skip_policy_id == NECP_KERNEL_POLICY_ID_NO_MATCH;
}

u_int32_t
necp_get_last_interface_index_from_packet(struct mbuf *packet)
{
	if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
		return 0;
	}

	return packet->m_pkthdr.necp_mtag.necp_last_interface_index;
}

u_int32_t
necp_get_route_rule_id_from_packet(struct mbuf *packet)
{
	if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
		return 0;
	}

	return packet->m_pkthdr.necp_mtag.necp_route_rule_id;
}
int
necp_get_app_uuid_from_packet(struct mbuf *packet,
    uuid_t app_uuid)
{
	if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
		return EINVAL;
	}

	bool found_mapping = FALSE;
	if (packet->m_pkthdr.necp_mtag.necp_app_id != 0) {
		lck_rw_lock_shared(&necp_kernel_policy_lock);
		struct necp_uuid_id_mapping *entry = necp_uuid_lookup_uuid_with_app_id_locked(packet->m_pkthdr.necp_mtag.necp_app_id);
		if (entry != NULL) {
			uuid_copy(app_uuid, entry->uuid);
			found_mapping = true;
		}
		lck_rw_done(&necp_kernel_policy_lock);
	}
	if (!found_mapping) {
		uuid_clear(app_uuid);
	}
	return 0;
}

bool
necp_get_is_keepalive_from_packet(struct mbuf *packet)
{
	if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
		return FALSE;
	}

	return packet->m_pkthdr.pkt_flags & PKTF_KEEPALIVE;
}
u_int32_t
necp_socket_get_content_filter_control_unit(struct socket *so)
{
	struct inpcb *inp = sotoinpcb(so);

	if (inp == NULL) {
		return 0;
	}
	return inp->inp_policyresult.results.filter_control_unit;
}

bool
necp_socket_should_use_flow_divert(struct inpcb *inp)
{
	if (inp == NULL) {
		return FALSE;
	}

	return inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT;
}

u_int32_t
necp_socket_get_flow_divert_control_unit(struct inpcb *inp)
{
	if (inp == NULL) {
		return 0;
	}

	if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT) {
		return inp->inp_policyresult.results.result_parameter.flow_divert_control_unit;
	}

	return 0;
}

bool
necp_socket_should_rescope(struct inpcb *inp)
{
	if (inp == NULL) {
		return FALSE;
	}

	return inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED ||
	       inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SCOPED_DIRECT;
}

u_int32_t
necp_socket_get_rescope_if_index(struct inpcb *inp)
{
	if (inp == NULL) {
		return 0;
	}

	if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED) {
		return inp->inp_policyresult.results.result_parameter.scoped_interface_index;
	} else if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SCOPED_DIRECT) {
		return necp_get_primary_direct_interface_index();
	}

	return 0;
}
u_int32_t
necp_socket_get_effective_mtu(struct inpcb *inp, u_int32_t current_mtu)
{
	if (inp == NULL) {
		return current_mtu;
	}

	if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL &&
	    (inp->inp_flags & INP_BOUND_IF) &&
	    inp->inp_boundifp) {
		u_int bound_interface_index = inp->inp_boundifp->if_index;
		u_int tunnel_interface_index = inp->inp_policyresult.results.result_parameter.tunnel_interface_index;

		// The result is IP Tunnel, and is rescoping from one interface to another. Recalculate MTU.
		if (bound_interface_index != tunnel_interface_index) {
			ifnet_t tunnel_interface = NULL;

			ifnet_head_lock_shared();
			tunnel_interface = ifindex2ifnet[tunnel_interface_index];
			ifnet_head_done();

			if (tunnel_interface != NULL) {
				u_int32_t direct_tunnel_mtu = tunnel_interface->if_mtu;
				u_int32_t delegate_tunnel_mtu = (tunnel_interface->if_delegated.ifp != NULL) ? tunnel_interface->if_delegated.ifp->if_mtu : 0;
				if (delegate_tunnel_mtu != 0 &&
				    strncmp(tunnel_interface->if_name, "ipsec", strlen("ipsec")) == 0) {
					// For ipsec interfaces, calculate the overhead from the delegate interface
					u_int32_t tunnel_overhead = (u_int32_t)(esp_hdrsiz(NULL) + sizeof(struct ip6_hdr));
					if (delegate_tunnel_mtu > tunnel_overhead) {
						delegate_tunnel_mtu -= tunnel_overhead;
					}

					if (delegate_tunnel_mtu < direct_tunnel_mtu) {
						// If the (delegate - overhead) < direct, return (delegate - overhead)
						return delegate_tunnel_mtu;
					} else {
						// Otherwise return direct
						return direct_tunnel_mtu;
					}
				} else {
					// For non-ipsec interfaces, just return the tunnel MTU
					return direct_tunnel_mtu;
				}
			}
		}
	}

	// By default, just return the MTU passed in
	return current_mtu;
}
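/*
 * Worked example of the MTU clamp above (illustrative numbers only): if an
 * ipsec tunnel interface reports if_mtu 1500, its delegate also reports 1500,
 * and esp_hdrsiz(NULL) + sizeof(struct ip6_hdr) comes to, say, 73 bytes, then
 * the delegate-derived MTU becomes 1500 - 73 = 1427, which is smaller than the
 * direct tunnel MTU and is therefore returned. Non-ipsec tunnels simply get
 * the direct tunnel MTU.
 */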
ifnet_t
necp_get_ifnet_from_result_parameter(necp_kernel_policy_result_parameter *result_parameter)
{
	if (result_parameter == NULL) {
		return NULL;
	}

	return ifindex2ifnet[result_parameter->tunnel_interface_index];
}
static bool
necp_packet_can_rebind_to_ifnet(struct mbuf *packet, struct ifnet *interface, struct route *new_route, int family)
{
	bool found_match = FALSE;
	errno_t result = 0;
	ifaddr_t *addresses = NULL;
	union necp_sockaddr_union address_storage;
	int i;

	if (packet == NULL || interface == NULL || new_route == NULL || (family != AF_INET && family != AF_INET6)) {
		return FALSE;
	}

	result = ifnet_get_address_list_family(interface, &addresses, family);
	if (result != 0) {
		NECPLOG(LOG_ERR, "Failed to get address list for %s%d", ifnet_name(interface), ifnet_unit(interface));
		return FALSE;
	}

	for (i = 0; addresses[i] != NULL; i++) {
		ROUTE_RELEASE(new_route);
		if (ifaddr_address(addresses[i], &address_storage.sa, sizeof(address_storage)) == 0) {
			if (family == AF_INET) {
				struct ip *ip = mtod(packet, struct ip *);
				if (memcmp(&address_storage.sin.sin_addr, &ip->ip_src, sizeof(ip->ip_src)) == 0) {
					struct sockaddr_in *dst4 = (struct sockaddr_in *)(void *)&new_route->ro_dst;
					dst4->sin_family = AF_INET;
					dst4->sin_len = sizeof(struct sockaddr_in);
					dst4->sin_addr = ip->ip_dst;
					rtalloc_scoped(new_route, interface->if_index);
					if (!ROUTE_UNUSABLE(new_route)) {
						found_match = TRUE;
						goto done;
					}
				}
			} else if (family == AF_INET6) {
				struct ip6_hdr *ip6 = mtod(packet, struct ip6_hdr *);
				if (memcmp(&address_storage.sin6.sin6_addr, &ip6->ip6_src, sizeof(ip6->ip6_src)) == 0) {
					struct sockaddr_in6 *dst6 = (struct sockaddr_in6 *)(void *)&new_route->ro_dst;
					dst6->sin6_family = AF_INET6;
					dst6->sin6_len = sizeof(struct sockaddr_in6);
					dst6->sin6_addr = ip6->ip6_dst;
					rtalloc_scoped(new_route, interface->if_index);
					if (!ROUTE_UNUSABLE(new_route)) {
						found_match = TRUE;
						goto done;
					}
				}
			}
		}
	}

done:
	ifnet_free_address_list(addresses);
	addresses = NULL;
	return found_match;
}
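/*
 * Rebind check summary: the loop above walks the candidate interface's
 * addresses of the requested family and only reports the packet as rebindable
 * when (a) the packet's source address is owned by that interface and (b) a
 * usable scoped route to the packet's destination can be allocated on it
 * (rtalloc_scoped() succeeds and ROUTE_UNUSABLE() is false).
 */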
static bool
necp_addr_is_loopback(struct sockaddr *address)
{
	if (address == NULL) {
		return FALSE;
	}

	if (address->sa_family == AF_INET) {
		return ntohl(((struct sockaddr_in *)(void *)address)->sin_addr.s_addr) == INADDR_LOOPBACK;
	} else if (address->sa_family == AF_INET6) {
		return IN6_IS_ADDR_LOOPBACK(&((struct sockaddr_in6 *)(void *)address)->sin6_addr);
	}

	return FALSE;
}
static bool
necp_is_loopback(struct sockaddr *local_addr, struct sockaddr *remote_addr, struct inpcb *inp, struct mbuf *packet)
{
	// Note: This function only checks for the loopback addresses.
	// In the future, we may want to expand to also allow any traffic
	// going through the loopback interface, but until then, this
	// check is cheaper.

	if (local_addr != NULL && necp_addr_is_loopback(local_addr)) {
		return TRUE;
	}

	if (remote_addr != NULL && necp_addr_is_loopback(remote_addr)) {
		return TRUE;
	}

	if (inp != NULL) {
		if ((inp->inp_flags & INP_BOUND_IF) && inp->inp_boundifp && (inp->inp_boundifp->if_flags & IFF_LOOPBACK)) {
			return TRUE;
		}
		if (inp->inp_vflag & INP_IPV4) {
			if (ntohl(inp->inp_laddr.s_addr) == INADDR_LOOPBACK ||
			    ntohl(inp->inp_faddr.s_addr) == INADDR_LOOPBACK) {
				return TRUE;
			}
		} else if (inp->inp_vflag & INP_IPV6) {
			if (IN6_IS_ADDR_LOOPBACK(&inp->in6p_laddr) ||
			    IN6_IS_ADDR_LOOPBACK(&inp->in6p_faddr)) {
				return TRUE;
			}
		}
	}

	if (packet != NULL) {
		struct ip *ip = mtod(packet, struct ip *);
		if (ip->ip_v == 4) {
			if (ntohl(ip->ip_src.s_addr) == INADDR_LOOPBACK) {
				return TRUE;
			}
			if (ntohl(ip->ip_dst.s_addr) == INADDR_LOOPBACK) {
				return TRUE;
			}
		} else if (ip->ip_v == 6) {
			struct ip6_hdr *ip6 = mtod(packet, struct ip6_hdr *);
			if (IN6_IS_ADDR_LOOPBACK(&ip6->ip6_src)) {
				return TRUE;
			}
			if (IN6_IS_ADDR_LOOPBACK(&ip6->ip6_dst)) {
				return TRUE;
			}
		}
	}

	return FALSE;
}
bool
necp_is_intcoproc(struct inpcb *inp, struct mbuf *packet)
{
	if (inp != NULL) {
		if (!(inp->inp_vflag & INP_IPV6)) {
			return false;
		}
		if (INP_INTCOPROC_ALLOWED(inp)) {
			return true;
		}
		if ((inp->inp_flags & INP_BOUND_IF) &&
		    IFNET_IS_INTCOPROC(inp->inp_boundifp)) {
			return true;
		}
	}
	if (packet != NULL) {
		struct ip6_hdr *ip6 = mtod(packet, struct ip6_hdr *);
		if ((ip6->ip6_vfc & IPV6_VERSION_MASK) == IPV6_VERSION &&
		    IN6_IS_ADDR_LINKLOCAL(&ip6->ip6_dst) &&
		    ip6->ip6_dst.s6_addr32[2] == ntohl(0xaede48ff) &&
		    ip6->ip6_dst.s6_addr32[3] == ntohl(0xfe334455)) {
			return true;
		}
	}

	return false;
}
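/*
 * Inference from the constants above (not stated in the original comments):
 * the matched link-local destination is fe80::aede:48ff:fe33:4455, which is
 * the EUI-64 form of the MAC address ac:de:48:33:44:55 (universal/local bit
 * flipped, ff:fe inserted), i.e. an address derived from the internal
 * co-processor interface's MAC.
 */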
static bool
necp_address_matches_drop_dest_policy(union necp_sockaddr_union *sau, u_int32_t session_order)
{
	char dest_str[MAX_IPv6_STR_LEN];

	if (necp_drop_dest_debug > 0) {
		if (sau->sa.sa_family == AF_INET) {
			(void) inet_ntop(AF_INET, &sau->sin.sin_addr, dest_str, sizeof(dest_str));
		} else if (sau->sa.sa_family == AF_INET6) {
			(void) inet_ntop(AF_INET6, &sau->sin6.sin6_addr, dest_str, sizeof(dest_str));
		} else {
			dest_str[0] = 0;
		}
	}
	for (u_int32_t i = 0; i < necp_drop_dest_policy.entry_count; i++) {
		struct necp_drop_dest_entry *necp_drop_dest_entry = &necp_drop_dest_policy.entries[i];
		struct necp_policy_condition_addr *npca = &necp_drop_dest_entry->cond_addr;

		if (session_order >= necp_drop_dest_entry->order && necp_is_addr_in_subnet(&sau->sa, &npca->address.sa, npca->prefix)) {
			if (necp_drop_dest_debug > 0) {
				char subnet_str[MAX_IPv6_STR_LEN];
				struct proc *p = current_proc();
				pid_t pid = proc_pid(p);

				if (sau->sa.sa_family == AF_INET) {
					(void) inet_ntop(AF_INET, &npca->address.sin, subnet_str, sizeof(subnet_str));
					os_log(OS_LOG_DEFAULT, "%s (process %s:%u) %s matches %s/%u", __func__, proc_best_name(p), pid, dest_str, subnet_str, npca->prefix);
				} else if (sau->sa.sa_family == AF_INET6) {
					(void) inet_ntop(AF_INET6, &npca->address.sin6, subnet_str, sizeof(subnet_str));
					os_log(OS_LOG_DEFAULT, "%s (process %s:%u) %s matches %s/%u", __func__, proc_best_name(p), pid, dest_str, subnet_str, npca->prefix);
				}
			}
			return true;
		}
	}
	if (necp_drop_dest_debug > 1) {
		struct proc *p = current_proc();
		pid_t pid = proc_pid(p);

		os_log(OS_LOG_DEFAULT, "%s (process %s:%u) %s no match", __func__, proc_best_name(p), pid, dest_str);
	}
	return false;
}
static int
sysctl_handle_necp_drop_dest_level SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2, oidp)
	int changed = 0;
	int error = 0;
	struct necp_drop_dest_policy tmp_drop_dest_policy;
	struct proc *p = current_proc();
	pid_t pid = proc_pid(p);

	if (req->newptr != USER_ADDR_NULL && proc_suser(current_proc()) != 0 &&
	    priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES, 0) != 0) {
		NECPLOG(LOG_ERR, "%s (process %s:%u) not permitted", __func__, proc_best_name(p), pid);
		return EPERM;
	}
	if (req->newptr != USER_ADDR_NULL && req->newlen != sizeof(struct necp_drop_dest_policy)) {
		NECPLOG(LOG_ERR, "%s (process %s:%u) bad newlen %lu", __func__, proc_best_name(p), pid, req->newlen);
		return EINVAL;
	}

	memcpy(&tmp_drop_dest_policy, &necp_drop_dest_policy, sizeof(struct necp_drop_dest_policy));
	error = sysctl_io_opaque(req, &tmp_drop_dest_policy, sizeof(struct necp_drop_dest_policy), &changed);
	if (error != 0) {
		NECPLOG(LOG_ERR, "%s (process %s:%u) sysctl_io_opaque() error %d", __func__, proc_best_name(p), pid, error);
		return error;
	}
	if (changed == 0 || req->newptr == USER_ADDR_NULL) {
		return error;
	}

	// Validate the passed parameters
	if (tmp_drop_dest_policy.entry_count >= MAX_NECP_DROP_DEST_LEVEL_ADDRS) {
		NECPLOG(LOG_ERR, "%s (process %s:%u) bad entry_count %u", __func__, proc_best_name(p), pid, tmp_drop_dest_policy.entry_count);
		return EINVAL;
	}
	for (u_int32_t i = 0; i < tmp_drop_dest_policy.entry_count; i++) {
		struct necp_drop_dest_entry *tmp_drop_dest_entry = &tmp_drop_dest_policy.entries[i];
		struct necp_policy_condition_addr *npca = &tmp_drop_dest_entry->cond_addr;

		switch (tmp_drop_dest_entry->level) {
		case NECP_SESSION_PRIORITY_UNKNOWN:
			if (tmp_drop_dest_policy.entry_count != 0) {
				NECPLOG(LOG_ERR, "%s (process %s:%u) NECP_SESSION_PRIORITY_UNKNOWN bad entry_count %u", __func__, proc_best_name(p), pid, tmp_drop_dest_policy.entry_count);
				return EINVAL;
			}
			break;
		case NECP_SESSION_PRIORITY_CONTROL:
		case NECP_SESSION_PRIORITY_PRIVILEGED_TUNNEL:
		case NECP_SESSION_PRIORITY_HIGH:
		case NECP_SESSION_PRIORITY_DEFAULT:
		case NECP_SESSION_PRIORITY_LOW:
			if (tmp_drop_dest_policy.entry_count == 0) {
				NECPLOG(LOG_ERR, "%s (process %s:%u) priority %u entry_count 0", __func__, proc_best_name(p), pid, tmp_drop_dest_entry->level);
				return EINVAL;
			}
			break;
		default:
			NECPLOG(LOG_ERR, "%s (process %s:%u) bad level %u", __func__, proc_best_name(p), pid, tmp_drop_dest_entry->level);
			return EINVAL;
		}

		switch (npca->address.sa.sa_family) {
		case AF_INET:
			if (npca->prefix > 32) {
				NECPLOG(LOG_ERR, "%s (process %s:%u) AF_INET bad prefix %u", __func__, proc_best_name(p), pid, npca->prefix);
				return EINVAL;
			}
			if (npca->address.sin.sin_len != sizeof(struct sockaddr_in)) {
				NECPLOG(LOG_ERR, "%s (process %s:%u) AF_INET bad sin_len %u", __func__, proc_best_name(p), pid, npca->address.sin.sin_len);
				return EINVAL;
			}
			if (npca->address.sin.sin_port != 0) {
				NECPLOG(LOG_ERR, "%s (process %s:%u) AF_INET bad sin_port %u, not zero", __func__, proc_best_name(p), pid, npca->address.sin.sin_port);
				return EINVAL;
			}
			break;
		case AF_INET6:
			if (npca->prefix > 128) {
				NECPLOG(LOG_ERR, "%s (process %s:%u) AF_INET6 bad prefix %u", __func__, proc_best_name(p), pid, npca->prefix);
				return EINVAL;
			}
			if (npca->address.sin6.sin6_len != sizeof(struct sockaddr_in6)) {
				NECPLOG(LOG_ERR, "%s (process %s:%u) AF_INET6 bad sin6_len %u", __func__, proc_best_name(p), pid, npca->address.sin6.sin6_len);
				return EINVAL;
			}
			if (npca->address.sin6.sin6_port != 0) {
				NECPLOG(LOG_ERR, "%s (process %s:%u) AF_INET6 bad sin6_port %u, not zero", __func__, proc_best_name(p), pid, npca->address.sin6.sin6_port);
				return EINVAL;
			}
			if (npca->address.sin6.sin6_flowinfo != 0) {
				NECPLOG(LOG_ERR, "%s (process %s:%u) AF_INET6 bad sin6_flowinfo %u, not zero", __func__, proc_best_name(p), pid, npca->address.sin6.sin6_flowinfo);
				return EINVAL;
			}
			if (npca->address.sin6.sin6_scope_id != 0) {
				NECPLOG(LOG_ERR, "%s (process %s:%u) AF_INET6 bad sin6_scope_id %u, not zero", __func__, proc_best_name(p), pid, npca->address.sin6.sin6_scope_id);
				return EINVAL;
			}
			break;
		default:
			return EINVAL;
		}
	}

	// Commit the changed policy
	lck_rw_lock_exclusive(&necp_kernel_policy_lock);
	memset(&necp_drop_dest_policy, 0, sizeof(struct necp_drop_dest_policy));

	necp_drop_dest_policy.entry_count = tmp_drop_dest_policy.entry_count;
	for (u_int32_t i = 0; i < tmp_drop_dest_policy.entry_count; i++) {
		struct necp_drop_dest_entry *tmp_drop_dest_entry = &tmp_drop_dest_policy.entries[i];
		struct necp_drop_dest_entry *necp_drop_dest_entry = &necp_drop_dest_policy.entries[i];

		memcpy(necp_drop_dest_entry, tmp_drop_dest_entry, sizeof(struct necp_drop_dest_entry));

		necp_drop_dest_entry->order = necp_get_first_order_for_priority(necp_drop_dest_entry->level);
	}
	lck_rw_done(&necp_kernel_policy_lock