/*
 * Copyright (c) 2013-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <libkern/OSMalloc.h>
#include <sys/kernel.h>
#include <sys/kern_control.h>
#include <sys/kpi_mbuf.h>
#include <sys/proc_uuid_policy.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_cache.h>
#include <netinet/udp.h>
#include <netinet/in_pcb.h>
#include <netinet/in_tclass.h>
#include <netinet6/esp.h>
#include <net/flowhash.h>
#include <net/if_var.h>
#include <sys/kauth.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/kern_event.h>
#include <sys/file_internal.h>
#include <IOKit/IOBSD.h>
#include <net/network_agent.h>
/*
 * NECP - Network Extension Control Policy database
 * ------------------------------------------------
 * The goal of this module is to allow clients connecting via a
 * kernel control socket to create high-level policy sessions, which
 * are ingested into low-level kernel policies that control and tag
 * traffic at the application, socket, and IP layers.
 *
 * ------------------------------------------------
 * Sessions
 * ------------------------------------------------
 * Each session owns a list of session policies, each of which can
 * specify any combination of conditions and a single result. Each
 * session also has a priority level (such as High, Default, or Low)
 * which is requested by the client. Based on the requested level,
 * a session order value is assigned to the session, which will be used
 * to sort kernel policies generated by the session. The session client
 * can specify the sub-order for each policy it creates, which will be
 * used to further sort the kernel policies.
 *
 * Kernel Control Socket --> 1 necp_session --> list of necp_session_policy structs
 *
 * ------------------------------------------------
 * Kernel Policies
 * ------------------------------------------------
 * Whenever a session sends the Apply command, its policies are ingested
 * and generate kernel policies. There are two phases of kernel policy
 * generation:
 *
 * 1. The session policy is parsed to create kernel policies at the socket
 * and IP layers, when applicable. For example, a policy that requires
 * all traffic from App1 to Pass will generate a socket kernel policy to
 * match App1 and mark packets with ID1, and also an IP policy to match
 * ID1 and let the packet pass. This is handled in necp_apply_policy. The
 * resulting kernel policies are added to the global socket and IP layer
 * policy lists.
 *
 * necp_session_policy --> necp_kernel_socket_policy and necp_kernel_ip_output_policy
 *         |                              |
 *         v                              v
 * necp_kernel_socket_policies   necp_kernel_ip_output_policies
 *
 * 2. Once the global lists of kernel policies have been filled out, each
 * list is traversed to create optimized sub-lists ("Maps") which are used during
 * data-path evaluation. IP policies are sent into necp_kernel_ip_output_policies_map,
 * which hashes incoming packets based on marked socket-layer policies, and removes
 * duplicate or overlapping policies. Socket policies are sent into two maps,
 * necp_kernel_socket_policies_map and necp_kernel_socket_policies_app_layer_map.
 * The app layer map is used for policy checks coming in from user space, and is one
 * list with duplicate and overlapping policies removed. The socket map hashes based
 * on app UUID, and removes duplicate and overlapping policies.
 *
 * necp_kernel_socket_policy --> necp_kernel_socket_policies_app_layer_map
 *                           |-> necp_kernel_socket_policies_map
 *
 * necp_kernel_ip_output_policies --> necp_kernel_ip_output_policies_map
 *
 * ------------------------------------------------
 * Drop All Level
 * ------------------------------------------------
 * The Drop All Level is a sysctl that controls the level at which policies are allowed
 * to override a global drop rule. If the value is 0, no drop rule is applied. If the value
 * is 1, all traffic is dropped. If the value is greater than 1, all kernel policies created
 * by a session with a priority level better than (numerically less than) the
 * Drop All Level will allow matching traffic to not be dropped. The Drop All Level is
 * dynamically interpreted into necp_drop_all_order, which specifies the equivalent assigned
 * session orders to be dropped.
 */
u_int32_t necp_drop_all_order = 0;
u_int32_t necp_drop_all_level = 0;

u_int32_t necp_pass_loopback = 1; // 0=Off, 1=On
u_int32_t necp_pass_keepalives = 1; // 0=Off, 1=On

u_int32_t necp_debug = 0; // 0=None, 1=Basic, 2=EveryMatch

u_int32_t necp_session_count = 0;
#define LIST_INSERT_SORTED_ASCENDING(head, elm, field, sortfield, tmpelm) do { \
    if (LIST_EMPTY((head)) || (LIST_FIRST(head)->sortfield >= (elm)->sortfield)) { \
        LIST_INSERT_HEAD((head), elm, field); \
    } else { \
        LIST_FOREACH(tmpelm, head, field) { \
            if (LIST_NEXT(tmpelm, field) == NULL || LIST_NEXT(tmpelm, field)->sortfield >= (elm)->sortfield) { \
                LIST_INSERT_AFTER(tmpelm, elm, field); \
                break; \
            } \
        } \
    } \
} while (0)
#define LIST_INSERT_SORTED_TWICE_ASCENDING(head, elm, field, firstsortfield, secondsortfield, tmpelm) do { \
    if (LIST_EMPTY((head)) || (LIST_FIRST(head)->firstsortfield > (elm)->firstsortfield) || ((LIST_FIRST(head)->firstsortfield == (elm)->firstsortfield) && (LIST_FIRST(head)->secondsortfield >= (elm)->secondsortfield))) { \
        LIST_INSERT_HEAD((head), elm, field); \
    } else { \
        LIST_FOREACH(tmpelm, head, field) { \
            if (LIST_NEXT(tmpelm, field) == NULL || (LIST_NEXT(tmpelm, field)->firstsortfield > (elm)->firstsortfield) || ((LIST_NEXT(tmpelm, field)->firstsortfield == (elm)->firstsortfield) && (LIST_NEXT(tmpelm, field)->secondsortfield >= (elm)->secondsortfield))) { \
                LIST_INSERT_AFTER(tmpelm, elm, field); \
                break; \
            } \
        } \
    } \
} while (0)
#define LIST_INSERT_SORTED_THRICE_ASCENDING(head, elm, field, firstsortfield, secondsortfield, thirdsortfield, tmpelm) do { \
    if (LIST_EMPTY((head)) || (LIST_FIRST(head)->firstsortfield > (elm)->firstsortfield) || ((LIST_FIRST(head)->firstsortfield == (elm)->firstsortfield) && (LIST_FIRST(head)->secondsortfield >= (elm)->secondsortfield)) || ((LIST_FIRST(head)->firstsortfield == (elm)->firstsortfield) && (LIST_FIRST(head)->secondsortfield == (elm)->secondsortfield) && (LIST_FIRST(head)->thirdsortfield >= (elm)->thirdsortfield))) { \
        LIST_INSERT_HEAD((head), elm, field); \
    } else { \
        LIST_FOREACH(tmpelm, head, field) { \
            if (LIST_NEXT(tmpelm, field) == NULL || (LIST_NEXT(tmpelm, field)->firstsortfield > (elm)->firstsortfield) || ((LIST_NEXT(tmpelm, field)->firstsortfield == (elm)->firstsortfield) && (LIST_NEXT(tmpelm, field)->secondsortfield >= (elm)->secondsortfield)) || ((LIST_NEXT(tmpelm, field)->firstsortfield == (elm)->firstsortfield) && (LIST_NEXT(tmpelm, field)->secondsortfield == (elm)->secondsortfield) && (LIST_NEXT(tmpelm, field)->thirdsortfield >= (elm)->thirdsortfield))) { \
                LIST_INSERT_AFTER(tmpelm, elm, field); \
                break; \
            } \
        } \
    } \
} while (0)
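/*
 * Illustrative usage sketch (editor's addition, not from the original source):
 * inserting a policy into a list kept sorted first by session order and then
 * by policy order would look roughly like the following, assuming the element
 * type carries 'session_order' and 'order' fields and is linked by 'chain':
 *
 *	struct necp_kernel_socket_policy *tmp_policy = NULL;
 *	LIST_INSERT_SORTED_TWICE_ASCENDING(&necp_kernel_socket_policies,
 *	    new_policy, chain, session_order, order, tmp_policy);
 *
 * Lower session order (a higher-priority session) sorts earlier; ties are
 * broken by the policy's own order.
 */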
#define IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(x) ((x) == NECP_ROUTE_RULE_DENY_INTERFACE || (x) == NECP_ROUTE_RULE_ALLOW_INTERFACE)

#define NECP_KERNEL_CONDITION_ALL_INTERFACES		0x00001
#define NECP_KERNEL_CONDITION_BOUND_INTERFACE		0x00002
#define NECP_KERNEL_CONDITION_PROTOCOL			0x00004
#define NECP_KERNEL_CONDITION_LOCAL_START		0x00008
#define NECP_KERNEL_CONDITION_LOCAL_END			0x00010
#define NECP_KERNEL_CONDITION_LOCAL_PREFIX		0x00020
#define NECP_KERNEL_CONDITION_REMOTE_START		0x00040
#define NECP_KERNEL_CONDITION_REMOTE_END		0x00080
#define NECP_KERNEL_CONDITION_REMOTE_PREFIX		0x00100
#define NECP_KERNEL_CONDITION_APP_ID			0x00200
#define NECP_KERNEL_CONDITION_REAL_APP_ID		0x00400
#define NECP_KERNEL_CONDITION_DOMAIN			0x00800
#define NECP_KERNEL_CONDITION_ACCOUNT_ID		0x01000
#define NECP_KERNEL_CONDITION_POLICY_ID			0x02000
#define NECP_KERNEL_CONDITION_PID			0x04000
#define NECP_KERNEL_CONDITION_UID			0x08000
#define NECP_KERNEL_CONDITION_LAST_INTERFACE		0x10000 // Only set from packets looping between interfaces
#define NECP_KERNEL_CONDITION_TRAFFIC_CLASS		0x20000
#define NECP_KERNEL_CONDITION_ENTITLEMENT		0x40000
#define NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT	0x80000

#define NECP_MAX_POLICY_RESULT_SIZE			512
#define NECP_MAX_ROUTE_RULES_ARRAY_SIZE			1024
#define NECP_MAX_CONDITIONS_ARRAY_SIZE			4096
#define NECP_MAX_POLICY_LIST_COUNT			1024

// Cap the policy size at the max result + conditions size, with room for extra TLVs
#define NECP_MAX_POLICY_SIZE				(1024 + NECP_MAX_POLICY_RESULT_SIZE + NECP_MAX_CONDITIONS_ARRAY_SIZE)
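/*
 * Worked size cap (editor's addition): with the constants above, a single
 * policy may not exceed
 * NECP_MAX_POLICY_SIZE = 1024 + 512 + 4096 = 5632 bytes,
 * i.e. the maximum result plus the maximum conditions array plus 1024 bytes
 * of headroom for additional TLVs such as the order and route rules.
 */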
struct necp_service_registration {
	LIST_ENTRY(necp_service_registration)	session_chain;
	LIST_ENTRY(necp_service_registration)	kernel_chain;
	u_int32_t				service_id;
};
struct necp_session {
	u_int8_t	necp_fd_type;
	u_int32_t	control_unit;
	u_int32_t	session_priority; // Descriptive priority rating
	u_int32_t	session_order;

	decl_lck_mtx_data(, lock);

	bool		proc_locked; // Messages must come from proc_uuid
	uuid_t		proc_uuid;
	pid_t		proc_pid;

	bool		dirty;
	LIST_HEAD(_policies, necp_session_policy) policies;

	LIST_HEAD(_services, necp_service_registration) services;

	TAILQ_ENTRY(necp_session) chain;
};

#define NECP_SESSION_LOCK(_s) lck_mtx_lock(&_s->lock)
#define NECP_SESSION_UNLOCK(_s) lck_mtx_unlock(&_s->lock)

static TAILQ_HEAD(_necp_session_list, necp_session) necp_session_list;
struct necp_socket_info {
	union necp_sockaddr_union local_addr;
	union necp_sockaddr_union remote_addr;
	u_int32_t bound_interface_index;
	u_int32_t traffic_class;
	u_int32_t application_id;
	u_int32_t real_application_id;
	u_int32_t account_id;
};
static kern_ctl_ref	necp_kctlref;
static u_int32_t	necp_family;
static OSMallocTag	necp_malloc_tag;
static lck_grp_attr_t	*necp_kernel_policy_grp_attr	= NULL;
static lck_attr_t	*necp_kernel_policy_mtx_attr	= NULL;
static lck_grp_t	*necp_kernel_policy_mtx_grp	= NULL;
decl_lck_rw_data(static, necp_kernel_policy_lock);

static lck_grp_attr_t	*necp_route_rule_grp_attr	= NULL;
static lck_attr_t	*necp_route_rule_mtx_attr	= NULL;
static lck_grp_t	*necp_route_rule_mtx_grp	= NULL;
decl_lck_rw_data(static, necp_route_rule_lock);

static necp_policy_id necp_last_policy_id = 0;
static necp_kernel_policy_id necp_last_kernel_policy_id = 0;
static u_int32_t necp_last_uuid_id = 0;
static u_int32_t necp_last_string_id = 0;
static u_int32_t necp_last_route_rule_id = 0;
static u_int32_t necp_last_aggregate_route_rule_id = 0;
/*
 * On modification, invalidate cached lookups by bumping the generation count.
 * Other calls will need to take the slowpath of taking
 * the subsystem lock.
 */
static volatile int32_t necp_kernel_socket_policies_gencount;
#define BUMP_KERNEL_SOCKET_POLICIES_GENERATION_COUNT() do { \
    if (OSIncrementAtomic(&necp_kernel_socket_policies_gencount) == (INT32_MAX - 1)) { \
        necp_kernel_socket_policies_gencount = 1; \
    } \
} while (0)
static u_int32_t necp_kernel_application_policies_condition_mask;
static size_t necp_kernel_application_policies_count;
static u_int32_t necp_kernel_socket_policies_condition_mask;
static size_t necp_kernel_socket_policies_count;
static size_t necp_kernel_socket_policies_non_app_count;
static LIST_HEAD(_necpkernelsocketconnectpolicies, necp_kernel_socket_policy) necp_kernel_socket_policies;
#define NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS 5
#define NECP_SOCKET_MAP_APP_ID_TO_BUCKET(appid) (appid ? (appid%(NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS - 1) + 1) : 0)
static struct necp_kernel_socket_policy **necp_kernel_socket_policies_map[NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS];
static struct necp_kernel_socket_policy **necp_kernel_socket_policies_app_layer_map;

/*
 * A note on policy 'maps': these are used for boosting efficiency when matching policies. For each dimension of the map,
 * such as an ID, the 0 bucket is reserved for sockets/packets that do not have this parameter, while the other
 * buckets lead to an array of policy pointers that form the list applicable when the (parameter%(NUM_BUCKETS - 1) + 1) == bucket_index.
 *
 * For example, a packet with policy ID of 7, when there are 4 ID buckets, will map to bucket (7%3 + 1) = 2.
 */
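/*
 * Worked example with the constants actually used above (editor's addition):
 * NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS is 5, so the modulus is 4:
 *	NECP_SOCKET_MAP_APP_ID_TO_BUCKET(0) == 0              (no app ID: reserved bucket)
 *	NECP_SOCKET_MAP_APP_ID_TO_BUCKET(7) == (7 % 4) + 1 == 4
 *	NECP_SOCKET_MAP_APP_ID_TO_BUCKET(8) == (8 % 4) + 1 == 1
 */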
static u_int32_t necp_kernel_ip_output_policies_condition_mask;
static size_t necp_kernel_ip_output_policies_count;
static size_t necp_kernel_ip_output_policies_non_id_count;
static LIST_HEAD(_necpkernelipoutputpolicies, necp_kernel_ip_output_policy) necp_kernel_ip_output_policies;
#define NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS 5
#define NECP_IP_OUTPUT_MAP_ID_TO_BUCKET(id) (id ? (id%(NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS - 1) + 1) : 0)
static struct necp_kernel_ip_output_policy **necp_kernel_ip_output_policies_map[NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS];
static struct necp_session *necp_create_session(void);
static void necp_delete_session(struct necp_session *session);

static necp_policy_id necp_handle_policy_add(struct necp_session *session, u_int32_t message_id, mbuf_t packet,
    u_int8_t *tlv_buffer, size_t tlv_buffer_length, int offset, int *error);
static void necp_handle_policy_get(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
static void necp_handle_policy_delete(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
static void necp_handle_policy_apply_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
static void necp_handle_policy_list_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
static void necp_handle_policy_delete_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
static int necp_handle_policy_dump_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet,
    user_addr_t out_buffer, size_t out_buffer_length, int offset);
static void necp_handle_set_session_priority(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
static void necp_handle_lock_session_to_proc(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
static void necp_handle_register_service(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
static void necp_handle_unregister_service(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);

#define MAX_RESULT_STRING_LEN 64
static inline const char * necp_get_result_description(char *result_string, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter);

static struct necp_session_policy *necp_policy_create(struct necp_session *session, necp_policy_order order, u_int8_t *conditions_array, u_int32_t conditions_array_size, u_int8_t *route_rules_array, u_int32_t route_rules_array_size, u_int8_t *result, u_int32_t result_size);
static struct necp_session_policy *necp_policy_find(struct necp_session *session, necp_policy_id policy_id);
static bool necp_policy_mark_for_deletion(struct necp_session *session, struct necp_session_policy *policy);
static bool necp_policy_mark_all_for_deletion(struct necp_session *session);
static bool necp_policy_delete(struct necp_session *session, struct necp_session_policy *policy);
static void necp_policy_apply_all(struct necp_session *session);

static necp_kernel_policy_id necp_kernel_socket_policy_add(necp_policy_id parent_policy_id, necp_policy_order order, u_int32_t session_order, int session_pid, u_int32_t condition_mask, u_int32_t condition_negated_mask, necp_app_id cond_app_id, necp_app_id cond_real_app_id, char *cond_custom_entitlement, u_int32_t cond_account_id, char *domain, pid_t cond_pid, uid_t cond_uid, ifnet_t cond_bound_interface, struct necp_policy_condition_tc_range cond_traffic_class, u_int16_t cond_protocol, union necp_sockaddr_union *cond_local_start, union necp_sockaddr_union *cond_local_end, u_int8_t cond_local_prefix, union necp_sockaddr_union *cond_remote_start, union necp_sockaddr_union *cond_remote_end, u_int8_t cond_remote_prefix, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter);
static bool necp_kernel_socket_policy_delete(necp_kernel_policy_id policy_id);
static bool necp_kernel_socket_policies_reprocess(void);
static bool necp_kernel_socket_policies_update_uuid_table(void);
static inline struct necp_kernel_socket_policy *necp_socket_find_policy_match_with_info_locked(struct necp_kernel_socket_policy **policy_search_array, struct necp_socket_info *info, necp_kernel_policy_filter *return_filter, u_int32_t *return_route_rule_id, necp_kernel_policy_result *return_service_action, necp_kernel_policy_service *return_service, u_int32_t *return_netagent_array, size_t netagent_array_count, proc_t proc);

static necp_kernel_policy_id necp_kernel_ip_output_policy_add(necp_policy_id parent_policy_id, necp_policy_order order, necp_policy_order suborder, u_int32_t session_order, int session_pid, u_int32_t condition_mask, u_int32_t condition_negated_mask, necp_kernel_policy_id cond_policy_id, ifnet_t cond_bound_interface, u_int32_t cond_last_interface_index, u_int16_t cond_protocol, union necp_sockaddr_union *cond_local_start, union necp_sockaddr_union *cond_local_end, u_int8_t cond_local_prefix, union necp_sockaddr_union *cond_remote_start, union necp_sockaddr_union *cond_remote_end, u_int8_t cond_remote_prefix, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter);
static bool necp_kernel_ip_output_policy_delete(necp_kernel_policy_id policy_id);
static bool necp_kernel_ip_output_policies_reprocess(void);

static bool necp_is_addr_in_range(struct sockaddr *addr, struct sockaddr *range_start, struct sockaddr *range_end);
static bool necp_is_range_in_range(struct sockaddr *inner_range_start, struct sockaddr *inner_range_end, struct sockaddr *range_start, struct sockaddr *range_end);
static bool necp_is_addr_in_subnet(struct sockaddr *addr, struct sockaddr *subnet_addr, u_int8_t subnet_prefix);
static int necp_addr_compare(struct sockaddr *sa1, struct sockaddr *sa2, int check_port);
static bool necp_buffer_compare_with_bit_prefix(u_int8_t *p1, u_int8_t *p2, u_int32_t bits);
static bool necp_is_loopback(struct sockaddr *local_addr, struct sockaddr *remote_addr, struct inpcb *inp, struct mbuf *packet);
static bool necp_is_intcoproc(struct inpcb *inp, struct mbuf *packet);
struct necp_uuid_id_mapping {
	LIST_ENTRY(necp_uuid_id_mapping) chain;
	u_int32_t	table_refcount; // Add to UUID policy table count
};
static size_t necp_num_uuid_app_id_mappings;
static bool necp_uuid_app_id_mappings_dirty;
#define NECP_UUID_APP_ID_HASH_SIZE 64
static u_long necp_uuid_app_id_hash_mask;
static u_long necp_uuid_app_id_hash_num_buckets;
static LIST_HEAD(necp_uuid_id_mapping_head, necp_uuid_id_mapping) *necp_uuid_app_id_hashtbl, necp_uuid_service_id_list; // App map is real hash table, service map is just mapping
#define APPUUIDHASH(uuid) (&necp_uuid_app_id_hashtbl[uuid[0] & necp_uuid_app_id_hash_mask]) // Assume first byte of UUIDs are evenly distributed
static u_int32_t necp_create_uuid_app_id_mapping(uuid_t uuid, bool *allocated_mapping, bool uuid_policy_table);
static bool necp_remove_uuid_app_id_mapping(uuid_t uuid, bool *removed_mapping, bool uuid_policy_table);
static struct necp_uuid_id_mapping *necp_uuid_lookup_uuid_with_app_id_locked(u_int32_t local_id);

static struct necp_uuid_id_mapping *necp_uuid_lookup_service_id_locked(uuid_t uuid);
static struct necp_uuid_id_mapping *necp_uuid_lookup_uuid_with_service_id_locked(u_int32_t local_id);
static u_int32_t necp_create_uuid_service_id_mapping(uuid_t uuid);
static bool necp_remove_uuid_service_id_mapping(uuid_t uuid);
struct necp_string_id_mapping {
	LIST_ENTRY(necp_string_id_mapping) chain;
};

static LIST_HEAD(necp_string_id_mapping_list, necp_string_id_mapping) necp_account_id_list;
static u_int32_t necp_create_string_to_id_mapping(struct necp_string_id_mapping_list *list, char *domain);
static bool necp_remove_string_to_id_mapping(struct necp_string_id_mapping_list *list, char *domain);
static struct necp_string_id_mapping *necp_lookup_string_with_id_locked(struct necp_string_id_mapping_list *list, u_int32_t local_id);

static LIST_HEAD(_necp_kernel_service_list, necp_service_registration) necp_registered_service_list;

static char *necp_create_trimmed_domain(char *string, size_t length);
static inline int necp_count_dots(char *string, size_t length);

static char *necp_copy_string(char *string, size_t length);
static bool necp_update_qos_marking(struct ifnet *ifp, u_int32_t route_rule_id);
#define ROUTE_RULE_IS_AGGREGATE(ruleid) (ruleid > UINT16_MAX)

#define MAX_ROUTE_RULE_INTERFACES 10
struct necp_route_rule {
	LIST_ENTRY(necp_route_rule) chain;
	u_int32_t	default_action;
	u_int8_t	cellular_action;
	u_int8_t	wifi_action;
	u_int8_t	wired_action;
	u_int8_t	expensive_action;
	u_int		exception_if_indices[MAX_ROUTE_RULE_INTERFACES];
	u_int8_t	exception_if_actions[MAX_ROUTE_RULE_INTERFACES];
};
static LIST_HEAD(necp_route_rule_list, necp_route_rule) necp_route_rules;
static u_int32_t necp_create_route_rule(struct necp_route_rule_list *list, u_int8_t *route_rules_array, u_int32_t route_rules_array_size);
static bool necp_remove_route_rule(struct necp_route_rule_list *list, u_int32_t route_rule_id);
static bool necp_route_is_allowed(struct rtentry *route, ifnet_t interface, u_int32_t route_rule_id, u_int32_t *interface_type_denied);
static struct necp_route_rule *necp_lookup_route_rule_locked(struct necp_route_rule_list *list, u_int32_t route_rule_id);

#define MAX_AGGREGATE_ROUTE_RULES 16
struct necp_aggregate_route_rule {
	LIST_ENTRY(necp_aggregate_route_rule) chain;
	u_int32_t rule_ids[MAX_AGGREGATE_ROUTE_RULES];
};
static LIST_HEAD(necp_aggregate_route_rule_list, necp_aggregate_route_rule) necp_aggregate_route_rules;
static u_int32_t necp_create_aggregate_route_rule(u_int32_t *rule_ids);
// Sysctl definitions
static int sysctl_handle_necp_level SYSCTL_HANDLER_ARGS;

SYSCTL_NODE(_net, OID_AUTO, necp, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "NECP");
SYSCTL_INT(_net_necp, NECPCTL_PASS_LOOPBACK, pass_loopback, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_pass_loopback, 0, "");
SYSCTL_INT(_net_necp, NECPCTL_PASS_KEEPALIVES, pass_keepalives, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_pass_keepalives, 0, "");
SYSCTL_INT(_net_necp, NECPCTL_DEBUG, debug, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_debug, 0, "");
SYSCTL_PROC(_net_necp, NECPCTL_DROP_ALL_LEVEL, drop_all_level, CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW, &necp_drop_all_level, 0, &sysctl_handle_necp_level, "IU", "");
SYSCTL_LONG(_net_necp, NECPCTL_SOCKET_POLICY_COUNT, socket_policy_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_kernel_socket_policies_count, "");
SYSCTL_LONG(_net_necp, NECPCTL_SOCKET_NON_APP_POLICY_COUNT, socket_non_app_policy_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_kernel_socket_policies_non_app_count, "");
SYSCTL_LONG(_net_necp, NECPCTL_IP_POLICY_COUNT, ip_policy_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_kernel_ip_output_policies_count, "");
SYSCTL_INT(_net_necp, NECPCTL_SESSION_COUNT, session_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_session_count, 0, "");
// Session order allocation
necp_allocate_new_session_order(u_int32_t priority, u_int32_t control_unit)
{
	u_int32_t new_order = 0;

	// For now, just allocate 1000 orders for each priority
	if (priority == NECP_SESSION_PRIORITY_UNKNOWN || priority > NECP_SESSION_NUM_PRIORITIES) {
		priority = NECP_SESSION_PRIORITY_DEFAULT;
	}

	// Use the control unit to decide the offset into the priority list
	new_order = (control_unit) + ((priority - 1) * 1000);

	return (new_order);
}

static inline u_int32_t
necp_get_first_order_for_priority(u_int32_t priority)
{
	return (((priority - 1) * 1000) + 1);
}
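/*
 * Worked example (editor's addition): a session requesting priority 2 on
 * kernel control unit 3 receives session order (3) + ((2 - 1) * 1000) = 1003,
 * and the first order reserved for priority 2 is ((2 - 1) * 1000) + 1 = 1001.
 * Numerically smaller orders therefore always belong to better (more
 * privileged) priority levels.
 */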
sysctl_handle_necp_level SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
	if (necp_drop_all_level == 0) {
		necp_drop_all_order = 0;
	} else {
		necp_drop_all_order = necp_get_first_order_for_priority(necp_drop_all_level);
	}
	return (error);
}
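/*
 * Worked example (editor's addition): setting the net.necp.drop_all_level
 * sysctl to 3 makes this handler compute
 * necp_drop_all_order = necp_get_first_order_for_priority(3) = 2001,
 * so only sessions whose order is numerically lower than 2001 (priority
 * levels better than 3) can override the global drop rule, as described in
 * the Drop All Level note at the top of this file.
 */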
static int noop_read(struct fileproc *, struct uio *, int, vfs_context_t);
static int noop_write(struct fileproc *, struct uio *, int, vfs_context_t);
static int noop_ioctl(struct fileproc *, unsigned long, caddr_t,
    vfs_context_t);
static int noop_select(struct fileproc *, int, void *, vfs_context_t);
static int necp_session_op_close(struct fileglob *, vfs_context_t);
static int noop_kqfilter(struct fileproc *, struct knote *,
    struct kevent_internal_s *, vfs_context_t);

static const struct fileops necp_session_fd_ops = {
	.fo_type = DTYPE_NETPOLICY,
	.fo_read = noop_read,
	.fo_write = noop_write,
	.fo_ioctl = noop_ioctl,
	.fo_select = noop_select,
	.fo_close = necp_session_op_close,
	.fo_kqfilter = noop_kqfilter,
};
noop_read(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx)
{
#pragma unused(fp, uio, flags, ctx)
}

noop_write(struct fileproc *fp, struct uio *uio, int flags,
    vfs_context_t ctx)
{
#pragma unused(fp, uio, flags, ctx)
}

noop_ioctl(struct fileproc *fp, unsigned long com, caddr_t data,
    vfs_context_t ctx)
{
#pragma unused(fp, com, data, ctx)
}

noop_select(struct fileproc *fp, int which, void *wql, vfs_context_t ctx)
{
#pragma unused(fp, which, wql, ctx)
}

noop_kqfilter(struct fileproc *fp, struct knote *kn,
    struct kevent_internal_s *kev, vfs_context_t ctx)
{
#pragma unused(fp, kn, kev, ctx)
}
necp_session_open(struct proc *p, struct necp_session_open_args *uap, int *retval)
{
	struct necp_session *session = NULL;
	struct fileproc *fp = NULL;

	uid_t uid = kauth_cred_getuid(proc_ucred(p));
	if (uid != 0 && priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES, 0) != 0) {
		NECPLOG0(LOG_ERR, "Process does not hold necessary entitlement to open NECP session");
	}

	error = falloc(p, &fp, &fd, vfs_context_current());

	session = necp_create_session();
	if (session == NULL) {

	fp->f_fglob->fg_flag = 0;
	fp->f_fglob->fg_ops = &necp_session_fd_ops;
	fp->f_fglob->fg_data = session;

	FDFLAGS_SET(p, fd, (UF_EXCLOSE | UF_FORKCLOSE));
	procfdtbl_releasefd(p, fd, NULL);
	fp_drop(p, fd, fp, 1);
necp_session_op_close(struct fileglob *fg, vfs_context_t ctx)
{
	struct necp_session *session = (struct necp_session *)fg->fg_data;

	if (session != NULL) {
		necp_policy_mark_all_for_deletion(session);
		necp_policy_apply_all(session);
		necp_delete_session(session);
necp_session_find_from_fd(int fd, struct necp_session **session)
{
	proc_t p = current_proc();
	struct fileproc *fp = NULL;

	if ((error = fp_lookup(p, fd, &fp, 1)) != 0) {

	if (fp->f_fglob->fg_ops->fo_type != DTYPE_NETPOLICY) {
		fp_drop(p, fd, fp, 1);

	*session = (struct necp_session *)fp->f_fglob->fg_data;
necp_session_add_policy(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
{
	u_int8_t *tlv_buffer = NULL;

	if (uap->in_buffer_length == 0 || uap->in_buffer_length > NECP_MAX_POLICY_SIZE || uap->in_buffer == 0) {
		NECPLOG(LOG_ERR, "necp_session_add_policy invalid input (%zu)", uap->in_buffer_length);
	}

	if (uap->out_buffer_length < sizeof(necp_policy_id) || uap->out_buffer == 0) {
		NECPLOG(LOG_ERR, "necp_session_add_policy invalid output buffer (%zu)", uap->out_buffer_length);
	}

	if ((tlv_buffer = _MALLOC(uap->in_buffer_length, M_NECP, M_WAITOK | M_ZERO)) == NULL) {

	error = copyin(uap->in_buffer, tlv_buffer, uap->in_buffer_length);
	NECPLOG(LOG_ERR, "necp_session_add_policy tlv copyin error (%d)", error);

	necp_policy_id new_policy_id = necp_handle_policy_add(session, 0, NULL, tlv_buffer, uap->in_buffer_length, 0, &error);
	NECPLOG(LOG_ERR, "necp_session_add_policy failed to add policy (%d)", error);

	error = copyout(&new_policy_id, uap->out_buffer, sizeof(new_policy_id));
	NECPLOG(LOG_ERR, "necp_session_add_policy policy_id copyout error (%d)", error);

	if (tlv_buffer != NULL) {
		FREE(tlv_buffer, M_NECP);
	}
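/*
 * Editor's sketch (not from the original source): the in_buffer parsed by
 * necp_handle_policy_add above is a flat TLV blob assembled by the client.
 * A minimal policy would be laid out roughly as:
 *
 *	[NECP_TLV_POLICY_ORDER][len][order value]
 *	[one or more policy condition TLVs]
 *	[NECP_TLV_POLICY_RESULT][len][result value]
 *
 * The exact set of condition TLV types is defined in necp.h; only the order
 * and result TLVs are visible in this excerpt.
 */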
necp_session_get_policy(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
{
	u_int8_t *response = NULL;

	if (uap->in_buffer_length < sizeof(necp_policy_id) || uap->in_buffer == 0) {
		NECPLOG(LOG_ERR, "necp_session_get_policy invalid input (%zu)", uap->in_buffer_length);
	}

	necp_policy_id policy_id = 0;
	error = copyin(uap->in_buffer, &policy_id, sizeof(policy_id));
	NECPLOG(LOG_ERR, "necp_session_get_policy policy_id copyin error (%d)", error);

	struct necp_session_policy *policy = necp_policy_find(session, policy_id);
	if (policy == NULL || policy->pending_deletion) {
		NECPLOG(LOG_ERR, "Failed to find policy with id %d", policy_id);
	}

	u_int32_t order_tlv_size = sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(necp_policy_order);
	u_int32_t result_tlv_size = (policy->result_size ? (sizeof(u_int8_t) + sizeof(u_int32_t) + policy->result_size) : 0);
	u_int32_t response_size = order_tlv_size + result_tlv_size + policy->conditions_size;

	if (uap->out_buffer_length < response_size || uap->out_buffer == 0) {
		NECPLOG(LOG_ERR, "necp_session_get_policy buffer not large enough (%u < %u)", uap->out_buffer_length, response_size);
	}

	if (response_size > NECP_MAX_POLICY_SIZE) {
		NECPLOG(LOG_ERR, "necp_session_get_policy size too large to copy (%u)", response_size);
	}

	MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK | M_ZERO);
	if (response == NULL) {

	u_int8_t *cursor = response;
	cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ORDER, sizeof(necp_policy_order), &policy->order, response, response_size);
	if (result_tlv_size) {
		cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_RESULT, policy->result_size, &policy->result, response, response_size);
	}
	if (policy->conditions_size) {
		memcpy(((u_int8_t *)(void *)(cursor)), policy->conditions, policy->conditions_size);
	}

	error = copyout(response, uap->out_buffer, response_size);
	NECPLOG(LOG_ERR, "necp_session_get_policy TLV copyout error (%d)", error);

	if (response != NULL) {
		FREE(response, M_NECP);
	}
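/*
 * Worked size example (editor's addition, assuming necp_policy_order is a
 * 4-byte value): for a policy with a 4-byte result and 12 bytes of stored
 * condition TLVs, the response built above is
 * order TLV (1 + 4 + 4) + result TLV (1 + 4 + 4) + conditions (12) = 30 bytes,
 * matching the order_tlv_size/result_tlv_size/response_size computation.
 */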
necp_session_delete_policy(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
{
	if (uap->in_buffer_length < sizeof(necp_policy_id) || uap->in_buffer == 0) {
		NECPLOG(LOG_ERR, "necp_session_delete_policy invalid input (%zu)", uap->in_buffer_length);
	}

	necp_policy_id delete_policy_id = 0;
	error = copyin(uap->in_buffer, &delete_policy_id, sizeof(delete_policy_id));
	NECPLOG(LOG_ERR, "necp_session_delete_policy policy_id copyin error (%d)", error);

	struct necp_session_policy *policy = necp_policy_find(session, delete_policy_id);
	if (policy == NULL || policy->pending_deletion) {
		NECPLOG(LOG_ERR, "necp_session_delete_policy failed to find policy with id %u", delete_policy_id);
	}

	necp_policy_mark_for_deletion(session, policy);
necp_session_apply_all(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
{
	necp_policy_apply_all(session);
necp_session_list_all(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
{
	u_int32_t tlv_size = (sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(necp_policy_id));
	u_int32_t response_size = 0;
	u_int8_t *response = NULL;
	int num_policies = 0;
	int cur_policy_index = 0;

	struct necp_session_policy *policy;

	LIST_FOREACH(policy, &session->policies, chain) {
		if (!policy->pending_deletion) {

	if (num_policies > NECP_MAX_POLICY_LIST_COUNT) {
		NECPLOG(LOG_ERR, "necp_session_list_all size too large to copy (%u policies)", num_policies);
	}

	response_size = num_policies * tlv_size;
	if (uap->out_buffer_length < response_size || uap->out_buffer == 0) {
		NECPLOG(LOG_ERR, "necp_session_list_all buffer not large enough (%u < %u)", uap->out_buffer_length, response_size);
	}

	// Create a response with one Policy ID TLV for each policy
	MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK | M_ZERO);
	if (response == NULL) {

	u_int8_t *cursor = response;
	LIST_FOREACH(policy, &session->policies, chain) {
		if (!policy->pending_deletion && cur_policy_index < num_policies) {
			cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ID, sizeof(u_int32_t), &policy->id, response, response_size);
		}
	}

	error = copyout(response, uap->out_buffer, response_size);
	NECPLOG(LOG_ERR, "necp_session_list_all TLV copyout error (%d)", error);

	if (response != NULL) {
		FREE(response, M_NECP);
	}
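/*
 * Worked size example (editor's addition): each entry written above is a
 * NECP_TLV_POLICY_ID TLV of 1 (type) + 4 (length) + 4 (policy ID) = 9 bytes,
 * so listing 3 policies produces a 27-byte response, matching
 * response_size = num_policies * tlv_size.
 */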
necp_session_delete_all(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
{
	necp_policy_mark_all_for_deletion(session);
necp_session_set_session_priority(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
{
	struct necp_session_policy *policy = NULL;
	struct necp_session_policy *temp_policy = NULL;

	if (uap->in_buffer_length < sizeof(necp_session_priority) || uap->in_buffer == 0) {
		NECPLOG(LOG_ERR, "necp_session_set_session_priority invalid input (%zu)", uap->in_buffer_length);
	}

	necp_session_priority requested_session_priority = 0;
	error = copyin(uap->in_buffer, &requested_session_priority, sizeof(requested_session_priority));
	NECPLOG(LOG_ERR, "necp_session_set_session_priority priority copyin error (%d)", error);

	// Enforce special session priorities with entitlements
	if (requested_session_priority == NECP_SESSION_PRIORITY_CONTROL ||
	    requested_session_priority == NECP_SESSION_PRIORITY_PRIVILEGED_TUNNEL) {
		errno_t cred_result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES, 0);
		if (cred_result != 0) {
			NECPLOG(LOG_ERR, "Session does not hold necessary entitlement to claim priority level %d", requested_session_priority);
		}
	}

	if (session->session_priority != requested_session_priority) {
		session->session_priority = requested_session_priority;
		session->session_order = necp_allocate_new_session_order(session->session_priority, session->control_unit);
		session->dirty = TRUE;

		// Mark all policies as needing updates
		LIST_FOREACH_SAFE(policy, &session->policies, chain, temp_policy) {
			policy->pending_update = TRUE;
		}
	}
necp_session_lock_to_process(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
{
	session->proc_locked = TRUE;
necp_session_register_service(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
{
	struct necp_service_registration *new_service = NULL;

	if (uap->in_buffer_length < sizeof(uuid_t) || uap->in_buffer == 0) {
		NECPLOG(LOG_ERR, "necp_session_register_service invalid input (%zu)", uap->in_buffer_length);
	}

	error = copyin(uap->in_buffer, service_uuid, sizeof(service_uuid));
	NECPLOG(LOG_ERR, "necp_session_register_service uuid copyin error (%d)", error);

	MALLOC(new_service, struct necp_service_registration *, sizeof(*new_service), M_NECP, M_WAITOK | M_ZERO);
	if (new_service == NULL) {
		NECPLOG0(LOG_ERR, "Failed to allocate service registration");
	}

	lck_rw_lock_exclusive(&necp_kernel_policy_lock);
	new_service->service_id = necp_create_uuid_service_id_mapping(service_uuid);
	LIST_INSERT_HEAD(&session->services, new_service, session_chain);
	LIST_INSERT_HEAD(&necp_registered_service_list, new_service, kernel_chain);
	lck_rw_done(&necp_kernel_policy_lock);
necp_session_unregister_service(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
{
	struct necp_service_registration *service = NULL;
	struct necp_service_registration *temp_service = NULL;
	struct necp_uuid_id_mapping *mapping = NULL;

	if (uap->in_buffer_length < sizeof(uuid_t) || uap->in_buffer == 0) {
		NECPLOG(LOG_ERR, "necp_session_unregister_service invalid input (%zu)", uap->in_buffer_length);
	}

	error = copyin(uap->in_buffer, service_uuid, sizeof(service_uuid));
	NECPLOG(LOG_ERR, "necp_session_unregister_service uuid copyin error (%d)", error);

	// Remove all matching services for this session
	lck_rw_lock_exclusive(&necp_kernel_policy_lock);
	mapping = necp_uuid_lookup_service_id_locked(service_uuid);
	if (mapping != NULL) {
		LIST_FOREACH_SAFE(service, &session->services, session_chain, temp_service) {
			if (service->service_id == mapping->id) {
				LIST_REMOVE(service, session_chain);
				LIST_REMOVE(service, kernel_chain);
				FREE(service, M_NECP);
			}
		}
		necp_remove_uuid_service_id_mapping(service_uuid);
	}
	lck_rw_done(&necp_kernel_policy_lock);
necp_session_dump_all(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
{
	if (uap->out_buffer_length == 0 || uap->out_buffer == 0) {
		NECPLOG(LOG_ERR, "necp_session_dump_all invalid output buffer (%zu)", uap->out_buffer_length);
	}

	error = necp_handle_policy_dump_all(session, 0, NULL, uap->out_buffer, uap->out_buffer_length, 0);
necp_session_action(struct proc *p, struct necp_session_action_args *uap, int *retval)
{
	int return_value = 0;
	struct necp_session *session = NULL;
	error = necp_session_find_from_fd(uap->necp_fd, &session);
	NECPLOG(LOG_ERR, "necp_session_action find fd error (%d)", error);

	NECP_SESSION_LOCK(session);

	if (session->proc_locked) {
		// Verify that the calling process is allowed to do actions
		proc_getexecutableuuid(current_proc(), proc_uuid, sizeof(proc_uuid));
		if (uuid_compare(proc_uuid, session->proc_uuid) != 0) {
		}
	} else {
		// If not locked, update the proc_uuid and proc_pid of the session
		proc_getexecutableuuid(current_proc(), session->proc_uuid, sizeof(session->proc_uuid));
		session->proc_pid = proc_pid(current_proc());
	}

	u_int32_t action = uap->action;
	switch (action) {
		case NECP_SESSION_ACTION_POLICY_ADD: {
			return_value = necp_session_add_policy(session, uap, retval);
			break;
		}
		case NECP_SESSION_ACTION_POLICY_GET: {
			return_value = necp_session_get_policy(session, uap, retval);
			break;
		}
		case NECP_SESSION_ACTION_POLICY_DELETE: {
			return_value = necp_session_delete_policy(session, uap, retval);
			break;
		}
		case NECP_SESSION_ACTION_POLICY_APPLY_ALL: {
			return_value = necp_session_apply_all(session, uap, retval);
			break;
		}
		case NECP_SESSION_ACTION_POLICY_LIST_ALL: {
			return_value = necp_session_list_all(session, uap, retval);
			break;
		}
		case NECP_SESSION_ACTION_POLICY_DELETE_ALL: {
			return_value = necp_session_delete_all(session, uap, retval);
			break;
		}
		case NECP_SESSION_ACTION_SET_SESSION_PRIORITY: {
			return_value = necp_session_set_session_priority(session, uap, retval);
			break;
		}
		case NECP_SESSION_ACTION_LOCK_SESSION_TO_PROC: {
			return_value = necp_session_lock_to_process(session, uap, retval);
			break;
		}
		case NECP_SESSION_ACTION_REGISTER_SERVICE: {
			return_value = necp_session_register_service(session, uap, retval);
			break;
		}
		case NECP_SESSION_ACTION_UNREGISTER_SERVICE: {
			return_value = necp_session_unregister_service(session, uap, retval);
			break;
		}
		case NECP_SESSION_ACTION_POLICY_DUMP_ALL: {
			return_value = necp_session_dump_all(session, uap, retval);
			break;
		}
		default: {
			NECPLOG(LOG_ERR, "necp_session_action unknown action (%u)", action);
			return_value = EINVAL;
			break;
		}
	}

	NECP_SESSION_UNLOCK(session);
	file_drop(uap->necp_fd);

	return (return_value);
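/*
 * Editor's note (illustrative, not from the original source): the expected
 * user-space flow against this dispatcher is to obtain a session fd via the
 * necp_session_open syscall and then drive it with necp_session_action,
 * passing one of the NECP_SESSION_ACTION_* codes above together with the
 * input/output buffers that the per-action handlers validate. Roughly:
 *
 *	fd = necp_session_open(...);
 *	necp_session_action(fd, NECP_SESSION_ACTION_POLICY_ADD, tlvs, tlvs_len,
 *	    &policy_id, sizeof(policy_id));
 *	necp_session_action(fd, NECP_SESSION_ACTION_POLICY_APPLY_ALL, ...);
 *
 * The exact user-space wrapper signatures are assumptions here; only the
 * kernel-side argument structures appear in this file.
 */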
// Kernel Control functions
static errno_t necp_register_control(void);
static errno_t necp_ctl_connect(kern_ctl_ref kctlref, struct sockaddr_ctl *sac, void **unitinfo);
static errno_t necp_ctl_disconnect(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo);
static errno_t necp_ctl_send(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, mbuf_t m, int flags);
static void necp_ctl_rcvd(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int flags);
static errno_t necp_ctl_getopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int opt, void *data, size_t *len);
static errno_t necp_ctl_setopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int opt, void *data, size_t len);

static bool necp_send_ctl_data(struct necp_session *session, u_int8_t *buffer, size_t buffer_size);
	result = necp_register_control();

	necp_kernel_policy_grp_attr = lck_grp_attr_alloc_init();
	if (necp_kernel_policy_grp_attr == NULL) {
		NECPLOG0(LOG_ERR, "lck_grp_attr_alloc_init failed");
	}

	necp_kernel_policy_mtx_grp = lck_grp_alloc_init(NECP_CONTROL_NAME, necp_kernel_policy_grp_attr);
	if (necp_kernel_policy_mtx_grp == NULL) {
		NECPLOG0(LOG_ERR, "lck_grp_alloc_init failed");
	}

	necp_kernel_policy_mtx_attr = lck_attr_alloc_init();
	if (necp_kernel_policy_mtx_attr == NULL) {
		NECPLOG0(LOG_ERR, "lck_attr_alloc_init failed");
	}

	lck_rw_init(&necp_kernel_policy_lock, necp_kernel_policy_mtx_grp, necp_kernel_policy_mtx_attr);

	necp_route_rule_grp_attr = lck_grp_attr_alloc_init();
	if (necp_route_rule_grp_attr == NULL) {
		NECPLOG0(LOG_ERR, "lck_grp_attr_alloc_init failed");
	}

	necp_route_rule_mtx_grp = lck_grp_alloc_init("necp_route_rule", necp_route_rule_grp_attr);
	if (necp_route_rule_mtx_grp == NULL) {
		NECPLOG0(LOG_ERR, "lck_grp_alloc_init failed");
	}

	necp_route_rule_mtx_attr = lck_attr_alloc_init();
	if (necp_route_rule_mtx_attr == NULL) {
		NECPLOG0(LOG_ERR, "lck_attr_alloc_init failed");
	}

	lck_rw_init(&necp_route_rule_lock, necp_route_rule_mtx_grp, necp_route_rule_mtx_attr);

	TAILQ_INIT(&necp_session_list);

	LIST_INIT(&necp_kernel_socket_policies);
	LIST_INIT(&necp_kernel_ip_output_policies);

	LIST_INIT(&necp_account_id_list);

	LIST_INIT(&necp_uuid_service_id_list);

	LIST_INIT(&necp_registered_service_list);

	LIST_INIT(&necp_route_rules);
	LIST_INIT(&necp_aggregate_route_rules);

	necp_uuid_app_id_hashtbl = hashinit(NECP_UUID_APP_ID_HASH_SIZE, M_NECP, &necp_uuid_app_id_hash_mask);
	necp_uuid_app_id_hash_num_buckets = necp_uuid_app_id_hash_mask + 1;
	necp_num_uuid_app_id_mappings = 0;
	necp_uuid_app_id_mappings_dirty = FALSE;

	necp_kernel_application_policies_condition_mask = 0;
	necp_kernel_socket_policies_condition_mask = 0;
	necp_kernel_ip_output_policies_condition_mask = 0;

	necp_kernel_application_policies_count = 0;
	necp_kernel_socket_policies_count = 0;
	necp_kernel_socket_policies_non_app_count = 0;
	necp_kernel_ip_output_policies_count = 0;
	necp_kernel_ip_output_policies_non_id_count = 0;

	necp_last_policy_id = 0;
	necp_last_kernel_policy_id = 0;
	necp_last_uuid_id = 0;
	necp_last_string_id = 0;
	necp_last_route_rule_id = 0;
	necp_last_aggregate_route_rule_id = 0;

	necp_kernel_socket_policies_gencount = 1;

	memset(&necp_kernel_socket_policies_map, 0, sizeof(necp_kernel_socket_policies_map));
	memset(&necp_kernel_ip_output_policies_map, 0, sizeof(necp_kernel_ip_output_policies_map));
	necp_kernel_socket_policies_app_layer_map = NULL;

	if (necp_kernel_policy_mtx_attr != NULL) {
		lck_attr_free(necp_kernel_policy_mtx_attr);
		necp_kernel_policy_mtx_attr = NULL;
	}
	if (necp_kernel_policy_mtx_grp != NULL) {
		lck_grp_free(necp_kernel_policy_mtx_grp);
		necp_kernel_policy_mtx_grp = NULL;
	}
	if (necp_kernel_policy_grp_attr != NULL) {
		lck_grp_attr_free(necp_kernel_policy_grp_attr);
		necp_kernel_policy_grp_attr = NULL;
	}
	if (necp_route_rule_mtx_attr != NULL) {
		lck_attr_free(necp_route_rule_mtx_attr);
		necp_route_rule_mtx_attr = NULL;
	}
	if (necp_route_rule_mtx_grp != NULL) {
		lck_grp_free(necp_route_rule_mtx_grp);
		necp_route_rule_mtx_grp = NULL;
	}
	if (necp_route_rule_grp_attr != NULL) {
		lck_grp_attr_free(necp_route_rule_grp_attr);
		necp_route_rule_grp_attr = NULL;
	}
	if (necp_kctlref != NULL) {
		ctl_deregister(necp_kctlref);
		necp_kctlref = NULL;
	}
necp_register_control(void)
{
	struct kern_ctl_reg kern_ctl;

	// Create a tag to allocate memory
	necp_malloc_tag = OSMalloc_Tagalloc(NECP_CONTROL_NAME, OSMT_DEFAULT);

	// Find a unique value for our interface family
	result = mbuf_tag_id_find(NECP_CONTROL_NAME, &necp_family);
	NECPLOG(LOG_ERR, "mbuf_tag_id_find_internal failed: %d", result);

	bzero(&kern_ctl, sizeof(kern_ctl));
	strlcpy(kern_ctl.ctl_name, NECP_CONTROL_NAME, sizeof(kern_ctl.ctl_name));
	kern_ctl.ctl_name[sizeof(kern_ctl.ctl_name) - 1] = 0;
	kern_ctl.ctl_flags = CTL_FLAG_PRIVILEGED; // Require root
	kern_ctl.ctl_sendsize = 64 * 1024;
	kern_ctl.ctl_recvsize = 64 * 1024;
	kern_ctl.ctl_connect = necp_ctl_connect;
	kern_ctl.ctl_disconnect = necp_ctl_disconnect;
	kern_ctl.ctl_send = necp_ctl_send;
	kern_ctl.ctl_rcvd = necp_ctl_rcvd;
	kern_ctl.ctl_setopt = necp_ctl_setopt;
	kern_ctl.ctl_getopt = necp_ctl_getopt;

	result = ctl_register(&kern_ctl, &necp_kctlref);
	NECPLOG(LOG_ERR, "ctl_register failed: %d", result);
necp_post_change_event(struct kev_necp_policies_changed_data *necp_event_data)
{
	struct kev_msg ev_msg;
	memset(&ev_msg, 0, sizeof(ev_msg));

	ev_msg.vendor_code = KEV_VENDOR_APPLE;
	ev_msg.kev_class = KEV_NETWORK_CLASS;
	ev_msg.kev_subclass = KEV_NECP_SUBCLASS;
	ev_msg.event_code = KEV_NECP_POLICIES_CHANGED;

	ev_msg.dv[0].data_ptr = necp_event_data;
	ev_msg.dv[0].data_length = sizeof(necp_event_data->changed_count);
	ev_msg.dv[1].data_length = 0;

	kev_post_msg(&ev_msg);
necp_ctl_connect(kern_ctl_ref kctlref, struct sockaddr_ctl *sac, void **unitinfo)
{
#pragma unused(kctlref, sac)
	*unitinfo = necp_create_session();
	if (*unitinfo == NULL) {
		// Could not allocate session
	}
necp_ctl_disconnect(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo)
{
#pragma unused(kctlref, unit)
	struct necp_session *session = (struct necp_session *)unitinfo;
	if (session != NULL) {
		necp_policy_mark_all_for_deletion(session);
		necp_policy_apply_all(session);
		necp_delete_session((struct necp_session *)unitinfo);
	}
necp_packet_find_tlv(mbuf_t packet, int offset, u_int8_t type, int *err, int next)
{
	size_t cursor = offset;
	u_int32_t curr_length;

	do {
		error = mbuf_copydata(packet, cursor, sizeof(curr_type), &curr_type);
		curr_type = NECP_TLV_NIL;

		if (curr_type != type) {
			cursor += sizeof(curr_type);
			error = mbuf_copydata(packet, cursor, sizeof(curr_length), &curr_length);
			cursor += (sizeof(curr_length) + curr_length);
		}
	} while (curr_type != type);
necp_packet_get_tlv_at_offset(mbuf_t packet, int tlv_offset, u_int32_t buff_len, void *buff, u_int32_t *value_size)
{
	if (tlv_offset < 0) {

	error = mbuf_copydata(packet, tlv_offset + sizeof(u_int8_t), sizeof(length), &length);

	u_int32_t total_len = m_length2(packet, NULL);
	if (total_len < (tlv_offset + sizeof(u_int8_t) + sizeof(length) + length)) {
		NECPLOG(LOG_ERR, "Got a bad TLV, length (%u) + offset (%d) < total length (%u)",
		    length, (tlv_offset + sizeof(u_int8_t) + sizeof(length)), total_len);
	}

	if (value_size != NULL) {
		*value_size = length;
	}

	if (buff != NULL && buff_len > 0) {
		u_int32_t to_copy = (length < buff_len) ? length : buff_len;
		error = mbuf_copydata(packet, tlv_offset + sizeof(u_int8_t) + sizeof(length), to_copy, buff);
	}
necp_buffer_write_packet_header(u_int8_t *buffer, u_int8_t packet_type, u_int8_t flags, u_int32_t message_id)
{
	((struct necp_packet_header *)(void *)buffer)->packet_type = packet_type;
	((struct necp_packet_header *)(void *)buffer)->flags = flags;
	((struct necp_packet_header *)(void *)buffer)->message_id = message_id;
	return (buffer + sizeof(struct necp_packet_header));
}
necp_buffer_write_tlv_validate(u_int8_t *cursor, u_int8_t type, u_int32_t length,
    u_int8_t *buffer, u_int32_t buffer_length)
{
	if (cursor < buffer || (uintptr_t)(cursor - buffer) > buffer_length) {
		NECPLOG0(LOG_ERR, "Cannot write TLV in buffer (invalid cursor)");
	}
	u_int8_t *next_tlv = (u_int8_t *)(cursor + sizeof(type) + sizeof(length) + length);
	if (next_tlv <= buffer || // make sure the next TLV start doesn't overflow
	    (uintptr_t)(next_tlv - buffer) > buffer_length) { // make sure the next TLV has enough room in buffer
		NECPLOG(LOG_ERR, "Cannot write TLV in buffer (TLV length %u, buffer length %u)",
		    length, buffer_length);
	}
necp_buffer_write_tlv_if_different(u_int8_t *cursor, u_int8_t type,
    u_int32_t length, const void *value, bool *updated,
    u_int8_t *buffer, u_int32_t buffer_length)
{
	if (!necp_buffer_write_tlv_validate(cursor, type, length, buffer, buffer_length)) {

	u_int8_t *next_tlv = (u_int8_t *)(cursor + sizeof(type) + sizeof(length) + length);
	if (*updated || *(u_int8_t *)(cursor) != type) {
		*(u_int8_t *)(cursor) = type;
	}
	if (*updated || *(u_int32_t *)(void *)(cursor + sizeof(type)) != length) {
		*(u_int32_t *)(void *)(cursor + sizeof(type)) = length;
	}
	if (*updated || memcmp((u_int8_t *)(cursor + sizeof(type) + sizeof(length)), value, length) != 0) {
		memcpy((u_int8_t *)(cursor + sizeof(type) + sizeof(length)), value, length);
	}
necp_buffer_write_tlv(u_int8_t *cursor, u_int8_t type,
    u_int32_t length, const void *value,
    u_int8_t *buffer, u_int32_t buffer_length)
{
	if (!necp_buffer_write_tlv_validate(cursor, type, length, buffer, buffer_length)) {

	u_int8_t *next_tlv = (u_int8_t *)(cursor + sizeof(type) + sizeof(length) + length);
	*(u_int8_t *)(cursor) = type;
	*(u_int32_t *)(void *)(cursor + sizeof(type)) = length;
	memcpy((u_int8_t *)(cursor + sizeof(type) + sizeof(length)), value, length);
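/*
 * Wire format sketch (editor's addition): every TLV written by the helpers
 * above occupies sizeof(type) + sizeof(length) + length bytes, i.e. a 1-byte
 * type, a 4-byte length, then the value. Writing a 4-byte value therefore
 * advances the cursor by 9 bytes:
 *
 *	offset 0: type (u_int8_t)
 *	offset 1: length = 4 (u_int32_t)
 *	offset 5: value bytes
 *	offset 9: next TLV starts here (the pointer computed as next_tlv)
 */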
necp_buffer_get_tlv_type(u_int8_t *buffer, int tlv_offset)
{
	u_int8_t *type = NULL;

	if (buffer == NULL) {

	type = (u_int8_t *)((u_int8_t *)buffer + tlv_offset);
	return (type ? *type : 0);
}
necp_buffer_get_tlv_length(u_int8_t *buffer, int tlv_offset)
{
	u_int32_t *length = NULL;

	if (buffer == NULL) {

	length = (u_int32_t *)(void *)((u_int8_t *)buffer + tlv_offset + sizeof(u_int8_t));
	return (length ? *length : 0);
}
necp_buffer_get_tlv_value(u_int8_t *buffer, int tlv_offset, u_int32_t *value_size)
{
	u_int8_t *value = NULL;
	u_int32_t length = necp_buffer_get_tlv_length(buffer, tlv_offset);

	*value_size = length;

	value = (u_int8_t *)((u_int8_t *)buffer + tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t));
necp_buffer_find_tlv(u_int8_t *buffer, u_int32_t buffer_length, int offset, u_int8_t type, int next)
{
	int cursor = offset;
	u_int32_t curr_length;

	if ((((u_int32_t)cursor) + sizeof(curr_type) + sizeof(curr_length)) > buffer_length) {

	curr_type = necp_buffer_get_tlv_type(buffer, cursor);
	curr_type = NECP_TLV_NIL;

	curr_length = necp_buffer_get_tlv_length(buffer, cursor);
	if (curr_length > buffer_length - ((u_int32_t)cursor + sizeof(curr_type) + sizeof(curr_length))) {

	next_cursor = (cursor + sizeof(curr_type) + sizeof(curr_length) + curr_length);
	if (curr_type == type) {
		// check if entire TLV fits inside buffer
		if (((u_int32_t)next_cursor) <= buffer_length) {

	cursor = next_cursor;
necp_find_tlv(mbuf_t packet, u_int8_t *buffer, u_int32_t buffer_length, int offset, u_int8_t type, int *err, int next)
{
	if (packet != NULL) {
		cursor = necp_packet_find_tlv(packet, offset, type, err, next);
	} else if (buffer != NULL) {
		cursor = necp_buffer_find_tlv(buffer, buffer_length, offset, type, next);
	}
necp_get_tlv_at_offset(mbuf_t packet, u_int8_t *buffer, u_int32_t buffer_length,
    int tlv_offset, u_int32_t out_buffer_length, void *out_buffer, u_int32_t *value_size)
{
	if (packet != NULL) {
		// Handle mbuf parsing
		return necp_packet_get_tlv_at_offset(packet, tlv_offset, out_buffer_length, out_buffer, value_size);
	}

	if (buffer == NULL) {
		NECPLOG0(LOG_ERR, "necp_get_tlv_at_offset buffer is NULL");
	}

	// Handle buffer parsing

	// Validate that buffer has enough room for any TLV
	if (tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t) > buffer_length) {
		NECPLOG(LOG_ERR, "necp_get_tlv_at_offset buffer_length is too small for TLV (%u < %u)",
		    buffer_length, tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t));
	}

	// Validate that buffer has enough room for this TLV
	u_int32_t tlv_length = necp_buffer_get_tlv_length(buffer, tlv_offset);
	if (tlv_length > buffer_length - (tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t))) {
		NECPLOG(LOG_ERR, "necp_get_tlv_at_offset buffer_length is too small for TLV of length %u (%u < %u)",
		    tlv_length, buffer_length, tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t) + tlv_length);
	}

	if (out_buffer != NULL && out_buffer_length > 0) {
		// Validate that out buffer is large enough for value
		if (out_buffer_length < tlv_length) {
			NECPLOG(LOG_ERR, "necp_get_tlv_at_offset out_buffer_length is too small for TLV value (%u < %u)",
			    out_buffer_length, tlv_length);
		}

		// Get value pointer
		u_int8_t *tlv_value = necp_buffer_get_tlv_value(buffer, tlv_offset, NULL);
		if (tlv_value == NULL) {
			NECPLOG0(LOG_ERR, "necp_get_tlv_at_offset tlv_value is NULL");
		}

		memcpy(out_buffer, tlv_value, tlv_length);
	}

	if (value_size != NULL) {
		*value_size = tlv_length;
	}
necp_get_tlv(mbuf_t packet, u_int8_t *buffer, u_int32_t buffer_length,
    int offset, u_int8_t type, u_int32_t buff_len, void *buff, u_int32_t *value_size)
{
	int tlv_offset = necp_find_tlv(packet, buffer, buffer_length, offset, type, &error, 0);
	if (tlv_offset < 0) {

	return (necp_get_tlv_at_offset(packet, buffer, buffer_length, tlv_offset, buff_len, buff, value_size));
}
necp_send_ctl_data(struct necp_session *session, u_int8_t *buffer, size_t buffer_size)
	if (necp_kctlref == NULL || session == NULL || buffer == NULL || buffer_size == 0) {

	error = ctl_enqueuedata(necp_kctlref, session->control_unit, buffer, buffer_size, CTL_DATA_EOR);
	return (error == 0);
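/*
 * Responses are delivered with ctl_enqueuedata() on the session's own control
 * unit, so each user-space client only receives replies for the session it
 * opened. CTL_DATA_EOR marks every response as a complete record.
 */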
necp_send_success_response(struct necp_session *session, u_int8_t packet_type, u_int32_t message_id)
	bool success = TRUE;
	u_int8_t *response = NULL;
	u_int8_t *cursor = NULL;
	size_t response_size = sizeof(struct necp_packet_header) + sizeof(u_int8_t) + sizeof(u_int32_t);
	MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK);
	if (response == NULL) {

	cursor = necp_buffer_write_packet_header(cursor, packet_type, NECP_PACKET_FLAGS_RESPONSE, message_id);
	cursor = necp_buffer_write_tlv(cursor, NECP_TLV_NIL, 0, NULL, response, response_size);

	if (!(success = necp_send_ctl_data(session, (u_int8_t *)response, response_size))) {
		NECPLOG0(LOG_ERR, "Failed to send response");

	FREE(response, M_NECP);
necp_send_error_response(struct necp_session *session, u_int8_t packet_type, u_int32_t message_id, u_int32_t error)
	bool success = TRUE;
	u_int8_t *response = NULL;
	u_int8_t *cursor = NULL;
	size_t response_size = sizeof(struct necp_packet_header) + sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(u_int32_t);
	MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK);
	if (response == NULL) {

	cursor = necp_buffer_write_packet_header(cursor, packet_type, NECP_PACKET_FLAGS_RESPONSE, message_id);
	cursor = necp_buffer_write_tlv(cursor, NECP_TLV_ERROR, sizeof(error), &error, response, response_size);

	if (!(success = necp_send_ctl_data(session, (u_int8_t *)response, response_size))) {
		NECPLOG0(LOG_ERR, "Failed to send response");

	FREE(response, M_NECP);
necp_send_policy_id_response(struct necp_session *session, u_int8_t packet_type, u_int32_t message_id, necp_policy_id policy_id)
	bool success = TRUE;
	u_int8_t *response = NULL;
	u_int8_t *cursor = NULL;
	size_t response_size = sizeof(struct necp_packet_header) + sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(u_int32_t);
	MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK);
	if (response == NULL) {

	cursor = necp_buffer_write_packet_header(cursor, packet_type, NECP_PACKET_FLAGS_RESPONSE, message_id);
	cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ID, sizeof(policy_id), &policy_id, response, response_size);

	if (!(success = necp_send_ctl_data(session, (u_int8_t *)response, response_size))) {
		NECPLOG0(LOG_ERR, "Failed to send response");

	FREE(response, M_NECP);
necp_ctl_send(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, mbuf_t packet, int flags)
#pragma unused(kctlref, unit, flags)
	struct necp_session *session = (struct necp_session *)unitinfo;
	struct necp_packet_header header;

	if (session == NULL) {
		NECPLOG0(LOG_ERR, "Got a NULL session");

	if (mbuf_pkthdr_len(packet) < sizeof(header)) {
		NECPLOG(LOG_ERR, "Got a bad packet, length (%lu) < sizeof header (%lu)", mbuf_pkthdr_len(packet), sizeof(header));

	error = mbuf_copydata(packet, 0, sizeof(header), &header);
		NECPLOG(LOG_ERR, "mbuf_copydata failed for the header: %d", error);

	if (session->proc_locked) {
		// Verify that the calling process is allowed to send messages
		proc_getexecutableuuid(current_proc(), proc_uuid, sizeof(proc_uuid));
		if (uuid_compare(proc_uuid, session->proc_uuid) != 0) {
			necp_send_error_response(session, header.packet_type, header.message_id, NECP_ERROR_INVALID_PROCESS);
		// If not locked, update the proc_uuid and proc_pid of the session
		proc_getexecutableuuid(current_proc(), session->proc_uuid, sizeof(session->proc_uuid));
		session->proc_pid = proc_pid(current_proc());

	switch (header.packet_type) {
	case NECP_PACKET_TYPE_POLICY_ADD: {
		necp_handle_policy_add(session, header.message_id, packet, NULL, 0, sizeof(header), NULL);
	case NECP_PACKET_TYPE_POLICY_GET: {
		necp_handle_policy_get(session, header.message_id, packet, sizeof(header));
	case NECP_PACKET_TYPE_POLICY_DELETE: {
		necp_handle_policy_delete(session, header.message_id, packet, sizeof(header));
	case NECP_PACKET_TYPE_POLICY_APPLY_ALL: {
		necp_handle_policy_apply_all(session, header.message_id, packet, sizeof(header));
	case NECP_PACKET_TYPE_POLICY_LIST_ALL: {
		necp_handle_policy_list_all(session, header.message_id, packet, sizeof(header));
	case NECP_PACKET_TYPE_POLICY_DELETE_ALL: {
		necp_handle_policy_delete_all(session, header.message_id, packet, sizeof(header));
	case NECP_PACKET_TYPE_POLICY_DUMP_ALL: {
		necp_handle_policy_dump_all(session, header.message_id, packet, 0, 0, sizeof(header));
	case NECP_PACKET_TYPE_SET_SESSION_PRIORITY: {
		necp_handle_set_session_priority(session, header.message_id, packet, sizeof(header));
	case NECP_PACKET_TYPE_LOCK_SESSION_TO_PROC: {
		necp_handle_lock_session_to_proc(session, header.message_id, packet, sizeof(header));
	case NECP_PACKET_TYPE_REGISTER_SERVICE: {
		necp_handle_register_service(session, header.message_id, packet, sizeof(header));
	case NECP_PACKET_TYPE_UNREGISTER_SERVICE: {
		necp_handle_unregister_service(session, header.message_id, packet, sizeof(header));
		NECPLOG(LOG_ERR, "Received unknown message type %d", header.packet_type);
		necp_send_error_response(session, header.packet_type, header.message_id, NECP_ERROR_UNKNOWN_PACKET_TYPE);
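/*
 * necp_ctl_send() is the entry point for all session messages: it validates
 * the necp_packet_header, enforces the process lock when the session has been
 * locked to a process (see necp_handle_lock_session_to_proc below), and then
 * dispatches on packet_type to the individual necp_handle_* routines. Unknown
 * packet types are answered with NECP_ERROR_UNKNOWN_PACKET_TYPE rather than
 * being dropped silently.
 */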
necp_ctl_rcvd(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int flags)
#pragma unused(kctlref, unit, unitinfo, flags)

necp_ctl_getopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int opt, void *data, size_t *len)
#pragma unused(kctlref, unit, unitinfo, opt, data, len)

necp_ctl_setopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int opt, void *data, size_t len)
#pragma unused(kctlref, unit, unitinfo, opt, data, len)
// Session Management

static struct necp_session *
necp_create_session(void)
	struct necp_session *new_session = NULL;

	MALLOC(new_session, struct necp_session *, sizeof(*new_session), M_NECP, M_WAITOK | M_ZERO);
	if (new_session == NULL) {

	new_session->necp_fd_type = necp_fd_type_session;
	new_session->session_priority = NECP_SESSION_PRIORITY_UNKNOWN;
	new_session->dirty = FALSE;
	LIST_INIT(&new_session->policies);
	lck_mtx_init(&new_session->lock, necp_kernel_policy_mtx_grp, necp_kernel_policy_mtx_attr);

	lck_rw_lock_exclusive(&necp_kernel_policy_lock);

	// Find the next available control unit
	u_int32_t control_unit = 1;
	struct necp_session *next_session = NULL;
	TAILQ_FOREACH(next_session, &necp_session_list, chain) {
		if (next_session->control_unit > control_unit) {
			// Found a gap, grab this control unit
		// Try the next control unit, loop around
		control_unit = next_session->control_unit + 1;

	new_session->control_unit = control_unit;
	new_session->session_order = necp_allocate_new_session_order(new_session->session_priority, control_unit);

	if (next_session != NULL) {
		TAILQ_INSERT_BEFORE(next_session, new_session, chain);
		TAILQ_INSERT_TAIL(&necp_session_list, new_session, chain);

	necp_session_count++;
	lck_rw_done(&necp_kernel_policy_lock);

	NECPLOG(LOG_DEBUG, "Created NECP session, control unit %d", control_unit);

	return (new_session);
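/*
 * Control units are allocated by walking necp_session_list, which is kept
 * sorted by control unit: the first unused unit found in a gap is reused,
 * otherwise the new session takes the unit one past the last session's, and
 * the new session is inserted so the list stays sorted. The unit is then fed
 * into necp_allocate_new_session_order() together with the session priority.
 */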
necp_delete_session(struct necp_session *session)
	if (session != NULL) {
		struct necp_service_registration *service = NULL;
		struct necp_service_registration *temp_service = NULL;
		LIST_FOREACH_SAFE(service, &session->services, session_chain, temp_service) {
			LIST_REMOVE(service, session_chain);
			lck_rw_lock_exclusive(&necp_kernel_policy_lock);
			LIST_REMOVE(service, kernel_chain);
			lck_rw_done(&necp_kernel_policy_lock);
			FREE(service, M_NECP);

		NECPLOG0(LOG_DEBUG, "Deleted NECP session");

		lck_rw_lock_exclusive(&necp_kernel_policy_lock);
		TAILQ_REMOVE(&necp_session_list, session, chain);
		necp_session_count--;
		lck_rw_done(&necp_kernel_policy_lock);

		lck_mtx_destroy(&session->lock, necp_kernel_policy_mtx_grp);
		FREE(session, M_NECP);
// Session Policy Management

static inline u_int8_t
necp_policy_result_get_type_from_buffer(u_int8_t *buffer, u_int32_t length)
	return ((buffer && length >= sizeof(u_int8_t)) ? buffer[0] : 0);

static inline u_int32_t
necp_policy_result_get_parameter_length_from_buffer(u_int8_t *buffer, u_int32_t length)
	return ((buffer && length > sizeof(u_int8_t)) ? (length - sizeof(u_int8_t)) : 0);

static inline u_int8_t *
necp_policy_result_get_parameter_pointer_from_buffer(u_int8_t *buffer, u_int32_t length)
	return ((buffer && length > sizeof(u_int8_t)) ? (buffer + sizeof(u_int8_t)) : NULL);

necp_policy_result_requires_route_rules(u_int8_t *buffer, u_int32_t length)
	u_int8_t type = necp_policy_result_get_type_from_buffer(buffer, length);
	if (type == NECP_POLICY_RESULT_ROUTE_RULES) {

necp_address_is_valid(struct sockaddr *address)
	if (address->sa_family == AF_INET) {
		return (address->sa_len == sizeof(struct sockaddr_in));
	} else if (address->sa_family == AF_INET6) {
		return (address->sa_len == sizeof(struct sockaddr_in6));
necp_policy_result_is_valid(u_int8_t *buffer, u_int32_t length)
	bool validated = FALSE;
	u_int8_t type = necp_policy_result_get_type_from_buffer(buffer, length);
	u_int32_t parameter_length = necp_policy_result_get_parameter_length_from_buffer(buffer, length);
	case NECP_POLICY_RESULT_PASS: {
	case NECP_POLICY_RESULT_SKIP: {
		if (parameter_length >= sizeof(u_int32_t)) {
	case NECP_POLICY_RESULT_DROP: {
	case NECP_POLICY_RESULT_SOCKET_DIVERT: {
		if (parameter_length >= sizeof(u_int32_t)) {
	case NECP_POLICY_RESULT_SOCKET_SCOPED: {
		if (parameter_length > 0) {
	case NECP_POLICY_RESULT_IP_TUNNEL: {
		if (parameter_length > sizeof(u_int32_t)) {
	case NECP_POLICY_RESULT_SOCKET_FILTER: {
		if (parameter_length >= sizeof(u_int32_t)) {
	case NECP_POLICY_RESULT_ROUTE_RULES: {
	case NECP_POLICY_RESULT_TRIGGER:
	case NECP_POLICY_RESULT_TRIGGER_IF_NEEDED:
	case NECP_POLICY_RESULT_TRIGGER_SCOPED:
	case NECP_POLICY_RESULT_NO_TRIGGER_SCOPED:
	case NECP_POLICY_RESULT_USE_NETAGENT: {
		if (parameter_length >= sizeof(uuid_t)) {
	NECPLOG(LOG_DEBUG, "Policy result type %d, valid %d", type, validated);
static inline u_int8_t
necp_policy_condition_get_type_from_buffer(u_int8_t *buffer, u_int32_t length)
	return ((buffer && length >= sizeof(u_int8_t)) ? buffer[0] : 0);

static inline u_int8_t
necp_policy_condition_get_flags_from_buffer(u_int8_t *buffer, u_int32_t length)
	return ((buffer && length >= (2 * sizeof(u_int8_t))) ? buffer[1] : 0);

static inline u_int32_t
necp_policy_condition_get_value_length_from_buffer(u_int8_t *buffer, u_int32_t length)
	return ((buffer && length >= (2 * sizeof(u_int8_t))) ? (length - (2 * sizeof(u_int8_t))) : 0);

static inline u_int8_t *
necp_policy_condition_get_value_pointer_from_buffer(u_int8_t *buffer, u_int32_t length)
	return ((buffer && length > (2 * sizeof(u_int8_t))) ? (buffer + (2 * sizeof(u_int8_t))) : NULL);

necp_policy_condition_is_default(u_int8_t *buffer, u_int32_t length)
	return (necp_policy_condition_get_type_from_buffer(buffer, length) == NECP_POLICY_CONDITION_DEFAULT);

necp_policy_condition_is_application(u_int8_t *buffer, u_int32_t length)
	return (necp_policy_condition_get_type_from_buffer(buffer, length) == NECP_POLICY_CONDITION_APPLICATION);

necp_policy_condition_is_real_application(u_int8_t *buffer, u_int32_t length)
	return (necp_policy_condition_get_type_from_buffer(buffer, length) == NECP_POLICY_CONDITION_REAL_APPLICATION);

necp_policy_condition_requires_application(u_int8_t *buffer, u_int32_t length)
	u_int8_t type = necp_policy_condition_get_type_from_buffer(buffer, length);
	return (type == NECP_POLICY_CONDITION_REAL_APPLICATION);

necp_policy_condition_requires_real_application(u_int8_t *buffer, u_int32_t length)
	u_int8_t type = necp_policy_condition_get_type_from_buffer(buffer, length);
	return (type == NECP_POLICY_CONDITION_ENTITLEMENT);
necp_policy_condition_is_valid(u_int8_t *buffer, u_int32_t length, u_int8_t policy_result_type)
	bool validated = FALSE;
	bool result_cannot_have_ip_layer = (policy_result_type == NECP_POLICY_RESULT_SOCKET_DIVERT ||
	    policy_result_type == NECP_POLICY_RESULT_SOCKET_FILTER ||
	    policy_result_type == NECP_POLICY_RESULT_TRIGGER ||
	    policy_result_type == NECP_POLICY_RESULT_TRIGGER_IF_NEEDED ||
	    policy_result_type == NECP_POLICY_RESULT_TRIGGER_SCOPED ||
	    policy_result_type == NECP_POLICY_RESULT_NO_TRIGGER_SCOPED ||
	    policy_result_type == NECP_POLICY_RESULT_SOCKET_SCOPED ||
	    policy_result_type == NECP_POLICY_RESULT_ROUTE_RULES ||
	    policy_result_type == NECP_POLICY_RESULT_USE_NETAGENT) ? TRUE : FALSE;
	u_int32_t condition_length = necp_policy_condition_get_value_length_from_buffer(buffer, length);
	u_int8_t *condition_value = necp_policy_condition_get_value_pointer_from_buffer(buffer, length);
	u_int8_t type = necp_policy_condition_get_type_from_buffer(buffer, length);
	u_int8_t flags = necp_policy_condition_get_flags_from_buffer(buffer, length);
	case NECP_POLICY_CONDITION_APPLICATION:
	case NECP_POLICY_CONDITION_REAL_APPLICATION: {
		if (!(flags & NECP_POLICY_CONDITION_FLAGS_NEGATIVE) &&
		    condition_length >= sizeof(uuid_t) &&
		    condition_value != NULL &&
		    !uuid_is_null(condition_value)) {
	case NECP_POLICY_CONDITION_DOMAIN:
	case NECP_POLICY_CONDITION_ACCOUNT:
	case NECP_POLICY_CONDITION_BOUND_INTERFACE: {
		if (condition_length > 0) {
	case NECP_POLICY_CONDITION_TRAFFIC_CLASS: {
		if (condition_length >= sizeof(struct necp_policy_condition_tc_range)) {
	case NECP_POLICY_CONDITION_DEFAULT:
	case NECP_POLICY_CONDITION_ALL_INTERFACES:
	case NECP_POLICY_CONDITION_ENTITLEMENT: {
		if (!(flags & NECP_POLICY_CONDITION_FLAGS_NEGATIVE)) {
	case NECP_POLICY_CONDITION_IP_PROTOCOL: {
		if (condition_length >= sizeof(u_int16_t)) {
	case NECP_POLICY_CONDITION_PID: {
		if (condition_length >= sizeof(pid_t) &&
		    condition_value != NULL &&
		    *((pid_t *)(void *)condition_value) != 0) {
	case NECP_POLICY_CONDITION_UID: {
		if (condition_length >= sizeof(uid_t)) {
	case NECP_POLICY_CONDITION_LOCAL_ADDR:
	case NECP_POLICY_CONDITION_REMOTE_ADDR: {
		if (!result_cannot_have_ip_layer && condition_length >= sizeof(struct necp_policy_condition_addr) &&
		    necp_address_is_valid(&((struct necp_policy_condition_addr *)(void *)condition_value)->address.sa)) {
	case NECP_POLICY_CONDITION_LOCAL_ADDR_RANGE:
	case NECP_POLICY_CONDITION_REMOTE_ADDR_RANGE: {
		if (!result_cannot_have_ip_layer && condition_length >= sizeof(struct necp_policy_condition_addr_range) &&
		    necp_address_is_valid(&((struct necp_policy_condition_addr_range *)(void *)condition_value)->start_address.sa) &&
		    necp_address_is_valid(&((struct necp_policy_condition_addr_range *)(void *)condition_value)->end_address.sa)) {
	NECPLOG(LOG_DEBUG, "Policy condition type %d, valid %d", type, validated);
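/*
 * The result_cannot_have_ip_layer flag above rejects address and address-range
 * conditions for result types that apply only at the socket layer (divert,
 * filter, scoped, the trigger variants, route rules, netagent); presumably
 * those results cannot be expressed as IP-layer policies that would match on
 * local or remote addresses.
 */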
necp_policy_route_rule_is_default(u_int8_t *buffer, u_int32_t length)
	return (necp_policy_condition_get_value_length_from_buffer(buffer, length) == 0 &&
	    necp_policy_condition_get_flags_from_buffer(buffer, length) == 0);

necp_policy_route_rule_is_valid(u_int8_t *buffer, u_int32_t length)
	bool validated = FALSE;
	u_int8_t type = necp_policy_condition_get_type_from_buffer(buffer, length);
	case NECP_ROUTE_RULE_ALLOW_INTERFACE: {
	case NECP_ROUTE_RULE_DENY_INTERFACE: {
	case NECP_ROUTE_RULE_QOS_MARKING: {
	NECPLOG(LOG_DEBUG, "Policy route rule type %d, valid %d", type, validated);
necp_get_posix_error_for_necp_error(int response_error)
	switch (response_error) {
	case NECP_ERROR_UNKNOWN_PACKET_TYPE:
	case NECP_ERROR_INVALID_TLV:
	case NECP_ERROR_POLICY_RESULT_INVALID:
	case NECP_ERROR_POLICY_CONDITIONS_INVALID:
	case NECP_ERROR_ROUTE_RULES_INVALID: {
	case NECP_ERROR_POLICY_ID_NOT_FOUND: {
	case NECP_ERROR_INVALID_PROCESS: {
	case NECP_ERROR_INTERNAL:
necp_handle_set_session_priority(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
	struct necp_session_policy *policy = NULL;
	struct necp_session_policy *temp_policy = NULL;
	u_int32_t response_error = NECP_ERROR_INTERNAL;
	u_int32_t requested_session_priority = NECP_SESSION_PRIORITY_UNKNOWN;

	error = necp_get_tlv(packet, NULL, 0, offset, NECP_TLV_SESSION_PRIORITY, sizeof(requested_session_priority), &requested_session_priority, NULL);
		NECPLOG(LOG_ERR, "Failed to get session priority: %d", error);
		response_error = NECP_ERROR_INVALID_TLV;

	if (session == NULL) {
		NECPLOG0(LOG_ERR, "Failed to find session");
		response_error = NECP_ERROR_INTERNAL;

	// Enforce special session priorities with entitlements
	if (requested_session_priority == NECP_SESSION_PRIORITY_CONTROL ||
	    requested_session_priority == NECP_SESSION_PRIORITY_PRIVILEGED_TUNNEL) {
		errno_t cred_result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES, 0);
		if (cred_result != 0) {
			NECPLOG(LOG_ERR, "Session does not hold necessary entitlement to claim priority level %d", requested_session_priority);

	if (session->session_priority != requested_session_priority) {
		session->session_priority = requested_session_priority;
		session->session_order = necp_allocate_new_session_order(session->session_priority, session->control_unit);
		session->dirty = TRUE;

		// Mark all policies as needing updates
		LIST_FOREACH_SAFE(policy, &session->policies, chain, temp_policy) {
			policy->pending_update = TRUE;

	necp_send_success_response(session, NECP_PACKET_TYPE_SET_SESSION_PRIORITY, message_id);

	necp_send_error_response(session, NECP_PACKET_TYPE_SET_SESSION_PRIORITY, message_id, response_error);
necp_handle_lock_session_to_proc(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
#pragma unused(packet, offset)
	// proc_uuid already filled out
	session->proc_locked = TRUE;
	necp_send_success_response(session, NECP_PACKET_TYPE_LOCK_SESSION_TO_PROC, message_id);
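/*
 * Once a session is locked to its process, necp_ctl_send() compares the
 * executable UUID of the calling process against session->proc_uuid and
 * rejects messages from any other process with NECP_ERROR_INVALID_PROCESS.
 * None of the packet types dispatched above clears proc_locked again.
 */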
necp_handle_register_service(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
	struct necp_service_registration *new_service = NULL;
	u_int32_t response_error = NECP_ERROR_INTERNAL;
	uuid_t service_uuid;
	uuid_clear(service_uuid);

	if (session == NULL) {
		NECPLOG0(LOG_ERR, "Failed to find session");
		response_error = NECP_ERROR_INTERNAL;

	// Enforce entitlements
	errno_t cred_result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES, 0);
	if (cred_result != 0) {
		NECPLOG0(LOG_ERR, "Session does not hold necessary entitlement to register service");

	// Read service uuid
	error = necp_get_tlv(packet, NULL, 0, offset, NECP_TLV_SERVICE_UUID, sizeof(uuid_t), service_uuid, NULL);
		NECPLOG(LOG_ERR, "Failed to get service UUID: %d", error);
		response_error = NECP_ERROR_INVALID_TLV;

	MALLOC(new_service, struct necp_service_registration *, sizeof(*new_service), M_NECP, M_WAITOK);
	if (new_service == NULL) {
		NECPLOG0(LOG_ERR, "Failed to allocate service registration");
		response_error = NECP_ERROR_INTERNAL;

	lck_rw_lock_exclusive(&necp_kernel_policy_lock);
	memset(new_service, 0, sizeof(*new_service));
	new_service->service_id = necp_create_uuid_service_id_mapping(service_uuid);
	LIST_INSERT_HEAD(&session->services, new_service, session_chain);
	LIST_INSERT_HEAD(&necp_registered_service_list, new_service, kernel_chain);
	lck_rw_done(&necp_kernel_policy_lock);

	necp_send_success_response(session, NECP_PACKET_TYPE_REGISTER_SERVICE, message_id);

	necp_send_error_response(session, NECP_PACKET_TYPE_REGISTER_SERVICE, message_id, response_error);
necp_handle_unregister_service(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
	struct necp_service_registration *service = NULL;
	struct necp_service_registration *temp_service = NULL;
	u_int32_t response_error = NECP_ERROR_INTERNAL;
	struct necp_uuid_id_mapping *mapping = NULL;
	uuid_t service_uuid;
	uuid_clear(service_uuid);

	if (session == NULL) {
		NECPLOG0(LOG_ERR, "Failed to find session");
		response_error = NECP_ERROR_INTERNAL;

	// Read service uuid
	error = necp_get_tlv(packet, NULL, 0, offset, NECP_TLV_SERVICE_UUID, sizeof(uuid_t), service_uuid, NULL);
		NECPLOG(LOG_ERR, "Failed to get service UUID: %d", error);
		response_error = NECP_ERROR_INVALID_TLV;

	// Remove all matching services for this session
	lck_rw_lock_exclusive(&necp_kernel_policy_lock);
	mapping = necp_uuid_lookup_service_id_locked(service_uuid);
	if (mapping != NULL) {
		LIST_FOREACH_SAFE(service, &session->services, session_chain, temp_service) {
			if (service->service_id == mapping->id) {
				LIST_REMOVE(service, session_chain);
				LIST_REMOVE(service, kernel_chain);
				FREE(service, M_NECP);
		necp_remove_uuid_service_id_mapping(service_uuid);
	lck_rw_done(&necp_kernel_policy_lock);

	necp_send_success_response(session, NECP_PACKET_TYPE_UNREGISTER_SERVICE, message_id);

	necp_send_error_response(session, NECP_PACKET_TYPE_UNREGISTER_SERVICE, message_id, response_error);
static necp_policy_id
necp_handle_policy_add(struct necp_session *session, u_int32_t message_id, mbuf_t packet,
    u_int8_t *tlv_buffer, size_t tlv_buffer_length, int offset, int *return_error)
	bool has_default_condition = FALSE;
	bool has_non_default_condition = FALSE;
	bool has_application_condition = FALSE;
	bool has_real_application_condition = FALSE;
	bool requires_application_condition = FALSE;
	bool requires_real_application_condition = FALSE;
	u_int8_t *conditions_array = NULL;
	u_int32_t conditions_array_size = 0;
	int conditions_array_cursor;

	bool has_default_route_rule = FALSE;
	u_int8_t *route_rules_array = NULL;
	u_int32_t route_rules_array_size = 0;
	int route_rules_array_cursor;

	u_int32_t response_error = NECP_ERROR_INTERNAL;

	necp_policy_order order = 0;
	struct necp_session_policy *policy = NULL;
	u_int8_t *policy_result = NULL;
	u_int32_t policy_result_size = 0;

	// Read policy order
	error = necp_get_tlv(packet, tlv_buffer, tlv_buffer_length, offset, NECP_TLV_POLICY_ORDER, sizeof(order), &order, NULL);
		NECPLOG(LOG_ERR, "Failed to get policy order: %d", error);
		response_error = NECP_ERROR_INVALID_TLV;

	// Read policy result
	cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, offset, NECP_TLV_POLICY_RESULT, &error, 0);
	error = necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &policy_result_size);
	if (error || policy_result_size == 0) {
		NECPLOG(LOG_ERR, "Failed to get policy result length: %d", error);
		response_error = NECP_ERROR_INVALID_TLV;

	if (policy_result_size > NECP_MAX_POLICY_RESULT_SIZE) {
		NECPLOG(LOG_ERR, "Policy result length too large: %u", policy_result_size);
		response_error = NECP_ERROR_INVALID_TLV;

	MALLOC(policy_result, u_int8_t *, policy_result_size, M_NECP, M_WAITOK);
	if (policy_result == NULL) {
		NECPLOG(LOG_ERR, "Failed to allocate a policy result buffer (size %d)", policy_result_size);
		response_error = NECP_ERROR_INTERNAL;

	error = necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, policy_result_size, policy_result, NULL);
		NECPLOG(LOG_ERR, "Failed to get policy result: %d", error);
		response_error = NECP_ERROR_POLICY_RESULT_INVALID;

	if (!necp_policy_result_is_valid(policy_result, policy_result_size)) {
		NECPLOG0(LOG_ERR, "Failed to validate policy result");
		response_error = NECP_ERROR_POLICY_RESULT_INVALID;

	if (necp_policy_result_requires_route_rules(policy_result, policy_result_size)) {
		// Read route rules conditions
		for (cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, offset, NECP_TLV_ROUTE_RULE, &error, 0);
		    cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, cursor, NECP_TLV_ROUTE_RULE, &error, 1)) {
			u_int32_t route_rule_size = 0;
			necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &route_rule_size);
			if (route_rule_size > 0) {
				route_rules_array_size += (sizeof(u_int8_t) + sizeof(u_int32_t) + route_rule_size);

		if (route_rules_array_size == 0) {
			NECPLOG0(LOG_ERR, "Failed to get policy route rules");
			response_error = NECP_ERROR_INVALID_TLV;

		if (route_rules_array_size > NECP_MAX_ROUTE_RULES_ARRAY_SIZE) {
			NECPLOG(LOG_ERR, "Route rules length too large: %u", route_rules_array_size);
			response_error = NECP_ERROR_INVALID_TLV;

		MALLOC(route_rules_array, u_int8_t *, route_rules_array_size, M_NECP, M_WAITOK);
		if (route_rules_array == NULL) {
			NECPLOG(LOG_ERR, "Failed to allocate a policy route rules array (size %d)", route_rules_array_size);
			response_error = NECP_ERROR_INTERNAL;

		route_rules_array_cursor = 0;
		for (cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, offset, NECP_TLV_ROUTE_RULE, &error, 0);
		    cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, cursor, NECP_TLV_ROUTE_RULE, &error, 1)) {
			u_int8_t route_rule_type = NECP_TLV_ROUTE_RULE;
			u_int32_t route_rule_size = 0;
			necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &route_rule_size);
			if (route_rule_size > 0 && route_rule_size <= (route_rules_array_size - route_rules_array_cursor)) {
				memcpy((route_rules_array + route_rules_array_cursor), &route_rule_type, sizeof(route_rule_type));
				route_rules_array_cursor += sizeof(route_rule_type);

				memcpy((route_rules_array + route_rules_array_cursor), &route_rule_size, sizeof(route_rule_size));
				route_rules_array_cursor += sizeof(route_rule_size);

				necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, route_rule_size, (route_rules_array + route_rules_array_cursor), NULL);

				if (!necp_policy_route_rule_is_valid((route_rules_array + route_rules_array_cursor), route_rule_size)) {
					NECPLOG0(LOG_ERR, "Failed to validate policy route rule");
					response_error = NECP_ERROR_ROUTE_RULES_INVALID;

				if (necp_policy_route_rule_is_default((route_rules_array + route_rules_array_cursor), route_rule_size)) {
					if (has_default_route_rule) {
						NECPLOG0(LOG_ERR, "Failed to validate route rule; contained multiple default route rules");
						response_error = NECP_ERROR_ROUTE_RULES_INVALID;
					has_default_route_rule = TRUE;

				route_rules_array_cursor += route_rule_size;

	// Read policy conditions
	for (cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, offset, NECP_TLV_POLICY_CONDITION, &error, 0);
	    cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, cursor, NECP_TLV_POLICY_CONDITION, &error, 1)) {
		u_int32_t condition_size = 0;
		necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &condition_size);

		if (condition_size > 0) {
			conditions_array_size += (sizeof(u_int8_t) + sizeof(u_int32_t) + condition_size);

	if (conditions_array_size == 0) {
		NECPLOG0(LOG_ERR, "Failed to get policy conditions");
		response_error = NECP_ERROR_INVALID_TLV;

	if (conditions_array_size > NECP_MAX_CONDITIONS_ARRAY_SIZE) {
		NECPLOG(LOG_ERR, "Conditions length too large: %u", conditions_array_size);
		response_error = NECP_ERROR_INVALID_TLV;

	MALLOC(conditions_array, u_int8_t *, conditions_array_size, M_NECP, M_WAITOK);
	if (conditions_array == NULL) {
		NECPLOG(LOG_ERR, "Failed to allocate a policy conditions array (size %d)", conditions_array_size);
		response_error = NECP_ERROR_INTERNAL;

	conditions_array_cursor = 0;
	for (cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, offset, NECP_TLV_POLICY_CONDITION, &error, 0);
	    cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, cursor, NECP_TLV_POLICY_CONDITION, &error, 1)) {
		u_int8_t condition_type = NECP_TLV_POLICY_CONDITION;
		u_int32_t condition_size = 0;
		necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &condition_size);
		if (condition_size > 0 && condition_size <= (conditions_array_size - conditions_array_cursor)) {
			memcpy((conditions_array + conditions_array_cursor), &condition_type, sizeof(condition_type));
			conditions_array_cursor += sizeof(condition_type);

			memcpy((conditions_array + conditions_array_cursor), &condition_size, sizeof(condition_size));
			conditions_array_cursor += sizeof(condition_size);

			necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, condition_size, (conditions_array + conditions_array_cursor), NULL);
			if (!necp_policy_condition_is_valid((conditions_array + conditions_array_cursor), condition_size, necp_policy_result_get_type_from_buffer(policy_result, policy_result_size))) {
				NECPLOG0(LOG_ERR, "Failed to validate policy condition");
				response_error = NECP_ERROR_POLICY_CONDITIONS_INVALID;

			if (necp_policy_condition_is_default((conditions_array + conditions_array_cursor), condition_size)) {
				has_default_condition = TRUE;
				has_non_default_condition = TRUE;
			if (has_default_condition && has_non_default_condition) {
				NECPLOG0(LOG_ERR, "Failed to validate conditions; contained default and non-default conditions");
				response_error = NECP_ERROR_POLICY_CONDITIONS_INVALID;

			if (necp_policy_condition_is_application((conditions_array + conditions_array_cursor), condition_size)) {
				has_application_condition = TRUE;

			if (necp_policy_condition_is_real_application((conditions_array + conditions_array_cursor), condition_size)) {
				has_real_application_condition = TRUE;

			if (necp_policy_condition_requires_application((conditions_array + conditions_array_cursor), condition_size)) {
				requires_application_condition = TRUE;

			if (necp_policy_condition_requires_real_application((conditions_array + conditions_array_cursor), condition_size)) {
				requires_real_application_condition = TRUE;

			conditions_array_cursor += condition_size;

	if (requires_application_condition && !has_application_condition) {
		NECPLOG0(LOG_ERR, "Failed to validate conditions; did not contain application condition");
		response_error = NECP_ERROR_POLICY_CONDITIONS_INVALID;

	if (requires_real_application_condition && !has_real_application_condition) {
		NECPLOG0(LOG_ERR, "Failed to validate conditions; did not contain real application condition");
		response_error = NECP_ERROR_POLICY_CONDITIONS_INVALID;

	if ((policy = necp_policy_create(session, order, conditions_array, conditions_array_size, route_rules_array, route_rules_array_size, policy_result, policy_result_size)) == NULL) {
		response_error = NECP_ERROR_INTERNAL;

	if (packet != NULL) {
		necp_send_policy_id_response(session, NECP_PACKET_TYPE_POLICY_ADD, message_id, policy->id);
	return (policy->id);

	if (policy_result != NULL) {
		FREE(policy_result, M_NECP);
	if (conditions_array != NULL) {
		FREE(conditions_array, M_NECP);
	if (route_rules_array != NULL) {
		FREE(route_rules_array, M_NECP);

	if (packet != NULL) {
		necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_ADD, message_id, response_error);
	if (return_error != NULL) {
		*return_error = necp_get_posix_error_for_necp_error(response_error);
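/*
 * necp_handle_policy_add() parses the client's request in two passes per TLV
 * class: a sizing pass that walks matching TLVs with necp_find_tlv() and
 * necp_get_tlv_at_offset(out_buffer == NULL) to compute the array size, and a
 * copy pass that re-walks them, prepends a type byte and length word, copies
 * the value, and validates each entry. Only after the order, result, route
 * rules, and conditions all validate does it call necp_policy_create(); on any
 * failure the partially built arrays are freed and an error response (or a
 * POSIX error via return_error) is produced instead.
 */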
necp_handle_policy_get(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
#pragma unused(offset)
	u_int8_t *response = NULL;
	u_int8_t *cursor = NULL;
	u_int32_t response_error = NECP_ERROR_INTERNAL;
	necp_policy_id policy_id = 0;
	u_int32_t order_tlv_size = 0;
	u_int32_t result_tlv_size = 0;
	u_int32_t response_size = 0;

	struct necp_session_policy *policy = NULL;

	error = necp_get_tlv(packet, NULL, 0, offset, NECP_TLV_POLICY_ID, sizeof(policy_id), &policy_id, NULL);
		NECPLOG(LOG_ERR, "Failed to get policy id: %d", error);
		response_error = NECP_ERROR_INVALID_TLV;

	policy = necp_policy_find(session, policy_id);
	if (policy == NULL || policy->pending_deletion) {
		NECPLOG(LOG_ERR, "Failed to find policy with id %d", policy_id);
		response_error = NECP_ERROR_POLICY_ID_NOT_FOUND;

	order_tlv_size = sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(necp_policy_order);
	result_tlv_size = (policy->result_size ? (sizeof(u_int8_t) + sizeof(u_int32_t) + policy->result_size) : 0);
	response_size = sizeof(struct necp_packet_header) + order_tlv_size + result_tlv_size + policy->conditions_size;
	MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK);
	if (response == NULL) {
		necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_GET, message_id, NECP_ERROR_INTERNAL);

	cursor = necp_buffer_write_packet_header(cursor, NECP_PACKET_TYPE_POLICY_GET, NECP_PACKET_FLAGS_RESPONSE, message_id);
	cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ORDER, sizeof(necp_policy_order), &policy->order, response, response_size);

	if (result_tlv_size) {
		cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_RESULT, policy->result_size, &policy->result, response, response_size);
	if (policy->conditions_size) {
		memcpy(((u_int8_t *)(void *)(cursor)), policy->conditions, policy->conditions_size);

	if (!necp_send_ctl_data(session, (u_int8_t *)response, response_size)) {
		NECPLOG0(LOG_ERR, "Failed to send response");

	FREE(response, M_NECP);

	necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_GET, message_id, response_error);
necp_handle_policy_delete(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
	u_int32_t response_error = NECP_ERROR_INTERNAL;
	necp_policy_id policy_id = 0;

	struct necp_session_policy *policy = NULL;

	error = necp_get_tlv(packet, NULL, 0, offset, NECP_TLV_POLICY_ID, sizeof(policy_id), &policy_id, NULL);
		NECPLOG(LOG_ERR, "Failed to get policy id: %d", error);
		response_error = NECP_ERROR_INVALID_TLV;

	policy = necp_policy_find(session, policy_id);
	if (policy == NULL || policy->pending_deletion) {
		NECPLOG(LOG_ERR, "Failed to find policy with id %d", policy_id);
		response_error = NECP_ERROR_POLICY_ID_NOT_FOUND;

	necp_policy_mark_for_deletion(session, policy);

	necp_send_success_response(session, NECP_PACKET_TYPE_POLICY_DELETE, message_id);

	necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_DELETE, message_id, response_error);
necp_handle_policy_apply_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
#pragma unused(packet, offset)
	necp_policy_apply_all(session);
	necp_send_success_response(session, NECP_PACKET_TYPE_POLICY_APPLY_ALL, message_id);
necp_handle_policy_list_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
#pragma unused(packet, offset)
	u_int32_t tlv_size = (sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(u_int32_t));
	u_int32_t response_size = 0;
	u_int8_t *response = NULL;
	u_int8_t *cursor = NULL;
	int num_policies = 0;
	int cur_policy_index = 0;
	struct necp_session_policy *policy;

	LIST_FOREACH(policy, &session->policies, chain) {
		if (!policy->pending_deletion) {

	// Create a response with one Policy ID TLV for each policy
	response_size = sizeof(struct necp_packet_header) + num_policies * tlv_size;
	MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK);
	if (response == NULL) {
		necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_LIST_ALL, message_id, NECP_ERROR_INTERNAL);

	cursor = necp_buffer_write_packet_header(cursor, NECP_PACKET_TYPE_POLICY_LIST_ALL, NECP_PACKET_FLAGS_RESPONSE, message_id);

	LIST_FOREACH(policy, &session->policies, chain) {
		if (!policy->pending_deletion && cur_policy_index < num_policies) {
			cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ID, sizeof(u_int32_t), &policy->id, response, response_size);

	if (!necp_send_ctl_data(session, (u_int8_t *)response, response_size)) {
		NECPLOG0(LOG_ERR, "Failed to send response");

	FREE(response, M_NECP);
necp_handle_policy_delete_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
#pragma unused(packet, offset)
	necp_policy_mark_all_for_deletion(session);
	necp_send_success_response(session, NECP_PACKET_TYPE_POLICY_DELETE_ALL, message_id);
static necp_policy_id
necp_policy_get_new_id(void)
	necp_policy_id newid = 0;

	lck_rw_lock_exclusive(&necp_kernel_policy_lock);

	necp_last_policy_id++;
	if (necp_last_policy_id < 1) {
		necp_last_policy_id = 1;

	newid = necp_last_policy_id;
	lck_rw_done(&necp_kernel_policy_lock);

	NECPLOG0(LOG_DEBUG, "Allocate policy id failed.\n");
 * For the policy dump response this is the structure:
 *
 * <NECP_PACKET_HEADER>
 * {
 *     type : NECP_TLV_POLICY_DUMP
 *     {
 *         type : NECP_TLV_POLICY_ID
 *         type : NECP_TLV_POLICY_ORDER
 *         type : NECP_TLV_POLICY_RESULT_STRING
 *         type : NECP_TLV_POLICY_OWNER
 *         type : NECP_TLV_POLICY_CONDITION
 *         {
 *             type : NECP_POLICY_CONDITION_ALL_INTERFACES
 *             type : NECP_POLICY_CONDITION_BOUND_INTERFACES
 *         }
 *     }
 *     type : NECP_TLV_POLICY_DUMP
 *     {
 *         type : NECP_TLV_POLICY_ID
 *         type : NECP_TLV_POLICY_ORDER
 *         type : NECP_TLV_POLICY_RESULT_STRING
 *         type : NECP_TLV_POLICY_OWNER
 *         type : NECP_TLV_POLICY_CONDITION
 *         {
 *             type : NECP_POLICY_CONDITION_ALL_INTERFACES
 *             type : NECP_POLICY_CONDITION_BOUND_INTERFACES
 *         }
 *     }
 * }
necp_handle_policy_dump_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet,
    user_addr_t out_buffer, size_t out_buffer_length, int offset)
#pragma unused(offset)
	struct necp_kernel_socket_policy *policy = NULL;
	int policy_count = 0;
	u_int8_t **tlv_buffer_pointers = NULL;
	u_int32_t *tlv_buffer_lengths = NULL;
	u_int32_t total_tlv_len = 0;
	u_int8_t *result_buf = NULL;
	u_int8_t *result_buf_cursor = result_buf;
	char result_string[MAX_RESULT_STRING_LEN];
	char proc_name_string[MAXCOMLEN + 1];

	bool error_occured = false;
	u_int32_t response_error = NECP_ERROR_INTERNAL;

#define REPORT_ERROR(error) error_occured = true; \
	response_error = error; \

#define UNLOCK_AND_REPORT_ERROR(lock, error) lck_rw_done(lock); \

	errno_t cred_result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES, 0);
	if (cred_result != 0) {
		NECPLOG0(LOG_ERR, "Session does not hold the necessary entitlement to get Network Extension Policy information");
		REPORT_ERROR(NECP_ERROR_INTERNAL);

	lck_rw_lock_shared(&necp_kernel_policy_lock);

	NECPLOG0(LOG_DEBUG, "Gathering policies");

	policy_count = necp_kernel_application_policies_count;

	MALLOC(tlv_buffer_pointers, u_int8_t **, sizeof(u_int8_t *) * policy_count, M_NECP, M_NOWAIT | M_ZERO);
	if (tlv_buffer_pointers == NULL) {
		NECPLOG(LOG_DEBUG, "Failed to allocate tlv_buffer_pointers (%u bytes)", sizeof(u_int8_t *) * policy_count);
		UNLOCK_AND_REPORT_ERROR(&necp_kernel_policy_lock, NECP_ERROR_INTERNAL);

	MALLOC(tlv_buffer_lengths, u_int32_t *, sizeof(u_int32_t) * policy_count, M_NECP, M_NOWAIT | M_ZERO);
	if (tlv_buffer_lengths == NULL) {
		NECPLOG(LOG_DEBUG, "Failed to allocate tlv_buffer_lengths (%u bytes)", sizeof(u_int32_t) * policy_count);
		UNLOCK_AND_REPORT_ERROR(&necp_kernel_policy_lock, NECP_ERROR_INTERNAL);
	for (policy_i = 0; necp_kernel_socket_policies_app_layer_map != NULL && necp_kernel_socket_policies_app_layer_map[policy_i] != NULL; policy_i++) {
		policy = necp_kernel_socket_policies_app_layer_map[policy_i];

		memset(result_string, 0, MAX_RESULT_STRING_LEN);
		memset(proc_name_string, 0, MAXCOMLEN + 1);

		necp_get_result_description(result_string, policy->result, policy->result_parameter);
		proc_name(policy->session_pid, proc_name_string, MAXCOMLEN);

		u_int16_t proc_name_len = strlen(proc_name_string) + 1;
		u_int16_t result_string_len = strlen(result_string) + 1;

		NECPLOG(LOG_DEBUG, "Policy: process: %s, result: %s", proc_name_string, result_string);

		u_int32_t total_allocated_bytes = sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(policy->id) +	// NECP_TLV_POLICY_ID
		    sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(policy->order) +	// NECP_TLV_POLICY_ORDER
		    sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(policy->session_order) +	// NECP_TLV_POLICY_SESSION_ORDER
		    sizeof(u_int8_t) + sizeof(u_int32_t) + result_string_len +	// NECP_TLV_POLICY_RESULT_STRING
		    sizeof(u_int8_t) + sizeof(u_int32_t) + proc_name_len +	// NECP_TLV_POLICY_OWNER
		    sizeof(u_int8_t) + sizeof(u_int32_t);	// NECP_TLV_POLICY_CONDITION

		// We now traverse the condition_mask to see how much space we need to allocate
		u_int32_t condition_mask = policy->condition_mask;
		u_int8_t num_conditions = 0;
		struct necp_string_id_mapping *account_id_entry = NULL;
		char if_name[IFXNAMSIZ];
		u_int32_t condition_tlv_length = 0;
		memset(if_name, 0, sizeof(if_name));

		if (condition_mask == NECP_POLICY_CONDITION_DEFAULT) {
		if (condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES) {
		if (condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
			snprintf(if_name, IFXNAMSIZ, "%s%d", ifnet_name(policy->cond_bound_interface), ifnet_unit(policy->cond_bound_interface));
			condition_tlv_length += strlen(if_name) + 1;
		if (condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
			condition_tlv_length += sizeof(policy->cond_protocol);
		if (condition_mask & NECP_KERNEL_CONDITION_APP_ID) {
			condition_tlv_length += sizeof(uuid_t);
		if (condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
			condition_tlv_length += sizeof(uuid_t);
		if (condition_mask & NECP_KERNEL_CONDITION_DOMAIN) {
			u_int32_t domain_len = strlen(policy->cond_domain) + 1;
			condition_tlv_length += domain_len;
		if (condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID) {
			account_id_entry = necp_lookup_string_with_id_locked(&necp_account_id_list, policy->cond_account_id);
			u_int32_t account_id_len = 0;
			if (account_id_entry) {
				account_id_len = account_id_entry->string ? strlen(account_id_entry->string) + 1 : 0;
			condition_tlv_length += account_id_len;
		if (condition_mask & NECP_KERNEL_CONDITION_PID) {
			condition_tlv_length += sizeof(pid_t);
		if (condition_mask & NECP_KERNEL_CONDITION_UID) {
			condition_tlv_length += sizeof(uid_t);
		if (condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
			condition_tlv_length += sizeof(struct necp_policy_condition_tc_range);
		if (condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT) {
		if (condition_mask & NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT) {
			u_int32_t entitlement_len = strlen(policy->cond_custom_entitlement) + 1;
			condition_tlv_length += entitlement_len;
		if (condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
			if (condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
				condition_tlv_length += sizeof(struct necp_policy_condition_addr_range);
				condition_tlv_length += sizeof(struct necp_policy_condition_addr);
		if (condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
			if (condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
				condition_tlv_length += sizeof(struct necp_policy_condition_addr_range);
				condition_tlv_length += sizeof(struct necp_policy_condition_addr);

		condition_tlv_length += num_conditions * (sizeof(u_int8_t) + sizeof(u_int32_t)); // These are for the condition TLVs. The space for "value" is already accounted for above.
		total_allocated_bytes += condition_tlv_length;

		u_int8_t *tlv_buffer;
		MALLOC(tlv_buffer, u_int8_t *, total_allocated_bytes, M_NECP, M_NOWAIT | M_ZERO);
		if (tlv_buffer == NULL) {
			NECPLOG(LOG_DEBUG, "Failed to allocate tlv_buffer (%u bytes)", total_allocated_bytes);

		u_int8_t *cursor = tlv_buffer;
		cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ID, sizeof(policy->id), &policy->id, tlv_buffer, total_allocated_bytes);
		cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ORDER, sizeof(necp_policy_order), &policy->order, tlv_buffer, total_allocated_bytes);
		cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_SESSION_ORDER, sizeof(policy->session_order), &policy->session_order, tlv_buffer, total_allocated_bytes);
		cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_RESULT_STRING, result_string_len, result_string, tlv_buffer, total_allocated_bytes);
		cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_OWNER, proc_name_len, proc_name_string, tlv_buffer, total_allocated_bytes);
		u_int8_t q_cond_buf[N_QUICK]; // Minor optimization

		u_int8_t *cond_buf; // To be used for condition TLVs
		if (condition_tlv_length <= N_QUICK) {
			cond_buf = q_cond_buf;
			MALLOC(cond_buf, u_int8_t *, condition_tlv_length, M_NECP, M_NOWAIT);
			if (cond_buf == NULL) {
				NECPLOG(LOG_DEBUG, "Failed to allocate cond_buffer (%u bytes)", condition_tlv_length);
				FREE(tlv_buffer, M_NECP);

		memset(cond_buf, 0, condition_tlv_length);
		u_int8_t *cond_buf_cursor = cond_buf;
		if (condition_mask == NECP_POLICY_CONDITION_DEFAULT) {
			cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_DEFAULT, 0, "", cond_buf, condition_tlv_length);
		if (condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES) {
			cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_ALL_INTERFACES, 0, "", cond_buf, condition_tlv_length);
		if (condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
			cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_BOUND_INTERFACE, strlen(if_name) + 1,
			    if_name, cond_buf, condition_tlv_length);
		if (condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
			cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_IP_PROTOCOL, sizeof(policy->cond_protocol), &policy->cond_protocol,
			    cond_buf, condition_tlv_length);
		if (condition_mask & NECP_KERNEL_CONDITION_APP_ID) {
			struct necp_uuid_id_mapping *entry = necp_uuid_lookup_uuid_with_app_id_locked(policy->cond_app_id);
			if (entry != NULL) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_APPLICATION, sizeof(entry->uuid), entry->uuid,
				    cond_buf, condition_tlv_length);
		if (condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
			struct necp_uuid_id_mapping *entry = necp_uuid_lookup_uuid_with_app_id_locked(policy->cond_real_app_id);
			if (entry != NULL) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_REAL_APPLICATION, sizeof(entry->uuid), entry->uuid,
				    cond_buf, condition_tlv_length);
		if (condition_mask & NECP_KERNEL_CONDITION_DOMAIN) {
			cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_DOMAIN, strlen(policy->cond_domain) + 1, policy->cond_domain,
			    cond_buf, condition_tlv_length);
		if (condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID) {
			if (account_id_entry != NULL) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_ACCOUNT, strlen(account_id_entry->string) + 1, account_id_entry->string,
				    cond_buf, condition_tlv_length);
		if (condition_mask & NECP_KERNEL_CONDITION_PID) {
			cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_PID, sizeof(policy->cond_pid), &policy->cond_pid,
			    cond_buf, condition_tlv_length);
		if (condition_mask & NECP_KERNEL_CONDITION_UID) {
			cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_UID, sizeof(policy->cond_uid), &policy->cond_uid,
			    cond_buf, condition_tlv_length);
		if (condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
			cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_TRAFFIC_CLASS, sizeof(policy->cond_traffic_class), &policy->cond_traffic_class,
			    cond_buf, condition_tlv_length);
		if (condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT) {
			cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_ENTITLEMENT, 0, "",
			    cond_buf, condition_tlv_length);
		if (condition_mask & NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT) {
			cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_ENTITLEMENT, strlen(policy->cond_custom_entitlement) + 1, policy->cond_custom_entitlement,
			    cond_buf, condition_tlv_length);
		if (condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
			if (condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
				struct necp_policy_condition_addr_range range;
				memcpy(&range.start_address, &policy->cond_local_start, sizeof(policy->cond_local_start));
				memcpy(&range.end_address, &policy->cond_local_end, sizeof(policy->cond_local_end));
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_LOCAL_ADDR_RANGE, sizeof(range), &range,
				    cond_buf, condition_tlv_length);
				struct necp_policy_condition_addr addr;
				addr.prefix = policy->cond_local_prefix;
				memcpy(&addr.address, &policy->cond_local_start, sizeof(policy->cond_local_start));
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_LOCAL_ADDR, sizeof(addr), &addr,
				    cond_buf, condition_tlv_length);
		if (condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
			if (condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
				struct necp_policy_condition_addr_range range;
				memcpy(&range.start_address, &policy->cond_remote_start, sizeof(policy->cond_remote_start));
				memcpy(&range.end_address, &policy->cond_remote_end, sizeof(policy->cond_remote_end));
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_REMOTE_ADDR_RANGE, sizeof(range), &range,
				    cond_buf, condition_tlv_length);
				struct necp_policy_condition_addr addr;
				addr.prefix = policy->cond_remote_prefix;
				memcpy(&addr.address, &policy->cond_remote_start, sizeof(policy->cond_remote_start));
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_REMOTE_ADDR, sizeof(addr), &addr,
				    cond_buf, condition_tlv_length);

		cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_CONDITION, cond_buf_cursor - cond_buf, cond_buf, tlv_buffer, total_allocated_bytes);
		if (cond_buf != q_cond_buf) {
			FREE(cond_buf, M_NECP);
		tlv_buffer_pointers[policy_i] = tlv_buffer;
		tlv_buffer_lengths[policy_i] = (cursor - tlv_buffer);

		// This is the length of the TLV for NECP_TLV_POLICY_DUMP
		total_tlv_len += sizeof(u_int8_t) + sizeof(u_int32_t) + (cursor - tlv_buffer);

	lck_rw_done(&necp_kernel_policy_lock);

	if (packet != NULL) {
		u_int32_t total_result_length = sizeof(struct necp_packet_header) + total_tlv_len;

		// Allow malloc to wait, since the total buffer may be large and we are not holding any locks
		MALLOC(result_buf, u_int8_t *, total_result_length, M_NECP, M_WAITOK | M_ZERO);
		if (result_buf == NULL) {
			NECPLOG(LOG_DEBUG, "Failed to allocate result_buffer (%u bytes)", total_result_length);
			REPORT_ERROR(NECP_ERROR_INTERNAL);

		result_buf_cursor = result_buf;
		result_buf_cursor = necp_buffer_write_packet_header(result_buf_cursor, NECP_PACKET_TYPE_POLICY_DUMP_ALL, NECP_PACKET_FLAGS_RESPONSE, message_id);

		for (int i = 0; i < policy_count; i++) {
			if (tlv_buffer_pointers[i] != NULL) {
				result_buf_cursor = necp_buffer_write_tlv(result_buf_cursor, NECP_TLV_POLICY_DUMP, tlv_buffer_lengths[i], tlv_buffer_pointers[i], result_buf, total_result_length);

		if (!necp_send_ctl_data(session, result_buf, result_buf_cursor - result_buf)) {
			NECPLOG(LOG_ERR, "Failed to send response (%u bytes)", result_buf_cursor - result_buf);
			NECPLOG(LOG_ERR, "Sent data worth %u bytes. Total result buffer length was %u bytes", result_buf_cursor - result_buf, total_result_length);

	if (out_buffer != 0) {
		if (out_buffer_length < total_tlv_len + sizeof(u_int32_t)) {
			NECPLOG(LOG_DEBUG, "out_buffer_length too small (%u < %u)", out_buffer_length, total_tlv_len + sizeof(u_int32_t));
			REPORT_ERROR(NECP_ERROR_INVALID_TLV);

		// Allow malloc to wait, since the total buffer may be large and we are not holding any locks
		MALLOC(result_buf, u_int8_t *, total_tlv_len + sizeof(u_int32_t), M_NECP, M_WAITOK | M_ZERO);
		if (result_buf == NULL) {
			NECPLOG(LOG_DEBUG, "Failed to allocate result_buffer (%u bytes)", total_tlv_len + sizeof(u_int32_t));
			REPORT_ERROR(NECP_ERROR_INTERNAL);

		// Add four bytes for total length at the start
		memcpy(result_buf, &total_tlv_len, sizeof(u_int32_t));

		result_buf_cursor = result_buf + sizeof(u_int32_t);
		for (int i = 0; i < policy_count; i++) {
			if (tlv_buffer_pointers[i] != NULL) {
				result_buf_cursor = necp_buffer_write_tlv(result_buf_cursor, NECP_TLV_POLICY_DUMP, tlv_buffer_lengths[i], tlv_buffer_pointers[i],
				    result_buf, total_tlv_len + sizeof(u_int32_t));

		int copy_error = copyout(result_buf, out_buffer, total_tlv_len + sizeof(u_int32_t));
			NECPLOG(LOG_DEBUG, "Failed to copy out result_buffer (%u bytes)", total_tlv_len + sizeof(u_int32_t));
			REPORT_ERROR(NECP_ERROR_INTERNAL);

	if (error_occured) {
		if (packet != NULL) {
			if (!necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_DUMP_ALL, message_id, response_error)) {
				NECPLOG0(LOG_ERR, "Failed to send error response");
				NECPLOG0(LOG_ERR, "Sent error response");
		error_code = necp_get_posix_error_for_necp_error(response_error);

	if (result_buf != NULL) {
		FREE(result_buf, M_NECP);

	if (tlv_buffer_pointers != NULL) {
		for (int i = 0; i < policy_count; i++) {
			if (tlv_buffer_pointers[i] != NULL) {
				FREE(tlv_buffer_pointers[i], M_NECP);
				tlv_buffer_pointers[i] = NULL;
		FREE(tlv_buffer_pointers, M_NECP);

	if (tlv_buffer_lengths != NULL) {
		FREE(tlv_buffer_lengths, M_NECP);

#undef RESET_COND_BUF
#undef UNLOCK_AND_REPORT_ERROR

	return (error_code);
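/*
 * necp_handle_policy_dump_all() has two output paths: when invoked from the
 * control socket (packet != NULL) it frames the per-policy buffers as
 * NECP_TLV_POLICY_DUMP TLVs behind a packet header and sends them with
 * necp_send_ctl_data(); when invoked with a user buffer (out_buffer != 0) it
 * prefixes the same TLVs with a 4-byte total length and copies them out with
 * copyout(). In both cases the per-policy TLV buffers are built under the
 * shared policy lock with non-blocking allocations, and the large result
 * buffer is only allocated (with M_WAITOK) after the lock has been dropped.
 */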
static struct necp_session_policy *
necp_policy_create(struct necp_session *session, necp_policy_order order, u_int8_t *conditions_array, u_int32_t conditions_array_size, u_int8_t *route_rules_array, u_int32_t route_rules_array_size, u_int8_t *result, u_int32_t result_size)
	struct necp_session_policy *new_policy = NULL;
	struct necp_session_policy *tmp_policy = NULL;

	if (session == NULL || conditions_array == NULL || result == NULL || result_size == 0) {

	MALLOC_ZONE(new_policy, struct necp_session_policy *, sizeof(*new_policy), M_NECP_SESSION_POLICY, M_WAITOK);
	if (new_policy == NULL) {

	memset(new_policy, 0, sizeof(*new_policy)); // M_ZERO is not supported for MALLOC_ZONE
	new_policy->applied = FALSE;
	new_policy->pending_deletion = FALSE;
	new_policy->pending_update = FALSE;
	new_policy->order = order;
	new_policy->conditions = conditions_array;
	new_policy->conditions_size = conditions_array_size;
	new_policy->route_rules = route_rules_array;
	new_policy->route_rules_size = route_rules_array_size;
	new_policy->result = result;
	new_policy->result_size = result_size;
	new_policy->id = necp_policy_get_new_id();

	LIST_INSERT_SORTED_ASCENDING(&session->policies, new_policy, chain, order, tmp_policy);

	session->dirty = TRUE;

	NECPLOG(LOG_DEBUG, "Created NECP policy, order %d", order);

	return (new_policy);
static struct necp_session_policy *
necp_policy_find(struct necp_session *session, necp_policy_id policy_id)
{
	struct necp_session_policy *policy = NULL;
	if (policy_id == 0) {
		return (NULL);
	}

	LIST_FOREACH(policy, &session->policies, chain) {
		if (policy->id == policy_id) {
			return (policy);
		}
	}

	return (NULL);
}
static inline u_int8_t
necp_policy_get_result_type(struct necp_session_policy *policy)
{
	return (policy ? necp_policy_result_get_type_from_buffer(policy->result, policy->result_size) : 0);
}
static inline u_int32_t
necp_policy_get_result_parameter_length(struct necp_session_policy *policy)
{
	return (policy ? necp_policy_result_get_parameter_length_from_buffer(policy->result, policy->result_size) : 0);
}
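
// necp_policy_get_result_parameter: copies the parameter bytes of the policy's result TLV
// into the caller's buffer when that buffer is large enough; returns TRUE only if a copy
// was actually made.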
static bool
necp_policy_get_result_parameter(struct necp_session_policy *policy, u_int8_t *parameter_buffer, u_int32_t parameter_buffer_length)
{
	if (policy) {
		u_int32_t parameter_length = necp_policy_result_get_parameter_length_from_buffer(policy->result, policy->result_size);
		if (parameter_buffer_length >= parameter_length) {
			u_int8_t *parameter = necp_policy_result_get_parameter_pointer_from_buffer(policy->result, policy->result_size);
			if (parameter && parameter_buffer) {
				memcpy(parameter_buffer, parameter, parameter_length);
				return (TRUE);
			}
		}
	}

	return (FALSE);
}
static bool
necp_policy_mark_for_deletion(struct necp_session *session, struct necp_session_policy *policy)
{
	if (session == NULL || policy == NULL) {
		return (FALSE);
	}

	policy->pending_deletion = TRUE;
	session->dirty = TRUE;

	NECPLOG0(LOG_DEBUG, "Marked NECP policy for removal");
	return (TRUE);
}
static bool
necp_policy_mark_all_for_deletion(struct necp_session *session)
{
	struct necp_session_policy *policy = NULL;
	struct necp_session_policy *temp_policy = NULL;

	LIST_FOREACH_SAFE(policy, &session->policies, chain, temp_policy) {
		necp_policy_mark_for_deletion(session, policy);
	}

	return (TRUE);
}
static bool
necp_policy_delete(struct necp_session *session, struct necp_session_policy *policy)
{
	if (session == NULL || policy == NULL) {
		return (FALSE);
	}

	LIST_REMOVE(policy, chain);

	if (policy->result) {
		FREE(policy->result, M_NECP);
		policy->result = NULL;
	}

	if (policy->conditions) {
		FREE(policy->conditions, M_NECP);
		policy->conditions = NULL;
	}

	if (policy->route_rules) {
		FREE(policy->route_rules, M_NECP);
		policy->route_rules = NULL;
	}

	FREE_ZONE(policy, sizeof(*policy), M_NECP_SESSION_POLICY);

	NECPLOG0(LOG_DEBUG, "Removed NECP policy");
	return (TRUE);
}
static bool
necp_policy_unapply(struct necp_session_policy *policy)
{
	int i = 0;
	if (policy == NULL) {
		return (FALSE);
	}

	LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	// Release local uuid mappings
	if (!uuid_is_null(policy->applied_app_uuid)) {
		bool removed_mapping = FALSE;
		if (necp_remove_uuid_app_id_mapping(policy->applied_app_uuid, &removed_mapping, TRUE) && removed_mapping) {
			necp_uuid_app_id_mappings_dirty = TRUE;
			necp_num_uuid_app_id_mappings--;
		}
		uuid_clear(policy->applied_app_uuid);
	}
	if (!uuid_is_null(policy->applied_real_app_uuid)) {
		necp_remove_uuid_app_id_mapping(policy->applied_real_app_uuid, NULL, FALSE);
		uuid_clear(policy->applied_real_app_uuid);
	}
	if (!uuid_is_null(policy->applied_result_uuid)) {
		necp_remove_uuid_service_id_mapping(policy->applied_result_uuid);
		uuid_clear(policy->applied_result_uuid);
	}

	// Release string mappings
	if (policy->applied_account != NULL) {
		necp_remove_string_to_id_mapping(&necp_account_id_list, policy->applied_account);
		FREE(policy->applied_account, M_NECP);
		policy->applied_account = NULL;
	}

	// Release route rule
	if (policy->applied_route_rules_id != 0) {
		necp_remove_route_rule(&necp_route_rules, policy->applied_route_rules_id);
		policy->applied_route_rules_id = 0;
	}

	// Remove socket policies
	for (i = 0; i < MAX_KERNEL_SOCKET_POLICIES; i++) {
		if (policy->kernel_socket_policies[i] != 0) {
			necp_kernel_socket_policy_delete(policy->kernel_socket_policies[i]);
			policy->kernel_socket_policies[i] = 0;
		}
	}

	// Remove IP output policies
	for (i = 0; i < MAX_KERNEL_IP_OUTPUT_POLICIES; i++) {
		if (policy->kernel_ip_output_policies[i] != 0) {
			necp_kernel_ip_output_policy_delete(policy->kernel_ip_output_policies[i]);
			policy->kernel_ip_output_policies[i] = 0;
		}
	}

	policy->applied = FALSE;

	return (TRUE);
}
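
// Each session policy can generate up to four IP-output kernel policies. The suborders
// below distinguish the tunnel-loopback, policy-ID, and general-condition variants that
// necp_policy_apply() creates, and the packed structs describe the parameter layouts of
// the IP-tunnel and service (Trigger) results carried in the result TLV.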
#define NECP_KERNEL_POLICY_SUBORDER_ID_TUNNEL_CONDITION 0
#define NECP_KERNEL_POLICY_SUBORDER_NON_ID_TUNNEL_CONDITION 1
#define NECP_KERNEL_POLICY_SUBORDER_ID_CONDITION 2
#define NECP_KERNEL_POLICY_SUBORDER_NON_ID_CONDITIONS 3

struct necp_policy_result_ip_tunnel {
	u_int32_t secondary_result;
	char interface_name[IFXNAMSIZ];
} __attribute__((__packed__));

struct necp_policy_result_service {
	uuid_t identifier;
	u_int32_t data;
} __attribute__((__packed__));
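
// necp_policy_apply: ingests one session policy into kernel policies. The first pass walks
// the condition TLVs to build the condition masks and to decide whether the policy needs
// only socket-level matching or both socket and IP-output matching; the second pass
// switches on the result type to fill in the result parameter, and then the matching
// kernel socket and IP-output policies are added.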
static bool
necp_policy_apply(struct necp_session *session, struct necp_session_policy *policy)
{
	bool socket_only_conditions = FALSE;
	bool socket_ip_conditions = FALSE;

	bool socket_layer_non_id_conditions = FALSE;
	bool ip_output_layer_non_id_conditions = FALSE;
	bool ip_output_layer_non_id_only = FALSE;
	bool ip_output_layer_id_condition = FALSE;
	bool ip_output_layer_tunnel_condition_from_id = FALSE;
	bool ip_output_layer_tunnel_condition_from_non_id = FALSE;
	necp_kernel_policy_id cond_ip_output_layer_id = NECP_KERNEL_POLICY_ID_NONE;

	u_int32_t master_condition_mask = 0;
	u_int32_t master_condition_negated_mask = 0;
	ifnet_t cond_bound_interface = NULL;
	u_int32_t cond_account_id = 0;
	char *cond_domain = NULL;
	char *cond_custom_entitlement = NULL;
	pid_t cond_pid = 0;
	uid_t cond_uid = 0;
	necp_app_id cond_app_id = 0;
	necp_app_id cond_real_app_id = 0;
	struct necp_policy_condition_tc_range cond_traffic_class;
	cond_traffic_class.start_tc = 0;
	cond_traffic_class.end_tc = 0;
	u_int16_t cond_protocol = 0;
	union necp_sockaddr_union cond_local_start;
	union necp_sockaddr_union cond_local_end;
	u_int8_t cond_local_prefix = 0;
	union necp_sockaddr_union cond_remote_start;
	union necp_sockaddr_union cond_remote_end;
	u_int8_t cond_remote_prefix = 0;
	u_int32_t offset = 0;
	u_int8_t ultimate_result = 0;
	u_int32_t secondary_result = 0;
	necp_kernel_policy_result_parameter secondary_result_parameter;
	memset(&secondary_result_parameter, 0, sizeof(secondary_result_parameter));
	u_int32_t cond_last_interface_index = 0;
	necp_kernel_policy_result_parameter ultimate_result_parameter;
	memset(&ultimate_result_parameter, 0, sizeof(ultimate_result_parameter));

	if (policy == NULL) {
		return (FALSE);
	}

	LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	// Process conditions
	while (offset < policy->conditions_size) {
		u_int32_t length = 0;
		u_int8_t *value = necp_buffer_get_tlv_value(policy->conditions, offset, &length);

		u_int8_t condition_type = necp_policy_condition_get_type_from_buffer(value, length);
		u_int8_t condition_flags = necp_policy_condition_get_flags_from_buffer(value, length);
		bool condition_is_negative = condition_flags & NECP_POLICY_CONDITION_FLAGS_NEGATIVE;
		u_int32_t condition_length = necp_policy_condition_get_value_length_from_buffer(value, length);
		u_int8_t *condition_value = necp_policy_condition_get_value_pointer_from_buffer(value, length);
		switch (condition_type) {
			case NECP_POLICY_CONDITION_DEFAULT: {
				socket_ip_conditions = TRUE;
				break;
			}
			case NECP_POLICY_CONDITION_ALL_INTERFACES: {
				master_condition_mask |= NECP_KERNEL_CONDITION_ALL_INTERFACES;
				socket_ip_conditions = TRUE;
				break;
			}
			case NECP_POLICY_CONDITION_ENTITLEMENT: {
				if (condition_length > 0) {
					if (cond_custom_entitlement == NULL) {
						cond_custom_entitlement = necp_copy_string((char *)condition_value, condition_length);
						if (cond_custom_entitlement != NULL) {
							master_condition_mask |= NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT;
							socket_only_conditions = TRUE;
						}
					}
				} else {
					master_condition_mask |= NECP_KERNEL_CONDITION_ENTITLEMENT;
					socket_only_conditions = TRUE;
				}
				break;
			}
			case NECP_POLICY_CONDITION_DOMAIN: {
				// Make sure there is only one such rule
				if (condition_length > 0 && cond_domain == NULL) {
					cond_domain = necp_create_trimmed_domain((char *)condition_value, condition_length);
					if (cond_domain != NULL) {
						master_condition_mask |= NECP_KERNEL_CONDITION_DOMAIN;
						if (condition_is_negative) {
							master_condition_negated_mask |= NECP_KERNEL_CONDITION_DOMAIN;
						}
						socket_only_conditions = TRUE;
					}
				}
				break;
			}
			case NECP_POLICY_CONDITION_ACCOUNT: {
				// Make sure there is only one such rule
				if (condition_length > 0 && cond_account_id == 0 && policy->applied_account == NULL) {
					char *string = NULL;
					MALLOC(string, char *, condition_length + 1, M_NECP, M_WAITOK);
					if (string != NULL) {
						memcpy(string, condition_value, condition_length);
						string[condition_length] = 0;
						cond_account_id = necp_create_string_to_id_mapping(&necp_account_id_list, string);
						if (cond_account_id != 0) {
							policy->applied_account = string; // Save the string in parent policy
							master_condition_mask |= NECP_KERNEL_CONDITION_ACCOUNT_ID;
							if (condition_is_negative) {
								master_condition_negated_mask |= NECP_KERNEL_CONDITION_ACCOUNT_ID;
							}
							socket_only_conditions = TRUE;
						} else {
							FREE(string, M_NECP);
						}
					}
				}
				break;
			}
			case NECP_POLICY_CONDITION_APPLICATION: {
				// Make sure there is only one such rule, because we save the uuid in the policy
				if (condition_length >= sizeof(uuid_t) && cond_app_id == 0) {
					bool allocated_mapping = FALSE;
					uuid_t application_uuid;
					memcpy(application_uuid, condition_value, sizeof(uuid_t));
					cond_app_id = necp_create_uuid_app_id_mapping(application_uuid, &allocated_mapping, TRUE);
					if (cond_app_id != 0) {
						if (allocated_mapping) {
							necp_uuid_app_id_mappings_dirty = TRUE;
							necp_num_uuid_app_id_mappings++;
						}
						uuid_copy(policy->applied_app_uuid, application_uuid);
						master_condition_mask |= NECP_KERNEL_CONDITION_APP_ID;
						if (condition_is_negative) {
							master_condition_negated_mask |= NECP_KERNEL_CONDITION_APP_ID;
						}
						socket_only_conditions = TRUE;
					}
				}
				break;
			}
			case NECP_POLICY_CONDITION_REAL_APPLICATION: {
				// Make sure there is only one such rule, because we save the uuid in the policy
				if (condition_length >= sizeof(uuid_t) && cond_real_app_id == 0) {
					uuid_t real_application_uuid;
					memcpy(real_application_uuid, condition_value, sizeof(uuid_t));
					cond_real_app_id = necp_create_uuid_app_id_mapping(real_application_uuid, NULL, FALSE);
					if (cond_real_app_id != 0) {
						uuid_copy(policy->applied_real_app_uuid, real_application_uuid);
						master_condition_mask |= NECP_KERNEL_CONDITION_REAL_APP_ID;
						if (condition_is_negative) {
							master_condition_negated_mask |= NECP_KERNEL_CONDITION_REAL_APP_ID;
						}
						socket_only_conditions = TRUE;
					}
				}
				break;
			}
			case NECP_POLICY_CONDITION_PID: {
				if (condition_length >= sizeof(pid_t)) {
					master_condition_mask |= NECP_KERNEL_CONDITION_PID;
					if (condition_is_negative) {
						master_condition_negated_mask |= NECP_KERNEL_CONDITION_PID;
					}
					memcpy(&cond_pid, condition_value, sizeof(cond_pid));
					socket_only_conditions = TRUE;
				}
				break;
			}
			case NECP_POLICY_CONDITION_UID: {
				if (condition_length >= sizeof(uid_t)) {
					master_condition_mask |= NECP_KERNEL_CONDITION_UID;
					if (condition_is_negative) {
						master_condition_negated_mask |= NECP_KERNEL_CONDITION_UID;
					}
					memcpy(&cond_uid, condition_value, sizeof(cond_uid));
					socket_only_conditions = TRUE;
				}
				break;
			}
			case NECP_POLICY_CONDITION_TRAFFIC_CLASS: {
				if (condition_length >= sizeof(struct necp_policy_condition_tc_range)) {
					master_condition_mask |= NECP_KERNEL_CONDITION_TRAFFIC_CLASS;
					if (condition_is_negative) {
						master_condition_negated_mask |= NECP_KERNEL_CONDITION_TRAFFIC_CLASS;
					}
					memcpy(&cond_traffic_class, condition_value, sizeof(cond_traffic_class));
					socket_only_conditions = TRUE;
				}
				break;
			}
			case NECP_POLICY_CONDITION_BOUND_INTERFACE: {
				if (condition_length <= IFXNAMSIZ && condition_length > 0) {
					char interface_name[IFXNAMSIZ];
					memcpy(interface_name, condition_value, condition_length);
					interface_name[condition_length - 1] = 0; // Make sure the string is NULL terminated
					if (ifnet_find_by_name(interface_name, &cond_bound_interface) == 0) {
						master_condition_mask |= NECP_KERNEL_CONDITION_BOUND_INTERFACE;
						if (condition_is_negative) {
							master_condition_negated_mask |= NECP_KERNEL_CONDITION_BOUND_INTERFACE;
						}
					}
					socket_ip_conditions = TRUE;
				}
				break;
			}
			case NECP_POLICY_CONDITION_IP_PROTOCOL: {
				if (condition_length >= sizeof(u_int16_t)) {
					master_condition_mask |= NECP_KERNEL_CONDITION_PROTOCOL;
					if (condition_is_negative) {
						master_condition_negated_mask |= NECP_KERNEL_CONDITION_PROTOCOL;
					}
					memcpy(&cond_protocol, condition_value, sizeof(cond_protocol));
					socket_ip_conditions = TRUE;
				}
				break;
			}
			case NECP_POLICY_CONDITION_LOCAL_ADDR: {
				struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)condition_value;
				if (!necp_address_is_valid(&address_struct->address.sa)) {
					break;
				}

				cond_local_prefix = address_struct->prefix;
				memcpy(&cond_local_start, &address_struct->address, sizeof(address_struct->address));
				master_condition_mask |= NECP_KERNEL_CONDITION_LOCAL_START;
				master_condition_mask |= NECP_KERNEL_CONDITION_LOCAL_PREFIX;
				if (condition_is_negative) {
					master_condition_negated_mask |= NECP_KERNEL_CONDITION_LOCAL_START;
					master_condition_negated_mask |= NECP_KERNEL_CONDITION_LOCAL_PREFIX;
				}
				socket_ip_conditions = TRUE;
				break;
			}
			case NECP_POLICY_CONDITION_REMOTE_ADDR: {
				struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)condition_value;
				if (!necp_address_is_valid(&address_struct->address.sa)) {
					break;
				}

				cond_remote_prefix = address_struct->prefix;
				memcpy(&cond_remote_start, &address_struct->address, sizeof(address_struct->address));
				master_condition_mask |= NECP_KERNEL_CONDITION_REMOTE_START;
				master_condition_mask |= NECP_KERNEL_CONDITION_REMOTE_PREFIX;
				if (condition_is_negative) {
					master_condition_negated_mask |= NECP_KERNEL_CONDITION_REMOTE_START;
					master_condition_negated_mask |= NECP_KERNEL_CONDITION_REMOTE_PREFIX;
				}
				socket_ip_conditions = TRUE;
				break;
			}
			case NECP_POLICY_CONDITION_LOCAL_ADDR_RANGE: {
				struct necp_policy_condition_addr_range *address_struct = (struct necp_policy_condition_addr_range *)(void *)condition_value;
				if (!necp_address_is_valid(&address_struct->start_address.sa) ||
					!necp_address_is_valid(&address_struct->end_address.sa)) {
					break;
				}

				memcpy(&cond_local_start, &address_struct->start_address, sizeof(address_struct->start_address));
				memcpy(&cond_local_end, &address_struct->end_address, sizeof(address_struct->end_address));
				master_condition_mask |= NECP_KERNEL_CONDITION_LOCAL_START;
				master_condition_mask |= NECP_KERNEL_CONDITION_LOCAL_END;
				if (condition_is_negative) {
					master_condition_negated_mask |= NECP_KERNEL_CONDITION_LOCAL_START;
					master_condition_negated_mask |= NECP_KERNEL_CONDITION_LOCAL_END;
				}
				socket_ip_conditions = TRUE;
				break;
			}
			case NECP_POLICY_CONDITION_REMOTE_ADDR_RANGE: {
				struct necp_policy_condition_addr_range *address_struct = (struct necp_policy_condition_addr_range *)(void *)condition_value;
				if (!necp_address_is_valid(&address_struct->start_address.sa) ||
					!necp_address_is_valid(&address_struct->end_address.sa)) {
					break;
				}

				memcpy(&cond_remote_start, &address_struct->start_address, sizeof(address_struct->start_address));
				memcpy(&cond_remote_end, &address_struct->end_address, sizeof(address_struct->end_address));
				master_condition_mask |= NECP_KERNEL_CONDITION_REMOTE_START;
				master_condition_mask |= NECP_KERNEL_CONDITION_REMOTE_END;
				if (condition_is_negative) {
					master_condition_negated_mask |= NECP_KERNEL_CONDITION_REMOTE_START;
					master_condition_negated_mask |= NECP_KERNEL_CONDITION_REMOTE_END;
				}
				socket_ip_conditions = TRUE;
				break;
			}
			default: {
				break;
			}
		}

		offset += sizeof(u_int8_t) + sizeof(u_int32_t) + length;
	}

	// Process result
	ultimate_result = necp_policy_get_result_type(policy);
	switch (ultimate_result) {
		case NECP_POLICY_RESULT_PASS: {
			if (socket_only_conditions) { // socket_ip_conditions can be TRUE or FALSE
				socket_layer_non_id_conditions = TRUE;
				ip_output_layer_id_condition = TRUE;
			} else if (socket_ip_conditions) {
				socket_layer_non_id_conditions = TRUE;
				ip_output_layer_id_condition = TRUE;
				ip_output_layer_non_id_conditions = TRUE;
			}
			break;
		}
		case NECP_POLICY_RESULT_DROP: {
			if (socket_only_conditions) { // socket_ip_conditions can be TRUE or FALSE
				socket_layer_non_id_conditions = TRUE;
			} else if (socket_ip_conditions) {
				socket_layer_non_id_conditions = TRUE;
				ip_output_layer_non_id_conditions = TRUE;
				ip_output_layer_non_id_only = TRUE; // Only apply drop to packets that didn't go through socket layer
			}
			break;
		}
		case NECP_POLICY_RESULT_SKIP: {
			u_int32_t skip_policy_order = 0;
			if (necp_policy_get_result_parameter(policy, (u_int8_t *)&skip_policy_order, sizeof(skip_policy_order))) {
				ultimate_result_parameter.skip_policy_order = skip_policy_order;
			}

			if (socket_only_conditions) { // socket_ip_conditions can be TRUE or FALSE
				socket_layer_non_id_conditions = TRUE;
				ip_output_layer_id_condition = TRUE;
			} else if (socket_ip_conditions) {
				socket_layer_non_id_conditions = TRUE;
				ip_output_layer_non_id_conditions = TRUE;
			}
			break;
		}
		case NECP_POLICY_RESULT_SOCKET_DIVERT:
		case NECP_POLICY_RESULT_SOCKET_FILTER: {
			u_int32_t control_unit = 0;
			if (necp_policy_get_result_parameter(policy, (u_int8_t *)&control_unit, sizeof(control_unit))) {
				ultimate_result_parameter.flow_divert_control_unit = control_unit;
			}
			socket_layer_non_id_conditions = TRUE;
			break;
		}
		case NECP_POLICY_RESULT_IP_TUNNEL: {
			struct necp_policy_result_ip_tunnel tunnel_parameters;
			u_int32_t tunnel_parameters_length = necp_policy_get_result_parameter_length(policy);
			if (tunnel_parameters_length > sizeof(u_int32_t) &&
				tunnel_parameters_length <= sizeof(struct necp_policy_result_ip_tunnel) &&
				necp_policy_get_result_parameter(policy, (u_int8_t *)&tunnel_parameters, sizeof(tunnel_parameters))) {
				ifnet_t tunnel_interface = NULL;
				tunnel_parameters.interface_name[tunnel_parameters_length - sizeof(u_int32_t) - 1] = 0; // Make sure the string is NULL terminated
				if (ifnet_find_by_name(tunnel_parameters.interface_name, &tunnel_interface) == 0) {
					ultimate_result_parameter.tunnel_interface_index = tunnel_interface->if_index;
					ifnet_release(tunnel_interface);
				}

				secondary_result = tunnel_parameters.secondary_result;
				if (secondary_result) {
					cond_last_interface_index = ultimate_result_parameter.tunnel_interface_index;
				}
			}

			if (socket_only_conditions) { // socket_ip_conditions can be TRUE or FALSE
				socket_layer_non_id_conditions = TRUE;
				ip_output_layer_id_condition = TRUE;
				if (secondary_result) {
					ip_output_layer_tunnel_condition_from_id = TRUE;
				}
			} else if (socket_ip_conditions) {
				socket_layer_non_id_conditions = TRUE;
				ip_output_layer_id_condition = TRUE;
				ip_output_layer_non_id_conditions = TRUE;
				if (secondary_result) {
					ip_output_layer_tunnel_condition_from_id = TRUE;
					ip_output_layer_tunnel_condition_from_non_id = TRUE;
				}
			}
			break;
		}
		case NECP_POLICY_RESULT_TRIGGER:
		case NECP_POLICY_RESULT_TRIGGER_IF_NEEDED:
		case NECP_POLICY_RESULT_TRIGGER_SCOPED:
		case NECP_POLICY_RESULT_NO_TRIGGER_SCOPED: {
			struct necp_policy_result_service service_parameters;
			u_int32_t service_result_length = necp_policy_get_result_parameter_length(policy);
			bool has_extra_service_data = FALSE;
			if (service_result_length >= (sizeof(service_parameters))) {
				has_extra_service_data = TRUE;
			}
			if (necp_policy_get_result_parameter(policy, (u_int8_t *)&service_parameters, sizeof(service_parameters))) {
				ultimate_result_parameter.service.identifier = necp_create_uuid_service_id_mapping(service_parameters.identifier);
				if (ultimate_result_parameter.service.identifier != 0) {
					uuid_copy(policy->applied_result_uuid, service_parameters.identifier);
					socket_layer_non_id_conditions = TRUE;
					if (has_extra_service_data) {
						ultimate_result_parameter.service.data = service_parameters.data;
					} else {
						ultimate_result_parameter.service.data = 0;
					}
				}
			}
			break;
		}
		case NECP_POLICY_RESULT_USE_NETAGENT: {
			uuid_t netagent_uuid;
			if (necp_policy_get_result_parameter(policy, (u_int8_t *)&netagent_uuid, sizeof(netagent_uuid))) {
				ultimate_result_parameter.netagent_id = necp_create_uuid_service_id_mapping(netagent_uuid);
				if (ultimate_result_parameter.netagent_id != 0) {
					uuid_copy(policy->applied_result_uuid, netagent_uuid);
					socket_layer_non_id_conditions = TRUE;
				}
			}
			break;
		}
		case NECP_POLICY_RESULT_SOCKET_SCOPED: {
			u_int32_t interface_name_length = necp_policy_get_result_parameter_length(policy);
			if (interface_name_length <= IFXNAMSIZ && interface_name_length > 0) {
				char interface_name[IFXNAMSIZ];
				ifnet_t scope_interface = NULL;
				necp_policy_get_result_parameter(policy, (u_int8_t *)interface_name, interface_name_length);
				interface_name[interface_name_length - 1] = 0; // Make sure the string is NULL terminated
				if (ifnet_find_by_name(interface_name, &scope_interface) == 0) {
					ultimate_result_parameter.scoped_interface_index = scope_interface->if_index;
					socket_layer_non_id_conditions = TRUE;
					ifnet_release(scope_interface);
				}
			}
			break;
		}
		case NECP_POLICY_RESULT_ROUTE_RULES: {
			if (policy->route_rules != NULL && policy->route_rules_size > 0) {
				u_int32_t route_rule_id = necp_create_route_rule(&necp_route_rules, policy->route_rules, policy->route_rules_size);
				if (route_rule_id > 0) {
					policy->applied_route_rules_id = route_rule_id;
					ultimate_result_parameter.route_rule_id = route_rule_id;
					socket_layer_non_id_conditions = TRUE;
				}
			}
			break;
		}
		default: {
			break;
		}
	}

	if (socket_layer_non_id_conditions) {
		necp_kernel_policy_id policy_id = necp_kernel_socket_policy_add(policy->id, policy->order, session->session_order, session->proc_pid, master_condition_mask, master_condition_negated_mask, cond_app_id, cond_real_app_id, cond_custom_entitlement, cond_account_id, cond_domain, cond_pid, cond_uid, cond_bound_interface, cond_traffic_class, cond_protocol, &cond_local_start, &cond_local_end, cond_local_prefix, &cond_remote_start, &cond_remote_end, cond_remote_prefix, ultimate_result, ultimate_result_parameter);

		if (policy_id == 0) {
			NECPLOG0(LOG_DEBUG, "Error applying socket kernel policy");
			return (FALSE);
		}

		cond_ip_output_layer_id = policy_id;
		policy->kernel_socket_policies[0] = policy_id;
	}

	if (ip_output_layer_non_id_conditions) {
		u_int32_t condition_mask = master_condition_mask;
		if (ip_output_layer_non_id_only) {
			condition_mask |= NECP_KERNEL_CONDITION_POLICY_ID;
		}
		necp_kernel_policy_id policy_id = necp_kernel_ip_output_policy_add(policy->id, policy->order, NECP_KERNEL_POLICY_SUBORDER_NON_ID_CONDITIONS, session->session_order, session->proc_pid, condition_mask, master_condition_negated_mask, NECP_KERNEL_POLICY_ID_NONE, cond_bound_interface, 0, cond_protocol, &cond_local_start, &cond_local_end, cond_local_prefix, &cond_remote_start, &cond_remote_end, cond_remote_prefix, ultimate_result, ultimate_result_parameter);

		if (policy_id == 0) {
			NECPLOG0(LOG_DEBUG, "Error applying IP output kernel policy");
			return (FALSE);
		}

		policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_NON_ID_CONDITIONS] = policy_id;
	}

	if (ip_output_layer_id_condition) {
		necp_kernel_policy_id policy_id = necp_kernel_ip_output_policy_add(policy->id, policy->order, NECP_KERNEL_POLICY_SUBORDER_ID_CONDITION, session->session_order, session->proc_pid, NECP_KERNEL_CONDITION_POLICY_ID | NECP_KERNEL_CONDITION_ALL_INTERFACES, 0, cond_ip_output_layer_id, NULL, 0, 0, NULL, NULL, 0, NULL, NULL, 0, ultimate_result, ultimate_result_parameter);

		if (policy_id == 0) {
			NECPLOG0(LOG_DEBUG, "Error applying IP output kernel policy");
			return (FALSE);
		}

		policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_ID_CONDITION] = policy_id;
	}

	// Extra policies for IP Output tunnels for when packets loop back
	if (ip_output_layer_tunnel_condition_from_id) {
		necp_kernel_policy_id policy_id = necp_kernel_ip_output_policy_add(policy->id, policy->order, NECP_KERNEL_POLICY_SUBORDER_NON_ID_TUNNEL_CONDITION, session->session_order, session->proc_pid, NECP_KERNEL_CONDITION_POLICY_ID | NECP_KERNEL_CONDITION_LAST_INTERFACE | NECP_KERNEL_CONDITION_ALL_INTERFACES, 0, policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_NON_ID_CONDITIONS], NULL, cond_last_interface_index, 0, NULL, NULL, 0, NULL, NULL, 0, secondary_result, secondary_result_parameter);

		if (policy_id == 0) {
			NECPLOG0(LOG_DEBUG, "Error applying IP output kernel policy");
			return (FALSE);
		}

		policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_NON_ID_TUNNEL_CONDITION] = policy_id;
	}

	if (ip_output_layer_tunnel_condition_from_id) {
		necp_kernel_policy_id policy_id = necp_kernel_ip_output_policy_add(policy->id, policy->order, NECP_KERNEL_POLICY_SUBORDER_ID_TUNNEL_CONDITION, session->session_order, session->proc_pid, NECP_KERNEL_CONDITION_POLICY_ID | NECP_KERNEL_CONDITION_LAST_INTERFACE | NECP_KERNEL_CONDITION_ALL_INTERFACES, 0, policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_ID_CONDITION], NULL, cond_last_interface_index, 0, NULL, NULL, 0, NULL, NULL, 0, secondary_result, secondary_result_parameter);

		if (policy_id == 0) {
			NECPLOG0(LOG_DEBUG, "Error applying IP output kernel policy");
			return (FALSE);
		}

		policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_ID_TUNNEL_CONDITION] = policy_id;
	}

	policy->applied = TRUE;
	policy->pending_update = FALSE;
	return (TRUE);
}
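
// necp_policy_apply_all: with the kernel policy lock held exclusively, walks a dirty
// session's policies, unapplying and deleting those marked for removal, applying new ones,
// and re-applying pending updates, then rebuilds the kernel policy maps and posts a
// policies-changed event.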
static void
necp_policy_apply_all(struct necp_session *session)
{
	struct necp_session_policy *policy = NULL;
	struct necp_session_policy *temp_policy = NULL;
	struct kev_necp_policies_changed_data kev_data;
	kev_data.changed_count = 0;

	lck_rw_lock_exclusive(&necp_kernel_policy_lock);

	// Remove existing applied policies
	if (session->dirty) {
		LIST_FOREACH_SAFE(policy, &session->policies, chain, temp_policy) {
			if (policy->pending_deletion) {
				if (policy->applied) {
					necp_policy_unapply(policy);
				}
				// Delete the policy
				necp_policy_delete(session, policy);
			} else if (!policy->applied) {
				necp_policy_apply(session, policy);
			} else if (policy->pending_update) {
				// Must have been applied, but needs an update. Remove and re-add.
				necp_policy_unapply(policy);
				necp_policy_apply(session, policy);
			}
		}

		necp_kernel_socket_policies_update_uuid_table();
		necp_kernel_socket_policies_reprocess();
		necp_kernel_ip_output_policies_reprocess();

		// Clear dirty bit flags
		session->dirty = FALSE;
	}

	lck_rw_done(&necp_kernel_policy_lock);

	necp_update_all_clients();
	necp_post_change_event(&kev_data);

	NECPLOG0(LOG_DEBUG, "Applied NECP policies");
}
// Kernel Policy Management
// ---------------------
// Kernel policies are derived from session policies
static necp_kernel_policy_id
necp_kernel_policy_get_new_id(bool socket_level)
{
	static necp_kernel_policy_id necp_last_kernel_socket_policy_id = 0;
	static necp_kernel_policy_id necp_last_kernel_ip_policy_id = 0;

	necp_kernel_policy_id newid = NECP_KERNEL_POLICY_ID_NONE;

	LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	if (socket_level) {
		necp_last_kernel_socket_policy_id++;
		if (necp_last_kernel_socket_policy_id < NECP_KERNEL_POLICY_ID_FIRST_VALID_SOCKET ||
			necp_last_kernel_socket_policy_id >= NECP_KERNEL_POLICY_ID_FIRST_VALID_IP) {
			necp_last_kernel_socket_policy_id = NECP_KERNEL_POLICY_ID_FIRST_VALID_SOCKET;
		}
		newid = necp_last_kernel_socket_policy_id;
	} else {
		necp_last_kernel_ip_policy_id++;
		if (necp_last_kernel_ip_policy_id < NECP_KERNEL_POLICY_ID_FIRST_VALID_IP) {
			necp_last_kernel_ip_policy_id = NECP_KERNEL_POLICY_ID_FIRST_VALID_IP;
		}
		newid = necp_last_kernel_ip_policy_id;
	}

	if (newid == NECP_KERNEL_POLICY_ID_NONE) {
		NECPLOG0(LOG_DEBUG, "Allocate kernel policy id failed.\n");
		return (0);
	}

	return (newid);
}
#define NECP_KERNEL_VALID_SOCKET_CONDITIONS (NECP_KERNEL_CONDITION_APP_ID | NECP_KERNEL_CONDITION_REAL_APP_ID | NECP_KERNEL_CONDITION_DOMAIN | NECP_KERNEL_CONDITION_ACCOUNT_ID | NECP_KERNEL_CONDITION_PID | NECP_KERNEL_CONDITION_UID | NECP_KERNEL_CONDITION_ALL_INTERFACES | NECP_KERNEL_CONDITION_BOUND_INTERFACE | NECP_KERNEL_CONDITION_TRAFFIC_CLASS | NECP_KERNEL_CONDITION_PROTOCOL | NECP_KERNEL_CONDITION_LOCAL_START | NECP_KERNEL_CONDITION_LOCAL_END | NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_REMOTE_START | NECP_KERNEL_CONDITION_REMOTE_END | NECP_KERNEL_CONDITION_REMOTE_PREFIX | NECP_KERNEL_CONDITION_ENTITLEMENT | NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT)
static necp_kernel_policy_id
necp_kernel_socket_policy_add(necp_policy_id parent_policy_id, necp_policy_order order, u_int32_t session_order, int session_pid, u_int32_t condition_mask, u_int32_t condition_negated_mask, necp_app_id cond_app_id, necp_app_id cond_real_app_id, char *cond_custom_entitlement, u_int32_t cond_account_id, char *cond_domain, pid_t cond_pid, uid_t cond_uid, ifnet_t cond_bound_interface, struct necp_policy_condition_tc_range cond_traffic_class, u_int16_t cond_protocol, union necp_sockaddr_union *cond_local_start, union necp_sockaddr_union *cond_local_end, u_int8_t cond_local_prefix, union necp_sockaddr_union *cond_remote_start, union necp_sockaddr_union *cond_remote_end, u_int8_t cond_remote_prefix, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter)
{
	struct necp_kernel_socket_policy *new_kernel_policy = NULL;
	struct necp_kernel_socket_policy *tmp_kernel_policy = NULL;

	MALLOC_ZONE(new_kernel_policy, struct necp_kernel_socket_policy *, sizeof(*new_kernel_policy), M_NECP_SOCKET_POLICY, M_WAITOK);
	if (new_kernel_policy == NULL) {
		goto done;
	}

	memset(new_kernel_policy, 0, sizeof(*new_kernel_policy)); // M_ZERO is not supported for MALLOC_ZONE
	new_kernel_policy->parent_policy_id = parent_policy_id;
	new_kernel_policy->id = necp_kernel_policy_get_new_id(true);
	new_kernel_policy->order = order;
	new_kernel_policy->session_order = session_order;
	new_kernel_policy->session_pid = session_pid;

	// Sanitize condition mask
	new_kernel_policy->condition_mask = (condition_mask & NECP_KERNEL_VALID_SOCKET_CONDITIONS);
	if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES) && (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE)) {
		new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_BOUND_INTERFACE;
	}
	if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) && !(new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID)) {
		new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_REAL_APP_ID;
	}
	if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT) && !(new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID)) {
		new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_ENTITLEMENT;
	}
	if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) && (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX)) {
		new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_LOCAL_PREFIX;
	}
	if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) && (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX)) {
		new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_REMOTE_PREFIX;
	}
	new_kernel_policy->condition_negated_mask = condition_negated_mask & new_kernel_policy->condition_mask;

	// Set condition values
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID) {
		new_kernel_policy->cond_app_id = cond_app_id;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
		new_kernel_policy->cond_real_app_id = cond_real_app_id;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT) {
		new_kernel_policy->cond_custom_entitlement = cond_custom_entitlement;
		new_kernel_policy->cond_custom_entitlement_matched = necp_boolean_state_unknown;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID) {
		new_kernel_policy->cond_account_id = cond_account_id;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_DOMAIN) {
		new_kernel_policy->cond_domain = cond_domain;
		new_kernel_policy->cond_domain_dot_count = necp_count_dots(cond_domain, strlen(cond_domain));
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PID) {
		new_kernel_policy->cond_pid = cond_pid;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_UID) {
		new_kernel_policy->cond_uid = cond_uid;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
		if (cond_bound_interface) {
			ifnet_reference(cond_bound_interface);
		}
		new_kernel_policy->cond_bound_interface = cond_bound_interface;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
		new_kernel_policy->cond_traffic_class = cond_traffic_class;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
		new_kernel_policy->cond_protocol = cond_protocol;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
		memcpy(&new_kernel_policy->cond_local_start, cond_local_start, cond_local_start->sa.sa_len);
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
		memcpy(&new_kernel_policy->cond_local_end, cond_local_end, cond_local_end->sa.sa_len);
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
		new_kernel_policy->cond_local_prefix = cond_local_prefix;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
		memcpy(&new_kernel_policy->cond_remote_start, cond_remote_start, cond_remote_start->sa.sa_len);
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
		memcpy(&new_kernel_policy->cond_remote_end, cond_remote_end, cond_remote_end->sa.sa_len);
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
		new_kernel_policy->cond_remote_prefix = cond_remote_prefix;
	}

	new_kernel_policy->result = result;
	memcpy(&new_kernel_policy->result_parameter, &result_parameter, sizeof(result_parameter));

	NECPLOG(LOG_DEBUG, "Added kernel policy: socket, id=%d, mask=%x\n", new_kernel_policy->id, new_kernel_policy->condition_mask);

	LIST_INSERT_SORTED_TWICE_ASCENDING(&necp_kernel_socket_policies, new_kernel_policy, chain, session_order, order, tmp_kernel_policy);
done:
	return (new_kernel_policy ? new_kernel_policy->id : 0);
}
static struct necp_kernel_socket_policy *
necp_kernel_socket_policy_find(necp_kernel_policy_id policy_id)
{
	struct necp_kernel_socket_policy *kernel_policy = NULL;
	struct necp_kernel_socket_policy *tmp_kernel_policy = NULL;

	if (policy_id == 0) {
		return (NULL);
	}

	LIST_FOREACH_SAFE(kernel_policy, &necp_kernel_socket_policies, chain, tmp_kernel_policy) {
		if (kernel_policy->id == policy_id) {
			return (kernel_policy);
		}
	}

	return (NULL);
}
static bool
necp_kernel_socket_policy_delete(necp_kernel_policy_id policy_id)
{
	struct necp_kernel_socket_policy *policy = NULL;

	LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	policy = necp_kernel_socket_policy_find(policy_id);
	if (policy) {
		LIST_REMOVE(policy, chain);

		if (policy->cond_bound_interface) {
			ifnet_release(policy->cond_bound_interface);
			policy->cond_bound_interface = NULL;
		}

		if (policy->cond_domain) {
			FREE(policy->cond_domain, M_NECP);
			policy->cond_domain = NULL;
		}

		if (policy->cond_custom_entitlement) {
			FREE(policy->cond_custom_entitlement, M_NECP);
			policy->cond_custom_entitlement = NULL;
		}

		FREE_ZONE(policy, sizeof(*policy), M_NECP_SOCKET_POLICY);
		return (TRUE);
	}

	return (FALSE);
}
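
// necp_get_result_description: formats a human-readable description of a kernel policy
// result into result_string (MAX_RESULT_STRING_LEN bytes) for the debug dumps below.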
static inline const char *
necp_get_result_description(char *result_string, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter)
{
	uuid_string_t uuid_string;
	switch (result) {
		case NECP_KERNEL_POLICY_RESULT_NONE: {
			snprintf(result_string, MAX_RESULT_STRING_LEN, "None");
			break;
		}
		case NECP_KERNEL_POLICY_RESULT_PASS: {
			snprintf(result_string, MAX_RESULT_STRING_LEN, "Pass");
			break;
		}
		case NECP_KERNEL_POLICY_RESULT_SKIP: {
			snprintf(result_string, MAX_RESULT_STRING_LEN, "Skip (%u)", result_parameter.skip_policy_order);
			break;
		}
		case NECP_KERNEL_POLICY_RESULT_DROP: {
			snprintf(result_string, MAX_RESULT_STRING_LEN, "Drop");
			break;
		}
		case NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT: {
			snprintf(result_string, MAX_RESULT_STRING_LEN, "SocketDivert (%d)", result_parameter.flow_divert_control_unit);
			break;
		}
		case NECP_KERNEL_POLICY_RESULT_SOCKET_FILTER: {
			snprintf(result_string, MAX_RESULT_STRING_LEN, "SocketFilter (%d)", result_parameter.filter_control_unit);
			break;
		}
		case NECP_KERNEL_POLICY_RESULT_IP_TUNNEL: {
			ifnet_t interface = ifindex2ifnet[result_parameter.tunnel_interface_index];
			snprintf(result_string, MAX_RESULT_STRING_LEN, "IPTunnel (%s%d)", ifnet_name(interface), ifnet_unit(interface));
			break;
		}
		case NECP_KERNEL_POLICY_RESULT_IP_FILTER: {
			snprintf(result_string, MAX_RESULT_STRING_LEN, "IPFilter");
			break;
		}
		case NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED: {
			ifnet_t interface = ifindex2ifnet[result_parameter.scoped_interface_index];
			snprintf(result_string, MAX_RESULT_STRING_LEN, "SocketScoped (%s%d)", ifnet_name(interface), ifnet_unit(interface));
			break;
		}
		case NECP_KERNEL_POLICY_RESULT_ROUTE_RULES: {
			int index = 0;
			char interface_names[MAX_ROUTE_RULE_INTERFACES][IFXNAMSIZ]; // one IFXNAMSIZ-sized name per possible exception interface
			struct necp_route_rule *route_rule = necp_lookup_route_rule_locked(&necp_route_rules, result_parameter.route_rule_id);
			if (route_rule != NULL) {
				for (index = 0; index < MAX_ROUTE_RULE_INTERFACES; index++) {
					if (route_rule->exception_if_indices[index] != 0) {
						ifnet_t interface = ifindex2ifnet[route_rule->exception_if_indices[index]];
						snprintf(interface_names[index], IFXNAMSIZ, "%s%d", ifnet_name(interface), ifnet_unit(interface));
					} else {
						memset(interface_names[index], 0, IFXNAMSIZ);
					}
				}
				switch (route_rule->default_action) {
					case NECP_ROUTE_RULE_DENY_INTERFACE:
						snprintf(result_string, MAX_RESULT_STRING_LEN, "RouteRules (Only %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)",
						    (route_rule->cellular_action == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? "Cell " : "",
						    (route_rule->wifi_action == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? "WiFi " : "",
						    (route_rule->wired_action == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? "Wired " : "",
						    (route_rule->expensive_action == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? "Exp " : "",
						    (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[0] : "",
						    (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
						    (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[1] : "",
						    (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
						    (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[2] : "",
						    (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
						    (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[3] : "",
						    (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
						    (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[4] : "",
						    (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
						    (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[5] : "",
						    (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
						    (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[6] : "",
						    (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
						    (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[7] : "",
						    (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
						    (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[8] : "",
						    (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
						    (route_rule->exception_if_actions[9] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[9] : "");
						break;
					case NECP_ROUTE_RULE_ALLOW_INTERFACE:
						snprintf(result_string, MAX_RESULT_STRING_LEN, "RouteRules (%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)",
						    (route_rule->cellular_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!Cell " : "",
						    (route_rule->wifi_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!WiFi " : "",
						    (route_rule->wired_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!Wired " : "",
						    (route_rule->expensive_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!Exp " : "",
						    (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
						    (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[0] : "",
						    (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
						    (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[1] : "",
						    (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
						    (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[2] : "",
						    (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
						    (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[3] : "",
						    (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
						    (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[4] : "",
						    (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
						    (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[5] : "",
						    (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
						    (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[6] : "",
						    (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
						    (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[7] : "",
						    (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
						    (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[8] : "",
						    (route_rule->exception_if_actions[9] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
						    (route_rule->exception_if_actions[9] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[9] : "");
						break;
					case NECP_ROUTE_RULE_QOS_MARKING:
						snprintf(result_string, MAX_RESULT_STRING_LEN, "RouteRules (QoSMarking %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)",
						    (route_rule->cellular_action == NECP_ROUTE_RULE_QOS_MARKING) ? "Cell " : "",
						    (route_rule->wifi_action == NECP_ROUTE_RULE_QOS_MARKING) ? "WiFi " : "",
						    (route_rule->wired_action == NECP_ROUTE_RULE_QOS_MARKING) ? "Wired " : "",
						    (route_rule->expensive_action == NECP_ROUTE_RULE_QOS_MARKING) ? "Exp " : "",
						    (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[0] : "",
						    (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
						    (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[1] : "",
						    (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
						    (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[2] : "",
						    (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
						    (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[3] : "",
						    (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
						    (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[4] : "",
						    (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
						    (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[5] : "",
						    (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
						    (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[6] : "",
						    (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
						    (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[7] : "",
						    (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
						    (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[8] : "",
						    (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
						    (route_rule->exception_if_actions[9] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[9] : "");
						break;
					default:
						snprintf(result_string, MAX_RESULT_STRING_LEN, "RouteRules (Unknown)");
						break;
				}
			}
			break;
		}
		case NECP_KERNEL_POLICY_RESULT_USE_NETAGENT: {
			bool found_mapping = FALSE;
			struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.netagent_id);
			if (mapping != NULL) {
				uuid_unparse(mapping->uuid, uuid_string);
				found_mapping = TRUE;
			}
			snprintf(result_string, MAX_RESULT_STRING_LEN, "UseNetAgent (%s)", found_mapping ? uuid_string : "Unknown");
			break;
		}
		case NECP_POLICY_RESULT_TRIGGER: {
			bool found_mapping = FALSE;
			struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.service.identifier);
			if (mapping != NULL) {
				uuid_unparse(mapping->uuid, uuid_string);
				found_mapping = TRUE;
			}
			snprintf(result_string, MAX_RESULT_STRING_LEN, "Trigger (%s.%d)", found_mapping ? uuid_string : "Unknown", result_parameter.service.data);
			break;
		}
		case NECP_POLICY_RESULT_TRIGGER_IF_NEEDED: {
			bool found_mapping = FALSE;
			struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.service.identifier);
			if (mapping != NULL) {
				uuid_unparse(mapping->uuid, uuid_string);
				found_mapping = TRUE;
			}
			snprintf(result_string, MAX_RESULT_STRING_LEN, "TriggerIfNeeded (%s.%d)", found_mapping ? uuid_string : "Unknown", result_parameter.service.data);
			break;
		}
		case NECP_POLICY_RESULT_TRIGGER_SCOPED: {
			bool found_mapping = FALSE;
			struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.service.identifier);
			if (mapping != NULL) {
				uuid_unparse(mapping->uuid, uuid_string);
				found_mapping = TRUE;
			}
			snprintf(result_string, MAX_RESULT_STRING_LEN, "TriggerScoped (%s.%d)", found_mapping ? uuid_string : "Unknown", result_parameter.service.data);
			break;
		}
		case NECP_POLICY_RESULT_NO_TRIGGER_SCOPED: {
			bool found_mapping = FALSE;
			struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.service.identifier);
			if (mapping != NULL) {
				uuid_unparse(mapping->uuid, uuid_string);
				found_mapping = TRUE;
			}
			snprintf(result_string, MAX_RESULT_STRING_LEN, "NoTriggerScoped (%s.%d)", found_mapping ? uuid_string : "Unknown", result_parameter.service.data);
			break;
		}
		default: {
			snprintf(result_string, MAX_RESULT_STRING_LEN, "Unknown %d (%d)", result, result_parameter.tunnel_interface_index);
			break;
		}
	}
	return (result_string);
}
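
// necp_kernel_socket_policies_dump_all: logs the app-layer policy map and each app-ID
// bucket of the socket policy map, one line per policy, using the description helper above.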
static void
necp_kernel_socket_policies_dump_all(void)
{
	struct necp_kernel_socket_policy *policy = NULL;
	int policy_i;
	int app_i;
	char result_string[MAX_RESULT_STRING_LEN];
	char proc_name_string[MAXCOMLEN + 1];
	memset(result_string, 0, MAX_RESULT_STRING_LEN);
	memset(proc_name_string, 0, MAXCOMLEN + 1);

	NECPLOG0(LOG_DEBUG, "NECP Application Policies:\n");
	NECPLOG0(LOG_DEBUG, "-----------\n");
	for (policy_i = 0; necp_kernel_socket_policies_app_layer_map != NULL && necp_kernel_socket_policies_app_layer_map[policy_i] != NULL; policy_i++) {
		policy = necp_kernel_socket_policies_app_layer_map[policy_i];
		proc_name(policy->session_pid, proc_name_string, MAXCOMLEN);
		NECPLOG(LOG_DEBUG, "\t%3d. Policy ID: %5d\tProcess: %10.10s\tOrder: %04d.%04d\tMask: %5x\tResult: %s\n", policy_i, policy->id, proc_name_string, policy->session_order, policy->order, policy->condition_mask, necp_get_result_description(result_string, policy->result, policy->result_parameter));
	}
	if (necp_kernel_socket_policies_app_layer_map[0] != NULL) {
		NECPLOG0(LOG_DEBUG, "-----------\n");
	}

	NECPLOG0(LOG_DEBUG, "NECP Socket Policies:\n");
	NECPLOG0(LOG_DEBUG, "-----------\n");
	for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) {
		NECPLOG(LOG_DEBUG, "\tApp Bucket: %d\n", app_i);
		for (policy_i = 0; necp_kernel_socket_policies_map[app_i] != NULL && (necp_kernel_socket_policies_map[app_i])[policy_i] != NULL; policy_i++) {
			policy = (necp_kernel_socket_policies_map[app_i])[policy_i];
			proc_name(policy->session_pid, proc_name_string, MAXCOMLEN);
			NECPLOG(LOG_DEBUG, "\t%3d. Policy ID: %5d\tProcess: %10.10s\tOrder: %04d.%04d\tMask: %5x\tResult: %s\n", policy_i, policy->id, proc_name_string, policy->session_order, policy->order, policy->condition_mask, necp_get_result_description(result_string, policy->result, policy->result_parameter));
		}
		NECPLOG0(LOG_DEBUG, "-----------\n");
	}
}
static inline bool
necp_kernel_socket_result_is_trigger_service_type(struct necp_kernel_socket_policy *kernel_policy)
{
	return (kernel_policy->result >= NECP_KERNEL_POLICY_RESULT_TRIGGER && kernel_policy->result <= NECP_KERNEL_POLICY_RESULT_NO_TRIGGER_SCOPED);
}
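
// necp_kernel_socket_policy_results_overlap: returns TRUE when the result of upper_policy
// can block out lower_policy during map construction; used below to decide whether a
// lower-priority policy is redundant.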
static bool
necp_kernel_socket_policy_results_overlap(struct necp_kernel_socket_policy *upper_policy, struct necp_kernel_socket_policy *lower_policy)
{
	if (upper_policy->result == NECP_KERNEL_POLICY_RESULT_DROP) {
		// Drop always cancels out lower policies
		return (TRUE);
	} else if (upper_policy->result == NECP_KERNEL_POLICY_RESULT_SOCKET_FILTER ||
		upper_policy->result == NECP_KERNEL_POLICY_RESULT_ROUTE_RULES ||
		upper_policy->result == NECP_KERNEL_POLICY_RESULT_USE_NETAGENT) {
		// Filters and route rules never cancel out lower policies
		return (FALSE);
	} else if (necp_kernel_socket_result_is_trigger_service_type(upper_policy)) {
		// Trigger/Scoping policies can overlap one another, but not other results
		return (necp_kernel_socket_result_is_trigger_service_type(lower_policy));
	} else if (upper_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
		if (upper_policy->session_order != lower_policy->session_order) {
			// A skip cannot override a policy of a different session
			return (FALSE);
		}

		if (upper_policy->result_parameter.skip_policy_order == 0 ||
			lower_policy->order >= upper_policy->result_parameter.skip_policy_order) {
			// This policy is beyond the skip
			return (FALSE);
		} else {
			// This policy is inside the skip
			return (TRUE);
		}
	}

	// A hard pass, flow divert, tunnel, or scope will currently block out lower policies
	return (TRUE);
}
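
// necp_kernel_socket_policy_is_unnecessary: returns TRUE when some earlier policy in
// policy_array already matches a superset of this policy's conditions with a result that
// blocks it out, taking skip windows into account, so the policy can be left out of the
// lookup map being built.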
static bool
necp_kernel_socket_policy_is_unnecessary(struct necp_kernel_socket_policy *policy, struct necp_kernel_socket_policy **policy_array, int valid_indices)
{
    bool can_skip = FALSE;
    u_int32_t highest_skip_session_order = 0;
    u_int32_t highest_skip_order = 0;
    int i;
    for (i = 0; i < valid_indices; i++) {
        struct necp_kernel_socket_policy *compared_policy = policy_array[i];

        // For policies in a skip window, we can't mark conflicting policies as unnecessary
        if (can_skip) {
            if (highest_skip_session_order != compared_policy->session_order ||
                (highest_skip_order != 0 && compared_policy->order >= highest_skip_order)) {
                // If we've moved on to the next session, or passed the skip window
                highest_skip_session_order = 0;
                highest_skip_order = 0;
                can_skip = FALSE;
            } else {
                // If this policy is also a skip, it can increase the skip window
                if (compared_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
                    if (compared_policy->result_parameter.skip_policy_order > highest_skip_order) {
                        highest_skip_order = compared_policy->result_parameter.skip_policy_order;
                    }
                }
                continue;
            }
        }

        if (compared_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
            // This policy is a skip. Set the skip window accordingly
            can_skip = TRUE;
            highest_skip_session_order = compared_policy->session_order;
            highest_skip_order = compared_policy->result_parameter.skip_policy_order;
        }

        // The result of the compared policy must be able to block out this policy result
        if (!necp_kernel_socket_policy_results_overlap(compared_policy, policy)) {
            continue;
        }

        // If new policy matches All Interfaces, compared policy must also
        if ((policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES) && !(compared_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES)) {
            continue;
        }

        // Default makes lower policies unnecessary always
        if (compared_policy->condition_mask == 0) {
            return (TRUE);
        }

        // Compared must be more general than policy, and include only conditions within policy
        if ((policy->condition_mask & compared_policy->condition_mask) != compared_policy->condition_mask) {
            continue;
        }

        // Negative conditions must match for the overlapping conditions
        if ((policy->condition_negated_mask & compared_policy->condition_mask) != (compared_policy->condition_negated_mask & compared_policy->condition_mask)) {
            continue;
        }

        if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_DOMAIN &&
            strcmp(compared_policy->cond_domain, policy->cond_domain) != 0) {
            continue;
        }

        if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT &&
            strcmp(compared_policy->cond_custom_entitlement, policy->cond_custom_entitlement) != 0) {
            continue;
        }

        if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID &&
            compared_policy->cond_account_id != policy->cond_account_id) {
            continue;
        }

        if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID &&
            compared_policy->cond_policy_id != policy->cond_policy_id) {
            continue;
        }

        if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID &&
            compared_policy->cond_app_id != policy->cond_app_id) {
            continue;
        }

        if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID &&
            compared_policy->cond_real_app_id != policy->cond_real_app_id) {
            continue;
        }

        if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_PID &&
            compared_policy->cond_pid != policy->cond_pid) {
            continue;
        }

        if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_UID &&
            compared_policy->cond_uid != policy->cond_uid) {
            continue;
        }

        if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE &&
            compared_policy->cond_bound_interface != policy->cond_bound_interface) {
            continue;
        }

        if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL &&
            compared_policy->cond_protocol != policy->cond_protocol) {
            continue;
        }

        if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS &&
            !(compared_policy->cond_traffic_class.start_tc <= policy->cond_traffic_class.start_tc &&
              compared_policy->cond_traffic_class.end_tc >= policy->cond_traffic_class.end_tc)) {
            continue;
        }

        if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
            if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
                if (!necp_is_range_in_range((struct sockaddr *)&policy->cond_local_start, (struct sockaddr *)&policy->cond_local_end, (struct sockaddr *)&compared_policy->cond_local_start, (struct sockaddr *)&compared_policy->cond_local_end)) {
                    continue;
                }
            } else if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
                if (compared_policy->cond_local_prefix > policy->cond_local_prefix ||
                    !necp_is_addr_in_subnet((struct sockaddr *)&policy->cond_local_start, (struct sockaddr *)&compared_policy->cond_local_start, compared_policy->cond_local_prefix)) {
                    continue;
                }
            }
        }

        if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
            if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
                if (!necp_is_range_in_range((struct sockaddr *)&policy->cond_remote_start, (struct sockaddr *)&policy->cond_remote_end, (struct sockaddr *)&compared_policy->cond_remote_start, (struct sockaddr *)&compared_policy->cond_remote_end)) {
                    continue;
                }
            } else if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
                if (compared_policy->cond_remote_prefix > policy->cond_remote_prefix ||
                    !necp_is_addr_in_subnet((struct sockaddr *)&policy->cond_remote_start, (struct sockaddr *)&compared_policy->cond_remote_start, compared_policy->cond_remote_prefix)) {
                    continue;
                }
            }
        }

        return (TRUE);
    }

    return (FALSE);
}
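/*
 * necp_kernel_socket_policies_reprocess() rebuilds the lookup maps from the
 * master list of socket-level kernel policies: one NULL-terminated bucket per
 * application-ID hash for socket-layer matching, plus a single app-layer map.
 * Policies that have no (or a negated) app-ID condition are copied into every
 * bucket; redundant policies are filtered out via
 * necp_kernel_socket_policy_is_unnecessary().
 */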
static bool
necp_kernel_socket_policies_reprocess(void)
{
    int app_i;
    int bucket_allocation_counts[NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS];
    int bucket_current_free_index[NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS];
    int app_layer_allocation_count = 0;
    int app_layer_current_free_index = 0;
    struct necp_kernel_socket_policy *kernel_policy = NULL;

    LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

    // Reset masks and counts
    necp_kernel_application_policies_condition_mask = 0;
    necp_kernel_socket_policies_condition_mask = 0;
    necp_kernel_application_policies_count = 0;
    necp_kernel_socket_policies_count = 0;
    necp_kernel_socket_policies_non_app_count = 0;

    // Reset all maps to NULL
    for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) {
        if (necp_kernel_socket_policies_map[app_i] != NULL) {
            FREE(necp_kernel_socket_policies_map[app_i], M_NECP);
            necp_kernel_socket_policies_map[app_i] = NULL;
        }

        bucket_allocation_counts[app_i] = 0;
    }
    if (necp_kernel_socket_policies_app_layer_map != NULL) {
        FREE(necp_kernel_socket_policies_app_layer_map, M_NECP);
        necp_kernel_socket_policies_app_layer_map = NULL;
    }

    // Create masks and counts
    LIST_FOREACH(kernel_policy, &necp_kernel_socket_policies, chain) {
        // App layer mask/count
        necp_kernel_application_policies_condition_mask |= kernel_policy->condition_mask;
        necp_kernel_application_policies_count++;
        app_layer_allocation_count++;

        // Update socket layer bucket mask/counts
        necp_kernel_socket_policies_condition_mask |= kernel_policy->condition_mask;
        necp_kernel_socket_policies_count++;

        if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID) ||
            kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_APP_ID) {
            necp_kernel_socket_policies_non_app_count++;
            for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) {
                bucket_allocation_counts[app_i]++;
            }
        } else {
            bucket_allocation_counts[NECP_SOCKET_MAP_APP_ID_TO_BUCKET(kernel_policy->cond_app_id)]++;
        }
    }

    // Allocate maps
    for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) {
        if (bucket_allocation_counts[app_i] > 0) {
            // Allocate a NULL-terminated array of policy pointers for each bucket
            MALLOC(necp_kernel_socket_policies_map[app_i], struct necp_kernel_socket_policy **, sizeof(struct necp_kernel_socket_policy *) * (bucket_allocation_counts[app_i] + 1), M_NECP, M_WAITOK);
            if (necp_kernel_socket_policies_map[app_i] == NULL) {
                goto fail;
            }

            // Initialize the first entry to NULL
            (necp_kernel_socket_policies_map[app_i])[0] = NULL;
        }
        bucket_current_free_index[app_i] = 0;
    }
    MALLOC(necp_kernel_socket_policies_app_layer_map, struct necp_kernel_socket_policy **, sizeof(struct necp_kernel_socket_policy *) * (app_layer_allocation_count + 1), M_NECP, M_WAITOK);
    if (necp_kernel_socket_policies_app_layer_map == NULL) {
        goto fail;
    }
    necp_kernel_socket_policies_app_layer_map[0] = NULL;

    // Fill out maps
    LIST_FOREACH(kernel_policy, &necp_kernel_socket_policies, chain) {
        // Insert pointers into map
        if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID) ||
            kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_APP_ID) {
            for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) {
                if (!necp_kernel_socket_policy_is_unnecessary(kernel_policy, necp_kernel_socket_policies_map[app_i], bucket_current_free_index[app_i])) {
                    (necp_kernel_socket_policies_map[app_i])[(bucket_current_free_index[app_i])] = kernel_policy;
                    bucket_current_free_index[app_i]++;
                    (necp_kernel_socket_policies_map[app_i])[(bucket_current_free_index[app_i])] = NULL;
                }
            }
        } else {
            app_i = NECP_SOCKET_MAP_APP_ID_TO_BUCKET(kernel_policy->cond_app_id);
            if (!necp_kernel_socket_policy_is_unnecessary(kernel_policy, necp_kernel_socket_policies_map[app_i], bucket_current_free_index[app_i])) {
                (necp_kernel_socket_policies_map[app_i])[(bucket_current_free_index[app_i])] = kernel_policy;
                bucket_current_free_index[app_i]++;
                (necp_kernel_socket_policies_map[app_i])[(bucket_current_free_index[app_i])] = NULL;
            }
        }

        if (!necp_kernel_socket_policy_is_unnecessary(kernel_policy, necp_kernel_socket_policies_app_layer_map, app_layer_current_free_index)) {
            necp_kernel_socket_policies_app_layer_map[app_layer_current_free_index] = kernel_policy;
            app_layer_current_free_index++;
            necp_kernel_socket_policies_app_layer_map[app_layer_current_free_index] = NULL;
        }
    }
    necp_kernel_socket_policies_dump_all();
    BUMP_KERNEL_SOCKET_POLICIES_GENERATION_COUNT();
    return (TRUE);

fail:
    // Free memory, reset masks to 0
    necp_kernel_application_policies_condition_mask = 0;
    necp_kernel_socket_policies_condition_mask = 0;
    necp_kernel_application_policies_count = 0;
    necp_kernel_socket_policies_count = 0;
    necp_kernel_socket_policies_non_app_count = 0;
    for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) {
        if (necp_kernel_socket_policies_map[app_i] != NULL) {
            FREE(necp_kernel_socket_policies_map[app_i], M_NECP);
            necp_kernel_socket_policies_map[app_i] = NULL;
        }
    }
    if (necp_kernel_socket_policies_app_layer_map != NULL) {
        FREE(necp_kernel_socket_policies_app_layer_map, M_NECP);
        necp_kernel_socket_policies_app_layer_map = NULL;
    }
    return (FALSE);
}
static u_int32_t
necp_get_new_string_id(void)
{
    u_int32_t newid = 0;

    LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

    necp_last_string_id++;
    if (necp_last_string_id < 1) {
        necp_last_string_id = 1;
    }

    newid = necp_last_string_id;
    if (newid == 0) {
        NECPLOG0(LOG_DEBUG, "Allocate string id failed.\n");
        return (0);
    }

    return (newid);
}
static struct necp_string_id_mapping *
necp_lookup_string_to_id_locked(struct necp_string_id_mapping_list *list, char *string)
{
    struct necp_string_id_mapping *searchentry = NULL;
    struct necp_string_id_mapping *foundentry = NULL;

    LIST_FOREACH(searchentry, list, chain) {
        if (strcmp(searchentry->string, string) == 0) {
            foundentry = searchentry;
            break;
        }
    }

    return (foundentry);
}
static struct necp_string_id_mapping *
necp_lookup_string_with_id_locked(struct necp_string_id_mapping_list *list, u_int32_t local_id)
{
    struct necp_string_id_mapping *searchentry = NULL;
    struct necp_string_id_mapping *foundentry = NULL;

    LIST_FOREACH(searchentry, list, chain) {
        if (searchentry->id == local_id) {
            foundentry = searchentry;
            break;
        }
    }

    return (foundentry);
}
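/*
 * String conditions (for example account identifiers) are interned: each
 * unique string is stored once in a refcounted necp_string_id_mapping and
 * policies reference it by its compact u_int32_t id, so matching compares
 * integers rather than strings.
 */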
static u_int32_t
necp_create_string_to_id_mapping(struct necp_string_id_mapping_list *list, char *string)
{
    u_int32_t string_id = 0;
    struct necp_string_id_mapping *existing_mapping = NULL;

    LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

    existing_mapping = necp_lookup_string_to_id_locked(list, string);
    if (existing_mapping != NULL) {
        string_id = existing_mapping->id;
        existing_mapping->refcount++;
    } else {
        struct necp_string_id_mapping *new_mapping = NULL;
        MALLOC(new_mapping, struct necp_string_id_mapping *, sizeof(struct necp_string_id_mapping), M_NECP, M_WAITOK);
        if (new_mapping != NULL) {
            memset(new_mapping, 0, sizeof(struct necp_string_id_mapping));

            size_t length = strlen(string) + 1;
            MALLOC(new_mapping->string, char *, length, M_NECP, M_WAITOK);
            if (new_mapping->string != NULL) {
                memcpy(new_mapping->string, string, length);
                new_mapping->id = necp_get_new_string_id();
                new_mapping->refcount = 1;
                LIST_INSERT_HEAD(list, new_mapping, chain);
                string_id = new_mapping->id;
            } else {
                FREE(new_mapping, M_NECP);
            }
        }
    }

    return (string_id);
}
static bool
necp_remove_string_to_id_mapping(struct necp_string_id_mapping_list *list, char *string)
{
    struct necp_string_id_mapping *existing_mapping = NULL;

    LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

    existing_mapping = necp_lookup_string_to_id_locked(list, string);
    if (existing_mapping != NULL) {
        if (--existing_mapping->refcount == 0) {
            LIST_REMOVE(existing_mapping, chain);
            FREE(existing_mapping->string, M_NECP);
            FREE(existing_mapping, M_NECP);
        }
        return (TRUE);
    }

    return (FALSE);
}
static u_int32_t
necp_get_new_route_rule_id(void)
{
    u_int32_t newid = 0;

    LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

    necp_last_route_rule_id++;
    if (necp_last_route_rule_id < 1 || necp_last_route_rule_id > UINT16_MAX) {
        necp_last_route_rule_id = 1;
    }

    newid = necp_last_route_rule_id;
    if (newid == 0) {
        NECPLOG0(LOG_DEBUG, "Allocate route rule id failed.\n");
        return (0);
    }

    return (newid);
}
static u_int32_t
necp_get_new_aggregate_route_rule_id(void)
{
    u_int32_t newid = 0;

    LCK_RW_ASSERT(&necp_route_rule_lock, LCK_RW_ASSERT_EXCLUSIVE);

    necp_last_aggregate_route_rule_id++;
    if (necp_last_aggregate_route_rule_id <= UINT16_MAX) {
        necp_last_aggregate_route_rule_id = UINT16_MAX + 1;
    }

    newid = necp_last_aggregate_route_rule_id;
    if (newid == 0) {
        NECPLOG0(LOG_DEBUG, "Allocate aggregate route rule id failed.\n");
        return (0);
    }

    return (newid);
}
static struct necp_route_rule *
necp_lookup_route_rule_locked(struct necp_route_rule_list *list, u_int32_t route_rule_id)
{
    struct necp_route_rule *searchentry = NULL;
    struct necp_route_rule *foundentry = NULL;

    LIST_FOREACH(searchentry, list, chain) {
        if (searchentry->id == route_rule_id) {
            foundentry = searchentry;
            break;
        }
    }

    return (foundentry);
}
static struct necp_route_rule *
necp_lookup_route_rule_by_contents_locked(struct necp_route_rule_list *list, u_int32_t default_action, u_int8_t cellular_action, u_int8_t wifi_action, u_int8_t wired_action, u_int8_t expensive_action, u_int32_t *if_indices, u_int8_t *if_actions)
{
    struct necp_route_rule *searchentry = NULL;
    struct necp_route_rule *foundentry = NULL;

    LIST_FOREACH(searchentry, list, chain) {
        if (searchentry->default_action == default_action &&
            searchentry->cellular_action == cellular_action &&
            searchentry->wifi_action == wifi_action &&
            searchentry->wired_action == wired_action &&
            searchentry->expensive_action == expensive_action) {
            bool match_failed = FALSE;
            size_t index_a = 0;
            size_t index_b = 0;
            size_t count_a = 0;
            size_t count_b = 0;
            for (index_a = 0; index_a < MAX_ROUTE_RULE_INTERFACES; index_a++) {
                bool found_index = FALSE;
                if (searchentry->exception_if_indices[index_a] == 0) {
                    break;
                }
                count_a++;
                for (index_b = 0; index_b < MAX_ROUTE_RULE_INTERFACES; index_b++) {
                    if (if_indices[index_b] == 0) {
                        break;
                    }
                    if (index_b >= count_b) {
                        count_b = index_b + 1;
                    }
                    if (searchentry->exception_if_indices[index_a] == if_indices[index_b] &&
                        searchentry->exception_if_actions[index_a] == if_actions[index_b]) {
                        found_index = TRUE;
                        break;
                    }
                }
                if (!found_index) {
                    match_failed = TRUE;
                    break;
                }
            }
            if (!match_failed && count_a == count_b) {
                foundentry = searchentry;
                break;
            }
        }
    }

    return (foundentry);
}
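/*
 * Route rules arrive from userspace as a TLV array. Each TLV carries a rule
 * action, flags selecting an interface class (cellular, Wi-Fi, wired,
 * expensive) or no flags for the default action, and optionally an interface
 * name for a per-interface exception. Identical rule sets are shared by
 * bumping the refcount of an existing necp_route_rule entry.
 */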
static u_int32_t
necp_create_route_rule(struct necp_route_rule_list *list, u_int8_t *route_rules_array, u_int32_t route_rules_array_size)
{
    size_t offset = 0;
    u_int32_t route_rule_id = 0;
    struct necp_route_rule *existing_rule = NULL;
    u_int32_t default_action = NECP_ROUTE_RULE_ALLOW_INTERFACE;
    u_int8_t cellular_action = NECP_ROUTE_RULE_NONE;
    u_int8_t wifi_action = NECP_ROUTE_RULE_NONE;
    u_int8_t wired_action = NECP_ROUTE_RULE_NONE;
    u_int8_t expensive_action = NECP_ROUTE_RULE_NONE;
    u_int32_t if_indices[MAX_ROUTE_RULE_INTERFACES];
    size_t num_valid_indices = 0;
    memset(&if_indices, 0, sizeof(if_indices));
    u_int8_t if_actions[MAX_ROUTE_RULE_INTERFACES];
    memset(&if_actions, 0, sizeof(if_actions));

    LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

    if (route_rules_array == NULL || route_rules_array_size == 0) {
        return (0);
    }

    // Process each TLV in the route rules array
    while (offset < route_rules_array_size) {
        ifnet_t rule_interface = NULL;
        char interface_name[IFXNAMSIZ];
        u_int32_t length = 0;
        u_int8_t *value = necp_buffer_get_tlv_value(route_rules_array, offset, &length);

        u_int8_t rule_type = necp_policy_condition_get_type_from_buffer(value, length);
        u_int8_t rule_flags = necp_policy_condition_get_flags_from_buffer(value, length);
        u_int32_t rule_length = necp_policy_condition_get_value_length_from_buffer(value, length);
        u_int8_t *rule_value = necp_policy_condition_get_value_pointer_from_buffer(value, length);

        if (rule_type == NECP_ROUTE_RULE_NONE) {
            // Don't allow an explicit rule to be None action
            break;
        }

        if (rule_length == 0) {
            if (rule_flags & NECP_ROUTE_RULE_FLAG_CELLULAR) {
                cellular_action = rule_type;
            }
            if (rule_flags & NECP_ROUTE_RULE_FLAG_WIFI) {
                wifi_action = rule_type;
            }
            if (rule_flags & NECP_ROUTE_RULE_FLAG_WIRED) {
                wired_action = rule_type;
            }
            if (rule_flags & NECP_ROUTE_RULE_FLAG_EXPENSIVE) {
                expensive_action = rule_type;
            }
            if (rule_flags == 0) {
                default_action = rule_type;
            }
            offset += sizeof(u_int8_t) + sizeof(u_int32_t) + length;
            continue;
        }

        if (num_valid_indices >= MAX_ROUTE_RULE_INTERFACES) {
            offset += sizeof(u_int8_t) + sizeof(u_int32_t) + length;
            continue;
        }

        if (rule_length <= IFXNAMSIZ) {
            memcpy(interface_name, rule_value, rule_length);
            interface_name[rule_length - 1] = 0; // Make sure the string is NULL terminated
            if (ifnet_find_by_name(interface_name, &rule_interface) == 0) {
                if_actions[num_valid_indices] = rule_type;
                if_indices[num_valid_indices++] = rule_interface->if_index;
                ifnet_release(rule_interface);
            }
        }
        offset += sizeof(u_int8_t) + sizeof(u_int32_t) + length;
    }

    existing_rule = necp_lookup_route_rule_by_contents_locked(list, default_action, cellular_action, wifi_action, wired_action, expensive_action, if_indices, if_actions);
    if (existing_rule != NULL) {
        route_rule_id = existing_rule->id;
        existing_rule->refcount++;
    } else {
        struct necp_route_rule *new_rule = NULL;
        MALLOC(new_rule, struct necp_route_rule *, sizeof(struct necp_route_rule), M_NECP, M_WAITOK);
        if (new_rule != NULL) {
            memset(new_rule, 0, sizeof(struct necp_route_rule));
            route_rule_id = new_rule->id = necp_get_new_route_rule_id();
            new_rule->default_action = default_action;
            new_rule->cellular_action = cellular_action;
            new_rule->wifi_action = wifi_action;
            new_rule->wired_action = wired_action;
            new_rule->expensive_action = expensive_action;
            memcpy(&new_rule->exception_if_indices, &if_indices, sizeof(if_indices));
            memcpy(&new_rule->exception_if_actions, &if_actions, sizeof(if_actions));
            new_rule->refcount = 1;
            LIST_INSERT_HEAD(list, new_rule, chain);
        }
    }
    return (route_rule_id);
}
static void
necp_remove_aggregate_route_rule_for_id(u_int32_t rule_id)
{
    if (rule_id) {
        lck_rw_lock_exclusive(&necp_route_rule_lock);

        struct necp_aggregate_route_rule *existing_rule = NULL;
        struct necp_aggregate_route_rule *tmp_rule = NULL;

        LIST_FOREACH_SAFE(existing_rule, &necp_aggregate_route_rules, chain, tmp_rule) {
            int index = 0;
            for (index = 0; index < MAX_AGGREGATE_ROUTE_RULES; index++) {
                u_int32_t route_rule_id = existing_rule->rule_ids[index];
                if (route_rule_id == rule_id) {
                    LIST_REMOVE(existing_rule, chain);
                    FREE(existing_rule, M_NECP);
                    break;
                }
            }
        }

        lck_rw_done(&necp_route_rule_lock);
    }
}
static bool
necp_remove_route_rule(struct necp_route_rule_list *list, u_int32_t route_rule_id)
{
    struct necp_route_rule *existing_rule = NULL;

    LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

    existing_rule = necp_lookup_route_rule_locked(list, route_rule_id);
    if (existing_rule != NULL) {
        if (--existing_rule->refcount == 0) {
            necp_remove_aggregate_route_rule_for_id(existing_rule->id);
            LIST_REMOVE(existing_rule, chain);
            FREE(existing_rule, M_NECP);
        }
        return (TRUE);
    }

    return (FALSE);
}
static struct necp_aggregate_route_rule *
necp_lookup_aggregate_route_rule_locked(u_int32_t route_rule_id)
{
    struct necp_aggregate_route_rule *searchentry = NULL;
    struct necp_aggregate_route_rule *foundentry = NULL;

    lck_rw_lock_shared(&necp_route_rule_lock);

    LIST_FOREACH(searchentry, &necp_aggregate_route_rules, chain) {
        if (searchentry->id == route_rule_id) {
            foundentry = searchentry;
            break;
        }
    }

    lck_rw_done(&necp_route_rule_lock);

    return (foundentry);
}
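/*
 * An aggregate route rule bundles up to MAX_AGGREGATE_ROUTE_RULES individual
 * rule IDs under a single ID. Aggregate IDs are allocated above UINT16_MAX so
 * they can never collide with individual route rule IDs, which stay within
 * the 16-bit range.
 */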
static u_int32_t
necp_create_aggregate_route_rule(u_int32_t *rule_ids)
{
    u_int32_t aggregate_route_rule_id = 0;
    struct necp_aggregate_route_rule *new_rule = NULL;
    struct necp_aggregate_route_rule *existing_rule = NULL;

    LIST_FOREACH(existing_rule, &necp_aggregate_route_rules, chain) {
        if (memcmp(existing_rule->rule_ids, rule_ids, (sizeof(u_int32_t) * MAX_AGGREGATE_ROUTE_RULES)) == 0) {
            return (existing_rule->id);
        }
    }

    lck_rw_lock_exclusive(&necp_route_rule_lock);

    LIST_FOREACH(existing_rule, &necp_aggregate_route_rules, chain) {
        // Re-check, in case something else created the rule while we are waiting to lock
        if (memcmp(existing_rule->rule_ids, rule_ids, (sizeof(u_int32_t) * MAX_AGGREGATE_ROUTE_RULES)) == 0) {
            lck_rw_done(&necp_route_rule_lock);
            return (existing_rule->id);
        }
    }

    MALLOC(new_rule, struct necp_aggregate_route_rule *, sizeof(struct necp_aggregate_route_rule), M_NECP, M_WAITOK);
    if (new_rule != NULL) {
        memset(new_rule, 0, sizeof(struct necp_aggregate_route_rule));
        aggregate_route_rule_id = new_rule->id = necp_get_new_aggregate_route_rule_id();
        new_rule->id = aggregate_route_rule_id;
        memcpy(new_rule->rule_ids, rule_ids, (sizeof(u_int32_t) * MAX_AGGREGATE_ROUTE_RULES));
        LIST_INSERT_HEAD(&necp_aggregate_route_rules, new_rule, chain);
    }
    lck_rw_done(&necp_route_rule_lock);

    return (aggregate_route_rule_id);
}
#define NECP_NULL_SERVICE_ID 1
static u_int32_t
necp_get_new_uuid_id(void)
{
    u_int32_t newid = 0;

    LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

    necp_last_uuid_id++;
    if (necp_last_uuid_id < (NECP_NULL_SERVICE_ID + 1)) {
        necp_last_uuid_id = (NECP_NULL_SERVICE_ID + 1);
    }

    newid = necp_last_uuid_id;
    if (newid == 0) {
        NECPLOG0(LOG_DEBUG, "Allocate uuid id failed.\n");
        return (0);
    }

    return (newid);
}
static struct necp_uuid_id_mapping *
necp_uuid_lookup_app_id_locked(uuid_t uuid)
{
    struct necp_uuid_id_mapping *searchentry = NULL;
    struct necp_uuid_id_mapping *foundentry = NULL;

    LIST_FOREACH(searchentry, APPUUIDHASH(uuid), chain) {
        if (uuid_compare(searchentry->uuid, uuid) == 0) {
            foundentry = searchentry;
            break;
        }
    }

    return (foundentry);
}
static struct necp_uuid_id_mapping *
necp_uuid_lookup_uuid_with_app_id_locked(u_int32_t local_id)
{
    struct necp_uuid_id_mapping *searchentry = NULL;
    struct necp_uuid_id_mapping *foundentry = NULL;

    struct necp_uuid_id_mapping_head *uuid_list_head = NULL;
    for (uuid_list_head = &necp_uuid_app_id_hashtbl[necp_uuid_app_id_hash_num_buckets - 1]; uuid_list_head >= necp_uuid_app_id_hashtbl; uuid_list_head--) {
        LIST_FOREACH(searchentry, uuid_list_head, chain) {
            if (searchentry->id == local_id) {
                foundentry = searchentry;
                break;
            }
        }
    }

    return (foundentry);
}
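/*
 * Application UUIDs are interned into refcounted necp_uuid_id_mapping entries
 * hashed by UUID. The table_refcount tracks how many users of the mapping
 * also want the UUID mirrored into the proc_uuid_policy table that drives
 * per-process app policies.
 */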
static u_int32_t
necp_create_uuid_app_id_mapping(uuid_t uuid, bool *allocated_mapping, bool uuid_policy_table)
{
    u_int32_t local_id = 0;
    struct necp_uuid_id_mapping *existing_mapping = NULL;

    LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

    if (allocated_mapping) {
        *allocated_mapping = FALSE;
    }

    existing_mapping = necp_uuid_lookup_app_id_locked(uuid);
    if (existing_mapping != NULL) {
        local_id = existing_mapping->id;
        existing_mapping->refcount++;
        if (uuid_policy_table) {
            existing_mapping->table_refcount++;
        }
    } else {
        struct necp_uuid_id_mapping *new_mapping = NULL;
        MALLOC(new_mapping, struct necp_uuid_id_mapping *, sizeof(*new_mapping), M_NECP, M_WAITOK);
        if (new_mapping != NULL) {
            uuid_copy(new_mapping->uuid, uuid);
            new_mapping->id = necp_get_new_uuid_id();
            new_mapping->refcount = 1;
            if (uuid_policy_table) {
                new_mapping->table_refcount = 1;
            } else {
                new_mapping->table_refcount = 0;
            }

            LIST_INSERT_HEAD(APPUUIDHASH(uuid), new_mapping, chain);

            if (allocated_mapping) {
                *allocated_mapping = TRUE;
            }

            local_id = new_mapping->id;
        }
    }

    return (local_id);
}
static bool
necp_remove_uuid_app_id_mapping(uuid_t uuid, bool *removed_mapping, bool uuid_policy_table)
{
    struct necp_uuid_id_mapping *existing_mapping = NULL;

    LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

    if (removed_mapping) {
        *removed_mapping = FALSE;
    }

    existing_mapping = necp_uuid_lookup_app_id_locked(uuid);
    if (existing_mapping != NULL) {
        if (uuid_policy_table) {
            existing_mapping->table_refcount--;
        }
        if (--existing_mapping->refcount == 0) {
            LIST_REMOVE(existing_mapping, chain);
            FREE(existing_mapping, M_NECP);
            if (removed_mapping) {
                *removed_mapping = TRUE;
            }
        }
        return (TRUE);
    }

    return (FALSE);
}
static struct necp_uuid_id_mapping *
necp_uuid_get_null_service_id_mapping(void)
{
    static struct necp_uuid_id_mapping null_mapping;
    uuid_clear(null_mapping.uuid);
    null_mapping.id = NECP_NULL_SERVICE_ID;

    return (&null_mapping);
}
static struct necp_uuid_id_mapping *
necp_uuid_lookup_service_id_locked(uuid_t uuid)
{
    struct necp_uuid_id_mapping *searchentry = NULL;
    struct necp_uuid_id_mapping *foundentry = NULL;

    if (uuid_is_null(uuid)) {
        return necp_uuid_get_null_service_id_mapping();
    }

    LIST_FOREACH(searchentry, &necp_uuid_service_id_list, chain) {
        if (uuid_compare(searchentry->uuid, uuid) == 0) {
            foundentry = searchentry;
            break;
        }
    }

    return (foundentry);
}
static struct necp_uuid_id_mapping *
necp_uuid_lookup_uuid_with_service_id_locked(u_int32_t local_id)
{
    struct necp_uuid_id_mapping *searchentry = NULL;
    struct necp_uuid_id_mapping *foundentry = NULL;

    if (local_id == NECP_NULL_SERVICE_ID) {
        return necp_uuid_get_null_service_id_mapping();
    }

    LIST_FOREACH(searchentry, &necp_uuid_service_id_list, chain) {
        if (searchentry->id == local_id) {
            foundentry = searchentry;
            break;
        }
    }

    return (foundentry);
}
static u_int32_t
necp_create_uuid_service_id_mapping(uuid_t uuid)
{
    u_int32_t local_id = 0;
    struct necp_uuid_id_mapping *existing_mapping = NULL;

    if (uuid_is_null(uuid)) {
        return (NECP_NULL_SERVICE_ID);
    }

    LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

    existing_mapping = necp_uuid_lookup_service_id_locked(uuid);
    if (existing_mapping != NULL) {
        local_id = existing_mapping->id;
        existing_mapping->refcount++;
    } else {
        struct necp_uuid_id_mapping *new_mapping = NULL;
        MALLOC(new_mapping, struct necp_uuid_id_mapping *, sizeof(*new_mapping), M_NECP, M_WAITOK);
        if (new_mapping != NULL) {
            uuid_copy(new_mapping->uuid, uuid);
            new_mapping->id = necp_get_new_uuid_id();
            new_mapping->refcount = 1;

            LIST_INSERT_HEAD(&necp_uuid_service_id_list, new_mapping, chain);

            local_id = new_mapping->id;
        }
    }

    return (local_id);
}
static bool
necp_remove_uuid_service_id_mapping(uuid_t uuid)
{
    struct necp_uuid_id_mapping *existing_mapping = NULL;

    if (uuid_is_null(uuid)) {
        return (TRUE);
    }

    LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

    existing_mapping = necp_uuid_lookup_app_id_locked(uuid);
    if (existing_mapping != NULL) {
        if (--existing_mapping->refcount == 0) {
            LIST_REMOVE(existing_mapping, chain);
            FREE(existing_mapping, M_NECP);
        }
        return (TRUE);
    }

    return (FALSE);
}
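/*
 * When the set of app-ID UUIDs referenced by policies changes, the
 * proc_uuid_policy table is cleared and repopulated with every mapping whose
 * table_refcount is positive, after which the dirty flag is reset.
 */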
static bool
necp_kernel_socket_policies_update_uuid_table(void)
{
    LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

    if (necp_uuid_app_id_mappings_dirty) {
        if (proc_uuid_policy_kernel(PROC_UUID_POLICY_OPERATION_CLEAR, NULL, PROC_UUID_NECP_APP_POLICY) < 0) {
            NECPLOG0(LOG_DEBUG, "Error clearing uuids from policy table\n");
            return (FALSE);
        }

        if (necp_num_uuid_app_id_mappings > 0) {
            struct necp_uuid_id_mapping_head *uuid_list_head = NULL;
            for (uuid_list_head = &necp_uuid_app_id_hashtbl[necp_uuid_app_id_hash_num_buckets - 1]; uuid_list_head >= necp_uuid_app_id_hashtbl; uuid_list_head--) {
                struct necp_uuid_id_mapping *mapping = NULL;
                LIST_FOREACH(mapping, uuid_list_head, chain) {
                    if (mapping->table_refcount > 0 &&
                        proc_uuid_policy_kernel(PROC_UUID_POLICY_OPERATION_ADD, mapping->uuid, PROC_UUID_NECP_APP_POLICY) < 0) {
                        NECPLOG0(LOG_DEBUG, "Error adding uuid to policy table\n");
                    }
                }
            }
        }

        necp_uuid_app_id_mappings_dirty = FALSE;
    }

    return (TRUE);
}
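/*
 * IP output policies mirror the socket-level policies but match at the IP
 * layer; only the conditions in NECP_KERNEL_VALID_IP_OUTPUT_CONDITIONS below
 * are honored, and mutually exclusive conditions (for example an end address
 * together with a prefix) are sanitized out of the mask when a policy is
 * added.
 */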
#define NECP_KERNEL_VALID_IP_OUTPUT_CONDITIONS (NECP_KERNEL_CONDITION_ALL_INTERFACES | NECP_KERNEL_CONDITION_BOUND_INTERFACE | NECP_KERNEL_CONDITION_PROTOCOL | NECP_KERNEL_CONDITION_LOCAL_START | NECP_KERNEL_CONDITION_LOCAL_END | NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_REMOTE_START | NECP_KERNEL_CONDITION_REMOTE_END | NECP_KERNEL_CONDITION_REMOTE_PREFIX | NECP_KERNEL_CONDITION_POLICY_ID | NECP_KERNEL_CONDITION_LAST_INTERFACE)
static necp_kernel_policy_id
necp_kernel_ip_output_policy_add(necp_policy_id parent_policy_id, necp_policy_order order, necp_policy_order suborder, u_int32_t session_order, int session_pid, u_int32_t condition_mask, u_int32_t condition_negated_mask, necp_kernel_policy_id cond_policy_id, ifnet_t cond_bound_interface, u_int32_t cond_last_interface_index, u_int16_t cond_protocol, union necp_sockaddr_union *cond_local_start, union necp_sockaddr_union *cond_local_end, u_int8_t cond_local_prefix, union necp_sockaddr_union *cond_remote_start, union necp_sockaddr_union *cond_remote_end, u_int8_t cond_remote_prefix, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter)
{
    struct necp_kernel_ip_output_policy *new_kernel_policy = NULL;
    struct necp_kernel_ip_output_policy *tmp_kernel_policy = NULL;

    MALLOC_ZONE(new_kernel_policy, struct necp_kernel_ip_output_policy *, sizeof(*new_kernel_policy), M_NECP_IP_POLICY, M_WAITOK);
    if (new_kernel_policy == NULL) {
        goto done;
    }

    memset(new_kernel_policy, 0, sizeof(*new_kernel_policy)); // M_ZERO is not supported for MALLOC_ZONE
    new_kernel_policy->parent_policy_id = parent_policy_id;
    new_kernel_policy->id = necp_kernel_policy_get_new_id(false);
    new_kernel_policy->suborder = suborder;
    new_kernel_policy->order = order;
    new_kernel_policy->session_order = session_order;
    new_kernel_policy->session_pid = session_pid;

    // Sanitize condition mask
    new_kernel_policy->condition_mask = (condition_mask & NECP_KERNEL_VALID_IP_OUTPUT_CONDITIONS);
    if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES) && (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE)) {
        new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_BOUND_INTERFACE;
    }
    if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) && (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX)) {
        new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_LOCAL_PREFIX;
    }
    if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) && (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX)) {
        new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_REMOTE_PREFIX;
    }
    new_kernel_policy->condition_negated_mask = condition_negated_mask & new_kernel_policy->condition_mask;

    // Set condition values
    if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID) {
        new_kernel_policy->cond_policy_id = cond_policy_id;
    }
    if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
        if (cond_bound_interface) {
            ifnet_reference(cond_bound_interface);
        }
        new_kernel_policy->cond_bound_interface = cond_bound_interface;
    }
    if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LAST_INTERFACE) {
        new_kernel_policy->cond_last_interface_index = cond_last_interface_index;
    }
    if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
        new_kernel_policy->cond_protocol = cond_protocol;
    }
    if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
        memcpy(&new_kernel_policy->cond_local_start, cond_local_start, cond_local_start->sa.sa_len);
    }
    if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
        memcpy(&new_kernel_policy->cond_local_end, cond_local_end, cond_local_end->sa.sa_len);
    }
    if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
        new_kernel_policy->cond_local_prefix = cond_local_prefix;
    }
    if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
        memcpy(&new_kernel_policy->cond_remote_start, cond_remote_start, cond_remote_start->sa.sa_len);
    }
    if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
        memcpy(&new_kernel_policy->cond_remote_end, cond_remote_end, cond_remote_end->sa.sa_len);
    }
    if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
        new_kernel_policy->cond_remote_prefix = cond_remote_prefix;
    }

    new_kernel_policy->result = result;
    memcpy(&new_kernel_policy->result_parameter, &result_parameter, sizeof(result_parameter));

    if (necp_debug) {
        NECPLOG(LOG_DEBUG, "Added kernel policy: ip output, id=%d, mask=%x\n", new_kernel_policy->id, new_kernel_policy->condition_mask);
    }
    LIST_INSERT_SORTED_THRICE_ASCENDING(&necp_kernel_ip_output_policies, new_kernel_policy, chain, session_order, order, suborder, tmp_kernel_policy);
done:
    return (new_kernel_policy ? new_kernel_policy->id : 0);
}
static struct necp_kernel_ip_output_policy *
necp_kernel_ip_output_policy_find(necp_kernel_policy_id policy_id)
{
    struct necp_kernel_ip_output_policy *kernel_policy = NULL;
    struct necp_kernel_ip_output_policy *tmp_kernel_policy = NULL;

    if (policy_id == 0) {
        return (NULL);
    }

    LIST_FOREACH_SAFE(kernel_policy, &necp_kernel_ip_output_policies, chain, tmp_kernel_policy) {
        if (kernel_policy->id == policy_id) {
            return (kernel_policy);
        }
    }

    return (NULL);
}
static bool
necp_kernel_ip_output_policy_delete(necp_kernel_policy_id policy_id)
{
    struct necp_kernel_ip_output_policy *policy = NULL;

    LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

    policy = necp_kernel_ip_output_policy_find(policy_id);
    if (policy) {
        LIST_REMOVE(policy, chain);

        if (policy->cond_bound_interface) {
            ifnet_release(policy->cond_bound_interface);
            policy->cond_bound_interface = NULL;
        }

        FREE_ZONE(policy, sizeof(*policy), M_NECP_IP_POLICY);
        return (TRUE);
    }

    return (FALSE);
}
static void
necp_kernel_ip_output_policies_dump_all(void)
{
    if (necp_debug) {
        struct necp_kernel_ip_output_policy *policy = NULL;
        int policy_i;
        int id_i;
        char result_string[MAX_RESULT_STRING_LEN];
        char proc_name_string[MAXCOMLEN + 1];
        memset(result_string, 0, MAX_RESULT_STRING_LEN);
        memset(proc_name_string, 0, MAXCOMLEN + 1);

        NECPLOG0(LOG_DEBUG, "NECP IP Output Policies:\n");
        NECPLOG0(LOG_DEBUG, "-----------\n");
        for (id_i = 0; id_i < NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS; id_i++) {
            NECPLOG(LOG_DEBUG, " ID Bucket: %d\n", id_i);
            for (policy_i = 0; necp_kernel_ip_output_policies_map[id_i] != NULL && (necp_kernel_ip_output_policies_map[id_i])[policy_i] != NULL; policy_i++) {
                policy = (necp_kernel_ip_output_policies_map[id_i])[policy_i];
                proc_name(policy->session_pid, proc_name_string, MAXCOMLEN);
                NECPLOG(LOG_DEBUG, "\t%3d. Policy ID: %5d\tProcess: %10.10s\tOrder: %04d.%04d.%d\tMask: %5x\tResult: %s\n", policy_i, policy->id, proc_name_string, policy->session_order, policy->order, policy->suborder, policy->condition_mask, necp_get_result_description(result_string, policy->result, policy->result_parameter));
            }
            NECPLOG0(LOG_DEBUG, "-----------\n");
        }
    }
}
static bool
necp_kernel_ip_output_policy_results_overlap(struct necp_kernel_ip_output_policy *upper_policy, struct necp_kernel_ip_output_policy *lower_policy)
{
    if (upper_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
        if (upper_policy->session_order != lower_policy->session_order) {
            // A skip cannot override a policy of a different session
            return (FALSE);
        } else {
            if (upper_policy->result_parameter.skip_policy_order == 0 ||
                lower_policy->order >= upper_policy->result_parameter.skip_policy_order) {
                // This policy is beyond the skip
                return (FALSE);
            } else {
                // This policy is inside the skip
                return (TRUE);
            }
        }
    }

    // All other IP Output policy results (drop, tunnel, hard pass) currently overlap
    return (TRUE);
}
static bool
necp_kernel_ip_output_policy_is_unnecessary(struct necp_kernel_ip_output_policy *policy, struct necp_kernel_ip_output_policy **policy_array, int valid_indices)
{
    bool can_skip = FALSE;
    u_int32_t highest_skip_session_order = 0;
    u_int32_t highest_skip_order = 0;
    int i;
    for (i = 0; i < valid_indices; i++) {
        struct necp_kernel_ip_output_policy *compared_policy = policy_array[i];

        // For policies in a skip window, we can't mark conflicting policies as unnecessary
        if (can_skip) {
            if (highest_skip_session_order != compared_policy->session_order ||
                (highest_skip_order != 0 && compared_policy->order >= highest_skip_order)) {
                // If we've moved on to the next session, or passed the skip window
                highest_skip_session_order = 0;
                highest_skip_order = 0;
                can_skip = FALSE;
            } else {
                // If this policy is also a skip, it can increase the skip window
                if (compared_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
                    if (compared_policy->result_parameter.skip_policy_order > highest_skip_order) {
                        highest_skip_order = compared_policy->result_parameter.skip_policy_order;
                    }
                }
                continue;
            }
        }

        if (compared_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
            // This policy is a skip. Set the skip window accordingly
            can_skip = TRUE;
            highest_skip_session_order = compared_policy->session_order;
            highest_skip_order = compared_policy->result_parameter.skip_policy_order;
        }

        // The result of the compared policy must be able to block out this policy result
        if (!necp_kernel_ip_output_policy_results_overlap(compared_policy, policy)) {
            continue;
        }

        // If new policy matches All Interfaces, compared policy must also
        if ((policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES) && !(compared_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES)) {
            continue;
        }

        // Default makes lower policies unnecessary always
        if (compared_policy->condition_mask == 0) {
            return (TRUE);
        }

        // Compared must be more general than policy, and include only conditions within policy
        if ((policy->condition_mask & compared_policy->condition_mask) != compared_policy->condition_mask) {
            continue;
        }

        // Negative conditions must match for the overlapping conditions
        if ((policy->condition_negated_mask & compared_policy->condition_mask) != (compared_policy->condition_negated_mask & compared_policy->condition_mask)) {
            continue;
        }

        if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID &&
            compared_policy->cond_policy_id != policy->cond_policy_id) {
            continue;
        }

        if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE &&
            compared_policy->cond_bound_interface != policy->cond_bound_interface) {
            continue;
        }

        if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL &&
            compared_policy->cond_protocol != policy->cond_protocol) {
            continue;
        }

        if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
            if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
                if (!necp_is_range_in_range((struct sockaddr *)&policy->cond_local_start, (struct sockaddr *)&policy->cond_local_end, (struct sockaddr *)&compared_policy->cond_local_start, (struct sockaddr *)&compared_policy->cond_local_end)) {
                    continue;
                }
            } else if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
                if (compared_policy->cond_local_prefix > policy->cond_local_prefix ||
                    !necp_is_addr_in_subnet((struct sockaddr *)&policy->cond_local_start, (struct sockaddr *)&compared_policy->cond_local_start, compared_policy->cond_local_prefix)) {
                    continue;
                }
            }
        }

        if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
            if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
                if (!necp_is_range_in_range((struct sockaddr *)&policy->cond_remote_start, (struct sockaddr *)&policy->cond_remote_end, (struct sockaddr *)&compared_policy->cond_remote_start, (struct sockaddr *)&compared_policy->cond_remote_end)) {
                    continue;
                }
            } else if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
                if (compared_policy->cond_remote_prefix > policy->cond_remote_prefix ||
                    !necp_is_addr_in_subnet((struct sockaddr *)&policy->cond_remote_start, (struct sockaddr *)&compared_policy->cond_remote_start, compared_policy->cond_remote_prefix)) {
                    continue;
                }
            }
        }

        return (TRUE);
    }

    return (FALSE);
}
static bool
necp_kernel_ip_output_policies_reprocess(void)
{
    int i;
    int bucket_allocation_counts[NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS];
    int bucket_current_free_index[NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS];
    struct necp_kernel_ip_output_policy *kernel_policy = NULL;

    LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

    // Reset mask and counts
    necp_kernel_ip_output_policies_condition_mask = 0;
    necp_kernel_ip_output_policies_count = 0;
    necp_kernel_ip_output_policies_non_id_count = 0;

    for (i = 0; i < NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS; i++) {
        if (necp_kernel_ip_output_policies_map[i] != NULL) {
            FREE(necp_kernel_ip_output_policies_map[i], M_NECP);
            necp_kernel_ip_output_policies_map[i] = NULL;
        }

        bucket_allocation_counts[i] = 0;
    }

    LIST_FOREACH(kernel_policy, &necp_kernel_ip_output_policies, chain) {
        necp_kernel_ip_output_policies_condition_mask |= kernel_policy->condition_mask;
        necp_kernel_ip_output_policies_count++;

        // Update bucket counts
        if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID)) {
            necp_kernel_ip_output_policies_non_id_count++;
            for (i = 0; i < NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS; i++) {
                bucket_allocation_counts[i]++;
            }
        } else {
            bucket_allocation_counts[NECP_IP_OUTPUT_MAP_ID_TO_BUCKET(kernel_policy->cond_policy_id)]++;
        }
    }

    for (i = 0; i < NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS; i++) {
        if (bucket_allocation_counts[i] > 0) {
            // Allocate a NULL-terminated array of policy pointers for each bucket
            MALLOC(necp_kernel_ip_output_policies_map[i], struct necp_kernel_ip_output_policy **, sizeof(struct necp_kernel_ip_output_policy *) * (bucket_allocation_counts[i] + 1), M_NECP, M_WAITOK);
            if (necp_kernel_ip_output_policies_map[i] == NULL) {
                goto fail;
            }

            // Initialize the first entry to NULL
            (necp_kernel_ip_output_policies_map[i])[0] = NULL;
        }
        bucket_current_free_index[i] = 0;
    }

    LIST_FOREACH(kernel_policy, &necp_kernel_ip_output_policies, chain) {
        // Insert pointers into map
        if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID)) {
            for (i = 0; i < NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS; i++) {
                if (!necp_kernel_ip_output_policy_is_unnecessary(kernel_policy, necp_kernel_ip_output_policies_map[i], bucket_current_free_index[i])) {
                    (necp_kernel_ip_output_policies_map[i])[(bucket_current_free_index[i])] = kernel_policy;
                    bucket_current_free_index[i]++;
                    (necp_kernel_ip_output_policies_map[i])[(bucket_current_free_index[i])] = NULL;
                }
            }
        } else {
            i = NECP_IP_OUTPUT_MAP_ID_TO_BUCKET(kernel_policy->cond_policy_id);
            if (!necp_kernel_ip_output_policy_is_unnecessary(kernel_policy, necp_kernel_ip_output_policies_map[i], bucket_current_free_index[i])) {
                (necp_kernel_ip_output_policies_map[i])[(bucket_current_free_index[i])] = kernel_policy;
                bucket_current_free_index[i]++;
                (necp_kernel_ip_output_policies_map[i])[(bucket_current_free_index[i])] = NULL;
            }
        }
    }
    necp_kernel_ip_output_policies_dump_all();
    return (TRUE);

fail:
    // Free memory, reset mask to 0
    necp_kernel_ip_output_policies_condition_mask = 0;
    necp_kernel_ip_output_policies_count = 0;
    necp_kernel_ip_output_policies_non_id_count = 0;
    for (i = 0; i < NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS; i++) {
        if (necp_kernel_ip_output_policies_map[i] != NULL) {
            FREE(necp_kernel_ip_output_policies_map[i], M_NECP);
            necp_kernel_ip_output_policies_map[i] = NULL;
        }
    }
    return (FALSE);
}
// Outbound Policy Matching
// ---------------------
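/*
 * Domain conditions are matched against hostnames by comparing dot counts:
 * equal counts require a case-insensitive exact match, while a shorter domain
 * is treated as a suffix that must be preceded by a '.' in the hostname, so
 * "example.com" matches "www.example.com" but not "badexample.com".
 */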
struct substring {
    char *string;
    size_t length;
};

static struct substring
necp_trim_dots_and_stars(char *string, size_t length)
{
    struct substring sub;
    sub.string = string;
    sub.length = string ? length : 0;

    while (sub.length && (sub.string[0] == '.' || sub.string[0] == '*')) {
        sub.string++;
        sub.length--;
    }

    while (sub.length && (sub.string[sub.length - 1] == '.' || sub.string[sub.length - 1] == '*')) {
        sub.length--;
    }

    return (sub);
}
static char *
necp_create_trimmed_domain(char *string, size_t length)
{
    char *trimmed_domain = NULL;
    struct substring sub = necp_trim_dots_and_stars(string, length);

    MALLOC(trimmed_domain, char *, sub.length + 1, M_NECP, M_WAITOK);
    if (trimmed_domain == NULL) {
        return (NULL);
    }

    memcpy(trimmed_domain, sub.string, sub.length);
    trimmed_domain[sub.length] = 0;

    return (trimmed_domain);
}
static int
necp_count_dots(char *string, size_t length)
{
    int dot_count = 0;
    size_t i = 0;

    for (i = 0; i < length; i++) {
        if (string[i] == '.') {
            dot_count++;
        }
    }

    return (dot_count);
}
static bool
necp_check_suffix(struct substring parent, struct substring suffix, bool require_dot_before_suffix)
{
    if (parent.length <= suffix.length) {
        return (FALSE);
    }

    size_t length_difference = (parent.length - suffix.length);

    if (require_dot_before_suffix) {
        if (((char *)(parent.string + length_difference - 1))[0] != '.') {
            return (FALSE);
        }
    }

    // strncasecmp does case-insensitive check for all UTF-8 strings (ignores non-ASCII characters)
    return (strncasecmp(parent.string + length_difference, suffix.string, suffix.length) == 0);
}
static bool
necp_hostname_matches_domain(struct substring hostname_substring, u_int8_t hostname_dot_count, char *domain, u_int8_t domain_dot_count)
{
    if (hostname_substring.string == NULL || domain == NULL) {
        return (hostname_substring.string == domain);
    }

    struct substring domain_substring;
    domain_substring.string = domain;
    domain_substring.length = strlen(domain);

    if (hostname_dot_count == domain_dot_count) {
        // strncasecmp does case-insensitive check for all UTF-8 strings (ignores non-ASCII characters)
        if (hostname_substring.length == domain_substring.length &&
            strncasecmp(hostname_substring.string, domain_substring.string, hostname_substring.length) == 0) {
            return (TRUE);
        }
    } else if (domain_dot_count < hostname_dot_count) {
        if (necp_check_suffix(hostname_substring, domain_substring, TRUE)) {
            return (TRUE);
        }
    }

    return (FALSE);
}
static char *
necp_copy_string(char *string, size_t length)
{
    char *copied_string = NULL;

    MALLOC(copied_string, char *, length + 1, M_NECP, M_WAITOK);
    if (copied_string == NULL) {
        return (NULL);
    }

    memcpy(copied_string, string, length);
    copied_string[length] = 0;

    return (copied_string);
}
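/*
 * necp_application_fillout_info_locked() condenses the parsed client
 * parameters into a necp_socket_info, resolving UUIDs and account strings to
 * their interned IDs. Fields are only filled in when some loaded policy
 * actually uses the corresponding condition, as indicated by the aggregate
 * application condition mask.
 */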
#define NECP_KERNEL_ADDRESS_TYPE_CONDITIONS (NECP_KERNEL_CONDITION_LOCAL_START | NECP_KERNEL_CONDITION_LOCAL_END | NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_REMOTE_START | NECP_KERNEL_CONDITION_REMOTE_END | NECP_KERNEL_CONDITION_REMOTE_PREFIX)
static void
necp_application_fillout_info_locked(uuid_t application_uuid, uuid_t real_application_uuid, char *account, char *domain, pid_t pid, uid_t uid, u_int16_t protocol, u_int32_t bound_interface_index, u_int32_t traffic_class, union necp_sockaddr_union *local_addr, union necp_sockaddr_union *remote_addr, proc_t proc, struct necp_socket_info *info)
{
    memset(info, 0, sizeof(struct necp_socket_info));

    info->pid = pid;
    info->uid = uid;
    info->protocol = protocol;
    info->bound_interface_index = bound_interface_index;
    info->traffic_class = traffic_class;

    if (necp_kernel_application_policies_condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT && proc != NULL) {
        info->cred_result = priv_check_cred(proc_ucred(proc), PRIV_NET_PRIVILEGED_NECP_MATCH, 0);
    }

    if (necp_kernel_application_policies_condition_mask & NECP_KERNEL_CONDITION_APP_ID && !uuid_is_null(application_uuid)) {
        struct necp_uuid_id_mapping *existing_mapping = necp_uuid_lookup_app_id_locked(application_uuid);
        if (existing_mapping) {
            info->application_id = existing_mapping->id;
        }
    }

    if (necp_kernel_application_policies_condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID && !uuid_is_null(real_application_uuid)) {
        if (uuid_compare(application_uuid, real_application_uuid) == 0) {
            info->real_application_id = info->application_id;
        } else {
            struct necp_uuid_id_mapping *existing_mapping = necp_uuid_lookup_app_id_locked(real_application_uuid);
            if (existing_mapping) {
                info->real_application_id = existing_mapping->id;
            }
        }
    }

    if (necp_kernel_application_policies_condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID && account != NULL) {
        struct necp_string_id_mapping *existing_mapping = necp_lookup_string_to_id_locked(&necp_account_id_list, account);
        if (existing_mapping) {
            info->account_id = existing_mapping->id;
        }
    }

    if (necp_kernel_application_policies_condition_mask & NECP_KERNEL_CONDITION_DOMAIN) {
        info->domain = domain;
    }

    if (necp_kernel_application_policies_condition_mask & NECP_KERNEL_ADDRESS_TYPE_CONDITIONS) {
        if (local_addr && local_addr->sa.sa_len > 0) {
            memcpy(&info->local_addr, local_addr, local_addr->sa.sa_len);
        }
        if (remote_addr && remote_addr->sa.sa_len > 0) {
            memcpy(&info->remote_addr, remote_addr, remote_addr->sa.sa_len);
        }
    }
}
static void
necp_send_application_interface_denied_event(pid_t pid, uuid_t proc_uuid, u_int32_t if_functional_type)
{
    struct kev_netpolicy_ifdenied ev_ifdenied;

    bzero(&ev_ifdenied, sizeof(ev_ifdenied));

    ev_ifdenied.ev_data.epid = pid;
    uuid_copy(ev_ifdenied.ev_data.euuid, proc_uuid);
    ev_ifdenied.ev_if_functional_type = if_functional_type;

    netpolicy_post_msg(KEV_NETPOLICY_IFDENIED, &ev_ifdenied.ev_data, sizeof(ev_ifdenied));
}
extern char *proc_name_address(void *p);

#define NECP_VERIFY_DELEGATION_ENTITLEMENT(_p, _d) \
    if (!has_checked_delegation_entitlement) { \
        has_delegation_entitlement = (priv_check_cred(proc_ucred(_p), PRIV_NET_PRIVILEGED_SOCKET_DELEGATE, 0) == 0); \
        has_checked_delegation_entitlement = TRUE; \
    } \
    if (!has_delegation_entitlement) { \
        NECPLOG(LOG_ERR, "%s(%d) does not hold the necessary entitlement to delegate network traffic for other processes by %s", \
                proc_name_address(_p), proc_pid(_p), _d); \
        break; \
    }
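/*
 * necp_application_find_policy_match_internal() is the app-layer matching
 * entry point: it parses the client's TLV parameters (requiring the socket
 * delegation entitlement before accepting a foreign pid, uid, or UUID), fills
 * out a necp_socket_info, matches it against the app-layer policy map, and
 * then evaluates routing for the resulting interface.
 */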
static int
necp_application_find_policy_match_internal(proc_t proc,
                                            u_int8_t *parameters,
                                            u_int32_t parameters_size,
                                            struct necp_aggregate_result *returned_result,
                                            u_int32_t *flags,
                                            u_int required_interface_index,
                                            const union necp_sockaddr_union *override_local_addr,
                                            const union necp_sockaddr_union *override_remote_addr,
                                            struct rtentry **returned_route, bool ignore_address)
{
    int error = 0;
    size_t offset = 0;

    struct necp_kernel_socket_policy *matched_policy = NULL;
    struct necp_socket_info info;
    necp_kernel_policy_filter filter_control_unit = 0;
    u_int32_t route_rule_id = 0;
    necp_kernel_policy_result service_action = 0;
    necp_kernel_policy_service service = { 0, 0 };

    u_int16_t protocol = 0;
    u_int32_t bound_interface_index = required_interface_index;
    u_int32_t traffic_class = 0;
    u_int32_t client_flags = 0;
    union necp_sockaddr_union local_addr;
    union necp_sockaddr_union remote_addr;
    bool no_remote_addr = FALSE;
    u_int8_t remote_family = 0;
    bool no_local_addr = FALSE;

    if (override_local_addr) {
        memcpy(&local_addr, override_local_addr, sizeof(local_addr));
    } else {
        memset(&local_addr, 0, sizeof(local_addr));
    }
    if (override_remote_addr) {
        memcpy(&remote_addr, override_remote_addr, sizeof(remote_addr));
    } else {
        memset(&remote_addr, 0, sizeof(remote_addr));
    }

    // Initialize UID, PID, and UUIDs to the current process
    uid_t uid = kauth_cred_getuid(proc_ucred(proc));
    pid_t pid = proc_pid(proc);
    uuid_t application_uuid;
    uuid_clear(application_uuid);
    uuid_t real_application_uuid;
    uuid_clear(real_application_uuid);
    proc_getexecutableuuid(proc, real_application_uuid, sizeof(real_application_uuid));
    uuid_copy(application_uuid, real_application_uuid);

    char *domain = NULL;
    char *account = NULL;

    u_int32_t netagent_ids[NECP_MAX_NETAGENTS];
    memset(&netagent_ids, 0, sizeof(netagent_ids));
    int netagent_cursor;

    bool has_checked_delegation_entitlement = FALSE;
    bool has_delegation_entitlement = FALSE;

    if (returned_result == NULL) {
        return (EINVAL);
    }

    memset(returned_result, 0, sizeof(struct necp_aggregate_result));

    lck_rw_lock_shared(&necp_kernel_policy_lock);
    if (necp_kernel_application_policies_count == 0) {
        if (necp_drop_all_order > 0) {
            returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_DROP;
            lck_rw_done(&necp_kernel_policy_lock);
            return (0);
        }
    }
    lck_rw_done(&necp_kernel_policy_lock);

    while ((offset + sizeof(u_int8_t) + sizeof(u_int32_t)) <= parameters_size) {
        u_int8_t type = necp_buffer_get_tlv_type(parameters, offset);
        u_int32_t length = necp_buffer_get_tlv_length(parameters, offset);

        if (length > (parameters_size - (offset + sizeof(u_int8_t) + sizeof(u_int32_t)))) {
            // If the length is larger than what can fit in the remaining parameters size, bail
            NECPLOG(LOG_ERR, "Invalid TLV length (%u)", length);
            break;
        }

        if (length > 0) {
            u_int8_t *value = necp_buffer_get_tlv_value(parameters, offset, NULL);
            if (value != NULL) {
                switch (type) {
                    case NECP_CLIENT_PARAMETER_APPLICATION: {
                        if (length >= sizeof(uuid_t)) {
                            if (uuid_compare(application_uuid, value) == 0) {
                                break;
                            }

                            NECP_VERIFY_DELEGATION_ENTITLEMENT(proc, "euuid");

                            uuid_copy(application_uuid, value);
                        }
                        break;
                    }
                    case NECP_CLIENT_PARAMETER_REAL_APPLICATION: {
                        if (length >= sizeof(uuid_t)) {
                            if (uuid_compare(real_application_uuid, value) == 0) {
                                break;
                            }

                            NECP_VERIFY_DELEGATION_ENTITLEMENT(proc, "uuid");

                            uuid_copy(real_application_uuid, value);
                        }
                        break;
                    }
                    case NECP_CLIENT_PARAMETER_PID: {
                        if (length >= sizeof(pid_t)) {
                            if (memcmp(&pid, value, sizeof(pid_t)) == 0) {
                                break;
                            }

                            NECP_VERIFY_DELEGATION_ENTITLEMENT(proc, "pid");

                            memcpy(&pid, value, sizeof(pid_t));
                        }
                        break;
                    }
                    case NECP_CLIENT_PARAMETER_UID: {
                        if (length >= sizeof(uid_t)) {
                            if (memcmp(&uid, value, sizeof(uid_t)) == 0) {
                                break;
                            }

                            NECP_VERIFY_DELEGATION_ENTITLEMENT(proc, "uid");

                            memcpy(&uid, value, sizeof(uid_t));
                        }
                        break;
                    }
                    case NECP_CLIENT_PARAMETER_DOMAIN: {
                        domain = (char *)value;
                        domain[length - 1] = 0;
                        break;
                    }
                    case NECP_CLIENT_PARAMETER_ACCOUNT: {
                        account = (char *)value;
                        account[length - 1] = 0;
                        break;
                    }
                    case NECP_CLIENT_PARAMETER_TRAFFIC_CLASS: {
                        if (length >= sizeof(u_int32_t)) {
                            memcpy(&traffic_class, value, sizeof(u_int32_t));
                        }
                        break;
                    }
                    case NECP_CLIENT_PARAMETER_IP_PROTOCOL: {
                        if (length >= sizeof(u_int16_t)) {
                            memcpy(&protocol, value, sizeof(u_int16_t));
                        }
                        break;
                    }
                    case NECP_CLIENT_PARAMETER_BOUND_INTERFACE: {
                        if (length <= IFXNAMSIZ && length > 0) {
                            ifnet_t bound_interface = NULL;
                            char interface_name[IFXNAMSIZ];
                            memcpy(interface_name, value, length);
                            interface_name[length - 1] = 0; // Make sure the string is NULL terminated
                            if (ifnet_find_by_name(interface_name, &bound_interface) == 0) {
                                bound_interface_index = bound_interface->if_index;
                                ifnet_release(bound_interface);
                            }
                        }
                        break;
                    }
                    case NECP_CLIENT_PARAMETER_LOCAL_ADDRESS: {
                        if (ignore_address) {
                            break;
                        }

                        if (length >= sizeof(struct necp_policy_condition_addr)) {
                            struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value;
                            if (necp_address_is_valid(&address_struct->address.sa)) {
                                memcpy(&local_addr, &address_struct->address, sizeof(address_struct->address));
                            }
                        }
                        break;
                    }
                    case NECP_CLIENT_PARAMETER_REMOTE_ADDRESS: {
                        if (ignore_address) {
                            break;
                        }

                        if (length >= sizeof(struct necp_policy_condition_addr)) {
                            struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value;
                            if (necp_address_is_valid(&address_struct->address.sa)) {
                                memcpy(&remote_addr, &address_struct->address, sizeof(address_struct->address));
                            }
                        }
                        break;
                    }
                    case NECP_CLIENT_PARAMETER_FLAGS: {
                        if (length >= sizeof(client_flags)) {
                            memcpy(&client_flags, value, sizeof(client_flags));
                        }
                        break;
                    }
                    default: {
                        break;
                    }
                }
            }
        }

        offset += sizeof(u_int8_t) + sizeof(u_int32_t) + length;
    }

    lck_rw_lock_shared(&necp_kernel_policy_lock);

    necp_application_fillout_info_locked(application_uuid, real_application_uuid, account, domain, pid, uid, protocol, bound_interface_index, traffic_class, &local_addr, &remote_addr, proc, &info);
    matched_policy = necp_socket_find_policy_match_with_info_locked(necp_kernel_socket_policies_app_layer_map, &info, &filter_control_unit, &route_rule_id, &service_action, &service, netagent_ids, NECP_MAX_NETAGENTS, proc);
    if (matched_policy) {
        returned_result->policy_id = matched_policy->id;
        returned_result->routing_result = matched_policy->result;
        memcpy(&returned_result->routing_result_parameter, &matched_policy->result_parameter, sizeof(returned_result->routing_result_parameter));
    } else if (necp_drop_all_order > 0) {
        // Mark socket as a drop if drop_all is set
        returned_result->policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
        returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_DROP;
    } else {
        returned_result->policy_id = 0;
        returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_NONE;
    }
    returned_result->filter_control_unit = filter_control_unit;
    returned_result->service_action = service_action;

    // Handle trigger service
    if (service.identifier != 0) {
        struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(service.identifier);
        if (mapping != NULL) {
            struct necp_service_registration *service_registration = NULL;
            uuid_copy(returned_result->service_uuid, mapping->uuid);
            returned_result->service_data = service.data;
            if (service.identifier == NECP_NULL_SERVICE_ID) {
                // NULL service is always 'registered'
                returned_result->service_flags |= NECP_SERVICE_FLAGS_REGISTERED;
            } else {
                LIST_FOREACH(service_registration, &necp_registered_service_list, kernel_chain) {
                    if (service.identifier == service_registration->service_id) {
                        returned_result->service_flags |= NECP_SERVICE_FLAGS_REGISTERED;
                        break;
                    }
                }
            }
        }
    }

    for (netagent_cursor = 0; netagent_cursor < NECP_MAX_NETAGENTS; netagent_cursor++) {
        struct necp_uuid_id_mapping *mapping = NULL;
        u_int32_t netagent_id = netagent_ids[netagent_cursor];
        if (netagent_id == 0) {
            break;
        }
        mapping = necp_uuid_lookup_uuid_with_service_id_locked(netagent_id);
        if (mapping != NULL) {
            uuid_copy(returned_result->netagents[netagent_cursor], mapping->uuid);
            returned_result->netagent_flags[netagent_cursor] = netagent_get_flags(mapping->uuid);
        }
    }

    // Do routing evaluation
    u_int output_bound_interface = bound_interface_index;
    if (returned_result->routing_result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED) {
        output_bound_interface = returned_result->routing_result_parameter.scoped_interface_index;
    } else if (returned_result->routing_result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL) {
        output_bound_interface = returned_result->routing_result_parameter.tunnel_interface_index;
    }

    if (local_addr.sa.sa_len == 0 ||
        (local_addr.sa.sa_family == AF_INET && local_addr.sin.sin_addr.s_addr == 0) ||
        (local_addr.sa.sa_family == AF_INET6 && IN6_IS_ADDR_UNSPECIFIED(&local_addr.sin6.sin6_addr))) {
        no_local_addr = TRUE;
    }

    if (remote_addr.sa.sa_len == 0 ||
        (remote_addr.sa.sa_family == AF_INET && remote_addr.sin.sin_addr.s_addr == 0) ||
        (remote_addr.sa.sa_family == AF_INET6 && IN6_IS_ADDR_UNSPECIFIED(&remote_addr.sin6.sin6_addr))) {
        no_remote_addr = TRUE;
        remote_family = remote_addr.sa.sa_family;
    }

    returned_result->routed_interface_index = 0;
    struct rtentry *rt = NULL;
    if (!no_local_addr && (client_flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER) != 0) {
        // Treat the output bound interface as the routed interface for local address
        // validation later.
        returned_result->routed_interface_index = output_bound_interface;
    } else {
        if (no_remote_addr) {
            memset(&remote_addr, 0, sizeof(remote_addr));
            if (remote_family == AF_INET6) {
                // Reset address to ::
                remote_addr.sa.sa_family = AF_INET6;
                remote_addr.sa.sa_len = sizeof(struct sockaddr_in6);
            } else {
                // Reset address to 0.0.0.0
                remote_addr.sa.sa_family = AF_INET;
                remote_addr.sa.sa_len = sizeof(struct sockaddr_in);
            }
        }

        rt = rtalloc1_scoped((struct sockaddr *)&remote_addr, 0, 0,
                             output_bound_interface);

        if (no_remote_addr && remote_family == 0 &&
            (rt
== NULL
|| rt
->rt_ifp
== NULL
)) {
6457 // Route lookup for default IPv4 failed, try IPv6
6459 // Cleanup old route if necessary
6465 // Reset address to ::
6466 memset(&remote_addr
, 0, sizeof(remote_addr
));
6467 remote_addr
.sa
.sa_family
= AF_INET6
;
6468 remote_addr
.sa
.sa_len
= sizeof(struct sockaddr_in6
);
6471 rt
= rtalloc1_scoped((struct sockaddr
*)&remote_addr
, 0, 0,
6472 output_bound_interface
);
6476 rt
->rt_ifp
!= NULL
) {
6477 returned_result
->routed_interface_index
= rt
->rt_ifp
->if_index
;
6479 * For local addresses, we allow the interface scope to be
6480 * either the loopback interface or the interface hosting the
6483 if (bound_interface_index
!= IFSCOPE_NONE
&&
6484 rt
->rt_ifa
!= NULL
&& rt
->rt_ifa
->ifa_ifp
&&
6485 (output_bound_interface
== lo_ifp
->if_index
||
6486 rt
->rt_ifp
->if_index
== lo_ifp
->if_index
||
6487 rt
->rt_ifa
->ifa_ifp
->if_index
== bound_interface_index
)) {
6488 struct sockaddr_storage dst
;
6489 unsigned int ifscope
= bound_interface_index
;
6492 * Transform dst into the internal routing table form
6494 (void) sa_copy((struct sockaddr
*)&remote_addr
,
6497 if ((rt
->rt_ifp
->if_index
== lo_ifp
->if_index
) ||
6498 rt_ifa_is_dst((struct sockaddr
*)&dst
, rt
->rt_ifa
))
6499 returned_result
->routed_interface_index
=
6500 bound_interface_index
;
6505 if (returned_result
->routed_interface_index
!= 0 &&
6506 returned_result
->routed_interface_index
!= lo_ifp
->if_index
&& // Loopback can accept any local address
6509 // Transform local_addr into the ifaddr form
6510 // IPv6 Scope IDs are always embedded in the ifaddr list
6511 struct sockaddr_storage local_address_sanitized
;
6512 u_int ifscope
= IFSCOPE_NONE
;
6513 (void)sa_copy(&local_addr
.sa
, &local_address_sanitized
, &ifscope
);
6514 SIN(&local_address_sanitized
)->sin_port
= 0;
6515 if (local_address_sanitized
.ss_family
== AF_INET6
) {
6516 SIN6(&local_address_sanitized
)->sin6_scope_id
= 0;
6519 // Validate local address on routed interface
6520 struct ifaddr
*ifa
= ifa_ifwithaddr_scoped((struct sockaddr
*)&local_address_sanitized
, returned_result
->routed_interface_index
);
6522 // Interface address not found, reject route
6523 returned_result
->routed_interface_index
= 0;
6529 ifaddr_release(ifa
);
6534 if (flags
!= NULL
) {
6535 if ((client_flags
& NECP_CLIENT_PARAMETER_FLAG_LISTENER
) == 0) {
6536 // Check for local/direct
6537 bool is_local
= FALSE
;
6538 if (rt
!= NULL
&& (rt
->rt_flags
& RTF_LOCAL
)) {
6540 } else if (returned_result
->routed_interface_index
!= 0 &&
6542 // Clean up the address before comparison with interface addresses
6544 // Transform remote_addr into the ifaddr form
6545 // IPv6 Scope IDs are always embedded in the ifaddr list
6546 struct sockaddr_storage remote_address_sanitized
;
6547 u_int ifscope
= IFSCOPE_NONE
;
6548 (void)sa_copy(&remote_addr
.sa
, &remote_address_sanitized
, &ifscope
);
6549 SIN(&remote_address_sanitized
)->sin_port
= 0;
6550 if (remote_address_sanitized
.ss_family
== AF_INET6
) {
6551 SIN6(&remote_address_sanitized
)->sin6_scope_id
= 0;
6554 // Check if remote address is an interface address
6555 struct ifaddr
*ifa
= ifa_ifwithaddr((struct sockaddr
*)&remote_address_sanitized
);
6556 if (ifa
!= NULL
&& ifa
->ifa_ifp
!= NULL
) {
6557 u_int if_index_for_remote_addr
= ifa
->ifa_ifp
->if_index
;
6558 if (if_index_for_remote_addr
== returned_result
->routed_interface_index
||
6559 if_index_for_remote_addr
== lo_ifp
->if_index
) {
6564 ifaddr_release(ifa
);
6570 *flags
|= (NECP_CLIENT_RESULT_FLAG_IS_LOCAL
| NECP_CLIENT_RESULT_FLAG_IS_DIRECT
);
6573 !(rt
->rt_flags
& RTF_GATEWAY
) &&
6574 (rt
->rt_ifa
&& rt
->rt_ifa
->ifa_ifp
&& !(rt
->rt_ifa
->ifa_ifp
->if_flags
& IFF_POINTOPOINT
))) {
6575 // Route is directly accessible
6576 *flags
|= NECP_CLIENT_RESULT_FLAG_IS_DIRECT
;
6581 rt
->rt_ifp
!= NULL
) {
6582 // Check probe status
6583 if (rt
->rt_ifp
->if_eflags
& IFEF_PROBE_CONNECTIVITY
) {
6584 *flags
|= NECP_CLIENT_RESULT_FLAG_PROBE_CONNECTIVITY
;
6587 if (rt
->rt_ifp
->if_type
== IFT_CELLULAR
) {
6588 struct if_cellular_status_v1
*ifsr
;
6590 ifnet_lock_shared(rt
->rt_ifp
);
6591 lck_rw_lock_exclusive(&rt
->rt_ifp
->if_link_status_lock
);
6593 if (rt
->rt_ifp
->if_link_status
!= NULL
) {
6594 ifsr
= &rt
->rt_ifp
->if_link_status
->ifsr_u
.ifsr_cell
.if_cell_u
.if_status_v1
;
6596 if (ifsr
->valid_bitmask
& IF_CELL_UL_MSS_RECOMMENDED_VALID
) {
6597 if (ifsr
->mss_recommended
== IF_CELL_UL_MSS_RECOMMENDED_NONE
) {
6598 returned_result
->mss_recommended
= NECP_CLIENT_RESULT_RECOMMENDED_MSS_NONE
;
6599 } else if (ifsr
->mss_recommended
== IF_CELL_UL_MSS_RECOMMENDED_MEDIUM
) {
6600 returned_result
->mss_recommended
= NECP_CLIENT_RESULT_RECOMMENDED_MSS_MEDIUM
;
6601 } else if (ifsr
->mss_recommended
== IF_CELL_UL_MSS_RECOMMENDED_LOW
) {
6602 returned_result
->mss_recommended
= NECP_CLIENT_RESULT_RECOMMENDED_MSS_LOW
;
6606 lck_rw_done(&rt
->rt_ifp
->if_link_status_lock
);
6607 ifnet_lock_done(rt
->rt_ifp
);
6610 // Check link quality
6611 if ((client_flags
& NECP_CLIENT_PARAMETER_FLAG_DISCRETIONARY
) &&
6612 (rt
->rt_ifp
->if_interface_state
.valid_bitmask
& IF_INTERFACE_STATE_LQM_STATE_VALID
) &&
6613 rt
->rt_ifp
->if_interface_state
.lqm_state
== IFNET_LQM_THRESH_ABORT
) {
6614 *flags
|= NECP_CLIENT_RESULT_FLAG_LINK_QUALITY_ABORT
;
6617 // Check QoS marking (fastlane)
6618 if (necp_update_qos_marking(rt
->rt_ifp
, route_rule_id
)) {
6619 *flags
|= NECP_CLIENT_RESULT_FLAG_ALLOW_QOS_MARKING
;
6624 if (returned_result
->routed_interface_index
!= 0) {
6625 union necp_sockaddr_union default_address
;
6626 struct rtentry
*v4Route
= NULL
;
6627 struct rtentry
*v6Route
= NULL
;
6629 memset(&default_address
, 0, sizeof(default_address
));
6631 // Reset address to 0.0.0.0
6632 default_address
.sa
.sa_family
= AF_INET
;
6633 default_address
.sa
.sa_len
= sizeof(struct sockaddr_in
);
6634 v4Route
= rtalloc1_scoped((struct sockaddr
*)&default_address
, 0, 0,
6635 returned_result
->routed_interface_index
);
6637 // Reset address to ::
6638 default_address
.sa
.sa_family
= AF_INET6
;
6639 default_address
.sa
.sa_len
= sizeof(struct sockaddr_in6
);
6640 v6Route
= rtalloc1_scoped((struct sockaddr
*)&default_address
, 0, 0,
6641 returned_result
->routed_interface_index
);
6643 if (v4Route
!= NULL
) {
6644 if (v4Route
->rt_ifp
!= NULL
) {
6645 *flags
|= NECP_CLIENT_RESULT_FLAG_HAS_IPV4
;
6651 if (v6Route
!= NULL
) {
6652 if (v6Route
->rt_ifp
!= NULL
) {
6653 *flags
|= NECP_CLIENT_RESULT_FLAG_HAS_IPV6
;
6661 u_int32_t interface_type_denied
= IFRTYPE_FUNCTIONAL_UNKNOWN
;
6662 bool route_is_allowed
= necp_route_is_allowed(rt
, NULL
, route_rule_id
, &interface_type_denied
);
6663 if (!route_is_allowed
) {
6664 // If the route is blocked, treat the lookup as a drop
6665 returned_result
->routing_result
= NECP_KERNEL_POLICY_RESULT_DROP
;
6666 memset(&returned_result
->routing_result_parameter
, 0, sizeof(returned_result
->routing_result_parameter
));
6668 if (interface_type_denied
!= IFRTYPE_FUNCTIONAL_UNKNOWN
) {
6669 necp_send_application_interface_denied_event(pid
, application_uuid
, interface_type_denied
);
6674 if (returned_route
!= NULL
) {
6675 *returned_route
= rt
;
6682 lck_rw_done(&necp_kernel_policy_lock
);
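// Evaluates a single kernel socket policy against the supplied socket attributes
// (application IDs, entitlement results, domain, account, pid, uid, traffic class,
// protocol, and local/remote addresses). Returns FALSE as soon as a required
// condition fails or a forbidden (negated) condition matches; returns TRUE only
// when every condition selected by the policy's condition_mask is satisfied.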
static bool
necp_socket_check_policy(struct necp_kernel_socket_policy *kernel_policy, necp_app_id app_id, necp_app_id real_app_id, errno_t cred_result, u_int32_t account_id, struct substring domain, u_int8_t domain_dot_count, pid_t pid, uid_t uid, u_int32_t bound_interface_index, u_int32_t traffic_class, u_int16_t protocol, union necp_sockaddr_union *local, union necp_sockaddr_union *remote, proc_t proc)
{
	if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES)) {
		if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
			u_int32_t cond_bound_interface_index = kernel_policy->cond_bound_interface ? kernel_policy->cond_bound_interface->if_index : 0;
			if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
				if (bound_interface_index == cond_bound_interface_index) {
					// No match, matches forbidden interface
					return (FALSE);
				}
			} else {
				if (bound_interface_index != cond_bound_interface_index) {
					// No match, does not match required interface
					return (FALSE);
				}
			}
		} else {
			if (bound_interface_index != 0) {
				// No match, requires a non-bound packet
				return (FALSE);
			}
		}
	}

	if (kernel_policy->condition_mask == 0) {
		return (TRUE);
	}

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID) {
		if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_APP_ID) {
			if (app_id == kernel_policy->cond_app_id) {
				// No match, matches forbidden application
				return (FALSE);
			}
		} else {
			if (app_id != kernel_policy->cond_app_id) {
				// No match, does not match required application
				return (FALSE);
			}
		}
	}

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
		if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
			if (real_app_id == kernel_policy->cond_real_app_id) {
				// No match, matches forbidden application
				return (FALSE);
			}
		} else {
			if (real_app_id != kernel_policy->cond_real_app_id) {
				// No match, does not match required application
				return (FALSE);
			}
		}
	}

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT) {
		if (cred_result != 0) {
			// Process is missing entitlement
			return (FALSE);
		}
	}

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT) {
		if (kernel_policy->cond_custom_entitlement_matched == necp_boolean_state_false) {
			// Process is missing entitlement based on previous check
			return (FALSE);
		} else if (kernel_policy->cond_custom_entitlement_matched == necp_boolean_state_unknown) {
			if (kernel_policy->cond_custom_entitlement != NULL) {
				if (proc == NULL) {
					// No process found, cannot check entitlement
					return (FALSE);
				}
				task_t task = proc_task(proc);
				if (task == NULL ||
				    !IOTaskHasEntitlement(task, kernel_policy->cond_custom_entitlement)) {
					// Process is missing custom entitlement
					kernel_policy->cond_custom_entitlement_matched = necp_boolean_state_false;
					return (FALSE);
				} else {
					kernel_policy->cond_custom_entitlement_matched = necp_boolean_state_true;
				}
			}
		}
	}

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_DOMAIN) {
		bool domain_matches = necp_hostname_matches_domain(domain, domain_dot_count, kernel_policy->cond_domain, kernel_policy->cond_domain_dot_count);
		if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_DOMAIN) {
			if (domain_matches) {
				// No match, matches forbidden domain
				return (FALSE);
			}
		} else {
			if (!domain_matches) {
				// No match, does not match required domain
				return (FALSE);
			}
		}
	}

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID) {
		if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID) {
			if (account_id == kernel_policy->cond_account_id) {
				// No match, matches forbidden account
				return (FALSE);
			}
		} else {
			if (account_id != kernel_policy->cond_account_id) {
				// No match, does not match required account
				return (FALSE);
			}
		}
	}

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PID) {
		if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_PID) {
			if (pid == kernel_policy->cond_pid) {
				// No match, matches forbidden pid
				return (FALSE);
			}
		} else {
			if (pid != kernel_policy->cond_pid) {
				// No match, does not match required pid
				return (FALSE);
			}
		}
	}

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_UID) {
		if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_UID) {
			if (uid == kernel_policy->cond_uid) {
				// No match, matches forbidden uid
				return (FALSE);
			}
		} else {
			if (uid != kernel_policy->cond_uid) {
				// No match, does not match required uid
				return (FALSE);
			}
		}
	}

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
		if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
			if (traffic_class >= kernel_policy->cond_traffic_class.start_tc &&
			    traffic_class <= kernel_policy->cond_traffic_class.end_tc) {
				// No match, matches forbidden traffic class
				return (FALSE);
			}
		} else {
			if (traffic_class < kernel_policy->cond_traffic_class.start_tc ||
			    traffic_class > kernel_policy->cond_traffic_class.end_tc) {
				// No match, does not match required traffic class
				return (FALSE);
			}
		}
	}

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
		if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
			if (protocol == kernel_policy->cond_protocol) {
				// No match, matches forbidden protocol
				return (FALSE);
			}
		} else {
			if (protocol != kernel_policy->cond_protocol) {
				// No match, does not match required protocol
				return (FALSE);
			}
		}
	}

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
		if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
			bool inRange = necp_is_addr_in_range((struct sockaddr *)local, (struct sockaddr *)&kernel_policy->cond_local_start, (struct sockaddr *)&kernel_policy->cond_local_end);
			if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
				if (inRange) {
					return (FALSE);
				}
			} else {
				if (!inRange) {
					return (FALSE);
				}
			}
		} else if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
			bool inSubnet = necp_is_addr_in_subnet((struct sockaddr *)local, (struct sockaddr *)&kernel_policy->cond_local_start, kernel_policy->cond_local_prefix);
			if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
				if (inSubnet) {
					return (FALSE);
				}
			} else {
				if (!inSubnet) {
					return (FALSE);
				}
			}
		}
	}

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
		if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
			bool inRange = necp_is_addr_in_range((struct sockaddr *)remote, (struct sockaddr *)&kernel_policy->cond_remote_start, (struct sockaddr *)&kernel_policy->cond_remote_end);
			if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
				if (inRange) {
					return (FALSE);
				}
			} else {
				if (!inRange) {
					return (FALSE);
				}
			}
		} else if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
			bool inSubnet = necp_is_addr_in_subnet((struct sockaddr *)remote, (struct sockaddr *)&kernel_policy->cond_remote_start, kernel_policy->cond_remote_prefix);
			if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
				if (inSubnet) {
					return (FALSE);
				}
			} else {
				if (!inSubnet) {
					return (FALSE);
				}
			}
		}
	}

	return (TRUE);
}
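// Hashes the filled-out socket info together with the current policy generation
// count, so a socket can cheaply detect whether its cached policy result is still
// valid for the same parameters and the same generation of the policy table.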
static inline u_int32_t
necp_socket_calc_flowhash_locked(struct necp_socket_info *info)
{
	return (net_flowhash(info, sizeof(*info), necp_kernel_socket_policies_gencount));
}
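// Collects the attributes used for matching a socket (pid, uid, traffic class,
// protocol, application and account IDs, domain, bound interface, and the
// local/remote addresses, honoring any override addresses) into a
// necp_socket_info structure. Only the fields required by the currently
// registered policy condition masks are populated.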
6923 necp_socket_fillout_info_locked(struct inpcb
*inp
, struct sockaddr
*override_local_addr
, struct sockaddr
*override_remote_addr
, u_int32_t override_bound_interface
, struct necp_socket_info
*info
)
6925 struct socket
*so
= NULL
;
6927 memset(info
, 0, sizeof(struct necp_socket_info
));
6929 so
= inp
->inp_socket
;
6931 if (necp_kernel_socket_policies_condition_mask
& NECP_KERNEL_CONDITION_PID
) {
6932 info
->pid
= ((so
->so_flags
& SOF_DELEGATED
) ? so
->e_pid
: so
->last_pid
);
6935 if (necp_kernel_socket_policies_condition_mask
& NECP_KERNEL_CONDITION_UID
) {
6936 info
->uid
= kauth_cred_getuid(so
->so_cred
);
6939 if (necp_kernel_socket_policies_condition_mask
& NECP_KERNEL_CONDITION_TRAFFIC_CLASS
) {
6940 info
->traffic_class
= so
->so_traffic_class
;
6943 if (necp_kernel_socket_policies_condition_mask
& NECP_KERNEL_CONDITION_PROTOCOL
) {
6944 if (inp
->inp_ip_p
) {
6945 info
->protocol
= inp
->inp_ip_p
;
6947 info
->protocol
= SOCK_PROTO(so
);
6951 if (inp
->inp_flags2
& INP2_WANT_APP_POLICY
&& necp_kernel_socket_policies_condition_mask
& NECP_KERNEL_CONDITION_APP_ID
) {
6952 struct necp_uuid_id_mapping
*existing_mapping
= necp_uuid_lookup_app_id_locked(((so
->so_flags
& SOF_DELEGATED
) ? so
->e_uuid
: so
->last_uuid
));
6953 if (existing_mapping
) {
6954 info
->application_id
= existing_mapping
->id
;
6957 if (!(so
->so_flags
& SOF_DELEGATED
)) {
6958 info
->real_application_id
= info
->application_id
;
6959 } else if (necp_kernel_socket_policies_condition_mask
& NECP_KERNEL_CONDITION_REAL_APP_ID
) {
6960 struct necp_uuid_id_mapping
*real_existing_mapping
= necp_uuid_lookup_app_id_locked(so
->last_uuid
);
6961 if (real_existing_mapping
) {
6962 info
->real_application_id
= real_existing_mapping
->id
;
6966 if (necp_kernel_socket_policies_condition_mask
& NECP_KERNEL_CONDITION_ENTITLEMENT
) {
6967 info
->cred_result
= priv_check_cred(so
->so_cred
, PRIV_NET_PRIVILEGED_NECP_MATCH
, 0);
6971 if (necp_kernel_socket_policies_condition_mask
& NECP_KERNEL_CONDITION_ACCOUNT_ID
&& inp
->inp_necp_attributes
.inp_account
!= NULL
) {
6972 struct necp_string_id_mapping
*existing_mapping
= necp_lookup_string_to_id_locked(&necp_account_id_list
, inp
->inp_necp_attributes
.inp_account
);
6973 if (existing_mapping
) {
6974 info
->account_id
= existing_mapping
->id
;
6978 if (necp_kernel_socket_policies_condition_mask
& NECP_KERNEL_CONDITION_DOMAIN
) {
6979 info
->domain
= inp
->inp_necp_attributes
.inp_domain
;
6982 if (override_bound_interface
) {
6983 info
->bound_interface_index
= override_bound_interface
;
6985 if ((inp
->inp_flags
& INP_BOUND_IF
) && inp
->inp_boundifp
) {
6986 info
->bound_interface_index
= inp
->inp_boundifp
->if_index
;
6990 if (necp_kernel_socket_policies_condition_mask
& NECP_KERNEL_ADDRESS_TYPE_CONDITIONS
) {
6991 if (inp
->inp_vflag
& INP_IPV4
) {
6992 if (override_local_addr
) {
6993 if (override_local_addr
->sa_len
<= sizeof(struct sockaddr_in
)) {
6994 memcpy(&info
->local_addr
, override_local_addr
, override_local_addr
->sa_len
);
6997 ((struct sockaddr_in
*)&info
->local_addr
)->sin_family
= AF_INET
;
6998 ((struct sockaddr_in
*)&info
->local_addr
)->sin_len
= sizeof(struct sockaddr_in
);
6999 ((struct sockaddr_in
*)&info
->local_addr
)->sin_port
= inp
->inp_lport
;
7000 memcpy(&((struct sockaddr_in
*)&info
->local_addr
)->sin_addr
, &inp
->inp_laddr
, sizeof(struct in_addr
));
7003 if (override_remote_addr
) {
7004 if (override_remote_addr
->sa_len
<= sizeof(struct sockaddr_in
)) {
7005 memcpy(&info
->remote_addr
, override_remote_addr
, override_remote_addr
->sa_len
);
7008 ((struct sockaddr_in
*)&info
->remote_addr
)->sin_family
= AF_INET
;
7009 ((struct sockaddr_in
*)&info
->remote_addr
)->sin_len
= sizeof(struct sockaddr_in
);
7010 ((struct sockaddr_in
*)&info
->remote_addr
)->sin_port
= inp
->inp_fport
;
7011 memcpy(&((struct sockaddr_in
*)&info
->remote_addr
)->sin_addr
, &inp
->inp_faddr
, sizeof(struct in_addr
));
7013 } else if (inp
->inp_vflag
& INP_IPV6
) {
7014 if (override_local_addr
) {
7015 if (override_local_addr
->sa_len
<= sizeof(struct sockaddr_in6
)) {
7016 memcpy(&info
->local_addr
, override_local_addr
, override_local_addr
->sa_len
);
7019 ((struct sockaddr_in6
*)&info
->local_addr
)->sin6_family
= AF_INET6
;
7020 ((struct sockaddr_in6
*)&info
->local_addr
)->sin6_len
= sizeof(struct sockaddr_in6
);
7021 ((struct sockaddr_in6
*)&info
->local_addr
)->sin6_port
= inp
->inp_lport
;
7022 memcpy(&((struct sockaddr_in6
*)&info
->local_addr
)->sin6_addr
, &inp
->in6p_laddr
, sizeof(struct in6_addr
));
7025 if (override_remote_addr
) {
7026 if (override_remote_addr
->sa_len
<= sizeof(struct sockaddr_in6
)) {
7027 memcpy(&info
->remote_addr
, override_remote_addr
, override_remote_addr
->sa_len
);
7030 ((struct sockaddr_in6
*)&info
->remote_addr
)->sin6_family
= AF_INET6
;
7031 ((struct sockaddr_in6
*)&info
->remote_addr
)->sin6_len
= sizeof(struct sockaddr_in6
);
7032 ((struct sockaddr_in6
*)&info
->remote_addr
)->sin6_port
= inp
->inp_fport
;
7033 memcpy(&((struct sockaddr_in6
*)&info
->remote_addr
)->sin6_addr
, &inp
->in6p_faddr
, sizeof(struct in6_addr
));
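// Walks a socket policy search array in order, honoring skip results and the
// drop-all session order, and returns the first policy whose conditions match
// the given socket info. Side-effect results (filter control units, route rule
// IDs, service actions, and netagent IDs) are accumulated from non-terminal
// matches encountered along the way.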
7039 static inline struct necp_kernel_socket_policy
*
7040 necp_socket_find_policy_match_with_info_locked(struct necp_kernel_socket_policy
**policy_search_array
, struct necp_socket_info
*info
, necp_kernel_policy_filter
*return_filter
, u_int32_t
*return_route_rule_id
, necp_kernel_policy_result
*return_service_action
, necp_kernel_policy_service
*return_service
, u_int32_t
*return_netagent_array
, size_t netagent_array_count
, proc_t proc
)
7042 struct necp_kernel_socket_policy
*matched_policy
= NULL
;
7043 u_int32_t skip_order
= 0;
7044 u_int32_t skip_session_order
= 0;
7045 u_int32_t route_rule_id_array
[MAX_AGGREGATE_ROUTE_RULES
];
7046 size_t route_rule_id_count
= 0;
7048 size_t netagent_cursor
= 0;
7050 // Pre-process domain for quick matching
7051 struct substring domain_substring
= necp_trim_dots_and_stars(info
->domain
, info
->domain
? strlen(info
->domain
) : 0);
7052 u_int8_t domain_dot_count
= necp_count_dots(domain_substring
.string
, domain_substring
.length
);
7054 if (return_filter
) {
7058 if (return_route_rule_id
) {
7059 *return_route_rule_id
= 0;
7062 if (return_service_action
) {
7063 *return_service_action
= 0;
7066 if (return_service
) {
7067 return_service
->identifier
= 0;
7068 return_service
->data
= 0;
7071 if (policy_search_array
!= NULL
) {
7072 for (i
= 0; policy_search_array
[i
] != NULL
; i
++) {
7073 if (necp_drop_all_order
!= 0 && policy_search_array
[i
]->session_order
>= necp_drop_all_order
) {
7074 // We've hit a drop all rule
7077 if (skip_session_order
&& policy_search_array
[i
]->session_order
>= skip_session_order
) {
7080 skip_session_order
= 0;
7083 if (policy_search_array
[i
]->order
< skip_order
) {
7089 skip_session_order
= 0;
7091 } else if (skip_session_order
) {
7095 if (necp_socket_check_policy(policy_search_array
[i
], info
->application_id
, info
->real_application_id
, info
->cred_result
, info
->account_id
, domain_substring
, domain_dot_count
, info
->pid
, info
->uid
, info
->bound_interface_index
, info
->traffic_class
, info
->protocol
, &info
->local_addr
, &info
->remote_addr
, proc
)) {
7096 if (policy_search_array
[i
]->result
== NECP_KERNEL_POLICY_RESULT_SOCKET_FILTER
) {
7097 if (return_filter
&& *return_filter
== 0) {
7098 *return_filter
= policy_search_array
[i
]->result_parameter
.filter_control_unit
;
7099 if (necp_debug
> 1) {
7100 NECPLOG(LOG_DEBUG
, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) Filter %d", info
->application_id
, info
->real_application_id
, info
->bound_interface_index
, info
->protocol
, policy_search_array
[i
]->result_parameter
.filter_control_unit
);
7104 } else if (policy_search_array
[i
]->result
== NECP_KERNEL_POLICY_RESULT_ROUTE_RULES
) {
7105 if (return_route_rule_id
&& route_rule_id_count
< MAX_AGGREGATE_ROUTE_RULES
) {
7106 route_rule_id_array
[route_rule_id_count
++] = policy_search_array
[i
]->result_parameter
.route_rule_id
;
7107 if (necp_debug
> 1) {
7108 NECPLOG(LOG_DEBUG
, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) Route Rule %d", info
->application_id
, info
->real_application_id
, info
->bound_interface_index
, info
->protocol
, policy_search_array
[i
]->result_parameter
.route_rule_id
);
7112 } else if (necp_kernel_socket_result_is_trigger_service_type(policy_search_array
[i
])) {
7113 if (return_service_action
&& *return_service_action
== 0) {
7114 *return_service_action
= policy_search_array
[i
]->result
;
7115 if (necp_debug
> 1) {
7116 NECPLOG(LOG_DEBUG
, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) Service Action %d", info
->application_id
, info
->real_application_id
, info
->bound_interface_index
, info
->protocol
, policy_search_array
[i
]->result
);
7119 if (return_service
&& return_service
->identifier
== 0) {
7120 return_service
->identifier
= policy_search_array
[i
]->result_parameter
.service
.identifier
;
7121 return_service
->data
= policy_search_array
[i
]->result_parameter
.service
.data
;
7122 if (necp_debug
> 1) {
7123 NECPLOG(LOG_DEBUG
, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) Service ID %d Data %d", info
->application_id
, info
->real_application_id
, info
->bound_interface_index
, info
->protocol
, policy_search_array
[i
]->result_parameter
.service
.identifier
, policy_search_array
[i
]->result_parameter
.service
.data
);
7127 } else if (policy_search_array
[i
]->result
== NECP_KERNEL_POLICY_RESULT_USE_NETAGENT
) {
7128 if (return_netagent_array
!= NULL
&&
7129 netagent_cursor
< netagent_array_count
) {
7130 return_netagent_array
[netagent_cursor
] = policy_search_array
[i
]->result_parameter
.netagent_id
;
7132 if (necp_debug
> 1) {
7133 NECPLOG(LOG_DEBUG
, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) Use Netagent %d", info
->application_id
, info
->real_application_id
, info
->bound_interface_index
, info
->protocol
, policy_search_array
[i
]->result_parameter
.netagent_id
);
7139 // Matched policy is a skip. Do skip and continue.
7140 if (policy_search_array
[i
]->result
== NECP_KERNEL_POLICY_RESULT_SKIP
) {
7141 skip_order
= policy_search_array
[i
]->result_parameter
.skip_policy_order
;
7142 skip_session_order
= policy_search_array
[i
]->session_order
+ 1;
7146 // Passed all tests, found a match
7147 matched_policy
= policy_search_array
[i
];
7153 if (route_rule_id_count
== 1) {
7154 *return_route_rule_id
= route_rule_id_array
[0];
7155 } else if (route_rule_id_count
> 1) {
7156 *return_route_rule_id
= necp_create_aggregate_route_rule(route_rule_id_array
);
7158 return (matched_policy
);
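// Checks whether the socket's local address is assigned to the given interface,
// by comparing it against the interface's IPv4 or IPv6 address list.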
static bool
necp_socket_uses_interface(struct inpcb *inp, u_int32_t interface_index)
{
	bool found_match = FALSE;
	errno_t result = 0;
	ifaddr_t *addresses = NULL;
	union necp_sockaddr_union address_storage;
	int i = 0;
	int family = AF_INET;
	ifnet_t interface = ifindex2ifnet[interface_index];

	if (inp == NULL || interface == NULL) {
		return (FALSE);
	}

	if (inp->inp_vflag & INP_IPV4) {
		family = AF_INET;
	} else if (inp->inp_vflag & INP_IPV6) {
		family = AF_INET6;
	}

	result = ifnet_get_address_list_family(interface, &addresses, family);
	if (result != 0) {
		NECPLOG(LOG_ERR, "Failed to get address list for %s%d", ifnet_name(interface), ifnet_unit(interface));
		return (FALSE);
	}

	for (i = 0; addresses[i] != NULL; i++) {
		if (ifaddr_address(addresses[i], &address_storage.sa, sizeof(address_storage)) == 0) {
			if (family == AF_INET) {
				if (memcmp(&address_storage.sin.sin_addr, &inp->inp_laddr, sizeof(inp->inp_laddr)) == 0) {
					found_match = TRUE;
					goto done;
				}
			} else if (family == AF_INET6) {
				if (memcmp(&address_storage.sin6.sin6_addr, &inp->in6p_laddr, sizeof(inp->in6p_laddr)) == 0) {
					found_match = TRUE;
					goto done;
				}
			}
		}
	}

done:
	ifnet_free_address_list(addresses);
	addresses = NULL;

	return (found_match);
}
static inline bool
necp_socket_is_connected(struct inpcb *inp)
{
	return (inp->inp_socket->so_state & (SS_ISCONNECTING | SS_ISCONNECTED | SS_ISDISCONNECTING));
}
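// Returns true when a socket is exempt from policy evaluation, either because
// loopback traffic is configured to pass (necp_pass_loopback) or because the
// traffic is internal co-processor traffic.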
static bool
necp_socket_bypass(struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr, struct inpcb *inp)
{
	if (necp_pass_loopback > 0 && necp_is_loopback(override_local_addr, override_remote_addr, inp, NULL)) {
		return (true);
	} else if (necp_is_intcoproc(inp, NULL)) {
		return (true);
	}

	return (false);
}
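// Main entry point for matching a socket (inpcb) against the kernel socket
// policy table. Results are cached on the inpcb, keyed by flowhash and policy
// generation count, so repeated lookups with unchanged parameters are cheap.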
7229 necp_kernel_policy_id
7230 necp_socket_find_policy_match(struct inpcb
*inp
, struct sockaddr
*override_local_addr
, struct sockaddr
*override_remote_addr
, u_int32_t override_bound_interface
)
7232 struct socket
*so
= NULL
;
7233 necp_kernel_policy_filter filter_control_unit
= 0;
7234 u_int32_t route_rule_id
= 0;
7235 struct necp_kernel_socket_policy
*matched_policy
= NULL
;
7236 necp_kernel_policy_id matched_policy_id
= NECP_KERNEL_POLICY_ID_NONE
;
7237 necp_kernel_policy_result service_action
= 0;
7238 necp_kernel_policy_service service
= { 0, 0 };
7240 u_int32_t netagent_ids
[NECP_MAX_NETAGENTS
];
7241 memset(&netagent_ids
, 0, sizeof(netagent_ids
));
7242 int netagent_cursor
;
7244 struct necp_socket_info info
;
7247 return (NECP_KERNEL_POLICY_ID_NONE
);
7250 // Ignore invalid addresses
7251 if (override_local_addr
!= NULL
&&
7252 !necp_address_is_valid(override_local_addr
)) {
7253 override_local_addr
= NULL
;
7255 if (override_remote_addr
!= NULL
&&
7256 !necp_address_is_valid(override_remote_addr
)) {
7257 override_remote_addr
= NULL
;
7260 so
= inp
->inp_socket
;
7262 // Don't lock. Possible race condition, but we don't want the performance hit.
7263 if (necp_kernel_socket_policies_count
== 0 ||
7264 (!(inp
->inp_flags2
& INP2_WANT_APP_POLICY
) && necp_kernel_socket_policies_non_app_count
== 0)) {
7265 if (necp_drop_all_order
> 0) {
7266 inp
->inp_policyresult
.policy_id
= NECP_KERNEL_POLICY_ID_NO_MATCH
;
7267 inp
->inp_policyresult
.policy_gencount
= 0;
7268 inp
->inp_policyresult
.app_id
= 0;
7269 inp
->inp_policyresult
.flowhash
= 0;
7270 inp
->inp_policyresult
.results
.filter_control_unit
= 0;
7271 inp
->inp_policyresult
.results
.route_rule_id
= 0;
7272 if (necp_socket_bypass(override_local_addr
, override_remote_addr
, inp
)) {
7273 inp
->inp_policyresult
.results
.result
= NECP_KERNEL_POLICY_RESULT_PASS
;
7275 inp
->inp_policyresult
.results
.result
= NECP_KERNEL_POLICY_RESULT_DROP
;
7278 return (NECP_KERNEL_POLICY_ID_NONE
);
7281 // Check for loopback exception
7282 if (necp_socket_bypass(override_local_addr
, override_remote_addr
, inp
)) {
7283 // Mark socket as a pass
7284 inp
->inp_policyresult
.policy_id
= NECP_KERNEL_POLICY_ID_NO_MATCH
;
7285 inp
->inp_policyresult
.policy_gencount
= 0;
7286 inp
->inp_policyresult
.app_id
= 0;
7287 inp
->inp_policyresult
.flowhash
= 0;
7288 inp
->inp_policyresult
.results
.filter_control_unit
= 0;
7289 inp
->inp_policyresult
.results
.route_rule_id
= 0;
7290 inp
->inp_policyresult
.results
.result
= NECP_KERNEL_POLICY_RESULT_PASS
;
7291 return (NECP_KERNEL_POLICY_ID_NONE
);
7295 lck_rw_lock_shared(&necp_kernel_policy_lock
);
7297 necp_socket_fillout_info_locked(inp
, override_local_addr
, override_remote_addr
, override_bound_interface
, &info
);
7298 inp
->inp_policyresult
.app_id
= info
.application_id
;
7301 u_int32_t flowhash
= necp_socket_calc_flowhash_locked(&info
);
7302 if (inp
->inp_policyresult
.policy_id
!= NECP_KERNEL_POLICY_ID_NONE
&&
7303 inp
->inp_policyresult
.policy_gencount
== necp_kernel_socket_policies_gencount
&&
7304 inp
->inp_policyresult
.flowhash
== flowhash
) {
7305 // If already matched this socket on this generation of table, skip
7308 lck_rw_done(&necp_kernel_policy_lock
);
7310 return (inp
->inp_policyresult
.policy_id
);
7313 // Match socket to policy
7314 matched_policy
= necp_socket_find_policy_match_with_info_locked(necp_kernel_socket_policies_map
[NECP_SOCKET_MAP_APP_ID_TO_BUCKET(info
.application_id
)], &info
, &filter_control_unit
, &route_rule_id
, &service_action
, &service
, netagent_ids
, NECP_MAX_NETAGENTS
, current_proc());
7315 // If the socket matched a scoped service policy, mark as Drop if not registered.
7316 // This covers the cases in which a service is required (on demand) but hasn't started yet.
7317 if ((service_action
== NECP_KERNEL_POLICY_RESULT_TRIGGER_SCOPED
||
7318 service_action
== NECP_KERNEL_POLICY_RESULT_NO_TRIGGER_SCOPED
) &&
7319 service
.identifier
!= 0 &&
7320 service
.identifier
!= NECP_NULL_SERVICE_ID
) {
7321 bool service_is_registered
= FALSE
;
7322 struct necp_service_registration
*service_registration
= NULL
;
7323 LIST_FOREACH(service_registration
, &necp_registered_service_list
, kernel_chain
) {
7324 if (service
.identifier
== service_registration
->service_id
) {
7325 service_is_registered
= TRUE
;
7329 if (!service_is_registered
) {
7330 // Mark socket as a drop if service is not registered
7331 inp
->inp_policyresult
.policy_id
= NECP_KERNEL_POLICY_ID_NO_MATCH
;
7332 inp
->inp_policyresult
.policy_gencount
= necp_kernel_socket_policies_gencount
;
7333 inp
->inp_policyresult
.flowhash
= flowhash
;
7334 inp
->inp_policyresult
.results
.filter_control_unit
= 0;
7335 inp
->inp_policyresult
.results
.route_rule_id
= 0;
7336 inp
->inp_policyresult
.results
.result
= NECP_KERNEL_POLICY_RESULT_DROP
;
7338 if (necp_debug
> 1) {
7339 NECPLOG(LOG_DEBUG
, "Socket Policy: (BoundInterface %d Proto %d) Dropping packet because service is not registered", info
.bound_interface_index
, info
.protocol
);
7343 lck_rw_done(&necp_kernel_policy_lock
);
7344 return (NECP_KERNEL_POLICY_ID_NONE
);
7348 for (netagent_cursor
= 0; netagent_cursor
< NECP_MAX_NETAGENTS
; netagent_cursor
++) {
7349 struct necp_uuid_id_mapping
*mapping
= NULL
;
7350 u_int32_t netagent_id
= netagent_ids
[netagent_cursor
];
7351 if (netagent_id
== 0) {
7354 mapping
= necp_uuid_lookup_uuid_with_service_id_locked(netagent_id
);
7355 if (mapping
!= NULL
) {
7356 u_int32_t agent_flags
= 0;
7357 agent_flags
= netagent_get_flags(mapping
->uuid
);
7358 if (agent_flags
& NETAGENT_FLAG_REGISTERED
) {
7359 if (agent_flags
& NETAGENT_FLAG_ACTIVE
) {
7361 } else if ((agent_flags
& NETAGENT_FLAG_VOLUNTARY
) == 0) {
7362 if (agent_flags
& NETAGENT_FLAG_KERNEL_ACTIVATED
) {
7363 int trigger_error
= 0;
7364 trigger_error
= netagent_kernel_trigger(mapping
->uuid
);
7365 if (necp_debug
> 1) {
7366 NECPLOG(LOG_DEBUG
, "Socket Policy: Triggering inactive agent, error %d", trigger_error
);
7370 // Mark socket as a drop if required agent is not active
7371 inp
->inp_policyresult
.policy_id
= NECP_KERNEL_POLICY_ID_NO_MATCH
;
7372 inp
->inp_policyresult
.policy_gencount
= necp_kernel_socket_policies_gencount
;
7373 inp
->inp_policyresult
.flowhash
= flowhash
;
7374 inp
->inp_policyresult
.results
.filter_control_unit
= 0;
7375 inp
->inp_policyresult
.results
.route_rule_id
= 0;
7376 inp
->inp_policyresult
.results
.result
= NECP_KERNEL_POLICY_RESULT_DROP
;
7378 if (necp_debug
> 1) {
7379 NECPLOG(LOG_DEBUG
, "Socket Policy: (BoundInterface %d Proto %d) Dropping packet because agent is not active", info
.bound_interface_index
, info
.protocol
);
7383 lck_rw_done(&necp_kernel_policy_lock
);
7384 return (NECP_KERNEL_POLICY_ID_NONE
);
7389 if (matched_policy
) {
7390 matched_policy_id
= matched_policy
->id
;
7391 inp
->inp_policyresult
.policy_id
= matched_policy
->id
;
7392 inp
->inp_policyresult
.policy_gencount
= necp_kernel_socket_policies_gencount
;
7393 inp
->inp_policyresult
.flowhash
= flowhash
;
7394 inp
->inp_policyresult
.results
.filter_control_unit
= filter_control_unit
;
7395 inp
->inp_policyresult
.results
.route_rule_id
= route_rule_id
;
7396 inp
->inp_policyresult
.results
.result
= matched_policy
->result
;
7397 memcpy(&inp
->inp_policyresult
.results
.result_parameter
, &matched_policy
->result_parameter
, sizeof(matched_policy
->result_parameter
));
7399 if (necp_socket_is_connected(inp
) &&
7400 (matched_policy
->result
== NECP_KERNEL_POLICY_RESULT_DROP
||
7401 (matched_policy
->result
== NECP_KERNEL_POLICY_RESULT_IP_TUNNEL
&& !necp_socket_uses_interface(inp
, matched_policy
->result_parameter
.tunnel_interface_index
)))) {
7403 NECPLOG(LOG_DEBUG
, "Marking socket in state %d as defunct", so
->so_state
);
7405 sosetdefunct(current_proc(), so
, SHUTDOWN_SOCKET_LEVEL_NECP
| SHUTDOWN_SOCKET_LEVEL_DISCONNECT_ALL
, TRUE
);
7406 } else if (necp_socket_is_connected(inp
) &&
7407 matched_policy
->result
== NECP_KERNEL_POLICY_RESULT_IP_TUNNEL
&&
7408 info
.protocol
== IPPROTO_TCP
) {
7409 // Reset MSS on TCP socket if tunnel policy changes
7410 tcp_mtudisc(inp
, 0);
7413 if (necp_debug
> 1) {
7414 NECPLOG(LOG_DEBUG
, "Socket Policy: %p (BoundInterface %d Proto %d) Policy %d Result %d Parameter %d", inp
->inp_socket
, info
.bound_interface_index
, info
.protocol
, matched_policy
->id
, matched_policy
->result
, matched_policy
->result_parameter
.tunnel_interface_index
);
7416 } else if (necp_drop_all_order
> 0) {
7417 // Mark socket as a drop if set
7418 inp
->inp_policyresult
.policy_id
= NECP_KERNEL_POLICY_ID_NO_MATCH
;
7419 inp
->inp_policyresult
.policy_gencount
= necp_kernel_socket_policies_gencount
;
7420 inp
->inp_policyresult
.flowhash
= flowhash
;
7421 inp
->inp_policyresult
.results
.filter_control_unit
= 0;
7422 inp
->inp_policyresult
.results
.route_rule_id
= 0;
7423 inp
->inp_policyresult
.results
.result
= NECP_KERNEL_POLICY_RESULT_DROP
;
7425 // Mark non-matching socket so we don't re-check it
7426 inp
->inp_policyresult
.policy_id
= NECP_KERNEL_POLICY_ID_NO_MATCH
;
7427 inp
->inp_policyresult
.policy_gencount
= necp_kernel_socket_policies_gencount
;
7428 inp
->inp_policyresult
.flowhash
= flowhash
;
7429 inp
->inp_policyresult
.results
.filter_control_unit
= filter_control_unit
; // We may have matched a filter, so mark it!
7430 inp
->inp_policyresult
.results
.route_rule_id
= route_rule_id
; // We may have matched a route rule, so mark it!
7431 inp
->inp_policyresult
.results
.result
= NECP_KERNEL_POLICY_RESULT_NONE
;
7435 lck_rw_done(&necp_kernel_policy_lock
);
7437 return (matched_policy_id
);
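// Evaluates a single IP-output policy against a packet's socket policy ID,
// bound and last interfaces, protocol, and local/remote addresses, with the
// same negated-condition semantics as the socket-level check.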
static bool
necp_ip_output_check_policy(struct necp_kernel_ip_output_policy *kernel_policy, necp_kernel_policy_id socket_policy_id, u_int32_t bound_interface_index, u_int32_t last_interface_index, u_int16_t protocol, union necp_sockaddr_union *local, union necp_sockaddr_union *remote)
{
	if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES)) {
		if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
			u_int32_t cond_bound_interface_index = kernel_policy->cond_bound_interface ? kernel_policy->cond_bound_interface->if_index : 0;
			if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
				if (bound_interface_index == cond_bound_interface_index) {
					// No match, matches forbidden interface
					return (FALSE);
				}
			} else {
				if (bound_interface_index != cond_bound_interface_index) {
					// No match, does not match required interface
					return (FALSE);
				}
			}
		} else {
			if (bound_interface_index != 0) {
				// No match, requires a non-bound packet
				return (FALSE);
			}
		}
	}

	if (kernel_policy->condition_mask == 0) {
		return (TRUE);
	}

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID) {
		if (socket_policy_id != kernel_policy->cond_policy_id) {
			// No match, does not match required id
			return (FALSE);
		}
	}

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LAST_INTERFACE) {
		if (last_interface_index != kernel_policy->cond_last_interface_index) {
			return (FALSE);
		}
	}

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
		if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
			if (protocol == kernel_policy->cond_protocol) {
				// No match, matches forbidden protocol
				return (FALSE);
			}
		} else {
			if (protocol != kernel_policy->cond_protocol) {
				// No match, does not match required protocol
				return (FALSE);
			}
		}
	}

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
		if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
			bool inRange = necp_is_addr_in_range((struct sockaddr *)local, (struct sockaddr *)&kernel_policy->cond_local_start, (struct sockaddr *)&kernel_policy->cond_local_end);
			if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
				if (inRange) {
					return (FALSE);
				}
			} else {
				if (!inRange) {
					return (FALSE);
				}
			}
		} else if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
			bool inSubnet = necp_is_addr_in_subnet((struct sockaddr *)local, (struct sockaddr *)&kernel_policy->cond_local_start, kernel_policy->cond_local_prefix);
			if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
				if (inSubnet) {
					return (FALSE);
				}
			} else {
				if (!inSubnet) {
					return (FALSE);
				}
			}
		}
	}

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
		if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
			bool inRange = necp_is_addr_in_range((struct sockaddr *)remote, (struct sockaddr *)&kernel_policy->cond_remote_start, (struct sockaddr *)&kernel_policy->cond_remote_end);
			if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
				if (inRange) {
					return (FALSE);
				}
			} else {
				if (!inRange) {
					return (FALSE);
				}
			}
		} else if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
			bool inSubnet = necp_is_addr_in_subnet((struct sockaddr *)remote, (struct sockaddr *)&kernel_policy->cond_remote_start, kernel_policy->cond_remote_prefix);
			if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
				if (inSubnet) {
					return (FALSE);
				}
			} else {
				if (!inSubnet) {
					return (FALSE);
				}
			}
		}
	}

	return (TRUE);
}
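// Walks the IP-output policy bucket for the given socket policy ID and returns
// the first matching policy, honoring skip results and the drop-all session order.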
static inline struct necp_kernel_ip_output_policy *
necp_ip_output_find_policy_match_locked(necp_kernel_policy_id socket_policy_id, u_int32_t bound_interface_index, u_int32_t last_interface_index, u_int16_t protocol, union necp_sockaddr_union *local_addr, union necp_sockaddr_union *remote_addr)
{
	u_int32_t skip_order = 0;
	u_int32_t skip_session_order = 0;
	int i = 0;
	struct necp_kernel_ip_output_policy *matched_policy = NULL;
	struct necp_kernel_ip_output_policy **policy_search_array = necp_kernel_ip_output_policies_map[NECP_IP_OUTPUT_MAP_ID_TO_BUCKET(socket_policy_id)];
	if (policy_search_array != NULL) {
		for (i = 0; policy_search_array[i] != NULL; i++) {
			if (necp_drop_all_order != 0 && policy_search_array[i]->session_order >= necp_drop_all_order) {
				// We've hit a drop all rule
				break;
			}
			if (skip_session_order && policy_search_array[i]->session_order >= skip_session_order) {
				// Done skipping
				skip_order = 0;
				skip_session_order = 0;
			}
			if (skip_order) {
				if (policy_search_array[i]->order < skip_order) {
					// Skip this policy
					continue;
				} else {
					// Done skipping
					skip_order = 0;
					skip_session_order = 0;
				}
			} else if (skip_session_order) {
				// Skip this policy
				continue;
			}
			if (necp_ip_output_check_policy(policy_search_array[i], socket_policy_id, bound_interface_index, last_interface_index, protocol, local_addr, remote_addr)) {
				// Passed all tests, found a match
				matched_policy = policy_search_array[i];

				if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
					skip_order = policy_search_array[i]->result_parameter.skip_policy_order;
					skip_session_order = policy_search_array[i]->session_order + 1;
					continue;
				}

				break;
			}
		}
	}

	return (matched_policy);
}
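// Returns true when an outbound packet is exempt from IP-output policy
// evaluation: loopback pass-through, keepalive offload packets, or internal
// co-processor traffic.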
static bool
necp_output_bypass(struct mbuf *packet)
{
	if (necp_pass_loopback > 0 && necp_is_loopback(NULL, NULL, NULL, packet)) {
		return (true);
	}
	if (necp_pass_keepalives > 0 && necp_get_is_keepalive_from_packet(packet)) {
		return (true);
	}
	if (necp_is_intcoproc(NULL, packet)) {
		return (true);
	}
	return (false);
}
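// Matches an outbound IPv4 packet against the IP-output policy table,
// extracting the protocol, addresses, and ports from the packet headers
// before performing the policy search.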
7616 necp_kernel_policy_id
7617 necp_ip_output_find_policy_match(struct mbuf
*packet
, int flags
, struct ip_out_args
*ipoa
, necp_kernel_policy_result
*result
, necp_kernel_policy_result_parameter
*result_parameter
)
7619 struct ip
*ip
= NULL
;
7620 int hlen
= sizeof(struct ip
);
7621 necp_kernel_policy_id socket_policy_id
= NECP_KERNEL_POLICY_ID_NONE
;
7622 necp_kernel_policy_id matched_policy_id
= NECP_KERNEL_POLICY_ID_NONE
;
7623 struct necp_kernel_ip_output_policy
*matched_policy
= NULL
;
7624 u_int16_t protocol
= 0;
7625 u_int32_t bound_interface_index
= 0;
7626 u_int32_t last_interface_index
= 0;
7627 union necp_sockaddr_union local_addr
;
7628 union necp_sockaddr_union remote_addr
;
7634 if (result_parameter
) {
7635 memset(result_parameter
, 0, sizeof(*result_parameter
));
7638 if (packet
== NULL
) {
7639 return (NECP_KERNEL_POLICY_ID_NONE
);
7642 socket_policy_id
= necp_get_policy_id_from_packet(packet
);
7644 // Exit early for an empty list
7645 // Don't lock. Possible race condition, but we don't want the performance hit.
7646 if (necp_kernel_ip_output_policies_count
== 0 ||
7647 ((socket_policy_id
== NECP_KERNEL_POLICY_ID_NONE
) && necp_kernel_ip_output_policies_non_id_count
== 0)) {
7648 if (necp_drop_all_order
> 0) {
7649 matched_policy_id
= NECP_KERNEL_POLICY_ID_NO_MATCH
;
7651 if (necp_output_bypass(packet
)) {
7652 *result
= NECP_KERNEL_POLICY_RESULT_PASS
;
7654 *result
= NECP_KERNEL_POLICY_RESULT_DROP
;
7659 return (matched_policy_id
);
7662 // Check for loopback exception
7663 if (necp_output_bypass(packet
)) {
7664 matched_policy_id
= NECP_KERNEL_POLICY_ID_NO_MATCH
;
7666 *result
= NECP_KERNEL_POLICY_RESULT_PASS
;
7668 return (matched_policy_id
);
7671 last_interface_index
= necp_get_last_interface_index_from_packet(packet
);
7673 // Process packet to get relevant fields
7674 ip
= mtod(packet
, struct ip
*);
7676 hlen
= _IP_VHL_HL(ip
->ip_vhl
) << 2;
7678 hlen
= ip
->ip_hl
<< 2;
7681 protocol
= ip
->ip_p
;
7683 if ((flags
& IP_OUTARGS
) && (ipoa
!= NULL
) &&
7684 (ipoa
->ipoa_flags
& IPOAF_BOUND_IF
) &&
7685 ipoa
->ipoa_boundif
!= IFSCOPE_NONE
) {
7686 bound_interface_index
= ipoa
->ipoa_boundif
;
7689 local_addr
.sin
.sin_family
= AF_INET
;
7690 local_addr
.sin
.sin_len
= sizeof(struct sockaddr_in
);
7691 memcpy(&local_addr
.sin
.sin_addr
, &ip
->ip_src
, sizeof(ip
->ip_src
));
7693 remote_addr
.sin
.sin_family
= AF_INET
;
7694 remote_addr
.sin
.sin_len
= sizeof(struct sockaddr_in
);
7695 memcpy(&((struct sockaddr_in
*)&remote_addr
)->sin_addr
, &ip
->ip_dst
, sizeof(ip
->ip_dst
));
7700 if ((int)(hlen
+ sizeof(th
)) <= packet
->m_pkthdr
.len
) {
7701 m_copydata(packet
, hlen
, sizeof(th
), (u_int8_t
*)&th
);
7702 ((struct sockaddr_in
*)&local_addr
)->sin_port
= th
.th_sport
;
7703 ((struct sockaddr_in
*)&remote_addr
)->sin_port
= th
.th_dport
;
7709 if ((int)(hlen
+ sizeof(uh
)) <= packet
->m_pkthdr
.len
) {
7710 m_copydata(packet
, hlen
, sizeof(uh
), (u_int8_t
*)&uh
);
7711 ((struct sockaddr_in
*)&local_addr
)->sin_port
= uh
.uh_sport
;
7712 ((struct sockaddr_in
*)&remote_addr
)->sin_port
= uh
.uh_dport
;
7717 ((struct sockaddr_in
*)&local_addr
)->sin_port
= 0;
7718 ((struct sockaddr_in
*)&remote_addr
)->sin_port
= 0;
7723 // Match packet to policy
7724 lck_rw_lock_shared(&necp_kernel_policy_lock
);
7725 matched_policy
= necp_ip_output_find_policy_match_locked(socket_policy_id
, bound_interface_index
, last_interface_index
, protocol
, &local_addr
, &remote_addr
);
7726 if (matched_policy
) {
7727 matched_policy_id
= matched_policy
->id
;
7729 *result
= matched_policy
->result
;
7732 if (result_parameter
) {
7733 memcpy(result_parameter
, &matched_policy
->result_parameter
, sizeof(matched_policy
->result_parameter
));
7736 if (necp_debug
> 1) {
7737 NECPLOG(LOG_DEBUG
, "IP Output: (ID %d BoundInterface %d LastInterface %d Proto %d) Policy %d Result %d Parameter %d", socket_policy_id
, bound_interface_index
, last_interface_index
, protocol
, matched_policy
->id
, matched_policy
->result
, matched_policy
->result_parameter
.tunnel_interface_index
);
7739 } else if (necp_drop_all_order
> 0) {
7740 matched_policy_id
= NECP_KERNEL_POLICY_ID_NO_MATCH
;
7742 *result
= NECP_KERNEL_POLICY_RESULT_DROP
;
7746 lck_rw_done(&necp_kernel_policy_lock
);
7748 return (matched_policy_id
);
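// IPv6 variant of the IP-output policy match: extracts addresses and ports
// from the IPv6 header chain before searching the policy table.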
7751 necp_kernel_policy_id
7752 necp_ip6_output_find_policy_match(struct mbuf
*packet
, int flags
, struct ip6_out_args
*ip6oa
, necp_kernel_policy_result
*result
, necp_kernel_policy_result_parameter
*result_parameter
)
7754 struct ip6_hdr
*ip6
= NULL
;
7757 necp_kernel_policy_id socket_policy_id
= NECP_KERNEL_POLICY_ID_NONE
;
7758 necp_kernel_policy_id matched_policy_id
= NECP_KERNEL_POLICY_ID_NONE
;
7759 struct necp_kernel_ip_output_policy
*matched_policy
= NULL
;
7760 u_int16_t protocol
= 0;
7761 u_int32_t bound_interface_index
= 0;
7762 u_int32_t last_interface_index
= 0;
7763 union necp_sockaddr_union local_addr
;
7764 union necp_sockaddr_union remote_addr
;
7770 if (result_parameter
) {
7771 memset(result_parameter
, 0, sizeof(*result_parameter
));
7774 if (packet
== NULL
) {
7775 return (NECP_KERNEL_POLICY_ID_NONE
);
7778 socket_policy_id
= necp_get_policy_id_from_packet(packet
);
7780 // Exit early for an empty list
7781 // Don't lock. Possible race condition, but we don't want the performance hit.
7782 if (necp_kernel_ip_output_policies_count
== 0 ||
7783 ((socket_policy_id
== NECP_KERNEL_POLICY_ID_NONE
) && necp_kernel_ip_output_policies_non_id_count
== 0)) {
7784 if (necp_drop_all_order
> 0) {
7785 matched_policy_id
= NECP_KERNEL_POLICY_ID_NO_MATCH
;
7787 if (necp_output_bypass(packet
)) {
7788 *result
= NECP_KERNEL_POLICY_RESULT_PASS
;
7790 *result
= NECP_KERNEL_POLICY_RESULT_DROP
;
7795 return (matched_policy_id
);
7798 // Check for loopback exception
7799 if (necp_output_bypass(packet
)) {
7800 matched_policy_id
= NECP_KERNEL_POLICY_ID_NO_MATCH
;
7802 *result
= NECP_KERNEL_POLICY_RESULT_PASS
;
7804 return (matched_policy_id
);
7807 last_interface_index
= necp_get_last_interface_index_from_packet(packet
);
7809 // Process packet to get relevant fields
7810 ip6
= mtod(packet
, struct ip6_hdr
*);
7812 if ((flags
& IPV6_OUTARGS
) && (ip6oa
!= NULL
) &&
7813 (ip6oa
->ip6oa_flags
& IP6OAF_BOUND_IF
) &&
7814 ip6oa
->ip6oa_boundif
!= IFSCOPE_NONE
) {
7815 bound_interface_index
= ip6oa
->ip6oa_boundif
;
7818 ((struct sockaddr_in6
*)&local_addr
)->sin6_family
= AF_INET6
;
7819 ((struct sockaddr_in6
*)&local_addr
)->sin6_len
= sizeof(struct sockaddr_in6
);
7820 memcpy(&((struct sockaddr_in6
*)&local_addr
)->sin6_addr
, &ip6
->ip6_src
, sizeof(ip6
->ip6_src
));
7822 ((struct sockaddr_in6
*)&remote_addr
)->sin6_family
= AF_INET6
;
7823 ((struct sockaddr_in6
*)&remote_addr
)->sin6_len
= sizeof(struct sockaddr_in6
);
7824 memcpy(&((struct sockaddr_in6
*)&remote_addr
)->sin6_addr
, &ip6
->ip6_dst
, sizeof(ip6
->ip6_dst
));
7826 offset
= ip6_lasthdr(packet
, 0, IPPROTO_IPV6
, &next
);
7827 if (offset
>= 0 && packet
->m_pkthdr
.len
>= offset
) {
7832 if ((int)(offset
+ sizeof(th
)) <= packet
->m_pkthdr
.len
) {
7833 m_copydata(packet
, offset
, sizeof(th
), (u_int8_t
*)&th
);
7834 ((struct sockaddr_in6
*)&local_addr
)->sin6_port
= th
.th_sport
;
7835 ((struct sockaddr_in6
*)&remote_addr
)->sin6_port
= th
.th_dport
;
7841 if ((int)(offset
+ sizeof(uh
)) <= packet
->m_pkthdr
.len
) {
7842 m_copydata(packet
, offset
, sizeof(uh
), (u_int8_t
*)&uh
);
7843 ((struct sockaddr_in6
*)&local_addr
)->sin6_port
= uh
.uh_sport
;
7844 ((struct sockaddr_in6
*)&remote_addr
)->sin6_port
= uh
.uh_dport
;
7849 ((struct sockaddr_in6
*)&local_addr
)->sin6_port
= 0;
7850 ((struct sockaddr_in6
*)&remote_addr
)->sin6_port
= 0;
7856 // Match packet to policy
7857 lck_rw_lock_shared(&necp_kernel_policy_lock
);
7858 matched_policy
= necp_ip_output_find_policy_match_locked(socket_policy_id
, bound_interface_index
, last_interface_index
, protocol
, &local_addr
, &remote_addr
);
7859 if (matched_policy
) {
7860 matched_policy_id
= matched_policy
->id
;
7862 *result
= matched_policy
->result
;
7865 if (result_parameter
) {
7866 memcpy(result_parameter
, &matched_policy
->result_parameter
, sizeof(matched_policy
->result_parameter
));
7869 if (necp_debug
> 1) {
7870 NECPLOG(LOG_DEBUG
, "IP6 Output: (ID %d BoundInterface %d LastInterface %d Proto %d) Policy %d Result %d Parameter %d", socket_policy_id
, bound_interface_index
, last_interface_index
, protocol
, matched_policy
->id
, matched_policy
->result
, matched_policy
->result_parameter
.tunnel_interface_index
);
7872 } else if (necp_drop_all_order
> 0) {
7873 matched_policy_id
= NECP_KERNEL_POLICY_ID_NO_MATCH
;
7875 *result
= NECP_KERNEL_POLICY_RESULT_DROP
;
7879 lck_rw_done(&necp_kernel_policy_lock
);
7881 return (matched_policy_id
);
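// Returns TRUE when addr falls within [range_start, range_end], using
// necp_addr_compare() for the ordering checks.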
static bool
necp_is_addr_in_range(struct sockaddr *addr, struct sockaddr *range_start, struct sockaddr *range_end)
{
	int cmp = 0;

	if (addr == NULL || range_start == NULL || range_end == NULL) {
		return (FALSE);
	}

	/* Must be greater than or equal to start */
	cmp = necp_addr_compare(addr, range_start, 1);
	if (cmp != 0 && cmp != 1) {
		return (FALSE);
	}

	/* Must be less than or equal to end */
	cmp = necp_addr_compare(addr, range_end, 1);
	if (cmp != 0 && cmp != -1) {
		return (FALSE);
	}

	return (TRUE);
}
static bool
necp_is_range_in_range(struct sockaddr *inner_range_start, struct sockaddr *inner_range_end, struct sockaddr *range_start, struct sockaddr *range_end)
{
	int cmp = 0;

	if (inner_range_start == NULL || inner_range_end == NULL || range_start == NULL || range_end == NULL) {
		return (FALSE);
	}

	/* Must be greater than or equal to start */
	cmp = necp_addr_compare(inner_range_start, range_start, 1);
	if (cmp != 0 && cmp != 1) {
		return (FALSE);
	}

	/* Must be less than or equal to end */
	cmp = necp_addr_compare(inner_range_end, range_end, 1);
	if (cmp != 0 && cmp != -1) {
		return (FALSE);
	}

	return (TRUE);
}
static bool
necp_is_addr_in_subnet(struct sockaddr *addr, struct sockaddr *subnet_addr, u_int8_t subnet_prefix)
{
	if (addr == NULL || subnet_addr == NULL) {
		return (FALSE);
	}

	if (addr->sa_family != subnet_addr->sa_family || addr->sa_len != subnet_addr->sa_len) {
		return (FALSE);
	}

	switch (addr->sa_family) {
		case AF_INET: {
			if (satosin(subnet_addr)->sin_port != 0 &&
			    satosin(addr)->sin_port != satosin(subnet_addr)->sin_port) {
				return (FALSE);
			}
			return (necp_buffer_compare_with_bit_prefix((u_int8_t *)&satosin(addr)->sin_addr, (u_int8_t *)&satosin(subnet_addr)->sin_addr, subnet_prefix));
		}
		case AF_INET6: {
			if (satosin6(subnet_addr)->sin6_port != 0 &&
			    satosin6(addr)->sin6_port != satosin6(subnet_addr)->sin6_port) {
				return (FALSE);
			}
			if (satosin6(addr)->sin6_scope_id &&
			    satosin6(subnet_addr)->sin6_scope_id &&
			    satosin6(addr)->sin6_scope_id != satosin6(subnet_addr)->sin6_scope_id) {
				return (FALSE);
			}
			return (necp_buffer_compare_with_bit_prefix((u_int8_t *)&satosin6(addr)->sin6_addr, (u_int8_t *)&satosin6(subnet_addr)->sin6_addr, subnet_prefix));
		}
		default: {
			return (FALSE);
		}
	}

	return (FALSE);
}
/*
 * Return values:
 *		-1: sa1 < sa2
 *		0: sa1 == sa2
 *		1: sa1 > sa2
 *		2: Not comparable or error
 */
static int
necp_addr_compare(struct sockaddr *sa1, struct sockaddr *sa2, int check_port)
{
	int result = 0;
	int port_result = 0;

	if (sa1->sa_family != sa2->sa_family || sa1->sa_len != sa2->sa_len) {
		return (2);
	}

	if (sa1->sa_len == 0) {
		return (0);
	}

	switch (sa1->sa_family) {
		case AF_INET: {
			if (sa1->sa_len != sizeof(struct sockaddr_in)) {
				return (2);
			}

			result = memcmp(&satosin(sa1)->sin_addr.s_addr, &satosin(sa2)->sin_addr.s_addr, sizeof(satosin(sa1)->sin_addr.s_addr));

			if (check_port) {
				if (satosin(sa1)->sin_port < satosin(sa2)->sin_port) {
					port_result = -1;
				} else if (satosin(sa1)->sin_port > satosin(sa2)->sin_port) {
					port_result = 1;
				}

				if (result == 0) {
					result = port_result;
				} else if ((result > 0 && port_result < 0) || (result < 0 && port_result > 0)) {
					return (2);
				}
			}

			break;
		}
		case AF_INET6: {
			if (sa1->sa_len != sizeof(struct sockaddr_in6)) {
				return (2);
			}

			if (satosin6(sa1)->sin6_scope_id != satosin6(sa2)->sin6_scope_id) {
				return (2);
			}

			result = memcmp(&satosin6(sa1)->sin6_addr.s6_addr[0], &satosin6(sa2)->sin6_addr.s6_addr[0], sizeof(struct in6_addr));

			if (check_port) {
				if (satosin6(sa1)->sin6_port < satosin6(sa2)->sin6_port) {
					port_result = -1;
				} else if (satosin6(sa1)->sin6_port > satosin6(sa2)->sin6_port) {
					port_result = 1;
				}

				if (result == 0) {
					result = port_result;
				} else if ((result > 0 && port_result < 0) || (result < 0 && port_result > 0)) {
					return (2);
				}
			}

			break;
		}
		default: {
			result = memcmp(sa1, sa2, sa1->sa_len);
			break;
		}
	}

	if (result < 0) {
		result = (-1);
	} else if (result > 0) {
		result = (1);
	}

	return (result);
}
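// Compares the first 'bits' bits of two buffers, byte by byte and then with a
// mask for the trailing partial byte; used for subnet prefix matches.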
static bool
necp_buffer_compare_with_bit_prefix(u_int8_t *p1, u_int8_t *p2, u_int32_t bits)
{
	u_int8_t mask;

	/* Handle null pointers */
	if (p1 == NULL || p2 == NULL) {
		return (p1 == p2);
	}

	while (bits >= 8) {
		if (*p1++ != *p2++) {
			return (FALSE);
		}
		bits -= 8;
	}

	if (bits > 0) {
		mask = ~((1<<(8-bits))-1);
		if ((*p1 & mask) != (*p2 & mask)) {
			return (FALSE);
		}
	}

	return (TRUE);
}
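/*
 * For example, with bits = 20 the loop above compares the first two whole
 * bytes, and the remaining 4 bits are checked through the mask
 * ~((1 << (8 - 4)) - 1) == 0xF0, so only the high nibble of the third byte
 * has to agree for the prefixes to be considered equal.
 */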
static bool
necp_update_qos_marking(struct ifnet *ifp, u_int32_t route_rule_id)
{
	bool qos_marking = FALSE;
	int exception_index = 0;
	struct necp_route_rule *route_rule = NULL;

	route_rule = necp_lookup_route_rule_locked(&necp_route_rules, route_rule_id);
	if (route_rule == NULL) {
		qos_marking = FALSE;
		goto done;
	}

	qos_marking = (route_rule->default_action == NECP_ROUTE_RULE_QOS_MARKING) ? TRUE : FALSE;

	if (ifp == NULL) {
		goto done;
	}

	for (exception_index = 0; exception_index < MAX_ROUTE_RULE_INTERFACES; exception_index++) {
		if (route_rule->exception_if_indices[exception_index] == 0) {
			break;
		}
		if (route_rule->exception_if_actions[exception_index] != NECP_ROUTE_RULE_QOS_MARKING) {
			continue;
		}
		if (route_rule->exception_if_indices[exception_index] == ifp->if_index) {
			qos_marking = TRUE;
			if (necp_debug > 2) {
				NECPLOG(LOG_DEBUG, "QoS Marking : Interface match %d for Rule %d Allowed %d",
					route_rule->exception_if_indices[exception_index], route_rule_id, qos_marking);
			}
			goto done;
		}
	}

	if ((route_rule->cellular_action == NECP_ROUTE_RULE_QOS_MARKING && IFNET_IS_CELLULAR(ifp)) ||
		(route_rule->wifi_action == NECP_ROUTE_RULE_QOS_MARKING && IFNET_IS_WIFI(ifp)) ||
		(route_rule->wired_action == NECP_ROUTE_RULE_QOS_MARKING && IFNET_IS_WIRED(ifp)) ||
		(route_rule->expensive_action == NECP_ROUTE_RULE_QOS_MARKING && IFNET_IS_EXPENSIVE(ifp))) {
		qos_marking = TRUE;
		if (necp_debug > 2) {
			NECPLOG(LOG_DEBUG, "QoS Marking: C:%d WF:%d W:%d E:%d for Rule %d Allowed %d",
				route_rule->cellular_action, route_rule->wifi_action, route_rule->wired_action,
				route_rule->expensive_action, route_rule_id, qos_marking);
		}
		goto done;
	}
done:
	if (necp_debug > 1) {
		NECPLOG(LOG_DEBUG, "QoS Marking: Rule %d ifp %s Allowed %d",
			route_rule_id, ifp ? ifp->if_xname : "", qos_marking);
	}
	return (qos_marking);
}
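/*
 * Evaluation order above: a per-interface exception entry that names this
 * ifp wins first, then the per-type actions (cellular/wifi/wired/expensive),
 * and otherwise the rule's default_action decides whether QoS marking is
 * allowed.
 */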
void
necp_socket_update_qos_marking(struct inpcb *inp, struct rtentry *route, struct ifnet *interface, u_int32_t route_rule_id)
{
	bool qos_marking = FALSE;
	struct ifnet *ifp = interface = NULL;

	if (net_qos_policy_restricted == 0) {
		return;
	}
	if (inp->inp_socket == NULL) {
		return;
	}
	if ((inp->inp_socket->so_flags1 & SOF1_QOSMARKING_POLICY_OVERRIDE)) {
		return;
	}
	/*
	 * This is racy but we do not need the performance hit of taking necp_kernel_policy_lock
	 */
	if (inp->inp_policyresult.results.qos_marking_gencount == necp_kernel_socket_policies_gencount) {
		return;
	}

	lck_rw_lock_shared(&necp_kernel_policy_lock);

	if (ifp == NULL && route != NULL) {
		ifp = route->rt_ifp;
	}
	/*
	 * By default, until we have a interface, do not mark and reevaluate the Qos marking policy
	 */
	if (ifp == NULL || route_rule_id == 0) {
		qos_marking = FALSE;
		goto done;
	}

	if (ROUTE_RULE_IS_AGGREGATE(route_rule_id)) {
		struct necp_aggregate_route_rule *aggregate_route_rule = necp_lookup_aggregate_route_rule_locked(route_rule_id);
		if (aggregate_route_rule != NULL) {
			int index = 0;
			for (index = 0; index < MAX_AGGREGATE_ROUTE_RULES; index++) {
				u_int32_t sub_route_rule_id = aggregate_route_rule->rule_ids[index];
				if (sub_route_rule_id == 0) {
					break;
				}
				qos_marking = necp_update_qos_marking(ifp, sub_route_rule_id);
				if (qos_marking == TRUE) {
					break;
				}
			}
		}
	} else {
		qos_marking = necp_update_qos_marking(ifp, route_rule_id);
	}
	/*
	 * Now that we have an interface we remember the gencount
	 */
	inp->inp_policyresult.results.qos_marking_gencount = necp_kernel_socket_policies_gencount;

done:
	lck_rw_done(&necp_kernel_policy_lock);

	if (qos_marking == TRUE) {
		inp->inp_socket->so_flags1 |= SOF1_QOSMARKING_ALLOWED;
	} else {
		inp->inp_socket->so_flags1 &= ~SOF1_QOSMARKING_ALLOWED;
	}
}
static bool
necp_route_is_allowed_inner(struct rtentry *route, struct ifnet *ifp, u_int32_t route_rule_id, u_int32_t *interface_type_denied)
{
	bool default_is_allowed = TRUE;
	u_int8_t type_aggregate_action = NECP_ROUTE_RULE_NONE;
	int exception_index = 0;
	struct ifnet *delegated_ifp = NULL;
	struct necp_route_rule *route_rule = NULL;

	route_rule = necp_lookup_route_rule_locked(&necp_route_rules, route_rule_id);
	if (route_rule == NULL) {
		return (TRUE);
	}

	default_is_allowed = (route_rule->default_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? FALSE : TRUE;
	if (ifp == NULL) {
		ifp = route->rt_ifp;
	}
	if (ifp == NULL) {
		if (necp_debug > 1 && !default_is_allowed) {
			NECPLOG(LOG_DEBUG, "Route Allowed: No interface for route, using default for Rule %d Allowed %d", route_rule_id, default_is_allowed);
		}
		return (default_is_allowed);
	}

	delegated_ifp = ifp->if_delegated.ifp;
	for (exception_index = 0; exception_index < MAX_ROUTE_RULE_INTERFACES; exception_index++) {
		if (route_rule->exception_if_indices[exception_index] == 0) {
			break;
		}
		if (IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(route_rule->exception_if_actions[exception_index]) == FALSE) {
			continue;
		}
		if (route_rule->exception_if_indices[exception_index] == ifp->if_index ||
			(delegated_ifp != NULL && route_rule->exception_if_indices[exception_index] == delegated_ifp->if_index)) {
			if (necp_debug > 1) {
				NECPLOG(LOG_DEBUG, "Route Allowed: Interface match %d for Rule %d Allowed %d", route_rule->exception_if_indices[exception_index], route_rule_id, ((route_rule->exception_if_actions[exception_index] == NECP_ROUTE_RULE_DENY_INTERFACE) ? FALSE : TRUE));
			}
			return ((route_rule->exception_if_actions[exception_index] == NECP_ROUTE_RULE_DENY_INTERFACE) ? FALSE : TRUE);
		}
	}

	if (IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(route_rule->cellular_action) &&
		IFNET_IS_CELLULAR(ifp)) {
		if (interface_type_denied != NULL) {
			*interface_type_denied = IFRTYPE_FUNCTIONAL_CELLULAR;
		}
		if (type_aggregate_action == NECP_ROUTE_RULE_NONE ||
			(type_aggregate_action == NECP_ROUTE_RULE_ALLOW_INTERFACE &&
			route_rule->cellular_action == NECP_ROUTE_RULE_DENY_INTERFACE)) {
			// Deny wins if there is a conflict
			type_aggregate_action = route_rule->cellular_action;
		}
	}

	if (IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(route_rule->wifi_action) &&
		IFNET_IS_WIFI(ifp)) {
		if (interface_type_denied != NULL) {
			*interface_type_denied = IFRTYPE_FUNCTIONAL_WIFI_INFRA;
		}
		if (type_aggregate_action == NECP_ROUTE_RULE_NONE ||
			(type_aggregate_action == NECP_ROUTE_RULE_ALLOW_INTERFACE &&
			route_rule->wifi_action == NECP_ROUTE_RULE_DENY_INTERFACE)) {
			// Deny wins if there is a conflict
			type_aggregate_action = route_rule->wifi_action;
		}
	}

	if (IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(route_rule->wired_action) &&
		IFNET_IS_WIRED(ifp)) {
		if (interface_type_denied != NULL) {
			*interface_type_denied = IFRTYPE_FUNCTIONAL_WIRED;
		}
		if (type_aggregate_action == NECP_ROUTE_RULE_NONE ||
			(type_aggregate_action == NECP_ROUTE_RULE_ALLOW_INTERFACE &&
			route_rule->wired_action == NECP_ROUTE_RULE_DENY_INTERFACE)) {
			// Deny wins if there is a conflict
			type_aggregate_action = route_rule->wired_action;
		}
	}

	if (IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(route_rule->expensive_action) &&
		IFNET_IS_EXPENSIVE(ifp)) {
		if (type_aggregate_action == NECP_ROUTE_RULE_NONE ||
			(type_aggregate_action == NECP_ROUTE_RULE_ALLOW_INTERFACE &&
			route_rule->expensive_action == NECP_ROUTE_RULE_DENY_INTERFACE)) {
			// Deny wins if there is a conflict
			type_aggregate_action = route_rule->expensive_action;
		}
	}

	if (type_aggregate_action != NECP_ROUTE_RULE_NONE) {
		if (necp_debug > 1) {
			NECPLOG(LOG_DEBUG, "Route Allowed: C:%d WF:%d W:%d E:%d for Rule %d Allowed %d", route_rule->cellular_action, route_rule->wifi_action, route_rule->wired_action, route_rule->expensive_action, route_rule_id, ((type_aggregate_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? FALSE : TRUE));
		}
		return ((type_aggregate_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? FALSE : TRUE);
	}

	if (necp_debug > 1 && !default_is_allowed) {
		NECPLOG(LOG_DEBUG, "Route Allowed: Using default for Rule %d Allowed %d", route_rule_id, default_is_allowed);
	}
	return (default_is_allowed);
}
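/*
 * As with the QoS path, per-interface exceptions are consulted before the
 * interface-type actions; when several type actions apply to the same ifp,
 * a deny overrides an allow, and only if no action applies does the rule's
 * default_action determine the verdict.
 */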
static bool
necp_route_is_allowed(struct rtentry *route, struct ifnet *interface, u_int32_t route_rule_id, u_int32_t *interface_type_denied)
{
	if ((route == NULL && interface == NULL) || route_rule_id == 0) {
		if (necp_debug > 1) {
			NECPLOG(LOG_DEBUG, "Route Allowed: no route or interface, Rule %d Allowed %d", route_rule_id, TRUE);
		}
		return (TRUE);
	}

	if (ROUTE_RULE_IS_AGGREGATE(route_rule_id)) {
		struct necp_aggregate_route_rule *aggregate_route_rule = necp_lookup_aggregate_route_rule_locked(route_rule_id);
		if (aggregate_route_rule != NULL) {
			int index = 0;
			for (index = 0; index < MAX_AGGREGATE_ROUTE_RULES; index++) {
				u_int32_t sub_route_rule_id = aggregate_route_rule->rule_ids[index];
				if (sub_route_rule_id == 0) {
					break;
				}
				if (!necp_route_is_allowed_inner(route, interface, sub_route_rule_id, interface_type_denied)) {
					return (FALSE);
				}
			}
		}
	} else {
		return (necp_route_is_allowed_inner(route, interface, route_rule_id, interface_type_denied));
	}

	return (TRUE);
}
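/*
 * Aggregate route rule IDs fan out into their component rule IDs here; the
 * route is allowed only if every non-zero sub-rule allows it, while a plain
 * rule ID is evaluated directly by necp_route_is_allowed_inner.
 */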
bool
necp_packet_is_allowed_over_interface(struct mbuf *packet, struct ifnet *interface)
{
	bool is_allowed = TRUE;
	u_int32_t route_rule_id = necp_get_route_rule_id_from_packet(packet);
	if (route_rule_id != 0 &&
		interface != NULL) {
		lck_rw_lock_shared(&necp_kernel_policy_lock);
		is_allowed = necp_route_is_allowed(NULL, interface, necp_get_route_rule_id_from_packet(packet), NULL);
		lck_rw_done(&necp_kernel_policy_lock);
	}
	return (is_allowed);
}
static bool
necp_netagents_allow_traffic(u_int32_t *netagent_ids, size_t netagent_id_count)
{
	size_t netagent_cursor;
	for (netagent_cursor = 0; netagent_cursor < netagent_id_count; netagent_cursor++) {
		struct necp_uuid_id_mapping *mapping = NULL;
		u_int32_t netagent_id = netagent_ids[netagent_cursor];
		if (netagent_id == 0) {
			break;
		}
		mapping = necp_uuid_lookup_uuid_with_service_id_locked(netagent_id);
		if (mapping != NULL) {
			u_int32_t agent_flags = 0;
			agent_flags = netagent_get_flags(mapping->uuid);
			if (agent_flags & NETAGENT_FLAG_REGISTERED) {
				if (agent_flags & NETAGENT_FLAG_ACTIVE) {
					continue;
				} else if ((agent_flags & NETAGENT_FLAG_VOLUNTARY) == 0) {
					return (FALSE);
				}
			}
		}
	}
	return (TRUE);
}
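/*
 * In other words: a registered, active agent never blocks traffic; a
 * registered agent that is inactive but voluntary is ignored; and a
 * registered agent that is inactive and not voluntary causes the traffic
 * to be disallowed until the agent becomes active.
 */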
static bool
necp_socket_is_allowed_to_send_recv_internal(struct inpcb *inp, struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr, ifnet_t interface, necp_kernel_policy_id *return_policy_id, u_int32_t *return_route_rule_id)
{
	u_int32_t verifyifindex = interface ? interface->if_index : 0;
	bool allowed_to_receive = TRUE;
	struct necp_socket_info info;
	u_int32_t flowhash = 0;
	necp_kernel_policy_result service_action = 0;
	necp_kernel_policy_service service = { 0, 0 };
	u_int32_t route_rule_id = 0;
	struct rtentry *route = NULL;
	u_int32_t interface_type_denied = IFRTYPE_FUNCTIONAL_UNKNOWN;

	u_int32_t netagent_ids[NECP_MAX_NETAGENTS];
	memset(&netagent_ids, 0, sizeof(netagent_ids));

	if (return_policy_id) {
		*return_policy_id = NECP_KERNEL_POLICY_ID_NONE;
	}
	if (return_route_rule_id) {
		*return_route_rule_id = 0;
	}

	if (inp == NULL) {
		goto done;
	}

	route = inp->inp_route.ro_rt;

	// Don't lock. Possible race condition, but we don't want the performance hit.
	if (necp_kernel_socket_policies_count == 0 ||
		(!(inp->inp_flags2 & INP2_WANT_APP_POLICY) && necp_kernel_socket_policies_non_app_count == 0)) {
		if (necp_drop_all_order > 0) {
			if (necp_socket_bypass(override_local_addr, override_remote_addr, inp)) {
				allowed_to_receive = TRUE;
			} else {
				allowed_to_receive = FALSE;
			}
		}
		goto done;
	}

	// If this socket is connected, or we are not taking addresses into account, try to reuse last result
	if ((necp_socket_is_connected(inp) || (override_local_addr == NULL && override_remote_addr == NULL)) && inp->inp_policyresult.policy_id != NECP_KERNEL_POLICY_ID_NONE) {
		bool policies_have_changed = FALSE;
		bool route_allowed = TRUE;

		if (inp->inp_policyresult.policy_gencount != necp_kernel_socket_policies_gencount) {
			policies_have_changed = TRUE;
		} else {
			if (inp->inp_policyresult.results.route_rule_id != 0) {
				lck_rw_lock_shared(&necp_kernel_policy_lock);
				if (!necp_route_is_allowed(route, interface, inp->inp_policyresult.results.route_rule_id, &interface_type_denied)) {
					route_allowed = FALSE;
				}
				lck_rw_done(&necp_kernel_policy_lock);
			}
		}

		if (!policies_have_changed) {
			if (!route_allowed ||
				inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_DROP ||
				inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT ||
				(inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL && interface &&
				inp->inp_policyresult.results.result_parameter.tunnel_interface_index != verifyifindex)) {
				allowed_to_receive = FALSE;
			} else {
				if (return_policy_id) {
					*return_policy_id = inp->inp_policyresult.policy_id;
				}
				if (return_route_rule_id) {
					*return_route_rule_id = inp->inp_policyresult.results.route_rule_id;
				}
			}
			goto done;
		}
	}

	// Check for loopback exception
	if (necp_socket_bypass(override_local_addr, override_remote_addr, inp)) {
		allowed_to_receive = TRUE;
		goto done;
	}

	// Actually calculate policy result
	lck_rw_lock_shared(&necp_kernel_policy_lock);
	necp_socket_fillout_info_locked(inp, override_local_addr, override_remote_addr, 0, &info);

	flowhash = necp_socket_calc_flowhash_locked(&info);
	if (inp->inp_policyresult.policy_id != NECP_KERNEL_POLICY_ID_NONE &&
		inp->inp_policyresult.policy_gencount == necp_kernel_socket_policies_gencount &&
		inp->inp_policyresult.flowhash == flowhash) {
		if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_DROP ||
			inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT ||
			(inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL && interface &&
			inp->inp_policyresult.results.result_parameter.tunnel_interface_index != verifyifindex) ||
			(inp->inp_policyresult.results.route_rule_id != 0 &&
			!necp_route_is_allowed(route, interface, inp->inp_policyresult.results.route_rule_id, &interface_type_denied))) {
			allowed_to_receive = FALSE;
		} else {
			if (return_policy_id) {
				*return_policy_id = inp->inp_policyresult.policy_id;
			}
			if (return_route_rule_id) {
				*return_route_rule_id = inp->inp_policyresult.results.route_rule_id;
			}
		}
		lck_rw_done(&necp_kernel_policy_lock);
		goto done;
	}

	struct necp_kernel_socket_policy *matched_policy = necp_socket_find_policy_match_with_info_locked(necp_kernel_socket_policies_map[NECP_SOCKET_MAP_APP_ID_TO_BUCKET(info.application_id)], &info, NULL, &route_rule_id, &service_action, &service, netagent_ids, NECP_MAX_NETAGENTS, current_proc());
	if (matched_policy != NULL) {
		if (matched_policy->result == NECP_KERNEL_POLICY_RESULT_DROP ||
			matched_policy->result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT ||
			(matched_policy->result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL && interface &&
			matched_policy->result_parameter.tunnel_interface_index != verifyifindex) ||
			((service_action == NECP_KERNEL_POLICY_RESULT_TRIGGER_SCOPED ||
			service_action == NECP_KERNEL_POLICY_RESULT_NO_TRIGGER_SCOPED) &&
			service.identifier != 0 && service.identifier != NECP_NULL_SERVICE_ID) ||
			(route_rule_id != 0 &&
			!necp_route_is_allowed(route, interface, route_rule_id, &interface_type_denied)) ||
			!necp_netagents_allow_traffic(netagent_ids, NECP_MAX_NETAGENTS)) {
			allowed_to_receive = FALSE;
		} else {
			if (return_policy_id) {
				*return_policy_id = matched_policy->id;
			}
			if (return_route_rule_id) {
				*return_route_rule_id = route_rule_id;
			}
		}
		lck_rw_done(&necp_kernel_policy_lock);

		if (necp_debug > 1 && matched_policy->id != inp->inp_policyresult.policy_id) {
			NECPLOG(LOG_DEBUG, "Socket Send/Recv Policy: Policy %d Allowed %d", return_policy_id ? *return_policy_id : 0, allowed_to_receive);
		}
		goto done;
	} else if (necp_drop_all_order > 0) {
		allowed_to_receive = FALSE;
	} else {
		if (return_policy_id) {
			*return_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
		}
		if (return_route_rule_id) {
			*return_route_rule_id = route_rule_id;
		}
	}

	lck_rw_done(&necp_kernel_policy_lock);

done:
	if (!allowed_to_receive && interface_type_denied != IFRTYPE_FUNCTIONAL_UNKNOWN) {
		soevent(inp->inp_socket, (SO_FILT_HINT_LOCKED | SO_FILT_HINT_IFDENIED));
	}

	return (allowed_to_receive);
}
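/*
 * The function above has three exits: a cheap path when no socket-level
 * policies are loaded, a cached path that reuses inp->inp_policyresult when
 * the policy gencount and flowhash still match, and a full lookup through
 * necp_socket_find_policy_match_with_info_locked. When the traffic is
 * allowed, the matched policy ID and route rule ID are handed back so the
 * caller can stamp them onto outgoing packets.
 */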
bool
necp_socket_is_allowed_to_send_recv_v4(struct inpcb *inp, u_int16_t local_port, u_int16_t remote_port, struct in_addr *local_addr, struct in_addr *remote_addr, ifnet_t interface, necp_kernel_policy_id *return_policy_id, u_int32_t *return_route_rule_id)
{
	struct sockaddr_in local;
	struct sockaddr_in remote;
	local.sin_family = remote.sin_family = AF_INET;
	local.sin_len = remote.sin_len = sizeof(struct sockaddr_in);
	local.sin_port = local_port;
	remote.sin_port = remote_port;
	memcpy(&local.sin_addr, local_addr, sizeof(local.sin_addr));
	memcpy(&remote.sin_addr, remote_addr, sizeof(remote.sin_addr));

	return (necp_socket_is_allowed_to_send_recv_internal(inp, (struct sockaddr *)&local, (struct sockaddr *)&remote, interface, return_policy_id, return_route_rule_id));
}
bool
necp_socket_is_allowed_to_send_recv_v6(struct inpcb *inp, u_int16_t local_port, u_int16_t remote_port, struct in6_addr *local_addr, struct in6_addr *remote_addr, ifnet_t interface, necp_kernel_policy_id *return_policy_id, u_int32_t *return_route_rule_id)
{
	struct sockaddr_in6 local;
	struct sockaddr_in6 remote;
	local.sin6_family = remote.sin6_family = AF_INET6;
	local.sin6_len = remote.sin6_len = sizeof(struct sockaddr_in6);
	local.sin6_port = local_port;
	remote.sin6_port = remote_port;
	memcpy(&local.sin6_addr, local_addr, sizeof(local.sin6_addr));
	memcpy(&remote.sin6_addr, remote_addr, sizeof(remote.sin6_addr));

	return (necp_socket_is_allowed_to_send_recv_internal(inp, (struct sockaddr *)&local, (struct sockaddr *)&remote, interface, return_policy_id, return_route_rule_id));
}
bool
necp_socket_is_allowed_to_send_recv(struct inpcb *inp, necp_kernel_policy_id *return_policy_id, u_int32_t *return_route_rule_id)
{
	return (necp_socket_is_allowed_to_send_recv_internal(inp, NULL, NULL, NULL, return_policy_id, return_route_rule_id));
}
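/*
 * A typical caller (sketch only; the surrounding variable names are
 * illustrative) checks the verdict before transmitting and then propagates
 * the matched IDs onto the outgoing mbuf:
 *
 *	necp_kernel_policy_id policy_id = NECP_KERNEL_POLICY_ID_NONE;
 *	u_int32_t route_rule_id = 0;
 *
 *	if (!necp_socket_is_allowed_to_send_recv(inp, &policy_id, &route_rule_id)) {
 *		return (EHOSTUNREACH);
 *	}
 *	necp_mark_packet_from_socket(m, inp, policy_id, route_rule_id);
 */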
int
necp_mark_packet_from_socket(struct mbuf *packet, struct inpcb *inp, necp_kernel_policy_id policy_id, u_int32_t route_rule_id)
{
	if (packet == NULL || inp == NULL || !(packet->m_flags & M_PKTHDR)) {
		return (EINVAL);
	}

	// Mark ID for Pass and IP Tunnel
	if (policy_id != NECP_KERNEL_POLICY_ID_NONE) {
		packet->m_pkthdr.necp_mtag.necp_policy_id = policy_id;
	} else if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_PASS ||
		inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL) {
		packet->m_pkthdr.necp_mtag.necp_policy_id = inp->inp_policyresult.policy_id;
	} else {
		packet->m_pkthdr.necp_mtag.necp_policy_id = NECP_KERNEL_POLICY_ID_NONE;
	}
	packet->m_pkthdr.necp_mtag.necp_last_interface_index = 0;
	if (route_rule_id != 0) {
		packet->m_pkthdr.necp_mtag.necp_route_rule_id = route_rule_id;
	} else {
		packet->m_pkthdr.necp_mtag.necp_route_rule_id = inp->inp_policyresult.results.route_rule_id;
	}
	packet->m_pkthdr.necp_mtag.necp_app_id = inp->inp_policyresult.app_id;

	return (0);
}
int
necp_mark_packet_from_ip(struct mbuf *packet, necp_kernel_policy_id policy_id)
{
	if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
		return (EINVAL);
	}

	// Mark ID for Pass and IP Tunnel
	if (policy_id != NECP_KERNEL_POLICY_ID_NONE) {
		packet->m_pkthdr.necp_mtag.necp_policy_id = policy_id;
	} else {
		packet->m_pkthdr.necp_mtag.necp_policy_id = NECP_KERNEL_POLICY_ID_NONE;
	}

	return (0);
}
int
necp_mark_packet_from_interface(struct mbuf *packet, ifnet_t interface)
{
	if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
		return (EINVAL);
	}

	// Mark ID for Pass and IP Tunnel
	if (interface != NULL) {
		packet->m_pkthdr.necp_mtag.necp_last_interface_index = interface->if_index;
	}

	return (0);
}
int
necp_mark_packet_as_keepalive(struct mbuf *packet, bool is_keepalive)
{
	if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
		return (EINVAL);
	}

	if (is_keepalive) {
		packet->m_pkthdr.pkt_flags |= PKTF_KEEPALIVE;
	} else {
		packet->m_pkthdr.pkt_flags &= ~PKTF_KEEPALIVE;
	}

	return (0);
}
necp_kernel_policy_id
necp_get_policy_id_from_packet(struct mbuf *packet)
{
	if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
		return (NECP_KERNEL_POLICY_ID_NONE);
	}

	return (packet->m_pkthdr.necp_mtag.necp_policy_id);
}
u_int32_t
necp_get_last_interface_index_from_packet(struct mbuf *packet)
{
	if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
		return (0);
	}

	return (packet->m_pkthdr.necp_mtag.necp_last_interface_index);
}
u_int32_t
necp_get_route_rule_id_from_packet(struct mbuf *packet)
{
	if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
		return (0);
	}

	return (packet->m_pkthdr.necp_mtag.necp_route_rule_id);
}
int
necp_get_app_uuid_from_packet(struct mbuf *packet,
							  uuid_t app_uuid)
{
	if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
		return (EINVAL);
	}

	bool found_mapping = FALSE;
	if (packet->m_pkthdr.necp_mtag.necp_app_id != 0) {
		lck_rw_lock_shared(&necp_kernel_policy_lock);
		struct necp_uuid_id_mapping *entry = necp_uuid_lookup_uuid_with_app_id_locked(packet->m_pkthdr.necp_mtag.necp_app_id);
		if (entry != NULL) {
			uuid_copy(app_uuid, entry->uuid);
			found_mapping = true;
		}
		lck_rw_done(&necp_kernel_policy_lock);
	}
	if (!found_mapping) {
		uuid_clear(app_uuid);
	}
	return (0);
}
bool
necp_get_is_keepalive_from_packet(struct mbuf *packet)
{
	if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
		return (FALSE);
	}

	return (packet->m_pkthdr.pkt_flags & PKTF_KEEPALIVE);
}
u_int32_t
necp_socket_get_content_filter_control_unit(struct socket *so)
{
	struct inpcb *inp = sotoinpcb(so);

	if (inp == NULL) {
		return (0);
	}
	return (inp->inp_policyresult.results.filter_control_unit);
}
bool
necp_socket_should_use_flow_divert(struct inpcb *inp)
{
	if (inp == NULL) {
		return (FALSE);
	}

	return (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT);
}
u_int32_t
necp_socket_get_flow_divert_control_unit(struct inpcb *inp)
{
	if (inp == NULL) {
		return (0);
	}

	if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT) {
		return (inp->inp_policyresult.results.result_parameter.flow_divert_control_unit);
	}

	return (0);
}
bool
necp_socket_should_rescope(struct inpcb *inp)
{
	if (inp == NULL) {
		return (FALSE);
	}

	return (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED);
}
u_int32_t
necp_socket_get_rescope_if_index(struct inpcb *inp)
{
	if (inp == NULL) {
		return (0);
	}

	if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED) {
		return (inp->inp_policyresult.results.result_parameter.scoped_interface_index);
	}

	return (0);
}
u_int32_t
necp_socket_get_effective_mtu(struct inpcb *inp, u_int32_t current_mtu)
{
	if (inp == NULL) {
		return (current_mtu);
	}

	if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL &&
		(inp->inp_flags & INP_BOUND_IF) &&
		inp->inp_boundifp) {

		u_int bound_interface_index = inp->inp_boundifp->if_index;
		u_int tunnel_interface_index = inp->inp_policyresult.results.result_parameter.tunnel_interface_index;

		// The result is IP Tunnel, and is rescoping from one interface to another. Recalculate MTU.
		if (bound_interface_index != tunnel_interface_index) {
			ifnet_t tunnel_interface = NULL;

			ifnet_head_lock_shared();
			tunnel_interface = ifindex2ifnet[tunnel_interface_index];
			ifnet_head_done();

			if (tunnel_interface != NULL) {
				u_int32_t direct_tunnel_mtu = tunnel_interface->if_mtu;
				u_int32_t delegate_tunnel_mtu = (tunnel_interface->if_delegated.ifp != NULL) ? tunnel_interface->if_delegated.ifp->if_mtu : 0;
				if (delegate_tunnel_mtu != 0 &&
					strncmp(tunnel_interface->if_name, "ipsec", strlen("ipsec")) == 0) {
					// For ipsec interfaces, calculate the overhead from the delegate interface
					u_int32_t tunnel_overhead = (u_int32_t)(esp_hdrsiz(NULL) + sizeof(struct ip6_hdr));
					if (delegate_tunnel_mtu > tunnel_overhead) {
						delegate_tunnel_mtu -= tunnel_overhead;
					}

					if (delegate_tunnel_mtu < direct_tunnel_mtu) {
						// If the (delegate - overhead) < direct, return (delegate - overhead)
						return (delegate_tunnel_mtu);
					} else {
						// Otherwise return direct
						return (direct_tunnel_mtu);
					}
				} else {
					// For non-ipsec interfaces, just return the tunnel MTU
					return (direct_tunnel_mtu);
				}
			}
		}
	}

	// By default, just return the MTU passed in
	return (current_mtu);
}
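/*
 * Illustrative arithmetic for the ipsec case above: if the delegate
 * interface's MTU is 1500 and esp_hdrsiz(NULL) + sizeof(struct ip6_hdr)
 * comes to, say, 73 bytes, the delegate-derived MTU is 1427; that value is
 * returned only when it is smaller than the tunnel interface's own if_mtu,
 * otherwise the tunnel MTU is used as-is.
 */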
ifnet_t
necp_get_ifnet_from_result_parameter(necp_kernel_policy_result_parameter *result_parameter)
{
	if (result_parameter == NULL) {
		return (NULL);
	}

	return (ifindex2ifnet[result_parameter->tunnel_interface_index]);
}
static bool
necp_packet_can_rebind_to_ifnet(struct mbuf *packet, struct ifnet *interface, struct route *new_route, int family)
{
	bool found_match = FALSE;
	errno_t result = 0;
	ifaddr_t *addresses = NULL;
	union necp_sockaddr_union address_storage;
	int i;

	if (packet == NULL || interface == NULL || new_route == NULL || (family != AF_INET && family != AF_INET6)) {
		return (FALSE);
	}

	result = ifnet_get_address_list_family(interface, &addresses, family);
	if (result != 0) {
		NECPLOG(LOG_ERR, "Failed to get address list for %s%d", ifnet_name(interface), ifnet_unit(interface));
		return (FALSE);
	}

	for (i = 0; addresses[i] != NULL; i++) {
		ROUTE_RELEASE(new_route);
		if (ifaddr_address(addresses[i], &address_storage.sa, sizeof(address_storage)) == 0) {
			if (family == AF_INET) {
				struct ip *ip = mtod(packet, struct ip *);
				if (memcmp(&address_storage.sin.sin_addr, &ip->ip_src, sizeof(ip->ip_src)) == 0) {
					struct sockaddr_in *dst4 = (struct sockaddr_in *)(void *)&new_route->ro_dst;
					dst4->sin_family = AF_INET;
					dst4->sin_len = sizeof(struct sockaddr_in);
					dst4->sin_addr = ip->ip_dst;
					rtalloc_scoped(new_route, interface->if_index);
					if (!ROUTE_UNUSABLE(new_route)) {
						found_match = TRUE;
						goto done;
					}
				}
			} else if (family == AF_INET6) {
				struct ip6_hdr *ip6 = mtod(packet, struct ip6_hdr *);
				if (memcmp(&address_storage.sin6.sin6_addr, &ip6->ip6_src, sizeof(ip6->ip6_src)) == 0) {
					struct sockaddr_in6 *dst6 = (struct sockaddr_in6 *)(void *)&new_route->ro_dst;
					dst6->sin6_family = AF_INET6;
					dst6->sin6_len = sizeof(struct sockaddr_in6);
					dst6->sin6_addr = ip6->ip6_dst;
					rtalloc_scoped(new_route, interface->if_index);
					if (!ROUTE_UNUSABLE(new_route)) {
						found_match = TRUE;
						goto done;
					}
				}
			}
		}
	}

done:
	ifnet_free_address_list(addresses);
	addresses = NULL;
	return (found_match);
}
static bool
necp_addr_is_loopback(struct sockaddr *address)
{
	if (address == NULL) {
		return (FALSE);
	}

	if (address->sa_family == AF_INET) {
		return (ntohl(((struct sockaddr_in *)(void *)address)->sin_addr.s_addr) == INADDR_LOOPBACK);
	} else if (address->sa_family == AF_INET6) {
		return IN6_IS_ADDR_LOOPBACK(&((struct sockaddr_in6 *)(void *)address)->sin6_addr);
	}

	return (FALSE);
}
static bool
necp_is_loopback(struct sockaddr *local_addr, struct sockaddr *remote_addr, struct inpcb *inp, struct mbuf *packet)
{
	// Note: This function only checks for the loopback addresses.
	// In the future, we may want to expand to also allow any traffic
	// going through the loopback interface, but until then, this
	// check is cheaper.

	if (local_addr != NULL && necp_addr_is_loopback(local_addr)) {
		return (TRUE);
	}

	if (remote_addr != NULL && necp_addr_is_loopback(remote_addr)) {
		return (TRUE);
	}

	if (inp != NULL) {
		if ((inp->inp_flags & INP_BOUND_IF) && inp->inp_boundifp && (inp->inp_boundifp->if_flags & IFF_LOOPBACK)) {
			return (TRUE);
		}
		if (inp->inp_vflag & INP_IPV4) {
			if (ntohl(inp->inp_laddr.s_addr) == INADDR_LOOPBACK ||
				ntohl(inp->inp_faddr.s_addr) == INADDR_LOOPBACK) {
				return (TRUE);
			}
		} else if (inp->inp_vflag & INP_IPV6) {
			if (IN6_IS_ADDR_LOOPBACK(&inp->in6p_laddr) ||
				IN6_IS_ADDR_LOOPBACK(&inp->in6p_faddr)) {
				return (TRUE);
			}
		}
	}

	if (packet != NULL) {
		struct ip *ip = mtod(packet, struct ip *);
		if (ip->ip_v == 4) {
			if (ntohl(ip->ip_src.s_addr) == INADDR_LOOPBACK) {
				return (TRUE);
			}
			if (ntohl(ip->ip_dst.s_addr) == INADDR_LOOPBACK) {
				return (TRUE);
			}
		} else if (ip->ip_v == 6) {
			struct ip6_hdr *ip6 = mtod(packet, struct ip6_hdr *);
			if (IN6_IS_ADDR_LOOPBACK(&ip6->ip6_src)) {
				return (TRUE);
			}
			if (IN6_IS_ADDR_LOOPBACK(&ip6->ip6_dst)) {
				return (TRUE);
			}
		}
	}

	return (FALSE);
}
static bool
necp_is_intcoproc(struct inpcb *inp, struct mbuf *packet)
{
	if (inp != NULL) {
		return (sflt_permission_check(inp) ? true : false);
	}
	if (packet != NULL) {
		struct ip6_hdr *ip6 = mtod(packet, struct ip6_hdr *);
		if ((ip6->ip6_vfc & IPV6_VERSION_MASK) == IPV6_VERSION &&
			IN6_IS_ADDR_LINKLOCAL(&ip6->ip6_dst) &&
			ip6->ip6_dst.s6_addr32[2] == ntohl(0xaede48ff) &&
			ip6->ip6_dst.s6_addr32[3] == ntohl(0xfe334455)) {