/*
 * Copyright (c) 2013-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <libkern/OSMalloc.h>
#include <sys/kernel.h>
#include <sys/kern_control.h>
#include <sys/kpi_mbuf.h>
#include <sys/proc_uuid_policy.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/coalition.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_cache.h>
#include <netinet/udp.h>
#include <netinet/in_pcb.h>
#include <netinet/in_tclass.h>
#include <netinet6/esp.h>
#include <net/flowhash.h>
#include <net/if_var.h>
#include <sys/kauth.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/kern_event.h>
#include <sys/file_internal.h>
#include <IOKit/IOBSD.h>
#include <net/network_agent.h>
/*
 * NECP - Network Extension Control Policy database
 * ------------------------------------------------
 * The goal of this module is to allow clients connecting via a
 * kernel control socket to create high-level policy sessions, which
 * are ingested into low-level kernel policies that control and tag
 * traffic at the application, socket, and IP layers.
 *
 * ------------------------------------------------
 * Sessions
 * ------------------------------------------------
 * Each session owns a list of session policies, each of which can
 * specify any combination of conditions and a single result. Each
 * session also has a priority level (such as High, Default, or Low)
 * which is requested by the client. Based on the requested level,
 * a session order value is assigned to the session, which will be used
 * to sort kernel policies generated by the session. The session client
 * can specify the sub-order for each policy it creates which will be
 * used to further sort the kernel policies.
 *
 * Kernel Control Socket --> 1 necp_session --> list of necp_session_policy structs
 *
 * ------------------------------------------------
 * Kernel Policies
 * ------------------------------------------------
 * Whenever a session sends the Apply command, its policies are ingested
 * and generate kernel policies. There are two phases of kernel policy
 * ingestion:
 *
 * 1. The session policy is parsed to create kernel policies at the socket
 * and IP layers, when applicable. For example, a policy that requires
 * all traffic from App1 to Pass will generate a socket kernel policy to
 * match App1 and mark packets with ID1, and also an IP policy to match
 * ID1 and let the packet pass. This is handled in necp_apply_policy. The
 * resulting kernel policies are added to the global socket and IP layer
 * policy lists.
 * necp_session_policy --> necp_kernel_socket_policy and necp_kernel_ip_output_policy
 *                                  |                              |
 *                                  v                              v
 *                  necp_kernel_socket_policies     necp_kernel_ip_output_policies
 *
 * 2. Once the global lists of kernel policies have been filled out, each
 * list is traversed to create optimized sub-lists ("Maps") which are used during
 * data-path evaluation. IP policies are sent into necp_kernel_ip_output_policies_map,
 * which hashes incoming packets based on marked socket-layer policies, and removes
 * duplicate or overlapping policies. Socket policies are sent into two maps,
 * necp_kernel_socket_policies_map and necp_kernel_socket_policies_app_layer_map.
 * The app layer map is used for policy checks coming in from user space, and is one
 * list with duplicate and overlapping policies removed. The socket map hashes based
 * on app UUID, and removes duplicate and overlapping policies.
 * necp_kernel_socket_policy --> necp_kernel_socket_policies_app_layer_map
 *                           |-> necp_kernel_socket_policies_map
 *
 * necp_kernel_ip_output_policies --> necp_kernel_ip_output_policies_map
 *
 * ------------------------------------------------
 * Drop All Level
 * ------------------------------------------------
 * The Drop All Level is a sysctl that controls the level at which policies are allowed
 * to override a global drop rule. If the value is 0, no drop rule is applied. If the value
 * is 1, all traffic is dropped. If the value is greater than 1, all kernel policies created
 * by a session with a priority level better than (numerically less than) the
 * Drop All Level will allow matching traffic to not be dropped. The Drop All Level is
 * dynamically interpreted into necp_drop_all_order, which specifies the equivalent assigned
 * session orders to be dropped.
 */
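/*
 * Worked example (illustrative only, not original commentary): with the
 * "1000 orders per priority" scheme used by necp_allocate_new_session_order()
 * below, a Drop All Level of 0 leaves necp_drop_all_order at 0 and no drop rule
 * is applied; a level of 1 maps to order 1, so every session order falls at or
 * past the drop point and all traffic is dropped; a level of 3 maps to order
 * ((3 - 1) * 1000) + 1 = 2001, so only policies from sessions whose orders fall
 * in 1-2000 (priority levels 1 and 2, numerically better than 3) keep their
 * matching traffic from being dropped.
 */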
u_int32_t necp_drop_all_order = 0;
u_int32_t necp_drop_all_level = 0;

u_int32_t necp_pass_loopback = 1; // 0=Off, 1=On
u_int32_t necp_pass_keepalives = 1; // 0=Off, 1=On

u_int32_t necp_debug = 0; // 0=None, 1=Basic, 2=EveryMatch

u_int32_t necp_session_count = 0;
#define LIST_INSERT_SORTED_ASCENDING(head, elm, field, sortfield, tmpelm) do { \
	if (LIST_EMPTY((head)) || (LIST_FIRST(head)->sortfield >= (elm)->sortfield)) { \
		LIST_INSERT_HEAD((head), elm, field); \
	} else { \
		LIST_FOREACH(tmpelm, head, field) { \
			if (LIST_NEXT(tmpelm, field) == NULL || LIST_NEXT(tmpelm, field)->sortfield >= (elm)->sortfield) { \
				LIST_INSERT_AFTER(tmpelm, elm, field); \
				break; \
			} \
		} \
	} \
} while (0)

#define LIST_INSERT_SORTED_TWICE_ASCENDING(head, elm, field, firstsortfield, secondsortfield, tmpelm) do { \
	if (LIST_EMPTY((head)) || (LIST_FIRST(head)->firstsortfield > (elm)->firstsortfield) || ((LIST_FIRST(head)->firstsortfield == (elm)->firstsortfield) && (LIST_FIRST(head)->secondsortfield >= (elm)->secondsortfield))) { \
		LIST_INSERT_HEAD((head), elm, field); \
	} else { \
		LIST_FOREACH(tmpelm, head, field) { \
			if (LIST_NEXT(tmpelm, field) == NULL || (LIST_NEXT(tmpelm, field)->firstsortfield > (elm)->firstsortfield) || ((LIST_NEXT(tmpelm, field)->firstsortfield == (elm)->firstsortfield) && (LIST_NEXT(tmpelm, field)->secondsortfield >= (elm)->secondsortfield))) { \
				LIST_INSERT_AFTER(tmpelm, elm, field); \
				break; \
			} \
		} \
	} \
} while (0)

#define LIST_INSERT_SORTED_THRICE_ASCENDING(head, elm, field, firstsortfield, secondsortfield, thirdsortfield, tmpelm) do { \
	if (LIST_EMPTY((head)) || (LIST_FIRST(head)->firstsortfield > (elm)->firstsortfield) || ((LIST_FIRST(head)->firstsortfield == (elm)->firstsortfield) && (LIST_FIRST(head)->secondsortfield >= (elm)->secondsortfield)) || ((LIST_FIRST(head)->firstsortfield == (elm)->firstsortfield) && (LIST_FIRST(head)->secondsortfield == (elm)->secondsortfield) && (LIST_FIRST(head)->thirdsortfield >= (elm)->thirdsortfield))) { \
		LIST_INSERT_HEAD((head), elm, field); \
	} else { \
		LIST_FOREACH(tmpelm, head, field) { \
			if (LIST_NEXT(tmpelm, field) == NULL || (LIST_NEXT(tmpelm, field)->firstsortfield > (elm)->firstsortfield) || ((LIST_NEXT(tmpelm, field)->firstsortfield == (elm)->firstsortfield) && (LIST_NEXT(tmpelm, field)->secondsortfield >= (elm)->secondsortfield)) || ((LIST_NEXT(tmpelm, field)->firstsortfield == (elm)->firstsortfield) && (LIST_NEXT(tmpelm, field)->secondsortfield == (elm)->secondsortfield) && (LIST_NEXT(tmpelm, field)->thirdsortfield >= (elm)->thirdsortfield))) { \
				LIST_INSERT_AFTER(tmpelm, elm, field); \
				break; \
			} \
		} \
	} \
} while (0)
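/*
 * Usage sketch (illustrative only): keeping a session's policy list sorted by
 * ascending policy order when a new policy is created. The field names follow
 * the struct necp_session_policy usage seen elsewhere in this file.
 *
 *	struct necp_session_policy *tmp_policy = NULL;
 *	LIST_INSERT_SORTED_ASCENDING(&session->policies, new_policy, chain, order, tmp_policy);
 */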
#define IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(x) ((x) == NECP_ROUTE_RULE_DENY_INTERFACE || (x) == NECP_ROUTE_RULE_ALLOW_INTERFACE)

#define NECP_KERNEL_CONDITION_ALL_INTERFACES		0x00001
#define NECP_KERNEL_CONDITION_BOUND_INTERFACE		0x00002
#define NECP_KERNEL_CONDITION_PROTOCOL			0x00004
#define NECP_KERNEL_CONDITION_LOCAL_START		0x00008
#define NECP_KERNEL_CONDITION_LOCAL_END			0x00010
#define NECP_KERNEL_CONDITION_LOCAL_PREFIX		0x00020
#define NECP_KERNEL_CONDITION_REMOTE_START		0x00040
#define NECP_KERNEL_CONDITION_REMOTE_END		0x00080
#define NECP_KERNEL_CONDITION_REMOTE_PREFIX		0x00100
#define NECP_KERNEL_CONDITION_APP_ID			0x00200
#define NECP_KERNEL_CONDITION_REAL_APP_ID		0x00400
#define NECP_KERNEL_CONDITION_DOMAIN			0x00800
#define NECP_KERNEL_CONDITION_ACCOUNT_ID		0x01000
#define NECP_KERNEL_CONDITION_POLICY_ID			0x02000
#define NECP_KERNEL_CONDITION_PID			0x04000
#define NECP_KERNEL_CONDITION_UID			0x08000
#define NECP_KERNEL_CONDITION_LAST_INTERFACE		0x10000 // Only set from packets looping between interfaces
#define NECP_KERNEL_CONDITION_TRAFFIC_CLASS		0x20000
#define NECP_KERNEL_CONDITION_ENTITLEMENT		0x40000
#define NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT	0x80000

#define NECP_MAX_POLICY_RESULT_SIZE			512
#define NECP_MAX_ROUTE_RULES_ARRAY_SIZE			1024
#define NECP_MAX_CONDITIONS_ARRAY_SIZE			4096
#define NECP_MAX_POLICY_LIST_COUNT			1024

// Cap the policy size at the max result + conditions size, with room for extra TLVs
#define NECP_MAX_POLICY_SIZE				(1024 + NECP_MAX_POLICY_RESULT_SIZE + NECP_MAX_CONDITIONS_ARRAY_SIZE)
struct necp_service_registration {
	LIST_ENTRY(necp_service_registration) session_chain;
	LIST_ENTRY(necp_service_registration) kernel_chain;
	u_int32_t service_id;
};

struct necp_session {
	u_int8_t necp_fd_type;
	u_int32_t control_unit;
	u_int32_t session_priority; // Descriptive priority rating
	u_int32_t session_order;

	decl_lck_mtx_data(, lock);

	bool proc_locked; // Messages must come from proc_uuid

	LIST_HEAD(_policies, necp_session_policy) policies;

	LIST_HEAD(_services, necp_service_registration) services;

	TAILQ_ENTRY(necp_session) chain;
};

#define NECP_SESSION_LOCK(_s) lck_mtx_lock(&_s->lock)
#define NECP_SESSION_UNLOCK(_s) lck_mtx_unlock(&_s->lock)

static TAILQ_HEAD(_necp_session_list, necp_session) necp_session_list;
struct necp_socket_info {
	union necp_sockaddr_union local_addr;
	union necp_sockaddr_union remote_addr;
	u_int32_t bound_interface_index;
	u_int32_t traffic_class;
	u_int32_t application_id;
	u_int32_t real_application_id;
	u_int32_t account_id;
};
static kern_ctl_ref necp_kctlref;
static u_int32_t necp_family;
static OSMallocTag necp_malloc_tag;
static lck_grp_attr_t *necp_kernel_policy_grp_attr = NULL;
static lck_attr_t *necp_kernel_policy_mtx_attr = NULL;
static lck_grp_t *necp_kernel_policy_mtx_grp = NULL;
decl_lck_rw_data(static, necp_kernel_policy_lock);

static lck_grp_attr_t *necp_route_rule_grp_attr = NULL;
static lck_attr_t *necp_route_rule_mtx_attr = NULL;
static lck_grp_t *necp_route_rule_mtx_grp = NULL;
decl_lck_rw_data(static, necp_route_rule_lock);

static necp_policy_id necp_last_policy_id = 0;
static necp_kernel_policy_id necp_last_kernel_policy_id = 0;
static u_int32_t necp_last_uuid_id = 0;
static u_int32_t necp_last_string_id = 0;
static u_int32_t necp_last_route_rule_id = 0;
static u_int32_t necp_last_aggregate_route_rule_id = 0;
/*
 * On modification, invalidate cached lookups by bumping the generation count.
 * Other calls will need to take the slowpath of taking
 * the subsystem lock.
 */
static volatile int32_t necp_kernel_socket_policies_gencount;
#define BUMP_KERNEL_SOCKET_POLICIES_GENERATION_COUNT() do { \
	if (OSIncrementAtomic(&necp_kernel_socket_policies_gencount) == (INT32_MAX - 1)) { \
		necp_kernel_socket_policies_gencount = 1; \
	} \
} while (0)
static u_int32_t necp_kernel_application_policies_condition_mask;
static size_t necp_kernel_application_policies_count;
static u_int32_t necp_kernel_socket_policies_condition_mask;
static size_t necp_kernel_socket_policies_count;
static size_t necp_kernel_socket_policies_non_app_count;
static LIST_HEAD(_necpkernelsocketconnectpolicies, necp_kernel_socket_policy) necp_kernel_socket_policies;
#define NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS 5
#define NECP_SOCKET_MAP_APP_ID_TO_BUCKET(appid) (appid ? (appid%(NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS - 1) + 1) : 0)
static struct necp_kernel_socket_policy **necp_kernel_socket_policies_map[NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS];
static struct necp_kernel_socket_policy **necp_kernel_socket_policies_app_layer_map;

/*
 * A note on policy 'maps': these are used for boosting efficiency when matching policies. For each dimension of the map,
 * such as an ID, the 0 bucket is reserved for sockets/packets that do not have this parameter, while the other
 * buckets lead to an array of policy pointers that form the list applicable when the (parameter%(NUM_BUCKETS - 1) + 1) == bucket_index.
 *
 * For example, a packet with policy ID of 7, when there are 4 ID buckets, will map to bucket (7%3 + 1) = 2.
 */
static u_int32_t necp_kernel_ip_output_policies_condition_mask;
static size_t necp_kernel_ip_output_policies_count;
static size_t necp_kernel_ip_output_policies_non_id_count;
static LIST_HEAD(_necpkernelipoutputpolicies, necp_kernel_ip_output_policy) necp_kernel_ip_output_policies;
#define NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS 5
#define NECP_IP_OUTPUT_MAP_ID_TO_BUCKET(id) (id ? (id%(NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS - 1) + 1) : 0)
static struct necp_kernel_ip_output_policy **necp_kernel_ip_output_policies_map[NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS];
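/*
 * Illustrative bucket arithmetic (not part of the build): with
 * NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS == 5, a non-zero policy ID
 * hashes into one of buckets 1-4, while ID 0 always lands in bucket 0:
 *
 *	NECP_IP_OUTPUT_MAP_ID_TO_BUCKET(0) == 0
 *	NECP_IP_OUTPUT_MAP_ID_TO_BUCKET(7) == (7 % 4) + 1 == 4
 *	NECP_IP_OUTPUT_MAP_ID_TO_BUCKET(8) == (8 % 4) + 1 == 1
 */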
static struct necp_session *necp_create_session(void);
static void necp_delete_session(struct necp_session *session);

static necp_policy_id necp_handle_policy_add(struct necp_session *session, u_int32_t message_id, mbuf_t packet,
	u_int8_t *tlv_buffer, size_t tlv_buffer_length, int offset, int *error);
static void necp_handle_policy_get(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
static void necp_handle_policy_delete(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
static void necp_handle_policy_apply_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
static void necp_handle_policy_list_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
static void necp_handle_policy_delete_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
static int necp_handle_policy_dump_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet,
	user_addr_t out_buffer, size_t out_buffer_length, int offset);
static void necp_handle_set_session_priority(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
static void necp_handle_lock_session_to_proc(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
static void necp_handle_register_service(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
static void necp_handle_unregister_service(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
#define MAX_RESULT_STRING_LEN 64
static inline const char * necp_get_result_description(char *result_string, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter);

static struct necp_session_policy *necp_policy_create(struct necp_session *session, necp_policy_order order, u_int8_t *conditions_array, u_int32_t conditions_array_size, u_int8_t *route_rules_array, u_int32_t route_rules_array_size, u_int8_t *result, u_int32_t result_size);
static struct necp_session_policy *necp_policy_find(struct necp_session *session, necp_policy_id policy_id);
static bool necp_policy_mark_for_deletion(struct necp_session *session, struct necp_session_policy *policy);
static bool necp_policy_mark_all_for_deletion(struct necp_session *session);
static bool necp_policy_delete(struct necp_session *session, struct necp_session_policy *policy);
static void necp_policy_apply_all(struct necp_session *session);
static necp_kernel_policy_id necp_kernel_socket_policy_add(necp_policy_id parent_policy_id, necp_policy_order order, u_int32_t session_order, int session_pid, u_int32_t condition_mask, u_int32_t condition_negated_mask, necp_app_id cond_app_id, necp_app_id cond_real_app_id, char *cond_custom_entitlement, u_int32_t cond_account_id, char *domain, pid_t cond_pid, uid_t cond_uid, ifnet_t cond_bound_interface, struct necp_policy_condition_tc_range cond_traffic_class, u_int16_t cond_protocol, union necp_sockaddr_union *cond_local_start, union necp_sockaddr_union *cond_local_end, u_int8_t cond_local_prefix, union necp_sockaddr_union *cond_remote_start, union necp_sockaddr_union *cond_remote_end, u_int8_t cond_remote_prefix, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter);
static bool necp_kernel_socket_policy_delete(necp_kernel_policy_id policy_id);
static bool necp_kernel_socket_policies_reprocess(void);
static bool necp_kernel_socket_policies_update_uuid_table(void);
static inline struct necp_kernel_socket_policy *necp_socket_find_policy_match_with_info_locked(struct necp_kernel_socket_policy **policy_search_array, struct necp_socket_info *info, necp_kernel_policy_filter *return_filter, u_int32_t *return_route_rule_id, necp_kernel_policy_result *return_service_action, necp_kernel_policy_service *return_service, u_int32_t *return_netagent_array, size_t netagent_array_count, proc_t proc);

static necp_kernel_policy_id necp_kernel_ip_output_policy_add(necp_policy_id parent_policy_id, necp_policy_order order, necp_policy_order suborder, u_int32_t session_order, int session_pid, u_int32_t condition_mask, u_int32_t condition_negated_mask, necp_kernel_policy_id cond_policy_id, ifnet_t cond_bound_interface, u_int32_t cond_last_interface_index, u_int16_t cond_protocol, union necp_sockaddr_union *cond_local_start, union necp_sockaddr_union *cond_local_end, u_int8_t cond_local_prefix, union necp_sockaddr_union *cond_remote_start, union necp_sockaddr_union *cond_remote_end, u_int8_t cond_remote_prefix, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter);
static bool necp_kernel_ip_output_policy_delete(necp_kernel_policy_id policy_id);
static bool necp_kernel_ip_output_policies_reprocess(void);
static bool necp_is_addr_in_range(struct sockaddr *addr, struct sockaddr *range_start, struct sockaddr *range_end);
static bool necp_is_range_in_range(struct sockaddr *inner_range_start, struct sockaddr *inner_range_end, struct sockaddr *range_start, struct sockaddr *range_end);
static bool necp_is_addr_in_subnet(struct sockaddr *addr, struct sockaddr *subnet_addr, u_int8_t subnet_prefix);
static int necp_addr_compare(struct sockaddr *sa1, struct sockaddr *sa2, int check_port);
static bool necp_buffer_compare_with_bit_prefix(u_int8_t *p1, u_int8_t *p2, u_int32_t bits);
static bool necp_is_loopback(struct sockaddr *local_addr, struct sockaddr *remote_addr, struct inpcb *inp, struct mbuf *packet);
static bool necp_is_intcoproc(struct inpcb *inp, struct mbuf *packet);
struct necp_uuid_id_mapping {
	LIST_ENTRY(necp_uuid_id_mapping) chain;
	u_int32_t table_refcount; // Add to UUID policy table count
};

static size_t necp_num_uuid_app_id_mappings;
static bool necp_uuid_app_id_mappings_dirty;
#define NECP_UUID_APP_ID_HASH_SIZE 64
static u_long necp_uuid_app_id_hash_mask;
static u_long necp_uuid_app_id_hash_num_buckets;
static LIST_HEAD(necp_uuid_id_mapping_head, necp_uuid_id_mapping) *necp_uuid_app_id_hashtbl, necp_uuid_service_id_list; // App map is real hash table, service map is just mapping
#define APPUUIDHASH(uuid) (&necp_uuid_app_id_hashtbl[uuid[0] & necp_uuid_app_id_hash_mask]) // Assume first byte of UUIDs are evenly distributed
static u_int32_t necp_create_uuid_app_id_mapping(uuid_t uuid, bool *allocated_mapping, bool uuid_policy_table);
static bool necp_remove_uuid_app_id_mapping(uuid_t uuid, bool *removed_mapping, bool uuid_policy_table);
static struct necp_uuid_id_mapping *necp_uuid_lookup_uuid_with_app_id_locked(u_int32_t local_id);

static struct necp_uuid_id_mapping *necp_uuid_lookup_service_id_locked(uuid_t uuid);
static struct necp_uuid_id_mapping *necp_uuid_lookup_uuid_with_service_id_locked(u_int32_t local_id);
static u_int32_t necp_create_uuid_service_id_mapping(uuid_t uuid);
static bool necp_remove_uuid_service_id_mapping(uuid_t uuid);
struct necp_string_id_mapping {
	LIST_ENTRY(necp_string_id_mapping) chain;
};

static LIST_HEAD(necp_string_id_mapping_list, necp_string_id_mapping) necp_account_id_list;
static u_int32_t necp_create_string_to_id_mapping(struct necp_string_id_mapping_list *list, char *domain);
static bool necp_remove_string_to_id_mapping(struct necp_string_id_mapping_list *list, char *domain);
static struct necp_string_id_mapping *necp_lookup_string_with_id_locked(struct necp_string_id_mapping_list *list, u_int32_t local_id);

static struct necp_kernel_socket_policy *necp_kernel_socket_policy_find(necp_kernel_policy_id policy_id);
static struct necp_kernel_ip_output_policy *necp_kernel_ip_output_policy_find(necp_kernel_policy_id policy_id);

static LIST_HEAD(_necp_kernel_service_list, necp_service_registration) necp_registered_service_list;

static char *necp_create_trimmed_domain(char *string, size_t length);
static inline int necp_count_dots(char *string, size_t length);

static char *necp_copy_string(char *string, size_t length);
static bool necp_update_qos_marking(struct ifnet *ifp, u_int32_t route_rule_id);
#define ROUTE_RULE_IS_AGGREGATE(ruleid) (ruleid > UINT16_MAX)

#define MAX_ROUTE_RULE_INTERFACES 10
struct necp_route_rule {
	LIST_ENTRY(necp_route_rule) chain;
	u_int32_t default_action;
	u_int8_t cellular_action;
	u_int8_t wifi_action;
	u_int8_t wired_action;
	u_int8_t expensive_action;
	u_int exception_if_indices[MAX_ROUTE_RULE_INTERFACES];
	u_int8_t exception_if_actions[MAX_ROUTE_RULE_INTERFACES];
};

static LIST_HEAD(necp_route_rule_list, necp_route_rule) necp_route_rules;
static u_int32_t necp_create_route_rule(struct necp_route_rule_list *list, u_int8_t *route_rules_array, u_int32_t route_rules_array_size);
static bool necp_remove_route_rule(struct necp_route_rule_list *list, u_int32_t route_rule_id);
static bool necp_route_is_allowed(struct rtentry *route, ifnet_t interface, u_int32_t route_rule_id, u_int32_t *interface_type_denied);
static struct necp_route_rule *necp_lookup_route_rule_locked(struct necp_route_rule_list *list, u_int32_t route_rule_id);
static inline void necp_get_parent_cred_result(proc_t proc, struct necp_socket_info *info);

#define MAX_AGGREGATE_ROUTE_RULES 16
struct necp_aggregate_route_rule {
	LIST_ENTRY(necp_aggregate_route_rule) chain;
	u_int32_t rule_ids[MAX_AGGREGATE_ROUTE_RULES];
};

static LIST_HEAD(necp_aggregate_route_rule_list, necp_aggregate_route_rule) necp_aggregate_route_rules;
static u_int32_t necp_create_aggregate_route_rule(u_int32_t *rule_ids);
// Sysctl definitions
static int sysctl_handle_necp_level SYSCTL_HANDLER_ARGS;

SYSCTL_NODE(_net, OID_AUTO, necp, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "NECP");
SYSCTL_INT(_net_necp, NECPCTL_PASS_LOOPBACK, pass_loopback, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_pass_loopback, 0, "");
SYSCTL_INT(_net_necp, NECPCTL_PASS_KEEPALIVES, pass_keepalives, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_pass_keepalives, 0, "");
SYSCTL_INT(_net_necp, NECPCTL_DEBUG, debug, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_debug, 0, "");
SYSCTL_PROC(_net_necp, NECPCTL_DROP_ALL_LEVEL, drop_all_level, CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW, &necp_drop_all_level, 0, &sysctl_handle_necp_level, "IU", "");
SYSCTL_LONG(_net_necp, NECPCTL_SOCKET_POLICY_COUNT, socket_policy_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_kernel_socket_policies_count, "");
SYSCTL_LONG(_net_necp, NECPCTL_SOCKET_NON_APP_POLICY_COUNT, socket_non_app_policy_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_kernel_socket_policies_non_app_count, "");
SYSCTL_LONG(_net_necp, NECPCTL_IP_POLICY_COUNT, ip_policy_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_kernel_ip_output_policies_count, "");
SYSCTL_INT(_net_necp, NECPCTL_SESSION_COUNT, session_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_session_count, 0, "");
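/*
 * Illustrative sketch (user-space code, not part of this file; assumes only the
 * sysctl names declared above): the NECP knobs and counters registered here
 * appear under the "net.necp" node and can be read with sysctlbyname(3).
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *
 *	static int
 *	print_necp_drop_all_level(void)
 *	{
 *		u_int32_t level = 0;
 *		size_t len = sizeof(level);
 *		if (sysctlbyname("net.necp.drop_all_level", &level, &len, NULL, 0) != 0) {
 *			return (-1);
 *		}
 *		printf("net.necp.drop_all_level = %u\n", level);
 *		return (0);
 *	}
 */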
// Session order allocation
static u_int32_t
necp_allocate_new_session_order(u_int32_t priority, u_int32_t control_unit)
{
	u_int32_t new_order = 0;

	// For now, just allocate 1000 orders for each priority
	if (priority == NECP_SESSION_PRIORITY_UNKNOWN || priority > NECP_SESSION_NUM_PRIORITIES) {
		priority = NECP_SESSION_PRIORITY_DEFAULT;
	}

	// Use the control unit to decide the offset into the priority list
	new_order = (control_unit) + ((priority - 1) * 1000);

	return (new_order);
}

static inline u_int32_t
necp_get_first_order_for_priority(u_int32_t priority)
{
	return (((priority - 1) * 1000) + 1);
}
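/*
 * Worked example (illustrative only): a client that opens kernel-control unit 5
 * and requests priority 3 receives session order 5 + ((3 - 1) * 1000) = 2005,
 * which sorts after every order handed out for priority levels 1 and 2. The
 * corresponding first order for priority 3 is ((3 - 1) * 1000) + 1 = 2001.
 */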
static int
sysctl_handle_necp_level SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
	if (necp_drop_all_level == 0) {
		necp_drop_all_order = 0;
	} else {
		necp_drop_all_order = necp_get_first_order_for_priority(necp_drop_all_level);
	}
	return (error);
}
static int noop_read(struct fileproc *, struct uio *, int, vfs_context_t);
static int noop_write(struct fileproc *, struct uio *, int, vfs_context_t);
static int noop_ioctl(struct fileproc *, unsigned long, caddr_t,
	vfs_context_t);
static int noop_select(struct fileproc *, int, void *, vfs_context_t);
static int necp_session_op_close(struct fileglob *, vfs_context_t);
static int noop_kqfilter(struct fileproc *, struct knote *,
	struct kevent_internal_s *, vfs_context_t);

static const struct fileops necp_session_fd_ops = {
	.fo_type = DTYPE_NETPOLICY,
	.fo_read = noop_read,
	.fo_write = noop_write,
	.fo_ioctl = noop_ioctl,
	.fo_select = noop_select,
	.fo_close = necp_session_op_close,
	.fo_kqfilter = noop_kqfilter,
};
noop_read(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx)
#pragma unused(fp, uio, flags, ctx)

noop_write(struct fileproc *fp, struct uio *uio, int flags,
	vfs_context_t ctx)
#pragma unused(fp, uio, flags, ctx)

noop_ioctl(struct fileproc *fp, unsigned long com, caddr_t data,
	vfs_context_t ctx)
#pragma unused(fp, com, data, ctx)

noop_select(struct fileproc *fp, int which, void *wql, vfs_context_t ctx)
#pragma unused(fp, which, wql, ctx)

noop_kqfilter(struct fileproc *fp, struct knote *kn,
	struct kevent_internal_s *kev, vfs_context_t ctx)
#pragma unused(fp, kn, kev, ctx)
necp_session_open(struct proc *p, struct necp_session_open_args *uap, int *retval)
	struct necp_session *session = NULL;
	struct fileproc *fp = NULL;

	uid_t uid = kauth_cred_getuid(proc_ucred(p));
	if (uid != 0 && priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES, 0) != 0) {
		NECPLOG0(LOG_ERR, "Process does not hold necessary entitlement to open NECP session");

	error = falloc(p, &fp, &fd, vfs_context_current());

	session = necp_create_session();
	if (session == NULL) {

	fp->f_fglob->fg_flag = 0;
	fp->f_fglob->fg_ops = &necp_session_fd_ops;
	fp->f_fglob->fg_data = session;

	FDFLAGS_SET(p, fd, (UF_EXCLOSE | UF_FORKCLOSE));
	procfdtbl_releasefd(p, fd, NULL);
	fp_drop(p, fd, fp, 1);
necp_session_op_close(struct fileglob *fg, vfs_context_t ctx)
	struct necp_session *session = (struct necp_session *)fg->fg_data;

	if (session != NULL) {
		necp_policy_mark_all_for_deletion(session);
		necp_policy_apply_all(session);
		necp_delete_session(session);

necp_session_find_from_fd(int fd, struct necp_session **session)
	proc_t p = current_proc();
	struct fileproc *fp = NULL;

	if ((error = fp_lookup(p, fd, &fp, 1)) != 0) {

	if (fp->f_fglob->fg_ops->fo_type != DTYPE_NETPOLICY) {
		fp_drop(p, fd, fp, 1);

	*session = (struct necp_session *)fp->f_fglob->fg_data;
necp_session_add_policy(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
	u_int8_t *tlv_buffer = NULL;

	if (uap->in_buffer_length == 0 || uap->in_buffer_length > NECP_MAX_POLICY_SIZE || uap->in_buffer == 0) {
		NECPLOG(LOG_ERR, "necp_session_add_policy invalid input (%zu)", uap->in_buffer_length);

	if (uap->out_buffer_length < sizeof(necp_policy_id) || uap->out_buffer == 0) {
		NECPLOG(LOG_ERR, "necp_session_add_policy invalid output buffer (%zu)", uap->out_buffer_length);

	if ((tlv_buffer = _MALLOC(uap->in_buffer_length, M_NECP, M_WAITOK | M_ZERO)) == NULL) {

	error = copyin(uap->in_buffer, tlv_buffer, uap->in_buffer_length);
		NECPLOG(LOG_ERR, "necp_session_add_policy tlv copyin error (%d)", error);

	necp_policy_id new_policy_id = necp_handle_policy_add(session, 0, NULL, tlv_buffer, uap->in_buffer_length, 0, &error);
		NECPLOG(LOG_ERR, "necp_session_add_policy failed to add policy (%d)", error);

	error = copyout(&new_policy_id, uap->out_buffer, sizeof(new_policy_id));
		NECPLOG(LOG_ERR, "necp_session_add_policy policy_id copyout error (%d)", error);

	if (tlv_buffer != NULL) {
		FREE(tlv_buffer, M_NECP);
necp_session_get_policy(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
	u_int8_t *response = NULL;

	if (uap->in_buffer_length < sizeof(necp_policy_id) || uap->in_buffer == 0) {
		NECPLOG(LOG_ERR, "necp_session_get_policy invalid input (%zu)", uap->in_buffer_length);

	necp_policy_id policy_id = 0;
	error = copyin(uap->in_buffer, &policy_id, sizeof(policy_id));
		NECPLOG(LOG_ERR, "necp_session_get_policy policy_id copyin error (%d)", error);

	struct necp_session_policy *policy = necp_policy_find(session, policy_id);
	if (policy == NULL || policy->pending_deletion) {
		NECPLOG(LOG_ERR, "Failed to find policy with id %d", policy_id);

	u_int32_t order_tlv_size = sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(necp_policy_order);
	u_int32_t result_tlv_size = (policy->result_size ? (sizeof(u_int8_t) + sizeof(u_int32_t) + policy->result_size) : 0);
	u_int32_t response_size = order_tlv_size + result_tlv_size + policy->conditions_size;

	if (uap->out_buffer_length < response_size || uap->out_buffer == 0) {
		NECPLOG(LOG_ERR, "necp_session_get_policy buffer not large enough (%u < %u)", uap->out_buffer_length, response_size);

	if (response_size > NECP_MAX_POLICY_SIZE) {
		NECPLOG(LOG_ERR, "necp_session_get_policy size too large to copy (%u)", response_size);

	MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK | M_ZERO);
	if (response == NULL) {

	u_int8_t *cursor = response;
	cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ORDER, sizeof(necp_policy_order), &policy->order, response, response_size);
	if (result_tlv_size) {
		cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_RESULT, policy->result_size, &policy->result, response, response_size);
	if (policy->conditions_size) {
		memcpy(((u_int8_t *)(void *)(cursor)), policy->conditions, policy->conditions_size);

	error = copyout(response, uap->out_buffer, response_size);
		NECPLOG(LOG_ERR, "necp_session_get_policy TLV copyout error (%d)", error);

	if (response != NULL) {
		FREE(response, M_NECP);
necp_session_delete_policy(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
	if (uap->in_buffer_length < sizeof(necp_policy_id) || uap->in_buffer == 0) {
		NECPLOG(LOG_ERR, "necp_session_delete_policy invalid input (%zu)", uap->in_buffer_length);

	necp_policy_id delete_policy_id = 0;
	error = copyin(uap->in_buffer, &delete_policy_id, sizeof(delete_policy_id));
		NECPLOG(LOG_ERR, "necp_session_delete_policy policy_id copyin error (%d)", error);

	struct necp_session_policy *policy = necp_policy_find(session, delete_policy_id);
	if (policy == NULL || policy->pending_deletion) {
		NECPLOG(LOG_ERR, "necp_session_delete_policy failed to find policy with id %u", delete_policy_id);

	necp_policy_mark_for_deletion(session, policy);

necp_session_apply_all(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
	necp_policy_apply_all(session);
necp_session_list_all(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
	u_int32_t tlv_size = (sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(necp_policy_id));
	u_int32_t response_size = 0;
	u_int8_t *response = NULL;
	int num_policies = 0;
	int cur_policy_index = 0;

	struct necp_session_policy *policy;

	LIST_FOREACH(policy, &session->policies, chain) {
		if (!policy->pending_deletion) {

	if (num_policies > NECP_MAX_POLICY_LIST_COUNT) {
		NECPLOG(LOG_ERR, "necp_session_list_all size too large to copy (%u policies)", num_policies);

	response_size = num_policies * tlv_size;
	if (uap->out_buffer_length < response_size || uap->out_buffer == 0) {
		NECPLOG(LOG_ERR, "necp_session_list_all buffer not large enough (%u < %u)", uap->out_buffer_length, response_size);

	// Create a response with one Policy ID TLV for each policy
	MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK | M_ZERO);
	if (response == NULL) {

	u_int8_t *cursor = response;
	LIST_FOREACH(policy, &session->policies, chain) {
		if (!policy->pending_deletion && cur_policy_index < num_policies) {
			cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ID, sizeof(u_int32_t), &policy->id, response, response_size);

	error = copyout(response, uap->out_buffer, response_size);
		NECPLOG(LOG_ERR, "necp_session_list_all TLV copyout error (%d)", error);

	if (response != NULL) {
		FREE(response, M_NECP);

necp_session_delete_all(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
	necp_policy_mark_all_for_deletion(session);
necp_session_set_session_priority(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
	struct necp_session_policy *policy = NULL;
	struct necp_session_policy *temp_policy = NULL;

	if (uap->in_buffer_length < sizeof(necp_session_priority) || uap->in_buffer == 0) {
		NECPLOG(LOG_ERR, "necp_session_set_session_priority invalid input (%zu)", uap->in_buffer_length);

	necp_session_priority requested_session_priority = 0;
	error = copyin(uap->in_buffer, &requested_session_priority, sizeof(requested_session_priority));
		NECPLOG(LOG_ERR, "necp_session_set_session_priority priority copyin error (%d)", error);

	// Enforce special session priorities with entitlements
	if (requested_session_priority == NECP_SESSION_PRIORITY_CONTROL ||
		requested_session_priority == NECP_SESSION_PRIORITY_PRIVILEGED_TUNNEL) {
		errno_t cred_result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES, 0);
		if (cred_result != 0) {
			NECPLOG(LOG_ERR, "Session does not hold necessary entitlement to claim priority level %d", requested_session_priority);

	if (session->session_priority != requested_session_priority) {
		session->session_priority = requested_session_priority;
		session->session_order = necp_allocate_new_session_order(session->session_priority, session->control_unit);
		session->dirty = TRUE;

		// Mark all policies as needing updates
		LIST_FOREACH_SAFE(policy, &session->policies, chain, temp_policy) {
			policy->pending_update = TRUE;

necp_session_lock_to_process(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
	session->proc_locked = TRUE;
necp_session_register_service(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
	struct necp_service_registration *new_service = NULL;

	if (uap->in_buffer_length < sizeof(uuid_t) || uap->in_buffer == 0) {
		NECPLOG(LOG_ERR, "necp_session_register_service invalid input (%zu)", uap->in_buffer_length);

	error = copyin(uap->in_buffer, service_uuid, sizeof(service_uuid));
		NECPLOG(LOG_ERR, "necp_session_register_service uuid copyin error (%d)", error);

	MALLOC(new_service, struct necp_service_registration *, sizeof(*new_service), M_NECP, M_WAITOK | M_ZERO);
	if (new_service == NULL) {
		NECPLOG0(LOG_ERR, "Failed to allocate service registration");

	lck_rw_lock_exclusive(&necp_kernel_policy_lock);
	new_service->service_id = necp_create_uuid_service_id_mapping(service_uuid);
	LIST_INSERT_HEAD(&session->services, new_service, session_chain);
	LIST_INSERT_HEAD(&necp_registered_service_list, new_service, kernel_chain);
	lck_rw_done(&necp_kernel_policy_lock);

necp_session_unregister_service(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
	struct necp_service_registration *service = NULL;
	struct necp_service_registration *temp_service = NULL;
	struct necp_uuid_id_mapping *mapping = NULL;

	if (uap->in_buffer_length < sizeof(uuid_t) || uap->in_buffer == 0) {
		NECPLOG(LOG_ERR, "necp_session_unregister_service invalid input (%zu)", uap->in_buffer_length);

	error = copyin(uap->in_buffer, service_uuid, sizeof(service_uuid));
		NECPLOG(LOG_ERR, "necp_session_unregister_service uuid copyin error (%d)", error);

	// Remove all matching services for this session
	lck_rw_lock_exclusive(&necp_kernel_policy_lock);
	mapping = necp_uuid_lookup_service_id_locked(service_uuid);
	if (mapping != NULL) {
		LIST_FOREACH_SAFE(service, &session->services, session_chain, temp_service) {
			if (service->service_id == mapping->id) {
				LIST_REMOVE(service, session_chain);
				LIST_REMOVE(service, kernel_chain);
				FREE(service, M_NECP);
		necp_remove_uuid_service_id_mapping(service_uuid);
	lck_rw_done(&necp_kernel_policy_lock);
necp_session_dump_all(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
	if (uap->out_buffer_length == 0 || uap->out_buffer == 0) {
		NECPLOG(LOG_ERR, "necp_session_dump_all invalid output buffer (%zu)", uap->out_buffer_length);

	error = necp_handle_policy_dump_all(session, 0, NULL, uap->out_buffer, uap->out_buffer_length, 0);
necp_session_action(struct proc *p, struct necp_session_action_args *uap, int *retval)
	int return_value = 0;
	struct necp_session *session = NULL;
	error = necp_session_find_from_fd(uap->necp_fd, &session);
		NECPLOG(LOG_ERR, "necp_session_action find fd error (%d)", error);

	NECP_SESSION_LOCK(session);

	if (session->proc_locked) {
		// Verify that the calling process is allowed to do actions
		proc_getexecutableuuid(current_proc(), proc_uuid, sizeof(proc_uuid));
		if (uuid_compare(proc_uuid, session->proc_uuid) != 0) {

		// If not locked, update the proc_uuid and proc_pid of the session
		proc_getexecutableuuid(current_proc(), session->proc_uuid, sizeof(session->proc_uuid));
		session->proc_pid = proc_pid(current_proc());

	u_int32_t action = uap->action;
	case NECP_SESSION_ACTION_POLICY_ADD: {
		return_value = necp_session_add_policy(session, uap, retval);
	case NECP_SESSION_ACTION_POLICY_GET: {
		return_value = necp_session_get_policy(session, uap, retval);
	case NECP_SESSION_ACTION_POLICY_DELETE: {
		return_value = necp_session_delete_policy(session, uap, retval);
	case NECP_SESSION_ACTION_POLICY_APPLY_ALL: {
		return_value = necp_session_apply_all(session, uap, retval);
	case NECP_SESSION_ACTION_POLICY_LIST_ALL: {
		return_value = necp_session_list_all(session, uap, retval);
	case NECP_SESSION_ACTION_POLICY_DELETE_ALL: {
		return_value = necp_session_delete_all(session, uap, retval);
	case NECP_SESSION_ACTION_SET_SESSION_PRIORITY: {
		return_value = necp_session_set_session_priority(session, uap, retval);
	case NECP_SESSION_ACTION_LOCK_SESSION_TO_PROC: {
		return_value = necp_session_lock_to_process(session, uap, retval);
	case NECP_SESSION_ACTION_REGISTER_SERVICE: {
		return_value = necp_session_register_service(session, uap, retval);
	case NECP_SESSION_ACTION_UNREGISTER_SERVICE: {
		return_value = necp_session_unregister_service(session, uap, retval);
	case NECP_SESSION_ACTION_POLICY_DUMP_ALL: {
		return_value = necp_session_dump_all(session, uap, retval);
		NECPLOG(LOG_ERR, "necp_session_action unknown action (%u)", action);
		return_value = EINVAL;

	NECP_SESSION_UNLOCK(session);
	file_drop(uap->necp_fd);

	return (return_value);
// Kernel Control functions
static errno_t necp_register_control(void);
static errno_t necp_ctl_connect(kern_ctl_ref kctlref, struct sockaddr_ctl *sac, void **unitinfo);
static errno_t necp_ctl_disconnect(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo);
static errno_t necp_ctl_send(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, mbuf_t m, int flags);
static void necp_ctl_rcvd(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int flags);
static errno_t necp_ctl_getopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int opt, void *data, size_t *len);
static errno_t necp_ctl_setopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int opt, void *data, size_t len);

static bool necp_send_ctl_data(struct necp_session *session, u_int8_t *buffer, size_t buffer_size);
	result = necp_register_control();

	necp_kernel_policy_grp_attr = lck_grp_attr_alloc_init();
	if (necp_kernel_policy_grp_attr == NULL) {
		NECPLOG0(LOG_ERR, "lck_grp_attr_alloc_init failed");

	necp_kernel_policy_mtx_grp = lck_grp_alloc_init(NECP_CONTROL_NAME, necp_kernel_policy_grp_attr);
	if (necp_kernel_policy_mtx_grp == NULL) {
		NECPLOG0(LOG_ERR, "lck_grp_alloc_init failed");

	necp_kernel_policy_mtx_attr = lck_attr_alloc_init();
	if (necp_kernel_policy_mtx_attr == NULL) {
		NECPLOG0(LOG_ERR, "lck_attr_alloc_init failed");

	lck_rw_init(&necp_kernel_policy_lock, necp_kernel_policy_mtx_grp, necp_kernel_policy_mtx_attr);

	necp_route_rule_grp_attr = lck_grp_attr_alloc_init();
	if (necp_route_rule_grp_attr == NULL) {
		NECPLOG0(LOG_ERR, "lck_grp_attr_alloc_init failed");

	necp_route_rule_mtx_grp = lck_grp_alloc_init("necp_route_rule", necp_route_rule_grp_attr);
	if (necp_route_rule_mtx_grp == NULL) {
		NECPLOG0(LOG_ERR, "lck_grp_alloc_init failed");

	necp_route_rule_mtx_attr = lck_attr_alloc_init();
	if (necp_route_rule_mtx_attr == NULL) {
		NECPLOG0(LOG_ERR, "lck_attr_alloc_init failed");

	lck_rw_init(&necp_route_rule_lock, necp_route_rule_mtx_grp, necp_route_rule_mtx_attr);

	TAILQ_INIT(&necp_session_list);

	LIST_INIT(&necp_kernel_socket_policies);
	LIST_INIT(&necp_kernel_ip_output_policies);

	LIST_INIT(&necp_account_id_list);

	LIST_INIT(&necp_uuid_service_id_list);

	LIST_INIT(&necp_registered_service_list);

	LIST_INIT(&necp_route_rules);
	LIST_INIT(&necp_aggregate_route_rules);

	necp_uuid_app_id_hashtbl = hashinit(NECP_UUID_APP_ID_HASH_SIZE, M_NECP, &necp_uuid_app_id_hash_mask);
	necp_uuid_app_id_hash_num_buckets = necp_uuid_app_id_hash_mask + 1;
	necp_num_uuid_app_id_mappings = 0;
	necp_uuid_app_id_mappings_dirty = FALSE;

	necp_kernel_application_policies_condition_mask = 0;
	necp_kernel_socket_policies_condition_mask = 0;
	necp_kernel_ip_output_policies_condition_mask = 0;

	necp_kernel_application_policies_count = 0;
	necp_kernel_socket_policies_count = 0;
	necp_kernel_socket_policies_non_app_count = 0;
	necp_kernel_ip_output_policies_count = 0;
	necp_kernel_ip_output_policies_non_id_count = 0;

	necp_last_policy_id = 0;
	necp_last_kernel_policy_id = 0;
	necp_last_uuid_id = 0;
	necp_last_string_id = 0;
	necp_last_route_rule_id = 0;
	necp_last_aggregate_route_rule_id = 0;

	necp_kernel_socket_policies_gencount = 1;

	memset(&necp_kernel_socket_policies_map, 0, sizeof(necp_kernel_socket_policies_map));
	memset(&necp_kernel_ip_output_policies_map, 0, sizeof(necp_kernel_ip_output_policies_map));
	necp_kernel_socket_policies_app_layer_map = NULL;

	if (necp_kernel_policy_mtx_attr != NULL) {
		lck_attr_free(necp_kernel_policy_mtx_attr);
		necp_kernel_policy_mtx_attr = NULL;
	if (necp_kernel_policy_mtx_grp != NULL) {
		lck_grp_free(necp_kernel_policy_mtx_grp);
		necp_kernel_policy_mtx_grp = NULL;
	if (necp_kernel_policy_grp_attr != NULL) {
		lck_grp_attr_free(necp_kernel_policy_grp_attr);
		necp_kernel_policy_grp_attr = NULL;
	if (necp_route_rule_mtx_attr != NULL) {
		lck_attr_free(necp_route_rule_mtx_attr);
		necp_route_rule_mtx_attr = NULL;
	if (necp_route_rule_mtx_grp != NULL) {
		lck_grp_free(necp_route_rule_mtx_grp);
		necp_route_rule_mtx_grp = NULL;
	if (necp_route_rule_grp_attr != NULL) {
		lck_grp_attr_free(necp_route_rule_grp_attr);
		necp_route_rule_grp_attr = NULL;
	if (necp_kctlref != NULL) {
		ctl_deregister(necp_kctlref);
		necp_kctlref = NULL;
necp_register_control(void)
	struct kern_ctl_reg kern_ctl;

	// Create a tag to allocate memory
	necp_malloc_tag = OSMalloc_Tagalloc(NECP_CONTROL_NAME, OSMT_DEFAULT);

	// Find a unique value for our interface family
	result = mbuf_tag_id_find(NECP_CONTROL_NAME, &necp_family);
		NECPLOG(LOG_ERR, "mbuf_tag_id_find_internal failed: %d", result);

	bzero(&kern_ctl, sizeof(kern_ctl));
	strlcpy(kern_ctl.ctl_name, NECP_CONTROL_NAME, sizeof(kern_ctl.ctl_name));
	kern_ctl.ctl_name[sizeof(kern_ctl.ctl_name) - 1] = 0;
	kern_ctl.ctl_flags = CTL_FLAG_PRIVILEGED; // Require root
	kern_ctl.ctl_sendsize = 64 * 1024;
	kern_ctl.ctl_recvsize = 64 * 1024;
	kern_ctl.ctl_connect = necp_ctl_connect;
	kern_ctl.ctl_disconnect = necp_ctl_disconnect;
	kern_ctl.ctl_send = necp_ctl_send;
	kern_ctl.ctl_rcvd = necp_ctl_rcvd;
	kern_ctl.ctl_setopt = necp_ctl_setopt;
	kern_ctl.ctl_getopt = necp_ctl_getopt;

	result = ctl_register(&kern_ctl, &necp_kctlref);
		NECPLOG(LOG_ERR, "ctl_register failed: %d", result);
necp_post_change_event(struct kev_necp_policies_changed_data *necp_event_data)
	struct kev_msg ev_msg;
	memset(&ev_msg, 0, sizeof(ev_msg));

	ev_msg.vendor_code = KEV_VENDOR_APPLE;
	ev_msg.kev_class = KEV_NETWORK_CLASS;
	ev_msg.kev_subclass = KEV_NECP_SUBCLASS;
	ev_msg.event_code = KEV_NECP_POLICIES_CHANGED;

	ev_msg.dv[0].data_ptr = necp_event_data;
	ev_msg.dv[0].data_length = sizeof(necp_event_data->changed_count);
	ev_msg.dv[1].data_length = 0;

	kev_post_msg(&ev_msg);
necp_ctl_connect(kern_ctl_ref kctlref, struct sockaddr_ctl *sac, void **unitinfo)
#pragma unused(kctlref, sac)
	*unitinfo = necp_create_session();
	if (*unitinfo == NULL) {
		// Could not allocate session

necp_ctl_disconnect(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo)
#pragma unused(kctlref, unit)
	struct necp_session *session = (struct necp_session *)unitinfo;
	if (session != NULL) {
		necp_policy_mark_all_for_deletion(session);
		necp_policy_apply_all(session);
		necp_delete_session((struct necp_session *)unitinfo);
necp_packet_find_tlv(mbuf_t packet, int offset, u_int8_t type, int *err, int next)
	size_t cursor = offset;
	u_int32_t curr_length;

		error = mbuf_copydata(packet, cursor, sizeof(curr_type), &curr_type);
			curr_type = NECP_TLV_NIL;

		if (curr_type != type) {
			cursor += sizeof(curr_type);
			error = mbuf_copydata(packet, cursor, sizeof(curr_length), &curr_length);
			cursor += (sizeof(curr_length) + curr_length);
	} while (curr_type != type);
necp_packet_get_tlv_at_offset(mbuf_t packet, int tlv_offset, u_int32_t buff_len, void *buff, u_int32_t *value_size)
	if (tlv_offset < 0) {

	error = mbuf_copydata(packet, tlv_offset + sizeof(u_int8_t), sizeof(length), &length);

	u_int32_t total_len = m_length2(packet, NULL);
	if (total_len < (tlv_offset + sizeof(u_int8_t) + sizeof(length) + length)) {
		NECPLOG(LOG_ERR, "Got a bad TLV, length (%u) + offset (%d) < total length (%u)",
			length, (tlv_offset + sizeof(u_int8_t) + sizeof(length)), total_len);

	if (value_size != NULL) {
		*value_size = length;

	if (buff != NULL && buff_len > 0) {
		u_int32_t to_copy = (length < buff_len) ? length : buff_len;
		error = mbuf_copydata(packet, tlv_offset + sizeof(u_int8_t) + sizeof(length), to_copy, buff);
necp_buffer_write_packet_header(u_int8_t *buffer, u_int8_t packet_type, u_int8_t flags, u_int32_t message_id)
	((struct necp_packet_header *)(void *)buffer)->packet_type = packet_type;
	((struct necp_packet_header *)(void *)buffer)->flags = flags;
	((struct necp_packet_header *)(void *)buffer)->message_id = message_id;
	return (buffer + sizeof(struct necp_packet_header));
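/*
 * Message framing note (illustrative summary, not original commentary): every
 * kernel-control message handled in this file begins with a struct
 * necp_packet_header carrying packet_type, flags, and message_id, with TLVs
 * following immediately after it. That is why the response helpers further
 * below size their buffers as sizeof(struct necp_packet_header) plus one TLV
 * header (sizeof(u_int8_t) + sizeof(u_int32_t)) plus the value bytes.
 */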
necp_buffer_write_tlv_validate(u_int8_t *cursor, u_int8_t type, u_int32_t length,
	u_int8_t *buffer, u_int32_t buffer_length)
	if (cursor < buffer || (uintptr_t)(cursor - buffer) > buffer_length) {
		NECPLOG0(LOG_ERR, "Cannot write TLV in buffer (invalid cursor)");

	u_int8_t *next_tlv = (u_int8_t *)(cursor + sizeof(type) + sizeof(length) + length);
	if (next_tlv <= buffer || // make sure the next TLV start doesn't overflow
		(uintptr_t)(next_tlv - buffer) > buffer_length) { // make sure the next TLV has enough room in buffer
		NECPLOG(LOG_ERR, "Cannot write TLV in buffer (TLV length %u, buffer length %u)",
			length, buffer_length);
necp_buffer_write_tlv_if_different(u_int8_t *cursor, u_int8_t type,
	u_int32_t length, const void *value, bool *updated,
	u_int8_t *buffer, u_int32_t buffer_length)
	if (!necp_buffer_write_tlv_validate(cursor, type, length, buffer, buffer_length)) {

	u_int8_t *next_tlv = (u_int8_t *)(cursor + sizeof(type) + sizeof(length) + length);
	if (*updated || *(u_int8_t *)(cursor) != type) {
		*(u_int8_t *)(cursor) = type;

	if (*updated || *(u_int32_t *)(void *)(cursor + sizeof(type)) != length) {
		*(u_int32_t *)(void *)(cursor + sizeof(type)) = length;

	if (*updated || memcmp((u_int8_t *)(cursor + sizeof(type) + sizeof(length)), value, length) != 0) {
		memcpy((u_int8_t *)(cursor + sizeof(type) + sizeof(length)), value, length);
necp_buffer_write_tlv(u_int8_t *cursor, u_int8_t type,
	u_int32_t length, const void *value,
	u_int8_t *buffer, u_int32_t buffer_length)
	if (!necp_buffer_write_tlv_validate(cursor, type, length, buffer, buffer_length)) {

	u_int8_t *next_tlv = (u_int8_t *)(cursor + sizeof(type) + sizeof(length) + length);
	*(u_int8_t *)(cursor) = type;
	*(u_int32_t *)(void *)(cursor + sizeof(type)) = length;
		memcpy((u_int8_t *)(cursor + sizeof(type) + sizeof(length)), value, length);
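/*
 * TLV wire layout written by the helpers above (illustrative only): a 1-byte
 * type, followed by a 4-byte length, followed by 'length' bytes of value. For
 * example, a Policy ID TLV carrying the u_int32_t value 7:
 *
 *	offset 0: type   (1 byte)  = NECP_TLV_POLICY_ID
 *	offset 1: length (4 bytes) = 4
 *	offset 5: value  (4 bytes) = 7
 *
 * which is why the size computations in this file use
 * sizeof(u_int8_t) + sizeof(u_int32_t) + value_size per TLV.
 */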
necp_buffer_get_tlv_type(u_int8_t *buffer, int tlv_offset)
	u_int8_t *type = NULL;

	if (buffer == NULL) {

	type = (u_int8_t *)((u_int8_t *)buffer + tlv_offset);
	return (type ? *type : 0);

necp_buffer_get_tlv_length(u_int8_t *buffer, int tlv_offset)
	u_int32_t *length = NULL;

	if (buffer == NULL) {

	length = (u_int32_t *)(void *)((u_int8_t *)buffer + tlv_offset + sizeof(u_int8_t));
	return (length ? *length : 0);

necp_buffer_get_tlv_value(u_int8_t *buffer, int tlv_offset, u_int32_t *value_size)
	u_int8_t *value = NULL;
	u_int32_t length = necp_buffer_get_tlv_length(buffer, tlv_offset);

		*value_size = length;

	value = (u_int8_t *)((u_int8_t *)buffer + tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t));
necp_buffer_find_tlv(u_int8_t *buffer, u_int32_t buffer_length, int offset, u_int8_t type, int next)
	int cursor = offset;
	u_int32_t curr_length;

		if ((((u_int32_t)cursor) + sizeof(curr_type) + sizeof(curr_length)) > buffer_length) {

		curr_type = necp_buffer_get_tlv_type(buffer, cursor);
			curr_type = NECP_TLV_NIL;
		curr_length = necp_buffer_get_tlv_length(buffer, cursor);
		if (curr_length > buffer_length - ((u_int32_t)cursor + sizeof(curr_type) + sizeof(curr_length))) {

		next_cursor = (cursor + sizeof(curr_type) + sizeof(curr_length) + curr_length);
		if (curr_type == type) {
			// check if entire TLV fits inside buffer
			if (((u_int32_t)next_cursor) <= buffer_length) {

		cursor = next_cursor;
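/*
 * Usage sketch (illustrative only): scanning a validated buffer for a
 * Policy ID TLV and reading its value with the helpers above. A negative
 * return value indicates the TLV was not found, matching the check used by
 * necp_get_tlv() below.
 *
 *	u_int32_t value_size = 0;
 *	int tlv_offset = necp_buffer_find_tlv(buffer, buffer_length, 0, NECP_TLV_POLICY_ID, 0);
 *	if (tlv_offset >= 0) {
 *		u_int8_t *value = necp_buffer_get_tlv_value(buffer, tlv_offset, &value_size);
 *		// value points at value_size bytes of TLV payload inside buffer
 *	}
 */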
necp_find_tlv(mbuf_t packet, u_int8_t *buffer, u_int32_t buffer_length, int offset, u_int8_t type, int *err, int next)
	if (packet != NULL) {
		cursor = necp_packet_find_tlv(packet, offset, type, err, next);
	} else if (buffer != NULL) {
		cursor = necp_buffer_find_tlv(buffer, buffer_length, offset, type, next);
necp_get_tlv_at_offset(mbuf_t packet, u_int8_t *buffer, u_int32_t buffer_length,
	int tlv_offset, u_int32_t out_buffer_length, void *out_buffer, u_int32_t *value_size)
	if (packet != NULL) {
		// Handle mbuf parsing
		return necp_packet_get_tlv_at_offset(packet, tlv_offset, out_buffer_length, out_buffer, value_size);

	if (buffer == NULL) {
		NECPLOG0(LOG_ERR, "necp_get_tlv_at_offset buffer is NULL");

	// Handle buffer parsing

	// Validate that buffer has enough room for any TLV
	if (tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t) > buffer_length) {
		NECPLOG(LOG_ERR, "necp_get_tlv_at_offset buffer_length is too small for TLV (%u < %u)",
			buffer_length, tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t));

	// Validate that buffer has enough room for this TLV
	u_int32_t tlv_length = necp_buffer_get_tlv_length(buffer, tlv_offset);
	if (tlv_length > buffer_length - (tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t))) {
		NECPLOG(LOG_ERR, "necp_get_tlv_at_offset buffer_length is too small for TLV of length %u (%u < %u)",
			tlv_length, buffer_length, tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t) + tlv_length);

	if (out_buffer != NULL && out_buffer_length > 0) {
		// Validate that out buffer is large enough for value
		if (out_buffer_length < tlv_length) {
			NECPLOG(LOG_ERR, "necp_get_tlv_at_offset out_buffer_length is too small for TLV value (%u < %u)",
				out_buffer_length, tlv_length);

		// Get value pointer
		u_int8_t *tlv_value = necp_buffer_get_tlv_value(buffer, tlv_offset, NULL);
		if (tlv_value == NULL) {
			NECPLOG0(LOG_ERR, "necp_get_tlv_at_offset tlv_value is NULL");

		memcpy(out_buffer, tlv_value, tlv_length);

	if (value_size != NULL) {
		*value_size = tlv_length;
necp_get_tlv(mbuf_t packet, u_int8_t *buffer, u_int32_t buffer_length,
	int offset, u_int8_t type, u_int32_t buff_len, void *buff, u_int32_t *value_size)
	int tlv_offset = necp_find_tlv(packet, buffer, buffer_length, offset, type, &error, 0);
	if (tlv_offset < 0) {

	return (necp_get_tlv_at_offset(packet, buffer, buffer_length, tlv_offset, buff_len, buff, value_size));
static bool
necp_send_ctl_data(struct necp_session *session, u_int8_t *buffer, size_t buffer_size)
{
	int error;

	if (necp_kctlref == NULL || session == NULL || buffer == NULL || buffer_size == 0) {
		return (FALSE);
	}

	error = ctl_enqueuedata(necp_kctlref, session->control_unit, buffer, buffer_size, CTL_DATA_EOR);

	return (error == 0);
}
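// The three response helpers below build a small packet (header plus one TLV)
// on the heap and enqueue it on the session's kernel control socket.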
static bool
necp_send_success_response(struct necp_session *session, u_int8_t packet_type, u_int32_t message_id)
{
	bool success = TRUE;
	u_int8_t *response = NULL;
	u_int8_t *cursor = NULL;
	size_t response_size = sizeof(struct necp_packet_header) + sizeof(u_int8_t) + sizeof(u_int32_t);
	MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK);
	if (response == NULL) {
		return (FALSE);
	}
	cursor = response;
	cursor = necp_buffer_write_packet_header(cursor, packet_type, NECP_PACKET_FLAGS_RESPONSE, message_id);
	cursor = necp_buffer_write_tlv(cursor, NECP_TLV_NIL, 0, NULL, response, response_size);

	if (!(success = necp_send_ctl_data(session, (u_int8_t *)response, response_size))) {
		NECPLOG0(LOG_ERR, "Failed to send response");
	}

	FREE(response, M_NECP);
	return (success);
}
static bool
necp_send_error_response(struct necp_session *session, u_int8_t packet_type, u_int32_t message_id, u_int32_t error)
{
	bool success = TRUE;
	u_int8_t *response = NULL;
	u_int8_t *cursor = NULL;
	size_t response_size = sizeof(struct necp_packet_header) + sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(u_int32_t);
	MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK);
	if (response == NULL) {
		return (FALSE);
	}
	cursor = response;
	cursor = necp_buffer_write_packet_header(cursor, packet_type, NECP_PACKET_FLAGS_RESPONSE, message_id);
	cursor = necp_buffer_write_tlv(cursor, NECP_TLV_ERROR, sizeof(error), &error, response, response_size);

	if (!(success = necp_send_ctl_data(session, (u_int8_t *)response, response_size))) {
		NECPLOG0(LOG_ERR, "Failed to send response");
	}

	FREE(response, M_NECP);
	return (success);
}
static bool
necp_send_policy_id_response(struct necp_session *session, u_int8_t packet_type, u_int32_t message_id, necp_policy_id policy_id)
{
	bool success = TRUE;
	u_int8_t *response = NULL;
	u_int8_t *cursor = NULL;
	size_t response_size = sizeof(struct necp_packet_header) + sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(u_int32_t);
	MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK);
	if (response == NULL) {
		return (FALSE);
	}
	cursor = response;
	cursor = necp_buffer_write_packet_header(cursor, packet_type, NECP_PACKET_FLAGS_RESPONSE, message_id);
	cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ID, sizeof(policy_id), &policy_id, response, response_size);

	if (!(success = necp_send_ctl_data(session, (u_int8_t *)response, response_size))) {
		NECPLOG0(LOG_ERR, "Failed to send response");
	}

	FREE(response, M_NECP);
	return (success);
}
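// Kernel control send callback: every message from a userspace session
// arrives here, is validated against the locked process (if any), and is
// dispatched to the matching necp_handle_* routine by packet type.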
static errno_t
necp_ctl_send(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, mbuf_t packet, int flags)
{
#pragma unused(kctlref, unit, flags)
	struct necp_session *session = (struct necp_session *)unitinfo;
	struct necp_packet_header header;
	int error = 0;

	if (session == NULL) {
		NECPLOG0(LOG_ERR, "Got a NULL session");
		error = EINVAL;
		goto done;
	}

	if (mbuf_pkthdr_len(packet) < sizeof(header)) {
		NECPLOG(LOG_ERR, "Got a bad packet, length (%lu) < sizeof header (%lu)", mbuf_pkthdr_len(packet), sizeof(header));
		error = EINVAL;
		goto done;
	}

	error = mbuf_copydata(packet, 0, sizeof(header), &header);
	if (error) {
		NECPLOG(LOG_ERR, "mbuf_copydata failed for the header: %d", error);
		goto done;
	}

	if (session->proc_locked) {
		// Verify that the calling process is allowed to send messages
		uuid_t proc_uuid;
		proc_getexecutableuuid(current_proc(), proc_uuid, sizeof(proc_uuid));
		if (uuid_compare(proc_uuid, session->proc_uuid) != 0) {
			necp_send_error_response(session, header.packet_type, header.message_id, NECP_ERROR_INVALID_PROCESS);
			goto done;
		}
	} else {
		// If not locked, update the proc_uuid and proc_pid of the session
		proc_getexecutableuuid(current_proc(), session->proc_uuid, sizeof(session->proc_uuid));
		session->proc_pid = proc_pid(current_proc());
	}

	switch (header.packet_type) {
		case NECP_PACKET_TYPE_POLICY_ADD: {
			necp_handle_policy_add(session, header.message_id, packet, NULL, 0, sizeof(header), NULL);
			break;
		}
		case NECP_PACKET_TYPE_POLICY_GET: {
			necp_handle_policy_get(session, header.message_id, packet, sizeof(header));
			break;
		}
		case NECP_PACKET_TYPE_POLICY_DELETE: {
			necp_handle_policy_delete(session, header.message_id, packet, sizeof(header));
			break;
		}
		case NECP_PACKET_TYPE_POLICY_APPLY_ALL: {
			necp_handle_policy_apply_all(session, header.message_id, packet, sizeof(header));
			break;
		}
		case NECP_PACKET_TYPE_POLICY_LIST_ALL: {
			necp_handle_policy_list_all(session, header.message_id, packet, sizeof(header));
			break;
		}
		case NECP_PACKET_TYPE_POLICY_DELETE_ALL: {
			necp_handle_policy_delete_all(session, header.message_id, packet, sizeof(header));
			break;
		}
		case NECP_PACKET_TYPE_POLICY_DUMP_ALL: {
			necp_handle_policy_dump_all(session, header.message_id, packet, 0, 0, sizeof(header));
			break;
		}
		case NECP_PACKET_TYPE_SET_SESSION_PRIORITY: {
			necp_handle_set_session_priority(session, header.message_id, packet, sizeof(header));
			break;
		}
		case NECP_PACKET_TYPE_LOCK_SESSION_TO_PROC: {
			necp_handle_lock_session_to_proc(session, header.message_id, packet, sizeof(header));
			break;
		}
		case NECP_PACKET_TYPE_REGISTER_SERVICE: {
			necp_handle_register_service(session, header.message_id, packet, sizeof(header));
			break;
		}
		case NECP_PACKET_TYPE_UNREGISTER_SERVICE: {
			necp_handle_unregister_service(session, header.message_id, packet, sizeof(header));
			break;
		}
		default: {
			NECPLOG(LOG_ERR, "Received unknown message type %d", header.packet_type);
			necp_send_error_response(session, header.packet_type, header.message_id, NECP_ERROR_UNKNOWN_PACKET_TYPE);
			break;
		}
	}

done:
	mbuf_freem(packet);
	return (error);
}
static void
necp_ctl_rcvd(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int flags)
{
#pragma unused(kctlref, unit, unitinfo, flags)
	return;
}

static errno_t
necp_ctl_getopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int opt, void *data, size_t *len)
{
#pragma unused(kctlref, unit, unitinfo, opt, data, len)
	return (0);
}

static errno_t
necp_ctl_setopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int opt, void *data, size_t len)
{
#pragma unused(kctlref, unit, unitinfo, opt, data, len)
	return (0);
}
// Session Management

static struct necp_session *
necp_create_session(void)
{
	struct necp_session *new_session = NULL;

	MALLOC(new_session, struct necp_session *, sizeof(*new_session), M_NECP, M_WAITOK | M_ZERO);
	if (new_session == NULL) {
		goto done;
	}

	new_session->necp_fd_type = necp_fd_type_session;
	new_session->session_priority = NECP_SESSION_PRIORITY_UNKNOWN;
	new_session->dirty = FALSE;
	LIST_INIT(&new_session->policies);
	lck_mtx_init(&new_session->lock, necp_kernel_policy_mtx_grp, necp_kernel_policy_mtx_attr);

	lck_rw_lock_exclusive(&necp_kernel_policy_lock);

	// Find the next available control unit
	u_int32_t control_unit = 1;
	struct necp_session *next_session = NULL;
	TAILQ_FOREACH(next_session, &necp_session_list, chain) {
		if (next_session->control_unit > control_unit) {
			// Found a gap, grab this control unit
			break;
		}

		// Try the next control unit, loop around
		control_unit = next_session->control_unit + 1;
	}

	new_session->control_unit = control_unit;
	new_session->session_order = necp_allocate_new_session_order(new_session->session_priority, control_unit);

	if (next_session != NULL) {
		TAILQ_INSERT_BEFORE(next_session, new_session, chain);
	} else {
		TAILQ_INSERT_TAIL(&necp_session_list, new_session, chain);
	}

	necp_session_count++;
	lck_rw_done(&necp_kernel_policy_lock);

	if (necp_debug) {
		NECPLOG(LOG_DEBUG, "Created NECP session, control unit %d", control_unit);
	}

done:
	return (new_session);
}
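// Tear down a session: unregister its services, unlink it from the global
// session list, and free its lock and memory.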
static void
necp_delete_session(struct necp_session *session)
{
	if (session != NULL) {
		struct necp_service_registration *service = NULL;
		struct necp_service_registration *temp_service = NULL;
		LIST_FOREACH_SAFE(service, &session->services, session_chain, temp_service) {
			LIST_REMOVE(service, session_chain);
			lck_rw_lock_exclusive(&necp_kernel_policy_lock);
			LIST_REMOVE(service, kernel_chain);
			lck_rw_done(&necp_kernel_policy_lock);
			FREE(service, M_NECP);
		}
		if (necp_debug) {
			NECPLOG0(LOG_DEBUG, "Deleted NECP session");
		}

		lck_rw_lock_exclusive(&necp_kernel_policy_lock);
		TAILQ_REMOVE(&necp_session_list, session, chain);
		necp_session_count--;
		lck_rw_done(&necp_kernel_policy_lock);

		lck_mtx_destroy(&session->lock, necp_kernel_policy_mtx_grp);
		FREE(session, M_NECP);
	}
}
// Session Policy Management

static inline u_int8_t
necp_policy_result_get_type_from_buffer(u_int8_t *buffer, u_int32_t length)
{
	return ((buffer && length >= sizeof(u_int8_t)) ? buffer[0] : 0);
}

static inline u_int32_t
necp_policy_result_get_parameter_length_from_buffer(u_int8_t *buffer, u_int32_t length)
{
	return ((buffer && length > sizeof(u_int8_t)) ? (length - sizeof(u_int8_t)) : 0);
}

static inline u_int8_t *
necp_policy_result_get_parameter_pointer_from_buffer(u_int8_t *buffer, u_int32_t length)
{
	return ((buffer && length > sizeof(u_int8_t)) ? (buffer + sizeof(u_int8_t)) : NULL);
}

static bool
necp_policy_result_requires_route_rules(u_int8_t *buffer, u_int32_t length)
{
	u_int8_t type = necp_policy_result_get_type_from_buffer(buffer, length);
	if (type == NECP_POLICY_RESULT_ROUTE_RULES) {
		return (TRUE);
	}
	return (FALSE);
}
static bool
necp_address_is_valid(struct sockaddr *address)
{
	if (address->sa_family == AF_INET) {
		return (address->sa_len == sizeof(struct sockaddr_in));
	} else if (address->sa_family == AF_INET6) {
		return (address->sa_len == sizeof(struct sockaddr_in6));
	} else {
		return (FALSE);
	}
}
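// Validate the result TLV of a session policy: the parameter length required
// depends on the result type (e.g. a skip order, a tunnel interface, or a
// netagent UUID).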
static bool
necp_policy_result_is_valid(u_int8_t *buffer, u_int32_t length)
{
	bool validated = FALSE;
	u_int8_t type = necp_policy_result_get_type_from_buffer(buffer, length);
	u_int32_t parameter_length = necp_policy_result_get_parameter_length_from_buffer(buffer, length);
	switch (type) {
		case NECP_POLICY_RESULT_PASS: {
			validated = TRUE;
			break;
		}
		case NECP_POLICY_RESULT_SKIP: {
			if (parameter_length >= sizeof(u_int32_t)) {
				validated = TRUE;
			}
			break;
		}
		case NECP_POLICY_RESULT_DROP: {
			validated = TRUE;
			break;
		}
		case NECP_POLICY_RESULT_SOCKET_DIVERT: {
			if (parameter_length >= sizeof(u_int32_t)) {
				validated = TRUE;
			}
			break;
		}
		case NECP_POLICY_RESULT_SOCKET_SCOPED: {
			if (parameter_length > 0) {
				validated = TRUE;
			}
			break;
		}
		case NECP_POLICY_RESULT_IP_TUNNEL: {
			if (parameter_length > sizeof(u_int32_t)) {
				validated = TRUE;
			}
			break;
		}
		case NECP_POLICY_RESULT_SOCKET_FILTER: {
			if (parameter_length >= sizeof(u_int32_t)) {
				validated = TRUE;
			}
			break;
		}
		case NECP_POLICY_RESULT_ROUTE_RULES: {
			validated = TRUE;
			break;
		}
		case NECP_POLICY_RESULT_TRIGGER:
		case NECP_POLICY_RESULT_TRIGGER_IF_NEEDED:
		case NECP_POLICY_RESULT_TRIGGER_SCOPED:
		case NECP_POLICY_RESULT_NO_TRIGGER_SCOPED:
		case NECP_POLICY_RESULT_USE_NETAGENT: {
			if (parameter_length >= sizeof(uuid_t)) {
				validated = TRUE;
			}
			break;
		}
		default: {
			validated = FALSE;
			break;
		}
	}

	if (necp_debug) {
		NECPLOG(LOG_DEBUG, "Policy result type %d, valid %d", type, validated);
	}

	return (validated);
}
static inline u_int8_t
necp_policy_condition_get_type_from_buffer(u_int8_t *buffer, u_int32_t length)
{
	return ((buffer && length >= sizeof(u_int8_t)) ? buffer[0] : 0);
}

static inline u_int8_t
necp_policy_condition_get_flags_from_buffer(u_int8_t *buffer, u_int32_t length)
{
	return ((buffer && length >= (2 * sizeof(u_int8_t))) ? buffer[1] : 0);
}

static inline u_int32_t
necp_policy_condition_get_value_length_from_buffer(u_int8_t *buffer, u_int32_t length)
{
	return ((buffer && length >= (2 * sizeof(u_int8_t))) ? (length - (2 * sizeof(u_int8_t))) : 0);
}

static inline u_int8_t *
necp_policy_condition_get_value_pointer_from_buffer(u_int8_t *buffer, u_int32_t length)
{
	return ((buffer && length > (2 * sizeof(u_int8_t))) ? (buffer + (2 * sizeof(u_int8_t))) : NULL);
}

static bool
necp_policy_condition_is_default(u_int8_t *buffer, u_int32_t length)
{
	return (necp_policy_condition_get_type_from_buffer(buffer, length) == NECP_POLICY_CONDITION_DEFAULT);
}

static bool
necp_policy_condition_is_application(u_int8_t *buffer, u_int32_t length)
{
	return (necp_policy_condition_get_type_from_buffer(buffer, length) == NECP_POLICY_CONDITION_APPLICATION);
}

static bool
necp_policy_condition_is_real_application(u_int8_t *buffer, u_int32_t length)
{
	return (necp_policy_condition_get_type_from_buffer(buffer, length) == NECP_POLICY_CONDITION_REAL_APPLICATION);
}
static bool
necp_policy_condition_requires_application(u_int8_t *buffer, u_int32_t length)
{
	u_int8_t type = necp_policy_condition_get_type_from_buffer(buffer, length);
	return (type == NECP_POLICY_CONDITION_REAL_APPLICATION);
}

static bool
necp_policy_condition_requires_real_application(u_int8_t *buffer, u_int32_t length)
{
	u_int8_t type = necp_policy_condition_get_type_from_buffer(buffer, length);
	return (type == NECP_POLICY_CONDITION_ENTITLEMENT);
}
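// Validate one policy condition TLV against its result type; conditions that
// operate on IP addresses are rejected for results that can only be applied
// at the socket layer.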
static bool
necp_policy_condition_is_valid(u_int8_t *buffer, u_int32_t length, u_int8_t policy_result_type)
{
	bool validated = FALSE;
	bool result_cannot_have_ip_layer = (policy_result_type == NECP_POLICY_RESULT_SOCKET_DIVERT ||
										policy_result_type == NECP_POLICY_RESULT_SOCKET_FILTER ||
										policy_result_type == NECP_POLICY_RESULT_TRIGGER ||
										policy_result_type == NECP_POLICY_RESULT_TRIGGER_IF_NEEDED ||
										policy_result_type == NECP_POLICY_RESULT_TRIGGER_SCOPED ||
										policy_result_type == NECP_POLICY_RESULT_NO_TRIGGER_SCOPED ||
										policy_result_type == NECP_POLICY_RESULT_SOCKET_SCOPED ||
										policy_result_type == NECP_POLICY_RESULT_ROUTE_RULES ||
										policy_result_type == NECP_POLICY_RESULT_USE_NETAGENT) ? TRUE : FALSE;
	u_int32_t condition_length = necp_policy_condition_get_value_length_from_buffer(buffer, length);
	u_int8_t *condition_value = necp_policy_condition_get_value_pointer_from_buffer(buffer, length);
	u_int8_t type = necp_policy_condition_get_type_from_buffer(buffer, length);
	u_int8_t flags = necp_policy_condition_get_flags_from_buffer(buffer, length);
	switch (type) {
		case NECP_POLICY_CONDITION_APPLICATION:
		case NECP_POLICY_CONDITION_REAL_APPLICATION: {
			if (!(flags & NECP_POLICY_CONDITION_FLAGS_NEGATIVE) &&
				condition_length >= sizeof(uuid_t) &&
				condition_value != NULL &&
				!uuid_is_null(condition_value)) {
				validated = TRUE;
			}
			break;
		}
		case NECP_POLICY_CONDITION_DOMAIN:
		case NECP_POLICY_CONDITION_ACCOUNT:
		case NECP_POLICY_CONDITION_BOUND_INTERFACE: {
			if (condition_length > 0) {
				validated = TRUE;
			}
			break;
		}
		case NECP_POLICY_CONDITION_TRAFFIC_CLASS: {
			if (condition_length >= sizeof(struct necp_policy_condition_tc_range)) {
				validated = TRUE;
			}
			break;
		}
		case NECP_POLICY_CONDITION_DEFAULT:
		case NECP_POLICY_CONDITION_ALL_INTERFACES:
		case NECP_POLICY_CONDITION_ENTITLEMENT: {
			if (!(flags & NECP_POLICY_CONDITION_FLAGS_NEGATIVE)) {
				validated = TRUE;
			}
			break;
		}
		case NECP_POLICY_CONDITION_IP_PROTOCOL: {
			if (condition_length >= sizeof(u_int16_t)) {
				validated = TRUE;
			}
			break;
		}
		case NECP_POLICY_CONDITION_PID: {
			if (condition_length >= sizeof(pid_t) &&
				condition_value != NULL &&
				*((pid_t *)(void *)condition_value) != 0) {
				validated = TRUE;
			}
			break;
		}
		case NECP_POLICY_CONDITION_UID: {
			if (condition_length >= sizeof(uid_t)) {
				validated = TRUE;
			}
			break;
		}
		case NECP_POLICY_CONDITION_LOCAL_ADDR:
		case NECP_POLICY_CONDITION_REMOTE_ADDR: {
			if (!result_cannot_have_ip_layer && condition_length >= sizeof(struct necp_policy_condition_addr) &&
				necp_address_is_valid(&((struct necp_policy_condition_addr *)(void *)condition_value)->address.sa)) {
				validated = TRUE;
			}
			break;
		}
		case NECP_POLICY_CONDITION_LOCAL_ADDR_RANGE:
		case NECP_POLICY_CONDITION_REMOTE_ADDR_RANGE: {
			if (!result_cannot_have_ip_layer && condition_length >= sizeof(struct necp_policy_condition_addr_range) &&
				necp_address_is_valid(&((struct necp_policy_condition_addr_range *)(void *)condition_value)->start_address.sa) &&
				necp_address_is_valid(&((struct necp_policy_condition_addr_range *)(void *)condition_value)->end_address.sa)) {
				validated = TRUE;
			}
			break;
		}
		default: {
			validated = FALSE;
			break;
		}
	}

	if (necp_debug) {
		NECPLOG(LOG_DEBUG, "Policy condition type %d, valid %d", type, validated);
	}

	return (validated);
}
static bool
necp_policy_route_rule_is_default(u_int8_t *buffer, u_int32_t length)
{
	return (necp_policy_condition_get_value_length_from_buffer(buffer, length) == 0 &&
			necp_policy_condition_get_flags_from_buffer(buffer, length) == 0);
}

static bool
necp_policy_route_rule_is_valid(u_int8_t *buffer, u_int32_t length)
{
	bool validated = FALSE;
	u_int8_t type = necp_policy_condition_get_type_from_buffer(buffer, length);
	switch (type) {
		case NECP_ROUTE_RULE_ALLOW_INTERFACE: {
			validated = TRUE;
			break;
		}
		case NECP_ROUTE_RULE_DENY_INTERFACE: {
			validated = TRUE;
			break;
		}
		case NECP_ROUTE_RULE_QOS_MARKING: {
			validated = TRUE;
			break;
		}
		default: {
			validated = FALSE;
			break;
		}
	}

	if (necp_debug) {
		NECPLOG(LOG_DEBUG, "Policy route rule type %d, valid %d", type, validated);
	}

	return (validated);
}
static int
necp_get_posix_error_for_necp_error(int response_error)
{
	int error = 0;

	switch (response_error) {
		case NECP_ERROR_UNKNOWN_PACKET_TYPE:
		case NECP_ERROR_INVALID_TLV:
		case NECP_ERROR_POLICY_RESULT_INVALID:
		case NECP_ERROR_POLICY_CONDITIONS_INVALID:
		case NECP_ERROR_ROUTE_RULES_INVALID: {
			error = EINVAL;
			break;
		}
		case NECP_ERROR_POLICY_ID_NOT_FOUND: {
			error = ENOENT;
			break;
		}
		case NECP_ERROR_INVALID_PROCESS: {
			error = EPERM;
			break;
		}
		case NECP_ERROR_INTERNAL:
		default: {
			error = ENOMEM;
			break;
		}
	}

	return (error);
}
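// Session priority changes re-derive the session order and mark every policy
// in the session for re-application.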
static void
necp_handle_set_session_priority(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
{
	int error;
	struct necp_session_policy *policy = NULL;
	struct necp_session_policy *temp_policy = NULL;
	u_int32_t response_error = NECP_ERROR_INTERNAL;
	u_int32_t requested_session_priority = NECP_SESSION_PRIORITY_UNKNOWN;

	// Read session priority
	error = necp_get_tlv(packet, NULL, 0, offset, NECP_TLV_SESSION_PRIORITY, sizeof(requested_session_priority), &requested_session_priority, NULL);
	if (error) {
		NECPLOG(LOG_ERR, "Failed to get session priority: %d", error);
		response_error = NECP_ERROR_INVALID_TLV;
		goto fail;
	}

	if (session == NULL) {
		NECPLOG0(LOG_ERR, "Failed to find session");
		response_error = NECP_ERROR_INTERNAL;
		goto fail;
	}

	// Enforce special session priorities with entitlements
	if (requested_session_priority == NECP_SESSION_PRIORITY_CONTROL ||
		requested_session_priority == NECP_SESSION_PRIORITY_PRIVILEGED_TUNNEL) {
		errno_t cred_result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES, 0);
		if (cred_result != 0) {
			NECPLOG(LOG_ERR, "Session does not hold necessary entitlement to claim priority level %d", requested_session_priority);
			goto fail;
		}
	}

	if (session->session_priority != requested_session_priority) {
		session->session_priority = requested_session_priority;
		session->session_order = necp_allocate_new_session_order(session->session_priority, session->control_unit);
		session->dirty = TRUE;

		// Mark all policies as needing updates
		LIST_FOREACH_SAFE(policy, &session->policies, chain, temp_policy) {
			policy->pending_update = TRUE;
		}
	}

	necp_send_success_response(session, NECP_PACKET_TYPE_SET_SESSION_PRIORITY, message_id);
	return;

fail:
	necp_send_error_response(session, NECP_PACKET_TYPE_SET_SESSION_PRIORITY, message_id, response_error);
}
static void
necp_handle_lock_session_to_proc(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
{
#pragma unused(packet, offset)
	// proc_uuid already filled out
	session->proc_locked = TRUE;
	necp_send_success_response(session, NECP_PACKET_TYPE_LOCK_SESSION_TO_PROC, message_id);
}
static void
necp_handle_register_service(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
{
	int error;
	struct necp_service_registration *new_service = NULL;
	u_int32_t response_error = NECP_ERROR_INTERNAL;
	uuid_t service_uuid;
	uuid_clear(service_uuid);

	if (session == NULL) {
		NECPLOG0(LOG_ERR, "Failed to find session");
		response_error = NECP_ERROR_INTERNAL;
		goto fail;
	}

	// Enforce entitlements
	errno_t cred_result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES, 0);
	if (cred_result != 0) {
		NECPLOG0(LOG_ERR, "Session does not hold necessary entitlement to register service");
		goto fail;
	}

	// Read service uuid
	error = necp_get_tlv(packet, NULL, 0, offset, NECP_TLV_SERVICE_UUID, sizeof(uuid_t), service_uuid, NULL);
	if (error) {
		NECPLOG(LOG_ERR, "Failed to get service UUID: %d", error);
		response_error = NECP_ERROR_INVALID_TLV;
		goto fail;
	}

	MALLOC(new_service, struct necp_service_registration *, sizeof(*new_service), M_NECP, M_WAITOK);
	if (new_service == NULL) {
		NECPLOG0(LOG_ERR, "Failed to allocate service registration");
		response_error = NECP_ERROR_INTERNAL;
		goto fail;
	}

	lck_rw_lock_exclusive(&necp_kernel_policy_lock);
	memset(new_service, 0, sizeof(*new_service));
	new_service->service_id = necp_create_uuid_service_id_mapping(service_uuid);
	LIST_INSERT_HEAD(&session->services, new_service, session_chain);
	LIST_INSERT_HEAD(&necp_registered_service_list, new_service, kernel_chain);
	lck_rw_done(&necp_kernel_policy_lock);

	necp_send_success_response(session, NECP_PACKET_TYPE_REGISTER_SERVICE, message_id);
	return;

fail:
	necp_send_error_response(session, NECP_PACKET_TYPE_REGISTER_SERVICE, message_id, response_error);
}
static void
necp_handle_unregister_service(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
{
	int error;
	struct necp_service_registration *service = NULL;
	struct necp_service_registration *temp_service = NULL;
	u_int32_t response_error = NECP_ERROR_INTERNAL;
	struct necp_uuid_id_mapping *mapping = NULL;
	uuid_t service_uuid;
	uuid_clear(service_uuid);

	if (session == NULL) {
		NECPLOG0(LOG_ERR, "Failed to find session");
		response_error = NECP_ERROR_INTERNAL;
		goto fail;
	}

	// Read service uuid
	error = necp_get_tlv(packet, NULL, 0, offset, NECP_TLV_SERVICE_UUID, sizeof(uuid_t), service_uuid, NULL);
	if (error) {
		NECPLOG(LOG_ERR, "Failed to get service UUID: %d", error);
		response_error = NECP_ERROR_INVALID_TLV;
		goto fail;
	}

	// Mark remove all matching services for this session
	lck_rw_lock_exclusive(&necp_kernel_policy_lock);
	mapping = necp_uuid_lookup_service_id_locked(service_uuid);
	if (mapping != NULL) {
		LIST_FOREACH_SAFE(service, &session->services, session_chain, temp_service) {
			if (service->service_id == mapping->id) {
				LIST_REMOVE(service, session_chain);
				LIST_REMOVE(service, kernel_chain);
				FREE(service, M_NECP);
			}
		}
		necp_remove_uuid_service_id_mapping(service_uuid);
	}
	lck_rw_done(&necp_kernel_policy_lock);

	necp_send_success_response(session, NECP_PACKET_TYPE_UNREGISTER_SERVICE, message_id);
	return;

fail:
	necp_send_error_response(session, NECP_PACKET_TYPE_UNREGISTER_SERVICE, message_id, response_error);
}
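// Parse a POLICY_ADD message: read the order and result TLVs, collect any
// route rule and condition TLVs into flat arrays (re-encoded as
// type/length/value triples), validate them, and create the session policy.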
static necp_policy_id
necp_handle_policy_add(struct necp_session *session, u_int32_t message_id, mbuf_t packet,
					   u_int8_t *tlv_buffer, size_t tlv_buffer_length, int offset, int *return_error)
{
	bool has_default_condition = FALSE;
	bool has_non_default_condition = FALSE;
	bool has_application_condition = FALSE;
	bool has_real_application_condition = FALSE;
	bool requires_application_condition = FALSE;
	bool requires_real_application_condition = FALSE;
	u_int8_t *conditions_array = NULL;
	u_int32_t conditions_array_size = 0;
	int conditions_array_cursor;

	bool has_default_route_rule = FALSE;
	u_int8_t *route_rules_array = NULL;
	u_int32_t route_rules_array_size = 0;
	int route_rules_array_cursor;

	int cursor;
	int error = 0;
	u_int32_t response_error = NECP_ERROR_INTERNAL;

	necp_policy_order order = 0;
	struct necp_session_policy *policy = NULL;
	u_int8_t *policy_result = NULL;
	u_int32_t policy_result_size = 0;

	// Read policy order
	error = necp_get_tlv(packet, tlv_buffer, tlv_buffer_length, offset, NECP_TLV_POLICY_ORDER, sizeof(order), &order, NULL);
	if (error) {
		NECPLOG(LOG_ERR, "Failed to get policy order: %d", error);
		response_error = NECP_ERROR_INVALID_TLV;
		goto fail;
	}

	// Read policy result
	cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, offset, NECP_TLV_POLICY_RESULT, &error, 0);
	error = necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &policy_result_size);
	if (error || policy_result_size == 0) {
		NECPLOG(LOG_ERR, "Failed to get policy result length: %d", error);
		response_error = NECP_ERROR_INVALID_TLV;
		goto fail;
	}
	if (policy_result_size > NECP_MAX_POLICY_RESULT_SIZE) {
		NECPLOG(LOG_ERR, "Policy result length too large: %u", policy_result_size);
		response_error = NECP_ERROR_INVALID_TLV;
		goto fail;
	}
	MALLOC(policy_result, u_int8_t *, policy_result_size, M_NECP, M_WAITOK);
	if (policy_result == NULL) {
		NECPLOG(LOG_ERR, "Failed to allocate a policy result buffer (size %d)", policy_result_size);
		response_error = NECP_ERROR_INTERNAL;
		goto fail;
	}
	error = necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, policy_result_size, policy_result, NULL);
	if (error) {
		NECPLOG(LOG_ERR, "Failed to get policy result: %d", error);
		response_error = NECP_ERROR_POLICY_RESULT_INVALID;
		goto fail;
	}
	if (!necp_policy_result_is_valid(policy_result, policy_result_size)) {
		NECPLOG0(LOG_ERR, "Failed to validate policy result");
		response_error = NECP_ERROR_POLICY_RESULT_INVALID;
		goto fail;
	}

	if (necp_policy_result_requires_route_rules(policy_result, policy_result_size)) {
		// Read route rules conditions
		for (cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, offset, NECP_TLV_ROUTE_RULE, &error, 0);
			 cursor >= 0;
			 cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, cursor, NECP_TLV_ROUTE_RULE, &error, 1)) {
			u_int32_t route_rule_size = 0;
			necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &route_rule_size);
			if (route_rule_size > 0) {
				route_rules_array_size += (sizeof(u_int8_t) + sizeof(u_int32_t) + route_rule_size);
			}
		}

		if (route_rules_array_size == 0) {
			NECPLOG0(LOG_ERR, "Failed to get policy route rules");
			response_error = NECP_ERROR_INVALID_TLV;
			goto fail;
		}
		if (route_rules_array_size > NECP_MAX_ROUTE_RULES_ARRAY_SIZE) {
			NECPLOG(LOG_ERR, "Route rules length too large: %u", route_rules_array_size);
			response_error = NECP_ERROR_INVALID_TLV;
			goto fail;
		}
		MALLOC(route_rules_array, u_int8_t *, route_rules_array_size, M_NECP, M_WAITOK);
		if (route_rules_array == NULL) {
			NECPLOG(LOG_ERR, "Failed to allocate a policy route rules array (size %d)", route_rules_array_size);
			response_error = NECP_ERROR_INTERNAL;
			goto fail;
		}

		route_rules_array_cursor = 0;
		for (cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, offset, NECP_TLV_ROUTE_RULE, &error, 0);
			 cursor >= 0;
			 cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, cursor, NECP_TLV_ROUTE_RULE, &error, 1)) {
			u_int8_t route_rule_type = NECP_TLV_ROUTE_RULE;
			u_int32_t route_rule_size = 0;
			necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &route_rule_size);
			if (route_rule_size > 0 && route_rule_size <= (route_rules_array_size - route_rules_array_cursor)) {
				// Add type
				memcpy((route_rules_array + route_rules_array_cursor), &route_rule_type, sizeof(route_rule_type));
				route_rules_array_cursor += sizeof(route_rule_type);

				// Add length
				memcpy((route_rules_array + route_rules_array_cursor), &route_rule_size, sizeof(route_rule_size));
				route_rules_array_cursor += sizeof(route_rule_size);

				// Add value
				necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, route_rule_size, (route_rules_array + route_rules_array_cursor), NULL);

				if (!necp_policy_route_rule_is_valid((route_rules_array + route_rules_array_cursor), route_rule_size)) {
					NECPLOG0(LOG_ERR, "Failed to validate policy route rule");
					response_error = NECP_ERROR_ROUTE_RULES_INVALID;
					goto fail;
				}

				if (necp_policy_route_rule_is_default((route_rules_array + route_rules_array_cursor), route_rule_size)) {
					if (has_default_route_rule) {
						NECPLOG0(LOG_ERR, "Failed to validate route rule; contained multiple default route rules");
						response_error = NECP_ERROR_ROUTE_RULES_INVALID;
						goto fail;
					}
					has_default_route_rule = TRUE;
				}

				route_rules_array_cursor += route_rule_size;
			}
		}
	}

	// Read policy conditions
	for (cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, offset, NECP_TLV_POLICY_CONDITION, &error, 0);
		 cursor >= 0;
		 cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, cursor, NECP_TLV_POLICY_CONDITION, &error, 1)) {
		u_int32_t condition_size = 0;
		necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &condition_size);

		if (condition_size > 0) {
			conditions_array_size += (sizeof(u_int8_t) + sizeof(u_int32_t) + condition_size);
		}
	}

	if (conditions_array_size == 0) {
		NECPLOG0(LOG_ERR, "Failed to get policy conditions");
		response_error = NECP_ERROR_INVALID_TLV;
		goto fail;
	}
	if (conditions_array_size > NECP_MAX_CONDITIONS_ARRAY_SIZE) {
		NECPLOG(LOG_ERR, "Conditions length too large: %u", conditions_array_size);
		response_error = NECP_ERROR_INVALID_TLV;
		goto fail;
	}
	MALLOC(conditions_array, u_int8_t *, conditions_array_size, M_NECP, M_WAITOK);
	if (conditions_array == NULL) {
		NECPLOG(LOG_ERR, "Failed to allocate a policy conditions array (size %d)", conditions_array_size);
		response_error = NECP_ERROR_INTERNAL;
		goto fail;
	}

	conditions_array_cursor = 0;
	for (cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, offset, NECP_TLV_POLICY_CONDITION, &error, 0);
		 cursor >= 0;
		 cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, cursor, NECP_TLV_POLICY_CONDITION, &error, 1)) {
		u_int8_t condition_type = NECP_TLV_POLICY_CONDITION;
		u_int32_t condition_size = 0;
		necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &condition_size);
		if (condition_size > 0 && condition_size <= (conditions_array_size - conditions_array_cursor)) {
			// Add type
			memcpy((conditions_array + conditions_array_cursor), &condition_type, sizeof(condition_type));
			conditions_array_cursor += sizeof(condition_type);

			// Add length
			memcpy((conditions_array + conditions_array_cursor), &condition_size, sizeof(condition_size));
			conditions_array_cursor += sizeof(condition_size);

			// Add value
			necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, condition_size, (conditions_array + conditions_array_cursor), NULL);
			if (!necp_policy_condition_is_valid((conditions_array + conditions_array_cursor), condition_size, necp_policy_result_get_type_from_buffer(policy_result, policy_result_size))) {
				NECPLOG0(LOG_ERR, "Failed to validate policy condition");
				response_error = NECP_ERROR_POLICY_CONDITIONS_INVALID;
				goto fail;
			}

			if (necp_policy_condition_is_default((conditions_array + conditions_array_cursor), condition_size)) {
				has_default_condition = TRUE;
			} else {
				has_non_default_condition = TRUE;
			}
			if (has_default_condition && has_non_default_condition) {
				NECPLOG0(LOG_ERR, "Failed to validate conditions; contained default and non-default conditions");
				response_error = NECP_ERROR_POLICY_CONDITIONS_INVALID;
				goto fail;
			}

			if (necp_policy_condition_is_application((conditions_array + conditions_array_cursor), condition_size)) {
				has_application_condition = TRUE;
			}

			if (necp_policy_condition_is_real_application((conditions_array + conditions_array_cursor), condition_size)) {
				has_real_application_condition = TRUE;
			}

			if (necp_policy_condition_requires_application((conditions_array + conditions_array_cursor), condition_size)) {
				requires_application_condition = TRUE;
			}

			if (necp_policy_condition_requires_real_application((conditions_array + conditions_array_cursor), condition_size)) {
				requires_real_application_condition = TRUE;
			}

			conditions_array_cursor += condition_size;
		}
	}

	if (requires_application_condition && !has_application_condition) {
		NECPLOG0(LOG_ERR, "Failed to validate conditions; did not contain application condition");
		response_error = NECP_ERROR_POLICY_CONDITIONS_INVALID;
		goto fail;
	}

	if (requires_real_application_condition && !has_real_application_condition) {
		NECPLOG0(LOG_ERR, "Failed to validate conditions; did not contain real application condition");
		response_error = NECP_ERROR_POLICY_CONDITIONS_INVALID;
		goto fail;
	}

	if ((policy = necp_policy_create(session, order, conditions_array, conditions_array_size, route_rules_array, route_rules_array_size, policy_result, policy_result_size)) == NULL) {
		response_error = NECP_ERROR_INTERNAL;
		goto fail;
	}

	if (packet != NULL) {
		necp_send_policy_id_response(session, NECP_PACKET_TYPE_POLICY_ADD, message_id, policy->id);
	}
	return (policy->id);

fail:
	if (policy_result != NULL) {
		FREE(policy_result, M_NECP);
	}
	if (conditions_array != NULL) {
		FREE(conditions_array, M_NECP);
	}
	if (route_rules_array != NULL) {
		FREE(route_rules_array, M_NECP);
	}

	if (packet != NULL) {
		necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_ADD, message_id, response_error);
	}
	if (return_error != NULL) {
		*return_error = necp_get_posix_error_for_necp_error(response_error);
	}
	return (0);
}
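// Return a single policy (order, result, and conditions) by policy ID.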
static void
necp_handle_policy_get(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
{
#pragma unused(offset)
	int error;
	u_int8_t *response = NULL;
	u_int8_t *cursor = NULL;
	u_int32_t response_error = NECP_ERROR_INTERNAL;
	necp_policy_id policy_id = 0;
	u_int32_t order_tlv_size = 0;
	u_int32_t result_tlv_size = 0;
	u_int32_t response_size = 0;

	struct necp_session_policy *policy = NULL;

	// Read policy id
	error = necp_get_tlv(packet, NULL, 0, offset, NECP_TLV_POLICY_ID, sizeof(policy_id), &policy_id, NULL);
	if (error) {
		NECPLOG(LOG_ERR, "Failed to get policy id: %d", error);
		response_error = NECP_ERROR_INVALID_TLV;
		goto fail;
	}

	policy = necp_policy_find(session, policy_id);
	if (policy == NULL || policy->pending_deletion) {
		NECPLOG(LOG_ERR, "Failed to find policy with id %d", policy_id);
		response_error = NECP_ERROR_POLICY_ID_NOT_FOUND;
		goto fail;
	}

	order_tlv_size = sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(necp_policy_order);
	result_tlv_size = (policy->result_size ? (sizeof(u_int8_t) + sizeof(u_int32_t) + policy->result_size) : 0);
	response_size = sizeof(struct necp_packet_header) + order_tlv_size + result_tlv_size + policy->conditions_size;
	MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK);
	if (response == NULL) {
		necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_GET, message_id, NECP_ERROR_INTERNAL);
		return;
	}

	cursor = response;
	cursor = necp_buffer_write_packet_header(cursor, NECP_PACKET_TYPE_POLICY_GET, NECP_PACKET_FLAGS_RESPONSE, message_id);
	cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ORDER, sizeof(necp_policy_order), &policy->order, response, response_size);

	if (result_tlv_size) {
		cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_RESULT, policy->result_size, &policy->result, response, response_size);
	}
	if (policy->conditions_size) {
		memcpy(((u_int8_t *)(void *)(cursor)), policy->conditions, policy->conditions_size);
	}

	if (!necp_send_ctl_data(session, (u_int8_t *)response, response_size)) {
		NECPLOG0(LOG_ERR, "Failed to send response");
	}

	FREE(response, M_NECP);
	return;

fail:
	necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_GET, message_id, response_error);
}
static void
necp_handle_policy_delete(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
{
	int error;
	u_int32_t response_error = NECP_ERROR_INTERNAL;
	necp_policy_id policy_id = 0;

	struct necp_session_policy *policy = NULL;

	// Read policy id
	error = necp_get_tlv(packet, NULL, 0, offset, NECP_TLV_POLICY_ID, sizeof(policy_id), &policy_id, NULL);
	if (error) {
		NECPLOG(LOG_ERR, "Failed to get policy id: %d", error);
		response_error = NECP_ERROR_INVALID_TLV;
		goto fail;
	}

	policy = necp_policy_find(session, policy_id);
	if (policy == NULL || policy->pending_deletion) {
		NECPLOG(LOG_ERR, "Failed to find policy with id %d", policy_id);
		response_error = NECP_ERROR_POLICY_ID_NOT_FOUND;
		goto fail;
	}

	necp_policy_mark_for_deletion(session, policy);

	necp_send_success_response(session, NECP_PACKET_TYPE_POLICY_DELETE, message_id);
	return;

fail:
	necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_DELETE, message_id, response_error);
}
static void
necp_handle_policy_apply_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
{
#pragma unused(packet, offset)
	necp_policy_apply_all(session);
	necp_send_success_response(session, NECP_PACKET_TYPE_POLICY_APPLY_ALL, message_id);
}
static void
necp_handle_policy_list_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
{
#pragma unused(packet, offset)
	u_int32_t tlv_size = (sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(u_int32_t));
	u_int32_t response_size = 0;
	u_int8_t *response = NULL;
	u_int8_t *cursor = NULL;
	int num_policies = 0;
	int cur_policy_index = 0;
	struct necp_session_policy *policy;

	LIST_FOREACH(policy, &session->policies, chain) {
		if (!policy->pending_deletion) {
			num_policies++;
		}
	}

	// Create a response with one Policy ID TLV for each policy
	response_size = sizeof(struct necp_packet_header) + num_policies * tlv_size;
	MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK);
	if (response == NULL) {
		necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_LIST_ALL, message_id, NECP_ERROR_INTERNAL);
		return;
	}

	cursor = response;
	cursor = necp_buffer_write_packet_header(cursor, NECP_PACKET_TYPE_POLICY_LIST_ALL, NECP_PACKET_FLAGS_RESPONSE, message_id);

	LIST_FOREACH(policy, &session->policies, chain) {
		if (!policy->pending_deletion && cur_policy_index < num_policies) {
			cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ID, sizeof(u_int32_t), &policy->id, response, response_size);
			cur_policy_index++;
		}
	}

	if (!necp_send_ctl_data(session, (u_int8_t *)response, response_size)) {
		NECPLOG0(LOG_ERR, "Failed to send response");
	}

	FREE(response, M_NECP);
}
static void
necp_handle_policy_delete_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
{
#pragma unused(packet, offset)
	necp_policy_mark_all_for_deletion(session);
	necp_send_success_response(session, NECP_PACKET_TYPE_POLICY_DELETE_ALL, message_id);
}
static necp_policy_id
necp_policy_get_new_id(void)
{
	necp_policy_id newid = 0;

	lck_rw_lock_exclusive(&necp_kernel_policy_lock);

	necp_last_policy_id++;
	if (necp_last_policy_id < 1) {
		necp_last_policy_id = 1;
	}

	newid = necp_last_policy_id;
	lck_rw_done(&necp_kernel_policy_lock);

	if (newid == 0) {
		NECPLOG0(LOG_DEBUG, "Allocate policy id failed.\n");
		return (0);
	}

	return (newid);
}
/*
 * For the policy dump response this is the structure:
 *
 * <NECP_PACKET_HEADER>
 * {
 *     type   : NECP_TLV_POLICY_DUMP
 *     length : ...
 *     value  :
 *     {
 *         type : NECP_TLV_POLICY_ID
 *         type : NECP_TLV_POLICY_ORDER
 *         type : NECP_TLV_POLICY_RESULT_STRING
 *         type : NECP_TLV_POLICY_OWNER
 *         type : NECP_TLV_POLICY_CONDITION
 *         {
 *             type : NECP_POLICY_CONDITION_ALL_INTERFACES
 *             type : NECP_POLICY_CONDITION_BOUND_INTERFACES
 *             ...
 *         }
 *     }
 * }
 * {
 *     type   : NECP_TLV_POLICY_DUMP   (one such TLV is repeated per policy)
 *     ...
 * }
 */
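// Build one NECP_TLV_POLICY_DUMP TLV per kernel policy and either send the
// result over the control socket or copy it out to the caller's buffer
// (prefixed with a 4-byte total length).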
2958 necp_handle_policy_dump_all(struct necp_session
*session
, u_int32_t message_id
, mbuf_t packet
,
2959 user_addr_t out_buffer
, size_t out_buffer_length
, int offset
)
2961 #pragma unused(offset)
2962 struct necp_kernel_socket_policy
*policy
= NULL
;
2964 int policy_count
= 0;
2965 u_int8_t
**tlv_buffer_pointers
= NULL
;
2966 u_int32_t
*tlv_buffer_lengths
= NULL
;
2967 u_int32_t total_tlv_len
= 0;
2968 u_int8_t
*result_buf
= NULL
;
2969 u_int8_t
*result_buf_cursor
= result_buf
;
2970 char result_string
[MAX_RESULT_STRING_LEN
];
2971 char proc_name_string
[MAXCOMLEN
+ 1];
2974 bool error_occured
= false;
2975 u_int32_t response_error
= NECP_ERROR_INTERNAL
;
2977 #define REPORT_ERROR(error) error_occured = true; \
2978 response_error = error; \
2981 #define UNLOCK_AND_REPORT_ERROR(lock, error) lck_rw_done(lock); \
2984 errno_t cred_result
= priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES
, 0);
2985 if (cred_result
!= 0) {
2986 NECPLOG0(LOG_ERR
, "Session does not hold the necessary entitlement to get Network Extension Policy information");
2987 REPORT_ERROR(NECP_ERROR_INTERNAL
);
2991 lck_rw_lock_shared(&necp_kernel_policy_lock
);
2994 NECPLOG0(LOG_DEBUG
, "Gathering policies");
2997 policy_count
= necp_kernel_application_policies_count
;
2999 MALLOC(tlv_buffer_pointers
, u_int8_t
**, sizeof(u_int8_t
*) * policy_count
, M_NECP
, M_NOWAIT
| M_ZERO
);
3000 if (tlv_buffer_pointers
== NULL
) {
3001 NECPLOG(LOG_DEBUG
, "Failed to allocate tlv_buffer_pointers (%u bytes)", sizeof(u_int8_t
*) * policy_count
);
3002 UNLOCK_AND_REPORT_ERROR(&necp_kernel_policy_lock
, NECP_ERROR_INTERNAL
);
3005 MALLOC(tlv_buffer_lengths
, u_int32_t
*, sizeof(u_int32_t
) * policy_count
, M_NECP
, M_NOWAIT
| M_ZERO
);
3006 if (tlv_buffer_lengths
== NULL
) {
3007 NECPLOG(LOG_DEBUG
, "Failed to allocate tlv_buffer_lengths (%u bytes)", sizeof(u_int32_t
) * policy_count
);
3008 UNLOCK_AND_REPORT_ERROR(&necp_kernel_policy_lock
, NECP_ERROR_INTERNAL
);
3011 for (policy_i
= 0; necp_kernel_socket_policies_app_layer_map
!= NULL
&& necp_kernel_socket_policies_app_layer_map
[policy_i
] != NULL
; policy_i
++) {
3012 policy
= necp_kernel_socket_policies_app_layer_map
[policy_i
];
3014 memset(result_string
, 0, MAX_RESULT_STRING_LEN
);
3015 memset(proc_name_string
, 0, MAXCOMLEN
+ 1);
3017 necp_get_result_description(result_string
, policy
->result
, policy
->result_parameter
);
3018 proc_name(policy
->session_pid
, proc_name_string
, MAXCOMLEN
);
3020 u_int16_t proc_name_len
= strlen(proc_name_string
) + 1;
3021 u_int16_t result_string_len
= strlen(result_string
) + 1;
3024 NECPLOG(LOG_DEBUG
, "Policy: process: %s, result: %s", proc_name_string
, result_string
);
3027 u_int32_t total_allocated_bytes
= sizeof(u_int8_t
) + sizeof(u_int32_t
) + sizeof(policy
->id
) + // NECP_TLV_POLICY_ID
3028 sizeof(u_int8_t
) + sizeof(u_int32_t
) + sizeof(policy
->order
) + // NECP_TLV_POLICY_ORDER
3029 sizeof(u_int8_t
) + sizeof(u_int32_t
) + sizeof(policy
->session_order
) + // NECP_TLV_POLICY_SESSION_ORDER
3030 sizeof(u_int8_t
) + sizeof(u_int32_t
) + result_string_len
+ // NECP_TLV_POLICY_RESULT_STRING
3031 sizeof(u_int8_t
) + sizeof(u_int32_t
) + proc_name_len
+ // NECP_TLV_POLICY_OWNER
3032 sizeof(u_int8_t
) + sizeof(u_int32_t
); // NECP_TLV_POLICY_CONDITION
3034 // We now traverse the condition_mask to see how much space we need to allocate
3035 u_int32_t condition_mask
= policy
->condition_mask
;
3036 u_int8_t num_conditions
= 0;
3037 struct necp_string_id_mapping
*account_id_entry
= NULL
;
3038 char if_name
[IFXNAMSIZ
];
3039 u_int32_t condition_tlv_length
= 0;
3040 memset(if_name
, 0, sizeof(if_name
));
3042 if (condition_mask
== NECP_POLICY_CONDITION_DEFAULT
) {
3045 if (condition_mask
& NECP_KERNEL_CONDITION_ALL_INTERFACES
) {
3048 if (condition_mask
& NECP_KERNEL_CONDITION_BOUND_INTERFACE
) {
3049 snprintf(if_name
, IFXNAMSIZ
, "%s%d", ifnet_name(policy
->cond_bound_interface
), ifnet_unit(policy
->cond_bound_interface
));
3050 condition_tlv_length
+= strlen(if_name
) + 1;
3053 if (condition_mask
& NECP_KERNEL_CONDITION_PROTOCOL
) {
3054 condition_tlv_length
+= sizeof(policy
->cond_protocol
);
3057 if (condition_mask
& NECP_KERNEL_CONDITION_APP_ID
) {
3058 condition_tlv_length
+= sizeof(uuid_t
);
3061 if (condition_mask
& NECP_KERNEL_CONDITION_REAL_APP_ID
) {
3062 condition_tlv_length
+= sizeof(uuid_t
);
3065 if (condition_mask
& NECP_KERNEL_CONDITION_DOMAIN
) {
3066 u_int32_t domain_len
= strlen(policy
->cond_domain
) + 1;
3067 condition_tlv_length
+= domain_len
;
3070 if (condition_mask
& NECP_KERNEL_CONDITION_ACCOUNT_ID
) {
3071 account_id_entry
= necp_lookup_string_with_id_locked(&necp_account_id_list
, policy
->cond_account_id
);
3072 u_int32_t account_id_len
= 0;
3073 if (account_id_entry
) {
3074 account_id_len
= account_id_entry
->string
? strlen(account_id_entry
->string
) + 1 : 0;
3076 condition_tlv_length
+= account_id_len
;
3079 if (condition_mask
& NECP_KERNEL_CONDITION_PID
) {
3080 condition_tlv_length
+= sizeof(pid_t
);
3083 if (condition_mask
& NECP_KERNEL_CONDITION_UID
) {
3084 condition_tlv_length
+= sizeof(uid_t
);
3087 if (condition_mask
& NECP_KERNEL_CONDITION_TRAFFIC_CLASS
) {
3088 condition_tlv_length
+= sizeof(struct necp_policy_condition_tc_range
);
3091 if (condition_mask
& NECP_KERNEL_CONDITION_ENTITLEMENT
) {
3094 if (condition_mask
& NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT
) {
3095 u_int32_t entitlement_len
= strlen(policy
->cond_custom_entitlement
) + 1;
3096 condition_tlv_length
+= entitlement_len
;
3099 if (condition_mask
& NECP_KERNEL_CONDITION_LOCAL_START
) {
3100 if (condition_mask
& NECP_KERNEL_CONDITION_LOCAL_END
) {
3101 condition_tlv_length
+= sizeof(struct necp_policy_condition_addr_range
);
3103 condition_tlv_length
+= sizeof(struct necp_policy_condition_addr
);
3107 if (condition_mask
& NECP_KERNEL_CONDITION_REMOTE_START
) {
3108 if (condition_mask
& NECP_KERNEL_CONDITION_REMOTE_END
) {
3109 condition_tlv_length
+= sizeof(struct necp_policy_condition_addr_range
);
3111 condition_tlv_length
+= sizeof(struct necp_policy_condition_addr
);
3117 condition_tlv_length
+= num_conditions
* (sizeof(u_int8_t
) + sizeof(u_int32_t
)); // These are for the condition TLVs. The space for "value" is already accounted for above.
3118 total_allocated_bytes
+= condition_tlv_length
;
3120 u_int8_t
*tlv_buffer
;
3121 MALLOC(tlv_buffer
, u_int8_t
*, total_allocated_bytes
, M_NECP
, M_NOWAIT
| M_ZERO
);
3122 if (tlv_buffer
== NULL
) {
3123 NECPLOG(LOG_DEBUG
, "Failed to allocate tlv_buffer (%u bytes)", total_allocated_bytes
);
3127 u_int8_t
*cursor
= tlv_buffer
;
3128 cursor
= necp_buffer_write_tlv(cursor
, NECP_TLV_POLICY_ID
, sizeof(policy
->id
), &policy
->id
, tlv_buffer
, total_allocated_bytes
);
3129 cursor
= necp_buffer_write_tlv(cursor
, NECP_TLV_POLICY_ORDER
, sizeof(necp_policy_order
), &policy
->order
, tlv_buffer
, total_allocated_bytes
);
3130 cursor
= necp_buffer_write_tlv(cursor
, NECP_TLV_POLICY_SESSION_ORDER
, sizeof(policy
->session_order
), &policy
->session_order
, tlv_buffer
, total_allocated_bytes
);
3131 cursor
= necp_buffer_write_tlv(cursor
, NECP_TLV_POLICY_RESULT_STRING
, result_string_len
, result_string
, tlv_buffer
, total_allocated_bytes
);
3132 cursor
= necp_buffer_write_tlv(cursor
, NECP_TLV_POLICY_OWNER
, proc_name_len
, proc_name_string
, tlv_buffer
, total_allocated_bytes
);
3135 u_int8_t q_cond_buf
[N_QUICK
]; // Minor optimization
3137 u_int8_t
*cond_buf
; // To be used for condition TLVs
3138 if (condition_tlv_length
<= N_QUICK
) {
3139 cond_buf
= q_cond_buf
;
3141 MALLOC(cond_buf
, u_int8_t
*, condition_tlv_length
, M_NECP
, M_NOWAIT
);
3142 if (cond_buf
== NULL
) {
3143 NECPLOG(LOG_DEBUG
, "Failed to allocate cond_buffer (%u bytes)", condition_tlv_length
);
3144 FREE(tlv_buffer
, M_NECP
);
3149 memset(cond_buf
, 0, condition_tlv_length
);
3150 u_int8_t
*cond_buf_cursor
= cond_buf
;
3151 if (condition_mask
== NECP_POLICY_CONDITION_DEFAULT
) {
3152 cond_buf_cursor
= necp_buffer_write_tlv(cond_buf_cursor
, NECP_POLICY_CONDITION_DEFAULT
, 0, "", cond_buf
, condition_tlv_length
);
3154 if (condition_mask
& NECP_KERNEL_CONDITION_ALL_INTERFACES
) {
3155 cond_buf_cursor
= necp_buffer_write_tlv(cond_buf_cursor
, NECP_POLICY_CONDITION_ALL_INTERFACES
, 0, "", cond_buf
, condition_tlv_length
);
3157 if (condition_mask
& NECP_KERNEL_CONDITION_BOUND_INTERFACE
) {
3158 cond_buf_cursor
= necp_buffer_write_tlv(cond_buf_cursor
, NECP_POLICY_CONDITION_BOUND_INTERFACE
, strlen(if_name
) + 1,
3159 if_name
, cond_buf
, condition_tlv_length
);
3161 if (condition_mask
& NECP_KERNEL_CONDITION_PROTOCOL
) {
3162 cond_buf_cursor
= necp_buffer_write_tlv(cond_buf_cursor
, NECP_POLICY_CONDITION_IP_PROTOCOL
, sizeof(policy
->cond_protocol
), &policy
->cond_protocol
,
3163 cond_buf
, condition_tlv_length
);
3165 if (condition_mask
& NECP_KERNEL_CONDITION_APP_ID
) {
3166 struct necp_uuid_id_mapping
*entry
= necp_uuid_lookup_uuid_with_app_id_locked(policy
->cond_app_id
);
3167 if (entry
!= NULL
) {
3168 cond_buf_cursor
= necp_buffer_write_tlv(cond_buf_cursor
, NECP_POLICY_CONDITION_APPLICATION
, sizeof(entry
->uuid
), entry
->uuid
,
3169 cond_buf
, condition_tlv_length
);
3172 if (condition_mask
& NECP_KERNEL_CONDITION_REAL_APP_ID
) {
3173 struct necp_uuid_id_mapping
*entry
= necp_uuid_lookup_uuid_with_app_id_locked(policy
->cond_real_app_id
);
3174 if (entry
!= NULL
) {
3175 cond_buf_cursor
= necp_buffer_write_tlv(cond_buf_cursor
, NECP_POLICY_CONDITION_REAL_APPLICATION
, sizeof(entry
->uuid
), entry
->uuid
,
3176 cond_buf
, condition_tlv_length
);
3179 if (condition_mask
& NECP_KERNEL_CONDITION_DOMAIN
) {
3180 cond_buf_cursor
= necp_buffer_write_tlv(cond_buf_cursor
, NECP_POLICY_CONDITION_DOMAIN
, strlen(policy
->cond_domain
) + 1, policy
->cond_domain
,
3181 cond_buf
, condition_tlv_length
);
3183 if (condition_mask
& NECP_KERNEL_CONDITION_ACCOUNT_ID
) {
3184 if (account_id_entry
!= NULL
) {
3185 cond_buf_cursor
= necp_buffer_write_tlv(cond_buf_cursor
, NECP_POLICY_CONDITION_ACCOUNT
, strlen(account_id_entry
->string
) + 1, account_id_entry
->string
,
3186 cond_buf
, condition_tlv_length
);
3189 if (condition_mask
& NECP_KERNEL_CONDITION_PID
) {
3190 cond_buf_cursor
= necp_buffer_write_tlv(cond_buf_cursor
, NECP_POLICY_CONDITION_PID
, sizeof(policy
->cond_pid
), &policy
->cond_pid
,
3191 cond_buf
, condition_tlv_length
);
3193 if (condition_mask
& NECP_KERNEL_CONDITION_UID
) {
3194 cond_buf_cursor
= necp_buffer_write_tlv(cond_buf_cursor
, NECP_POLICY_CONDITION_UID
, sizeof(policy
->cond_uid
), &policy
->cond_uid
,
3195 cond_buf
, condition_tlv_length
);
3197 if (condition_mask
& NECP_KERNEL_CONDITION_TRAFFIC_CLASS
) {
3198 cond_buf_cursor
= necp_buffer_write_tlv(cond_buf_cursor
, NECP_POLICY_CONDITION_TRAFFIC_CLASS
, sizeof(policy
->cond_traffic_class
), &policy
->cond_traffic_class
,
3199 cond_buf
, condition_tlv_length
);
3201 if (condition_mask
& NECP_KERNEL_CONDITION_ENTITLEMENT
) {
3202 cond_buf_cursor
= necp_buffer_write_tlv(cond_buf_cursor
, NECP_POLICY_CONDITION_ENTITLEMENT
, 0, "",
3203 cond_buf
, condition_tlv_length
);
3205 if (condition_mask
& NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT
) {
3206 cond_buf_cursor
= necp_buffer_write_tlv(cond_buf_cursor
, NECP_POLICY_CONDITION_ENTITLEMENT
, strlen(policy
->cond_custom_entitlement
) + 1, policy
->cond_custom_entitlement
,
3207 cond_buf
, condition_tlv_length
);
3209 if (condition_mask
& NECP_KERNEL_CONDITION_LOCAL_START
) {
3210 if (condition_mask
& NECP_KERNEL_CONDITION_LOCAL_END
) {
3211 struct necp_policy_condition_addr_range range
;
3212 memcpy(&range
.start_address
, &policy
->cond_local_start
, sizeof(policy
->cond_local_start
));
3213 memcpy(&range
.end_address
, &policy
->cond_local_end
, sizeof(policy
->cond_local_end
));
3214 cond_buf_cursor
= necp_buffer_write_tlv(cond_buf_cursor
, NECP_POLICY_CONDITION_LOCAL_ADDR_RANGE
, sizeof(range
), &range
,
3215 cond_buf
, condition_tlv_length
);
3217 struct necp_policy_condition_addr addr
;
3218 addr
.prefix
= policy
->cond_local_prefix
;
3219 memcpy(&addr
.address
, &policy
->cond_local_start
, sizeof(policy
->cond_local_start
));
3220 cond_buf_cursor
= necp_buffer_write_tlv(cond_buf_cursor
, NECP_POLICY_CONDITION_LOCAL_ADDR
, sizeof(addr
), &addr
,
3221 cond_buf
, condition_tlv_length
);
3224 if (condition_mask
& NECP_KERNEL_CONDITION_REMOTE_START
) {
3225 if (condition_mask
& NECP_KERNEL_CONDITION_REMOTE_END
) {
3226 struct necp_policy_condition_addr_range range
;
3227 memcpy(&range
.start_address
, &policy
->cond_remote_start
, sizeof(policy
->cond_remote_start
));
3228 memcpy(&range
.end_address
, &policy
->cond_remote_end
, sizeof(policy
->cond_remote_end
));
3229 cond_buf_cursor
= necp_buffer_write_tlv(cond_buf_cursor
, NECP_POLICY_CONDITION_REMOTE_ADDR_RANGE
, sizeof(range
), &range
,
3230 cond_buf
, condition_tlv_length
);
3232 struct necp_policy_condition_addr addr
;
3233 addr
.prefix
= policy
->cond_remote_prefix
;
3234 memcpy(&addr
.address
, &policy
->cond_remote_start
, sizeof(policy
->cond_remote_start
));
3235 cond_buf_cursor
= necp_buffer_write_tlv(cond_buf_cursor
, NECP_POLICY_CONDITION_REMOTE_ADDR
, sizeof(addr
), &addr
,
3236 cond_buf
, condition_tlv_length
);
3241 cursor
= necp_buffer_write_tlv(cursor
, NECP_TLV_POLICY_CONDITION
, cond_buf_cursor
- cond_buf
, cond_buf
, tlv_buffer
, total_allocated_bytes
);
3242 if (cond_buf
!= q_cond_buf
) {
3243 FREE(cond_buf
, M_NECP
);
3246 tlv_buffer_pointers
[policy_i
] = tlv_buffer
;
3247 tlv_buffer_lengths
[policy_i
] = (cursor
- tlv_buffer
);
		// This is the length of the TLV for NECP_TLV_POLICY_DUMP
		total_tlv_len += sizeof(u_int8_t) + sizeof(u_int32_t) + (cursor - tlv_buffer);
	}

	lck_rw_done(&necp_kernel_policy_lock);

	if (packet != NULL) {
		u_int32_t total_result_length = sizeof(struct necp_packet_header) + total_tlv_len;

		// Allow malloc to wait, since the total buffer may be large and we are not holding any locks
		MALLOC(result_buf, u_int8_t *, total_result_length, M_NECP, M_WAITOK | M_ZERO);
		if (result_buf == NULL) {
			NECPLOG(LOG_DEBUG, "Failed to allocate result_buffer (%u bytes)", total_result_length);
			REPORT_ERROR(NECP_ERROR_INTERNAL);
		}

		result_buf_cursor = result_buf;
		result_buf_cursor = necp_buffer_write_packet_header(result_buf_cursor, NECP_PACKET_TYPE_POLICY_DUMP_ALL, NECP_PACKET_FLAGS_RESPONSE, message_id);

		for (int i = 0; i < policy_count; i++) {
			if (tlv_buffer_pointers[i] != NULL) {
				result_buf_cursor = necp_buffer_write_tlv(result_buf_cursor, NECP_TLV_POLICY_DUMP, tlv_buffer_lengths[i], tlv_buffer_pointers[i], result_buf, total_result_length);
			}
		}

		if (!necp_send_ctl_data(session, result_buf, result_buf_cursor - result_buf)) {
			NECPLOG(LOG_ERR, "Failed to send response (%u bytes)", result_buf_cursor - result_buf);
		} else {
			NECPLOG(LOG_ERR, "Sent data worth %u bytes. Total result buffer length was %u bytes", result_buf_cursor - result_buf, total_result_length);
		}
	}

	if (out_buffer != 0) {
		if (out_buffer_length < total_tlv_len + sizeof(u_int32_t)) {
			NECPLOG(LOG_DEBUG, "out_buffer_length too small (%u < %u)", out_buffer_length, total_tlv_len + sizeof(u_int32_t));
			REPORT_ERROR(NECP_ERROR_INVALID_TLV);
		}

		// Allow malloc to wait, since the total buffer may be large and we are not holding any locks
		MALLOC(result_buf, u_int8_t *, total_tlv_len + sizeof(u_int32_t), M_NECP, M_WAITOK | M_ZERO);
		if (result_buf == NULL) {
			NECPLOG(LOG_DEBUG, "Failed to allocate result_buffer (%u bytes)", total_tlv_len + sizeof(u_int32_t));
			REPORT_ERROR(NECP_ERROR_INTERNAL);
		}

		// Add four bytes for total length at the start
		memcpy(result_buf, &total_tlv_len, sizeof(u_int32_t));

		result_buf_cursor = result_buf + sizeof(u_int32_t);
		for (int i = 0; i < policy_count; i++) {
			if (tlv_buffer_pointers[i] != NULL) {
				result_buf_cursor = necp_buffer_write_tlv(result_buf_cursor, NECP_TLV_POLICY_DUMP, tlv_buffer_lengths[i], tlv_buffer_pointers[i],
				    result_buf, total_tlv_len + sizeof(u_int32_t));
			}
		}

		int copy_error = copyout(result_buf, out_buffer, total_tlv_len + sizeof(u_int32_t));
		if (copy_error) {
			NECPLOG(LOG_DEBUG, "Failed to copy out result_buffer (%u bytes)", total_tlv_len + sizeof(u_int32_t));
			REPORT_ERROR(NECP_ERROR_INTERNAL);
		}
	}

done:

	if (error_occured) {
		if (packet != NULL) {
			if (!necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_DUMP_ALL, message_id, response_error)) {
				NECPLOG0(LOG_ERR, "Failed to send error response");
			} else {
				NECPLOG0(LOG_ERR, "Sent error response");
			}
		}
		error_code = necp_get_posix_error_for_necp_error(response_error);
	}

	if (result_buf != NULL) {
		FREE(result_buf, M_NECP);
	}

	if (tlv_buffer_pointers != NULL) {
		for (int i = 0; i < policy_count; i++) {
			if (tlv_buffer_pointers[i] != NULL) {
				FREE(tlv_buffer_pointers[i], M_NECP);
				tlv_buffer_pointers[i] = NULL;
			}
		}
		FREE(tlv_buffer_pointers, M_NECP);
	}

	if (tlv_buffer_lengths != NULL) {
		FREE(tlv_buffer_lengths, M_NECP);
	}

#undef RESET_COND_BUF
#undef REPORT_ERROR
#undef UNLOCK_AND_REPORT_ERROR

	return (error_code);
}
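
/*
 * For illustration, a rough sketch of the buffers assembled above (the layout
 * is implied by the write calls; it is not a separately defined structure):
 *
 *   Control-socket reply (packet != NULL):
 *     struct necp_packet_header        // type NECP_PACKET_TYPE_POLICY_DUMP_ALL, flags RESPONSE
 *     NECP_TLV_POLICY_DUMP TLV         // per policy: 1-byte type + 4-byte length + value
 *     ...                              // repeated for each non-NULL tlv_buffer_pointers[i]
 *
 *   Syscall out_buffer variant:
 *     u_int32_t total_tlv_len          // prepended so user space can size its parse loop
 *     NECP_TLV_POLICY_DUMP TLVs        // the same TLVs, copied out with copyout()
 */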
static struct necp_session_policy *
necp_policy_create(struct necp_session *session, necp_policy_order order, u_int8_t *conditions_array, u_int32_t conditions_array_size, u_int8_t *route_rules_array, u_int32_t route_rules_array_size, u_int8_t *result, u_int32_t result_size)
{
	struct necp_session_policy *new_policy = NULL;
	struct necp_session_policy *tmp_policy = NULL;

	if (session == NULL || conditions_array == NULL || result == NULL || result_size == 0) {
		return (NULL);
	}

	MALLOC_ZONE(new_policy, struct necp_session_policy *, sizeof(*new_policy), M_NECP_SESSION_POLICY, M_WAITOK);
	if (new_policy == NULL) {
		return (NULL);
	}

	memset(new_policy, 0, sizeof(*new_policy)); // M_ZERO is not supported for MALLOC_ZONE
	new_policy->applied = FALSE;
	new_policy->pending_deletion = FALSE;
	new_policy->pending_update = FALSE;
	new_policy->order = order;
	new_policy->conditions = conditions_array;
	new_policy->conditions_size = conditions_array_size;
	new_policy->route_rules = route_rules_array;
	new_policy->route_rules_size = route_rules_array_size;
	new_policy->result = result;
	new_policy->result_size = result_size;
	new_policy->id = necp_policy_get_new_id();

	LIST_INSERT_SORTED_ASCENDING(&session->policies, new_policy, chain, order, tmp_policy);

	session->dirty = TRUE;

	NECPLOG(LOG_DEBUG, "Created NECP policy, order %d", order);

	return (new_policy);
}
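
/*
 * Note on buffer ownership: the conditions, route rules, and result arrays
 * passed in here are adopted by the new policy and are later released in
 * necp_policy_delete(), so callers hand them off rather than freeing them.
 * A minimal calling sketch (the local buffer names are hypothetical):
 *
 *   u_int8_t *conditions = ...;   // TLV blob parsed from the session message
 *   u_int8_t *result = ...;       // single result payload
 *   struct necp_session_policy *p =
 *       necp_policy_create(session, order, conditions, conditions_size,
 *                          route_rules, route_rules_size, result, result_size);
 *   // On success, do not FREE() the buffers; the policy owns them now.
 */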
static struct necp_session_policy *
necp_policy_find(struct necp_session *session, necp_policy_id policy_id)
{
	struct necp_session_policy *policy = NULL;
	if (policy_id == 0) {
		return (NULL);
	}

	LIST_FOREACH(policy, &session->policies, chain) {
		if (policy->id == policy_id) {
			return (policy);
		}
	}

	return (NULL);
}
static inline u_int8_t
necp_policy_get_result_type(struct necp_session_policy *policy)
{
	return (policy ? necp_policy_result_get_type_from_buffer(policy->result, policy->result_size) : 0);
}

static inline u_int32_t
necp_policy_get_result_parameter_length(struct necp_session_policy *policy)
{
	return (policy ? necp_policy_result_get_parameter_length_from_buffer(policy->result, policy->result_size) : 0);
}

static bool
necp_policy_get_result_parameter(struct necp_session_policy *policy, u_int8_t *parameter_buffer, u_int32_t parameter_buffer_length)
{
	if (policy) {
		u_int32_t parameter_length = necp_policy_result_get_parameter_length_from_buffer(policy->result, policy->result_size);
		if (parameter_buffer_length >= parameter_length) {
			u_int8_t *parameter = necp_policy_result_get_parameter_pointer_from_buffer(policy->result, policy->result_size);
			if (parameter && parameter_buffer) {
				memcpy(parameter_buffer, parameter, parameter_length);
				return (TRUE);
			}
		}
	}

	return (FALSE);
}
static bool
necp_policy_mark_for_deletion(struct necp_session *session, struct necp_session_policy *policy)
{
	if (session == NULL || policy == NULL) {
		return (FALSE);
	}

	policy->pending_deletion = TRUE;
	session->dirty = TRUE;

	NECPLOG0(LOG_DEBUG, "Marked NECP policy for removal");

	return (TRUE);
}
static bool
necp_policy_mark_all_for_deletion(struct necp_session *session)
{
	struct necp_session_policy *policy = NULL;
	struct necp_session_policy *temp_policy = NULL;

	LIST_FOREACH_SAFE(policy, &session->policies, chain, temp_policy) {
		necp_policy_mark_for_deletion(session, policy);
	}

	return (TRUE);
}
static bool
necp_policy_delete(struct necp_session *session, struct necp_session_policy *policy)
{
	if (session == NULL || policy == NULL) {
		return (FALSE);
	}

	LIST_REMOVE(policy, chain);

	if (policy->result) {
		FREE(policy->result, M_NECP);
		policy->result = NULL;
	}

	if (policy->conditions) {
		FREE(policy->conditions, M_NECP);
		policy->conditions = NULL;
	}

	if (policy->route_rules) {
		FREE(policy->route_rules, M_NECP);
		policy->route_rules = NULL;
	}

	FREE_ZONE(policy, sizeof(*policy), M_NECP_SESSION_POLICY);

	NECPLOG0(LOG_DEBUG, "Removed NECP policy");

	return (TRUE);
}
static bool
necp_policy_unapply(struct necp_session_policy *policy)
{
	int i = 0;
	if (policy == NULL) {
		return (FALSE);
	}

	LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	// Release local uuid mappings
	if (!uuid_is_null(policy->applied_app_uuid)) {
		bool removed_mapping = FALSE;
		if (necp_remove_uuid_app_id_mapping(policy->applied_app_uuid, &removed_mapping, TRUE) && removed_mapping) {
			necp_uuid_app_id_mappings_dirty = TRUE;
			necp_num_uuid_app_id_mappings--;
		}
		uuid_clear(policy->applied_app_uuid);
	}
	if (!uuid_is_null(policy->applied_real_app_uuid)) {
		necp_remove_uuid_app_id_mapping(policy->applied_real_app_uuid, NULL, FALSE);
		uuid_clear(policy->applied_real_app_uuid);
	}
	if (!uuid_is_null(policy->applied_result_uuid)) {
		necp_remove_uuid_service_id_mapping(policy->applied_result_uuid);
		uuid_clear(policy->applied_result_uuid);
	}

	// Release string mappings
	if (policy->applied_account != NULL) {
		necp_remove_string_to_id_mapping(&necp_account_id_list, policy->applied_account);
		FREE(policy->applied_account, M_NECP);
		policy->applied_account = NULL;
	}

	// Release route rule
	if (policy->applied_route_rules_id != 0) {
		necp_remove_route_rule(&necp_route_rules, policy->applied_route_rules_id);
		policy->applied_route_rules_id = 0;
	}

	// Remove socket policies
	for (i = 0; i < MAX_KERNEL_SOCKET_POLICIES; i++) {
		if (policy->kernel_socket_policies[i] != 0) {
			necp_kernel_socket_policy_delete(policy->kernel_socket_policies[i]);
			policy->kernel_socket_policies[i] = 0;
		}
	}

	// Remove IP output policies
	for (i = 0; i < MAX_KERNEL_IP_OUTPUT_POLICIES; i++) {
		if (policy->kernel_ip_output_policies[i] != 0) {
			necp_kernel_ip_output_policy_delete(policy->kernel_ip_output_policies[i]);
			policy->kernel_ip_output_policies[i] = 0;
		}
	}

	policy->applied = FALSE;

	return (TRUE);
}
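
/*
 * necp_policy_unapply() only tears down what necp_policy_apply() derived from
 * the session policy (kernel socket/IP policies, UUID and string mappings, the
 * route rule ID); the session policy itself survives and can be re-applied.
 * necp_policy_delete() above is the complement that releases the policy's own
 * buffers and frees the policy structure. The update path in
 * necp_policy_apply_all() uses the pair directly:
 *
 *   necp_policy_unapply(policy);          // drop derived kernel state
 *   necp_policy_apply(session, policy);   // regenerate it from the stored TLVs
 */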
#define NECP_KERNEL_POLICY_SUBORDER_ID_TUNNEL_CONDITION		0
#define NECP_KERNEL_POLICY_SUBORDER_NON_ID_TUNNEL_CONDITION	1
#define NECP_KERNEL_POLICY_SUBORDER_ID_CONDITION				2
#define NECP_KERNEL_POLICY_SUBORDER_NON_ID_CONDITIONS			3

struct necp_policy_result_ip_tunnel {
	u_int32_t secondary_result;
	char interface_name[IFXNAMSIZ];
} __attribute__((__packed__));

struct necp_policy_result_service {
	uuid_t identifier;
	u_int32_t data;
} __attribute__((__packed__));
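
/*
 * Sketch of how the suborders above are used: one session policy can fan out
 * into several IP-output kernel policies, and the suborder both indexes the
 * slot in kernel_ip_output_policies[] and keeps the variants for the same
 * order distinct when they are sorted:
 *
 *   kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_ID_TUNNEL_CONDITION]      // tunnel loopback, matched by policy ID
 *   kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_NON_ID_TUNNEL_CONDITION]  // tunnel loopback, no policy ID
 *   kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_ID_CONDITION]             // matched by the ID tagged at the socket layer
 *   kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_NON_ID_CONDITIONS]        // plain IP-layer conditions
 *
 * necp_policy_apply() below decides which of these slots to fill based on the
 * policy's conditions and result.
 */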
static bool
necp_policy_apply(struct necp_session *session, struct necp_session_policy *policy)
{
	bool socket_only_conditions = FALSE;
	bool socket_ip_conditions = FALSE;

	bool socket_layer_non_id_conditions = FALSE;
	bool ip_output_layer_non_id_conditions = FALSE;
	bool ip_output_layer_non_id_only = FALSE;
	bool ip_output_layer_id_condition = FALSE;
	bool ip_output_layer_tunnel_condition_from_id = FALSE;
	bool ip_output_layer_tunnel_condition_from_non_id = FALSE;
	necp_kernel_policy_id cond_ip_output_layer_id = NECP_KERNEL_POLICY_ID_NONE;

	u_int32_t master_condition_mask = 0;
	u_int32_t master_condition_negated_mask = 0;
	ifnet_t cond_bound_interface = NULL;
	u_int32_t cond_account_id = 0;
	char *cond_domain = NULL;
	char *cond_custom_entitlement = NULL;
	pid_t cond_pid = 0;
	uid_t cond_uid = 0;
	necp_app_id cond_app_id = 0;
	necp_app_id cond_real_app_id = 0;
	struct necp_policy_condition_tc_range cond_traffic_class;
	cond_traffic_class.start_tc = 0;
	cond_traffic_class.end_tc = 0;
	u_int16_t cond_protocol = 0;
	union necp_sockaddr_union cond_local_start;
	union necp_sockaddr_union cond_local_end;
	u_int8_t cond_local_prefix = 0;
	union necp_sockaddr_union cond_remote_start;
	union necp_sockaddr_union cond_remote_end;
	u_int8_t cond_remote_prefix = 0;
	u_int32_t offset = 0;
	u_int8_t ultimate_result = 0;
	u_int32_t secondary_result = 0;
	necp_kernel_policy_result_parameter secondary_result_parameter;
	memset(&secondary_result_parameter, 0, sizeof(secondary_result_parameter));
	u_int32_t cond_last_interface_index = 0;
	necp_kernel_policy_result_parameter ultimate_result_parameter;
	memset(&ultimate_result_parameter, 0, sizeof(ultimate_result_parameter));

	if (policy == NULL) {
		return (FALSE);
	}

	LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	// Process conditions
	while (offset < policy->conditions_size) {
		u_int32_t length = 0;
		u_int8_t *value = necp_buffer_get_tlv_value(policy->conditions, offset, &length);

		u_int8_t condition_type = necp_policy_condition_get_type_from_buffer(value, length);
		u_int8_t condition_flags = necp_policy_condition_get_flags_from_buffer(value, length);
		bool condition_is_negative = condition_flags & NECP_POLICY_CONDITION_FLAGS_NEGATIVE;
		u_int32_t condition_length = necp_policy_condition_get_value_length_from_buffer(value, length);
		u_int8_t *condition_value = necp_policy_condition_get_value_pointer_from_buffer(value, length);
		switch (condition_type) {
			case NECP_POLICY_CONDITION_DEFAULT: {
				socket_ip_conditions = TRUE;
				break;
			}
			case NECP_POLICY_CONDITION_ALL_INTERFACES: {
				master_condition_mask |= NECP_KERNEL_CONDITION_ALL_INTERFACES;
				socket_ip_conditions = TRUE;
				break;
			}
			case NECP_POLICY_CONDITION_ENTITLEMENT: {
				if (condition_length > 0) {
					if (cond_custom_entitlement == NULL) {
						cond_custom_entitlement = necp_copy_string((char *)condition_value, condition_length);
						if (cond_custom_entitlement != NULL) {
							master_condition_mask |= NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT;
							socket_only_conditions = TRUE;
						}
					}
				} else {
					master_condition_mask |= NECP_KERNEL_CONDITION_ENTITLEMENT;
					socket_only_conditions = TRUE;
				}
				break;
			}
			case NECP_POLICY_CONDITION_DOMAIN: {
				// Make sure there is only one such rule
				if (condition_length > 0 && cond_domain == NULL) {
					cond_domain = necp_create_trimmed_domain((char *)condition_value, condition_length);
					if (cond_domain != NULL) {
						master_condition_mask |= NECP_KERNEL_CONDITION_DOMAIN;
						if (condition_is_negative) {
							master_condition_negated_mask |= NECP_KERNEL_CONDITION_DOMAIN;
						}
						socket_only_conditions = TRUE;
					}
				}
				break;
			}
			case NECP_POLICY_CONDITION_ACCOUNT: {
				// Make sure there is only one such rule
				if (condition_length > 0 && cond_account_id == 0 && policy->applied_account == NULL) {
					char *string = NULL;
					MALLOC(string, char *, condition_length + 1, M_NECP, M_WAITOK);
					if (string != NULL) {
						memcpy(string, condition_value, condition_length);
						string[condition_length] = 0;
						cond_account_id = necp_create_string_to_id_mapping(&necp_account_id_list, string);
						if (cond_account_id != 0) {
							policy->applied_account = string; // Save the string in parent policy
							master_condition_mask |= NECP_KERNEL_CONDITION_ACCOUNT_ID;
							if (condition_is_negative) {
								master_condition_negated_mask |= NECP_KERNEL_CONDITION_ACCOUNT_ID;
							}
							socket_only_conditions = TRUE;
						} else {
							FREE(string, M_NECP);
						}
					}
				}
				break;
			}
			case NECP_POLICY_CONDITION_APPLICATION: {
				// Make sure there is only one such rule, because we save the uuid in the policy
				if (condition_length >= sizeof(uuid_t) && cond_app_id == 0) {
					bool allocated_mapping = FALSE;
					uuid_t application_uuid;
					memcpy(application_uuid, condition_value, sizeof(uuid_t));
					cond_app_id = necp_create_uuid_app_id_mapping(application_uuid, &allocated_mapping, TRUE);
					if (cond_app_id != 0) {
						if (allocated_mapping) {
							necp_uuid_app_id_mappings_dirty = TRUE;
							necp_num_uuid_app_id_mappings++;
						}
						uuid_copy(policy->applied_app_uuid, application_uuid);
						master_condition_mask |= NECP_KERNEL_CONDITION_APP_ID;
						if (condition_is_negative) {
							master_condition_negated_mask |= NECP_KERNEL_CONDITION_APP_ID;
						}
						socket_only_conditions = TRUE;
					}
				}
				break;
			}
			case NECP_POLICY_CONDITION_REAL_APPLICATION: {
				// Make sure there is only one such rule, because we save the uuid in the policy
				if (condition_length >= sizeof(uuid_t) && cond_real_app_id == 0) {
					uuid_t real_application_uuid;
					memcpy(real_application_uuid, condition_value, sizeof(uuid_t));
					cond_real_app_id = necp_create_uuid_app_id_mapping(real_application_uuid, NULL, FALSE);
					if (cond_real_app_id != 0) {
						uuid_copy(policy->applied_real_app_uuid, real_application_uuid);
						master_condition_mask |= NECP_KERNEL_CONDITION_REAL_APP_ID;
						if (condition_is_negative) {
							master_condition_negated_mask |= NECP_KERNEL_CONDITION_REAL_APP_ID;
						}
						socket_only_conditions = TRUE;
					}
				}
				break;
			}
			case NECP_POLICY_CONDITION_PID: {
				if (condition_length >= sizeof(pid_t)) {
					master_condition_mask |= NECP_KERNEL_CONDITION_PID;
					if (condition_is_negative) {
						master_condition_negated_mask |= NECP_KERNEL_CONDITION_PID;
					}
					memcpy(&cond_pid, condition_value, sizeof(cond_pid));
					socket_only_conditions = TRUE;
				}
				break;
			}
			case NECP_POLICY_CONDITION_UID: {
				if (condition_length >= sizeof(uid_t)) {
					master_condition_mask |= NECP_KERNEL_CONDITION_UID;
					if (condition_is_negative) {
						master_condition_negated_mask |= NECP_KERNEL_CONDITION_UID;
					}
					memcpy(&cond_uid, condition_value, sizeof(cond_uid));
					socket_only_conditions = TRUE;
				}
				break;
			}
			case NECP_POLICY_CONDITION_TRAFFIC_CLASS: {
				if (condition_length >= sizeof(struct necp_policy_condition_tc_range)) {
					master_condition_mask |= NECP_KERNEL_CONDITION_TRAFFIC_CLASS;
					if (condition_is_negative) {
						master_condition_negated_mask |= NECP_KERNEL_CONDITION_TRAFFIC_CLASS;
					}
					memcpy(&cond_traffic_class, condition_value, sizeof(cond_traffic_class));
					socket_only_conditions = TRUE;
				}
				break;
			}
			case NECP_POLICY_CONDITION_BOUND_INTERFACE: {
				if (condition_length <= IFXNAMSIZ && condition_length > 0) {
					char interface_name[IFXNAMSIZ];
					memcpy(interface_name, condition_value, condition_length);
					interface_name[condition_length - 1] = 0; // Make sure the string is NULL terminated
					if (ifnet_find_by_name(interface_name, &cond_bound_interface) == 0) {
						master_condition_mask |= NECP_KERNEL_CONDITION_BOUND_INTERFACE;
						if (condition_is_negative) {
							master_condition_negated_mask |= NECP_KERNEL_CONDITION_BOUND_INTERFACE;
						}
					}
					socket_ip_conditions = TRUE;
				}
				break;
			}
			case NECP_POLICY_CONDITION_IP_PROTOCOL: {
				if (condition_length >= sizeof(u_int16_t)) {
					master_condition_mask |= NECP_KERNEL_CONDITION_PROTOCOL;
					if (condition_is_negative) {
						master_condition_negated_mask |= NECP_KERNEL_CONDITION_PROTOCOL;
					}
					memcpy(&cond_protocol, condition_value, sizeof(cond_protocol));
					socket_ip_conditions = TRUE;
				}
				break;
			}
			case NECP_POLICY_CONDITION_LOCAL_ADDR: {
				struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)condition_value;
				if (!necp_address_is_valid(&address_struct->address.sa)) {
					break;
				}

				cond_local_prefix = address_struct->prefix;
				memcpy(&cond_local_start, &address_struct->address, sizeof(address_struct->address));
				master_condition_mask |= NECP_KERNEL_CONDITION_LOCAL_START;
				master_condition_mask |= NECP_KERNEL_CONDITION_LOCAL_PREFIX;
				if (condition_is_negative) {
					master_condition_negated_mask |= NECP_KERNEL_CONDITION_LOCAL_START;
					master_condition_negated_mask |= NECP_KERNEL_CONDITION_LOCAL_PREFIX;
				}
				socket_ip_conditions = TRUE;
				break;
			}
			case NECP_POLICY_CONDITION_REMOTE_ADDR: {
				struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)condition_value;
				if (!necp_address_is_valid(&address_struct->address.sa)) {
					break;
				}

				cond_remote_prefix = address_struct->prefix;
				memcpy(&cond_remote_start, &address_struct->address, sizeof(address_struct->address));
				master_condition_mask |= NECP_KERNEL_CONDITION_REMOTE_START;
				master_condition_mask |= NECP_KERNEL_CONDITION_REMOTE_PREFIX;
				if (condition_is_negative) {
					master_condition_negated_mask |= NECP_KERNEL_CONDITION_REMOTE_START;
					master_condition_negated_mask |= NECP_KERNEL_CONDITION_REMOTE_PREFIX;
				}
				socket_ip_conditions = TRUE;
				break;
			}
			case NECP_POLICY_CONDITION_LOCAL_ADDR_RANGE: {
				struct necp_policy_condition_addr_range *address_struct = (struct necp_policy_condition_addr_range *)(void *)condition_value;
				if (!necp_address_is_valid(&address_struct->start_address.sa) ||
				    !necp_address_is_valid(&address_struct->end_address.sa)) {
					break;
				}

				memcpy(&cond_local_start, &address_struct->start_address, sizeof(address_struct->start_address));
				memcpy(&cond_local_end, &address_struct->end_address, sizeof(address_struct->end_address));
				master_condition_mask |= NECP_KERNEL_CONDITION_LOCAL_START;
				master_condition_mask |= NECP_KERNEL_CONDITION_LOCAL_END;
				if (condition_is_negative) {
					master_condition_negated_mask |= NECP_KERNEL_CONDITION_LOCAL_START;
					master_condition_negated_mask |= NECP_KERNEL_CONDITION_LOCAL_END;
				}
				socket_ip_conditions = TRUE;
				break;
			}
			case NECP_POLICY_CONDITION_REMOTE_ADDR_RANGE: {
				struct necp_policy_condition_addr_range *address_struct = (struct necp_policy_condition_addr_range *)(void *)condition_value;
				if (!necp_address_is_valid(&address_struct->start_address.sa) ||
				    !necp_address_is_valid(&address_struct->end_address.sa)) {
					break;
				}

				memcpy(&cond_remote_start, &address_struct->start_address, sizeof(address_struct->start_address));
				memcpy(&cond_remote_end, &address_struct->end_address, sizeof(address_struct->end_address));
				master_condition_mask |= NECP_KERNEL_CONDITION_REMOTE_START;
				master_condition_mask |= NECP_KERNEL_CONDITION_REMOTE_END;
				if (condition_is_negative) {
					master_condition_negated_mask |= NECP_KERNEL_CONDITION_REMOTE_START;
					master_condition_negated_mask |= NECP_KERNEL_CONDITION_REMOTE_END;
				}
				socket_ip_conditions = TRUE;
				break;
			}
			default: {
				break;
			}
		}

		offset += sizeof(u_int8_t) + sizeof(u_int32_t) + length;
	}

	ultimate_result = necp_policy_get_result_type(policy);
	switch (ultimate_result) {
		case NECP_POLICY_RESULT_PASS: {
			if (socket_only_conditions) { // socket_ip_conditions can be TRUE or FALSE
				socket_layer_non_id_conditions = TRUE;
				ip_output_layer_id_condition = TRUE;
			} else if (socket_ip_conditions) {
				socket_layer_non_id_conditions = TRUE;
				ip_output_layer_id_condition = TRUE;
				ip_output_layer_non_id_conditions = TRUE;
			}
			break;
		}
		case NECP_POLICY_RESULT_DROP: {
			if (socket_only_conditions) { // socket_ip_conditions can be TRUE or FALSE
				socket_layer_non_id_conditions = TRUE;
			} else if (socket_ip_conditions) {
				socket_layer_non_id_conditions = TRUE;
				ip_output_layer_non_id_conditions = TRUE;
				ip_output_layer_non_id_only = TRUE; // Only apply drop to packets that didn't go through socket layer
			}
			break;
		}
		case NECP_POLICY_RESULT_SKIP: {
			u_int32_t skip_policy_order = 0;
			if (necp_policy_get_result_parameter(policy, (u_int8_t *)&skip_policy_order, sizeof(skip_policy_order))) {
				ultimate_result_parameter.skip_policy_order = skip_policy_order;
			}

			if (socket_only_conditions) { // socket_ip_conditions can be TRUE or FALSE
				socket_layer_non_id_conditions = TRUE;
				ip_output_layer_id_condition = TRUE;
			} else if (socket_ip_conditions) {
				socket_layer_non_id_conditions = TRUE;
				ip_output_layer_non_id_conditions = TRUE;
			}
			break;
		}
		case NECP_POLICY_RESULT_SOCKET_DIVERT:
		case NECP_POLICY_RESULT_SOCKET_FILTER: {
			u_int32_t control_unit = 0;
			if (necp_policy_get_result_parameter(policy, (u_int8_t *)&control_unit, sizeof(control_unit))) {
				ultimate_result_parameter.flow_divert_control_unit = control_unit;
			}
			socket_layer_non_id_conditions = TRUE;
			break;
		}
		case NECP_POLICY_RESULT_IP_TUNNEL: {
			struct necp_policy_result_ip_tunnel tunnel_parameters;
			u_int32_t tunnel_parameters_length = necp_policy_get_result_parameter_length(policy);
			if (tunnel_parameters_length > sizeof(u_int32_t) &&
			    tunnel_parameters_length <= sizeof(struct necp_policy_result_ip_tunnel) &&
			    necp_policy_get_result_parameter(policy, (u_int8_t *)&tunnel_parameters, sizeof(tunnel_parameters))) {
				ifnet_t tunnel_interface = NULL;
				tunnel_parameters.interface_name[tunnel_parameters_length - sizeof(u_int32_t) - 1] = 0; // Make sure the string is NULL terminated
				if (ifnet_find_by_name(tunnel_parameters.interface_name, &tunnel_interface) == 0) {
					ultimate_result_parameter.tunnel_interface_index = tunnel_interface->if_index;
					ifnet_release(tunnel_interface);
				}

				secondary_result = tunnel_parameters.secondary_result;
				if (secondary_result) {
					cond_last_interface_index = ultimate_result_parameter.tunnel_interface_index;
				}
			}

			if (socket_only_conditions) { // socket_ip_conditions can be TRUE or FALSE
				socket_layer_non_id_conditions = TRUE;
				ip_output_layer_id_condition = TRUE;
				if (secondary_result) {
					ip_output_layer_tunnel_condition_from_id = TRUE;
				}
			} else if (socket_ip_conditions) {
				socket_layer_non_id_conditions = TRUE;
				ip_output_layer_id_condition = TRUE;
				ip_output_layer_non_id_conditions = TRUE;
				if (secondary_result) {
					ip_output_layer_tunnel_condition_from_id = TRUE;
					ip_output_layer_tunnel_condition_from_non_id = TRUE;
				}
			}
			break;
		}
		case NECP_POLICY_RESULT_TRIGGER:
		case NECP_POLICY_RESULT_TRIGGER_IF_NEEDED:
		case NECP_POLICY_RESULT_TRIGGER_SCOPED:
		case NECP_POLICY_RESULT_NO_TRIGGER_SCOPED: {
			struct necp_policy_result_service service_parameters;
			u_int32_t service_result_length = necp_policy_get_result_parameter_length(policy);
			bool has_extra_service_data = FALSE;
			if (service_result_length >= (sizeof(service_parameters))) {
				has_extra_service_data = TRUE;
			}
			if (necp_policy_get_result_parameter(policy, (u_int8_t *)&service_parameters, sizeof(service_parameters))) {
				ultimate_result_parameter.service.identifier = necp_create_uuid_service_id_mapping(service_parameters.identifier);
				if (ultimate_result_parameter.service.identifier != 0) {
					uuid_copy(policy->applied_result_uuid, service_parameters.identifier);
					socket_layer_non_id_conditions = TRUE;
					if (has_extra_service_data) {
						ultimate_result_parameter.service.data = service_parameters.data;
					} else {
						ultimate_result_parameter.service.data = 0;
					}
				}
			}
			break;
		}
		case NECP_POLICY_RESULT_USE_NETAGENT: {
			uuid_t netagent_uuid;
			if (necp_policy_get_result_parameter(policy, (u_int8_t *)&netagent_uuid, sizeof(netagent_uuid))) {
				ultimate_result_parameter.netagent_id = necp_create_uuid_service_id_mapping(netagent_uuid);
				if (ultimate_result_parameter.netagent_id != 0) {
					uuid_copy(policy->applied_result_uuid, netagent_uuid);
					socket_layer_non_id_conditions = TRUE;
				}
			}
			break;
		}
		case NECP_POLICY_RESULT_SOCKET_SCOPED: {
			u_int32_t interface_name_length = necp_policy_get_result_parameter_length(policy);
			if (interface_name_length <= IFXNAMSIZ && interface_name_length > 0) {
				char interface_name[IFXNAMSIZ];
				ifnet_t scope_interface = NULL;
				necp_policy_get_result_parameter(policy, (u_int8_t *)interface_name, interface_name_length);
				interface_name[interface_name_length - 1] = 0; // Make sure the string is NULL terminated
				if (ifnet_find_by_name(interface_name, &scope_interface) == 0) {
					ultimate_result_parameter.scoped_interface_index = scope_interface->if_index;
					socket_layer_non_id_conditions = TRUE;
					ifnet_release(scope_interface);
				}
			}
			break;
		}
		case NECP_POLICY_RESULT_ROUTE_RULES: {
			if (policy->route_rules != NULL && policy->route_rules_size > 0) {
				u_int32_t route_rule_id = necp_create_route_rule(&necp_route_rules, policy->route_rules, policy->route_rules_size);
				if (route_rule_id > 0) {
					policy->applied_route_rules_id = route_rule_id;
					ultimate_result_parameter.route_rule_id = route_rule_id;
					socket_layer_non_id_conditions = TRUE;
				}
			}
			break;
		}
		default: {
			break;
		}
	}

	if (socket_layer_non_id_conditions) {
		necp_kernel_policy_id policy_id = necp_kernel_socket_policy_add(policy->id, policy->order, session->session_order, session->proc_pid, master_condition_mask, master_condition_negated_mask, cond_app_id, cond_real_app_id, cond_custom_entitlement, cond_account_id, cond_domain, cond_pid, cond_uid, cond_bound_interface, cond_traffic_class, cond_protocol, &cond_local_start, &cond_local_end, cond_local_prefix, &cond_remote_start, &cond_remote_end, cond_remote_prefix, ultimate_result, ultimate_result_parameter);

		if (policy_id == 0) {
			NECPLOG0(LOG_DEBUG, "Error applying socket kernel policy");
			return (FALSE);
		}

		cond_ip_output_layer_id = policy_id;
		policy->kernel_socket_policies[0] = policy_id;
	}

	if (ip_output_layer_non_id_conditions) {
		u_int32_t condition_mask = master_condition_mask;
		if (ip_output_layer_non_id_only) {
			condition_mask |= NECP_KERNEL_CONDITION_POLICY_ID;
		}
		necp_kernel_policy_id policy_id = necp_kernel_ip_output_policy_add(policy->id, policy->order, NECP_KERNEL_POLICY_SUBORDER_NON_ID_CONDITIONS, session->session_order, session->proc_pid, condition_mask, master_condition_negated_mask, NECP_KERNEL_POLICY_ID_NONE, cond_bound_interface, 0, cond_protocol, &cond_local_start, &cond_local_end, cond_local_prefix, &cond_remote_start, &cond_remote_end, cond_remote_prefix, ultimate_result, ultimate_result_parameter);

		if (policy_id == 0) {
			NECPLOG0(LOG_DEBUG, "Error applying IP output kernel policy");
			return (FALSE);
		}

		policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_NON_ID_CONDITIONS] = policy_id;
	}

	if (ip_output_layer_id_condition) {
		necp_kernel_policy_id policy_id = necp_kernel_ip_output_policy_add(policy->id, policy->order, NECP_KERNEL_POLICY_SUBORDER_ID_CONDITION, session->session_order, session->proc_pid, NECP_KERNEL_CONDITION_POLICY_ID | NECP_KERNEL_CONDITION_ALL_INTERFACES, 0, cond_ip_output_layer_id, NULL, 0, 0, NULL, NULL, 0, NULL, NULL, 0, ultimate_result, ultimate_result_parameter);

		if (policy_id == 0) {
			NECPLOG0(LOG_DEBUG, "Error applying IP output kernel policy");
			return (FALSE);
		}

		policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_ID_CONDITION] = policy_id;
	}

	// Extra policies for IP Output tunnels for when packets loop back
	if (ip_output_layer_tunnel_condition_from_id) {
		necp_kernel_policy_id policy_id = necp_kernel_ip_output_policy_add(policy->id, policy->order, NECP_KERNEL_POLICY_SUBORDER_NON_ID_TUNNEL_CONDITION, session->session_order, session->proc_pid, NECP_KERNEL_CONDITION_POLICY_ID | NECP_KERNEL_CONDITION_LAST_INTERFACE | NECP_KERNEL_CONDITION_ALL_INTERFACES, 0, policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_NON_ID_CONDITIONS], NULL, cond_last_interface_index, 0, NULL, NULL, 0, NULL, NULL, 0, secondary_result, secondary_result_parameter);

		if (policy_id == 0) {
			NECPLOG0(LOG_DEBUG, "Error applying IP output kernel policy");
			return (FALSE);
		}

		policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_NON_ID_TUNNEL_CONDITION] = policy_id;
	}

	if (ip_output_layer_tunnel_condition_from_id) {
		necp_kernel_policy_id policy_id = necp_kernel_ip_output_policy_add(policy->id, policy->order, NECP_KERNEL_POLICY_SUBORDER_ID_TUNNEL_CONDITION, session->session_order, session->proc_pid, NECP_KERNEL_CONDITION_POLICY_ID | NECP_KERNEL_CONDITION_LAST_INTERFACE | NECP_KERNEL_CONDITION_ALL_INTERFACES, 0, policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_ID_CONDITION], NULL, cond_last_interface_index, 0, NULL, NULL, 0, NULL, NULL, 0, secondary_result, secondary_result_parameter);

		if (policy_id == 0) {
			NECPLOG0(LOG_DEBUG, "Error applying IP output kernel policy");
			return (FALSE);
		}

		policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_ID_TUNNEL_CONDITION] = policy_id;
	}

	policy->applied = TRUE;
	policy->pending_update = FALSE;
	return (TRUE);
}
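
/*
 * Sketch of the condition TLV wire format consumed by the parsing loop in
 * necp_policy_apply() (the outer sizes follow the offset arithmetic; the inner
 * split is handled by the necp_policy_condition_* helpers):
 *
 *   offset -> u_int8_t  tlv_type;      // outer TLV type
 *             u_int32_t length;        // length of the value that follows
 *             u_int8_t  value[length]; // condition type, condition flags
 *                                      // (e.g. NECP_POLICY_CONDITION_FLAGS_NEGATIVE),
 *                                      // then the condition payload (uuid, pid, sockaddr, ...)
 */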
static bool
necp_policy_apply_all(struct necp_session *session)
{
	struct necp_session_policy *policy = NULL;
	struct necp_session_policy *temp_policy = NULL;
	struct kev_necp_policies_changed_data kev_data;
	kev_data.changed_count = 0;

	lck_rw_lock_exclusive(&necp_kernel_policy_lock);

	// Remove existing applied policies
	if (session->dirty) {
		LIST_FOREACH_SAFE(policy, &session->policies, chain, temp_policy) {
			if (policy->pending_deletion) {
				if (policy->applied) {
					necp_policy_unapply(policy);
				}
				// Delete the policy
				necp_policy_delete(session, policy);
			} else if (!policy->applied) {
				necp_policy_apply(session, policy);
			} else if (policy->pending_update) {
				// Must have been applied, but needs an update. Remove and re-add.
				necp_policy_unapply(policy);
				necp_policy_apply(session, policy);
			}
		}

		necp_kernel_socket_policies_update_uuid_table();
		necp_kernel_socket_policies_reprocess();
		necp_kernel_ip_output_policies_reprocess();

		// Clear dirty bit flags
		session->dirty = FALSE;
	}

	lck_rw_done(&necp_kernel_policy_lock);

	necp_update_all_clients();
	necp_post_change_event(&kev_data);

	NECPLOG0(LOG_DEBUG, "Applied NECP policies");

	return (TRUE);
}
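
/*
 * necp_policy_apply_all() is the single point where pending session edits
 * become kernel state: policies marked for deletion are unapplied and freed,
 * unapplied policies are applied, and updated ones are unapplied and
 * re-applied, all under the exclusive necp_kernel_policy_lock before the
 * lookup maps are rebuilt by the *_reprocess() calls and clients are notified.
 */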
// Kernel Policy Management
// ---------------------
// Kernel policies are derived from session policies

static necp_kernel_policy_id
necp_kernel_policy_get_new_id(bool socket_level)
{
	static necp_kernel_policy_id necp_last_kernel_socket_policy_id = 0;
	static necp_kernel_policy_id necp_last_kernel_ip_policy_id = 0;

	necp_kernel_policy_id newid = NECP_KERNEL_POLICY_ID_NONE;

	LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	if (socket_level) {
		bool wrapped = FALSE;
		do {
			necp_last_kernel_socket_policy_id++;
			if (necp_last_kernel_socket_policy_id < NECP_KERNEL_POLICY_ID_FIRST_VALID_SOCKET ||
			    necp_last_kernel_socket_policy_id >= NECP_KERNEL_POLICY_ID_FIRST_VALID_IP) {
				if (wrapped) {
					// Already wrapped, give up
					NECPLOG0(LOG_ERR, "Failed to find a free socket kernel policy ID.\n");
					return (NECP_KERNEL_POLICY_ID_NONE);
				}
				necp_last_kernel_socket_policy_id = NECP_KERNEL_POLICY_ID_FIRST_VALID_SOCKET;
				wrapped = TRUE;
			}
			newid = necp_last_kernel_socket_policy_id;
		} while (necp_kernel_socket_policy_find(newid) != NULL); // If already used, keep trying
	} else {
		bool wrapped = FALSE;
		do {
			necp_last_kernel_ip_policy_id++;
			if (necp_last_kernel_ip_policy_id < NECP_KERNEL_POLICY_ID_FIRST_VALID_IP) {
				if (wrapped) {
					// Already wrapped, give up
					NECPLOG0(LOG_ERR, "Failed to find a free IP kernel policy ID.\n");
					return (NECP_KERNEL_POLICY_ID_NONE);
				}
				necp_last_kernel_ip_policy_id = NECP_KERNEL_POLICY_ID_FIRST_VALID_IP;
				wrapped = TRUE;
			}
			newid = necp_last_kernel_ip_policy_id;
		} while (necp_kernel_ip_output_policy_find(newid) != NULL); // If already used, keep trying
	}

	if (newid == NECP_KERNEL_POLICY_ID_NONE) {
		NECPLOG0(LOG_ERR, "Allocate kernel policy id failed.\n");
		return (NECP_KERNEL_POLICY_ID_NONE);
	}

	return (newid);
}
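
/*
 * Sketch of the kernel policy ID space implied by the allocator above (the
 * constant values themselves live in the header and are not repeated here):
 *
 *   NECP_KERNEL_POLICY_ID_NONE                                  // never handed out
 *   [NECP_KERNEL_POLICY_ID_FIRST_VALID_SOCKET,
 *    NECP_KERNEL_POLICY_ID_FIRST_VALID_IP)                      // socket-level policies, wraps within this band
 *   [NECP_KERNEL_POLICY_ID_FIRST_VALID_IP, ...)                 // IP-output policies, wraps back to FIRST_VALID_IP
 *
 * An ID is only reused after a full wrap, and the find() probes in the
 * do/while loops guarantee that a returned ID is not currently in use.
 */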
#define NECP_KERNEL_VALID_SOCKET_CONDITIONS (NECP_KERNEL_CONDITION_APP_ID | NECP_KERNEL_CONDITION_REAL_APP_ID | NECP_KERNEL_CONDITION_DOMAIN | NECP_KERNEL_CONDITION_ACCOUNT_ID | NECP_KERNEL_CONDITION_PID | NECP_KERNEL_CONDITION_UID | NECP_KERNEL_CONDITION_ALL_INTERFACES | NECP_KERNEL_CONDITION_BOUND_INTERFACE | NECP_KERNEL_CONDITION_TRAFFIC_CLASS | NECP_KERNEL_CONDITION_PROTOCOL | NECP_KERNEL_CONDITION_LOCAL_START | NECP_KERNEL_CONDITION_LOCAL_END | NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_REMOTE_START | NECP_KERNEL_CONDITION_REMOTE_END | NECP_KERNEL_CONDITION_REMOTE_PREFIX | NECP_KERNEL_CONDITION_ENTITLEMENT | NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT)

static necp_kernel_policy_id
necp_kernel_socket_policy_add(necp_policy_id parent_policy_id, necp_policy_order order, u_int32_t session_order, int session_pid, u_int32_t condition_mask, u_int32_t condition_negated_mask, necp_app_id cond_app_id, necp_app_id cond_real_app_id, char *cond_custom_entitlement, u_int32_t cond_account_id, char *cond_domain, pid_t cond_pid, uid_t cond_uid, ifnet_t cond_bound_interface, struct necp_policy_condition_tc_range cond_traffic_class, u_int16_t cond_protocol, union necp_sockaddr_union *cond_local_start, union necp_sockaddr_union *cond_local_end, u_int8_t cond_local_prefix, union necp_sockaddr_union *cond_remote_start, union necp_sockaddr_union *cond_remote_end, u_int8_t cond_remote_prefix, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter)
{
	struct necp_kernel_socket_policy *new_kernel_policy = NULL;
	struct necp_kernel_socket_policy *tmp_kernel_policy = NULL;

	MALLOC_ZONE(new_kernel_policy, struct necp_kernel_socket_policy *, sizeof(*new_kernel_policy), M_NECP_SOCKET_POLICY, M_WAITOK);
	if (new_kernel_policy == NULL) {
		goto done;
	}

	memset(new_kernel_policy, 0, sizeof(*new_kernel_policy)); // M_ZERO is not supported for MALLOC_ZONE
	new_kernel_policy->parent_policy_id = parent_policy_id;
	new_kernel_policy->id = necp_kernel_policy_get_new_id(true);
	new_kernel_policy->order = order;
	new_kernel_policy->session_order = session_order;
	new_kernel_policy->session_pid = session_pid;

	// Sanitize condition mask
	new_kernel_policy->condition_mask = (condition_mask & NECP_KERNEL_VALID_SOCKET_CONDITIONS);
	if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES) && (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE)) {
		new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_BOUND_INTERFACE;
	}
	if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) && !(new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID)) {
		new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_REAL_APP_ID;
	}
	if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT) && !(new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID)) {
		new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_ENTITLEMENT;
	}
	if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) && (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX)) {
		new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_LOCAL_PREFIX;
	}
	if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) && (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX)) {
		new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_REMOTE_PREFIX;
	}
	new_kernel_policy->condition_negated_mask = condition_negated_mask & new_kernel_policy->condition_mask;

	// Set condition values
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID) {
		new_kernel_policy->cond_app_id = cond_app_id;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
		new_kernel_policy->cond_real_app_id = cond_real_app_id;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT) {
		new_kernel_policy->cond_custom_entitlement = cond_custom_entitlement;
		new_kernel_policy->cond_custom_entitlement_matched = necp_boolean_state_unknown;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID) {
		new_kernel_policy->cond_account_id = cond_account_id;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_DOMAIN) {
		new_kernel_policy->cond_domain = cond_domain;
		new_kernel_policy->cond_domain_dot_count = necp_count_dots(cond_domain, strlen(cond_domain));
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PID) {
		new_kernel_policy->cond_pid = cond_pid;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_UID) {
		new_kernel_policy->cond_uid = cond_uid;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
		if (cond_bound_interface) {
			ifnet_reference(cond_bound_interface);
		}
		new_kernel_policy->cond_bound_interface = cond_bound_interface;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
		new_kernel_policy->cond_traffic_class = cond_traffic_class;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
		new_kernel_policy->cond_protocol = cond_protocol;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
		memcpy(&new_kernel_policy->cond_local_start, cond_local_start, cond_local_start->sa.sa_len);
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
		memcpy(&new_kernel_policy->cond_local_end, cond_local_end, cond_local_end->sa.sa_len);
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
		new_kernel_policy->cond_local_prefix = cond_local_prefix;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
		memcpy(&new_kernel_policy->cond_remote_start, cond_remote_start, cond_remote_start->sa.sa_len);
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
		memcpy(&new_kernel_policy->cond_remote_end, cond_remote_end, cond_remote_end->sa.sa_len);
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
		new_kernel_policy->cond_remote_prefix = cond_remote_prefix;
	}

	new_kernel_policy->result = result;
	memcpy(&new_kernel_policy->result_parameter, &result_parameter, sizeof(result_parameter));

	NECPLOG(LOG_DEBUG, "Added kernel policy: socket, id=%d, mask=%x\n", new_kernel_policy->id, new_kernel_policy->condition_mask);
	LIST_INSERT_SORTED_TWICE_ASCENDING(&necp_kernel_socket_policies, new_kernel_policy, chain, session_order, order, tmp_kernel_policy);
done:
	return (new_kernel_policy ? new_kernel_policy->id : 0);
}
static struct necp_kernel_socket_policy *
necp_kernel_socket_policy_find(necp_kernel_policy_id policy_id)
{
	struct necp_kernel_socket_policy *kernel_policy = NULL;
	struct necp_kernel_socket_policy *tmp_kernel_policy = NULL;

	if (policy_id == 0) {
		return (NULL);
	}

	LIST_FOREACH_SAFE(kernel_policy, &necp_kernel_socket_policies, chain, tmp_kernel_policy) {
		if (kernel_policy->id == policy_id) {
			return (kernel_policy);
		}
	}

	return (NULL);
}

static bool
necp_kernel_socket_policy_delete(necp_kernel_policy_id policy_id)
{
	struct necp_kernel_socket_policy *policy = NULL;

	LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	policy = necp_kernel_socket_policy_find(policy_id);
	if (policy) {
		LIST_REMOVE(policy, chain);

		if (policy->cond_bound_interface) {
			ifnet_release(policy->cond_bound_interface);
			policy->cond_bound_interface = NULL;
		}

		if (policy->cond_domain) {
			FREE(policy->cond_domain, M_NECP);
			policy->cond_domain = NULL;
		}

		if (policy->cond_custom_entitlement) {
			FREE(policy->cond_custom_entitlement, M_NECP);
			policy->cond_custom_entitlement = NULL;
		}

		FREE_ZONE(policy, sizeof(*policy), M_NECP_SOCKET_POLICY);
		return (TRUE);
	}

	return (FALSE);
}
static inline const char *
necp_get_result_description(char *result_string, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter)
{
	uuid_string_t uuid_string;
	switch (result) {
		case NECP_KERNEL_POLICY_RESULT_NONE: {
			snprintf(result_string, MAX_RESULT_STRING_LEN, "None");
			break;
		}
		case NECP_KERNEL_POLICY_RESULT_PASS: {
			snprintf(result_string, MAX_RESULT_STRING_LEN, "Pass");
			break;
		}
		case NECP_KERNEL_POLICY_RESULT_SKIP: {
			snprintf(result_string, MAX_RESULT_STRING_LEN, "Skip (%u)", result_parameter.skip_policy_order);
			break;
		}
		case NECP_KERNEL_POLICY_RESULT_DROP: {
			snprintf(result_string, MAX_RESULT_STRING_LEN, "Drop");
			break;
		}
		case NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT: {
			snprintf(result_string, MAX_RESULT_STRING_LEN, "SocketDivert (%d)", result_parameter.flow_divert_control_unit);
			break;
		}
		case NECP_KERNEL_POLICY_RESULT_SOCKET_FILTER: {
			snprintf(result_string, MAX_RESULT_STRING_LEN, "SocketFilter (%d)", result_parameter.filter_control_unit);
			break;
		}
		case NECP_KERNEL_POLICY_RESULT_IP_TUNNEL: {
			ifnet_t interface = ifindex2ifnet[result_parameter.tunnel_interface_index];
			snprintf(result_string, MAX_RESULT_STRING_LEN, "IPTunnel (%s%d)", ifnet_name(interface), ifnet_unit(interface));
			break;
		}
		case NECP_KERNEL_POLICY_RESULT_IP_FILTER: {
			snprintf(result_string, MAX_RESULT_STRING_LEN, "IPFilter");
			break;
		}
		case NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED: {
			ifnet_t interface = ifindex2ifnet[result_parameter.scoped_interface_index];
			snprintf(result_string, MAX_RESULT_STRING_LEN, "SocketScoped (%s%d)", ifnet_name(interface), ifnet_unit(interface));
			break;
		}
		case NECP_KERNEL_POLICY_RESULT_ROUTE_RULES: {
			int index = 0;
			char interface_names[IFXNAMSIZ][MAX_ROUTE_RULE_INTERFACES];
			struct necp_route_rule *route_rule = necp_lookup_route_rule_locked(&necp_route_rules, result_parameter.route_rule_id);
			if (route_rule != NULL) {
				for (index = 0; index < MAX_ROUTE_RULE_INTERFACES; index++) {
					if (route_rule->exception_if_indices[index] != 0) {
						ifnet_t interface = ifindex2ifnet[route_rule->exception_if_indices[index]];
						snprintf(interface_names[index], IFXNAMSIZ, "%s%d", ifnet_name(interface), ifnet_unit(interface));
					} else {
						memset(interface_names[index], 0, IFXNAMSIZ);
					}
				}
				switch (route_rule->default_action) {
					case NECP_ROUTE_RULE_DENY_INTERFACE:
						snprintf(result_string, MAX_RESULT_STRING_LEN, "RouteRules (Only %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)",
							(route_rule->cellular_action == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? "Cell " : "",
							(route_rule->wifi_action == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? "WiFi " : "",
							(route_rule->wired_action == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? "Wired " : "",
							(route_rule->expensive_action == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? "Exp " : "",
							(route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[0] : "",
							(route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
							(route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[1] : "",
							(route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
							(route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[2] : "",
							(route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
							(route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[3] : "",
							(route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
							(route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[4] : "",
							(route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
							(route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[5] : "",
							(route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
							(route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[6] : "",
							(route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
							(route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[7] : "",
							(route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
							(route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[8] : "",
							(route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
							(route_rule->exception_if_actions[9] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[9] : "");
						break;
					case NECP_ROUTE_RULE_ALLOW_INTERFACE:
						snprintf(result_string, MAX_RESULT_STRING_LEN, "RouteRules (%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)",
							(route_rule->cellular_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!Cell " : "",
							(route_rule->wifi_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!WiFi " : "",
							(route_rule->wired_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!Wired " : "",
							(route_rule->expensive_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!Exp " : "",
							(route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
							(route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[0] : "",
							(route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
							(route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[1] : "",
							(route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
							(route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[2] : "",
							(route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
							(route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[3] : "",
							(route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
							(route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[4] : "",
							(route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
							(route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[5] : "",
							(route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
							(route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[6] : "",
							(route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
							(route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[7] : "",
							(route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
							(route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[8] : "",
							(route_rule->exception_if_actions[9] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
							(route_rule->exception_if_actions[9] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[9] : "");
						break;
					case NECP_ROUTE_RULE_QOS_MARKING:
						snprintf(result_string, MAX_RESULT_STRING_LEN, "RouteRules (QoSMarking %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)",
							(route_rule->cellular_action == NECP_ROUTE_RULE_QOS_MARKING) ? "Cell " : "",
							(route_rule->wifi_action == NECP_ROUTE_RULE_QOS_MARKING) ? "WiFi " : "",
							(route_rule->wired_action == NECP_ROUTE_RULE_QOS_MARKING) ? "Wired " : "",
							(route_rule->expensive_action == NECP_ROUTE_RULE_QOS_MARKING) ? "Exp " : "",
							(route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[0] : "",
							(route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
							(route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[1] : "",
							(route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
							(route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[2] : "",
							(route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
							(route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[3] : "",
							(route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
							(route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[4] : "",
							(route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
							(route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[5] : "",
							(route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
							(route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[6] : "",
							(route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
							(route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[7] : "",
							(route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
							(route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[8] : "",
							(route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
							(route_rule->exception_if_actions[9] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[9] : "");
						break;
					default:
						snprintf(result_string, MAX_RESULT_STRING_LEN, "RouteRules (Unknown)");
						break;
				}
			}
			break;
		}
		case NECP_KERNEL_POLICY_RESULT_USE_NETAGENT: {
			bool found_mapping = FALSE;
			struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.netagent_id);
			if (mapping != NULL) {
				uuid_unparse(mapping->uuid, uuid_string);
				found_mapping = TRUE;
			}
			snprintf(result_string, MAX_RESULT_STRING_LEN, "UseNetAgent (%s)", found_mapping ? uuid_string : "Unknown");
			break;
		}
		case NECP_POLICY_RESULT_TRIGGER: {
			bool found_mapping = FALSE;
			struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.service.identifier);
			if (mapping != NULL) {
				uuid_unparse(mapping->uuid, uuid_string);
				found_mapping = TRUE;
			}
			snprintf(result_string, MAX_RESULT_STRING_LEN, "Trigger (%s.%d)", found_mapping ? uuid_string : "Unknown", result_parameter.service.data);
			break;
		}
		case NECP_POLICY_RESULT_TRIGGER_IF_NEEDED: {
			bool found_mapping = FALSE;
			struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.service.identifier);
			if (mapping != NULL) {
				uuid_unparse(mapping->uuid, uuid_string);
				found_mapping = TRUE;
			}
			snprintf(result_string, MAX_RESULT_STRING_LEN, "TriggerIfNeeded (%s.%d)", found_mapping ? uuid_string : "Unknown", result_parameter.service.data);
			break;
		}
		case NECP_POLICY_RESULT_TRIGGER_SCOPED: {
			bool found_mapping = FALSE;
			struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.service.identifier);
			if (mapping != NULL) {
				uuid_unparse(mapping->uuid, uuid_string);
				found_mapping = TRUE;
			}
			snprintf(result_string, MAX_RESULT_STRING_LEN, "TriggerScoped (%s.%d)", found_mapping ? uuid_string : "Unknown", result_parameter.service.data);
			break;
		}
		case NECP_POLICY_RESULT_NO_TRIGGER_SCOPED: {
			bool found_mapping = FALSE;
			struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.service.identifier);
			if (mapping != NULL) {
				uuid_unparse(mapping->uuid, uuid_string);
				found_mapping = TRUE;
			}
			snprintf(result_string, MAX_RESULT_STRING_LEN, "NoTriggerScoped (%s.%d)", found_mapping ? uuid_string : "Unknown", result_parameter.service.data);
			break;
		}
		default: {
			snprintf(result_string, MAX_RESULT_STRING_LEN, "Unknown %d (%d)", result, result_parameter.tunnel_interface_index);
			break;
		}
	}
	return (result_string);
}
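
/*
 * Example strings produced by necp_get_result_description() (the concrete
 * values and interface names are illustrative only):
 *
 *   "Pass"
 *   "Skip (12)"                      // skip_policy_order
 *   "SocketDivert (3)"               // flow_divert_control_unit
 *   "IPTunnel (utun0)"               // tunnel interface name + unit
 *   "RouteRules (Only WiFi Wired )"  // default deny with per-type allows
 *   "UseNetAgent (<uuid>)"
 */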
static void
necp_kernel_socket_policies_dump_all(void)
{
	struct necp_kernel_socket_policy *policy = NULL;
	int policy_i;
	int app_i;
	char result_string[MAX_RESULT_STRING_LEN];
	char proc_name_string[MAXCOMLEN + 1];
	memset(result_string, 0, MAX_RESULT_STRING_LEN);
	memset(proc_name_string, 0, MAXCOMLEN + 1);

	NECPLOG0(LOG_DEBUG, "NECP Application Policies:\n");
	NECPLOG0(LOG_DEBUG, "-----------\n");
	for (policy_i = 0; necp_kernel_socket_policies_app_layer_map != NULL && necp_kernel_socket_policies_app_layer_map[policy_i] != NULL; policy_i++) {
		policy = necp_kernel_socket_policies_app_layer_map[policy_i];
		proc_name(policy->session_pid, proc_name_string, MAXCOMLEN);
		NECPLOG(LOG_DEBUG, "\t%3d. Policy ID: %5d\tProcess: %10.10s\tOrder: %04d.%04d\tMask: %5x\tResult: %s\n", policy_i, policy->id, proc_name_string, policy->session_order, policy->order, policy->condition_mask, necp_get_result_description(result_string, policy->result, policy->result_parameter));
	}
	if (necp_kernel_socket_policies_app_layer_map[0] != NULL) {
		NECPLOG0(LOG_DEBUG, "-----------\n");
	}

	NECPLOG0(LOG_DEBUG, "NECP Socket Policies:\n");
	NECPLOG0(LOG_DEBUG, "-----------\n");
	for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) {
		NECPLOG(LOG_DEBUG, "\tApp Bucket: %d\n", app_i);
		for (policy_i = 0; necp_kernel_socket_policies_map[app_i] != NULL && (necp_kernel_socket_policies_map[app_i])[policy_i] != NULL; policy_i++) {
			policy = (necp_kernel_socket_policies_map[app_i])[policy_i];
			proc_name(policy->session_pid, proc_name_string, MAXCOMLEN);
			NECPLOG(LOG_DEBUG, "\t%3d. Policy ID: %5d\tProcess: %10.10s\tOrder: %04d.%04d\tMask: %5x\tResult: %s\n", policy_i, policy->id, proc_name_string, policy->session_order, policy->order, policy->condition_mask, necp_get_result_description(result_string, policy->result, policy->result_parameter));
		}
		NECPLOG0(LOG_DEBUG, "-----------\n");
	}
}
static inline bool
necp_kernel_socket_result_is_trigger_service_type(struct necp_kernel_socket_policy *kernel_policy)
{
	return (kernel_policy->result >= NECP_KERNEL_POLICY_RESULT_TRIGGER && kernel_policy->result <= NECP_KERNEL_POLICY_RESULT_NO_TRIGGER_SCOPED);
}

static inline bool
necp_kernel_socket_policy_results_overlap(struct necp_kernel_socket_policy *upper_policy, struct necp_kernel_socket_policy *lower_policy)
{
	if (upper_policy->result == NECP_KERNEL_POLICY_RESULT_DROP) {
		// Drop always cancels out lower policies
		return (TRUE);
	} else if (upper_policy->result == NECP_KERNEL_POLICY_RESULT_SOCKET_FILTER ||
	           upper_policy->result == NECP_KERNEL_POLICY_RESULT_ROUTE_RULES ||
	           upper_policy->result == NECP_KERNEL_POLICY_RESULT_USE_NETAGENT) {
		// Filters and route rules never cancel out lower policies
		return (FALSE);
	} else if (necp_kernel_socket_result_is_trigger_service_type(upper_policy)) {
		// Trigger/Scoping policies can overlap one another, but not other results
		return (necp_kernel_socket_result_is_trigger_service_type(lower_policy));
	} else if (upper_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
		if (upper_policy->session_order != lower_policy->session_order) {
			// A skip cannot override a policy of a different session
			return (FALSE);
		}

		if (upper_policy->result_parameter.skip_policy_order == 0 ||
		    lower_policy->order >= upper_policy->result_parameter.skip_policy_order) {
			// This policy is beyond the skip
			return (FALSE);
		} else {
			// This policy is inside the skip
			return (TRUE);
		}
	}

	// A hard pass, flow divert, tunnel, or scope will currently block out lower policies
	return (TRUE);
}
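
/*
 * Worked example of the skip overlap rule above (orders are hypothetical): a
 * Skip result at order 5 with skip_policy_order 10 overlaps lower policies of
 * the same session whose order is below 10 (they sit inside the skip window),
 * but not policies at order 10 or beyond, and never policies from another
 * session.
 */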
static bool
necp_kernel_socket_policy_is_unnecessary(struct necp_kernel_socket_policy *policy, struct necp_kernel_socket_policy **policy_array, int valid_indices)
{
	bool can_skip = FALSE;
	u_int32_t highest_skip_session_order = 0;
	u_int32_t highest_skip_order = 0;
	int i;
	for (i = 0; i < valid_indices; i++) {
		struct necp_kernel_socket_policy *compared_policy = policy_array[i];

		// For policies in a skip window, we can't mark conflicting policies as unnecessary
		if (can_skip) {
			if (highest_skip_session_order != compared_policy->session_order ||
			    (highest_skip_order != 0 && compared_policy->order >= highest_skip_order)) {
				// If we've moved on to the next session, or passed the skip window
				highest_skip_session_order = 0;
				highest_skip_order = 0;
				can_skip = FALSE;
			} else {
				// If this policy is also a skip, it can increase the skip window
				if (compared_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
					if (compared_policy->result_parameter.skip_policy_order > highest_skip_order) {
						highest_skip_order = compared_policy->result_parameter.skip_policy_order;
					}
				}
				continue;
			}
		}

		if (compared_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
			// This policy is a skip. Set the skip window accordingly
			can_skip = TRUE;
			highest_skip_session_order = compared_policy->session_order;
			highest_skip_order = compared_policy->result_parameter.skip_policy_order;
		}

		// The result of the compared policy must be able to block out this policy result
		if (!necp_kernel_socket_policy_results_overlap(compared_policy, policy)) {
			continue;
		}

		// If new policy matches All Interfaces, compared policy must also
		if ((policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES) && !(compared_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES)) {
			continue;
		}

		// Default makes lower policies unnecessary always
		if (compared_policy->condition_mask == 0) {
			return (TRUE);
		}

		// Compared must be more general than policy, and include only conditions within policy
		if ((policy->condition_mask & compared_policy->condition_mask) != compared_policy->condition_mask) {
			continue;
		}

		// Negative conditions must match for the overlapping conditions
		if ((policy->condition_negated_mask & compared_policy->condition_mask) != (compared_policy->condition_negated_mask & compared_policy->condition_mask)) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_DOMAIN &&
		    strcmp(compared_policy->cond_domain, policy->cond_domain) != 0) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT &&
		    strcmp(compared_policy->cond_custom_entitlement, policy->cond_custom_entitlement) != 0) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID &&
		    compared_policy->cond_account_id != policy->cond_account_id) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID &&
		    compared_policy->cond_policy_id != policy->cond_policy_id) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID &&
		    compared_policy->cond_app_id != policy->cond_app_id) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID &&
		    compared_policy->cond_real_app_id != policy->cond_real_app_id) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_PID &&
		    compared_policy->cond_pid != policy->cond_pid) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_UID &&
		    compared_policy->cond_uid != policy->cond_uid) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE &&
		    compared_policy->cond_bound_interface != policy->cond_bound_interface) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL &&
		    compared_policy->cond_protocol != policy->cond_protocol) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS &&
		    !(compared_policy->cond_traffic_class.start_tc <= policy->cond_traffic_class.start_tc &&
		      compared_policy->cond_traffic_class.end_tc >= policy->cond_traffic_class.end_tc)) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
			if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
				if (!necp_is_range_in_range((struct sockaddr *)&policy->cond_local_start, (struct sockaddr *)&policy->cond_local_end, (struct sockaddr *)&compared_policy->cond_local_start, (struct sockaddr *)&compared_policy->cond_local_end)) {
					continue;
				}
			} else if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
				if (compared_policy->cond_local_prefix > policy->cond_local_prefix ||
				    !necp_is_addr_in_subnet((struct sockaddr *)&policy->cond_local_start, (struct sockaddr *)&compared_policy->cond_local_start, compared_policy->cond_local_prefix)) {
					continue;
				}
			}
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
			if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
				if (!necp_is_range_in_range((struct sockaddr *)&policy->cond_remote_start, (struct sockaddr *)&policy->cond_remote_end, (struct sockaddr *)&compared_policy->cond_remote_start, (struct sockaddr *)&compared_policy->cond_remote_end)) {
4761 } else if (compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_REMOTE_PREFIX
) {
4762 if (compared_policy
->cond_remote_prefix
> policy
->cond_remote_prefix
||
4763 !necp_is_addr_in_subnet((struct sockaddr
*)&policy
->cond_remote_start
, (struct sockaddr
*)&compared_policy
->cond_remote_start
, compared_policy
->cond_remote_prefix
)) {
4776 necp_kernel_socket_policies_reprocess(void)
4779 int bucket_allocation_counts
[NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS
];
4780 int bucket_current_free_index
[NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS
];
4781 int app_layer_allocation_count
= 0;
4782 int app_layer_current_free_index
= 0;
4783 struct necp_kernel_socket_policy
*kernel_policy
= NULL
;
4785 LCK_RW_ASSERT(&necp_kernel_policy_lock
, LCK_RW_ASSERT_EXCLUSIVE
);
4788 necp_kernel_application_policies_condition_mask
= 0;
4789 necp_kernel_socket_policies_condition_mask
= 0;
4790 necp_kernel_application_policies_count
= 0;
4791 necp_kernel_socket_policies_count
= 0;
4792 necp_kernel_socket_policies_non_app_count
= 0;
4794 // Reset all maps to NULL
4795 for (app_i
= 0; app_i
< NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS
; app_i
++) {
4796 if (necp_kernel_socket_policies_map
[app_i
] != NULL
) {
4797 FREE(necp_kernel_socket_policies_map
[app_i
], M_NECP
);
4798 necp_kernel_socket_policies_map
[app_i
] = NULL
;
4802 bucket_allocation_counts
[app_i
] = 0;
4804 if (necp_kernel_socket_policies_app_layer_map
!= NULL
) {
4805 FREE(necp_kernel_socket_policies_app_layer_map
, M_NECP
);
4806 necp_kernel_socket_policies_app_layer_map
= NULL
;
4809 // Create masks and counts
4810 LIST_FOREACH(kernel_policy
, &necp_kernel_socket_policies
, chain
) {
4811 // App layer mask/count
4812 necp_kernel_application_policies_condition_mask
|= kernel_policy
->condition_mask
;
4813 necp_kernel_application_policies_count
++;
4814 app_layer_allocation_count
++;
4816 // Update socket layer bucket mask/counts
4817 necp_kernel_socket_policies_condition_mask
|= kernel_policy
->condition_mask
;
4818 necp_kernel_socket_policies_count
++;
4820 if (!(kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_APP_ID
) ||
4821 kernel_policy
->condition_negated_mask
& NECP_KERNEL_CONDITION_APP_ID
) {
4822 necp_kernel_socket_policies_non_app_count
++;
4823 for (app_i
= 0; app_i
< NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS
; app_i
++) {
4824 bucket_allocation_counts
[app_i
]++;
4827 bucket_allocation_counts
[NECP_SOCKET_MAP_APP_ID_TO_BUCKET(kernel_policy
->cond_app_id
)]++;
4832 for (app_i
= 0; app_i
< NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS
; app_i
++) {
4833 if (bucket_allocation_counts
[app_i
] > 0) {
4834 // Allocate a NULL-terminated array of policy pointers for each bucket
4835 MALLOC(necp_kernel_socket_policies_map
[app_i
], struct necp_kernel_socket_policy
**, sizeof(struct necp_kernel_socket_policy
*) * (bucket_allocation_counts
[app_i
] + 1), M_NECP
, M_WAITOK
);
4836 if (necp_kernel_socket_policies_map
[app_i
] == NULL
) {
4840 // Initialize the first entry to NULL
4841 (necp_kernel_socket_policies_map
[app_i
])[0] = NULL
;
4843 bucket_current_free_index
[app_i
] = 0;
4845 MALLOC(necp_kernel_socket_policies_app_layer_map
, struct necp_kernel_socket_policy
**, sizeof(struct necp_kernel_socket_policy
*) * (app_layer_allocation_count
+ 1), M_NECP
, M_WAITOK
);
4846 if (necp_kernel_socket_policies_app_layer_map
== NULL
) {
4849 necp_kernel_socket_policies_app_layer_map
[0] = NULL
;
4852 LIST_FOREACH(kernel_policy
, &necp_kernel_socket_policies
, chain
) {
4853 // Insert pointers into map
4854 if (!(kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_APP_ID
) ||
4855 kernel_policy
->condition_negated_mask
& NECP_KERNEL_CONDITION_APP_ID
) {
4856 for (app_i
= 0; app_i
< NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS
; app_i
++) {
4857 if (!necp_kernel_socket_policy_is_unnecessary(kernel_policy
, necp_kernel_socket_policies_map
[app_i
], bucket_current_free_index
[app_i
])) {
4858 (necp_kernel_socket_policies_map
[app_i
])[(bucket_current_free_index
[app_i
])] = kernel_policy
;
4859 bucket_current_free_index
[app_i
]++;
4860 (necp_kernel_socket_policies_map
[app_i
])[(bucket_current_free_index
[app_i
])] = NULL
;
4864 app_i
= NECP_SOCKET_MAP_APP_ID_TO_BUCKET(kernel_policy
->cond_app_id
);
4865 if (!necp_kernel_socket_policy_is_unnecessary(kernel_policy
, necp_kernel_socket_policies_map
[app_i
], bucket_current_free_index
[app_i
])) {
4866 (necp_kernel_socket_policies_map
[app_i
])[(bucket_current_free_index
[app_i
])] = kernel_policy
;
4867 bucket_current_free_index
[app_i
]++;
4868 (necp_kernel_socket_policies_map
[app_i
])[(bucket_current_free_index
[app_i
])] = NULL
;
4872 if (!necp_kernel_socket_policy_is_unnecessary(kernel_policy
, necp_kernel_socket_policies_app_layer_map
, app_layer_current_free_index
)) {
4873 necp_kernel_socket_policies_app_layer_map
[app_layer_current_free_index
] = kernel_policy
;
4874 app_layer_current_free_index
++;
4875 necp_kernel_socket_policies_app_layer_map
[app_layer_current_free_index
] = NULL
;
4878 necp_kernel_socket_policies_dump_all();
4879 BUMP_KERNEL_SOCKET_POLICIES_GENERATION_COUNT();
4883 // Free memory, reset masks to 0
4884 necp_kernel_application_policies_condition_mask
= 0;
4885 necp_kernel_socket_policies_condition_mask
= 0;
4886 necp_kernel_application_policies_count
= 0;
4887 necp_kernel_socket_policies_count
= 0;
4888 necp_kernel_socket_policies_non_app_count
= 0;
4889 for (app_i
= 0; app_i
< NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS
; app_i
++) {
4890 if (necp_kernel_socket_policies_map
[app_i
] != NULL
) {
4891 FREE(necp_kernel_socket_policies_map
[app_i
], M_NECP
);
4892 necp_kernel_socket_policies_map
[app_i
] = NULL
;
4895 if (necp_kernel_socket_policies_app_layer_map
!= NULL
) {
4896 FREE(necp_kernel_socket_policies_app_layer_map
, M_NECP
);
4897 necp_kernel_socket_policies_app_layer_map
= NULL
;
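/*
 * Editor's illustrative sketch (not part of the original source, not compiled):
 * the reprocess routine stores each socket-policy bucket as a NULL-terminated
 * array of policy pointers, so readers can walk a bucket without a separate
 * count. 'policy_bucket' is a hypothetical stand-in for one entry of
 * necp_kernel_socket_policies_map.
 */
#if 0
static int
count_bucket_policies(struct necp_kernel_socket_policy **policy_bucket)
{
	int count = 0;
	int i;
	if (policy_bucket == NULL) {
		return 0; /* empty buckets are left as NULL pointers */
	}
	for (i = 0; policy_bucket[i] != NULL; i++) {
		count++; /* the terminating NULL entry marks the end of the bucket */
	}
	return count;
}
#endif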
static u_int32_t
necp_get_new_string_id(void)
{
	u_int32_t newid = 0;

	LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	necp_last_string_id++;
	if (necp_last_string_id < 1) {
		necp_last_string_id = 1;
	}

	newid = necp_last_string_id;
	if (newid == 0) {
		NECPLOG0(LOG_DEBUG, "Allocate string id failed.\n");
		return (0);
	}

	return (newid);
}

static struct necp_string_id_mapping *
necp_lookup_string_to_id_locked(struct necp_string_id_mapping_list *list, char *string)
{
	struct necp_string_id_mapping *searchentry = NULL;
	struct necp_string_id_mapping *foundentry = NULL;

	LIST_FOREACH(searchentry, list, chain) {
		if (strcmp(searchentry->string, string) == 0) {
			foundentry = searchentry;
			break;
		}
	}

	return (foundentry);
}
static struct necp_string_id_mapping *
necp_lookup_string_with_id_locked(struct necp_string_id_mapping_list *list, u_int32_t local_id)
{
	struct necp_string_id_mapping *searchentry = NULL;
	struct necp_string_id_mapping *foundentry = NULL;

	LIST_FOREACH(searchentry, list, chain) {
		if (searchentry->id == local_id) {
			foundentry = searchentry;
			break;
		}
	}

	return (foundentry);
}
static u_int32_t
necp_create_string_to_id_mapping(struct necp_string_id_mapping_list *list, char *string)
{
	u_int32_t string_id = 0;
	struct necp_string_id_mapping *existing_mapping = NULL;

	LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	existing_mapping = necp_lookup_string_to_id_locked(list, string);
	if (existing_mapping != NULL) {
		string_id = existing_mapping->id;
		existing_mapping->refcount++;
	} else {
		struct necp_string_id_mapping *new_mapping = NULL;
		MALLOC(new_mapping, struct necp_string_id_mapping *, sizeof(struct necp_string_id_mapping), M_NECP, M_WAITOK);
		if (new_mapping != NULL) {
			memset(new_mapping, 0, sizeof(struct necp_string_id_mapping));

			size_t length = strlen(string) + 1;
			MALLOC(new_mapping->string, char *, length, M_NECP, M_WAITOK);
			if (new_mapping->string != NULL) {
				memcpy(new_mapping->string, string, length);
				new_mapping->id = necp_get_new_string_id();
				new_mapping->refcount = 1;
				LIST_INSERT_HEAD(list, new_mapping, chain);
				string_id = new_mapping->id;
			} else {
				FREE(new_mapping, M_NECP);
				new_mapping = NULL;
			}
		}
	}

	return (string_id);
}
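/*
 * Editor's illustrative sketch (not part of the original source, not compiled):
 * intended use of the refcounted string <-> id table. Creating the same string
 * twice returns the same id and bumps the refcount; each remove drops one
 * reference, and the entry is freed when the count reaches zero. Both calls
 * assume necp_kernel_policy_lock is held exclusively.
 */
#if 0
static void
account_id_mapping_example(struct necp_string_id_mapping_list *list)
{
	char account[] = "com.example.account"; /* hypothetical account string */

	u_int32_t first  = necp_create_string_to_id_mapping(list, account);
	u_int32_t second = necp_create_string_to_id_mapping(list, account);
	/* first == second: the second call only incremented the refcount */

	necp_remove_string_to_id_mapping(list, account); /* refcount 2 -> 1 */
	necp_remove_string_to_id_mapping(list, account); /* refcount 1 -> 0, entry freed */
}
#endif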
static bool
necp_remove_string_to_id_mapping(struct necp_string_id_mapping_list *list, char *string)
{
	struct necp_string_id_mapping *existing_mapping = NULL;

	LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	existing_mapping = necp_lookup_string_to_id_locked(list, string);
	if (existing_mapping != NULL) {
		if (--existing_mapping->refcount == 0) {
			LIST_REMOVE(existing_mapping, chain);
			FREE(existing_mapping->string, M_NECP);
			FREE(existing_mapping, M_NECP);
		}
		return (TRUE);
	}

	return (FALSE);
}
static u_int32_t
necp_get_new_route_rule_id(void)
{
	u_int32_t newid = 0;

	LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	necp_last_route_rule_id++;
	if (necp_last_route_rule_id < 1 || necp_last_route_rule_id > UINT16_MAX) {
		necp_last_route_rule_id = 1;
	}

	newid = necp_last_route_rule_id;
	if (newid == 0) {
		NECPLOG0(LOG_DEBUG, "Allocate route rule id failed.\n");
		return (0);
	}

	return (newid);
}

static u_int32_t
necp_get_new_aggregate_route_rule_id(void)
{
	u_int32_t newid = 0;

	LCK_RW_ASSERT(&necp_route_rule_lock, LCK_RW_ASSERT_EXCLUSIVE);

	necp_last_aggregate_route_rule_id++;
	if (necp_last_aggregate_route_rule_id <= UINT16_MAX) {
		necp_last_aggregate_route_rule_id = UINT16_MAX + 1;
	}

	newid = necp_last_aggregate_route_rule_id;
	if (newid == 0) {
		NECPLOG0(LOG_DEBUG, "Allocate aggregate route rule id failed.\n");
		return (0);
	}

	return (newid);
}
static struct necp_route_rule *
necp_lookup_route_rule_locked(struct necp_route_rule_list *list, u_int32_t route_rule_id)
{
	struct necp_route_rule *searchentry = NULL;
	struct necp_route_rule *foundentry = NULL;

	LIST_FOREACH(searchentry, list, chain) {
		if (searchentry->id == route_rule_id) {
			foundentry = searchentry;
			break;
		}
	}

	return (foundentry);
}
5068 static struct necp_route_rule
*
5069 necp_lookup_route_rule_by_contents_locked(struct necp_route_rule_list
*list
, u_int32_t default_action
, u_int8_t cellular_action
, u_int8_t wifi_action
, u_int8_t wired_action
, u_int8_t expensive_action
, u_int32_t
*if_indices
, u_int8_t
*if_actions
)
5071 struct necp_route_rule
*searchentry
= NULL
;
5072 struct necp_route_rule
*foundentry
= NULL
;
5074 LIST_FOREACH(searchentry
, list
, chain
) {
5075 if (searchentry
->default_action
== default_action
&&
5076 searchentry
->cellular_action
== cellular_action
&&
5077 searchentry
->wifi_action
== wifi_action
&&
5078 searchentry
->wired_action
== wired_action
&&
5079 searchentry
->expensive_action
== expensive_action
) {
5080 bool match_failed
= FALSE
;
5085 for (index_a
= 0; index_a
< MAX_ROUTE_RULE_INTERFACES
; index_a
++) {
5086 bool found_index
= FALSE
;
5087 if (searchentry
->exception_if_indices
[index_a
] == 0) {
5091 for (index_b
= 0; index_b
< MAX_ROUTE_RULE_INTERFACES
; index_b
++) {
5092 if (if_indices
[index_b
] == 0) {
5095 if (index_b
>= count_b
) {
5096 count_b
= index_b
+ 1;
5098 if (searchentry
->exception_if_indices
[index_a
] == if_indices
[index_b
] &&
5099 searchentry
->exception_if_actions
[index_a
] == if_actions
[index_b
]) {
5105 match_failed
= TRUE
;
5109 if (!match_failed
&& count_a
== count_b
) {
5110 foundentry
= searchentry
;
5116 return (foundentry
);
5120 necp_create_route_rule(struct necp_route_rule_list
*list
, u_int8_t
*route_rules_array
, u_int32_t route_rules_array_size
)
5123 u_int32_t route_rule_id
= 0;
5124 struct necp_route_rule
*existing_rule
= NULL
;
5125 u_int32_t default_action
= NECP_ROUTE_RULE_ALLOW_INTERFACE
;
5126 u_int8_t cellular_action
= NECP_ROUTE_RULE_NONE
;
5127 u_int8_t wifi_action
= NECP_ROUTE_RULE_NONE
;
5128 u_int8_t wired_action
= NECP_ROUTE_RULE_NONE
;
5129 u_int8_t expensive_action
= NECP_ROUTE_RULE_NONE
;
5130 u_int32_t if_indices
[MAX_ROUTE_RULE_INTERFACES
];
5131 size_t num_valid_indices
= 0;
5132 memset(&if_indices
, 0, sizeof(if_indices
));
5133 u_int8_t if_actions
[MAX_ROUTE_RULE_INTERFACES
];
5134 memset(&if_actions
, 0, sizeof(if_actions
));
5136 LCK_RW_ASSERT(&necp_kernel_policy_lock
, LCK_RW_ASSERT_EXCLUSIVE
);
5138 if (route_rules_array
== NULL
|| route_rules_array_size
== 0) {
5143 while (offset
< route_rules_array_size
) {
5144 ifnet_t rule_interface
= NULL
;
5145 char interface_name
[IFXNAMSIZ
];
5146 u_int32_t length
= 0;
5147 u_int8_t
*value
= necp_buffer_get_tlv_value(route_rules_array
, offset
, &length
);
5149 u_int8_t rule_type
= necp_policy_condition_get_type_from_buffer(value
, length
);
5150 u_int8_t rule_flags
= necp_policy_condition_get_flags_from_buffer(value
, length
);
5151 u_int32_t rule_length
= necp_policy_condition_get_value_length_from_buffer(value
, length
);
5152 u_int8_t
*rule_value
= necp_policy_condition_get_value_pointer_from_buffer(value
, length
);
5154 if (rule_type
== NECP_ROUTE_RULE_NONE
) {
5155 // Don't allow an explicit rule to be None action
5159 if (rule_length
== 0) {
5160 if (rule_flags
& NECP_ROUTE_RULE_FLAG_CELLULAR
) {
5161 cellular_action
= rule_type
;
5163 if (rule_flags
& NECP_ROUTE_RULE_FLAG_WIFI
) {
5164 wifi_action
= rule_type
;
5166 if (rule_flags
& NECP_ROUTE_RULE_FLAG_WIRED
) {
5167 wired_action
= rule_type
;
5169 if (rule_flags
& NECP_ROUTE_RULE_FLAG_EXPENSIVE
) {
5170 expensive_action
= rule_type
;
5172 if (rule_flags
== 0) {
5173 default_action
= rule_type
;
5175 offset
+= sizeof(u_int8_t
) + sizeof(u_int32_t
) + length
;
5179 if (num_valid_indices
>= MAX_ROUTE_RULE_INTERFACES
) {
5180 offset
+= sizeof(u_int8_t
) + sizeof(u_int32_t
) + length
;
5184 if (rule_length
<= IFXNAMSIZ
) {
5185 memcpy(interface_name
, rule_value
, rule_length
);
5186 interface_name
[rule_length
- 1] = 0; // Make sure the string is NULL terminated
5187 if (ifnet_find_by_name(interface_name
, &rule_interface
) == 0) {
5188 if_actions
[num_valid_indices
] = rule_type
;
5189 if_indices
[num_valid_indices
++] = rule_interface
->if_index
;
5190 ifnet_release(rule_interface
);
5193 offset
+= sizeof(u_int8_t
) + sizeof(u_int32_t
) + length
;
5196 existing_rule
= necp_lookup_route_rule_by_contents_locked(list
, default_action
, cellular_action
, wifi_action
, wired_action
, expensive_action
, if_indices
, if_actions
);
5197 if (existing_rule
!= NULL
) {
5198 route_rule_id
= existing_rule
->id
;
5199 existing_rule
->refcount
++;
5201 struct necp_route_rule
*new_rule
= NULL
;
5202 MALLOC(new_rule
, struct necp_route_rule
*, sizeof(struct necp_route_rule
), M_NECP
, M_WAITOK
);
5203 if (new_rule
!= NULL
) {
5204 memset(new_rule
, 0, sizeof(struct necp_route_rule
));
5205 route_rule_id
= new_rule
->id
= necp_get_new_route_rule_id();
5206 new_rule
->default_action
= default_action
;
5207 new_rule
->cellular_action
= cellular_action
;
5208 new_rule
->wifi_action
= wifi_action
;
5209 new_rule
->wired_action
= wired_action
;
5210 new_rule
->expensive_action
= expensive_action
;
5211 memcpy(&new_rule
->exception_if_indices
, &if_indices
, sizeof(if_indices
));
5212 memcpy(&new_rule
->exception_if_actions
, &if_actions
, sizeof(if_actions
));
5213 new_rule
->refcount
= 1;
5214 LIST_INSERT_HEAD(list
, new_rule
, chain
);
5217 return (route_rule_id
);
static void
necp_remove_aggregate_route_rule_for_id(u_int32_t rule_id)
{
	if (rule_id) {
		lck_rw_lock_exclusive(&necp_route_rule_lock);

		struct necp_aggregate_route_rule *existing_rule = NULL;
		struct necp_aggregate_route_rule *tmp_rule = NULL;

		LIST_FOREACH_SAFE(existing_rule, &necp_aggregate_route_rules, chain, tmp_rule) {
			int index = 0;
			for (index = 0; index < MAX_AGGREGATE_ROUTE_RULES; index++) {
				u_int32_t route_rule_id = existing_rule->rule_ids[index];
				if (route_rule_id == rule_id) {
					LIST_REMOVE(existing_rule, chain);
					FREE(existing_rule, M_NECP);
					break;
				}
			}
		}

		lck_rw_done(&necp_route_rule_lock);
	}
}

static bool
necp_remove_route_rule(struct necp_route_rule_list *list, u_int32_t route_rule_id)
{
	struct necp_route_rule *existing_rule = NULL;

	LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	existing_rule = necp_lookup_route_rule_locked(list, route_rule_id);
	if (existing_rule != NULL) {
		if (--existing_rule->refcount == 0) {
			necp_remove_aggregate_route_rule_for_id(existing_rule->id);
			LIST_REMOVE(existing_rule, chain);
			FREE(existing_rule, M_NECP);
		}
		return (TRUE);
	}

	return (FALSE);
}
static struct necp_aggregate_route_rule *
necp_lookup_aggregate_route_rule_locked(u_int32_t route_rule_id)
{
	struct necp_aggregate_route_rule *searchentry = NULL;
	struct necp_aggregate_route_rule *foundentry = NULL;

	lck_rw_lock_shared(&necp_route_rule_lock);

	LIST_FOREACH(searchentry, &necp_aggregate_route_rules, chain) {
		if (searchentry->id == route_rule_id) {
			foundentry = searchentry;
			break;
		}
	}

	lck_rw_done(&necp_route_rule_lock);

	return (foundentry);
}
static u_int32_t
necp_create_aggregate_route_rule(u_int32_t *rule_ids)
{
	u_int32_t aggregate_route_rule_id = 0;
	struct necp_aggregate_route_rule *new_rule = NULL;
	struct necp_aggregate_route_rule *existing_rule = NULL;

	LIST_FOREACH(existing_rule, &necp_aggregate_route_rules, chain) {
		if (memcmp(existing_rule->rule_ids, rule_ids, (sizeof(u_int32_t) * MAX_AGGREGATE_ROUTE_RULES)) == 0) {
			return (existing_rule->id);
		}
	}

	lck_rw_lock_exclusive(&necp_route_rule_lock);

	LIST_FOREACH(existing_rule, &necp_aggregate_route_rules, chain) {
		// Re-check, in case something else created the rule while we are waiting to lock
		if (memcmp(existing_rule->rule_ids, rule_ids, (sizeof(u_int32_t) * MAX_AGGREGATE_ROUTE_RULES)) == 0) {
			lck_rw_done(&necp_route_rule_lock);
			return (existing_rule->id);
		}
	}

	MALLOC(new_rule, struct necp_aggregate_route_rule *, sizeof(struct necp_aggregate_route_rule), M_NECP, M_WAITOK);
	if (new_rule != NULL) {
		memset(new_rule, 0, sizeof(struct necp_aggregate_route_rule));
		aggregate_route_rule_id = new_rule->id = necp_get_new_aggregate_route_rule_id();
		new_rule->id = aggregate_route_rule_id;
		memcpy(new_rule->rule_ids, rule_ids, (sizeof(u_int32_t) * MAX_AGGREGATE_ROUTE_RULES));
		LIST_INSERT_HEAD(&necp_aggregate_route_rules, new_rule, chain);
	}
	lck_rw_done(&necp_route_rule_lock);

	return (aggregate_route_rule_id);
}
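/*
 * Editor's illustrative sketch (not part of the original source, not compiled):
 * callers build a fixed-size, zero-padded array of individual route rule ids
 * and pass it to necp_create_aggregate_route_rule(), which either returns the
 * id of an identical existing aggregate or allocates a new one above UINT16_MAX.
 * 'rule_a' and 'rule_b' stand in for ids returned by necp_create_route_rule().
 */
#if 0
static u_int32_t
build_aggregate_example(u_int32_t rule_a, u_int32_t rule_b)
{
	u_int32_t rule_ids[MAX_AGGREGATE_ROUTE_RULES];
	memset(rule_ids, 0, sizeof(rule_ids)); /* unused slots must stay zero so memcmp dedup works */
	rule_ids[0] = rule_a;
	rule_ids[1] = rule_b;
	return necp_create_aggregate_route_rule(rule_ids);
}
#endif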
#define NECP_NULL_SERVICE_ID 1
static u_int32_t
necp_get_new_uuid_id(void)
{
	u_int32_t newid = 0;

	LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	necp_last_uuid_id++;
	if (necp_last_uuid_id < (NECP_NULL_SERVICE_ID + 1)) {
		necp_last_uuid_id = (NECP_NULL_SERVICE_ID + 1);
	}

	newid = necp_last_uuid_id;
	if (newid == 0) {
		NECPLOG0(LOG_DEBUG, "Allocate uuid id failed.\n");
		return (0);
	}

	return (newid);
}
static struct necp_uuid_id_mapping *
necp_uuid_lookup_app_id_locked(uuid_t uuid)
{
	struct necp_uuid_id_mapping *searchentry = NULL;
	struct necp_uuid_id_mapping *foundentry = NULL;

	LIST_FOREACH(searchentry, APPUUIDHASH(uuid), chain) {
		if (uuid_compare(searchentry->uuid, uuid) == 0) {
			foundentry = searchentry;
			break;
		}
	}

	return (foundentry);
}
static struct necp_uuid_id_mapping *
necp_uuid_lookup_uuid_with_app_id_locked(u_int32_t local_id)
{
	struct necp_uuid_id_mapping *searchentry = NULL;
	struct necp_uuid_id_mapping *foundentry = NULL;

	struct necp_uuid_id_mapping_head *uuid_list_head = NULL;
	for (uuid_list_head = &necp_uuid_app_id_hashtbl[necp_uuid_app_id_hash_num_buckets - 1]; uuid_list_head >= necp_uuid_app_id_hashtbl; uuid_list_head--) {
		LIST_FOREACH(searchentry, uuid_list_head, chain) {
			if (searchentry->id == local_id) {
				foundentry = searchentry;
				break;
			}
		}
	}

	return (foundentry);
}
5379 necp_create_uuid_app_id_mapping(uuid_t uuid
, bool *allocated_mapping
, bool uuid_policy_table
)
5381 u_int32_t local_id
= 0;
5382 struct necp_uuid_id_mapping
*existing_mapping
= NULL
;
5384 LCK_RW_ASSERT(&necp_kernel_policy_lock
, LCK_RW_ASSERT_EXCLUSIVE
);
5386 if (allocated_mapping
) {
5387 *allocated_mapping
= FALSE
;
5390 existing_mapping
= necp_uuid_lookup_app_id_locked(uuid
);
5391 if (existing_mapping
!= NULL
) {
5392 local_id
= existing_mapping
->id
;
5393 existing_mapping
->refcount
++;
5394 if (uuid_policy_table
) {
5395 existing_mapping
->table_refcount
++;
5398 struct necp_uuid_id_mapping
*new_mapping
= NULL
;
5399 MALLOC(new_mapping
, struct necp_uuid_id_mapping
*, sizeof(*new_mapping
), M_NECP
, M_WAITOK
);
5400 if (new_mapping
!= NULL
) {
5401 uuid_copy(new_mapping
->uuid
, uuid
);
5402 new_mapping
->id
= necp_get_new_uuid_id();
5403 new_mapping
->refcount
= 1;
5404 if (uuid_policy_table
) {
5405 new_mapping
->table_refcount
= 1;
5407 new_mapping
->table_refcount
= 0;
5410 LIST_INSERT_HEAD(APPUUIDHASH(uuid
), new_mapping
, chain
);
5412 if (allocated_mapping
) {
5413 *allocated_mapping
= TRUE
;
5416 local_id
= new_mapping
->id
;
5424 necp_remove_uuid_app_id_mapping(uuid_t uuid
, bool *removed_mapping
, bool uuid_policy_table
)
5426 struct necp_uuid_id_mapping
*existing_mapping
= NULL
;
5428 LCK_RW_ASSERT(&necp_kernel_policy_lock
, LCK_RW_ASSERT_EXCLUSIVE
);
5430 if (removed_mapping
) {
5431 *removed_mapping
= FALSE
;
5434 existing_mapping
= necp_uuid_lookup_app_id_locked(uuid
);
5435 if (existing_mapping
!= NULL
) {
5436 if (uuid_policy_table
) {
5437 existing_mapping
->table_refcount
--;
5439 if (--existing_mapping
->refcount
== 0) {
5440 LIST_REMOVE(existing_mapping
, chain
);
5441 FREE(existing_mapping
, M_NECP
);
5442 if (removed_mapping
) {
5443 *removed_mapping
= TRUE
;
static struct necp_uuid_id_mapping *
necp_uuid_get_null_service_id_mapping(void)
{
	static struct necp_uuid_id_mapping null_mapping;
	uuid_clear(null_mapping.uuid);
	null_mapping.id = NECP_NULL_SERVICE_ID;

	return (&null_mapping);
}

static struct necp_uuid_id_mapping *
necp_uuid_lookup_service_id_locked(uuid_t uuid)
{
	struct necp_uuid_id_mapping *searchentry = NULL;
	struct necp_uuid_id_mapping *foundentry = NULL;

	if (uuid_is_null(uuid)) {
		return necp_uuid_get_null_service_id_mapping();
	}

	LIST_FOREACH(searchentry, &necp_uuid_service_id_list, chain) {
		if (uuid_compare(searchentry->uuid, uuid) == 0) {
			foundentry = searchentry;
			break;
		}
	}

	return (foundentry);
}
static struct necp_uuid_id_mapping *
necp_uuid_lookup_uuid_with_service_id_locked(u_int32_t local_id)
{
	struct necp_uuid_id_mapping *searchentry = NULL;
	struct necp_uuid_id_mapping *foundentry = NULL;

	if (local_id == NECP_NULL_SERVICE_ID) {
		return necp_uuid_get_null_service_id_mapping();
	}

	LIST_FOREACH(searchentry, &necp_uuid_service_id_list, chain) {
		if (searchentry->id == local_id) {
			foundentry = searchentry;
			break;
		}
	}

	return (foundentry);
}
5503 necp_create_uuid_service_id_mapping(uuid_t uuid
)
5505 u_int32_t local_id
= 0;
5506 struct necp_uuid_id_mapping
*existing_mapping
= NULL
;
5508 if (uuid_is_null(uuid
)) {
5509 return (NECP_NULL_SERVICE_ID
);
5512 LCK_RW_ASSERT(&necp_kernel_policy_lock
, LCK_RW_ASSERT_EXCLUSIVE
);
5514 existing_mapping
= necp_uuid_lookup_service_id_locked(uuid
);
5515 if (existing_mapping
!= NULL
) {
5516 local_id
= existing_mapping
->id
;
5517 existing_mapping
->refcount
++;
5519 struct necp_uuid_id_mapping
*new_mapping
= NULL
;
5520 MALLOC(new_mapping
, struct necp_uuid_id_mapping
*, sizeof(*new_mapping
), M_NECP
, M_WAITOK
);
5521 if (new_mapping
!= NULL
) {
5522 uuid_copy(new_mapping
->uuid
, uuid
);
5523 new_mapping
->id
= necp_get_new_uuid_id();
5524 new_mapping
->refcount
= 1;
5526 LIST_INSERT_HEAD(&necp_uuid_service_id_list
, new_mapping
, chain
);
5528 local_id
= new_mapping
->id
;
static bool
necp_remove_uuid_service_id_mapping(uuid_t uuid)
{
	struct necp_uuid_id_mapping *existing_mapping = NULL;

	if (uuid_is_null(uuid)) {
		return (TRUE);
	}

	LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	existing_mapping = necp_uuid_lookup_app_id_locked(uuid);
	if (existing_mapping != NULL) {
		if (--existing_mapping->refcount == 0) {
			LIST_REMOVE(existing_mapping, chain);
			FREE(existing_mapping, M_NECP);
		}
		return (TRUE);
	}

	return (FALSE);
}
5560 necp_kernel_socket_policies_update_uuid_table(void)
5562 LCK_RW_ASSERT(&necp_kernel_policy_lock
, LCK_RW_ASSERT_EXCLUSIVE
);
5564 if (necp_uuid_app_id_mappings_dirty
) {
5565 if (proc_uuid_policy_kernel(PROC_UUID_POLICY_OPERATION_CLEAR
, NULL
, PROC_UUID_NECP_APP_POLICY
) < 0) {
5566 NECPLOG0(LOG_DEBUG
, "Error clearing uuids from policy table\n");
5570 if (necp_num_uuid_app_id_mappings
> 0) {
5571 struct necp_uuid_id_mapping_head
*uuid_list_head
= NULL
;
5572 for (uuid_list_head
= &necp_uuid_app_id_hashtbl
[necp_uuid_app_id_hash_num_buckets
- 1]; uuid_list_head
>= necp_uuid_app_id_hashtbl
; uuid_list_head
--) {
5573 struct necp_uuid_id_mapping
*mapping
= NULL
;
5574 LIST_FOREACH(mapping
, uuid_list_head
, chain
) {
5575 if (mapping
->table_refcount
> 0 &&
5576 proc_uuid_policy_kernel(PROC_UUID_POLICY_OPERATION_ADD
, mapping
->uuid
, PROC_UUID_NECP_APP_POLICY
) < 0) {
5577 NECPLOG0(LOG_DEBUG
, "Error adding uuid to policy table\n");
5583 necp_uuid_app_id_mappings_dirty
= FALSE
;
5589 #define NECP_KERNEL_VALID_IP_OUTPUT_CONDITIONS (NECP_KERNEL_CONDITION_ALL_INTERFACES | NECP_KERNEL_CONDITION_BOUND_INTERFACE | NECP_KERNEL_CONDITION_PROTOCOL | NECP_KERNEL_CONDITION_LOCAL_START | NECP_KERNEL_CONDITION_LOCAL_END | NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_REMOTE_START | NECP_KERNEL_CONDITION_REMOTE_END | NECP_KERNEL_CONDITION_REMOTE_PREFIX | NECP_KERNEL_CONDITION_POLICY_ID | NECP_KERNEL_CONDITION_LAST_INTERFACE)
5590 static necp_kernel_policy_id
5591 necp_kernel_ip_output_policy_add(necp_policy_id parent_policy_id
, necp_policy_order order
, necp_policy_order suborder
, u_int32_t session_order
, int session_pid
, u_int32_t condition_mask
, u_int32_t condition_negated_mask
, necp_kernel_policy_id cond_policy_id
, ifnet_t cond_bound_interface
, u_int32_t cond_last_interface_index
, u_int16_t cond_protocol
, union necp_sockaddr_union
*cond_local_start
, union necp_sockaddr_union
*cond_local_end
, u_int8_t cond_local_prefix
, union necp_sockaddr_union
*cond_remote_start
, union necp_sockaddr_union
*cond_remote_end
, u_int8_t cond_remote_prefix
, necp_kernel_policy_result result
, necp_kernel_policy_result_parameter result_parameter
)
5593 struct necp_kernel_ip_output_policy
*new_kernel_policy
= NULL
;
5594 struct necp_kernel_ip_output_policy
*tmp_kernel_policy
= NULL
;
5596 MALLOC_ZONE(new_kernel_policy
, struct necp_kernel_ip_output_policy
*, sizeof(*new_kernel_policy
), M_NECP_IP_POLICY
, M_WAITOK
);
5597 if (new_kernel_policy
== NULL
) {
5601 memset(new_kernel_policy
, 0, sizeof(*new_kernel_policy
)); // M_ZERO is not supported for MALLOC_ZONE
5602 new_kernel_policy
->parent_policy_id
= parent_policy_id
;
5603 new_kernel_policy
->id
= necp_kernel_policy_get_new_id(false);
5604 new_kernel_policy
->suborder
= suborder
;
5605 new_kernel_policy
->order
= order
;
5606 new_kernel_policy
->session_order
= session_order
;
5607 new_kernel_policy
->session_pid
= session_pid
;
5609 // Sanitize condition mask
5610 new_kernel_policy
->condition_mask
= (condition_mask
& NECP_KERNEL_VALID_IP_OUTPUT_CONDITIONS
);
5611 if ((new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_ALL_INTERFACES
) && (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_BOUND_INTERFACE
)) {
5612 new_kernel_policy
->condition_mask
&= ~NECP_KERNEL_CONDITION_BOUND_INTERFACE
;
5614 if ((new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_END
) && (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_PREFIX
)) {
5615 new_kernel_policy
->condition_mask
&= ~NECP_KERNEL_CONDITION_LOCAL_PREFIX
;
5617 if ((new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_REMOTE_END
) && (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_REMOTE_PREFIX
)) {
5618 new_kernel_policy
->condition_mask
&= ~NECP_KERNEL_CONDITION_REMOTE_PREFIX
;
5620 new_kernel_policy
->condition_negated_mask
= condition_negated_mask
& new_kernel_policy
->condition_mask
;
5622 // Set condition values
5623 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_POLICY_ID
) {
5624 new_kernel_policy
->cond_policy_id
= cond_policy_id
;
5626 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_BOUND_INTERFACE
) {
5627 if (cond_bound_interface
) {
5628 ifnet_reference(cond_bound_interface
);
5630 new_kernel_policy
->cond_bound_interface
= cond_bound_interface
;
5632 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_LAST_INTERFACE
) {
5633 new_kernel_policy
->cond_last_interface_index
= cond_last_interface_index
;
5635 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_PROTOCOL
) {
5636 new_kernel_policy
->cond_protocol
= cond_protocol
;
5638 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_START
) {
5639 memcpy(&new_kernel_policy
->cond_local_start
, cond_local_start
, cond_local_start
->sa
.sa_len
);
5641 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_END
) {
5642 memcpy(&new_kernel_policy
->cond_local_end
, cond_local_end
, cond_local_end
->sa
.sa_len
);
5644 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_PREFIX
) {
5645 new_kernel_policy
->cond_local_prefix
= cond_local_prefix
;
5647 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_REMOTE_START
) {
5648 memcpy(&new_kernel_policy
->cond_remote_start
, cond_remote_start
, cond_remote_start
->sa
.sa_len
);
5650 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_REMOTE_END
) {
5651 memcpy(&new_kernel_policy
->cond_remote_end
, cond_remote_end
, cond_remote_end
->sa
.sa_len
);
5653 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_REMOTE_PREFIX
) {
5654 new_kernel_policy
->cond_remote_prefix
= cond_remote_prefix
;
5657 new_kernel_policy
->result
= result
;
5658 memcpy(&new_kernel_policy
->result_parameter
, &result_parameter
, sizeof(result_parameter
));
5661 NECPLOG(LOG_DEBUG
, "Added kernel policy: ip output, id=%d, mask=%x\n", new_kernel_policy
->id
, new_kernel_policy
->condition_mask
);
5663 LIST_INSERT_SORTED_THRICE_ASCENDING(&necp_kernel_ip_output_policies
, new_kernel_policy
, chain
, session_order
, order
, suborder
, tmp_kernel_policy
);
5665 return (new_kernel_policy
? new_kernel_policy
->id
: 0);
static struct necp_kernel_ip_output_policy *
necp_kernel_ip_output_policy_find(necp_kernel_policy_id policy_id)
{
	struct necp_kernel_ip_output_policy *kernel_policy = NULL;
	struct necp_kernel_ip_output_policy *tmp_kernel_policy = NULL;

	if (policy_id == 0) {
		return (NULL);
	}

	LIST_FOREACH_SAFE(kernel_policy, &necp_kernel_ip_output_policies, chain, tmp_kernel_policy) {
		if (kernel_policy->id == policy_id) {
			return (kernel_policy);
		}
	}

	return (NULL);
}

static bool
necp_kernel_ip_output_policy_delete(necp_kernel_policy_id policy_id)
{
	struct necp_kernel_ip_output_policy *policy = NULL;

	LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	policy = necp_kernel_ip_output_policy_find(policy_id);
	if (policy) {
		LIST_REMOVE(policy, chain);

		if (policy->cond_bound_interface) {
			ifnet_release(policy->cond_bound_interface);
			policy->cond_bound_interface = NULL;
		}

		FREE_ZONE(policy, sizeof(*policy), M_NECP_IP_POLICY);
		return (TRUE);
	}

	return (FALSE);
}
5711 necp_kernel_ip_output_policies_dump_all(void)
5714 struct necp_kernel_ip_output_policy
*policy
= NULL
;
5717 char result_string
[MAX_RESULT_STRING_LEN
];
5718 char proc_name_string
[MAXCOMLEN
+ 1];
5719 memset(result_string
, 0, MAX_RESULT_STRING_LEN
);
5720 memset(proc_name_string
, 0, MAXCOMLEN
+ 1);
5722 NECPLOG0(LOG_DEBUG
, "NECP IP Output Policies:\n");
5723 NECPLOG0(LOG_DEBUG
, "-----------\n");
5724 for (id_i
= 0; id_i
< NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS
; id_i
++) {
5725 NECPLOG(LOG_DEBUG
, " ID Bucket: %d\n", id_i
);
5726 for (policy_i
= 0; necp_kernel_ip_output_policies_map
[id_i
] != NULL
&& (necp_kernel_ip_output_policies_map
[id_i
])[policy_i
] != NULL
; policy_i
++) {
5727 policy
= (necp_kernel_ip_output_policies_map
[id_i
])[policy_i
];
5728 proc_name(policy
->session_pid
, proc_name_string
, MAXCOMLEN
);
5729 NECPLOG(LOG_DEBUG
, "\t%3d. Policy ID: %5d\tProcess: %10.10s\tOrder: %04d.%04d.%d\tMask: %5x\tResult: %s\n", policy_i
, policy
->id
, proc_name_string
, policy
->session_order
, policy
->order
, policy
->suborder
, policy
->condition_mask
, necp_get_result_description(result_string
, policy
->result
, policy
->result_parameter
));
5731 NECPLOG0(LOG_DEBUG
, "-----------\n");
static bool
necp_kernel_ip_output_policy_results_overlap(struct necp_kernel_ip_output_policy *upper_policy, struct necp_kernel_ip_output_policy *lower_policy)
{
	if (upper_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
		if (upper_policy->session_order != lower_policy->session_order) {
			// A skip cannot override a policy of a different session
			return (FALSE);
		} else {
			if (upper_policy->result_parameter.skip_policy_order == 0 ||
			    lower_policy->order >= upper_policy->result_parameter.skip_policy_order) {
				// This policy is beyond the skip
				return (FALSE);
			} else {
				// This policy is inside the skip
				return (TRUE);
			}
		}
	}

	// All other IP Output policy results (drop, tunnel, hard pass) currently overlap
	return (TRUE);
}
5760 necp_kernel_ip_output_policy_is_unnecessary(struct necp_kernel_ip_output_policy
*policy
, struct necp_kernel_ip_output_policy
**policy_array
, int valid_indices
)
5762 bool can_skip
= FALSE
;
5763 u_int32_t highest_skip_session_order
= 0;
5764 u_int32_t highest_skip_order
= 0;
5766 for (i
= 0; i
< valid_indices
; i
++) {
5767 struct necp_kernel_ip_output_policy
*compared_policy
= policy_array
[i
];
5769 // For policies in a skip window, we can't mark conflicting policies as unnecessary
5771 if (highest_skip_session_order
!= compared_policy
->session_order
||
5772 (highest_skip_order
!= 0 && compared_policy
->order
>= highest_skip_order
)) {
5773 // If we've moved on to the next session, or passed the skip window
5774 highest_skip_session_order
= 0;
5775 highest_skip_order
= 0;
5778 // If this policy is also a skip, in can increase the skip window
5779 if (compared_policy
->result
== NECP_KERNEL_POLICY_RESULT_SKIP
) {
5780 if (compared_policy
->result_parameter
.skip_policy_order
> highest_skip_order
) {
5781 highest_skip_order
= compared_policy
->result_parameter
.skip_policy_order
;
5788 if (compared_policy
->result
== NECP_KERNEL_POLICY_RESULT_SKIP
) {
5789 // This policy is a skip. Set the skip window accordingly
5791 highest_skip_session_order
= compared_policy
->session_order
;
5792 highest_skip_order
= compared_policy
->result_parameter
.skip_policy_order
;
5795 // The result of the compared policy must be able to block out this policy result
5796 if (!necp_kernel_ip_output_policy_results_overlap(compared_policy
, policy
)) {
5800 // If new policy matches All Interfaces, compared policy must also
5801 if ((policy
->condition_mask
& NECP_KERNEL_CONDITION_ALL_INTERFACES
) && !(compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_ALL_INTERFACES
)) {
5805 // Default makes lower policies unecessary always
5806 if (compared_policy
->condition_mask
== 0) {
5810 // Compared must be more general than policy, and include only conditions within policy
5811 if ((policy
->condition_mask
& compared_policy
->condition_mask
) != compared_policy
->condition_mask
) {
5815 // Negative conditions must match for the overlapping conditions
5816 if ((policy
->condition_negated_mask
& compared_policy
->condition_mask
) != (compared_policy
->condition_negated_mask
& compared_policy
->condition_mask
)) {
5820 if (compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_POLICY_ID
&&
5821 compared_policy
->cond_policy_id
!= policy
->cond_policy_id
) {
5825 if (compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_BOUND_INTERFACE
&&
5826 compared_policy
->cond_bound_interface
!= policy
->cond_bound_interface
) {
5830 if (compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_PROTOCOL
&&
5831 compared_policy
->cond_protocol
!= policy
->cond_protocol
) {
5835 if (compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_START
) {
5836 if (compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_END
) {
5837 if (!necp_is_range_in_range((struct sockaddr
*)&policy
->cond_local_start
, (struct sockaddr
*)&policy
->cond_local_end
, (struct sockaddr
*)&compared_policy
->cond_local_start
, (struct sockaddr
*)&compared_policy
->cond_local_end
)) {
5840 } else if (compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_PREFIX
) {
5841 if (compared_policy
->cond_local_prefix
> policy
->cond_local_prefix
||
5842 !necp_is_addr_in_subnet((struct sockaddr
*)&policy
->cond_local_start
, (struct sockaddr
*)&compared_policy
->cond_local_start
, compared_policy
->cond_local_prefix
)) {
5848 if (compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_REMOTE_START
) {
5849 if (compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_REMOTE_END
) {
5850 if (!necp_is_range_in_range((struct sockaddr
*)&policy
->cond_remote_start
, (struct sockaddr
*)&policy
->cond_remote_end
, (struct sockaddr
*)&compared_policy
->cond_remote_start
, (struct sockaddr
*)&compared_policy
->cond_remote_end
)) {
5853 } else if (compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_REMOTE_PREFIX
) {
5854 if (compared_policy
->cond_remote_prefix
> policy
->cond_remote_prefix
||
5855 !necp_is_addr_in_subnet((struct sockaddr
*)&policy
->cond_remote_start
, (struct sockaddr
*)&compared_policy
->cond_remote_start
, compared_policy
->cond_remote_prefix
)) {
5868 necp_kernel_ip_output_policies_reprocess(void)
5871 int bucket_allocation_counts
[NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS
];
5872 int bucket_current_free_index
[NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS
];
5873 struct necp_kernel_ip_output_policy
*kernel_policy
= NULL
;
5875 LCK_RW_ASSERT(&necp_kernel_policy_lock
, LCK_RW_ASSERT_EXCLUSIVE
);
5878 necp_kernel_ip_output_policies_condition_mask
= 0;
5879 necp_kernel_ip_output_policies_count
= 0;
5880 necp_kernel_ip_output_policies_non_id_count
= 0;
5882 for (i
= 0; i
< NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS
; i
++) {
5883 if (necp_kernel_ip_output_policies_map
[i
] != NULL
) {
5884 FREE(necp_kernel_ip_output_policies_map
[i
], M_NECP
);
5885 necp_kernel_ip_output_policies_map
[i
] = NULL
;
5889 bucket_allocation_counts
[i
] = 0;
5892 LIST_FOREACH(kernel_policy
, &necp_kernel_ip_output_policies
, chain
) {
5894 necp_kernel_ip_output_policies_condition_mask
|= kernel_policy
->condition_mask
;
5895 necp_kernel_ip_output_policies_count
++;
5897 // Update bucket counts
5898 if (!(kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_POLICY_ID
)) {
5899 necp_kernel_ip_output_policies_non_id_count
++;
5900 for (i
= 0; i
< NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS
; i
++) {
5901 bucket_allocation_counts
[i
]++;
5904 bucket_allocation_counts
[NECP_IP_OUTPUT_MAP_ID_TO_BUCKET(kernel_policy
->cond_policy_id
)]++;
5908 for (i
= 0; i
< NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS
; i
++) {
5909 if (bucket_allocation_counts
[i
] > 0) {
5910 // Allocate a NULL-terminated array of policy pointers for each bucket
5911 MALLOC(necp_kernel_ip_output_policies_map
[i
], struct necp_kernel_ip_output_policy
**, sizeof(struct necp_kernel_ip_output_policy
*) * (bucket_allocation_counts
[i
] + 1), M_NECP
, M_WAITOK
);
5912 if (necp_kernel_ip_output_policies_map
[i
] == NULL
) {
5916 // Initialize the first entry to NULL
5917 (necp_kernel_ip_output_policies_map
[i
])[0] = NULL
;
5919 bucket_current_free_index
[i
] = 0;
5922 LIST_FOREACH(kernel_policy
, &necp_kernel_ip_output_policies
, chain
) {
5923 // Insert pointers into map
5924 if (!(kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_POLICY_ID
)) {
5925 for (i
= 0; i
< NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS
; i
++) {
5926 if (!necp_kernel_ip_output_policy_is_unnecessary(kernel_policy
, necp_kernel_ip_output_policies_map
[i
], bucket_current_free_index
[i
])) {
5927 (necp_kernel_ip_output_policies_map
[i
])[(bucket_current_free_index
[i
])] = kernel_policy
;
5928 bucket_current_free_index
[i
]++;
5929 (necp_kernel_ip_output_policies_map
[i
])[(bucket_current_free_index
[i
])] = NULL
;
5933 i
= NECP_IP_OUTPUT_MAP_ID_TO_BUCKET(kernel_policy
->cond_policy_id
);
5934 if (!necp_kernel_ip_output_policy_is_unnecessary(kernel_policy
, necp_kernel_ip_output_policies_map
[i
], bucket_current_free_index
[i
])) {
5935 (necp_kernel_ip_output_policies_map
[i
])[(bucket_current_free_index
[i
])] = kernel_policy
;
5936 bucket_current_free_index
[i
]++;
5937 (necp_kernel_ip_output_policies_map
[i
])[(bucket_current_free_index
[i
])] = NULL
;
5941 necp_kernel_ip_output_policies_dump_all();
5945 // Free memory, reset mask to 0
5946 necp_kernel_ip_output_policies_condition_mask
= 0;
5947 necp_kernel_ip_output_policies_count
= 0;
5948 necp_kernel_ip_output_policies_non_id_count
= 0;
5949 for (i
= 0; i
< NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS
; i
++) {
5950 if (necp_kernel_ip_output_policies_map
[i
] != NULL
) {
5951 FREE(necp_kernel_ip_output_policies_map
[i
], M_NECP
);
5952 necp_kernel_ip_output_policies_map
[i
] = NULL
;
// Outbound Policy Matching
// ---------------------

static struct substring
necp_trim_dots_and_stars(char *string, size_t length)
{
	struct substring sub;
	sub.string = string;
	sub.length = string ? length : 0;

	while (sub.length && (sub.string[0] == '.' || sub.string[0] == '*')) {
		sub.string++;
		sub.length--;
	}

	while (sub.length && (sub.string[sub.length - 1] == '.' || sub.string[sub.length - 1] == '*')) {
		sub.length--;
	}

	return (sub);
}

static char *
necp_create_trimmed_domain(char *string, size_t length)
{
	char *trimmed_domain = NULL;
	struct substring sub = necp_trim_dots_and_stars(string, length);

	MALLOC(trimmed_domain, char *, sub.length + 1, M_NECP, M_WAITOK);
	if (trimmed_domain == NULL) {
		return (NULL);
	}

	memcpy(trimmed_domain, sub.string, sub.length);
	trimmed_domain[sub.length] = 0;

	return (trimmed_domain);
}

static inline int
necp_count_dots(char *string, size_t length)
{
	int dot_count = 0;
	size_t i = 0;

	for (i = 0; i < length; i++) {
		if (string[i] == '.') {
			dot_count++;
		}
	}

	return (dot_count);
}
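/*
 * Editor's illustrative sketch (not part of the original source, not compiled):
 * how the helpers above normalize a domain condition. Leading/trailing dots and
 * wildcard stars are trimmed before the dot count is taken, so a condition
 * written as "*.example.com." is stored as "example.com" with one dot.
 */
#if 0
static void
domain_trim_example(void)
{
	char raw[] = "*.example.com.";
	char *trimmed = necp_create_trimmed_domain(raw, strlen(raw));
	if (trimmed != NULL) {
		/* trimmed == "example.com"; necp_count_dots(trimmed, strlen(trimmed)) == 1 */
		FREE(trimmed, M_NECP);
	}
}
#endif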
static bool
necp_check_suffix(struct substring parent, struct substring suffix, bool require_dot_before_suffix)
{
	if (parent.length <= suffix.length) {
		return (FALSE);
	}

	size_t length_difference = (parent.length - suffix.length);

	if (require_dot_before_suffix) {
		if (((char *)(parent.string + length_difference - 1))[0] != '.') {
			return (FALSE);
		}
	}

	// strncasecmp does case-insensitive check for all UTF-8 strings (ignores non-ASCII characters)
	return (strncasecmp(parent.string + length_difference, suffix.string, suffix.length) == 0);
}

static bool
necp_hostname_matches_domain(struct substring hostname_substring, u_int8_t hostname_dot_count, char *domain, u_int8_t domain_dot_count)
{
	if (hostname_substring.string == NULL || domain == NULL) {
		return (hostname_substring.string == domain);
	}

	struct substring domain_substring;
	domain_substring.string = domain;
	domain_substring.length = strlen(domain);

	if (hostname_dot_count == domain_dot_count) {
		// strncasecmp does case-insensitive check for all UTF-8 strings (ignores non-ASCII characters)
		if (hostname_substring.length == domain_substring.length &&
		    strncasecmp(hostname_substring.string, domain_substring.string, hostname_substring.length) == 0) {
			return (TRUE);
		}
	} else if (domain_dot_count < hostname_dot_count) {
		if (necp_check_suffix(hostname_substring, domain_substring, TRUE)) {
			return (TRUE);
		}
	}

	return (FALSE);
}
static char *
necp_copy_string(char *string, size_t length)
{
	char *copied_string = NULL;

	MALLOC(copied_string, char *, length + 1, M_NECP, M_WAITOK);
	if (copied_string == NULL) {
		return (NULL);
	}

	memcpy(copied_string, string, length);
	copied_string[length] = 0;

	return (copied_string);
}

static void
necp_get_parent_cred_result(proc_t proc, struct necp_socket_info *info)
{
	task_t task = proc_task(proc ? proc : current_proc());
	coalition_t coal = COALITION_NULL;
	Boolean is_leader = coalition_is_leader(task, COALITION_TYPE_JETSAM, &coal);

	if (is_leader == TRUE) {
		// No parent, nothing to do
		return;
	}

	if (coal != NULL) {
		task_t lead_task = coalition_get_leader(coal);
		if (lead_task != NULL) {
			proc_t lead_proc = get_bsdtask_info(lead_task);
			if (lead_proc != NULL) {
				kauth_cred_t lead_cred = kauth_cred_proc_ref(lead_proc);
				if (lead_cred != NULL) {
					errno_t cred_result = priv_check_cred(lead_cred, PRIV_NET_PRIVILEGED_NECP_MATCH, 0);
					kauth_cred_unref(&lead_cred);
					info->cred_result = cred_result;
				}
			}
			task_deallocate(lead_task);
		}
	}
}
6106 #define NECP_KERNEL_ADDRESS_TYPE_CONDITIONS (NECP_KERNEL_CONDITION_LOCAL_START | NECP_KERNEL_CONDITION_LOCAL_END | NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_REMOTE_START | NECP_KERNEL_CONDITION_REMOTE_END | NECP_KERNEL_CONDITION_REMOTE_PREFIX)
6108 necp_application_fillout_info_locked(uuid_t application_uuid
, uuid_t real_application_uuid
, char *account
, char *domain
, pid_t pid
, uid_t uid
, u_int16_t protocol
, u_int32_t bound_interface_index
, u_int32_t traffic_class
, union necp_sockaddr_union
*local_addr
, union necp_sockaddr_union
*remote_addr
, proc_t proc
, struct necp_socket_info
*info
)
6110 memset(info
, 0, sizeof(struct necp_socket_info
));
6114 info
->protocol
= protocol
;
6115 info
->bound_interface_index
= bound_interface_index
;
6116 info
->traffic_class
= traffic_class
;
6118 if (necp_kernel_application_policies_condition_mask
& NECP_KERNEL_CONDITION_ENTITLEMENT
&& proc
!= NULL
) {
6119 info
->cred_result
= priv_check_cred(proc_ucred(proc
), PRIV_NET_PRIVILEGED_NECP_MATCH
, 0);
6120 if (info
->cred_result
!= 0) {
6121 // Process does not have entitlement, check the parent process
6122 necp_get_parent_cred_result(proc
, info
);
6126 if (necp_kernel_application_policies_condition_mask
& NECP_KERNEL_CONDITION_APP_ID
&& !uuid_is_null(application_uuid
)) {
6127 struct necp_uuid_id_mapping
*existing_mapping
= necp_uuid_lookup_app_id_locked(application_uuid
);
6128 if (existing_mapping
) {
6129 info
->application_id
= existing_mapping
->id
;
6133 if (necp_kernel_application_policies_condition_mask
& NECP_KERNEL_CONDITION_REAL_APP_ID
&& !uuid_is_null(real_application_uuid
)) {
6134 if (uuid_compare(application_uuid
, real_application_uuid
) == 0) {
6135 info
->real_application_id
= info
->application_id
;
6137 struct necp_uuid_id_mapping
*existing_mapping
= necp_uuid_lookup_app_id_locked(real_application_uuid
);
6138 if (existing_mapping
) {
6139 info
->real_application_id
= existing_mapping
->id
;
6144 if (necp_kernel_application_policies_condition_mask
& NECP_KERNEL_CONDITION_ACCOUNT_ID
&& account
!= NULL
) {
6145 struct necp_string_id_mapping
*existing_mapping
= necp_lookup_string_to_id_locked(&necp_account_id_list
, account
);
6146 if (existing_mapping
) {
6147 info
->account_id
= existing_mapping
->id
;
6151 if (necp_kernel_application_policies_condition_mask
& NECP_KERNEL_CONDITION_DOMAIN
) {
6152 info
->domain
= domain
;
6155 if (necp_kernel_application_policies_condition_mask
& NECP_KERNEL_ADDRESS_TYPE_CONDITIONS
) {
6156 if (local_addr
&& local_addr
->sa
.sa_len
> 0) {
6157 memcpy(&info
->local_addr
, local_addr
, local_addr
->sa
.sa_len
);
6159 if (remote_addr
&& remote_addr
->sa
.sa_len
> 0) {
6160 memcpy(&info
->remote_addr
, remote_addr
, remote_addr
->sa
.sa_len
);
static void
necp_send_application_interface_denied_event(pid_t pid, uuid_t proc_uuid, u_int32_t if_functional_type)
{
	struct kev_netpolicy_ifdenied ev_ifdenied;

	bzero(&ev_ifdenied, sizeof(ev_ifdenied));

	ev_ifdenied.ev_data.epid = pid;
	uuid_copy(ev_ifdenied.ev_data.euuid, proc_uuid);
	ev_ifdenied.ev_if_functional_type = if_functional_type;

	netpolicy_post_msg(KEV_NETPOLICY_IFDENIED, &ev_ifdenied.ev_data, sizeof(ev_ifdenied));
}

extern char *proc_name_address(void *p);

#define NECP_VERIFY_DELEGATION_ENTITLEMENT(_p, _d) \
	if (!has_checked_delegation_entitlement) { \
		has_delegation_entitlement = (priv_check_cred(proc_ucred(_p), PRIV_NET_PRIVILEGED_SOCKET_DELEGATE, 0) == 0); \
		has_checked_delegation_entitlement = TRUE; \
	} \
	if (!has_delegation_entitlement) { \
		NECPLOG(LOG_ERR, "%s(%d) does not hold the necessary entitlement to delegate network traffic for other processes by %s", \
		    proc_name_address(_p), proc_pid(_p), _d); \
		break; \
	}
6193 necp_application_find_policy_match_internal(proc_t proc
,
6194 u_int8_t
*parameters
,
6195 u_int32_t parameters_size
,
6196 struct necp_aggregate_result
*returned_result
,
6198 u_int required_interface_index
,
6199 const union necp_sockaddr_union
*override_local_addr
,
6200 const union necp_sockaddr_union
*override_remote_addr
,
6201 struct rtentry
**returned_route
, bool ignore_address
)
6206 struct necp_kernel_socket_policy
*matched_policy
= NULL
;
6207 struct necp_socket_info info
;
6208 necp_kernel_policy_filter filter_control_unit
= 0;
6209 u_int32_t route_rule_id
= 0;
6210 necp_kernel_policy_result service_action
= 0;
6211 necp_kernel_policy_service service
= { 0, 0 };
6213 u_int16_t protocol
= 0;
6214 u_int32_t bound_interface_index
= required_interface_index
;
6215 u_int32_t traffic_class
= 0;
6216 u_int32_t client_flags
= 0;
6217 union necp_sockaddr_union local_addr
;
6218 union necp_sockaddr_union remote_addr
;
6219 bool no_remote_addr
= FALSE
;
6220 u_int8_t remote_family
= 0;
6221 bool no_local_addr
= FALSE
;
6223 if (override_local_addr
) {
6224 memcpy(&local_addr
, override_local_addr
, sizeof(local_addr
));
6226 memset(&local_addr
, 0, sizeof(local_addr
));
6228 if (override_remote_addr
) {
6229 memcpy(&remote_addr
, override_remote_addr
, sizeof(remote_addr
));
6231 memset(&remote_addr
, 0, sizeof(remote_addr
));
6234 // Initialize UID, PID, and UUIDs to the current process
6235 uid_t uid
= kauth_cred_getuid(proc_ucred(proc
));
6236 pid_t pid
= proc_pid(proc
);
6237 uuid_t application_uuid
;
6238 uuid_clear(application_uuid
);
	uuid_t real_application_uuid;
	uuid_clear(real_application_uuid);
	proc_getexecutableuuid(proc, real_application_uuid, sizeof(real_application_uuid));
	uuid_copy(application_uuid, real_application_uuid);

	char *domain = NULL;
	char *account = NULL;

	u_int32_t netagent_ids[NECP_MAX_NETAGENTS];
	memset(&netagent_ids, 0, sizeof(netagent_ids));
	int netagent_cursor;

	bool has_checked_delegation_entitlement = FALSE;
	bool has_delegation_entitlement = FALSE;

	if (returned_result == NULL) {
		return (EINVAL);
	}

	memset(returned_result, 0, sizeof(struct necp_aggregate_result));

	lck_rw_lock_shared(&necp_kernel_policy_lock);
	if (necp_kernel_application_policies_count == 0) {
		if (necp_drop_all_order > 0) {
			returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_DROP;
			lck_rw_done(&necp_kernel_policy_lock);
			return (0);
		}
	}
	lck_rw_done(&necp_kernel_policy_lock);

	while ((offset + sizeof(u_int8_t) + sizeof(u_int32_t)) <= parameters_size) {
		u_int8_t type = necp_buffer_get_tlv_type(parameters, offset);
		u_int32_t length = necp_buffer_get_tlv_length(parameters, offset);

		if (length > (parameters_size - (offset + sizeof(u_int8_t) + sizeof(u_int32_t)))) {
			// If the length is larger than what can fit in the remaining parameters size, bail
			NECPLOG(LOG_ERR, "Invalid TLV length (%u)", length);
			break;
		}

		if (length > 0) {
			u_int8_t *value = necp_buffer_get_tlv_value(parameters, offset, NULL);
			if (value != NULL) {
				switch (type) {
					case NECP_CLIENT_PARAMETER_APPLICATION: {
						if (length >= sizeof(uuid_t)) {
							if (uuid_compare(application_uuid, value) == 0) {
								// No delegation
								break;
							}

							NECP_VERIFY_DELEGATION_ENTITLEMENT(proc, "euuid");

							uuid_copy(application_uuid, value);
						}
						break;
					}
					case NECP_CLIENT_PARAMETER_REAL_APPLICATION: {
						if (length >= sizeof(uuid_t)) {
							if (uuid_compare(real_application_uuid, value) == 0) {
								// No delegation
								break;
							}

							NECP_VERIFY_DELEGATION_ENTITLEMENT(proc, "uuid");

							uuid_copy(real_application_uuid, value);
						}
						break;
					}
					case NECP_CLIENT_PARAMETER_PID: {
						if (length >= sizeof(pid_t)) {
							if (memcmp(&pid, value, sizeof(pid_t)) == 0) {
								// No delegation
								break;
							}

							NECP_VERIFY_DELEGATION_ENTITLEMENT(proc, "pid");

							memcpy(&pid, value, sizeof(pid_t));
						}
						break;
					}
					case NECP_CLIENT_PARAMETER_UID: {
						if (length >= sizeof(uid_t)) {
							if (memcmp(&uid, value, sizeof(uid_t)) == 0) {
								// No delegation
								break;
							}

							NECP_VERIFY_DELEGATION_ENTITLEMENT(proc, "uid");

							memcpy(&uid, value, sizeof(uid_t));
						}
						break;
					}
					case NECP_CLIENT_PARAMETER_DOMAIN: {
						domain = (char *)value;
						domain[length - 1] = 0;
						break;
					}
					case NECP_CLIENT_PARAMETER_ACCOUNT: {
						account = (char *)value;
						account[length - 1] = 0;
						break;
					}
					case NECP_CLIENT_PARAMETER_TRAFFIC_CLASS: {
						if (length >= sizeof(u_int32_t)) {
							memcpy(&traffic_class, value, sizeof(u_int32_t));
						}
						break;
					}
					case NECP_CLIENT_PARAMETER_IP_PROTOCOL: {
						if (length >= sizeof(u_int16_t)) {
							memcpy(&protocol, value, sizeof(u_int16_t));
						}
						break;
					}
					case NECP_CLIENT_PARAMETER_BOUND_INTERFACE: {
						if (length <= IFXNAMSIZ && length > 0) {
							ifnet_t bound_interface = NULL;
							char interface_name[IFXNAMSIZ];
							memcpy(interface_name, value, length);
							interface_name[length - 1] = 0; // Make sure the string is NULL terminated
							if (ifnet_find_by_name(interface_name, &bound_interface) == 0) {
								bound_interface_index = bound_interface->if_index;
								ifnet_release(bound_interface);
							}
						}
						break;
					}
					case NECP_CLIENT_PARAMETER_LOCAL_ADDRESS: {
						if (ignore_address) {
							break;
						}

						if (length >= sizeof(struct necp_policy_condition_addr)) {
							struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value;
							if (necp_address_is_valid(&address_struct->address.sa)) {
								memcpy(&local_addr, &address_struct->address, sizeof(address_struct->address));
							}
						}
						break;
					}
					case NECP_CLIENT_PARAMETER_REMOTE_ADDRESS: {
						if (ignore_address) {
							break;
						}

						if (length >= sizeof(struct necp_policy_condition_addr)) {
							struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value;
							if (necp_address_is_valid(&address_struct->address.sa)) {
								memcpy(&remote_addr, &address_struct->address, sizeof(address_struct->address));
							}
						}
						break;
					}
					case NECP_CLIENT_PARAMETER_FLAGS: {
						if (length >= sizeof(client_flags)) {
							memcpy(&client_flags, value, sizeof(client_flags));
						}
						break;
					}
					default: {
						break;
					}
				}
			}
		}

		offset += sizeof(u_int8_t) + sizeof(u_int32_t) + length;
	}

	lck_rw_lock_shared(&necp_kernel_policy_lock);

	necp_application_fillout_info_locked(application_uuid, real_application_uuid, account, domain, pid, uid, protocol, bound_interface_index, traffic_class, &local_addr, &remote_addr, proc, &info);
	matched_policy = necp_socket_find_policy_match_with_info_locked(necp_kernel_socket_policies_app_layer_map, &info, &filter_control_unit, &route_rule_id, &service_action, &service, netagent_ids, NECP_MAX_NETAGENTS, proc);
	if (matched_policy) {
		returned_result->policy_id = matched_policy->id;
		returned_result->routing_result = matched_policy->result;
		memcpy(&returned_result->routing_result_parameter, &matched_policy->result_parameter, sizeof(returned_result->routing_result_parameter));
	} else if (necp_drop_all_order > 0) {
		// Mark socket as a drop if drop_all is set
		returned_result->policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
		returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_DROP;
	} else {
		returned_result->policy_id = 0;
		returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_NONE;
	}
	returned_result->filter_control_unit = filter_control_unit;
	returned_result->service_action = service_action;

	// Handle trigger service
	if (service.identifier != 0) {
		struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(service.identifier);
		if (mapping != NULL) {
			struct necp_service_registration *service_registration = NULL;
			uuid_copy(returned_result->service_uuid, mapping->uuid);
			returned_result->service_data = service.data;
			if (service.identifier == NECP_NULL_SERVICE_ID) {
				// NULL service is always 'registered'
				returned_result->service_flags |= NECP_SERVICE_FLAGS_REGISTERED;
			} else {
				LIST_FOREACH(service_registration, &necp_registered_service_list, kernel_chain) {
					if (service.identifier == service_registration->service_id) {
						returned_result->service_flags |= NECP_SERVICE_FLAGS_REGISTERED;
						break;
					}
				}
			}
		}
	}

	// Handle netagents
	for (netagent_cursor = 0; netagent_cursor < NECP_MAX_NETAGENTS; netagent_cursor++) {
		struct necp_uuid_id_mapping *mapping = NULL;
		u_int32_t netagent_id = netagent_ids[netagent_cursor];
		if (netagent_id == 0) {
			break;
		}
		mapping = necp_uuid_lookup_uuid_with_service_id_locked(netagent_id);
		if (mapping != NULL) {
			uuid_copy(returned_result->netagents[netagent_cursor], mapping->uuid);
			returned_result->netagent_flags[netagent_cursor] = netagent_get_flags(mapping->uuid);
		}
	}

	// Do routing evaluation
	u_int output_bound_interface = bound_interface_index;
	if (returned_result->routing_result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED) {
		output_bound_interface = returned_result->routing_result_parameter.scoped_interface_index;
	} else if (returned_result->routing_result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL) {
		output_bound_interface = returned_result->routing_result_parameter.tunnel_interface_index;
	}

	if (local_addr.sa.sa_len == 0 ||
		(local_addr.sa.sa_family == AF_INET && local_addr.sin.sin_addr.s_addr == 0) ||
		(local_addr.sa.sa_family == AF_INET6 && IN6_IS_ADDR_UNSPECIFIED(&local_addr.sin6.sin6_addr))) {
		no_local_addr = TRUE;
	}

	if (remote_addr.sa.sa_len == 0 ||
		(remote_addr.sa.sa_family == AF_INET && remote_addr.sin.sin_addr.s_addr == 0) ||
		(remote_addr.sa.sa_family == AF_INET6 && IN6_IS_ADDR_UNSPECIFIED(&remote_addr.sin6.sin6_addr))) {
		no_remote_addr = TRUE;
		remote_family = remote_addr.sa.sa_family;
	}

	returned_result->routed_interface_index = 0;
	struct rtentry *rt = NULL;
	if (!no_local_addr && (client_flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER) != 0) {
		// Treat the output bound interface as the routed interface for local address
		// validation later.
		returned_result->routed_interface_index = output_bound_interface;
	} else {
		if (no_remote_addr) {
			memset(&remote_addr, 0, sizeof(remote_addr));
			if (remote_family == AF_INET6) {
				// Reset address to ::
				remote_addr.sa.sa_family = AF_INET6;
				remote_addr.sa.sa_len = sizeof(struct sockaddr_in6);
			} else {
				// Reset address to 0.0.0.0
				remote_addr.sa.sa_family = AF_INET;
				remote_addr.sa.sa_len = sizeof(struct sockaddr_in);
			}
		}

		rt = rtalloc1_scoped((struct sockaddr *)&remote_addr, 0, 0,
			output_bound_interface);

		if (no_remote_addr && remote_family == 0 &&
			(rt == NULL || rt->rt_ifp == NULL)) {
			// Route lookup for default IPv4 failed, try IPv6

			// Cleanup old route if necessary
			if (rt != NULL) {
				rtfree(rt);
				rt = NULL;
			}

			// Reset address to ::
			memset(&remote_addr, 0, sizeof(remote_addr));
			remote_addr.sa.sa_family = AF_INET6;
			remote_addr.sa.sa_len = sizeof(struct sockaddr_in6);

			rt = rtalloc1_scoped((struct sockaddr *)&remote_addr, 0, 0,
				output_bound_interface);
		}

		if (rt != NULL &&
			rt->rt_ifp != NULL) {
			returned_result->routed_interface_index = rt->rt_ifp->if_index;
			/*
			 * For local addresses, we allow the interface scope to be
			 * either the loopback interface or the interface hosting the
			 * local address.
			 */
			if (bound_interface_index != IFSCOPE_NONE &&
				rt->rt_ifa != NULL && rt->rt_ifa->ifa_ifp &&
				(output_bound_interface == lo_ifp->if_index ||
				rt->rt_ifp->if_index == lo_ifp->if_index ||
				rt->rt_ifa->ifa_ifp->if_index == bound_interface_index)) {
				struct sockaddr_storage dst;
				unsigned int ifscope = bound_interface_index;

				/*
				 * Transform dst into the internal routing table form
				 */
				(void) sa_copy((struct sockaddr *)&remote_addr,
					&dst, &ifscope);

				if ((rt->rt_ifp->if_index == lo_ifp->if_index) ||
					rt_ifa_is_dst((struct sockaddr *)&dst, rt->rt_ifa))
					returned_result->routed_interface_index =
						bound_interface_index;
			}
		}
	}

	if (returned_result->routed_interface_index != 0 &&
		returned_result->routed_interface_index != lo_ifp->if_index && // Loopback can accept any local address
		!no_local_addr) {

		// Transform local_addr into the ifaddr form
		// IPv6 Scope IDs are always embedded in the ifaddr list
		struct sockaddr_storage local_address_sanitized;
		u_int ifscope = IFSCOPE_NONE;
		(void)sa_copy(&local_addr.sa, &local_address_sanitized, &ifscope);
		SIN(&local_address_sanitized)->sin_port = 0;
		if (local_address_sanitized.ss_family == AF_INET6) {
			SIN6(&local_address_sanitized)->sin6_scope_id = 0;
		}

		// Validate local address on routed interface
		struct ifaddr *ifa = ifa_ifwithaddr_scoped((struct sockaddr *)&local_address_sanitized, returned_result->routed_interface_index);
		if (ifa == NULL) {
			// Interface address not found, reject route
			returned_result->routed_interface_index = 0;
			if (rt != NULL) {
				rtfree(rt);
				rt = NULL;
			}
		} else {
			ifaddr_release(ifa);
			ifa = NULL;
		}
	}

	if (flags != NULL) {
		if ((client_flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER) == 0) {
			// Check for local/direct
			bool is_local = FALSE;
			if (rt != NULL && (rt->rt_flags & RTF_LOCAL)) {
				is_local = TRUE;
			} else if (returned_result->routed_interface_index != 0 &&
				!no_remote_addr) {
				// Clean up the address before comparison with interface addresses

				// Transform remote_addr into the ifaddr form
				// IPv6 Scope IDs are always embedded in the ifaddr list
				struct sockaddr_storage remote_address_sanitized;
				u_int ifscope = IFSCOPE_NONE;
				(void)sa_copy(&remote_addr.sa, &remote_address_sanitized, &ifscope);
				SIN(&remote_address_sanitized)->sin_port = 0;
				if (remote_address_sanitized.ss_family == AF_INET6) {
					SIN6(&remote_address_sanitized)->sin6_scope_id = 0;
				}

				// Check if remote address is an interface address
				struct ifaddr *ifa = ifa_ifwithaddr((struct sockaddr *)&remote_address_sanitized);
				if (ifa != NULL && ifa->ifa_ifp != NULL) {
					u_int if_index_for_remote_addr = ifa->ifa_ifp->if_index;
					if (if_index_for_remote_addr == returned_result->routed_interface_index ||
						if_index_for_remote_addr == lo_ifp->if_index) {
						is_local = TRUE;
					}
				}
				if (ifa != NULL) {
					ifaddr_release(ifa);
					ifa = NULL;
				}
			}

			if (is_local) {
				*flags |= (NECP_CLIENT_RESULT_FLAG_IS_LOCAL | NECP_CLIENT_RESULT_FLAG_IS_DIRECT);
			} else {
				if (rt != NULL &&
					!(rt->rt_flags & RTF_GATEWAY) &&
					(rt->rt_ifa && rt->rt_ifa->ifa_ifp && !(rt->rt_ifa->ifa_ifp->if_flags & IFF_POINTOPOINT))) {
					// Route is directly accessible
					*flags |= NECP_CLIENT_RESULT_FLAG_IS_DIRECT;
				}
			}

			if (rt != NULL &&
				rt->rt_ifp != NULL) {
				// Check probe status
				if (rt->rt_ifp->if_eflags & IFEF_PROBE_CONNECTIVITY) {
					*flags |= NECP_CLIENT_RESULT_FLAG_PROBE_CONNECTIVITY;
				}

				if (rt->rt_ifp->if_type == IFT_CELLULAR) {
					struct if_cellular_status_v1 *ifsr;

					ifnet_lock_shared(rt->rt_ifp);
					lck_rw_lock_exclusive(&rt->rt_ifp->if_link_status_lock);

					if (rt->rt_ifp->if_link_status != NULL) {
						ifsr = &rt->rt_ifp->if_link_status->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;

						if (ifsr->valid_bitmask & IF_CELL_UL_MSS_RECOMMENDED_VALID) {
							if (ifsr->mss_recommended == IF_CELL_UL_MSS_RECOMMENDED_NONE) {
								returned_result->mss_recommended = NECP_CLIENT_RESULT_RECOMMENDED_MSS_NONE;
							} else if (ifsr->mss_recommended == IF_CELL_UL_MSS_RECOMMENDED_MEDIUM) {
								returned_result->mss_recommended = NECP_CLIENT_RESULT_RECOMMENDED_MSS_MEDIUM;
							} else if (ifsr->mss_recommended == IF_CELL_UL_MSS_RECOMMENDED_LOW) {
								returned_result->mss_recommended = NECP_CLIENT_RESULT_RECOMMENDED_MSS_LOW;
							}
						}
					}
					lck_rw_done(&rt->rt_ifp->if_link_status_lock);
					ifnet_lock_done(rt->rt_ifp);
				}

				// Check link quality
				if ((client_flags & NECP_CLIENT_PARAMETER_FLAG_DISCRETIONARY) &&
					(rt->rt_ifp->if_interface_state.valid_bitmask & IF_INTERFACE_STATE_LQM_STATE_VALID) &&
					rt->rt_ifp->if_interface_state.lqm_state == IFNET_LQM_THRESH_ABORT) {
					*flags |= NECP_CLIENT_RESULT_FLAG_LINK_QUALITY_ABORT;
				}

				// Check QoS marking (fastlane)
				if (necp_update_qos_marking(rt->rt_ifp, route_rule_id)) {
					*flags |= NECP_CLIENT_RESULT_FLAG_ALLOW_QOS_MARKING;
				}
			}
		}

		if (returned_result->routed_interface_index != 0) {
			union necp_sockaddr_union default_address;
			struct rtentry *v4Route = NULL;
			struct rtentry *v6Route = NULL;

			memset(&default_address, 0, sizeof(default_address));

			// Reset address to 0.0.0.0
			default_address.sa.sa_family = AF_INET;
			default_address.sa.sa_len = sizeof(struct sockaddr_in);
			v4Route = rtalloc1_scoped((struct sockaddr *)&default_address, 0, 0,
				returned_result->routed_interface_index);

			// Reset address to ::
			default_address.sa.sa_family = AF_INET6;
			default_address.sa.sa_len = sizeof(struct sockaddr_in6);
			v6Route = rtalloc1_scoped((struct sockaddr *)&default_address, 0, 0,
				returned_result->routed_interface_index);

			if (v4Route != NULL) {
				if (v4Route->rt_ifp != NULL) {
					*flags |= NECP_CLIENT_RESULT_FLAG_HAS_IPV4;
				}
				rtfree(v4Route);
				v4Route = NULL;
			}

			if (v6Route != NULL) {
				if (v6Route->rt_ifp != NULL) {
					*flags |= NECP_CLIENT_RESULT_FLAG_HAS_IPV6;
				}
				rtfree(v6Route);
				v6Route = NULL;
			}
		}
	}

	u_int32_t interface_type_denied = IFRTYPE_FUNCTIONAL_UNKNOWN;
	bool route_is_allowed = necp_route_is_allowed(rt, NULL, route_rule_id, &interface_type_denied);
	if (!route_is_allowed) {
		// If the route is blocked, treat the lookup as a drop
		returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_DROP;
		memset(&returned_result->routing_result_parameter, 0, sizeof(returned_result->routing_result_parameter));

		if (interface_type_denied != IFRTYPE_FUNCTIONAL_UNKNOWN) {
			necp_send_application_interface_denied_event(pid, application_uuid, interface_type_denied);
		}
	}

	if (returned_route != NULL) {
		*returned_route = rt;
	} else if (rt != NULL) {
		rtfree(rt);
		rt = NULL;
	}

	lck_rw_done(&necp_kernel_policy_lock);

	return (0);
}
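
// Evaluate a single socket-level kernel policy against a socket's matching
// properties (application IDs, entitlements, domain, account, pid, uid,
// traffic class, protocol, and local/remote addresses). Returns TRUE only if
// every condition in the policy's condition mask passes, honoring the
// negated-condition mask for each check.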
static bool
necp_socket_check_policy(struct necp_kernel_socket_policy *kernel_policy, necp_app_id app_id, necp_app_id real_app_id, errno_t cred_result, u_int32_t account_id, struct substring domain, u_int8_t domain_dot_count, pid_t pid, uid_t uid, u_int32_t bound_interface_index, u_int32_t traffic_class, u_int16_t protocol, union necp_sockaddr_union *local, union necp_sockaddr_union *remote, proc_t proc)
{
	if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES)) {
		if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
			u_int32_t cond_bound_interface_index = kernel_policy->cond_bound_interface ? kernel_policy->cond_bound_interface->if_index : 0;
			if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
				if (bound_interface_index == cond_bound_interface_index) {
					// No match, matches forbidden interface
					return (FALSE);
				}
			} else {
				if (bound_interface_index != cond_bound_interface_index) {
					// No match, does not match required interface
					return (FALSE);
				}
			}
		} else {
			if (bound_interface_index != 0) {
				// No match, requires a non-bound packet
				return (FALSE);
			}
		}
	}

	if (kernel_policy->condition_mask == 0) {
		return (TRUE);
	}

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID) {
		if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_APP_ID) {
			if (app_id == kernel_policy->cond_app_id) {
				// No match, matches forbidden application
				return (FALSE);
			}
		} else {
			if (app_id != kernel_policy->cond_app_id) {
				// No match, does not match required application
				return (FALSE);
			}
		}
	}

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
		if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
			if (real_app_id == kernel_policy->cond_real_app_id) {
				// No match, matches forbidden application
				return (FALSE);
			}
		} else {
			if (real_app_id != kernel_policy->cond_real_app_id) {
				// No match, does not match required application
				return (FALSE);
			}
		}
	}

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT) {
		if (cred_result != 0) {
			// Process is missing entitlement
			return (FALSE);
		}
	}

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT) {
		if (kernel_policy->cond_custom_entitlement_matched == necp_boolean_state_false) {
			// Process is missing entitlement based on previous check
			return (FALSE);
		} else if (kernel_policy->cond_custom_entitlement_matched == necp_boolean_state_unknown) {
			if (kernel_policy->cond_custom_entitlement != NULL) {
				if (proc == NULL) {
					// No process found, cannot check entitlement
					return (FALSE);
				}
				task_t task = proc_task(proc);
				if (task == NULL ||
					!IOTaskHasEntitlement(task, kernel_policy->cond_custom_entitlement)) {
					// Process is missing custom entitlement
					kernel_policy->cond_custom_entitlement_matched = necp_boolean_state_false;
					return (FALSE);
				} else {
					kernel_policy->cond_custom_entitlement_matched = necp_boolean_state_true;
				}
			}
		}
	}

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_DOMAIN) {
		bool domain_matches = necp_hostname_matches_domain(domain, domain_dot_count, kernel_policy->cond_domain, kernel_policy->cond_domain_dot_count);
		if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_DOMAIN) {
			if (domain_matches) {
				// No match, matches forbidden domain
				return (FALSE);
			}
		} else {
			if (!domain_matches) {
				// No match, does not match required domain
				return (FALSE);
			}
		}
	}

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID) {
		if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID) {
			if (account_id == kernel_policy->cond_account_id) {
				// No match, matches forbidden account
				return (FALSE);
			}
		} else {
			if (account_id != kernel_policy->cond_account_id) {
				// No match, does not match required account
				return (FALSE);
			}
		}
	}

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PID) {
		if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_PID) {
			if (pid == kernel_policy->cond_pid) {
				// No match, matches forbidden pid
				return (FALSE);
			}
		} else {
			if (pid != kernel_policy->cond_pid) {
				// No match, does not match required pid
				return (FALSE);
			}
		}
	}

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_UID) {
		if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_UID) {
			if (uid == kernel_policy->cond_uid) {
				// No match, matches forbidden uid
				return (FALSE);
			}
		} else {
			if (uid != kernel_policy->cond_uid) {
				// No match, does not match required uid
				return (FALSE);
			}
		}
	}

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
		if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
			if (traffic_class >= kernel_policy->cond_traffic_class.start_tc &&
				traffic_class <= kernel_policy->cond_traffic_class.end_tc) {
				// No match, matches forbidden traffic class
				return (FALSE);
			}
		} else {
			if (traffic_class < kernel_policy->cond_traffic_class.start_tc ||
				traffic_class > kernel_policy->cond_traffic_class.end_tc) {
				// No match, does not match required traffic class
				return (FALSE);
			}
		}
	}

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
		if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
			if (protocol == kernel_policy->cond_protocol) {
				// No match, matches forbidden protocol
				return (FALSE);
			}
		} else {
			if (protocol != kernel_policy->cond_protocol) {
				// No match, does not match required protocol
				return (FALSE);
			}
		}
	}

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
		if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
			bool inRange = necp_is_addr_in_range((struct sockaddr *)local, (struct sockaddr *)&kernel_policy->cond_local_start, (struct sockaddr *)&kernel_policy->cond_local_end);
			if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
				if (inRange) {
					return (FALSE);
				}
			} else {
				if (!inRange) {
					return (FALSE);
				}
			}
		} else if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
			bool inSubnet = necp_is_addr_in_subnet((struct sockaddr *)local, (struct sockaddr *)&kernel_policy->cond_local_start, kernel_policy->cond_local_prefix);
			if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
				if (inSubnet) {
					return (FALSE);
				}
			} else {
				if (!inSubnet) {
					return (FALSE);
				}
			}
		}
	}

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
		if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
			bool inRange = necp_is_addr_in_range((struct sockaddr *)remote, (struct sockaddr *)&kernel_policy->cond_remote_start, (struct sockaddr *)&kernel_policy->cond_remote_end);
			if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
				if (inRange) {
					return (FALSE);
				}
			} else {
				if (!inRange) {
					return (FALSE);
				}
			}
		} else if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
			bool inSubnet = necp_is_addr_in_subnet((struct sockaddr *)remote, (struct sockaddr *)&kernel_policy->cond_remote_start, kernel_policy->cond_remote_prefix);
			if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
				if (inSubnet) {
					return (FALSE);
				}
			} else {
				if (!inSubnet) {
					return (FALSE);
				}
			}
		}
	}

	return (TRUE);
}
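
// Hash the socket info snapshot together with the current policy generation
// count so that a cached match is invalidated when either the socket's
// relevant state or the policy table changes.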
static inline u_int32_t
necp_socket_calc_flowhash_locked(struct necp_socket_info *info)
{
	return (net_flowhash(info, sizeof(*info), necp_kernel_socket_policies_gencount));
}
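
// Gather the fields needed for policy matching from an inpcb into a
// necp_socket_info structure. Only fields required by the currently loaded
// policies (per necp_kernel_socket_policies_condition_mask) are filled in;
// the override parameters, when provided, take precedence over the socket's
// own bound interface and addresses.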
static void
necp_socket_fillout_info_locked(struct inpcb *inp, struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr, u_int32_t override_bound_interface, struct necp_socket_info *info)
{
	struct socket *so = NULL;

	memset(info, 0, sizeof(struct necp_socket_info));

	so = inp->inp_socket;

	if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_PID) {
		info->pid = ((so->so_flags & SOF_DELEGATED) ? so->e_pid : so->last_pid);
	}

	if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_UID) {
		info->uid = kauth_cred_getuid(so->so_cred);
	}

	if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
		info->traffic_class = so->so_traffic_class;
	}

	if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
		if (inp->inp_ip_p) {
			info->protocol = inp->inp_ip_p;
		} else {
			info->protocol = SOCK_PROTO(so);
		}
	}

	if (inp->inp_flags2 & INP2_WANT_APP_POLICY && necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_APP_ID) {
		struct necp_uuid_id_mapping *existing_mapping = necp_uuid_lookup_app_id_locked(((so->so_flags & SOF_DELEGATED) ? so->e_uuid : so->last_uuid));
		if (existing_mapping) {
			info->application_id = existing_mapping->id;
		}

		if (!(so->so_flags & SOF_DELEGATED)) {
			info->real_application_id = info->application_id;
		} else if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
			struct necp_uuid_id_mapping *real_existing_mapping = necp_uuid_lookup_app_id_locked(so->last_uuid);
			if (real_existing_mapping) {
				info->real_application_id = real_existing_mapping->id;
			}
		}

		if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT) {
			info->cred_result = priv_check_cred(so->so_cred, PRIV_NET_PRIVILEGED_NECP_MATCH, 0);
			if (info->cred_result != 0) {
				// Process does not have entitlement, check the parent process
				necp_get_parent_cred_result(NULL, info);
			}
		}
	}

	if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID && inp->inp_necp_attributes.inp_account != NULL) {
		struct necp_string_id_mapping *existing_mapping = necp_lookup_string_to_id_locked(&necp_account_id_list, inp->inp_necp_attributes.inp_account);
		if (existing_mapping) {
			info->account_id = existing_mapping->id;
		}
	}

	if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_DOMAIN) {
		info->domain = inp->inp_necp_attributes.inp_domain;
	}

	if (override_bound_interface) {
		info->bound_interface_index = override_bound_interface;
	} else {
		if ((inp->inp_flags & INP_BOUND_IF) && inp->inp_boundifp) {
			info->bound_interface_index = inp->inp_boundifp->if_index;
		}
	}

	if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_ADDRESS_TYPE_CONDITIONS) {
		if (inp->inp_vflag & INP_IPV4) {
			if (override_local_addr) {
				if (override_local_addr->sa_len <= sizeof(struct sockaddr_in)) {
					memcpy(&info->local_addr, override_local_addr, override_local_addr->sa_len);
				}
			} else {
				((struct sockaddr_in *)&info->local_addr)->sin_family = AF_INET;
				((struct sockaddr_in *)&info->local_addr)->sin_len = sizeof(struct sockaddr_in);
				((struct sockaddr_in *)&info->local_addr)->sin_port = inp->inp_lport;
				memcpy(&((struct sockaddr_in *)&info->local_addr)->sin_addr, &inp->inp_laddr, sizeof(struct in_addr));
			}

			if (override_remote_addr) {
				if (override_remote_addr->sa_len <= sizeof(struct sockaddr_in)) {
					memcpy(&info->remote_addr, override_remote_addr, override_remote_addr->sa_len);
				}
			} else {
				((struct sockaddr_in *)&info->remote_addr)->sin_family = AF_INET;
				((struct sockaddr_in *)&info->remote_addr)->sin_len = sizeof(struct sockaddr_in);
				((struct sockaddr_in *)&info->remote_addr)->sin_port = inp->inp_fport;
				memcpy(&((struct sockaddr_in *)&info->remote_addr)->sin_addr, &inp->inp_faddr, sizeof(struct in_addr));
			}
		} else if (inp->inp_vflag & INP_IPV6) {
			if (override_local_addr) {
				if (override_local_addr->sa_len <= sizeof(struct sockaddr_in6)) {
					memcpy(&info->local_addr, override_local_addr, override_local_addr->sa_len);
				}
			} else {
				((struct sockaddr_in6 *)&info->local_addr)->sin6_family = AF_INET6;
				((struct sockaddr_in6 *)&info->local_addr)->sin6_len = sizeof(struct sockaddr_in6);
				((struct sockaddr_in6 *)&info->local_addr)->sin6_port = inp->inp_lport;
				memcpy(&((struct sockaddr_in6 *)&info->local_addr)->sin6_addr, &inp->in6p_laddr, sizeof(struct in6_addr));
			}

			if (override_remote_addr) {
				if (override_remote_addr->sa_len <= sizeof(struct sockaddr_in6)) {
					memcpy(&info->remote_addr, override_remote_addr, override_remote_addr->sa_len);
				}
			} else {
				((struct sockaddr_in6 *)&info->remote_addr)->sin6_family = AF_INET6;
				((struct sockaddr_in6 *)&info->remote_addr)->sin6_len = sizeof(struct sockaddr_in6);
				((struct sockaddr_in6 *)&info->remote_addr)->sin6_port = inp->inp_fport;
				memcpy(&((struct sockaddr_in6 *)&info->remote_addr)->sin6_addr, &inp->in6p_faddr, sizeof(struct in6_addr));
			}
		}
	}
}
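
// Walk an ordered policy search array and return the first policy whose
// conditions match the given socket info. Filter, route rule, service, and
// netagent results encountered along the way are accumulated into the
// optional return pointers; SKIP results advance the search past the
// indicated policy order.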
static inline struct necp_kernel_socket_policy *
necp_socket_find_policy_match_with_info_locked(struct necp_kernel_socket_policy **policy_search_array, struct necp_socket_info *info, necp_kernel_policy_filter *return_filter, u_int32_t *return_route_rule_id, necp_kernel_policy_result *return_service_action, necp_kernel_policy_service *return_service, u_int32_t *return_netagent_array, size_t netagent_array_count, proc_t proc)
{
	struct necp_kernel_socket_policy *matched_policy = NULL;
	u_int32_t skip_order = 0;
	u_int32_t skip_session_order = 0;
	u_int32_t route_rule_id_array[MAX_AGGREGATE_ROUTE_RULES];
	size_t route_rule_id_count = 0;
	int i;
	size_t netagent_cursor = 0;

	// Pre-process domain for quick matching
	struct substring domain_substring = necp_trim_dots_and_stars(info->domain, info->domain ? strlen(info->domain) : 0);
	u_int8_t domain_dot_count = necp_count_dots(domain_substring.string, domain_substring.length);

	if (return_filter) {
		*return_filter = 0;
	}

	if (return_route_rule_id) {
		*return_route_rule_id = 0;
	}

	if (return_service_action) {
		*return_service_action = 0;
	}

	if (return_service) {
		return_service->identifier = 0;
		return_service->data = 0;
	}

	if (policy_search_array != NULL) {
		for (i = 0; policy_search_array[i] != NULL; i++) {
			if (necp_drop_all_order != 0 && policy_search_array[i]->session_order >= necp_drop_all_order) {
				// We've hit a drop all rule
				break;
			}
			if (skip_session_order && policy_search_array[i]->session_order >= skip_session_order) {
				// Done skipping
				skip_order = 0;
				skip_session_order = 0;
			}
			if (skip_order) {
				if (policy_search_array[i]->order < skip_order) {
					// Skip this policy
					continue;
				} else {
					// Done skipping
					skip_order = 0;
					skip_session_order = 0;
				}
			} else if (skip_session_order) {
				// Skip this policy
				continue;
			}

			if (necp_socket_check_policy(policy_search_array[i], info->application_id, info->real_application_id, info->cred_result, info->account_id, domain_substring, domain_dot_count, info->pid, info->uid, info->bound_interface_index, info->traffic_class, info->protocol, &info->local_addr, &info->remote_addr, proc)) {
				if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_SOCKET_FILTER) {
					if (return_filter && *return_filter == 0) {
						*return_filter = policy_search_array[i]->result_parameter.filter_control_unit;
						if (necp_debug > 1) {
							NECPLOG(LOG_DEBUG, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) Filter %d", info->application_id, info->real_application_id, info->bound_interface_index, info->protocol, policy_search_array[i]->result_parameter.filter_control_unit);
						}
					}
					continue;
				} else if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_ROUTE_RULES) {
					if (return_route_rule_id && route_rule_id_count < MAX_AGGREGATE_ROUTE_RULES) {
						route_rule_id_array[route_rule_id_count++] = policy_search_array[i]->result_parameter.route_rule_id;
						if (necp_debug > 1) {
							NECPLOG(LOG_DEBUG, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) Route Rule %d", info->application_id, info->real_application_id, info->bound_interface_index, info->protocol, policy_search_array[i]->result_parameter.route_rule_id);
						}
					}
					continue;
				} else if (necp_kernel_socket_result_is_trigger_service_type(policy_search_array[i])) {
					if (return_service_action && *return_service_action == 0) {
						*return_service_action = policy_search_array[i]->result;
						if (necp_debug > 1) {
							NECPLOG(LOG_DEBUG, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) Service Action %d", info->application_id, info->real_application_id, info->bound_interface_index, info->protocol, policy_search_array[i]->result);
						}
					}
					if (return_service && return_service->identifier == 0) {
						return_service->identifier = policy_search_array[i]->result_parameter.service.identifier;
						return_service->data = policy_search_array[i]->result_parameter.service.data;
						if (necp_debug > 1) {
							NECPLOG(LOG_DEBUG, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) Service ID %d Data %d", info->application_id, info->real_application_id, info->bound_interface_index, info->protocol, policy_search_array[i]->result_parameter.service.identifier, policy_search_array[i]->result_parameter.service.data);
						}
					}
					continue;
				} else if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_USE_NETAGENT) {
					if (return_netagent_array != NULL &&
						netagent_cursor < netagent_array_count) {
						return_netagent_array[netagent_cursor] = policy_search_array[i]->result_parameter.netagent_id;
						netagent_cursor++;
						if (necp_debug > 1) {
							NECPLOG(LOG_DEBUG, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) Use Netagent %d", info->application_id, info->real_application_id, info->bound_interface_index, info->protocol, policy_search_array[i]->result_parameter.netagent_id);
						}
					}
					continue;
				}

				// Matched policy is a skip. Do skip and continue.
				if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
					skip_order = policy_search_array[i]->result_parameter.skip_policy_order;
					skip_session_order = policy_search_array[i]->session_order + 1;
					continue;
				}

				// Passed all tests, found a match
				matched_policy = policy_search_array[i];
				break;
			}
		}
	}

	if (route_rule_id_count == 1) {
		*return_route_rule_id = route_rule_id_array[0];
	} else if (route_rule_id_count > 1) {
		*return_route_rule_id = necp_create_aggregate_route_rule(route_rule_id_array);
	}

	return (matched_policy);
}
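
// Check whether the socket's local address is one of the addresses assigned
// to the given interface, comparing against the interface's address list for
// the socket's address family.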
static bool
necp_socket_uses_interface(struct inpcb *inp, u_int32_t interface_index)
{
	bool found_match = FALSE;
	errno_t result = 0;
	ifaddr_t *addresses = NULL;
	union necp_sockaddr_union address_storage;
	int i;
	int family = AF_INET;
	ifnet_t interface = ifindex2ifnet[interface_index];

	if (inp == NULL || interface == NULL) {
		return (FALSE);
	}

	if (inp->inp_vflag & INP_IPV4) {
		family = AF_INET;
	} else if (inp->inp_vflag & INP_IPV6) {
		family = AF_INET6;
	}

	result = ifnet_get_address_list_family(interface, &addresses, family);
	if (result != 0) {
		NECPLOG(LOG_ERR, "Failed to get address list for %s%d", ifnet_name(interface), ifnet_unit(interface));
		return (FALSE);
	}

	for (i = 0; addresses[i] != NULL; i++) {
		if (ifaddr_address(addresses[i], &address_storage.sa, sizeof(address_storage)) == 0) {
			if (family == AF_INET) {
				if (memcmp(&address_storage.sin.sin_addr, &inp->inp_laddr, sizeof(inp->inp_laddr)) == 0) {
					found_match = TRUE;
					goto done;
				}
			} else if (family == AF_INET6) {
				if (memcmp(&address_storage.sin6.sin6_addr, &inp->in6p_laddr, sizeof(inp->in6p_laddr)) == 0) {
					found_match = TRUE;
					goto done;
				}
			}
		}
	}

done:
	ifnet_free_address_list(addresses);
	addresses = NULL;
	return (found_match);
}
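
// A socket counts as connected for these checks if it is connecting,
// connected, or disconnecting.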
static inline bool
necp_socket_is_connected(struct inpcb *inp)
{
	return (inp->inp_socket->so_state & (SS_ISCONNECTING | SS_ISCONNECTED | SS_ISDISCONNECTING));
}
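
// Bypass socket-level policy evaluation for loopback traffic (when
// necp_pass_loopback is enabled) and for inter-coprocessor traffic.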
static inline bool
necp_socket_bypass(struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr, struct inpcb *inp)
{
	if (necp_pass_loopback > 0 && necp_is_loopback(override_local_addr, override_remote_addr, inp, NULL)) {
		return (TRUE);
	} else if (necp_is_intcoproc(inp, NULL)) {
		return (TRUE);
	}

	return (FALSE);
}
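
// Socket-layer policy evaluation entry point. Fills out the socket info,
// consults the app-ID-bucketed policy map, verifies any required services
// and network agents, and caches the result in the inpcb (keyed by flowhash
// and policy generation count) so unchanged sockets are not re-evaluated.
//
// A rough call-site sketch (illustrative only; actual callers live in the
// socket and protocol layers):
//
//	necp_kernel_policy_id policy_id =
//	    necp_socket_find_policy_match(inp, NULL, NULL, 0);
//	if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_DROP) {
//		// refuse to send on this socket
//	}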
necp_kernel_policy_id
necp_socket_find_policy_match(struct inpcb *inp, struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr, u_int32_t override_bound_interface)
{
	struct socket *so = NULL;
	necp_kernel_policy_filter filter_control_unit = 0;
	u_int32_t route_rule_id = 0;
	struct necp_kernel_socket_policy *matched_policy = NULL;
	necp_kernel_policy_id matched_policy_id = NECP_KERNEL_POLICY_ID_NONE;
	necp_kernel_policy_result service_action = 0;
	necp_kernel_policy_service service = { 0, 0 };

	u_int32_t netagent_ids[NECP_MAX_NETAGENTS];
	memset(&netagent_ids, 0, sizeof(netagent_ids));
	int netagent_cursor;

	struct necp_socket_info info;

	if (inp == NULL) {
		return (NECP_KERNEL_POLICY_ID_NONE);
	}

	// Ignore invalid addresses
	if (override_local_addr != NULL &&
		!necp_address_is_valid(override_local_addr)) {
		override_local_addr = NULL;
	}
	if (override_remote_addr != NULL &&
		!necp_address_is_valid(override_remote_addr)) {
		override_remote_addr = NULL;
	}

	so = inp->inp_socket;

	// Don't lock. Possible race condition, but we don't want the performance hit.
	if (necp_kernel_socket_policies_count == 0 ||
		(!(inp->inp_flags2 & INP2_WANT_APP_POLICY) && necp_kernel_socket_policies_non_app_count == 0)) {
		if (necp_drop_all_order > 0) {
			inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
			inp->inp_policyresult.policy_gencount = 0;
			inp->inp_policyresult.app_id = 0;
			inp->inp_policyresult.flowhash = 0;
			inp->inp_policyresult.results.filter_control_unit = 0;
			inp->inp_policyresult.results.route_rule_id = 0;
			if (necp_socket_bypass(override_local_addr, override_remote_addr, inp)) {
				inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_PASS;
			} else {
				inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_DROP;
			}
		}
		return (NECP_KERNEL_POLICY_ID_NONE);
	}

	// Check for loopback exception
	if (necp_socket_bypass(override_local_addr, override_remote_addr, inp)) {
		// Mark socket as a pass
		inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
		inp->inp_policyresult.policy_gencount = 0;
		inp->inp_policyresult.app_id = 0;
		inp->inp_policyresult.flowhash = 0;
		inp->inp_policyresult.results.filter_control_unit = 0;
		inp->inp_policyresult.results.route_rule_id = 0;
		inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_PASS;
		return (NECP_KERNEL_POLICY_ID_NONE);
	}

	// Lock
	lck_rw_lock_shared(&necp_kernel_policy_lock);

	necp_socket_fillout_info_locked(inp, override_local_addr, override_remote_addr, override_bound_interface, &info);
	inp->inp_policyresult.app_id = info.application_id;

	u_int32_t flowhash = necp_socket_calc_flowhash_locked(&info);
	if (inp->inp_policyresult.policy_id != NECP_KERNEL_POLICY_ID_NONE &&
		inp->inp_policyresult.policy_gencount == necp_kernel_socket_policies_gencount &&
		inp->inp_policyresult.flowhash == flowhash) {
		// If already matched this socket on this generation of table, skip

		// Unlock
		lck_rw_done(&necp_kernel_policy_lock);

		return (inp->inp_policyresult.policy_id);
	}

	// Match socket to policy
	matched_policy = necp_socket_find_policy_match_with_info_locked(necp_kernel_socket_policies_map[NECP_SOCKET_MAP_APP_ID_TO_BUCKET(info.application_id)], &info, &filter_control_unit, &route_rule_id, &service_action, &service, netagent_ids, NECP_MAX_NETAGENTS, current_proc());
	// If the socket matched a scoped service policy, mark as Drop if not registered.
	// This covers the cases in which a service is required (on demand) but hasn't started yet.
	if ((service_action == NECP_KERNEL_POLICY_RESULT_TRIGGER_SCOPED ||
		 service_action == NECP_KERNEL_POLICY_RESULT_NO_TRIGGER_SCOPED) &&
		service.identifier != 0 &&
		service.identifier != NECP_NULL_SERVICE_ID) {
		bool service_is_registered = FALSE;
		struct necp_service_registration *service_registration = NULL;
		LIST_FOREACH(service_registration, &necp_registered_service_list, kernel_chain) {
			if (service.identifier == service_registration->service_id) {
				service_is_registered = TRUE;
				break;
			}
		}
		if (!service_is_registered) {
			// Mark socket as a drop if service is not registered
			inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
			inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount;
			inp->inp_policyresult.flowhash = flowhash;
			inp->inp_policyresult.results.filter_control_unit = 0;
			inp->inp_policyresult.results.route_rule_id = 0;
			inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_DROP;

			if (necp_debug > 1) {
				NECPLOG(LOG_DEBUG, "Socket Policy: (BoundInterface %d Proto %d) Dropping packet because service is not registered", info.bound_interface_index, info.protocol);
			}

			// Unlock
			lck_rw_done(&necp_kernel_policy_lock);
			return (NECP_KERNEL_POLICY_ID_NONE);
		}
	}

	// Verify netagents
	for (netagent_cursor = 0; netagent_cursor < NECP_MAX_NETAGENTS; netagent_cursor++) {
		struct necp_uuid_id_mapping *mapping = NULL;
		u_int32_t netagent_id = netagent_ids[netagent_cursor];
		if (netagent_id == 0) {
			break;
		}
		mapping = necp_uuid_lookup_uuid_with_service_id_locked(netagent_id);
		if (mapping != NULL) {
			u_int32_t agent_flags = 0;
			agent_flags = netagent_get_flags(mapping->uuid);
			if (agent_flags & NETAGENT_FLAG_REGISTERED) {
				if (agent_flags & NETAGENT_FLAG_ACTIVE) {
					continue;
				} else if ((agent_flags & NETAGENT_FLAG_VOLUNTARY) == 0) {
					if (agent_flags & NETAGENT_FLAG_KERNEL_ACTIVATED) {
						int trigger_error = 0;
						trigger_error = netagent_kernel_trigger(mapping->uuid);
						if (necp_debug > 1) {
							NECPLOG(LOG_DEBUG, "Socket Policy: Triggering inactive agent, error %d", trigger_error);
						}
					}

					// Mark socket as a drop if required agent is not active
					inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
					inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount;
					inp->inp_policyresult.flowhash = flowhash;
					inp->inp_policyresult.results.filter_control_unit = 0;
					inp->inp_policyresult.results.route_rule_id = 0;
					inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_DROP;

					if (necp_debug > 1) {
						NECPLOG(LOG_DEBUG, "Socket Policy: (BoundInterface %d Proto %d) Dropping packet because agent is not active", info.bound_interface_index, info.protocol);
					}

					// Unlock
					lck_rw_done(&necp_kernel_policy_lock);
					return (NECP_KERNEL_POLICY_ID_NONE);
				}
			}
		}
	}

	if (matched_policy) {
		matched_policy_id = matched_policy->id;
		inp->inp_policyresult.policy_id = matched_policy->id;
		inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount;
		inp->inp_policyresult.flowhash = flowhash;
		inp->inp_policyresult.results.filter_control_unit = filter_control_unit;
		inp->inp_policyresult.results.route_rule_id = route_rule_id;
		inp->inp_policyresult.results.result = matched_policy->result;
		memcpy(&inp->inp_policyresult.results.result_parameter, &matched_policy->result_parameter, sizeof(matched_policy->result_parameter));

		if (necp_socket_is_connected(inp) &&
			(matched_policy->result == NECP_KERNEL_POLICY_RESULT_DROP ||
			 (matched_policy->result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL && !necp_socket_uses_interface(inp, matched_policy->result_parameter.tunnel_interface_index)))) {
			if (necp_debug) {
				NECPLOG(LOG_DEBUG, "Marking socket in state %d as defunct", so->so_state);
			}
			sosetdefunct(current_proc(), so, SHUTDOWN_SOCKET_LEVEL_NECP | SHUTDOWN_SOCKET_LEVEL_DISCONNECT_ALL, TRUE);
		} else if (necp_socket_is_connected(inp) &&
			matched_policy->result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL &&
			info.protocol == IPPROTO_TCP) {
			// Reset MSS on TCP socket if tunnel policy changes
			tcp_mtudisc(inp, 0);
		}

		if (necp_debug > 1) {
			NECPLOG(LOG_DEBUG, "Socket Policy: %p (BoundInterface %d Proto %d) Policy %d Result %d Parameter %d", inp->inp_socket, info.bound_interface_index, info.protocol, matched_policy->id, matched_policy->result, matched_policy->result_parameter.tunnel_interface_index);
		}
	} else if (necp_drop_all_order > 0) {
		// Mark socket as a drop if set
		inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
		inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount;
		inp->inp_policyresult.flowhash = flowhash;
		inp->inp_policyresult.results.filter_control_unit = 0;
		inp->inp_policyresult.results.route_rule_id = 0;
		inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_DROP;
	} else {
		// Mark non-matching socket so we don't re-check it
		inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
		inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount;
		inp->inp_policyresult.flowhash = flowhash;
		inp->inp_policyresult.results.filter_control_unit = filter_control_unit; // We may have matched a filter, so mark it!
		inp->inp_policyresult.results.route_rule_id = route_rule_id; // We may have matched a route rule, so mark it!
		inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_NONE;
	}

	// Unlock
	lck_rw_done(&necp_kernel_policy_lock);

	return (matched_policy_id);
}
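
// Evaluate a single IP-output policy against a packet's properties (the
// policy ID stamped on the packet by the socket layer, bound and last
// interface indices, protocol, and local/remote addresses), honoring the
// negated-condition mask.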
static bool
necp_ip_output_check_policy(struct necp_kernel_ip_output_policy *kernel_policy, necp_kernel_policy_id socket_policy_id, u_int32_t bound_interface_index, u_int32_t last_interface_index, u_int16_t protocol, union necp_sockaddr_union *local, union necp_sockaddr_union *remote)
{
	if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES)) {
		if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
			u_int32_t cond_bound_interface_index = kernel_policy->cond_bound_interface ? kernel_policy->cond_bound_interface->if_index : 0;
			if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
				if (bound_interface_index == cond_bound_interface_index) {
					// No match, matches forbidden interface
					return (FALSE);
				}
			} else {
				if (bound_interface_index != cond_bound_interface_index) {
					// No match, does not match required interface
					return (FALSE);
				}
			}
		} else {
			if (bound_interface_index != 0) {
				// No match, requires a non-bound packet
				return (FALSE);
			}
		}
	}

	if (kernel_policy->condition_mask == 0) {
		return (TRUE);
	}

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID) {
		if (socket_policy_id != kernel_policy->cond_policy_id) {
			// No match, does not match required id
			return (FALSE);
		}
	}

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LAST_INTERFACE) {
		if (last_interface_index != kernel_policy->cond_last_interface_index) {
			return (FALSE);
		}
	}

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
		if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
			if (protocol == kernel_policy->cond_protocol) {
				// No match, matches forbidden protocol
				return (FALSE);
			}
		} else {
			if (protocol != kernel_policy->cond_protocol) {
				// No match, does not match required protocol
				return (FALSE);
			}
		}
	}

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
		if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
			bool inRange = necp_is_addr_in_range((struct sockaddr *)local, (struct sockaddr *)&kernel_policy->cond_local_start, (struct sockaddr *)&kernel_policy->cond_local_end);
			if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
				if (inRange) {
					return (FALSE);
				}
			} else {
				if (!inRange) {
					return (FALSE);
				}
			}
		} else if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
			bool inSubnet = necp_is_addr_in_subnet((struct sockaddr *)local, (struct sockaddr *)&kernel_policy->cond_local_start, kernel_policy->cond_local_prefix);
			if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
				if (inSubnet) {
					return (FALSE);
				}
			} else {
				if (!inSubnet) {
					return (FALSE);
				}
			}
		}
	}

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
		if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
			bool inRange = necp_is_addr_in_range((struct sockaddr *)remote, (struct sockaddr *)&kernel_policy->cond_remote_start, (struct sockaddr *)&kernel_policy->cond_remote_end);
			if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
				if (inRange) {
					return (FALSE);
				}
			} else {
				if (!inRange) {
					return (FALSE);
				}
			}
		} else if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
			bool inSubnet = necp_is_addr_in_subnet((struct sockaddr *)remote, (struct sockaddr *)&kernel_policy->cond_remote_start, kernel_policy->cond_remote_prefix);
			if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
				if (inSubnet) {
					return (FALSE);
				}
			} else {
				if (!inSubnet) {
					return (FALSE);
				}
			}
		}
	}

	return (TRUE);
}
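
// Walk the IP-output policy bucket for the given socket policy ID and return
// the first matching policy, handling drop-all and SKIP ordering the same way
// as the socket-layer search.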
static inline struct necp_kernel_ip_output_policy *
necp_ip_output_find_policy_match_locked(necp_kernel_policy_id socket_policy_id, u_int32_t bound_interface_index, u_int32_t last_interface_index, u_int16_t protocol, union necp_sockaddr_union *local_addr, union necp_sockaddr_union *remote_addr)
{
	u_int32_t skip_order = 0;
	u_int32_t skip_session_order = 0;
	int i;
	struct necp_kernel_ip_output_policy *matched_policy = NULL;
	struct necp_kernel_ip_output_policy **policy_search_array = necp_kernel_ip_output_policies_map[NECP_IP_OUTPUT_MAP_ID_TO_BUCKET(socket_policy_id)];
	if (policy_search_array != NULL) {
		for (i = 0; policy_search_array[i] != NULL; i++) {
			if (necp_drop_all_order != 0 && policy_search_array[i]->session_order >= necp_drop_all_order) {
				// We've hit a drop all rule
				break;
			}
			if (skip_session_order && policy_search_array[i]->session_order >= skip_session_order) {
				// Done skipping
				skip_order = 0;
				skip_session_order = 0;
			}
			if (skip_order) {
				if (policy_search_array[i]->order < skip_order) {
					// Skip this policy
					continue;
				} else {
					// Done skipping
					skip_order = 0;
					skip_session_order = 0;
				}
			} else if (skip_session_order) {
				// Skip this policy
				continue;
			}
			if (necp_ip_output_check_policy(policy_search_array[i], socket_policy_id, bound_interface_index, last_interface_index, protocol, local_addr, remote_addr)) {
				// Passed all tests, found a match
				matched_policy = policy_search_array[i];

				if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
					skip_order = policy_search_array[i]->result_parameter.skip_policy_order;
					skip_session_order = policy_search_array[i]->session_order + 1;
					continue;
				}

				break;
			}
		}
	}

	return (matched_policy);
}
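
// Bypass IP-output policy evaluation for loopback packets, keepalive offload
// packets (when necp_pass_keepalives is enabled), and inter-coprocessor
// packets.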
static bool
necp_output_bypass(struct mbuf *packet)
{
	if (necp_pass_loopback > 0 && necp_is_loopback(NULL, NULL, NULL, packet)) {
		return (TRUE);
	}
	if (necp_pass_keepalives > 0 && necp_get_is_keepalive_from_packet(packet)) {
		return (TRUE);
	}
	if (necp_is_intcoproc(NULL, packet)) {
		return (TRUE);
	}

	return (FALSE);
}
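
// IPv4 output-path policy evaluation. Extracts the header fields and ports
// needed for matching from the outgoing mbuf, then looks up the matching
// IP-output policy under the kernel policy lock.
//
// A rough call-site sketch (illustrative only):
//
//	necp_kernel_policy_result necp_result = 0;
//	necp_kernel_policy_result_parameter necp_result_parameter;
//	necp_kernel_policy_id policy_id =
//	    necp_ip_output_find_policy_match(m, flags, ipoa, &necp_result, &necp_result_parameter);
//	if (necp_result == NECP_KERNEL_POLICY_RESULT_DROP) {
//		// drop the packet
//	}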
necp_kernel_policy_id
necp_ip_output_find_policy_match(struct mbuf *packet, int flags, struct ip_out_args *ipoa, necp_kernel_policy_result *result, necp_kernel_policy_result_parameter *result_parameter)
{
	struct ip *ip = NULL;
	int hlen = sizeof(struct ip);
	necp_kernel_policy_id socket_policy_id = NECP_KERNEL_POLICY_ID_NONE;
	necp_kernel_policy_id matched_policy_id = NECP_KERNEL_POLICY_ID_NONE;
	struct necp_kernel_ip_output_policy *matched_policy = NULL;
	u_int16_t protocol = 0;
	u_int32_t bound_interface_index = 0;
	u_int32_t last_interface_index = 0;
	union necp_sockaddr_union local_addr;
	union necp_sockaddr_union remote_addr;

	if (result) {
		*result = 0;
	}

	if (result_parameter) {
		memset(result_parameter, 0, sizeof(*result_parameter));
	}

	if (packet == NULL) {
		return (NECP_KERNEL_POLICY_ID_NONE);
	}

	socket_policy_id = necp_get_policy_id_from_packet(packet);

	// Exit early for an empty list
	// Don't lock. Possible race condition, but we don't want the performance hit.
	if (necp_kernel_ip_output_policies_count == 0 ||
		((socket_policy_id == NECP_KERNEL_POLICY_ID_NONE) && necp_kernel_ip_output_policies_non_id_count == 0)) {
		if (necp_drop_all_order > 0) {
			matched_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
			if (result) {
				if (necp_output_bypass(packet)) {
					*result = NECP_KERNEL_POLICY_RESULT_PASS;
				} else {
					*result = NECP_KERNEL_POLICY_RESULT_DROP;
				}
			}
		}

		return (matched_policy_id);
	}

	// Check for loopback exception
	if (necp_output_bypass(packet)) {
		matched_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
		if (result) {
			*result = NECP_KERNEL_POLICY_RESULT_PASS;
		}
		return (matched_policy_id);
	}

	last_interface_index = necp_get_last_interface_index_from_packet(packet);

	// Process packet to get relevant fields
	ip = mtod(packet, struct ip *);
#ifdef _IP_VHL
	hlen = _IP_VHL_HL(ip->ip_vhl) << 2;
#else
	hlen = ip->ip_hl << 2;
#endif

	protocol = ip->ip_p;

	if ((flags & IP_OUTARGS) && (ipoa != NULL) &&
		(ipoa->ipoa_flags & IPOAF_BOUND_IF) &&
		ipoa->ipoa_boundif != IFSCOPE_NONE) {
		bound_interface_index = ipoa->ipoa_boundif;
	}

	local_addr.sin.sin_family = AF_INET;
	local_addr.sin.sin_len = sizeof(struct sockaddr_in);
	memcpy(&local_addr.sin.sin_addr, &ip->ip_src, sizeof(ip->ip_src));

	remote_addr.sin.sin_family = AF_INET;
	remote_addr.sin.sin_len = sizeof(struct sockaddr_in);
	memcpy(&((struct sockaddr_in *)&remote_addr)->sin_addr, &ip->ip_dst, sizeof(ip->ip_dst));

	switch (protocol) {
		case IPPROTO_TCP: {
			struct tcphdr th;
			if ((int)(hlen + sizeof(th)) <= packet->m_pkthdr.len) {
				m_copydata(packet, hlen, sizeof(th), (u_int8_t *)&th);
				((struct sockaddr_in *)&local_addr)->sin_port = th.th_sport;
				((struct sockaddr_in *)&remote_addr)->sin_port = th.th_dport;
			}
			break;
		}
		case IPPROTO_UDP: {
			struct udphdr uh;
			if ((int)(hlen + sizeof(uh)) <= packet->m_pkthdr.len) {
				m_copydata(packet, hlen, sizeof(uh), (u_int8_t *)&uh);
				((struct sockaddr_in *)&local_addr)->sin_port = uh.uh_sport;
				((struct sockaddr_in *)&remote_addr)->sin_port = uh.uh_dport;
			}
			break;
		}
		default: {
			((struct sockaddr_in *)&local_addr)->sin_port = 0;
			((struct sockaddr_in *)&remote_addr)->sin_port = 0;
			break;
		}
	}

	// Match packet to policy
	lck_rw_lock_shared(&necp_kernel_policy_lock);
	matched_policy = necp_ip_output_find_policy_match_locked(socket_policy_id, bound_interface_index, last_interface_index, protocol, &local_addr, &remote_addr);
	if (matched_policy) {
		matched_policy_id = matched_policy->id;
		if (result) {
			*result = matched_policy->result;
		}

		if (result_parameter) {
			memcpy(result_parameter, &matched_policy->result_parameter, sizeof(matched_policy->result_parameter));
		}

		if (necp_debug > 1) {
			NECPLOG(LOG_DEBUG, "IP Output: (ID %d BoundInterface %d LastInterface %d Proto %d) Policy %d Result %d Parameter %d", socket_policy_id, bound_interface_index, last_interface_index, protocol, matched_policy->id, matched_policy->result, matched_policy->result_parameter.tunnel_interface_index);
		}
	} else if (necp_drop_all_order > 0) {
		matched_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
		if (result) {
			*result = NECP_KERNEL_POLICY_RESULT_DROP;
		}
	}

	lck_rw_done(&necp_kernel_policy_lock);

	return (matched_policy_id);
}
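
// IPv6 output-path policy evaluation; the counterpart of
// necp_ip_output_find_policy_match() for IPv6 packets, using ip6_lasthdr()
// to locate the transport header.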
necp_kernel_policy_id
necp_ip6_output_find_policy_match(struct mbuf *packet, int flags, struct ip6_out_args *ip6oa, necp_kernel_policy_result *result, necp_kernel_policy_result_parameter *result_parameter)
{
	struct ip6_hdr *ip6 = NULL;
	int next = -1;
	int offset = 0;
	necp_kernel_policy_id socket_policy_id = NECP_KERNEL_POLICY_ID_NONE;
	necp_kernel_policy_id matched_policy_id = NECP_KERNEL_POLICY_ID_NONE;
	struct necp_kernel_ip_output_policy *matched_policy = NULL;
	u_int16_t protocol = 0;
	u_int32_t bound_interface_index = 0;
	u_int32_t last_interface_index = 0;
	union necp_sockaddr_union local_addr;
	union necp_sockaddr_union remote_addr;

	if (result) {
		*result = 0;
	}

	if (result_parameter) {
		memset(result_parameter, 0, sizeof(*result_parameter));
	}

	if (packet == NULL) {
		return (NECP_KERNEL_POLICY_ID_NONE);
	}

	socket_policy_id = necp_get_policy_id_from_packet(packet);

	// Exit early for an empty list
	// Don't lock. Possible race condition, but we don't want the performance hit.
	if (necp_kernel_ip_output_policies_count == 0 ||
		((socket_policy_id == NECP_KERNEL_POLICY_ID_NONE) && necp_kernel_ip_output_policies_non_id_count == 0)) {
		if (necp_drop_all_order > 0) {
			matched_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
			if (result) {
				if (necp_output_bypass(packet)) {
					*result = NECP_KERNEL_POLICY_RESULT_PASS;
				} else {
					*result = NECP_KERNEL_POLICY_RESULT_DROP;
				}
			}
		}

		return (matched_policy_id);
	}

	// Check for loopback exception
	if (necp_output_bypass(packet)) {
		matched_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
		if (result) {
			*result = NECP_KERNEL_POLICY_RESULT_PASS;
		}
		return (matched_policy_id);
	}

	last_interface_index = necp_get_last_interface_index_from_packet(packet);

	// Process packet to get relevant fields
	ip6 = mtod(packet, struct ip6_hdr *);

	if ((flags & IPV6_OUTARGS) && (ip6oa != NULL) &&
		(ip6oa->ip6oa_flags & IP6OAF_BOUND_IF) &&
		ip6oa->ip6oa_boundif != IFSCOPE_NONE) {
		bound_interface_index = ip6oa->ip6oa_boundif;
	}

	((struct sockaddr_in6 *)&local_addr)->sin6_family = AF_INET6;
	((struct sockaddr_in6 *)&local_addr)->sin6_len = sizeof(struct sockaddr_in6);
	memcpy(&((struct sockaddr_in6 *)&local_addr)->sin6_addr, &ip6->ip6_src, sizeof(ip6->ip6_src));

	((struct sockaddr_in6 *)&remote_addr)->sin6_family = AF_INET6;
	((struct sockaddr_in6 *)&remote_addr)->sin6_len = sizeof(struct sockaddr_in6);
	memcpy(&((struct sockaddr_in6 *)&remote_addr)->sin6_addr, &ip6->ip6_dst, sizeof(ip6->ip6_dst));

	offset = ip6_lasthdr(packet, 0, IPPROTO_IPV6, &next);
	if (offset >= 0 && packet->m_pkthdr.len >= offset) {
		protocol = next;
		switch (protocol) {
			case IPPROTO_TCP: {
				struct tcphdr th;
				if ((int)(offset + sizeof(th)) <= packet->m_pkthdr.len) {
					m_copydata(packet, offset, sizeof(th), (u_int8_t *)&th);
					((struct sockaddr_in6 *)&local_addr)->sin6_port = th.th_sport;
					((struct sockaddr_in6 *)&remote_addr)->sin6_port = th.th_dport;
				}
				break;
			}
			case IPPROTO_UDP: {
				struct udphdr uh;
				if ((int)(offset + sizeof(uh)) <= packet->m_pkthdr.len) {
					m_copydata(packet, offset, sizeof(uh), (u_int8_t *)&uh);
					((struct sockaddr_in6 *)&local_addr)->sin6_port = uh.uh_sport;
					((struct sockaddr_in6 *)&remote_addr)->sin6_port = uh.uh_dport;
				}
				break;
			}
			default: {
				((struct sockaddr_in6 *)&local_addr)->sin6_port = 0;
				((struct sockaddr_in6 *)&remote_addr)->sin6_port = 0;
				break;
			}
		}
	}

	// Match packet to policy
	lck_rw_lock_shared(&necp_kernel_policy_lock);
	matched_policy = necp_ip_output_find_policy_match_locked(socket_policy_id, bound_interface_index, last_interface_index, protocol, &local_addr, &remote_addr);
	if (matched_policy) {
		matched_policy_id = matched_policy->id;
		if (result) {
			*result = matched_policy->result;
		}

		if (result_parameter) {
			memcpy(result_parameter, &matched_policy->result_parameter, sizeof(matched_policy->result_parameter));
		}

		if (necp_debug > 1) {
			NECPLOG(LOG_DEBUG, "IP6 Output: (ID %d BoundInterface %d LastInterface %d Proto %d) Policy %d Result %d Parameter %d", socket_policy_id, bound_interface_index, last_interface_index, protocol, matched_policy->id, matched_policy->result, matched_policy->result_parameter.tunnel_interface_index);
		}
	} else if (necp_drop_all_order > 0) {
		matched_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
		if (result) {
			*result = NECP_KERNEL_POLICY_RESULT_DROP;
		}
	}

	lck_rw_done(&necp_kernel_policy_lock);

	return (matched_policy_id);
}
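
// Address comparison helpers used by the condition checks above: range,
// range-within-range, and subnet-prefix matching over sockaddr values.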
static bool
necp_is_addr_in_range(struct sockaddr *addr, struct sockaddr *range_start, struct sockaddr *range_end)
{
	int cmp = 0;

	if (addr == NULL || range_start == NULL || range_end == NULL) {
		return (FALSE);
	}

	/* Must be greater than or equal to start */
	cmp = necp_addr_compare(addr, range_start, 1);
	if (cmp != 0 && cmp != 1) {
		return (FALSE);
	}

	/* Must be less than or equal to end */
	cmp = necp_addr_compare(addr, range_end, 1);
	if (cmp != 0 && cmp != -1) {
		return (FALSE);
	}

	return (TRUE);
}
static bool
necp_is_range_in_range(struct sockaddr *inner_range_start, struct sockaddr *inner_range_end, struct sockaddr *range_start, struct sockaddr *range_end)
{
	int cmp = 0;

	if (inner_range_start == NULL || inner_range_end == NULL || range_start == NULL || range_end == NULL) {
		return (FALSE);
	}

	/* Must be greater than or equal to start */
	cmp = necp_addr_compare(inner_range_start, range_start, 1);
	if (cmp != 0 && cmp != 1) {
		return (FALSE);
	}

	/* Must be less than or equal to end */
	cmp = necp_addr_compare(inner_range_end, range_end, 1);
	if (cmp != 0 && cmp != -1) {
		return (FALSE);
	}

	return (TRUE);
}
static bool
necp_is_addr_in_subnet(struct sockaddr *addr, struct sockaddr *subnet_addr, u_int8_t subnet_prefix)
{
	if (addr == NULL || subnet_addr == NULL) {
		return (FALSE);
	}

	if (addr->sa_family != subnet_addr->sa_family || addr->sa_len != subnet_addr->sa_len) {
		return (FALSE);
	}

	switch (addr->sa_family) {
		case AF_INET: {
			if (satosin(subnet_addr)->sin_port != 0 &&
				satosin(addr)->sin_port != satosin(subnet_addr)->sin_port) {
				return (FALSE);
			}
			return (necp_buffer_compare_with_bit_prefix((u_int8_t *)&satosin(addr)->sin_addr, (u_int8_t *)&satosin(subnet_addr)->sin_addr, subnet_prefix));
		}
		case AF_INET6: {
			if (satosin6(subnet_addr)->sin6_port != 0 &&
				satosin6(addr)->sin6_port != satosin6(subnet_addr)->sin6_port) {
				return (FALSE);
			}
			if (satosin6(addr)->sin6_scope_id &&
				satosin6(subnet_addr)->sin6_scope_id &&
				satosin6(addr)->sin6_scope_id != satosin6(subnet_addr)->sin6_scope_id) {
				return (FALSE);
			}
			return (necp_buffer_compare_with_bit_prefix((u_int8_t *)&satosin6(addr)->sin6_addr, (u_int8_t *)&satosin6(subnet_addr)->sin6_addr, subnet_prefix));
		}
		default: {
			return (FALSE);
		}
	}

	return (FALSE);
}
/*
 * Return values:
 * -1: sa1 < sa2
 *  0: sa1 == sa2
 *  1: sa1 > sa2
 *  2: Not comparable or error
 */
static int
necp_addr_compare(struct sockaddr *sa1, struct sockaddr *sa2, int check_port)
{
    int result = 0;
    int port_result = 0;

    if (sa1->sa_family != sa2->sa_family || sa1->sa_len != sa2->sa_len) {
        return (2);
    }

    if (sa1->sa_len == 0) {
        return (0);
    }

    switch (sa1->sa_family) {
        case AF_INET: {
            if (sa1->sa_len != sizeof(struct sockaddr_in)) {
                return (2);
            }

            result = memcmp(&satosin(sa1)->sin_addr.s_addr, &satosin(sa2)->sin_addr.s_addr, sizeof(satosin(sa1)->sin_addr.s_addr));

            if (check_port) {
                if (satosin(sa1)->sin_port < satosin(sa2)->sin_port) {
                    port_result = -1;
                } else if (satosin(sa1)->sin_port > satosin(sa2)->sin_port) {
                    port_result = 1;
                }

                if (result == 0) {
                    result = port_result;
                } else if ((result > 0 && port_result < 0) || (result < 0 && port_result > 0)) {
                    return (2);
                }
            }

            break;
        }
        case AF_INET6: {
            if (sa1->sa_len != sizeof(struct sockaddr_in6)) {
                return (2);
            }

            if (satosin6(sa1)->sin6_scope_id != satosin6(sa2)->sin6_scope_id) {
                return (2);
            }

            result = memcmp(&satosin6(sa1)->sin6_addr.s6_addr[0], &satosin6(sa2)->sin6_addr.s6_addr[0], sizeof(struct in6_addr));

            if (check_port) {
                if (satosin6(sa1)->sin6_port < satosin6(sa2)->sin6_port) {
                    port_result = -1;
                } else if (satosin6(sa1)->sin6_port > satosin6(sa2)->sin6_port) {
                    port_result = 1;
                }

                if (result == 0) {
                    result = port_result;
                } else if ((result > 0 && port_result < 0) || (result < 0 && port_result > 0)) {
                    return (2);
                }
            }

            break;
        }
        default: {
            result = memcmp(sa1, sa2, sa1->sa_len);
            break;
        }
    }

    if (result < 0) {
        result = (-1);
    } else if (result > 0) {
        result = (1);
    }

    return (result);
}
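
/*
 * Illustrative sketch (editor's addition, not part of the original NECP sources):
 * necp_addr_compare() only reports an ordering when the address comparison and
 * the port comparison agree; if they point in opposite directions the pair is
 * reported as "not comparable" (2). A hypothetical caller-side check, mirroring
 * how necp_is_addr_in_range() consumes the result, might look like this.
 */
#if 0
static bool
example_sockaddr_in_ordered(struct sockaddr_in *low, struct sockaddr_in *high)
{
    /* 0 (equal) and -1 (low < high) both mean "low does not exceed high". */
    int cmp = necp_addr_compare((struct sockaddr *)low, (struct sockaddr *)high, 1);
    return (cmp == 0 || cmp == -1);
}
#endif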
bool
necp_buffer_compare_with_bit_prefix(u_int8_t *p1, u_int8_t *p2, u_int32_t bits)
{
    u_int8_t mask;

    /* Handle null pointers */
    if (p1 == NULL || p2 == NULL) {
        return (p1 == p2);
    }

    while (bits >= 8) {
        if (*p1++ != *p2++) {
            return (FALSE);
        }
        bits -= 8;
    }

    if (bits > 0) {
        mask = ~((1<<(8-bits))-1);
        if ((*p1 & mask) != (*p2 & mask)) {
            return (FALSE);
        }
    }
    return (TRUE);
}
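
/*
 * Illustrative sketch (editor's addition, not part of the original NECP sources):
 * for a prefix length that is not a multiple of 8, the loop above compares whole
 * bytes first and then masks the leftover high-order bits. For example, with 20
 * prefix bits the first two bytes are compared directly and the remaining 4 bits
 * use mask = ~((1 << (8 - 4)) - 1) = 0xF0. The hypothetical helper below spells
 * out that specific case.
 */
#if 0
static bool
example_prefix_20_matches(const uint8_t addr[4], const uint8_t subnet[4])
{
    uint8_t mask = (uint8_t)~((1 << (8 - 4)) - 1); /* 0xF0 for the 4 leftover bits */
    return (addr[0] == subnet[0] &&
        addr[1] == subnet[1] &&
        (addr[2] & mask) == (subnet[2] & mask));
}
#endif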
static bool
necp_update_qos_marking(struct ifnet *ifp, u_int32_t route_rule_id)
{
    bool qos_marking = FALSE;
    int exception_index = 0;
    struct necp_route_rule *route_rule = NULL;

    route_rule = necp_lookup_route_rule_locked(&necp_route_rules, route_rule_id);
    if (route_rule == NULL) {
        qos_marking = FALSE;
        goto done;
    }

    qos_marking = (route_rule->default_action == NECP_ROUTE_RULE_QOS_MARKING) ? TRUE : FALSE;

    if (ifp == NULL) {
        goto done;
    }

    for (exception_index = 0; exception_index < MAX_ROUTE_RULE_INTERFACES; exception_index++) {
        if (route_rule->exception_if_indices[exception_index] == 0) {
            break;
        }
        if (route_rule->exception_if_actions[exception_index] != NECP_ROUTE_RULE_QOS_MARKING) {
            continue;
        }
        if (route_rule->exception_if_indices[exception_index] == ifp->if_index) {
            qos_marking = TRUE;
            if (necp_debug > 2) {
                NECPLOG(LOG_DEBUG, "QoS Marking : Interface match %d for Rule %d Allowed %d",
                    route_rule->exception_if_indices[exception_index], route_rule_id, qos_marking);
            }
            goto done;
        }
    }

    if ((route_rule->cellular_action == NECP_ROUTE_RULE_QOS_MARKING && IFNET_IS_CELLULAR(ifp)) ||
        (route_rule->wifi_action == NECP_ROUTE_RULE_QOS_MARKING && IFNET_IS_WIFI(ifp)) ||
        (route_rule->wired_action == NECP_ROUTE_RULE_QOS_MARKING && IFNET_IS_WIRED(ifp)) ||
        (route_rule->expensive_action == NECP_ROUTE_RULE_QOS_MARKING && IFNET_IS_EXPENSIVE(ifp))) {
        qos_marking = TRUE;
        if (necp_debug > 2) {
            NECPLOG(LOG_DEBUG, "QoS Marking: C:%d WF:%d W:%d E:%d for Rule %d Allowed %d",
                route_rule->cellular_action, route_rule->wifi_action, route_rule->wired_action,
                route_rule->expensive_action, route_rule_id, qos_marking);
        }
    }
done:
    if (necp_debug > 1) {
        NECPLOG(LOG_DEBUG, "QoS Marking: Rule %d ifp %s Allowed %d",
            route_rule_id, ifp ? ifp->if_xname : "", qos_marking);
    }
    return (qos_marking);
}

void
necp_socket_update_qos_marking(struct inpcb *inp, struct rtentry *route, struct ifnet *interface, u_int32_t route_rule_id)
{
    bool qos_marking = FALSE;
    struct ifnet *ifp = interface = NULL;

    if (net_qos_policy_restricted == 0) {
        return;
    }
    if (inp->inp_socket == NULL) {
        return;
    }
    if ((inp->inp_socket->so_flags1 & SOF1_QOSMARKING_POLICY_OVERRIDE)) {
        return;
    }
    /*
     * This is racy but we do not need the performance hit of taking necp_kernel_policy_lock
     */
    if (inp->inp_policyresult.results.qos_marking_gencount == necp_kernel_socket_policies_gencount) {
        return;
    }

    lck_rw_lock_shared(&necp_kernel_policy_lock);

    if (ifp == NULL && route != NULL) {
        ifp = route->rt_ifp;
    }
    /*
     * By default, until we have an interface, do not mark and reevaluate the QoS marking policy
     */
    if (ifp == NULL || route_rule_id == 0) {
        qos_marking = FALSE;
        goto done;
    }

    if (ROUTE_RULE_IS_AGGREGATE(route_rule_id)) {
        struct necp_aggregate_route_rule *aggregate_route_rule = necp_lookup_aggregate_route_rule_locked(route_rule_id);
        if (aggregate_route_rule != NULL) {
            int index = 0;
            for (index = 0; index < MAX_AGGREGATE_ROUTE_RULES; index++) {
                u_int32_t sub_route_rule_id = aggregate_route_rule->rule_ids[index];
                if (sub_route_rule_id == 0) {
                    break;
                }
                qos_marking = necp_update_qos_marking(ifp, sub_route_rule_id);
                if (qos_marking == TRUE) {
                    break;
                }
            }
        }
    } else {
        qos_marking = necp_update_qos_marking(ifp, route_rule_id);
    }
    /*
     * Now that we have an interface we remember the gencount
     */
    inp->inp_policyresult.results.qos_marking_gencount = necp_kernel_socket_policies_gencount;

done:
    lck_rw_done(&necp_kernel_policy_lock);

    if (qos_marking == TRUE) {
        inp->inp_socket->so_flags1 |= SOF1_QOSMARKING_ALLOWED;
    } else {
        inp->inp_socket->so_flags1 &= ~SOF1_QOSMARKING_ALLOWED;
    }
}
static bool
necp_route_is_allowed_inner(struct rtentry *route, struct ifnet *ifp, u_int32_t route_rule_id, u_int32_t *interface_type_denied)
{
    bool default_is_allowed = TRUE;
    u_int8_t type_aggregate_action = NECP_ROUTE_RULE_NONE;
    int exception_index = 0;
    struct ifnet *delegated_ifp = NULL;
    struct necp_route_rule *route_rule = NULL;

    route_rule = necp_lookup_route_rule_locked(&necp_route_rules, route_rule_id);
    if (route_rule == NULL) {
        return (TRUE);
    }

    default_is_allowed = (route_rule->default_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? FALSE : TRUE;
    if (ifp == NULL) {
        ifp = route->rt_ifp;
    }
    if (ifp == NULL) {
        if (necp_debug > 1 && !default_is_allowed) {
            NECPLOG(LOG_DEBUG, "Route Allowed: No interface for route, using default for Rule %d Allowed %d", route_rule_id, default_is_allowed);
        }
        return (default_is_allowed);
    }

    delegated_ifp = ifp->if_delegated.ifp;
    for (exception_index = 0; exception_index < MAX_ROUTE_RULE_INTERFACES; exception_index++) {
        if (route_rule->exception_if_indices[exception_index] == 0) {
            break;
        }
        if (IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(route_rule->exception_if_actions[exception_index]) == FALSE) {
            continue;
        }
        if (route_rule->exception_if_indices[exception_index] == ifp->if_index ||
            (delegated_ifp != NULL && route_rule->exception_if_indices[exception_index] == delegated_ifp->if_index)) {
            if (necp_debug > 1) {
                NECPLOG(LOG_DEBUG, "Route Allowed: Interface match %d for Rule %d Allowed %d", route_rule->exception_if_indices[exception_index], route_rule_id, ((route_rule->exception_if_actions[exception_index] == NECP_ROUTE_RULE_DENY_INTERFACE) ? FALSE : TRUE));
            }
            return ((route_rule->exception_if_actions[exception_index] == NECP_ROUTE_RULE_DENY_INTERFACE) ? FALSE : TRUE);
        }
    }

    if (IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(route_rule->cellular_action) &&
        IFNET_IS_CELLULAR(ifp)) {
        if (interface_type_denied != NULL) {
            *interface_type_denied = IFRTYPE_FUNCTIONAL_CELLULAR;
        }
        if (type_aggregate_action == NECP_ROUTE_RULE_NONE ||
            (type_aggregate_action == NECP_ROUTE_RULE_ALLOW_INTERFACE &&
            route_rule->cellular_action == NECP_ROUTE_RULE_DENY_INTERFACE)) {
            // Deny wins if there is a conflict
            type_aggregate_action = route_rule->cellular_action;
        }
    }

    if (IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(route_rule->wifi_action) &&
        IFNET_IS_WIFI(ifp)) {
        if (interface_type_denied != NULL) {
            *interface_type_denied = IFRTYPE_FUNCTIONAL_WIFI_INFRA;
        }
        if (type_aggregate_action == NECP_ROUTE_RULE_NONE ||
            (type_aggregate_action == NECP_ROUTE_RULE_ALLOW_INTERFACE &&
            route_rule->wifi_action == NECP_ROUTE_RULE_DENY_INTERFACE)) {
            // Deny wins if there is a conflict
            type_aggregate_action = route_rule->wifi_action;
        }
    }

    if (IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(route_rule->wired_action) &&
        IFNET_IS_WIRED(ifp)) {
        if (interface_type_denied != NULL) {
            *interface_type_denied = IFRTYPE_FUNCTIONAL_WIRED;
        }
        if (type_aggregate_action == NECP_ROUTE_RULE_NONE ||
            (type_aggregate_action == NECP_ROUTE_RULE_ALLOW_INTERFACE &&
            route_rule->wired_action == NECP_ROUTE_RULE_DENY_INTERFACE)) {
            // Deny wins if there is a conflict
            type_aggregate_action = route_rule->wired_action;
        }
    }

    if (IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(route_rule->expensive_action) &&
        IFNET_IS_EXPENSIVE(ifp)) {
        if (type_aggregate_action == NECP_ROUTE_RULE_NONE ||
            (type_aggregate_action == NECP_ROUTE_RULE_ALLOW_INTERFACE &&
            route_rule->expensive_action == NECP_ROUTE_RULE_DENY_INTERFACE)) {
            // Deny wins if there is a conflict
            type_aggregate_action = route_rule->expensive_action;
        }
    }

    if (type_aggregate_action != NECP_ROUTE_RULE_NONE) {
        if (necp_debug > 1) {
            NECPLOG(LOG_DEBUG, "Route Allowed: C:%d WF:%d W:%d E:%d for Rule %d Allowed %d", route_rule->cellular_action, route_rule->wifi_action, route_rule->wired_action, route_rule->expensive_action, route_rule_id, ((type_aggregate_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? FALSE : TRUE));
        }
        return ((type_aggregate_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? FALSE : TRUE);
    }

    if (necp_debug > 1 && !default_is_allowed) {
        NECPLOG(LOG_DEBUG, "Route Allowed: Using default for Rule %d Allowed %d", route_rule_id, default_is_allowed);
    }
    return (default_is_allowed);
}
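
/*
 * Illustrative sketch (editor's addition, not part of the original NECP sources):
 * the per-interface-type checks above fold into a single aggregate action in
 * which a deny beats an allow whenever more than one type matches (for example,
 * an expensive cellular interface that one clause allows and another denies).
 * The hypothetical reducer below captures just that precedence.
 */
#if 0
static u_int8_t
example_aggregate_action(u_int8_t current, u_int8_t candidate)
{
    if (current == NECP_ROUTE_RULE_NONE) {
        return (candidate); /* first matching type sets the action */
    }
    if (current == NECP_ROUTE_RULE_ALLOW_INTERFACE &&
        candidate == NECP_ROUTE_RULE_DENY_INTERFACE) {
        return (candidate); /* deny wins if there is a conflict */
    }
    return (current);
}
#endif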
static bool
necp_route_is_allowed(struct rtentry *route, struct ifnet *interface, u_int32_t route_rule_id, u_int32_t *interface_type_denied)
{
    if ((route == NULL && interface == NULL) || route_rule_id == 0) {
        if (necp_debug > 1) {
            NECPLOG(LOG_DEBUG, "Route Allowed: no route or interface, Rule %d Allowed %d", route_rule_id, TRUE);
        }
        return (TRUE);
    }

    if (ROUTE_RULE_IS_AGGREGATE(route_rule_id)) {
        struct necp_aggregate_route_rule *aggregate_route_rule = necp_lookup_aggregate_route_rule_locked(route_rule_id);
        if (aggregate_route_rule != NULL) {
            int index = 0;
            for (index = 0; index < MAX_AGGREGATE_ROUTE_RULES; index++) {
                u_int32_t sub_route_rule_id = aggregate_route_rule->rule_ids[index];
                if (sub_route_rule_id == 0) {
                    break;
                }
                if (!necp_route_is_allowed_inner(route, interface, sub_route_rule_id, interface_type_denied)) {
                    return (FALSE);
                }
            }
        }
    }

    return (necp_route_is_allowed_inner(route, interface, route_rule_id, interface_type_denied));
}

bool
necp_packet_is_allowed_over_interface(struct mbuf *packet, struct ifnet *interface)
{
    bool is_allowed = TRUE;
    u_int32_t route_rule_id = necp_get_route_rule_id_from_packet(packet);
    if (route_rule_id != 0 &&
        interface != NULL) {
        lck_rw_lock_shared(&necp_kernel_policy_lock);
        is_allowed = necp_route_is_allowed(NULL, interface, necp_get_route_rule_id_from_packet(packet), NULL);
        lck_rw_done(&necp_kernel_policy_lock);
    }
    return (is_allowed);
}
static bool
necp_netagents_allow_traffic(u_int32_t *netagent_ids, size_t netagent_id_count)
{
    size_t netagent_cursor;
    for (netagent_cursor = 0; netagent_cursor < netagent_id_count; netagent_cursor++) {
        struct necp_uuid_id_mapping *mapping = NULL;
        u_int32_t netagent_id = netagent_ids[netagent_cursor];
        if (netagent_id == 0) {
            break;
        }
        mapping = necp_uuid_lookup_uuid_with_service_id_locked(netagent_id);
        if (mapping != NULL) {
            u_int32_t agent_flags = 0;
            agent_flags = netagent_get_flags(mapping->uuid);
            if (agent_flags & NETAGENT_FLAG_REGISTERED) {
                if (agent_flags & NETAGENT_FLAG_ACTIVE) {
                    continue;
                } else if ((agent_flags & NETAGENT_FLAG_VOLUNTARY) == 0) {
                    return (FALSE);
                }
            }
        }
    }
    return (TRUE);
}
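
/*
 * Illustrative sketch (editor's addition, not part of the original NECP sources):
 * a required (non-voluntary) agent that is registered but not yet active holds
 * traffic back, while agents that are unregistered, active, or voluntary do not.
 * The hypothetical predicate below restates that decision for a single agent's
 * flag word.
 */
#if 0
static bool
example_agent_blocks_traffic(u_int32_t agent_flags)
{
    return ((agent_flags & NETAGENT_FLAG_REGISTERED) &&
        !(agent_flags & NETAGENT_FLAG_ACTIVE) &&
        !(agent_flags & NETAGENT_FLAG_VOLUNTARY));
}
#endif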
static bool
necp_socket_is_allowed_to_send_recv_internal(struct inpcb *inp, struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr, ifnet_t interface, necp_kernel_policy_id *return_policy_id, u_int32_t *return_route_rule_id)
{
    u_int32_t verifyifindex = interface ? interface->if_index : 0;
    bool allowed_to_receive = TRUE;
    struct necp_socket_info info;
    u_int32_t flowhash = 0;
    necp_kernel_policy_result service_action = 0;
    necp_kernel_policy_service service = { 0, 0 };
    u_int32_t route_rule_id = 0;
    struct rtentry *route = NULL;
    u_int32_t interface_type_denied = IFRTYPE_FUNCTIONAL_UNKNOWN;

    u_int32_t netagent_ids[NECP_MAX_NETAGENTS];
    memset(&netagent_ids, 0, sizeof(netagent_ids));

    if (return_policy_id) {
        *return_policy_id = NECP_KERNEL_POLICY_ID_NONE;
    }
    if (return_route_rule_id) {
        *return_route_rule_id = 0;
    }

    if (inp == NULL) {
        goto done;
    }

    route = inp->inp_route.ro_rt;

    // Don't lock. Possible race condition, but we don't want the performance hit.
    if (necp_kernel_socket_policies_count == 0 ||
        (!(inp->inp_flags2 & INP2_WANT_APP_POLICY) && necp_kernel_socket_policies_non_app_count == 0)) {
        if (necp_drop_all_order > 0) {
            if (necp_socket_bypass(override_local_addr, override_remote_addr, inp)) {
                allowed_to_receive = TRUE;
            } else {
                allowed_to_receive = FALSE;
            }
        }
        goto done;
    }

    // If this socket is connected, or we are not taking addresses into account, try to reuse last result
    if ((necp_socket_is_connected(inp) || (override_local_addr == NULL && override_remote_addr == NULL)) && inp->inp_policyresult.policy_id != NECP_KERNEL_POLICY_ID_NONE) {
        bool policies_have_changed = FALSE;
        bool route_allowed = TRUE;

        if (inp->inp_policyresult.policy_gencount != necp_kernel_socket_policies_gencount) {
            policies_have_changed = TRUE;
        } else {
            if (inp->inp_policyresult.results.route_rule_id != 0) {
                lck_rw_lock_shared(&necp_kernel_policy_lock);
                if (!necp_route_is_allowed(route, interface, inp->inp_policyresult.results.route_rule_id, &interface_type_denied)) {
                    route_allowed = FALSE;
                }
                lck_rw_done(&necp_kernel_policy_lock);
            }
        }

        if (!policies_have_changed) {
            if (!route_allowed ||
                inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_DROP ||
                inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT ||
                (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL && interface &&
                inp->inp_policyresult.results.result_parameter.tunnel_interface_index != verifyifindex)) {
                allowed_to_receive = FALSE;
            } else {
                if (return_policy_id) {
                    *return_policy_id = inp->inp_policyresult.policy_id;
                }
                if (return_route_rule_id) {
                    *return_route_rule_id = inp->inp_policyresult.results.route_rule_id;
                }
            }
            goto done;
        }
    }

    // Check for loopback exception
    if (necp_socket_bypass(override_local_addr, override_remote_addr, inp)) {
        allowed_to_receive = TRUE;
        goto done;
    }

    // Actually calculate policy result
    lck_rw_lock_shared(&necp_kernel_policy_lock);
    necp_socket_fillout_info_locked(inp, override_local_addr, override_remote_addr, 0, &info);

    flowhash = necp_socket_calc_flowhash_locked(&info);
    if (inp->inp_policyresult.policy_id != NECP_KERNEL_POLICY_ID_NONE &&
        inp->inp_policyresult.policy_gencount == necp_kernel_socket_policies_gencount &&
        inp->inp_policyresult.flowhash == flowhash) {
        if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_DROP ||
            inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT ||
            (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL && interface &&
            inp->inp_policyresult.results.result_parameter.tunnel_interface_index != verifyifindex) ||
            (inp->inp_policyresult.results.route_rule_id != 0 &&
            !necp_route_is_allowed(route, interface, inp->inp_policyresult.results.route_rule_id, &interface_type_denied))) {
            allowed_to_receive = FALSE;
        } else {
            if (return_policy_id) {
                *return_policy_id = inp->inp_policyresult.policy_id;
            }
            if (return_route_rule_id) {
                *return_route_rule_id = inp->inp_policyresult.results.route_rule_id;
            }
        }
        lck_rw_done(&necp_kernel_policy_lock);
        goto done;
    }

    struct necp_kernel_socket_policy *matched_policy = necp_socket_find_policy_match_with_info_locked(necp_kernel_socket_policies_map[NECP_SOCKET_MAP_APP_ID_TO_BUCKET(info.application_id)], &info, NULL, &route_rule_id, &service_action, &service, netagent_ids, NECP_MAX_NETAGENTS, current_proc());
    if (matched_policy != NULL) {
        if (matched_policy->result == NECP_KERNEL_POLICY_RESULT_DROP ||
            matched_policy->result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT ||
            (matched_policy->result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL && interface &&
            matched_policy->result_parameter.tunnel_interface_index != verifyifindex) ||
            ((service_action == NECP_KERNEL_POLICY_RESULT_TRIGGER_SCOPED ||
            service_action == NECP_KERNEL_POLICY_RESULT_NO_TRIGGER_SCOPED) &&
            service.identifier != 0 && service.identifier != NECP_NULL_SERVICE_ID) ||
            (route_rule_id != 0 &&
            !necp_route_is_allowed(route, interface, route_rule_id, &interface_type_denied)) ||
            !necp_netagents_allow_traffic(netagent_ids, NECP_MAX_NETAGENTS)) {
            allowed_to_receive = FALSE;
        } else {
            if (return_policy_id) {
                *return_policy_id = matched_policy->id;
            }
            if (return_route_rule_id) {
                *return_route_rule_id = route_rule_id;
            }
        }
        lck_rw_done(&necp_kernel_policy_lock);

        if (necp_debug > 1 && matched_policy->id != inp->inp_policyresult.policy_id) {
            NECPLOG(LOG_DEBUG, "Socket Send/Recv Policy: Policy %d Allowed %d", return_policy_id ? *return_policy_id : 0, allowed_to_receive);
        }
        goto done;
    } else if (necp_drop_all_order > 0) {
        allowed_to_receive = FALSE;
    } else {
        if (return_policy_id) {
            *return_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
        }
        if (return_route_rule_id) {
            *return_route_rule_id = route_rule_id;
        }
    }

    lck_rw_done(&necp_kernel_policy_lock);

done:
    if (!allowed_to_receive && interface_type_denied != IFRTYPE_FUNCTIONAL_UNKNOWN) {
        soevent(inp->inp_socket, (SO_FILT_HINT_LOCKED | SO_FILT_HINT_IFDENIED));
    }

    return (allowed_to_receive);
}
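
/*
 * Illustrative sketch (editor's addition, not part of the original NECP sources):
 * the function above reuses a previously computed verdict whenever the policy
 * generation count and the flow hash of the current addresses both match what
 * was cached on the inpcb, and only falls back to a full policy evaluation
 * otherwise. The hypothetical fragment below isolates that cache test; the
 * parameter names and types are illustrative only.
 */
#if 0
static bool
example_cached_result_is_valid(necp_kernel_policy_id cached_policy_id,
    u_int32_t cached_gencount, u_int32_t current_gencount,
    u_int32_t cached_flowhash, u_int32_t current_flowhash)
{
    return (cached_policy_id != NECP_KERNEL_POLICY_ID_NONE &&
        cached_gencount == current_gencount &&
        cached_flowhash == current_flowhash);
}
#endif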
bool
necp_socket_is_allowed_to_send_recv_v4(struct inpcb *inp, u_int16_t local_port, u_int16_t remote_port, struct in_addr *local_addr, struct in_addr *remote_addr, ifnet_t interface, necp_kernel_policy_id *return_policy_id, u_int32_t *return_route_rule_id)
{
    struct sockaddr_in local;
    struct sockaddr_in remote;
    local.sin_family = remote.sin_family = AF_INET;
    local.sin_len = remote.sin_len = sizeof(struct sockaddr_in);
    local.sin_port = local_port;
    remote.sin_port = remote_port;
    memcpy(&local.sin_addr, local_addr, sizeof(local.sin_addr));
    memcpy(&remote.sin_addr, remote_addr, sizeof(remote.sin_addr));

    return (necp_socket_is_allowed_to_send_recv_internal(inp, (struct sockaddr *)&local, (struct sockaddr *)&remote, interface, return_policy_id, return_route_rule_id));
}

bool
necp_socket_is_allowed_to_send_recv_v6(struct inpcb *inp, u_int16_t local_port, u_int16_t remote_port, struct in6_addr *local_addr, struct in6_addr *remote_addr, ifnet_t interface, necp_kernel_policy_id *return_policy_id, u_int32_t *return_route_rule_id)
{
    struct sockaddr_in6 local;
    struct sockaddr_in6 remote;
    local.sin6_family = remote.sin6_family = AF_INET6;
    local.sin6_len = remote.sin6_len = sizeof(struct sockaddr_in6);
    local.sin6_port = local_port;
    remote.sin6_port = remote_port;
    memcpy(&local.sin6_addr, local_addr, sizeof(local.sin6_addr));
    memcpy(&remote.sin6_addr, remote_addr, sizeof(remote.sin6_addr));

    return (necp_socket_is_allowed_to_send_recv_internal(inp, (struct sockaddr *)&local, (struct sockaddr *)&remote, interface, return_policy_id, return_route_rule_id));
}

bool
necp_socket_is_allowed_to_send_recv(struct inpcb *inp, necp_kernel_policy_id *return_policy_id, u_int32_t *return_route_rule_id)
{
    return (necp_socket_is_allowed_to_send_recv_internal(inp, NULL, NULL, NULL, return_policy_id, return_route_rule_id));
}
int
necp_mark_packet_from_socket(struct mbuf *packet, struct inpcb *inp, necp_kernel_policy_id policy_id, u_int32_t route_rule_id)
{
    if (packet == NULL || inp == NULL || !(packet->m_flags & M_PKTHDR)) {
        return (EINVAL);
    }

    // Mark ID for Pass and IP Tunnel
    if (policy_id != NECP_KERNEL_POLICY_ID_NONE) {
        packet->m_pkthdr.necp_mtag.necp_policy_id = policy_id;
    } else if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_PASS ||
        inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL) {
        packet->m_pkthdr.necp_mtag.necp_policy_id = inp->inp_policyresult.policy_id;
    } else {
        packet->m_pkthdr.necp_mtag.necp_policy_id = NECP_KERNEL_POLICY_ID_NONE;
    }
    packet->m_pkthdr.necp_mtag.necp_last_interface_index = 0;
    if (route_rule_id != 0) {
        packet->m_pkthdr.necp_mtag.necp_route_rule_id = route_rule_id;
    } else {
        packet->m_pkthdr.necp_mtag.necp_route_rule_id = inp->inp_policyresult.results.route_rule_id;
    }
    packet->m_pkthdr.necp_mtag.necp_app_id = inp->inp_policyresult.app_id;

    return (0);
}

int
necp_mark_packet_from_ip(struct mbuf *packet, necp_kernel_policy_id policy_id)
{
    if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
        return (EINVAL);
    }

    // Mark ID for Pass and IP Tunnel
    if (policy_id != NECP_KERNEL_POLICY_ID_NONE) {
        packet->m_pkthdr.necp_mtag.necp_policy_id = policy_id;
    } else {
        packet->m_pkthdr.necp_mtag.necp_policy_id = NECP_KERNEL_POLICY_ID_NONE;
    }

    return (0);
}

int
necp_mark_packet_from_interface(struct mbuf *packet, ifnet_t interface)
{
    if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
        return (EINVAL);
    }

    // Mark ID for Pass and IP Tunnel
    if (interface != NULL) {
        packet->m_pkthdr.necp_mtag.necp_last_interface_index = interface->if_index;
    }

    return (0);
}

int
necp_mark_packet_as_keepalive(struct mbuf *packet, bool is_keepalive)
{
    if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
        return (EINVAL);
    }

    if (is_keepalive) {
        packet->m_pkthdr.pkt_flags |= PKTF_KEEPALIVE;
    } else {
        packet->m_pkthdr.pkt_flags &= ~PKTF_KEEPALIVE;
    }

    return (0);
}

necp_kernel_policy_id
necp_get_policy_id_from_packet(struct mbuf *packet)
{
    if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
        return (NECP_KERNEL_POLICY_ID_NONE);
    }

    return (packet->m_pkthdr.necp_mtag.necp_policy_id);
}

u_int32_t
necp_get_last_interface_index_from_packet(struct mbuf *packet)
{
    if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
        return (0);
    }

    return (packet->m_pkthdr.necp_mtag.necp_last_interface_index);
}

u_int32_t
necp_get_route_rule_id_from_packet(struct mbuf *packet)
{
    if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
        return (0);
    }

    return (packet->m_pkthdr.necp_mtag.necp_route_rule_id);
}

int
necp_get_app_uuid_from_packet(struct mbuf *packet,
    uuid_t app_uuid)
{
    if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
        return (EINVAL);
    }

    bool found_mapping = FALSE;
    if (packet->m_pkthdr.necp_mtag.necp_app_id != 0) {
        lck_rw_lock_shared(&necp_kernel_policy_lock);
        struct necp_uuid_id_mapping *entry = necp_uuid_lookup_uuid_with_app_id_locked(packet->m_pkthdr.necp_mtag.necp_app_id);
        if (entry != NULL) {
            uuid_copy(app_uuid, entry->uuid);
            found_mapping = true;
        }
        lck_rw_done(&necp_kernel_policy_lock);
    }
    if (!found_mapping) {
        uuid_clear(app_uuid);
    }

    return (0);
}

bool
necp_get_is_keepalive_from_packet(struct mbuf *packet)
{
    if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
        return (FALSE);
    }

    return (packet->m_pkthdr.pkt_flags & PKTF_KEEPALIVE);
}
u_int32_t
necp_socket_get_content_filter_control_unit(struct socket *so)
{
    struct inpcb *inp = sotoinpcb(so);

    if (inp == NULL) {
        return (0);
    }
    return (inp->inp_policyresult.results.filter_control_unit);
}

bool
necp_socket_should_use_flow_divert(struct inpcb *inp)
{
    if (inp == NULL) {
        return (FALSE);
    }

    return (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT);
}

u_int32_t
necp_socket_get_flow_divert_control_unit(struct inpcb *inp)
{
    if (inp == NULL) {
        return (0);
    }

    if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT) {
        return (inp->inp_policyresult.results.result_parameter.flow_divert_control_unit);
    }

    return (0);
}

bool
necp_socket_should_rescope(struct inpcb *inp)
{
    if (inp == NULL) {
        return (FALSE);
    }

    return (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED);
}

u_int
necp_socket_get_rescope_if_index(struct inpcb *inp)
{
    if (inp == NULL) {
        return (0);
    }

    if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED) {
        return (inp->inp_policyresult.results.result_parameter.scoped_interface_index);
    }

    return (0);
}
u_int32_t
necp_socket_get_effective_mtu(struct inpcb *inp, u_int32_t current_mtu)
{
    if (inp == NULL) {
        return (current_mtu);
    }

    if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL &&
        (inp->inp_flags & INP_BOUND_IF) &&
        inp->inp_boundifp) {
        u_int bound_interface_index = inp->inp_boundifp->if_index;
        u_int tunnel_interface_index = inp->inp_policyresult.results.result_parameter.tunnel_interface_index;

        // The result is IP Tunnel, and is rescoping from one interface to another. Recalculate MTU.
        if (bound_interface_index != tunnel_interface_index) {
            ifnet_t tunnel_interface = NULL;

            ifnet_head_lock_shared();
            tunnel_interface = ifindex2ifnet[tunnel_interface_index];
            ifnet_head_done();

            if (tunnel_interface != NULL) {
                u_int32_t direct_tunnel_mtu = tunnel_interface->if_mtu;
                u_int32_t delegate_tunnel_mtu = (tunnel_interface->if_delegated.ifp != NULL) ? tunnel_interface->if_delegated.ifp->if_mtu : 0;
                if (delegate_tunnel_mtu != 0 &&
                    strncmp(tunnel_interface->if_name, "ipsec", strlen("ipsec")) == 0) {
                    // For ipsec interfaces, calculate the overhead from the delegate interface
                    u_int32_t tunnel_overhead = (u_int32_t)(esp_hdrsiz(NULL) + sizeof(struct ip6_hdr));
                    if (delegate_tunnel_mtu > tunnel_overhead) {
                        delegate_tunnel_mtu -= tunnel_overhead;
                    }

                    if (delegate_tunnel_mtu < direct_tunnel_mtu) {
                        // If the (delegate - overhead) < direct, return (delegate - overhead)
                        return (delegate_tunnel_mtu);
                    } else {
                        // Otherwise return direct
                        return (direct_tunnel_mtu);
                    }
                } else {
                    // For non-ipsec interfaces, just return the tunnel MTU
                    return (direct_tunnel_mtu);
                }
            }
        }
    }

    // By default, just return the MTU passed in
    return (current_mtu);
}
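
/*
 * Illustrative sketch (editor's addition, not part of the original NECP sources):
 * for an "ipsec" tunnel interface that has a delegate, the effective MTU above
 * is the smaller of the tunnel interface's own MTU and the delegate's MTU less
 * the ESP plus outer IPv6 header overhead. The hypothetical helper below
 * expresses just that arithmetic, with the overhead passed in by the caller.
 */
#if 0
static u_int32_t
example_effective_tunnel_mtu(u_int32_t direct_tunnel_mtu,
    u_int32_t delegate_mtu, u_int32_t tunnel_overhead)
{
    u_int32_t adjusted_delegate_mtu = delegate_mtu;
    if (adjusted_delegate_mtu > tunnel_overhead) {
        adjusted_delegate_mtu -= tunnel_overhead;
    }
    /* e.g. a 1500-byte delegate MTU and 113 bytes of overhead give 1387 */
    return ((adjusted_delegate_mtu < direct_tunnel_mtu) ?
        adjusted_delegate_mtu : direct_tunnel_mtu);
}
#endif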
ifnet_t
necp_get_ifnet_from_result_parameter(necp_kernel_policy_result_parameter *result_parameter)
{
    if (result_parameter == NULL) {
        return (NULL);
    }

    return (ifindex2ifnet[result_parameter->tunnel_interface_index]);
}

static bool
necp_packet_can_rebind_to_ifnet(struct mbuf *packet, struct ifnet *interface, struct route *new_route, int family)
{
    bool found_match = FALSE;
    errno_t result = 0;
    ifaddr_t *addresses = NULL;
    union necp_sockaddr_union address_storage;
    int i;

    if (packet == NULL || interface == NULL || new_route == NULL || (family != AF_INET && family != AF_INET6)) {
        return (FALSE);
    }

    result = ifnet_get_address_list_family(interface, &addresses, family);
    if (result != 0) {
        NECPLOG(LOG_ERR, "Failed to get address list for %s%d", ifnet_name(interface), ifnet_unit(interface));
        return (FALSE);
    }

    for (i = 0; addresses[i] != NULL; i++) {
        ROUTE_RELEASE(new_route);
        if (ifaddr_address(addresses[i], &address_storage.sa, sizeof(address_storage)) == 0) {
            if (family == AF_INET) {
                struct ip *ip = mtod(packet, struct ip *);
                if (memcmp(&address_storage.sin.sin_addr, &ip->ip_src, sizeof(ip->ip_src)) == 0) {
                    struct sockaddr_in *dst4 = (struct sockaddr_in *)(void *)&new_route->ro_dst;
                    dst4->sin_family = AF_INET;
                    dst4->sin_len = sizeof(struct sockaddr_in);
                    dst4->sin_addr = ip->ip_dst;
                    rtalloc_scoped(new_route, interface->if_index);
                    if (!ROUTE_UNUSABLE(new_route)) {
                        found_match = TRUE;
                        goto done;
                    }
                }
            } else if (family == AF_INET6) {
                struct ip6_hdr *ip6 = mtod(packet, struct ip6_hdr *);
                if (memcmp(&address_storage.sin6.sin6_addr, &ip6->ip6_src, sizeof(ip6->ip6_src)) == 0) {
                    struct sockaddr_in6 *dst6 = (struct sockaddr_in6 *)(void *)&new_route->ro_dst;
                    dst6->sin6_family = AF_INET6;
                    dst6->sin6_len = sizeof(struct sockaddr_in6);
                    dst6->sin6_addr = ip6->ip6_dst;
                    rtalloc_scoped(new_route, interface->if_index);
                    if (!ROUTE_UNUSABLE(new_route)) {
                        found_match = TRUE;
                        goto done;
                    }
                }
            }
        }
    }

done:
    ifnet_free_address_list(addresses);
    addresses = NULL;
    return (found_match);
}

static bool
necp_addr_is_loopback(struct sockaddr *address)
{
    if (address == NULL) {
        return (FALSE);
    }

    if (address->sa_family == AF_INET) {
        return (ntohl(((struct sockaddr_in *)(void *)address)->sin_addr.s_addr) == INADDR_LOOPBACK);
    } else if (address->sa_family == AF_INET6) {
        return IN6_IS_ADDR_LOOPBACK(&((struct sockaddr_in6 *)(void *)address)->sin6_addr);
    }

    return (FALSE);
}
static bool
necp_is_loopback(struct sockaddr *local_addr, struct sockaddr *remote_addr, struct inpcb *inp, struct mbuf *packet)
{
    // Note: This function only checks for the loopback addresses.
    // In the future, we may want to expand to also allow any traffic
    // going through the loopback interface, but until then, this
    // check is cheaper.

    if (local_addr != NULL && necp_addr_is_loopback(local_addr)) {
        return (TRUE);
    }

    if (remote_addr != NULL && necp_addr_is_loopback(remote_addr)) {
        return (TRUE);
    }

    if (inp != NULL) {
        if ((inp->inp_flags & INP_BOUND_IF) && inp->inp_boundifp && (inp->inp_boundifp->if_flags & IFF_LOOPBACK)) {
            return (TRUE);
        }
        if (inp->inp_vflag & INP_IPV4) {
            if (ntohl(inp->inp_laddr.s_addr) == INADDR_LOOPBACK ||
                ntohl(inp->inp_faddr.s_addr) == INADDR_LOOPBACK) {
                return (TRUE);
            }
        } else if (inp->inp_vflag & INP_IPV6) {
            if (IN6_IS_ADDR_LOOPBACK(&inp->in6p_laddr) ||
                IN6_IS_ADDR_LOOPBACK(&inp->in6p_faddr)) {
                return (TRUE);
            }
        }
    }

    if (packet != NULL) {
        struct ip *ip = mtod(packet, struct ip *);
        if (ip->ip_v == 4) {
            if (ntohl(ip->ip_src.s_addr) == INADDR_LOOPBACK) {
                return (TRUE);
            }
            if (ntohl(ip->ip_dst.s_addr) == INADDR_LOOPBACK) {
                return (TRUE);
            }
        } else if (ip->ip_v == 6) {
            struct ip6_hdr *ip6 = mtod(packet, struct ip6_hdr *);
            if (IN6_IS_ADDR_LOOPBACK(&ip6->ip6_src)) {
                return (TRUE);
            }
            if (IN6_IS_ADDR_LOOPBACK(&ip6->ip6_dst)) {
                return (TRUE);
            }
        }
    }

    return (FALSE);
}

static bool
necp_is_intcoproc(struct inpcb *inp, struct mbuf *packet)
{
    if (inp != NULL) {
        return (sflt_permission_check(inp) ? true : false);
    }
    if (packet != NULL) {
        struct ip6_hdr *ip6 = mtod(packet, struct ip6_hdr *);
        if ((ip6->ip6_vfc & IPV6_VERSION_MASK) == IPV6_VERSION &&
            IN6_IS_ADDR_LINKLOCAL(&ip6->ip6_dst) &&
            ip6->ip6_dst.s6_addr32[2] == ntohl(0xaede48ff) &&
            ip6->ip6_dst.s6_addr32[3] == ntohl(0xfe334455)) {