/*
 * Copyright (c) 2013-2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <libkern/OSMalloc.h>
#include <sys/kernel.h>
#include <sys/kern_control.h>
#include <sys/kpi_mbuf.h>
#include <sys/proc_uuid_policy.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/coalition.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_cache.h>
#include <netinet/udp.h>
#include <netinet/in_pcb.h>
#include <netinet/in_tclass.h>
#include <netinet6/esp.h>
#include <net/flowhash.h>
#include <net/if_var.h>
#include <sys/kauth.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/kern_event.h>
#include <sys/file_internal.h>
#include <IOKit/IOBSD.h>
#include <net/network_agent.h>
/*
 * NECP - Network Extension Control Policy database
 * ------------------------------------------------
 * The goal of this module is to allow clients connecting via a
 * kernel control socket to create high-level policy sessions, which
 * are ingested into low-level kernel policies that control and tag
 * traffic at the application, socket, and IP layers.
 *
 * ------------------------------------------------
 * Sessions
 * ------------------------------------------------
 * Each session owns a list of session policies, each of which can
 * specify any combination of conditions and a single result. Each
 * session also has a priority level (such as High, Default, or Low)
 * which is requested by the client. Based on the requested level,
 * a session order value is assigned to the session, which will be used
 * to sort kernel policies generated by the session. The session client
 * can specify the sub-order for each policy it creates, which will be
 * used to further sort the kernel policies.
 *
 * Kernel Control Socket --> 1 necp_session --> list of necp_session_policy structs
 *
 * ------------------------------------------------
 * Kernel Policies
 * ------------------------------------------------
 * Whenever a session sends the Apply command, its policies are ingested
 * and generate kernel policies. There are two phases of kernel policy
 * ingestion:
 *
 * 1. The session policy is parsed to create kernel policies at the socket
 * and IP layers, when applicable. For example, a policy that requires
 * all traffic from App1 to Pass will generate a socket kernel policy to
 * match App1 and mark packets with ID1, and also an IP policy to match
 * ID1 and let the packet pass. This is handled in necp_apply_policy. The
 * resulting kernel policies are added to the global socket and IP layer
 * policy lists.
 * necp_session_policy --> necp_kernel_socket_policy and necp_kernel_ip_output_policy
 *                             |-> necp_kernel_socket_policies
 *                             |-> necp_kernel_ip_output_policies
 *
 * 2. Once the global lists of kernel policies have been filled out, each
 * list is traversed to create optimized sub-lists ("Maps") which are used during
 * data-path evaluation. IP policies are sent into necp_kernel_ip_output_policies_map,
 * which hashes incoming packets based on marked socket-layer policies, and removes
 * duplicate or overlapping policies. Socket policies are sent into two maps,
 * necp_kernel_socket_policies_map and necp_kernel_socket_policies_app_layer_map.
 * The app layer map is used for policy checks coming in from user space, and is one
 * list with duplicate and overlapping policies removed. The socket map hashes based
 * on app UUID, and removes duplicate and overlapping policies.
 * necp_kernel_socket_policy --> necp_kernel_socket_policies_app_layer_map
 *                           |-> necp_kernel_socket_policies_map
 *
 * necp_kernel_ip_output_policies --> necp_kernel_ip_output_policies_map
 *
 * ------------------------------------------------
 * Drop All Level
 * ------------------------------------------------
 * The Drop All Level is a sysctl that controls the level at which policies are allowed
 * to override a global drop rule. If the value is 0, no drop rule is applied. If the value
 * is 1, all traffic is dropped. If the value is greater than 1, all kernel policies created
 * by a session with a priority level better than (numerically less than) the
 * Drop All Level will allow matching traffic to not be dropped. The Drop All Level is
 * dynamically interpreted into necp_drop_all_order, which specifies the equivalent assigned
 * session orders to be dropped.
 */
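/*
 * A minimal sketch (not compiled) of how the Drop All Level relates to session
 * orders, assuming the 1000-orders-per-priority scheme used by
 * necp_allocate_new_session_order() below. The concrete numbers are only
 * illustrative.
 */
#if 0
static u_int32_t
example_session_order(u_int32_t priority, u_int32_t control_unit)
{
	// Each priority level owns a band of 1000 orders; the control unit
	// selects the offset within that band.
	return (control_unit + ((priority - 1) * 1000));
}

static void
example_drop_all_cutoff(void)
{
	// With a Drop All Level of 3, the drop rule starts at the first order of
	// priority band 3; sessions at priority 1 or 2 (orders below 2001) are
	// "better" than the level and can still allow matching traffic.
	u_int32_t drop_all_order = ((3 - 1) * 1000) + 1; // == 2001
	(void)drop_all_order;
	(void)example_session_order(2, 1); // order 1001, not subject to the drop rule
}
#endif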
u_int32_t necp_drop_all_order = 0;
u_int32_t necp_drop_all_level = 0;

u_int32_t necp_pass_loopback = 1; // 0=Off, 1=On
u_int32_t necp_pass_keepalives = 1; // 0=Off, 1=On

u_int32_t necp_debug = 0; // 0=None, 1=Basic, 2=EveryMatch

u_int32_t necp_session_count = 0;
#define LIST_INSERT_SORTED_ASCENDING(head, elm, field, sortfield, tmpelm) do { \
	if (LIST_EMPTY((head)) || (LIST_FIRST(head)->sortfield >= (elm)->sortfield)) { \
		LIST_INSERT_HEAD((head), elm, field); \
	} else { \
		LIST_FOREACH(tmpelm, head, field) { \
			if (LIST_NEXT(tmpelm, field) == NULL || LIST_NEXT(tmpelm, field)->sortfield >= (elm)->sortfield) { \
				LIST_INSERT_AFTER(tmpelm, elm, field); \
				break; \
			} \
		} \
	} \
} while (0)

#define LIST_INSERT_SORTED_TWICE_ASCENDING(head, elm, field, firstsortfield, secondsortfield, tmpelm) do { \
	if (LIST_EMPTY((head)) || (LIST_FIRST(head)->firstsortfield > (elm)->firstsortfield) || ((LIST_FIRST(head)->firstsortfield == (elm)->firstsortfield) && (LIST_FIRST(head)->secondsortfield >= (elm)->secondsortfield))) { \
		LIST_INSERT_HEAD((head), elm, field); \
	} else { \
		LIST_FOREACH(tmpelm, head, field) { \
			if (LIST_NEXT(tmpelm, field) == NULL || (LIST_NEXT(tmpelm, field)->firstsortfield > (elm)->firstsortfield) || ((LIST_NEXT(tmpelm, field)->firstsortfield == (elm)->firstsortfield) && (LIST_NEXT(tmpelm, field)->secondsortfield >= (elm)->secondsortfield))) { \
				LIST_INSERT_AFTER(tmpelm, elm, field); \
				break; \
			} \
		} \
	} \
} while (0)

#define LIST_INSERT_SORTED_THRICE_ASCENDING(head, elm, field, firstsortfield, secondsortfield, thirdsortfield, tmpelm) do { \
	if (LIST_EMPTY((head)) || (LIST_FIRST(head)->firstsortfield > (elm)->firstsortfield) || ((LIST_FIRST(head)->firstsortfield == (elm)->firstsortfield) && (LIST_FIRST(head)->secondsortfield >= (elm)->secondsortfield)) || ((LIST_FIRST(head)->firstsortfield == (elm)->firstsortfield) && (LIST_FIRST(head)->secondsortfield == (elm)->secondsortfield) && (LIST_FIRST(head)->thirdsortfield >= (elm)->thirdsortfield))) { \
		LIST_INSERT_HEAD((head), elm, field); \
	} else { \
		LIST_FOREACH(tmpelm, head, field) { \
			if (LIST_NEXT(tmpelm, field) == NULL || (LIST_NEXT(tmpelm, field)->firstsortfield > (elm)->firstsortfield) || ((LIST_NEXT(tmpelm, field)->firstsortfield == (elm)->firstsortfield) && (LIST_NEXT(tmpelm, field)->secondsortfield >= (elm)->secondsortfield)) || ((LIST_NEXT(tmpelm, field)->firstsortfield == (elm)->firstsortfield) && (LIST_NEXT(tmpelm, field)->secondsortfield == (elm)->secondsortfield) && (LIST_NEXT(tmpelm, field)->thirdsortfield >= (elm)->thirdsortfield))) { \
				LIST_INSERT_AFTER(tmpelm, elm, field); \
				break; \
			} \
		} \
	} \
} while (0)
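/*
 * A minimal usage sketch (not compiled) for the sorted-insert macros above.
 * The struct, list, and field names here are hypothetical; the real callers
 * below keep session and kernel policy lists sorted by order/sub-order.
 */
#if 0
struct example_item {
	LIST_ENTRY(example_item) chain;
	u_int32_t order;
};
static LIST_HEAD(, example_item) example_list = LIST_HEAD_INITIALIZER(example_list);

static void
example_insert_sorted(struct example_item *item)
{
	struct example_item *tmp_item = NULL;
	// Links `item` before the first element whose order is >= item->order,
	// keeping example_list in ascending order of the `order` field.
	LIST_INSERT_SORTED_ASCENDING(&example_list, item, chain, order, tmp_item);
}
#endif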
#define IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(x) ((x) == NECP_ROUTE_RULE_DENY_INTERFACE || (x) == NECP_ROUTE_RULE_ALLOW_INTERFACE)

#define NECP_KERNEL_CONDITION_ALL_INTERFACES      0x000001
#define NECP_KERNEL_CONDITION_BOUND_INTERFACE     0x000002
#define NECP_KERNEL_CONDITION_PROTOCOL            0x000004
#define NECP_KERNEL_CONDITION_LOCAL_START         0x000008
#define NECP_KERNEL_CONDITION_LOCAL_END           0x000010
#define NECP_KERNEL_CONDITION_LOCAL_PREFIX        0x000020
#define NECP_KERNEL_CONDITION_REMOTE_START        0x000040
#define NECP_KERNEL_CONDITION_REMOTE_END          0x000080
#define NECP_KERNEL_CONDITION_REMOTE_PREFIX       0x000100
#define NECP_KERNEL_CONDITION_APP_ID              0x000200
#define NECP_KERNEL_CONDITION_REAL_APP_ID         0x000400
#define NECP_KERNEL_CONDITION_DOMAIN              0x000800
#define NECP_KERNEL_CONDITION_ACCOUNT_ID          0x001000
#define NECP_KERNEL_CONDITION_POLICY_ID           0x002000
#define NECP_KERNEL_CONDITION_PID                 0x004000
#define NECP_KERNEL_CONDITION_UID                 0x008000
#define NECP_KERNEL_CONDITION_LAST_INTERFACE      0x010000 // Only set from packets looping between interfaces
#define NECP_KERNEL_CONDITION_TRAFFIC_CLASS       0x020000
#define NECP_KERNEL_CONDITION_ENTITLEMENT         0x040000
#define NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT  0x080000
#define NECP_KERNEL_CONDITION_AGENT_TYPE          0x100000

#define NECP_MAX_POLICY_RESULT_SIZE       512
#define NECP_MAX_ROUTE_RULES_ARRAY_SIZE   1024
#define NECP_MAX_CONDITIONS_ARRAY_SIZE    4096
#define NECP_MAX_POLICY_LIST_COUNT        1024

// Cap the policy size at the max result + conditions size, with room for extra TLVs
#define NECP_MAX_POLICY_SIZE              (1024 + NECP_MAX_POLICY_RESULT_SIZE + NECP_MAX_CONDITIONS_ARRAY_SIZE)
struct necp_service_registration {
	LIST_ENTRY(necp_service_registration) session_chain;
	LIST_ENTRY(necp_service_registration) kernel_chain;
	u_int32_t service_id;
};

struct necp_session {
	u_int8_t necp_fd_type;
	u_int32_t control_unit;
	u_int32_t session_priority; // Descriptive priority rating
	u_int32_t session_order;

	necp_policy_id last_policy_id;

	decl_lck_mtx_data(, lock);

	bool proc_locked; // Messages must come from proc_uuid

	LIST_HEAD(_policies, necp_session_policy) policies;

	LIST_HEAD(_services, necp_service_registration) services;

	TAILQ_ENTRY(necp_session) chain;
};

#define NECP_SESSION_LOCK(_s) lck_mtx_lock(&_s->lock)
#define NECP_SESSION_UNLOCK(_s) lck_mtx_unlock(&_s->lock)

static TAILQ_HEAD(_necp_session_list, necp_session) necp_session_list;
struct necp_socket_info {
	union necp_sockaddr_union local_addr;
	union necp_sockaddr_union remote_addr;
	u_int32_t bound_interface_index;
	u_int32_t traffic_class;
	u_int32_t application_id;
	u_int32_t real_application_id;
	u_int32_t account_id;
};
static kern_ctl_ref necp_kctlref;
static u_int32_t necp_family;
static OSMallocTag necp_malloc_tag;
static lck_grp_attr_t *necp_kernel_policy_grp_attr = NULL;
static lck_attr_t *necp_kernel_policy_mtx_attr = NULL;
static lck_grp_t *necp_kernel_policy_mtx_grp = NULL;
decl_lck_rw_data(static, necp_kernel_policy_lock);

static lck_grp_attr_t *necp_route_rule_grp_attr = NULL;
static lck_attr_t *necp_route_rule_mtx_attr = NULL;
static lck_grp_t *necp_route_rule_mtx_grp = NULL;
decl_lck_rw_data(static, necp_route_rule_lock);
/*
 * On modification, invalidate cached lookups by bumping the generation count.
 * Other calls will need to take the slowpath of taking
 * the subsystem lock.
 */
static volatile int32_t necp_kernel_socket_policies_gencount;
#define BUMP_KERNEL_SOCKET_POLICIES_GENERATION_COUNT() do { \
	if (OSIncrementAtomic(&necp_kernel_socket_policies_gencount) == (INT32_MAX - 1)) { \
		necp_kernel_socket_policies_gencount = 1; \
	} \
} while (0)
static u_int32_t necp_kernel_application_policies_condition_mask;
static size_t necp_kernel_application_policies_count;
static u_int32_t necp_kernel_socket_policies_condition_mask;
static size_t necp_kernel_socket_policies_count;
static size_t necp_kernel_socket_policies_non_app_count;
static LIST_HEAD(_necpkernelsocketconnectpolicies, necp_kernel_socket_policy) necp_kernel_socket_policies;
#define NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS 5
#define NECP_SOCKET_MAP_APP_ID_TO_BUCKET(appid) (appid ? (appid%(NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS - 1) + 1) : 0)
static struct necp_kernel_socket_policy **necp_kernel_socket_policies_map[NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS];
static struct necp_kernel_socket_policy **necp_kernel_socket_policies_app_layer_map;
/*
 * A note on policy 'maps': these are used for boosting efficiency when matching policies. For each dimension of the map,
 * such as an ID, the 0 bucket is reserved for sockets/packets that do not have this parameter, while the other
 * buckets lead to an array of policy pointers that form the list applicable when the (parameter%(NUM_BUCKETS - 1) + 1) == bucket_index.
 *
 * For example, a packet with policy ID of 7, when there are 4 ID buckets, will map to bucket (7%3 + 1) = 2.
 */
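/*
 * A small sketch (not compiled) of the bucket math described above, using the
 * socket policy map macros. With 5 buckets, bucket 0 is reserved for traffic
 * that has no app ID, and all other IDs spread across buckets 1-4.
 */
#if 0
static void
example_app_id_bucket(void)
{
	u_int32_t app_id = 7;
	// (7 % (5 - 1)) + 1 == 4; an app ID of 0 would land in the reserved bucket 0.
	u_int32_t bucket = NECP_SOCKET_MAP_APP_ID_TO_BUCKET(app_id);
	(void)bucket;
}
#endif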
static u_int32_t necp_kernel_ip_output_policies_condition_mask;
static size_t necp_kernel_ip_output_policies_count;
static size_t necp_kernel_ip_output_policies_non_id_count;
static LIST_HEAD(_necpkernelipoutputpolicies, necp_kernel_ip_output_policy) necp_kernel_ip_output_policies;
#define NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS 5
#define NECP_IP_OUTPUT_MAP_ID_TO_BUCKET(id) (id ? (id%(NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS - 1) + 1) : 0)
static struct necp_kernel_ip_output_policy **necp_kernel_ip_output_policies_map[NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS];
static struct necp_session *necp_create_session(void);
static void necp_delete_session(struct necp_session *session);

static necp_policy_id necp_handle_policy_add(struct necp_session *session, u_int32_t message_id, mbuf_t packet,
    u_int8_t *tlv_buffer, size_t tlv_buffer_length, int offset, int *error);
static void necp_handle_policy_get(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
static void necp_handle_policy_delete(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
static void necp_handle_policy_apply_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
static void necp_handle_policy_list_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
static void necp_handle_policy_delete_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
static int necp_handle_policy_dump_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet,
    user_addr_t out_buffer, size_t out_buffer_length, int offset);
static void necp_handle_set_session_priority(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
static void necp_handle_lock_session_to_proc(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
static void necp_handle_register_service(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
static void necp_handle_unregister_service(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
#define MAX_RESULT_STRING_LEN 64
static inline const char * necp_get_result_description(char *result_string, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter);

static struct necp_session_policy *necp_policy_create(struct necp_session *session, necp_policy_order order, u_int8_t *conditions_array, u_int32_t conditions_array_size, u_int8_t *route_rules_array, u_int32_t route_rules_array_size, u_int8_t *result, u_int32_t result_size);
static struct necp_session_policy *necp_policy_find(struct necp_session *session, necp_policy_id policy_id);
static bool necp_policy_mark_for_deletion(struct necp_session *session, struct necp_session_policy *policy);
static bool necp_policy_mark_all_for_deletion(struct necp_session *session);
static bool necp_policy_delete(struct necp_session *session, struct necp_session_policy *policy);
static void necp_policy_apply_all(struct necp_session *session);
static necp_kernel_policy_id necp_kernel_socket_policy_add(necp_policy_order order, u_int32_t session_order, int session_pid, u_int32_t condition_mask, u_int32_t condition_negated_mask, necp_app_id cond_app_id, necp_app_id cond_real_app_id, char *cond_custom_entitlement, u_int32_t cond_account_id, char *domain, pid_t cond_pid, uid_t cond_uid, ifnet_t cond_bound_interface, struct necp_policy_condition_tc_range cond_traffic_class, u_int16_t cond_protocol, union necp_sockaddr_union *cond_local_start, union necp_sockaddr_union *cond_local_end, u_int8_t cond_local_prefix, union necp_sockaddr_union *cond_remote_start, union necp_sockaddr_union *cond_remote_end, u_int8_t cond_remote_prefix, struct necp_policy_condition_agent_type *cond_agent_type, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter);
static bool necp_kernel_socket_policy_delete(necp_kernel_policy_id policy_id);
static bool necp_kernel_socket_policies_reprocess(void);
static bool necp_kernel_socket_policies_update_uuid_table(void);
static inline struct necp_kernel_socket_policy *necp_socket_find_policy_match_with_info_locked(struct necp_kernel_socket_policy **policy_search_array, struct necp_socket_info *info, necp_kernel_policy_filter *return_filter, u_int32_t *return_route_rule_id, necp_kernel_policy_result *return_service_action, necp_kernel_policy_service *return_service, u_int32_t *return_netagent_array, u_int32_t *return_netagent_use_flags_array, size_t netagent_array_count, struct necp_client_parameter_netagent_type *required_agent_types, u_int32_t num_required_agent_types, proc_t proc, necp_kernel_policy_id *skip_policy_id);

static necp_kernel_policy_id necp_kernel_ip_output_policy_add(necp_policy_order order, necp_policy_order suborder, u_int32_t session_order, int session_pid, u_int32_t condition_mask, u_int32_t condition_negated_mask, necp_kernel_policy_id cond_policy_id, ifnet_t cond_bound_interface, u_int32_t cond_last_interface_index, u_int16_t cond_protocol, union necp_sockaddr_union *cond_local_start, union necp_sockaddr_union *cond_local_end, u_int8_t cond_local_prefix, union necp_sockaddr_union *cond_remote_start, union necp_sockaddr_union *cond_remote_end, u_int8_t cond_remote_prefix, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter);
static bool necp_kernel_ip_output_policy_delete(necp_kernel_policy_id policy_id);
static bool necp_kernel_ip_output_policies_reprocess(void);
static bool necp_is_addr_in_range(struct sockaddr *addr, struct sockaddr *range_start, struct sockaddr *range_end);
static bool necp_is_range_in_range(struct sockaddr *inner_range_start, struct sockaddr *inner_range_end, struct sockaddr *range_start, struct sockaddr *range_end);
static bool necp_is_addr_in_subnet(struct sockaddr *addr, struct sockaddr *subnet_addr, u_int8_t subnet_prefix);
static int necp_addr_compare(struct sockaddr *sa1, struct sockaddr *sa2, int check_port);
static bool necp_buffer_compare_with_bit_prefix(u_int8_t *p1, u_int8_t *p2, u_int32_t bits);
static bool necp_is_loopback(struct sockaddr *local_addr, struct sockaddr *remote_addr, struct inpcb *inp, struct mbuf *packet);
static bool necp_is_intcoproc(struct inpcb *inp, struct mbuf *packet);
struct necp_uuid_id_mapping {
	LIST_ENTRY(necp_uuid_id_mapping) chain;
	u_int32_t table_refcount; // Add to UUID policy table count
};

static size_t necp_num_uuid_app_id_mappings;
static bool necp_uuid_app_id_mappings_dirty;
#define NECP_UUID_APP_ID_HASH_SIZE 64
static u_long necp_uuid_app_id_hash_mask;
static u_long necp_uuid_app_id_hash_num_buckets;
static LIST_HEAD(necp_uuid_id_mapping_head, necp_uuid_id_mapping) *necp_uuid_app_id_hashtbl, necp_uuid_service_id_list; // App map is real hash table, service map is just mapping
#define APPUUIDHASH(uuid) (&necp_uuid_app_id_hashtbl[uuid[0] & necp_uuid_app_id_hash_mask]) // Assume first byte of UUIDs are evenly distributed
static u_int32_t necp_create_uuid_app_id_mapping(uuid_t uuid, bool *allocated_mapping, bool uuid_policy_table);
static bool necp_remove_uuid_app_id_mapping(uuid_t uuid, bool *removed_mapping, bool uuid_policy_table);
static struct necp_uuid_id_mapping *necp_uuid_lookup_uuid_with_app_id_locked(u_int32_t local_id);

static struct necp_uuid_id_mapping *necp_uuid_lookup_service_id_locked(uuid_t uuid);
static struct necp_uuid_id_mapping *necp_uuid_lookup_uuid_with_service_id_locked(u_int32_t local_id);
static u_int32_t necp_create_uuid_service_id_mapping(uuid_t uuid);
static bool necp_remove_uuid_service_id_mapping(uuid_t uuid);
struct necp_string_id_mapping {
	LIST_ENTRY(necp_string_id_mapping) chain;
};

static LIST_HEAD(necp_string_id_mapping_list, necp_string_id_mapping) necp_account_id_list;
static u_int32_t necp_create_string_to_id_mapping(struct necp_string_id_mapping_list *list, char *domain);
static bool necp_remove_string_to_id_mapping(struct necp_string_id_mapping_list *list, char *domain);
static struct necp_string_id_mapping *necp_lookup_string_with_id_locked(struct necp_string_id_mapping_list *list, u_int32_t local_id);

static struct necp_kernel_socket_policy *necp_kernel_socket_policy_find(necp_kernel_policy_id policy_id);
static struct necp_kernel_ip_output_policy *necp_kernel_ip_output_policy_find(necp_kernel_policy_id policy_id);

static LIST_HEAD(_necp_kernel_service_list, necp_service_registration) necp_registered_service_list;
static char *necp_create_trimmed_domain(char *string, size_t length);
static inline int necp_count_dots(char *string, size_t length);

static char *necp_copy_string(char *string, size_t length);
static bool necp_update_qos_marking(struct ifnet *ifp, u_int32_t route_rule_id);

#define ROUTE_RULE_IS_AGGREGATE(ruleid) (ruleid > UINT16_MAX)

#define MAX_ROUTE_RULE_INTERFACES 10
struct necp_route_rule {
	LIST_ENTRY(necp_route_rule) chain;
	u_int32_t default_action;
	u_int8_t cellular_action;
	u_int8_t wifi_action;
	u_int8_t wired_action;
	u_int8_t expensive_action;
	u_int exception_if_indices[MAX_ROUTE_RULE_INTERFACES];
	u_int8_t exception_if_actions[MAX_ROUTE_RULE_INTERFACES];
};
static LIST_HEAD(necp_route_rule_list, necp_route_rule) necp_route_rules;
static u_int32_t necp_create_route_rule(struct necp_route_rule_list *list, u_int8_t *route_rules_array, u_int32_t route_rules_array_size);
static bool necp_remove_route_rule(struct necp_route_rule_list *list, u_int32_t route_rule_id);
static bool necp_route_is_allowed(struct rtentry *route, ifnet_t interface, u_int32_t route_rule_id, u_int32_t *interface_type_denied);
static struct necp_route_rule *necp_lookup_route_rule_locked(struct necp_route_rule_list *list, u_int32_t route_rule_id);
static inline void necp_get_parent_cred_result(proc_t proc, struct necp_socket_info *info);
#define MAX_AGGREGATE_ROUTE_RULES 16
struct necp_aggregate_route_rule {
	LIST_ENTRY(necp_aggregate_route_rule) chain;
	u_int32_t rule_ids[MAX_AGGREGATE_ROUTE_RULES];
};
static LIST_HEAD(necp_aggregate_route_rule_list, necp_aggregate_route_rule) necp_aggregate_route_rules;
static u_int32_t necp_create_aggregate_route_rule(u_int32_t *rule_ids);
// Sysctl definitions
static int sysctl_handle_necp_level SYSCTL_HANDLER_ARGS;

SYSCTL_NODE(_net, OID_AUTO, necp, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "NECP");
SYSCTL_INT(_net_necp, NECPCTL_PASS_LOOPBACK, pass_loopback, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_pass_loopback, 0, "");
SYSCTL_INT(_net_necp, NECPCTL_PASS_KEEPALIVES, pass_keepalives, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_pass_keepalives, 0, "");
SYSCTL_INT(_net_necp, NECPCTL_DEBUG, debug, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_debug, 0, "");
SYSCTL_PROC(_net_necp, NECPCTL_DROP_ALL_LEVEL, drop_all_level, CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW, &necp_drop_all_level, 0, &sysctl_handle_necp_level, "IU", "");
SYSCTL_LONG(_net_necp, NECPCTL_SOCKET_POLICY_COUNT, socket_policy_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_kernel_socket_policies_count, "");
SYSCTL_LONG(_net_necp, NECPCTL_SOCKET_NON_APP_POLICY_COUNT, socket_non_app_policy_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_kernel_socket_policies_non_app_count, "");
SYSCTL_LONG(_net_necp, NECPCTL_IP_POLICY_COUNT, ip_policy_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_kernel_ip_output_policies_count, "");
SYSCTL_INT(_net_necp, NECPCTL_SESSION_COUNT, session_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_session_count, 0, "");
// Session order allocation
necp_allocate_new_session_order(u_int32_t priority, u_int32_t control_unit)
{
	u_int32_t new_order = 0;

	// For now, just allocate 1000 orders for each priority
	if (priority == NECP_SESSION_PRIORITY_UNKNOWN || priority > NECP_SESSION_NUM_PRIORITIES) {
		priority = NECP_SESSION_PRIORITY_DEFAULT;
	}

	// Use the control unit to decide the offset into the priority list
	new_order = (control_unit) + ((priority - 1) * 1000);

	return (new_order);
}

static inline u_int32_t
necp_get_first_order_for_priority(u_int32_t priority)
{
	return (((priority - 1) * 1000) + 1);
}
sysctl_handle_necp_level SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
	if (necp_drop_all_level == 0) {
		necp_drop_all_order = 0;
	} else {
		necp_drop_all_order = necp_get_first_order_for_priority(necp_drop_all_level);
	}
	return (error);
}
static int noop_read(struct fileproc *, struct uio *, int, vfs_context_t);
static int noop_write(struct fileproc *, struct uio *, int, vfs_context_t);
static int noop_ioctl(struct fileproc *, unsigned long, caddr_t,
    vfs_context_t);
static int noop_select(struct fileproc *, int, void *, vfs_context_t);
static int necp_session_op_close(struct fileglob *, vfs_context_t);
static int noop_kqfilter(struct fileproc *, struct knote *,
    struct kevent_internal_s *, vfs_context_t);

static const struct fileops necp_session_fd_ops = {
	.fo_type = DTYPE_NETPOLICY,
	.fo_read = noop_read,
	.fo_write = noop_write,
	.fo_ioctl = noop_ioctl,
	.fo_select = noop_select,
	.fo_close = necp_session_op_close,
	.fo_kqfilter = noop_kqfilter,
};
noop_read(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx)
#pragma unused(fp, uio, flags, ctx)

noop_write(struct fileproc *fp, struct uio *uio, int flags,
    vfs_context_t ctx)
#pragma unused(fp, uio, flags, ctx)

noop_ioctl(struct fileproc *fp, unsigned long com, caddr_t data,
    vfs_context_t ctx)
#pragma unused(fp, com, data, ctx)

noop_select(struct fileproc *fp, int which, void *wql, vfs_context_t ctx)
#pragma unused(fp, which, wql, ctx)

noop_kqfilter(struct fileproc *fp, struct knote *kn,
    struct kevent_internal_s *kev, vfs_context_t ctx)
#pragma unused(fp, kn, kev, ctx)
necp_session_open(struct proc *p, struct necp_session_open_args *uap, int *retval)
	struct necp_session *session = NULL;
	struct fileproc *fp = NULL;

	uid_t uid = kauth_cred_getuid(proc_ucred(p));
	if (uid != 0 && priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES, 0) != 0) {
		NECPLOG0(LOG_ERR, "Process does not hold necessary entitlement to open NECP session");

	error = falloc(p, &fp, &fd, vfs_context_current());

	session = necp_create_session();
	if (session == NULL) {

	fp->f_fglob->fg_flag = 0;
	fp->f_fglob->fg_ops = &necp_session_fd_ops;
	fp->f_fglob->fg_data = session;

	FDFLAGS_SET(p, fd, (UF_EXCLOSE | UF_FORKCLOSE));
	procfdtbl_releasefd(p, fd, NULL);
	fp_drop(p, fd, fp, 1);
necp_session_op_close(struct fileglob *fg, vfs_context_t ctx)
	struct necp_session *session = (struct necp_session *)fg->fg_data;

	if (session != NULL) {
		necp_policy_mark_all_for_deletion(session);
		necp_policy_apply_all(session);
		necp_delete_session(session);
necp_session_find_from_fd(int fd, struct necp_session **session)
	proc_t p = current_proc();
	struct fileproc *fp = NULL;

	if ((error = fp_lookup(p, fd, &fp, 1)) != 0) {

	if (fp->f_fglob->fg_ops->fo_type != DTYPE_NETPOLICY) {
		fp_drop(p, fd, fp, 1);

	*session = (struct necp_session *)fp->f_fglob->fg_data;

	if ((*session)->necp_fd_type != necp_fd_type_session) {
		// Not a client fd, ignore
necp_session_add_policy(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
	u_int8_t *tlv_buffer = NULL;

	if (uap->in_buffer_length == 0 || uap->in_buffer_length > NECP_MAX_POLICY_SIZE || uap->in_buffer == 0) {
		NECPLOG(LOG_ERR, "necp_session_add_policy invalid input (%zu)", uap->in_buffer_length);

	if (uap->out_buffer_length < sizeof(necp_policy_id) || uap->out_buffer == 0) {
		NECPLOG(LOG_ERR, "necp_session_add_policy invalid output buffer (%zu)", uap->out_buffer_length);

	if ((tlv_buffer = _MALLOC(uap->in_buffer_length, M_NECP, M_WAITOK | M_ZERO)) == NULL) {

	error = copyin(uap->in_buffer, tlv_buffer, uap->in_buffer_length);
		NECPLOG(LOG_ERR, "necp_session_add_policy tlv copyin error (%d)", error);

	necp_policy_id new_policy_id = necp_handle_policy_add(session, 0, NULL, tlv_buffer, uap->in_buffer_length, 0, &error);
		NECPLOG(LOG_ERR, "necp_session_add_policy failed to add policy (%d)", error);

	error = copyout(&new_policy_id, uap->out_buffer, sizeof(new_policy_id));
		NECPLOG(LOG_ERR, "necp_session_add_policy policy_id copyout error (%d)", error);

	if (tlv_buffer != NULL) {
		FREE(tlv_buffer, M_NECP);
necp_session_get_policy(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
	u_int8_t *response = NULL;

	if (uap->in_buffer_length < sizeof(necp_policy_id) || uap->in_buffer == 0) {
		NECPLOG(LOG_ERR, "necp_session_get_policy invalid input (%zu)", uap->in_buffer_length);

	necp_policy_id policy_id = 0;
	error = copyin(uap->in_buffer, &policy_id, sizeof(policy_id));
		NECPLOG(LOG_ERR, "necp_session_get_policy policy_id copyin error (%d)", error);

	struct necp_session_policy *policy = necp_policy_find(session, policy_id);
	if (policy == NULL || policy->pending_deletion) {
		NECPLOG(LOG_ERR, "Failed to find policy with id %d", policy_id);

	u_int32_t order_tlv_size = sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(necp_policy_order);
	u_int32_t result_tlv_size = (policy->result_size ? (sizeof(u_int8_t) + sizeof(u_int32_t) + policy->result_size) : 0);
	u_int32_t response_size = order_tlv_size + result_tlv_size + policy->conditions_size;

	if (uap->out_buffer_length < response_size || uap->out_buffer == 0) {
		NECPLOG(LOG_ERR, "necp_session_get_policy buffer not large enough (%u < %u)", uap->out_buffer_length, response_size);

	if (response_size > NECP_MAX_POLICY_SIZE) {
		NECPLOG(LOG_ERR, "necp_session_get_policy size too large to copy (%u)", response_size);

	MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK | M_ZERO);
	if (response == NULL) {

	u_int8_t *cursor = response;
	cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ORDER, sizeof(necp_policy_order), &policy->order, response, response_size);
	if (result_tlv_size) {
		cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_RESULT, policy->result_size, &policy->result, response, response_size);
	if (policy->conditions_size) {
		memcpy(((u_int8_t *)(void *)(cursor)), policy->conditions, policy->conditions_size);

	error = copyout(response, uap->out_buffer, response_size);
		NECPLOG(LOG_ERR, "necp_session_get_policy TLV copyout error (%d)", error);

	if (response != NULL) {
		FREE(response, M_NECP);
necp_session_delete_policy(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
	if (uap->in_buffer_length < sizeof(necp_policy_id) || uap->in_buffer == 0) {
		NECPLOG(LOG_ERR, "necp_session_delete_policy invalid input (%zu)", uap->in_buffer_length);

	necp_policy_id delete_policy_id = 0;
	error = copyin(uap->in_buffer, &delete_policy_id, sizeof(delete_policy_id));
		NECPLOG(LOG_ERR, "necp_session_delete_policy policy_id copyin error (%d)", error);

	struct necp_session_policy *policy = necp_policy_find(session, delete_policy_id);
	if (policy == NULL || policy->pending_deletion) {
		NECPLOG(LOG_ERR, "necp_session_delete_policy failed to find policy with id %u", delete_policy_id);

	necp_policy_mark_for_deletion(session, policy);
necp_session_apply_all(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
	necp_policy_apply_all(session);
necp_session_list_all(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
	u_int32_t tlv_size = (sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(necp_policy_id));
	u_int32_t response_size = 0;
	u_int8_t *response = NULL;
	int num_policies = 0;
	int cur_policy_index = 0;
	struct necp_session_policy *policy;

	LIST_FOREACH(policy, &session->policies, chain) {
		if (!policy->pending_deletion) {

	if (num_policies > NECP_MAX_POLICY_LIST_COUNT) {
		NECPLOG(LOG_ERR, "necp_session_list_all size too large to copy (%u policies)", num_policies);

	response_size = num_policies * tlv_size;
	if (uap->out_buffer_length < response_size || uap->out_buffer == 0) {
		NECPLOG(LOG_ERR, "necp_session_list_all buffer not large enough (%u < %u)", uap->out_buffer_length, response_size);

	// Create a response with one Policy ID TLV for each policy
	MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK | M_ZERO);
	if (response == NULL) {

	u_int8_t *cursor = response;
	LIST_FOREACH(policy, &session->policies, chain) {
		if (!policy->pending_deletion && cur_policy_index < num_policies) {
			cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ID, sizeof(u_int32_t), &policy->local_id, response, response_size);

	error = copyout(response, uap->out_buffer, response_size);
		NECPLOG(LOG_ERR, "necp_session_list_all TLV copyout error (%d)", error);

	if (response != NULL) {
		FREE(response, M_NECP);
necp_session_delete_all(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
	necp_policy_mark_all_for_deletion(session);
necp_session_set_session_priority(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
	struct necp_session_policy *policy = NULL;
	struct necp_session_policy *temp_policy = NULL;

	if (uap->in_buffer_length < sizeof(necp_session_priority) || uap->in_buffer == 0) {
		NECPLOG(LOG_ERR, "necp_session_set_session_priority invalid input (%zu)", uap->in_buffer_length);

	necp_session_priority requested_session_priority = 0;
	error = copyin(uap->in_buffer, &requested_session_priority, sizeof(requested_session_priority));
		NECPLOG(LOG_ERR, "necp_session_set_session_priority priority copyin error (%d)", error);

	// Enforce special session priorities with entitlements
	if (requested_session_priority == NECP_SESSION_PRIORITY_CONTROL ||
	    requested_session_priority == NECP_SESSION_PRIORITY_PRIVILEGED_TUNNEL) {
		errno_t cred_result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES, 0);
		if (cred_result != 0) {
			NECPLOG(LOG_ERR, "Session does not hold necessary entitlement to claim priority level %d", requested_session_priority);

	if (session->session_priority != requested_session_priority) {
		session->session_priority = requested_session_priority;
		session->session_order = necp_allocate_new_session_order(session->session_priority, session->control_unit);
		session->dirty = TRUE;

		// Mark all policies as needing updates
		LIST_FOREACH_SAFE(policy, &session->policies, chain, temp_policy) {
			policy->pending_update = TRUE;
necp_session_lock_to_process(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
	session->proc_locked = TRUE;
necp_session_register_service(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
	struct necp_service_registration *new_service = NULL;

	if (uap->in_buffer_length < sizeof(uuid_t) || uap->in_buffer == 0) {
		NECPLOG(LOG_ERR, "necp_session_register_service invalid input (%zu)", uap->in_buffer_length);

	error = copyin(uap->in_buffer, service_uuid, sizeof(service_uuid));
		NECPLOG(LOG_ERR, "necp_session_register_service uuid copyin error (%d)", error);

	MALLOC(new_service, struct necp_service_registration *, sizeof(*new_service), M_NECP, M_WAITOK | M_ZERO);
	if (new_service == NULL) {
		NECPLOG0(LOG_ERR, "Failed to allocate service registration");

	lck_rw_lock_exclusive(&necp_kernel_policy_lock);
	new_service->service_id = necp_create_uuid_service_id_mapping(service_uuid);
	LIST_INSERT_HEAD(&session->services, new_service, session_chain);
	LIST_INSERT_HEAD(&necp_registered_service_list, new_service, kernel_chain);
	lck_rw_done(&necp_kernel_policy_lock);
necp_session_unregister_service(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
	struct necp_service_registration *service = NULL;
	struct necp_service_registration *temp_service = NULL;
	struct necp_uuid_id_mapping *mapping = NULL;

	if (uap->in_buffer_length < sizeof(uuid_t) || uap->in_buffer == 0) {
		NECPLOG(LOG_ERR, "necp_session_unregister_service invalid input (%zu)", uap->in_buffer_length);

	error = copyin(uap->in_buffer, service_uuid, sizeof(service_uuid));
		NECPLOG(LOG_ERR, "necp_session_unregister_service uuid copyin error (%d)", error);

	// Remove all matching services for this session
	lck_rw_lock_exclusive(&necp_kernel_policy_lock);
	mapping = necp_uuid_lookup_service_id_locked(service_uuid);
	if (mapping != NULL) {
		LIST_FOREACH_SAFE(service, &session->services, session_chain, temp_service) {
			if (service->service_id == mapping->id) {
				LIST_REMOVE(service, session_chain);
				LIST_REMOVE(service, kernel_chain);
				FREE(service, M_NECP);

		necp_remove_uuid_service_id_mapping(service_uuid);

	lck_rw_done(&necp_kernel_policy_lock);
necp_session_dump_all(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
	if (uap->out_buffer_length == 0 || uap->out_buffer == 0) {
		NECPLOG(LOG_ERR, "necp_session_dump_all invalid output buffer (%zu)", uap->out_buffer_length);

	error = necp_handle_policy_dump_all(session, 0, NULL, uap->out_buffer, uap->out_buffer_length, 0);
necp_session_action(struct proc *p, struct necp_session_action_args *uap, int *retval)
	int return_value = 0;
	struct necp_session *session = NULL;
	error = necp_session_find_from_fd(uap->necp_fd, &session);
		NECPLOG(LOG_ERR, "necp_session_action find fd error (%d)", error);

	NECP_SESSION_LOCK(session);

	if (session->proc_locked) {
		// Verify that the calling process is allowed to do actions
		proc_getexecutableuuid(current_proc(), proc_uuid, sizeof(proc_uuid));
		if (uuid_compare(proc_uuid, session->proc_uuid) != 0) {
	} else {
		// If not locked, update the proc_uuid and proc_pid of the session
		proc_getexecutableuuid(current_proc(), session->proc_uuid, sizeof(session->proc_uuid));
		session->proc_pid = proc_pid(current_proc());
	}

	u_int32_t action = uap->action;
	switch (action) {
	case NECP_SESSION_ACTION_POLICY_ADD: {
		return_value = necp_session_add_policy(session, uap, retval);
	case NECP_SESSION_ACTION_POLICY_GET: {
		return_value = necp_session_get_policy(session, uap, retval);
	case NECP_SESSION_ACTION_POLICY_DELETE: {
		return_value = necp_session_delete_policy(session, uap, retval);
	case NECP_SESSION_ACTION_POLICY_APPLY_ALL: {
		return_value = necp_session_apply_all(session, uap, retval);
	case NECP_SESSION_ACTION_POLICY_LIST_ALL: {
		return_value = necp_session_list_all(session, uap, retval);
	case NECP_SESSION_ACTION_POLICY_DELETE_ALL: {
		return_value = necp_session_delete_all(session, uap, retval);
	case NECP_SESSION_ACTION_SET_SESSION_PRIORITY: {
		return_value = necp_session_set_session_priority(session, uap, retval);
	case NECP_SESSION_ACTION_LOCK_SESSION_TO_PROC: {
		return_value = necp_session_lock_to_process(session, uap, retval);
	case NECP_SESSION_ACTION_REGISTER_SERVICE: {
		return_value = necp_session_register_service(session, uap, retval);
	case NECP_SESSION_ACTION_UNREGISTER_SERVICE: {
		return_value = necp_session_unregister_service(session, uap, retval);
	case NECP_SESSION_ACTION_POLICY_DUMP_ALL: {
		return_value = necp_session_dump_all(session, uap, retval);
	default: {
		NECPLOG(LOG_ERR, "necp_session_action unknown action (%u)", action);
		return_value = EINVAL;

	NECP_SESSION_UNLOCK(session);
	file_drop(uap->necp_fd);

	return (return_value);
// Kernel Control functions
static errno_t necp_register_control(void);
static errno_t necp_ctl_connect(kern_ctl_ref kctlref, struct sockaddr_ctl *sac, void **unitinfo);
static errno_t necp_ctl_disconnect(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo);
static errno_t necp_ctl_send(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, mbuf_t m, int flags);
static void necp_ctl_rcvd(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int flags);
static errno_t necp_ctl_getopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int opt, void *data, size_t *len);
static errno_t necp_ctl_setopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int opt, void *data, size_t len);

static bool necp_send_ctl_data(struct necp_session *session, u_int8_t *buffer, size_t buffer_size);
	result = necp_register_control();

	necp_kernel_policy_grp_attr = lck_grp_attr_alloc_init();
	if (necp_kernel_policy_grp_attr == NULL) {
		NECPLOG0(LOG_ERR, "lck_grp_attr_alloc_init failed");

	necp_kernel_policy_mtx_grp = lck_grp_alloc_init(NECP_CONTROL_NAME, necp_kernel_policy_grp_attr);
	if (necp_kernel_policy_mtx_grp == NULL) {
		NECPLOG0(LOG_ERR, "lck_grp_alloc_init failed");

	necp_kernel_policy_mtx_attr = lck_attr_alloc_init();
	if (necp_kernel_policy_mtx_attr == NULL) {
		NECPLOG0(LOG_ERR, "lck_attr_alloc_init failed");

	lck_rw_init(&necp_kernel_policy_lock, necp_kernel_policy_mtx_grp, necp_kernel_policy_mtx_attr);

	necp_route_rule_grp_attr = lck_grp_attr_alloc_init();
	if (necp_route_rule_grp_attr == NULL) {
		NECPLOG0(LOG_ERR, "lck_grp_attr_alloc_init failed");

	necp_route_rule_mtx_grp = lck_grp_alloc_init("necp_route_rule", necp_route_rule_grp_attr);
	if (necp_route_rule_mtx_grp == NULL) {
		NECPLOG0(LOG_ERR, "lck_grp_alloc_init failed");

	necp_route_rule_mtx_attr = lck_attr_alloc_init();
	if (necp_route_rule_mtx_attr == NULL) {
		NECPLOG0(LOG_ERR, "lck_attr_alloc_init failed");

	lck_rw_init(&necp_route_rule_lock, necp_route_rule_mtx_grp, necp_route_rule_mtx_attr);

	TAILQ_INIT(&necp_session_list);

	LIST_INIT(&necp_kernel_socket_policies);
	LIST_INIT(&necp_kernel_ip_output_policies);

	LIST_INIT(&necp_account_id_list);

	LIST_INIT(&necp_uuid_service_id_list);

	LIST_INIT(&necp_registered_service_list);

	LIST_INIT(&necp_route_rules);
	LIST_INIT(&necp_aggregate_route_rules);

	necp_uuid_app_id_hashtbl = hashinit(NECP_UUID_APP_ID_HASH_SIZE, M_NECP, &necp_uuid_app_id_hash_mask);
	necp_uuid_app_id_hash_num_buckets = necp_uuid_app_id_hash_mask + 1;
	necp_num_uuid_app_id_mappings = 0;
	necp_uuid_app_id_mappings_dirty = FALSE;

	necp_kernel_application_policies_condition_mask = 0;
	necp_kernel_socket_policies_condition_mask = 0;
	necp_kernel_ip_output_policies_condition_mask = 0;

	necp_kernel_application_policies_count = 0;
	necp_kernel_socket_policies_count = 0;
	necp_kernel_socket_policies_non_app_count = 0;
	necp_kernel_ip_output_policies_count = 0;
	necp_kernel_ip_output_policies_non_id_count = 0;

	necp_kernel_socket_policies_gencount = 1;

	memset(&necp_kernel_socket_policies_map, 0, sizeof(necp_kernel_socket_policies_map));
	memset(&necp_kernel_ip_output_policies_map, 0, sizeof(necp_kernel_ip_output_policies_map));
	necp_kernel_socket_policies_app_layer_map = NULL;

	if (necp_kernel_policy_mtx_attr != NULL) {
		lck_attr_free(necp_kernel_policy_mtx_attr);
		necp_kernel_policy_mtx_attr = NULL;
	}
	if (necp_kernel_policy_mtx_grp != NULL) {
		lck_grp_free(necp_kernel_policy_mtx_grp);
		necp_kernel_policy_mtx_grp = NULL;
	}
	if (necp_kernel_policy_grp_attr != NULL) {
		lck_grp_attr_free(necp_kernel_policy_grp_attr);
		necp_kernel_policy_grp_attr = NULL;
	}
	if (necp_route_rule_mtx_attr != NULL) {
		lck_attr_free(necp_route_rule_mtx_attr);
		necp_route_rule_mtx_attr = NULL;
	}
	if (necp_route_rule_mtx_grp != NULL) {
		lck_grp_free(necp_route_rule_mtx_grp);
		necp_route_rule_mtx_grp = NULL;
	}
	if (necp_route_rule_grp_attr != NULL) {
		lck_grp_attr_free(necp_route_rule_grp_attr);
		necp_route_rule_grp_attr = NULL;
	}
	if (necp_kctlref != NULL) {
		ctl_deregister(necp_kctlref);
		necp_kctlref = NULL;
	}
necp_register_control(void)
	struct kern_ctl_reg kern_ctl;

	// Create a tag to allocate memory
	necp_malloc_tag = OSMalloc_Tagalloc(NECP_CONTROL_NAME, OSMT_DEFAULT);

	// Find a unique value for our interface family
	result = mbuf_tag_id_find(NECP_CONTROL_NAME, &necp_family);
		NECPLOG(LOG_ERR, "mbuf_tag_id_find_internal failed: %d", result);

	bzero(&kern_ctl, sizeof(kern_ctl));
	strlcpy(kern_ctl.ctl_name, NECP_CONTROL_NAME, sizeof(kern_ctl.ctl_name));
	kern_ctl.ctl_name[sizeof(kern_ctl.ctl_name) - 1] = 0;
	kern_ctl.ctl_flags = CTL_FLAG_PRIVILEGED; // Require root
	kern_ctl.ctl_sendsize = 64 * 1024;
	kern_ctl.ctl_recvsize = 64 * 1024;
	kern_ctl.ctl_connect = necp_ctl_connect;
	kern_ctl.ctl_disconnect = necp_ctl_disconnect;
	kern_ctl.ctl_send = necp_ctl_send;
	kern_ctl.ctl_rcvd = necp_ctl_rcvd;
	kern_ctl.ctl_setopt = necp_ctl_setopt;
	kern_ctl.ctl_getopt = necp_ctl_getopt;

	result = ctl_register(&kern_ctl, &necp_kctlref);
		NECPLOG(LOG_ERR, "ctl_register failed: %d", result);
necp_post_change_event(struct kev_necp_policies_changed_data *necp_event_data)
	struct kev_msg ev_msg;
	memset(&ev_msg, 0, sizeof(ev_msg));

	ev_msg.vendor_code = KEV_VENDOR_APPLE;
	ev_msg.kev_class = KEV_NETWORK_CLASS;
	ev_msg.kev_subclass = KEV_NECP_SUBCLASS;
	ev_msg.event_code = KEV_NECP_POLICIES_CHANGED;

	ev_msg.dv[0].data_ptr = necp_event_data;
	ev_msg.dv[0].data_length = sizeof(necp_event_data->changed_count);
	ev_msg.dv[1].data_length = 0;

	kev_post_msg(&ev_msg);
necp_ctl_connect(kern_ctl_ref kctlref, struct sockaddr_ctl *sac, void **unitinfo)
#pragma unused(kctlref, sac)
	*unitinfo = necp_create_session();
	if (*unitinfo == NULL) {
		// Could not allocate session

necp_ctl_disconnect(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo)
#pragma unused(kctlref, unit)
	struct necp_session *session = (struct necp_session *)unitinfo;
	if (session != NULL) {
		necp_policy_mark_all_for_deletion(session);
		necp_policy_apply_all(session);
		necp_delete_session((struct necp_session *)unitinfo);
necp_packet_find_tlv(mbuf_t packet, int offset, u_int8_t type, int *err, int next)
	size_t cursor = offset;
	u_int32_t curr_length;

		error = mbuf_copydata(packet, cursor, sizeof(curr_type), &curr_type);
			curr_type = NECP_TLV_NIL;

		if (curr_type != type) {
			cursor += sizeof(curr_type);
			error = mbuf_copydata(packet, cursor, sizeof(curr_length), &curr_length);
			cursor += (sizeof(curr_length) + curr_length);
	} while (curr_type != type);
necp_packet_get_tlv_at_offset(mbuf_t packet, int tlv_offset, u_int32_t buff_len, void *buff, u_int32_t *value_size)
	if (tlv_offset < 0) {

	error = mbuf_copydata(packet, tlv_offset + sizeof(u_int8_t), sizeof(length), &length);

	u_int32_t total_len = m_length2(packet, NULL);
	if (total_len < (tlv_offset + sizeof(u_int8_t) + sizeof(length) + length)) {
		NECPLOG(LOG_ERR, "Got a bad TLV, length (%u) + offset (%d) < total length (%u)",
		    length, (tlv_offset + sizeof(u_int8_t) + sizeof(length)), total_len);

	if (value_size != NULL) {
		*value_size = length;

	if (buff != NULL && buff_len > 0) {
		u_int32_t to_copy = (length < buff_len) ? length : buff_len;
		error = mbuf_copydata(packet, tlv_offset + sizeof(u_int8_t) + sizeof(length), to_copy, buff);
necp_buffer_write_packet_header(u_int8_t *buffer, u_int8_t packet_type, u_int8_t flags, u_int32_t message_id)
	((struct necp_packet_header *)(void *)buffer)->packet_type = packet_type;
	((struct necp_packet_header *)(void *)buffer)->flags = flags;
	((struct necp_packet_header *)(void *)buffer)->message_id = message_id;
	return (buffer + sizeof(struct necp_packet_header));
necp_buffer_write_tlv_validate(u_int8_t *cursor, u_int8_t type, u_int32_t length,
    u_int8_t *buffer, u_int32_t buffer_length)
	if (cursor < buffer || (uintptr_t)(cursor - buffer) > buffer_length) {
		NECPLOG0(LOG_ERR, "Cannot write TLV in buffer (invalid cursor)");

	u_int8_t *next_tlv = (u_int8_t *)(cursor + sizeof(type) + sizeof(length) + length);
	if (next_tlv <= buffer || // make sure the next TLV start doesn't overflow
	    (uintptr_t)(next_tlv - buffer) > buffer_length) { // make sure the next TLV has enough room in buffer
		NECPLOG(LOG_ERR, "Cannot write TLV in buffer (TLV length %u, buffer length %u)",
		    length, buffer_length);
necp_buffer_write_tlv_if_different(u_int8_t *cursor, u_int8_t type,
    u_int32_t length, const void *value, bool *updated,
    u_int8_t *buffer, u_int32_t buffer_length)
	if (!necp_buffer_write_tlv_validate(cursor, type, length, buffer, buffer_length)) {

	u_int8_t *next_tlv = (u_int8_t *)(cursor + sizeof(type) + sizeof(length) + length);
	if (*updated || *(u_int8_t *)(cursor) != type) {
		*(u_int8_t *)(cursor) = type;

	if (*updated || *(u_int32_t *)(void *)(cursor + sizeof(type)) != length) {
		*(u_int32_t *)(void *)(cursor + sizeof(type)) = length;

	if (*updated || memcmp((u_int8_t *)(cursor + sizeof(type) + sizeof(length)), value, length) != 0) {
		memcpy((u_int8_t *)(cursor + sizeof(type) + sizeof(length)), value, length);
necp_buffer_write_tlv(u_int8_t *cursor, u_int8_t type,
    u_int32_t length, const void *value,
    u_int8_t *buffer, u_int32_t buffer_length)
	if (!necp_buffer_write_tlv_validate(cursor, type, length, buffer, buffer_length)) {

	u_int8_t *next_tlv = (u_int8_t *)(cursor + sizeof(type) + sizeof(length) + length);
	*(u_int8_t *)(cursor) = type;
	*(u_int32_t *)(void *)(cursor + sizeof(type)) = length;
	memcpy((u_int8_t *)(cursor + sizeof(type) + sizeof(length)), value, length);
necp_buffer_get_tlv_type(u_int8_t *buffer, int tlv_offset)
	u_int8_t *type = NULL;

	if (buffer == NULL) {

	type = (u_int8_t *)((u_int8_t *)buffer + tlv_offset);
	return (type ? *type : 0);

necp_buffer_get_tlv_length(u_int8_t *buffer, int tlv_offset)
	u_int32_t *length = NULL;

	if (buffer == NULL) {

	length = (u_int32_t *)(void *)((u_int8_t *)buffer + tlv_offset + sizeof(u_int8_t));
	return (length ? *length : 0);

necp_buffer_get_tlv_value(u_int8_t *buffer, int tlv_offset, u_int32_t *value_size)
	u_int8_t *value = NULL;
	u_int32_t length = necp_buffer_get_tlv_length(buffer, tlv_offset);

		*value_size = length;

	value = (u_int8_t *)((u_int8_t *)buffer + tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t));
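/*
 * A minimal sketch (not compiled) of how these TLV helpers are meant to be
 * combined: each TLV is a 1-byte type, a 4-byte host-order length, then
 * `length` bytes of value. The buffer size and the use of NECP_TLV_POLICY_ORDER
 * here are arbitrary example choices.
 */
#if 0
static void
example_tlv_round_trip(void)
{
	u_int8_t buffer[64];
	u_int32_t order = 10;

	// Writes [type][length][value] at the start of the buffer, returning the
	// cursor just past the new TLV (or NULL if it would not fit).
	u_int8_t *end = necp_buffer_write_tlv(buffer, NECP_TLV_POLICY_ORDER,
	    sizeof(order), &order, buffer, sizeof(buffer));
	if (end != NULL) {
		u_int32_t value_size = 0;
		u_int8_t *value = necp_buffer_get_tlv_value(buffer, 0, &value_size);
		// value points at the 4 value bytes; value_size == sizeof(order).
		(void)value;
	}
}
#endif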
necp_buffer_find_tlv(u_int8_t *buffer, u_int32_t buffer_length, int offset, u_int8_t type, int next)
	int cursor = offset;
	u_int32_t curr_length;

		if ((((u_int32_t)cursor) + sizeof(curr_type) + sizeof(curr_length)) > buffer_length) {

		curr_type = necp_buffer_get_tlv_type(buffer, cursor);
			curr_type = NECP_TLV_NIL;
		curr_length = necp_buffer_get_tlv_length(buffer, cursor);
		if (curr_length > buffer_length - ((u_int32_t)cursor + sizeof(curr_type) + sizeof(curr_length))) {

		next_cursor = (cursor + sizeof(curr_type) + sizeof(curr_length) + curr_length);
		if (curr_type == type) {
			// check if entire TLV fits inside buffer
			if (((u_int32_t)next_cursor) <= buffer_length) {

		cursor = next_cursor;
necp_find_tlv(mbuf_t packet, u_int8_t *buffer, u_int32_t buffer_length, int offset, u_int8_t type, int *err, int next)
	if (packet != NULL) {
		cursor = necp_packet_find_tlv(packet, offset, type, err, next);
	} else if (buffer != NULL) {
		cursor = necp_buffer_find_tlv(buffer, buffer_length, offset, type, next);
necp_get_tlv_at_offset(mbuf_t packet, u_int8_t *buffer, u_int32_t buffer_length,
    int tlv_offset, u_int32_t out_buffer_length, void *out_buffer, u_int32_t *value_size)
	if (packet != NULL) {
		// Handle mbuf parsing
		return necp_packet_get_tlv_at_offset(packet, tlv_offset, out_buffer_length, out_buffer, value_size);

	if (buffer == NULL) {
		NECPLOG0(LOG_ERR, "necp_get_tlv_at_offset buffer is NULL");

	// Handle buffer parsing

	// Validate that buffer has enough room for any TLV
	if (tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t) > buffer_length) {
		NECPLOG(LOG_ERR, "necp_get_tlv_at_offset buffer_length is too small for TLV (%u < %u)",
		    buffer_length, tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t));

	// Validate that buffer has enough room for this TLV
	u_int32_t tlv_length = necp_buffer_get_tlv_length(buffer, tlv_offset);
	if (tlv_length > buffer_length - (tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t))) {
		NECPLOG(LOG_ERR, "necp_get_tlv_at_offset buffer_length is too small for TLV of length %u (%u < %u)",
		    tlv_length, buffer_length, tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t) + tlv_length);

	if (out_buffer != NULL && out_buffer_length > 0) {
		// Validate that out buffer is large enough for value
		if (out_buffer_length < tlv_length) {
			NECPLOG(LOG_ERR, "necp_get_tlv_at_offset out_buffer_length is too small for TLV value (%u < %u)",
			    out_buffer_length, tlv_length);

		// Get value pointer
		u_int8_t *tlv_value = necp_buffer_get_tlv_value(buffer, tlv_offset, NULL);
		if (tlv_value == NULL) {
			NECPLOG0(LOG_ERR, "necp_get_tlv_at_offset tlv_value is NULL");

		memcpy(out_buffer, tlv_value, tlv_length);

	if (value_size != NULL) {
		*value_size = tlv_length;

necp_get_tlv(mbuf_t packet, u_int8_t *buffer, u_int32_t buffer_length,
    int offset, u_int8_t type, u_int32_t buff_len, void *buff, u_int32_t *value_size)
	int tlv_offset = necp_find_tlv(packet, buffer, buffer_length, offset, type, &error, 0);
	if (tlv_offset < 0) {

	return (necp_get_tlv_at_offset(packet, buffer, buffer_length, tlv_offset, buff_len, buff, value_size));
static bool
necp_send_ctl_data(struct necp_session *session, u_int8_t *buffer, size_t buffer_size)
{
	int error;

	if (necp_kctlref == NULL || session == NULL || buffer == NULL || buffer_size == 0) {
		return (FALSE);
	}

	error = ctl_enqueuedata(necp_kctlref, session->control_unit, buffer, buffer_size, CTL_DATA_EOR);
	return (error == 0);
}
static bool
necp_send_success_response(struct necp_session *session, u_int8_t packet_type, u_int32_t message_id)
{
	bool success = TRUE;
	u_int8_t *response = NULL;
	u_int8_t *cursor = NULL;
	size_t response_size = sizeof(struct necp_packet_header) + sizeof(u_int8_t) + sizeof(u_int32_t);
	MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK);
	if (response == NULL) {
		return (FALSE);
	}
	cursor = response;
	cursor = necp_buffer_write_packet_header(cursor, packet_type, NECP_PACKET_FLAGS_RESPONSE, message_id);
	cursor = necp_buffer_write_tlv(cursor, NECP_TLV_NIL, 0, NULL, response, response_size);

	if (!(success = necp_send_ctl_data(session, (u_int8_t *)response, response_size))) {
		NECPLOG0(LOG_ERR, "Failed to send response");
	}

	FREE(response, M_NECP);
	return (success);
}
static bool
necp_send_error_response(struct necp_session *session, u_int8_t packet_type, u_int32_t message_id, u_int32_t error)
{
	bool success = TRUE;
	u_int8_t *response = NULL;
	u_int8_t *cursor = NULL;
	size_t response_size = sizeof(struct necp_packet_header) + sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(u_int32_t);
	MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK);
	if (response == NULL) {
		return (FALSE);
	}
	cursor = response;
	cursor = necp_buffer_write_packet_header(cursor, packet_type, NECP_PACKET_FLAGS_RESPONSE, message_id);
	cursor = necp_buffer_write_tlv(cursor, NECP_TLV_ERROR, sizeof(error), &error, response, response_size);

	if (!(success = necp_send_ctl_data(session, (u_int8_t *)response, response_size))) {
		NECPLOG0(LOG_ERR, "Failed to send response");
	}

	FREE(response, M_NECP);
	return (success);
}
static bool
necp_send_policy_id_response(struct necp_session *session, u_int8_t packet_type, u_int32_t message_id, necp_policy_id policy_id)
{
	bool success = TRUE;
	u_int8_t *response = NULL;
	u_int8_t *cursor = NULL;
	size_t response_size = sizeof(struct necp_packet_header) + sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(u_int32_t);
	MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK);
	if (response == NULL) {
		return (FALSE);
	}
	cursor = response;
	cursor = necp_buffer_write_packet_header(cursor, packet_type, NECP_PACKET_FLAGS_RESPONSE, message_id);
	cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ID, sizeof(policy_id), &policy_id, response, response_size);

	if (!(success = necp_send_ctl_data(session, (u_int8_t *)response, response_size))) {
		NECPLOG0(LOG_ERR, "Failed to send response");
	}

	FREE(response, M_NECP);
	return (success);
}
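/*
 * Editor note (illustrative, not in the original sources): the three helpers
 * above all produce the same framing: a necp_packet_header with
 * NECP_PACKET_FLAGS_RESPONSE set, followed by exactly one TLV. For example,
 * a policy-id response carries:
 *
 *     [necp_packet_header][NECP_TLV_POLICY_ID][len = 4][policy_id]
 *
 * which is why response_size is computed as
 * sizeof(struct necp_packet_header) + sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(u_int32_t).
 */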
static errno_t
necp_ctl_send(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, mbuf_t packet, int flags)
{
#pragma unused(kctlref, unit, flags)
	struct necp_session *session = (struct necp_session *)unitinfo;
	struct necp_packet_header header;
	int error = 0;

	if (session == NULL) {
		NECPLOG0(LOG_ERR, "Got a NULL session");
		error = EINVAL;
		goto done;
	}

	if (mbuf_pkthdr_len(packet) < sizeof(header)) {
		NECPLOG(LOG_ERR, "Got a bad packet, length (%lu) < sizeof header (%lu)", mbuf_pkthdr_len(packet), sizeof(header));
		error = EINVAL;
		goto done;
	}

	error = mbuf_copydata(packet, 0, sizeof(header), &header);
	if (error) {
		NECPLOG(LOG_ERR, "mbuf_copydata failed for the header: %d", error);
		error = ENOBUFS;
		goto done;
	}

	if (session->proc_locked) {
		// Verify that the calling process is allowed to send messages
		uuid_t proc_uuid;
		proc_getexecutableuuid(current_proc(), proc_uuid, sizeof(proc_uuid));
		if (uuid_compare(proc_uuid, session->proc_uuid) != 0) {
			necp_send_error_response(session, header.packet_type, header.message_id, NECP_ERROR_INVALID_PROCESS);
			goto done;
		}
	} else {
		// If not locked, update the proc_uuid and proc_pid of the session
		proc_getexecutableuuid(current_proc(), session->proc_uuid, sizeof(session->proc_uuid));
		session->proc_pid = proc_pid(current_proc());
	}

	switch (header.packet_type) {
	case NECP_PACKET_TYPE_POLICY_ADD: {
		necp_handle_policy_add(session, header.message_id, packet, NULL, 0, sizeof(header), NULL);
		break;
	}
	case NECP_PACKET_TYPE_POLICY_GET: {
		necp_handle_policy_get(session, header.message_id, packet, sizeof(header));
		break;
	}
	case NECP_PACKET_TYPE_POLICY_DELETE: {
		necp_handle_policy_delete(session, header.message_id, packet, sizeof(header));
		break;
	}
	case NECP_PACKET_TYPE_POLICY_APPLY_ALL: {
		necp_handle_policy_apply_all(session, header.message_id, packet, sizeof(header));
		break;
	}
	case NECP_PACKET_TYPE_POLICY_LIST_ALL: {
		necp_handle_policy_list_all(session, header.message_id, packet, sizeof(header));
		break;
	}
	case NECP_PACKET_TYPE_POLICY_DELETE_ALL: {
		necp_handle_policy_delete_all(session, header.message_id, packet, sizeof(header));
		break;
	}
	case NECP_PACKET_TYPE_POLICY_DUMP_ALL: {
		necp_handle_policy_dump_all(session, header.message_id, packet, 0, 0, sizeof(header));
		break;
	}
	case NECP_PACKET_TYPE_SET_SESSION_PRIORITY: {
		necp_handle_set_session_priority(session, header.message_id, packet, sizeof(header));
		break;
	}
	case NECP_PACKET_TYPE_LOCK_SESSION_TO_PROC: {
		necp_handle_lock_session_to_proc(session, header.message_id, packet, sizeof(header));
		break;
	}
	case NECP_PACKET_TYPE_REGISTER_SERVICE: {
		necp_handle_register_service(session, header.message_id, packet, sizeof(header));
		break;
	}
	case NECP_PACKET_TYPE_UNREGISTER_SERVICE: {
		necp_handle_unregister_service(session, header.message_id, packet, sizeof(header));
		break;
	}
	default: {
		NECPLOG(LOG_ERR, "Received unknown message type %d", header.packet_type);
		necp_send_error_response(session, header.packet_type, header.message_id, NECP_ERROR_UNKNOWN_PACKET_TYPE);
		break;
	}
	}

done:
	mbuf_freem(packet);
	return (error);
}
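/*
 * Editor note (hedged sketch, not part of the original sources): a user-space
 * client reaches this dispatch loop by connecting to the NECP kernel control
 * and writing a necp_packet_header followed by TLVs. A minimal sketch, assuming
 * the NECP_CONTROL_NAME and packet-type constants exported by <net/necp.h>:
 *
 *     int fd = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);
 *     struct ctl_info info = { .ctl_id = 0 };
 *     strlcpy(info.ctl_name, NECP_CONTROL_NAME, sizeof(info.ctl_name));
 *     ioctl(fd, CTLIOCGINFO, &info);
 *     struct sockaddr_ctl addr = {
 *         .sc_len = sizeof(addr), .sc_family = AF_SYSTEM,
 *         .ss_sysaddr = AF_SYS_CONTROL, .sc_id = info.ctl_id, .sc_unit = 0,
 *     };
 *     connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 *     // send() a necp_packet_header (e.g. NECP_PACKET_TYPE_LOCK_SESSION_TO_PROC)
 *     // and recv() the response written by necp_send_success_response() above.
 */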
static void
necp_ctl_rcvd(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int flags)
{
#pragma unused(kctlref, unit, unitinfo, flags)
	return;
}

static errno_t
necp_ctl_getopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int opt, void *data, size_t *len)
{
#pragma unused(kctlref, unit, unitinfo, opt, data, len)
	return (0);
}

static errno_t
necp_ctl_setopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int opt, void *data, size_t len)
{
#pragma unused(kctlref, unit, unitinfo, opt, data, len)
	return (0);
}
// Session Management

static struct necp_session *
necp_create_session(void)
{
	struct necp_session *new_session = NULL;

	MALLOC(new_session, struct necp_session *, sizeof(*new_session), M_NECP, M_WAITOK | M_ZERO);
	if (new_session == NULL) {
		goto done;
	}

	new_session->necp_fd_type = necp_fd_type_session;
	new_session->session_priority = NECP_SESSION_PRIORITY_UNKNOWN;
	new_session->dirty = FALSE;
	LIST_INIT(&new_session->policies);
	lck_mtx_init(&new_session->lock, necp_kernel_policy_mtx_grp, necp_kernel_policy_mtx_attr);

	// Take the lock
	lck_rw_lock_exclusive(&necp_kernel_policy_lock);

	// Find the next available control unit
	u_int32_t control_unit = 1;
	struct necp_session *next_session = NULL;
	TAILQ_FOREACH(next_session, &necp_session_list, chain) {
		if (next_session->control_unit > control_unit) {
			// Found a gap, grab this control unit
			break;
		}

		// Try the next control unit, loop around
		control_unit = next_session->control_unit + 1;
	}

	new_session->control_unit = control_unit;
	new_session->session_order = necp_allocate_new_session_order(new_session->session_priority, control_unit);

	if (next_session != NULL) {
		TAILQ_INSERT_BEFORE(next_session, new_session, chain);
	} else {
		TAILQ_INSERT_TAIL(&necp_session_list, new_session, chain);
	}

	necp_session_count++;
	lck_rw_done(&necp_kernel_policy_lock);

	if (necp_debug) {
		NECPLOG(LOG_DEBUG, "Created NECP session, control unit %d", control_unit);
	}

done:
	return (new_session);
}
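/*
 * Editor note (worked example, not in the original sources): control units are
 * handed out by scanning the sorted session list for the first gap. If existing
 * sessions hold units 1, 2 and 4, the loop above stops with control_unit == 3
 * and inserts the new session before the unit-4 session; if the list holds
 * 1, 2, 3 the loop runs off the end and the new session is appended with unit 4.
 */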
static void
necp_delete_session(struct necp_session *session)
{
	if (session != NULL) {
		struct necp_service_registration *service = NULL;
		struct necp_service_registration *temp_service = NULL;
		LIST_FOREACH_SAFE(service, &session->services, session_chain, temp_service) {
			LIST_REMOVE(service, session_chain);
			lck_rw_lock_exclusive(&necp_kernel_policy_lock);
			LIST_REMOVE(service, kernel_chain);
			lck_rw_done(&necp_kernel_policy_lock);
			FREE(service, M_NECP);
		}
		if (necp_debug) {
			NECPLOG0(LOG_DEBUG, "Deleted NECP session");
		}

		lck_rw_lock_exclusive(&necp_kernel_policy_lock);
		TAILQ_REMOVE(&necp_session_list, session, chain);
		necp_session_count--;
		lck_rw_done(&necp_kernel_policy_lock);

		lck_mtx_destroy(&session->lock, necp_kernel_policy_mtx_grp);
		FREE(session, M_NECP);
	}
}
// Session Policy Management

static inline u_int8_t
necp_policy_result_get_type_from_buffer(u_int8_t *buffer, u_int32_t length)
{
	return ((buffer && length >= sizeof(u_int8_t)) ? buffer[0] : 0);
}

static inline u_int32_t
necp_policy_result_get_parameter_length_from_buffer(u_int8_t *buffer, u_int32_t length)
{
	return ((buffer && length > sizeof(u_int8_t)) ? (length - sizeof(u_int8_t)) : 0);
}

static inline u_int8_t *
necp_policy_result_get_parameter_pointer_from_buffer(u_int8_t *buffer, u_int32_t length)
{
	return ((buffer && length > sizeof(u_int8_t)) ? (buffer + sizeof(u_int8_t)) : NULL);
}

static bool
necp_policy_result_requires_route_rules(u_int8_t *buffer, u_int32_t length)
{
	u_int8_t type = necp_policy_result_get_type_from_buffer(buffer, length);
	if (type == NECP_POLICY_RESULT_ROUTE_RULES) {
		return (TRUE);
	}
	return (FALSE);
}
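/*
 * Editor note (illustrative, not in the original sources): a policy result
 * buffer is a single result-type byte optionally followed by a parameter, so
 * for a buffer of `length` bytes:
 *
 *     buffer[0]                -> result type
 *     buffer + 1 .. length - 1 -> parameter (length - 1 bytes, possibly empty)
 *
 * which is exactly what the three accessors above return.
 */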
static inline bool
necp_address_is_valid(struct sockaddr *address)
{
	if (address->sa_family == AF_INET) {
		return (address->sa_len == sizeof(struct sockaddr_in));
	} else if (address->sa_family == AF_INET6) {
		return (address->sa_len == sizeof(struct sockaddr_in6));
	} else {
		return (FALSE);
	}
}
static bool
necp_policy_result_is_valid(u_int8_t *buffer, u_int32_t length)
{
	bool validated = FALSE;
	u_int8_t type = necp_policy_result_get_type_from_buffer(buffer, length);
	u_int32_t parameter_length = necp_policy_result_get_parameter_length_from_buffer(buffer, length);
	switch (type) {
	case NECP_POLICY_RESULT_PASS:
	case NECP_POLICY_RESULT_DROP:
	case NECP_POLICY_RESULT_ROUTE_RULES:
	case NECP_POLICY_RESULT_SCOPED_DIRECT: {
		validated = TRUE;
		break;
	}
	case NECP_POLICY_RESULT_SKIP:
	case NECP_POLICY_RESULT_SOCKET_DIVERT:
	case NECP_POLICY_RESULT_SOCKET_FILTER: {
		if (parameter_length >= sizeof(u_int32_t)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_RESULT_IP_TUNNEL: {
		if (parameter_length > sizeof(u_int32_t)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_RESULT_SOCKET_SCOPED: {
		if (parameter_length > 0) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_RESULT_TRIGGER:
	case NECP_POLICY_RESULT_TRIGGER_IF_NEEDED:
	case NECP_POLICY_RESULT_TRIGGER_SCOPED:
	case NECP_POLICY_RESULT_NO_TRIGGER_SCOPED:
	case NECP_POLICY_RESULT_USE_NETAGENT:
	case NECP_POLICY_RESULT_NETAGENT_SCOPED: {
		if (parameter_length >= sizeof(uuid_t)) {
			validated = TRUE;
		}
		break;
	}
	default: {
		validated = FALSE;
		break;
	}
	}

	if (necp_debug) {
		NECPLOG(LOG_DEBUG, "Policy result type %d, valid %d", type, validated);
	}

	return (validated);
}
static inline u_int8_t
necp_policy_condition_get_type_from_buffer(u_int8_t *buffer, u_int32_t length)
{
	return ((buffer && length >= sizeof(u_int8_t)) ? buffer[0] : 0);
}

static inline u_int8_t
necp_policy_condition_get_flags_from_buffer(u_int8_t *buffer, u_int32_t length)
{
	return ((buffer && length >= (2 * sizeof(u_int8_t))) ? buffer[1] : 0);
}

static inline u_int32_t
necp_policy_condition_get_value_length_from_buffer(u_int8_t *buffer, u_int32_t length)
{
	return ((buffer && length >= (2 * sizeof(u_int8_t))) ? (length - (2 * sizeof(u_int8_t))) : 0);
}

static inline u_int8_t *
necp_policy_condition_get_value_pointer_from_buffer(u_int8_t *buffer, u_int32_t length)
{
	return ((buffer && length > (2 * sizeof(u_int8_t))) ? (buffer + (2 * sizeof(u_int8_t))) : NULL);
}

static inline bool
necp_policy_condition_is_default(u_int8_t *buffer, u_int32_t length)
{
	return (necp_policy_condition_get_type_from_buffer(buffer, length) == NECP_POLICY_CONDITION_DEFAULT);
}

static inline bool
necp_policy_condition_is_application(u_int8_t *buffer, u_int32_t length)
{
	return (necp_policy_condition_get_type_from_buffer(buffer, length) == NECP_POLICY_CONDITION_APPLICATION);
}

static inline bool
necp_policy_condition_is_real_application(u_int8_t *buffer, u_int32_t length)
{
	return (necp_policy_condition_get_type_from_buffer(buffer, length) == NECP_POLICY_CONDITION_REAL_APPLICATION);
}

static inline bool
necp_policy_condition_requires_application(u_int8_t *buffer, u_int32_t length)
{
	u_int8_t type = necp_policy_condition_get_type_from_buffer(buffer, length);
	return (type == NECP_POLICY_CONDITION_REAL_APPLICATION);
}

static inline bool
necp_policy_condition_requires_real_application(u_int8_t *buffer, u_int32_t length)
{
	u_int8_t type = necp_policy_condition_get_type_from_buffer(buffer, length);
	return (type == NECP_POLICY_CONDITION_ENTITLEMENT);
}
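/*
 * Editor note (illustrative, not in the original sources): a policy condition
 * buffer starts with a type byte and a flags byte, then the value:
 *
 *     buffer[0]                -> condition type
 *     buffer[1]                -> flags (e.g. NECP_POLICY_CONDITION_FLAGS_NEGATIVE)
 *     buffer + 2 .. length - 1 -> value (length - 2 bytes)
 *
 * The accessors above simply index into that layout.
 */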
static bool
necp_policy_condition_is_valid(u_int8_t *buffer, u_int32_t length, u_int8_t policy_result_type)
{
	bool validated = FALSE;
	bool result_cannot_have_ip_layer = (policy_result_type == NECP_POLICY_RESULT_SOCKET_DIVERT ||
	    policy_result_type == NECP_POLICY_RESULT_SOCKET_FILTER ||
	    policy_result_type == NECP_POLICY_RESULT_TRIGGER ||
	    policy_result_type == NECP_POLICY_RESULT_TRIGGER_IF_NEEDED ||
	    policy_result_type == NECP_POLICY_RESULT_TRIGGER_SCOPED ||
	    policy_result_type == NECP_POLICY_RESULT_NO_TRIGGER_SCOPED ||
	    policy_result_type == NECP_POLICY_RESULT_SOCKET_SCOPED ||
	    policy_result_type == NECP_POLICY_RESULT_ROUTE_RULES ||
	    policy_result_type == NECP_POLICY_RESULT_USE_NETAGENT ||
	    policy_result_type == NECP_POLICY_RESULT_NETAGENT_SCOPED ||
	    policy_result_type == NECP_POLICY_RESULT_SCOPED_DIRECT) ? TRUE : FALSE;
	u_int32_t condition_length = necp_policy_condition_get_value_length_from_buffer(buffer, length);
	u_int8_t *condition_value = necp_policy_condition_get_value_pointer_from_buffer(buffer, length);
	u_int8_t type = necp_policy_condition_get_type_from_buffer(buffer, length);
	u_int8_t flags = necp_policy_condition_get_flags_from_buffer(buffer, length);
	switch (type) {
	case NECP_POLICY_CONDITION_APPLICATION:
	case NECP_POLICY_CONDITION_REAL_APPLICATION: {
		if (!(flags & NECP_POLICY_CONDITION_FLAGS_NEGATIVE) &&
		    condition_length >= sizeof(uuid_t) &&
		    condition_value != NULL &&
		    !uuid_is_null(condition_value)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_DOMAIN:
	case NECP_POLICY_CONDITION_ACCOUNT:
	case NECP_POLICY_CONDITION_BOUND_INTERFACE: {
		if (condition_length > 0) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_TRAFFIC_CLASS: {
		if (condition_length >= sizeof(struct necp_policy_condition_tc_range)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_DEFAULT:
	case NECP_POLICY_CONDITION_ALL_INTERFACES:
	case NECP_POLICY_CONDITION_ENTITLEMENT: {
		if (!(flags & NECP_POLICY_CONDITION_FLAGS_NEGATIVE)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_IP_PROTOCOL: {
		if (condition_length >= sizeof(u_int16_t)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_PID: {
		if (condition_length >= sizeof(pid_t) &&
		    condition_value != NULL &&
		    *((pid_t *)(void *)condition_value) != 0) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_UID: {
		if (condition_length >= sizeof(uid_t)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_LOCAL_ADDR:
	case NECP_POLICY_CONDITION_REMOTE_ADDR: {
		if (!result_cannot_have_ip_layer && condition_length >= sizeof(struct necp_policy_condition_addr) &&
		    necp_address_is_valid(&((struct necp_policy_condition_addr *)(void *)condition_value)->address.sa)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_LOCAL_ADDR_RANGE:
	case NECP_POLICY_CONDITION_REMOTE_ADDR_RANGE: {
		if (!result_cannot_have_ip_layer && condition_length >= sizeof(struct necp_policy_condition_addr_range) &&
		    necp_address_is_valid(&((struct necp_policy_condition_addr_range *)(void *)condition_value)->start_address.sa) &&
		    necp_address_is_valid(&((struct necp_policy_condition_addr_range *)(void *)condition_value)->end_address.sa)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_AGENT_TYPE: {
		if (!(flags & NECP_POLICY_CONDITION_FLAGS_NEGATIVE) &&
		    condition_length >= sizeof(struct necp_policy_condition_agent_type)) {
			validated = TRUE;
		}
		break;
	}
	default: {
		validated = FALSE;
		break;
	}
	}

	if (necp_debug) {
		NECPLOG(LOG_DEBUG, "Policy condition type %d, valid %d", type, validated);
	}

	return (validated);
}
static bool
necp_policy_route_rule_is_default(u_int8_t *buffer, u_int32_t length)
{
	return (necp_policy_condition_get_value_length_from_buffer(buffer, length) == 0 &&
	    necp_policy_condition_get_flags_from_buffer(buffer, length) == 0);
}

static bool
necp_policy_route_rule_is_valid(u_int8_t *buffer, u_int32_t length)
{
	bool validated = FALSE;
	u_int8_t type = necp_policy_condition_get_type_from_buffer(buffer, length);
	switch (type) {
	case NECP_ROUTE_RULE_ALLOW_INTERFACE: {
		validated = TRUE;
		break;
	}
	case NECP_ROUTE_RULE_DENY_INTERFACE: {
		validated = TRUE;
		break;
	}
	case NECP_ROUTE_RULE_QOS_MARKING: {
		validated = TRUE;
		break;
	}
	case NECP_ROUTE_RULE_DENY_LQM_ABORT: {
		validated = TRUE;
		break;
	}
	default: {
		validated = FALSE;
		break;
	}
	}

	if (necp_debug) {
		NECPLOG(LOG_DEBUG, "Policy route rule type %d, valid %d", type, validated);
	}

	return (validated);
}
static int
necp_get_posix_error_for_necp_error(int response_error)
{
	switch (response_error) {
	case NECP_ERROR_UNKNOWN_PACKET_TYPE:
	case NECP_ERROR_INVALID_TLV:
	case NECP_ERROR_POLICY_RESULT_INVALID:
	case NECP_ERROR_POLICY_CONDITIONS_INVALID:
	case NECP_ERROR_ROUTE_RULES_INVALID: {
		return (EINVAL);
	}
	case NECP_ERROR_POLICY_ID_NOT_FOUND: {
		return (ENOENT);
	}
	case NECP_ERROR_INVALID_PROCESS: {
		return (EPERM);
	}
	case NECP_ERROR_INTERNAL:
	default: {
		return (ENOMEM);
	}
	}
}
static void
necp_handle_set_session_priority(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
{
	int error;
	struct necp_session_policy *policy = NULL;
	struct necp_session_policy *temp_policy = NULL;
	u_int32_t response_error = NECP_ERROR_INTERNAL;
	u_int32_t requested_session_priority = NECP_SESSION_PRIORITY_UNKNOWN;

	error = necp_get_tlv(packet, NULL, 0, offset, NECP_TLV_SESSION_PRIORITY, sizeof(requested_session_priority), &requested_session_priority, NULL);
	if (error) {
		NECPLOG(LOG_ERR, "Failed to get session priority: %d", error);
		response_error = NECP_ERROR_INVALID_TLV;
		goto fail;
	}

	if (session == NULL) {
		NECPLOG0(LOG_ERR, "Failed to find session");
		response_error = NECP_ERROR_INTERNAL;
		goto fail;
	}

	// Enforce special session priorities with entitlements
	if (requested_session_priority == NECP_SESSION_PRIORITY_CONTROL ||
	    requested_session_priority == NECP_SESSION_PRIORITY_PRIVILEGED_TUNNEL) {
		errno_t cred_result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES, 0);
		if (cred_result != 0) {
			NECPLOG(LOG_ERR, "Session does not hold necessary entitlement to claim priority level %d", requested_session_priority);
			goto fail;
		}
	}

	if (session->session_priority != requested_session_priority) {
		session->session_priority = requested_session_priority;
		session->session_order = necp_allocate_new_session_order(session->session_priority, session->control_unit);
		session->dirty = TRUE;

		// Mark all policies as needing updates
		LIST_FOREACH_SAFE(policy, &session->policies, chain, temp_policy) {
			policy->pending_update = TRUE;
		}
	}

	necp_send_success_response(session, NECP_PACKET_TYPE_SET_SESSION_PRIORITY, message_id);
	return;

fail:
	necp_send_error_response(session, NECP_PACKET_TYPE_SET_SESSION_PRIORITY, message_id, response_error);
}
static void
necp_handle_lock_session_to_proc(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
{
#pragma unused(packet, offset)
	// proc_uuid already filled out
	session->proc_locked = TRUE;
	necp_send_success_response(session, NECP_PACKET_TYPE_LOCK_SESSION_TO_PROC, message_id);
}
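/*
 * Editor note (hedged, not in the original sources): once a session is locked
 * to its process, necp_ctl_send() compares the sender's executable UUID against
 * session->proc_uuid and rejects mismatches with NECP_ERROR_INVALID_PROCESS, so
 * a client typically locks the session immediately after creating it.
 */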
static void
necp_handle_register_service(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
{
	int error;
	struct necp_service_registration *new_service = NULL;
	u_int32_t response_error = NECP_ERROR_INTERNAL;
	uuid_t service_uuid;
	uuid_clear(service_uuid);

	if (session == NULL) {
		NECPLOG0(LOG_ERR, "Failed to find session");
		response_error = NECP_ERROR_INTERNAL;
		goto fail;
	}

	// Enforce entitlements
	errno_t cred_result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES, 0);
	if (cred_result != 0) {
		NECPLOG0(LOG_ERR, "Session does not hold necessary entitlement to register service");
		goto fail;
	}

	// Read service uuid
	error = necp_get_tlv(packet, NULL, 0, offset, NECP_TLV_SERVICE_UUID, sizeof(uuid_t), service_uuid, NULL);
	if (error) {
		NECPLOG(LOG_ERR, "Failed to get service UUID: %d", error);
		response_error = NECP_ERROR_INVALID_TLV;
		goto fail;
	}

	MALLOC(new_service, struct necp_service_registration *, sizeof(*new_service), M_NECP, M_WAITOK);
	if (new_service == NULL) {
		NECPLOG0(LOG_ERR, "Failed to allocate service registration");
		response_error = NECP_ERROR_INTERNAL;
		goto fail;
	}

	lck_rw_lock_exclusive(&necp_kernel_policy_lock);
	memset(new_service, 0, sizeof(*new_service));
	new_service->service_id = necp_create_uuid_service_id_mapping(service_uuid);
	LIST_INSERT_HEAD(&session->services, new_service, session_chain);
	LIST_INSERT_HEAD(&necp_registered_service_list, new_service, kernel_chain);
	lck_rw_done(&necp_kernel_policy_lock);

	necp_send_success_response(session, NECP_PACKET_TYPE_REGISTER_SERVICE, message_id);
	return;

fail:
	necp_send_error_response(session, NECP_PACKET_TYPE_REGISTER_SERVICE, message_id, response_error);
}
static void
necp_handle_unregister_service(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
{
	int error;
	struct necp_service_registration *service = NULL;
	struct necp_service_registration *temp_service = NULL;
	u_int32_t response_error = NECP_ERROR_INTERNAL;
	struct necp_uuid_id_mapping *mapping = NULL;
	uuid_t service_uuid;
	uuid_clear(service_uuid);

	if (session == NULL) {
		NECPLOG0(LOG_ERR, "Failed to find session");
		response_error = NECP_ERROR_INTERNAL;
		goto fail;
	}

	// Read service uuid
	error = necp_get_tlv(packet, NULL, 0, offset, NECP_TLV_SERVICE_UUID, sizeof(uuid_t), service_uuid, NULL);
	if (error) {
		NECPLOG(LOG_ERR, "Failed to get service UUID: %d", error);
		response_error = NECP_ERROR_INVALID_TLV;
		goto fail;
	}

	// Mark remove all matching services for this session
	lck_rw_lock_exclusive(&necp_kernel_policy_lock);
	mapping = necp_uuid_lookup_service_id_locked(service_uuid);
	if (mapping != NULL) {
		LIST_FOREACH_SAFE(service, &session->services, session_chain, temp_service) {
			if (service->service_id == mapping->id) {
				LIST_REMOVE(service, session_chain);
				LIST_REMOVE(service, kernel_chain);
				FREE(service, M_NECP);
			}
		}
		necp_remove_uuid_service_id_mapping(service_uuid);
	}
	lck_rw_done(&necp_kernel_policy_lock);

	necp_send_success_response(session, NECP_PACKET_TYPE_UNREGISTER_SERVICE, message_id);
	return;

fail:
	necp_send_error_response(session, NECP_PACKET_TYPE_UNREGISTER_SERVICE, message_id, response_error);
}
static necp_policy_id
necp_handle_policy_add(struct necp_session *session, u_int32_t message_id, mbuf_t packet,
    u_int8_t *tlv_buffer, size_t tlv_buffer_length, int offset, int *return_error)
{
	bool has_default_condition = FALSE;
	bool has_non_default_condition = FALSE;
	bool has_application_condition = FALSE;
	bool has_real_application_condition = FALSE;
	bool requires_application_condition = FALSE;
	bool requires_real_application_condition = FALSE;
	u_int8_t *conditions_array = NULL;
	u_int32_t conditions_array_size = 0;
	int conditions_array_cursor;

	bool has_default_route_rule = FALSE;
	u_int8_t *route_rules_array = NULL;
	u_int32_t route_rules_array_size = 0;
	int route_rules_array_cursor;

	int cursor;
	int error = 0;
	u_int32_t response_error = NECP_ERROR_INTERNAL;

	necp_policy_order order = 0;
	struct necp_session_policy *policy = NULL;
	u_int8_t *policy_result = NULL;
	u_int32_t policy_result_size = 0;

	// Read policy order
	error = necp_get_tlv(packet, tlv_buffer, tlv_buffer_length, offset, NECP_TLV_POLICY_ORDER, sizeof(order), &order, NULL);
	if (error) {
		NECPLOG(LOG_ERR, "Failed to get policy order: %d", error);
		response_error = NECP_ERROR_INVALID_TLV;
		goto fail;
	}

	// Read policy result
	cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, offset, NECP_TLV_POLICY_RESULT, &error, 0);
	error = necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &policy_result_size);
	if (error || policy_result_size == 0) {
		NECPLOG(LOG_ERR, "Failed to get policy result length: %d", error);
		response_error = NECP_ERROR_INVALID_TLV;
		goto fail;
	}
	if (policy_result_size > NECP_MAX_POLICY_RESULT_SIZE) {
		NECPLOG(LOG_ERR, "Policy result length too large: %u", policy_result_size);
		response_error = NECP_ERROR_INVALID_TLV;
		goto fail;
	}
	MALLOC(policy_result, u_int8_t *, policy_result_size, M_NECP, M_WAITOK);
	if (policy_result == NULL) {
		NECPLOG(LOG_ERR, "Failed to allocate a policy result buffer (size %d)", policy_result_size);
		response_error = NECP_ERROR_INTERNAL;
		goto fail;
	}
	error = necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, policy_result_size, policy_result, NULL);
	if (error) {
		NECPLOG(LOG_ERR, "Failed to get policy result: %d", error);
		response_error = NECP_ERROR_POLICY_RESULT_INVALID;
		goto fail;
	}
	if (!necp_policy_result_is_valid(policy_result, policy_result_size)) {
		NECPLOG0(LOG_ERR, "Failed to validate policy result");
		response_error = NECP_ERROR_POLICY_RESULT_INVALID;
		goto fail;
	}

	if (necp_policy_result_requires_route_rules(policy_result, policy_result_size)) {
		// Read route rules conditions
		for (cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, offset, NECP_TLV_ROUTE_RULE, &error, 0);
		    cursor >= 0;
		    cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, cursor, NECP_TLV_ROUTE_RULE, &error, 1)) {
			u_int32_t route_rule_size = 0;
			necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &route_rule_size);
			if (route_rule_size > 0) {
				route_rules_array_size += (sizeof(u_int8_t) + sizeof(u_int32_t) + route_rule_size);
			}
		}

		if (route_rules_array_size == 0) {
			NECPLOG0(LOG_ERR, "Failed to get policy route rules");
			response_error = NECP_ERROR_INVALID_TLV;
			goto fail;
		}
		if (route_rules_array_size > NECP_MAX_ROUTE_RULES_ARRAY_SIZE) {
			NECPLOG(LOG_ERR, "Route rules length too large: %u", route_rules_array_size);
			response_error = NECP_ERROR_INVALID_TLV;
			goto fail;
		}
		MALLOC(route_rules_array, u_int8_t *, route_rules_array_size, M_NECP, M_WAITOK);
		if (route_rules_array == NULL) {
			NECPLOG(LOG_ERR, "Failed to allocate a policy route rules array (size %d)", route_rules_array_size);
			response_error = NECP_ERROR_INTERNAL;
			goto fail;
		}

		route_rules_array_cursor = 0;
		for (cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, offset, NECP_TLV_ROUTE_RULE, &error, 0);
		    cursor >= 0;
		    cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, cursor, NECP_TLV_ROUTE_RULE, &error, 1)) {
			u_int8_t route_rule_type = NECP_TLV_ROUTE_RULE;
			u_int32_t route_rule_size = 0;
			necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &route_rule_size);
			if (route_rule_size > 0 && route_rule_size <= (route_rules_array_size - route_rules_array_cursor)) {
				// Add type
				memcpy((route_rules_array + route_rules_array_cursor), &route_rule_type, sizeof(route_rule_type));
				route_rules_array_cursor += sizeof(route_rule_type);

				// Add length
				memcpy((route_rules_array + route_rules_array_cursor), &route_rule_size, sizeof(route_rule_size));
				route_rules_array_cursor += sizeof(route_rule_size);

				// Add value
				necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, route_rule_size, (route_rules_array + route_rules_array_cursor), NULL);

				if (!necp_policy_route_rule_is_valid((route_rules_array + route_rules_array_cursor), route_rule_size)) {
					NECPLOG0(LOG_ERR, "Failed to validate policy route rule");
					response_error = NECP_ERROR_ROUTE_RULES_INVALID;
					goto fail;
				}

				if (necp_policy_route_rule_is_default((route_rules_array + route_rules_array_cursor), route_rule_size)) {
					if (has_default_route_rule) {
						NECPLOG0(LOG_ERR, "Failed to validate route rule; contained multiple default route rules");
						response_error = NECP_ERROR_ROUTE_RULES_INVALID;
						goto fail;
					}
					has_default_route_rule = TRUE;
				}

				route_rules_array_cursor += route_rule_size;
			}
		}
	}

	// Read policy conditions
	for (cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, offset, NECP_TLV_POLICY_CONDITION, &error, 0);
	    cursor >= 0;
	    cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, cursor, NECP_TLV_POLICY_CONDITION, &error, 1)) {
		u_int32_t condition_size = 0;
		necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &condition_size);

		if (condition_size > 0) {
			conditions_array_size += (sizeof(u_int8_t) + sizeof(u_int32_t) + condition_size);
		}
	}

	if (conditions_array_size == 0) {
		NECPLOG0(LOG_ERR, "Failed to get policy conditions");
		response_error = NECP_ERROR_INVALID_TLV;
		goto fail;
	}
	if (conditions_array_size > NECP_MAX_CONDITIONS_ARRAY_SIZE) {
		NECPLOG(LOG_ERR, "Conditions length too large: %u", conditions_array_size);
		response_error = NECP_ERROR_INVALID_TLV;
		goto fail;
	}
	MALLOC(conditions_array, u_int8_t *, conditions_array_size, M_NECP, M_WAITOK);
	if (conditions_array == NULL) {
		NECPLOG(LOG_ERR, "Failed to allocate a policy conditions array (size %d)", conditions_array_size);
		response_error = NECP_ERROR_INTERNAL;
		goto fail;
	}

	conditions_array_cursor = 0;
	for (cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, offset, NECP_TLV_POLICY_CONDITION, &error, 0);
	    cursor >= 0;
	    cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, cursor, NECP_TLV_POLICY_CONDITION, &error, 1)) {
		u_int8_t condition_type = NECP_TLV_POLICY_CONDITION;
		u_int32_t condition_size = 0;
		necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &condition_size);
		if (condition_size > 0 && condition_size <= (conditions_array_size - conditions_array_cursor)) {
			// Add type
			memcpy((conditions_array + conditions_array_cursor), &condition_type, sizeof(condition_type));
			conditions_array_cursor += sizeof(condition_type);

			// Add length
			memcpy((conditions_array + conditions_array_cursor), &condition_size, sizeof(condition_size));
			conditions_array_cursor += sizeof(condition_size);

			// Add value
			necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, condition_size, (conditions_array + conditions_array_cursor), NULL);
			if (!necp_policy_condition_is_valid((conditions_array + conditions_array_cursor), condition_size, necp_policy_result_get_type_from_buffer(policy_result, policy_result_size))) {
				NECPLOG0(LOG_ERR, "Failed to validate policy condition");
				response_error = NECP_ERROR_POLICY_CONDITIONS_INVALID;
				goto fail;
			}

			if (necp_policy_condition_is_default((conditions_array + conditions_array_cursor), condition_size)) {
				has_default_condition = TRUE;
			} else {
				has_non_default_condition = TRUE;
			}
			if (has_default_condition && has_non_default_condition) {
				NECPLOG0(LOG_ERR, "Failed to validate conditions; contained default and non-default conditions");
				response_error = NECP_ERROR_POLICY_CONDITIONS_INVALID;
				goto fail;
			}

			if (necp_policy_condition_is_application((conditions_array + conditions_array_cursor), condition_size)) {
				has_application_condition = TRUE;
			}

			if (necp_policy_condition_is_real_application((conditions_array + conditions_array_cursor), condition_size)) {
				has_real_application_condition = TRUE;
			}

			if (necp_policy_condition_requires_application((conditions_array + conditions_array_cursor), condition_size)) {
				requires_application_condition = TRUE;
			}

			if (necp_policy_condition_requires_real_application((conditions_array + conditions_array_cursor), condition_size)) {
				requires_real_application_condition = TRUE;
			}

			conditions_array_cursor += condition_size;
		}
	}

	if (requires_application_condition && !has_application_condition) {
		NECPLOG0(LOG_ERR, "Failed to validate conditions; did not contain application condition");
		response_error = NECP_ERROR_POLICY_CONDITIONS_INVALID;
		goto fail;
	}

	if (requires_real_application_condition && !has_real_application_condition) {
		NECPLOG0(LOG_ERR, "Failed to validate conditions; did not contain real application condition");
		response_error = NECP_ERROR_POLICY_CONDITIONS_INVALID;
		goto fail;
	}

	if ((policy = necp_policy_create(session, order, conditions_array, conditions_array_size, route_rules_array, route_rules_array_size, policy_result, policy_result_size)) == NULL) {
		response_error = NECP_ERROR_INTERNAL;
		goto fail;
	}

	if (packet != NULL) {
		necp_send_policy_id_response(session, NECP_PACKET_TYPE_POLICY_ADD, message_id, policy->local_id);
	}
	return (policy->local_id);

fail:
	if (policy_result != NULL) {
		FREE(policy_result, M_NECP);
	}
	if (conditions_array != NULL) {
		FREE(conditions_array, M_NECP);
	}
	if (route_rules_array != NULL) {
		FREE(route_rules_array, M_NECP);
	}

	if (packet != NULL) {
		necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_ADD, message_id, response_error);
	}
	if (return_error != NULL) {
		*return_error = necp_get_posix_error_for_necp_error(response_error);
	}
	return (0);
}
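/*
 * Editor note (hedged sketch, not in the original sources): a POLICY_ADD
 * message is expected to carry at least an order, a result, and one or more
 * conditions. An illustrative TLV sequence following the packet header:
 *
 *     [NECP_TLV_POLICY_ORDER]     [len 4]      [order]
 *     [NECP_TLV_POLICY_RESULT]    [len 1]      [NECP_POLICY_RESULT_DROP]
 *     [NECP_TLV_POLICY_CONDITION] [len 2 + n]  [type, flags, n value bytes]
 *
 * The handler above re-packs each condition and route rule back into
 * type/length/value form before handing the arrays to necp_policy_create().
 */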
static void
necp_handle_policy_get(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
{
#pragma unused(offset)
	int error;
	u_int8_t *response = NULL;
	u_int8_t *cursor = NULL;
	u_int32_t response_error = NECP_ERROR_INTERNAL;
	necp_policy_id policy_id = 0;
	u_int32_t order_tlv_size = 0;
	u_int32_t result_tlv_size = 0;
	u_int32_t response_size = 0;

	struct necp_session_policy *policy = NULL;

	// Read policy id
	error = necp_get_tlv(packet, NULL, 0, offset, NECP_TLV_POLICY_ID, sizeof(policy_id), &policy_id, NULL);
	if (error) {
		NECPLOG(LOG_ERR, "Failed to get policy id: %d", error);
		response_error = NECP_ERROR_INVALID_TLV;
		goto fail;
	}

	policy = necp_policy_find(session, policy_id);
	if (policy == NULL || policy->pending_deletion) {
		NECPLOG(LOG_ERR, "Failed to find policy with id %d", policy_id);
		response_error = NECP_ERROR_POLICY_ID_NOT_FOUND;
		goto fail;
	}

	order_tlv_size = sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(necp_policy_order);
	result_tlv_size = (policy->result_size ? (sizeof(u_int8_t) + sizeof(u_int32_t) + policy->result_size) : 0);
	response_size = sizeof(struct necp_packet_header) + order_tlv_size + result_tlv_size + policy->conditions_size;
	MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK);
	if (response == NULL) {
		necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_GET, message_id, NECP_ERROR_INTERNAL);
		return;
	}

	cursor = response;
	cursor = necp_buffer_write_packet_header(cursor, NECP_PACKET_TYPE_POLICY_GET, NECP_PACKET_FLAGS_RESPONSE, message_id);
	cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ORDER, sizeof(necp_policy_order), &policy->order, response, response_size);

	if (result_tlv_size) {
		cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_RESULT, policy->result_size, &policy->result, response, response_size);
	}
	if (policy->conditions_size) {
		memcpy(((u_int8_t *)(void *)(cursor)), policy->conditions, policy->conditions_size);
	}

	if (!necp_send_ctl_data(session, (u_int8_t *)response, response_size)) {
		NECPLOG0(LOG_ERR, "Failed to send response");
	}

	FREE(response, M_NECP);
	return;

fail:
	necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_GET, message_id, response_error);
}
static void
necp_handle_policy_delete(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
{
	int error;
	u_int32_t response_error = NECP_ERROR_INTERNAL;
	necp_policy_id policy_id = 0;

	struct necp_session_policy *policy = NULL;

	// Read policy id
	error = necp_get_tlv(packet, NULL, 0, offset, NECP_TLV_POLICY_ID, sizeof(policy_id), &policy_id, NULL);
	if (error) {
		NECPLOG(LOG_ERR, "Failed to get policy id: %d", error);
		response_error = NECP_ERROR_INVALID_TLV;
		goto fail;
	}

	policy = necp_policy_find(session, policy_id);
	if (policy == NULL || policy->pending_deletion) {
		NECPLOG(LOG_ERR, "Failed to find policy with id %d", policy_id);
		response_error = NECP_ERROR_POLICY_ID_NOT_FOUND;
		goto fail;
	}

	necp_policy_mark_for_deletion(session, policy);

	necp_send_success_response(session, NECP_PACKET_TYPE_POLICY_DELETE, message_id);
	return;

fail:
	necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_DELETE, message_id, response_error);
}
static void
necp_handle_policy_apply_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
{
#pragma unused(packet, offset)
	necp_policy_apply_all(session);
	necp_send_success_response(session, NECP_PACKET_TYPE_POLICY_APPLY_ALL, message_id);
}
static void
necp_handle_policy_list_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
{
#pragma unused(packet, offset)
	u_int32_t tlv_size = (sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(u_int32_t));
	u_int32_t response_size = 0;
	u_int8_t *response = NULL;
	u_int8_t *cursor = NULL;
	int num_policies = 0;
	int cur_policy_index = 0;
	struct necp_session_policy *policy;

	LIST_FOREACH(policy, &session->policies, chain) {
		if (!policy->pending_deletion) {
			num_policies++;
		}
	}

	// Create a response with one Policy ID TLV for each policy
	response_size = sizeof(struct necp_packet_header) + num_policies * tlv_size;
	MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK);
	if (response == NULL) {
		necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_LIST_ALL, message_id, NECP_ERROR_INTERNAL);
		return;
	}

	cursor = response;
	cursor = necp_buffer_write_packet_header(cursor, NECP_PACKET_TYPE_POLICY_LIST_ALL, NECP_PACKET_FLAGS_RESPONSE, message_id);

	LIST_FOREACH(policy, &session->policies, chain) {
		if (!policy->pending_deletion && cur_policy_index < num_policies) {
			cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ID, sizeof(u_int32_t), &policy->local_id, response, response_size);
			cur_policy_index++;
		}
	}

	if (!necp_send_ctl_data(session, (u_int8_t *)response, response_size)) {
		NECPLOG0(LOG_ERR, "Failed to send response");
	}

	FREE(response, M_NECP);
}
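/*
 * Editor note (worked example, not in the original sources): each live policy
 * in the list response costs one TLV of sizeof(u_int8_t) + sizeof(u_int32_t) +
 * sizeof(u_int32_t) = 9 bytes, so a session holding 3 live policies allocates
 * sizeof(struct necp_packet_header) + 3 * 9 bytes for the response above.
 */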
static void
necp_handle_policy_delete_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
{
#pragma unused(packet, offset)
	necp_policy_mark_all_for_deletion(session);
	necp_send_success_response(session, NECP_PACKET_TYPE_POLICY_DELETE_ALL, message_id);
}
static necp_policy_id
necp_policy_get_new_id(struct necp_session *session)
{
	session->last_policy_id++;
	if (session->last_policy_id < 1) {
		session->last_policy_id = 1;
	}

	necp_policy_id newid = session->last_policy_id;
	if (newid == 0) {
		NECPLOG0(LOG_ERR, "Allocate policy id failed.\n");
		return (0);
	}

	return (newid);
}
/*
 * For the policy dump response this is the structure:
 *
 * <NECP_PACKET_HEADER>
 * {
 *     type   : NECP_TLV_POLICY_DUMP
 *     length : ...
 *     value  :
 *     {
 *         type : NECP_TLV_POLICY_ID
 *         type : NECP_TLV_POLICY_ORDER
 *         type : NECP_TLV_POLICY_RESULT_STRING
 *         type : NECP_TLV_POLICY_OWNER
 *         type : NECP_TLV_POLICY_CONDITION
 *         {
 *             type : NECP_POLICY_CONDITION_ALL_INTERFACES
 *             type : NECP_POLICY_CONDITION_BOUND_INTERFACES
 *             ...
 *         }
 *     }
 * }
 * {
 *     type   : NECP_TLV_POLICY_DUMP     (repeated, one per policy)
 *     ...
 * }
 */
2947 necp_handle_policy_dump_all(struct necp_session
*session
, u_int32_t message_id
, mbuf_t packet
,
2948 user_addr_t out_buffer
, size_t out_buffer_length
, int offset
)
2950 #pragma unused(offset)
2951 struct necp_kernel_socket_policy
*policy
= NULL
;
2953 int policy_count
= 0;
2954 u_int8_t
**tlv_buffer_pointers
= NULL
;
2955 u_int32_t
*tlv_buffer_lengths
= NULL
;
2956 u_int32_t total_tlv_len
= 0;
2957 u_int8_t
*result_buf
= NULL
;
2958 u_int8_t
*result_buf_cursor
= result_buf
;
2959 char result_string
[MAX_RESULT_STRING_LEN
];
2960 char proc_name_string
[MAXCOMLEN
+ 1];
2963 bool error_occured
= false;
2964 u_int32_t response_error
= NECP_ERROR_INTERNAL
;
2966 #define REPORT_ERROR(error) error_occured = true; \
2967 response_error = error; \
2970 #define UNLOCK_AND_REPORT_ERROR(lock, error) lck_rw_done(lock); \
2973 errno_t cred_result
= priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES
, 0);
2974 if (cred_result
!= 0) {
2975 NECPLOG0(LOG_ERR
, "Session does not hold the necessary entitlement to get Network Extension Policy information");
2976 REPORT_ERROR(NECP_ERROR_INTERNAL
);
2980 lck_rw_lock_shared(&necp_kernel_policy_lock
);
2983 NECPLOG0(LOG_DEBUG
, "Gathering policies");
2986 policy_count
= necp_kernel_application_policies_count
;
2988 MALLOC(tlv_buffer_pointers
, u_int8_t
**, sizeof(u_int8_t
*) * policy_count
, M_NECP
, M_NOWAIT
| M_ZERO
);
2989 if (tlv_buffer_pointers
== NULL
) {
2990 NECPLOG(LOG_DEBUG
, "Failed to allocate tlv_buffer_pointers (%u bytes)", sizeof(u_int8_t
*) * policy_count
);
2991 UNLOCK_AND_REPORT_ERROR(&necp_kernel_policy_lock
, NECP_ERROR_INTERNAL
);
2994 MALLOC(tlv_buffer_lengths
, u_int32_t
*, sizeof(u_int32_t
) * policy_count
, M_NECP
, M_NOWAIT
| M_ZERO
);
2995 if (tlv_buffer_lengths
== NULL
) {
2996 NECPLOG(LOG_DEBUG
, "Failed to allocate tlv_buffer_lengths (%u bytes)", sizeof(u_int32_t
) * policy_count
);
2997 UNLOCK_AND_REPORT_ERROR(&necp_kernel_policy_lock
, NECP_ERROR_INTERNAL
);
3000 for (policy_i
= 0; necp_kernel_socket_policies_app_layer_map
!= NULL
&& necp_kernel_socket_policies_app_layer_map
[policy_i
] != NULL
; policy_i
++) {
3001 policy
= necp_kernel_socket_policies_app_layer_map
[policy_i
];
3003 memset(result_string
, 0, MAX_RESULT_STRING_LEN
);
3004 memset(proc_name_string
, 0, MAXCOMLEN
+ 1);
3006 necp_get_result_description(result_string
, policy
->result
, policy
->result_parameter
);
3007 proc_name(policy
->session_pid
, proc_name_string
, MAXCOMLEN
);
3009 u_int16_t proc_name_len
= strlen(proc_name_string
) + 1;
3010 u_int16_t result_string_len
= strlen(result_string
) + 1;
3013 NECPLOG(LOG_DEBUG
, "Policy: process: %s, result: %s", proc_name_string
, result_string
);
3016 u_int32_t total_allocated_bytes
= sizeof(u_int8_t
) + sizeof(u_int32_t
) + sizeof(policy
->id
) + // NECP_TLV_POLICY_ID
3017 sizeof(u_int8_t
) + sizeof(u_int32_t
) + sizeof(policy
->order
) + // NECP_TLV_POLICY_ORDER
3018 sizeof(u_int8_t
) + sizeof(u_int32_t
) + sizeof(policy
->session_order
) + // NECP_TLV_POLICY_SESSION_ORDER
3019 sizeof(u_int8_t
) + sizeof(u_int32_t
) + result_string_len
+ // NECP_TLV_POLICY_RESULT_STRING
3020 sizeof(u_int8_t
) + sizeof(u_int32_t
) + proc_name_len
+ // NECP_TLV_POLICY_OWNER
3021 sizeof(u_int8_t
) + sizeof(u_int32_t
); // NECP_TLV_POLICY_CONDITION
3023 // We now traverse the condition_mask to see how much space we need to allocate
3024 u_int32_t condition_mask
= policy
->condition_mask
;
3025 u_int8_t num_conditions
= 0;
3026 struct necp_string_id_mapping
*account_id_entry
= NULL
;
3027 char if_name
[IFXNAMSIZ
];
3028 u_int32_t condition_tlv_length
= 0;
3029 memset(if_name
, 0, sizeof(if_name
));
3031 if (condition_mask
== NECP_POLICY_CONDITION_DEFAULT
) {
3034 if (condition_mask
& NECP_KERNEL_CONDITION_ALL_INTERFACES
) {
3037 if (condition_mask
& NECP_KERNEL_CONDITION_BOUND_INTERFACE
) {
3038 snprintf(if_name
, IFXNAMSIZ
, "%s%d", ifnet_name(policy
->cond_bound_interface
), ifnet_unit(policy
->cond_bound_interface
));
3039 condition_tlv_length
+= strlen(if_name
) + 1;
3042 if (condition_mask
& NECP_KERNEL_CONDITION_PROTOCOL
) {
3043 condition_tlv_length
+= sizeof(policy
->cond_protocol
);
3046 if (condition_mask
& NECP_KERNEL_CONDITION_APP_ID
) {
3047 condition_tlv_length
+= sizeof(uuid_t
);
3050 if (condition_mask
& NECP_KERNEL_CONDITION_REAL_APP_ID
) {
3051 condition_tlv_length
+= sizeof(uuid_t
);
3054 if (condition_mask
& NECP_KERNEL_CONDITION_DOMAIN
) {
3055 u_int32_t domain_len
= strlen(policy
->cond_domain
) + 1;
3056 condition_tlv_length
+= domain_len
;
3059 if (condition_mask
& NECP_KERNEL_CONDITION_ACCOUNT_ID
) {
3060 account_id_entry
= necp_lookup_string_with_id_locked(&necp_account_id_list
, policy
->cond_account_id
);
3061 u_int32_t account_id_len
= 0;
3062 if (account_id_entry
) {
3063 account_id_len
= account_id_entry
->string
? strlen(account_id_entry
->string
) + 1 : 0;
3065 condition_tlv_length
+= account_id_len
;
3068 if (condition_mask
& NECP_KERNEL_CONDITION_PID
) {
3069 condition_tlv_length
+= sizeof(pid_t
);
3072 if (condition_mask
& NECP_KERNEL_CONDITION_UID
) {
3073 condition_tlv_length
+= sizeof(uid_t
);
3076 if (condition_mask
& NECP_KERNEL_CONDITION_TRAFFIC_CLASS
) {
3077 condition_tlv_length
+= sizeof(struct necp_policy_condition_tc_range
);
3080 if (condition_mask
& NECP_KERNEL_CONDITION_ENTITLEMENT
) {
3083 if (condition_mask
& NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT
) {
3084 u_int32_t entitlement_len
= strlen(policy
->cond_custom_entitlement
) + 1;
3085 condition_tlv_length
+= entitlement_len
;
3088 if (condition_mask
& NECP_KERNEL_CONDITION_LOCAL_START
) {
3089 if (condition_mask
& NECP_KERNEL_CONDITION_LOCAL_END
) {
3090 condition_tlv_length
+= sizeof(struct necp_policy_condition_addr_range
);
3092 condition_tlv_length
+= sizeof(struct necp_policy_condition_addr
);
3096 if (condition_mask
& NECP_KERNEL_CONDITION_REMOTE_START
) {
3097 if (condition_mask
& NECP_KERNEL_CONDITION_REMOTE_END
) {
3098 condition_tlv_length
+= sizeof(struct necp_policy_condition_addr_range
);
3100 condition_tlv_length
+= sizeof(struct necp_policy_condition_addr
);
3104 if (condition_mask
& NECP_KERNEL_CONDITION_AGENT_TYPE
) {
3105 condition_tlv_length
+= sizeof(struct necp_policy_condition_agent_type
);
3110 condition_tlv_length
+= num_conditions
* (sizeof(u_int8_t
) + sizeof(u_int32_t
)); // These are for the condition TLVs. The space for "value" is already accounted for above.
3111 total_allocated_bytes
+= condition_tlv_length
;
3113 u_int8_t
*tlv_buffer
;
3114 MALLOC(tlv_buffer
, u_int8_t
*, total_allocated_bytes
, M_NECP
, M_NOWAIT
| M_ZERO
);
3115 if (tlv_buffer
== NULL
) {
3116 NECPLOG(LOG_DEBUG
, "Failed to allocate tlv_buffer (%u bytes)", total_allocated_bytes
);
3120 u_int8_t
*cursor
= tlv_buffer
;
3121 cursor
= necp_buffer_write_tlv(cursor
, NECP_TLV_POLICY_ID
, sizeof(policy
->id
), &policy
->id
, tlv_buffer
, total_allocated_bytes
);
3122 cursor
= necp_buffer_write_tlv(cursor
, NECP_TLV_POLICY_ORDER
, sizeof(necp_policy_order
), &policy
->order
, tlv_buffer
, total_allocated_bytes
);
3123 cursor
= necp_buffer_write_tlv(cursor
, NECP_TLV_POLICY_SESSION_ORDER
, sizeof(policy
->session_order
), &policy
->session_order
, tlv_buffer
, total_allocated_bytes
);
3124 cursor
= necp_buffer_write_tlv(cursor
, NECP_TLV_POLICY_RESULT_STRING
, result_string_len
, result_string
, tlv_buffer
, total_allocated_bytes
);
3125 cursor
= necp_buffer_write_tlv(cursor
, NECP_TLV_POLICY_OWNER
, proc_name_len
, proc_name_string
, tlv_buffer
, total_allocated_bytes
);
3128 u_int8_t q_cond_buf
[N_QUICK
]; // Minor optimization
3130 u_int8_t
*cond_buf
; // To be used for condition TLVs
3131 if (condition_tlv_length
<= N_QUICK
) {
3132 cond_buf
= q_cond_buf
;
3134 MALLOC(cond_buf
, u_int8_t
*, condition_tlv_length
, M_NECP
, M_NOWAIT
);
3135 if (cond_buf
== NULL
) {
3136 NECPLOG(LOG_DEBUG
, "Failed to allocate cond_buffer (%u bytes)", condition_tlv_length
);
3137 FREE(tlv_buffer
, M_NECP
);
3142 memset(cond_buf
, 0, condition_tlv_length
);
3143 u_int8_t
*cond_buf_cursor
= cond_buf
;
3144 if (condition_mask
== NECP_POLICY_CONDITION_DEFAULT
) {
3145 cond_buf_cursor
= necp_buffer_write_tlv(cond_buf_cursor
, NECP_POLICY_CONDITION_DEFAULT
, 0, "", cond_buf
, condition_tlv_length
);
3147 if (condition_mask
& NECP_KERNEL_CONDITION_ALL_INTERFACES
) {
3148 cond_buf_cursor
= necp_buffer_write_tlv(cond_buf_cursor
, NECP_POLICY_CONDITION_ALL_INTERFACES
, 0, "", cond_buf
, condition_tlv_length
);
3150 if (condition_mask
& NECP_KERNEL_CONDITION_BOUND_INTERFACE
) {
3151 cond_buf_cursor
= necp_buffer_write_tlv(cond_buf_cursor
, NECP_POLICY_CONDITION_BOUND_INTERFACE
, strlen(if_name
) + 1,
3152 if_name
, cond_buf
, condition_tlv_length
);
3154 if (condition_mask
& NECP_KERNEL_CONDITION_PROTOCOL
) {
3155 cond_buf_cursor
= necp_buffer_write_tlv(cond_buf_cursor
, NECP_POLICY_CONDITION_IP_PROTOCOL
, sizeof(policy
->cond_protocol
), &policy
->cond_protocol
,
3156 cond_buf
, condition_tlv_length
);
3158 if (condition_mask
& NECP_KERNEL_CONDITION_APP_ID
) {
3159 struct necp_uuid_id_mapping
*entry
= necp_uuid_lookup_uuid_with_app_id_locked(policy
->cond_app_id
);
3160 if (entry
!= NULL
) {
3161 cond_buf_cursor
= necp_buffer_write_tlv(cond_buf_cursor
, NECP_POLICY_CONDITION_APPLICATION
, sizeof(entry
->uuid
), entry
->uuid
,
3162 cond_buf
, condition_tlv_length
);
3165 if (condition_mask
& NECP_KERNEL_CONDITION_REAL_APP_ID
) {
3166 struct necp_uuid_id_mapping
*entry
= necp_uuid_lookup_uuid_with_app_id_locked(policy
->cond_real_app_id
);
3167 if (entry
!= NULL
) {
3168 cond_buf_cursor
= necp_buffer_write_tlv(cond_buf_cursor
, NECP_POLICY_CONDITION_REAL_APPLICATION
, sizeof(entry
->uuid
), entry
->uuid
,
3169 cond_buf
, condition_tlv_length
);
3172 if (condition_mask
& NECP_KERNEL_CONDITION_DOMAIN
) {
3173 cond_buf_cursor
= necp_buffer_write_tlv(cond_buf_cursor
, NECP_POLICY_CONDITION_DOMAIN
, strlen(policy
->cond_domain
) + 1, policy
->cond_domain
,
3174 cond_buf
, condition_tlv_length
);
3176 if (condition_mask
& NECP_KERNEL_CONDITION_ACCOUNT_ID
) {
3177 if (account_id_entry
!= NULL
) {
3178 cond_buf_cursor
= necp_buffer_write_tlv(cond_buf_cursor
, NECP_POLICY_CONDITION_ACCOUNT
, strlen(account_id_entry
->string
) + 1, account_id_entry
->string
,
3179 cond_buf
, condition_tlv_length
);
3182 if (condition_mask
& NECP_KERNEL_CONDITION_PID
) {
3183 cond_buf_cursor
= necp_buffer_write_tlv(cond_buf_cursor
, NECP_POLICY_CONDITION_PID
, sizeof(policy
->cond_pid
), &policy
->cond_pid
,
3184 cond_buf
, condition_tlv_length
);
3186 if (condition_mask
& NECP_KERNEL_CONDITION_UID
) {
3187 cond_buf_cursor
= necp_buffer_write_tlv(cond_buf_cursor
, NECP_POLICY_CONDITION_UID
, sizeof(policy
->cond_uid
), &policy
->cond_uid
,
3188 cond_buf
, condition_tlv_length
);
3190 if (condition_mask
& NECP_KERNEL_CONDITION_TRAFFIC_CLASS
) {
3191 cond_buf_cursor
= necp_buffer_write_tlv(cond_buf_cursor
, NECP_POLICY_CONDITION_TRAFFIC_CLASS
, sizeof(policy
->cond_traffic_class
), &policy
->cond_traffic_class
,
3192 cond_buf
, condition_tlv_length
);
3194 if (condition_mask
& NECP_KERNEL_CONDITION_ENTITLEMENT
) {
3195 cond_buf_cursor
= necp_buffer_write_tlv(cond_buf_cursor
, NECP_POLICY_CONDITION_ENTITLEMENT
, 0, "",
3196 cond_buf
, condition_tlv_length
);
3198 if (condition_mask
& NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT
) {
3199 cond_buf_cursor
= necp_buffer_write_tlv(cond_buf_cursor
, NECP_POLICY_CONDITION_ENTITLEMENT
, strlen(policy
->cond_custom_entitlement
) + 1, policy
->cond_custom_entitlement
,
3200 cond_buf
, condition_tlv_length
);
3202 if (condition_mask
& NECP_KERNEL_CONDITION_LOCAL_START
) {
3203 if (condition_mask
& NECP_KERNEL_CONDITION_LOCAL_END
) {
3204 struct necp_policy_condition_addr_range range
;
3205 memcpy(&range
.start_address
, &policy
->cond_local_start
, sizeof(policy
->cond_local_start
));
3206 memcpy(&range
.end_address
, &policy
->cond_local_end
, sizeof(policy
->cond_local_end
));
3207 cond_buf_cursor
= necp_buffer_write_tlv(cond_buf_cursor
, NECP_POLICY_CONDITION_LOCAL_ADDR_RANGE
, sizeof(range
), &range
,
3208 cond_buf
, condition_tlv_length
);
3210 struct necp_policy_condition_addr addr
;
3211 addr
.prefix
= policy
->cond_local_prefix
;
3212 memcpy(&addr
.address
, &policy
->cond_local_start
, sizeof(policy
->cond_local_start
));
3213 cond_buf_cursor
= necp_buffer_write_tlv(cond_buf_cursor
, NECP_POLICY_CONDITION_LOCAL_ADDR
, sizeof(addr
), &addr
,
3214 cond_buf
, condition_tlv_length
);
3217 if (condition_mask
& NECP_KERNEL_CONDITION_REMOTE_START
) {
3218 if (condition_mask
& NECP_KERNEL_CONDITION_REMOTE_END
) {
3219 struct necp_policy_condition_addr_range range
;
3220 memcpy(&range
.start_address
, &policy
->cond_remote_start
, sizeof(policy
->cond_remote_start
));
3221 memcpy(&range
.end_address
, &policy
->cond_remote_end
, sizeof(policy
->cond_remote_end
));
3222 cond_buf_cursor
= necp_buffer_write_tlv(cond_buf_cursor
, NECP_POLICY_CONDITION_REMOTE_ADDR_RANGE
, sizeof(range
), &range
,
3223 cond_buf
, condition_tlv_length
);
3225 struct necp_policy_condition_addr addr
;
3226 addr
.prefix
= policy
->cond_remote_prefix
;
3227 memcpy(&addr
.address
, &policy
->cond_remote_start
, sizeof(policy
->cond_remote_start
));
3228 cond_buf_cursor
= necp_buffer_write_tlv(cond_buf_cursor
, NECP_POLICY_CONDITION_REMOTE_ADDR
, sizeof(addr
), &addr
,
3229 cond_buf
, condition_tlv_length
);
3232 if (condition_mask
& NECP_KERNEL_CONDITION_AGENT_TYPE
) {
3233 cond_buf_cursor
= necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_AGENT_TYPE,
				sizeof(policy->cond_agent_type), &policy->cond_agent_type,
				cond_buf, condition_tlv_length);
		}

		cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_CONDITION, cond_buf_cursor - cond_buf, cond_buf, tlv_buffer, total_allocated_bytes);
		if (cond_buf != q_cond_buf) {
			FREE(cond_buf, M_NECP);
		}

		tlv_buffer_pointers[policy_i] = tlv_buffer;
		tlv_buffer_lengths[policy_i] = (cursor - tlv_buffer);

		// This is the length of the TLV for NECP_TLV_POLICY_DUMP
		total_tlv_len += sizeof(u_int8_t) + sizeof(u_int32_t) + (cursor - tlv_buffer);
	}

	lck_rw_done(&necp_kernel_policy_lock);

	if (packet != NULL) {
		u_int32_t total_result_length = sizeof(struct necp_packet_header) + total_tlv_len;

		// Allow malloc to wait, since the total buffer may be large and we are not holding any locks
		MALLOC(result_buf, u_int8_t *, total_result_length, M_NECP, M_WAITOK | M_ZERO);
		if (result_buf == NULL) {
			NECPLOG(LOG_DEBUG, "Failed to allocate result_buffer (%u bytes)", total_result_length);
			REPORT_ERROR(NECP_ERROR_INTERNAL);
		}

		result_buf_cursor = result_buf;
		result_buf_cursor = necp_buffer_write_packet_header(result_buf_cursor, NECP_PACKET_TYPE_POLICY_DUMP_ALL, NECP_PACKET_FLAGS_RESPONSE, message_id);

		for (int i = 0; i < policy_count; i++) {
			if (tlv_buffer_pointers[i] != NULL) {
				result_buf_cursor = necp_buffer_write_tlv(result_buf_cursor, NECP_TLV_POLICY_DUMP, tlv_buffer_lengths[i], tlv_buffer_pointers[i], result_buf, total_result_length);
			}
		}

		if (!necp_send_ctl_data(session, result_buf, result_buf_cursor - result_buf)) {
			NECPLOG(LOG_ERR, "Failed to send response (%u bytes)", result_buf_cursor - result_buf);
		} else {
			NECPLOG(LOG_ERR, "Sent data worth %u bytes. Total result buffer length was %u bytes", result_buf_cursor - result_buf, total_result_length);
		}
	}

	if (out_buffer != 0) {
		if (out_buffer_length < total_tlv_len + sizeof(u_int32_t)) {
			NECPLOG(LOG_DEBUG, "out_buffer_length too small (%u < %u)", out_buffer_length, total_tlv_len + sizeof(u_int32_t));
			REPORT_ERROR(NECP_ERROR_INVALID_TLV);
		}

		// Allow malloc to wait, since the total buffer may be large and we are not holding any locks
		MALLOC(result_buf, u_int8_t *, total_tlv_len + sizeof(u_int32_t), M_NECP, M_WAITOK | M_ZERO);
		if (result_buf == NULL) {
			NECPLOG(LOG_DEBUG, "Failed to allocate result_buffer (%u bytes)", total_tlv_len + sizeof(u_int32_t));
			REPORT_ERROR(NECP_ERROR_INTERNAL);
		}

		// Add four bytes for total length at the start
		memcpy(result_buf, &total_tlv_len, sizeof(u_int32_t));

		result_buf_cursor = result_buf + sizeof(u_int32_t);
		for (int i = 0; i < policy_count; i++) {
			if (tlv_buffer_pointers[i] != NULL) {
				result_buf_cursor = necp_buffer_write_tlv(result_buf_cursor, NECP_TLV_POLICY_DUMP, tlv_buffer_lengths[i], tlv_buffer_pointers[i],
									result_buf, total_tlv_len + sizeof(u_int32_t));
			}
		}

		int copy_error = copyout(result_buf, out_buffer, total_tlv_len + sizeof(u_int32_t));
		if (copy_error) {
			NECPLOG(LOG_DEBUG, "Failed to copy out result_buffer (%u bytes)", total_tlv_len + sizeof(u_int32_t));
			REPORT_ERROR(NECP_ERROR_INTERNAL);
		}
	}

done:

	if (error_occured) {
		if (packet != NULL) {
			if (!necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_DUMP_ALL, message_id, response_error)) {
				NECPLOG0(LOG_ERR, "Failed to send error response");
			} else {
				NECPLOG0(LOG_ERR, "Sent error response");
			}
		}
		error_code = necp_get_posix_error_for_necp_error(response_error);
	}

	if (result_buf != NULL) {
		FREE(result_buf, M_NECP);
	}

	if (tlv_buffer_pointers != NULL) {
		for (int i = 0; i < policy_count; i++) {
			if (tlv_buffer_pointers[i] != NULL) {
				FREE(tlv_buffer_pointers[i], M_NECP);
				tlv_buffer_pointers[i] = NULL;
			}
		}
		FREE(tlv_buffer_pointers, M_NECP);
	}

	if (tlv_buffer_lengths != NULL) {
		FREE(tlv_buffer_lengths, M_NECP);
	}

#undef RESET_COND_BUF
#undef UNLOCK_AND_REPORT_ERROR

	return (error_code);
}
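/*
 * Illustrative sketch (not part of the original source): every policy dumped
 * above is wrapped in a NECP_TLV_POLICY_DUMP TLV, so the space reserved per
 * policy is one type byte, a four-byte length, and the inner buffer itself.
 * A hypothetical helper mirroring the accounting done in the loop above:
 *
 *	static inline u_int32_t
 *	necp_dump_tlv_overhead(u_int32_t inner_length)
 *	{
 *		// type (u_int8_t) + length (u_int32_t) + value bytes
 *		return (sizeof(u_int8_t) + sizeof(u_int32_t) + inner_length);
 *	}
 */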
static struct necp_session_policy *
necp_policy_create(struct necp_session *session, necp_policy_order order, u_int8_t *conditions_array, u_int32_t conditions_array_size, u_int8_t *route_rules_array, u_int32_t route_rules_array_size, u_int8_t *result, u_int32_t result_size)
{
	struct necp_session_policy *new_policy = NULL;
	struct necp_session_policy *tmp_policy = NULL;

	if (session == NULL || conditions_array == NULL || result == NULL || result_size == 0) {
		goto done;
	}

	MALLOC_ZONE(new_policy, struct necp_session_policy *, sizeof(*new_policy), M_NECP_SESSION_POLICY, M_WAITOK);
	if (new_policy == NULL) {
		goto done;
	}

	memset(new_policy, 0, sizeof(*new_policy)); // M_ZERO is not supported for MALLOC_ZONE
	new_policy->applied = FALSE;
	new_policy->pending_deletion = FALSE;
	new_policy->pending_update = FALSE;
	new_policy->order = order;
	new_policy->conditions = conditions_array;
	new_policy->conditions_size = conditions_array_size;
	new_policy->route_rules = route_rules_array;
	new_policy->route_rules_size = route_rules_array_size;
	new_policy->result = result;
	new_policy->result_size = result_size;
	new_policy->local_id = necp_policy_get_new_id(session);

	LIST_INSERT_SORTED_ASCENDING(&session->policies, new_policy, chain, order, tmp_policy);

	session->dirty = TRUE;

	if (necp_debug) {
		NECPLOG(LOG_DEBUG, "Created NECP policy, order %d", order);
	}
done:
	return (new_policy);
}
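/*
 * Usage sketch (assumed caller flow, not verbatim from this file): a session
 * handler would typically parse the condition/result TLVs out of a client
 * packet into freshly allocated buffers (the names below are hypothetical),
 * hand them to necp_policy_create(), and later apply the pending policies:
 *
 *	struct necp_session_policy *policy =
 *	    necp_policy_create(session, requested_order, conditions_copy, conditions_size,
 *	        route_rules_copy, route_rules_size, result_copy, result_size);
 *	if (policy != NULL) {
 *		necp_policy_apply_all(session);	// picks up session->dirty set above
 *	}
 *
 * The buffers are owned by the policy after this call and are released in
 * necp_policy_delete().
 */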
static struct necp_session_policy *
necp_policy_find(struct necp_session *session, necp_policy_id policy_id)
{
	struct necp_session_policy *policy = NULL;
	if (policy_id == 0) {
		return (NULL);
	}

	LIST_FOREACH(policy, &session->policies, chain) {
		if (policy->local_id == policy_id) {
			return (policy);
		}
	}

	return (NULL);
}
static inline u_int8_t
necp_policy_get_result_type(struct necp_session_policy *policy)
{
	return (policy ? necp_policy_result_get_type_from_buffer(policy->result, policy->result_size) : 0);
}

static inline u_int32_t
necp_policy_get_result_parameter_length(struct necp_session_policy *policy)
{
	return (policy ? necp_policy_result_get_parameter_length_from_buffer(policy->result, policy->result_size) : 0);
}
static bool
necp_policy_get_result_parameter(struct necp_session_policy *policy, u_int8_t *parameter_buffer, u_int32_t parameter_buffer_length)
{
	if (policy) {
		u_int32_t parameter_length = necp_policy_result_get_parameter_length_from_buffer(policy->result, policy->result_size);
		if (parameter_buffer_length >= parameter_length) {
			u_int8_t *parameter = necp_policy_result_get_parameter_pointer_from_buffer(policy->result, policy->result_size);
			if (parameter && parameter_buffer) {
				memcpy(parameter_buffer, parameter, parameter_length);
				return (TRUE);
			}
		}
	}

	return (FALSE);
}
static bool
necp_policy_mark_for_deletion(struct necp_session *session, struct necp_session_policy *policy)
{
	if (session == NULL || policy == NULL) {
		return (FALSE);
	}

	policy->pending_deletion = TRUE;
	session->dirty = TRUE;

	if (necp_debug) {
		NECPLOG0(LOG_DEBUG, "Marked NECP policy for removal");
	}
	return (TRUE);
}
static bool
necp_policy_mark_all_for_deletion(struct necp_session *session)
{
	struct necp_session_policy *policy = NULL;
	struct necp_session_policy *temp_policy = NULL;

	LIST_FOREACH_SAFE(policy, &session->policies, chain, temp_policy) {
		necp_policy_mark_for_deletion(session, policy);
	}

	return (TRUE);
}
static bool
necp_policy_delete(struct necp_session *session, struct necp_session_policy *policy)
{
	if (session == NULL || policy == NULL) {
		return (FALSE);
	}

	LIST_REMOVE(policy, chain);

	if (policy->result) {
		FREE(policy->result, M_NECP);
		policy->result = NULL;
	}

	if (policy->conditions) {
		FREE(policy->conditions, M_NECP);
		policy->conditions = NULL;
	}

	if (policy->route_rules) {
		FREE(policy->route_rules, M_NECP);
		policy->route_rules = NULL;
	}

	FREE_ZONE(policy, sizeof(*policy), M_NECP_SESSION_POLICY);

	if (necp_debug) {
		NECPLOG0(LOG_DEBUG, "Removed NECP policy");
	}
	return (TRUE);
}
static bool
necp_policy_unapply(struct necp_session_policy *policy)
{
	int i = 0;
	if (policy == NULL) {
		return (FALSE);
	}

	LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	// Release local uuid mappings
	if (!uuid_is_null(policy->applied_app_uuid)) {
		bool removed_mapping = FALSE;
		if (necp_remove_uuid_app_id_mapping(policy->applied_app_uuid, &removed_mapping, TRUE) && removed_mapping) {
			necp_uuid_app_id_mappings_dirty = TRUE;
			necp_num_uuid_app_id_mappings--;
		}
		uuid_clear(policy->applied_app_uuid);
	}
	if (!uuid_is_null(policy->applied_real_app_uuid)) {
		necp_remove_uuid_app_id_mapping(policy->applied_real_app_uuid, NULL, FALSE);
		uuid_clear(policy->applied_real_app_uuid);
	}
	if (!uuid_is_null(policy->applied_result_uuid)) {
		necp_remove_uuid_service_id_mapping(policy->applied_result_uuid);
		uuid_clear(policy->applied_result_uuid);
	}

	// Release string mappings
	if (policy->applied_account != NULL) {
		necp_remove_string_to_id_mapping(&necp_account_id_list, policy->applied_account);
		FREE(policy->applied_account, M_NECP);
		policy->applied_account = NULL;
	}

	// Release route rule
	if (policy->applied_route_rules_id != 0) {
		necp_remove_route_rule(&necp_route_rules, policy->applied_route_rules_id);
		policy->applied_route_rules_id = 0;
	}

	// Remove socket policies
	for (i = 0; i < MAX_KERNEL_SOCKET_POLICIES; i++) {
		if (policy->kernel_socket_policies[i] != 0) {
			necp_kernel_socket_policy_delete(policy->kernel_socket_policies[i]);
			policy->kernel_socket_policies[i] = 0;
		}
	}

	// Remove IP output policies
	for (i = 0; i < MAX_KERNEL_IP_OUTPUT_POLICIES; i++) {
		if (policy->kernel_ip_output_policies[i] != 0) {
			necp_kernel_ip_output_policy_delete(policy->kernel_ip_output_policies[i]);
			policy->kernel_ip_output_policies[i] = 0;
		}
	}

	policy->applied = FALSE;

	return (TRUE);
}
#define	NECP_KERNEL_POLICY_SUBORDER_ID_TUNNEL_CONDITION		0
#define	NECP_KERNEL_POLICY_SUBORDER_NON_ID_TUNNEL_CONDITION	1
#define	NECP_KERNEL_POLICY_SUBORDER_ID_CONDITION				2
#define	NECP_KERNEL_POLICY_SUBORDER_NON_ID_CONDITIONS			3

struct necp_policy_result_ip_tunnel {
	u_int32_t secondary_result;
	char interface_name[IFXNAMSIZ];
} __attribute__((__packed__));

struct necp_policy_result_service {
	uuid_t identifier;
	u_int32_t data;
} __attribute__((__packed__));
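/*
 * Note (illustrative, with assumptions marked): the packed layouts above are
 * read straight out of the policy's result parameter bytes. For
 * NECP_POLICY_RESULT_IP_TUNNEL, the parameter length tells the code below how
 * much of interface_name was actually supplied, e.g.:
 *
 *	u_int32_t name_len = tunnel_parameters_length - sizeof(u_int32_t);	// bytes after secondary_result
 *
 * necp_policy_apply() uses exactly this arithmetic when it NULL-terminates
 * tunnel_parameters.interface_name; "name_len" is just a hypothetical name
 * for that quantity.
 */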
static bool
necp_policy_apply(struct necp_session *session, struct necp_session_policy *policy)
{
	bool socket_only_conditions = FALSE;
	bool socket_ip_conditions = FALSE;

	bool socket_layer_non_id_conditions = FALSE;
	bool ip_output_layer_non_id_conditions = FALSE;
	bool ip_output_layer_non_id_only = FALSE;
	bool ip_output_layer_id_condition = FALSE;
	bool ip_output_layer_tunnel_condition_from_id = FALSE;
	bool ip_output_layer_tunnel_condition_from_non_id = FALSE;
	necp_kernel_policy_id cond_ip_output_layer_id = NECP_KERNEL_POLICY_ID_NONE;

	u_int32_t master_condition_mask = 0;
	u_int32_t master_condition_negated_mask = 0;
	ifnet_t cond_bound_interface = NULL;
	u_int32_t cond_account_id = 0;
	char *cond_domain = NULL;
	char *cond_custom_entitlement = NULL;
	pid_t cond_pid = 0;
	uid_t cond_uid = 0;
	necp_app_id cond_app_id = 0;
	necp_app_id cond_real_app_id = 0;
	struct necp_policy_condition_tc_range cond_traffic_class;
	cond_traffic_class.start_tc = 0;
	cond_traffic_class.end_tc = 0;
	u_int16_t cond_protocol = 0;
	union necp_sockaddr_union cond_local_start;
	union necp_sockaddr_union cond_local_end;
	u_int8_t cond_local_prefix = 0;
	union necp_sockaddr_union cond_remote_start;
	union necp_sockaddr_union cond_remote_end;
	u_int8_t cond_remote_prefix = 0;
	u_int32_t offset = 0;
	u_int8_t ultimate_result = 0;
	u_int32_t secondary_result = 0;
	struct necp_policy_condition_agent_type cond_agent_type = {};
	necp_kernel_policy_result_parameter secondary_result_parameter;
	memset(&secondary_result_parameter, 0, sizeof(secondary_result_parameter));
	u_int32_t cond_last_interface_index = 0;
	necp_kernel_policy_result_parameter ultimate_result_parameter;
	memset(&ultimate_result_parameter, 0, sizeof(ultimate_result_parameter));

	if (policy == NULL) {
		return (FALSE);
	}

	LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	// Process conditions
	while (offset < policy->conditions_size) {
		u_int32_t length = 0;
		u_int8_t *value = necp_buffer_get_tlv_value(policy->conditions, offset, &length);

		u_int8_t condition_type = necp_policy_condition_get_type_from_buffer(value, length);
		u_int8_t condition_flags = necp_policy_condition_get_flags_from_buffer(value, length);
		bool condition_is_negative = condition_flags & NECP_POLICY_CONDITION_FLAGS_NEGATIVE;
		u_int32_t condition_length = necp_policy_condition_get_value_length_from_buffer(value, length);
		u_int8_t *condition_value = necp_policy_condition_get_value_pointer_from_buffer(value, length);
		switch (condition_type) {
			case NECP_POLICY_CONDITION_DEFAULT: {
				socket_ip_conditions = TRUE;
				break;
			}
			case NECP_POLICY_CONDITION_ALL_INTERFACES: {
				master_condition_mask |= NECP_KERNEL_CONDITION_ALL_INTERFACES;
				socket_ip_conditions = TRUE;
				break;
			}
			case NECP_POLICY_CONDITION_ENTITLEMENT: {
				if (condition_length > 0) {
					if (cond_custom_entitlement == NULL) {
						cond_custom_entitlement = necp_copy_string((char *)condition_value, condition_length);
						if (cond_custom_entitlement != NULL) {
							master_condition_mask |= NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT;
							socket_only_conditions = TRUE;
						}
					}
				} else {
					master_condition_mask |= NECP_KERNEL_CONDITION_ENTITLEMENT;
					socket_only_conditions = TRUE;
				}
				break;
			}
			case NECP_POLICY_CONDITION_DOMAIN: {
				// Make sure there is only one such rule
				if (condition_length > 0 && cond_domain == NULL) {
					cond_domain = necp_create_trimmed_domain((char *)condition_value, condition_length);
					if (cond_domain != NULL) {
						master_condition_mask |= NECP_KERNEL_CONDITION_DOMAIN;
						if (condition_is_negative) {
							master_condition_negated_mask |= NECP_KERNEL_CONDITION_DOMAIN;
						}
						socket_only_conditions = TRUE;
					}
				}
				break;
			}
			case NECP_POLICY_CONDITION_ACCOUNT: {
				// Make sure there is only one such rule
				if (condition_length > 0 && cond_account_id == 0 && policy->applied_account == NULL) {
					char *string = NULL;
					MALLOC(string, char *, condition_length + 1, M_NECP, M_WAITOK);
					if (string != NULL) {
						memcpy(string, condition_value, condition_length);
						string[condition_length] = 0;
						cond_account_id = necp_create_string_to_id_mapping(&necp_account_id_list, string);
						if (cond_account_id != 0) {
							policy->applied_account = string; // Save the string in parent policy
							master_condition_mask |= NECP_KERNEL_CONDITION_ACCOUNT_ID;
							if (condition_is_negative) {
								master_condition_negated_mask |= NECP_KERNEL_CONDITION_ACCOUNT_ID;
							}
							socket_only_conditions = TRUE;
						} else {
							FREE(string, M_NECP);
						}
					}
				}
				break;
			}
			case NECP_POLICY_CONDITION_APPLICATION: {
				// Make sure there is only one such rule, because we save the uuid in the policy
				if (condition_length >= sizeof(uuid_t) && cond_app_id == 0) {
					bool allocated_mapping = FALSE;
					uuid_t application_uuid;
					memcpy(application_uuid, condition_value, sizeof(uuid_t));
					cond_app_id = necp_create_uuid_app_id_mapping(application_uuid, &allocated_mapping, TRUE);
					if (cond_app_id != 0) {
						if (allocated_mapping) {
							necp_uuid_app_id_mappings_dirty = TRUE;
							necp_num_uuid_app_id_mappings++;
						}
						uuid_copy(policy->applied_app_uuid, application_uuid);
						master_condition_mask |= NECP_KERNEL_CONDITION_APP_ID;
						if (condition_is_negative) {
							master_condition_negated_mask |= NECP_KERNEL_CONDITION_APP_ID;
						}
						socket_only_conditions = TRUE;
					}
				}
				break;
			}
			case NECP_POLICY_CONDITION_REAL_APPLICATION: {
				// Make sure there is only one such rule, because we save the uuid in the policy
				if (condition_length >= sizeof(uuid_t) && cond_real_app_id == 0) {
					uuid_t real_application_uuid;
					memcpy(real_application_uuid, condition_value, sizeof(uuid_t));
					cond_real_app_id = necp_create_uuid_app_id_mapping(real_application_uuid, NULL, FALSE);
					if (cond_real_app_id != 0) {
						uuid_copy(policy->applied_real_app_uuid, real_application_uuid);
						master_condition_mask |= NECP_KERNEL_CONDITION_REAL_APP_ID;
						if (condition_is_negative) {
							master_condition_negated_mask |= NECP_KERNEL_CONDITION_REAL_APP_ID;
						}
						socket_only_conditions = TRUE;
					}
				}
				break;
			}
			case NECP_POLICY_CONDITION_PID: {
				if (condition_length >= sizeof(pid_t)) {
					master_condition_mask |= NECP_KERNEL_CONDITION_PID;
					if (condition_is_negative) {
						master_condition_negated_mask |= NECP_KERNEL_CONDITION_PID;
					}
					memcpy(&cond_pid, condition_value, sizeof(cond_pid));
					socket_only_conditions = TRUE;
				}
				break;
			}
			case NECP_POLICY_CONDITION_UID: {
				if (condition_length >= sizeof(uid_t)) {
					master_condition_mask |= NECP_KERNEL_CONDITION_UID;
					if (condition_is_negative) {
						master_condition_negated_mask |= NECP_KERNEL_CONDITION_UID;
					}
					memcpy(&cond_uid, condition_value, sizeof(cond_uid));
					socket_only_conditions = TRUE;
				}
				break;
			}
			case NECP_POLICY_CONDITION_TRAFFIC_CLASS: {
				if (condition_length >= sizeof(struct necp_policy_condition_tc_range)) {
					master_condition_mask |= NECP_KERNEL_CONDITION_TRAFFIC_CLASS;
					if (condition_is_negative) {
						master_condition_negated_mask |= NECP_KERNEL_CONDITION_TRAFFIC_CLASS;
					}
					memcpy(&cond_traffic_class, condition_value, sizeof(cond_traffic_class));
					socket_only_conditions = TRUE;
				}
				break;
			}
			case NECP_POLICY_CONDITION_BOUND_INTERFACE: {
				if (condition_length <= IFXNAMSIZ && condition_length > 0) {
					char interface_name[IFXNAMSIZ];
					memcpy(interface_name, condition_value, condition_length);
					interface_name[condition_length - 1] = 0; // Make sure the string is NULL terminated
					if (ifnet_find_by_name(interface_name, &cond_bound_interface) == 0) {
						master_condition_mask |= NECP_KERNEL_CONDITION_BOUND_INTERFACE;
						if (condition_is_negative) {
							master_condition_negated_mask |= NECP_KERNEL_CONDITION_BOUND_INTERFACE;
						}
					}
					socket_ip_conditions = TRUE;
				}
				break;
			}
			case NECP_POLICY_CONDITION_IP_PROTOCOL: {
				if (condition_length >= sizeof(u_int16_t)) {
					master_condition_mask |= NECP_KERNEL_CONDITION_PROTOCOL;
					if (condition_is_negative) {
						master_condition_negated_mask |= NECP_KERNEL_CONDITION_PROTOCOL;
					}
					memcpy(&cond_protocol, condition_value, sizeof(cond_protocol));
					socket_ip_conditions = TRUE;
				}
				break;
			}
			case NECP_POLICY_CONDITION_LOCAL_ADDR: {
				struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)condition_value;
				if (!necp_address_is_valid(&address_struct->address.sa)) {
					break;
				}

				cond_local_prefix = address_struct->prefix;
				memcpy(&cond_local_start, &address_struct->address, sizeof(address_struct->address));
				master_condition_mask |= NECP_KERNEL_CONDITION_LOCAL_START;
				master_condition_mask |= NECP_KERNEL_CONDITION_LOCAL_PREFIX;
				if (condition_is_negative) {
					master_condition_negated_mask |= NECP_KERNEL_CONDITION_LOCAL_START;
					master_condition_negated_mask |= NECP_KERNEL_CONDITION_LOCAL_PREFIX;
				}
				socket_ip_conditions = TRUE;
				break;
			}
			case NECP_POLICY_CONDITION_REMOTE_ADDR: {
				struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)condition_value;
				if (!necp_address_is_valid(&address_struct->address.sa)) {
					break;
				}

				cond_remote_prefix = address_struct->prefix;
				memcpy(&cond_remote_start, &address_struct->address, sizeof(address_struct->address));
				master_condition_mask |= NECP_KERNEL_CONDITION_REMOTE_START;
				master_condition_mask |= NECP_KERNEL_CONDITION_REMOTE_PREFIX;
				if (condition_is_negative) {
					master_condition_negated_mask |= NECP_KERNEL_CONDITION_REMOTE_START;
					master_condition_negated_mask |= NECP_KERNEL_CONDITION_REMOTE_PREFIX;
				}
				socket_ip_conditions = TRUE;
				break;
			}
			case NECP_POLICY_CONDITION_LOCAL_ADDR_RANGE: {
				struct necp_policy_condition_addr_range *address_struct = (struct necp_policy_condition_addr_range *)(void *)condition_value;
				if (!necp_address_is_valid(&address_struct->start_address.sa) ||
					!necp_address_is_valid(&address_struct->end_address.sa)) {
					break;
				}

				memcpy(&cond_local_start, &address_struct->start_address, sizeof(address_struct->start_address));
				memcpy(&cond_local_end, &address_struct->end_address, sizeof(address_struct->end_address));
				master_condition_mask |= NECP_KERNEL_CONDITION_LOCAL_START;
				master_condition_mask |= NECP_KERNEL_CONDITION_LOCAL_END;
				if (condition_is_negative) {
					master_condition_negated_mask |= NECP_KERNEL_CONDITION_LOCAL_START;
					master_condition_negated_mask |= NECP_KERNEL_CONDITION_LOCAL_END;
				}
				socket_ip_conditions = TRUE;
				break;
			}
			case NECP_POLICY_CONDITION_REMOTE_ADDR_RANGE: {
				struct necp_policy_condition_addr_range *address_struct = (struct necp_policy_condition_addr_range *)(void *)condition_value;
				if (!necp_address_is_valid(&address_struct->start_address.sa) ||
					!necp_address_is_valid(&address_struct->end_address.sa)) {
					break;
				}

				memcpy(&cond_remote_start, &address_struct->start_address, sizeof(address_struct->start_address));
				memcpy(&cond_remote_end, &address_struct->end_address, sizeof(address_struct->end_address));
				master_condition_mask |= NECP_KERNEL_CONDITION_REMOTE_START;
				master_condition_mask |= NECP_KERNEL_CONDITION_REMOTE_END;
				if (condition_is_negative) {
					master_condition_negated_mask |= NECP_KERNEL_CONDITION_REMOTE_START;
					master_condition_negated_mask |= NECP_KERNEL_CONDITION_REMOTE_END;
				}
				socket_ip_conditions = TRUE;
				break;
			}
			case NECP_POLICY_CONDITION_AGENT_TYPE: {
				if (condition_length >= sizeof(cond_agent_type)) {
					master_condition_mask |= NECP_KERNEL_CONDITION_AGENT_TYPE;
					memcpy(&cond_agent_type, condition_value, sizeof(cond_agent_type));
					socket_only_conditions = TRUE;
				}
				break;
			}
			default: {
				break;
			}
		}

		offset += sizeof(u_int8_t) + sizeof(u_int32_t) + length;
	}

	// Process the result
	ultimate_result = necp_policy_get_result_type(policy);
	switch (ultimate_result) {
		case NECP_POLICY_RESULT_PASS: {
			if (socket_only_conditions) { // socket_ip_conditions can be TRUE or FALSE
				socket_layer_non_id_conditions = TRUE;
				ip_output_layer_id_condition = TRUE;
			} else if (socket_ip_conditions) {
				socket_layer_non_id_conditions = TRUE;
				ip_output_layer_id_condition = TRUE;
				ip_output_layer_non_id_conditions = TRUE;
			}
			break;
		}
		case NECP_POLICY_RESULT_DROP: {
			if (socket_only_conditions) { // socket_ip_conditions can be TRUE or FALSE
				socket_layer_non_id_conditions = TRUE;
			} else if (socket_ip_conditions) {
				socket_layer_non_id_conditions = TRUE;
				ip_output_layer_non_id_conditions = TRUE;
				ip_output_layer_non_id_only = TRUE; // Only apply drop to packets that didn't go through socket layer
			}
			break;
		}
		case NECP_POLICY_RESULT_SKIP: {
			u_int32_t skip_policy_order = 0;
			if (necp_policy_get_result_parameter(policy, (u_int8_t *)&skip_policy_order, sizeof(skip_policy_order))) {
				ultimate_result_parameter.skip_policy_order = skip_policy_order;
			}

			if (socket_only_conditions) { // socket_ip_conditions can be TRUE or FALSE
				socket_layer_non_id_conditions = TRUE;
				ip_output_layer_id_condition = TRUE;
			} else if (socket_ip_conditions) {
				socket_layer_non_id_conditions = TRUE;
				ip_output_layer_non_id_conditions = TRUE;
			}
			break;
		}
		case NECP_POLICY_RESULT_SOCKET_DIVERT:
		case NECP_POLICY_RESULT_SOCKET_FILTER: {
			u_int32_t control_unit = 0;
			if (necp_policy_get_result_parameter(policy, (u_int8_t *)&control_unit, sizeof(control_unit))) {
				ultimate_result_parameter.flow_divert_control_unit = control_unit;
			}
			socket_layer_non_id_conditions = TRUE;
			break;
		}
		case NECP_POLICY_RESULT_IP_TUNNEL: {
			struct necp_policy_result_ip_tunnel tunnel_parameters;
			u_int32_t tunnel_parameters_length = necp_policy_get_result_parameter_length(policy);
			if (tunnel_parameters_length > sizeof(u_int32_t) &&
				tunnel_parameters_length <= sizeof(struct necp_policy_result_ip_tunnel) &&
				necp_policy_get_result_parameter(policy, (u_int8_t *)&tunnel_parameters, sizeof(tunnel_parameters))) {
				ifnet_t tunnel_interface = NULL;
				tunnel_parameters.interface_name[tunnel_parameters_length - sizeof(u_int32_t) - 1] = 0; // Make sure the string is NULL terminated
				if (ifnet_find_by_name(tunnel_parameters.interface_name, &tunnel_interface) == 0) {
					ultimate_result_parameter.tunnel_interface_index = tunnel_interface->if_index;
					ifnet_release(tunnel_interface);
				}

				secondary_result = tunnel_parameters.secondary_result;
				if (secondary_result) {
					cond_last_interface_index = ultimate_result_parameter.tunnel_interface_index;
				}
			}

			if (socket_only_conditions) { // socket_ip_conditions can be TRUE or FALSE
				socket_layer_non_id_conditions = TRUE;
				ip_output_layer_id_condition = TRUE;
				if (secondary_result) {
					ip_output_layer_tunnel_condition_from_id = TRUE;
				}
			} else if (socket_ip_conditions) {
				socket_layer_non_id_conditions = TRUE;
				ip_output_layer_id_condition = TRUE;
				ip_output_layer_non_id_conditions = TRUE;
				if (secondary_result) {
					ip_output_layer_tunnel_condition_from_id = TRUE;
					ip_output_layer_tunnel_condition_from_non_id = TRUE;
				}
			}
			break;
		}
		case NECP_POLICY_RESULT_TRIGGER:
		case NECP_POLICY_RESULT_TRIGGER_IF_NEEDED:
		case NECP_POLICY_RESULT_TRIGGER_SCOPED:
		case NECP_POLICY_RESULT_NO_TRIGGER_SCOPED: {
			struct necp_policy_result_service service_parameters;
			u_int32_t service_result_length = necp_policy_get_result_parameter_length(policy);
			bool has_extra_service_data = FALSE;
			if (service_result_length >= (sizeof(service_parameters))) {
				has_extra_service_data = TRUE;
			}
			if (necp_policy_get_result_parameter(policy, (u_int8_t *)&service_parameters, sizeof(service_parameters))) {
				ultimate_result_parameter.service.identifier = necp_create_uuid_service_id_mapping(service_parameters.identifier);
				if (ultimate_result_parameter.service.identifier != 0) {
					uuid_copy(policy->applied_result_uuid, service_parameters.identifier);
					socket_layer_non_id_conditions = TRUE;
					if (has_extra_service_data) {
						ultimate_result_parameter.service.data = service_parameters.data;
					} else {
						ultimate_result_parameter.service.data = 0;
					}
				}
			}
			break;
		}
		case NECP_POLICY_RESULT_USE_NETAGENT:
		case NECP_POLICY_RESULT_NETAGENT_SCOPED: {
			uuid_t netagent_uuid;
			if (necp_policy_get_result_parameter(policy, (u_int8_t *)&netagent_uuid, sizeof(netagent_uuid))) {
				ultimate_result_parameter.netagent_id = necp_create_uuid_service_id_mapping(netagent_uuid);
				if (ultimate_result_parameter.netagent_id != 0) {
					uuid_copy(policy->applied_result_uuid, netagent_uuid);
					socket_layer_non_id_conditions = TRUE;
				}
			}
			break;
		}
		case NECP_POLICY_RESULT_SOCKET_SCOPED: {
			u_int32_t interface_name_length = necp_policy_get_result_parameter_length(policy);
			if (interface_name_length <= IFXNAMSIZ && interface_name_length > 0) {
				char interface_name[IFXNAMSIZ];
				ifnet_t scope_interface = NULL;
				necp_policy_get_result_parameter(policy, (u_int8_t *)interface_name, interface_name_length);
				interface_name[interface_name_length - 1] = 0; // Make sure the string is NULL terminated
				if (ifnet_find_by_name(interface_name, &scope_interface) == 0) {
					ultimate_result_parameter.scoped_interface_index = scope_interface->if_index;
					socket_layer_non_id_conditions = TRUE;
					ifnet_release(scope_interface);
				}
			}
			break;
		}
		case NECP_POLICY_RESULT_SCOPED_DIRECT: {
			socket_layer_non_id_conditions = TRUE;
			break;
		}
		case NECP_POLICY_RESULT_ROUTE_RULES: {
			if (policy->route_rules != NULL && policy->route_rules_size > 0) {
				u_int32_t route_rule_id = necp_create_route_rule(&necp_route_rules, policy->route_rules, policy->route_rules_size);
				if (route_rule_id > 0) {
					policy->applied_route_rules_id = route_rule_id;
					ultimate_result_parameter.route_rule_id = route_rule_id;
					socket_layer_non_id_conditions = TRUE;
				}
			}
			break;
		}
		default: {
			break;
		}
	}

	// Create the underlying kernel policies
	if (socket_layer_non_id_conditions) {
		necp_kernel_policy_id policy_id = necp_kernel_socket_policy_add(policy->order, session->session_order, session->proc_pid, master_condition_mask, master_condition_negated_mask, cond_app_id, cond_real_app_id, cond_custom_entitlement, cond_account_id, cond_domain, cond_pid, cond_uid, cond_bound_interface, cond_traffic_class, cond_protocol, &cond_local_start, &cond_local_end, cond_local_prefix, &cond_remote_start, &cond_remote_end, cond_remote_prefix, &cond_agent_type, ultimate_result, ultimate_result_parameter);

		if (policy_id == 0) {
			NECPLOG0(LOG_DEBUG, "Error applying socket kernel policy");
			return (FALSE);
		}

		cond_ip_output_layer_id = policy_id;
		policy->kernel_socket_policies[0] = policy_id;
	}

	if (ip_output_layer_non_id_conditions) {
		u_int32_t condition_mask = master_condition_mask;
		if (ip_output_layer_non_id_only) {
			condition_mask |= NECP_KERNEL_CONDITION_POLICY_ID;
		}
		necp_kernel_policy_id policy_id = necp_kernel_ip_output_policy_add(policy->order, NECP_KERNEL_POLICY_SUBORDER_NON_ID_CONDITIONS, session->session_order, session->proc_pid, condition_mask, master_condition_negated_mask, NECP_KERNEL_POLICY_ID_NONE, cond_bound_interface, 0, cond_protocol, &cond_local_start, &cond_local_end, cond_local_prefix, &cond_remote_start, &cond_remote_end, cond_remote_prefix, ultimate_result, ultimate_result_parameter);

		if (policy_id == 0) {
			NECPLOG0(LOG_DEBUG, "Error applying IP output kernel policy");
			return (FALSE);
		}

		policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_NON_ID_CONDITIONS] = policy_id;
	}

	if (ip_output_layer_id_condition) {
		necp_kernel_policy_id policy_id = necp_kernel_ip_output_policy_add(policy->order, NECP_KERNEL_POLICY_SUBORDER_ID_CONDITION, session->session_order, session->proc_pid, NECP_KERNEL_CONDITION_POLICY_ID | NECP_KERNEL_CONDITION_ALL_INTERFACES, 0, cond_ip_output_layer_id, NULL, 0, 0, NULL, NULL, 0, NULL, NULL, 0, ultimate_result, ultimate_result_parameter);

		if (policy_id == 0) {
			NECPLOG0(LOG_DEBUG, "Error applying IP output kernel policy");
			return (FALSE);
		}

		policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_ID_CONDITION] = policy_id;
	}

	// Extra policies for IP Output tunnels for when packets loop back
	if (ip_output_layer_tunnel_condition_from_id) {
		necp_kernel_policy_id policy_id = necp_kernel_ip_output_policy_add(policy->order, NECP_KERNEL_POLICY_SUBORDER_NON_ID_TUNNEL_CONDITION, session->session_order, session->proc_pid, NECP_KERNEL_CONDITION_POLICY_ID | NECP_KERNEL_CONDITION_LAST_INTERFACE | NECP_KERNEL_CONDITION_ALL_INTERFACES, 0, policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_NON_ID_CONDITIONS], NULL, cond_last_interface_index, 0, NULL, NULL, 0, NULL, NULL, 0, secondary_result, secondary_result_parameter);

		if (policy_id == 0) {
			NECPLOG0(LOG_DEBUG, "Error applying IP output kernel policy");
			return (FALSE);
		}

		policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_NON_ID_TUNNEL_CONDITION] = policy_id;
	}

	if (ip_output_layer_tunnel_condition_from_id) {
		necp_kernel_policy_id policy_id = necp_kernel_ip_output_policy_add(policy->order, NECP_KERNEL_POLICY_SUBORDER_ID_TUNNEL_CONDITION, session->session_order, session->proc_pid, NECP_KERNEL_CONDITION_POLICY_ID | NECP_KERNEL_CONDITION_LAST_INTERFACE | NECP_KERNEL_CONDITION_ALL_INTERFACES, 0, policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_ID_CONDITION], NULL, cond_last_interface_index, 0, NULL, NULL, 0, NULL, NULL, 0, secondary_result, secondary_result_parameter);

		if (policy_id == 0) {
			NECPLOG0(LOG_DEBUG, "Error applying IP output kernel policy");
			return (FALSE);
		}

		policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_ID_TUNNEL_CONDITION] = policy_id;
	}

	policy->applied = TRUE;
	policy->pending_update = FALSE;
	return (TRUE);
}
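/*
 * Rough decision table for necp_policy_apply() (restated from the logic above,
 * not an authoritative spec):
 *
 *	socket-only conditions	-> socket-layer policy, plus an IP-output policy keyed on the
 *				   resulting socket policy ID (NECP_KERNEL_CONDITION_POLICY_ID)
 *	socket+IP conditions	-> socket-layer policy, ID-keyed IP-output policy, and a non-ID
 *				   IP-output policy for packets that never touch the socket layer
 *	tunnel secondary result	-> additional IP-output policies matched on the last interface,
 *				   to catch tunnel traffic that loops back through IP output
 */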
static void
necp_policy_apply_all(struct necp_session *session)
{
	struct necp_session_policy *policy = NULL;
	struct necp_session_policy *temp_policy = NULL;
	struct kev_necp_policies_changed_data kev_data;
	kev_data.changed_count = 0;

	lck_rw_lock_exclusive(&necp_kernel_policy_lock);

	// Remove existing applied policies
	if (session->dirty) {
		LIST_FOREACH_SAFE(policy, &session->policies, chain, temp_policy) {
			if (policy->pending_deletion) {
				if (policy->applied) {
					necp_policy_unapply(policy);
				}
				// Delete the policy
				necp_policy_delete(session, policy);
			} else if (!policy->applied) {
				necp_policy_apply(session, policy);
			} else if (policy->pending_update) {
				// Must have been applied, but needs an update. Remove and re-add.
				necp_policy_unapply(policy);
				necp_policy_apply(session, policy);
			}
		}

		necp_kernel_socket_policies_update_uuid_table();
		necp_kernel_socket_policies_reprocess();
		necp_kernel_ip_output_policies_reprocess();

		// Clear dirty bit flags
		session->dirty = FALSE;
	}

	lck_rw_done(&necp_kernel_policy_lock);

	necp_update_all_clients();
	necp_post_change_event(&kev_data);

	if (necp_debug) {
		NECPLOG0(LOG_DEBUG, "Applied NECP policies");
	}
}
// Kernel Policy Management
// ---------------------
// Kernel policies are derived from session policies

static necp_kernel_policy_id
necp_kernel_policy_get_new_id(bool socket_level)
{
	static necp_kernel_policy_id necp_last_kernel_socket_policy_id = 0;
	static necp_kernel_policy_id necp_last_kernel_ip_policy_id = 0;

	necp_kernel_policy_id newid = NECP_KERNEL_POLICY_ID_NONE;

	LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	if (socket_level) {
		bool wrapped = FALSE;
		do {
			necp_last_kernel_socket_policy_id++;
			if (necp_last_kernel_socket_policy_id < NECP_KERNEL_POLICY_ID_FIRST_VALID_SOCKET ||
				necp_last_kernel_socket_policy_id >= NECP_KERNEL_POLICY_ID_FIRST_VALID_IP) {
				if (wrapped) {
					// Already wrapped, give up
					NECPLOG0(LOG_ERR, "Failed to find a free socket kernel policy ID.\n");
					return (NECP_KERNEL_POLICY_ID_NONE);
				}
				necp_last_kernel_socket_policy_id = NECP_KERNEL_POLICY_ID_FIRST_VALID_SOCKET;
				wrapped = TRUE;
			}
			newid = necp_last_kernel_socket_policy_id;
		} while (necp_kernel_socket_policy_find(newid) != NULL); // If already used, keep trying
	} else {
		bool wrapped = FALSE;
		do {
			necp_last_kernel_ip_policy_id++;
			if (necp_last_kernel_ip_policy_id < NECP_KERNEL_POLICY_ID_FIRST_VALID_IP) {
				if (wrapped) {
					// Already wrapped, give up
					NECPLOG0(LOG_ERR, "Failed to find a free IP kernel policy ID.\n");
					return (NECP_KERNEL_POLICY_ID_NONE);
				}
				necp_last_kernel_ip_policy_id = NECP_KERNEL_POLICY_ID_FIRST_VALID_IP;
				wrapped = TRUE;
			}
			newid = necp_last_kernel_ip_policy_id;
		} while (necp_kernel_ip_output_policy_find(newid) != NULL); // If already used, keep trying
	}

	if (newid == NECP_KERNEL_POLICY_ID_NONE) {
		NECPLOG0(LOG_ERR, "Allocate kernel policy id failed.\n");
		return (NECP_KERNEL_POLICY_ID_NONE);
	}

	return (newid);
}
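/*
 * Illustrative example (hypothetical values): socket-level IDs are drawn from
 * [NECP_KERNEL_POLICY_ID_FIRST_VALID_SOCKET, NECP_KERNEL_POLICY_ID_FIRST_VALID_IP)
 * and IP-level IDs from NECP_KERNEL_POLICY_ID_FIRST_VALID_IP upward, so a caller
 * only supplies the level:
 *
 *	necp_kernel_policy_id sock_id = necp_kernel_policy_get_new_id(true);	// socket range
 *	necp_kernel_policy_id ip_id = necp_kernel_policy_get_new_id(false);	// IP range
 *
 * Both calls assume the caller already holds necp_kernel_policy_lock
 * exclusively, as asserted above.
 */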
#define	NECP_KERNEL_VALID_SOCKET_CONDITIONS (NECP_KERNEL_CONDITION_APP_ID | NECP_KERNEL_CONDITION_REAL_APP_ID | NECP_KERNEL_CONDITION_DOMAIN | NECP_KERNEL_CONDITION_ACCOUNT_ID | NECP_KERNEL_CONDITION_PID | NECP_KERNEL_CONDITION_UID | NECP_KERNEL_CONDITION_ALL_INTERFACES | NECP_KERNEL_CONDITION_BOUND_INTERFACE | NECP_KERNEL_CONDITION_TRAFFIC_CLASS | NECP_KERNEL_CONDITION_PROTOCOL | NECP_KERNEL_CONDITION_LOCAL_START | NECP_KERNEL_CONDITION_LOCAL_END | NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_REMOTE_START | NECP_KERNEL_CONDITION_REMOTE_END | NECP_KERNEL_CONDITION_REMOTE_PREFIX | NECP_KERNEL_CONDITION_ENTITLEMENT | NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT | NECP_KERNEL_CONDITION_AGENT_TYPE)

static necp_kernel_policy_id
necp_kernel_socket_policy_add(necp_policy_order order, u_int32_t session_order, int session_pid, u_int32_t condition_mask, u_int32_t condition_negated_mask, necp_app_id cond_app_id, necp_app_id cond_real_app_id, char *cond_custom_entitlement, u_int32_t cond_account_id, char *cond_domain, pid_t cond_pid, uid_t cond_uid, ifnet_t cond_bound_interface, struct necp_policy_condition_tc_range cond_traffic_class, u_int16_t cond_protocol, union necp_sockaddr_union *cond_local_start, union necp_sockaddr_union *cond_local_end, u_int8_t cond_local_prefix, union necp_sockaddr_union *cond_remote_start, union necp_sockaddr_union *cond_remote_end, u_int8_t cond_remote_prefix, struct necp_policy_condition_agent_type *cond_agent_type, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter)
{
	struct necp_kernel_socket_policy *new_kernel_policy = NULL;
	struct necp_kernel_socket_policy *tmp_kernel_policy = NULL;

	MALLOC_ZONE(new_kernel_policy, struct necp_kernel_socket_policy *, sizeof(*new_kernel_policy), M_NECP_SOCKET_POLICY, M_WAITOK);
	if (new_kernel_policy == NULL) {
		goto done;
	}

	memset(new_kernel_policy, 0, sizeof(*new_kernel_policy)); // M_ZERO is not supported for MALLOC_ZONE
	new_kernel_policy->id = necp_kernel_policy_get_new_id(true);
	new_kernel_policy->order = order;
	new_kernel_policy->session_order = session_order;
	new_kernel_policy->session_pid = session_pid;

	// Sanitize condition mask
	new_kernel_policy->condition_mask = (condition_mask & NECP_KERNEL_VALID_SOCKET_CONDITIONS);
	if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES) && (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE)) {
		new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_BOUND_INTERFACE;
	}
	if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) && !(new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID)) {
		new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_REAL_APP_ID;
	}
	if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT) && !(new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID)) {
		new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_ENTITLEMENT;
	}
	if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) && (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX)) {
		new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_LOCAL_PREFIX;
	}
	if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) && (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX)) {
		new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_REMOTE_PREFIX;
	}
	new_kernel_policy->condition_negated_mask = condition_negated_mask & new_kernel_policy->condition_mask;

	// Set condition values
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID) {
		new_kernel_policy->cond_app_id = cond_app_id;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
		new_kernel_policy->cond_real_app_id = cond_real_app_id;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT) {
		new_kernel_policy->cond_custom_entitlement = cond_custom_entitlement;
		new_kernel_policy->cond_custom_entitlement_matched = necp_boolean_state_unknown;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID) {
		new_kernel_policy->cond_account_id = cond_account_id;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_DOMAIN) {
		new_kernel_policy->cond_domain = cond_domain;
		new_kernel_policy->cond_domain_dot_count = necp_count_dots(cond_domain, strlen(cond_domain));
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PID) {
		new_kernel_policy->cond_pid = cond_pid;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_UID) {
		new_kernel_policy->cond_uid = cond_uid;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
		if (cond_bound_interface) {
			ifnet_reference(cond_bound_interface);
		}
		new_kernel_policy->cond_bound_interface = cond_bound_interface;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
		new_kernel_policy->cond_traffic_class = cond_traffic_class;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
		new_kernel_policy->cond_protocol = cond_protocol;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
		memcpy(&new_kernel_policy->cond_local_start, cond_local_start, cond_local_start->sa.sa_len);
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
		memcpy(&new_kernel_policy->cond_local_end, cond_local_end, cond_local_end->sa.sa_len);
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
		new_kernel_policy->cond_local_prefix = cond_local_prefix;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
		memcpy(&new_kernel_policy->cond_remote_start, cond_remote_start, cond_remote_start->sa.sa_len);
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
		memcpy(&new_kernel_policy->cond_remote_end, cond_remote_end, cond_remote_end->sa.sa_len);
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
		new_kernel_policy->cond_remote_prefix = cond_remote_prefix;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_AGENT_TYPE) {
		memcpy(&new_kernel_policy->cond_agent_type, cond_agent_type, sizeof(*cond_agent_type));
	}

	new_kernel_policy->result = result;
	memcpy(&new_kernel_policy->result_parameter, &result_parameter, sizeof(result_parameter));

	if (necp_debug) {
		NECPLOG(LOG_DEBUG, "Added kernel policy: socket, id=%d, mask=%x\n", new_kernel_policy->id, new_kernel_policy->condition_mask);
	}
	LIST_INSERT_SORTED_TWICE_ASCENDING(&necp_kernel_socket_policies, new_kernel_policy, chain, session_order, order, tmp_kernel_policy);
done:
	return (new_kernel_policy ? new_kernel_policy->id : 0);
}
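/*
 * Example of the sanitization above (illustrative, with a made-up input mask):
 * if a session supplies both "all interfaces" and a bound-interface condition,
 * the bound-interface bit is dropped before the policy is stored:
 *
 *	u_int32_t mask = NECP_KERNEL_CONDITION_ALL_INTERFACES | NECP_KERNEL_CONDITION_BOUND_INTERFACE;
 *	// After necp_kernel_socket_policy_add(), condition_mask keeps only
 *	// NECP_KERNEL_CONDITION_ALL_INTERFACES, and condition_negated_mask is
 *	// clipped to the surviving bits (condition_negated_mask & condition_mask).
 */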
static struct necp_kernel_socket_policy *
necp_kernel_socket_policy_find(necp_kernel_policy_id policy_id)
{
	struct necp_kernel_socket_policy *kernel_policy = NULL;
	struct necp_kernel_socket_policy *tmp_kernel_policy = NULL;

	if (policy_id == 0) {
		return (NULL);
	}

	LIST_FOREACH_SAFE(kernel_policy, &necp_kernel_socket_policies, chain, tmp_kernel_policy) {
		if (kernel_policy->id == policy_id) {
			return (kernel_policy);
		}
	}

	return (NULL);
}
static bool
necp_kernel_socket_policy_delete(necp_kernel_policy_id policy_id)
{
	struct necp_kernel_socket_policy *policy = NULL;

	LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	policy = necp_kernel_socket_policy_find(policy_id);
	if (policy) {
		LIST_REMOVE(policy, chain);

		if (policy->cond_bound_interface) {
			ifnet_release(policy->cond_bound_interface);
			policy->cond_bound_interface = NULL;
		}

		if (policy->cond_domain) {
			FREE(policy->cond_domain, M_NECP);
			policy->cond_domain = NULL;
		}

		if (policy->cond_custom_entitlement) {
			FREE(policy->cond_custom_entitlement, M_NECP);
			policy->cond_custom_entitlement = NULL;
		}

		FREE_ZONE(policy, sizeof(*policy), M_NECP_SOCKET_POLICY);
		return (TRUE);
	}

	return (FALSE);
}
static inline const char *
necp_get_result_description(char *result_string, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter)
{
	uuid_string_t uuid_string;
	switch (result) {
		case NECP_KERNEL_POLICY_RESULT_NONE: {
			snprintf(result_string, MAX_RESULT_STRING_LEN, "None");
			break;
		}
		case NECP_KERNEL_POLICY_RESULT_PASS: {
			snprintf(result_string, MAX_RESULT_STRING_LEN, "Pass");
			break;
		}
		case NECP_KERNEL_POLICY_RESULT_SKIP: {
			snprintf(result_string, MAX_RESULT_STRING_LEN, "Skip (%u)", result_parameter.skip_policy_order);
			break;
		}
		case NECP_KERNEL_POLICY_RESULT_DROP: {
			snprintf(result_string, MAX_RESULT_STRING_LEN, "Drop");
			break;
		}
		case NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT: {
			snprintf(result_string, MAX_RESULT_STRING_LEN, "SocketDivert (%d)", result_parameter.flow_divert_control_unit);
			break;
		}
		case NECP_KERNEL_POLICY_RESULT_SOCKET_FILTER: {
			snprintf(result_string, MAX_RESULT_STRING_LEN, "SocketFilter (%d)", result_parameter.filter_control_unit);
			break;
		}
		case NECP_KERNEL_POLICY_RESULT_IP_TUNNEL: {
			ifnet_t interface = ifindex2ifnet[result_parameter.tunnel_interface_index];
			snprintf(result_string, MAX_RESULT_STRING_LEN, "IPTunnel (%s%d)", ifnet_name(interface), ifnet_unit(interface));
			break;
		}
		case NECP_KERNEL_POLICY_RESULT_IP_FILTER: {
			snprintf(result_string, MAX_RESULT_STRING_LEN, "IPFilter");
			break;
		}
		case NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED: {
			ifnet_t interface = ifindex2ifnet[result_parameter.scoped_interface_index];
			snprintf(result_string, MAX_RESULT_STRING_LEN, "SocketScoped (%s%d)", ifnet_name(interface), ifnet_unit(interface));
			break;
		}
		case NECP_KERNEL_POLICY_RESULT_SCOPED_DIRECT: {
			snprintf(result_string, MAX_RESULT_STRING_LEN, "ScopedDirect");
			break;
		}
		case NECP_KERNEL_POLICY_RESULT_ROUTE_RULES: {
			int index = 0;
			char interface_names[IFXNAMSIZ][MAX_ROUTE_RULE_INTERFACES];
			struct necp_route_rule *route_rule = necp_lookup_route_rule_locked(&necp_route_rules, result_parameter.route_rule_id);
			if (route_rule != NULL) {
				for (index = 0; index < MAX_ROUTE_RULE_INTERFACES; index++) {
					if (route_rule->exception_if_indices[index] != 0) {
						ifnet_t interface = ifindex2ifnet[route_rule->exception_if_indices[index]];
						snprintf(interface_names[index], IFXNAMSIZ, "%s%d", ifnet_name(interface), ifnet_unit(interface));
					} else {
						memset(interface_names[index], 0, IFXNAMSIZ);
					}
				}
				switch (route_rule->default_action) {
					case NECP_ROUTE_RULE_DENY_INTERFACE:
						snprintf(result_string, MAX_RESULT_STRING_LEN, "RouteRules (Only %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)",
							(route_rule->cellular_action == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? "Cell " : "",
							(route_rule->wifi_action == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? "WiFi " : "",
							(route_rule->wired_action == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? "Wired " : "",
							(route_rule->expensive_action == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? "Exp " : "",
							(route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[0] : "",
							(route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
							(route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[1] : "",
							(route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
							(route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[2] : "",
							(route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
							(route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[3] : "",
							(route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
							(route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[4] : "",
							(route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
							(route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[5] : "",
							(route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
							(route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[6] : "",
							(route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
							(route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[7] : "",
							(route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
							(route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[8] : "",
							(route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
							(route_rule->exception_if_actions[9] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[9] : "");
						break;
					case NECP_ROUTE_RULE_ALLOW_INTERFACE:
						snprintf(result_string, MAX_RESULT_STRING_LEN, "RouteRules (%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)",
							(route_rule->cellular_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!Cell " : "",
							(route_rule->wifi_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!WiFi " : "",
							(route_rule->wired_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!Wired " : "",
							(route_rule->expensive_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!Exp " : "",
							(route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
							(route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[0] : "",
							(route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
							(route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[1] : "",
							(route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
							(route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[2] : "",
							(route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
							(route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[3] : "",
							(route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
							(route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[4] : "",
							(route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
							(route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[5] : "",
							(route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
							(route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[6] : "",
							(route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
							(route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[7] : "",
							(route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
							(route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[8] : "",
							(route_rule->exception_if_actions[9] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
							(route_rule->exception_if_actions[9] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[9] : "");
						break;
					case NECP_ROUTE_RULE_QOS_MARKING:
						snprintf(result_string, MAX_RESULT_STRING_LEN, "RouteRules (QoSMarking %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)",
							(route_rule->cellular_action == NECP_ROUTE_RULE_QOS_MARKING) ? "Cell " : "",
							(route_rule->wifi_action == NECP_ROUTE_RULE_QOS_MARKING) ? "WiFi " : "",
							(route_rule->wired_action == NECP_ROUTE_RULE_QOS_MARKING) ? "Wired " : "",
							(route_rule->expensive_action == NECP_ROUTE_RULE_QOS_MARKING) ? "Exp " : "",
							(route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[0] : "",
							(route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
							(route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[1] : "",
							(route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
							(route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[2] : "",
							(route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
							(route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[3] : "",
							(route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
							(route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[4] : "",
							(route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
							(route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[5] : "",
							(route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
							(route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[6] : "",
							(route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
							(route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[7] : "",
							(route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
							(route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[8] : "",
							(route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
							(route_rule->exception_if_actions[9] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[9] : "");
						break;
					default:
						snprintf(result_string, MAX_RESULT_STRING_LEN, "RouteRules (Unknown)");
						break;
				}
			}
			break;
		}
		case NECP_KERNEL_POLICY_RESULT_USE_NETAGENT: {
			bool found_mapping = FALSE;
			struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.netagent_id);
			if (mapping != NULL) {
				uuid_unparse(mapping->uuid, uuid_string);
				found_mapping = TRUE;
			}
			snprintf(result_string, MAX_RESULT_STRING_LEN, "UseNetAgent (%s)", found_mapping ? uuid_string : "Unknown");
			break;
		}
		case NECP_KERNEL_POLICY_RESULT_NETAGENT_SCOPED: {
			bool found_mapping = FALSE;
			struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.netagent_id);
			if (mapping != NULL) {
				uuid_unparse(mapping->uuid, uuid_string);
				found_mapping = TRUE;
			}
			snprintf(result_string, MAX_RESULT_STRING_LEN, "NetAgentScoped (%s)", found_mapping ? uuid_string : "Unknown");
			break;
		}
		case NECP_POLICY_RESULT_TRIGGER: {
			bool found_mapping = FALSE;
			struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.service.identifier);
			if (mapping != NULL) {
				uuid_unparse(mapping->uuid, uuid_string);
				found_mapping = TRUE;
			}
			snprintf(result_string, MAX_RESULT_STRING_LEN, "Trigger (%s.%d)", found_mapping ? uuid_string : "Unknown", result_parameter.service.data);
			break;
		}
		case NECP_POLICY_RESULT_TRIGGER_IF_NEEDED: {
			bool found_mapping = FALSE;
			struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.service.identifier);
			if (mapping != NULL) {
				uuid_unparse(mapping->uuid, uuid_string);
				found_mapping = TRUE;
			}
			snprintf(result_string, MAX_RESULT_STRING_LEN, "TriggerIfNeeded (%s.%d)", found_mapping ? uuid_string : "Unknown", result_parameter.service.data);
			break;
		}
		case NECP_POLICY_RESULT_TRIGGER_SCOPED: {
			bool found_mapping = FALSE;
			struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.service.identifier);
			if (mapping != NULL) {
				uuid_unparse(mapping->uuid, uuid_string);
				found_mapping = TRUE;
			}
			snprintf(result_string, MAX_RESULT_STRING_LEN, "TriggerScoped (%s.%d)", found_mapping ? uuid_string : "Unknown", result_parameter.service.data);
			break;
		}
		case NECP_POLICY_RESULT_NO_TRIGGER_SCOPED: {
			bool found_mapping = FALSE;
			struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.service.identifier);
			if (mapping != NULL) {
				uuid_unparse(mapping->uuid, uuid_string);
				found_mapping = TRUE;
			}
			snprintf(result_string, MAX_RESULT_STRING_LEN, "NoTriggerScoped (%s.%d)", found_mapping ? uuid_string : "Unknown", result_parameter.service.data);
			break;
		}
		default: {
			snprintf(result_string, MAX_RESULT_STRING_LEN, "Unknown %d (%d)", result, result_parameter.tunnel_interface_index);
			break;
		}
	}

	return (result_string);
}
static void
necp_kernel_socket_policies_dump_all(void)
{
	if (necp_debug) {
		struct necp_kernel_socket_policy *policy = NULL;
		int policy_i;
		int app_i;
		char result_string[MAX_RESULT_STRING_LEN];
		char proc_name_string[MAXCOMLEN + 1];
		memset(result_string, 0, MAX_RESULT_STRING_LEN);
		memset(proc_name_string, 0, MAXCOMLEN + 1);

		NECPLOG0(LOG_DEBUG, "NECP Application Policies:\n");
		NECPLOG0(LOG_DEBUG, "-----------\n");
		for (policy_i = 0; necp_kernel_socket_policies_app_layer_map != NULL && necp_kernel_socket_policies_app_layer_map[policy_i] != NULL; policy_i++) {
			policy = necp_kernel_socket_policies_app_layer_map[policy_i];
			proc_name(policy->session_pid, proc_name_string, MAXCOMLEN);
			NECPLOG(LOG_DEBUG, "\t%3d. Policy ID: %5d\tProcess: %10.10s\tOrder: %04d.%04d\tMask: %5x\tResult: %s\n", policy_i, policy->id, proc_name_string, policy->session_order, policy->order, policy->condition_mask, necp_get_result_description(result_string, policy->result, policy->result_parameter));
		}
		if (necp_kernel_socket_policies_app_layer_map[0] != NULL) {
			NECPLOG0(LOG_DEBUG, "-----------\n");
		}

		NECPLOG0(LOG_DEBUG, "NECP Socket Policies:\n");
		NECPLOG0(LOG_DEBUG, "-----------\n");
		for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) {
			NECPLOG(LOG_DEBUG, "\tApp Bucket: %d\n", app_i);
			for (policy_i = 0; necp_kernel_socket_policies_map[app_i] != NULL && (necp_kernel_socket_policies_map[app_i])[policy_i] != NULL; policy_i++) {
				policy = (necp_kernel_socket_policies_map[app_i])[policy_i];
				proc_name(policy->session_pid, proc_name_string, MAXCOMLEN);
				NECPLOG(LOG_DEBUG, "\t%3d. Policy ID: %5d\tProcess: %10.10s\tOrder: %04d.%04d\tMask: %5x\tResult: %s\n", policy_i, policy->id, proc_name_string, policy->session_order, policy->order, policy->condition_mask, necp_get_result_description(result_string, policy->result, policy->result_parameter));
			}
			NECPLOG0(LOG_DEBUG, "-----------\n");
		}
	}
}
static inline bool
necp_kernel_socket_result_is_trigger_service_type(struct necp_kernel_socket_policy *kernel_policy)
{
	return (kernel_policy->result >= NECP_KERNEL_POLICY_RESULT_TRIGGER && kernel_policy->result <= NECP_KERNEL_POLICY_RESULT_NO_TRIGGER_SCOPED);
}
static bool
necp_kernel_socket_policy_results_overlap(struct necp_kernel_socket_policy *upper_policy, struct necp_kernel_socket_policy *lower_policy)
{
	if (upper_policy->result == NECP_KERNEL_POLICY_RESULT_DROP) {
		// Drop always cancels out lower policies
		return (TRUE);
	} else if (upper_policy->result == NECP_KERNEL_POLICY_RESULT_SOCKET_FILTER ||
			   upper_policy->result == NECP_KERNEL_POLICY_RESULT_ROUTE_RULES ||
			   upper_policy->result == NECP_KERNEL_POLICY_RESULT_USE_NETAGENT ||
			   upper_policy->result == NECP_KERNEL_POLICY_RESULT_NETAGENT_SCOPED) {
		// Filters and route rules never cancel out lower policies
		return (FALSE);
	} else if (necp_kernel_socket_result_is_trigger_service_type(upper_policy)) {
		// Trigger/Scoping policies can overlap one another, but not other results
		return (necp_kernel_socket_result_is_trigger_service_type(lower_policy));
	} else if (upper_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
		if (upper_policy->session_order != lower_policy->session_order) {
			// A skip cannot override a policy of a different session
			return (FALSE);
		}

		if (upper_policy->result_parameter.skip_policy_order == 0 ||
			lower_policy->order >= upper_policy->result_parameter.skip_policy_order) {
			// This policy is beyond the skip
			return (FALSE);
		} else {
			// This policy is inside the skip
			return (TRUE);
		}
	}

	// A hard pass, flow divert, tunnel, or scope will currently block out lower policies
	return (TRUE);
}

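/*
 * Editor's note (illustrative sketch, not part of the original sources):
 * A SKIP result jumps over every lower policy of the same session whose
 * order is below result_parameter.skip_policy_order. With policies ordered
 * 1..4 in one session and policy 1 returning SKIP with skip_policy_order = 4,
 * policies 2 and 3 are "inside the skip" and are overridden, while policy 4
 * is "beyond the skip" and still applies.
 */
#if 0	/* Illustrative only; mirrors the overlap rules above. */
static bool
necp_example_skip_covers(struct necp_kernel_socket_policy *skip_policy,
    struct necp_kernel_socket_policy *lower_policy)
{
	return (skip_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP &&
	    skip_policy->session_order == lower_policy->session_order &&
	    skip_policy->result_parameter.skip_policy_order != 0 &&
	    lower_policy->order < skip_policy->result_parameter.skip_policy_order);
}
#endif
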
static bool
necp_kernel_socket_policy_is_unnecessary(struct necp_kernel_socket_policy *policy, struct necp_kernel_socket_policy **policy_array, int valid_indices)
{
	bool can_skip = FALSE;
	u_int32_t highest_skip_session_order = 0;
	u_int32_t highest_skip_order = 0;
	int i;
	for (i = 0; i < valid_indices; i++) {
		struct necp_kernel_socket_policy *compared_policy = policy_array[i];

		// For policies in a skip window, we can't mark conflicting policies as unnecessary
		if (can_skip) {
			if (highest_skip_session_order != compared_policy->session_order ||
				(highest_skip_order != 0 && compared_policy->order >= highest_skip_order)) {
				// If we've moved on to the next session, or passed the skip window
				highest_skip_session_order = 0;
				highest_skip_order = 0;
				can_skip = FALSE;
			} else {
				// If this policy is also a skip, it can increase the skip window
				if (compared_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
					if (compared_policy->result_parameter.skip_policy_order > highest_skip_order) {
						highest_skip_order = compared_policy->result_parameter.skip_policy_order;
					}
				}
				continue;
			}
		}

		if (compared_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
			// This policy is a skip. Set the skip window accordingly
			can_skip = TRUE;
			highest_skip_session_order = compared_policy->session_order;
			highest_skip_order = compared_policy->result_parameter.skip_policy_order;
		}

		// The result of the compared policy must be able to block out this policy result
		if (!necp_kernel_socket_policy_results_overlap(compared_policy, policy)) {
			continue;
		}

		// If new policy matches All Interfaces, compared policy must also
		if ((policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES) && !(compared_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES)) {
			continue;
		}

		// Default makes lower policies unnecessary always
		if (compared_policy->condition_mask == 0) {
			return (TRUE);
		}

		// Compared must be more general than policy, and include only conditions within policy
		if ((policy->condition_mask & compared_policy->condition_mask) != compared_policy->condition_mask) {
			continue;
		}

		// Negative conditions must match for the overlapping conditions
		if ((policy->condition_negated_mask & compared_policy->condition_mask) != (compared_policy->condition_negated_mask & compared_policy->condition_mask)) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_DOMAIN &&
			strcmp(compared_policy->cond_domain, policy->cond_domain) != 0) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT &&
			strcmp(compared_policy->cond_custom_entitlement, policy->cond_custom_entitlement) != 0) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID &&
			compared_policy->cond_account_id != policy->cond_account_id) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID &&
			compared_policy->cond_policy_id != policy->cond_policy_id) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID &&
			compared_policy->cond_app_id != policy->cond_app_id) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID &&
			compared_policy->cond_real_app_id != policy->cond_real_app_id) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_PID &&
			compared_policy->cond_pid != policy->cond_pid) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_UID &&
			compared_policy->cond_uid != policy->cond_uid) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE &&
			compared_policy->cond_bound_interface != policy->cond_bound_interface) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL &&
			compared_policy->cond_protocol != policy->cond_protocol) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS &&
			!(compared_policy->cond_traffic_class.start_tc <= policy->cond_traffic_class.start_tc &&
			  compared_policy->cond_traffic_class.end_tc >= policy->cond_traffic_class.end_tc)) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
			if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
				if (!necp_is_range_in_range((struct sockaddr *)&policy->cond_local_start, (struct sockaddr *)&policy->cond_local_end, (struct sockaddr *)&compared_policy->cond_local_start, (struct sockaddr *)&compared_policy->cond_local_end)) {
					continue;
				}
			} else if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
				if (compared_policy->cond_local_prefix > policy->cond_local_prefix ||
					!necp_is_addr_in_subnet((struct sockaddr *)&policy->cond_local_start, (struct sockaddr *)&compared_policy->cond_local_start, compared_policy->cond_local_prefix)) {
					continue;
				}
			}
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
			if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
				if (!necp_is_range_in_range((struct sockaddr *)&policy->cond_remote_start, (struct sockaddr *)&policy->cond_remote_end, (struct sockaddr *)&compared_policy->cond_remote_start, (struct sockaddr *)&compared_policy->cond_remote_end)) {
					continue;
				}
			} else if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
				if (compared_policy->cond_remote_prefix > policy->cond_remote_prefix ||
					!necp_is_addr_in_subnet((struct sockaddr *)&policy->cond_remote_start, (struct sockaddr *)&compared_policy->cond_remote_start, compared_policy->cond_remote_prefix)) {
					continue;
				}
			}
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_AGENT_TYPE &&
			memcmp(&compared_policy->cond_agent_type, &policy->cond_agent_type, sizeof(policy->cond_agent_type)) == 0) {
			continue;
		}

		return (TRUE);
	}

	return (FALSE);
}

static bool
necp_kernel_socket_policies_reprocess(void)
{
	int app_i;
	int bucket_allocation_counts[NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS];
	int bucket_current_free_index[NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS];
	int app_layer_allocation_count = 0;
	int app_layer_current_free_index = 0;
	struct necp_kernel_socket_policy *kernel_policy = NULL;

	LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	// Reset masks and counts
	necp_kernel_application_policies_condition_mask = 0;
	necp_kernel_socket_policies_condition_mask = 0;
	necp_kernel_application_policies_count = 0;
	necp_kernel_socket_policies_count = 0;
	necp_kernel_socket_policies_non_app_count = 0;

	// Reset all maps to NULL
	for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) {
		if (necp_kernel_socket_policies_map[app_i] != NULL) {
			FREE(necp_kernel_socket_policies_map[app_i], M_NECP);
			necp_kernel_socket_policies_map[app_i] = NULL;
		}

		bucket_allocation_counts[app_i] = 0;
	}
	if (necp_kernel_socket_policies_app_layer_map != NULL) {
		FREE(necp_kernel_socket_policies_app_layer_map, M_NECP);
		necp_kernel_socket_policies_app_layer_map = NULL;
	}

	// Create masks and counts
	LIST_FOREACH(kernel_policy, &necp_kernel_socket_policies, chain) {
		// App layer mask/count
		necp_kernel_application_policies_condition_mask |= kernel_policy->condition_mask;
		necp_kernel_application_policies_count++;
		app_layer_allocation_count++;

		if ((kernel_policy->condition_mask & NECP_KERNEL_CONDITION_AGENT_TYPE)) {
			// Agent type conditions only apply to app layer
			continue;
		}

		// Update socket layer bucket mask/counts
		necp_kernel_socket_policies_condition_mask |= kernel_policy->condition_mask;
		necp_kernel_socket_policies_count++;

		if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID) ||
			kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_APP_ID) {
			necp_kernel_socket_policies_non_app_count++;
			for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) {
				bucket_allocation_counts[app_i]++;
			}
		} else {
			bucket_allocation_counts[NECP_SOCKET_MAP_APP_ID_TO_BUCKET(kernel_policy->cond_app_id)]++;
		}
	}

	// Allocate maps
	for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) {
		if (bucket_allocation_counts[app_i] > 0) {
			// Allocate a NULL-terminated array of policy pointers for each bucket
			MALLOC(necp_kernel_socket_policies_map[app_i], struct necp_kernel_socket_policy **, sizeof(struct necp_kernel_socket_policy *) * (bucket_allocation_counts[app_i] + 1), M_NECP, M_WAITOK);
			if (necp_kernel_socket_policies_map[app_i] == NULL) {
				goto fail;
			}

			// Initialize the first entry to NULL
			(necp_kernel_socket_policies_map[app_i])[0] = NULL;
		}
		bucket_current_free_index[app_i] = 0;
	}
	MALLOC(necp_kernel_socket_policies_app_layer_map, struct necp_kernel_socket_policy **, sizeof(struct necp_kernel_socket_policy *) * (app_layer_allocation_count + 1), M_NECP, M_WAITOK);
	if (necp_kernel_socket_policies_app_layer_map == NULL) {
		goto fail;
	}
	necp_kernel_socket_policies_app_layer_map[0] = NULL;

	// Fill out maps
	LIST_FOREACH(kernel_policy, &necp_kernel_socket_policies, chain) {
		// Add app layer policies
		if (!necp_kernel_socket_policy_is_unnecessary(kernel_policy, necp_kernel_socket_policies_app_layer_map, app_layer_current_free_index)) {
			necp_kernel_socket_policies_app_layer_map[app_layer_current_free_index] = kernel_policy;
			app_layer_current_free_index++;
			necp_kernel_socket_policies_app_layer_map[app_layer_current_free_index] = NULL;
		}

		if ((kernel_policy->condition_mask & NECP_KERNEL_CONDITION_AGENT_TYPE)) {
			// Agent type conditions only apply to app layer
			continue;
		}

		// Add socket policies
		if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID) ||
			kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_APP_ID) {
			for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) {
				if (!necp_kernel_socket_policy_is_unnecessary(kernel_policy, necp_kernel_socket_policies_map[app_i], bucket_current_free_index[app_i])) {
					(necp_kernel_socket_policies_map[app_i])[(bucket_current_free_index[app_i])] = kernel_policy;
					bucket_current_free_index[app_i]++;
					(necp_kernel_socket_policies_map[app_i])[(bucket_current_free_index[app_i])] = NULL;
				}
			}
		} else {
			app_i = NECP_SOCKET_MAP_APP_ID_TO_BUCKET(kernel_policy->cond_app_id);
			if (!necp_kernel_socket_policy_is_unnecessary(kernel_policy, necp_kernel_socket_policies_map[app_i], bucket_current_free_index[app_i])) {
				(necp_kernel_socket_policies_map[app_i])[(bucket_current_free_index[app_i])] = kernel_policy;
				bucket_current_free_index[app_i]++;
				(necp_kernel_socket_policies_map[app_i])[(bucket_current_free_index[app_i])] = NULL;
			}
		}
	}
	necp_kernel_socket_policies_dump_all();
	BUMP_KERNEL_SOCKET_POLICIES_GENERATION_COUNT();
	return (TRUE);

fail:
	// Free memory, reset masks to 0
	necp_kernel_application_policies_condition_mask = 0;
	necp_kernel_socket_policies_condition_mask = 0;
	necp_kernel_application_policies_count = 0;
	necp_kernel_socket_policies_count = 0;
	necp_kernel_socket_policies_non_app_count = 0;
	for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) {
		if (necp_kernel_socket_policies_map[app_i] != NULL) {
			FREE(necp_kernel_socket_policies_map[app_i], M_NECP);
			necp_kernel_socket_policies_map[app_i] = NULL;
		}
	}
	if (necp_kernel_socket_policies_app_layer_map != NULL) {
		FREE(necp_kernel_socket_policies_app_layer_map, M_NECP);
		necp_kernel_socket_policies_app_layer_map = NULL;
	}
	return (FALSE);
}

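/*
 * Editor's note (illustrative sketch, not part of the original sources):
 * Policies that match on a specific application ID land only in the bucket
 * selected by NECP_SOCKET_MAP_APP_ID_TO_BUCKET(app_id); policies without an
 * app condition (or with a negated one) are copied into every bucket. A
 * lookup therefore only has to walk the single NULL-terminated array for
 * the socket's app ID, roughly as sketched below.
 */
#if 0	/* Illustrative only. */
static struct necp_kernel_socket_policy *
necp_example_first_policy_for_app(u_int32_t app_id)
{
	struct necp_kernel_socket_policy **bucket =
	    necp_kernel_socket_policies_map[NECP_SOCKET_MAP_APP_ID_TO_BUCKET(app_id)];
	return (bucket != NULL ? bucket[0] : NULL);	// Bucket arrays are NULL-terminated
}
#endif
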
static u_int32_t
necp_get_new_string_id(void)
{
	static u_int32_t necp_last_string_id = 0;

	u_int32_t newid = 0;

	LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	bool wrapped = FALSE;
	do {
		necp_last_string_id++;
		if (necp_last_string_id < 1) {
			if (wrapped) {
				// Already wrapped, give up
				NECPLOG0(LOG_ERR, "Failed to find a free string ID.\n");
				return (0);
			}
			necp_last_string_id = 1;
			wrapped = TRUE;
		}
		newid = necp_last_string_id;
	} while (necp_lookup_string_with_id_locked(&necp_account_id_list, newid) != NULL); // If already used, keep trying

	if (newid == 0) {
		NECPLOG0(LOG_ERR, "Allocate string id failed.\n");
		return (0);
	}

	return (newid);
}

static struct necp_string_id_mapping *
necp_lookup_string_to_id_locked(struct necp_string_id_mapping_list *list, char *string)
{
	struct necp_string_id_mapping *searchentry = NULL;
	struct necp_string_id_mapping *foundentry = NULL;

	LIST_FOREACH(searchentry, list, chain) {
		if (strcmp(searchentry->string, string) == 0) {
			foundentry = searchentry;
			break;
		}
	}

	return (foundentry);
}

static struct necp_string_id_mapping *
necp_lookup_string_with_id_locked(struct necp_string_id_mapping_list *list, u_int32_t local_id)
{
	struct necp_string_id_mapping *searchentry = NULL;
	struct necp_string_id_mapping *foundentry = NULL;

	LIST_FOREACH(searchentry, list, chain) {
		if (searchentry->id == local_id) {
			foundentry = searchentry;
			break;
		}
	}

	return (foundentry);
}

static u_int32_t
necp_create_string_to_id_mapping(struct necp_string_id_mapping_list *list, char *string)
{
	u_int32_t string_id = 0;
	struct necp_string_id_mapping *existing_mapping = NULL;

	LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	existing_mapping = necp_lookup_string_to_id_locked(list, string);
	if (existing_mapping != NULL) {
		string_id = existing_mapping->id;
		existing_mapping->refcount++;
	} else {
		struct necp_string_id_mapping *new_mapping = NULL;
		MALLOC(new_mapping, struct necp_string_id_mapping *, sizeof(struct necp_string_id_mapping), M_NECP, M_WAITOK);
		if (new_mapping != NULL) {
			memset(new_mapping, 0, sizeof(struct necp_string_id_mapping));

			size_t length = strlen(string) + 1;
			MALLOC(new_mapping->string, char *, length, M_NECP, M_WAITOK);
			if (new_mapping->string != NULL) {
				memcpy(new_mapping->string, string, length);
				new_mapping->id = necp_get_new_string_id();
				new_mapping->refcount = 1;
				LIST_INSERT_HEAD(list, new_mapping, chain);
				string_id = new_mapping->id;
			} else {
				FREE(new_mapping, M_NECP);
			}
		}
	}

	return (string_id);
}

static bool
necp_remove_string_to_id_mapping(struct necp_string_id_mapping_list *list, char *string)
{
	struct necp_string_id_mapping *existing_mapping = NULL;

	LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	existing_mapping = necp_lookup_string_to_id_locked(list, string);
	if (existing_mapping != NULL) {
		if (--existing_mapping->refcount == 0) {
			LIST_REMOVE(existing_mapping, chain);
			FREE(existing_mapping->string, M_NECP);
			FREE(existing_mapping, M_NECP);
		}
		return (TRUE);
	}

	return (FALSE);
}

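/*
 * Editor's note (illustrative sketch, not part of the original sources):
 * The string<->ID mappings are reference counted, so creating the same
 * string twice hands back the same ID, and the mapping is only freed once
 * the matching number of removes has happened. Under the policy lock held
 * exclusively (the account string below is hypothetical):
 */
#if 0	/* Illustrative only. */
	u_int32_t id1 = necp_create_string_to_id_mapping(&necp_account_id_list, "com.example.account");
	u_int32_t id2 = necp_create_string_to_id_mapping(&necp_account_id_list, "com.example.account");
	// id1 == id2; refcount is now 2
	necp_remove_string_to_id_mapping(&necp_account_id_list, "com.example.account");
	necp_remove_string_to_id_mapping(&necp_account_id_list, "com.example.account");	// Mapping freed here
#endif
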
#define NECP_FIRST_VALID_ROUTE_RULE_ID 1
#define NECP_FIRST_VALID_AGGREGATE_ROUTE_RULE_ID UINT16_MAX
static u_int32_t
necp_get_new_route_rule_id(bool aggregate)
{
	static u_int32_t necp_last_route_rule_id = 0;
	static u_int32_t necp_last_aggregate_route_rule_id = 0;

	u_int32_t newid = 0;

	if (!aggregate) {
		// Main necp_kernel_policy_lock protects non-aggregate rule IDs
		LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

		bool wrapped = FALSE;
		do {
			necp_last_route_rule_id++;
			if (necp_last_route_rule_id < NECP_FIRST_VALID_ROUTE_RULE_ID ||
				necp_last_route_rule_id >= NECP_FIRST_VALID_AGGREGATE_ROUTE_RULE_ID) {
				if (wrapped) {
					// Already wrapped, give up
					NECPLOG0(LOG_ERR, "Failed to find a free route rule id.\n");
					return (0);
				}
				necp_last_route_rule_id = NECP_FIRST_VALID_ROUTE_RULE_ID;
				wrapped = TRUE;
			}
			newid = necp_last_route_rule_id;
		} while (necp_lookup_route_rule_locked(&necp_route_rules, newid) != NULL); // If already used, keep trying
	} else {
		// necp_route_rule_lock protects aggregate rule IDs
		LCK_RW_ASSERT(&necp_route_rule_lock, LCK_RW_ASSERT_EXCLUSIVE);

		bool wrapped = FALSE;
		do {
			necp_last_aggregate_route_rule_id++;
			if (necp_last_aggregate_route_rule_id < NECP_FIRST_VALID_AGGREGATE_ROUTE_RULE_ID) {
				if (wrapped) {
					// Already wrapped, give up
					NECPLOG0(LOG_ERR, "Failed to find a free aggregate route rule id.\n");
					return (0);
				}
				necp_last_aggregate_route_rule_id = NECP_FIRST_VALID_AGGREGATE_ROUTE_RULE_ID;
				wrapped = TRUE;
			}
			newid = necp_last_aggregate_route_rule_id;
		} while (necp_lookup_route_rule_locked(&necp_route_rules, newid) != NULL); // If already used, keep trying
	}

	if (newid == 0) {
		NECPLOG0(LOG_ERR, "Allocate route rule ID failed.\n");
		return (0);
	}

	return (newid);
}

static struct necp_route_rule *
necp_lookup_route_rule_locked(struct necp_route_rule_list *list, u_int32_t route_rule_id)
{
	struct necp_route_rule *searchentry = NULL;
	struct necp_route_rule *foundentry = NULL;

	LIST_FOREACH(searchentry, list, chain) {
		if (searchentry->id == route_rule_id) {
			foundentry = searchentry;
			break;
		}
	}

	return (foundentry);
}

static struct necp_route_rule *
necp_lookup_route_rule_by_contents_locked(struct necp_route_rule_list *list, u_int32_t default_action, u_int8_t cellular_action, u_int8_t wifi_action, u_int8_t wired_action, u_int8_t expensive_action, u_int32_t *if_indices, u_int8_t *if_actions)
{
	struct necp_route_rule *searchentry = NULL;
	struct necp_route_rule *foundentry = NULL;

	LIST_FOREACH(searchentry, list, chain) {
		if (searchentry->default_action == default_action &&
			searchentry->cellular_action == cellular_action &&
			searchentry->wifi_action == wifi_action &&
			searchentry->wired_action == wired_action &&
			searchentry->expensive_action == expensive_action) {
			bool match_failed = FALSE;
			size_t index_a = 0;
			size_t index_b = 0;
			size_t count_a = 0;
			size_t count_b = 0;
			for (index_a = 0; index_a < MAX_ROUTE_RULE_INTERFACES; index_a++) {
				bool found_index = FALSE;
				if (searchentry->exception_if_indices[index_a] == 0) {
					break;
				}
				count_a++;
				for (index_b = 0; index_b < MAX_ROUTE_RULE_INTERFACES; index_b++) {
					if (if_indices[index_b] == 0) {
						break;
					}
					if (index_b >= count_b) {
						count_b = index_b + 1;
					}
					if (searchentry->exception_if_indices[index_a] == if_indices[index_b] &&
						searchentry->exception_if_actions[index_a] == if_actions[index_b]) {
						found_index = TRUE;
						break;
					}
				}
				if (!found_index) {
					match_failed = TRUE;
					break;
				}
			}
			if (!match_failed && count_a == count_b) {
				foundentry = searchentry;
				break;
			}
		}
	}

	return (foundentry);
}

static u_int32_t
necp_create_route_rule(struct necp_route_rule_list *list, u_int8_t *route_rules_array, u_int32_t route_rules_array_size)
{
	size_t offset = 0;
	u_int32_t route_rule_id = 0;
	struct necp_route_rule *existing_rule = NULL;
	u_int32_t default_action = NECP_ROUTE_RULE_ALLOW_INTERFACE;
	u_int8_t cellular_action = NECP_ROUTE_RULE_NONE;
	u_int8_t wifi_action = NECP_ROUTE_RULE_NONE;
	u_int8_t wired_action = NECP_ROUTE_RULE_NONE;
	u_int8_t expensive_action = NECP_ROUTE_RULE_NONE;
	u_int32_t if_indices[MAX_ROUTE_RULE_INTERFACES];
	size_t num_valid_indices = 0;
	memset(&if_indices, 0, sizeof(if_indices));
	u_int8_t if_actions[MAX_ROUTE_RULE_INTERFACES];
	memset(&if_actions, 0, sizeof(if_actions));

	LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	if (route_rules_array == NULL || route_rules_array_size == 0) {
		return (0);
	}

	// Process each route rule TLV in the array
	while (offset < route_rules_array_size) {
		ifnet_t rule_interface = NULL;
		char interface_name[IFXNAMSIZ];
		u_int32_t length = 0;
		u_int8_t *value = necp_buffer_get_tlv_value(route_rules_array, offset, &length);

		u_int8_t rule_type = necp_policy_condition_get_type_from_buffer(value, length);
		u_int8_t rule_flags = necp_policy_condition_get_flags_from_buffer(value, length);
		u_int32_t rule_length = necp_policy_condition_get_value_length_from_buffer(value, length);
		u_int8_t *rule_value = necp_policy_condition_get_value_pointer_from_buffer(value, length);

		if (rule_type == NECP_ROUTE_RULE_NONE) {
			// Don't allow an explicit rule to be None action
			offset += sizeof(u_int8_t) + sizeof(u_int32_t) + length;
			continue;
		}

		if (rule_length == 0) {
			if (rule_flags & NECP_ROUTE_RULE_FLAG_CELLULAR) {
				cellular_action = rule_type;
			}
			if (rule_flags & NECP_ROUTE_RULE_FLAG_WIFI) {
				wifi_action = rule_type;
			}
			if (rule_flags & NECP_ROUTE_RULE_FLAG_WIRED) {
				wired_action = rule_type;
			}
			if (rule_flags & NECP_ROUTE_RULE_FLAG_EXPENSIVE) {
				expensive_action = rule_type;
			}
			if (rule_flags == 0) {
				default_action = rule_type;
			}
			offset += sizeof(u_int8_t) + sizeof(u_int32_t) + length;
			continue;
		}

		if (num_valid_indices >= MAX_ROUTE_RULE_INTERFACES) {
			offset += sizeof(u_int8_t) + sizeof(u_int32_t) + length;
			continue;
		}

		if (rule_length <= IFXNAMSIZ) {
			memcpy(interface_name, rule_value, rule_length);
			interface_name[rule_length - 1] = 0; // Make sure the string is NULL terminated
			if (ifnet_find_by_name(interface_name, &rule_interface) == 0) {
				if_actions[num_valid_indices] = rule_type;
				if_indices[num_valid_indices++] = rule_interface->if_index;
				ifnet_release(rule_interface);
			}
		}
		offset += sizeof(u_int8_t) + sizeof(u_int32_t) + length;
	}

	existing_rule = necp_lookup_route_rule_by_contents_locked(list, default_action, cellular_action, wifi_action, wired_action, expensive_action, if_indices, if_actions);
	if (existing_rule != NULL) {
		route_rule_id = existing_rule->id;
		existing_rule->refcount++;
	} else {
		struct necp_route_rule *new_rule = NULL;
		MALLOC(new_rule, struct necp_route_rule *, sizeof(struct necp_route_rule), M_NECP, M_WAITOK);
		if (new_rule != NULL) {
			memset(new_rule, 0, sizeof(struct necp_route_rule));
			route_rule_id = new_rule->id = necp_get_new_route_rule_id(false);
			new_rule->default_action = default_action;
			new_rule->cellular_action = cellular_action;
			new_rule->wifi_action = wifi_action;
			new_rule->wired_action = wired_action;
			new_rule->expensive_action = expensive_action;
			memcpy(&new_rule->exception_if_indices, &if_indices, sizeof(if_indices));
			memcpy(&new_rule->exception_if_actions, &if_actions, sizeof(if_actions));
			new_rule->refcount = 1;
			LIST_INSERT_HEAD(list, new_rule, chain);
		}
	}
	return (route_rule_id);
}

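/*
 * Editor's note (illustrative sketch, not part of the original sources):
 * Each parsed route-rule TLV carries an action (rule_type), a set of
 * interface-class flags, and an optional interface name. A zero-length
 * rule applies the action to the flagged classes (or to the default action
 * when no flags are set); a named rule becomes an entry in the
 * exception_if_indices/exception_if_actions arrays. The flag-only mapping
 * above boils down to:
 */
#if 0	/* Illustrative only; mirrors the parsing loop above. */
	if (rule_flags == 0) {
		default_action = rule_type;		// Applies to every route
	}
	if (rule_flags & NECP_ROUTE_RULE_FLAG_CELLULAR) {
		cellular_action = rule_type;	// Applies only to cellular interfaces
	}
	// ...and likewise for the WIFI, WIRED, and EXPENSIVE flags
#endif
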
static void
necp_remove_aggregate_route_rule_for_id(u_int32_t rule_id)
{
	if (rule_id) {
		lck_rw_lock_exclusive(&necp_route_rule_lock);

		struct necp_aggregate_route_rule *existing_rule = NULL;
		struct necp_aggregate_route_rule *tmp_rule = NULL;

		LIST_FOREACH_SAFE(existing_rule, &necp_aggregate_route_rules, chain, tmp_rule) {
			int index = 0;
			for (index = 0; index < MAX_AGGREGATE_ROUTE_RULES; index++) {
				u_int32_t route_rule_id = existing_rule->rule_ids[index];
				if (route_rule_id == rule_id) {
					LIST_REMOVE(existing_rule, chain);
					FREE(existing_rule, M_NECP);
					break;
				}
			}
		}

		lck_rw_done(&necp_route_rule_lock);
	}
}

static bool
necp_remove_route_rule(struct necp_route_rule_list *list, u_int32_t route_rule_id)
{
	struct necp_route_rule *existing_rule = NULL;

	LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	existing_rule = necp_lookup_route_rule_locked(list, route_rule_id);
	if (existing_rule != NULL) {
		if (--existing_rule->refcount == 0) {
			necp_remove_aggregate_route_rule_for_id(existing_rule->id);
			LIST_REMOVE(existing_rule, chain);
			FREE(existing_rule, M_NECP);
		}
		return (TRUE);
	}

	return (FALSE);
}

*
5337 necp_lookup_aggregate_route_rule_locked(u_int32_t route_rule_id
)
5339 struct necp_aggregate_route_rule
*searchentry
= NULL
;
5340 struct necp_aggregate_route_rule
*foundentry
= NULL
;
5342 lck_rw_lock_shared(&necp_route_rule_lock
);
5344 LIST_FOREACH(searchentry
, &necp_aggregate_route_rules
, chain
) {
5345 if (searchentry
->id
== route_rule_id
) {
5346 foundentry
= searchentry
;
5351 lck_rw_done(&necp_route_rule_lock
);
5353 return (foundentry
);
static u_int32_t
necp_create_aggregate_route_rule(u_int32_t *rule_ids)
{
	u_int32_t aggregate_route_rule_id = 0;
	struct necp_aggregate_route_rule *new_rule = NULL;
	struct necp_aggregate_route_rule *existing_rule = NULL;

	LIST_FOREACH(existing_rule, &necp_aggregate_route_rules, chain) {
		if (memcmp(existing_rule->rule_ids, rule_ids, (sizeof(u_int32_t) * MAX_AGGREGATE_ROUTE_RULES)) == 0) {
			return (existing_rule->id);
		}
	}

	lck_rw_lock_exclusive(&necp_route_rule_lock);

	LIST_FOREACH(existing_rule, &necp_aggregate_route_rules, chain) {
		// Re-check, in case something else created the rule while we are waiting to lock
		if (memcmp(existing_rule->rule_ids, rule_ids, (sizeof(u_int32_t) * MAX_AGGREGATE_ROUTE_RULES)) == 0) {
			lck_rw_done(&necp_route_rule_lock);
			return (existing_rule->id);
		}
	}

	MALLOC(new_rule, struct necp_aggregate_route_rule *, sizeof(struct necp_aggregate_route_rule), M_NECP, M_WAITOK);
	if (new_rule != NULL) {
		memset(new_rule, 0, sizeof(struct necp_aggregate_route_rule));
		aggregate_route_rule_id = new_rule->id = necp_get_new_route_rule_id(true);
		new_rule->id = aggregate_route_rule_id;
		memcpy(new_rule->rule_ids, rule_ids, (sizeof(u_int32_t) * MAX_AGGREGATE_ROUTE_RULES));
		LIST_INSERT_HEAD(&necp_aggregate_route_rules, new_rule, chain);
	}
	lck_rw_done(&necp_route_rule_lock);

	return (aggregate_route_rule_id);
}

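/*
 * Editor's note (illustrative sketch, not part of the original sources):
 * necp_create_aggregate_route_rule() first scans the list without the
 * route-rule lock and then re-checks after taking it exclusively, so two
 * threads racing to create the same aggregate end up sharing one entry.
 * A caller builds the fixed-size ID array and gets a single aggregate ID
 * back (the individual rule IDs below are hypothetical):
 */
#if 0	/* Illustrative only. */
	u_int32_t rule_ids[MAX_AGGREGATE_ROUTE_RULES] = { 0 };
	rule_ids[0] = first_route_rule_id;	// Hypothetical existing rule IDs
	rule_ids[1] = second_route_rule_id;
	u_int32_t aggregate_id = necp_create_aggregate_route_rule(rule_ids);
#endif
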
#define NECP_NULL_SERVICE_ID 1
#define NECP_FIRST_VALID_SERVICE_ID 2
#define NECP_FIRST_VALID_APP_ID UINT16_MAX
static u_int32_t
necp_get_new_uuid_id(bool service)
{
	static u_int32_t necp_last_service_uuid_id = 0;
	static u_int32_t necp_last_app_uuid_id = 0;

	u_int32_t newid = 0;

	LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	if (service) {
		bool wrapped = FALSE;
		do {
			necp_last_service_uuid_id++;
			if (necp_last_service_uuid_id < NECP_FIRST_VALID_SERVICE_ID ||
				necp_last_service_uuid_id >= NECP_FIRST_VALID_APP_ID) {
				if (wrapped) {
					// Already wrapped, give up
					NECPLOG0(LOG_ERR, "Failed to find a free service UUID.\n");
					return (NECP_NULL_SERVICE_ID);
				}
				necp_last_service_uuid_id = NECP_FIRST_VALID_SERVICE_ID;
				wrapped = TRUE;
			}
			newid = necp_last_service_uuid_id;
		} while (necp_uuid_lookup_uuid_with_service_id_locked(newid) != NULL); // If already used, keep trying
	} else {
		bool wrapped = FALSE;
		do {
			necp_last_app_uuid_id++;
			if (necp_last_app_uuid_id < NECP_FIRST_VALID_APP_ID) {
				if (wrapped) {
					// Already wrapped, give up
					NECPLOG0(LOG_ERR, "Failed to find a free app UUID.\n");
					return (NECP_NULL_SERVICE_ID);
				}
				necp_last_app_uuid_id = NECP_FIRST_VALID_APP_ID;
				wrapped = TRUE;
			}
			newid = necp_last_app_uuid_id;
		} while (necp_uuid_lookup_uuid_with_app_id_locked(newid) != NULL); // If already used, keep trying
	}

	if (newid == NECP_NULL_SERVICE_ID) {
		NECPLOG0(LOG_ERR, "Allocate uuid ID failed.\n");
		return (NECP_NULL_SERVICE_ID);
	}

	return (newid);
}

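/*
 * Editor's note (illustrative sketch, not part of the original sources):
 * The UUID ID space is partitioned: NECP_NULL_SERVICE_ID (1) is reserved,
 * service mappings draw from [NECP_FIRST_VALID_SERVICE_ID, NECP_FIRST_VALID_APP_ID),
 * and app mappings start at NECP_FIRST_VALID_APP_ID, so the range of an ID
 * alone tells which table it belongs to:
 */
#if 0	/* Illustrative only. */
static bool
necp_example_id_is_service_id(u_int32_t local_id)
{
	return (local_id >= NECP_FIRST_VALID_SERVICE_ID &&
	    local_id < NECP_FIRST_VALID_APP_ID);
}
#endif
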
static struct necp_uuid_id_mapping *
necp_uuid_lookup_app_id_locked(uuid_t uuid)
{
	struct necp_uuid_id_mapping *searchentry = NULL;
	struct necp_uuid_id_mapping *foundentry = NULL;

	LIST_FOREACH(searchentry, APPUUIDHASH(uuid), chain) {
		if (uuid_compare(searchentry->uuid, uuid) == 0) {
			foundentry = searchentry;
			break;
		}
	}

	return (foundentry);
}

static struct necp_uuid_id_mapping *
necp_uuid_lookup_uuid_with_app_id_locked(u_int32_t local_id)
{
	struct necp_uuid_id_mapping *searchentry = NULL;
	struct necp_uuid_id_mapping *foundentry = NULL;

	struct necp_uuid_id_mapping_head *uuid_list_head = NULL;
	for (uuid_list_head = &necp_uuid_app_id_hashtbl[necp_uuid_app_id_hash_num_buckets - 1]; uuid_list_head >= necp_uuid_app_id_hashtbl; uuid_list_head--) {
		LIST_FOREACH(searchentry, uuid_list_head, chain) {
			if (searchentry->id == local_id) {
				foundentry = searchentry;
				break;
			}
		}
	}

	return (foundentry);
}

static u_int32_t
necp_create_uuid_app_id_mapping(uuid_t uuid, bool *allocated_mapping, bool uuid_policy_table)
{
	u_int32_t local_id = 0;
	struct necp_uuid_id_mapping *existing_mapping = NULL;

	LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	if (allocated_mapping) {
		*allocated_mapping = FALSE;
	}

	existing_mapping = necp_uuid_lookup_app_id_locked(uuid);
	if (existing_mapping != NULL) {
		local_id = existing_mapping->id;
		existing_mapping->refcount++;
		if (uuid_policy_table) {
			existing_mapping->table_refcount++;
		}
	} else {
		struct necp_uuid_id_mapping *new_mapping = NULL;
		MALLOC(new_mapping, struct necp_uuid_id_mapping *, sizeof(*new_mapping), M_NECP, M_WAITOK);
		if (new_mapping != NULL) {
			uuid_copy(new_mapping->uuid, uuid);
			new_mapping->id = necp_get_new_uuid_id(false);
			new_mapping->refcount = 1;
			if (uuid_policy_table) {
				new_mapping->table_refcount = 1;
			} else {
				new_mapping->table_refcount = 0;
			}

			LIST_INSERT_HEAD(APPUUIDHASH(uuid), new_mapping, chain);

			if (allocated_mapping) {
				*allocated_mapping = TRUE;
			}

			local_id = new_mapping->id;
		}
	}

	return (local_id);
}

static bool
necp_remove_uuid_app_id_mapping(uuid_t uuid, bool *removed_mapping, bool uuid_policy_table)
{
	struct necp_uuid_id_mapping *existing_mapping = NULL;

	LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	if (removed_mapping) {
		*removed_mapping = FALSE;
	}

	existing_mapping = necp_uuid_lookup_app_id_locked(uuid);
	if (existing_mapping != NULL) {
		if (uuid_policy_table) {
			existing_mapping->table_refcount--;
		}
		if (--existing_mapping->refcount == 0) {
			LIST_REMOVE(existing_mapping, chain);
			FREE(existing_mapping, M_NECP);
			if (removed_mapping) {
				*removed_mapping = TRUE;
			}
		}
		return (TRUE);
	}

	return (FALSE);
}

static struct necp_uuid_id_mapping *
necp_uuid_get_null_service_id_mapping(void)
{
	static struct necp_uuid_id_mapping null_mapping;
	uuid_clear(null_mapping.uuid);
	null_mapping.id = NECP_NULL_SERVICE_ID;

	return (&null_mapping);
}

static struct necp_uuid_id_mapping *
necp_uuid_lookup_service_id_locked(uuid_t uuid)
{
	struct necp_uuid_id_mapping *searchentry = NULL;
	struct necp_uuid_id_mapping *foundentry = NULL;

	if (uuid_is_null(uuid)) {
		return necp_uuid_get_null_service_id_mapping();
	}

	LIST_FOREACH(searchentry, &necp_uuid_service_id_list, chain) {
		if (uuid_compare(searchentry->uuid, uuid) == 0) {
			foundentry = searchentry;
			break;
		}
	}

	return (foundentry);
}

static struct necp_uuid_id_mapping *
necp_uuid_lookup_uuid_with_service_id_locked(u_int32_t local_id)
{
	struct necp_uuid_id_mapping *searchentry = NULL;
	struct necp_uuid_id_mapping *foundentry = NULL;

	if (local_id == NECP_NULL_SERVICE_ID) {
		return necp_uuid_get_null_service_id_mapping();
	}

	LIST_FOREACH(searchentry, &necp_uuid_service_id_list, chain) {
		if (searchentry->id == local_id) {
			foundentry = searchentry;
			break;
		}
	}

	return (foundentry);
}

static u_int32_t
necp_create_uuid_service_id_mapping(uuid_t uuid)
{
	u_int32_t local_id = 0;
	struct necp_uuid_id_mapping *existing_mapping = NULL;

	if (uuid_is_null(uuid)) {
		return (NECP_NULL_SERVICE_ID);
	}

	LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	existing_mapping = necp_uuid_lookup_service_id_locked(uuid);
	if (existing_mapping != NULL) {
		local_id = existing_mapping->id;
		existing_mapping->refcount++;
	} else {
		struct necp_uuid_id_mapping *new_mapping = NULL;
		MALLOC(new_mapping, struct necp_uuid_id_mapping *, sizeof(*new_mapping), M_NECP, M_WAITOK);
		if (new_mapping != NULL) {
			uuid_copy(new_mapping->uuid, uuid);
			new_mapping->id = necp_get_new_uuid_id(true);
			new_mapping->refcount = 1;

			LIST_INSERT_HEAD(&necp_uuid_service_id_list, new_mapping, chain);

			local_id = new_mapping->id;
		}
	}

	return (local_id);
}

static bool
necp_remove_uuid_service_id_mapping(uuid_t uuid)
{
	struct necp_uuid_id_mapping *existing_mapping = NULL;

	if (uuid_is_null(uuid)) {
		return (TRUE);
	}

	LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	existing_mapping = necp_uuid_lookup_app_id_locked(uuid);
	if (existing_mapping != NULL) {
		if (--existing_mapping->refcount == 0) {
			LIST_REMOVE(existing_mapping, chain);
			FREE(existing_mapping, M_NECP);
		}
		return (TRUE);
	}

	return (FALSE);
}

static bool
necp_kernel_socket_policies_update_uuid_table(void)
{
	LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	if (necp_uuid_app_id_mappings_dirty) {
		if (proc_uuid_policy_kernel(PROC_UUID_POLICY_OPERATION_CLEAR, NULL, PROC_UUID_NECP_APP_POLICY) < 0) {
			NECPLOG0(LOG_DEBUG, "Error clearing uuids from policy table\n");
			return (FALSE);
		}

		if (necp_num_uuid_app_id_mappings > 0) {
			struct necp_uuid_id_mapping_head *uuid_list_head = NULL;
			for (uuid_list_head = &necp_uuid_app_id_hashtbl[necp_uuid_app_id_hash_num_buckets - 1]; uuid_list_head >= necp_uuid_app_id_hashtbl; uuid_list_head--) {
				struct necp_uuid_id_mapping *mapping = NULL;
				LIST_FOREACH(mapping, uuid_list_head, chain) {
					if (mapping->table_refcount > 0 &&
						proc_uuid_policy_kernel(PROC_UUID_POLICY_OPERATION_ADD, mapping->uuid, PROC_UUID_NECP_APP_POLICY) < 0) {
						NECPLOG0(LOG_DEBUG, "Error adding uuid to policy table\n");
					}
				}
			}
		}

		necp_uuid_app_id_mappings_dirty = FALSE;
	}

	return (TRUE);
}

#define NECP_KERNEL_VALID_IP_OUTPUT_CONDITIONS (NECP_KERNEL_CONDITION_ALL_INTERFACES | NECP_KERNEL_CONDITION_BOUND_INTERFACE | NECP_KERNEL_CONDITION_PROTOCOL | NECP_KERNEL_CONDITION_LOCAL_START | NECP_KERNEL_CONDITION_LOCAL_END | NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_REMOTE_START | NECP_KERNEL_CONDITION_REMOTE_END | NECP_KERNEL_CONDITION_REMOTE_PREFIX | NECP_KERNEL_CONDITION_POLICY_ID | NECP_KERNEL_CONDITION_LAST_INTERFACE)
static necp_kernel_policy_id
necp_kernel_ip_output_policy_add(necp_policy_order order, necp_policy_order suborder, u_int32_t session_order, int session_pid, u_int32_t condition_mask, u_int32_t condition_negated_mask, necp_kernel_policy_id cond_policy_id, ifnet_t cond_bound_interface, u_int32_t cond_last_interface_index, u_int16_t cond_protocol, union necp_sockaddr_union *cond_local_start, union necp_sockaddr_union *cond_local_end, u_int8_t cond_local_prefix, union necp_sockaddr_union *cond_remote_start, union necp_sockaddr_union *cond_remote_end, u_int8_t cond_remote_prefix, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter)
{
	struct necp_kernel_ip_output_policy *new_kernel_policy = NULL;
	struct necp_kernel_ip_output_policy *tmp_kernel_policy = NULL;

	MALLOC_ZONE(new_kernel_policy, struct necp_kernel_ip_output_policy *, sizeof(*new_kernel_policy), M_NECP_IP_POLICY, M_WAITOK);
	if (new_kernel_policy == NULL) {
		goto done;
	}

	memset(new_kernel_policy, 0, sizeof(*new_kernel_policy)); // M_ZERO is not supported for MALLOC_ZONE
	new_kernel_policy->id = necp_kernel_policy_get_new_id(false);
	new_kernel_policy->suborder = suborder;
	new_kernel_policy->order = order;
	new_kernel_policy->session_order = session_order;
	new_kernel_policy->session_pid = session_pid;

	// Sanitize condition mask
	new_kernel_policy->condition_mask = (condition_mask & NECP_KERNEL_VALID_IP_OUTPUT_CONDITIONS);
	if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES) && (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE)) {
		new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_BOUND_INTERFACE;
	}
	if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) && (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX)) {
		new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_LOCAL_PREFIX;
	}
	if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) && (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX)) {
		new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_REMOTE_PREFIX;
	}
	new_kernel_policy->condition_negated_mask = condition_negated_mask & new_kernel_policy->condition_mask;

	// Set condition values
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID) {
		new_kernel_policy->cond_policy_id = cond_policy_id;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
		if (cond_bound_interface) {
			ifnet_reference(cond_bound_interface);
		}
		new_kernel_policy->cond_bound_interface = cond_bound_interface;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LAST_INTERFACE) {
		new_kernel_policy->cond_last_interface_index = cond_last_interface_index;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
		new_kernel_policy->cond_protocol = cond_protocol;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
		memcpy(&new_kernel_policy->cond_local_start, cond_local_start, cond_local_start->sa.sa_len);
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
		memcpy(&new_kernel_policy->cond_local_end, cond_local_end, cond_local_end->sa.sa_len);
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
		new_kernel_policy->cond_local_prefix = cond_local_prefix;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
		memcpy(&new_kernel_policy->cond_remote_start, cond_remote_start, cond_remote_start->sa.sa_len);
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
		memcpy(&new_kernel_policy->cond_remote_end, cond_remote_end, cond_remote_end->sa.sa_len);
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
		new_kernel_policy->cond_remote_prefix = cond_remote_prefix;
	}

	new_kernel_policy->result = result;
	memcpy(&new_kernel_policy->result_parameter, &result_parameter, sizeof(result_parameter));

	if (necp_debug) {
		NECPLOG(LOG_DEBUG, "Added kernel policy: ip output, id=%d, mask=%x\n", new_kernel_policy->id, new_kernel_policy->condition_mask);
	}
	LIST_INSERT_SORTED_THRICE_ASCENDING(&necp_kernel_ip_output_policies, new_kernel_policy, chain, session_order, order, suborder, tmp_kernel_policy);
done:
	return (new_kernel_policy ? new_kernel_policy->id : 0);
}

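/*
 * Editor's note (illustrative sketch, not part of the original sources):
 * The sanitization above keeps only IP-output-capable conditions and then
 * resolves mutually exclusive pairs (ALL_INTERFACES wins over
 * BOUND_INTERFACE, an address END range wins over a PREFIX). For example:
 */
#if 0	/* Illustrative only. */
	u_int32_t requested = NECP_KERNEL_CONDITION_ALL_INTERFACES |
	    NECP_KERNEL_CONDITION_BOUND_INTERFACE |
	    NECP_KERNEL_CONDITION_APP_ID;	// Not valid at the IP output layer
	u_int32_t sanitized = requested & NECP_KERNEL_VALID_IP_OUTPUT_CONDITIONS;
	// APP_ID is dropped by the valid-conditions mask, and the add routine
	// then clears BOUND_INTERFACE because ALL_INTERFACES is also set.
#endif
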
static struct necp_kernel_ip_output_policy *
necp_kernel_ip_output_policy_find(necp_kernel_policy_id policy_id)
{
	struct necp_kernel_ip_output_policy *kernel_policy = NULL;
	struct necp_kernel_ip_output_policy *tmp_kernel_policy = NULL;

	if (policy_id == 0) {
		return (NULL);
	}

	LIST_FOREACH_SAFE(kernel_policy, &necp_kernel_ip_output_policies, chain, tmp_kernel_policy) {
		if (kernel_policy->id == policy_id) {
			return (kernel_policy);
		}
	}

	return (NULL);
}

static bool
necp_kernel_ip_output_policy_delete(necp_kernel_policy_id policy_id)
{
	struct necp_kernel_ip_output_policy *policy = NULL;

	LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	policy = necp_kernel_ip_output_policy_find(policy_id);
	if (policy) {
		LIST_REMOVE(policy, chain);

		if (policy->cond_bound_interface) {
			ifnet_release(policy->cond_bound_interface);
			policy->cond_bound_interface = NULL;
		}

		FREE_ZONE(policy, sizeof(*policy), M_NECP_IP_POLICY);
		return (TRUE);
	}

	return (FALSE);
}

static void
necp_kernel_ip_output_policies_dump_all(void)
{
	if (necp_debug) {
		struct necp_kernel_ip_output_policy *policy = NULL;
		int policy_i;
		int id_i;
		char result_string[MAX_RESULT_STRING_LEN];
		char proc_name_string[MAXCOMLEN + 1];
		memset(result_string, 0, MAX_RESULT_STRING_LEN);
		memset(proc_name_string, 0, MAXCOMLEN + 1);

		NECPLOG0(LOG_DEBUG, "NECP IP Output Policies:\n");
		NECPLOG0(LOG_DEBUG, "-----------\n");
		for (id_i = 0; id_i < NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS; id_i++) {
			NECPLOG(LOG_DEBUG, " ID Bucket: %d\n", id_i);
			for (policy_i = 0; necp_kernel_ip_output_policies_map[id_i] != NULL && (necp_kernel_ip_output_policies_map[id_i])[policy_i] != NULL; policy_i++) {
				policy = (necp_kernel_ip_output_policies_map[id_i])[policy_i];
				proc_name(policy->session_pid, proc_name_string, MAXCOMLEN);
				NECPLOG(LOG_DEBUG, "\t%3d. Policy ID: %5d\tProcess: %10.10s\tOrder: %04d.%04d.%d\tMask: %5x\tResult: %s\n", policy_i, policy->id, proc_name_string, policy->session_order, policy->order, policy->suborder, policy->condition_mask, necp_get_result_description(result_string, policy->result, policy->result_parameter));
			}
			NECPLOG0(LOG_DEBUG, "-----------\n");
		}
	}
}

static bool
necp_kernel_ip_output_policy_results_overlap(struct necp_kernel_ip_output_policy *upper_policy, struct necp_kernel_ip_output_policy *lower_policy)
{
	if (upper_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
		if (upper_policy->session_order != lower_policy->session_order) {
			// A skip cannot override a policy of a different session
			return (FALSE);
		}

		if (upper_policy->result_parameter.skip_policy_order == 0 ||
			lower_policy->order >= upper_policy->result_parameter.skip_policy_order) {
			// This policy is beyond the skip
			return (FALSE);
		} else {
			// This policy is inside the skip
			return (TRUE);
		}
	}

	// All other IP Output policy results (drop, tunnel, hard pass) currently overlap
	return (TRUE);
}

static bool
necp_kernel_ip_output_policy_is_unnecessary(struct necp_kernel_ip_output_policy *policy, struct necp_kernel_ip_output_policy **policy_array, int valid_indices)
{
	bool can_skip = FALSE;
	u_int32_t highest_skip_session_order = 0;
	u_int32_t highest_skip_order = 0;
	int i;
	for (i = 0; i < valid_indices; i++) {
		struct necp_kernel_ip_output_policy *compared_policy = policy_array[i];

		// For policies in a skip window, we can't mark conflicting policies as unnecessary
		if (can_skip) {
			if (highest_skip_session_order != compared_policy->session_order ||
				(highest_skip_order != 0 && compared_policy->order >= highest_skip_order)) {
				// If we've moved on to the next session, or passed the skip window
				highest_skip_session_order = 0;
				highest_skip_order = 0;
				can_skip = FALSE;
			} else {
				// If this policy is also a skip, it can increase the skip window
				if (compared_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
					if (compared_policy->result_parameter.skip_policy_order > highest_skip_order) {
						highest_skip_order = compared_policy->result_parameter.skip_policy_order;
					}
				}
				continue;
			}
		}

		if (compared_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
			// This policy is a skip. Set the skip window accordingly
			can_skip = TRUE;
			highest_skip_session_order = compared_policy->session_order;
			highest_skip_order = compared_policy->result_parameter.skip_policy_order;
		}

		// The result of the compared policy must be able to block out this policy result
		if (!necp_kernel_ip_output_policy_results_overlap(compared_policy, policy)) {
			continue;
		}

		// If new policy matches All Interfaces, compared policy must also
		if ((policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES) && !(compared_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES)) {
			continue;
		}

		// Default makes lower policies unnecessary always
		if (compared_policy->condition_mask == 0) {
			return (TRUE);
		}

		// Compared must be more general than policy, and include only conditions within policy
		if ((policy->condition_mask & compared_policy->condition_mask) != compared_policy->condition_mask) {
			continue;
		}

		// Negative conditions must match for the overlapping conditions
		if ((policy->condition_negated_mask & compared_policy->condition_mask) != (compared_policy->condition_negated_mask & compared_policy->condition_mask)) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID &&
			compared_policy->cond_policy_id != policy->cond_policy_id) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE &&
			compared_policy->cond_bound_interface != policy->cond_bound_interface) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL &&
			compared_policy->cond_protocol != policy->cond_protocol) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
			if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
				if (!necp_is_range_in_range((struct sockaddr *)&policy->cond_local_start, (struct sockaddr *)&policy->cond_local_end, (struct sockaddr *)&compared_policy->cond_local_start, (struct sockaddr *)&compared_policy->cond_local_end)) {
					continue;
				}
			} else if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
				if (compared_policy->cond_local_prefix > policy->cond_local_prefix ||
					!necp_is_addr_in_subnet((struct sockaddr *)&policy->cond_local_start, (struct sockaddr *)&compared_policy->cond_local_start, compared_policy->cond_local_prefix)) {
					continue;
				}
			}
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
			if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
				if (!necp_is_range_in_range((struct sockaddr *)&policy->cond_remote_start, (struct sockaddr *)&policy->cond_remote_end, (struct sockaddr *)&compared_policy->cond_remote_start, (struct sockaddr *)&compared_policy->cond_remote_end)) {
					continue;
				}
			} else if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
				if (compared_policy->cond_remote_prefix > policy->cond_remote_prefix ||
					!necp_is_addr_in_subnet((struct sockaddr *)&policy->cond_remote_start, (struct sockaddr *)&compared_policy->cond_remote_start, compared_policy->cond_remote_prefix)) {
					continue;
				}
			}
		}

		return (TRUE);
	}

	return (FALSE);
}

static bool
necp_kernel_ip_output_policies_reprocess(void)
{
	int i;
	int bucket_allocation_counts[NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS];
	int bucket_current_free_index[NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS];
	struct necp_kernel_ip_output_policy *kernel_policy = NULL;

	LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	// Reset mask and counts
	necp_kernel_ip_output_policies_condition_mask = 0;
	necp_kernel_ip_output_policies_count = 0;
	necp_kernel_ip_output_policies_non_id_count = 0;

	for (i = 0; i < NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS; i++) {
		if (necp_kernel_ip_output_policies_map[i] != NULL) {
			FREE(necp_kernel_ip_output_policies_map[i], M_NECP);
			necp_kernel_ip_output_policies_map[i] = NULL;
		}

		bucket_allocation_counts[i] = 0;
	}

	LIST_FOREACH(kernel_policy, &necp_kernel_ip_output_policies, chain) {
		// Update mask and counts
		necp_kernel_ip_output_policies_condition_mask |= kernel_policy->condition_mask;
		necp_kernel_ip_output_policies_count++;

		/* Update bucket counts:
		 * Non-id and SKIP policies will be added to all buckets
		 */
		if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID) ||
			kernel_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
			for (i = 0; i < NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS; i++) {
				bucket_allocation_counts[i]++;
			}
		}
		if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID)) {
			necp_kernel_ip_output_policies_non_id_count++;
		} else {
			bucket_allocation_counts[NECP_IP_OUTPUT_MAP_ID_TO_BUCKET(kernel_policy->cond_policy_id)]++;
		}
	}

	for (i = 0; i < NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS; i++) {
		if (bucket_allocation_counts[i] > 0) {
			// Allocate a NULL-terminated array of policy pointers for each bucket
			MALLOC(necp_kernel_ip_output_policies_map[i], struct necp_kernel_ip_output_policy **, sizeof(struct necp_kernel_ip_output_policy *) * (bucket_allocation_counts[i] + 1), M_NECP, M_WAITOK);
			if (necp_kernel_ip_output_policies_map[i] == NULL) {
				goto fail;
			}

			// Initialize the first entry to NULL
			(necp_kernel_ip_output_policies_map[i])[0] = NULL;
		}
		bucket_current_free_index[i] = 0;
	}

	LIST_FOREACH(kernel_policy, &necp_kernel_ip_output_policies, chain) {
		// Insert pointers into map
		if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID) ||
			kernel_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
			for (i = 0; i < NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS; i++) {
				if (!necp_kernel_ip_output_policy_is_unnecessary(kernel_policy, necp_kernel_ip_output_policies_map[i], bucket_current_free_index[i])) {
					(necp_kernel_ip_output_policies_map[i])[(bucket_current_free_index[i])] = kernel_policy;
					bucket_current_free_index[i]++;
					(necp_kernel_ip_output_policies_map[i])[(bucket_current_free_index[i])] = NULL;
				}
			}
		} else {
			i = NECP_IP_OUTPUT_MAP_ID_TO_BUCKET(kernel_policy->cond_policy_id);
			if (!necp_kernel_ip_output_policy_is_unnecessary(kernel_policy, necp_kernel_ip_output_policies_map[i], bucket_current_free_index[i])) {
				(necp_kernel_ip_output_policies_map[i])[(bucket_current_free_index[i])] = kernel_policy;
				bucket_current_free_index[i]++;
				(necp_kernel_ip_output_policies_map[i])[(bucket_current_free_index[i])] = NULL;
			}
		}
	}
	necp_kernel_ip_output_policies_dump_all();
	return (TRUE);

fail:
	// Free memory, reset mask to 0
	necp_kernel_ip_output_policies_condition_mask = 0;
	necp_kernel_ip_output_policies_count = 0;
	necp_kernel_ip_output_policies_non_id_count = 0;
	for (i = 0; i < NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS; i++) {
		if (necp_kernel_ip_output_policies_map[i] != NULL) {
			FREE(necp_kernel_ip_output_policies_map[i], M_NECP);
			necp_kernel_ip_output_policies_map[i] = NULL;
		}
	}
	return (FALSE);
}

// Outbound Policy Matching
// ---------------------
struct substring {
	char *string;
	size_t length;
};

static struct substring
necp_trim_dots_and_stars(char *string, size_t length)
{
	struct substring sub;
	sub.string = string;
	sub.length = string ? length : 0;

	while (sub.length && (sub.string[0] == '.' || sub.string[0] == '*')) {
		sub.string++;
		sub.length--;
	}

	while (sub.length && (sub.string[sub.length - 1] == '.' || sub.string[sub.length - 1] == '*')) {
		sub.length--;
	}

	return (sub);
}

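/*
 * Editor's note (illustrative sketch, not part of the original sources):
 * Trimming strips any leading or trailing '.' and '*' characters, so a
 * wildcard-style policy string such as "*.example.com." reduces to the
 * bare domain "example.com" before it is stored and compared (the input
 * string below is hypothetical):
 */
#if 0	/* Illustrative only. */
	struct substring trimmed = necp_trim_dots_and_stars("*.example.com.", strlen("*.example.com."));
	// trimmed.string now points at "example.com" and trimmed.length == 11
#endif
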
static char *
necp_create_trimmed_domain(char *string, size_t length)
{
	char *trimmed_domain = NULL;
	struct substring sub = necp_trim_dots_and_stars(string, length);

	MALLOC(trimmed_domain, char *, sub.length + 1, M_NECP, M_WAITOK);
	if (trimmed_domain == NULL) {
		return (NULL);
	}

	memcpy(trimmed_domain, sub.string, sub.length);
	trimmed_domain[sub.length] = 0;

	return (trimmed_domain);
}

static inline int
necp_count_dots(char *string, size_t length)
{
	int dot_count = 0;
	size_t i = 0;

	for (i = 0; i < length; i++) {
		if (string[i] == '.') {
			dot_count++;
		}
	}

	return (dot_count);
}

static bool
necp_check_suffix(struct substring parent, struct substring suffix, bool require_dot_before_suffix)
{
	if (parent.length <= suffix.length) {
		return (FALSE);
	}

	size_t length_difference = (parent.length - suffix.length);

	if (require_dot_before_suffix) {
		if (((char *)(parent.string + length_difference - 1))[0] != '.') {
			return (FALSE);
		}
	}

	// strncasecmp does case-insensitive check for all UTF-8 strings (ignores non-ASCII characters)
	return (strncasecmp(parent.string + length_difference, suffix.string, suffix.length) == 0);
}

static bool
necp_hostname_matches_domain(struct substring hostname_substring, u_int8_t hostname_dot_count, char *domain, u_int8_t domain_dot_count)
{
	if (hostname_substring.string == NULL || domain == NULL) {
		return (hostname_substring.string == domain);
	}

	struct substring domain_substring;
	domain_substring.string = domain;
	domain_substring.length = strlen(domain);

	if (hostname_dot_count == domain_dot_count) {
		// strncasecmp does case-insensitive check for all UTF-8 strings (ignores non-ASCII characters)
		if (hostname_substring.length == domain_substring.length &&
			strncasecmp(hostname_substring.string, domain_substring.string, hostname_substring.length) == 0) {
			return (TRUE);
		}
	} else if (domain_dot_count < hostname_dot_count) {
		if (necp_check_suffix(hostname_substring, domain_substring, TRUE)) {
			return (TRUE);
		}
	}

	return (FALSE);
}

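/*
 * Editor's note (illustrative sketch, not part of the original sources):
 * With equal dot counts the comparison is an exact, case-insensitive match;
 * with fewer dots in the domain it becomes a suffix match that requires a
 * '.' immediately before the suffix. A policy domain of "example.com"
 * therefore matches the hostname "www.example.com" but not
 * "badexample.com" (hostnames below are hypothetical):
 */
#if 0	/* Illustrative only. */
	struct substring host;
	host.string = "www.example.com";
	host.length = strlen(host.string);
	bool matches = necp_hostname_matches_domain(host,
	    necp_count_dots(host.string, host.length),
	    "example.com",
	    necp_count_dots("example.com", strlen("example.com")));	// TRUE
#endif
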
static char *
necp_copy_string(char *string, size_t length)
{
	char *copied_string = NULL;

	MALLOC(copied_string, char *, length + 1, M_NECP, M_WAITOK);
	if (copied_string == NULL) {
		return (NULL);
	}

	memcpy(copied_string, string, length);
	copied_string[length] = 0;

	return (copied_string);
}

static u_int32_t
necp_get_primary_direct_interface_index(void)
{
	u_int32_t interface_index = IFSCOPE_NONE;

	ifnet_head_lock_shared();
	struct ifnet *ordered_interface = NULL;
	TAILQ_FOREACH(ordered_interface, &ifnet_ordered_head, if_ordered_link) {
		const u_int8_t functional_type = if_functional_type(ordered_interface, TRUE);
		if (functional_type != IFRTYPE_FUNCTIONAL_UNKNOWN &&
			functional_type != IFRTYPE_FUNCTIONAL_LOOPBACK) {
			// All known, non-loopback functional types represent direct physical interfaces (Wi-Fi, Cellular, Wired)
			interface_index = ordered_interface->if_index;
			break;
		}
	}
	ifnet_head_done();

	return interface_index;
}

static void
necp_get_parent_cred_result(proc_t proc, struct necp_socket_info *info)
{
	task_t task = proc_task(proc ? proc : current_proc());
	coalition_t coal = COALITION_NULL;
	Boolean is_leader = coalition_is_leader(task, COALITION_TYPE_JETSAM, &coal);

	if (is_leader == TRUE) {
		// No parent, nothing to do
		return;
	}

	if (coal != NULL) {
		task_t lead_task = coalition_get_leader(coal);
		if (lead_task != NULL) {
			proc_t lead_proc = get_bsdtask_info(lead_task);
			if (lead_proc != NULL) {
				kauth_cred_t lead_cred = kauth_cred_proc_ref(lead_proc);
				if (lead_cred != NULL) {
					errno_t cred_result = priv_check_cred(lead_cred, PRIV_NET_PRIVILEGED_NECP_MATCH, 0);
					kauth_cred_unref(&lead_cred);
					info->cred_result = cred_result;
				}
			}
			task_deallocate(lead_task);
		}
	}
}

#define NECP_KERNEL_ADDRESS_TYPE_CONDITIONS (NECP_KERNEL_CONDITION_LOCAL_START | NECP_KERNEL_CONDITION_LOCAL_END | NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_REMOTE_START | NECP_KERNEL_CONDITION_REMOTE_END | NECP_KERNEL_CONDITION_REMOTE_PREFIX)
static void
necp_application_fillout_info_locked(uuid_t application_uuid, uuid_t real_application_uuid, char *account, char *domain, pid_t pid, uid_t uid, u_int16_t protocol, u_int32_t bound_interface_index, u_int32_t traffic_class, union necp_sockaddr_union *local_addr, union necp_sockaddr_union *remote_addr, proc_t proc, struct necp_socket_info *info)
{
	memset(info, 0, sizeof(struct necp_socket_info));

	info->pid = pid;
	info->uid = uid;
	info->protocol = protocol;
	info->bound_interface_index = bound_interface_index;
	info->traffic_class = traffic_class;

	if (necp_kernel_application_policies_condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT && proc != NULL) {
		info->cred_result = priv_check_cred(proc_ucred(proc), PRIV_NET_PRIVILEGED_NECP_MATCH, 0);
		if (info->cred_result != 0) {
			// Process does not have entitlement, check the parent process
			necp_get_parent_cred_result(proc, info);
		}
	}

	if (necp_kernel_application_policies_condition_mask & NECP_KERNEL_CONDITION_APP_ID && !uuid_is_null(application_uuid)) {
		struct necp_uuid_id_mapping *existing_mapping = necp_uuid_lookup_app_id_locked(application_uuid);
		if (existing_mapping) {
			info->application_id = existing_mapping->id;
		}
	}

	if (necp_kernel_application_policies_condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID && !uuid_is_null(real_application_uuid)) {
		if (uuid_compare(application_uuid, real_application_uuid) == 0) {
			info->real_application_id = info->application_id;
		} else {
			struct necp_uuid_id_mapping *existing_mapping = necp_uuid_lookup_app_id_locked(real_application_uuid);
			if (existing_mapping) {
				info->real_application_id = existing_mapping->id;
			}
		}
	}

	if (necp_kernel_application_policies_condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID && account != NULL) {
		struct necp_string_id_mapping *existing_mapping = necp_lookup_string_to_id_locked(&necp_account_id_list, account);
		if (existing_mapping) {
			info->account_id = existing_mapping->id;
		}
	}

	if (necp_kernel_application_policies_condition_mask & NECP_KERNEL_CONDITION_DOMAIN) {
		info->domain = domain;
	}

	if (necp_kernel_application_policies_condition_mask & NECP_KERNEL_ADDRESS_TYPE_CONDITIONS) {
		if (local_addr && local_addr->sa.sa_len > 0) {
			memcpy(&info->local_addr, local_addr, local_addr->sa.sa_len);
		}
		if (remote_addr && remote_addr->sa.sa_len > 0) {
			memcpy(&info->remote_addr, remote_addr, remote_addr->sa.sa_len);
		}
	}
}

6295 necp_send_application_interface_denied_event(pid_t pid
, uuid_t proc_uuid
, u_int32_t if_functional_type
)
6297 struct kev_netpolicy_ifdenied ev_ifdenied
;
6299 bzero(&ev_ifdenied
, sizeof(ev_ifdenied
));
6301 ev_ifdenied
.ev_data
.epid
= pid
;
6302 uuid_copy(ev_ifdenied
.ev_data
.euuid
, proc_uuid
);
6303 ev_ifdenied
.ev_if_functional_type
= if_functional_type
;
6305 netpolicy_post_msg(KEV_NETPOLICY_IFDENIED
, &ev_ifdenied
.ev_data
, sizeof(ev_ifdenied
));
6308 extern char *proc_name_address(void *p
);
6310 #define NECP_VERIFY_DELEGATION_ENTITLEMENT(_p, _d) \
6311 if (!has_checked_delegation_entitlement) { \
6312 has_delegation_entitlement = (priv_check_cred(proc_ucred(_p), PRIV_NET_PRIVILEGED_SOCKET_DELEGATE, 0) == 0); \
6313 has_checked_delegation_entitlement = TRUE; \
6315 if (!has_delegation_entitlement) { \
6316 NECPLOG(LOG_ERR, "%s(%d) does not hold the necessary entitlement to delegate network traffic for other processes by %s", \
6317 proc_name_address(_p), proc_pid(_p), _d); \
6322 necp_application_find_policy_match_internal(proc_t proc,
6323 u_int8_t *parameters,
6324 u_int32_t parameters_size,
6325 struct necp_aggregate_result *returned_result,
6327 u_int required_interface_index,
6328 const union necp_sockaddr_union *override_local_addr,
6329 const union necp_sockaddr_union *override_remote_addr,
6330 struct rtentry **returned_route, bool ignore_address)
6335 struct necp_kernel_socket_policy *matched_policy = NULL;
6336 struct necp_socket_info info;
6337 necp_kernel_policy_filter filter_control_unit = 0;
6338 u_int32_t route_rule_id = 0;
6339 necp_kernel_policy_result service_action = 0;
6340 necp_kernel_policy_service service = { 0, 0 };
6342 u_int16_t protocol = 0;
6343 u_int32_t bound_interface_index = required_interface_index;
6344 u_int32_t traffic_class = 0;
6345 u_int32_t client_flags = 0;
6346 union necp_sockaddr_union local_addr;
6347 union necp_sockaddr_union remote_addr;
6348 bool no_remote_addr = FALSE;
6349 u_int8_t remote_family = 0;
6350 bool no_local_addr = FALSE;
6352 if (override_local_addr) {
6353 memcpy(&local_addr, override_local_addr, sizeof(local_addr));
6355 memset(&local_addr, 0, sizeof(local_addr));
6357 if (override_remote_addr) {
6358 memcpy(&remote_addr, override_remote_addr, sizeof(remote_addr));
6360 memset(&remote_addr, 0, sizeof(remote_addr));
6363 // Initialize UID, PID, and UUIDs to the current process
6364 uid_t uid = kauth_cred_getuid(proc_ucred(proc));
6365 pid_t pid = proc_pid(proc);
6366 uuid_t application_uuid;
6367 uuid_clear(application_uuid);
6368 uuid_t real_application_uuid;
6369 uuid_clear(real_application_uuid);
6370 proc_getexecutableuuid(proc, real_application_uuid, sizeof(real_application_uuid));
6371 uuid_copy(application_uuid, real_application_uuid);
6373 char *domain = NULL;
6374 char *account = NULL;
6376 #define NECP_MAX_REQUIRED_AGENTS 16
6377 u_int32_t num_required_agent_types = 0;
6378 struct necp_client_parameter_netagent_type required_agent_types[NECP_MAX_REQUIRED_AGENTS];
6379 memset(&required_agent_types, 0, sizeof(required_agent_types));
6381 u_int32_t netagent_ids[NECP_MAX_NETAGENTS];
6382 u_int32_t netagent_use_flags[NECP_MAX_NETAGENTS];
6383 memset(&netagent_ids, 0, sizeof(netagent_ids));
6384 memset(&netagent_use_flags, 0, sizeof(netagent_use_flags));
6385 int netagent_cursor;
6387 bool has_checked_delegation_entitlement = FALSE;
6388 bool has_delegation_entitlement = FALSE;
6390 if (returned_result == NULL) {
6394 memset(returned_result, 0, sizeof(struct necp_aggregate_result));
6396 lck_rw_lock_shared(&necp_kernel_policy_lock);
6397 if (necp_kernel_application_policies_count == 0) {
6398 if (necp_drop_all_order > 0) {
6399 returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_DROP;
6400 lck_rw_done(&necp_kernel_policy_lock);
6404 lck_rw_done(&necp_kernel_policy_lock);
6406 while ((offset + sizeof(u_int8_t) + sizeof(u_int32_t)) <= parameters_size) {
6407 u_int8_t type = necp_buffer_get_tlv_type(parameters, offset);
6408 u_int32_t length = necp_buffer_get_tlv_length(parameters, offset);
6410 if (length > (parameters_size - (offset + sizeof(u_int8_t) + sizeof(u_int32_t)))) {
6411 // If the length is larger than what can fit in the remaining parameters size, bail
6412 NECPLOG(LOG_ERR, "Invalid TLV length (%u)", length);
6417 u_int8_t *value = necp_buffer_get_tlv_value(parameters, offset, NULL);
6418 if (value != NULL) {
6420 case NECP_CLIENT_PARAMETER_APPLICATION: {
6421 if (length >= sizeof(uuid_t)) {
6422 if (uuid_compare(application_uuid, value) == 0) {
6427 NECP_VERIFY_DELEGATION_ENTITLEMENT(proc, "euuid");
6429 uuid_copy(application_uuid, value);
6433 case NECP_CLIENT_PARAMETER_REAL_APPLICATION: {
6434 if (length >= sizeof(uuid_t)) {
6435 if (uuid_compare(real_application_uuid, value) == 0) {
6440 NECP_VERIFY_DELEGATION_ENTITLEMENT(proc, "uuid");
6442 uuid_copy(real_application_uuid, value);
6446 case NECP_CLIENT_PARAMETER_PID: {
6447 if (length >= sizeof(pid_t)) {
6448 if (memcmp(&pid, value, sizeof(pid_t)) == 0) {
6453 NECP_VERIFY_DELEGATION_ENTITLEMENT(proc, "pid");
6455 memcpy(&pid, value, sizeof(pid_t));
6459 case NECP_CLIENT_PARAMETER_UID: {
6460 if (length >= sizeof(uid_t)) {
6461 if (memcmp(&uid, value, sizeof(uid_t)) == 0) {
6466 NECP_VERIFY_DELEGATION_ENTITLEMENT(proc, "uid");
6468 memcpy(&uid, value, sizeof(uid_t));
6472 case NECP_CLIENT_PARAMETER_DOMAIN: {
6473 domain = (char *)value;
6474 domain[length - 1] = 0;
6477 case NECP_CLIENT_PARAMETER_ACCOUNT: {
6478 account = (char *)value;
6479 account[length - 1] = 0;
6482 case NECP_CLIENT_PARAMETER_TRAFFIC_CLASS: {
6483 if (length >= sizeof(u_int32_t)) {
6484 memcpy(&traffic_class, value, sizeof(u_int32_t));
6488 case NECP_CLIENT_PARAMETER_IP_PROTOCOL: {
6489 if (length >= sizeof(u_int16_t)) {
6490 memcpy(&protocol, value, sizeof(u_int16_t));
6494 case NECP_CLIENT_PARAMETER_BOUND_INTERFACE: {
6495 if (length <= IFXNAMSIZ && length > 0) {
6496 ifnet_t bound_interface = NULL;
6497 char interface_name[IFXNAMSIZ];
6498 memcpy(interface_name, value, length);
6499 interface_name[length - 1] = 0; // Make sure the string is NULL terminated
6500 if (ifnet_find_by_name(interface_name, &bound_interface) == 0) {
6501 bound_interface_index = bound_interface->if_index;
6502 ifnet_release(bound_interface);
6507 case NECP_CLIENT_PARAMETER_LOCAL_ADDRESS: {
6508 if (ignore_address) {
6512 if (length >= sizeof(struct necp_policy_condition_addr)) {
6513 struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value;
6514 if (necp_address_is_valid(&address_struct->address.sa)) {
6515 memcpy(&local_addr, &address_struct->address, sizeof(address_struct->address));
6520 case NECP_CLIENT_PARAMETER_REMOTE_ADDRESS: {
6521 if (ignore_address) {
6525 if (length >= sizeof(struct necp_policy_condition_addr)) {
6526 struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value;
6527 if (necp_address_is_valid(&address_struct->address.sa)) {
6528 memcpy(&remote_addr, &address_struct->address, sizeof(address_struct->address));
6533 case NECP_CLIENT_PARAMETER_FLAGS: {
6534 if (length >= sizeof(client_flags)) {
6535 memcpy(&client_flags, value, sizeof(client_flags));
6539 case NECP_CLIENT_PARAMETER_REQUIRE_AGENT_TYPE: {
6540 if (num_required_agent_types >= NECP_MAX_REQUIRED_AGENTS) {
6543 if (length >= sizeof(struct necp_client_parameter_netagent_type)) {
6544 memcpy(&required_agent_types[num_required_agent_types], value, sizeof(struct necp_client_parameter_netagent_type));
6545 num_required_agent_types++;
6556 offset += sizeof(u_int8_t) + sizeof(u_int32_t) + length;
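/*
 * Illustrative sketch (not part of the original source): the parameter
 * parsing loop above walks a type-length-value buffer, checking each
 * claimed length against the remaining buffer before touching the value.
 * A self-contained user-space version of the same walk is below; the
 * EX_* names are hypothetical, and it assumes a little-endian host for
 * the 4-byte length field.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EX_TLV_HEADER_SIZE (sizeof(uint8_t) + sizeof(uint32_t))

static void
ex_walk_tlvs(const uint8_t *buffer, size_t buffer_size)
{
	size_t offset = 0;
	while (offset + EX_TLV_HEADER_SIZE <= buffer_size) {
		uint8_t type = buffer[offset];
		uint32_t length = 0;
		// Little-endian host assumed for this example-only decode.
		memcpy(&length, buffer + offset + sizeof(uint8_t), sizeof(length));

		// Bail if the claimed length overruns the remaining buffer.
		if (length > buffer_size - (offset + EX_TLV_HEADER_SIZE)) {
			fprintf(stderr, "invalid TLV length %u\n", length);
			return;
		}

		const uint8_t *value = buffer + offset + EX_TLV_HEADER_SIZE;
		printf("type %u length %u first byte %u\n", type, length,
		    length > 0 ? value[0] : 0);

		// Advance past the header and the value.
		offset += EX_TLV_HEADER_SIZE + length;
	}
}

int
main(void)
{
	// One TLV: type 1, length 3, value {0xaa, 0xbb, 0xcc}.
	uint8_t buffer[] = { 1, 3, 0, 0, 0, 0xaa, 0xbb, 0xcc };
	ex_walk_tlvs(buffer, sizeof(buffer));
	return 0;
}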
6560 lck_rw_lock_shared(&necp_kernel_policy_lock);
6562 necp_application_fillout_info_locked(application_uuid, real_application_uuid, account, domain, pid, uid, protocol, bound_interface_index, traffic_class, &local_addr, &remote_addr, proc, &info);
6563 matched_policy = necp_socket_find_policy_match_with_info_locked(necp_kernel_socket_policies_app_layer_map, &info, &filter_control_unit, &route_rule_id, &service_action, &service, netagent_ids, netagent_use_flags, NECP_MAX_NETAGENTS, required_agent_types, num_required_agent_types, proc, NULL);
6564 if (matched_policy) {
6565 returned_result->policy_id = matched_policy->id;
6566 returned_result->routing_result = matched_policy->result;
6567 memcpy(&returned_result->routing_result_parameter, &matched_policy->result_parameter, sizeof(returned_result->routing_result_parameter));
6568 } else if (necp_drop_all_order > 0) {
6569 // Mark socket as a drop if drop_all is set
6570 returned_result->policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
6571 returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_DROP;
6573 returned_result->policy_id = 0;
6574 returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_NONE;
6576 returned_result->filter_control_unit = filter_control_unit;
6577 returned_result->service_action = service_action;
6579 // Handle trigger service
6580 if (service.identifier != 0) {
6581 struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(service.identifier);
6582 if (mapping != NULL) {
6583 struct necp_service_registration *service_registration = NULL;
6584 uuid_copy(returned_result->service_uuid, mapping->uuid);
6585 returned_result->service_data = service.data;
6586 if (service.identifier == NECP_NULL_SERVICE_ID) {
6587 // NULL service is always 'registered'
6588 returned_result->service_flags |= NECP_SERVICE_FLAGS_REGISTERED;
6590 LIST_FOREACH(service_registration, &necp_registered_service_list, kernel_chain) {
6591 if (service.identifier == service_registration->service_id) {
6592 returned_result->service_flags |= NECP_SERVICE_FLAGS_REGISTERED;
6601 for (netagent_cursor = 0; netagent_cursor < NECP_MAX_NETAGENTS; netagent_cursor++) {
6602 struct necp_uuid_id_mapping *mapping = NULL;
6603 u_int32_t netagent_id = netagent_ids[netagent_cursor];
6604 if (netagent_id == 0) {
6607 mapping = necp_uuid_lookup_uuid_with_service_id_locked(netagent_id);
6608 if (mapping != NULL) {
6609 uuid_copy(returned_result->netagents[netagent_cursor], mapping->uuid);
6610 returned_result->netagent_use_flags[netagent_cursor] = netagent_use_flags[netagent_cursor];
6614 // Do routing evaluation
6615 u_int output_bound_interface = bound_interface_index;
6616 if (returned_result->routing_result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED) {
6617 output_bound_interface = returned_result->routing_result_parameter.scoped_interface_index;
6618 } else if (returned_result->routing_result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL) {
6619 output_bound_interface = returned_result->routing_result_parameter.tunnel_interface_index;
6620 } else if (returned_result->routing_result == NECP_KERNEL_POLICY_RESULT_SCOPED_DIRECT) {
6621 output_bound_interface = necp_get_primary_direct_interface_index();
6622 if (output_bound_interface == IFSCOPE_NONE) {
6623 returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_DROP;
6625 returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED;
6626 returned_result->routing_result_parameter.scoped_interface_index = output_bound_interface;
6630 if (local_addr.sa.sa_len == 0 ||
6631 (local_addr.sa.sa_family == AF_INET && local_addr.sin.sin_addr.s_addr == 0) ||
6632 (local_addr.sa.sa_family == AF_INET6 && IN6_IS_ADDR_UNSPECIFIED(&local_addr.sin6.sin6_addr))) {
6633 no_local_addr = TRUE;
6636 if (remote_addr.sa.sa_len == 0 ||
6637 (remote_addr.sa.sa_family == AF_INET && remote_addr.sin.sin_addr.s_addr == 0) ||
6638 (remote_addr.sa.sa_family == AF_INET6 && IN6_IS_ADDR_UNSPECIFIED(&remote_addr.sin6.sin6_addr))) {
6639 no_remote_addr = TRUE;
6640 remote_family = remote_addr.sa.sa_family;
6643 returned_result->routed_interface_index = 0;
6644 struct rtentry *rt = NULL;
6645 if (!no_local_addr && (client_flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER) != 0) {
6646 // Treat the output bound interface as the routed interface for local address
6647 // validation later.
6648 returned_result->routed_interface_index = output_bound_interface;
6650 if (no_remote_addr) {
6651 memset(&remote_addr, 0, sizeof(remote_addr));
6652 if (remote_family == AF_INET6) {
6653 // Reset address to ::
6654 remote_addr.sa.sa_family = AF_INET6;
6655 remote_addr.sa.sa_len = sizeof(struct sockaddr_in6);
6657 // Reset address to 0.0.0.0
6658 remote_addr.sa.sa_family = AF_INET;
6659 remote_addr.sa.sa_len = sizeof(struct sockaddr_in);
6663 rt = rtalloc1_scoped((struct sockaddr *)&remote_addr, 0, 0,
6664 output_bound_interface);
6666 if (remote_addr.sa.sa_family == AF_INET && rt != NULL &&
6667 IS_INTF_CLAT46(rt->rt_ifp)) {
6670 returned_result->routed_interface_index = 0;
6673 if (no_remote_addr && remote_family == 0 &&
6674 (rt == NULL || rt->rt_ifp == NULL)) {
6675 // Route lookup for default IPv4 failed, try IPv6
6677 // Cleanup old route if necessary
6683 // Reset address to ::
6684 memset(&remote_addr, 0, sizeof(remote_addr));
6685 remote_addr.sa.sa_family = AF_INET6;
6686 remote_addr.sa.sa_len = sizeof(struct sockaddr_in6);
6689 rt = rtalloc1_scoped((struct sockaddr *)&remote_addr, 0, 0,
6690 output_bound_interface);
6694 rt->rt_ifp != NULL) {
6695 returned_result->routed_interface_index = rt->rt_ifp->if_index;
6697 * For local addresses, we allow the interface scope to be
6698 * either the loopback interface or the interface hosting the
6701 if (bound_interface_index != IFSCOPE_NONE &&
6702 rt->rt_ifa != NULL && rt->rt_ifa->ifa_ifp &&
6703 (output_bound_interface == lo_ifp->if_index ||
6704 rt->rt_ifp->if_index == lo_ifp->if_index ||
6705 rt->rt_ifa->ifa_ifp->if_index == bound_interface_index)) {
6706 struct sockaddr_storage dst;
6707 unsigned int ifscope = bound_interface_index;
6710 * Transform dst into the internal routing table form
6712 (void) sa_copy((struct sockaddr *)&remote_addr,
6715 if ((rt->rt_ifp->if_index == lo_ifp->if_index) ||
6716 rt_ifa_is_dst((struct sockaddr *)&dst, rt->rt_ifa))
6717 returned_result->routed_interface_index =
6718 bound_interface_index;
6723 if (returned_result->routed_interface_index != 0 &&
6724 returned_result->routed_interface_index != lo_ifp->if_index && // Loopback can accept any local address
6727 // Transform local_addr into the ifaddr form
6728 // IPv6 Scope IDs are always embedded in the ifaddr list
6729 struct sockaddr_storage local_address_sanitized;
6730 u_int ifscope = IFSCOPE_NONE;
6731 (void)sa_copy(&local_addr.sa, &local_address_sanitized, &ifscope);
6732 SIN(&local_address_sanitized)->sin_port = 0;
6733 if (local_address_sanitized.ss_family == AF_INET6) {
6734 SIN6(&local_address_sanitized)->sin6_scope_id = 0;
6737 // Validate local address on routed interface
6738 struct ifaddr *ifa = ifa_ifwithaddr_scoped((struct sockaddr *)&local_address_sanitized, returned_result->routed_interface_index);
6740 // Interface address not found, reject route
6741 returned_result->routed_interface_index = 0;
6747 ifaddr_release(ifa);
6752 if (flags != NULL) {
6753 if ((client_flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER) == 0) {
6754 // Check for local/direct
6755 bool is_local = FALSE;
6756 if (rt != NULL && (rt->rt_flags & RTF_LOCAL)) {
6758 } else if (returned_result->routed_interface_index != 0 &&
6760 // Clean up the address before comparison with interface addresses
6762 // Transform remote_addr into the ifaddr form
6763 // IPv6 Scope IDs are always embedded in the ifaddr list
6764 struct sockaddr_storage remote_address_sanitized;
6765 u_int ifscope = IFSCOPE_NONE;
6766 (void)sa_copy(&remote_addr.sa, &remote_address_sanitized, &ifscope);
6767 SIN(&remote_address_sanitized)->sin_port = 0;
6768 if (remote_address_sanitized.ss_family == AF_INET6) {
6769 SIN6(&remote_address_sanitized)->sin6_scope_id = 0;
6772 // Check if remote address is an interface address
6773 struct ifaddr *ifa = ifa_ifwithaddr((struct sockaddr *)&remote_address_sanitized);
6774 if (ifa != NULL && ifa->ifa_ifp != NULL) {
6775 u_int if_index_for_remote_addr = ifa->ifa_ifp->if_index;
6776 if (if_index_for_remote_addr == returned_result->routed_interface_index ||
6777 if_index_for_remote_addr == lo_ifp->if_index) {
6782 ifaddr_release(ifa);
6788 *flags |= (NECP_CLIENT_RESULT_FLAG_IS_LOCAL | NECP_CLIENT_RESULT_FLAG_IS_DIRECT);
6791 !(rt->rt_flags & RTF_GATEWAY) &&
6792 (rt->rt_ifa && rt->rt_ifa->ifa_ifp && !(rt->rt_ifa->ifa_ifp->if_flags & IFF_POINTOPOINT))) {
6793 // Route is directly accessible
6794 *flags |= NECP_CLIENT_RESULT_FLAG_IS_DIRECT;
6799 rt->rt_ifp != NULL) {
6800 // Check probe status
6801 if (rt->rt_ifp->if_eflags & IFEF_PROBE_CONNECTIVITY) {
6802 *flags |= NECP_CLIENT_RESULT_FLAG_PROBE_CONNECTIVITY;
6805 if (rt->rt_ifp->if_type == IFT_CELLULAR) {
6806 struct if_cellular_status_v1 *ifsr;
6808 ifnet_lock_shared(rt->rt_ifp);
6809 lck_rw_lock_exclusive(&rt->rt_ifp->if_link_status_lock);
6811 if (rt->rt_ifp->if_link_status != NULL) {
6812 ifsr = &rt->rt_ifp->if_link_status->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
6814 if (ifsr->valid_bitmask & IF_CELL_UL_MSS_RECOMMENDED_VALID) {
6815 if (ifsr->mss_recommended == IF_CELL_UL_MSS_RECOMMENDED_NONE) {
6816 returned_result->mss_recommended = NECP_CLIENT_RESULT_RECOMMENDED_MSS_NONE;
6817 } else if (ifsr->mss_recommended == IF_CELL_UL_MSS_RECOMMENDED_MEDIUM) {
6818 returned_result->mss_recommended = NECP_CLIENT_RESULT_RECOMMENDED_MSS_MEDIUM;
6819 } else if (ifsr->mss_recommended == IF_CELL_UL_MSS_RECOMMENDED_LOW) {
6820 returned_result->mss_recommended = NECP_CLIENT_RESULT_RECOMMENDED_MSS_LOW;
6824 lck_rw_done(&rt->rt_ifp->if_link_status_lock);
6825 ifnet_lock_done(rt->rt_ifp);
6828 // Check link quality
6829 if ((client_flags & NECP_CLIENT_PARAMETER_FLAG_DISCRETIONARY) &&
6830 (rt->rt_ifp->if_interface_state.valid_bitmask & IF_INTERFACE_STATE_LQM_STATE_VALID) &&
6831 rt->rt_ifp->if_interface_state.lqm_state == IFNET_LQM_THRESH_ABORT) {
6832 *flags |= NECP_CLIENT_RESULT_FLAG_LINK_QUALITY_ABORT;
6835 // Check QoS marking (fastlane)
6836 if (necp_update_qos_marking(rt->rt_ifp, route_rule_id)) {
6837 *flags |= NECP_CLIENT_RESULT_FLAG_ALLOW_QOS_MARKING;
6840 if (IFNET_IS_LOW_POWER(rt->rt_ifp)) {
6841 *flags |= NECP_CLIENT_RESULT_FLAG_INTERFACE_LOW_POWER;
6846 if (returned_result->routed_interface_index != 0) {
6847 union necp_sockaddr_union default_address;
6848 struct rtentry *v4Route = NULL;
6849 struct rtentry *v6Route = NULL;
6851 memset(&default_address, 0, sizeof(default_address));
6853 // Reset address to 0.0.0.0
6854 default_address.sa.sa_family = AF_INET;
6855 default_address.sa.sa_len = sizeof(struct sockaddr_in);
6856 v4Route = rtalloc1_scoped((struct sockaddr *)&default_address, 0, 0,
6857 returned_result->routed_interface_index);
6859 // Reset address to ::
6860 default_address.sa.sa_family = AF_INET6;
6861 default_address.sa.sa_len = sizeof(struct sockaddr_in6);
6862 v6Route = rtalloc1_scoped((struct sockaddr *)&default_address, 0, 0,
6863 returned_result->routed_interface_index);
6865 if (v4Route != NULL) {
6866 if (v4Route->rt_ifp != NULL && !IS_INTF_CLAT46(v4Route->rt_ifp)) {
6867 *flags |= NECP_CLIENT_RESULT_FLAG_HAS_IPV4;
6873 if (v6Route != NULL) {
6874 if (v6Route->rt_ifp != NULL) {
6875 *flags |= NECP_CLIENT_RESULT_FLAG_HAS_IPV6;
6877 if (ifnet_get_nat64prefix(v6Route->rt_ifp, NULL) == 0) {
6878 *flags |= NECP_CLIENT_RESULT_FLAG_HAS_NAT64;
6887 u_int32_t interface_type_denied = IFRTYPE_FUNCTIONAL_UNKNOWN;
6888 bool route_is_allowed = necp_route_is_allowed(rt, NULL, route_rule_id, &interface_type_denied);
6889 if (!route_is_allowed) {
6890 // If the route is blocked, treat the lookup as a drop
6891 returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_DROP;
6892 memset(&returned_result->routing_result_parameter, 0, sizeof(returned_result->routing_result_parameter));
6894 if (interface_type_denied != IFRTYPE_FUNCTIONAL_UNKNOWN) {
6895 necp_send_application_interface_denied_event(pid, application_uuid, interface_type_denied);
6900 if (returned_route != NULL) {
6901 *returned_route = rt;
6908 lck_rw_done(&necp_kernel_policy_lock);
6914 necp_socket_check_policy(struct necp_kernel_socket_policy *kernel_policy, necp_app_id app_id, necp_app_id real_app_id, errno_t cred_result, u_int32_t account_id, struct substring domain, u_int8_t domain_dot_count, pid_t pid, uid_t uid, u_int32_t bound_interface_index, u_int32_t traffic_class, u_int16_t protocol, union necp_sockaddr_union *local, union necp_sockaddr_union *remote, struct necp_client_parameter_netagent_type *required_agent_types, u_int32_t num_required_agent_types, proc_t proc)
6916 if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES)) {
6917 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
6918 u_int32_t cond_bound_interface_index = kernel_policy->cond_bound_interface ? kernel_policy->cond_bound_interface->if_index : 0;
6919 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
6920 if (bound_interface_index == cond_bound_interface_index) {
6921 // No match, matches forbidden interface
6925 if (bound_interface_index != cond_bound_interface_index) {
6926 // No match, does not match required interface
6931 if (bound_interface_index != 0) {
6932 // No match, requires a non-bound packet
6938 if (kernel_policy->condition_mask == 0) {
6942 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID) {
6943 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_APP_ID) {
6944 if (app_id == kernel_policy->cond_app_id) {
6945 // No match, matches forbidden application
6949 if (app_id != kernel_policy->cond_app_id) {
6950 // No match, does not match required application
6956 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
6957 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
6958 if (real_app_id == kernel_policy->cond_real_app_id) {
6959 // No match, matches forbidden application
6963 if (real_app_id != kernel_policy->cond_real_app_id) {
6964 // No match, does not match required application
6970 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT) {
6971 if (cred_result != 0) {
6972 // Process is missing entitlement
6977 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT) {
6978 if (kernel_policy->cond_custom_entitlement_matched == necp_boolean_state_false) {
6979 // Process is missing entitlement based on previous check
6981 } else if (kernel_policy->cond_custom_entitlement_matched == necp_boolean_state_unknown) {
6982 if (kernel_policy->cond_custom_entitlement != NULL) {
6984 // No process found, cannot check entitlement
6987 task_t task = proc_task(proc);
6989 !IOTaskHasEntitlement(task, kernel_policy->cond_custom_entitlement)) {
6990 // Process is missing custom entitlement
6991 kernel_policy->cond_custom_entitlement_matched = necp_boolean_state_false;
6994 kernel_policy->cond_custom_entitlement_matched = necp_boolean_state_true;
7000 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_DOMAIN) {
7001 bool domain_matches = necp_hostname_matches_domain(domain, domain_dot_count, kernel_policy->cond_domain, kernel_policy->cond_domain_dot_count);
7002 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_DOMAIN) {
7003 if (domain_matches) {
7004 // No match, matches forbidden domain
7008 if (!domain_matches) {
7009 // No match, does not match required domain
7015 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID) {
7016 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID) {
7017 if (account_id == kernel_policy->cond_account_id) {
7018 // No match, matches forbidden account
7022 if (account_id != kernel_policy->cond_account_id) {
7023 // No match, does not match required account
7029 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PID) {
7030 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_PID) {
7031 if (pid == kernel_policy->cond_pid) {
7032 // No match, matches forbidden pid
7036 if (pid != kernel_policy->cond_pid) {
7037 // No match, does not match required pid
7043 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_UID) {
7044 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_UID) {
7045 if (uid == kernel_policy->cond_uid) {
7046 // No match, matches forbidden uid
7050 if (uid != kernel_policy->cond_uid) {
7051 // No match, does not match required uid
7057 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
7058 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
7059 if (traffic_class >= kernel_policy->cond_traffic_class.start_tc &&
7060 traffic_class <= kernel_policy->cond_traffic_class.end_tc) {
7061 // No match, matches forbidden traffic class
7065 if (traffic_class < kernel_policy->cond_traffic_class.start_tc ||
7066 traffic_class > kernel_policy->cond_traffic_class.end_tc) {
7067 // No match, does not match required traffic class
7073 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
7074 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
7075 if (protocol == kernel_policy->cond_protocol) {
7076 // No match, matches forbidden protocol
7080 if (protocol != kernel_policy->cond_protocol) {
7081 // No match, does not match required protocol
7087 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_AGENT_TYPE) {
7088 bool matches_agent_type = FALSE;
7089 for (u_int32_t i = 0; i < num_required_agent_types; i++) {
7090 struct necp_client_parameter_netagent_type *required_agent_type = &required_agent_types[i];
7091 if ((strlen(kernel_policy->cond_agent_type.agent_domain) == 0 ||
7092 strncmp(required_agent_type->netagent_domain, kernel_policy->cond_agent_type.agent_domain, NETAGENT_DOMAINSIZE) == 0) &&
7093 (strlen(kernel_policy->cond_agent_type.agent_type) == 0 ||
7094 strncmp(required_agent_type->netagent_type, kernel_policy->cond_agent_type.agent_type, NETAGENT_TYPESIZE) == 0)) {
7095 // Found a required agent that matches
7096 matches_agent_type = TRUE;
7100 if (!matches_agent_type) {
7105 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
7106 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
7107 bool inRange = necp_is_addr_in_range((struct sockaddr *)local, (struct sockaddr *)&kernel_policy->cond_local_start, (struct sockaddr *)&kernel_policy->cond_local_end);
7108 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
7117 } else if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
7118 bool inSubnet = necp_is_addr_in_subnet((struct sockaddr *)local, (struct sockaddr *)&kernel_policy->cond_local_start, kernel_policy->cond_local_prefix);
7119 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
7131 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
7132 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
7133 bool inRange = necp_is_addr_in_range((struct sockaddr *)remote, (struct sockaddr *)&kernel_policy->cond_remote_start, (struct sockaddr *)&kernel_policy->cond_remote_end);
7134 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
7143 } else if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
7144 bool inSubnet = necp_is_addr_in_subnet((struct sockaddr *)remote, (struct sockaddr *)&kernel_policy->cond_remote_start, kernel_policy->cond_remote_prefix);
7145 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
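/*
 * Illustrative sketch (not part of the original source): each condition in
 * the check above can be required or negated. When the negated bit is set,
 * an equal value means "matches the forbidden value" and the policy is
 * rejected; otherwise an unequal value means the required value is
 * missing. A reduced user-space model follows; the EX_* names and the
 * ex_policy struct are hypothetical, example-only names.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EX_CONDITION_PID 0x1  // Hypothetical condition bit

struct ex_policy {
	uint32_t condition_mask;
	uint32_t condition_negated_mask;
	int cond_pid;
};

static bool
ex_policy_matches_pid(const struct ex_policy *policy, int pid)
{
	if (policy->condition_mask & EX_CONDITION_PID) {
		if (policy->condition_negated_mask & EX_CONDITION_PID) {
			if (pid == policy->cond_pid) {
				return false;  // Matches the forbidden pid
			}
		} else if (pid != policy->cond_pid) {
			return false;  // Does not match the required pid
		}
	}
	return true;  // Condition absent or satisfied
}

int
main(void)
{
	struct ex_policy require = { EX_CONDITION_PID, 0, 100 };
	struct ex_policy forbid = { EX_CONDITION_PID, EX_CONDITION_PID, 100 };
	printf("require pid 100, pid 100 matches: %d\n",
	    ex_policy_matches_pid(&require, 100));
	printf("forbid pid 100, pid 100 matches: %d\n",
	    ex_policy_matches_pid(&forbid, 100));
	return 0;
}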
7160 static inline u_int32_t
7161 necp_socket_calc_flowhash_locked(struct necp_socket_info *info)
7163 return (net_flowhash(info, sizeof(*info), necp_kernel_socket_policies_gencount));
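/*
 * Illustrative sketch (not part of the original source): hashing the whole
 * info structure, seeded with the policy table generation count, gives a
 * cheap key for deciding whether a previously computed match can be
 * reused. The code above uses net_flowhash; this user-space model
 * substitutes FNV-1a purely for the example, and the ex_* names are
 * hypothetical.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t
ex_hash(const void *data, size_t len, uint32_t seed)
{
	const uint8_t *bytes = data;
	uint32_t hash = 2166136261u ^ seed;  // FNV-1a offset basis, mixed with the seed
	for (size_t i = 0; i < len; i++) {
		hash ^= bytes[i];
		hash *= 16777619u;
	}
	return hash;
}

struct ex_info {
	uint16_t protocol;
	int pid;
};

int
main(void)
{
	uint32_t gencount = 7;  // Bumped whenever the policy table changes
	struct ex_info info = { 6, 1234 };

	// First lookup: compute and cache the hash alongside the result.
	uint32_t cached_hash = ex_hash(&info, sizeof(info), gencount);

	// Later lookup with unchanged inputs and table: hashes agree,
	// so the cached result could be reused without re-matching.
	uint32_t new_hash = ex_hash(&info, sizeof(info), gencount);
	printf("reuse cached result: %s\n",
	    new_hash == cached_hash ? "yes" : "no");
	return 0;
}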
7167 necp_socket_fillout_info_locked(struct inpcb *inp, struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr, u_int32_t override_bound_interface, struct necp_socket_info *info)
7169 struct socket *so = NULL;
7171 memset(info, 0, sizeof(struct necp_socket_info));
7173 so = inp->inp_socket;
7175 if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_PID) {
7176 info->pid = ((so->so_flags & SOF_DELEGATED) ? so->e_pid : so->last_pid);
7179 if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_UID) {
7180 info->uid = kauth_cred_getuid(so->so_cred);
7183 if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
7184 info->traffic_class = so->so_traffic_class;
7187 if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
7188 if (inp->inp_ip_p) {
7189 info->protocol = inp->inp_ip_p;
7191 info->protocol = SOCK_PROTO(so);
7195 if (inp->inp_flags2 & INP2_WANT_APP_POLICY && necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_APP_ID) {
7196 struct necp_uuid_id_mapping *existing_mapping = necp_uuid_lookup_app_id_locked(((so->so_flags & SOF_DELEGATED) ? so->e_uuid : so->last_uuid));
7197 if (existing_mapping) {
7198 info->application_id = existing_mapping->id;
7201 if (!(so->so_flags & SOF_DELEGATED)) {
7202 info->real_application_id = info->application_id;
7203 } else if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
7204 struct necp_uuid_id_mapping *real_existing_mapping = necp_uuid_lookup_app_id_locked(so->last_uuid);
7205 if (real_existing_mapping) {
7206 info->real_application_id = real_existing_mapping->id;
7210 if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT) {
7211 info->cred_result = priv_check_cred(so->so_cred, PRIV_NET_PRIVILEGED_NECP_MATCH, 0);
7212 if (info->cred_result != 0) {
7213 // Process does not have entitlement, check the parent process
7214 necp_get_parent_cred_result(NULL, info);
7219 if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID && inp->inp_necp_attributes.inp_account != NULL) {
7220 struct necp_string_id_mapping *existing_mapping = necp_lookup_string_to_id_locked(&necp_account_id_list, inp->inp_necp_attributes.inp_account);
7221 if (existing_mapping) {
7222 info->account_id = existing_mapping->id;
7226 if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_DOMAIN) {
7227 info->domain = inp->inp_necp_attributes.inp_domain;
7230 if (override_bound_interface) {
7231 info->bound_interface_index = override_bound_interface;
7233 if ((inp->inp_flags & INP_BOUND_IF) && inp->inp_boundifp) {
7234 info->bound_interface_index = inp->inp_boundifp->if_index;
7238 if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_ADDRESS_TYPE_CONDITIONS) {
7239 if (inp->inp_vflag & INP_IPV4) {
7240 if (override_local_addr) {
7241 if (override_local_addr->sa_len <= sizeof(struct sockaddr_in)) {
7242 memcpy(&info->local_addr, override_local_addr, override_local_addr->sa_len);
7245 ((struct sockaddr_in *)&info->local_addr)->sin_family = AF_INET;
7246 ((struct sockaddr_in *)&info->local_addr)->sin_len = sizeof(struct sockaddr_in);
7247 ((struct sockaddr_in *)&info->local_addr)->sin_port = inp->inp_lport;
7248 memcpy(&((struct sockaddr_in *)&info->local_addr)->sin_addr, &inp->inp_laddr, sizeof(struct in_addr));
7251 if (override_remote_addr) {
7252 if (override_remote_addr->sa_len <= sizeof(struct sockaddr_in)) {
7253 memcpy(&info->remote_addr, override_remote_addr, override_remote_addr->sa_len);
7256 ((struct sockaddr_in *)&info->remote_addr)->sin_family = AF_INET;
7257 ((struct sockaddr_in *)&info->remote_addr)->sin_len = sizeof(struct sockaddr_in);
7258 ((struct sockaddr_in *)&info->remote_addr)->sin_port = inp->inp_fport;
7259 memcpy(&((struct sockaddr_in *)&info->remote_addr)->sin_addr, &inp->inp_faddr, sizeof(struct in_addr));
7261 } else if (inp->inp_vflag & INP_IPV6) {
7262 if (override_local_addr) {
7263 if (override_local_addr->sa_len <= sizeof(struct sockaddr_in6)) {
7264 memcpy(&info->local_addr, override_local_addr, override_local_addr->sa_len);
7267 ((struct sockaddr_in6 *)&info->local_addr)->sin6_family = AF_INET6;
7268 ((struct sockaddr_in6 *)&info->local_addr)->sin6_len = sizeof(struct sockaddr_in6);
7269 ((struct sockaddr_in6 *)&info->local_addr)->sin6_port = inp->inp_lport;
7270 memcpy(&((struct sockaddr_in6 *)&info->local_addr)->sin6_addr, &inp->in6p_laddr, sizeof(struct in6_addr));
7273 if (override_remote_addr) {
7274 if (override_remote_addr->sa_len <= sizeof(struct sockaddr_in6)) {
7275 memcpy(&info->remote_addr, override_remote_addr, override_remote_addr->sa_len);
7278 ((struct sockaddr_in6 *)&info->remote_addr)->sin6_family = AF_INET6;
7279 ((struct sockaddr_in6 *)&info->remote_addr)->sin6_len = sizeof(struct sockaddr_in6);
7280 ((struct sockaddr_in6 *)&info->remote_addr)->sin6_port = inp->inp_fport;
7281 memcpy(&((struct sockaddr_in6 *)&info->remote_addr)->sin6_addr, &inp->in6p_faddr, sizeof(struct in6_addr));
7287 static inline struct necp_kernel_socket_policy *
7288 necp_socket_find_policy_match_with_info_locked(struct necp_kernel_socket_policy **policy_search_array, struct necp_socket_info *info,
7289 necp_kernel_policy_filter *return_filter, u_int32_t *return_route_rule_id,
7290 necp_kernel_policy_result *return_service_action, necp_kernel_policy_service *return_service,
7291 u_int32_t *return_netagent_array, u_int32_t *return_netagent_use_flags_array, size_t netagent_array_count,
7292 struct necp_client_parameter_netagent_type *required_agent_types,
7293 u_int32_t num_required_agent_types, proc_t proc, necp_kernel_policy_id *skip_policy_id)
7295 struct necp_kernel_socket_policy *matched_policy = NULL;
7296 u_int32_t skip_order = 0;
7297 u_int32_t skip_session_order = 0;
7298 u_int32_t route_rule_id_array[MAX_AGGREGATE_ROUTE_RULES];
7299 size_t route_rule_id_count = 0;
7301 size_t netagent_cursor = 0;
7303 // Pre-process domain for quick matching
7304 struct substring domain_substring = necp_trim_dots_and_stars(info->domain, info->domain ? strlen(info->domain) : 0);
7305 u_int8_t domain_dot_count = necp_count_dots(domain_substring.string, domain_substring.length);
7307 if (return_filter) {
7311 if (return_route_rule_id) {
7312 *return_route_rule_id = 0;
7315 if (return_service_action) {
7316 *return_service_action = 0;
7319 if (return_service) {
7320 return_service->identifier = 0;
7321 return_service->data = 0;
7324 if (policy_search_array != NULL) {
7325 for (i = 0; policy_search_array[i] != NULL; i++) {
7326 if (necp_drop_all_order != 0 && policy_search_array[i]->session_order >= necp_drop_all_order) {
7327 // We've hit a drop all rule
7330 if (skip_session_order && policy_search_array[i]->session_order >= skip_session_order) {
7333 skip_session_order = 0;
7336 if (policy_search_array[i]->order < skip_order) {
7342 skip_session_order = 0;
7344 } else if (skip_session_order) {
7348 if (necp_socket_check_policy(policy_search_array[i], info->application_id, info->real_application_id, info->cred_result, info->account_id, domain_substring, domain_dot_count, info->pid, info->uid, info->bound_interface_index, info->traffic_class, info->protocol, &info->local_addr, &info->remote_addr, required_agent_types, num_required_agent_types, proc)) {
7349 if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_SOCKET_FILTER) {
7350 if (return_filter && *return_filter == 0) {
7351 *return_filter = policy_search_array[i]->result_parameter.filter_control_unit;
7352 if (necp_debug > 1) {
7353 NECPLOG(LOG_DEBUG, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) Filter %d", info->application_id, info->real_application_id, info->bound_interface_index, info->protocol, policy_search_array[i]->result_parameter.filter_control_unit);
7357 } else if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_ROUTE_RULES) {
7358 if (return_route_rule_id && route_rule_id_count < MAX_AGGREGATE_ROUTE_RULES) {
7359 route_rule_id_array[route_rule_id_count++] = policy_search_array[i]->result_parameter.route_rule_id;
7360 if (necp_debug > 1) {
7361 NECPLOG(LOG_DEBUG, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) Route Rule %d", info->application_id, info->real_application_id, info->bound_interface_index, info->protocol, policy_search_array[i]->result_parameter.route_rule_id);
7365 } else if (necp_kernel_socket_result_is_trigger_service_type(policy_search_array[i])) {
7366 if (return_service_action && *return_service_action == 0) {
7367 *return_service_action = policy_search_array[i]->result;
7368 if (necp_debug > 1) {
7369 NECPLOG(LOG_DEBUG, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) Service Action %d", info->application_id, info->real_application_id, info->bound_interface_index, info->protocol, policy_search_array[i]->result);
7372 if (return_service && return_service->identifier == 0) {
7373 return_service->identifier = policy_search_array[i]->result_parameter.service.identifier;
7374 return_service->data = policy_search_array[i]->result_parameter.service.data;
7375 if (necp_debug > 1) {
7376 NECPLOG(LOG_DEBUG, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) Service ID %d Data %d", info->application_id, info->real_application_id, info->bound_interface_index, info->protocol, policy_search_array[i]->result_parameter.service.identifier, policy_search_array[i]->result_parameter.service.data);
7380 } else if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_USE_NETAGENT ||
7381 policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_NETAGENT_SCOPED) {
7382 if (return_netagent_array != NULL &&
7383 netagent_cursor < netagent_array_count) {
7384 return_netagent_array[netagent_cursor] = policy_search_array[i]->result_parameter.netagent_id;
7385 if (return_netagent_use_flags_array != NULL &&
7386 policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_NETAGENT_SCOPED) {
7387 return_netagent_use_flags_array[netagent_cursor] |= NECP_AGENT_USE_FLAG_SCOPE;
7390 if (necp_debug > 1) {
7391 NECPLOG(LOG_DEBUG, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) %s Netagent %d",
7392 info->application_id, info->real_application_id, info->bound_interface_index, info->protocol,
7393 policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_USE_NETAGENT ? "Use" : "Scope",
7394 policy_search_array[i]->result_parameter.netagent_id);
7400 // Matched policy is a skip. Do skip and continue.
7401 if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
7402 skip_order = policy_search_array[i]->result_parameter.skip_policy_order;
7403 skip_session_order = policy_search_array[i]->session_order + 1;
7404 if (skip_policy_id) {
7405 *skip_policy_id = policy_search_array[i]->id;
7410 // Passed all tests, found a match
7411 matched_policy = policy_search_array[i];
7417 if (route_rule_id_count == 1) {
7418 *return_route_rule_id = route_rule_id_array[0];
7419 } else if (route_rule_id_count > 1) {
7420 *return_route_rule_id = necp_create_aggregate_route_rule(route_rule_id_array);
7422 return (matched_policy);
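/*
 * Illustrative sketch (not part of the original source): the search above
 * walks an ordered policy array, collects side results (filters, route
 * rules, agents), honors SKIP results by jumping ahead in order, and stops
 * at the first policy that yields a routing result. A reduced user-space
 * model of just the skip handling follows; the EX_* result codes and
 * ex_policy layout are hypothetical, and session-order handling is omitted.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

enum ex_result { EX_RESULT_NONE, EX_RESULT_SKIP, EX_RESULT_PASS, EX_RESULT_DROP };

struct ex_policy {
	uint32_t order;
	enum ex_result result;
	uint32_t skip_to_order;  // Only meaningful for EX_RESULT_SKIP
};

static const struct ex_policy *
ex_find_match(const struct ex_policy *policies, size_t count)
{
	uint32_t skip_order = 0;
	for (size_t i = 0; i < count; i++) {
		// While skipping, ignore policies ordered before the skip target.
		if (skip_order != 0 && policies[i].order < skip_order) {
			continue;
		}
		skip_order = 0;
		if (policies[i].result == EX_RESULT_SKIP) {
			skip_order = policies[i].skip_to_order;
			continue;
		}
		return &policies[i];  // First non-skip match wins
	}
	return NULL;
}

int
main(void)
{
	const struct ex_policy policies[] = {
		{ 10, EX_RESULT_SKIP, 40 },  // Skip everything before order 40
		{ 20, EX_RESULT_DROP, 0 },   // Skipped
		{ 40, EX_RESULT_PASS, 0 },   // Selected
	};
	const struct ex_policy *match = ex_find_match(policies, 3);
	printf("matched order %u result %d\n", match ? match->order : 0,
	    match ? (int)match->result : (int)EX_RESULT_NONE);
	return 0;
}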
7426 necp_socket_uses_interface(struct inpcb *inp, u_int32_t interface_index)
7428 bool found_match = FALSE;
7430 ifaddr_t *addresses = NULL;
7431 union necp_sockaddr_union address_storage;
7433 int family = AF_INET;
7434 ifnet_t interface = ifindex2ifnet[interface_index];
7436 if (inp == NULL || interface == NULL) {
7440 if (inp->inp_vflag & INP_IPV4) {
7442 } else if (inp->inp_vflag & INP_IPV6) {
7446 result = ifnet_get_address_list_family(interface, &addresses, family);
7448 NECPLOG(LOG_ERR, "Failed to get address list for %s%d", ifnet_name(interface), ifnet_unit(interface));
7452 for (i = 0; addresses[i] != NULL; i++) {
7453 if (ifaddr_address(addresses[i], &address_storage.sa, sizeof(address_storage)) == 0) {
7454 if (family == AF_INET) {
7455 if (memcmp(&address_storage.sin.sin_addr, &inp->inp_laddr, sizeof(inp->inp_laddr)) == 0) {
7459 } else if (family == AF_INET6) {
7460 if (memcmp(&address_storage.sin6.sin6_addr, &inp->in6p_laddr, sizeof(inp->in6p_laddr)) == 0) {
7469 ifnet_free_address_list(addresses);
7471 return (found_match);
7475 necp_socket_is_connected(struct inpcb *inp)
7477 return (inp->inp_socket->so_state & (SS_ISCONNECTING | SS_ISCONNECTED | SS_ISDISCONNECTING));
7481 necp_socket_bypass(struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr, struct inpcb *inp)
7484 if (necp_pass_loopback > 0 && necp_is_loopback(override_local_addr, override_remote_addr, inp, NULL)) {
7486 } else if (necp_is_intcoproc(inp, NULL)) {
7493 necp_kernel_policy_id
7494 necp_socket_find_policy_match(struct inpcb *inp, struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr, u_int32_t override_bound_interface)
7496 struct socket *so = NULL;
7497 necp_kernel_policy_filter filter_control_unit = 0;
7498 u_int32_t route_rule_id = 0;
7499 struct necp_kernel_socket_policy *matched_policy = NULL;
7500 necp_kernel_policy_id matched_policy_id = NECP_KERNEL_POLICY_ID_NONE;
7501 necp_kernel_policy_result service_action = 0;
7502 necp_kernel_policy_service service = { 0, 0 };
7504 u_int32_t netagent_ids[NECP_MAX_NETAGENTS];
7505 memset(&netagent_ids, 0, sizeof(netagent_ids));
7506 int netagent_cursor;
7508 struct necp_socket_info info;
7511 return (NECP_KERNEL_POLICY_ID_NONE);
7514 // Ignore invalid addresses
7515 if (override_local_addr != NULL &&
7516 !necp_address_is_valid(override_local_addr)) {
7517 override_local_addr = NULL;
7519 if (override_remote_addr != NULL &&
7520 !necp_address_is_valid(override_remote_addr)) {
7521 override_remote_addr = NULL;
7524 so = inp->inp_socket;
7526 // Don't lock. Possible race condition, but we don't want the performance hit.
7527 if (necp_kernel_socket_policies_count == 0 ||
7528 (!(inp->inp_flags2 & INP2_WANT_APP_POLICY) && necp_kernel_socket_policies_non_app_count == 0)) {
7529 if (necp_drop_all_order > 0) {
7530 inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
7531 inp->inp_policyresult.skip_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
7532 inp->inp_policyresult.policy_gencount = 0;
7533 inp->inp_policyresult.app_id = 0;
7534 inp->inp_policyresult.flowhash = 0;
7535 inp->inp_policyresult.results.filter_control_unit = 0;
7536 inp->inp_policyresult.results.route_rule_id = 0;
7537 if (necp_socket_bypass(override_local_addr, override_remote_addr, inp)) {
7538 inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_PASS;
7540 inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_DROP;
7543 return (NECP_KERNEL_POLICY_ID_NONE);
7546 // Check for loopback exception
7547 if (necp_socket_bypass(override_local_addr, override_remote_addr, inp)) {
7548 // Mark socket as a pass
7549 inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
7550 inp->inp_policyresult.skip_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
7551 inp->inp_policyresult.policy_gencount = 0;
7552 inp->inp_policyresult.app_id = 0;
7553 inp->inp_policyresult.flowhash = 0;
7554 inp->inp_policyresult.results.filter_control_unit = 0;
7555 inp->inp_policyresult.results.route_rule_id = 0;
7556 inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_PASS;
7557 return (NECP_KERNEL_POLICY_ID_NONE);
7561 lck_rw_lock_shared(&necp_kernel_policy_lock);
7563 necp_socket_fillout_info_locked(inp, override_local_addr, override_remote_addr, override_bound_interface, &info);
7564 inp->inp_policyresult.app_id = info.application_id;
7567 u_int32_t flowhash = necp_socket_calc_flowhash_locked(&info);
7568 if (inp->inp_policyresult.policy_id != NECP_KERNEL_POLICY_ID_NONE &&
7569 inp->inp_policyresult.policy_gencount == necp_kernel_socket_policies_gencount &&
7570 inp->inp_policyresult.flowhash == flowhash) {
7571 // If already matched this socket on this generation of table, skip
7574 lck_rw_done(&necp_kernel_policy_lock);
7576 return (inp->inp_policyresult.policy_id);
7579 // Match socket to policy
7580 necp_kernel_policy_id skip_policy_id;
7581 matched_policy = necp_socket_find_policy_match_with_info_locked(necp_kernel_socket_policies_map[NECP_SOCKET_MAP_APP_ID_TO_BUCKET(info.application_id)], &info, &filter_control_unit, &route_rule_id, &service_action, &service, netagent_ids, NULL, NECP_MAX_NETAGENTS, NULL, 0, current_proc(), &skip_policy_id);
7582 // If the socket matched a scoped service policy, mark as Drop if not registered.
7583 // This covers the cases in which a service is required (on demand) but hasn't started yet.
7584 if ((service_action == NECP_KERNEL_POLICY_RESULT_TRIGGER_SCOPED ||
7585 service_action == NECP_KERNEL_POLICY_RESULT_NO_TRIGGER_SCOPED) &&
7586 service.identifier != 0 &&
7587 service.identifier != NECP_NULL_SERVICE_ID) {
7588 bool service_is_registered = FALSE;
7589 struct necp_service_registration *service_registration = NULL;
7590 LIST_FOREACH(service_registration, &necp_registered_service_list, kernel_chain) {
7591 if (service.identifier == service_registration->service_id) {
7592 service_is_registered = TRUE;
7596 if (!service_is_registered) {
7597 // Mark socket as a drop if service is not registered
7598 inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
7599 inp->inp_policyresult.skip_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
7600 inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount;
7601 inp->inp_policyresult.flowhash = flowhash;
7602 inp->inp_policyresult.results.filter_control_unit = 0;
7603 inp->inp_policyresult.results.route_rule_id = 0;
7604 inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_DROP;
7606 if (necp_debug > 1) {
7607 NECPLOG(LOG_DEBUG, "Socket Policy: (BoundInterface %d Proto %d) Dropping packet because service is not registered", info.bound_interface_index, info.protocol);
7611 lck_rw_done(&necp_kernel_policy_lock);
7612 return (NECP_KERNEL_POLICY_ID_NONE);
7616 for (netagent_cursor = 0; netagent_cursor < NECP_MAX_NETAGENTS; netagent_cursor++) {
7617 struct necp_uuid_id_mapping *mapping = NULL;
7618 u_int32_t netagent_id = netagent_ids[netagent_cursor];
7619 if (netagent_id == 0) {
7622 mapping = necp_uuid_lookup_uuid_with_service_id_locked(netagent_id);
7623 if (mapping != NULL) {
7624 u_int32_t agent_flags = 0;
7625 agent_flags = netagent_get_flags(mapping->uuid);
7626 if (agent_flags & NETAGENT_FLAG_REGISTERED) {
7627 if (agent_flags & NETAGENT_FLAG_ACTIVE) {
7629 } else if ((agent_flags & NETAGENT_FLAG_VOLUNTARY) == 0) {
7630 if (agent_flags & NETAGENT_FLAG_KERNEL_ACTIVATED) {
7631 int trigger_error = 0;
7632 trigger_error = netagent_kernel_trigger(mapping->uuid);
7633 if (necp_debug > 1) {
7634 NECPLOG(LOG_DEBUG, "Socket Policy: Triggering inactive agent, error %d", trigger_error);
7638 // Mark socket as a drop if required agent is not active
7639 inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
7640 inp->inp_policyresult.skip_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
7641 inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount;
7642 inp->inp_policyresult.flowhash = flowhash;
7643 inp->inp_policyresult.results.filter_control_unit = 0;
7644 inp->inp_policyresult.results.route_rule_id = 0;
7645 inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_DROP;
7647 if (necp_debug > 1) {
7648 NECPLOG(LOG_DEBUG, "Socket Policy: (BoundInterface %d Proto %d) Dropping packet because agent is not active", info.bound_interface_index, info.protocol);
7652 lck_rw_done(&necp_kernel_policy_lock);
7653 return (NECP_KERNEL_POLICY_ID_NONE);
7658 if (matched_policy) {
7659 matched_policy_id = matched_policy->id;
7660 inp->inp_policyresult.policy_id = matched_policy->id;
7661 inp->inp_policyresult.skip_policy_id = skip_policy_id;
7662 inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount;
7663 inp->inp_policyresult.flowhash = flowhash;
7664 inp->inp_policyresult.results.filter_control_unit = filter_control_unit;
7665 inp->inp_policyresult.results.route_rule_id = route_rule_id;
7666 inp->inp_policyresult.results.result = matched_policy->result;
7667 memcpy(&inp->inp_policyresult.results.result_parameter, &matched_policy->result_parameter, sizeof(matched_policy->result_parameter));
7669 if (necp_socket_is_connected(inp) &&
7670 (matched_policy->result == NECP_KERNEL_POLICY_RESULT_DROP ||
7671 (matched_policy->result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL && !necp_socket_uses_interface(inp, matched_policy->result_parameter.tunnel_interface_index)))) {
7673 NECPLOG(LOG_DEBUG, "Marking socket in state %d as defunct", so->so_state);
7675 sosetdefunct(current_proc(), so, SHUTDOWN_SOCKET_LEVEL_NECP | SHUTDOWN_SOCKET_LEVEL_DISCONNECT_ALL, TRUE);
7676 } else if (necp_socket_is_connected(inp) &&
7677 matched_policy->result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL &&
7678 info.protocol == IPPROTO_TCP) {
7679 // Reset MSS on TCP socket if tunnel policy changes
7680 tcp_mtudisc(inp, 0);
7683 if (necp_debug > 1) {
7684 NECPLOG(LOG_DEBUG, "Socket Policy: %p (BoundInterface %d Proto %d) Policy %d Result %d Parameter %d", inp->inp_socket, info.bound_interface_index, info.protocol, matched_policy->id, matched_policy->result, matched_policy->result_parameter.tunnel_interface_index);
7686 } else if (necp_drop_all_order > 0) {
7687 // Mark socket as a drop if set
7688 inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
7689 inp->inp_policyresult.skip_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
7690 inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount;
7691 inp->inp_policyresult.flowhash = flowhash;
7692 inp->inp_policyresult.results.filter_control_unit = 0;
7693 inp->inp_policyresult.results.route_rule_id = 0;
7694 inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_DROP;
7696 // Mark non-matching socket so we don't re-check it
7697 inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
7698 inp->inp_policyresult.skip_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
7699 inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount;
7700 inp->inp_policyresult.flowhash = flowhash;
7701 inp->inp_policyresult.results.filter_control_unit = filter_control_unit; // We may have matched a filter, so mark it!
7702 inp->inp_policyresult.results.route_rule_id = route_rule_id; // We may have matched a route rule, so mark it!
7703 inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_NONE;
7707 lck_rw_done(&necp_kernel_policy_lock);
7709 return (matched_policy_id);
7713 necp_ip_output_check_policy(struct necp_kernel_ip_output_policy *kernel_policy, necp_kernel_policy_id socket_policy_id, necp_kernel_policy_id socket_skip_policy_id, u_int32_t bound_interface_index, u_int32_t last_interface_index, u_int16_t protocol, union necp_sockaddr_union *local, union necp_sockaddr_union *remote)
7715 if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES)) {
7716 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
7717 u_int32_t cond_bound_interface_index = kernel_policy->cond_bound_interface ? kernel_policy->cond_bound_interface->if_index : 0;
7718 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
7719 if (bound_interface_index == cond_bound_interface_index) {
7720 // No match, matches forbidden interface
7724 if (bound_interface_index != cond_bound_interface_index) {
7725 // No match, does not match required interface
7730 if (bound_interface_index != 0) {
7731 // No match, requires a non-bound packet
7737 if (kernel_policy->condition_mask == 0) {
7741 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID) {
7742 necp_kernel_policy_id matched_policy_id =
7743 kernel_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP ? socket_skip_policy_id : socket_policy_id;
7744 if (matched_policy_id != kernel_policy->cond_policy_id) {
7745 // No match, does not match required id
7750 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LAST_INTERFACE) {
7751 if (last_interface_index != kernel_policy->cond_last_interface_index) {
7756 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
7757 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
7758 if (protocol == kernel_policy->cond_protocol) {
7759 // No match, matches forbidden protocol
7763 if (protocol != kernel_policy->cond_protocol) {
7764 // No match, does not match required protocol
7770 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
7771 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
7772 bool inRange = necp_is_addr_in_range((struct sockaddr *)local, (struct sockaddr *)&kernel_policy->cond_local_start, (struct sockaddr *)&kernel_policy->cond_local_end);
7773 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
7782 } else if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
7783 bool inSubnet = necp_is_addr_in_subnet((struct sockaddr *)local, (struct sockaddr *)&kernel_policy->cond_local_start, kernel_policy->cond_local_prefix);
7784 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
7796 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
7797 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
7798 bool inRange = necp_is_addr_in_range((struct sockaddr *)remote, (struct sockaddr *)&kernel_policy->cond_remote_start, (struct sockaddr *)&kernel_policy->cond_remote_end);
7799 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
7808 } else if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
7809 bool inSubnet = necp_is_addr_in_subnet((struct sockaddr *)remote, (struct sockaddr *)&kernel_policy->cond_remote_start, kernel_policy->cond_remote_prefix);
7810 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
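/*
 * Illustrative sketch (not part of the original source): IP-output
 * policies can match on the policy ID that the socket layer already
 * stamped on the packet, so a decision made during socket matching can be
 * recognized again at IP output time. A reduced user-space model follows;
 * the EX_* names and both structs are hypothetical, example-only names.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EX_CONDITION_POLICY_ID 0x1  // Hypothetical condition bit

struct ex_output_policy {
	uint32_t condition_mask;
	uint32_t cond_policy_id;
};

struct ex_packet {
	uint32_t socket_policy_id;  // Stamped when the socket was matched
};

static bool
ex_output_policy_applies(const struct ex_output_policy *policy,
    const struct ex_packet *packet)
{
	if (policy->condition_mask & EX_CONDITION_POLICY_ID) {
		if (packet->socket_policy_id != policy->cond_policy_id) {
			return false;  // Packet was not tagged by the expected socket policy
		}
	}
	return true;
}

int
main(void)
{
	struct ex_output_policy policy = { EX_CONDITION_POLICY_ID, 42 };
	struct ex_packet tagged = { 42 };
	struct ex_packet untagged = { 0 };
	printf("tagged applies: %d, untagged applies: %d\n",
	    ex_output_policy_applies(&policy, &tagged),
	    ex_output_policy_applies(&policy, &untagged));
	return 0;
}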
7825 static inline struct necp_kernel_ip_output_policy *
7826 necp_ip_output_find_policy_match_locked(necp_kernel_policy_id socket_policy_id, necp_kernel_policy_id socket_skip_policy_id, u_int32_t bound_interface_index, u_int32_t last_interface_index, u_int16_t protocol, union necp_sockaddr_union *local_addr, union necp_sockaddr_union *remote_addr)
7828 u_int32_t skip_order = 0;
7829 u_int32_t skip_session_order = 0;
7831 struct necp_kernel_ip_output_policy *matched_policy = NULL;
7832 struct necp_kernel_ip_output_policy **policy_search_array = necp_kernel_ip_output_policies_map[NECP_IP_OUTPUT_MAP_ID_TO_BUCKET(socket_policy_id)];
7833 if (policy_search_array != NULL) {
7834 for (i = 0; policy_search_array[i] != NULL; i++) {
7835 if (necp_drop_all_order != 0 && policy_search_array[i]->session_order >= necp_drop_all_order) {
7836 // We've hit a drop all rule
7839 if (skip_session_order && policy_search_array[i]->session_order >= skip_session_order) {
7842 skip_session_order = 0;
7845 if (policy_search_array[i]->order < skip_order) {
7851 skip_session_order = 0;
7853 } else if (skip_session_order) {
7857 if (necp_ip_output_check_policy(policy_search_array[i], socket_policy_id, socket_skip_policy_id, bound_interface_index, last_interface_index, protocol, local_addr, remote_addr)) {
7858 // Passed all tests, found a match
7859 matched_policy = policy_search_array[i];
7861 if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
7862 skip_order = policy_search_array[i]->result_parameter.skip_policy_order;
7863 skip_session_order = policy_search_array[i]->session_order + 1;
7872 return (matched_policy);
7876 necp_output_bypass(struct mbuf *packet)
7878 if (necp_pass_loopback > 0 && necp_is_loopback(NULL, NULL, NULL, packet)) {
7881 if (necp_pass_keepalives > 0 && necp_get_is_keepalive_from_packet(packet)) {
7884 if (necp_is_intcoproc(NULL, packet)) {
necp_kernel_policy_id
necp_ip_output_find_policy_match(struct mbuf *packet, int flags, struct ip_out_args *ipoa, necp_kernel_policy_result *result, necp_kernel_policy_result_parameter *result_parameter)
{
	struct ip *ip = NULL;
	int hlen = sizeof(struct ip);
	necp_kernel_policy_id socket_policy_id = NECP_KERNEL_POLICY_ID_NONE;
	necp_kernel_policy_id socket_skip_policy_id = NECP_KERNEL_POLICY_ID_NONE;
	necp_kernel_policy_id matched_policy_id = NECP_KERNEL_POLICY_ID_NONE;
	struct necp_kernel_ip_output_policy *matched_policy = NULL;
	u_int16_t protocol = 0;
	u_int32_t bound_interface_index = 0;
	u_int32_t last_interface_index = 0;
	union necp_sockaddr_union local_addr;
	union necp_sockaddr_union remote_addr;

	if (result_parameter) {
		memset(result_parameter, 0, sizeof(*result_parameter));
	}

	if (packet == NULL) {
		return (NECP_KERNEL_POLICY_ID_NONE);
	}

	socket_policy_id = necp_get_policy_id_from_packet(packet);
	socket_skip_policy_id = necp_get_skip_policy_id_from_packet(packet);

	// Exit early for an empty list
	// Don't lock. Possible race condition, but we don't want the performance hit.
	if (necp_kernel_ip_output_policies_count == 0 ||
	    ((socket_policy_id == NECP_KERNEL_POLICY_ID_NONE) && necp_kernel_ip_output_policies_non_id_count == 0)) {
		if (necp_drop_all_order > 0) {
			matched_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
			if (result) {
				if (necp_output_bypass(packet)) {
					*result = NECP_KERNEL_POLICY_RESULT_PASS;
				} else {
					*result = NECP_KERNEL_POLICY_RESULT_DROP;
				}
			}
		}

		return (matched_policy_id);
	}

	// Check for loopback exception
	if (necp_output_bypass(packet)) {
		matched_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
		if (result) {
			*result = NECP_KERNEL_POLICY_RESULT_PASS;
		}
		return (matched_policy_id);
	}

	last_interface_index = necp_get_last_interface_index_from_packet(packet);

	// Process packet to get relevant fields
	ip = mtod(packet, struct ip *);
#ifdef _IP_VHL
	hlen = _IP_VHL_HL(ip->ip_vhl) << 2;
#else
	hlen = ip->ip_hl << 2;
#endif

	protocol = ip->ip_p;

	if ((flags & IP_OUTARGS) && (ipoa != NULL) &&
	    (ipoa->ipoa_flags & IPOAF_BOUND_IF) &&
	    ipoa->ipoa_boundif != IFSCOPE_NONE) {
		bound_interface_index = ipoa->ipoa_boundif;
	}

	local_addr.sin.sin_family = AF_INET;
	local_addr.sin.sin_len = sizeof(struct sockaddr_in);
	memcpy(&local_addr.sin.sin_addr, &ip->ip_src, sizeof(ip->ip_src));

	remote_addr.sin.sin_family = AF_INET;
	remote_addr.sin.sin_len = sizeof(struct sockaddr_in);
	memcpy(&((struct sockaddr_in *)&remote_addr)->sin_addr, &ip->ip_dst, sizeof(ip->ip_dst));

	switch (protocol) {
	case IPPROTO_TCP: {
		struct tcphdr th;
		if ((int)(hlen + sizeof(th)) <= packet->m_pkthdr.len) {
			m_copydata(packet, hlen, sizeof(th), (u_int8_t *)&th);
			((struct sockaddr_in *)&local_addr)->sin_port = th.th_sport;
			((struct sockaddr_in *)&remote_addr)->sin_port = th.th_dport;
		}
		break;
	}
	case IPPROTO_UDP: {
		struct udphdr uh;
		if ((int)(hlen + sizeof(uh)) <= packet->m_pkthdr.len) {
			m_copydata(packet, hlen, sizeof(uh), (u_int8_t *)&uh);
			((struct sockaddr_in *)&local_addr)->sin_port = uh.uh_sport;
			((struct sockaddr_in *)&remote_addr)->sin_port = uh.uh_dport;
		}
		break;
	}
	default: {
		((struct sockaddr_in *)&local_addr)->sin_port = 0;
		((struct sockaddr_in *)&remote_addr)->sin_port = 0;
		break;
	}
	}

	// Match packet to policy
	lck_rw_lock_shared(&necp_kernel_policy_lock);
	matched_policy = necp_ip_output_find_policy_match_locked(socket_policy_id, socket_skip_policy_id, bound_interface_index, last_interface_index, protocol, &local_addr, &remote_addr);
	if (matched_policy) {
		matched_policy_id = matched_policy->id;
		if (result) {
			*result = matched_policy->result;
		}

		if (result_parameter) {
			memcpy(result_parameter, &matched_policy->result_parameter, sizeof(matched_policy->result_parameter));
		}

		if (necp_debug > 1) {
			NECPLOG(LOG_DEBUG, "IP Output: (ID %d BoundInterface %d LastInterface %d Proto %d) Policy %d Result %d Parameter %d", socket_policy_id, bound_interface_index, last_interface_index, protocol, matched_policy->id, matched_policy->result, matched_policy->result_parameter.tunnel_interface_index);
		}
	} else if (necp_drop_all_order > 0) {
		matched_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
		if (result) {
			*result = NECP_KERNEL_POLICY_RESULT_DROP;
		}
	}

	lck_rw_done(&necp_kernel_policy_lock);

	return (matched_policy_id);
}
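/*
 * Typical caller pattern (informal sketch only, not the actual ip_output() code):
 *
 *	necp_kernel_policy_result necp_result = 0;
 *	necp_kernel_policy_result_parameter necp_param;
 *	necp_kernel_policy_id necp_id = necp_ip_output_find_policy_match(m, flags, ipoa, &necp_result, &necp_param);
 *	if (necp_result == NECP_KERNEL_POLICY_RESULT_DROP) {
 *		// discard the packet
 *	} else if (necp_result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL) {
 *		// re-scope the packet to necp_param.tunnel_interface_index
 *	}
 */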
necp_kernel_policy_id
necp_ip6_output_find_policy_match(struct mbuf *packet, int flags, struct ip6_out_args *ip6oa, necp_kernel_policy_result *result, necp_kernel_policy_result_parameter *result_parameter)
{
	struct ip6_hdr *ip6 = NULL;
	int next = -1;
	int offset = 0;
	necp_kernel_policy_id socket_policy_id = NECP_KERNEL_POLICY_ID_NONE;
	necp_kernel_policy_id socket_skip_policy_id = NECP_KERNEL_POLICY_ID_NONE;
	necp_kernel_policy_id matched_policy_id = NECP_KERNEL_POLICY_ID_NONE;
	struct necp_kernel_ip_output_policy *matched_policy = NULL;
	u_int16_t protocol = 0;
	u_int32_t bound_interface_index = 0;
	u_int32_t last_interface_index = 0;
	union necp_sockaddr_union local_addr;
	union necp_sockaddr_union remote_addr;

	if (result_parameter) {
		memset(result_parameter, 0, sizeof(*result_parameter));
	}

	if (packet == NULL) {
		return (NECP_KERNEL_POLICY_ID_NONE);
	}

	socket_policy_id = necp_get_policy_id_from_packet(packet);
	socket_skip_policy_id = necp_get_skip_policy_id_from_packet(packet);

	// Exit early for an empty list
	// Don't lock. Possible race condition, but we don't want the performance hit.
	if (necp_kernel_ip_output_policies_count == 0 ||
	    ((socket_policy_id == NECP_KERNEL_POLICY_ID_NONE) && necp_kernel_ip_output_policies_non_id_count == 0)) {
		if (necp_drop_all_order > 0) {
			matched_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
			if (result) {
				if (necp_output_bypass(packet)) {
					*result = NECP_KERNEL_POLICY_RESULT_PASS;
				} else {
					*result = NECP_KERNEL_POLICY_RESULT_DROP;
				}
			}
		}

		return (matched_policy_id);
	}

	// Check for loopback exception
	if (necp_output_bypass(packet)) {
		matched_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
		if (result) {
			*result = NECP_KERNEL_POLICY_RESULT_PASS;
		}
		return (matched_policy_id);
	}

	last_interface_index = necp_get_last_interface_index_from_packet(packet);

	// Process packet to get relevant fields
	ip6 = mtod(packet, struct ip6_hdr *);

	if ((flags & IPV6_OUTARGS) && (ip6oa != NULL) &&
	    (ip6oa->ip6oa_flags & IP6OAF_BOUND_IF) &&
	    ip6oa->ip6oa_boundif != IFSCOPE_NONE) {
		bound_interface_index = ip6oa->ip6oa_boundif;
	}

	((struct sockaddr_in6 *)&local_addr)->sin6_family = AF_INET6;
	((struct sockaddr_in6 *)&local_addr)->sin6_len = sizeof(struct sockaddr_in6);
	memcpy(&((struct sockaddr_in6 *)&local_addr)->sin6_addr, &ip6->ip6_src, sizeof(ip6->ip6_src));

	((struct sockaddr_in6 *)&remote_addr)->sin6_family = AF_INET6;
	((struct sockaddr_in6 *)&remote_addr)->sin6_len = sizeof(struct sockaddr_in6);
	memcpy(&((struct sockaddr_in6 *)&remote_addr)->sin6_addr, &ip6->ip6_dst, sizeof(ip6->ip6_dst));

	offset = ip6_lasthdr(packet, 0, IPPROTO_IPV6, &next);
	if (offset >= 0 && packet->m_pkthdr.len >= offset) {
		protocol = next;
		switch (protocol) {
		case IPPROTO_TCP: {
			struct tcphdr th;
			if ((int)(offset + sizeof(th)) <= packet->m_pkthdr.len) {
				m_copydata(packet, offset, sizeof(th), (u_int8_t *)&th);
				((struct sockaddr_in6 *)&local_addr)->sin6_port = th.th_sport;
				((struct sockaddr_in6 *)&remote_addr)->sin6_port = th.th_dport;
			}
			break;
		}
		case IPPROTO_UDP: {
			struct udphdr uh;
			if ((int)(offset + sizeof(uh)) <= packet->m_pkthdr.len) {
				m_copydata(packet, offset, sizeof(uh), (u_int8_t *)&uh);
				((struct sockaddr_in6 *)&local_addr)->sin6_port = uh.uh_sport;
				((struct sockaddr_in6 *)&remote_addr)->sin6_port = uh.uh_dport;
			}
			break;
		}
		default: {
			((struct sockaddr_in6 *)&local_addr)->sin6_port = 0;
			((struct sockaddr_in6 *)&remote_addr)->sin6_port = 0;
			break;
		}
		}
	}

	// Match packet to policy
	lck_rw_lock_shared(&necp_kernel_policy_lock);
	matched_policy = necp_ip_output_find_policy_match_locked(socket_policy_id, socket_skip_policy_id, bound_interface_index, last_interface_index, protocol, &local_addr, &remote_addr);
	if (matched_policy) {
		matched_policy_id = matched_policy->id;
		if (result) {
			*result = matched_policy->result;
		}

		if (result_parameter) {
			memcpy(result_parameter, &matched_policy->result_parameter, sizeof(matched_policy->result_parameter));
		}

		if (necp_debug > 1) {
			NECPLOG(LOG_DEBUG, "IP6 Output: (ID %d BoundInterface %d LastInterface %d Proto %d) Policy %d Result %d Parameter %d", socket_policy_id, bound_interface_index, last_interface_index, protocol, matched_policy->id, matched_policy->result, matched_policy->result_parameter.tunnel_interface_index);
		}
	} else if (necp_drop_all_order > 0) {
		matched_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
		if (result) {
			*result = NECP_KERNEL_POLICY_RESULT_DROP;
		}
	}

	lck_rw_done(&necp_kernel_policy_lock);

	return (matched_policy_id);
}
static bool
necp_is_addr_in_range(struct sockaddr *addr, struct sockaddr *range_start, struct sockaddr *range_end)
{
	int cmp = 0;

	if (addr == NULL || range_start == NULL || range_end == NULL) {
		return (FALSE);
	}

	/* Must be greater than or equal to start */
	cmp = necp_addr_compare(addr, range_start, 1);
	if (cmp != 0 && cmp != 1) {
		return (FALSE);
	}

	/* Must be less than or equal to end */
	cmp = necp_addr_compare(addr, range_end, 1);
	if (cmp != 0 && cmp != -1) {
		return (FALSE);
	}

	return (TRUE);
}
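/*
 * Range semantics: necp_addr_compare() returns -1/0/1 for ordered,
 * comparable sockaddrs, so addr is "in range" [range_start, range_end]
 * exactly when it compares >= the start (0 or 1) and <= the end (0 or -1).
 * A result of 2 (not comparable, e.g. mismatched family or conflicting
 * port ordering) fails the check.
 */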
static bool
necp_is_range_in_range(struct sockaddr *inner_range_start, struct sockaddr *inner_range_end, struct sockaddr *range_start, struct sockaddr *range_end)
{
	int cmp = 0;

	if (inner_range_start == NULL || inner_range_end == NULL || range_start == NULL || range_end == NULL) {
		return (FALSE);
	}

	/* Must be greater than or equal to start */
	cmp = necp_addr_compare(inner_range_start, range_start, 1);
	if (cmp != 0 && cmp != 1) {
		return (FALSE);
	}

	/* Must be less than or equal to end */
	cmp = necp_addr_compare(inner_range_end, range_end, 1);
	if (cmp != 0 && cmp != -1) {
		return (FALSE);
	}

	return (TRUE);
}
static bool
necp_is_addr_in_subnet(struct sockaddr *addr, struct sockaddr *subnet_addr, u_int8_t subnet_prefix)
{
	if (addr == NULL || subnet_addr == NULL) {
		return (FALSE);
	}

	if (addr->sa_family != subnet_addr->sa_family || addr->sa_len != subnet_addr->sa_len) {
		return (FALSE);
	}

	switch (addr->sa_family) {
	case AF_INET: {
		if (satosin(subnet_addr)->sin_port != 0 &&
		    satosin(addr)->sin_port != satosin(subnet_addr)->sin_port) {
			return (FALSE);
		}
		return (necp_buffer_compare_with_bit_prefix((u_int8_t *)&satosin(addr)->sin_addr, (u_int8_t *)&satosin(subnet_addr)->sin_addr, subnet_prefix));
	}
	case AF_INET6: {
		if (satosin6(subnet_addr)->sin6_port != 0 &&
		    satosin6(addr)->sin6_port != satosin6(subnet_addr)->sin6_port) {
			return (FALSE);
		}
		if (satosin6(addr)->sin6_scope_id &&
		    satosin6(subnet_addr)->sin6_scope_id &&
		    satosin6(addr)->sin6_scope_id != satosin6(subnet_addr)->sin6_scope_id) {
			return (FALSE);
		}
		return (necp_buffer_compare_with_bit_prefix((u_int8_t *)&satosin6(addr)->sin6_addr, (u_int8_t *)&satosin6(subnet_addr)->sin6_addr, subnet_prefix));
	}
	default: {
		break;
	}
	}

	return (FALSE);
}
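/*
 * Example: for AF_INET, 192.168.1.20 is in 192.168.1.0/24 because the
 * first 24 bits of the addresses match. If the subnet sockaddr carries a
 * non-zero port, the candidate's port must also match exactly before the
 * prefix is compared.
 */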
/*
 * Comparison return values:
 *   -1: sa1 < sa2
 *    0: sa1 == sa2
 *    1: sa1 > sa2
 *    2: Not comparable or error
 */
static int
necp_addr_compare(struct sockaddr *sa1, struct sockaddr *sa2, int check_port)
{
	int result = 0;
	int port_result = 0;

	if (sa1->sa_family != sa2->sa_family || sa1->sa_len != sa2->sa_len) {
		return (2);
	}

	if (sa1->sa_len == 0) {
		return (0);
	}

	switch (sa1->sa_family) {
	case AF_INET: {
		if (sa1->sa_len != sizeof(struct sockaddr_in)) {
			return (2);
		}

		result = memcmp(&satosin(sa1)->sin_addr.s_addr, &satosin(sa2)->sin_addr.s_addr, sizeof(satosin(sa1)->sin_addr.s_addr));

		if (check_port) {
			if (satosin(sa1)->sin_port < satosin(sa2)->sin_port) {
				port_result = -1;
			} else if (satosin(sa1)->sin_port > satosin(sa2)->sin_port) {
				port_result = 1;
			}

			if (result == 0) {
				result = port_result;
			} else if ((result > 0 && port_result < 0) || (result < 0 && port_result > 0)) {
				return (2);
			}
		}

		break;
	}
	case AF_INET6: {
		if (sa1->sa_len != sizeof(struct sockaddr_in6)) {
			return (2);
		}

		if (satosin6(sa1)->sin6_scope_id != satosin6(sa2)->sin6_scope_id) {
			return (2);
		}

		result = memcmp(&satosin6(sa1)->sin6_addr.s6_addr[0], &satosin6(sa2)->sin6_addr.s6_addr[0], sizeof(struct in6_addr));

		if (check_port) {
			if (satosin6(sa1)->sin6_port < satosin6(sa2)->sin6_port) {
				port_result = -1;
			} else if (satosin6(sa1)->sin6_port > satosin6(sa2)->sin6_port) {
				port_result = 1;
			}

			if (result == 0) {
				result = port_result;
			} else if ((result > 0 && port_result < 0) || (result < 0 && port_result > 0)) {
				return (2);
			}
		}

		break;
	}
	default: {
		result = memcmp(sa1, sa2, sa1->sa_len);
		break;
	}
	}

	if (result < 0) {
		result = (-1);
	} else if (result > 0) {
		result = (1);
	}

	return (result);
}
static bool
necp_buffer_compare_with_bit_prefix(u_int8_t *p1, u_int8_t *p2, u_int32_t bits)
{
	u_int8_t mask;

	/* Handle null pointers */
	if (p1 == NULL || p2 == NULL) {
		return (p1 == p2);
	}

	while (bits >= 8) {
		if (*p1++ != *p2++) {
			return (FALSE);
		}
		bits -= 8;
	}

	if (bits > 0) {
		mask = ~((1 << (8 - bits)) - 1);
		if ((*p1 & mask) != (*p2 & mask)) {
			return (FALSE);
		}
	}
	return (TRUE);
}
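/*
 * Worked example (hypothetical values): with a 20-bit prefix, the first
 * two octets must match exactly and only the top 4 bits of the third
 * octet are compared:
 *
 *	u_int8_t a[4] = { 10, 1, 0x12, 0x34 };	// 10.1.18.52
 *	u_int8_t b[4] = { 10, 1, 0x1f, 0xff };	// 10.1.31.255
 *	necp_buffer_compare_with_bit_prefix(a, b, 20);	// TRUE; both fall in 10.1.16.0/20
 */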
static bool
necp_update_qos_marking(struct ifnet *ifp, u_int32_t route_rule_id)
{
	bool qos_marking = FALSE;
	int exception_index = 0;
	struct necp_route_rule *route_rule = NULL;

	route_rule = necp_lookup_route_rule_locked(&necp_route_rules, route_rule_id);
	if (route_rule == NULL) {
		qos_marking = FALSE;
		goto done;
	}

	qos_marking = (route_rule->default_action == NECP_ROUTE_RULE_QOS_MARKING) ? TRUE : FALSE;

	if (ifp == NULL) {
		goto done;
	}

	for (exception_index = 0; exception_index < MAX_ROUTE_RULE_INTERFACES; exception_index++) {
		if (route_rule->exception_if_indices[exception_index] == 0) {
			break;
		}
		if (route_rule->exception_if_actions[exception_index] != NECP_ROUTE_RULE_QOS_MARKING) {
			continue;
		}
		if (route_rule->exception_if_indices[exception_index] == ifp->if_index) {
			qos_marking = TRUE;
			if (necp_debug > 2) {
				NECPLOG(LOG_DEBUG, "QoS Marking: Interface match %d for Rule %d Allowed %d",
				    route_rule->exception_if_indices[exception_index], route_rule_id, qos_marking);
			}
			goto done;
		}
	}

	if ((route_rule->cellular_action == NECP_ROUTE_RULE_QOS_MARKING && IFNET_IS_CELLULAR(ifp)) ||
	    (route_rule->wifi_action == NECP_ROUTE_RULE_QOS_MARKING && IFNET_IS_WIFI(ifp)) ||
	    (route_rule->wired_action == NECP_ROUTE_RULE_QOS_MARKING && IFNET_IS_WIRED(ifp)) ||
	    (route_rule->expensive_action == NECP_ROUTE_RULE_QOS_MARKING && IFNET_IS_EXPENSIVE(ifp))) {
		qos_marking = TRUE;
		if (necp_debug > 2) {
			NECPLOG(LOG_DEBUG, "QoS Marking: C:%d WF:%d W:%d E:%d for Rule %d Allowed %d",
			    route_rule->cellular_action, route_rule->wifi_action, route_rule->wired_action,
			    route_rule->expensive_action, route_rule_id, qos_marking);
		}
		goto done;
	}
done:
	if (necp_debug > 1) {
		NECPLOG(LOG_DEBUG, "QoS Marking: Rule %d ifp %s Allowed %d",
		    route_rule_id, ifp ? ifp->if_xname : "", qos_marking);
	}
	return (qos_marking);
}
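/*
 * Precedence sketch: a per-interface exception entry with the QOS_MARKING
 * action wins first, then the interface-type checks (cellular/wifi/wired/
 * expensive), and only then the rule's default action. A rule whose
 * default is "no marking" can therefore still allow marking on, say,
 * wired interfaces only.
 */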
void
necp_socket_update_qos_marking(struct inpcb *inp, struct rtentry *route, struct ifnet *interface, u_int32_t route_rule_id)
{
	bool qos_marking = FALSE;
	struct ifnet *ifp = interface = NULL;

	if (net_qos_policy_restricted == 0) {
		return;
	}
	if (inp->inp_socket == NULL) {
		return;
	}
	if ((inp->inp_socket->so_flags1 & SOF1_QOSMARKING_POLICY_OVERRIDE)) {
		return;
	}
	/*
	 * This is racy but we do not need the performance hit of taking necp_kernel_policy_lock
	 */
	if (inp->inp_policyresult.results.qos_marking_gencount == necp_kernel_socket_policies_gencount) {
		return;
	}

	lck_rw_lock_shared(&necp_kernel_policy_lock);

	if (ifp == NULL && route != NULL) {
		ifp = route->rt_ifp;
	}
	/*
	 * By default, until we have an interface, do not mark and reevaluate the QoS marking policy
	 */
	if (ifp == NULL || route_rule_id == 0) {
		qos_marking = FALSE;
		goto done;
	}

	if (ROUTE_RULE_IS_AGGREGATE(route_rule_id)) {
		struct necp_aggregate_route_rule *aggregate_route_rule = necp_lookup_aggregate_route_rule_locked(route_rule_id);
		if (aggregate_route_rule != NULL) {
			int index = 0;
			for (index = 0; index < MAX_AGGREGATE_ROUTE_RULES; index++) {
				u_int32_t sub_route_rule_id = aggregate_route_rule->rule_ids[index];
				if (sub_route_rule_id == 0) {
					break;
				}
				qos_marking = necp_update_qos_marking(ifp, sub_route_rule_id);
				if (qos_marking == TRUE) {
					break;
				}
			}
		}
	} else {
		qos_marking = necp_update_qos_marking(ifp, route_rule_id);
	}
	/*
	 * Now that we have an interface we remember the gencount
	 */
	inp->inp_policyresult.results.qos_marking_gencount = necp_kernel_socket_policies_gencount;

done:
	lck_rw_done(&necp_kernel_policy_lock);

	if (qos_marking == TRUE) {
		inp->inp_socket->so_flags1 |= SOF1_QOSMARKING_ALLOWED;
	} else {
		inp->inp_socket->so_flags1 &= ~SOF1_QOSMARKING_ALLOWED;
	}
}
static bool
necp_route_is_lqm_abort(struct ifnet *ifp, struct ifnet *delegated_ifp)
{
	if (ifp != NULL &&
	    (ifp->if_interface_state.valid_bitmask & IF_INTERFACE_STATE_LQM_STATE_VALID) &&
	    ifp->if_interface_state.lqm_state == IFNET_LQM_THRESH_ABORT) {
		return (true);
	}
	if (delegated_ifp != NULL &&
	    (delegated_ifp->if_interface_state.valid_bitmask & IF_INTERFACE_STATE_LQM_STATE_VALID) &&
	    delegated_ifp->if_interface_state.lqm_state == IFNET_LQM_THRESH_ABORT) {
		return (true);
	}
	return (false);
}
static bool
necp_route_is_allowed_inner(struct rtentry *route, struct ifnet *ifp, u_int32_t route_rule_id, u_int32_t *interface_type_denied)
{
	bool default_is_allowed = TRUE;
	u_int8_t type_aggregate_action = NECP_ROUTE_RULE_NONE;
	int exception_index = 0;
	struct ifnet *delegated_ifp = NULL;
	struct necp_route_rule *route_rule = NULL;

	route_rule = necp_lookup_route_rule_locked(&necp_route_rules, route_rule_id);
	if (route_rule == NULL) {
		return (TRUE);
	}

	default_is_allowed = (route_rule->default_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? FALSE : TRUE;
	if (ifp == NULL && route != NULL) {
		ifp = route->rt_ifp;
	}
	if (ifp == NULL) {
		if (necp_debug > 1 && !default_is_allowed) {
			NECPLOG(LOG_DEBUG, "Route Allowed: No interface for route, using default for Rule %d Allowed %d", route_rule_id, default_is_allowed);
		}
		return (default_is_allowed);
	}

	delegated_ifp = ifp->if_delegated.ifp;
	for (exception_index = 0; exception_index < MAX_ROUTE_RULE_INTERFACES; exception_index++) {
		if (route_rule->exception_if_indices[exception_index] == 0) {
			break;
		}
		if (route_rule->exception_if_indices[exception_index] == ifp->if_index ||
		    (delegated_ifp != NULL && route_rule->exception_if_indices[exception_index] == delegated_ifp->if_index)) {
			if (route_rule->exception_if_actions[exception_index] == NECP_ROUTE_RULE_DENY_LQM_ABORT) {
				const bool lqm_abort = necp_route_is_lqm_abort(ifp, delegated_ifp);
				if (necp_debug > 1 && lqm_abort) {
					NECPLOG(LOG_DEBUG, "Route Allowed: Interface match %d for Rule %d Deny LQM Abort",
					    route_rule->exception_if_indices[exception_index], route_rule_id);
				}
				if (lqm_abort) {
					return (FALSE);
				}
			} else if (IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(route_rule->exception_if_actions[exception_index])) {
				if (necp_debug > 1) {
					NECPLOG(LOG_DEBUG, "Route Allowed: Interface match %d for Rule %d Allowed %d", route_rule->exception_if_indices[exception_index], route_rule_id, ((route_rule->exception_if_actions[exception_index] == NECP_ROUTE_RULE_DENY_INTERFACE) ? FALSE : TRUE));
				}
				return ((route_rule->exception_if_actions[exception_index] == NECP_ROUTE_RULE_DENY_INTERFACE) ? FALSE : TRUE);
			}
		}
	}

	if (IFNET_IS_CELLULAR(ifp)) {
		if (route_rule->cellular_action == NECP_ROUTE_RULE_DENY_LQM_ABORT) {
			if (necp_route_is_lqm_abort(ifp, delegated_ifp)) {
				if (interface_type_denied != NULL) {
					*interface_type_denied = IFRTYPE_FUNCTIONAL_CELLULAR;
				}
				// Mark aggregate action as deny
				type_aggregate_action = NECP_ROUTE_RULE_DENY_INTERFACE;
			}
		} else if (IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(route_rule->cellular_action)) {
			if (interface_type_denied != NULL) {
				*interface_type_denied = IFRTYPE_FUNCTIONAL_CELLULAR;
			}
			if (type_aggregate_action == NECP_ROUTE_RULE_NONE ||
			    (type_aggregate_action == NECP_ROUTE_RULE_ALLOW_INTERFACE &&
			    route_rule->cellular_action == NECP_ROUTE_RULE_DENY_INTERFACE)) {
				// Deny wins if there is a conflict
				type_aggregate_action = route_rule->cellular_action;
			}
		}
	}

	if (IFNET_IS_WIFI(ifp)) {
		if (route_rule->wifi_action == NECP_ROUTE_RULE_DENY_LQM_ABORT) {
			if (necp_route_is_lqm_abort(ifp, delegated_ifp)) {
				if (interface_type_denied != NULL) {
					*interface_type_denied = IFRTYPE_FUNCTIONAL_WIFI_INFRA;
				}
				// Mark aggregate action as deny
				type_aggregate_action = NECP_ROUTE_RULE_DENY_INTERFACE;
			}
		} else if (IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(route_rule->wifi_action)) {
			if (interface_type_denied != NULL) {
				*interface_type_denied = IFRTYPE_FUNCTIONAL_WIFI_INFRA;
			}
			if (type_aggregate_action == NECP_ROUTE_RULE_NONE ||
			    (type_aggregate_action == NECP_ROUTE_RULE_ALLOW_INTERFACE &&
			    route_rule->wifi_action == NECP_ROUTE_RULE_DENY_INTERFACE)) {
				// Deny wins if there is a conflict
				type_aggregate_action = route_rule->wifi_action;
			}
		}
	}

	if (IFNET_IS_WIRED(ifp)) {
		if (route_rule->wired_action == NECP_ROUTE_RULE_DENY_LQM_ABORT) {
			if (necp_route_is_lqm_abort(ifp, delegated_ifp)) {
				if (interface_type_denied != NULL) {
					*interface_type_denied = IFRTYPE_FUNCTIONAL_WIRED;
				}
				// Mark aggregate action as deny
				type_aggregate_action = NECP_ROUTE_RULE_DENY_INTERFACE;
			}
		} else if (IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(route_rule->wired_action)) {
			if (interface_type_denied != NULL) {
				*interface_type_denied = IFRTYPE_FUNCTIONAL_WIRED;
			}
			if (type_aggregate_action == NECP_ROUTE_RULE_NONE ||
			    (type_aggregate_action == NECP_ROUTE_RULE_ALLOW_INTERFACE &&
			    route_rule->wired_action == NECP_ROUTE_RULE_DENY_INTERFACE)) {
				// Deny wins if there is a conflict
				type_aggregate_action = route_rule->wired_action;
			}
		}
	}

	if (IFNET_IS_EXPENSIVE(ifp)) {
		if (route_rule->expensive_action == NECP_ROUTE_RULE_DENY_LQM_ABORT) {
			if (necp_route_is_lqm_abort(ifp, delegated_ifp)) {
				// Mark aggregate action as deny
				type_aggregate_action = NECP_ROUTE_RULE_DENY_INTERFACE;
			}
		} else if (IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(route_rule->expensive_action)) {
			if (type_aggregate_action == NECP_ROUTE_RULE_NONE ||
			    (type_aggregate_action == NECP_ROUTE_RULE_ALLOW_INTERFACE &&
			    route_rule->expensive_action == NECP_ROUTE_RULE_DENY_INTERFACE)) {
				// Deny wins if there is a conflict
				type_aggregate_action = route_rule->expensive_action;
			}
		}
	}

	if (type_aggregate_action != NECP_ROUTE_RULE_NONE) {
		if (necp_debug > 1) {
			NECPLOG(LOG_DEBUG, "Route Allowed: C:%d WF:%d W:%d E:%d for Rule %d Allowed %d", route_rule->cellular_action, route_rule->wifi_action, route_rule->wired_action, route_rule->expensive_action, route_rule_id, ((type_aggregate_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? FALSE : TRUE));
		}
		return ((type_aggregate_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? FALSE : TRUE);
	}

	if (necp_debug > 1 && !default_is_allowed) {
		NECPLOG(LOG_DEBUG, "Route Allowed: Using default for Rule %d Allowed %d", route_rule_id, default_is_allowed);
	}
	return (default_is_allowed);
}
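/*
 * Aggregation example: on an interface that is both cellular and
 * expensive, a rule with cellular_action == ALLOW_INTERFACE and
 * expensive_action == DENY_INTERFACE ends up with type_aggregate_action
 * == DENY_INTERFACE, because a deny from any matching interface-type
 * bucket overrides an earlier allow.
 */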
static bool
necp_route_is_allowed(struct rtentry *route, struct ifnet *interface, u_int32_t route_rule_id, u_int32_t *interface_type_denied)
{
	if ((route == NULL && interface == NULL) || route_rule_id == 0) {
		if (necp_debug > 1) {
			NECPLOG(LOG_DEBUG, "Route Allowed: no route or interface, Rule %d Allowed %d", route_rule_id, TRUE);
		}
		return (TRUE);
	}

	if (ROUTE_RULE_IS_AGGREGATE(route_rule_id)) {
		struct necp_aggregate_route_rule *aggregate_route_rule = necp_lookup_aggregate_route_rule_locked(route_rule_id);
		if (aggregate_route_rule != NULL) {
			int index = 0;
			for (index = 0; index < MAX_AGGREGATE_ROUTE_RULES; index++) {
				u_int32_t sub_route_rule_id = aggregate_route_rule->rule_ids[index];
				if (sub_route_rule_id == 0) {
					break;
				}
				if (!necp_route_is_allowed_inner(route, interface, sub_route_rule_id, interface_type_denied)) {
					return (FALSE);
				}
			}
		}
		return (TRUE);
	}

	return (necp_route_is_allowed_inner(route, interface, route_rule_id, interface_type_denied));
}
bool
necp_packet_is_allowed_over_interface(struct mbuf *packet, struct ifnet *interface)
{
	bool is_allowed = TRUE;
	u_int32_t route_rule_id = necp_get_route_rule_id_from_packet(packet);
	if (route_rule_id != 0 &&
	    interface != NULL) {
		lck_rw_lock_shared(&necp_kernel_policy_lock);
		is_allowed = necp_route_is_allowed(NULL, interface, necp_get_route_rule_id_from_packet(packet), NULL);
		lck_rw_done(&necp_kernel_policy_lock);
	}
	return (is_allowed);
}
static bool
necp_netagents_allow_traffic(u_int32_t *netagent_ids, size_t netagent_id_count)
{
	size_t netagent_cursor;
	for (netagent_cursor = 0; netagent_cursor < netagent_id_count; netagent_cursor++) {
		struct necp_uuid_id_mapping *mapping = NULL;
		u_int32_t netagent_id = netagent_ids[netagent_cursor];
		if (netagent_id == 0) {
			break;
		}
		mapping = necp_uuid_lookup_uuid_with_service_id_locked(netagent_id);
		if (mapping != NULL) {
			u_int32_t agent_flags = 0;
			agent_flags = netagent_get_flags(mapping->uuid);
			if (agent_flags & NETAGENT_FLAG_REGISTERED) {
				if (agent_flags & NETAGENT_FLAG_ACTIVE) {
					continue;
				} else if ((agent_flags & NETAGENT_FLAG_VOLUNTARY) == 0) {
					return (FALSE);
				}
			}
		}
	}
	return (TRUE);
}
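/*
 * In short: traffic is blocked only when a registered, non-voluntary
 * agent is not yet active; unregistered agents and inactive voluntary
 * agents do not block the socket.
 */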
static bool
necp_socket_is_allowed_to_send_recv_internal(struct inpcb *inp, struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr, ifnet_t interface, necp_kernel_policy_id *return_policy_id, u_int32_t *return_route_rule_id, necp_kernel_policy_id *return_skip_policy_id)
{
	u_int32_t verifyifindex = interface ? interface->if_index : 0;
	bool allowed_to_receive = TRUE;
	struct necp_socket_info info;
	u_int32_t flowhash = 0;
	necp_kernel_policy_result service_action = 0;
	necp_kernel_policy_service service = { 0, 0 };
	u_int32_t route_rule_id = 0;
	struct rtentry *route = NULL;
	u_int32_t interface_type_denied = IFRTYPE_FUNCTIONAL_UNKNOWN;

	u_int32_t netagent_ids[NECP_MAX_NETAGENTS];
	memset(&netagent_ids, 0, sizeof(netagent_ids));

	if (return_policy_id) {
		*return_policy_id = NECP_KERNEL_POLICY_ID_NONE;
	}
	if (return_skip_policy_id) {
		*return_skip_policy_id = NECP_KERNEL_POLICY_ID_NONE;
	}
	if (return_route_rule_id) {
		*return_route_rule_id = 0;
	}

	if (inp == NULL) {
		goto done;
	}

	route = inp->inp_route.ro_rt;

	// Don't lock. Possible race condition, but we don't want the performance hit.
	if (necp_kernel_socket_policies_count == 0 ||
	    (!(inp->inp_flags2 & INP2_WANT_APP_POLICY) && necp_kernel_socket_policies_non_app_count == 0)) {
		if (necp_drop_all_order > 0) {
			if (necp_socket_bypass(override_local_addr, override_remote_addr, inp)) {
				allowed_to_receive = TRUE;
			} else {
				allowed_to_receive = FALSE;
			}
		}
		goto done;
	}

	// If this socket is connected, or we are not taking addresses into account, try to reuse last result
	if ((necp_socket_is_connected(inp) || (override_local_addr == NULL && override_remote_addr == NULL)) && inp->inp_policyresult.policy_id != NECP_KERNEL_POLICY_ID_NONE) {
		bool policies_have_changed = FALSE;
		bool route_allowed = TRUE;

		if (inp->inp_policyresult.policy_gencount != necp_kernel_socket_policies_gencount) {
			policies_have_changed = TRUE;
		} else {
			if (inp->inp_policyresult.results.route_rule_id != 0) {
				lck_rw_lock_shared(&necp_kernel_policy_lock);
				if (!necp_route_is_allowed(route, interface, inp->inp_policyresult.results.route_rule_id, &interface_type_denied)) {
					route_allowed = FALSE;
				}
				lck_rw_done(&necp_kernel_policy_lock);
			}
		}

		if (!policies_have_changed) {
			if (!route_allowed ||
			    inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_DROP ||
			    inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT ||
			    (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL && interface &&
			    inp->inp_policyresult.results.result_parameter.tunnel_interface_index != verifyifindex)) {
				allowed_to_receive = FALSE;
			} else {
				if (return_policy_id) {
					*return_policy_id = inp->inp_policyresult.policy_id;
				}
				if (return_skip_policy_id) {
					*return_skip_policy_id = inp->inp_policyresult.skip_policy_id;
				}
				if (return_route_rule_id) {
					*return_route_rule_id = inp->inp_policyresult.results.route_rule_id;
				}
			}
			goto done;
		}
	}

	// Check for loopback exception
	if (necp_socket_bypass(override_local_addr, override_remote_addr, inp)) {
		allowed_to_receive = TRUE;
		goto done;
	}

	// Actually calculate policy result
	lck_rw_lock_shared(&necp_kernel_policy_lock);
	necp_socket_fillout_info_locked(inp, override_local_addr, override_remote_addr, 0, &info);

	flowhash = necp_socket_calc_flowhash_locked(&info);
	if (inp->inp_policyresult.policy_id != NECP_KERNEL_POLICY_ID_NONE &&
	    inp->inp_policyresult.policy_gencount == necp_kernel_socket_policies_gencount &&
	    inp->inp_policyresult.flowhash == flowhash) {
		if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_DROP ||
		    inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT ||
		    (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL && interface &&
		    inp->inp_policyresult.results.result_parameter.tunnel_interface_index != verifyifindex) ||
		    (inp->inp_policyresult.results.route_rule_id != 0 &&
		    !necp_route_is_allowed(route, interface, inp->inp_policyresult.results.route_rule_id, &interface_type_denied))) {
			allowed_to_receive = FALSE;
		} else {
			if (return_policy_id) {
				*return_policy_id = inp->inp_policyresult.policy_id;
			}
			if (return_route_rule_id) {
				*return_route_rule_id = inp->inp_policyresult.results.route_rule_id;
			}
			if (return_skip_policy_id) {
				*return_skip_policy_id = inp->inp_policyresult.skip_policy_id;
			}
		}
		lck_rw_done(&necp_kernel_policy_lock);
		goto done;
	}

	struct necp_kernel_socket_policy *matched_policy = necp_socket_find_policy_match_with_info_locked(necp_kernel_socket_policies_map[NECP_SOCKET_MAP_APP_ID_TO_BUCKET(info.application_id)], &info, NULL, &route_rule_id, &service_action, &service, netagent_ids, NULL, NECP_MAX_NETAGENTS, NULL, 0, current_proc(), return_skip_policy_id);
	if (matched_policy != NULL) {
		if (matched_policy->result == NECP_KERNEL_POLICY_RESULT_DROP ||
		    matched_policy->result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT ||
		    (matched_policy->result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL && interface &&
		    matched_policy->result_parameter.tunnel_interface_index != verifyifindex) ||
		    ((service_action == NECP_KERNEL_POLICY_RESULT_TRIGGER_SCOPED ||
		    service_action == NECP_KERNEL_POLICY_RESULT_NO_TRIGGER_SCOPED) &&
		    service.identifier != 0 && service.identifier != NECP_NULL_SERVICE_ID) ||
		    (route_rule_id != 0 &&
		    !necp_route_is_allowed(route, interface, route_rule_id, &interface_type_denied)) ||
		    !necp_netagents_allow_traffic(netagent_ids, NECP_MAX_NETAGENTS)) {
			allowed_to_receive = FALSE;
		} else {
			if (return_policy_id) {
				*return_policy_id = matched_policy->id;
			}
			if (return_route_rule_id) {
				*return_route_rule_id = route_rule_id;
			}
		}
		lck_rw_done(&necp_kernel_policy_lock);

		if (necp_debug > 1 && matched_policy->id != inp->inp_policyresult.policy_id) {
			NECPLOG(LOG_DEBUG, "Socket Send/Recv Policy: Policy %d Allowed %d", return_policy_id ? *return_policy_id : 0, allowed_to_receive);
		}
		goto done;
	} else if (necp_drop_all_order > 0) {
		allowed_to_receive = FALSE;
	} else {
		if (return_policy_id) {
			*return_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
		}
		if (return_route_rule_id) {
			*return_route_rule_id = route_rule_id;
		}
	}

	lck_rw_done(&necp_kernel_policy_lock);

done:
	if (!allowed_to_receive && interface_type_denied != IFRTYPE_FUNCTIONAL_UNKNOWN) {
		soevent(inp->inp_socket, (SO_FILT_HINT_LOCKED | SO_FILT_HINT_IFDENIED));
	}

	return (allowed_to_receive);
}
bool
necp_socket_is_allowed_to_send_recv_v4(struct inpcb *inp, u_int16_t local_port, u_int16_t remote_port, struct in_addr *local_addr, struct in_addr *remote_addr, ifnet_t interface, necp_kernel_policy_id *return_policy_id, u_int32_t *return_route_rule_id, necp_kernel_policy_id *return_skip_policy_id)
{
	struct sockaddr_in local = {};
	struct sockaddr_in remote = {};
	local.sin_family = remote.sin_family = AF_INET;
	local.sin_len = remote.sin_len = sizeof(struct sockaddr_in);
	local.sin_port = local_port;
	remote.sin_port = remote_port;
	memcpy(&local.sin_addr, local_addr, sizeof(local.sin_addr));
	memcpy(&remote.sin_addr, remote_addr, sizeof(remote.sin_addr));

	return (necp_socket_is_allowed_to_send_recv_internal(inp, (struct sockaddr *)&local, (struct sockaddr *)&remote, interface,
	    return_policy_id, return_route_rule_id, return_skip_policy_id));
}
bool
necp_socket_is_allowed_to_send_recv_v6(struct inpcb *inp, u_int16_t local_port, u_int16_t remote_port, struct in6_addr *local_addr, struct in6_addr *remote_addr, ifnet_t interface, necp_kernel_policy_id *return_policy_id, u_int32_t *return_route_rule_id, necp_kernel_policy_id *return_skip_policy_id)
{
	struct sockaddr_in6 local = {};
	struct sockaddr_in6 remote = {};
	local.sin6_family = remote.sin6_family = AF_INET6;
	local.sin6_len = remote.sin6_len = sizeof(struct sockaddr_in6);
	local.sin6_port = local_port;
	remote.sin6_port = remote_port;
	memcpy(&local.sin6_addr, local_addr, sizeof(local.sin6_addr));
	memcpy(&remote.sin6_addr, remote_addr, sizeof(remote.sin6_addr));

	return (necp_socket_is_allowed_to_send_recv_internal(inp, (struct sockaddr *)&local, (struct sockaddr *)&remote, interface,
	    return_policy_id, return_route_rule_id, return_skip_policy_id));
}
bool
necp_socket_is_allowed_to_send_recv(struct inpcb *inp, necp_kernel_policy_id *return_policy_id, u_int32_t *return_route_rule_id,
    necp_kernel_policy_id *return_skip_policy_id)
{
	return (necp_socket_is_allowed_to_send_recv_internal(inp, NULL, NULL, NULL, return_policy_id, return_route_rule_id, return_skip_policy_id));
}
int
necp_mark_packet_from_socket(struct mbuf *packet, struct inpcb *inp, necp_kernel_policy_id policy_id, u_int32_t route_rule_id,
    necp_kernel_policy_id skip_policy_id)
{
	if (packet == NULL || inp == NULL || !(packet->m_flags & M_PKTHDR)) {
		return (EINVAL);
	}

	// Mark ID for Pass and IP Tunnel
	if (policy_id != NECP_KERNEL_POLICY_ID_NONE) {
		packet->m_pkthdr.necp_mtag.necp_policy_id = policy_id;
	} else if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_PASS ||
	    inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL) {
		packet->m_pkthdr.necp_mtag.necp_policy_id = inp->inp_policyresult.policy_id;
	} else {
		packet->m_pkthdr.necp_mtag.necp_policy_id = NECP_KERNEL_POLICY_ID_NONE;
	}
	packet->m_pkthdr.necp_mtag.necp_last_interface_index = 0;
	if (route_rule_id != 0) {
		packet->m_pkthdr.necp_mtag.necp_route_rule_id = route_rule_id;
	} else {
		packet->m_pkthdr.necp_mtag.necp_route_rule_id = inp->inp_policyresult.results.route_rule_id;
	}
	packet->m_pkthdr.necp_mtag.necp_app_id = inp->inp_policyresult.app_id;

	if (skip_policy_id != NECP_KERNEL_POLICY_ID_NONE) {
		packet->m_pkthdr.necp_mtag.necp_skip_policy_id = skip_policy_id;
	}

	return (0);
}
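/*
 * Note: the policy ID stored in the mbuf tag here is what
 * necp_get_policy_id_from_packet() later reports to the IP-layer matcher,
 * so a socket-level Pass or IP Tunnel verdict remains visible when the
 * same packet is evaluated again at IP output.
 */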
int
necp_mark_packet_from_ip(struct mbuf *packet, necp_kernel_policy_id policy_id)
{
	if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
		return (EINVAL);
	}

	// Mark ID for Pass and IP Tunnel
	if (policy_id != NECP_KERNEL_POLICY_ID_NONE) {
		packet->m_pkthdr.necp_mtag.necp_policy_id = policy_id;
	} else {
		packet->m_pkthdr.necp_mtag.necp_policy_id = NECP_KERNEL_POLICY_ID_NONE;
	}

	return (0);
}
int
necp_mark_packet_from_interface(struct mbuf *packet, ifnet_t interface)
{
	if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
		return (EINVAL);
	}

	// Mark ID for Pass and IP Tunnel
	if (interface != NULL) {
		packet->m_pkthdr.necp_mtag.necp_last_interface_index = interface->if_index;
	}

	return (0);
}
int
necp_mark_packet_as_keepalive(struct mbuf *packet, bool is_keepalive)
{
	if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
		return (EINVAL);
	}

	if (is_keepalive) {
		packet->m_pkthdr.pkt_flags |= PKTF_KEEPALIVE;
	} else {
		packet->m_pkthdr.pkt_flags &= ~PKTF_KEEPALIVE;
	}

	return (0);
}
necp_kernel_policy_id
necp_get_policy_id_from_packet(struct mbuf *packet)
{
	if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
		return (NECP_KERNEL_POLICY_ID_NONE);
	}

	return (packet->m_pkthdr.necp_mtag.necp_policy_id);
}

necp_kernel_policy_id
necp_get_skip_policy_id_from_packet(struct mbuf *packet)
{
	if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
		return (NECP_KERNEL_POLICY_ID_NONE);
	}

	return (packet->m_pkthdr.necp_mtag.necp_skip_policy_id);
}

u_int32_t
necp_get_last_interface_index_from_packet(struct mbuf *packet)
{
	if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
		return (0);
	}

	return (packet->m_pkthdr.necp_mtag.necp_last_interface_index);
}

u_int32_t
necp_get_route_rule_id_from_packet(struct mbuf *packet)
{
	if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
		return (0);
	}

	return (packet->m_pkthdr.necp_mtag.necp_route_rule_id);
}
void
necp_get_app_uuid_from_packet(struct mbuf *packet,
    uuid_t app_uuid)
{
	if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
		uuid_clear(app_uuid);
		return;
	}

	bool found_mapping = FALSE;
	if (packet->m_pkthdr.necp_mtag.necp_app_id != 0) {
		lck_rw_lock_shared(&necp_kernel_policy_lock);
		struct necp_uuid_id_mapping *entry = necp_uuid_lookup_uuid_with_app_id_locked(packet->m_pkthdr.necp_mtag.necp_app_id);
		if (entry != NULL) {
			uuid_copy(app_uuid, entry->uuid);
			found_mapping = true;
		}
		lck_rw_done(&necp_kernel_policy_lock);
	}
	if (!found_mapping) {
		uuid_clear(app_uuid);
	}
}
bool
necp_get_is_keepalive_from_packet(struct mbuf *packet)
{
	if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
		return (FALSE);
	}

	return (packet->m_pkthdr.pkt_flags & PKTF_KEEPALIVE);
}

u_int32_t
necp_socket_get_content_filter_control_unit(struct socket *so)
{
	struct inpcb *inp = sotoinpcb(so);

	if (inp == NULL) {
		return (0);
	}
	return (inp->inp_policyresult.results.filter_control_unit);
}
bool
necp_socket_should_use_flow_divert(struct inpcb *inp)
{
	if (inp == NULL) {
		return (FALSE);
	}

	return (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT);
}

u_int32_t
necp_socket_get_flow_divert_control_unit(struct inpcb *inp)
{
	if (inp == NULL) {
		return (0);
	}

	if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT) {
		return (inp->inp_policyresult.results.result_parameter.flow_divert_control_unit);
	}

	return (0);
}
bool
necp_socket_should_rescope(struct inpcb *inp)
{
	if (inp == NULL) {
		return (FALSE);
	}

	return (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED ||
	    inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SCOPED_DIRECT);
}

u_int
necp_socket_get_rescope_if_index(struct inpcb *inp)
{
	if (inp == NULL) {
		return (0);
	}

	if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED) {
		return (inp->inp_policyresult.results.result_parameter.scoped_interface_index);
	} else if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SCOPED_DIRECT) {
		return (necp_get_primary_direct_interface_index());
	}

	return (0);
}
u_int32_t
necp_socket_get_effective_mtu(struct inpcb *inp, u_int32_t current_mtu)
{
	if (inp == NULL) {
		return (current_mtu);
	}

	if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL &&
	    (inp->inp_flags & INP_BOUND_IF) &&
	    inp->inp_boundifp) {
		u_int bound_interface_index = inp->inp_boundifp->if_index;
		u_int tunnel_interface_index = inp->inp_policyresult.results.result_parameter.tunnel_interface_index;

		// The result is IP Tunnel, and is rescoping from one interface to another. Recalculate MTU.
		if (bound_interface_index != tunnel_interface_index) {
			ifnet_t tunnel_interface = NULL;

			ifnet_head_lock_shared();
			tunnel_interface = ifindex2ifnet[tunnel_interface_index];
			ifnet_head_done();

			if (tunnel_interface != NULL) {
				u_int32_t direct_tunnel_mtu = tunnel_interface->if_mtu;
				u_int32_t delegate_tunnel_mtu = (tunnel_interface->if_delegated.ifp != NULL) ? tunnel_interface->if_delegated.ifp->if_mtu : 0;
				if (delegate_tunnel_mtu != 0 &&
				    strncmp(tunnel_interface->if_name, "ipsec", strlen("ipsec")) == 0) {
					// For ipsec interfaces, calculate the overhead from the delegate interface
					u_int32_t tunnel_overhead = (u_int32_t)(esp_hdrsiz(NULL) + sizeof(struct ip6_hdr));
					if (delegate_tunnel_mtu > tunnel_overhead) {
						delegate_tunnel_mtu -= tunnel_overhead;
					}

					if (delegate_tunnel_mtu < direct_tunnel_mtu) {
						// If the (delegate - overhead) < direct, return (delegate - overhead)
						return (delegate_tunnel_mtu);
					} else {
						// Otherwise return direct
						return (direct_tunnel_mtu);
					}
				} else {
					// For non-ipsec interfaces, just return the tunnel MTU
					return (direct_tunnel_mtu);
				}
			}
		}
	}

	// By default, just return the MTU passed in
	return (current_mtu);
}
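/*
 * Worked example (hypothetical numbers): for a socket tunneled over an
 * ipsec interface whose delegate has an MTU of 1500, the usable MTU is
 * reduced by the ESP estimate plus an outer IPv6 header; if esp_hdrsiz()
 * reports 73 bytes, delegate_tunnel_mtu = 1500 - (73 + 40) = 1387, and
 * the smaller of that and the tunnel interface's own MTU is returned.
 */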
ifnet_t
necp_get_ifnet_from_result_parameter(necp_kernel_policy_result_parameter *result_parameter)
{
	if (result_parameter == NULL) {
		return (NULL);
	}

	return (ifindex2ifnet[result_parameter->tunnel_interface_index]);
}
static bool
necp_packet_can_rebind_to_ifnet(struct mbuf *packet, struct ifnet *interface, struct route *new_route, int family)
{
	bool found_match = FALSE;
	errno_t result = 0;
	ifaddr_t *addresses = NULL;
	union necp_sockaddr_union address_storage;
	int i;

	if (packet == NULL || interface == NULL || new_route == NULL || (family != AF_INET && family != AF_INET6)) {
		return (FALSE);
	}

	result = ifnet_get_address_list_family(interface, &addresses, family);
	if (result != 0) {
		NECPLOG(LOG_ERR, "Failed to get address list for %s%d", ifnet_name(interface), ifnet_unit(interface));
		return (FALSE);
	}

	for (i = 0; addresses[i] != NULL; i++) {
		ROUTE_RELEASE(new_route);
		if (ifaddr_address(addresses[i], &address_storage.sa, sizeof(address_storage)) == 0) {
			if (family == AF_INET) {
				struct ip *ip = mtod(packet, struct ip *);
				if (memcmp(&address_storage.sin.sin_addr, &ip->ip_src, sizeof(ip->ip_src)) == 0) {
					struct sockaddr_in *dst4 = (struct sockaddr_in *)(void *)&new_route->ro_dst;
					dst4->sin_family = AF_INET;
					dst4->sin_len = sizeof(struct sockaddr_in);
					dst4->sin_addr = ip->ip_dst;
					rtalloc_scoped(new_route, interface->if_index);
					if (!ROUTE_UNUSABLE(new_route)) {
						found_match = TRUE;
						goto done;
					}
				}
			} else if (family == AF_INET6) {
				struct ip6_hdr *ip6 = mtod(packet, struct ip6_hdr *);
				if (memcmp(&address_storage.sin6.sin6_addr, &ip6->ip6_src, sizeof(ip6->ip6_src)) == 0) {
					struct sockaddr_in6 *dst6 = (struct sockaddr_in6 *)(void *)&new_route->ro_dst;
					dst6->sin6_family = AF_INET6;
					dst6->sin6_len = sizeof(struct sockaddr_in6);
					dst6->sin6_addr = ip6->ip6_dst;
					rtalloc_scoped(new_route, interface->if_index);
					if (!ROUTE_UNUSABLE(new_route)) {
						found_match = TRUE;
						goto done;
					}
				}
			}
		}
	}

done:
	ifnet_free_address_list(addresses);
	return (found_match);
}
static bool
necp_addr_is_loopback(struct sockaddr *address)
{
	if (address == NULL) {
		return (FALSE);
	}

	if (address->sa_family == AF_INET) {
		return (ntohl(((struct sockaddr_in *)(void *)address)->sin_addr.s_addr) == INADDR_LOOPBACK);
	} else if (address->sa_family == AF_INET6) {
		return IN6_IS_ADDR_LOOPBACK(&((struct sockaddr_in6 *)(void *)address)->sin6_addr);
	}

	return (FALSE);
}
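/*
 * Example: 127.0.0.1 (INADDR_LOOPBACK) and ::1 both return true here;
 * any other address, including other 127/8 addresses, and a NULL
 * sockaddr return false.
 */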
static bool
necp_is_loopback(struct sockaddr *local_addr, struct sockaddr *remote_addr, struct inpcb *inp, struct mbuf *packet)
{
	// Note: This function only checks for the loopback addresses.
	// In the future, we may want to expand to also allow any traffic
	// going through the loopback interface, but until then, this
	// check is cheaper.

	if (local_addr != NULL && necp_addr_is_loopback(local_addr)) {
		return (true);
	}

	if (remote_addr != NULL && necp_addr_is_loopback(remote_addr)) {
		return (true);
	}

	if (inp != NULL) {
		if ((inp->inp_flags & INP_BOUND_IF) && inp->inp_boundifp && (inp->inp_boundifp->if_flags & IFF_LOOPBACK)) {
			return (true);
		}
		if (inp->inp_vflag & INP_IPV4) {
			if (ntohl(inp->inp_laddr.s_addr) == INADDR_LOOPBACK ||
			    ntohl(inp->inp_faddr.s_addr) == INADDR_LOOPBACK) {
				return (true);
			}
		} else if (inp->inp_vflag & INP_IPV6) {
			if (IN6_IS_ADDR_LOOPBACK(&inp->in6p_laddr) ||
			    IN6_IS_ADDR_LOOPBACK(&inp->in6p_faddr)) {
				return (true);
			}
		}
	}

	if (packet != NULL) {
		struct ip *ip = mtod(packet, struct ip *);
		if (ip->ip_v == 4) {
			if (ntohl(ip->ip_src.s_addr) == INADDR_LOOPBACK) {
				return (true);
			}
			if (ntohl(ip->ip_dst.s_addr) == INADDR_LOOPBACK) {
				return (true);
			}
		} else if (ip->ip_v == 6) {
			struct ip6_hdr *ip6 = mtod(packet, struct ip6_hdr *);
			if (IN6_IS_ADDR_LOOPBACK(&ip6->ip6_src)) {
				return (true);
			}
			if (IN6_IS_ADDR_LOOPBACK(&ip6->ip6_dst)) {
				return (true);
			}
		}
	}

	return (false);
}
static bool
necp_is_intcoproc(struct inpcb *inp, struct mbuf *packet)
{
	if (inp != NULL) {
		return (sflt_permission_check(inp) ? true : false);
	}
	if (packet != NULL) {
		struct ip6_hdr *ip6 = mtod(packet, struct ip6_hdr *);
		if ((ip6->ip6_vfc & IPV6_VERSION_MASK) == IPV6_VERSION &&
		    IN6_IS_ADDR_LINKLOCAL(&ip6->ip6_dst) &&
		    ip6->ip6_dst.s6_addr32[2] == ntohl(0xaede48ff) &&
		    ip6->ip6_dst.s6_addr32[3] == ntohl(0xfe334455)) {