/*
 * Copyright (c) 2013-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <libkern/OSMalloc.h>
#include <sys/kernel.h>
#include <sys/kern_control.h>
#include <sys/kpi_mbuf.h>
#include <sys/proc_uuid_policy.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/coalition.h>
#include <sys/codesign.h>
#include <kern/cs_blobs.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_cache.h>
#include <netinet/udp.h>
#include <netinet/in_pcb.h>
#include <netinet/in_tclass.h>
#include <netinet6/esp.h>
#include <net/flowhash.h>
#include <net/if_var.h>
#include <sys/kauth.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/kern_event.h>
#include <sys/file_internal.h>
#include <IOKit/IOBSD.h>
#include <libkern/crypto/rand.h>
#include <corecrypto/cchmac.h>
#include <corecrypto/ccsha2.h>
#include <os/refcnt.h>
#include <net/network_agent.h>
/*
 * NECP - Network Extension Control Policy database
 * ------------------------------------------------
 * The goal of this module is to allow clients connecting via a
 * policy file descriptor to create high-level policy sessions, which
 * are ingested into low-level kernel policies that control and tag
 * traffic at the application, socket, and IP layers.
 *
 * ------------------------------------------------
 * Sessions
 * ------------------------------------------------
 * Each session owns a list of session policies, each of which can
 * specify any combination of conditions and a single result. Each
 * session also has a priority level (such as High, Default, or Low)
 * which is requested by the client. Based on the requested level,
 * a session order value is assigned to the session, which will be used
 * to sort kernel policies generated by the session. The session client
 * can specify the sub-order for each policy it creates which will be
 * used to further sort the kernel policies.
 *
 * Policy fd --> 1 necp_session --> list of necp_session_policy structs
 *
 * ------------------------------------------------
 * Kernel Policies
 * ------------------------------------------------
 * Whenever a session sends the Apply command, its policies are ingested
 * and generate kernel policies. There are two phases of kernel policy
 * ingest.
 *
 * 1. The session policy is parsed to create kernel policies at the socket
 * and IP layers, when applicable. For example, a policy that requires
 * all traffic from App1 to Pass will generate a socket kernel policy to
 * match App1 and mark packets with ID1, and also an IP policy to match
 * ID1 and let the packet pass. This is handled in necp_apply_policy. The
 * resulting kernel policies are added to the global socket and IP layer
 * policy lists.
 * necp_session_policy --> necp_kernel_socket_policy and necp_kernel_ip_output_policy
 *     necp_kernel_socket_policies    necp_kernel_ip_output_policies
 *
 * 2. Once the global lists of kernel policies have been filled out, each
 * list is traversed to create optimized sub-lists ("Maps") which are used during
 * data-path evaluation. IP policies are sent into necp_kernel_ip_output_policies_map,
 * which hashes incoming packets based on marked socket-layer policies, and removes
 * duplicate or overlapping policies. Socket policies are sent into two maps,
 * necp_kernel_socket_policies_map and necp_kernel_socket_policies_app_layer_map.
 * The app layer map is used for policy checks coming in from user space, and is one
 * list with duplicate and overlapping policies removed. The socket map hashes based
 * on app UUID, and removes duplicate and overlapping policies.
 * necp_kernel_socket_policy --> necp_kernel_socket_policies_app_layer_map
 *                           |-> necp_kernel_socket_policies_map
 *
 * necp_kernel_ip_output_policies --> necp_kernel_ip_output_policies_map
 *
 * ------------------------------------------------
 * Drop All Level
 * ------------------------------------------------
 * The Drop All Level is a sysctl that controls the level at which policies are allowed
 * to override a global drop rule. If the value is 0, no drop rule is applied. If the value
 * is 1, all traffic is dropped. If the value is greater than 1, all kernel policies created
 * by a session with a priority level better than (numerically less than) the
 * Drop All Level will allow matching traffic to not be dropped. The Drop All Level is
 * dynamically interpreted into necp_drop_all_order, which specifies the equivalent assigned
 * session orders to be dropped.
 */
u_int32_t necp_drop_all_order = 0;
u_int32_t necp_drop_all_level = 0;

u_int32_t necp_pass_loopback = 1; // 0=Off, 1=On
u_int32_t necp_pass_keepalives = 1; // 0=Off, 1=On
u_int32_t necp_pass_interpose = 1; // 0=Off, 1=On

u_int32_t necp_drop_unentitled_order = 0;
#ifdef XNU_TARGET_OS_WATCH
u_int32_t necp_drop_unentitled_level = NECP_SESSION_PRIORITY_CONTROL + 1; // Block all unentitled traffic from policies below control level
#else // XNU_TARGET_OS_WATCH
u_int32_t necp_drop_unentitled_level = 0;
#endif // XNU_TARGET_OS_WATCH

u_int32_t necp_debug = 0; // 0=None, 1=Basic, 2=EveryMatch

u_int32_t necp_session_count = 0;
#define LIST_INSERT_SORTED_ASCENDING(head, elm, field, sortfield, tmpelm) do { \
    if (LIST_EMPTY((head)) || (LIST_FIRST(head)->sortfield >= (elm)->sortfield)) { \
        LIST_INSERT_HEAD((head), elm, field); \
    } else { \
        LIST_FOREACH(tmpelm, head, field) { \
            if (LIST_NEXT(tmpelm, field) == NULL || LIST_NEXT(tmpelm, field)->sortfield >= (elm)->sortfield) { \
                LIST_INSERT_AFTER(tmpelm, elm, field); \
                break; \
            } \
        } \
    } \
} while (0)

#define LIST_INSERT_SORTED_TWICE_ASCENDING(head, elm, field, firstsortfield, secondsortfield, tmpelm) do { \
    if (LIST_EMPTY((head)) || (LIST_FIRST(head)->firstsortfield > (elm)->firstsortfield) || ((LIST_FIRST(head)->firstsortfield == (elm)->firstsortfield) && (LIST_FIRST(head)->secondsortfield >= (elm)->secondsortfield))) { \
        LIST_INSERT_HEAD((head), elm, field); \
    } else { \
        LIST_FOREACH(tmpelm, head, field) { \
            if (LIST_NEXT(tmpelm, field) == NULL || (LIST_NEXT(tmpelm, field)->firstsortfield > (elm)->firstsortfield) || ((LIST_NEXT(tmpelm, field)->firstsortfield == (elm)->firstsortfield) && (LIST_NEXT(tmpelm, field)->secondsortfield >= (elm)->secondsortfield))) { \
                LIST_INSERT_AFTER(tmpelm, elm, field); \
                break; \
            } \
        } \
    } \
} while (0)

#define LIST_INSERT_SORTED_THRICE_ASCENDING(head, elm, field, firstsortfield, secondsortfield, thirdsortfield, tmpelm) do { \
    if (LIST_EMPTY((head)) || (LIST_FIRST(head)->firstsortfield > (elm)->firstsortfield) || ((LIST_FIRST(head)->firstsortfield == (elm)->firstsortfield) && (LIST_FIRST(head)->secondsortfield >= (elm)->secondsortfield)) || ((LIST_FIRST(head)->firstsortfield == (elm)->firstsortfield) && (LIST_FIRST(head)->secondsortfield == (elm)->secondsortfield) && (LIST_FIRST(head)->thirdsortfield >= (elm)->thirdsortfield))) { \
        LIST_INSERT_HEAD((head), elm, field); \
    } else { \
        LIST_FOREACH(tmpelm, head, field) { \
            if (LIST_NEXT(tmpelm, field) == NULL || (LIST_NEXT(tmpelm, field)->firstsortfield > (elm)->firstsortfield) || ((LIST_NEXT(tmpelm, field)->firstsortfield == (elm)->firstsortfield) && (LIST_NEXT(tmpelm, field)->secondsortfield >= (elm)->secondsortfield)) || ((LIST_NEXT(tmpelm, field)->firstsortfield == (elm)->firstsortfield) && (LIST_NEXT(tmpelm, field)->secondsortfield == (elm)->secondsortfield) && (LIST_NEXT(tmpelm, field)->thirdsortfield >= (elm)->thirdsortfield))) { \
                LIST_INSERT_AFTER(tmpelm, elm, field); \
                break; \
            } \
        } \
    } \
} while (0)
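/*
 * Illustrative sketch (not part of the original source): these macros keep a
 * BSD queue(3) LIST ordered on insert. For a hypothetical element type with a
 * `chain` link and an `order` field, a sorted insert looks like:
 *
 *   struct example_elem {
 *       LIST_ENTRY(example_elem) chain;
 *       u_int32_t order;
 *   };
 *   LIST_HEAD(, example_elem) head = LIST_HEAD_INITIALIZER(head);
 *   struct example_elem *tmp_elem = NULL;
 *   LIST_INSERT_SORTED_ASCENDING(&head, new_elem, chain, order, tmp_elem);
 *
 * The TWICE/THRICE variants do the same walk but compare two or three fields
 * lexicographically, which is how kernel policies end up ordered by session
 * order and then sub-order.
 */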
#define IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(x) ((x) == NECP_ROUTE_RULE_DENY_INTERFACE || (x) == NECP_ROUTE_RULE_ALLOW_INTERFACE)

#define IS_NECP_DEST_IN_LOCAL_NETWORKS(rt) \
    ((rt) != NULL && !((rt)->rt_flags & RTF_GATEWAY) && ((rt)->rt_ifa && (rt)->rt_ifa->ifa_ifp && !((rt)->rt_ifa->ifa_ifp->if_flags & IFF_POINTOPOINT)))

#define NECP_KERNEL_CONDITION_ALL_INTERFACES      0x000001
#define NECP_KERNEL_CONDITION_BOUND_INTERFACE     0x000002
#define NECP_KERNEL_CONDITION_PROTOCOL            0x000004
#define NECP_KERNEL_CONDITION_LOCAL_START         0x000008
#define NECP_KERNEL_CONDITION_LOCAL_END           0x000010
#define NECP_KERNEL_CONDITION_LOCAL_PREFIX        0x000020
#define NECP_KERNEL_CONDITION_REMOTE_START        0x000040
#define NECP_KERNEL_CONDITION_REMOTE_END          0x000080
#define NECP_KERNEL_CONDITION_REMOTE_PREFIX       0x000100
#define NECP_KERNEL_CONDITION_APP_ID              0x000200
#define NECP_KERNEL_CONDITION_REAL_APP_ID         0x000400
#define NECP_KERNEL_CONDITION_DOMAIN              0x000800
#define NECP_KERNEL_CONDITION_ACCOUNT_ID          0x001000
#define NECP_KERNEL_CONDITION_POLICY_ID           0x002000
#define NECP_KERNEL_CONDITION_PID                 0x004000
#define NECP_KERNEL_CONDITION_UID                 0x008000
#define NECP_KERNEL_CONDITION_LAST_INTERFACE      0x010000 // Only set from packets looping between interfaces
#define NECP_KERNEL_CONDITION_TRAFFIC_CLASS       0x020000
#define NECP_KERNEL_CONDITION_ENTITLEMENT         0x040000
#define NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT  0x080000
#define NECP_KERNEL_CONDITION_AGENT_TYPE          0x100000
#define NECP_KERNEL_CONDITION_HAS_CLIENT          0x200000
#define NECP_KERNEL_CONDITION_LOCAL_NETWORKS      0x400000
#define NECP_KERNEL_CONDITION_CLIENT_FLAGS        0x800000
#define NECP_KERNEL_CONDITION_LOCAL_EMPTY         0x1000000
#define NECP_KERNEL_CONDITION_REMOTE_EMPTY        0x2000000
#define NECP_KERNEL_CONDITION_PLATFORM_BINARY     0x4000000

#define NECP_MAX_POLICY_RESULT_SIZE       512
#define NECP_MAX_ROUTE_RULES_ARRAY_SIZE   1024
#define NECP_MAX_CONDITIONS_ARRAY_SIZE    4096
#define NECP_MAX_POLICY_LIST_COUNT        1024

// Cap the policy size at the max result + conditions size, with room for extra TLVs
#define NECP_MAX_POLICY_SIZE (1024 + NECP_MAX_POLICY_RESULT_SIZE + NECP_MAX_CONDITIONS_ARRAY_SIZE)
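/*
 * Illustrative note (not part of the original source): with the values above,
 * NECP_MAX_POLICY_SIZE works out to 1024 + 512 + 4096 = 5632 bytes, which is
 * the largest TLV buffer necp_session_add_policy() will copy in for a single
 * policy.
 */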
struct necp_service_registration {
    LIST_ENTRY(necp_service_registration) session_chain;
    LIST_ENTRY(necp_service_registration) kernel_chain;
    u_int32_t service_id;
};

struct necp_session {
    u_int8_t necp_fd_type;
    u_int32_t control_unit;
    u_int32_t session_priority; // Descriptive priority rating
    u_int32_t session_order;

    necp_policy_id last_policy_id;

    decl_lck_mtx_data(, lock);

    bool proc_locked; // Messages must come from proc_uuid

    LIST_HEAD(_policies, necp_session_policy) policies;

    LIST_HEAD(_services, necp_service_registration) services;

    TAILQ_ENTRY(necp_session) chain;
};

#define NECP_SESSION_LOCK(_s) lck_mtx_lock(&_s->lock)
#define NECP_SESSION_UNLOCK(_s) lck_mtx_unlock(&_s->lock)

static TAILQ_HEAD(_necp_session_list, necp_session) necp_session_list;
struct necp_socket_info {
    union necp_sockaddr_union local_addr;
    union necp_sockaddr_union remote_addr;
    u_int32_t bound_interface_index;
    u_int32_t traffic_class;
    u_int32_t application_id;
    u_int32_t real_application_id;
    u_int32_t account_id;
    u_int32_t drop_order;
    u_int32_t client_flags;
    unsigned has_client : 1;
    unsigned is_platform_binary : 1;
    unsigned used_responsible_pid : 1;
    unsigned __pad_bits : 5;
};
static lck_grp_attr_t *necp_kernel_policy_grp_attr = NULL;
static lck_attr_t *necp_kernel_policy_mtx_attr = NULL;
static lck_grp_t *necp_kernel_policy_mtx_grp = NULL;
decl_lck_rw_data(static, necp_kernel_policy_lock);

static lck_grp_attr_t *necp_route_rule_grp_attr = NULL;
static lck_attr_t *necp_route_rule_mtx_attr = NULL;
static lck_grp_t *necp_route_rule_mtx_grp = NULL;
decl_lck_rw_data(static, necp_route_rule_lock);

os_refgrp_decl(static, necp_refgrp, "NECPRefGroup", NULL);

/*
 * On modification, invalidate cached lookups by bumping the generation count.
 * Other calls will need to take the slowpath of taking
 * the subsystem lock.
 */
static volatile int32_t necp_kernel_socket_policies_gencount;
#define BUMP_KERNEL_SOCKET_POLICIES_GENERATION_COUNT() do { \
    if (OSIncrementAtomic(&necp_kernel_socket_policies_gencount) == (INT32_MAX - 1)) { \
        necp_kernel_socket_policies_gencount = 1; \
    } \
} while (0)
/*
 * Allow privileged processes to bypass the default drop-all
 * via entitlement check. For OSX, since entitlement check is
 * not supported for configd, configd signing identity is checked
 * instead.
 */
#define SIGNING_ID_CONFIGD "com.apple.configd"
#define SIGNING_ID_CONFIGD_LEN (sizeof(SIGNING_ID_CONFIGD) - 1)

typedef enum {
    NECP_DROP_ALL_BYPASS_CHECK_RESULT_NONE = 0,
    NECP_DROP_ALL_BYPASS_CHECK_RESULT_TRUE = 1,
    NECP_DROP_ALL_BYPASS_CHECK_RESULT_FALSE = 2,
} necp_drop_all_bypass_check_result_t;
static u_int32_t necp_kernel_application_policies_condition_mask;
static size_t necp_kernel_application_policies_count;
static u_int32_t necp_kernel_socket_policies_condition_mask;
static size_t necp_kernel_socket_policies_count;
static size_t necp_kernel_socket_policies_non_app_count;
static LIST_HEAD(_necpkernelsocketconnectpolicies, necp_kernel_socket_policy) necp_kernel_socket_policies;
#define NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS 5
#define NECP_SOCKET_MAP_APP_ID_TO_BUCKET(appid) (appid ? (appid%(NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS - 1) + 1) : 0)
static struct necp_kernel_socket_policy **necp_kernel_socket_policies_map[NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS];
static struct necp_kernel_socket_policy **necp_kernel_socket_policies_app_layer_map;

/*
 * A note on policy 'maps': these are used for boosting efficiency when matching policies. For each dimension of the map,
 * such as an ID, the 0 bucket is reserved for sockets/packets that do not have this parameter, while the other
 * buckets lead to an array of policy pointers that form the list applicable when the (parameter%(NUM_BUCKETS - 1) + 1) == bucket_index.
 *
 * For example, a packet with policy ID of 7, when there are 4 ID buckets, will map to bucket (7%3 + 1) = 2.
 */
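/*
 * Illustrative sketch (not part of the original source): the bucket macros
 * reduce an ID to a small array index while keeping 0 reserved for "no ID".
 * With NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS == 5:
 *
 *   NECP_SOCKET_MAP_APP_ID_TO_BUCKET(0) == 0          // no app ID -> reserved bucket
 *   NECP_SOCKET_MAP_APP_ID_TO_BUCKET(7) == 7 % 4 + 1 == 4
 *   NECP_SOCKET_MAP_APP_ID_TO_BUCKET(8) == 8 % 4 + 1 == 1
 *
 * so every non-zero app ID lands in buckets 1..4 and bucket 0 never collides
 * with real IDs.
 */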
static u_int32_t necp_kernel_ip_output_policies_condition_mask;
static size_t necp_kernel_ip_output_policies_count;
static size_t necp_kernel_ip_output_policies_non_id_count;
static LIST_HEAD(_necpkernelipoutputpolicies, necp_kernel_ip_output_policy) necp_kernel_ip_output_policies;
#define NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS 5
#define NECP_IP_OUTPUT_MAP_ID_TO_BUCKET(id) (id ? (id%(NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS - 1) + 1) : 0)
static struct necp_kernel_ip_output_policy **necp_kernel_ip_output_policies_map[NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS];

static struct necp_kernel_socket_policy pass_policy = {
    .id = NECP_KERNEL_POLICY_ID_NO_MATCH,
    .result = NECP_KERNEL_POLICY_RESULT_PASS,
};
static struct necp_session *necp_create_session(void);
static void necp_delete_session(struct necp_session *session);

static necp_policy_id necp_handle_policy_add(struct necp_session *session,
    u_int8_t *tlv_buffer, size_t tlv_buffer_length, int offset, int *error);
static int necp_handle_policy_dump_all(user_addr_t out_buffer, size_t out_buffer_length);

#define MAX_RESULT_STRING_LEN 64
static inline const char * necp_get_result_description(char *result_string, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter);
static struct necp_session_policy *necp_policy_create(struct necp_session *session, necp_policy_order order, u_int8_t *conditions_array, u_int32_t conditions_array_size, u_int8_t *route_rules_array, u_int32_t route_rules_array_size, u_int8_t *result, u_int32_t result_size);
static struct necp_session_policy *necp_policy_find(struct necp_session *session, necp_policy_id policy_id);
static bool necp_policy_mark_for_deletion(struct necp_session *session, struct necp_session_policy *policy);
static bool necp_policy_mark_all_for_deletion(struct necp_session *session);
static bool necp_policy_delete(struct necp_session *session, struct necp_session_policy *policy);
static void necp_policy_apply_all(struct necp_session *session);
static necp_kernel_policy_id necp_kernel_socket_policy_add(necp_policy_order order, u_int32_t session_order, int session_pid, u_int32_t condition_mask, u_int32_t condition_negated_mask, necp_app_id cond_app_id, necp_app_id cond_real_app_id, char *cond_custom_entitlement, u_int32_t cond_account_id, char *domain, pid_t cond_pid, uid_t cond_uid, ifnet_t cond_bound_interface, struct necp_policy_condition_tc_range cond_traffic_class, u_int16_t cond_protocol, union necp_sockaddr_union *cond_local_start, union necp_sockaddr_union *cond_local_end, u_int8_t cond_local_prefix, union necp_sockaddr_union *cond_remote_start, union necp_sockaddr_union *cond_remote_end, u_int8_t cond_remote_prefix, struct necp_policy_condition_agent_type *cond_agent_type, u_int32_t cond_client_flags, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter);
static bool necp_kernel_socket_policy_delete(necp_kernel_policy_id policy_id);
static bool necp_kernel_socket_policies_reprocess(void);
static bool necp_kernel_socket_policies_update_uuid_table(void);
static inline struct necp_kernel_socket_policy *necp_socket_find_policy_match_with_info_locked(struct necp_kernel_socket_policy **policy_search_array, struct necp_socket_info *info, necp_kernel_policy_filter *return_filter, u_int32_t *return_route_rule_id_array, size_t *return_route_rule_id_array_count, size_t route_rule_id_array_count, necp_kernel_policy_result *return_service_action, necp_kernel_policy_service *return_service, u_int32_t *return_netagent_array, u_int32_t *return_netagent_use_flags_array, size_t netagent_array_count, struct necp_client_parameter_netagent_type *required_agent_types, u_int32_t num_required_agent_types, proc_t proc, necp_kernel_policy_id *skip_policy_id, struct rtentry *rt, necp_kernel_policy_result *return_drop_dest_policy_result, necp_drop_all_bypass_check_result_t *return_drop_all_bypass);
static necp_kernel_policy_id necp_kernel_ip_output_policy_add(necp_policy_order order, necp_policy_order suborder, u_int32_t session_order, int session_pid, u_int32_t condition_mask, u_int32_t condition_negated_mask, necp_kernel_policy_id cond_policy_id, ifnet_t cond_bound_interface, u_int32_t cond_last_interface_index, u_int16_t cond_protocol, union necp_sockaddr_union *cond_local_start, union necp_sockaddr_union *cond_local_end, u_int8_t cond_local_prefix, union necp_sockaddr_union *cond_remote_start, union necp_sockaddr_union *cond_remote_end, u_int8_t cond_remote_prefix, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter);
static bool necp_kernel_ip_output_policy_delete(necp_kernel_policy_id policy_id);
static bool necp_kernel_ip_output_policies_reprocess(void);
static bool necp_is_addr_in_range(struct sockaddr *addr, struct sockaddr *range_start, struct sockaddr *range_end);
static bool necp_is_range_in_range(struct sockaddr *inner_range_start, struct sockaddr *inner_range_end, struct sockaddr *range_start, struct sockaddr *range_end);
static bool necp_is_addr_in_subnet(struct sockaddr *addr, struct sockaddr *subnet_addr, u_int8_t subnet_prefix);
static int necp_addr_compare(struct sockaddr *sa1, struct sockaddr *sa2, int check_port);
static bool necp_buffer_compare_with_bit_prefix(u_int8_t *p1, u_int8_t *p2, u_int32_t bits);
static bool necp_addr_is_empty(struct sockaddr *addr);
static bool necp_is_loopback(struct sockaddr *local_addr, struct sockaddr *remote_addr, struct inpcb *inp, struct mbuf *packet, u_int32_t bound_interface_index);
static bool necp_is_intcoproc(struct inpcb *inp, struct mbuf *packet);
struct necp_uuid_id_mapping {
    LIST_ENTRY(necp_uuid_id_mapping) chain;
    os_refcnt_t refcount;
    u_int32_t table_usecount; // Add to UUID policy table count
};

static size_t necp_num_uuid_app_id_mappings;
static bool necp_uuid_app_id_mappings_dirty;
#define NECP_UUID_APP_ID_HASH_SIZE 64
static u_long necp_uuid_app_id_hash_mask;
static u_long necp_uuid_app_id_hash_num_buckets;
static LIST_HEAD(necp_uuid_id_mapping_head, necp_uuid_id_mapping) * necp_uuid_app_id_hashtbl, necp_uuid_service_id_list; // App map is real hash table, service map is just mapping
#define APPUUIDHASH(uuid) (&necp_uuid_app_id_hashtbl[uuid[0] & necp_uuid_app_id_hash_mask]) // Assume first byte of UUIDs are evenly distributed
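/*
 * Illustrative note (not part of the original source): necp_uuid_app_id_hashtbl
 * is sized by hashinit(NECP_UUID_APP_ID_HASH_SIZE, ...) in the init path below,
 * so necp_uuid_app_id_hash_mask is 63 and APPUUIDHASH() simply masks the first
 * byte of the UUID: a UUID starting with 0x7f lands in bucket 0x7f & 63 == 63,
 * and one starting with 0x80 lands in bucket 0.
 */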
static u_int32_t necp_create_uuid_app_id_mapping(uuid_t uuid, bool *allocated_mapping, bool uuid_policy_table);
static bool necp_remove_uuid_app_id_mapping(uuid_t uuid, bool *removed_mapping, bool uuid_policy_table);
static struct necp_uuid_id_mapping *necp_uuid_lookup_uuid_with_app_id_locked(u_int32_t local_id);

static struct necp_uuid_id_mapping *necp_uuid_lookup_service_id_locked(uuid_t uuid);
static struct necp_uuid_id_mapping *necp_uuid_lookup_uuid_with_service_id_locked(u_int32_t local_id);
static u_int32_t necp_create_uuid_service_id_mapping(uuid_t uuid);
static bool necp_remove_uuid_service_id_mapping(uuid_t uuid);
struct necp_string_id_mapping {
    LIST_ENTRY(necp_string_id_mapping) chain;
    os_refcnt_t refcount;
};

static LIST_HEAD(necp_string_id_mapping_list, necp_string_id_mapping) necp_account_id_list;
static u_int32_t necp_create_string_to_id_mapping(struct necp_string_id_mapping_list *list, char *domain);
static bool necp_remove_string_to_id_mapping(struct necp_string_id_mapping_list *list, char *domain);
static struct necp_string_id_mapping *necp_lookup_string_with_id_locked(struct necp_string_id_mapping_list *list, u_int32_t local_id);
static struct necp_kernel_socket_policy *necp_kernel_socket_policy_find(necp_kernel_policy_id policy_id);
static struct necp_kernel_ip_output_policy *necp_kernel_ip_output_policy_find(necp_kernel_policy_id policy_id);

static LIST_HEAD(_necp_kernel_service_list, necp_service_registration) necp_registered_service_list;

static char *necp_create_trimmed_domain(char *string, size_t length);
static inline int necp_count_dots(char *string, size_t length);

static char *necp_copy_string(char *string, size_t length);
static bool necp_update_qos_marking(struct ifnet *ifp, u_int32_t route_rule_id);
#define ROUTE_RULE_IS_AGGREGATE(ruleid) (ruleid > UINT16_MAX)

#define MAX_ROUTE_RULE_INTERFACES 10
struct necp_route_rule {
    LIST_ENTRY(necp_route_rule) chain;
    u_int32_t default_action;
    u_int8_t cellular_action;
    u_int8_t wifi_action;
    u_int8_t wired_action;
    u_int8_t expensive_action;
    u_int8_t constrained_action;
    u_int exception_if_indices[MAX_ROUTE_RULE_INTERFACES];
    u_int8_t exception_if_actions[MAX_ROUTE_RULE_INTERFACES];
    os_refcnt_t refcount;
};

static LIST_HEAD(necp_route_rule_list, necp_route_rule) necp_route_rules;
static u_int32_t necp_create_route_rule(struct necp_route_rule_list *list, u_int8_t *route_rules_array, u_int32_t route_rules_array_size);
static bool necp_remove_route_rule(struct necp_route_rule_list *list, u_int32_t route_rule_id);
static bool necp_route_is_allowed(struct rtentry *route, ifnet_t interface, u_int32_t route_rule_id, u_int32_t *interface_type_denied);
static struct necp_route_rule *necp_lookup_route_rule_locked(struct necp_route_rule_list *list, u_int32_t route_rule_id);
static inline void necp_get_parent_cred_result(proc_t proc, struct necp_socket_info *info);

#define MAX_AGGREGATE_ROUTE_RULES 16
struct necp_aggregate_route_rule {
    LIST_ENTRY(necp_aggregate_route_rule) chain;
    u_int32_t rule_ids[MAX_AGGREGATE_ROUTE_RULES];
};
static LIST_HEAD(necp_aggregate_route_rule_list, necp_aggregate_route_rule) necp_aggregate_route_rules;
static u_int32_t necp_create_aggregate_route_rule(u_int32_t *rule_ids);
// Sysctl definitions
static int sysctl_handle_necp_level SYSCTL_HANDLER_ARGS;
static int sysctl_handle_necp_unentitled_level SYSCTL_HANDLER_ARGS;

SYSCTL_NODE(_net, OID_AUTO, necp, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "NECP");
SYSCTL_INT(_net_necp, NECPCTL_PASS_LOOPBACK, pass_loopback, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_pass_loopback, 0, "");
SYSCTL_INT(_net_necp, NECPCTL_PASS_KEEPALIVES, pass_keepalives, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_pass_keepalives, 0, "");
SYSCTL_INT(_net_necp, NECPCTL_PASS_INTERPOSE, pass_interpose, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_pass_interpose, 0, "");
SYSCTL_INT(_net_necp, NECPCTL_DEBUG, debug, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_debug, 0, "");
SYSCTL_PROC(_net_necp, NECPCTL_DROP_UNENTITLED_LEVEL, drop_unentitled_level, CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW, &necp_drop_unentitled_level, 0, &sysctl_handle_necp_unentitled_level, "IU", "");
SYSCTL_PROC(_net_necp, NECPCTL_DROP_ALL_LEVEL, drop_all_level, CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW, &necp_drop_all_level, 0, &sysctl_handle_necp_level, "IU", "");
SYSCTL_LONG(_net_necp, NECPCTL_SOCKET_POLICY_COUNT, socket_policy_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_kernel_socket_policies_count, "");
SYSCTL_LONG(_net_necp, NECPCTL_SOCKET_NON_APP_POLICY_COUNT, socket_non_app_policy_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_kernel_socket_policies_non_app_count, "");
SYSCTL_LONG(_net_necp, NECPCTL_IP_POLICY_COUNT, ip_policy_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_kernel_ip_output_policies_count, "");
SYSCTL_INT(_net_necp, NECPCTL_SESSION_COUNT, session_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_session_count, 0, "");

static struct necp_drop_dest_policy necp_drop_dest_policy;
static int necp_drop_dest_debug = 0; // 0: off, 1: match, >1: every evaluation
SYSCTL_INT(_net_necp, OID_AUTO, drop_dest_debug, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_drop_dest_debug, 0, "");

static int sysctl_handle_necp_drop_dest_level SYSCTL_HANDLER_ARGS;
SYSCTL_PROC(_net_necp, OID_AUTO, drop_dest_level, CTLTYPE_STRUCT | CTLFLAG_LOCKED | CTLFLAG_ANYBODY | CTLFLAG_RW,
    0, 0, &sysctl_handle_necp_drop_dest_level, "S,necp_drop_dest_level", "");

static bool necp_address_matches_drop_dest_policy(union necp_sockaddr_union *, u_int32_t);
// Session order allocation
static u_int32_t
necp_allocate_new_session_order(u_int32_t priority, u_int32_t control_unit)
{
    u_int32_t new_order = 0;

    // For now, just allocate 1000 orders for each priority
    if (priority == NECP_SESSION_PRIORITY_UNKNOWN || priority > NECP_SESSION_NUM_PRIORITIES) {
        priority = NECP_SESSION_PRIORITY_DEFAULT;
    }

    // Use the control unit to decide the offset into the priority list
    new_order = (control_unit) + ((priority - 1) * 1000);

    return new_order;
}

static inline u_int32_t
necp_get_first_order_for_priority(u_int32_t priority)
{
    return ((priority - 1) * 1000) + 1;
}
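/*
 * Illustrative sketch (not part of the original source), assuming the
 * 1000-orders-per-priority scheme above: a session at priority level 1 with
 * control unit 3 gets order 3, a priority-2 session with control unit 3 gets
 * order 1003, and necp_get_first_order_for_priority(2) returns 1001, i.e. the
 * first order owned by priority level 2. Lower order values therefore always
 * mean "better" priority.
 */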
static int
sysctl_handle_necp_level SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
    int error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
    necp_drop_all_order = necp_get_first_order_for_priority(necp_drop_all_level);
    return error;
}

static int
sysctl_handle_necp_unentitled_level SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
    int error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
    necp_drop_unentitled_order = necp_get_first_order_for_priority(necp_drop_unentitled_level);
    return error;
}
// Use a macro here to avoid computing the kauth_cred_t when necp_drop_unentitled_level is 0
static inline u_int32_t
_necp_process_drop_order_inner(kauth_cred_t cred)
{
    if (priv_check_cred(cred, PRIV_NET_PRIVILEGED_CLIENT_ACCESS, 0) != 0 &&
        priv_check_cred(cred, PRIV_NET_PRIVILEGED_SERVER_ACCESS, 0) != 0) {
        return necp_drop_unentitled_order;
    }
    return 0;
}

#define necp_process_drop_order(_cred) (necp_drop_unentitled_order != 0 ? _necp_process_drop_order_inner(_cred) : necp_drop_unentitled_order)
#pragma GCC poison _necp_process_drop_order_inner
static int necp_session_op_close(struct fileglob *, vfs_context_t);

static const struct fileops necp_session_fd_ops = {
    .fo_type = DTYPE_NETPOLICY,
    .fo_read = fo_no_read,
    .fo_write = fo_no_write,
    .fo_ioctl = fo_no_ioctl,
    .fo_select = fo_no_select,
    .fo_close = necp_session_op_close,
    .fo_drain = fo_no_drain,
    .fo_kqfilter = fo_no_kqfilter,
};
static inline necp_drop_all_bypass_check_result_t
necp_check_drop_all_bypass_result(proc_t proc)
{
    if (proc == NULL) {
        proc = current_proc();
        if (proc == NULL) {
            return NECP_DROP_ALL_BYPASS_CHECK_RESULT_FALSE;
        }
    }

#if defined(XNU_TARGET_OS_OSX)
    const char *signing_id = NULL;
    const bool isConfigd = (csproc_get_platform_binary(proc) &&
        (signing_id = cs_identity_get(proc)) &&
        (strlen(signing_id) == SIGNING_ID_CONFIGD_LEN) &&
        (memcmp(signing_id, SIGNING_ID_CONFIGD, SIGNING_ID_CONFIGD_LEN) == 0));
    if (isConfigd) {
        return NECP_DROP_ALL_BYPASS_CHECK_RESULT_TRUE;
    }
#endif // XNU_TARGET_OS_OSX

    const task_t task = proc_task(proc);
    if (task == NULL || !IOTaskHasEntitlement(task, "com.apple.private.necp.drop_all_bypass")) {
        return NECP_DROP_ALL_BYPASS_CHECK_RESULT_FALSE;
    }
    return NECP_DROP_ALL_BYPASS_CHECK_RESULT_TRUE;
}
int
necp_session_open(struct proc *p, struct necp_session_open_args *uap, int *retval)
{
    struct necp_session *session = NULL;
    struct fileproc *fp = NULL;

    uid_t uid = kauth_cred_getuid(proc_ucred(p));
    if (uid != 0 && priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES, 0) != 0) {
        NECPLOG0(LOG_ERR, "Process does not hold necessary entitlement to open NECP session");
    }

    error = falloc(p, &fp, &fd, vfs_context_current());

    session = necp_create_session();
    if (session == NULL) {
    }

    fp->f_fglob->fg_flag = 0;
    fp->f_fglob->fg_ops = &necp_session_fd_ops;
    fp->f_fglob->fg_data = session;

    FDFLAGS_SET(p, fd, (UF_EXCLOSE | UF_FORKCLOSE));
    procfdtbl_releasefd(p, fd, NULL);
    fp_drop(p, fd, fp, 1);
static int
necp_session_op_close(struct fileglob *fg, vfs_context_t ctx)
{
    struct necp_session *session = (struct necp_session *)fg->fg_data;

    if (session != NULL) {
        necp_policy_mark_all_for_deletion(session);
        necp_policy_apply_all(session);
        necp_delete_session(session);
    }
static int
necp_session_find_from_fd(int fd, struct necp_session **session)
{
    proc_t p = current_proc();
    struct fileproc *fp = NULL;

    if ((error = fp_lookup(p, fd, &fp, 1)) != 0) {
    }

    if (fp->f_fglob->fg_ops->fo_type != DTYPE_NETPOLICY) {
        fp_drop(p, fd, fp, 1);
    }

    *session = (struct necp_session *)fp->f_fglob->fg_data;

    if ((*session)->necp_fd_type != necp_fd_type_session) {
        // Not a client fd, ignore
        fp_drop(p, fd, fp, 1);
    }
static int
necp_session_add_policy(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
{
    u_int8_t *tlv_buffer = NULL;

    if (uap->in_buffer_length == 0 || uap->in_buffer_length > NECP_MAX_POLICY_SIZE || uap->in_buffer == 0) {
        NECPLOG(LOG_ERR, "necp_session_add_policy invalid input (%zu)", uap->in_buffer_length);
    }

    if (uap->out_buffer_length < sizeof(necp_policy_id) || uap->out_buffer == 0) {
        NECPLOG(LOG_ERR, "necp_session_add_policy invalid output buffer (%zu)", uap->out_buffer_length);
    }

    if ((tlv_buffer = _MALLOC(uap->in_buffer_length, M_NECP, M_WAITOK | M_ZERO)) == NULL) {
    }

    error = copyin(uap->in_buffer, tlv_buffer, uap->in_buffer_length);
    if (error != 0) {
        NECPLOG(LOG_ERR, "necp_session_add_policy tlv copyin error (%d)", error);
    }

    necp_policy_id new_policy_id = necp_handle_policy_add(session, tlv_buffer, uap->in_buffer_length, 0, &error);
    if (new_policy_id == 0) {
        NECPLOG(LOG_ERR, "necp_session_add_policy failed to add policy (%d)", error);
    }

    error = copyout(&new_policy_id, uap->out_buffer, sizeof(new_policy_id));
    if (error != 0) {
        NECPLOG(LOG_ERR, "necp_session_add_policy policy_id copyout error (%d)", error);
    }

    if (tlv_buffer != NULL) {
        FREE(tlv_buffer, M_NECP);
    }
static int
necp_session_get_policy(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
{
    u_int8_t *response = NULL;

    if (uap->in_buffer_length < sizeof(necp_policy_id) || uap->in_buffer == 0) {
        NECPLOG(LOG_ERR, "necp_session_get_policy invalid input (%zu)", uap->in_buffer_length);
    }

    necp_policy_id policy_id = 0;
    error = copyin(uap->in_buffer, &policy_id, sizeof(policy_id));
    if (error != 0) {
        NECPLOG(LOG_ERR, "necp_session_get_policy policy_id copyin error (%d)", error);
    }

    struct necp_session_policy *policy = necp_policy_find(session, policy_id);
    if (policy == NULL || policy->pending_deletion) {
        NECPLOG(LOG_ERR, "Failed to find policy with id %d", policy_id);
    }

    u_int32_t order_tlv_size = sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(necp_policy_order);
    u_int32_t result_tlv_size = (policy->result_size ? (sizeof(u_int8_t) + sizeof(u_int32_t) + policy->result_size) : 0);
    u_int32_t response_size = order_tlv_size + result_tlv_size + policy->conditions_size;

    if (uap->out_buffer_length < response_size || uap->out_buffer == 0) {
        NECPLOG(LOG_ERR, "necp_session_get_policy buffer not large enough (%u < %u)", uap->out_buffer_length, response_size);
    }

    if (response_size > NECP_MAX_POLICY_SIZE) {
        NECPLOG(LOG_ERR, "necp_session_get_policy size too large to copy (%u)", response_size);
    }

    MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK | M_ZERO);
    if (response == NULL) {
    }

    u_int8_t *cursor = response;
    cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ORDER, sizeof(necp_policy_order), &policy->order, response, response_size);
    if (result_tlv_size) {
        cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_RESULT, policy->result_size, &policy->result, response, response_size);
    }
    if (policy->conditions_size) {
        memcpy(((u_int8_t *)(void *)(cursor)), policy->conditions, policy->conditions_size);
    }

    error = copyout(response, uap->out_buffer, response_size);
    if (error != 0) {
        NECPLOG(LOG_ERR, "necp_session_get_policy TLV copyout error (%d)", error);
    }

    if (response != NULL) {
        FREE(response, M_NECP);
    }
static int
necp_session_delete_policy(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
{
    if (uap->in_buffer_length < sizeof(necp_policy_id) || uap->in_buffer == 0) {
        NECPLOG(LOG_ERR, "necp_session_delete_policy invalid input (%zu)", uap->in_buffer_length);
    }

    necp_policy_id delete_policy_id = 0;
    error = copyin(uap->in_buffer, &delete_policy_id, sizeof(delete_policy_id));
    if (error != 0) {
        NECPLOG(LOG_ERR, "necp_session_delete_policy policy_id copyin error (%d)", error);
    }

    struct necp_session_policy *policy = necp_policy_find(session, delete_policy_id);
    if (policy == NULL || policy->pending_deletion) {
        NECPLOG(LOG_ERR, "necp_session_delete_policy failed to find policy with id %u", delete_policy_id);
    }

    necp_policy_mark_for_deletion(session, policy);
static int
necp_session_apply_all(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
{
    necp_policy_apply_all(session);
static int
necp_session_list_all(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
{
    u_int32_t tlv_size = (sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(necp_policy_id));
    u_int32_t response_size = 0;
    u_int8_t *response = NULL;
    int num_policies = 0;
    int cur_policy_index = 0;

    struct necp_session_policy *policy;

    LIST_FOREACH(policy, &session->policies, chain) {
        if (!policy->pending_deletion) {
            num_policies++;
        }
    }

    if (num_policies > NECP_MAX_POLICY_LIST_COUNT) {
        NECPLOG(LOG_ERR, "necp_session_list_all size too large to copy (%u policies)", num_policies);
    }

    response_size = num_policies * tlv_size;
    if (uap->out_buffer_length < response_size || uap->out_buffer == 0) {
        NECPLOG(LOG_ERR, "necp_session_list_all buffer not large enough (%u < %u)", uap->out_buffer_length, response_size);
    }

    // Create a response with one Policy ID TLV for each policy
    MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK | M_ZERO);
    if (response == NULL) {
    }

    u_int8_t *cursor = response;
    LIST_FOREACH(policy, &session->policies, chain) {
        if (!policy->pending_deletion && cur_policy_index < num_policies) {
            cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ID, sizeof(u_int32_t), &policy->local_id, response, response_size);
            cur_policy_index++;
        }
    }

    error = copyout(response, uap->out_buffer, response_size);
    if (error != 0) {
        NECPLOG(LOG_ERR, "necp_session_list_all TLV copyout error (%d)", error);
    }

    if (response != NULL) {
        FREE(response, M_NECP);
    }
static int
necp_session_delete_all(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
{
    necp_policy_mark_all_for_deletion(session);
static int
necp_session_set_session_priority(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
{
    struct necp_session_policy *policy = NULL;
    struct necp_session_policy *temp_policy = NULL;

    if (uap->in_buffer_length < sizeof(necp_session_priority) || uap->in_buffer == 0) {
        NECPLOG(LOG_ERR, "necp_session_set_session_priority invalid input (%zu)", uap->in_buffer_length);
    }

    necp_session_priority requested_session_priority = 0;
    error = copyin(uap->in_buffer, &requested_session_priority, sizeof(requested_session_priority));
    if (error != 0) {
        NECPLOG(LOG_ERR, "necp_session_set_session_priority priority copyin error (%d)", error);
    }

    // Enforce special session priorities with entitlements
    if (requested_session_priority == NECP_SESSION_PRIORITY_CONTROL ||
        requested_session_priority == NECP_SESSION_PRIORITY_PRIVILEGED_TUNNEL ||
        requested_session_priority == NECP_SESSION_PRIORITY_HIGH_RESTRICTED) {
        errno_t cred_result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES, 0);
        if (cred_result != 0) {
            NECPLOG(LOG_ERR, "Session does not hold necessary entitlement to claim priority level %d", requested_session_priority);
        }
    }

    if (session->session_priority != requested_session_priority) {
        session->session_priority = requested_session_priority;
        session->session_order = necp_allocate_new_session_order(session->session_priority, session->control_unit);
        session->dirty = TRUE;

        // Mark all policies as needing updates
        LIST_FOREACH_SAFE(policy, &session->policies, chain, temp_policy) {
            policy->pending_update = TRUE;
        }
    }
static int
necp_session_lock_to_process(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
{
    session->proc_locked = TRUE;
static int
necp_session_register_service(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
{
    struct necp_service_registration *new_service = NULL;

    if (uap->in_buffer_length < sizeof(uuid_t) || uap->in_buffer == 0) {
        NECPLOG(LOG_ERR, "necp_session_register_service invalid input (%zu)", uap->in_buffer_length);
    }

    uuid_t service_uuid;
    error = copyin(uap->in_buffer, service_uuid, sizeof(service_uuid));
    if (error != 0) {
        NECPLOG(LOG_ERR, "necp_session_register_service uuid copyin error (%d)", error);
    }

    MALLOC(new_service, struct necp_service_registration *, sizeof(*new_service), M_NECP, M_WAITOK | M_ZERO);
    if (new_service == NULL) {
        NECPLOG0(LOG_ERR, "Failed to allocate service registration");
    }

    lck_rw_lock_exclusive(&necp_kernel_policy_lock);
    new_service->service_id = necp_create_uuid_service_id_mapping(service_uuid);
    LIST_INSERT_HEAD(&session->services, new_service, session_chain);
    LIST_INSERT_HEAD(&necp_registered_service_list, new_service, kernel_chain);
    lck_rw_done(&necp_kernel_policy_lock);
static int
necp_session_unregister_service(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
{
    struct necp_service_registration *service = NULL;
    struct necp_service_registration *temp_service = NULL;
    struct necp_uuid_id_mapping *mapping = NULL;

    if (uap->in_buffer_length < sizeof(uuid_t) || uap->in_buffer == 0) {
        NECPLOG(LOG_ERR, "necp_session_unregister_service invalid input (%zu)", uap->in_buffer_length);
    }

    uuid_t service_uuid;
    error = copyin(uap->in_buffer, service_uuid, sizeof(service_uuid));
    if (error != 0) {
        NECPLOG(LOG_ERR, "necp_session_unregister_service uuid copyin error (%d)", error);
    }

    // Remove all matching services for this session
    lck_rw_lock_exclusive(&necp_kernel_policy_lock);
    mapping = necp_uuid_lookup_service_id_locked(service_uuid);
    if (mapping != NULL) {
        LIST_FOREACH_SAFE(service, &session->services, session_chain, temp_service) {
            if (service->service_id == mapping->id) {
                LIST_REMOVE(service, session_chain);
                LIST_REMOVE(service, kernel_chain);
                FREE(service, M_NECP);
            }
        }
        necp_remove_uuid_service_id_mapping(service_uuid);
    }
    lck_rw_done(&necp_kernel_policy_lock);
static int
necp_session_dump_all(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
{
#pragma unused(session)
    if (uap->out_buffer_length == 0 || uap->out_buffer == 0) {
        NECPLOG(LOG_ERR, "necp_session_dump_all invalid output buffer (%zu)", uap->out_buffer_length);
    }

    error = necp_handle_policy_dump_all(uap->out_buffer, uap->out_buffer_length);
int
necp_session_action(struct proc *p, struct necp_session_action_args *uap, int *retval)
{
    int return_value = 0;
    struct necp_session *session = NULL;
    error = necp_session_find_from_fd(uap->necp_fd, &session);
    if (error != 0) {
        NECPLOG(LOG_ERR, "necp_session_action find fd error (%d)", error);
    }

    NECP_SESSION_LOCK(session);

    if (session->proc_locked) {
        // Verify that the calling process is allowed to do actions
        uuid_t proc_uuid;
        proc_getexecutableuuid(current_proc(), proc_uuid, sizeof(proc_uuid));
        if (uuid_compare(proc_uuid, session->proc_uuid) != 0) {
        }
    } else {
        // If not locked, update the proc_uuid and proc_pid of the session
        proc_getexecutableuuid(current_proc(), session->proc_uuid, sizeof(session->proc_uuid));
        session->proc_pid = proc_pid(current_proc());
    }

    u_int32_t action = uap->action;
    switch (action) {
    case NECP_SESSION_ACTION_POLICY_ADD: {
        return_value = necp_session_add_policy(session, uap, retval);
        break;
    }
    case NECP_SESSION_ACTION_POLICY_GET: {
        return_value = necp_session_get_policy(session, uap, retval);
        break;
    }
    case NECP_SESSION_ACTION_POLICY_DELETE: {
        return_value = necp_session_delete_policy(session, uap, retval);
        break;
    }
    case NECP_SESSION_ACTION_POLICY_APPLY_ALL: {
        return_value = necp_session_apply_all(session, uap, retval);
        break;
    }
    case NECP_SESSION_ACTION_POLICY_LIST_ALL: {
        return_value = necp_session_list_all(session, uap, retval);
        break;
    }
    case NECP_SESSION_ACTION_POLICY_DELETE_ALL: {
        return_value = necp_session_delete_all(session, uap, retval);
        break;
    }
    case NECP_SESSION_ACTION_SET_SESSION_PRIORITY: {
        return_value = necp_session_set_session_priority(session, uap, retval);
        break;
    }
    case NECP_SESSION_ACTION_LOCK_SESSION_TO_PROC: {
        return_value = necp_session_lock_to_process(session, uap, retval);
        break;
    }
    case NECP_SESSION_ACTION_REGISTER_SERVICE: {
        return_value = necp_session_register_service(session, uap, retval);
        break;
    }
    case NECP_SESSION_ACTION_UNREGISTER_SERVICE: {
        return_value = necp_session_unregister_service(session, uap, retval);
        break;
    }
    case NECP_SESSION_ACTION_POLICY_DUMP_ALL: {
        return_value = necp_session_dump_all(session, uap, retval);
        break;
    }
    default: {
        NECPLOG(LOG_ERR, "necp_session_action unknown action (%u)", action);
        return_value = EINVAL;
        break;
    }
    }

    NECP_SESSION_UNLOCK(session);
    file_drop(uap->necp_fd);

    return return_value;
}
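/*
 * Illustrative sketch (not part of the original source) of how a user-space
 * client is expected to drive this dispatcher, assuming necp_session_open()
 * and necp_session_action() system call wrappers whose arguments mirror the
 * necp_session_open_args / necp_session_action_args fields used above:
 *
 *   int fd = necp_session_open(0);
 *   // in_buffer holds policy TLVs; out_buffer receives the new policy ID
 *   necp_session_action(fd, NECP_SESSION_ACTION_POLICY_ADD,
 *       in_buffer, in_buffer_length, out_buffer, sizeof(necp_policy_id));
 *   necp_session_action(fd, NECP_SESSION_ACTION_POLICY_APPLY_ALL, NULL, 0, NULL, 0);
 *   close(fd); // tears down the session and its policies via necp_session_op_close()
 *
 * The exact wrapper signatures are an assumption here; the kernel side only
 * sees the argument-structure fields referenced in this file.
 */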
struct necp_resolver_key_state {
    const struct ccdigest_info *digest_info;
    uint8_t key[CCSHA256_OUTPUT_SIZE];
};
static struct necp_resolver_key_state s_necp_resolver_key_state;

static void
necp_generate_resolver_key(void)
{
    s_necp_resolver_key_state.digest_info = ccsha256_di();
    cc_rand_generate(s_necp_resolver_key_state.key, sizeof(s_necp_resolver_key_state.key));
}
static void
necp_sign_update_context(const struct ccdigest_info *di,
    cchmac_ctx_t ctx, uuid_t client_id, u_int8_t *query,
    u_int32_t query_length, u_int8_t *answer,
    u_int32_t answer_length)
{
    const uint8_t context[32] = {[0 ... 31] = 0x20}; // 0x20 repeated 32 times
    const char *context_string = "NECP Resolver Binder";
    uint8_t separator = 0;
    cchmac_update(di, ctx, sizeof(context), context);
    cchmac_update(di, ctx, strlen(context_string), context_string);
    cchmac_update(di, ctx, sizeof(separator), &separator);
    cchmac_update(di, ctx, sizeof(uuid_t), client_id);
    cchmac_update(di, ctx, sizeof(query_length), &query_length);
    cchmac_update(di, ctx, query_length, query);
    cchmac_update(di, ctx, sizeof(answer_length), &answer_length);
    cchmac_update(di, ctx, answer_length, answer);
}
int
necp_sign_resolver_answer(uuid_t client_id, u_int8_t *query, u_int32_t query_length,
    u_int8_t *answer, u_int32_t answer_length,
    u_int8_t *tag, u_int32_t *out_tag_length)
{
    if (s_necp_resolver_key_state.digest_info == NULL) {
    }

    if (query == NULL ||
        query_length == 0 ||
        answer_length == 0 ||
        out_tag_length == NULL) {
    }

    size_t required_tag_length = s_necp_resolver_key_state.digest_info->output_size;
    if (*out_tag_length < required_tag_length) {
    }

    *out_tag_length = required_tag_length;

    cchmac_ctx_decl(s_necp_resolver_key_state.digest_info->state_size,
        s_necp_resolver_key_state.digest_info->block_size, ctx);
    cchmac_init(s_necp_resolver_key_state.digest_info, ctx,
        sizeof(s_necp_resolver_key_state.key),
        s_necp_resolver_key_state.key);
    necp_sign_update_context(s_necp_resolver_key_state.digest_info,
        ctx, client_id, query, query_length,
        answer, answer_length);
    cchmac_final(s_necp_resolver_key_state.digest_info, ctx, tag);
bool
necp_validate_resolver_answer(uuid_t client_id, u_int8_t *query, u_int32_t query_length,
    u_int8_t *answer, u_int32_t answer_length,
    u_int8_t *tag, u_int32_t tag_length)
{
    if (s_necp_resolver_key_state.digest_info == NULL) {
    }

    if (query == NULL ||
        query_length == 0 ||
        answer_length == 0 ||

    size_t required_tag_length = s_necp_resolver_key_state.digest_info->output_size;
    if (tag_length != required_tag_length) {
    }

    uint8_t actual_tag[required_tag_length];

    cchmac_ctx_decl(s_necp_resolver_key_state.digest_info->state_size,
        s_necp_resolver_key_state.digest_info->block_size, ctx);
    cchmac_init(s_necp_resolver_key_state.digest_info, ctx,
        sizeof(s_necp_resolver_key_state.key),
        s_necp_resolver_key_state.key);
    necp_sign_update_context(s_necp_resolver_key_state.digest_info,
        ctx, client_id, query, query_length,
        answer, answer_length);
    cchmac_final(s_necp_resolver_key_state.digest_info, ctx, actual_tag);

    return cc_cmp_safe(s_necp_resolver_key_state.digest_info->output_size, tag, actual_tag) == 0;
}
	necp_kernel_policy_grp_attr = lck_grp_attr_alloc_init();
	if (necp_kernel_policy_grp_attr == NULL) {
		NECPLOG0(LOG_ERR, "lck_grp_attr_alloc_init failed");
	}

	necp_kernel_policy_mtx_grp = lck_grp_alloc_init(NECP_CONTROL_NAME, necp_kernel_policy_grp_attr);
	if (necp_kernel_policy_mtx_grp == NULL) {
		NECPLOG0(LOG_ERR, "lck_grp_alloc_init failed");
	}

	necp_kernel_policy_mtx_attr = lck_attr_alloc_init();
	if (necp_kernel_policy_mtx_attr == NULL) {
		NECPLOG0(LOG_ERR, "lck_attr_alloc_init failed");
	}

	lck_rw_init(&necp_kernel_policy_lock, necp_kernel_policy_mtx_grp, necp_kernel_policy_mtx_attr);

	necp_route_rule_grp_attr = lck_grp_attr_alloc_init();
	if (necp_route_rule_grp_attr == NULL) {
		NECPLOG0(LOG_ERR, "lck_grp_attr_alloc_init failed");
	}

	necp_route_rule_mtx_grp = lck_grp_alloc_init("necp_route_rule", necp_route_rule_grp_attr);
	if (necp_route_rule_mtx_grp == NULL) {
		NECPLOG0(LOG_ERR, "lck_grp_alloc_init failed");
	}

	necp_route_rule_mtx_attr = lck_attr_alloc_init();
	if (necp_route_rule_mtx_attr == NULL) {
		NECPLOG0(LOG_ERR, "lck_attr_alloc_init failed");
	}

	lck_rw_init(&necp_route_rule_lock, necp_route_rule_mtx_grp, necp_route_rule_mtx_attr);

	TAILQ_INIT(&necp_session_list);

	LIST_INIT(&necp_kernel_socket_policies);
	LIST_INIT(&necp_kernel_ip_output_policies);

	LIST_INIT(&necp_account_id_list);

	LIST_INIT(&necp_uuid_service_id_list);

	LIST_INIT(&necp_registered_service_list);

	LIST_INIT(&necp_route_rules);
	LIST_INIT(&necp_aggregate_route_rules);

	necp_generate_resolver_key();

	necp_uuid_app_id_hashtbl = hashinit(NECP_UUID_APP_ID_HASH_SIZE, M_NECP, &necp_uuid_app_id_hash_mask);
	necp_uuid_app_id_hash_num_buckets = necp_uuid_app_id_hash_mask + 1;
	necp_num_uuid_app_id_mappings = 0;
	necp_uuid_app_id_mappings_dirty = FALSE;

	necp_kernel_application_policies_condition_mask = 0;
	necp_kernel_socket_policies_condition_mask = 0;
	necp_kernel_ip_output_policies_condition_mask = 0;

	necp_kernel_application_policies_count = 0;
	necp_kernel_socket_policies_count = 0;
	necp_kernel_socket_policies_non_app_count = 0;
	necp_kernel_ip_output_policies_count = 0;
	necp_kernel_ip_output_policies_non_id_count = 0;

	necp_kernel_socket_policies_gencount = 1;

	memset(&necp_kernel_socket_policies_map, 0, sizeof(necp_kernel_socket_policies_map));
	memset(&necp_kernel_ip_output_policies_map, 0, sizeof(necp_kernel_ip_output_policies_map));
	necp_kernel_socket_policies_app_layer_map = NULL;

	necp_drop_unentitled_order = necp_get_first_order_for_priority(necp_drop_unentitled_level);

	if (necp_kernel_policy_mtx_attr != NULL) {
		lck_attr_free(necp_kernel_policy_mtx_attr);
		necp_kernel_policy_mtx_attr = NULL;
	}
	if (necp_kernel_policy_mtx_grp != NULL) {
		lck_grp_free(necp_kernel_policy_mtx_grp);
		necp_kernel_policy_mtx_grp = NULL;
	}
	if (necp_kernel_policy_grp_attr != NULL) {
		lck_grp_attr_free(necp_kernel_policy_grp_attr);
		necp_kernel_policy_grp_attr = NULL;
	}
	if (necp_route_rule_mtx_attr != NULL) {
		lck_attr_free(necp_route_rule_mtx_attr);
		necp_route_rule_mtx_attr = NULL;
	}
	if (necp_route_rule_mtx_grp != NULL) {
		lck_grp_free(necp_route_rule_mtx_grp);
		necp_route_rule_mtx_grp = NULL;
	}
	if (necp_route_rule_grp_attr != NULL) {
		lck_grp_attr_free(necp_route_rule_grp_attr);
		necp_route_rule_grp_attr = NULL;
	}
static void
necp_post_change_event(struct kev_necp_policies_changed_data *necp_event_data)
{
    struct kev_msg ev_msg;
    memset(&ev_msg, 0, sizeof(ev_msg));

    ev_msg.vendor_code = KEV_VENDOR_APPLE;
    ev_msg.kev_class = KEV_NETWORK_CLASS;
    ev_msg.kev_subclass = KEV_NECP_SUBCLASS;
    ev_msg.event_code = KEV_NECP_POLICIES_CHANGED;

    ev_msg.dv[0].data_ptr = necp_event_data;
    ev_msg.dv[0].data_length = sizeof(necp_event_data->changed_count);
    ev_msg.dv[1].data_length = 0;

    kev_post_msg(&ev_msg);
}
necp_buffer_write_tlv_validate(u_int8_t *cursor, u_int8_t type, u_int32_t length,
    u_int8_t *buffer, u_int32_t buffer_length)
{
    if (cursor < buffer || (uintptr_t)(cursor - buffer) > buffer_length) {
        NECPLOG0(LOG_ERR, "Cannot write TLV in buffer (invalid cursor)");
    }
    u_int8_t *next_tlv = (u_int8_t *)(cursor + sizeof(type) + sizeof(length) + length);
    if (next_tlv <= buffer || // make sure the next TLV start doesn't overflow
        (uintptr_t)(next_tlv - buffer) > buffer_length) { // make sure the next TLV has enough room in buffer
        NECPLOG(LOG_ERR, "Cannot write TLV in buffer (TLV length %u, buffer length %u)",
            length, buffer_length);
    }
u_int8_t *
necp_buffer_write_tlv_if_different(u_int8_t *cursor, u_int8_t type,
    u_int32_t length, const void *value, bool *updated,
    u_int8_t *buffer, u_int32_t buffer_length)
{
    if (!necp_buffer_write_tlv_validate(cursor, type, length, buffer, buffer_length)) {
        // If we can't fit this TLV, return the current cursor
        return cursor;
    }
    u_int8_t *next_tlv = (u_int8_t *)(cursor + sizeof(type) + sizeof(length) + length);
    if (*updated || *(u_int8_t *)(cursor) != type) {
        *(u_int8_t *)(cursor) = type;
        *updated = TRUE;
    }
    if (*updated || *(u_int32_t *)(void *)(cursor + sizeof(type)) != length) {
        *(u_int32_t *)(void *)(cursor + sizeof(type)) = length;
        *updated = TRUE;
    }
    if (*updated || memcmp((u_int8_t *)(cursor + sizeof(type) + sizeof(length)), value, length) != 0) {
        memcpy((u_int8_t *)(cursor + sizeof(type) + sizeof(length)), value, length);
        *updated = TRUE;
    }
    return next_tlv;
}
u_int8_t *
necp_buffer_write_tlv(u_int8_t *cursor, u_int8_t type,
    u_int32_t length, const void *value,
    u_int8_t *buffer, u_int32_t buffer_length)
{
    if (!necp_buffer_write_tlv_validate(cursor, type, length, buffer, buffer_length)) {
        return NULL;
    }
    u_int8_t *next_tlv = (u_int8_t *)(cursor + sizeof(type) + sizeof(length) + length);
    *(u_int8_t *)(cursor) = type;
    *(u_int32_t *)(void *)(cursor + sizeof(type)) = length;
    memcpy((u_int8_t *)(cursor + sizeof(type) + sizeof(length)), value, length);

    return next_tlv;
}
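/*
 * Illustrative note (not part of the original source): as the readers and
 * writers here show, a NECP TLV is laid out as a 1-byte type, followed by a
 * 4-byte length, followed by `length` bytes of value. For example, a
 * NECP_TLV_POLICY_ORDER TLV carrying a 4-byte necp_policy_order occupies
 * 1 + 4 + 4 = 9 bytes, which matches the tlv_size arithmetic used in
 * necp_session_list_all() and necp_session_get_policy().
 */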
u_int8_t
necp_buffer_get_tlv_type(u_int8_t *buffer, int tlv_offset)
{
    u_int8_t *type = NULL;

    if (buffer == NULL) {
        return 0;
    }

    type = (u_int8_t *)((u_int8_t *)buffer + tlv_offset);
    return type ? *type : 0;
}
u_int32_t
necp_buffer_get_tlv_length(u_int8_t *buffer, int tlv_offset)
{
    u_int32_t *length = NULL;

    if (buffer == NULL) {
        return 0;
    }

    length = (u_int32_t *)(void *)((u_int8_t *)buffer + tlv_offset + sizeof(u_int8_t));
    return length ? *length : 0;
}
u_int8_t *
necp_buffer_get_tlv_value(u_int8_t *buffer, int tlv_offset, u_int32_t *value_size)
{
    u_int8_t *value = NULL;
    u_int32_t length = necp_buffer_get_tlv_length(buffer, tlv_offset);

    if (value_size != NULL) {
        *value_size = length;
    }

    value = (u_int8_t *)((u_int8_t *)buffer + tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t));
    return value;
}
int
necp_buffer_find_tlv(u_int8_t *buffer, u_int32_t buffer_length, int offset, u_int8_t type, int *err, int next)
{
    int cursor = offset;
    u_int32_t curr_length;

    if ((((u_int32_t)cursor) + sizeof(curr_type) + sizeof(curr_length)) > buffer_length) {
    }
    curr_type = necp_buffer_get_tlv_type(buffer, cursor);
    curr_type = NECP_TLV_NIL;
    curr_length = necp_buffer_get_tlv_length(buffer, cursor);
    if (curr_length > buffer_length - ((u_int32_t)cursor + sizeof(curr_type) + sizeof(curr_length))) {
    }

    next_cursor = (cursor + sizeof(curr_type) + sizeof(curr_length) + curr_length);
    if (curr_type == type) {
        // check if entire TLV fits inside buffer
        if (((u_int32_t)next_cursor) <= buffer_length) {
        }
    }
    cursor = next_cursor;
static int
necp_find_tlv(u_int8_t *buffer, u_int32_t buffer_length, int offset, u_int8_t type, int *err, int next)
{
    int cursor = -1;
    if (buffer != NULL) {
        cursor = necp_buffer_find_tlv(buffer, buffer_length, offset, type, err, next);
    }
    return cursor;
}
necp_get_tlv_at_offset(u_int8_t *buffer, u_int32_t buffer_length,
    int tlv_offset, u_int32_t out_buffer_length, void *out_buffer, u_int32_t *value_size)
{
    if (buffer == NULL) {
        NECPLOG0(LOG_ERR, "necp_get_tlv_at_offset buffer is NULL");
    }

    // Handle buffer parsing

    // Validate that buffer has enough room for any TLV
    if (tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t) > buffer_length) {
        NECPLOG(LOG_ERR, "necp_get_tlv_at_offset buffer_length is too small for TLV (%u < %u)",
            buffer_length, tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t));
    }

    // Validate that buffer has enough room for this TLV
    u_int32_t tlv_length = necp_buffer_get_tlv_length(buffer, tlv_offset);
    if (tlv_length > buffer_length - (tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t))) {
        NECPLOG(LOG_ERR, "necp_get_tlv_at_offset buffer_length is too small for TLV of length %u (%u < %u)",
            tlv_length, buffer_length, tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t) + tlv_length);
    }

    if (out_buffer != NULL && out_buffer_length > 0) {
        // Validate that out buffer is large enough for value
        if (out_buffer_length < tlv_length) {
            NECPLOG(LOG_ERR, "necp_get_tlv_at_offset out_buffer_length is too small for TLV value (%u < %u)",
                out_buffer_length, tlv_length);
        }

        // Get value pointer
        u_int8_t *tlv_value = necp_buffer_get_tlv_value(buffer, tlv_offset, NULL);
        if (tlv_value == NULL) {
            NECPLOG0(LOG_ERR, "necp_get_tlv_at_offset tlv_value is NULL");
        }

        memcpy(out_buffer, tlv_value, tlv_length);
    }

    if (value_size != NULL) {
        *value_size = tlv_length;
    }
1649 necp_get_tlv(u_int8_t
*buffer
, u_int32_t buffer_length
,
1650 int offset
, u_int8_t type
, u_int32_t buff_len
, void *buff
, u_int32_t
*value_size
)
1654 int tlv_offset
= necp_find_tlv(buffer
, buffer_length
, offset
, type
, &error
, 0);
1655 if (tlv_offset
< 0) {
1659 return necp_get_tlv_at_offset(buffer
, buffer_length
, tlv_offset
, buff_len
, buff
, value_size
);
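/*
 * Editor's note: the readers and writers above assume the NECP TLV wire
 * format used throughout this file: a 1-byte type, a 4-byte length stored
 * unaligned in host byte order, then `length` bytes of value. The sketch
 * below is illustrative only (the helper name is hypothetical and not part
 * of NECP); it shows how a well-formed TLV buffer can be walked safely.
 */
#if 0
static u_int32_t
example_count_tlvs(const u_int8_t *buffer, u_int32_t buffer_length)
{
	u_int32_t count = 0;
	u_int32_t cursor = 0;
	// Each iteration consumes one complete TLV: type + length + value.
	while (cursor + sizeof(u_int8_t) + sizeof(u_int32_t) <= buffer_length) {
		u_int32_t value_length = 0;
		memcpy(&value_length, buffer + cursor + sizeof(u_int8_t), sizeof(value_length));
		if (value_length > buffer_length - (cursor + sizeof(u_int8_t) + sizeof(u_int32_t))) {
			break; // Truncated TLV; stop rather than over-read
		}
		count++;
		cursor += sizeof(u_int8_t) + sizeof(u_int32_t) + value_length;
	}
	return count;
}
#endif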
// Session Management

static struct necp_session *
necp_create_session(void)
{
	struct necp_session *new_session = NULL;

	MALLOC(new_session, struct necp_session *, sizeof(*new_session), M_NECP, M_WAITOK | M_ZERO);
	if (new_session == NULL) {
		goto done;
	}

	new_session->necp_fd_type = necp_fd_type_session;
	new_session->session_priority = NECP_SESSION_PRIORITY_UNKNOWN;
	new_session->dirty = FALSE;
	LIST_INIT(&new_session->policies);
	lck_mtx_init(&new_session->lock, necp_kernel_policy_mtx_grp, necp_kernel_policy_mtx_attr);

	// Take the lock
	lck_rw_lock_exclusive(&necp_kernel_policy_lock);

	// Find the next available control unit
	u_int32_t control_unit = 1;
	struct necp_session *next_session = NULL;
	TAILQ_FOREACH(next_session, &necp_session_list, chain) {
		if (next_session->control_unit > control_unit) {
			// Found a gap, grab this control unit
			break;
		}

		// Try the next control unit, loop around
		control_unit = next_session->control_unit + 1;
	}

	new_session->control_unit = control_unit;
	new_session->session_order = necp_allocate_new_session_order(new_session->session_priority, control_unit);

	if (next_session != NULL) {
		TAILQ_INSERT_BEFORE(next_session, new_session, chain);
	} else {
		TAILQ_INSERT_TAIL(&necp_session_list, new_session, chain);
	}

	necp_session_count++;
	lck_rw_done(&necp_kernel_policy_lock);

	if (necp_debug) {
		NECPLOG(LOG_DEBUG, "Created NECP session, control unit %d", control_unit);
	}

done:
	return new_session;
}

static void
necp_delete_session(struct necp_session *session)
{
	if (session != NULL) {
		struct necp_service_registration *service = NULL;
		struct necp_service_registration *temp_service = NULL;
		LIST_FOREACH_SAFE(service, &session->services, session_chain, temp_service) {
			LIST_REMOVE(service, session_chain);
			lck_rw_lock_exclusive(&necp_kernel_policy_lock);
			LIST_REMOVE(service, kernel_chain);
			lck_rw_done(&necp_kernel_policy_lock);
			FREE(service, M_NECP);
		}
		if (necp_debug) {
			NECPLOG0(LOG_DEBUG, "Deleted NECP session");
		}

		lck_rw_lock_exclusive(&necp_kernel_policy_lock);
		TAILQ_REMOVE(&necp_session_list, session, chain);
		necp_session_count--;
		lck_rw_done(&necp_kernel_policy_lock);

		lck_mtx_destroy(&session->lock, necp_kernel_policy_mtx_grp);
		FREE(session, M_NECP);
	}
}
// Session Policy Management

static inline u_int8_t
necp_policy_result_get_type_from_buffer(u_int8_t *buffer, u_int32_t length)
{
	return (buffer && length >= sizeof(u_int8_t)) ? buffer[0] : 0;
}

static inline u_int32_t
necp_policy_result_get_parameter_length_from_buffer(u_int8_t *buffer, u_int32_t length)
{
	return (buffer && length > sizeof(u_int8_t)) ? (length - sizeof(u_int8_t)) : 0;
}

static inline u_int8_t *
necp_policy_result_get_parameter_pointer_from_buffer(u_int8_t *buffer, u_int32_t length)
{
	return (buffer && length > sizeof(u_int8_t)) ? (buffer + sizeof(u_int8_t)) : NULL;
}

static bool
necp_policy_result_requires_route_rules(u_int8_t *buffer, u_int32_t length)
{
	u_int8_t type = necp_policy_result_get_type_from_buffer(buffer, length);
	if (type == NECP_POLICY_RESULT_ROUTE_RULES) {
		return TRUE;
	}
	return FALSE;
}

static inline bool
necp_address_is_valid(struct sockaddr *address)
{
	if (address->sa_family == AF_INET) {
		return address->sa_len == sizeof(struct sockaddr_in);
	} else if (address->sa_family == AF_INET6) {
		return address->sa_len == sizeof(struct sockaddr_in6);
	} else {
		return FALSE;
	}
}
static bool
necp_policy_result_is_valid(u_int8_t *buffer, u_int32_t length)
{
	bool validated = FALSE;
	u_int8_t type = necp_policy_result_get_type_from_buffer(buffer, length);
	u_int32_t parameter_length = necp_policy_result_get_parameter_length_from_buffer(buffer, length);
	switch (type) {
	case NECP_POLICY_RESULT_PASS:
		if (parameter_length == 0 || parameter_length == sizeof(u_int32_t)) {
			validated = TRUE;
		}
		break;
	case NECP_POLICY_RESULT_DROP:
	case NECP_POLICY_RESULT_ROUTE_RULES:
	case NECP_POLICY_RESULT_SCOPED_DIRECT:
	case NECP_POLICY_RESULT_ALLOW_UNENTITLED: {
		validated = TRUE;
		break;
	}
	case NECP_POLICY_RESULT_SKIP:
	case NECP_POLICY_RESULT_SOCKET_DIVERT:
	case NECP_POLICY_RESULT_SOCKET_FILTER: {
		if (parameter_length >= sizeof(u_int32_t)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_RESULT_IP_TUNNEL: {
		if (parameter_length > sizeof(u_int32_t)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_RESULT_SOCKET_SCOPED: {
		if (parameter_length > 0) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_RESULT_TRIGGER:
	case NECP_POLICY_RESULT_TRIGGER_IF_NEEDED:
	case NECP_POLICY_RESULT_TRIGGER_SCOPED:
	case NECP_POLICY_RESULT_NO_TRIGGER_SCOPED:
	case NECP_POLICY_RESULT_USE_NETAGENT:
	case NECP_POLICY_RESULT_NETAGENT_SCOPED: {
		if (parameter_length >= sizeof(uuid_t)) {
			validated = TRUE;
		}
		break;
	}
	default: {
		validated = FALSE;
		break;
	}
	}

	if (necp_debug) {
		NECPLOG(LOG_DEBUG, "Policy result type %d, valid %d", type, validated);
	}

	return validated;
}
static inline u_int8_t
necp_policy_condition_get_type_from_buffer(u_int8_t *buffer, u_int32_t length)
{
	return (buffer && length >= sizeof(u_int8_t)) ? buffer[0] : 0;
}

static inline u_int8_t
necp_policy_condition_get_flags_from_buffer(u_int8_t *buffer, u_int32_t length)
{
	return (buffer && length >= (2 * sizeof(u_int8_t))) ? buffer[1] : 0;
}

static inline u_int32_t
necp_policy_condition_get_value_length_from_buffer(u_int8_t *buffer, u_int32_t length)
{
	return (buffer && length >= (2 * sizeof(u_int8_t))) ? (length - (2 * sizeof(u_int8_t))) : 0;
}

static inline u_int8_t *
necp_policy_condition_get_value_pointer_from_buffer(u_int8_t *buffer, u_int32_t length)
{
	return (buffer && length > (2 * sizeof(u_int8_t))) ? (buffer + (2 * sizeof(u_int8_t))) : NULL;
}

static inline bool
necp_policy_condition_is_default(u_int8_t *buffer, u_int32_t length)
{
	return necp_policy_condition_get_type_from_buffer(buffer, length) == NECP_POLICY_CONDITION_DEFAULT;
}

static inline bool
necp_policy_condition_is_application(u_int8_t *buffer, u_int32_t length)
{
	return necp_policy_condition_get_type_from_buffer(buffer, length) == NECP_POLICY_CONDITION_APPLICATION;
}

static inline bool
necp_policy_condition_is_real_application(u_int8_t *buffer, u_int32_t length)
{
	return necp_policy_condition_get_type_from_buffer(buffer, length) == NECP_POLICY_CONDITION_REAL_APPLICATION;
}

static inline bool
necp_policy_condition_requires_application(u_int8_t *buffer, u_int32_t length)
{
	u_int8_t type = necp_policy_condition_get_type_from_buffer(buffer, length);
	return type == NECP_POLICY_CONDITION_REAL_APPLICATION;
}

static inline bool
necp_policy_condition_requires_real_application(u_int8_t *buffer, u_int32_t length)
{
	u_int8_t type = necp_policy_condition_get_type_from_buffer(buffer, length);
	return type == NECP_POLICY_CONDITION_ENTITLEMENT;
}
static bool
necp_policy_condition_is_valid(u_int8_t *buffer, u_int32_t length, u_int8_t policy_result_type)
{
	bool validated = FALSE;
	bool result_cannot_have_ip_layer = (policy_result_type == NECP_POLICY_RESULT_SOCKET_DIVERT ||
	    policy_result_type == NECP_POLICY_RESULT_SOCKET_FILTER ||
	    policy_result_type == NECP_POLICY_RESULT_TRIGGER ||
	    policy_result_type == NECP_POLICY_RESULT_TRIGGER_IF_NEEDED ||
	    policy_result_type == NECP_POLICY_RESULT_TRIGGER_SCOPED ||
	    policy_result_type == NECP_POLICY_RESULT_NO_TRIGGER_SCOPED ||
	    policy_result_type == NECP_POLICY_RESULT_SOCKET_SCOPED ||
	    policy_result_type == NECP_POLICY_RESULT_ROUTE_RULES ||
	    policy_result_type == NECP_POLICY_RESULT_USE_NETAGENT ||
	    policy_result_type == NECP_POLICY_RESULT_NETAGENT_SCOPED ||
	    policy_result_type == NECP_POLICY_RESULT_SCOPED_DIRECT ||
	    policy_result_type == NECP_POLICY_RESULT_ALLOW_UNENTITLED) ? TRUE : FALSE;
	u_int32_t condition_length = necp_policy_condition_get_value_length_from_buffer(buffer, length);
	u_int8_t *condition_value = necp_policy_condition_get_value_pointer_from_buffer(buffer, length);
	u_int8_t type = necp_policy_condition_get_type_from_buffer(buffer, length);
	u_int8_t flags = necp_policy_condition_get_flags_from_buffer(buffer, length);
	switch (type) {
	case NECP_POLICY_CONDITION_APPLICATION:
	case NECP_POLICY_CONDITION_REAL_APPLICATION: {
		if (!(flags & NECP_POLICY_CONDITION_FLAGS_NEGATIVE) &&
		    condition_length >= sizeof(uuid_t) &&
		    condition_value != NULL &&
		    !uuid_is_null(condition_value)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_DOMAIN:
	case NECP_POLICY_CONDITION_ACCOUNT:
	case NECP_POLICY_CONDITION_BOUND_INTERFACE: {
		if (condition_length > 0) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_TRAFFIC_CLASS: {
		if (condition_length >= sizeof(struct necp_policy_condition_tc_range)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_DEFAULT:
	case NECP_POLICY_CONDITION_ALL_INTERFACES:
	case NECP_POLICY_CONDITION_ENTITLEMENT:
	case NECP_POLICY_CONDITION_PLATFORM_BINARY:
	case NECP_POLICY_CONDITION_HAS_CLIENT:
	case NECP_POLICY_CONDITION_LOCAL_NETWORKS: {
		if (!(flags & NECP_POLICY_CONDITION_FLAGS_NEGATIVE)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_IP_PROTOCOL: {
		if (condition_length >= sizeof(u_int16_t)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_PID: {
		if (condition_length >= sizeof(pid_t) &&
		    condition_value != NULL &&
		    *((pid_t *)(void *)condition_value) != 0) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_UID: {
		if (condition_length >= sizeof(uid_t)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_LOCAL_ADDR:
	case NECP_POLICY_CONDITION_REMOTE_ADDR: {
		if (!result_cannot_have_ip_layer && condition_length >= sizeof(struct necp_policy_condition_addr) &&
		    necp_address_is_valid(&((struct necp_policy_condition_addr *)(void *)condition_value)->address.sa)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_LOCAL_ADDR_RANGE:
	case NECP_POLICY_CONDITION_REMOTE_ADDR_RANGE: {
		if (!result_cannot_have_ip_layer && condition_length >= sizeof(struct necp_policy_condition_addr_range) &&
		    necp_address_is_valid(&((struct necp_policy_condition_addr_range *)(void *)condition_value)->start_address.sa) &&
		    necp_address_is_valid(&((struct necp_policy_condition_addr_range *)(void *)condition_value)->end_address.sa)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_AGENT_TYPE: {
		if (!(flags & NECP_POLICY_CONDITION_FLAGS_NEGATIVE) &&
		    condition_length >= sizeof(struct necp_policy_condition_agent_type)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_FLOW_IP_PROTOCOL: {
		if (condition_length >= sizeof(u_int16_t)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_FLOW_LOCAL_ADDR:
	case NECP_POLICY_CONDITION_FLOW_REMOTE_ADDR: {
		if (condition_length >= sizeof(struct necp_policy_condition_addr) &&
		    necp_address_is_valid(&((struct necp_policy_condition_addr *)(void *)condition_value)->address.sa)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_FLOW_LOCAL_ADDR_RANGE:
	case NECP_POLICY_CONDITION_FLOW_REMOTE_ADDR_RANGE: {
		if (condition_length >= sizeof(struct necp_policy_condition_addr_range) &&
		    necp_address_is_valid(&((struct necp_policy_condition_addr_range *)(void *)condition_value)->start_address.sa) &&
		    necp_address_is_valid(&((struct necp_policy_condition_addr_range *)(void *)condition_value)->end_address.sa)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_CLIENT_FLAGS: {
		if (condition_length == 0 || condition_length >= sizeof(u_int32_t)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_FLOW_LOCAL_ADDR_EMPTY: {
		validated = TRUE;
		break;
	}
	case NECP_POLICY_CONDITION_FLOW_REMOTE_ADDR_EMPTY: {
		validated = TRUE;
		break;
	}
	default: {
		validated = FALSE;
		break;
	}
	}

	if (necp_debug) {
		NECPLOG(LOG_DEBUG, "Policy condition type %d, valid %d", type, validated);
	}

	return validated;
}
static bool
necp_policy_route_rule_is_default(u_int8_t *buffer, u_int32_t length)
{
	return necp_policy_condition_get_value_length_from_buffer(buffer, length) == 0 &&
	       necp_policy_condition_get_flags_from_buffer(buffer, length) == 0;
}

static bool
necp_policy_route_rule_is_valid(u_int8_t *buffer, u_int32_t length)
{
	bool validated = FALSE;
	u_int8_t type = necp_policy_condition_get_type_from_buffer(buffer, length);
	switch (type) {
	case NECP_ROUTE_RULE_ALLOW_INTERFACE: {
		validated = TRUE;
		break;
	}
	case NECP_ROUTE_RULE_DENY_INTERFACE: {
		validated = TRUE;
		break;
	}
	case NECP_ROUTE_RULE_QOS_MARKING: {
		validated = TRUE;
		break;
	}
	case NECP_ROUTE_RULE_DENY_LQM_ABORT: {
		validated = TRUE;
		break;
	}
	default: {
		validated = FALSE;
		break;
	}
	}

	if (necp_debug) {
		NECPLOG(LOG_DEBUG, "Policy route rule type %d, valid %d", type, validated);
	}

	return validated;
}
static int
necp_get_posix_error_for_necp_error(int response_error)
{
	switch (response_error) {
	case NECP_ERROR_UNKNOWN_PACKET_TYPE:
	case NECP_ERROR_INVALID_TLV:
	case NECP_ERROR_POLICY_RESULT_INVALID:
	case NECP_ERROR_POLICY_CONDITIONS_INVALID:
	case NECP_ERROR_ROUTE_RULES_INVALID: {
		return EINVAL;
	}
	case NECP_ERROR_POLICY_ID_NOT_FOUND: {
		return ENOENT;
	}
	case NECP_ERROR_INVALID_PROCESS: {
		return EPERM;
	}
	case NECP_ERROR_INTERNAL:
	default: {
		return ENOMEM;
	}
	}
}
static necp_policy_id
necp_handle_policy_add(struct necp_session *session,
    u_int8_t *tlv_buffer, size_t tlv_buffer_length, int offset, int *return_error)
{
	bool has_default_condition = FALSE;
	bool has_non_default_condition = FALSE;
	bool has_application_condition = FALSE;
	bool has_real_application_condition = FALSE;
	bool requires_application_condition = FALSE;
	bool requires_real_application_condition = FALSE;
	u_int8_t *conditions_array = NULL;
	u_int32_t conditions_array_size = 0;
	int conditions_array_cursor;

	bool has_default_route_rule = FALSE;
	u_int8_t *route_rules_array = NULL;
	u_int32_t route_rules_array_size = 0;
	int route_rules_array_cursor;

	int cursor;
	int error = 0;
	u_int32_t response_error = NECP_ERROR_INTERNAL;

	necp_policy_order order = 0;
	struct necp_session_policy *policy = NULL;
	u_int8_t *policy_result = NULL;
	u_int32_t policy_result_size = 0;

	// Read policy order
	error = necp_get_tlv(tlv_buffer, tlv_buffer_length, offset, NECP_TLV_POLICY_ORDER, sizeof(order), &order, NULL);
	if (error) {
		NECPLOG(LOG_ERR, "Failed to get policy order: %d", error);
		response_error = NECP_ERROR_INVALID_TLV;
		goto fail;
	}

	// Read policy result
	cursor = necp_find_tlv(tlv_buffer, tlv_buffer_length, offset, NECP_TLV_POLICY_RESULT, &error, 0);
	if (error || cursor < 0) {
		NECPLOG(LOG_ERR, "Failed to find policy result TLV: %d", error);
		response_error = NECP_ERROR_INVALID_TLV;
		goto fail;
	}
	error = necp_get_tlv_at_offset(tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &policy_result_size);
	if (error || policy_result_size == 0) {
		NECPLOG(LOG_ERR, "Failed to get policy result length: %d", error);
		response_error = NECP_ERROR_INVALID_TLV;
		goto fail;
	}
	if (policy_result_size > NECP_MAX_POLICY_RESULT_SIZE) {
		NECPLOG(LOG_ERR, "Policy result length too large: %u", policy_result_size);
		response_error = NECP_ERROR_INVALID_TLV;
		goto fail;
	}
	MALLOC(policy_result, u_int8_t *, policy_result_size, M_NECP, M_WAITOK);
	if (policy_result == NULL) {
		NECPLOG(LOG_ERR, "Failed to allocate a policy result buffer (size %d)", policy_result_size);
		response_error = NECP_ERROR_INTERNAL;
		goto fail;
	}
	error = necp_get_tlv_at_offset(tlv_buffer, tlv_buffer_length, cursor, policy_result_size, policy_result, NULL);
	if (error) {
		NECPLOG(LOG_ERR, "Failed to get policy result: %d", error);
		response_error = NECP_ERROR_POLICY_RESULT_INVALID;
		goto fail;
	}
	if (!necp_policy_result_is_valid(policy_result, policy_result_size)) {
		NECPLOG0(LOG_ERR, "Failed to validate policy result");
		response_error = NECP_ERROR_POLICY_RESULT_INVALID;
		goto fail;
	}

	if (necp_policy_result_requires_route_rules(policy_result, policy_result_size)) {
		// Read route rules conditions
		for (cursor = necp_find_tlv(tlv_buffer, tlv_buffer_length, offset, NECP_TLV_ROUTE_RULE, &error, 0);
		    cursor >= 0;
		    cursor = necp_find_tlv(tlv_buffer, tlv_buffer_length, cursor, NECP_TLV_ROUTE_RULE, &error, 1)) {
			u_int32_t route_rule_size = 0;
			necp_get_tlv_at_offset(tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &route_rule_size);
			if (os_add_overflow(route_rules_array_size,
			    (sizeof(u_int8_t) + sizeof(u_int32_t) + route_rule_size),
			    &route_rules_array_size)) {
				NECPLOG0(LOG_ERR, "Route rules size overflowed, too large");
				response_error = NECP_ERROR_INVALID_TLV;
				goto fail;
			}
		}

		if (route_rules_array_size == 0) {
			NECPLOG0(LOG_ERR, "Failed to get policy route rules");
			response_error = NECP_ERROR_INVALID_TLV;
			goto fail;
		}
		if (route_rules_array_size > NECP_MAX_ROUTE_RULES_ARRAY_SIZE) {
			NECPLOG(LOG_ERR, "Route rules length too large: %u", route_rules_array_size);
			response_error = NECP_ERROR_INVALID_TLV;
			goto fail;
		}
		MALLOC(route_rules_array, u_int8_t *, route_rules_array_size, M_NECP, M_WAITOK);
		if (route_rules_array == NULL) {
			NECPLOG(LOG_ERR, "Failed to allocate a policy route rules array (size %d)", route_rules_array_size);
			response_error = NECP_ERROR_INTERNAL;
			goto fail;
		}

		route_rules_array_cursor = 0;
		for (cursor = necp_find_tlv(tlv_buffer, tlv_buffer_length, offset, NECP_TLV_ROUTE_RULE, &error, 0);
		    cursor >= 0;
		    cursor = necp_find_tlv(tlv_buffer, tlv_buffer_length, cursor, NECP_TLV_ROUTE_RULE, &error, 1)) {
			u_int8_t route_rule_type = NECP_TLV_ROUTE_RULE;
			u_int32_t route_rule_size = 0;
			necp_get_tlv_at_offset(tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &route_rule_size);
			if (route_rule_size > 0 &&
			    (sizeof(route_rule_type) + sizeof(route_rule_size) + route_rule_size) <= (route_rules_array_size - route_rules_array_cursor)) {
				// Add type
				memcpy((route_rules_array + route_rules_array_cursor), &route_rule_type, sizeof(route_rule_type));
				route_rules_array_cursor += sizeof(route_rule_type);

				// Add length
				memcpy((route_rules_array + route_rules_array_cursor), &route_rule_size, sizeof(route_rule_size));
				route_rules_array_cursor += sizeof(route_rule_size);

				// Add value
				necp_get_tlv_at_offset(tlv_buffer, tlv_buffer_length, cursor, route_rule_size, (route_rules_array + route_rules_array_cursor), NULL);

				if (!necp_policy_route_rule_is_valid((route_rules_array + route_rules_array_cursor), route_rule_size)) {
					NECPLOG0(LOG_ERR, "Failed to validate policy route rule");
					response_error = NECP_ERROR_ROUTE_RULES_INVALID;
					goto fail;
				}

				if (necp_policy_route_rule_is_default((route_rules_array + route_rules_array_cursor), route_rule_size)) {
					if (has_default_route_rule) {
						NECPLOG0(LOG_ERR, "Failed to validate route rule; contained multiple default route rules");
						response_error = NECP_ERROR_ROUTE_RULES_INVALID;
						goto fail;
					}
					has_default_route_rule = TRUE;
				}

				route_rules_array_cursor += route_rule_size;
			}
		}
	}

	// Read policy conditions
	for (cursor = necp_find_tlv(tlv_buffer, tlv_buffer_length, offset, NECP_TLV_POLICY_CONDITION, &error, 0);
	    cursor >= 0;
	    cursor = necp_find_tlv(tlv_buffer, tlv_buffer_length, cursor, NECP_TLV_POLICY_CONDITION, &error, 1)) {
		u_int32_t condition_size = 0;
		necp_get_tlv_at_offset(tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &condition_size);

		if (condition_size > 0) {
			if (os_add_overflow(conditions_array_size,
			    (sizeof(u_int8_t) + sizeof(u_int32_t) + condition_size),
			    &conditions_array_size)) {
				NECPLOG0(LOG_ERR, "Conditions size overflowed, too large");
				response_error = NECP_ERROR_INVALID_TLV;
				goto fail;
			}
		}
	}

	if (conditions_array_size == 0) {
		NECPLOG0(LOG_ERR, "Failed to get policy conditions");
		response_error = NECP_ERROR_INVALID_TLV;
		goto fail;
	}
	if (conditions_array_size > NECP_MAX_CONDITIONS_ARRAY_SIZE) {
		NECPLOG(LOG_ERR, "Conditions length too large: %u", conditions_array_size);
		response_error = NECP_ERROR_INVALID_TLV;
		goto fail;
	}
	MALLOC(conditions_array, u_int8_t *, conditions_array_size, M_NECP, M_WAITOK);
	if (conditions_array == NULL) {
		NECPLOG(LOG_ERR, "Failed to allocate a policy conditions array (size %d)", conditions_array_size);
		response_error = NECP_ERROR_INTERNAL;
		goto fail;
	}

	conditions_array_cursor = 0;
	for (cursor = necp_find_tlv(tlv_buffer, tlv_buffer_length, offset, NECP_TLV_POLICY_CONDITION, &error, 0);
	    cursor >= 0;
	    cursor = necp_find_tlv(tlv_buffer, tlv_buffer_length, cursor, NECP_TLV_POLICY_CONDITION, &error, 1)) {
		u_int8_t condition_type = NECP_TLV_POLICY_CONDITION;
		u_int32_t condition_size = 0;
		necp_get_tlv_at_offset(tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &condition_size);
		if (condition_size > 0 &&
		    (sizeof(condition_type) + sizeof(condition_size) + condition_size) <= (conditions_array_size - conditions_array_cursor)) {
			// Add type
			memcpy((conditions_array + conditions_array_cursor), &condition_type, sizeof(condition_type));
			conditions_array_cursor += sizeof(condition_type);

			// Add length
			memcpy((conditions_array + conditions_array_cursor), &condition_size, sizeof(condition_size));
			conditions_array_cursor += sizeof(condition_size);

			// Add value
			necp_get_tlv_at_offset(tlv_buffer, tlv_buffer_length, cursor, condition_size, (conditions_array + conditions_array_cursor), NULL);
			if (!necp_policy_condition_is_valid((conditions_array + conditions_array_cursor), condition_size, necp_policy_result_get_type_from_buffer(policy_result, policy_result_size))) {
				NECPLOG0(LOG_ERR, "Failed to validate policy condition");
				response_error = NECP_ERROR_POLICY_CONDITIONS_INVALID;
				goto fail;
			}

			if (necp_policy_condition_is_default((conditions_array + conditions_array_cursor), condition_size)) {
				has_default_condition = TRUE;
			} else {
				has_non_default_condition = TRUE;
			}
			if (has_default_condition && has_non_default_condition) {
				NECPLOG0(LOG_ERR, "Failed to validate conditions; contained default and non-default conditions");
				response_error = NECP_ERROR_POLICY_CONDITIONS_INVALID;
				goto fail;
			}

			if (necp_policy_condition_is_application((conditions_array + conditions_array_cursor), condition_size)) {
				has_application_condition = TRUE;
			}

			if (necp_policy_condition_is_real_application((conditions_array + conditions_array_cursor), condition_size)) {
				has_real_application_condition = TRUE;
			}

			if (necp_policy_condition_requires_application((conditions_array + conditions_array_cursor), condition_size)) {
				requires_application_condition = TRUE;
			}

			if (necp_policy_condition_requires_real_application((conditions_array + conditions_array_cursor), condition_size)) {
				requires_real_application_condition = TRUE;
			}

			conditions_array_cursor += condition_size;
		}
	}

	if (requires_application_condition && !has_application_condition) {
		NECPLOG0(LOG_ERR, "Failed to validate conditions; did not contain application condition");
		response_error = NECP_ERROR_POLICY_CONDITIONS_INVALID;
		goto fail;
	}

	if (requires_real_application_condition && !has_real_application_condition) {
		NECPLOG0(LOG_ERR, "Failed to validate conditions; did not contain real application condition");
		response_error = NECP_ERROR_POLICY_CONDITIONS_INVALID;
		goto fail;
	}

	if ((policy = necp_policy_create(session, order, conditions_array, conditions_array_size, route_rules_array, route_rules_array_size, policy_result, policy_result_size)) == NULL) {
		response_error = NECP_ERROR_INTERNAL;
		goto fail;
	}

	return policy->local_id;

fail:
	if (policy_result != NULL) {
		FREE(policy_result, M_NECP);
	}
	if (conditions_array != NULL) {
		FREE(conditions_array, M_NECP);
	}
	if (route_rules_array != NULL) {
		FREE(route_rules_array, M_NECP);
	}

	if (return_error != NULL) {
		*return_error = necp_get_posix_error_for_necp_error(response_error);
	}
	return 0;
}
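/*
 * Editor's note: necp_handle_policy_add() expects the client-supplied TLV
 * buffer to carry one NECP_TLV_POLICY_ORDER, one NECP_TLV_POLICY_RESULT, and
 * one or more NECP_TLV_POLICY_CONDITION TLVs (plus NECP_TLV_ROUTE_RULE TLVs
 * when the result requires route rules). A sketch of assembling such a
 * payload with the writer above; the specific result and condition values
 * are illustrative assumptions, not values mandated by NECP.
 */
#if 0
static u_int32_t
example_build_policy_add_payload(u_int8_t *buffer, u_int32_t buffer_length)
{
	u_int8_t *cursor = buffer;
	necp_policy_order order = 10;
	u_int8_t result = NECP_POLICY_RESULT_DROP;           // One result-type byte, no parameter
	u_int8_t condition_tlv[2] = { NECP_POLICY_CONDITION_DEFAULT, 0 }; // type byte + flags byte, no value

	cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ORDER, sizeof(order), &order, buffer, buffer_length);
	cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_RESULT, sizeof(result), &result, buffer, buffer_length);
	cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_CONDITION, sizeof(condition_tlv), condition_tlv, buffer, buffer_length);
	// necp_buffer_write_tlv() returns NULL if the TLV does not fit.
	return (cursor != NULL) ? (u_int32_t)(cursor - buffer) : 0;
}
#endif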
static necp_policy_id
necp_policy_get_new_id(struct necp_session *session)
{
	session->last_policy_id++;
	if (session->last_policy_id < 1) {
		session->last_policy_id = 1;
	}

	necp_policy_id newid = session->last_policy_id;
	if (newid == 0) {
		NECPLOG0(LOG_ERR, "Allocate policy id failed.\n");
		return 0;
	}

	return newid;
}
/*
 * For the policy dump response this is the structure:
 *
 * <NECP_PACKET_HEADER>
 * {
 *     type    : NECP_TLV_POLICY_DUMP
 *     value   :
 *     {
 *         { type : NECP_TLV_POLICY_ID, ... }
 *         { type : NECP_TLV_POLICY_ORDER, ... }
 *         { type : NECP_TLV_POLICY_RESULT_STRING, ... }
 *         { type : NECP_TLV_POLICY_OWNER, ... }
 *         {
 *             type  : NECP_TLV_POLICY_CONDITION
 *             value :
 *             {
 *                 { type : NECP_POLICY_CONDITION_ALL_INTERFACES, ... }
 *                 { type : NECP_POLICY_CONDITION_BOUND_INTERFACES, ... }
 *                 ...
 *             }
 *         }
 *     }
 * }
 * {
 *     type    : NECP_TLV_POLICY_DUMP
 *     value   :
 *     {
 *         { type : NECP_TLV_POLICY_ID, ... }
 *         { type : NECP_TLV_POLICY_ORDER, ... }
 *         { type : NECP_TLV_POLICY_RESULT_STRING, ... }
 *         { type : NECP_TLV_POLICY_OWNER, ... }
 *         {
 *             type  : NECP_TLV_POLICY_CONDITION
 *             value :
 *             {
 *                 { type : NECP_POLICY_CONDITION_ALL_INTERFACES, ... }
 *                 { type : NECP_POLICY_CONDITION_BOUND_INTERFACES, ... }
 *                 ...
 *             }
 *         }
 *     }
 * }
 * ...
 */
static int
necp_handle_policy_dump_all(user_addr_t out_buffer, size_t out_buffer_length)
{
	struct necp_kernel_socket_policy *policy = NULL;
	int policy_i;
	int policy_count = 0;
	u_int8_t **tlv_buffer_pointers = NULL;
	u_int32_t *tlv_buffer_lengths = NULL;
	u_int32_t total_tlv_len = 0;
	u_int8_t *result_buf = NULL;
	u_int8_t *result_buf_cursor = result_buf;
	char result_string[MAX_RESULT_STRING_LEN];
	char proc_name_string[MAXCOMLEN + 1];

	int error_code = 0;
	bool error_occured = false;
	u_int32_t response_error = NECP_ERROR_INTERNAL;

#define REPORT_ERROR(error) error_occured = true; \
	response_error = error; \
	goto done

#define UNLOCK_AND_REPORT_ERROR(lock, error) lck_rw_done(lock); \
	REPORT_ERROR(error)

	errno_t cred_result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES, 0);
	if (cred_result != 0) {
		NECPLOG0(LOG_ERR, "Session does not hold the necessary entitlement to get Network Extension Policy information");
		REPORT_ERROR(NECP_ERROR_INTERNAL);
	}

	// LOCK
	lck_rw_lock_shared(&necp_kernel_policy_lock);

	if (necp_debug) {
		NECPLOG0(LOG_DEBUG, "Gathering policies");
	}

	policy_count = necp_kernel_application_policies_count;

	MALLOC(tlv_buffer_pointers, u_int8_t * *, sizeof(u_int8_t *) * policy_count, M_NECP, M_NOWAIT | M_ZERO);
	if (tlv_buffer_pointers == NULL) {
		NECPLOG(LOG_DEBUG, "Failed to allocate tlv_buffer_pointers (%u bytes)", sizeof(u_int8_t *) * policy_count);
		UNLOCK_AND_REPORT_ERROR(&necp_kernel_policy_lock, NECP_ERROR_INTERNAL);
	}

	MALLOC(tlv_buffer_lengths, u_int32_t *, sizeof(u_int32_t) * policy_count, M_NECP, M_NOWAIT | M_ZERO);
	if (tlv_buffer_lengths == NULL) {
		NECPLOG(LOG_DEBUG, "Failed to allocate tlv_buffer_lengths (%u bytes)", sizeof(u_int32_t) * policy_count);
		UNLOCK_AND_REPORT_ERROR(&necp_kernel_policy_lock, NECP_ERROR_INTERNAL);
	}

	for (policy_i = 0; necp_kernel_socket_policies_app_layer_map != NULL && necp_kernel_socket_policies_app_layer_map[policy_i] != NULL; policy_i++) {
		policy = necp_kernel_socket_policies_app_layer_map[policy_i];

		memset(result_string, 0, MAX_RESULT_STRING_LEN);
		memset(proc_name_string, 0, MAXCOMLEN + 1);

		necp_get_result_description(result_string, policy->result, policy->result_parameter);
		proc_name(policy->session_pid, proc_name_string, MAXCOMLEN);

		u_int16_t proc_name_len = strlen(proc_name_string) + 1;
		u_int16_t result_string_len = strlen(result_string) + 1;

		if (necp_debug) {
			NECPLOG(LOG_DEBUG, "Policy: process: %s, result: %s", proc_name_string, result_string);
		}

		u_int32_t total_allocated_bytes = sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(policy->id) +       // NECP_TLV_POLICY_ID
		    sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(policy->order) +                                  // NECP_TLV_POLICY_ORDER
		    sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(policy->session_order) +                          // NECP_TLV_POLICY_SESSION_ORDER
		    sizeof(u_int8_t) + sizeof(u_int32_t) + result_string_len +                                      // NECP_TLV_POLICY_RESULT_STRING
		    sizeof(u_int8_t) + sizeof(u_int32_t) + proc_name_len +                                          // NECP_TLV_POLICY_OWNER
		    sizeof(u_int8_t) + sizeof(u_int32_t);                                                           // NECP_TLV_POLICY_CONDITION

		// We now traverse the condition_mask to see how much space we need to allocate
		u_int32_t condition_mask = policy->condition_mask;
		u_int8_t num_conditions = 0;
		struct necp_string_id_mapping *account_id_entry = NULL;
		char if_name[IFXNAMSIZ];
		u_int32_t condition_tlv_length = 0;
		memset(if_name, 0, sizeof(if_name));

		if (condition_mask == NECP_POLICY_CONDITION_DEFAULT) {
			num_conditions++;
		} else {
			if (condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES) {
				num_conditions++;
			}
			if (condition_mask & NECP_KERNEL_CONDITION_HAS_CLIENT) {
				num_conditions++;
			}
			if (condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
				snprintf(if_name, IFXNAMSIZ, "%s%d", ifnet_name(policy->cond_bound_interface), ifnet_unit(policy->cond_bound_interface));
				condition_tlv_length += strlen(if_name) + 1;
				num_conditions++;
			}
			if (condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
				condition_tlv_length += sizeof(policy->cond_protocol);
				num_conditions++;
			}
			if (condition_mask & NECP_KERNEL_CONDITION_APP_ID) {
				condition_tlv_length += sizeof(uuid_t);
				num_conditions++;
			}
			if (condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
				condition_tlv_length += sizeof(uuid_t);
				num_conditions++;
			}
			if (condition_mask & NECP_KERNEL_CONDITION_DOMAIN) {
				u_int32_t domain_len = strlen(policy->cond_domain) + 1;
				condition_tlv_length += domain_len;
				num_conditions++;
			}
			if (condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID) {
				account_id_entry = necp_lookup_string_with_id_locked(&necp_account_id_list, policy->cond_account_id);
				u_int32_t account_id_len = 0;
				if (account_id_entry) {
					account_id_len = account_id_entry->string ? strlen(account_id_entry->string) + 1 : 0;
				}
				condition_tlv_length += account_id_len;
				num_conditions++;
			}
			if (condition_mask & NECP_KERNEL_CONDITION_PID) {
				condition_tlv_length += sizeof(pid_t);
				num_conditions++;
			}
			if (condition_mask & NECP_KERNEL_CONDITION_UID) {
				condition_tlv_length += sizeof(uid_t);
				num_conditions++;
			}
			if (condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
				condition_tlv_length += sizeof(struct necp_policy_condition_tc_range);
				num_conditions++;
			}
			if (condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT) {
				num_conditions++;
			}
			if (condition_mask & NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT) {
				u_int32_t entitlement_len = strlen(policy->cond_custom_entitlement) + 1;
				condition_tlv_length += entitlement_len;
				num_conditions++;
			}
			if (condition_mask & NECP_KERNEL_CONDITION_PLATFORM_BINARY) {
				num_conditions++;
			}
			if (condition_mask & NECP_KERNEL_CONDITION_LOCAL_NETWORKS) {
				num_conditions++;
			}
			if (condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
				if (condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
					condition_tlv_length += sizeof(struct necp_policy_condition_addr_range);
				} else {
					condition_tlv_length += sizeof(struct necp_policy_condition_addr);
				}
				num_conditions++;
			}
			if (condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
				if (condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
					condition_tlv_length += sizeof(struct necp_policy_condition_addr_range);
				} else {
					condition_tlv_length += sizeof(struct necp_policy_condition_addr);
				}
				num_conditions++;
			}
			if (condition_mask & NECP_KERNEL_CONDITION_AGENT_TYPE) {
				condition_tlv_length += sizeof(struct necp_policy_condition_agent_type);
				num_conditions++;
			}
			if (condition_mask & NECP_KERNEL_CONDITION_CLIENT_FLAGS) {
				condition_tlv_length += sizeof(u_int32_t);
				num_conditions++;
			}
			if (condition_mask & NECP_KERNEL_CONDITION_LOCAL_EMPTY) {
				num_conditions++;
			}
			if (condition_mask & NECP_KERNEL_CONDITION_REMOTE_EMPTY) {
				num_conditions++;
			}
		}

		condition_tlv_length += num_conditions * (sizeof(u_int8_t) + sizeof(u_int32_t)); // These are for the condition TLVs. The space for "value" is already accounted for above.
		total_allocated_bytes += condition_tlv_length;

		u_int8_t *tlv_buffer;
		MALLOC(tlv_buffer, u_int8_t *, total_allocated_bytes, M_NECP, M_NOWAIT | M_ZERO);
		if (tlv_buffer == NULL) {
			NECPLOG(LOG_DEBUG, "Failed to allocate tlv_buffer (%u bytes)", total_allocated_bytes);
			continue;
		}

		u_int8_t *cursor = tlv_buffer;
		cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ID, sizeof(policy->id), &policy->id, tlv_buffer, total_allocated_bytes);
		cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ORDER, sizeof(necp_policy_order), &policy->order, tlv_buffer, total_allocated_bytes);
		cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_SESSION_ORDER, sizeof(policy->session_order), &policy->session_order, tlv_buffer, total_allocated_bytes);
		cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_RESULT_STRING, result_string_len, result_string, tlv_buffer, total_allocated_bytes);
		cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_OWNER, proc_name_len, proc_name_string, tlv_buffer, total_allocated_bytes);

		u_int8_t q_cond_buf[N_QUICK]; // Minor optimization

		u_int8_t *cond_buf; // To be used for condition TLVs
		if (condition_tlv_length <= N_QUICK) {
			cond_buf = q_cond_buf;
		} else {
			MALLOC(cond_buf, u_int8_t *, condition_tlv_length, M_NECP, M_NOWAIT);
			if (cond_buf == NULL) {
				NECPLOG(LOG_DEBUG, "Failed to allocate cond_buffer (%u bytes)", condition_tlv_length);
				FREE(tlv_buffer, M_NECP);
				continue;
			}
		}

		memset(cond_buf, 0, condition_tlv_length);
		u_int8_t *cond_buf_cursor = cond_buf;
		if (condition_mask == NECP_POLICY_CONDITION_DEFAULT) {
			cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_DEFAULT, 0, "", cond_buf, condition_tlv_length);
		} else {
			if (condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_ALL_INTERFACES, 0, "", cond_buf, condition_tlv_length);
			}
			if (condition_mask & NECP_KERNEL_CONDITION_HAS_CLIENT) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_HAS_CLIENT, 0, "", cond_buf, condition_tlv_length);
			}
			if (condition_mask & NECP_KERNEL_CONDITION_LOCAL_NETWORKS) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_LOCAL_NETWORKS, 0, "", cond_buf, condition_tlv_length);
			}
			if (condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_BOUND_INTERFACE, strlen(if_name) + 1,
				    if_name, cond_buf, condition_tlv_length);
			}
			if (condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_IP_PROTOCOL, sizeof(policy->cond_protocol), &policy->cond_protocol,
				    cond_buf, condition_tlv_length);
			}
			if (condition_mask & NECP_KERNEL_CONDITION_APP_ID) {
				struct necp_uuid_id_mapping *entry = necp_uuid_lookup_uuid_with_app_id_locked(policy->cond_app_id);
				if (entry != NULL) {
					cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_APPLICATION, sizeof(entry->uuid), entry->uuid,
					    cond_buf, condition_tlv_length);
				}
			}
			if (condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
				struct necp_uuid_id_mapping *entry = necp_uuid_lookup_uuid_with_app_id_locked(policy->cond_real_app_id);
				if (entry != NULL) {
					cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_REAL_APPLICATION, sizeof(entry->uuid), entry->uuid,
					    cond_buf, condition_tlv_length);
				}
			}
			if (condition_mask & NECP_KERNEL_CONDITION_DOMAIN) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_DOMAIN, strlen(policy->cond_domain) + 1, policy->cond_domain,
				    cond_buf, condition_tlv_length);
			}
			if (condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID) {
				if (account_id_entry != NULL) {
					cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_ACCOUNT, strlen(account_id_entry->string) + 1, account_id_entry->string,
					    cond_buf, condition_tlv_length);
				}
			}
			if (condition_mask & NECP_KERNEL_CONDITION_PID) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_PID, sizeof(policy->cond_pid), &policy->cond_pid,
				    cond_buf, condition_tlv_length);
			}
			if (condition_mask & NECP_KERNEL_CONDITION_UID) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_UID, sizeof(policy->cond_uid), &policy->cond_uid,
				    cond_buf, condition_tlv_length);
			}
			if (condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_TRAFFIC_CLASS, sizeof(policy->cond_traffic_class), &policy->cond_traffic_class,
				    cond_buf, condition_tlv_length);
			}
			if (condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_ENTITLEMENT, 0, "",
				    cond_buf, condition_tlv_length);
			}
			if (condition_mask & NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_ENTITLEMENT, strlen(policy->cond_custom_entitlement) + 1, policy->cond_custom_entitlement,
				    cond_buf, condition_tlv_length);
			}
			if (condition_mask & NECP_KERNEL_CONDITION_PLATFORM_BINARY) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_PLATFORM_BINARY, 0, "", cond_buf, condition_tlv_length);
			}
			if (condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
				if (condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
					struct necp_policy_condition_addr_range range;
					memcpy(&range.start_address, &policy->cond_local_start, sizeof(policy->cond_local_start));
					memcpy(&range.end_address, &policy->cond_local_end, sizeof(policy->cond_local_end));
					cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_LOCAL_ADDR_RANGE, sizeof(range), &range,
					    cond_buf, condition_tlv_length);
				} else {
					struct necp_policy_condition_addr addr;
					addr.prefix = policy->cond_local_prefix;
					memcpy(&addr.address, &policy->cond_local_start, sizeof(policy->cond_local_start));
					cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_LOCAL_ADDR, sizeof(addr), &addr,
					    cond_buf, condition_tlv_length);
				}
			}
			if (condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
				if (condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
					struct necp_policy_condition_addr_range range;
					memcpy(&range.start_address, &policy->cond_remote_start, sizeof(policy->cond_remote_start));
					memcpy(&range.end_address, &policy->cond_remote_end, sizeof(policy->cond_remote_end));
					cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_REMOTE_ADDR_RANGE, sizeof(range), &range,
					    cond_buf, condition_tlv_length);
				} else {
					struct necp_policy_condition_addr addr;
					addr.prefix = policy->cond_remote_prefix;
					memcpy(&addr.address, &policy->cond_remote_start, sizeof(policy->cond_remote_start));
					cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_REMOTE_ADDR, sizeof(addr), &addr,
					    cond_buf, condition_tlv_length);
				}
			}
			if (condition_mask & NECP_KERNEL_CONDITION_AGENT_TYPE) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_AGENT_TYPE,
				    sizeof(policy->cond_agent_type), &policy->cond_agent_type,
				    cond_buf, condition_tlv_length);
			}
			if (condition_mask & NECP_KERNEL_CONDITION_CLIENT_FLAGS) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_CLIENT_FLAGS, sizeof(policy->cond_client_flags), &policy->cond_client_flags, cond_buf, condition_tlv_length);
			}
			if (condition_mask & NECP_KERNEL_CONDITION_LOCAL_EMPTY) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_FLOW_LOCAL_ADDR_EMPTY, 0, "", cond_buf, condition_tlv_length);
			}
			if (condition_mask & NECP_KERNEL_CONDITION_REMOTE_EMPTY) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_FLOW_REMOTE_ADDR_EMPTY, 0, "", cond_buf, condition_tlv_length);
			}
		}

		cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_CONDITION, cond_buf_cursor - cond_buf, cond_buf, tlv_buffer, total_allocated_bytes);
		if (cond_buf != q_cond_buf) {
			FREE(cond_buf, M_NECP);
		}

		tlv_buffer_pointers[policy_i] = tlv_buffer;
		tlv_buffer_lengths[policy_i] = (cursor - tlv_buffer);

		// This is the length of the TLV for NECP_TLV_POLICY_DUMP
		total_tlv_len += sizeof(u_int8_t) + sizeof(u_int32_t) + (cursor - tlv_buffer);
	}

	// UNLOCK
	lck_rw_done(&necp_kernel_policy_lock);

	// Copy out policies
	if (out_buffer != 0) {
		if (out_buffer_length < total_tlv_len + sizeof(u_int32_t)) {
			NECPLOG(LOG_DEBUG, "out_buffer_length too small (%u < %u)", out_buffer_length, total_tlv_len + sizeof(u_int32_t));
			REPORT_ERROR(NECP_ERROR_INVALID_TLV);
		}

		// Allow malloc to wait, since the total buffer may be large and we are not holding any locks
		MALLOC(result_buf, u_int8_t *, total_tlv_len + sizeof(u_int32_t), M_NECP, M_WAITOK | M_ZERO);
		if (result_buf == NULL) {
			NECPLOG(LOG_DEBUG, "Failed to allocate result_buffer (%u bytes)", total_tlv_len + sizeof(u_int32_t));
			REPORT_ERROR(NECP_ERROR_INTERNAL);
		}

		// Add four bytes for total length at the start
		memcpy(result_buf, &total_tlv_len, sizeof(u_int32_t));

		// Copy the TLVs
		result_buf_cursor = result_buf + sizeof(u_int32_t);
		for (int i = 0; i < policy_count; i++) {
			if (tlv_buffer_pointers[i] != NULL) {
				result_buf_cursor = necp_buffer_write_tlv(result_buf_cursor, NECP_TLV_POLICY_DUMP, tlv_buffer_lengths[i], tlv_buffer_pointers[i],
				    result_buf, total_tlv_len + sizeof(u_int32_t));
			}
		}

		int copy_error = copyout(result_buf, out_buffer, total_tlv_len + sizeof(u_int32_t));
		if (copy_error) {
			NECPLOG(LOG_DEBUG, "Failed to copy out result_buffer (%u bytes)", total_tlv_len + sizeof(u_int32_t));
			REPORT_ERROR(NECP_ERROR_INTERNAL);
		}
	}

done:

	if (error_occured) {
		error_code = necp_get_posix_error_for_necp_error(response_error);
	}

	if (result_buf != NULL) {
		FREE(result_buf, M_NECP);
	}

	if (tlv_buffer_pointers != NULL) {
		for (int i = 0; i < policy_count; i++) {
			if (tlv_buffer_pointers[i] != NULL) {
				FREE(tlv_buffer_pointers[i], M_NECP);
				tlv_buffer_pointers[i] = NULL;
			}
		}
		FREE(tlv_buffer_pointers, M_NECP);
	}

	if (tlv_buffer_lengths != NULL) {
		FREE(tlv_buffer_lengths, M_NECP);
	}

#undef RESET_COND_BUF
#undef REPORT_ERROR
#undef UNLOCK_AND_REPORT_ERROR

	return error_code;
}
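/*
 * Editor's note: the dump copied out above starts with a 4-byte total TLV
 * length, followed by one NECP_TLV_POLICY_DUMP TLV per policy, each of whose
 * values is the nested per-policy TLV list described in the comment before
 * this function. A userspace-style sketch of walking the outer layer,
 * assuming only that layout (the function is hypothetical, not part of NECP):
 */
#if 0
static u_int32_t
example_count_dumped_policies(const u_int8_t *dump, size_t dump_size)
{
	u_int32_t total_tlv_len = 0;
	u_int32_t policies = 0;
	if (dump == NULL || dump_size < sizeof(u_int32_t)) {
		return 0;
	}
	memcpy(&total_tlv_len, dump, sizeof(u_int32_t));
	if (total_tlv_len > dump_size - sizeof(u_int32_t)) {
		total_tlv_len = (u_int32_t)(dump_size - sizeof(u_int32_t)); // Clamp to what was copied out
	}
	u_int32_t cursor = sizeof(u_int32_t);
	u_int32_t end = sizeof(u_int32_t) + total_tlv_len;
	while (cursor + sizeof(u_int8_t) + sizeof(u_int32_t) <= end) {
		u_int32_t value_length = 0;
		memcpy(&value_length, dump + cursor + sizeof(u_int8_t), sizeof(value_length));
		if (value_length > end - (cursor + sizeof(u_int8_t) + sizeof(u_int32_t))) {
			break; // Malformed or truncated entry
		}
		if (dump[cursor] == NECP_TLV_POLICY_DUMP) {
			policies++;
		}
		cursor += sizeof(u_int8_t) + sizeof(u_int32_t) + value_length;
	}
	return policies;
}
#endif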
static struct necp_session_policy *
necp_policy_create(struct necp_session *session, necp_policy_order order, u_int8_t *conditions_array, u_int32_t conditions_array_size, u_int8_t *route_rules_array, u_int32_t route_rules_array_size, u_int8_t *result, u_int32_t result_size)
{
	struct necp_session_policy *new_policy = NULL;
	struct necp_session_policy *tmp_policy = NULL;

	if (session == NULL || conditions_array == NULL || result == NULL || result_size == 0) {
		goto done;
	}

	MALLOC_ZONE(new_policy, struct necp_session_policy *, sizeof(*new_policy), M_NECP_SESSION_POLICY, M_WAITOK);
	if (new_policy == NULL) {
		goto done;
	}

	memset(new_policy, 0, sizeof(*new_policy)); // M_ZERO is not supported for MALLOC_ZONE
	new_policy->applied = FALSE;
	new_policy->pending_deletion = FALSE;
	new_policy->pending_update = FALSE;
	new_policy->order = order;
	new_policy->conditions = conditions_array;
	new_policy->conditions_size = conditions_array_size;
	new_policy->route_rules = route_rules_array;
	new_policy->route_rules_size = route_rules_array_size;
	new_policy->result = result;
	new_policy->result_size = result_size;
	new_policy->local_id = necp_policy_get_new_id(session);

	LIST_INSERT_SORTED_ASCENDING(&session->policies, new_policy, chain, order, tmp_policy);

	session->dirty = TRUE;

	if (necp_debug) {
		NECPLOG(LOG_DEBUG, "Created NECP policy, order %d", order);
	}
done:
	return new_policy;
}
static struct necp_session_policy *
necp_policy_find(struct necp_session *session, necp_policy_id policy_id)
{
	struct necp_session_policy *policy = NULL;
	if (policy_id == 0) {
		return NULL;
	}

	LIST_FOREACH(policy, &session->policies, chain) {
		if (policy->local_id == policy_id) {
			return policy;
		}
	}

	return NULL;
}
static inline u_int8_t
necp_policy_get_result_type(struct necp_session_policy *policy)
{
	return policy ? necp_policy_result_get_type_from_buffer(policy->result, policy->result_size) : 0;
}

static inline u_int32_t
necp_policy_get_result_parameter_length(struct necp_session_policy *policy)
{
	return policy ? necp_policy_result_get_parameter_length_from_buffer(policy->result, policy->result_size) : 0;
}

static bool
necp_policy_get_result_parameter(struct necp_session_policy *policy, u_int8_t *parameter_buffer, u_int32_t parameter_buffer_length)
{
	if (policy) {
		u_int32_t parameter_length = necp_policy_result_get_parameter_length_from_buffer(policy->result, policy->result_size);
		if (parameter_buffer_length >= parameter_length) {
			u_int8_t *parameter = necp_policy_result_get_parameter_pointer_from_buffer(policy->result, policy->result_size);
			if (parameter && parameter_buffer) {
				memcpy(parameter_buffer, parameter, parameter_length);
				return TRUE;
			}
		}
	}

	return FALSE;
}
static bool
necp_policy_mark_for_deletion(struct necp_session *session, struct necp_session_policy *policy)
{
	if (session == NULL || policy == NULL) {
		return FALSE;
	}

	policy->pending_deletion = TRUE;
	session->dirty = TRUE;

	if (necp_debug) {
		NECPLOG0(LOG_DEBUG, "Marked NECP policy for removal");
	}
	return TRUE;
}

static bool
necp_policy_mark_all_for_deletion(struct necp_session *session)
{
	struct necp_session_policy *policy = NULL;
	struct necp_session_policy *temp_policy = NULL;

	LIST_FOREACH_SAFE(policy, &session->policies, chain, temp_policy) {
		necp_policy_mark_for_deletion(session, policy);
	}

	return TRUE;
}
static bool
necp_policy_delete(struct necp_session *session, struct necp_session_policy *policy)
{
	if (session == NULL || policy == NULL) {
		return FALSE;
	}

	LIST_REMOVE(policy, chain);

	if (policy->result) {
		FREE(policy->result, M_NECP);
		policy->result = NULL;
	}

	if (policy->conditions) {
		FREE(policy->conditions, M_NECP);
		policy->conditions = NULL;
	}

	if (policy->route_rules) {
		FREE(policy->route_rules, M_NECP);
		policy->route_rules = NULL;
	}

	FREE_ZONE(policy, sizeof(*policy), M_NECP_SESSION_POLICY);

	if (necp_debug) {
		NECPLOG0(LOG_DEBUG, "Removed NECP policy");
	}
	return TRUE;
}
static bool
necp_policy_unapply(struct necp_session_policy *policy)
{
	int i = 0;
	if (policy == NULL) {
		return FALSE;
	}

	LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	// Release local uuid mappings
	if (!uuid_is_null(policy->applied_app_uuid)) {
		bool removed_mapping = FALSE;
		if (necp_remove_uuid_app_id_mapping(policy->applied_app_uuid, &removed_mapping, TRUE) && removed_mapping) {
			necp_uuid_app_id_mappings_dirty = TRUE;
			necp_num_uuid_app_id_mappings--;
		}
		uuid_clear(policy->applied_app_uuid);
	}
	if (!uuid_is_null(policy->applied_real_app_uuid)) {
		necp_remove_uuid_app_id_mapping(policy->applied_real_app_uuid, NULL, FALSE);
		uuid_clear(policy->applied_real_app_uuid);
	}
	if (!uuid_is_null(policy->applied_result_uuid)) {
		necp_remove_uuid_service_id_mapping(policy->applied_result_uuid);
		uuid_clear(policy->applied_result_uuid);
	}

	// Release string mappings
	if (policy->applied_account != NULL) {
		necp_remove_string_to_id_mapping(&necp_account_id_list, policy->applied_account);
		FREE(policy->applied_account, M_NECP);
		policy->applied_account = NULL;
	}

	// Release route rule
	if (policy->applied_route_rules_id != 0) {
		necp_remove_route_rule(&necp_route_rules, policy->applied_route_rules_id);
		policy->applied_route_rules_id = 0;
	}

	// Remove socket policies
	for (i = 0; i < MAX_KERNEL_SOCKET_POLICIES; i++) {
		if (policy->kernel_socket_policies[i] != 0) {
			necp_kernel_socket_policy_delete(policy->kernel_socket_policies[i]);
			policy->kernel_socket_policies[i] = 0;
		}
	}

	// Remove IP output policies
	for (i = 0; i < MAX_KERNEL_IP_OUTPUT_POLICIES; i++) {
		if (policy->kernel_ip_output_policies[i] != 0) {
			necp_kernel_ip_output_policy_delete(policy->kernel_ip_output_policies[i]);
			policy->kernel_ip_output_policies[i] = 0;
		}
	}

	policy->applied = FALSE;

	return TRUE;
}
#define NECP_KERNEL_POLICY_SUBORDER_ID_TUNNEL_CONDITION         0
#define NECP_KERNEL_POLICY_SUBORDER_NON_ID_TUNNEL_CONDITION     1
#define NECP_KERNEL_POLICY_SUBORDER_ID_CONDITION                2
#define NECP_KERNEL_POLICY_SUBORDER_NON_ID_CONDITIONS           3

struct necp_policy_result_ip_tunnel {
	u_int32_t secondary_result;
	char interface_name[IFXNAMSIZ];
} __attribute__((__packed__));

struct necp_policy_result_service {
	uuid_t identifier;
	u_int32_t data;
} __attribute__((__packed__));
static bool
necp_policy_apply(struct necp_session *session, struct necp_session_policy *policy)
{
	bool socket_only_conditions = FALSE;
	bool socket_ip_conditions = FALSE;

	bool socket_layer_non_id_conditions = FALSE;
	bool ip_output_layer_non_id_conditions = FALSE;
	bool ip_output_layer_non_id_only = FALSE;
	bool ip_output_layer_id_condition = FALSE;
	bool ip_output_layer_tunnel_condition_from_id = FALSE;
	bool ip_output_layer_tunnel_condition_from_non_id = FALSE;
	necp_kernel_policy_id cond_ip_output_layer_id = NECP_KERNEL_POLICY_ID_NONE;

	u_int32_t master_condition_mask = 0;
	u_int32_t master_condition_negated_mask = 0;
	ifnet_t cond_bound_interface = NULL;
	u_int32_t cond_account_id = 0;
	char *cond_domain = NULL;
	char *cond_custom_entitlement = NULL;
	pid_t cond_pid = 0;
	uid_t cond_uid = 0;
	necp_app_id cond_app_id = 0;
	necp_app_id cond_real_app_id = 0;
	struct necp_policy_condition_tc_range cond_traffic_class;
	cond_traffic_class.start_tc = 0;
	cond_traffic_class.end_tc = 0;
	u_int16_t cond_protocol = 0;
	union necp_sockaddr_union cond_local_start;
	union necp_sockaddr_union cond_local_end;
	u_int8_t cond_local_prefix = 0;
	union necp_sockaddr_union cond_remote_start;
	union necp_sockaddr_union cond_remote_end;
	u_int8_t cond_remote_prefix = 0;
	u_int32_t cond_client_flags = 0;
	u_int32_t offset = 0;
	u_int8_t ultimate_result = 0;
	u_int32_t secondary_result = 0;
	struct necp_policy_condition_agent_type cond_agent_type = {};
	necp_kernel_policy_result_parameter secondary_result_parameter;
	memset(&secondary_result_parameter, 0, sizeof(secondary_result_parameter));
	u_int32_t cond_last_interface_index = 0;
	necp_kernel_policy_result_parameter ultimate_result_parameter;
	memset(&ultimate_result_parameter, 0, sizeof(ultimate_result_parameter));

	if (policy == NULL) {
		return FALSE;
	}

	LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	// Process conditions
	while (offset < policy->conditions_size) {
		u_int32_t length = 0;
		u_int8_t *value = necp_buffer_get_tlv_value(policy->conditions, offset, &length);

		u_int8_t condition_type = necp_policy_condition_get_type_from_buffer(value, length);
		u_int8_t condition_flags = necp_policy_condition_get_flags_from_buffer(value, length);
		bool condition_is_negative = condition_flags & NECP_POLICY_CONDITION_FLAGS_NEGATIVE;
		u_int32_t condition_length = necp_policy_condition_get_value_length_from_buffer(value, length);
		u_int8_t *condition_value = necp_policy_condition_get_value_pointer_from_buffer(value, length);
		switch (condition_type) {
		case NECP_POLICY_CONDITION_DEFAULT: {
			socket_ip_conditions = TRUE;
			break;
		}
		case NECP_POLICY_CONDITION_ALL_INTERFACES: {
			master_condition_mask |= NECP_KERNEL_CONDITION_ALL_INTERFACES;
			socket_ip_conditions = TRUE;
			break;
		}
		case NECP_POLICY_CONDITION_HAS_CLIENT: {
			master_condition_mask |= NECP_KERNEL_CONDITION_HAS_CLIENT;
			socket_only_conditions = TRUE;
			break;
		}
		case NECP_POLICY_CONDITION_ENTITLEMENT: {
			if (condition_length > 0) {
				if (cond_custom_entitlement == NULL) {
					cond_custom_entitlement = necp_copy_string((char *)condition_value, condition_length);
					if (cond_custom_entitlement != NULL) {
						master_condition_mask |= NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT;
						socket_only_conditions = TRUE;
					}
				}
			} else {
				master_condition_mask |= NECP_KERNEL_CONDITION_ENTITLEMENT;
				socket_only_conditions = TRUE;
			}
			break;
		}
		case NECP_POLICY_CONDITION_PLATFORM_BINARY: {
			master_condition_mask |= NECP_KERNEL_CONDITION_PLATFORM_BINARY;
			socket_only_conditions = TRUE;
			break;
		}
		case NECP_POLICY_CONDITION_DOMAIN: {
			// Make sure there is only one such rule
			if (condition_length > 0 && cond_domain == NULL) {
				cond_domain = necp_create_trimmed_domain((char *)condition_value, condition_length);
				if (cond_domain != NULL) {
					master_condition_mask |= NECP_KERNEL_CONDITION_DOMAIN;
					if (condition_is_negative) {
						master_condition_negated_mask |= NECP_KERNEL_CONDITION_DOMAIN;
					}
					socket_only_conditions = TRUE;
				}
			}
			break;
		}
		case NECP_POLICY_CONDITION_ACCOUNT: {
			// Make sure there is only one such rule
			if (condition_length > 0 && cond_account_id == 0 && policy->applied_account == NULL) {
				char *string = NULL;
				MALLOC(string, char *, condition_length + 1, M_NECP, M_WAITOK);
				if (string != NULL) {
					memcpy(string, condition_value, condition_length);
					string[condition_length] = 0;
					cond_account_id = necp_create_string_to_id_mapping(&necp_account_id_list, string);
					if (cond_account_id != 0) {
						policy->applied_account = string; // Save the string in parent policy
						master_condition_mask |= NECP_KERNEL_CONDITION_ACCOUNT_ID;
						if (condition_is_negative) {
							master_condition_negated_mask |= NECP_KERNEL_CONDITION_ACCOUNT_ID;
						}
						socket_only_conditions = TRUE;
					} else {
						FREE(string, M_NECP);
					}
				}
			}
			break;
		}
		case NECP_POLICY_CONDITION_APPLICATION: {
			// Make sure there is only one such rule, because we save the uuid in the policy
			if (condition_length >= sizeof(uuid_t) && cond_app_id == 0) {
				bool allocated_mapping = FALSE;
				uuid_t application_uuid;
				memcpy(application_uuid, condition_value, sizeof(uuid_t));
				cond_app_id = necp_create_uuid_app_id_mapping(application_uuid, &allocated_mapping, TRUE);
				if (cond_app_id != 0) {
					if (allocated_mapping) {
						necp_uuid_app_id_mappings_dirty = TRUE;
						necp_num_uuid_app_id_mappings++;
					}
					uuid_copy(policy->applied_app_uuid, application_uuid);
					master_condition_mask |= NECP_KERNEL_CONDITION_APP_ID;
					if (condition_is_negative) {
						master_condition_negated_mask |= NECP_KERNEL_CONDITION_APP_ID;
					}
					socket_only_conditions = TRUE;
				}
			}
			break;
		}
		case NECP_POLICY_CONDITION_REAL_APPLICATION: {
			// Make sure there is only one such rule, because we save the uuid in the policy
			if (condition_length >= sizeof(uuid_t) && cond_real_app_id == 0) {
				uuid_t real_application_uuid;
				memcpy(real_application_uuid, condition_value, sizeof(uuid_t));
				cond_real_app_id = necp_create_uuid_app_id_mapping(real_application_uuid, NULL, FALSE);
				if (cond_real_app_id != 0) {
					uuid_copy(policy->applied_real_app_uuid, real_application_uuid);
					master_condition_mask |= NECP_KERNEL_CONDITION_REAL_APP_ID;
					if (condition_is_negative) {
						master_condition_negated_mask |= NECP_KERNEL_CONDITION_REAL_APP_ID;
					}
					socket_only_conditions = TRUE;
				}
			}
			break;
		}
		case NECP_POLICY_CONDITION_PID: {
			if (condition_length >= sizeof(pid_t)) {
				master_condition_mask |= NECP_KERNEL_CONDITION_PID;
				if (condition_is_negative) {
					master_condition_negated_mask |= NECP_KERNEL_CONDITION_PID;
				}
				memcpy(&cond_pid, condition_value, sizeof(cond_pid));
				socket_only_conditions = TRUE;
			}
			break;
		}
		case NECP_POLICY_CONDITION_UID: {
			if (condition_length >= sizeof(uid_t)) {
				master_condition_mask |= NECP_KERNEL_CONDITION_UID;
				if (condition_is_negative) {
					master_condition_negated_mask |= NECP_KERNEL_CONDITION_UID;
				}
				memcpy(&cond_uid, condition_value, sizeof(cond_uid));
				socket_only_conditions = TRUE;
			}
			break;
		}
		case NECP_POLICY_CONDITION_TRAFFIC_CLASS: {
			if (condition_length >= sizeof(struct necp_policy_condition_tc_range)) {
				master_condition_mask |= NECP_KERNEL_CONDITION_TRAFFIC_CLASS;
				if (condition_is_negative) {
					master_condition_negated_mask |= NECP_KERNEL_CONDITION_TRAFFIC_CLASS;
				}
				memcpy(&cond_traffic_class, condition_value, sizeof(cond_traffic_class));
				socket_only_conditions = TRUE;
			}
			break;
		}
		case NECP_POLICY_CONDITION_BOUND_INTERFACE: {
			if (condition_length <= IFXNAMSIZ && condition_length > 0) {
				char interface_name[IFXNAMSIZ];
				memcpy(interface_name, condition_value, condition_length);
				interface_name[condition_length - 1] = 0; // Make sure the string is NULL terminated
				if (ifnet_find_by_name(interface_name, &cond_bound_interface) == 0) {
					master_condition_mask |= NECP_KERNEL_CONDITION_BOUND_INTERFACE;
					if (condition_is_negative) {
						master_condition_negated_mask |= NECP_KERNEL_CONDITION_BOUND_INTERFACE;
					}
				}
				socket_ip_conditions = TRUE;
			}
			break;
		}
		case NECP_POLICY_CONDITION_IP_PROTOCOL:
		case NECP_POLICY_CONDITION_FLOW_IP_PROTOCOL: {
			if (condition_length >= sizeof(u_int16_t)) {
				master_condition_mask |= NECP_KERNEL_CONDITION_PROTOCOL;
				if (condition_is_negative) {
					master_condition_negated_mask |= NECP_KERNEL_CONDITION_PROTOCOL;
				}
				memcpy(&cond_protocol, condition_value, sizeof(cond_protocol));
				if (condition_type == NECP_POLICY_CONDITION_FLOW_IP_PROTOCOL) {
					socket_only_conditions = TRUE;
				} else {
					socket_ip_conditions = TRUE;
				}
			}
			break;
		}
		case NECP_POLICY_CONDITION_LOCAL_NETWORKS: {
			master_condition_mask |= NECP_KERNEL_CONDITION_LOCAL_NETWORKS;
			socket_ip_conditions = TRUE;
			break;
		}
		case NECP_POLICY_CONDITION_LOCAL_ADDR:
		case NECP_POLICY_CONDITION_FLOW_LOCAL_ADDR: {
			struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)condition_value;
			if (!necp_address_is_valid(&address_struct->address.sa)) {
				break;
			}

			cond_local_prefix = address_struct->prefix;
			memcpy(&cond_local_start, &address_struct->address, sizeof(address_struct->address));
			master_condition_mask |= NECP_KERNEL_CONDITION_LOCAL_START;
			master_condition_mask |= NECP_KERNEL_CONDITION_LOCAL_PREFIX;
			if (condition_is_negative) {
				master_condition_negated_mask |= NECP_KERNEL_CONDITION_LOCAL_START;
				master_condition_negated_mask |= NECP_KERNEL_CONDITION_LOCAL_PREFIX;
			}
			if (condition_type == NECP_POLICY_CONDITION_FLOW_LOCAL_ADDR) {
				socket_only_conditions = TRUE;
			} else {
				socket_ip_conditions = TRUE;
			}
			break;
		}
		case NECP_POLICY_CONDITION_REMOTE_ADDR:
		case NECP_POLICY_CONDITION_FLOW_REMOTE_ADDR: {
			struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)condition_value;
			if (!necp_address_is_valid(&address_struct->address.sa)) {
				break;
			}

			cond_remote_prefix = address_struct->prefix;
			memcpy(&cond_remote_start, &address_struct->address, sizeof(address_struct->address));
			master_condition_mask |= NECP_KERNEL_CONDITION_REMOTE_START
;
3408 master_condition_mask
|= NECP_KERNEL_CONDITION_REMOTE_PREFIX
;
3409 if (condition_is_negative
) {
3410 master_condition_negated_mask
|= NECP_KERNEL_CONDITION_REMOTE_START
;
3411 master_condition_negated_mask
|= NECP_KERNEL_CONDITION_REMOTE_PREFIX
;
3413 if (condition_type
== NECP_POLICY_CONDITION_FLOW_REMOTE_ADDR
) {
3414 socket_only_conditions
= TRUE
;
3416 socket_ip_conditions
= TRUE
;
3420 case NECP_POLICY_CONDITION_LOCAL_ADDR_RANGE
:
3421 case NECP_POLICY_CONDITION_FLOW_LOCAL_ADDR_RANGE
: {
3422 struct necp_policy_condition_addr_range
*address_struct
= (struct necp_policy_condition_addr_range
*)(void *)condition_value
;
3423 if (!necp_address_is_valid(&address_struct
->start_address
.sa
) ||
3424 !necp_address_is_valid(&address_struct
->end_address
.sa
)) {
3428 memcpy(&cond_local_start
, &address_struct
->start_address
, sizeof(address_struct
->start_address
));
3429 memcpy(&cond_local_end
, &address_struct
->end_address
, sizeof(address_struct
->end_address
));
3430 master_condition_mask
|= NECP_KERNEL_CONDITION_LOCAL_START
;
3431 master_condition_mask
|= NECP_KERNEL_CONDITION_LOCAL_END
;
3432 if (condition_is_negative
) {
3433 master_condition_negated_mask
|= NECP_KERNEL_CONDITION_LOCAL_START
;
3434 master_condition_negated_mask
|= NECP_KERNEL_CONDITION_LOCAL_END
;
3436 if (condition_type
== NECP_POLICY_CONDITION_FLOW_LOCAL_ADDR_RANGE
) {
3437 socket_only_conditions
= TRUE
;
3439 socket_ip_conditions
= TRUE
;
3443 case NECP_POLICY_CONDITION_REMOTE_ADDR_RANGE
:
3444 case NECP_POLICY_CONDITION_FLOW_REMOTE_ADDR_RANGE
: {
3445 struct necp_policy_condition_addr_range
*address_struct
= (struct necp_policy_condition_addr_range
*)(void *)condition_value
;
3446 if (!necp_address_is_valid(&address_struct
->start_address
.sa
) ||
3447 !necp_address_is_valid(&address_struct
->end_address
.sa
)) {
3451 memcpy(&cond_remote_start
, &address_struct
->start_address
, sizeof(address_struct
->start_address
));
3452 memcpy(&cond_remote_end
, &address_struct
->end_address
, sizeof(address_struct
->end_address
));
3453 master_condition_mask
|= NECP_KERNEL_CONDITION_REMOTE_START
;
3454 master_condition_mask
|= NECP_KERNEL_CONDITION_REMOTE_END
;
3455 if (condition_is_negative
) {
3456 master_condition_negated_mask
|= NECP_KERNEL_CONDITION_REMOTE_START
;
3457 master_condition_negated_mask
|= NECP_KERNEL_CONDITION_REMOTE_END
;
3459 if (condition_type
== NECP_POLICY_CONDITION_FLOW_REMOTE_ADDR_RANGE
) {
3460 socket_only_conditions
= TRUE
;
3462 socket_ip_conditions
= TRUE
;
3466 case NECP_POLICY_CONDITION_AGENT_TYPE
: {
3467 if (condition_length
>= sizeof(cond_agent_type
)) {
3468 master_condition_mask
|= NECP_KERNEL_CONDITION_AGENT_TYPE
;
3469 memcpy(&cond_agent_type
, condition_value
, sizeof(cond_agent_type
));
3470 socket_only_conditions
= TRUE
;
3474 case NECP_POLICY_CONDITION_CLIENT_FLAGS
: {
3475 if (condition_is_negative
) {
3476 master_condition_negated_mask
|= NECP_KERNEL_CONDITION_CLIENT_FLAGS
;
3478 master_condition_mask
|= NECP_KERNEL_CONDITION_CLIENT_FLAGS
;
3479 socket_only_conditions
= TRUE
;
3480 if (condition_length
>= sizeof(u_int32_t
)) {
3481 memcpy(&cond_client_flags
, condition_value
, sizeof(cond_client_flags
));
3483 // Empty means match on fallback traffic
3484 cond_client_flags
= NECP_CLIENT_PARAMETER_FLAG_FALLBACK_TRAFFIC
;
3488 case NECP_POLICY_CONDITION_FLOW_LOCAL_ADDR_EMPTY
: {
3489 master_condition_mask
|= NECP_KERNEL_CONDITION_LOCAL_EMPTY
;
3490 if (condition_is_negative
) {
3491 master_condition_negated_mask
|= NECP_KERNEL_CONDITION_LOCAL_EMPTY
;
3493 socket_only_conditions
= TRUE
;
3496 case NECP_POLICY_CONDITION_FLOW_REMOTE_ADDR_EMPTY
: {
3497 master_condition_mask
|= NECP_KERNEL_CONDITION_REMOTE_EMPTY
;
3498 if (condition_is_negative
) {
3499 master_condition_negated_mask
|= NECP_KERNEL_CONDITION_REMOTE_EMPTY
;
3501 socket_only_conditions
= TRUE
;
3509 offset
+= sizeof(u_int8_t
) + sizeof(u_int32_t
) + length
;
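	// Each condition TLV is a one-byte type, a four-byte length, and the value itself
	// (the stride added to offset above). Roughly, each case validates its value and
	// translates it into one or more NECP_KERNEL_CONDITION_* bits in master_condition_mask
	// (mirrored into master_condition_negated_mask when the condition is negative), and
	// records whether the condition can only be evaluated at the socket layer
	// (socket_only_conditions) or at both the socket and IP layers (socket_ip_conditions).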
	ultimate_result = necp_policy_get_result_type(policy);
	switch (ultimate_result) {
	case NECP_POLICY_RESULT_PASS: {
		u_int32_t pass_flags = 0;
		if (necp_policy_result_get_parameter_length_from_buffer(policy->result, policy->result_size) > 0) {
			if (necp_policy_get_result_parameter(policy, (u_int8_t *)&pass_flags, sizeof(pass_flags))) {
				ultimate_result_parameter.pass_flags = pass_flags;
			}
		}
		if (socket_only_conditions) { // socket_ip_conditions can be TRUE or FALSE
			socket_layer_non_id_conditions = TRUE;
			ip_output_layer_id_condition = TRUE;
		} else if (socket_ip_conditions) {
			socket_layer_non_id_conditions = TRUE;
			ip_output_layer_id_condition = TRUE;
			ip_output_layer_non_id_conditions = TRUE;
		}
		break;
	}
	case NECP_POLICY_RESULT_DROP: {
		if (socket_only_conditions) { // socket_ip_conditions can be TRUE or FALSE
			socket_layer_non_id_conditions = TRUE;
		} else if (socket_ip_conditions) {
			socket_layer_non_id_conditions = TRUE;
			ip_output_layer_non_id_conditions = TRUE;
			ip_output_layer_non_id_only = TRUE; // Only apply drop to packets that didn't go through socket layer
		}
		break;
	}
	case NECP_POLICY_RESULT_SKIP: {
		u_int32_t skip_policy_order = 0;
		if (necp_policy_get_result_parameter(policy, (u_int8_t *)&skip_policy_order, sizeof(skip_policy_order))) {
			ultimate_result_parameter.skip_policy_order = skip_policy_order;
		}

		if (socket_only_conditions) { // socket_ip_conditions can be TRUE or FALSE
			socket_layer_non_id_conditions = TRUE;
			ip_output_layer_id_condition = TRUE;
		} else if (socket_ip_conditions) {
			socket_layer_non_id_conditions = TRUE;
			ip_output_layer_non_id_conditions = TRUE;
		}
		break;
	}
	case NECP_POLICY_RESULT_SOCKET_DIVERT:
	case NECP_POLICY_RESULT_SOCKET_FILTER: {
		u_int32_t control_unit = 0;
		if (necp_policy_get_result_parameter(policy, (u_int8_t *)&control_unit, sizeof(control_unit))) {
			ultimate_result_parameter.flow_divert_control_unit = control_unit;
		}
		socket_layer_non_id_conditions = TRUE;
		break;
	}
	case NECP_POLICY_RESULT_IP_TUNNEL: {
		struct necp_policy_result_ip_tunnel tunnel_parameters;
		u_int32_t tunnel_parameters_length = necp_policy_get_result_parameter_length(policy);
		if (tunnel_parameters_length > sizeof(u_int32_t) &&
		    tunnel_parameters_length <= sizeof(struct necp_policy_result_ip_tunnel) &&
		    necp_policy_get_result_parameter(policy, (u_int8_t *)&tunnel_parameters, sizeof(tunnel_parameters))) {
			ifnet_t tunnel_interface = NULL;
			tunnel_parameters.interface_name[tunnel_parameters_length - sizeof(u_int32_t) - 1] = 0; // Make sure the string is NULL terminated
			if (ifnet_find_by_name(tunnel_parameters.interface_name, &tunnel_interface) == 0) {
				ultimate_result_parameter.tunnel_interface_index = tunnel_interface->if_index;
				ifnet_release(tunnel_interface);
			}

			secondary_result = tunnel_parameters.secondary_result;
			if (secondary_result) {
				cond_last_interface_index = ultimate_result_parameter.tunnel_interface_index;
			}
		}

		if (socket_only_conditions) { // socket_ip_conditions can be TRUE or FALSE
			socket_layer_non_id_conditions = TRUE;
			ip_output_layer_id_condition = TRUE;
			if (secondary_result) {
				ip_output_layer_tunnel_condition_from_id = TRUE;
			}
		} else if (socket_ip_conditions) {
			socket_layer_non_id_conditions = TRUE;
			ip_output_layer_id_condition = TRUE;
			ip_output_layer_non_id_conditions = TRUE;
			if (secondary_result) {
				ip_output_layer_tunnel_condition_from_id = TRUE;
				ip_output_layer_tunnel_condition_from_non_id = TRUE;
			}
		}
		break;
	}
	case NECP_POLICY_RESULT_TRIGGER:
	case NECP_POLICY_RESULT_TRIGGER_IF_NEEDED:
	case NECP_POLICY_RESULT_TRIGGER_SCOPED:
	case NECP_POLICY_RESULT_NO_TRIGGER_SCOPED: {
		struct necp_policy_result_service service_parameters;
		u_int32_t service_result_length = necp_policy_get_result_parameter_length(policy);
		bool has_extra_service_data = FALSE;
		if (service_result_length >= (sizeof(service_parameters))) {
			has_extra_service_data = TRUE;
		}
		if (necp_policy_get_result_parameter(policy, (u_int8_t *)&service_parameters, sizeof(service_parameters))) {
			ultimate_result_parameter.service.identifier = necp_create_uuid_service_id_mapping(service_parameters.identifier);
			if (ultimate_result_parameter.service.identifier != 0) {
				uuid_copy(policy->applied_result_uuid, service_parameters.identifier);
				socket_layer_non_id_conditions = TRUE;
				if (has_extra_service_data) {
					ultimate_result_parameter.service.data = service_parameters.data;
				} else {
					ultimate_result_parameter.service.data = 0;
				}
			}
		}
		break;
	}
	case NECP_POLICY_RESULT_USE_NETAGENT:
	case NECP_POLICY_RESULT_NETAGENT_SCOPED: {
		uuid_t netagent_uuid;
		if (necp_policy_get_result_parameter(policy, (u_int8_t *)&netagent_uuid, sizeof(netagent_uuid))) {
			ultimate_result_parameter.netagent_id = necp_create_uuid_service_id_mapping(netagent_uuid);
			if (ultimate_result_parameter.netagent_id != 0) {
				uuid_copy(policy->applied_result_uuid, netagent_uuid);
				socket_layer_non_id_conditions = TRUE;
			}
		}
		break;
	}
	case NECP_POLICY_RESULT_SOCKET_SCOPED: {
		u_int32_t interface_name_length = necp_policy_get_result_parameter_length(policy);
		if (interface_name_length <= IFXNAMSIZ && interface_name_length > 0) {
			char interface_name[IFXNAMSIZ];
			ifnet_t scope_interface = NULL;
			necp_policy_get_result_parameter(policy, (u_int8_t *)interface_name, interface_name_length);
			interface_name[interface_name_length - 1] = 0; // Make sure the string is NULL terminated
			if (ifnet_find_by_name(interface_name, &scope_interface) == 0) {
				ultimate_result_parameter.scoped_interface_index = scope_interface->if_index;
				socket_layer_non_id_conditions = TRUE;
				ifnet_release(scope_interface);
			}
		}
		break;
	}
	case NECP_POLICY_RESULT_SCOPED_DIRECT: {
		socket_layer_non_id_conditions = TRUE;
		break;
	}
	case NECP_POLICY_RESULT_ALLOW_UNENTITLED: {
		socket_layer_non_id_conditions = TRUE;
		break;
	}
	case NECP_POLICY_RESULT_ROUTE_RULES: {
		if (policy->route_rules != NULL && policy->route_rules_size > 0) {
			u_int32_t route_rule_id = necp_create_route_rule(&necp_route_rules, policy->route_rules, policy->route_rules_size);
			if (route_rule_id > 0) {
				policy->applied_route_rules_id = route_rule_id;
				ultimate_result_parameter.route_rule_id = route_rule_id;
				if (socket_only_conditions) { // socket_ip_conditions can be TRUE or FALSE
					socket_layer_non_id_conditions = TRUE;
				} else if (socket_ip_conditions) {
					socket_layer_non_id_conditions = TRUE;
					ip_output_layer_non_id_conditions = TRUE;
					ip_output_layer_non_id_only = TRUE; // Only apply route rules to packets that didn't go through socket layer
				}
			}
		}
		break;
	}
	}
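	// Each result type above selects which kernel policy layers the session policy must be
	// expanded into: socket_layer_non_id_conditions installs a socket-level policy,
	// ip_output_layer_id_condition installs an IP-output policy keyed on that socket
	// policy's ID, and ip_output_layer_non_id_conditions (optionally restricted by
	// ip_output_layer_non_id_only) covers packets that did not go through the socket layer,
	// as the Drop and RouteRules comments note.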
	if (socket_layer_non_id_conditions) {
		necp_kernel_policy_id policy_id = necp_kernel_socket_policy_add(policy->order, session->session_order, session->proc_pid, master_condition_mask, master_condition_negated_mask, cond_app_id, cond_real_app_id, cond_custom_entitlement, cond_account_id, cond_domain, cond_pid, cond_uid, cond_bound_interface, cond_traffic_class, cond_protocol, &cond_local_start, &cond_local_end, cond_local_prefix, &cond_remote_start, &cond_remote_end, cond_remote_prefix, &cond_agent_type, cond_client_flags, ultimate_result, ultimate_result_parameter);

		if (policy_id == 0) {
			NECPLOG0(LOG_DEBUG, "Error applying socket kernel policy");
			goto fail;
		}

		cond_ip_output_layer_id = policy_id;
		policy->kernel_socket_policies[0] = policy_id;
	}

	if (ip_output_layer_non_id_conditions) {
		u_int32_t condition_mask = master_condition_mask;
		if (ip_output_layer_non_id_only) {
			condition_mask |= NECP_KERNEL_CONDITION_POLICY_ID;
		}

		necp_kernel_policy_id policy_id = necp_kernel_ip_output_policy_add(policy->order, NECP_KERNEL_POLICY_SUBORDER_NON_ID_CONDITIONS, session->session_order, session->proc_pid, condition_mask, master_condition_negated_mask, NECP_KERNEL_POLICY_ID_NONE, cond_bound_interface, 0, cond_protocol, &cond_local_start, &cond_local_end, cond_local_prefix, &cond_remote_start, &cond_remote_end, cond_remote_prefix, ultimate_result, ultimate_result_parameter);

		if (policy_id == 0) {
			NECPLOG0(LOG_DEBUG, "Error applying IP output kernel policy");
			goto fail;
		}

		policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_NON_ID_CONDITIONS] = policy_id;
	}

	if (ip_output_layer_id_condition) {
		necp_kernel_policy_id policy_id = necp_kernel_ip_output_policy_add(policy->order, NECP_KERNEL_POLICY_SUBORDER_ID_CONDITION, session->session_order, session->proc_pid, NECP_KERNEL_CONDITION_POLICY_ID | NECP_KERNEL_CONDITION_ALL_INTERFACES, 0, cond_ip_output_layer_id, NULL, 0, 0, NULL, NULL, 0, NULL, NULL, 0, ultimate_result, ultimate_result_parameter);

		if (policy_id == 0) {
			NECPLOG0(LOG_DEBUG, "Error applying IP output kernel policy");
			goto fail;
		}

		policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_ID_CONDITION] = policy_id;
	}

	// Extra policies for IP Output tunnels for when packets loop back
	if (ip_output_layer_tunnel_condition_from_id) {
		necp_kernel_policy_id policy_id = necp_kernel_ip_output_policy_add(policy->order, NECP_KERNEL_POLICY_SUBORDER_NON_ID_TUNNEL_CONDITION, session->session_order, session->proc_pid, NECP_KERNEL_CONDITION_POLICY_ID | NECP_KERNEL_CONDITION_LAST_INTERFACE | NECP_KERNEL_CONDITION_ALL_INTERFACES, 0, policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_NON_ID_CONDITIONS], NULL, cond_last_interface_index, 0, NULL, NULL, 0, NULL, NULL, 0, secondary_result, secondary_result_parameter);

		if (policy_id == 0) {
			NECPLOG0(LOG_DEBUG, "Error applying IP output kernel policy");
			goto fail;
		}

		policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_NON_ID_TUNNEL_CONDITION] = policy_id;
	}

	if (ip_output_layer_tunnel_condition_from_id) {
		necp_kernel_policy_id policy_id = necp_kernel_ip_output_policy_add(policy->order, NECP_KERNEL_POLICY_SUBORDER_ID_TUNNEL_CONDITION, session->session_order, session->proc_pid, NECP_KERNEL_CONDITION_POLICY_ID | NECP_KERNEL_CONDITION_LAST_INTERFACE | NECP_KERNEL_CONDITION_ALL_INTERFACES, 0, policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_ID_CONDITION], NULL, cond_last_interface_index, 0, NULL, NULL, 0, NULL, NULL, 0, secondary_result, secondary_result_parameter);

		if (policy_id == 0) {
			NECPLOG0(LOG_DEBUG, "Error applying IP output kernel policy");
			goto fail;
		}

		policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_ID_TUNNEL_CONDITION] = policy_id;
	}

	policy->applied = TRUE;
	policy->pending_update = FALSE;
	return TRUE;

fail:
	return FALSE;
}
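// At this point the session policy has been fanned out into kernel policies: slot 0 of
// policy->kernel_socket_policies holds the socket-layer policy (whose ID doubles as
// cond_ip_output_layer_id for the POLICY_ID condition), and policy->kernel_ip_output_policies
// holds one entry per suborder for the IP-output layer, including the tunnel loopback
// variants that reuse secondary_result.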
static void
necp_policy_apply_all(struct necp_session *session)
{
	struct necp_session_policy *policy = NULL;
	struct necp_session_policy *temp_policy = NULL;
	struct kev_necp_policies_changed_data kev_data;
	kev_data.changed_count = 0;

	lck_rw_lock_exclusive(&necp_kernel_policy_lock);

	// Remove existing applied policies
	if (session->dirty) {
		LIST_FOREACH_SAFE(policy, &session->policies, chain, temp_policy) {
			if (policy->pending_deletion) {
				if (policy->applied) {
					necp_policy_unapply(policy);
				}
				// Delete the policy
				necp_policy_delete(session, policy);
			} else if (!policy->applied) {
				necp_policy_apply(session, policy);
			} else if (policy->pending_update) {
				// Must have been applied, but needs an update. Remove and re-add.
				necp_policy_unapply(policy);
				necp_policy_apply(session, policy);
			}
		}

		necp_kernel_socket_policies_update_uuid_table();
		necp_kernel_socket_policies_reprocess();
		necp_kernel_ip_output_policies_reprocess();

		// Clear dirty bit flags
		session->dirty = FALSE;
	}

	lck_rw_done(&necp_kernel_policy_lock);

	necp_update_all_clients();
	necp_post_change_event(&kev_data);

	NECPLOG0(LOG_DEBUG, "Applied NECP policies");
}
// Kernel Policy Management
// ---------------------
// Kernel policies are derived from session policies
static necp_kernel_policy_id
necp_kernel_policy_get_new_id(bool socket_level)
{
	static necp_kernel_policy_id necp_last_kernel_socket_policy_id = 0;
	static necp_kernel_policy_id necp_last_kernel_ip_policy_id = 0;

	necp_kernel_policy_id newid = NECP_KERNEL_POLICY_ID_NONE;

	LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	if (socket_level) {
		bool wrapped = FALSE;
		do {
			necp_last_kernel_socket_policy_id++;
			if (necp_last_kernel_socket_policy_id < NECP_KERNEL_POLICY_ID_FIRST_VALID_SOCKET ||
			    necp_last_kernel_socket_policy_id >= NECP_KERNEL_POLICY_ID_FIRST_VALID_IP) {
				if (wrapped) {
					// Already wrapped, give up
					NECPLOG0(LOG_ERR, "Failed to find a free socket kernel policy ID.\n");
					return NECP_KERNEL_POLICY_ID_NONE;
				}
				necp_last_kernel_socket_policy_id = NECP_KERNEL_POLICY_ID_FIRST_VALID_SOCKET;
				wrapped = TRUE;
			}
			newid = necp_last_kernel_socket_policy_id;
		} while (necp_kernel_socket_policy_find(newid) != NULL); // If already used, keep trying
	} else {
		bool wrapped = FALSE;
		do {
			necp_last_kernel_ip_policy_id++;
			if (necp_last_kernel_ip_policy_id < NECP_KERNEL_POLICY_ID_FIRST_VALID_IP) {
				if (wrapped) {
					// Already wrapped, give up
					NECPLOG0(LOG_ERR, "Failed to find a free IP kernel policy ID.\n");
					return NECP_KERNEL_POLICY_ID_NONE;
				}
				necp_last_kernel_ip_policy_id = NECP_KERNEL_POLICY_ID_FIRST_VALID_IP;
				wrapped = TRUE;
			}
			newid = necp_last_kernel_ip_policy_id;
		} while (necp_kernel_ip_output_policy_find(newid) != NULL); // If already used, keep trying
	}

	if (newid == NECP_KERNEL_POLICY_ID_NONE) {
		NECPLOG0(LOG_ERR, "Allocate kernel policy id failed.\n");
		return NECP_KERNEL_POLICY_ID_NONE;
	}

	return newid;
}
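// Kernel policy IDs are drawn from two disjoint ranges: socket-level IDs live in
// [NECP_KERNEL_POLICY_ID_FIRST_VALID_SOCKET, NECP_KERNEL_POLICY_ID_FIRST_VALID_IP) and
// IP-output IDs start at NECP_KERNEL_POLICY_ID_FIRST_VALID_IP. The allocator above wraps
// around within its range once and gives up (returning NECP_KERNEL_POLICY_ID_NONE) if no
// free ID can be found.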
#define NECP_KERNEL_VALID_SOCKET_CONDITIONS (NECP_KERNEL_CONDITION_APP_ID | NECP_KERNEL_CONDITION_REAL_APP_ID | NECP_KERNEL_CONDITION_DOMAIN | NECP_KERNEL_CONDITION_ACCOUNT_ID | NECP_KERNEL_CONDITION_PID | NECP_KERNEL_CONDITION_UID | NECP_KERNEL_CONDITION_ALL_INTERFACES | NECP_KERNEL_CONDITION_BOUND_INTERFACE | NECP_KERNEL_CONDITION_TRAFFIC_CLASS | NECP_KERNEL_CONDITION_PROTOCOL | NECP_KERNEL_CONDITION_LOCAL_START | NECP_KERNEL_CONDITION_LOCAL_END | NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_REMOTE_START | NECP_KERNEL_CONDITION_REMOTE_END | NECP_KERNEL_CONDITION_REMOTE_PREFIX | NECP_KERNEL_CONDITION_ENTITLEMENT | NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT | NECP_KERNEL_CONDITION_AGENT_TYPE | NECP_KERNEL_CONDITION_HAS_CLIENT | NECP_KERNEL_CONDITION_LOCAL_NETWORKS | NECP_KERNEL_CONDITION_CLIENT_FLAGS | NECP_KERNEL_CONDITION_LOCAL_EMPTY | NECP_KERNEL_CONDITION_REMOTE_EMPTY | NECP_KERNEL_CONDITION_PLATFORM_BINARY)
static necp_kernel_policy_id
necp_kernel_socket_policy_add(necp_policy_order order, u_int32_t session_order, int session_pid, u_int32_t condition_mask, u_int32_t condition_negated_mask, necp_app_id cond_app_id, necp_app_id cond_real_app_id, char *cond_custom_entitlement, u_int32_t cond_account_id, char *cond_domain, pid_t cond_pid, uid_t cond_uid, ifnet_t cond_bound_interface, struct necp_policy_condition_tc_range cond_traffic_class, u_int16_t cond_protocol, union necp_sockaddr_union *cond_local_start, union necp_sockaddr_union *cond_local_end, u_int8_t cond_local_prefix, union necp_sockaddr_union *cond_remote_start, union necp_sockaddr_union *cond_remote_end, u_int8_t cond_remote_prefix, struct necp_policy_condition_agent_type *cond_agent_type, u_int32_t cond_client_flags, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter)
{
	struct necp_kernel_socket_policy *new_kernel_policy = NULL;
	struct necp_kernel_socket_policy *tmp_kernel_policy = NULL;

	MALLOC_ZONE(new_kernel_policy, struct necp_kernel_socket_policy *, sizeof(*new_kernel_policy), M_NECP_SOCKET_POLICY, M_WAITOK);
	if (new_kernel_policy == NULL) {
		goto done;
	}

	memset(new_kernel_policy, 0, sizeof(*new_kernel_policy)); // M_ZERO is not supported for MALLOC_ZONE
	new_kernel_policy->id = necp_kernel_policy_get_new_id(true);
	new_kernel_policy->order = order;
	new_kernel_policy->session_order = session_order;
	new_kernel_policy->session_pid = session_pid;

	// Sanitize condition mask
	new_kernel_policy->condition_mask = (condition_mask & NECP_KERNEL_VALID_SOCKET_CONDITIONS);
	if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES) && (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE)) {
		new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_BOUND_INTERFACE;
	}
	if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) && !(new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID)) {
		new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_REAL_APP_ID;
	}
	if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT) && !(new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID)) {
		new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_ENTITLEMENT;
	}
	if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) && (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX)) {
		new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_LOCAL_PREFIX;
	}
	if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) && (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX)) {
		new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_REMOTE_PREFIX;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_EMPTY) {
		new_kernel_policy->condition_mask &= ~(NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_LOCAL_END);
	}
	if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_EMPTY)) {
		new_kernel_policy->condition_mask &= ~(NECP_KERNEL_CONDITION_REMOTE_PREFIX | NECP_KERNEL_CONDITION_REMOTE_END);
	}
	new_kernel_policy->condition_negated_mask = condition_negated_mask & new_kernel_policy->condition_mask;

	// Set condition values
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID) {
		new_kernel_policy->cond_app_id = cond_app_id;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
		new_kernel_policy->cond_real_app_id = cond_real_app_id;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT) {
		new_kernel_policy->cond_custom_entitlement = cond_custom_entitlement;
		new_kernel_policy->cond_custom_entitlement_matched = necp_boolean_state_unknown;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID) {
		new_kernel_policy->cond_account_id = cond_account_id;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_DOMAIN) {
		new_kernel_policy->cond_domain = cond_domain;
		new_kernel_policy->cond_domain_dot_count = necp_count_dots(cond_domain, strlen(cond_domain));
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PID) {
		new_kernel_policy->cond_pid = cond_pid;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_UID) {
		new_kernel_policy->cond_uid = cond_uid;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
		if (cond_bound_interface) {
			ifnet_reference(cond_bound_interface);
		}
		new_kernel_policy->cond_bound_interface = cond_bound_interface;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
		new_kernel_policy->cond_traffic_class = cond_traffic_class;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
		new_kernel_policy->cond_protocol = cond_protocol;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
		memcpy(&new_kernel_policy->cond_local_start, cond_local_start, cond_local_start->sa.sa_len);
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
		memcpy(&new_kernel_policy->cond_local_end, cond_local_end, cond_local_end->sa.sa_len);
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
		new_kernel_policy->cond_local_prefix = cond_local_prefix;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
		memcpy(&new_kernel_policy->cond_remote_start, cond_remote_start, cond_remote_start->sa.sa_len);
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
		memcpy(&new_kernel_policy->cond_remote_end, cond_remote_end, cond_remote_end->sa.sa_len);
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
		new_kernel_policy->cond_remote_prefix = cond_remote_prefix;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_AGENT_TYPE) {
		memcpy(&new_kernel_policy->cond_agent_type, cond_agent_type, sizeof(*cond_agent_type));
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_CLIENT_FLAGS) {
		new_kernel_policy->cond_client_flags = cond_client_flags;
	}

	new_kernel_policy->result = result;
	memcpy(&new_kernel_policy->result_parameter, &result_parameter, sizeof(result_parameter));

	NECPLOG(LOG_DEBUG, "Added kernel policy: socket, id=%d, mask=%x\n", new_kernel_policy->id, new_kernel_policy->condition_mask);
	LIST_INSERT_SORTED_TWICE_ASCENDING(&necp_kernel_socket_policies, new_kernel_policy, chain, session_order, order, tmp_kernel_policy);
done:
	return new_kernel_policy ? new_kernel_policy->id : 0;
}
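// Note that necp_kernel_socket_policy_add() effectively takes ownership of the
// cond_custom_entitlement and cond_domain strings and takes its own reference on
// cond_bound_interface; the sanitization block above also keeps the condition mask
// self-consistent (for example, REAL_APP_ID and ENTITLEMENT are only meaningful alongside
// APP_ID, and LOCAL/REMOTE prefix conditions are dropped when an explicit end address or
// an "empty" condition is present).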
static struct necp_kernel_socket_policy *
necp_kernel_socket_policy_find(necp_kernel_policy_id policy_id)
{
	struct necp_kernel_socket_policy *kernel_policy = NULL;
	struct necp_kernel_socket_policy *tmp_kernel_policy = NULL;

	if (policy_id == 0) {
		return NULL;
	}

	LIST_FOREACH_SAFE(kernel_policy, &necp_kernel_socket_policies, chain, tmp_kernel_policy) {
		if (kernel_policy->id == policy_id) {
			return kernel_policy;
		}
	}

	return NULL;
}
static bool
necp_kernel_socket_policy_delete(necp_kernel_policy_id policy_id)
{
	struct necp_kernel_socket_policy *policy = NULL;

	LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	policy = necp_kernel_socket_policy_find(policy_id);
	if (policy) {
		LIST_REMOVE(policy, chain);

		if (policy->cond_bound_interface) {
			ifnet_release(policy->cond_bound_interface);
			policy->cond_bound_interface = NULL;
		}

		if (policy->cond_domain) {
			FREE(policy->cond_domain, M_NECP);
			policy->cond_domain = NULL;
		}

		if (policy->cond_custom_entitlement) {
			FREE(policy->cond_custom_entitlement, M_NECP);
			policy->cond_custom_entitlement = NULL;
		}

		FREE_ZONE(policy, sizeof(*policy), M_NECP_SOCKET_POLICY);
		return TRUE;
	}

	return FALSE;
}
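// Deleting a socket kernel policy releases everything the add path acquired: the extra
// reference on cond_bound_interface, the heap copies of cond_domain and
// cond_custom_entitlement, and finally the policy structure itself back to its zone.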
static inline const char *
necp_get_result_description(char *result_string, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter)
{
	uuid_string_t uuid_string;
	switch (result) {
	case NECP_KERNEL_POLICY_RESULT_NONE: {
		snprintf(result_string, MAX_RESULT_STRING_LEN, "None");
		break;
	}
	case NECP_KERNEL_POLICY_RESULT_PASS: {
		snprintf(result_string, MAX_RESULT_STRING_LEN, "Pass (%X)", result_parameter.pass_flags);
		break;
	}
	case NECP_KERNEL_POLICY_RESULT_SKIP: {
		snprintf(result_string, MAX_RESULT_STRING_LEN, "Skip (%u)", result_parameter.skip_policy_order);
		break;
	}
	case NECP_KERNEL_POLICY_RESULT_DROP: {
		snprintf(result_string, MAX_RESULT_STRING_LEN, "Drop");
		break;
	}
	case NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT: {
		snprintf(result_string, MAX_RESULT_STRING_LEN, "SocketDivert (%d)", result_parameter.flow_divert_control_unit);
		break;
	}
	case NECP_KERNEL_POLICY_RESULT_SOCKET_FILTER: {
		snprintf(result_string, MAX_RESULT_STRING_LEN, "SocketFilter (%d)", result_parameter.filter_control_unit);
		break;
	}
	case NECP_KERNEL_POLICY_RESULT_IP_TUNNEL: {
		ifnet_t interface = ifindex2ifnet[result_parameter.tunnel_interface_index];
		snprintf(result_string, MAX_RESULT_STRING_LEN, "IPTunnel (%s%d)", ifnet_name(interface), ifnet_unit(interface));
		break;
	}
	case NECP_KERNEL_POLICY_RESULT_IP_FILTER: {
		snprintf(result_string, MAX_RESULT_STRING_LEN, "IPFilter");
		break;
	}
	case NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED: {
		ifnet_t interface = ifindex2ifnet[result_parameter.scoped_interface_index];
		snprintf(result_string, MAX_RESULT_STRING_LEN, "SocketScoped (%s%d)", ifnet_name(interface), ifnet_unit(interface));
		break;
	}
	case NECP_KERNEL_POLICY_RESULT_SCOPED_DIRECT: {
		snprintf(result_string, MAX_RESULT_STRING_LEN, "ScopedDirect");
		break;
	}
	case NECP_KERNEL_POLICY_RESULT_ALLOW_UNENTITLED: {
		snprintf(result_string, MAX_RESULT_STRING_LEN, "AllowUnentitled");
		break;
	}
	case NECP_KERNEL_POLICY_RESULT_ROUTE_RULES: {
		int index = 0;
		char interface_names[MAX_ROUTE_RULE_INTERFACES][IFXNAMSIZ];
		struct necp_route_rule *route_rule = necp_lookup_route_rule_locked(&necp_route_rules, result_parameter.route_rule_id);
		if (route_rule != NULL) {
			for (index = 0; index < MAX_ROUTE_RULE_INTERFACES; index++) {
				if (route_rule->exception_if_indices[index] != 0) {
					ifnet_t interface = ifindex2ifnet[route_rule->exception_if_indices[index]];
					snprintf(interface_names[index], IFXNAMSIZ, "%s%d", ifnet_name(interface), ifnet_unit(interface));
				} else {
					memset(interface_names[index], 0, IFXNAMSIZ);
				}
			}
			switch (route_rule->default_action) {
			case NECP_ROUTE_RULE_DENY_INTERFACE:
				snprintf(result_string, MAX_RESULT_STRING_LEN, "RouteRules (Only %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)",
				    (route_rule->cellular_action == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? "Cell " : "",
				    (route_rule->wifi_action == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? "WiFi " : "",
				    (route_rule->wired_action == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? "Wired " : "",
				    (route_rule->expensive_action == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? "Exp " : "",
				    (route_rule->constrained_action == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? "Constrained " : "",
				    (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[0] : "",
				    (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
				    (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[1] : "",
				    (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
				    (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[2] : "",
				    (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
				    (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[3] : "",
				    (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
				    (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[4] : "",
				    (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
				    (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[5] : "",
				    (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
				    (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[6] : "",
				    (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
				    (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[7] : "",
				    (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
				    (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[8] : "",
				    (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
				    (route_rule->exception_if_actions[9] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[9] : "");
				break;
			case NECP_ROUTE_RULE_ALLOW_INTERFACE:
				snprintf(result_string, MAX_RESULT_STRING_LEN, "RouteRules (%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)",
				    (route_rule->cellular_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!Cell " : "",
				    (route_rule->wifi_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!WiFi " : "",
				    (route_rule->wired_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!Wired " : "",
				    (route_rule->expensive_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!Exp " : "",
				    (route_rule->constrained_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!Constrained " : "",
				    (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
				    (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[0] : "",
				    (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
				    (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[1] : "",
				    (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
				    (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[2] : "",
				    (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
				    (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[3] : "",
				    (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
				    (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[4] : "",
				    (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
				    (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[5] : "",
				    (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
				    (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[6] : "",
				    (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
				    (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[7] : "",
				    (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
				    (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[8] : "",
				    (route_rule->exception_if_actions[9] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
				    (route_rule->exception_if_actions[9] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[9] : "");
				break;
			case NECP_ROUTE_RULE_QOS_MARKING:
				snprintf(result_string, MAX_RESULT_STRING_LEN, "RouteRules (QoSMarking %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)",
				    (route_rule->cellular_action == NECP_ROUTE_RULE_QOS_MARKING) ? "Cell " : "",
				    (route_rule->wifi_action == NECP_ROUTE_RULE_QOS_MARKING) ? "WiFi " : "",
				    (route_rule->wired_action == NECP_ROUTE_RULE_QOS_MARKING) ? "Wired " : "",
				    (route_rule->expensive_action == NECP_ROUTE_RULE_QOS_MARKING) ? "Exp " : "",
				    (route_rule->constrained_action == NECP_ROUTE_RULE_QOS_MARKING) ? "Constrained " : "",
				    (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[0] : "",
				    (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
				    (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[1] : "",
				    (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
				    (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[2] : "",
				    (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
				    (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[3] : "",
				    (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
				    (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[4] : "",
				    (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
				    (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[5] : "",
				    (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
				    (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[6] : "",
				    (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
				    (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[7] : "",
				    (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
				    (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[8] : "",
				    (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
				    (route_rule->exception_if_actions[9] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[9] : "");
				break;
			default:
				snprintf(result_string, MAX_RESULT_STRING_LEN, "RouteRules (Unknown)");
				break;
			}
		}
		break;
	}
	case NECP_KERNEL_POLICY_RESULT_USE_NETAGENT: {
		bool found_mapping = FALSE;
		struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.netagent_id);
		if (mapping != NULL) {
			uuid_unparse(mapping->uuid, uuid_string);
			found_mapping = TRUE;
		}
		snprintf(result_string, MAX_RESULT_STRING_LEN, "UseNetAgent (%s)", found_mapping ? uuid_string : "Unknown");
		break;
	}
	case NECP_KERNEL_POLICY_RESULT_NETAGENT_SCOPED: {
		bool found_mapping = FALSE;
		struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.netagent_id);
		if (mapping != NULL) {
			uuid_unparse(mapping->uuid, uuid_string);
			found_mapping = TRUE;
		}
		snprintf(result_string, MAX_RESULT_STRING_LEN, "NetAgentScoped (%s)", found_mapping ? uuid_string : "Unknown");
		break;
	}
	case NECP_POLICY_RESULT_TRIGGER: {
		bool found_mapping = FALSE;
		struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.service.identifier);
		if (mapping != NULL) {
			uuid_unparse(mapping->uuid, uuid_string);
			found_mapping = TRUE;
		}
		snprintf(result_string, MAX_RESULT_STRING_LEN, "Trigger (%s.%d)", found_mapping ? uuid_string : "Unknown", result_parameter.service.data);
		break;
	}
	case NECP_POLICY_RESULT_TRIGGER_IF_NEEDED: {
		bool found_mapping = FALSE;
		struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.service.identifier);
		if (mapping != NULL) {
			uuid_unparse(mapping->uuid, uuid_string);
			found_mapping = TRUE;
		}
		snprintf(result_string, MAX_RESULT_STRING_LEN, "TriggerIfNeeded (%s.%d)", found_mapping ? uuid_string : "Unknown", result_parameter.service.data);
		break;
	}
	case NECP_POLICY_RESULT_TRIGGER_SCOPED: {
		bool found_mapping = FALSE;
		struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.service.identifier);
		if (mapping != NULL) {
			uuid_unparse(mapping->uuid, uuid_string);
			found_mapping = TRUE;
		}
		snprintf(result_string, MAX_RESULT_STRING_LEN, "TriggerScoped (%s.%d)", found_mapping ? uuid_string : "Unknown", result_parameter.service.data);
		break;
	}
	case NECP_POLICY_RESULT_NO_TRIGGER_SCOPED: {
		bool found_mapping = FALSE;
		struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.service.identifier);
		if (mapping != NULL) {
			uuid_unparse(mapping->uuid, uuid_string);
			found_mapping = TRUE;
		}
		snprintf(result_string, MAX_RESULT_STRING_LEN, "NoTriggerScoped (%s.%d)", found_mapping ? uuid_string : "Unknown", result_parameter.service.data);
		break;
	}
	default: {
		snprintf(result_string, MAX_RESULT_STRING_LEN, "Unknown %d (%d)", result, result_parameter.tunnel_interface_index);
		break;
	}
	}
	return result_string;
}
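// necp_get_result_description() is a debug formatter: it renders a kernel policy result
// and its parameter into the caller-provided result_string buffer (sized
// MAX_RESULT_STRING_LEN) and is consumed by the dump routine below.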
static void
necp_kernel_socket_policies_dump_all(void)
{
	struct necp_kernel_socket_policy *policy = NULL;
	int policy_i;
	int app_i;
	char result_string[MAX_RESULT_STRING_LEN];
	char proc_name_string[MAXCOMLEN + 1];
	memset(result_string, 0, MAX_RESULT_STRING_LEN);
	memset(proc_name_string, 0, MAXCOMLEN + 1);

	NECPLOG0(LOG_DEBUG, "NECP Application Policies:\n");
	NECPLOG0(LOG_DEBUG, "-----------\n");
	for (policy_i = 0; necp_kernel_socket_policies_app_layer_map != NULL && necp_kernel_socket_policies_app_layer_map[policy_i] != NULL; policy_i++) {
		policy = necp_kernel_socket_policies_app_layer_map[policy_i];
		proc_name(policy->session_pid, proc_name_string, MAXCOMLEN);
		NECPLOG(LOG_DEBUG, "\t%3d. Policy ID: %5d\tProcess: %10.10s\tOrder: %04d.%04d\tMask: %5x\tResult: %s\n", policy_i, policy->id, proc_name_string, policy->session_order, policy->order, policy->condition_mask, necp_get_result_description(result_string, policy->result, policy->result_parameter));
	}
	if (necp_kernel_socket_policies_app_layer_map[0] != NULL) {
		NECPLOG0(LOG_DEBUG, "-----------\n");
	}

	NECPLOG0(LOG_DEBUG, "NECP Socket Policies:\n");
	NECPLOG0(LOG_DEBUG, "-----------\n");
	for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) {
		NECPLOG(LOG_DEBUG, "\tApp Bucket: %d\n", app_i);
		for (policy_i = 0; necp_kernel_socket_policies_map[app_i] != NULL && (necp_kernel_socket_policies_map[app_i])[policy_i] != NULL; policy_i++) {
			policy = (necp_kernel_socket_policies_map[app_i])[policy_i];
			proc_name(policy->session_pid, proc_name_string, MAXCOMLEN);
			NECPLOG(LOG_DEBUG, "\t%3d. Policy ID: %5d\tProcess: %10.10s\tOrder: %04d.%04d\tMask: %5x\tResult: %s\n", policy_i, policy->id, proc_name_string, policy->session_order, policy->order, policy->condition_mask, necp_get_result_description(result_string, policy->result, policy->result_parameter));
		}
		NECPLOG0(LOG_DEBUG, "-----------\n");
	}
}
static bool
necp_kernel_socket_result_is_trigger_service_type(struct necp_kernel_socket_policy *kernel_policy)
{
	return kernel_policy->result >= NECP_KERNEL_POLICY_RESULT_TRIGGER && kernel_policy->result <= NECP_KERNEL_POLICY_RESULT_NO_TRIGGER_SCOPED;
}
static bool
necp_kernel_socket_policy_results_overlap(struct necp_kernel_socket_policy *upper_policy, struct necp_kernel_socket_policy *lower_policy)
{
	if (upper_policy->result == NECP_KERNEL_POLICY_RESULT_DROP) {
		// Drop always cancels out lower policies
		return TRUE;
	} else if (upper_policy->result == NECP_KERNEL_POLICY_RESULT_SOCKET_FILTER ||
	    upper_policy->result == NECP_KERNEL_POLICY_RESULT_ROUTE_RULES ||
	    upper_policy->result == NECP_KERNEL_POLICY_RESULT_USE_NETAGENT ||
	    upper_policy->result == NECP_KERNEL_POLICY_RESULT_NETAGENT_SCOPED ||
	    upper_policy->result == NECP_KERNEL_POLICY_RESULT_ALLOW_UNENTITLED) {
		// Filters and route rules never cancel out lower policies
		return FALSE;
	} else if (necp_kernel_socket_result_is_trigger_service_type(upper_policy)) {
		// Trigger/Scoping policies can overlap one another, but not other results
		return necp_kernel_socket_result_is_trigger_service_type(lower_policy);
	} else if (upper_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
		if (upper_policy->session_order != lower_policy->session_order) {
			// A skip cannot override a policy of a different session
			return FALSE;
		}
		if (upper_policy->result_parameter.skip_policy_order == 0 ||
		    lower_policy->order >= upper_policy->result_parameter.skip_policy_order) {
			// This policy is beyond the skip
			return FALSE;
		} else {
			// This policy is inside the skip
			return TRUE;
		}
	}

	// A hard pass, flow divert, tunnel, or scope will currently block out lower policies
	return TRUE;
}
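// Overlap here means "the upper policy's result can mask the lower policy's result":
// Drop masks everything, filter/route-rule/netagent/allow-unentitled results mask nothing,
// trigger-style results only mask other trigger-style results, and a Skip masks only
// same-session policies whose order falls inside the skip window.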
static bool
necp_kernel_socket_policy_is_unnecessary(struct necp_kernel_socket_policy *policy, struct necp_kernel_socket_policy **policy_array, int valid_indices)
{
	bool can_skip = FALSE;
	u_int32_t highest_skip_session_order = 0;
	u_int32_t highest_skip_order = 0;
	int i;
	for (i = 0; i < valid_indices; i++) {
		struct necp_kernel_socket_policy *compared_policy = policy_array[i];

		// For policies in a skip window, we can't mark conflicting policies as unnecessary
		if (can_skip) {
			if (highest_skip_session_order != compared_policy->session_order ||
			    (highest_skip_order != 0 && compared_policy->order >= highest_skip_order)) {
				// If we've moved on to the next session, or passed the skip window
				highest_skip_session_order = 0;
				highest_skip_order = 0;
				can_skip = FALSE;
			} else {
				// If this policy is also a skip, it can increase the skip window
				if (compared_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
					if (compared_policy->result_parameter.skip_policy_order > highest_skip_order) {
						highest_skip_order = compared_policy->result_parameter.skip_policy_order;
					}
				}
				continue;
			}
		}

		if (compared_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
			// This policy is a skip. Set the skip window accordingly
			can_skip = TRUE;
			highest_skip_session_order = compared_policy->session_order;
			highest_skip_order = compared_policy->result_parameter.skip_policy_order;
		}

		// The result of the compared policy must be able to block out this policy result
		if (!necp_kernel_socket_policy_results_overlap(compared_policy, policy)) {
			continue;
		}

		// If new policy matches All Interfaces, compared policy must also
		if ((policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES) && !(compared_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES)) {
			continue;
		}

		// If new policy matches Local Networks, compared policy must also
		if ((policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_NETWORKS) && !(compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_NETWORKS)) {
			continue;
		}

		// Default makes lower policies unnecessary always
		if (compared_policy->condition_mask == 0) {
			return TRUE;
		}

		// Compared must be more general than policy, and include only conditions within policy
		if ((policy->condition_mask & compared_policy->condition_mask) != compared_policy->condition_mask) {
			continue;
		}

		// Negative conditions must match for the overlapping conditions
		if ((policy->condition_negated_mask & compared_policy->condition_mask) != (compared_policy->condition_negated_mask & compared_policy->condition_mask)) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_DOMAIN &&
		    strcmp(compared_policy->cond_domain, policy->cond_domain) != 0) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT &&
		    strcmp(compared_policy->cond_custom_entitlement, policy->cond_custom_entitlement) != 0) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID &&
		    compared_policy->cond_account_id != policy->cond_account_id) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID &&
		    compared_policy->cond_policy_id != policy->cond_policy_id) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID &&
		    compared_policy->cond_app_id != policy->cond_app_id) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID &&
		    compared_policy->cond_real_app_id != policy->cond_real_app_id) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_PID &&
		    compared_policy->cond_pid != policy->cond_pid) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_UID &&
		    compared_policy->cond_uid != policy->cond_uid) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE &&
		    compared_policy->cond_bound_interface != policy->cond_bound_interface) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL &&
		    compared_policy->cond_protocol != policy->cond_protocol) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_CLIENT_FLAGS &&
		    compared_policy->cond_client_flags != policy->cond_client_flags) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS &&
		    !(compared_policy->cond_traffic_class.start_tc <= policy->cond_traffic_class.start_tc &&
		    compared_policy->cond_traffic_class.end_tc >= policy->cond_traffic_class.end_tc)) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
			if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
				if (!necp_is_range_in_range((struct sockaddr *)&policy->cond_local_start, (struct sockaddr *)&policy->cond_local_end, (struct sockaddr *)&compared_policy->cond_local_start, (struct sockaddr *)&compared_policy->cond_local_end)) {
					continue;
				}
			} else if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
				if (compared_policy->cond_local_prefix > policy->cond_local_prefix ||
				    !necp_is_addr_in_subnet((struct sockaddr *)&policy->cond_local_start, (struct sockaddr *)&compared_policy->cond_local_start, compared_policy->cond_local_prefix)) {
					continue;
				}
			}
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
			if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
				if (!necp_is_range_in_range((struct sockaddr *)&policy->cond_remote_start, (struct sockaddr *)&policy->cond_remote_end, (struct sockaddr *)&compared_policy->cond_remote_start, (struct sockaddr *)&compared_policy->cond_remote_end)) {
					continue;
				}
			} else if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
				if (compared_policy->cond_remote_prefix > policy->cond_remote_prefix ||
				    !necp_is_addr_in_subnet((struct sockaddr *)&policy->cond_remote_start, (struct sockaddr *)&compared_policy->cond_remote_start, compared_policy->cond_remote_prefix)) {
					continue;
				}
			}
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_AGENT_TYPE &&
		    memcmp(&compared_policy->cond_agent_type, &policy->cond_agent_type, sizeof(policy->cond_agent_type)) == 0) {
			continue;
		}

		return TRUE;
	}

	return FALSE;
}
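// A policy is "unnecessary" when an earlier entry in the candidate array is strictly more
// general (its condition mask is a subset of this policy's, with matching values and
// negations) and its result overlaps this one, so the matcher would never need to consult
// this policy. Skip results carve out a window in which later policies are kept even if
// they would otherwise be shadowed.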
static bool
necp_kernel_socket_policies_reprocess(void)
{
	int app_i;
	int bucket_allocation_counts[NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS];
	int bucket_current_free_index[NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS];
	int app_layer_allocation_count = 0;
	int app_layer_current_free_index = 0;
	struct necp_kernel_socket_policy *kernel_policy = NULL;

	LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	necp_kernel_application_policies_condition_mask = 0;
	necp_kernel_socket_policies_condition_mask = 0;
	necp_kernel_application_policies_count = 0;
	necp_kernel_socket_policies_count = 0;
	necp_kernel_socket_policies_non_app_count = 0;

	// Reset all maps to NULL
	for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) {
		if (necp_kernel_socket_policies_map[app_i] != NULL) {
			FREE(necp_kernel_socket_policies_map[app_i], M_NECP);
			necp_kernel_socket_policies_map[app_i] = NULL;
		}

		bucket_allocation_counts[app_i] = 0;
	}
	if (necp_kernel_socket_policies_app_layer_map != NULL) {
		FREE(necp_kernel_socket_policies_app_layer_map, M_NECP);
		necp_kernel_socket_policies_app_layer_map = NULL;
	}

	// Create masks and counts
	LIST_FOREACH(kernel_policy, &necp_kernel_socket_policies, chain) {
		// App layer mask/count
		necp_kernel_application_policies_condition_mask |= kernel_policy->condition_mask;
		necp_kernel_application_policies_count++;
		app_layer_allocation_count++;

		if ((kernel_policy->condition_mask & NECP_KERNEL_CONDITION_AGENT_TYPE)) {
			// Agent type conditions only apply to app layer
			continue;
		}

		// Update socket layer bucket mask/counts
		necp_kernel_socket_policies_condition_mask |= kernel_policy->condition_mask;
		necp_kernel_socket_policies_count++;

		if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID) ||
		    kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_APP_ID) {
			necp_kernel_socket_policies_non_app_count++;
			for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) {
				bucket_allocation_counts[app_i]++;
			}
		} else {
			bucket_allocation_counts[NECP_SOCKET_MAP_APP_ID_TO_BUCKET(kernel_policy->cond_app_id)]++;
		}
	}

	for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) {
		if (bucket_allocation_counts[app_i] > 0) {
			// Allocate a NULL-terminated array of policy pointers for each bucket
			MALLOC(necp_kernel_socket_policies_map[app_i], struct necp_kernel_socket_policy **, sizeof(struct necp_kernel_socket_policy *) * (bucket_allocation_counts[app_i] + 1), M_NECP, M_WAITOK);
			if (necp_kernel_socket_policies_map[app_i] == NULL) {
				goto fail;
			}

			// Initialize the first entry to NULL
			(necp_kernel_socket_policies_map[app_i])[0] = NULL;
		}
		bucket_current_free_index[app_i] = 0;
	}
	MALLOC(necp_kernel_socket_policies_app_layer_map, struct necp_kernel_socket_policy **, sizeof(struct necp_kernel_socket_policy *) * (app_layer_allocation_count + 1), M_NECP, M_WAITOK);
	if (necp_kernel_socket_policies_app_layer_map == NULL) {
		goto fail;
	}
	necp_kernel_socket_policies_app_layer_map[0] = NULL;

	LIST_FOREACH(kernel_policy, &necp_kernel_socket_policies, chain) {
		// Add app layer policies
		if (!necp_kernel_socket_policy_is_unnecessary(kernel_policy, necp_kernel_socket_policies_app_layer_map, app_layer_current_free_index)) {
			necp_kernel_socket_policies_app_layer_map[app_layer_current_free_index] = kernel_policy;
			app_layer_current_free_index++;
			necp_kernel_socket_policies_app_layer_map[app_layer_current_free_index] = NULL;
		}

		if ((kernel_policy->condition_mask & NECP_KERNEL_CONDITION_AGENT_TYPE)) {
			// Agent type conditions only apply to app layer
			continue;
		}

		// Add socket policies
		if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID) ||
		    kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_APP_ID) {
			for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) {
				if (!necp_kernel_socket_policy_is_unnecessary(kernel_policy, necp_kernel_socket_policies_map[app_i], bucket_current_free_index[app_i])) {
					(necp_kernel_socket_policies_map[app_i])[(bucket_current_free_index[app_i])] = kernel_policy;
					bucket_current_free_index[app_i]++;
					(necp_kernel_socket_policies_map[app_i])[(bucket_current_free_index[app_i])] = NULL;
				}
			}
		} else {
			app_i = NECP_SOCKET_MAP_APP_ID_TO_BUCKET(kernel_policy->cond_app_id);
			if (!necp_kernel_socket_policy_is_unnecessary(kernel_policy, necp_kernel_socket_policies_map[app_i], bucket_current_free_index[app_i])) {
				(necp_kernel_socket_policies_map[app_i])[(bucket_current_free_index[app_i])] = kernel_policy;
				bucket_current_free_index[app_i]++;
				(necp_kernel_socket_policies_map[app_i])[(bucket_current_free_index[app_i])] = NULL;
			}
		}
	}

	necp_kernel_socket_policies_dump_all();
	BUMP_KERNEL_SOCKET_POLICIES_GENERATION_COUNT();
	return TRUE;

fail:
	// Free memory, reset masks to 0
	necp_kernel_application_policies_condition_mask = 0;
	necp_kernel_socket_policies_condition_mask = 0;
	necp_kernel_application_policies_count = 0;
	necp_kernel_socket_policies_count = 0;
	necp_kernel_socket_policies_non_app_count = 0;
	for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) {
		if (necp_kernel_socket_policies_map[app_i] != NULL) {
			FREE(necp_kernel_socket_policies_map[app_i], M_NECP);
			necp_kernel_socket_policies_map[app_i] = NULL;
		}
	}
	if (necp_kernel_socket_policies_app_layer_map != NULL) {
		FREE(necp_kernel_socket_policies_app_layer_map, M_NECP);
		necp_kernel_socket_policies_app_layer_map = NULL;
	}
	return FALSE;
}
4625 necp_get_new_string_id(void)
4627 static u_int32_t necp_last_string_id = 0;
4629 u_int32_t newid = 0;
4631 LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
4633 bool wrapped = FALSE;
4635 necp_last_string_id++;
4636 if (necp_last_string_id < 1) {
4638 // Already wrapped, give up
4639 NECPLOG0(LOG_ERR, "Failed to find a free app UUID.\n");
4642 necp_last_string_id = 1;
4645 newid = necp_last_string_id;
4646 } while (necp_lookup_string_with_id_locked(&necp_account_id_list, newid) != NULL); // If already used, keep trying
4649 NECPLOG0(LOG_ERR, "Allocate string id failed.\n");
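/*
 * Illustrative sketch (not part of the original source): the string-ID
 * allocator above follows a "monotonic counter with wrap-and-retry" pattern.
 * A minimal user-space analogue, assuming a hypothetical id_in_use() lookup
 * helper, would look like this:
 *
 *     #include <stdint.h>
 *     #include <stdbool.h>
 *
 *     extern bool id_in_use(uint32_t id);   // hypothetical lookup helper
 *
 *     static uint32_t
 *     get_new_id(void)
 *     {
 *         static uint32_t last_id = 0;
 *         bool wrapped = false;
 *         uint32_t newid;
 *         do {
 *             if (++last_id < 1) {          // counter wrapped past UINT32_MAX
 *                 if (wrapped) {
 *                     return 0;             // give up: every ID is in use
 *                 }
 *                 wrapped = true;
 *                 last_id = 1;              // restart at the first valid ID
 *             }
 *             newid = last_id;
 *         } while (id_in_use(newid));       // skip IDs still referenced
 *         return newid;
 *     }
 *
 * The kernel version additionally asserts that necp_kernel_policy_lock is
 * held exclusively, since the static counter is shared global state.
 */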
4656 static struct necp_string_id_mapping *
4657 necp_lookup_string_to_id_locked(struct necp_string_id_mapping_list *list, char *string)
4659 struct necp_string_id_mapping *searchentry = NULL;
4660 struct necp_string_id_mapping *foundentry = NULL;
4662 LIST_FOREACH(searchentry, list, chain) {
4663 if (strcmp(searchentry->string, string) == 0) {
4664 foundentry = searchentry;
4672 static struct necp_string_id_mapping *
4673 necp_lookup_string_with_id_locked(struct necp_string_id_mapping_list *list, u_int32_t local_id)
4675 struct necp_string_id_mapping *searchentry = NULL;
4676 struct necp_string_id_mapping *foundentry = NULL;
4678 LIST_FOREACH(searchentry, list, chain) {
4679 if (searchentry->id == local_id) {
4680 foundentry = searchentry;
4689 necp_create_string_to_id_mapping(struct necp_string_id_mapping_list *list, char *string)
4691 u_int32_t string_id = 0;
4692 struct necp_string_id_mapping *existing_mapping = NULL;
4694 LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
4696 existing_mapping = necp_lookup_string_to_id_locked(list, string);
4697 if (existing_mapping != NULL) {
4698 string_id = existing_mapping->id;
4699 os_ref_retain_locked(&existing_mapping->refcount);
4701 struct necp_string_id_mapping *new_mapping = NULL;
4702 MALLOC(new_mapping, struct necp_string_id_mapping *, sizeof(struct necp_string_id_mapping), M_NECP, M_WAITOK);
4703 if (new_mapping != NULL) {
4704 memset(new_mapping, 0, sizeof(struct necp_string_id_mapping));
4706 size_t length = strlen(string) + 1;
4707 MALLOC(new_mapping->string, char *, length, M_NECP, M_WAITOK);
4708 if (new_mapping->string != NULL) {
4709 memcpy(new_mapping->string, string, length);
4710 new_mapping->id = necp_get_new_string_id();
4711 os_ref_init(&new_mapping->refcount, &necp_refgrp);
4712 LIST_INSERT_HEAD(list, new_mapping, chain);
4713 string_id = new_mapping->id;
4715 FREE(new_mapping, M_NECP);
4724 necp_remove_string_to_id_mapping(struct necp_string_id_mapping_list *list, char *string)
4726 struct necp_string_id_mapping *existing_mapping = NULL;
4728 LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
4730 existing_mapping = necp_lookup_string_to_id_locked(list, string);
4731 if (existing_mapping != NULL) {
4732 if (os_ref_release_locked(&existing_mapping->refcount) == 0) {
4733 LIST_REMOVE(existing_mapping, chain);
4734 FREE(existing_mapping->string, M_NECP);
4735 FREE(existing_mapping, M_NECP);
4743 #define NECP_FIRST_VALID_ROUTE_RULE_ID 1
4744 #define NECP_FIRST_VALID_AGGREGATE_ROUTE_RULE_ID UINT16_MAX
4746 necp_get_new_route_rule_id(bool aggregate)
4748 static u_int32_t necp_last_route_rule_id = 0;
4749 static u_int32_t necp_last_aggregate_route_rule_id = 0;
4751 u_int32_t newid = 0;
4754 // Main necp_kernel_policy_lock protects non-aggregate rule IDs
4755 LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
4757 bool wrapped = FALSE;
4759 necp_last_route_rule_id++;
4760 if (necp_last_route_rule_id < NECP_FIRST_VALID_ROUTE_RULE_ID ||
4761 necp_last_route_rule_id >= NECP_FIRST_VALID_AGGREGATE_ROUTE_RULE_ID) {
4763 // Already wrapped, give up
4764 NECPLOG0(LOG_ERR, "Failed to find a free route rule id.\n");
4767 necp_last_route_rule_id = NECP_FIRST_VALID_ROUTE_RULE_ID;
4770 newid = necp_last_route_rule_id;
4771 } while (necp_lookup_route_rule_locked(&necp_route_rules, newid) != NULL); // If already used, keep trying
4773 // necp_route_rule_lock protects aggregate rule IDs
4774 LCK_RW_ASSERT(&necp_route_rule_lock, LCK_RW_ASSERT_EXCLUSIVE);
4776 bool wrapped = FALSE;
4778 necp_last_aggregate_route_rule_id++;
4779 if (necp_last_aggregate_route_rule_id < NECP_FIRST_VALID_AGGREGATE_ROUTE_RULE_ID) {
4781 // Already wrapped, give up
4782 NECPLOG0(LOG_ERR, "Failed to find a free aggregate route rule id.\n");
4785 necp_last_aggregate_route_rule_id = NECP_FIRST_VALID_AGGREGATE_ROUTE_RULE_ID;
4788 newid = necp_last_aggregate_route_rule_id;
4789 } while (necp_lookup_route_rule_locked(&necp_route_rules, newid) != NULL); // If already used, keep trying
4793 NECPLOG0(LOG_ERR, "Allocate route rule ID failed.\n");
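/*
 * Illustrative note (not part of the original source): route rule IDs are
 * split into two disjoint ranges so a bare u_int32_t can encode which kind
 * of rule it names. Non-aggregate IDs are allocated in
 * [NECP_FIRST_VALID_ROUTE_RULE_ID, NECP_FIRST_VALID_AGGREGATE_ROUTE_RULE_ID),
 * i.e. [1, UINT16_MAX), under necp_kernel_policy_lock; aggregate IDs start at
 * UINT16_MAX and are protected by necp_route_rule_lock. A hypothetical
 * classification helper built only on those constants would be:
 *
 *     static bool
 *     route_rule_id_is_aggregate(u_int32_t route_rule_id)
 *     {
 *         return route_rule_id >= NECP_FIRST_VALID_AGGREGATE_ROUTE_RULE_ID;
 *     }
 */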
4800 static struct necp_route_rule *
4801 necp_lookup_route_rule_locked(struct necp_route_rule_list *list, u_int32_t route_rule_id)
4803 struct necp_route_rule *searchentry = NULL;
4804 struct necp_route_rule *foundentry = NULL;
4806 LIST_FOREACH(searchentry, list, chain) {
4807 if (searchentry->id == route_rule_id) {
4808 foundentry = searchentry;
4816 static struct necp_route_rule
*
4817 necp_lookup_route_rule_by_contents_locked(struct necp_route_rule_list
*list
, u_int32_t default_action
, u_int8_t cellular_action
, u_int8_t wifi_action
, u_int8_t wired_action
, u_int8_t expensive_action
, u_int8_t constrained_action
, u_int32_t
*if_indices
, u_int8_t
*if_actions
)
4819 struct necp_route_rule
*searchentry
= NULL
;
4820 struct necp_route_rule
*foundentry
= NULL
;
4822 LIST_FOREACH(searchentry
, list
, chain
) {
4823 if (searchentry
->default_action
== default_action
&&
4824 searchentry
->cellular_action
== cellular_action
&&
4825 searchentry
->wifi_action
== wifi_action
&&
4826 searchentry
->wired_action
== wired_action
&&
4827 searchentry
->expensive_action
== expensive_action
&&
4828 searchentry
->constrained_action
== constrained_action
) {
4829 bool match_failed
= FALSE
;
4834 for (index_a
= 0; index_a
< MAX_ROUTE_RULE_INTERFACES
; index_a
++) {
4835 bool found_index
= FALSE
;
4836 if (searchentry
->exception_if_indices
[index_a
] == 0) {
4840 for (index_b
= 0; index_b
< MAX_ROUTE_RULE_INTERFACES
; index_b
++) {
4841 if (if_indices
[index_b
] == 0) {
4844 if (index_b
>= count_b
) {
4845 count_b
= index_b
+ 1;
4847 if (searchentry
->exception_if_indices
[index_a
] == if_indices
[index_b
] &&
4848 searchentry
->exception_if_actions
[index_a
] == if_actions
[index_b
]) {
4854 match_failed
= TRUE
;
4858 if (!match_failed
&& count_a
== count_b
) {
4859 foundentry
= searchentry
;
4869 necp_create_route_rule(struct necp_route_rule_list
*list
, u_int8_t
*route_rules_array
, u_int32_t route_rules_array_size
)
4872 u_int32_t route_rule_id
= 0;
4873 struct necp_route_rule
*existing_rule
= NULL
;
4874 u_int32_t default_action
= NECP_ROUTE_RULE_ALLOW_INTERFACE
;
4875 u_int8_t cellular_action
= NECP_ROUTE_RULE_NONE
;
4876 u_int8_t wifi_action
= NECP_ROUTE_RULE_NONE
;
4877 u_int8_t wired_action
= NECP_ROUTE_RULE_NONE
;
4878 u_int8_t expensive_action
= NECP_ROUTE_RULE_NONE
;
4879 u_int8_t constrained_action
= NECP_ROUTE_RULE_NONE
;
4880 u_int32_t if_indices
[MAX_ROUTE_RULE_INTERFACES
];
4881 size_t num_valid_indices
= 0;
4882 memset(&if_indices
, 0, sizeof(if_indices
));
4883 u_int8_t if_actions
[MAX_ROUTE_RULE_INTERFACES
];
4884 memset(&if_actions
, 0, sizeof(if_actions
));
4886 LCK_RW_ASSERT(&necp_kernel_policy_lock
, LCK_RW_ASSERT_EXCLUSIVE
);
4888 if (route_rules_array
== NULL
|| route_rules_array_size
== 0) {
4893 while (offset
< route_rules_array_size
) {
4894 ifnet_t rule_interface
= NULL
;
4895 char interface_name
[IFXNAMSIZ
];
4896 u_int32_t length
= 0;
4897 u_int8_t
*value
= necp_buffer_get_tlv_value(route_rules_array
, offset
, &length
);
4899 u_int8_t rule_type
= necp_policy_condition_get_type_from_buffer(value
, length
);
4900 u_int8_t rule_flags
= necp_policy_condition_get_flags_from_buffer(value
, length
);
4901 u_int32_t rule_length
= necp_policy_condition_get_value_length_from_buffer(value
, length
);
4902 u_int8_t
*rule_value
= necp_policy_condition_get_value_pointer_from_buffer(value
, length
);
4904 if (rule_type
== NECP_ROUTE_RULE_NONE
) {
4905 // Don't allow an explicit rule to be None action
4909 if (rule_length
== 0) {
4910 if (rule_flags
& NECP_ROUTE_RULE_FLAG_CELLULAR
) {
4911 cellular_action
= rule_type
;
4913 if (rule_flags
& NECP_ROUTE_RULE_FLAG_WIFI
) {
4914 wifi_action
= rule_type
;
4916 if (rule_flags
& NECP_ROUTE_RULE_FLAG_WIRED
) {
4917 wired_action
= rule_type
;
4919 if (rule_flags
& NECP_ROUTE_RULE_FLAG_EXPENSIVE
) {
4920 expensive_action
= rule_type
;
4922 if (rule_flags
& NECP_ROUTE_RULE_FLAG_CONSTRAINED
) {
4923 constrained_action
= rule_type
;
4925 if (rule_flags
== 0) {
4926 default_action
= rule_type
;
4928 offset
+= sizeof(u_int8_t
) + sizeof(u_int32_t
) + length
;
4932 if (num_valid_indices
>= MAX_ROUTE_RULE_INTERFACES
) {
4933 offset
+= sizeof(u_int8_t
) + sizeof(u_int32_t
) + length
;
4937 if (rule_length
<= IFXNAMSIZ
) {
4938 memcpy(interface_name
, rule_value
, rule_length
);
4939 interface_name
[rule_length
- 1] = 0; // Make sure the string is NULL terminated
4940 if (ifnet_find_by_name(interface_name
, &rule_interface
) == 0) {
4941 if_actions
[num_valid_indices
] = rule_type
;
4942 if_indices
[num_valid_indices
++] = rule_interface
->if_index
;
4943 ifnet_release(rule_interface
);
4946 offset
+= sizeof(u_int8_t
) + sizeof(u_int32_t
) + length
;
4949 existing_rule
= necp_lookup_route_rule_by_contents_locked(list
, default_action
, cellular_action
, wifi_action
, wired_action
, expensive_action
, constrained_action
, if_indices
, if_actions
);
4950 if (existing_rule
!= NULL
) {
4951 route_rule_id
= existing_rule
->id
;
4952 os_ref_retain_locked(&existing_rule
->refcount
);
4954 struct necp_route_rule
*new_rule
= NULL
;
4955 MALLOC(new_rule
, struct necp_route_rule
*, sizeof(struct necp_route_rule
), M_NECP
, M_WAITOK
);
4956 if (new_rule
!= NULL
) {
4957 memset(new_rule
, 0, sizeof(struct necp_route_rule
));
4958 route_rule_id
= new_rule
->id
= necp_get_new_route_rule_id(false);
4959 new_rule
->default_action
= default_action
;
4960 new_rule
->cellular_action
= cellular_action
;
4961 new_rule
->wifi_action
= wifi_action
;
4962 new_rule
->wired_action
= wired_action
;
4963 new_rule
->expensive_action
= expensive_action
;
4964 new_rule
->constrained_action
= constrained_action
;
4965 memcpy(&new_rule
->exception_if_indices
, &if_indices
, sizeof(if_indices
));
4966 memcpy(&new_rule
->exception_if_actions
, &if_actions
, sizeof(if_actions
));
4967 os_ref_init(&new_rule
->refcount
, &necp_refgrp
);
4968 LIST_INSERT_HEAD(list
, new_rule
, chain
);
4971 return route_rule_id
;
4975 necp_remove_aggregate_route_rule_for_id(u_int32_t rule_id)
4978 lck_rw_lock_exclusive(&necp_route_rule_lock);
4980 struct necp_aggregate_route_rule *existing_rule = NULL;
4981 struct necp_aggregate_route_rule *tmp_rule = NULL;
4983 LIST_FOREACH_SAFE(existing_rule, &necp_aggregate_route_rules, chain, tmp_rule) {
4985 for (index = 0; index < MAX_AGGREGATE_ROUTE_RULES; index++) {
4986 u_int32_t route_rule_id = existing_rule->rule_ids[index];
4987 if (route_rule_id == rule_id) {
4988 LIST_REMOVE(existing_rule, chain);
4989 FREE(existing_rule, M_NECP);
4995 lck_rw_done(&necp_route_rule_lock);
5000 necp_remove_route_rule(struct necp_route_rule_list *list, u_int32_t route_rule_id)
5002 struct necp_route_rule *existing_rule = NULL;
5004 LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
5006 existing_rule = necp_lookup_route_rule_locked(list, route_rule_id);
5007 if (existing_rule != NULL) {
5008 if (os_ref_release_locked(&existing_rule->refcount) == 0) {
5009 necp_remove_aggregate_route_rule_for_id(existing_rule->id);
5010 LIST_REMOVE(existing_rule, chain);
5011 FREE(existing_rule, M_NECP);
5019 static struct necp_aggregate_route_rule *
5020 necp_lookup_aggregate_route_rule_locked(u_int32_t route_rule_id)
5022 struct necp_aggregate_route_rule *searchentry = NULL;
5023 struct necp_aggregate_route_rule *foundentry = NULL;
5025 lck_rw_lock_shared(&necp_route_rule_lock);
5027 LIST_FOREACH(searchentry, &necp_aggregate_route_rules, chain) {
5028 if (searchentry->id == route_rule_id) {
5029 foundentry = searchentry;
5034 lck_rw_done(&necp_route_rule_lock);
5040 necp_create_aggregate_route_rule(u_int32_t *rule_ids)
5042 u_int32_t aggregate_route_rule_id = 0;
5043 struct necp_aggregate_route_rule *new_rule = NULL;
5044 struct necp_aggregate_route_rule *existing_rule = NULL;
5046 lck_rw_lock_exclusive(&necp_route_rule_lock);
5048 // Check if the rule already exists
5049 LIST_FOREACH(existing_rule, &necp_aggregate_route_rules, chain) {
5050 if (memcmp(existing_rule->rule_ids, rule_ids, (sizeof(u_int32_t) * MAX_AGGREGATE_ROUTE_RULES)) == 0) {
5051 lck_rw_done(&necp_route_rule_lock);
5052 return existing_rule->id;
5056 MALLOC(new_rule, struct necp_aggregate_route_rule *, sizeof(struct necp_aggregate_route_rule), M_NECP, M_WAITOK);
5057 if (new_rule != NULL) {
5058 memset(new_rule, 0, sizeof(struct necp_aggregate_route_rule));
5059 aggregate_route_rule_id = new_rule->id = necp_get_new_route_rule_id(true);
5060 new_rule->id = aggregate_route_rule_id;
5061 memcpy(new_rule->rule_ids, rule_ids, (sizeof(u_int32_t) * MAX_AGGREGATE_ROUTE_RULES));
5062 LIST_INSERT_HEAD(&necp_aggregate_route_rules, new_rule, chain);
5064 lck_rw_done(&necp_route_rule_lock);
5066 return aggregate_route_rule_id;
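/*
 * Illustrative note (not part of the original source): aggregate rules are
 * deduplicated with a memcmp() over the full, fixed-size rule_ids array, so
 * a caller is expected to pass a zero-padded array of exactly
 * MAX_AGGREGATE_ROUTE_RULES entries in a stable order, e.g. (hypothetical):
 *
 *     u_int32_t rule_ids[MAX_AGGREGATE_ROUTE_RULES] = { 0 };
 *     rule_ids[0] = first_route_rule_id;     // assumed already-created rule IDs
 *     rule_ids[1] = second_route_rule_id;
 *     u_int32_t aggregate_id = necp_create_aggregate_route_rule(rule_ids);
 *
 * Calling it again with the same array contents returns the existing
 * aggregate ID instead of allocating a new entry.
 */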
5069 #define NECP_NULL_SERVICE_ID 1
5070 #define NECP_FIRST_VALID_SERVICE_ID 2
5071 #define NECP_FIRST_VALID_APP_ID UINT16_MAX
5073 necp_get_new_uuid_id(bool service)
5075 static u_int32_t necp_last_service_uuid_id = 0;
5076 static u_int32_t necp_last_app_uuid_id = 0;
5078 u_int32_t newid = 0;
5080 LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
5083 bool wrapped = FALSE;
5085 necp_last_service_uuid_id++;
5086 if (necp_last_service_uuid_id < NECP_FIRST_VALID_SERVICE_ID ||
5087 necp_last_service_uuid_id >= NECP_FIRST_VALID_APP_ID) {
5089 // Already wrapped, give up
5090 NECPLOG0(LOG_ERR, "Failed to find a free service UUID.\n");
5091 return NECP_NULL_SERVICE_ID;
5093 necp_last_service_uuid_id = NECP_FIRST_VALID_SERVICE_ID;
5096 newid = necp_last_service_uuid_id;
5097 } while (necp_uuid_lookup_uuid_with_service_id_locked(newid) != NULL); // If already used, keep trying
5099 bool wrapped = FALSE;
5101 necp_last_app_uuid_id++;
5102 if (necp_last_app_uuid_id < NECP_FIRST_VALID_APP_ID) {
5104 // Already wrapped, give up
5105 NECPLOG0(LOG_ERR, "Failed to find a free app UUID.\n");
5106 return NECP_NULL_SERVICE_ID;
5108 necp_last_app_uuid_id = NECP_FIRST_VALID_APP_ID;
5111 newid = necp_last_app_uuid_id;
5112 } while (necp_uuid_lookup_uuid_with_app_id_locked(newid) != NULL); // If already used, keep trying
5115 if (newid == NECP_NULL_SERVICE_ID) {
5116 NECPLOG0(LOG_ERR, "Allocate uuid ID failed.\n");
5117 return NECP_NULL_SERVICE_ID;
5123 static struct necp_uuid_id_mapping
*
5124 necp_uuid_lookup_app_id_locked(uuid_t uuid
)
5126 struct necp_uuid_id_mapping
*searchentry
= NULL
;
5127 struct necp_uuid_id_mapping
*foundentry
= NULL
;
5129 LIST_FOREACH(searchentry
, APPUUIDHASH(uuid
), chain
) {
5130 if (uuid_compare(searchentry
->uuid
, uuid
) == 0) {
5131 foundentry
= searchentry
;
5139 static struct necp_uuid_id_mapping
*
5140 necp_uuid_lookup_uuid_with_app_id_locked(u_int32_t local_id
)
5142 struct necp_uuid_id_mapping
*searchentry
= NULL
;
5143 struct necp_uuid_id_mapping
*foundentry
= NULL
;
5145 struct necp_uuid_id_mapping_head
*uuid_list_head
= NULL
;
5146 for (uuid_list_head
= &necp_uuid_app_id_hashtbl
[necp_uuid_app_id_hash_num_buckets
- 1]; uuid_list_head
>= necp_uuid_app_id_hashtbl
; uuid_list_head
--) {
5147 LIST_FOREACH(searchentry
, uuid_list_head
, chain
) {
5148 if (searchentry
->id
== local_id
) {
5149 foundentry
= searchentry
;
5159 necp_create_uuid_app_id_mapping(uuid_t uuid
, bool *allocated_mapping
, bool uuid_policy_table
)
5161 u_int32_t local_id
= 0;
5162 struct necp_uuid_id_mapping
*existing_mapping
= NULL
;
5164 LCK_RW_ASSERT(&necp_kernel_policy_lock
, LCK_RW_ASSERT_EXCLUSIVE
);
5166 if (allocated_mapping
) {
5167 *allocated_mapping
= FALSE
;
5170 existing_mapping
= necp_uuid_lookup_app_id_locked(uuid
);
5171 if (existing_mapping
!= NULL
) {
5172 local_id
= existing_mapping
->id
;
5173 os_ref_retain_locked(&existing_mapping
->refcount
);
5174 if (uuid_policy_table
) {
5175 existing_mapping
->table_usecount
++;
5178 struct necp_uuid_id_mapping
*new_mapping
= NULL
;
5179 MALLOC(new_mapping
, struct necp_uuid_id_mapping
*, sizeof(*new_mapping
), M_NECP
, M_WAITOK
);
5180 if (new_mapping
!= NULL
) {
5181 uuid_copy(new_mapping
->uuid
, uuid
);
5182 new_mapping
->id
= necp_get_new_uuid_id(false);
5183 os_ref_init(&new_mapping
->refcount
, &necp_refgrp
);
5184 if (uuid_policy_table
) {
5185 new_mapping
->table_usecount
= 1;
5187 new_mapping
->table_usecount
= 0;
5190 LIST_INSERT_HEAD(APPUUIDHASH(uuid
), new_mapping
, chain
);
5192 if (allocated_mapping
) {
5193 *allocated_mapping
= TRUE
;
5196 local_id
= new_mapping
->id
;
5204 necp_remove_uuid_app_id_mapping(uuid_t uuid
, bool *removed_mapping
, bool uuid_policy_table
)
5206 struct necp_uuid_id_mapping
*existing_mapping
= NULL
;
5208 LCK_RW_ASSERT(&necp_kernel_policy_lock
, LCK_RW_ASSERT_EXCLUSIVE
);
5210 if (removed_mapping
) {
5211 *removed_mapping
= FALSE
;
5214 existing_mapping
= necp_uuid_lookup_app_id_locked(uuid
);
5215 if (existing_mapping
!= NULL
) {
5216 if (uuid_policy_table
) {
5217 existing_mapping
->table_usecount
--;
5219 if (os_ref_release_locked(&existing_mapping
->refcount
) == 0) {
5220 LIST_REMOVE(existing_mapping
, chain
);
5221 FREE(existing_mapping
, M_NECP
);
5222 if (removed_mapping
) {
5223 *removed_mapping
= TRUE
;
5232 static struct necp_uuid_id_mapping *
5233 necp_uuid_get_null_service_id_mapping(void)
5235 static struct necp_uuid_id_mapping null_mapping;
5236 uuid_clear(null_mapping.uuid);
5237 null_mapping.id = NECP_NULL_SERVICE_ID;
5239 return &null_mapping;
5242 static struct necp_uuid_id_mapping *
5243 necp_uuid_lookup_service_id_locked(uuid_t uuid)
5245 struct necp_uuid_id_mapping *searchentry = NULL;
5246 struct necp_uuid_id_mapping *foundentry = NULL;
5248 if (uuid_is_null(uuid)) {
5249 return necp_uuid_get_null_service_id_mapping();
5252 LIST_FOREACH(searchentry, &necp_uuid_service_id_list, chain) {
5253 if (uuid_compare(searchentry->uuid, uuid) == 0) {
5254 foundentry = searchentry;
5262 static struct necp_uuid_id_mapping *
5263 necp_uuid_lookup_uuid_with_service_id_locked(u_int32_t local_id)
5265 struct necp_uuid_id_mapping *searchentry = NULL;
5266 struct necp_uuid_id_mapping *foundentry = NULL;
5268 if (local_id == NECP_NULL_SERVICE_ID) {
5269 return necp_uuid_get_null_service_id_mapping();
5272 LIST_FOREACH(searchentry, &necp_uuid_service_id_list, chain) {
5273 if (searchentry->id == local_id) {
5274 foundentry = searchentry;
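/*
 * Illustrative note (not part of the original source): the null (all-zero)
 * service UUID is handled with a static sentinel mapping rather than a list
 * entry, so lookups in both directions short-circuit without allocation:
 *
 *     uuid_t null_uuid;
 *     uuid_clear(null_uuid);
 *     // both of these return the same static mapping with id == NECP_NULL_SERVICE_ID
 *     struct necp_uuid_id_mapping *a = necp_uuid_lookup_service_id_locked(null_uuid);
 *     struct necp_uuid_id_mapping *b = necp_uuid_lookup_uuid_with_service_id_locked(NECP_NULL_SERVICE_ID);
 */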
5283 necp_create_uuid_service_id_mapping(uuid_t uuid
)
5285 u_int32_t local_id
= 0;
5286 struct necp_uuid_id_mapping
*existing_mapping
= NULL
;
5288 if (uuid_is_null(uuid
)) {
5289 return NECP_NULL_SERVICE_ID
;
5292 LCK_RW_ASSERT(&necp_kernel_policy_lock
, LCK_RW_ASSERT_EXCLUSIVE
);
5294 existing_mapping
= necp_uuid_lookup_service_id_locked(uuid
);
5295 if (existing_mapping
!= NULL
) {
5296 local_id
= existing_mapping
->id
;
5297 os_ref_retain_locked(&existing_mapping
->refcount
);
5299 struct necp_uuid_id_mapping
*new_mapping
= NULL
;
5300 MALLOC(new_mapping
, struct necp_uuid_id_mapping
*, sizeof(*new_mapping
), M_NECP
, M_WAITOK
);
5301 if (new_mapping
!= NULL
) {
5302 uuid_copy(new_mapping
->uuid
, uuid
);
5303 new_mapping
->id
= necp_get_new_uuid_id(true);
5304 os_ref_init(&new_mapping
->refcount
, &necp_refgrp
);
5306 LIST_INSERT_HEAD(&necp_uuid_service_id_list
, new_mapping
, chain
);
5308 local_id
= new_mapping
->id
;
5316 necp_remove_uuid_service_id_mapping(uuid_t uuid
)
5318 struct necp_uuid_id_mapping
*existing_mapping
= NULL
;
5320 if (uuid_is_null(uuid
)) {
5324 LCK_RW_ASSERT(&necp_kernel_policy_lock
, LCK_RW_ASSERT_EXCLUSIVE
);
5326 existing_mapping
= necp_uuid_lookup_service_id_locked(uuid
);
5327 if (existing_mapping
!= NULL
) {
5328 if (os_ref_release_locked(&existing_mapping
->refcount
) == 0) {
5329 LIST_REMOVE(existing_mapping
, chain
);
5330 FREE(existing_mapping
, M_NECP
);
5340 necp_kernel_socket_policies_update_uuid_table(void)
5342 LCK_RW_ASSERT(&necp_kernel_policy_lock
, LCK_RW_ASSERT_EXCLUSIVE
);
5344 if (necp_uuid_app_id_mappings_dirty
) {
5345 if (proc_uuid_policy_kernel(PROC_UUID_POLICY_OPERATION_CLEAR
, NULL
, PROC_UUID_NECP_APP_POLICY
) < 0) {
5346 NECPLOG0(LOG_DEBUG
, "Error clearing uuids from policy table\n");
5350 if (necp_num_uuid_app_id_mappings
> 0) {
5351 struct necp_uuid_id_mapping_head
*uuid_list_head
= NULL
;
5352 for (uuid_list_head
= &necp_uuid_app_id_hashtbl
[necp_uuid_app_id_hash_num_buckets
- 1]; uuid_list_head
>= necp_uuid_app_id_hashtbl
; uuid_list_head
--) {
5353 struct necp_uuid_id_mapping
*mapping
= NULL
;
5354 LIST_FOREACH(mapping
, uuid_list_head
, chain
) {
5355 if (mapping
->table_usecount
> 0 &&
5356 proc_uuid_policy_kernel(PROC_UUID_POLICY_OPERATION_ADD
, mapping
->uuid
, PROC_UUID_NECP_APP_POLICY
) < 0) {
5357 NECPLOG0(LOG_DEBUG
, "Error adding uuid to policy table\n");
5363 necp_uuid_app_id_mappings_dirty
= FALSE
;
5369 #define NECP_KERNEL_VALID_IP_OUTPUT_CONDITIONS (NECP_KERNEL_CONDITION_ALL_INTERFACES | NECP_KERNEL_CONDITION_BOUND_INTERFACE | NECP_KERNEL_CONDITION_PROTOCOL | NECP_KERNEL_CONDITION_LOCAL_START | NECP_KERNEL_CONDITION_LOCAL_END | NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_REMOTE_START | NECP_KERNEL_CONDITION_REMOTE_END | NECP_KERNEL_CONDITION_REMOTE_PREFIX | NECP_KERNEL_CONDITION_POLICY_ID | NECP_KERNEL_CONDITION_LAST_INTERFACE | NECP_KERNEL_CONDITION_LOCAL_NETWORKS)
5370 static necp_kernel_policy_id
5371 necp_kernel_ip_output_policy_add(necp_policy_order order
, necp_policy_order suborder
, u_int32_t session_order
, int session_pid
, u_int32_t condition_mask
, u_int32_t condition_negated_mask
, necp_kernel_policy_id cond_policy_id
, ifnet_t cond_bound_interface
, u_int32_t cond_last_interface_index
, u_int16_t cond_protocol
, union necp_sockaddr_union
*cond_local_start
, union necp_sockaddr_union
*cond_local_end
, u_int8_t cond_local_prefix
, union necp_sockaddr_union
*cond_remote_start
, union necp_sockaddr_union
*cond_remote_end
, u_int8_t cond_remote_prefix
, necp_kernel_policy_result result
, necp_kernel_policy_result_parameter result_parameter
)
5373 struct necp_kernel_ip_output_policy
*new_kernel_policy
= NULL
;
5374 struct necp_kernel_ip_output_policy
*tmp_kernel_policy
= NULL
;
5376 MALLOC_ZONE(new_kernel_policy
, struct necp_kernel_ip_output_policy
*, sizeof(*new_kernel_policy
), M_NECP_IP_POLICY
, M_WAITOK
);
5377 if (new_kernel_policy
== NULL
) {
5381 memset(new_kernel_policy
, 0, sizeof(*new_kernel_policy
)); // M_ZERO is not supported for MALLOC_ZONE
5382 new_kernel_policy
->id
= necp_kernel_policy_get_new_id(false);
5383 new_kernel_policy
->suborder
= suborder
;
5384 new_kernel_policy
->order
= order
;
5385 new_kernel_policy
->session_order
= session_order
;
5386 new_kernel_policy
->session_pid
= session_pid
;
5388 // Sanitize condition mask
5389 new_kernel_policy
->condition_mask
= (condition_mask
& NECP_KERNEL_VALID_IP_OUTPUT_CONDITIONS
);
5390 if ((new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_ALL_INTERFACES
) && (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_BOUND_INTERFACE
)) {
5391 new_kernel_policy
->condition_mask
&= ~NECP_KERNEL_CONDITION_BOUND_INTERFACE
;
5393 if ((new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_END
) && (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_PREFIX
)) {
5394 new_kernel_policy
->condition_mask
&= ~NECP_KERNEL_CONDITION_LOCAL_PREFIX
;
5396 if ((new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_REMOTE_END
) && (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_REMOTE_PREFIX
)) {
5397 new_kernel_policy
->condition_mask
&= ~NECP_KERNEL_CONDITION_REMOTE_PREFIX
;
5399 new_kernel_policy
->condition_negated_mask
= condition_negated_mask
& new_kernel_policy
->condition_mask
;
5401 // Set condition values
5402 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_POLICY_ID
) {
5403 new_kernel_policy
->cond_policy_id
= cond_policy_id
;
5405 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_BOUND_INTERFACE
) {
5406 if (cond_bound_interface
) {
5407 ifnet_reference(cond_bound_interface
);
5409 new_kernel_policy
->cond_bound_interface
= cond_bound_interface
;
5411 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_LAST_INTERFACE
) {
5412 new_kernel_policy
->cond_last_interface_index
= cond_last_interface_index
;
5414 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_PROTOCOL
) {
5415 new_kernel_policy
->cond_protocol
= cond_protocol
;
5417 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_START
) {
5418 memcpy(&new_kernel_policy
->cond_local_start
, cond_local_start
, cond_local_start
->sa
.sa_len
);
5420 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_END
) {
5421 memcpy(&new_kernel_policy
->cond_local_end
, cond_local_end
, cond_local_end
->sa
.sa_len
);
5423 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_PREFIX
) {
5424 new_kernel_policy
->cond_local_prefix
= cond_local_prefix
;
5426 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_REMOTE_START
) {
5427 memcpy(&new_kernel_policy
->cond_remote_start
, cond_remote_start
, cond_remote_start
->sa
.sa_len
);
5429 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_REMOTE_END
) {
5430 memcpy(&new_kernel_policy
->cond_remote_end
, cond_remote_end
, cond_remote_end
->sa
.sa_len
);
5432 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_REMOTE_PREFIX
) {
5433 new_kernel_policy
->cond_remote_prefix
= cond_remote_prefix
;
5436 new_kernel_policy
->result
= result
;
5437 memcpy(&new_kernel_policy
->result_parameter
, &result_parameter
, sizeof(result_parameter
));
5440 NECPLOG(LOG_DEBUG
, "Added kernel policy: ip output, id=%d, mask=%x\n", new_kernel_policy
->id
, new_kernel_policy
->condition_mask
);
5442 LIST_INSERT_SORTED_THRICE_ASCENDING(&necp_kernel_ip_output_policies
, new_kernel_policy
, chain
, session_order
, order
, suborder
, tmp_kernel_policy
);
5444 return new_kernel_policy
? new_kernel_policy
->id
: 0;
5447 static struct necp_kernel_ip_output_policy
*
5448 necp_kernel_ip_output_policy_find(necp_kernel_policy_id policy_id
)
5450 struct necp_kernel_ip_output_policy
*kernel_policy
= NULL
;
5451 struct necp_kernel_ip_output_policy
*tmp_kernel_policy
= NULL
;
5453 if (policy_id
== 0) {
5457 LIST_FOREACH_SAFE(kernel_policy
, &necp_kernel_ip_output_policies
, chain
, tmp_kernel_policy
) {
5458 if (kernel_policy
->id
== policy_id
) {
5459 return kernel_policy
;
5467 necp_kernel_ip_output_policy_delete(necp_kernel_policy_id policy_id
)
5469 struct necp_kernel_ip_output_policy
*policy
= NULL
;
5471 LCK_RW_ASSERT(&necp_kernel_policy_lock
, LCK_RW_ASSERT_EXCLUSIVE
);
5473 policy
= necp_kernel_ip_output_policy_find(policy_id
);
5475 LIST_REMOVE(policy
, chain
);
5477 if (policy
->cond_bound_interface
) {
5478 ifnet_release(policy
->cond_bound_interface
);
5479 policy
->cond_bound_interface
= NULL
;
5482 FREE_ZONE(policy
, sizeof(*policy
), M_NECP_IP_POLICY
);
5490 necp_kernel_ip_output_policies_dump_all(void)
5493 struct necp_kernel_ip_output_policy
*policy
= NULL
;
5496 char result_string
[MAX_RESULT_STRING_LEN
];
5497 char proc_name_string
[MAXCOMLEN
+ 1];
5498 memset(result_string
, 0, MAX_RESULT_STRING_LEN
);
5499 memset(proc_name_string
, 0, MAXCOMLEN
+ 1);
5501 NECPLOG0(LOG_DEBUG
, "NECP IP Output Policies:\n");
5502 NECPLOG0(LOG_DEBUG
, "-----------\n");
5503 for (id_i
= 0; id_i
< NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS
; id_i
++) {
5504 NECPLOG(LOG_DEBUG
, " ID Bucket: %d\n", id_i
);
5505 for (policy_i
= 0; necp_kernel_ip_output_policies_map
[id_i
] != NULL
&& (necp_kernel_ip_output_policies_map
[id_i
])[policy_i
] != NULL
; policy_i
++) {
5506 policy
= (necp_kernel_ip_output_policies_map
[id_i
])[policy_i
];
5507 proc_name(policy
->session_pid
, proc_name_string
, MAXCOMLEN
);
5508 NECPLOG(LOG_DEBUG
, "\t%3d. Policy ID: %5d\tProcess: %10.10s\tOrder: %04d.%04d.%d\tMask: %5x\tResult: %s\n", policy_i
, policy
->id
, proc_name_string
, policy
->session_order
, policy
->order
, policy
->suborder
, policy
->condition_mask
, necp_get_result_description(result_string
, policy
->result
, policy
->result_parameter
));
5510 NECPLOG0(LOG_DEBUG
, "-----------\n");
5516 necp_kernel_ip_output_policy_results_overlap(struct necp_kernel_ip_output_policy
*upper_policy
, struct necp_kernel_ip_output_policy
*lower_policy
)
5518 if (upper_policy
->result
== NECP_KERNEL_POLICY_RESULT_SKIP
) {
5519 if (upper_policy
->session_order
!= lower_policy
->session_order
) {
5520 // A skip cannot override a policy of a different session
5523 if (upper_policy
->result_parameter
.skip_policy_order
== 0 ||
5524 lower_policy
->order
>= upper_policy
->result_parameter
.skip_policy_order
) {
5525 // This policy is beyond the skip
5528 // This policy is inside the skip
5534 // All other IP Output policy results (drop, tunnel, hard pass) currently overlap
5539 necp_kernel_ip_output_policy_is_unnecessary(struct necp_kernel_ip_output_policy
*policy
, struct necp_kernel_ip_output_policy
**policy_array
, int valid_indices
)
5541 bool can_skip
= FALSE
;
5542 u_int32_t highest_skip_session_order
= 0;
5543 u_int32_t highest_skip_order
= 0;
5545 for (i
= 0; i
< valid_indices
; i
++) {
5546 struct necp_kernel_ip_output_policy
*compared_policy
= policy_array
[i
];
5548 // For policies in a skip window, we can't mark conflicting policies as unnecessary
5550 if (highest_skip_session_order
!= compared_policy
->session_order
||
5551 (highest_skip_order
!= 0 && compared_policy
->order
>= highest_skip_order
)) {
5552 // If we've moved on to the next session, or passed the skip window
5553 highest_skip_session_order
= 0;
5554 highest_skip_order
= 0;
5557 // If this policy is also a skip, it can increase the skip window
5558 if (compared_policy
->result
== NECP_KERNEL_POLICY_RESULT_SKIP
) {
5559 if (compared_policy
->result_parameter
.skip_policy_order
> highest_skip_order
) {
5560 highest_skip_order
= compared_policy
->result_parameter
.skip_policy_order
;
5567 if (compared_policy
->result
== NECP_KERNEL_POLICY_RESULT_SKIP
) {
5568 // This policy is a skip. Set the skip window accordingly
5570 highest_skip_session_order
= compared_policy
->session_order
;
5571 highest_skip_order
= compared_policy
->result_parameter
.skip_policy_order
;
5574 // The result of the compared policy must be able to block out this policy result
5575 if (!necp_kernel_ip_output_policy_results_overlap(compared_policy
, policy
)) {
5579 // If new policy matches All Interfaces, compared policy must also
5580 if ((policy
->condition_mask
& NECP_KERNEL_CONDITION_ALL_INTERFACES
) && !(compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_ALL_INTERFACES
)) {
5584 // If new policy matches Local Networks, compared policy must also
5585 if ((policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_NETWORKS
) && !(compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_NETWORKS
)) {
5589 // A default policy (empty condition mask) always makes lower policies unnecessary
5590 if (compared_policy
->condition_mask
== 0) {
5594 // Compared must be more general than policy, and include only conditions within policy
5595 if ((policy
->condition_mask
& compared_policy
->condition_mask
) != compared_policy
->condition_mask
) {
5599 // Negative conditions must match for the overlapping conditions
5600 if ((policy
->condition_negated_mask
& compared_policy
->condition_mask
) != (compared_policy
->condition_negated_mask
& compared_policy
->condition_mask
)) {
5604 if (compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_POLICY_ID
&&
5605 compared_policy
->cond_policy_id
!= policy
->cond_policy_id
) {
5609 if (compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_BOUND_INTERFACE
&&
5610 compared_policy
->cond_bound_interface
!= policy
->cond_bound_interface
) {
5614 if (compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_PROTOCOL
&&
5615 compared_policy
->cond_protocol
!= policy
->cond_protocol
) {
5619 if (compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_START
) {
5620 if (compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_END
) {
5621 if (!necp_is_range_in_range((struct sockaddr
*)&policy
->cond_local_start
, (struct sockaddr
*)&policy
->cond_local_end
, (struct sockaddr
*)&compared_policy
->cond_local_start
, (struct sockaddr
*)&compared_policy
->cond_local_end
)) {
5624 } else if (compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_PREFIX
) {
5625 if (compared_policy
->cond_local_prefix
> policy
->cond_local_prefix
||
5626 !necp_is_addr_in_subnet((struct sockaddr
*)&policy
->cond_local_start
, (struct sockaddr
*)&compared_policy
->cond_local_start
, compared_policy
->cond_local_prefix
)) {
5632 if (compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_REMOTE_START
) {
5633 if (compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_REMOTE_END
) {
5634 if (!necp_is_range_in_range((struct sockaddr
*)&policy
->cond_remote_start
, (struct sockaddr
*)&policy
->cond_remote_end
, (struct sockaddr
*)&compared_policy
->cond_remote_start
, (struct sockaddr
*)&compared_policy
->cond_remote_end
)) {
5637 } else if (compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_REMOTE_PREFIX
) {
5638 if (compared_policy
->cond_remote_prefix
> policy
->cond_remote_prefix
||
5639 !necp_is_addr_in_subnet((struct sockaddr
*)&policy
->cond_remote_start
, (struct sockaddr
*)&compared_policy
->cond_remote_start
, compared_policy
->cond_remote_prefix
)) {
5652 necp_kernel_ip_output_policies_reprocess(void)
5655 int bucket_allocation_counts
[NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS
];
5656 int bucket_current_free_index
[NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS
];
5657 struct necp_kernel_ip_output_policy
*kernel_policy
= NULL
;
5659 LCK_RW_ASSERT(&necp_kernel_policy_lock
, LCK_RW_ASSERT_EXCLUSIVE
);
5662 necp_kernel_ip_output_policies_condition_mask
= 0;
5663 necp_kernel_ip_output_policies_count
= 0;
5664 necp_kernel_ip_output_policies_non_id_count
= 0;
5666 for (i
= 0; i
< NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS
; i
++) {
5667 if (necp_kernel_ip_output_policies_map
[i
] != NULL
) {
5668 FREE(necp_kernel_ip_output_policies_map
[i
], M_NECP
);
5669 necp_kernel_ip_output_policies_map
[i
] = NULL
;
5673 bucket_allocation_counts
[i
] = 0;
5676 LIST_FOREACH(kernel_policy
, &necp_kernel_ip_output_policies
, chain
) {
5678 necp_kernel_ip_output_policies_condition_mask
|= kernel_policy
->condition_mask
;
5679 necp_kernel_ip_output_policies_count
++;
5681 /* Update bucket counts:
5682 * Non-id and SKIP policies will be added to all buckets
5683 * Add local networks policy to all buckets for incoming IP
5685 if (!(kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_POLICY_ID
) ||
5686 (kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_NETWORKS
) ||
5687 kernel_policy
->result
== NECP_KERNEL_POLICY_RESULT_SKIP
) {
5688 for (i
= 0; i
< NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS
; i
++) {
5689 bucket_allocation_counts
[i
]++;
5692 if (!(kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_POLICY_ID
)) {
5693 necp_kernel_ip_output_policies_non_id_count
++;
5695 bucket_allocation_counts
[NECP_IP_OUTPUT_MAP_ID_TO_BUCKET(kernel_policy
->cond_policy_id
)]++;
5699 for (i
= 0; i
< NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS
; i
++) {
5700 if (bucket_allocation_counts
[i
] > 0) {
5701 // Allocate a NULL-terminated array of policy pointers for each bucket
5702 MALLOC(necp_kernel_ip_output_policies_map
[i
], struct necp_kernel_ip_output_policy
**, sizeof(struct necp_kernel_ip_output_policy
*) * (bucket_allocation_counts
[i
] + 1), M_NECP
, M_WAITOK
);
5703 if (necp_kernel_ip_output_policies_map
[i
] == NULL
) {
5707 // Initialize the first entry to NULL
5708 (necp_kernel_ip_output_policies_map
[i
])[0] = NULL
;
5710 bucket_current_free_index
[i
] = 0;
5713 LIST_FOREACH(kernel_policy
, &necp_kernel_ip_output_policies
, chain
) {
5714 // Insert pointers into map
5715 if (!(kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_POLICY_ID
) ||
5716 (kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_NETWORKS
) ||
5717 kernel_policy
->result
== NECP_KERNEL_POLICY_RESULT_SKIP
) {
5718 for (i
= 0; i
< NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS
; i
++) {
5719 if (!necp_kernel_ip_output_policy_is_unnecessary(kernel_policy
, necp_kernel_ip_output_policies_map
[i
], bucket_current_free_index
[i
])) {
5720 (necp_kernel_ip_output_policies_map
[i
])[(bucket_current_free_index
[i
])] = kernel_policy
;
5721 bucket_current_free_index
[i
]++;
5722 (necp_kernel_ip_output_policies_map
[i
])[(bucket_current_free_index
[i
])] = NULL
;
5726 i
= NECP_IP_OUTPUT_MAP_ID_TO_BUCKET(kernel_policy
->cond_policy_id
);
5727 if (!necp_kernel_ip_output_policy_is_unnecessary(kernel_policy
, necp_kernel_ip_output_policies_map
[i
], bucket_current_free_index
[i
])) {
5728 (necp_kernel_ip_output_policies_map
[i
])[(bucket_current_free_index
[i
])] = kernel_policy
;
5729 bucket_current_free_index
[i
]++;
5730 (necp_kernel_ip_output_policies_map
[i
])[(bucket_current_free_index
[i
])] = NULL
;
5734 necp_kernel_ip_output_policies_dump_all();
5738 // Free memory, reset mask to 0
5739 necp_kernel_ip_output_policies_condition_mask
= 0;
5740 necp_kernel_ip_output_policies_count
= 0;
5741 necp_kernel_ip_output_policies_non_id_count
= 0;
5742 for (i
= 0; i
< NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS
; i
++) {
5743 if (necp_kernel_ip_output_policies_map
[i
] != NULL
) {
5744 FREE(necp_kernel_ip_output_policies_map
[i
], M_NECP
);
5745 necp_kernel_ip_output_policies_map
[i
] = NULL
;
5751 // Outbound Policy Matching
5752 // ---------------------
5758 static struct substring
5759 necp_trim_dots_and_stars(char *string, size_t length)
5761 struct substring sub;
5762 sub.string = string;
5763 sub.length = string ? length : 0;
5765 while (sub.length && (sub.string[0] == '.' || sub.string[0] == '*')) {
5770 while (sub.length && (sub.string[sub.length - 1] == '.' || sub.string[sub.length - 1] == '*')) {
5778 necp_create_trimmed_domain(char *string, size_t length)
5780 char *trimmed_domain = NULL;
5781 struct substring sub = necp_trim_dots_and_stars(string, length);
5783 MALLOC(trimmed_domain, char *, sub.length + 1, M_NECP, M_WAITOK);
5784 if (trimmed_domain == NULL) {
5788 memcpy(trimmed_domain, sub.string, sub.length);
5789 trimmed_domain[sub.length] = 0;
5791 return trimmed_domain;
5795 necp_count_dots(char *string, size_t length)
5800 for (i = 0; i < length; i++) {
5801 if (string[i] == '.') {
5810 necp_check_suffix(struct substring parent, struct substring suffix, bool require_dot_before_suffix)
5812 if (parent.length <= suffix.length) {
5816 size_t length_difference = (parent.length - suffix.length);
5818 if (require_dot_before_suffix) {
5819 if (((char *)(parent.string + length_difference - 1))[0] != '.') {
5824 // strncasecmp does case-insensitive check for all UTF-8 strings (ignores non-ASCII characters)
5825 return strncasecmp(parent.string + length_difference, suffix.string, suffix.length) == 0;
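/*
 * Illustrative example (not part of the original source): with
 * require_dot_before_suffix set, the character immediately preceding the
 * suffix inside the parent string must be '.', which prevents partial-label
 * matches:
 *
 *     parent = "mail.example.com", suffix = "example.com"
 *         -> length_difference = 5, parent.string[4] == '.', and the last
 *            11 bytes compare equal under strncasecmp: returns TRUE
 *     parent = "badexample.com",  suffix = "example.com"
 *         -> length_difference = 3, parent.string[2] == 'd', not '.':
 *            returns FALSE
 */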
5829 necp_hostname_matches_domain(struct substring hostname_substring, u_int8_t hostname_dot_count, char *domain, u_int8_t domain_dot_count)
5831 if (hostname_substring.string == NULL || domain == NULL) {
5832 return hostname_substring.string == domain;
5835 struct substring domain_substring;
5836 domain_substring.string = domain;
5837 domain_substring.length = strlen(domain);
5839 if (hostname_dot_count == domain_dot_count) {
5840 // strncasecmp does case-insensitive check for all UTF-8 strings (ignores non-ASCII characters)
5841 if (hostname_substring.length == domain_substring.length &&
5842 strncasecmp(hostname_substring.string, domain_substring.string, hostname_substring.length) == 0) {
5845 } else if (domain_dot_count < hostname_dot_count) {
5846 if (necp_check_suffix(hostname_substring, domain_substring, TRUE)) {
5855 net_domain_contains_hostname(char *hostname_string, char *domain_string)
5857 if (hostname_string == NULL ||
5858 domain_string == NULL) {
5862 struct substring hostname_substring;
5863 hostname_substring.string = hostname_string;
5864 hostname_substring.length = strlen(hostname_string);
5866 return necp_hostname_matches_domain(hostname_substring,
5867 necp_count_dots(hostname_string, hostname_substring.length),
5869 necp_count_dots(domain_string, strlen(domain_string)));
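/*
 * Illustrative example (not part of the original source): domain matching
 * compares dot counts first, then either requires an exact case-insensitive
 * match or a dot-delimited suffix match:
 *
 *     net_domain_contains_hostname("www.example.com", "example.com")     -> TRUE  (suffix match)
 *     net_domain_contains_hostname("WWW.EXAMPLE.COM", "www.example.com") -> TRUE  (same dot count, case-insensitive)
 *     net_domain_contains_hostname("www.badexample.com", "example.com")  -> FALSE (no '.' before the suffix)
 *     net_domain_contains_hostname("example.com", "www.example.com")     -> FALSE (domain has more dots than hostname)
 */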
5872 #define NECP_MAX_STRING_LEN 1024
5875 necp_copy_string(char *string, size_t length)
5877 char *copied_string = NULL;
5879 if (length > NECP_MAX_STRING_LEN) {
5883 MALLOC(copied_string, char *, length + 1, M_NECP, M_WAITOK);
5884 if (copied_string == NULL) {
5888 memcpy(copied_string, string, length);
5889 copied_string[length] = 0;
5891 return copied_string;
5895 necp_get_primary_direct_interface_index(void)
5897 u_int32_t interface_index = IFSCOPE_NONE;
5899 ifnet_head_lock_shared();
5900 struct ifnet *ordered_interface = NULL;
5901 TAILQ_FOREACH(ordered_interface, &ifnet_ordered_head, if_ordered_link) {
5902 const u_int8_t functional_type = if_functional_type(ordered_interface, TRUE);
5903 if (functional_type != IFRTYPE_FUNCTIONAL_UNKNOWN &&
5904 functional_type != IFRTYPE_FUNCTIONAL_LOOPBACK) {
5905 // All known, non-loopback functional types represent direct physical interfaces (Wi-Fi, Cellular, Wired)
5906 interface_index = ordered_interface->if_index;
5912 return interface_index;
5916 necp_get_parent_cred_result(proc_t proc, struct necp_socket_info *info)
5918 task_t task = proc_task(proc ? proc : current_proc());
5919 coalition_t coal = task_get_coalition(task, COALITION_TYPE_JETSAM);
5921 if (coal == COALITION_NULL || coalition_is_leader(task, coal)) {
5922 // No parent, nothing to do
5926 task_t lead_task = coalition_get_leader(coal);
5927 if (lead_task != NULL) {
5928 proc_t lead_proc = get_bsdtask_info(lead_task);
5929 if (lead_proc != NULL) {
5930 kauth_cred_t lead_cred = kauth_cred_proc_ref(lead_proc);
5931 if (lead_cred != NULL) {
5932 errno_t cred_result = priv_check_cred(lead_cred, PRIV_NET_PRIVILEGED_NECP_MATCH, 0);
5933 kauth_cred_unref(&lead_cred);
5934 info->cred_result = cred_result;
5937 task_deallocate(lead_task);
5941 #define NECP_KERNEL_ADDRESS_TYPE_CONDITIONS (NECP_KERNEL_CONDITION_LOCAL_START | NECP_KERNEL_CONDITION_LOCAL_END | NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_REMOTE_START | NECP_KERNEL_CONDITION_REMOTE_END | NECP_KERNEL_CONDITION_REMOTE_PREFIX | NECP_KERNEL_CONDITION_LOCAL_EMPTY | NECP_KERNEL_CONDITION_REMOTE_EMPTY | NECP_KERNEL_CONDITION_LOCAL_NETWORKS)
5943 necp_application_fillout_info_locked(uuid_t application_uuid
, uuid_t real_application_uuid
, uuid_t responsible_application_uuid
, char *account
, char *domain
, pid_t pid
, uid_t uid
, u_int16_t protocol
, u_int32_t bound_interface_index
, u_int32_t traffic_class
, union necp_sockaddr_union
*local_addr
, union necp_sockaddr_union
*remote_addr
, u_int16_t local_port
, u_int16_t remote_port
, bool has_client
, proc_t proc
, u_int32_t drop_order
, u_int32_t client_flags
, struct necp_socket_info
*info
)
5945 memset(info
, 0, sizeof(struct necp_socket_info
));
5949 info
->protocol
= protocol
;
5950 info
->bound_interface_index
= bound_interface_index
;
5951 info
->traffic_class
= traffic_class
;
5952 info
->has_client
= has_client
;
5953 info
->drop_order
= drop_order
;
5954 info
->client_flags
= client_flags
;
5956 if (necp_kernel_application_policies_condition_mask
& NECP_KERNEL_CONDITION_ENTITLEMENT
&& proc
!= NULL
) {
5957 info
->cred_result
= priv_check_cred(proc_ucred(proc
), PRIV_NET_PRIVILEGED_NECP_MATCH
, 0);
5958 if (info
->cred_result
!= 0) {
5959 // Process does not have entitlement, check the parent process
5960 necp_get_parent_cred_result(proc
, info
);
5964 if (necp_kernel_application_policies_condition_mask
& NECP_KERNEL_CONDITION_PLATFORM_BINARY
&& proc
!= NULL
) {
5965 info
->is_platform_binary
= csproc_get_platform_binary(proc
) ? true : false;
5968 if (necp_kernel_application_policies_condition_mask
& NECP_KERNEL_CONDITION_APP_ID
&& !uuid_is_null(application_uuid
)) {
5969 struct necp_uuid_id_mapping
*existing_mapping
= necp_uuid_lookup_app_id_locked(application_uuid
);
5970 if (existing_mapping
) {
5971 info
->application_id
= existing_mapping
->id
;
5975 if (necp_kernel_application_policies_condition_mask
& NECP_KERNEL_CONDITION_REAL_APP_ID
&& !uuid_is_null(real_application_uuid
)) {
5976 if (uuid_compare(application_uuid
, real_application_uuid
) == 0) {
5977 info
->real_application_id
= info
->application_id
;
5979 struct necp_uuid_id_mapping
*existing_mapping
= necp_uuid_lookup_app_id_locked(real_application_uuid
);
5980 if (existing_mapping
) {
5981 info
->real_application_id
= existing_mapping
->id
;
5986 if (necp_kernel_application_policies_condition_mask
& NECP_KERNEL_CONDITION_APP_ID
&& !uuid_is_null(responsible_application_uuid
)) {
5987 struct necp_uuid_id_mapping
*existing_mapping
= necp_uuid_lookup_app_id_locked(responsible_application_uuid
);
5988 if (existing_mapping
!= NULL
) {
5989 info
->real_application_id
= info
->application_id
;
5990 info
->application_id
= existing_mapping
->id
;
5991 info
->used_responsible_pid
= true;
5995 if (necp_kernel_application_policies_condition_mask
& NECP_KERNEL_CONDITION_ACCOUNT_ID
&& account
!= NULL
) {
5996 struct necp_string_id_mapping
*existing_mapping
= necp_lookup_string_to_id_locked(&necp_account_id_list
, account
);
5997 if (existing_mapping
) {
5998 info
->account_id
= existing_mapping
->id
;
6002 if (necp_kernel_application_policies_condition_mask
& NECP_KERNEL_CONDITION_DOMAIN
) {
6003 info
->domain
= domain
;
6006 if (necp_kernel_application_policies_condition_mask
& NECP_KERNEL_ADDRESS_TYPE_CONDITIONS
) {
6007 if (local_addr
&& local_addr
->sa
.sa_len
> 0) {
6008 memcpy(&info
->local_addr
, local_addr
, local_addr
->sa
.sa_len
);
6009 if (local_port
!= 0) {
6010 info
->local_addr
.sin6
.sin6_port
= local_port
;
6012 } else if (local_port
!= 0) {
6013 info
->local_addr
.sin6
.sin6_len
= sizeof(struct sockaddr_in6
);
6014 info
->local_addr
.sin6
.sin6_family
= AF_INET6
;
6015 info
->local_addr
.sin6
.sin6_port
= local_port
;
6017 if (remote_addr
&& remote_addr
->sa
.sa_len
> 0) {
6018 memcpy(&info
->remote_addr
, remote_addr
, remote_addr
->sa
.sa_len
);
6019 if (remote_port
!= 0) {
6020 info
->remote_addr
.sin6
.sin6_port
= remote_port
;
6022 } else if (remote_port
!= 0) {
6023 info
->remote_addr
.sin6
.sin6_len
= sizeof(struct sockaddr_in6
);
6024 info
->remote_addr
.sin6
.sin6_family
= AF_INET6
;
6025 info
->remote_addr
.sin6
.sin6_port
= remote_port
;
6031 necp_send_application_interface_denied_event(pid_t pid, uuid_t proc_uuid, u_int32_t if_functional_type)
6033 struct kev_netpolicy_ifdenied ev_ifdenied;
6035 bzero(&ev_ifdenied, sizeof(ev_ifdenied));
6037 ev_ifdenied.ev_data.epid = pid;
6038 uuid_copy(ev_ifdenied.ev_data.euuid, proc_uuid);
6039 ev_ifdenied.ev_if_functional_type = if_functional_type;
6041 netpolicy_post_msg(KEV_NETPOLICY_IFDENIED, &ev_ifdenied.ev_data, sizeof(ev_ifdenied));
6044 extern char *proc_name_address(void *p);
6046 #define NECP_VERIFY_DELEGATION_ENTITLEMENT(_p, _d) \
6047 if (!has_checked_delegation_entitlement) { \
6048 has_delegation_entitlement = (priv_check_cred(proc_ucred(_p), PRIV_NET_PRIVILEGED_SOCKET_DELEGATE, 0) == 0); \
6049 has_checked_delegation_entitlement = TRUE; \
6051 if (!has_delegation_entitlement) { \
6052 NECPLOG(LOG_ERR, "%s(%d) does not hold the necessary entitlement to delegate network traffic for other processes by %s", \
6053 proc_name_address(_p), proc_pid(_p), _d); \
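/*
 * Illustrative note (not part of the original source): the macro above is a
 * statement fragment that relies on two locals declared by its caller
 * (has_checked_delegation_entitlement and has_delegation_entitlement, see
 * necp_application_find_policy_match_internal below). It checks
 * PRIV_NET_PRIVILEGED_SOCKET_DELEGATE at most once per match call and logs
 * when a delegated parameter is supplied without the entitlement, e.g.:
 *
 *     bool has_checked_delegation_entitlement = FALSE;
 *     bool has_delegation_entitlement = FALSE;
 *     ...
 *     NECP_VERIFY_DELEGATION_ENTITLEMENT(proc, "euuid");
 */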
6058 necp_application_find_policy_match_internal(proc_t proc
,
6059 u_int8_t
*parameters
,
6060 u_int32_t parameters_size
,
6061 struct necp_aggregate_result
*returned_result
,
6064 u_int required_interface_index
,
6065 const union necp_sockaddr_union
*override_local_addr
,
6066 const union necp_sockaddr_union
*override_remote_addr
,
6067 struct necp_client_endpoint
*returned_v4_gateway
,
6068 struct necp_client_endpoint
*returned_v6_gateway
,
6069 struct rtentry
**returned_route
, bool ignore_address
,
6071 uuid_t
*returned_override_euuid
)
6076 struct necp_kernel_socket_policy
*matched_policy
= NULL
;
6077 struct necp_socket_info info
;
6078 necp_kernel_policy_filter filter_control_unit
= 0;
6079 necp_kernel_policy_result service_action
= 0;
6080 necp_kernel_policy_service service
= { 0, 0 };
6082 u_int16_t protocol
= 0;
6083 u_int32_t bound_interface_index
= required_interface_index
;
6084 u_int32_t traffic_class
= 0;
6085 u_int32_t client_flags
= 0;
6086 union necp_sockaddr_union local_addr
;
6087 union necp_sockaddr_union remote_addr
;
6088 bool no_remote_addr
= FALSE
;
6089 u_int8_t remote_family
= 0;
6090 bool no_local_addr
= FALSE
;
6091 u_int16_t local_port
= 0;
6092 u_int16_t remote_port
= 0;
6093 necp_drop_all_bypass_check_result_t drop_all_bypass
= NECP_DROP_ALL_BYPASS_CHECK_RESULT_NONE
;
6095 if (override_local_addr
) {
6096 memcpy(&local_addr
, override_local_addr
, sizeof(local_addr
));
6098 memset(&local_addr
, 0, sizeof(local_addr
));
6100 if (override_remote_addr
) {
6101 memcpy(&remote_addr
, override_remote_addr
, sizeof(remote_addr
));
6103 memset(&remote_addr
, 0, sizeof(remote_addr
));
6106 // Initialize UID, PID, and UUIDs to the current process
6107 uid_t uid
= kauth_cred_getuid(proc_ucred(proc
));
6108 pid_t pid
= proc_pid(proc
);
6109 uuid_t application_uuid
;
6110 uuid_clear(application_uuid
);
6111 uuid_t real_application_uuid
;
6112 uuid_clear(real_application_uuid
);
6113 proc_getexecutableuuid(proc
, real_application_uuid
, sizeof(real_application_uuid
));
6114 uuid_copy(application_uuid
, real_application_uuid
);
6115 uuid_t responsible_application_uuid
;
6116 uuid_clear(responsible_application_uuid
);
6118 char *domain
= NULL
;
6119 char *account
= NULL
;
6121 #define NECP_MAX_REQUIRED_AGENTS 16
6122 u_int32_t num_required_agent_types
= 0;
6123 struct necp_client_parameter_netagent_type required_agent_types
[NECP_MAX_REQUIRED_AGENTS
];
6124 memset(&required_agent_types
, 0, sizeof(required_agent_types
));
6126 u_int32_t netagent_ids
[NECP_MAX_NETAGENTS
];
    u_int32_t netagent_use_flags[NECP_MAX_NETAGENTS];
    memset(&netagent_ids, 0, sizeof(netagent_ids));
    memset(&netagent_use_flags, 0, sizeof(netagent_use_flags));
    int netagent_cursor;

    bool has_checked_delegation_entitlement = FALSE;
    bool has_delegation_entitlement = FALSE;

#if defined(XNU_TARGET_OS_OSX)
    proc_t effective_proc = proc;
    bool release_eproc = false;
#endif /* defined(XNU_TARGET_OS_OSX) */

    if (returned_result == NULL) {

    if (returned_v4_gateway != NULL) {
        memset(returned_v4_gateway, 0, sizeof(struct necp_client_endpoint));

    if (returned_v6_gateway != NULL) {
        memset(returned_v6_gateway, 0, sizeof(struct necp_client_endpoint));

    if (returned_override_euuid != NULL) {
        uuid_clear(*returned_override_euuid);

    memset(returned_result, 0, sizeof(struct necp_aggregate_result));

    u_int32_t drop_order = necp_process_drop_order(proc_ucred(proc));

    necp_kernel_policy_result drop_dest_policy_result = NECP_KERNEL_POLICY_RESULT_NONE;

    lck_rw_lock_shared(&necp_kernel_policy_lock);
    if (necp_kernel_application_policies_count == 0) {
        if (necp_drop_all_order > 0 || drop_order > 0) {
            returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_DROP;
        lck_rw_done(&necp_kernel_policy_lock);
    lck_rw_done(&necp_kernel_policy_lock);

    while ((offset + sizeof(u_int8_t) + sizeof(u_int32_t)) <= parameters_size) {
        u_int8_t type = necp_buffer_get_tlv_type(parameters, offset);
        u_int32_t length = necp_buffer_get_tlv_length(parameters, offset);

        if (length > (parameters_size - (offset + sizeof(u_int8_t) + sizeof(u_int32_t)))) {
            // If the length is larger than what can fit in the remaining parameters size, bail
            NECPLOG(LOG_ERR, "Invalid TLV length (%u)", length);

        u_int8_t *value = necp_buffer_get_tlv_value(parameters, offset, NULL);
        if (value != NULL) {
            case NECP_CLIENT_PARAMETER_APPLICATION: {
                if (length >= sizeof(uuid_t)) {
                    if (uuid_compare(application_uuid, value) == 0) {
                    NECP_VERIFY_DELEGATION_ENTITLEMENT(proc, "euuid");
                    uuid_copy(application_uuid, value);
            case NECP_CLIENT_PARAMETER_REAL_APPLICATION: {
                if (length >= sizeof(uuid_t)) {
                    if (uuid_compare(real_application_uuid, value) == 0) {
                    NECP_VERIFY_DELEGATION_ENTITLEMENT(proc, "uuid");
                    uuid_copy(real_application_uuid, value);
            case NECP_CLIENT_PARAMETER_PID: {
                if (length >= sizeof(pid_t)) {
                    if (memcmp(&pid, value, sizeof(pid_t)) == 0) {
                    NECP_VERIFY_DELEGATION_ENTITLEMENT(proc, "pid");
                    memcpy(&pid, value, sizeof(pid_t));
            case NECP_CLIENT_PARAMETER_UID: {
                if (length >= sizeof(uid_t)) {
                    if (memcmp(&uid, value, sizeof(uid_t)) == 0) {
                    NECP_VERIFY_DELEGATION_ENTITLEMENT(proc, "uid");
                    memcpy(&uid, value, sizeof(uid_t));
            case NECP_CLIENT_PARAMETER_DOMAIN: {
                domain = (char *)value;
                domain[length - 1] = 0;
            case NECP_CLIENT_PARAMETER_ACCOUNT: {
                account = (char *)value;
                account[length - 1] = 0;
            case NECP_CLIENT_PARAMETER_TRAFFIC_CLASS: {
                if (length >= sizeof(u_int32_t)) {
                    memcpy(&traffic_class, value, sizeof(u_int32_t));
            case NECP_CLIENT_PARAMETER_IP_PROTOCOL: {
                if (length >= sizeof(u_int16_t)) {
                    memcpy(&protocol, value, sizeof(u_int16_t));
                } else if (length >= sizeof(u_int8_t)) {
                    memcpy(&protocol, value, sizeof(u_int8_t));
            case NECP_CLIENT_PARAMETER_BOUND_INTERFACE: {
                if (length <= IFXNAMSIZ && length > 0) {
                    ifnet_t bound_interface = NULL;
                    char interface_name[IFXNAMSIZ];
                    memcpy(interface_name, value, length);
                    interface_name[length - 1] = 0; // Make sure the string is NULL terminated
                    if (ifnet_find_by_name(interface_name, &bound_interface) == 0) {
                        bound_interface_index = bound_interface->if_index;
                        ifnet_release(bound_interface);
            case NECP_CLIENT_PARAMETER_LOCAL_ADDRESS: {
                if (ignore_address || override_local_addr) {
                if (length >= sizeof(struct necp_policy_condition_addr)) {
                    struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value;
                    if (necp_address_is_valid(&address_struct->address.sa)) {
                        memcpy(&local_addr, &address_struct->address, sizeof(address_struct->address));
            case NECP_CLIENT_PARAMETER_REMOTE_ADDRESS: {
                if (ignore_address || override_remote_addr) {
                if (length >= sizeof(struct necp_policy_condition_addr)) {
                    struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value;
                    if (necp_address_is_valid(&address_struct->address.sa)) {
                        memcpy(&remote_addr, &address_struct->address, sizeof(address_struct->address));
            case NECP_CLIENT_PARAMETER_LOCAL_ENDPOINT: {
                if (ignore_address || override_local_addr) {
                if (length >= sizeof(struct necp_client_endpoint)) {
                    struct necp_client_endpoint *endpoint = (struct necp_client_endpoint *)(void *)value;
                    if (endpoint->u.endpoint.endpoint_family == AF_UNSPEC &&
                        endpoint->u.endpoint.endpoint_port != 0) {
                        local_port = endpoint->u.endpoint.endpoint_port;
            case NECP_CLIENT_PARAMETER_REMOTE_ENDPOINT: {
                if (ignore_address || override_remote_addr) {
                if (length >= sizeof(struct necp_client_endpoint)) {
                    struct necp_client_endpoint *endpoint = (struct necp_client_endpoint *)(void *)value;
                    if (endpoint->u.endpoint.endpoint_family == AF_UNSPEC &&
                        endpoint->u.endpoint.endpoint_port != 0) {
                        remote_port = endpoint->u.endpoint.endpoint_port;
            case NECP_CLIENT_PARAMETER_FLAGS: {
                if (length >= sizeof(client_flags)) {
                    memcpy(&client_flags, value, sizeof(client_flags));
            case NECP_CLIENT_PARAMETER_REQUIRE_AGENT_TYPE:
            case NECP_CLIENT_PARAMETER_PREFER_AGENT_TYPE: {
                if (num_required_agent_types >= NECP_MAX_REQUIRED_AGENTS) {
                if (length >= sizeof(struct necp_client_parameter_netagent_type)) {
                    memcpy(&required_agent_types[num_required_agent_types], value, sizeof(struct necp_client_parameter_netagent_type));
                    num_required_agent_types++;

        offset += sizeof(u_int8_t) + sizeof(u_int32_t) + length;

    // Check for loopback exception
    if (necp_pass_loopback > 0 && necp_is_loopback(&local_addr.sa, &remote_addr.sa, NULL, NULL, bound_interface_index)) {
        returned_result->policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
        returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_PASS;
        returned_result->routed_interface_index = lo_ifp->if_index;
        *flags |= (NECP_CLIENT_RESULT_FLAG_IS_LOCAL | NECP_CLIENT_RESULT_FLAG_IS_DIRECT);
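
    // On macOS, resolve the effective process for this match (and look up any
    // responsible process) so that policies conditioned on the responsible
    // application UUID can be evaluated.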
#if defined(XNU_TARGET_OS_OSX)
    if (proc_pid(effective_proc) != pid) {
        proc_t found_proc = proc_find(pid);
        if (found_proc != PROC_NULL) {
            effective_proc = found_proc;
            release_eproc = true;

    if (effective_proc->p_responsible_pid > 0 && effective_proc->p_responsible_pid != pid) {
        proc_t responsible_proc = proc_find(effective_proc->p_responsible_pid);
        if (responsible_proc != PROC_NULL) {
            proc_getexecutableuuid(responsible_proc, responsible_application_uuid, sizeof(responsible_application_uuid));
            proc_rele(responsible_proc);

    if (release_eproc && effective_proc != PROC_NULL) {
        proc_rele(effective_proc);
#endif /* defined(XNU_TARGET_OS_OSX) */

    lck_rw_lock_shared(&necp_kernel_policy_lock);

    u_int32_t route_rule_id_array[MAX_AGGREGATE_ROUTE_RULES];
    size_t route_rule_id_array_count = 0;
    necp_application_fillout_info_locked(application_uuid, real_application_uuid, responsible_application_uuid, account, domain, pid, uid, protocol, bound_interface_index, traffic_class, &local_addr, &remote_addr, local_port, remote_port, has_client, proc, drop_order, client_flags, &info);
    matched_policy = necp_socket_find_policy_match_with_info_locked(necp_kernel_socket_policies_app_layer_map, &info, &filter_control_unit, route_rule_id_array, &route_rule_id_array_count, MAX_AGGREGATE_ROUTE_RULES, &service_action, &service, netagent_ids, netagent_use_flags, NECP_MAX_NETAGENTS, required_agent_types, num_required_agent_types, proc, NULL, NULL, &drop_dest_policy_result, &drop_all_bypass);
    if (matched_policy) {
        returned_result->policy_id = matched_policy->id;
        returned_result->routing_result = matched_policy->result;
        memcpy(&returned_result->routing_result_parameter, &matched_policy->result_parameter, sizeof(returned_result->routing_result_parameter));
        if (returned_override_euuid != NULL && info.used_responsible_pid && !(matched_policy->condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID)) {
            uuid_copy(*returned_override_euuid, responsible_application_uuid);

        bool drop_all = false;
        if (necp_drop_all_order > 0 || info.drop_order > 0 || drop_dest_policy_result == NECP_KERNEL_POLICY_RESULT_DROP) {
            // Mark socket as a drop if drop_all is set
            if (drop_all_bypass == NECP_DROP_ALL_BYPASS_CHECK_RESULT_NONE) {
                drop_all_bypass = necp_check_drop_all_bypass_result(proc);
        if (drop_all && drop_all_bypass == NECP_DROP_ALL_BYPASS_CHECK_RESULT_FALSE) {
            returned_result->policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
            returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_DROP;
            returned_result->policy_id = 0;
            returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_NONE;

    if (filter_control_unit == NECP_FILTER_UNIT_NO_FILTER) {
        returned_result->filter_control_unit = 0;
        returned_result->filter_control_unit = filter_control_unit;

    returned_result->service_action = service_action;

    // Handle trigger service
    if (service.identifier != 0) {
        struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(service.identifier);
        if (mapping != NULL) {
            struct necp_service_registration *service_registration = NULL;
            uuid_copy(returned_result->service_uuid, mapping->uuid);
            returned_result->service_data = service.data;
            if (service.identifier == NECP_NULL_SERVICE_ID) {
                // NULL service is always 'registered'
                returned_result->service_flags |= NECP_SERVICE_FLAGS_REGISTERED;
            LIST_FOREACH(service_registration, &necp_registered_service_list, kernel_chain) {
                if (service.identifier == service_registration->service_id) {
                    returned_result->service_flags |= NECP_SERVICE_FLAGS_REGISTERED;

    for (netagent_cursor = 0; netagent_cursor < NECP_MAX_NETAGENTS; netagent_cursor++) {
        struct necp_uuid_id_mapping *mapping = NULL;
        u_int32_t netagent_id = netagent_ids[netagent_cursor];
        if (netagent_id == 0) {
        mapping = necp_uuid_lookup_uuid_with_service_id_locked(netagent_id);
        if (mapping != NULL) {
            uuid_copy(returned_result->netagents[netagent_cursor], mapping->uuid);
            returned_result->netagent_use_flags[netagent_cursor] = netagent_use_flags[netagent_cursor];

    // Do routing evaluation
    u_int output_bound_interface = bound_interface_index;
    if (returned_result->routing_result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED) {
        output_bound_interface = returned_result->routing_result_parameter.scoped_interface_index;
    } else if (returned_result->routing_result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL) {
        output_bound_interface = returned_result->routing_result_parameter.tunnel_interface_index;
    } else if (returned_result->routing_result == NECP_KERNEL_POLICY_RESULT_SCOPED_DIRECT) {
        output_bound_interface = necp_get_primary_direct_interface_index();
        if (output_bound_interface == IFSCOPE_NONE) {
            returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_DROP;
            returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED;
            returned_result->routing_result_parameter.scoped_interface_index = output_bound_interface;

    if (local_addr.sa.sa_len == 0 ||
        (local_addr.sa.sa_family == AF_INET && local_addr.sin.sin_addr.s_addr == 0) ||
        (local_addr.sa.sa_family == AF_INET6 && IN6_IS_ADDR_UNSPECIFIED(&local_addr.sin6.sin6_addr))) {
        no_local_addr = TRUE;

    if (remote_addr.sa.sa_len == 0 ||
        (remote_addr.sa.sa_family == AF_INET && remote_addr.sin.sin_addr.s_addr == 0) ||
        (remote_addr.sa.sa_family == AF_INET6 && IN6_IS_ADDR_UNSPECIFIED(&remote_addr.sin6.sin6_addr))) {
        no_remote_addr = TRUE;
        remote_family = remote_addr.sa.sa_family;

    returned_result->routed_interface_index = 0;
    struct rtentry *rt = NULL;
    if (!no_local_addr && (client_flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER) != 0) {
        // Treat the output bound interface as the routed interface for local address
        // validation later.
        returned_result->routed_interface_index = output_bound_interface;

    if (no_remote_addr) {
        memset(&remote_addr, 0, sizeof(remote_addr));
        if (remote_family == AF_INET6) {
            // Reset address to ::
            remote_addr.sa.sa_family = AF_INET6;
            remote_addr.sa.sa_len = sizeof(struct sockaddr_in6);
            // Reset address to 0.0.0.0
            remote_addr.sa.sa_family = AF_INET;
            remote_addr.sa.sa_len = sizeof(struct sockaddr_in);

    rt = rtalloc1_scoped((struct sockaddr *)&remote_addr, 0, 0,
        output_bound_interface);

    if (remote_addr.sa.sa_family == AF_INET && rt != NULL &&
        IS_INTF_CLAT46(rt->rt_ifp)) {
        returned_result->routed_interface_index = 0;

    if (no_remote_addr && remote_family == AF_UNSPEC &&
        (rt == NULL || rt->rt_ifp == NULL)) {
        // Route lookup for default IPv4 failed, try IPv6

        // Cleanup old route if necessary

        // Reset address to ::
        memset(&remote_addr, 0, sizeof(remote_addr));
        remote_addr.sa.sa_family = AF_INET6;
        remote_addr.sa.sa_len = sizeof(struct sockaddr_in6);

        rt = rtalloc1_scoped((struct sockaddr *)&remote_addr, 0, 0,
            output_bound_interface);

        rt->rt_ifp != NULL) {
        returned_result->routed_interface_index = rt->rt_ifp->if_index;

        // For local addresses, we allow the interface scope to be
        // either the loopback interface or the interface hosting the
        if (bound_interface_index != IFSCOPE_NONE &&
            rt->rt_ifa != NULL && rt->rt_ifa->ifa_ifp &&
            (output_bound_interface == lo_ifp->if_index ||
            rt->rt_ifp->if_index == lo_ifp->if_index ||
            rt->rt_ifa->ifa_ifp->if_index == bound_interface_index)) {
            struct sockaddr_storage dst;
            unsigned int ifscope = bound_interface_index;

            // Transform dst into the internal routing table form
            (void) sa_copy((struct sockaddr *)&remote_addr,

            if ((rt->rt_ifp->if_index == lo_ifp->if_index) ||
                rt_ifa_is_dst((struct sockaddr *)&dst, rt->rt_ifa)) {
                returned_result->routed_interface_index = bound_interface_index;

    if (returned_result->routed_interface_index != 0 &&
        returned_result->routed_interface_index != lo_ifp->if_index && // Loopback can accept any local address
        // Transform local_addr into the ifaddr form
        // IPv6 Scope IDs are always embedded in the ifaddr list
        struct sockaddr_storage local_address_sanitized;
        u_int ifscope = IFSCOPE_NONE;
        (void)sa_copy(&local_addr.sa, &local_address_sanitized, &ifscope);
        SIN(&local_address_sanitized)->sin_port = 0;
        if (local_address_sanitized.ss_family == AF_INET6) {
            SIN6(&local_address_sanitized)->sin6_scope_id = 0;

        // Validate local address on routed interface
        struct ifaddr *ifa = ifa_ifwithaddr_scoped((struct sockaddr *)&local_address_sanitized, returned_result->routed_interface_index);
            // Interface address not found, reject route
            returned_result->routed_interface_index = 0;
        ifaddr_release(ifa);
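
    // Derive client result flags from the chosen route: local/direct
    // reachability, connectivity probing, recommended cellular MSS, link
    // quality, QoS-marking eligibility, low-power and throttled interfaces,
    // and IPv4/IPv6/NAT64 availability on the routed interface.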
    if (flags != NULL) {
        if ((client_flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER) == 0) {
            // Check for local/direct
            bool is_local = FALSE;
            if (rt != NULL && (rt->rt_flags & RTF_LOCAL)) {
            } else if (returned_result->routed_interface_index != 0 &&
                // Clean up the address before comparison with interface addresses
                // Transform remote_addr into the ifaddr form
                // IPv6 Scope IDs are always embedded in the ifaddr list
                struct sockaddr_storage remote_address_sanitized;
                u_int ifscope = IFSCOPE_NONE;
                (void)sa_copy(&remote_addr.sa, &remote_address_sanitized, &ifscope);
                SIN(&remote_address_sanitized)->sin_port = 0;
                if (remote_address_sanitized.ss_family == AF_INET6) {
                    SIN6(&remote_address_sanitized)->sin6_scope_id = 0;

                // Check if remote address is an interface address
                struct ifaddr *ifa = ifa_ifwithaddr((struct sockaddr *)&remote_address_sanitized);
                if (ifa != NULL && ifa->ifa_ifp != NULL) {
                    u_int if_index_for_remote_addr = ifa->ifa_ifp->if_index;
                    if (if_index_for_remote_addr == returned_result->routed_interface_index ||
                        if_index_for_remote_addr == lo_ifp->if_index) {
                ifaddr_release(ifa);

                *flags |= (NECP_CLIENT_RESULT_FLAG_IS_LOCAL | NECP_CLIENT_RESULT_FLAG_IS_DIRECT);
                !(rt->rt_flags & RTF_GATEWAY) &&
                (rt->rt_ifa && rt->rt_ifa->ifa_ifp && !(rt->rt_ifa->ifa_ifp->if_flags & IFF_POINTOPOINT))) {
                // Route is directly accessible
                *flags |= NECP_CLIENT_RESULT_FLAG_IS_DIRECT;

                rt->rt_ifp != NULL) {
                // Check probe status
                if (rt->rt_ifp->if_eflags & IFEF_PROBE_CONNECTIVITY) {
                    *flags |= NECP_CLIENT_RESULT_FLAG_PROBE_CONNECTIVITY;

                if (rt->rt_ifp->if_type == IFT_CELLULAR) {
                    struct if_cellular_status_v1 *ifsr;

                    ifnet_lock_shared(rt->rt_ifp);
                    lck_rw_lock_exclusive(&rt->rt_ifp->if_link_status_lock);

                    if (rt->rt_ifp->if_link_status != NULL) {
                        ifsr = &rt->rt_ifp->if_link_status->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;

                        if (ifsr->valid_bitmask & IF_CELL_UL_MSS_RECOMMENDED_VALID) {
                            if (ifsr->mss_recommended == IF_CELL_UL_MSS_RECOMMENDED_NONE) {
                                returned_result->mss_recommended = NECP_CLIENT_RESULT_RECOMMENDED_MSS_NONE;
                            } else if (ifsr->mss_recommended == IF_CELL_UL_MSS_RECOMMENDED_MEDIUM) {
                                returned_result->mss_recommended = NECP_CLIENT_RESULT_RECOMMENDED_MSS_MEDIUM;
                            } else if (ifsr->mss_recommended == IF_CELL_UL_MSS_RECOMMENDED_LOW) {
                                returned_result->mss_recommended = NECP_CLIENT_RESULT_RECOMMENDED_MSS_LOW;
                    lck_rw_done(&rt->rt_ifp->if_link_status_lock);
                    ifnet_lock_done(rt->rt_ifp);

                // Check link quality
                if ((client_flags & NECP_CLIENT_PARAMETER_FLAG_DISCRETIONARY) &&
                    (rt->rt_ifp->if_interface_state.valid_bitmask & IF_INTERFACE_STATE_LQM_STATE_VALID) &&
                    rt->rt_ifp->if_interface_state.lqm_state == IFNET_LQM_THRESH_ABORT) {
                    *flags |= NECP_CLIENT_RESULT_FLAG_LINK_QUALITY_ABORT;

                // Check QoS marking (fastlane)
                for (size_t route_rule_index = 0; route_rule_index < route_rule_id_array_count; route_rule_index++) {
                    if (necp_update_qos_marking(rt->rt_ifp, route_rule_id_array[route_rule_index])) {
                        *flags |= NECP_CLIENT_RESULT_FLAG_ALLOW_QOS_MARKING;
                        // If the route can use QoS markings, stop iterating route rules

                if (IFNET_IS_LOW_POWER(rt->rt_ifp)) {
                    *flags |= NECP_CLIENT_RESULT_FLAG_INTERFACE_LOW_POWER;

                if (traffic_class == SO_TC_BK_SYS) {
                    // Block BK_SYS traffic if interface is throttled
                    u_int32_t throttle_level = 0;
                    if (ifnet_get_throttle(rt->rt_ifp, &throttle_level) == 0) {
                        if (throttle_level == IFNET_THROTTLE_OPPORTUNISTIC) {
                            returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_DROP;
                            memset(&returned_result->routing_result_parameter, 0, sizeof(returned_result->routing_result_parameter));

        if (returned_result->routed_interface_index != 0) {
            union necp_sockaddr_union default_address;
            struct rtentry *v4Route = NULL;
            struct rtentry *v6Route = NULL;

            memset(&default_address, 0, sizeof(default_address));

            // Reset address to 0.0.0.0
            default_address.sa.sa_family = AF_INET;
            default_address.sa.sa_len = sizeof(struct sockaddr_in);
            v4Route = rtalloc1_scoped((struct sockaddr *)&default_address, 0, 0,
                returned_result->routed_interface_index);

            // Reset address to ::
            default_address.sa.sa_family = AF_INET6;
            default_address.sa.sa_len = sizeof(struct sockaddr_in6);
            v6Route = rtalloc1_scoped((struct sockaddr *)&default_address, 0, 0,
                returned_result->routed_interface_index);

            if (v4Route != NULL) {
                if (v4Route->rt_ifp != NULL && !IS_INTF_CLAT46(v4Route->rt_ifp)) {
                    *flags |= NECP_CLIENT_RESULT_FLAG_HAS_IPV4;
                if (returned_v4_gateway != NULL &&
                    v4Route->rt_gateway != NULL &&
                    v4Route->rt_gateway->sa_len == sizeof(returned_v4_gateway->u.sin)) {
                    memcpy(&returned_v4_gateway->u.sin, v4Route->rt_gateway, sizeof(returned_v4_gateway->u.sin));
                    memset(&returned_v4_gateway->u.sin.sin_zero, 0, sizeof(returned_v4_gateway->u.sin.sin_zero));

            if (v6Route != NULL) {
                if (v6Route->rt_ifp != NULL) {
                    *flags |= NECP_CLIENT_RESULT_FLAG_HAS_IPV6;
                    if (ifnet_get_nat64prefix(v6Route->rt_ifp, NULL) == 0) {
                        *flags |= NECP_CLIENT_RESULT_FLAG_HAS_NAT64;
                if (returned_v6_gateway != NULL &&
                    v6Route->rt_gateway != NULL &&
                    v6Route->rt_gateway->sa_len == sizeof(returned_v6_gateway->u.sin6)) {
                    memcpy(&returned_v6_gateway->u.sin6, v6Route->rt_gateway, sizeof(returned_v6_gateway->u.sin6));

    for (size_t route_rule_index = 0; route_rule_index < route_rule_id_array_count; route_rule_index++) {
        u_int32_t interface_type_denied = IFRTYPE_FUNCTIONAL_UNKNOWN;
        bool route_is_allowed = necp_route_is_allowed(rt, NULL, route_rule_id_array[route_rule_index], &interface_type_denied);
        if (!route_is_allowed) {
            // If the route is blocked, treat the lookup as a drop
            returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_DROP;
            memset(&returned_result->routing_result_parameter, 0, sizeof(returned_result->routing_result_parameter));

            if (interface_type_denied != IFRTYPE_FUNCTIONAL_UNKNOWN) {
                if (reason != NULL) {
                    if (interface_type_denied == IFRTYPE_FUNCTIONAL_CELLULAR) {
                        *reason = NECP_CLIENT_RESULT_REASON_CELLULAR_DENIED;
                    } else if (interface_type_denied == IFRTYPE_FUNCTIONAL_WIFI_INFRA) {
                        *reason = NECP_CLIENT_RESULT_REASON_WIFI_DENIED;
                necp_send_application_interface_denied_event(pid, application_uuid, interface_type_denied);
            // If the route gets denied, stop matching rules

    if (rt != NULL && rt->rt_ifp != NULL) {
        const bool expensive_prohibited = ((client_flags & NECP_CLIENT_PARAMETER_FLAG_PROHIBIT_EXPENSIVE) &&
            IFNET_IS_EXPENSIVE(rt->rt_ifp));
        const bool constrained_prohibited = ((client_flags & NECP_CLIENT_PARAMETER_FLAG_PROHIBIT_CONSTRAINED) &&
            IFNET_IS_CONSTRAINED(rt->rt_ifp));
        if (reason != NULL) {
            if (expensive_prohibited) {
                *reason = NECP_CLIENT_RESULT_REASON_EXPENSIVE_PROHIBITED;
            } else if (constrained_prohibited) {
                *reason = NECP_CLIENT_RESULT_REASON_CONSTRAINED_PROHIBITED;
        if (expensive_prohibited || constrained_prohibited) {
            // If the client flags prohibited a property of the interface, treat it as a drop
            returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_DROP;
            memset(&returned_result->routing_result_parameter, 0, sizeof(returned_result->routing_result_parameter));

    if (returned_route != NULL) {
        *returned_route = rt;

    lck_rw_done(&necp_kernel_policy_lock);
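
// Helper: determines whether a remote address lies on a directly connected
// network by performing an unscoped route lookup and checking
// IS_NECP_DEST_IN_LOCAL_NETWORKS on the resulting route.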
necp_is_route_local(union necp_sockaddr_union *remote_addr)
    bool no_remote_addr = FALSE;
    u_int8_t remote_family = 0;
    struct rtentry *rt = NULL;
    bool is_local = FALSE;

    if (remote_addr == NULL) {

    if (remote_addr->sa.sa_len == 0 ||
        (remote_addr->sa.sa_family == AF_INET && remote_addr->sin.sin_addr.s_addr == 0) ||
        (remote_addr->sa.sa_family == AF_INET6 && IN6_IS_ADDR_UNSPECIFIED(&remote_addr->sin6.sin6_addr))) {
        no_remote_addr = TRUE;
        remote_family = remote_addr->sa.sa_family;

    if (no_remote_addr) {
        memset(remote_addr, 0, sizeof(union necp_sockaddr_union));
        if (remote_family == AF_INET6) {
            // Reset address to ::
            remote_addr->sa.sa_family = AF_INET6;
            remote_addr->sa.sa_len = sizeof(struct sockaddr_in6);
            // Reset address to 0.0.0.0
            remote_addr->sa.sa_family = AF_INET;
            remote_addr->sa.sa_len = sizeof(struct sockaddr_in);

    // Lookup route regardless of the scoped interface to check if
    // remote address is in a local network.
    rt = rtalloc1_scoped((struct sockaddr *)remote_addr, 0, 0, 0);

    if (remote_addr->sa.sa_family == AF_INET && IS_INTF_CLAT46(rt->rt_ifp)) {

    is_local = IS_NECP_DEST_IN_LOCAL_NETWORKS(rt);
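
// Evaluates a single kernel socket policy against a socket's matching
// parameters (application IDs, credentials, domain, account, pid, uid, bound
// interface, traffic class, protocol, addresses, required agent types, and
// client flags). Every condition named in condition_mask must match, with
// condition_negated_mask inverting the sense of individual conditions.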
necp_socket_check_policy(struct necp_kernel_socket_policy *kernel_policy, necp_app_id app_id, necp_app_id real_app_id, errno_t cred_result, u_int32_t account_id, struct substring domain, u_int8_t domain_dot_count, pid_t pid, uid_t uid, u_int32_t bound_interface_index, u_int32_t traffic_class, u_int16_t protocol, union necp_sockaddr_union *local, union necp_sockaddr_union *remote, struct necp_client_parameter_netagent_type *required_agent_types, u_int32_t num_required_agent_types, bool has_client, uint32_t client_flags, int is_platform_binary, proc_t proc, struct rtentry *rt)
    if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES)) {
        if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
            u_int32_t cond_bound_interface_index = kernel_policy->cond_bound_interface ? kernel_policy->cond_bound_interface->if_index : 0;
            if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
                if (bound_interface_index == cond_bound_interface_index) {
                    // No match, matches forbidden interface
                if (bound_interface_index != cond_bound_interface_index) {
                    // No match, does not match required interface
            if (bound_interface_index != 0) {
                // No match, requires a non-bound packet

    if (kernel_policy->condition_mask == 0) {

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID) {
        if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_APP_ID) {
            if (app_id == kernel_policy->cond_app_id) {
                // No match, matches forbidden application
            if (app_id != kernel_policy->cond_app_id) {
                // No match, does not match required application

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
        if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
            if (real_app_id == kernel_policy->cond_real_app_id) {
                // No match, matches forbidden application
            if (real_app_id != kernel_policy->cond_real_app_id) {
                // No match, does not match required application

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_HAS_CLIENT) {

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT) {
        if (cred_result != 0) {
            // Process is missing entitlement

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PLATFORM_BINARY) {
        if (is_platform_binary == 0) {
            // Process is not platform binary

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT) {
        if (kernel_policy->cond_custom_entitlement_matched == necp_boolean_state_false) {
            // Process is missing entitlement based on previous check
        } else if (kernel_policy->cond_custom_entitlement_matched == necp_boolean_state_unknown) {
            if (kernel_policy->cond_custom_entitlement != NULL) {
                    // No process found, cannot check entitlement
                task_t task = proc_task(proc);
                    !IOTaskHasEntitlement(task, kernel_policy->cond_custom_entitlement)) {
                    // Process is missing custom entitlement
                    kernel_policy->cond_custom_entitlement_matched = necp_boolean_state_false;
                    kernel_policy->cond_custom_entitlement_matched = necp_boolean_state_true;

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_DOMAIN) {
        bool domain_matches = necp_hostname_matches_domain(domain, domain_dot_count, kernel_policy->cond_domain, kernel_policy->cond_domain_dot_count);
        if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_DOMAIN) {
            if (domain_matches) {
                // No match, matches forbidden domain
            if (!domain_matches) {
                // No match, does not match required domain

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID) {
        if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID) {
            if (account_id == kernel_policy->cond_account_id) {
                // No match, matches forbidden account
            if (account_id != kernel_policy->cond_account_id) {
                // No match, does not match required account

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PID) {
        if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_PID) {
            if (pid == kernel_policy->cond_pid) {
                // No match, matches forbidden pid
            if (pid != kernel_policy->cond_pid) {
                // No match, does not match required pid

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_UID) {
        if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_UID) {
            if (uid == kernel_policy->cond_uid) {
                // No match, matches forbidden uid
            if (uid != kernel_policy->cond_uid) {
                // No match, does not match required uid

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
        if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
            if (traffic_class >= kernel_policy->cond_traffic_class.start_tc &&
                traffic_class <= kernel_policy->cond_traffic_class.end_tc) {
                // No match, matches forbidden traffic class
            if (traffic_class < kernel_policy->cond_traffic_class.start_tc ||
                traffic_class > kernel_policy->cond_traffic_class.end_tc) {
                // No match, does not match required traffic class

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
        if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
            if (protocol == kernel_policy->cond_protocol) {
                // No match, matches forbidden protocol
            if (protocol != kernel_policy->cond_protocol) {
                // No match, does not match required protocol

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_AGENT_TYPE) {
        bool matches_agent_type = FALSE;
        for (u_int32_t i = 0; i < num_required_agent_types; i++) {
            struct necp_client_parameter_netagent_type *required_agent_type = &required_agent_types[i];
            if ((strlen(kernel_policy->cond_agent_type.agent_domain) == 0 ||
                strncmp(required_agent_type->netagent_domain, kernel_policy->cond_agent_type.agent_domain, NETAGENT_DOMAINSIZE) == 0) &&
                (strlen(kernel_policy->cond_agent_type.agent_type) == 0 ||
                strncmp(required_agent_type->netagent_type, kernel_policy->cond_agent_type.agent_type, NETAGENT_TYPESIZE) == 0)) {
                // Found a required agent that matches
                matches_agent_type = TRUE;
        if (!matches_agent_type) {

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_NETWORKS) {
        bool is_local = FALSE;
            is_local = IS_NECP_DEST_IN_LOCAL_NETWORKS(rt);
            is_local = necp_is_route_local(remote);
            // Either no route to validate or no match for local networks

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
        if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
            bool inRange = necp_is_addr_in_range((struct sockaddr *)local, (struct sockaddr *)&kernel_policy->cond_local_start, (struct sockaddr *)&kernel_policy->cond_local_end);
            if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
        } else if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
            bool inSubnet = necp_is_addr_in_subnet((struct sockaddr *)local, (struct sockaddr *)&kernel_policy->cond_local_start, kernel_policy->cond_local_prefix);
            if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
        if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
            bool inRange = necp_is_addr_in_range((struct sockaddr *)remote, (struct sockaddr *)&kernel_policy->cond_remote_start, (struct sockaddr *)&kernel_policy->cond_remote_end);
            if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
        } else if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
            bool inSubnet = necp_is_addr_in_subnet((struct sockaddr *)remote, (struct sockaddr *)&kernel_policy->cond_remote_start, kernel_policy->cond_remote_prefix);
            if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_CLIENT_FLAGS) {
        if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_CLIENT_FLAGS) {
            if ((client_flags & kernel_policy->cond_client_flags) == kernel_policy->cond_client_flags) {
                // Flags do match, and condition is negative, fail.
            if ((client_flags & kernel_policy->cond_client_flags) != kernel_policy->cond_client_flags) {
                // Flags do not match, fail.

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_EMPTY) {
        bool isEmpty = necp_addr_is_empty((struct sockaddr *)local);
        if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_LOCAL_EMPTY) {

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_EMPTY) {
        bool isEmpty = necp_addr_is_empty((struct sockaddr *)remote);
        if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REMOTE_EMPTY) {
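
// Hashes the socket matching info, keyed by the current policy generation
// count, so a cached policy result can be revalidated cheaply.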
static inline u_int32_t
necp_socket_calc_flowhash_locked(struct necp_socket_info *info)
    return net_flowhash(info, sizeof(*info), necp_kernel_socket_policies_gencount);
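
// Fills out a necp_socket_info structure for an inpcb: process and credential
// identifiers, traffic class, client flags, protocol, application IDs, account
// and domain, bound interface, and local/remote addresses (honoring override
// addresses when supplied). Only fields required by the currently loaded
// policy conditions are populated.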
necp_socket_fillout_info_locked(struct inpcb *inp, struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr, u_int32_t override_bound_interface, u_int32_t drop_order, struct necp_socket_info *info)
    struct socket *so = NULL;

    memset(info, 0, sizeof(struct necp_socket_info));

    so = inp->inp_socket;

    info->drop_order = drop_order;

    if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_PID) {
        info->pid = ((so->so_flags & SOF_DELEGATED) ? so->e_pid : so->last_pid);

    if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_UID) {
        info->uid = kauth_cred_getuid(so->so_cred);

    if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
        info->traffic_class = so->so_traffic_class;

    if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_HAS_CLIENT) {
        info->has_client = !uuid_is_null(inp->necp_client_uuid);

    if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_CLIENT_FLAGS) {
        info->client_flags = 0;
        if (INP_NO_CONSTRAINED(inp)) {
            info->client_flags |= NECP_CLIENT_PARAMETER_FLAG_PROHIBIT_CONSTRAINED;
        if (INP_NO_EXPENSIVE(inp)) {
            info->client_flags |= NECP_CLIENT_PARAMETER_FLAG_PROHIBIT_EXPENSIVE;
        if (inp->inp_socket->so_flags1 & SOF1_CELLFALLBACK) {
            info->client_flags |= NECP_CLIENT_PARAMETER_FLAG_FALLBACK_TRAFFIC;
        if (inp->inp_socket->so_flags1 & SOF1_INBOUND) {
            info->client_flags |= NECP_CLIENT_PARAMETER_FLAG_INBOUND;
        if (inp->inp_socket->so_options & SO_ACCEPTCONN ||
            inp->inp_flags2 & INP2_EXTERNAL_PORT) {
            info->client_flags |= NECP_CLIENT_PARAMETER_FLAG_LISTENER;

    if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
        if (inp->inp_ip_p) {
            info->protocol = inp->inp_ip_p;
            info->protocol = SOCK_PROTO(so);

    if (inp->inp_flags2 & INP2_WANT_APP_POLICY && necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_APP_ID) {
        u_int32_t responsible_application_id = 0;

        struct necp_uuid_id_mapping *existing_mapping = necp_uuid_lookup_app_id_locked(((so->so_flags & SOF_DELEGATED) ? so->e_uuid : so->last_uuid));
        if (existing_mapping) {
            info->application_id = existing_mapping->id;

#if defined(XNU_TARGET_OS_OSX)
        if (so->so_rpid > 0) {
            existing_mapping = necp_uuid_lookup_app_id_locked(so->so_ruuid);
            if (existing_mapping != NULL) {
                responsible_application_id = existing_mapping->id;

        if (responsible_application_id > 0) {
            info->real_application_id = info->application_id;
            info->application_id = responsible_application_id;
            info->used_responsible_pid = true;
        } else if (!(so->so_flags & SOF_DELEGATED)) {
            info->real_application_id = info->application_id;
        } else if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
            struct necp_uuid_id_mapping *real_existing_mapping = necp_uuid_lookup_app_id_locked(so->last_uuid);
            if (real_existing_mapping) {
                info->real_application_id = real_existing_mapping->id;

        if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT) {
            info->cred_result = priv_check_cred(so->so_cred, PRIV_NET_PRIVILEGED_NECP_MATCH, 0);
            if (info->cred_result != 0) {
                // Process does not have entitlement, check the parent process
                necp_get_parent_cred_result(NULL, info);

    if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_PLATFORM_BINARY) {
        info->is_platform_binary = csproc_get_platform_binary(current_proc()) ? true : false;

    if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID && inp->inp_necp_attributes.inp_account != NULL) {
        struct necp_string_id_mapping *existing_mapping = necp_lookup_string_to_id_locked(&necp_account_id_list, inp->inp_necp_attributes.inp_account);
        if (existing_mapping) {
            info->account_id = existing_mapping->id;

    if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_DOMAIN) {
        info->domain = inp->inp_necp_attributes.inp_domain;

    if (override_bound_interface) {
        info->bound_interface_index = override_bound_interface;
        if ((inp->inp_flags & INP_BOUND_IF) && inp->inp_boundifp) {
            info->bound_interface_index = inp->inp_boundifp->if_index;

    if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_ADDRESS_TYPE_CONDITIONS) {
        if (override_local_addr != NULL) {
            if (override_local_addr->sa_family == AF_INET6 && override_local_addr->sa_len <= sizeof(struct sockaddr_in6)) {
                memcpy(&info->local_addr, override_local_addr, override_local_addr->sa_len);
                if (IN6_IS_ADDR_V4MAPPED(&(info->local_addr.sin6.sin6_addr))) {
                    struct sockaddr_in sin;
                    in6_sin6_2_sin(&sin, &(info->local_addr.sin6));
                    memset(&info->local_addr, 0, sizeof(union necp_sockaddr_union));
                    memcpy(&info->local_addr, &sin, sin.sin_len);
            } else if (override_local_addr->sa_family == AF_INET && override_local_addr->sa_len <= sizeof(struct sockaddr_in)) {
                memcpy(&info->local_addr, override_local_addr, override_local_addr->sa_len);
            if (inp->inp_vflag & INP_IPV4) {
                ((struct sockaddr_in *)&info->local_addr)->sin_family = AF_INET;
                ((struct sockaddr_in *)&info->local_addr)->sin_len = sizeof(struct sockaddr_in);
                ((struct sockaddr_in *)&info->local_addr)->sin_port = inp->inp_lport;
                memcpy(&((struct sockaddr_in *)&info->local_addr)->sin_addr, &inp->inp_laddr, sizeof(struct in_addr));
            } else if (inp->inp_vflag & INP_IPV6) {
                ((struct sockaddr_in6 *)&info->local_addr)->sin6_family = AF_INET6;
                ((struct sockaddr_in6 *)&info->local_addr)->sin6_len = sizeof(struct sockaddr_in6);
                ((struct sockaddr_in6 *)&info->local_addr)->sin6_port = inp->inp_lport;
                memcpy(&((struct sockaddr_in6 *)&info->local_addr)->sin6_addr, &inp->in6p_laddr, sizeof(struct in6_addr));

        if (override_remote_addr != NULL) {
            if (override_remote_addr->sa_family == AF_INET6 && override_remote_addr->sa_len <= sizeof(struct sockaddr_in6)) {
                memcpy(&info->remote_addr, override_remote_addr, override_remote_addr->sa_len);
                if (IN6_IS_ADDR_V4MAPPED(&(info->remote_addr.sin6.sin6_addr))) {
                    struct sockaddr_in sin;
                    in6_sin6_2_sin(&sin, &(info->remote_addr.sin6));
                    memset(&info->remote_addr, 0, sizeof(union necp_sockaddr_union));
                    memcpy(&info->remote_addr, &sin, sin.sin_len);
            } else if (override_remote_addr->sa_family == AF_INET && override_remote_addr->sa_len <= sizeof(struct sockaddr_in)) {
                memcpy(&info->remote_addr, override_remote_addr, override_remote_addr->sa_len);
            if (inp->inp_vflag & INP_IPV4) {
                ((struct sockaddr_in *)&info->remote_addr)->sin_family = AF_INET;
                ((struct sockaddr_in *)&info->remote_addr)->sin_len = sizeof(struct sockaddr_in);
                ((struct sockaddr_in *)&info->remote_addr)->sin_port = inp->inp_fport;
                memcpy(&((struct sockaddr_in *)&info->remote_addr)->sin_addr, &inp->inp_faddr, sizeof(struct in_addr));
            } else if (inp->inp_vflag & INP_IPV6) {
                ((struct sockaddr_in6 *)&info->remote_addr)->sin6_family = AF_INET6;
                ((struct sockaddr_in6 *)&info->remote_addr)->sin6_len = sizeof(struct sockaddr_in6);
                ((struct sockaddr_in6 *)&info->remote_addr)->sin6_port = inp->inp_fport;
                memcpy(&((struct sockaddr_in6 *)&info->remote_addr)->sin6_addr, &inp->in6p_faddr, sizeof(struct in6_addr));
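
// Walks an ordered policy search array and returns the first matching policy
// for the given info. Non-terminal results seen along the way (socket filters,
// route rules, service triggers, netagents, skips, and allow-unentitled) are
// accumulated into the return parameters, and drop-all / drop-by-destination
// session rules are reported through the drop result outputs. Callers hold the
// NECP kernel policy lock.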
static inline struct necp_kernel_socket_policy *
necp_socket_find_policy_match_with_info_locked(struct necp_kernel_socket_policy **policy_search_array, struct necp_socket_info *info,
    necp_kernel_policy_filter *return_filter,
    u_int32_t *return_route_rule_id_array, size_t *return_route_rule_id_array_count, size_t route_rule_id_array_count,
    necp_kernel_policy_result *return_service_action, necp_kernel_policy_service *return_service,
    u_int32_t *return_netagent_array, u_int32_t *return_netagent_use_flags_array, size_t netagent_array_count,
    struct necp_client_parameter_netagent_type *required_agent_types,
    u_int32_t num_required_agent_types, proc_t proc, necp_kernel_policy_id *skip_policy_id, struct rtentry *rt,
    necp_kernel_policy_result *return_drop_dest_policy_result, necp_drop_all_bypass_check_result_t *return_drop_all_bypass)
    struct necp_kernel_socket_policy *matched_policy = NULL;
    u_int32_t skip_order = 0;
    u_int32_t skip_session_order = 0;
    size_t route_rule_id_count = 0;
    size_t netagent_cursor = 0;
    necp_drop_all_bypass_check_result_t drop_all_bypass = NECP_DROP_ALL_BYPASS_CHECK_RESULT_NONE;
    if (return_drop_all_bypass != NULL) {
        *return_drop_all_bypass = drop_all_bypass;

    // Pre-process domain for quick matching
    struct substring domain_substring = necp_trim_dots_and_stars(info->domain, info->domain ? strlen(info->domain) : 0);
    u_int8_t domain_dot_count = necp_count_dots(domain_substring.string, domain_substring.length);

    if (return_filter != NULL) {

    if (return_route_rule_id_array_count != NULL) {
        *return_route_rule_id_array_count = 0;

    if (return_service_action != NULL) {
        *return_service_action = 0;

    if (return_service != NULL) {
        return_service->identifier = 0;
        return_service->data = 0;

    // Do not subject layer-2 filter to NECP policies, return a PASS policy
    if (necp_pass_interpose > 0 && info->client_flags & NECP_CLIENT_PARAMETER_FLAG_INTERPOSE) {
        return &pass_policy;

    *return_drop_dest_policy_result = NECP_KERNEL_POLICY_RESULT_NONE;

    if (policy_search_array != NULL) {
        for (i = 0; policy_search_array[i] != NULL; i++) {
            if (necp_drop_all_order != 0 && policy_search_array[i]->session_order >= necp_drop_all_order) {
                // We've hit a drop all rule
                if (drop_all_bypass == NECP_DROP_ALL_BYPASS_CHECK_RESULT_NONE) {
                    drop_all_bypass = necp_check_drop_all_bypass_result(proc);
                    if (return_drop_all_bypass != NULL) {
                        *return_drop_all_bypass = drop_all_bypass;
                if (drop_all_bypass == NECP_DROP_ALL_BYPASS_CHECK_RESULT_FALSE) {

            if (necp_drop_dest_policy.entry_count != 0 &&
                necp_address_matches_drop_dest_policy(&info->remote_addr, policy_search_array[i]->session_order)) {
                // We've hit a drop by destination address rule
                *return_drop_dest_policy_result = NECP_KERNEL_POLICY_RESULT_DROP;

            if (info->drop_order != 0 && policy_search_array[i]->session_order >= info->drop_order) {
                // We've hit a drop order for this socket

            if (skip_session_order && policy_search_array[i]->session_order >= skip_session_order) {
                skip_session_order = 0;

                if (policy_search_array[i]->order < skip_order) {
                    skip_session_order = 0;
            } else if (skip_session_order) {

            if (necp_socket_check_policy(policy_search_array[i], info->application_id, info->real_application_id, info->cred_result, info->account_id, domain_substring, domain_dot_count, info->pid, info->uid, info->bound_interface_index, info->traffic_class, info->protocol, &info->local_addr, &info->remote_addr, required_agent_types, num_required_agent_types, info->has_client, info->client_flags, info->is_platform_binary, proc, rt)) {
                if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_SOCKET_FILTER) {
                    if (return_filter && *return_filter != NECP_FILTER_UNIT_NO_FILTER) {
                        necp_kernel_policy_filter control_unit = policy_search_array[i]->result_parameter.filter_control_unit;
                        if (control_unit == NECP_FILTER_UNIT_NO_FILTER) {
                            *return_filter = control_unit;
                            *return_filter |= control_unit;
                        if (necp_debug > 1) {
                            NECPLOG(LOG_DEBUG, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) Filter %d", info->application_id, info->real_application_id, info->bound_interface_index, info->protocol, policy_search_array[i]->result_parameter.filter_control_unit);
                } else if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_ROUTE_RULES) {
                    if (return_route_rule_id_array && route_rule_id_count < route_rule_id_array_count) {
                        return_route_rule_id_array[route_rule_id_count++] = policy_search_array[i]->result_parameter.route_rule_id;
                        if (necp_debug > 1) {
                            NECPLOG(LOG_DEBUG, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) Route Rule %d", info->application_id, info->real_application_id, info->bound_interface_index, info->protocol, policy_search_array[i]->result_parameter.route_rule_id);
                } else if (necp_kernel_socket_result_is_trigger_service_type(policy_search_array[i])) {
                    if (return_service_action && *return_service_action == 0) {
                        *return_service_action = policy_search_array[i]->result;
                        if (necp_debug > 1) {
                            NECPLOG(LOG_DEBUG, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) Service Action %d", info->application_id, info->real_application_id, info->bound_interface_index, info->protocol, policy_search_array[i]->result);
                    if (return_service && return_service->identifier == 0) {
                        return_service->identifier = policy_search_array[i]->result_parameter.service.identifier;
                        return_service->data = policy_search_array[i]->result_parameter.service.data;
                        if (necp_debug > 1) {
                            NECPLOG(LOG_DEBUG, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) Service ID %d Data %d", info->application_id, info->real_application_id, info->bound_interface_index, info->protocol, policy_search_array[i]->result_parameter.service.identifier, policy_search_array[i]->result_parameter.service.data);
                } else if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_USE_NETAGENT ||
                    policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_NETAGENT_SCOPED) {
                    if (return_netagent_array != NULL &&
                        netagent_cursor < netagent_array_count) {
                        return_netagent_array[netagent_cursor] = policy_search_array[i]->result_parameter.netagent_id;
                        if (return_netagent_use_flags_array != NULL &&
                            policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_NETAGENT_SCOPED) {
                            return_netagent_use_flags_array[netagent_cursor] |= NECP_AGENT_USE_FLAG_SCOPE;
                        if (necp_debug > 1) {
                            NECPLOG(LOG_DEBUG, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) %s Netagent %d",
                                info->application_id, info->real_application_id, info->bound_interface_index, info->protocol,
                                policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_USE_NETAGENT ? "Use" : "Scope",
                                policy_search_array[i]->result_parameter.netagent_id);

                // Matched policy is a skip. Do skip and continue.
                if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
                    skip_order = policy_search_array[i]->result_parameter.skip_policy_order;
                    skip_session_order = policy_search_array[i]->session_order + 1;
                    if (skip_policy_id && *skip_policy_id == NECP_KERNEL_POLICY_ID_NONE) {
                        *skip_policy_id = policy_search_array[i]->id;

                // Matched an allow unentitled, which clears any drop order
                if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_ALLOW_UNENTITLED) {
                    info->drop_order = 0;

                // Passed all tests, found a match
                matched_policy = policy_search_array[i];

    if (return_route_rule_id_array_count != NULL) {
        *return_route_rule_id_array_count = route_rule_id_count;
    return matched_policy;
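
// Returns whether the socket's local address is assigned to the given
// interface, by comparing the inpcb's local address against the interface's
// address list for the socket's address family.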
necp_socket_uses_interface(struct inpcb *inp, u_int32_t interface_index)
    bool found_match = FALSE;
    ifaddr_t *addresses = NULL;
    union necp_sockaddr_union address_storage;
    int family = AF_INET;
    ifnet_t interface = ifindex2ifnet[interface_index];

    if (inp == NULL || interface == NULL) {

    if (inp->inp_vflag & INP_IPV4) {
    } else if (inp->inp_vflag & INP_IPV6) {

    result = ifnet_get_address_list_family(interface, &addresses, family);
        NECPLOG(LOG_ERR, "Failed to get address list for %s%d", ifnet_name(interface), ifnet_unit(interface));

    for (i = 0; addresses[i] != NULL; i++) {
        if (ifaddr_address(addresses[i], &address_storage.sa, sizeof(address_storage)) == 0) {
            if (family == AF_INET) {
                if (memcmp(&address_storage.sin.sin_addr, &inp->inp_laddr, sizeof(inp->inp_laddr)) == 0) {
            } else if (family == AF_INET6) {
                if (memcmp(&address_storage.sin6.sin6_addr, &inp->in6p_laddr, sizeof(inp->in6p_laddr)) == 0) {

    ifnet_free_address_list(addresses);
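
// Returns whether the socket is currently connecting, connected, or
// disconnecting.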
necp_socket_is_connected(struct inpcb *inp)
    return inp->inp_socket->so_state & (SS_ISCONNECTING | SS_ISCONNECTED | SS_ISDISCONNECTING);
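
// Determines whether a flow should bypass NECP policy evaluation entirely,
// either because loopback traffic is allowed to pass (necp_pass_loopback) or
// because it targets an internal coprocessor interface.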
necp_socket_bypass(struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr, struct inpcb *inp)
    if (necp_pass_loopback > 0 && necp_is_loopback(override_local_addr, override_remote_addr, inp, NULL, IFSCOPE_NONE)) {
    } else if (necp_is_intcoproc(inp, NULL)) {
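
// Socket-layer policy match for an inpcb. Gathers matching info, consults the
// per-application policy map, and caches the result (policy ID, filter control
// unit, route rule ID, and result) in inp_policyresult, keyed by flowhash and
// policy generation count so unchanged sockets are not re-evaluated. Also
// handles bypass shortcuts, unregistered scoped services, inactive required
// netagents, and global drop orders.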
7604 necp_kernel_policy_id
7605 necp_socket_find_policy_match(struct inpcb
*inp
, struct sockaddr
*override_local_addr
, struct sockaddr
*override_remote_addr
, u_int32_t override_bound_interface
)
7607 struct socket
*so
= NULL
;
7608 necp_kernel_policy_filter filter_control_unit
= 0;
7609 struct necp_kernel_socket_policy
*matched_policy
= NULL
;
7610 necp_kernel_policy_id matched_policy_id
= NECP_KERNEL_POLICY_ID_NONE
;
7611 necp_kernel_policy_result service_action
= 0;
7612 necp_kernel_policy_service service
= { 0, 0 };
7613 u_int32_t drop_dest_policy_result
= NECP_KERNEL_POLICY_RESULT_NONE
;
7614 necp_drop_all_bypass_check_result_t drop_all_bypass
= NECP_DROP_ALL_BYPASS_CHECK_RESULT_NONE
;
7616 u_int32_t netagent_ids
[NECP_MAX_NETAGENTS
];
7617 memset(&netagent_ids
, 0, sizeof(netagent_ids
));
7618 int netagent_cursor
;
7620 struct necp_socket_info info
;
7623 return NECP_KERNEL_POLICY_ID_NONE
;
7626 // Ignore invalid addresses
7627 if (override_local_addr
!= NULL
&&
7628 !necp_address_is_valid(override_local_addr
)) {
7629 override_local_addr
= NULL
;
7631 if (override_remote_addr
!= NULL
&&
7632 !necp_address_is_valid(override_remote_addr
)) {
7633 override_remote_addr
= NULL
;
7636 so
= inp
->inp_socket
;
7638 u_int32_t drop_order
= necp_process_drop_order(so
->so_cred
);
7640 // Don't lock. Possible race condition, but we don't want the performance hit.
7641 if (necp_kernel_socket_policies_count
== 0 ||
7642 (!(inp
->inp_flags2
& INP2_WANT_APP_POLICY
) && necp_kernel_socket_policies_non_app_count
== 0)) {
7643 if (necp_drop_all_order
> 0 || drop_order
> 0) {
7644 inp
->inp_policyresult
.policy_id
= NECP_KERNEL_POLICY_ID_NO_MATCH
;
7645 inp
->inp_policyresult
.skip_policy_id
= NECP_KERNEL_POLICY_ID_NO_MATCH
;
7646 inp
->inp_policyresult
.policy_gencount
= 0;
7647 inp
->inp_policyresult
.app_id
= 0;
7648 inp
->inp_policyresult
.flowhash
= 0;
7649 inp
->inp_policyresult
.results
.filter_control_unit
= 0;
7650 inp
->inp_policyresult
.results
.route_rule_id
= 0;
7651 if (necp_socket_bypass(override_local_addr
, override_remote_addr
, inp
)) {
7652 inp
->inp_policyresult
.results
.result
= NECP_KERNEL_POLICY_RESULT_PASS
;
7654 inp
->inp_policyresult
.results
.result
= NECP_KERNEL_POLICY_RESULT_DROP
;
7657 return NECP_KERNEL_POLICY_ID_NONE
;
7660 // Check for loopback exception
7661 if (necp_socket_bypass(override_local_addr
, override_remote_addr
, inp
)) {
7662 if (inp
->inp_policyresult
.results
.result
== NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED
) {
7663 // If the previous policy result was "socket scoped", un-scope the socket.
7664 inp
->inp_flags
&= ~INP_BOUND_IF
;
7665 inp
->inp_boundifp
= NULL
;
7667 // Mark socket as a pass
7668 inp
->inp_policyresult
.policy_id
= NECP_KERNEL_POLICY_ID_NO_MATCH
;
7669 inp
->inp_policyresult
.skip_policy_id
= NECP_KERNEL_POLICY_ID_NO_MATCH
;
7670 inp
->inp_policyresult
.policy_gencount
= 0;
7671 inp
->inp_policyresult
.app_id
= 0;
7672 inp
->inp_policyresult
.flowhash
= 0;
7673 inp
->inp_policyresult
.results
.filter_control_unit
= 0;
7674 inp
->inp_policyresult
.results
.route_rule_id
= 0;
7675 inp
->inp_policyresult
.results
.result
= NECP_KERNEL_POLICY_RESULT_PASS
;
7676 return NECP_KERNEL_POLICY_ID_NONE
;
7680 lck_rw_lock_shared(&necp_kernel_policy_lock
);
7682 necp_socket_fillout_info_locked(inp
, override_local_addr
, override_remote_addr
, override_bound_interface
, drop_order
, &info
);
7685 u_int32_t flowhash
= necp_socket_calc_flowhash_locked(&info
);
7686 if (inp
->inp_policyresult
.policy_id
!= NECP_KERNEL_POLICY_ID_NONE
&&
7687 inp
->inp_policyresult
.policy_gencount
== necp_kernel_socket_policies_gencount
&&
7688 inp
->inp_policyresult
.flowhash
== flowhash
) {
7689 // If already matched this socket on this generation of table, skip
7692 lck_rw_done(&necp_kernel_policy_lock
);
7694 return inp
->inp_policyresult
.policy_id
;
7697 inp
->inp_policyresult
.app_id
= info
.application_id
;
7699 // Match socket to policy
7700 necp_kernel_policy_id skip_policy_id
= NECP_KERNEL_POLICY_ID_NONE
;
7701 u_int32_t route_rule_id_array
[MAX_AGGREGATE_ROUTE_RULES
];
7702 size_t route_rule_id_array_count
= 0;
7703 matched_policy
= necp_socket_find_policy_match_with_info_locked(necp_kernel_socket_policies_map
[NECP_SOCKET_MAP_APP_ID_TO_BUCKET(info
.application_id
)], &info
, &filter_control_unit
, route_rule_id_array
, &route_rule_id_array_count
, MAX_AGGREGATE_ROUTE_RULES
, &service_action
, &service
, netagent_ids
, NULL
, NECP_MAX_NETAGENTS
, NULL
, 0, current_proc(), &skip_policy_id
, inp
->inp_route
.ro_rt
, &drop_dest_policy_result
, &drop_all_bypass
);
7705 // If the socket matched a scoped service policy, mark as Drop if not registered.
7706 // This covers the cases in which a service is required (on demand) but hasn't started yet.
7707 if ((service_action
== NECP_KERNEL_POLICY_RESULT_TRIGGER_SCOPED
    || service_action == NECP_KERNEL_POLICY_RESULT_NO_TRIGGER_SCOPED) &&
	    service.identifier != 0 &&
	    service.identifier != NECP_NULL_SERVICE_ID) {
		bool service_is_registered = FALSE;
		struct necp_service_registration *service_registration = NULL;
		LIST_FOREACH(service_registration, &necp_registered_service_list, kernel_chain) {
			if (service.identifier == service_registration->service_id) {
				service_is_registered = TRUE;
				break;
			}
		}
		if (!service_is_registered) {
			// Mark socket as a drop if service is not registered
			inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
			inp->inp_policyresult.skip_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
			inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount;
			inp->inp_policyresult.flowhash = flowhash;
			inp->inp_policyresult.results.filter_control_unit = 0;
			inp->inp_policyresult.results.route_rule_id = 0;
			inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_DROP;

			if (necp_debug > 1) {
				NECPLOG(LOG_DEBUG, "Socket Policy: (BoundInterface %d Proto %d) Dropping packet because service is not registered", info.bound_interface_index, info.protocol);
			}

			lck_rw_done(&necp_kernel_policy_lock);
			return NECP_KERNEL_POLICY_ID_NONE;
		}
	}

	for (netagent_cursor = 0; netagent_cursor < NECP_MAX_NETAGENTS; netagent_cursor++) {
		struct necp_uuid_id_mapping *mapping = NULL;
		u_int32_t netagent_id = netagent_ids[netagent_cursor];
		if (netagent_id == 0) {
			break;
		}
		mapping = necp_uuid_lookup_uuid_with_service_id_locked(netagent_id);
		if (mapping != NULL) {
			u_int32_t agent_flags = 0;
			agent_flags = netagent_get_flags(mapping->uuid);
			if (agent_flags & NETAGENT_FLAG_REGISTERED) {
				if (agent_flags & NETAGENT_FLAG_ACTIVE) {
					continue;
				} else if ((agent_flags & NETAGENT_FLAG_VOLUNTARY) == 0) {
					if (agent_flags & NETAGENT_FLAG_KERNEL_ACTIVATED) {
						int trigger_error = 0;
						trigger_error = netagent_kernel_trigger(mapping->uuid);
						if (necp_debug > 1) {
							NECPLOG(LOG_DEBUG, "Socket Policy: Triggering inactive agent, error %d", trigger_error);
						}
					}

					// Mark socket as a drop if required agent is not active
					inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
					inp->inp_policyresult.skip_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
					inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount;
					inp->inp_policyresult.flowhash = flowhash;
					inp->inp_policyresult.results.filter_control_unit = 0;
					inp->inp_policyresult.results.route_rule_id = 0;
					inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_DROP;

					if (necp_debug > 1) {
						NECPLOG(LOG_DEBUG, "Socket Policy: (BoundInterface %d Proto %d) Dropping packet because agent is not active", info.bound_interface_index, info.protocol);
					}

					lck_rw_done(&necp_kernel_policy_lock);
					return NECP_KERNEL_POLICY_ID_NONE;
				}
			}
		}
	}

	u_int32_t route_rule_id = 0;
	if (route_rule_id_array_count == 1) {
		route_rule_id = route_rule_id_array[0];
	} else if (route_rule_id_array_count > 1) {
		route_rule_id = necp_create_aggregate_route_rule(route_rule_id_array);
	}

	bool reset_tcp_mss = false;
	if (matched_policy) {
		matched_policy_id = matched_policy->id;
		inp->inp_policyresult.policy_id = matched_policy->id;
		inp->inp_policyresult.skip_policy_id = skip_policy_id;
		inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount;
		inp->inp_policyresult.flowhash = flowhash;
		inp->inp_policyresult.results.filter_control_unit = filter_control_unit;
		inp->inp_policyresult.results.route_rule_id = route_rule_id;
		inp->inp_policyresult.results.result = matched_policy->result;
		memcpy(&inp->inp_policyresult.results.result_parameter, &matched_policy->result_parameter, sizeof(matched_policy->result_parameter));

		if (info.used_responsible_pid && (matched_policy->condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID)) {
			inp->inp_policyresult.app_id = info.real_application_id;
		}

		if (necp_socket_is_connected(inp) &&
		    (matched_policy->result == NECP_KERNEL_POLICY_RESULT_DROP ||
		    (matched_policy->result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL && !necp_socket_uses_interface(inp, matched_policy->result_parameter.tunnel_interface_index)))) {
			if (necp_debug) {
				NECPLOG(LOG_DEBUG, "Marking socket in state %d as defunct", so->so_state);
			}
			sosetdefunct(current_proc(), so, SHUTDOWN_SOCKET_LEVEL_NECP | SHUTDOWN_SOCKET_LEVEL_DISCONNECT_ALL, TRUE);
		} else if (necp_socket_is_connected(inp) &&
		    matched_policy->result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL &&
		    info.protocol == IPPROTO_TCP) {
			// Reset MSS on TCP socket if tunnel policy changes
			reset_tcp_mss = true;
		}

		if (necp_debug > 1) {
			NECPLOG(LOG_DEBUG, "Socket Policy: %p (BoundInterface %d Proto %d) Policy %d Result %d Parameter %d", inp->inp_socket, info.bound_interface_index, info.protocol, matched_policy->id, matched_policy->result, matched_policy->result_parameter.tunnel_interface_index);
		}
	} else {
		bool drop_all = false;
		if (necp_drop_all_order > 0 || info.drop_order > 0 || drop_dest_policy_result == NECP_KERNEL_POLICY_RESULT_DROP) {
			// Mark socket as a drop if set
			drop_all = true;
			if (drop_all_bypass == NECP_DROP_ALL_BYPASS_CHECK_RESULT_NONE) {
				drop_all_bypass = necp_check_drop_all_bypass_result(NULL);
			}
		}
		if (drop_all && drop_all_bypass == NECP_DROP_ALL_BYPASS_CHECK_RESULT_FALSE) {
			inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
			inp->inp_policyresult.skip_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
			inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount;
			inp->inp_policyresult.flowhash = flowhash;
			inp->inp_policyresult.results.filter_control_unit = 0;
			inp->inp_policyresult.results.route_rule_id = 0;
			inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_DROP;
		} else {
			// Mark non-matching socket so we don't re-check it
			inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
			inp->inp_policyresult.skip_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
			inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount;
			inp->inp_policyresult.flowhash = flowhash;
			inp->inp_policyresult.results.filter_control_unit = filter_control_unit; // We may have matched a filter, so mark it!
			inp->inp_policyresult.results.route_rule_id = route_rule_id; // We may have matched a route rule, so mark it!
			inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_NONE;
		}
	}

	lck_rw_done(&necp_kernel_policy_lock);

	if (reset_tcp_mss) {
		// Update MSS when not holding the policy lock to avoid recursive locking
		tcp_mtudisc(inp, 0);
	}

	return matched_policy_id;
}
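
// Evaluate a single IP-output policy's conditions (bound interface, policy ID,
// last interface, protocol, local networks, and address ranges/prefixes)
// against the attributes of an outgoing packet.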
7863 necp_ip_output_check_policy(struct necp_kernel_ip_output_policy
*kernel_policy
, necp_kernel_policy_id socket_policy_id
, necp_kernel_policy_id socket_skip_policy_id
, u_int32_t bound_interface_index
, u_int32_t last_interface_index
, u_int16_t protocol
, union necp_sockaddr_union
*local
, union necp_sockaddr_union
*remote
, struct rtentry
*rt
)
7865 if (!(kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_ALL_INTERFACES
)) {
7866 if (kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_BOUND_INTERFACE
) {
7867 u_int32_t cond_bound_interface_index
= kernel_policy
->cond_bound_interface
? kernel_policy
->cond_bound_interface
->if_index
: 0;
7868 if (kernel_policy
->condition_negated_mask
& NECP_KERNEL_CONDITION_BOUND_INTERFACE
) {
7869 if (bound_interface_index
== cond_bound_interface_index
) {
7870 // No match, matches forbidden interface
7874 if (bound_interface_index
!= cond_bound_interface_index
) {
7875 // No match, does not match required interface
7880 if (bound_interface_index
!= 0) {
7881 // No match, requires a non-bound packet
7887 if (kernel_policy
->condition_mask
== 0) {
7891 if (kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_POLICY_ID
) {
7892 necp_kernel_policy_id matched_policy_id
=
7893 kernel_policy
->result
== NECP_KERNEL_POLICY_RESULT_SKIP
? socket_skip_policy_id
: socket_policy_id
;
7894 if (matched_policy_id
!= kernel_policy
->cond_policy_id
) {
7895 // No match, does not match required id
7900 if (kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_LAST_INTERFACE
) {
7901 if (last_interface_index
!= kernel_policy
->cond_last_interface_index
) {
7906 if (kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_PROTOCOL
) {
7907 if (kernel_policy
->condition_negated_mask
& NECP_KERNEL_CONDITION_PROTOCOL
) {
7908 if (protocol
== kernel_policy
->cond_protocol
) {
7909 // No match, matches forbidden protocol
7913 if (protocol
!= kernel_policy
->cond_protocol
) {
7914 // No match, does not match required protocol
7920 if (kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_NETWORKS
) {
7921 bool is_local
= FALSE
;
7924 is_local
= IS_NECP_DEST_IN_LOCAL_NETWORKS(rt
);
7926 is_local
= necp_is_route_local(remote
);
7930 // Either no route to validate or no match for local networks
7935 if (kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_START
) {
7936 if (kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_END
) {
7937 bool inRange
= necp_is_addr_in_range((struct sockaddr
*)local
, (struct sockaddr
*)&kernel_policy
->cond_local_start
, (struct sockaddr
*)&kernel_policy
->cond_local_end
);
7938 if (kernel_policy
->condition_negated_mask
& NECP_KERNEL_CONDITION_LOCAL_END
) {
7947 } else if (kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_PREFIX
) {
7948 bool inSubnet
= necp_is_addr_in_subnet((struct sockaddr
*)local
, (struct sockaddr
*)&kernel_policy
->cond_local_start
, kernel_policy
->cond_local_prefix
);
7949 if (kernel_policy
->condition_negated_mask
& NECP_KERNEL_CONDITION_LOCAL_PREFIX
) {
7961 if (kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_REMOTE_START
) {
7962 if (kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_REMOTE_END
) {
7963 bool inRange
= necp_is_addr_in_range((struct sockaddr
*)remote
, (struct sockaddr
*)&kernel_policy
->cond_remote_start
, (struct sockaddr
*)&kernel_policy
->cond_remote_end
);
7964 if (kernel_policy
->condition_negated_mask
& NECP_KERNEL_CONDITION_REMOTE_END
) {
7973 } else if (kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_REMOTE_PREFIX
) {
7974 bool inSubnet
= necp_is_addr_in_subnet((struct sockaddr
*)remote
, (struct sockaddr
*)&kernel_policy
->cond_remote_start
, kernel_policy
->cond_remote_prefix
);
7975 if (kernel_policy
->condition_negated_mask
& NECP_KERNEL_CONDITION_REMOTE_PREFIX
) {
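
// Walk the IP-output policy bucket for this policy ID, honoring drop-all,
// drop-by-destination, and skip rules, and collect any matched route rules
// into an aggregate route rule for the caller.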
static inline struct necp_kernel_ip_output_policy *
necp_ip_output_find_policy_match_locked(necp_kernel_policy_id socket_policy_id, necp_kernel_policy_id socket_skip_policy_id, u_int32_t bound_interface_index, u_int32_t last_interface_index, u_int16_t protocol, union necp_sockaddr_union *local_addr, union necp_sockaddr_union *remote_addr, struct rtentry *rt, u_int32_t *return_route_rule_id, necp_kernel_policy_result *return_drop_dest_policy_result, necp_drop_all_bypass_check_result_t *return_drop_all_bypass)
{
	u_int32_t skip_order = 0;
	u_int32_t skip_session_order = 0;
	struct necp_kernel_ip_output_policy *matched_policy = NULL;
	struct necp_kernel_ip_output_policy **policy_search_array = necp_kernel_ip_output_policies_map[NECP_IP_OUTPUT_MAP_ID_TO_BUCKET(socket_policy_id)];
	u_int32_t route_rule_id_array[MAX_AGGREGATE_ROUTE_RULES];
	size_t route_rule_id_count = 0;
	necp_drop_all_bypass_check_result_t drop_all_bypass = NECP_DROP_ALL_BYPASS_CHECK_RESULT_NONE;
	if (return_drop_all_bypass != NULL) {
		*return_drop_all_bypass = drop_all_bypass;
	}

	if (return_route_rule_id != NULL) {
		*return_route_rule_id = 0;
	}

	*return_drop_dest_policy_result = NECP_KERNEL_POLICY_RESULT_NONE;

	if (policy_search_array != NULL) {
		for (int i = 0; policy_search_array[i] != NULL; i++) {
			if (necp_drop_all_order != 0 && policy_search_array[i]->session_order >= necp_drop_all_order) {
				// We've hit a drop all rule
				if (drop_all_bypass == NECP_DROP_ALL_BYPASS_CHECK_RESULT_NONE) {
					drop_all_bypass = necp_check_drop_all_bypass_result(NULL);
					if (return_drop_all_bypass != NULL) {
						*return_drop_all_bypass = drop_all_bypass;
					}
				}
				if (drop_all_bypass == NECP_DROP_ALL_BYPASS_CHECK_RESULT_FALSE) {
					break;
				}
			}
			if (necp_drop_dest_policy.entry_count > 0 &&
			    necp_address_matches_drop_dest_policy(remote_addr, policy_search_array[i]->session_order)) {
				// We've hit a drop by destination address rule
				*return_drop_dest_policy_result = NECP_KERNEL_POLICY_RESULT_DROP;
				break;
			}
			if (skip_session_order && policy_search_array[i]->session_order >= skip_session_order) {
				// Done skipping
				skip_order = 0;
				skip_session_order = 0;
			}
			if (skip_order) {
				if (policy_search_array[i]->order < skip_order) {
					// Skip this policy
					continue;
				} else {
					// Done skipping
					skip_order = 0;
					skip_session_order = 0;
				}
			} else if (skip_session_order) {
				// Skip this policy
				continue;
			}
			if (necp_ip_output_check_policy(policy_search_array[i], socket_policy_id, socket_skip_policy_id, bound_interface_index, last_interface_index, protocol, local_addr, remote_addr, rt)) {
				if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_ROUTE_RULES) {
					if (return_route_rule_id != NULL && route_rule_id_count < MAX_AGGREGATE_ROUTE_RULES) {
						route_rule_id_array[route_rule_id_count++] = policy_search_array[i]->result_parameter.route_rule_id;
					}
					continue;
				} else if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
					skip_order = policy_search_array[i]->result_parameter.skip_policy_order;
					skip_session_order = policy_search_array[i]->session_order + 1;
					continue;
				}

				// Passed all tests, found a match
				matched_policy = policy_search_array[i];
				break;
			}
		}
	}

	if (route_rule_id_count == 1) {
		*return_route_rule_id = route_rule_id_array[0];
	} else if (route_rule_id_count > 1) {
		*return_route_rule_id = necp_create_aggregate_route_rule(route_rule_id_array);
	}

	return matched_policy;
}
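
// Bypass IP-output policy evaluation for loopback traffic, keepalive packets,
// and internal co-processor traffic, depending on the corresponding sysctls.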
static bool
necp_output_bypass(struct mbuf *packet)
{
	if (necp_pass_loopback > 0 && necp_is_loopback(NULL, NULL, NULL, packet, IFSCOPE_NONE)) {
		return true;
	}
	if (necp_pass_keepalives > 0 && necp_get_is_keepalive_from_packet(packet)) {
		return true;
	}
	if (necp_is_intcoproc(NULL, packet)) {
		return true;
	}
	return false;
}
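
// Find the matching IP-output policy for an outgoing IPv4 packet and report
// the result, result parameter, and any matched route rule to the caller.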
necp_kernel_policy_id
necp_ip_output_find_policy_match(struct mbuf *packet, int flags, struct ip_out_args *ipoa, struct rtentry *rt,
    necp_kernel_policy_result *result, necp_kernel_policy_result_parameter *result_parameter)
{
	struct ip *ip = NULL;
	int hlen = sizeof(struct ip);
	necp_kernel_policy_id socket_policy_id = NECP_KERNEL_POLICY_ID_NONE;
	necp_kernel_policy_id socket_skip_policy_id = NECP_KERNEL_POLICY_ID_NONE;
	necp_kernel_policy_id matched_policy_id = NECP_KERNEL_POLICY_ID_NONE;
	struct necp_kernel_ip_output_policy *matched_policy = NULL;
	u_int16_t protocol = 0;
	u_int32_t bound_interface_index = 0;
	u_int32_t last_interface_index = 0;
	union necp_sockaddr_union local_addr;
	union necp_sockaddr_union remote_addr;
	u_int32_t drop_dest_policy_result = NECP_KERNEL_POLICY_RESULT_NONE;
	necp_drop_all_bypass_check_result_t drop_all_bypass = NECP_DROP_ALL_BYPASS_CHECK_RESULT_NONE;

	if (result) {
		*result = 0;
	}

	if (result_parameter) {
		memset(result_parameter, 0, sizeof(*result_parameter));
	}

	if (packet == NULL) {
		return NECP_KERNEL_POLICY_ID_NONE;
	}

	socket_policy_id = necp_get_policy_id_from_packet(packet);
	socket_skip_policy_id = necp_get_skip_policy_id_from_packet(packet);

	// Exit early for an empty list
	// Don't lock. Possible race condition, but we don't want the performance hit.
	if (necp_kernel_ip_output_policies_count == 0 ||
	    (socket_policy_id == NECP_KERNEL_POLICY_ID_NONE && necp_kernel_ip_output_policies_non_id_count == 0 && necp_drop_dest_policy.entry_count == 0)) {
		if (necp_drop_all_order > 0) {
			matched_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
			if (result) {
				if (necp_output_bypass(packet)) {
					*result = NECP_KERNEL_POLICY_RESULT_PASS;
				} else {
					*result = NECP_KERNEL_POLICY_RESULT_DROP;
				}
			}
		}

		return matched_policy_id;
	}

	// Check for loopback exception
	if (necp_output_bypass(packet)) {
		matched_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
		if (result) {
			*result = NECP_KERNEL_POLICY_RESULT_PASS;
		}
		return matched_policy_id;
	}

	last_interface_index = necp_get_last_interface_index_from_packet(packet);

	// Process packet to get relevant fields
	ip = mtod(packet, struct ip *);
#ifdef _IP_VHL
	hlen = _IP_VHL_HL(ip->ip_vhl) << 2;
#else
	hlen = ip->ip_hl << 2;
#endif

	protocol = ip->ip_p;

	if ((flags & IP_OUTARGS) && (ipoa != NULL) &&
	    (ipoa->ipoa_flags & IPOAF_BOUND_IF) &&
	    ipoa->ipoa_boundif != IFSCOPE_NONE) {
		bound_interface_index = ipoa->ipoa_boundif;
	}

	local_addr.sin.sin_family = AF_INET;
	local_addr.sin.sin_len = sizeof(struct sockaddr_in);
	memcpy(&local_addr.sin.sin_addr, &ip->ip_src, sizeof(ip->ip_src));

	remote_addr.sin.sin_family = AF_INET;
	remote_addr.sin.sin_len = sizeof(struct sockaddr_in);
	memcpy(&((struct sockaddr_in *)&remote_addr)->sin_addr, &ip->ip_dst, sizeof(ip->ip_dst));

	switch (protocol) {
	case IPPROTO_TCP: {
		struct tcphdr th;
		if ((int)(hlen + sizeof(th)) <= packet->m_pkthdr.len) {
			m_copydata(packet, hlen, sizeof(th), (u_int8_t *)&th);
			((struct sockaddr_in *)&local_addr)->sin_port = th.th_sport;
			((struct sockaddr_in *)&remote_addr)->sin_port = th.th_dport;
		}
		break;
	}
	case IPPROTO_UDP: {
		struct udphdr uh;
		if ((int)(hlen + sizeof(uh)) <= packet->m_pkthdr.len) {
			m_copydata(packet, hlen, sizeof(uh), (u_int8_t *)&uh);
			((struct sockaddr_in *)&local_addr)->sin_port = uh.uh_sport;
			((struct sockaddr_in *)&remote_addr)->sin_port = uh.uh_dport;
		}
		break;
	}
	default: {
		((struct sockaddr_in *)&local_addr)->sin_port = 0;
		((struct sockaddr_in *)&remote_addr)->sin_port = 0;
		break;
	}
	}

	// Match packet to policy
	lck_rw_lock_shared(&necp_kernel_policy_lock);
	u_int32_t route_rule_id = 0;
	matched_policy = necp_ip_output_find_policy_match_locked(socket_policy_id, socket_skip_policy_id, bound_interface_index, last_interface_index, protocol, &local_addr, &remote_addr, rt, &route_rule_id, &drop_dest_policy_result, &drop_all_bypass);
	if (matched_policy) {
		matched_policy_id = matched_policy->id;
		if (result) {
			*result = matched_policy->result;
		}

		if (result_parameter) {
			memcpy(result_parameter, &matched_policy->result_parameter, sizeof(matched_policy->result_parameter));
		}

		if (route_rule_id != 0 &&
		    packet->m_pkthdr.necp_mtag.necp_route_rule_id == 0) {
			packet->m_pkthdr.necp_mtag.necp_route_rule_id = route_rule_id;
		}

		if (necp_debug > 1) {
			NECPLOG(LOG_DEBUG, "IP Output: (ID %d BoundInterface %d LastInterface %d Proto %d) Policy %d Result %d Parameter %d Route Rule %u", socket_policy_id, bound_interface_index, last_interface_index, protocol, matched_policy->id, matched_policy->result, matched_policy->result_parameter.tunnel_interface_index, route_rule_id);
		}
	} else {
		bool drop_all = false;
		/*
		 * Apply drop-all only to packets which have never matched a primary policy (check
		 * if the packet saved policy id is none or falls within the socket policy id range).
		 */
		if (socket_policy_id < NECP_KERNEL_POLICY_ID_FIRST_VALID_IP &&
		    (necp_drop_all_order > 0 || drop_dest_policy_result == NECP_KERNEL_POLICY_RESULT_DROP)) {
			drop_all = true;
			if (drop_all_bypass == NECP_DROP_ALL_BYPASS_CHECK_RESULT_NONE) {
				drop_all_bypass = necp_check_drop_all_bypass_result(NULL);
			}
		}
		if (drop_all && drop_all_bypass == NECP_DROP_ALL_BYPASS_CHECK_RESULT_FALSE) {
			matched_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
			if (result) {
				*result = NECP_KERNEL_POLICY_RESULT_DROP;
			}
		} else if (route_rule_id != 0 &&
		    packet->m_pkthdr.necp_mtag.necp_route_rule_id == 0) {
			// If we matched a route rule, mark it
			packet->m_pkthdr.necp_mtag.necp_route_rule_id = route_rule_id;
		}
	}

	lck_rw_done(&necp_kernel_policy_lock);

	return matched_policy_id;
}
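
// IPv6 counterpart of the IP-output policy match: parse the IPv6 header and
// last protocol header, then evaluate the same policy list.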
necp_kernel_policy_id
necp_ip6_output_find_policy_match(struct mbuf *packet, int flags, struct ip6_out_args *ip6oa, struct rtentry *rt,
    necp_kernel_policy_result *result, necp_kernel_policy_result_parameter *result_parameter)
{
	struct ip6_hdr *ip6 = NULL;
	int next = -1;
	int offset = 0;
	necp_kernel_policy_id socket_policy_id = NECP_KERNEL_POLICY_ID_NONE;
	necp_kernel_policy_id socket_skip_policy_id = NECP_KERNEL_POLICY_ID_NONE;
	necp_kernel_policy_id matched_policy_id = NECP_KERNEL_POLICY_ID_NONE;
	struct necp_kernel_ip_output_policy *matched_policy = NULL;
	u_int16_t protocol = 0;
	u_int32_t bound_interface_index = 0;
	u_int32_t last_interface_index = 0;
	union necp_sockaddr_union local_addr;
	union necp_sockaddr_union remote_addr;
	u_int32_t drop_dest_policy_result = NECP_KERNEL_POLICY_RESULT_NONE;
	necp_drop_all_bypass_check_result_t drop_all_bypass = NECP_DROP_ALL_BYPASS_CHECK_RESULT_NONE;

	if (result) {
		*result = 0;
	}

	if (result_parameter) {
		memset(result_parameter, 0, sizeof(*result_parameter));
	}

	if (packet == NULL) {
		return NECP_KERNEL_POLICY_ID_NONE;
	}

	socket_policy_id = necp_get_policy_id_from_packet(packet);
	socket_skip_policy_id = necp_get_skip_policy_id_from_packet(packet);

	// Exit early for an empty list
	// Don't lock. Possible race condition, but we don't want the performance hit.
	if (necp_kernel_ip_output_policies_count == 0 ||
	    (socket_policy_id == NECP_KERNEL_POLICY_ID_NONE && necp_kernel_ip_output_policies_non_id_count == 0 && necp_drop_dest_policy.entry_count == 0)) {
		if (necp_drop_all_order > 0) {
			matched_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
			if (result) {
				if (necp_output_bypass(packet)) {
					*result = NECP_KERNEL_POLICY_RESULT_PASS;
				} else {
					*result = NECP_KERNEL_POLICY_RESULT_DROP;
				}
			}
		}

		return matched_policy_id;
	}

	// Check for loopback exception
	if (necp_output_bypass(packet)) {
		matched_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
		if (result) {
			*result = NECP_KERNEL_POLICY_RESULT_PASS;
		}
		return matched_policy_id;
	}

	last_interface_index = necp_get_last_interface_index_from_packet(packet);

	// Process packet to get relevant fields
	ip6 = mtod(packet, struct ip6_hdr *);

	if ((flags & IPV6_OUTARGS) && (ip6oa != NULL) &&
	    (ip6oa->ip6oa_flags & IP6OAF_BOUND_IF) &&
	    ip6oa->ip6oa_boundif != IFSCOPE_NONE) {
		bound_interface_index = ip6oa->ip6oa_boundif;
	}

	((struct sockaddr_in6 *)&local_addr)->sin6_family = AF_INET6;
	((struct sockaddr_in6 *)&local_addr)->sin6_len = sizeof(struct sockaddr_in6);
	memcpy(&((struct sockaddr_in6 *)&local_addr)->sin6_addr, &ip6->ip6_src, sizeof(ip6->ip6_src));

	((struct sockaddr_in6 *)&remote_addr)->sin6_family = AF_INET6;
	((struct sockaddr_in6 *)&remote_addr)->sin6_len = sizeof(struct sockaddr_in6);
	memcpy(&((struct sockaddr_in6 *)&remote_addr)->sin6_addr, &ip6->ip6_dst, sizeof(ip6->ip6_dst));

	offset = ip6_lasthdr(packet, 0, IPPROTO_IPV6, &next);
	if (offset >= 0 && packet->m_pkthdr.len >= offset) {
		protocol = next;
		switch (protocol) {
		case IPPROTO_TCP: {
			struct tcphdr th;
			if ((int)(offset + sizeof(th)) <= packet->m_pkthdr.len) {
				m_copydata(packet, offset, sizeof(th), (u_int8_t *)&th);
				((struct sockaddr_in6 *)&local_addr)->sin6_port = th.th_sport;
				((struct sockaddr_in6 *)&remote_addr)->sin6_port = th.th_dport;
			}
			break;
		}
		case IPPROTO_UDP: {
			struct udphdr uh;
			if ((int)(offset + sizeof(uh)) <= packet->m_pkthdr.len) {
				m_copydata(packet, offset, sizeof(uh), (u_int8_t *)&uh);
				((struct sockaddr_in6 *)&local_addr)->sin6_port = uh.uh_sport;
				((struct sockaddr_in6 *)&remote_addr)->sin6_port = uh.uh_dport;
			}
			break;
		}
		default: {
			((struct sockaddr_in6 *)&local_addr)->sin6_port = 0;
			((struct sockaddr_in6 *)&remote_addr)->sin6_port = 0;
			break;
		}
		}
	}

	// Match packet to policy
	lck_rw_lock_shared(&necp_kernel_policy_lock);
	u_int32_t route_rule_id = 0;
	matched_policy = necp_ip_output_find_policy_match_locked(socket_policy_id, socket_skip_policy_id, bound_interface_index, last_interface_index, protocol, &local_addr, &remote_addr, rt, &route_rule_id, &drop_dest_policy_result, &drop_all_bypass);
	if (matched_policy) {
		matched_policy_id = matched_policy->id;
		if (result) {
			*result = matched_policy->result;
		}

		if (result_parameter) {
			memcpy(result_parameter, &matched_policy->result_parameter, sizeof(matched_policy->result_parameter));
		}

		if (route_rule_id != 0 &&
		    packet->m_pkthdr.necp_mtag.necp_route_rule_id == 0) {
			packet->m_pkthdr.necp_mtag.necp_route_rule_id = route_rule_id;
		}

		if (necp_debug > 1) {
			NECPLOG(LOG_DEBUG, "IP6 Output: (ID %d BoundInterface %d LastInterface %d Proto %d) Policy %d Result %d Parameter %d Route Rule %u", socket_policy_id, bound_interface_index, last_interface_index, protocol, matched_policy->id, matched_policy->result, matched_policy->result_parameter.tunnel_interface_index, route_rule_id);
		}
	} else {
		bool drop_all = false;
		/*
		 * Apply drop-all only to packets which have never matched a primary policy (check
		 * if the packet saved policy id is none or falls within the socket policy id range).
		 */
		if (socket_policy_id < NECP_KERNEL_POLICY_ID_FIRST_VALID_IP &&
		    (necp_drop_all_order > 0 || drop_dest_policy_result == NECP_KERNEL_POLICY_RESULT_DROP)) {
			drop_all = true;
			if (drop_all_bypass == NECP_DROP_ALL_BYPASS_CHECK_RESULT_NONE) {
				drop_all_bypass = necp_check_drop_all_bypass_result(NULL);
			}
		}
		if (drop_all && drop_all_bypass == NECP_DROP_ALL_BYPASS_CHECK_RESULT_FALSE) {
			matched_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
			if (result) {
				*result = NECP_KERNEL_POLICY_RESULT_DROP;
			}
		} else if (route_rule_id != 0 &&
		    packet->m_pkthdr.necp_mtag.necp_route_rule_id == 0) {
			// If we matched a route rule, mark it
			packet->m_pkthdr.necp_mtag.necp_route_rule_id = route_rule_id;
		}
	}

	lck_rw_done(&necp_kernel_policy_lock);

	return matched_policy_id;
}
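
// Address helpers: range, subnet, and comparison utilities used by the
// policy condition checks above.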
static bool
necp_is_addr_in_range(struct sockaddr *addr, struct sockaddr *range_start, struct sockaddr *range_end)
{
	int cmp = 0;

	if (addr == NULL || range_start == NULL || range_end == NULL) {
		return FALSE;
	}

	/* Must be greater than or equal to start */
	cmp = necp_addr_compare(addr, range_start, 1);
	if (cmp != 0 && cmp != 1) {
		return FALSE;
	}

	/* Must be less than or equal to end */
	cmp = necp_addr_compare(addr, range_end, 1);
	if (cmp != 0 && cmp != -1) {
		return FALSE;
	}

	return TRUE;
}
static bool
necp_is_range_in_range(struct sockaddr *inner_range_start, struct sockaddr *inner_range_end, struct sockaddr *range_start, struct sockaddr *range_end)
{
	int cmp = 0;

	if (inner_range_start == NULL || inner_range_end == NULL || range_start == NULL || range_end == NULL) {
		return FALSE;
	}

	/* Must be greater than or equal to start */
	cmp = necp_addr_compare(inner_range_start, range_start, 1);
	if (cmp != 0 && cmp != 1) {
		return FALSE;
	}

	/* Must be less than or equal to end */
	cmp = necp_addr_compare(inner_range_end, range_end, 1);
	if (cmp != 0 && cmp != -1) {
		return FALSE;
	}

	return TRUE;
}
static bool
necp_is_addr_in_subnet(struct sockaddr *addr, struct sockaddr *subnet_addr, u_int8_t subnet_prefix)
{
	if (addr == NULL || subnet_addr == NULL) {
		return FALSE;
	}

	if (addr->sa_family != subnet_addr->sa_family || addr->sa_len != subnet_addr->sa_len) {
		return FALSE;
	}

	switch (addr->sa_family) {
	case AF_INET: {
		if (satosin(subnet_addr)->sin_port != 0 &&
		    satosin(addr)->sin_port != satosin(subnet_addr)->sin_port) {
			return FALSE;
		}
		return necp_buffer_compare_with_bit_prefix((u_int8_t *)&satosin(addr)->sin_addr, (u_int8_t *)&satosin(subnet_addr)->sin_addr, subnet_prefix);
	}
	case AF_INET6: {
		if (satosin6(subnet_addr)->sin6_port != 0 &&
		    satosin6(addr)->sin6_port != satosin6(subnet_addr)->sin6_port) {
			return FALSE;
		}
		if (satosin6(addr)->sin6_scope_id &&
		    satosin6(subnet_addr)->sin6_scope_id &&
		    satosin6(addr)->sin6_scope_id != satosin6(subnet_addr)->sin6_scope_id) {
			return FALSE;
		}
		return necp_buffer_compare_with_bit_prefix((u_int8_t *)&satosin6(addr)->sin6_addr, (u_int8_t *)&satosin6(subnet_addr)->sin6_addr, subnet_prefix);
	}
	default: {
		return FALSE;
	}
	}

	return FALSE;
}
/*
 * Return values:
 * -1: sa1 < sa2
 *  0: sa1 == sa2
 *  1: sa1 > sa2
 *  2: Not comparable or error
 */
static int
necp_addr_compare(struct sockaddr *sa1, struct sockaddr *sa2, int check_port)
{
	int result = 0;
	int port_result = 0;

	if (sa1->sa_family != sa2->sa_family || sa1->sa_len != sa2->sa_len) {
		return 2;
	}

	if (sa1->sa_len == 0) {
		return 0;
	}

	switch (sa1->sa_family) {
	case AF_INET: {
		if (sa1->sa_len != sizeof(struct sockaddr_in)) {
			return 2;
		}

		result = memcmp(&satosin(sa1)->sin_addr.s_addr, &satosin(sa2)->sin_addr.s_addr, sizeof(satosin(sa1)->sin_addr.s_addr));

		if (check_port) {
			if (satosin(sa1)->sin_port < satosin(sa2)->sin_port) {
				port_result = -1;
			} else if (satosin(sa1)->sin_port > satosin(sa2)->sin_port) {
				port_result = 1;
			}

			if (result == 0) {
				result = port_result;
			} else if ((result > 0 && port_result < 0) || (result < 0 && port_result > 0)) {
				return 2;
			}
		}

		break;
	}
	case AF_INET6: {
		if (sa1->sa_len != sizeof(struct sockaddr_in6)) {
			return 2;
		}

		if (satosin6(sa1)->sin6_scope_id != satosin6(sa2)->sin6_scope_id) {
			return 2;
		}

		result = memcmp(&satosin6(sa1)->sin6_addr.s6_addr[0], &satosin6(sa2)->sin6_addr.s6_addr[0], sizeof(struct in6_addr));

		if (check_port) {
			if (satosin6(sa1)->sin6_port < satosin6(sa2)->sin6_port) {
				port_result = -1;
			} else if (satosin6(sa1)->sin6_port > satosin6(sa2)->sin6_port) {
				port_result = 1;
			}

			if (result == 0) {
				result = port_result;
			} else if ((result > 0 && port_result < 0) || (result < 0 && port_result > 0)) {
				return 2;
			}
		}

		break;
	}
	default: {
		result = memcmp(sa1, sa2, sa1->sa_len);
		break;
	}
	}

	if (result < 0) {
		result = (-1);
	} else if (result > 0) {
		result = (1);
	}

	return result;
}
static bool
necp_buffer_compare_with_bit_prefix(u_int8_t *p1, u_int8_t *p2, u_int32_t bits)
{
	u_int8_t mask;

	/* Handle null pointers */
	if (p1 == NULL || p2 == NULL) {
		return p1 == p2;
	}

	while (bits >= 8) {
		if (*p1++ != *p2++) {
			return FALSE;
		}
		bits -= 8;
	}

	if (bits > 0) {
		mask = ~((1 << (8 - bits)) - 1);
		if ((*p1 & mask) != (*p2 & mask)) {
			return FALSE;
		}
	}
	return TRUE;
}
static bool
necp_addr_is_empty(struct sockaddr *addr)
{
	if (addr == NULL) {
		return TRUE;
	}

	if (addr->sa_len == 0) {
		return TRUE;
	}

	switch (addr->sa_family) {
	case AF_INET: {
		static struct sockaddr_in ipv4_empty_address = {
			.sin_len = sizeof(struct sockaddr_in),
			.sin_family = AF_INET,
			.sin_port = 0,
			.sin_addr = { .s_addr = 0 }, // 0.0.0.0
		};
		if (necp_addr_compare(addr, (struct sockaddr *)&ipv4_empty_address, 0) == 0) {
			return TRUE;
		} else {
			return FALSE;
		}
	}
	case AF_INET6: {
		static struct sockaddr_in6 ipv6_empty_address = {
			.sin6_len = sizeof(struct sockaddr_in6),
			.sin6_family = AF_INET6,
			.sin6_port = 0,
			.sin6_flowinfo = 0,
			.sin6_addr = IN6ADDR_ANY_INIT, // ::
			.sin6_scope_id = 0,
		};
		if (necp_addr_compare(addr, (struct sockaddr *)&ipv6_empty_address, 0) == 0) {
			return TRUE;
		} else {
			return FALSE;
		}
	}
	default:
		return FALSE;
	}

	return FALSE;
}
static bool
necp_update_qos_marking(struct ifnet *ifp, u_int32_t route_rule_id)
{
	bool qos_marking = FALSE;
	int exception_index = 0;
	struct necp_route_rule *route_rule = NULL;

	route_rule = necp_lookup_route_rule_locked(&necp_route_rules, route_rule_id);
	if (route_rule == NULL) {
		qos_marking = FALSE;
		goto done;
	}

	qos_marking = (route_rule->default_action == NECP_ROUTE_RULE_QOS_MARKING) ? TRUE : FALSE;

	if (ifp == NULL) {
		goto done;
	}

	for (exception_index = 0; exception_index < MAX_ROUTE_RULE_INTERFACES; exception_index++) {
		if (route_rule->exception_if_indices[exception_index] == 0) {
			break;
		}
		if (route_rule->exception_if_actions[exception_index] != NECP_ROUTE_RULE_QOS_MARKING) {
			continue;
		}
		if (route_rule->exception_if_indices[exception_index] == ifp->if_index) {
			qos_marking = TRUE;
			if (necp_debug > 2) {
				NECPLOG(LOG_DEBUG, "QoS Marking : Interface match %d for Rule %d Allowed %d",
				    route_rule->exception_if_indices[exception_index], route_rule_id, qos_marking);
			}
			goto done;
		}
	}

	if ((route_rule->cellular_action == NECP_ROUTE_RULE_QOS_MARKING && IFNET_IS_CELLULAR(ifp)) ||
	    (route_rule->wifi_action == NECP_ROUTE_RULE_QOS_MARKING && IFNET_IS_WIFI(ifp)) ||
	    (route_rule->wired_action == NECP_ROUTE_RULE_QOS_MARKING && IFNET_IS_WIRED(ifp)) ||
	    (route_rule->expensive_action == NECP_ROUTE_RULE_QOS_MARKING && IFNET_IS_EXPENSIVE(ifp)) ||
	    (route_rule->constrained_action == NECP_ROUTE_RULE_QOS_MARKING && IFNET_IS_CONSTRAINED(ifp))) {
		qos_marking = TRUE;
		if (necp_debug > 2) {
			NECPLOG(LOG_DEBUG, "QoS Marking: C:%d WF:%d W:%d E:%d Cn:%d for Rule %d Allowed %d",
			    route_rule->cellular_action, route_rule->wifi_action, route_rule->wired_action,
			    route_rule->expensive_action, route_rule->constrained_action, route_rule_id, qos_marking);
		}
		goto done;
	}
done:
	if (necp_debug > 1) {
		NECPLOG(LOG_DEBUG, "QoS Marking: Rule %d ifp %s Allowed %d",
		    route_rule_id, ifp ? ifp->if_xname : "", qos_marking);
	}
	return qos_marking;
}
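
// Re-evaluate and cache the QoS marking decision for a socket, expanding
// aggregate route rules into their sub-rules before updating so_flags1.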
void
necp_socket_update_qos_marking(struct inpcb *inp, struct rtentry *route, struct ifnet *interface, u_int32_t route_rule_id)
{
	bool qos_marking = FALSE;
	struct ifnet *ifp = interface = NULL;

	if (net_qos_policy_restricted == 0) {
		return;
	}
	if (inp->inp_socket == NULL) {
		return;
	}
	if ((inp->inp_socket->so_flags1 & SOF1_QOSMARKING_POLICY_OVERRIDE)) {
		return;
	}
	/*
	 * This is racy but we do not need the performance hit of taking necp_kernel_policy_lock
	 */
	if (inp->inp_policyresult.results.qos_marking_gencount == necp_kernel_socket_policies_gencount) {
		return;
	}

	lck_rw_lock_shared(&necp_kernel_policy_lock);

	if (ifp == NULL && route != NULL) {
		ifp = route->rt_ifp;
	}
	/*
	 * By default, until we have a interface, do not mark and reevaluate the Qos marking policy
	 */
	if (ifp == NULL || route_rule_id == 0) {
		qos_marking = FALSE;
		goto done;
	}

	if (ROUTE_RULE_IS_AGGREGATE(route_rule_id)) {
		struct necp_aggregate_route_rule *aggregate_route_rule = necp_lookup_aggregate_route_rule_locked(route_rule_id);
		if (aggregate_route_rule != NULL) {
			int index = 0;
			for (index = 0; index < MAX_AGGREGATE_ROUTE_RULES; index++) {
				u_int32_t sub_route_rule_id = aggregate_route_rule->rule_ids[index];
				if (sub_route_rule_id == 0) {
					break;
				}
				qos_marking = necp_update_qos_marking(ifp, sub_route_rule_id);
				if (qos_marking == TRUE) {
					break;
				}
			}
		}
	} else {
		qos_marking = necp_update_qos_marking(ifp, route_rule_id);
	}
	/*
	 * Now that we have an interface we remember the gencount
	 */
	inp->inp_policyresult.results.qos_marking_gencount = necp_kernel_socket_policies_gencount;

done:
	lck_rw_done(&necp_kernel_policy_lock);

	if (qos_marking == TRUE) {
		inp->inp_socket->so_flags1 |= SOF1_QOSMARKING_ALLOWED;
	} else {
		inp->inp_socket->so_flags1 &= ~SOF1_QOSMARKING_ALLOWED;
	}
}
static bool
necp_route_is_lqm_abort(struct ifnet *ifp, struct ifnet *delegated_ifp)
{
	if (ifp != NULL &&
	    (ifp->if_interface_state.valid_bitmask & IF_INTERFACE_STATE_LQM_STATE_VALID) &&
	    ifp->if_interface_state.lqm_state == IFNET_LQM_THRESH_ABORT) {
		return true;
	}
	if (delegated_ifp != NULL &&
	    (delegated_ifp->if_interface_state.valid_bitmask & IF_INTERFACE_STATE_LQM_STATE_VALID) &&
	    delegated_ifp->if_interface_state.lqm_state == IFNET_LQM_THRESH_ABORT) {
		return true;
	}
	return false;
}
static bool
necp_route_is_allowed_inner(struct rtentry *route, struct ifnet *ifp, u_int32_t route_rule_id, u_int32_t *interface_type_denied)
{
	bool default_is_allowed = TRUE;
	u_int8_t type_aggregate_action = NECP_ROUTE_RULE_NONE;
	int exception_index = 0;
	struct ifnet *delegated_ifp = NULL;
	struct necp_route_rule *route_rule = NULL;

	route_rule = necp_lookup_route_rule_locked(&necp_route_rules, route_rule_id);
	if (route_rule == NULL) {
		return TRUE;
	}

	default_is_allowed = (route_rule->default_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? FALSE : TRUE;
	if (ifp == NULL && route != NULL) {
		ifp = route->rt_ifp;
	}
	if (ifp == NULL) {
		if (necp_debug > 1 && !default_is_allowed) {
			NECPLOG(LOG_DEBUG, "Route Allowed: No interface for route, using default for Rule %d Allowed %d", route_rule_id, default_is_allowed);
		}
		return default_is_allowed;
	}

	delegated_ifp = ifp->if_delegated.ifp;
	for (exception_index = 0; exception_index < MAX_ROUTE_RULE_INTERFACES; exception_index++) {
		if (route_rule->exception_if_indices[exception_index] == 0) {
			break;
		}
		if (route_rule->exception_if_indices[exception_index] == ifp->if_index ||
		    (delegated_ifp != NULL && route_rule->exception_if_indices[exception_index] == delegated_ifp->if_index)) {
			if (route_rule->exception_if_actions[exception_index] == NECP_ROUTE_RULE_DENY_LQM_ABORT) {
				const bool lqm_abort = necp_route_is_lqm_abort(ifp, delegated_ifp);
				if (necp_debug > 1 && lqm_abort) {
					NECPLOG(LOG_DEBUG, "Route Allowed: Interface match %d for Rule %d Deny LQM Abort",
					    route_rule->exception_if_indices[exception_index], route_rule_id);
				}
				return false;
			} else if (IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(route_rule->exception_if_actions[exception_index])) {
				if (necp_debug > 1) {
					NECPLOG(LOG_DEBUG, "Route Allowed: Interface match %d for Rule %d Allowed %d", route_rule->exception_if_indices[exception_index], route_rule_id, ((route_rule->exception_if_actions[exception_index] == NECP_ROUTE_RULE_DENY_INTERFACE) ? FALSE : TRUE));
				}
				return (route_rule->exception_if_actions[exception_index] == NECP_ROUTE_RULE_DENY_INTERFACE) ? FALSE : TRUE;
			}
		}
	}

	if (IFNET_IS_CELLULAR(ifp)) {
		if (route_rule->cellular_action == NECP_ROUTE_RULE_DENY_LQM_ABORT) {
			if (necp_route_is_lqm_abort(ifp, delegated_ifp)) {
				if (interface_type_denied != NULL) {
					*interface_type_denied = IFRTYPE_FUNCTIONAL_CELLULAR;
				}
				// Mark aggregate action as deny
				type_aggregate_action = NECP_ROUTE_RULE_DENY_INTERFACE;
			}
		} else if (IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(route_rule->cellular_action)) {
			if (interface_type_denied != NULL) {
				*interface_type_denied = IFRTYPE_FUNCTIONAL_CELLULAR;
			}
			if (type_aggregate_action == NECP_ROUTE_RULE_NONE ||
			    (type_aggregate_action == NECP_ROUTE_RULE_ALLOW_INTERFACE &&
			    route_rule->cellular_action == NECP_ROUTE_RULE_DENY_INTERFACE)) {
				// Deny wins if there is a conflict
				type_aggregate_action = route_rule->cellular_action;
			}
		}
	}

	if (IFNET_IS_WIFI(ifp)) {
		if (route_rule->wifi_action == NECP_ROUTE_RULE_DENY_LQM_ABORT) {
			if (necp_route_is_lqm_abort(ifp, delegated_ifp)) {
				if (interface_type_denied != NULL) {
					*interface_type_denied = IFRTYPE_FUNCTIONAL_WIFI_INFRA;
				}
				// Mark aggregate action as deny
				type_aggregate_action = NECP_ROUTE_RULE_DENY_INTERFACE;
			}
		} else if (IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(route_rule->wifi_action)) {
			if (interface_type_denied != NULL) {
				*interface_type_denied = IFRTYPE_FUNCTIONAL_WIFI_INFRA;
			}
			if (type_aggregate_action == NECP_ROUTE_RULE_NONE ||
			    (type_aggregate_action == NECP_ROUTE_RULE_ALLOW_INTERFACE &&
			    route_rule->wifi_action == NECP_ROUTE_RULE_DENY_INTERFACE)) {
				// Deny wins if there is a conflict
				type_aggregate_action = route_rule->wifi_action;
			}
		}
	}

	if (IFNET_IS_WIRED(ifp)) {
		if (route_rule->wired_action == NECP_ROUTE_RULE_DENY_LQM_ABORT) {
			if (necp_route_is_lqm_abort(ifp, delegated_ifp)) {
				if (interface_type_denied != NULL) {
					*interface_type_denied = IFRTYPE_FUNCTIONAL_WIRED;
				}
				// Mark aggregate action as deny
				type_aggregate_action = NECP_ROUTE_RULE_DENY_INTERFACE;
			}
		} else if (IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(route_rule->wired_action)) {
			if (interface_type_denied != NULL) {
				*interface_type_denied = IFRTYPE_FUNCTIONAL_WIRED;
			}
			if (type_aggregate_action == NECP_ROUTE_RULE_NONE ||
			    (type_aggregate_action == NECP_ROUTE_RULE_ALLOW_INTERFACE &&
			    route_rule->wired_action == NECP_ROUTE_RULE_DENY_INTERFACE)) {
				// Deny wins if there is a conflict
				type_aggregate_action = route_rule->wired_action;
			}
		}
	}

	if (IFNET_IS_EXPENSIVE(ifp)) {
		if (route_rule->expensive_action == NECP_ROUTE_RULE_DENY_LQM_ABORT) {
			if (necp_route_is_lqm_abort(ifp, delegated_ifp)) {
				// Mark aggregate action as deny
				type_aggregate_action = NECP_ROUTE_RULE_DENY_INTERFACE;
			}
		} else if (IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(route_rule->expensive_action)) {
			if (type_aggregate_action == NECP_ROUTE_RULE_NONE ||
			    (type_aggregate_action == NECP_ROUTE_RULE_ALLOW_INTERFACE &&
			    route_rule->expensive_action == NECP_ROUTE_RULE_DENY_INTERFACE)) {
				// Deny wins if there is a conflict
				type_aggregate_action = route_rule->expensive_action;
			}
		}
	}

	if (IFNET_IS_CONSTRAINED(ifp)) {
		if (route_rule->constrained_action == NECP_ROUTE_RULE_DENY_LQM_ABORT) {
			if (necp_route_is_lqm_abort(ifp, delegated_ifp)) {
				// Mark aggregate action as deny
				type_aggregate_action = NECP_ROUTE_RULE_DENY_INTERFACE;
			}
		} else if (IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(route_rule->constrained_action)) {
			if (type_aggregate_action == NECP_ROUTE_RULE_NONE ||
			    (type_aggregate_action == NECP_ROUTE_RULE_ALLOW_INTERFACE &&
			    route_rule->constrained_action == NECP_ROUTE_RULE_DENY_INTERFACE)) {
				// Deny wins if there is a conflict
				type_aggregate_action = route_rule->constrained_action;
			}
		}
	}

	if (type_aggregate_action != NECP_ROUTE_RULE_NONE) {
		if (necp_debug > 1) {
			NECPLOG(LOG_DEBUG, "Route Allowed: C:%d WF:%d W:%d E:%d for Rule %d Allowed %d", route_rule->cellular_action, route_rule->wifi_action, route_rule->wired_action, route_rule->expensive_action, route_rule_id, ((type_aggregate_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? FALSE : TRUE));
		}
		return (type_aggregate_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? FALSE : TRUE;
	}

	if (necp_debug > 1 && !default_is_allowed) {
		NECPLOG(LOG_DEBUG, "Route Allowed: Using default for Rule %d Allowed %d", route_rule_id, default_is_allowed);
	}
	return default_is_allowed;
}
static bool
necp_route_is_allowed(struct rtentry *route, struct ifnet *interface, u_int32_t route_rule_id, u_int32_t *interface_type_denied)
{
	if ((route == NULL && interface == NULL) || route_rule_id == 0) {
		if (necp_debug > 1) {
			NECPLOG(LOG_DEBUG, "Route Allowed: no route or interface, Rule %d Allowed %d", route_rule_id, TRUE);
		}
		return TRUE;
	}

	if (ROUTE_RULE_IS_AGGREGATE(route_rule_id)) {
		struct necp_aggregate_route_rule *aggregate_route_rule = necp_lookup_aggregate_route_rule_locked(route_rule_id);
		if (aggregate_route_rule != NULL) {
			int index = 0;
			for (index = 0; index < MAX_AGGREGATE_ROUTE_RULES; index++) {
				u_int32_t sub_route_rule_id = aggregate_route_rule->rule_ids[index];
				if (sub_route_rule_id == 0) {
					break;
				}
				if (!necp_route_is_allowed_inner(route, interface, sub_route_rule_id, interface_type_denied)) {
					return FALSE;
				}
			}
		}
	} else {
		return necp_route_is_allowed_inner(route, interface, route_rule_id, interface_type_denied);
	}

	return TRUE;
}
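
// Check whether a packet may be sent over the given interface based on the
// route rule ID carried in its mbuf tag.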
bool
necp_packet_is_allowed_over_interface(struct mbuf *packet, struct ifnet *interface)
{
	bool is_allowed = TRUE;
	u_int32_t route_rule_id = necp_get_route_rule_id_from_packet(packet);
	if (route_rule_id != 0 &&
	    interface != NULL) {
		lck_rw_lock_shared(&necp_kernel_policy_lock);
		is_allowed = necp_route_is_allowed(NULL, interface, necp_get_route_rule_id_from_packet(packet), NULL);
		lck_rw_done(&necp_kernel_policy_lock);
	}
	return is_allowed;
}
static bool
necp_netagents_allow_traffic(u_int32_t *netagent_ids, size_t netagent_id_count)
{
	size_t netagent_cursor;
	for (netagent_cursor = 0; netagent_cursor < netagent_id_count; netagent_cursor++) {
		struct necp_uuid_id_mapping *mapping = NULL;
		u_int32_t netagent_id = netagent_ids[netagent_cursor];
		if (netagent_id == 0) {
			break;
		}
		mapping = necp_uuid_lookup_uuid_with_service_id_locked(netagent_id);
		if (mapping != NULL) {
			u_int32_t agent_flags = 0;
			agent_flags = netagent_get_flags(mapping->uuid);
			if (agent_flags & NETAGENT_FLAG_REGISTERED) {
				if (agent_flags & NETAGENT_FLAG_ACTIVE) {
					continue;
				} else if ((agent_flags & NETAGENT_FLAG_VOLUNTARY) == 0) {
					return FALSE;
				}
			}
		}
	}
	return TRUE;
}
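
// Core send/receive permission check for a socket: reuse the cached policy
// result when the policies and flow hash are unchanged, otherwise re-run
// policy matching under the policy lock.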
static bool
necp_socket_is_allowed_to_send_recv_internal(struct inpcb *inp, struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr, ifnet_t interface, necp_kernel_policy_id *return_policy_id, u_int32_t *return_route_rule_id, necp_kernel_policy_id *return_skip_policy_id)
{
	u_int32_t verifyifindex = interface ? interface->if_index : 0;
	bool allowed_to_receive = TRUE;
	struct necp_socket_info info;
	u_int32_t flowhash = 0;
	necp_kernel_policy_result service_action = 0;
	necp_kernel_policy_service service = { 0, 0 };
	u_int32_t route_rule_id = 0;
	struct rtentry *route = NULL;
	u_int32_t interface_type_denied = IFRTYPE_FUNCTIONAL_UNKNOWN;
	necp_kernel_policy_result drop_dest_policy_result = NECP_KERNEL_POLICY_RESULT_NONE;
	necp_drop_all_bypass_check_result_t drop_all_bypass = NECP_DROP_ALL_BYPASS_CHECK_RESULT_NONE;
	u_int32_t netagent_ids[NECP_MAX_NETAGENTS];
	memset(&netagent_ids, 0, sizeof(netagent_ids));

	if (return_policy_id) {
		*return_policy_id = NECP_KERNEL_POLICY_ID_NONE;
	}
	if (return_skip_policy_id) {
		*return_skip_policy_id = NECP_KERNEL_POLICY_ID_NONE;
	}
	if (return_route_rule_id) {
		*return_route_rule_id = 0;
	}

	if (inp == NULL) {
		goto done;
	}

	route = inp->inp_route.ro_rt;

	struct socket *so = inp->inp_socket;

	u_int32_t drop_order = necp_process_drop_order(so->so_cred);

	// Don't lock. Possible race condition, but we don't want the performance hit.
	if (necp_kernel_socket_policies_count == 0 ||
	    (!(inp->inp_flags2 & INP2_WANT_APP_POLICY) && necp_kernel_socket_policies_non_app_count == 0)) {
		if (necp_drop_all_order > 0 || drop_order > 0) {
			if (necp_socket_bypass(override_local_addr, override_remote_addr, inp)) {
				allowed_to_receive = TRUE;
			} else {
				allowed_to_receive = FALSE;
			}
		}

		goto done;
	}

	// If this socket is connected, or we are not taking addresses into account, try to reuse last result
	if ((necp_socket_is_connected(inp) || (override_local_addr == NULL && override_remote_addr == NULL)) && inp->inp_policyresult.policy_id != NECP_KERNEL_POLICY_ID_NONE) {
		bool policies_have_changed = FALSE;
		bool route_allowed = TRUE;

		if (inp->inp_policyresult.policy_gencount != necp_kernel_socket_policies_gencount) {
			policies_have_changed = TRUE;
		} else {
			if (inp->inp_policyresult.results.route_rule_id != 0) {
				lck_rw_lock_shared(&necp_kernel_policy_lock);
				if (!necp_route_is_allowed(route, interface, inp->inp_policyresult.results.route_rule_id, &interface_type_denied)) {
					route_allowed = FALSE;
				}
				lck_rw_done(&necp_kernel_policy_lock);
			}
		}

		if (!policies_have_changed) {
			if (!route_allowed ||
			    inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_DROP ||
			    inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT ||
			    (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL && interface &&
			    inp->inp_policyresult.results.result_parameter.tunnel_interface_index != verifyifindex)) {
				allowed_to_receive = FALSE;
			} else {
				if (return_policy_id) {
					*return_policy_id = inp->inp_policyresult.policy_id;
				}
				if (return_skip_policy_id) {
					*return_skip_policy_id = inp->inp_policyresult.skip_policy_id;
				}
				if (return_route_rule_id) {
					*return_route_rule_id = inp->inp_policyresult.results.route_rule_id;
				}
			}
			goto done;
		}
	}

	// Check for loopback exception
	if (necp_socket_bypass(override_local_addr, override_remote_addr, inp)) {
		allowed_to_receive = TRUE;
		goto done;
	}

	// Actually calculate policy result
	lck_rw_lock_shared(&necp_kernel_policy_lock);
	necp_socket_fillout_info_locked(inp, override_local_addr, override_remote_addr, 0, drop_order, &info);

	flowhash = necp_socket_calc_flowhash_locked(&info);
	if (inp->inp_policyresult.policy_id != NECP_KERNEL_POLICY_ID_NONE &&
	    inp->inp_policyresult.policy_gencount == necp_kernel_socket_policies_gencount &&
	    inp->inp_policyresult.flowhash == flowhash) {
		if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_DROP ||
		    inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT ||
		    (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL && interface &&
		    inp->inp_policyresult.results.result_parameter.tunnel_interface_index != verifyifindex) ||
		    (inp->inp_policyresult.results.route_rule_id != 0 &&
		    !necp_route_is_allowed(route, interface, inp->inp_policyresult.results.route_rule_id, &interface_type_denied))) {
			allowed_to_receive = FALSE;
		} else {
			if (return_policy_id) {
				*return_policy_id = inp->inp_policyresult.policy_id;
			}
			if (return_route_rule_id) {
				*return_route_rule_id = inp->inp_policyresult.results.route_rule_id;
			}
			if (return_skip_policy_id) {
				*return_skip_policy_id = inp->inp_policyresult.skip_policy_id;
			}
		}
		lck_rw_done(&necp_kernel_policy_lock);
		goto done;
	}

	u_int32_t route_rule_id_array[MAX_AGGREGATE_ROUTE_RULES];
	size_t route_rule_id_array_count = 0;
	struct necp_kernel_socket_policy *matched_policy = necp_socket_find_policy_match_with_info_locked(necp_kernel_socket_policies_map[NECP_SOCKET_MAP_APP_ID_TO_BUCKET(info.application_id)], &info, NULL, route_rule_id_array, &route_rule_id_array_count, MAX_AGGREGATE_ROUTE_RULES, &service_action, &service, netagent_ids, NULL, NECP_MAX_NETAGENTS, NULL, 0, current_proc(), return_skip_policy_id, inp->inp_route.ro_rt, &drop_dest_policy_result, &drop_all_bypass);

	if (route_rule_id_array_count == 1) {
		route_rule_id = route_rule_id_array[0];
	} else if (route_rule_id_array_count > 1) {
		route_rule_id = necp_create_aggregate_route_rule(route_rule_id_array);
	}

	if (matched_policy != NULL) {
		if (matched_policy->result == NECP_KERNEL_POLICY_RESULT_DROP ||
		    matched_policy->result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT ||
		    (matched_policy->result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL && interface &&
		    matched_policy->result_parameter.tunnel_interface_index != verifyifindex) ||
		    ((service_action == NECP_KERNEL_POLICY_RESULT_TRIGGER_SCOPED ||
		    service_action == NECP_KERNEL_POLICY_RESULT_NO_TRIGGER_SCOPED) &&
		    service.identifier != 0 && service.identifier != NECP_NULL_SERVICE_ID) ||
		    (route_rule_id != 0 &&
		    !necp_route_is_allowed(route, interface, route_rule_id, &interface_type_denied)) ||
		    !necp_netagents_allow_traffic(netagent_ids, NECP_MAX_NETAGENTS)) {
			allowed_to_receive = FALSE;
		} else {
			if (return_policy_id) {
				*return_policy_id = matched_policy->id;
			}
			if (return_route_rule_id) {
				*return_route_rule_id = route_rule_id;
			}
		}
		lck_rw_done(&necp_kernel_policy_lock);

		if (necp_debug > 1 && matched_policy->id != inp->inp_policyresult.policy_id) {
			NECPLOG(LOG_DEBUG, "Socket Send/Recv Policy: Policy %d Allowed %d", return_policy_id ? *return_policy_id : 0, allowed_to_receive);
		}
		goto done;
	} else {
		bool drop_all = false;
		if (necp_drop_all_order > 0 || info.drop_order > 0 || drop_dest_policy_result == NECP_KERNEL_POLICY_RESULT_DROP) {
			drop_all = true;
			if (drop_all_bypass == NECP_DROP_ALL_BYPASS_CHECK_RESULT_NONE) {
				drop_all_bypass = necp_check_drop_all_bypass_result(NULL);
			}
		}
		if (drop_all && drop_all_bypass == NECP_DROP_ALL_BYPASS_CHECK_RESULT_FALSE) {
			allowed_to_receive = FALSE;
		} else {
			if (return_policy_id) {
				*return_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
			}
			if (return_route_rule_id) {
				*return_route_rule_id = route_rule_id;
			}
		}
	}

	lck_rw_done(&necp_kernel_policy_lock);

done:
	if (!allowed_to_receive && interface_type_denied != IFRTYPE_FUNCTIONAL_UNKNOWN) {
		soevent(inp->inp_socket, (SO_FILT_HINT_LOCKED | SO_FILT_HINT_IFDENIED));
	}

	return allowed_to_receive;
}
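
// Address-family-specific wrappers that build sockaddr overrides before
// calling the internal send/receive check.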
bool
necp_socket_is_allowed_to_send_recv_v4(struct inpcb *inp, u_int16_t local_port, u_int16_t remote_port, struct in_addr *local_addr, struct in_addr *remote_addr, ifnet_t interface, necp_kernel_policy_id *return_policy_id, u_int32_t *return_route_rule_id, necp_kernel_policy_id *return_skip_policy_id)
{
	struct sockaddr_in local = {};
	struct sockaddr_in remote = {};
	local.sin_family = remote.sin_family = AF_INET;
	local.sin_len = remote.sin_len = sizeof(struct sockaddr_in);
	local.sin_port = local_port;
	remote.sin_port = remote_port;
	memcpy(&local.sin_addr, local_addr, sizeof(local.sin_addr));
	memcpy(&remote.sin_addr, remote_addr, sizeof(remote.sin_addr));

	return necp_socket_is_allowed_to_send_recv_internal(inp, (struct sockaddr *)&local, (struct sockaddr *)&remote, interface,
	           return_policy_id, return_route_rule_id, return_skip_policy_id);
}
bool
necp_socket_is_allowed_to_send_recv_v6(struct inpcb *inp, u_int16_t local_port, u_int16_t remote_port, struct in6_addr *local_addr, struct in6_addr *remote_addr, ifnet_t interface, necp_kernel_policy_id *return_policy_id, u_int32_t *return_route_rule_id, necp_kernel_policy_id *return_skip_policy_id)
{
	struct sockaddr_in6 local = {};
	struct sockaddr_in6 remote = {};
	local.sin6_family = remote.sin6_family = AF_INET6;
	local.sin6_len = remote.sin6_len = sizeof(struct sockaddr_in6);
	local.sin6_port = local_port;
	remote.sin6_port = remote_port;
	memcpy(&local.sin6_addr, local_addr, sizeof(local.sin6_addr));
	memcpy(&remote.sin6_addr, remote_addr, sizeof(remote.sin6_addr));

	return necp_socket_is_allowed_to_send_recv_internal(inp, (struct sockaddr *)&local, (struct sockaddr *)&remote, interface,
	           return_policy_id, return_route_rule_id, return_skip_policy_id);
}
bool
necp_socket_is_allowed_to_send_recv(struct inpcb *inp, ifnet_t interface, necp_kernel_policy_id *return_policy_id,
    u_int32_t *return_route_rule_id,
    necp_kernel_policy_id *return_skip_policy_id)
{
	return necp_socket_is_allowed_to_send_recv_internal(inp, NULL, NULL, interface,
	           return_policy_id, return_route_rule_id,
	           return_skip_policy_id);
}
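
// Stamp an outgoing packet's mbuf tag with the socket's policy ID, route rule
// ID, app ID, and skip-policy information for later IP-layer evaluation.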
int
necp_mark_packet_from_socket(struct mbuf *packet, struct inpcb *inp, necp_kernel_policy_id policy_id, u_int32_t route_rule_id,
    necp_kernel_policy_id skip_policy_id)
{
	if (packet == NULL || inp == NULL || !(packet->m_flags & M_PKTHDR)) {
		return EINVAL;
	}

	// Mark ID for Pass and IP Tunnel
	if (policy_id != NECP_KERNEL_POLICY_ID_NONE) {
		packet->m_pkthdr.necp_mtag.necp_policy_id = policy_id;
	} else if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_PASS ||
	    inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL) {
		packet->m_pkthdr.necp_mtag.necp_policy_id = inp->inp_policyresult.policy_id;
	} else {
		packet->m_pkthdr.necp_mtag.necp_policy_id = NECP_KERNEL_POLICY_ID_NONE;
	}
	packet->m_pkthdr.necp_mtag.necp_last_interface_index = 0;
	if (route_rule_id != 0) {
		packet->m_pkthdr.necp_mtag.necp_route_rule_id = route_rule_id;
	} else {
		packet->m_pkthdr.necp_mtag.necp_route_rule_id = inp->inp_policyresult.results.route_rule_id;
	}
	packet->m_pkthdr.necp_mtag.necp_app_id = (inp->inp_policyresult.app_id > UINT16_MAX ? (inp->inp_policyresult.app_id - UINT16_MAX) : inp->inp_policyresult.app_id);

	if (skip_policy_id != NECP_KERNEL_POLICY_ID_NONE &&
	    skip_policy_id != NECP_KERNEL_POLICY_ID_NO_MATCH) {
		// Only mark the skip policy if it is a valid policy ID
		packet->m_pkthdr.necp_mtag.necp_skip_policy_id = skip_policy_id;
	} else if (inp->inp_policyresult.results.filter_control_unit == NECP_FILTER_UNIT_NO_FILTER) {
		// Overload the meaning of "NECP_KERNEL_POLICY_ID_NO_MATCH"
		// to indicate that NECP_FILTER_UNIT_NO_FILTER was set
		// See necp_get_skip_policy_id_from_packet() and
		// necp_packet_should_skip_filters().
		packet->m_pkthdr.necp_mtag.necp_skip_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
	} else {
		packet->m_pkthdr.necp_mtag.necp_skip_policy_id = NECP_KERNEL_POLICY_ID_NONE;
	}

	return 0;
}
int
necp_mark_packet_from_ip(struct mbuf *packet, necp_kernel_policy_id policy_id)
{
	if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
		return EINVAL;
	}

	// Mark ID for Pass and IP Tunnel
	if (policy_id != NECP_KERNEL_POLICY_ID_NONE) {
		packet->m_pkthdr.necp_mtag.necp_policy_id = policy_id;
	} else {
		packet->m_pkthdr.necp_mtag.necp_policy_id = NECP_KERNEL_POLICY_ID_NONE;
	}

	return 0;
}
int
necp_mark_packet_from_interface(struct mbuf *packet, ifnet_t interface)
{
	if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
		return EINVAL;
	}

	// Mark ID for Pass and IP Tunnel
	if (interface != NULL) {
		packet->m_pkthdr.necp_mtag.necp_last_interface_index = interface->if_index;
	}

	return 0;
}
int
necp_mark_packet_as_keepalive(struct mbuf *packet, bool is_keepalive)
{
	if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
		return EINVAL;
	}

	if (is_keepalive) {
		packet->m_pkthdr.pkt_flags |= PKTF_KEEPALIVE;
	} else {
		packet->m_pkthdr.pkt_flags &= ~PKTF_KEEPALIVE;
	}

	return 0;
}
necp_kernel_policy_id
necp_get_policy_id_from_packet(struct mbuf *packet)
{
	if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
		return NECP_KERNEL_POLICY_ID_NONE;
	}

	return packet->m_pkthdr.necp_mtag.necp_policy_id;
}
necp_kernel_policy_id
necp_get_skip_policy_id_from_packet(struct mbuf *packet)
{
	if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
		return NECP_KERNEL_POLICY_ID_NONE;
	}

	// Check for overloaded value. See necp_mark_packet_from_socket().
	if (packet->m_pkthdr.necp_mtag.necp_skip_policy_id == NECP_KERNEL_POLICY_ID_NO_MATCH) {
		return NECP_KERNEL_POLICY_ID_NONE;
	}

	return packet->m_pkthdr.necp_mtag.necp_skip_policy_id;
}
bool
necp_packet_should_skip_filters(struct mbuf *packet)
{
	if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
		return false;
	}

	// Check for overloaded value. See necp_mark_packet_from_socket().
	return packet->m_pkthdr.necp_mtag.necp_skip_policy_id == NECP_KERNEL_POLICY_ID_NO_MATCH;
}
u_int32_t
necp_get_last_interface_index_from_packet(struct mbuf *packet)
{
	if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
		return 0;
	}

	return packet->m_pkthdr.necp_mtag.necp_last_interface_index;
}
u_int32_t
necp_get_route_rule_id_from_packet(struct mbuf *packet)
{
	if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
		return 0;
	}

	return packet->m_pkthdr.necp_mtag.necp_route_rule_id;
}
void
necp_get_app_uuid_from_packet(struct mbuf *packet,
    uuid_t app_uuid)
{
	if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
		uuid_clear(app_uuid);
		return;
	}

	bool found_mapping = FALSE;
	if (packet->m_pkthdr.necp_mtag.necp_app_id != 0) {
		lck_rw_lock_shared(&necp_kernel_policy_lock);
		necp_app_id app_id = (packet->m_pkthdr.necp_mtag.necp_app_id < UINT16_MAX ? (packet->m_pkthdr.necp_mtag.necp_app_id + UINT16_MAX) : packet->m_pkthdr.necp_mtag.necp_app_id);
		struct necp_uuid_id_mapping *entry = necp_uuid_lookup_uuid_with_app_id_locked(app_id);
		if (entry != NULL) {
			uuid_copy(app_uuid, entry->uuid);
			found_mapping = true;
		}
		lck_rw_done(&necp_kernel_policy_lock);
	}
	if (!found_mapping) {
		uuid_clear(app_uuid);
	}
}
bool
necp_get_is_keepalive_from_packet(struct mbuf *packet)
{
	if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
		return false;
	}

	return packet->m_pkthdr.pkt_flags & PKTF_KEEPALIVE;
}
u_int32_t
necp_socket_get_content_filter_control_unit(struct socket *so)
{
	struct inpcb *inp = sotoinpcb(so);

	if (inp == NULL) {
		return 0;
	}
	return inp->inp_policyresult.results.filter_control_unit;
}
bool
necp_socket_should_use_flow_divert(struct inpcb *inp)
{
	if (inp == NULL) {
		return FALSE;
	}

	return inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT;
}
u_int32_t
necp_socket_get_flow_divert_control_unit(struct inpcb *inp)
{
	if (inp == NULL) {
		return 0;
	}

	if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT) {
		return inp->inp_policyresult.results.result_parameter.flow_divert_control_unit;
	}

	return 0;
}
bool
necp_socket_should_rescope(struct inpcb *inp)
{
	if (inp == NULL) {
		return FALSE;
	}

	return inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED ||
	       inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SCOPED_DIRECT;
}
u_int32_t
necp_socket_get_rescope_if_index(struct inpcb *inp)
{
	if (inp == NULL) {
		return 0;
	}

	if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED) {
		return inp->inp_policyresult.results.result_parameter.scoped_interface_index;
	} else if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SCOPED_DIRECT) {
		return necp_get_primary_direct_interface_index();
	}

	return 0;
}
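
// Compute the effective MTU for a socket that is being tunneled to another
// interface, accounting for ipsec encapsulation overhead on delegate interfaces.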
u_int32_t
necp_socket_get_effective_mtu(struct inpcb *inp, u_int32_t current_mtu)
{
	if (inp == NULL) {
		return current_mtu;
	}

	if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL &&
	    (inp->inp_flags & INP_BOUND_IF) &&
	    inp->inp_boundifp) {
		u_int bound_interface_index = inp->inp_boundifp->if_index;
		u_int tunnel_interface_index = inp->inp_policyresult.results.result_parameter.tunnel_interface_index;

		// The result is IP Tunnel, and is rescoping from one interface to another. Recalculate MTU.
		if (bound_interface_index != tunnel_interface_index) {
			ifnet_t tunnel_interface = NULL;

			ifnet_head_lock_shared();
			tunnel_interface = ifindex2ifnet[tunnel_interface_index];
			ifnet_head_done();

			if (tunnel_interface != NULL) {
				u_int32_t direct_tunnel_mtu = tunnel_interface->if_mtu;
				u_int32_t delegate_tunnel_mtu = (tunnel_interface->if_delegated.ifp != NULL) ? tunnel_interface->if_delegated.ifp->if_mtu : 0;
				if (delegate_tunnel_mtu != 0 &&
				    strncmp(tunnel_interface->if_name, "ipsec", strlen("ipsec")) == 0) {
					// For ipsec interfaces, calculate the overhead from the delegate interface
					u_int32_t tunnel_overhead = (u_int32_t)(esp_hdrsiz(NULL) + sizeof(struct ip6_hdr));
					if (delegate_tunnel_mtu > tunnel_overhead) {
						delegate_tunnel_mtu -= tunnel_overhead;
					}

					if (delegate_tunnel_mtu < direct_tunnel_mtu) {
						// If the (delegate - overhead) < direct, return (delegate - overhead)
						return delegate_tunnel_mtu;
					} else {
						// Otherwise return direct
						return direct_tunnel_mtu;
					}
				} else {
					// For non-ipsec interfaces, just return the tunnel MTU
					return direct_tunnel_mtu;
				}
			}
		}
	}

	// By default, just return the MTU passed in
	return current_mtu;
}
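
/*
 * Illustrative arithmetic (hypothetical numbers): if the tunnel is an ipsec
 * interface whose delegate (physical) interface has an MTU of 1500, and
 * esp_hdrsiz(NULL) reports a worst-case ESP header of, say, 73 bytes, the
 * overhead is 73 + sizeof(struct ip6_hdr) = 73 + 40 = 113 bytes and the
 * delegate-derived MTU becomes 1500 - 113 = 1387. The function returns the
 * smaller of that value and the ipsec interface's own MTU, so the socket never
 * assumes more room than the encapsulated path can carry.
 */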
static ifnet_t
necp_get_ifnet_from_result_parameter(necp_kernel_policy_result_parameter *result_parameter)
{
	if (result_parameter == NULL) {
		return NULL;
	}

	return ifindex2ifnet[result_parameter->tunnel_interface_index];
}
static bool
necp_packet_can_rebind_to_ifnet(struct mbuf *packet, struct ifnet *interface, struct route *new_route, int family)
{
	bool found_match = false;
	errno_t result = 0;
	ifaddr_t *addresses = NULL;
	union necp_sockaddr_union address_storage;
	int i;

	if (packet == NULL || interface == NULL || new_route == NULL || (family != AF_INET && family != AF_INET6)) {
		return false;
	}

	result = ifnet_get_address_list_family(interface, &addresses, family);
	if (result != 0) {
		NECPLOG(LOG_ERR, "Failed to get address list for %s%d", ifnet_name(interface), ifnet_unit(interface));
		return false;
	}

	for (i = 0; addresses[i] != NULL; i++) {
		ROUTE_RELEASE(new_route);
		if (ifaddr_address(addresses[i], &address_storage.sa, sizeof(address_storage)) == 0) {
			if (family == AF_INET) {
				struct ip *ip = mtod(packet, struct ip *);
				if (memcmp(&address_storage.sin.sin_addr, &ip->ip_src, sizeof(ip->ip_src)) == 0) {
					struct sockaddr_in *dst4 = (struct sockaddr_in *)(void *)&new_route->ro_dst;
					dst4->sin_family = AF_INET;
					dst4->sin_len = sizeof(struct sockaddr_in);
					dst4->sin_addr = ip->ip_dst;
					rtalloc_scoped(new_route, interface->if_index);
					if (!ROUTE_UNUSABLE(new_route)) {
						found_match = true;
						goto done;
					}
				}
			} else if (family == AF_INET6) {
				struct ip6_hdr *ip6 = mtod(packet, struct ip6_hdr *);
				if (memcmp(&address_storage.sin6.sin6_addr, &ip6->ip6_src, sizeof(ip6->ip6_src)) == 0) {
					struct sockaddr_in6 *dst6 = (struct sockaddr_in6 *)(void *)&new_route->ro_dst;
					dst6->sin6_family = AF_INET6;
					dst6->sin6_len = sizeof(struct sockaddr_in6);
					dst6->sin6_addr = ip6->ip6_dst;
					rtalloc_scoped(new_route, interface->if_index);
					if (!ROUTE_UNUSABLE(new_route)) {
						found_match = true;
						goto done;
					}
				}
			}
		}
	}

done:
	ifnet_free_address_list(addresses);
	addresses = NULL;
	return found_match;
}
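
/*
 * Summary of the rebind check: a packet may be re-bound to an interface only
 * if its source address matches one of that interface's addresses for the
 * given family and a usable scoped route to the packet's destination can be
 * allocated on that interface. The caller-supplied route is released and
 * re-resolved for each candidate address; on success the last resolved route
 * is left in new_route for the caller to use.
 */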
static bool
necp_addr_is_loopback(struct sockaddr *address)
{
	if (address == NULL) {
		return false;
	}

	if (address->sa_family == AF_INET) {
		return ntohl(((struct sockaddr_in *)(void *)address)->sin_addr.s_addr) == INADDR_LOOPBACK;
	} else if (address->sa_family == AF_INET6) {
		return IN6_IS_ADDR_LOOPBACK(&((struct sockaddr_in6 *)(void *)address)->sin6_addr);
	}

	return false;
}
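
/*
 * Note that this matches exactly 127.0.0.1 (INADDR_LOOPBACK) for IPv4 and ::1
 * for IPv6; other addresses in 127.0.0.0/8 that may be assigned to the
 * loopback interface are not treated as loopback by this helper.
 */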
static bool
necp_is_loopback(struct sockaddr *local_addr, struct sockaddr *remote_addr, struct inpcb *inp, struct mbuf *packet, u_int32_t bound_interface_index)
{
	// Note: This function only checks for the loopback addresses.
	// In the future, we may want to expand to also allow any traffic
	// going through the loopback interface, but until then, this
	// check is cheaper.

	if (local_addr != NULL && necp_addr_is_loopback(local_addr)) {
		return true;
	}

	if (remote_addr != NULL && necp_addr_is_loopback(remote_addr)) {
		return true;
	}

	if (inp != NULL) {
		if ((inp->inp_flags & INP_BOUND_IF) && inp->inp_boundifp && (inp->inp_boundifp->if_flags & IFF_LOOPBACK)) {
			return true;
		}
		if (inp->inp_vflag & INP_IPV4) {
			if (ntohl(inp->inp_laddr.s_addr) == INADDR_LOOPBACK ||
			    ntohl(inp->inp_faddr.s_addr) == INADDR_LOOPBACK) {
				return true;
			}
		} else if (inp->inp_vflag & INP_IPV6) {
			if (IN6_IS_ADDR_LOOPBACK(&inp->in6p_laddr) ||
			    IN6_IS_ADDR_LOOPBACK(&inp->in6p_faddr)) {
				return true;
			}
		}
	} else if (bound_interface_index != IFSCOPE_NONE && lo_ifp->if_index == bound_interface_index) {
		return true;
	}

	if (packet != NULL) {
		struct ip *ip = mtod(packet, struct ip *);
		if (ip->ip_v == 4) {
			if (ntohl(ip->ip_src.s_addr) == INADDR_LOOPBACK) {
				return true;
			}
			if (ntohl(ip->ip_dst.s_addr) == INADDR_LOOPBACK) {
				return true;
			}
		} else if (ip->ip_v == 6) {
			struct ip6_hdr *ip6 = mtod(packet, struct ip6_hdr *);
			if (IN6_IS_ADDR_LOOPBACK(&ip6->ip6_src)) {
				return true;
			}
			if (IN6_IS_ADDR_LOOPBACK(&ip6->ip6_dst)) {
				return true;
			}
		}
	}

	return false;
}
static bool
necp_is_intcoproc(struct inpcb *inp, struct mbuf *packet)
{
	if (inp != NULL) {
		if (!(inp->inp_vflag & INP_IPV6)) {
			return false;
		}
		if (INP_INTCOPROC_ALLOWED(inp)) {
			return true;
		}
		if ((inp->inp_flags & INP_BOUND_IF) &&
		    IFNET_IS_INTCOPROC(inp->inp_boundifp)) {
			return true;
		}
	}
	if (packet != NULL) {
		struct ip6_hdr *ip6 = mtod(packet, struct ip6_hdr *);
		if ((ip6->ip6_vfc & IPV6_VERSION_MASK) == IPV6_VERSION &&
		    IN6_IS_ADDR_LINKLOCAL(&ip6->ip6_dst) &&
		    ip6->ip6_dst.s6_addr32[2] == ntohl(0xaede48ff) &&
		    ip6->ip6_dst.s6_addr32[3] == ntohl(0xfe334455)) {
			return true;
		}
	}

	return false;
}
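
/*
 * The packet-based check above accepts IPv6 link-local destinations whose low
 * 64 bits are aede:48ff:fe33:4455, i.e. addresses of the form
 * fe80::...:aede:48ff:fe33:4455, which the code treats as the well-known
 * suffix of the internal co-processor interface. Everything else must either
 * be explicitly allowed (INP_INTCOPROC_ALLOWED) or bound to an INTCOPROC
 * interface to be classified as co-processor traffic.
 */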
static bool
necp_address_matches_drop_dest_policy(union necp_sockaddr_union *sau, u_int32_t session_order)
{
	char dest_str[MAX_IPv6_STR_LEN];

	if (necp_drop_dest_debug > 0) {
		if (sau->sa.sa_family == AF_INET) {
			(void) inet_ntop(AF_INET, &sau->sin.sin_addr, dest_str, sizeof(dest_str));
		} else if (sau->sa.sa_family == AF_INET6) {
			(void) inet_ntop(AF_INET6, &sau->sin6.sin6_addr, dest_str, sizeof(dest_str));
		} else {
			dest_str[0] = 0;
		}
	}
	for (u_int32_t i = 0; i < necp_drop_dest_policy.entry_count; i++) {
		struct necp_drop_dest_entry *necp_drop_dest_entry = &necp_drop_dest_policy.entries[i];
		struct necp_policy_condition_addr *npca = &necp_drop_dest_entry->cond_addr;

		if (session_order >= necp_drop_dest_entry->order && necp_is_addr_in_subnet(&sau->sa, &npca->address.sa, npca->prefix)) {
			if (necp_drop_dest_debug > 0) {
				char subnet_str[MAX_IPv6_STR_LEN];
				struct proc *p = current_proc();
				pid_t pid = proc_pid(p);

				if (sau->sa.sa_family == AF_INET) {
					(void) inet_ntop(AF_INET, &npca->address.sin.sin_addr, subnet_str, sizeof(subnet_str));
					os_log(OS_LOG_DEFAULT, "%s (process %s:%u) %s matches %s/%u", __func__, proc_best_name(p), pid, dest_str, subnet_str, npca->prefix);
				} else if (sau->sa.sa_family == AF_INET6) {
					(void) inet_ntop(AF_INET6, &npca->address.sin6.sin6_addr, subnet_str, sizeof(subnet_str));
					os_log(OS_LOG_DEFAULT, "%s (process %s:%u) %s matches %s/%u", __func__, proc_best_name(p), pid, dest_str, subnet_str, npca->prefix);
				}
			}
			return true;
		}
	}
	if (necp_drop_dest_debug > 1) {
		struct proc *p = current_proc();
		pid_t pid = proc_pid(p);

		os_log(OS_LOG_DEFAULT, "%s (process %s:%u) %s no match", __func__, proc_best_name(p), pid, dest_str);
	}
	return false;
}
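
/*
 * Drop-destination matching: an entry applies when the requesting session's
 * order value is numerically greater than or equal to the entry's order
 * (larger order generally meaning lower session priority) and the destination
 * falls inside the entry's subnet per necp_is_addr_in_subnet(). The table
 * itself is replaced wholesale by the sysctl handler below, which validates
 * every entry before committing the copy under the kernel policy lock.
 */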
static int
sysctl_handle_necp_drop_dest_level SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2, oidp)
	int changed = 0;
	int error = 0;
	struct necp_drop_dest_policy tmp_drop_dest_policy;
	struct proc *p = current_proc();
	pid_t pid = proc_pid(p);

	if (req->newptr != USER_ADDR_NULL && proc_suser(current_proc()) != 0 &&
	    priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES, 0) != 0) {
		NECPLOG(LOG_ERR, "%s (process %s:%u) not permitted", __func__, proc_best_name(p), pid);
		return EPERM;
	}
	if (req->newptr != USER_ADDR_NULL && req->newlen != sizeof(struct necp_drop_dest_policy)) {
		NECPLOG(LOG_ERR, "%s (process %s:%u) bad newlen %lu", __func__, proc_best_name(p), pid, req->newlen);
		return EINVAL;
	}

	memcpy(&tmp_drop_dest_policy, &necp_drop_dest_policy, sizeof(struct necp_drop_dest_policy));
	error = sysctl_io_opaque(req, &tmp_drop_dest_policy, sizeof(struct necp_drop_dest_policy), &changed);
	if (error != 0) {
		NECPLOG(LOG_ERR, "%s (process %s:%u) sysctl_io_opaque() error %d", __func__, proc_best_name(p), pid, error);
		return error;
	}
	if (changed == 0 || req->newptr == USER_ADDR_NULL) {
		return error;
	}
	//
	// Validate the passed parameters
	//
	if (tmp_drop_dest_policy.entry_count >= MAX_NECP_DROP_DEST_LEVEL_ADDRS) {
		NECPLOG(LOG_ERR, "%s (process %s:%u) bad entry_count %u", __func__, proc_best_name(p), pid, tmp_drop_dest_policy.entry_count);
		return EINVAL;
	}
	for (u_int32_t i = 0; i < tmp_drop_dest_policy.entry_count; i++) {
		struct necp_drop_dest_entry *tmp_drop_dest_entry = &tmp_drop_dest_policy.entries[i];
		struct necp_policy_condition_addr *npca = &tmp_drop_dest_entry->cond_addr;

		switch (tmp_drop_dest_entry->level) {
		case NECP_SESSION_PRIORITY_UNKNOWN:
			if (tmp_drop_dest_policy.entry_count != 0) {
				NECPLOG(LOG_ERR, "%s (process %s:%u) NECP_SESSION_PRIORITY_UNKNOWN bad entry_count %u", __func__, proc_best_name(p), pid, tmp_drop_dest_policy.entry_count);
				return EINVAL;
			}
			break;
		case NECP_SESSION_PRIORITY_CONTROL:
		case NECP_SESSION_PRIORITY_PRIVILEGED_TUNNEL:
		case NECP_SESSION_PRIORITY_HIGH:
		case NECP_SESSION_PRIORITY_HIGH_1:
		case NECP_SESSION_PRIORITY_HIGH_2:
		case NECP_SESSION_PRIORITY_HIGH_3:
		case NECP_SESSION_PRIORITY_HIGH_4:
		case NECP_SESSION_PRIORITY_HIGH_RESTRICTED:
		case NECP_SESSION_PRIORITY_DEFAULT:
		case NECP_SESSION_PRIORITY_LOW:
			if (tmp_drop_dest_policy.entry_count == 0) {
				NECPLOG(LOG_ERR, "%s (process %s:%u) priority %u entry_count 0", __func__, proc_best_name(p), pid, tmp_drop_dest_entry->level);
				return EINVAL;
			}
			break;
		default:
			NECPLOG(LOG_ERR, "%s (process %s:%u) bad level %u", __func__, proc_best_name(p), pid, tmp_drop_dest_entry->level);
			return EINVAL;
		}

		switch (npca->address.sa.sa_family) {
		case AF_INET: {
			if (npca->prefix > 32) {
				NECPLOG(LOG_ERR, "%s (process %s:%u) AF_INET bad prefix %u", __func__, proc_best_name(p), pid, npca->prefix);
				return EINVAL;
			}
			if (npca->address.sin.sin_len != sizeof(struct sockaddr_in)) {
				NECPLOG(LOG_ERR, "%s (process %s:%u) AF_INET bad sin_len %u", __func__, proc_best_name(p), pid, npca->address.sin.sin_len);
				return EINVAL;
			}
			if (npca->address.sin.sin_port != 0) {
				NECPLOG(LOG_ERR, "%s (process %s:%u) AF_INET bad sin_port %u, not zero", __func__, proc_best_name(p), pid, npca->address.sin.sin_port);
				return EINVAL;
			}
			break;
		}
		case AF_INET6: {
			if (npca->prefix > 128) {
				NECPLOG(LOG_ERR, "%s (process %s:%u) AF_INET6 bad prefix %u", __func__, proc_best_name(p), pid, npca->prefix);
				return EINVAL;
			}
			if (npca->address.sin6.sin6_len != sizeof(struct sockaddr_in6)) {
				NECPLOG(LOG_ERR, "%s (process %s:%u) AF_INET6 bad sin6_len %u", __func__, proc_best_name(p), pid, npca->address.sin6.sin6_len);
				return EINVAL;
			}
			if (npca->address.sin6.sin6_port != 0) {
				NECPLOG(LOG_ERR, "%s (process %s:%u) AF_INET6 bad sin6_port %u, not zero", __func__, proc_best_name(p), pid, npca->address.sin6.sin6_port);
				return EINVAL;
			}
			if (npca->address.sin6.sin6_flowinfo != 0) {
				NECPLOG(LOG_ERR, "%s (process %s:%u) AF_INET6 bad sin6_flowinfo %u, not zero", __func__, proc_best_name(p), pid, npca->address.sin6.sin6_flowinfo);
				return EINVAL;
			}
			if (npca->address.sin6.sin6_scope_id != 0) {
				NECPLOG(LOG_ERR, "%s (process %s:%u) AF_INET6 bad sin6_scope_id %u, not zero", __func__, proc_best_name(p), pid, npca->address.sin6.sin6_scope_id);
				return EINVAL;
			}
			break;
		}
		default: {
			NECPLOG(LOG_ERR, "%s (process %s:%u) bad sa_family %u", __func__, proc_best_name(p), pid, npca->address.sa.sa_family);
			return EINVAL;
		}
		}
	}
	//
	// Commit the changed policy
	//
	lck_rw_lock_exclusive(&necp_kernel_policy_lock);
	memset(&necp_drop_dest_policy, 0, sizeof(struct necp_drop_dest_policy));

	necp_drop_dest_policy.entry_count = tmp_drop_dest_policy.entry_count;
	for (u_int32_t i = 0; i < tmp_drop_dest_policy.entry_count; i++) {
		struct necp_drop_dest_entry *tmp_drop_dest_entry = &tmp_drop_dest_policy.entries[i];
		struct necp_drop_dest_entry *necp_drop_dest_entry = &necp_drop_dest_policy.entries[i];

		memcpy(necp_drop_dest_entry, tmp_drop_dest_entry, sizeof(struct necp_drop_dest_entry));

		necp_drop_dest_entry->order = necp_get_first_order_for_priority(necp_drop_dest_entry->level);
	}
	lck_rw_done(&necp_kernel_policy_lock);