/*
 * Copyright (c) 2013-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <libkern/OSMalloc.h>
#include <sys/kernel.h>
#include <sys/kern_control.h>
#include <sys/kpi_mbuf.h>
#include <sys/proc_uuid_policy.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/coalition.h>
#include <sys/codesign.h>
#include <kern/cs_blobs.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_cache.h>
#include <netinet/udp.h>
#include <netinet/in_pcb.h>
#include <netinet/in_tclass.h>
#include <netinet6/esp.h>
#include <net/flowhash.h>
#include <net/if_var.h>
#include <sys/kauth.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/kern_event.h>
#include <sys/file_internal.h>
#include <IOKit/IOBSD.h>
#include <libkern/crypto/rand.h>
#include <corecrypto/cchmac.h>
#include <corecrypto/ccsha2.h>
#include <os/refcnt.h>
#include <net/network_agent.h>
/*
 * NECP - Network Extension Control Policy database
 * ------------------------------------------------
 * The goal of this module is to allow clients connecting via a
 * policy file descriptor to create high-level policy sessions, which
 * are ingested into low-level kernel policies that control and tag
 * traffic at the application, socket, and IP layers.
 *
 * ------------------------------------------------
 * Sessions
 * ------------------------------------------------
 * Each session owns a list of session policies, each of which can
 * specify any combination of conditions and a single result. Each
 * session also has a priority level (such as High, Default, or Low)
 * which is requested by the client. Based on the requested level,
 * a session order value is assigned to the session, which will be used
 * to sort kernel policies generated by the session. The session client
 * can specify the sub-order for each policy it creates which will be
 * used to further sort the kernel policies.
 *
 * Policy fd --> 1 necp_session --> list of necp_session_policy structs
 *
 * ------------------------------------------------
 * Kernel Policies
 * ------------------------------------------------
 * Whenever a session sends the Apply command, its policies are ingested
 * and generate kernel policies. There are two phases of kernel policy
 * generation:
 *
 * 1. The session policy is parsed to create kernel policies at the socket
 * and IP layers, when applicable. For example, a policy that requires
 * all traffic from App1 to Pass will generate a socket kernel policy to
 * match App1 and mark packets with ID1, and also an IP policy to match
 * ID1 and let the packet pass. This is handled in necp_apply_policy. The
 * resulting kernel policies are added to the global socket and IP layer
 * policy lists.
 * necp_session_policy --> necp_kernel_socket_policy and necp_kernel_ip_output_policy
 *      necp_kernel_socket_policies      necp_kernel_ip_output_policies
 *
 * 2. Once the global lists of kernel policies have been filled out, each
 * list is traversed to create optimized sub-lists ("Maps") which are used during
 * data-path evaluation. IP policies are sent into necp_kernel_ip_output_policies_map,
 * which hashes incoming packets based on marked socket-layer policies, and removes
 * duplicate or overlapping policies. Socket policies are sent into two maps,
 * necp_kernel_socket_policies_map and necp_kernel_socket_policies_app_layer_map.
 * The app layer map is used for policy checks coming in from user space, and is one
 * list with duplicate and overlapping policies removed. The socket map hashes based
 * on app UUID, and removes duplicate and overlapping policies.
 * necp_kernel_socket_policy --> necp_kernel_socket_policies_app_layer_map
 *      |-> necp_kernel_socket_policies_map
 *
 * necp_kernel_ip_output_policies --> necp_kernel_ip_output_policies_map
 *
 * ------------------------------------------------
 * Drop All Level
 * ------------------------------------------------
 * The Drop All Level is a sysctl that controls the level at which policies are allowed
 * to override a global drop rule. If the value is 0, no drop rule is applied. If the value
 * is 1, all traffic is dropped. If the value is greater than 1, all kernel policies created
 * by a session with a priority level better than (numerically less than) the
 * Drop All Level will allow matching traffic to not be dropped. The Drop All Level is
 * dynamically interpreted into necp_drop_all_order, which specifies the equivalent assigned
 * session orders to be dropped.
 */
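/*
 * Illustrative sketch (not from the original source; 1000 orders are reserved
 * per priority level, see necp_allocate_new_session_order below): with a
 * Drop All Level of 3,
 *
 *     necp_drop_all_order = necp_get_first_order_for_priority(3);   // 2001
 *
 * so policies from sessions at priority levels 1 and 2 (assigned orders below
 * 2001, numerically better than the Drop All Level) still let matching traffic
 * through, while traffic that only matches lower-priority sessions is dropped.
 */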
u_int32_t necp_drop_all_order = 0;
u_int32_t necp_drop_all_level = 0;

u_int32_t necp_pass_loopback = 1;   // 0=Off, 1=On
u_int32_t necp_pass_keepalives = 1; // 0=Off, 1=On
u_int32_t necp_pass_interpose = 1;  // 0=Off, 1=On

u_int32_t necp_drop_unentitled_order = 0;
#ifdef XNU_TARGET_OS_WATCH
u_int32_t necp_drop_unentitled_level = NECP_SESSION_PRIORITY_CONTROL + 1; // Block all unentitled traffic from policies below control level
#else // XNU_TARGET_OS_WATCH
u_int32_t necp_drop_unentitled_level = 0;
#endif // XNU_TARGET_OS_WATCH

u_int32_t necp_debug = 0; // 0=None, 1=Basic, 2=EveryMatch

u_int32_t necp_session_count = 0;
#define LIST_INSERT_SORTED_ASCENDING(head, elm, field, sortfield, tmpelm) do { \
    if (LIST_EMPTY((head)) || (LIST_FIRST(head)->sortfield >= (elm)->sortfield)) { \
        LIST_INSERT_HEAD((head), elm, field); \
    } else { \
        LIST_FOREACH(tmpelm, head, field) { \
            if (LIST_NEXT(tmpelm, field) == NULL || LIST_NEXT(tmpelm, field)->sortfield >= (elm)->sortfield) { \
                LIST_INSERT_AFTER(tmpelm, elm, field); \
                break; \
            } \
        } \
    } \
} while (0)

#define LIST_INSERT_SORTED_TWICE_ASCENDING(head, elm, field, firstsortfield, secondsortfield, tmpelm) do { \
    if (LIST_EMPTY((head)) || (LIST_FIRST(head)->firstsortfield > (elm)->firstsortfield) || ((LIST_FIRST(head)->firstsortfield == (elm)->firstsortfield) && (LIST_FIRST(head)->secondsortfield >= (elm)->secondsortfield))) { \
        LIST_INSERT_HEAD((head), elm, field); \
    } else { \
        LIST_FOREACH(tmpelm, head, field) { \
            if (LIST_NEXT(tmpelm, field) == NULL || (LIST_NEXT(tmpelm, field)->firstsortfield > (elm)->firstsortfield) || ((LIST_NEXT(tmpelm, field)->firstsortfield == (elm)->firstsortfield) && (LIST_NEXT(tmpelm, field)->secondsortfield >= (elm)->secondsortfield))) { \
                LIST_INSERT_AFTER(tmpelm, elm, field); \
                break; \
            } \
        } \
    } \
} while (0)

#define LIST_INSERT_SORTED_THRICE_ASCENDING(head, elm, field, firstsortfield, secondsortfield, thirdsortfield, tmpelm) do { \
    if (LIST_EMPTY((head)) || (LIST_FIRST(head)->firstsortfield > (elm)->firstsortfield) || ((LIST_FIRST(head)->firstsortfield == (elm)->firstsortfield) && (LIST_FIRST(head)->secondsortfield >= (elm)->secondsortfield)) || ((LIST_FIRST(head)->firstsortfield == (elm)->firstsortfield) && (LIST_FIRST(head)->secondsortfield == (elm)->secondsortfield) && (LIST_FIRST(head)->thirdsortfield >= (elm)->thirdsortfield))) { \
        LIST_INSERT_HEAD((head), elm, field); \
    } else { \
        LIST_FOREACH(tmpelm, head, field) { \
            if (LIST_NEXT(tmpelm, field) == NULL || (LIST_NEXT(tmpelm, field)->firstsortfield > (elm)->firstsortfield) || ((LIST_NEXT(tmpelm, field)->firstsortfield == (elm)->firstsortfield) && (LIST_NEXT(tmpelm, field)->secondsortfield >= (elm)->secondsortfield)) || ((LIST_NEXT(tmpelm, field)->firstsortfield == (elm)->firstsortfield) && (LIST_NEXT(tmpelm, field)->secondsortfield == (elm)->secondsortfield) && (LIST_NEXT(tmpelm, field)->thirdsortfield >= (elm)->thirdsortfield))) { \
                LIST_INSERT_AFTER(tmpelm, elm, field); \
                break; \
            } \
        } \
    } \
} while (0)
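/*
 * Usage sketch (illustrative only, not a call site copied from this file):
 * insert a policy into a session's list so that it stays sorted by its
 * "order" field.
 *
 *     struct necp_session_policy *new_policy = ...;
 *     struct necp_session_policy *tmp_policy = NULL;
 *     LIST_INSERT_SORTED_ASCENDING(&session->policies, new_policy, chain, order, tmp_policy);
 *
 * The TWICE/THRICE variants compare one or two additional tie-breaking fields
 * before falling back to insertion after the last smaller element.
 */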
#define IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(x) ((x) == NECP_ROUTE_RULE_DENY_INTERFACE || (x) == NECP_ROUTE_RULE_ALLOW_INTERFACE)

#define IS_NECP_DEST_IN_LOCAL_NETWORKS(rt) \
    ((rt) != NULL && !((rt)->rt_flags & RTF_GATEWAY) && ((rt)->rt_ifa && (rt)->rt_ifa->ifa_ifp && !((rt)->rt_ifa->ifa_ifp->if_flags & IFF_POINTOPOINT)))

#define NECP_KERNEL_CONDITION_ALL_INTERFACES            0x000001
#define NECP_KERNEL_CONDITION_BOUND_INTERFACE           0x000002
#define NECP_KERNEL_CONDITION_PROTOCOL                  0x000004
#define NECP_KERNEL_CONDITION_LOCAL_START               0x000008
#define NECP_KERNEL_CONDITION_LOCAL_END                 0x000010
#define NECP_KERNEL_CONDITION_LOCAL_PREFIX              0x000020
#define NECP_KERNEL_CONDITION_REMOTE_START              0x000040
#define NECP_KERNEL_CONDITION_REMOTE_END                0x000080
#define NECP_KERNEL_CONDITION_REMOTE_PREFIX             0x000100
#define NECP_KERNEL_CONDITION_APP_ID                    0x000200
#define NECP_KERNEL_CONDITION_REAL_APP_ID               0x000400
#define NECP_KERNEL_CONDITION_DOMAIN                    0x000800
#define NECP_KERNEL_CONDITION_ACCOUNT_ID                0x001000
#define NECP_KERNEL_CONDITION_POLICY_ID                 0x002000
#define NECP_KERNEL_CONDITION_PID                       0x004000
#define NECP_KERNEL_CONDITION_UID                       0x008000
#define NECP_KERNEL_CONDITION_LAST_INTERFACE            0x010000 // Only set from packets looping between interfaces
#define NECP_KERNEL_CONDITION_TRAFFIC_CLASS             0x020000
#define NECP_KERNEL_CONDITION_ENTITLEMENT               0x040000
#define NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT        0x080000
#define NECP_KERNEL_CONDITION_AGENT_TYPE                0x100000
#define NECP_KERNEL_CONDITION_HAS_CLIENT                0x200000
#define NECP_KERNEL_CONDITION_LOCAL_NETWORKS            0x400000
#define NECP_KERNEL_CONDITION_CLIENT_FLAGS              0x800000
#define NECP_KERNEL_CONDITION_LOCAL_EMPTY               0x1000000
#define NECP_KERNEL_CONDITION_REMOTE_EMPTY              0x2000000
#define NECP_KERNEL_CONDITION_PLATFORM_BINARY           0x4000000

#define NECP_MAX_POLICY_RESULT_SIZE                     512
#define NECP_MAX_ROUTE_RULES_ARRAY_SIZE                 1024
#define NECP_MAX_CONDITIONS_ARRAY_SIZE                  4096
#define NECP_MAX_POLICY_LIST_COUNT                      1024

// Cap the policy size at the max result + conditions size, with room for extra TLVs
#define NECP_MAX_POLICY_SIZE                            (1024 + NECP_MAX_POLICY_RESULT_SIZE + NECP_MAX_CONDITIONS_ARRAY_SIZE)
struct necp_service_registration {
    LIST_ENTRY(necp_service_registration) session_chain;
    LIST_ENTRY(necp_service_registration) kernel_chain;
    u_int32_t service_id;
};

struct necp_session {
    u_int8_t necp_fd_type;
    u_int32_t control_unit;
    u_int32_t session_priority; // Descriptive priority rating
    u_int32_t session_order;
    necp_policy_id last_policy_id;

    decl_lck_mtx_data(, lock);

    bool proc_locked; // Messages must come from proc_uuid
    uuid_t proc_uuid;
    int proc_pid;

    bool dirty;
    LIST_HEAD(_policies, necp_session_policy) policies;

    LIST_HEAD(_services, necp_service_registration) services;

    TAILQ_ENTRY(necp_session) chain;
};

#define NECP_SESSION_LOCK(_s) lck_mtx_lock(&_s->lock)
#define NECP_SESSION_UNLOCK(_s) lck_mtx_unlock(&_s->lock)

static TAILQ_HEAD(_necp_session_list, necp_session) necp_session_list;
struct necp_socket_info {
    union necp_sockaddr_union local_addr;
    union necp_sockaddr_union remote_addr;
    u_int32_t bound_interface_index;
    u_int32_t traffic_class;
    u_int32_t application_id;
    u_int32_t real_application_id;
    u_int32_t account_id;
    u_int32_t drop_order;
    u_int32_t client_flags;
    unsigned has_client : 1;
    unsigned is_platform_binary : 1;
    unsigned __pad_bits : 6;
};
static lck_grp_attr_t *necp_kernel_policy_grp_attr = NULL;
static lck_attr_t *necp_kernel_policy_mtx_attr = NULL;
static lck_grp_t *necp_kernel_policy_mtx_grp = NULL;
decl_lck_rw_data(static, necp_kernel_policy_lock);

static lck_grp_attr_t *necp_route_rule_grp_attr = NULL;
static lck_attr_t *necp_route_rule_mtx_attr = NULL;
static lck_grp_t *necp_route_rule_mtx_grp = NULL;
decl_lck_rw_data(static, necp_route_rule_lock);

os_refgrp_decl(static, necp_refgrp, "NECPRefGroup", NULL);

/*
 * On modification, invalidate cached lookups by bumping the generation count.
 * Other calls will need to take the slowpath of taking
 * the subsystem lock.
 */
static volatile int32_t necp_kernel_socket_policies_gencount;
#define BUMP_KERNEL_SOCKET_POLICIES_GENERATION_COUNT() do { \
    if (OSIncrementAtomic(&necp_kernel_socket_policies_gencount) == (INT32_MAX - 1)) { \
        necp_kernel_socket_policies_gencount = 1; \
    } \
} while (0)
/*
 * Allow privileged processes to bypass the default drop-all
 * via entitlement check. For OSX, since entitlement check is
 * not supported for configd, the configd signing identity is
 * checked instead.
 */
#define SIGNING_ID_CONFIGD "com.apple.configd"
#define SIGNING_ID_CONFIGD_LEN (sizeof(SIGNING_ID_CONFIGD) - 1)

typedef enum {
    NECP_DROP_ALL_BYPASS_CHECK_RESULT_NONE = 0,
    NECP_DROP_ALL_BYPASS_CHECK_RESULT_TRUE = 1,
    NECP_DROP_ALL_BYPASS_CHECK_RESULT_FALSE = 2,
} necp_drop_all_bypass_check_result_t;
static u_int32_t necp_kernel_application_policies_condition_mask;
static size_t necp_kernel_application_policies_count;
static u_int32_t necp_kernel_socket_policies_condition_mask;
static size_t necp_kernel_socket_policies_count;
static size_t necp_kernel_socket_policies_non_app_count;
static LIST_HEAD(_necpkernelsocketconnectpolicies, necp_kernel_socket_policy) necp_kernel_socket_policies;
#define NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS 5
#define NECP_SOCKET_MAP_APP_ID_TO_BUCKET(appid) (appid ? (appid%(NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS - 1) + 1) : 0)
static struct necp_kernel_socket_policy **necp_kernel_socket_policies_map[NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS];
static struct necp_kernel_socket_policy **necp_kernel_socket_policies_app_layer_map;

/*
 * A note on policy 'maps': these are used for boosting efficiency when matching policies. For each dimension of the map,
 * such as an ID, the 0 bucket is reserved for sockets/packets that do not have this parameter, while the other
 * buckets lead to an array of policy pointers that form the list applicable when the (parameter%(NUM_BUCKETS - 1) + 1) == bucket_index.
 *
 * For example, a packet with policy ID of 7, when there are 4 ID buckets, will map to bucket (7%3 + 1) = 2.
 */
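/*
 * Concretely, with the NECP_SOCKET_MAP_APP_ID_TO_BUCKET() macro above and its
 * 5 app-ID buckets (bucket 0 reserved for "no app ID"), a hypothetical app ID
 * of 10 lands in bucket (10 % 4) + 1 = 3:
 *
 *     u_int32_t bucket = NECP_SOCKET_MAP_APP_ID_TO_BUCKET(10);    // 3
 *     struct necp_kernel_socket_policy **list = necp_kernel_socket_policies_map[bucket];
 *
 * (Illustrative value only; real app IDs come from the UUID/ID mapping table.)
 */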
static u_int32_t necp_kernel_ip_output_policies_condition_mask;
static size_t necp_kernel_ip_output_policies_count;
static size_t necp_kernel_ip_output_policies_non_id_count;
static LIST_HEAD(_necpkernelipoutputpolicies, necp_kernel_ip_output_policy) necp_kernel_ip_output_policies;
#define NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS 5
#define NECP_IP_OUTPUT_MAP_ID_TO_BUCKET(id) (id ? (id%(NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS - 1) + 1) : 0)
static struct necp_kernel_ip_output_policy **necp_kernel_ip_output_policies_map[NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS];
static struct necp_kernel_socket_policy pass_policy = {
    .id = NECP_KERNEL_POLICY_ID_NO_MATCH,
    .result = NECP_KERNEL_POLICY_RESULT_PASS,
};
static struct necp_session *necp_create_session(void);
static void necp_delete_session(struct necp_session *session);

static necp_policy_id necp_handle_policy_add(struct necp_session *session,
    u_int8_t *tlv_buffer, size_t tlv_buffer_length, int offset, int *error);
static int necp_handle_policy_dump_all(user_addr_t out_buffer, size_t out_buffer_length);

#define MAX_RESULT_STRING_LEN 64
static inline const char * necp_get_result_description(char *result_string, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter);

static struct necp_session_policy *necp_policy_create(struct necp_session *session, necp_policy_order order, u_int8_t *conditions_array, u_int32_t conditions_array_size, u_int8_t *route_rules_array, u_int32_t route_rules_array_size, u_int8_t *result, u_int32_t result_size);
static struct necp_session_policy *necp_policy_find(struct necp_session *session, necp_policy_id policy_id);
static bool necp_policy_mark_for_deletion(struct necp_session *session, struct necp_session_policy *policy);
static bool necp_policy_mark_all_for_deletion(struct necp_session *session);
static bool necp_policy_delete(struct necp_session *session, struct necp_session_policy *policy);
static void necp_policy_apply_all(struct necp_session *session);

static necp_kernel_policy_id necp_kernel_socket_policy_add(necp_policy_order order, u_int32_t session_order, int session_pid,
    u_int32_t condition_mask, u_int32_t condition_negated_mask, necp_app_id cond_app_id, necp_app_id cond_real_app_id,
    char *cond_custom_entitlement, u_int32_t cond_account_id, char *domain, pid_t cond_pid, uid_t cond_uid,
    ifnet_t cond_bound_interface, struct necp_policy_condition_tc_range cond_traffic_class, u_int16_t cond_protocol,
    union necp_sockaddr_union *cond_local_start, union necp_sockaddr_union *cond_local_end, u_int8_t cond_local_prefix,
    union necp_sockaddr_union *cond_remote_start, union necp_sockaddr_union *cond_remote_end, u_int8_t cond_remote_prefix,
    struct necp_policy_condition_agent_type *cond_agent_type, u_int32_t cond_client_flags,
    necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter);
static bool necp_kernel_socket_policy_delete(necp_kernel_policy_id policy_id);
static bool necp_kernel_socket_policies_reprocess(void);
static bool necp_kernel_socket_policies_update_uuid_table(void);
static inline struct necp_kernel_socket_policy *necp_socket_find_policy_match_with_info_locked(struct necp_kernel_socket_policy **policy_search_array,
    struct necp_socket_info *info, necp_kernel_policy_filter *return_filter,
    u_int32_t *return_route_rule_id_array, size_t *return_route_rule_id_array_count, size_t route_rule_id_array_count,
    necp_kernel_policy_result *return_service_action, necp_kernel_policy_service *return_service,
    u_int32_t *return_netagent_array, u_int32_t *return_netagent_use_flags_array, size_t netagent_array_count,
    struct necp_client_parameter_netagent_type *required_agent_types, u_int32_t num_required_agent_types,
    proc_t proc, necp_kernel_policy_id *skip_policy_id, struct rtentry *rt,
    necp_kernel_policy_result *return_drop_dest_policy_result, necp_drop_all_bypass_check_result_t *return_drop_all_bypass);

static necp_kernel_policy_id necp_kernel_ip_output_policy_add(necp_policy_order order, necp_policy_order suborder, u_int32_t session_order, int session_pid,
    u_int32_t condition_mask, u_int32_t condition_negated_mask, necp_kernel_policy_id cond_policy_id, ifnet_t cond_bound_interface,
    u_int32_t cond_last_interface_index, u_int16_t cond_protocol,
    union necp_sockaddr_union *cond_local_start, union necp_sockaddr_union *cond_local_end, u_int8_t cond_local_prefix,
    union necp_sockaddr_union *cond_remote_start, union necp_sockaddr_union *cond_remote_end, u_int8_t cond_remote_prefix,
    necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter);
static bool necp_kernel_ip_output_policy_delete(necp_kernel_policy_id policy_id);
static bool necp_kernel_ip_output_policies_reprocess(void);

static bool necp_is_addr_in_range(struct sockaddr *addr, struct sockaddr *range_start, struct sockaddr *range_end);
static bool necp_is_range_in_range(struct sockaddr *inner_range_start, struct sockaddr *inner_range_end, struct sockaddr *range_start, struct sockaddr *range_end);
static bool necp_is_addr_in_subnet(struct sockaddr *addr, struct sockaddr *subnet_addr, u_int8_t subnet_prefix);
static int necp_addr_compare(struct sockaddr *sa1, struct sockaddr *sa2, int check_port);
static bool necp_buffer_compare_with_bit_prefix(u_int8_t *p1, u_int8_t *p2, u_int32_t bits);
static bool necp_addr_is_empty(struct sockaddr *addr);
static bool necp_is_loopback(struct sockaddr *local_addr, struct sockaddr *remote_addr, struct inpcb *inp, struct mbuf *packet, u_int32_t bound_interface_index);
static bool necp_is_intcoproc(struct inpcb *inp, struct mbuf *packet);
struct necp_uuid_id_mapping {
    LIST_ENTRY(necp_uuid_id_mapping) chain;
    uuid_t uuid;
    u_int32_t id;
    os_refcnt_t refcount;
    u_int32_t table_usecount; // Add to UUID policy table count
};

static size_t necp_num_uuid_app_id_mappings;
static bool necp_uuid_app_id_mappings_dirty;
#define NECP_UUID_APP_ID_HASH_SIZE 64
static u_long necp_uuid_app_id_hash_mask;
static u_long necp_uuid_app_id_hash_num_buckets;
static LIST_HEAD(necp_uuid_id_mapping_head, necp_uuid_id_mapping) * necp_uuid_app_id_hashtbl, necp_uuid_service_id_list; // App map is real hash table, service map is just mapping
#define APPUUIDHASH(uuid) (&necp_uuid_app_id_hashtbl[uuid[0] & necp_uuid_app_id_hash_mask]) // Assume first byte of UUIDs are evenly distributed
static u_int32_t necp_create_uuid_app_id_mapping(uuid_t uuid, bool *allocated_mapping, bool uuid_policy_table);
static bool necp_remove_uuid_app_id_mapping(uuid_t uuid, bool *removed_mapping, bool uuid_policy_table);
static struct necp_uuid_id_mapping *necp_uuid_lookup_uuid_with_app_id_locked(u_int32_t local_id);

static struct necp_uuid_id_mapping *necp_uuid_lookup_service_id_locked(uuid_t uuid);
static struct necp_uuid_id_mapping *necp_uuid_lookup_uuid_with_service_id_locked(u_int32_t local_id);
static u_int32_t necp_create_uuid_service_id_mapping(uuid_t uuid);
static bool necp_remove_uuid_service_id_mapping(uuid_t uuid);
struct necp_string_id_mapping {
    LIST_ENTRY(necp_string_id_mapping) chain;
    char *string;
    u_int32_t id;
    os_refcnt_t refcount;
};

static LIST_HEAD(necp_string_id_mapping_list, necp_string_id_mapping) necp_account_id_list;
static u_int32_t necp_create_string_to_id_mapping(struct necp_string_id_mapping_list *list, char *domain);
static bool necp_remove_string_to_id_mapping(struct necp_string_id_mapping_list *list, char *domain);
static struct necp_string_id_mapping *necp_lookup_string_with_id_locked(struct necp_string_id_mapping_list *list, u_int32_t local_id);

static struct necp_kernel_socket_policy *necp_kernel_socket_policy_find(necp_kernel_policy_id policy_id);
static struct necp_kernel_ip_output_policy *necp_kernel_ip_output_policy_find(necp_kernel_policy_id policy_id);

static LIST_HEAD(_necp_kernel_service_list, necp_service_registration) necp_registered_service_list;

static char *necp_create_trimmed_domain(char *string, size_t length);
static inline int necp_count_dots(char *string, size_t length);

static char *necp_copy_string(char *string, size_t length);
static bool necp_update_qos_marking(struct ifnet *ifp, u_int32_t route_rule_id);
#define ROUTE_RULE_IS_AGGREGATE(ruleid) (ruleid > UINT16_MAX)

#define MAX_ROUTE_RULE_INTERFACES 10
struct necp_route_rule {
    LIST_ENTRY(necp_route_rule) chain;
    u_int32_t id;
    u_int32_t default_action;
    u_int8_t cellular_action;
    u_int8_t wifi_action;
    u_int8_t wired_action;
    u_int8_t expensive_action;
    u_int8_t constrained_action;
    u_int exception_if_indices[MAX_ROUTE_RULE_INTERFACES];
    u_int8_t exception_if_actions[MAX_ROUTE_RULE_INTERFACES];
    os_refcnt_t refcount;
};

static LIST_HEAD(necp_route_rule_list, necp_route_rule) necp_route_rules;
static u_int32_t necp_create_route_rule(struct necp_route_rule_list *list, u_int8_t *route_rules_array, u_int32_t route_rules_array_size);
static bool necp_remove_route_rule(struct necp_route_rule_list *list, u_int32_t route_rule_id);
static bool necp_route_is_allowed(struct rtentry *route, ifnet_t interface, u_int32_t route_rule_id, u_int32_t *interface_type_denied);
static struct necp_route_rule *necp_lookup_route_rule_locked(struct necp_route_rule_list *list, u_int32_t route_rule_id);
static inline void necp_get_parent_cred_result(proc_t proc, struct necp_socket_info *info);

#define MAX_AGGREGATE_ROUTE_RULES 16
struct necp_aggregate_route_rule {
    LIST_ENTRY(necp_aggregate_route_rule) chain;
    u_int32_t id;
    u_int32_t rule_ids[MAX_AGGREGATE_ROUTE_RULES];
};
static LIST_HEAD(necp_aggregate_route_rule_list, necp_aggregate_route_rule) necp_aggregate_route_rules;
static u_int32_t necp_create_aggregate_route_rule(u_int32_t *rule_ids);
// Sysctl definitions
static int sysctl_handle_necp_level SYSCTL_HANDLER_ARGS;
static int sysctl_handle_necp_unentitled_level SYSCTL_HANDLER_ARGS;

SYSCTL_NODE(_net, OID_AUTO, necp, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "NECP");
SYSCTL_INT(_net_necp, NECPCTL_PASS_LOOPBACK, pass_loopback, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_pass_loopback, 0, "");
SYSCTL_INT(_net_necp, NECPCTL_PASS_KEEPALIVES, pass_keepalives, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_pass_keepalives, 0, "");
SYSCTL_INT(_net_necp, NECPCTL_PASS_INTERPOSE, pass_interpose, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_pass_interpose, 0, "");
SYSCTL_INT(_net_necp, NECPCTL_DEBUG, debug, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_debug, 0, "");
SYSCTL_PROC(_net_necp, NECPCTL_DROP_UNENTITLED_LEVEL, drop_unentitled_level, CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW, &necp_drop_unentitled_level, 0, &sysctl_handle_necp_unentitled_level, "IU", "");
SYSCTL_PROC(_net_necp, NECPCTL_DROP_ALL_LEVEL, drop_all_level, CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW, &necp_drop_all_level, 0, &sysctl_handle_necp_level, "IU", "");
SYSCTL_LONG(_net_necp, NECPCTL_SOCKET_POLICY_COUNT, socket_policy_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_kernel_socket_policies_count, "");
SYSCTL_LONG(_net_necp, NECPCTL_SOCKET_NON_APP_POLICY_COUNT, socket_non_app_policy_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_kernel_socket_policies_non_app_count, "");
SYSCTL_LONG(_net_necp, NECPCTL_IP_POLICY_COUNT, ip_policy_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_kernel_ip_output_policies_count, "");
SYSCTL_INT(_net_necp, NECPCTL_SESSION_COUNT, session_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_session_count, 0, "");

static struct necp_drop_dest_policy necp_drop_dest_policy;
static int necp_drop_dest_debug = 0; // 0: off, 1: match, >1: every evaluation
SYSCTL_INT(_net_necp, OID_AUTO, drop_dest_debug, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_drop_dest_debug, 0, "");

static int sysctl_handle_necp_drop_dest_level SYSCTL_HANDLER_ARGS;
SYSCTL_PROC(_net_necp, OID_AUTO, drop_dest_level, CTLTYPE_STRUCT | CTLFLAG_LOCKED | CTLFLAG_ANYBODY | CTLFLAG_RW,
    0, 0, &sysctl_handle_necp_drop_dest_level, "S,necp_drop_dest_level", "");

static bool necp_address_matches_drop_dest_policy(union necp_sockaddr_union *, u_int32_t);
// Session order allocation
static u_int32_t
necp_allocate_new_session_order(u_int32_t priority, u_int32_t control_unit)
{
    u_int32_t new_order = 0;

    // For now, just allocate 1000 orders for each priority
    if (priority == NECP_SESSION_PRIORITY_UNKNOWN || priority > NECP_SESSION_NUM_PRIORITIES) {
        priority = NECP_SESSION_PRIORITY_DEFAULT;
    }

    // Use the control unit to decide the offset into the priority list
    new_order = (control_unit) + ((priority - 1) * 1000);

    return new_order;
}

static inline u_int32_t
necp_get_first_order_for_priority(u_int32_t priority)
{
    return ((priority - 1) * 1000) + 1;
}
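/*
 * Worked example (hypothetical numbers): a session at priority 2 with control
 * unit 7 is assigned order (7) + ((2 - 1) * 1000) = 1007. It therefore sorts
 * after every priority-1 session (whose orders equal their control units) and
 * before any priority-3 session, whose orders start at
 * necp_get_first_order_for_priority(3) = 2001.
 */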
static int
sysctl_handle_necp_level SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
    int error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
    necp_drop_all_order = necp_get_first_order_for_priority(necp_drop_all_level);
    return error;
}

static int
sysctl_handle_necp_unentitled_level SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
    int error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
    necp_drop_unentitled_order = necp_get_first_order_for_priority(necp_drop_unentitled_level);
    return error;
}
// Use a macro here to avoid computing the kauth_cred_t when necp_drop_unentitled_level is 0
static inline u_int32_t
_necp_process_drop_order_inner(kauth_cred_t cred)
{
    if (priv_check_cred(cred, PRIV_NET_PRIVILEGED_CLIENT_ACCESS, 0) != 0 &&
        priv_check_cred(cred, PRIV_NET_PRIVILEGED_SERVER_ACCESS, 0) != 0) {
        return necp_drop_unentitled_order;
    } else {
        return 0;
    }
}

#define necp_process_drop_order(_cred) (necp_drop_unentitled_order != 0 ? _necp_process_drop_order_inner(_cred) : necp_drop_unentitled_order)
#pragma GCC poison _necp_process_drop_order_inner
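/*
 * Minimal usage sketch (hypothetical call site): the macro short-circuits so
 * that no credential is examined in the common case where the sysctl is 0.
 *
 *     u_int32_t drop_order = necp_process_drop_order(kauth_cred_get());
 *
 * drop_order is 0 when unentitled drops are disabled or the caller holds
 * either of the privileged client/server access privileges; otherwise it is
 * necp_drop_unentitled_order, the first session order of the configured
 * drop-unentitled level.
 */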
static int necp_session_op_close(struct fileglob *, vfs_context_t);

static const struct fileops necp_session_fd_ops = {
    .fo_type     = DTYPE_NETPOLICY,
    .fo_read     = fo_no_read,
    .fo_write    = fo_no_write,
    .fo_ioctl    = fo_no_ioctl,
    .fo_select   = fo_no_select,
    .fo_close    = necp_session_op_close,
    .fo_drain    = fo_no_drain,
    .fo_kqfilter = fo_no_kqfilter,
};
static inline necp_drop_all_bypass_check_result_t
necp_check_drop_all_bypass_result(proc_t proc)
{
    if (proc == NULL) {
        proc = current_proc();
        if (proc == NULL) {
            return NECP_DROP_ALL_BYPASS_CHECK_RESULT_FALSE;
        }
    }

#if defined(XNU_TARGET_OS_OSX)
    const char *signing_id = NULL;
    const bool isConfigd = (csproc_get_platform_binary(proc) &&
        (signing_id = cs_identity_get(proc)) &&
        (strlen(signing_id) == SIGNING_ID_CONFIGD_LEN) &&
        (memcmp(signing_id, SIGNING_ID_CONFIGD, SIGNING_ID_CONFIGD_LEN) == 0));
    if (isConfigd) {
        return NECP_DROP_ALL_BYPASS_CHECK_RESULT_TRUE;
    }
#endif

    const task_t task = proc_task(proc);
    if (task == NULL || !IOTaskHasEntitlement(task, "com.apple.private.necp.drop_all_bypass")) {
        return NECP_DROP_ALL_BYPASS_CHECK_RESULT_FALSE;
    }

    return NECP_DROP_ALL_BYPASS_CHECK_RESULT_TRUE;
}
int
necp_session_open(struct proc *p, struct necp_session_open_args *uap, int *retval)
{
#pragma unused(uap)
    int error = 0;
    int fd = -1;
    struct necp_session *session = NULL;
    struct fileproc *fp = NULL;

    uid_t uid = kauth_cred_getuid(proc_ucred(p));
    if (uid != 0 && priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES, 0) != 0) {
        NECPLOG0(LOG_ERR, "Process does not hold necessary entitlement to open NECP session");
        error = EACCES;
        goto done;
    }

    error = falloc(p, &fp, &fd, vfs_context_current());
    if (error != 0) {
        goto done;
    }

    session = necp_create_session();
    if (session == NULL) {
        error = ENOMEM;
        goto done;
    }

    fp->f_fglob->fg_flag = 0;
    fp->f_fglob->fg_ops = &necp_session_fd_ops;
    fp->f_fglob->fg_data = session;

    proc_fdlock(p);
    FDFLAGS_SET(p, fd, (UF_EXCLOSE | UF_FORKCLOSE));
    procfdtbl_releasefd(p, fd, NULL);
    fp_drop(p, fd, fp, 1);
    proc_fdunlock(p);

    *retval = fd;
done:
    if (error != 0) {
        if (session != NULL) {
            necp_delete_session(session);
        }
        if (fp != NULL) {
            fp_free(p, fd, fp);
        }
    }
    return error;
}
static int
necp_session_op_close(struct fileglob *fg, vfs_context_t ctx)
{
#pragma unused(ctx)
    struct necp_session *session = (struct necp_session *)fg->fg_data;
    fg->fg_data = NULL;

    if (session != NULL) {
        necp_policy_mark_all_for_deletion(session);
        necp_policy_apply_all(session);
        necp_delete_session(session);
        return 0;
    } else {
        return ENOENT;
    }
}
static int
necp_session_find_from_fd(int fd, struct necp_session **session)
{
    proc_t p = current_proc();
    struct fileproc *fp = NULL;
    int error = 0;

    if ((error = fp_lookup(p, fd, &fp, 1)) != 0) {
        return error;
    }
    if (fp->f_fglob->fg_ops->fo_type != DTYPE_NETPOLICY) {
        fp_drop(p, fd, fp, 1);
        return ENODEV;
    }
    *session = (struct necp_session *)fp->f_fglob->fg_data;

    if ((*session)->necp_fd_type != necp_fd_type_session) {
        // Not a client fd, ignore
        fp_drop(p, fd, fp, 1);
        return EINVAL;
    }

    return error;
}
static int
necp_session_add_policy(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
{
    int error = 0;
    u_int8_t *tlv_buffer = NULL;

    if (uap->in_buffer_length == 0 || uap->in_buffer_length > NECP_MAX_POLICY_SIZE || uap->in_buffer == 0) {
        NECPLOG(LOG_ERR, "necp_session_add_policy invalid input (%zu)", uap->in_buffer_length);
        error = EINVAL;
        goto done;
    }

    if (uap->out_buffer_length < sizeof(necp_policy_id) || uap->out_buffer == 0) {
        NECPLOG(LOG_ERR, "necp_session_add_policy invalid output buffer (%zu)", uap->out_buffer_length);
        error = EINVAL;
        goto done;
    }

    if ((tlv_buffer = _MALLOC(uap->in_buffer_length, M_NECP, M_WAITOK | M_ZERO)) == NULL) {
        error = ENOMEM;
        goto done;
    }

    error = copyin(uap->in_buffer, tlv_buffer, uap->in_buffer_length);
    if (error != 0) {
        NECPLOG(LOG_ERR, "necp_session_add_policy tlv copyin error (%d)", error);
        goto done;
    }

    necp_policy_id new_policy_id = necp_handle_policy_add(session, tlv_buffer, uap->in_buffer_length, 0, &error);
    if (error != 0) {
        NECPLOG(LOG_ERR, "necp_session_add_policy failed to add policy (%d)", error);
        goto done;
    }

    error = copyout(&new_policy_id, uap->out_buffer, sizeof(new_policy_id));
    if (error != 0) {
        NECPLOG(LOG_ERR, "necp_session_add_policy policy_id copyout error (%d)", error);
        goto done;
    }

done:
    if (tlv_buffer != NULL) {
        FREE(tlv_buffer, M_NECP);
        tlv_buffer = NULL;
    }
    *retval = error;

    return error;
}
static int
necp_session_get_policy(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
{
    int error = 0;
    u_int8_t *response = NULL;

    if (uap->in_buffer_length < sizeof(necp_policy_id) || uap->in_buffer == 0) {
        NECPLOG(LOG_ERR, "necp_session_get_policy invalid input (%zu)", uap->in_buffer_length);
        error = EINVAL;
        goto done;
    }

    necp_policy_id policy_id = 0;
    error = copyin(uap->in_buffer, &policy_id, sizeof(policy_id));
    if (error != 0) {
        NECPLOG(LOG_ERR, "necp_session_get_policy policy_id copyin error (%d)", error);
        goto done;
    }

    struct necp_session_policy *policy = necp_policy_find(session, policy_id);
    if (policy == NULL || policy->pending_deletion) {
        NECPLOG(LOG_ERR, "Failed to find policy with id %d", policy_id);
        error = ENOENT;
        goto done;
    }

    u_int32_t order_tlv_size = sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(necp_policy_order);
    u_int32_t result_tlv_size = (policy->result_size ? (sizeof(u_int8_t) + sizeof(u_int32_t) + policy->result_size) : 0);
    u_int32_t response_size = order_tlv_size + result_tlv_size + policy->conditions_size;

    if (uap->out_buffer_length < response_size || uap->out_buffer == 0) {
        NECPLOG(LOG_ERR, "necp_session_get_policy buffer not large enough (%u < %u)", uap->out_buffer_length, response_size);
        error = EINVAL;
        goto done;
    }

    if (response_size > NECP_MAX_POLICY_SIZE) {
        NECPLOG(LOG_ERR, "necp_session_get_policy size too large to copy (%u)", response_size);
        error = EINVAL;
        goto done;
    }

    MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK | M_ZERO);
    if (response == NULL) {
        error = ENOMEM;
        goto done;
    }

    u_int8_t *cursor = response;
    cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ORDER, sizeof(necp_policy_order), &policy->order, response, response_size);
    if (result_tlv_size) {
        cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_RESULT, policy->result_size, &policy->result, response, response_size);
    }
    if (policy->conditions_size) {
        memcpy(((u_int8_t *)(void *)(cursor)), policy->conditions, policy->conditions_size);
    }

    error = copyout(response, uap->out_buffer, response_size);
    if (error != 0) {
        NECPLOG(LOG_ERR, "necp_session_get_policy TLV copyout error (%d)", error);
        goto done;
    }

done:
    if (response != NULL) {
        FREE(response, M_NECP);
        response = NULL;
    }
    *retval = error;

    return error;
}
static int
necp_session_delete_policy(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
{
    int error = 0;

    if (uap->in_buffer_length < sizeof(necp_policy_id) || uap->in_buffer == 0) {
        NECPLOG(LOG_ERR, "necp_session_delete_policy invalid input (%zu)", uap->in_buffer_length);
        error = EINVAL;
        goto done;
    }

    necp_policy_id delete_policy_id = 0;
    error = copyin(uap->in_buffer, &delete_policy_id, sizeof(delete_policy_id));
    if (error != 0) {
        NECPLOG(LOG_ERR, "necp_session_delete_policy policy_id copyin error (%d)", error);
        goto done;
    }

    struct necp_session_policy *policy = necp_policy_find(session, delete_policy_id);
    if (policy == NULL || policy->pending_deletion) {
        NECPLOG(LOG_ERR, "necp_session_delete_policy failed to find policy with id %u", delete_policy_id);
        error = ENOENT;
        goto done;
    }

    necp_policy_mark_for_deletion(session, policy);

done:
    *retval = error;
    return error;
}
static int
necp_session_apply_all(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
{
#pragma unused(uap)
    necp_policy_apply_all(session);
    *retval = 0;
    return 0;
}
static int
necp_session_list_all(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
{
    u_int32_t tlv_size = (sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(necp_policy_id));
    u_int32_t response_size = 0;
    u_int8_t *response = NULL;
    int num_policies = 0;
    int cur_policy_index = 0;
    int error = 0;
    struct necp_session_policy *policy;

    LIST_FOREACH(policy, &session->policies, chain) {
        if (!policy->pending_deletion) {
            num_policies++;
        }
    }

    if (num_policies > NECP_MAX_POLICY_LIST_COUNT) {
        NECPLOG(LOG_ERR, "necp_session_list_all size too large to copy (%u policies)", num_policies);
        error = EINVAL;
        goto done;
    }

    response_size = num_policies * tlv_size;
    if (uap->out_buffer_length < response_size || uap->out_buffer == 0) {
        NECPLOG(LOG_ERR, "necp_session_list_all buffer not large enough (%u < %u)", uap->out_buffer_length, response_size);
        error = EINVAL;
        goto done;
    }

    // Create a response with one Policy ID TLV for each policy
    MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK | M_ZERO);
    if (response == NULL) {
        error = ENOMEM;
        goto done;
    }

    u_int8_t *cursor = response;
    LIST_FOREACH(policy, &session->policies, chain) {
        if (!policy->pending_deletion && cur_policy_index < num_policies) {
            cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ID, sizeof(u_int32_t), &policy->local_id, response, response_size);
            cur_policy_index++;
        }
    }

    error = copyout(response, uap->out_buffer, response_size);
    if (error != 0) {
        NECPLOG(LOG_ERR, "necp_session_list_all TLV copyout error (%d)", error);
        goto done;
    }

done:
    if (response != NULL) {
        FREE(response, M_NECP);
        response = NULL;
    }
    *retval = error;

    return error;
}
static int
necp_session_delete_all(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
{
#pragma unused(uap)
    necp_policy_mark_all_for_deletion(session);
    *retval = 0;
    return 0;
}
static int
necp_session_set_session_priority(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
{
    int error = 0;
    struct necp_session_policy *policy = NULL;
    struct necp_session_policy *temp_policy = NULL;

    if (uap->in_buffer_length < sizeof(necp_session_priority) || uap->in_buffer == 0) {
        NECPLOG(LOG_ERR, "necp_session_set_session_priority invalid input (%zu)", uap->in_buffer_length);
        error = EINVAL;
        goto done;
    }

    necp_session_priority requested_session_priority = 0;
    error = copyin(uap->in_buffer, &requested_session_priority, sizeof(requested_session_priority));
    if (error != 0) {
        NECPLOG(LOG_ERR, "necp_session_set_session_priority priority copyin error (%d)", error);
        goto done;
    }

    // Enforce special session priorities with entitlements
    if (requested_session_priority == NECP_SESSION_PRIORITY_CONTROL ||
        requested_session_priority == NECP_SESSION_PRIORITY_PRIVILEGED_TUNNEL) {
        errno_t cred_result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES, 0);
        if (cred_result != 0) {
            NECPLOG(LOG_ERR, "Session does not hold necessary entitlement to claim priority level %d", requested_session_priority);
            error = EPERM;
            goto done;
        }
    }

    if (session->session_priority != requested_session_priority) {
        session->session_priority = requested_session_priority;
        session->session_order = necp_allocate_new_session_order(session->session_priority, session->control_unit);
        session->dirty = TRUE;

        // Mark all policies as needing updates
        LIST_FOREACH_SAFE(policy, &session->policies, chain, temp_policy) {
            policy->pending_update = TRUE;
        }
    }

done:
    *retval = error;
    return error;
}
static int
necp_session_lock_to_process(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
{
#pragma unused(uap)
    session->proc_locked = TRUE;
    *retval = 0;
    return 0;
}
static int
necp_session_register_service(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
{
    int error = 0;
    struct necp_service_registration *new_service = NULL;

    if (uap->in_buffer_length < sizeof(uuid_t) || uap->in_buffer == 0) {
        NECPLOG(LOG_ERR, "necp_session_register_service invalid input (%zu)", uap->in_buffer_length);
        error = EINVAL;
        goto done;
    }

    uuid_t service_uuid;
    error = copyin(uap->in_buffer, service_uuid, sizeof(service_uuid));
    if (error != 0) {
        NECPLOG(LOG_ERR, "necp_session_register_service uuid copyin error (%d)", error);
        goto done;
    }

    MALLOC(new_service, struct necp_service_registration *, sizeof(*new_service), M_NECP, M_WAITOK | M_ZERO);
    if (new_service == NULL) {
        NECPLOG0(LOG_ERR, "Failed to allocate service registration");
        error = ENOMEM;
        goto done;
    }

    lck_rw_lock_exclusive(&necp_kernel_policy_lock);
    new_service->service_id = necp_create_uuid_service_id_mapping(service_uuid);
    LIST_INSERT_HEAD(&session->services, new_service, session_chain);
    LIST_INSERT_HEAD(&necp_registered_service_list, new_service, kernel_chain);
    lck_rw_done(&necp_kernel_policy_lock);

done:
    *retval = error;
    return error;
}
static int
necp_session_unregister_service(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
{
    int error = 0;
    struct necp_service_registration *service = NULL;
    struct necp_service_registration *temp_service = NULL;
    struct necp_uuid_id_mapping *mapping = NULL;

    if (uap->in_buffer_length < sizeof(uuid_t) || uap->in_buffer == 0) {
        NECPLOG(LOG_ERR, "necp_session_unregister_service invalid input (%zu)", uap->in_buffer_length);
        error = EINVAL;
        goto done;
    }

    uuid_t service_uuid;
    error = copyin(uap->in_buffer, service_uuid, sizeof(service_uuid));
    if (error != 0) {
        NECPLOG(LOG_ERR, "necp_session_unregister_service uuid copyin error (%d)", error);
        goto done;
    }

    // Remove all matching services for this session
    lck_rw_lock_exclusive(&necp_kernel_policy_lock);
    mapping = necp_uuid_lookup_service_id_locked(service_uuid);
    if (mapping != NULL) {
        LIST_FOREACH_SAFE(service, &session->services, session_chain, temp_service) {
            if (service->service_id == mapping->id) {
                LIST_REMOVE(service, session_chain);
                LIST_REMOVE(service, kernel_chain);
                FREE(service, M_NECP);
            }
        }
        necp_remove_uuid_service_id_mapping(service_uuid);
    }
    lck_rw_done(&necp_kernel_policy_lock);

done:
    *retval = error;
    return error;
}
static int
necp_session_dump_all(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
{
#pragma unused(session)
    int error = 0;

    if (uap->out_buffer_length == 0 || uap->out_buffer == 0) {
        NECPLOG(LOG_ERR, "necp_session_dump_all invalid output buffer (%zu)", uap->out_buffer_length);
        error = EINVAL;
        goto done;
    }

    error = necp_handle_policy_dump_all(uap->out_buffer, uap->out_buffer_length);

done:
    *retval = error;
    return error;
}
int
necp_session_action(struct proc *p, struct necp_session_action_args *uap, int *retval)
{
#pragma unused(p)
    int error = 0;
    int return_value = 0;
    struct necp_session *session = NULL;

    error = necp_session_find_from_fd(uap->necp_fd, &session);
    if (error != 0) {
        NECPLOG(LOG_ERR, "necp_session_action find fd error (%d)", error);
        return error;
    }

    NECP_SESSION_LOCK(session);

    if (session->proc_locked) {
        // Verify that the calling process is allowed to do actions
        uuid_t proc_uuid = {};
        proc_getexecutableuuid(current_proc(), proc_uuid, sizeof(proc_uuid));
        if (uuid_compare(proc_uuid, session->proc_uuid) != 0) {
            return_value = EPERM;
            goto done;
        }
    } else {
        // If not locked, update the proc_uuid and proc_pid of the session
        proc_getexecutableuuid(current_proc(), session->proc_uuid, sizeof(session->proc_uuid));
        session->proc_pid = proc_pid(current_proc());
    }

    u_int32_t action = uap->action;
    switch (action) {
    case NECP_SESSION_ACTION_POLICY_ADD: {
        return_value = necp_session_add_policy(session, uap, retval);
        break;
    }
    case NECP_SESSION_ACTION_POLICY_GET: {
        return_value = necp_session_get_policy(session, uap, retval);
        break;
    }
    case NECP_SESSION_ACTION_POLICY_DELETE: {
        return_value = necp_session_delete_policy(session, uap, retval);
        break;
    }
    case NECP_SESSION_ACTION_POLICY_APPLY_ALL: {
        return_value = necp_session_apply_all(session, uap, retval);
        break;
    }
    case NECP_SESSION_ACTION_POLICY_LIST_ALL: {
        return_value = necp_session_list_all(session, uap, retval);
        break;
    }
    case NECP_SESSION_ACTION_POLICY_DELETE_ALL: {
        return_value = necp_session_delete_all(session, uap, retval);
        break;
    }
    case NECP_SESSION_ACTION_SET_SESSION_PRIORITY: {
        return_value = necp_session_set_session_priority(session, uap, retval);
        break;
    }
    case NECP_SESSION_ACTION_LOCK_SESSION_TO_PROC: {
        return_value = necp_session_lock_to_process(session, uap, retval);
        break;
    }
    case NECP_SESSION_ACTION_REGISTER_SERVICE: {
        return_value = necp_session_register_service(session, uap, retval);
        break;
    }
    case NECP_SESSION_ACTION_UNREGISTER_SERVICE: {
        return_value = necp_session_unregister_service(session, uap, retval);
        break;
    }
    case NECP_SESSION_ACTION_POLICY_DUMP_ALL: {
        return_value = necp_session_dump_all(session, uap, retval);
        break;
    }
    default: {
        NECPLOG(LOG_ERR, "necp_session_action unknown action (%u)", action);
        return_value = EINVAL;
        break;
    }
    }

done:
    NECP_SESSION_UNLOCK(session);
    file_drop(uap->necp_fd);

    return return_value;
}
struct necp_resolver_key_state {
    const struct ccdigest_info *digest_info;
    uint8_t key[CCSHA256_OUTPUT_SIZE];
};
static struct necp_resolver_key_state s_necp_resolver_key_state;

static void
necp_generate_resolver_key(void)
{
    s_necp_resolver_key_state.digest_info = ccsha256_di();
    cc_rand_generate(s_necp_resolver_key_state.key, sizeof(s_necp_resolver_key_state.key));
}
static void
necp_sign_update_context(const struct ccdigest_info *di,
    cchmac_ctx_t ctx,
    uuid_t client_id,
    u_int8_t *query,
    u_int32_t query_length,
    u_int8_t *answer,
    u_int32_t answer_length)
{
    const uint8_t context[32] = {[0 ... 31] = 0x20}; // 0x20 repeated 32 times
    const char *context_string = "NECP Resolver Binder";
    uint8_t separator = 0;
    cchmac_update(di, ctx, sizeof(context), context);
    cchmac_update(di, ctx, strlen(context_string), context_string);
    cchmac_update(di, ctx, sizeof(separator), &separator);
    cchmac_update(di, ctx, sizeof(uuid_t), client_id);
    cchmac_update(di, ctx, sizeof(query_length), &query_length);
    cchmac_update(di, ctx, query_length, query);
    cchmac_update(di, ctx, sizeof(answer_length), &answer_length);
    cchmac_update(di, ctx, answer_length, answer);
}
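/*
 * Sketch of the byte sequence covered by the HMAC above (derived directly from
 * the cchmac_update calls in necp_sign_update_context; widths are those of the
 * C types used):
 *
 *     32 x 0x20 || "NECP Resolver Binder" || 0x00 || client_id (16 bytes)
 *         || query_length (4 bytes) || query || answer_length (4 bytes) || answer
 *
 * Both the signing and validation paths below feed exactly this sequence, so a
 * tag only verifies when client_id, query, and answer are all unchanged.
 */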
int
necp_sign_resolver_answer(uuid_t client_id, u_int8_t *query, u_int32_t query_length,
    u_int8_t *answer, u_int32_t answer_length,
    u_int8_t *tag, u_int32_t *out_tag_length)
{
    if (s_necp_resolver_key_state.digest_info == NULL) {
        return EINVAL;
    }

    if (query == NULL ||
        query_length == 0 ||
        answer == NULL ||
        answer_length == 0 ||
        tag == NULL ||
        out_tag_length == NULL) {
        return EINVAL;
    }

    size_t required_tag_length = s_necp_resolver_key_state.digest_info->output_size;
    if (*out_tag_length < required_tag_length) {
        return EINVAL;
    }

    *out_tag_length = required_tag_length;

    cchmac_ctx_decl(s_necp_resolver_key_state.digest_info->state_size,
        s_necp_resolver_key_state.digest_info->block_size, ctx);
    cchmac_init(s_necp_resolver_key_state.digest_info, ctx,
        sizeof(s_necp_resolver_key_state.key),
        s_necp_resolver_key_state.key);
    necp_sign_update_context(s_necp_resolver_key_state.digest_info,
        ctx, client_id, query, query_length,
        answer, answer_length);
    cchmac_final(s_necp_resolver_key_state.digest_info, ctx, tag);

    return 0;
}
bool
necp_validate_resolver_answer(uuid_t client_id, u_int8_t *query, u_int32_t query_length,
    u_int8_t *answer, u_int32_t answer_length,
    u_int8_t *tag, u_int32_t tag_length)
{
    if (s_necp_resolver_key_state.digest_info == NULL) {
        return false;
    }

    if (query == NULL ||
        query_length == 0 ||
        answer == NULL ||
        answer_length == 0 ||
        tag == NULL ||
        tag_length == 0) {
        return false;
    }

    size_t required_tag_length = s_necp_resolver_key_state.digest_info->output_size;
    if (tag_length != required_tag_length) {
        return false;
    }

    uint8_t actual_tag[required_tag_length];

    cchmac_ctx_decl(s_necp_resolver_key_state.digest_info->state_size,
        s_necp_resolver_key_state.digest_info->block_size, ctx);
    cchmac_init(s_necp_resolver_key_state.digest_info, ctx,
        sizeof(s_necp_resolver_key_state.key),
        s_necp_resolver_key_state.key);
    necp_sign_update_context(s_necp_resolver_key_state.digest_info,
        ctx, client_id, query, query_length,
        answer, answer_length);
    cchmac_final(s_necp_resolver_key_state.digest_info, ctx, actual_tag);

    return cc_cmp_safe(s_necp_resolver_key_state.digest_info->output_size, tag, actual_tag) == 0;
}
errno_t
necp_init(void)
{
    errno_t result = 0;

    necp_kernel_policy_grp_attr = lck_grp_attr_alloc_init();
    if (necp_kernel_policy_grp_attr == NULL) {
        NECPLOG0(LOG_ERR, "lck_grp_attr_alloc_init failed");
        result = ENOMEM;
        goto fail;
    }

    necp_kernel_policy_mtx_grp = lck_grp_alloc_init(NECP_CONTROL_NAME, necp_kernel_policy_grp_attr);
    if (necp_kernel_policy_mtx_grp == NULL) {
        NECPLOG0(LOG_ERR, "lck_grp_alloc_init failed");
        result = ENOMEM;
        goto fail;
    }

    necp_kernel_policy_mtx_attr = lck_attr_alloc_init();
    if (necp_kernel_policy_mtx_attr == NULL) {
        NECPLOG0(LOG_ERR, "lck_attr_alloc_init failed");
        result = ENOMEM;
        goto fail;
    }

    lck_rw_init(&necp_kernel_policy_lock, necp_kernel_policy_mtx_grp, necp_kernel_policy_mtx_attr);

    necp_route_rule_grp_attr = lck_grp_attr_alloc_init();
    if (necp_route_rule_grp_attr == NULL) {
        NECPLOG0(LOG_ERR, "lck_grp_attr_alloc_init failed");
        result = ENOMEM;
        goto fail;
    }

    necp_route_rule_mtx_grp = lck_grp_alloc_init("necp_route_rule", necp_route_rule_grp_attr);
    if (necp_route_rule_mtx_grp == NULL) {
        NECPLOG0(LOG_ERR, "lck_grp_alloc_init failed");
        result = ENOMEM;
        goto fail;
    }

    necp_route_rule_mtx_attr = lck_attr_alloc_init();
    if (necp_route_rule_mtx_attr == NULL) {
        NECPLOG0(LOG_ERR, "lck_attr_alloc_init failed");
        result = ENOMEM;
        goto fail;
    }

    lck_rw_init(&necp_route_rule_lock, necp_route_rule_mtx_grp, necp_route_rule_mtx_attr);

    TAILQ_INIT(&necp_session_list);

    LIST_INIT(&necp_kernel_socket_policies);
    LIST_INIT(&necp_kernel_ip_output_policies);

    LIST_INIT(&necp_account_id_list);

    LIST_INIT(&necp_uuid_service_id_list);

    LIST_INIT(&necp_registered_service_list);

    LIST_INIT(&necp_route_rules);
    LIST_INIT(&necp_aggregate_route_rules);

    necp_generate_resolver_key();

    necp_uuid_app_id_hashtbl = hashinit(NECP_UUID_APP_ID_HASH_SIZE, M_NECP, &necp_uuid_app_id_hash_mask);
    necp_uuid_app_id_hash_num_buckets = necp_uuid_app_id_hash_mask + 1;
    necp_num_uuid_app_id_mappings = 0;
    necp_uuid_app_id_mappings_dirty = FALSE;

    necp_kernel_application_policies_condition_mask = 0;
    necp_kernel_socket_policies_condition_mask = 0;
    necp_kernel_ip_output_policies_condition_mask = 0;

    necp_kernel_application_policies_count = 0;
    necp_kernel_socket_policies_count = 0;
    necp_kernel_socket_policies_non_app_count = 0;
    necp_kernel_ip_output_policies_count = 0;
    necp_kernel_ip_output_policies_non_id_count = 0;

    necp_kernel_socket_policies_gencount = 1;

    memset(&necp_kernel_socket_policies_map, 0, sizeof(necp_kernel_socket_policies_map));
    memset(&necp_kernel_ip_output_policies_map, 0, sizeof(necp_kernel_ip_output_policies_map));
    necp_kernel_socket_policies_app_layer_map = NULL;

    necp_drop_unentitled_order = necp_get_first_order_for_priority(necp_drop_unentitled_level);

    return 0;

fail:
    if (necp_kernel_policy_mtx_attr != NULL) {
        lck_attr_free(necp_kernel_policy_mtx_attr);
        necp_kernel_policy_mtx_attr = NULL;
    }
    if (necp_kernel_policy_mtx_grp != NULL) {
        lck_grp_free(necp_kernel_policy_mtx_grp);
        necp_kernel_policy_mtx_grp = NULL;
    }
    if (necp_kernel_policy_grp_attr != NULL) {
        lck_grp_attr_free(necp_kernel_policy_grp_attr);
        necp_kernel_policy_grp_attr = NULL;
    }
    if (necp_route_rule_mtx_attr != NULL) {
        lck_attr_free(necp_route_rule_mtx_attr);
        necp_route_rule_mtx_attr = NULL;
    }
    if (necp_route_rule_mtx_grp != NULL) {
        lck_grp_free(necp_route_rule_mtx_grp);
        necp_route_rule_mtx_grp = NULL;
    }
    if (necp_route_rule_grp_attr != NULL) {
        lck_grp_attr_free(necp_route_rule_grp_attr);
        necp_route_rule_grp_attr = NULL;
    }
    return result;
}
static void
necp_post_change_event(struct kev_necp_policies_changed_data *necp_event_data)
{
    struct kev_msg ev_msg;
    memset(&ev_msg, 0, sizeof(ev_msg));

    ev_msg.vendor_code = KEV_VENDOR_APPLE;
    ev_msg.kev_class = KEV_NETWORK_CLASS;
    ev_msg.kev_subclass = KEV_NECP_SUBCLASS;
    ev_msg.event_code = KEV_NECP_POLICIES_CHANGED;

    ev_msg.dv[0].data_ptr = necp_event_data;
    ev_msg.dv[0].data_length = sizeof(necp_event_data->changed_count);
    ev_msg.dv[1].data_length = 0;

    kev_post_msg(&ev_msg);
}
static bool
necp_buffer_write_tlv_validate(u_int8_t *cursor, u_int8_t type, u_int32_t length,
    u_int8_t *buffer, u_int32_t buffer_length)
{
    if (cursor < buffer || (uintptr_t)(cursor - buffer) > buffer_length) {
        NECPLOG0(LOG_ERR, "Cannot write TLV in buffer (invalid cursor)");
        return false;
    }
    u_int8_t *next_tlv = (u_int8_t *)(cursor + sizeof(type) + sizeof(length) + length);
    if (next_tlv <= buffer || // make sure the next TLV start doesn't overflow
        (uintptr_t)(next_tlv - buffer) > buffer_length) { // make sure the next TLV has enough room in buffer
        NECPLOG(LOG_ERR, "Cannot write TLV in buffer (TLV length %u, buffer length %u)",
            length, buffer_length);
        return false;
    }
    return true;
}
u_int8_t *
necp_buffer_write_tlv_if_different(u_int8_t *cursor, u_int8_t type,
    u_int32_t length, const void *value, bool *updated,
    u_int8_t *buffer, u_int32_t buffer_length)
{
    if (!necp_buffer_write_tlv_validate(cursor, type, length, buffer, buffer_length)) {
        // If we can't fit this TLV, return the current cursor
        return cursor;
    }
    u_int8_t *next_tlv = (u_int8_t *)(cursor + sizeof(type) + sizeof(length) + length);
    if (*updated || *(u_int8_t *)(cursor) != type) {
        *(u_int8_t *)(cursor) = type;
        *updated = TRUE;
    }
    if (*updated || *(u_int32_t *)(void *)(cursor + sizeof(type)) != length) {
        *(u_int32_t *)(void *)(cursor + sizeof(type)) = length;
        *updated = TRUE;
    }
    if (*updated || memcmp((u_int8_t *)(cursor + sizeof(type) + sizeof(length)), value, length) != 0) {
        memcpy((u_int8_t *)(cursor + sizeof(type) + sizeof(length)), value, length);
        *updated = TRUE;
    }
    return next_tlv;
}
u_int8_t *
necp_buffer_write_tlv(u_int8_t *cursor, u_int8_t type,
    u_int32_t length, const void *value,
    u_int8_t *buffer, u_int32_t buffer_length)
{
    if (!necp_buffer_write_tlv_validate(cursor, type, length, buffer, buffer_length)) {
        return NULL;
    }
    u_int8_t *next_tlv = (u_int8_t *)(cursor + sizeof(type) + sizeof(length) + length);
    *(u_int8_t *)(cursor) = type;
    *(u_int32_t *)(void *)(cursor + sizeof(type)) = length;
    if (length > 0) {
        memcpy((u_int8_t *)(cursor + sizeof(type) + sizeof(length)), value, length);
    }

    return next_tlv;
}
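/*
 * Wire layout sketch for the TLVs handled by these helpers (as implied by the
 * pointer arithmetic above): a 1-byte type, a 4-byte host-order length, then
 * the value bytes, with TLVs packed back to back.
 *
 *     offset 0          : u_int8_t  type
 *     offset 1          : u_int32_t length
 *     offset 5          : u_int8_t  value[length]
 *     offset 5 + length : start of the next TLV
 */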
static u_int8_t
necp_buffer_get_tlv_type(u_int8_t *buffer, int tlv_offset)
{
    u_int8_t *type = NULL;

    if (buffer == NULL) {
        return 0;
    }

    type = (u_int8_t *)((u_int8_t *)buffer + tlv_offset);
    return type ? *type : 0;
}
*buffer
, int tlv_offset
)
1508 u_int32_t
*length
= NULL
;
1510 if (buffer
== NULL
) {
1514 length
= (u_int32_t
*)(void *)((u_int8_t
*)buffer
+ tlv_offset
+ sizeof(u_int8_t
));
1515 return length
? *length
: 0;
static u_int8_t *
necp_buffer_get_tlv_value(u_int8_t *buffer, int tlv_offset, u_int32_t *value_size)
{
    u_int8_t *value = NULL;
    u_int32_t length = necp_buffer_get_tlv_length(buffer, tlv_offset);
    if (length == 0) {
        return value;
    }

    if (value_size) {
        *value_size = length;
    }

    value = (u_int8_t *)((u_int8_t *)buffer + tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t));
    return value;
}
static int
necp_buffer_find_tlv(u_int8_t *buffer, u_int32_t buffer_length, int offset, u_int8_t type, int *err, int next)
{
    if (offset < 0) {
        return -1;
    }
    int cursor = offset;
    int next_cursor;
    u_int8_t curr_type;
    u_int32_t curr_length;

    while (true) {
        if ((((u_int32_t)cursor) + sizeof(curr_type) + sizeof(curr_length)) > buffer_length) {
            return -1;
        }
        if (!next) {
            curr_type = necp_buffer_get_tlv_type(buffer, cursor);
        } else {
            next = 0;
            curr_type = NECP_TLV_NIL;
        }
        curr_length = necp_buffer_get_tlv_length(buffer, cursor);
        if (curr_length > buffer_length - ((u_int32_t)cursor + sizeof(curr_type) + sizeof(curr_length))) {
            return -1;
        }

        next_cursor = (cursor + sizeof(curr_type) + sizeof(curr_length) + curr_length);
        if (curr_type == type) {
            // check if entire TLV fits inside buffer
            if (((u_int32_t)next_cursor) <= buffer_length) {
                if (err != NULL) {
                    *err = 0;
                }
                return cursor;
            } else {
                return -1;
            }
        }
        cursor = next_cursor;
    }
}
static int
necp_find_tlv(u_int8_t *buffer, u_int32_t buffer_length, int offset, u_int8_t type, int *err, int next)
{
    int cursor = -1;
    if (buffer != NULL) {
        cursor = necp_buffer_find_tlv(buffer, buffer_length, offset, type, err, next);
    }
    return cursor;
}
static int
necp_get_tlv_at_offset(u_int8_t *buffer, u_int32_t buffer_length,
    int tlv_offset, u_int32_t out_buffer_length, void *out_buffer, u_int32_t *value_size)
{
    if (buffer == NULL) {
        NECPLOG0(LOG_ERR, "necp_get_tlv_at_offset buffer is NULL");
        return EINVAL;
    }

    // Handle buffer parsing

    // Validate that buffer has enough room for any TLV
    if (tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t) > buffer_length) {
        NECPLOG(LOG_ERR, "necp_get_tlv_at_offset buffer_length is too small for TLV (%u < %u)",
            buffer_length, tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t));
        return EINVAL;
    }

    // Validate that buffer has enough room for this TLV
    u_int32_t tlv_length = necp_buffer_get_tlv_length(buffer, tlv_offset);
    if (tlv_length > buffer_length - (tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t))) {
        NECPLOG(LOG_ERR, "necp_get_tlv_at_offset buffer_length is too small for TLV of length %u (%u < %u)",
            tlv_length, buffer_length, tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t) + tlv_length);
        return EINVAL;
    }

    if (out_buffer != NULL && out_buffer_length > 0) {
        // Validate that out buffer is large enough for value
        if (out_buffer_length < tlv_length) {
            NECPLOG(LOG_ERR, "necp_get_tlv_at_offset out_buffer_length is too small for TLV value (%u < %u)",
                out_buffer_length, tlv_length);
            return EINVAL;
        }

        // Get value pointer
        u_int8_t *tlv_value = necp_buffer_get_tlv_value(buffer, tlv_offset, NULL);
        if (tlv_value == NULL) {
            NECPLOG0(LOG_ERR, "necp_get_tlv_at_offset tlv_value is NULL");
            return ENOENT;
        }

        // Copy value
        memcpy(out_buffer, tlv_value, tlv_length);
    }

    // Copy out length
    if (value_size != NULL) {
        *value_size = tlv_length;
    }

    return 0;
}
static int
necp_get_tlv(u_int8_t *buffer, u_int32_t buffer_length,
    int offset, u_int8_t type, u_int32_t buff_len, void *buff, u_int32_t *value_size)
{
	int error = 0;

	int tlv_offset = necp_find_tlv(buffer, buffer_length, offset, type, &error, 0);
	if (tlv_offset < 0) {
		return error;
	}

	return necp_get_tlv_at_offset(buffer, buffer_length, tlv_offset, buff_len, buff, value_size);
}
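/*
 * necp_get_tlv() is a convenience wrapper: it locates the first TLV of the
 * given type and then copies its value out via necp_get_tlv_at_offset(). A
 * NULL out buffer (with length 0) can be used to query just the value size,
 * which is how the policy-add path below sizes its allocations, for example:
 *
 *	u_int32_t size = 0;
 *	necp_get_tlv_at_offset(tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &size);
 *	// ...allocate a buffer of 'size' bytes, then call again to copy the value
 */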
// Session Management

static struct necp_session *
necp_create_session(void)
{
	struct necp_session *new_session = NULL;

	MALLOC(new_session, struct necp_session *, sizeof(*new_session), M_NECP, M_WAITOK | M_ZERO);
	if (new_session == NULL) {
		goto done;
	}

	new_session->necp_fd_type = necp_fd_type_session;
	new_session->session_priority = NECP_SESSION_PRIORITY_UNKNOWN;
	new_session->dirty = FALSE;
	LIST_INIT(&new_session->policies);
	lck_mtx_init(&new_session->lock, necp_kernel_policy_mtx_grp, necp_kernel_policy_mtx_attr);

	lck_rw_lock_exclusive(&necp_kernel_policy_lock);

	// Find the next available control unit
	u_int32_t control_unit = 1;
	struct necp_session *next_session = NULL;
	TAILQ_FOREACH(next_session, &necp_session_list, chain) {
		if (next_session->control_unit > control_unit) {
			// Found a gap, grab this control unit
			break;
		}

		// Try the next control unit, loop around
		control_unit = next_session->control_unit + 1;
	}

	new_session->control_unit = control_unit;
	new_session->session_order = necp_allocate_new_session_order(new_session->session_priority, control_unit);

	if (next_session != NULL) {
		TAILQ_INSERT_BEFORE(next_session, new_session, chain);
	} else {
		TAILQ_INSERT_TAIL(&necp_session_list, new_session, chain);
	}

	necp_session_count++;
	lck_rw_done(&necp_kernel_policy_lock);

	NECPLOG(LOG_DEBUG, "Created NECP session, control unit %d", control_unit);

done:
	return new_session;
}
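/*
 * Control units are assigned by walking the session list (kept sorted by
 * control unit) and taking the first gap. For example, if existing sessions
 * hold units 1, 2, and 4, a new session is assigned unit 3 and inserted
 * before the session holding unit 4; with no gap it is appended at the tail.
 */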
static void
necp_delete_session(struct necp_session *session)
{
	if (session != NULL) {
		struct necp_service_registration *service = NULL;
		struct necp_service_registration *temp_service = NULL;
		LIST_FOREACH_SAFE(service, &session->services, session_chain, temp_service) {
			LIST_REMOVE(service, session_chain);
			lck_rw_lock_exclusive(&necp_kernel_policy_lock);
			LIST_REMOVE(service, kernel_chain);
			lck_rw_done(&necp_kernel_policy_lock);
			FREE(service, M_NECP);
		}
		NECPLOG0(LOG_DEBUG, "Deleted NECP session");

		lck_rw_lock_exclusive(&necp_kernel_policy_lock);
		TAILQ_REMOVE(&necp_session_list, session, chain);
		necp_session_count--;
		lck_rw_done(&necp_kernel_policy_lock);

		lck_mtx_destroy(&session->lock, necp_kernel_policy_mtx_grp);
		FREE(session, M_NECP);
	}
}
// Session Policy Management

static inline u_int8_t
necp_policy_result_get_type_from_buffer(u_int8_t *buffer, u_int32_t length)
{
	return (buffer && length >= sizeof(u_int8_t)) ? buffer[0] : 0;
}

static inline u_int32_t
necp_policy_result_get_parameter_length_from_buffer(u_int8_t *buffer, u_int32_t length)
{
	return (buffer && length > sizeof(u_int8_t)) ? (length - sizeof(u_int8_t)) : 0;
}

static inline u_int8_t *
necp_policy_result_get_parameter_pointer_from_buffer(u_int8_t *buffer, u_int32_t length)
{
	return (buffer && length > sizeof(u_int8_t)) ? (buffer + sizeof(u_int8_t)) : NULL;
}

static bool
necp_policy_result_requires_route_rules(u_int8_t *buffer, u_int32_t length)
{
	u_int8_t type = necp_policy_result_get_type_from_buffer(buffer, length);
	if (type == NECP_POLICY_RESULT_ROUTE_RULES) {
		return TRUE;
	}
	return FALSE;
}
static inline bool
necp_address_is_valid(struct sockaddr *address)
{
	if (address->sa_family == AF_INET) {
		return address->sa_len == sizeof(struct sockaddr_in);
	} else if (address->sa_family == AF_INET6) {
		return address->sa_len == sizeof(struct sockaddr_in6);
	} else {
		return FALSE;
	}
}
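/*
 * A policy result buffer is a single byte of result type followed by an
 * optional, type-specific parameter (for example a 4-byte control unit for
 * NECP_POLICY_RESULT_SOCKET_DIVERT, or a uuid_t for the netagent results).
 * The validator below only checks that the parameter is large enough for the
 * declared type; interpretation happens when the policy is applied.
 */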
static bool
necp_policy_result_is_valid(u_int8_t *buffer, u_int32_t length)
{
	bool validated = FALSE;
	u_int8_t type = necp_policy_result_get_type_from_buffer(buffer, length);
	u_int32_t parameter_length = necp_policy_result_get_parameter_length_from_buffer(buffer, length);
	switch (type) {
	case NECP_POLICY_RESULT_PASS:
	case NECP_POLICY_RESULT_DROP:
	case NECP_POLICY_RESULT_ROUTE_RULES:
	case NECP_POLICY_RESULT_SCOPED_DIRECT:
	case NECP_POLICY_RESULT_ALLOW_UNENTITLED: {
		validated = TRUE;
		break;
	}
	case NECP_POLICY_RESULT_SKIP:
	case NECP_POLICY_RESULT_SOCKET_DIVERT:
	case NECP_POLICY_RESULT_SOCKET_FILTER: {
		if (parameter_length >= sizeof(u_int32_t)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_RESULT_IP_TUNNEL: {
		if (parameter_length > sizeof(u_int32_t)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_RESULT_SOCKET_SCOPED: {
		if (parameter_length > 0) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_RESULT_TRIGGER:
	case NECP_POLICY_RESULT_TRIGGER_IF_NEEDED:
	case NECP_POLICY_RESULT_TRIGGER_SCOPED:
	case NECP_POLICY_RESULT_NO_TRIGGER_SCOPED:
	case NECP_POLICY_RESULT_USE_NETAGENT:
	case NECP_POLICY_RESULT_NETAGENT_SCOPED: {
		if (parameter_length >= sizeof(uuid_t)) {
			validated = TRUE;
		}
		break;
	}
	default: {
		validated = FALSE;
		break;
	}
	}

	NECPLOG(LOG_DEBUG, "Policy result type %d, valid %d", type, validated);

	return validated;
}
static inline u_int8_t
necp_policy_condition_get_type_from_buffer(u_int8_t *buffer, u_int32_t length)
{
	return (buffer && length >= sizeof(u_int8_t)) ? buffer[0] : 0;
}

static inline u_int8_t
necp_policy_condition_get_flags_from_buffer(u_int8_t *buffer, u_int32_t length)
{
	return (buffer && length >= (2 * sizeof(u_int8_t))) ? buffer[1] : 0;
}

static inline u_int32_t
necp_policy_condition_get_value_length_from_buffer(u_int8_t *buffer, u_int32_t length)
{
	return (buffer && length >= (2 * sizeof(u_int8_t))) ? (length - (2 * sizeof(u_int8_t))) : 0;
}

static inline u_int8_t *
necp_policy_condition_get_value_pointer_from_buffer(u_int8_t *buffer, u_int32_t length)
{
	return (buffer && length > (2 * sizeof(u_int8_t))) ? (buffer + (2 * sizeof(u_int8_t))) : NULL;
}

static inline bool
necp_policy_condition_is_default(u_int8_t *buffer, u_int32_t length)
{
	return necp_policy_condition_get_type_from_buffer(buffer, length) == NECP_POLICY_CONDITION_DEFAULT;
}

static inline bool
necp_policy_condition_is_application(u_int8_t *buffer, u_int32_t length)
{
	return necp_policy_condition_get_type_from_buffer(buffer, length) == NECP_POLICY_CONDITION_APPLICATION;
}

static inline bool
necp_policy_condition_is_real_application(u_int8_t *buffer, u_int32_t length)
{
	return necp_policy_condition_get_type_from_buffer(buffer, length) == NECP_POLICY_CONDITION_REAL_APPLICATION;
}

static inline bool
necp_policy_condition_requires_application(u_int8_t *buffer, u_int32_t length)
{
	u_int8_t type = necp_policy_condition_get_type_from_buffer(buffer, length);
	return type == NECP_POLICY_CONDITION_REAL_APPLICATION;
}

static inline bool
necp_policy_condition_requires_real_application(u_int8_t *buffer, u_int32_t length)
{
	u_int8_t type = necp_policy_condition_get_type_from_buffer(buffer, length);
	return type == NECP_POLICY_CONDITION_ENTITLEMENT;
}
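/*
 * A policy condition buffer is two header bytes, a condition type and a flags
 * byte (NECP_POLICY_CONDITION_FLAGS_NEGATIVE inverts the match), followed by
 * the condition value. Illustrative layout for a PID condition, assuming a
 * buffer 'value' large enough to hold the header plus a pid_t:
 *
 *	value[0] = NECP_POLICY_CONDITION_PID;   // type
 *	value[1] = 0;                           // flags (not negated)
 *	memcpy(&value[2], &pid, sizeof(pid_t)); // condition value
 */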
static bool
necp_policy_condition_is_valid(u_int8_t *buffer, u_int32_t length, u_int8_t policy_result_type)
{
	bool validated = FALSE;
	bool result_cannot_have_ip_layer = (policy_result_type == NECP_POLICY_RESULT_SOCKET_DIVERT ||
	    policy_result_type == NECP_POLICY_RESULT_SOCKET_FILTER ||
	    policy_result_type == NECP_POLICY_RESULT_TRIGGER ||
	    policy_result_type == NECP_POLICY_RESULT_TRIGGER_IF_NEEDED ||
	    policy_result_type == NECP_POLICY_RESULT_TRIGGER_SCOPED ||
	    policy_result_type == NECP_POLICY_RESULT_NO_TRIGGER_SCOPED ||
	    policy_result_type == NECP_POLICY_RESULT_SOCKET_SCOPED ||
	    policy_result_type == NECP_POLICY_RESULT_ROUTE_RULES ||
	    policy_result_type == NECP_POLICY_RESULT_USE_NETAGENT ||
	    policy_result_type == NECP_POLICY_RESULT_NETAGENT_SCOPED ||
	    policy_result_type == NECP_POLICY_RESULT_SCOPED_DIRECT ||
	    policy_result_type == NECP_POLICY_RESULT_ALLOW_UNENTITLED) ? TRUE : FALSE;
	u_int32_t condition_length = necp_policy_condition_get_value_length_from_buffer(buffer, length);
	u_int8_t *condition_value = necp_policy_condition_get_value_pointer_from_buffer(buffer, length);
	u_int8_t type = necp_policy_condition_get_type_from_buffer(buffer, length);
	u_int8_t flags = necp_policy_condition_get_flags_from_buffer(buffer, length);
	switch (type) {
	case NECP_POLICY_CONDITION_APPLICATION:
	case NECP_POLICY_CONDITION_REAL_APPLICATION: {
		if (!(flags & NECP_POLICY_CONDITION_FLAGS_NEGATIVE) &&
		    condition_length >= sizeof(uuid_t) &&
		    condition_value != NULL &&
		    !uuid_is_null(condition_value)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_DOMAIN:
	case NECP_POLICY_CONDITION_ACCOUNT:
	case NECP_POLICY_CONDITION_BOUND_INTERFACE: {
		if (condition_length > 0) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_TRAFFIC_CLASS: {
		if (condition_length >= sizeof(struct necp_policy_condition_tc_range)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_DEFAULT:
	case NECP_POLICY_CONDITION_ALL_INTERFACES:
	case NECP_POLICY_CONDITION_ENTITLEMENT:
	case NECP_POLICY_CONDITION_PLATFORM_BINARY:
	case NECP_POLICY_CONDITION_HAS_CLIENT:
	case NECP_POLICY_CONDITION_LOCAL_NETWORKS: {
		if (!(flags & NECP_POLICY_CONDITION_FLAGS_NEGATIVE)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_IP_PROTOCOL: {
		if (condition_length >= sizeof(u_int16_t)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_PID: {
		if (condition_length >= sizeof(pid_t) &&
		    condition_value != NULL &&
		    *((pid_t *)(void *)condition_value) != 0) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_UID: {
		if (condition_length >= sizeof(uid_t)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_LOCAL_ADDR:
	case NECP_POLICY_CONDITION_REMOTE_ADDR: {
		if (!result_cannot_have_ip_layer && condition_length >= sizeof(struct necp_policy_condition_addr) &&
		    necp_address_is_valid(&((struct necp_policy_condition_addr *)(void *)condition_value)->address.sa)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_LOCAL_ADDR_RANGE:
	case NECP_POLICY_CONDITION_REMOTE_ADDR_RANGE: {
		if (!result_cannot_have_ip_layer && condition_length >= sizeof(struct necp_policy_condition_addr_range) &&
		    necp_address_is_valid(&((struct necp_policy_condition_addr_range *)(void *)condition_value)->start_address.sa) &&
		    necp_address_is_valid(&((struct necp_policy_condition_addr_range *)(void *)condition_value)->end_address.sa)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_AGENT_TYPE: {
		if (!(flags & NECP_POLICY_CONDITION_FLAGS_NEGATIVE) &&
		    condition_length >= sizeof(struct necp_policy_condition_agent_type)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_FLOW_IP_PROTOCOL: {
		if (condition_length >= sizeof(u_int16_t)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_FLOW_LOCAL_ADDR:
	case NECP_POLICY_CONDITION_FLOW_REMOTE_ADDR: {
		if (condition_length >= sizeof(struct necp_policy_condition_addr) &&
		    necp_address_is_valid(&((struct necp_policy_condition_addr *)(void *)condition_value)->address.sa)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_FLOW_LOCAL_ADDR_RANGE:
	case NECP_POLICY_CONDITION_FLOW_REMOTE_ADDR_RANGE: {
		if (condition_length >= sizeof(struct necp_policy_condition_addr_range) &&
		    necp_address_is_valid(&((struct necp_policy_condition_addr_range *)(void *)condition_value)->start_address.sa) &&
		    necp_address_is_valid(&((struct necp_policy_condition_addr_range *)(void *)condition_value)->end_address.sa)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_CLIENT_FLAGS: {
		if (condition_length == 0 || condition_length >= sizeof(u_int32_t)) {
			validated = TRUE;
		}
		break;
	}
	case NECP_POLICY_CONDITION_FLOW_LOCAL_ADDR_EMPTY: {
		validated = TRUE;
		break;
	}
	case NECP_POLICY_CONDITION_FLOW_REMOTE_ADDR_EMPTY: {
		validated = TRUE;
		break;
	}
	default: {
		validated = FALSE;
		break;
	}
	}

	NECPLOG(LOG_DEBUG, "Policy condition type %d, valid %d", type, validated);

	return validated;
}
static bool
necp_policy_route_rule_is_default(u_int8_t *buffer, u_int32_t length)
{
	return necp_policy_condition_get_value_length_from_buffer(buffer, length) == 0 &&
	       necp_policy_condition_get_flags_from_buffer(buffer, length) == 0;
}

static bool
necp_policy_route_rule_is_valid(u_int8_t *buffer, u_int32_t length)
{
	bool validated = FALSE;
	u_int8_t type = necp_policy_condition_get_type_from_buffer(buffer, length);
	switch (type) {
	case NECP_ROUTE_RULE_ALLOW_INTERFACE: {
		validated = TRUE;
		break;
	}
	case NECP_ROUTE_RULE_DENY_INTERFACE: {
		validated = TRUE;
		break;
	}
	case NECP_ROUTE_RULE_QOS_MARKING: {
		validated = TRUE;
		break;
	}
	case NECP_ROUTE_RULE_DENY_LQM_ABORT: {
		validated = TRUE;
		break;
	}
	default: {
		validated = FALSE;
		break;
	}
	}

	NECPLOG(LOG_DEBUG, "Policy route rule type %d, valid %d", type, validated);

	return validated;
}
static int
necp_get_posix_error_for_necp_error(int response_error)
{
	switch (response_error) {
	case NECP_ERROR_UNKNOWN_PACKET_TYPE:
	case NECP_ERROR_INVALID_TLV:
	case NECP_ERROR_POLICY_RESULT_INVALID:
	case NECP_ERROR_POLICY_CONDITIONS_INVALID:
	case NECP_ERROR_ROUTE_RULES_INVALID: {
		return EINVAL;
	}
	case NECP_ERROR_POLICY_ID_NOT_FOUND: {
		return ENOENT;
	}
	case NECP_ERROR_INVALID_PROCESS: {
		return EPERM;
	}
	case NECP_ERROR_INTERNAL:
	default: {
		return ENOMEM;
	}
	}
}
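/*
 * A policy-add request carries one NECP_TLV_POLICY_ORDER, one
 * NECP_TLV_POLICY_RESULT, any number of NECP_TLV_POLICY_CONDITION TLVs and,
 * for route-rule results, NECP_TLV_ROUTE_RULE TLVs. The handler below makes
 * two passes over the repeated TLVs: a first pass to size the flattened
 * conditions/route-rules arrays (guarding against overflow), and a second
 * pass to copy and validate each entry.
 */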
static necp_policy_id
necp_handle_policy_add(struct necp_session *session,
    u_int8_t *tlv_buffer, size_t tlv_buffer_length, int offset, int *return_error)
{
	bool has_default_condition = FALSE;
	bool has_non_default_condition = FALSE;
	bool has_application_condition = FALSE;
	bool has_real_application_condition = FALSE;
	bool requires_application_condition = FALSE;
	bool requires_real_application_condition = FALSE;
	u_int8_t *conditions_array = NULL;
	u_int32_t conditions_array_size = 0;
	int conditions_array_cursor;

	bool has_default_route_rule = FALSE;
	u_int8_t *route_rules_array = NULL;
	u_int32_t route_rules_array_size = 0;
	int route_rules_array_cursor;

	int cursor;
	int error = 0;
	u_int32_t response_error = NECP_ERROR_INTERNAL;

	necp_policy_order order = 0;
	struct necp_session_policy *policy = NULL;
	u_int8_t *policy_result = NULL;
	u_int32_t policy_result_size = 0;
	// Read policy order
	error = necp_get_tlv(tlv_buffer, tlv_buffer_length, offset, NECP_TLV_POLICY_ORDER, sizeof(order), &order, NULL);
	if (error) {
		NECPLOG(LOG_ERR, "Failed to get policy order: %d", error);
		response_error = NECP_ERROR_INVALID_TLV;
		goto fail;
	}

	// Read policy result
	cursor = necp_find_tlv(tlv_buffer, tlv_buffer_length, offset, NECP_TLV_POLICY_RESULT, &error, 0);
	if (error || cursor < 0) {
		NECPLOG(LOG_ERR, "Failed to find policy result TLV: %d", error);
		response_error = NECP_ERROR_INVALID_TLV;
		goto fail;
	}
	error = necp_get_tlv_at_offset(tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &policy_result_size);
	if (error || policy_result_size == 0) {
		NECPLOG(LOG_ERR, "Failed to get policy result length: %d", error);
		response_error = NECP_ERROR_INVALID_TLV;
		goto fail;
	}
	if (policy_result_size > NECP_MAX_POLICY_RESULT_SIZE) {
		NECPLOG(LOG_ERR, "Policy result length too large: %u", policy_result_size);
		response_error = NECP_ERROR_INVALID_TLV;
		goto fail;
	}
	MALLOC(policy_result, u_int8_t *, policy_result_size, M_NECP, M_WAITOK);
	if (policy_result == NULL) {
		NECPLOG(LOG_ERR, "Failed to allocate a policy result buffer (size %d)", policy_result_size);
		response_error = NECP_ERROR_INTERNAL;
		goto fail;
	}
	error = necp_get_tlv_at_offset(tlv_buffer, tlv_buffer_length, cursor, policy_result_size, policy_result, NULL);
	if (error) {
		NECPLOG(LOG_ERR, "Failed to get policy result: %d", error);
		response_error = NECP_ERROR_POLICY_RESULT_INVALID;
		goto fail;
	}
	if (!necp_policy_result_is_valid(policy_result, policy_result_size)) {
		NECPLOG0(LOG_ERR, "Failed to validate policy result");
		response_error = NECP_ERROR_POLICY_RESULT_INVALID;
		goto fail;
	}

	if (necp_policy_result_requires_route_rules(policy_result, policy_result_size)) {
		// Read route rules conditions
		for (cursor = necp_find_tlv(tlv_buffer, tlv_buffer_length, offset, NECP_TLV_ROUTE_RULE, &error, 0);
		    cursor >= 0;
		    cursor = necp_find_tlv(tlv_buffer, tlv_buffer_length, cursor, NECP_TLV_ROUTE_RULE, &error, 1)) {
			u_int32_t route_rule_size = 0;
			necp_get_tlv_at_offset(tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &route_rule_size);
			if (os_add_overflow(route_rules_array_size,
			    (sizeof(u_int8_t) + sizeof(u_int32_t) + route_rule_size),
			    &route_rules_array_size)) {
				NECPLOG0(LOG_ERR, "Route rules size overflowed, too large");
				response_error = NECP_ERROR_INVALID_TLV;
				goto fail;
			}
		}

		if (route_rules_array_size == 0) {
			NECPLOG0(LOG_ERR, "Failed to get policy route rules");
			response_error = NECP_ERROR_INVALID_TLV;
			goto fail;
		}
		if (route_rules_array_size > NECP_MAX_ROUTE_RULES_ARRAY_SIZE) {
			NECPLOG(LOG_ERR, "Route rules length too large: %u", route_rules_array_size);
			response_error = NECP_ERROR_INVALID_TLV;
			goto fail;
		}
		MALLOC(route_rules_array, u_int8_t *, route_rules_array_size, M_NECP, M_WAITOK);
		if (route_rules_array == NULL) {
			NECPLOG(LOG_ERR, "Failed to allocate a policy route rules array (size %d)", route_rules_array_size);
			response_error = NECP_ERROR_INTERNAL;
			goto fail;
		}

		route_rules_array_cursor = 0;
		for (cursor = necp_find_tlv(tlv_buffer, tlv_buffer_length, offset, NECP_TLV_ROUTE_RULE, &error, 0);
		    cursor >= 0;
		    cursor = necp_find_tlv(tlv_buffer, tlv_buffer_length, cursor, NECP_TLV_ROUTE_RULE, &error, 1)) {
			u_int8_t route_rule_type = NECP_TLV_ROUTE_RULE;
			u_int32_t route_rule_size = 0;
			necp_get_tlv_at_offset(tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &route_rule_size);
			if (route_rule_size > 0 &&
			    (sizeof(route_rule_type) + sizeof(route_rule_size) + route_rule_size) <= (route_rules_array_size - route_rules_array_cursor)) {
				memcpy((route_rules_array + route_rules_array_cursor), &route_rule_type, sizeof(route_rule_type));
				route_rules_array_cursor += sizeof(route_rule_type);

				memcpy((route_rules_array + route_rules_array_cursor), &route_rule_size, sizeof(route_rule_size));
				route_rules_array_cursor += sizeof(route_rule_size);

				necp_get_tlv_at_offset(tlv_buffer, tlv_buffer_length, cursor, route_rule_size, (route_rules_array + route_rules_array_cursor), NULL);

				if (!necp_policy_route_rule_is_valid((route_rules_array + route_rules_array_cursor), route_rule_size)) {
					NECPLOG0(LOG_ERR, "Failed to validate policy route rule");
					response_error = NECP_ERROR_ROUTE_RULES_INVALID;
					goto fail;
				}

				if (necp_policy_route_rule_is_default((route_rules_array + route_rules_array_cursor), route_rule_size)) {
					if (has_default_route_rule) {
						NECPLOG0(LOG_ERR, "Failed to validate route rule; contained multiple default route rules");
						response_error = NECP_ERROR_ROUTE_RULES_INVALID;
						goto fail;
					}
					has_default_route_rule = TRUE;
				}

				route_rules_array_cursor += route_rule_size;
			}
		}
	}
	// Read policy conditions
	for (cursor = necp_find_tlv(tlv_buffer, tlv_buffer_length, offset, NECP_TLV_POLICY_CONDITION, &error, 0);
	    cursor >= 0;
	    cursor = necp_find_tlv(tlv_buffer, tlv_buffer_length, cursor, NECP_TLV_POLICY_CONDITION, &error, 1)) {
		u_int32_t condition_size = 0;
		necp_get_tlv_at_offset(tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &condition_size);

		if (condition_size > 0) {
			if (os_add_overflow(conditions_array_size,
			    (sizeof(u_int8_t) + sizeof(u_int32_t) + condition_size),
			    &conditions_array_size)) {
				NECPLOG0(LOG_ERR, "Conditions size overflowed, too large");
				response_error = NECP_ERROR_INVALID_TLV;
				goto fail;
			}
		}
	}

	if (conditions_array_size == 0) {
		NECPLOG0(LOG_ERR, "Failed to get policy conditions");
		response_error = NECP_ERROR_INVALID_TLV;
		goto fail;
	}
	if (conditions_array_size > NECP_MAX_CONDITIONS_ARRAY_SIZE) {
		NECPLOG(LOG_ERR, "Conditions length too large: %u", conditions_array_size);
		response_error = NECP_ERROR_INVALID_TLV;
		goto fail;
	}
	MALLOC(conditions_array, u_int8_t *, conditions_array_size, M_NECP, M_WAITOK);
	if (conditions_array == NULL) {
		NECPLOG(LOG_ERR, "Failed to allocate a policy conditions array (size %d)", conditions_array_size);
		response_error = NECP_ERROR_INTERNAL;
		goto fail;
	}

	conditions_array_cursor = 0;
	for (cursor = necp_find_tlv(tlv_buffer, tlv_buffer_length, offset, NECP_TLV_POLICY_CONDITION, &error, 0);
	    cursor >= 0;
	    cursor = necp_find_tlv(tlv_buffer, tlv_buffer_length, cursor, NECP_TLV_POLICY_CONDITION, &error, 1)) {
		u_int8_t condition_type = NECP_TLV_POLICY_CONDITION;
		u_int32_t condition_size = 0;
		necp_get_tlv_at_offset(tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &condition_size);
		if (condition_size > 0 &&
		    (sizeof(condition_type) + sizeof(condition_size) + condition_size) <= (conditions_array_size - conditions_array_cursor)) {
			memcpy((conditions_array + conditions_array_cursor), &condition_type, sizeof(condition_type));
			conditions_array_cursor += sizeof(condition_type);

			memcpy((conditions_array + conditions_array_cursor), &condition_size, sizeof(condition_size));
			conditions_array_cursor += sizeof(condition_size);

			necp_get_tlv_at_offset(tlv_buffer, tlv_buffer_length, cursor, condition_size, (conditions_array + conditions_array_cursor), NULL);
			if (!necp_policy_condition_is_valid((conditions_array + conditions_array_cursor), condition_size, necp_policy_result_get_type_from_buffer(policy_result, policy_result_size))) {
				NECPLOG0(LOG_ERR, "Failed to validate policy condition");
				response_error = NECP_ERROR_POLICY_CONDITIONS_INVALID;
				goto fail;
			}

			if (necp_policy_condition_is_default((conditions_array + conditions_array_cursor), condition_size)) {
				has_default_condition = TRUE;
			} else {
				has_non_default_condition = TRUE;
			}
			if (has_default_condition && has_non_default_condition) {
				NECPLOG0(LOG_ERR, "Failed to validate conditions; contained default and non-default conditions");
				response_error = NECP_ERROR_POLICY_CONDITIONS_INVALID;
				goto fail;
			}

			if (necp_policy_condition_is_application((conditions_array + conditions_array_cursor), condition_size)) {
				has_application_condition = TRUE;
			}

			if (necp_policy_condition_is_real_application((conditions_array + conditions_array_cursor), condition_size)) {
				has_real_application_condition = TRUE;
			}

			if (necp_policy_condition_requires_application((conditions_array + conditions_array_cursor), condition_size)) {
				requires_application_condition = TRUE;
			}

			if (necp_policy_condition_requires_real_application((conditions_array + conditions_array_cursor), condition_size)) {
				requires_real_application_condition = TRUE;
			}

			conditions_array_cursor += condition_size;
		}
	}

	if (requires_application_condition && !has_application_condition) {
		NECPLOG0(LOG_ERR, "Failed to validate conditions; did not contain application condition");
		response_error = NECP_ERROR_POLICY_CONDITIONS_INVALID;
		goto fail;
	}

	if (requires_real_application_condition && !has_real_application_condition) {
		NECPLOG0(LOG_ERR, "Failed to validate conditions; did not contain real application condition");
		response_error = NECP_ERROR_POLICY_CONDITIONS_INVALID;
		goto fail;
	}
	if ((policy = necp_policy_create(session, order, conditions_array, conditions_array_size, route_rules_array, route_rules_array_size, policy_result, policy_result_size)) == NULL) {
		response_error = NECP_ERROR_INTERNAL;
		goto fail;
	}

	return policy->local_id;

fail:
	if (policy_result != NULL) {
		FREE(policy_result, M_NECP);
	}
	if (conditions_array != NULL) {
		FREE(conditions_array, M_NECP);
	}
	if (route_rules_array != NULL) {
		FREE(route_rules_array, M_NECP);
	}

	if (return_error != NULL) {
		*return_error = necp_get_posix_error_for_necp_error(response_error);
	}
	return 0;
}
static necp_policy_id
necp_policy_get_new_id(struct necp_session *session)
{
	session->last_policy_id++;
	if (session->last_policy_id < 1) {
		session->last_policy_id = 1;
	}

	necp_policy_id newid = session->last_policy_id;
	if (newid == 0) {
		NECPLOG0(LOG_ERR, "Allocate policy id failed.\n");
		return 0;
	}

	return newid;
}
/*
 * For the policy dump response this is the structure:
 *
 * <NECP_PACKET_HEADER>
 * {
 *     type    : NECP_TLV_POLICY_DUMP
 *     length  : ...
 *     value   :
 *     {
 *         { type : NECP_TLV_POLICY_ID,            len : ..., value : ... },
 *         { type : NECP_TLV_POLICY_ORDER,         len : ..., value : ... },
 *         { type : NECP_TLV_POLICY_RESULT_STRING, len : ..., value : ... },
 *         { type : NECP_TLV_POLICY_OWNER,         len : ..., value : ... },
 *         {
 *             type  : NECP_TLV_POLICY_CONDITION
 *             len   : ...
 *             value :
 *             {
 *                 { type : NECP_POLICY_CONDITION_ALL_INTERFACES,   len : ..., value : ... },
 *                 { type : NECP_POLICY_CONDITION_BOUND_INTERFACES, len : ..., value : ... },
 *                 ...
 *             }
 *         }
 *     }
 * },
 * {
 *     type    : NECP_TLV_POLICY_DUMP
 *     length  : ...
 *     value   : (same layout, repeated once per policy)
 * },
 * ...
 */
static int
necp_handle_policy_dump_all(user_addr_t out_buffer, size_t out_buffer_length)
{
	struct necp_kernel_socket_policy *policy = NULL;
	int policy_i;
	int policy_count = 0;
	u_int8_t **tlv_buffer_pointers = NULL;
	u_int32_t *tlv_buffer_lengths = NULL;
	u_int32_t total_tlv_len = 0;
	u_int8_t *result_buf = NULL;
	u_int8_t *result_buf_cursor = result_buf;
	char result_string[MAX_RESULT_STRING_LEN];
	char proc_name_string[MAXCOMLEN + 1];

	int error_code = 0;
	bool error_occured = false;
	u_int32_t response_error = NECP_ERROR_INTERNAL;

#define REPORT_ERROR(error) error_occured = true; \
	response_error = error; \
	goto done

#define UNLOCK_AND_REPORT_ERROR(lock, error) lck_rw_done(lock); \
	REPORT_ERROR(error)

	errno_t cred_result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES, 0);
	if (cred_result != 0) {
		NECPLOG0(LOG_ERR, "Session does not hold the necessary entitlement to get Network Extension Policy information");
		REPORT_ERROR(NECP_ERROR_INTERNAL);
	}

	lck_rw_lock_shared(&necp_kernel_policy_lock);

	NECPLOG0(LOG_DEBUG, "Gathering policies");

	policy_count = necp_kernel_application_policies_count;

	MALLOC(tlv_buffer_pointers, u_int8_t * *, sizeof(u_int8_t *) * policy_count, M_NECP, M_NOWAIT | M_ZERO);
	if (tlv_buffer_pointers == NULL) {
		NECPLOG(LOG_DEBUG, "Failed to allocate tlv_buffer_pointers (%u bytes)", sizeof(u_int8_t *) * policy_count);
		UNLOCK_AND_REPORT_ERROR(&necp_kernel_policy_lock, NECP_ERROR_INTERNAL);
	}

	MALLOC(tlv_buffer_lengths, u_int32_t *, sizeof(u_int32_t) * policy_count, M_NECP, M_NOWAIT | M_ZERO);
	if (tlv_buffer_lengths == NULL) {
		NECPLOG(LOG_DEBUG, "Failed to allocate tlv_buffer_lengths (%u bytes)", sizeof(u_int32_t) * policy_count);
		UNLOCK_AND_REPORT_ERROR(&necp_kernel_policy_lock, NECP_ERROR_INTERNAL);
	}
	for (policy_i = 0; necp_kernel_socket_policies_app_layer_map != NULL && necp_kernel_socket_policies_app_layer_map[policy_i] != NULL; policy_i++) {
		policy = necp_kernel_socket_policies_app_layer_map[policy_i];

		memset(result_string, 0, MAX_RESULT_STRING_LEN);
		memset(proc_name_string, 0, MAXCOMLEN + 1);

		necp_get_result_description(result_string, policy->result, policy->result_parameter);
		proc_name(policy->session_pid, proc_name_string, MAXCOMLEN);

		u_int16_t proc_name_len = strlen(proc_name_string) + 1;
		u_int16_t result_string_len = strlen(result_string) + 1;

		NECPLOG(LOG_DEBUG, "Policy: process: %s, result: %s", proc_name_string, result_string);

		u_int32_t total_allocated_bytes = sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(policy->id) +       // NECP_TLV_POLICY_ID
		    sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(policy->order) +                                  // NECP_TLV_POLICY_ORDER
		    sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(policy->session_order) +                          // NECP_TLV_POLICY_SESSION_ORDER
		    sizeof(u_int8_t) + sizeof(u_int32_t) + result_string_len +                                      // NECP_TLV_POLICY_RESULT_STRING
		    sizeof(u_int8_t) + sizeof(u_int32_t) + proc_name_len +                                          // NECP_TLV_POLICY_OWNER
		    sizeof(u_int8_t) + sizeof(u_int32_t);                                                           // NECP_TLV_POLICY_CONDITION

		// We now traverse the condition_mask to see how much space we need to allocate
		u_int32_t condition_mask = policy->condition_mask;
		u_int8_t num_conditions = 0;
		struct necp_string_id_mapping *account_id_entry = NULL;
		char if_name[IFXNAMSIZ];
		u_int32_t condition_tlv_length = 0;
		memset(if_name, 0, sizeof(if_name));

		if (condition_mask == NECP_POLICY_CONDITION_DEFAULT) {
			num_conditions++;
		} else {
			if (condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES) {
				num_conditions++;
			}
			if (condition_mask & NECP_KERNEL_CONDITION_HAS_CLIENT) {
				num_conditions++;
			}
			if (condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
				snprintf(if_name, IFXNAMSIZ, "%s%d", ifnet_name(policy->cond_bound_interface), ifnet_unit(policy->cond_bound_interface));
				condition_tlv_length += strlen(if_name) + 1;
				num_conditions++;
			}
			if (condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
				condition_tlv_length += sizeof(policy->cond_protocol);
				num_conditions++;
			}
			if (condition_mask & NECP_KERNEL_CONDITION_APP_ID) {
				condition_tlv_length += sizeof(uuid_t);
				num_conditions++;
			}
			if (condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
				condition_tlv_length += sizeof(uuid_t);
				num_conditions++;
			}
			if (condition_mask & NECP_KERNEL_CONDITION_DOMAIN) {
				u_int32_t domain_len = strlen(policy->cond_domain) + 1;
				condition_tlv_length += domain_len;
				num_conditions++;
			}
			if (condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID) {
				account_id_entry = necp_lookup_string_with_id_locked(&necp_account_id_list, policy->cond_account_id);
				u_int32_t account_id_len = 0;
				if (account_id_entry) {
					account_id_len = account_id_entry->string ? strlen(account_id_entry->string) + 1 : 0;
				}
				condition_tlv_length += account_id_len;
				num_conditions++;
			}
			if (condition_mask & NECP_KERNEL_CONDITION_PID) {
				condition_tlv_length += sizeof(pid_t);
				num_conditions++;
			}
			if (condition_mask & NECP_KERNEL_CONDITION_UID) {
				condition_tlv_length += sizeof(uid_t);
				num_conditions++;
			}
			if (condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
				condition_tlv_length += sizeof(struct necp_policy_condition_tc_range);
				num_conditions++;
			}
			if (condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT) {
				num_conditions++;
			}
			if (condition_mask & NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT) {
				u_int32_t entitlement_len = strlen(policy->cond_custom_entitlement) + 1;
				condition_tlv_length += entitlement_len;
				num_conditions++;
			}
			if (condition_mask & NECP_KERNEL_CONDITION_PLATFORM_BINARY) {
				num_conditions++;
			}
			if (condition_mask & NECP_KERNEL_CONDITION_LOCAL_NETWORKS) {
				num_conditions++;
			}
			if (condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
				if (condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
					condition_tlv_length += sizeof(struct necp_policy_condition_addr_range);
				} else {
					condition_tlv_length += sizeof(struct necp_policy_condition_addr);
				}
				num_conditions++;
			}
			if (condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
				if (condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
					condition_tlv_length += sizeof(struct necp_policy_condition_addr_range);
				} else {
					condition_tlv_length += sizeof(struct necp_policy_condition_addr);
				}
				num_conditions++;
			}
			if (condition_mask & NECP_KERNEL_CONDITION_AGENT_TYPE) {
				condition_tlv_length += sizeof(struct necp_policy_condition_agent_type);
				num_conditions++;
			}
			if (condition_mask & NECP_KERNEL_CONDITION_CLIENT_FLAGS) {
				condition_tlv_length += sizeof(u_int32_t);
				num_conditions++;
			}
			if (condition_mask & NECP_KERNEL_CONDITION_LOCAL_EMPTY) {
				num_conditions++;
			}
			if (condition_mask & NECP_KERNEL_CONDITION_REMOTE_EMPTY) {
				num_conditions++;
			}
		}

		condition_tlv_length += num_conditions * (sizeof(u_int8_t) + sizeof(u_int32_t)); // These are for the condition TLVs. The space for "value" is already accounted for above.
		total_allocated_bytes += condition_tlv_length;
		u_int8_t *tlv_buffer;
		MALLOC(tlv_buffer, u_int8_t *, total_allocated_bytes, M_NECP, M_NOWAIT | M_ZERO);
		if (tlv_buffer == NULL) {
			NECPLOG(LOG_DEBUG, "Failed to allocate tlv_buffer (%u bytes)", total_allocated_bytes);
			continue;
		}

		u_int8_t *cursor = tlv_buffer;
		cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ID, sizeof(policy->id), &policy->id, tlv_buffer, total_allocated_bytes);
		cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ORDER, sizeof(necp_policy_order), &policy->order, tlv_buffer, total_allocated_bytes);
		cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_SESSION_ORDER, sizeof(policy->session_order), &policy->session_order, tlv_buffer, total_allocated_bytes);
		cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_RESULT_STRING, result_string_len, result_string, tlv_buffer, total_allocated_bytes);
		cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_OWNER, proc_name_len, proc_name_string, tlv_buffer, total_allocated_bytes);

		u_int8_t q_cond_buf[N_QUICK]; // Minor optimization

		u_int8_t *cond_buf; // To be used for condition TLVs
		if (condition_tlv_length <= N_QUICK) {
			cond_buf = q_cond_buf;
		} else {
			MALLOC(cond_buf, u_int8_t *, condition_tlv_length, M_NECP, M_NOWAIT);
			if (cond_buf == NULL) {
				NECPLOG(LOG_DEBUG, "Failed to allocate cond_buffer (%u bytes)", condition_tlv_length);
				FREE(tlv_buffer, M_NECP);
				continue;
			}
		}

		memset(cond_buf, 0, condition_tlv_length);
		u_int8_t *cond_buf_cursor = cond_buf;
		if (condition_mask == NECP_POLICY_CONDITION_DEFAULT) {
			cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_DEFAULT, 0, "", cond_buf, condition_tlv_length);
		} else {
			if (condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_ALL_INTERFACES, 0, "", cond_buf, condition_tlv_length);
			}
			if (condition_mask & NECP_KERNEL_CONDITION_HAS_CLIENT) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_HAS_CLIENT, 0, "", cond_buf, condition_tlv_length);
			}
			if (condition_mask & NECP_KERNEL_CONDITION_LOCAL_NETWORKS) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_LOCAL_NETWORKS, 0, "", cond_buf, condition_tlv_length);
			}
			if (condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_BOUND_INTERFACE, strlen(if_name) + 1,
				    if_name, cond_buf, condition_tlv_length);
			}
			if (condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_IP_PROTOCOL, sizeof(policy->cond_protocol), &policy->cond_protocol,
				    cond_buf, condition_tlv_length);
			}
			if (condition_mask & NECP_KERNEL_CONDITION_APP_ID) {
				struct necp_uuid_id_mapping *entry = necp_uuid_lookup_uuid_with_app_id_locked(policy->cond_app_id);
				if (entry != NULL) {
					cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_APPLICATION, sizeof(entry->uuid), entry->uuid,
					    cond_buf, condition_tlv_length);
				}
			}
			if (condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
				struct necp_uuid_id_mapping *entry = necp_uuid_lookup_uuid_with_app_id_locked(policy->cond_real_app_id);
				if (entry != NULL) {
					cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_REAL_APPLICATION, sizeof(entry->uuid), entry->uuid,
					    cond_buf, condition_tlv_length);
				}
			}
			if (condition_mask & NECP_KERNEL_CONDITION_DOMAIN) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_DOMAIN, strlen(policy->cond_domain) + 1, policy->cond_domain,
				    cond_buf, condition_tlv_length);
			}
			if (condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID) {
				if (account_id_entry != NULL) {
					cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_ACCOUNT, strlen(account_id_entry->string) + 1, account_id_entry->string,
					    cond_buf, condition_tlv_length);
				}
			}
			if (condition_mask & NECP_KERNEL_CONDITION_PID) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_PID, sizeof(policy->cond_pid), &policy->cond_pid,
				    cond_buf, condition_tlv_length);
			}
			if (condition_mask & NECP_KERNEL_CONDITION_UID) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_UID, sizeof(policy->cond_uid), &policy->cond_uid,
				    cond_buf, condition_tlv_length);
			}
			if (condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_TRAFFIC_CLASS, sizeof(policy->cond_traffic_class), &policy->cond_traffic_class,
				    cond_buf, condition_tlv_length);
			}
			if (condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_ENTITLEMENT, 0, "",
				    cond_buf, condition_tlv_length);
			}
			if (condition_mask & NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_ENTITLEMENT, strlen(policy->cond_custom_entitlement) + 1, policy->cond_custom_entitlement,
				    cond_buf, condition_tlv_length);
			}
			if (condition_mask & NECP_KERNEL_CONDITION_PLATFORM_BINARY) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_PLATFORM_BINARY, 0, "", cond_buf, condition_tlv_length);
			}
			if (condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
				if (condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
					struct necp_policy_condition_addr_range range;
					memcpy(&range.start_address, &policy->cond_local_start, sizeof(policy->cond_local_start));
					memcpy(&range.end_address, &policy->cond_local_end, sizeof(policy->cond_local_end));
					cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_LOCAL_ADDR_RANGE, sizeof(range), &range,
					    cond_buf, condition_tlv_length);
				} else {
					struct necp_policy_condition_addr addr;
					addr.prefix = policy->cond_local_prefix;
					memcpy(&addr.address, &policy->cond_local_start, sizeof(policy->cond_local_start));
					cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_LOCAL_ADDR, sizeof(addr), &addr,
					    cond_buf, condition_tlv_length);
				}
			}
			if (condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
				if (condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
					struct necp_policy_condition_addr_range range;
					memcpy(&range.start_address, &policy->cond_remote_start, sizeof(policy->cond_remote_start));
					memcpy(&range.end_address, &policy->cond_remote_end, sizeof(policy->cond_remote_end));
					cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_REMOTE_ADDR_RANGE, sizeof(range), &range,
					    cond_buf, condition_tlv_length);
				} else {
					struct necp_policy_condition_addr addr;
					addr.prefix = policy->cond_remote_prefix;
					memcpy(&addr.address, &policy->cond_remote_start, sizeof(policy->cond_remote_start));
					cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_REMOTE_ADDR, sizeof(addr), &addr,
					    cond_buf, condition_tlv_length);
				}
			}
			if (condition_mask & NECP_KERNEL_CONDITION_AGENT_TYPE) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_AGENT_TYPE,
				    sizeof(policy->cond_agent_type), &policy->cond_agent_type,
				    cond_buf, condition_tlv_length);
			}
			if (condition_mask & NECP_KERNEL_CONDITION_CLIENT_FLAGS) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_CLIENT_FLAGS, sizeof(policy->cond_client_flags), &policy->cond_client_flags, cond_buf, condition_tlv_length);
			}
			if (condition_mask & NECP_KERNEL_CONDITION_LOCAL_EMPTY) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_FLOW_LOCAL_ADDR_EMPTY, 0, "", cond_buf, condition_tlv_length);
			}
			if (condition_mask & NECP_KERNEL_CONDITION_REMOTE_EMPTY) {
				cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_FLOW_REMOTE_ADDR_EMPTY, 0, "", cond_buf, condition_tlv_length);
			}
		}

		cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_CONDITION, cond_buf_cursor - cond_buf, cond_buf, tlv_buffer, total_allocated_bytes);
		if (cond_buf != q_cond_buf) {
			FREE(cond_buf, M_NECP);
		}

		tlv_buffer_pointers[policy_i] = tlv_buffer;
		tlv_buffer_lengths[policy_i] = (cursor - tlv_buffer);

		// This is the length of the TLV for NECP_TLV_POLICY_DUMP
		total_tlv_len += sizeof(u_int8_t) + sizeof(u_int32_t) + (cursor - tlv_buffer);
	}
	lck_rw_done(&necp_kernel_policy_lock);

	if (out_buffer != 0) {
		if (out_buffer_length < total_tlv_len + sizeof(u_int32_t)) {
			NECPLOG(LOG_DEBUG, "out_buffer_length too small (%u < %u)", out_buffer_length, total_tlv_len + sizeof(u_int32_t));
			REPORT_ERROR(NECP_ERROR_INVALID_TLV);
		}

		// Allow malloc to wait, since the total buffer may be large and we are not holding any locks
		MALLOC(result_buf, u_int8_t *, total_tlv_len + sizeof(u_int32_t), M_NECP, M_WAITOK | M_ZERO);
		if (result_buf == NULL) {
			NECPLOG(LOG_DEBUG, "Failed to allocate result_buffer (%u bytes)", total_tlv_len + sizeof(u_int32_t));
			REPORT_ERROR(NECP_ERROR_INTERNAL);
		}

		// Add four bytes for total length at the start
		memcpy(result_buf, &total_tlv_len, sizeof(u_int32_t));

		result_buf_cursor = result_buf + sizeof(u_int32_t);
		for (int i = 0; i < policy_count; i++) {
			if (tlv_buffer_pointers[i] != NULL) {
				result_buf_cursor = necp_buffer_write_tlv(result_buf_cursor, NECP_TLV_POLICY_DUMP, tlv_buffer_lengths[i], tlv_buffer_pointers[i],
				    result_buf, total_tlv_len + sizeof(u_int32_t));
			}
		}

		int copy_error = copyout(result_buf, out_buffer, total_tlv_len + sizeof(u_int32_t));
		if (copy_error) {
			NECPLOG(LOG_DEBUG, "Failed to copy out result_buffer (%u bytes)", total_tlv_len + sizeof(u_int32_t));
			REPORT_ERROR(NECP_ERROR_INTERNAL);
		}
	}

done:

	if (error_occured) {
		error_code = necp_get_posix_error_for_necp_error(response_error);
	}

	if (result_buf != NULL) {
		FREE(result_buf, M_NECP);
	}

	if (tlv_buffer_pointers != NULL) {
		for (int i = 0; i < policy_count; i++) {
			if (tlv_buffer_pointers[i] != NULL) {
				FREE(tlv_buffer_pointers[i], M_NECP);
				tlv_buffer_pointers[i] = NULL;
			}
		}
		FREE(tlv_buffer_pointers, M_NECP);
	}

	if (tlv_buffer_lengths != NULL) {
		FREE(tlv_buffer_lengths, M_NECP);
	}

#undef RESET_COND_BUF
#undef REPORT_ERROR
#undef UNLOCK_AND_REPORT_ERROR

	return error_code;
}
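/*
 * necp_policy_create() takes ownership of the conditions, route rules, and
 * result buffers passed in: they are stored directly on the new
 * necp_session_policy and are freed later by necp_policy_delete(), so callers
 * must not free them after a successful call.
 */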
static struct necp_session_policy *
necp_policy_create(struct necp_session *session, necp_policy_order order, u_int8_t *conditions_array, u_int32_t conditions_array_size, u_int8_t *route_rules_array, u_int32_t route_rules_array_size, u_int8_t *result, u_int32_t result_size)
{
	struct necp_session_policy *new_policy = NULL;
	struct necp_session_policy *tmp_policy = NULL;

	if (session == NULL || conditions_array == NULL || result == NULL || result_size == 0) {
		return NULL;
	}

	MALLOC_ZONE(new_policy, struct necp_session_policy *, sizeof(*new_policy), M_NECP_SESSION_POLICY, M_WAITOK);
	if (new_policy == NULL) {
		return NULL;
	}

	memset(new_policy, 0, sizeof(*new_policy)); // M_ZERO is not supported for MALLOC_ZONE
	new_policy->applied = FALSE;
	new_policy->pending_deletion = FALSE;
	new_policy->pending_update = FALSE;
	new_policy->order = order;
	new_policy->conditions = conditions_array;
	new_policy->conditions_size = conditions_array_size;
	new_policy->route_rules = route_rules_array;
	new_policy->route_rules_size = route_rules_array_size;
	new_policy->result = result;
	new_policy->result_size = result_size;
	new_policy->local_id = necp_policy_get_new_id(session);

	LIST_INSERT_SORTED_ASCENDING(&session->policies, new_policy, chain, order, tmp_policy);

	session->dirty = TRUE;

	NECPLOG(LOG_DEBUG, "Created NECP policy, order %d", order);

	return new_policy;
}
static struct necp_session_policy *
necp_policy_find(struct necp_session *session, necp_policy_id policy_id)
{
	struct necp_session_policy *policy = NULL;
	if (policy_id == 0) {
		return NULL;
	}

	LIST_FOREACH(policy, &session->policies, chain) {
		if (policy->local_id == policy_id) {
			return policy;
		}
	}

	return NULL;
}
static inline u_int8_t
necp_policy_get_result_type(struct necp_session_policy *policy)
{
	return policy ? necp_policy_result_get_type_from_buffer(policy->result, policy->result_size) : 0;
}

static inline u_int32_t
necp_policy_get_result_parameter_length(struct necp_session_policy *policy)
{
	return policy ? necp_policy_result_get_parameter_length_from_buffer(policy->result, policy->result_size) : 0;
}

static bool
necp_policy_get_result_parameter(struct necp_session_policy *policy, u_int8_t *parameter_buffer, u_int32_t parameter_buffer_length)
{
	if (policy) {
		u_int32_t parameter_length = necp_policy_result_get_parameter_length_from_buffer(policy->result, policy->result_size);
		if (parameter_buffer_length >= parameter_length) {
			u_int8_t *parameter = necp_policy_result_get_parameter_pointer_from_buffer(policy->result, policy->result_size);
			if (parameter && parameter_buffer) {
				memcpy(parameter_buffer, parameter, parameter_length);
				return TRUE;
			}
		}
	}

	return FALSE;
}
static bool
necp_policy_mark_for_deletion(struct necp_session *session, struct necp_session_policy *policy)
{
	if (session == NULL || policy == NULL) {
		return FALSE;
	}

	policy->pending_deletion = TRUE;
	session->dirty = TRUE;

	NECPLOG0(LOG_DEBUG, "Marked NECP policy for removal");
	return TRUE;
}

static bool
necp_policy_mark_all_for_deletion(struct necp_session *session)
{
	struct necp_session_policy *policy = NULL;
	struct necp_session_policy *temp_policy = NULL;

	LIST_FOREACH_SAFE(policy, &session->policies, chain, temp_policy) {
		necp_policy_mark_for_deletion(session, policy);
	}

	return TRUE;
}
static bool
necp_policy_delete(struct necp_session *session, struct necp_session_policy *policy)
{
	if (session == NULL || policy == NULL) {
		return FALSE;
	}

	LIST_REMOVE(policy, chain);

	if (policy->result) {
		FREE(policy->result, M_NECP);
		policy->result = NULL;
	}

	if (policy->conditions) {
		FREE(policy->conditions, M_NECP);
		policy->conditions = NULL;
	}

	if (policy->route_rules) {
		FREE(policy->route_rules, M_NECP);
		policy->route_rules = NULL;
	}

	FREE_ZONE(policy, sizeof(*policy), M_NECP_SESSION_POLICY);

	NECPLOG0(LOG_DEBUG, "Removed NECP policy");
	return TRUE;
}
static bool
necp_policy_unapply(struct necp_session_policy *policy)
{
	int i = 0;
	if (policy == NULL) {
		return FALSE;
	}

	LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	// Release local uuid mappings
	if (!uuid_is_null(policy->applied_app_uuid)) {
		bool removed_mapping = FALSE;
		if (necp_remove_uuid_app_id_mapping(policy->applied_app_uuid, &removed_mapping, TRUE) && removed_mapping) {
			necp_uuid_app_id_mappings_dirty = TRUE;
			necp_num_uuid_app_id_mappings--;
		}
		uuid_clear(policy->applied_app_uuid);
	}
	if (!uuid_is_null(policy->applied_real_app_uuid)) {
		necp_remove_uuid_app_id_mapping(policy->applied_real_app_uuid, NULL, FALSE);
		uuid_clear(policy->applied_real_app_uuid);
	}
	if (!uuid_is_null(policy->applied_result_uuid)) {
		necp_remove_uuid_service_id_mapping(policy->applied_result_uuid);
		uuid_clear(policy->applied_result_uuid);
	}

	// Release string mappings
	if (policy->applied_account != NULL) {
		necp_remove_string_to_id_mapping(&necp_account_id_list, policy->applied_account);
		FREE(policy->applied_account, M_NECP);
		policy->applied_account = NULL;
	}

	// Release route rule
	if (policy->applied_route_rules_id != 0) {
		necp_remove_route_rule(&necp_route_rules, policy->applied_route_rules_id);
		policy->applied_route_rules_id = 0;
	}

	// Remove socket policies
	for (i = 0; i < MAX_KERNEL_SOCKET_POLICIES; i++) {
		if (policy->kernel_socket_policies[i] != 0) {
			necp_kernel_socket_policy_delete(policy->kernel_socket_policies[i]);
			policy->kernel_socket_policies[i] = 0;
		}
	}

	// Remove IP output policies
	for (i = 0; i < MAX_KERNEL_IP_OUTPUT_POLICIES; i++) {
		if (policy->kernel_ip_output_policies[i] != 0) {
			necp_kernel_ip_output_policy_delete(policy->kernel_ip_output_policies[i]);
			policy->kernel_ip_output_policies[i] = 0;
		}
	}

	policy->applied = FALSE;

	return TRUE;
}
#define NECP_KERNEL_POLICY_SUBORDER_ID_TUNNEL_CONDITION         0
#define NECP_KERNEL_POLICY_SUBORDER_NON_ID_TUNNEL_CONDITION     1
#define NECP_KERNEL_POLICY_SUBORDER_ID_CONDITION                2
#define NECP_KERNEL_POLICY_SUBORDER_NON_ID_CONDITIONS           3

struct necp_policy_result_ip_tunnel {
	u_int32_t secondary_result;
	char interface_name[IFXNAMSIZ];
} __attribute__((__packed__));

struct necp_policy_result_service {
	uuid_t identifier;
	u_int32_t data;
} __attribute__((__packed__));
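/*
 * necp_policy_apply() walks a session policy's condition TLVs and folds them
 * into a kernel condition mask (with a parallel negated mask for conditions
 * carrying NECP_POLICY_CONDITION_FLAGS_NEGATIVE), while tracking whether the
 * conditions can be evaluated at the socket layer only or at both the socket
 * and IP layers. The parsed values and masks then drive the kernel-level
 * socket and IP output policies that necp_policy_unapply() later removes.
 */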
static bool
necp_policy_apply(struct necp_session *session, struct necp_session_policy *policy)
{
	bool socket_only_conditions = FALSE;
	bool socket_ip_conditions = FALSE;

	bool socket_layer_non_id_conditions = FALSE;
	bool ip_output_layer_non_id_conditions = FALSE;
	bool ip_output_layer_non_id_only = FALSE;
	bool ip_output_layer_id_condition = FALSE;
	bool ip_output_layer_tunnel_condition_from_id = FALSE;
	bool ip_output_layer_tunnel_condition_from_non_id = FALSE;
	necp_kernel_policy_id cond_ip_output_layer_id = NECP_KERNEL_POLICY_ID_NONE;

	u_int32_t master_condition_mask = 0;
	u_int32_t master_condition_negated_mask = 0;
	ifnet_t cond_bound_interface = NULL;
	u_int32_t cond_account_id = 0;
	char *cond_domain = NULL;
	char *cond_custom_entitlement = NULL;
	pid_t cond_pid = 0;
	uid_t cond_uid = 0;
	necp_app_id cond_app_id = 0;
	necp_app_id cond_real_app_id = 0;
	struct necp_policy_condition_tc_range cond_traffic_class;
	cond_traffic_class.start_tc = 0;
	cond_traffic_class.end_tc = 0;
	u_int16_t cond_protocol = 0;
	union necp_sockaddr_union cond_local_start;
	union necp_sockaddr_union cond_local_end;
	u_int8_t cond_local_prefix = 0;
	union necp_sockaddr_union cond_remote_start;
	union necp_sockaddr_union cond_remote_end;
	u_int8_t cond_remote_prefix = 0;
	u_int32_t cond_client_flags = 0;
	u_int32_t offset = 0;
	u_int8_t ultimate_result = 0;
	u_int32_t secondary_result = 0;
	struct necp_policy_condition_agent_type cond_agent_type = {};
	necp_kernel_policy_result_parameter secondary_result_parameter;
	memset(&secondary_result_parameter, 0, sizeof(secondary_result_parameter));
	u_int32_t cond_last_interface_index = 0;
	necp_kernel_policy_result_parameter ultimate_result_parameter;
	memset(&ultimate_result_parameter, 0, sizeof(ultimate_result_parameter));

	if (policy == NULL) {
		return FALSE;
	}

	LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	// Process conditions
	while (offset < policy->conditions_size) {
		u_int32_t length = 0;
		u_int8_t *value = necp_buffer_get_tlv_value(policy->conditions, offset, &length);

		u_int8_t condition_type = necp_policy_condition_get_type_from_buffer(value, length);
		u_int8_t condition_flags = necp_policy_condition_get_flags_from_buffer(value, length);
		bool condition_is_negative = condition_flags & NECP_POLICY_CONDITION_FLAGS_NEGATIVE;
		u_int32_t condition_length = necp_policy_condition_get_value_length_from_buffer(value, length);
		u_int8_t *condition_value = necp_policy_condition_get_value_pointer_from_buffer(value, length);
		switch (condition_type) {
		case NECP_POLICY_CONDITION_DEFAULT: {
			socket_ip_conditions = TRUE;
			break;
		}
		case NECP_POLICY_CONDITION_ALL_INTERFACES: {
			master_condition_mask |= NECP_KERNEL_CONDITION_ALL_INTERFACES;
			socket_ip_conditions = TRUE;
			break;
		}
		case NECP_POLICY_CONDITION_HAS_CLIENT: {
			master_condition_mask |= NECP_KERNEL_CONDITION_HAS_CLIENT;
			socket_only_conditions = TRUE;
			break;
		}
		case NECP_POLICY_CONDITION_ENTITLEMENT: {
			if (condition_length > 0) {
				if (cond_custom_entitlement == NULL) {
					cond_custom_entitlement = necp_copy_string((char *)condition_value, condition_length);
					if (cond_custom_entitlement != NULL) {
						master_condition_mask |= NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT;
						socket_only_conditions = TRUE;
					}
				}
			} else {
				master_condition_mask |= NECP_KERNEL_CONDITION_ENTITLEMENT;
				socket_only_conditions = TRUE;
			}
			break;
		}
		case NECP_POLICY_CONDITION_PLATFORM_BINARY: {
			master_condition_mask |= NECP_KERNEL_CONDITION_PLATFORM_BINARY;
			socket_only_conditions = TRUE;
			break;
		}
		case NECP_POLICY_CONDITION_DOMAIN: {
			// Make sure there is only one such rule
			if (condition_length > 0 && cond_domain == NULL) {
				cond_domain = necp_create_trimmed_domain((char *)condition_value, condition_length);
				if (cond_domain != NULL) {
					master_condition_mask |= NECP_KERNEL_CONDITION_DOMAIN;
					if (condition_is_negative) {
						master_condition_negated_mask |= NECP_KERNEL_CONDITION_DOMAIN;
					}
					socket_only_conditions = TRUE;
				}
			}
			break;
		}
		case NECP_POLICY_CONDITION_ACCOUNT: {
			// Make sure there is only one such rule
			if (condition_length > 0 && cond_account_id == 0 && policy->applied_account == NULL) {
				char *string = NULL;
				MALLOC(string, char *, condition_length + 1, M_NECP, M_WAITOK);
				if (string != NULL) {
					memcpy(string, condition_value, condition_length);
					string[condition_length] = 0;
					cond_account_id = necp_create_string_to_id_mapping(&necp_account_id_list, string);
					if (cond_account_id != 0) {
						policy->applied_account = string; // Save the string in parent policy
						master_condition_mask |= NECP_KERNEL_CONDITION_ACCOUNT_ID;
						if (condition_is_negative) {
							master_condition_negated_mask |= NECP_KERNEL_CONDITION_ACCOUNT_ID;
						}
						socket_only_conditions = TRUE;
					} else {
						FREE(string, M_NECP);
					}
				}
			}
			break;
		}
		case NECP_POLICY_CONDITION_APPLICATION: {
			// Make sure there is only one such rule, because we save the uuid in the policy
			if (condition_length >= sizeof(uuid_t) && cond_app_id == 0) {
				bool allocated_mapping = FALSE;
				uuid_t application_uuid;
				memcpy(application_uuid, condition_value, sizeof(uuid_t));
				cond_app_id = necp_create_uuid_app_id_mapping(application_uuid, &allocated_mapping, TRUE);
				if (cond_app_id != 0) {
					if (allocated_mapping) {
						necp_uuid_app_id_mappings_dirty = TRUE;
						necp_num_uuid_app_id_mappings++;
					}
					uuid_copy(policy->applied_app_uuid, application_uuid);
					master_condition_mask |= NECP_KERNEL_CONDITION_APP_ID;
					if (condition_is_negative) {
						master_condition_negated_mask |= NECP_KERNEL_CONDITION_APP_ID;
					}
					socket_only_conditions = TRUE;
				}
			}
			break;
		}
		case NECP_POLICY_CONDITION_REAL_APPLICATION: {
			// Make sure there is only one such rule, because we save the uuid in the policy
			if (condition_length >= sizeof(uuid_t) && cond_real_app_id == 0) {
				uuid_t real_application_uuid;
				memcpy(real_application_uuid, condition_value, sizeof(uuid_t));
				cond_real_app_id = necp_create_uuid_app_id_mapping(real_application_uuid, NULL, FALSE);
				if (cond_real_app_id != 0) {
					uuid_copy(policy->applied_real_app_uuid, real_application_uuid);
					master_condition_mask |= NECP_KERNEL_CONDITION_REAL_APP_ID;
					if (condition_is_negative) {
						master_condition_negated_mask |= NECP_KERNEL_CONDITION_REAL_APP_ID;
					}
					socket_only_conditions = TRUE;
				}
			}
			break;
		}
		case NECP_POLICY_CONDITION_PID: {
			if (condition_length >= sizeof(pid_t)) {
				master_condition_mask |= NECP_KERNEL_CONDITION_PID;
				if (condition_is_negative) {
					master_condition_negated_mask |= NECP_KERNEL_CONDITION_PID;
				}
				memcpy(&cond_pid, condition_value, sizeof(cond_pid));
				socket_only_conditions = TRUE;
			}
			break;
		}
		case NECP_POLICY_CONDITION_UID: {
			if (condition_length >= sizeof(uid_t)) {
				master_condition_mask |= NECP_KERNEL_CONDITION_UID;
				if (condition_is_negative) {
					master_condition_negated_mask |= NECP_KERNEL_CONDITION_UID;
				}
				memcpy(&cond_uid, condition_value, sizeof(cond_uid));
				socket_only_conditions = TRUE;
			}
			break;
		}
		case NECP_POLICY_CONDITION_TRAFFIC_CLASS: {
			if (condition_length >= sizeof(struct necp_policy_condition_tc_range)) {
				master_condition_mask |= NECP_KERNEL_CONDITION_TRAFFIC_CLASS;
				if (condition_is_negative) {
					master_condition_negated_mask |= NECP_KERNEL_CONDITION_TRAFFIC_CLASS;
				}
				memcpy(&cond_traffic_class, condition_value, sizeof(cond_traffic_class));
				socket_only_conditions = TRUE;
			}
			break;
		}
		case NECP_POLICY_CONDITION_BOUND_INTERFACE: {
			if (condition_length <= IFXNAMSIZ && condition_length > 0) {
				char interface_name[IFXNAMSIZ];
				memcpy(interface_name, condition_value, condition_length);
				interface_name[condition_length - 1] = 0; // Make sure the string is NULL terminated
				if (ifnet_find_by_name(interface_name, &cond_bound_interface) == 0) {
					master_condition_mask |= NECP_KERNEL_CONDITION_BOUND_INTERFACE;
					if (condition_is_negative) {
						master_condition_negated_mask |= NECP_KERNEL_CONDITION_BOUND_INTERFACE;
					}
				}
				socket_ip_conditions = TRUE;
			}
			break;
		}
		case NECP_POLICY_CONDITION_IP_PROTOCOL:
		case NECP_POLICY_CONDITION_FLOW_IP_PROTOCOL: {
			if (condition_length >= sizeof(u_int16_t)) {
				master_condition_mask |= NECP_KERNEL_CONDITION_PROTOCOL;
				if (condition_is_negative) {
					master_condition_negated_mask |= NECP_KERNEL_CONDITION_PROTOCOL;
				}
				memcpy(&cond_protocol, condition_value, sizeof(cond_protocol));
				if (condition_type == NECP_POLICY_CONDITION_FLOW_IP_PROTOCOL) {
					socket_only_conditions = TRUE;
				} else {
					socket_ip_conditions = TRUE;
				}
			}
			break;
		}
		case NECP_POLICY_CONDITION_LOCAL_NETWORKS: {
			master_condition_mask |= NECP_KERNEL_CONDITION_LOCAL_NETWORKS;
			socket_ip_conditions = TRUE;
			break;
		}
		case NECP_POLICY_CONDITION_LOCAL_ADDR:
		case NECP_POLICY_CONDITION_FLOW_LOCAL_ADDR: {
			struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)condition_value;
			if (!necp_address_is_valid(&address_struct->address.sa)) {
				break;
			}

			cond_local_prefix = address_struct->prefix;
			memcpy(&cond_local_start, &address_struct->address, sizeof(address_struct->address));
			master_condition_mask |= NECP_KERNEL_CONDITION_LOCAL_START;
			master_condition_mask |= NECP_KERNEL_CONDITION_LOCAL_PREFIX;
			if (condition_is_negative) {
				master_condition_negated_mask |= NECP_KERNEL_CONDITION_LOCAL_START;
				master_condition_negated_mask |= NECP_KERNEL_CONDITION_LOCAL_PREFIX;
			}
			if (condition_type == NECP_POLICY_CONDITION_FLOW_LOCAL_ADDR) {
				socket_only_conditions = TRUE;
			} else {
				socket_ip_conditions = TRUE;
			}
			break;
		}
		case NECP_POLICY_CONDITION_REMOTE_ADDR:
		case NECP_POLICY_CONDITION_FLOW_REMOTE_ADDR: {
			struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)condition_value;
			if (!necp_address_is_valid(&address_struct->address.sa)) {
				break;
			}

			cond_remote_prefix = address_struct->prefix;
			memcpy(&cond_remote_start, &address_struct->address, sizeof(address_struct->address));
			master_condition_mask |= NECP_KERNEL_CONDITION_REMOTE_START;
			master_condition_mask |= NECP_KERNEL_CONDITION_REMOTE_PREFIX;
			if (condition_is_negative) {
				master_condition_negated_mask |= NECP_KERNEL_CONDITION_REMOTE_START;
;
3405 master_condition_negated_mask
|= NECP_KERNEL_CONDITION_REMOTE_PREFIX
;
3407 if (condition_type
== NECP_POLICY_CONDITION_FLOW_REMOTE_ADDR
) {
3408 socket_only_conditions
= TRUE
;
3410 socket_ip_conditions
= TRUE
;
3414 case NECP_POLICY_CONDITION_LOCAL_ADDR_RANGE
:
3415 case NECP_POLICY_CONDITION_FLOW_LOCAL_ADDR_RANGE
: {
3416 struct necp_policy_condition_addr_range
*address_struct
= (struct necp_policy_condition_addr_range
*)(void *)condition_value
;
3417 if (!necp_address_is_valid(&address_struct
->start_address
.sa
) ||
3418 !necp_address_is_valid(&address_struct
->end_address
.sa
)) {
3422 memcpy(&cond_local_start
, &address_struct
->start_address
, sizeof(address_struct
->start_address
));
3423 memcpy(&cond_local_end
, &address_struct
->end_address
, sizeof(address_struct
->end_address
));
3424 master_condition_mask
|= NECP_KERNEL_CONDITION_LOCAL_START
;
3425 master_condition_mask
|= NECP_KERNEL_CONDITION_LOCAL_END
;
3426 if (condition_is_negative
) {
3427 master_condition_negated_mask
|= NECP_KERNEL_CONDITION_LOCAL_START
;
3428 master_condition_negated_mask
|= NECP_KERNEL_CONDITION_LOCAL_END
;
3430 if (condition_type
== NECP_POLICY_CONDITION_FLOW_LOCAL_ADDR_RANGE
) {
3431 socket_only_conditions
= TRUE
;
3433 socket_ip_conditions
= TRUE
;
3437 case NECP_POLICY_CONDITION_REMOTE_ADDR_RANGE
:
3438 case NECP_POLICY_CONDITION_FLOW_REMOTE_ADDR_RANGE
: {
3439 struct necp_policy_condition_addr_range
*address_struct
= (struct necp_policy_condition_addr_range
*)(void *)condition_value
;
3440 if (!necp_address_is_valid(&address_struct
->start_address
.sa
) ||
3441 !necp_address_is_valid(&address_struct
->end_address
.sa
)) {
3445 memcpy(&cond_remote_start
, &address_struct
->start_address
, sizeof(address_struct
->start_address
));
3446 memcpy(&cond_remote_end
, &address_struct
->end_address
, sizeof(address_struct
->end_address
));
3447 master_condition_mask
|= NECP_KERNEL_CONDITION_REMOTE_START
;
3448 master_condition_mask
|= NECP_KERNEL_CONDITION_REMOTE_END
;
3449 if (condition_is_negative
) {
3450 master_condition_negated_mask
|= NECP_KERNEL_CONDITION_REMOTE_START
;
3451 master_condition_negated_mask
|= NECP_KERNEL_CONDITION_REMOTE_END
;
3453 if (condition_type
== NECP_POLICY_CONDITION_FLOW_REMOTE_ADDR_RANGE
) {
3454 socket_only_conditions
= TRUE
;
3456 socket_ip_conditions
= TRUE
;
3460 case NECP_POLICY_CONDITION_AGENT_TYPE
: {
3461 if (condition_length
>= sizeof(cond_agent_type
)) {
3462 master_condition_mask
|= NECP_KERNEL_CONDITION_AGENT_TYPE
;
3463 memcpy(&cond_agent_type
, condition_value
, sizeof(cond_agent_type
));
3464 socket_only_conditions
= TRUE
;
3468 case NECP_POLICY_CONDITION_CLIENT_FLAGS
: {
3469 if (condition_is_negative
) {
3470 master_condition_negated_mask
|= NECP_KERNEL_CONDITION_CLIENT_FLAGS
;
3472 master_condition_mask
|= NECP_KERNEL_CONDITION_CLIENT_FLAGS
;
3473 socket_only_conditions
= TRUE
;
3474 if (condition_length
>= sizeof(u_int32_t
)) {
3475 memcpy(&cond_client_flags
, condition_value
, sizeof(cond_client_flags
));
3477 // Empty means match on fallback traffic
3478 cond_client_flags
= NECP_CLIENT_PARAMETER_FLAG_FALLBACK_TRAFFIC
;
3482 case NECP_POLICY_CONDITION_FLOW_LOCAL_ADDR_EMPTY
: {
3483 master_condition_mask
|= NECP_KERNEL_CONDITION_LOCAL_EMPTY
;
3484 if (condition_is_negative
) {
3485 master_condition_negated_mask
|= NECP_KERNEL_CONDITION_LOCAL_EMPTY
;
3487 socket_only_conditions
= TRUE
;
3490 case NECP_POLICY_CONDITION_FLOW_REMOTE_ADDR_EMPTY
: {
3491 master_condition_mask
|= NECP_KERNEL_CONDITION_REMOTE_EMPTY
;
3492 if (condition_is_negative
) {
3493 master_condition_negated_mask
|= NECP_KERNEL_CONDITION_REMOTE_EMPTY
;
3495 socket_only_conditions
= TRUE
;
3503 offset
+= sizeof(u_int8_t
) + sizeof(u_int32_t
) + length
;
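	// At this point every parsed condition has been folded into
	// master_condition_mask / master_condition_negated_mask, and the
	// socket_only_conditions / socket_ip_conditions flags record whether the
	// conditions can only be evaluated at the socket layer or also at the IP
	// output layer. The result type below decides which kernel policy layers
	// actually receive entries.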
	ultimate_result = necp_policy_get_result_type(policy);
	switch (ultimate_result) {
	case NECP_POLICY_RESULT_PASS: {
		if (socket_only_conditions) { // socket_ip_conditions can be TRUE or FALSE
			socket_layer_non_id_conditions = TRUE;
			ip_output_layer_id_condition = TRUE;
		} else if (socket_ip_conditions) {
			socket_layer_non_id_conditions = TRUE;
			ip_output_layer_id_condition = TRUE;
			ip_output_layer_non_id_conditions = TRUE;
		}
		break;
	}
	case NECP_POLICY_RESULT_DROP: {
		if (socket_only_conditions) { // socket_ip_conditions can be TRUE or FALSE
			socket_layer_non_id_conditions = TRUE;
		} else if (socket_ip_conditions) {
			socket_layer_non_id_conditions = TRUE;
			ip_output_layer_non_id_conditions = TRUE;
			ip_output_layer_non_id_only = TRUE; // Only apply drop to packets that didn't go through socket layer
		}
		break;
	}
	case NECP_POLICY_RESULT_SKIP: {
		u_int32_t skip_policy_order = 0;
		if (necp_policy_get_result_parameter(policy, (u_int8_t *)&skip_policy_order, sizeof(skip_policy_order))) {
			ultimate_result_parameter.skip_policy_order = skip_policy_order;
		}

		if (socket_only_conditions) { // socket_ip_conditions can be TRUE or FALSE
			socket_layer_non_id_conditions = TRUE;
			ip_output_layer_id_condition = TRUE;
		} else if (socket_ip_conditions) {
			socket_layer_non_id_conditions = TRUE;
			ip_output_layer_non_id_conditions = TRUE;
		}
		break;
	}
	case NECP_POLICY_RESULT_SOCKET_DIVERT:
	case NECP_POLICY_RESULT_SOCKET_FILTER: {
		u_int32_t control_unit = 0;
		if (necp_policy_get_result_parameter(policy, (u_int8_t *)&control_unit, sizeof(control_unit))) {
			ultimate_result_parameter.flow_divert_control_unit = control_unit;
		}
		socket_layer_non_id_conditions = TRUE;
		break;
	}
	case NECP_POLICY_RESULT_IP_TUNNEL: {
		struct necp_policy_result_ip_tunnel tunnel_parameters;
		u_int32_t tunnel_parameters_length = necp_policy_get_result_parameter_length(policy);
		if (tunnel_parameters_length > sizeof(u_int32_t) &&
		    tunnel_parameters_length <= sizeof(struct necp_policy_result_ip_tunnel) &&
		    necp_policy_get_result_parameter(policy, (u_int8_t *)&tunnel_parameters, sizeof(tunnel_parameters))) {
			ifnet_t tunnel_interface = NULL;
			tunnel_parameters.interface_name[tunnel_parameters_length - sizeof(u_int32_t) - 1] = 0; // Make sure the string is NULL terminated
			if (ifnet_find_by_name(tunnel_parameters.interface_name, &tunnel_interface) == 0) {
				ultimate_result_parameter.tunnel_interface_index = tunnel_interface->if_index;
				ifnet_release(tunnel_interface);
			}

			secondary_result = tunnel_parameters.secondary_result;
			if (secondary_result) {
				cond_last_interface_index = ultimate_result_parameter.tunnel_interface_index;
			}
		}

		if (socket_only_conditions) { // socket_ip_conditions can be TRUE or FALSE
			socket_layer_non_id_conditions = TRUE;
			ip_output_layer_id_condition = TRUE;
			if (secondary_result) {
				ip_output_layer_tunnel_condition_from_id = TRUE;
			}
		} else if (socket_ip_conditions) {
			socket_layer_non_id_conditions = TRUE;
			ip_output_layer_id_condition = TRUE;
			ip_output_layer_non_id_conditions = TRUE;
			if (secondary_result) {
				ip_output_layer_tunnel_condition_from_id = TRUE;
				ip_output_layer_tunnel_condition_from_non_id = TRUE;
			}
		}
		break;
	}
	case NECP_POLICY_RESULT_TRIGGER:
	case NECP_POLICY_RESULT_TRIGGER_IF_NEEDED:
	case NECP_POLICY_RESULT_TRIGGER_SCOPED:
	case NECP_POLICY_RESULT_NO_TRIGGER_SCOPED: {
		struct necp_policy_result_service service_parameters;
		u_int32_t service_result_length = necp_policy_get_result_parameter_length(policy);
		bool has_extra_service_data = FALSE;
		if (service_result_length >= (sizeof(service_parameters))) {
			has_extra_service_data = TRUE;
		}
		if (necp_policy_get_result_parameter(policy, (u_int8_t *)&service_parameters, sizeof(service_parameters))) {
			ultimate_result_parameter.service.identifier = necp_create_uuid_service_id_mapping(service_parameters.identifier);
			if (ultimate_result_parameter.service.identifier != 0) {
				uuid_copy(policy->applied_result_uuid, service_parameters.identifier);
				socket_layer_non_id_conditions = TRUE;
				if (has_extra_service_data) {
					ultimate_result_parameter.service.data = service_parameters.data;
				} else {
					ultimate_result_parameter.service.data = 0;
				}
			}
		}
		break;
	}
	case NECP_POLICY_RESULT_USE_NETAGENT:
	case NECP_POLICY_RESULT_NETAGENT_SCOPED: {
		uuid_t netagent_uuid;
		if (necp_policy_get_result_parameter(policy, (u_int8_t *)&netagent_uuid, sizeof(netagent_uuid))) {
			ultimate_result_parameter.netagent_id = necp_create_uuid_service_id_mapping(netagent_uuid);
			if (ultimate_result_parameter.netagent_id != 0) {
				uuid_copy(policy->applied_result_uuid, netagent_uuid);
				socket_layer_non_id_conditions = TRUE;
			}
		}
		break;
	}
	case NECP_POLICY_RESULT_SOCKET_SCOPED: {
		u_int32_t interface_name_length = necp_policy_get_result_parameter_length(policy);
		if (interface_name_length <= IFXNAMSIZ && interface_name_length > 0) {
			char interface_name[IFXNAMSIZ];
			ifnet_t scope_interface = NULL;
			necp_policy_get_result_parameter(policy, (u_int8_t *)interface_name, interface_name_length);
			interface_name[interface_name_length - 1] = 0; // Make sure the string is NULL terminated
			if (ifnet_find_by_name(interface_name, &scope_interface) == 0) {
				ultimate_result_parameter.scoped_interface_index = scope_interface->if_index;
				socket_layer_non_id_conditions = TRUE;
				ifnet_release(scope_interface);
			}
		}
		break;
	}
	case NECP_POLICY_RESULT_SCOPED_DIRECT: {
		socket_layer_non_id_conditions = TRUE;
		break;
	}
	case NECP_POLICY_RESULT_ALLOW_UNENTITLED: {
		socket_layer_non_id_conditions = TRUE;
		break;
	}
	case NECP_POLICY_RESULT_ROUTE_RULES: {
		if (policy->route_rules != NULL && policy->route_rules_size > 0) {
			u_int32_t route_rule_id = necp_create_route_rule(&necp_route_rules, policy->route_rules, policy->route_rules_size);
			if (route_rule_id > 0) {
				policy->applied_route_rules_id = route_rule_id;
				ultimate_result_parameter.route_rule_id = route_rule_id;
				if (socket_only_conditions) { // socket_ip_conditions can be TRUE or FALSE
					socket_layer_non_id_conditions = TRUE;
				} else if (socket_ip_conditions) {
					socket_layer_non_id_conditions = TRUE;
					ip_output_layer_non_id_conditions = TRUE;
					ip_output_layer_non_id_only = TRUE; // Only apply route rules to packets that didn't go through socket layer
				}
			}
		}
		break;
	}
	default: {
		break;
	}
	}

	if (socket_layer_non_id_conditions) {
		necp_kernel_policy_id policy_id = necp_kernel_socket_policy_add(policy->order, session->session_order, session->proc_pid, master_condition_mask, master_condition_negated_mask, cond_app_id, cond_real_app_id, cond_custom_entitlement, cond_account_id, cond_domain, cond_pid, cond_uid, cond_bound_interface, cond_traffic_class, cond_protocol, &cond_local_start, &cond_local_end, cond_local_prefix, &cond_remote_start, &cond_remote_end, cond_remote_prefix, &cond_agent_type, cond_client_flags, ultimate_result, ultimate_result_parameter);

		if (policy_id == 0) {
			NECPLOG0(LOG_DEBUG, "Error applying socket kernel policy");
			goto fail;
		}

		cond_ip_output_layer_id = policy_id;
		policy->kernel_socket_policies[0] = policy_id;
	}

	if (ip_output_layer_non_id_conditions) {
		u_int32_t condition_mask = master_condition_mask;
		if (ip_output_layer_non_id_only) {
			condition_mask |= NECP_KERNEL_CONDITION_POLICY_ID;
		}

		necp_kernel_policy_id policy_id = necp_kernel_ip_output_policy_add(policy->order, NECP_KERNEL_POLICY_SUBORDER_NON_ID_CONDITIONS, session->session_order, session->proc_pid, condition_mask, master_condition_negated_mask, NECP_KERNEL_POLICY_ID_NONE, cond_bound_interface, 0, cond_protocol, &cond_local_start, &cond_local_end, cond_local_prefix, &cond_remote_start, &cond_remote_end, cond_remote_prefix, ultimate_result, ultimate_result_parameter);

		if (policy_id == 0) {
			NECPLOG0(LOG_DEBUG, "Error applying IP output kernel policy");
			goto fail;
		}

		policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_NON_ID_CONDITIONS] = policy_id;
	}

	if (ip_output_layer_id_condition) {
		necp_kernel_policy_id policy_id = necp_kernel_ip_output_policy_add(policy->order, NECP_KERNEL_POLICY_SUBORDER_ID_CONDITION, session->session_order, session->proc_pid, NECP_KERNEL_CONDITION_POLICY_ID | NECP_KERNEL_CONDITION_ALL_INTERFACES, 0, cond_ip_output_layer_id, NULL, 0, 0, NULL, NULL, 0, NULL, NULL, 0, ultimate_result, ultimate_result_parameter);

		if (policy_id == 0) {
			NECPLOG0(LOG_DEBUG, "Error applying IP output kernel policy");
			goto fail;
		}

		policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_ID_CONDITION] = policy_id;
	}

	// Extra policies for IP Output tunnels for when packets loop back
	if (ip_output_layer_tunnel_condition_from_id) {
		necp_kernel_policy_id policy_id = necp_kernel_ip_output_policy_add(policy->order, NECP_KERNEL_POLICY_SUBORDER_NON_ID_TUNNEL_CONDITION, session->session_order, session->proc_pid, NECP_KERNEL_CONDITION_POLICY_ID | NECP_KERNEL_CONDITION_LAST_INTERFACE | NECP_KERNEL_CONDITION_ALL_INTERFACES, 0, policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_NON_ID_CONDITIONS], NULL, cond_last_interface_index, 0, NULL, NULL, 0, NULL, NULL, 0, secondary_result, secondary_result_parameter);

		if (policy_id == 0) {
			NECPLOG0(LOG_DEBUG, "Error applying IP output kernel policy");
			goto fail;
		}

		policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_NON_ID_TUNNEL_CONDITION] = policy_id;
	}

	if (ip_output_layer_tunnel_condition_from_id) {
		necp_kernel_policy_id policy_id = necp_kernel_ip_output_policy_add(policy->order, NECP_KERNEL_POLICY_SUBORDER_ID_TUNNEL_CONDITION, session->session_order, session->proc_pid, NECP_KERNEL_CONDITION_POLICY_ID | NECP_KERNEL_CONDITION_LAST_INTERFACE | NECP_KERNEL_CONDITION_ALL_INTERFACES, 0, policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_ID_CONDITION], NULL, cond_last_interface_index, 0, NULL, NULL, 0, NULL, NULL, 0, secondary_result, secondary_result_parameter);

		if (policy_id == 0) {
			NECPLOG0(LOG_DEBUG, "Error applying IP output kernel policy");
			goto fail;
		}

		policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_ID_TUNNEL_CONDITION] = policy_id;
	}

	policy->applied = TRUE;
	policy->pending_update = FALSE;
	return TRUE;

fail:
	return FALSE;
}
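// Applies all pending changes for a session: deletes policies marked for
// deletion, applies newly added policies, re-applies updated ones, then
// rebuilds the kernel policy maps and notifies clients of the change.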
static void
necp_policy_apply_all(struct necp_session *session)
{
	struct necp_session_policy *policy = NULL;
	struct necp_session_policy *temp_policy = NULL;
	struct kev_necp_policies_changed_data kev_data;
	kev_data.changed_count = 0;

	lck_rw_lock_exclusive(&necp_kernel_policy_lock);

	// Remove existing applied policies
	if (session->dirty) {
		LIST_FOREACH_SAFE(policy, &session->policies, chain, temp_policy) {
			if (policy->pending_deletion) {
				if (policy->applied) {
					necp_policy_unapply(policy);
				}
				// Delete the policy
				necp_policy_delete(session, policy);
			} else if (!policy->applied) {
				necp_policy_apply(session, policy);
			} else if (policy->pending_update) {
				// Must have been applied, but needs an update. Remove and re-add.
				necp_policy_unapply(policy);
				necp_policy_apply(session, policy);
			}
		}

		necp_kernel_socket_policies_update_uuid_table();
		necp_kernel_socket_policies_reprocess();
		necp_kernel_ip_output_policies_reprocess();

		// Clear dirty bit flags
		session->dirty = FALSE;
	}

	lck_rw_done(&necp_kernel_policy_lock);

	necp_update_all_clients();
	necp_post_change_event(&kev_data);

	if (necp_debug) {
		NECPLOG0(LOG_DEBUG, "Applied NECP policies");
	}
}
// Kernel Policy Management
// ---------------------
// Kernel policies are derived from session policies

static necp_kernel_policy_id
necp_kernel_policy_get_new_id(bool socket_level)
{
	static necp_kernel_policy_id necp_last_kernel_socket_policy_id = 0;
	static necp_kernel_policy_id necp_last_kernel_ip_policy_id = 0;

	necp_kernel_policy_id newid = NECP_KERNEL_POLICY_ID_NONE;

	LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	if (socket_level) {
		bool wrapped = FALSE;
		do {
			necp_last_kernel_socket_policy_id++;
			if (necp_last_kernel_socket_policy_id < NECP_KERNEL_POLICY_ID_FIRST_VALID_SOCKET ||
			    necp_last_kernel_socket_policy_id >= NECP_KERNEL_POLICY_ID_FIRST_VALID_IP) {
				if (wrapped) {
					// Already wrapped, give up
					NECPLOG0(LOG_ERR, "Failed to find a free socket kernel policy ID.\n");
					return NECP_KERNEL_POLICY_ID_NONE;
				}
				necp_last_kernel_socket_policy_id = NECP_KERNEL_POLICY_ID_FIRST_VALID_SOCKET;
				wrapped = TRUE;
			}
			newid = necp_last_kernel_socket_policy_id;
		} while (necp_kernel_socket_policy_find(newid) != NULL); // If already used, keep trying
	} else {
		bool wrapped = FALSE;
		do {
			necp_last_kernel_ip_policy_id++;
			if (necp_last_kernel_ip_policy_id < NECP_KERNEL_POLICY_ID_FIRST_VALID_IP) {
				if (wrapped) {
					// Already wrapped, give up
					NECPLOG0(LOG_ERR, "Failed to find a free IP kernel policy ID.\n");
					return NECP_KERNEL_POLICY_ID_NONE;
				}
				necp_last_kernel_ip_policy_id = NECP_KERNEL_POLICY_ID_FIRST_VALID_IP;
				wrapped = TRUE;
			}
			newid = necp_last_kernel_ip_policy_id;
		} while (necp_kernel_ip_output_policy_find(newid) != NULL); // If already used, keep trying
	}

	if (newid == NECP_KERNEL_POLICY_ID_NONE) {
		NECPLOG0(LOG_ERR, "Allocate kernel policy id failed.\n");
		return NECP_KERNEL_POLICY_ID_NONE;
	}

	return newid;
}

#define NECP_KERNEL_VALID_SOCKET_CONDITIONS (NECP_KERNEL_CONDITION_APP_ID | NECP_KERNEL_CONDITION_REAL_APP_ID | NECP_KERNEL_CONDITION_DOMAIN | NECP_KERNEL_CONDITION_ACCOUNT_ID | NECP_KERNEL_CONDITION_PID | NECP_KERNEL_CONDITION_UID | NECP_KERNEL_CONDITION_ALL_INTERFACES | NECP_KERNEL_CONDITION_BOUND_INTERFACE | NECP_KERNEL_CONDITION_TRAFFIC_CLASS | NECP_KERNEL_CONDITION_PROTOCOL | NECP_KERNEL_CONDITION_LOCAL_START | NECP_KERNEL_CONDITION_LOCAL_END | NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_REMOTE_START | NECP_KERNEL_CONDITION_REMOTE_END | NECP_KERNEL_CONDITION_REMOTE_PREFIX | NECP_KERNEL_CONDITION_ENTITLEMENT | NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT | NECP_KERNEL_CONDITION_AGENT_TYPE | NECP_KERNEL_CONDITION_HAS_CLIENT | NECP_KERNEL_CONDITION_LOCAL_NETWORKS | NECP_KERNEL_CONDITION_CLIENT_FLAGS | NECP_KERNEL_CONDITION_LOCAL_EMPTY | NECP_KERNEL_CONDITION_REMOTE_EMPTY | NECP_KERNEL_CONDITION_PLATFORM_BINARY)
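// Allocates and inserts a socket-layer kernel policy. The condition mask is
// sanitized first, so that conditions that are redundant or depend on an
// absent condition (for example, a real-app condition without an app
// condition) are dropped before the policy is linked into the sorted list.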
static necp_kernel_policy_id
necp_kernel_socket_policy_add(necp_policy_order order, u_int32_t session_order, int session_pid, u_int32_t condition_mask, u_int32_t condition_negated_mask, necp_app_id cond_app_id, necp_app_id cond_real_app_id, char *cond_custom_entitlement, u_int32_t cond_account_id, char *cond_domain, pid_t cond_pid, uid_t cond_uid, ifnet_t cond_bound_interface, struct necp_policy_condition_tc_range cond_traffic_class, u_int16_t cond_protocol, union necp_sockaddr_union *cond_local_start, union necp_sockaddr_union *cond_local_end, u_int8_t cond_local_prefix, union necp_sockaddr_union *cond_remote_start, union necp_sockaddr_union *cond_remote_end, u_int8_t cond_remote_prefix, struct necp_policy_condition_agent_type *cond_agent_type, u_int32_t cond_client_flags, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter)
{
	struct necp_kernel_socket_policy *new_kernel_policy = NULL;
	struct necp_kernel_socket_policy *tmp_kernel_policy = NULL;

	MALLOC_ZONE(new_kernel_policy, struct necp_kernel_socket_policy *, sizeof(*new_kernel_policy), M_NECP_SOCKET_POLICY, M_WAITOK);
	if (new_kernel_policy == NULL) {
		goto done;
	}

	memset(new_kernel_policy, 0, sizeof(*new_kernel_policy)); // M_ZERO is not supported for MALLOC_ZONE
	new_kernel_policy->id = necp_kernel_policy_get_new_id(true);
	new_kernel_policy->order = order;
	new_kernel_policy->session_order = session_order;
	new_kernel_policy->session_pid = session_pid;

	// Sanitize condition mask
	new_kernel_policy->condition_mask = (condition_mask & NECP_KERNEL_VALID_SOCKET_CONDITIONS);
	if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES) && (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE)) {
		new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_BOUND_INTERFACE;
	}
	if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) && !(new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID)) {
		new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_REAL_APP_ID;
	}
	if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT) && !(new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID)) {
		new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_ENTITLEMENT;
	}
	if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) && (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX)) {
		new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_LOCAL_PREFIX;
	}
	if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) && (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX)) {
		new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_REMOTE_PREFIX;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_EMPTY) {
		new_kernel_policy->condition_mask &= ~(NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_LOCAL_END);
	}
	if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_EMPTY)) {
		new_kernel_policy->condition_mask &= ~(NECP_KERNEL_CONDITION_REMOTE_PREFIX | NECP_KERNEL_CONDITION_REMOTE_END);
	}
	new_kernel_policy->condition_negated_mask = condition_negated_mask & new_kernel_policy->condition_mask;

	// Set condition values
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID) {
		new_kernel_policy->cond_app_id = cond_app_id;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
		new_kernel_policy->cond_real_app_id = cond_real_app_id;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT) {
		new_kernel_policy->cond_custom_entitlement = cond_custom_entitlement;
		new_kernel_policy->cond_custom_entitlement_matched = necp_boolean_state_unknown;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID) {
		new_kernel_policy->cond_account_id = cond_account_id;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_DOMAIN) {
		new_kernel_policy->cond_domain = cond_domain;
		new_kernel_policy->cond_domain_dot_count = necp_count_dots(cond_domain, strlen(cond_domain));
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PID) {
		new_kernel_policy->cond_pid = cond_pid;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_UID) {
		new_kernel_policy->cond_uid = cond_uid;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
		if (cond_bound_interface) {
			ifnet_reference(cond_bound_interface);
		}
		new_kernel_policy->cond_bound_interface = cond_bound_interface;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
		new_kernel_policy->cond_traffic_class = cond_traffic_class;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
		new_kernel_policy->cond_protocol = cond_protocol;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
		memcpy(&new_kernel_policy->cond_local_start, cond_local_start, cond_local_start->sa.sa_len);
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
		memcpy(&new_kernel_policy->cond_local_end, cond_local_end, cond_local_end->sa.sa_len);
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
		new_kernel_policy->cond_local_prefix = cond_local_prefix;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
		memcpy(&new_kernel_policy->cond_remote_start, cond_remote_start, cond_remote_start->sa.sa_len);
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
		memcpy(&new_kernel_policy->cond_remote_end, cond_remote_end, cond_remote_end->sa.sa_len);
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
		new_kernel_policy->cond_remote_prefix = cond_remote_prefix;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_AGENT_TYPE) {
		memcpy(&new_kernel_policy->cond_agent_type, cond_agent_type, sizeof(*cond_agent_type));
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_CLIENT_FLAGS) {
		new_kernel_policy->cond_client_flags = cond_client_flags;
	}

	new_kernel_policy->result = result;
	memcpy(&new_kernel_policy->result_parameter, &result_parameter, sizeof(result_parameter));

	if (necp_debug) {
		NECPLOG(LOG_DEBUG, "Added kernel policy: socket, id=%d, mask=%x\n", new_kernel_policy->id, new_kernel_policy->condition_mask);
	}
	LIST_INSERT_SORTED_TWICE_ASCENDING(&necp_kernel_socket_policies, new_kernel_policy, chain, session_order, order, tmp_kernel_policy);
done:
	return new_kernel_policy ? new_kernel_policy->id : 0;
}
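// Lookup and teardown helpers for socket-layer kernel policies. Deleting a
// policy releases its bound interface reference and frees any owned strings.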
static struct necp_kernel_socket_policy *
necp_kernel_socket_policy_find(necp_kernel_policy_id policy_id)
{
	struct necp_kernel_socket_policy *kernel_policy = NULL;
	struct necp_kernel_socket_policy *tmp_kernel_policy = NULL;

	if (policy_id == 0) {
		return NULL;
	}

	LIST_FOREACH_SAFE(kernel_policy, &necp_kernel_socket_policies, chain, tmp_kernel_policy) {
		if (kernel_policy->id == policy_id) {
			return kernel_policy;
		}
	}

	return NULL;
}

static bool
necp_kernel_socket_policy_delete(necp_kernel_policy_id policy_id)
{
	struct necp_kernel_socket_policy *policy = NULL;

	LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	policy = necp_kernel_socket_policy_find(policy_id);
	if (policy) {
		LIST_REMOVE(policy, chain);

		if (policy->cond_bound_interface) {
			ifnet_release(policy->cond_bound_interface);
			policy->cond_bound_interface = NULL;
		}

		if (policy->cond_domain) {
			FREE(policy->cond_domain, M_NECP);
			policy->cond_domain = NULL;
		}

		if (policy->cond_custom_entitlement) {
			FREE(policy->cond_custom_entitlement, M_NECP);
			policy->cond_custom_entitlement = NULL;
		}

		FREE_ZONE(policy, sizeof(*policy), M_NECP_SOCKET_POLICY);
		return TRUE;
	}

	return FALSE;
}
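// Renders a human-readable description of a kernel policy result and its
// parameter into result_string, for use by the debug dump routines below.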
static inline const char *
necp_get_result_description(char *result_string, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter)
{
	uuid_string_t uuid_string;
	switch (result) {
	case NECP_KERNEL_POLICY_RESULT_NONE: {
		snprintf(result_string, MAX_RESULT_STRING_LEN, "None");
		break;
	}
	case NECP_KERNEL_POLICY_RESULT_PASS: {
		snprintf(result_string, MAX_RESULT_STRING_LEN, "Pass");
		break;
	}
	case NECP_KERNEL_POLICY_RESULT_SKIP: {
		snprintf(result_string, MAX_RESULT_STRING_LEN, "Skip (%u)", result_parameter.skip_policy_order);
		break;
	}
	case NECP_KERNEL_POLICY_RESULT_DROP: {
		snprintf(result_string, MAX_RESULT_STRING_LEN, "Drop");
		break;
	}
	case NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT: {
		snprintf(result_string, MAX_RESULT_STRING_LEN, "SocketDivert (%d)", result_parameter.flow_divert_control_unit);
		break;
	}
	case NECP_KERNEL_POLICY_RESULT_SOCKET_FILTER: {
		snprintf(result_string, MAX_RESULT_STRING_LEN, "SocketFilter (%d)", result_parameter.filter_control_unit);
		break;
	}
	case NECP_KERNEL_POLICY_RESULT_IP_TUNNEL: {
		ifnet_t interface = ifindex2ifnet[result_parameter.tunnel_interface_index];
		snprintf(result_string, MAX_RESULT_STRING_LEN, "IPTunnel (%s%d)", ifnet_name(interface), ifnet_unit(interface));
		break;
	}
	case NECP_KERNEL_POLICY_RESULT_IP_FILTER: {
		snprintf(result_string, MAX_RESULT_STRING_LEN, "IPFilter");
		break;
	}
	case NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED: {
		ifnet_t interface = ifindex2ifnet[result_parameter.scoped_interface_index];
		snprintf(result_string, MAX_RESULT_STRING_LEN, "SocketScoped (%s%d)", ifnet_name(interface), ifnet_unit(interface));
		break;
	}
	case NECP_KERNEL_POLICY_RESULT_SCOPED_DIRECT: {
		snprintf(result_string, MAX_RESULT_STRING_LEN, "ScopedDirect");
		break;
	}
	case NECP_KERNEL_POLICY_RESULT_ALLOW_UNENTITLED: {
		snprintf(result_string, MAX_RESULT_STRING_LEN, "AllowUnentitled");
		break;
	}
	case NECP_KERNEL_POLICY_RESULT_ROUTE_RULES: {
		int index = 0;
		char interface_names[MAX_ROUTE_RULE_INTERFACES][IFXNAMSIZ];
		struct necp_route_rule *route_rule = necp_lookup_route_rule_locked(&necp_route_rules, result_parameter.route_rule_id);
		if (route_rule != NULL) {
			for (index = 0; index < MAX_ROUTE_RULE_INTERFACES; index++) {
				if (route_rule->exception_if_indices[index] != 0) {
					ifnet_t interface = ifindex2ifnet[route_rule->exception_if_indices[index]];
					snprintf(interface_names[index], IFXNAMSIZ, "%s%d", ifnet_name(interface), ifnet_unit(interface));
				} else {
					memset(interface_names[index], 0, IFXNAMSIZ);
				}
			}
			switch (route_rule->default_action) {
			case NECP_ROUTE_RULE_DENY_INTERFACE:
				snprintf(result_string, MAX_RESULT_STRING_LEN, "RouteRules (Only %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)",
				    (route_rule->cellular_action == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? "Cell " : "",
				    (route_rule->wifi_action == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? "WiFi " : "",
				    (route_rule->wired_action == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? "Wired " : "",
				    (route_rule->expensive_action == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? "Exp " : "",
				    (route_rule->constrained_action == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? "Constrained " : "",
				    (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[0] : "",
				    (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
				    (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[1] : "",
				    (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
				    (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[2] : "",
				    (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
				    (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[3] : "",
				    (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
				    (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[4] : "",
				    (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
				    (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[5] : "",
				    (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
				    (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[6] : "",
				    (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
				    (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[7] : "",
				    (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
				    (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[8] : "",
				    (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
				    (route_rule->exception_if_actions[9] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[9] : "");
				break;
			case NECP_ROUTE_RULE_ALLOW_INTERFACE:
				snprintf(result_string, MAX_RESULT_STRING_LEN, "RouteRules (%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)",
				    (route_rule->cellular_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!Cell " : "",
				    (route_rule->wifi_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!WiFi " : "",
				    (route_rule->wired_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!Wired " : "",
				    (route_rule->expensive_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!Exp " : "",
				    (route_rule->constrained_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!Constrained " : "",
				    (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
				    (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[0] : "",
				    (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
				    (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[1] : "",
				    (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
				    (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[2] : "",
				    (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
				    (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[3] : "",
				    (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
				    (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[4] : "",
				    (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
				    (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[5] : "",
				    (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
				    (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[6] : "",
				    (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
				    (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[7] : "",
				    (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
				    (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[8] : "",
				    (route_rule->exception_if_actions[9] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
				    (route_rule->exception_if_actions[9] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[9] : "");
				break;
			case NECP_ROUTE_RULE_QOS_MARKING:
				snprintf(result_string, MAX_RESULT_STRING_LEN, "RouteRules (QoSMarking %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)",
				    (route_rule->cellular_action == NECP_ROUTE_RULE_QOS_MARKING) ? "Cell " : "",
				    (route_rule->wifi_action == NECP_ROUTE_RULE_QOS_MARKING) ? "WiFi " : "",
				    (route_rule->wired_action == NECP_ROUTE_RULE_QOS_MARKING) ? "Wired " : "",
				    (route_rule->expensive_action == NECP_ROUTE_RULE_QOS_MARKING) ? "Exp " : "",
				    (route_rule->constrained_action == NECP_ROUTE_RULE_QOS_MARKING) ? "Constrained " : "",
				    (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[0] : "",
				    (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
				    (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[1] : "",
				    (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
				    (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[2] : "",
				    (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
				    (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[3] : "",
				    (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
				    (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[4] : "",
				    (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
				    (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[5] : "",
				    (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
				    (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[6] : "",
				    (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
				    (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[7] : "",
				    (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
				    (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[8] : "",
				    (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
				    (route_rule->exception_if_actions[9] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[9] : "");
				break;
			default:
				snprintf(result_string, MAX_RESULT_STRING_LEN, "RouteRules (Unknown)");
				break;
			}
		}
		break;
	}
	case NECP_KERNEL_POLICY_RESULT_USE_NETAGENT: {
		bool found_mapping = FALSE;
		struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.netagent_id);
		if (mapping != NULL) {
			uuid_unparse(mapping->uuid, uuid_string);
			found_mapping = TRUE;
		}
		snprintf(result_string, MAX_RESULT_STRING_LEN, "UseNetAgent (%s)", found_mapping ? uuid_string : "Unknown");
		break;
	}
	case NECP_KERNEL_POLICY_RESULT_NETAGENT_SCOPED: {
		bool found_mapping = FALSE;
		struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.netagent_id);
		if (mapping != NULL) {
			uuid_unparse(mapping->uuid, uuid_string);
			found_mapping = TRUE;
		}
		snprintf(result_string, MAX_RESULT_STRING_LEN, "NetAgentScoped (%s)", found_mapping ? uuid_string : "Unknown");
		break;
	}
	case NECP_POLICY_RESULT_TRIGGER: {
		bool found_mapping = FALSE;
		struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.service.identifier);
		if (mapping != NULL) {
			uuid_unparse(mapping->uuid, uuid_string);
			found_mapping = TRUE;
		}
		snprintf(result_string, MAX_RESULT_STRING_LEN, "Trigger (%s.%d)", found_mapping ? uuid_string : "Unknown", result_parameter.service.data);
		break;
	}
	case NECP_POLICY_RESULT_TRIGGER_IF_NEEDED: {
		bool found_mapping = FALSE;
		struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.service.identifier);
		if (mapping != NULL) {
			uuid_unparse(mapping->uuid, uuid_string);
			found_mapping = TRUE;
		}
		snprintf(result_string, MAX_RESULT_STRING_LEN, "TriggerIfNeeded (%s.%d)", found_mapping ? uuid_string : "Unknown", result_parameter.service.data);
		break;
	}
	case NECP_POLICY_RESULT_TRIGGER_SCOPED: {
		bool found_mapping = FALSE;
		struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.service.identifier);
		if (mapping != NULL) {
			uuid_unparse(mapping->uuid, uuid_string);
			found_mapping = TRUE;
		}
		snprintf(result_string, MAX_RESULT_STRING_LEN, "TriggerScoped (%s.%d)", found_mapping ? uuid_string : "Unknown", result_parameter.service.data);
		break;
	}
	case NECP_POLICY_RESULT_NO_TRIGGER_SCOPED: {
		bool found_mapping = FALSE;
		struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.service.identifier);
		if (mapping != NULL) {
			uuid_unparse(mapping->uuid, uuid_string);
			found_mapping = TRUE;
		}
		snprintf(result_string, MAX_RESULT_STRING_LEN, "NoTriggerScoped (%s.%d)", found_mapping ? uuid_string : "Unknown", result_parameter.service.data);
		break;
	}
	default: {
		snprintf(result_string, MAX_RESULT_STRING_LEN, "Unknown %d (%d)", result, result_parameter.tunnel_interface_index);
		break;
	}
	}
	return result_string;
}
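// Logs the current application-layer and socket-layer policy maps when
// necp_debug is enabled.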
static void
necp_kernel_socket_policies_dump_all(void)
{
	if (necp_debug) {
		struct necp_kernel_socket_policy *policy = NULL;
		int policy_i;
		int app_i;
		char result_string[MAX_RESULT_STRING_LEN];
		char proc_name_string[MAXCOMLEN + 1];
		memset(result_string, 0, MAX_RESULT_STRING_LEN);
		memset(proc_name_string, 0, MAXCOMLEN + 1);

		NECPLOG0(LOG_DEBUG, "NECP Application Policies:\n");
		NECPLOG0(LOG_DEBUG, "-----------\n");
		for (policy_i = 0; necp_kernel_socket_policies_app_layer_map != NULL && necp_kernel_socket_policies_app_layer_map[policy_i] != NULL; policy_i++) {
			policy = necp_kernel_socket_policies_app_layer_map[policy_i];
			proc_name(policy->session_pid, proc_name_string, MAXCOMLEN);
			NECPLOG(LOG_DEBUG, "\t%3d. Policy ID: %5d\tProcess: %10.10s\tOrder: %04d.%04d\tMask: %5x\tResult: %s\n", policy_i, policy->id, proc_name_string, policy->session_order, policy->order, policy->condition_mask, necp_get_result_description(result_string, policy->result, policy->result_parameter));
		}
		if (necp_kernel_socket_policies_app_layer_map[0] != NULL) {
			NECPLOG0(LOG_DEBUG, "-----------\n");
		}

		NECPLOG0(LOG_DEBUG, "NECP Socket Policies:\n");
		NECPLOG0(LOG_DEBUG, "-----------\n");
		for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) {
			NECPLOG(LOG_DEBUG, "\tApp Bucket: %d\n", app_i);
			for (policy_i = 0; necp_kernel_socket_policies_map[app_i] != NULL && (necp_kernel_socket_policies_map[app_i])[policy_i] != NULL; policy_i++) {
				policy = (necp_kernel_socket_policies_map[app_i])[policy_i];
				proc_name(policy->session_pid, proc_name_string, MAXCOMLEN);
				NECPLOG(LOG_DEBUG, "\t%3d. Policy ID: %5d\tProcess: %10.10s\tOrder: %04d.%04d\tMask: %5x\tResult: %s\n", policy_i, policy->id, proc_name_string, policy->session_order, policy->order, policy->condition_mask, necp_get_result_description(result_string, policy->result, policy->result_parameter));
			}
			NECPLOG0(LOG_DEBUG, "-----------\n");
		}
	}
}
static inline bool
necp_kernel_socket_result_is_trigger_service_type(struct necp_kernel_socket_policy *kernel_policy)
{
	return kernel_policy->result >= NECP_KERNEL_POLICY_RESULT_TRIGGER && kernel_policy->result <= NECP_KERNEL_POLICY_RESULT_NO_TRIGGER_SCOPED;
}

static bool
necp_kernel_socket_policy_results_overlap(struct necp_kernel_socket_policy *upper_policy, struct necp_kernel_socket_policy *lower_policy)
{
	if (upper_policy->result == NECP_KERNEL_POLICY_RESULT_DROP) {
		// Drop always cancels out lower policies
		return TRUE;
	} else if (upper_policy->result == NECP_KERNEL_POLICY_RESULT_SOCKET_FILTER ||
	    upper_policy->result == NECP_KERNEL_POLICY_RESULT_ROUTE_RULES ||
	    upper_policy->result == NECP_KERNEL_POLICY_RESULT_USE_NETAGENT ||
	    upper_policy->result == NECP_KERNEL_POLICY_RESULT_NETAGENT_SCOPED ||
	    upper_policy->result == NECP_KERNEL_POLICY_RESULT_ALLOW_UNENTITLED) {
		// Filters and route rules never cancel out lower policies
		return FALSE;
	} else if (necp_kernel_socket_result_is_trigger_service_type(upper_policy)) {
		// Trigger/Scoping policies can overlap one another, but not other results
		return necp_kernel_socket_result_is_trigger_service_type(lower_policy);
	} else if (upper_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
		if (upper_policy->session_order != lower_policy->session_order) {
			// A skip cannot override a policy of a different session
			return FALSE;
		}

		if (upper_policy->result_parameter.skip_policy_order == 0 ||
		    lower_policy->order >= upper_policy->result_parameter.skip_policy_order) {
			// This policy is beyond the skip
			return FALSE;
		} else {
			// This policy is inside the skip
			return TRUE;
		}
	}

	// A hard pass, flow divert, tunnel, or scope will currently block out lower policies
	return TRUE;
}
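// Returns TRUE if 'policy' can be omitted from a map because an earlier
// policy in policy_array always overrides it: the earlier policy must have a
// result that blocks this one out and a condition set that is a (more
// general) subset of this policy's conditions, taking skip windows into
// account.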
static bool
necp_kernel_socket_policy_is_unnecessary(struct necp_kernel_socket_policy *policy, struct necp_kernel_socket_policy **policy_array, int valid_indices)
{
	bool can_skip = FALSE;
	u_int32_t highest_skip_session_order = 0;
	u_int32_t highest_skip_order = 0;
	int i;
	for (i = 0; i < valid_indices; i++) {
		struct necp_kernel_socket_policy *compared_policy = policy_array[i];

		// For policies in a skip window, we can't mark conflicting policies as unnecessary
		if (can_skip) {
			if (highest_skip_session_order != compared_policy->session_order ||
			    (highest_skip_order != 0 && compared_policy->order >= highest_skip_order)) {
				// If we've moved on to the next session, or passed the skip window
				highest_skip_session_order = 0;
				highest_skip_order = 0;
				can_skip = FALSE;
			} else {
				// If this policy is also a skip, it can increase the skip window
				if (compared_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
					if (compared_policy->result_parameter.skip_policy_order > highest_skip_order) {
						highest_skip_order = compared_policy->result_parameter.skip_policy_order;
					}
				}
				continue;
			}
		}

		if (compared_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
			// This policy is a skip. Set the skip window accordingly
			can_skip = TRUE;
			highest_skip_session_order = compared_policy->session_order;
			highest_skip_order = compared_policy->result_parameter.skip_policy_order;
		}

		// The result of the compared policy must be able to block out this policy result
		if (!necp_kernel_socket_policy_results_overlap(compared_policy, policy)) {
			continue;
		}

		// If new policy matches All Interfaces, compared policy must also
		if ((policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES) && !(compared_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES)) {
			continue;
		}

		// If new policy matches Local Networks, compared policy must also
		if ((policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_NETWORKS) && !(compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_NETWORKS)) {
			continue;
		}

		// Default makes lower policies unnecessary always
		if (compared_policy->condition_mask == 0) {
			return TRUE;
		}

		// Compared must be more general than policy, and include only conditions within policy
		if ((policy->condition_mask & compared_policy->condition_mask) != compared_policy->condition_mask) {
			continue;
		}

		// Negative conditions must match for the overlapping conditions
		if ((policy->condition_negated_mask & compared_policy->condition_mask) != (compared_policy->condition_negated_mask & compared_policy->condition_mask)) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_DOMAIN &&
		    strcmp(compared_policy->cond_domain, policy->cond_domain) != 0) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT &&
		    strcmp(compared_policy->cond_custom_entitlement, policy->cond_custom_entitlement) != 0) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID &&
		    compared_policy->cond_account_id != policy->cond_account_id) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID &&
		    compared_policy->cond_policy_id != policy->cond_policy_id) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID &&
		    compared_policy->cond_app_id != policy->cond_app_id) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID &&
		    compared_policy->cond_real_app_id != policy->cond_real_app_id) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_PID &&
		    compared_policy->cond_pid != policy->cond_pid) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_UID &&
		    compared_policy->cond_uid != policy->cond_uid) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE &&
		    compared_policy->cond_bound_interface != policy->cond_bound_interface) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL &&
		    compared_policy->cond_protocol != policy->cond_protocol) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_CLIENT_FLAGS &&
		    compared_policy->cond_client_flags != policy->cond_client_flags) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS &&
		    !(compared_policy->cond_traffic_class.start_tc <= policy->cond_traffic_class.start_tc &&
		    compared_policy->cond_traffic_class.end_tc >= policy->cond_traffic_class.end_tc)) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
			if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
				if (!necp_is_range_in_range((struct sockaddr *)&policy->cond_local_start, (struct sockaddr *)&policy->cond_local_end, (struct sockaddr *)&compared_policy->cond_local_start, (struct sockaddr *)&compared_policy->cond_local_end)) {
					continue;
				}
			} else if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
				if (compared_policy->cond_local_prefix > policy->cond_local_prefix ||
				    !necp_is_addr_in_subnet((struct sockaddr *)&policy->cond_local_start, (struct sockaddr *)&compared_policy->cond_local_start, compared_policy->cond_local_prefix)) {
					continue;
				}
			}
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
			if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
				if (!necp_is_range_in_range((struct sockaddr *)&policy->cond_remote_start, (struct sockaddr *)&policy->cond_remote_end, (struct sockaddr *)&compared_policy->cond_remote_start, (struct sockaddr *)&compared_policy->cond_remote_end)) {
					continue;
				}
			} else if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
				if (compared_policy->cond_remote_prefix > policy->cond_remote_prefix ||
				    !necp_is_addr_in_subnet((struct sockaddr *)&policy->cond_remote_start, (struct sockaddr *)&compared_policy->cond_remote_start, compared_policy->cond_remote_prefix)) {
					continue;
				}
			}
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_AGENT_TYPE &&
		    memcmp(&compared_policy->cond_agent_type, &policy->cond_agent_type, sizeof(policy->cond_agent_type)) == 0) {
			continue;
		}

		return TRUE;
	}

	return FALSE;
}
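// Rebuilds the socket-layer policy maps from the master policy list: counts
// policies per app-ID bucket, allocates NULL-terminated arrays, and fills
// them in order while dropping policies that an earlier entry makes
// unnecessary.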
static bool
necp_kernel_socket_policies_reprocess(void)
{
	int app_i;
	int bucket_allocation_counts[NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS];
	int bucket_current_free_index[NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS];
	int app_layer_allocation_count = 0;
	int app_layer_current_free_index = 0;
	struct necp_kernel_socket_policy *kernel_policy = NULL;

	LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	necp_kernel_application_policies_condition_mask = 0;
	necp_kernel_socket_policies_condition_mask = 0;
	necp_kernel_application_policies_count = 0;
	necp_kernel_socket_policies_count = 0;
	necp_kernel_socket_policies_non_app_count = 0;

	// Reset all maps to NULL
	for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) {
		if (necp_kernel_socket_policies_map[app_i] != NULL) {
			FREE(necp_kernel_socket_policies_map[app_i], M_NECP);
			necp_kernel_socket_policies_map[app_i] = NULL;
		}

		bucket_allocation_counts[app_i] = 0;
	}
	if (necp_kernel_socket_policies_app_layer_map != NULL) {
		FREE(necp_kernel_socket_policies_app_layer_map, M_NECP);
		necp_kernel_socket_policies_app_layer_map = NULL;
	}

	// Create masks and counts
	LIST_FOREACH(kernel_policy, &necp_kernel_socket_policies, chain) {
		// App layer mask/count
		necp_kernel_application_policies_condition_mask |= kernel_policy->condition_mask;
		necp_kernel_application_policies_count++;
		app_layer_allocation_count++;

		if ((kernel_policy->condition_mask & NECP_KERNEL_CONDITION_AGENT_TYPE)) {
			// Agent type conditions only apply to app layer
			continue;
		}

		// Update socket layer bucket mask/counts
		necp_kernel_socket_policies_condition_mask |= kernel_policy->condition_mask;
		necp_kernel_socket_policies_count++;

		if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID) ||
		    kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_APP_ID) {
			necp_kernel_socket_policies_non_app_count++;
			for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) {
				bucket_allocation_counts[app_i]++;
			}
		} else {
			bucket_allocation_counts[NECP_SOCKET_MAP_APP_ID_TO_BUCKET(kernel_policy->cond_app_id)]++;
		}
	}

	for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) {
		if (bucket_allocation_counts[app_i] > 0) {
			// Allocate a NULL-terminated array of policy pointers for each bucket
			MALLOC(necp_kernel_socket_policies_map[app_i], struct necp_kernel_socket_policy **, sizeof(struct necp_kernel_socket_policy *) * (bucket_allocation_counts[app_i] + 1), M_NECP, M_WAITOK);
			if (necp_kernel_socket_policies_map[app_i] == NULL) {
				goto fail;
			}

			// Initialize the first entry to NULL
			(necp_kernel_socket_policies_map[app_i])[0] = NULL;
		}
		bucket_current_free_index[app_i] = 0;
	}
	MALLOC(necp_kernel_socket_policies_app_layer_map, struct necp_kernel_socket_policy **, sizeof(struct necp_kernel_socket_policy *) * (app_layer_allocation_count + 1), M_NECP, M_WAITOK);
	if (necp_kernel_socket_policies_app_layer_map == NULL) {
		goto fail;
	}
	necp_kernel_socket_policies_app_layer_map[0] = NULL;

	LIST_FOREACH(kernel_policy, &necp_kernel_socket_policies, chain) {
		// Add app layer policies
		if (!necp_kernel_socket_policy_is_unnecessary(kernel_policy, necp_kernel_socket_policies_app_layer_map, app_layer_current_free_index)) {
			necp_kernel_socket_policies_app_layer_map[app_layer_current_free_index] = kernel_policy;
			app_layer_current_free_index++;
			necp_kernel_socket_policies_app_layer_map[app_layer_current_free_index] = NULL;
		}

		if ((kernel_policy->condition_mask & NECP_KERNEL_CONDITION_AGENT_TYPE)) {
			// Agent type conditions only apply to app layer
			continue;
		}

		// Add socket policies
		if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID) ||
		    kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_APP_ID) {
			for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) {
				if (!necp_kernel_socket_policy_is_unnecessary(kernel_policy, necp_kernel_socket_policies_map[app_i], bucket_current_free_index[app_i])) {
					(necp_kernel_socket_policies_map[app_i])[(bucket_current_free_index[app_i])] = kernel_policy;
					bucket_current_free_index[app_i]++;
					(necp_kernel_socket_policies_map[app_i])[(bucket_current_free_index[app_i])] = NULL;
				}
			}
		} else {
			app_i = NECP_SOCKET_MAP_APP_ID_TO_BUCKET(kernel_policy->cond_app_id);
			if (!necp_kernel_socket_policy_is_unnecessary(kernel_policy, necp_kernel_socket_policies_map[app_i], bucket_current_free_index[app_i])) {
				(necp_kernel_socket_policies_map[app_i])[(bucket_current_free_index[app_i])] = kernel_policy;
				bucket_current_free_index[app_i]++;
				(necp_kernel_socket_policies_map[app_i])[(bucket_current_free_index[app_i])] = NULL;
			}
		}
	}
	necp_kernel_socket_policies_dump_all();
	BUMP_KERNEL_SOCKET_POLICIES_GENERATION_COUNT();
	return TRUE;

fail:
	// Free memory, reset masks to 0
	necp_kernel_application_policies_condition_mask = 0;
	necp_kernel_socket_policies_condition_mask = 0;
	necp_kernel_application_policies_count = 0;
	necp_kernel_socket_policies_count = 0;
	necp_kernel_socket_policies_non_app_count = 0;
	for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) {
		if (necp_kernel_socket_policies_map[app_i] != NULL) {
			FREE(necp_kernel_socket_policies_map[app_i], M_NECP);
			necp_kernel_socket_policies_map[app_i] = NULL;
		}
	}
	if (necp_kernel_socket_policies_app_layer_map != NULL) {
		FREE(necp_kernel_socket_policies_app_layer_map, M_NECP);
		necp_kernel_socket_policies_app_layer_map = NULL;
	}
	return FALSE;
}
static u_int32_t
necp_get_new_string_id(void)
{
    static u_int32_t necp_last_string_id = 0;

    u_int32_t newid = 0;

    LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

    bool wrapped = FALSE;
    do {
        necp_last_string_id++;
        if (necp_last_string_id < 1) {
            if (wrapped) {
                // Already wrapped, give up
                NECPLOG0(LOG_ERR, "Failed to find a free string ID.\n");
                return 0;
            }
            necp_last_string_id = 1;
            wrapped = TRUE;
        }
        newid = necp_last_string_id;
    } while (necp_lookup_string_with_id_locked(&necp_account_id_list, newid) != NULL); // If already used, keep trying

    if (newid == 0) {
        NECPLOG0(LOG_ERR, "Allocate string id failed.\n");
        return 0;
    }

    return newid;
}
static struct necp_string_id_mapping *
necp_lookup_string_to_id_locked(struct necp_string_id_mapping_list *list, char *string)
{
    struct necp_string_id_mapping *searchentry = NULL;
    struct necp_string_id_mapping *foundentry = NULL;

    LIST_FOREACH(searchentry, list, chain) {
        if (strcmp(searchentry->string, string) == 0) {
            foundentry = searchentry;
            break;
        }
    }

    return foundentry;
}
static struct necp_string_id_mapping *
necp_lookup_string_with_id_locked(struct necp_string_id_mapping_list *list, u_int32_t local_id)
{
    struct necp_string_id_mapping *searchentry = NULL;
    struct necp_string_id_mapping *foundentry = NULL;

    LIST_FOREACH(searchentry, list, chain) {
        if (searchentry->id == local_id) {
            foundentry = searchentry;
            break;
        }
    }

    return foundentry;
}
static u_int32_t
necp_create_string_to_id_mapping(struct necp_string_id_mapping_list *list, char *string)
{
    u_int32_t string_id = 0;
    struct necp_string_id_mapping *existing_mapping = NULL;

    LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

    existing_mapping = necp_lookup_string_to_id_locked(list, string);
    if (existing_mapping != NULL) {
        string_id = existing_mapping->id;
        os_ref_retain_locked(&existing_mapping->refcount);
    } else {
        struct necp_string_id_mapping *new_mapping = NULL;
        MALLOC(new_mapping, struct necp_string_id_mapping *, sizeof(struct necp_string_id_mapping), M_NECP, M_WAITOK);
        if (new_mapping != NULL) {
            memset(new_mapping, 0, sizeof(struct necp_string_id_mapping));

            size_t length = strlen(string) + 1;
            MALLOC(new_mapping->string, char *, length, M_NECP, M_WAITOK);
            if (new_mapping->string != NULL) {
                memcpy(new_mapping->string, string, length);
                new_mapping->id = necp_get_new_string_id();
                os_ref_init(&new_mapping->refcount, &necp_refgrp);
                LIST_INSERT_HEAD(list, new_mapping, chain);
                string_id = new_mapping->id;
            } else {
                FREE(new_mapping, M_NECP);
            }
        }
    }

    return string_id;
}
static bool
necp_remove_string_to_id_mapping(struct necp_string_id_mapping_list *list, char *string)
{
    struct necp_string_id_mapping *existing_mapping = NULL;

    LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

    existing_mapping = necp_lookup_string_to_id_locked(list, string);
    if (existing_mapping != NULL) {
        if (os_ref_release_locked(&existing_mapping->refcount) == 0) {
            LIST_REMOVE(existing_mapping, chain);
            FREE(existing_mapping->string, M_NECP);
            FREE(existing_mapping, M_NECP);
        }
        return TRUE;
    }

    return FALSE;
}
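
/*
 * Illustrative sketch, not from the original source: the string-to-ID
 * mappings above are reference counted, so creating the same string twice
 * hands back the same ID and the entry is only freed once every create has
 * been balanced by a remove. The helper below only demonstrates that
 * contract; it is guarded out of the build and its name and assertions are
 * assumptions for illustration, not code used elsewhere in this file.
 */
#if 0
static void
necp_example_string_mapping_usage(void)
{
    // Callers are expected to hold the policy lock exclusively
    LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

    u_int32_t first_id = necp_create_string_to_id_mapping(&necp_account_id_list, "com.example.account");
    u_int32_t second_id = necp_create_string_to_id_mapping(&necp_account_id_list, "com.example.account");
    // Same string, same ID; the second create only bumped the refcount
    VERIFY(first_id == second_id);

    // Two removals balance the two creates; the entry is freed on the last one
    (void)necp_remove_string_to_id_mapping(&necp_account_id_list, "com.example.account");
    (void)necp_remove_string_to_id_mapping(&necp_account_id_list, "com.example.account");
}
#endif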
#define NECP_FIRST_VALID_ROUTE_RULE_ID 1
#define NECP_FIRST_VALID_AGGREGATE_ROUTE_RULE_ID UINT16_MAX
static u_int32_t
necp_get_new_route_rule_id(bool aggregate)
{
    static u_int32_t necp_last_route_rule_id = 0;
    static u_int32_t necp_last_aggregate_route_rule_id = 0;

    u_int32_t newid = 0;

    if (!aggregate) {
        // Main necp_kernel_policy_lock protects non-aggregate rule IDs
        LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

        bool wrapped = FALSE;
        do {
            necp_last_route_rule_id++;
            if (necp_last_route_rule_id < NECP_FIRST_VALID_ROUTE_RULE_ID ||
                necp_last_route_rule_id >= NECP_FIRST_VALID_AGGREGATE_ROUTE_RULE_ID) {
                if (wrapped) {
                    // Already wrapped, give up
                    NECPLOG0(LOG_ERR, "Failed to find a free route rule id.\n");
                    return 0;
                }
                necp_last_route_rule_id = NECP_FIRST_VALID_ROUTE_RULE_ID;
                wrapped = TRUE;
            }
            newid = necp_last_route_rule_id;
        } while (necp_lookup_route_rule_locked(&necp_route_rules, newid) != NULL); // If already used, keep trying
    } else {
        // necp_route_rule_lock protects aggregate rule IDs
        LCK_RW_ASSERT(&necp_route_rule_lock, LCK_RW_ASSERT_EXCLUSIVE);

        bool wrapped = FALSE;
        do {
            necp_last_aggregate_route_rule_id++;
            if (necp_last_aggregate_route_rule_id < NECP_FIRST_VALID_AGGREGATE_ROUTE_RULE_ID) {
                if (wrapped) {
                    // Already wrapped, give up
                    NECPLOG0(LOG_ERR, "Failed to find a free aggregate route rule id.\n");
                    return 0;
                }
                necp_last_aggregate_route_rule_id = NECP_FIRST_VALID_AGGREGATE_ROUTE_RULE_ID;
                wrapped = TRUE;
            }
            newid = necp_last_aggregate_route_rule_id;
        } while (necp_lookup_route_rule_locked(&necp_route_rules, newid) != NULL); // If already used, keep trying
    }

    if (newid == 0) {
        NECPLOG0(LOG_ERR, "Allocate route rule ID failed.\n");
        return 0;
    }

    return newid;
}
static struct necp_route_rule *
necp_lookup_route_rule_locked(struct necp_route_rule_list *list, u_int32_t route_rule_id)
{
    struct necp_route_rule *searchentry = NULL;
    struct necp_route_rule *foundentry = NULL;

    LIST_FOREACH(searchentry, list, chain) {
        if (searchentry->id == route_rule_id) {
            foundentry = searchentry;
            break;
        }
    }

    return foundentry;
}
static struct necp_route_rule *
necp_lookup_route_rule_by_contents_locked(struct necp_route_rule_list *list, u_int32_t default_action, u_int8_t cellular_action, u_int8_t wifi_action, u_int8_t wired_action, u_int8_t expensive_action, u_int8_t constrained_action, u_int32_t *if_indices, u_int8_t *if_actions)
{
    struct necp_route_rule *searchentry = NULL;
    struct necp_route_rule *foundentry = NULL;

    LIST_FOREACH(searchentry, list, chain) {
        if (searchentry->default_action == default_action &&
            searchentry->cellular_action == cellular_action &&
            searchentry->wifi_action == wifi_action &&
            searchentry->wired_action == wired_action &&
            searchentry->expensive_action == expensive_action &&
            searchentry->constrained_action == constrained_action) {
            bool match_failed = FALSE;
            size_t index_a = 0;
            size_t index_b = 0;
            size_t count_a = 0;
            size_t count_b = 0;
            for (index_a = 0; index_a < MAX_ROUTE_RULE_INTERFACES; index_a++) {
                bool found_index = FALSE;
                if (searchentry->exception_if_indices[index_a] == 0) {
                    break;
                }
                count_a++;
                for (index_b = 0; index_b < MAX_ROUTE_RULE_INTERFACES; index_b++) {
                    if (if_indices[index_b] == 0) {
                        break;
                    }
                    if (index_b >= count_b) {
                        count_b = index_b + 1;
                    }
                    if (searchentry->exception_if_indices[index_a] == if_indices[index_b] &&
                        searchentry->exception_if_actions[index_a] == if_actions[index_b]) {
                        found_index = TRUE;
                        break;
                    }
                }
                if (!found_index) {
                    match_failed = TRUE;
                    break;
                }
            }
            if (!match_failed && count_a == count_b) {
                foundentry = searchentry;
                break;
            }
        }
    }

    return foundentry;
}
static u_int32_t
necp_create_route_rule(struct necp_route_rule_list *list, u_int8_t *route_rules_array, u_int32_t route_rules_array_size)
{
    size_t offset = 0;
    u_int32_t route_rule_id = 0;
    struct necp_route_rule *existing_rule = NULL;
    u_int32_t default_action = NECP_ROUTE_RULE_ALLOW_INTERFACE;
    u_int8_t cellular_action = NECP_ROUTE_RULE_NONE;
    u_int8_t wifi_action = NECP_ROUTE_RULE_NONE;
    u_int8_t wired_action = NECP_ROUTE_RULE_NONE;
    u_int8_t expensive_action = NECP_ROUTE_RULE_NONE;
    u_int8_t constrained_action = NECP_ROUTE_RULE_NONE;
    u_int32_t if_indices[MAX_ROUTE_RULE_INTERFACES];
    size_t num_valid_indices = 0;
    memset(&if_indices, 0, sizeof(if_indices));
    u_int8_t if_actions[MAX_ROUTE_RULE_INTERFACES];
    memset(&if_actions, 0, sizeof(if_actions));

    LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

    if (route_rules_array == NULL || route_rules_array_size == 0) {
        return 0;
    }

    while (offset < route_rules_array_size) {
        ifnet_t rule_interface = NULL;
        char interface_name[IFXNAMSIZ];
        u_int32_t length = 0;
        u_int8_t *value = necp_buffer_get_tlv_value(route_rules_array, offset, &length);

        u_int8_t rule_type = necp_policy_condition_get_type_from_buffer(value, length);
        u_int8_t rule_flags = necp_policy_condition_get_flags_from_buffer(value, length);
        u_int32_t rule_length = necp_policy_condition_get_value_length_from_buffer(value, length);
        u_int8_t *rule_value = necp_policy_condition_get_value_pointer_from_buffer(value, length);

        if (rule_type == NECP_ROUTE_RULE_NONE) {
            // Don't allow an explicit rule to be None action
            offset += sizeof(u_int8_t) + sizeof(u_int32_t) + length;
            continue;
        }

        if (rule_length == 0) {
            if (rule_flags & NECP_ROUTE_RULE_FLAG_CELLULAR) {
                cellular_action = rule_type;
            }
            if (rule_flags & NECP_ROUTE_RULE_FLAG_WIFI) {
                wifi_action = rule_type;
            }
            if (rule_flags & NECP_ROUTE_RULE_FLAG_WIRED) {
                wired_action = rule_type;
            }
            if (rule_flags & NECP_ROUTE_RULE_FLAG_EXPENSIVE) {
                expensive_action = rule_type;
            }
            if (rule_flags & NECP_ROUTE_RULE_FLAG_CONSTRAINED) {
                constrained_action = rule_type;
            }
            if (rule_flags == 0) {
                default_action = rule_type;
            }
            offset += sizeof(u_int8_t) + sizeof(u_int32_t) + length;
            continue;
        }

        if (num_valid_indices >= MAX_ROUTE_RULE_INTERFACES) {
            offset += sizeof(u_int8_t) + sizeof(u_int32_t) + length;
            continue;
        }

        if (rule_length <= IFXNAMSIZ) {
            memcpy(interface_name, rule_value, rule_length);
            interface_name[rule_length - 1] = 0; // Make sure the string is NULL terminated
            if (ifnet_find_by_name(interface_name, &rule_interface) == 0) {
                if_actions[num_valid_indices] = rule_type;
                if_indices[num_valid_indices++] = rule_interface->if_index;
                ifnet_release(rule_interface);
            }
        }
        offset += sizeof(u_int8_t) + sizeof(u_int32_t) + length;
    }

    existing_rule = necp_lookup_route_rule_by_contents_locked(list, default_action, cellular_action, wifi_action, wired_action, expensive_action, constrained_action, if_indices, if_actions);
    if (existing_rule != NULL) {
        route_rule_id = existing_rule->id;
        os_ref_retain_locked(&existing_rule->refcount);
    } else {
        struct necp_route_rule *new_rule = NULL;
        MALLOC(new_rule, struct necp_route_rule *, sizeof(struct necp_route_rule), M_NECP, M_WAITOK);
        if (new_rule != NULL) {
            memset(new_rule, 0, sizeof(struct necp_route_rule));
            route_rule_id = new_rule->id = necp_get_new_route_rule_id(false);
            new_rule->default_action = default_action;
            new_rule->cellular_action = cellular_action;
            new_rule->wifi_action = wifi_action;
            new_rule->wired_action = wired_action;
            new_rule->expensive_action = expensive_action;
            new_rule->constrained_action = constrained_action;
            memcpy(&new_rule->exception_if_indices, &if_indices, sizeof(if_indices));
            memcpy(&new_rule->exception_if_actions, &if_actions, sizeof(if_actions));
            os_ref_init(&new_rule->refcount, &necp_refgrp);
            LIST_INSERT_HEAD(list, new_rule, chain);
        }
    }

    return route_rule_id;
}
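
/*
 * Illustrative sketch, not from the original source: the route-rules array
 * parsed above is a flat run of TLVs, each a 1-byte type, a 4-byte length,
 * and `length` bytes of value, which is why the loop advances its cursor by
 * sizeof(u_int8_t) + sizeof(u_int32_t) + length after every record. The
 * walker below shows only that framing; it is guarded out of the build and
 * the callback shape is an assumption for illustration.
 */
#if 0
static void
necp_example_walk_tlvs(u_int8_t *buffer, u_int32_t buffer_size,
    void (*visit)(u_int8_t type, u_int32_t length, u_int8_t *value))
{
    u_int32_t offset = 0;
    while ((offset + sizeof(u_int8_t) + sizeof(u_int32_t)) <= buffer_size) {
        u_int8_t type = necp_buffer_get_tlv_type(buffer, offset);
        u_int32_t length = necp_buffer_get_tlv_length(buffer, offset);
        u_int8_t *value = necp_buffer_get_tlv_value(buffer, offset, NULL);
        if (length > buffer_size - (offset + sizeof(u_int8_t) + sizeof(u_int32_t))) {
            break; // Malformed: the value would run past the end of the buffer
        }
        if (value != NULL) {
            visit(type, length, value);
        }
        offset += sizeof(u_int8_t) + sizeof(u_int32_t) + length;
    }
}
#endif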
static void
necp_remove_aggregate_route_rule_for_id(u_int32_t rule_id)
{
    if (rule_id) {
        lck_rw_lock_exclusive(&necp_route_rule_lock);

        struct necp_aggregate_route_rule *existing_rule = NULL;
        struct necp_aggregate_route_rule *tmp_rule = NULL;

        LIST_FOREACH_SAFE(existing_rule, &necp_aggregate_route_rules, chain, tmp_rule) {
            int index = 0;
            for (index = 0; index < MAX_AGGREGATE_ROUTE_RULES; index++) {
                u_int32_t route_rule_id = existing_rule->rule_ids[index];
                if (route_rule_id == rule_id) {
                    LIST_REMOVE(existing_rule, chain);
                    FREE(existing_rule, M_NECP);
                    break;
                }
            }
        }

        lck_rw_done(&necp_route_rule_lock);
    }
}
static bool
necp_remove_route_rule(struct necp_route_rule_list *list, u_int32_t route_rule_id)
{
    struct necp_route_rule *existing_rule = NULL;

    LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

    existing_rule = necp_lookup_route_rule_locked(list, route_rule_id);
    if (existing_rule != NULL) {
        if (os_ref_release_locked(&existing_rule->refcount) == 0) {
            necp_remove_aggregate_route_rule_for_id(existing_rule->id);
            LIST_REMOVE(existing_rule, chain);
            FREE(existing_rule, M_NECP);
        }
        return TRUE;
    }

    return FALSE;
}
static struct necp_aggregate_route_rule *
necp_lookup_aggregate_route_rule_locked(u_int32_t route_rule_id)
{
    struct necp_aggregate_route_rule *searchentry = NULL;
    struct necp_aggregate_route_rule *foundentry = NULL;

    lck_rw_lock_shared(&necp_route_rule_lock);

    LIST_FOREACH(searchentry, &necp_aggregate_route_rules, chain) {
        if (searchentry->id == route_rule_id) {
            foundentry = searchentry;
            break;
        }
    }

    lck_rw_done(&necp_route_rule_lock);

    return foundentry;
}
static u_int32_t
necp_create_aggregate_route_rule(u_int32_t *rule_ids)
{
    u_int32_t aggregate_route_rule_id = 0;
    struct necp_aggregate_route_rule *new_rule = NULL;
    struct necp_aggregate_route_rule *existing_rule = NULL;

    lck_rw_lock_exclusive(&necp_route_rule_lock);

    // Check if the rule already exists
    LIST_FOREACH(existing_rule, &necp_aggregate_route_rules, chain) {
        if (memcmp(existing_rule->rule_ids, rule_ids, (sizeof(u_int32_t) * MAX_AGGREGATE_ROUTE_RULES)) == 0) {
            lck_rw_done(&necp_route_rule_lock);
            return existing_rule->id;
        }
    }

    MALLOC(new_rule, struct necp_aggregate_route_rule *, sizeof(struct necp_aggregate_route_rule), M_NECP, M_WAITOK);
    if (new_rule != NULL) {
        memset(new_rule, 0, sizeof(struct necp_aggregate_route_rule));
        aggregate_route_rule_id = new_rule->id = necp_get_new_route_rule_id(true);
        memcpy(new_rule->rule_ids, rule_ids, (sizeof(u_int32_t) * MAX_AGGREGATE_ROUTE_RULES));
        LIST_INSERT_HEAD(&necp_aggregate_route_rules, new_rule, chain);
    }
    lck_rw_done(&necp_route_rule_lock);

    return aggregate_route_rule_id;
}
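
/*
 * Illustrative sketch, not from the original source: an aggregate route rule
 * is a fixed array of up to MAX_AGGREGATE_ROUTE_RULES individual rule IDs
 * collapsed into one ID drawn from the aggregate range, so a single 32-bit
 * value can stand for a whole set of rules. Guarded out of the build; the
 * helper name is an assumption and only shows the calling convention.
 */
#if 0
static u_int32_t
necp_example_make_aggregate(u_int32_t first_rule_id, u_int32_t second_rule_id)
{
    u_int32_t rule_ids[MAX_AGGREGATE_ROUTE_RULES];
    memset(rule_ids, 0, sizeof(rule_ids));
    rule_ids[0] = first_rule_id;  // Individual (non-aggregate) rule IDs
    rule_ids[1] = second_rule_id; // Unused slots stay zero
    // Takes necp_route_rule_lock internally; returns an existing aggregate ID
    // if the same set of rules was registered before.
    return necp_create_aggregate_route_rule(rule_ids);
}
#endif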
#define NECP_NULL_SERVICE_ID 1
#define NECP_FIRST_VALID_SERVICE_ID 2
#define NECP_FIRST_VALID_APP_ID UINT16_MAX
static u_int32_t
necp_get_new_uuid_id(bool service)
{
    static u_int32_t necp_last_service_uuid_id = 0;
    static u_int32_t necp_last_app_uuid_id = 0;

    u_int32_t newid = 0;

    LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

    if (service) {
        bool wrapped = FALSE;
        do {
            necp_last_service_uuid_id++;
            if (necp_last_service_uuid_id < NECP_FIRST_VALID_SERVICE_ID ||
                necp_last_service_uuid_id >= NECP_FIRST_VALID_APP_ID) {
                if (wrapped) {
                    // Already wrapped, give up
                    NECPLOG0(LOG_ERR, "Failed to find a free service UUID.\n");
                    return NECP_NULL_SERVICE_ID;
                }
                necp_last_service_uuid_id = NECP_FIRST_VALID_SERVICE_ID;
                wrapped = TRUE;
            }
            newid = necp_last_service_uuid_id;
        } while (necp_uuid_lookup_uuid_with_service_id_locked(newid) != NULL); // If already used, keep trying
    } else {
        bool wrapped = FALSE;
        do {
            necp_last_app_uuid_id++;
            if (necp_last_app_uuid_id < NECP_FIRST_VALID_APP_ID) {
                if (wrapped) {
                    // Already wrapped, give up
                    NECPLOG0(LOG_ERR, "Failed to find a free app UUID.\n");
                    return NECP_NULL_SERVICE_ID;
                }
                necp_last_app_uuid_id = NECP_FIRST_VALID_APP_ID;
                wrapped = TRUE;
            }
            newid = necp_last_app_uuid_id;
        } while (necp_uuid_lookup_uuid_with_app_id_locked(newid) != NULL); // If already used, keep trying
    }

    if (newid == NECP_NULL_SERVICE_ID) {
        NECPLOG0(LOG_ERR, "Allocate uuid ID failed.\n");
        return NECP_NULL_SERVICE_ID;
    }

    return newid;
}
static struct necp_uuid_id_mapping *
necp_uuid_lookup_app_id_locked(uuid_t uuid)
{
    struct necp_uuid_id_mapping *searchentry = NULL;
    struct necp_uuid_id_mapping *foundentry = NULL;

    LIST_FOREACH(searchentry, APPUUIDHASH(uuid), chain) {
        if (uuid_compare(searchentry->uuid, uuid) == 0) {
            foundentry = searchentry;
            break;
        }
    }

    return foundentry;
}
static struct necp_uuid_id_mapping *
necp_uuid_lookup_uuid_with_app_id_locked(u_int32_t local_id)
{
    struct necp_uuid_id_mapping *searchentry = NULL;
    struct necp_uuid_id_mapping *foundentry = NULL;

    struct necp_uuid_id_mapping_head *uuid_list_head = NULL;
    for (uuid_list_head = &necp_uuid_app_id_hashtbl[necp_uuid_app_id_hash_num_buckets - 1]; uuid_list_head >= necp_uuid_app_id_hashtbl; uuid_list_head--) {
        LIST_FOREACH(searchentry, uuid_list_head, chain) {
            if (searchentry->id == local_id) {
                foundentry = searchentry;
                break;
            }
        }
    }

    return foundentry;
}
static u_int32_t
necp_create_uuid_app_id_mapping(uuid_t uuid, bool *allocated_mapping, bool uuid_policy_table)
{
    u_int32_t local_id = 0;
    struct necp_uuid_id_mapping *existing_mapping = NULL;

    LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

    if (allocated_mapping) {
        *allocated_mapping = FALSE;
    }

    existing_mapping = necp_uuid_lookup_app_id_locked(uuid);
    if (existing_mapping != NULL) {
        local_id = existing_mapping->id;
        os_ref_retain_locked(&existing_mapping->refcount);
        if (uuid_policy_table) {
            existing_mapping->table_usecount++;
        }
    } else {
        struct necp_uuid_id_mapping *new_mapping = NULL;
        MALLOC(new_mapping, struct necp_uuid_id_mapping *, sizeof(*new_mapping), M_NECP, M_WAITOK);
        if (new_mapping != NULL) {
            uuid_copy(new_mapping->uuid, uuid);
            new_mapping->id = necp_get_new_uuid_id(false);
            os_ref_init(&new_mapping->refcount, &necp_refgrp);
            if (uuid_policy_table) {
                new_mapping->table_usecount = 1;
            } else {
                new_mapping->table_usecount = 0;
            }

            LIST_INSERT_HEAD(APPUUIDHASH(uuid), new_mapping, chain);

            if (allocated_mapping) {
                *allocated_mapping = TRUE;
            }

            local_id = new_mapping->id;
        }
    }

    return local_id;
}
static bool
necp_remove_uuid_app_id_mapping(uuid_t uuid, bool *removed_mapping, bool uuid_policy_table)
{
    struct necp_uuid_id_mapping *existing_mapping = NULL;

    LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

    if (removed_mapping) {
        *removed_mapping = FALSE;
    }

    existing_mapping = necp_uuid_lookup_app_id_locked(uuid);
    if (existing_mapping != NULL) {
        if (uuid_policy_table) {
            existing_mapping->table_usecount--;
        }
        if (os_ref_release_locked(&existing_mapping->refcount) == 0) {
            LIST_REMOVE(existing_mapping, chain);
            FREE(existing_mapping, M_NECP);
            if (removed_mapping) {
                *removed_mapping = TRUE;
            }
        }
        return TRUE;
    }

    return FALSE;
}
static struct necp_uuid_id_mapping *
necp_uuid_get_null_service_id_mapping(void)
{
    static struct necp_uuid_id_mapping null_mapping;
    uuid_clear(null_mapping.uuid);
    null_mapping.id = NECP_NULL_SERVICE_ID;

    return &null_mapping;
}
static struct necp_uuid_id_mapping *
necp_uuid_lookup_service_id_locked(uuid_t uuid)
{
    struct necp_uuid_id_mapping *searchentry = NULL;
    struct necp_uuid_id_mapping *foundentry = NULL;

    if (uuid_is_null(uuid)) {
        return necp_uuid_get_null_service_id_mapping();
    }

    LIST_FOREACH(searchentry, &necp_uuid_service_id_list, chain) {
        if (uuid_compare(searchentry->uuid, uuid) == 0) {
            foundentry = searchentry;
            break;
        }
    }

    return foundentry;
}
static struct necp_uuid_id_mapping *
necp_uuid_lookup_uuid_with_service_id_locked(u_int32_t local_id)
{
    struct necp_uuid_id_mapping *searchentry = NULL;
    struct necp_uuid_id_mapping *foundentry = NULL;

    if (local_id == NECP_NULL_SERVICE_ID) {
        return necp_uuid_get_null_service_id_mapping();
    }

    LIST_FOREACH(searchentry, &necp_uuid_service_id_list, chain) {
        if (searchentry->id == local_id) {
            foundentry = searchentry;
            break;
        }
    }

    return foundentry;
}
static u_int32_t
necp_create_uuid_service_id_mapping(uuid_t uuid)
{
    u_int32_t local_id = 0;
    struct necp_uuid_id_mapping *existing_mapping = NULL;

    if (uuid_is_null(uuid)) {
        return NECP_NULL_SERVICE_ID;
    }

    LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

    existing_mapping = necp_uuid_lookup_service_id_locked(uuid);
    if (existing_mapping != NULL) {
        local_id = existing_mapping->id;
        os_ref_retain_locked(&existing_mapping->refcount);
    } else {
        struct necp_uuid_id_mapping *new_mapping = NULL;
        MALLOC(new_mapping, struct necp_uuid_id_mapping *, sizeof(*new_mapping), M_NECP, M_WAITOK);
        if (new_mapping != NULL) {
            uuid_copy(new_mapping->uuid, uuid);
            new_mapping->id = necp_get_new_uuid_id(true);
            os_ref_init(&new_mapping->refcount, &necp_refgrp);

            LIST_INSERT_HEAD(&necp_uuid_service_id_list, new_mapping, chain);

            local_id = new_mapping->id;
        }
    }

    return local_id;
}
static bool
necp_remove_uuid_service_id_mapping(uuid_t uuid)
{
    struct necp_uuid_id_mapping *existing_mapping = NULL;

    if (uuid_is_null(uuid)) {
        return TRUE;
    }

    LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

    existing_mapping = necp_uuid_lookup_service_id_locked(uuid);
    if (existing_mapping != NULL) {
        if (os_ref_release_locked(&existing_mapping->refcount) == 0) {
            LIST_REMOVE(existing_mapping, chain);
            FREE(existing_mapping, M_NECP);
        }
        return TRUE;
    }

    return FALSE;
}
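
/*
 * Illustrative sketch, not from the original source: service UUIDs map to
 * compact IDs in [NECP_FIRST_VALID_SERVICE_ID, NECP_FIRST_VALID_APP_ID),
 * with the null UUID pinned to NECP_NULL_SERVICE_ID, so results can carry a
 * 32-bit service ID instead of a 16-byte UUID. Guarded out of the build; the
 * round-trip below and its assertion are assumptions about usage.
 */
#if 0
static void
necp_example_service_uuid_round_trip(uuid_t service_uuid)
{
    LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

    u_int32_t service_id = necp_create_uuid_service_id_mapping(service_uuid);
    struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(service_id);
    // The lookup returns the entry that was just created (or the null mapping)
    VERIFY(mapping != NULL && uuid_compare(mapping->uuid, service_uuid) == 0);
    (void)necp_remove_uuid_service_id_mapping(service_uuid);
}
#endif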
static bool
necp_kernel_socket_policies_update_uuid_table(void)
{
    LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

    if (necp_uuid_app_id_mappings_dirty) {
        if (proc_uuid_policy_kernel(PROC_UUID_POLICY_OPERATION_CLEAR, NULL, PROC_UUID_NECP_APP_POLICY) < 0) {
            NECPLOG0(LOG_DEBUG, "Error clearing uuids from policy table\n");
            return FALSE;
        }

        if (necp_num_uuid_app_id_mappings > 0) {
            struct necp_uuid_id_mapping_head *uuid_list_head = NULL;
            for (uuid_list_head = &necp_uuid_app_id_hashtbl[necp_uuid_app_id_hash_num_buckets - 1]; uuid_list_head >= necp_uuid_app_id_hashtbl; uuid_list_head--) {
                struct necp_uuid_id_mapping *mapping = NULL;
                LIST_FOREACH(mapping, uuid_list_head, chain) {
                    if (mapping->table_usecount > 0 &&
                        proc_uuid_policy_kernel(PROC_UUID_POLICY_OPERATION_ADD, mapping->uuid, PROC_UUID_NECP_APP_POLICY) < 0) {
                        NECPLOG0(LOG_DEBUG, "Error adding uuid to policy table\n");
                    }
                }
            }
        }

        necp_uuid_app_id_mappings_dirty = FALSE;
    }

    return TRUE;
}
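
/*
 * Illustrative sketch, not from the original source: callers that change app
 * UUID mappings with uuid_policy_table set are expected to mark
 * necp_uuid_app_id_mappings_dirty and then invoke the function above once,
 * so the proc_uuid_policy table is rebuilt in a single clear-and-reload pass
 * rather than once per UUID. Guarded out of the build; the helper name is an
 * assumption used only to show the pattern.
 */
#if 0
static void
necp_example_refresh_uuid_policy_table(void)
{
    LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
    necp_uuid_app_id_mappings_dirty = TRUE;
    (void)necp_kernel_socket_policies_update_uuid_table();
}
#endif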
#define NECP_KERNEL_VALID_IP_OUTPUT_CONDITIONS (NECP_KERNEL_CONDITION_ALL_INTERFACES | NECP_KERNEL_CONDITION_BOUND_INTERFACE | NECP_KERNEL_CONDITION_PROTOCOL | NECP_KERNEL_CONDITION_LOCAL_START | NECP_KERNEL_CONDITION_LOCAL_END | NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_REMOTE_START | NECP_KERNEL_CONDITION_REMOTE_END | NECP_KERNEL_CONDITION_REMOTE_PREFIX | NECP_KERNEL_CONDITION_POLICY_ID | NECP_KERNEL_CONDITION_LAST_INTERFACE | NECP_KERNEL_CONDITION_LOCAL_NETWORKS)
5358 static necp_kernel_policy_id
5359 necp_kernel_ip_output_policy_add(necp_policy_order order
, necp_policy_order suborder
, u_int32_t session_order
, int session_pid
, u_int32_t condition_mask
, u_int32_t condition_negated_mask
, necp_kernel_policy_id cond_policy_id
, ifnet_t cond_bound_interface
, u_int32_t cond_last_interface_index
, u_int16_t cond_protocol
, union necp_sockaddr_union
*cond_local_start
, union necp_sockaddr_union
*cond_local_end
, u_int8_t cond_local_prefix
, union necp_sockaddr_union
*cond_remote_start
, union necp_sockaddr_union
*cond_remote_end
, u_int8_t cond_remote_prefix
, necp_kernel_policy_result result
, necp_kernel_policy_result_parameter result_parameter
)
5361 struct necp_kernel_ip_output_policy
*new_kernel_policy
= NULL
;
5362 struct necp_kernel_ip_output_policy
*tmp_kernel_policy
= NULL
;
5364 MALLOC_ZONE(new_kernel_policy
, struct necp_kernel_ip_output_policy
*, sizeof(*new_kernel_policy
), M_NECP_IP_POLICY
, M_WAITOK
);
5365 if (new_kernel_policy
== NULL
) {
5369 memset(new_kernel_policy
, 0, sizeof(*new_kernel_policy
)); // M_ZERO is not supported for MALLOC_ZONE
5370 new_kernel_policy
->id
= necp_kernel_policy_get_new_id(false);
5371 new_kernel_policy
->suborder
= suborder
;
5372 new_kernel_policy
->order
= order
;
5373 new_kernel_policy
->session_order
= session_order
;
5374 new_kernel_policy
->session_pid
= session_pid
;
5376 // Sanitize condition mask
5377 new_kernel_policy
->condition_mask
= (condition_mask
& NECP_KERNEL_VALID_IP_OUTPUT_CONDITIONS
);
5378 if ((new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_ALL_INTERFACES
) && (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_BOUND_INTERFACE
)) {
5379 new_kernel_policy
->condition_mask
&= ~NECP_KERNEL_CONDITION_BOUND_INTERFACE
;
5381 if ((new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_END
) && (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_PREFIX
)) {
5382 new_kernel_policy
->condition_mask
&= ~NECP_KERNEL_CONDITION_LOCAL_PREFIX
;
5384 if ((new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_REMOTE_END
) && (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_REMOTE_PREFIX
)) {
5385 new_kernel_policy
->condition_mask
&= ~NECP_KERNEL_CONDITION_REMOTE_PREFIX
;
5387 new_kernel_policy
->condition_negated_mask
= condition_negated_mask
& new_kernel_policy
->condition_mask
;
5389 // Set condition values
5390 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_POLICY_ID
) {
5391 new_kernel_policy
->cond_policy_id
= cond_policy_id
;
5393 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_BOUND_INTERFACE
) {
5394 if (cond_bound_interface
) {
5395 ifnet_reference(cond_bound_interface
);
5397 new_kernel_policy
->cond_bound_interface
= cond_bound_interface
;
5399 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_LAST_INTERFACE
) {
5400 new_kernel_policy
->cond_last_interface_index
= cond_last_interface_index
;
5402 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_PROTOCOL
) {
5403 new_kernel_policy
->cond_protocol
= cond_protocol
;
5405 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_START
) {
5406 memcpy(&new_kernel_policy
->cond_local_start
, cond_local_start
, cond_local_start
->sa
.sa_len
);
5408 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_END
) {
5409 memcpy(&new_kernel_policy
->cond_local_end
, cond_local_end
, cond_local_end
->sa
.sa_len
);
5411 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_PREFIX
) {
5412 new_kernel_policy
->cond_local_prefix
= cond_local_prefix
;
5414 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_REMOTE_START
) {
5415 memcpy(&new_kernel_policy
->cond_remote_start
, cond_remote_start
, cond_remote_start
->sa
.sa_len
);
5417 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_REMOTE_END
) {
5418 memcpy(&new_kernel_policy
->cond_remote_end
, cond_remote_end
, cond_remote_end
->sa
.sa_len
);
5420 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_REMOTE_PREFIX
) {
5421 new_kernel_policy
->cond_remote_prefix
= cond_remote_prefix
;
5424 new_kernel_policy
->result
= result
;
5425 memcpy(&new_kernel_policy
->result_parameter
, &result_parameter
, sizeof(result_parameter
));
5428 NECPLOG(LOG_DEBUG
, "Added kernel policy: ip output, id=%d, mask=%x\n", new_kernel_policy
->id
, new_kernel_policy
->condition_mask
);
5430 LIST_INSERT_SORTED_THRICE_ASCENDING(&necp_kernel_ip_output_policies
, new_kernel_policy
, chain
, session_order
, order
, suborder
, tmp_kernel_policy
);
5432 return new_kernel_policy
? new_kernel_policy
->id
: 0;
static struct necp_kernel_ip_output_policy *
necp_kernel_ip_output_policy_find(necp_kernel_policy_id policy_id)
{
    struct necp_kernel_ip_output_policy *kernel_policy = NULL;
    struct necp_kernel_ip_output_policy *tmp_kernel_policy = NULL;

    if (policy_id == 0) {
        return NULL;
    }

    LIST_FOREACH_SAFE(kernel_policy, &necp_kernel_ip_output_policies, chain, tmp_kernel_policy) {
        if (kernel_policy->id == policy_id) {
            return kernel_policy;
        }
    }

    return NULL;
}
static bool
necp_kernel_ip_output_policy_delete(necp_kernel_policy_id policy_id)
{
    struct necp_kernel_ip_output_policy *policy = NULL;

    LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

    policy = necp_kernel_ip_output_policy_find(policy_id);
    if (policy) {
        LIST_REMOVE(policy, chain);

        if (policy->cond_bound_interface) {
            ifnet_release(policy->cond_bound_interface);
            policy->cond_bound_interface = NULL;
        }

        FREE_ZONE(policy, sizeof(*policy), M_NECP_IP_POLICY);
        return TRUE;
    }

    return FALSE;
}
static void
necp_kernel_ip_output_policies_dump_all(void)
{
    if (necp_debug) {
        struct necp_kernel_ip_output_policy *policy = NULL;
        int policy_i;
        int id_i;
        char result_string[MAX_RESULT_STRING_LEN];
        char proc_name_string[MAXCOMLEN + 1];
        memset(result_string, 0, MAX_RESULT_STRING_LEN);
        memset(proc_name_string, 0, MAXCOMLEN + 1);

        NECPLOG0(LOG_DEBUG, "NECP IP Output Policies:\n");
        NECPLOG0(LOG_DEBUG, "-----------\n");
        for (id_i = 0; id_i < NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS; id_i++) {
            NECPLOG(LOG_DEBUG, " ID Bucket: %d\n", id_i);
            for (policy_i = 0; necp_kernel_ip_output_policies_map[id_i] != NULL && (necp_kernel_ip_output_policies_map[id_i])[policy_i] != NULL; policy_i++) {
                policy = (necp_kernel_ip_output_policies_map[id_i])[policy_i];
                proc_name(policy->session_pid, proc_name_string, MAXCOMLEN);
                NECPLOG(LOG_DEBUG, "\t%3d. Policy ID: %5d\tProcess: %10.10s\tOrder: %04d.%04d.%d\tMask: %5x\tResult: %s\n", policy_i, policy->id, proc_name_string, policy->session_order, policy->order, policy->suborder, policy->condition_mask, necp_get_result_description(result_string, policy->result, policy->result_parameter));
            }
            NECPLOG0(LOG_DEBUG, "-----------\n");
        }
    }
}
static bool
necp_kernel_ip_output_policy_results_overlap(struct necp_kernel_ip_output_policy *upper_policy, struct necp_kernel_ip_output_policy *lower_policy)
{
    if (upper_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
        if (upper_policy->session_order != lower_policy->session_order) {
            // A skip cannot override a policy of a different session
            return FALSE;
        } else {
            if (upper_policy->result_parameter.skip_policy_order == 0 ||
                lower_policy->order >= upper_policy->result_parameter.skip_policy_order) {
                // This policy is beyond the skip
                return FALSE;
            } else {
                // This policy is inside the skip
                return TRUE;
            }
        }
    }

    // All other IP Output policy results (drop, tunnel, hard pass) currently overlap
    return TRUE;
}
5527 necp_kernel_ip_output_policy_is_unnecessary(struct necp_kernel_ip_output_policy
*policy
, struct necp_kernel_ip_output_policy
**policy_array
, int valid_indices
)
5529 bool can_skip
= FALSE
;
5530 u_int32_t highest_skip_session_order
= 0;
5531 u_int32_t highest_skip_order
= 0;
5533 for (i
= 0; i
< valid_indices
; i
++) {
5534 struct necp_kernel_ip_output_policy
*compared_policy
= policy_array
[i
];
5536 // For policies in a skip window, we can't mark conflicting policies as unnecessary
5538 if (highest_skip_session_order
!= compared_policy
->session_order
||
5539 (highest_skip_order
!= 0 && compared_policy
->order
>= highest_skip_order
)) {
5540 // If we've moved on to the next session, or passed the skip window
5541 highest_skip_session_order
= 0;
5542 highest_skip_order
= 0;
5545 // If this policy is also a skip, in can increase the skip window
5546 if (compared_policy
->result
== NECP_KERNEL_POLICY_RESULT_SKIP
) {
5547 if (compared_policy
->result_parameter
.skip_policy_order
> highest_skip_order
) {
5548 highest_skip_order
= compared_policy
->result_parameter
.skip_policy_order
;
5555 if (compared_policy
->result
== NECP_KERNEL_POLICY_RESULT_SKIP
) {
5556 // This policy is a skip. Set the skip window accordingly
5558 highest_skip_session_order
= compared_policy
->session_order
;
5559 highest_skip_order
= compared_policy
->result_parameter
.skip_policy_order
;
5562 // The result of the compared policy must be able to block out this policy result
5563 if (!necp_kernel_ip_output_policy_results_overlap(compared_policy
, policy
)) {
5567 // If new policy matches All Interfaces, compared policy must also
5568 if ((policy
->condition_mask
& NECP_KERNEL_CONDITION_ALL_INTERFACES
) && !(compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_ALL_INTERFACES
)) {
5572 // If new policy matches Local Networks, compared policy must also
5573 if ((policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_NETWORKS
) && !(compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_NETWORKS
)) {
5577 // Default makes lower policies unecessary always
5578 if (compared_policy
->condition_mask
== 0) {
5582 // Compared must be more general than policy, and include only conditions within policy
5583 if ((policy
->condition_mask
& compared_policy
->condition_mask
) != compared_policy
->condition_mask
) {
5587 // Negative conditions must match for the overlapping conditions
5588 if ((policy
->condition_negated_mask
& compared_policy
->condition_mask
) != (compared_policy
->condition_negated_mask
& compared_policy
->condition_mask
)) {
5592 if (compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_POLICY_ID
&&
5593 compared_policy
->cond_policy_id
!= policy
->cond_policy_id
) {
5597 if (compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_BOUND_INTERFACE
&&
5598 compared_policy
->cond_bound_interface
!= policy
->cond_bound_interface
) {
5602 if (compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_PROTOCOL
&&
5603 compared_policy
->cond_protocol
!= policy
->cond_protocol
) {
5607 if (compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_START
) {
5608 if (compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_END
) {
5609 if (!necp_is_range_in_range((struct sockaddr
*)&policy
->cond_local_start
, (struct sockaddr
*)&policy
->cond_local_end
, (struct sockaddr
*)&compared_policy
->cond_local_start
, (struct sockaddr
*)&compared_policy
->cond_local_end
)) {
5612 } else if (compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_PREFIX
) {
5613 if (compared_policy
->cond_local_prefix
> policy
->cond_local_prefix
||
5614 !necp_is_addr_in_subnet((struct sockaddr
*)&policy
->cond_local_start
, (struct sockaddr
*)&compared_policy
->cond_local_start
, compared_policy
->cond_local_prefix
)) {
5620 if (compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_REMOTE_START
) {
5621 if (compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_REMOTE_END
) {
5622 if (!necp_is_range_in_range((struct sockaddr
*)&policy
->cond_remote_start
, (struct sockaddr
*)&policy
->cond_remote_end
, (struct sockaddr
*)&compared_policy
->cond_remote_start
, (struct sockaddr
*)&compared_policy
->cond_remote_end
)) {
5625 } else if (compared_policy
->condition_mask
& NECP_KERNEL_CONDITION_REMOTE_PREFIX
) {
5626 if (compared_policy
->cond_remote_prefix
> policy
->cond_remote_prefix
||
5627 !necp_is_addr_in_subnet((struct sockaddr
*)&policy
->cond_remote_start
, (struct sockaddr
*)&compared_policy
->cond_remote_start
, compared_policy
->cond_remote_prefix
)) {
5640 necp_kernel_ip_output_policies_reprocess(void)
5643 int bucket_allocation_counts
[NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS
];
5644 int bucket_current_free_index
[NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS
];
5645 struct necp_kernel_ip_output_policy
*kernel_policy
= NULL
;
5647 LCK_RW_ASSERT(&necp_kernel_policy_lock
, LCK_RW_ASSERT_EXCLUSIVE
);
5650 necp_kernel_ip_output_policies_condition_mask
= 0;
5651 necp_kernel_ip_output_policies_count
= 0;
5652 necp_kernel_ip_output_policies_non_id_count
= 0;
5654 for (i
= 0; i
< NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS
; i
++) {
5655 if (necp_kernel_ip_output_policies_map
[i
] != NULL
) {
5656 FREE(necp_kernel_ip_output_policies_map
[i
], M_NECP
);
5657 necp_kernel_ip_output_policies_map
[i
] = NULL
;
5661 bucket_allocation_counts
[i
] = 0;
5664 LIST_FOREACH(kernel_policy
, &necp_kernel_ip_output_policies
, chain
) {
5666 necp_kernel_ip_output_policies_condition_mask
|= kernel_policy
->condition_mask
;
5667 necp_kernel_ip_output_policies_count
++;
5669 /* Update bucket counts:
5670 * Non-id and SKIP policies will be added to all buckets
5671 * Add local networks policy to all buckets for incoming IP
5673 if (!(kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_POLICY_ID
) ||
5674 (kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_NETWORKS
) ||
5675 kernel_policy
->result
== NECP_KERNEL_POLICY_RESULT_SKIP
) {
5676 for (i
= 0; i
< NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS
; i
++) {
5677 bucket_allocation_counts
[i
]++;
5680 if (!(kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_POLICY_ID
)) {
5681 necp_kernel_ip_output_policies_non_id_count
++;
5683 bucket_allocation_counts
[NECP_IP_OUTPUT_MAP_ID_TO_BUCKET(kernel_policy
->cond_policy_id
)]++;
5687 for (i
= 0; i
< NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS
; i
++) {
5688 if (bucket_allocation_counts
[i
] > 0) {
5689 // Allocate a NULL-terminated array of policy pointers for each bucket
5690 MALLOC(necp_kernel_ip_output_policies_map
[i
], struct necp_kernel_ip_output_policy
**, sizeof(struct necp_kernel_ip_output_policy
*) * (bucket_allocation_counts
[i
] + 1), M_NECP
, M_WAITOK
);
5691 if (necp_kernel_ip_output_policies_map
[i
] == NULL
) {
5695 // Initialize the first entry to NULL
5696 (necp_kernel_ip_output_policies_map
[i
])[0] = NULL
;
5698 bucket_current_free_index
[i
] = 0;
5701 LIST_FOREACH(kernel_policy
, &necp_kernel_ip_output_policies
, chain
) {
5702 // Insert pointers into map
5703 if (!(kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_POLICY_ID
) ||
5704 (kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_NETWORKS
) ||
5705 kernel_policy
->result
== NECP_KERNEL_POLICY_RESULT_SKIP
) {
5706 for (i
= 0; i
< NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS
; i
++) {
5707 if (!necp_kernel_ip_output_policy_is_unnecessary(kernel_policy
, necp_kernel_ip_output_policies_map
[i
], bucket_current_free_index
[i
])) {
5708 (necp_kernel_ip_output_policies_map
[i
])[(bucket_current_free_index
[i
])] = kernel_policy
;
5709 bucket_current_free_index
[i
]++;
5710 (necp_kernel_ip_output_policies_map
[i
])[(bucket_current_free_index
[i
])] = NULL
;
5714 i
= NECP_IP_OUTPUT_MAP_ID_TO_BUCKET(kernel_policy
->cond_policy_id
);
5715 if (!necp_kernel_ip_output_policy_is_unnecessary(kernel_policy
, necp_kernel_ip_output_policies_map
[i
], bucket_current_free_index
[i
])) {
5716 (necp_kernel_ip_output_policies_map
[i
])[(bucket_current_free_index
[i
])] = kernel_policy
;
5717 bucket_current_free_index
[i
]++;
5718 (necp_kernel_ip_output_policies_map
[i
])[(bucket_current_free_index
[i
])] = NULL
;
5722 necp_kernel_ip_output_policies_dump_all();
5726 // Free memory, reset mask to 0
5727 necp_kernel_ip_output_policies_condition_mask
= 0;
5728 necp_kernel_ip_output_policies_count
= 0;
5729 necp_kernel_ip_output_policies_non_id_count
= 0;
5730 for (i
= 0; i
< NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS
; i
++) {
5731 if (necp_kernel_ip_output_policies_map
[i
] != NULL
) {
5732 FREE(necp_kernel_ip_output_policies_map
[i
], M_NECP
);
5733 necp_kernel_ip_output_policies_map
[i
] = NULL
;
// Outbound Policy Matching
// ---------------------
struct substring {
    char *string;
    size_t length;
};
static struct substring
necp_trim_dots_and_stars(char *string, size_t length)
{
    struct substring sub;
    sub.string = string;
    sub.length = string ? length : 0;

    while (sub.length && (sub.string[0] == '.' || sub.string[0] == '*')) {
        sub.string++;
        sub.length--;
    }

    while (sub.length && (sub.string[sub.length - 1] == '.' || sub.string[sub.length - 1] == '*')) {
        sub.length--;
    }

    return sub;
}
static char *
necp_create_trimmed_domain(char *string, size_t length)
{
    char *trimmed_domain = NULL;
    struct substring sub = necp_trim_dots_and_stars(string, length);

    MALLOC(trimmed_domain, char *, sub.length + 1, M_NECP, M_WAITOK);
    if (trimmed_domain == NULL) {
        return NULL;
    }

    memcpy(trimmed_domain, sub.string, sub.length);
    trimmed_domain[sub.length] = 0;

    return trimmed_domain;
}
static inline int
necp_count_dots(char *string, size_t length)
{
    int count = 0;
    size_t i = 0;

    for (i = 0; i < length; i++) {
        if (string[i] == '.') {
            count++;
        }
    }

    return count;
}
static bool
necp_check_suffix(struct substring parent, struct substring suffix, bool require_dot_before_suffix)
{
    if (parent.length <= suffix.length) {
        return FALSE;
    }

    size_t length_difference = (parent.length - suffix.length);

    if (require_dot_before_suffix) {
        if (((char *)(parent.string + length_difference - 1))[0] != '.') {
            return FALSE;
        }
    }

    // strncasecmp does case-insensitive check for all UTF-8 strings (ignores non-ASCII characters)
    return strncasecmp(parent.string + length_difference, suffix.string, suffix.length) == 0;
}
static bool
necp_hostname_matches_domain(struct substring hostname_substring, u_int8_t hostname_dot_count, char *domain, u_int8_t domain_dot_count)
{
    if (hostname_substring.string == NULL || domain == NULL) {
        return hostname_substring.string == domain;
    }

    struct substring domain_substring;
    domain_substring.string = domain;
    domain_substring.length = strlen(domain);

    if (hostname_dot_count == domain_dot_count) {
        // strncasecmp does case-insensitive check for all UTF-8 strings (ignores non-ASCII characters)
        if (hostname_substring.length == domain_substring.length &&
            strncasecmp(hostname_substring.string, domain_substring.string, hostname_substring.length) == 0) {
            return TRUE;
        }
    } else if (domain_dot_count < hostname_dot_count) {
        if (necp_check_suffix(hostname_substring, domain_substring, TRUE)) {
            return TRUE;
        }
    }

    return FALSE;
}
bool
net_domain_contains_hostname(char *hostname_string, char *domain_string)
{
    if (hostname_string == NULL ||
        domain_string == NULL) {
        return false;
    }

    struct substring hostname_substring;
    hostname_substring.string = hostname_string;
    hostname_substring.length = strlen(hostname_string);

    return necp_hostname_matches_domain(hostname_substring,
               necp_count_dots(hostname_string, hostname_substring.length),
               domain_string,
               necp_count_dots(domain_string, strlen(domain_string)));
}
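
/*
 * Illustrative sketch, not from the original source: the helpers above
 * implement suffix-style domain matching, so a policy domain of
 * "example.com" covers both "example.com" itself and deeper hostnames such
 * as "www.example.com", while "badexample.com" does not match because a '.'
 * must precede the suffix. Guarded out of the build; the hostnames are
 * made-up values for illustration.
 */
#if 0
static void
necp_example_domain_matching(void)
{
    VERIFY(net_domain_contains_hostname("example.com", "example.com"));
    VERIFY(net_domain_contains_hostname("www.example.com", "example.com"));
    VERIFY(!net_domain_contains_hostname("badexample.com", "example.com"));
}
#endif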
#define NECP_MAX_STRING_LEN 1024

static char *
necp_copy_string(char *string, size_t length)
{
    char *copied_string = NULL;

    if (length > NECP_MAX_STRING_LEN) {
        return NULL;
    }

    MALLOC(copied_string, char *, length + 1, M_NECP, M_WAITOK);
    if (copied_string == NULL) {
        return NULL;
    }

    memcpy(copied_string, string, length);
    copied_string[length] = 0;

    return copied_string;
}
static u_int32_t
necp_get_primary_direct_interface_index(void)
{
    u_int32_t interface_index = IFSCOPE_NONE;

    ifnet_head_lock_shared();
    struct ifnet *ordered_interface = NULL;
    TAILQ_FOREACH(ordered_interface, &ifnet_ordered_head, if_ordered_link) {
        const u_int8_t functional_type = if_functional_type(ordered_interface, TRUE);
        if (functional_type != IFRTYPE_FUNCTIONAL_UNKNOWN &&
            functional_type != IFRTYPE_FUNCTIONAL_LOOPBACK) {
            // All known, non-loopback functional types represent direct physical interfaces (Wi-Fi, Cellular, Wired)
            interface_index = ordered_interface->if_index;
            break;
        }
    }
    ifnet_head_done();

    return interface_index;
}
static void
necp_get_parent_cred_result(proc_t proc, struct necp_socket_info *info)
{
    task_t task = proc_task(proc ? proc : current_proc());
    coalition_t coal = task_get_coalition(task, COALITION_TYPE_JETSAM);

    if (coal == COALITION_NULL || coalition_is_leader(task, coal)) {
        // No parent, nothing to do
        return;
    }

    task_t lead_task = coalition_get_leader(coal);
    if (lead_task != NULL) {
        proc_t lead_proc = get_bsdtask_info(lead_task);
        if (lead_proc != NULL) {
            kauth_cred_t lead_cred = kauth_cred_proc_ref(lead_proc);
            if (lead_cred != NULL) {
                errno_t cred_result = priv_check_cred(lead_cred, PRIV_NET_PRIVILEGED_NECP_MATCH, 0);
                kauth_cred_unref(&lead_cred);
                info->cred_result = cred_result;
            }
        }
        task_deallocate(lead_task);
    }
}
#define NECP_KERNEL_ADDRESS_TYPE_CONDITIONS (NECP_KERNEL_CONDITION_LOCAL_START | NECP_KERNEL_CONDITION_LOCAL_END | NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_REMOTE_START | NECP_KERNEL_CONDITION_REMOTE_END | NECP_KERNEL_CONDITION_REMOTE_PREFIX | NECP_KERNEL_CONDITION_LOCAL_EMPTY | NECP_KERNEL_CONDITION_REMOTE_EMPTY | NECP_KERNEL_CONDITION_LOCAL_NETWORKS)
5931 necp_application_fillout_info_locked(uuid_t application_uuid
, uuid_t real_application_uuid
, char *account
, char *domain
, pid_t pid
, uid_t uid
, u_int16_t protocol
, u_int32_t bound_interface_index
, u_int32_t traffic_class
, union necp_sockaddr_union
*local_addr
, union necp_sockaddr_union
*remote_addr
, u_int16_t local_port
, u_int16_t remote_port
, bool has_client
, proc_t proc
, u_int32_t drop_order
, u_int32_t client_flags
, struct necp_socket_info
*info
)
5933 memset(info
, 0, sizeof(struct necp_socket_info
));
5937 info
->protocol
= protocol
;
5938 info
->bound_interface_index
= bound_interface_index
;
5939 info
->traffic_class
= traffic_class
;
5940 info
->has_client
= has_client
;
5941 info
->drop_order
= drop_order
;
5942 info
->client_flags
= client_flags
;
5944 if (necp_kernel_application_policies_condition_mask
& NECP_KERNEL_CONDITION_ENTITLEMENT
&& proc
!= NULL
) {
5945 info
->cred_result
= priv_check_cred(proc_ucred(proc
), PRIV_NET_PRIVILEGED_NECP_MATCH
, 0);
5946 if (info
->cred_result
!= 0) {
5947 // Process does not have entitlement, check the parent process
5948 necp_get_parent_cred_result(proc
, info
);
5952 if (necp_kernel_application_policies_condition_mask
& NECP_KERNEL_CONDITION_PLATFORM_BINARY
&& proc
!= NULL
) {
5953 info
->is_platform_binary
= csproc_get_platform_binary(proc
) ? true : false;
5956 if (necp_kernel_application_policies_condition_mask
& NECP_KERNEL_CONDITION_APP_ID
&& !uuid_is_null(application_uuid
)) {
5957 struct necp_uuid_id_mapping
*existing_mapping
= necp_uuid_lookup_app_id_locked(application_uuid
);
5958 if (existing_mapping
) {
5959 info
->application_id
= existing_mapping
->id
;
5963 if (necp_kernel_application_policies_condition_mask
& NECP_KERNEL_CONDITION_REAL_APP_ID
&& !uuid_is_null(real_application_uuid
)) {
5964 if (uuid_compare(application_uuid
, real_application_uuid
) == 0) {
5965 info
->real_application_id
= info
->application_id
;
5967 struct necp_uuid_id_mapping
*existing_mapping
= necp_uuid_lookup_app_id_locked(real_application_uuid
);
5968 if (existing_mapping
) {
5969 info
->real_application_id
= existing_mapping
->id
;
5974 if (necp_kernel_application_policies_condition_mask
& NECP_KERNEL_CONDITION_ACCOUNT_ID
&& account
!= NULL
) {
5975 struct necp_string_id_mapping
*existing_mapping
= necp_lookup_string_to_id_locked(&necp_account_id_list
, account
);
5976 if (existing_mapping
) {
5977 info
->account_id
= existing_mapping
->id
;
5981 if (necp_kernel_application_policies_condition_mask
& NECP_KERNEL_CONDITION_DOMAIN
) {
5982 info
->domain
= domain
;
5985 if (necp_kernel_application_policies_condition_mask
& NECP_KERNEL_ADDRESS_TYPE_CONDITIONS
) {
5986 if (local_addr
&& local_addr
->sa
.sa_len
> 0) {
5987 memcpy(&info
->local_addr
, local_addr
, local_addr
->sa
.sa_len
);
5988 if (local_port
!= 0) {
5989 info
->local_addr
.sin6
.sin6_port
= local_port
;
5991 } else if (local_port
!= 0) {
5992 info
->local_addr
.sin6
.sin6_len
= sizeof(struct sockaddr_in6
);
5993 info
->local_addr
.sin6
.sin6_family
= AF_INET6
;
5994 info
->local_addr
.sin6
.sin6_port
= local_port
;
5996 if (remote_addr
&& remote_addr
->sa
.sa_len
> 0) {
5997 memcpy(&info
->remote_addr
, remote_addr
, remote_addr
->sa
.sa_len
);
5998 if (remote_port
!= 0) {
5999 info
->remote_addr
.sin6
.sin6_port
= remote_port
;
6001 } else if (remote_port
!= 0) {
6002 info
->remote_addr
.sin6
.sin6_len
= sizeof(struct sockaddr_in6
);
6003 info
->remote_addr
.sin6
.sin6_family
= AF_INET6
;
6004 info
->remote_addr
.sin6
.sin6_port
= remote_port
;
static void
necp_send_application_interface_denied_event(pid_t pid, uuid_t proc_uuid, u_int32_t if_functional_type)
{
    struct kev_netpolicy_ifdenied ev_ifdenied;

    bzero(&ev_ifdenied, sizeof(ev_ifdenied));

    ev_ifdenied.ev_data.epid = pid;
    uuid_copy(ev_ifdenied.ev_data.euuid, proc_uuid);
    ev_ifdenied.ev_if_functional_type = if_functional_type;

    netpolicy_post_msg(KEV_NETPOLICY_IFDENIED, &ev_ifdenied.ev_data, sizeof(ev_ifdenied));
}
extern char *proc_name_address(void *p);

#define NECP_VERIFY_DELEGATION_ENTITLEMENT(_p, _d) \
	if (!has_checked_delegation_entitlement) { \
	        has_delegation_entitlement = (priv_check_cred(proc_ucred(_p), PRIV_NET_PRIVILEGED_SOCKET_DELEGATE, 0) == 0); \
	        has_checked_delegation_entitlement = TRUE; \
	} \
	if (!has_delegation_entitlement) { \
	        NECPLOG(LOG_ERR, "%s(%d) does not hold the necessary entitlement to delegate network traffic for other processes by %s", \
	            proc_name_address(_p), proc_pid(_p), _d); \
	        break; \
	}
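
/*
 * Illustrative sketch, not from the original source: the macro above is meant
 * to expand inside a switch case of the client-parameter TLV loop below,
 * where the has_checked_delegation_entitlement / has_delegation_entitlement
 * locals are in scope and `break` leaves the case when the caller lacks
 * PRIV_NET_PRIVILEGED_SOCKET_DELEGATE. Guarded out of the build; the wrapper
 * function and its parameters are assumptions, and the case body is
 * condensed for illustration.
 */
#if 0
static void
necp_example_verify_delegation(proc_t proc, u_int8_t type, u_int8_t *value, u_int32_t length, pid_t *pid)
{
    bool has_checked_delegation_entitlement = FALSE;
    bool has_delegation_entitlement = FALSE;

    switch (type) {
    case NECP_CLIENT_PARAMETER_PID: {
        if (length >= sizeof(pid_t)) {
            if (memcmp(pid, value, sizeof(pid_t)) == 0) {
                // Requesting the caller's own PID needs no entitlement
                break;
            }
            // Expands to an entitlement check that logs and breaks out of
            // this case when the caller may not delegate for other processes
            NECP_VERIFY_DELEGATION_ENTITLEMENT(proc, "pid");
            memcpy(pid, value, sizeof(pid_t));
        }
        break;
    }
    default:
        break;
    }
}
#endif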
6037 necp_application_find_policy_match_internal(proc_t proc
,
6038 u_int8_t
*parameters
,
6039 u_int32_t parameters_size
,
6040 struct necp_aggregate_result
*returned_result
,
6043 u_int required_interface_index
,
6044 const union necp_sockaddr_union
*override_local_addr
,
6045 const union necp_sockaddr_union
*override_remote_addr
,
6046 struct necp_client_endpoint
*returned_v4_gateway
,
6047 struct necp_client_endpoint
*returned_v6_gateway
,
6048 struct rtentry
**returned_route
, bool ignore_address
,
6054 struct necp_kernel_socket_policy
*matched_policy
= NULL
;
6055 struct necp_socket_info info
;
6056 necp_kernel_policy_filter filter_control_unit
= 0;
6057 necp_kernel_policy_result service_action
= 0;
6058 necp_kernel_policy_service service
= { 0, 0 };
6060 u_int16_t protocol
= 0;
6061 u_int32_t bound_interface_index
= required_interface_index
;
6062 u_int32_t traffic_class
= 0;
6063 u_int32_t client_flags
= 0;
6064 union necp_sockaddr_union local_addr
;
6065 union necp_sockaddr_union remote_addr
;
6066 bool no_remote_addr
= FALSE
;
6067 u_int8_t remote_family
= 0;
6068 bool no_local_addr
= FALSE
;
6069 u_int16_t local_port
= 0;
6070 u_int16_t remote_port
= 0;
6071 necp_drop_all_bypass_check_result_t drop_all_bypass
= NECP_DROP_ALL_BYPASS_CHECK_RESULT_NONE
;
6073 if (override_local_addr
) {
6074 memcpy(&local_addr
, override_local_addr
, sizeof(local_addr
));
6076 memset(&local_addr
, 0, sizeof(local_addr
));
6078 if (override_remote_addr
) {
6079 memcpy(&remote_addr
, override_remote_addr
, sizeof(remote_addr
));
6081 memset(&remote_addr
, 0, sizeof(remote_addr
));
6084 // Initialize UID, PID, and UUIDs to the current process
6085 uid_t uid
= kauth_cred_getuid(proc_ucred(proc
));
6086 pid_t pid
= proc_pid(proc
);
6087 uuid_t application_uuid
;
6088 uuid_clear(application_uuid
);
6089 uuid_t real_application_uuid
;
6090 uuid_clear(real_application_uuid
);
6091 proc_getexecutableuuid(proc
, real_application_uuid
, sizeof(real_application_uuid
));
6092 uuid_copy(application_uuid
, real_application_uuid
);
6094 char *domain
= NULL
;
6095 char *account
= NULL
;
6097 #define NECP_MAX_REQUIRED_AGENTS 16
6098 u_int32_t num_required_agent_types
= 0;
6099 struct necp_client_parameter_netagent_type required_agent_types
[NECP_MAX_REQUIRED_AGENTS
];
6100 memset(&required_agent_types
, 0, sizeof(required_agent_types
));
6102 u_int32_t netagent_ids
[NECP_MAX_NETAGENTS
];
6103 u_int32_t netagent_use_flags
[NECP_MAX_NETAGENTS
];
6104 memset(&netagent_ids
, 0, sizeof(netagent_ids
));
6105 memset(&netagent_use_flags
, 0, sizeof(netagent_use_flags
));
6106 int netagent_cursor
;
6108 bool has_checked_delegation_entitlement
= FALSE
;
6109 bool has_delegation_entitlement
= FALSE
;
6111 if (returned_result
== NULL
) {
6115 if (returned_v4_gateway
!= NULL
) {
6116 memset(returned_v4_gateway
, 0, sizeof(struct necp_client_endpoint
));
6119 if (returned_v6_gateway
!= NULL
) {
6120 memset(returned_v6_gateway
, 0, sizeof(struct necp_client_endpoint
));
6123 memset(returned_result
, 0, sizeof(struct necp_aggregate_result
));
6125 u_int32_t drop_order
= necp_process_drop_order(proc_ucred(proc
));
6127 necp_kernel_policy_result drop_dest_policy_result
= NECP_KERNEL_POLICY_RESULT_NONE
;
6129 lck_rw_lock_shared(&necp_kernel_policy_lock
);
6130 if (necp_kernel_application_policies_count
== 0) {
6131 if (necp_drop_all_order
> 0 || drop_order
> 0) {
6132 returned_result
->routing_result
= NECP_KERNEL_POLICY_RESULT_DROP
;
6133 lck_rw_done(&necp_kernel_policy_lock
);
lck_rw_done(&necp_kernel_policy_lock);

while ((offset + sizeof(u_int8_t) + sizeof(u_int32_t)) <= parameters_size) {
    u_int8_t type = necp_buffer_get_tlv_type(parameters, offset);
    u_int32_t length = necp_buffer_get_tlv_length(parameters, offset);

    if (length > (parameters_size - (offset + sizeof(u_int8_t) + sizeof(u_int32_t)))) {
        // If the length is larger than what can fit in the remaining parameters size, bail
        NECPLOG(LOG_ERR, "Invalid TLV length (%u)", length);

    u_int8_t *value = necp_buffer_get_tlv_value(parameters, offset, NULL);
    if (value != NULL) {
        case NECP_CLIENT_PARAMETER_APPLICATION: {
            if (length >= sizeof(uuid_t)) {
                if (uuid_compare(application_uuid, value) == 0) {
                NECP_VERIFY_DELEGATION_ENTITLEMENT(proc, "euuid");
                uuid_copy(application_uuid, value);
        case NECP_CLIENT_PARAMETER_REAL_APPLICATION: {
            if (length >= sizeof(uuid_t)) {
                if (uuid_compare(real_application_uuid, value) == 0) {
                NECP_VERIFY_DELEGATION_ENTITLEMENT(proc, "uuid");
                uuid_copy(real_application_uuid, value);
        case NECP_CLIENT_PARAMETER_PID: {
            if (length >= sizeof(pid_t)) {
                if (memcmp(&pid, value, sizeof(pid_t)) == 0) {
                NECP_VERIFY_DELEGATION_ENTITLEMENT(proc, "pid");
                memcpy(&pid, value, sizeof(pid_t));
        case NECP_CLIENT_PARAMETER_UID: {
            if (length >= sizeof(uid_t)) {
                if (memcmp(&uid, value, sizeof(uid_t)) == 0) {
                NECP_VERIFY_DELEGATION_ENTITLEMENT(proc, "uid");
                memcpy(&uid, value, sizeof(uid_t));
        case NECP_CLIENT_PARAMETER_DOMAIN: {
            domain = (char *)value;
            domain[length - 1] = 0;
        case NECP_CLIENT_PARAMETER_ACCOUNT: {
            account = (char *)value;
            account[length - 1] = 0;
        case NECP_CLIENT_PARAMETER_TRAFFIC_CLASS: {
            if (length >= sizeof(u_int32_t)) {
                memcpy(&traffic_class, value, sizeof(u_int32_t));
        case NECP_CLIENT_PARAMETER_IP_PROTOCOL: {
            if (length >= sizeof(u_int16_t)) {
                memcpy(&protocol, value, sizeof(u_int16_t));
            } else if (length >= sizeof(u_int8_t)) {
                memcpy(&protocol, value, sizeof(u_int8_t));
        case NECP_CLIENT_PARAMETER_BOUND_INTERFACE: {
            if (length <= IFXNAMSIZ && length > 0) {
                ifnet_t bound_interface = NULL;
                char interface_name[IFXNAMSIZ];
                memcpy(interface_name, value, length);
                interface_name[length - 1] = 0; // Make sure the string is NULL terminated
                if (ifnet_find_by_name(interface_name, &bound_interface) == 0) {
                    bound_interface_index = bound_interface->if_index;
                    ifnet_release(bound_interface);
        case NECP_CLIENT_PARAMETER_LOCAL_ADDRESS: {
            if (ignore_address || override_local_addr) {
            if (length >= sizeof(struct necp_policy_condition_addr)) {
                struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value;
                if (necp_address_is_valid(&address_struct->address.sa)) {
                    memcpy(&local_addr, &address_struct->address, sizeof(address_struct->address));
        case NECP_CLIENT_PARAMETER_REMOTE_ADDRESS: {
            if (ignore_address || override_remote_addr) {
            if (length >= sizeof(struct necp_policy_condition_addr)) {
                struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value;
                if (necp_address_is_valid(&address_struct->address.sa)) {
                    memcpy(&remote_addr, &address_struct->address, sizeof(address_struct->address));
        case NECP_CLIENT_PARAMETER_LOCAL_ENDPOINT: {
            if (ignore_address || override_local_addr) {
            if (length >= sizeof(struct necp_client_endpoint)) {
                struct necp_client_endpoint *endpoint = (struct necp_client_endpoint *)(void *)value;
                if (endpoint->u.endpoint.endpoint_family == AF_UNSPEC &&
                    endpoint->u.endpoint.endpoint_port != 0) {
                    local_port = endpoint->u.endpoint.endpoint_port;
        case NECP_CLIENT_PARAMETER_REMOTE_ENDPOINT: {
            if (ignore_address || override_remote_addr) {
            if (length >= sizeof(struct necp_client_endpoint)) {
                struct necp_client_endpoint *endpoint = (struct necp_client_endpoint *)(void *)value;
                if (endpoint->u.endpoint.endpoint_family == AF_UNSPEC &&
                    endpoint->u.endpoint.endpoint_port != 0) {
                    remote_port = endpoint->u.endpoint.endpoint_port;
        case NECP_CLIENT_PARAMETER_FLAGS: {
            if (length >= sizeof(client_flags)) {
                memcpy(&client_flags, value, sizeof(client_flags));
        case NECP_CLIENT_PARAMETER_REQUIRE_AGENT_TYPE:
        case NECP_CLIENT_PARAMETER_PREFER_AGENT_TYPE: {
            if (num_required_agent_types >= NECP_MAX_REQUIRED_AGENTS) {
            if (length >= sizeof(struct necp_client_parameter_netagent_type)) {
                memcpy(&required_agent_types[num_required_agent_types], value, sizeof(struct necp_client_parameter_netagent_type));
                num_required_agent_types++;

    offset += sizeof(u_int8_t) + sizeof(u_int32_t) + length;
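/*
 * The parameter walk above treats the client parameters buffer as a simple
 * TLV stream: a one-byte type, a four-byte length, then `length` bytes of
 * value. In sketch form (same helpers as above, nothing new assumed):
 *
 *     u_int8_t  type   = necp_buffer_get_tlv_type(parameters, offset);
 *     u_int32_t length = necp_buffer_get_tlv_length(parameters, offset);
 *     u_int8_t *value  = necp_buffer_get_tlv_value(parameters, offset, NULL);
 *     offset += sizeof(u_int8_t) + sizeof(u_int32_t) + length;
 *
 * Any record whose length overruns the remaining buffer aborts the walk.
 */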
// Check for loopback exception
if (necp_pass_loopback > 0 && necp_is_loopback(&local_addr.sa, &remote_addr.sa, NULL, NULL, bound_interface_index)) {
    returned_result->policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
    returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_PASS;
    returned_result->routed_interface_index = lo_ifp->if_index;
    *flags |= (NECP_CLIENT_RESULT_FLAG_IS_LOCAL | NECP_CLIENT_RESULT_FLAG_IS_DIRECT);

lck_rw_lock_shared(&necp_kernel_policy_lock);

u_int32_t route_rule_id_array[MAX_AGGREGATE_ROUTE_RULES];
size_t route_rule_id_array_count = 0;
necp_application_fillout_info_locked(application_uuid, real_application_uuid, account, domain, pid, uid, protocol, bound_interface_index, traffic_class, &local_addr, &remote_addr, local_port, remote_port, has_client, proc, drop_order, client_flags, &info);
matched_policy = necp_socket_find_policy_match_with_info_locked(necp_kernel_socket_policies_app_layer_map, &info, &filter_control_unit, route_rule_id_array, &route_rule_id_array_count, MAX_AGGREGATE_ROUTE_RULES, &service_action, &service, netagent_ids, netagent_use_flags, NECP_MAX_NETAGENTS, required_agent_types, num_required_agent_types, proc, NULL, NULL, &drop_dest_policy_result, &drop_all_bypass);
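/*
 * At this point the gathered client parameters are flattened into a
 * necp_socket_info and run against the app-layer policy map under the
 * shared policy lock; filter units, route rules, service actions, and
 * netagent IDs come back through the out-parameters alongside the matched
 * policy itself.
 */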
if (matched_policy) {
    returned_result->policy_id = matched_policy->id;
    returned_result->routing_result = matched_policy->result;
    memcpy(&returned_result->routing_result_parameter, &matched_policy->result_parameter, sizeof(returned_result->routing_result_parameter));

bool drop_all = false;
if (necp_drop_all_order > 0 || info.drop_order > 0 || drop_dest_policy_result == NECP_KERNEL_POLICY_RESULT_DROP) {
    // Mark socket as a drop if drop_all is set
    if (drop_all_bypass == NECP_DROP_ALL_BYPASS_CHECK_RESULT_NONE) {
        drop_all_bypass = necp_check_drop_all_bypass_result(proc);
if (drop_all && drop_all_bypass == NECP_DROP_ALL_BYPASS_CHECK_RESULT_FALSE) {
    returned_result->policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
    returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_DROP;
    returned_result->policy_id = 0;
    returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_NONE;

if (filter_control_unit == NECP_FILTER_UNIT_NO_FILTER) {
    returned_result->filter_control_unit = 0;
    returned_result->filter_control_unit = filter_control_unit;

returned_result->service_action = service_action;

// Handle trigger service
if (service.identifier != 0) {
    struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(service.identifier);
    if (mapping != NULL) {
        struct necp_service_registration *service_registration = NULL;
        uuid_copy(returned_result->service_uuid, mapping->uuid);
        returned_result->service_data = service.data;
        if (service.identifier == NECP_NULL_SERVICE_ID) {
            // NULL service is always 'registered'
            returned_result->service_flags |= NECP_SERVICE_FLAGS_REGISTERED;
        LIST_FOREACH(service_registration, &necp_registered_service_list, kernel_chain) {
            if (service.identifier == service_registration->service_id) {
                returned_result->service_flags |= NECP_SERVICE_FLAGS_REGISTERED;

for (netagent_cursor = 0; netagent_cursor < NECP_MAX_NETAGENTS; netagent_cursor++) {
    struct necp_uuid_id_mapping *mapping = NULL;
    u_int32_t netagent_id = netagent_ids[netagent_cursor];
    if (netagent_id == 0) {
    mapping = necp_uuid_lookup_uuid_with_service_id_locked(netagent_id);
    if (mapping != NULL) {
        uuid_copy(returned_result->netagents[netagent_cursor], mapping->uuid);
        returned_result->netagent_use_flags[netagent_cursor] = netagent_use_flags[netagent_cursor];

// Do routing evaluation
u_int output_bound_interface = bound_interface_index;
if (returned_result->routing_result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED) {
    output_bound_interface = returned_result->routing_result_parameter.scoped_interface_index;
} else if (returned_result->routing_result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL) {
    output_bound_interface = returned_result->routing_result_parameter.tunnel_interface_index;
} else if (returned_result->routing_result == NECP_KERNEL_POLICY_RESULT_SCOPED_DIRECT) {
    output_bound_interface = necp_get_primary_direct_interface_index();
    if (output_bound_interface == IFSCOPE_NONE) {
        returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_DROP;
        returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED;
        returned_result->routing_result_parameter.scoped_interface_index = output_bound_interface;
if (local_addr.sa.sa_len == 0 ||
    (local_addr.sa.sa_family == AF_INET && local_addr.sin.sin_addr.s_addr == 0) ||
    (local_addr.sa.sa_family == AF_INET6 && IN6_IS_ADDR_UNSPECIFIED(&local_addr.sin6.sin6_addr))) {
    no_local_addr = TRUE;

if (remote_addr.sa.sa_len == 0 ||
    (remote_addr.sa.sa_family == AF_INET && remote_addr.sin.sin_addr.s_addr == 0) ||
    (remote_addr.sa.sa_family == AF_INET6 && IN6_IS_ADDR_UNSPECIFIED(&remote_addr.sin6.sin6_addr))) {
    no_remote_addr = TRUE;
    remote_family = remote_addr.sa.sa_family;

returned_result->routed_interface_index = 0;
struct rtentry *rt = NULL;
if (!no_local_addr && (client_flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER) != 0) {
    // Treat the output bound interface as the routed interface for local address
    // validation later.
    returned_result->routed_interface_index = output_bound_interface;

if (no_remote_addr) {
    memset(&remote_addr, 0, sizeof(remote_addr));
    if (remote_family == AF_INET6) {
        // Reset address to ::
        remote_addr.sa.sa_family = AF_INET6;
        remote_addr.sa.sa_len = sizeof(struct sockaddr_in6);
        // Reset address to 0.0.0.0
        remote_addr.sa.sa_family = AF_INET;
        remote_addr.sa.sa_len = sizeof(struct sockaddr_in);

rt = rtalloc1_scoped((struct sockaddr *)&remote_addr, 0, 0,
    output_bound_interface);

if (remote_addr.sa.sa_family == AF_INET && rt != NULL &&
    IS_INTF_CLAT46(rt->rt_ifp)) {
    returned_result->routed_interface_index = 0;

if (no_remote_addr && remote_family == AF_UNSPEC &&
    (rt == NULL || rt->rt_ifp == NULL)) {
    // Route lookup for default IPv4 failed, try IPv6

    // Cleanup old route if necessary

    // Reset address to ::
    memset(&remote_addr, 0, sizeof(remote_addr));
    remote_addr.sa.sa_family = AF_INET6;
    remote_addr.sa.sa_len = sizeof(struct sockaddr_in6);

    rt = rtalloc1_scoped((struct sockaddr *)&remote_addr, 0, 0,
        output_bound_interface);

    rt->rt_ifp != NULL) {
    returned_result->routed_interface_index = rt->rt_ifp->if_index;
    /*
     * For local addresses, we allow the interface scope to be
     * either the loopback interface or the interface hosting the
     */
    if (bound_interface_index != IFSCOPE_NONE &&
        rt->rt_ifa != NULL && rt->rt_ifa->ifa_ifp &&
        (output_bound_interface == lo_ifp->if_index ||
        rt->rt_ifp->if_index == lo_ifp->if_index ||
        rt->rt_ifa->ifa_ifp->if_index == bound_interface_index)) {
        struct sockaddr_storage dst;
        unsigned int ifscope = bound_interface_index;
        /*
         * Transform dst into the internal routing table form
         */
        (void) sa_copy((struct sockaddr *)&remote_addr,
        if ((rt->rt_ifp->if_index == lo_ifp->if_index) ||
            rt_ifa_is_dst((struct sockaddr *)&dst, rt->rt_ifa)) {
            returned_result->routed_interface_index =
                bound_interface_index;

if (returned_result->routed_interface_index != 0 &&
    returned_result->routed_interface_index != lo_ifp->if_index && // Loopback can accept any local address
    // Transform local_addr into the ifaddr form
    // IPv6 Scope IDs are always embedded in the ifaddr list
    struct sockaddr_storage local_address_sanitized;
    u_int ifscope = IFSCOPE_NONE;
    (void)sa_copy(&local_addr.sa, &local_address_sanitized, &ifscope);
    SIN(&local_address_sanitized)->sin_port = 0;
    if (local_address_sanitized.ss_family == AF_INET6) {
        SIN6(&local_address_sanitized)->sin6_scope_id = 0;

    // Validate local address on routed interface
    struct ifaddr *ifa = ifa_ifwithaddr_scoped((struct sockaddr *)&local_address_sanitized, returned_result->routed_interface_index);
        // Interface address not found, reject route
        returned_result->routed_interface_index = 0;
    ifaddr_release(ifa);
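/*
 * What follows, roughly: derive the IS_LOCAL/IS_DIRECT result flags from
 * the route flags and interface addresses, surface link properties (probe
 * connectivity, cellular MSS recommendation, link quality, low power),
 * apply route rules and QoS marking, and report IPv4/IPv6/NAT64
 * reachability plus gateways for the routed interface.
 */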
if (flags != NULL) {
    if ((client_flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER) == 0) {
        // Check for local/direct
        bool is_local = FALSE;
        if (rt != NULL && (rt->rt_flags & RTF_LOCAL)) {
        } else if (returned_result->routed_interface_index != 0 &&
            // Clean up the address before comparison with interface addresses

            // Transform remote_addr into the ifaddr form
            // IPv6 Scope IDs are always embedded in the ifaddr list
            struct sockaddr_storage remote_address_sanitized;
            u_int ifscope = IFSCOPE_NONE;
            (void)sa_copy(&remote_addr.sa, &remote_address_sanitized, &ifscope);
            SIN(&remote_address_sanitized)->sin_port = 0;
            if (remote_address_sanitized.ss_family == AF_INET6) {
                SIN6(&remote_address_sanitized)->sin6_scope_id = 0;

            // Check if remote address is an interface address
            struct ifaddr *ifa = ifa_ifwithaddr((struct sockaddr *)&remote_address_sanitized);
            if (ifa != NULL && ifa->ifa_ifp != NULL) {
                u_int if_index_for_remote_addr = ifa->ifa_ifp->if_index;
                if (if_index_for_remote_addr == returned_result->routed_interface_index ||
                    if_index_for_remote_addr == lo_ifp->if_index) {
                ifaddr_release(ifa);

            *flags |= (NECP_CLIENT_RESULT_FLAG_IS_LOCAL | NECP_CLIENT_RESULT_FLAG_IS_DIRECT);
            !(rt->rt_flags & RTF_GATEWAY) &&
            (rt->rt_ifa && rt->rt_ifa->ifa_ifp && !(rt->rt_ifa->ifa_ifp->if_flags & IFF_POINTOPOINT))) {
            // Route is directly accessible
            *flags |= NECP_CLIENT_RESULT_FLAG_IS_DIRECT;

            rt->rt_ifp != NULL) {
            // Check probe status
            if (rt->rt_ifp->if_eflags & IFEF_PROBE_CONNECTIVITY) {
                *flags |= NECP_CLIENT_RESULT_FLAG_PROBE_CONNECTIVITY;

            if (rt->rt_ifp->if_type == IFT_CELLULAR) {
                struct if_cellular_status_v1 *ifsr;

                ifnet_lock_shared(rt->rt_ifp);
                lck_rw_lock_exclusive(&rt->rt_ifp->if_link_status_lock);

                if (rt->rt_ifp->if_link_status != NULL) {
                    ifsr = &rt->rt_ifp->if_link_status->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;

                    if (ifsr->valid_bitmask & IF_CELL_UL_MSS_RECOMMENDED_VALID) {
                        if (ifsr->mss_recommended == IF_CELL_UL_MSS_RECOMMENDED_NONE) {
                            returned_result->mss_recommended = NECP_CLIENT_RESULT_RECOMMENDED_MSS_NONE;
                        } else if (ifsr->mss_recommended == IF_CELL_UL_MSS_RECOMMENDED_MEDIUM) {
                            returned_result->mss_recommended = NECP_CLIENT_RESULT_RECOMMENDED_MSS_MEDIUM;
                        } else if (ifsr->mss_recommended == IF_CELL_UL_MSS_RECOMMENDED_LOW) {
                            returned_result->mss_recommended = NECP_CLIENT_RESULT_RECOMMENDED_MSS_LOW;
                lck_rw_done(&rt->rt_ifp->if_link_status_lock);
                ifnet_lock_done(rt->rt_ifp);

            // Check link quality
            if ((client_flags & NECP_CLIENT_PARAMETER_FLAG_DISCRETIONARY) &&
                (rt->rt_ifp->if_interface_state.valid_bitmask & IF_INTERFACE_STATE_LQM_STATE_VALID) &&
                rt->rt_ifp->if_interface_state.lqm_state == IFNET_LQM_THRESH_ABORT) {
                *flags |= NECP_CLIENT_RESULT_FLAG_LINK_QUALITY_ABORT;

            // Check QoS marking (fastlane)
            for (size_t route_rule_index = 0; route_rule_index < route_rule_id_array_count; route_rule_index++) {
                if (necp_update_qos_marking(rt->rt_ifp, route_rule_id_array[route_rule_index])) {
                    *flags |= NECP_CLIENT_RESULT_FLAG_ALLOW_QOS_MARKING;
                    // If the route can use QoS markings, stop iterating route rules

            if (IFNET_IS_LOW_POWER(rt->rt_ifp)) {
                *flags |= NECP_CLIENT_RESULT_FLAG_INTERFACE_LOW_POWER;

            if (traffic_class == SO_TC_BK_SYS) {
                // Block BK_SYS traffic if interface is throttled
                u_int32_t throttle_level = 0;
                if (ifnet_get_throttle(rt->rt_ifp, &throttle_level) == 0) {
                    if (throttle_level == IFNET_THROTTLE_OPPORTUNISTIC) {
                        returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_DROP;
                        memset(&returned_result->routing_result_parameter, 0, sizeof(returned_result->routing_result_parameter));

    if (returned_result->routed_interface_index != 0) {
        union necp_sockaddr_union default_address;
        struct rtentry *v4Route = NULL;
        struct rtentry *v6Route = NULL;

        memset(&default_address, 0, sizeof(default_address));

        // Reset address to 0.0.0.0
        default_address.sa.sa_family = AF_INET;
        default_address.sa.sa_len = sizeof(struct sockaddr_in);
        v4Route = rtalloc1_scoped((struct sockaddr *)&default_address, 0, 0,
            returned_result->routed_interface_index);

        // Reset address to ::
        default_address.sa.sa_family = AF_INET6;
        default_address.sa.sa_len = sizeof(struct sockaddr_in6);
        v6Route = rtalloc1_scoped((struct sockaddr *)&default_address, 0, 0,
            returned_result->routed_interface_index);

        if (v4Route != NULL) {
            if (v4Route->rt_ifp != NULL && !IS_INTF_CLAT46(v4Route->rt_ifp)) {
                *flags |= NECP_CLIENT_RESULT_FLAG_HAS_IPV4;

            if (returned_v4_gateway != NULL &&
                v4Route->rt_gateway != NULL &&
                v4Route->rt_gateway->sa_len == sizeof(returned_v4_gateway->u.sin)) {
                memcpy(&returned_v4_gateway->u.sin, v4Route->rt_gateway, sizeof(returned_v4_gateway->u.sin));
                memset(&returned_v4_gateway->u.sin.sin_zero, 0, sizeof(returned_v4_gateway->u.sin.sin_zero));

        if (v6Route != NULL) {
            if (v6Route->rt_ifp != NULL) {
                *flags |= NECP_CLIENT_RESULT_FLAG_HAS_IPV6;

                if (ifnet_get_nat64prefix(v6Route->rt_ifp, NULL) == 0) {
                    *flags |= NECP_CLIENT_RESULT_FLAG_HAS_NAT64;

            if (returned_v6_gateway != NULL &&
                v6Route->rt_gateway != NULL &&
                v6Route->rt_gateway->sa_len == sizeof(returned_v6_gateway->u.sin6)) {
                memcpy(&returned_v6_gateway->u.sin6, v6Route->rt_gateway, sizeof(returned_v6_gateway->u.sin6));

for (size_t route_rule_index = 0; route_rule_index < route_rule_id_array_count; route_rule_index++) {
    u_int32_t interface_type_denied = IFRTYPE_FUNCTIONAL_UNKNOWN;
    bool route_is_allowed = necp_route_is_allowed(rt, NULL, route_rule_id_array[route_rule_index], &interface_type_denied);
    if (!route_is_allowed) {
        // If the route is blocked, treat the lookup as a drop
        returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_DROP;
        memset(&returned_result->routing_result_parameter, 0, sizeof(returned_result->routing_result_parameter));

        if (interface_type_denied != IFRTYPE_FUNCTIONAL_UNKNOWN) {
            if (reason != NULL) {
                if (interface_type_denied == IFRTYPE_FUNCTIONAL_CELLULAR) {
                    *reason = NECP_CLIENT_RESULT_REASON_CELLULAR_DENIED;
                } else if (interface_type_denied == IFRTYPE_FUNCTIONAL_WIFI_INFRA) {
                    *reason = NECP_CLIENT_RESULT_REASON_WIFI_DENIED;
            necp_send_application_interface_denied_event(pid, application_uuid, interface_type_denied);
        // If the route gets denied, stop matching rules

if (rt != NULL && rt->rt_ifp != NULL) {
    const bool expensive_prohibited = ((client_flags & NECP_CLIENT_PARAMETER_FLAG_PROHIBIT_EXPENSIVE) &&
        IFNET_IS_EXPENSIVE(rt->rt_ifp));
    const bool constrained_prohibited = ((client_flags & NECP_CLIENT_PARAMETER_FLAG_PROHIBIT_CONSTRAINED) &&
        IFNET_IS_CONSTRAINED(rt->rt_ifp));
    if (reason != NULL) {
        if (expensive_prohibited) {
            *reason = NECP_CLIENT_RESULT_REASON_EXPENSIVE_PROHIBITED;
        } else if (constrained_prohibited) {
            *reason = NECP_CLIENT_RESULT_REASON_CONSTRAINED_PROHIBITED;
    if (expensive_prohibited || constrained_prohibited) {
        // If the client flags prohibited a property of the interface, treat it as a drop
        returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_DROP;
        memset(&returned_result->routing_result_parameter, 0, sizeof(returned_result->routing_result_parameter));

if (returned_route != NULL) {
    *returned_route = rt;

lck_rw_done(&necp_kernel_policy_lock);
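/*
 * necp_is_route_local() below answers a narrower question: does an
 * unscoped route lookup for the given remote address (normalized to
 * 0.0.0.0 or :: when empty) land on a directly attached network, per
 * IS_NECP_DEST_IN_LOCAL_NETWORKS()?
 */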
necp_is_route_local(union necp_sockaddr_union *remote_addr)
    bool no_remote_addr = FALSE;
    u_int8_t remote_family = 0;
    struct rtentry *rt = NULL;
    bool is_local = FALSE;

    if (remote_addr == NULL) {

    if (remote_addr->sa.sa_len == 0 ||
        (remote_addr->sa.sa_family == AF_INET && remote_addr->sin.sin_addr.s_addr == 0) ||
        (remote_addr->sa.sa_family == AF_INET6 && IN6_IS_ADDR_UNSPECIFIED(&remote_addr->sin6.sin6_addr))) {
        no_remote_addr = TRUE;
        remote_family = remote_addr->sa.sa_family;

    if (no_remote_addr) {
        memset(remote_addr, 0, sizeof(union necp_sockaddr_union));
        if (remote_family == AF_INET6) {
            // Reset address to ::
            remote_addr->sa.sa_family = AF_INET6;
            remote_addr->sa.sa_len = sizeof(struct sockaddr_in6);
            // Reset address to 0.0.0.0
            remote_addr->sa.sa_family = AF_INET;
            remote_addr->sa.sa_len = sizeof(struct sockaddr_in);

    // Lookup route regardless of the scoped interface to check if
    // remote address is in a local network.
    rt = rtalloc1_scoped((struct sockaddr *)remote_addr, 0, 0, 0);

    if (remote_addr->sa.sa_family == AF_INET && IS_INTF_CLAT46(rt->rt_ifp)) {

    is_local = IS_NECP_DEST_IN_LOCAL_NETWORKS(rt);
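/*
 * necp_socket_check_policy() below evaluates a single kernel policy against
 * the socket attributes passed in. Each condition follows the same pattern,
 * roughly (value_matches_cond_X is a placeholder, not a real helper):
 *
 *     if (condition_mask & NECP_KERNEL_CONDITION_X) {
 *         bool matched = value_matches_cond_X(...);
 *         if (condition_negated_mask & NECP_KERNEL_CONDITION_X) {
 *             // forbidden value present -> no match
 *         } else {
 *             // required value absent -> no match
 *         }
 *     }
 *
 * The early-return statements themselves are elided in this excerpt.
 */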
necp_socket_check_policy(struct necp_kernel_socket_policy *kernel_policy, necp_app_id app_id, necp_app_id real_app_id, errno_t cred_result, u_int32_t account_id, struct substring domain, u_int8_t domain_dot_count, pid_t pid, uid_t uid, u_int32_t bound_interface_index, u_int32_t traffic_class, u_int16_t protocol, union necp_sockaddr_union *local, union necp_sockaddr_union *remote, struct necp_client_parameter_netagent_type *required_agent_types, u_int32_t num_required_agent_types, bool has_client, uint32_t client_flags, int is_platform_binary, proc_t proc, struct rtentry *rt)
    if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES)) {
        if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
            u_int32_t cond_bound_interface_index = kernel_policy->cond_bound_interface ? kernel_policy->cond_bound_interface->if_index : 0;
            if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
                if (bound_interface_index == cond_bound_interface_index) {
                    // No match, matches forbidden interface
                if (bound_interface_index != cond_bound_interface_index) {
                    // No match, does not match required interface
            if (bound_interface_index != 0) {
                // No match, requires a non-bound packet

    if (kernel_policy->condition_mask == 0) {

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID) {
        if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_APP_ID) {
            if (app_id == kernel_policy->cond_app_id) {
                // No match, matches forbidden application
            if (app_id != kernel_policy->cond_app_id) {
                // No match, does not match required application

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
        if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
            if (real_app_id == kernel_policy->cond_real_app_id) {
                // No match, matches forbidden application
            if (real_app_id != kernel_policy->cond_real_app_id) {
                // No match, does not match required application

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_HAS_CLIENT) {

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT) {
        if (cred_result != 0) {
            // Process is missing entitlement

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PLATFORM_BINARY) {
        if (is_platform_binary == 0) {
            // Process is not platform binary

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT) {
        if (kernel_policy->cond_custom_entitlement_matched == necp_boolean_state_false) {
            // Process is missing entitlement based on previous check
        } else if (kernel_policy->cond_custom_entitlement_matched == necp_boolean_state_unknown) {
            if (kernel_policy->cond_custom_entitlement != NULL) {
                // No process found, cannot check entitlement
                task_t task = proc_task(proc);
                !IOTaskHasEntitlement(task, kernel_policy->cond_custom_entitlement)) {
                    // Process is missing custom entitlement
                    kernel_policy->cond_custom_entitlement_matched = necp_boolean_state_false;
                    kernel_policy->cond_custom_entitlement_matched = necp_boolean_state_true;

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_DOMAIN) {
        bool domain_matches = necp_hostname_matches_domain(domain, domain_dot_count, kernel_policy->cond_domain, kernel_policy->cond_domain_dot_count);
        if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_DOMAIN) {
            if (domain_matches) {
                // No match, matches forbidden domain
            if (!domain_matches) {
                // No match, does not match required domain

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID) {
        if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID) {
            if (account_id == kernel_policy->cond_account_id) {
                // No match, matches forbidden account
            if (account_id != kernel_policy->cond_account_id) {
                // No match, does not match required account

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PID) {
        if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_PID) {
            if (pid == kernel_policy->cond_pid) {
                // No match, matches forbidden pid
            if (pid != kernel_policy->cond_pid) {
                // No match, does not match required pid

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_UID) {
        if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_UID) {
            if (uid == kernel_policy->cond_uid) {
                // No match, matches forbidden uid
            if (uid != kernel_policy->cond_uid) {
                // No match, does not match required uid

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
        if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
            if (traffic_class >= kernel_policy->cond_traffic_class.start_tc &&
                traffic_class <= kernel_policy->cond_traffic_class.end_tc) {
                // No match, matches forbidden traffic class
            if (traffic_class < kernel_policy->cond_traffic_class.start_tc ||
                traffic_class > kernel_policy->cond_traffic_class.end_tc) {
                // No match, does not match required traffic class

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
        if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
            if (protocol == kernel_policy->cond_protocol) {
                // No match, matches forbidden protocol
            if (protocol != kernel_policy->cond_protocol) {
                // No match, does not match required protocol

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_AGENT_TYPE) {
        bool matches_agent_type = FALSE;
        for (u_int32_t i = 0; i < num_required_agent_types; i++) {
            struct necp_client_parameter_netagent_type *required_agent_type = &required_agent_types[i];
            if ((strlen(kernel_policy->cond_agent_type.agent_domain) == 0 ||
                strncmp(required_agent_type->netagent_domain, kernel_policy->cond_agent_type.agent_domain, NETAGENT_DOMAINSIZE) == 0) &&
                (strlen(kernel_policy->cond_agent_type.agent_type) == 0 ||
                strncmp(required_agent_type->netagent_type, kernel_policy->cond_agent_type.agent_type, NETAGENT_TYPESIZE) == 0)) {
                // Found a required agent that matches
                matches_agent_type = TRUE;
        if (!matches_agent_type) {

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_NETWORKS) {
        bool is_local = FALSE;
            is_local = IS_NECP_DEST_IN_LOCAL_NETWORKS(rt);
            is_local = necp_is_route_local(remote);
            // Either no route to validate or no match for local networks

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
        if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
            bool inRange = necp_is_addr_in_range((struct sockaddr *)local, (struct sockaddr *)&kernel_policy->cond_local_start, (struct sockaddr *)&kernel_policy->cond_local_end);
            if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
        } else if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
            bool inSubnet = necp_is_addr_in_subnet((struct sockaddr *)local, (struct sockaddr *)&kernel_policy->cond_local_start, kernel_policy->cond_local_prefix);
            if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
        if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
            bool inRange = necp_is_addr_in_range((struct sockaddr *)remote, (struct sockaddr *)&kernel_policy->cond_remote_start, (struct sockaddr *)&kernel_policy->cond_remote_end);
            if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
        } else if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
            bool inSubnet = necp_is_addr_in_subnet((struct sockaddr *)remote, (struct sockaddr *)&kernel_policy->cond_remote_start, kernel_policy->cond_remote_prefix);
            if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_CLIENT_FLAGS) {
        if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_CLIENT_FLAGS) {
            if ((client_flags & kernel_policy->cond_client_flags) == kernel_policy->cond_client_flags) {
                // Flags do match, and condition is negative, fail.
            if ((client_flags & kernel_policy->cond_client_flags) != kernel_policy->cond_client_flags) {
                // Flags do not match, fail.

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_EMPTY) {
        bool isEmpty = necp_addr_is_empty((struct sockaddr *)local);
        if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_LOCAL_EMPTY) {

    if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_EMPTY) {
        bool isEmpty = necp_addr_is_empty((struct sockaddr *)remote);
        if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REMOTE_EMPTY) {
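/*
 * necp_socket_calc_flowhash_locked() below hashes the entire
 * necp_socket_info, seeded with necp_kernel_socket_policies_gencount, so a
 * cached policy result can be invalidated whenever either the socket's
 * relevant attributes or the policy table generation changes.
 */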
static inline u_int32_t
necp_socket_calc_flowhash_locked(struct necp_socket_info *info)
{
    return net_flowhash(info, sizeof(*info), necp_kernel_socket_policies_gencount);
}
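/*
 * necp_socket_fillout_info_locked() only gathers the attributes that some
 * loaded policy can actually condition on: each block below is gated on
 * necp_kernel_socket_policies_condition_mask, so pid/uid/domain/address
 * extraction is skipped when no policy needs it.
 */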
necp_socket_fillout_info_locked(struct inpcb *inp, struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr, u_int32_t override_bound_interface, u_int32_t drop_order, struct necp_socket_info *info)
    struct socket *so = NULL;

    memset(info, 0, sizeof(struct necp_socket_info));

    so = inp->inp_socket;

    info->drop_order = drop_order;

    if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_PID) {
        info->pid = ((so->so_flags & SOF_DELEGATED) ? so->e_pid : so->last_pid);

    if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_UID) {
        info->uid = kauth_cred_getuid(so->so_cred);

    if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
        info->traffic_class = so->so_traffic_class;

    if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_HAS_CLIENT) {
        info->has_client = !uuid_is_null(inp->necp_client_uuid);

    if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_CLIENT_FLAGS) {
        info->client_flags = 0;
        if (INP_NO_CONSTRAINED(inp)) {
            info->client_flags |= NECP_CLIENT_PARAMETER_FLAG_PROHIBIT_CONSTRAINED;
        if (INP_NO_EXPENSIVE(inp)) {
            info->client_flags |= NECP_CLIENT_PARAMETER_FLAG_PROHIBIT_EXPENSIVE;
        if (inp->inp_socket->so_flags1 & SOF1_CELLFALLBACK) {
            info->client_flags |= NECP_CLIENT_PARAMETER_FLAG_FALLBACK_TRAFFIC;
        if (inp->inp_socket->so_flags1 & SOF1_INBOUND) {
            info->client_flags |= NECP_CLIENT_PARAMETER_FLAG_INBOUND;
        if (inp->inp_socket->so_options & SO_ACCEPTCONN ||
            inp->inp_flags2 & INP2_EXTERNAL_PORT) {
            info->client_flags |= NECP_CLIENT_PARAMETER_FLAG_LISTENER;

    if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
        if (inp->inp_ip_p) {
            info->protocol = inp->inp_ip_p;
            info->protocol = SOCK_PROTO(so);

    if (inp->inp_flags2 & INP2_WANT_APP_POLICY && necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_APP_ID) {
        struct necp_uuid_id_mapping *existing_mapping = necp_uuid_lookup_app_id_locked(((so->so_flags & SOF_DELEGATED) ? so->e_uuid : so->last_uuid));
        if (existing_mapping) {
            info->application_id = existing_mapping->id;

        if (!(so->so_flags & SOF_DELEGATED)) {
            info->real_application_id = info->application_id;
        } else if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
            struct necp_uuid_id_mapping *real_existing_mapping = necp_uuid_lookup_app_id_locked(so->last_uuid);
            if (real_existing_mapping) {
                info->real_application_id = real_existing_mapping->id;

        if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT) {
            info->cred_result = priv_check_cred(so->so_cred, PRIV_NET_PRIVILEGED_NECP_MATCH, 0);
            if (info->cred_result != 0) {
                // Process does not have entitlement, check the parent process
                necp_get_parent_cred_result(NULL, info);

    if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_PLATFORM_BINARY) {
        info->is_platform_binary = csproc_get_platform_binary(current_proc()) ? true : false;

    if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID && inp->inp_necp_attributes.inp_account != NULL) {
        struct necp_string_id_mapping *existing_mapping = necp_lookup_string_to_id_locked(&necp_account_id_list, inp->inp_necp_attributes.inp_account);
        if (existing_mapping) {
            info->account_id = existing_mapping->id;

    if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_DOMAIN) {
        info->domain = inp->inp_necp_attributes.inp_domain;

    if (override_bound_interface) {
        info->bound_interface_index = override_bound_interface;
        if ((inp->inp_flags & INP_BOUND_IF) && inp->inp_boundifp) {
            info->bound_interface_index = inp->inp_boundifp->if_index;

    if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_ADDRESS_TYPE_CONDITIONS) {
        if (override_local_addr != NULL) {
            if (override_local_addr->sa_family == AF_INET6 && override_local_addr->sa_len <= sizeof(struct sockaddr_in6)) {
                memcpy(&info->local_addr, override_local_addr, override_local_addr->sa_len);
                if (IN6_IS_ADDR_V4MAPPED(&(info->local_addr.sin6.sin6_addr))) {
                    struct sockaddr_in sin;
                    in6_sin6_2_sin(&sin, &(info->local_addr.sin6));
                    memset(&info->local_addr, 0, sizeof(union necp_sockaddr_union));
                    memcpy(&info->local_addr, &sin, sin.sin_len);
            } else if (override_local_addr->sa_family == AF_INET && override_local_addr->sa_len <= sizeof(struct sockaddr_in)) {
                memcpy(&info->local_addr, override_local_addr, override_local_addr->sa_len);
            if (inp->inp_vflag & INP_IPV4) {
                ((struct sockaddr_in *)&info->local_addr)->sin_family = AF_INET;
                ((struct sockaddr_in *)&info->local_addr)->sin_len = sizeof(struct sockaddr_in);
                ((struct sockaddr_in *)&info->local_addr)->sin_port = inp->inp_lport;
                memcpy(&((struct sockaddr_in *)&info->local_addr)->sin_addr, &inp->inp_laddr, sizeof(struct in_addr));
            } else if (inp->inp_vflag & INP_IPV6) {
                ((struct sockaddr_in6 *)&info->local_addr)->sin6_family = AF_INET6;
                ((struct sockaddr_in6 *)&info->local_addr)->sin6_len = sizeof(struct sockaddr_in6);
                ((struct sockaddr_in6 *)&info->local_addr)->sin6_port = inp->inp_lport;
                memcpy(&((struct sockaddr_in6 *)&info->local_addr)->sin6_addr, &inp->in6p_laddr, sizeof(struct in6_addr));

        if (override_remote_addr != NULL) {
            if (override_remote_addr->sa_family == AF_INET6 && override_remote_addr->sa_len <= sizeof(struct sockaddr_in6)) {
                memcpy(&info->remote_addr, override_remote_addr, override_remote_addr->sa_len);
                if (IN6_IS_ADDR_V4MAPPED(&(info->remote_addr.sin6.sin6_addr))) {
                    struct sockaddr_in sin;
                    in6_sin6_2_sin(&sin, &(info->remote_addr.sin6));
                    memset(&info->remote_addr, 0, sizeof(union necp_sockaddr_union));
                    memcpy(&info->remote_addr, &sin, sin.sin_len);
            } else if (override_remote_addr->sa_family == AF_INET && override_remote_addr->sa_len <= sizeof(struct sockaddr_in)) {
                memcpy(&info->remote_addr, override_remote_addr, override_remote_addr->sa_len);
            if (inp->inp_vflag & INP_IPV4) {
                ((struct sockaddr_in *)&info->remote_addr)->sin_family = AF_INET;
                ((struct sockaddr_in *)&info->remote_addr)->sin_len = sizeof(struct sockaddr_in);
                ((struct sockaddr_in *)&info->remote_addr)->sin_port = inp->inp_fport;
                memcpy(&((struct sockaddr_in *)&info->remote_addr)->sin_addr, &inp->inp_faddr, sizeof(struct in_addr));
            } else if (inp->inp_vflag & INP_IPV6) {
                ((struct sockaddr_in6 *)&info->remote_addr)->sin6_family = AF_INET6;
                ((struct sockaddr_in6 *)&info->remote_addr)->sin6_len = sizeof(struct sockaddr_in6);
                ((struct sockaddr_in6 *)&info->remote_addr)->sin6_port = inp->inp_fport;
                memcpy(&((struct sockaddr_in6 *)&info->remote_addr)->sin6_addr, &inp->in6p_faddr, sizeof(struct in6_addr));
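/*
 * necp_socket_find_policy_match_with_info_locked() walks a NULL-terminated,
 * order-sorted policy array. Non-terminal results (socket filters, route
 * rules, trigger services, netagents, skips) are accumulated into the
 * return_* out-parameters and the walk continues; the first terminal result
 * becomes matched_policy (the loop-exit itself is elided in this excerpt).
 */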
static inline struct necp_kernel_socket_policy *
necp_socket_find_policy_match_with_info_locked(struct necp_kernel_socket_policy **policy_search_array, struct necp_socket_info *info,
    necp_kernel_policy_filter *return_filter,
    u_int32_t *return_route_rule_id_array, size_t *return_route_rule_id_array_count, size_t route_rule_id_array_count,
    necp_kernel_policy_result *return_service_action, necp_kernel_policy_service *return_service,
    u_int32_t *return_netagent_array, u_int32_t *return_netagent_use_flags_array, size_t netagent_array_count,
    struct necp_client_parameter_netagent_type *required_agent_types,
    u_int32_t num_required_agent_types, proc_t proc, necp_kernel_policy_id *skip_policy_id, struct rtentry *rt,
    necp_kernel_policy_result *return_drop_dest_policy_result, necp_drop_all_bypass_check_result_t *return_drop_all_bypass)
    struct necp_kernel_socket_policy *matched_policy = NULL;
    u_int32_t skip_order = 0;
    u_int32_t skip_session_order = 0;
    size_t route_rule_id_count = 0;
    size_t netagent_cursor = 0;
    necp_drop_all_bypass_check_result_t drop_all_bypass = NECP_DROP_ALL_BYPASS_CHECK_RESULT_NONE;
    if (return_drop_all_bypass != NULL) {
        *return_drop_all_bypass = drop_all_bypass;

    // Pre-process domain for quick matching
    struct substring domain_substring = necp_trim_dots_and_stars(info->domain, info->domain ? strlen(info->domain) : 0);
    u_int8_t domain_dot_count = necp_count_dots(domain_substring.string, domain_substring.length);

    if (return_filter != NULL) {

    if (return_route_rule_id_array_count != NULL) {
        *return_route_rule_id_array_count = 0;

    if (return_service_action != NULL) {
        *return_service_action = 0;

    if (return_service != NULL) {
        return_service->identifier = 0;
        return_service->data = 0;

    // Do not subject layer-2 filter to NECP policies, return a PASS policy
    if (necp_pass_interpose > 0 && info->client_flags & NECP_CLIENT_PARAMETER_FLAG_INTERPOSE) {
        return &pass_policy;

    *return_drop_dest_policy_result = NECP_KERNEL_POLICY_RESULT_NONE;

    if (policy_search_array != NULL) {
        for (i = 0; policy_search_array[i] != NULL; i++) {
            if (necp_drop_all_order != 0 && policy_search_array[i]->session_order >= necp_drop_all_order) {
                // We've hit a drop all rule
                if (drop_all_bypass == NECP_DROP_ALL_BYPASS_CHECK_RESULT_NONE) {
                    drop_all_bypass = necp_check_drop_all_bypass_result(proc);
                    if (return_drop_all_bypass != NULL) {
                        *return_drop_all_bypass = drop_all_bypass;
                if (drop_all_bypass == NECP_DROP_ALL_BYPASS_CHECK_RESULT_FALSE) {
            if (necp_drop_dest_policy.entry_count != 0 &&
                necp_address_matches_drop_dest_policy(&info->remote_addr, policy_search_array[i]->session_order)) {
                // We've hit a drop by destination address rule
                *return_drop_dest_policy_result = NECP_KERNEL_POLICY_RESULT_DROP;
            if (info->drop_order != 0 && policy_search_array[i]->session_order >= info->drop_order) {
                // We've hit a drop order for this socket
            if (skip_session_order && policy_search_array[i]->session_order >= skip_session_order) {
                skip_session_order = 0;
            if (policy_search_array[i]->order < skip_order) {
                skip_session_order = 0;
            } else if (skip_session_order) {
            if (necp_socket_check_policy(policy_search_array[i], info->application_id, info->real_application_id, info->cred_result, info->account_id, domain_substring, domain_dot_count, info->pid, info->uid, info->bound_interface_index, info->traffic_class, info->protocol, &info->local_addr, &info->remote_addr, required_agent_types, num_required_agent_types, info->has_client, info->client_flags, info->is_platform_binary, proc, rt)) {
                if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_SOCKET_FILTER) {
                    if (return_filter && *return_filter != NECP_FILTER_UNIT_NO_FILTER) {
                        necp_kernel_policy_filter control_unit = policy_search_array[i]->result_parameter.filter_control_unit;
                        if (control_unit == NECP_FILTER_UNIT_NO_FILTER) {
                            *return_filter = control_unit;
                            *return_filter |= control_unit;
                        if (necp_debug > 1) {
                            NECPLOG(LOG_DEBUG, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) Filter %d", info->application_id, info->real_application_id, info->bound_interface_index, info->protocol, policy_search_array[i]->result_parameter.filter_control_unit);
                } else if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_ROUTE_RULES) {
                    if (return_route_rule_id_array && route_rule_id_count < route_rule_id_array_count) {
                        return_route_rule_id_array[route_rule_id_count++] = policy_search_array[i]->result_parameter.route_rule_id;
                        if (necp_debug > 1) {
                            NECPLOG(LOG_DEBUG, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) Route Rule %d", info->application_id, info->real_application_id, info->bound_interface_index, info->protocol, policy_search_array[i]->result_parameter.route_rule_id);
                } else if (necp_kernel_socket_result_is_trigger_service_type(policy_search_array[i])) {
                    if (return_service_action && *return_service_action == 0) {
                        *return_service_action = policy_search_array[i]->result;
                        if (necp_debug > 1) {
                            NECPLOG(LOG_DEBUG, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) Service Action %d", info->application_id, info->real_application_id, info->bound_interface_index, info->protocol, policy_search_array[i]->result);
                    if (return_service && return_service->identifier == 0) {
                        return_service->identifier = policy_search_array[i]->result_parameter.service.identifier;
                        return_service->data = policy_search_array[i]->result_parameter.service.data;
                        if (necp_debug > 1) {
                            NECPLOG(LOG_DEBUG, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) Service ID %d Data %d", info->application_id, info->real_application_id, info->bound_interface_index, info->protocol, policy_search_array[i]->result_parameter.service.identifier, policy_search_array[i]->result_parameter.service.data);
                } else if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_USE_NETAGENT ||
                    policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_NETAGENT_SCOPED) {
                    if (return_netagent_array != NULL &&
                        netagent_cursor < netagent_array_count) {
                        return_netagent_array[netagent_cursor] = policy_search_array[i]->result_parameter.netagent_id;
                        if (return_netagent_use_flags_array != NULL &&
                            policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_NETAGENT_SCOPED) {
                            return_netagent_use_flags_array[netagent_cursor] |= NECP_AGENT_USE_FLAG_SCOPE;
                        if (necp_debug > 1) {
                            NECPLOG(LOG_DEBUG, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) %s Netagent %d",
                                info->application_id, info->real_application_id, info->bound_interface_index, info->protocol,
                                policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_USE_NETAGENT ? "Use" : "Scope",
                                policy_search_array[i]->result_parameter.netagent_id);

                // Matched policy is a skip. Do skip and continue.
                if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
                    skip_order = policy_search_array[i]->result_parameter.skip_policy_order;
                    skip_session_order = policy_search_array[i]->session_order + 1;
                    if (skip_policy_id) {
                        *skip_policy_id = policy_search_array[i]->id;

                // Matched an allow unentitled, which clears any drop order
                if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_ALLOW_UNENTITLED) {
                    info->drop_order = 0;

                // Passed all tests, found a match
                matched_policy = policy_search_array[i];

    if (return_route_rule_id_array_count != NULL) {
        *return_route_rule_id_array_count = route_rule_id_count;
    return matched_policy;
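/*
 * necp_socket_uses_interface() below supports the IP_TUNNEL result: it
 * reports whether the socket's local address is one of the addresses owned
 * by the given interface for the socket's address family.
 */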
necp_socket_uses_interface(struct inpcb *inp, u_int32_t interface_index)
    bool found_match = FALSE;
    ifaddr_t *addresses = NULL;
    union necp_sockaddr_union address_storage;
    int family = AF_INET;
    ifnet_t interface = ifindex2ifnet[interface_index];

    if (inp == NULL || interface == NULL) {

    if (inp->inp_vflag & INP_IPV4) {
    } else if (inp->inp_vflag & INP_IPV6) {

    result = ifnet_get_address_list_family(interface, &addresses, family);
        NECPLOG(LOG_ERR, "Failed to get address list for %s%d", ifnet_name(interface), ifnet_unit(interface));

    for (i = 0; addresses[i] != NULL; i++) {
        if (ifaddr_address(addresses[i], &address_storage.sa, sizeof(address_storage)) == 0) {
            if (family == AF_INET) {
                if (memcmp(&address_storage.sin.sin_addr, &inp->inp_laddr, sizeof(inp->inp_laddr)) == 0) {
            } else if (family == AF_INET6) {
                if (memcmp(&address_storage.sin6.sin6_addr, &inp->in6p_laddr, sizeof(inp->in6p_laddr)) == 0) {

    ifnet_free_address_list(addresses);
necp_socket_is_connected(struct inpcb *inp)
    return inp->inp_socket->so_state & (SS_ISCONNECTING | SS_ISCONNECTED | SS_ISDISCONNECTING);
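/*
 * necp_socket_bypass() groups the cases that skip policy evaluation
 * entirely: loopback traffic when necp_pass_loopback is enabled, and
 * internal-coprocessor traffic detected by necp_is_intcoproc().
 */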
necp_socket_bypass(struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr, struct inpcb *inp)
    if (necp_pass_loopback > 0 && necp_is_loopback(override_local_addr, override_remote_addr, inp, NULL, IFSCOPE_NONE)) {
    } else if (necp_is_intcoproc(inp, NULL)) {
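/*
 * necp_socket_find_policy_match() is the per-socket entry point. Its
 * verdict is cached in inp->inp_policyresult keyed by the policy table
 * generation count and the socket info flow hash, so an unchanged socket
 * against an unchanged table returns the cached policy id without
 * re-walking the policy map.
 */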
necp_kernel_policy_id
necp_socket_find_policy_match(struct inpcb *inp, struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr, u_int32_t override_bound_interface)
    struct socket *so = NULL;
    necp_kernel_policy_filter filter_control_unit = 0;
    struct necp_kernel_socket_policy *matched_policy = NULL;
    necp_kernel_policy_id matched_policy_id = NECP_KERNEL_POLICY_ID_NONE;
    necp_kernel_policy_result service_action = 0;
    necp_kernel_policy_service service = { 0, 0 };
    u_int32_t drop_dest_policy_result = NECP_KERNEL_POLICY_RESULT_NONE;
    necp_drop_all_bypass_check_result_t drop_all_bypass = NECP_DROP_ALL_BYPASS_CHECK_RESULT_NONE;

    u_int32_t netagent_ids[NECP_MAX_NETAGENTS];
    memset(&netagent_ids, 0, sizeof(netagent_ids));
    int netagent_cursor;

    struct necp_socket_info info;

        return NECP_KERNEL_POLICY_ID_NONE;

    // Ignore invalid addresses
    if (override_local_addr != NULL &&
        !necp_address_is_valid(override_local_addr)) {
        override_local_addr = NULL;
    if (override_remote_addr != NULL &&
        !necp_address_is_valid(override_remote_addr)) {
        override_remote_addr = NULL;

    so = inp->inp_socket;

    u_int32_t drop_order = necp_process_drop_order(so->so_cred);

    // Don't lock. Possible race condition, but we don't want the performance hit.
    if (necp_kernel_socket_policies_count == 0 ||
        (!(inp->inp_flags2 & INP2_WANT_APP_POLICY) && necp_kernel_socket_policies_non_app_count == 0)) {
        if (necp_drop_all_order > 0 || drop_order > 0) {
            inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
            inp->inp_policyresult.skip_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
            inp->inp_policyresult.policy_gencount = 0;
            inp->inp_policyresult.app_id = 0;
            inp->inp_policyresult.flowhash = 0;
            inp->inp_policyresult.results.filter_control_unit = 0;
            inp->inp_policyresult.results.route_rule_id = 0;
            if (necp_socket_bypass(override_local_addr, override_remote_addr, inp)) {
                inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_PASS;
                inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_DROP;
        return NECP_KERNEL_POLICY_ID_NONE;

    // Check for loopback exception
    if (necp_socket_bypass(override_local_addr, override_remote_addr, inp)) {
        // Mark socket as a pass
        inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
        inp->inp_policyresult.skip_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
        inp->inp_policyresult.policy_gencount = 0;
        inp->inp_policyresult.app_id = 0;
        inp->inp_policyresult.flowhash = 0;
        inp->inp_policyresult.results.filter_control_unit = 0;
        inp->inp_policyresult.results.route_rule_id = 0;
        inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_PASS;
        return NECP_KERNEL_POLICY_ID_NONE;

    lck_rw_lock_shared(&necp_kernel_policy_lock);

    necp_socket_fillout_info_locked(inp, override_local_addr, override_remote_addr, override_bound_interface, drop_order, &info);
    inp->inp_policyresult.app_id = info.application_id;

    u_int32_t flowhash = necp_socket_calc_flowhash_locked(&info);
    if (inp->inp_policyresult.policy_id != NECP_KERNEL_POLICY_ID_NONE &&
        inp->inp_policyresult.policy_gencount == necp_kernel_socket_policies_gencount &&
        inp->inp_policyresult.flowhash == flowhash) {
        // If already matched this socket on this generation of table, skip
        lck_rw_done(&necp_kernel_policy_lock);
        return inp->inp_policyresult.policy_id;

    // Match socket to policy
    necp_kernel_policy_id skip_policy_id;
    u_int32_t route_rule_id_array[MAX_AGGREGATE_ROUTE_RULES];
    size_t route_rule_id_array_count = 0;
    matched_policy = necp_socket_find_policy_match_with_info_locked(necp_kernel_socket_policies_map[NECP_SOCKET_MAP_APP_ID_TO_BUCKET(info.application_id)], &info, &filter_control_unit, route_rule_id_array, &route_rule_id_array_count, MAX_AGGREGATE_ROUTE_RULES, &service_action, &service, netagent_ids, NULL, NECP_MAX_NETAGENTS, NULL, 0, current_proc(), &skip_policy_id, inp->inp_route.ro_rt, &drop_dest_policy_result, &drop_all_bypass);

    // If the socket matched a scoped service policy, mark as Drop if not registered.
    // This covers the cases in which a service is required (on demand) but hasn't started yet.
    if ((service_action == NECP_KERNEL_POLICY_RESULT_TRIGGER_SCOPED ||
        service_action == NECP_KERNEL_POLICY_RESULT_NO_TRIGGER_SCOPED) &&
        service.identifier != 0 &&
        service.identifier != NECP_NULL_SERVICE_ID) {
        bool service_is_registered = FALSE;
        struct necp_service_registration *service_registration = NULL;
        LIST_FOREACH(service_registration, &necp_registered_service_list, kernel_chain) {
            if (service.identifier == service_registration->service_id) {
                service_is_registered = TRUE;
        if (!service_is_registered) {
            // Mark socket as a drop if service is not registered
            inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
            inp->inp_policyresult.skip_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
            inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount;
            inp->inp_policyresult.flowhash = flowhash;
            inp->inp_policyresult.results.filter_control_unit = 0;
            inp->inp_policyresult.results.route_rule_id = 0;
            inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_DROP;

            if (necp_debug > 1) {
                NECPLOG(LOG_DEBUG, "Socket Policy: (BoundInterface %d Proto %d) Dropping packet because service is not registered", info.bound_interface_index, info.protocol);

            lck_rw_done(&necp_kernel_policy_lock);
            return NECP_KERNEL_POLICY_ID_NONE;

    for (netagent_cursor = 0; netagent_cursor < NECP_MAX_NETAGENTS; netagent_cursor++) {
        struct necp_uuid_id_mapping *mapping = NULL;
        u_int32_t netagent_id = netagent_ids[netagent_cursor];
        if (netagent_id == 0) {
        mapping = necp_uuid_lookup_uuid_with_service_id_locked(netagent_id);
        if (mapping != NULL) {
            u_int32_t agent_flags = 0;
            agent_flags = netagent_get_flags(mapping->uuid);
            if (agent_flags & NETAGENT_FLAG_REGISTERED) {
                if (agent_flags & NETAGENT_FLAG_ACTIVE) {
                } else if ((agent_flags & NETAGENT_FLAG_VOLUNTARY) == 0) {
                    if (agent_flags & NETAGENT_FLAG_KERNEL_ACTIVATED) {
                        int trigger_error = 0;
                        trigger_error = netagent_kernel_trigger(mapping->uuid);
                        if (necp_debug > 1) {
                            NECPLOG(LOG_DEBUG, "Socket Policy: Triggering inactive agent, error %d", trigger_error);

                    // Mark socket as a drop if required agent is not active
                    inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
                    inp->inp_policyresult.skip_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
                    inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount;
                    inp->inp_policyresult.flowhash = flowhash;
                    inp->inp_policyresult.results.filter_control_unit = 0;
                    inp->inp_policyresult.results.route_rule_id = 0;
                    inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_DROP;

                    if (necp_debug > 1) {
                        NECPLOG(LOG_DEBUG, "Socket Policy: (BoundInterface %d Proto %d) Dropping packet because agent is not active", info.bound_interface_index, info.protocol);

                    lck_rw_done(&necp_kernel_policy_lock);
                    return NECP_KERNEL_POLICY_ID_NONE;

    u_int32_t route_rule_id = 0;
    if (route_rule_id_array_count == 1) {
        route_rule_id = route_rule_id_array[0];
    } else if (route_rule_id_array_count > 1) {
        route_rule_id = necp_create_aggregate_route_rule(route_rule_id_array);

    bool reset_tcp_mss = false;
    if (matched_policy) {
        matched_policy_id = matched_policy->id;
        inp->inp_policyresult.policy_id = matched_policy->id;
        inp->inp_policyresult.skip_policy_id = skip_policy_id;
        inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount;
        inp->inp_policyresult.flowhash = flowhash;
        inp->inp_policyresult.results.filter_control_unit = filter_control_unit;
        inp->inp_policyresult.results.route_rule_id = route_rule_id;
        inp->inp_policyresult.results.result = matched_policy->result;
        memcpy(&inp->inp_policyresult.results.result_parameter, &matched_policy->result_parameter, sizeof(matched_policy->result_parameter));

        if (necp_socket_is_connected(inp) &&
            (matched_policy->result == NECP_KERNEL_POLICY_RESULT_DROP ||
            (matched_policy->result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL && !necp_socket_uses_interface(inp, matched_policy->result_parameter.tunnel_interface_index)))) {
            NECPLOG(LOG_DEBUG, "Marking socket in state %d as defunct", so->so_state);
            sosetdefunct(current_proc(), so, SHUTDOWN_SOCKET_LEVEL_NECP | SHUTDOWN_SOCKET_LEVEL_DISCONNECT_ALL, TRUE);
        } else if (necp_socket_is_connected(inp) &&
            matched_policy->result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL &&
            info.protocol == IPPROTO_TCP) {
            // Reset MSS on TCP socket if tunnel policy changes
            reset_tcp_mss = true;

        if (necp_debug > 1) {
            NECPLOG(LOG_DEBUG, "Socket Policy: %p (BoundInterface %d Proto %d) Policy %d Result %d Parameter %d", inp->inp_socket, info.bound_interface_index, info.protocol, matched_policy->id, matched_policy->result, matched_policy->result_parameter.tunnel_interface_index);

    bool drop_all = false;
    if (necp_drop_all_order > 0 || info.drop_order > 0 || drop_dest_policy_result == NECP_KERNEL_POLICY_RESULT_DROP) {
        // Mark socket as a drop if set
        if (drop_all_bypass == NECP_DROP_ALL_BYPASS_CHECK_RESULT_NONE) {
            drop_all_bypass = necp_check_drop_all_bypass_result(NULL);
    if (drop_all && drop_all_bypass == NECP_DROP_ALL_BYPASS_CHECK_RESULT_FALSE) {
        inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
        inp->inp_policyresult.skip_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
        inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount;
        inp->inp_policyresult.flowhash = flowhash;
        inp->inp_policyresult.results.filter_control_unit = 0;
        inp->inp_policyresult.results.route_rule_id = 0;
        inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_DROP;
        // Mark non-matching socket so we don't re-check it
        inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
        inp->inp_policyresult.skip_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
        inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount;
        inp->inp_policyresult.flowhash = flowhash;
        inp->inp_policyresult.results.filter_control_unit = filter_control_unit; // We may have matched a filter, so mark it!
        inp->inp_policyresult.results.route_rule_id = route_rule_id; // We may have matched a route rule, so mark it!
        inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_NONE;

    lck_rw_done(&necp_kernel_policy_lock);

    if (reset_tcp_mss) {
        // Update MSS when not holding the policy lock to avoid recursive locking
        tcp_mtudisc(inp, 0);

    return matched_policy_id;
7782 necp_ip_output_check_policy(struct necp_kernel_ip_output_policy
*kernel_policy
, necp_kernel_policy_id socket_policy_id
, necp_kernel_policy_id socket_skip_policy_id
, u_int32_t bound_interface_index
, u_int32_t last_interface_index
, u_int16_t protocol
, union necp_sockaddr_union
*local
, union necp_sockaddr_union
*remote
, struct rtentry
*rt
)
7784 if (!(kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_ALL_INTERFACES
)) {
7785 if (kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_BOUND_INTERFACE
) {
7786 u_int32_t cond_bound_interface_index
= kernel_policy
->cond_bound_interface
? kernel_policy
->cond_bound_interface
->if_index
: 0;
7787 if (kernel_policy
->condition_negated_mask
& NECP_KERNEL_CONDITION_BOUND_INTERFACE
) {
7788 if (bound_interface_index
== cond_bound_interface_index
) {
7789 // No match, matches forbidden interface
7793 if (bound_interface_index
!= cond_bound_interface_index
) {
7794 // No match, does not match required interface
7799 if (bound_interface_index
!= 0) {
7800 // No match, requires a non-bound packet
7806 if (kernel_policy
->condition_mask
== 0) {
7810 if (kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_POLICY_ID
) {
7811 necp_kernel_policy_id matched_policy_id
=
7812 kernel_policy
->result
== NECP_KERNEL_POLICY_RESULT_SKIP
? socket_skip_policy_id
: socket_policy_id
;
7813 if (matched_policy_id
!= kernel_policy
->cond_policy_id
) {
7814 // No match, does not match required id
7819 if (kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_LAST_INTERFACE
) {
7820 if (last_interface_index
!= kernel_policy
->cond_last_interface_index
) {
7825 if (kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_PROTOCOL
) {
7826 if (kernel_policy
->condition_negated_mask
& NECP_KERNEL_CONDITION_PROTOCOL
) {
7827 if (protocol
== kernel_policy
->cond_protocol
) {
7828 // No match, matches forbidden protocol
7832 if (protocol
!= kernel_policy
->cond_protocol
) {
7833 // No match, does not match required protocol
7839 if (kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_NETWORKS
) {
7840 bool is_local
= FALSE
;
7843 is_local
= IS_NECP_DEST_IN_LOCAL_NETWORKS(rt
);
7845 is_local
= necp_is_route_local(remote
);
7849 // Either no route to validate or no match for local networks
7854 if (kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_START
) {
7855 if (kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_END
) {
7856 bool inRange
= necp_is_addr_in_range((struct sockaddr
*)local
, (struct sockaddr
*)&kernel_policy
->cond_local_start
, (struct sockaddr
*)&kernel_policy
->cond_local_end
);
7857 if (kernel_policy
->condition_negated_mask
& NECP_KERNEL_CONDITION_LOCAL_END
) {
7866 } else if (kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_PREFIX
) {
7867 bool inSubnet
= necp_is_addr_in_subnet((struct sockaddr
*)local
, (struct sockaddr
*)&kernel_policy
->cond_local_start
, kernel_policy
->cond_local_prefix
);
7868 if (kernel_policy
->condition_negated_mask
& NECP_KERNEL_CONDITION_LOCAL_PREFIX
) {
7880 if (kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_REMOTE_START
) {
7881 if (kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_REMOTE_END
) {
7882 bool inRange
= necp_is_addr_in_range((struct sockaddr
*)remote
, (struct sockaddr
*)&kernel_policy
->cond_remote_start
, (struct sockaddr
*)&kernel_policy
->cond_remote_end
);
7883 if (kernel_policy
->condition_negated_mask
& NECP_KERNEL_CONDITION_REMOTE_END
) {
7892 } else if (kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_REMOTE_PREFIX
) {
7893 bool inSubnet
= necp_is_addr_in_subnet((struct sockaddr
*)remote
, (struct sockaddr
*)&kernel_policy
->cond_remote_start
, kernel_policy
->cond_remote_prefix
);
7894 if (kernel_policy
->condition_negated_mask
& NECP_KERNEL_CONDITION_REMOTE_PREFIX
) {
7909 static inline struct necp_kernel_ip_output_policy
*
7910 necp_ip_output_find_policy_match_locked(necp_kernel_policy_id socket_policy_id
, necp_kernel_policy_id socket_skip_policy_id
, u_int32_t bound_interface_index
, u_int32_t last_interface_index
, u_int16_t protocol
, union necp_sockaddr_union
*local_addr
, union necp_sockaddr_union
*remote_addr
, struct rtentry
*rt
, u_int32_t
*return_route_rule_id
, necp_kernel_policy_result
*return_drop_dest_policy_result
, necp_drop_all_bypass_check_result_t
*return_drop_all_bypass
)
7912 u_int32_t skip_order
= 0;
7913 u_int32_t skip_session_order
= 0;
7914 struct necp_kernel_ip_output_policy
*matched_policy
= NULL
;
7915 struct necp_kernel_ip_output_policy
**policy_search_array
= necp_kernel_ip_output_policies_map
[NECP_IP_OUTPUT_MAP_ID_TO_BUCKET(socket_policy_id
)];
7916 u_int32_t route_rule_id_array
[MAX_AGGREGATE_ROUTE_RULES
];
7917 size_t route_rule_id_count
= 0;
7918 necp_drop_all_bypass_check_result_t drop_all_bypass
= NECP_DROP_ALL_BYPASS_CHECK_RESULT_NONE
;
7919 if (return_drop_all_bypass
!= NULL
) {
7920 *return_drop_all_bypass
= drop_all_bypass
;
7923 if (return_route_rule_id
!= NULL
) {
7924 *return_route_rule_id
= 0;
7927 *return_drop_dest_policy_result
= NECP_KERNEL_POLICY_RESULT_NONE
;
7929 if (policy_search_array
!= NULL
) {
7930 for (int i
= 0; policy_search_array
[i
] != NULL
; i
++) {
7931 if (necp_drop_all_order
!= 0 && policy_search_array
[i
]->session_order
>= necp_drop_all_order
) {
7932 // We've hit a drop all rule
7933 if (drop_all_bypass
== NECP_DROP_ALL_BYPASS_CHECK_RESULT_NONE
) {
7934 drop_all_bypass
= necp_check_drop_all_bypass_result(NULL
);
7935 if (return_drop_all_bypass
!= NULL
) {
7936 *return_drop_all_bypass
= drop_all_bypass
;
7939 if (drop_all_bypass
== NECP_DROP_ALL_BYPASS_CHECK_RESULT_FALSE
) {
7943 if (necp_drop_dest_policy
.entry_count
> 0 &&
7944 necp_address_matches_drop_dest_policy(remote_addr
, policy_search_array
[i
]->session_order
)) {
7945 // We've hit a drop by destination address rule
7946 *return_drop_dest_policy_result
= NECP_KERNEL_POLICY_RESULT_DROP
;
7949 if (skip_session_order
&& policy_search_array
[i
]->session_order
>= skip_session_order
) {
7952 skip_session_order
= 0;
7955 if (policy_search_array
[i
]->order
< skip_order
) {
7961 skip_session_order
= 0;
7963 } else if (skip_session_order
) {
7968 if (necp_ip_output_check_policy(policy_search_array
[i
], socket_policy_id
, socket_skip_policy_id
, bound_interface_index
, last_interface_index
, protocol
, local_addr
, remote_addr
, rt
)) {
7969 if (policy_search_array
[i
]->result
== NECP_KERNEL_POLICY_RESULT_ROUTE_RULES
) {
7970 if (return_route_rule_id
!= NULL
&& route_rule_id_count
< MAX_AGGREGATE_ROUTE_RULES
) {
7971 route_rule_id_array
[route_rule_id_count
++] = policy_search_array
[i
]->result_parameter
.route_rule_id
;
7974 } else if (policy_search_array
[i
]->result
== NECP_KERNEL_POLICY_RESULT_SKIP
) {
7975 skip_order
= policy_search_array
[i
]->result_parameter
.skip_policy_order
;
7976 skip_session_order
= policy_search_array
[i
]->session_order
+ 1;
7980 // Passed all tests, found a match
7981 matched_policy
= policy_search_array
[i
];
7987 if (route_rule_id_count
== 1) {
7988 *return_route_rule_id
= route_rule_id_array
[0];
7989 } else if (route_rule_id_count
> 1) {
7990 *return_route_rule_id
= necp_create_aggregate_route_rule(route_rule_id_array
);
7993 return matched_policy
;
static bool
necp_output_bypass(struct mbuf *packet)
{
    if (necp_pass_loopback > 0 && necp_is_loopback(NULL, NULL, NULL, packet, IFSCOPE_NONE)) {
        return true;
    }
    if (necp_pass_keepalives > 0 && necp_get_is_keepalive_from_packet(packet)) {
        return true;
    }
    if (necp_is_intcoproc(NULL, packet)) {
        return true;
    }
    return false;
}
8011 necp_kernel_policy_id
8012 necp_ip_output_find_policy_match(struct mbuf
*packet
, int flags
, struct ip_out_args
*ipoa
, struct rtentry
*rt
,
8013 necp_kernel_policy_result
*result
, necp_kernel_policy_result_parameter
*result_parameter
)
8015 struct ip
*ip
= NULL
;
8016 int hlen
= sizeof(struct ip
);
8017 necp_kernel_policy_id socket_policy_id
= NECP_KERNEL_POLICY_ID_NONE
;
8018 necp_kernel_policy_id socket_skip_policy_id
= NECP_KERNEL_POLICY_ID_NONE
;
8019 necp_kernel_policy_id matched_policy_id
= NECP_KERNEL_POLICY_ID_NONE
;
8020 struct necp_kernel_ip_output_policy
*matched_policy
= NULL
;
8021 u_int16_t protocol
= 0;
8022 u_int32_t bound_interface_index
= 0;
8023 u_int32_t last_interface_index
= 0;
8024 union necp_sockaddr_union local_addr
;
8025 union necp_sockaddr_union remote_addr
;
8026 u_int32_t drop_dest_policy_result
= NECP_KERNEL_POLICY_RESULT_NONE
;
8027 necp_drop_all_bypass_check_result_t drop_all_bypass
= NECP_DROP_ALL_BYPASS_CHECK_RESULT_NONE
;
8033 if (result_parameter
) {
8034 memset(result_parameter
, 0, sizeof(*result_parameter
));
8037 if (packet
== NULL
) {
8038 return NECP_KERNEL_POLICY_ID_NONE
;
8041 socket_policy_id
= necp_get_policy_id_from_packet(packet
);
8042 socket_skip_policy_id
= necp_get_skip_policy_id_from_packet(packet
);
8044 // Exit early for an empty list
8045 // Don't lock. Possible race condition, but we don't want the performance hit.
8046 if (necp_kernel_ip_output_policies_count
== 0 ||
8047 (socket_policy_id
== NECP_KERNEL_POLICY_ID_NONE
&& necp_kernel_ip_output_policies_non_id_count
== 0 && necp_drop_dest_policy
.entry_count
== 0)) {
8048 if (necp_drop_all_order
> 0) {
8049 matched_policy_id
= NECP_KERNEL_POLICY_ID_NO_MATCH
;
8051 if (necp_output_bypass(packet
)) {
8052 *result
= NECP_KERNEL_POLICY_RESULT_PASS
;
8054 *result
= NECP_KERNEL_POLICY_RESULT_DROP
;
8059 return matched_policy_id
;
8062 // Check for loopback exception
8063 if (necp_output_bypass(packet
)) {
8064 matched_policy_id
= NECP_KERNEL_POLICY_ID_NO_MATCH
;
8066 *result
= NECP_KERNEL_POLICY_RESULT_PASS
;
8068 return matched_policy_id
;
8071 last_interface_index
= necp_get_last_interface_index_from_packet(packet
);
8073 // Process packet to get relevant fields
8074 ip
= mtod(packet
, struct ip
*);
8076 hlen
= _IP_VHL_HL(ip
->ip_vhl
) << 2;
8078 hlen
= ip
->ip_hl
<< 2;
8081 protocol
= ip
->ip_p
;
8083 if ((flags
& IP_OUTARGS
) && (ipoa
!= NULL
) &&
8084 (ipoa
->ipoa_flags
& IPOAF_BOUND_IF
) &&
8085 ipoa
->ipoa_boundif
!= IFSCOPE_NONE
) {
8086 bound_interface_index
= ipoa
->ipoa_boundif
;
8089 local_addr
.sin
.sin_family
= AF_INET
;
8090 local_addr
.sin
.sin_len
= sizeof(struct sockaddr_in
);
8091 memcpy(&local_addr
.sin
.sin_addr
, &ip
->ip_src
, sizeof(ip
->ip_src
));
8093 remote_addr
.sin
.sin_family
= AF_INET
;
8094 remote_addr
.sin
.sin_len
= sizeof(struct sockaddr_in
);
8095 memcpy(&((struct sockaddr_in
*)&remote_addr
)->sin_addr
, &ip
->ip_dst
, sizeof(ip
->ip_dst
));
8100 if ((int)(hlen
+ sizeof(th
)) <= packet
->m_pkthdr
.len
) {
8101 m_copydata(packet
, hlen
, sizeof(th
), (u_int8_t
*)&th
);
8102 ((struct sockaddr_in
*)&local_addr
)->sin_port
= th
.th_sport
;
8103 ((struct sockaddr_in
*)&remote_addr
)->sin_port
= th
.th_dport
;
8109 if ((int)(hlen
+ sizeof(uh
)) <= packet
->m_pkthdr
.len
) {
8110 m_copydata(packet
, hlen
, sizeof(uh
), (u_int8_t
*)&uh
);
8111 ((struct sockaddr_in
*)&local_addr
)->sin_port
= uh
.uh_sport
;
8112 ((struct sockaddr_in
*)&remote_addr
)->sin_port
= uh
.uh_dport
;
8117 ((struct sockaddr_in
*)&local_addr
)->sin_port
= 0;
8118 ((struct sockaddr_in
*)&remote_addr
)->sin_port
= 0;
8123 // Match packet to policy
8124 lck_rw_lock_shared(&necp_kernel_policy_lock
);
8125 u_int32_t route_rule_id
= 0;
8126 matched_policy
= necp_ip_output_find_policy_match_locked(socket_policy_id
, socket_skip_policy_id
, bound_interface_index
, last_interface_index
, protocol
, &local_addr
, &remote_addr
, rt
, &route_rule_id
, &drop_dest_policy_result
, &drop_all_bypass
);
8127 if (matched_policy
) {
8128 matched_policy_id
= matched_policy
->id
;
8130 *result
= matched_policy
->result
;
8133 if (result_parameter
) {
8134 memcpy(result_parameter
, &matched_policy
->result_parameter
, sizeof(matched_policy
->result_parameter
));
8137 if (route_rule_id
!= 0 &&
8138 packet
->m_pkthdr
.necp_mtag
.necp_route_rule_id
== 0) {
8139 packet
->m_pkthdr
.necp_mtag
.necp_route_rule_id
= route_rule_id
;
8142 if (necp_debug
> 1) {
8143 NECPLOG(LOG_DEBUG
, "IP Output: (ID %d BoundInterface %d LastInterface %d Proto %d) Policy %d Result %d Parameter %d Route Rule %u", socket_policy_id
, bound_interface_index
, last_interface_index
, protocol
, matched_policy
->id
, matched_policy
->result
, matched_policy
->result_parameter
.tunnel_interface_index
, route_rule_id
);
8146 bool drop_all
= false;
8148 * Apply drop-all only to packets which have never matched a primary policy (check
8149 * if the packet saved policy id is none or falls within the socket policy id range).
8151 if (socket_policy_id
< NECP_KERNEL_POLICY_ID_FIRST_VALID_IP
&&
8152 (necp_drop_all_order
> 0 || drop_dest_policy_result
== NECP_KERNEL_POLICY_RESULT_DROP
)) {
8154 if (drop_all_bypass
== NECP_DROP_ALL_BYPASS_CHECK_RESULT_NONE
) {
8155 drop_all_bypass
= necp_check_drop_all_bypass_result(NULL
);
8158 if (drop_all
&& drop_all_bypass
== NECP_DROP_ALL_BYPASS_CHECK_RESULT_FALSE
) {
8159 matched_policy_id
= NECP_KERNEL_POLICY_ID_NO_MATCH
;
8161 *result
= NECP_KERNEL_POLICY_RESULT_DROP
;
8163 } else if (route_rule_id
!= 0 &&
8164 packet
->m_pkthdr
.necp_mtag
.necp_route_rule_id
== 0) {
8165 // If we matched a route rule, mark it
8166 packet
->m_pkthdr
.necp_mtag
.necp_route_rule_id
= route_rule_id
;
8170 lck_rw_done(&necp_kernel_policy_lock
);
8172 return matched_policy_id
;
8175 necp_kernel_policy_id
8176 necp_ip6_output_find_policy_match(struct mbuf
*packet
, int flags
, struct ip6_out_args
*ip6oa
, struct rtentry
*rt
,
8177 necp_kernel_policy_result
*result
, necp_kernel_policy_result_parameter
*result_parameter
)
8179 struct ip6_hdr
*ip6
= NULL
;
8182 necp_kernel_policy_id socket_policy_id
= NECP_KERNEL_POLICY_ID_NONE
;
8183 necp_kernel_policy_id socket_skip_policy_id
= NECP_KERNEL_POLICY_ID_NONE
;
8184 necp_kernel_policy_id matched_policy_id
= NECP_KERNEL_POLICY_ID_NONE
;
8185 struct necp_kernel_ip_output_policy
*matched_policy
= NULL
;
8186 u_int16_t protocol
= 0;
8187 u_int32_t bound_interface_index
= 0;
8188 u_int32_t last_interface_index
= 0;
8189 union necp_sockaddr_union local_addr
;
8190 union necp_sockaddr_union remote_addr
;
8191 u_int32_t drop_dest_policy_result
= NECP_KERNEL_POLICY_RESULT_NONE
;
8192 necp_drop_all_bypass_check_result_t drop_all_bypass
= NECP_DROP_ALL_BYPASS_CHECK_RESULT_NONE
;
8198 if (result_parameter
) {
8199 memset(result_parameter
, 0, sizeof(*result_parameter
));
8202 if (packet
== NULL
) {
8203 return NECP_KERNEL_POLICY_ID_NONE
;
8206 socket_policy_id
= necp_get_policy_id_from_packet(packet
);
8207 socket_skip_policy_id
= necp_get_skip_policy_id_from_packet(packet
);
8209 // Exit early for an empty list
8210 // Don't lock. Possible race condition, but we don't want the performance hit.
8211 if (necp_kernel_ip_output_policies_count
== 0 ||
8212 (socket_policy_id
== NECP_KERNEL_POLICY_ID_NONE
&& necp_kernel_ip_output_policies_non_id_count
== 0 && necp_drop_dest_policy
.entry_count
== 0)) {
8213 if (necp_drop_all_order
> 0) {
8214 matched_policy_id
= NECP_KERNEL_POLICY_ID_NO_MATCH
;
8216 if (necp_output_bypass(packet
)) {
8217 *result
= NECP_KERNEL_POLICY_RESULT_PASS
;
8219 *result
= NECP_KERNEL_POLICY_RESULT_DROP
;
8224 return matched_policy_id
;
8227 // Check for loopback exception
8228 if (necp_output_bypass(packet
)) {
8229 matched_policy_id
= NECP_KERNEL_POLICY_ID_NO_MATCH
;
8231 *result
= NECP_KERNEL_POLICY_RESULT_PASS
;
8233 return matched_policy_id
;
8236 last_interface_index
= necp_get_last_interface_index_from_packet(packet
);
8238 // Process packet to get relevant fields
8239 ip6
= mtod(packet
, struct ip6_hdr
*);
8241 if ((flags
& IPV6_OUTARGS
) && (ip6oa
!= NULL
) &&
8242 (ip6oa
->ip6oa_flags
& IP6OAF_BOUND_IF
) &&
8243 ip6oa
->ip6oa_boundif
!= IFSCOPE_NONE
) {
8244 bound_interface_index
= ip6oa
->ip6oa_boundif
;
8247 ((struct sockaddr_in6
*)&local_addr
)->sin6_family
= AF_INET6
;
8248 ((struct sockaddr_in6
*)&local_addr
)->sin6_len
= sizeof(struct sockaddr_in6
);
8249 memcpy(&((struct sockaddr_in6
*)&local_addr
)->sin6_addr
, &ip6
->ip6_src
, sizeof(ip6
->ip6_src
));
8251 ((struct sockaddr_in6
*)&remote_addr
)->sin6_family
= AF_INET6
;
8252 ((struct sockaddr_in6
*)&remote_addr
)->sin6_len
= sizeof(struct sockaddr_in6
);
8253 memcpy(&((struct sockaddr_in6
*)&remote_addr
)->sin6_addr
, &ip6
->ip6_dst
, sizeof(ip6
->ip6_dst
));
8255 offset
= ip6_lasthdr(packet
, 0, IPPROTO_IPV6
, &next
);
8256 if (offset
>= 0 && packet
->m_pkthdr
.len
>= offset
) {
8261 if ((int)(offset
+ sizeof(th
)) <= packet
->m_pkthdr
.len
) {
8262 m_copydata(packet
, offset
, sizeof(th
), (u_int8_t
*)&th
);
8263 ((struct sockaddr_in6
*)&local_addr
)->sin6_port
= th
.th_sport
;
8264 ((struct sockaddr_in6
*)&remote_addr
)->sin6_port
= th
.th_dport
;
8270 if ((int)(offset
+ sizeof(uh
)) <= packet
->m_pkthdr
.len
) {
8271 m_copydata(packet
, offset
, sizeof(uh
), (u_int8_t
*)&uh
);
8272 ((struct sockaddr_in6
*)&local_addr
)->sin6_port
= uh
.uh_sport
;
8273 ((struct sockaddr_in6
*)&remote_addr
)->sin6_port
= uh
.uh_dport
;
8278 ((struct sockaddr_in6
*)&local_addr
)->sin6_port
= 0;
8279 ((struct sockaddr_in6
*)&remote_addr
)->sin6_port
= 0;
8285 // Match packet to policy
8286 lck_rw_lock_shared(&necp_kernel_policy_lock
);
8287 u_int32_t route_rule_id
= 0;
8288 matched_policy
= necp_ip_output_find_policy_match_locked(socket_policy_id
, socket_skip_policy_id
, bound_interface_index
, last_interface_index
, protocol
, &local_addr
, &remote_addr
, rt
, &route_rule_id
, &drop_dest_policy_result
, &drop_all_bypass
);
8289 if (matched_policy
) {
8290 matched_policy_id
= matched_policy
->id
;
8292 *result
= matched_policy
->result
;
8295 if (result_parameter
) {
8296 memcpy(result_parameter
, &matched_policy
->result_parameter
, sizeof(matched_policy
->result_parameter
));
8299 if (route_rule_id
!= 0 &&
8300 packet
->m_pkthdr
.necp_mtag
.necp_route_rule_id
== 0) {
8301 packet
->m_pkthdr
.necp_mtag
.necp_route_rule_id
= route_rule_id
;
8304 if (necp_debug
> 1) {
8305 NECPLOG(LOG_DEBUG
, "IP6 Output: (ID %d BoundInterface %d LastInterface %d Proto %d) Policy %d Result %d Parameter %d Route Rule %u", socket_policy_id
, bound_interface_index
, last_interface_index
, protocol
, matched_policy
->id
, matched_policy
->result
, matched_policy
->result_parameter
.tunnel_interface_index
, route_rule_id
);
8308 bool drop_all
= false;
8310 * Apply drop-all only to packets which have never matched a primary policy (check
8311 * if the packet saved policy id is none or falls within the socket policy id range).
8313 if (socket_policy_id
< NECP_KERNEL_POLICY_ID_FIRST_VALID_IP
&&
8314 (necp_drop_all_order
> 0 || drop_dest_policy_result
== NECP_KERNEL_POLICY_RESULT_DROP
)) {
8316 if (drop_all_bypass
== NECP_DROP_ALL_BYPASS_CHECK_RESULT_NONE
) {
8317 drop_all_bypass
= necp_check_drop_all_bypass_result(NULL
);
8320 if (drop_all
&& drop_all_bypass
== NECP_DROP_ALL_BYPASS_CHECK_RESULT_FALSE
) {
8321 matched_policy_id
= NECP_KERNEL_POLICY_ID_NO_MATCH
;
8323 *result
= NECP_KERNEL_POLICY_RESULT_DROP
;
8325 } else if (route_rule_id
!= 0 &&
8326 packet
->m_pkthdr
.necp_mtag
.necp_route_rule_id
== 0) {
8327 // If we matched a route rule, mark it
8328 packet
->m_pkthdr
.necp_mtag
.necp_route_rule_id
= route_rule_id
;
8332 lck_rw_done(&necp_kernel_policy_lock
);
8334 return matched_policy_id
;
static bool
necp_is_addr_in_range(struct sockaddr *addr, struct sockaddr *range_start, struct sockaddr *range_end)
{
    int cmp = 0;

    if (addr == NULL || range_start == NULL || range_end == NULL) {
        return FALSE;
    }

    /* Must be greater than or equal to start */
    cmp = necp_addr_compare(addr, range_start, 1);
    if (cmp != 0 && cmp != 1) {
        return FALSE;
    }

    /* Must be less than or equal to end */
    cmp = necp_addr_compare(addr, range_end, 1);
    if (cmp != 0 && cmp != -1) {
        return FALSE;
    }

    return TRUE;
}
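/*
 * Illustrative sketch (not part of this file): the inclusive range test above
 * leans on a three-way comparator that returns -1/0/1. The user-space analog
 * below, written over plain host-order 32-bit values, shows the same
 * "result must be 0 or 1 against the start, 0 or -1 against the end" pattern.
 * The helper names (u32_compare, u32_in_range) are hypothetical and exist only
 * for this example; it compiles as a standalone translation unit.
 */
#if 0 /* standalone user-space sketch */
#include <stdbool.h>
#include <stdint.h>

static int
u32_compare(uint32_t a, uint32_t b)
{
    // Mirror the -1 / 0 / 1 convention used by the comparator above
    return (a < b) ? -1 : (a > b) ? 1 : 0;
}

static bool
u32_in_range(uint32_t value, uint32_t start, uint32_t end)
{
    // Must be >= start: comparator result 0 or 1
    int cmp = u32_compare(value, start);
    if (cmp != 0 && cmp != 1) {
        return false;
    }
    // Must be <= end: comparator result 0 or -1
    cmp = u32_compare(value, end);
    if (cmp != 0 && cmp != -1) {
        return false;
    }
    return true;
}
#endif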
static bool
necp_is_range_in_range(struct sockaddr *inner_range_start, struct sockaddr *inner_range_end, struct sockaddr *range_start, struct sockaddr *range_end)
{
    int cmp = 0;

    if (inner_range_start == NULL || inner_range_end == NULL || range_start == NULL || range_end == NULL) {
        return FALSE;
    }

    /* Must be greater than or equal to start */
    cmp = necp_addr_compare(inner_range_start, range_start, 1);
    if (cmp != 0 && cmp != 1) {
        return FALSE;
    }

    /* Must be less than or equal to end */
    cmp = necp_addr_compare(inner_range_end, range_end, 1);
    if (cmp != 0 && cmp != -1) {
        return FALSE;
    }

    return TRUE;
}
static bool
necp_is_addr_in_subnet(struct sockaddr *addr, struct sockaddr *subnet_addr, u_int8_t subnet_prefix)
{
    if (addr == NULL || subnet_addr == NULL) {
        return FALSE;
    }

    if (addr->sa_family != subnet_addr->sa_family || addr->sa_len != subnet_addr->sa_len) {
        return FALSE;
    }

    switch (addr->sa_family) {
    case AF_INET: {
        if (satosin(subnet_addr)->sin_port != 0 &&
            satosin(addr)->sin_port != satosin(subnet_addr)->sin_port) {
            return FALSE;
        }
        return necp_buffer_compare_with_bit_prefix((u_int8_t *)&satosin(addr)->sin_addr, (u_int8_t *)&satosin(subnet_addr)->sin_addr, subnet_prefix);
    }
    case AF_INET6: {
        if (satosin6(subnet_addr)->sin6_port != 0 &&
            satosin6(addr)->sin6_port != satosin6(subnet_addr)->sin6_port) {
            return FALSE;
        }
        if (satosin6(addr)->sin6_scope_id &&
            satosin6(subnet_addr)->sin6_scope_id &&
            satosin6(addr)->sin6_scope_id != satosin6(subnet_addr)->sin6_scope_id) {
            return FALSE;
        }
        return necp_buffer_compare_with_bit_prefix((u_int8_t *)&satosin6(addr)->sin6_addr, (u_int8_t *)&satosin6(subnet_addr)->sin6_addr, subnet_prefix);
    }
    default: {
        return FALSE;
    }
    }

    return FALSE;
}
8430 * 2: Not comparable or error
8433 necp_addr_compare(struct sockaddr
*sa1
, struct sockaddr
*sa2
, int check_port
)
8436 int port_result
= 0;
8438 if (sa1
->sa_family
!= sa2
->sa_family
|| sa1
->sa_len
!= sa2
->sa_len
) {
8442 if (sa1
->sa_len
== 0) {
8446 switch (sa1
->sa_family
) {
8448 if (sa1
->sa_len
!= sizeof(struct sockaddr_in
)) {
8452 result
= memcmp(&satosin(sa1
)->sin_addr
.s_addr
, &satosin(sa2
)->sin_addr
.s_addr
, sizeof(satosin(sa1
)->sin_addr
.s_addr
));
8455 if (satosin(sa1
)->sin_port
< satosin(sa2
)->sin_port
) {
8457 } else if (satosin(sa1
)->sin_port
> satosin(sa2
)->sin_port
) {
8462 result
= port_result
;
8463 } else if ((result
> 0 && port_result
< 0) || (result
< 0 && port_result
> 0)) {
8471 if (sa1
->sa_len
!= sizeof(struct sockaddr_in6
)) {
8475 if (satosin6(sa1
)->sin6_scope_id
!= satosin6(sa2
)->sin6_scope_id
) {
8479 result
= memcmp(&satosin6(sa1
)->sin6_addr
.s6_addr
[0], &satosin6(sa2
)->sin6_addr
.s6_addr
[0], sizeof(struct in6_addr
));
8482 if (satosin6(sa1
)->sin6_port
< satosin6(sa2
)->sin6_port
) {
8484 } else if (satosin6(sa1
)->sin6_port
> satosin6(sa2
)->sin6_port
) {
8489 result
= port_result
;
8490 } else if ((result
> 0 && port_result
< 0) || (result
< 0 && port_result
> 0)) {
8498 result
= memcmp(sa1
, sa2
, sa1
->sa_len
);
8505 } else if (result
> 0) {
bool
necp_buffer_compare_with_bit_prefix(u_int8_t *p1, u_int8_t *p2, u_int32_t bits)
{
    u_int8_t mask;

    /* Handle null pointers */
    if (p1 == NULL || p2 == NULL) {
        return p1 == p2;
    }

    while (bits >= 8) {
        if (*p1++ != *p2++) {
            return FALSE;
        }
        bits -= 8;
    }

    if (bits > 0) {
        mask = ~((1 << (8 - bits)) - 1);
        if ((*p1 & mask) != (*p2 & mask)) {
            return FALSE;
        }
    }
    return TRUE;
}
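/*
 * Illustrative sketch (not part of this file): the same whole-bytes-then-
 * masked-remainder walk exercised in user space, for example checking that
 * 10.1.2.3 falls inside 10.1.0.0/16. The helper prefix_match() and example()
 * are hypothetical and exist only for this sketch.
 */
#if 0 /* standalone user-space sketch */
#include <stdbool.h>
#include <stdint.h>

static bool
prefix_match(const uint8_t *p1, const uint8_t *p2, uint32_t bits)
{
    // Compare full bytes first
    while (bits >= 8) {
        if (*p1++ != *p2++) {
            return false;
        }
        bits -= 8;
    }
    // Compare the leftover high-order bits, if any
    if (bits > 0) {
        uint8_t mask = (uint8_t)~((1 << (8 - bits)) - 1);
        if ((*p1 & mask) != (*p2 & mask)) {
            return false;
        }
    }
    return true;
}

static bool
example(void)
{
    const uint8_t addr[4]   = { 10, 1, 2, 3 }; // 10.1.2.3
    const uint8_t subnet[4] = { 10, 1, 0, 0 }; // 10.1.0.0/16
    return prefix_match(addr, subnet, 16);     // true
}
#endif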
static bool
necp_addr_is_empty(struct sockaddr *addr)
{
    if (addr == NULL) {
        return TRUE;
    }

    if (addr->sa_len == 0) {
        return TRUE;
    }

    switch (addr->sa_family) {
    case AF_INET: {
        static struct sockaddr_in ipv4_empty_address = {
            .sin_len = sizeof(struct sockaddr_in),
            .sin_family = AF_INET,
            .sin_addr = { .s_addr = 0 }, // 0.0.0.0
        };
        if (necp_addr_compare(addr, (struct sockaddr *)&ipv4_empty_address, 0) == 0) {
            return TRUE;
        } else {
            return FALSE;
        }
    }
    case AF_INET6: {
        static struct sockaddr_in6 ipv6_empty_address = {
            .sin6_len = sizeof(struct sockaddr_in6),
            .sin6_family = AF_INET6,
            .sin6_addr = IN6ADDR_ANY_INIT, // ::
        };
        if (necp_addr_compare(addr, (struct sockaddr *)&ipv6_empty_address, 0) == 0) {
            return TRUE;
        } else {
            return FALSE;
        }
    }
    default:
        return FALSE;
    }

    return FALSE;
}
8587 necp_update_qos_marking(struct ifnet
*ifp
, u_int32_t route_rule_id
)
8589 bool qos_marking
= FALSE
;
8590 int exception_index
= 0;
8591 struct necp_route_rule
*route_rule
= NULL
;
8593 route_rule
= necp_lookup_route_rule_locked(&necp_route_rules
, route_rule_id
);
8594 if (route_rule
== NULL
) {
8595 qos_marking
= FALSE
;
8599 qos_marking
= (route_rule
->default_action
== NECP_ROUTE_RULE_QOS_MARKING
) ? TRUE
: FALSE
;
8605 for (exception_index
= 0; exception_index
< MAX_ROUTE_RULE_INTERFACES
; exception_index
++) {
8606 if (route_rule
->exception_if_indices
[exception_index
] == 0) {
8609 if (route_rule
->exception_if_actions
[exception_index
] != NECP_ROUTE_RULE_QOS_MARKING
) {
8612 if (route_rule
->exception_if_indices
[exception_index
] == ifp
->if_index
) {
8614 if (necp_debug
> 2) {
8615 NECPLOG(LOG_DEBUG
, "QoS Marking : Interface match %d for Rule %d Allowed %d",
8616 route_rule
->exception_if_indices
[exception_index
], route_rule_id
, qos_marking
);
8622 if ((route_rule
->cellular_action
== NECP_ROUTE_RULE_QOS_MARKING
&& IFNET_IS_CELLULAR(ifp
)) ||
8623 (route_rule
->wifi_action
== NECP_ROUTE_RULE_QOS_MARKING
&& IFNET_IS_WIFI(ifp
)) ||
8624 (route_rule
->wired_action
== NECP_ROUTE_RULE_QOS_MARKING
&& IFNET_IS_WIRED(ifp
)) ||
8625 (route_rule
->expensive_action
== NECP_ROUTE_RULE_QOS_MARKING
&& IFNET_IS_EXPENSIVE(ifp
)) ||
8626 (route_rule
->constrained_action
== NECP_ROUTE_RULE_QOS_MARKING
&& IFNET_IS_CONSTRAINED(ifp
))) {
8628 if (necp_debug
> 2) {
8629 NECPLOG(LOG_DEBUG
, "QoS Marking: C:%d WF:%d W:%d E:%d Cn:%d for Rule %d Allowed %d",
8630 route_rule
->cellular_action
, route_rule
->wifi_action
, route_rule
->wired_action
,
8631 route_rule
->expensive_action
, route_rule
->constrained_action
, route_rule_id
, qos_marking
);
8636 if (necp_debug
> 1) {
8637 NECPLOG(LOG_DEBUG
, "QoS Marking: Rule %d ifp %s Allowed %d",
8638 route_rule_id
, ifp
? ifp
->if_xname
: "", qos_marking
);
8644 necp_socket_update_qos_marking(struct inpcb
*inp
, struct rtentry
*route
, struct ifnet
*interface
, u_int32_t route_rule_id
)
8646 bool qos_marking
= FALSE
;
8647 struct ifnet
*ifp
= interface
= NULL
;
8649 if (net_qos_policy_restricted
== 0) {
8652 if (inp
->inp_socket
== NULL
) {
8655 if ((inp
->inp_socket
->so_flags1
& SOF1_QOSMARKING_POLICY_OVERRIDE
)) {
8659 * This is racy but we do not need the performance hit of taking necp_kernel_policy_lock
8661 if (inp
->inp_policyresult
.results
.qos_marking_gencount
== necp_kernel_socket_policies_gencount
) {
8665 lck_rw_lock_shared(&necp_kernel_policy_lock
);
8667 if (ifp
== NULL
&& route
!= NULL
) {
8668 ifp
= route
->rt_ifp
;
8671 * By default, until we have a interface, do not mark and reevaluate the Qos marking policy
8673 if (ifp
== NULL
|| route_rule_id
== 0) {
8674 qos_marking
= FALSE
;
8678 if (ROUTE_RULE_IS_AGGREGATE(route_rule_id
)) {
8679 struct necp_aggregate_route_rule
*aggregate_route_rule
= necp_lookup_aggregate_route_rule_locked(route_rule_id
);
8680 if (aggregate_route_rule
!= NULL
) {
8682 for (index
= 0; index
< MAX_AGGREGATE_ROUTE_RULES
; index
++) {
8683 u_int32_t sub_route_rule_id
= aggregate_route_rule
->rule_ids
[index
];
8684 if (sub_route_rule_id
== 0) {
8687 qos_marking
= necp_update_qos_marking(ifp
, sub_route_rule_id
);
8688 if (qos_marking
== TRUE
) {
8694 qos_marking
= necp_update_qos_marking(ifp
, route_rule_id
);
8697 * Now that we have an interface we remember the gencount
8699 inp
->inp_policyresult
.results
.qos_marking_gencount
= necp_kernel_socket_policies_gencount
;
8702 lck_rw_done(&necp_kernel_policy_lock
);
8704 if (qos_marking
== TRUE
) {
8705 inp
->inp_socket
->so_flags1
|= SOF1_QOSMARKING_ALLOWED
;
8707 inp
->inp_socket
->so_flags1
&= ~SOF1_QOSMARKING_ALLOWED
;
static bool
necp_route_is_lqm_abort(struct ifnet *ifp, struct ifnet *delegated_ifp)
{
    if (ifp != NULL &&
        (ifp->if_interface_state.valid_bitmask & IF_INTERFACE_STATE_LQM_STATE_VALID) &&
        ifp->if_interface_state.lqm_state == IFNET_LQM_THRESH_ABORT) {
        return true;
    }
    if (delegated_ifp != NULL &&
        (delegated_ifp->if_interface_state.valid_bitmask & IF_INTERFACE_STATE_LQM_STATE_VALID) &&
        delegated_ifp->if_interface_state.lqm_state == IFNET_LQM_THRESH_ABORT) {
        return true;
    }
    return false;
}
8728 necp_route_is_allowed_inner(struct rtentry
*route
, struct ifnet
*ifp
, u_int32_t route_rule_id
, u_int32_t
*interface_type_denied
)
8730 bool default_is_allowed
= TRUE
;
8731 u_int8_t type_aggregate_action
= NECP_ROUTE_RULE_NONE
;
8732 int exception_index
= 0;
8733 struct ifnet
*delegated_ifp
= NULL
;
8734 struct necp_route_rule
*route_rule
= NULL
;
8736 route_rule
= necp_lookup_route_rule_locked(&necp_route_rules
, route_rule_id
);
8737 if (route_rule
== NULL
) {
8741 default_is_allowed
= (route_rule
->default_action
== NECP_ROUTE_RULE_DENY_INTERFACE
) ? FALSE
: TRUE
;
8743 ifp
= route
->rt_ifp
;
8746 if (necp_debug
> 1 && !default_is_allowed
) {
8747 NECPLOG(LOG_DEBUG
, "Route Allowed: No interface for route, using default for Rule %d Allowed %d", route_rule_id
, default_is_allowed
);
8749 return default_is_allowed
;
8752 delegated_ifp
= ifp
->if_delegated
.ifp
;
8753 for (exception_index
= 0; exception_index
< MAX_ROUTE_RULE_INTERFACES
; exception_index
++) {
8754 if (route_rule
->exception_if_indices
[exception_index
] == 0) {
8757 if (route_rule
->exception_if_indices
[exception_index
] == ifp
->if_index
||
8758 (delegated_ifp
!= NULL
&& route_rule
->exception_if_indices
[exception_index
] == delegated_ifp
->if_index
)) {
8759 if (route_rule
->exception_if_actions
[exception_index
] == NECP_ROUTE_RULE_DENY_LQM_ABORT
) {
8760 const bool lqm_abort
= necp_route_is_lqm_abort(ifp
, delegated_ifp
);
8761 if (necp_debug
> 1 && lqm_abort
) {
8762 NECPLOG(LOG_DEBUG
, "Route Allowed: Interface match %d for Rule %d Deny LQM Abort",
8763 route_rule
->exception_if_indices
[exception_index
], route_rule_id
);
8766 } else if (IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(route_rule
->exception_if_actions
[exception_index
])) {
8767 if (necp_debug
> 1) {
8768 NECPLOG(LOG_DEBUG
, "Route Allowed: Interface match %d for Rule %d Allowed %d", route_rule
->exception_if_indices
[exception_index
], route_rule_id
, ((route_rule
->exception_if_actions
[exception_index
] == NECP_ROUTE_RULE_DENY_INTERFACE
) ? FALSE
: TRUE
));
8770 return (route_rule
->exception_if_actions
[exception_index
] == NECP_ROUTE_RULE_DENY_INTERFACE
) ? FALSE
: TRUE
;
8775 if (IFNET_IS_CELLULAR(ifp
)) {
8776 if (route_rule
->cellular_action
== NECP_ROUTE_RULE_DENY_LQM_ABORT
) {
8777 if (necp_route_is_lqm_abort(ifp
, delegated_ifp
)) {
8778 if (interface_type_denied
!= NULL
) {
8779 *interface_type_denied
= IFRTYPE_FUNCTIONAL_CELLULAR
;
8781 // Mark aggregate action as deny
8782 type_aggregate_action
= NECP_ROUTE_RULE_DENY_INTERFACE
;
8784 } else if (IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(route_rule
->cellular_action
)) {
8785 if (interface_type_denied
!= NULL
) {
8786 *interface_type_denied
= IFRTYPE_FUNCTIONAL_CELLULAR
;
8788 if (type_aggregate_action
== NECP_ROUTE_RULE_NONE
||
8789 (type_aggregate_action
== NECP_ROUTE_RULE_ALLOW_INTERFACE
&&
8790 route_rule
->cellular_action
== NECP_ROUTE_RULE_DENY_INTERFACE
)) {
8791 // Deny wins if there is a conflict
8792 type_aggregate_action
= route_rule
->cellular_action
;
8797 if (IFNET_IS_WIFI(ifp
)) {
8798 if (route_rule
->wifi_action
== NECP_ROUTE_RULE_DENY_LQM_ABORT
) {
8799 if (necp_route_is_lqm_abort(ifp
, delegated_ifp
)) {
8800 if (interface_type_denied
!= NULL
) {
8801 *interface_type_denied
= IFRTYPE_FUNCTIONAL_WIFI_INFRA
;
8803 // Mark aggregate action as deny
8804 type_aggregate_action
= NECP_ROUTE_RULE_DENY_INTERFACE
;
8806 } else if (IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(route_rule
->wifi_action
)) {
8807 if (interface_type_denied
!= NULL
) {
8808 *interface_type_denied
= IFRTYPE_FUNCTIONAL_WIFI_INFRA
;
8810 if (type_aggregate_action
== NECP_ROUTE_RULE_NONE
||
8811 (type_aggregate_action
== NECP_ROUTE_RULE_ALLOW_INTERFACE
&&
8812 route_rule
->wifi_action
== NECP_ROUTE_RULE_DENY_INTERFACE
)) {
8813 // Deny wins if there is a conflict
8814 type_aggregate_action
= route_rule
->wifi_action
;
8819 if (IFNET_IS_WIRED(ifp
)) {
8820 if (route_rule
->wired_action
== NECP_ROUTE_RULE_DENY_LQM_ABORT
) {
8821 if (necp_route_is_lqm_abort(ifp
, delegated_ifp
)) {
8822 if (interface_type_denied
!= NULL
) {
8823 *interface_type_denied
= IFRTYPE_FUNCTIONAL_WIRED
;
8825 // Mark aggregate action as deny
8826 type_aggregate_action
= NECP_ROUTE_RULE_DENY_INTERFACE
;
8828 } else if (IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(route_rule
->wired_action
)) {
8829 if (interface_type_denied
!= NULL
) {
8830 *interface_type_denied
= IFRTYPE_FUNCTIONAL_WIRED
;
8832 if (type_aggregate_action
== NECP_ROUTE_RULE_NONE
||
8833 (type_aggregate_action
== NECP_ROUTE_RULE_ALLOW_INTERFACE
&&
8834 route_rule
->wired_action
== NECP_ROUTE_RULE_DENY_INTERFACE
)) {
8835 // Deny wins if there is a conflict
8836 type_aggregate_action
= route_rule
->wired_action
;
8841 if (IFNET_IS_EXPENSIVE(ifp
)) {
8842 if (route_rule
->expensive_action
== NECP_ROUTE_RULE_DENY_LQM_ABORT
) {
8843 if (necp_route_is_lqm_abort(ifp
, delegated_ifp
)) {
8844 // Mark aggregate action as deny
8845 type_aggregate_action
= NECP_ROUTE_RULE_DENY_INTERFACE
;
8847 } else if (IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(route_rule
->expensive_action
)) {
8848 if (type_aggregate_action
== NECP_ROUTE_RULE_NONE
||
8849 (type_aggregate_action
== NECP_ROUTE_RULE_ALLOW_INTERFACE
&&
8850 route_rule
->expensive_action
== NECP_ROUTE_RULE_DENY_INTERFACE
)) {
8851 // Deny wins if there is a conflict
8852 type_aggregate_action
= route_rule
->expensive_action
;
8857 if (IFNET_IS_CONSTRAINED(ifp
)) {
8858 if (route_rule
->constrained_action
== NECP_ROUTE_RULE_DENY_LQM_ABORT
) {
8859 if (necp_route_is_lqm_abort(ifp
, delegated_ifp
)) {
8860 // Mark aggregate action as deny
8861 type_aggregate_action
= NECP_ROUTE_RULE_DENY_INTERFACE
;
8863 } else if (IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(route_rule
->constrained_action
)) {
8864 if (type_aggregate_action
== NECP_ROUTE_RULE_NONE
||
8865 (type_aggregate_action
== NECP_ROUTE_RULE_ALLOW_INTERFACE
&&
8866 route_rule
->constrained_action
== NECP_ROUTE_RULE_DENY_INTERFACE
)) {
8867 // Deny wins if there is a conflict
8868 type_aggregate_action
= route_rule
->constrained_action
;
8873 if (type_aggregate_action
!= NECP_ROUTE_RULE_NONE
) {
8874 if (necp_debug
> 1) {
8875 NECPLOG(LOG_DEBUG
, "Route Allowed: C:%d WF:%d W:%d E:%d for Rule %d Allowed %d", route_rule
->cellular_action
, route_rule
->wifi_action
, route_rule
->wired_action
, route_rule
->expensive_action
, route_rule_id
, ((type_aggregate_action
== NECP_ROUTE_RULE_DENY_INTERFACE
) ? FALSE
: TRUE
));
8877 return (type_aggregate_action
== NECP_ROUTE_RULE_DENY_INTERFACE
) ? FALSE
: TRUE
;
8880 if (necp_debug
> 1 && !default_is_allowed
) {
8881 NECPLOG(LOG_DEBUG
, "Route Allowed: Using default for Rule %d Allowed %d", route_rule_id
, default_is_allowed
);
8883 return default_is_allowed
;
static bool
necp_route_is_allowed(struct rtentry *route, struct ifnet *interface, u_int32_t route_rule_id, u_int32_t *interface_type_denied)
{
    if ((route == NULL && interface == NULL) || route_rule_id == 0) {
        if (necp_debug > 1) {
            NECPLOG(LOG_DEBUG, "Route Allowed: no route or interface, Rule %d Allowed %d", route_rule_id, TRUE);
        }
        return TRUE;
    }

    if (ROUTE_RULE_IS_AGGREGATE(route_rule_id)) {
        struct necp_aggregate_route_rule *aggregate_route_rule = necp_lookup_aggregate_route_rule_locked(route_rule_id);
        if (aggregate_route_rule != NULL) {
            int index = 0;
            for (index = 0; index < MAX_AGGREGATE_ROUTE_RULES; index++) {
                u_int32_t sub_route_rule_id = aggregate_route_rule->rule_ids[index];
                if (sub_route_rule_id == 0) {
                    break;
                }
                if (!necp_route_is_allowed_inner(route, interface, sub_route_rule_id, interface_type_denied)) {
                    return FALSE;
                }
            }
        }
    } else {
        return necp_route_is_allowed_inner(route, interface, route_rule_id, interface_type_denied);
    }

    return TRUE;
}
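/*
 * Illustrative sketch (not part of this file): an aggregate route rule is a
 * fixed-size list of sub-rule IDs, terminated by a zero ID, and the aggregate
 * allows a route only if every listed sub-rule allows it. The types and the
 * is_allowed callback below are hypothetical stand-ins for the
 * necp_aggregate_route_rule lookup performed above.
 */
#if 0 /* standalone user-space sketch */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define SKETCH_MAX_AGGREGATE_RULES 16

struct sketch_aggregate_rule {
    uint32_t rule_ids[SKETCH_MAX_AGGREGATE_RULES];
};

static bool
sketch_aggregate_allows(const struct sketch_aggregate_rule *aggregate,
    bool (*is_allowed)(uint32_t rule_id))
{
    for (size_t i = 0; i < SKETCH_MAX_AGGREGATE_RULES; i++) {
        uint32_t sub_rule_id = aggregate->rule_ids[i];
        if (sub_rule_id == 0) {
            break; // zero terminates the list
        }
        if (!is_allowed(sub_rule_id)) {
            return false; // any denying sub-rule denies the aggregate
        }
    }
    return true;
}
#endif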
bool
necp_packet_is_allowed_over_interface(struct mbuf *packet, struct ifnet *interface)
{
    bool is_allowed = TRUE;
    u_int32_t route_rule_id = necp_get_route_rule_id_from_packet(packet);
    if (route_rule_id != 0 &&
        interface != NULL) {
        lck_rw_lock_shared(&necp_kernel_policy_lock);
        is_allowed = necp_route_is_allowed(NULL, interface, necp_get_route_rule_id_from_packet(packet), NULL);
        lck_rw_done(&necp_kernel_policy_lock);
    }
    return is_allowed;
}
static bool
necp_netagents_allow_traffic(u_int32_t *netagent_ids, size_t netagent_id_count)
{
    size_t netagent_cursor;
    for (netagent_cursor = 0; netagent_cursor < netagent_id_count; netagent_cursor++) {
        struct necp_uuid_id_mapping *mapping = NULL;
        u_int32_t netagent_id = netagent_ids[netagent_cursor];
        if (netagent_id == 0) {
            break;
        }
        mapping = necp_uuid_lookup_uuid_with_service_id_locked(netagent_id);
        if (mapping != NULL) {
            u_int32_t agent_flags = 0;
            agent_flags = netagent_get_flags(mapping->uuid);
            if (agent_flags & NETAGENT_FLAG_REGISTERED) {
                if (agent_flags & NETAGENT_FLAG_ACTIVE) {
                    continue;
                } else if ((agent_flags & NETAGENT_FLAG_VOLUNTARY) == 0) {
                    return FALSE;
                }
            }
        }
    }
    return TRUE;
}
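/*
 * Illustrative sketch (not part of this file): the check above blocks traffic
 * when a matched agent is registered but neither active nor voluntary. The
 * SKETCH_* flag constants below are hypothetical placeholders for the
 * NETAGENT_FLAG_* bits defined in net/network_agent.h; only the decision
 * structure is meant to match.
 */
#if 0 /* standalone user-space sketch */
#include <stdbool.h>
#include <stdint.h>

#define SKETCH_AGENT_REGISTERED 0x1
#define SKETCH_AGENT_ACTIVE     0x2
#define SKETCH_AGENT_VOLUNTARY  0x4

static bool
sketch_agent_allows_traffic(uint32_t agent_flags)
{
    if (agent_flags & SKETCH_AGENT_REGISTERED) {
        if (agent_flags & SKETCH_AGENT_ACTIVE) {
            return true;  // registered and active: traffic may proceed
        }
        if ((agent_flags & SKETCH_AGENT_VOLUNTARY) == 0) {
            return false; // registered, inactive, and mandatory: block
        }
    }
    return true; // unregistered or voluntary agents do not gate traffic
}
#endif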
8958 necp_socket_is_allowed_to_send_recv_internal(struct inpcb
*inp
, struct sockaddr
*override_local_addr
, struct sockaddr
*override_remote_addr
, ifnet_t interface
, necp_kernel_policy_id
*return_policy_id
, u_int32_t
*return_route_rule_id
, necp_kernel_policy_id
*return_skip_policy_id
)
8960 u_int32_t verifyifindex
= interface
? interface
->if_index
: 0;
8961 bool allowed_to_receive
= TRUE
;
8962 struct necp_socket_info info
;
8963 u_int32_t flowhash
= 0;
8964 necp_kernel_policy_result service_action
= 0;
8965 necp_kernel_policy_service service
= { 0, 0 };
8966 u_int32_t route_rule_id
= 0;
8967 struct rtentry
*route
= NULL
;
8968 u_int32_t interface_type_denied
= IFRTYPE_FUNCTIONAL_UNKNOWN
;
8969 necp_kernel_policy_result drop_dest_policy_result
= NECP_KERNEL_POLICY_RESULT_NONE
;
8970 necp_drop_all_bypass_check_result_t drop_all_bypass
= NECP_DROP_ALL_BYPASS_CHECK_RESULT_NONE
;
8971 u_int32_t netagent_ids
[NECP_MAX_NETAGENTS
];
8972 memset(&netagent_ids
, 0, sizeof(netagent_ids
));
8974 if (return_policy_id
) {
8975 *return_policy_id
= NECP_KERNEL_POLICY_ID_NONE
;
8977 if (return_skip_policy_id
) {
8978 *return_skip_policy_id
= NECP_KERNEL_POLICY_ID_NONE
;
8980 if (return_route_rule_id
) {
8981 *return_route_rule_id
= 0;
8988 route
= inp
->inp_route
.ro_rt
;
8990 struct socket
*so
= inp
->inp_socket
;
8992 u_int32_t drop_order
= necp_process_drop_order(so
->so_cred
);
8994 // Don't lock. Possible race condition, but we don't want the performance hit.
8995 if (necp_kernel_socket_policies_count
== 0 ||
8996 (!(inp
->inp_flags2
& INP2_WANT_APP_POLICY
) && necp_kernel_socket_policies_non_app_count
== 0)) {
8997 if (necp_drop_all_order
> 0 || drop_order
> 0) {
8998 if (necp_socket_bypass(override_local_addr
, override_remote_addr
, inp
)) {
8999 allowed_to_receive
= TRUE
;
9001 allowed_to_receive
= FALSE
;
9007 // If this socket is connected, or we are not taking addresses into account, try to reuse last result
9008 if ((necp_socket_is_connected(inp
) || (override_local_addr
== NULL
&& override_remote_addr
== NULL
)) && inp
->inp_policyresult
.policy_id
!= NECP_KERNEL_POLICY_ID_NONE
) {
9009 bool policies_have_changed
= FALSE
;
9010 bool route_allowed
= TRUE
;
9012 if (inp
->inp_policyresult
.policy_gencount
!= necp_kernel_socket_policies_gencount
) {
9013 policies_have_changed
= TRUE
;
9015 if (inp
->inp_policyresult
.results
.route_rule_id
!= 0) {
9016 lck_rw_lock_shared(&necp_kernel_policy_lock
);
9017 if (!necp_route_is_allowed(route
, interface
, inp
->inp_policyresult
.results
.route_rule_id
, &interface_type_denied
)) {
9018 route_allowed
= FALSE
;
9020 lck_rw_done(&necp_kernel_policy_lock
);
9024 if (!policies_have_changed
) {
9025 if (!route_allowed
||
9026 inp
->inp_policyresult
.results
.result
== NECP_KERNEL_POLICY_RESULT_DROP
||
9027 inp
->inp_policyresult
.results
.result
== NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT
||
9028 (inp
->inp_policyresult
.results
.result
== NECP_KERNEL_POLICY_RESULT_IP_TUNNEL
&& interface
&&
9029 inp
->inp_policyresult
.results
.result_parameter
.tunnel_interface_index
!= verifyifindex
)) {
9030 allowed_to_receive
= FALSE
;
9032 if (return_policy_id
) {
9033 *return_policy_id
= inp
->inp_policyresult
.policy_id
;
9035 if (return_skip_policy_id
) {
9036 *return_skip_policy_id
= inp
->inp_policyresult
.skip_policy_id
;
9038 if (return_route_rule_id
) {
9039 *return_route_rule_id
= inp
->inp_policyresult
.results
.route_rule_id
;
9046 // Check for loopback exception
9047 if (necp_socket_bypass(override_local_addr
, override_remote_addr
, inp
)) {
9048 allowed_to_receive
= TRUE
;
9052 // Actually calculate policy result
9053 lck_rw_lock_shared(&necp_kernel_policy_lock
);
9054 necp_socket_fillout_info_locked(inp
, override_local_addr
, override_remote_addr
, 0, drop_order
, &info
);
9056 flowhash
= necp_socket_calc_flowhash_locked(&info
);
9057 if (inp
->inp_policyresult
.policy_id
!= NECP_KERNEL_POLICY_ID_NONE
&&
9058 inp
->inp_policyresult
.policy_gencount
== necp_kernel_socket_policies_gencount
&&
9059 inp
->inp_policyresult
.flowhash
== flowhash
) {
9060 if (inp
->inp_policyresult
.results
.result
== NECP_KERNEL_POLICY_RESULT_DROP
||
9061 inp
->inp_policyresult
.results
.result
== NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT
||
9062 (inp
->inp_policyresult
.results
.result
== NECP_KERNEL_POLICY_RESULT_IP_TUNNEL
&& interface
&&
9063 inp
->inp_policyresult
.results
.result_parameter
.tunnel_interface_index
!= verifyifindex
) ||
9064 (inp
->inp_policyresult
.results
.route_rule_id
!= 0 &&
9065 !necp_route_is_allowed(route
, interface
, inp
->inp_policyresult
.results
.route_rule_id
, &interface_type_denied
))) {
9066 allowed_to_receive
= FALSE
;
9068 if (return_policy_id
) {
9069 *return_policy_id
= inp
->inp_policyresult
.policy_id
;
9071 if (return_route_rule_id
) {
9072 *return_route_rule_id
= inp
->inp_policyresult
.results
.route_rule_id
;
9074 if (return_skip_policy_id
) {
9075 *return_skip_policy_id
= inp
->inp_policyresult
.skip_policy_id
;
9078 lck_rw_done(&necp_kernel_policy_lock
);
9082 u_int32_t route_rule_id_array
[MAX_AGGREGATE_ROUTE_RULES
];
9083 size_t route_rule_id_array_count
= 0;
9084 struct necp_kernel_socket_policy
*matched_policy
= necp_socket_find_policy_match_with_info_locked(necp_kernel_socket_policies_map
[NECP_SOCKET_MAP_APP_ID_TO_BUCKET(info
.application_id
)], &info
, NULL
, route_rule_id_array
, &route_rule_id_array_count
, MAX_AGGREGATE_ROUTE_RULES
, &service_action
, &service
, netagent_ids
, NULL
, NECP_MAX_NETAGENTS
, NULL
, 0, current_proc(), return_skip_policy_id
, inp
->inp_route
.ro_rt
, &drop_dest_policy_result
, &drop_all_bypass
);
9086 if (route_rule_id_array_count
== 1) {
9087 route_rule_id
= route_rule_id_array
[0];
9088 } else if (route_rule_id_array_count
> 1) {
9089 route_rule_id
= necp_create_aggregate_route_rule(route_rule_id_array
);
9092 if (matched_policy
!= NULL
) {
9093 if (matched_policy
->result
== NECP_KERNEL_POLICY_RESULT_DROP
||
9094 matched_policy
->result
== NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT
||
9095 (matched_policy
->result
== NECP_KERNEL_POLICY_RESULT_IP_TUNNEL
&& interface
&&
9096 matched_policy
->result_parameter
.tunnel_interface_index
!= verifyifindex
) ||
9097 ((service_action
== NECP_KERNEL_POLICY_RESULT_TRIGGER_SCOPED
||
9098 service_action
== NECP_KERNEL_POLICY_RESULT_NO_TRIGGER_SCOPED
) &&
9099 service
.identifier
!= 0 && service
.identifier
!= NECP_NULL_SERVICE_ID
) ||
9100 (route_rule_id
!= 0 &&
9101 !necp_route_is_allowed(route
, interface
, route_rule_id
, &interface_type_denied
)) ||
9102 !necp_netagents_allow_traffic(netagent_ids
, NECP_MAX_NETAGENTS
)) {
9103 allowed_to_receive
= FALSE
;
9105 if (return_policy_id
) {
9106 *return_policy_id
= matched_policy
->id
;
9108 if (return_route_rule_id
) {
9109 *return_route_rule_id
= route_rule_id
;
9112 lck_rw_done(&necp_kernel_policy_lock
);
9114 if (necp_debug
> 1 && matched_policy
->id
!= inp
->inp_policyresult
.policy_id
) {
9115 NECPLOG(LOG_DEBUG
, "Socket Send/Recv Policy: Policy %d Allowed %d", return_policy_id
? *return_policy_id
: 0, allowed_to_receive
);
9119 bool drop_all
= false;
9120 if (necp_drop_all_order
> 0 || info
.drop_order
> 0 || drop_dest_policy_result
== NECP_KERNEL_POLICY_RESULT_DROP
) {
9122 if (drop_all_bypass
== NECP_DROP_ALL_BYPASS_CHECK_RESULT_NONE
) {
9123 drop_all_bypass
= necp_check_drop_all_bypass_result(NULL
);
9126 if (drop_all
&& drop_all_bypass
== NECP_DROP_ALL_BYPASS_CHECK_RESULT_FALSE
) {
9127 allowed_to_receive
= FALSE
;
9129 if (return_policy_id
) {
9130 *return_policy_id
= NECP_KERNEL_POLICY_ID_NO_MATCH
;
9132 if (return_route_rule_id
) {
9133 *return_route_rule_id
= route_rule_id
;
9138 lck_rw_done(&necp_kernel_policy_lock
);
9141 if (!allowed_to_receive
&& interface_type_denied
!= IFRTYPE_FUNCTIONAL_UNKNOWN
) {
9142 soevent(inp
->inp_socket
, (SO_FILT_HINT_LOCKED
| SO_FILT_HINT_IFDENIED
));
9145 return allowed_to_receive
;
bool
necp_socket_is_allowed_to_send_recv_v4(struct inpcb *inp, u_int16_t local_port, u_int16_t remote_port, struct in_addr *local_addr, struct in_addr *remote_addr, ifnet_t interface, necp_kernel_policy_id *return_policy_id, u_int32_t *return_route_rule_id, necp_kernel_policy_id *return_skip_policy_id)
{
    struct sockaddr_in local = {};
    struct sockaddr_in remote = {};
    local.sin_family = remote.sin_family = AF_INET;
    local.sin_len = remote.sin_len = sizeof(struct sockaddr_in);
    local.sin_port = local_port;
    remote.sin_port = remote_port;
    memcpy(&local.sin_addr, local_addr, sizeof(local.sin_addr));
    memcpy(&remote.sin_addr, remote_addr, sizeof(remote.sin_addr));

    return necp_socket_is_allowed_to_send_recv_internal(inp, (struct sockaddr *)&local, (struct sockaddr *)&remote, interface,
               return_policy_id, return_route_rule_id, return_skip_policy_id);
}
bool
necp_socket_is_allowed_to_send_recv_v6(struct inpcb *inp, u_int16_t local_port, u_int16_t remote_port, struct in6_addr *local_addr, struct in6_addr *remote_addr, ifnet_t interface, necp_kernel_policy_id *return_policy_id, u_int32_t *return_route_rule_id, necp_kernel_policy_id *return_skip_policy_id)
{
    struct sockaddr_in6 local = {};
    struct sockaddr_in6 remote = {};
    local.sin6_family = remote.sin6_family = AF_INET6;
    local.sin6_len = remote.sin6_len = sizeof(struct sockaddr_in6);
    local.sin6_port = local_port;
    remote.sin6_port = remote_port;
    memcpy(&local.sin6_addr, local_addr, sizeof(local.sin6_addr));
    memcpy(&remote.sin6_addr, remote_addr, sizeof(remote.sin6_addr));

    return necp_socket_is_allowed_to_send_recv_internal(inp, (struct sockaddr *)&local, (struct sockaddr *)&remote, interface,
               return_policy_id, return_route_rule_id, return_skip_policy_id);
}
bool
necp_socket_is_allowed_to_send_recv(struct inpcb *inp, ifnet_t interface, necp_kernel_policy_id *return_policy_id,
    u_int32_t *return_route_rule_id,
    necp_kernel_policy_id *return_skip_policy_id)
{
    return necp_socket_is_allowed_to_send_recv_internal(inp, NULL, NULL, interface,
               return_policy_id, return_route_rule_id,
               return_skip_policy_id);
}
int
necp_mark_packet_from_socket(struct mbuf *packet, struct inpcb *inp, necp_kernel_policy_id policy_id, u_int32_t route_rule_id,
    necp_kernel_policy_id skip_policy_id)
{
    if (packet == NULL || inp == NULL || !(packet->m_flags & M_PKTHDR)) {
        return EINVAL;
    }

    // Mark ID for Pass and IP Tunnel
    if (policy_id != NECP_KERNEL_POLICY_ID_NONE) {
        packet->m_pkthdr.necp_mtag.necp_policy_id = policy_id;
    } else if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_PASS ||
        inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL) {
        packet->m_pkthdr.necp_mtag.necp_policy_id = inp->inp_policyresult.policy_id;
    } else {
        packet->m_pkthdr.necp_mtag.necp_policy_id = NECP_KERNEL_POLICY_ID_NONE;
    }
    packet->m_pkthdr.necp_mtag.necp_last_interface_index = 0;
    if (route_rule_id != 0) {
        packet->m_pkthdr.necp_mtag.necp_route_rule_id = route_rule_id;
    } else {
        packet->m_pkthdr.necp_mtag.necp_route_rule_id = inp->inp_policyresult.results.route_rule_id;
    }
    packet->m_pkthdr.necp_mtag.necp_app_id = inp->inp_policyresult.app_id;

    if (skip_policy_id != NECP_KERNEL_POLICY_ID_NONE &&
        skip_policy_id != NECP_KERNEL_POLICY_ID_NO_MATCH) {
        // Only mark the skip policy if it is a valid policy ID
        packet->m_pkthdr.necp_mtag.necp_skip_policy_id = skip_policy_id;
    } else if (inp->inp_policyresult.results.filter_control_unit == NECP_FILTER_UNIT_NO_FILTER) {
        // Overload the meaning of "NECP_KERNEL_POLICY_ID_NO_MATCH"
        // to indicate that NECP_FILTER_UNIT_NO_FILTER was set
        // See necp_get_skip_policy_id_from_packet() and
        // necp_packet_should_skip_filters().
        packet->m_pkthdr.necp_mtag.necp_skip_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
    } else {
        packet->m_pkthdr.necp_mtag.necp_skip_policy_id = NECP_KERNEL_POLICY_ID_NONE;
    }

    return 0;
}
int
necp_mark_packet_from_ip(struct mbuf *packet, necp_kernel_policy_id policy_id)
{
    if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
        return EINVAL;
    }

    // Mark ID for Pass and IP Tunnel
    if (policy_id != NECP_KERNEL_POLICY_ID_NONE) {
        packet->m_pkthdr.necp_mtag.necp_policy_id = policy_id;
    } else {
        packet->m_pkthdr.necp_mtag.necp_policy_id = NECP_KERNEL_POLICY_ID_NONE;
    }

    return 0;
}

int
necp_mark_packet_from_interface(struct mbuf *packet, ifnet_t interface)
{
    if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
        return EINVAL;
    }

    // Mark ID for Pass and IP Tunnel
    if (interface != NULL) {
        packet->m_pkthdr.necp_mtag.necp_last_interface_index = interface->if_index;
    }

    return 0;
}

int
necp_mark_packet_as_keepalive(struct mbuf *packet, bool is_keepalive)
{
    if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
        return EINVAL;
    }

    if (is_keepalive) {
        packet->m_pkthdr.pkt_flags |= PKTF_KEEPALIVE;
    } else {
        packet->m_pkthdr.pkt_flags &= ~PKTF_KEEPALIVE;
    }

    return 0;
}
necp_kernel_policy_id
necp_get_policy_id_from_packet(struct mbuf *packet)
{
    if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
        return NECP_KERNEL_POLICY_ID_NONE;
    }

    return packet->m_pkthdr.necp_mtag.necp_policy_id;
}

necp_kernel_policy_id
necp_get_skip_policy_id_from_packet(struct mbuf *packet)
{
    if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
        return NECP_KERNEL_POLICY_ID_NONE;
    }

    // Check for overloaded value. See necp_mark_packet_from_socket().
    if (packet->m_pkthdr.necp_mtag.necp_skip_policy_id == NECP_KERNEL_POLICY_ID_NO_MATCH) {
        return NECP_KERNEL_POLICY_ID_NONE;
    }

    return packet->m_pkthdr.necp_mtag.necp_skip_policy_id;
}

bool
necp_packet_should_skip_filters(struct mbuf *packet)
{
    if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
        return false;
    }

    // Check for overloaded value. See necp_mark_packet_from_socket().
    return packet->m_pkthdr.necp_mtag.necp_skip_policy_id == NECP_KERNEL_POLICY_ID_NO_MATCH;
}

u_int32_t
necp_get_last_interface_index_from_packet(struct mbuf *packet)
{
    if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
        return 0;
    }

    return packet->m_pkthdr.necp_mtag.necp_last_interface_index;
}

u_int32_t
necp_get_route_rule_id_from_packet(struct mbuf *packet)
{
    if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
        return 0;
    }

    return packet->m_pkthdr.necp_mtag.necp_route_rule_id;
}

int
necp_get_app_uuid_from_packet(struct mbuf *packet,
    uuid_t app_uuid)
{
    if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
        return EINVAL;
    }

    bool found_mapping = FALSE;
    if (packet->m_pkthdr.necp_mtag.necp_app_id != 0) {
        lck_rw_lock_shared(&necp_kernel_policy_lock);
        struct necp_uuid_id_mapping *entry = necp_uuid_lookup_uuid_with_app_id_locked(packet->m_pkthdr.necp_mtag.necp_app_id);
        if (entry != NULL) {
            uuid_copy(app_uuid, entry->uuid);
            found_mapping = true;
        }
        lck_rw_done(&necp_kernel_policy_lock);
    }
    if (!found_mapping) {
        uuid_clear(app_uuid);
    }
    return 0;
}

bool
necp_get_is_keepalive_from_packet(struct mbuf *packet)
{
    if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
        return false;
    }

    return packet->m_pkthdr.pkt_flags & PKTF_KEEPALIVE;
}
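/*
 * Illustrative sketch (not part of this file): necp_mark_packet_from_socket()
 * overloads the NECP_KERNEL_POLICY_ID_NO_MATCH value in the skip-policy tag to
 * mean "no content filter", and the two getters above decode that single field
 * in different ways. The constants and struct below are hypothetical
 * simplifications of that encoding, kept only to show the decode split.
 */
#if 0 /* standalone user-space sketch */
#include <stdbool.h>
#include <stdint.h>

#define SKETCH_POLICY_ID_NONE     0u
#define SKETCH_POLICY_ID_NO_MATCH 1u // sentinel: "skip all content filters"

struct sketch_pkt_tag {
    uint32_t skip_policy_id;
};

static uint32_t
sketch_get_skip_policy_id(const struct sketch_pkt_tag *tag)
{
    // The sentinel is not a real policy ID, so report "none" instead
    if (tag->skip_policy_id == SKETCH_POLICY_ID_NO_MATCH) {
        return SKETCH_POLICY_ID_NONE;
    }
    return tag->skip_policy_id;
}

static bool
sketch_should_skip_filters(const struct sketch_pkt_tag *tag)
{
    // Only the sentinel value means "bypass content filters"
    return tag->skip_policy_id == SKETCH_POLICY_ID_NO_MATCH;
}
#endif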
u_int32_t
necp_socket_get_content_filter_control_unit(struct socket *so)
{
    struct inpcb *inp = sotoinpcb(so);

    if (inp == NULL) {
        return 0;
    }
    return inp->inp_policyresult.results.filter_control_unit;
}

bool
necp_socket_should_use_flow_divert(struct inpcb *inp)
{
    if (inp == NULL) {
        return FALSE;
    }

    return inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT;
}

u_int32_t
necp_socket_get_flow_divert_control_unit(struct inpcb *inp)
{
    if (inp == NULL) {
        return 0;
    }

    if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT) {
        return inp->inp_policyresult.results.result_parameter.flow_divert_control_unit;
    }

    return 0;
}

bool
necp_socket_should_rescope(struct inpcb *inp)
{
    if (inp == NULL) {
        return FALSE;
    }

    return inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED ||
           inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SCOPED_DIRECT;
}

u_int
necp_socket_get_rescope_if_index(struct inpcb *inp)
{
    if (inp == NULL) {
        return 0;
    }

    if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED) {
        return inp->inp_policyresult.results.result_parameter.scoped_interface_index;
    } else if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SCOPED_DIRECT) {
        return necp_get_primary_direct_interface_index();
    }

    return 0;
}
u_int32_t
necp_socket_get_effective_mtu(struct inpcb *inp, u_int32_t current_mtu)
{
    if (inp == NULL) {
        return current_mtu;
    }

    if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL &&
        (inp->inp_flags & INP_BOUND_IF) &&
        inp->inp_boundifp) {
        u_int bound_interface_index = inp->inp_boundifp->if_index;
        u_int tunnel_interface_index = inp->inp_policyresult.results.result_parameter.tunnel_interface_index;

        // The result is IP Tunnel, and is rescoping from one interface to another. Recalculate MTU.
        if (bound_interface_index != tunnel_interface_index) {
            ifnet_t tunnel_interface = NULL;

            ifnet_head_lock_shared();
            tunnel_interface = ifindex2ifnet[tunnel_interface_index];
            ifnet_head_done();

            if (tunnel_interface != NULL) {
                u_int32_t direct_tunnel_mtu = tunnel_interface->if_mtu;
                u_int32_t delegate_tunnel_mtu = (tunnel_interface->if_delegated.ifp != NULL) ? tunnel_interface->if_delegated.ifp->if_mtu : 0;
                if (delegate_tunnel_mtu != 0 &&
                    strncmp(tunnel_interface->if_name, "ipsec", strlen("ipsec")) == 0) {
                    // For ipsec interfaces, calculate the overhead from the delegate interface
                    u_int32_t tunnel_overhead = (u_int32_t)(esp_hdrsiz(NULL) + sizeof(struct ip6_hdr));
                    if (delegate_tunnel_mtu > tunnel_overhead) {
                        delegate_tunnel_mtu -= tunnel_overhead;
                    }

                    if (delegate_tunnel_mtu < direct_tunnel_mtu) {
                        // If the (delegate - overhead) < direct, return (delegate - overhead)
                        return delegate_tunnel_mtu;
                    } else {
                        // Otherwise return direct
                        return direct_tunnel_mtu;
                    }
                } else {
                    // For non-ipsec interfaces, just return the tunnel MTU
                    return direct_tunnel_mtu;
                }
            }
        }
    }

    // By default, just return the MTU passed in
    return current_mtu;
}
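/*
 * Illustrative sketch (not part of this file): the effective-MTU calculation
 * above subtracts an encapsulation overhead (an ESP header estimate plus an
 * IPv6 header) from the delegate interface MTU, then takes the smaller of that
 * and the tunnel interface MTU. The fixed overhead argument below is a
 * hypothetical placeholder for esp_hdrsiz(NULL) + sizeof(struct ip6_hdr).
 */
#if 0 /* standalone user-space sketch */
#include <stdint.h>

static uint32_t
sketch_effective_tunnel_mtu(uint32_t direct_mtu, uint32_t delegate_mtu,
    uint32_t tunnel_overhead)
{
    if (delegate_mtu == 0) {
        return direct_mtu; // no delegate: use the tunnel MTU as-is
    }
    if (delegate_mtu > tunnel_overhead) {
        delegate_mtu -= tunnel_overhead; // room left after encapsulation
    }
    return (delegate_mtu < direct_mtu) ? delegate_mtu : direct_mtu;
}

/* Example: 1500-byte delegate, 80-byte overhead, 1500-byte tunnel -> 1420. */
#endif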
ifnet_t
necp_get_ifnet_from_result_parameter(necp_kernel_policy_result_parameter *result_parameter)
{
    if (result_parameter == NULL) {
        return NULL;
    }

    return ifindex2ifnet[result_parameter->tunnel_interface_index];
}
9494 necp_packet_can_rebind_to_ifnet(struct mbuf
*packet
, struct ifnet
*interface
, struct route
*new_route
, int family
)
9496 bool found_match
= FALSE
;
9498 ifaddr_t
*addresses
= NULL
;
9499 union necp_sockaddr_union address_storage
;
9502 if (packet
== NULL
|| interface
== NULL
|| new_route
== NULL
|| (family
!= AF_INET
&& family
!= AF_INET6
)) {
9506 result
= ifnet_get_address_list_family(interface
, &addresses
, family
);
9508 NECPLOG(LOG_ERR
, "Failed to get address list for %s%d", ifnet_name(interface
), ifnet_unit(interface
));
9512 for (i
= 0; addresses
[i
] != NULL
; i
++) {
9513 ROUTE_RELEASE(new_route
);
9514 if (ifaddr_address(addresses
[i
], &address_storage
.sa
, sizeof(address_storage
)) == 0) {
9515 if (family
== AF_INET
) {
9516 struct ip
*ip
= mtod(packet
, struct ip
*);
9517 if (memcmp(&address_storage
.sin
.sin_addr
, &ip
->ip_src
, sizeof(ip
->ip_src
)) == 0) {
9518 struct sockaddr_in
*dst4
= (struct sockaddr_in
*)(void *)&new_route
->ro_dst
;
9519 dst4
->sin_family
= AF_INET
;
9520 dst4
->sin_len
= sizeof(struct sockaddr_in
);
9521 dst4
->sin_addr
= ip
->ip_dst
;
9522 rtalloc_scoped(new_route
, interface
->if_index
);
9523 if (!ROUTE_UNUSABLE(new_route
)) {
9528 } else if (family
== AF_INET6
) {
9529 struct ip6_hdr
*ip6
= mtod(packet
, struct ip6_hdr
*);
9530 if (memcmp(&address_storage
.sin6
.sin6_addr
, &ip6
->ip6_src
, sizeof(ip6
->ip6_src
)) == 0) {
9531 struct sockaddr_in6
*dst6
= (struct sockaddr_in6
*)(void *)&new_route
->ro_dst
;
9532 dst6
->sin6_family
= AF_INET6
;
9533 dst6
->sin6_len
= sizeof(struct sockaddr_in6
);
9534 dst6
->sin6_addr
= ip6
->ip6_dst
;
9535 rtalloc_scoped(new_route
, interface
->if_index
);
9536 if (!ROUTE_UNUSABLE(new_route
)) {
9546 ifnet_free_address_list(addresses
);
static bool
necp_addr_is_loopback(struct sockaddr *address)
{
    if (address == NULL) {
        return FALSE;
    }

    if (address->sa_family == AF_INET) {
        return ntohl(((struct sockaddr_in *)(void *)address)->sin_addr.s_addr) == INADDR_LOOPBACK;
    } else if (address->sa_family == AF_INET6) {
        return IN6_IS_ADDR_LOOPBACK(&((struct sockaddr_in6 *)(void *)address)->sin6_addr);
    }

    return FALSE;
}
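/*
 * Illustrative sketch (not part of this file): the same loopback test written
 * against user-space socket headers; it accepts only 127.0.0.1 and ::1. The
 * helper name is hypothetical and the snippet compiles as a standalone
 * translation unit.
 */
#if 0 /* standalone user-space sketch */
#include <stdbool.h>
#include <netinet/in.h>
#include <sys/socket.h>

static bool
sketch_sockaddr_is_loopback(const struct sockaddr *address)
{
    if (address == NULL) {
        return false;
    }
    if (address->sa_family == AF_INET) {
        const struct sockaddr_in *sin = (const struct sockaddr_in *)(const void *)address;
        return ntohl(sin->sin_addr.s_addr) == INADDR_LOOPBACK;
    }
    if (address->sa_family == AF_INET6) {
        const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)(const void *)address;
        return IN6_IS_ADDR_LOOPBACK(&sin6->sin6_addr);
    }
    return false;
}
#endif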

static bool
necp_is_loopback(struct sockaddr *local_addr, struct sockaddr *remote_addr, struct inpcb *inp, struct mbuf *packet, u_int32_t bound_interface_index)
{
	// Note: This function only checks for the loopback addresses.
	// In the future, we may want to expand to also allow any traffic
	// going through the loopback interface, but until then, this
	// check is cheaper.

	if (local_addr != NULL && necp_addr_is_loopback(local_addr)) {
		return TRUE;
	}

	if (remote_addr != NULL && necp_addr_is_loopback(remote_addr)) {
		return TRUE;
	}

	if (inp != NULL) {
		if ((inp->inp_flags & INP_BOUND_IF) && inp->inp_boundifp && (inp->inp_boundifp->if_flags & IFF_LOOPBACK)) {
			return TRUE;
		}
		if (inp->inp_vflag & INP_IPV4) {
			if (ntohl(inp->inp_laddr.s_addr) == INADDR_LOOPBACK ||
			    ntohl(inp->inp_faddr.s_addr) == INADDR_LOOPBACK) {
				return TRUE;
			}
		} else if (inp->inp_vflag & INP_IPV6) {
			if (IN6_IS_ADDR_LOOPBACK(&inp->in6p_laddr) ||
			    IN6_IS_ADDR_LOOPBACK(&inp->in6p_faddr)) {
				return TRUE;
			}
		}
	} else if (bound_interface_index != IFSCOPE_NONE && lo_ifp->if_index == bound_interface_index) {
		return TRUE;
	}

	if (packet != NULL) {
		struct ip *ip = mtod(packet, struct ip *);
		if (ip->ip_v == 4) {
			if (ntohl(ip->ip_src.s_addr) == INADDR_LOOPBACK) {
				return TRUE;
			}
			if (ntohl(ip->ip_dst.s_addr) == INADDR_LOOPBACK) {
				return TRUE;
			}
		} else if (ip->ip_v == 6) {
			struct ip6_hdr *ip6 = mtod(packet, struct ip6_hdr *);
			if (IN6_IS_ADDR_LOOPBACK(&ip6->ip6_src)) {
				return TRUE;
			}
			if (IN6_IS_ADDR_LOOPBACK(&ip6->ip6_dst)) {
				return TRUE;
			}
		}
	}

	return FALSE;
}

static bool
necp_is_intcoproc(struct inpcb *inp, struct mbuf *packet)
{
	if (inp != NULL) {
		if (!(inp->inp_vflag & INP_IPV6)) {
			return false;
		}
		if (INP_INTCOPROC_ALLOWED(inp)) {
			return true;
		}
		if ((inp->inp_flags & INP_BOUND_IF) &&
		    IFNET_IS_INTCOPROC(inp->inp_boundifp)) {
			return true;
		}
		return false;
	}
	if (packet != NULL) {
		struct ip6_hdr *ip6 = mtod(packet, struct ip6_hdr *);
		if ((ip6->ip6_vfc & IPV6_VERSION_MASK) == IPV6_VERSION &&
		    IN6_IS_ADDR_LINKLOCAL(&ip6->ip6_dst) &&
		    ip6->ip6_dst.s6_addr32[2] == ntohl(0xaede48ff) &&
		    ip6->ip6_dst.s6_addr32[3] == ntohl(0xfe334455)) {
			return true;
		}
	}

	return false;
}
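
/*
 * Illustrative note (not part of the upstream file): the two hard-coded words
 * compared above select a link-local destination whose interface identifier
 * is aede:48ff:fe33:4455 (e.g. fe80::aede:48ff:fe33:4455) -- s6_addr32[2] ==
 * 0xaede48ff covers address bytes 8-11 (ae:de:48:ff) and s6_addr32[3] ==
 * 0xfe334455 covers bytes 12-15 (fe:33:44:55). The sketch below rebuilds that
 * address only to make the byte layout explicit; the helper name is an
 * assumption for illustration.
 */
#if 0 /* example only, never compiled */
static void
necp_example_intcoproc_destination(struct in6_addr *dst)
{
	memset(dst, 0, sizeof(*dst));
	dst->s6_addr[0] = 0xfe;                 // link-local prefix fe80::/10
	dst->s6_addr[1] = 0x80;
	dst->s6_addr32[2] = htonl(0xaede48ff);  // interface identifier, upper half
	dst->s6_addr32[3] = htonl(0xfe334455);  // interface identifier, lower half
}
#endif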

static bool
necp_address_matches_drop_dest_policy(union necp_sockaddr_union *sau, u_int32_t session_order)
{
	char dest_str[MAX_IPv6_STR_LEN];

	if (necp_drop_dest_debug > 0) {
		if (sau->sa.sa_family == AF_INET) {
			(void) inet_ntop(AF_INET, &sau->sin.sin_addr, dest_str, sizeof(dest_str));
		} else if (sau->sa.sa_family == AF_INET6) {
			(void) inet_ntop(AF_INET6, &sau->sin6.sin6_addr, dest_str, sizeof(dest_str));
		} else {
			dest_str[0] = 0;
		}
	}
	for (u_int32_t i = 0; i < necp_drop_dest_policy.entry_count; i++) {
		struct necp_drop_dest_entry *necp_drop_dest_entry = &necp_drop_dest_policy.entries[i];
		struct necp_policy_condition_addr *npca = &necp_drop_dest_entry->cond_addr;

		if (session_order >= necp_drop_dest_entry->order && necp_is_addr_in_subnet(&sau->sa, &npca->address.sa, npca->prefix)) {
			if (necp_drop_dest_debug > 0) {
				char subnet_str[MAX_IPv6_STR_LEN];
				struct proc *p = current_proc();
				pid_t pid = proc_pid(p);

				if (sau->sa.sa_family == AF_INET) {
					(void) inet_ntop(AF_INET, &npca->address.sin.sin_addr, subnet_str, sizeof(subnet_str));
					os_log(OS_LOG_DEFAULT, "%s (process %s:%u) %s matches %s/%u", __func__, proc_best_name(p), pid, dest_str, subnet_str, npca->prefix);
				} else if (sau->sa.sa_family == AF_INET6) {
					(void) inet_ntop(AF_INET6, &npca->address.sin6.sin6_addr, subnet_str, sizeof(subnet_str));
					os_log(OS_LOG_DEFAULT, "%s (process %s:%u) %s matches %s/%u", __func__, proc_best_name(p), pid, dest_str, subnet_str, npca->prefix);
				}
			}
			return true;
		}
	}
	if (necp_drop_dest_debug > 1) {
		struct proc *p = current_proc();
		pid_t pid = proc_pid(p);

		os_log(OS_LOG_DEFAULT, "%s (process %s:%u) %s no match", __func__, proc_best_name(p), pid, dest_str);
	}
	return false;
}
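
/*
 * Illustrative sketch (not part of the upstream file): each drop-dest entry
 * pairs a subnet condition with an order derived from a session priority, and
 * an address only "matches" when the querying session's order value is
 * greater than or equal to the entry's order (i.e. the session has the same
 * or lower precedence) and the address falls inside the subnet. The helper
 * name and concrete values below are assumptions for illustration only.
 */
#if 0 /* example only, never compiled */
static void
necp_example_drop_dest_match(void)
{
	union necp_sockaddr_union dest = {
		.sin = {
			.sin_len = sizeof(struct sockaddr_in),
			.sin_family = AF_INET,
		},
	};
	dest.sin.sin_addr.s_addr = htonl(0xC0A80105);   // 192.168.1.5

	// With a policy entry covering 192.168.0.0/16 at some order N, a session
	// whose order is >= N sees a match (drop); a higher-precedence session
	// (order < N) is unaffected.
	bool dropped = necp_address_matches_drop_dest_policy(&dest, /* session_order */ 2000);
	(void)dropped;
}
#endif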

static int
sysctl_handle_necp_drop_dest_level SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2, oidp)
	int changed = 0;
	int error = 0;
	struct necp_drop_dest_policy tmp_drop_dest_policy;
	struct proc *p = current_proc();
	pid_t pid = proc_pid(p);

	if (req->newptr != USER_ADDR_NULL && proc_suser(current_proc()) != 0 &&
	    priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES, 0) != 0) {
		NECPLOG(LOG_ERR, "%s (process %s:%u) not permitted", __func__, proc_best_name(p), pid);
		return EPERM;
	}
	if (req->newptr != USER_ADDR_NULL && req->newlen != sizeof(struct necp_drop_dest_policy)) {
		NECPLOG(LOG_ERR, "%s (process %s:%u) bad newlen %lu", __func__, proc_best_name(p), pid, req->newlen);
		return EINVAL;
	}

	memcpy(&tmp_drop_dest_policy, &necp_drop_dest_policy, sizeof(struct necp_drop_dest_policy));
	error = sysctl_io_opaque(req, &tmp_drop_dest_policy, sizeof(struct necp_drop_dest_policy), &changed);
	if (error != 0) {
		NECPLOG(LOG_ERR, "%s (process %s:%u) sysctl_io_opaque() error %d", __func__, proc_best_name(p), pid, error);
		return error;
	}
	if (changed == 0 || req->newptr == USER_ADDR_NULL) {
		return error;
	}

	//
	// Validate the passed parameters
	//
	if (tmp_drop_dest_policy.entry_count >= MAX_NECP_DROP_DEST_LEVEL_ADDRS) {
		NECPLOG(LOG_ERR, "%s (process %s:%u) bad entry_count %u", __func__, proc_best_name(p), pid, tmp_drop_dest_policy.entry_count);
		return EINVAL;
	}
	for (u_int32_t i = 0; i < tmp_drop_dest_policy.entry_count; i++) {
		struct necp_drop_dest_entry *tmp_drop_dest_entry = &tmp_drop_dest_policy.entries[i];
		struct necp_policy_condition_addr *npca = &tmp_drop_dest_entry->cond_addr;

		switch (tmp_drop_dest_entry->level) {
		case NECP_SESSION_PRIORITY_UNKNOWN:
			if (tmp_drop_dest_policy.entry_count != 0) {
				NECPLOG(LOG_ERR, "%s (process %s:%u) NECP_SESSION_PRIORITY_UNKNOWN bad entry_count %u", __func__, proc_best_name(p), pid, tmp_drop_dest_policy.entry_count);
				return EINVAL;
			}
			break;
		case NECP_SESSION_PRIORITY_CONTROL:
		case NECP_SESSION_PRIORITY_PRIVILEGED_TUNNEL:
		case NECP_SESSION_PRIORITY_HIGH:
		case NECP_SESSION_PRIORITY_DEFAULT:
		case NECP_SESSION_PRIORITY_LOW:
			if (tmp_drop_dest_policy.entry_count == 0) {
				NECPLOG(LOG_ERR, "%s (process %s:%u) priority %u entry_count 0", __func__, proc_best_name(p), pid, tmp_drop_dest_entry->level);
				return EINVAL;
			}
			break;
		default:
			NECPLOG(LOG_ERR, "%s (process %s:%u) bad level %u", __func__, proc_best_name(p), pid, tmp_drop_dest_entry->level);
			return EINVAL;
		}

		switch (npca->address.sa.sa_family) {
		case AF_INET:
			if (npca->prefix > 32) {
				NECPLOG(LOG_ERR, "%s (process %s:%u) AF_INET bad prefix %u", __func__, proc_best_name(p), pid, npca->prefix);
				return EINVAL;
			}
			if (npca->address.sin.sin_len != sizeof(struct sockaddr_in)) {
				NECPLOG(LOG_ERR, "%s (process %s:%u) AF_INET bad sin_len %u", __func__, proc_best_name(p), pid, npca->address.sin.sin_len);
				return EINVAL;
			}
			if (npca->address.sin.sin_port != 0) {
				NECPLOG(LOG_ERR, "%s (process %s:%u) AF_INET bad sin_port %u, not zero", __func__, proc_best_name(p), pid, npca->address.sin.sin_port);
				return EINVAL;
			}
			break;
		case AF_INET6:
			if (npca->prefix > 128) {
				NECPLOG(LOG_ERR, "%s (process %s:%u) AF_INET6 bad prefix %u", __func__, proc_best_name(p), pid, npca->prefix);
				return EINVAL;
			}
			if (npca->address.sin6.sin6_len != sizeof(struct sockaddr_in6)) {
				NECPLOG(LOG_ERR, "%s (process %s:%u) AF_INET6 bad sin6_len %u", __func__, proc_best_name(p), pid, npca->address.sin6.sin6_len);
				return EINVAL;
			}
			if (npca->address.sin6.sin6_port != 0) {
				NECPLOG(LOG_ERR, "%s (process %s:%u) AF_INET6 bad sin6_port %u, not zero", __func__, proc_best_name(p), pid, npca->address.sin6.sin6_port);
				return EINVAL;
			}
			if (npca->address.sin6.sin6_flowinfo != 0) {
				NECPLOG(LOG_ERR, "%s (process %s:%u) AF_INET6 bad sin6_flowinfo %u, not zero", __func__, proc_best_name(p), pid, npca->address.sin6.sin6_flowinfo);
				return EINVAL;
			}
			if (npca->address.sin6.sin6_scope_id != 0) {
				NECPLOG(LOG_ERR, "%s (process %s:%u) AF_INET6 bad sin6_scope_id %u, not zero", __func__, proc_best_name(p), pid, npca->address.sin6.sin6_scope_id);
				return EINVAL;
			}
			break;
		default:
			NECPLOG(LOG_ERR, "%s (process %s:%u) bad address family %u", __func__, proc_best_name(p), pid, npca->address.sa.sa_family);
			return EINVAL;
		}
	}

	//
	// Commit the changed policy
	//
	lck_rw_lock_exclusive(&necp_kernel_policy_lock);
	memset(&necp_drop_dest_policy, 0, sizeof(struct necp_drop_dest_policy));

	necp_drop_dest_policy.entry_count = tmp_drop_dest_policy.entry_count;
	for (u_int32_t i = 0; i < tmp_drop_dest_policy.entry_count; i++) {
		struct necp_drop_dest_entry *tmp_drop_dest_entry = &tmp_drop_dest_policy.entries[i];
		struct necp_drop_dest_entry *necp_drop_dest_entry = &necp_drop_dest_policy.entries[i];

		memcpy(necp_drop_dest_entry, tmp_drop_dest_entry, sizeof(struct necp_drop_dest_entry));

		necp_drop_dest_entry->order = necp_get_first_order_for_priority(necp_drop_dest_entry->level);
	}
	lck_rw_done(&necp_kernel_policy_lock);