/*
 * Copyright (c) 2013-2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/syslog.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <libkern/OSMalloc.h>
#include <sys/kernel.h>
#include <sys/kern_control.h>
#include <sys/kpi_mbuf.h>
#include <sys/proc_uuid_policy.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet/udp.h>
#include <netinet/in_pcb.h>
#include <netinet6/esp.h>
#include <net/flowhash.h>
#include <net/if_var.h>
#include <sys/kauth.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/priv.h> // for priv_check_cred() and PRIV_NET_PRIVILEGED_NECP_POLICIES
#include <sys/kern_event.h>
#include <net/network_agent.h>
#include <net/necp.h> // for the NECP_* definitions used throughout this file
/*
 * NECP - Network Extension Control Policy database
 * ------------------------------------------------
 * The goal of this module is to allow clients connecting via a
 * kernel control socket to create high-level policy sessions, which
 * are ingested into low-level kernel policies that control and tag
 * traffic at the application, socket, and IP layers.
 *
 *				Sessions
 *				--------
 * Each session owns a list of session policies, each of which can
 * specify any combination of conditions and a single result. Each
 * session also has a priority level (such as High, Default, or Low)
 * which is requested by the client. Based on the requested level,
 * a session order value is assigned to the session, which will be used
 * to sort kernel policies generated by the session. The session client
 * can specify the sub-order for each policy it creates, which will be
 * used to further sort the kernel policies.
 *
 *		Kernel Control Socket --> 1 necp_session --> list of necp_session_policy structs
 *
 *				Kernel Policies
 *				---------------
 * Whenever a session sends the Apply command, its policies are ingested
 * and generate kernel policies. There are two phases of kernel policy
 * ingestion:
 *
 * 1. The session policy is parsed to create kernel policies at the socket
 * and IP layers, when applicable. For example, a policy that requires
 * all traffic from App1 to Pass will generate a socket kernel policy to
 * match App1 and mark packets with ID1, and also an IP policy to match
 * ID1 and let the packet pass. This is handled in necp_apply_policy. The
 * resulting kernel policies are added to the global socket and IP layer
 * policy lists.
 *
 *		necp_session_policy --> necp_kernel_socket_policy and necp_kernel_ip_output_policy
 *		                        (necp_kernel_socket_policies)      (necp_kernel_ip_output_policies)
 *
 * 2. Once the global lists of kernel policies have been filled out, each
 * list is traversed to create optimized sub-lists ("Maps") which are used during
 * data-path evaluation. IP policies are sent into necp_kernel_ip_output_policies_map,
 * which hashes incoming packets based on marked socket-layer policies, and removes
 * duplicate or overlapping policies. Socket policies are sent into two maps,
 * necp_kernel_socket_policies_map and necp_kernel_socket_policies_app_layer_map.
 * The app layer map is used for policy checks coming in from user space, and is one
 * list with duplicate and overlapping policies removed. The socket map hashes based
 * on app UUID, and removes duplicate and overlapping policies.
 *
 *		necp_kernel_socket_policy --> necp_kernel_socket_policies_app_layer_map
 *		                          |-> necp_kernel_socket_policies_map
 *
 *		necp_kernel_ip_output_policies --> necp_kernel_ip_output_policies_map
 *
 *				Drop All Level
 *				--------------
 * The Drop All Level is a sysctl that controls the level at which policies are allowed
 * to override a global drop rule. If the value is 0, no drop rule is applied. If the value
 * is 1, all traffic is dropped. If the value is greater than 1, all kernel policies created
 * by a session with a priority level better than (numerically less than) the
 * Drop All Level will allow matching traffic to not be dropped. The Drop All Level is
 * dynamically interpreted into necp_drop_all_order, which specifies the equivalent assigned
 * session orders to be dropped.
 */
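/*
 * Illustrative note (not part of the original comment): because each priority
 * level owns a band of 1000 session orders (see necp_allocate_new_session_order
 * below), a Drop All Level of 3 is interpreted by sysctl_handle_necp_level as
 * necp_drop_all_order == necp_get_first_order_for_priority(3) == 2001, so only
 * sessions in the priority-1 and priority-2 bands (orders below 2001) can keep
 * their matching traffic from being dropped.
 */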
u_int32_t necp_drop_all_order = 0;
u_int32_t necp_drop_all_level = 0;

u_int32_t necp_pass_loopback = 1; // 0=Off, 1=On
u_int32_t necp_pass_keepalives = 1; // 0=Off, 1=On

u_int32_t necp_debug = 0; // 0=None, 1=Basic, 2=EveryMatch

u_int32_t necp_session_count = 0;
#define NECPLOG(level, format, ...) do { \
	log((level > LOG_NOTICE ? LOG_NOTICE : level), "%s: " format "\n", __FUNCTION__, __VA_ARGS__); \
} while (0)

#define NECPLOG0(level, msg) do { \
	log((level > LOG_NOTICE ? LOG_NOTICE : level), "%s: %s\n", __FUNCTION__, msg); \
} while (0)
#define LIST_INSERT_SORTED_ASCENDING(head, elm, field, sortfield, tmpelm) do { \
	if (LIST_EMPTY((head)) || (LIST_FIRST(head)->sortfield >= (elm)->sortfield)) { \
		LIST_INSERT_HEAD((head), elm, field); \
	} else { \
		LIST_FOREACH(tmpelm, head, field) { \
			if (LIST_NEXT(tmpelm, field) == NULL || LIST_NEXT(tmpelm, field)->sortfield >= (elm)->sortfield) { \
				LIST_INSERT_AFTER(tmpelm, elm, field); \
				break; \
			} \
		} \
	} \
} while (0)

#define LIST_INSERT_SORTED_TWICE_ASCENDING(head, elm, field, firstsortfield, secondsortfield, tmpelm) do { \
	if (LIST_EMPTY((head)) || (LIST_FIRST(head)->firstsortfield > (elm)->firstsortfield) || ((LIST_FIRST(head)->firstsortfield == (elm)->firstsortfield) && (LIST_FIRST(head)->secondsortfield >= (elm)->secondsortfield))) { \
		LIST_INSERT_HEAD((head), elm, field); \
	} else { \
		LIST_FOREACH(tmpelm, head, field) { \
			if (LIST_NEXT(tmpelm, field) == NULL || (LIST_NEXT(tmpelm, field)->firstsortfield > (elm)->firstsortfield) || ((LIST_NEXT(tmpelm, field)->firstsortfield == (elm)->firstsortfield) && (LIST_NEXT(tmpelm, field)->secondsortfield >= (elm)->secondsortfield))) { \
				LIST_INSERT_AFTER(tmpelm, elm, field); \
				break; \
			} \
		} \
	} \
} while (0)

#define LIST_INSERT_SORTED_THRICE_ASCENDING(head, elm, field, firstsortfield, secondsortfield, thirdsortfield, tmpelm) do { \
	if (LIST_EMPTY((head)) || (LIST_FIRST(head)->firstsortfield > (elm)->firstsortfield) || ((LIST_FIRST(head)->firstsortfield == (elm)->firstsortfield) && (LIST_FIRST(head)->secondsortfield >= (elm)->secondsortfield)) || ((LIST_FIRST(head)->firstsortfield == (elm)->firstsortfield) && (LIST_FIRST(head)->secondsortfield == (elm)->secondsortfield) && (LIST_FIRST(head)->thirdsortfield >= (elm)->thirdsortfield))) { \
		LIST_INSERT_HEAD((head), elm, field); \
	} else { \
		LIST_FOREACH(tmpelm, head, field) { \
			if (LIST_NEXT(tmpelm, field) == NULL || (LIST_NEXT(tmpelm, field)->firstsortfield > (elm)->firstsortfield) || ((LIST_NEXT(tmpelm, field)->firstsortfield == (elm)->firstsortfield) && (LIST_NEXT(tmpelm, field)->secondsortfield >= (elm)->secondsortfield)) || ((LIST_NEXT(tmpelm, field)->firstsortfield == (elm)->firstsortfield) && (LIST_NEXT(tmpelm, field)->secondsortfield == (elm)->secondsortfield) && (LIST_NEXT(tmpelm, field)->thirdsortfield >= (elm)->thirdsortfield))) { \
				LIST_INSERT_AFTER(tmpelm, elm, field); \
				break; \
			} \
		} \
	} \
} while (0)
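/*
 * Usage sketch (illustrative; the struct and variable names here are
 * hypothetical, not from this file): the sorted-insert macros keep a BSD
 * LIST ordered as elements are added, so no separate sort pass is needed.
 *
 *	struct example_elm {
 *		LIST_ENTRY(example_elm) chain;
 *		u_int32_t order;
 *	};
 *	LIST_HEAD(, example_elm) example_head;
 *	struct example_elm *new_elm, *tmp_elm;
 *
 *	LIST_INSERT_SORTED_ASCENDING(&example_head, new_elm, chain, order, tmp_elm);
 *
 * This inserts new_elm in front of the first element whose order field is
 * >= new_elm->order. The TWICE/THRICE variants break ties with a second and
 * third key, which is how kernel policies below stay sorted by session order
 * and policy sub-order.
 */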
#define NECP_KERNEL_CONDITION_ALL_INTERFACES 0x00001
#define NECP_KERNEL_CONDITION_BOUND_INTERFACE 0x00002
#define NECP_KERNEL_CONDITION_PROTOCOL 0x00004
#define NECP_KERNEL_CONDITION_LOCAL_START 0x00008
#define NECP_KERNEL_CONDITION_LOCAL_END 0x00010
#define NECP_KERNEL_CONDITION_LOCAL_PREFIX 0x00020
#define NECP_KERNEL_CONDITION_REMOTE_START 0x00040
#define NECP_KERNEL_CONDITION_REMOTE_END 0x00080
#define NECP_KERNEL_CONDITION_REMOTE_PREFIX 0x00100
#define NECP_KERNEL_CONDITION_APP_ID 0x00200
#define NECP_KERNEL_CONDITION_REAL_APP_ID 0x00400
#define NECP_KERNEL_CONDITION_DOMAIN 0x00800
#define NECP_KERNEL_CONDITION_ACCOUNT_ID 0x01000
#define NECP_KERNEL_CONDITION_POLICY_ID 0x02000
#define NECP_KERNEL_CONDITION_PID 0x04000
#define NECP_KERNEL_CONDITION_UID 0x08000
#define NECP_KERNEL_CONDITION_LAST_INTERFACE 0x10000 // Only set from packets looping between interfaces
#define NECP_KERNEL_CONDITION_TRAFFIC_CLASS 0x20000
#define NECP_KERNEL_CONDITION_ENTITLEMENT 0x40000
struct necp_service_registration {
	LIST_ENTRY(necp_service_registration) session_chain;
	LIST_ENTRY(necp_service_registration) kernel_chain;
	u_int32_t service_id;
};
struct necp_session {
	u_int32_t control_unit;
	u_int32_t session_priority; // Descriptive priority rating
	u_int32_t session_order;

	bool proc_locked; // Messages must come from proc_uuid
	uuid_t proc_uuid;
	pid_t proc_pid;

	bool dirty;
	LIST_HEAD(_policies, necp_session_policy) policies;

	LIST_HEAD(_services, necp_service_registration) services;
};
struct necp_socket_info {
	union necp_sockaddr_union local_addr;
	union necp_sockaddr_union remote_addr;
	u_int32_t bound_interface_index;
	u_int32_t traffic_class;
	u_int32_t application_id;
	u_int32_t real_application_id;
	u_int32_t account_id;
};
static kern_ctl_ref necp_kctlref;
static u_int32_t necp_family;
static OSMallocTag necp_malloc_tag;
static lck_grp_attr_t *necp_kernel_policy_grp_attr = NULL;
static lck_attr_t *necp_kernel_policy_mtx_attr = NULL;
static lck_grp_t *necp_kernel_policy_mtx_grp = NULL;
decl_lck_rw_data(static, necp_kernel_policy_lock);

static lck_grp_attr_t *necp_route_rule_grp_attr = NULL;
static lck_attr_t *necp_route_rule_mtx_attr = NULL;
static lck_grp_t *necp_route_rule_mtx_grp = NULL;
decl_lck_rw_data(static, necp_route_rule_lock);

static necp_policy_id necp_last_policy_id = 0;
static necp_kernel_policy_id necp_last_kernel_policy_id = 0;
static u_int32_t necp_last_uuid_id = 0;
static u_int32_t necp_last_string_id = 0;
static u_int32_t necp_last_route_rule_id = 0;
static u_int32_t necp_last_aggregate_route_rule_id = 0;
/*
 * On modification, invalidate cached lookups by bumping the generation count.
 * Other calls will need to take the slowpath of taking
 * the subsystem lock.
 */
static volatile int32_t necp_kernel_socket_policies_gencount;
#define BUMP_KERNEL_SOCKET_POLICIES_GENERATION_COUNT() do { \
	if (OSIncrementAtomic(&necp_kernel_socket_policies_gencount) == (INT32_MAX - 1)) { \
		necp_kernel_socket_policies_gencount = 1; \
	} \
} while (0)
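/*
 * Illustrative sketch (assumption, not code from this file): a caller that
 * caches the result of a policy lookup can remember the generation count it
 * observed and compare it later; a mismatch means the policy set changed and
 * the cached result must be recomputed under necp_kernel_policy_lock.
 *
 *	int32_t cached_gencount = necp_kernel_socket_policies_gencount;
 *	// ... use previously computed policy result ...
 *	if (cached_gencount != necp_kernel_socket_policies_gencount) {
 *		// slow path: take the subsystem lock and re-evaluate
 *	}
 */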
static u_int32_t necp_kernel_application_policies_condition_mask;
static size_t necp_kernel_application_policies_count;
static u_int32_t necp_kernel_socket_policies_condition_mask;
static size_t necp_kernel_socket_policies_count;
static size_t necp_kernel_socket_policies_non_app_count;
static LIST_HEAD(_necpkernelsocketconnectpolicies, necp_kernel_socket_policy) necp_kernel_socket_policies;
#define NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS 5
#define NECP_SOCKET_MAP_APP_ID_TO_BUCKET(appid) (appid ? (appid%(NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS - 1) + 1) : 0)
static struct necp_kernel_socket_policy **necp_kernel_socket_policies_map[NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS];
static struct necp_kernel_socket_policy **necp_kernel_socket_policies_app_layer_map;
/*
 * A note on policy 'maps': these are used for boosting efficiency when matching policies. For each dimension of the map,
 * such as an ID, the 0 bucket is reserved for sockets/packets that do not have this parameter, while the other
 * buckets lead to an array of policy pointers that form the list applicable when the (parameter%(NUM_BUCKETS - 1) + 1) == bucket_index.
 *
 * For example, a packet with policy ID of 7, when there are 4 ID buckets, will map to bucket (7%3 + 1) = 2.
 */
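/*
 * Applying the macros defined in this file (illustrative arithmetic): with
 * NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS == 5, an app ID of 7
 * hashes to NECP_SOCKET_MAP_APP_ID_TO_BUCKET(7) == (7 % 4) + 1 == 4, while an
 * app ID of 0 (no application) always lands in the reserved bucket 0. The IP
 * output map below uses the same scheme keyed on the kernel policy ID.
 */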
static u_int32_t necp_kernel_ip_output_policies_condition_mask;
static size_t necp_kernel_ip_output_policies_count;
static size_t necp_kernel_ip_output_policies_non_id_count;
static LIST_HEAD(_necpkernelipoutputpolicies, necp_kernel_ip_output_policy) necp_kernel_ip_output_policies;
#define NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS 5
#define NECP_IP_OUTPUT_MAP_ID_TO_BUCKET(id) (id ? (id%(NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS - 1) + 1) : 0)
static struct necp_kernel_ip_output_policy **necp_kernel_ip_output_policies_map[NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS];
static struct necp_session *necp_create_session(u_int32_t control_unit);
static void necp_delete_session(struct necp_session *session);

static void necp_handle_policy_add(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
static void necp_handle_policy_get(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
static void necp_handle_policy_delete(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
static void necp_handle_policy_apply_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
static void necp_handle_policy_list_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
static void necp_handle_policy_delete_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
static void necp_handle_set_session_priority(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
static void necp_handle_lock_session_to_proc(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
static void necp_handle_register_service(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
static void necp_handle_unregister_service(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
static struct necp_session_policy *necp_policy_create(struct necp_session *session, necp_policy_order order, u_int8_t *conditions_array, u_int32_t conditions_array_size, u_int8_t *route_rules_array, u_int32_t route_rules_array_size, u_int8_t *result, u_int32_t result_size);
static struct necp_session_policy *necp_policy_find(struct necp_session *session, necp_policy_id policy_id);
static bool necp_policy_mark_for_deletion(struct necp_session *session, struct necp_session_policy *policy);
static bool necp_policy_mark_all_for_deletion(struct necp_session *session);
static bool necp_policy_delete(struct necp_session *session, struct necp_session_policy *policy);
static void necp_policy_apply_all(struct necp_session *session);
static necp_kernel_policy_id necp_kernel_socket_policy_add(necp_policy_id parent_policy_id, necp_policy_order order, u_int32_t session_order, int session_pid, u_int32_t condition_mask, u_int32_t condition_negated_mask, necp_app_id cond_app_id, necp_app_id cond_real_app_id, u_int32_t cond_account_id, char *domain, pid_t cond_pid, uid_t cond_uid, ifnet_t cond_bound_interface, struct necp_policy_condition_tc_range cond_traffic_class, u_int16_t cond_protocol, union necp_sockaddr_union *cond_local_start, union necp_sockaddr_union *cond_local_end, u_int8_t cond_local_prefix, union necp_sockaddr_union *cond_remote_start, union necp_sockaddr_union *cond_remote_end, u_int8_t cond_remote_prefix, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter);
static bool necp_kernel_socket_policy_delete(necp_kernel_policy_id policy_id);
static bool necp_kernel_socket_policies_reprocess(void);
static bool necp_kernel_socket_policies_update_uuid_table(void);
static inline struct necp_kernel_socket_policy *necp_socket_find_policy_match_with_info_locked(struct necp_kernel_socket_policy **policy_search_array, struct necp_socket_info *info, necp_kernel_policy_filter *return_filter, u_int32_t *return_route_rule_id, necp_kernel_policy_result *return_service_action, necp_kernel_policy_service *return_service, u_int32_t *return_netagent_array, size_t netagent_array_count);

static necp_kernel_policy_id necp_kernel_ip_output_policy_add(necp_policy_id parent_policy_id, necp_policy_order order, necp_policy_order suborder, u_int32_t session_order, int session_pid, u_int32_t condition_mask, u_int32_t condition_negated_mask, necp_kernel_policy_id cond_policy_id, ifnet_t cond_bound_interface, u_int32_t cond_last_interface_index, u_int16_t cond_protocol, union necp_sockaddr_union *cond_local_start, union necp_sockaddr_union *cond_local_end, u_int8_t cond_local_prefix, union necp_sockaddr_union *cond_remote_start, union necp_sockaddr_union *cond_remote_end, u_int8_t cond_remote_prefix, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter);
static bool necp_kernel_ip_output_policy_delete(necp_kernel_policy_id policy_id);
static bool necp_kernel_ip_output_policies_reprocess(void);
static bool necp_is_addr_in_range(struct sockaddr *addr, struct sockaddr *range_start, struct sockaddr *range_end);
static bool necp_is_range_in_range(struct sockaddr *inner_range_start, struct sockaddr *inner_range_end, struct sockaddr *range_start, struct sockaddr *range_end);
static bool necp_is_addr_in_subnet(struct sockaddr *addr, struct sockaddr *subnet_addr, u_int8_t subnet_prefix);
static int necp_addr_compare(struct sockaddr *sa1, struct sockaddr *sa2, int check_port);
static bool necp_buffer_compare_with_bit_prefix(u_int8_t *p1, u_int8_t *p2, u_int32_t bits);
static bool necp_is_loopback(struct sockaddr *local_addr, struct sockaddr *remote_addr, struct inpcb *inp, struct mbuf *packet);
struct necp_uuid_id_mapping {
	LIST_ENTRY(necp_uuid_id_mapping) chain;
	uuid_t uuid;
	u_int32_t id;
	u_int32_t table_refcount; // Add to UUID policy table count
};
static size_t necp_num_uuid_app_id_mappings;
static bool necp_uuid_app_id_mappings_dirty;
#define NECP_UUID_APP_ID_HASH_SIZE 64
static u_long necp_uuid_app_id_hash_mask;
static u_long necp_uuid_app_id_hash_num_buckets;
static LIST_HEAD(necp_uuid_id_mapping_head, necp_uuid_id_mapping) *necp_uuid_app_id_hashtbl, necp_uuid_service_id_list; // App map is a real hash table, service map is just a mapping
#define APPUUIDHASH(uuid) (&necp_uuid_app_id_hashtbl[uuid[0] & necp_uuid_app_id_hash_mask]) // Assume the first byte of UUIDs is evenly distributed
static u_int32_t necp_create_uuid_app_id_mapping(uuid_t uuid, bool *allocated_mapping, bool uuid_policy_table);
static bool necp_remove_uuid_app_id_mapping(uuid_t uuid, bool *removed_mapping, bool uuid_policy_table);

static struct necp_uuid_id_mapping *necp_uuid_lookup_service_id_locked(uuid_t uuid);
static struct necp_uuid_id_mapping *necp_uuid_lookup_uuid_with_service_id_locked(u_int32_t local_id);
static u_int32_t necp_create_uuid_service_id_mapping(uuid_t uuid);
static bool necp_remove_uuid_service_id_mapping(uuid_t uuid);
struct necp_string_id_mapping {
	LIST_ENTRY(necp_string_id_mapping) chain;
};
static LIST_HEAD(necp_string_id_mapping_list, necp_string_id_mapping) necp_account_id_list;
static u_int32_t necp_create_string_to_id_mapping(struct necp_string_id_mapping_list *list, char *domain);
static bool necp_remove_string_to_id_mapping(struct necp_string_id_mapping_list *list, char *domain);

static LIST_HEAD(_necp_kernel_service_list, necp_service_registration) necp_registered_service_list;

static char *necp_create_trimmed_domain(char *string, size_t length);
static inline int necp_count_dots(char *string, size_t length);
#define ROUTE_RULE_IS_AGGREGATE(ruleid) (ruleid > UINT16_MAX)

#define MAX_ROUTE_RULE_INTERFACES 10
struct necp_route_rule {
	LIST_ENTRY(necp_route_rule) chain;
	u_int32_t default_action;
	u_int8_t cellular_action;
	u_int8_t wifi_action;
	u_int8_t wired_action;
	u_int8_t expensive_action;
	u_int exception_if_indices[MAX_ROUTE_RULE_INTERFACES];
	u_int8_t exception_if_actions[MAX_ROUTE_RULE_INTERFACES];
};
static LIST_HEAD(necp_route_rule_list, necp_route_rule) necp_route_rules;
static u_int32_t necp_create_route_rule(struct necp_route_rule_list *list, u_int8_t *route_rules_array, u_int32_t route_rules_array_size);
static bool necp_remove_route_rule(struct necp_route_rule_list *list, u_int32_t route_rule_id);
static bool necp_route_is_allowed(struct rtentry *route, ifnet_t interface, u_int32_t route_rule_id, bool *cellular_denied);
static struct necp_route_rule *necp_lookup_route_rule_locked(struct necp_route_rule_list *list, u_int32_t route_rule_id);
#define MAX_AGGREGATE_ROUTE_RULES 16
struct necp_aggregate_route_rule {
	LIST_ENTRY(necp_aggregate_route_rule) chain;
	u_int32_t rule_ids[MAX_AGGREGATE_ROUTE_RULES];
};
static LIST_HEAD(necp_aggregate_route_rule_list, necp_aggregate_route_rule) necp_aggregate_route_rules;
static u_int32_t necp_create_aggregate_route_rule(u_int32_t *rule_ids);
// Sysctl definitions
static int sysctl_handle_necp_level SYSCTL_HANDLER_ARGS;

SYSCTL_NODE(_net, OID_AUTO, necp, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "NECP");
SYSCTL_INT(_net_necp, NECPCTL_PASS_LOOPBACK, pass_loopback, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_pass_loopback, 0, "");
SYSCTL_INT(_net_necp, NECPCTL_PASS_KEEPALIVES, pass_keepalives, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_pass_keepalives, 0, "");
SYSCTL_INT(_net_necp, NECPCTL_DEBUG, debug, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_debug, 0, "");
SYSCTL_PROC(_net_necp, NECPCTL_DROP_ALL_LEVEL, drop_all_level, CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW, &necp_drop_all_level, 0, &sysctl_handle_necp_level, "IU", "");
SYSCTL_LONG(_net_necp, NECPCTL_SOCKET_POLICY_COUNT, socket_policy_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_kernel_socket_policies_count, "");
SYSCTL_LONG(_net_necp, NECPCTL_SOCKET_NON_APP_POLICY_COUNT, socket_non_app_policy_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_kernel_socket_policies_non_app_count, "");
SYSCTL_LONG(_net_necp, NECPCTL_IP_POLICY_COUNT, ip_policy_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_kernel_ip_output_policies_count, "");
SYSCTL_INT(_net_necp, NECPCTL_SESSION_COUNT, session_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_session_count, 0, "");
// Session order allocation
static u_int32_t
necp_allocate_new_session_order(u_int32_t priority, u_int32_t control_unit)
{
	u_int32_t new_order = 0;

	// For now, just allocate 1000 orders for each priority
	if (priority == NECP_SESSION_PRIORITY_UNKNOWN || priority > NECP_SESSION_NUM_PRIORITIES) {
		priority = NECP_SESSION_PRIORITY_DEFAULT;
	}

	// Use the control unit to decide the offset into the priority list
	new_order = (control_unit) + ((priority - 1) * 1000);

	return (new_order);
}
static inline u_int32_t
necp_get_first_order_for_priority(u_int32_t priority)
{
	return (((priority - 1) * 1000) + 1);
}
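/*
 * Worked example (illustrative): a session on kernel control unit 5 that
 * requests priority 3 receives session order 5 + (3 - 1) * 1000 == 2005,
 * inside the 1000-wide band reserved for priority 3. The first order of that
 * band, necp_get_first_order_for_priority(3) == 2001, is what the
 * drop_all_level sysctl handler below stores in necp_drop_all_order.
 */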
static int
sysctl_handle_necp_level SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
	if (necp_drop_all_level == 0) {
		necp_drop_all_order = 0;
	} else {
		necp_drop_all_order = necp_get_first_order_for_priority(necp_drop_all_level);
	}
	return (error);
}
// Kernel Control functions
static errno_t necp_register_control(void);
static errno_t necp_ctl_connect(kern_ctl_ref kctlref, struct sockaddr_ctl *sac, void **unitinfo);
static errno_t necp_ctl_disconnect(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo);
static errno_t necp_ctl_send(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, mbuf_t m, int flags);
static void necp_ctl_rcvd(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int flags);
static errno_t necp_ctl_getopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int opt, void *data, size_t *len);
static errno_t necp_ctl_setopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int opt, void *data, size_t len);

static bool necp_send_ctl_data(struct necp_session *session, u_int8_t *buffer, size_t buffer_size);
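// NECP subsystem initialization: register the kernel control, set up the lock
// groups and locks, initialize the global policy/service/route-rule lists and
// the UUID hash table, and reset all counters and maps. The trailing block of
// NULL checks is the failure-cleanup path, which frees whatever lock state was
// allocated and deregisters the kernel control.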
	result = necp_register_control();

	necp_kernel_policy_grp_attr = lck_grp_attr_alloc_init();
	if (necp_kernel_policy_grp_attr == NULL) {
		NECPLOG0(LOG_ERR, "lck_grp_attr_alloc_init failed");
	}

	necp_kernel_policy_mtx_grp = lck_grp_alloc_init(NECP_CONTROL_NAME, necp_kernel_policy_grp_attr);
	if (necp_kernel_policy_mtx_grp == NULL) {
		NECPLOG0(LOG_ERR, "lck_grp_alloc_init failed");
	}

	necp_kernel_policy_mtx_attr = lck_attr_alloc_init();
	if (necp_kernel_policy_mtx_attr == NULL) {
		NECPLOG0(LOG_ERR, "lck_attr_alloc_init failed");
	}

	lck_rw_init(&necp_kernel_policy_lock, necp_kernel_policy_mtx_grp, necp_kernel_policy_mtx_attr);

	necp_route_rule_grp_attr = lck_grp_attr_alloc_init();
	if (necp_route_rule_grp_attr == NULL) {
		NECPLOG0(LOG_ERR, "lck_grp_attr_alloc_init failed");
	}

	necp_route_rule_mtx_grp = lck_grp_alloc_init("necp_route_rule", necp_route_rule_grp_attr);
	if (necp_route_rule_mtx_grp == NULL) {
		NECPLOG0(LOG_ERR, "lck_grp_alloc_init failed");
	}

	necp_route_rule_mtx_attr = lck_attr_alloc_init();
	if (necp_route_rule_mtx_attr == NULL) {
		NECPLOG0(LOG_ERR, "lck_attr_alloc_init failed");
	}

	lck_rw_init(&necp_route_rule_lock, necp_route_rule_mtx_grp, necp_route_rule_mtx_attr);

	LIST_INIT(&necp_kernel_socket_policies);
	LIST_INIT(&necp_kernel_ip_output_policies);

	LIST_INIT(&necp_account_id_list);

	LIST_INIT(&necp_uuid_service_id_list);

	LIST_INIT(&necp_registered_service_list);

	LIST_INIT(&necp_route_rules);
	LIST_INIT(&necp_aggregate_route_rules);

	necp_uuid_app_id_hashtbl = hashinit(NECP_UUID_APP_ID_HASH_SIZE, M_NECP, &necp_uuid_app_id_hash_mask);
	necp_uuid_app_id_hash_num_buckets = necp_uuid_app_id_hash_mask + 1;
	necp_num_uuid_app_id_mappings = 0;
	necp_uuid_app_id_mappings_dirty = FALSE;

	necp_kernel_application_policies_condition_mask = 0;
	necp_kernel_socket_policies_condition_mask = 0;
	necp_kernel_ip_output_policies_condition_mask = 0;

	necp_kernel_application_policies_count = 0;
	necp_kernel_socket_policies_count = 0;
	necp_kernel_socket_policies_non_app_count = 0;
	necp_kernel_ip_output_policies_count = 0;
	necp_kernel_ip_output_policies_non_id_count = 0;

	necp_last_policy_id = 0;
	necp_last_kernel_policy_id = 0;
	necp_last_uuid_id = 0;
	necp_last_string_id = 0;
	necp_last_route_rule_id = 0;
	necp_last_aggregate_route_rule_id = 0;

	necp_kernel_socket_policies_gencount = 1;

	memset(&necp_kernel_socket_policies_map, 0, sizeof(necp_kernel_socket_policies_map));
	memset(&necp_kernel_ip_output_policies_map, 0, sizeof(necp_kernel_ip_output_policies_map));
	necp_kernel_socket_policies_app_layer_map = NULL;

	if (necp_kernel_policy_mtx_attr != NULL) {
		lck_attr_free(necp_kernel_policy_mtx_attr);
		necp_kernel_policy_mtx_attr = NULL;
	}
	if (necp_kernel_policy_mtx_grp != NULL) {
		lck_grp_free(necp_kernel_policy_mtx_grp);
		necp_kernel_policy_mtx_grp = NULL;
	}
	if (necp_kernel_policy_grp_attr != NULL) {
		lck_grp_attr_free(necp_kernel_policy_grp_attr);
		necp_kernel_policy_grp_attr = NULL;
	}
	if (necp_route_rule_mtx_attr != NULL) {
		lck_attr_free(necp_route_rule_mtx_attr);
		necp_route_rule_mtx_attr = NULL;
	}
	if (necp_route_rule_mtx_grp != NULL) {
		lck_grp_free(necp_route_rule_mtx_grp);
		necp_route_rule_mtx_grp = NULL;
	}
	if (necp_route_rule_grp_attr != NULL) {
		lck_grp_attr_free(necp_route_rule_grp_attr);
		necp_route_rule_grp_attr = NULL;
	}
	if (necp_kctlref != NULL) {
		ctl_deregister(necp_kctlref);
	}
static errno_t
necp_register_control(void)
{
	struct kern_ctl_reg kern_ctl;
	errno_t result = 0;

	// Create a tag to allocate memory
	necp_malloc_tag = OSMalloc_Tagalloc(NECP_CONTROL_NAME, OSMT_DEFAULT);

	// Find a unique value for our interface family
	result = mbuf_tag_id_find(NECP_CONTROL_NAME, &necp_family);
	if (result != 0) {
		NECPLOG(LOG_ERR, "mbuf_tag_id_find_internal failed: %d", result);
		return (result);
	}

	bzero(&kern_ctl, sizeof(kern_ctl));
	strlcpy(kern_ctl.ctl_name, NECP_CONTROL_NAME, sizeof(kern_ctl.ctl_name));
	kern_ctl.ctl_name[sizeof(kern_ctl.ctl_name) - 1] = 0;
	kern_ctl.ctl_flags = CTL_FLAG_PRIVILEGED; // Require root
	kern_ctl.ctl_sendsize = 64 * 1024;
	kern_ctl.ctl_recvsize = 64 * 1024;
	kern_ctl.ctl_connect = necp_ctl_connect;
	kern_ctl.ctl_disconnect = necp_ctl_disconnect;
	kern_ctl.ctl_send = necp_ctl_send;
	kern_ctl.ctl_rcvd = necp_ctl_rcvd;
	kern_ctl.ctl_setopt = necp_ctl_setopt;
	kern_ctl.ctl_getopt = necp_ctl_getopt;

	result = ctl_register(&kern_ctl, &necp_kctlref);
	if (result != 0) {
		NECPLOG(LOG_ERR, "ctl_register failed: %d", result);
		return (result);
	}

	return (0);
}
static void
necp_post_change_event(struct kev_necp_policies_changed_data *necp_event_data)
{
	struct kev_msg ev_msg;
	memset(&ev_msg, 0, sizeof(ev_msg));

	ev_msg.vendor_code = KEV_VENDOR_APPLE;
	ev_msg.kev_class = KEV_NETWORK_CLASS;
	ev_msg.kev_subclass = KEV_NECP_SUBCLASS;
	ev_msg.event_code = KEV_NECP_POLICIES_CHANGED;

	ev_msg.dv[0].data_ptr = necp_event_data;
	ev_msg.dv[0].data_length = sizeof(necp_event_data->changed_count);
	ev_msg.dv[1].data_length = 0;

	kev_post_msg(&ev_msg);
}
static errno_t
necp_ctl_connect(kern_ctl_ref kctlref, struct sockaddr_ctl *sac, void **unitinfo)
{
#pragma unused(kctlref)
	*unitinfo = necp_create_session(sac->sc_unit);
	if (*unitinfo == NULL) {
		// Could not allocate session
		return (ENOBUFS);
	}

	return (0);
}
static errno_t
necp_ctl_disconnect(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo)
{
#pragma unused(kctlref, unit)
	struct necp_session *session = (struct necp_session *)unitinfo;
	if (session != NULL) {
		necp_policy_mark_all_for_deletion(session);
		necp_policy_apply_all(session);
		necp_delete_session((struct necp_session *)unitinfo);
	}

	return (0);
}
static int
necp_packet_find_tlv(mbuf_t packet, int offset, u_int8_t type, int *err, int next)
{
	size_t cursor = offset;
	int error = 0;
	u_int32_t curr_length;
	u_int8_t curr_type;

	do {
		if (!next) {
			error = mbuf_copydata(packet, cursor, sizeof(curr_type), &curr_type);
			if (error) {
				*err = error;
				return (-1);
			}
		} else {
			next = 0;
			curr_type = NECP_TLV_NIL;
		}

		if (curr_type != type) {
			cursor += sizeof(curr_type);
			error = mbuf_copydata(packet, cursor, sizeof(curr_length), &curr_length);
			if (error) {
				*err = error;
				return (-1);
			}
			cursor += (sizeof(curr_length) + curr_length);
		}
	} while (curr_type != type);

	return (cursor);
}
static int
necp_packet_get_tlv_at_offset(mbuf_t packet, int tlv_offset, u_int32_t buff_len, void *buff, u_int32_t *value_size)
{
	int error = 0;
	u_int32_t length = 0;

	if (tlv_offset < 0) {
		return (EINVAL);
	}

	error = mbuf_copydata(packet, tlv_offset + sizeof(u_int8_t), sizeof(length), &length);
	if (error) {
		return (error);
	}

	u_int32_t total_len = m_length2(packet, NULL);
	if (total_len < (tlv_offset + sizeof(u_int8_t) + sizeof(length) + length)) {
		NECPLOG(LOG_ERR, "Got a bad TLV, length (%u) + offset (%d) < total length (%u)",
				length, (tlv_offset + sizeof(u_int8_t) + sizeof(length)), total_len);
		return (EINVAL);
	}

	if (value_size != NULL) {
		*value_size = length;
	}

	if (buff != NULL && buff_len > 0) {
		u_int32_t to_copy = (length < buff_len) ? length : buff_len;
		error = mbuf_copydata(packet, tlv_offset + sizeof(u_int8_t) + sizeof(length), to_copy, buff);
	}

	return (error);
}
static int
necp_packet_get_tlv(mbuf_t packet, int offset, u_int8_t type, u_int32_t buff_len, void *buff, u_int32_t *value_size)
{
	int error = 0;
	int tlv_offset;

	tlv_offset = necp_packet_find_tlv(packet, offset, type, &error, 0);
	if (tlv_offset < 0) {
		return (error);
	}

	return (necp_packet_get_tlv_at_offset(packet, tlv_offset, buff_len, buff, value_size));
}
static u_int8_t *
necp_buffer_write_packet_header(u_int8_t *buffer, u_int8_t packet_type, u_int8_t flags, u_int32_t message_id)
{
	((struct necp_packet_header *)(void *)buffer)->packet_type = packet_type;
	((struct necp_packet_header *)(void *)buffer)->flags = flags;
	((struct necp_packet_header *)(void *)buffer)->message_id = message_id;
	return (buffer + sizeof(struct necp_packet_header));
}
static u_int8_t *
necp_buffer_write_tlv(u_int8_t *buffer, u_int8_t type, u_int32_t length, const void *value)
{
	*(u_int8_t *)(buffer) = type;
	*(u_int32_t *)(void *)(buffer + sizeof(type)) = length;
	if (length > 0) {
		memcpy((u_int8_t *)(buffer + sizeof(type) + sizeof(length)), value, length);
	}

	return ((u_int8_t *)(buffer + sizeof(type) + sizeof(length) + length));
}
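/*
 * TLV wire format note (summary added for clarity): as written by
 * necp_buffer_write_tlv above and parsed by necp_packet_find_tlv and
 * necp_buffer_find_tlv, each TLV is a 1-byte type, a 4-byte length, and then
 * `length` bytes of value:
 *
 *	+------+-------------+----------------------+
 *	| type | length (4B) | value (length bytes) |
 *	+------+-------------+----------------------+
 *
 * A control message is a struct necp_packet_header followed by a sequence of
 * such TLVs.
 */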
static u_int8_t
necp_buffer_get_tlv_type(u_int8_t *buffer, int tlv_offset)
{
	u_int8_t *type = NULL;

	if (buffer == NULL) {
		return (0);
	}

	type = (u_int8_t *)((u_int8_t *)buffer + tlv_offset);
	return (type ? *type : 0);
}
static u_int32_t
necp_buffer_get_tlv_length(u_int8_t *buffer, int tlv_offset)
{
	u_int32_t *length = NULL;

	if (buffer == NULL) {
		return (0);
	}

	length = (u_int32_t *)(void *)((u_int8_t *)buffer + tlv_offset + sizeof(u_int8_t));
	return (length ? *length : 0);
}
static u_int8_t *
necp_buffer_get_tlv_value(u_int8_t *buffer, int tlv_offset, u_int32_t *value_size)
{
	u_int8_t *value = NULL;
	u_int32_t length = necp_buffer_get_tlv_length(buffer, tlv_offset);
	if (length == 0) {
		return (value);
	}

	if (value_size) {
		*value_size = length;
	}

	value = (u_int8_t *)((u_int8_t *)buffer + tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t));
	return (value);
}
static int
necp_buffer_find_tlv(u_int8_t *buffer, u_int32_t buffer_length, int offset, u_int8_t type, int next)
{
	int cursor = offset;
	int next_cursor;
	u_int8_t curr_type;
	u_int32_t curr_length;

	while (TRUE) {
		if ((((u_int32_t)cursor) + sizeof(curr_type) + sizeof(curr_length)) > buffer_length) {
			return (-1);
		}
		if (!next) {
			curr_type = necp_buffer_get_tlv_type(buffer, cursor);
		} else {
			next = 0;
			curr_type = NECP_TLV_NIL;
		}
		curr_length = necp_buffer_get_tlv_length(buffer, cursor);
		next_cursor = (cursor + sizeof(curr_type) + sizeof(curr_length) + curr_length);
		if (curr_type == type) {
			// check if entire TLV fits inside buffer
			if (((u_int32_t)next_cursor) <= buffer_length) {
				return (cursor);
			} else {
				return (-1);
			}
		}
		cursor = next_cursor;
	}
}
static bool
necp_send_ctl_data(struct necp_session *session, u_int8_t *buffer, size_t buffer_size)
{
	int error;

	if (necp_kctlref == NULL || session == NULL || buffer == NULL || buffer_size == 0) {
		return (FALSE);
	}

	error = ctl_enqueuedata(necp_kctlref, session->control_unit, buffer, buffer_size, CTL_DATA_EOR);
	return (error == 0);
}
static bool
necp_send_success_response(struct necp_session *session, u_int8_t packet_type, u_int32_t message_id)
{
	bool success = TRUE;
	u_int8_t *response = NULL;
	u_int8_t *cursor = NULL;
	size_t response_size = sizeof(struct necp_packet_header) + sizeof(u_int8_t) + sizeof(u_int32_t);
	MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK);
	if (response == NULL) {
		return (FALSE);
	}
	cursor = response;
	cursor = necp_buffer_write_packet_header(cursor, packet_type, NECP_PACKET_FLAGS_RESPONSE, message_id);
	cursor = necp_buffer_write_tlv(cursor, NECP_TLV_NIL, 0, NULL);

	if (!(success = necp_send_ctl_data(session, (u_int8_t *)response, response_size))) {
		NECPLOG0(LOG_ERR, "Failed to send response");
	}

	FREE(response, M_NECP);
	return (success);
}
static bool
necp_send_error_response(struct necp_session *session, u_int8_t packet_type, u_int32_t message_id, u_int32_t error)
{
	bool success = TRUE;
	u_int8_t *response = NULL;
	u_int8_t *cursor = NULL;
	size_t response_size = sizeof(struct necp_packet_header) + sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(u_int32_t);
	MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK);
	if (response == NULL) {
		return (FALSE);
	}
	cursor = response;
	cursor = necp_buffer_write_packet_header(cursor, packet_type, NECP_PACKET_FLAGS_RESPONSE, message_id);
	cursor = necp_buffer_write_tlv(cursor, NECP_TLV_ERROR, sizeof(error), &error);

	if (!(success = necp_send_ctl_data(session, (u_int8_t *)response, response_size))) {
		NECPLOG0(LOG_ERR, "Failed to send response");
	}

	FREE(response, M_NECP);
	return (success);
}
static bool
necp_send_policy_id_response(struct necp_session *session, u_int8_t packet_type, u_int32_t message_id, necp_policy_id policy_id)
{
	bool success = TRUE;
	u_int8_t *response = NULL;
	u_int8_t *cursor = NULL;
	size_t response_size = sizeof(struct necp_packet_header) + sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(u_int32_t);
	MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK);
	if (response == NULL) {
		return (FALSE);
	}
	cursor = response;
	cursor = necp_buffer_write_packet_header(cursor, packet_type, NECP_PACKET_FLAGS_RESPONSE, message_id);
	cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ID, sizeof(policy_id), &policy_id);

	if (!(success = necp_send_ctl_data(session, (u_int8_t *)response, response_size))) {
		NECPLOG0(LOG_ERR, "Failed to send response");
	}

	FREE(response, M_NECP);
	return (success);
}
static errno_t
necp_ctl_send(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, mbuf_t packet, int flags)
{
#pragma unused(kctlref, unit, flags)
	struct necp_session *session = (struct necp_session *)unitinfo;
	struct necp_packet_header header;
	int error = 0;

	if (session == NULL) {
		NECPLOG0(LOG_ERR, "Got a NULL session");
		goto done;
	}

	if (mbuf_pkthdr_len(packet) < sizeof(header)) {
		NECPLOG(LOG_ERR, "Got a bad packet, length (%lu) < sizeof header (%lu)", mbuf_pkthdr_len(packet), sizeof(header));
		goto done;
	}

	error = mbuf_copydata(packet, 0, sizeof(header), &header);
	if (error) {
		NECPLOG(LOG_ERR, "mbuf_copydata failed for the header: %d", error);
		goto done;
	}

	if (session->proc_locked) {
		// Verify that the calling process is allowed to send messages
		uuid_t proc_uuid;
		proc_getexecutableuuid(current_proc(), proc_uuid, sizeof(proc_uuid));
		if (uuid_compare(proc_uuid, session->proc_uuid) != 0) {
			necp_send_error_response(session, header.packet_type, header.message_id, NECP_ERROR_INVALID_PROCESS);
			goto done;
		}
	} else {
		// If not locked, update the proc_uuid and proc_pid of the session
		proc_getexecutableuuid(current_proc(), session->proc_uuid, sizeof(session->proc_uuid));
		session->proc_pid = proc_pid(current_proc());
	}

	switch (header.packet_type) {
		case NECP_PACKET_TYPE_POLICY_ADD: {
			necp_handle_policy_add(session, header.message_id, packet, sizeof(header));
			break;
		}
		case NECP_PACKET_TYPE_POLICY_GET: {
			necp_handle_policy_get(session, header.message_id, packet, sizeof(header));
			break;
		}
		case NECP_PACKET_TYPE_POLICY_DELETE: {
			necp_handle_policy_delete(session, header.message_id, packet, sizeof(header));
			break;
		}
		case NECP_PACKET_TYPE_POLICY_APPLY_ALL: {
			necp_handle_policy_apply_all(session, header.message_id, packet, sizeof(header));
			break;
		}
		case NECP_PACKET_TYPE_POLICY_LIST_ALL: {
			necp_handle_policy_list_all(session, header.message_id, packet, sizeof(header));
			break;
		}
		case NECP_PACKET_TYPE_POLICY_DELETE_ALL: {
			necp_handle_policy_delete_all(session, header.message_id, packet, sizeof(header));
			break;
		}
		case NECP_PACKET_TYPE_SET_SESSION_PRIORITY: {
			necp_handle_set_session_priority(session, header.message_id, packet, sizeof(header));
			break;
		}
		case NECP_PACKET_TYPE_LOCK_SESSION_TO_PROC: {
			necp_handle_lock_session_to_proc(session, header.message_id, packet, sizeof(header));
			break;
		}
		case NECP_PACKET_TYPE_REGISTER_SERVICE: {
			necp_handle_register_service(session, header.message_id, packet, sizeof(header));
			break;
		}
		case NECP_PACKET_TYPE_UNREGISTER_SERVICE: {
			necp_handle_unregister_service(session, header.message_id, packet, sizeof(header));
			break;
		}
		default: {
			NECPLOG(LOG_ERR, "Received unknown message type %d", header.packet_type);
			necp_send_error_response(session, header.packet_type, header.message_id, NECP_ERROR_UNKNOWN_PACKET_TYPE);
			break;
		}
	}

done:
	mbuf_freem(packet);
	return (error);
}
static void
necp_ctl_rcvd(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int flags)
{
#pragma unused(kctlref, unit, unitinfo, flags)
	return;
}
static errno_t
necp_ctl_getopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int opt, void *data, size_t *len)
{
#pragma unused(kctlref, unit, unitinfo, opt, data, len)
	return (0);
}
static errno_t
necp_ctl_setopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int opt, void *data, size_t len)
{
#pragma unused(kctlref, unit, unitinfo, opt, data, len)
	return (0);
}
// Session Management
static struct necp_session *
necp_create_session(u_int32_t control_unit)
{
	struct necp_session *new_session = NULL;

	MALLOC(new_session, struct necp_session *, sizeof(*new_session), M_NECP, M_WAITOK);
	if (new_session == NULL) {
		goto done;
	}
	if (necp_debug) {
		NECPLOG(LOG_DEBUG, "Create NECP session, control unit %d", control_unit);
	}
	memset(new_session, 0, sizeof(*new_session));
	new_session->session_priority = NECP_SESSION_PRIORITY_UNKNOWN;
	new_session->session_order = necp_allocate_new_session_order(new_session->session_priority, control_unit);
	new_session->control_unit = control_unit;
	new_session->dirty = FALSE;
	LIST_INIT(&new_session->policies);
	LIST_INIT(&new_session->services);

	lck_rw_lock_exclusive(&necp_kernel_policy_lock);
	necp_session_count++;
	lck_rw_done(&necp_kernel_policy_lock);

done:
	return (new_session);
}
static void
necp_delete_session(struct necp_session *session)
{
	if (session != NULL) {
		struct necp_service_registration *service = NULL;
		struct necp_service_registration *temp_service = NULL;
		LIST_FOREACH_SAFE(service, &session->services, session_chain, temp_service) {
			LIST_REMOVE(service, session_chain);
			lck_rw_lock_exclusive(&necp_kernel_policy_lock);
			LIST_REMOVE(service, kernel_chain);
			lck_rw_done(&necp_kernel_policy_lock);
			FREE(service, M_NECP);
		}
		if (necp_debug) {
			NECPLOG0(LOG_DEBUG, "Deleted NECP session");
		}
		FREE(session, M_NECP);

		lck_rw_lock_exclusive(&necp_kernel_policy_lock);
		necp_session_count--;
		lck_rw_done(&necp_kernel_policy_lock);
	}
}
// Session Policy Management
static inline u_int8_t
necp_policy_result_get_type_from_buffer(u_int8_t *buffer, u_int32_t length)
{
	return ((buffer && length >= sizeof(u_int8_t)) ? buffer[0] : 0);
}

static inline u_int32_t
necp_policy_result_get_parameter_length_from_buffer(u_int8_t *buffer, u_int32_t length)
{
	return ((buffer && length > sizeof(u_int8_t)) ? (length - sizeof(u_int8_t)) : 0);
}

static inline u_int8_t *
necp_policy_result_get_parameter_pointer_from_buffer(u_int8_t *buffer, u_int32_t length)
{
	return ((buffer && length > sizeof(u_int8_t)) ? (buffer + sizeof(u_int8_t)) : NULL);
}
static bool
necp_policy_result_requires_route_rules(u_int8_t *buffer, u_int32_t length)
{
	u_int8_t type = necp_policy_result_get_type_from_buffer(buffer, length);
	if (type == NECP_POLICY_RESULT_ROUTE_RULES) {
		return (TRUE);
	}
	return (FALSE);
}
static bool
necp_policy_result_is_valid(u_int8_t *buffer, u_int32_t length)
{
	bool validated = FALSE;
	u_int8_t type = necp_policy_result_get_type_from_buffer(buffer, length);
	u_int32_t parameter_length = necp_policy_result_get_parameter_length_from_buffer(buffer, length);
	switch (type) {
		case NECP_POLICY_RESULT_PASS: {
			validated = TRUE;
			break;
		}
		case NECP_POLICY_RESULT_SKIP: {
			if (parameter_length >= sizeof(u_int32_t)) {
				validated = TRUE;
			}
			break;
		}
		case NECP_POLICY_RESULT_DROP: {
			validated = TRUE;
			break;
		}
		case NECP_POLICY_RESULT_SOCKET_DIVERT: {
			if (parameter_length >= sizeof(u_int32_t)) {
				validated = TRUE;
			}
			break;
		}
		case NECP_POLICY_RESULT_SOCKET_SCOPED: {
			if (parameter_length > 0) {
				validated = TRUE;
			}
			break;
		}
		case NECP_POLICY_RESULT_IP_TUNNEL: {
			if (parameter_length > sizeof(u_int32_t)) {
				validated = TRUE;
			}
			break;
		}
		case NECP_POLICY_RESULT_SOCKET_FILTER: {
			if (parameter_length >= sizeof(u_int32_t)) {
				validated = TRUE;
			}
			break;
		}
		case NECP_POLICY_RESULT_ROUTE_RULES: {
			validated = TRUE;
			break;
		}
		case NECP_POLICY_RESULT_TRIGGER:
		case NECP_POLICY_RESULT_TRIGGER_IF_NEEDED:
		case NECP_POLICY_RESULT_TRIGGER_SCOPED:
		case NECP_POLICY_RESULT_NO_TRIGGER_SCOPED:
		case NECP_POLICY_RESULT_USE_NETAGENT: {
			if (parameter_length >= sizeof(uuid_t)) {
				validated = TRUE;
			}
			break;
		}
		default: {
			validated = FALSE;
			break;
		}
	}

	if (necp_debug) {
		NECPLOG(LOG_DEBUG, "Policy result type %d, valid %d", type, validated);
	}

	return (validated);
}
static inline u_int8_t
necp_policy_condition_get_type_from_buffer(u_int8_t *buffer, u_int32_t length)
{
	return ((buffer && length >= sizeof(u_int8_t)) ? buffer[0] : 0);
}

static inline u_int8_t
necp_policy_condition_get_flags_from_buffer(u_int8_t *buffer, u_int32_t length)
{
	return ((buffer && length >= (2 * sizeof(u_int8_t))) ? buffer[1] : 0);
}

static inline u_int32_t
necp_policy_condition_get_value_length_from_buffer(u_int8_t *buffer, u_int32_t length)
{
	return ((buffer && length >= (2 * sizeof(u_int8_t))) ? (length - (2 * sizeof(u_int8_t))) : 0);
}

static inline u_int8_t *
necp_policy_condition_get_value_pointer_from_buffer(u_int8_t *buffer, u_int32_t length)
{
	return ((buffer && length > (2 * sizeof(u_int8_t))) ? (buffer + (2 * sizeof(u_int8_t))) : NULL);
}
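/*
 * Layout note (added for clarity, derived from the accessors above): a policy
 * result value is packed as [1-byte result type][variable-length parameter],
 * and a policy condition value is packed as [1-byte condition type]
 * [1-byte flags][variable-length condition value], so condition flags live at
 * buffer[1] and the condition value starts two bytes in.
 */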
static bool
necp_policy_condition_is_default(u_int8_t *buffer, u_int32_t length)
{
	return (necp_policy_condition_get_type_from_buffer(buffer, length) == NECP_POLICY_CONDITION_DEFAULT);
}

static bool
necp_policy_condition_is_application(u_int8_t *buffer, u_int32_t length)
{
	return (necp_policy_condition_get_type_from_buffer(buffer, length) == NECP_POLICY_CONDITION_APPLICATION);
}

static bool
necp_policy_condition_requires_application(u_int8_t *buffer, u_int32_t length)
{
	u_int8_t type = necp_policy_condition_get_type_from_buffer(buffer, length);
	return (type == NECP_POLICY_CONDITION_REAL_APPLICATION ||
			type == NECP_POLICY_CONDITION_ENTITLEMENT);
}
static bool
necp_policy_condition_is_valid(u_int8_t *buffer, u_int32_t length, u_int8_t policy_result_type)
{
	bool validated = FALSE;
	bool result_cannot_have_ip_layer = (policy_result_type == NECP_POLICY_RESULT_SOCKET_DIVERT ||
			policy_result_type == NECP_POLICY_RESULT_SOCKET_FILTER ||
			policy_result_type == NECP_POLICY_RESULT_TRIGGER ||
			policy_result_type == NECP_POLICY_RESULT_TRIGGER_IF_NEEDED ||
			policy_result_type == NECP_POLICY_RESULT_TRIGGER_SCOPED ||
			policy_result_type == NECP_POLICY_RESULT_NO_TRIGGER_SCOPED ||
			policy_result_type == NECP_POLICY_RESULT_SOCKET_SCOPED ||
			policy_result_type == NECP_POLICY_RESULT_ROUTE_RULES ||
			policy_result_type == NECP_POLICY_RESULT_USE_NETAGENT) ? TRUE : FALSE;
	u_int32_t condition_length = necp_policy_condition_get_value_length_from_buffer(buffer, length);
	u_int8_t *condition_value = necp_policy_condition_get_value_pointer_from_buffer(buffer, length);
	u_int8_t type = necp_policy_condition_get_type_from_buffer(buffer, length);
	u_int8_t flags = necp_policy_condition_get_flags_from_buffer(buffer, length);
	switch (type) {
		case NECP_POLICY_CONDITION_APPLICATION:
		case NECP_POLICY_CONDITION_REAL_APPLICATION: {
			if (!(flags & NECP_POLICY_CONDITION_FLAGS_NEGATIVE) &&
				condition_length >= sizeof(uuid_t) &&
				condition_value != NULL &&
				!uuid_is_null(condition_value)) {
				validated = TRUE;
			}
			break;
		}
		case NECP_POLICY_CONDITION_DOMAIN:
		case NECP_POLICY_CONDITION_ACCOUNT:
		case NECP_POLICY_CONDITION_BOUND_INTERFACE: {
			if (condition_length > 0) {
				validated = TRUE;
			}
			break;
		}
		case NECP_POLICY_CONDITION_TRAFFIC_CLASS: {
			if (condition_length >= sizeof(struct necp_policy_condition_tc_range)) {
				validated = TRUE;
			}
			break;
		}
		case NECP_POLICY_CONDITION_DEFAULT:
		case NECP_POLICY_CONDITION_ALL_INTERFACES:
		case NECP_POLICY_CONDITION_ENTITLEMENT: {
			if (!(flags & NECP_POLICY_CONDITION_FLAGS_NEGATIVE)) {
				validated = TRUE;
			}
			break;
		}
		case NECP_POLICY_CONDITION_IP_PROTOCOL: {
			if (condition_length >= sizeof(u_int16_t)) {
				validated = TRUE;
			}
			break;
		}
		case NECP_POLICY_CONDITION_PID: {
			if (condition_length >= sizeof(pid_t) &&
				condition_value != NULL &&
				*((pid_t *)(void *)condition_value) != 0) {
				validated = TRUE;
			}
			break;
		}
		case NECP_POLICY_CONDITION_UID: {
			if (condition_length >= sizeof(uid_t)) {
				validated = TRUE;
			}
			break;
		}
		case NECP_POLICY_CONDITION_LOCAL_ADDR:
		case NECP_POLICY_CONDITION_REMOTE_ADDR: {
			if (!result_cannot_have_ip_layer && condition_length >= sizeof(struct necp_policy_condition_addr)) {
				validated = TRUE;
			}
			break;
		}
		case NECP_POLICY_CONDITION_LOCAL_ADDR_RANGE:
		case NECP_POLICY_CONDITION_REMOTE_ADDR_RANGE: {
			if (!result_cannot_have_ip_layer && condition_length >= sizeof(struct necp_policy_condition_addr_range)) {
				validated = TRUE;
			}
			break;
		}
		default: {
			validated = FALSE;
			break;
		}
	}

	if (necp_debug) {
		NECPLOG(LOG_DEBUG, "Policy condition type %d, valid %d", type, validated);
	}

	return (validated);
}
static bool
necp_policy_route_rule_is_default(u_int8_t *buffer, u_int32_t length)
{
	return (necp_policy_condition_get_value_length_from_buffer(buffer, length) == 0 &&
			necp_policy_condition_get_flags_from_buffer(buffer, length) == 0);
}
static bool
necp_policy_route_rule_is_valid(u_int8_t *buffer, u_int32_t length)
{
	bool validated = FALSE;
	u_int8_t type = necp_policy_condition_get_type_from_buffer(buffer, length);
	switch (type) {
		case NECP_ROUTE_RULE_ALLOW_INTERFACE: {
			validated = TRUE;
			break;
		}
		case NECP_ROUTE_RULE_DENY_INTERFACE: {
			validated = TRUE;
			break;
		}
		default: {
			validated = FALSE;
			break;
		}
	}

	if (necp_debug) {
		NECPLOG(LOG_DEBUG, "Policy route rule type %d, valid %d", type, validated);
	}

	return (validated);
}
static void
necp_handle_set_session_priority(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
{
	int error;
	struct necp_session_policy *policy = NULL;
	struct necp_session_policy *temp_policy = NULL;
	u_int32_t response_error = NECP_ERROR_INTERNAL;
	u_int32_t requested_session_priority = NECP_SESSION_PRIORITY_UNKNOWN;

	error = necp_packet_get_tlv(packet, offset, NECP_TLV_SESSION_PRIORITY, sizeof(requested_session_priority), &requested_session_priority, NULL);
	if (error) {
		NECPLOG(LOG_ERR, "Failed to get session priority: %d", error);
		response_error = NECP_ERROR_INVALID_TLV;
		goto fail;
	}

	if (session == NULL) {
		NECPLOG0(LOG_ERR, "Failed to find session");
		response_error = NECP_ERROR_INTERNAL;
		goto fail;
	}

	// Enforce special session priorities with entitlements
	if (requested_session_priority == NECP_SESSION_PRIORITY_CONTROL ||
		requested_session_priority == NECP_SESSION_PRIORITY_PRIVILEGED_TUNNEL) {
		errno_t cred_result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES, 0);
		if (cred_result != 0) {
			NECPLOG(LOG_ERR, "Session does not hold necessary entitlement to claim priority level %d", requested_session_priority);
			goto fail;
		}
	}

	if (session->session_priority != requested_session_priority) {
		session->session_priority = requested_session_priority;
		session->session_order = necp_allocate_new_session_order(session->session_priority, session->control_unit);
		session->dirty = TRUE;

		// Mark all policies as needing updates
		LIST_FOREACH_SAFE(policy, &session->policies, chain, temp_policy) {
			policy->pending_update = TRUE;
		}
	}

	necp_send_success_response(session, NECP_PACKET_TYPE_SET_SESSION_PRIORITY, message_id);
	return;

fail:
	necp_send_error_response(session, NECP_PACKET_TYPE_SET_SESSION_PRIORITY, message_id, response_error);
}
static void
necp_handle_lock_session_to_proc(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
{
#pragma unused(packet, offset)
	// proc_uuid already filled out
	session->proc_locked = TRUE;
	necp_send_success_response(session, NECP_PACKET_TYPE_LOCK_SESSION_TO_PROC, message_id);
}
static void
necp_handle_register_service(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
{
	int error;
	struct necp_service_registration *new_service = NULL;
	u_int32_t response_error = NECP_ERROR_INTERNAL;
	uuid_t service_uuid;
	uuid_clear(service_uuid);

	if (session == NULL) {
		NECPLOG0(LOG_ERR, "Failed to find session");
		response_error = NECP_ERROR_INTERNAL;
		goto fail;
	}

	// Enforce entitlements
	errno_t cred_result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES, 0);
	if (cred_result != 0) {
		NECPLOG0(LOG_ERR, "Session does not hold necessary entitlement to register service");
		goto fail;
	}

	// Read service uuid
	error = necp_packet_get_tlv(packet, offset, NECP_TLV_SERVICE_UUID, sizeof(uuid_t), service_uuid, NULL);
	if (error) {
		NECPLOG(LOG_ERR, "Failed to get service UUID: %d", error);
		response_error = NECP_ERROR_INVALID_TLV;
		goto fail;
	}

	MALLOC(new_service, struct necp_service_registration *, sizeof(*new_service), M_NECP, M_WAITOK);
	if (new_service == NULL) {
		NECPLOG0(LOG_ERR, "Failed to allocate service registration");
		response_error = NECP_ERROR_INTERNAL;
		goto fail;
	}

	lck_rw_lock_exclusive(&necp_kernel_policy_lock);
	memset(new_service, 0, sizeof(*new_service));
	new_service->service_id = necp_create_uuid_service_id_mapping(service_uuid);
	LIST_INSERT_HEAD(&session->services, new_service, session_chain);
	LIST_INSERT_HEAD(&necp_registered_service_list, new_service, kernel_chain);
	lck_rw_done(&necp_kernel_policy_lock);

	necp_send_success_response(session, NECP_PACKET_TYPE_REGISTER_SERVICE, message_id);
	return;

fail:
	necp_send_error_response(session, NECP_PACKET_TYPE_REGISTER_SERVICE, message_id, response_error);
}
static void
necp_handle_unregister_service(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
{
	int error;
	struct necp_service_registration *service = NULL;
	struct necp_service_registration *temp_service = NULL;
	u_int32_t response_error = NECP_ERROR_INTERNAL;
	struct necp_uuid_id_mapping *mapping = NULL;
	uuid_t service_uuid;
	uuid_clear(service_uuid);

	if (session == NULL) {
		NECPLOG0(LOG_ERR, "Failed to find session");
		response_error = NECP_ERROR_INTERNAL;
		goto fail;
	}

	// Read service uuid
	error = necp_packet_get_tlv(packet, offset, NECP_TLV_SERVICE_UUID, sizeof(uuid_t), service_uuid, NULL);
	if (error) {
		NECPLOG(LOG_ERR, "Failed to get service UUID: %d", error);
		response_error = NECP_ERROR_INVALID_TLV;
		goto fail;
	}

	// Remove all matching services for this session
	lck_rw_lock_exclusive(&necp_kernel_policy_lock);
	mapping = necp_uuid_lookup_service_id_locked(service_uuid);
	if (mapping != NULL) {
		LIST_FOREACH_SAFE(service, &session->services, session_chain, temp_service) {
			if (service->service_id == mapping->id) {
				LIST_REMOVE(service, session_chain);
				LIST_REMOVE(service, kernel_chain);
				FREE(service, M_NECP);
			}
		}
		necp_remove_uuid_service_id_mapping(service_uuid);
	}
	lck_rw_done(&necp_kernel_policy_lock);

	necp_send_success_response(session, NECP_PACKET_TYPE_UNREGISTER_SERVICE, message_id);
	return;

fail:
	necp_send_error_response(session, NECP_PACKET_TYPE_UNREGISTER_SERVICE, message_id, response_error);
}
1552 necp_handle_policy_add(struct necp_session
*session
, u_int32_t message_id
, mbuf_t packet
, int offset
)
1554 bool has_default_condition
= FALSE
;
1555 bool has_non_default_condition
= FALSE
;
1556 bool has_application_condition
= FALSE
;
1557 bool requires_application_condition
= FALSE
;
1558 u_int8_t
*conditions_array
= NULL
;
1559 u_int32_t conditions_array_size
= 0;
1560 int conditions_array_cursor
;
1562 bool has_default_route_rule
= FALSE
;
1563 u_int8_t
*route_rules_array
= NULL
;
1564 u_int32_t route_rules_array_size
= 0;
1565 int route_rules_array_cursor
;
1569 u_int32_t response_error
= NECP_ERROR_INTERNAL
;
1571 necp_policy_order order
= 0;
1572 struct necp_session_policy
*policy
= NULL
;
1573 u_int8_t
*policy_result
= NULL
;
1574 u_int32_t policy_result_size
= 0;
1576 // Read policy order
1577 error
= necp_packet_get_tlv(packet
, offset
, NECP_TLV_POLICY_ORDER
, sizeof(order
), &order
, NULL
);
1579 NECPLOG(LOG_ERR
, "Failed to get policy order: %d", error
);
1580 response_error
= NECP_ERROR_INVALID_TLV
;
1584 // Read policy result
1585 cursor
= necp_packet_find_tlv(packet
, offset
, NECP_TLV_POLICY_RESULT
, &error
, 0);
1586 error
= necp_packet_get_tlv_at_offset(packet
, cursor
, 0, NULL
, &policy_result_size
);
1587 if (error
|| policy_result_size
== 0) {
1588 NECPLOG(LOG_ERR
, "Failed to get policy result length: %d", error
);
1589 response_error
= NECP_ERROR_INVALID_TLV
;
1592 MALLOC(policy_result
, u_int8_t
*, policy_result_size
, M_NECP
, M_WAITOK
);
1593 if (policy_result
== NULL
) {
1594 NECPLOG(LOG_ERR
, "Failed to allocate a policy result buffer (size %d)", policy_result_size
);
1595 response_error
= NECP_ERROR_INTERNAL
;
1598 error
= necp_packet_get_tlv_at_offset(packet
, cursor
, policy_result_size
, policy_result
, NULL
);
1600 NECPLOG(LOG_ERR
, "Failed to get policy result: %d", error
);
1601 response_error
= NECP_ERROR_POLICY_RESULT_INVALID
;
1604 if (!necp_policy_result_is_valid(policy_result
, policy_result_size
)) {
1605 NECPLOG0(LOG_ERR
, "Failed to validate policy result");
1606 response_error
= NECP_ERROR_POLICY_RESULT_INVALID
;
1610 if (necp_policy_result_requires_route_rules(policy_result
, policy_result_size
)) {
1611 // Read route rules conditions
1612 for (cursor
= necp_packet_find_tlv(packet
, offset
, NECP_TLV_ROUTE_RULE
, &error
, 0);
1614 cursor
= necp_packet_find_tlv(packet
, cursor
, NECP_TLV_ROUTE_RULE
, &error
, 1)) {
1615 u_int32_t route_rule_size
= 0;
1616 necp_packet_get_tlv_at_offset(packet
, cursor
, 0, NULL
, &route_rule_size
);
1617 if (route_rule_size
> 0) {
1618 route_rules_array_size
+= (sizeof(u_int8_t
) + sizeof(u_int32_t
) + route_rule_size
);
1622 if (route_rules_array_size
== 0) {
1623 NECPLOG0(LOG_ERR
, "Failed to get policy route rules");
1624 response_error
= NECP_ERROR_INVALID_TLV
;
1628 MALLOC(route_rules_array
, u_int8_t
*, route_rules_array_size
, M_NECP
, M_WAITOK
);
1629 if (route_rules_array
== NULL
) {
1630 NECPLOG(LOG_ERR
, "Failed to allocate a policy route rules array (size %d)", route_rules_array_size
);
1631 response_error
= NECP_ERROR_INTERNAL
;
1635 route_rules_array_cursor
= 0;
1636 for (cursor
= necp_packet_find_tlv(packet
, offset
, NECP_TLV_ROUTE_RULE
, &error
, 0);
1638 cursor
= necp_packet_find_tlv(packet
, cursor
, NECP_TLV_ROUTE_RULE
, &error
, 1)) {
1639 u_int8_t route_rule_type
= NECP_TLV_ROUTE_RULE
;
1640 u_int32_t route_rule_size
= 0;
1641 necp_packet_get_tlv_at_offset(packet
, cursor
, 0, NULL
, &route_rule_size
);
1642 if (route_rule_size
> 0 && route_rule_size
<= (route_rules_array_size
- route_rules_array_cursor
)) {
1644 memcpy((route_rules_array
+ route_rules_array_cursor
), &route_rule_type
, sizeof(route_rule_type
));
1645 route_rules_array_cursor
+= sizeof(route_rule_type
);
1648 memcpy((route_rules_array
+ route_rules_array_cursor
), &route_rule_size
, sizeof(route_rule_size
));
1649 route_rules_array_cursor
+= sizeof(route_rule_size
);
1652 necp_packet_get_tlv_at_offset(packet
, cursor
, route_rule_size
, (route_rules_array
+ route_rules_array_cursor
), NULL
);
1654 if (!necp_policy_route_rule_is_valid((route_rules_array
+ route_rules_array_cursor
), route_rule_size
)) {
1655 NECPLOG0(LOG_ERR
, "Failed to validate policy route rule");
1656 response_error
= NECP_ERROR_ROUTE_RULES_INVALID
;
1660 if (necp_policy_route_rule_is_default((route_rules_array
+ route_rules_array_cursor
), route_rule_size
)) {
1661 if (has_default_route_rule
) {
1662 NECPLOG0(LOG_ERR
, "Failed to validate route rule; contained multiple default route rules");
1663 response_error
= NECP_ERROR_ROUTE_RULES_INVALID
;
1666 has_default_route_rule
= TRUE
;
1669 route_rules_array_cursor
+= route_rule_size
;
1674 // Read policy conditions
1675 for (cursor
= necp_packet_find_tlv(packet
, offset
, NECP_TLV_POLICY_CONDITION
, &error
, 0);
1677 cursor
= necp_packet_find_tlv(packet
, cursor
, NECP_TLV_POLICY_CONDITION
, &error
, 1)) {
1678 u_int32_t condition_size
= 0;
1679 necp_packet_get_tlv_at_offset(packet
, cursor
, 0, NULL
, &condition_size
);
1681 if (condition_size
> 0) {
1682 conditions_array_size
+= (sizeof(u_int8_t
) + sizeof(u_int32_t
) + condition_size
);
1686 if (conditions_array_size
== 0) {
1687 NECPLOG0(LOG_ERR
, "Failed to get policy conditions");
1688 response_error
= NECP_ERROR_INVALID_TLV
;
1691 MALLOC(conditions_array
, u_int8_t
*, conditions_array_size
, M_NECP
, M_WAITOK
);
1692 if (conditions_array
== NULL
) {
1693 NECPLOG(LOG_ERR
, "Failed to allocate a policy conditions array (size %d)", conditions_array_size
);
1694 response_error
= NECP_ERROR_INTERNAL
;
1698 conditions_array_cursor
= 0;
1699 for (cursor
= necp_packet_find_tlv(packet
, offset
, NECP_TLV_POLICY_CONDITION
, &error
, 0);
1701 cursor
= necp_packet_find_tlv(packet
, cursor
, NECP_TLV_POLICY_CONDITION
, &error
, 1)) {
1702 u_int8_t condition_type
= NECP_TLV_POLICY_CONDITION
;
1703 u_int32_t condition_size
= 0;
1704 necp_packet_get_tlv_at_offset(packet
, cursor
, 0, NULL
, &condition_size
);
1705 if (condition_size
> 0 && condition_size
<= (conditions_array_size
- conditions_array_cursor
)) {
1707 memcpy((conditions_array
+ conditions_array_cursor
), &condition_type
, sizeof(condition_type
));
1708 conditions_array_cursor
+= sizeof(condition_type
);
1711 memcpy((conditions_array
+ conditions_array_cursor
), &condition_size
, sizeof(condition_size
));
1712 conditions_array_cursor
+= sizeof(condition_size
);
1715 necp_packet_get_tlv_at_offset(packet
, cursor
, condition_size
, (conditions_array
+ conditions_array_cursor
), NULL
);
1716 if (!necp_policy_condition_is_valid((conditions_array
+ conditions_array_cursor
), condition_size
, necp_policy_result_get_type_from_buffer(policy_result
, policy_result_size
))) {
1717 NECPLOG0(LOG_ERR
, "Failed to validate policy condition");
1718 response_error
= NECP_ERROR_POLICY_CONDITIONS_INVALID
;
1722 if (necp_policy_condition_is_default((conditions_array
+ conditions_array_cursor
), condition_size
)) {
1723 has_default_condition
= TRUE
;
1725 has_non_default_condition
= TRUE
;
1727 if (has_default_condition
&& has_non_default_condition
) {
1728 NECPLOG0(LOG_ERR
, "Failed to validate conditions; contained default and non-default conditions");
1729 response_error
= NECP_ERROR_POLICY_CONDITIONS_INVALID
;
1733 if (necp_policy_condition_is_application((conditions_array
+ conditions_array_cursor
), condition_size
)) {
1734 has_application_condition
= TRUE
;
1737 if (necp_policy_condition_requires_application((conditions_array
+ conditions_array_cursor
), condition_size
)) {
1738 requires_application_condition
= TRUE
;
1741 conditions_array_cursor
+= condition_size
;
1745 if (requires_application_condition
&& !has_application_condition
) {
1746 NECPLOG0(LOG_ERR
, "Failed to validate conditions; did not contain application condition");
1747 response_error
= NECP_ERROR_POLICY_CONDITIONS_INVALID
;
1751 if ((policy
= necp_policy_create(session
, order
, conditions_array
, conditions_array_size
, route_rules_array
, route_rules_array_size
, policy_result
, policy_result_size
)) == NULL
) {
1752 response_error
= NECP_ERROR_INTERNAL
;
1756 necp_send_policy_id_response(session
, NECP_PACKET_TYPE_POLICY_ADD
, message_id
, policy
->id
);
1760 if (policy_result
!= NULL
) {
1761 FREE(policy_result
, M_NECP
);
1763 if (conditions_array
!= NULL
) {
1764 FREE(conditions_array
, M_NECP
);
1766 if (route_rules_array
!= NULL
) {
1767 FREE(route_rules_array
, M_NECP
);
1770 necp_send_error_response(session
, NECP_PACKET_TYPE_POLICY_ADD
, message_id
, response_error
);
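/*
 * Illustrative sketch only (an assumption, not code from this file): a
 * user-space client building the message parsed by necp_handle_policy_add()
 * above packs each condition or route rule the same way the kernel repacks
 * it into conditions_array: a one-byte type, a four-byte length, then the
 * value.
 *
 *	u_int8_t type = NECP_TLV_POLICY_CONDITION;	// or NECP_TLV_ROUTE_RULE
 *	u_int32_t length = value_size;
 *	memcpy(cursor, &type, sizeof(type));		cursor += sizeof(type);
 *	memcpy(cursor, &length, sizeof(length));	cursor += sizeof(length);
 *	memcpy(cursor, value, value_size);		cursor += value_size;
 *
 * "cursor", "value", and "value_size" are hypothetical names used only in
 * this sketch; the authoritative layout is whatever necp_packet_find_tlv()
 * and necp_packet_get_tlv_at_offset() accept.
 */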
1774 necp_handle_policy_get(struct necp_session
*session
, u_int32_t message_id
, mbuf_t packet
, int offset
)
1776 #pragma unused(offset)
1778 u_int8_t
*response
= NULL
;
1779 u_int8_t
*cursor
= NULL
;
1780 u_int32_t response_error
= NECP_ERROR_INTERNAL
;
1781 necp_policy_id policy_id
= 0;
1782 u_int32_t order_tlv_size
= 0;
1783 u_int32_t result_tlv_size
= 0;
1784 u_int32_t response_size
= 0;
1786 struct necp_session_policy
*policy
= NULL
;
1789 error
= necp_packet_get_tlv(packet
, offset
, NECP_TLV_POLICY_ID
, sizeof(policy_id
), &policy_id
, NULL
);
1791 NECPLOG(LOG_ERR
, "Failed to get policy id: %d", error
);
1792 response_error
= NECP_ERROR_INVALID_TLV
;
1796 policy
= necp_policy_find(session
, policy_id
);
1797 if (policy
== NULL
|| policy
->pending_deletion
) {
1798 NECPLOG(LOG_ERR
, "Failed to find policy with id %d", policy_id
);
1799 response_error
= NECP_ERROR_POLICY_ID_NOT_FOUND
;
1803 order_tlv_size
= sizeof(u_int8_t
) + sizeof(u_int32_t
) + sizeof(necp_policy_order
);
1804 result_tlv_size
= (policy
->result_size
? (sizeof(u_int8_t
) + sizeof(u_int32_t
) + policy
->result_size
) : 0);
1805 response_size
= sizeof(struct necp_packet_header
) + order_tlv_size
+ result_tlv_size
+ policy
->conditions_size
;
1806 MALLOC(response
, u_int8_t
*, response_size
, M_NECP
, M_WAITOK
);
1807 if (response
== NULL
) {
1808 necp_send_error_response(session
, NECP_PACKET_TYPE_POLICY_LIST_ALL
, message_id
, NECP_ERROR_INTERNAL
);
1813 cursor
= necp_buffer_write_packet_header(cursor
, NECP_PACKET_TYPE_POLICY_GET
, NECP_PACKET_FLAGS_RESPONSE
, message_id
);
1814 cursor
= necp_buffer_write_tlv(cursor
, NECP_TLV_POLICY_ORDER
, sizeof(necp_policy_order
), &policy
->order
);
1816 if (result_tlv_size
) {
1817 cursor
= necp_buffer_write_tlv(cursor
, NECP_TLV_POLICY_RESULT
, policy
->result_size
, &policy
->result
);
1819 if (policy
->conditions_size
) {
1820 memcpy(((u_int8_t
*)(void *)(cursor
)), policy
->conditions
, policy
->conditions_size
);
1823 if (!necp_send_ctl_data(session
, (u_int8_t
*)response
, response_size
)) {
1824 NECPLOG0(LOG_ERR
, "Failed to send response");
1827 FREE(response
, M_NECP
);
1831 necp_send_error_response(session
, NECP_PACKET_TYPE_POLICY_GET
, message_id
, response_error
);
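/*
 * Note on the response built by necp_handle_policy_get() above: it is a
 * struct necp_packet_header followed by an order TLV, an optional result
 * TLV, and the stored conditions blob copied verbatim, so the buffer is
 * sized up front as
 *
 *	response_size = sizeof(struct necp_packet_header)
 *	    + order_tlv_size + result_tlv_size + policy->conditions_size;
 *
 * where each TLV contributes sizeof(u_int8_t) + sizeof(u_int32_t) plus the
 * length of its value.
 */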
static void
necp_handle_policy_delete(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
{
	int error = 0;
	u_int32_t response_error = NECP_ERROR_INTERNAL;
	necp_policy_id policy_id = 0;
	struct necp_session_policy *policy = NULL;

	// Read policy id
	error = necp_packet_get_tlv(packet, offset, NECP_TLV_POLICY_ID, sizeof(policy_id), &policy_id, NULL);
	if (error) {
		NECPLOG(LOG_ERR, "Failed to get policy id: %d", error);
		response_error = NECP_ERROR_INVALID_TLV;
		goto fail;
	}

	policy = necp_policy_find(session, policy_id);
	if (policy == NULL || policy->pending_deletion) {
		NECPLOG(LOG_ERR, "Failed to find policy with id %d", policy_id);
		response_error = NECP_ERROR_POLICY_ID_NOT_FOUND;
		goto fail;
	}

	// Mark the policy for deletion; it is removed on the next Apply
	necp_policy_mark_for_deletion(session, policy);

	necp_send_success_response(session, NECP_PACKET_TYPE_POLICY_DELETE, message_id);
	return;

fail:
	necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_DELETE, message_id, response_error);
}

static void
necp_handle_policy_apply_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
{
#pragma unused(packet, offset)
	necp_policy_apply_all(session);
	necp_send_success_response(session, NECP_PACKET_TYPE_POLICY_APPLY_ALL, message_id);
}

1876 necp_handle_policy_list_all(struct necp_session
*session
, u_int32_t message_id
, mbuf_t packet
, int offset
)
1878 #pragma unused(packet, offset)
1879 u_int32_t tlv_size
= (sizeof(u_int8_t
) + sizeof(u_int32_t
) + sizeof(u_int32_t
));
1880 u_int32_t response_size
= 0;
1881 u_int8_t
*response
= NULL
;
1882 u_int8_t
*cursor
= NULL
;
1883 int num_policies
= 0;
1884 int cur_policy_index
= 0;
1885 struct necp_session_policy
*policy
;
1887 LIST_FOREACH(policy
, &session
->policies
, chain
) {
1888 if (!policy
->pending_deletion
) {
1893 // Create a response with one Policy ID TLV for each policy
1894 response_size
= sizeof(struct necp_packet_header
) + num_policies
* tlv_size
;
1895 MALLOC(response
, u_int8_t
*, response_size
, M_NECP
, M_WAITOK
);
1896 if (response
== NULL
) {
1897 necp_send_error_response(session
, NECP_PACKET_TYPE_POLICY_LIST_ALL
, message_id
, NECP_ERROR_INTERNAL
);
1902 cursor
= necp_buffer_write_packet_header(cursor
, NECP_PACKET_TYPE_POLICY_LIST_ALL
, NECP_PACKET_FLAGS_RESPONSE
, message_id
);
1904 LIST_FOREACH(policy
, &session
->policies
, chain
) {
1905 if (!policy
->pending_deletion
&& cur_policy_index
< num_policies
) {
1906 cursor
= necp_buffer_write_tlv(cursor
, NECP_TLV_POLICY_ID
, sizeof(u_int32_t
), &policy
->id
);
1911 if (!necp_send_ctl_data(session
, (u_int8_t
*)response
, response_size
)) {
1912 NECPLOG0(LOG_ERR
, "Failed to send response");
1915 FREE(response
, M_NECP
);
static void
necp_handle_policy_delete_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
{
#pragma unused(packet, offset)
	necp_policy_mark_all_for_deletion(session);
	necp_send_success_response(session, NECP_PACKET_TYPE_POLICY_DELETE_ALL, message_id);
}

static necp_policy_id
necp_policy_get_new_id(void)
{
	necp_policy_id newid = 0;

	lck_rw_lock_exclusive(&necp_kernel_policy_lock);

	necp_last_policy_id++;
	if (necp_last_policy_id < 1) {
		necp_last_policy_id = 1;
	}

	newid = necp_last_policy_id;
	lck_rw_done(&necp_kernel_policy_lock);

	if (newid == 0) {
		NECPLOG0(LOG_DEBUG, "Allocate policy id failed.\n");
		return (0);
	}

	return (newid);
}

static struct necp_session_policy *
necp_policy_create(struct necp_session *session, necp_policy_order order, u_int8_t *conditions_array, u_int32_t conditions_array_size, u_int8_t *route_rules_array, u_int32_t route_rules_array_size, u_int8_t *result, u_int32_t result_size)
{
	struct necp_session_policy *new_policy = NULL;
	struct necp_session_policy *tmp_policy = NULL;

	if (session == NULL || conditions_array == NULL || result == NULL || result_size == 0) {
		return (NULL);
	}

	MALLOC_ZONE(new_policy, struct necp_session_policy *, sizeof(*new_policy), M_NECP_SESSION_POLICY, M_WAITOK);
	if (new_policy == NULL) {
		return (NULL);
	}

	memset(new_policy, 0, sizeof(*new_policy));
	new_policy->applied = FALSE;
	new_policy->pending_deletion = FALSE;
	new_policy->pending_update = FALSE;
	new_policy->order = order;
	new_policy->conditions = conditions_array;
	new_policy->conditions_size = conditions_array_size;
	new_policy->route_rules = route_rules_array;
	new_policy->route_rules_size = route_rules_array_size;
	new_policy->result = result;
	new_policy->result_size = result_size;
	new_policy->id = necp_policy_get_new_id();

	LIST_INSERT_SORTED_ASCENDING(&session->policies, new_policy, chain, order, tmp_policy);

	session->dirty = TRUE;

	NECPLOG(LOG_DEBUG, "Created NECP policy, order %d", order);

	return (new_policy);
}

static struct necp_session_policy *
necp_policy_find(struct necp_session *session, necp_policy_id policy_id)
{
	struct necp_session_policy *policy = NULL;
	if (policy_id == 0) {
		return (NULL);
	}

	LIST_FOREACH(policy, &session->policies, chain) {
		if (policy->id == policy_id) {
			return (policy);
		}
	}

	return (NULL);
}

static inline u_int8_t
necp_policy_get_result_type(struct necp_session_policy *policy)
{
	return (policy ? necp_policy_result_get_type_from_buffer(policy->result, policy->result_size) : 0);
}

static inline u_int32_t
necp_policy_get_result_parameter_length(struct necp_session_policy *policy)
{
	return (policy ? necp_policy_result_get_parameter_length_from_buffer(policy->result, policy->result_size) : 0);
}

static bool
necp_policy_get_result_parameter(struct necp_session_policy *policy, u_int8_t *parameter_buffer, u_int32_t parameter_buffer_length)
{
	if (policy) {
		u_int32_t parameter_length = necp_policy_result_get_parameter_length_from_buffer(policy->result, policy->result_size);
		if (parameter_buffer_length >= parameter_length) {
			u_int8_t *parameter = necp_policy_result_get_parameter_pointer_from_buffer(policy->result, policy->result_size);
			if (parameter && parameter_buffer) {
				memcpy(parameter_buffer, parameter, parameter_length);
				return (TRUE);
			}
		}
	}

	return (FALSE);
}

static bool
necp_policy_mark_for_deletion(struct necp_session *session, struct necp_session_policy *policy)
{
	if (session == NULL || policy == NULL) {
		return (FALSE);
	}

	policy->pending_deletion = TRUE;
	session->dirty = TRUE;

	NECPLOG0(LOG_DEBUG, "Marked NECP policy for removal");

	return (TRUE);
}

static bool
necp_policy_mark_all_for_deletion(struct necp_session *session)
{
	struct necp_session_policy *policy = NULL;
	struct necp_session_policy *temp_policy = NULL;

	LIST_FOREACH_SAFE(policy, &session->policies, chain, temp_policy) {
		necp_policy_mark_for_deletion(session, policy);
	}

	return (TRUE);
}

static bool
necp_policy_delete(struct necp_session *session, struct necp_session_policy *policy)
{
	if (session == NULL || policy == NULL) {
		return (FALSE);
	}

	LIST_REMOVE(policy, chain);

	if (policy->result) {
		FREE(policy->result, M_NECP);
		policy->result = NULL;
	}

	if (policy->conditions) {
		FREE(policy->conditions, M_NECP);
		policy->conditions = NULL;
	}

	FREE_ZONE(policy, sizeof(*policy), M_NECP_SESSION_POLICY);

	NECPLOG0(LOG_DEBUG, "Removed NECP policy");

	return (TRUE);
}
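/*
 * Deletion is two-phase: necp_policy_mark_for_deletion() only sets
 * pending_deletion and flags the session as dirty; the policy is actually
 * unapplied and freed by necp_policy_delete() the next time the session
 * runs necp_policy_apply_all(), so kernel-level state is only torn down
 * while the policy lock is held exclusively.
 */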
2091 necp_policy_unapply(struct necp_session_policy
*policy
)
2094 if (policy
== NULL
) {
2098 lck_rw_assert(&necp_kernel_policy_lock
, LCK_RW_ASSERT_EXCLUSIVE
);
2100 // Release local uuid mappings
2101 if (!uuid_is_null(policy
->applied_app_uuid
)) {
2102 bool removed_mapping
= FALSE
;
2103 if (necp_remove_uuid_app_id_mapping(policy
->applied_app_uuid
, &removed_mapping
, TRUE
) && removed_mapping
) {
2104 necp_uuid_app_id_mappings_dirty
= TRUE
;
2105 necp_num_uuid_app_id_mappings
--;
2107 uuid_clear(policy
->applied_app_uuid
);
2109 if (!uuid_is_null(policy
->applied_real_app_uuid
)) {
2110 necp_remove_uuid_app_id_mapping(policy
->applied_real_app_uuid
, NULL
, FALSE
);
2111 uuid_clear(policy
->applied_real_app_uuid
);
2113 if (!uuid_is_null(policy
->applied_result_uuid
)) {
2114 necp_remove_uuid_service_id_mapping(policy
->applied_result_uuid
);
2115 uuid_clear(policy
->applied_result_uuid
);
2118 // Release string mappings
2119 if (policy
->applied_account
!= NULL
) {
2120 necp_remove_string_to_id_mapping(&necp_account_id_list
, policy
->applied_account
);
2121 FREE(policy
->applied_account
, M_NECP
);
2122 policy
->applied_account
= NULL
;
2125 // Release route rule
2126 if (policy
->applied_route_rules_id
!= 0) {
2127 necp_remove_route_rule(&necp_route_rules
, policy
->applied_route_rules_id
);
2128 policy
->applied_route_rules_id
= 0;
2131 // Remove socket policies
2132 for (i
= 0; i
< MAX_KERNEL_SOCKET_POLICIES
; i
++) {
2133 if (policy
->kernel_socket_policies
[i
] != 0) {
2134 necp_kernel_socket_policy_delete(policy
->kernel_socket_policies
[i
]);
2135 policy
->kernel_socket_policies
[i
] = 0;
2139 // Remove IP output policies
2140 for (i
= 0; i
< MAX_KERNEL_IP_OUTPUT_POLICIES
; i
++) {
2141 if (policy
->kernel_ip_output_policies
[i
] != 0) {
2142 necp_kernel_ip_output_policy_delete(policy
->kernel_ip_output_policies
[i
]);
2143 policy
->kernel_ip_output_policies
[i
] = 0;
2147 policy
->applied
= FALSE
;
2152 #define NECP_KERNEL_POLICY_SUBORDER_ID_TUNNEL_CONDITION 0
2153 #define NECP_KERNEL_POLICY_SUBORDER_NON_ID_TUNNEL_CONDITION 1
2154 #define NECP_KERNEL_POLICY_SUBORDER_ID_CONDITION 2
2155 #define NECP_KERNEL_POLICY_SUBORDER_NON_ID_CONDITIONS 3
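/*
 * The four suborder constants above name the distinct IP-output kernel
 * policies that a single session policy can generate. necp_policy_apply()
 * below passes them as the suborder argument to
 * necp_kernel_ip_output_policy_add() and uses the same values to index
 * policy->kernel_ip_output_policies[].
 */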
struct necp_policy_result_ip_tunnel {
	u_int32_t secondary_result;
	char interface_name[IFXNAMSIZ];
} __attribute__((__packed__));

struct necp_policy_result_service {
	uuid_t identifier;
	u_int32_t data;
} __attribute__((__packed__));
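/*
 * Both structures above are packed because they mirror the raw result
 * parameter bytes carried in a session policy; necp_policy_apply() copies
 * them out with necp_policy_get_result_parameter() rather than casting in
 * place, so no particular alignment is assumed.
 */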
2167 necp_policy_apply(struct necp_session
*session
, struct necp_session_policy
*policy
)
2169 bool socket_only_conditions
= FALSE
;
2170 bool socket_ip_conditions
= FALSE
;
2172 bool socket_layer_non_id_conditions
= FALSE
;
2173 bool ip_output_layer_non_id_conditions
= FALSE
;
2174 bool ip_output_layer_id_condition
= FALSE
;
2175 bool ip_output_layer_tunnel_condition_from_id
= FALSE
;
2176 bool ip_output_layer_tunnel_condition_from_non_id
= FALSE
;
2177 necp_kernel_policy_id cond_ip_output_layer_id
= NECP_KERNEL_POLICY_ID_NONE
;
2179 u_int32_t master_condition_mask
= 0;
2180 u_int32_t master_condition_negated_mask
= 0;
2181 ifnet_t cond_bound_interface
= NULL
;
2182 u_int32_t cond_account_id
= 0;
2183 char *cond_domain
= NULL
;
2186 necp_app_id cond_app_id
= 0;
2187 necp_app_id cond_real_app_id
= 0;
2188 struct necp_policy_condition_tc_range cond_traffic_class
;
2189 cond_traffic_class
.start_tc
= 0;
2190 cond_traffic_class
.end_tc
= 0;
2191 u_int16_t cond_protocol
= 0;
2192 union necp_sockaddr_union cond_local_start
;
2193 union necp_sockaddr_union cond_local_end
;
2194 u_int8_t cond_local_prefix
= 0;
2195 union necp_sockaddr_union cond_remote_start
;
2196 union necp_sockaddr_union cond_remote_end
;
2197 u_int8_t cond_remote_prefix
= 0;
2198 u_int32_t offset
= 0;
2199 u_int8_t ultimate_result
= 0;
2200 u_int32_t secondary_result
= 0;
2201 necp_kernel_policy_result_parameter secondary_result_parameter
;
2202 memset(&secondary_result_parameter
, 0, sizeof(secondary_result_parameter
));
2203 u_int32_t cond_last_interface_index
= 0;
2204 necp_kernel_policy_result_parameter ultimate_result_parameter
;
2205 memset(&ultimate_result_parameter
, 0, sizeof(ultimate_result_parameter
));
2207 if (policy
== NULL
) {
2211 lck_rw_assert(&necp_kernel_policy_lock
, LCK_RW_ASSERT_EXCLUSIVE
);
2213 // Process conditions
2214 while (offset
< policy
->conditions_size
) {
2215 u_int32_t length
= 0;
2216 u_int8_t
*value
= necp_buffer_get_tlv_value(policy
->conditions
, offset
, &length
);
2218 u_int8_t condition_type
= necp_policy_condition_get_type_from_buffer(value
, length
);
2219 u_int8_t condition_flags
= necp_policy_condition_get_flags_from_buffer(value
, length
);
2220 bool condition_is_negative
= condition_flags
& NECP_POLICY_CONDITION_FLAGS_NEGATIVE
;
2221 u_int32_t condition_length
= necp_policy_condition_get_value_length_from_buffer(value
, length
);
2222 u_int8_t
*condition_value
= necp_policy_condition_get_value_pointer_from_buffer(value
, length
);
2223 switch (condition_type
) {
2224 case NECP_POLICY_CONDITION_DEFAULT
: {
2225 socket_ip_conditions
= TRUE
;
2228 case NECP_POLICY_CONDITION_ALL_INTERFACES
: {
2229 master_condition_mask
|= NECP_KERNEL_CONDITION_ALL_INTERFACES
;
2230 socket_ip_conditions
= TRUE
;
2233 case NECP_POLICY_CONDITION_ENTITLEMENT
: {
2234 master_condition_mask
|= NECP_KERNEL_CONDITION_ENTITLEMENT
;
2235 socket_only_conditions
= TRUE
;
2238 case NECP_POLICY_CONDITION_DOMAIN
: {
2239 // Make sure there is only one such rule
2240 if (condition_length
> 0 && cond_domain
== NULL
) {
2241 cond_domain
= necp_create_trimmed_domain((char *)condition_value
, condition_length
);
2242 if (cond_domain
!= NULL
) {
2243 master_condition_mask
|= NECP_KERNEL_CONDITION_DOMAIN
;
2244 if (condition_is_negative
) {
2245 master_condition_negated_mask
|= NECP_KERNEL_CONDITION_DOMAIN
;
2247 socket_only_conditions
= TRUE
;
2252 case NECP_POLICY_CONDITION_ACCOUNT
: {
2253 // Make sure there is only one such rule
2254 if (condition_length
> 0 && cond_account_id
== 0 && policy
->applied_account
== NULL
) {
2255 char *string
= NULL
;
2256 MALLOC(string
, char *, condition_length
+ 1, M_NECP
, M_WAITOK
);
2257 if (string
!= NULL
) {
2258 memcpy(string
, condition_value
, condition_length
);
2259 string
[condition_length
] = 0;
2260 cond_account_id
= necp_create_string_to_id_mapping(&necp_account_id_list
, string
);
2261 if (cond_account_id
!= 0) {
2262 policy
->applied_account
= string
; // Save the string in parent policy
2263 master_condition_mask
|= NECP_KERNEL_CONDITION_ACCOUNT_ID
;
2264 if (condition_is_negative
) {
2265 master_condition_negated_mask
|= NECP_KERNEL_CONDITION_ACCOUNT_ID
;
2267 socket_only_conditions
= TRUE
;
2269 FREE(string
, M_NECP
);
2275 case NECP_POLICY_CONDITION_APPLICATION
: {
2276 // Make sure there is only one such rule, because we save the uuid in the policy
2277 if (condition_length
>= sizeof(uuid_t
) && cond_app_id
== 0) {
2278 bool allocated_mapping
= FALSE
;
2279 uuid_t application_uuid
;
2280 memcpy(application_uuid
, condition_value
, sizeof(uuid_t
));
2281 cond_app_id
= necp_create_uuid_app_id_mapping(application_uuid
, &allocated_mapping
, TRUE
);
2282 if (cond_app_id
!= 0) {
2283 if (allocated_mapping
) {
2284 necp_uuid_app_id_mappings_dirty
= TRUE
;
2285 necp_num_uuid_app_id_mappings
++;
2287 uuid_copy(policy
->applied_app_uuid
, application_uuid
);
2288 master_condition_mask
|= NECP_KERNEL_CONDITION_APP_ID
;
2289 if (condition_is_negative
) {
2290 master_condition_negated_mask
|= NECP_KERNEL_CONDITION_APP_ID
;
2292 socket_only_conditions
= TRUE
;
2297 case NECP_POLICY_CONDITION_REAL_APPLICATION
: {
2298 // Make sure there is only one such rule, because we save the uuid in the policy
2299 if (condition_length
>= sizeof(uuid_t
) && cond_real_app_id
== 0) {
2300 uuid_t real_application_uuid
;
2301 memcpy(real_application_uuid
, condition_value
, sizeof(uuid_t
));
2302 cond_real_app_id
= necp_create_uuid_app_id_mapping(real_application_uuid
, NULL
, FALSE
);
2303 if (cond_real_app_id
!= 0) {
2304 uuid_copy(policy
->applied_real_app_uuid
, real_application_uuid
);
2305 master_condition_mask
|= NECP_KERNEL_CONDITION_REAL_APP_ID
;
2306 if (condition_is_negative
) {
2307 master_condition_negated_mask
|= NECP_KERNEL_CONDITION_REAL_APP_ID
;
2309 socket_only_conditions
= TRUE
;
2314 case NECP_POLICY_CONDITION_PID
: {
2315 if (condition_length
>= sizeof(pid_t
)) {
2316 master_condition_mask
|= NECP_KERNEL_CONDITION_PID
;
2317 if (condition_is_negative
) {
2318 master_condition_negated_mask
|= NECP_KERNEL_CONDITION_PID
;
2320 memcpy(&cond_pid
, condition_value
, sizeof(cond_pid
));
2321 socket_only_conditions
= TRUE
;
2325 case NECP_POLICY_CONDITION_UID
: {
2326 if (condition_length
>= sizeof(uid_t
)) {
2327 master_condition_mask
|= NECP_KERNEL_CONDITION_UID
;
2328 if (condition_is_negative
) {
2329 master_condition_negated_mask
|= NECP_KERNEL_CONDITION_UID
;
2331 memcpy(&cond_uid
, condition_value
, sizeof(cond_uid
));
2332 socket_only_conditions
= TRUE
;
2336 case NECP_POLICY_CONDITION_TRAFFIC_CLASS
: {
2337 if (condition_length
>= sizeof(struct necp_policy_condition_tc_range
)) {
2338 master_condition_mask
|= NECP_KERNEL_CONDITION_TRAFFIC_CLASS
;
2339 if (condition_is_negative
) {
2340 master_condition_negated_mask
|= NECP_KERNEL_CONDITION_TRAFFIC_CLASS
;
2342 memcpy(&cond_traffic_class
, condition_value
, sizeof(cond_traffic_class
));
2343 socket_only_conditions
= TRUE
;
2347 case NECP_POLICY_CONDITION_BOUND_INTERFACE
: {
2348 if (condition_length
<= IFXNAMSIZ
&& condition_length
> 0) {
2349 char interface_name
[IFXNAMSIZ
];
2350 memcpy(interface_name
, condition_value
, condition_length
);
2351 interface_name
[condition_length
- 1] = 0; // Make sure the string is NULL terminated
2352 if (ifnet_find_by_name(interface_name
, &cond_bound_interface
) == 0) {
2353 master_condition_mask
|= NECP_KERNEL_CONDITION_BOUND_INTERFACE
;
2354 if (condition_is_negative
) {
2355 master_condition_negated_mask
|= NECP_KERNEL_CONDITION_BOUND_INTERFACE
;
2358 socket_ip_conditions
= TRUE
;
2362 case NECP_POLICY_CONDITION_IP_PROTOCOL
: {
2363 if (condition_length
>= sizeof(u_int16_t
)) {
2364 master_condition_mask
|= NECP_KERNEL_CONDITION_PROTOCOL
;
2365 if (condition_is_negative
) {
2366 master_condition_negated_mask
|= NECP_KERNEL_CONDITION_PROTOCOL
;
2368 memcpy(&cond_protocol
, condition_value
, sizeof(cond_protocol
));
2369 socket_ip_conditions
= TRUE
;
2373 case NECP_POLICY_CONDITION_LOCAL_ADDR
: {
2374 struct necp_policy_condition_addr
*address_struct
= (struct necp_policy_condition_addr
*)(void *)condition_value
;
2375 cond_local_prefix
= address_struct
->prefix
;
2376 memcpy(&cond_local_start
, &address_struct
->address
, sizeof(address_struct
->address
));
2377 master_condition_mask
|= NECP_KERNEL_CONDITION_LOCAL_START
;
2378 master_condition_mask
|= NECP_KERNEL_CONDITION_LOCAL_PREFIX
;
2379 if (condition_is_negative
) {
2380 master_condition_negated_mask
|= NECP_KERNEL_CONDITION_LOCAL_START
;
2381 master_condition_negated_mask
|= NECP_KERNEL_CONDITION_LOCAL_PREFIX
;
2383 socket_ip_conditions
= TRUE
;
2386 case NECP_POLICY_CONDITION_REMOTE_ADDR
: {
2387 struct necp_policy_condition_addr
*address_struct
= (struct necp_policy_condition_addr
*)(void *)condition_value
;
2388 cond_remote_prefix
= address_struct
->prefix
;
2389 memcpy(&cond_remote_start
, &address_struct
->address
, sizeof(address_struct
->address
));
2390 master_condition_mask
|= NECP_KERNEL_CONDITION_REMOTE_START
;
2391 master_condition_mask
|= NECP_KERNEL_CONDITION_REMOTE_PREFIX
;
2392 if (condition_is_negative
) {
2393 master_condition_negated_mask
|= NECP_KERNEL_CONDITION_REMOTE_START
;
2394 master_condition_negated_mask
|= NECP_KERNEL_CONDITION_REMOTE_PREFIX
;
2396 socket_ip_conditions
= TRUE
;
2399 case NECP_POLICY_CONDITION_LOCAL_ADDR_RANGE
: {
2400 struct necp_policy_condition_addr_range
*address_struct
= (struct necp_policy_condition_addr_range
*)(void *)condition_value
;
2401 memcpy(&cond_local_start
, &address_struct
->start_address
, sizeof(address_struct
->start_address
));
2402 memcpy(&cond_local_end
, &address_struct
->end_address
, sizeof(address_struct
->end_address
));
2403 master_condition_mask
|= NECP_KERNEL_CONDITION_LOCAL_START
;
2404 master_condition_mask
|= NECP_KERNEL_CONDITION_LOCAL_END
;
2405 if (condition_is_negative
) {
2406 master_condition_negated_mask
|= NECP_KERNEL_CONDITION_LOCAL_START
;
2407 master_condition_negated_mask
|= NECP_KERNEL_CONDITION_LOCAL_END
;
2409 socket_ip_conditions
= TRUE
;
2412 case NECP_POLICY_CONDITION_REMOTE_ADDR_RANGE
: {
2413 struct necp_policy_condition_addr_range
*address_struct
= (struct necp_policy_condition_addr_range
*)(void *)condition_value
;
2414 memcpy(&cond_remote_start
, &address_struct
->start_address
, sizeof(address_struct
->start_address
));
2415 memcpy(&cond_remote_end
, &address_struct
->end_address
, sizeof(address_struct
->end_address
));
2416 master_condition_mask
|= NECP_KERNEL_CONDITION_REMOTE_START
;
2417 master_condition_mask
|= NECP_KERNEL_CONDITION_REMOTE_END
;
2418 if (condition_is_negative
) {
2419 master_condition_negated_mask
|= NECP_KERNEL_CONDITION_REMOTE_START
;
2420 master_condition_negated_mask
|= NECP_KERNEL_CONDITION_REMOTE_END
;
2422 socket_ip_conditions
= TRUE
;
2430 offset
+= sizeof(u_int8_t
) + sizeof(u_int32_t
) + length
;
2434 ultimate_result
= necp_policy_get_result_type(policy
);
2435 switch (ultimate_result
) {
2436 case NECP_POLICY_RESULT_PASS
: {
2437 if (socket_only_conditions
) { // socket_ip_conditions can be TRUE or FALSE
2438 socket_layer_non_id_conditions
= TRUE
;
2439 ip_output_layer_id_condition
= TRUE
;
2440 } else if (socket_ip_conditions
) {
2441 socket_layer_non_id_conditions
= TRUE
;
2442 ip_output_layer_id_condition
= TRUE
;
2443 ip_output_layer_non_id_conditions
= TRUE
;
2447 case NECP_POLICY_RESULT_DROP
: {
2448 if (socket_only_conditions
) { // socket_ip_conditions can be TRUE or FALSE
2449 socket_layer_non_id_conditions
= TRUE
;
2450 } else if (socket_ip_conditions
) {
2451 socket_layer_non_id_conditions
= TRUE
;
2452 ip_output_layer_non_id_conditions
= TRUE
;
2456 case NECP_POLICY_RESULT_SKIP
: {
2457 u_int32_t skip_policy_order
= 0;
2458 if (necp_policy_get_result_parameter(policy
, (u_int8_t
*)&skip_policy_order
, sizeof(skip_policy_order
))) {
2459 ultimate_result_parameter
.skip_policy_order
= skip_policy_order
;
2462 if (socket_only_conditions
) { // socket_ip_conditions can be TRUE or FALSE
2463 socket_layer_non_id_conditions
= TRUE
;
2464 ip_output_layer_id_condition
= TRUE
;
2465 } else if (socket_ip_conditions
) {
2466 socket_layer_non_id_conditions
= TRUE
;
2467 ip_output_layer_non_id_conditions
= TRUE
;
2471 case NECP_POLICY_RESULT_SOCKET_DIVERT
:
2472 case NECP_POLICY_RESULT_SOCKET_FILTER
: {
2473 u_int32_t control_unit
= 0;
2474 if (necp_policy_get_result_parameter(policy
, (u_int8_t
*)&control_unit
, sizeof(control_unit
))) {
2475 ultimate_result_parameter
.flow_divert_control_unit
= control_unit
;
2477 socket_layer_non_id_conditions
= TRUE
;
2480 case NECP_POLICY_RESULT_IP_TUNNEL
: {
2481 struct necp_policy_result_ip_tunnel tunnel_parameters
;
2482 u_int32_t tunnel_parameters_length
= necp_policy_get_result_parameter_length(policy
);
2483 if (tunnel_parameters_length
> sizeof(u_int32_t
) &&
2484 tunnel_parameters_length
<= sizeof(struct necp_policy_result_ip_tunnel
) &&
2485 necp_policy_get_result_parameter(policy
, (u_int8_t
*)&tunnel_parameters
, sizeof(tunnel_parameters
))) {
2486 ifnet_t tunnel_interface
= NULL
;
2487 tunnel_parameters
.interface_name
[tunnel_parameters_length
- sizeof(u_int32_t
) - 1] = 0; // Make sure the string is NULL terminated
2488 if (ifnet_find_by_name(tunnel_parameters
.interface_name
, &tunnel_interface
) == 0) {
2489 ultimate_result_parameter
.tunnel_interface_index
= tunnel_interface
->if_index
;
2492 secondary_result
= tunnel_parameters
.secondary_result
;
2493 if (secondary_result
) {
2494 cond_last_interface_index
= ultimate_result_parameter
.tunnel_interface_index
;
2498 if (socket_only_conditions
) { // socket_ip_conditions can be TRUE or FALSE
2499 socket_layer_non_id_conditions
= TRUE
;
2500 ip_output_layer_id_condition
= TRUE
;
2501 if (secondary_result
) {
2502 ip_output_layer_tunnel_condition_from_id
= TRUE
;
2504 } else if (socket_ip_conditions
) {
2505 socket_layer_non_id_conditions
= TRUE
;
2506 ip_output_layer_id_condition
= TRUE
;
2507 ip_output_layer_non_id_conditions
= TRUE
;
2508 if (secondary_result
) {
2509 ip_output_layer_tunnel_condition_from_id
= TRUE
;
2510 ip_output_layer_tunnel_condition_from_non_id
= TRUE
;
2515 case NECP_POLICY_RESULT_TRIGGER
:
2516 case NECP_POLICY_RESULT_TRIGGER_IF_NEEDED
:
2517 case NECP_POLICY_RESULT_TRIGGER_SCOPED
:
2518 case NECP_POLICY_RESULT_NO_TRIGGER_SCOPED
: {
2519 struct necp_policy_result_service service_parameters
;
2520 u_int32_t service_result_length
= necp_policy_get_result_parameter_length(policy
);
2521 bool has_extra_service_data
= FALSE
;
2522 if (service_result_length
>= (sizeof(service_parameters
))) {
2523 has_extra_service_data
= TRUE
;
2525 if (necp_policy_get_result_parameter(policy
, (u_int8_t
*)&service_parameters
, sizeof(service_parameters
))) {
2526 ultimate_result_parameter
.service
.identifier
= necp_create_uuid_service_id_mapping(service_parameters
.identifier
);
2527 if (ultimate_result_parameter
.service
.identifier
!= 0) {
2528 uuid_copy(policy
->applied_result_uuid
, service_parameters
.identifier
);
2529 socket_layer_non_id_conditions
= TRUE
;
2530 if (has_extra_service_data
) {
2531 ultimate_result_parameter
.service
.data
= service_parameters
.data
;
2533 ultimate_result_parameter
.service
.data
= 0;
2539 case NECP_POLICY_RESULT_USE_NETAGENT
: {
2540 uuid_t netagent_uuid
;
2541 if (necp_policy_get_result_parameter(policy
, (u_int8_t
*)&netagent_uuid
, sizeof(netagent_uuid
))) {
2542 ultimate_result_parameter
.netagent_id
= necp_create_uuid_service_id_mapping(netagent_uuid
);
2543 if (ultimate_result_parameter
.netagent_id
!= 0) {
2544 uuid_copy(policy
->applied_result_uuid
, netagent_uuid
);
2545 socket_layer_non_id_conditions
= TRUE
;
2550 case NECP_POLICY_RESULT_SOCKET_SCOPED
: {
2551 u_int32_t interface_name_length
= necp_policy_get_result_parameter_length(policy
);
2552 if (interface_name_length
<= IFXNAMSIZ
&& interface_name_length
> 0) {
2553 char interface_name
[IFXNAMSIZ
];
2554 ifnet_t scope_interface
= NULL
;
2555 necp_policy_get_result_parameter(policy
, (u_int8_t
*)interface_name
, interface_name_length
);
2556 interface_name
[interface_name_length
- 1] = 0; // Make sure the string is NULL terminated
2557 if (ifnet_find_by_name(interface_name
, &scope_interface
) == 0) {
2558 ultimate_result_parameter
.scoped_interface_index
= scope_interface
->if_index
;
2559 socket_layer_non_id_conditions
= TRUE
;
2563 case NECP_POLICY_RESULT_ROUTE_RULES
: {
2564 if (policy
->route_rules
!= NULL
&& policy
->route_rules_size
> 0) {
2565 u_int32_t route_rule_id
= necp_create_route_rule(&necp_route_rules
, policy
->route_rules
, policy
->route_rules_size
);
2566 if (route_rule_id
> 0) {
2567 policy
->applied_route_rules_id
= route_rule_id
;
2568 ultimate_result_parameter
.route_rule_id
= route_rule_id
;
2569 socket_layer_non_id_conditions
= TRUE
;
2578 if (socket_layer_non_id_conditions
) {
2579 necp_kernel_policy_id policy_id
= necp_kernel_socket_policy_add(policy
->id
, policy
->order
, session
->session_order
, session
->proc_pid
, master_condition_mask
, master_condition_negated_mask
, cond_app_id
, cond_real_app_id
, cond_account_id
, cond_domain
, cond_pid
, cond_uid
, cond_bound_interface
, cond_traffic_class
, cond_protocol
, &cond_local_start
, &cond_local_end
, cond_local_prefix
, &cond_remote_start
, &cond_remote_end
, cond_remote_prefix
, ultimate_result
, ultimate_result_parameter
);
2581 if (policy_id
== 0) {
2582 NECPLOG0(LOG_DEBUG
, "Error applying socket kernel policy");
2586 cond_ip_output_layer_id
= policy_id
;
2587 policy
->kernel_socket_policies
[0] = policy_id
;
2590 if (ip_output_layer_non_id_conditions
) {
2591 necp_kernel_policy_id policy_id
= necp_kernel_ip_output_policy_add(policy
->id
, policy
->order
, NECP_KERNEL_POLICY_SUBORDER_NON_ID_CONDITIONS
, session
->session_order
, session
->proc_pid
, master_condition_mask
, master_condition_negated_mask
, NECP_KERNEL_POLICY_ID_NONE
, cond_bound_interface
, 0, cond_protocol
, &cond_local_start
, &cond_local_end
, cond_local_prefix
, &cond_remote_start
, &cond_remote_end
, cond_remote_prefix
, ultimate_result
, ultimate_result_parameter
);
2593 if (policy_id
== 0) {
2594 NECPLOG0(LOG_DEBUG
, "Error applying IP output kernel policy");
2598 policy
->kernel_ip_output_policies
[NECP_KERNEL_POLICY_SUBORDER_NON_ID_CONDITIONS
] = policy_id
;
2601 if (ip_output_layer_id_condition
) {
2602 necp_kernel_policy_id policy_id
= necp_kernel_ip_output_policy_add(policy
->id
, policy
->order
, NECP_KERNEL_POLICY_SUBORDER_ID_CONDITION
, session
->session_order
, session
->proc_pid
, NECP_KERNEL_CONDITION_POLICY_ID
| NECP_KERNEL_CONDITION_ALL_INTERFACES
, 0, cond_ip_output_layer_id
, NULL
, 0, 0, NULL
, NULL
, 0, NULL
, NULL
, 0, ultimate_result
, ultimate_result_parameter
);
2604 if (policy_id
== 0) {
2605 NECPLOG0(LOG_DEBUG
, "Error applying IP output kernel policy");
2609 policy
->kernel_ip_output_policies
[NECP_KERNEL_POLICY_SUBORDER_ID_CONDITION
] = policy_id
;
2612 // Extra policies for IP Output tunnels for when packets loop back
2613 if (ip_output_layer_tunnel_condition_from_id
) {
2614 necp_kernel_policy_id policy_id
= necp_kernel_ip_output_policy_add(policy
->id
, policy
->order
, NECP_KERNEL_POLICY_SUBORDER_NON_ID_TUNNEL_CONDITION
, session
->session_order
, session
->proc_pid
, NECP_KERNEL_CONDITION_POLICY_ID
| NECP_KERNEL_CONDITION_LAST_INTERFACE
| NECP_KERNEL_CONDITION_ALL_INTERFACES
, 0, policy
->kernel_ip_output_policies
[NECP_KERNEL_POLICY_SUBORDER_NON_ID_CONDITIONS
], NULL
, cond_last_interface_index
, 0, NULL
, NULL
, 0, NULL
, NULL
, 0, secondary_result
, secondary_result_parameter
);
2616 if (policy_id
== 0) {
2617 NECPLOG0(LOG_DEBUG
, "Error applying IP output kernel policy");
2621 policy
->kernel_ip_output_policies
[NECP_KERNEL_POLICY_SUBORDER_NON_ID_TUNNEL_CONDITION
] = policy_id
;
2624 if (ip_output_layer_tunnel_condition_from_id
) {
2625 necp_kernel_policy_id policy_id
= necp_kernel_ip_output_policy_add(policy
->id
, policy
->order
, NECP_KERNEL_POLICY_SUBORDER_ID_TUNNEL_CONDITION
, session
->session_order
, session
->proc_pid
, NECP_KERNEL_CONDITION_POLICY_ID
| NECP_KERNEL_CONDITION_LAST_INTERFACE
| NECP_KERNEL_CONDITION_ALL_INTERFACES
, 0, policy
->kernel_ip_output_policies
[NECP_KERNEL_POLICY_SUBORDER_ID_CONDITION
], NULL
, cond_last_interface_index
, 0, NULL
, NULL
, 0, NULL
, NULL
, 0, secondary_result
, secondary_result_parameter
);
2627 if (policy_id
== 0) {
2628 NECPLOG0(LOG_DEBUG
, "Error applying IP output kernel policy");
2632 policy
->kernel_ip_output_policies
[NECP_KERNEL_POLICY_SUBORDER_ID_TUNNEL_CONDITION
] = policy_id
;
2635 policy
->applied
= TRUE
;
2636 policy
->pending_update
= FALSE
;
static void
necp_policy_apply_all(struct necp_session *session)
{
	struct necp_session_policy *policy = NULL;
	struct necp_session_policy *temp_policy = NULL;
	struct kev_necp_policies_changed_data kev_data;
	kev_data.changed_count = 0;

	lck_rw_lock_exclusive(&necp_kernel_policy_lock);

	// Remove existing applied policies
	if (session->dirty) {
		LIST_FOREACH_SAFE(policy, &session->policies, chain, temp_policy) {
			if (policy->pending_deletion) {
				if (policy->applied) {
					necp_policy_unapply(policy);
				}
				// Delete the policy
				necp_policy_delete(session, policy);
			} else if (!policy->applied) {
				necp_policy_apply(session, policy);
			} else if (policy->pending_update) {
				// Must have been applied, but needs an update. Remove and re-add.
				necp_policy_unapply(policy);
				necp_policy_apply(session, policy);
			}
		}

		necp_kernel_socket_policies_update_uuid_table();
		necp_kernel_socket_policies_reprocess();
		necp_kernel_ip_output_policies_reprocess();

		// Clear dirty bit flags
		session->dirty = FALSE;
	}

	lck_rw_done(&necp_kernel_policy_lock);

	necp_post_change_event(&kev_data);

	NECPLOG0(LOG_DEBUG, "Applied NECP policies");
}

2688 // Kernel Policy Management
2689 // ---------------------
2690 // Kernel policies are derived from session policies
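// Kernel policy IDs are allocated by necp_kernel_policy_get_new_id() below,
// which must be called with the policy lock held exclusively and restarts
// the counter at NECP_KERNEL_POLICY_ID_FIRST_VALID if it ever wraps.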
static necp_kernel_policy_id
necp_kernel_policy_get_new_id(void)
{
	necp_kernel_policy_id newid = NECP_KERNEL_POLICY_ID_NONE;

	lck_rw_assert(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	necp_last_kernel_policy_id++;
	if (necp_last_kernel_policy_id < NECP_KERNEL_POLICY_ID_FIRST_VALID) {
		necp_last_kernel_policy_id = NECP_KERNEL_POLICY_ID_FIRST_VALID;
	}

	newid = necp_last_kernel_policy_id;
	if (newid == NECP_KERNEL_POLICY_ID_NONE) {
		NECPLOG0(LOG_DEBUG, "Allocate kernel policy id failed.\n");
		return (0);
	}

	return (newid);
}

2712 #define NECP_KERNEL_VALID_SOCKET_CONDITIONS (NECP_KERNEL_CONDITION_APP_ID | NECP_KERNEL_CONDITION_REAL_APP_ID | NECP_KERNEL_CONDITION_DOMAIN | NECP_KERNEL_CONDITION_ACCOUNT_ID | NECP_KERNEL_CONDITION_PID | NECP_KERNEL_CONDITION_UID | NECP_KERNEL_CONDITION_ALL_INTERFACES | NECP_KERNEL_CONDITION_BOUND_INTERFACE | NECP_KERNEL_CONDITION_TRAFFIC_CLASS | NECP_KERNEL_CONDITION_PROTOCOL | NECP_KERNEL_CONDITION_LOCAL_START | NECP_KERNEL_CONDITION_LOCAL_END | NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_REMOTE_START | NECP_KERNEL_CONDITION_REMOTE_END | NECP_KERNEL_CONDITION_REMOTE_PREFIX | NECP_KERNEL_CONDITION_ENTITLEMENT)
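/*
 * necp_kernel_socket_policy_add() below first masks the caller-supplied
 * condition bits with NECP_KERNEL_VALID_SOCKET_CONDITIONS and then drops
 * combinations that cannot be evaluated together, for example:
 *
 *	condition_mask &= NECP_KERNEL_VALID_SOCKET_CONDITIONS;
 *	if ((condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES) &&
 *	    (condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE)) {
 *		condition_mask &= ~NECP_KERNEL_CONDITION_BOUND_INTERFACE;
 *	}
 *
 * The negated mask is then clipped to the sanitized mask so that a negation
 * can never refer to a condition that was removed.
 */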
2713 static necp_kernel_policy_id
2714 necp_kernel_socket_policy_add(necp_policy_id parent_policy_id
, necp_policy_order order
, u_int32_t session_order
, int session_pid
, u_int32_t condition_mask
, u_int32_t condition_negated_mask
, necp_app_id cond_app_id
, necp_app_id cond_real_app_id
, u_int32_t cond_account_id
, char *cond_domain
, pid_t cond_pid
, uid_t cond_uid
, ifnet_t cond_bound_interface
, struct necp_policy_condition_tc_range cond_traffic_class
, u_int16_t cond_protocol
, union necp_sockaddr_union
*cond_local_start
, union necp_sockaddr_union
*cond_local_end
, u_int8_t cond_local_prefix
, union necp_sockaddr_union
*cond_remote_start
, union necp_sockaddr_union
*cond_remote_end
, u_int8_t cond_remote_prefix
, necp_kernel_policy_result result
, necp_kernel_policy_result_parameter result_parameter
)
2716 struct necp_kernel_socket_policy
*new_kernel_policy
= NULL
;
2717 struct necp_kernel_socket_policy
*tmp_kernel_policy
= NULL
;
2719 MALLOC_ZONE(new_kernel_policy
, struct necp_kernel_socket_policy
*, sizeof(*new_kernel_policy
), M_NECP_SOCKET_POLICY
, M_WAITOK
);
2720 if (new_kernel_policy
== NULL
) {
2724 memset(new_kernel_policy
, 0, sizeof(*new_kernel_policy
));
2725 new_kernel_policy
->parent_policy_id
= parent_policy_id
;
2726 new_kernel_policy
->id
= necp_kernel_policy_get_new_id();
2727 new_kernel_policy
->order
= order
;
2728 new_kernel_policy
->session_order
= session_order
;
2729 new_kernel_policy
->session_pid
= session_pid
;
2731 // Sanitize condition mask
2732 new_kernel_policy
->condition_mask
= (condition_mask
& NECP_KERNEL_VALID_SOCKET_CONDITIONS
);
2733 if ((new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_ALL_INTERFACES
) && (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_BOUND_INTERFACE
)) {
2734 new_kernel_policy
->condition_mask
&= ~NECP_KERNEL_CONDITION_BOUND_INTERFACE
;
2736 if ((new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_REAL_APP_ID
) && !(new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_APP_ID
)) {
2737 new_kernel_policy
->condition_mask
&= ~NECP_KERNEL_CONDITION_REAL_APP_ID
;
2739 if ((new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_ENTITLEMENT
) && !(new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_APP_ID
)) {
2740 new_kernel_policy
->condition_mask
&= ~NECP_KERNEL_CONDITION_ENTITLEMENT
;
2742 if ((new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_END
) && (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_PREFIX
)) {
2743 new_kernel_policy
->condition_mask
&= ~NECP_KERNEL_CONDITION_LOCAL_PREFIX
;
2745 if ((new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_REMOTE_END
) && (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_REMOTE_PREFIX
)) {
2746 new_kernel_policy
->condition_mask
&= ~NECP_KERNEL_CONDITION_REMOTE_PREFIX
;
2748 new_kernel_policy
->condition_negated_mask
= condition_negated_mask
& new_kernel_policy
->condition_mask
;
2750 // Set condition values
2751 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_APP_ID
) {
2752 new_kernel_policy
->cond_app_id
= cond_app_id
;
2754 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_REAL_APP_ID
) {
2755 new_kernel_policy
->cond_real_app_id
= cond_real_app_id
;
2757 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_ACCOUNT_ID
) {
2758 new_kernel_policy
->cond_account_id
= cond_account_id
;
2760 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_DOMAIN
) {
2761 new_kernel_policy
->cond_domain
= cond_domain
;
2762 new_kernel_policy
->cond_domain_dot_count
= necp_count_dots(cond_domain
, strlen(cond_domain
));
2764 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_PID
) {
2765 new_kernel_policy
->cond_pid
= cond_pid
;
2767 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_UID
) {
2768 new_kernel_policy
->cond_uid
= cond_uid
;
2770 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_BOUND_INTERFACE
) {
2771 if (cond_bound_interface
) {
2772 ifnet_reference(cond_bound_interface
);
2774 new_kernel_policy
->cond_bound_interface
= cond_bound_interface
;
2776 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_TRAFFIC_CLASS
) {
2777 new_kernel_policy
->cond_traffic_class
= cond_traffic_class
;
2779 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_PROTOCOL
) {
2780 new_kernel_policy
->cond_protocol
= cond_protocol
;
2782 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_START
) {
2783 memcpy(&new_kernel_policy
->cond_local_start
, cond_local_start
, cond_local_start
->sa
.sa_len
);
2785 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_END
) {
2786 memcpy(&new_kernel_policy
->cond_local_end
, cond_local_end
, cond_local_end
->sa
.sa_len
);
2788 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_LOCAL_PREFIX
) {
2789 new_kernel_policy
->cond_local_prefix
= cond_local_prefix
;
2791 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_REMOTE_START
) {
2792 memcpy(&new_kernel_policy
->cond_remote_start
, cond_remote_start
, cond_remote_start
->sa
.sa_len
);
2794 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_REMOTE_END
) {
2795 memcpy(&new_kernel_policy
->cond_remote_end
, cond_remote_end
, cond_remote_end
->sa
.sa_len
);
2797 if (new_kernel_policy
->condition_mask
& NECP_KERNEL_CONDITION_REMOTE_PREFIX
) {
2798 new_kernel_policy
->cond_remote_prefix
= cond_remote_prefix
;
2801 new_kernel_policy
->result
= result
;
2802 memcpy(&new_kernel_policy
->result_parameter
, &result_parameter
, sizeof(result_parameter
));
2805 NECPLOG(LOG_DEBUG
, "Added kernel policy: socket, id=%d, mask=%x\n", new_kernel_policy
->id
, new_kernel_policy
->condition_mask
);
2807 LIST_INSERT_SORTED_TWICE_ASCENDING(&necp_kernel_socket_policies
, new_kernel_policy
, chain
, session_order
, order
, tmp_kernel_policy
);
2809 return (new_kernel_policy
? new_kernel_policy
->id
: 0);
static struct necp_kernel_socket_policy *
necp_kernel_socket_policy_find(necp_kernel_policy_id policy_id)
{
	struct necp_kernel_socket_policy *kernel_policy = NULL;
	struct necp_kernel_socket_policy *tmp_kernel_policy = NULL;

	if (policy_id == 0) {
		return (NULL);
	}

	LIST_FOREACH_SAFE(kernel_policy, &necp_kernel_socket_policies, chain, tmp_kernel_policy) {
		if (kernel_policy->id == policy_id) {
			return (kernel_policy);
		}
	}

	return (NULL);
}

static bool
necp_kernel_socket_policy_delete(necp_kernel_policy_id policy_id)
{
	struct necp_kernel_socket_policy *policy = NULL;

	lck_rw_assert(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	policy = necp_kernel_socket_policy_find(policy_id);
	if (policy) {
		LIST_REMOVE(policy, chain);

		if (policy->cond_bound_interface) {
			ifnet_release(policy->cond_bound_interface);
			policy->cond_bound_interface = NULL;
		}

		if (policy->cond_domain) {
			FREE(policy->cond_domain, M_NECP);
			policy->cond_domain = NULL;
		}

		FREE_ZONE(policy, sizeof(*policy), M_NECP_SOCKET_POLICY);
		return (TRUE);
	}

	return (FALSE);
}

2859 #define MAX_RESULT_STRING_LEN 64
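/*
 * Callers of necp_get_result_description() pass a buffer of at least
 * MAX_RESULT_STRING_LEN bytes (see the result_string arrays in
 * necp_kernel_socket_policies_dump_all() below); every snprintf() in the
 * helper is bounded by that length.
 */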
2860 static inline const char *
2861 necp_get_result_description(char *result_string
, necp_kernel_policy_result result
, necp_kernel_policy_result_parameter result_parameter
)
2863 uuid_string_t uuid_string
;
2865 case NECP_KERNEL_POLICY_RESULT_NONE
: {
2868 case NECP_KERNEL_POLICY_RESULT_PASS
: {
2871 case NECP_KERNEL_POLICY_RESULT_SKIP
: {
2874 case NECP_KERNEL_POLICY_RESULT_DROP
: {
2877 case NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT
: {
2878 snprintf(result_string
, MAX_RESULT_STRING_LEN
, "SocketDivert (%d)", result_parameter
.flow_divert_control_unit
);
2881 case NECP_KERNEL_POLICY_RESULT_SOCKET_FILTER
: {
2882 snprintf(result_string
, MAX_RESULT_STRING_LEN
, "SocketFilter (%d)", result_parameter
.filter_control_unit
);
2885 case NECP_KERNEL_POLICY_RESULT_IP_TUNNEL
: {
2886 ifnet_t interface
= ifindex2ifnet
[result_parameter
.tunnel_interface_index
];
2887 snprintf(result_string
, MAX_RESULT_STRING_LEN
, "IPTunnel (%s%d)", ifnet_name(interface
), ifnet_unit(interface
));
2890 case NECP_KERNEL_POLICY_RESULT_IP_FILTER
: {
2891 return ("IPFilter");
2893 case NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED
: {
2894 ifnet_t interface
= ifindex2ifnet
[result_parameter
.scoped_interface_index
];
2895 snprintf(result_string
, MAX_RESULT_STRING_LEN
, "SocketScoped (%s%d)", ifnet_name(interface
), ifnet_unit(interface
));
2898 case NECP_KERNEL_POLICY_RESULT_ROUTE_RULES
: {
2900 char interface_names
[IFXNAMSIZ
][MAX_ROUTE_RULE_INTERFACES
];
2901 struct necp_route_rule
*route_rule
= necp_lookup_route_rule_locked(&necp_route_rules
, result_parameter
.route_rule_id
);
2902 if (route_rule
!= NULL
) {
2903 bool default_drop
= (route_rule
->default_action
== NECP_ROUTE_RULE_DENY_INTERFACE
);
2904 for (index
= 0; index
< MAX_ROUTE_RULE_INTERFACES
; index
++) {
2905 if (route_rule
->exception_if_indices
[index
] != 0) {
2906 ifnet_t interface
= ifindex2ifnet
[route_rule
->exception_if_indices
[index
]];
2907 snprintf(interface_names
[index
], IFXNAMSIZ
, "%s%d", ifnet_name(interface
), ifnet_unit(interface
));
2909 memset(interface_names
[index
], 0, IFXNAMSIZ
);
2913 snprintf(result_string
, MAX_RESULT_STRING_LEN
, "RouteRules (Only %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)",
2914 (route_rule
->cellular_action
== NECP_ROUTE_RULE_ALLOW_INTERFACE
) ? "Cell " : "",
2915 (route_rule
->wifi_action
== NECP_ROUTE_RULE_ALLOW_INTERFACE
) ? "WiFi " : "",
2916 (route_rule
->wired_action
== NECP_ROUTE_RULE_ALLOW_INTERFACE
) ? "Wired " : "",
2917 (route_rule
->expensive_action
== NECP_ROUTE_RULE_ALLOW_INTERFACE
) ? "Exp " : "",
2918 (route_rule
->exception_if_actions
[0] == NECP_ROUTE_RULE_ALLOW_INTERFACE
) ? interface_names
[0] : "",
2919 (route_rule
->exception_if_actions
[0] == NECP_ROUTE_RULE_ALLOW_INTERFACE
) ? " " : "",
2920 (route_rule
->exception_if_actions
[1] == NECP_ROUTE_RULE_ALLOW_INTERFACE
) ? interface_names
[1] : "",
2921 (route_rule
->exception_if_actions
[1] == NECP_ROUTE_RULE_ALLOW_INTERFACE
) ? " " : "",
2922 (route_rule
->exception_if_actions
[2] == NECP_ROUTE_RULE_ALLOW_INTERFACE
) ? interface_names
[2] : "",
2923 (route_rule
->exception_if_actions
[2] == NECP_ROUTE_RULE_ALLOW_INTERFACE
) ? " " : "",
2924 (route_rule
->exception_if_actions
[3] == NECP_ROUTE_RULE_ALLOW_INTERFACE
) ? interface_names
[3] : "",
2925 (route_rule
->exception_if_actions
[3] == NECP_ROUTE_RULE_ALLOW_INTERFACE
) ? " " : "",
2926 (route_rule
->exception_if_actions
[4] == NECP_ROUTE_RULE_ALLOW_INTERFACE
) ? interface_names
[4] : "",
2927 (route_rule
->exception_if_actions
[4] == NECP_ROUTE_RULE_ALLOW_INTERFACE
) ? " " : "",
2928 (route_rule
->exception_if_actions
[5] == NECP_ROUTE_RULE_ALLOW_INTERFACE
) ? interface_names
[5] : "",
2929 (route_rule
->exception_if_actions
[5] == NECP_ROUTE_RULE_ALLOW_INTERFACE
) ? " " : "",
2930 (route_rule
->exception_if_actions
[6] == NECP_ROUTE_RULE_ALLOW_INTERFACE
) ? interface_names
[6] : "",
2931 (route_rule
->exception_if_actions
[6] == NECP_ROUTE_RULE_ALLOW_INTERFACE
) ? " " : "",
2932 (route_rule
->exception_if_actions
[7] == NECP_ROUTE_RULE_ALLOW_INTERFACE
) ? interface_names
[7] : "",
2933 (route_rule
->exception_if_actions
[7] == NECP_ROUTE_RULE_ALLOW_INTERFACE
) ? " " : "",
2934 (route_rule
->exception_if_actions
[8] == NECP_ROUTE_RULE_ALLOW_INTERFACE
) ? interface_names
[8] : "",
2935 (route_rule
->exception_if_actions
[8] == NECP_ROUTE_RULE_ALLOW_INTERFACE
) ? " " : "",
2936 (route_rule
->exception_if_actions
[9] == NECP_ROUTE_RULE_ALLOW_INTERFACE
) ? interface_names
[9] : "");
2938 snprintf(result_string
, MAX_RESULT_STRING_LEN
, "RouteRules (%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)",
2939 (route_rule
->cellular_action
== NECP_ROUTE_RULE_DENY_INTERFACE
) ? "!Cell " : "",
2940 (route_rule
->wifi_action
		    == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!WiFi " : "",
		    (route_rule->wired_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!Wired " : "",
		    (route_rule->expensive_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!Exp " : "",
		    (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
		    (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[0] : "",
		    (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
		    (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[1] : "",
		    (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
		    (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[2] : "",
		    (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
		    (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[3] : "",
		    (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
		    (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[4] : "",
		    (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
		    (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[5] : "",
		    (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
		    (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[6] : "",
		    (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
		    (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[7] : "",
		    (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
		    (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[8] : "",
		    (route_rule->exception_if_actions[9] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
		    (route_rule->exception_if_actions[9] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[9] : "");
		} else {
			snprintf(result_string, MAX_RESULT_STRING_LEN, "RouteRules (Unknown)");
		}
		break;
	}
	case NECP_KERNEL_POLICY_RESULT_USE_NETAGENT: {
		bool found_mapping = FALSE;
		struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.netagent_id);
		if (mapping != NULL) {
			uuid_unparse(mapping->uuid, uuid_string);
			found_mapping = TRUE;
		}
		snprintf(result_string, MAX_RESULT_STRING_LEN, "UseNetAgent (%s)", found_mapping ? uuid_string : "Unknown");
		break;
	}
	case NECP_POLICY_RESULT_TRIGGER: {
		bool found_mapping = FALSE;
		struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.service.identifier);
		if (mapping != NULL) {
			uuid_unparse(mapping->uuid, uuid_string);
			found_mapping = TRUE;
		}
		snprintf(result_string, MAX_RESULT_STRING_LEN, "Trigger (%s.%d)", found_mapping ? uuid_string : "Unknown", result_parameter.service.data);
		break;
	}
	case NECP_POLICY_RESULT_TRIGGER_IF_NEEDED: {
		bool found_mapping = FALSE;
		struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.service.identifier);
		if (mapping != NULL) {
			uuid_unparse(mapping->uuid, uuid_string);
			found_mapping = TRUE;
		}
		snprintf(result_string, MAX_RESULT_STRING_LEN, "TriggerIfNeeded (%s.%d)", found_mapping ? uuid_string : "Unknown", result_parameter.service.data);
		break;
	}
	case NECP_POLICY_RESULT_TRIGGER_SCOPED: {
		bool found_mapping = FALSE;
		struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.service.identifier);
		if (mapping != NULL) {
			uuid_unparse(mapping->uuid, uuid_string);
			found_mapping = TRUE;
		}
		snprintf(result_string, MAX_RESULT_STRING_LEN, "TriggerScoped (%s.%d)", found_mapping ? uuid_string : "Unknown", result_parameter.service.data);
		break;
	}
	case NECP_POLICY_RESULT_NO_TRIGGER_SCOPED: {
		bool found_mapping = FALSE;
		struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.service.identifier);
		if (mapping != NULL) {
			uuid_unparse(mapping->uuid, uuid_string);
			found_mapping = TRUE;
		}
		snprintf(result_string, MAX_RESULT_STRING_LEN, "NoTriggerScoped (%s.%d)", found_mapping ? uuid_string : "Unknown", result_parameter.service.data);
		break;
	}
	default: {
		snprintf(result_string, MAX_RESULT_STRING_LEN, "Unknown %d (%d)", result, result_parameter.tunnel_interface_index);
		break;
	}
	}
	return (result_string);
}
static void
necp_kernel_socket_policies_dump_all(void)
{
	struct necp_kernel_socket_policy *policy = NULL;
	int policy_i;
	int app_i;
	char result_string[MAX_RESULT_STRING_LEN];
	char proc_name_string[MAXCOMLEN + 1];
	memset(result_string, 0, MAX_RESULT_STRING_LEN);
	memset(proc_name_string, 0, MAXCOMLEN + 1);

	NECPLOG0(LOG_DEBUG, "NECP Application Policies:\n");
	NECPLOG0(LOG_DEBUG, "-----------\n");
	for (policy_i = 0; necp_kernel_socket_policies_app_layer_map != NULL && necp_kernel_socket_policies_app_layer_map[policy_i] != NULL; policy_i++) {
		policy = necp_kernel_socket_policies_app_layer_map[policy_i];
		proc_name(policy->session_pid, proc_name_string, MAXCOMLEN);
		NECPLOG(LOG_DEBUG, "\t%3d. Policy ID: %5d\tProcess: %10.10s\tOrder: %04d.%04d\tMask: %5x\tResult: %s\n", policy_i, policy->id, proc_name_string, policy->session_order, policy->order, policy->condition_mask, necp_get_result_description(result_string, policy->result, policy->result_parameter));
	}
	if (necp_kernel_socket_policies_app_layer_map[0] != NULL) {
		NECPLOG0(LOG_DEBUG, "-----------\n");
	}

	NECPLOG0(LOG_DEBUG, "NECP Socket Policies:\n");
	NECPLOG0(LOG_DEBUG, "-----------\n");
	for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) {
		NECPLOG(LOG_DEBUG, "\tApp Bucket: %d\n", app_i);
		for (policy_i = 0; necp_kernel_socket_policies_map[app_i] != NULL && (necp_kernel_socket_policies_map[app_i])[policy_i] != NULL; policy_i++) {
			policy = (necp_kernel_socket_policies_map[app_i])[policy_i];
			proc_name(policy->session_pid, proc_name_string, MAXCOMLEN);
			NECPLOG(LOG_DEBUG, "\t%3d. Policy ID: %5d\tProcess: %10.10s\tOrder: %04d.%04d\tMask: %5x\tResult: %s\n", policy_i, policy->id, proc_name_string, policy->session_order, policy->order, policy->condition_mask, necp_get_result_description(result_string, policy->result, policy->result_parameter));
		}
		NECPLOG0(LOG_DEBUG, "-----------\n");
	}
}
static inline bool
necp_kernel_socket_result_is_trigger_service_type(struct necp_kernel_socket_policy *kernel_policy)
{
	return (kernel_policy->result >= NECP_KERNEL_POLICY_RESULT_TRIGGER && kernel_policy->result <= NECP_KERNEL_POLICY_RESULT_NO_TRIGGER_SCOPED);
}
static inline bool
necp_kernel_socket_policy_results_overlap(struct necp_kernel_socket_policy *upper_policy, struct necp_kernel_socket_policy *lower_policy)
{
	if (upper_policy->result == NECP_KERNEL_POLICY_RESULT_DROP) {
		// Drop always cancels out lower policies
		return (TRUE);
	} else if (upper_policy->result == NECP_KERNEL_POLICY_RESULT_SOCKET_FILTER ||
	    upper_policy->result == NECP_KERNEL_POLICY_RESULT_ROUTE_RULES ||
	    upper_policy->result == NECP_KERNEL_POLICY_RESULT_USE_NETAGENT) {
		// Filters and route rules never cancel out lower policies
		return (FALSE);
	} else if (necp_kernel_socket_result_is_trigger_service_type(upper_policy)) {
		// Trigger/Scoping policies can overlap one another, but not other results
		return (necp_kernel_socket_result_is_trigger_service_type(lower_policy));
	} else if (upper_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
		if (upper_policy->session_order != lower_policy->session_order) {
			// A skip cannot override a policy of a different session
			return (FALSE);
		}

		if (upper_policy->result_parameter.skip_policy_order == 0 ||
		    lower_policy->order >= upper_policy->result_parameter.skip_policy_order) {
			// This policy is beyond the skip
			return (FALSE);
		} else {
			// This policy is inside the skip
			return (TRUE);
		}
	}

	// A hard pass, flow divert, tunnel, or scope will currently block out lower policies
	return (TRUE);
}
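
/*
 * Example (illustrative, not taken from the shipped policy data): the overlap check
 * above is what lets a higher-order policy shadow a lower one. Assume two policies
 * in the same session, A (order 10, result Skip, skip_policy_order 30) and
 * B (order 20, result Pass). B falls inside A's skip window (20 < 30), so
 * necp_kernel_socket_policy_results_overlap(A, B) returns TRUE and B can later be
 * judged unnecessary. A policy C with order 40 sits beyond the skip window
 * (40 >= 30), so the same call returns FALSE and C is never shadowed by A.
 */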
static bool
necp_kernel_socket_policy_is_unnecessary(struct necp_kernel_socket_policy *policy, struct necp_kernel_socket_policy **policy_array, int valid_indices)
{
	bool can_skip = FALSE;
	u_int32_t highest_skip_session_order = 0;
	u_int32_t highest_skip_order = 0;
	int i;
	for (i = 0; i < valid_indices; i++) {
		struct necp_kernel_socket_policy *compared_policy = policy_array[i];

		// For policies in a skip window, we can't mark conflicting policies as unnecessary
		if (can_skip) {
			if (highest_skip_session_order != compared_policy->session_order ||
			    (highest_skip_order != 0 && compared_policy->order >= highest_skip_order)) {
				// If we've moved on to the next session, or passed the skip window
				highest_skip_session_order = 0;
				highest_skip_order = 0;
				can_skip = FALSE;
			} else {
				// If this policy is also a skip, it can increase the skip window
				if (compared_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
					if (compared_policy->result_parameter.skip_policy_order > highest_skip_order) {
						highest_skip_order = compared_policy->result_parameter.skip_policy_order;
					}
				}
				continue;
			}
		}

		if (compared_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
			// This policy is a skip. Set the skip window accordingly
			can_skip = TRUE;
			highest_skip_session_order = compared_policy->session_order;
			highest_skip_order = compared_policy->result_parameter.skip_policy_order;
		}

		// The result of the compared policy must be able to block out this policy result
		if (!necp_kernel_socket_policy_results_overlap(compared_policy, policy)) {
			continue;
		}

		// If new policy matches All Interfaces, compared policy must also
		if ((policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES) && !(compared_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES)) {
			continue;
		}

		// Default makes lower policies unnecessary always
		if (compared_policy->condition_mask == 0) {
			return (TRUE);
		}

		// Compared must be more general than policy, and include only conditions within policy
		if ((policy->condition_mask & compared_policy->condition_mask) != compared_policy->condition_mask) {
			continue;
		}

		// Negative conditions must match for the overlapping conditions
		if ((policy->condition_negated_mask & compared_policy->condition_mask) != (compared_policy->condition_negated_mask & compared_policy->condition_mask)) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_DOMAIN &&
		    strcmp(compared_policy->cond_domain, policy->cond_domain) != 0) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID &&
		    compared_policy->cond_account_id != policy->cond_account_id) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID &&
		    compared_policy->cond_policy_id != policy->cond_policy_id) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID &&
		    compared_policy->cond_app_id != policy->cond_app_id) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID &&
		    compared_policy->cond_real_app_id != policy->cond_real_app_id) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_PID &&
		    compared_policy->cond_pid != policy->cond_pid) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_UID &&
		    compared_policy->cond_uid != policy->cond_uid) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE &&
		    compared_policy->cond_bound_interface != policy->cond_bound_interface) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL &&
		    compared_policy->cond_protocol != policy->cond_protocol) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS &&
		    !(compared_policy->cond_traffic_class.start_tc <= policy->cond_traffic_class.start_tc &&
		    compared_policy->cond_traffic_class.end_tc >= policy->cond_traffic_class.end_tc)) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
			if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
				if (!necp_is_range_in_range((struct sockaddr *)&policy->cond_local_start, (struct sockaddr *)&policy->cond_local_end, (struct sockaddr *)&compared_policy->cond_local_start, (struct sockaddr *)&compared_policy->cond_local_end)) {
					continue;
				}
			} else if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
				if (compared_policy->cond_local_prefix > policy->cond_local_prefix ||
				    !necp_is_addr_in_subnet((struct sockaddr *)&policy->cond_local_start, (struct sockaddr *)&compared_policy->cond_local_start, compared_policy->cond_local_prefix)) {
					continue;
				}
			}
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
			if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
				if (!necp_is_range_in_range((struct sockaddr *)&policy->cond_remote_start, (struct sockaddr *)&policy->cond_remote_end, (struct sockaddr *)&compared_policy->cond_remote_start, (struct sockaddr *)&compared_policy->cond_remote_end)) {
					continue;
				}
			} else if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
				if (compared_policy->cond_remote_prefix > policy->cond_remote_prefix ||
				    !necp_is_addr_in_subnet((struct sockaddr *)&policy->cond_remote_start, (struct sockaddr *)&compared_policy->cond_remote_start, compared_policy->cond_remote_prefix)) {
					continue;
				}
			}
		}

		return (TRUE);
	}

	return (FALSE);
}
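
/*
 * Example (illustrative): a policy is dropped as unnecessary only when an earlier
 * policy in the same bucket both overlaps in result and is strictly more general
 * in its conditions. If policy X matches (APP_ID | PROTOCOL) with protocol TCP,
 * and an earlier policy Y matches only PROTOCOL with protocol TCP and a blocking
 * result, Y's condition mask is a subset of X's and every shared condition agrees,
 * so X is unnecessary. If Y instead required protocol UDP, or negated PROTOCOL
 * while X did not, the checks above fall through to 'continue' and X is kept.
 */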
static bool
necp_kernel_socket_policies_reprocess(void)
{
	int app_i;
	int bucket_allocation_counts[NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS];
	int bucket_current_free_index[NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS];
	int app_layer_allocation_count = 0;
	int app_layer_current_free_index = 0;
	struct necp_kernel_socket_policy *kernel_policy = NULL;

	lck_rw_assert(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	// Reset masks and counts
	necp_kernel_application_policies_condition_mask = 0;
	necp_kernel_socket_policies_condition_mask = 0;
	necp_kernel_application_policies_count = 0;
	necp_kernel_socket_policies_count = 0;
	necp_kernel_socket_policies_non_app_count = 0;

	// Reset all maps to NULL
	for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) {
		if (necp_kernel_socket_policies_map[app_i] != NULL) {
			FREE(necp_kernel_socket_policies_map[app_i], M_NECP);
			necp_kernel_socket_policies_map[app_i] = NULL;
		}

		bucket_allocation_counts[app_i] = 0;
	}
	if (necp_kernel_socket_policies_app_layer_map != NULL) {
		FREE(necp_kernel_socket_policies_app_layer_map, M_NECP);
		necp_kernel_socket_policies_app_layer_map = NULL;
	}

	// Create masks and counts
	LIST_FOREACH(kernel_policy, &necp_kernel_socket_policies, chain) {
		// App layer mask/count
		necp_kernel_application_policies_condition_mask |= kernel_policy->condition_mask;
		necp_kernel_application_policies_count++;
		app_layer_allocation_count++;

		// Update socket layer bucket mask/counts
		necp_kernel_socket_policies_condition_mask |= kernel_policy->condition_mask;
		necp_kernel_socket_policies_count++;

		if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID) ||
		    kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_APP_ID) {
			necp_kernel_socket_policies_non_app_count++;
			for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) {
				bucket_allocation_counts[app_i]++;
			}
		} else {
			bucket_allocation_counts[NECP_SOCKET_MAP_APP_ID_TO_BUCKET(kernel_policy->cond_app_id)]++;
		}
	}

	for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) {
		if (bucket_allocation_counts[app_i] > 0) {
			// Allocate a NULL-terminated array of policy pointers for each bucket
			MALLOC(necp_kernel_socket_policies_map[app_i], struct necp_kernel_socket_policy **, sizeof(struct necp_kernel_socket_policy *) * (bucket_allocation_counts[app_i] + 1), M_NECP, M_WAITOK);
			if (necp_kernel_socket_policies_map[app_i] == NULL) {
				goto fail;
			}

			// Initialize the first entry to NULL
			(necp_kernel_socket_policies_map[app_i])[0] = NULL;
		}
		bucket_current_free_index[app_i] = 0;
	}
	MALLOC(necp_kernel_socket_policies_app_layer_map, struct necp_kernel_socket_policy **, sizeof(struct necp_kernel_socket_policy *) * (app_layer_allocation_count + 1), M_NECP, M_WAITOK);
	if (necp_kernel_socket_policies_app_layer_map == NULL) {
		goto fail;
	}
	necp_kernel_socket_policies_app_layer_map[0] = NULL;

	LIST_FOREACH(kernel_policy, &necp_kernel_socket_policies, chain) {
		// Insert pointers into map
		if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID) ||
		    kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_APP_ID) {
			for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) {
				if (!necp_kernel_socket_policy_is_unnecessary(kernel_policy, necp_kernel_socket_policies_map[app_i], bucket_current_free_index[app_i])) {
					(necp_kernel_socket_policies_map[app_i])[(bucket_current_free_index[app_i])] = kernel_policy;
					bucket_current_free_index[app_i]++;
					(necp_kernel_socket_policies_map[app_i])[(bucket_current_free_index[app_i])] = NULL;
				}
			}
		} else {
			app_i = NECP_SOCKET_MAP_APP_ID_TO_BUCKET(kernel_policy->cond_app_id);
			if (!necp_kernel_socket_policy_is_unnecessary(kernel_policy, necp_kernel_socket_policies_map[app_i], bucket_current_free_index[app_i])) {
				(necp_kernel_socket_policies_map[app_i])[(bucket_current_free_index[app_i])] = kernel_policy;
				bucket_current_free_index[app_i]++;
				(necp_kernel_socket_policies_map[app_i])[(bucket_current_free_index[app_i])] = NULL;
			}
		}

		if (!necp_kernel_socket_policy_is_unnecessary(kernel_policy, necp_kernel_socket_policies_app_layer_map, app_layer_current_free_index)) {
			necp_kernel_socket_policies_app_layer_map[app_layer_current_free_index] = kernel_policy;
			app_layer_current_free_index++;
			necp_kernel_socket_policies_app_layer_map[app_layer_current_free_index] = NULL;
		}
	}
	necp_kernel_socket_policies_dump_all();
	BUMP_KERNEL_SOCKET_POLICIES_GENERATION_COUNT();
	return (TRUE);

fail:
	// Free memory, reset masks to 0
	necp_kernel_application_policies_condition_mask = 0;
	necp_kernel_socket_policies_condition_mask = 0;
	necp_kernel_application_policies_count = 0;
	necp_kernel_socket_policies_count = 0;
	necp_kernel_socket_policies_non_app_count = 0;
	for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) {
		if (necp_kernel_socket_policies_map[app_i] != NULL) {
			FREE(necp_kernel_socket_policies_map[app_i], M_NECP);
			necp_kernel_socket_policies_map[app_i] = NULL;
		}
	}
	if (necp_kernel_socket_policies_app_layer_map != NULL) {
		FREE(necp_kernel_socket_policies_app_layer_map, M_NECP);
		necp_kernel_socket_policies_app_layer_map = NULL;
	}
	return (FALSE);
}
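
/*
 * Illustrative sketch of how the maps built above are consumed (assumed reader
 * pattern; the actual matching code lives elsewhere in this file). Each bucket is
 * a NULL-terminated array, so a lookup can walk it without a stored count:
 *
 *	struct necp_kernel_socket_policy **bucket =
 *	    necp_kernel_socket_policies_map[NECP_SOCKET_MAP_APP_ID_TO_BUCKET(app_id)];
 *	for (int i = 0; bucket != NULL && bucket[i] != NULL; i++) {
 *		// bucket[i] keeps the ordering of the source policy list
 *	}
 */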
static u_int32_t
necp_get_new_string_id(void)
{
	u_int32_t newid = 0;

	lck_rw_assert(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	necp_last_string_id++;
	if (necp_last_string_id < 1) {
		necp_last_string_id = 1;
	}

	newid = necp_last_string_id;
	if (newid == 0) {
		NECPLOG0(LOG_DEBUG, "Allocate string id failed.\n");
		return (0);
	}

	return (newid);
}

static struct necp_string_id_mapping *
necp_lookup_string_to_id_locked(struct necp_string_id_mapping_list *list, char *string)
{
	struct necp_string_id_mapping *searchentry = NULL;
	struct necp_string_id_mapping *foundentry = NULL;

	LIST_FOREACH(searchentry, list, chain) {
		if (strcmp(searchentry->string, string) == 0) {
			foundentry = searchentry;
			break;
		}
	}

	return (foundentry);
}

static u_int32_t
necp_create_string_to_id_mapping(struct necp_string_id_mapping_list *list, char *string)
{
	u_int32_t string_id = 0;
	struct necp_string_id_mapping *existing_mapping = NULL;

	lck_rw_assert(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	existing_mapping = necp_lookup_string_to_id_locked(list, string);
	if (existing_mapping != NULL) {
		string_id = existing_mapping->id;
		existing_mapping->refcount++;
	} else {
		struct necp_string_id_mapping *new_mapping = NULL;
		MALLOC(new_mapping, struct necp_string_id_mapping *, sizeof(struct necp_string_id_mapping), M_NECP, M_WAITOK);
		if (new_mapping != NULL) {
			memset(new_mapping, 0, sizeof(struct necp_string_id_mapping));

			size_t length = strlen(string) + 1;
			MALLOC(new_mapping->string, char *, length, M_NECP, M_WAITOK);
			if (new_mapping->string != NULL) {
				memcpy(new_mapping->string, string, length);
				new_mapping->id = necp_get_new_string_id();
				new_mapping->refcount = 1;
				LIST_INSERT_HEAD(list, new_mapping, chain);
				string_id = new_mapping->id;
			} else {
				FREE(new_mapping, M_NECP);
				new_mapping = NULL;
			}
		}
	}
	return (string_id);
}

static bool
necp_remove_string_to_id_mapping(struct necp_string_id_mapping_list *list, char *string)
{
	struct necp_string_id_mapping *existing_mapping = NULL;

	lck_rw_assert(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	existing_mapping = necp_lookup_string_to_id_locked(list, string);
	if (existing_mapping != NULL) {
		if (--existing_mapping->refcount == 0) {
			LIST_REMOVE(existing_mapping, chain);
			FREE(existing_mapping->string, M_NECP);
			FREE(existing_mapping, M_NECP);
		}
		return (TRUE);
	}

	return (FALSE);
}
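
/*
 * Example (illustrative): account strings are interned through this table. Creating
 * a mapping for a hypothetical account string "com.example.account" twice returns
 * the same id with refcount 2; the entry and its copied string are only freed once
 * necp_remove_string_to_id_mapping() has been called a matching number of times and
 * the refcount drops back to zero.
 */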
static u_int32_t
necp_get_new_route_rule_id(void)
{
	u_int32_t newid = 0;

	lck_rw_assert(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	necp_last_route_rule_id++;
	if (necp_last_route_rule_id < 1 || necp_last_route_rule_id > UINT16_MAX) {
		necp_last_route_rule_id = 1;
	}

	newid = necp_last_route_rule_id;
	if (newid == 0) {
		NECPLOG0(LOG_DEBUG, "Allocate route rule id failed.\n");
		return (0);
	}

	return (newid);
}

static u_int32_t
necp_get_new_aggregate_route_rule_id(void)
{
	u_int32_t newid = 0;

	lck_rw_assert(&necp_route_rule_lock, LCK_RW_ASSERT_EXCLUSIVE);

	necp_last_aggregate_route_rule_id++;
	if (necp_last_aggregate_route_rule_id <= UINT16_MAX) {
		necp_last_aggregate_route_rule_id = UINT16_MAX + 1;
	}

	newid = necp_last_aggregate_route_rule_id;
	if (newid == 0) {
		NECPLOG0(LOG_DEBUG, "Allocate aggregate route rule id failed.\n");
		return (0);
	}

	return (newid);
}
static struct necp_route_rule *
necp_lookup_route_rule_locked(struct necp_route_rule_list *list, u_int32_t route_rule_id)
{
	struct necp_route_rule *searchentry = NULL;
	struct necp_route_rule *foundentry = NULL;

	LIST_FOREACH(searchentry, list, chain) {
		if (searchentry->id == route_rule_id) {
			foundentry = searchentry;
			break;
		}
	}

	return (foundentry);
}

static struct necp_route_rule *
necp_lookup_route_rule_by_contents_locked(struct necp_route_rule_list *list, u_int32_t default_action, u_int8_t cellular_action, u_int8_t wifi_action, u_int8_t wired_action, u_int8_t expensive_action, u_int32_t *if_indices, u_int8_t *if_actions)
{
	struct necp_route_rule *searchentry = NULL;
	struct necp_route_rule *foundentry = NULL;

	LIST_FOREACH(searchentry, list, chain) {
		if (searchentry->default_action == default_action &&
		    searchentry->cellular_action == cellular_action &&
		    searchentry->wifi_action == wifi_action &&
		    searchentry->wired_action == wired_action &&
		    searchentry->expensive_action == expensive_action) {
			bool match_failed = FALSE;
			int index_a = 0;
			int index_b = 0;
			int count_a = 0;
			int count_b = 0;
			for (index_a = 0; index_a < MAX_ROUTE_RULE_INTERFACES; index_a++) {
				bool found_index = FALSE;
				if (searchentry->exception_if_indices[index_a] == 0) {
					break;
				}
				count_a++;
				for (index_b = 0; index_b < MAX_ROUTE_RULE_INTERFACES; index_b++) {
					if (if_indices[index_b] == 0) {
						break;
					}
					if (index_b >= count_b) {
						count_b = index_b + 1;
					}
					if (searchentry->exception_if_indices[index_a] == if_indices[index_b] &&
					    searchentry->exception_if_actions[index_a] == if_actions[index_b]) {
						found_index = TRUE;
						break;
					}
				}
				if (!found_index) {
					match_failed = TRUE;
					break;
				}
			}
			if (!match_failed && count_a == count_b) {
				foundentry = searchentry;
				break;
			}
		}
	}

	return (foundentry);
}
static u_int32_t
necp_create_route_rule(struct necp_route_rule_list *list, u_int8_t *route_rules_array, u_int32_t route_rules_array_size)
{
	u_int32_t offset = 0;
	u_int32_t route_rule_id = 0;
	struct necp_route_rule *existing_rule = NULL;
	u_int32_t default_action = NECP_ROUTE_RULE_ALLOW_INTERFACE;
	u_int8_t cellular_action = NECP_ROUTE_RULE_NONE;
	u_int8_t wifi_action = NECP_ROUTE_RULE_NONE;
	u_int8_t wired_action = NECP_ROUTE_RULE_NONE;
	u_int8_t expensive_action = NECP_ROUTE_RULE_NONE;
	u_int32_t if_indices[MAX_ROUTE_RULE_INTERFACES];
	size_t num_valid_indices = 0;
	memset(&if_indices, 0, sizeof(if_indices));
	u_int8_t if_actions[MAX_ROUTE_RULE_INTERFACES];
	memset(&if_actions, 0, sizeof(if_actions));

	lck_rw_assert(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	if (route_rules_array == NULL || route_rules_array_size == 0) {
		return (0);
	}

	while (offset < route_rules_array_size) {
		ifnet_t rule_interface = NULL;
		char interface_name[IFXNAMSIZ];
		u_int32_t length = 0;
		u_int8_t *value = necp_buffer_get_tlv_value(route_rules_array, offset, &length);

		u_int8_t rule_type = necp_policy_condition_get_type_from_buffer(value, length);
		u_int8_t rule_flags = necp_policy_condition_get_flags_from_buffer(value, length);
		u_int32_t rule_length = necp_policy_condition_get_value_length_from_buffer(value, length);
		u_int8_t *rule_value = necp_policy_condition_get_value_pointer_from_buffer(value, length);

		if (rule_type == NECP_ROUTE_RULE_NONE) {
			// Don't allow an explicit rule to be None action
			continue;
		}

		if (rule_length == 0) {
			if (rule_flags & NECP_ROUTE_RULE_FLAG_CELLULAR) {
				cellular_action = rule_type;
			}
			if (rule_flags & NECP_ROUTE_RULE_FLAG_WIFI) {
				wifi_action = rule_type;
			}
			if (rule_flags & NECP_ROUTE_RULE_FLAG_WIRED) {
				wired_action = rule_type;
			}
			if (rule_flags & NECP_ROUTE_RULE_FLAG_EXPENSIVE) {
				expensive_action = rule_type;
			}
			if (rule_flags == 0) {
				default_action = rule_type;
			}
			offset += sizeof(u_int8_t) + sizeof(u_int32_t) + length;
			continue;
		}

		if (num_valid_indices >= MAX_ROUTE_RULE_INTERFACES) {
			offset += sizeof(u_int8_t) + sizeof(u_int32_t) + length;
			continue;
		}

		memcpy(interface_name, rule_value, rule_length);
		interface_name[length - 1] = 0; // Make sure the string is NULL terminated
		if (ifnet_find_by_name(interface_name, &rule_interface) == 0) {
			if_actions[num_valid_indices] = rule_type;
			if_indices[num_valid_indices++] = rule_interface->if_index;
		}

		offset += sizeof(u_int8_t) + sizeof(u_int32_t) + length;
	}

	existing_rule = necp_lookup_route_rule_by_contents_locked(list, default_action, cellular_action, wifi_action, wired_action, expensive_action, if_indices, if_actions);
	if (existing_rule != NULL) {
		route_rule_id = existing_rule->id;
		existing_rule->refcount++;
	} else {
		struct necp_route_rule *new_rule = NULL;
		MALLOC(new_rule, struct necp_route_rule *, sizeof(struct necp_route_rule), M_NECP, M_WAITOK);
		if (new_rule != NULL) {
			memset(new_rule, 0, sizeof(struct necp_route_rule));
			route_rule_id = new_rule->id = necp_get_new_route_rule_id();
			new_rule->default_action = default_action;
			new_rule->cellular_action = cellular_action;
			new_rule->wifi_action = wifi_action;
			new_rule->wired_action = wired_action;
			new_rule->expensive_action = expensive_action;
			memcpy(&new_rule->exception_if_indices, &if_indices, sizeof(if_indices));
			memcpy(&new_rule->exception_if_actions, &if_actions, sizeof(if_actions));
			new_rule->refcount = 1;
			LIST_INSERT_HEAD(list, new_rule, chain);
		}
	}
	return (route_rule_id);
}
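
/*
 * Example (illustrative): a route rules array is a sequence of TLVs. A client might
 * encode two rules, one carrying no value (say "deny cellular", expressed purely
 * through the rule type and flags with rule_length 0) and one naming an interface
 * (say "allow en0", whose value is the interface name string). The loop above folds
 * the first kind into the per-flag actions (cellular/wifi/wired/expensive/default)
 * and the second kind into the exception_if_indices/exception_if_actions arrays,
 * capped at MAX_ROUTE_RULE_INTERFACES entries.
 */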
static void
necp_remove_aggregate_route_rule_for_id(u_int32_t rule_id)
{
	int index = 0;

	lck_rw_lock_exclusive(&necp_route_rule_lock);

	struct necp_aggregate_route_rule *existing_rule = NULL;
	struct necp_aggregate_route_rule *tmp_rule = NULL;

	LIST_FOREACH_SAFE(existing_rule, &necp_aggregate_route_rules, chain, tmp_rule) {
		for (index = 0; index < MAX_AGGREGATE_ROUTE_RULES; index++) {
			u_int32_t route_rule_id = existing_rule->rule_ids[index];
			if (route_rule_id == rule_id) {
				LIST_REMOVE(existing_rule, chain);
				FREE(existing_rule, M_NECP);
				break;
			}
		}
	}

	lck_rw_done(&necp_route_rule_lock);
}

static bool
necp_remove_route_rule(struct necp_route_rule_list *list, u_int32_t route_rule_id)
{
	struct necp_route_rule *existing_rule = NULL;

	lck_rw_assert(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	existing_rule = necp_lookup_route_rule_locked(list, route_rule_id);
	if (existing_rule != NULL) {
		if (--existing_rule->refcount == 0) {
			necp_remove_aggregate_route_rule_for_id(existing_rule->id);
			LIST_REMOVE(existing_rule, chain);
			FREE(existing_rule, M_NECP);
		}
		return (TRUE);
	}

	return (FALSE);
}

static struct necp_aggregate_route_rule *
necp_lookup_aggregate_route_rule_locked(u_int32_t route_rule_id)
{
	struct necp_aggregate_route_rule *searchentry = NULL;
	struct necp_aggregate_route_rule *foundentry = NULL;

	lck_rw_lock_shared(&necp_route_rule_lock);

	LIST_FOREACH(searchentry, &necp_aggregate_route_rules, chain) {
		if (searchentry->id == route_rule_id) {
			foundentry = searchentry;
			break;
		}
	}

	lck_rw_done(&necp_route_rule_lock);

	return (foundentry);
}

static u_int32_t
necp_create_aggregate_route_rule(u_int32_t *rule_ids)
{
	u_int32_t aggregate_route_rule_id = 0;
	struct necp_aggregate_route_rule *new_rule = NULL;
	struct necp_aggregate_route_rule *existing_rule = NULL;

	LIST_FOREACH(existing_rule, &necp_aggregate_route_rules, chain) {
		if (memcmp(existing_rule->rule_ids, rule_ids, (sizeof(u_int32_t) * MAX_AGGREGATE_ROUTE_RULES)) == 0) {
			return (existing_rule->id);
		}
	}

	lck_rw_lock_exclusive(&necp_route_rule_lock);

	LIST_FOREACH(existing_rule, &necp_aggregate_route_rules, chain) {
		// Re-check, in case something else created the rule while we are waiting to lock
		if (memcmp(existing_rule->rule_ids, rule_ids, (sizeof(u_int32_t) * MAX_AGGREGATE_ROUTE_RULES)) == 0) {
			lck_rw_done(&necp_route_rule_lock);
			return (existing_rule->id);
		}
	}

	MALLOC(new_rule, struct necp_aggregate_route_rule *, sizeof(struct necp_aggregate_route_rule), M_NECP, M_WAITOK);
	if (new_rule != NULL) {
		memset(new_rule, 0, sizeof(struct necp_aggregate_route_rule));
		aggregate_route_rule_id = new_rule->id = necp_get_new_aggregate_route_rule_id();
		new_rule->id = aggregate_route_rule_id;
		memcpy(new_rule->rule_ids, rule_ids, (sizeof(u_int32_t) * MAX_AGGREGATE_ROUTE_RULES));
		LIST_INSERT_HEAD(&necp_aggregate_route_rules, new_rule, chain);
	}
	lck_rw_done(&necp_route_rule_lock);

	return (aggregate_route_rule_id);
}
#define NECP_NULL_SERVICE_ID 1
static u_int32_t
necp_get_new_uuid_id(void)
{
	u_int32_t newid = 0;

	lck_rw_assert(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	necp_last_uuid_id++;
	if (necp_last_uuid_id < (NECP_NULL_SERVICE_ID + 1)) {
		necp_last_uuid_id = (NECP_NULL_SERVICE_ID + 1);
	}

	newid = necp_last_uuid_id;
	if (newid == 0) {
		NECPLOG0(LOG_DEBUG, "Allocate uuid id failed.\n");
		return (0);
	}

	return (newid);
}

static struct necp_uuid_id_mapping *
necp_uuid_lookup_app_id_locked(uuid_t uuid)
{
	struct necp_uuid_id_mapping *searchentry = NULL;
	struct necp_uuid_id_mapping *foundentry = NULL;

	LIST_FOREACH(searchentry, APPUUIDHASH(uuid), chain) {
		if (uuid_compare(searchentry->uuid, uuid) == 0) {
			foundentry = searchentry;
			break;
		}
	}

	return (foundentry);
}

static u_int32_t
necp_create_uuid_app_id_mapping(uuid_t uuid, bool *allocated_mapping, bool uuid_policy_table)
{
	u_int32_t local_id = 0;
	struct necp_uuid_id_mapping *existing_mapping = NULL;

	lck_rw_assert(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	if (allocated_mapping) {
		*allocated_mapping = FALSE;
	}

	existing_mapping = necp_uuid_lookup_app_id_locked(uuid);
	if (existing_mapping != NULL) {
		local_id = existing_mapping->id;
		existing_mapping->refcount++;
		if (uuid_policy_table) {
			existing_mapping->table_refcount++;
		}
	} else {
		struct necp_uuid_id_mapping *new_mapping = NULL;
		MALLOC(new_mapping, struct necp_uuid_id_mapping *, sizeof(*new_mapping), M_NECP, M_WAITOK);
		if (new_mapping != NULL) {
			uuid_copy(new_mapping->uuid, uuid);
			new_mapping->id = necp_get_new_uuid_id();
			new_mapping->refcount = 1;
			if (uuid_policy_table) {
				new_mapping->table_refcount = 1;
			} else {
				new_mapping->table_refcount = 0;
			}

			LIST_INSERT_HEAD(APPUUIDHASH(uuid), new_mapping, chain);

			if (allocated_mapping) {
				*allocated_mapping = TRUE;
			}

			local_id = new_mapping->id;
		}
	}

	return (local_id);
}

static bool
necp_remove_uuid_app_id_mapping(uuid_t uuid, bool *removed_mapping, bool uuid_policy_table)
{
	struct necp_uuid_id_mapping *existing_mapping = NULL;

	lck_rw_assert(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	if (removed_mapping) {
		*removed_mapping = FALSE;
	}

	existing_mapping = necp_uuid_lookup_app_id_locked(uuid);
	if (existing_mapping != NULL) {
		if (uuid_policy_table) {
			existing_mapping->table_refcount--;
		}
		if (--existing_mapping->refcount == 0) {
			LIST_REMOVE(existing_mapping, chain);
			FREE(existing_mapping, M_NECP);
			if (removed_mapping) {
				*removed_mapping = TRUE;
			}
		}
		return (TRUE);
	}

	return (FALSE);
}

static struct necp_uuid_id_mapping *
necp_uuid_get_null_service_id_mapping(void)
{
	static struct necp_uuid_id_mapping null_mapping;
	uuid_clear(null_mapping.uuid);
	null_mapping.id = NECP_NULL_SERVICE_ID;

	return (&null_mapping);
}

static struct necp_uuid_id_mapping *
necp_uuid_lookup_service_id_locked(uuid_t uuid)
{
	struct necp_uuid_id_mapping *searchentry = NULL;
	struct necp_uuid_id_mapping *foundentry = NULL;

	if (uuid_is_null(uuid)) {
		return necp_uuid_get_null_service_id_mapping();
	}

	LIST_FOREACH(searchentry, &necp_uuid_service_id_list, chain) {
		if (uuid_compare(searchentry->uuid, uuid) == 0) {
			foundentry = searchentry;
			break;
		}
	}

	return (foundentry);
}

static struct necp_uuid_id_mapping *
necp_uuid_lookup_uuid_with_service_id_locked(u_int32_t local_id)
{
	struct necp_uuid_id_mapping *searchentry = NULL;
	struct necp_uuid_id_mapping *foundentry = NULL;

	if (local_id == NECP_NULL_SERVICE_ID) {
		return necp_uuid_get_null_service_id_mapping();
	}

	LIST_FOREACH(searchentry, &necp_uuid_service_id_list, chain) {
		if (searchentry->id == local_id) {
			foundentry = searchentry;
			break;
		}
	}

	return (foundentry);
}

static u_int32_t
necp_create_uuid_service_id_mapping(uuid_t uuid)
{
	u_int32_t local_id = 0;
	struct necp_uuid_id_mapping *existing_mapping = NULL;

	if (uuid_is_null(uuid)) {
		return (NECP_NULL_SERVICE_ID);
	}

	lck_rw_assert(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	existing_mapping = necp_uuid_lookup_service_id_locked(uuid);
	if (existing_mapping != NULL) {
		local_id = existing_mapping->id;
		existing_mapping->refcount++;
	} else {
		struct necp_uuid_id_mapping *new_mapping = NULL;
		MALLOC(new_mapping, struct necp_uuid_id_mapping *, sizeof(*new_mapping), M_NECP, M_WAITOK);
		if (new_mapping != NULL) {
			uuid_copy(new_mapping->uuid, uuid);
			new_mapping->id = necp_get_new_uuid_id();
			new_mapping->refcount = 1;

			LIST_INSERT_HEAD(&necp_uuid_service_id_list, new_mapping, chain);

			local_id = new_mapping->id;
		}
	}

	return (local_id);
}

static bool
necp_remove_uuid_service_id_mapping(uuid_t uuid)
{
	struct necp_uuid_id_mapping *existing_mapping = NULL;

	if (uuid_is_null(uuid)) {
		return (TRUE);
	}

	lck_rw_assert(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	existing_mapping = necp_uuid_lookup_app_id_locked(uuid);
	if (existing_mapping != NULL) {
		if (--existing_mapping->refcount == 0) {
			LIST_REMOVE(existing_mapping, chain);
			FREE(existing_mapping, M_NECP);
		}
		return (TRUE);
	}

	return (FALSE);
}
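
/*
 * Note: two separate mapping tables are maintained here. Application UUIDs are
 * hashed into buckets via APPUUIDHASH() and may additionally be pushed to the
 * proc_uuid_policy table (tracked by table_refcount), while service UUIDs live in
 * the flat necp_uuid_service_id_list. The all-zero UUID always maps to the reserved
 * NECP_NULL_SERVICE_ID (1), which is why freshly allocated uuid ids start above
 * that value.
 */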
static bool
necp_kernel_socket_policies_update_uuid_table(void)
{
	lck_rw_assert(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	if (necp_uuid_app_id_mappings_dirty) {
		if (proc_uuid_policy_kernel(PROC_UUID_POLICY_OPERATION_CLEAR, NULL, PROC_UUID_NECP_APP_POLICY) < 0) {
			NECPLOG0(LOG_DEBUG, "Error clearing uuids from policy table\n");
			return (FALSE);
		}

		if (necp_num_uuid_app_id_mappings > 0) {
			struct necp_uuid_id_mapping_head *uuid_list_head = NULL;
			for (uuid_list_head = &necp_uuid_app_id_hashtbl[necp_uuid_app_id_hash_num_buckets - 1]; uuid_list_head >= necp_uuid_app_id_hashtbl; uuid_list_head--) {
				struct necp_uuid_id_mapping *mapping = NULL;
				LIST_FOREACH(mapping, uuid_list_head, chain) {
					if (mapping->table_refcount > 0 &&
					    proc_uuid_policy_kernel(PROC_UUID_POLICY_OPERATION_ADD, mapping->uuid, PROC_UUID_NECP_APP_POLICY) < 0) {
						NECPLOG0(LOG_DEBUG, "Error adding uuid to policy table\n");
					}
				}
			}
		}

		necp_uuid_app_id_mappings_dirty = FALSE;
	}

	return (TRUE);
}
#define NECP_KERNEL_VALID_IP_OUTPUT_CONDITIONS (NECP_KERNEL_CONDITION_ALL_INTERFACES | NECP_KERNEL_CONDITION_BOUND_INTERFACE | NECP_KERNEL_CONDITION_PROTOCOL | NECP_KERNEL_CONDITION_LOCAL_START | NECP_KERNEL_CONDITION_LOCAL_END | NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_REMOTE_START | NECP_KERNEL_CONDITION_REMOTE_END | NECP_KERNEL_CONDITION_REMOTE_PREFIX | NECP_KERNEL_CONDITION_POLICY_ID | NECP_KERNEL_CONDITION_LAST_INTERFACE)
static necp_kernel_policy_id
necp_kernel_ip_output_policy_add(necp_policy_id parent_policy_id, necp_policy_order order, necp_policy_order suborder, u_int32_t session_order, int session_pid, u_int32_t condition_mask, u_int32_t condition_negated_mask, necp_kernel_policy_id cond_policy_id, ifnet_t cond_bound_interface, u_int32_t cond_last_interface_index, u_int16_t cond_protocol, union necp_sockaddr_union *cond_local_start, union necp_sockaddr_union *cond_local_end, u_int8_t cond_local_prefix, union necp_sockaddr_union *cond_remote_start, union necp_sockaddr_union *cond_remote_end, u_int8_t cond_remote_prefix, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter)
{
	struct necp_kernel_ip_output_policy *new_kernel_policy = NULL;
	struct necp_kernel_ip_output_policy *tmp_kernel_policy = NULL;

	MALLOC_ZONE(new_kernel_policy, struct necp_kernel_ip_output_policy *, sizeof(*new_kernel_policy), M_NECP_IP_POLICY, M_WAITOK);
	if (new_kernel_policy == NULL) {
		goto done;
	}

	memset(new_kernel_policy, 0, sizeof(*new_kernel_policy));
	new_kernel_policy->parent_policy_id = parent_policy_id;
	new_kernel_policy->id = necp_kernel_policy_get_new_id();
	new_kernel_policy->suborder = suborder;
	new_kernel_policy->order = order;
	new_kernel_policy->session_order = session_order;
	new_kernel_policy->session_pid = session_pid;

	// Sanitize condition mask
	new_kernel_policy->condition_mask = (condition_mask & NECP_KERNEL_VALID_IP_OUTPUT_CONDITIONS);
	if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES) && (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE)) {
		new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_BOUND_INTERFACE;
	}
	if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) && (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX)) {
		new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_LOCAL_PREFIX;
	}
	if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) && (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX)) {
		new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_REMOTE_PREFIX;
	}
	new_kernel_policy->condition_negated_mask = condition_negated_mask & new_kernel_policy->condition_mask;

	// Set condition values
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID) {
		new_kernel_policy->cond_policy_id = cond_policy_id;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
		if (cond_bound_interface) {
			ifnet_reference(cond_bound_interface);
		}
		new_kernel_policy->cond_bound_interface = cond_bound_interface;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LAST_INTERFACE) {
		new_kernel_policy->cond_last_interface_index = cond_last_interface_index;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
		new_kernel_policy->cond_protocol = cond_protocol;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
		memcpy(&new_kernel_policy->cond_local_start, cond_local_start, cond_local_start->sa.sa_len);
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
		memcpy(&new_kernel_policy->cond_local_end, cond_local_end, cond_local_end->sa.sa_len);
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
		new_kernel_policy->cond_local_prefix = cond_local_prefix;
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
		memcpy(&new_kernel_policy->cond_remote_start, cond_remote_start, cond_remote_start->sa.sa_len);
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
		memcpy(&new_kernel_policy->cond_remote_end, cond_remote_end, cond_remote_end->sa.sa_len);
	}
	if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
		new_kernel_policy->cond_remote_prefix = cond_remote_prefix;
	}

	new_kernel_policy->result = result;
	memcpy(&new_kernel_policy->result_parameter, &result_parameter, sizeof(result_parameter));

	if (necp_debug) {
		NECPLOG(LOG_DEBUG, "Added kernel policy: ip output, id=%d, mask=%x\n", new_kernel_policy->id, new_kernel_policy->condition_mask);
	}
	LIST_INSERT_SORTED_THRICE_ASCENDING(&necp_kernel_ip_output_policies, new_kernel_policy, chain, session_order, order, suborder, tmp_kernel_policy);
done:
	return (new_kernel_policy ? new_kernel_policy->id : 0);
}
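
/*
 * Example (illustrative): if a session submits an IP output policy with both
 * NECP_KERNEL_CONDITION_ALL_INTERFACES and NECP_KERNEL_CONDITION_BOUND_INTERFACE
 * set, the sanitization above keeps only ALL_INTERFACES; likewise an address range
 * condition (LOCAL_END) wins over a prefix condition (LOCAL_PREFIX). Bits outside
 * NECP_KERNEL_VALID_IP_OUTPUT_CONDITIONS are dropped entirely, so socket-only
 * conditions such as application or account ids never reach the IP output list.
 */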
static struct necp_kernel_ip_output_policy *
necp_kernel_ip_output_policy_find(necp_kernel_policy_id policy_id)
{
	struct necp_kernel_ip_output_policy *kernel_policy = NULL;
	struct necp_kernel_ip_output_policy *tmp_kernel_policy = NULL;

	if (policy_id == 0) {
		return (NULL);
	}

	LIST_FOREACH_SAFE(kernel_policy, &necp_kernel_ip_output_policies, chain, tmp_kernel_policy) {
		if (kernel_policy->id == policy_id) {
			return (kernel_policy);
		}
	}

	return (NULL);
}

static bool
necp_kernel_ip_output_policy_delete(necp_kernel_policy_id policy_id)
{
	struct necp_kernel_ip_output_policy *policy = NULL;

	lck_rw_assert(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	policy = necp_kernel_ip_output_policy_find(policy_id);
	if (policy) {
		LIST_REMOVE(policy, chain);

		if (policy->cond_bound_interface) {
			ifnet_release(policy->cond_bound_interface);
			policy->cond_bound_interface = NULL;
		}

		FREE_ZONE(policy, sizeof(*policy), M_NECP_IP_POLICY);
		return (TRUE);
	}

	return (FALSE);
}

static void
necp_kernel_ip_output_policies_dump_all(void)
{
	struct necp_kernel_ip_output_policy *policy = NULL;
	int policy_i;
	int id_i;
	char result_string[MAX_RESULT_STRING_LEN];
	char proc_name_string[MAXCOMLEN + 1];
	memset(result_string, 0, MAX_RESULT_STRING_LEN);
	memset(proc_name_string, 0, MAXCOMLEN + 1);

	NECPLOG0(LOG_DEBUG, "NECP IP Output Policies:\n");
	NECPLOG0(LOG_DEBUG, "-----------\n");
	for (id_i = 0; id_i < NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS; id_i++) {
		NECPLOG(LOG_DEBUG, " ID Bucket: %d\n", id_i);
		for (policy_i = 0; necp_kernel_ip_output_policies_map[id_i] != NULL && (necp_kernel_ip_output_policies_map[id_i])[policy_i] != NULL; policy_i++) {
			policy = (necp_kernel_ip_output_policies_map[id_i])[policy_i];
			proc_name(policy->session_pid, proc_name_string, MAXCOMLEN);
			NECPLOG(LOG_DEBUG, "\t%3d. Policy ID: %5d\tProcess: %10.10s\tOrder: %04d.%04d.%d\tMask: %5x\tResult: %s\n", policy_i, policy->id, proc_name_string, policy->session_order, policy->order, policy->suborder, policy->condition_mask, necp_get_result_description(result_string, policy->result, policy->result_parameter));
		}
		NECPLOG0(LOG_DEBUG, "-----------\n");
	}
}
static inline bool
necp_kernel_ip_output_policy_results_overlap(struct necp_kernel_ip_output_policy *upper_policy, struct necp_kernel_ip_output_policy *lower_policy)
{
	if (upper_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
		if (upper_policy->session_order != lower_policy->session_order) {
			// A skip cannot override a policy of a different session
			return (FALSE);
		}

		if (upper_policy->result_parameter.skip_policy_order == 0 ||
		    lower_policy->order >= upper_policy->result_parameter.skip_policy_order) {
			// This policy is beyond the skip
			return (FALSE);
		} else {
			// This policy is inside the skip
			return (TRUE);
		}
	}

	// All other IP Output policy results (drop, tunnel, hard pass) currently overlap
	return (TRUE);
}

static bool
necp_kernel_ip_output_policy_is_unnecessary(struct necp_kernel_ip_output_policy *policy, struct necp_kernel_ip_output_policy **policy_array, int valid_indices)
{
	bool can_skip = FALSE;
	u_int32_t highest_skip_session_order = 0;
	u_int32_t highest_skip_order = 0;
	int i;
	for (i = 0; i < valid_indices; i++) {
		struct necp_kernel_ip_output_policy *compared_policy = policy_array[i];

		// For policies in a skip window, we can't mark conflicting policies as unnecessary
		if (can_skip) {
			if (highest_skip_session_order != compared_policy->session_order ||
			    (highest_skip_order != 0 && compared_policy->order >= highest_skip_order)) {
				// If we've moved on to the next session, or passed the skip window
				highest_skip_session_order = 0;
				highest_skip_order = 0;
				can_skip = FALSE;
			} else {
				// If this policy is also a skip, it can increase the skip window
				if (compared_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
					if (compared_policy->result_parameter.skip_policy_order > highest_skip_order) {
						highest_skip_order = compared_policy->result_parameter.skip_policy_order;
					}
				}
				continue;
			}
		}

		if (compared_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
			// This policy is a skip. Set the skip window accordingly
			can_skip = TRUE;
			highest_skip_session_order = compared_policy->session_order;
			highest_skip_order = compared_policy->result_parameter.skip_policy_order;
		}

		// The result of the compared policy must be able to block out this policy result
		if (!necp_kernel_ip_output_policy_results_overlap(compared_policy, policy)) {
			continue;
		}

		// If new policy matches All Interfaces, compared policy must also
		if ((policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES) && !(compared_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES)) {
			continue;
		}

		// Default makes lower policies unnecessary always
		if (compared_policy->condition_mask == 0) {
			return (TRUE);
		}

		// Compared must be more general than policy, and include only conditions within policy
		if ((policy->condition_mask & compared_policy->condition_mask) != compared_policy->condition_mask) {
			continue;
		}

		// Negative conditions must match for the overlapping conditions
		if ((policy->condition_negated_mask & compared_policy->condition_mask) != (compared_policy->condition_negated_mask & compared_policy->condition_mask)) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID &&
		    compared_policy->cond_policy_id != policy->cond_policy_id) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE &&
		    compared_policy->cond_bound_interface != policy->cond_bound_interface) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL &&
		    compared_policy->cond_protocol != policy->cond_protocol) {
			continue;
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
			if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
				if (!necp_is_range_in_range((struct sockaddr *)&policy->cond_local_start, (struct sockaddr *)&policy->cond_local_end, (struct sockaddr *)&compared_policy->cond_local_start, (struct sockaddr *)&compared_policy->cond_local_end)) {
					continue;
				}
			} else if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
				if (compared_policy->cond_local_prefix > policy->cond_local_prefix ||
				    !necp_is_addr_in_subnet((struct sockaddr *)&policy->cond_local_start, (struct sockaddr *)&compared_policy->cond_local_start, compared_policy->cond_local_prefix)) {
					continue;
				}
			}
		}

		if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
			if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
				if (!necp_is_range_in_range((struct sockaddr *)&policy->cond_remote_start, (struct sockaddr *)&policy->cond_remote_end, (struct sockaddr *)&compared_policy->cond_remote_start, (struct sockaddr *)&compared_policy->cond_remote_end)) {
					continue;
				}
			} else if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
				if (compared_policy->cond_remote_prefix > policy->cond_remote_prefix ||
				    !necp_is_addr_in_subnet((struct sockaddr *)&policy->cond_remote_start, (struct sockaddr *)&compared_policy->cond_remote_start, compared_policy->cond_remote_prefix)) {
					continue;
				}
			}
		}

		return (TRUE);
	}

	return (FALSE);
}
static bool
necp_kernel_ip_output_policies_reprocess(void)
{
	int i;
	int bucket_allocation_counts[NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS];
	int bucket_current_free_index[NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS];
	struct necp_kernel_ip_output_policy *kernel_policy = NULL;

	lck_rw_assert(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);

	// Reset mask and counts
	necp_kernel_ip_output_policies_condition_mask = 0;
	necp_kernel_ip_output_policies_count = 0;
	necp_kernel_ip_output_policies_non_id_count = 0;

	for (i = 0; i < NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS; i++) {
		if (necp_kernel_ip_output_policies_map[i] != NULL) {
			FREE(necp_kernel_ip_output_policies_map[i], M_NECP);
			necp_kernel_ip_output_policies_map[i] = NULL;
		}

		bucket_allocation_counts[i] = 0;
	}

	LIST_FOREACH(kernel_policy, &necp_kernel_ip_output_policies, chain) {
		// Update mask and counts
		necp_kernel_ip_output_policies_condition_mask |= kernel_policy->condition_mask;
		necp_kernel_ip_output_policies_count++;

		// Update bucket counts
		if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID)) {
			necp_kernel_ip_output_policies_non_id_count++;
			for (i = 0; i < NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS; i++) {
				bucket_allocation_counts[i]++;
			}
		} else {
			bucket_allocation_counts[NECP_IP_OUTPUT_MAP_ID_TO_BUCKET(kernel_policy->cond_policy_id)]++;
		}
	}

	for (i = 0; i < NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS; i++) {
		if (bucket_allocation_counts[i] > 0) {
			// Allocate a NULL-terminated array of policy pointers for each bucket
			MALLOC(necp_kernel_ip_output_policies_map[i], struct necp_kernel_ip_output_policy **, sizeof(struct necp_kernel_ip_output_policy *) * (bucket_allocation_counts[i] + 1), M_NECP, M_WAITOK);
			if (necp_kernel_ip_output_policies_map[i] == NULL) {
				goto fail;
			}

			// Initialize the first entry to NULL
			(necp_kernel_ip_output_policies_map[i])[0] = NULL;
		}
		bucket_current_free_index[i] = 0;
	}

	LIST_FOREACH(kernel_policy, &necp_kernel_ip_output_policies, chain) {
		// Insert pointers into map
		if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID)) {
			for (i = 0; i < NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS; i++) {
				if (!necp_kernel_ip_output_policy_is_unnecessary(kernel_policy, necp_kernel_ip_output_policies_map[i], bucket_current_free_index[i])) {
					(necp_kernel_ip_output_policies_map[i])[(bucket_current_free_index[i])] = kernel_policy;
					bucket_current_free_index[i]++;
					(necp_kernel_ip_output_policies_map[i])[(bucket_current_free_index[i])] = NULL;
				}
			}
		} else {
			i = NECP_IP_OUTPUT_MAP_ID_TO_BUCKET(kernel_policy->cond_policy_id);
			if (!necp_kernel_ip_output_policy_is_unnecessary(kernel_policy, necp_kernel_ip_output_policies_map[i], bucket_current_free_index[i])) {
				(necp_kernel_ip_output_policies_map[i])[(bucket_current_free_index[i])] = kernel_policy;
				bucket_current_free_index[i]++;
				(necp_kernel_ip_output_policies_map[i])[(bucket_current_free_index[i])] = NULL;
			}
		}
	}
	necp_kernel_ip_output_policies_dump_all();
	return (TRUE);

fail:
	// Free memory, reset mask to 0
	necp_kernel_ip_output_policies_condition_mask = 0;
	necp_kernel_ip_output_policies_count = 0;
	necp_kernel_ip_output_policies_non_id_count = 0;
	for (i = 0; i < NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS; i++) {
		if (necp_kernel_ip_output_policies_map[i] != NULL) {
			FREE(necp_kernel_ip_output_policies_map[i], M_NECP);
			necp_kernel_ip_output_policies_map[i] = NULL;
		}
	}

	return (FALSE);
}
// Outbound Policy Matching
// ---------------------
struct substring {
	char *string;
	size_t length;
};

static struct substring
necp_trim_dots_and_stars(char *string, size_t length)
{
	struct substring sub;
	sub.string = string;
	sub.length = string ? length : 0;

	while (sub.length && (sub.string[0] == '.' || sub.string[0] == '*')) {
		sub.string++;
		sub.length--;
	}

	while (sub.length && (sub.string[sub.length - 1] == '.' || sub.string[sub.length - 1] == '*')) {
		sub.length--;
	}

	return (sub);
}

static char *
necp_create_trimmed_domain(char *string, size_t length)
{
	char *trimmed_domain = NULL;
	struct substring sub = necp_trim_dots_and_stars(string, length);

	MALLOC(trimmed_domain, char *, sub.length + 1, M_NECP, M_WAITOK);
	if (trimmed_domain == NULL) {
		return (NULL);
	}

	memcpy(trimmed_domain, sub.string, sub.length);
	trimmed_domain[sub.length] = 0;

	return (trimmed_domain);
}

static inline int
necp_count_dots(char *string, size_t length)
{
	int dot_count = 0;
	size_t i = 0;

	for (i = 0; i < length; i++) {
		if (string[i] == '.') {
			dot_count++;
		}
	}

	return (dot_count);
}

static bool
necp_check_suffix(struct substring parent, struct substring suffix, bool require_dot_before_suffix)
{
	if (parent.length <= suffix.length) {
		return (FALSE);
	}

	size_t length_difference = (parent.length - suffix.length);

	if (require_dot_before_suffix) {
		if (((char *)(parent.string + length_difference - 1))[0] != '.') {
			return (FALSE);
		}
	}

	return (memcmp(parent.string + length_difference, suffix.string, suffix.length) == 0);
}

static bool
necp_hostname_matches_domain(struct substring hostname_substring, u_int8_t hostname_dot_count, char *domain, u_int8_t domain_dot_count)
{
	if (hostname_substring.string == NULL || domain == NULL) {
		return (hostname_substring.string == domain);
	}

	struct substring domain_substring;
	domain_substring.string = domain;
	domain_substring.length = strlen(domain);

	if (hostname_dot_count == domain_dot_count) {
		if (hostname_substring.length == domain_substring.length &&
		    memcmp(hostname_substring.string, domain_substring.string, hostname_substring.length) == 0) {
			return (TRUE);
		}
	} else if (domain_dot_count < hostname_dot_count) {
		if (necp_check_suffix(hostname_substring, domain_substring, TRUE)) {
			return (TRUE);
		}
	}

	return (FALSE);
}
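
/*
 * Example (illustrative): with hostname "www.example.com" (2 dots) and policy domain
 * "example.com" (1 dot), the dot counts differ, so the suffix path runs:
 * necp_check_suffix() also requires a '.' immediately before the suffix, so
 * "www.example.com" matches while "badexample.com" does not. With equal dot counts
 * ("example.com" vs "example.com") the strings must match exactly.
 */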
#define NECP_KERNEL_ADDRESS_TYPE_CONDITIONS (NECP_KERNEL_CONDITION_LOCAL_START | NECP_KERNEL_CONDITION_LOCAL_END | NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_REMOTE_START | NECP_KERNEL_CONDITION_REMOTE_END | NECP_KERNEL_CONDITION_REMOTE_PREFIX)
static void
necp_application_fillout_info_locked(uuid_t application_uuid, uuid_t real_application_uuid, char *account, char *domain, pid_t pid, uid_t uid, u_int16_t protocol, u_int32_t bound_interface_index, u_int32_t traffic_class, union necp_sockaddr_union *local_addr, union necp_sockaddr_union *remote_addr, struct necp_socket_info *info)
{
	memset(info, 0, sizeof(struct necp_socket_info));

	info->pid = pid;
	info->uid = uid;
	info->protocol = protocol;
	info->bound_interface_index = bound_interface_index;
	info->traffic_class = traffic_class;
	info->cred_result = 0; // Don't check the entitlement here, only in the socket layer

	if (necp_kernel_application_policies_condition_mask & NECP_KERNEL_CONDITION_APP_ID && !uuid_is_null(application_uuid)) {
		struct necp_uuid_id_mapping *existing_mapping = necp_uuid_lookup_app_id_locked(application_uuid);
		if (existing_mapping) {
			info->application_id = existing_mapping->id;
		}
	}

	if (necp_kernel_application_policies_condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID && !uuid_is_null(real_application_uuid)) {
		if (uuid_compare(application_uuid, real_application_uuid) == 0) {
			info->real_application_id = info->application_id;
		} else {
			struct necp_uuid_id_mapping *existing_mapping = necp_uuid_lookup_app_id_locked(real_application_uuid);
			if (existing_mapping) {
				info->real_application_id = existing_mapping->id;
			}
		}
	}

	if (necp_kernel_application_policies_condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID && account != NULL) {
		struct necp_string_id_mapping *existing_mapping = necp_lookup_string_to_id_locked(&necp_account_id_list, account);
		if (existing_mapping) {
			info->account_id = existing_mapping->id;
		}
	}

	if (necp_kernel_application_policies_condition_mask & NECP_KERNEL_CONDITION_DOMAIN) {
		info->domain = domain;
	}

	if (necp_kernel_application_policies_condition_mask & NECP_KERNEL_ADDRESS_TYPE_CONDITIONS) {
		if (local_addr && local_addr->sa.sa_len > 0) {
			memcpy(&info->local_addr, local_addr, local_addr->sa.sa_len);
		}
		if (remote_addr && remote_addr->sa.sa_len > 0) {
			memcpy(&info->remote_addr, remote_addr, remote_addr->sa.sa_len);
		}
	}
}

static void
necp_send_application_cell_denied_event(pid_t pid, uuid_t proc_uuid)
{
	struct kev_netpolicy_ifdenied ev_ifdenied;

	bzero(&ev_ifdenied, sizeof(ev_ifdenied));

	ev_ifdenied.ev_data.epid = pid;
	uuid_copy(ev_ifdenied.ev_data.euuid, proc_uuid);

	netpolicy_post_msg(KEV_NETPOLICY_IFDENIED, &ev_ifdenied.ev_data, sizeof(ev_ifdenied));
}
necp_application_find_policy_match_internal(u_int8_t *parameters, u_int32_t parameters_size, struct necp_aggregate_result *returned_result)
	struct necp_kernel_socket_policy *matched_policy = NULL;
	struct necp_socket_info info;
	necp_kernel_policy_filter filter_control_unit = 0;
	u_int32_t route_rule_id = 0;
	necp_kernel_policy_result service_action = 0;
	necp_kernel_policy_service service = { 0, 0 };

	u_int16_t protocol = 0;
	u_int32_t bound_interface_index = 0;
	u_int32_t traffic_class = 0;
	union necp_sockaddr_union local_addr;
	union necp_sockaddr_union remote_addr;
	bool no_remote_addr = FALSE;

	memset(&local_addr, 0, sizeof(local_addr));
	memset(&remote_addr, 0, sizeof(remote_addr));
	uuid_t application_uuid;
	uuid_clear(application_uuid);
	uuid_t real_application_uuid;
	uuid_clear(real_application_uuid);
	char *domain = NULL;
	char *account = NULL;

	u_int32_t netagent_ids[NECP_MAX_NETAGENTS];
	memset(&netagent_ids, 0, sizeof(netagent_ids));
	int netagent_cursor;

	if (returned_result == NULL) {

	memset(returned_result, 0, sizeof(struct necp_aggregate_result));

	lck_rw_lock_shared(&necp_kernel_policy_lock);
	if (necp_kernel_application_policies_count == 0) {
		if (necp_drop_all_order > 0) {
			returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_DROP;
			lck_rw_done(&necp_kernel_policy_lock);

	lck_rw_done(&necp_kernel_policy_lock);

	while ((offset + sizeof(u_int8_t) + sizeof(u_int32_t)) <= parameters_size) {
		u_int8_t type = necp_buffer_get_tlv_type(parameters, offset);
		u_int32_t length = necp_buffer_get_tlv_length(parameters, offset);

		if (length > 0 && (offset + sizeof(u_int8_t) + sizeof(u_int32_t) + length) <= parameters_size) {
			u_int8_t *value = necp_buffer_get_tlv_value(parameters, offset, NULL);
			if (value != NULL) {
				case NECP_POLICY_CONDITION_APPLICATION: {
					if (length >= sizeof(uuid_t)) {
						uuid_copy(application_uuid, value);
				case NECP_POLICY_CONDITION_REAL_APPLICATION: {
					if (length >= sizeof(uuid_t)) {
						uuid_copy(real_application_uuid, value);
				case NECP_POLICY_CONDITION_DOMAIN: {
					domain = (char *)value;
					domain[length - 1] = 0;
				case NECP_POLICY_CONDITION_ACCOUNT: {
					account = (char *)value;
					account[length - 1] = 0;
				case NECP_POLICY_CONDITION_TRAFFIC_CLASS: {
					if (length >= sizeof(u_int32_t)) {
						memcpy(&traffic_class, value, sizeof(u_int32_t));
				case NECP_POLICY_CONDITION_PID: {
					if (length >= sizeof(pid_t)) {
						memcpy(&pid, value, sizeof(pid_t));
				case NECP_POLICY_CONDITION_UID: {
					if (length >= sizeof(uid_t)) {
						memcpy(&uid, value, sizeof(uid_t));
				case NECP_POLICY_CONDITION_IP_PROTOCOL: {
					if (length >= sizeof(u_int16_t)) {
						memcpy(&protocol, value, sizeof(u_int16_t));
				case NECP_POLICY_CONDITION_BOUND_INTERFACE: {
					if (length <= IFXNAMSIZ && length > 0) {
						ifnet_t bound_interface = NULL;
						char interface_name[IFXNAMSIZ];
						memcpy(interface_name, value, length);
						interface_name[length - 1] = 0; // Make sure the string is NULL terminated
						if (ifnet_find_by_name(interface_name, &bound_interface) == 0) {
							bound_interface_index = bound_interface->if_index;
				case NECP_POLICY_CONDITION_LOCAL_ADDR: {
					if (length >= sizeof(struct necp_policy_condition_addr)) {
						struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value;
						memcpy(&local_addr, &address_struct->address, sizeof(address_struct->address));
				case NECP_POLICY_CONDITION_REMOTE_ADDR: {
					if (length >= sizeof(struct necp_policy_condition_addr)) {
						struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value;
						memcpy(&remote_addr, &address_struct->address, sizeof(address_struct->address));

		offset += sizeof(u_int8_t) + sizeof(u_int32_t) + length;

	lck_rw_lock_shared(&necp_kernel_policy_lock);

	necp_application_fillout_info_locked(application_uuid, real_application_uuid, account, domain, pid, uid, protocol, bound_interface_index, traffic_class, &local_addr, &remote_addr, &info);
	matched_policy = necp_socket_find_policy_match_with_info_locked(necp_kernel_socket_policies_app_layer_map, &info, &filter_control_unit, &route_rule_id, &service_action, &service, netagent_ids, NECP_MAX_NETAGENTS);
	if (matched_policy) {
		returned_result->policy_id = matched_policy->id;
		returned_result->routing_result = matched_policy->result;
		memcpy(&returned_result->routing_result_parameter, &matched_policy->result_parameter, sizeof(returned_result->routing_result_parameter));

		returned_result->policy_id = 0;
		returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_NONE;

	returned_result->filter_control_unit = filter_control_unit;
	returned_result->service_action = service_action;

	// Handle trigger service
	if (service.identifier != 0) {
		struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(service.identifier);
		if (mapping != NULL) {
			struct necp_service_registration *service_registration = NULL;
			uuid_copy(returned_result->service_uuid, mapping->uuid);
			returned_result->service_data = service.data;
			if (service.identifier == NECP_NULL_SERVICE_ID) {
				// NULL service is always 'registered'
				returned_result->service_flags |= NECP_SERVICE_FLAGS_REGISTERED;

				LIST_FOREACH(service_registration, &necp_registered_service_list, kernel_chain) {
					if (service.identifier == service_registration->service_id) {
						returned_result->service_flags |= NECP_SERVICE_FLAGS_REGISTERED;

	for (netagent_cursor = 0; netagent_cursor < NECP_MAX_NETAGENTS; netagent_cursor++) {
		struct necp_uuid_id_mapping *mapping = NULL;
		u_int32_t netagent_id = netagent_ids[netagent_cursor];
		if (netagent_id == 0) {

		mapping = necp_uuid_lookup_uuid_with_service_id_locked(netagent_id);
		if (mapping != NULL) {
			uuid_copy(returned_result->netagents[netagent_cursor], mapping->uuid);
			returned_result->netagent_flags[netagent_cursor] = netagent_get_flags(mapping->uuid);

	// Do routing evaluation
	u_int output_bound_interface = bound_interface_index;
	if (returned_result->routing_result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED) {
		output_bound_interface = returned_result->routing_result_parameter.scoped_interface_index;
	} else if (returned_result->routing_result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL) {
		output_bound_interface = returned_result->routing_result_parameter.tunnel_interface_index;

	if (remote_addr.sa.sa_len == 0) {
		no_remote_addr = TRUE;
		// Default to 0.0.0.0:0
		remote_addr.sa.sa_family = AF_INET;
		remote_addr.sa.sa_len = sizeof(struct sockaddr_in);

	struct rtentry *rt = NULL;
	rt = rtalloc1_scoped((struct sockaddr *)&remote_addr, 0, 0, output_bound_interface);

	if (no_remote_addr &&
	    (rt == NULL || rt->rt_ifp == NULL)) {
		// Route lookup for default IPv4 failed, try IPv6

		// Cleanup old route if necessary

		// Reset address to ::
		memset(&remote_addr, 0, sizeof(remote_addr));
		remote_addr.sa.sa_family = AF_INET6;
		remote_addr.sa.sa_len = sizeof(struct sockaddr_in6);

		rt = rtalloc1_scoped((struct sockaddr *)&remote_addr, 0, 0, output_bound_interface);

	returned_result->routed_interface_index = 0;
	    rt->rt_ifp != NULL) {
		returned_result->routed_interface_index = rt->rt_ifp->if_index;

		 * For local addresses, we allow the interface scope to be
		 * either the loopback interface or the interface hosting the
		if (bound_interface_index != IFSCOPE_NONE &&
		    rt->rt_ifa != NULL && rt->rt_ifa->ifa_ifp &&
		    (output_bound_interface == lo_ifp->if_index ||
		    rt->rt_ifp->if_index == lo_ifp->if_index ||
		    rt->rt_ifa->ifa_ifp->if_index == bound_interface_index)) {
			struct sockaddr_storage dst;
			unsigned int ifscope = bound_interface_index;

			 * Transform dst into the internal routing table form
			(void) sa_copy((struct sockaddr *)&remote_addr,

			if ((rt->rt_ifp->if_index == lo_ifp->if_index) ||
			    rt_ifa_is_dst((struct sockaddr *)&dst, rt->rt_ifa))
				returned_result->routed_interface_index = bound_interface_index;

	bool cellular_denied = FALSE;
	bool route_is_allowed = necp_route_is_allowed(rt, NULL, route_rule_id, &cellular_denied);
	if (!route_is_allowed) {
		// If the route is blocked, treat the lookup as a drop
		returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_DROP;
		memset(&returned_result->routing_result_parameter, 0, sizeof(returned_result->routing_result_parameter));

	if (cellular_denied) {
		necp_send_application_cell_denied_event(pid, application_uuid);

	lck_rw_done(&necp_kernel_policy_lock);
#define NECP_MAX_MATCH_POLICY_PARAMETER_SIZE 1024
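
/*
 * necp_match_policy system call: validates and copies in the caller's parameter
 * TLVs (bounded by NECP_MAX_MATCH_POLICY_PARAMETER_SIZE), runs the application-layer
 * match above, and copies the struct necp_aggregate_result back out to user space.
 */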
necp_match_policy(struct proc *p, struct necp_match_policy_args *uap, int32_t *retval)
#pragma unused(p, retval)
	u_int8_t *parameters = NULL;
	struct necp_aggregate_result returned_result;

	if (uap->parameters == 0 || uap->parameters_size == 0 || uap->parameters_size > NECP_MAX_MATCH_POLICY_PARAMETER_SIZE || uap->returned_result == 0) {

	MALLOC(parameters, u_int8_t *, uap->parameters_size, M_NECP, M_WAITOK);
	if (parameters == NULL) {

	// Copy parameters in
	error = copyin(uap->parameters, parameters, uap->parameters_size);

	error = necp_application_find_policy_match_internal(parameters, uap->parameters_size, &returned_result);

	// Copy return value back
	error = copyout(&returned_result, uap->returned_result, sizeof(struct necp_aggregate_result));

	if (parameters != NULL) {
		FREE(parameters, M_NECP);
necp_socket_check_policy(struct necp_kernel_socket_policy *kernel_policy, necp_app_id app_id, necp_app_id real_app_id, errno_t cred_result, u_int32_t account_id, struct substring domain, u_int8_t domain_dot_count, pid_t pid, uid_t uid, u_int32_t bound_interface_index, u_int32_t traffic_class, u_int16_t protocol, union necp_sockaddr_union *local, union necp_sockaddr_union *remote)
	if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES)) {
		if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
			u_int32_t cond_bound_interface_index = kernel_policy->cond_bound_interface ? kernel_policy->cond_bound_interface->if_index : 0;
			if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
				if (bound_interface_index == cond_bound_interface_index) {
					// No match, matches forbidden interface

				if (bound_interface_index != cond_bound_interface_index) {
					// No match, does not match required interface

			if (bound_interface_index != 0) {
				// No match, requires a non-bound packet

	if (kernel_policy->condition_mask == 0) {

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID) {
		if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_APP_ID) {
			if (app_id == kernel_policy->cond_app_id) {
				// No match, matches forbidden application

			if (app_id != kernel_policy->cond_app_id) {
				// No match, does not match required application

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
		if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
			if (real_app_id == kernel_policy->cond_real_app_id) {
				// No match, matches forbidden application

			if (real_app_id != kernel_policy->cond_real_app_id) {
				// No match, does not match required application

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT) {
		if (cred_result != 0) {
			// Process is missing entitlement

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_DOMAIN) {
		bool domain_matches = necp_hostname_matches_domain(domain, domain_dot_count, kernel_policy->cond_domain, kernel_policy->cond_domain_dot_count);
		if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_DOMAIN) {
			if (domain_matches) {
				// No match, matches forbidden domain

			if (!domain_matches) {
				// No match, does not match required domain

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID) {
		if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID) {
			if (account_id == kernel_policy->cond_account_id) {
				// No match, matches forbidden account

			if (account_id != kernel_policy->cond_account_id) {
				// No match, does not match required account

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PID) {
		if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_PID) {
			if (pid == kernel_policy->cond_pid) {
				// No match, matches forbidden pid

			if (pid != kernel_policy->cond_pid) {
				// No match, does not match required pid

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_UID) {
		if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_UID) {
			if (uid == kernel_policy->cond_uid) {
				// No match, matches forbidden uid

			if (uid != kernel_policy->cond_uid) {
				// No match, does not match required uid

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
		if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
			if (traffic_class >= kernel_policy->cond_traffic_class.start_tc &&
			    traffic_class <= kernel_policy->cond_traffic_class.end_tc) {
				// No match, matches forbidden traffic class

			if (traffic_class < kernel_policy->cond_traffic_class.start_tc ||
			    traffic_class > kernel_policy->cond_traffic_class.end_tc) {
				// No match, does not match required traffic class

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
		if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
			if (protocol == kernel_policy->cond_protocol) {
				// No match, matches forbidden protocol

			if (protocol != kernel_policy->cond_protocol) {
				// No match, does not match required protocol

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
		if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
			bool inRange = necp_is_addr_in_range((struct sockaddr *)local, (struct sockaddr *)&kernel_policy->cond_local_start, (struct sockaddr *)&kernel_policy->cond_local_end);
			if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_LOCAL_END) {

		} else if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
			bool inSubnet = necp_is_addr_in_subnet((struct sockaddr *)local, (struct sockaddr *)&kernel_policy->cond_local_start, kernel_policy->cond_local_prefix);
			if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
		if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
			bool inRange = necp_is_addr_in_range((struct sockaddr *)remote, (struct sockaddr *)&kernel_policy->cond_remote_start, (struct sockaddr *)&kernel_policy->cond_remote_end);
			if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REMOTE_END) {

		} else if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
			bool inSubnet = necp_is_addr_in_subnet((struct sockaddr *)remote, (struct sockaddr *)&kernel_policy->cond_remote_start, kernel_policy->cond_remote_prefix);
			if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
static inline u_int32_t
necp_socket_calc_flowhash_locked(struct necp_socket_info *info)
	return (net_flowhash(info, sizeof(*info), necp_kernel_socket_policies_gencount));
necp_socket_fillout_info_locked(struct inpcb *inp, struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr, u_int32_t override_bound_interface, struct necp_socket_info *info)
	struct socket *so = NULL;

	memset(info, 0, sizeof(struct necp_socket_info));

	so = inp->inp_socket;

	if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_PID) {
		info->pid = ((so->so_flags & SOF_DELEGATED) ? so->e_pid : so->last_pid);

	if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_UID) {
		info->uid = kauth_cred_getuid(so->so_cred);

	if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
		info->traffic_class = so->so_traffic_class;

	if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
		if (inp->inp_ip_p) {
			info->protocol = inp->inp_ip_p;

			info->protocol = SOCK_PROTO(so);

	if (inp->inp_flags2 & INP2_WANT_APP_POLICY && necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_APP_ID) {
		struct necp_uuid_id_mapping *existing_mapping = necp_uuid_lookup_app_id_locked(((so->so_flags & SOF_DELEGATED) ? so->e_uuid : so->last_uuid));
		if (existing_mapping) {
			info->application_id = existing_mapping->id;

		if (!(so->so_flags & SOF_DELEGATED)) {
			info->real_application_id = info->application_id;
		} else if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
			struct necp_uuid_id_mapping *real_existing_mapping = necp_uuid_lookup_app_id_locked(so->last_uuid);
			if (real_existing_mapping) {
				info->real_application_id = real_existing_mapping->id;

		if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT) {
			info->cred_result = priv_check_cred(so->so_cred, PRIV_NET_PRIVILEGED_NECP_MATCH, 0);

	if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID && inp->inp_necp_attributes.inp_account != NULL) {
		struct necp_string_id_mapping *existing_mapping = necp_lookup_string_to_id_locked(&necp_account_id_list, inp->inp_necp_attributes.inp_account);
		if (existing_mapping) {
			info->account_id = existing_mapping->id;

	if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_DOMAIN) {
		info->domain = inp->inp_necp_attributes.inp_domain;

	if (override_bound_interface) {
		info->bound_interface_index = override_bound_interface;

		if ((inp->inp_flags & INP_BOUND_IF) && inp->inp_boundifp) {
			info->bound_interface_index = inp->inp_boundifp->if_index;

	if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_ADDRESS_TYPE_CONDITIONS) {
		if (inp->inp_vflag & INP_IPV4) {
			if (override_local_addr) {
				memcpy(&info->local_addr, override_local_addr, override_local_addr->sa_len);

				((struct sockaddr_in *)&info->local_addr)->sin_family = AF_INET;
				((struct sockaddr_in *)&info->local_addr)->sin_len = sizeof(struct sockaddr_in);
				((struct sockaddr_in *)&info->local_addr)->sin_port = inp->inp_lport;
				memcpy(&((struct sockaddr_in *)&info->local_addr)->sin_addr, &inp->inp_laddr, sizeof(struct in_addr));

			if (override_remote_addr) {
				memcpy(&info->remote_addr, override_remote_addr, override_remote_addr->sa_len);

				((struct sockaddr_in *)&info->remote_addr)->sin_family = AF_INET;
				((struct sockaddr_in *)&info->remote_addr)->sin_len = sizeof(struct sockaddr_in);
				((struct sockaddr_in *)&info->remote_addr)->sin_port = inp->inp_fport;
				memcpy(&((struct sockaddr_in *)&info->remote_addr)->sin_addr, &inp->inp_faddr, sizeof(struct in_addr));
		} else if (inp->inp_vflag & INP_IPV6) {
			if (override_local_addr) {
				memcpy(&info->local_addr, override_local_addr, override_local_addr->sa_len);

				((struct sockaddr_in6 *)&info->local_addr)->sin6_family = AF_INET6;
				((struct sockaddr_in6 *)&info->local_addr)->sin6_len = sizeof(struct sockaddr_in6);
				((struct sockaddr_in6 *)&info->local_addr)->sin6_port = inp->inp_lport;
				memcpy(&((struct sockaddr_in6 *)&info->local_addr)->sin6_addr, &inp->in6p_laddr, sizeof(struct in6_addr));

			if (override_remote_addr) {
				memcpy(&info->remote_addr, override_remote_addr, override_remote_addr->sa_len);

				((struct sockaddr_in6 *)&info->remote_addr)->sin6_family = AF_INET6;
				((struct sockaddr_in6 *)&info->remote_addr)->sin6_len = sizeof(struct sockaddr_in6);
				((struct sockaddr_in6 *)&info->remote_addr)->sin6_port = inp->inp_fport;
				memcpy(&((struct sockaddr_in6 *)&info->remote_addr)->sin6_addr, &inp->in6p_faddr, sizeof(struct in6_addr));
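
/*
 * Walk an ordered policy search array and return the first policy whose conditions
 * match the given info. Non-terminal results (socket filters, route rules, service
 * triggers, and netagents) are accumulated into the return_* out-parameters along
 * the way; a SKIP result advances skip_order/skip_session_order so that lower-order
 * policies within the same session are bypassed. Multiple route rules are collapsed
 * into an aggregate route rule before returning.
 */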
static inline struct necp_kernel_socket_policy *
necp_socket_find_policy_match_with_info_locked(struct necp_kernel_socket_policy **policy_search_array, struct necp_socket_info *info, necp_kernel_policy_filter *return_filter, u_int32_t *return_route_rule_id, necp_kernel_policy_result *return_service_action, necp_kernel_policy_service *return_service, u_int32_t *return_netagent_array, size_t netagent_array_count)
	struct necp_kernel_socket_policy *matched_policy = NULL;
	u_int32_t skip_order = 0;
	u_int32_t skip_session_order = 0;
	u_int32_t route_rule_id_array[MAX_AGGREGATE_ROUTE_RULES];
	size_t route_rule_id_count = 0;
	size_t netagent_cursor = 0;

	// Pre-process domain for quick matching
	struct substring domain_substring = necp_trim_dots_and_stars(info->domain, info->domain ? strlen(info->domain) : 0);
	u_int8_t domain_dot_count = necp_count_dots(domain_substring.string, domain_substring.length);

	if (return_filter) {

	if (return_route_rule_id) {
		*return_route_rule_id = 0;

	if (return_service_action) {
		*return_service_action = 0;

	if (return_service) {
		return_service->identifier = 0;
		return_service->data = 0;

	if (policy_search_array != NULL) {
		for (i = 0; policy_search_array[i] != NULL; i++) {
			if (necp_drop_all_order != 0 && policy_search_array[i]->session_order >= necp_drop_all_order) {
				// We've hit a drop all rule

			if (skip_session_order && policy_search_array[i]->session_order >= skip_session_order) {

				skip_session_order = 0;

			if (policy_search_array[i]->order < skip_order) {

				skip_session_order = 0;
			} else if (skip_session_order) {

			if (necp_socket_check_policy(policy_search_array[i], info->application_id, info->real_application_id, info->cred_result, info->account_id, domain_substring, domain_dot_count, info->pid, info->uid, info->bound_interface_index, info->traffic_class, info->protocol, &info->local_addr, &info->remote_addr)) {
				if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_SOCKET_FILTER) {
					if (return_filter && *return_filter == 0) {
						*return_filter = policy_search_array[i]->result_parameter.filter_control_unit;
						if (necp_debug > 1) {
							NECPLOG(LOG_DEBUG, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) Filter %d", info->application_id, info->real_application_id, info->bound_interface_index, info->protocol, policy_search_array[i]->result_parameter.filter_control_unit);
				} else if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_ROUTE_RULES) {
					if (return_route_rule_id && route_rule_id_count < MAX_AGGREGATE_ROUTE_RULES) {
						route_rule_id_array[route_rule_id_count++] = policy_search_array[i]->result_parameter.route_rule_id;
						if (necp_debug > 1) {
							NECPLOG(LOG_DEBUG, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) Route Rule %d", info->application_id, info->real_application_id, info->bound_interface_index, info->protocol, policy_search_array[i]->result_parameter.route_rule_id);
				} else if (necp_kernel_socket_result_is_trigger_service_type(policy_search_array[i])) {
					if (return_service_action && *return_service_action == 0) {
						*return_service_action = policy_search_array[i]->result;
						if (necp_debug > 1) {
							NECPLOG(LOG_DEBUG, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) Service Action %d", info->application_id, info->real_application_id, info->bound_interface_index, info->protocol, policy_search_array[i]->result);

					if (return_service && return_service->identifier == 0) {
						return_service->identifier = policy_search_array[i]->result_parameter.service.identifier;
						return_service->data = policy_search_array[i]->result_parameter.service.data;
						if (necp_debug > 1) {
							NECPLOG(LOG_DEBUG, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) Service ID %d Data %d", info->application_id, info->real_application_id, info->bound_interface_index, info->protocol, policy_search_array[i]->result_parameter.service.identifier, policy_search_array[i]->result_parameter.service.data);
				} else if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_USE_NETAGENT) {
					if (return_netagent_array != NULL &&
					    netagent_cursor < netagent_array_count) {
						return_netagent_array[netagent_cursor] = policy_search_array[i]->result_parameter.netagent_id;

						if (necp_debug > 1) {
							NECPLOG(LOG_DEBUG, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) Use Netagent %d", info->application_id, info->real_application_id, info->bound_interface_index, info->protocol, policy_search_array[i]->result_parameter.netagent_id);

				// Passed all tests, found a match
				matched_policy = policy_search_array[i];
				if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
					skip_order = policy_search_array[i]->result_parameter.skip_policy_order;
					skip_session_order = policy_search_array[i]->session_order + 1;

	if (route_rule_id_count == 1) {
		*return_route_rule_id = route_rule_id_array[0];
	} else if (route_rule_id_count > 1) {
		*return_route_rule_id = necp_create_aggregate_route_rule(route_rule_id_array);

	return (matched_policy);
necp_socket_uses_interface(struct inpcb *inp, u_int32_t interface_index)
	bool found_match = FALSE;

	ifaddr_t *addresses = NULL;
	union necp_sockaddr_union address_storage;

	int family = AF_INET;
	ifnet_t interface = ifindex2ifnet[interface_index];

	if (inp == NULL || interface == NULL) {

	if (inp->inp_vflag & INP_IPV4) {

	} else if (inp->inp_vflag & INP_IPV6) {

	result = ifnet_get_address_list_family(interface, &addresses, family);

		NECPLOG(LOG_ERR, "Failed to get address list for %s%d", ifnet_name(interface), ifnet_unit(interface));

	for (i = 0; addresses[i] != NULL; i++) {
		if (ifaddr_address(addresses[i], &address_storage.sa, sizeof(address_storage)) == 0) {
			if (family == AF_INET) {
				if (memcmp(&address_storage.sin.sin_addr, &inp->inp_laddr, sizeof(inp->inp_laddr)) == 0) {

			} else if (family == AF_INET6) {
				if (memcmp(&address_storage.sin6.sin6_addr, &inp->in6p_laddr, sizeof(inp->in6p_laddr)) == 0) {

	ifnet_free_address_list(addresses);

	return (found_match);
necp_socket_is_connected(struct inpcb *inp)
	return (inp->inp_socket->so_state & (SS_ISCONNECTING | SS_ISCONNECTED | SS_ISDISCONNECTING));
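
/*
 * Match a socket against the socket-level policy table. A flowhash of the collected
 * socket info is compared against the cached result in inp_policyresult, so a socket
 * is only re-evaluated when the policy generation count or its own parameters change.
 * The matched result (or a drop/pass for the empty-table and loopback cases) is
 * cached back into the inpcb.
 */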
necp_kernel_policy_id
necp_socket_find_policy_match(struct inpcb *inp, struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr, u_int32_t override_bound_interface)
	struct socket *so = NULL;
	necp_kernel_policy_filter filter_control_unit = 0;
	u_int32_t route_rule_id = 0;
	struct necp_kernel_socket_policy *matched_policy = NULL;
	necp_kernel_policy_id matched_policy_id = NECP_KERNEL_POLICY_ID_NONE;
	necp_kernel_policy_result service_action = 0;
	necp_kernel_policy_service service = { 0, 0 };

	u_int32_t netagent_ids[NECP_MAX_NETAGENTS];
	memset(&netagent_ids, 0, sizeof(netagent_ids));
	int netagent_cursor;

	struct necp_socket_info info;

		return (NECP_KERNEL_POLICY_ID_NONE);

	so = inp->inp_socket;

	// Don't lock. Possible race condition, but we don't want the performance hit.
	if (necp_kernel_socket_policies_count == 0 ||
	    (!(inp->inp_flags2 & INP2_WANT_APP_POLICY) && necp_kernel_socket_policies_non_app_count == 0)) {
		if (necp_drop_all_order > 0) {
			inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
			inp->inp_policyresult.policy_gencount = 0;
			inp->inp_policyresult.flowhash = 0;
			inp->inp_policyresult.results.filter_control_unit = 0;
			inp->inp_policyresult.results.route_rule_id = 0;
			if (necp_pass_loopback > 0 &&
			    necp_is_loopback(override_local_addr, override_remote_addr, inp, NULL)) {
				inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_PASS;

				inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_DROP;

		return (NECP_KERNEL_POLICY_ID_NONE);

	// Check for loopback exception
	if (necp_pass_loopback > 0 &&
	    necp_is_loopback(override_local_addr, override_remote_addr, inp, NULL)) {
		// Mark socket as a pass
		inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
		inp->inp_policyresult.policy_gencount = 0;
		inp->inp_policyresult.flowhash = 0;
		inp->inp_policyresult.results.filter_control_unit = 0;
		inp->inp_policyresult.results.route_rule_id = 0;
		inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_PASS;
		return (NECP_KERNEL_POLICY_ID_NONE);

	lck_rw_lock_shared(&necp_kernel_policy_lock);

	necp_socket_fillout_info_locked(inp, override_local_addr, override_remote_addr, override_bound_interface, &info);

	u_int32_t flowhash = necp_socket_calc_flowhash_locked(&info);
	if (inp->inp_policyresult.policy_id != NECP_KERNEL_POLICY_ID_NONE &&
	    inp->inp_policyresult.policy_gencount == necp_kernel_socket_policies_gencount &&
	    inp->inp_policyresult.flowhash == flowhash) {
		// If already matched this socket on this generation of table, skip

		lck_rw_done(&necp_kernel_policy_lock);

		return (inp->inp_policyresult.policy_id);

	// Match socket to policy
	matched_policy = necp_socket_find_policy_match_with_info_locked(necp_kernel_socket_policies_map[NECP_SOCKET_MAP_APP_ID_TO_BUCKET(info.application_id)], &info, &filter_control_unit, &route_rule_id, &service_action, &service, netagent_ids, NECP_MAX_NETAGENTS);
	// If the socket matched a scoped service policy, mark as Drop if not registered.
	// This covers the cases in which a service is required (on demand) but hasn't started yet.
	if ((service_action == NECP_KERNEL_POLICY_RESULT_TRIGGER_SCOPED ||
	    service_action == NECP_KERNEL_POLICY_RESULT_NO_TRIGGER_SCOPED) &&
	    service.identifier != 0 &&
	    service.identifier != NECP_NULL_SERVICE_ID) {
		bool service_is_registered = FALSE;
		struct necp_service_registration *service_registration = NULL;
		LIST_FOREACH(service_registration, &necp_registered_service_list, kernel_chain) {
			if (service.identifier == service_registration->service_id) {
				service_is_registered = TRUE;

		if (!service_is_registered) {
			// Mark socket as a drop if service is not registered
			inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
			inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount;
			inp->inp_policyresult.flowhash = flowhash;
			inp->inp_policyresult.results.filter_control_unit = 0;
			inp->inp_policyresult.results.route_rule_id = 0;
			inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_DROP;

			if (necp_debug > 1) {
				NECPLOG(LOG_DEBUG, "Socket Policy: (BoundInterface %d Proto %d) Dropping packet because service is not registered", info.bound_interface_index, info.protocol);

			lck_rw_done(&necp_kernel_policy_lock);
			return (NECP_KERNEL_POLICY_ID_NONE);

	for (netagent_cursor = 0; netagent_cursor < NECP_MAX_NETAGENTS; netagent_cursor++) {
		struct necp_uuid_id_mapping *mapping = NULL;
		u_int32_t netagent_id = netagent_ids[netagent_cursor];
		if (netagent_id == 0) {

		mapping = necp_uuid_lookup_uuid_with_service_id_locked(netagent_id);
		if (mapping != NULL) {
			u_int32_t agent_flags = 0;
			agent_flags = netagent_get_flags(mapping->uuid);
			if (agent_flags & NETAGENT_FLAG_REGISTERED) {
				if (agent_flags & NETAGENT_FLAG_ACTIVE) {

				} else if ((agent_flags & NETAGENT_FLAG_VOLUNTARY) == 0) {
					if (agent_flags & NETAGENT_FLAG_KERNEL_ACTIVATED) {
						int trigger_error = 0;
						trigger_error = netagent_kernel_trigger(mapping->uuid);
						if (necp_debug > 1) {
							NECPLOG(LOG_DEBUG, "Socket Policy: Triggering inactive agent, error %d", trigger_error);

					// Mark socket as a drop if required agent is not active
					inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
					inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount;
					inp->inp_policyresult.flowhash = flowhash;
					inp->inp_policyresult.results.filter_control_unit = 0;
					inp->inp_policyresult.results.route_rule_id = 0;
					inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_DROP;

					if (necp_debug > 1) {
						NECPLOG(LOG_DEBUG, "Socket Policy: (BoundInterface %d Proto %d) Dropping packet because agent is not active", info.bound_interface_index, info.protocol);

					lck_rw_done(&necp_kernel_policy_lock);
					return (NECP_KERNEL_POLICY_ID_NONE);

	if (matched_policy) {
		matched_policy_id = matched_policy->id;
		inp->inp_policyresult.policy_id = matched_policy->id;
		inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount;
		inp->inp_policyresult.flowhash = flowhash;
		inp->inp_policyresult.results.filter_control_unit = filter_control_unit;
		inp->inp_policyresult.results.route_rule_id = route_rule_id;
		inp->inp_policyresult.results.result = matched_policy->result;
		memcpy(&inp->inp_policyresult.results.result_parameter, &matched_policy->result_parameter, sizeof(matched_policy->result_parameter));

		if (necp_socket_is_connected(inp) &&
		    (matched_policy->result == NECP_KERNEL_POLICY_RESULT_DROP ||
		    (matched_policy->result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL && !necp_socket_uses_interface(inp, matched_policy->result_parameter.tunnel_interface_index)))) {

			NECPLOG(LOG_DEBUG, "Marking socket in state %d as defunct", so->so_state);

			sosetdefunct(current_proc(), so, SHUTDOWN_SOCKET_LEVEL_DISCONNECT_ALL, TRUE);
		} else if (necp_socket_is_connected(inp) &&
		    matched_policy->result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL &&
		    info.protocol == IPPROTO_TCP) {
			// Reset MSS on TCP socket if tunnel policy changes
			tcp_mtudisc(inp, 0);

		if (necp_debug > 1) {
			NECPLOG(LOG_DEBUG, "Socket Policy: %p (BoundInterface %d Proto %d) Policy %d Result %d Parameter %d", inp->inp_socket, info.bound_interface_index, info.protocol, matched_policy->id, matched_policy->result, matched_policy->result_parameter.tunnel_interface_index);
	} else if (necp_drop_all_order > 0) {
		// Mark socket as a drop if set
		inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
		inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount;
		inp->inp_policyresult.flowhash = flowhash;
		inp->inp_policyresult.results.filter_control_unit = 0;
		inp->inp_policyresult.results.route_rule_id = 0;
		inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_DROP;

		// Mark non-matching socket so we don't re-check it
		inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
		inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount;
		inp->inp_policyresult.flowhash = flowhash;
		inp->inp_policyresult.results.filter_control_unit = filter_control_unit; // We may have matched a filter, so mark it!
		inp->inp_policyresult.results.route_rule_id = route_rule_id; // We may have matched a route rule, so mark it!
		inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_NONE;

	lck_rw_done(&necp_kernel_policy_lock);

	return (matched_policy_id);
necp_ip_output_check_policy(struct necp_kernel_ip_output_policy *kernel_policy, necp_kernel_policy_id socket_policy_id, u_int32_t bound_interface_index, u_int32_t last_interface_index, u_int16_t protocol, union necp_sockaddr_union *local, union necp_sockaddr_union *remote)
	if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES)) {
		if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
			u_int32_t cond_bound_interface_index = kernel_policy->cond_bound_interface ? kernel_policy->cond_bound_interface->if_index : 0;
			if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
				if (bound_interface_index == cond_bound_interface_index) {
					// No match, matches forbidden interface

				if (bound_interface_index != cond_bound_interface_index) {
					// No match, does not match required interface

			if (bound_interface_index != 0) {
				// No match, requires a non-bound packet

	if (kernel_policy->condition_mask == 0) {

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID) {
		if (socket_policy_id != kernel_policy->cond_policy_id) {
			// No match, does not match required id

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LAST_INTERFACE) {
		if (last_interface_index != kernel_policy->cond_last_interface_index) {

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
		if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
			if (protocol == kernel_policy->cond_protocol) {
				// No match, matches forbidden protocol

			if (protocol != kernel_policy->cond_protocol) {
				// No match, does not match required protocol

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
		if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
			bool inRange = necp_is_addr_in_range((struct sockaddr *)local, (struct sockaddr *)&kernel_policy->cond_local_start, (struct sockaddr *)&kernel_policy->cond_local_end);
			if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_LOCAL_END) {

		} else if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
			bool inSubnet = necp_is_addr_in_subnet((struct sockaddr *)local, (struct sockaddr *)&kernel_policy->cond_local_start, kernel_policy->cond_local_prefix);
			if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {

	if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
		if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
			bool inRange = necp_is_addr_in_range((struct sockaddr *)remote, (struct sockaddr *)&kernel_policy->cond_remote_start, (struct sockaddr *)&kernel_policy->cond_remote_end);
			if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REMOTE_END) {

		} else if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
			bool inSubnet = necp_is_addr_in_subnet((struct sockaddr *)remote, (struct sockaddr *)&kernel_policy->cond_remote_start, kernel_policy->cond_remote_prefix);
			if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
static inline struct necp_kernel_ip_output_policy *
necp_ip_output_find_policy_match_locked(necp_kernel_policy_id socket_policy_id, u_int32_t bound_interface_index, u_int32_t last_interface_index, u_int16_t protocol, union necp_sockaddr_union *local_addr, union necp_sockaddr_union *remote_addr)
	u_int32_t skip_order = 0;
	u_int32_t skip_session_order = 0;

	struct necp_kernel_ip_output_policy *matched_policy = NULL;
	struct necp_kernel_ip_output_policy **policy_search_array = necp_kernel_ip_output_policies_map[NECP_IP_OUTPUT_MAP_ID_TO_BUCKET(socket_policy_id)];
	if (policy_search_array != NULL) {
		for (i = 0; policy_search_array[i] != NULL; i++) {
			if (necp_drop_all_order != 0 && policy_search_array[i]->session_order >= necp_drop_all_order) {
				// We've hit a drop all rule

			if (skip_session_order && policy_search_array[i]->session_order >= skip_session_order) {

				skip_session_order = 0;

			if (policy_search_array[i]->order < skip_order) {

				skip_session_order = 0;
			} else if (skip_session_order) {

			if (necp_ip_output_check_policy(policy_search_array[i], socket_policy_id, bound_interface_index, last_interface_index, protocol, local_addr, remote_addr)) {
				// Passed all tests, found a match
				matched_policy = policy_search_array[i];

				if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
					skip_order = policy_search_array[i]->result_parameter.skip_policy_order;
					skip_session_order = policy_search_array[i]->session_order + 1;

	return (matched_policy);
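
/*
 * Match an outbound IPv4 packet against the IP-output policy table: recover the
 * socket-level policy ID from the mbuf, extract the protocol, bound interface,
 * and address/port fields from the headers, and return the matched policy ID
 * along with its result and result parameter.
 */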
necp_kernel_policy_id
necp_ip_output_find_policy_match(struct mbuf *packet, int flags, struct ip_out_args *ipoa, necp_kernel_policy_result *result, necp_kernel_policy_result_parameter *result_parameter)
	struct ip *ip = NULL;
	int hlen = sizeof(struct ip);
	necp_kernel_policy_id socket_policy_id = NECP_KERNEL_POLICY_ID_NONE;
	necp_kernel_policy_id matched_policy_id = NECP_KERNEL_POLICY_ID_NONE;
	struct necp_kernel_ip_output_policy *matched_policy = NULL;
	u_int16_t protocol = 0;
	u_int32_t bound_interface_index = 0;
	u_int32_t last_interface_index = 0;
	union necp_sockaddr_union local_addr;
	union necp_sockaddr_union remote_addr;

	if (result_parameter) {
		memset(result_parameter, 0, sizeof(*result_parameter));

	if (packet == NULL) {
		return (NECP_KERNEL_POLICY_ID_NONE);

	socket_policy_id = necp_get_policy_id_from_packet(packet);

	// Exit early for an empty list
	// Don't lock. Possible race condition, but we don't want the performance hit.
	if (necp_kernel_ip_output_policies_count == 0 ||
	    ((socket_policy_id == NECP_KERNEL_POLICY_ID_NONE) && necp_kernel_ip_output_policies_non_id_count == 0)) {
		if (necp_drop_all_order > 0) {
			matched_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;

			if ((necp_pass_loopback > 0 &&
			    necp_is_loopback(NULL, NULL, NULL, packet)) ||
			    (necp_pass_keepalives > 0 &&
			    necp_get_is_keepalive_from_packet(packet))) {
				*result = NECP_KERNEL_POLICY_RESULT_PASS;

				*result = NECP_KERNEL_POLICY_RESULT_DROP;

		return (matched_policy_id);

	// Check for loopback exception
	if ((necp_pass_loopback > 0 &&
	    necp_is_loopback(NULL, NULL, NULL, packet)) ||
	    (necp_pass_keepalives > 0 &&
	    necp_get_is_keepalive_from_packet(packet))) {
		matched_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;

		*result = NECP_KERNEL_POLICY_RESULT_PASS;

		return (matched_policy_id);

	last_interface_index = necp_get_last_interface_index_from_packet(packet);

	// Process packet to get relevant fields
	ip = mtod(packet, struct ip *);

	hlen = _IP_VHL_HL(ip->ip_vhl) << 2;

	hlen = ip->ip_hl << 2;

	protocol = ip->ip_p;

	if ((flags & IP_OUTARGS) && (ipoa != NULL) &&
	    (ipoa->ipoa_flags & IPOAF_BOUND_IF) &&
	    ipoa->ipoa_boundif != IFSCOPE_NONE) {
		bound_interface_index = ipoa->ipoa_boundif;

	local_addr.sin.sin_family = AF_INET;
	local_addr.sin.sin_len = sizeof(struct sockaddr_in);
	memcpy(&local_addr.sin.sin_addr, &ip->ip_src, sizeof(ip->ip_src));

	remote_addr.sin.sin_family = AF_INET;
	remote_addr.sin.sin_len = sizeof(struct sockaddr_in);
	memcpy(&((struct sockaddr_in *)&remote_addr)->sin_addr, &ip->ip_dst, sizeof(ip->ip_dst));

			if ((int)(hlen + sizeof(th)) <= packet->m_pkthdr.len) {
				m_copydata(packet, hlen, sizeof(th), (u_int8_t *)&th);
				((struct sockaddr_in *)&local_addr)->sin_port = th.th_sport;
				((struct sockaddr_in *)&remote_addr)->sin_port = th.th_dport;

			if ((int)(hlen + sizeof(uh)) <= packet->m_pkthdr.len) {
				m_copydata(packet, hlen, sizeof(uh), (u_int8_t *)&uh);
				((struct sockaddr_in *)&local_addr)->sin_port = uh.uh_sport;
				((struct sockaddr_in *)&remote_addr)->sin_port = uh.uh_dport;

			((struct sockaddr_in *)&local_addr)->sin_port = 0;
			((struct sockaddr_in *)&remote_addr)->sin_port = 0;

	// Match packet to policy
	lck_rw_lock_shared(&necp_kernel_policy_lock);
	matched_policy = necp_ip_output_find_policy_match_locked(socket_policy_id, bound_interface_index, last_interface_index, protocol, &local_addr, &remote_addr);
	if (matched_policy) {
		matched_policy_id = matched_policy->id;

		*result = matched_policy->result;

		if (result_parameter) {
			memcpy(result_parameter, &matched_policy->result_parameter, sizeof(matched_policy->result_parameter));

		if (necp_debug > 1) {
			NECPLOG(LOG_DEBUG, "IP Output: (ID %d BoundInterface %d LastInterface %d Proto %d) Policy %d Result %d Parameter %d", socket_policy_id, bound_interface_index, last_interface_index, protocol, matched_policy->id, matched_policy->result, matched_policy->result_parameter.tunnel_interface_index);
	} else if (necp_drop_all_order > 0) {
		matched_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;

		*result = NECP_KERNEL_POLICY_RESULT_DROP;

	lck_rw_done(&necp_kernel_policy_lock);

	return (matched_policy_id);
necp_kernel_policy_id
necp_ip6_output_find_policy_match(struct mbuf *packet, int flags, struct ip6_out_args *ip6oa, necp_kernel_policy_result *result, necp_kernel_policy_result_parameter *result_parameter)
	struct ip6_hdr *ip6 = NULL;

	necp_kernel_policy_id socket_policy_id = NECP_KERNEL_POLICY_ID_NONE;
	necp_kernel_policy_id matched_policy_id = NECP_KERNEL_POLICY_ID_NONE;
	struct necp_kernel_ip_output_policy *matched_policy = NULL;
	u_int16_t protocol = 0;
	u_int32_t bound_interface_index = 0;
	u_int32_t last_interface_index = 0;
	union necp_sockaddr_union local_addr;
	union necp_sockaddr_union remote_addr;

	if (result_parameter) {
		memset(result_parameter, 0, sizeof(*result_parameter));

	if (packet == NULL) {
		return (NECP_KERNEL_POLICY_ID_NONE);

	socket_policy_id = necp_get_policy_id_from_packet(packet);

	// Exit early for an empty list
	// Don't lock. Possible race condition, but we don't want the performance hit.
	if (necp_kernel_ip_output_policies_count == 0 ||
	    ((socket_policy_id == NECP_KERNEL_POLICY_ID_NONE) && necp_kernel_ip_output_policies_non_id_count == 0)) {
		if (necp_drop_all_order > 0) {
			matched_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;

			if ((necp_pass_loopback > 0 &&
			    necp_is_loopback(NULL, NULL, NULL, packet)) ||
			    (necp_pass_keepalives > 0 &&
			    necp_get_is_keepalive_from_packet(packet))) {
				*result = NECP_KERNEL_POLICY_RESULT_PASS;

				*result = NECP_KERNEL_POLICY_RESULT_DROP;

		return (matched_policy_id);

	// Check for loopback exception
	if ((necp_pass_loopback > 0 &&
	    necp_is_loopback(NULL, NULL, NULL, packet)) ||
	    (necp_pass_keepalives > 0 &&
	    necp_get_is_keepalive_from_packet(packet))) {
		matched_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;

		*result = NECP_KERNEL_POLICY_RESULT_PASS;

		return (matched_policy_id);

	last_interface_index = necp_get_last_interface_index_from_packet(packet);

	// Process packet to get relevant fields
	ip6 = mtod(packet, struct ip6_hdr *);

	if ((flags & IPV6_OUTARGS) && (ip6oa != NULL) &&
	    (ip6oa->ip6oa_flags & IP6OAF_BOUND_IF) &&
	    ip6oa->ip6oa_boundif != IFSCOPE_NONE) {
		bound_interface_index = ip6oa->ip6oa_boundif;

	((struct sockaddr_in6 *)&local_addr)->sin6_family = AF_INET6;
	((struct sockaddr_in6 *)&local_addr)->sin6_len = sizeof(struct sockaddr_in6);
	memcpy(&((struct sockaddr_in6 *)&local_addr)->sin6_addr, &ip6->ip6_src, sizeof(ip6->ip6_src));

	((struct sockaddr_in6 *)&remote_addr)->sin6_family = AF_INET6;
	((struct sockaddr_in6 *)&remote_addr)->sin6_len = sizeof(struct sockaddr_in6);
	memcpy(&((struct sockaddr_in6 *)&remote_addr)->sin6_addr, &ip6->ip6_dst, sizeof(ip6->ip6_dst));

	offset = ip6_lasthdr(packet, 0, IPPROTO_IPV6, &next);
	if (offset >= 0 && packet->m_pkthdr.len >= offset) {

			if ((int)(offset + sizeof(th)) <= packet->m_pkthdr.len) {
				m_copydata(packet, offset, sizeof(th), (u_int8_t *)&th);
				((struct sockaddr_in6 *)&local_addr)->sin6_port = th.th_sport;
				((struct sockaddr_in6 *)&remote_addr)->sin6_port = th.th_dport;

			if ((int)(offset + sizeof(uh)) <= packet->m_pkthdr.len) {
				m_copydata(packet, offset, sizeof(uh), (u_int8_t *)&uh);
				((struct sockaddr_in6 *)&local_addr)->sin6_port = uh.uh_sport;
				((struct sockaddr_in6 *)&remote_addr)->sin6_port = uh.uh_dport;

			((struct sockaddr_in6 *)&local_addr)->sin6_port = 0;
			((struct sockaddr_in6 *)&remote_addr)->sin6_port = 0;

	// Match packet to policy
	lck_rw_lock_shared(&necp_kernel_policy_lock);
	matched_policy = necp_ip_output_find_policy_match_locked(socket_policy_id, bound_interface_index, last_interface_index, protocol, &local_addr, &remote_addr);
	if (matched_policy) {
		matched_policy_id = matched_policy->id;

		*result = matched_policy->result;

		if (result_parameter) {
			memcpy(result_parameter, &matched_policy->result_parameter, sizeof(matched_policy->result_parameter));

		if (necp_debug > 1) {
			NECPLOG(LOG_DEBUG, "IP6 Output: (ID %d BoundInterface %d LastInterface %d Proto %d) Policy %d Result %d Parameter %d", socket_policy_id, bound_interface_index, last_interface_index, protocol, matched_policy->id, matched_policy->result, matched_policy->result_parameter.tunnel_interface_index);
	} else if (necp_drop_all_order > 0) {
		matched_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;

		*result = NECP_KERNEL_POLICY_RESULT_DROP;

	lck_rw_done(&necp_kernel_policy_lock);

	return (matched_policy_id);
necp_is_addr_in_range(struct sockaddr *addr, struct sockaddr *range_start, struct sockaddr *range_end)
	if (addr == NULL || range_start == NULL || range_end == NULL) {

	/* Must be greater than or equal to start */
	cmp = necp_addr_compare(addr, range_start, 1);
	if (cmp != 0 && cmp != 1) {

	/* Must be less than or equal to end */
	cmp = necp_addr_compare(addr, range_end, 1);
	if (cmp != 0 && cmp != -1) {
necp_is_range_in_range(struct sockaddr *inner_range_start, struct sockaddr *inner_range_end, struct sockaddr *range_start, struct sockaddr *range_end)
	if (inner_range_start == NULL || inner_range_end == NULL || range_start == NULL || range_end == NULL) {

	/* Must be greater than or equal to start */
	cmp = necp_addr_compare(inner_range_start, range_start, 1);
	if (cmp != 0 && cmp != 1) {

	/* Must be less than or equal to end */
	cmp = necp_addr_compare(inner_range_end, range_end, 1);
	if (cmp != 0 && cmp != -1) {
necp_is_addr_in_subnet(struct sockaddr *addr, struct sockaddr *subnet_addr, u_int8_t subnet_prefix)
	if (addr == NULL || subnet_addr == NULL) {

	if (addr->sa_family != subnet_addr->sa_family || addr->sa_len != subnet_addr->sa_len) {

	switch (addr->sa_family) {
		if (satosin(subnet_addr)->sin_port != 0 &&
		    satosin(addr)->sin_port != satosin(subnet_addr)->sin_port) {

		return (necp_buffer_compare_with_bit_prefix((u_int8_t *)&satosin(addr)->sin_addr, (u_int8_t *)&satosin(subnet_addr)->sin_addr, subnet_prefix));

		if (satosin6(subnet_addr)->sin6_port != 0 &&
		    satosin6(addr)->sin6_port != satosin6(subnet_addr)->sin6_port) {

		if (satosin6(addr)->sin6_scope_id &&
		    satosin6(subnet_addr)->sin6_scope_id &&
		    satosin6(addr)->sin6_scope_id != satosin6(subnet_addr)->sin6_scope_id) {

		return (necp_buffer_compare_with_bit_prefix((u_int8_t *)&satosin6(addr)->sin6_addr, (u_int8_t *)&satosin6(subnet_addr)->sin6_addr, subnet_prefix));
 * 2: Not comparable or error
necp_addr_compare(struct sockaddr *sa1, struct sockaddr *sa2, int check_port)
	int port_result = 0;

	if (sa1->sa_family != sa2->sa_family || sa1->sa_len != sa2->sa_len) {

	if (sa1->sa_len == 0) {

	switch (sa1->sa_family) {
		if (sa1->sa_len != sizeof(struct sockaddr_in)) {

		result = memcmp(&satosin(sa1)->sin_addr.s_addr, &satosin(sa2)->sin_addr.s_addr, sizeof(satosin(sa1)->sin_addr.s_addr));

			if (satosin(sa1)->sin_port < satosin(sa2)->sin_port) {

			} else if (satosin(sa1)->sin_port > satosin(sa2)->sin_port) {

				result = port_result;
			} else if ((result > 0 && port_result < 0) || (result < 0 && port_result > 0)) {

		if (sa1->sa_len != sizeof(struct sockaddr_in6)) {

		if (satosin6(sa1)->sin6_scope_id != satosin6(sa2)->sin6_scope_id) {

		result = memcmp(&satosin6(sa1)->sin6_addr.s6_addr[0], &satosin6(sa2)->sin6_addr.s6_addr[0], sizeof(struct in6_addr));

			if (satosin6(sa1)->sin6_port < satosin6(sa2)->sin6_port) {

			} else if (satosin6(sa1)->sin6_port > satosin6(sa2)->sin6_port) {

				result = port_result;
			} else if ((result > 0 && port_result < 0) || (result < 0 && port_result > 0)) {

		result = memcmp(sa1, sa2, sa1->sa_len);

	} else if (result > 0) {
necp_buffer_compare_with_bit_prefix(u_int8_t *p1, u_int8_t *p2, u_int32_t bits)
	/* Handle null pointers */
	if (p1 == NULL || p2 == NULL) {

		if (*p1++ != *p2++) {

		mask = ~((1<<(8-bits))-1);
		if ((*p1 & mask) != (*p2 & mask)) {
// Socket operations
#define NECP_MAX_SOCKET_ATTRIBUTE_STRING_LENGTH 253
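
/*
 * Copy a single string attribute (domain or account) out of a TLV buffer into a
 * freshly allocated, NUL-terminated string and store it in *buffer_p, releasing
 * any previously set value.
 */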
necp_set_socket_attribute(u_int8_t *buffer, size_t buffer_length, u_int8_t type, char **buffer_p)
	size_t string_size = 0;
	char *local_string = NULL;
	u_int8_t *value = NULL;

	cursor = necp_buffer_find_tlv(buffer, buffer_length, 0, type, 0);

		// This will clear out the parameter

	string_size = necp_buffer_get_tlv_length(buffer, cursor);
	if (string_size == 0 || string_size > NECP_MAX_SOCKET_ATTRIBUTE_STRING_LENGTH) {
		// This will clear out the parameter

	MALLOC(local_string, char *, string_size + 1, M_NECP, M_WAITOK);
	if (local_string == NULL) {
		NECPLOG(LOG_ERR, "Failed to allocate a socket attribute buffer (size %d)", string_size);

	value = necp_buffer_get_tlv_value(buffer, cursor, NULL);
	if (value == NULL) {
		NECPLOG0(LOG_ERR, "Failed to get socket attribute");

	memcpy(local_string, value, string_size);
	local_string[string_size] = 0;

	if (*buffer_p != NULL) {
		FREE(*buffer_p, M_NECP);

	*buffer_p = local_string;

	if (local_string != NULL) {
		FREE(local_string, M_NECP);
necp_set_socket_attributes(struct socket *so, struct sockopt *sopt)
	u_int8_t *buffer = NULL;
	struct inpcb *inp = NULL;

	if ((SOCK_DOM(so) != PF_INET
	    && SOCK_DOM(so) != PF_INET6

	inp = sotoinpcb(so);

	size_t valsize = sopt->sopt_valsize;
	    valsize > ((sizeof(u_int8_t) + sizeof(u_int32_t) + NECP_MAX_SOCKET_ATTRIBUTE_STRING_LENGTH) * 2)) {

	MALLOC(buffer, u_int8_t *, valsize, M_NECP, M_WAITOK);
	if (buffer == NULL) {

	error = sooptcopyin(sopt, buffer, valsize, 0);

	error = necp_set_socket_attribute(buffer, valsize, NECP_TLV_ATTRIBUTE_DOMAIN, &inp->inp_necp_attributes.inp_domain);
		NECPLOG0(LOG_ERR, "Could not set domain TLV for socket attributes");

	error = necp_set_socket_attribute(buffer, valsize, NECP_TLV_ATTRIBUTE_ACCOUNT, &inp->inp_necp_attributes.inp_account);
		NECPLOG0(LOG_ERR, "Could not set account TLV for socket attributes");

	NECPLOG(LOG_DEBUG, "Set on socket: Domain %s, Account %s", inp->inp_necp_attributes.inp_domain, inp->inp_necp_attributes.inp_account);

	if (buffer != NULL) {
		FREE(buffer, M_NECP);
necp_get_socket_attributes(struct socket *so, struct sockopt *sopt)
	u_int8_t *buffer = NULL;
	u_int8_t *cursor = NULL;

	struct inpcb *inp = sotoinpcb(so);

	if (inp->inp_necp_attributes.inp_domain != NULL) {
		valsize += sizeof(u_int8_t) + sizeof(u_int32_t) + strlen(inp->inp_necp_attributes.inp_domain);

	if (inp->inp_necp_attributes.inp_account != NULL) {
		valsize += sizeof(u_int8_t) + sizeof(u_int32_t) + strlen(inp->inp_necp_attributes.inp_account);

	MALLOC(buffer, u_int8_t *, valsize, M_NECP, M_WAITOK);
	if (buffer == NULL) {

	if (inp->inp_necp_attributes.inp_domain != NULL) {
		cursor = necp_buffer_write_tlv(cursor, NECP_TLV_ATTRIBUTE_DOMAIN, strlen(inp->inp_necp_attributes.inp_domain), inp->inp_necp_attributes.inp_domain);

	if (inp->inp_necp_attributes.inp_account != NULL) {
		cursor = necp_buffer_write_tlv(cursor, NECP_TLV_ATTRIBUTE_ACCOUNT, strlen(inp->inp_necp_attributes.inp_account), inp->inp_necp_attributes.inp_account);

	error = sooptcopyout(sopt, buffer, valsize);

	if (buffer != NULL) {
		FREE(buffer, M_NECP);
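
/*
 * Apply a route rule to the interface that a route resolves to. Explicit
 * per-interface exceptions are checked first; the interface-type actions
 * (cellular, Wi-Fi, wired, expensive) are then aggregated with deny winning
 * over allow, and otherwise the rule's default action decides. Sets
 * *cellular_denied when cellular traffic is blocked by the rule.
 */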
static bool
necp_route_is_allowed_inner(struct rtentry *route, struct ifnet *ifp, u_int32_t route_rule_id, bool *cellular_denied)
{
	bool default_is_allowed = TRUE;
	u_int8_t type_aggregate_action = NECP_ROUTE_RULE_NONE;
	int exception_index = 0;
	struct ifnet *delegated_ifp = NULL;
	struct necp_route_rule *route_rule = NULL;

	route_rule = necp_lookup_route_rule_locked(&necp_route_rules, route_rule_id);
	if (route_rule == NULL) {
		return (TRUE);
	}

	default_is_allowed = (route_rule->default_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? FALSE : TRUE;
	if (ifp == NULL) {
		ifp = route->rt_ifp;
	}
	if (ifp == NULL) {
		if (necp_debug > 1 && !default_is_allowed) {
			NECPLOG(LOG_DEBUG, "Route Allowed: No interface for route, using default for Rule %d Allowed %d", route_rule_id, default_is_allowed);
		}
		return (default_is_allowed);
	}

	delegated_ifp = ifp->if_delegated.ifp;
	for (exception_index = 0; exception_index < MAX_ROUTE_RULE_INTERFACES; exception_index++) {
		if (route_rule->exception_if_indices[exception_index] == 0) {
			break;
		}
		if (route_rule->exception_if_indices[exception_index] == ifp->if_index ||
		    (delegated_ifp != NULL && route_rule->exception_if_indices[exception_index] == delegated_ifp->if_index)) {
			if (necp_debug > 1) {
				NECPLOG(LOG_DEBUG, "Route Allowed: Interface match %d for Rule %d Allowed %d", route_rule->exception_if_indices[exception_index], route_rule_id, ((route_rule->exception_if_actions[exception_index] == NECP_ROUTE_RULE_DENY_INTERFACE) ? FALSE : TRUE));
			}
			return ((route_rule->exception_if_actions[exception_index] == NECP_ROUTE_RULE_DENY_INTERFACE) ? FALSE : TRUE);
		}
	}

	if (route_rule->cellular_action != NECP_ROUTE_RULE_NONE &&
	    IFNET_IS_CELLULAR(ifp)) {
		if (cellular_denied != NULL) {
			// Let clients know that cellular was blocked
			*cellular_denied = TRUE;
		}
		if (type_aggregate_action == NECP_ROUTE_RULE_NONE ||
		    (type_aggregate_action == NECP_ROUTE_RULE_ALLOW_INTERFACE &&
		    route_rule->cellular_action == NECP_ROUTE_RULE_DENY_INTERFACE)) {
			// Deny wins if there is a conflict
			type_aggregate_action = route_rule->cellular_action;
		}
	}

	if (route_rule->wifi_action != NECP_ROUTE_RULE_NONE &&
	    IFNET_IS_WIFI(ifp)) {
		if (type_aggregate_action == NECP_ROUTE_RULE_NONE ||
		    (type_aggregate_action == NECP_ROUTE_RULE_ALLOW_INTERFACE &&
		    route_rule->wifi_action == NECP_ROUTE_RULE_DENY_INTERFACE)) {
			// Deny wins if there is a conflict
			type_aggregate_action = route_rule->wifi_action;
		}
	}

	if (route_rule->wired_action != NECP_ROUTE_RULE_NONE &&
	    IFNET_IS_WIRED(ifp)) {
		if (type_aggregate_action == NECP_ROUTE_RULE_NONE ||
		    (type_aggregate_action == NECP_ROUTE_RULE_ALLOW_INTERFACE &&
		    route_rule->wired_action == NECP_ROUTE_RULE_DENY_INTERFACE)) {
			// Deny wins if there is a conflict
			type_aggregate_action = route_rule->wired_action;
		}
	}

	if (route_rule->expensive_action != NECP_ROUTE_RULE_NONE &&
	    IFNET_IS_EXPENSIVE(ifp)) {
		if (type_aggregate_action == NECP_ROUTE_RULE_NONE ||
		    (type_aggregate_action == NECP_ROUTE_RULE_ALLOW_INTERFACE &&
		    route_rule->expensive_action == NECP_ROUTE_RULE_DENY_INTERFACE)) {
			// Deny wins if there is a conflict
			type_aggregate_action = route_rule->expensive_action;
		}
	}

	if (type_aggregate_action != NECP_ROUTE_RULE_NONE) {
		if (necp_debug > 1) {
			NECPLOG(LOG_DEBUG, "Route Allowed: C:%d WF:%d W:%d E:%d for Rule %d Allowed %d", route_rule->cellular_action, route_rule->wifi_action, route_rule->wired_action, route_rule->expensive_action, route_rule_id, ((type_aggregate_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? FALSE : TRUE));
		}
		return ((type_aggregate_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? FALSE : TRUE);
	}

	if (necp_debug > 1 && !default_is_allowed) {
		NECPLOG(LOG_DEBUG, "Route Allowed: Using default for Rule %d Allowed %d", route_rule_id, default_is_allowed);
	}
	return (default_is_allowed);
}
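// Top-level route rule check. Aggregate rule IDs expand into their component
// rules; a single denial among them denies the route.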
static bool
necp_route_is_allowed(struct rtentry *route, struct ifnet *interface, u_int32_t route_rule_id, bool *cellular_denied)
{
	if ((route == NULL && interface == NULL) || route_rule_id == 0) {
		if (necp_debug > 1) {
			NECPLOG(LOG_DEBUG, "Route Allowed: no route or interface, Rule %d Allowed %d", route_rule_id, TRUE);
		}
		return (TRUE);
	}

	if (ROUTE_RULE_IS_AGGREGATE(route_rule_id)) {
		struct necp_aggregate_route_rule *aggregate_route_rule = necp_lookup_aggregate_route_rule_locked(route_rule_id);
		if (aggregate_route_rule != NULL) {
			int index = 0;
			for (index = 0; index < MAX_AGGREGATE_ROUTE_RULES; index++) {
				u_int32_t sub_route_rule_id = aggregate_route_rule->rule_ids[index];
				if (sub_route_rule_id == 0) {
					break;
				}
				if (!necp_route_is_allowed_inner(route, interface, sub_route_rule_id, cellular_denied)) {
					return (FALSE);
				}
			}
		}
	} else {
		return (necp_route_is_allowed_inner(route, interface, route_rule_id, cellular_denied));
	}

	return (TRUE);
}
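// Packet-level variant: re-checks the route rule ID carried in the mbuf tag
// against the interface the packet is about to be sent over.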
bool
necp_packet_is_allowed_over_interface(struct mbuf *packet, struct ifnet *interface)
{
	bool is_allowed = TRUE;
	u_int32_t route_rule_id = necp_get_route_rule_id_from_packet(packet);
	if (route_rule_id != 0 &&
	    interface != NULL) {
		lck_rw_lock_shared(&necp_kernel_policy_lock);
		is_allowed = necp_route_is_allowed(NULL, interface, necp_get_route_rule_id_from_packet(packet), NULL);
		lck_rw_done(&necp_kernel_policy_lock);
	}
	return (is_allowed);
}
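// Traffic is blocked if any required network agent is registered but neither
// active nor voluntary; unregistered agents do not gate traffic here.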
static bool
necp_netagents_allow_traffic(u_int32_t *netagent_ids, size_t netagent_id_count)
{
	size_t netagent_cursor;
	for (netagent_cursor = 0; netagent_cursor < netagent_id_count; netagent_cursor++) {
		struct necp_uuid_id_mapping *mapping = NULL;
		u_int32_t netagent_id = netagent_ids[netagent_cursor];
		if (netagent_id == 0) {
			break;
		}
		mapping = necp_uuid_lookup_uuid_with_service_id_locked(netagent_id);
		if (mapping != NULL) {
			u_int32_t agent_flags = 0;
			agent_flags = netagent_get_flags(mapping->uuid);
			if (agent_flags & NETAGENT_FLAG_REGISTERED) {
				if (agent_flags & NETAGENT_FLAG_ACTIVE) {
					continue;
				} else if ((agent_flags & NETAGENT_FLAG_VOLUNTARY) == 0) {
					return (FALSE);
				}
			}
		}
	}
	return (TRUE);
}
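// Core send/receive admission check for a socket. Fast paths: if no socket
// policies are loaded, only the global drop-all order (minus the loopback
// exception) applies; if the cached result is still valid for the current
// policy generation and flowhash, it is reused. Otherwise the policy map is
// consulted, and the matched result, route rules, service triggers, and
// network agents are all checked before traffic is allowed.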
static bool
necp_socket_is_allowed_to_send_recv_internal(struct inpcb *inp, struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr, ifnet_t interface, necp_kernel_policy_id *return_policy_id, u_int32_t *return_route_rule_id)
{
	u_int32_t verifyifindex = interface ? interface->if_index : 0;
	bool allowed_to_receive = TRUE;
	struct necp_socket_info info;
	u_int32_t flowhash = 0;
	necp_kernel_policy_result service_action = 0;
	necp_kernel_policy_service service = { 0, 0 };
	u_int32_t route_rule_id = 0;
	struct rtentry *route = NULL;
	bool cellular_denied = FALSE;

	u_int32_t netagent_ids[NECP_MAX_NETAGENTS];
	memset(&netagent_ids, 0, sizeof(netagent_ids));

	if (return_policy_id) {
		*return_policy_id = NECP_KERNEL_POLICY_ID_NONE;
	}
	if (return_route_rule_id) {
		*return_route_rule_id = 0;
	}

	if (inp == NULL) {
		goto done;
	}

	route = inp->inp_route.ro_rt;

	// Don't lock. Possible race condition, but we don't want the performance hit.
	if (necp_kernel_socket_policies_count == 0 ||
	    (!(inp->inp_flags2 & INP2_WANT_APP_POLICY) && necp_kernel_socket_policies_non_app_count == 0)) {
		if (necp_drop_all_order > 0) {
			if (necp_pass_loopback > 0 &&
			    necp_is_loopback(override_local_addr, override_remote_addr, inp, NULL)) {
				allowed_to_receive = TRUE;
			} else {
				allowed_to_receive = FALSE;
			}
		}
		goto done;
	}

	// If this socket is connected, or we are not taking addresses into account, try to reuse last result
	if ((necp_socket_is_connected(inp) || (override_local_addr == NULL && override_remote_addr == NULL)) && inp->inp_policyresult.policy_id != NECP_KERNEL_POLICY_ID_NONE) {
		bool policies_have_changed = FALSE;
		bool route_allowed = TRUE;
		lck_rw_lock_shared(&necp_kernel_policy_lock);
		if (inp->inp_policyresult.policy_gencount != necp_kernel_socket_policies_gencount) {
			policies_have_changed = TRUE;
		} else {
			if (inp->inp_policyresult.results.route_rule_id != 0 &&
			    !necp_route_is_allowed(route, interface, inp->inp_policyresult.results.route_rule_id, &cellular_denied)) {
				route_allowed = FALSE;
			}
		}
		lck_rw_done(&necp_kernel_policy_lock);

		if (!policies_have_changed) {
			if (!route_allowed ||
			    inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_DROP ||
			    inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT ||
			    (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL && interface &&
			    inp->inp_policyresult.results.result_parameter.tunnel_interface_index != verifyifindex)) {
				allowed_to_receive = FALSE;
			} else {
				if (return_policy_id) {
					*return_policy_id = inp->inp_policyresult.policy_id;
				}
				if (return_route_rule_id) {
					*return_route_rule_id = inp->inp_policyresult.results.route_rule_id;
				}
			}
			goto done;
		}
	}

	// Check for loopback exception
	if (necp_pass_loopback > 0 &&
	    necp_is_loopback(override_local_addr, override_remote_addr, inp, NULL)) {
		allowed_to_receive = TRUE;
		goto done;
	}

	// Actually calculate policy result
	lck_rw_lock_shared(&necp_kernel_policy_lock);
	necp_socket_fillout_info_locked(inp, override_local_addr, override_remote_addr, 0, &info);

	flowhash = necp_socket_calc_flowhash_locked(&info);
	if (inp->inp_policyresult.policy_id != NECP_KERNEL_POLICY_ID_NONE &&
	    inp->inp_policyresult.policy_gencount == necp_kernel_socket_policies_gencount &&
	    inp->inp_policyresult.flowhash == flowhash) {
		if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_DROP ||
		    inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT ||
		    (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL && interface &&
		    inp->inp_policyresult.results.result_parameter.tunnel_interface_index != verifyifindex) ||
		    (inp->inp_policyresult.results.route_rule_id != 0 &&
		    !necp_route_is_allowed(route, interface, inp->inp_policyresult.results.route_rule_id, &cellular_denied))) {
			allowed_to_receive = FALSE;
		} else {
			if (return_policy_id) {
				*return_policy_id = inp->inp_policyresult.policy_id;
			}
			if (return_route_rule_id) {
				*return_route_rule_id = inp->inp_policyresult.results.route_rule_id;
			}
		}
		lck_rw_done(&necp_kernel_policy_lock);
		goto done;
	}

	struct necp_kernel_socket_policy *matched_policy = necp_socket_find_policy_match_with_info_locked(necp_kernel_socket_policies_map[NECP_SOCKET_MAP_APP_ID_TO_BUCKET(info.application_id)], &info, NULL, &route_rule_id, &service_action, &service, netagent_ids, NECP_MAX_NETAGENTS);
	if (matched_policy != NULL) {
		if (matched_policy->result == NECP_KERNEL_POLICY_RESULT_DROP ||
		    matched_policy->result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT ||
		    (matched_policy->result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL && interface &&
		    matched_policy->result_parameter.tunnel_interface_index != verifyifindex) ||
		    ((service_action == NECP_KERNEL_POLICY_RESULT_TRIGGER_SCOPED ||
		    service_action == NECP_KERNEL_POLICY_RESULT_NO_TRIGGER_SCOPED) &&
		    service.identifier != 0 && service.identifier != NECP_NULL_SERVICE_ID) ||
		    (route_rule_id != 0 &&
		    !necp_route_is_allowed(route, interface, route_rule_id, &cellular_denied)) ||
		    !necp_netagents_allow_traffic(netagent_ids, NECP_MAX_NETAGENTS)) {
			allowed_to_receive = FALSE;
		} else {
			if (return_policy_id) {
				*return_policy_id = matched_policy->id;
			}
			if (return_route_rule_id) {
				*return_route_rule_id = route_rule_id;
			}
		}
		lck_rw_done(&necp_kernel_policy_lock);

		if (necp_debug > 1 && matched_policy->id != inp->inp_policyresult.policy_id) {
			NECPLOG(LOG_DEBUG, "Socket Send/Recv Policy: Policy %d Allowed %d", return_policy_id ? *return_policy_id : 0, allowed_to_receive);
		}
		goto done;
	} else if (necp_drop_all_order > 0) {
		allowed_to_receive = FALSE;
	}

	lck_rw_done(&necp_kernel_policy_lock);

done:
	if (!allowed_to_receive && cellular_denied) {
		soevent(inp->inp_socket, (SO_FILT_HINT_LOCKED | SO_FILT_HINT_IFDENIED));
	}

	return (allowed_to_receive);
}
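// Address-family wrappers: build sockaddr structures from the raw ports and
// addresses, then defer to the internal check above.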
bool
necp_socket_is_allowed_to_send_recv_v4(struct inpcb *inp, u_int16_t local_port, u_int16_t remote_port, struct in_addr *local_addr, struct in_addr *remote_addr, ifnet_t interface, necp_kernel_policy_id *return_policy_id, u_int32_t *return_route_rule_id)
{
	struct sockaddr_in local;
	struct sockaddr_in remote;
	local.sin_family = remote.sin_family = AF_INET;
	local.sin_len = remote.sin_len = sizeof(struct sockaddr_in);
	local.sin_port = local_port;
	remote.sin_port = remote_port;
	memcpy(&local.sin_addr, local_addr, sizeof(local.sin_addr));
	memcpy(&remote.sin_addr, remote_addr, sizeof(remote.sin_addr));

	return (necp_socket_is_allowed_to_send_recv_internal(inp, (struct sockaddr *)&local, (struct sockaddr *)&remote, interface, return_policy_id, return_route_rule_id));
}
bool
necp_socket_is_allowed_to_send_recv_v6(struct inpcb *inp, u_int16_t local_port, u_int16_t remote_port, struct in6_addr *local_addr, struct in6_addr *remote_addr, ifnet_t interface, necp_kernel_policy_id *return_policy_id, u_int32_t *return_route_rule_id)
{
	struct sockaddr_in6 local;
	struct sockaddr_in6 remote;
	local.sin6_family = remote.sin6_family = AF_INET6;
	local.sin6_len = remote.sin6_len = sizeof(struct sockaddr_in6);
	local.sin6_port = local_port;
	remote.sin6_port = remote_port;
	memcpy(&local.sin6_addr, local_addr, sizeof(local.sin6_addr));
	memcpy(&remote.sin6_addr, remote_addr, sizeof(remote.sin6_addr));

	return (necp_socket_is_allowed_to_send_recv_internal(inp, (struct sockaddr *)&local, (struct sockaddr *)&remote, interface, return_policy_id, return_route_rule_id));
}
bool
necp_socket_is_allowed_to_send_recv(struct inpcb *inp, necp_kernel_policy_id *return_policy_id, u_int32_t *return_route_rule_id)
{
	return (necp_socket_is_allowed_to_send_recv_internal(inp, NULL, NULL, NULL, return_policy_id, return_route_rule_id));
}
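// Packet marking helpers: stamp the mbuf's NECP tag with the policy ID and
// route rule ID that matched, so the IP layer can apply the same verdict.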
int
necp_mark_packet_from_socket(struct mbuf *packet, struct inpcb *inp, necp_kernel_policy_id policy_id, u_int32_t route_rule_id)
{
	if (packet == NULL || inp == NULL) {
		return (EINVAL);
	}

	// Mark ID for Pass and IP Tunnel
	if (policy_id != NECP_KERNEL_POLICY_ID_NONE) {
		packet->m_pkthdr.necp_mtag.necp_policy_id = policy_id;
	} else if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_PASS ||
	    inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL) {
		packet->m_pkthdr.necp_mtag.necp_policy_id = inp->inp_policyresult.policy_id;
	} else {
		packet->m_pkthdr.necp_mtag.necp_policy_id = NECP_KERNEL_POLICY_ID_NONE;
	}
	packet->m_pkthdr.necp_mtag.necp_last_interface_index = 0;
	if (route_rule_id != 0) {
		packet->m_pkthdr.necp_mtag.necp_route_rule_id = route_rule_id;
	} else {
		packet->m_pkthdr.necp_mtag.necp_route_rule_id = inp->inp_policyresult.results.route_rule_id;
	}

	return (0);
}
int
necp_mark_packet_from_ip(struct mbuf *packet, necp_kernel_policy_id policy_id)
{
	if (packet == NULL) {
		return (EINVAL);
	}

	// Mark ID for Pass and IP Tunnel
	if (policy_id != NECP_KERNEL_POLICY_ID_NONE) {
		packet->m_pkthdr.necp_mtag.necp_policy_id = policy_id;
	} else {
		packet->m_pkthdr.necp_mtag.necp_policy_id = NECP_KERNEL_POLICY_ID_NONE;
	}

	return (0);
}
int
necp_mark_packet_from_interface(struct mbuf *packet, ifnet_t interface)
{
	if (packet == NULL) {
		return (EINVAL);
	}

	// Mark ID for Pass and IP Tunnel
	if (interface != NULL) {
		packet->m_pkthdr.necp_mtag.necp_last_interface_index = interface->if_index;
	}

	return (0);
}
int
necp_mark_packet_as_keepalive(struct mbuf *packet, bool is_keepalive)
{
	if (packet == NULL) {
		return (EINVAL);
	}

	if (is_keepalive) {
		packet->m_pkthdr.pkt_flags |= PKTF_KEEPALIVE;
	} else {
		packet->m_pkthdr.pkt_flags &= ~PKTF_KEEPALIVE;
	}

	return (0);
}
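// Accessors for the NECP mbuf tag and for cached per-socket policy results.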
necp_kernel_policy_id
necp_get_policy_id_from_packet(struct mbuf *packet)
{
	if (packet == NULL) {
		return (NECP_KERNEL_POLICY_ID_NONE);
	}

	return (packet->m_pkthdr.necp_mtag.necp_policy_id);
}
u_int32_t
necp_get_last_interface_index_from_packet(struct mbuf *packet)
{
	if (packet == NULL) {
		return (0);
	}

	return (packet->m_pkthdr.necp_mtag.necp_last_interface_index);
}
u_int32_t
necp_get_route_rule_id_from_packet(struct mbuf *packet)
{
	if (packet == NULL) {
		return (0);
	}

	return (packet->m_pkthdr.necp_mtag.necp_route_rule_id);
}
bool
necp_get_is_keepalive_from_packet(struct mbuf *packet)
{
	if (packet == NULL) {
		return (FALSE);
	}

	return (packet->m_pkthdr.pkt_flags & PKTF_KEEPALIVE);
}
u_int32_t
necp_socket_get_content_filter_control_unit(struct socket *so)
{
	struct inpcb *inp = sotoinpcb(so);

	if (inp == NULL) {
		return (0);
	}
	return (inp->inp_policyresult.results.filter_control_unit);
}
bool
necp_socket_should_use_flow_divert(struct inpcb *inp)
{
	if (inp == NULL) {
		return (FALSE);
	}

	return (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT);
}
u_int32_t
necp_socket_get_flow_divert_control_unit(struct inpcb *inp)
{
	if (inp == NULL) {
		return (0);
	}

	if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT) {
		return (inp->inp_policyresult.results.result_parameter.flow_divert_control_unit);
	}

	return (0);
}
bool
necp_socket_should_rescope(struct inpcb *inp)
{
	if (inp == NULL) {
		return (FALSE);
	}

	return (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED);
}
u_int
necp_socket_get_rescope_if_index(struct inpcb *inp)
{
	if (inp == NULL) {
		return (0);
	}

	if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED) {
		return (inp->inp_policyresult.results.result_parameter.scoped_interface_index);
	}

	return (0);
}
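// Effective MTU for sockets rescoped into an IP tunnel. For ipsec interfaces
// the delegate interface's MTU is reduced by the ESP plus IPv6 header
// overhead before being compared against the tunnel's own MTU. As a purely
// illustrative example (the real overhead comes from esp_hdrsiz()), a
// 1500-byte delegate MTU with roughly 100 bytes of overhead would yield an
// effective MTU of about 1400, returned only if smaller than the tunnel MTU.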
u_int32_t
necp_socket_get_effective_mtu(struct inpcb *inp, u_int32_t current_mtu)
{
	if (inp == NULL) {
		return (current_mtu);
	}

	if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL &&
	    (inp->inp_flags & INP_BOUND_IF) &&
	    inp->inp_boundifp) {

		u_int bound_interface_index = inp->inp_boundifp->if_index;
		u_int tunnel_interface_index = inp->inp_policyresult.results.result_parameter.tunnel_interface_index;

		// The result is IP Tunnel, and is rescoping from one interface to another. Recalculate MTU.
		if (bound_interface_index != tunnel_interface_index) {
			ifnet_t tunnel_interface = NULL;

			ifnet_head_lock_shared();
			tunnel_interface = ifindex2ifnet[tunnel_interface_index];
			ifnet_head_done();

			if (tunnel_interface != NULL) {
				u_int32_t direct_tunnel_mtu = tunnel_interface->if_mtu;
				u_int32_t delegate_tunnel_mtu = (tunnel_interface->if_delegated.ifp != NULL) ? tunnel_interface->if_delegated.ifp->if_mtu : 0;
				if (delegate_tunnel_mtu != 0 &&
				    strncmp(tunnel_interface->if_name, "ipsec", strlen("ipsec")) == 0) {
					// For ipsec interfaces, calculate the overhead from the delegate interface
					u_int32_t tunnel_overhead = (u_int32_t)(esp_hdrsiz(NULL) + sizeof(struct ip6_hdr));
					if (delegate_tunnel_mtu > tunnel_overhead) {
						delegate_tunnel_mtu -= tunnel_overhead;
					}

					if (delegate_tunnel_mtu < direct_tunnel_mtu) {
						// If the (delegate - overhead) < direct, return (delegate - overhead)
						return (delegate_tunnel_mtu);
					} else {
						// Otherwise return direct
						return (direct_tunnel_mtu);
					}
				} else {
					// For non-ipsec interfaces, just return the tunnel MTU
					return (direct_tunnel_mtu);
				}
			}
		}
	}

	// By default, just return the MTU passed in
	return (current_mtu);
}
static ifnet_t
necp_get_ifnet_from_result_parameter(necp_kernel_policy_result_parameter *result_parameter)
{
	if (result_parameter == NULL) {
		return (NULL);
	}

	return (ifindex2ifnet[result_parameter->tunnel_interface_index]);
}
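// A packet can be rebound to another interface only if its source address is
// owned by that interface and a scoped route to its destination can be
// resolved there.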
static bool
necp_packet_can_rebind_to_ifnet(struct mbuf *packet, struct ifnet *interface, struct route *new_route, int family)
{
	bool found_match = FALSE;
	errno_t result = 0;
	ifaddr_t *addresses = NULL;
	union necp_sockaddr_union address_storage;
	int i = 0;

	if (packet == NULL || interface == NULL || new_route == NULL || (family != AF_INET && family != AF_INET6)) {
		return (FALSE);
	}

	result = ifnet_get_address_list_family(interface, &addresses, family);
	if (result != 0) {
		NECPLOG(LOG_ERR, "Failed to get address list for %s%d", ifnet_name(interface), ifnet_unit(interface));
		return (FALSE);
	}

	for (i = 0; addresses[i] != NULL; i++) {
		ROUTE_RELEASE(new_route);
		if (ifaddr_address(addresses[i], &address_storage.sa, sizeof(address_storage)) == 0) {
			if (family == AF_INET) {
				struct ip *ip = mtod(packet, struct ip *);
				if (memcmp(&address_storage.sin.sin_addr, &ip->ip_src, sizeof(ip->ip_src)) == 0) {
					struct sockaddr_in *dst4 = (struct sockaddr_in *)(void *)&new_route->ro_dst;
					dst4->sin_family = AF_INET;
					dst4->sin_len = sizeof(struct sockaddr_in);
					dst4->sin_addr = ip->ip_dst;
					rtalloc_scoped(new_route, interface->if_index);
					if (!ROUTE_UNUSABLE(new_route)) {
						found_match = TRUE;
						goto done;
					}
				}
			} else if (family == AF_INET6) {
				struct ip6_hdr *ip6 = mtod(packet, struct ip6_hdr *);
				if (memcmp(&address_storage.sin6.sin6_addr, &ip6->ip6_src, sizeof(ip6->ip6_src)) == 0) {
					struct sockaddr_in6 *dst6 = (struct sockaddr_in6 *)(void *)&new_route->ro_dst;
					dst6->sin6_family = AF_INET6;
					dst6->sin6_len = sizeof(struct sockaddr_in6);
					dst6->sin6_addr = ip6->ip6_dst;
					rtalloc_scoped(new_route, interface->if_index);
					if (!ROUTE_UNUSABLE(new_route)) {
						found_match = TRUE;
						goto done;
					}
				}
			}
		}
	}

done:
	ifnet_free_address_list(addresses);
	addresses = NULL;
	return (found_match);
}
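// Loopback helpers backing the necp_pass_loopback exception used above.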
static bool
necp_addr_is_loopback(struct sockaddr *address)
{
	if (address == NULL) {
		return (FALSE);
	}

	if (address->sa_family == AF_INET) {
		return (ntohl(((struct sockaddr_in *)(void *)address)->sin_addr.s_addr) == INADDR_LOOPBACK);
	} else if (address->sa_family == AF_INET6) {
		return IN6_IS_ADDR_LOOPBACK(&((struct sockaddr_in6 *)(void *)address)->sin6_addr);
	}

	return (FALSE);
}
static bool
necp_is_loopback(struct sockaddr *local_addr, struct sockaddr *remote_addr, struct inpcb *inp, struct mbuf *packet)
{
	// Note: This function only checks for the loopback addresses.
	// In the future, we may want to expand to also allow any traffic
	// going through the loopback interface, but until then, this
	// check is cheaper.

	if (local_addr != NULL && necp_addr_is_loopback(local_addr)) {
		return (TRUE);
	}

	if (remote_addr != NULL && necp_addr_is_loopback(remote_addr)) {
		return (TRUE);
	}

	if (inp != NULL) {
		if ((inp->inp_flags & INP_BOUND_IF) && inp->inp_boundifp && (inp->inp_boundifp->if_flags & IFF_LOOPBACK)) {
			return (TRUE);
		}
		if (inp->inp_vflag & INP_IPV4) {
			if (ntohl(inp->inp_laddr.s_addr) == INADDR_LOOPBACK ||
			    ntohl(inp->inp_faddr.s_addr) == INADDR_LOOPBACK) {
				return (TRUE);
			}
		} else if (inp->inp_vflag & INP_IPV6) {
			if (IN6_IS_ADDR_LOOPBACK(&inp->in6p_laddr) ||
			    IN6_IS_ADDR_LOOPBACK(&inp->in6p_faddr)) {
				return (TRUE);
			}
		}
	}

	if (packet != NULL) {
		struct ip *ip = mtod(packet, struct ip *);
		if (ip->ip_v == 4) {
			if (ntohl(ip->ip_src.s_addr) == INADDR_LOOPBACK) {
				return (TRUE);
			}
			if (ntohl(ip->ip_dst.s_addr) == INADDR_LOOPBACK) {
				return (TRUE);
			}
		} else if (ip->ip_v == 6) {
			struct ip6_hdr *ip6 = mtod(packet, struct ip6_hdr *);
			if (IN6_IS_ADDR_LOOPBACK(&ip6->ip6_src)) {
				return (TRUE);
			}
			if (IN6_IS_ADDR_LOOPBACK(&ip6->ip6_dst)) {