[apple/xnu.git] / bsd / net / necp.c
1 /*
2 * Copyright (c) 2013-2017 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <string.h>
30 #include <sys/systm.h>
31 #include <sys/types.h>
32 #include <sys/queue.h>
33 #include <sys/malloc.h>
34 #include <libkern/OSMalloc.h>
35 #include <sys/kernel.h>
36 #include <sys/kern_control.h>
37 #include <sys/mbuf.h>
38 #include <sys/kpi_mbuf.h>
39 #include <sys/proc_uuid_policy.h>
40 #include <net/if.h>
41 #include <sys/domain.h>
42 #include <sys/protosw.h>
43 #include <sys/socket.h>
44 #include <sys/socketvar.h>
45 #include <sys/coalition.h>
46 #include <netinet/ip.h>
47 #include <netinet/ip6.h>
48 #include <netinet/tcp.h>
49 #include <netinet/tcp_var.h>
50 #include <netinet/tcp_cache.h>
51 #include <netinet/udp.h>
52 #include <netinet/in_pcb.h>
53 #include <netinet/in_tclass.h>
54 #include <netinet6/esp.h>
55 #include <net/flowhash.h>
56 #include <net/if_var.h>
57 #include <sys/kauth.h>
58 #include <sys/sysctl.h>
59 #include <sys/sysproto.h>
60 #include <sys/priv.h>
61 #include <sys/kern_event.h>
62 #include <sys/file_internal.h>
63 #include <IOKit/IOBSD.h>
64 #include <net/network_agent.h>
65 #include <net/necp.h>
66
67 /*
68 * NECP - Network Extension Control Policy database
69 * ------------------------------------------------
70 * The goal of this module is to allow clients connecting via a
71 * kernel control socket to create high-level policy sessions, which
72 * are ingested into low-level kernel policies that control and tag
73 * traffic at the application, socket, and IP layers.
74 *
75 * ------------------------------------------------
76 * Sessions
77 * ------------------------------------------------
78 * Each session owns a list of session policies, each of which can
79 * specify any combination of conditions and a single result. Each
80 * session also has a priority level (such as High, Default, or Low)
81 * which is requested by the client. Based on the requested level,
82 * a session order value is assigned to the session, which will be used
83 * to sort kernel policies generated by the session. The session client
84 * can specify the sub-order for each policy it creates which will be
85 * used to further sort the kernel policies.
86 *
87 * Kernel Control Socket --> 1 necp_session --> list of necp_session_policy structs
88 *
89 * ------------------------------------------------
90 * Kernel Policies
91 * ------------------------------------------------
92 * Whenever a session sends the Apply command, its policies are ingested
93 * and generate kernel policies. There are two phases of kernel policy
94 * ingestion.
95 *
96 * 1. The session policy is parsed to create kernel policies at the socket
97 * and IP layers, when applicable. For example, a policy that requires
98 * all traffic from App1 to Pass will generate a socket kernel policy to
99 * match App1 and mark packets with ID1, and also an IP policy to match
100 * ID1 and let the packet pass. This is handled in necp_apply_policy. The
101 * resulting kernel policies are added to the global socket and IP layer
102 * policy lists.
103 * necp_session_policy --> necp_kernel_socket_policy and necp_kernel_ip_output_policy
104 * || ||
105 * \/ \/
106 * necp_kernel_socket_policies necp_kernel_ip_output_policies
107 *
108 * 2. Once the global lists of kernel policies have been filled out, each
109 * list is traversed to create optimized sub-lists ("Maps") which are used during
110 * data-path evaluation. IP policies are sent into necp_kernel_ip_output_policies_map,
111 * which hashes incoming packets based on marked socket-layer policies, and removes
112 * duplicate or overlapping policies. Socket policies are sent into two maps,
113 * necp_kernel_socket_policies_map and necp_kernel_socket_policies_app_layer_map.
114 * The app layer map is used for policy checks coming in from user space, and is one
115 * list with duplicate and overlapping policies removed. The socket map hashes based
116 * on app UUID, and removes duplicate and overlapping policies.
117 * necp_kernel_socket_policy --> necp_kernel_socket_policies_app_layer_map
118 * |-> necp_kernel_socket_policies_map
119 *
120 * necp_kernel_ip_output_policies --> necp_kernel_ip_output_policies_map
121 *
122 * ------------------------------------------------
123 * Drop All Level
124 * ------------------------------------------------
125 * The Drop All Level is a sysctl that controls the level at which policies are allowed
126 * to override a global drop rule. If the value is 0, no drop rule is applied. If the value
127 * is 1, all traffic is dropped. If the value is greater than 1, all kernel policies created
128 * by a session with a priority level better than (numerically less than) the
129 * Drop All Level will allow matching traffic to not be dropped. The Drop All Level is
130 * dynamically interpreted into necp_drop_all_order, which specifies the equivalent assigned
131 * session orders to be dropped.
132 */
133
134 u_int32_t necp_drop_all_order = 0;
135 u_int32_t necp_drop_all_level = 0;
136
137 u_int32_t necp_pass_loopback = 1; // 0=Off, 1=On
138 u_int32_t necp_pass_keepalives = 1; // 0=Off, 1=On
139
140 u_int32_t necp_debug = 0; // 0=None, 1=Basic, 2=EveryMatch
141
142 u_int32_t necp_session_count = 0;
143
144 #define LIST_INSERT_SORTED_ASCENDING(head, elm, field, sortfield, tmpelm) do { \
145 if (LIST_EMPTY((head)) || (LIST_FIRST(head)->sortfield >= (elm)->sortfield)) { \
146 LIST_INSERT_HEAD((head), elm, field); \
147 } else { \
148 LIST_FOREACH(tmpelm, head, field) { \
149 if (LIST_NEXT(tmpelm, field) == NULL || LIST_NEXT(tmpelm, field)->sortfield >= (elm)->sortfield) { \
150 LIST_INSERT_AFTER(tmpelm, elm, field); \
151 break; \
152 } \
153 } \
154 } \
155 } while (0)
156
157 #define LIST_INSERT_SORTED_TWICE_ASCENDING(head, elm, field, firstsortfield, secondsortfield, tmpelm) do { \
158 if (LIST_EMPTY((head)) || (LIST_FIRST(head)->firstsortfield > (elm)->firstsortfield) || ((LIST_FIRST(head)->firstsortfield == (elm)->firstsortfield) && (LIST_FIRST(head)->secondsortfield >= (elm)->secondsortfield))) { \
159 LIST_INSERT_HEAD((head), elm, field); \
160 } else { \
161 LIST_FOREACH(tmpelm, head, field) { \
162 if (LIST_NEXT(tmpelm, field) == NULL || (LIST_NEXT(tmpelm, field)->firstsortfield > (elm)->firstsortfield) || ((LIST_NEXT(tmpelm, field)->firstsortfield == (elm)->firstsortfield) && (LIST_NEXT(tmpelm, field)->secondsortfield >= (elm)->secondsortfield))) { \
163 LIST_INSERT_AFTER(tmpelm, elm, field); \
164 break; \
165 } \
166 } \
167 } \
168 } while (0)
169
170 #define LIST_INSERT_SORTED_THRICE_ASCENDING(head, elm, field, firstsortfield, secondsortfield, thirdsortfield, tmpelm) do { \
171 if (LIST_EMPTY((head)) || (LIST_FIRST(head)->firstsortfield > (elm)->firstsortfield) || ((LIST_FIRST(head)->firstsortfield == (elm)->firstsortfield) && (LIST_FIRST(head)->secondsortfield >= (elm)->secondsortfield)) || ((LIST_FIRST(head)->firstsortfield == (elm)->firstsortfield) && (LIST_FIRST(head)->secondsortfield == (elm)->secondsortfield) && (LIST_FIRST(head)->thirdsortfield >= (elm)->thirdsortfield))) { \
172 LIST_INSERT_HEAD((head), elm, field); \
173 } else { \
174 LIST_FOREACH(tmpelm, head, field) { \
175 if (LIST_NEXT(tmpelm, field) == NULL || (LIST_NEXT(tmpelm, field)->firstsortfield > (elm)->firstsortfield) || ((LIST_NEXT(tmpelm, field)->firstsortfield == (elm)->firstsortfield) && (LIST_NEXT(tmpelm, field)->secondsortfield >= (elm)->secondsortfield)) || ((LIST_NEXT(tmpelm, field)->firstsortfield == (elm)->firstsortfield) && (LIST_NEXT(tmpelm, field)->secondsortfield == (elm)->secondsortfield) && (LIST_NEXT(tmpelm, field)->thirdsortfield >= (elm)->thirdsortfield))) { \
176 LIST_INSERT_AFTER(tmpelm, elm, field); \
177 break; \
178 } \
179 } \
180 } \
181 } while (0)
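
/*
 * Illustrative sketch (not part of the original source, not built): how
 * LIST_INSERT_SORTED_ASCENDING keeps a list ordered on one field. The struct
 * and function names below are hypothetical.
 *
 *	struct example_elem {
 *		LIST_ENTRY(example_elem) chain;
 *		u_int32_t order;
 *	};
 *	static LIST_HEAD(, example_elem) example_head =
 *	    LIST_HEAD_INITIALIZER(example_head);
 *
 *	static void
 *	example_insert_sorted(struct example_elem *elem)
 *	{
 *		struct example_elem *tmp = NULL;
 *		// Inserts elem before the first entry whose order is >= elem->order,
 *		// or at the tail if every existing entry sorts earlier.
 *		LIST_INSERT_SORTED_ASCENDING(&example_head, elem, chain, order, tmp);
 *	}
 *
 * The TWICE/THRICE variants break ties on a second and third field in the
 * same way.
 */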
182
183 #define IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(x) ((x) == NECP_ROUTE_RULE_DENY_INTERFACE || (x) == NECP_ROUTE_RULE_ALLOW_INTERFACE)
184
185 #define NECP_KERNEL_CONDITION_ALL_INTERFACES 0x00001
186 #define NECP_KERNEL_CONDITION_BOUND_INTERFACE 0x00002
187 #define NECP_KERNEL_CONDITION_PROTOCOL 0x00004
188 #define NECP_KERNEL_CONDITION_LOCAL_START 0x00008
189 #define NECP_KERNEL_CONDITION_LOCAL_END 0x00010
190 #define NECP_KERNEL_CONDITION_LOCAL_PREFIX 0x00020
191 #define NECP_KERNEL_CONDITION_REMOTE_START 0x00040
192 #define NECP_KERNEL_CONDITION_REMOTE_END 0x00080
193 #define NECP_KERNEL_CONDITION_REMOTE_PREFIX 0x00100
194 #define NECP_KERNEL_CONDITION_APP_ID 0x00200
195 #define NECP_KERNEL_CONDITION_REAL_APP_ID 0x00400
196 #define NECP_KERNEL_CONDITION_DOMAIN 0x00800
197 #define NECP_KERNEL_CONDITION_ACCOUNT_ID 0x01000
198 #define NECP_KERNEL_CONDITION_POLICY_ID 0x02000
199 #define NECP_KERNEL_CONDITION_PID 0x04000
200 #define NECP_KERNEL_CONDITION_UID 0x08000
201 #define NECP_KERNEL_CONDITION_LAST_INTERFACE 0x10000 // Only set from packets looping between interfaces
202 #define NECP_KERNEL_CONDITION_TRAFFIC_CLASS 0x20000
203 #define NECP_KERNEL_CONDITION_ENTITLEMENT 0x40000
204 #define NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT 0x80000
205
206 #define NECP_MAX_POLICY_RESULT_SIZE 512
207 #define NECP_MAX_ROUTE_RULES_ARRAY_SIZE 1024
208 #define NECP_MAX_CONDITIONS_ARRAY_SIZE 4096
209 #define NECP_MAX_POLICY_LIST_COUNT 1024
210
211 // Cap the policy size at the max result + conditions size, with room for extra TLVs
212 #define NECP_MAX_POLICY_SIZE (1024 + NECP_MAX_POLICY_RESULT_SIZE + NECP_MAX_CONDITIONS_ARRAY_SIZE)
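// Illustrative note (not from the original source): with the values above this
// works out to 1024 + 512 + 4096 = 5632 bytes per policy message.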
213
214 struct necp_service_registration {
215 LIST_ENTRY(necp_service_registration) session_chain;
216 LIST_ENTRY(necp_service_registration) kernel_chain;
217 u_int32_t service_id;
218 };
219
220 struct necp_session {
221 u_int8_t necp_fd_type;
222 u_int32_t control_unit;
223 u_int32_t session_priority; // Descriptive priority rating
224 u_int32_t session_order;
225
226 decl_lck_mtx_data(, lock);
227
228 bool proc_locked; // Messages must come from proc_uuid
229 uuid_t proc_uuid;
230 int proc_pid;
231
232 bool dirty;
233 LIST_HEAD(_policies, necp_session_policy) policies;
234
235 LIST_HEAD(_services, necp_service_registration) services;
236
237 TAILQ_ENTRY(necp_session) chain;
238 };
239
240 #define NECP_SESSION_LOCK(_s) lck_mtx_lock(&_s->lock)
241 #define NECP_SESSION_UNLOCK(_s) lck_mtx_unlock(&_s->lock)
242
243 static TAILQ_HEAD(_necp_session_list, necp_session) necp_session_list;
244
245 struct necp_socket_info {
246 pid_t pid;
247 uid_t uid;
248 union necp_sockaddr_union local_addr;
249 union necp_sockaddr_union remote_addr;
250 u_int32_t bound_interface_index;
251 u_int32_t traffic_class;
252 u_int16_t protocol;
253 u_int32_t application_id;
254 u_int32_t real_application_id;
255 u_int32_t account_id;
256 char *domain;
257 errno_t cred_result;
258 };
259
260 static kern_ctl_ref necp_kctlref;
261 static u_int32_t necp_family;
262 static OSMallocTag necp_malloc_tag;
263 static lck_grp_attr_t *necp_kernel_policy_grp_attr = NULL;
264 static lck_attr_t *necp_kernel_policy_mtx_attr = NULL;
265 static lck_grp_t *necp_kernel_policy_mtx_grp = NULL;
266 decl_lck_rw_data(static, necp_kernel_policy_lock);
267
268 static lck_grp_attr_t *necp_route_rule_grp_attr = NULL;
269 static lck_attr_t *necp_route_rule_mtx_attr = NULL;
270 static lck_grp_t *necp_route_rule_mtx_grp = NULL;
271 decl_lck_rw_data(static, necp_route_rule_lock);
272
273 static necp_policy_id necp_last_policy_id = 0;
274 static necp_kernel_policy_id necp_last_kernel_policy_id = 0;
275 static u_int32_t necp_last_uuid_id = 0;
276 static u_int32_t necp_last_string_id = 0;
277 static u_int32_t necp_last_route_rule_id = 0;
278 static u_int32_t necp_last_aggregate_route_rule_id = 0;
279
280 /*
281 * On modification, invalidate cached lookups by bumping the generation count.
282 * Other calls will need to take the slowpath of taking
283 * the subsystem lock.
284 */
285 static volatile int32_t necp_kernel_socket_policies_gencount;
286 #define BUMP_KERNEL_SOCKET_POLICIES_GENERATION_COUNT() do { \
287 if (OSIncrementAtomic(&necp_kernel_socket_policies_gencount) == (INT32_MAX - 1)) { \
288 necp_kernel_socket_policies_gencount = 1; \
289 } \
290 } while (0)
291
292 static u_int32_t necp_kernel_application_policies_condition_mask;
293 static size_t necp_kernel_application_policies_count;
294 static u_int32_t necp_kernel_socket_policies_condition_mask;
295 static size_t necp_kernel_socket_policies_count;
296 static size_t necp_kernel_socket_policies_non_app_count;
297 static LIST_HEAD(_necpkernelsocketconnectpolicies, necp_kernel_socket_policy) necp_kernel_socket_policies;
298 #define NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS 5
299 #define NECP_SOCKET_MAP_APP_ID_TO_BUCKET(appid) (appid ? (appid%(NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS - 1) + 1) : 0)
300 static struct necp_kernel_socket_policy **necp_kernel_socket_policies_map[NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS];
301 static struct necp_kernel_socket_policy **necp_kernel_socket_policies_app_layer_map;
302 /*
303 * A note on policy 'maps': these are used for boosting efficiency when matching policies. For each dimension of the map,
304 * such as an ID, the 0 bucket is reserved for sockets/packets that do not have this parameter, while the other
305 * buckets lead to an array of policy pointers that form the list applicable when the (parameter%(NUM_BUCKETS - 1) + 1) == bucket_index.
306 *
307 * For example, a packet with policy ID of 7, when there are 4 ID buckets, will map to bucket (7%3 + 1) = 2.
308 */
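// Illustrative note (not from the original source): with
// NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS == 5 above, an app ID of 7
// maps to bucket NECP_SOCKET_MAP_APP_ID_TO_BUCKET(7) = (7 % 4) + 1 = 4, while an
// app ID of 0 (no application) always lands in the reserved bucket 0.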
309
310 static u_int32_t necp_kernel_ip_output_policies_condition_mask;
311 static size_t necp_kernel_ip_output_policies_count;
312 static size_t necp_kernel_ip_output_policies_non_id_count;
313 static LIST_HEAD(_necpkernelipoutputpolicies, necp_kernel_ip_output_policy) necp_kernel_ip_output_policies;
314 #define NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS 5
315 #define NECP_IP_OUTPUT_MAP_ID_TO_BUCKET(id) (id ? (id%(NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS - 1) + 1) : 0)
316 static struct necp_kernel_ip_output_policy **necp_kernel_ip_output_policies_map[NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS];
317
318 static struct necp_session *necp_create_session(void);
319 static void necp_delete_session(struct necp_session *session);
320
321 static necp_policy_id necp_handle_policy_add(struct necp_session *session, u_int32_t message_id, mbuf_t packet,
322 u_int8_t *tlv_buffer, size_t tlv_buffer_length, int offset, int *error);
323 static void necp_handle_policy_get(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
324 static void necp_handle_policy_delete(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
325 static void necp_handle_policy_apply_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
326 static void necp_handle_policy_list_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
327 static void necp_handle_policy_delete_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
328 static int necp_handle_policy_dump_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet,
329 user_addr_t out_buffer, size_t out_buffer_length, int offset);
330 static void necp_handle_set_session_priority(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
331 static void necp_handle_lock_session_to_proc(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
332 static void necp_handle_register_service(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
333 static void necp_handle_unregister_service(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
334
335 #define MAX_RESULT_STRING_LEN 64
336 static inline const char * necp_get_result_description(char *result_string, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter);
337
338 static struct necp_session_policy *necp_policy_create(struct necp_session *session, necp_policy_order order, u_int8_t *conditions_array, u_int32_t conditions_array_size, u_int8_t *route_rules_array, u_int32_t route_rules_array_size, u_int8_t *result, u_int32_t result_size);
339 static struct necp_session_policy *necp_policy_find(struct necp_session *session, necp_policy_id policy_id);
340 static bool necp_policy_mark_for_deletion(struct necp_session *session, struct necp_session_policy *policy);
341 static bool necp_policy_mark_all_for_deletion(struct necp_session *session);
342 static bool necp_policy_delete(struct necp_session *session, struct necp_session_policy *policy);
343 static void necp_policy_apply_all(struct necp_session *session);
344
345 static necp_kernel_policy_id necp_kernel_socket_policy_add(necp_policy_id parent_policy_id, necp_policy_order order, u_int32_t session_order, int session_pid, u_int32_t condition_mask, u_int32_t condition_negated_mask, necp_app_id cond_app_id, necp_app_id cond_real_app_id, char *cond_custom_entitlement, u_int32_t cond_account_id, char *domain, pid_t cond_pid, uid_t cond_uid, ifnet_t cond_bound_interface, struct necp_policy_condition_tc_range cond_traffic_class, u_int16_t cond_protocol, union necp_sockaddr_union *cond_local_start, union necp_sockaddr_union *cond_local_end, u_int8_t cond_local_prefix, union necp_sockaddr_union *cond_remote_start, union necp_sockaddr_union *cond_remote_end, u_int8_t cond_remote_prefix, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter);
346 static bool necp_kernel_socket_policy_delete(necp_kernel_policy_id policy_id);
347 static bool necp_kernel_socket_policies_reprocess(void);
348 static bool necp_kernel_socket_policies_update_uuid_table(void);
349 static inline struct necp_kernel_socket_policy *necp_socket_find_policy_match_with_info_locked(struct necp_kernel_socket_policy **policy_search_array, struct necp_socket_info *info, necp_kernel_policy_filter *return_filter, u_int32_t *return_route_rule_id, necp_kernel_policy_result *return_service_action, necp_kernel_policy_service *return_service, u_int32_t *return_netagent_array, size_t netagent_array_count, proc_t proc);
350
351 static necp_kernel_policy_id necp_kernel_ip_output_policy_add(necp_policy_id parent_policy_id, necp_policy_order order, necp_policy_order suborder, u_int32_t session_order, int session_pid, u_int32_t condition_mask, u_int32_t condition_negated_mask, necp_kernel_policy_id cond_policy_id, ifnet_t cond_bound_interface, u_int32_t cond_last_interface_index, u_int16_t cond_protocol, union necp_sockaddr_union *cond_local_start, union necp_sockaddr_union *cond_local_end, u_int8_t cond_local_prefix, union necp_sockaddr_union *cond_remote_start, union necp_sockaddr_union *cond_remote_end, u_int8_t cond_remote_prefix, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter);
352 static bool necp_kernel_ip_output_policy_delete(necp_kernel_policy_id policy_id);
353 static bool necp_kernel_ip_output_policies_reprocess(void);
354
355 static bool necp_is_addr_in_range(struct sockaddr *addr, struct sockaddr *range_start, struct sockaddr *range_end);
356 static bool necp_is_range_in_range(struct sockaddr *inner_range_start, struct sockaddr *inner_range_end, struct sockaddr *range_start, struct sockaddr *range_end);
357 static bool necp_is_addr_in_subnet(struct sockaddr *addr, struct sockaddr *subnet_addr, u_int8_t subnet_prefix);
358 static int necp_addr_compare(struct sockaddr *sa1, struct sockaddr *sa2, int check_port);
359 static bool necp_buffer_compare_with_bit_prefix(u_int8_t *p1, u_int8_t *p2, u_int32_t bits);
360 static bool necp_is_loopback(struct sockaddr *local_addr, struct sockaddr *remote_addr, struct inpcb *inp, struct mbuf *packet);
361 static bool necp_is_intcoproc(struct inpcb *inp, struct mbuf *packet);
362
363 struct necp_uuid_id_mapping {
364 LIST_ENTRY(necp_uuid_id_mapping) chain;
365 uuid_t uuid;
366 u_int32_t id;
367 u_int32_t refcount;
368 u_int32_t table_refcount; // Add to UUID policy table count
369 };
370 static size_t necp_num_uuid_app_id_mappings;
371 static bool necp_uuid_app_id_mappings_dirty;
372 #define NECP_UUID_APP_ID_HASH_SIZE 64
373 static u_long necp_uuid_app_id_hash_mask;
374 static u_long necp_uuid_app_id_hash_num_buckets;
375 static LIST_HEAD(necp_uuid_id_mapping_head, necp_uuid_id_mapping) *necp_uuid_app_id_hashtbl, necp_uuid_service_id_list; // App map is real hash table, service map is just mapping
376 #define APPUUIDHASH(uuid) (&necp_uuid_app_id_hashtbl[uuid[0] & necp_uuid_app_id_hash_mask]) // Assume the first byte of UUIDs is evenly distributed
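// Illustrative note (not from the original source): necp_init() below sizes this
// table with hashinit(NECP_UUID_APP_ID_HASH_SIZE, ...); with a 64-bucket table
// (mask 63), a UUID whose first byte is 0x9a hashes to bucket (0x9a & 63) == 26.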
377 static u_int32_t necp_create_uuid_app_id_mapping(uuid_t uuid, bool *allocated_mapping, bool uuid_policy_table);
378 static bool necp_remove_uuid_app_id_mapping(uuid_t uuid, bool *removed_mapping, bool uuid_policy_table);
379 static struct necp_uuid_id_mapping *necp_uuid_lookup_uuid_with_app_id_locked(u_int32_t local_id);
380
381 static struct necp_uuid_id_mapping *necp_uuid_lookup_service_id_locked(uuid_t uuid);
382 static struct necp_uuid_id_mapping *necp_uuid_lookup_uuid_with_service_id_locked(u_int32_t local_id);
383 static u_int32_t necp_create_uuid_service_id_mapping(uuid_t uuid);
384 static bool necp_remove_uuid_service_id_mapping(uuid_t uuid);
385
386 struct necp_string_id_mapping {
387 LIST_ENTRY(necp_string_id_mapping) chain;
388 char *string;
389 necp_app_id id;
390 u_int32_t refcount;
391 };
392 static LIST_HEAD(necp_string_id_mapping_list, necp_string_id_mapping) necp_account_id_list;
393 static u_int32_t necp_create_string_to_id_mapping(struct necp_string_id_mapping_list *list, char *domain);
394 static bool necp_remove_string_to_id_mapping(struct necp_string_id_mapping_list *list, char *domain);
395 static struct necp_string_id_mapping *necp_lookup_string_with_id_locked(struct necp_string_id_mapping_list *list, u_int32_t local_id);
396
397 static struct necp_kernel_socket_policy *necp_kernel_socket_policy_find(necp_kernel_policy_id policy_id);
398 static struct necp_kernel_ip_output_policy *necp_kernel_ip_output_policy_find(necp_kernel_policy_id policy_id);
399
400 static LIST_HEAD(_necp_kernel_service_list, necp_service_registration) necp_registered_service_list;
401
402 static char *necp_create_trimmed_domain(char *string, size_t length);
403 static inline int necp_count_dots(char *string, size_t length);
404
405 static char *necp_copy_string(char *string, size_t length);
406 static bool necp_update_qos_marking(struct ifnet *ifp, u_int32_t route_rule_id);
407
408 #define ROUTE_RULE_IS_AGGREGATE(ruleid) (ruleid > UINT16_MAX)
409
410 #define MAX_ROUTE_RULE_INTERFACES 10
411 struct necp_route_rule {
412 LIST_ENTRY(necp_route_rule) chain;
413 u_int32_t id;
414 u_int32_t default_action;
415 u_int8_t cellular_action;
416 u_int8_t wifi_action;
417 u_int8_t wired_action;
418 u_int8_t expensive_action;
419 u_int exception_if_indices[MAX_ROUTE_RULE_INTERFACES];
420 u_int8_t exception_if_actions[MAX_ROUTE_RULE_INTERFACES];
421 u_int32_t refcount;
422 };
423 static LIST_HEAD(necp_route_rule_list, necp_route_rule) necp_route_rules;
424 static u_int32_t necp_create_route_rule(struct necp_route_rule_list *list, u_int8_t *route_rules_array, u_int32_t route_rules_array_size);
425 static bool necp_remove_route_rule(struct necp_route_rule_list *list, u_int32_t route_rule_id);
426 static bool necp_route_is_allowed(struct rtentry *route, ifnet_t interface, u_int32_t route_rule_id, u_int32_t *interface_type_denied);
427 static struct necp_route_rule *necp_lookup_route_rule_locked(struct necp_route_rule_list *list, u_int32_t route_rule_id);
428 static inline void necp_get_parent_cred_result(proc_t proc, struct necp_socket_info *info);
429
430 #define MAX_AGGREGATE_ROUTE_RULES 16
431 struct necp_aggregate_route_rule {
432 LIST_ENTRY(necp_aggregate_route_rule) chain;
433 u_int32_t id;
434 u_int32_t rule_ids[MAX_AGGREGATE_ROUTE_RULES];
435 };
436 static LIST_HEAD(necp_aggregate_route_rule_list, necp_aggregate_route_rule) necp_aggregate_route_rules;
437 static u_int32_t necp_create_aggregate_route_rule(u_int32_t *rule_ids);
438
439 // Sysctl definitions
440 static int sysctl_handle_necp_level SYSCTL_HANDLER_ARGS;
441
442 SYSCTL_NODE(_net, OID_AUTO, necp, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "NECP");
443 SYSCTL_INT(_net_necp, NECPCTL_PASS_LOOPBACK, pass_loopback, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_pass_loopback, 0, "");
444 SYSCTL_INT(_net_necp, NECPCTL_PASS_KEEPALIVES, pass_keepalives, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_pass_keepalives, 0, "");
445 SYSCTL_INT(_net_necp, NECPCTL_DEBUG, debug, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_debug, 0, "");
446 SYSCTL_PROC(_net_necp, NECPCTL_DROP_ALL_LEVEL, drop_all_level, CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW, &necp_drop_all_level, 0, &sysctl_handle_necp_level, "IU", "");
447 SYSCTL_LONG(_net_necp, NECPCTL_SOCKET_POLICY_COUNT, socket_policy_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_kernel_socket_policies_count, "");
448 SYSCTL_LONG(_net_necp, NECPCTL_SOCKET_NON_APP_POLICY_COUNT, socket_non_app_policy_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_kernel_socket_policies_non_app_count, "");
449 SYSCTL_LONG(_net_necp, NECPCTL_IP_POLICY_COUNT, ip_policy_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_kernel_ip_output_policies_count, "");
450 SYSCTL_INT(_net_necp, NECPCTL_SESSION_COUNT, session_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_session_count, 0, "");
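// Illustrative note (not from the original source): these export as the
// "net.necp" sysctl branch, e.g. "sysctl net.necp.drop_all_level" to read the
// Drop All Level and "sysctl -w net.necp.drop_all_level=1" (as root) to set it.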
451
452 // Session order allocation
453 static u_int32_t
454 necp_allocate_new_session_order(u_int32_t priority, u_int32_t control_unit)
455 {
456 u_int32_t new_order = 0;
457
458 // For now, just allocate 1000 orders for each priority
459 if (priority == NECP_SESSION_PRIORITY_UNKNOWN || priority > NECP_SESSION_NUM_PRIORITIES) {
460 priority = NECP_SESSION_PRIORITY_DEFAULT;
461 }
462
463 // Use the control unit to decide the offset into the priority list
464 new_order = (control_unit) + ((priority - 1) * 1000);
465
466 return (new_order);
467 }
468
469 static inline u_int32_t
470 necp_get_first_order_for_priority(u_int32_t priority)
471 {
472 return (((priority - 1) * 1000) + 1);
473 }
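
// Illustrative note (not from the original source): with 1000 orders reserved
// per priority band, a session at priority p with control unit u is assigned
// order u + ((p - 1) * 1000); e.g. priority 3, control unit 5 yields order 2005,
// and necp_get_first_order_for_priority(3) returns 2001, the best (lowest) order
// in that band. The sysctl handler below stores this first-order value in
// necp_drop_all_order when the Drop All Level is set.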
474
475 // Sysctl handler
476 static int
477 sysctl_handle_necp_level SYSCTL_HANDLER_ARGS
478 {
479 #pragma unused(arg1, arg2)
480 int error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
481 if (necp_drop_all_level == 0) {
482 necp_drop_all_order = 0;
483 } else {
484 necp_drop_all_order = necp_get_first_order_for_priority(necp_drop_all_level);
485 }
486 return (error);
487 }
488
489 // Session fd
490
491 static int noop_read(struct fileproc *, struct uio *, int, vfs_context_t);
492 static int noop_write(struct fileproc *, struct uio *, int, vfs_context_t);
493 static int noop_ioctl(struct fileproc *, unsigned long, caddr_t,
494 vfs_context_t);
495 static int noop_select(struct fileproc *, int, void *, vfs_context_t);
496 static int necp_session_op_close(struct fileglob *, vfs_context_t);
497 static int noop_kqfilter(struct fileproc *, struct knote *,
498 struct kevent_internal_s *, vfs_context_t);
499
500 static const struct fileops necp_session_fd_ops = {
501 .fo_type = DTYPE_NETPOLICY,
502 .fo_read = noop_read,
503 .fo_write = noop_write,
504 .fo_ioctl = noop_ioctl,
505 .fo_select = noop_select,
506 .fo_close = necp_session_op_close,
507 .fo_kqfilter = noop_kqfilter,
508 .fo_drain = NULL,
509 };
510
511 static int
512 noop_read(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx)
513 {
514 #pragma unused(fp, uio, flags, ctx)
515 return (ENXIO);
516 }
517
518 static int
519 noop_write(struct fileproc *fp, struct uio *uio, int flags,
520 vfs_context_t ctx)
521 {
522 #pragma unused(fp, uio, flags, ctx)
523 return (ENXIO);
524 }
525
526 static int
527 noop_ioctl(struct fileproc *fp, unsigned long com, caddr_t data,
528 vfs_context_t ctx)
529 {
530 #pragma unused(fp, com, data, ctx)
531 return (ENOTTY);
532 }
533
534 static int
535 noop_select(struct fileproc *fp, int which, void *wql, vfs_context_t ctx)
536 {
537 #pragma unused(fp, which, wql, ctx)
538 return (ENXIO);
539 }
540
541 static int
542 noop_kqfilter(struct fileproc *fp, struct knote *kn,
543 struct kevent_internal_s *kev, vfs_context_t ctx)
544 {
545 #pragma unused(fp, kn, kev, ctx)
546 return (ENXIO);
547 }
548
549 int
550 necp_session_open(struct proc *p, struct necp_session_open_args *uap, int *retval)
551 {
552 #pragma unused(uap)
553 int error = 0;
554 struct necp_session *session = NULL;
555 struct fileproc *fp = NULL;
556 int fd = -1;
557
558 uid_t uid = kauth_cred_getuid(proc_ucred(p));
559 if (uid != 0 && priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES, 0) != 0) {
560 NECPLOG0(LOG_ERR, "Process does not hold necessary entitlement to open NECP session");
561 error = EACCES;
562 goto done;
563 }
564
565 error = falloc(p, &fp, &fd, vfs_context_current());
566 if (error != 0) {
567 goto done;
568 }
569
570 session = necp_create_session();
571 if (session == NULL) {
572 error = ENOMEM;
573 goto done;
574 }
575
576 fp->f_fglob->fg_flag = 0;
577 fp->f_fglob->fg_ops = &necp_session_fd_ops;
578 fp->f_fglob->fg_data = session;
579
580 proc_fdlock(p);
581 FDFLAGS_SET(p, fd, (UF_EXCLOSE | UF_FORKCLOSE));
582 procfdtbl_releasefd(p, fd, NULL);
583 fp_drop(p, fd, fp, 1);
584 proc_fdunlock(p);
585
586 *retval = fd;
587 done:
588 if (error != 0) {
589 if (fp != NULL) {
590 fp_free(p, fd, fp);
591 fp = NULL;
592 }
593 }
594
595 return (error);
596 }
597
598 static int
599 necp_session_op_close(struct fileglob *fg, vfs_context_t ctx)
600 {
601 #pragma unused(ctx)
602 struct necp_session *session = (struct necp_session *)fg->fg_data;
603 fg->fg_data = NULL;
604
605 if (session != NULL) {
606 necp_policy_mark_all_for_deletion(session);
607 necp_policy_apply_all(session);
608 necp_delete_session(session);
609 return (0);
610 } else {
611 return (ENOENT);
612 }
613 }
614
615 static int
616 necp_session_find_from_fd(int fd, struct necp_session **session)
617 {
618 proc_t p = current_proc();
619 struct fileproc *fp = NULL;
620 int error = 0;
621
622 proc_fdlock_spin(p);
623 if ((error = fp_lookup(p, fd, &fp, 1)) != 0) {
624 goto done;
625 }
626 if (fp->f_fglob->fg_ops->fo_type != DTYPE_NETPOLICY) {
627 fp_drop(p, fd, fp, 1);
628 error = ENODEV;
629 goto done;
630 }
631 *session = (struct necp_session *)fp->f_fglob->fg_data;
632
633 done:
634 proc_fdunlock(p);
635 return (error);
636 }
637
638 static int
639 necp_session_add_policy(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
640 {
641 int error = 0;
642 u_int8_t *tlv_buffer = NULL;
643
644 if (uap->in_buffer_length == 0 || uap->in_buffer_length > NECP_MAX_POLICY_SIZE || uap->in_buffer == 0) {
645 NECPLOG(LOG_ERR, "necp_session_add_policy invalid input (%zu)", uap->in_buffer_length);
646 error = EINVAL;
647 goto done;
648 }
649
650 if (uap->out_buffer_length < sizeof(necp_policy_id) || uap->out_buffer == 0) {
651 NECPLOG(LOG_ERR, "necp_session_add_policy invalid output buffer (%zu)", uap->out_buffer_length);
652 error = EINVAL;
653 goto done;
654 }
655
656 if ((tlv_buffer = _MALLOC(uap->in_buffer_length, M_NECP, M_WAITOK | M_ZERO)) == NULL) {
657 error = ENOMEM;
658 goto done;
659 }
660
661 error = copyin(uap->in_buffer, tlv_buffer, uap->in_buffer_length);
662 if (error != 0) {
663 NECPLOG(LOG_ERR, "necp_session_add_policy tlv copyin error (%d)", error);
664 goto done;
665 }
666
667 necp_policy_id new_policy_id = necp_handle_policy_add(session, 0, NULL, tlv_buffer, uap->in_buffer_length, 0, &error);
668 if (error != 0) {
669 NECPLOG(LOG_ERR, "necp_session_add_policy failed to add policy (%d)", error);
670 goto done;
671 }
672
673 error = copyout(&new_policy_id, uap->out_buffer, sizeof(new_policy_id));
674 if (error != 0) {
675 NECPLOG(LOG_ERR, "necp_session_add_policy policy_id copyout error (%d)", error);
676 goto done;
677 }
678
679 done:
680 if (tlv_buffer != NULL) {
681 FREE(tlv_buffer, M_NECP);
682 tlv_buffer = NULL;
683 }
684 *retval = error;
685
686 return (error);
687 }
688
689 static int
690 necp_session_get_policy(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
691 {
692 int error = 0;
693 u_int8_t *response = NULL;
694
695 if (uap->in_buffer_length < sizeof(necp_policy_id) || uap->in_buffer == 0) {
696 NECPLOG(LOG_ERR, "necp_session_get_policy invalid input (%zu)", uap->in_buffer_length);
697 error = EINVAL;
698 goto done;
699 }
700
701 necp_policy_id policy_id = 0;
702 error = copyin(uap->in_buffer, &policy_id, sizeof(policy_id));
703 if (error != 0) {
704 NECPLOG(LOG_ERR, "necp_session_get_policy policy_id copyin error (%d)", error);
705 goto done;
706 }
707
708 struct necp_session_policy *policy = necp_policy_find(session, policy_id);
709 if (policy == NULL || policy->pending_deletion) {
710 NECPLOG(LOG_ERR, "Failed to find policy with id %d", policy_id);
711 error = ENOENT;
712 goto done;
713 }
714
715 u_int32_t order_tlv_size = sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(necp_policy_order);
716 u_int32_t result_tlv_size = (policy->result_size ? (sizeof(u_int8_t) + sizeof(u_int32_t) + policy->result_size) : 0);
717 u_int32_t response_size = order_tlv_size + result_tlv_size + policy->conditions_size;
718
719 if (uap->out_buffer_length < response_size || uap->out_buffer == 0) {
720 NECPLOG(LOG_ERR, "necp_session_get_policy buffer not large enough (%u < %u)", uap->out_buffer_length, response_size);
721 error = EINVAL;
722 goto done;
723 }
724
725 if (response_size > NECP_MAX_POLICY_SIZE) {
726 NECPLOG(LOG_ERR, "necp_session_get_policy size too large to copy (%u)", response_size);
727 error = EINVAL;
728 goto done;
729 }
730
731 MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK | M_ZERO);
732 if (response == NULL) {
733 error = ENOMEM;
734 goto done;
735 }
736
737 u_int8_t *cursor = response;
738 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ORDER, sizeof(necp_policy_order), &policy->order, response, response_size);
739 if (result_tlv_size) {
740 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_RESULT, policy->result_size, &policy->result, response, response_size);
741 }
742 if (policy->conditions_size) {
743 memcpy(((u_int8_t *)(void *)(cursor)), policy->conditions, policy->conditions_size);
744 }
745
746 error = copyout(response, uap->out_buffer, response_size);
747 if (error != 0) {
748 NECPLOG(LOG_ERR, "necp_session_get_policy TLV copyout error (%d)", error);
749 goto done;
750 }
751
752 done:
753 if (response != NULL) {
754 FREE(response, M_NECP);
755 response = NULL;
756 }
757 *retval = error;
758
759 return (error);
760 }
761
762 static int
763 necp_session_delete_policy(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
764 {
765 int error = 0;
766
767 if (uap->in_buffer_length < sizeof(necp_policy_id) || uap->in_buffer == 0) {
768 NECPLOG(LOG_ERR, "necp_session_delete_policy invalid input (%zu)", uap->in_buffer_length);
769 error = EINVAL;
770 goto done;
771 }
772
773 necp_policy_id delete_policy_id = 0;
774 error = copyin(uap->in_buffer, &delete_policy_id, sizeof(delete_policy_id));
775 if (error != 0) {
776 NECPLOG(LOG_ERR, "necp_session_delete_policy policy_id copyin error (%d)", error);
777 goto done;
778 }
779
780 struct necp_session_policy *policy = necp_policy_find(session, delete_policy_id);
781 if (policy == NULL || policy->pending_deletion) {
782 NECPLOG(LOG_ERR, "necp_session_delete_policy failed to find policy with id %u", delete_policy_id);
783 error = ENOENT;
784 goto done;
785 }
786
787 necp_policy_mark_for_deletion(session, policy);
788 done:
789 *retval = error;
790 return (error);
791 }
792
793 static int
794 necp_session_apply_all(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
795 {
796 #pragma unused(uap)
797 necp_policy_apply_all(session);
798 *retval = 0;
799 return (0);
800 }
801
802 static int
803 necp_session_list_all(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
804 {
805 u_int32_t tlv_size = (sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(necp_policy_id));
806 u_int32_t response_size = 0;
807 u_int8_t *response = NULL;
808 int num_policies = 0;
809 int cur_policy_index = 0;
810 int error = 0;
811 struct necp_session_policy *policy;
812
813 LIST_FOREACH(policy, &session->policies, chain) {
814 if (!policy->pending_deletion) {
815 num_policies++;
816 }
817 }
818
819 if (num_policies > NECP_MAX_POLICY_LIST_COUNT) {
820 NECPLOG(LOG_ERR, "necp_session_list_all size too large to copy (%u policies)", num_policies);
821 error = EINVAL;
822 goto done;
823 }
824
825 response_size = num_policies * tlv_size;
826 if (uap->out_buffer_length < response_size || uap->out_buffer == 0) {
827 NECPLOG(LOG_ERR, "necp_session_list_all buffer not large enough (%u < %u)", uap->out_buffer_length, response_size);
828 error = EINVAL;
829 goto done;
830 }
831
832 // Create a response with one Policy ID TLV for each policy
833 MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK | M_ZERO);
834 if (response == NULL) {
835 error = ENOMEM;
836 goto done;
837 }
838
839 u_int8_t *cursor = response;
840 LIST_FOREACH(policy, &session->policies, chain) {
841 if (!policy->pending_deletion && cur_policy_index < num_policies) {
842 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ID, sizeof(u_int32_t), &policy->id, response, response_size);
843 cur_policy_index++;
844 }
845 }
846
847 error = copyout(response, uap->out_buffer, response_size);
848 if (error != 0) {
849 NECPLOG(LOG_ERR, "necp_session_list_all TLV copyout error (%d)", error);
850 goto done;
851 }
852
853 done:
854 if (response != NULL) {
855 FREE(response, M_NECP);
856 response = NULL;
857 }
858 *retval = error;
859
860 return (error);
861 }
862
863
864 static int
865 necp_session_delete_all(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
866 {
867 #pragma unused(uap)
868 necp_policy_mark_all_for_deletion(session);
869 *retval = 0;
870 return (0);
871 }
872
873 static int
874 necp_session_set_session_priority(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
875 {
876 int error = 0;
877 struct necp_session_policy *policy = NULL;
878 struct necp_session_policy *temp_policy = NULL;
879
880 if (uap->in_buffer_length < sizeof(necp_session_priority) || uap->in_buffer == 0) {
881 NECPLOG(LOG_ERR, "necp_session_set_session_priority invalid input (%zu)", uap->in_buffer_length);
882 error = EINVAL;
883 goto done;
884 }
885
886 necp_session_priority requested_session_priority = 0;
887 error = copyin(uap->in_buffer, &requested_session_priority, sizeof(requested_session_priority));
888 if (error != 0) {
889 NECPLOG(LOG_ERR, "necp_session_set_session_priority priority copyin error (%d)", error);
890 goto done;
891 }
892
893 // Enforce special session priorities with entitlements
894 if (requested_session_priority == NECP_SESSION_PRIORITY_CONTROL ||
895 requested_session_priority == NECP_SESSION_PRIORITY_PRIVILEGED_TUNNEL) {
896 errno_t cred_result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES, 0);
897 if (cred_result != 0) {
898 NECPLOG(LOG_ERR, "Session does not hold necessary entitlement to claim priority level %d", requested_session_priority);
899 error = EPERM;
900 goto done;
901 }
902 }
903
904 if (session->session_priority != requested_session_priority) {
905 session->session_priority = requested_session_priority;
906 session->session_order = necp_allocate_new_session_order(session->session_priority, session->control_unit);
907 session->dirty = TRUE;
908
909 // Mark all policies as needing updates
910 LIST_FOREACH_SAFE(policy, &session->policies, chain, temp_policy) {
911 policy->pending_update = TRUE;
912 }
913 }
914
915 done:
916 *retval = error;
917 return (error);
918 }
919
920 static int
921 necp_session_lock_to_process(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
922 {
923 #pragma unused(uap)
924 session->proc_locked = TRUE;
925 *retval = 0;
926 return (0);
927 }
928
929 static int
930 necp_session_register_service(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
931 {
932 int error = 0;
933 struct necp_service_registration *new_service = NULL;
934
935 if (uap->in_buffer_length < sizeof(uuid_t) || uap->in_buffer == 0) {
936 NECPLOG(LOG_ERR, "necp_session_register_service invalid input (%zu)", uap->in_buffer_length);
937 error = EINVAL;
938 goto done;
939 }
940
941 uuid_t service_uuid;
942 error = copyin(uap->in_buffer, service_uuid, sizeof(service_uuid));
943 if (error != 0) {
944 NECPLOG(LOG_ERR, "necp_session_register_service uuid copyin error (%d)", error);
945 goto done;
946 }
947
948 MALLOC(new_service, struct necp_service_registration *, sizeof(*new_service), M_NECP, M_WAITOK | M_ZERO);
949 if (new_service == NULL) {
950 NECPLOG0(LOG_ERR, "Failed to allocate service registration");
951 error = ENOMEM;
952 goto done;
953 }
954
955 lck_rw_lock_exclusive(&necp_kernel_policy_lock);
956 new_service->service_id = necp_create_uuid_service_id_mapping(service_uuid);
957 LIST_INSERT_HEAD(&session->services, new_service, session_chain);
958 LIST_INSERT_HEAD(&necp_registered_service_list, new_service, kernel_chain);
959 lck_rw_done(&necp_kernel_policy_lock);
960
961 done:
962 *retval = error;
963 return (error);
964 }
965
966 static int
967 necp_session_unregister_service(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
968 {
969 int error = 0;
970 struct necp_service_registration *service = NULL;
971 struct necp_service_registration *temp_service = NULL;
972 struct necp_uuid_id_mapping *mapping = NULL;
973
974 if (uap->in_buffer_length < sizeof(uuid_t) || uap->in_buffer == 0) {
975 NECPLOG(LOG_ERR, "necp_session_unregister_service invalid input (%zu)", uap->in_buffer_length);
976 error = EINVAL;
977 goto done;
978 }
979
980 uuid_t service_uuid;
981 error = copyin(uap->in_buffer, service_uuid, sizeof(service_uuid));
982 if (error != 0) {
983 NECPLOG(LOG_ERR, "necp_session_unregister_service uuid copyin error (%d)", error);
984 goto done;
985 }
986
987 // Remove all matching services for this session
988 lck_rw_lock_exclusive(&necp_kernel_policy_lock);
989 mapping = necp_uuid_lookup_service_id_locked(service_uuid);
990 if (mapping != NULL) {
991 LIST_FOREACH_SAFE(service, &session->services, session_chain, temp_service) {
992 if (service->service_id == mapping->id) {
993 LIST_REMOVE(service, session_chain);
994 LIST_REMOVE(service, kernel_chain);
995 FREE(service, M_NECP);
996 }
997 }
998 necp_remove_uuid_service_id_mapping(service_uuid);
999 }
1000 lck_rw_done(&necp_kernel_policy_lock);
1001
1002 done:
1003 *retval = error;
1004 return (error);
1005 }
1006
1007 static int
1008 necp_session_dump_all(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
1009 {
1010 int error = 0;
1011
1012 if (uap->out_buffer_length == 0 || uap->out_buffer == 0) {
1013 NECPLOG(LOG_ERR, "necp_session_dump_all invalid output buffer (%zu)", uap->out_buffer_length);
1014 error = EINVAL;
1015 goto done;
1016 }
1017
1018 error = necp_handle_policy_dump_all(session, 0, NULL, uap->out_buffer, uap->out_buffer_length, 0);
1019 done:
1020 *retval = error;
1021 return (error);
1022 }
1023
1024 int
1025 necp_session_action(struct proc *p, struct necp_session_action_args *uap, int *retval)
1026 {
1027 #pragma unused(p)
1028 int error = 0;
1029 int return_value = 0;
1030 struct necp_session *session = NULL;
1031 error = necp_session_find_from_fd(uap->necp_fd, &session);
1032 if (error != 0) {
1033 NECPLOG(LOG_ERR, "necp_session_action find fd error (%d)", error);
1034 return (error);
1035 }
1036
1037 NECP_SESSION_LOCK(session);
1038
1039 if (session->proc_locked) {
1040 // Verify that the calling process is allowed to do actions
1041 uuid_t proc_uuid;
1042 proc_getexecutableuuid(current_proc(), proc_uuid, sizeof(proc_uuid));
1043 if (uuid_compare(proc_uuid, session->proc_uuid) != 0) {
1044 error = EPERM;
1045 goto done;
1046 }
1047 } else {
1048 // If not locked, update the proc_uuid and proc_pid of the session
1049 proc_getexecutableuuid(current_proc(), session->proc_uuid, sizeof(session->proc_uuid));
1050 session->proc_pid = proc_pid(current_proc());
1051 }
1052
1053 u_int32_t action = uap->action;
1054 switch (action) {
1055 case NECP_SESSION_ACTION_POLICY_ADD: {
1056 return_value = necp_session_add_policy(session, uap, retval);
1057 break;
1058 }
1059 case NECP_SESSION_ACTION_POLICY_GET: {
1060 return_value = necp_session_get_policy(session, uap, retval);
1061 break;
1062 }
1063 case NECP_SESSION_ACTION_POLICY_DELETE: {
1064 return_value = necp_session_delete_policy(session, uap, retval);
1065 break;
1066 }
1067 case NECP_SESSION_ACTION_POLICY_APPLY_ALL: {
1068 return_value = necp_session_apply_all(session, uap, retval);
1069 break;
1070 }
1071 case NECP_SESSION_ACTION_POLICY_LIST_ALL: {
1072 return_value = necp_session_list_all(session, uap, retval);
1073 break;
1074 }
1075 case NECP_SESSION_ACTION_POLICY_DELETE_ALL: {
1076 return_value = necp_session_delete_all(session, uap, retval);
1077 break;
1078 }
1079 case NECP_SESSION_ACTION_SET_SESSION_PRIORITY: {
1080 return_value = necp_session_set_session_priority(session, uap, retval);
1081 break;
1082 }
1083 case NECP_SESSION_ACTION_LOCK_SESSION_TO_PROC: {
1084 return_value = necp_session_lock_to_process(session, uap, retval);
1085 break;
1086 }
1087 case NECP_SESSION_ACTION_REGISTER_SERVICE: {
1088 return_value = necp_session_register_service(session, uap, retval);
1089 break;
1090 }
1091 case NECP_SESSION_ACTION_UNREGISTER_SERVICE: {
1092 return_value = necp_session_unregister_service(session, uap, retval);
1093 break;
1094 }
1095 case NECP_SESSION_ACTION_POLICY_DUMP_ALL: {
1096 return_value = necp_session_dump_all(session, uap, retval);
1097 break;
1098 }
1099 default: {
1100 NECPLOG(LOG_ERR, "necp_session_action unknown action (%u)", action);
1101 return_value = EINVAL;
1102 break;
1103 }
1104 }
1105
1106 done:
1107 NECP_SESSION_UNLOCK(session);
1108 file_drop(uap->necp_fd);
1109
1110 return (return_value);
1111 }
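
/*
 * Illustrative sketch (not part of the original source): the expected calling
 * sequence from an entitled user-space client, matching the action handlers
 * above. The user-space wrapper names and signatures are assumptions here and
 * are not declared in this file.
 *
 *	int fd = necp_session_open(0);                  // -> necp_session_open()
 *	// ...build a TLV buffer describing one policy...
 *	necp_policy_id id = 0;
 *	necp_session_action(fd, NECP_SESSION_ACTION_POLICY_ADD,
 *	    tlv_buffer, tlv_buffer_length, &id, sizeof(id));
 *	necp_session_action(fd, NECP_SESSION_ACTION_POLICY_APPLY_ALL,
 *	    NULL, 0, NULL, 0);
 *	// ...
 *	close(fd);                                      // -> necp_session_op_close()
 */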
1112
1113 // Kernel Control functions
1114 static errno_t necp_register_control(void);
1115 static errno_t necp_ctl_connect(kern_ctl_ref kctlref, struct sockaddr_ctl *sac, void **unitinfo);
1116 static errno_t necp_ctl_disconnect(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo);
1117 static errno_t necp_ctl_send(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, mbuf_t m, int flags);
1118 static void necp_ctl_rcvd(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int flags);
1119 static errno_t necp_ctl_getopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int opt, void *data, size_t *len);
1120 static errno_t necp_ctl_setopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int opt, void *data, size_t len);
1121
1122 static bool necp_send_ctl_data(struct necp_session *session, u_int8_t *buffer, size_t buffer_size);
1123
1124 errno_t
1125 necp_init(void)
1126 {
1127 errno_t result = 0;
1128
1129 result = necp_register_control();
1130 if (result != 0) {
1131 goto done;
1132 }
1133
1134 necp_kernel_policy_grp_attr = lck_grp_attr_alloc_init();
1135 if (necp_kernel_policy_grp_attr == NULL) {
1136 NECPLOG0(LOG_ERR, "lck_grp_attr_alloc_init failed");
1137 result = ENOMEM;
1138 goto done;
1139 }
1140
1141 necp_kernel_policy_mtx_grp = lck_grp_alloc_init(NECP_CONTROL_NAME, necp_kernel_policy_grp_attr);
1142 if (necp_kernel_policy_mtx_grp == NULL) {
1143 NECPLOG0(LOG_ERR, "lck_grp_alloc_init failed");
1144 result = ENOMEM;
1145 goto done;
1146 }
1147
1148 necp_kernel_policy_mtx_attr = lck_attr_alloc_init();
1149 if (necp_kernel_policy_mtx_attr == NULL) {
1150 NECPLOG0(LOG_ERR, "lck_attr_alloc_init failed");
1151 result = ENOMEM;
1152 goto done;
1153 }
1154
1155 lck_rw_init(&necp_kernel_policy_lock, necp_kernel_policy_mtx_grp, necp_kernel_policy_mtx_attr);
1156
1157 necp_route_rule_grp_attr = lck_grp_attr_alloc_init();
1158 if (necp_route_rule_grp_attr == NULL) {
1159 NECPLOG0(LOG_ERR, "lck_grp_attr_alloc_init failed");
1160 result = ENOMEM;
1161 goto done;
1162 }
1163
1164 necp_route_rule_mtx_grp = lck_grp_alloc_init("necp_route_rule", necp_route_rule_grp_attr);
1165 if (necp_route_rule_mtx_grp == NULL) {
1166 NECPLOG0(LOG_ERR, "lck_grp_alloc_init failed");
1167 result = ENOMEM;
1168 goto done;
1169 }
1170
1171 necp_route_rule_mtx_attr = lck_attr_alloc_init();
1172 if (necp_route_rule_mtx_attr == NULL) {
1173 NECPLOG0(LOG_ERR, "lck_attr_alloc_init failed");
1174 result = ENOMEM;
1175 goto done;
1176 }
1177
1178 lck_rw_init(&necp_route_rule_lock, necp_route_rule_mtx_grp, necp_route_rule_mtx_attr);
1179
1180 necp_client_init();
1181
1182 TAILQ_INIT(&necp_session_list);
1183
1184 LIST_INIT(&necp_kernel_socket_policies);
1185 LIST_INIT(&necp_kernel_ip_output_policies);
1186
1187 LIST_INIT(&necp_account_id_list);
1188
1189 LIST_INIT(&necp_uuid_service_id_list);
1190
1191 LIST_INIT(&necp_registered_service_list);
1192
1193 LIST_INIT(&necp_route_rules);
1194 LIST_INIT(&necp_aggregate_route_rules);
1195
1196 necp_uuid_app_id_hashtbl = hashinit(NECP_UUID_APP_ID_HASH_SIZE, M_NECP, &necp_uuid_app_id_hash_mask);
1197 necp_uuid_app_id_hash_num_buckets = necp_uuid_app_id_hash_mask + 1;
1198 necp_num_uuid_app_id_mappings = 0;
1199 necp_uuid_app_id_mappings_dirty = FALSE;
1200
1201 necp_kernel_application_policies_condition_mask = 0;
1202 necp_kernel_socket_policies_condition_mask = 0;
1203 necp_kernel_ip_output_policies_condition_mask = 0;
1204
1205 necp_kernel_application_policies_count = 0;
1206 necp_kernel_socket_policies_count = 0;
1207 necp_kernel_socket_policies_non_app_count = 0;
1208 necp_kernel_ip_output_policies_count = 0;
1209 necp_kernel_ip_output_policies_non_id_count = 0;
1210
1211 necp_last_policy_id = 0;
1212 necp_last_kernel_policy_id = 0;
1213 necp_last_uuid_id = 0;
1214 necp_last_string_id = 0;
1215 necp_last_route_rule_id = 0;
1216 necp_last_aggregate_route_rule_id = 0;
1217
1218 necp_kernel_socket_policies_gencount = 1;
1219
1220 memset(&necp_kernel_socket_policies_map, 0, sizeof(necp_kernel_socket_policies_map));
1221 memset(&necp_kernel_ip_output_policies_map, 0, sizeof(necp_kernel_ip_output_policies_map));
1222 necp_kernel_socket_policies_app_layer_map = NULL;
1223
1224 done:
1225 if (result != 0) {
1226 if (necp_kernel_policy_mtx_attr != NULL) {
1227 lck_attr_free(necp_kernel_policy_mtx_attr);
1228 necp_kernel_policy_mtx_attr = NULL;
1229 }
1230 if (necp_kernel_policy_mtx_grp != NULL) {
1231 lck_grp_free(necp_kernel_policy_mtx_grp);
1232 necp_kernel_policy_mtx_grp = NULL;
1233 }
1234 if (necp_kernel_policy_grp_attr != NULL) {
1235 lck_grp_attr_free(necp_kernel_policy_grp_attr);
1236 necp_kernel_policy_grp_attr = NULL;
1237 }
1238 if (necp_route_rule_mtx_attr != NULL) {
1239 lck_attr_free(necp_route_rule_mtx_attr);
1240 necp_route_rule_mtx_attr = NULL;
1241 }
1242 if (necp_route_rule_mtx_grp != NULL) {
1243 lck_grp_free(necp_route_rule_mtx_grp);
1244 necp_route_rule_mtx_grp = NULL;
1245 }
1246 if (necp_route_rule_grp_attr != NULL) {
1247 lck_grp_attr_free(necp_route_rule_grp_attr);
1248 necp_route_rule_grp_attr = NULL;
1249 }
1250 if (necp_kctlref != NULL) {
1251 ctl_deregister(necp_kctlref);
1252 necp_kctlref = NULL;
1253 }
1254 }
1255 return (result);
1256 }
1257
1258 static errno_t
1259 necp_register_control(void)
1260 {
1261 struct kern_ctl_reg kern_ctl;
1262 errno_t result = 0;
1263
1264 // Create a tag to allocate memory
1265 necp_malloc_tag = OSMalloc_Tagalloc(NECP_CONTROL_NAME, OSMT_DEFAULT);
1266
1267 // Find a unique value for our interface family
1268 result = mbuf_tag_id_find(NECP_CONTROL_NAME, &necp_family);
1269 if (result != 0) {
1270 NECPLOG(LOG_ERR, "mbuf_tag_id_find_internal failed: %d", result);
1271 return (result);
1272 }
1273
1274 bzero(&kern_ctl, sizeof(kern_ctl));
1275 strlcpy(kern_ctl.ctl_name, NECP_CONTROL_NAME, sizeof(kern_ctl.ctl_name));
1276 kern_ctl.ctl_name[sizeof(kern_ctl.ctl_name) - 1] = 0;
1277 kern_ctl.ctl_flags = CTL_FLAG_PRIVILEGED; // Require root
1278 kern_ctl.ctl_sendsize = 64 * 1024;
1279 kern_ctl.ctl_recvsize = 64 * 1024;
1280 kern_ctl.ctl_connect = necp_ctl_connect;
1281 kern_ctl.ctl_disconnect = necp_ctl_disconnect;
1282 kern_ctl.ctl_send = necp_ctl_send;
1283 kern_ctl.ctl_rcvd = necp_ctl_rcvd;
1284 kern_ctl.ctl_setopt = necp_ctl_setopt;
1285 kern_ctl.ctl_getopt = necp_ctl_getopt;
1286
1287 result = ctl_register(&kern_ctl, &necp_kctlref);
1288 if (result != 0) {
1289 NECPLOG(LOG_ERR, "ctl_register failed: %d", result);
1290 return (result);
1291 }
1292
1293 return (0);
1294 }
1295
1296 static void
1297 necp_post_change_event(struct kev_necp_policies_changed_data *necp_event_data)
1298 {
1299 struct kev_msg ev_msg;
1300 memset(&ev_msg, 0, sizeof(ev_msg));
1301
1302 ev_msg.vendor_code = KEV_VENDOR_APPLE;
1303 ev_msg.kev_class = KEV_NETWORK_CLASS;
1304 ev_msg.kev_subclass = KEV_NECP_SUBCLASS;
1305 ev_msg.event_code = KEV_NECP_POLICIES_CHANGED;
1306
1307 ev_msg.dv[0].data_ptr = necp_event_data;
1308 ev_msg.dv[0].data_length = sizeof(necp_event_data->changed_count);
1309 ev_msg.dv[1].data_length = 0;
1310
1311 kev_post_msg(&ev_msg);
1312 }
1313
1314 static errno_t
1315 necp_ctl_connect(kern_ctl_ref kctlref, struct sockaddr_ctl *sac, void **unitinfo)
1316 {
1317 #pragma unused(kctlref, sac)
1318 *unitinfo = necp_create_session();
1319 if (*unitinfo == NULL) {
1320 // Could not allocate session
1321 return (ENOBUFS);
1322 }
1323
1324 return (0);
1325 }
1326
1327 static errno_t
1328 necp_ctl_disconnect(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo)
1329 {
1330 #pragma unused(kctlref, unit)
1331 struct necp_session *session = (struct necp_session *)unitinfo;
1332 if (session != NULL) {
1333 necp_policy_mark_all_for_deletion(session);
1334 necp_policy_apply_all(session);
1335 necp_delete_session((struct necp_session *)unitinfo);
1336 }
1337
1338 return (0);
1339 }
1340
1341
1342 // Message handling
1343 static int
1344 necp_packet_find_tlv(mbuf_t packet, int offset, u_int8_t type, int *err, int next)
1345 {
1346 size_t cursor = offset;
1347 int error = 0;
1348 u_int32_t curr_length;
1349 u_int8_t curr_type;
1350
1351 *err = 0;
1352
1353 do {
1354 if (!next) {
1355 error = mbuf_copydata(packet, cursor, sizeof(curr_type), &curr_type);
1356 if (error) {
1357 *err = ENOENT;
1358 return (-1);
1359 }
1360 } else {
1361 next = 0;
1362 curr_type = NECP_TLV_NIL;
1363 }
1364
1365 if (curr_type != type) {
1366 cursor += sizeof(curr_type);
1367 error = mbuf_copydata(packet, cursor, sizeof(curr_length), &curr_length);
1368 if (error) {
1369 *err = error;
1370 return (-1);
1371 }
1372 cursor += (sizeof(curr_length) + curr_length);
1373 }
1374 } while (curr_type != type);
1375
1376 return (cursor);
1377 }
1378
1379 static int
1380 necp_packet_get_tlv_at_offset(mbuf_t packet, int tlv_offset, u_int32_t buff_len, void *buff, u_int32_t *value_size)
1381 {
1382 int error = 0;
1383 u_int32_t length;
1384
1385 if (tlv_offset < 0) {
1386 return (EINVAL);
1387 }
1388
1389 error = mbuf_copydata(packet, tlv_offset + sizeof(u_int8_t), sizeof(length), &length);
1390 if (error) {
1391 return (error);
1392 }
1393
1394 u_int32_t total_len = m_length2(packet, NULL);
1395 if (total_len < (tlv_offset + sizeof(u_int8_t) + sizeof(length) + length)) {
1396 NECPLOG(LOG_ERR, "Got a bad TLV, length (%u) + offset (%d) > total length (%u)",
1397 length, (tlv_offset + sizeof(u_int8_t) + sizeof(length)), total_len);
1398 return (EINVAL);
1399 }
1400
1401 if (value_size != NULL) {
1402 *value_size = length;
1403 }
1404
1405 if (buff != NULL && buff_len > 0) {
1406 u_int32_t to_copy = (length < buff_len) ? length : buff_len;
1407 error = mbuf_copydata(packet, tlv_offset + sizeof(u_int8_t) + sizeof(length), to_copy, buff);
1408 if (error) {
1409 return (error);
1410 }
1411 }
1412
1413 return (0);
1414 }
1415
1416 static u_int8_t *
1417 necp_buffer_write_packet_header(u_int8_t *buffer, u_int8_t packet_type, u_int8_t flags, u_int32_t message_id)
1418 {
1419 ((struct necp_packet_header *)(void *)buffer)->packet_type = packet_type;
1420 ((struct necp_packet_header *)(void *)buffer)->flags = flags;
1421 ((struct necp_packet_header *)(void *)buffer)->message_id = message_id;
1422 return (buffer + sizeof(struct necp_packet_header));
1423 }
1424
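// Bounds check for the TLV writers below: verifies that a TLV with 'length'
// value bytes written at 'cursor' both starts and ends inside
// [buffer, buffer + buffer_length], logging and returning false otherwise.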
1425 static inline bool
1426 necp_buffer_write_tlv_validate(u_int8_t *cursor, u_int8_t type, u_int32_t length,
1427 u_int8_t *buffer, u_int32_t buffer_length)
1428 {
1429 if (cursor < buffer || (uintptr_t)(cursor - buffer) > buffer_length) {
1430 NECPLOG0(LOG_ERR, "Cannot write TLV in buffer (invalid cursor)");
1431 return (false);
1432 }
1433 u_int8_t *next_tlv = (u_int8_t *)(cursor + sizeof(type) + sizeof(length) + length);
1434 if (next_tlv <= buffer || // make sure the next TLV start doesn't overflow
1435 (uintptr_t)(next_tlv - buffer) > buffer_length) { // make sure the next TLV has enough room in buffer
1436 NECPLOG(LOG_ERR, "Cannot write TLV in buffer (TLV length %u, buffer length %u)",
1437 length, buffer_length);
1438 return (false);
1439 }
1440 return (true);
1441 }
1442
1443 u_int8_t *
1444 necp_buffer_write_tlv_if_different(u_int8_t *cursor, u_int8_t type,
1445 u_int32_t length, const void *value, bool *updated,
1446 u_int8_t *buffer, u_int32_t buffer_length)
1447 {
1448 if (!necp_buffer_write_tlv_validate(cursor, type, length, buffer, buffer_length)) {
1449 return (NULL);
1450 }
1451 u_int8_t *next_tlv = (u_int8_t *)(cursor + sizeof(type) + sizeof(length) + length);
1452 if (*updated || *(u_int8_t *)(cursor) != type) {
1453 *(u_int8_t *)(cursor) = type;
1454 *updated = TRUE;
1455 }
1456 if (*updated || *(u_int32_t *)(void *)(cursor + sizeof(type)) != length) {
1457 *(u_int32_t *)(void *)(cursor + sizeof(type)) = length;
1458 *updated = TRUE;
1459 }
1460 if (length > 0) {
1461 if (*updated || memcmp((u_int8_t *)(cursor + sizeof(type) + sizeof(length)), value, length) != 0) {
1462 memcpy((u_int8_t *)(cursor + sizeof(type) + sizeof(length)), value, length);
1463 *updated = TRUE;
1464 }
1465 }
1466 return (next_tlv);
1467 }
1468
1469 u_int8_t *
1470 necp_buffer_write_tlv(u_int8_t *cursor, u_int8_t type,
1471 u_int32_t length, const void *value,
1472 u_int8_t *buffer, u_int32_t buffer_length)
1473 {
1474 if (!necp_buffer_write_tlv_validate(cursor, type, length, buffer, buffer_length)) {
1475 return (NULL);
1476 }
1477 u_int8_t *next_tlv = (u_int8_t *)(cursor + sizeof(type) + sizeof(length) + length);
1478 *(u_int8_t *)(cursor) = type;
1479 *(u_int32_t *)(void *)(cursor + sizeof(type)) = length;
1480 if (length > 0) {
1481 memcpy((u_int8_t *)(cursor + sizeof(type) + sizeof(length)), value, length);
1482 }
1483
1484 return (next_tlv);
1485 }
1486
1487 u_int8_t
1488 necp_buffer_get_tlv_type(u_int8_t *buffer, int tlv_offset)
1489 {
1490 u_int8_t *type = NULL;
1491
1492 if (buffer == NULL) {
1493 return (0);
1494 }
1495
1496 type = (u_int8_t *)((u_int8_t *)buffer + tlv_offset);
1497 return (type ? *type : 0);
1498 }
1499
1500 u_int32_t
1501 necp_buffer_get_tlv_length(u_int8_t *buffer, int tlv_offset)
1502 {
1503 u_int32_t *length = NULL;
1504
1505 if (buffer == NULL) {
1506 return (0);
1507 }
1508
1509 length = (u_int32_t *)(void *)((u_int8_t *)buffer + tlv_offset + sizeof(u_int8_t));
1510 return (length ? *length : 0);
1511 }
1512
1513 u_int8_t *
1514 necp_buffer_get_tlv_value(u_int8_t *buffer, int tlv_offset, u_int32_t *value_size)
1515 {
1516 u_int8_t *value = NULL;
1517 u_int32_t length = necp_buffer_get_tlv_length(buffer, tlv_offset);
1518 if (length == 0) {
1519 return (value);
1520 }
1521
1522 if (value_size) {
1523 *value_size = length;
1524 }
1525
1526 value = (u_int8_t *)((u_int8_t *)buffer + tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t));
1527 return (value);
1528 }
1529
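// Linear scan for a TLV of 'type' in a flat buffer, starting at 'offset'.
// When 'next' is non-zero, the TLV at 'offset' itself is skipped and matching
// resumes at the following TLV, which lets callers iterate every TLV of a
// given type. A sketch of that pattern (necp_handle_policy_add uses the same
// loop via the necp_find_tlv wrapper):
//
// for (cursor = necp_buffer_find_tlv(buf, buf_len, offset, type, 0);
// cursor >= 0;
// cursor = necp_buffer_find_tlv(buf, buf_len, cursor, type, 1)) {
// /* handle the TLV at cursor */
// }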
1530 int
1531 necp_buffer_find_tlv(u_int8_t *buffer, u_int32_t buffer_length, int offset, u_int8_t type, int next)
1532 {
1533 if (offset < 0) {
1534 return (-1);
1535 }
1536 int cursor = offset;
1537 int next_cursor;
1538 u_int32_t curr_length;
1539 u_int8_t curr_type;
1540
1541 while (TRUE) {
1542 if ((((u_int32_t)cursor) + sizeof(curr_type) + sizeof(curr_length)) > buffer_length) {
1543 return (-1);
1544 }
1545 if (!next) {
1546 curr_type = necp_buffer_get_tlv_type(buffer, cursor);
1547 } else {
1548 next = 0;
1549 curr_type = NECP_TLV_NIL;
1550 }
1551 curr_length = necp_buffer_get_tlv_length(buffer, cursor);
1552 if (curr_length > buffer_length - ((u_int32_t)cursor + sizeof(curr_type) + sizeof(curr_length))) {
1553 return (-1);
1554 }
1555
1556 next_cursor = (cursor + sizeof(curr_type) + sizeof(curr_length) + curr_length);
1557 if (curr_type == type) {
1558 // check if entire TLV fits inside buffer
1559 if (((u_int32_t)next_cursor) <= buffer_length) {
1560 return (cursor);
1561 } else {
1562 return (-1);
1563 }
1564 }
1565 cursor = next_cursor;
1566 }
1567 }
1568
1569 static int
1570 necp_find_tlv(mbuf_t packet, u_int8_t *buffer, u_int32_t buffer_length, int offset, u_int8_t type, int *err, int next)
1571 {
1572 int cursor = -1;
1573 if (packet != NULL) {
1574 cursor = necp_packet_find_tlv(packet, offset, type, err, next);
1575 } else if (buffer != NULL) {
1576 cursor = necp_buffer_find_tlv(buffer, buffer_length, offset, type, next);
1577 }
1578 return (cursor);
1579 }
1580
1581 static int
1582 necp_get_tlv_at_offset(mbuf_t packet, u_int8_t *buffer, u_int32_t buffer_length,
1583 int tlv_offset, u_int32_t out_buffer_length, void *out_buffer, u_int32_t *value_size)
1584 {
1585 if (packet != NULL) {
1586 // Handle mbuf parsing
1587 return necp_packet_get_tlv_at_offset(packet, tlv_offset, out_buffer_length, out_buffer, value_size);
1588 }
1589
1590 if (buffer == NULL) {
1591 NECPLOG0(LOG_ERR, "necp_get_tlv_at_offset buffer is NULL");
1592 return (EINVAL);
1593 }
1594
1595 // Handle buffer parsing
1596
1597 // Validate that buffer has enough room for any TLV
1598 if (tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t) > buffer_length) {
1599 NECPLOG(LOG_ERR, "necp_get_tlv_at_offset buffer_length is too small for TLV (%u < %u)",
1600 buffer_length, tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t));
1601 return (EINVAL);
1602 }
1603
1604 // Validate that buffer has enough room for this TLV
1605 u_int32_t tlv_length = necp_buffer_get_tlv_length(buffer, tlv_offset);
1606 if (tlv_length > buffer_length - (tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t))) {
1607 NECPLOG(LOG_ERR, "necp_get_tlv_at_offset buffer_length is too small for TLV of length %u (%u < %u)",
1608 tlv_length, buffer_length, tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t) + tlv_length);
1609 return (EINVAL);
1610 }
1611
1612 if (out_buffer != NULL && out_buffer_length > 0) {
1613 // Validate that out buffer is large enough for value
1614 if (out_buffer_length < tlv_length) {
1615 NECPLOG(LOG_ERR, "necp_get_tlv_at_offset out_buffer_length is too small for TLV value (%u < %u)",
1616 out_buffer_length, tlv_length);
1617 return (EINVAL);
1618 }
1619
1620 // Get value pointer
1621 u_int8_t *tlv_value = necp_buffer_get_tlv_value(buffer, tlv_offset, NULL);
1622 if (tlv_value == NULL) {
1623 NECPLOG0(LOG_ERR, "necp_get_tlv_at_offset tlv_value is NULL");
1624 return (ENOENT);
1625 }
1626
1627 // Copy value
1628 memcpy(out_buffer, tlv_value, tlv_length);
1629 }
1630
1631 // Copy out length
1632 if (value_size != NULL) {
1633 *value_size = tlv_length;
1634 }
1635
1636 return (0);
1637 }
1638
1639 static int
1640 necp_get_tlv(mbuf_t packet, u_int8_t *buffer, u_int32_t buffer_length,
1641 int offset, u_int8_t type, u_int32_t buff_len, void *buff, u_int32_t *value_size)
1642 {
1643 int error = 0;
1644
1645 int tlv_offset = necp_find_tlv(packet, buffer, buffer_length, offset, type, &error, 0);
1646 if (tlv_offset < 0) {
1647 return (error);
1648 }
1649
1650 return (necp_get_tlv_at_offset(packet, buffer, buffer_length, tlv_offset, buff_len, buff, value_size));
1651 }
1652
1653 static bool
1654 necp_send_ctl_data(struct necp_session *session, u_int8_t *buffer, size_t buffer_size)
1655 {
1656 int error;
1657
1658 if (necp_kctlref == NULL || session == NULL || buffer == NULL || buffer_size == 0) {
1659 return (FALSE);
1660 }
1661
1662 error = ctl_enqueuedata(necp_kctlref, session->control_unit, buffer, buffer_size, CTL_DATA_EOR);
1663
1664 return (error == 0);
1665 }
1666
1667 static bool
1668 necp_send_success_response(struct necp_session *session, u_int8_t packet_type, u_int32_t message_id)
1669 {
1670 bool success = TRUE;
1671 u_int8_t *response = NULL;
1672 u_int8_t *cursor = NULL;
1673 size_t response_size = sizeof(struct necp_packet_header) + sizeof(u_int8_t) + sizeof(u_int32_t);
1674 MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK);
1675 if (response == NULL) {
1676 return (FALSE);
1677 }
1678 cursor = response;
1679 cursor = necp_buffer_write_packet_header(cursor, packet_type, NECP_PACKET_FLAGS_RESPONSE, message_id);
1680 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_NIL, 0, NULL, response, response_size);
1681
1682 if (!(success = necp_send_ctl_data(session, (u_int8_t *)response, response_size))) {
1683 NECPLOG0(LOG_ERR, "Failed to send response");
1684 }
1685
1686 FREE(response, M_NECP);
1687 return (success);
1688 }
1689
1690 static bool
1691 necp_send_error_response(struct necp_session *session, u_int8_t packet_type, u_int32_t message_id, u_int32_t error)
1692 {
1693 bool success = TRUE;
1694 u_int8_t *response = NULL;
1695 u_int8_t *cursor = NULL;
1696 size_t response_size = sizeof(struct necp_packet_header) + sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(u_int32_t);
1697 MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK);
1698 if (response == NULL) {
1699 return (FALSE);
1700 }
1701 cursor = response;
1702 cursor = necp_buffer_write_packet_header(cursor, packet_type, NECP_PACKET_FLAGS_RESPONSE, message_id);
1703 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_ERROR, sizeof(error), &error, response, response_size);
1704
1705 if (!(success = necp_send_ctl_data(session, (u_int8_t *)response, response_size))) {
1706 NECPLOG0(LOG_ERR, "Failed to send response");
1707 }
1708
1709 FREE(response, M_NECP);
1710 return (success);
1711 }
1712
1713 static bool
1714 necp_send_policy_id_response(struct necp_session *session, u_int8_t packet_type, u_int32_t message_id, necp_policy_id policy_id)
1715 {
1716 bool success = TRUE;
1717 u_int8_t *response = NULL;
1718 u_int8_t *cursor = NULL;
1719 size_t response_size = sizeof(struct necp_packet_header) + sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(u_int32_t);
1720 MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK);
1721 if (response == NULL) {
1722 return (FALSE);
1723 }
1724 cursor = response;
1725 cursor = necp_buffer_write_packet_header(cursor, packet_type, NECP_PACKET_FLAGS_RESPONSE, message_id);
1726 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ID, sizeof(policy_id), &policy_id, response, response_size);
1727
1728 if (!(success = necp_send_ctl_data(session, (u_int8_t *)response, response_size))) {
1729 NECPLOG0(LOG_ERR, "Failed to send response");
1730 }
1731
1732 FREE(response, M_NECP);
1733 return (success);
1734 }
1735
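// Entry point for messages arriving on the NECP kernel control socket. Copies
// out the fixed packet header, enforces the process lock if the session has
// been locked to a process, then dispatches on packet_type. The mbuf is always
// freed before returning.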
1736 static errno_t
1737 necp_ctl_send(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, mbuf_t packet, int flags)
1738 {
1739 #pragma unused(kctlref, unit, flags)
1740 struct necp_session *session = (struct necp_session *)unitinfo;
1741 struct necp_packet_header header;
1742 int error = 0;
1743
1744 if (session == NULL) {
1745 NECPLOG0(LOG_ERR, "Got a NULL session");
1746 error = EINVAL;
1747 goto done;
1748 }
1749
1750 if (mbuf_pkthdr_len(packet) < sizeof(header)) {
1751 NECPLOG(LOG_ERR, "Got a bad packet, length (%lu) < sizeof header (%lu)", mbuf_pkthdr_len(packet), sizeof(header));
1752 error = EINVAL;
1753 goto done;
1754 }
1755
1756 error = mbuf_copydata(packet, 0, sizeof(header), &header);
1757 if (error) {
1758 NECPLOG(LOG_ERR, "mbuf_copydata failed for the header: %d", error);
1759 error = ENOBUFS;
1760 goto done;
1761 }
1762
1763 if (session->proc_locked) {
1764 // Verify that the calling process is allowed to send messages
1765 uuid_t proc_uuid;
1766 proc_getexecutableuuid(current_proc(), proc_uuid, sizeof(proc_uuid));
1767 if (uuid_compare(proc_uuid, session->proc_uuid) != 0) {
1768 necp_send_error_response(session, header.packet_type, header.message_id, NECP_ERROR_INVALID_PROCESS);
1769 goto done;
1770 }
1771 } else {
1772 // If not locked, update the proc_uuid and proc_pid of the session
1773 proc_getexecutableuuid(current_proc(), session->proc_uuid, sizeof(session->proc_uuid));
1774 session->proc_pid = proc_pid(current_proc());
1775 }
1776
1777 switch (header.packet_type) {
1778 case NECP_PACKET_TYPE_POLICY_ADD: {
1779 necp_handle_policy_add(session, header.message_id, packet, NULL, 0, sizeof(header), NULL);
1780 break;
1781 }
1782 case NECP_PACKET_TYPE_POLICY_GET: {
1783 necp_handle_policy_get(session, header.message_id, packet, sizeof(header));
1784 break;
1785 }
1786 case NECP_PACKET_TYPE_POLICY_DELETE: {
1787 necp_handle_policy_delete(session, header.message_id, packet, sizeof(header));
1788 break;
1789 }
1790 case NECP_PACKET_TYPE_POLICY_APPLY_ALL: {
1791 necp_handle_policy_apply_all(session, header.message_id, packet, sizeof(header));
1792 break;
1793 }
1794 case NECP_PACKET_TYPE_POLICY_LIST_ALL: {
1795 necp_handle_policy_list_all(session, header.message_id, packet, sizeof(header));
1796 break;
1797 }
1798 case NECP_PACKET_TYPE_POLICY_DELETE_ALL: {
1799 necp_handle_policy_delete_all(session, header.message_id, packet, sizeof(header));
1800 break;
1801 }
1802 case NECP_PACKET_TYPE_POLICY_DUMP_ALL: {
1803 necp_handle_policy_dump_all(session, header.message_id, packet, 0, 0, sizeof(header));
1804 break;
1805 }
1806 case NECP_PACKET_TYPE_SET_SESSION_PRIORITY: {
1807 necp_handle_set_session_priority(session, header.message_id, packet, sizeof(header));
1808 break;
1809 }
1810 case NECP_PACKET_TYPE_LOCK_SESSION_TO_PROC: {
1811 necp_handle_lock_session_to_proc(session, header.message_id, packet, sizeof(header));
1812 break;
1813 }
1814 case NECP_PACKET_TYPE_REGISTER_SERVICE: {
1815 necp_handle_register_service(session, header.message_id, packet, sizeof(header));
1816 break;
1817 }
1818 case NECP_PACKET_TYPE_UNREGISTER_SERVICE: {
1819 necp_handle_unregister_service(session, header.message_id, packet, sizeof(header));
1820 break;
1821 }
1822 default: {
1823 NECPLOG(LOG_ERR, "Received unknown message type %d", header.packet_type);
1824 necp_send_error_response(session, header.packet_type, header.message_id, NECP_ERROR_UNKNOWN_PACKET_TYPE);
1825 break;
1826 }
1827 }
1828
1829 done:
1830 mbuf_freem(packet);
1831 return (error);
1832 }
1833
1834 static void
1835 necp_ctl_rcvd(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int flags)
1836 {
1837 #pragma unused(kctlref, unit, unitinfo, flags)
1838 return;
1839 }
1840
1841 static errno_t
1842 necp_ctl_getopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int opt, void *data, size_t *len)
1843 {
1844 #pragma unused(kctlref, unit, unitinfo, opt, data, len)
1845 return (0);
1846 }
1847
1848 static errno_t
1849 necp_ctl_setopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int opt, void *data, size_t len)
1850 {
1851 #pragma unused(kctlref, unit, unitinfo, opt, data, len)
1852 return (0);
1853 }
1854
1855 // Session Management
1856
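// Allocates a zeroed session, assigns it the lowest control unit not already
// in use (necp_session_list is kept sorted by control unit), derives a session
// order from its priority and control unit, and links the session into the
// list under the kernel policy lock.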
1857 static struct necp_session *
1858 necp_create_session(void)
1859 {
1860 struct necp_session *new_session = NULL;
1861
1862 MALLOC(new_session, struct necp_session *, sizeof(*new_session), M_NECP, M_WAITOK | M_ZERO);
1863 if (new_session == NULL) {
1864 goto done;
1865 }
1866
1867 new_session->necp_fd_type = necp_fd_type_session;
1868 new_session->session_priority = NECP_SESSION_PRIORITY_UNKNOWN;
1869 new_session->dirty = FALSE;
1870 LIST_INIT(&new_session->policies);
1871 lck_mtx_init(&new_session->lock, necp_kernel_policy_mtx_grp, necp_kernel_policy_mtx_attr);
1872
1873 // Take the lock
1874 lck_rw_lock_exclusive(&necp_kernel_policy_lock);
1875
1876 // Find the next available control unit
1877 u_int32_t control_unit = 1;
1878 struct necp_session *next_session = NULL;
1879 TAILQ_FOREACH(next_session, &necp_session_list, chain) {
1880 if (next_session->control_unit > control_unit) {
1881 // Found a gap, grab this control unit
1882 break;
1883 }
1884
1885 // Try the next control unit, loop around
1886 control_unit = next_session->control_unit + 1;
1887 }
1888
1889 new_session->control_unit = control_unit;
1890 new_session->session_order = necp_allocate_new_session_order(new_session->session_priority, control_unit);
1891
1892 if (next_session != NULL) {
1893 TAILQ_INSERT_BEFORE(next_session, new_session, chain);
1894 } else {
1895 TAILQ_INSERT_TAIL(&necp_session_list, new_session, chain);
1896 }
1897
1898 necp_session_count++;
1899 lck_rw_done(&necp_kernel_policy_lock);
1900
1901 if (necp_debug) {
1902 NECPLOG(LOG_DEBUG, "Created NECP session, control unit %d", control_unit);
1903 }
1904
1905 done:
1906 return (new_session);
1907 }
1908
1909 static void
1910 necp_delete_session(struct necp_session *session)
1911 {
1912 if (session != NULL) {
1913 struct necp_service_registration *service = NULL;
1914 struct necp_service_registration *temp_service = NULL;
1915 LIST_FOREACH_SAFE(service, &session->services, session_chain, temp_service) {
1916 LIST_REMOVE(service, session_chain);
1917 lck_rw_lock_exclusive(&necp_kernel_policy_lock);
1918 LIST_REMOVE(service, kernel_chain);
1919 lck_rw_done(&necp_kernel_policy_lock);
1920 FREE(service, M_NECP);
1921 }
1922 if (necp_debug) {
1923 NECPLOG0(LOG_DEBUG, "Deleted NECP session");
1924 }
1925
1926 lck_rw_lock_exclusive(&necp_kernel_policy_lock);
1927 TAILQ_REMOVE(&necp_session_list, session, chain);
1928 necp_session_count--;
1929 lck_rw_done(&necp_kernel_policy_lock);
1930
1931 lck_mtx_destroy(&session->lock, necp_kernel_policy_mtx_grp);
1932 FREE(session, M_NECP);
1933 }
1934 }
1935
1936 // Session Policy Management
1937
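// A policy result buffer is a 1-byte result type optionally followed by a
// parameter; these helpers extract the type and the parameter bounds.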
1938 static inline u_int8_t
1939 necp_policy_result_get_type_from_buffer(u_int8_t *buffer, u_int32_t length)
1940 {
1941 return ((buffer && length >= sizeof(u_int8_t)) ? buffer[0] : 0);
1942 }
1943
1944 static inline u_int32_t
1945 necp_policy_result_get_parameter_length_from_buffer(u_int8_t *buffer, u_int32_t length)
1946 {
1947 return ((buffer && length > sizeof(u_int8_t)) ? (length - sizeof(u_int8_t)) : 0);
1948 }
1949
1950 static inline u_int8_t *
1951 necp_policy_result_get_parameter_pointer_from_buffer(u_int8_t *buffer, u_int32_t length)
1952 {
1953 return ((buffer && length > sizeof(u_int8_t)) ? (buffer + sizeof(u_int8_t)) : NULL);
1954 }
1955
1956 static bool
1957 necp_policy_result_requires_route_rules(u_int8_t *buffer, u_int32_t length)
1958 {
1959 u_int8_t type = necp_policy_result_get_type_from_buffer(buffer, length);
1960 if (type == NECP_POLICY_RESULT_ROUTE_RULES) {
1961 return (TRUE);
1962 }
1963 return (FALSE);
1964 }
1965
1966 static inline bool
1967 necp_address_is_valid(struct sockaddr *address)
1968 {
1969 if (address->sa_family == AF_INET) {
1970 return (address->sa_len == sizeof(struct sockaddr_in));
1971 } else if (address->sa_family == AF_INET6) {
1972 return (address->sa_len == sizeof(struct sockaddr_in6));
1973 } else {
1974 return (FALSE);
1975 }
1976 }
1977
1978 static bool
1979 necp_policy_result_is_valid(u_int8_t *buffer, u_int32_t length)
1980 {
1981 bool validated = FALSE;
1982 u_int8_t type = necp_policy_result_get_type_from_buffer(buffer, length);
1983 u_int32_t parameter_length = necp_policy_result_get_parameter_length_from_buffer(buffer, length);
1984 switch (type) {
1985 case NECP_POLICY_RESULT_PASS: {
1986 validated = TRUE;
1987 break;
1988 }
1989 case NECP_POLICY_RESULT_SKIP: {
1990 if (parameter_length >= sizeof(u_int32_t)) {
1991 validated = TRUE;
1992 }
1993 break;
1994 }
1995 case NECP_POLICY_RESULT_DROP: {
1996 validated = TRUE;
1997 break;
1998 }
1999 case NECP_POLICY_RESULT_SOCKET_DIVERT: {
2000 if (parameter_length >= sizeof(u_int32_t)) {
2001 validated = TRUE;
2002 }
2003 break;
2004 }
2005 case NECP_POLICY_RESULT_SOCKET_SCOPED: {
2006 if (parameter_length > 0) {
2007 validated = TRUE;
2008 }
2009 break;
2010 }
2011 case NECP_POLICY_RESULT_IP_TUNNEL: {
2012 if (parameter_length > sizeof(u_int32_t)) {
2013 validated = TRUE;
2014 }
2015 break;
2016 }
2017 case NECP_POLICY_RESULT_SOCKET_FILTER: {
2018 if (parameter_length >= sizeof(u_int32_t)) {
2019 validated = TRUE;
2020 }
2021 break;
2022 }
2023 case NECP_POLICY_RESULT_ROUTE_RULES: {
2024 validated = TRUE;
2025 break;
2026 }
2027 case NECP_POLICY_RESULT_TRIGGER:
2028 case NECP_POLICY_RESULT_TRIGGER_IF_NEEDED:
2029 case NECP_POLICY_RESULT_TRIGGER_SCOPED:
2030 case NECP_POLICY_RESULT_NO_TRIGGER_SCOPED:
2031 case NECP_POLICY_RESULT_USE_NETAGENT: {
2032 if (parameter_length >= sizeof(uuid_t)) {
2033 validated = TRUE;
2034 }
2035 break;
2036 }
2037 default: {
2038 validated = FALSE;
2039 break;
2040 }
2041 }
2042
2043 if (necp_debug) {
2044 NECPLOG(LOG_DEBUG, "Policy result type %d, valid %d", type, validated);
2045 }
2046
2047 return (validated);
2048 }
2049
2050 static inline u_int8_t
2051 necp_policy_condition_get_type_from_buffer(u_int8_t *buffer, u_int32_t length)
2052 {
2053 return ((buffer && length >= sizeof(u_int8_t)) ? buffer[0] : 0);
2054 }
2055
2056 static inline u_int8_t
2057 necp_policy_condition_get_flags_from_buffer(u_int8_t *buffer, u_int32_t length)
2058 {
2059 return ((buffer && length >= (2 * sizeof(u_int8_t))) ? buffer[1] : 0);
2060 }
2061
2062 static inline u_int32_t
2063 necp_policy_condition_get_value_length_from_buffer(u_int8_t *buffer, u_int32_t length)
2064 {
2065 return ((buffer && length >= (2 * sizeof(u_int8_t))) ? (length - (2 * sizeof(u_int8_t))) : 0);
2066 }
2067
2068 static inline u_int8_t *
2069 necp_policy_condition_get_value_pointer_from_buffer(u_int8_t *buffer, u_int32_t length)
2070 {
2071 return ((buffer && length > (2 * sizeof(u_int8_t))) ? (buffer + (2 * sizeof(u_int8_t))) : NULL);
2072 }
2073
2074 static inline bool
2075 necp_policy_condition_is_default(u_int8_t *buffer, u_int32_t length)
2076 {
2077 return (necp_policy_condition_get_type_from_buffer(buffer, length) == NECP_POLICY_CONDITION_DEFAULT);
2078 }
2079
2080 static inline bool
2081 necp_policy_condition_is_application(u_int8_t *buffer, u_int32_t length)
2082 {
2083 return (necp_policy_condition_get_type_from_buffer(buffer, length) == NECP_POLICY_CONDITION_APPLICATION);
2084 }
2085
2086 static inline bool
2087 necp_policy_condition_is_real_application(u_int8_t *buffer, u_int32_t length)
2088 {
2089 return (necp_policy_condition_get_type_from_buffer(buffer, length) == NECP_POLICY_CONDITION_REAL_APPLICATION);
2090 }
2091
2092 static inline bool
2093 necp_policy_condition_requires_application(u_int8_t *buffer, u_int32_t length)
2094 {
2095 u_int8_t type = necp_policy_condition_get_type_from_buffer(buffer, length);
2096 return (type == NECP_POLICY_CONDITION_REAL_APPLICATION);
2097 }
2098
2099 static inline bool
2100 necp_policy_condition_requires_real_application(u_int8_t *buffer, u_int32_t length)
2101 {
2102 u_int8_t type = necp_policy_condition_get_type_from_buffer(buffer, length);
2103 return (type == NECP_POLICY_CONDITION_ENTITLEMENT);
2104 }
2105
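// A policy condition buffer is a 1-byte condition type, a 1-byte flags field,
// and an optional value. Address-based conditions (LOCAL/REMOTE_ADDR and the
// range variants) are rejected for socket-layer-only results
// (result_cannot_have_ip_layer).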
2106 static bool
2107 necp_policy_condition_is_valid(u_int8_t *buffer, u_int32_t length, u_int8_t policy_result_type)
2108 {
2109 bool validated = FALSE;
2110 bool result_cannot_have_ip_layer = (policy_result_type == NECP_POLICY_RESULT_SOCKET_DIVERT ||
2111 policy_result_type == NECP_POLICY_RESULT_SOCKET_FILTER ||
2112 policy_result_type == NECP_POLICY_RESULT_TRIGGER ||
2113 policy_result_type == NECP_POLICY_RESULT_TRIGGER_IF_NEEDED ||
2114 policy_result_type == NECP_POLICY_RESULT_TRIGGER_SCOPED ||
2115 policy_result_type == NECP_POLICY_RESULT_NO_TRIGGER_SCOPED ||
2116 policy_result_type == NECP_POLICY_RESULT_SOCKET_SCOPED ||
2117 policy_result_type == NECP_POLICY_RESULT_ROUTE_RULES ||
2118 policy_result_type == NECP_POLICY_RESULT_USE_NETAGENT) ? TRUE : FALSE;
2119 u_int32_t condition_length = necp_policy_condition_get_value_length_from_buffer(buffer, length);
2120 u_int8_t *condition_value = necp_policy_condition_get_value_pointer_from_buffer(buffer, length);
2121 u_int8_t type = necp_policy_condition_get_type_from_buffer(buffer, length);
2122 u_int8_t flags = necp_policy_condition_get_flags_from_buffer(buffer, length);
2123 switch (type) {
2124 case NECP_POLICY_CONDITION_APPLICATION:
2125 case NECP_POLICY_CONDITION_REAL_APPLICATION: {
2126 if (!(flags & NECP_POLICY_CONDITION_FLAGS_NEGATIVE) &&
2127 condition_length >= sizeof(uuid_t) &&
2128 condition_value != NULL &&
2129 !uuid_is_null(condition_value)) {
2130 validated = TRUE;
2131 }
2132 break;
2133 }
2134 case NECP_POLICY_CONDITION_DOMAIN:
2135 case NECP_POLICY_CONDITION_ACCOUNT:
2136 case NECP_POLICY_CONDITION_BOUND_INTERFACE: {
2137 if (condition_length > 0) {
2138 validated = TRUE;
2139 }
2140 break;
2141 }
2142 case NECP_POLICY_CONDITION_TRAFFIC_CLASS: {
2143 if (condition_length >= sizeof(struct necp_policy_condition_tc_range)) {
2144 validated = TRUE;
2145 }
2146 break;
2147 }
2148 case NECP_POLICY_CONDITION_DEFAULT:
2149 case NECP_POLICY_CONDITION_ALL_INTERFACES:
2150 case NECP_POLICY_CONDITION_ENTITLEMENT: {
2151 if (!(flags & NECP_POLICY_CONDITION_FLAGS_NEGATIVE)) {
2152 validated = TRUE;
2153 }
2154 break;
2155 }
2156 case NECP_POLICY_CONDITION_IP_PROTOCOL: {
2157 if (condition_length >= sizeof(u_int16_t)) {
2158 validated = TRUE;
2159 }
2160 break;
2161 }
2162 case NECP_POLICY_CONDITION_PID: {
2163 if (condition_length >= sizeof(pid_t) &&
2164 condition_value != NULL &&
2165 *((pid_t *)(void *)condition_value) != 0) {
2166 validated = TRUE;
2167 }
2168 break;
2169 }
2170 case NECP_POLICY_CONDITION_UID: {
2171 if (condition_length >= sizeof(uid_t)) {
2172 validated = TRUE;
2173 }
2174 break;
2175 }
2176 case NECP_POLICY_CONDITION_LOCAL_ADDR:
2177 case NECP_POLICY_CONDITION_REMOTE_ADDR: {
2178 if (!result_cannot_have_ip_layer && condition_length >= sizeof(struct necp_policy_condition_addr) &&
2179 necp_address_is_valid(&((struct necp_policy_condition_addr *)(void *)condition_value)->address.sa)) {
2180 validated = TRUE;
2181 }
2182 break;
2183 }
2184 case NECP_POLICY_CONDITION_LOCAL_ADDR_RANGE:
2185 case NECP_POLICY_CONDITION_REMOTE_ADDR_RANGE: {
2186 if (!result_cannot_have_ip_layer && condition_length >= sizeof(struct necp_policy_condition_addr_range) &&
2187 necp_address_is_valid(&((struct necp_policy_condition_addr_range *)(void *)condition_value)->start_address.sa) &&
2188 necp_address_is_valid(&((struct necp_policy_condition_addr_range *)(void *)condition_value)->end_address.sa)) {
2189 validated = TRUE;
2190 }
2191 break;
2192 }
2193 default: {
2194 validated = FALSE;
2195 break;
2196 }
2197 }
2198
2199 if (necp_debug) {
2200 NECPLOG(LOG_DEBUG, "Policy condition type %d, valid %d", type, validated);
2201 }
2202
2203 return (validated);
2204 }
2205
2206 static bool
2207 necp_policy_route_rule_is_default(u_int8_t *buffer, u_int32_t length)
2208 {
2209 return (necp_policy_condition_get_value_length_from_buffer(buffer, length) == 0 &&
2210 necp_policy_condition_get_flags_from_buffer(buffer, length) == 0);
2211 }
2212
2213 static bool
2214 necp_policy_route_rule_is_valid(u_int8_t *buffer, u_int32_t length)
2215 {
2216 bool validated = FALSE;
2217 u_int8_t type = necp_policy_condition_get_type_from_buffer(buffer, length);
2218 switch (type) {
2219 case NECP_ROUTE_RULE_ALLOW_INTERFACE: {
2220 validated = TRUE;
2221 break;
2222 }
2223 case NECP_ROUTE_RULE_DENY_INTERFACE: {
2224 validated = TRUE;
2225 break;
2226 }
2227 case NECP_ROUTE_RULE_QOS_MARKING: {
2228 validated = TRUE;
2229 break;
2230 }
2231 default: {
2232 validated = FALSE;
2233 break;
2234 }
2235 }
2236
2237 if (necp_debug) {
2238 NECPLOG(LOG_DEBUG, "Policy route rule type %d, valid %d", type, validated);
2239 }
2240
2241 return (validated);
2242 }
2243
2244 static int
2245 necp_get_posix_error_for_necp_error(int response_error)
2246 {
2247 switch (response_error) {
2248 case NECP_ERROR_UNKNOWN_PACKET_TYPE:
2249 case NECP_ERROR_INVALID_TLV:
2250 case NECP_ERROR_POLICY_RESULT_INVALID:
2251 case NECP_ERROR_POLICY_CONDITIONS_INVALID:
2252 case NECP_ERROR_ROUTE_RULES_INVALID: {
2253 return (EINVAL);
2254 }
2255 case NECP_ERROR_POLICY_ID_NOT_FOUND: {
2256 return (ENOENT);
2257 }
2258 case NECP_ERROR_INVALID_PROCESS: {
2259 return (EPERM);
2260 }
2261 case NECP_ERROR_INTERNAL:
2262 default: {
2263 return (ENOMEM);
2264 }
2265 }
2266 }
2267
2268 static void
2269 necp_handle_set_session_priority(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
2270 {
2271 int error;
2272 struct necp_session_policy *policy = NULL;
2273 struct necp_session_policy *temp_policy = NULL;
2274 u_int32_t response_error = NECP_ERROR_INTERNAL;
2275 u_int32_t requested_session_priority = NECP_SESSION_PRIORITY_UNKNOWN;
2276
2277 // Read requested session priority
2278 error = necp_get_tlv(packet, NULL, 0, offset, NECP_TLV_SESSION_PRIORITY, sizeof(requested_session_priority), &requested_session_priority, NULL);
2279 if (error) {
2280 NECPLOG(LOG_ERR, "Failed to get session priority: %d", error);
2281 response_error = NECP_ERROR_INVALID_TLV;
2282 goto fail;
2283 }
2284
2285 if (session == NULL) {
2286 NECPLOG0(LOG_ERR, "Failed to find session");
2287 response_error = NECP_ERROR_INTERNAL;
2288 goto fail;
2289 }
2290
2291 // Enforce special session priorities with entitlements
2292 if (requested_session_priority == NECP_SESSION_PRIORITY_CONTROL ||
2293 requested_session_priority == NECP_SESSION_PRIORITY_PRIVILEGED_TUNNEL) {
2294 errno_t cred_result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES, 0);
2295 if (cred_result != 0) {
2296 NECPLOG(LOG_ERR, "Session does not hold necessary entitlement to claim priority level %d", requested_session_priority);
2297 goto fail;
2298 }
2299 }
2300
2301 if (session->session_priority != requested_session_priority) {
2302 session->session_priority = requested_session_priority;
2303 session->session_order = necp_allocate_new_session_order(session->session_priority, session->control_unit);
2304 session->dirty = TRUE;
2305
2306 // Mark all policies as needing updates
2307 LIST_FOREACH_SAFE(policy, &session->policies, chain, temp_policy) {
2308 policy->pending_update = TRUE;
2309 }
2310 }
2311
2312 necp_send_success_response(session, NECP_PACKET_TYPE_SET_SESSION_PRIORITY, message_id);
2313 return;
2314
2315 fail:
2316 necp_send_error_response(session, NECP_PACKET_TYPE_SET_SESSION_PRIORITY, message_id, response_error);
2317 }
2318
2319 static void
2320 necp_handle_lock_session_to_proc(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
2321 {
2322 #pragma unused(packet, offset)
2323 // proc_uuid already filled out
2324 session->proc_locked = TRUE;
2325 necp_send_success_response(session, NECP_PACKET_TYPE_LOCK_SESSION_TO_PROC, message_id);
2326 }
2327
2328 static void
2329 necp_handle_register_service(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
2330 {
2331 int error;
2332 struct necp_service_registration *new_service = NULL;
2333 u_int32_t response_error = NECP_ERROR_INTERNAL;
2334 uuid_t service_uuid;
2335 uuid_clear(service_uuid);
2336
2337 if (session == NULL) {
2338 NECPLOG0(LOG_ERR, "Failed to find session");
2339 response_error = NECP_ERROR_INTERNAL;
2340 goto fail;
2341 }
2342
2343 // Enforce entitlements
2344 errno_t cred_result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES, 0);
2345 if (cred_result != 0) {
2346 NECPLOG0(LOG_ERR, "Session does not hold necessary entitlement to register service");
2347 goto fail;
2348 }
2349
2350 // Read service uuid
2351 error = necp_get_tlv(packet, NULL, 0, offset, NECP_TLV_SERVICE_UUID, sizeof(uuid_t), service_uuid, NULL);
2352 if (error) {
2353 NECPLOG(LOG_ERR, "Failed to get service UUID: %d", error);
2354 response_error = NECP_ERROR_INVALID_TLV;
2355 goto fail;
2356 }
2357
2358 MALLOC(new_service, struct necp_service_registration *, sizeof(*new_service), M_NECP, M_WAITOK);
2359 if (new_service == NULL) {
2360 NECPLOG0(LOG_ERR, "Failed to allocate service registration");
2361 response_error = NECP_ERROR_INTERNAL;
2362 goto fail;
2363 }
2364
2365 lck_rw_lock_exclusive(&necp_kernel_policy_lock);
2366 memset(new_service, 0, sizeof(*new_service));
2367 new_service->service_id = necp_create_uuid_service_id_mapping(service_uuid);
2368 LIST_INSERT_HEAD(&session->services, new_service, session_chain);
2369 LIST_INSERT_HEAD(&necp_registered_service_list, new_service, kernel_chain);
2370 lck_rw_done(&necp_kernel_policy_lock);
2371
2372 necp_send_success_response(session, NECP_PACKET_TYPE_REGISTER_SERVICE, message_id);
2373 return;
2374 fail:
2375 necp_send_error_response(session, NECP_PACKET_TYPE_REGISTER_SERVICE, message_id, response_error);
2376 }
2377
2378 static void
2379 necp_handle_unregister_service(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
2380 {
2381 int error;
2382 struct necp_service_registration *service = NULL;
2383 struct necp_service_registration *temp_service = NULL;
2384 u_int32_t response_error = NECP_ERROR_INTERNAL;
2385 struct necp_uuid_id_mapping *mapping = NULL;
2386 uuid_t service_uuid;
2387 uuid_clear(service_uuid);
2388
2389 if (session == NULL) {
2390 NECPLOG0(LOG_ERR, "Failed to find session");
2391 response_error = NECP_ERROR_INTERNAL;
2392 goto fail;
2393 }
2394
2395 // Read service uuid
2396 error = necp_get_tlv(packet, NULL, 0, offset, NECP_TLV_SERVICE_UUID, sizeof(uuid_t), service_uuid, NULL);
2397 if (error) {
2398 NECPLOG(LOG_ERR, "Failed to get service UUID: %d", error);
2399 response_error = NECP_ERROR_INVALID_TLV;
2400 goto fail;
2401 }
2402
2403 // Remove all matching services registered by this session
2404 lck_rw_lock_exclusive(&necp_kernel_policy_lock);
2405 mapping = necp_uuid_lookup_service_id_locked(service_uuid);
2406 if (mapping != NULL) {
2407 LIST_FOREACH_SAFE(service, &session->services, session_chain, temp_service) {
2408 if (service->service_id == mapping->id) {
2409 LIST_REMOVE(service, session_chain);
2410 LIST_REMOVE(service, kernel_chain);
2411 FREE(service, M_NECP);
2412 }
2413 }
2414 necp_remove_uuid_service_id_mapping(service_uuid);
2415 }
2416 lck_rw_done(&necp_kernel_policy_lock);
2417
2418 necp_send_success_response(session, NECP_PACKET_TYPE_UNREGISTER_SERVICE, message_id);
2419 return;
2420 fail:
2421 necp_send_error_response(session, NECP_PACKET_TYPE_UNREGISTER_SERVICE, message_id, response_error);
2422 }
2423
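// Parses a policy-add message: the policy order, the policy result, optional
// route rules (only when the result type is ROUTE_RULES), and the policy
// conditions. Route rules and conditions are re-encoded into flat arrays using
// the same type/length/value layout before necp_policy_create is called.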
2424 static necp_policy_id
2425 necp_handle_policy_add(struct necp_session *session, u_int32_t message_id, mbuf_t packet,
2426 u_int8_t *tlv_buffer, size_t tlv_buffer_length, int offset, int *return_error)
2427 {
2428 bool has_default_condition = FALSE;
2429 bool has_non_default_condition = FALSE;
2430 bool has_application_condition = FALSE;
2431 bool has_real_application_condition = FALSE;
2432 bool requires_application_condition = FALSE;
2433 bool requires_real_application_condition = FALSE;
2434 u_int8_t *conditions_array = NULL;
2435 u_int32_t conditions_array_size = 0;
2436 int conditions_array_cursor;
2437
2438 bool has_default_route_rule = FALSE;
2439 u_int8_t *route_rules_array = NULL;
2440 u_int32_t route_rules_array_size = 0;
2441 int route_rules_array_cursor;
2442
2443 int cursor;
2444 int error = 0;
2445 u_int32_t response_error = NECP_ERROR_INTERNAL;
2446
2447 necp_policy_order order = 0;
2448 struct necp_session_policy *policy = NULL;
2449 u_int8_t *policy_result = NULL;
2450 u_int32_t policy_result_size = 0;
2451
2452 // Read policy order
2453 error = necp_get_tlv(packet, tlv_buffer, tlv_buffer_length, offset, NECP_TLV_POLICY_ORDER, sizeof(order), &order, NULL);
2454 if (error) {
2455 NECPLOG(LOG_ERR, "Failed to get policy order: %d", error);
2456 response_error = NECP_ERROR_INVALID_TLV;
2457 goto fail;
2458 }
2459
2460 // Read policy result
2461 cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, offset, NECP_TLV_POLICY_RESULT, &error, 0);
2462 error = necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &policy_result_size);
2463 if (error || policy_result_size == 0) {
2464 NECPLOG(LOG_ERR, "Failed to get policy result length: %d", error);
2465 response_error = NECP_ERROR_INVALID_TLV;
2466 goto fail;
2467 }
2468 if (policy_result_size > NECP_MAX_POLICY_RESULT_SIZE) {
2469 NECPLOG(LOG_ERR, "Policy result length too large: %u", policy_result_size);
2470 response_error = NECP_ERROR_INVALID_TLV;
2471 goto fail;
2472 }
2473 MALLOC(policy_result, u_int8_t *, policy_result_size, M_NECP, M_WAITOK);
2474 if (policy_result == NULL) {
2475 NECPLOG(LOG_ERR, "Failed to allocate a policy result buffer (size %d)", policy_result_size);
2476 response_error = NECP_ERROR_INTERNAL;
2477 goto fail;
2478 }
2479 error = necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, policy_result_size, policy_result, NULL);
2480 if (error) {
2481 NECPLOG(LOG_ERR, "Failed to get policy result: %d", error);
2482 response_error = NECP_ERROR_POLICY_RESULT_INVALID;
2483 goto fail;
2484 }
2485 if (!necp_policy_result_is_valid(policy_result, policy_result_size)) {
2486 NECPLOG0(LOG_ERR, "Failed to validate policy result");
2487 response_error = NECP_ERROR_POLICY_RESULT_INVALID;
2488 goto fail;
2489 }
2490
2491 if (necp_policy_result_requires_route_rules(policy_result, policy_result_size)) {
2492 // Read route rules conditions
2493 for (cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, offset, NECP_TLV_ROUTE_RULE, &error, 0);
2494 cursor >= 0;
2495 cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, cursor, NECP_TLV_ROUTE_RULE, &error, 1)) {
2496 u_int32_t route_rule_size = 0;
2497 necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &route_rule_size);
2498 if (route_rule_size > 0) {
2499 route_rules_array_size += (sizeof(u_int8_t) + sizeof(u_int32_t) + route_rule_size);
2500 }
2501 }
2502
2503 if (route_rules_array_size == 0) {
2504 NECPLOG0(LOG_ERR, "Failed to get policy route rules");
2505 response_error = NECP_ERROR_INVALID_TLV;
2506 goto fail;
2507 }
2508 if (route_rules_array_size > NECP_MAX_ROUTE_RULES_ARRAY_SIZE) {
2509 NECPLOG(LOG_ERR, "Route rules length too large: %u", route_rules_array_size);
2510 response_error = NECP_ERROR_INVALID_TLV;
2511 goto fail;
2512 }
2513 MALLOC(route_rules_array, u_int8_t *, route_rules_array_size, M_NECP, M_WAITOK);
2514 if (route_rules_array == NULL) {
2515 NECPLOG(LOG_ERR, "Failed to allocate a policy route rules array (size %d)", route_rules_array_size);
2516 response_error = NECP_ERROR_INTERNAL;
2517 goto fail;
2518 }
2519
2520 route_rules_array_cursor = 0;
2521 for (cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, offset, NECP_TLV_ROUTE_RULE, &error, 0);
2522 cursor >= 0;
2523 cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, cursor, NECP_TLV_ROUTE_RULE, &error, 1)) {
2524 u_int8_t route_rule_type = NECP_TLV_ROUTE_RULE;
2525 u_int32_t route_rule_size = 0;
2526 necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &route_rule_size);
2527 if (route_rule_size > 0 && route_rule_size <= (route_rules_array_size - route_rules_array_cursor)) {
2528 // Add type
2529 memcpy((route_rules_array + route_rules_array_cursor), &route_rule_type, sizeof(route_rule_type));
2530 route_rules_array_cursor += sizeof(route_rule_type);
2531
2532 // Add length
2533 memcpy((route_rules_array + route_rules_array_cursor), &route_rule_size, sizeof(route_rule_size));
2534 route_rules_array_cursor += sizeof(route_rule_size);
2535
2536 // Add value
2537 necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, route_rule_size, (route_rules_array + route_rules_array_cursor), NULL);
2538
2539 if (!necp_policy_route_rule_is_valid((route_rules_array + route_rules_array_cursor), route_rule_size)) {
2540 NECPLOG0(LOG_ERR, "Failed to validate policy route rule");
2541 response_error = NECP_ERROR_ROUTE_RULES_INVALID;
2542 goto fail;
2543 }
2544
2545 if (necp_policy_route_rule_is_default((route_rules_array + route_rules_array_cursor), route_rule_size)) {
2546 if (has_default_route_rule) {
2547 NECPLOG0(LOG_ERR, "Failed to validate route rule; contained multiple default route rules");
2548 response_error = NECP_ERROR_ROUTE_RULES_INVALID;
2549 goto fail;
2550 }
2551 has_default_route_rule = TRUE;
2552 }
2553
2554 route_rules_array_cursor += route_rule_size;
2555 }
2556 }
2557 }
2558
2559 // Read policy conditions
2560 for (cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, offset, NECP_TLV_POLICY_CONDITION, &error, 0);
2561 cursor >= 0;
2562 cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, cursor, NECP_TLV_POLICY_CONDITION, &error, 1)) {
2563 u_int32_t condition_size = 0;
2564 necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &condition_size);
2565
2566 if (condition_size > 0) {
2567 conditions_array_size += (sizeof(u_int8_t) + sizeof(u_int32_t) + condition_size);
2568 }
2569 }
2570
2571 if (conditions_array_size == 0) {
2572 NECPLOG0(LOG_ERR, "Failed to get policy conditions");
2573 response_error = NECP_ERROR_INVALID_TLV;
2574 goto fail;
2575 }
2576 if (conditions_array_size > NECP_MAX_CONDITIONS_ARRAY_SIZE) {
2577 NECPLOG(LOG_ERR, "Conditions length too large: %u", conditions_array_size);
2578 response_error = NECP_ERROR_INVALID_TLV;
2579 goto fail;
2580 }
2581 MALLOC(conditions_array, u_int8_t *, conditions_array_size, M_NECP, M_WAITOK);
2582 if (conditions_array == NULL) {
2583 NECPLOG(LOG_ERR, "Failed to allocate a policy conditions array (size %d)", conditions_array_size);
2584 response_error = NECP_ERROR_INTERNAL;
2585 goto fail;
2586 }
2587
2588 conditions_array_cursor = 0;
2589 for (cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, offset, NECP_TLV_POLICY_CONDITION, &error, 0);
2590 cursor >= 0;
2591 cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, cursor, NECP_TLV_POLICY_CONDITION, &error, 1)) {
2592 u_int8_t condition_type = NECP_TLV_POLICY_CONDITION;
2593 u_int32_t condition_size = 0;
2594 necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &condition_size);
2595 if (condition_size > 0 && condition_size <= (conditions_array_size - conditions_array_cursor)) {
2596 // Add type
2597 memcpy((conditions_array + conditions_array_cursor), &condition_type, sizeof(condition_type));
2598 conditions_array_cursor += sizeof(condition_type);
2599
2600 // Add length
2601 memcpy((conditions_array + conditions_array_cursor), &condition_size, sizeof(condition_size));
2602 conditions_array_cursor += sizeof(condition_size);
2603
2604 // Add value
2605 necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, condition_size, (conditions_array + conditions_array_cursor), NULL);
2606 if (!necp_policy_condition_is_valid((conditions_array + conditions_array_cursor), condition_size, necp_policy_result_get_type_from_buffer(policy_result, policy_result_size))) {
2607 NECPLOG0(LOG_ERR, "Failed to validate policy condition");
2608 response_error = NECP_ERROR_POLICY_CONDITIONS_INVALID;
2609 goto fail;
2610 }
2611
2612 if (necp_policy_condition_is_default((conditions_array + conditions_array_cursor), condition_size)) {
2613 has_default_condition = TRUE;
2614 } else {
2615 has_non_default_condition = TRUE;
2616 }
2617 if (has_default_condition && has_non_default_condition) {
2618 NECPLOG0(LOG_ERR, "Failed to validate conditions; contained default and non-default conditions");
2619 response_error = NECP_ERROR_POLICY_CONDITIONS_INVALID;
2620 goto fail;
2621 }
2622
2623 if (necp_policy_condition_is_application((conditions_array + conditions_array_cursor), condition_size)) {
2624 has_application_condition = TRUE;
2625 }
2626
2627 if (necp_policy_condition_is_real_application((conditions_array + conditions_array_cursor), condition_size)) {
2628 has_real_application_condition = TRUE;
2629 }
2630
2631 if (necp_policy_condition_requires_application((conditions_array + conditions_array_cursor), condition_size)) {
2632 requires_application_condition = TRUE;
2633 }
2634
2635 if (necp_policy_condition_requires_real_application((conditions_array + conditions_array_cursor), condition_size)) {
2636 requires_real_application_condition = TRUE;
2637 }
2638
2639 conditions_array_cursor += condition_size;
2640 }
2641 }
2642
2643 if (requires_application_condition && !has_application_condition) {
2644 NECPLOG0(LOG_ERR, "Failed to validate conditions; did not contain application condition");
2645 response_error = NECP_ERROR_POLICY_CONDITIONS_INVALID;
2646 goto fail;
2647 }
2648
2649 if (requires_real_application_condition && !has_real_application_condition) {
2650 NECPLOG0(LOG_ERR, "Failed to validate conditions; did not contain real application condition");
2651 response_error = NECP_ERROR_POLICY_CONDITIONS_INVALID;
2652 goto fail;
2653 }
2654
2655 if ((policy = necp_policy_create(session, order, conditions_array, conditions_array_size, route_rules_array, route_rules_array_size, policy_result, policy_result_size)) == NULL) {
2656 response_error = NECP_ERROR_INTERNAL;
2657 goto fail;
2658 }
2659
2660 if (packet != NULL) {
2661 necp_send_policy_id_response(session, NECP_PACKET_TYPE_POLICY_ADD, message_id, policy->id);
2662 }
2663 return (policy->id);
2664
2665 fail:
2666 if (policy_result != NULL) {
2667 FREE(policy_result, M_NECP);
2668 }
2669 if (conditions_array != NULL) {
2670 FREE(conditions_array, M_NECP);
2671 }
2672 if (route_rules_array != NULL) {
2673 FREE(route_rules_array, M_NECP);
2674 }
2675
2676 if (packet != NULL) {
2677 necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_ADD, message_id, response_error);
2678 }
2679 if (return_error != NULL) {
2680 *return_error = necp_get_posix_error_for_necp_error(response_error);
2681 }
2682 return (0);
2683 }
2684
2685 static void
2686 necp_handle_policy_get(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
2687 {
2688 #pragma unused(offset)
2689 int error;
2690 u_int8_t *response = NULL;
2691 u_int8_t *cursor = NULL;
2692 u_int32_t response_error = NECP_ERROR_INTERNAL;
2693 necp_policy_id policy_id = 0;
2694 u_int32_t order_tlv_size = 0;
2695 u_int32_t result_tlv_size = 0;
2696 u_int32_t response_size = 0;
2697
2698 struct necp_session_policy *policy = NULL;
2699
2700 // Read policy id
2701 error = necp_get_tlv(packet, NULL, 0, offset, NECP_TLV_POLICY_ID, sizeof(policy_id), &policy_id, NULL);
2702 if (error) {
2703 NECPLOG(LOG_ERR, "Failed to get policy id: %d", error);
2704 response_error = NECP_ERROR_INVALID_TLV;
2705 goto fail;
2706 }
2707
2708 policy = necp_policy_find(session, policy_id);
2709 if (policy == NULL || policy->pending_deletion) {
2710 NECPLOG(LOG_ERR, "Failed to find policy with id %d", policy_id);
2711 response_error = NECP_ERROR_POLICY_ID_NOT_FOUND;
2712 goto fail;
2713 }
2714
2715 order_tlv_size = sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(necp_policy_order);
2716 result_tlv_size = (policy->result_size ? (sizeof(u_int8_t) + sizeof(u_int32_t) + policy->result_size) : 0);
2717 response_size = sizeof(struct necp_packet_header) + order_tlv_size + result_tlv_size + policy->conditions_size;
2718 MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK);
2719 if (response == NULL) {
2720 necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_GET, message_id, NECP_ERROR_INTERNAL);
2721 return;
2722 }
2723
2724 cursor = response;
2725 cursor = necp_buffer_write_packet_header(cursor, NECP_PACKET_TYPE_POLICY_GET, NECP_PACKET_FLAGS_RESPONSE, message_id);
2726 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ORDER, sizeof(necp_policy_order), &policy->order, response, response_size);
2727
2728 if (result_tlv_size) {
2729 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_RESULT, policy->result_size, &policy->result, response, response_size);
2730 }
2731 if (policy->conditions_size) {
2732 memcpy(((u_int8_t *)(void *)(cursor)), policy->conditions, policy->conditions_size);
2733 }
2734
2735 if (!necp_send_ctl_data(session, (u_int8_t *)response, response_size)) {
2736 NECPLOG0(LOG_ERR, "Failed to send response");
2737 }
2738
2739 FREE(response, M_NECP);
2740 return;
2741
2742 fail:
2743 necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_GET, message_id, response_error);
2744 }
2745
2746 static void
2747 necp_handle_policy_delete(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
2748 {
2749 int error;
2750 u_int32_t response_error = NECP_ERROR_INTERNAL;
2751 necp_policy_id policy_id = 0;
2752
2753 struct necp_session_policy *policy = NULL;
2754
2755 // Read policy id
2756 error = necp_get_tlv(packet, NULL, 0, offset, NECP_TLV_POLICY_ID, sizeof(policy_id), &policy_id, NULL);
2757 if (error) {
2758 NECPLOG(LOG_ERR, "Failed to get policy id: %d", error);
2759 response_error = NECP_ERROR_INVALID_TLV;
2760 goto fail;
2761 }
2762
2763 policy = necp_policy_find(session, policy_id);
2764 if (policy == NULL || policy->pending_deletion) {
2765 NECPLOG(LOG_ERR, "Failed to find policy with id %d", policy_id);
2766 response_error = NECP_ERROR_POLICY_ID_NOT_FOUND;
2767 goto fail;
2768 }
2769
2770 necp_policy_mark_for_deletion(session, policy);
2771
2772 necp_send_success_response(session, NECP_PACKET_TYPE_POLICY_DELETE, message_id);
2773 return;
2774
2775 fail:
2776 necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_DELETE, message_id, response_error);
2777 }
2778
2779 static void
2780 necp_handle_policy_apply_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
2781 {
2782 #pragma unused(packet, offset)
2783 necp_policy_apply_all(session);
2784 necp_send_success_response(session, NECP_PACKET_TYPE_POLICY_APPLY_ALL, message_id);
2785 }
2786
2787 static void
2788 necp_handle_policy_list_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
2789 {
2790 #pragma unused(packet, offset)
2791 u_int32_t tlv_size = (sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(u_int32_t));
2792 u_int32_t response_size = 0;
2793 u_int8_t *response = NULL;
2794 u_int8_t *cursor = NULL;
2795 int num_policies = 0;
2796 int cur_policy_index = 0;
2797 struct necp_session_policy *policy;
2798
2799 LIST_FOREACH(policy, &session->policies, chain) {
2800 if (!policy->pending_deletion) {
2801 num_policies++;
2802 }
2803 }
2804
2805 // Create a response with one Policy ID TLV for each policy
2806 response_size = sizeof(struct necp_packet_header) + num_policies * tlv_size;
2807 MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK);
2808 if (response == NULL) {
2809 necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_LIST_ALL, message_id, NECP_ERROR_INTERNAL);
2810 return;
2811 }
2812
2813 cursor = response;
2814 cursor = necp_buffer_write_packet_header(cursor, NECP_PACKET_TYPE_POLICY_LIST_ALL, NECP_PACKET_FLAGS_RESPONSE, message_id);
2815
2816 LIST_FOREACH(policy, &session->policies, chain) {
2817 if (!policy->pending_deletion && cur_policy_index < num_policies) {
2818 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ID, sizeof(u_int32_t), &policy->id, response, response_size);
2819 cur_policy_index++;
2820 }
2821 }
2822
2823 if (!necp_send_ctl_data(session, (u_int8_t *)response, response_size)) {
2824 NECPLOG0(LOG_ERR, "Failed to send response");
2825 }
2826
2827 FREE(response, M_NECP);
2828 }
2829
2830 static void
2831 necp_handle_policy_delete_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
2832 {
2833 #pragma unused(packet, offset)
2834 necp_policy_mark_all_for_deletion(session);
2835 necp_send_success_response(session, NECP_PACKET_TYPE_POLICY_DELETE_ALL, message_id);
2836 }
2837
2838 static necp_policy_id
2839 necp_policy_get_new_id(void)
2840 {
2841 necp_policy_id newid = 0;
2842
2843 lck_rw_lock_exclusive(&necp_kernel_policy_lock);
2844
2845 necp_last_policy_id++;
2846 if (necp_last_policy_id < 1) {
2847 necp_last_policy_id = 1;
2848 }
2849
2850 newid = necp_last_policy_id;
2851 lck_rw_done(&necp_kernel_policy_lock);
2852
2853 if (newid == 0) {
2854 NECPLOG0(LOG_DEBUG, "Allocate policy id failed.\n");
2855 return (0);
2856 }
2857
2858 return (newid);
2859 }
2860
2861 /*
2862 * For the policy dump response this is the structure:
2863 *
2864 * <NECP_PACKET_HEADER>
2865 * {
2866 * type : NECP_TLV_POLICY_DUMP
2867 * length : ...
2868 * value :
2869 * {
2870 * {
2871 * type : NECP_TLV_POLICY_ID
2872 * len : ...
2873 * value : ...
2874 * }
2875 * {
2876 * type : NECP_TLV_POLICY_ORDER
2877 * len : ...
2878 * value : ...
2879 * }
2880 * {
2881 * type : NECP_TLV_POLICY_RESULT_STRING
2882 * len : ...
2883 * value : ...
2884 * }
2885 * {
2886 * type : NECP_TLV_POLICY_OWNER
2887 * len : ...
2888 * value : ...
2889 * }
2890 * {
2891 * type : NECP_TLV_POLICY_CONDITION
2892 * len : ...
2893 * value :
2894 * {
2895 * {
2896 * type : NECP_POLICY_CONDITION_ALL_INTERFACES
2897 * len : ...
2898 * value : ...
2899 * }
2900 * {
2901 * type : NECP_POLICY_CONDITION_BOUND_INTERFACES
2902 * len : ...
2903 * value : ...
2904 * }
2905 * ...
2906 * }
2907 * }
2908 * }
2909 * }
2910 * {
2911 * type : NECP_TLV_POLICY_DUMP
2912 * length : ...
2913 * value :
2914 * {
2915 * {
2916 * type : NECP_TLV_POLICY_ID
2917 * len : ...
2918 * value : ...
2919 * }
2920 * {
2921 * type : NECP_TLV_POLICY_ORDER
2922 * len : ...
2923 * value : ...
2924 * }
2925 * {
2926 * type : NECP_TLV_POLICY_RESULT_STRING
2927 * len : ...
2928 * value : ...
2929 * }
2930 * {
2931 * type : NECP_TLV_POLICY_OWNER
2932 * len : ...
2933 * value : ...
2934 * }
2935 * {
2936 * type : NECP_TLV_POLICY_CONDITION
2937 * len : ...
2938 * value :
2939 * {
2940 * {
2941 * type : NECP_POLICY_CONDITION_ALL_INTERFACES
2942 * len : ...
2943 * value : ...
2944 * }
2945 * {
2946 * type : NECP_POLICY_CONDITION_BOUND_INTERFACES
2947 * len : ...
2948 * value : ...
2949 * }
2950 * ...
2951 * }
2952 * }
2953 * }
2954 * }
2955 * ...
2956 */
2957 static int
2958 necp_handle_policy_dump_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet,
2959 user_addr_t out_buffer, size_t out_buffer_length, int offset)
2960 {
2961 #pragma unused(offset)
2962 struct necp_kernel_socket_policy *policy = NULL;
2963 int policy_i;
2964 int policy_count = 0;
2965 u_int8_t **tlv_buffer_pointers = NULL;
2966 u_int32_t *tlv_buffer_lengths = NULL;
2967 u_int32_t total_tlv_len = 0;
2968 u_int8_t *result_buf = NULL;
2969 u_int8_t *result_buf_cursor = result_buf;
2970 char result_string[MAX_RESULT_STRING_LEN];
2971 char proc_name_string[MAXCOMLEN + 1];
2972
2973 int error_code = 0;
2974 bool error_occured = false;
2975 u_int32_t response_error = NECP_ERROR_INTERNAL;
2976
2977 #define REPORT_ERROR(error) error_occured = true; \
2978 response_error = error; \
2979 goto done
2980
2981 #define UNLOCK_AND_REPORT_ERROR(lock, error) lck_rw_done(lock); \
2982 REPORT_ERROR(error)
2983
2984 errno_t cred_result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES, 0);
2985 if (cred_result != 0) {
2986 NECPLOG0(LOG_ERR, "Session does not hold the necessary entitlement to get Network Extension Policy information");
2987 REPORT_ERROR(NECP_ERROR_INTERNAL);
2988 }
2989
2990 // LOCK
2991 lck_rw_lock_shared(&necp_kernel_policy_lock);
2992
2993 if (necp_debug) {
2994 NECPLOG0(LOG_DEBUG, "Gathering policies");
2995 }
2996
2997 policy_count = necp_kernel_application_policies_count;
2998
2999 MALLOC(tlv_buffer_pointers, u_int8_t **, sizeof(u_int8_t *) * policy_count, M_NECP, M_NOWAIT | M_ZERO);
3000 if (tlv_buffer_pointers == NULL) {
3001 NECPLOG(LOG_DEBUG, "Failed to allocate tlv_buffer_pointers (%u bytes)", sizeof(u_int8_t *) * policy_count);
3002 UNLOCK_AND_REPORT_ERROR(&necp_kernel_policy_lock, NECP_ERROR_INTERNAL);
3003 }
3004
3005 MALLOC(tlv_buffer_lengths, u_int32_t *, sizeof(u_int32_t) * policy_count, M_NECP, M_NOWAIT | M_ZERO);
3006 if (tlv_buffer_lengths == NULL) {
3007 NECPLOG(LOG_DEBUG, "Failed to allocate tlv_buffer_lengths (%u bytes)", sizeof(u_int32_t) * policy_count);
3008 UNLOCK_AND_REPORT_ERROR(&necp_kernel_policy_lock, NECP_ERROR_INTERNAL);
3009 }
3010
3011 for (policy_i = 0; necp_kernel_socket_policies_app_layer_map != NULL && necp_kernel_socket_policies_app_layer_map[policy_i] != NULL; policy_i++) {
3012 policy = necp_kernel_socket_policies_app_layer_map[policy_i];
3013
3014 memset(result_string, 0, MAX_RESULT_STRING_LEN);
3015 memset(proc_name_string, 0, MAXCOMLEN + 1);
3016
3017 necp_get_result_description(result_string, policy->result, policy->result_parameter);
3018 proc_name(policy->session_pid, proc_name_string, MAXCOMLEN);
3019
3020 u_int16_t proc_name_len = strlen(proc_name_string) + 1;
3021 u_int16_t result_string_len = strlen(result_string) + 1;
3022
3023 if (necp_debug) {
3024 NECPLOG(LOG_DEBUG, "Policy: process: %s, result: %s", proc_name_string, result_string);
3025 }
3026
3027 u_int32_t total_allocated_bytes = sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(policy->id) + // NECP_TLV_POLICY_ID
3028 sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(policy->order) + // NECP_TLV_POLICY_ORDER
3029 sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(policy->session_order) + // NECP_TLV_POLICY_SESSION_ORDER
3030 sizeof(u_int8_t) + sizeof(u_int32_t) + result_string_len + // NECP_TLV_POLICY_RESULT_STRING
3031 sizeof(u_int8_t) + sizeof(u_int32_t) + proc_name_len + // NECP_TLV_POLICY_OWNER
3032 sizeof(u_int8_t) + sizeof(u_int32_t); // NECP_TLV_POLICY_CONDITION
3033
3034 // We now traverse the condition_mask to see how much space we need to allocate
3035 u_int32_t condition_mask = policy->condition_mask;
3036 u_int8_t num_conditions = 0;
3037 struct necp_string_id_mapping *account_id_entry = NULL;
3038 char if_name[IFXNAMSIZ];
3039 u_int32_t condition_tlv_length = 0;
3040 memset(if_name, 0, sizeof(if_name));
3041
3042 if (condition_mask == NECP_POLICY_CONDITION_DEFAULT) {
3043 num_conditions++;
3044 } else {
3045 if (condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES) {
3046 num_conditions++;
3047 }
3048 if (condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
3049 snprintf(if_name, IFXNAMSIZ, "%s%d", ifnet_name(policy->cond_bound_interface), ifnet_unit(policy->cond_bound_interface));
3050 condition_tlv_length += strlen(if_name) + 1;
3051 num_conditions++;
3052 }
3053 if (condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
3054 condition_tlv_length += sizeof(policy->cond_protocol);
3055 num_conditions++;
3056 }
3057 if (condition_mask & NECP_KERNEL_CONDITION_APP_ID) {
3058 condition_tlv_length += sizeof(uuid_t);
3059 num_conditions++;
3060 }
3061 if (condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
3062 condition_tlv_length += sizeof(uuid_t);
3063 num_conditions++;
3064 }
3065 if (condition_mask & NECP_KERNEL_CONDITION_DOMAIN) {
3066 u_int32_t domain_len = strlen(policy->cond_domain) + 1;
3067 condition_tlv_length += domain_len;
3068 num_conditions++;
3069 }
3070 if (condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID) {
3071 account_id_entry = necp_lookup_string_with_id_locked(&necp_account_id_list, policy->cond_account_id);
3072 u_int32_t account_id_len = 0;
3073 if (account_id_entry) {
3074 account_id_len = account_id_entry->string ? strlen(account_id_entry->string) + 1 : 0;
3075 }
3076 condition_tlv_length += account_id_len;
3077 num_conditions++;
3078 }
3079 if (condition_mask & NECP_KERNEL_CONDITION_PID) {
3080 condition_tlv_length += sizeof(pid_t);
3081 num_conditions++;
3082 }
3083 if (condition_mask & NECP_KERNEL_CONDITION_UID) {
3084 condition_tlv_length += sizeof(uid_t);
3085 num_conditions++;
3086 }
3087 if (condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
3088 condition_tlv_length += sizeof(struct necp_policy_condition_tc_range);
3089 num_conditions++;
3090 }
3091 if (condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT) {
3092 num_conditions++;
3093 }
3094 if (condition_mask & NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT) {
3095 u_int32_t entitlement_len = strlen(policy->cond_custom_entitlement) + 1;
3096 condition_tlv_length += entitlement_len;
3097 num_conditions++;
3098 }
3099 if (condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
3100 if (condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
3101 condition_tlv_length += sizeof(struct necp_policy_condition_addr_range);
3102 } else {
3103 condition_tlv_length += sizeof(struct necp_policy_condition_addr);
3104 }
3105 num_conditions++;
3106 }
3107 if (condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
3108 if (condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
3109 condition_tlv_length += sizeof(struct necp_policy_condition_addr_range);
3110 } else {
3111 condition_tlv_length += sizeof(struct necp_policy_condition_addr);
3112 }
3113 num_conditions++;
3114 }
3115 }
3116
3117 condition_tlv_length += num_conditions * (sizeof(u_int8_t) + sizeof(u_int32_t)); // These are for the condition TLVs. The space for "value" is already accounted for above.
3118 total_allocated_bytes += condition_tlv_length;
3119
3120 u_int8_t *tlv_buffer;
3121 MALLOC(tlv_buffer, u_int8_t *, total_allocated_bytes, M_NECP, M_NOWAIT | M_ZERO);
3122 if (tlv_buffer == NULL) {
3123 NECPLOG(LOG_DEBUG, "Failed to allocate tlv_buffer (%u bytes)", total_allocated_bytes);
3124 continue;
3125 }
3126
3127 u_int8_t *cursor = tlv_buffer;
3128 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ID, sizeof(policy->id), &policy->id, tlv_buffer, total_allocated_bytes);
3129 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ORDER, sizeof(necp_policy_order), &policy->order, tlv_buffer, total_allocated_bytes);
3130 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_SESSION_ORDER, sizeof(policy->session_order), &policy->session_order, tlv_buffer, total_allocated_bytes);
3131 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_RESULT_STRING, result_string_len, result_string, tlv_buffer, total_allocated_bytes);
3132 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_OWNER, proc_name_len, proc_name_string, tlv_buffer, total_allocated_bytes);
3133
3134 #define N_QUICK 256
3135 u_int8_t q_cond_buf[N_QUICK]; // Stack buffer: avoids a heap allocation for small condition TLV sets
3136
3137 u_int8_t *cond_buf; // To be used for condition TLVs
3138 if (condition_tlv_length <= N_QUICK) {
3139 cond_buf = q_cond_buf;
3140 } else {
3141 MALLOC(cond_buf, u_int8_t *, condition_tlv_length, M_NECP, M_NOWAIT);
3142 if (cond_buf == NULL) {
3143 NECPLOG(LOG_DEBUG, "Failed to allocate cond_buffer (%u bytes)", condition_tlv_length);
3144 FREE(tlv_buffer, M_NECP);
3145 continue;
3146 }
3147 }
3148
3149 memset(cond_buf, 0, condition_tlv_length);
3150 u_int8_t *cond_buf_cursor = cond_buf;
3151 if (condition_mask == NECP_POLICY_CONDITION_DEFAULT) {
3152 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_DEFAULT, 0, "", cond_buf, condition_tlv_length);
3153 } else {
3154 if (condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES) {
3155 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_ALL_INTERFACES, 0, "", cond_buf, condition_tlv_length);
3156 }
3157 if (condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
3158 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_BOUND_INTERFACE, strlen(if_name) + 1,
3159 if_name, cond_buf, condition_tlv_length);
3160 }
3161 if (condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
3162 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_IP_PROTOCOL, sizeof(policy->cond_protocol), &policy->cond_protocol,
3163 cond_buf, condition_tlv_length);
3164 }
3165 if (condition_mask & NECP_KERNEL_CONDITION_APP_ID) {
3166 struct necp_uuid_id_mapping *entry = necp_uuid_lookup_uuid_with_app_id_locked(policy->cond_app_id);
3167 if (entry != NULL) {
3168 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_APPLICATION, sizeof(entry->uuid), entry->uuid,
3169 cond_buf, condition_tlv_length);
3170 }
3171 }
3172 if (condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
3173 struct necp_uuid_id_mapping *entry = necp_uuid_lookup_uuid_with_app_id_locked(policy->cond_real_app_id);
3174 if (entry != NULL) {
3175 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_REAL_APPLICATION, sizeof(entry->uuid), entry->uuid,
3176 cond_buf, condition_tlv_length);
3177 }
3178 }
3179 if (condition_mask & NECP_KERNEL_CONDITION_DOMAIN) {
3180 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_DOMAIN, strlen(policy->cond_domain) + 1, policy->cond_domain,
3181 cond_buf, condition_tlv_length);
3182 }
3183 if (condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID) {
3184 if (account_id_entry != NULL) {
3185 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_ACCOUNT, strlen(account_id_entry->string) + 1, account_id_entry->string,
3186 cond_buf, condition_tlv_length);
3187 }
3188 }
3189 if (condition_mask & NECP_KERNEL_CONDITION_PID) {
3190 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_PID, sizeof(policy->cond_pid), &policy->cond_pid,
3191 cond_buf, condition_tlv_length);
3192 }
3193 if (condition_mask & NECP_KERNEL_CONDITION_UID) {
3194 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_UID, sizeof(policy->cond_uid), &policy->cond_uid,
3195 cond_buf, condition_tlv_length);
3196 }
3197 if (condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
3198 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_TRAFFIC_CLASS, sizeof(policy->cond_traffic_class), &policy->cond_traffic_class,
3199 cond_buf, condition_tlv_length);
3200 }
3201 if (condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT) {
3202 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_ENTITLEMENT, 0, "",
3203 cond_buf, condition_tlv_length);
3204 }
3205 if (condition_mask & NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT) {
3206 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_ENTITLEMENT, strlen(policy->cond_custom_entitlement) + 1, policy->cond_custom_entitlement,
3207 cond_buf, condition_tlv_length);
3208 }
3209 if (condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
3210 if (condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
3211 struct necp_policy_condition_addr_range range;
3212 memcpy(&range.start_address, &policy->cond_local_start, sizeof(policy->cond_local_start));
3213 memcpy(&range.end_address, &policy->cond_local_end, sizeof(policy->cond_local_end));
3214 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_LOCAL_ADDR_RANGE, sizeof(range), &range,
3215 cond_buf, condition_tlv_length);
3216 } else {
3217 struct necp_policy_condition_addr addr;
3218 addr.prefix = policy->cond_local_prefix;
3219 memcpy(&addr.address, &policy->cond_local_start, sizeof(policy->cond_local_start));
3220 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_LOCAL_ADDR, sizeof(addr), &addr,
3221 cond_buf, condition_tlv_length);
3222 }
3223 }
3224 if (condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
3225 if (condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
3226 struct necp_policy_condition_addr_range range;
3227 memcpy(&range.start_address, &policy->cond_remote_start, sizeof(policy->cond_remote_start));
3228 memcpy(&range.end_address, &policy->cond_remote_end, sizeof(policy->cond_remote_end));
3229 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_REMOTE_ADDR_RANGE, sizeof(range), &range,
3230 cond_buf, condition_tlv_length);
3231 } else {
3232 struct necp_policy_condition_addr addr;
3233 addr.prefix = policy->cond_remote_prefix;
3234 memcpy(&addr.address, &policy->cond_remote_start, sizeof(policy->cond_remote_start));
3235 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_REMOTE_ADDR, sizeof(addr), &addr,
3236 cond_buf, condition_tlv_length);
3237 }
3238 }
3239 }
3240
3241 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_CONDITION, cond_buf_cursor - cond_buf, cond_buf, tlv_buffer, total_allocated_bytes);
3242 if (cond_buf != q_cond_buf) {
3243 FREE(cond_buf, M_NECP);
3244 }
3245
3246 tlv_buffer_pointers[policy_i] = tlv_buffer;
3247 tlv_buffer_lengths[policy_i] = (cursor - tlv_buffer);
3248
3249 // This is the length of the TLV for NECP_TLV_POLICY_DUMP
3250 total_tlv_len += sizeof(u_int8_t) + sizeof(u_int32_t) + (cursor - tlv_buffer);
3251 }
3252
3253 // UNLOCK
3254 lck_rw_done(&necp_kernel_policy_lock);
3255
3256 // Send packet
3257 if (packet != NULL) {
3258 u_int32_t total_result_length = sizeof(struct necp_packet_header) + total_tlv_len;
3259
3260 // Allow malloc to wait, since the total buffer may be large and we are not holding any locks
3261 MALLOC(result_buf, u_int8_t *, total_result_length, M_NECP, M_WAITOK | M_ZERO);
3262 if (result_buf == NULL) {
3263 NECPLOG(LOG_DEBUG, "Failed to allocate result_buffer (%u bytes)", total_result_length);
3264 REPORT_ERROR(NECP_ERROR_INTERNAL);
3265 }
3266
3267 result_buf_cursor = result_buf;
3268 result_buf_cursor = necp_buffer_write_packet_header(result_buf_cursor, NECP_PACKET_TYPE_POLICY_DUMP_ALL, NECP_PACKET_FLAGS_RESPONSE, message_id);
3269
3270 for (int i = 0; i < policy_count; i++) {
3271 if (tlv_buffer_pointers[i] != NULL) {
3272 result_buf_cursor = necp_buffer_write_tlv(result_buf_cursor, NECP_TLV_POLICY_DUMP, tlv_buffer_lengths[i], tlv_buffer_pointers[i], result_buf, total_result_length);
3273 }
3274 }
3275
3276 if (!necp_send_ctl_data(session, result_buf, result_buf_cursor - result_buf)) {
3277 NECPLOG(LOG_ERR, "Failed to send response (%u bytes)", result_buf_cursor - result_buf);
3278 } else {
3279 NECPLOG(LOG_ERR, "Sent data worth %u bytes. Total result buffer length was %u bytes", result_buf_cursor - result_buf, total_result_length);
3280 }
3281 }
3282
3283 // Copy out
3284 if (out_buffer != 0) {
3285 if (out_buffer_length < total_tlv_len + sizeof(u_int32_t)) {
3286 NECPLOG(LOG_DEBUG, "out_buffer_length too small (%u < %u)", out_buffer_length, total_tlv_len + sizeof(u_int32_t));
3287 REPORT_ERROR(NECP_ERROR_INVALID_TLV);
3288 }
3289
3290 // Allow malloc to wait, since the total buffer may be large and we are not holding any locks
3291 MALLOC(result_buf, u_int8_t *, total_tlv_len + sizeof(u_int32_t), M_NECP, M_WAITOK | M_ZERO);
3292 if (result_buf == NULL) {
3293 NECPLOG(LOG_DEBUG, "Failed to allocate result_buffer (%u bytes)", total_tlv_len + sizeof(u_int32_t));
3294 REPORT_ERROR(NECP_ERROR_INTERNAL);
3295 }
3296
3297 // Add four bytes for total length at the start
3298 memcpy(result_buf, &total_tlv_len, sizeof(u_int32_t));
3299
3300 // Copy the TLVs
3301 result_buf_cursor = result_buf + sizeof(u_int32_t);
3302 for (int i = 0; i < policy_count; i++) {
3303 if (tlv_buffer_pointers[i] != NULL) {
3304 result_buf_cursor = necp_buffer_write_tlv(result_buf_cursor, NECP_TLV_POLICY_DUMP, tlv_buffer_lengths[i], tlv_buffer_pointers[i],
3305 result_buf, total_tlv_len + sizeof(u_int32_t));
3306 }
3307 }
3308
3309 int copy_error = copyout(result_buf, out_buffer, total_tlv_len + sizeof(u_int32_t));
3310 if (copy_error) {
3311 NECPLOG(LOG_DEBUG, "Failed to copy out result_buffer (%u bytes)", total_tlv_len + sizeof(u_int32_t));
3312 REPORT_ERROR(NECP_ERROR_INTERNAL);
3313 }
3314 }
3315
3316 done:
3317
3318 if (error_occurred) {
3319 if (packet != NULL) {
3320 if (!necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_DUMP_ALL, message_id, response_error)) {
3321 NECPLOG0(LOG_ERR, "Failed to send error response");
3322 } else {
3323 NECPLOG0(LOG_ERR, "Sent error response");
3324 }
3325 }
3326 error_code = necp_get_posix_error_for_necp_error(response_error);
3327 }
3328
3329 if (result_buf != NULL) {
3330 FREE(result_buf, M_NECP);
3331 }
3332
3333 if (tlv_buffer_pointers != NULL) {
3334 for (int i = 0; i < policy_count; i++) {
3335 if (tlv_buffer_pointers[i] != NULL) {
3336 FREE(tlv_buffer_pointers[i], M_NECP);
3337 tlv_buffer_pointers[i] = NULL;
3338 }
3339 }
3340 FREE(tlv_buffer_pointers, M_NECP);
3341 }
3342
3343 if (tlv_buffer_lengths != NULL) {
3344 FREE(tlv_buffer_lengths, M_NECP);
3345 }
3346 #undef N_QUICK
3348 #undef REPORT_ERROR
3349 #undef UNLOCK_AND_REPORT_ERROR
3350
3351 return (error_code);
3352 }
3353
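/*
 * Creates a session policy from caller-provided TLV buffers. The new policy
 * takes ownership of the conditions, route-rules, and result buffers (they
 * are freed by necp_policy_delete), is assigned a fresh policy ID, and is
 * inserted into the session's list in ascending order. The session is marked
 * dirty so the policy is ingested into kernel policies on the next apply.
 */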
3354 static struct necp_session_policy *
3355 necp_policy_create(struct necp_session *session, necp_policy_order order, u_int8_t *conditions_array, u_int32_t conditions_array_size, u_int8_t *route_rules_array, u_int32_t route_rules_array_size, u_int8_t *result, u_int32_t result_size)
3356 {
3357 struct necp_session_policy *new_policy = NULL;
3358 struct necp_session_policy *tmp_policy = NULL;
3359
3360 if (session == NULL || conditions_array == NULL || result == NULL || result_size == 0) {
3361 goto done;
3362 }
3363
3364 MALLOC_ZONE(new_policy, struct necp_session_policy *, sizeof(*new_policy), M_NECP_SESSION_POLICY, M_WAITOK);
3365 if (new_policy == NULL) {
3366 goto done;
3367 }
3368
3369 memset(new_policy, 0, sizeof(*new_policy)); // M_ZERO is not supported for MALLOC_ZONE
3370 new_policy->applied = FALSE;
3371 new_policy->pending_deletion = FALSE;
3372 new_policy->pending_update = FALSE;
3373 new_policy->order = order;
3374 new_policy->conditions = conditions_array;
3375 new_policy->conditions_size = conditions_array_size;
3376 new_policy->route_rules = route_rules_array;
3377 new_policy->route_rules_size = route_rules_array_size;
3378 new_policy->result = result;
3379 new_policy->result_size = result_size;
3380 new_policy->id = necp_policy_get_new_id();
3381
3382 LIST_INSERT_SORTED_ASCENDING(&session->policies, new_policy, chain, order, tmp_policy);
3383
3384 session->dirty = TRUE;
3385
3386 if (necp_debug) {
3387 NECPLOG(LOG_DEBUG, "Created NECP policy, order %d", order);
3388 }
3389 done:
3390 return (new_policy);
3391 }
3392
3393 static struct necp_session_policy *
3394 necp_policy_find(struct necp_session *session, necp_policy_id policy_id)
3395 {
3396 struct necp_session_policy *policy = NULL;
3397 if (policy_id == 0) {
3398 return (NULL);
3399 }
3400
3401 LIST_FOREACH(policy, &session->policies, chain) {
3402 if (policy->id == policy_id) {
3403 return (policy);
3404 }
3405 }
3406
3407 return (NULL);
3408 }
3409
3410 static inline u_int8_t
3411 necp_policy_get_result_type(struct necp_session_policy *policy)
3412 {
3413 return (policy ? necp_policy_result_get_type_from_buffer(policy->result, policy->result_size) : 0);
3414 }
3415
3416 static inline u_int32_t
3417 necp_policy_get_result_parameter_length(struct necp_session_policy *policy)
3418 {
3419 return (policy ? necp_policy_result_get_parameter_length_from_buffer(policy->result, policy->result_size) : 0);
3420 }
3421
3422 static bool
3423 necp_policy_get_result_parameter(struct necp_session_policy *policy, u_int8_t *parameter_buffer, u_int32_t parameter_buffer_length)
3424 {
3425 if (policy) {
3426 u_int32_t parameter_length = necp_policy_result_get_parameter_length_from_buffer(policy->result, policy->result_size);
3427 if (parameter_buffer_length >= parameter_length) {
3428 u_int8_t *parameter = necp_policy_result_get_parameter_pointer_from_buffer(policy->result, policy->result_size);
3429 if (parameter && parameter_buffer) {
3430 memcpy(parameter_buffer, parameter, parameter_length);
3431 return (TRUE);
3432 }
3433 }
3434 }
3435
3436 return (FALSE);
3437 }
3438
3439 static bool
3440 necp_policy_mark_for_deletion(struct necp_session *session, struct necp_session_policy *policy)
3441 {
3442 if (session == NULL || policy == NULL) {
3443 return (FALSE);
3444 }
3445
3446 policy->pending_deletion = TRUE;
3447 session->dirty = TRUE;
3448
3449 if (necp_debug) {
3450 NECPLOG0(LOG_DEBUG, "Marked NECP policy for removal");
3451 }
3452 return (TRUE);
3453 }
3454
3455 static bool
3456 necp_policy_mark_all_for_deletion(struct necp_session *session)
3457 {
3458 struct necp_session_policy *policy = NULL;
3459 struct necp_session_policy *temp_policy = NULL;
3460
3461 LIST_FOREACH_SAFE(policy, &session->policies, chain, temp_policy) {
3462 necp_policy_mark_for_deletion(session, policy);
3463 }
3464
3465 return (TRUE);
3466 }
3467
3468 static bool
3469 necp_policy_delete(struct necp_session *session, struct necp_session_policy *policy)
3470 {
3471 if (session == NULL || policy == NULL) {
3472 return (FALSE);
3473 }
3474
3475 LIST_REMOVE(policy, chain);
3476
3477 if (policy->result) {
3478 FREE(policy->result, M_NECP);
3479 policy->result = NULL;
3480 }
3481
3482 if (policy->conditions) {
3483 FREE(policy->conditions, M_NECP);
3484 policy->conditions = NULL;
3485 }
3486
3487 if (policy->route_rules) {
3488 FREE(policy->route_rules, M_NECP);
3489 policy->route_rules = NULL;
3490 }
3491
3492 FREE_ZONE(policy, sizeof(*policy), M_NECP_SESSION_POLICY);
3493
3494 if (necp_debug) {
3495 NECPLOG0(LOG_DEBUG, "Removed NECP policy");
3496 }
3497 return (TRUE);
3498 }
3499
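/*
 * Tears down all kernel state derived from an applied session policy:
 * UUID and account-string mappings, any route rule, and the socket and
 * IP-output kernel policies created by necp_policy_apply. Requires
 * necp_kernel_policy_lock to be held exclusively.
 */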
3500 static bool
3501 necp_policy_unapply(struct necp_session_policy *policy)
3502 {
3503 int i = 0;
3504 if (policy == NULL) {
3505 return (FALSE);
3506 }
3507
3508 LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
3509
3510 // Release local uuid mappings
3511 if (!uuid_is_null(policy->applied_app_uuid)) {
3512 bool removed_mapping = FALSE;
3513 if (necp_remove_uuid_app_id_mapping(policy->applied_app_uuid, &removed_mapping, TRUE) && removed_mapping) {
3514 necp_uuid_app_id_mappings_dirty = TRUE;
3515 necp_num_uuid_app_id_mappings--;
3516 }
3517 uuid_clear(policy->applied_app_uuid);
3518 }
3519 if (!uuid_is_null(policy->applied_real_app_uuid)) {
3520 necp_remove_uuid_app_id_mapping(policy->applied_real_app_uuid, NULL, FALSE);
3521 uuid_clear(policy->applied_real_app_uuid);
3522 }
3523 if (!uuid_is_null(policy->applied_result_uuid)) {
3524 necp_remove_uuid_service_id_mapping(policy->applied_result_uuid);
3525 uuid_clear(policy->applied_result_uuid);
3526 }
3527
3528 // Release string mappings
3529 if (policy->applied_account != NULL) {
3530 necp_remove_string_to_id_mapping(&necp_account_id_list, policy->applied_account);
3531 FREE(policy->applied_account, M_NECP);
3532 policy->applied_account = NULL;
3533 }
3534
3535 // Release route rule
3536 if (policy->applied_route_rules_id != 0) {
3537 necp_remove_route_rule(&necp_route_rules, policy->applied_route_rules_id);
3538 policy->applied_route_rules_id = 0;
3539 }
3540
3541 // Remove socket policies
3542 for (i = 0; i < MAX_KERNEL_SOCKET_POLICIES; i++) {
3543 if (policy->kernel_socket_policies[i] != 0) {
3544 necp_kernel_socket_policy_delete(policy->kernel_socket_policies[i]);
3545 policy->kernel_socket_policies[i] = 0;
3546 }
3547 }
3548
3549 // Remove IP output policies
3550 for (i = 0; i < MAX_KERNEL_IP_OUTPUT_POLICIES; i++) {
3551 if (policy->kernel_ip_output_policies[i] != 0) {
3552 necp_kernel_ip_output_policy_delete(policy->kernel_ip_output_policies[i]);
3553 policy->kernel_ip_output_policies[i] = 0;
3554 }
3555 }
3556
3557 policy->applied = FALSE;
3558
3559 return (TRUE);
3560 }
3561
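// Suborders used to distinguish the multiple IP-output kernel policies that
// can be derived from a single session policy (tunnel loop-back matches,
// policy-ID matches, and plain condition matches).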
3562 #define NECP_KERNEL_POLICY_SUBORDER_ID_TUNNEL_CONDITION 0
3563 #define NECP_KERNEL_POLICY_SUBORDER_NON_ID_TUNNEL_CONDITION 1
3564 #define NECP_KERNEL_POLICY_SUBORDER_ID_CONDITION 2
3565 #define NECP_KERNEL_POLICY_SUBORDER_NON_ID_CONDITIONS 3
3566 struct necp_policy_result_ip_tunnel {
3567 u_int32_t secondary_result;
3568 char interface_name[IFXNAMSIZ];
3569 } __attribute__((__packed__));
3570
3571 struct necp_policy_result_service {
3572 uuid_t identifier;
3573 u_int32_t data;
3574 } __attribute__((__packed__));
3575
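/*
 * Ingests a session policy into kernel policies. The condition TLVs are
 * parsed into a condition mask (and negated mask), the result type is
 * translated into a kernel result and parameter, and one or more kernel
 * policies are added at the socket and/or IP-output layers depending on
 * which layers the conditions and result require. Requires
 * necp_kernel_policy_lock to be held exclusively.
 */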
3576 static bool
3577 necp_policy_apply(struct necp_session *session, struct necp_session_policy *policy)
3578 {
3579 bool socket_only_conditions = FALSE;
3580 bool socket_ip_conditions = FALSE;
3581
3582 bool socket_layer_non_id_conditions = FALSE;
3583 bool ip_output_layer_non_id_conditions = FALSE;
3584 bool ip_output_layer_non_id_only = FALSE;
3585 bool ip_output_layer_id_condition = FALSE;
3586 bool ip_output_layer_tunnel_condition_from_id = FALSE;
3587 bool ip_output_layer_tunnel_condition_from_non_id = FALSE;
3588 necp_kernel_policy_id cond_ip_output_layer_id = NECP_KERNEL_POLICY_ID_NONE;
3589
3590 u_int32_t master_condition_mask = 0;
3591 u_int32_t master_condition_negated_mask = 0;
3592 ifnet_t cond_bound_interface = NULL;
3593 u_int32_t cond_account_id = 0;
3594 char *cond_domain = NULL;
3595 char *cond_custom_entitlement = NULL;
3596 pid_t cond_pid = 0;
3597 uid_t cond_uid = 0;
3598 necp_app_id cond_app_id = 0;
3599 necp_app_id cond_real_app_id = 0;
3600 struct necp_policy_condition_tc_range cond_traffic_class;
3601 cond_traffic_class.start_tc = 0;
3602 cond_traffic_class.end_tc = 0;
3603 u_int16_t cond_protocol = 0;
3604 union necp_sockaddr_union cond_local_start;
3605 union necp_sockaddr_union cond_local_end;
3606 u_int8_t cond_local_prefix = 0;
3607 union necp_sockaddr_union cond_remote_start;
3608 union necp_sockaddr_union cond_remote_end;
3609 u_int8_t cond_remote_prefix = 0;
3610 u_int32_t offset = 0;
3611 u_int8_t ultimate_result = 0;
3612 u_int32_t secondary_result = 0;
3613 necp_kernel_policy_result_parameter secondary_result_parameter;
3614 memset(&secondary_result_parameter, 0, sizeof(secondary_result_parameter));
3615 u_int32_t cond_last_interface_index = 0;
3616 necp_kernel_policy_result_parameter ultimate_result_parameter;
3617 memset(&ultimate_result_parameter, 0, sizeof(ultimate_result_parameter));
3618
3619 if (policy == NULL) {
3620 return (FALSE);
3621 }
3622
3623 LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
3624
3625 // Process conditions
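// Each condition TLV carries a type, flags (including a negation flag), and a
// type-specific value; the loop below folds them into the master condition
// mask and the per-condition variables declared above.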
3626 while (offset < policy->conditions_size) {
3627 u_int32_t length = 0;
3628 u_int8_t *value = necp_buffer_get_tlv_value(policy->conditions, offset, &length);
3629
3630 u_int8_t condition_type = necp_policy_condition_get_type_from_buffer(value, length);
3631 u_int8_t condition_flags = necp_policy_condition_get_flags_from_buffer(value, length);
3632 bool condition_is_negative = condition_flags & NECP_POLICY_CONDITION_FLAGS_NEGATIVE;
3633 u_int32_t condition_length = necp_policy_condition_get_value_length_from_buffer(value, length);
3634 u_int8_t *condition_value = necp_policy_condition_get_value_pointer_from_buffer(value, length);
3635 switch (condition_type) {
3636 case NECP_POLICY_CONDITION_DEFAULT: {
3637 socket_ip_conditions = TRUE;
3638 break;
3639 }
3640 case NECP_POLICY_CONDITION_ALL_INTERFACES: {
3641 master_condition_mask |= NECP_KERNEL_CONDITION_ALL_INTERFACES;
3642 socket_ip_conditions = TRUE;
3643 break;
3644 }
3645 case NECP_POLICY_CONDITION_ENTITLEMENT: {
3646 if (condition_length > 0) {
3647 if (cond_custom_entitlement == NULL) {
3648 cond_custom_entitlement = necp_copy_string((char *)condition_value, condition_length);
3649 if (cond_custom_entitlement != NULL) {
3650 master_condition_mask |= NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT;
3651 socket_only_conditions = TRUE;
3652 }
3653 }
3654 } else {
3655 master_condition_mask |= NECP_KERNEL_CONDITION_ENTITLEMENT;
3656 socket_only_conditions = TRUE;
3657 }
3658 break;
3659 }
3660 case NECP_POLICY_CONDITION_DOMAIN: {
3661 // Make sure there is only one such rule
3662 if (condition_length > 0 && cond_domain == NULL) {
3663 cond_domain = necp_create_trimmed_domain((char *)condition_value, condition_length);
3664 if (cond_domain != NULL) {
3665 master_condition_mask |= NECP_KERNEL_CONDITION_DOMAIN;
3666 if (condition_is_negative) {
3667 master_condition_negated_mask |= NECP_KERNEL_CONDITION_DOMAIN;
3668 }
3669 socket_only_conditions = TRUE;
3670 }
3671 }
3672 break;
3673 }
3674 case NECP_POLICY_CONDITION_ACCOUNT: {
3675 // Make sure there is only one such rule
3676 if (condition_length > 0 && cond_account_id == 0 && policy->applied_account == NULL) {
3677 char *string = NULL;
3678 MALLOC(string, char *, condition_length + 1, M_NECP, M_WAITOK);
3679 if (string != NULL) {
3680 memcpy(string, condition_value, condition_length);
3681 string[condition_length] = 0;
3682 cond_account_id = necp_create_string_to_id_mapping(&necp_account_id_list, string);
3683 if (cond_account_id != 0) {
3684 policy->applied_account = string; // Save the string in parent policy
3685 master_condition_mask |= NECP_KERNEL_CONDITION_ACCOUNT_ID;
3686 if (condition_is_negative) {
3687 master_condition_negated_mask |= NECP_KERNEL_CONDITION_ACCOUNT_ID;
3688 }
3689 socket_only_conditions = TRUE;
3690 } else {
3691 FREE(string, M_NECP);
3692 }
3693 }
3694 }
3695 break;
3696 }
3697 case NECP_POLICY_CONDITION_APPLICATION: {
3698 // Make sure there is only one such rule, because we save the uuid in the policy
3699 if (condition_length >= sizeof(uuid_t) && cond_app_id == 0) {
3700 bool allocated_mapping = FALSE;
3701 uuid_t application_uuid;
3702 memcpy(application_uuid, condition_value, sizeof(uuid_t));
3703 cond_app_id = necp_create_uuid_app_id_mapping(application_uuid, &allocated_mapping, TRUE);
3704 if (cond_app_id != 0) {
3705 if (allocated_mapping) {
3706 necp_uuid_app_id_mappings_dirty = TRUE;
3707 necp_num_uuid_app_id_mappings++;
3708 }
3709 uuid_copy(policy->applied_app_uuid, application_uuid);
3710 master_condition_mask |= NECP_KERNEL_CONDITION_APP_ID;
3711 if (condition_is_negative) {
3712 master_condition_negated_mask |= NECP_KERNEL_CONDITION_APP_ID;
3713 }
3714 socket_only_conditions = TRUE;
3715 }
3716 }
3717 break;
3718 }
3719 case NECP_POLICY_CONDITION_REAL_APPLICATION: {
3720 // Make sure there is only one such rule, because we save the uuid in the policy
3721 if (condition_length >= sizeof(uuid_t) && cond_real_app_id == 0) {
3722 uuid_t real_application_uuid;
3723 memcpy(real_application_uuid, condition_value, sizeof(uuid_t));
3724 cond_real_app_id = necp_create_uuid_app_id_mapping(real_application_uuid, NULL, FALSE);
3725 if (cond_real_app_id != 0) {
3726 uuid_copy(policy->applied_real_app_uuid, real_application_uuid);
3727 master_condition_mask |= NECP_KERNEL_CONDITION_REAL_APP_ID;
3728 if (condition_is_negative) {
3729 master_condition_negated_mask |= NECP_KERNEL_CONDITION_REAL_APP_ID;
3730 }
3731 socket_only_conditions = TRUE;
3732 }
3733 }
3734 break;
3735 }
3736 case NECP_POLICY_CONDITION_PID: {
3737 if (condition_length >= sizeof(pid_t)) {
3738 master_condition_mask |= NECP_KERNEL_CONDITION_PID;
3739 if (condition_is_negative) {
3740 master_condition_negated_mask |= NECP_KERNEL_CONDITION_PID;
3741 }
3742 memcpy(&cond_pid, condition_value, sizeof(cond_pid));
3743 socket_only_conditions = TRUE;
3744 }
3745 break;
3746 }
3747 case NECP_POLICY_CONDITION_UID: {
3748 if (condition_length >= sizeof(uid_t)) {
3749 master_condition_mask |= NECP_KERNEL_CONDITION_UID;
3750 if (condition_is_negative) {
3751 master_condition_negated_mask |= NECP_KERNEL_CONDITION_UID;
3752 }
3753 memcpy(&cond_uid, condition_value, sizeof(cond_uid));
3754 socket_only_conditions = TRUE;
3755 }
3756 break;
3757 }
3758 case NECP_POLICY_CONDITION_TRAFFIC_CLASS: {
3759 if (condition_length >= sizeof(struct necp_policy_condition_tc_range)) {
3760 master_condition_mask |= NECP_KERNEL_CONDITION_TRAFFIC_CLASS;
3761 if (condition_is_negative) {
3762 master_condition_negated_mask |= NECP_KERNEL_CONDITION_TRAFFIC_CLASS;
3763 }
3764 memcpy(&cond_traffic_class, condition_value, sizeof(cond_traffic_class));
3765 socket_only_conditions = TRUE;
3766 }
3767 break;
3768 }
3769 case NECP_POLICY_CONDITION_BOUND_INTERFACE: {
3770 if (condition_length <= IFXNAMSIZ && condition_length > 0) {
3771 char interface_name[IFXNAMSIZ];
3772 memcpy(interface_name, condition_value, condition_length);
3773 interface_name[condition_length - 1] = 0; // Make sure the string is NULL terminated
3774 if (ifnet_find_by_name(interface_name, &cond_bound_interface) == 0) {
3775 master_condition_mask |= NECP_KERNEL_CONDITION_BOUND_INTERFACE;
3776 if (condition_is_negative) {
3777 master_condition_negated_mask |= NECP_KERNEL_CONDITION_BOUND_INTERFACE;
3778 }
3779 }
3780 socket_ip_conditions = TRUE;
3781 }
3782 break;
3783 }
3784 case NECP_POLICY_CONDITION_IP_PROTOCOL: {
3785 if (condition_length >= sizeof(u_int16_t)) {
3786 master_condition_mask |= NECP_KERNEL_CONDITION_PROTOCOL;
3787 if (condition_is_negative) {
3788 master_condition_negated_mask |= NECP_KERNEL_CONDITION_PROTOCOL;
3789 }
3790 memcpy(&cond_protocol, condition_value, sizeof(cond_protocol));
3791 socket_ip_conditions = TRUE;
3792 }
3793 break;
3794 }
3795 case NECP_POLICY_CONDITION_LOCAL_ADDR: {
3796 struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)condition_value;
3797 if (!necp_address_is_valid(&address_struct->address.sa)) {
3798 break;
3799 }
3800
3801 cond_local_prefix = address_struct->prefix;
3802 memcpy(&cond_local_start, &address_struct->address, sizeof(address_struct->address));
3803 master_condition_mask |= NECP_KERNEL_CONDITION_LOCAL_START;
3804 master_condition_mask |= NECP_KERNEL_CONDITION_LOCAL_PREFIX;
3805 if (condition_is_negative) {
3806 master_condition_negated_mask |= NECP_KERNEL_CONDITION_LOCAL_START;
3807 master_condition_negated_mask |= NECP_KERNEL_CONDITION_LOCAL_PREFIX;
3808 }
3809 socket_ip_conditions = TRUE;
3810 break;
3811 }
3812 case NECP_POLICY_CONDITION_REMOTE_ADDR: {
3813 struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)condition_value;
3814 if (!necp_address_is_valid(&address_struct->address.sa)) {
3815 break;
3816 }
3817
3818 cond_remote_prefix = address_struct->prefix;
3819 memcpy(&cond_remote_start, &address_struct->address, sizeof(address_struct->address));
3820 master_condition_mask |= NECP_KERNEL_CONDITION_REMOTE_START;
3821 master_condition_mask |= NECP_KERNEL_CONDITION_REMOTE_PREFIX;
3822 if (condition_is_negative) {
3823 master_condition_negated_mask |= NECP_KERNEL_CONDITION_REMOTE_START;
3824 master_condition_negated_mask |= NECP_KERNEL_CONDITION_REMOTE_PREFIX;
3825 }
3826 socket_ip_conditions = TRUE;
3827 break;
3828 }
3829 case NECP_POLICY_CONDITION_LOCAL_ADDR_RANGE: {
3830 struct necp_policy_condition_addr_range *address_struct = (struct necp_policy_condition_addr_range *)(void *)condition_value;
3831 if (!necp_address_is_valid(&address_struct->start_address.sa) ||
3832 !necp_address_is_valid(&address_struct->end_address.sa)) {
3833 break;
3834 }
3835
3836 memcpy(&cond_local_start, &address_struct->start_address, sizeof(address_struct->start_address));
3837 memcpy(&cond_local_end, &address_struct->end_address, sizeof(address_struct->end_address));
3838 master_condition_mask |= NECP_KERNEL_CONDITION_LOCAL_START;
3839 master_condition_mask |= NECP_KERNEL_CONDITION_LOCAL_END;
3840 if (condition_is_negative) {
3841 master_condition_negated_mask |= NECP_KERNEL_CONDITION_LOCAL_START;
3842 master_condition_negated_mask |= NECP_KERNEL_CONDITION_LOCAL_END;
3843 }
3844 socket_ip_conditions = TRUE;
3845 break;
3846 }
3847 case NECP_POLICY_CONDITION_REMOTE_ADDR_RANGE: {
3848 struct necp_policy_condition_addr_range *address_struct = (struct necp_policy_condition_addr_range *)(void *)condition_value;
3849 if (!necp_address_is_valid(&address_struct->start_address.sa) ||
3850 !necp_address_is_valid(&address_struct->end_address.sa)) {
3851 break;
3852 }
3853
3854 memcpy(&cond_remote_start, &address_struct->start_address, sizeof(address_struct->start_address));
3855 memcpy(&cond_remote_end, &address_struct->end_address, sizeof(address_struct->end_address));
3856 master_condition_mask |= NECP_KERNEL_CONDITION_REMOTE_START;
3857 master_condition_mask |= NECP_KERNEL_CONDITION_REMOTE_END;
3858 if (condition_is_negative) {
3859 master_condition_negated_mask |= NECP_KERNEL_CONDITION_REMOTE_START;
3860 master_condition_negated_mask |= NECP_KERNEL_CONDITION_REMOTE_END;
3861 }
3862 socket_ip_conditions = TRUE;
3863 break;
3864 }
3865 default: {
3866 break;
3867 }
3868 }
3869
3870 offset += sizeof(u_int8_t) + sizeof(u_int32_t) + length;
3871 }
3872
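// The conditions above were classified as socket-only (they need socket-level
// state such as application IDs or entitlements) or socket+IP (they can also
// be evaluated at the IP output layer). The result type below decides which
// derived kernel policies are created at each layer.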
3873 // Process result
3874 ultimate_result = necp_policy_get_result_type(policy);
3875 switch (ultimate_result) {
3876 case NECP_POLICY_RESULT_PASS: {
3877 if (socket_only_conditions) { // socket_ip_conditions can be TRUE or FALSE
3878 socket_layer_non_id_conditions = TRUE;
3879 ip_output_layer_id_condition = TRUE;
3880 } else if (socket_ip_conditions) {
3881 socket_layer_non_id_conditions = TRUE;
3882 ip_output_layer_id_condition = TRUE;
3883 ip_output_layer_non_id_conditions = TRUE;
3884 }
3885 break;
3886 }
3887 case NECP_POLICY_RESULT_DROP: {
3888 if (socket_only_conditions) { // socket_ip_conditions can be TRUE or FALSE
3889 socket_layer_non_id_conditions = TRUE;
3890 } else if (socket_ip_conditions) {
3891 socket_layer_non_id_conditions = TRUE;
3892 ip_output_layer_non_id_conditions = TRUE;
3893 ip_output_layer_non_id_only = TRUE; // Only apply drop to packets that didn't go through socket layer
3894 }
3895 break;
3896 }
3897 case NECP_POLICY_RESULT_SKIP: {
3898 u_int32_t skip_policy_order = 0;
3899 if (necp_policy_get_result_parameter(policy, (u_int8_t *)&skip_policy_order, sizeof(skip_policy_order))) {
3900 ultimate_result_parameter.skip_policy_order = skip_policy_order;
3901 }
3902
3903 if (socket_only_conditions) { // socket_ip_conditions can be TRUE or FALSE
3904 socket_layer_non_id_conditions = TRUE;
3905 ip_output_layer_id_condition = TRUE;
3906 } else if (socket_ip_conditions) {
3907 socket_layer_non_id_conditions = TRUE;
3908 ip_output_layer_non_id_conditions = TRUE;
3909 }
3910 break;
3911 }
3912 case NECP_POLICY_RESULT_SOCKET_DIVERT:
3913 case NECP_POLICY_RESULT_SOCKET_FILTER: {
3914 u_int32_t control_unit = 0;
3915 if (necp_policy_get_result_parameter(policy, (u_int8_t *)&control_unit, sizeof(control_unit))) {
3916 ultimate_result_parameter.flow_divert_control_unit = control_unit;
3917 }
3918 socket_layer_non_id_conditions = TRUE;
3919 break;
3920 }
3921 case NECP_POLICY_RESULT_IP_TUNNEL: {
3922 struct necp_policy_result_ip_tunnel tunnel_parameters;
3923 u_int32_t tunnel_parameters_length = necp_policy_get_result_parameter_length(policy);
3924 if (tunnel_parameters_length > sizeof(u_int32_t) &&
3925 tunnel_parameters_length <= sizeof(struct necp_policy_result_ip_tunnel) &&
3926 necp_policy_get_result_parameter(policy, (u_int8_t *)&tunnel_parameters, sizeof(tunnel_parameters))) {
3927 ifnet_t tunnel_interface = NULL;
3928 tunnel_parameters.interface_name[tunnel_parameters_length - sizeof(u_int32_t) - 1] = 0; // Make sure the string is NULL terminated
3929 if (ifnet_find_by_name(tunnel_parameters.interface_name, &tunnel_interface) == 0) {
3930 ultimate_result_parameter.tunnel_interface_index = tunnel_interface->if_index;
3931 ifnet_release(tunnel_interface);
3932 }
3933
3934 secondary_result = tunnel_parameters.secondary_result;
3935 if (secondary_result) {
3936 cond_last_interface_index = ultimate_result_parameter.tunnel_interface_index;
3937 }
3938 }
3939
3940 if (socket_only_conditions) { // socket_ip_conditions can be TRUE or FALSE
3941 socket_layer_non_id_conditions = TRUE;
3942 ip_output_layer_id_condition = TRUE;
3943 if (secondary_result) {
3944 ip_output_layer_tunnel_condition_from_id = TRUE;
3945 }
3946 } else if (socket_ip_conditions) {
3947 socket_layer_non_id_conditions = TRUE;
3948 ip_output_layer_id_condition = TRUE;
3949 ip_output_layer_non_id_conditions = TRUE;
3950 if (secondary_result) {
3951 ip_output_layer_tunnel_condition_from_id = TRUE;
3952 ip_output_layer_tunnel_condition_from_non_id = TRUE;
3953 }
3954 }
3955 break;
3956 }
3957 case NECP_POLICY_RESULT_TRIGGER:
3958 case NECP_POLICY_RESULT_TRIGGER_IF_NEEDED:
3959 case NECP_POLICY_RESULT_TRIGGER_SCOPED:
3960 case NECP_POLICY_RESULT_NO_TRIGGER_SCOPED: {
3961 struct necp_policy_result_service service_parameters;
3962 u_int32_t service_result_length = necp_policy_get_result_parameter_length(policy);
3963 bool has_extra_service_data = FALSE;
3964 if (service_result_length >= (sizeof(service_parameters))) {
3965 has_extra_service_data = TRUE;
3966 }
3967 if (necp_policy_get_result_parameter(policy, (u_int8_t *)&service_parameters, sizeof(service_parameters))) {
3968 ultimate_result_parameter.service.identifier = necp_create_uuid_service_id_mapping(service_parameters.identifier);
3969 if (ultimate_result_parameter.service.identifier != 0) {
3970 uuid_copy(policy->applied_result_uuid, service_parameters.identifier);
3971 socket_layer_non_id_conditions = TRUE;
3972 if (has_extra_service_data) {
3973 ultimate_result_parameter.service.data = service_parameters.data;
3974 } else {
3975 ultimate_result_parameter.service.data = 0;
3976 }
3977 }
3978 }
3979 break;
3980 }
3981 case NECP_POLICY_RESULT_USE_NETAGENT: {
3982 uuid_t netagent_uuid;
3983 if (necp_policy_get_result_parameter(policy, (u_int8_t *)&netagent_uuid, sizeof(netagent_uuid))) {
3984 ultimate_result_parameter.netagent_id = necp_create_uuid_service_id_mapping(netagent_uuid);
3985 if (ultimate_result_parameter.netagent_id != 0) {
3986 uuid_copy(policy->applied_result_uuid, netagent_uuid);
3987 socket_layer_non_id_conditions = TRUE;
3988 }
3989 }
3990 break;
3991 }
3992 case NECP_POLICY_RESULT_SOCKET_SCOPED: {
3993 u_int32_t interface_name_length = necp_policy_get_result_parameter_length(policy);
3994 if (interface_name_length <= IFXNAMSIZ && interface_name_length > 0) {
3995 char interface_name[IFXNAMSIZ];
3996 ifnet_t scope_interface = NULL;
3997 necp_policy_get_result_parameter(policy, (u_int8_t *)interface_name, interface_name_length);
3998 interface_name[interface_name_length - 1] = 0; // Make sure the string is NULL terminated
3999 if (ifnet_find_by_name(interface_name, &scope_interface) == 0) {
4000 ultimate_result_parameter.scoped_interface_index = scope_interface->if_index;
4001 socket_layer_non_id_conditions = TRUE;
4002 ifnet_release(scope_interface);
4003 }
4004 }
4005 break;
4006 }
4007 case NECP_POLICY_RESULT_ROUTE_RULES: {
4008 if (policy->route_rules != NULL && policy->route_rules_size > 0) {
4009 u_int32_t route_rule_id = necp_create_route_rule(&necp_route_rules, policy->route_rules, policy->route_rules_size);
4010 if (route_rule_id > 0) {
4011 policy->applied_route_rules_id = route_rule_id;
4012 ultimate_result_parameter.route_rule_id = route_rule_id;
4013 socket_layer_non_id_conditions = TRUE;
4014 }
4015 }
4016 break;
4017 }
4018 default: {
4019 break;
4020 }
4021 }
4022
4023 if (socket_layer_non_id_conditions) {
4024 necp_kernel_policy_id policy_id = necp_kernel_socket_policy_add(policy->id, policy->order, session->session_order, session->proc_pid, master_condition_mask, master_condition_negated_mask, cond_app_id, cond_real_app_id, cond_custom_entitlement, cond_account_id, cond_domain, cond_pid, cond_uid, cond_bound_interface, cond_traffic_class, cond_protocol, &cond_local_start, &cond_local_end, cond_local_prefix, &cond_remote_start, &cond_remote_end, cond_remote_prefix, ultimate_result, ultimate_result_parameter);
4025
4026 if (policy_id == 0) {
4027 NECPLOG0(LOG_DEBUG, "Error applying socket kernel policy");
4028 goto fail;
4029 }
4030
4031 cond_ip_output_layer_id = policy_id;
4032 policy->kernel_socket_policies[0] = policy_id;
4033 }
4034
4035 if (ip_output_layer_non_id_conditions) {
4036 u_int32_t condition_mask = master_condition_mask;
4037 if (ip_output_layer_non_id_only) {
4038 condition_mask |= NECP_KERNEL_CONDITION_POLICY_ID;
4039 }
4040 necp_kernel_policy_id policy_id = necp_kernel_ip_output_policy_add(policy->id, policy->order, NECP_KERNEL_POLICY_SUBORDER_NON_ID_CONDITIONS, session->session_order, session->proc_pid, condition_mask, master_condition_negated_mask, NECP_KERNEL_POLICY_ID_NONE, cond_bound_interface, 0, cond_protocol, &cond_local_start, &cond_local_end, cond_local_prefix, &cond_remote_start, &cond_remote_end, cond_remote_prefix, ultimate_result, ultimate_result_parameter);
4041
4042 if (policy_id == 0) {
4043 NECPLOG0(LOG_DEBUG, "Error applying IP output kernel policy");
4044 goto fail;
4045 }
4046
4047 policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_NON_ID_CONDITIONS] = policy_id;
4048 }
4049
4050 if (ip_output_layer_id_condition) {
4051 necp_kernel_policy_id policy_id = necp_kernel_ip_output_policy_add(policy->id, policy->order, NECP_KERNEL_POLICY_SUBORDER_ID_CONDITION, session->session_order, session->proc_pid, NECP_KERNEL_CONDITION_POLICY_ID | NECP_KERNEL_CONDITION_ALL_INTERFACES, 0, cond_ip_output_layer_id, NULL, 0, 0, NULL, NULL, 0, NULL, NULL, 0, ultimate_result, ultimate_result_parameter);
4052
4053 if (policy_id == 0) {
4054 NECPLOG0(LOG_DEBUG, "Error applying IP output kernel policy");
4055 goto fail;
4056 }
4057
4058 policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_ID_CONDITION] = policy_id;
4059 }
4060
4061 // Extra policies for IP Output tunnels for when packets loop back
4062 if (ip_output_layer_tunnel_condition_from_id) {
4063 necp_kernel_policy_id policy_id = necp_kernel_ip_output_policy_add(policy->id, policy->order, NECP_KERNEL_POLICY_SUBORDER_NON_ID_TUNNEL_CONDITION, session->session_order, session->proc_pid, NECP_KERNEL_CONDITION_POLICY_ID | NECP_KERNEL_CONDITION_LAST_INTERFACE | NECP_KERNEL_CONDITION_ALL_INTERFACES, 0, policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_NON_ID_CONDITIONS], NULL, cond_last_interface_index, 0, NULL, NULL, 0, NULL, NULL, 0, secondary_result, secondary_result_parameter);
4064
4065 if (policy_id == 0) {
4066 NECPLOG0(LOG_DEBUG, "Error applying IP output kernel policy");
4067 goto fail;
4068 }
4069
4070 policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_NON_ID_TUNNEL_CONDITION] = policy_id;
4071 }
4072
4073 if (ip_output_layer_tunnel_condition_from_id) {
4074 necp_kernel_policy_id policy_id = necp_kernel_ip_output_policy_add(policy->id, policy->order, NECP_KERNEL_POLICY_SUBORDER_ID_TUNNEL_CONDITION, session->session_order, session->proc_pid, NECP_KERNEL_CONDITION_POLICY_ID | NECP_KERNEL_CONDITION_LAST_INTERFACE | NECP_KERNEL_CONDITION_ALL_INTERFACES, 0, policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_ID_CONDITION], NULL, cond_last_interface_index, 0, NULL, NULL, 0, NULL, NULL, 0, secondary_result, secondary_result_parameter);
4075
4076 if (policy_id == 0) {
4077 NECPLOG0(LOG_DEBUG, "Error applying IP output kernel policy");
4078 goto fail;
4079 }
4080
4081 policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_ID_TUNNEL_CONDITION] = policy_id;
4082 }
4083
4084 policy->applied = TRUE;
4085 policy->pending_update = FALSE;
4086 return (TRUE);
4087
4088 fail:
4089 return (FALSE);
4090 }
4091
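/*
 * Reconciles a dirty session: deletes policies marked for deletion, applies
 * new ones, re-applies updated ones, then rebuilds the derived kernel policy
 * tables and notifies clients of the change.
 */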
4092 static void
4093 necp_policy_apply_all(struct necp_session *session)
4094 {
4095 struct necp_session_policy *policy = NULL;
4096 struct necp_session_policy *temp_policy = NULL;
4097 struct kev_necp_policies_changed_data kev_data;
4098 kev_data.changed_count = 0;
4099
4100 lck_rw_lock_exclusive(&necp_kernel_policy_lock);
4101
4102 // Reconcile the session's policies: remove deleted ones, apply new ones, and re-apply updated ones
4103 if (session->dirty) {
4104 LIST_FOREACH_SAFE(policy, &session->policies, chain, temp_policy) {
4105 if (policy->pending_deletion) {
4106 if (policy->applied) {
4107 necp_policy_unapply(policy);
4108 }
4109 // Delete the policy
4110 necp_policy_delete(session, policy);
4111 } else if (!policy->applied) {
4112 necp_policy_apply(session, policy);
4113 } else if (policy->pending_update) {
4114 // Must have been applied, but needs an update. Remove and re-add.
4115 necp_policy_unapply(policy);
4116 necp_policy_apply(session, policy);
4117 }
4118 }
4119
4120 necp_kernel_socket_policies_update_uuid_table();
4121 necp_kernel_socket_policies_reprocess();
4122 necp_kernel_ip_output_policies_reprocess();
4123
4124 // Clear the session's dirty flag
4125 session->dirty = FALSE;
4126 }
4127
4128 lck_rw_done(&necp_kernel_policy_lock);
4129
4130 necp_update_all_clients();
4131 necp_post_change_event(&kev_data);
4132
4133 if (necp_debug) {
4134 NECPLOG0(LOG_DEBUG, "Applied NECP policies");
4135 }
4136 }
4137
4138 // Kernel Policy Management
4139 // ---------------------
4140 // Kernel policies are derived from session policies
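// Socket-level policy IDs are allocated from
// [NECP_KERNEL_POLICY_ID_FIRST_VALID_SOCKET, NECP_KERNEL_POLICY_ID_FIRST_VALID_IP),
// IP-level IDs from NECP_KERNEL_POLICY_ID_FIRST_VALID_IP upward; each space
// wraps around once when exhausted before giving up.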
4141 static necp_kernel_policy_id
4142 necp_kernel_policy_get_new_id(bool socket_level)
4143 {
4144 static necp_kernel_policy_id necp_last_kernel_socket_policy_id = 0;
4145 static necp_kernel_policy_id necp_last_kernel_ip_policy_id = 0;
4146
4147 necp_kernel_policy_id newid = NECP_KERNEL_POLICY_ID_NONE;
4148
4149 LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
4150
4151 if (socket_level) {
4152 bool wrapped = FALSE;
4153 do {
4154 necp_last_kernel_socket_policy_id++;
4155 if (necp_last_kernel_socket_policy_id < NECP_KERNEL_POLICY_ID_FIRST_VALID_SOCKET ||
4156 necp_last_kernel_socket_policy_id >= NECP_KERNEL_POLICY_ID_FIRST_VALID_IP) {
4157 if (wrapped) {
4158 // Already wrapped, give up
4159 NECPLOG0(LOG_ERR, "Failed to find a free socket kernel policy ID.\n");
4160 return (NECP_KERNEL_POLICY_ID_NONE);
4161 }
4162 necp_last_kernel_socket_policy_id = NECP_KERNEL_POLICY_ID_FIRST_VALID_SOCKET;
4163 wrapped = TRUE;
4164 }
4165 newid = necp_last_kernel_socket_policy_id;
4166 } while (necp_kernel_socket_policy_find(newid) != NULL); // If already used, keep trying
4167 } else {
4168 bool wrapped = FALSE;
4169 do {
4170 necp_last_kernel_ip_policy_id++;
4171 if (necp_last_kernel_ip_policy_id < NECP_KERNEL_POLICY_ID_FIRST_VALID_IP) {
4172 if (wrapped) {
4173 // Already wrapped, give up
4174 NECPLOG0(LOG_ERR, "Failed to find a free IP kernel policy ID.\n");
4175 return (NECP_KERNEL_POLICY_ID_NONE);
4176 }
4177 necp_last_kernel_ip_policy_id = NECP_KERNEL_POLICY_ID_FIRST_VALID_IP;
4178 wrapped = TRUE;
4179 }
4180 newid = necp_last_kernel_ip_policy_id;
4181 } while (necp_kernel_ip_output_policy_find(newid) != NULL); // If already used, keep trying
4182 }
4183
4184 if (newid == NECP_KERNEL_POLICY_ID_NONE) {
4185 NECPLOG0(LOG_ERR, "Allocate kernel policy id failed.\n");
4186 return (NECP_KERNEL_POLICY_ID_NONE);
4187 }
4188
4189 return (newid);
4190 }
4191
4192 #define NECP_KERNEL_VALID_SOCKET_CONDITIONS (NECP_KERNEL_CONDITION_APP_ID | NECP_KERNEL_CONDITION_REAL_APP_ID | NECP_KERNEL_CONDITION_DOMAIN | NECP_KERNEL_CONDITION_ACCOUNT_ID | NECP_KERNEL_CONDITION_PID | NECP_KERNEL_CONDITION_UID | NECP_KERNEL_CONDITION_ALL_INTERFACES | NECP_KERNEL_CONDITION_BOUND_INTERFACE | NECP_KERNEL_CONDITION_TRAFFIC_CLASS | NECP_KERNEL_CONDITION_PROTOCOL | NECP_KERNEL_CONDITION_LOCAL_START | NECP_KERNEL_CONDITION_LOCAL_END | NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_REMOTE_START | NECP_KERNEL_CONDITION_REMOTE_END | NECP_KERNEL_CONDITION_REMOTE_PREFIX | NECP_KERNEL_CONDITION_ENTITLEMENT | NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT)
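/*
 * Adds a socket-level kernel policy derived from a session policy. The
 * condition mask is sanitized (conditions that are not valid at the socket
 * layer, are redundant, or lack a required companion condition are dropped)
 * and the new policy is inserted sorted by session order, then policy order.
 */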
4193 static necp_kernel_policy_id
4194 necp_kernel_socket_policy_add(necp_policy_id parent_policy_id, necp_policy_order order, u_int32_t session_order, int session_pid, u_int32_t condition_mask, u_int32_t condition_negated_mask, necp_app_id cond_app_id, necp_app_id cond_real_app_id, char *cond_custom_entitlement, u_int32_t cond_account_id, char *cond_domain, pid_t cond_pid, uid_t cond_uid, ifnet_t cond_bound_interface, struct necp_policy_condition_tc_range cond_traffic_class, u_int16_t cond_protocol, union necp_sockaddr_union *cond_local_start, union necp_sockaddr_union *cond_local_end, u_int8_t cond_local_prefix, union necp_sockaddr_union *cond_remote_start, union necp_sockaddr_union *cond_remote_end, u_int8_t cond_remote_prefix, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter)
4195 {
4196 struct necp_kernel_socket_policy *new_kernel_policy = NULL;
4197 struct necp_kernel_socket_policy *tmp_kernel_policy = NULL;
4198
4199 MALLOC_ZONE(new_kernel_policy, struct necp_kernel_socket_policy *, sizeof(*new_kernel_policy), M_NECP_SOCKET_POLICY, M_WAITOK);
4200 if (new_kernel_policy == NULL) {
4201 goto done;
4202 }
4203
4204 memset(new_kernel_policy, 0, sizeof(*new_kernel_policy)); // M_ZERO is not supported for MALLOC_ZONE
4205 new_kernel_policy->parent_policy_id = parent_policy_id;
4206 new_kernel_policy->id = necp_kernel_policy_get_new_id(true);
4207 new_kernel_policy->order = order;
4208 new_kernel_policy->session_order = session_order;
4209 new_kernel_policy->session_pid = session_pid;
4210
4211 // Sanitize condition mask
4212 new_kernel_policy->condition_mask = (condition_mask & NECP_KERNEL_VALID_SOCKET_CONDITIONS);
4213 if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES) && (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE)) {
4214 new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_BOUND_INTERFACE;
4215 }
4216 if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) && !(new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID)) {
4217 new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_REAL_APP_ID;
4218 }
4219 if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT) && !(new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID)) {
4220 new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_ENTITLEMENT;
4221 }
4222 if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) && (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX)) {
4223 new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_LOCAL_PREFIX;
4224 }
4225 if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) && (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX)) {
4226 new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_REMOTE_PREFIX;
4227 }
4228 new_kernel_policy->condition_negated_mask = condition_negated_mask & new_kernel_policy->condition_mask;
4229
4230 // Set condition values
4231 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID) {
4232 new_kernel_policy->cond_app_id = cond_app_id;
4233 }
4234 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
4235 new_kernel_policy->cond_real_app_id = cond_real_app_id;
4236 }
4237 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT) {
4238 new_kernel_policy->cond_custom_entitlement = cond_custom_entitlement;
4239 new_kernel_policy->cond_custom_entitlement_matched = necp_boolean_state_unknown;
4240 }
4241 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID) {
4242 new_kernel_policy->cond_account_id = cond_account_id;
4243 }
4244 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_DOMAIN) {
4245 new_kernel_policy->cond_domain = cond_domain;
4246 new_kernel_policy->cond_domain_dot_count = necp_count_dots(cond_domain, strlen(cond_domain));
4247 }
4248 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PID) {
4249 new_kernel_policy->cond_pid = cond_pid;
4250 }
4251 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_UID) {
4252 new_kernel_policy->cond_uid = cond_uid;
4253 }
4254 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
4255 if (cond_bound_interface) {
4256 ifnet_reference(cond_bound_interface);
4257 }
4258 new_kernel_policy->cond_bound_interface = cond_bound_interface;
4259 }
4260 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
4261 new_kernel_policy->cond_traffic_class = cond_traffic_class;
4262 }
4263 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
4264 new_kernel_policy->cond_protocol = cond_protocol;
4265 }
4266 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
4267 memcpy(&new_kernel_policy->cond_local_start, cond_local_start, cond_local_start->sa.sa_len);
4268 }
4269 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
4270 memcpy(&new_kernel_policy->cond_local_end, cond_local_end, cond_local_end->sa.sa_len);
4271 }
4272 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
4273 new_kernel_policy->cond_local_prefix = cond_local_prefix;
4274 }
4275 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
4276 memcpy(&new_kernel_policy->cond_remote_start, cond_remote_start, cond_remote_start->sa.sa_len);
4277 }
4278 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
4279 memcpy(&new_kernel_policy->cond_remote_end, cond_remote_end, cond_remote_end->sa.sa_len);
4280 }
4281 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
4282 new_kernel_policy->cond_remote_prefix = cond_remote_prefix;
4283 }
4284
4285 new_kernel_policy->result = result;
4286 memcpy(&new_kernel_policy->result_parameter, &result_parameter, sizeof(result_parameter));
4287
4288 if (necp_debug) {
4289 NECPLOG(LOG_DEBUG, "Added kernel policy: socket, id=%d, mask=%x\n", new_kernel_policy->id, new_kernel_policy->condition_mask);
4290 }
4291 LIST_INSERT_SORTED_TWICE_ASCENDING(&necp_kernel_socket_policies, new_kernel_policy, chain, session_order, order, tmp_kernel_policy);
4292 done:
4293 return (new_kernel_policy ? new_kernel_policy->id : 0);
4294 }
4295
4296 static struct necp_kernel_socket_policy *
4297 necp_kernel_socket_policy_find(necp_kernel_policy_id policy_id)
4298 {
4299 struct necp_kernel_socket_policy *kernel_policy = NULL;
4300 struct necp_kernel_socket_policy *tmp_kernel_policy = NULL;
4301
4302 if (policy_id == 0) {
4303 return (NULL);
4304 }
4305
4306 LIST_FOREACH_SAFE(kernel_policy, &necp_kernel_socket_policies, chain, tmp_kernel_policy) {
4307 if (kernel_policy->id == policy_id) {
4308 return (kernel_policy);
4309 }
4310 }
4311
4312 return (NULL);
4313 }
4314
4315 static bool
4316 necp_kernel_socket_policy_delete(necp_kernel_policy_id policy_id)
4317 {
4318 struct necp_kernel_socket_policy *policy = NULL;
4319
4320 LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
4321
4322 policy = necp_kernel_socket_policy_find(policy_id);
4323 if (policy) {
4324 LIST_REMOVE(policy, chain);
4325
4326 if (policy->cond_bound_interface) {
4327 ifnet_release(policy->cond_bound_interface);
4328 policy->cond_bound_interface = NULL;
4329 }
4330
4331 if (policy->cond_domain) {
4332 FREE(policy->cond_domain, M_NECP);
4333 policy->cond_domain = NULL;
4334 }
4335
4336 if (policy->cond_custom_entitlement) {
4337 FREE(policy->cond_custom_entitlement, M_NECP);
4338 policy->cond_custom_entitlement = NULL;
4339 }
4340
4341 FREE_ZONE(policy, sizeof(*policy), M_NECP_SOCKET_POLICY);
4342 return (TRUE);
4343 }
4344
4345 return (FALSE);
4346 }
4347
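/*
 * Formats a human-readable description of a kernel policy result (and its
 * parameter) into result_string, which must be at least MAX_RESULT_STRING_LEN
 * bytes; used when dumping policies for debugging.
 */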
4348 static inline const char *
4349 necp_get_result_description(char *result_string, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter)
4350 {
4351 uuid_string_t uuid_string;
4352 switch (result) {
4353 case NECP_KERNEL_POLICY_RESULT_NONE: {
4354 snprintf(result_string, MAX_RESULT_STRING_LEN, "None");
4355 break;
4356 }
4357 case NECP_KERNEL_POLICY_RESULT_PASS: {
4358 snprintf(result_string, MAX_RESULT_STRING_LEN, "Pass");
4359 break;
4360 }
4361 case NECP_KERNEL_POLICY_RESULT_SKIP: {
4362 snprintf(result_string, MAX_RESULT_STRING_LEN, "Skip (%u)", result_parameter.skip_policy_order);
4363 break;
4364 }
4365 case NECP_KERNEL_POLICY_RESULT_DROP: {
4366 snprintf(result_string, MAX_RESULT_STRING_LEN, "Drop");
4367 break;
4368 }
4369 case NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT: {
4370 snprintf(result_string, MAX_RESULT_STRING_LEN, "SocketDivert (%d)", result_parameter.flow_divert_control_unit);
4371 break;
4372 }
4373 case NECP_KERNEL_POLICY_RESULT_SOCKET_FILTER: {
4374 snprintf(result_string, MAX_RESULT_STRING_LEN, "SocketFilter (%d)", result_parameter.filter_control_unit);
4375 break;
4376 }
4377 case NECP_KERNEL_POLICY_RESULT_IP_TUNNEL: {
4378 ifnet_t interface = ifindex2ifnet[result_parameter.tunnel_interface_index];
4379 snprintf(result_string, MAX_RESULT_STRING_LEN, "IPTunnel (%s%d)", ifnet_name(interface), ifnet_unit(interface));
4380 break;
4381 }
4382 case NECP_KERNEL_POLICY_RESULT_IP_FILTER: {
4383 snprintf(result_string, MAX_RESULT_STRING_LEN, "IPFilter");
4384 break;
4385 }
4386 case NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED: {
4387 ifnet_t interface = ifindex2ifnet[result_parameter.scoped_interface_index];
4388 snprintf(result_string, MAX_RESULT_STRING_LEN, "SocketScoped (%s%d)", ifnet_name(interface), ifnet_unit(interface));
4389 break;
4390 }
4391 case NECP_KERNEL_POLICY_RESULT_ROUTE_RULES: {
4392 int index = 0;
4393 char interface_names[MAX_ROUTE_RULE_INTERFACES][IFXNAMSIZ]; // one IFXNAMSIZ-sized name per exception interface
4394 struct necp_route_rule *route_rule = necp_lookup_route_rule_locked(&necp_route_rules, result_parameter.route_rule_id);
4395 if (route_rule != NULL) {
4396 for (index = 0; index < MAX_ROUTE_RULE_INTERFACES; index++) {
4397 if (route_rule->exception_if_indices[index] != 0) {
4398 ifnet_t interface = ifindex2ifnet[route_rule->exception_if_indices[index]];
4399 snprintf(interface_names[index], IFXNAMSIZ, "%s%d", ifnet_name(interface), ifnet_unit(interface));
4400 } else {
4401 memset(interface_names[index], 0, IFXNAMSIZ);
4402 }
4403 }
4404 switch (route_rule->default_action) {
4405 case NECP_ROUTE_RULE_DENY_INTERFACE:
4406 snprintf(result_string, MAX_RESULT_STRING_LEN, "RouteRules (Only %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)",
4407 (route_rule->cellular_action == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? "Cell " : "",
4408 (route_rule->wifi_action == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? "WiFi " : "",
4409 (route_rule->wired_action == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? "Wired " : "",
4410 (route_rule->expensive_action == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? "Exp " : "",
4411 (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[0] : "",
4412 (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
4413 (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[1] : "",
4414 (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
4415 (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[2] : "",
4416 (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
4417 (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[3] : "",
4418 (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
4419 (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[4] : "",
4420 (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
4421 (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[5] : "",
4422 (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
4423 (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[6] : "",
4424 (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
4425 (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[7] : "",
4426 (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
4427 (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[8] : "",
4428 (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
4429 (route_rule->exception_if_actions[9] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[9] : "");
4430 break;
4431 case NECP_ROUTE_RULE_ALLOW_INTERFACE:
4432 snprintf(result_string, MAX_RESULT_STRING_LEN, "RouteRules (%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)",
4433 (route_rule->cellular_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!Cell " : "",
4434 (route_rule->wifi_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!WiFi " : "",
4435 (route_rule->wired_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!Wired " : "",
4436 (route_rule->expensive_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!Exp " : "",
4437 (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
4438 (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[0] : "",
4439 (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
4440 (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[1] : "",
4441 (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
4442 (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[2] : "",
4443 (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
4444 (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[3] : "",
4445 (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
4446 (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[4] : "",
4447 (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
4448 (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[5] : "",
4449 (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
4450 (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[6] : "",
4451 (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
4452 (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[7] : "",
4453 (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
4454 (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[8] : "",
4455 (route_rule->exception_if_actions[9] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
4456 (route_rule->exception_if_actions[9] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[9] : "");
4457 break;
4458 case NECP_ROUTE_RULE_QOS_MARKING:
4459 snprintf(result_string, MAX_RESULT_STRING_LEN, "RouteRules (QoSMarking %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)",
4460 (route_rule->cellular_action == NECP_ROUTE_RULE_QOS_MARKING) ? "Cell " : "",
4461 (route_rule->wifi_action == NECP_ROUTE_RULE_QOS_MARKING) ? "WiFi " : "",
4462 (route_rule->wired_action == NECP_ROUTE_RULE_QOS_MARKING) ? "Wired " : "",
4463 (route_rule->expensive_action == NECP_ROUTE_RULE_QOS_MARKING) ? "Exp " : "",
4464 (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[0] : "",
4465 (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
4466 (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[1] : "",
4467 (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
4468 (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[2] : "",
4469 (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
4470 (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[3] : "",
4471 (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
4472 (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[4] : "",
4473 (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
4474 (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[5] : "",
4475 (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
4476 (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[6] : "",
4477 (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
4478 (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[7] : "",
4479 (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
4480 (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[8] : "",
4481 (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
4482 (route_rule->exception_if_actions[9] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[9] : "");
4483 break;
4484 default:
4485 snprintf(result_string, MAX_RESULT_STRING_LEN, "RouteRules (Unknown)");
4486 break;
4487 }
4488 }
4489 break;
4490 }
4491 case NECP_KERNEL_POLICY_RESULT_USE_NETAGENT: {
4492 bool found_mapping = FALSE;
4493 struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.netagent_id);
4494 if (mapping != NULL) {
4495 uuid_unparse(mapping->uuid, uuid_string);
4496 found_mapping = TRUE;
4497 }
4498 snprintf(result_string, MAX_RESULT_STRING_LEN, "UseNetAgent (%s)", found_mapping ? uuid_string : "Unknown");
4499 break;
4500 }
4501 case NECP_POLICY_RESULT_TRIGGER: {
4502 bool found_mapping = FALSE;
4503 struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.service.identifier);
4504 if (mapping != NULL) {
4505 uuid_unparse(mapping->uuid, uuid_string);
4506 found_mapping = TRUE;
4507 }
4508 snprintf(result_string, MAX_RESULT_STRING_LEN, "Trigger (%s.%d)", found_mapping ? uuid_string : "Unknown", result_parameter.service.data);
4509 break;
4510 }
4511 case NECP_POLICY_RESULT_TRIGGER_IF_NEEDED: {
4512 bool found_mapping = FALSE;
4513 struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.service.identifier);
4514 if (mapping != NULL) {
4515 uuid_unparse(mapping->uuid, uuid_string);
4516 found_mapping = TRUE;
4517 }
4518 snprintf(result_string, MAX_RESULT_STRING_LEN, "TriggerIfNeeded (%s.%d)", found_mapping ? uuid_string : "Unknown", result_parameter.service.data);
4519 break;
4520 }
4521 case NECP_POLICY_RESULT_TRIGGER_SCOPED: {
4522 bool found_mapping = FALSE;
4523 struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.service.identifier);
4524 if (mapping != NULL) {
4525 uuid_unparse(mapping->uuid, uuid_string);
4526 found_mapping = TRUE;
4527 }
4528 snprintf(result_string, MAX_RESULT_STRING_LEN, "TriggerScoped (%s.%d)", found_mapping ? uuid_string : "Unknown", result_parameter.service.data);
4529 break;
4530 }
4531 case NECP_POLICY_RESULT_NO_TRIGGER_SCOPED: {
4532 bool found_mapping = FALSE;
4533 struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.service.identifier);
4534 if (mapping != NULL) {
4535 uuid_unparse(mapping->uuid, uuid_string);
4536 found_mapping = TRUE;
4537 }
4538 snprintf(result_string, MAX_RESULT_STRING_LEN, "NoTriggerScoped (%s.%d)", found_mapping ? uuid_string : "Unknown", result_parameter.service.data);
4539 break;
4540 }
4541 default: {
4542 snprintf(result_string, MAX_RESULT_STRING_LEN, "Unknown %d (%d)", result, result_parameter.tunnel_interface_index);
4543 break;
4544 }
4545 }
4546 return (result_string);
4547 }
4548
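/*
 * When necp_debug is enabled, log every socket-layer policy: first the app-layer map,
 * then each app-ID bucket of the socket-layer map, including session order, condition
 * mask, and a formatted result description.
 */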
4549 static void
4550 necp_kernel_socket_policies_dump_all(void)
4551 {
4552 if (necp_debug) {
4553 struct necp_kernel_socket_policy *policy = NULL;
4554 int policy_i;
4555 int app_i;
4556 char result_string[MAX_RESULT_STRING_LEN];
4557 char proc_name_string[MAXCOMLEN + 1];
4558 memset(result_string, 0, MAX_RESULT_STRING_LEN);
4559 memset(proc_name_string, 0, MAXCOMLEN + 1);
4560
4561 NECPLOG0(LOG_DEBUG, "NECP Application Policies:\n");
4562 NECPLOG0(LOG_DEBUG, "-----------\n");
4563 for (policy_i = 0; necp_kernel_socket_policies_app_layer_map != NULL && necp_kernel_socket_policies_app_layer_map[policy_i] != NULL; policy_i++) {
4564 policy = necp_kernel_socket_policies_app_layer_map[policy_i];
4565 proc_name(policy->session_pid, proc_name_string, MAXCOMLEN);
4566 NECPLOG(LOG_DEBUG, "\t%3d. Policy ID: %5d\tProcess: %10.10s\tOrder: %04d.%04d\tMask: %5x\tResult: %s\n", policy_i, policy->id, proc_name_string, policy->session_order, policy->order, policy->condition_mask, necp_get_result_description(result_string, policy->result, policy->result_parameter));
4567 }
4568 if (necp_kernel_socket_policies_app_layer_map != NULL && necp_kernel_socket_policies_app_layer_map[0] != NULL) {
4569 NECPLOG0(LOG_DEBUG, "-----------\n");
4570 }
4571
4572 NECPLOG0(LOG_DEBUG, "NECP Socket Policies:\n");
4573 NECPLOG0(LOG_DEBUG, "-----------\n");
4574 for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) {
4575 NECPLOG(LOG_DEBUG, "\tApp Bucket: %d\n", app_i);
4576 for (policy_i = 0; necp_kernel_socket_policies_map[app_i] != NULL && (necp_kernel_socket_policies_map[app_i])[policy_i] != NULL; policy_i++) {
4577 policy = (necp_kernel_socket_policies_map[app_i])[policy_i];
4578 proc_name(policy->session_pid, proc_name_string, MAXCOMLEN);
4579 NECPLOG(LOG_DEBUG, "\t%3d. Policy ID: %5d\tProcess: %10.10s\tOrder: %04d.%04d\tMask: %5x\tResult: %s\n", policy_i, policy->id, proc_name_string, policy->session_order, policy->order, policy->condition_mask, necp_get_result_description(result_string, policy->result, policy->result_parameter));
4580 }
4581 NECPLOG0(LOG_DEBUG, "-----------\n");
4582 }
4583 }
4584 }
4585
4586 static inline bool
4587 necp_kernel_socket_result_is_trigger_service_type(struct necp_kernel_socket_policy *kernel_policy)
4588 {
4589 return (kernel_policy->result >= NECP_KERNEL_POLICY_RESULT_TRIGGER && kernel_policy->result <= NECP_KERNEL_POLICY_RESULT_NO_TRIGGER_SCOPED);
4590 }
4591
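/*
 * Returns TRUE if upper_policy's result would block out (shadow) lower_policy's result
 * when both match the same traffic: Drop shadows everything; filters, route rules, and
 * netagent results shadow nothing; trigger-style results only shadow other trigger-style
 * results; and a Skip only shadows policies of the same session that fall inside its
 * skip window.
 */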
4592 static inline bool
4593 necp_kernel_socket_policy_results_overlap(struct necp_kernel_socket_policy *upper_policy, struct necp_kernel_socket_policy *lower_policy)
4594 {
4595 if (upper_policy->result == NECP_KERNEL_POLICY_RESULT_DROP) {
4596 // Drop always cancels out lower policies
4597 return (TRUE);
4598 } else if (upper_policy->result == NECP_KERNEL_POLICY_RESULT_SOCKET_FILTER ||
4599 upper_policy->result == NECP_KERNEL_POLICY_RESULT_ROUTE_RULES ||
4600 upper_policy->result == NECP_KERNEL_POLICY_RESULT_USE_NETAGENT) {
4601 // Filters, route rules, and netagent results never cancel out lower policies
4602 return (FALSE);
4603 } else if (necp_kernel_socket_result_is_trigger_service_type(upper_policy)) {
4604 // Trigger/Scoping policies can overlap one another, but not other results
4605 return (necp_kernel_socket_result_is_trigger_service_type(lower_policy));
4606 } else if (upper_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
4607 if (upper_policy->session_order != lower_policy->session_order) {
4608 // A skip cannot override a policy of a different session
4609 return (FALSE);
4610 } else {
4611 if (upper_policy->result_parameter.skip_policy_order == 0 ||
4612 lower_policy->order >= upper_policy->result_parameter.skip_policy_order) {
4613 // This policy is beyond the skip
4614 return (FALSE);
4615 } else {
4616 // This policy is inside the skip
4617 return (TRUE);
4618 }
4619 }
4620 }
4621
4622 // A hard pass, flow divert, tunnel, or scope will currently block out lower policies
4623 return (TRUE);
4624 }
4625
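/*
 * Decide whether a new socket-layer policy is redundant given the policies already placed
 * in policy_array (sorted by session order and order). The policy is unnecessary if an
 * earlier policy outside any active skip window has an overlapping result and a strictly
 * more general set of conditions: every condition of the earlier policy is also present
 * on this policy, with the same negation and equal or wider values.
 */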
4626 static bool
4627 necp_kernel_socket_policy_is_unnecessary(struct necp_kernel_socket_policy *policy, struct necp_kernel_socket_policy **policy_array, int valid_indices)
4628 {
4629 bool can_skip = FALSE;
4630 u_int32_t highest_skip_session_order = 0;
4631 u_int32_t highest_skip_order = 0;
4632 int i;
4633 for (i = 0; i < valid_indices; i++) {
4634 struct necp_kernel_socket_policy *compared_policy = policy_array[i];
4635
4636 // For policies in a skip window, we can't mark conflicting policies as unnecessary
4637 if (can_skip) {
4638 if (highest_skip_session_order != compared_policy->session_order ||
4639 (highest_skip_order != 0 && compared_policy->order >= highest_skip_order)) {
4640 // If we've moved on to the next session, or passed the skip window
4641 highest_skip_session_order = 0;
4642 highest_skip_order = 0;
4643 can_skip = FALSE;
4644 } else {
4645 // If this policy is also a skip, it can increase the skip window
4646 if (compared_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
4647 if (compared_policy->result_parameter.skip_policy_order > highest_skip_order) {
4648 highest_skip_order = compared_policy->result_parameter.skip_policy_order;
4649 }
4650 }
4651 continue;
4652 }
4653 }
4654
4655 if (compared_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
4656 // This policy is a skip. Set the skip window accordingly
4657 can_skip = TRUE;
4658 highest_skip_session_order = compared_policy->session_order;
4659 highest_skip_order = compared_policy->result_parameter.skip_policy_order;
4660 }
4661
4662 // The result of the compared policy must be able to block out this policy result
4663 if (!necp_kernel_socket_policy_results_overlap(compared_policy, policy)) {
4664 continue;
4665 }
4666
4667 // If new policy matches All Interfaces, compared policy must also
4668 if ((policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES) && !(compared_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES)) {
4669 continue;
4670 }
4671
4672 // A default policy (no conditions) always makes lower policies unnecessary
4673 if (compared_policy->condition_mask == 0) {
4674 return (TRUE);
4675 }
4676
4677 // The compared policy must be more general than this policy, including only conditions this policy also carries
4678 if ((policy->condition_mask & compared_policy->condition_mask) != compared_policy->condition_mask) {
4679 continue;
4680 }
4681
4682 // Negative conditions must match for the overlapping conditions
4683 if ((policy->condition_negated_mask & compared_policy->condition_mask) != (compared_policy->condition_negated_mask & compared_policy->condition_mask)) {
4684 continue;
4685 }
4686
4687 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_DOMAIN &&
4688 strcmp(compared_policy->cond_domain, policy->cond_domain) != 0) {
4689 continue;
4690 }
4691
4692 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT &&
4693 strcmp(compared_policy->cond_custom_entitlement, policy->cond_custom_entitlement) != 0) {
4694 continue;
4695 }
4696
4697 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID &&
4698 compared_policy->cond_account_id != policy->cond_account_id) {
4699 continue;
4700 }
4701
4702 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID &&
4703 compared_policy->cond_policy_id != policy->cond_policy_id) {
4704 continue;
4705 }
4706
4707 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID &&
4708 compared_policy->cond_app_id != policy->cond_app_id) {
4709 continue;
4710 }
4711
4712 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID &&
4713 compared_policy->cond_real_app_id != policy->cond_real_app_id) {
4714 continue;
4715 }
4716
4717 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_PID &&
4718 compared_policy->cond_pid != policy->cond_pid) {
4719 continue;
4720 }
4721
4722 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_UID &&
4723 compared_policy->cond_uid != policy->cond_uid) {
4724 continue;
4725 }
4726
4727 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE &&
4728 compared_policy->cond_bound_interface != policy->cond_bound_interface) {
4729 continue;
4730 }
4731
4732 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL &&
4733 compared_policy->cond_protocol != policy->cond_protocol) {
4734 continue;
4735 }
4736
4737 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS &&
4738 !(compared_policy->cond_traffic_class.start_tc <= policy->cond_traffic_class.start_tc &&
4739 compared_policy->cond_traffic_class.end_tc >= policy->cond_traffic_class.end_tc)) {
4740 continue;
4741 }
4742
4743 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
4744 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
4745 if (!necp_is_range_in_range((struct sockaddr *)&policy->cond_local_start, (struct sockaddr *)&policy->cond_local_end, (struct sockaddr *)&compared_policy->cond_local_start, (struct sockaddr *)&compared_policy->cond_local_end)) {
4746 continue;
4747 }
4748 } else if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
4749 if (compared_policy->cond_local_prefix > policy->cond_local_prefix ||
4750 !necp_is_addr_in_subnet((struct sockaddr *)&policy->cond_local_start, (struct sockaddr *)&compared_policy->cond_local_start, compared_policy->cond_local_prefix)) {
4751 continue;
4752 }
4753 }
4754 }
4755
4756 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
4757 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
4758 if (!necp_is_range_in_range((struct sockaddr *)&policy->cond_remote_start, (struct sockaddr *)&policy->cond_remote_end, (struct sockaddr *)&compared_policy->cond_remote_start, (struct sockaddr *)&compared_policy->cond_remote_end)) {
4759 continue;
4760 }
4761 } else if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
4762 if (compared_policy->cond_remote_prefix > policy->cond_remote_prefix ||
4763 !necp_is_addr_in_subnet((struct sockaddr *)&policy->cond_remote_start, (struct sockaddr *)&compared_policy->cond_remote_start, compared_policy->cond_remote_prefix)) {
4764 continue;
4765 }
4766 }
4767 }
4768
4769 return (TRUE);
4770 }
4771
4772 return (FALSE);
4773 }
4774
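/*
 * Rebuild the socket-layer lookup maps from the master policy list. The first pass
 * recomputes the combined condition masks and the per-app-ID-bucket counts; the second
 * pass allocates NULL-terminated arrays and fills them, skipping policies that
 * necp_kernel_socket_policy_is_unnecessary() marks as redundant. Policies that do not
 * match on application ID (or match it negated) are copied into every bucket. Called
 * with the kernel policy lock held exclusively.
 */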
4775 static bool
4776 necp_kernel_socket_policies_reprocess(void)
4777 {
4778 int app_i;
4779 int bucket_allocation_counts[NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS];
4780 int bucket_current_free_index[NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS];
4781 int app_layer_allocation_count = 0;
4782 int app_layer_current_free_index = 0;
4783 struct necp_kernel_socket_policy *kernel_policy = NULL;
4784
4785 LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
4786
4787 // Reset mask to 0
4788 necp_kernel_application_policies_condition_mask = 0;
4789 necp_kernel_socket_policies_condition_mask = 0;
4790 necp_kernel_application_policies_count = 0;
4791 necp_kernel_socket_policies_count = 0;
4792 necp_kernel_socket_policies_non_app_count = 0;
4793
4794 // Reset all maps to NULL
4795 for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) {
4796 if (necp_kernel_socket_policies_map[app_i] != NULL) {
4797 FREE(necp_kernel_socket_policies_map[app_i], M_NECP);
4798 necp_kernel_socket_policies_map[app_i] = NULL;
4799 }
4800
4801 // Init counts
4802 bucket_allocation_counts[app_i] = 0;
4803 }
4804 if (necp_kernel_socket_policies_app_layer_map != NULL) {
4805 FREE(necp_kernel_socket_policies_app_layer_map, M_NECP);
4806 necp_kernel_socket_policies_app_layer_map = NULL;
4807 }
4808
4809 // Create masks and counts
4810 LIST_FOREACH(kernel_policy, &necp_kernel_socket_policies, chain) {
4811 // App layer mask/count
4812 necp_kernel_application_policies_condition_mask |= kernel_policy->condition_mask;
4813 necp_kernel_application_policies_count++;
4814 app_layer_allocation_count++;
4815
4816 // Update socket layer bucket mask/counts
4817 necp_kernel_socket_policies_condition_mask |= kernel_policy->condition_mask;
4818 necp_kernel_socket_policies_count++;
4819
4820 if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID) ||
4821 kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_APP_ID) {
4822 necp_kernel_socket_policies_non_app_count++;
4823 for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) {
4824 bucket_allocation_counts[app_i]++;
4825 }
4826 } else {
4827 bucket_allocation_counts[NECP_SOCKET_MAP_APP_ID_TO_BUCKET(kernel_policy->cond_app_id)]++;
4828 }
4829 }
4830
4831 // Allocate maps
4832 for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) {
4833 if (bucket_allocation_counts[app_i] > 0) {
4834 // Allocate a NULL-terminated array of policy pointers for each bucket
4835 MALLOC(necp_kernel_socket_policies_map[app_i], struct necp_kernel_socket_policy **, sizeof(struct necp_kernel_socket_policy *) * (bucket_allocation_counts[app_i] + 1), M_NECP, M_WAITOK);
4836 if (necp_kernel_socket_policies_map[app_i] == NULL) {
4837 goto fail;
4838 }
4839
4840 // Initialize the first entry to NULL
4841 (necp_kernel_socket_policies_map[app_i])[0] = NULL;
4842 }
4843 bucket_current_free_index[app_i] = 0;
4844 }
4845 MALLOC(necp_kernel_socket_policies_app_layer_map, struct necp_kernel_socket_policy **, sizeof(struct necp_kernel_socket_policy *) * (app_layer_allocation_count + 1), M_NECP, M_WAITOK);
4846 if (necp_kernel_socket_policies_app_layer_map == NULL) {
4847 goto fail;
4848 }
4849 necp_kernel_socket_policies_app_layer_map[0] = NULL;
4850
4851 // Fill out maps
4852 LIST_FOREACH(kernel_policy, &necp_kernel_socket_policies, chain) {
4853 // Insert pointers into map
4854 if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID) ||
4855 kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_APP_ID) {
4856 for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) {
4857 if (!necp_kernel_socket_policy_is_unnecessary(kernel_policy, necp_kernel_socket_policies_map[app_i], bucket_current_free_index[app_i])) {
4858 (necp_kernel_socket_policies_map[app_i])[(bucket_current_free_index[app_i])] = kernel_policy;
4859 bucket_current_free_index[app_i]++;
4860 (necp_kernel_socket_policies_map[app_i])[(bucket_current_free_index[app_i])] = NULL;
4861 }
4862 }
4863 } else {
4864 app_i = NECP_SOCKET_MAP_APP_ID_TO_BUCKET(kernel_policy->cond_app_id);
4865 if (!necp_kernel_socket_policy_is_unnecessary(kernel_policy, necp_kernel_socket_policies_map[app_i], bucket_current_free_index[app_i])) {
4866 (necp_kernel_socket_policies_map[app_i])[(bucket_current_free_index[app_i])] = kernel_policy;
4867 bucket_current_free_index[app_i]++;
4868 (necp_kernel_socket_policies_map[app_i])[(bucket_current_free_index[app_i])] = NULL;
4869 }
4870 }
4871
4872 if (!necp_kernel_socket_policy_is_unnecessary(kernel_policy, necp_kernel_socket_policies_app_layer_map, app_layer_current_free_index)) {
4873 necp_kernel_socket_policies_app_layer_map[app_layer_current_free_index] = kernel_policy;
4874 app_layer_current_free_index++;
4875 necp_kernel_socket_policies_app_layer_map[app_layer_current_free_index] = NULL;
4876 }
4877 }
4878 necp_kernel_socket_policies_dump_all();
4879 BUMP_KERNEL_SOCKET_POLICIES_GENERATION_COUNT();
4880 return (TRUE);
4881
4882 fail:
4883 // Free memory, reset masks to 0
4884 necp_kernel_application_policies_condition_mask = 0;
4885 necp_kernel_socket_policies_condition_mask = 0;
4886 necp_kernel_application_policies_count = 0;
4887 necp_kernel_socket_policies_count = 0;
4888 necp_kernel_socket_policies_non_app_count = 0;
4889 for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) {
4890 if (necp_kernel_socket_policies_map[app_i] != NULL) {
4891 FREE(necp_kernel_socket_policies_map[app_i], M_NECP);
4892 necp_kernel_socket_policies_map[app_i] = NULL;
4893 }
4894 }
4895 if (necp_kernel_socket_policies_app_layer_map != NULL) {
4896 FREE(necp_kernel_socket_policies_app_layer_map, M_NECP);
4897 necp_kernel_socket_policies_app_layer_map = NULL;
4898 }
4899 return (FALSE);
4900 }
4901
4902 static u_int32_t
4903 necp_get_new_string_id(void)
4904 {
4905 u_int32_t newid = 0;
4906
4907 LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
4908
4909 necp_last_string_id++;
4910 if (necp_last_string_id < 1) {
4911 necp_last_string_id = 1;
4912 }
4913
4914 newid = necp_last_string_id;
4915 if (newid == 0) {
4916 NECPLOG0(LOG_DEBUG, "Allocate string id failed.\n");
4917 return (0);
4918 }
4919
4920 return (newid);
4921 }
4922
4923 static struct necp_string_id_mapping *
4924 necp_lookup_string_to_id_locked(struct necp_string_id_mapping_list *list, char *string)
4925 {
4926 struct necp_string_id_mapping *searchentry = NULL;
4927 struct necp_string_id_mapping *foundentry = NULL;
4928
4929 LIST_FOREACH(searchentry, list, chain) {
4930 if (strcmp(searchentry->string, string) == 0) {
4931 foundentry = searchentry;
4932 break;
4933 }
4934 }
4935
4936 return (foundentry);
4937 }
4938
4939 static struct necp_string_id_mapping *
4940 necp_lookup_string_with_id_locked(struct necp_string_id_mapping_list *list, u_int32_t local_id)
4941 {
4942 struct necp_string_id_mapping *searchentry = NULL;
4943 struct necp_string_id_mapping *foundentry = NULL;
4944
4945 LIST_FOREACH(searchentry, list, chain) {
4946 if (searchentry->id == local_id) {
4947 foundentry = searchentry;
4948 break;
4949 }
4950 }
4951
4952 return (foundentry);
4953 }
4954
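/*
 * Map an arbitrary string to a compact 32-bit ID so conditions can store and compare a
 * number instead of a string. Mappings are refcounted: creating a mapping for an existing
 * string bumps its refcount, and necp_remove_string_to_id_mapping() frees the entry when
 * the refcount drops to zero.
 */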
4955 static u_int32_t
4956 necp_create_string_to_id_mapping(struct necp_string_id_mapping_list *list, char *string)
4957 {
4958 u_int32_t string_id = 0;
4959 struct necp_string_id_mapping *existing_mapping = NULL;
4960
4961 LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
4962
4963 existing_mapping = necp_lookup_string_to_id_locked(list, string);
4964 if (existing_mapping != NULL) {
4965 string_id = existing_mapping->id;
4966 existing_mapping->refcount++;
4967 } else {
4968 struct necp_string_id_mapping *new_mapping = NULL;
4969 MALLOC(new_mapping, struct necp_string_id_mapping *, sizeof(struct necp_string_id_mapping), M_NECP, M_WAITOK);
4970 if (new_mapping != NULL) {
4971 memset(new_mapping, 0, sizeof(struct necp_string_id_mapping));
4972
4973 size_t length = strlen(string) + 1;
4974 MALLOC(new_mapping->string, char *, length, M_NECP, M_WAITOK);
4975 if (new_mapping->string != NULL) {
4976 memcpy(new_mapping->string, string, length);
4977 new_mapping->id = necp_get_new_string_id();
4978 new_mapping->refcount = 1;
4979 LIST_INSERT_HEAD(list, new_mapping, chain);
4980 string_id = new_mapping->id;
4981 } else {
4982 FREE(new_mapping, M_NECP);
4983 new_mapping = NULL;
4984 }
4985 }
4986 }
4987 return (string_id);
4988 }
4989
4990 static bool
4991 necp_remove_string_to_id_mapping(struct necp_string_id_mapping_list *list, char *string)
4992 {
4993 struct necp_string_id_mapping *existing_mapping = NULL;
4994
4995 LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
4996
4997 existing_mapping = necp_lookup_string_to_id_locked(list, string);
4998 if (existing_mapping != NULL) {
4999 if (--existing_mapping->refcount == 0) {
5000 LIST_REMOVE(existing_mapping, chain);
5001 FREE(existing_mapping->string, M_NECP);
5002 FREE(existing_mapping, M_NECP);
5003 }
5004 return (TRUE);
5005 }
5006
5007 return (FALSE);
5008 }
5009
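/*
 * Individual route rule IDs stay in the range 1..UINT16_MAX; IDs above UINT16_MAX are
 * reserved for aggregate route rules (see necp_get_new_aggregate_route_rule_id() below).
 */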
5010 static u_int32_t
5011 necp_get_new_route_rule_id(void)
5012 {
5013 u_int32_t newid = 0;
5014
5015 LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
5016
5017 necp_last_route_rule_id++;
5018 if (necp_last_route_rule_id < 1 || necp_last_route_rule_id > UINT16_MAX) {
5019 necp_last_route_rule_id = 1;
5020 }
5021
5022 newid = necp_last_route_rule_id;
5023 if (newid == 0) {
5024 NECPLOG0(LOG_DEBUG, "Allocate route rule id failed.\n");
5025 return (0);
5026 }
5027
5028 return (newid);
5029 }
5030
5031 static u_int32_t
5032 necp_get_new_aggregate_route_rule_id(void)
5033 {
5034 u_int32_t newid = 0;
5035
5036 LCK_RW_ASSERT(&necp_route_rule_lock, LCK_RW_ASSERT_EXCLUSIVE);
5037
5038 necp_last_aggregate_route_rule_id++;
5039 if (necp_last_aggregate_route_rule_id <= UINT16_MAX) {
5040 necp_last_aggregate_route_rule_id = UINT16_MAX + 1;
5041 }
5042
5043 newid = necp_last_aggregate_route_rule_id;
5044 if (newid == 0) {
5045 NECPLOG0(LOG_DEBUG, "Allocate aggregate route rule id failed.\n");
5046 return (0);
5047 }
5048
5049 return (newid);
5050 }
5051
5052 static struct necp_route_rule *
5053 necp_lookup_route_rule_locked(struct necp_route_rule_list *list, u_int32_t route_rule_id)
5054 {
5055 struct necp_route_rule *searchentry = NULL;
5056 struct necp_route_rule *foundentry = NULL;
5057
5058 LIST_FOREACH(searchentry, list, chain) {
5059 if (searchentry->id == route_rule_id) {
5060 foundentry = searchentry;
5061 break;
5062 }
5063 }
5064
5065 return (foundentry);
5066 }
5067
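/*
 * Find an existing route rule whose default and per-class actions match exactly and whose
 * exception-interface entries form the same set (order-insensitive) as the supplied arrays.
 * Used to deduplicate rules so that identical contents share one refcounted entry.
 */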
5068 static struct necp_route_rule *
5069 necp_lookup_route_rule_by_contents_locked(struct necp_route_rule_list *list, u_int32_t default_action, u_int8_t cellular_action, u_int8_t wifi_action, u_int8_t wired_action, u_int8_t expensive_action, u_int32_t *if_indices, u_int8_t *if_actions)
5070 {
5071 struct necp_route_rule *searchentry = NULL;
5072 struct necp_route_rule *foundentry = NULL;
5073
5074 LIST_FOREACH(searchentry, list, chain) {
5075 if (searchentry->default_action == default_action &&
5076 searchentry->cellular_action == cellular_action &&
5077 searchentry->wifi_action == wifi_action &&
5078 searchentry->wired_action == wired_action &&
5079 searchentry->expensive_action == expensive_action) {
5080 bool match_failed = FALSE;
5081 size_t index_a = 0;
5082 size_t index_b = 0;
5083 size_t count_a = 0;
5084 size_t count_b = 0;
5085 for (index_a = 0; index_a < MAX_ROUTE_RULE_INTERFACES; index_a++) {
5086 bool found_index = FALSE;
5087 if (searchentry->exception_if_indices[index_a] == 0) {
5088 break;
5089 }
5090 count_a++;
5091 for (index_b = 0; index_b < MAX_ROUTE_RULE_INTERFACES; index_b++) {
5092 if (if_indices[index_b] == 0) {
5093 break;
5094 }
5095 if (index_b >= count_b) {
5096 count_b = index_b + 1;
5097 }
5098 if (searchentry->exception_if_indices[index_a] == if_indices[index_b] &&
5099 searchentry->exception_if_actions[index_a] == if_actions[index_b]) {
5100 found_index = TRUE;
5101 break;
5102 }
5103 }
5104 if (!found_index) {
5105 match_failed = TRUE;
5106 break;
5107 }
5108 }
5109 if (!match_failed && count_a == count_b) {
5110 foundentry = searchentry;
5111 break;
5112 }
5113 }
5114 }
5115
5116 return (foundentry);
5117 }
5118
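/*
 * Parse a serialized array of route rule TLVs into a struct necp_route_rule. Each TLV's
 * value encodes an action (rule type) plus either interface-class flags
 * (cellular/WiFi/wired/expensive, or no flags for the default action) or an interface
 * name, which is resolved to an interface index. For example, a client could express
 * "deny cellular, allow everything else" with one DENY_INTERFACE rule carrying the
 * cellular flag, leaving the default action as ALLOW_INTERFACE. Identical contents are
 * deduplicated through necp_lookup_route_rule_by_contents_locked() and refcounted; the
 * function returns the rule ID, or 0 on failure.
 */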
5119 static u_int32_t
5120 necp_create_route_rule(struct necp_route_rule_list *list, u_int8_t *route_rules_array, u_int32_t route_rules_array_size)
5121 {
5122 size_t offset = 0;
5123 u_int32_t route_rule_id = 0;
5124 struct necp_route_rule *existing_rule = NULL;
5125 u_int32_t default_action = NECP_ROUTE_RULE_ALLOW_INTERFACE;
5126 u_int8_t cellular_action = NECP_ROUTE_RULE_NONE;
5127 u_int8_t wifi_action = NECP_ROUTE_RULE_NONE;
5128 u_int8_t wired_action = NECP_ROUTE_RULE_NONE;
5129 u_int8_t expensive_action = NECP_ROUTE_RULE_NONE;
5130 u_int32_t if_indices[MAX_ROUTE_RULE_INTERFACES];
5131 size_t num_valid_indices = 0;
5132 memset(&if_indices, 0, sizeof(if_indices));
5133 u_int8_t if_actions[MAX_ROUTE_RULE_INTERFACES];
5134 memset(&if_actions, 0, sizeof(if_actions));
5135
5136 LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
5137
5138 if (route_rules_array == NULL || route_rules_array_size == 0) {
5139 return (0);
5140 }
5141
5142 // Process rules
5143 while (offset < route_rules_array_size) {
5144 ifnet_t rule_interface = NULL;
5145 char interface_name[IFXNAMSIZ];
5146 u_int32_t length = 0;
5147 u_int8_t *value = necp_buffer_get_tlv_value(route_rules_array, offset, &length);
5148
5149 u_int8_t rule_type = necp_policy_condition_get_type_from_buffer(value, length);
5150 u_int8_t rule_flags = necp_policy_condition_get_flags_from_buffer(value, length);
5151 u_int32_t rule_length = necp_policy_condition_get_value_length_from_buffer(value, length);
5152 u_int8_t *rule_value = necp_policy_condition_get_value_pointer_from_buffer(value, length);
5153
5154 if (rule_type == NECP_ROUTE_RULE_NONE) {
5155 // Don't allow an explicit rule to use the None action; advance past this TLV so the loop still makes progress
offset += sizeof(u_int8_t) + sizeof(u_int32_t) + length;
5156 continue;
5157 }
5158
5159 if (rule_length == 0) {
5160 if (rule_flags & NECP_ROUTE_RULE_FLAG_CELLULAR) {
5161 cellular_action = rule_type;
5162 }
5163 if (rule_flags & NECP_ROUTE_RULE_FLAG_WIFI) {
5164 wifi_action = rule_type;
5165 }
5166 if (rule_flags & NECP_ROUTE_RULE_FLAG_WIRED) {
5167 wired_action = rule_type;
5168 }
5169 if (rule_flags & NECP_ROUTE_RULE_FLAG_EXPENSIVE) {
5170 expensive_action = rule_type;
5171 }
5172 if (rule_flags == 0) {
5173 default_action = rule_type;
5174 }
5175 offset += sizeof(u_int8_t) + sizeof(u_int32_t) + length;
5176 continue;
5177 }
5178
5179 if (num_valid_indices >= MAX_ROUTE_RULE_INTERFACES) {
5180 offset += sizeof(u_int8_t) + sizeof(u_int32_t) + length;
5181 continue;
5182 }
5183
5184 if (rule_length <= IFXNAMSIZ) {
5185 memcpy(interface_name, rule_value, rule_length);
5186 interface_name[rule_length - 1] = 0; // Make sure the string is NULL terminated
5187 if (ifnet_find_by_name(interface_name, &rule_interface) == 0) {
5188 if_actions[num_valid_indices] = rule_type;
5189 if_indices[num_valid_indices++] = rule_interface->if_index;
5190 ifnet_release(rule_interface);
5191 }
5192 }
5193 offset += sizeof(u_int8_t) + sizeof(u_int32_t) + length;
5194 }
5195
5196 existing_rule = necp_lookup_route_rule_by_contents_locked(list, default_action, cellular_action, wifi_action, wired_action, expensive_action, if_indices, if_actions);
5197 if (existing_rule != NULL) {
5198 route_rule_id = existing_rule->id;
5199 existing_rule->refcount++;
5200 } else {
5201 struct necp_route_rule *new_rule = NULL;
5202 MALLOC(new_rule, struct necp_route_rule *, sizeof(struct necp_route_rule), M_NECP, M_WAITOK);
5203 if (new_rule != NULL) {
5204 memset(new_rule, 0, sizeof(struct necp_route_rule));
5205 route_rule_id = new_rule->id = necp_get_new_route_rule_id();
5206 new_rule->default_action = default_action;
5207 new_rule->cellular_action = cellular_action;
5208 new_rule->wifi_action = wifi_action;
5209 new_rule->wired_action = wired_action;
5210 new_rule->expensive_action = expensive_action;
5211 memcpy(&new_rule->exception_if_indices, &if_indices, sizeof(if_indices));
5212 memcpy(&new_rule->exception_if_actions, &if_actions, sizeof(if_actions));
5213 new_rule->refcount = 1;
5214 LIST_INSERT_HEAD(list, new_rule, chain);
5215 }
5216 }
5217 return (route_rule_id);
5218 }
5219
5220 static void
5221 necp_remove_aggregate_route_rule_for_id(u_int32_t rule_id)
5222 {
5223 if (rule_id) {
5224 lck_rw_lock_exclusive(&necp_route_rule_lock);
5225
5226 struct necp_aggregate_route_rule *existing_rule = NULL;
5227 struct necp_aggregate_route_rule *tmp_rule = NULL;
5228
5229 LIST_FOREACH_SAFE(existing_rule, &necp_aggregate_route_rules, chain, tmp_rule) {
5230 int index = 0;
5231 for (index = 0; index < MAX_AGGREGATE_ROUTE_RULES; index++) {
5232 u_int32_t route_rule_id = existing_rule->rule_ids[index];
5233 if (route_rule_id == rule_id) {
5234 LIST_REMOVE(existing_rule, chain);
5235 FREE(existing_rule, M_NECP);
5236 break;
5237 }
5238 }
5239 }
5240
5241 lck_rw_done(&necp_route_rule_lock);
5242 }
5243 }
5244
5245 static bool
5246 necp_remove_route_rule(struct necp_route_rule_list *list, u_int32_t route_rule_id)
5247 {
5248 struct necp_route_rule *existing_rule = NULL;
5249
5250 LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
5251
5252 existing_rule = necp_lookup_route_rule_locked(list, route_rule_id);
5253 if (existing_rule != NULL) {
5254 if (--existing_rule->refcount == 0) {
5255 necp_remove_aggregate_route_rule_for_id(existing_rule->id);
5256 LIST_REMOVE(existing_rule, chain);
5257 FREE(existing_rule, M_NECP);
5258 }
5259 return (TRUE);
5260 }
5261
5262 return (FALSE);
5263 }
5264
5265 static struct necp_aggregate_route_rule *
5266 necp_lookup_aggregate_route_rule_locked(u_int32_t route_rule_id)
5267 {
5268 struct necp_aggregate_route_rule *searchentry = NULL;
5269 struct necp_aggregate_route_rule *foundentry = NULL;
5270
5271 lck_rw_lock_shared(&necp_route_rule_lock);
5272
5273 LIST_FOREACH(searchentry, &necp_aggregate_route_rules, chain) {
5274 if (searchentry->id == route_rule_id) {
5275 foundentry = searchentry;
5276 break;
5277 }
5278 }
5279
5280 lck_rw_done(&necp_route_rule_lock);
5281
5282 return (foundentry);
5283 }
5284
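/*
 * An aggregate route rule bundles up to MAX_AGGREGATE_ROUTE_RULES individual rule IDs
 * under a single ID (always greater than UINT16_MAX). The list is scanned once without
 * the lock, then re-checked under necp_route_rule_lock before a new entry is inserted.
 */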
5285 static u_int32_t
5286 necp_create_aggregate_route_rule(u_int32_t *rule_ids)
5287 {
5288 u_int32_t aggregate_route_rule_id = 0;
5289 struct necp_aggregate_route_rule *new_rule = NULL;
5290 struct necp_aggregate_route_rule *existing_rule = NULL;
5291
5292 LIST_FOREACH(existing_rule, &necp_aggregate_route_rules, chain) {
5293 if (memcmp(existing_rule->rule_ids, rule_ids, (sizeof(u_int32_t) * MAX_AGGREGATE_ROUTE_RULES)) == 0) {
5294 return (existing_rule->id);
5295 }
5296 }
5297
5298 lck_rw_lock_exclusive(&necp_route_rule_lock);
5299
5300 LIST_FOREACH(existing_rule, &necp_aggregate_route_rules, chain) {
5301 // Re-check, in case something else created the rule while we are waiting to lock
5302 if (memcmp(existing_rule->rule_ids, rule_ids, (sizeof(u_int32_t) * MAX_AGGREGATE_ROUTE_RULES)) == 0) {
5303 lck_rw_done(&necp_route_rule_lock);
5304 return (existing_rule->id);
5305 }
5306 }
5307
5308 MALLOC(new_rule, struct necp_aggregate_route_rule *, sizeof(struct necp_aggregate_route_rule), M_NECP, M_WAITOK);
5309 if (new_rule != NULL) {
5310 memset(new_rule, 0, sizeof(struct necp_aggregate_route_rule));
5311 aggregate_route_rule_id = new_rule->id = necp_get_new_aggregate_route_rule_id();
5313 memcpy(new_rule->rule_ids, rule_ids, (sizeof(u_int32_t) * MAX_AGGREGATE_ROUTE_RULES));
5314 LIST_INSERT_HEAD(&necp_aggregate_route_rules, new_rule, chain);
5315 }
5316 lck_rw_done(&necp_route_rule_lock);
5317
5318 return (aggregate_route_rule_id);
5319 }
5320
5321 #define NECP_NULL_SERVICE_ID 1
5322 static u_int32_t
5323 necp_get_new_uuid_id(void)
5324 {
5325 u_int32_t newid = 0;
5326
5327 LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
5328
5329 necp_last_uuid_id++;
5330 if (necp_last_uuid_id < (NECP_NULL_SERVICE_ID + 1)) {
5331 necp_last_uuid_id = (NECP_NULL_SERVICE_ID + 1);
5332 }
5333
5334 newid = necp_last_uuid_id;
5335 if (newid == 0) {
5336 NECPLOG0(LOG_DEBUG, "Allocate uuid id failed.\n");
5337 return (0);
5338 }
5339
5340 return (newid);
5341 }
5342
5343 static struct necp_uuid_id_mapping *
5344 necp_uuid_lookup_app_id_locked(uuid_t uuid)
5345 {
5346 struct necp_uuid_id_mapping *searchentry = NULL;
5347 struct necp_uuid_id_mapping *foundentry = NULL;
5348
5349 LIST_FOREACH(searchentry, APPUUIDHASH(uuid), chain) {
5350 if (uuid_compare(searchentry->uuid, uuid) == 0) {
5351 foundentry = searchentry;
5352 break;
5353 }
5354 }
5355
5356 return (foundentry);
5357 }
5358
5359 static struct necp_uuid_id_mapping *
5360 necp_uuid_lookup_uuid_with_app_id_locked(u_int32_t local_id)
5361 {
5362 struct necp_uuid_id_mapping *searchentry = NULL;
5363 struct necp_uuid_id_mapping *foundentry = NULL;
5364
5365 struct necp_uuid_id_mapping_head *uuid_list_head = NULL;
5366 for (uuid_list_head = &necp_uuid_app_id_hashtbl[necp_uuid_app_id_hash_num_buckets - 1]; uuid_list_head >= necp_uuid_app_id_hashtbl; uuid_list_head--) {
5367 LIST_FOREACH(searchentry, uuid_list_head, chain) {
5368 if (searchentry->id == local_id) {
5369 foundentry = searchentry;
5370 break;
5371 }
5372 }
5373 }
5374
5375 return (foundentry);
5376 }
5377
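/*
 * Refcounted mapping from an application UUID to a compact ID, stored in the APPUUIDHASH
 * hash table. table_refcount separately tracks how many policies require the UUID to be
 * present in the kernel's proc UUID policy table (see
 * necp_kernel_socket_policies_update_uuid_table() below).
 */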
5378 static u_int32_t
5379 necp_create_uuid_app_id_mapping(uuid_t uuid, bool *allocated_mapping, bool uuid_policy_table)
5380 {
5381 u_int32_t local_id = 0;
5382 struct necp_uuid_id_mapping *existing_mapping = NULL;
5383
5384 LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
5385
5386 if (allocated_mapping) {
5387 *allocated_mapping = FALSE;
5388 }
5389
5390 existing_mapping = necp_uuid_lookup_app_id_locked(uuid);
5391 if (existing_mapping != NULL) {
5392 local_id = existing_mapping->id;
5393 existing_mapping->refcount++;
5394 if (uuid_policy_table) {
5395 existing_mapping->table_refcount++;
5396 }
5397 } else {
5398 struct necp_uuid_id_mapping *new_mapping = NULL;
5399 MALLOC(new_mapping, struct necp_uuid_id_mapping *, sizeof(*new_mapping), M_NECP, M_WAITOK);
5400 if (new_mapping != NULL) {
5401 uuid_copy(new_mapping->uuid, uuid);
5402 new_mapping->id = necp_get_new_uuid_id();
5403 new_mapping->refcount = 1;
5404 if (uuid_policy_table) {
5405 new_mapping->table_refcount = 1;
5406 } else {
5407 new_mapping->table_refcount = 0;
5408 }
5409
5410 LIST_INSERT_HEAD(APPUUIDHASH(uuid), new_mapping, chain);
5411
5412 if (allocated_mapping) {
5413 *allocated_mapping = TRUE;
5414 }
5415
5416 local_id = new_mapping->id;
5417 }
5418 }
5419
5420 return (local_id);
5421 }
5422
5423 static bool
5424 necp_remove_uuid_app_id_mapping(uuid_t uuid, bool *removed_mapping, bool uuid_policy_table)
5425 {
5426 struct necp_uuid_id_mapping *existing_mapping = NULL;
5427
5428 LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
5429
5430 if (removed_mapping) {
5431 *removed_mapping = FALSE;
5432 }
5433
5434 existing_mapping = necp_uuid_lookup_app_id_locked(uuid);
5435 if (existing_mapping != NULL) {
5436 if (uuid_policy_table) {
5437 existing_mapping->table_refcount--;
5438 }
5439 if (--existing_mapping->refcount == 0) {
5440 LIST_REMOVE(existing_mapping, chain);
5441 FREE(existing_mapping, M_NECP);
5442 if (removed_mapping) {
5443 *removed_mapping = TRUE;
5444 }
5445 }
5446 return (TRUE);
5447 }
5448
5449 return (FALSE);
5450 }
5451
5452 static struct necp_uuid_id_mapping *
5453 necp_uuid_get_null_service_id_mapping(void)
5454 {
5455 static struct necp_uuid_id_mapping null_mapping;
5456 uuid_clear(null_mapping.uuid);
5457 null_mapping.id = NECP_NULL_SERVICE_ID;
5458
5459 return (&null_mapping);
5460 }
5461
5462 static struct necp_uuid_id_mapping *
5463 necp_uuid_lookup_service_id_locked(uuid_t uuid)
5464 {
5465 struct necp_uuid_id_mapping *searchentry = NULL;
5466 struct necp_uuid_id_mapping *foundentry = NULL;
5467
5468 if (uuid_is_null(uuid)) {
5469 return necp_uuid_get_null_service_id_mapping();
5470 }
5471
5472 LIST_FOREACH(searchentry, &necp_uuid_service_id_list, chain) {
5473 if (uuid_compare(searchentry->uuid, uuid) == 0) {
5474 foundentry = searchentry;
5475 break;
5476 }
5477 }
5478
5479 return (foundentry);
5480 }
5481
5482 static struct necp_uuid_id_mapping *
5483 necp_uuid_lookup_uuid_with_service_id_locked(u_int32_t local_id)
5484 {
5485 struct necp_uuid_id_mapping *searchentry = NULL;
5486 struct necp_uuid_id_mapping *foundentry = NULL;
5487
5488 if (local_id == NECP_NULL_SERVICE_ID) {
5489 return necp_uuid_get_null_service_id_mapping();
5490 }
5491
5492 LIST_FOREACH(searchentry, &necp_uuid_service_id_list, chain) {
5493 if (searchentry->id == local_id) {
5494 foundentry = searchentry;
5495 break;
5496 }
5497 }
5498
5499 return (foundentry);
5500 }
5501
5502 static u_int32_t
5503 necp_create_uuid_service_id_mapping(uuid_t uuid)
5504 {
5505 u_int32_t local_id = 0;
5506 struct necp_uuid_id_mapping *existing_mapping = NULL;
5507
5508 if (uuid_is_null(uuid)) {
5509 return (NECP_NULL_SERVICE_ID);
5510 }
5511
5512 LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
5513
5514 existing_mapping = necp_uuid_lookup_service_id_locked(uuid);
5515 if (existing_mapping != NULL) {
5516 local_id = existing_mapping->id;
5517 existing_mapping->refcount++;
5518 } else {
5519 struct necp_uuid_id_mapping *new_mapping = NULL;
5520 MALLOC(new_mapping, struct necp_uuid_id_mapping *, sizeof(*new_mapping), M_NECP, M_WAITOK);
5521 if (new_mapping != NULL) {
5522 uuid_copy(new_mapping->uuid, uuid);
5523 new_mapping->id = necp_get_new_uuid_id();
5524 new_mapping->refcount = 1;
5525
5526 LIST_INSERT_HEAD(&necp_uuid_service_id_list, new_mapping, chain);
5527
5528 local_id = new_mapping->id;
5529 }
5530 }
5531
5532 return (local_id);
5533 }
5534
5535 static bool
5536 necp_remove_uuid_service_id_mapping(uuid_t uuid)
5537 {
5538 struct necp_uuid_id_mapping *existing_mapping = NULL;
5539
5540 if (uuid_is_null(uuid)) {
5541 return (TRUE);
5542 }
5543
5544 LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
5545
5546 existing_mapping = necp_uuid_lookup_service_id_locked(uuid);
5547 if (existing_mapping != NULL) {
5548 if (--existing_mapping->refcount == 0) {
5549 LIST_REMOVE(existing_mapping, chain);
5550 FREE(existing_mapping, M_NECP);
5551 }
5552 return (TRUE);
5553 }
5554
5555 return (FALSE);
5556 }
5557
5558
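/*
 * If the UUID-to-app-ID mappings have been marked dirty, clear the PROC_UUID_NECP_APP_POLICY
 * entries from the kernel proc UUID policy table and re-add every mapping that still has a
 * non-zero table refcount. Returns FALSE if the clear operation fails.
 */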
5559 static bool
5560 necp_kernel_socket_policies_update_uuid_table(void)
5561 {
5562 LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
5563
5564 if (necp_uuid_app_id_mappings_dirty) {
5565 if (proc_uuid_policy_kernel(PROC_UUID_POLICY_OPERATION_CLEAR, NULL, PROC_UUID_NECP_APP_POLICY) < 0) {
5566 NECPLOG0(LOG_DEBUG, "Error clearing uuids from policy table\n");
5567 return (FALSE);
5568 }
5569
5570 if (necp_num_uuid_app_id_mappings > 0) {
5571 struct necp_uuid_id_mapping_head *uuid_list_head = NULL;
5572 for (uuid_list_head = &necp_uuid_app_id_hashtbl[necp_uuid_app_id_hash_num_buckets - 1]; uuid_list_head >= necp_uuid_app_id_hashtbl; uuid_list_head--) {
5573 struct necp_uuid_id_mapping *mapping = NULL;
5574 LIST_FOREACH(mapping, uuid_list_head, chain) {
5575 if (mapping->table_refcount > 0 &&
5576 proc_uuid_policy_kernel(PROC_UUID_POLICY_OPERATION_ADD, mapping->uuid, PROC_UUID_NECP_APP_POLICY) < 0) {
5577 NECPLOG0(LOG_DEBUG, "Error adding uuid to policy table\n");
5578 }
5579 }
5580 }
5581 }
5582
5583 necp_uuid_app_id_mappings_dirty = FALSE;
5584 }
5585
5586 return (TRUE);
5587 }
5588
5589 #define NECP_KERNEL_VALID_IP_OUTPUT_CONDITIONS (NECP_KERNEL_CONDITION_ALL_INTERFACES | NECP_KERNEL_CONDITION_BOUND_INTERFACE | NECP_KERNEL_CONDITION_PROTOCOL | NECP_KERNEL_CONDITION_LOCAL_START | NECP_KERNEL_CONDITION_LOCAL_END | NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_REMOTE_START | NECP_KERNEL_CONDITION_REMOTE_END | NECP_KERNEL_CONDITION_REMOTE_PREFIX | NECP_KERNEL_CONDITION_POLICY_ID | NECP_KERNEL_CONDITION_LAST_INTERFACE)
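/*
 * Add an IP-output-layer kernel policy. Counterpart of the socket-layer policy add above:
 * the condition mask is restricted to NECP_KERNEL_VALID_IP_OUTPUT_CONDITIONS, mutually
 * exclusive conditions are sanitized away, and the policy is inserted into
 * necp_kernel_ip_output_policies sorted by session order, order, and suborder.
 */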
5590 static necp_kernel_policy_id
5591 necp_kernel_ip_output_policy_add(necp_policy_id parent_policy_id, necp_policy_order order, necp_policy_order suborder, u_int32_t session_order, int session_pid, u_int32_t condition_mask, u_int32_t condition_negated_mask, necp_kernel_policy_id cond_policy_id, ifnet_t cond_bound_interface, u_int32_t cond_last_interface_index, u_int16_t cond_protocol, union necp_sockaddr_union *cond_local_start, union necp_sockaddr_union *cond_local_end, u_int8_t cond_local_prefix, union necp_sockaddr_union *cond_remote_start, union necp_sockaddr_union *cond_remote_end, u_int8_t cond_remote_prefix, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter)
5592 {
5593 struct necp_kernel_ip_output_policy *new_kernel_policy = NULL;
5594 struct necp_kernel_ip_output_policy *tmp_kernel_policy = NULL;
5595
5596 MALLOC_ZONE(new_kernel_policy, struct necp_kernel_ip_output_policy *, sizeof(*new_kernel_policy), M_NECP_IP_POLICY, M_WAITOK);
5597 if (new_kernel_policy == NULL) {
5598 goto done;
5599 }
5600
5601 memset(new_kernel_policy, 0, sizeof(*new_kernel_policy)); // M_ZERO is not supported for MALLOC_ZONE
5602 new_kernel_policy->parent_policy_id = parent_policy_id;
5603 new_kernel_policy->id = necp_kernel_policy_get_new_id(false);
5604 new_kernel_policy->suborder = suborder;
5605 new_kernel_policy->order = order;
5606 new_kernel_policy->session_order = session_order;
5607 new_kernel_policy->session_pid = session_pid;
5608
5609 // Sanitize condition mask
5610 new_kernel_policy->condition_mask = (condition_mask & NECP_KERNEL_VALID_IP_OUTPUT_CONDITIONS);
5611 if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES) && (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE)) {
5612 new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_BOUND_INTERFACE;
5613 }
5614 if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) && (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX)) {
5615 new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_LOCAL_PREFIX;
5616 }
5617 if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) && (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX)) {
5618 new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_REMOTE_PREFIX;
5619 }
5620 new_kernel_policy->condition_negated_mask = condition_negated_mask & new_kernel_policy->condition_mask;
5621
5622 // Set condition values
5623 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID) {
5624 new_kernel_policy->cond_policy_id = cond_policy_id;
5625 }
5626 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
5627 if (cond_bound_interface) {
5628 ifnet_reference(cond_bound_interface);
5629 }
5630 new_kernel_policy->cond_bound_interface = cond_bound_interface;
5631 }
5632 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LAST_INTERFACE) {
5633 new_kernel_policy->cond_last_interface_index = cond_last_interface_index;
5634 }
5635 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
5636 new_kernel_policy->cond_protocol = cond_protocol;
5637 }
5638 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
5639 memcpy(&new_kernel_policy->cond_local_start, cond_local_start, cond_local_start->sa.sa_len);
5640 }
5641 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
5642 memcpy(&new_kernel_policy->cond_local_end, cond_local_end, cond_local_end->sa.sa_len);
5643 }
5644 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
5645 new_kernel_policy->cond_local_prefix = cond_local_prefix;
5646 }
5647 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
5648 memcpy(&new_kernel_policy->cond_remote_start, cond_remote_start, cond_remote_start->sa.sa_len);
5649 }
5650 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
5651 memcpy(&new_kernel_policy->cond_remote_end, cond_remote_end, cond_remote_end->sa.sa_len);
5652 }
5653 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
5654 new_kernel_policy->cond_remote_prefix = cond_remote_prefix;
5655 }
5656
5657 new_kernel_policy->result = result;
5658 memcpy(&new_kernel_policy->result_parameter, &result_parameter, sizeof(result_parameter));
5659
5660 if (necp_debug) {
5661 NECPLOG(LOG_DEBUG, "Added kernel policy: ip output, id=%d, mask=%x\n", new_kernel_policy->id, new_kernel_policy->condition_mask);
5662 }
5663 LIST_INSERT_SORTED_THRICE_ASCENDING(&necp_kernel_ip_output_policies, new_kernel_policy, chain, session_order, order, suborder, tmp_kernel_policy);
5664 done:
5665 return (new_kernel_policy ? new_kernel_policy->id : 0);
5666 }
5667
5668 static struct necp_kernel_ip_output_policy *
5669 necp_kernel_ip_output_policy_find(necp_kernel_policy_id policy_id)
5670 {
5671 struct necp_kernel_ip_output_policy *kernel_policy = NULL;
5672 struct necp_kernel_ip_output_policy *tmp_kernel_policy = NULL;
5673
5674 if (policy_id == 0) {
5675 return (NULL);
5676 }
5677
5678 LIST_FOREACH_SAFE(kernel_policy, &necp_kernel_ip_output_policies, chain, tmp_kernel_policy) {
5679 if (kernel_policy->id == policy_id) {
5680 return (kernel_policy);
5681 }
5682 }
5683
5684 return (NULL);
5685 }
5686
5687 static bool
5688 necp_kernel_ip_output_policy_delete(necp_kernel_policy_id policy_id)
5689 {
5690 struct necp_kernel_ip_output_policy *policy = NULL;
5691
5692 LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
5693
5694 policy = necp_kernel_ip_output_policy_find(policy_id);
5695 if (policy) {
5696 LIST_REMOVE(policy, chain);
5697
5698 if (policy->cond_bound_interface) {
5699 ifnet_release(policy->cond_bound_interface);
5700 policy->cond_bound_interface = NULL;
5701 }
5702
5703 FREE_ZONE(policy, sizeof(*policy), M_NECP_IP_POLICY);
5704 return (TRUE);
5705 }
5706
5707 return (FALSE);
5708 }
5709
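/*
 * Debug-only dump of every IP-output policy, organized by policy-ID bucket.
 */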
5710 static void
5711 necp_kernel_ip_output_policies_dump_all(void)
5712 {
5713 if (necp_debug) {
5714 struct necp_kernel_ip_output_policy *policy = NULL;
5715 int policy_i;
5716 int id_i;
5717 char result_string[MAX_RESULT_STRING_LEN];
5718 char proc_name_string[MAXCOMLEN + 1];
5719 memset(result_string, 0, MAX_RESULT_STRING_LEN);
5720 memset(proc_name_string, 0, MAXCOMLEN + 1);
5721
5722 NECPLOG0(LOG_DEBUG, "NECP IP Output Policies:\n");
5723 NECPLOG0(LOG_DEBUG, "-----------\n");
5724 for (id_i = 0; id_i < NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS; id_i++) {
5725 NECPLOG(LOG_DEBUG, " ID Bucket: %d\n", id_i);
5726 for (policy_i = 0; necp_kernel_ip_output_policies_map[id_i] != NULL && (necp_kernel_ip_output_policies_map[id_i])[policy_i] != NULL; policy_i++) {
5727 policy = (necp_kernel_ip_output_policies_map[id_i])[policy_i];
5728 proc_name(policy->session_pid, proc_name_string, MAXCOMLEN);
5729 NECPLOG(LOG_DEBUG, "\t%3d. Policy ID: %5d\tProcess: %10.10s\tOrder: %04d.%04d.%d\tMask: %5x\tResult: %s\n", policy_i, policy->id, proc_name_string, policy->session_order, policy->order, policy->suborder, policy->condition_mask, necp_get_result_description(result_string, policy->result, policy->result_parameter));
5730 }
5731 NECPLOG0(LOG_DEBUG, "-----------\n");
5732 }
5733 }
5734 }
5735
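// Determines whether an upper (higher-priority) IP output policy's result can shadow a
// lower policy. A SKIP result only covers lower policies from the same session whose
// order falls inside the skip window (before skip_policy_order); every other result
// type (drop, tunnel, hard pass) is treated as overlapping.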
5736 static inline bool
5737 necp_kernel_ip_output_policy_results_overlap(struct necp_kernel_ip_output_policy *upper_policy, struct necp_kernel_ip_output_policy *lower_policy)
5738 {
5739 if (upper_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
5740 if (upper_policy->session_order != lower_policy->session_order) {
5741 // A skip cannot override a policy of a different session
5742 return (FALSE);
5743 } else {
5744 if (upper_policy->result_parameter.skip_policy_order == 0 ||
5745 lower_policy->order >= upper_policy->result_parameter.skip_policy_order) {
5746 // This policy is beyond the skip
5747 return (FALSE);
5748 } else {
5749 // This policy is inside the skip
5750 return (TRUE);
5751 }
5752 }
5753 }
5754
5755 // All other IP Output policy results (drop, tunnel, hard pass) currently overlap
5756 return (TRUE);
5757 }
5758
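// Walks the policies already placed in a bucket, in priority order, and decides whether
// the candidate policy could ever be the match when none of them are. A candidate is
// unnecessary if an earlier policy overlaps its result, applies to at least as broad a
// set of traffic (its condition mask is a subset of the candidate's, with matching
// negations and equal-or-wider address ranges and prefixes), and is not itself covered
// by an earlier skip window.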
5759 static bool
5760 necp_kernel_ip_output_policy_is_unnecessary(struct necp_kernel_ip_output_policy *policy, struct necp_kernel_ip_output_policy **policy_array, int valid_indices)
5761 {
5762 bool can_skip = FALSE;
5763 u_int32_t highest_skip_session_order = 0;
5764 u_int32_t highest_skip_order = 0;
5765 int i;
5766 for (i = 0; i < valid_indices; i++) {
5767 struct necp_kernel_ip_output_policy *compared_policy = policy_array[i];
5768
5769 // For policies in a skip window, we can't mark conflicting policies as unnecessary
5770 if (can_skip) {
5771 if (highest_skip_session_order != compared_policy->session_order ||
5772 (highest_skip_order != 0 && compared_policy->order >= highest_skip_order)) {
5773 // If we've moved on to the next session, or passed the skip window
5774 highest_skip_session_order = 0;
5775 highest_skip_order = 0;
5776 can_skip = FALSE;
5777 } else {
5778 // If this policy is also a skip, it can increase the skip window
5779 if (compared_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
5780 if (compared_policy->result_parameter.skip_policy_order > highest_skip_order) {
5781 highest_skip_order = compared_policy->result_parameter.skip_policy_order;
5782 }
5783 }
5784 continue;
5785 }
5786 }
5787
5788 if (compared_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
5789 // This policy is a skip. Set the skip window accordingly
5790 can_skip = TRUE;
5791 highest_skip_session_order = compared_policy->session_order;
5792 highest_skip_order = compared_policy->result_parameter.skip_policy_order;
5793 }
5794
5795 // The result of the compared policy must be able to block out this policy result
5796 if (!necp_kernel_ip_output_policy_results_overlap(compared_policy, policy)) {
5797 continue;
5798 }
5799
5800 // If new policy matches All Interfaces, compared policy must also
5801 if ((policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES) && !(compared_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES)) {
5802 continue;
5803 }
5804
5805 // A default policy (no conditions) always makes lower policies unnecessary
5806 if (compared_policy->condition_mask == 0) {
5807 return (TRUE);
5808 }
5809
5810 // Compared must be more general than policy, and include only conditions within policy
5811 if ((policy->condition_mask & compared_policy->condition_mask) != compared_policy->condition_mask) {
5812 continue;
5813 }
5814
5815 // Negative conditions must match for the overlapping conditions
5816 if ((policy->condition_negated_mask & compared_policy->condition_mask) != (compared_policy->condition_negated_mask & compared_policy->condition_mask)) {
5817 continue;
5818 }
5819
5820 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID &&
5821 compared_policy->cond_policy_id != policy->cond_policy_id) {
5822 continue;
5823 }
5824
5825 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE &&
5826 compared_policy->cond_bound_interface != policy->cond_bound_interface) {
5827 continue;
5828 }
5829
5830 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL &&
5831 compared_policy->cond_protocol != policy->cond_protocol) {
5832 continue;
5833 }
5834
5835 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
5836 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
5837 if (!necp_is_range_in_range((struct sockaddr *)&policy->cond_local_start, (struct sockaddr *)&policy->cond_local_end, (struct sockaddr *)&compared_policy->cond_local_start, (struct sockaddr *)&compared_policy->cond_local_end)) {
5838 continue;
5839 }
5840 } else if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
5841 if (compared_policy->cond_local_prefix > policy->cond_local_prefix ||
5842 !necp_is_addr_in_subnet((struct sockaddr *)&policy->cond_local_start, (struct sockaddr *)&compared_policy->cond_local_start, compared_policy->cond_local_prefix)) {
5843 continue;
5844 }
5845 }
5846 }
5847
5848 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
5849 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
5850 if (!necp_is_range_in_range((struct sockaddr *)&policy->cond_remote_start, (struct sockaddr *)&policy->cond_remote_end, (struct sockaddr *)&compared_policy->cond_remote_start, (struct sockaddr *)&compared_policy->cond_remote_end)) {
5851 continue;
5852 }
5853 } else if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
5854 if (compared_policy->cond_remote_prefix > policy->cond_remote_prefix ||
5855 !necp_is_addr_in_subnet((struct sockaddr *)&policy->cond_remote_start, (struct sockaddr *)&compared_policy->cond_remote_start, compared_policy->cond_remote_prefix)) {
5856 continue;
5857 }
5858 }
5859 }
5860
5861 return (TRUE);
5862 }
5863
5864 return (FALSE);
5865 }
5866
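// Rebuilds the IP output policy lookup map from the master policy list. Pass one counts
// how many entries each ID bucket needs (policies without a POLICY_ID condition are
// counted in every bucket); pass two allocates a NULL-terminated pointer array per
// bucket and inserts each policy unless necp_kernel_ip_output_policy_is_unnecessary()
// shows it can never be the match. The aggregate condition mask and policy counts are
// recomputed along the way.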
5867 static bool
5868 necp_kernel_ip_output_policies_reprocess(void)
5869 {
5870 int i;
5871 int bucket_allocation_counts[NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS];
5872 int bucket_current_free_index[NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS];
5873 struct necp_kernel_ip_output_policy *kernel_policy = NULL;
5874
5875 LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
5876
5877 // Reset mask to 0
5878 necp_kernel_ip_output_policies_condition_mask = 0;
5879 necp_kernel_ip_output_policies_count = 0;
5880 necp_kernel_ip_output_policies_non_id_count = 0;
5881
5882 for (i = 0; i < NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS; i++) {
5883 if (necp_kernel_ip_output_policies_map[i] != NULL) {
5884 FREE(necp_kernel_ip_output_policies_map[i], M_NECP);
5885 necp_kernel_ip_output_policies_map[i] = NULL;
5886 }
5887
5888 // Init counts
5889 bucket_allocation_counts[i] = 0;
5890 }
5891
5892 LIST_FOREACH(kernel_policy, &necp_kernel_ip_output_policies, chain) {
5893 // Update mask
5894 necp_kernel_ip_output_policies_condition_mask |= kernel_policy->condition_mask;
5895 necp_kernel_ip_output_policies_count++;
5896
5897 // Update bucket counts
5898 if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID)) {
5899 necp_kernel_ip_output_policies_non_id_count++;
5900 for (i = 0; i < NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS; i++) {
5901 bucket_allocation_counts[i]++;
5902 }
5903 } else {
5904 bucket_allocation_counts[NECP_IP_OUTPUT_MAP_ID_TO_BUCKET(kernel_policy->cond_policy_id)]++;
5905 }
5906 }
5907
5908 for (i = 0; i < NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS; i++) {
5909 if (bucket_allocation_counts[i] > 0) {
5910 // Allocate a NULL-terminated array of policy pointers for each bucket
5911 MALLOC(necp_kernel_ip_output_policies_map[i], struct necp_kernel_ip_output_policy **, sizeof(struct necp_kernel_ip_output_policy *) * (bucket_allocation_counts[i] + 1), M_NECP, M_WAITOK);
5912 if (necp_kernel_ip_output_policies_map[i] == NULL) {
5913 goto fail;
5914 }
5915
5916 // Initialize the first entry to NULL
5917 (necp_kernel_ip_output_policies_map[i])[0] = NULL;
5918 }
5919 bucket_current_free_index[i] = 0;
5920 }
5921
5922 LIST_FOREACH(kernel_policy, &necp_kernel_ip_output_policies, chain) {
5923 // Insert pointers into map
5924 if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID)) {
5925 for (i = 0; i < NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS; i++) {
5926 if (!necp_kernel_ip_output_policy_is_unnecessary(kernel_policy, necp_kernel_ip_output_policies_map[i], bucket_current_free_index[i])) {
5927 (necp_kernel_ip_output_policies_map[i])[(bucket_current_free_index[i])] = kernel_policy;
5928 bucket_current_free_index[i]++;
5929 (necp_kernel_ip_output_policies_map[i])[(bucket_current_free_index[i])] = NULL;
5930 }
5931 }
5932 } else {
5933 i = NECP_IP_OUTPUT_MAP_ID_TO_BUCKET(kernel_policy->cond_policy_id);
5934 if (!necp_kernel_ip_output_policy_is_unnecessary(kernel_policy, necp_kernel_ip_output_policies_map[i], bucket_current_free_index[i])) {
5935 (necp_kernel_ip_output_policies_map[i])[(bucket_current_free_index[i])] = kernel_policy;
5936 bucket_current_free_index[i]++;
5937 (necp_kernel_ip_output_policies_map[i])[(bucket_current_free_index[i])] = NULL;
5938 }
5939 }
5940 }
5941 necp_kernel_ip_output_policies_dump_all();
5942 return (TRUE);
5943
5944 fail:
5945 // Free memory, reset mask to 0
5946 necp_kernel_ip_output_policies_condition_mask = 0;
5947 necp_kernel_ip_output_policies_count = 0;
5948 necp_kernel_ip_output_policies_non_id_count = 0;
5949 for (i = 0; i < NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS; i++) {
5950 if (necp_kernel_ip_output_policies_map[i] != NULL) {
5951 FREE(necp_kernel_ip_output_policies_map[i], M_NECP);
5952 necp_kernel_ip_output_policies_map[i] = NULL;
5953 }
5954 }
5955 return (FALSE);
5956 }
5957
5958 // Outbound Policy Matching
5959 // ---------------------
5960 struct substring {
5961 char *string;
5962 size_t length;
5963 };
5964
5965 static struct substring
5966 necp_trim_dots_and_stars(char *string, size_t length)
5967 {
5968 struct substring sub;
5969 sub.string = string;
5970 sub.length = string ? length : 0;
5971
5972 while (sub.length && (sub.string[0] == '.' || sub.string[0] == '*')) {
5973 sub.string++;
5974 sub.length--;
5975 }
5976
5977 while (sub.length && (sub.string[sub.length - 1] == '.' || sub.string[sub.length - 1] == '*')) {
5978 sub.length--;
5979 }
5980
5981 return (sub);
5982 }
5983
5984 static char *
5985 necp_create_trimmed_domain(char *string, size_t length)
5986 {
5987 char *trimmed_domain = NULL;
5988 struct substring sub = necp_trim_dots_and_stars(string, length);
5989
5990 MALLOC(trimmed_domain, char *, sub.length + 1, M_NECP, M_WAITOK);
5991 if (trimmed_domain == NULL) {
5992 return (NULL);
5993 }
5994
5995 memcpy(trimmed_domain, sub.string, sub.length);
5996 trimmed_domain[sub.length] = 0;
5997
5998 return (trimmed_domain);
5999 }
6000
6001 static inline int
6002 necp_count_dots(char *string, size_t length)
6003 {
6004 int dot_count = 0;
6005 size_t i = 0;
6006
6007 for (i = 0; i < length; i++) {
6008 if (string[i] == '.') {
6009 dot_count++;
6010 }
6011 }
6012
6013 return (dot_count);
6014 }
6015
6016 static bool
6017 necp_check_suffix(struct substring parent, struct substring suffix, bool require_dot_before_suffix)
6018 {
6019 if (parent.length <= suffix.length) {
6020 return (FALSE);
6021 }
6022
6023 size_t length_difference = (parent.length - suffix.length);
6024
6025 if (require_dot_before_suffix) {
6026 if (((char *)(parent.string + length_difference - 1))[0] != '.') {
6027 return (FALSE);
6028 }
6029 }
6030
6031 // strncasecmp folds case only for ASCII characters; non-ASCII UTF-8 bytes are compared exactly
6032 return (strncasecmp(parent.string + length_difference, suffix.string, suffix.length) == 0);
6033 }
6034
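// Compares a hostname against a policy domain condition (the hostname substring has
// already had leading and trailing '.' and '*' trimmed). With equal dot counts the two
// strings must match in full, case-insensitively; with fewer dots in the domain, the
// domain must be a suffix of the hostname preceded by a '.', so "example.com" matches
// "www.example.com" but not "badexample.com".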
6035 static bool
6036 necp_hostname_matches_domain(struct substring hostname_substring, u_int8_t hostname_dot_count, char *domain, u_int8_t domain_dot_count)
6037 {
6038 if (hostname_substring.string == NULL || domain == NULL) {
6039 return (hostname_substring.string == domain);
6040 }
6041
6042 struct substring domain_substring;
6043 domain_substring.string = domain;
6044 domain_substring.length = strlen(domain);
6045
6046 if (hostname_dot_count == domain_dot_count) {
6047 // strncasecmp folds case only for ASCII characters; non-ASCII UTF-8 bytes are compared exactly
6048 if (hostname_substring.length == domain_substring.length &&
6049 strncasecmp(hostname_substring.string, domain_substring.string, hostname_substring.length) == 0) {
6050 return (TRUE);
6051 }
6052 } else if (domain_dot_count < hostname_dot_count) {
6053 if (necp_check_suffix(hostname_substring, domain_substring, TRUE)) {
6054 return (TRUE);
6055 }
6056 }
6057
6058 return (FALSE);
6059 }
6060
6061 static char *
6062 necp_copy_string(char *string, size_t length)
6063 {
6064 char *copied_string = NULL;
6065
6066 MALLOC(copied_string, char *, length + 1, M_NECP, M_WAITOK);
6067 if (copied_string == NULL) {
6068 return (NULL);
6069 }
6070
6071 memcpy(copied_string, string, length);
6072 copied_string[length] = 0;
6073
6074 return (copied_string);
6075 }
6076
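// If the process itself failed the PRIV_NET_PRIVILEGED_NECP_MATCH check, fall back to
// the credential of its jetsam coalition leader: the entitlement condition is satisfied
// for a coalition member when the leader holds the entitlement.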
6077 static inline void
6078 necp_get_parent_cred_result(proc_t proc, struct necp_socket_info *info)
6079 {
6080 task_t task = proc_task(proc ? proc : current_proc());
6081 coalition_t coal = COALITION_NULL;
6082 Boolean is_leader = coalition_is_leader(task, COALITION_TYPE_JETSAM, &coal);
6083
6084 if (is_leader == TRUE) {
6085 // No parent, nothing to do
6086 return;
6087 }
6088
6089 if (coal != NULL) {
6090 task_t lead_task = coalition_get_leader(coal);
6091 if (lead_task != NULL) {
6092 proc_t lead_proc = get_bsdtask_info(lead_task);
6093 if (lead_proc != NULL) {
6094 kauth_cred_t lead_cred = kauth_cred_proc_ref(lead_proc);
6095 if (lead_cred != NULL) {
6096 errno_t cred_result = priv_check_cred(lead_cred, PRIV_NET_PRIVILEGED_NECP_MATCH, 0);
6097 kauth_cred_unref(&lead_cred);
6098 info->cred_result = cred_result;
6099 }
6100 }
6101 task_deallocate(lead_task);
6102 }
6103 }
6104 }
6105
6106 #define NECP_KERNEL_ADDRESS_TYPE_CONDITIONS (NECP_KERNEL_CONDITION_LOCAL_START | NECP_KERNEL_CONDITION_LOCAL_END | NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_REMOTE_START | NECP_KERNEL_CONDITION_REMOTE_END | NECP_KERNEL_CONDITION_REMOTE_PREFIX)
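// Builds the necp_socket_info used for application-level policy matching. The scalar
// fields (pid, uid, protocol, bound interface, traffic class) are always copied; the
// more expensive work (entitlement checks, UUID/account ID mapping lookups, domain and
// address copies) is done only when the aggregate application policy condition mask
// shows that some loaded policy actually conditions on those fields.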
6107 static void
6108 necp_application_fillout_info_locked(uuid_t application_uuid, uuid_t real_application_uuid, char *account, char *domain, pid_t pid, uid_t uid, u_int16_t protocol, u_int32_t bound_interface_index, u_int32_t traffic_class, union necp_sockaddr_union *local_addr, union necp_sockaddr_union *remote_addr, proc_t proc, struct necp_socket_info *info)
6109 {
6110 memset(info, 0, sizeof(struct necp_socket_info));
6111
6112 info->pid = pid;
6113 info->uid = uid;
6114 info->protocol = protocol;
6115 info->bound_interface_index = bound_interface_index;
6116 info->traffic_class = traffic_class;
6117
6118 if (necp_kernel_application_policies_condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT && proc != NULL) {
6119 info->cred_result = priv_check_cred(proc_ucred(proc), PRIV_NET_PRIVILEGED_NECP_MATCH, 0);
6120 if (info->cred_result != 0) {
6121 // Process does not have entitlement, check the parent process
6122 necp_get_parent_cred_result(proc, info);
6123 }
6124 }
6125
6126 if (necp_kernel_application_policies_condition_mask & NECP_KERNEL_CONDITION_APP_ID && !uuid_is_null(application_uuid)) {
6127 struct necp_uuid_id_mapping *existing_mapping = necp_uuid_lookup_app_id_locked(application_uuid);
6128 if (existing_mapping) {
6129 info->application_id = existing_mapping->id;
6130 }
6131 }
6132
6133 if (necp_kernel_application_policies_condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID && !uuid_is_null(real_application_uuid)) {
6134 if (uuid_compare(application_uuid, real_application_uuid) == 0) {
6135 info->real_application_id = info->application_id;
6136 } else {
6137 struct necp_uuid_id_mapping *existing_mapping = necp_uuid_lookup_app_id_locked(real_application_uuid);
6138 if (existing_mapping) {
6139 info->real_application_id = existing_mapping->id;
6140 }
6141 }
6142 }
6143
6144 if (necp_kernel_application_policies_condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID && account != NULL) {
6145 struct necp_string_id_mapping *existing_mapping = necp_lookup_string_to_id_locked(&necp_account_id_list, account);
6146 if (existing_mapping) {
6147 info->account_id = existing_mapping->id;
6148 }
6149 }
6150
6151 if (necp_kernel_application_policies_condition_mask & NECP_KERNEL_CONDITION_DOMAIN) {
6152 info->domain = domain;
6153 }
6154
6155 if (necp_kernel_application_policies_condition_mask & NECP_KERNEL_ADDRESS_TYPE_CONDITIONS) {
6156 if (local_addr && local_addr->sa.sa_len > 0) {
6157 memcpy(&info->local_addr, local_addr, local_addr->sa.sa_len);
6158 }
6159 if (remote_addr && remote_addr->sa.sa_len > 0) {
6160 memcpy(&info->remote_addr, remote_addr, remote_addr->sa.sa_len);
6161 }
6162 }
6163 }
6164
6165 static void
6166 necp_send_application_interface_denied_event(pid_t pid, uuid_t proc_uuid, u_int32_t if_functional_type)
6167 {
6168 struct kev_netpolicy_ifdenied ev_ifdenied;
6169
6170 bzero(&ev_ifdenied, sizeof(ev_ifdenied));
6171
6172 ev_ifdenied.ev_data.epid = pid;
6173 uuid_copy(ev_ifdenied.ev_data.euuid, proc_uuid);
6174 ev_ifdenied.ev_if_functional_type = if_functional_type;
6175
6176 netpolicy_post_msg(KEV_NETPOLICY_IFDENIED, &ev_ifdenied.ev_data, sizeof(ev_ifdenied));
6177 }
6178
6179 extern char *proc_name_address(void *p);
6180
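// Used while parsing client parameters that delegate traffic to another application,
// pid, or uid: the calling process must hold PRIV_NET_PRIVILEGED_SOCKET_DELEGATE. The
// entitlement is checked at most once per call; on failure the macro logs an error and
// breaks out of the enclosing switch case so the delegation parameter is ignored.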
6181 #define NECP_VERIFY_DELEGATION_ENTITLEMENT(_p, _d) \
6182 if (!has_checked_delegation_entitlement) { \
6183 has_delegation_entitlement = (priv_check_cred(proc_ucred(_p), PRIV_NET_PRIVILEGED_SOCKET_DELEGATE, 0) == 0); \
6184 has_checked_delegation_entitlement = TRUE; \
6185 } \
6186 if (!has_delegation_entitlement) { \
6187 NECPLOG(LOG_ERR, "%s(%d) does not hold the necessary entitlement to delegate network traffic for other processes by %s", \
6188 proc_name_address(_p), proc_pid(_p), _d); \
6189 break; \
6190 }
6191
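// Application-level policy match, driven by a client parameter TLV buffer. If no
// application policies are loaded and a drop-all order is in effect, the result is an
// immediate drop. Otherwise the flow is:
//   1. Parse the parameter TLVs (delegated application/pid/uid require the delegation
//      entitlement; domain, account, traffic class, protocol, bound interface, and
//      addresses adjust the matching info).
//   2. Fill out necp_socket_info and match it against the app-layer policy map,
//      collecting the filter control unit, route rule, service action, and netagents.
//   3. Perform a scoped route lookup toward the remote (or default) address to pick the
//      routed interface, validate any local address against that interface, and set
//      result flags such as local/direct, IPv4/IPv6 availability, link quality, and QoS.
// If route rules deny the chosen route, the result is downgraded to a drop and an
// interface-denied event is posted for the application.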
6192 int
6193 necp_application_find_policy_match_internal(proc_t proc,
6194 u_int8_t *parameters,
6195 u_int32_t parameters_size,
6196 struct necp_aggregate_result *returned_result,
6197 u_int32_t *flags,
6198 u_int required_interface_index,
6199 const union necp_sockaddr_union *override_local_addr,
6200 const union necp_sockaddr_union *override_remote_addr,
6201 struct rtentry **returned_route, bool ignore_address)
6202 {
6203 int error = 0;
6204 size_t offset = 0;
6205
6206 struct necp_kernel_socket_policy *matched_policy = NULL;
6207 struct necp_socket_info info;
6208 necp_kernel_policy_filter filter_control_unit = 0;
6209 u_int32_t route_rule_id = 0;
6210 necp_kernel_policy_result service_action = 0;
6211 necp_kernel_policy_service service = { 0, 0 };
6212
6213 u_int16_t protocol = 0;
6214 u_int32_t bound_interface_index = required_interface_index;
6215 u_int32_t traffic_class = 0;
6216 u_int32_t client_flags = 0;
6217 union necp_sockaddr_union local_addr;
6218 union necp_sockaddr_union remote_addr;
6219 bool no_remote_addr = FALSE;
6220 u_int8_t remote_family = 0;
6221 bool no_local_addr = FALSE;
6222
6223 if (override_local_addr) {
6224 memcpy(&local_addr, override_local_addr, sizeof(local_addr));
6225 } else {
6226 memset(&local_addr, 0, sizeof(local_addr));
6227 }
6228 if (override_remote_addr) {
6229 memcpy(&remote_addr, override_remote_addr, sizeof(remote_addr));
6230 } else {
6231 memset(&remote_addr, 0, sizeof(remote_addr));
6232 }
6233
6234 // Initialize UID, PID, and UUIDs to the current process
6235 uid_t uid = kauth_cred_getuid(proc_ucred(proc));
6236 pid_t pid = proc_pid(proc);
6237 uuid_t application_uuid;
6238 uuid_clear(application_uuid);
6239 uuid_t real_application_uuid;
6240 uuid_clear(real_application_uuid);
6241 proc_getexecutableuuid(proc, real_application_uuid, sizeof(real_application_uuid));
6242 uuid_copy(application_uuid, real_application_uuid);
6243
6244 char *domain = NULL;
6245 char *account = NULL;
6246
6247 u_int32_t netagent_ids[NECP_MAX_NETAGENTS];
6248 memset(&netagent_ids, 0, sizeof(netagent_ids));
6249 int netagent_cursor;
6250
6251 bool has_checked_delegation_entitlement = FALSE;
6252 bool has_delegation_entitlement = FALSE;
6253
6254 if (returned_result == NULL) {
6255 return (EINVAL);
6256 }
6257
6258 memset(returned_result, 0, sizeof(struct necp_aggregate_result));
6259
6260 lck_rw_lock_shared(&necp_kernel_policy_lock);
6261 if (necp_kernel_application_policies_count == 0) {
6262 if (necp_drop_all_order > 0) {
6263 returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_DROP;
6264 lck_rw_done(&necp_kernel_policy_lock);
6265 return (0);
6266 }
6267 }
6268 lck_rw_done(&necp_kernel_policy_lock);
6269
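// Each client parameter is encoded as a TLV: a 1-byte type, a 4-byte length, and
// length bytes of value. Walk the buffer, bailing out if a stated length would run
// past the end of the supplied parameters.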
6270 while ((offset + sizeof(u_int8_t) + sizeof(u_int32_t)) <= parameters_size) {
6271 u_int8_t type = necp_buffer_get_tlv_type(parameters, offset);
6272 u_int32_t length = necp_buffer_get_tlv_length(parameters, offset);
6273
6274 if (length > (parameters_size - (offset + sizeof(u_int8_t) + sizeof(u_int32_t)))) {
6275 // If the length is larger than what can fit in the remaining parameters size, bail
6276 NECPLOG(LOG_ERR, "Invalid TLV length (%u)", length);
6277 break;
6278 }
6279
6280 if (length > 0) {
6281 u_int8_t *value = necp_buffer_get_tlv_value(parameters, offset, NULL);
6282 if (value != NULL) {
6283 switch (type) {
6284 case NECP_CLIENT_PARAMETER_APPLICATION: {
6285 if (length >= sizeof(uuid_t)) {
6286 if (uuid_compare(application_uuid, value) == 0) {
6287 // No delegation
6288 break;
6289 }
6290
6291 NECP_VERIFY_DELEGATION_ENTITLEMENT(proc, "euuid");
6292
6293 uuid_copy(application_uuid, value);
6294 }
6295 break;
6296 }
6297 case NECP_CLIENT_PARAMETER_REAL_APPLICATION: {
6298 if (length >= sizeof(uuid_t)) {
6299 if (uuid_compare(real_application_uuid, value) == 0) {
6300 // No delegation
6301 break;
6302 }
6303
6304 NECP_VERIFY_DELEGATION_ENTITLEMENT(proc, "uuid");
6305
6306 uuid_copy(real_application_uuid, value);
6307 }
6308 break;
6309 }
6310 case NECP_CLIENT_PARAMETER_PID: {
6311 if (length >= sizeof(pid_t)) {
6312 if (memcmp(&pid, value, sizeof(pid_t)) == 0) {
6313 // No delegation
6314 break;
6315 }
6316
6317 NECP_VERIFY_DELEGATION_ENTITLEMENT(proc, "pid");
6318
6319 memcpy(&pid, value, sizeof(pid_t));
6320 }
6321 break;
6322 }
6323 case NECP_CLIENT_PARAMETER_UID: {
6324 if (length >= sizeof(uid_t)) {
6325 if (memcmp(&uid, value, sizeof(uid_t)) == 0) {
6326 // No delegation
6327 break;
6328 }
6329
6330 NECP_VERIFY_DELEGATION_ENTITLEMENT(proc, "uid");
6331
6332 memcpy(&uid, value, sizeof(uid_t));
6333 }
6334 break;
6335 }
6336 case NECP_CLIENT_PARAMETER_DOMAIN: {
6337 domain = (char *)value;
6338 domain[length - 1] = 0;
6339 break;
6340 }
6341 case NECP_CLIENT_PARAMETER_ACCOUNT: {
6342 account = (char *)value;
6343 account[length - 1] = 0;
6344 break;
6345 }
6346 case NECP_CLIENT_PARAMETER_TRAFFIC_CLASS: {
6347 if (length >= sizeof(u_int32_t)) {
6348 memcpy(&traffic_class, value, sizeof(u_int32_t));
6349 }
6350 break;
6351 }
6352 case NECP_CLIENT_PARAMETER_IP_PROTOCOL: {
6353 if (length >= sizeof(u_int16_t)) {
6354 memcpy(&protocol, value, sizeof(u_int16_t));
6355 }
6356 break;
6357 }
6358 case NECP_CLIENT_PARAMETER_BOUND_INTERFACE: {
6359 if (length <= IFXNAMSIZ && length > 0) {
6360 ifnet_t bound_interface = NULL;
6361 char interface_name[IFXNAMSIZ];
6362 memcpy(interface_name, value, length);
6363 interface_name[length - 1] = 0; // Make sure the string is NULL terminated
6364 if (ifnet_find_by_name(interface_name, &bound_interface) == 0) {
6365 bound_interface_index = bound_interface->if_index;
6366 ifnet_release(bound_interface);
6367 }
6368 }
6369 break;
6370 }
6371 case NECP_CLIENT_PARAMETER_LOCAL_ADDRESS: {
6372 if (ignore_address) {
6373 break;
6374 }
6375
6376 if (length >= sizeof(struct necp_policy_condition_addr)) {
6377 struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value;
6378 if (necp_address_is_valid(&address_struct->address.sa)) {
6379 memcpy(&local_addr, &address_struct->address, sizeof(address_struct->address));
6380 }
6381 }
6382 break;
6383 }
6384 case NECP_CLIENT_PARAMETER_REMOTE_ADDRESS: {
6385 if (ignore_address) {
6386 break;
6387 }
6388
6389 if (length >= sizeof(struct necp_policy_condition_addr)) {
6390 struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value;
6391 if (necp_address_is_valid(&address_struct->address.sa)) {
6392 memcpy(&remote_addr, &address_struct->address, sizeof(address_struct->address));
6393 }
6394 }
6395 break;
6396 }
6397 case NECP_CLIENT_PARAMETER_FLAGS: {
6398 if (length >= sizeof(client_flags)) {
6399 memcpy(&client_flags, value, sizeof(client_flags));
6400 }
6401 }
6402 default: {
6403 break;
6404 }
6405 }
6406 }
6407 }
6408
6409 offset += sizeof(u_int8_t) + sizeof(u_int32_t) + length;
6410 }
6411
6412 // Lock
6413 lck_rw_lock_shared(&necp_kernel_policy_lock);
6414
6415 necp_application_fillout_info_locked(application_uuid, real_application_uuid, account, domain, pid, uid, protocol, bound_interface_index, traffic_class, &local_addr, &remote_addr, proc, &info);
6416 matched_policy = necp_socket_find_policy_match_with_info_locked(necp_kernel_socket_policies_app_layer_map, &info, &filter_control_unit, &route_rule_id, &service_action, &service, netagent_ids, NECP_MAX_NETAGENTS, proc);
6417 if (matched_policy) {
6418 returned_result->policy_id = matched_policy->id;
6419 returned_result->routing_result = matched_policy->result;
6420 memcpy(&returned_result->routing_result_parameter, &matched_policy->result_parameter, sizeof(returned_result->routing_result_parameter));
6421 } else if (necp_drop_all_order > 0) {
6422 // Mark socket as a drop if drop_all is set
6423 returned_result->policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
6424 returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_DROP;
6425 } else {
6426 returned_result->policy_id = 0;
6427 returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_NONE;
6428 }
6429 returned_result->filter_control_unit = filter_control_unit;
6430 returned_result->service_action = service_action;
6431
6432 // Handle trigger service
6433 if (service.identifier != 0) {
6434 struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(service.identifier);
6435 if (mapping != NULL) {
6436 struct necp_service_registration *service_registration = NULL;
6437 uuid_copy(returned_result->service_uuid, mapping->uuid);
6438 returned_result->service_data = service.data;
6439 if (service.identifier == NECP_NULL_SERVICE_ID) {
6440 // NULL service is always 'registered'
6441 returned_result->service_flags |= NECP_SERVICE_FLAGS_REGISTERED;
6442 } else {
6443 LIST_FOREACH(service_registration, &necp_registered_service_list, kernel_chain) {
6444 if (service.identifier == service_registration->service_id) {
6445 returned_result->service_flags |= NECP_SERVICE_FLAGS_REGISTERED;
6446 break;
6447 }
6448 }
6449 }
6450 }
6451 }
6452
6453 // Handle netagents
6454 for (netagent_cursor = 0; netagent_cursor < NECP_MAX_NETAGENTS; netagent_cursor++) {
6455 struct necp_uuid_id_mapping *mapping = NULL;
6456 u_int32_t netagent_id = netagent_ids[netagent_cursor];
6457 if (netagent_id == 0) {
6458 break;
6459 }
6460 mapping = necp_uuid_lookup_uuid_with_service_id_locked(netagent_id);
6461 if (mapping != NULL) {
6462 uuid_copy(returned_result->netagents[netagent_cursor], mapping->uuid);
6463 returned_result->netagent_flags[netagent_cursor] = netagent_get_flags(mapping->uuid);
6464 }
6465 }
6466
6467 // Do routing evaluation
6468 u_int output_bound_interface = bound_interface_index;
6469 if (returned_result->routing_result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED) {
6470 output_bound_interface = returned_result->routing_result_parameter.scoped_interface_index;
6471 } else if (returned_result->routing_result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL) {
6472 output_bound_interface = returned_result->routing_result_parameter.tunnel_interface_index;
6473 }
6474
6475 if (local_addr.sa.sa_len == 0 ||
6476 (local_addr.sa.sa_family == AF_INET && local_addr.sin.sin_addr.s_addr == 0) ||
6477 (local_addr.sa.sa_family == AF_INET6 && IN6_IS_ADDR_UNSPECIFIED(&local_addr.sin6.sin6_addr))) {
6478 no_local_addr = TRUE;
6479 }
6480
6481 if (remote_addr.sa.sa_len == 0 ||
6482 (remote_addr.sa.sa_family == AF_INET && remote_addr.sin.sin_addr.s_addr == 0) ||
6483 (remote_addr.sa.sa_family == AF_INET6 && IN6_IS_ADDR_UNSPECIFIED(&remote_addr.sin6.sin6_addr))) {
6484 no_remote_addr = TRUE;
6485 remote_family = remote_addr.sa.sa_family;
6486 }
6487
6488 returned_result->routed_interface_index = 0;
6489 struct rtentry *rt = NULL;
6490 if (!no_local_addr && (client_flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER) != 0) {
6491 // Treat the output bound interface as the routed interface for local address
6492 // validation later.
6493 returned_result->routed_interface_index = output_bound_interface;
6494 } else {
6495 if (no_remote_addr) {
6496 memset(&remote_addr, 0, sizeof(remote_addr));
6497 if (remote_family == AF_INET6) {
6498 // Reset address to ::
6499 remote_addr.sa.sa_family = AF_INET6;
6500 remote_addr.sa.sa_len = sizeof(struct sockaddr_in6);
6501 } else {
6502 // Reset address to 0.0.0.0
6503 remote_addr.sa.sa_family = AF_INET;
6504 remote_addr.sa.sa_len = sizeof(struct sockaddr_in);
6505 }
6506 }
6507
6508 rt = rtalloc1_scoped((struct sockaddr *)&remote_addr, 0, 0,
6509 output_bound_interface);
6510
6511 if (no_remote_addr && remote_family == 0 &&
6512 (rt == NULL || rt->rt_ifp == NULL)) {
6513 // Route lookup for default IPv4 failed, try IPv6
6514
6515 // Cleanup old route if necessary
6516 if (rt != NULL) {
6517 rtfree(rt);
6518 rt = NULL;
6519 }
6520
6521 // Reset address to ::
6522 memset(&remote_addr, 0, sizeof(remote_addr));
6523 remote_addr.sa.sa_family = AF_INET6;
6524 remote_addr.sa.sa_len = sizeof(struct sockaddr_in6);
6525
6526 // Get route
6527 rt = rtalloc1_scoped((struct sockaddr *)&remote_addr, 0, 0,
6528 output_bound_interface);
6529 }
6530
6531 if (rt != NULL &&
6532 rt->rt_ifp != NULL) {
6533 returned_result->routed_interface_index = rt->rt_ifp->if_index;
6534 /*
6535 * For local addresses, we allow the interface scope to be
6536 * either the loopback interface or the interface hosting the
6537 * local address.
6538 */
6539 if (bound_interface_index != IFSCOPE_NONE &&
6540 rt->rt_ifa != NULL && rt->rt_ifa->ifa_ifp &&
6541 (output_bound_interface == lo_ifp->if_index ||
6542 rt->rt_ifp->if_index == lo_ifp->if_index ||
6543 rt->rt_ifa->ifa_ifp->if_index == bound_interface_index)) {
6544 struct sockaddr_storage dst;
6545 unsigned int ifscope = bound_interface_index;
6546
6547 /*
6548 * Transform dst into the internal routing table form
6549 */
6550 (void) sa_copy((struct sockaddr *)&remote_addr,
6551 &dst, &ifscope);
6552
6553 if ((rt->rt_ifp->if_index == lo_ifp->if_index) ||
6554 rt_ifa_is_dst((struct sockaddr *)&dst, rt->rt_ifa))
6555 returned_result->routed_interface_index =
6556 bound_interface_index;
6557 }
6558 }
6559 }
6560
6561 if (returned_result->routed_interface_index != 0 &&
6562 returned_result->routed_interface_index != lo_ifp->if_index && // Loopback can accept any local address
6563 !no_local_addr) {
6564
6565 // Transform local_addr into the ifaddr form
6566 // IPv6 Scope IDs are always embedded in the ifaddr list
6567 struct sockaddr_storage local_address_sanitized;
6568 u_int ifscope = IFSCOPE_NONE;
6569 (void)sa_copy(&local_addr.sa, &local_address_sanitized, &ifscope);
6570 SIN(&local_address_sanitized)->sin_port = 0;
6571 if (local_address_sanitized.ss_family == AF_INET6) {
6572 SIN6(&local_address_sanitized)->sin6_scope_id = 0;
6573 }
6574
6575 // Validate local address on routed interface
6576 struct ifaddr *ifa = ifa_ifwithaddr_scoped((struct sockaddr *)&local_address_sanitized, returned_result->routed_interface_index);
6577 if (ifa == NULL) {
6578 // Interface address not found, reject route
6579 returned_result->routed_interface_index = 0;
6580 if (rt != NULL) {
6581 rtfree(rt);
6582 rt = NULL;
6583 }
6584 } else {
6585 ifaddr_release(ifa);
6586 ifa = NULL;
6587 }
6588 }
6589
6590 if (flags != NULL) {
6591 if ((client_flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER) == 0) {
6592 // Check for local/direct
6593 bool is_local = FALSE;
6594 if (rt != NULL && (rt->rt_flags & RTF_LOCAL)) {
6595 is_local = TRUE;
6596 } else if (returned_result->routed_interface_index != 0 &&
6597 !no_remote_addr) {
6598 // Clean up the address before comparison with interface addresses
6599
6600 // Transform remote_addr into the ifaddr form
6601 // IPv6 Scope IDs are always embedded in the ifaddr list
6602 struct sockaddr_storage remote_address_sanitized;
6603 u_int ifscope = IFSCOPE_NONE;
6604 (void)sa_copy(&remote_addr.sa, &remote_address_sanitized, &ifscope);
6605 SIN(&remote_address_sanitized)->sin_port = 0;
6606 if (remote_address_sanitized.ss_family == AF_INET6) {
6607 SIN6(&remote_address_sanitized)->sin6_scope_id = 0;
6608 }
6609
6610 // Check if remote address is an interface address
6611 struct ifaddr *ifa = ifa_ifwithaddr((struct sockaddr *)&remote_address_sanitized);
6612 if (ifa != NULL && ifa->ifa_ifp != NULL) {
6613 u_int if_index_for_remote_addr = ifa->ifa_ifp->if_index;
6614 if (if_index_for_remote_addr == returned_result->routed_interface_index ||
6615 if_index_for_remote_addr == lo_ifp->if_index) {
6616 is_local = TRUE;
6617 }
6618 }
6619 if (ifa != NULL) {
6620 ifaddr_release(ifa);
6621 ifa = NULL;
6622 }
6623 }
6624
6625 if (is_local) {
6626 *flags |= (NECP_CLIENT_RESULT_FLAG_IS_LOCAL | NECP_CLIENT_RESULT_FLAG_IS_DIRECT);
6627 } else {
6628 if (rt != NULL &&
6629 !(rt->rt_flags & RTF_GATEWAY) &&
6630 (rt->rt_ifa && rt->rt_ifa->ifa_ifp && !(rt->rt_ifa->ifa_ifp->if_flags & IFF_POINTOPOINT))) {
6631 // Route is directly accessible
6632 *flags |= NECP_CLIENT_RESULT_FLAG_IS_DIRECT;
6633 }
6634 }
6635
6636 if (rt != NULL &&
6637 rt->rt_ifp != NULL) {
6638 // Check probe status
6639 if (rt->rt_ifp->if_eflags & IFEF_PROBE_CONNECTIVITY) {
6640 *flags |= NECP_CLIENT_RESULT_FLAG_PROBE_CONNECTIVITY;
6641 }
6642
6643 if (rt->rt_ifp->if_type == IFT_CELLULAR) {
6644 struct if_cellular_status_v1 *ifsr;
6645
6646 ifnet_lock_shared(rt->rt_ifp);
6647 lck_rw_lock_exclusive(&rt->rt_ifp->if_link_status_lock);
6648
6649 if (rt->rt_ifp->if_link_status != NULL) {
6650 ifsr = &rt->rt_ifp->if_link_status->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
6651
6652 if (ifsr->valid_bitmask & IF_CELL_UL_MSS_RECOMMENDED_VALID) {
6653 if (ifsr->mss_recommended == IF_CELL_UL_MSS_RECOMMENDED_NONE) {
6654 returned_result->mss_recommended = NECP_CLIENT_RESULT_RECOMMENDED_MSS_NONE;
6655 } else if (ifsr->mss_recommended == IF_CELL_UL_MSS_RECOMMENDED_MEDIUM) {
6656 returned_result->mss_recommended = NECP_CLIENT_RESULT_RECOMMENDED_MSS_MEDIUM;
6657 } else if (ifsr->mss_recommended == IF_CELL_UL_MSS_RECOMMENDED_LOW) {
6658 returned_result->mss_recommended = NECP_CLIENT_RESULT_RECOMMENDED_MSS_LOW;
6659 }
6660 }
6661 }
6662 lck_rw_done(&rt->rt_ifp->if_link_status_lock);
6663 ifnet_lock_done(rt->rt_ifp);
6664 }
6665
6666 // Check link quality
6667 if ((client_flags & NECP_CLIENT_PARAMETER_FLAG_DISCRETIONARY) &&
6668 (rt->rt_ifp->if_interface_state.valid_bitmask & IF_INTERFACE_STATE_LQM_STATE_VALID) &&
6669 rt->rt_ifp->if_interface_state.lqm_state == IFNET_LQM_THRESH_ABORT) {
6670 *flags |= NECP_CLIENT_RESULT_FLAG_LINK_QUALITY_ABORT;
6671 }
6672
6673 // Check QoS marking (fastlane)
6674 if (necp_update_qos_marking(rt->rt_ifp, route_rule_id)) {
6675 *flags |= NECP_CLIENT_RESULT_FLAG_ALLOW_QOS_MARKING;
6676 }
6677 }
6678 }
6679
6680 if (returned_result->routed_interface_index != 0) {
6681 union necp_sockaddr_union default_address;
6682 struct rtentry *v4Route = NULL;
6683 struct rtentry *v6Route = NULL;
6684
6685 memset(&default_address, 0, sizeof(default_address));
6686
6687 // Reset address to 0.0.0.0
6688 default_address.sa.sa_family = AF_INET;
6689 default_address.sa.sa_len = sizeof(struct sockaddr_in);
6690 v4Route = rtalloc1_scoped((struct sockaddr *)&default_address, 0, 0,
6691 returned_result->routed_interface_index);
6692
6693 // Reset address to ::
6694 default_address.sa.sa_family = AF_INET6;
6695 default_address.sa.sa_len = sizeof(struct sockaddr_in6);
6696 v6Route = rtalloc1_scoped((struct sockaddr *)&default_address, 0, 0,
6697 returned_result->routed_interface_index);
6698
6699 if (v4Route != NULL) {
6700 if (v4Route->rt_ifp != NULL) {
6701 *flags |= NECP_CLIENT_RESULT_FLAG_HAS_IPV4;
6702 }
6703 rtfree(v4Route);
6704 v4Route = NULL;
6705 }
6706
6707 if (v6Route != NULL) {
6708 if (v6Route->rt_ifp != NULL) {
6709 *flags |= NECP_CLIENT_RESULT_FLAG_HAS_IPV6;
6710 }
6711 rtfree(v6Route);
6712 v6Route = NULL;
6713 }
6714 }
6715 }
6716
6717 u_int32_t interface_type_denied = IFRTYPE_FUNCTIONAL_UNKNOWN;
6718 bool route_is_allowed = necp_route_is_allowed(rt, NULL, route_rule_id, &interface_type_denied);
6719 if (!route_is_allowed) {
6720 // If the route is blocked, treat the lookup as a drop
6721 returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_DROP;
6722 memset(&returned_result->routing_result_parameter, 0, sizeof(returned_result->routing_result_parameter));
6723
6724 if (interface_type_denied != IFRTYPE_FUNCTIONAL_UNKNOWN) {
6725 necp_send_application_interface_denied_event(pid, application_uuid, interface_type_denied);
6726 }
6727 }
6728
6729 if (rt != NULL) {
6730 if (returned_route != NULL) {
6731 *returned_route = rt;
6732 } else {
6733 rtfree(rt);
6734 }
6735 rt = NULL;
6736 }
6737 // Unlock
6738 lck_rw_done(&necp_kernel_policy_lock);
6739
6740 return (error);
6741 }
6742
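// Evaluates a single socket-level policy against the matching info. Every condition bit
// set in condition_mask must be satisfied (or must fail to match when the same bit is
// set in condition_negated_mask); a policy with an empty mask matches any socket.
// Bound-interface handling comes first: unless the policy applies to all interfaces, it
// either requires a specific interface, forbids one, or requires an unbound socket.
// Custom-entitlement checks are cached on the policy after the first evaluation.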
6743 static bool
6744 necp_socket_check_policy(struct necp_kernel_socket_policy *kernel_policy, necp_app_id app_id, necp_app_id real_app_id, errno_t cred_result, u_int32_t account_id, struct substring domain, u_int8_t domain_dot_count, pid_t pid, uid_t uid, u_int32_t bound_interface_index, u_int32_t traffic_class, u_int16_t protocol, union necp_sockaddr_union *local, union necp_sockaddr_union *remote, proc_t proc)
6745 {
6746 if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES)) {
6747 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
6748 u_int32_t cond_bound_interface_index = kernel_policy->cond_bound_interface ? kernel_policy->cond_bound_interface->if_index : 0;
6749 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
6750 if (bound_interface_index == cond_bound_interface_index) {
6751 // No match, matches forbidden interface
6752 return (FALSE);
6753 }
6754 } else {
6755 if (bound_interface_index != cond_bound_interface_index) {
6756 // No match, does not match required interface
6757 return (FALSE);
6758 }
6759 }
6760 } else {
6761 if (bound_interface_index != 0) {
6762 // No match, requires a non-bound packet
6763 return (FALSE);
6764 }
6765 }
6766 }
6767
6768 if (kernel_policy->condition_mask == 0) {
6769 return (TRUE);
6770 }
6771
6772 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID) {
6773 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_APP_ID) {
6774 if (app_id == kernel_policy->cond_app_id) {
6775 // No match, matches forbidden application
6776 return (FALSE);
6777 }
6778 } else {
6779 if (app_id != kernel_policy->cond_app_id) {
6780 // No match, does not match required application
6781 return (FALSE);
6782 }
6783 }
6784 }
6785
6786 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
6787 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
6788 if (real_app_id == kernel_policy->cond_real_app_id) {
6789 // No match, matches forbidden application
6790 return (FALSE);
6791 }
6792 } else {
6793 if (real_app_id != kernel_policy->cond_real_app_id) {
6794 // No match, does not match required application
6795 return (FALSE);
6796 }
6797 }
6798 }
6799
6800 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT) {
6801 if (cred_result != 0) {
6802 // Process is missing entitlement
6803 return (FALSE);
6804 }
6805 }
6806
6807 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT) {
6808 if (kernel_policy->cond_custom_entitlement_matched == necp_boolean_state_false) {
6809 // Process is missing entitlement based on previous check
6810 return (FALSE);
6811 } else if (kernel_policy->cond_custom_entitlement_matched == necp_boolean_state_unknown) {
6812 if (kernel_policy->cond_custom_entitlement != NULL) {
6813 if (proc == NULL) {
6814 // No process found, cannot check entitlement
6815 return (FALSE);
6816 }
6817 task_t task = proc_task(proc);
6818 if (task == NULL ||
6819 !IOTaskHasEntitlement(task, kernel_policy->cond_custom_entitlement)) {
6820 // Process is missing custom entitlement
6821 kernel_policy->cond_custom_entitlement_matched = necp_boolean_state_false;
6822 return (FALSE);
6823 } else {
6824 kernel_policy->cond_custom_entitlement_matched = necp_boolean_state_true;
6825 }
6826 }
6827 }
6828 }
6829
6830 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_DOMAIN) {
6831 bool domain_matches = necp_hostname_matches_domain(domain, domain_dot_count, kernel_policy->cond_domain, kernel_policy->cond_domain_dot_count);
6832 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_DOMAIN) {
6833 if (domain_matches) {
6834 // No match, matches forbidden domain
6835 return (FALSE);
6836 }
6837 } else {
6838 if (!domain_matches) {
6839 // No match, does not match required domain
6840 return (FALSE);
6841 }
6842 }
6843 }
6844
6845 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID) {
6846 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID) {
6847 if (account_id == kernel_policy->cond_account_id) {
6848 // No match, matches forbidden account
6849 return (FALSE);
6850 }
6851 } else {
6852 if (account_id != kernel_policy->cond_account_id) {
6853 // No match, does not match required account
6854 return (FALSE);
6855 }
6856 }
6857 }
6858
6859 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PID) {
6860 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_PID) {
6861 if (pid == kernel_policy->cond_pid) {
6862 // No match, matches forbidden pid
6863 return (FALSE);
6864 }
6865 } else {
6866 if (pid != kernel_policy->cond_pid) {
6867 // No match, does not match required pid
6868 return (FALSE);
6869 }
6870 }
6871 }
6872
6873 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_UID) {
6874 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_UID) {
6875 if (uid == kernel_policy->cond_uid) {
6876 // No match, matches forbidden uid
6877 return (FALSE);
6878 }
6879 } else {
6880 if (uid != kernel_policy->cond_uid) {
6881 // No match, does not match required uid
6882 return (FALSE);
6883 }
6884 }
6885 }
6886
6887 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
6888 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
6889 if (traffic_class >= kernel_policy->cond_traffic_class.start_tc &&
6890 traffic_class <= kernel_policy->cond_traffic_class.end_tc) {
6891 // No match, matches forbidden traffic class
6892 return (FALSE);
6893 }
6894 } else {
6895 if (traffic_class < kernel_policy->cond_traffic_class.start_tc ||
6896 traffic_class > kernel_policy->cond_traffic_class.end_tc) {
6897 // No match, does not match required traffic class
6898 return (FALSE);
6899 }
6900 }
6901 }
6902
6903 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
6904 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
6905 if (protocol == kernel_policy->cond_protocol) {
6906 // No match, matches forbidden protocol
6907 return (FALSE);
6908 }
6909 } else {
6910 if (protocol != kernel_policy->cond_protocol) {
6911 // No match, does not match required protocol
6912 return (FALSE);
6913 }
6914 }
6915 }
6916
6917 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
6918 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
6919 bool inRange = necp_is_addr_in_range((struct sockaddr *)local, (struct sockaddr *)&kernel_policy->cond_local_start, (struct sockaddr *)&kernel_policy->cond_local_end);
6920 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
6921 if (inRange) {
6922 return (FALSE);
6923 }
6924 } else {
6925 if (!inRange) {
6926 return (FALSE);
6927 }
6928 }
6929 } else if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
6930 bool inSubnet = necp_is_addr_in_subnet((struct sockaddr *)local, (struct sockaddr *)&kernel_policy->cond_local_start, kernel_policy->cond_local_prefix);
6931 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
6932 if (inSubnet) {
6933 return (FALSE);
6934 }
6935 } else {
6936 if (!inSubnet) {
6937 return (FALSE);
6938 }
6939 }
6940 }
6941 }
6942
6943 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
6944 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
6945 bool inRange = necp_is_addr_in_range((struct sockaddr *)remote, (struct sockaddr *)&kernel_policy->cond_remote_start, (struct sockaddr *)&kernel_policy->cond_remote_end);
6946 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
6947 if (inRange) {
6948 return (FALSE);
6949 }
6950 } else {
6951 if (!inRange) {
6952 return (FALSE);
6953 }
6954 }
6955 } else if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
6956 bool inSubnet = necp_is_addr_in_subnet((struct sockaddr *)remote, (struct sockaddr *)&kernel_policy->cond_remote_start, kernel_policy->cond_remote_prefix);
6957 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
6958 if (inSubnet) {
6959 return (FALSE);
6960 }
6961 } else {
6962 if (!inSubnet) {
6963 return (FALSE);
6964 }
6965 }
6966 }
6967 }
6968
6969 return (TRUE);
6970 }
6971
6972 static inline u_int32_t
6973 necp_socket_calc_flowhash_locked(struct necp_socket_info *info)
6974 {
6975 return (net_flowhash(info, sizeof(*info), necp_kernel_socket_policies_gencount));
6976 }
6977
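// Gathers matching info for a socket's inpcb. As with the application path, fields are
// derived only when some loaded socket policy conditions on them, using the aggregate
// socket policy condition mask. Local and remote addresses come from the inpcb unless
// explicit overrides are supplied, and the bound interface index can also be overridden.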
6978 static void
6979 necp_socket_fillout_info_locked(struct inpcb *inp, struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr, u_int32_t override_bound_interface, struct necp_socket_info *info)
6980 {
6981 struct socket *so = NULL;
6982
6983 memset(info, 0, sizeof(struct necp_socket_info));
6984
6985 so = inp->inp_socket;
6986
6987 if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_PID) {
6988 info->pid = ((so->so_flags & SOF_DELEGATED) ? so->e_pid : so->last_pid);
6989 }
6990
6991 if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_UID) {
6992 info->uid = kauth_cred_getuid(so->so_cred);
6993 }
6994
6995 if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
6996 info->traffic_class = so->so_traffic_class;
6997 }
6998
6999 if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
7000 if (inp->inp_ip_p) {
7001 info->protocol = inp->inp_ip_p;
7002 } else {
7003 info->protocol = SOCK_PROTO(so);
7004 }
7005 }
7006
7007 if (inp->inp_flags2 & INP2_WANT_APP_POLICY && necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_APP_ID) {
7008 struct necp_uuid_id_mapping *existing_mapping = necp_uuid_lookup_app_id_locked(((so->so_flags & SOF_DELEGATED) ? so->e_uuid : so->last_uuid));
7009 if (existing_mapping) {
7010 info->application_id = existing_mapping->id;
7011 }
7012
7013 if (!(so->so_flags & SOF_DELEGATED)) {
7014 info->real_application_id = info->application_id;
7015 } else if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
7016 struct necp_uuid_id_mapping *real_existing_mapping = necp_uuid_lookup_app_id_locked(so->last_uuid);
7017 if (real_existing_mapping) {
7018 info->real_application_id = real_existing_mapping->id;
7019 }
7020 }
7021
7022 if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT) {
7023 info->cred_result = priv_check_cred(so->so_cred, PRIV_NET_PRIVILEGED_NECP_MATCH, 0);
7024 if (info->cred_result != 0) {
7025 // Process does not have entitlement, check the parent process
7026 necp_get_parent_cred_result(NULL, info);
7027 }
7028 }
7029 }
7030
7031 if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID && inp->inp_necp_attributes.inp_account != NULL) {
7032 struct necp_string_id_mapping *existing_mapping = necp_lookup_string_to_id_locked(&necp_account_id_list, inp->inp_necp_attributes.inp_account);
7033 if (existing_mapping) {
7034 info->account_id = existing_mapping->id;
7035 }
7036 }
7037
7038 if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_DOMAIN) {
7039 info->domain = inp->inp_necp_attributes.inp_domain;
7040 }
7041
7042 if (override_bound_interface) {
7043 info->bound_interface_index = override_bound_interface;
7044 } else {
7045 if ((inp->inp_flags & INP_BOUND_IF) && inp->inp_boundifp) {
7046 info->bound_interface_index = inp->inp_boundifp->if_index;
7047 }
7048 }
7049
7050 if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_ADDRESS_TYPE_CONDITIONS) {
7051 if (inp->inp_vflag & INP_IPV4) {
7052 if (override_local_addr) {
7053 if (override_local_addr->sa_len <= sizeof(struct sockaddr_in)) {
7054 memcpy(&info->local_addr, override_local_addr, override_local_addr->sa_len);
7055 }
7056 } else {
7057 ((struct sockaddr_in *)&info->local_addr)->sin_family = AF_INET;
7058 ((struct sockaddr_in *)&info->local_addr)->sin_len = sizeof(struct sockaddr_in);
7059 ((struct sockaddr_in *)&info->local_addr)->sin_port = inp->inp_lport;
7060 memcpy(&((struct sockaddr_in *)&info->local_addr)->sin_addr, &inp->inp_laddr, sizeof(struct in_addr));
7061 }
7062
7063 if (override_remote_addr) {
7064 if (override_remote_addr->sa_len <= sizeof(struct sockaddr_in)) {
7065 memcpy(&info->remote_addr, override_remote_addr, override_remote_addr->sa_len);
7066 }
7067 } else {
7068 ((struct sockaddr_in *)&info->remote_addr)->sin_family = AF_INET;
7069 ((struct sockaddr_in *)&info->remote_addr)->sin_len = sizeof(struct sockaddr_in);
7070 ((struct sockaddr_in *)&info->remote_addr)->sin_port = inp->inp_fport;
7071 memcpy(&((struct sockaddr_in *)&info->remote_addr)->sin_addr, &inp->inp_faddr, sizeof(struct in_addr));
7072 }
7073 } else if (inp->inp_vflag & INP_IPV6) {
7074 if (override_local_addr) {
7075 if (override_local_addr->sa_len <= sizeof(struct sockaddr_in6)) {
7076 memcpy(&info->local_addr, override_local_addr, override_local_addr->sa_len);
7077 }
7078 } else {
7079 ((struct sockaddr_in6 *)&info->local_addr)->sin6_family = AF_INET6;
7080 ((struct sockaddr_in6 *)&info->local_addr)->sin6_len = sizeof(struct sockaddr_in6);
7081 ((struct sockaddr_in6 *)&info->local_addr)->sin6_port = inp->inp_lport;
7082 memcpy(&((struct sockaddr_in6 *)&info->local_addr)->sin6_addr, &inp->in6p_laddr, sizeof(struct in6_addr));
7083 }
7084
7085 if (override_remote_addr) {
7086 if (override_remote_addr->sa_len <= sizeof(struct sockaddr_in6)) {
7087 memcpy(&info->remote_addr, override_remote_addr, override_remote_addr->sa_len);
7088 }
7089 } else {
7090 ((struct sockaddr_in6 *)&info->remote_addr)->sin6_family = AF_INET6;
7091 ((struct sockaddr_in6 *)&info->remote_addr)->sin6_len = sizeof(struct sockaddr_in6);
7092 ((struct sockaddr_in6 *)&info->remote_addr)->sin6_port = inp->inp_fport;
7093 memcpy(&((struct sockaddr_in6 *)&info->remote_addr)->sin6_addr, &inp->in6p_faddr, sizeof(struct in6_addr));
7094 }
7095 }
7096 }
7097 }
7098
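// Scans a priority-sorted, NULL-terminated policy array and returns the first matching
// policy with a terminal result. Non-terminal results encountered along the way are
// accumulated instead of ending the search: socket filters set the filter control unit,
// route-rule results are collected (and aggregated if there is more than one), service
// triggers and netagents are recorded, and SKIP results open a skip window over the
// remainder of their session. When drop-all is in effect, the scan stops at the first
// policy whose session order reaches necp_drop_all_order.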
7099 static inline struct necp_kernel_socket_policy *
7100 necp_socket_find_policy_match_with_info_locked(struct necp_kernel_socket_policy **policy_search_array, struct necp_socket_info *info, necp_kernel_policy_filter *return_filter, u_int32_t *return_route_rule_id, necp_kernel_policy_result *return_service_action, necp_kernel_policy_service *return_service, u_int32_t *return_netagent_array, size_t netagent_array_count, proc_t proc)
7101 {
7102 struct necp_kernel_socket_policy *matched_policy = NULL;
7103 u_int32_t skip_order = 0;
7104 u_int32_t skip_session_order = 0;
7105 u_int32_t route_rule_id_array[MAX_AGGREGATE_ROUTE_RULES];
7106 size_t route_rule_id_count = 0;
7107 int i;
7108 size_t netagent_cursor = 0;
7109
7110 // Pre-process domain for quick matching
7111 struct substring domain_substring = necp_trim_dots_and_stars(info->domain, info->domain ? strlen(info->domain) : 0);
7112 u_int8_t domain_dot_count = necp_count_dots(domain_substring.string, domain_substring.length);
7113
7114 if (return_filter) {
7115 *return_filter = 0;
7116 }
7117
7118 if (return_route_rule_id) {
7119 *return_route_rule_id = 0;
7120 }
7121
7122 if (return_service_action) {
7123 *return_service_action = 0;
7124 }
7125
7126 if (return_service) {
7127 return_service->identifier = 0;
7128 return_service->data = 0;
7129 }
7130
7131 if (policy_search_array != NULL) {
7132 for (i = 0; policy_search_array[i] != NULL; i++) {
7133 if (necp_drop_all_order != 0 && policy_search_array[i]->session_order >= necp_drop_all_order) {
7134 // We've hit a drop all rule
7135 break;
7136 }
7137 if (skip_session_order && policy_search_array[i]->session_order >= skip_session_order) {
7138 // Done skipping
7139 skip_order = 0;
7140 skip_session_order = 0;
7141 }
7142 if (skip_order) {
7143 if (policy_search_array[i]->order < skip_order) {
7144 // Skip this policy
7145 continue;
7146 } else {
7147 // Done skipping
7148 skip_order = 0;
7149 skip_session_order = 0;
7150 }
7151 } else if (skip_session_order) {
7152 // Skip this policy
7153 continue;
7154 }
7155 if (necp_socket_check_policy(policy_search_array[i], info->application_id, info->real_application_id, info->cred_result, info->account_id, domain_substring, domain_dot_count, info->pid, info->uid, info->bound_interface_index, info->traffic_class, info->protocol, &info->local_addr, &info->remote_addr, proc)) {
7156 if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_SOCKET_FILTER) {
7157 if (return_filter && *return_filter == 0) {
7158 *return_filter = policy_search_array[i]->result_parameter.filter_control_unit;
7159 if (necp_debug > 1) {
7160 NECPLOG(LOG_DEBUG, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) Filter %d", info->application_id, info->real_application_id, info->bound_interface_index, info->protocol, policy_search_array[i]->result_parameter.filter_control_unit);
7161 }
7162 }
7163 continue;
7164 } else if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_ROUTE_RULES) {
7165 if (return_route_rule_id && route_rule_id_count < MAX_AGGREGATE_ROUTE_RULES) {
7166 route_rule_id_array[route_rule_id_count++] = policy_search_array[i]->result_parameter.route_rule_id;
7167 if (necp_debug > 1) {
7168 NECPLOG(LOG_DEBUG, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) Route Rule %d", info->application_id, info->real_application_id, info->bound_interface_index, info->protocol, policy_search_array[i]->result_parameter.route_rule_id);
7169 }
7170 }
7171 continue;
7172 } else if (necp_kernel_socket_result_is_trigger_service_type(policy_search_array[i])) {
7173 if (return_service_action && *return_service_action == 0) {
7174 *return_service_action = policy_search_array[i]->result;
7175 if (necp_debug > 1) {
7176 NECPLOG(LOG_DEBUG, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) Service Action %d", info->application_id, info->real_application_id, info->bound_interface_index, info->protocol, policy_search_array[i]->result);
7177 }
7178 }
7179 if (return_service && return_service->identifier == 0) {
7180 return_service->identifier = policy_search_array[i]->result_parameter.service.identifier;
7181 return_service->data = policy_search_array[i]->result_parameter.service.data;
7182 if (necp_debug > 1) {
7183 NECPLOG(LOG_DEBUG, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) Service ID %d Data %d", info->application_id, info->real_application_id, info->bound_interface_index, info->protocol, policy_search_array[i]->result_parameter.service.identifier, policy_search_array[i]->result_parameter.service.data);
7184 }
7185 }
7186 continue;
7187 } else if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_USE_NETAGENT) {
7188 if (return_netagent_array != NULL &&
7189 netagent_cursor < netagent_array_count) {
7190 return_netagent_array[netagent_cursor] = policy_search_array[i]->result_parameter.netagent_id;
7191 netagent_cursor++;
7192 if (necp_debug > 1) {
7193 NECPLOG(LOG_DEBUG, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) Use Netagent %d", info->application_id, info->real_application_id, info->bound_interface_index, info->protocol, policy_search_array[i]->result_parameter.netagent_id);
7194 }
7195 }
7196 continue;
7197 }
7198
7199 // Matched policy is a skip rule; apply the skip and continue.
7200 if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
7201 skip_order = policy_search_array[i]->result_parameter.skip_policy_order;
7202 skip_session_order = policy_search_array[i]->session_order + 1;
7203 continue;
7204 }
7205
7206 // Passed all tests, found a match
7207 matched_policy = policy_search_array[i];
7208 break;
7209 }
7210 }
7211 }
7212
7213 if (route_rule_id_count == 1) {
7214 *return_route_rule_id = route_rule_id_array[0];
7215 } else if (route_rule_id_count > 1) {
7216 *return_route_rule_id = necp_create_aggregate_route_rule(route_rule_id_array);
7217 }
7218 return (matched_policy);
7219 }
7220
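/*
 * Helper: returns TRUE if the socket's local address (IPv4 or IPv6,
 * depending on the inpcb's address family) matches one of the addresses
 * assigned to the interface at the given index. Used further down to
 * check whether a connected socket already uses a policy's tunnel
 * interface.
 */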
7221 static bool
7222 necp_socket_uses_interface(struct inpcb *inp, u_int32_t interface_index)
7223 {
7224 bool found_match = FALSE;
7225 errno_t result = 0;
7226 ifaddr_t *addresses = NULL;
7227 union necp_sockaddr_union address_storage;
7228 int i;
7229 int family = AF_INET;
7230 ifnet_t interface = ifindex2ifnet[interface_index];
7231
7232 if (inp == NULL || interface == NULL) {
7233 return (FALSE);
7234 }
7235
7236 if (inp->inp_vflag & INP_IPV4) {
7237 family = AF_INET;
7238 } else if (inp->inp_vflag & INP_IPV6) {
7239 family = AF_INET6;
7240 }
7241
7242 result = ifnet_get_address_list_family(interface, &addresses, family);
7243 if (result != 0) {
7244 NECPLOG(LOG_ERR, "Failed to get address list for %s%d", ifnet_name(interface), ifnet_unit(interface));
7245 return (FALSE);
7246 }
7247
7248 for (i = 0; addresses[i] != NULL; i++) {
7249 if (ifaddr_address(addresses[i], &address_storage.sa, sizeof(address_storage)) == 0) {
7250 if (family == AF_INET) {
7251 if (memcmp(&address_storage.sin.sin_addr, &inp->inp_laddr, sizeof(inp->inp_laddr)) == 0) {
7252 found_match = TRUE;
7253 goto done;
7254 }
7255 } else if (family == AF_INET6) {
7256 if (memcmp(&address_storage.sin6.sin6_addr, &inp->in6p_laddr, sizeof(inp->in6p_laddr)) == 0) {
7257 found_match = TRUE;
7258 goto done;
7259 }
7260 }
7261 }
7262 }
7263
7264 done:
7265 ifnet_free_address_list(addresses);
7266 addresses = NULL;
7267 return (found_match);
7268 }
7269
7270 static inline bool
7271 necp_socket_is_connected(struct inpcb *inp)
7272 {
7273 return (inp->inp_socket->so_state & (SS_ISCONNECTING | SS_ISCONNECTED | SS_ISDISCONNECTING));
7274 }
7275
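/*
 * Socket-level bypass: loopback traffic (when necp_pass_loopback is
 * enabled) and internal co-processor traffic skip policy evaluation and
 * are treated as a pass.
 */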
7276 static inline bool
7277 necp_socket_bypass(struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr, struct inpcb *inp)
7278 {
7279
7280 if (necp_pass_loopback > 0 && necp_is_loopback(override_local_addr, override_remote_addr, inp, NULL)) {
7281 return (true);
7282 } else if (necp_is_intcoproc(inp, NULL)) {
7283 return (true);
7284 }
7285
7286 return (false);
7287 }
7288
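/*
 * Main socket-layer policy match. Rough flow:
 *   1. Ignore invalid override addresses and take the fast paths
 *      (no applicable policies, bypass traffic).
 *   2. Fill out the socket info under the shared policy lock and compute
 *      a flowhash; if the cached inp_policyresult is still valid for this
 *      policy generation and flowhash, reuse it.
 *   3. Otherwise walk the policy bucket for this application, verify any
 *      required service registrations and network agents, and cache the
 *      new result on the inpcb (defuncting connected sockets or redoing
 *      MTU discovery where needed).
 */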
7289 necp_kernel_policy_id
7290 necp_socket_find_policy_match(struct inpcb *inp, struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr, u_int32_t override_bound_interface)
7291 {
7292 struct socket *so = NULL;
7293 necp_kernel_policy_filter filter_control_unit = 0;
7294 u_int32_t route_rule_id = 0;
7295 struct necp_kernel_socket_policy *matched_policy = NULL;
7296 necp_kernel_policy_id matched_policy_id = NECP_KERNEL_POLICY_ID_NONE;
7297 necp_kernel_policy_result service_action = 0;
7298 necp_kernel_policy_service service = { 0, 0 };
7299
7300 u_int32_t netagent_ids[NECP_MAX_NETAGENTS];
7301 memset(&netagent_ids, 0, sizeof(netagent_ids));
7302 int netagent_cursor;
7303
7304 struct necp_socket_info info;
7305
7306 if (inp == NULL) {
7307 return (NECP_KERNEL_POLICY_ID_NONE);
7308 }
7309
7310 // Ignore invalid addresses
7311 if (override_local_addr != NULL &&
7312 !necp_address_is_valid(override_local_addr)) {
7313 override_local_addr = NULL;
7314 }
7315 if (override_remote_addr != NULL &&
7316 !necp_address_is_valid(override_remote_addr)) {
7317 override_remote_addr = NULL;
7318 }
7319
7320 so = inp->inp_socket;
7321
7322 // Don't lock. Possible race condition, but we don't want the performance hit.
7323 if (necp_kernel_socket_policies_count == 0 ||
7324 (!(inp->inp_flags2 & INP2_WANT_APP_POLICY) && necp_kernel_socket_policies_non_app_count == 0)) {
7325 if (necp_drop_all_order > 0) {
7326 inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
7327 inp->inp_policyresult.policy_gencount = 0;
7328 inp->inp_policyresult.app_id = 0;
7329 inp->inp_policyresult.flowhash = 0;
7330 inp->inp_policyresult.results.filter_control_unit = 0;
7331 inp->inp_policyresult.results.route_rule_id = 0;
7332 if (necp_socket_bypass(override_local_addr, override_remote_addr, inp)) {
7333 inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_PASS;
7334 } else {
7335 inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_DROP;
7336 }
7337 }
7338 return (NECP_KERNEL_POLICY_ID_NONE);
7339 }
7340
7341 // Check for bypass exception (loopback / intcoproc traffic)
7342 if (necp_socket_bypass(override_local_addr, override_remote_addr, inp)) {
7343 // Mark socket as a pass
7344 inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
7345 inp->inp_policyresult.policy_gencount = 0;
7346 inp->inp_policyresult.app_id = 0;
7347 inp->inp_policyresult.flowhash = 0;
7348 inp->inp_policyresult.results.filter_control_unit = 0;
7349 inp->inp_policyresult.results.route_rule_id = 0;
7350 inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_PASS;
7351 return (NECP_KERNEL_POLICY_ID_NONE);
7352 }
7353
7354 // Lock
7355 lck_rw_lock_shared(&necp_kernel_policy_lock);
7356
7357 necp_socket_fillout_info_locked(inp, override_local_addr, override_remote_addr, override_bound_interface, &info);
7358 inp->inp_policyresult.app_id = info.application_id;
7359
7360 // Check whether the cached result for this flowhash and policy generation can be reused
7361 u_int32_t flowhash = necp_socket_calc_flowhash_locked(&info);
7362 if (inp->inp_policyresult.policy_id != NECP_KERNEL_POLICY_ID_NONE &&
7363 inp->inp_policyresult.policy_gencount == necp_kernel_socket_policies_gencount &&
7364 inp->inp_policyresult.flowhash == flowhash) {
7365 // If already matched this socket on this generation of table, skip
7366
7367 // Unlock
7368 lck_rw_done(&necp_kernel_policy_lock);
7369
7370 return (inp->inp_policyresult.policy_id);
7371 }
7372
7373 // Match socket to policy
7374 matched_policy = necp_socket_find_policy_match_with_info_locked(necp_kernel_socket_policies_map[NECP_SOCKET_MAP_APP_ID_TO_BUCKET(info.application_id)], &info, &filter_control_unit, &route_rule_id, &service_action, &service, netagent_ids, NECP_MAX_NETAGENTS, current_proc());
7375 // If the socket matched a scoped service policy, mark it as Drop if the service is not registered.
7376 // This covers the cases in which a service is required (on demand) but hasn't started yet.
7377 if ((service_action == NECP_KERNEL_POLICY_RESULT_TRIGGER_SCOPED ||
7378 service_action == NECP_KERNEL_POLICY_RESULT_NO_TRIGGER_SCOPED) &&
7379 service.identifier != 0 &&
7380 service.identifier != NECP_NULL_SERVICE_ID) {
7381 bool service_is_registered = FALSE;
7382 struct necp_service_registration *service_registration = NULL;
7383 LIST_FOREACH(service_registration, &necp_registered_service_list, kernel_chain) {
7384 if (service.identifier == service_registration->service_id) {
7385 service_is_registered = TRUE;
7386 break;
7387 }
7388 }
7389 if (!service_is_registered) {
7390 // Mark socket as a drop if service is not registered
7391 inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
7392 inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount;
7393 inp->inp_policyresult.flowhash = flowhash;
7394 inp->inp_policyresult.results.filter_control_unit = 0;
7395 inp->inp_policyresult.results.route_rule_id = 0;
7396 inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_DROP;
7397
7398 if (necp_debug > 1) {
7399 NECPLOG(LOG_DEBUG, "Socket Policy: (BoundInterface %d Proto %d) Dropping packet because service is not registered", info.bound_interface_index, info.protocol);
7400 }
7401
7402 // Unlock
7403 lck_rw_done(&necp_kernel_policy_lock);
7404 return (NECP_KERNEL_POLICY_ID_NONE);
7405 }
7406 }
7407 // Verify netagents
7408 for (netagent_cursor = 0; netagent_cursor < NECP_MAX_NETAGENTS; netagent_cursor++) {
7409 struct necp_uuid_id_mapping *mapping = NULL;
7410 u_int32_t netagent_id = netagent_ids[netagent_cursor];
7411 if (netagent_id == 0) {
7412 break;
7413 }
7414 mapping = necp_uuid_lookup_uuid_with_service_id_locked(netagent_id);
7415 if (mapping != NULL) {
7416 u_int32_t agent_flags = 0;
7417 agent_flags = netagent_get_flags(mapping->uuid);
7418 if (agent_flags & NETAGENT_FLAG_REGISTERED) {
7419 if (agent_flags & NETAGENT_FLAG_ACTIVE) {
7420 continue;
7421 } else if ((agent_flags & NETAGENT_FLAG_VOLUNTARY) == 0) {
7422 if (agent_flags & NETAGENT_FLAG_KERNEL_ACTIVATED) {
7423 int trigger_error = 0;
7424 trigger_error = netagent_kernel_trigger(mapping->uuid);
7425 if (necp_debug > 1) {
7426 NECPLOG(LOG_DEBUG, "Socket Policy: Triggering inactive agent, error %d", trigger_error);
7427 }
7428 }
7429
7430 // Mark socket as a drop if required agent is not active
7431 inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
7432 inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount;
7433 inp->inp_policyresult.flowhash = flowhash;
7434 inp->inp_policyresult.results.filter_control_unit = 0;
7435 inp->inp_policyresult.results.route_rule_id = 0;
7436 inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_DROP;
7437
7438 if (necp_debug > 1) {
7439 NECPLOG(LOG_DEBUG, "Socket Policy: (BoundInterface %d Proto %d) Dropping packet because agent is not active", info.bound_interface_index, info.protocol);
7440 }
7441
7442 // Unlock
7443 lck_rw_done(&necp_kernel_policy_lock);
7444 return (NECP_KERNEL_POLICY_ID_NONE);
7445 }
7446 }
7447 }
7448 }
7449 if (matched_policy) {
7450 matched_policy_id = matched_policy->id;
7451 inp->inp_policyresult.policy_id = matched_policy->id;
7452 inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount;
7453 inp->inp_policyresult.flowhash = flowhash;
7454 inp->inp_policyresult.results.filter_control_unit = filter_control_unit;
7455 inp->inp_policyresult.results.route_rule_id = route_rule_id;
7456 inp->inp_policyresult.results.result = matched_policy->result;
7457 memcpy(&inp->inp_policyresult.results.result_parameter, &matched_policy->result_parameter, sizeof(matched_policy->result_parameter));
7458
7459 if (necp_socket_is_connected(inp) &&
7460 (matched_policy->result == NECP_KERNEL_POLICY_RESULT_DROP ||
7461 (matched_policy->result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL && !necp_socket_uses_interface(inp, matched_policy->result_parameter.tunnel_interface_index)))) {
7462 if (necp_debug) {
7463 NECPLOG(LOG_DEBUG, "Marking socket in state %d as defunct", so->so_state);
7464 }
7465 sosetdefunct(current_proc(), so, SHUTDOWN_SOCKET_LEVEL_NECP | SHUTDOWN_SOCKET_LEVEL_DISCONNECT_ALL, TRUE);
7466 } else if (necp_socket_is_connected(inp) &&
7467 matched_policy->result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL &&
7468 info.protocol == IPPROTO_TCP) {
7469 // Reset MSS on TCP socket if tunnel policy changes
7470 tcp_mtudisc(inp, 0);
7471 }
7472
7473 if (necp_debug > 1) {
7474 NECPLOG(LOG_DEBUG, "Socket Policy: %p (BoundInterface %d Proto %d) Policy %d Result %d Parameter %d", inp->inp_socket, info.bound_interface_index, info.protocol, matched_policy->id, matched_policy->result, matched_policy->result_parameter.tunnel_interface_index);
7475 }
7476 } else if (necp_drop_all_order > 0) {
7477 // Mark socket as a drop if drop-all is set
7478 inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
7479 inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount;
7480 inp->inp_policyresult.flowhash = flowhash;
7481 inp->inp_policyresult.results.filter_control_unit = 0;
7482 inp->inp_policyresult.results.route_rule_id = 0;
7483 inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_DROP;
7484 } else {
7485 // Mark non-matching socket so we don't re-check it
7486 inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
7487 inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount;
7488 inp->inp_policyresult.flowhash = flowhash;
7489 inp->inp_policyresult.results.filter_control_unit = filter_control_unit; // We may have matched a filter, so mark it!
7490 inp->inp_policyresult.results.route_rule_id = route_rule_id; // We may have matched a route rule, so mark it!
7491 inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_NONE;
7492 }
7493
7494 // Unlock
7495 lck_rw_done(&necp_kernel_policy_lock);
7496
7497 return (matched_policy_id);
7498 }
7499
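/*
 * Check a single IP-output policy against packet-level conditions
 * (bound interface, socket policy ID, last interface, protocol, and
 * local/remote addresses). Negated conditions invert the match; address
 * conditions may be expressed as a start/end range or a start/prefix
 * subnet.
 */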
7500 static bool
7501 necp_ip_output_check_policy(struct necp_kernel_ip_output_policy *kernel_policy, necp_kernel_policy_id socket_policy_id, u_int32_t bound_interface_index, u_int32_t last_interface_index, u_int16_t protocol, union necp_sockaddr_union *local, union necp_sockaddr_union *remote)
7502 {
7503 if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES)) {
7504 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
7505 u_int32_t cond_bound_interface_index = kernel_policy->cond_bound_interface ? kernel_policy->cond_bound_interface->if_index : 0;
7506 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
7507 if (bound_interface_index == cond_bound_interface_index) {
7508 // No match, matches forbidden interface
7509 return (FALSE);
7510 }
7511 } else {
7512 if (bound_interface_index != cond_bound_interface_index) {
7513 // No match, does not match required interface
7514 return (FALSE);
7515 }
7516 }
7517 } else {
7518 if (bound_interface_index != 0) {
7519 // No match, requires a non-bound packet
7520 return (FALSE);
7521 }
7522 }
7523 }
7524
7525 if (kernel_policy->condition_mask == 0) {
7526 return (TRUE);
7527 }
7528
7529 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID) {
7530 if (socket_policy_id != kernel_policy->cond_policy_id) {
7531 // No match, does not match required id
7532 return (FALSE);
7533 }
7534 }
7535
7536 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LAST_INTERFACE) {
7537 if (last_interface_index != kernel_policy->cond_last_interface_index) {
7538 return (FALSE);
7539 }
7540 }
7541
7542 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
7543 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
7544 if (protocol == kernel_policy->cond_protocol) {
7545 // No match, matches forbidden protocol
7546 return (FALSE);
7547 }
7548 } else {
7549 if (protocol != kernel_policy->cond_protocol) {
7550 // No match, does not match required protocol
7551 return (FALSE);
7552 }
7553 }
7554 }
7555
7556 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
7557 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
7558 bool inRange = necp_is_addr_in_range((struct sockaddr *)local, (struct sockaddr *)&kernel_policy->cond_local_start, (struct sockaddr *)&kernel_policy->cond_local_end);
7559 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
7560 if (inRange) {
7561 return (FALSE);
7562 }
7563 } else {
7564 if (!inRange) {
7565 return (FALSE);
7566 }
7567 }
7568 } else if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
7569 bool inSubnet = necp_is_addr_in_subnet((struct sockaddr *)local, (struct sockaddr *)&kernel_policy->cond_local_start, kernel_policy->cond_local_prefix);
7570 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
7571 if (inSubnet) {
7572 return (FALSE);
7573 }
7574 } else {
7575 if (!inSubnet) {
7576 return (FALSE);
7577 }
7578 }
7579 }
7580 }
7581
7582 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
7583 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
7584 bool inRange = necp_is_addr_in_range((struct sockaddr *)remote, (struct sockaddr *)&kernel_policy->cond_remote_start, (struct sockaddr *)&kernel_policy->cond_remote_end);
7585 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
7586 if (inRange) {
7587 return (FALSE);
7588 }
7589 } else {
7590 if (!inRange) {
7591 return (FALSE);
7592 }
7593 }
7594 } else if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
7595 bool inSubnet = necp_is_addr_in_subnet((struct sockaddr *)remote, (struct sockaddr *)&kernel_policy->cond_remote_start, kernel_policy->cond_remote_prefix);
7596 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
7597 if (inSubnet) {
7598 return (FALSE);
7599 }
7600 } else {
7601 if (!inSubnet) {
7602 return (FALSE);
7603 }
7604 }
7605 }
7606 }
7607
7608 return (TRUE);
7609 }
7610
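/*
 * IP-output policy walk. Uses the same drop-all / skip / session-order
 * handling as the socket policy walk above, but conditions are evaluated
 * per packet via necp_ip_output_check_policy().
 */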
7611 static inline struct necp_kernel_ip_output_policy *
7612 necp_ip_output_find_policy_match_locked(necp_kernel_policy_id socket_policy_id, u_int32_t bound_interface_index, u_int32_t last_interface_index, u_int16_t protocol, union necp_sockaddr_union *local_addr, union necp_sockaddr_union *remote_addr)
7613 {
7614 u_int32_t skip_order = 0;
7615 u_int32_t skip_session_order = 0;
7616 int i;
7617 struct necp_kernel_ip_output_policy *matched_policy = NULL;
7618 struct necp_kernel_ip_output_policy **policy_search_array = necp_kernel_ip_output_policies_map[NECP_IP_OUTPUT_MAP_ID_TO_BUCKET(socket_policy_id)];
7619 if (policy_search_array != NULL) {
7620 for (i = 0; policy_search_array[i] != NULL; i++) {
7621 if (necp_drop_all_order != 0 && policy_search_array[i]->session_order >= necp_drop_all_order) {
7622 // We've hit a drop all rule
7623 break;
7624 }
7625 if (skip_session_order && policy_search_array[i]->session_order >= skip_session_order) {
7626 // Done skipping
7627 skip_order = 0;
7628 skip_session_order = 0;
7629 }
7630 if (skip_order) {
7631 if (policy_search_array[i]->order < skip_order) {
7632 // Skip this policy
7633 continue;
7634 } else {
7635 // Done skipping
7636 skip_order = 0;
7637 skip_session_order = 0;
7638 }
7639 } else if (skip_session_order) {
7640 // Skip this policy
7641 continue;
7642 }
7643 if (necp_ip_output_check_policy(policy_search_array[i], socket_policy_id, bound_interface_index, last_interface_index, protocol, local_addr, remote_addr)) {
7644 // Passed all tests, found a match
7645 matched_policy = policy_search_array[i];
7646
7647 if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
7648 skip_order = policy_search_array[i]->result_parameter.skip_policy_order;
7649 skip_session_order = policy_search_array[i]->session_order + 1;
7650 continue;
7651 }
7652
7653 break;
7654 }
7655 }
7656 }
7657
7658 return (matched_policy);
7659 }
7660
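/*
 * Output bypass: loopback packets (when necp_pass_loopback is enabled),
 * keepalive packets (when necp_pass_keepalives is enabled), and internal
 * co-processor traffic are passed without policy evaluation.
 */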
7661 static inline bool
7662 necp_output_bypass(struct mbuf *packet)
7663 {
7664 if (necp_pass_loopback > 0 && necp_is_loopback(NULL, NULL, NULL, packet)) {
7665 return (true);
7666 }
7667 if (necp_pass_keepalives > 0 && necp_get_is_keepalive_from_packet(packet)) {
7668 return (true);
7669 }
7670 if (necp_is_intcoproc(NULL, packet)) {
7671 return (true);
7672 }
7673 return (false);
7674 }
7675
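/*
 * IPv4 output policy match: parse the IP header for the protocol and
 * addresses (plus TCP/UDP ports when present), then look up the matching
 * IP-output policy under the shared policy lock.
 */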
7676 necp_kernel_policy_id
7677 necp_ip_output_find_policy_match(struct mbuf *packet, int flags, struct ip_out_args *ipoa, necp_kernel_policy_result *result, necp_kernel_policy_result_parameter *result_parameter)
7678 {
7679 struct ip *ip = NULL;
7680 int hlen = sizeof(struct ip);
7681 necp_kernel_policy_id socket_policy_id = NECP_KERNEL_POLICY_ID_NONE;
7682 necp_kernel_policy_id matched_policy_id = NECP_KERNEL_POLICY_ID_NONE;
7683 struct necp_kernel_ip_output_policy *matched_policy = NULL;
7684 u_int16_t protocol = 0;
7685 u_int32_t bound_interface_index = 0;
7686 u_int32_t last_interface_index = 0;
7687 union necp_sockaddr_union local_addr;
7688 union necp_sockaddr_union remote_addr;
7689
7690 if (result) {
7691 *result = 0;
7692 }
7693
7694 if (result_parameter) {
7695 memset(result_parameter, 0, sizeof(*result_parameter));
7696 }
7697
7698 if (packet == NULL) {
7699 return (NECP_KERNEL_POLICY_ID_NONE);
7700 }
7701
7702 socket_policy_id = necp_get_policy_id_from_packet(packet);
7703
7704 // Exit early for an empty list
7705 // Don't lock. Possible race condition, but we don't want the performance hit.
7706 if (necp_kernel_ip_output_policies_count == 0 ||
7707 ((socket_policy_id == NECP_KERNEL_POLICY_ID_NONE) && necp_kernel_ip_output_policies_non_id_count == 0)) {
7708 if (necp_drop_all_order > 0) {
7709 matched_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
7710 if (result) {
7711 if (necp_output_bypass(packet)) {
7712 *result = NECP_KERNEL_POLICY_RESULT_PASS;
7713 } else {
7714 *result = NECP_KERNEL_POLICY_RESULT_DROP;
7715 }
7716 }
7717 }
7718
7719 return (matched_policy_id);
7720 }
7721
7722 // Check for bypass exception (loopback / keepalive / intcoproc traffic)
7723 if (necp_output_bypass(packet)) {
7724 matched_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
7725 if (result) {
7726 *result = NECP_KERNEL_POLICY_RESULT_PASS;
7727 }
7728 return (matched_policy_id);
7729 }
7730
7731 last_interface_index = necp_get_last_interface_index_from_packet(packet);
7732
7733 // Process packet to get relevant fields
7734 ip = mtod(packet, struct ip *);
7735 #ifdef _IP_VHL
7736 hlen = _IP_VHL_HL(ip->ip_vhl) << 2;
7737 #else
7738 hlen = ip->ip_hl << 2;
7739 #endif
7740
7741 protocol = ip->ip_p;
7742
7743 if ((flags & IP_OUTARGS) && (ipoa != NULL) &&
7744 (ipoa->ipoa_flags & IPOAF_BOUND_IF) &&
7745 ipoa->ipoa_boundif != IFSCOPE_NONE) {
7746 bound_interface_index = ipoa->ipoa_boundif;
7747 }
7748
7749 local_addr.sin.sin_family = AF_INET;
7750 local_addr.sin.sin_len = sizeof(struct sockaddr_in);
7751 memcpy(&local_addr.sin.sin_addr, &ip->ip_src, sizeof(ip->ip_src));
7752
7753 remote_addr.sin.sin_family = AF_INET;
7754 remote_addr.sin.sin_len = sizeof(struct sockaddr_in);
7755 memcpy(&((struct sockaddr_in *)&remote_addr)->sin_addr, &ip->ip_dst, sizeof(ip->ip_dst));
7756
7757 switch (protocol) {
7758 case IPPROTO_TCP: {
7759 struct tcphdr th;
7760 if ((int)(hlen + sizeof(th)) <= packet->m_pkthdr.len) {
7761 m_copydata(packet, hlen, sizeof(th), (u_int8_t *)&th);
7762 ((struct sockaddr_in *)&local_addr)->sin_port = th.th_sport;
7763 ((struct sockaddr_in *)&remote_addr)->sin_port = th.th_dport;
7764 }
7765 break;
7766 }
7767 case IPPROTO_UDP: {
7768 struct udphdr uh;
7769 if ((int)(hlen + sizeof(uh)) <= packet->m_pkthdr.len) {
7770 m_copydata(packet, hlen, sizeof(uh), (u_int8_t *)&uh);
7771 ((struct sockaddr_in *)&local_addr)->sin_port = uh.uh_sport;
7772 ((struct sockaddr_in *)&remote_addr)->sin_port = uh.uh_dport;
7773 }
7774 break;
7775 }
7776 default: {
7777 ((struct sockaddr_in *)&local_addr)->sin_port = 0;
7778 ((struct sockaddr_in *)&remote_addr)->sin_port = 0;
7779 break;
7780 }
7781 }
7782
7783 // Match packet to policy
7784 lck_rw_lock_shared(&necp_kernel_policy_lock);
7785 matched_policy = necp_ip_output_find_policy_match_locked(socket_policy_id, bound_interface_index, last_interface_index, protocol, &local_addr, &remote_addr);
7786 if (matched_policy) {
7787 matched_policy_id = matched_policy->id;
7788 if (result) {
7789 *result = matched_policy->result;
7790 }
7791
7792 if (result_parameter) {
7793 memcpy(result_parameter, &matched_policy->result_parameter, sizeof(matched_policy->result_parameter));
7794 }
7795
7796 if (necp_debug > 1) {
7797 NECPLOG(LOG_DEBUG, "IP Output: (ID %d BoundInterface %d LastInterface %d Proto %d) Policy %d Result %d Parameter %d", socket_policy_id, bound_interface_index, last_interface_index, protocol, matched_policy->id, matched_policy->result, matched_policy->result_parameter.tunnel_interface_index);
7798 }
7799 } else if (necp_drop_all_order > 0) {
7800 matched_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
7801 if (result) {
7802 *result = NECP_KERNEL_POLICY_RESULT_DROP;
7803 }
7804 }
7805
7806 lck_rw_done(&necp_kernel_policy_lock);
7807
7808 return (matched_policy_id);
7809 }
7810
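/*
 * IPv6 output policy match: same flow as the IPv4 version above, except
 * that the transport protocol and ports are located with ip6_lasthdr()
 * to skip any extension headers.
 */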
7811 necp_kernel_policy_id
7812 necp_ip6_output_find_policy_match(struct mbuf *packet, int flags, struct ip6_out_args *ip6oa, necp_kernel_policy_result *result, necp_kernel_policy_result_parameter *result_parameter)
7813 {
7814 struct ip6_hdr *ip6 = NULL;
7815 int next = -1;
7816 int offset = 0;
7817 necp_kernel_policy_id socket_policy_id = NECP_KERNEL_POLICY_ID_NONE;
7818 necp_kernel_policy_id matched_policy_id = NECP_KERNEL_POLICY_ID_NONE;
7819 struct necp_kernel_ip_output_policy *matched_policy = NULL;
7820 u_int16_t protocol = 0;
7821 u_int32_t bound_interface_index = 0;
7822 u_int32_t last_interface_index = 0;
7823 union necp_sockaddr_union local_addr;
7824 union necp_sockaddr_union remote_addr;
7825
7826 if (result) {
7827 *result = 0;
7828 }
7829
7830 if (result_parameter) {
7831 memset(result_parameter, 0, sizeof(*result_parameter));
7832 }
7833
7834 if (packet == NULL) {
7835 return (NECP_KERNEL_POLICY_ID_NONE);
7836 }
7837
7838 socket_policy_id = necp_get_policy_id_from_packet(packet);
7839
7840 // Exit early for an empty list
7841 // Don't lock. Possible race condition, but we don't want the performance hit.
7842 if (necp_kernel_ip_output_policies_count == 0 ||
7843 ((socket_policy_id == NECP_KERNEL_POLICY_ID_NONE) && necp_kernel_ip_output_policies_non_id_count == 0)) {
7844 if (necp_drop_all_order > 0) {
7845 matched_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
7846 if (result) {
7847 if (necp_output_bypass(packet)) {
7848 *result = NECP_KERNEL_POLICY_RESULT_PASS;
7849 } else {
7850 *result = NECP_KERNEL_POLICY_RESULT_DROP;
7851 }
7852 }
7853 }
7854
7855 return (matched_policy_id);
7856 }
7857
7858 // Check for bypass exception (loopback / keepalive / intcoproc traffic)
7859 if (necp_output_bypass(packet)) {
7860 matched_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
7861 if (result) {
7862 *result = NECP_KERNEL_POLICY_RESULT_PASS;
7863 }
7864 return (matched_policy_id);
7865 }
7866
7867 last_interface_index = necp_get_last_interface_index_from_packet(packet);
7868
7869 // Process packet to get relevant fields
7870 ip6 = mtod(packet, struct ip6_hdr *);
7871
7872 if ((flags & IPV6_OUTARGS) && (ip6oa != NULL) &&
7873 (ip6oa->ip6oa_flags & IP6OAF_BOUND_IF) &&
7874 ip6oa->ip6oa_boundif != IFSCOPE_NONE) {
7875 bound_interface_index = ip6oa->ip6oa_boundif;
7876 }
7877
7878 ((struct sockaddr_in6 *)&local_addr)->sin6_family = AF_INET6;
7879 ((struct sockaddr_in6 *)&local_addr)->sin6_len = sizeof(struct sockaddr_in6);
7880 memcpy(&((struct sockaddr_in6 *)&local_addr)->sin6_addr, &ip6->ip6_src, sizeof(ip6->ip6_src));
7881
7882 ((struct sockaddr_in6 *)&remote_addr)->sin6_family = AF_INET6;
7883 ((struct sockaddr_in6 *)&remote_addr)->sin6_len = sizeof(struct sockaddr_in6);
7884 memcpy(&((struct sockaddr_in6 *)&remote_addr)->sin6_addr, &ip6->ip6_dst, sizeof(ip6->ip6_dst));
7885
7886 offset = ip6_lasthdr(packet, 0, IPPROTO_IPV6, &next);
7887 if (offset >= 0 && packet->m_pkthdr.len >= offset) {
7888 protocol = next;
7889 switch (protocol) {
7890 case IPPROTO_TCP: {
7891 struct tcphdr th;
7892 if ((int)(offset + sizeof(th)) <= packet->m_pkthdr.len) {
7893 m_copydata(packet, offset, sizeof(th), (u_int8_t *)&th);
7894 ((struct sockaddr_in6 *)&local_addr)->sin6_port = th.th_sport;
7895 ((struct sockaddr_in6 *)&remote_addr)->sin6_port = th.th_dport;
7896 }
7897 break;
7898 }
7899 case IPPROTO_UDP: {
7900 struct udphdr uh;
7901 if ((int)(offset + sizeof(uh)) <= packet->m_pkthdr.len) {
7902 m_copydata(packet, offset, sizeof(uh), (u_int8_t *)&uh);
7903 ((struct sockaddr_in6 *)&local_addr)->sin6_port = uh.uh_sport;
7904 ((struct sockaddr_in6 *)&remote_addr)->sin6_port = uh.uh_dport;
7905 }
7906 break;
7907 }
7908 default: {
7909 ((struct sockaddr_in6 *)&local_addr)->sin6_port = 0;
7910 ((struct sockaddr_in6 *)&remote_addr)->sin6_port = 0;
7911 break;
7912 }
7913 }
7914 }
7915
7916 // Match packet to policy
7917 lck_rw_lock_shared(&necp_kernel_policy_lock);
7918 matched_policy = necp_ip_output_find_policy_match_locked(socket_policy_id, bound_interface_index, last_interface_index, protocol, &local_addr, &remote_addr);
7919 if (matched_policy) {
7920 matched_policy_id = matched_policy->id;
7921 if (result) {
7922 *result = matched_policy->result;
7923 }
7924
7925 if (result_parameter) {
7926 memcpy(result_parameter, &matched_policy->result_parameter, sizeof(matched_policy->result_parameter));
7927 }
7928
7929 if (necp_debug > 1) {
7930 NECPLOG(LOG_DEBUG, "IP6 Output: (ID %d BoundInterface %d LastInterface %d Proto %d) Policy %d Result %d Parameter %d", socket_policy_id, bound_interface_index, last_interface_index, protocol, matched_policy->id, matched_policy->result, matched_policy->result_parameter.tunnel_interface_index);
7931 }
7932 } else if (necp_drop_all_order > 0) {
7933 matched_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
7934 if (result) {
7935 *result = NECP_KERNEL_POLICY_RESULT_DROP;
7936 }
7937 }
7938
7939 lck_rw_done(&necp_kernel_policy_lock);
7940
7941 return (matched_policy_id);
7942 }
7943
7944 // Utilities
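/*
 * necp_is_addr_in_range: TRUE when range_start <= addr <= range_end
 * according to necp_addr_compare() (ports included in the comparison).
 */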
7945 static bool
7946 necp_is_addr_in_range(struct sockaddr *addr, struct sockaddr *range_start, struct sockaddr *range_end)
7947 {
7948 int cmp = 0;
7949
7950 if (addr == NULL || range_start == NULL || range_end == NULL) {
7951 return (FALSE);
7952 }
7953
7954 /* Must be greater than or equal to start */
7955 cmp = necp_addr_compare(addr, range_start, 1);
7956 if (cmp != 0 && cmp != 1) {
7957 return (FALSE);
7958 }
7959
7960 /* Must be less than or equal to end */
7961 cmp = necp_addr_compare(addr, range_end, 1);
7962 if (cmp != 0 && cmp != -1) {
7963 return (FALSE);
7964 }
7965
7966 return (TRUE);
7967 }
7968
7969 static bool
7970 necp_is_range_in_range(struct sockaddr *inner_range_start, struct sockaddr *inner_range_end, struct sockaddr *range_start, struct sockaddr *range_end)
7971 {
7972 int cmp = 0;
7973
7974 if (inner_range_start == NULL || inner_range_end == NULL || range_start == NULL || range_end == NULL) {
7975 return (FALSE);
7976 }
7977
7978 /* Must be greater than or equal to start */
7979 cmp = necp_addr_compare(inner_range_start, range_start, 1);
7980 if (cmp != 0 && cmp != 1) {
7981 return (FALSE);
7982 }
7983
7984 /* Must be less than or equal to end */
7985 cmp = necp_addr_compare(inner_range_end, range_end, 1);
7986 if (cmp != 0 && cmp != -1) {
7987 return (FALSE);
7988 }
7989
7990 return (TRUE);
7991 }
7992
7993 static bool
7994 necp_is_addr_in_subnet(struct sockaddr *addr, struct sockaddr *subnet_addr, u_int8_t subnet_prefix)
7995 {
7996 if (addr == NULL || subnet_addr == NULL) {
7997 return (FALSE);
7998 }
7999
8000 if (addr->sa_family != subnet_addr->sa_family || addr->sa_len != subnet_addr->sa_len) {
8001 return (FALSE);
8002 }
8003
8004 switch (addr->sa_family) {
8005 case AF_INET: {
8006 if (satosin(subnet_addr)->sin_port != 0 &&
8007 satosin(addr)->sin_port != satosin(subnet_addr)->sin_port) {
8008 return (FALSE);
8009 }
8010 return (necp_buffer_compare_with_bit_prefix((u_int8_t *)&satosin(addr)->sin_addr, (u_int8_t *)&satosin(subnet_addr)->sin_addr, subnet_prefix));
8011 }
8012 case AF_INET6: {
8013 if (satosin6(subnet_addr)->sin6_port != 0 &&
8014 satosin6(addr)->sin6_port != satosin6(subnet_addr)->sin6_port) {
8015 return (FALSE);
8016 }
8017 if (satosin6(addr)->sin6_scope_id &&
8018 satosin6(subnet_addr)->sin6_scope_id &&
8019 satosin6(addr)->sin6_scope_id != satosin6(subnet_addr)->sin6_scope_id) {
8020 return (FALSE);
8021 }
8022 return (necp_buffer_compare_with_bit_prefix((u_int8_t *)&satosin6(addr)->sin6_addr, (u_int8_t *)&satosin6(subnet_addr)->sin6_addr, subnet_prefix));
8023 }
8024 default: {
8025 return (FALSE);
8026 }
8027 }
8028
8029 return (FALSE);
8030 }
8031
8032 /*
8033 * Return values:
8034 * -1: sa1 < sa2
8035 * 0: sa1 == sa2
8036 * 1: sa1 > sa2
8037 * 2: Not comparable or error
8038 */
8039 static int
8040 necp_addr_compare(struct sockaddr *sa1, struct sockaddr *sa2, int check_port)
8041 {
8042 int result = 0;
8043 int port_result = 0;
8044
8045 if (sa1->sa_family != sa2->sa_family || sa1->sa_len != sa2->sa_len) {
8046 return (2);
8047 }
8048
8049 if (sa1->sa_len == 0) {
8050 return (0);
8051 }
8052
8053 switch (sa1->sa_family) {
8054 case AF_INET: {
8055 if (sa1->sa_len != sizeof(struct sockaddr_in)) {
8056 return (2);
8057 }
8058
8059 result = memcmp(&satosin(sa1)->sin_addr.s_addr, &satosin(sa2)->sin_addr.s_addr, sizeof(satosin(sa1)->sin_addr.s_addr));
8060
8061 if (check_port) {
8062 if (satosin(sa1)->sin_port < satosin(sa2)->sin_port) {
8063 port_result = -1;
8064 } else if (satosin(sa1)->sin_port > satosin(sa2)->sin_port) {
8065 port_result = 1;
8066 }
8067
8068 if (result == 0) {
8069 result = port_result;
8070 } else if ((result > 0 && port_result < 0) || (result < 0 && port_result > 0)) {
8071 return (2);
8072 }
8073 }
8074
8075 break;
8076 }
8077 case AF_INET6: {
8078 if (sa1->sa_len != sizeof(struct sockaddr_in6)) {
8079 return (2);
8080 }
8081
8082 if (satosin6(sa1)->sin6_scope_id != satosin6(sa2)->sin6_scope_id) {
8083 return (2);
8084 }
8085
8086 result = memcmp(&satosin6(sa1)->sin6_addr.s6_addr[0], &satosin6(sa2)->sin6_addr.s6_addr[0], sizeof(struct in6_addr));
8087
8088 if (check_port) {
8089 if (satosin6(sa1)->sin6_port < satosin6(sa2)->sin6_port) {
8090 port_result = -1;
8091 } else if (satosin6(sa1)->sin6_port > satosin6(sa2)->sin6_port) {
8092 port_result = 1;
8093 }
8094
8095 if (result == 0) {
8096 result = port_result;
8097 } else if ((result > 0 && port_result < 0) || (result < 0 && port_result > 0)) {
8098 return (2);
8099 }
8100 }
8101
8102 break;
8103 }
8104 default: {
8105 result = memcmp(sa1, sa2, sa1->sa_len);
8106 break;
8107 }
8108 }
8109
8110 if (result < 0) {
8111 result = (-1);
8112 } else if (result > 0) {
8113 result = (1);
8114 }
8115
8116 return (result);
8117 }
8118
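/*
 * Compare two buffers up to a prefix length in bits: whole bytes are
 * compared first, then the remaining high-order bits are masked and
 * compared. For example, bits = 20 compares the first two bytes plus the
 * top four bits of the third byte.
 */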
8119 static bool
8120 necp_buffer_compare_with_bit_prefix(u_int8_t *p1, u_int8_t *p2, u_int32_t bits)
8121 {
8122 u_int8_t mask;
8123
8124 /* Handle null pointers */
8125 if (p1 == NULL || p2 == NULL) {
8126 return (p1 == p2);
8127 }
8128
8129 while (bits >= 8) {
8130 if (*p1++ != *p2++) {
8131 return (FALSE);
8132 }
8133 bits -= 8;
8134 }
8135
8136 if (bits > 0) {
8137 mask = ~((1<<(8-bits))-1);
8138 if ((*p1 & mask) != (*p2 & mask)) {
8139 return (FALSE);
8140 }
8141 }
8142 return (TRUE);
8143 }
8144
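/*
 * Evaluate one route rule for QoS marking on an interface: per-interface
 * exceptions that request marking are honored first, then the
 * interface-type actions (cellular / Wi-Fi / wired / expensive),
 * otherwise the rule's default action decides.
 */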
8145 static bool
8146 necp_update_qos_marking(struct ifnet *ifp, u_int32_t route_rule_id)
8147 {
8148 bool qos_marking = FALSE;
8149 int exception_index = 0;
8150 struct necp_route_rule *route_rule = NULL;
8151
8152 route_rule = necp_lookup_route_rule_locked(&necp_route_rules, route_rule_id);
8153 if (route_rule == NULL) {
8154 qos_marking = FALSE;
8155 goto done;
8156 }
8157
8158 qos_marking = (route_rule->default_action == NECP_ROUTE_RULE_QOS_MARKING) ? TRUE : FALSE;
8159
8160 if (ifp == NULL) {
8161 goto done;
8162 }
8163
8164 for (exception_index = 0; exception_index < MAX_ROUTE_RULE_INTERFACES; exception_index++) {
8165 if (route_rule->exception_if_indices[exception_index] == 0) {
8166 break;
8167 }
8168 if (route_rule->exception_if_actions[exception_index] != NECP_ROUTE_RULE_QOS_MARKING) {
8169 continue;
8170 }
8171 if (route_rule->exception_if_indices[exception_index] == ifp->if_index) {
8172 qos_marking = TRUE;
8173 if (necp_debug > 2) {
8174 NECPLOG(LOG_DEBUG, "QoS Marking: Interface match %d for Rule %d Allowed %d",
8175 route_rule->exception_if_indices[exception_index], route_rule_id, qos_marking);
8176 }
8177 goto done;
8178 }
8179 }
8180
8181 if ((route_rule->cellular_action == NECP_ROUTE_RULE_QOS_MARKING && IFNET_IS_CELLULAR(ifp)) ||
8182 (route_rule->wifi_action == NECP_ROUTE_RULE_QOS_MARKING && IFNET_IS_WIFI(ifp)) ||
8183 (route_rule->wired_action == NECP_ROUTE_RULE_QOS_MARKING && IFNET_IS_WIRED(ifp)) ||
8184 (route_rule->expensive_action == NECP_ROUTE_RULE_QOS_MARKING && IFNET_IS_EXPENSIVE(ifp))) {
8185 qos_marking = TRUE;
8186 if (necp_debug > 2) {
8187 NECPLOG(LOG_DEBUG, "QoS Marking: C:%d WF:%d W:%d E:%d for Rule %d Allowed %d",
8188 route_rule->cellular_action, route_rule->wifi_action, route_rule->wired_action,
8189 route_rule->expensive_action, route_rule_id, qos_marking);
8190 }
8191 goto done;
8192 }
8193 done:
8194 if (necp_debug > 1) {
8195 NECPLOG(LOG_DEBUG, "QoS Marking: Rule %d ifp %s Allowed %d",
8196 route_rule_id, ifp ? ifp->if_xname : "", qos_marking);
8197 }
8198 return (qos_marking);
8199 }
8200
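/*
 * Update SOF1_QOSMARKING_ALLOWED on the socket from its route rule(s).
 * The evaluation is skipped while the cached qos_marking_gencount still
 * matches the current policy generation. Note that, as written, the
 * interface argument is cleared and ifp is always taken from the route.
 */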
8201 void
8202 necp_socket_update_qos_marking(struct inpcb *inp, struct rtentry *route, struct ifnet *interface, u_int32_t route_rule_id)
8203 {
8204 bool qos_marking = FALSE;
8205 struct ifnet *ifp = interface = NULL;
8206
8207 if (net_qos_policy_restricted == 0) {
8208 return;
8209 }
8210 if (inp->inp_socket == NULL) {
8211 return;
8212 }
8213 if ((inp->inp_socket->so_flags1 & SOF1_QOSMARKING_POLICY_OVERRIDE)) {
8214 return;
8215 }
8216 /*
8217 * This check is racy, but we don't want the performance hit of taking necp_kernel_policy_lock
8218 */
8219 if (inp->inp_policyresult.results.qos_marking_gencount == necp_kernel_socket_policies_gencount) {
8220 return;
8221 }
8222
8223 lck_rw_lock_shared(&necp_kernel_policy_lock);
8224
8225 if (ifp == NULL && route != NULL) {
8226 ifp = route->rt_ifp;
8227 }
8228 /*
8229 * By default, until we have an interface, do not mark; the QoS marking policy will be reevaluated later
8230 */
8231 if (ifp == NULL || route_rule_id == 0) {
8232 qos_marking = FALSE;
8233 goto done;
8234 }
8235
8236 if (ROUTE_RULE_IS_AGGREGATE(route_rule_id)) {
8237 struct necp_aggregate_route_rule *aggregate_route_rule = necp_lookup_aggregate_route_rule_locked(route_rule_id);
8238 if (aggregate_route_rule != NULL) {
8239 int index = 0;
8240 for (index = 0; index < MAX_AGGREGATE_ROUTE_RULES; index++) {
8241 u_int32_t sub_route_rule_id = aggregate_route_rule->rule_ids[index];
8242 if (sub_route_rule_id == 0) {
8243 break;
8244 }
8245 qos_marking = necp_update_qos_marking(ifp, sub_route_rule_id);
8246 if (qos_marking == TRUE) {
8247 break;
8248 }
8249 }
8250 }
8251 } else {
8252 qos_marking = necp_update_qos_marking(ifp, route_rule_id);
8253 }
8254 /*
8255 * Now that we have an interface, remember the gencount
8256 */
8257 inp->inp_policyresult.results.qos_marking_gencount = necp_kernel_socket_policies_gencount;
8258
8259 done:
8260 lck_rw_done(&necp_kernel_policy_lock);
8261
8262 if (qos_marking == TRUE) {
8263 inp->inp_socket->so_flags1 |= SOF1_QOSMARKING_ALLOWED;
8264 } else {
8265 inp->inp_socket->so_flags1 &= ~SOF1_QOSMARKING_ALLOWED;
8266 }
8267 }
8268
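/*
 * Evaluate a single route rule against a route/interface. Precedence:
 * per-interface exception actions (including the delegated interface)
 * win outright; otherwise interface-type actions are aggregated with
 * deny beating allow; otherwise the rule's default action applies.
 */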
8269 static bool
8270 necp_route_is_allowed_inner(struct rtentry *route, struct ifnet *ifp, u_int32_t route_rule_id, u_int32_t *interface_type_denied)
8271 {
8272 bool default_is_allowed = TRUE;
8273 u_int8_t type_aggregate_action = NECP_ROUTE_RULE_NONE;
8274 int exception_index = 0;
8275 struct ifnet *delegated_ifp = NULL;
8276 struct necp_route_rule *route_rule = NULL;
8277
8278 route_rule = necp_lookup_route_rule_locked(&necp_route_rules, route_rule_id);
8279 if (route_rule == NULL) {
8280 return (TRUE);
8281 }
8282
8283 default_is_allowed = (route_rule->default_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? FALSE : TRUE;
8284 if (ifp == NULL) {
8285 ifp = route->rt_ifp;
8286 }
8287 if (ifp == NULL) {
8288 if (necp_debug > 1 && !default_is_allowed) {
8289 NECPLOG(LOG_DEBUG, "Route Allowed: No interface for route, using default for Rule %d Allowed %d", route_rule_id, default_is_allowed);
8290 }
8291 return (default_is_allowed);
8292 }
8293
8294 delegated_ifp = ifp->if_delegated.ifp;
8295 for (exception_index = 0; exception_index < MAX_ROUTE_RULE_INTERFACES; exception_index++) {
8296 if (route_rule->exception_if_indices[exception_index] == 0) {
8297 break;
8298 }
8299 if (IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(route_rule->exception_if_actions[exception_index]) == FALSE) {
8300 continue;
8301 }
8302 if (route_rule->exception_if_indices[exception_index] == ifp->if_index ||
8303 (delegated_ifp != NULL && route_rule->exception_if_indices[exception_index] == delegated_ifp->if_index)) {
8304 if (necp_debug > 1) {
8305 NECPLOG(LOG_DEBUG, "Route Allowed: Interface match %d for Rule %d Allowed %d", route_rule->exception_if_indices[exception_index], route_rule_id, ((route_rule->exception_if_actions[exception_index] == NECP_ROUTE_RULE_DENY_INTERFACE) ? FALSE : TRUE));
8306 }
8307 return ((route_rule->exception_if_actions[exception_index] == NECP_ROUTE_RULE_DENY_INTERFACE) ? FALSE : TRUE);
8308 }
8309 }
8310
8311 if (IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(route_rule->cellular_action) &&
8312 IFNET_IS_CELLULAR(ifp)) {
8313 if (interface_type_denied != NULL) {
8314 *interface_type_denied = IFRTYPE_FUNCTIONAL_CELLULAR;
8315 }
8316 if (type_aggregate_action == NECP_ROUTE_RULE_NONE ||
8317 (type_aggregate_action == NECP_ROUTE_RULE_ALLOW_INTERFACE &&
8318 route_rule->cellular_action == NECP_ROUTE_RULE_DENY_INTERFACE)) {
8319 // Deny wins if there is a conflict
8320 type_aggregate_action = route_rule->cellular_action;
8321 }
8322 }
8323
8324 if (IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(route_rule->wifi_action) &&
8325 IFNET_IS_WIFI(ifp)) {
8326 if (interface_type_denied != NULL) {
8327 *interface_type_denied = IFRTYPE_FUNCTIONAL_WIFI_INFRA;
8328 }
8329 if (type_aggregate_action == NECP_ROUTE_RULE_NONE ||
8330 (type_aggregate_action == NECP_ROUTE_RULE_ALLOW_INTERFACE &&
8331 route_rule->wifi_action == NECP_ROUTE_RULE_DENY_INTERFACE)) {
8332 // Deny wins if there is a conflict
8333 type_aggregate_action = route_rule->wifi_action;
8334 }
8335 }
8336
8337 if (IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(route_rule->wired_action) &&
8338 IFNET_IS_WIRED(ifp)) {
8339 if (interface_type_denied != NULL) {
8340 *interface_type_denied = IFRTYPE_FUNCTIONAL_WIRED;
8341 }
8342 if (type_aggregate_action == NECP_ROUTE_RULE_NONE ||
8343 (type_aggregate_action == NECP_ROUTE_RULE_ALLOW_INTERFACE &&
8344 route_rule->wired_action == NECP_ROUTE_RULE_DENY_INTERFACE)) {
8345 // Deny wins if there is a conflict
8346 type_aggregate_action = route_rule->wired_action;
8347 }
8348 }
8349
8350 if (IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(route_rule->expensive_action) &&
8351 IFNET_IS_EXPENSIVE(ifp)) {
8352 if (type_aggregate_action == NECP_ROUTE_RULE_NONE ||
8353 (type_aggregate_action == NECP_ROUTE_RULE_ALLOW_INTERFACE &&
8354 route_rule->expensive_action == NECP_ROUTE_RULE_DENY_INTERFACE)) {
8355 // Deny wins if there is a conflict
8356 type_aggregate_action = route_rule->expensive_action;
8357 }
8358 }
8359
8360 if (type_aggregate_action != NECP_ROUTE_RULE_NONE) {
8361 if (necp_debug > 1) {
8362 NECPLOG(LOG_DEBUG, "Route Allowed: C:%d WF:%d W:%d E:%d for Rule %d Allowed %d", route_rule->cellular_action, route_rule->wifi_action, route_rule->wired_action, route_rule->expensive_action, route_rule_id, ((type_aggregate_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? FALSE : TRUE));
8363 }
8364 return ((type_aggregate_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? FALSE : TRUE);
8365 }
8366
8367 if (necp_debug > 1 && !default_is_allowed) {
8368 NECPLOG(LOG_DEBUG, "Route Allowed: Using default for Rule %d Allowed %d", route_rule_id, default_is_allowed);
8369 }
8370 return (default_is_allowed);
8371 }
8372
8373 static bool
8374 necp_route_is_allowed(struct rtentry *route, struct ifnet *interface, u_int32_t route_rule_id, u_int32_t *interface_type_denied)
8375 {
8376 if ((route == NULL && interface == NULL) || route_rule_id == 0) {
8377 if (necp_debug > 1) {
8378 NECPLOG(LOG_DEBUG, "Route Allowed: no route or interface, Rule %d Allowed %d", route_rule_id, TRUE);
8379 }
8380 return (TRUE);
8381 }
8382
8383 if (ROUTE_RULE_IS_AGGREGATE(route_rule_id)) {
8384 struct necp_aggregate_route_rule *aggregate_route_rule = necp_lookup_aggregate_route_rule_locked(route_rule_id);
8385 if (aggregate_route_rule != NULL) {
8386 int index = 0;
8387 for (index = 0; index < MAX_AGGREGATE_ROUTE_RULES; index++) {
8388 u_int32_t sub_route_rule_id = aggregate_route_rule->rule_ids[index];
8389 if (sub_route_rule_id == 0) {
8390 break;
8391 }
8392 if (!necp_route_is_allowed_inner(route, interface, sub_route_rule_id, interface_type_denied)) {
8393 return (FALSE);
8394 }
8395 }
8396 }
8397 } else {
8398 return (necp_route_is_allowed_inner(route, interface, route_rule_id, interface_type_denied));
8399 }
8400
8401 return (TRUE);
8402 }
8403
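/*
 * Check whether a packet tagged with a route rule may be sent over the
 * given interface. Hypothetical caller sketch (not from this file):
 *
 *	if (!necp_packet_is_allowed_over_interface(m, ifp)) {
 *		// drop the packet rather than transmitting it
 *	}
 */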
8404 bool
8405 necp_packet_is_allowed_over_interface(struct mbuf *packet, struct ifnet *interface)
8406 {
8407 bool is_allowed = TRUE;
8408 u_int32_t route_rule_id = necp_get_route_rule_id_from_packet(packet);
8409 if (route_rule_id != 0 &&
8410 interface != NULL) {
8411 lck_rw_lock_shared(&necp_kernel_policy_lock);
8412 is_allowed = necp_route_is_allowed(NULL, interface, route_rule_id, NULL);
8413 lck_rw_done(&necp_kernel_policy_lock);
8414 }
8415 return (is_allowed);
8416 }
8417
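/*
 * Traffic is blocked only when some agent in the list is registered but
 * not active and is not marked voluntary; unregistered or voluntary
 * agents do not block traffic.
 */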
8418 static bool
8419 necp_netagents_allow_traffic(u_int32_t *netagent_ids, size_t netagent_id_count)
8420 {
8421 size_t netagent_cursor;
8422 for (netagent_cursor = 0; netagent_cursor < netagent_id_count; netagent_cursor++) {
8423 struct necp_uuid_id_mapping *mapping = NULL;
8424 u_int32_t netagent_id = netagent_ids[netagent_cursor];
8425 if (netagent_id == 0) {
8426 break;
8427 }
8428 mapping = necp_uuid_lookup_uuid_with_service_id_locked(netagent_id);
8429 if (mapping != NULL) {
8430 u_int32_t agent_flags = 0;
8431 agent_flags = netagent_get_flags(mapping->uuid);
8432 if (agent_flags & NETAGENT_FLAG_REGISTERED) {
8433 if (agent_flags & NETAGENT_FLAG_ACTIVE) {
8434 continue;
8435 } else if ((agent_flags & NETAGENT_FLAG_VOLUNTARY) == 0) {
8436 return (FALSE);
8437 }
8438 }
8439 }
8440 }
8441 return (TRUE);
8442 }
8443
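/*
 * Send/receive permission check for a socket, optionally with override
 * addresses and a specific interface. The cached inp_policyresult is
 * reused when the policy generation and flowhash are unchanged; otherwise
 * the policy match is re-run. Drops, socket diverts, IP tunnels scoped to
 * a different interface, scoped service results, disallowed route rules,
 * and inactive required agents all deny the traffic.
 */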
8444 static bool
8445 necp_socket_is_allowed_to_send_recv_internal(struct inpcb *inp, struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr, ifnet_t interface, necp_kernel_policy_id *return_policy_id, u_int32_t *return_route_rule_id)
8446 {
8447 u_int32_t verifyifindex = interface ? interface->if_index : 0;
8448 bool allowed_to_receive = TRUE;
8449 struct necp_socket_info info;
8450 u_int32_t flowhash = 0;
8451 necp_kernel_policy_result service_action = 0;
8452 necp_kernel_policy_service service = { 0, 0 };
8453 u_int32_t route_rule_id = 0;
8454 struct rtentry *route = NULL;
8455 u_int32_t interface_type_denied = IFRTYPE_FUNCTIONAL_UNKNOWN;
8456
8457 u_int32_t netagent_ids[NECP_MAX_NETAGENTS];
8458 memset(&netagent_ids, 0, sizeof(netagent_ids));
8459
8460 if (return_policy_id) {
8461 *return_policy_id = NECP_KERNEL_POLICY_ID_NONE;
8462 }
8463 if (return_route_rule_id) {
8464 *return_route_rule_id = 0;
8465 }
8466
8467 if (inp == NULL) {
8468 goto done;
8469 }
8470
8471 route = inp->inp_route.ro_rt;
8472
8473 // Don't lock. Possible race condition, but we don't want the performance hit.
8474 if (necp_kernel_socket_policies_count == 0 ||
8475 (!(inp->inp_flags2 & INP2_WANT_APP_POLICY) && necp_kernel_socket_policies_non_app_count == 0)) {
8476 if (necp_drop_all_order > 0) {
8477 if (necp_socket_bypass(override_local_addr, override_remote_addr, inp)) {
8478 allowed_to_receive = TRUE;
8479 } else {
8480 allowed_to_receive = FALSE;
8481 }
8482 }
8483 goto done;
8484 }
8485
8486 // If this socket is connected, or we are not taking addresses into account, try to reuse last result
8487 if ((necp_socket_is_connected(inp) || (override_local_addr == NULL && override_remote_addr == NULL)) && inp->inp_policyresult.policy_id != NECP_KERNEL_POLICY_ID_NONE) {
8488 bool policies_have_changed = FALSE;
8489 bool route_allowed = TRUE;
8490
8491 if (inp->inp_policyresult.policy_gencount != necp_kernel_socket_policies_gencount) {
8492 policies_have_changed = TRUE;
8493 } else {
8494 if (inp->inp_policyresult.results.route_rule_id != 0) {
8495 lck_rw_lock_shared(&necp_kernel_policy_lock);
8496 if (!necp_route_is_allowed(route, interface, inp->inp_policyresult.results.route_rule_id, &interface_type_denied)) {
8497 route_allowed = FALSE;
8498 }
8499 lck_rw_done(&necp_kernel_policy_lock);
8500 }
8501 }
8502
8503 if (!policies_have_changed) {
8504 if (!route_allowed ||
8505 inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_DROP ||
8506 inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT ||
8507 (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL && interface &&
8508 inp->inp_policyresult.results.result_parameter.tunnel_interface_index != verifyifindex)) {
8509 allowed_to_receive = FALSE;
8510 } else {
8511 if (return_policy_id) {
8512 *return_policy_id = inp->inp_policyresult.policy_id;
8513 }
8514 if (return_route_rule_id) {
8515 *return_route_rule_id = inp->inp_policyresult.results.route_rule_id;
8516 }
8517 }
8518 goto done;
8519 }
8520 }
8521
8522 // Check for bypass exception (loopback / intcoproc traffic)
8523 if (necp_socket_bypass(override_local_addr, override_remote_addr, inp)) {
8524 allowed_to_receive = TRUE;
8525 goto done;
8526 }
8527
8528 // Actually calculate policy result
8529 lck_rw_lock_shared(&necp_kernel_policy_lock);
8530 necp_socket_fillout_info_locked(inp, override_local_addr, override_remote_addr, 0, &info);
8531
8532 flowhash = necp_socket_calc_flowhash_locked(&info);
8533 if (inp->inp_policyresult.policy_id != NECP_KERNEL_POLICY_ID_NONE &&
8534 inp->inp_policyresult.policy_gencount == necp_kernel_socket_policies_gencount &&
8535 inp->inp_policyresult.flowhash == flowhash) {
8536 if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_DROP ||
8537 inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT ||
8538 (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL && interface &&
8539 inp->inp_policyresult.results.result_parameter.tunnel_interface_index != verifyifindex) ||
8540 (inp->inp_policyresult.results.route_rule_id != 0 &&
8541 !necp_route_is_allowed(route, interface, inp->inp_policyresult.results.route_rule_id, &interface_type_denied))) {
8542 allowed_to_receive = FALSE;
8543 } else {
8544 if (return_policy_id) {
8545 *return_policy_id = inp->inp_policyresult.policy_id;
8546 }
8547 if (return_route_rule_id) {
8548 *return_route_rule_id = inp->inp_policyresult.results.route_rule_id;
8549 }
8550 }
8551 lck_rw_done(&necp_kernel_policy_lock);
8552 goto done;
8553 }
8554
8555 struct necp_kernel_socket_policy *matched_policy = necp_socket_find_policy_match_with_info_locked(necp_kernel_socket_policies_map[NECP_SOCKET_MAP_APP_ID_TO_BUCKET(info.application_id)], &info, NULL, &route_rule_id, &service_action, &service, netagent_ids, NECP_MAX_NETAGENTS, current_proc());
8556 if (matched_policy != NULL) {
8557 if (matched_policy->result == NECP_KERNEL_POLICY_RESULT_DROP ||
8558 matched_policy->result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT ||
8559 (matched_policy->result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL && interface &&
8560 matched_policy->result_parameter.tunnel_interface_index != verifyifindex) ||
8561 ((service_action == NECP_KERNEL_POLICY_RESULT_TRIGGER_SCOPED ||
8562 service_action == NECP_KERNEL_POLICY_RESULT_NO_TRIGGER_SCOPED) &&
8563 service.identifier != 0 && service.identifier != NECP_NULL_SERVICE_ID) ||
8564 (route_rule_id != 0 &&
8565 !necp_route_is_allowed(route, interface, route_rule_id, &interface_type_denied)) ||
8566 !necp_netagents_allow_traffic(netagent_ids, NECP_MAX_NETAGENTS)) {
8567 allowed_to_receive = FALSE;
8568 } else {
8569 if (return_policy_id) {
8570 *return_policy_id = matched_policy->id;
8571 }
8572 if (return_route_rule_id) {
8573 *return_route_rule_id = route_rule_id;
8574 }
8575 }
8576 lck_rw_done(&necp_kernel_policy_lock);
8577
8578 if (necp_debug > 1 && matched_policy->id != inp->inp_policyresult.policy_id) {
8579 NECPLOG(LOG_DEBUG, "Socket Send/Recv Policy: Policy %d Allowed %d", return_policy_id ? *return_policy_id : 0, allowed_to_receive);
8580 }
8581 goto done;
8582 } else if (necp_drop_all_order > 0) {
8583 allowed_to_receive = FALSE;
8584 } else {
8585 if (return_policy_id) {
8586 *return_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
8587 }
8588 if (return_route_rule_id) {
8589 *return_route_rule_id = route_rule_id;
8590 }
8591 }
8592
8593 lck_rw_done(&necp_kernel_policy_lock);
8594
8595 done:
8596 if (!allowed_to_receive && interface_type_denied != IFRTYPE_FUNCTIONAL_UNKNOWN) {
8597 soevent(inp->inp_socket, (SO_FILT_HINT_LOCKED | SO_FILT_HINT_IFDENIED));
8598 }
8599
8600 return (allowed_to_receive);
8601 }
8602
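/*
 * Convenience wrappers: build sockaddr_in / sockaddr_in6 override
 * addresses from the raw ports and in_addr / in6_addr values, then call
 * necp_socket_is_allowed_to_send_recv_internal() above.
 */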
8603 bool
8604 necp_socket_is_allowed_to_send_recv_v4(struct inpcb *inp, u_int16_t local_port, u_int16_t remote_port, struct in_addr *local_addr, struct in_addr *remote_addr, ifnet_t interface, necp_kernel_policy_id *return_policy_id, u_int32_t *return_route_rule_id)
8605 {
8606 struct sockaddr_in local;
8607 struct sockaddr_in remote;
8608 local.sin_family = remote.sin_family = AF_INET;
8609 local.sin_len = remote.sin_len = sizeof(struct sockaddr_in);
8610 local.sin_port = local_port;
8611 remote.sin_port = remote_port;
8612 memcpy(&local.sin_addr, local_addr, sizeof(local.sin_addr));
8613 memcpy(&remote.sin_addr, remote_addr, sizeof(remote.sin_addr));
8614
8615 return (necp_socket_is_allowed_to_send_recv_internal(inp, (struct sockaddr *)&local, (struct sockaddr *)&remote, interface, return_policy_id, return_route_rule_id));
8616 }
8617
8618 bool
8619 necp_socket_is_allowed_to_send_recv_v6(struct inpcb *inp, u_int16_t local_port, u_int16_t remote_port, struct in6_addr *local_addr, struct in6_addr *remote_addr, ifnet_t interface, necp_kernel_policy_id *return_policy_id, u_int32_t *return_route_rule_id)
8620 {
8621 struct sockaddr_in6 local;
8622 struct sockaddr_in6 remote;
8623 local.sin6_family = remote.sin6_family = AF_INET6;
8624 local.sin6_len = remote.sin6_len = sizeof(struct sockaddr_in6);
8625 local.sin6_port = local_port;
8626 remote.sin6_port = remote_port;
8627 memcpy(&local.sin6_addr, local_addr, sizeof(local.sin6_addr));
8628 memcpy(&remote.sin6_addr, remote_addr, sizeof(remote.sin6_addr));
8629
8630 return (necp_socket_is_allowed_to_send_recv_internal(inp, (struct sockaddr *)&local, (struct sockaddr *)&remote, interface, return_policy_id, return_route_rule_id));
8631 }
8632
8633 bool
8634 necp_socket_is_allowed_to_send_recv(struct inpcb *inp, necp_kernel_policy_id *return_policy_id, u_int32_t *return_route_rule_id)
8635 {
8636 return (necp_socket_is_allowed_to_send_recv_internal(inp, NULL, NULL, NULL, return_policy_id, return_route_rule_id));
8637 }
8638
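/*
 * Tag an outgoing mbuf with the socket's NECP results so the IP layer can
 * match against them later: the policy ID (for Pass and IP Tunnel
 * results), the route rule ID, and the application ID.
 */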
8639 int
8640 necp_mark_packet_from_socket(struct mbuf *packet, struct inpcb *inp, necp_kernel_policy_id policy_id, u_int32_t route_rule_id)
8641 {
8642 if (packet == NULL || inp == NULL || !(packet->m_flags & M_PKTHDR)) {
8643 return (EINVAL);
8644 }
8645
8646 // Mark ID for Pass and IP Tunnel
8647 if (policy_id != NECP_KERNEL_POLICY_ID_NONE) {
8648 packet->m_pkthdr.necp_mtag.necp_policy_id = policy_id;
8649 } else if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_PASS ||
8650 inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL) {
8651 packet->m_pkthdr.necp_mtag.necp_policy_id = inp->inp_policyresult.policy_id;
8652 } else {
8653 packet->m_pkthdr.necp_mtag.necp_policy_id = NECP_KERNEL_POLICY_ID_NONE;
8654 }
8655 packet->m_pkthdr.necp_mtag.necp_last_interface_index = 0;
8656 if (route_rule_id != 0) {
8657 packet->m_pkthdr.necp_mtag.necp_route_rule_id = route_rule_id;
8658 } else {
8659 packet->m_pkthdr.necp_mtag.necp_route_rule_id = inp->inp_policyresult.results.route_rule_id;
8660 }
8661 packet->m_pkthdr.necp_mtag.necp_app_id = inp->inp_policyresult.app_id;
8662
8663 return (0);
8664 }
8665
8666 int
8667 necp_mark_packet_from_ip(struct mbuf *packet, necp_kernel_policy_id policy_id)
8668 {
8669 if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
8670 return (EINVAL);
8671 }
8672
8673 // Mark ID for Pass and IP Tunnel
8674 if (policy_id != NECP_KERNEL_POLICY_ID_NONE) {
8675 packet->m_pkthdr.necp_mtag.necp_policy_id = policy_id;
8676 } else {
8677 packet->m_pkthdr.necp_mtag.necp_policy_id = NECP_KERNEL_POLICY_ID_NONE;
8678 }
8679
8680 return (0);
8681 }
8682
8683 int
8684 necp_mark_packet_from_interface(struct mbuf *packet, ifnet_t interface)
8685 {
8686 if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
8687 return (EINVAL);
8688 }
8689
8690 // Record the last interface index
8691 if (interface != NULL) {
8692 packet->m_pkthdr.necp_mtag.necp_last_interface_index = interface->if_index;
8693 }
8694
8695 return (0);
8696 }
8697
8698 int
8699 necp_mark_packet_as_keepalive(struct mbuf *packet, bool is_keepalive)
8700 {
8701 if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
8702 return (EINVAL);
8703 }
8704
8705 if (is_keepalive) {
8706 packet->m_pkthdr.pkt_flags |= PKTF_KEEPALIVE;
8707 } else {
8708 packet->m_pkthdr.pkt_flags &= ~PKTF_KEEPALIVE;
8709 }
8710
8711 return (0);
8712 }
8713
8714 necp_kernel_policy_id
8715 necp_get_policy_id_from_packet(struct mbuf *packet)
8716 {
8717 if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
8718 return (NECP_KERNEL_POLICY_ID_NONE);
8719 }
8720
8721 return (packet->m_pkthdr.necp_mtag.necp_policy_id);
8722 }
8723
8724 u_int32_t
8725 necp_get_last_interface_index_from_packet(struct mbuf *packet)
8726 {
8727 if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
8728 return (0);
8729 }
8730
8731 return (packet->m_pkthdr.necp_mtag.necp_last_interface_index);
8732 }
8733
8734 u_int32_t
8735 necp_get_route_rule_id_from_packet(struct mbuf *packet)
8736 {
8737 if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
8738 return (0);
8739 }
8740
8741 return (packet->m_pkthdr.necp_mtag.necp_route_rule_id);
8742 }
8743
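/*
 * Translate the app ID stored in the packet's NECP tag back into the
 * application UUID, using the kernel UUID-to-ID mapping table under the
 * shared policy lock. The UUID is cleared if no mapping is found.
 */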
8744 int
8745 necp_get_app_uuid_from_packet(struct mbuf *packet,
8746 uuid_t app_uuid)
8747 {
8748 if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
8749 return (EINVAL);
8750 }
8751
8752 bool found_mapping = false;
8753 if (packet->m_pkthdr.necp_mtag.necp_app_id != 0) {
8754 lck_rw_lock_shared(&necp_kernel_policy_lock);
8755 struct necp_uuid_id_mapping *entry = necp_uuid_lookup_uuid_with_app_id_locked(packet->m_pkthdr.necp_mtag.necp_app_id);
8756 if (entry != NULL) {
8757 uuid_copy(app_uuid, entry->uuid);
8758 found_mapping = true;
8759 }
8760 lck_rw_done(&necp_kernel_policy_lock);
8761 }
8762 if (!found_mapping) {
8763 uuid_clear(app_uuid);
8764 }
8765 return (0);
8766 }
8767
8768 bool
8769 necp_get_is_keepalive_from_packet(struct mbuf *packet)
8770 {
8771 if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
8772 return (FALSE);
8773 }
8774
8775 return (packet->m_pkthdr.pkt_flags & PKTF_KEEPALIVE);
8776 }
8777
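/*
 * Return the content filter control unit cached in the socket's most
 * recent policy result (0 if the socket has no PCB).
 */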
8778 u_int32_t
8779 necp_socket_get_content_filter_control_unit(struct socket *so)
8780 {
8781 struct inpcb *inp = sotoinpcb(so);
8782
8783 if (inp == NULL) {
8784 return (0);
8785 }
8786 return (inp->inp_policyresult.results.filter_control_unit);
8787 }
8788
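/*
 * Flow divert helpers: a socket should be diverted when its cached policy
 * result is SOCKET_DIVERT, in which case the control unit to divert to is
 * carried in the result parameter.
 */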
8789 bool
8790 necp_socket_should_use_flow_divert(struct inpcb *inp)
8791 {
8792 if (inp == NULL) {
8793 return (FALSE);
8794 }
8795
8796 return (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT);
8797 }
8798
8799 u_int32_t
8800 necp_socket_get_flow_divert_control_unit(struct inpcb *inp)
8801 {
8802 if (inp == NULL) {
8803 return (0);
8804 }
8805
8806 if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT) {
8807 return (inp->inp_policyresult.results.result_parameter.flow_divert_control_unit);
8808 }
8809
8810 return (0);
8811 }
8812
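/*
 * Socket rescoping helpers: a SOCKET_SCOPED policy result forces the
 * socket onto the interface index carried in the result parameter.
 */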
8813 bool
8814 necp_socket_should_rescope(struct inpcb *inp)
8815 {
8816 if (inp == NULL) {
8817 return (FALSE);
8818 }
8819
8820 return (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED);
8821 }
8822
8823 u_int
8824 necp_socket_get_rescope_if_index(struct inpcb *inp)
8825 {
8826 if (inp == NULL) {
8827 return (0);
8828 }
8829
8830 if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED) {
8831 return (inp->inp_policyresult.results.result_parameter.scoped_interface_index);
8832 }
8833
8834 return (0);
8835 }
8836
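/*
 * Compute the MTU a socket should use when an IP Tunnel policy result
 * rescopes it from its bound interface to a tunnel interface. For ipsec
 * tunnel interfaces, the usable MTU is the smaller of the tunnel MTU and
 * the delegate (physical) interface MTU minus the ESP + IPv6 header
 * overhead; for other tunnels the tunnel MTU is used directly. In all
 * other cases the caller's current MTU is returned unchanged.
 */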
8837 u_int32_t
8838 necp_socket_get_effective_mtu(struct inpcb *inp, u_int32_t current_mtu)
8839 {
8840 if (inp == NULL) {
8841 return (current_mtu);
8842 }
8843
8844 if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL &&
8845 (inp->inp_flags & INP_BOUND_IF) &&
8846 inp->inp_boundifp) {
8847
8848 u_int bound_interface_index = inp->inp_boundifp->if_index;
8849 u_int tunnel_interface_index = inp->inp_policyresult.results.result_parameter.tunnel_interface_index;
8850
8851 // The result is IP Tunnel and the socket is being rescoped from one interface to another; recalculate the MTU.
8852 if (bound_interface_index != tunnel_interface_index) {
8853 ifnet_t tunnel_interface = NULL;
8854
8855 ifnet_head_lock_shared();
8856 tunnel_interface = ifindex2ifnet[tunnel_interface_index];
8857 ifnet_head_done();
8858
8859 if (tunnel_interface != NULL) {
8860 u_int32_t direct_tunnel_mtu = tunnel_interface->if_mtu;
8861 u_int32_t delegate_tunnel_mtu = (tunnel_interface->if_delegated.ifp != NULL) ? tunnel_interface->if_delegated.ifp->if_mtu : 0;
8862 if (delegate_tunnel_mtu != 0 &&
8863 strncmp(tunnel_interface->if_name, "ipsec", strlen("ipsec")) == 0) {
8864 // For ipsec interfaces, calculate the overhead from the delegate interface
8865 u_int32_t tunnel_overhead = (u_int32_t)(esp_hdrsiz(NULL) + sizeof(struct ip6_hdr));
8866 if (delegate_tunnel_mtu > tunnel_overhead) {
8867 delegate_tunnel_mtu -= tunnel_overhead;
8868 }
8869
8870 if (delegate_tunnel_mtu < direct_tunnel_mtu) {
8871 // If the (delegate - overhead) < direct, return (delegate - overhead)
8872 return (delegate_tunnel_mtu);
8873 } else {
8874 // Otherwise return direct
8875 return (direct_tunnel_mtu);
8876 }
8877 } else {
8878 // For non-ipsec interfaces, just return the tunnel MTU
8879 return (direct_tunnel_mtu);
8880 }
8881 }
8882 }
8883 }
8884
8885 // By default, just return the MTU passed in
8886 return (current_mtu);
8887 }
8888
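/*
 * Resolve the tunnel interface index in a policy result parameter to an
 * ifnet pointer via the global ifindex2ifnet table.
 */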
8889 ifnet_t
8890 necp_get_ifnet_from_result_parameter(necp_kernel_policy_result_parameter *result_parameter)
8891 {
8892 if (result_parameter == NULL) {
8893 return (NULL);
8894 }
8895
8896 return (ifindex2ifnet[result_parameter->tunnel_interface_index]);
8897 }
8898
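/*
 * Decide whether a packet can be rebound to another interface: the
 * packet's source address must be owned by that interface, and a usable
 * scoped route to the packet's destination must exist on it. On success
 * the scoped route is left in new_route for the caller to use.
 */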
8899 bool
8900 necp_packet_can_rebind_to_ifnet(struct mbuf *packet, struct ifnet *interface, struct route *new_route, int family)
8901 {
8902 bool found_match = FALSE;
8903 errno_t result = 0;
8904 ifaddr_t *addresses = NULL;
8905 union necp_sockaddr_union address_storage;
8906 int i;
8907
8908 if (packet == NULL || interface == NULL || new_route == NULL || (family != AF_INET && family != AF_INET6)) {
8909 return (FALSE);
8910 }
8911
8912 result = ifnet_get_address_list_family(interface, &addresses, family);
8913 if (result != 0) {
8914 NECPLOG(LOG_ERR, "Failed to get address list for %s%d", ifnet_name(interface), ifnet_unit(interface));
8915 return (FALSE);
8916 }
8917
8918 for (i = 0; addresses[i] != NULL; i++) {
8919 ROUTE_RELEASE(new_route);
8920 if (ifaddr_address(addresses[i], &address_storage.sa, sizeof(address_storage)) == 0) {
8921 if (family == AF_INET) {
8922 struct ip *ip = mtod(packet, struct ip *);
8923 if (memcmp(&address_storage.sin.sin_addr, &ip->ip_src, sizeof(ip->ip_src)) == 0) {
8924 struct sockaddr_in *dst4 = (struct sockaddr_in *)(void *)&new_route->ro_dst;
8925 dst4->sin_family = AF_INET;
8926 dst4->sin_len = sizeof(struct sockaddr_in);
8927 dst4->sin_addr = ip->ip_dst;
8928 rtalloc_scoped(new_route, interface->if_index);
8929 if (!ROUTE_UNUSABLE(new_route)) {
8930 found_match = TRUE;
8931 goto done;
8932 }
8933 }
8934 } else if (family == AF_INET6) {
8935 struct ip6_hdr *ip6 = mtod(packet, struct ip6_hdr *);
8936 if (memcmp(&address_storage.sin6.sin6_addr, &ip6->ip6_src, sizeof(ip6->ip6_src)) == 0) {
8937 struct sockaddr_in6 *dst6 = (struct sockaddr_in6 *)(void *)&new_route->ro_dst;
8938 dst6->sin6_family = AF_INET6;
8939 dst6->sin6_len = sizeof(struct sockaddr_in6);
8940 dst6->sin6_addr = ip6->ip6_dst;
8941 rtalloc_scoped(new_route, interface->if_index);
8942 if (!ROUTE_UNUSABLE(new_route)) {
8943 found_match = TRUE;
8944 goto done;
8945 }
8946 }
8947 }
8948 }
8949 }
8950
8951 done:
8952 ifnet_free_address_list(addresses);
8953 addresses = NULL;
8954 return (found_match);
8955 }
8956
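/*
 * Return TRUE if the given sockaddr is an IPv4 or IPv6 loopback address.
 */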
8957 static bool
8958 necp_addr_is_loopback(struct sockaddr *address)
8959 {
8960 if (address == NULL) {
8961 return (FALSE);
8962 }
8963
8964 if (address->sa_family == AF_INET) {
8965 return (ntohl(((struct sockaddr_in *)(void *)address)->sin_addr.s_addr) == INADDR_LOOPBACK);
8966 } else if (address->sa_family == AF_INET6) {
8967 return IN6_IS_ADDR_LOOPBACK(&((struct sockaddr_in6 *)(void *)address)->sin6_addr);
8968 }
8969
8970 return (FALSE);
8971 }
8972
8973 static bool
8974 necp_is_loopback(struct sockaddr *local_addr, struct sockaddr *remote_addr, struct inpcb *inp, struct mbuf *packet)
8975 {
8976 // Note: This function only checks for the loopback addresses.
8977 // In the future, we may want to expand to also allow any traffic
8978 // going through the loopback interface, but until then, this
8979 // check is cheaper.
8980
8981 if (local_addr != NULL && necp_addr_is_loopback(local_addr)) {
8982 return (TRUE);
8983 }
8984
8985 if (remote_addr != NULL && necp_addr_is_loopback(remote_addr)) {
8986 return (TRUE);
8987 }
8988
8989 if (inp != NULL) {
8990 if ((inp->inp_flags & INP_BOUND_IF) && inp->inp_boundifp && (inp->inp_boundifp->if_flags & IFF_LOOPBACK)) {
8991 return (TRUE);
8992 }
8993 if (inp->inp_vflag & INP_IPV4) {
8994 if (ntohl(inp->inp_laddr.s_addr) == INADDR_LOOPBACK ||
8995 ntohl(inp->inp_faddr.s_addr) == INADDR_LOOPBACK) {
8996 return (TRUE);
8997 }
8998 } else if (inp->inp_vflag & INP_IPV6) {
8999 if (IN6_IS_ADDR_LOOPBACK(&inp->in6p_laddr) ||
9000 IN6_IS_ADDR_LOOPBACK(&inp->in6p_faddr)) {
9001 return (TRUE);
9002 }
9003 }
9004 }
9005
9006 if (packet != NULL) {
9007 struct ip *ip = mtod(packet, struct ip *);
9008 if (ip->ip_v == 4) {
9009 if (ntohl(ip->ip_src.s_addr) == INADDR_LOOPBACK) {
9010 return (TRUE);
9011 }
9012 if (ntohl(ip->ip_dst.s_addr) == INADDR_LOOPBACK) {
9013 return (TRUE);
9014 }
9015 } else if (ip->ip_v == 6) {
9016 struct ip6_hdr *ip6 = mtod(packet, struct ip6_hdr *);
9017 if (IN6_IS_ADDR_LOOPBACK(&ip6->ip6_src)) {
9018 return (TRUE);
9019 }
9020 if (IN6_IS_ADDR_LOOPBACK(&ip6->ip6_dst)) {
9021 return (TRUE);
9022 }
9023 }
9024 }
9025
9026 return (FALSE);
9027 }
9028
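/*
 * Check whether traffic belongs to the internal coprocessor interface.
 * For sockets this defers to the socket-filter permission check; for raw
 * IPv6 packets it matches the hard-coded link-local destination
 * fe80::aede:48ff:fe33:4455 used by that interface.
 */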
9029 static bool
9030 necp_is_intcoproc(struct inpcb *inp, struct mbuf *packet)
9031 {
9032
9033 if (inp != NULL) {
9034 return (sflt_permission_check(inp) ? true : false);
9035 }
9036 if (packet != NULL) {
9037 struct ip6_hdr *ip6 = mtod(packet, struct ip6_hdr *);
9038 if ((ip6->ip6_vfc & IPV6_VERSION_MASK) == IPV6_VERSION &&
9039 IN6_IS_ADDR_LINKLOCAL(&ip6->ip6_dst) &&
9040 ip6->ip6_dst.s6_addr32[2] == ntohl(0xaede48ff) &&
9041 ip6->ip6_dst.s6_addr32[3] == ntohl(0xfe334455)) {
9042 return (true);
9043 }
9044 }
9045
9046 return (false);
9047 }