1 /*
2 * Copyright (c) 2013-2017 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <string.h>
30 #include <sys/systm.h>
31 #include <sys/types.h>
32 #include <sys/queue.h>
33 #include <sys/malloc.h>
34 #include <libkern/OSMalloc.h>
35 #include <sys/kernel.h>
36 #include <sys/kern_control.h>
37 #include <sys/mbuf.h>
38 #include <sys/kpi_mbuf.h>
39 #include <sys/proc_uuid_policy.h>
40 #include <net/if.h>
41 #include <sys/domain.h>
42 #include <sys/protosw.h>
43 #include <sys/socket.h>
44 #include <sys/socketvar.h>
45 #include <netinet/ip.h>
46 #include <netinet/ip6.h>
47 #include <netinet/tcp.h>
48 #include <netinet/tcp_var.h>
49 #include <netinet/tcp_cache.h>
50 #include <netinet/udp.h>
51 #include <netinet/in_pcb.h>
52 #include <netinet/in_tclass.h>
53 #include <netinet6/esp.h>
54 #include <net/flowhash.h>
55 #include <net/if_var.h>
56 #include <sys/kauth.h>
57 #include <sys/sysctl.h>
58 #include <sys/sysproto.h>
59 #include <sys/priv.h>
60 #include <sys/kern_event.h>
61 #include <sys/file_internal.h>
62 #include <IOKit/IOBSD.h>
63 #include <net/network_agent.h>
64 #include <net/necp.h>
65
66 /*
67 * NECP - Network Extension Control Policy database
68 * ------------------------------------------------
69 * The goal of this module is to allow clients connecting via a
70 * kernel control socket to create high-level policy sessions, which
71 * are ingested into low-level kernel policies that control and tag
72 * traffic at the application, socket, and IP layers.
73 *
74 * ------------------------------------------------
75 * Sessions
76 * ------------------------------------------------
77 * Each session owns a list of session policies, each of which can
78 * specify any combination of conditions and a single result. Each
79 * session also has a priority level (such as High, Default, or Low)
80 * which is requested by the client. Based on the requested level,
81 * a session order value is assigned to the session, which will be used
82 * to sort kernel policies generated by the session. The session client
83 * can specify the sub-order for each policy it creates which will be
84 * used to further sort the kernel policies.
85 *
86 * Kernel Control Socket --> 1 necp_session --> list of necp_session_policy structs
87 *
88 * ------------------------------------------------
89 * Kernel Policies
90 * ------------------------------------------------
  91 * Whenever a session sends the Apply command, its policies are ingested
92 * and generate kernel policies. There are two phases of kernel policy
93 * ingestion.
94 *
95 * 1. The session policy is parsed to create kernel policies at the socket
96 * and IP layers, when applicable. For example, a policy that requires
97 * all traffic from App1 to Pass will generate a socket kernel policy to
98 * match App1 and mark packets with ID1, and also an IP policy to match
99 * ID1 and let the packet pass. This is handled in necp_apply_policy. The
100 * resulting kernel policies are added to the global socket and IP layer
101 * policy lists.
102 * necp_session_policy --> necp_kernel_socket_policy and necp_kernel_ip_output_policy
103 * || ||
104 * \/ \/
105 * necp_kernel_socket_policies necp_kernel_ip_output_policies
106 *
107 * 2. Once the global lists of kernel policies have been filled out, each
108 * list is traversed to create optimized sub-lists ("Maps") which are used during
109 * data-path evaluation. IP policies are sent into necp_kernel_ip_output_policies_map,
110 * which hashes incoming packets based on marked socket-layer policies, and removes
111 * duplicate or overlapping policies. Socket policies are sent into two maps,
112 * necp_kernel_socket_policies_map and necp_kernel_socket_policies_app_layer_map.
113 * The app layer map is used for policy checks coming in from user space, and is one
114 * list with duplicate and overlapping policies removed. The socket map hashes based
115 * on app UUID, and removes duplicate and overlapping policies.
116 * necp_kernel_socket_policy --> necp_kernel_socket_policies_app_layer_map
117 * |-> necp_kernel_socket_policies_map
118 *
119 * necp_kernel_ip_output_policies --> necp_kernel_ip_output_policies_map
120 *
121 * ------------------------------------------------
122 * Drop All Level
123 * ------------------------------------------------
124 * The Drop All Level is a sysctl that controls the level at which policies are allowed
125 * to override a global drop rule. If the value is 0, no drop rule is applied. If the value
126 * is 1, all traffic is dropped. If the value is greater than 1, all kernel policies created
127 * by a session with a priority level better than (numerically less than) the
128 * Drop All Level will allow matching traffic to not be dropped. The Drop All Level is
129 * dynamically interpreted into necp_drop_all_order, which specifies the equivalent assigned
130 * session orders to be dropped.
131 */
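/*
 * A minimal user-space sketch (illustrative only, and guarded by #if 0 so it
 * is never compiled as part of this file) of how a privileged client might
 * connect to this kernel control and thereby create one necp_session. It
 * assumes only the standard kernel control socket interfaces (PF_SYSTEM
 * sockets, CTLIOCGINFO, connect) and NECP_CONTROL_NAME from net/necp.h; the
 * function name is local to the example.
 */
#if 0
#include <sys/socket.h>
#include <sys/sys_domain.h>
#include <sys/kern_control.h>
#include <sys/ioctl.h>
#include <string.h>
#include <unistd.h>

static int
necp_example_connect_control(void)
{
	struct ctl_info info;
	struct sockaddr_ctl addr;

	// Each connected control socket unit corresponds to one necp_session
	int fd = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);
	if (fd < 0) {
		return (-1);
	}

	// Resolve the dynamic kernel control ID for the NECP control by name
	memset(&info, 0, sizeof(info));
	strlcpy(info.ctl_name, NECP_CONTROL_NAME, sizeof(info.ctl_name));
	if (ioctl(fd, CTLIOCGINFO, &info) == -1) {
		close(fd);
		return (-1);
	}

	// The control is registered with CTL_FLAG_PRIVILEGED, so this needs root
	memset(&addr, 0, sizeof(addr));
	addr.sc_len = sizeof(addr);
	addr.sc_family = AF_SYSTEM;
	addr.ss_sysaddr = AF_SYS_CONTROL;
	addr.sc_id = info.ctl_id;
	addr.sc_unit = 0;	// let the kernel pick a unit
	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) == -1) {
		close(fd);
		return (-1);
	}
	return (fd);
}
#endif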
132
133 u_int32_t necp_drop_all_order = 0;
134 u_int32_t necp_drop_all_level = 0;
135
136 u_int32_t necp_pass_loopback = 1; // 0=Off, 1=On
137 u_int32_t necp_pass_keepalives = 1; // 0=Off, 1=On
138
139 u_int32_t necp_debug = 0; // 0=None, 1=Basic, 2=EveryMatch
140
141 u_int32_t necp_session_count = 0;
142
143 #define LIST_INSERT_SORTED_ASCENDING(head, elm, field, sortfield, tmpelm) do { \
144 if (LIST_EMPTY((head)) || (LIST_FIRST(head)->sortfield >= (elm)->sortfield)) { \
145 LIST_INSERT_HEAD((head), elm, field); \
146 } else { \
147 LIST_FOREACH(tmpelm, head, field) { \
148 if (LIST_NEXT(tmpelm, field) == NULL || LIST_NEXT(tmpelm, field)->sortfield >= (elm)->sortfield) { \
149 LIST_INSERT_AFTER(tmpelm, elm, field); \
150 break; \
151 } \
152 } \
153 } \
154 } while (0)
155
156 #define LIST_INSERT_SORTED_TWICE_ASCENDING(head, elm, field, firstsortfield, secondsortfield, tmpelm) do { \
157 if (LIST_EMPTY((head)) || (LIST_FIRST(head)->firstsortfield > (elm)->firstsortfield) || ((LIST_FIRST(head)->firstsortfield == (elm)->firstsortfield) && (LIST_FIRST(head)->secondsortfield >= (elm)->secondsortfield))) { \
158 LIST_INSERT_HEAD((head), elm, field); \
159 } else { \
160 LIST_FOREACH(tmpelm, head, field) { \
161 if (LIST_NEXT(tmpelm, field) == NULL || (LIST_NEXT(tmpelm, field)->firstsortfield > (elm)->firstsortfield) || ((LIST_NEXT(tmpelm, field)->firstsortfield == (elm)->firstsortfield) && (LIST_NEXT(tmpelm, field)->secondsortfield >= (elm)->secondsortfield))) { \
162 LIST_INSERT_AFTER(tmpelm, elm, field); \
163 break; \
164 } \
165 } \
166 } \
167 } while (0)
168
169 #define LIST_INSERT_SORTED_THRICE_ASCENDING(head, elm, field, firstsortfield, secondsortfield, thirdsortfield, tmpelm) do { \
170 if (LIST_EMPTY((head)) || (LIST_FIRST(head)->firstsortfield > (elm)->firstsortfield) || ((LIST_FIRST(head)->firstsortfield == (elm)->firstsortfield) && (LIST_FIRST(head)->secondsortfield >= (elm)->secondsortfield)) || ((LIST_FIRST(head)->firstsortfield == (elm)->firstsortfield) && (LIST_FIRST(head)->secondsortfield == (elm)->secondsortfield) && (LIST_FIRST(head)->thirdsortfield >= (elm)->thirdsortfield))) { \
171 LIST_INSERT_HEAD((head), elm, field); \
172 } else { \
173 LIST_FOREACH(tmpelm, head, field) { \
174 if (LIST_NEXT(tmpelm, field) == NULL || (LIST_NEXT(tmpelm, field)->firstsortfield > (elm)->firstsortfield) || ((LIST_NEXT(tmpelm, field)->firstsortfield == (elm)->firstsortfield) && (LIST_NEXT(tmpelm, field)->secondsortfield >= (elm)->secondsortfield)) || ((LIST_NEXT(tmpelm, field)->firstsortfield == (elm)->firstsortfield) && (LIST_NEXT(tmpelm, field)->secondsortfield == (elm)->secondsortfield) && (LIST_NEXT(tmpelm, field)->thirdsortfield >= (elm)->thirdsortfield))) { \
175 LIST_INSERT_AFTER(tmpelm, elm, field); \
176 break; \
177 } \
178 } \
179 } \
180 } while (0)
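/*
 * Illustrative (never compiled) use of LIST_INSERT_SORTED_ASCENDING with a
 * hypothetical element type, showing the arguments the macro expects: a list
 * head, the new element, its link field, the sort key field, and a scratch
 * pointer used while walking the list. Ties place the new element before
 * existing elements with an equal key.
 */
#if 0
struct example_elem {
	LIST_ENTRY(example_elem) chain;
	u_int32_t order;	// ascending sort key
};
static LIST_HEAD(example_head, example_elem) example_list =
    LIST_HEAD_INITIALIZER(example_list);

static void
example_insert_sorted(struct example_elem *new_elem)
{
	struct example_elem *tmp_elem = NULL;
	LIST_INSERT_SORTED_ASCENDING(&example_list, new_elem, chain, order, tmp_elem);
}
#endif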
181
182 #define IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(x) ((x) == NECP_ROUTE_RULE_DENY_INTERFACE || (x) == NECP_ROUTE_RULE_ALLOW_INTERFACE)
183
184 #define NECP_KERNEL_CONDITION_ALL_INTERFACES 0x00001
185 #define NECP_KERNEL_CONDITION_BOUND_INTERFACE 0x00002
186 #define NECP_KERNEL_CONDITION_PROTOCOL 0x00004
187 #define NECP_KERNEL_CONDITION_LOCAL_START 0x00008
188 #define NECP_KERNEL_CONDITION_LOCAL_END 0x00010
189 #define NECP_KERNEL_CONDITION_LOCAL_PREFIX 0x00020
190 #define NECP_KERNEL_CONDITION_REMOTE_START 0x00040
191 #define NECP_KERNEL_CONDITION_REMOTE_END 0x00080
192 #define NECP_KERNEL_CONDITION_REMOTE_PREFIX 0x00100
193 #define NECP_KERNEL_CONDITION_APP_ID 0x00200
194 #define NECP_KERNEL_CONDITION_REAL_APP_ID 0x00400
195 #define NECP_KERNEL_CONDITION_DOMAIN 0x00800
196 #define NECP_KERNEL_CONDITION_ACCOUNT_ID 0x01000
197 #define NECP_KERNEL_CONDITION_POLICY_ID 0x02000
198 #define NECP_KERNEL_CONDITION_PID 0x04000
199 #define NECP_KERNEL_CONDITION_UID 0x08000
200 #define NECP_KERNEL_CONDITION_LAST_INTERFACE 0x10000 // Only set from packets looping between interfaces
201 #define NECP_KERNEL_CONDITION_TRAFFIC_CLASS 0x20000
202 #define NECP_KERNEL_CONDITION_ENTITLEMENT 0x40000
203 #define NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT 0x80000
204
205 #define NECP_MAX_POLICY_RESULT_SIZE 512
206 #define NECP_MAX_ROUTE_RULES_ARRAY_SIZE 1024
207 #define NECP_MAX_CONDITIONS_ARRAY_SIZE 4096
208 #define NECP_MAX_POLICY_LIST_COUNT 1024
209
210 // Cap the policy size at the max result + conditions size, with room for extra TLVs
211 #define NECP_MAX_POLICY_SIZE (1024 + NECP_MAX_POLICY_RESULT_SIZE + NECP_MAX_CONDITIONS_ARRAY_SIZE)
212
213 struct necp_service_registration {
214 LIST_ENTRY(necp_service_registration) session_chain;
215 LIST_ENTRY(necp_service_registration) kernel_chain;
216 u_int32_t service_id;
217 };
218
219 struct necp_session {
220 u_int8_t necp_fd_type;
221 u_int32_t control_unit;
222 u_int32_t session_priority; // Descriptive priority rating
223 u_int32_t session_order;
224
225 decl_lck_mtx_data(, lock);
226
227 bool proc_locked; // Messages must come from proc_uuid
228 uuid_t proc_uuid;
229 int proc_pid;
230
231 bool dirty;
232 LIST_HEAD(_policies, necp_session_policy) policies;
233
234 LIST_HEAD(_services, necp_service_registration) services;
235
236 TAILQ_ENTRY(necp_session) chain;
237 };
238
239 #define NECP_SESSION_LOCK(_s) lck_mtx_lock(&_s->lock)
240 #define NECP_SESSION_UNLOCK(_s) lck_mtx_unlock(&_s->lock)
241
242 static TAILQ_HEAD(_necp_session_list, necp_session) necp_session_list;
243
244 struct necp_socket_info {
245 pid_t pid;
246 uid_t uid;
247 union necp_sockaddr_union local_addr;
248 union necp_sockaddr_union remote_addr;
249 u_int32_t bound_interface_index;
250 u_int32_t traffic_class;
251 u_int16_t protocol;
252 u_int32_t application_id;
253 u_int32_t real_application_id;
254 u_int32_t account_id;
255 char *domain;
256 errno_t cred_result;
257 };
258
259 static kern_ctl_ref necp_kctlref;
260 static u_int32_t necp_family;
261 static OSMallocTag necp_malloc_tag;
262 static lck_grp_attr_t *necp_kernel_policy_grp_attr = NULL;
263 static lck_attr_t *necp_kernel_policy_mtx_attr = NULL;
264 static lck_grp_t *necp_kernel_policy_mtx_grp = NULL;
265 decl_lck_rw_data(static, necp_kernel_policy_lock);
266
267 static lck_grp_attr_t *necp_route_rule_grp_attr = NULL;
268 static lck_attr_t *necp_route_rule_mtx_attr = NULL;
269 static lck_grp_t *necp_route_rule_mtx_grp = NULL;
270 decl_lck_rw_data(static, necp_route_rule_lock);
271
272 static necp_policy_id necp_last_policy_id = 0;
273 static necp_kernel_policy_id necp_last_kernel_policy_id = 0;
274 static u_int32_t necp_last_uuid_id = 0;
275 static u_int32_t necp_last_string_id = 0;
276 static u_int32_t necp_last_route_rule_id = 0;
277 static u_int32_t necp_last_aggregate_route_rule_id = 0;
278
279 /*
280 * On modification, invalidate cached lookups by bumping the generation count.
281 * Other calls will need to take the slowpath of taking
282 * the subsystem lock.
283 */
284 static volatile int32_t necp_kernel_socket_policies_gencount;
285 #define BUMP_KERNEL_SOCKET_POLICIES_GENERATION_COUNT() do { \
286 if (OSIncrementAtomic(&necp_kernel_socket_policies_gencount) == (INT32_MAX - 1)) { \
287 necp_kernel_socket_policies_gencount = 1; \
288 } \
289 } while (0)
290
291 static u_int32_t necp_kernel_application_policies_condition_mask;
292 static size_t necp_kernel_application_policies_count;
293 static u_int32_t necp_kernel_socket_policies_condition_mask;
294 static size_t necp_kernel_socket_policies_count;
295 static size_t necp_kernel_socket_policies_non_app_count;
296 static LIST_HEAD(_necpkernelsocketconnectpolicies, necp_kernel_socket_policy) necp_kernel_socket_policies;
297 #define NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS 5
298 #define NECP_SOCKET_MAP_APP_ID_TO_BUCKET(appid) (appid ? (appid%(NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS - 1) + 1) : 0)
299 static struct necp_kernel_socket_policy **necp_kernel_socket_policies_map[NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS];
300 static struct necp_kernel_socket_policy **necp_kernel_socket_policies_app_layer_map;
301 /*
302 * A note on policy 'maps': these are used for boosting efficiency when matching policies. For each dimension of the map,
303 * such as an ID, the 0 bucket is reserved for sockets/packets that do not have this parameter, while the other
304 * buckets lead to an array of policy pointers that form the list applicable when the (parameter%(NUM_BUCKETS - 1) + 1) == bucket_index.
305 *
306 * For example, a packet with policy ID of 7, when there are 4 ID buckets, will map to bucket (7%3 + 1) = 2.
307 */
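/*
 * Illustrative (never compiled) walk-through of the bucket math above using
 * the socket app-ID map: with 5 buckets, non-zero application IDs land in
 * buckets 1 through 4, and bucket 0 is reserved for "no application ID".
 * The helper name is local to the example.
 */
#if 0
static struct necp_kernel_socket_policy **
necp_example_bucket_lookup(u_int32_t example_app_id)
{
	// e.g. app ID 7 -> bucket (7 % 4) + 1 == 4; app ID 0 -> bucket 0
	int example_bucket = NECP_SOCKET_MAP_APP_ID_TO_BUCKET(example_app_id);
	return (necp_kernel_socket_policies_map[example_bucket]);	// may be NULL if the bucket is empty
}
#endif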
308
309 static u_int32_t necp_kernel_ip_output_policies_condition_mask;
310 static size_t necp_kernel_ip_output_policies_count;
311 static size_t necp_kernel_ip_output_policies_non_id_count;
312 static LIST_HEAD(_necpkernelipoutputpolicies, necp_kernel_ip_output_policy) necp_kernel_ip_output_policies;
313 #define NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS 5
314 #define NECP_IP_OUTPUT_MAP_ID_TO_BUCKET(id) (id ? (id%(NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS - 1) + 1) : 0)
315 static struct necp_kernel_ip_output_policy **necp_kernel_ip_output_policies_map[NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS];
316
317 static struct necp_session *necp_create_session(void);
318 static void necp_delete_session(struct necp_session *session);
319
320 static necp_policy_id necp_handle_policy_add(struct necp_session *session, u_int32_t message_id, mbuf_t packet,
321 u_int8_t *tlv_buffer, size_t tlv_buffer_length, int offset, int *error);
322 static void necp_handle_policy_get(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
323 static void necp_handle_policy_delete(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
324 static void necp_handle_policy_apply_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
325 static void necp_handle_policy_list_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
326 static void necp_handle_policy_delete_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
327 static int necp_handle_policy_dump_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet,
328 user_addr_t out_buffer, size_t out_buffer_length, int offset);
329 static void necp_handle_set_session_priority(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
330 static void necp_handle_lock_session_to_proc(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
331 static void necp_handle_register_service(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
332 static void necp_handle_unregister_service(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
333
334 #define MAX_RESULT_STRING_LEN 64
335 static inline const char * necp_get_result_description(char *result_string, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter);
336
337 static struct necp_session_policy *necp_policy_create(struct necp_session *session, necp_policy_order order, u_int8_t *conditions_array, u_int32_t conditions_array_size, u_int8_t *route_rules_array, u_int32_t route_rules_array_size, u_int8_t *result, u_int32_t result_size);
338 static struct necp_session_policy *necp_policy_find(struct necp_session *session, necp_policy_id policy_id);
339 static bool necp_policy_mark_for_deletion(struct necp_session *session, struct necp_session_policy *policy);
340 static bool necp_policy_mark_all_for_deletion(struct necp_session *session);
341 static bool necp_policy_delete(struct necp_session *session, struct necp_session_policy *policy);
342 static void necp_policy_apply_all(struct necp_session *session);
343
344 static necp_kernel_policy_id necp_kernel_socket_policy_add(necp_policy_id parent_policy_id, necp_policy_order order, u_int32_t session_order, int session_pid, u_int32_t condition_mask, u_int32_t condition_negated_mask, necp_app_id cond_app_id, necp_app_id cond_real_app_id, char *cond_custom_entitlement, u_int32_t cond_account_id, char *domain, pid_t cond_pid, uid_t cond_uid, ifnet_t cond_bound_interface, struct necp_policy_condition_tc_range cond_traffic_class, u_int16_t cond_protocol, union necp_sockaddr_union *cond_local_start, union necp_sockaddr_union *cond_local_end, u_int8_t cond_local_prefix, union necp_sockaddr_union *cond_remote_start, union necp_sockaddr_union *cond_remote_end, u_int8_t cond_remote_prefix, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter);
345 static bool necp_kernel_socket_policy_delete(necp_kernel_policy_id policy_id);
346 static bool necp_kernel_socket_policies_reprocess(void);
347 static bool necp_kernel_socket_policies_update_uuid_table(void);
348 static inline struct necp_kernel_socket_policy *necp_socket_find_policy_match_with_info_locked(struct necp_kernel_socket_policy **policy_search_array, struct necp_socket_info *info, necp_kernel_policy_filter *return_filter, u_int32_t *return_route_rule_id, necp_kernel_policy_result *return_service_action, necp_kernel_policy_service *return_service, u_int32_t *return_netagent_array, size_t netagent_array_count, proc_t proc);
349
350 static necp_kernel_policy_id necp_kernel_ip_output_policy_add(necp_policy_id parent_policy_id, necp_policy_order order, necp_policy_order suborder, u_int32_t session_order, int session_pid, u_int32_t condition_mask, u_int32_t condition_negated_mask, necp_kernel_policy_id cond_policy_id, ifnet_t cond_bound_interface, u_int32_t cond_last_interface_index, u_int16_t cond_protocol, union necp_sockaddr_union *cond_local_start, union necp_sockaddr_union *cond_local_end, u_int8_t cond_local_prefix, union necp_sockaddr_union *cond_remote_start, union necp_sockaddr_union *cond_remote_end, u_int8_t cond_remote_prefix, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter);
351 static bool necp_kernel_ip_output_policy_delete(necp_kernel_policy_id policy_id);
352 static bool necp_kernel_ip_output_policies_reprocess(void);
353
354 static bool necp_is_addr_in_range(struct sockaddr *addr, struct sockaddr *range_start, struct sockaddr *range_end);
355 static bool necp_is_range_in_range(struct sockaddr *inner_range_start, struct sockaddr *inner_range_end, struct sockaddr *range_start, struct sockaddr *range_end);
356 static bool necp_is_addr_in_subnet(struct sockaddr *addr, struct sockaddr *subnet_addr, u_int8_t subnet_prefix);
357 static int necp_addr_compare(struct sockaddr *sa1, struct sockaddr *sa2, int check_port);
358 static bool necp_buffer_compare_with_bit_prefix(u_int8_t *p1, u_int8_t *p2, u_int32_t bits);
359 static bool necp_is_loopback(struct sockaddr *local_addr, struct sockaddr *remote_addr, struct inpcb *inp, struct mbuf *packet);
360 static bool necp_is_intcoproc(struct inpcb *inp, struct mbuf *packet);
361
362 struct necp_uuid_id_mapping {
363 LIST_ENTRY(necp_uuid_id_mapping) chain;
364 uuid_t uuid;
365 u_int32_t id;
366 u_int32_t refcount;
367 u_int32_t table_refcount; // Add to UUID policy table count
368 };
369 static size_t necp_num_uuid_app_id_mappings;
370 static bool necp_uuid_app_id_mappings_dirty;
371 #define NECP_UUID_APP_ID_HASH_SIZE 64
372 static u_long necp_uuid_app_id_hash_mask;
373 static u_long necp_uuid_app_id_hash_num_buckets;
 374 static LIST_HEAD(necp_uuid_id_mapping_head, necp_uuid_id_mapping) *necp_uuid_app_id_hashtbl, necp_uuid_service_id_list; // App map is a real hash table; the service map is just a flat mapping list
 375 #define APPUUIDHASH(uuid) (&necp_uuid_app_id_hashtbl[uuid[0] & necp_uuid_app_id_hash_mask]) // Assume the first byte of UUIDs is evenly distributed
376 static u_int32_t necp_create_uuid_app_id_mapping(uuid_t uuid, bool *allocated_mapping, bool uuid_policy_table);
377 static bool necp_remove_uuid_app_id_mapping(uuid_t uuid, bool *removed_mapping, bool uuid_policy_table);
378 static struct necp_uuid_id_mapping *necp_uuid_lookup_uuid_with_app_id_locked(u_int32_t local_id);
379
380 static struct necp_uuid_id_mapping *necp_uuid_lookup_service_id_locked(uuid_t uuid);
381 static struct necp_uuid_id_mapping *necp_uuid_lookup_uuid_with_service_id_locked(u_int32_t local_id);
382 static u_int32_t necp_create_uuid_service_id_mapping(uuid_t uuid);
383 static bool necp_remove_uuid_service_id_mapping(uuid_t uuid);
384
385 struct necp_string_id_mapping {
386 LIST_ENTRY(necp_string_id_mapping) chain;
387 char *string;
388 necp_app_id id;
389 u_int32_t refcount;
390 };
391 static LIST_HEAD(necp_string_id_mapping_list, necp_string_id_mapping) necp_account_id_list;
392 static u_int32_t necp_create_string_to_id_mapping(struct necp_string_id_mapping_list *list, char *domain);
393 static bool necp_remove_string_to_id_mapping(struct necp_string_id_mapping_list *list, char *domain);
394 static struct necp_string_id_mapping *necp_lookup_string_with_id_locked(struct necp_string_id_mapping_list *list, u_int32_t local_id);
395
396 static LIST_HEAD(_necp_kernel_service_list, necp_service_registration) necp_registered_service_list;
397
398 static char *necp_create_trimmed_domain(char *string, size_t length);
399 static inline int necp_count_dots(char *string, size_t length);
400
401 static char *necp_copy_string(char *string, size_t length);
402 static bool necp_update_qos_marking(struct ifnet *ifp, u_int32_t route_rule_id);
403
404 #define ROUTE_RULE_IS_AGGREGATE(ruleid) (ruleid > UINT16_MAX)
405
406 #define MAX_ROUTE_RULE_INTERFACES 10
407 struct necp_route_rule {
408 LIST_ENTRY(necp_route_rule) chain;
409 u_int32_t id;
410 u_int32_t default_action;
411 u_int8_t cellular_action;
412 u_int8_t wifi_action;
413 u_int8_t wired_action;
414 u_int8_t expensive_action;
415 u_int exception_if_indices[MAX_ROUTE_RULE_INTERFACES];
416 u_int8_t exception_if_actions[MAX_ROUTE_RULE_INTERFACES];
417 u_int32_t refcount;
418 };
419 static LIST_HEAD(necp_route_rule_list, necp_route_rule) necp_route_rules;
420 static u_int32_t necp_create_route_rule(struct necp_route_rule_list *list, u_int8_t *route_rules_array, u_int32_t route_rules_array_size);
421 static bool necp_remove_route_rule(struct necp_route_rule_list *list, u_int32_t route_rule_id);
422 static bool necp_route_is_allowed(struct rtentry *route, ifnet_t interface, u_int32_t route_rule_id, u_int32_t *interface_type_denied);
423 static struct necp_route_rule *necp_lookup_route_rule_locked(struct necp_route_rule_list *list, u_int32_t route_rule_id);
424
425 #define MAX_AGGREGATE_ROUTE_RULES 16
426 struct necp_aggregate_route_rule {
427 LIST_ENTRY(necp_aggregate_route_rule) chain;
428 u_int32_t id;
429 u_int32_t rule_ids[MAX_AGGREGATE_ROUTE_RULES];
430 };
431 static LIST_HEAD(necp_aggregate_route_rule_list, necp_aggregate_route_rule) necp_aggregate_route_rules;
432 static u_int32_t necp_create_aggregate_route_rule(u_int32_t *rule_ids);
433
434 // Sysctl definitions
435 static int sysctl_handle_necp_level SYSCTL_HANDLER_ARGS;
436
437 SYSCTL_NODE(_net, OID_AUTO, necp, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "NECP");
438 SYSCTL_INT(_net_necp, NECPCTL_PASS_LOOPBACK, pass_loopback, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_pass_loopback, 0, "");
439 SYSCTL_INT(_net_necp, NECPCTL_PASS_KEEPALIVES, pass_keepalives, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_pass_keepalives, 0, "");
440 SYSCTL_INT(_net_necp, NECPCTL_DEBUG, debug, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_debug, 0, "");
441 SYSCTL_PROC(_net_necp, NECPCTL_DROP_ALL_LEVEL, drop_all_level, CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW, &necp_drop_all_level, 0, &sysctl_handle_necp_level, "IU", "");
442 SYSCTL_LONG(_net_necp, NECPCTL_SOCKET_POLICY_COUNT, socket_policy_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_kernel_socket_policies_count, "");
443 SYSCTL_LONG(_net_necp, NECPCTL_SOCKET_NON_APP_POLICY_COUNT, socket_non_app_policy_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_kernel_socket_policies_non_app_count, "");
444 SYSCTL_LONG(_net_necp, NECPCTL_IP_POLICY_COUNT, ip_policy_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_kernel_ip_output_policies_count, "");
445 SYSCTL_INT(_net_necp, NECPCTL_SESSION_COUNT, session_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_session_count, 0, "");
446
447 // Session order allocation
448 static u_int32_t
449 necp_allocate_new_session_order(u_int32_t priority, u_int32_t control_unit)
450 {
451 u_int32_t new_order = 0;
452
453 // For now, just allocate 1000 orders for each priority
454 if (priority == NECP_SESSION_PRIORITY_UNKNOWN || priority > NECP_SESSION_NUM_PRIORITIES) {
455 priority = NECP_SESSION_PRIORITY_DEFAULT;
456 }
457
458 // Use the control unit to decide the offset into the priority list
459 new_order = (control_unit) + ((priority - 1) * 1000);
460
461 return (new_order);
462 }
463
464 static inline u_int32_t
465 necp_get_first_order_for_priority(u_int32_t priority)
466 {
467 return (((priority - 1) * 1000) + 1);
468 }
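/*
 * Worked example (never compiled) of the order math above, assuming priority
 * value 3 is within the valid range: a session at priority 3 on kernel
 * control unit 12 is assigned order 12 + (3 - 1) * 1000 == 2012, and the
 * first order reserved for priority 3 is (3 - 1) * 1000 + 1 == 2001. The
 * sysctl handler below uses the same helper, so a drop-all level of 3 maps
 * to necp_drop_all_order == 2001.
 */
#if 0
static void
necp_example_order_math(void)
{
	u_int32_t example_order = necp_allocate_new_session_order(3, 12);	// 2012
	u_int32_t example_first = necp_get_first_order_for_priority(3);	// 2001
	(void)example_order;
	(void)example_first;
}
#endif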
469
470 // Sysctl handler
471 static int
472 sysctl_handle_necp_level SYSCTL_HANDLER_ARGS
473 {
474 #pragma unused(arg1, arg2)
475 int error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
476 if (necp_drop_all_level == 0) {
477 necp_drop_all_order = 0;
478 } else {
479 necp_drop_all_order = necp_get_first_order_for_priority(necp_drop_all_level);
480 }
481 return (error);
482 }
483
484 // Session fd
485
486 static int noop_read(struct fileproc *, struct uio *, int, vfs_context_t);
487 static int noop_write(struct fileproc *, struct uio *, int, vfs_context_t);
488 static int noop_ioctl(struct fileproc *, unsigned long, caddr_t,
489 vfs_context_t);
490 static int noop_select(struct fileproc *, int, void *, vfs_context_t);
491 static int necp_session_op_close(struct fileglob *, vfs_context_t);
492 static int noop_kqfilter(struct fileproc *, struct knote *,
493 struct kevent_internal_s *, vfs_context_t);
494
495 static const struct fileops necp_session_fd_ops = {
496 .fo_type = DTYPE_NETPOLICY,
497 .fo_read = noop_read,
498 .fo_write = noop_write,
499 .fo_ioctl = noop_ioctl,
500 .fo_select = noop_select,
501 .fo_close = necp_session_op_close,
502 .fo_kqfilter = noop_kqfilter,
503 .fo_drain = NULL,
504 };
505
506 static int
507 noop_read(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx)
508 {
509 #pragma unused(fp, uio, flags, ctx)
510 return (ENXIO);
511 }
512
513 static int
514 noop_write(struct fileproc *fp, struct uio *uio, int flags,
515 vfs_context_t ctx)
516 {
517 #pragma unused(fp, uio, flags, ctx)
518 return (ENXIO);
519 }
520
521 static int
522 noop_ioctl(struct fileproc *fp, unsigned long com, caddr_t data,
523 vfs_context_t ctx)
524 {
525 #pragma unused(fp, com, data, ctx)
526 return (ENOTTY);
527 }
528
529 static int
530 noop_select(struct fileproc *fp, int which, void *wql, vfs_context_t ctx)
531 {
532 #pragma unused(fp, which, wql, ctx)
533 return (ENXIO);
534 }
535
536 static int
537 noop_kqfilter(struct fileproc *fp, struct knote *kn,
538 struct kevent_internal_s *kev, vfs_context_t ctx)
539 {
540 #pragma unused(fp, kn, kev, ctx)
541 return (ENXIO);
542 }
543
544 int
545 necp_session_open(struct proc *p, struct necp_session_open_args *uap, int *retval)
546 {
547 #pragma unused(uap)
548 int error = 0;
549 struct necp_session *session = NULL;
550 struct fileproc *fp = NULL;
551 int fd = -1;
552
553 uid_t uid = kauth_cred_getuid(proc_ucred(p));
554 if (uid != 0 && priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES, 0) != 0) {
555 NECPLOG0(LOG_ERR, "Process does not hold necessary entitlement to open NECP session");
556 error = EACCES;
557 goto done;
558 }
559
560 error = falloc(p, &fp, &fd, vfs_context_current());
561 if (error != 0) {
562 goto done;
563 }
564
565 session = necp_create_session();
566 if (session == NULL) {
567 error = ENOMEM;
568 goto done;
569 }
570
571 fp->f_fglob->fg_flag = 0;
572 fp->f_fglob->fg_ops = &necp_session_fd_ops;
573 fp->f_fglob->fg_data = session;
574
575 proc_fdlock(p);
576 FDFLAGS_SET(p, fd, (UF_EXCLOSE | UF_FORKCLOSE));
577 procfdtbl_releasefd(p, fd, NULL);
578 fp_drop(p, fd, fp, 1);
579 proc_fdunlock(p);
580
581 *retval = fd;
582 done:
583 if (error != 0) {
584 if (fp != NULL) {
585 fp_free(p, fd, fp);
586 fp = NULL;
587 }
588 }
589
590 return (error);
591 }
592
593 static int
594 necp_session_op_close(struct fileglob *fg, vfs_context_t ctx)
595 {
596 #pragma unused(ctx)
597 struct necp_session *session = (struct necp_session *)fg->fg_data;
598 fg->fg_data = NULL;
599
600 if (session != NULL) {
601 necp_policy_mark_all_for_deletion(session);
602 necp_policy_apply_all(session);
603 necp_delete_session(session);
604 return (0);
605 } else {
606 return (ENOENT);
607 }
608 }
609
610 static int
611 necp_session_find_from_fd(int fd, struct necp_session **session)
612 {
613 proc_t p = current_proc();
614 struct fileproc *fp = NULL;
615 int error = 0;
616
617 proc_fdlock_spin(p);
618 if ((error = fp_lookup(p, fd, &fp, 1)) != 0) {
619 goto done;
620 }
621 if (fp->f_fglob->fg_ops->fo_type != DTYPE_NETPOLICY) {
622 fp_drop(p, fd, fp, 1);
623 error = ENODEV;
624 goto done;
625 }
626 *session = (struct necp_session *)fp->f_fglob->fg_data;
627
628 done:
629 proc_fdunlock(p);
630 return (error);
631 }
632
633 static int
634 necp_session_add_policy(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
635 {
636 int error = 0;
637 u_int8_t *tlv_buffer = NULL;
638
639 if (uap->in_buffer_length == 0 || uap->in_buffer_length > NECP_MAX_POLICY_SIZE || uap->in_buffer == 0) {
640 NECPLOG(LOG_ERR, "necp_session_add_policy invalid input (%zu)", uap->in_buffer_length);
641 error = EINVAL;
642 goto done;
643 }
644
645 if (uap->out_buffer_length < sizeof(necp_policy_id) || uap->out_buffer == 0) {
646 NECPLOG(LOG_ERR, "necp_session_add_policy invalid output buffer (%zu)", uap->out_buffer_length);
647 error = EINVAL;
648 goto done;
649 }
650
651 if ((tlv_buffer = _MALLOC(uap->in_buffer_length, M_NECP, M_WAITOK | M_ZERO)) == NULL) {
652 error = ENOMEM;
653 goto done;
654 }
655
656 error = copyin(uap->in_buffer, tlv_buffer, uap->in_buffer_length);
657 if (error != 0) {
658 NECPLOG(LOG_ERR, "necp_session_add_policy tlv copyin error (%d)", error);
659 goto done;
660 }
661
662 necp_policy_id new_policy_id = necp_handle_policy_add(session, 0, NULL, tlv_buffer, uap->in_buffer_length, 0, &error);
663 if (error != 0) {
664 NECPLOG(LOG_ERR, "necp_session_add_policy failed to add policy (%d)", error);
665 goto done;
666 }
667
668 error = copyout(&new_policy_id, uap->out_buffer, sizeof(new_policy_id));
669 if (error != 0) {
670 NECPLOG(LOG_ERR, "necp_session_add_policy policy_id copyout error (%d)", error);
671 goto done;
672 }
673
674 done:
675 if (tlv_buffer != NULL) {
676 FREE(tlv_buffer, M_NECP);
677 tlv_buffer = NULL;
678 }
679 *retval = error;
680
681 return (error);
682 }
683
684 static int
685 necp_session_get_policy(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
686 {
687 int error = 0;
688 u_int8_t *response = NULL;
689
690 if (uap->in_buffer_length < sizeof(necp_policy_id) || uap->in_buffer == 0) {
691 NECPLOG(LOG_ERR, "necp_session_get_policy invalid input (%zu)", uap->in_buffer_length);
692 error = EINVAL;
693 goto done;
694 }
695
696 necp_policy_id policy_id = 0;
697 error = copyin(uap->in_buffer, &policy_id, sizeof(policy_id));
698 if (error != 0) {
699 NECPLOG(LOG_ERR, "necp_session_get_policy policy_id copyin error (%d)", error);
700 goto done;
701 }
702
703 struct necp_session_policy *policy = necp_policy_find(session, policy_id);
704 if (policy == NULL || policy->pending_deletion) {
705 NECPLOG(LOG_ERR, "Failed to find policy with id %d", policy_id);
706 error = ENOENT;
707 goto done;
708 }
709
710 u_int32_t order_tlv_size = sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(necp_policy_order);
711 u_int32_t result_tlv_size = (policy->result_size ? (sizeof(u_int8_t) + sizeof(u_int32_t) + policy->result_size) : 0);
712 u_int32_t response_size = order_tlv_size + result_tlv_size + policy->conditions_size;
713
714 if (uap->out_buffer_length < response_size || uap->out_buffer == 0) {
715 NECPLOG(LOG_ERR, "necp_session_get_policy buffer not large enough (%u < %u)", uap->out_buffer_length, response_size);
716 error = EINVAL;
717 goto done;
718 }
719
720 if (response_size > NECP_MAX_POLICY_SIZE) {
721 NECPLOG(LOG_ERR, "necp_session_get_policy size too large to copy (%u)", response_size);
722 error = EINVAL;
723 goto done;
724 }
725
726 MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK | M_ZERO);
727 if (response == NULL) {
728 error = ENOMEM;
729 goto done;
730 }
731
732 u_int8_t *cursor = response;
733 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ORDER, sizeof(necp_policy_order), &policy->order, response, response_size);
734 if (result_tlv_size) {
735 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_RESULT, policy->result_size, &policy->result, response, response_size);
736 }
737 if (policy->conditions_size) {
738 memcpy(((u_int8_t *)(void *)(cursor)), policy->conditions, policy->conditions_size);
739 }
740
741 error = copyout(response, uap->out_buffer, response_size);
742 if (error != 0) {
743 NECPLOG(LOG_ERR, "necp_session_get_policy TLV copyout error (%d)", error);
744 goto done;
745 }
746
747 done:
748 if (response != NULL) {
749 FREE(response, M_NECP);
750 response = NULL;
751 }
752 *retval = error;
753
754 return (error);
755 }
756
757 static int
758 necp_session_delete_policy(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
759 {
760 int error = 0;
761
762 if (uap->in_buffer_length < sizeof(necp_policy_id) || uap->in_buffer == 0) {
763 NECPLOG(LOG_ERR, "necp_session_delete_policy invalid input (%zu)", uap->in_buffer_length);
764 error = EINVAL;
765 goto done;
766 }
767
768 necp_policy_id delete_policy_id = 0;
769 error = copyin(uap->in_buffer, &delete_policy_id, sizeof(delete_policy_id));
770 if (error != 0) {
771 NECPLOG(LOG_ERR, "necp_session_delete_policy policy_id copyin error (%d)", error);
772 goto done;
773 }
774
775 struct necp_session_policy *policy = necp_policy_find(session, delete_policy_id);
776 if (policy == NULL || policy->pending_deletion) {
777 NECPLOG(LOG_ERR, "necp_session_delete_policy failed to find policy with id %u", delete_policy_id);
778 error = ENOENT;
779 goto done;
780 }
781
782 necp_policy_mark_for_deletion(session, policy);
783 done:
784 *retval = error;
785 return (error);
786 }
787
788 static int
789 necp_session_apply_all(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
790 {
791 #pragma unused(uap)
792 necp_policy_apply_all(session);
793 *retval = 0;
794 return (0);
795 }
796
797 static int
798 necp_session_list_all(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
799 {
800 u_int32_t tlv_size = (sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(necp_policy_id));
801 u_int32_t response_size = 0;
802 u_int8_t *response = NULL;
803 int num_policies = 0;
804 int cur_policy_index = 0;
805 int error = 0;
806 struct necp_session_policy *policy;
807
808 LIST_FOREACH(policy, &session->policies, chain) {
809 if (!policy->pending_deletion) {
810 num_policies++;
811 }
812 }
813
814 if (num_policies > NECP_MAX_POLICY_LIST_COUNT) {
815 NECPLOG(LOG_ERR, "necp_session_list_all size too large to copy (%u policies)", num_policies);
816 error = EINVAL;
817 goto done;
818 }
819
820 response_size = num_policies * tlv_size;
821 if (uap->out_buffer_length < response_size || uap->out_buffer == 0) {
822 NECPLOG(LOG_ERR, "necp_session_list_all buffer not large enough (%u < %u)", uap->out_buffer_length, response_size);
823 error = EINVAL;
824 goto done;
825 }
826
827 // Create a response with one Policy ID TLV for each policy
828 MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK | M_ZERO);
829 if (response == NULL) {
830 error = ENOMEM;
831 goto done;
832 }
833
834 u_int8_t *cursor = response;
835 LIST_FOREACH(policy, &session->policies, chain) {
836 if (!policy->pending_deletion && cur_policy_index < num_policies) {
837 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ID, sizeof(u_int32_t), &policy->id, response, response_size);
838 cur_policy_index++;
839 }
840 }
841
842 error = copyout(response, uap->out_buffer, response_size);
843 if (error != 0) {
844 NECPLOG(LOG_ERR, "necp_session_list_all TLV copyout error (%d)", error);
845 goto done;
846 }
847
848 done:
849 if (response != NULL) {
850 FREE(response, M_NECP);
851 response = NULL;
852 }
853 *retval = error;
854
855 return (error);
856 }
857
858
859 static int
860 necp_session_delete_all(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
861 {
862 #pragma unused(uap)
863 necp_policy_mark_all_for_deletion(session);
864 *retval = 0;
865 return (0);
866 }
867
868 static int
869 necp_session_set_session_priority(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
870 {
871 int error = 0;
872 struct necp_session_policy *policy = NULL;
873 struct necp_session_policy *temp_policy = NULL;
874
875 if (uap->in_buffer_length < sizeof(necp_session_priority) || uap->in_buffer == 0) {
876 NECPLOG(LOG_ERR, "necp_session_set_session_priority invalid input (%zu)", uap->in_buffer_length);
877 error = EINVAL;
878 goto done;
879 }
880
881 necp_session_priority requested_session_priority = 0;
882 error = copyin(uap->in_buffer, &requested_session_priority, sizeof(requested_session_priority));
883 if (error != 0) {
884 NECPLOG(LOG_ERR, "necp_session_set_session_priority priority copyin error (%d)", error);
885 goto done;
886 }
887
888 // Enforce special session priorities with entitlements
889 if (requested_session_priority == NECP_SESSION_PRIORITY_CONTROL ||
890 requested_session_priority == NECP_SESSION_PRIORITY_PRIVILEGED_TUNNEL) {
891 errno_t cred_result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES, 0);
892 if (cred_result != 0) {
893 NECPLOG(LOG_ERR, "Session does not hold necessary entitlement to claim priority level %d", requested_session_priority);
894 error = EPERM;
895 goto done;
896 }
897 }
898
899 if (session->session_priority != requested_session_priority) {
900 session->session_priority = requested_session_priority;
901 session->session_order = necp_allocate_new_session_order(session->session_priority, session->control_unit);
902 session->dirty = TRUE;
903
904 // Mark all policies as needing updates
905 LIST_FOREACH_SAFE(policy, &session->policies, chain, temp_policy) {
906 policy->pending_update = TRUE;
907 }
908 }
909
910 done:
911 *retval = error;
912 return (error);
913 }
914
915 static int
916 necp_session_lock_to_process(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
917 {
918 #pragma unused(uap)
919 session->proc_locked = TRUE;
920 *retval = 0;
921 return (0);
922 }
923
924 static int
925 necp_session_register_service(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
926 {
927 int error = 0;
928 struct necp_service_registration *new_service = NULL;
929
930 if (uap->in_buffer_length < sizeof(uuid_t) || uap->in_buffer == 0) {
931 NECPLOG(LOG_ERR, "necp_session_register_service invalid input (%zu)", uap->in_buffer_length);
932 error = EINVAL;
933 goto done;
934 }
935
936 uuid_t service_uuid;
937 error = copyin(uap->in_buffer, service_uuid, sizeof(service_uuid));
938 if (error != 0) {
939 NECPLOG(LOG_ERR, "necp_session_register_service uuid copyin error (%d)", error);
940 goto done;
941 }
942
943 MALLOC(new_service, struct necp_service_registration *, sizeof(*new_service), M_NECP, M_WAITOK | M_ZERO);
944 if (new_service == NULL) {
945 NECPLOG0(LOG_ERR, "Failed to allocate service registration");
946 error = ENOMEM;
947 goto done;
948 }
949
950 lck_rw_lock_exclusive(&necp_kernel_policy_lock);
951 new_service->service_id = necp_create_uuid_service_id_mapping(service_uuid);
952 LIST_INSERT_HEAD(&session->services, new_service, session_chain);
953 LIST_INSERT_HEAD(&necp_registered_service_list, new_service, kernel_chain);
954 lck_rw_done(&necp_kernel_policy_lock);
955
956 done:
957 *retval = error;
958 return (error);
959 }
960
961 static int
962 necp_session_unregister_service(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
963 {
964 int error = 0;
965 struct necp_service_registration *service = NULL;
966 struct necp_service_registration *temp_service = NULL;
967 struct necp_uuid_id_mapping *mapping = NULL;
968
969 if (uap->in_buffer_length < sizeof(uuid_t) || uap->in_buffer == 0) {
970 NECPLOG(LOG_ERR, "necp_session_unregister_service invalid input (%zu)", uap->in_buffer_length);
971 error = EINVAL;
972 goto done;
973 }
974
975 uuid_t service_uuid;
976 error = copyin(uap->in_buffer, service_uuid, sizeof(service_uuid));
977 if (error != 0) {
978 NECPLOG(LOG_ERR, "necp_session_unregister_service uuid copyin error (%d)", error);
979 goto done;
980 }
981
982 // Remove all matching services for this session
983 lck_rw_lock_exclusive(&necp_kernel_policy_lock);
984 mapping = necp_uuid_lookup_service_id_locked(service_uuid);
985 if (mapping != NULL) {
986 LIST_FOREACH_SAFE(service, &session->services, session_chain, temp_service) {
987 if (service->service_id == mapping->id) {
988 LIST_REMOVE(service, session_chain);
989 LIST_REMOVE(service, kernel_chain);
990 FREE(service, M_NECP);
991 }
992 }
993 necp_remove_uuid_service_id_mapping(service_uuid);
994 }
995 lck_rw_done(&necp_kernel_policy_lock);
996
997 done:
998 *retval = error;
999 return (error);
1000 }
1001
1002 static int
1003 necp_session_dump_all(struct necp_session *session, struct necp_session_action_args *uap, int *retval)
1004 {
1005 int error = 0;
1006
1007 if (uap->out_buffer_length == 0 || uap->out_buffer == 0) {
1008 NECPLOG(LOG_ERR, "necp_session_dump_all invalid output buffer (%zu)", uap->out_buffer_length);
1009 error = EINVAL;
1010 goto done;
1011 }
1012
1013 error = necp_handle_policy_dump_all(session, 0, NULL, uap->out_buffer, uap->out_buffer_length, 0);
1014 done:
1015 *retval = error;
1016 return (error);
1017 }
1018
1019 int
1020 necp_session_action(struct proc *p, struct necp_session_action_args *uap, int *retval)
1021 {
1022 #pragma unused(p)
1023 int error = 0;
1024 int return_value = 0;
1025 struct necp_session *session = NULL;
1026 error = necp_session_find_from_fd(uap->necp_fd, &session);
1027 if (error != 0) {
1028 NECPLOG(LOG_ERR, "necp_session_action find fd error (%d)", error);
1029 return (error);
1030 }
1031
1032 NECP_SESSION_LOCK(session);
1033
1034 if (session->proc_locked) {
1035 // Verify that the calling process is allowed to do actions
1036 uuid_t proc_uuid;
1037 proc_getexecutableuuid(current_proc(), proc_uuid, sizeof(proc_uuid));
1038 if (uuid_compare(proc_uuid, session->proc_uuid) != 0) {
1039 error = EPERM;
1040 goto done;
1041 }
1042 } else {
1043 // If not locked, update the proc_uuid and proc_pid of the session
1044 proc_getexecutableuuid(current_proc(), session->proc_uuid, sizeof(session->proc_uuid));
1045 session->proc_pid = proc_pid(current_proc());
1046 }
1047
1048 u_int32_t action = uap->action;
1049 switch (action) {
1050 case NECP_SESSION_ACTION_POLICY_ADD: {
1051 return_value = necp_session_add_policy(session, uap, retval);
1052 break;
1053 }
1054 case NECP_SESSION_ACTION_POLICY_GET: {
1055 return_value = necp_session_get_policy(session, uap, retval);
1056 break;
1057 }
1058 case NECP_SESSION_ACTION_POLICY_DELETE: {
1059 return_value = necp_session_delete_policy(session, uap, retval);
1060 break;
1061 }
1062 case NECP_SESSION_ACTION_POLICY_APPLY_ALL: {
1063 return_value = necp_session_apply_all(session, uap, retval);
1064 break;
1065 }
1066 case NECP_SESSION_ACTION_POLICY_LIST_ALL: {
1067 return_value = necp_session_list_all(session, uap, retval);
1068 break;
1069 }
1070 case NECP_SESSION_ACTION_POLICY_DELETE_ALL: {
1071 return_value = necp_session_delete_all(session, uap, retval);
1072 break;
1073 }
1074 case NECP_SESSION_ACTION_SET_SESSION_PRIORITY: {
1075 return_value = necp_session_set_session_priority(session, uap, retval);
1076 break;
1077 }
1078 case NECP_SESSION_ACTION_LOCK_SESSION_TO_PROC: {
1079 return_value = necp_session_lock_to_process(session, uap, retval);
1080 break;
1081 }
1082 case NECP_SESSION_ACTION_REGISTER_SERVICE: {
1083 return_value = necp_session_register_service(session, uap, retval);
1084 break;
1085 }
1086 case NECP_SESSION_ACTION_UNREGISTER_SERVICE: {
1087 return_value = necp_session_unregister_service(session, uap, retval);
1088 break;
1089 }
1090 case NECP_SESSION_ACTION_POLICY_DUMP_ALL: {
1091 return_value = necp_session_dump_all(session, uap, retval);
1092 break;
1093 }
1094 default: {
1095 NECPLOG(LOG_ERR, "necp_session_action unknown action (%u)", action);
1096 return_value = EINVAL;
1097 break;
1098 }
1099 }
1100
1101 done:
1102 NECP_SESSION_UNLOCK(session);
1103 file_drop(uap->necp_fd);
1104
1105 return (return_value);
1106 }
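/*
 * A hedged user-space sketch (never compiled) of driving this handler through
 * the session file descriptor path. It assumes user-space syscall stubs whose
 * parameters mirror the uap fields consumed above (necp_fd, action, in_buffer,
 * in_buffer_length, out_buffer, out_buffer_length); the exact stub prototypes
 * are an assumption of this example, not taken from this file.
 */
#if 0
#include <unistd.h>

static void
necp_example_session_flow(void)
{
	// Assumed stub for the necp_session_open syscall; returns a session fd
	int necp_fd = necp_session_open(0);
	if (necp_fd < 0) {
		return;
	}

	// Mark every policy in the session for deletion, then apply the change;
	// neither action reads the input or output buffers.
	(void)necp_session_action(necp_fd, NECP_SESSION_ACTION_POLICY_DELETE_ALL,
	    NULL, 0, NULL, 0);
	(void)necp_session_action(necp_fd, NECP_SESSION_ACTION_POLICY_APPLY_ALL,
	    NULL, 0, NULL, 0);

	// Closing the fd tears the session down via necp_session_op_close()
	close(necp_fd);
}
#endif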
1107
1108 // Kernel Control functions
1109 static errno_t necp_register_control(void);
1110 static errno_t necp_ctl_connect(kern_ctl_ref kctlref, struct sockaddr_ctl *sac, void **unitinfo);
1111 static errno_t necp_ctl_disconnect(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo);
1112 static errno_t necp_ctl_send(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, mbuf_t m, int flags);
1113 static void necp_ctl_rcvd(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int flags);
1114 static errno_t necp_ctl_getopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int opt, void *data, size_t *len);
1115 static errno_t necp_ctl_setopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int opt, void *data, size_t len);
1116
1117 static bool necp_send_ctl_data(struct necp_session *session, u_int8_t *buffer, size_t buffer_size);
1118
1119 errno_t
1120 necp_init(void)
1121 {
1122 errno_t result = 0;
1123
1124 result = necp_register_control();
1125 if (result != 0) {
1126 goto done;
1127 }
1128
1129 necp_kernel_policy_grp_attr = lck_grp_attr_alloc_init();
1130 if (necp_kernel_policy_grp_attr == NULL) {
1131 NECPLOG0(LOG_ERR, "lck_grp_attr_alloc_init failed");
1132 result = ENOMEM;
1133 goto done;
1134 }
1135
1136 necp_kernel_policy_mtx_grp = lck_grp_alloc_init(NECP_CONTROL_NAME, necp_kernel_policy_grp_attr);
1137 if (necp_kernel_policy_mtx_grp == NULL) {
1138 NECPLOG0(LOG_ERR, "lck_grp_alloc_init failed");
1139 result = ENOMEM;
1140 goto done;
1141 }
1142
1143 necp_kernel_policy_mtx_attr = lck_attr_alloc_init();
1144 if (necp_kernel_policy_mtx_attr == NULL) {
1145 NECPLOG0(LOG_ERR, "lck_attr_alloc_init failed");
1146 result = ENOMEM;
1147 goto done;
1148 }
1149
1150 lck_rw_init(&necp_kernel_policy_lock, necp_kernel_policy_mtx_grp, necp_kernel_policy_mtx_attr);
1151
1152 necp_route_rule_grp_attr = lck_grp_attr_alloc_init();
1153 if (necp_route_rule_grp_attr == NULL) {
1154 NECPLOG0(LOG_ERR, "lck_grp_attr_alloc_init failed");
1155 result = ENOMEM;
1156 goto done;
1157 }
1158
1159 necp_route_rule_mtx_grp = lck_grp_alloc_init("necp_route_rule", necp_route_rule_grp_attr);
1160 if (necp_route_rule_mtx_grp == NULL) {
1161 NECPLOG0(LOG_ERR, "lck_grp_alloc_init failed");
1162 result = ENOMEM;
1163 goto done;
1164 }
1165
1166 necp_route_rule_mtx_attr = lck_attr_alloc_init();
1167 if (necp_route_rule_mtx_attr == NULL) {
1168 NECPLOG0(LOG_ERR, "lck_attr_alloc_init failed");
1169 result = ENOMEM;
1170 goto done;
1171 }
1172
1173 lck_rw_init(&necp_route_rule_lock, necp_route_rule_mtx_grp, necp_route_rule_mtx_attr);
1174
1175 necp_client_init();
1176
1177 TAILQ_INIT(&necp_session_list);
1178
1179 LIST_INIT(&necp_kernel_socket_policies);
1180 LIST_INIT(&necp_kernel_ip_output_policies);
1181
1182 LIST_INIT(&necp_account_id_list);
1183
1184 LIST_INIT(&necp_uuid_service_id_list);
1185
1186 LIST_INIT(&necp_registered_service_list);
1187
1188 LIST_INIT(&necp_route_rules);
1189 LIST_INIT(&necp_aggregate_route_rules);
1190
1191 necp_uuid_app_id_hashtbl = hashinit(NECP_UUID_APP_ID_HASH_SIZE, M_NECP, &necp_uuid_app_id_hash_mask);
1192 necp_uuid_app_id_hash_num_buckets = necp_uuid_app_id_hash_mask + 1;
1193 necp_num_uuid_app_id_mappings = 0;
1194 necp_uuid_app_id_mappings_dirty = FALSE;
1195
1196 necp_kernel_application_policies_condition_mask = 0;
1197 necp_kernel_socket_policies_condition_mask = 0;
1198 necp_kernel_ip_output_policies_condition_mask = 0;
1199
1200 necp_kernel_application_policies_count = 0;
1201 necp_kernel_socket_policies_count = 0;
1202 necp_kernel_socket_policies_non_app_count = 0;
1203 necp_kernel_ip_output_policies_count = 0;
1204 necp_kernel_ip_output_policies_non_id_count = 0;
1205
1206 necp_last_policy_id = 0;
1207 necp_last_kernel_policy_id = 0;
1208 necp_last_uuid_id = 0;
1209 necp_last_string_id = 0;
1210 necp_last_route_rule_id = 0;
1211 necp_last_aggregate_route_rule_id = 0;
1212
1213 necp_kernel_socket_policies_gencount = 1;
1214
1215 memset(&necp_kernel_socket_policies_map, 0, sizeof(necp_kernel_socket_policies_map));
1216 memset(&necp_kernel_ip_output_policies_map, 0, sizeof(necp_kernel_ip_output_policies_map));
1217 necp_kernel_socket_policies_app_layer_map = NULL;
1218
1219 done:
1220 if (result != 0) {
1221 if (necp_kernel_policy_mtx_attr != NULL) {
1222 lck_attr_free(necp_kernel_policy_mtx_attr);
1223 necp_kernel_policy_mtx_attr = NULL;
1224 }
1225 if (necp_kernel_policy_mtx_grp != NULL) {
1226 lck_grp_free(necp_kernel_policy_mtx_grp);
1227 necp_kernel_policy_mtx_grp = NULL;
1228 }
1229 if (necp_kernel_policy_grp_attr != NULL) {
1230 lck_grp_attr_free(necp_kernel_policy_grp_attr);
1231 necp_kernel_policy_grp_attr = NULL;
1232 }
1233 if (necp_route_rule_mtx_attr != NULL) {
1234 lck_attr_free(necp_route_rule_mtx_attr);
1235 necp_route_rule_mtx_attr = NULL;
1236 }
1237 if (necp_route_rule_mtx_grp != NULL) {
1238 lck_grp_free(necp_route_rule_mtx_grp);
1239 necp_route_rule_mtx_grp = NULL;
1240 }
1241 if (necp_route_rule_grp_attr != NULL) {
1242 lck_grp_attr_free(necp_route_rule_grp_attr);
1243 necp_route_rule_grp_attr = NULL;
1244 }
1245 if (necp_kctlref != NULL) {
1246 ctl_deregister(necp_kctlref);
1247 necp_kctlref = NULL;
1248 }
1249 }
1250 return (result);
1251 }
1252
1253 static errno_t
1254 necp_register_control(void)
1255 {
1256 struct kern_ctl_reg kern_ctl;
1257 errno_t result = 0;
1258
1259 // Create a tag to allocate memory
1260 necp_malloc_tag = OSMalloc_Tagalloc(NECP_CONTROL_NAME, OSMT_DEFAULT);
1261
1262 // Find a unique value for our interface family
1263 result = mbuf_tag_id_find(NECP_CONTROL_NAME, &necp_family);
1264 if (result != 0) {
1265 NECPLOG(LOG_ERR, "mbuf_tag_id_find_internal failed: %d", result);
1266 return (result);
1267 }
1268
1269 bzero(&kern_ctl, sizeof(kern_ctl));
1270 strlcpy(kern_ctl.ctl_name, NECP_CONTROL_NAME, sizeof(kern_ctl.ctl_name));
1271 kern_ctl.ctl_name[sizeof(kern_ctl.ctl_name) - 1] = 0;
1272 kern_ctl.ctl_flags = CTL_FLAG_PRIVILEGED; // Require root
1273 kern_ctl.ctl_sendsize = 64 * 1024;
1274 kern_ctl.ctl_recvsize = 64 * 1024;
1275 kern_ctl.ctl_connect = necp_ctl_connect;
1276 kern_ctl.ctl_disconnect = necp_ctl_disconnect;
1277 kern_ctl.ctl_send = necp_ctl_send;
1278 kern_ctl.ctl_rcvd = necp_ctl_rcvd;
1279 kern_ctl.ctl_setopt = necp_ctl_setopt;
1280 kern_ctl.ctl_getopt = necp_ctl_getopt;
1281
1282 result = ctl_register(&kern_ctl, &necp_kctlref);
1283 if (result != 0) {
1284 NECPLOG(LOG_ERR, "ctl_register failed: %d", result);
1285 return (result);
1286 }
1287
1288 return (0);
1289 }
1290
1291 static void
1292 necp_post_change_event(struct kev_necp_policies_changed_data *necp_event_data)
1293 {
1294 struct kev_msg ev_msg;
1295 memset(&ev_msg, 0, sizeof(ev_msg));
1296
1297 ev_msg.vendor_code = KEV_VENDOR_APPLE;
1298 ev_msg.kev_class = KEV_NETWORK_CLASS;
1299 ev_msg.kev_subclass = KEV_NECP_SUBCLASS;
1300 ev_msg.event_code = KEV_NECP_POLICIES_CHANGED;
1301
1302 ev_msg.dv[0].data_ptr = necp_event_data;
1303 ev_msg.dv[0].data_length = sizeof(necp_event_data->changed_count);
1304 ev_msg.dv[1].data_length = 0;
1305
1306 kev_post_msg(&ev_msg);
1307 }
1308
1309 static errno_t
1310 necp_ctl_connect(kern_ctl_ref kctlref, struct sockaddr_ctl *sac, void **unitinfo)
1311 {
1312 #pragma unused(kctlref, sac)
1313 *unitinfo = necp_create_session();
1314 if (*unitinfo == NULL) {
1315 // Could not allocate session
1316 return (ENOBUFS);
1317 }
1318
1319 return (0);
1320 }
1321
1322 static errno_t
1323 necp_ctl_disconnect(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo)
1324 {
1325 #pragma unused(kctlref, unit)
1326 struct necp_session *session = (struct necp_session *)unitinfo;
1327 if (session != NULL) {
1328 necp_policy_mark_all_for_deletion(session);
1329 necp_policy_apply_all(session);
1330 necp_delete_session((struct necp_session *)unitinfo);
1331 }
1332
1333 return (0);
1334 }
1335
1336
1337 // Message handling
1338 static int
1339 necp_packet_find_tlv(mbuf_t packet, int offset, u_int8_t type, int *err, int next)
1340 {
1341 size_t cursor = offset;
1342 int error = 0;
1343 u_int32_t curr_length;
1344 u_int8_t curr_type;
1345
1346 *err = 0;
1347
1348 do {
1349 if (!next) {
1350 error = mbuf_copydata(packet, cursor, sizeof(curr_type), &curr_type);
1351 if (error) {
1352 *err = ENOENT;
1353 return (-1);
1354 }
1355 } else {
1356 next = 0;
1357 curr_type = NECP_TLV_NIL;
1358 }
1359
1360 if (curr_type != type) {
1361 cursor += sizeof(curr_type);
1362 error = mbuf_copydata(packet, cursor, sizeof(curr_length), &curr_length);
1363 if (error) {
1364 *err = error;
1365 return (-1);
1366 }
1367 cursor += (sizeof(curr_length) + curr_length);
1368 }
1369 } while (curr_type != type);
1370
1371 return (cursor);
1372 }
1373
1374 static int
1375 necp_packet_get_tlv_at_offset(mbuf_t packet, int tlv_offset, u_int32_t buff_len, void *buff, u_int32_t *value_size)
1376 {
1377 int error = 0;
1378 u_int32_t length;
1379
1380 if (tlv_offset < 0) {
1381 return (EINVAL);
1382 }
1383
1384 error = mbuf_copydata(packet, tlv_offset + sizeof(u_int8_t), sizeof(length), &length);
1385 if (error) {
1386 return (error);
1387 }
1388
1389 u_int32_t total_len = m_length2(packet, NULL);
1390 if (total_len < (tlv_offset + sizeof(u_int8_t) + sizeof(length) + length)) {
1391 NECPLOG(LOG_ERR, "Got a bad TLV, length (%u) + offset (%d) < total length (%u)",
1392 length, (tlv_offset + sizeof(u_int8_t) + sizeof(length)), total_len);
1393 return (EINVAL);
1394 }
1395
1396 if (value_size != NULL) {
1397 *value_size = length;
1398 }
1399
1400 if (buff != NULL && buff_len > 0) {
1401 u_int32_t to_copy = (length < buff_len) ? length : buff_len;
1402 error = mbuf_copydata(packet, tlv_offset + sizeof(u_int8_t) + sizeof(length), to_copy, buff);
1403 if (error) {
1404 return (error);
1405 }
1406 }
1407
1408 return (0);
1409 }
1410
1411 static u_int8_t *
1412 necp_buffer_write_packet_header(u_int8_t *buffer, u_int8_t packet_type, u_int8_t flags, u_int32_t message_id)
1413 {
1414 ((struct necp_packet_header *)(void *)buffer)->packet_type = packet_type;
1415 ((struct necp_packet_header *)(void *)buffer)->flags = flags;
1416 ((struct necp_packet_header *)(void *)buffer)->message_id = message_id;
1417 return (buffer + sizeof(struct necp_packet_header));
1418 }
1419
1420 static inline bool
1421 necp_buffer_write_tlv_validate(u_int8_t *cursor, u_int8_t type, u_int32_t length,
1422 u_int8_t *buffer, u_int32_t buffer_length)
1423 {
1424 if (cursor < buffer || (uintptr_t)(cursor - buffer) > buffer_length) {
1425 NECPLOG0(LOG_ERR, "Cannot write TLV in buffer (invalid cursor)");
1426 return (false);
1427 }
1428 u_int8_t *next_tlv = (u_int8_t *)(cursor + sizeof(type) + sizeof(length) + length);
1429 if (next_tlv <= buffer || // make sure the next TLV start doesn't overflow
1430 (uintptr_t)(next_tlv - buffer) > buffer_length) { // make sure the next TLV has enough room in buffer
1431 NECPLOG(LOG_ERR, "Cannot write TLV in buffer (TLV length %u, buffer length %u)",
1432 length, buffer_length);
1433 return (false);
1434 }
1435 return (true);
1436 }
1437
1438 u_int8_t *
1439 necp_buffer_write_tlv_if_different(u_int8_t *cursor, u_int8_t type,
1440 u_int32_t length, const void *value, bool *updated,
1441 u_int8_t *buffer, u_int32_t buffer_length)
1442 {
1443 if (!necp_buffer_write_tlv_validate(cursor, type, length, buffer, buffer_length)) {
1444 return (NULL);
1445 }
1446 u_int8_t *next_tlv = (u_int8_t *)(cursor + sizeof(type) + sizeof(length) + length);
1447 if (*updated || *(u_int8_t *)(cursor) != type) {
1448 *(u_int8_t *)(cursor) = type;
1449 *updated = TRUE;
1450 }
1451 if (*updated || *(u_int32_t *)(void *)(cursor + sizeof(type)) != length) {
1452 *(u_int32_t *)(void *)(cursor + sizeof(type)) = length;
1453 *updated = TRUE;
1454 }
1455 if (length > 0) {
1456 if (*updated || memcmp((u_int8_t *)(cursor + sizeof(type) + sizeof(length)), value, length) != 0) {
1457 memcpy((u_int8_t *)(cursor + sizeof(type) + sizeof(length)), value, length);
1458 *updated = TRUE;
1459 }
1460 }
1461 return (next_tlv);
1462 }
1463
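/*
 * Responses are composed by writing a packet header followed by TLVs into a
 * preallocated buffer; the TLV writers return the next cursor, or NULL if
 * the TLV does not fit. A minimal sketch (mirroring the senders further
 * below; error handling elided):
 *
 *	u_int8_t *cursor = response;
 *	cursor = necp_buffer_write_packet_header(cursor, packet_type,
 *	    NECP_PACKET_FLAGS_RESPONSE, message_id);
 *	cursor = necp_buffer_write_tlv(cursor, NECP_TLV_ERROR, sizeof(error),
 *	    &error, response, response_size);
 */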
1464 u_int8_t *
1465 necp_buffer_write_tlv(u_int8_t *cursor, u_int8_t type,
1466 u_int32_t length, const void *value,
1467 u_int8_t *buffer, u_int32_t buffer_length)
1468 {
1469 if (!necp_buffer_write_tlv_validate(cursor, type, length, buffer, buffer_length)) {
1470 return (NULL);
1471 }
1472 u_int8_t *next_tlv = (u_int8_t *)(cursor + sizeof(type) + sizeof(length) + length);
1473 *(u_int8_t *)(cursor) = type;
1474 *(u_int32_t *)(void *)(cursor + sizeof(type)) = length;
1475 if (length > 0) {
1476 memcpy((u_int8_t *)(cursor + sizeof(type) + sizeof(length)), value, length);
1477 }
1478
1479 return (next_tlv);
1480 }
1481
1482 u_int8_t
1483 necp_buffer_get_tlv_type(u_int8_t *buffer, int tlv_offset)
1484 {
1485 u_int8_t *type = NULL;
1486
1487 if (buffer == NULL) {
1488 return (0);
1489 }
1490
1491 type = (u_int8_t *)((u_int8_t *)buffer + tlv_offset);
1492 return (type ? *type : 0);
1493 }
1494
1495 u_int32_t
1496 necp_buffer_get_tlv_length(u_int8_t *buffer, int tlv_offset)
1497 {
1498 u_int32_t *length = NULL;
1499
1500 if (buffer == NULL) {
1501 return (0);
1502 }
1503
1504 length = (u_int32_t *)(void *)((u_int8_t *)buffer + tlv_offset + sizeof(u_int8_t));
1505 return (length ? *length : 0);
1506 }
1507
1508 u_int8_t *
1509 necp_buffer_get_tlv_value(u_int8_t *buffer, int tlv_offset, u_int32_t *value_size)
1510 {
1511 u_int8_t *value = NULL;
1512 u_int32_t length = necp_buffer_get_tlv_length(buffer, tlv_offset);
1513 if (length == 0) {
1514 return (value);
1515 }
1516
1517 if (value_size) {
1518 *value_size = length;
1519 }
1520
1521 value = (u_int8_t *)((u_int8_t *)buffer + tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t));
1522 return (value);
1523 }
1524
1525 int
1526 necp_buffer_find_tlv(u_int8_t *buffer, u_int32_t buffer_length, int offset, u_int8_t type, int next)
1527 {
1528 if (offset < 0) {
1529 return (-1);
1530 }
1531 int cursor = offset;
1532 int next_cursor;
1533 u_int32_t curr_length;
1534 u_int8_t curr_type;
1535
1536 while (TRUE) {
1537 if ((((u_int32_t)cursor) + sizeof(curr_type) + sizeof(curr_length)) > buffer_length) {
1538 return (-1);
1539 }
1540 if (!next) {
1541 curr_type = necp_buffer_get_tlv_type(buffer, cursor);
1542 } else {
1543 next = 0;
1544 curr_type = NECP_TLV_NIL;
1545 }
1546 curr_length = necp_buffer_get_tlv_length(buffer, cursor);
1547 if (curr_length > buffer_length - ((u_int32_t)cursor + sizeof(curr_type) + sizeof(curr_length))) {
1548 return (-1);
1549 }
1550
1551 next_cursor = (cursor + sizeof(curr_type) + sizeof(curr_length) + curr_length);
1552 if (curr_type == type) {
1553 // check if entire TLV fits inside buffer
1554 if (((u_int32_t)next_cursor) <= buffer_length) {
1555 return (cursor);
1556 } else {
1557 return (-1);
1558 }
1559 }
1560 cursor = next_cursor;
1561 }
1562 }
1563
1564 static int
1565 necp_find_tlv(mbuf_t packet, u_int8_t *buffer, u_int32_t buffer_length, int offset, u_int8_t type, int *err, int next)
1566 {
1567 int cursor = -1;
1568 if (packet != NULL) {
1569 cursor = necp_packet_find_tlv(packet, offset, type, err, next);
1570 } else if (buffer != NULL) {
1571 cursor = necp_buffer_find_tlv(buffer, buffer_length, offset, type, next);
1572 }
1573 return (cursor);
1574 }
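/*
 * Callers that need every TLV of a given type restart the search from the
 * previous match with 'next' set, as in this sketch (mirroring the
 * condition and route-rule loops in necp_handle_policy_add below):
 *
 *	for (cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, offset, type, &error, 0);
 *	     cursor >= 0;
 *	     cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, cursor, type, &error, 1)) {
 *		// process the TLV at 'cursor'
 *	}
 */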
1575
1576 static int
1577 necp_get_tlv_at_offset(mbuf_t packet, u_int8_t *buffer, u_int32_t buffer_length,
1578 int tlv_offset, u_int32_t out_buffer_length, void *out_buffer, u_int32_t *value_size)
1579 {
1580 if (packet != NULL) {
1581 // Handle mbuf parsing
1582 return necp_packet_get_tlv_at_offset(packet, tlv_offset, out_buffer_length, out_buffer, value_size);
1583 }
1584
1585 if (buffer == NULL) {
1586 NECPLOG0(LOG_ERR, "necp_get_tlv_at_offset buffer is NULL");
1587 return (EINVAL);
1588 }
1589
1590 // Handle buffer parsing
1591
1592 // Validate that buffer has enough room for any TLV
1593 if (tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t) > buffer_length) {
1594 NECPLOG(LOG_ERR, "necp_get_tlv_at_offset buffer_length is too small for TLV (%u < %u)",
1595 buffer_length, tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t));
1596 return (EINVAL);
1597 }
1598
1599 // Validate that buffer has enough room for this TLV
1600 u_int32_t tlv_length = necp_buffer_get_tlv_length(buffer, tlv_offset);
1601 if (tlv_length > buffer_length - (tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t))) {
1602 NECPLOG(LOG_ERR, "necp_get_tlv_at_offset buffer_length is too small for TLV of length %u (%u < %u)",
1603 tlv_length, buffer_length, tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t) + tlv_length);
1604 return (EINVAL);
1605 }
1606
1607 if (out_buffer != NULL && out_buffer_length > 0) {
1608 // Validate that out buffer is large enough for value
1609 if (out_buffer_length < tlv_length) {
1610 NECPLOG(LOG_ERR, "necp_get_tlv_at_offset out_buffer_length is too small for TLV value (%u < %u)",
1611 out_buffer_length, tlv_length);
1612 return (EINVAL);
1613 }
1614
1615 // Get value pointer
1616 u_int8_t *tlv_value = necp_buffer_get_tlv_value(buffer, tlv_offset, NULL);
1617 if (tlv_value == NULL) {
1618 NECPLOG0(LOG_ERR, "necp_get_tlv_at_offset tlv_value is NULL");
1619 return (ENOENT);
1620 }
1621
1622 // Copy value
1623 memcpy(out_buffer, tlv_value, tlv_length);
1624 }
1625
1626 // Copy out length
1627 if (value_size != NULL) {
1628 *value_size = tlv_length;
1629 }
1630
1631 return (0);
1632 }
1633
1634 static int
1635 necp_get_tlv(mbuf_t packet, u_int8_t *buffer, u_int32_t buffer_length,
1636 int offset, u_int8_t type, u_int32_t buff_len, void *buff, u_int32_t *value_size)
1637 {
1638 int error = 0;
1639
1640 int tlv_offset = necp_find_tlv(packet, buffer, buffer_length, offset, type, &error, 0);
1641 if (tlv_offset < 0) {
1642 return (error);
1643 }
1644
1645 return (necp_get_tlv_at_offset(packet, buffer, buffer_length, tlv_offset, buff_len, buff, value_size));
1646 }
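/*
 * Message handlers typically read one fixed-size value per TLV type, e.g.
 * (a sketch mirroring necp_handle_policy_delete below):
 *
 *	necp_policy_id policy_id = 0;
 *	error = necp_get_tlv(packet, NULL, 0, offset, NECP_TLV_POLICY_ID,
 *	    sizeof(policy_id), &policy_id, NULL);
 */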
1647
1648 static bool
1649 necp_send_ctl_data(struct necp_session *session, u_int8_t *buffer, size_t buffer_size)
1650 {
1651 int error;
1652
1653 if (necp_kctlref == NULL || session == NULL || buffer == NULL || buffer_size == 0) {
1654 return (FALSE);
1655 }
1656
1657 error = ctl_enqueuedata(necp_kctlref, session->control_unit, buffer, buffer_size, CTL_DATA_EOR);
1658
1659 return (error == 0);
1660 }
1661
1662 static bool
1663 necp_send_success_response(struct necp_session *session, u_int8_t packet_type, u_int32_t message_id)
1664 {
1665 bool success = TRUE;
1666 u_int8_t *response = NULL;
1667 u_int8_t *cursor = NULL;
1668 size_t response_size = sizeof(struct necp_packet_header) + sizeof(u_int8_t) + sizeof(u_int32_t);
1669 MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK);
1670 if (response == NULL) {
1671 return (FALSE);
1672 }
1673 cursor = response;
1674 cursor = necp_buffer_write_packet_header(cursor, packet_type, NECP_PACKET_FLAGS_RESPONSE, message_id);
1675 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_NIL, 0, NULL, response, response_size);
1676
1677 if (!(success = necp_send_ctl_data(session, (u_int8_t *)response, response_size))) {
1678 NECPLOG0(LOG_ERR, "Failed to send response");
1679 }
1680
1681 FREE(response, M_NECP);
1682 return (success);
1683 }
1684
1685 static bool
1686 necp_send_error_response(struct necp_session *session, u_int8_t packet_type, u_int32_t message_id, u_int32_t error)
1687 {
1688 bool success = TRUE;
1689 u_int8_t *response = NULL;
1690 u_int8_t *cursor = NULL;
1691 size_t response_size = sizeof(struct necp_packet_header) + sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(u_int32_t);
1692 MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK);
1693 if (response == NULL) {
1694 return (FALSE);
1695 }
1696 cursor = response;
1697 cursor = necp_buffer_write_packet_header(cursor, packet_type, NECP_PACKET_FLAGS_RESPONSE, message_id);
1698 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_ERROR, sizeof(error), &error, response, response_size);
1699
1700 if (!(success = necp_send_ctl_data(session, (u_int8_t *)response, response_size))) {
1701 NECPLOG0(LOG_ERR, "Failed to send response");
1702 }
1703
1704 FREE(response, M_NECP);
1705 return (success);
1706 }
1707
1708 static bool
1709 necp_send_policy_id_response(struct necp_session *session, u_int8_t packet_type, u_int32_t message_id, necp_policy_id policy_id)
1710 {
1711 bool success = TRUE;
1712 u_int8_t *response = NULL;
1713 u_int8_t *cursor = NULL;
1714 size_t response_size = sizeof(struct necp_packet_header) + sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(u_int32_t);
1715 MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK);
1716 if (response == NULL) {
1717 return (FALSE);
1718 }
1719 cursor = response;
1720 cursor = necp_buffer_write_packet_header(cursor, packet_type, NECP_PACKET_FLAGS_RESPONSE, message_id);
1721 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ID, sizeof(policy_id), &policy_id, response, response_size);
1722
1723 if (!(success = necp_send_ctl_data(session, (u_int8_t *)response, response_size))) {
1724 NECPLOG0(LOG_ERR, "Failed to send response");
1725 }
1726
1727 FREE(response, M_NECP);
1728 return (success);
1729 }
1730
1731 static errno_t
1732 necp_ctl_send(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, mbuf_t packet, int flags)
1733 {
1734 #pragma unused(kctlref, unit, flags)
1735 struct necp_session *session = (struct necp_session *)unitinfo;
1736 struct necp_packet_header header;
1737 int error = 0;
1738
1739 if (session == NULL) {
1740 NECPLOG0(LOG_ERR, "Got a NULL session");
1741 error = EINVAL;
1742 goto done;
1743 }
1744
1745 if (mbuf_pkthdr_len(packet) < sizeof(header)) {
1746 NECPLOG(LOG_ERR, "Got a bad packet, length (%lu) < sizeof header (%lu)", mbuf_pkthdr_len(packet), sizeof(header));
1747 error = EINVAL;
1748 goto done;
1749 }
1750
1751 error = mbuf_copydata(packet, 0, sizeof(header), &header);
1752 if (error) {
1753 NECPLOG(LOG_ERR, "mbuf_copydata failed for the header: %d", error);
1754 error = ENOBUFS;
1755 goto done;
1756 }
1757
1758 if (session->proc_locked) {
1759 // Verify that the calling process is allowed to send messages
1760 uuid_t proc_uuid;
1761 proc_getexecutableuuid(current_proc(), proc_uuid, sizeof(proc_uuid));
1762 if (uuid_compare(proc_uuid, session->proc_uuid) != 0) {
1763 necp_send_error_response(session, header.packet_type, header.message_id, NECP_ERROR_INVALID_PROCESS);
1764 goto done;
1765 }
1766 } else {
1767 // If not locked, update the proc_uuid and proc_pid of the session
1768 proc_getexecutableuuid(current_proc(), session->proc_uuid, sizeof(session->proc_uuid));
1769 session->proc_pid = proc_pid(current_proc());
1770 }
1771
1772 switch (header.packet_type) {
1773 case NECP_PACKET_TYPE_POLICY_ADD: {
1774 necp_handle_policy_add(session, header.message_id, packet, NULL, 0, sizeof(header), NULL);
1775 break;
1776 }
1777 case NECP_PACKET_TYPE_POLICY_GET: {
1778 necp_handle_policy_get(session, header.message_id, packet, sizeof(header));
1779 break;
1780 }
1781 case NECP_PACKET_TYPE_POLICY_DELETE: {
1782 necp_handle_policy_delete(session, header.message_id, packet, sizeof(header));
1783 break;
1784 }
1785 case NECP_PACKET_TYPE_POLICY_APPLY_ALL: {
1786 necp_handle_policy_apply_all(session, header.message_id, packet, sizeof(header));
1787 break;
1788 }
1789 case NECP_PACKET_TYPE_POLICY_LIST_ALL: {
1790 necp_handle_policy_list_all(session, header.message_id, packet, sizeof(header));
1791 break;
1792 }
1793 case NECP_PACKET_TYPE_POLICY_DELETE_ALL: {
1794 necp_handle_policy_delete_all(session, header.message_id, packet, sizeof(header));
1795 break;
1796 }
1797 case NECP_PACKET_TYPE_POLICY_DUMP_ALL: {
1798 necp_handle_policy_dump_all(session, header.message_id, packet, 0, 0, sizeof(header));
1799 break;
1800 }
1801 case NECP_PACKET_TYPE_SET_SESSION_PRIORITY: {
1802 necp_handle_set_session_priority(session, header.message_id, packet, sizeof(header));
1803 break;
1804 }
1805 case NECP_PACKET_TYPE_LOCK_SESSION_TO_PROC: {
1806 necp_handle_lock_session_to_proc(session, header.message_id, packet, sizeof(header));
1807 break;
1808 }
1809 case NECP_PACKET_TYPE_REGISTER_SERVICE: {
1810 necp_handle_register_service(session, header.message_id, packet, sizeof(header));
1811 break;
1812 }
1813 case NECP_PACKET_TYPE_UNREGISTER_SERVICE: {
1814 necp_handle_unregister_service(session, header.message_id, packet, sizeof(header));
1815 break;
1816 }
1817 default: {
1818 NECPLOG(LOG_ERR, "Received unknown message type %d", header.packet_type);
1819 necp_send_error_response(session, header.packet_type, header.message_id, NECP_ERROR_UNKNOWN_PACKET_TYPE);
1820 break;
1821 }
1822 }
1823
1824 done:
1825 mbuf_freem(packet);
1826 return (error);
1827 }
1828
1829 static void
1830 necp_ctl_rcvd(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int flags)
1831 {
1832 #pragma unused(kctlref, unit, unitinfo, flags)
1833 return;
1834 }
1835
1836 static errno_t
1837 necp_ctl_getopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int opt, void *data, size_t *len)
1838 {
1839 #pragma unused(kctlref, unit, unitinfo, opt, data, len)
1840 return (0);
1841 }
1842
1843 static errno_t
1844 necp_ctl_setopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int opt, void *data, size_t len)
1845 {
1846 #pragma unused(kctlref, unit, unitinfo, opt, data, len)
1847 return (0);
1848 }
1849
1850 // Session Management
1851
1852 static struct necp_session *
1853 necp_create_session(void)
1854 {
1855 struct necp_session *new_session = NULL;
1856
1857 MALLOC(new_session, struct necp_session *, sizeof(*new_session), M_NECP, M_WAITOK | M_ZERO);
1858 if (new_session == NULL) {
1859 goto done;
1860 }
1861
1862 new_session->necp_fd_type = necp_fd_type_session;
1863 new_session->session_priority = NECP_SESSION_PRIORITY_UNKNOWN;
1864 new_session->dirty = FALSE;
1865 LIST_INIT(&new_session->policies);
1866 lck_mtx_init(&new_session->lock, necp_kernel_policy_mtx_grp, necp_kernel_policy_mtx_attr);
1867
1868 // Take the lock
1869 lck_rw_lock_exclusive(&necp_kernel_policy_lock);
1870
1871 // Find the next available control unit
1872 u_int32_t control_unit = 1;
1873 struct necp_session *next_session = NULL;
1874 TAILQ_FOREACH(next_session, &necp_session_list, chain) {
1875 if (next_session->control_unit > control_unit) {
1876 // Found a gap, grab this control unit
1877 break;
1878 }
1879
1880 // Try the next control unit, loop around
1881 control_unit = next_session->control_unit + 1;
1882 }
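// For example, with sessions already holding control units {1, 2, 4}, the
// loop above advances control_unit to 3, stops at the session owning unit 4,
// and the new session is inserted before it below so the list stays sorted
// by control unit.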
1883
1884 new_session->control_unit = control_unit;
1885 new_session->session_order = necp_allocate_new_session_order(new_session->session_priority, control_unit);
1886
1887 if (next_session != NULL) {
1888 TAILQ_INSERT_BEFORE(next_session, new_session, chain);
1889 } else {
1890 TAILQ_INSERT_TAIL(&necp_session_list, new_session, chain);
1891 }
1892
1893 necp_session_count++;
1894 lck_rw_done(&necp_kernel_policy_lock);
1895
1896 if (necp_debug) {
1897 NECPLOG(LOG_DEBUG, "Created NECP session, control unit %d", control_unit);
1898 }
1899
1900 done:
1901 return (new_session);
1902 }
1903
1904 static void
1905 necp_delete_session(struct necp_session *session)
1906 {
1907 if (session != NULL) {
1908 struct necp_service_registration *service = NULL;
1909 struct necp_service_registration *temp_service = NULL;
1910 LIST_FOREACH_SAFE(service, &session->services, session_chain, temp_service) {
1911 LIST_REMOVE(service, session_chain);
1912 lck_rw_lock_exclusive(&necp_kernel_policy_lock);
1913 LIST_REMOVE(service, kernel_chain);
1914 lck_rw_done(&necp_kernel_policy_lock);
1915 FREE(service, M_NECP);
1916 }
1917 if (necp_debug) {
1918 NECPLOG0(LOG_DEBUG, "Deleted NECP session");
1919 }
1920
1921 lck_rw_lock_exclusive(&necp_kernel_policy_lock);
1922 TAILQ_REMOVE(&necp_session_list, session, chain);
1923 necp_session_count--;
1924 lck_rw_done(&necp_kernel_policy_lock);
1925
1926 lck_mtx_destroy(&session->lock, necp_kernel_policy_mtx_grp);
1927 FREE(session, M_NECP);
1928 }
1929 }
1930
1931 // Session Policy Management
1932
1933 static inline u_int8_t
1934 necp_policy_result_get_type_from_buffer(u_int8_t *buffer, u_int32_t length)
1935 {
1936 return ((buffer && length >= sizeof(u_int8_t)) ? buffer[0] : 0);
1937 }
1938
1939 static inline u_int32_t
1940 necp_policy_result_get_parameter_length_from_buffer(u_int8_t *buffer, u_int32_t length)
1941 {
1942 return ((buffer && length > sizeof(u_int8_t)) ? (length - sizeof(u_int8_t)) : 0);
1943 }
1944
1945 static inline u_int8_t *
1946 necp_policy_result_get_parameter_pointer_from_buffer(u_int8_t *buffer, u_int32_t length)
1947 {
1948 return ((buffer && length > sizeof(u_int8_t)) ? (buffer + sizeof(u_int8_t)) : NULL);
1949 }
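/*
 * A policy result value is a 1-byte result type optionally followed by a
 * type-specific parameter. An illustrative sketch of packing a skip result,
 * whose parameter per the validation below is at least a u_int32_t
 * ('skip_order' is a hypothetical client value):
 *
 *	u_int8_t result[sizeof(u_int8_t) + sizeof(u_int32_t)];
 *	result[0] = NECP_POLICY_RESULT_SKIP;
 *	memcpy(&result[1], &skip_order, sizeof(u_int32_t));
 */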
1950
1951 static bool
1952 necp_policy_result_requires_route_rules(u_int8_t *buffer, u_int32_t length)
1953 {
1954 u_int8_t type = necp_policy_result_get_type_from_buffer(buffer, length);
1955 if (type == NECP_POLICY_RESULT_ROUTE_RULES) {
1956 return (TRUE);
1957 }
1958 return (FALSE);
1959 }
1960
1961 static inline bool
1962 necp_address_is_valid(struct sockaddr *address)
1963 {
1964 if (address->sa_family == AF_INET) {
1965 return (address->sa_len == sizeof(struct sockaddr_in));
1966 } else if (address->sa_family == AF_INET6) {
1967 return (address->sa_len == sizeof(struct sockaddr_in6));
1968 } else {
1969 return (FALSE);
1970 }
1971 }
1972
1973 static bool
1974 necp_policy_result_is_valid(u_int8_t *buffer, u_int32_t length)
1975 {
1976 bool validated = FALSE;
1977 u_int8_t type = necp_policy_result_get_type_from_buffer(buffer, length);
1978 u_int32_t parameter_length = necp_policy_result_get_parameter_length_from_buffer(buffer, length);
1979 switch (type) {
1980 case NECP_POLICY_RESULT_PASS: {
1981 validated = TRUE;
1982 break;
1983 }
1984 case NECP_POLICY_RESULT_SKIP: {
1985 if (parameter_length >= sizeof(u_int32_t)) {
1986 validated = TRUE;
1987 }
1988 break;
1989 }
1990 case NECP_POLICY_RESULT_DROP: {
1991 validated = TRUE;
1992 break;
1993 }
1994 case NECP_POLICY_RESULT_SOCKET_DIVERT: {
1995 if (parameter_length >= sizeof(u_int32_t)) {
1996 validated = TRUE;
1997 }
1998 break;
1999 }
2000 case NECP_POLICY_RESULT_SOCKET_SCOPED: {
2001 if (parameter_length > 0) {
2002 validated = TRUE;
2003 }
2004 break;
2005 }
2006 case NECP_POLICY_RESULT_IP_TUNNEL: {
2007 if (parameter_length > sizeof(u_int32_t)) {
2008 validated = TRUE;
2009 }
2010 break;
2011 }
2012 case NECP_POLICY_RESULT_SOCKET_FILTER: {
2013 if (parameter_length >= sizeof(u_int32_t)) {
2014 validated = TRUE;
2015 }
2016 break;
2017 }
2018 case NECP_POLICY_RESULT_ROUTE_RULES: {
2019 validated = TRUE;
2020 break;
2021 }
2022 case NECP_POLICY_RESULT_TRIGGER:
2023 case NECP_POLICY_RESULT_TRIGGER_IF_NEEDED:
2024 case NECP_POLICY_RESULT_TRIGGER_SCOPED:
2025 case NECP_POLICY_RESULT_NO_TRIGGER_SCOPED:
2026 case NECP_POLICY_RESULT_USE_NETAGENT: {
2027 if (parameter_length >= sizeof(uuid_t)) {
2028 validated = TRUE;
2029 }
2030 break;
2031 }
2032 default: {
2033 validated = FALSE;
2034 break;
2035 }
2036 }
2037
2038 if (necp_debug) {
2039 NECPLOG(LOG_DEBUG, "Policy result type %d, valid %d", type, validated);
2040 }
2041
2042 return (validated);
2043 }
2044
2045 static inline u_int8_t
2046 necp_policy_condition_get_type_from_buffer(u_int8_t *buffer, u_int32_t length)
2047 {
2048 return ((buffer && length >= sizeof(u_int8_t)) ? buffer[0] : 0);
2049 }
2050
2051 static inline u_int8_t
2052 necp_policy_condition_get_flags_from_buffer(u_int8_t *buffer, u_int32_t length)
2053 {
2054 return ((buffer && length >= (2 * sizeof(u_int8_t))) ? buffer[1] : 0);
2055 }
2056
2057 static inline u_int32_t
2058 necp_policy_condition_get_value_length_from_buffer(u_int8_t *buffer, u_int32_t length)
2059 {
2060 return ((buffer && length >= (2 * sizeof(u_int8_t))) ? (length - (2 * sizeof(u_int8_t))) : 0);
2061 }
2062
2063 static inline u_int8_t *
2064 necp_policy_condition_get_value_pointer_from_buffer(u_int8_t *buffer, u_int32_t length)
2065 {
2066 return ((buffer && length > (2 * sizeof(u_int8_t))) ? (buffer + (2 * sizeof(u_int8_t))) : NULL);
2067 }
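/*
 * A policy condition value is a 1-byte condition type, a 1-byte flags field
 * (e.g. NECP_POLICY_CONDITION_FLAGS_NEGATIVE), then a type-specific value.
 * An illustrative sketch of packing an application condition ('app_uuid' is
 * a hypothetical client value):
 *
 *	u_int8_t condition[(2 * sizeof(u_int8_t)) + sizeof(uuid_t)];
 *	condition[0] = NECP_POLICY_CONDITION_APPLICATION;	// type
 *	condition[1] = 0;					// flags
 *	memcpy(&condition[2], app_uuid, sizeof(uuid_t));	// value
 */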
2068
2069 static inline bool
2070 necp_policy_condition_is_default(u_int8_t *buffer, u_int32_t length)
2071 {
2072 return (necp_policy_condition_get_type_from_buffer(buffer, length) == NECP_POLICY_CONDITION_DEFAULT);
2073 }
2074
2075 static inline bool
2076 necp_policy_condition_is_application(u_int8_t *buffer, u_int32_t length)
2077 {
2078 return (necp_policy_condition_get_type_from_buffer(buffer, length) == NECP_POLICY_CONDITION_APPLICATION);
2079 }
2080
2081 static inline bool
2082 necp_policy_condition_is_real_application(u_int8_t *buffer, u_int32_t length)
2083 {
2084 return (necp_policy_condition_get_type_from_buffer(buffer, length) == NECP_POLICY_CONDITION_REAL_APPLICATION);
2085 }
2086
2087 static inline bool
2088 necp_policy_condition_requires_application(u_int8_t *buffer, u_int32_t length)
2089 {
2090 u_int8_t type = necp_policy_condition_get_type_from_buffer(buffer, length);
2091 return (type == NECP_POLICY_CONDITION_REAL_APPLICATION);
2092 }
2093
2094 static inline bool
2095 necp_policy_condition_requires_real_application(u_int8_t *buffer, u_int32_t length)
2096 {
2097 u_int8_t type = necp_policy_condition_get_type_from_buffer(buffer, length);
2098 return (type == NECP_POLICY_CONDITION_ENTITLEMENT);
2099 }
2100
2101 static bool
2102 necp_policy_condition_is_valid(u_int8_t *buffer, u_int32_t length, u_int8_t policy_result_type)
2103 {
2104 bool validated = FALSE;
2105 bool result_cannot_have_ip_layer = (policy_result_type == NECP_POLICY_RESULT_SOCKET_DIVERT ||
2106 policy_result_type == NECP_POLICY_RESULT_SOCKET_FILTER ||
2107 policy_result_type == NECP_POLICY_RESULT_TRIGGER ||
2108 policy_result_type == NECP_POLICY_RESULT_TRIGGER_IF_NEEDED ||
2109 policy_result_type == NECP_POLICY_RESULT_TRIGGER_SCOPED ||
2110 policy_result_type == NECP_POLICY_RESULT_NO_TRIGGER_SCOPED ||
2111 policy_result_type == NECP_POLICY_RESULT_SOCKET_SCOPED ||
2112 policy_result_type == NECP_POLICY_RESULT_ROUTE_RULES ||
2113 policy_result_type == NECP_POLICY_RESULT_USE_NETAGENT) ? TRUE : FALSE;
2114 u_int32_t condition_length = necp_policy_condition_get_value_length_from_buffer(buffer, length);
2115 u_int8_t *condition_value = necp_policy_condition_get_value_pointer_from_buffer(buffer, length);
2116 u_int8_t type = necp_policy_condition_get_type_from_buffer(buffer, length);
2117 u_int8_t flags = necp_policy_condition_get_flags_from_buffer(buffer, length);
2118 switch (type) {
2119 case NECP_POLICY_CONDITION_APPLICATION:
2120 case NECP_POLICY_CONDITION_REAL_APPLICATION: {
2121 if (!(flags & NECP_POLICY_CONDITION_FLAGS_NEGATIVE) &&
2122 condition_length >= sizeof(uuid_t) &&
2123 condition_value != NULL &&
2124 !uuid_is_null(condition_value)) {
2125 validated = TRUE;
2126 }
2127 break;
2128 }
2129 case NECP_POLICY_CONDITION_DOMAIN:
2130 case NECP_POLICY_CONDITION_ACCOUNT:
2131 case NECP_POLICY_CONDITION_BOUND_INTERFACE: {
2132 if (condition_length > 0) {
2133 validated = TRUE;
2134 }
2135 break;
2136 }
2137 case NECP_POLICY_CONDITION_TRAFFIC_CLASS: {
2138 if (condition_length >= sizeof(struct necp_policy_condition_tc_range)) {
2139 validated = TRUE;
2140 }
2141 break;
2142 }
2143 case NECP_POLICY_CONDITION_DEFAULT:
2144 case NECP_POLICY_CONDITION_ALL_INTERFACES:
2145 case NECP_POLICY_CONDITION_ENTITLEMENT: {
2146 if (!(flags & NECP_POLICY_CONDITION_FLAGS_NEGATIVE)) {
2147 validated = TRUE;
2148 }
2149 break;
2150 }
2151 case NECP_POLICY_CONDITION_IP_PROTOCOL: {
2152 if (condition_length >= sizeof(u_int16_t)) {
2153 validated = TRUE;
2154 }
2155 break;
2156 }
2157 case NECP_POLICY_CONDITION_PID: {
2158 if (condition_length >= sizeof(pid_t) &&
2159 condition_value != NULL &&
2160 *((pid_t *)(void *)condition_value) != 0) {
2161 validated = TRUE;
2162 }
2163 break;
2164 }
2165 case NECP_POLICY_CONDITION_UID: {
2166 if (condition_length >= sizeof(uid_t)) {
2167 validated = TRUE;
2168 }
2169 break;
2170 }
2171 case NECP_POLICY_CONDITION_LOCAL_ADDR:
2172 case NECP_POLICY_CONDITION_REMOTE_ADDR: {
2173 if (!result_cannot_have_ip_layer && condition_length >= sizeof(struct necp_policy_condition_addr) &&
2174 necp_address_is_valid(&((struct necp_policy_condition_addr *)(void *)condition_value)->address.sa)) {
2175 validated = TRUE;
2176 }
2177 break;
2178 }
2179 case NECP_POLICY_CONDITION_LOCAL_ADDR_RANGE:
2180 case NECP_POLICY_CONDITION_REMOTE_ADDR_RANGE: {
2181 if (!result_cannot_have_ip_layer && condition_length >= sizeof(struct necp_policy_condition_addr_range) &&
2182 necp_address_is_valid(&((struct necp_policy_condition_addr_range *)(void *)condition_value)->start_address.sa) &&
2183 necp_address_is_valid(&((struct necp_policy_condition_addr_range *)(void *)condition_value)->end_address.sa)) {
2184 validated = TRUE;
2185 }
2186 break;
2187 }
2188 default: {
2189 validated = FALSE;
2190 break;
2191 }
2192 }
2193
2194 if (necp_debug) {
2195 NECPLOG(LOG_DEBUG, "Policy condition type %d, valid %d", type, validated);
2196 }
2197
2198 return (validated);
2199 }
2200
2201 static bool
2202 necp_policy_route_rule_is_default(u_int8_t *buffer, u_int32_t length)
2203 {
2204 return (necp_policy_condition_get_value_length_from_buffer(buffer, length) == 0 &&
2205 necp_policy_condition_get_flags_from_buffer(buffer, length) == 0);
2206 }
2207
2208 static bool
2209 necp_policy_route_rule_is_valid(u_int8_t *buffer, u_int32_t length)
2210 {
2211 bool validated = FALSE;
2212 u_int8_t type = necp_policy_condition_get_type_from_buffer(buffer, length);
2213 switch (type) {
2214 case NECP_ROUTE_RULE_ALLOW_INTERFACE: {
2215 validated = TRUE;
2216 break;
2217 }
2218 case NECP_ROUTE_RULE_DENY_INTERFACE: {
2219 validated = TRUE;
2220 break;
2221 }
2222 case NECP_ROUTE_RULE_QOS_MARKING: {
2223 validated = TRUE;
2224 break;
2225 }
2226 default: {
2227 validated = FALSE;
2228 break;
2229 }
2230 }
2231
2232 if (necp_debug) {
2233 NECPLOG(LOG_DEBUG, "Policy route rule type %d, valid %d", type, validated);
2234 }
2235
2236 return (validated);
2237 }
2238
2239 static int
2240 necp_get_posix_error_for_necp_error(int response_error)
2241 {
2242 switch (response_error) {
2243 case NECP_ERROR_UNKNOWN_PACKET_TYPE:
2244 case NECP_ERROR_INVALID_TLV:
2245 case NECP_ERROR_POLICY_RESULT_INVALID:
2246 case NECP_ERROR_POLICY_CONDITIONS_INVALID:
2247 case NECP_ERROR_ROUTE_RULES_INVALID: {
2248 return (EINVAL);
2249 }
2250 case NECP_ERROR_POLICY_ID_NOT_FOUND: {
2251 return (ENOENT);
2252 }
2253 case NECP_ERROR_INVALID_PROCESS: {
2254 return (EPERM);
2255 }
2256 case NECP_ERROR_INTERNAL:
2257 default: {
2258 return (ENOMEM);
2259 }
2260 }
2261 }
2262
2263 static void
2264 necp_handle_set_session_priority(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
2265 {
2266 int error;
2267 struct necp_session_policy *policy = NULL;
2268 struct necp_session_policy *temp_policy = NULL;
2269 u_int32_t response_error = NECP_ERROR_INTERNAL;
2270 u_int32_t requested_session_priority = NECP_SESSION_PRIORITY_UNKNOWN;
2271
2272 // Read requested session priority
2273 error = necp_get_tlv(packet, NULL, 0, offset, NECP_TLV_SESSION_PRIORITY, sizeof(requested_session_priority), &requested_session_priority, NULL);
2274 if (error) {
2275 NECPLOG(LOG_ERR, "Failed to get session priority: %d", error);
2276 response_error = NECP_ERROR_INVALID_TLV;
2277 goto fail;
2278 }
2279
2280 if (session == NULL) {
2281 NECPLOG0(LOG_ERR, "Failed to find session");
2282 response_error = NECP_ERROR_INTERNAL;
2283 goto fail;
2284 }
2285
2286 // Enforce special session priorities with entitlements
2287 if (requested_session_priority == NECP_SESSION_PRIORITY_CONTROL ||
2288 requested_session_priority == NECP_SESSION_PRIORITY_PRIVILEGED_TUNNEL) {
2289 errno_t cred_result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES, 0);
2290 if (cred_result != 0) {
2291 NECPLOG(LOG_ERR, "Session does not hold necessary entitlement to claim priority level %d", requested_session_priority);
2292 goto fail;
2293 }
2294 }
2295
2296 if (session->session_priority != requested_session_priority) {
2297 session->session_priority = requested_session_priority;
2298 session->session_order = necp_allocate_new_session_order(session->session_priority, session->control_unit);
2299 session->dirty = TRUE;
2300
2301 // Mark all policies as needing updates
2302 LIST_FOREACH_SAFE(policy, &session->policies, chain, temp_policy) {
2303 policy->pending_update = TRUE;
2304 }
2305 }
2306
2307 necp_send_success_response(session, NECP_PACKET_TYPE_SET_SESSION_PRIORITY, message_id);
2308 return;
2309
2310 fail:
2311 necp_send_error_response(session, NECP_PACKET_TYPE_SET_SESSION_PRIORITY, message_id, response_error);
2312 }
2313
2314 static void
2315 necp_handle_lock_session_to_proc(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
2316 {
2317 #pragma unused(packet, offset)
2318 // proc_uuid already filled out
2319 session->proc_locked = TRUE;
2320 necp_send_success_response(session, NECP_PACKET_TYPE_LOCK_SESSION_TO_PROC, message_id);
2321 }
2322
2323 static void
2324 necp_handle_register_service(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
2325 {
2326 int error;
2327 struct necp_service_registration *new_service = NULL;
2328 u_int32_t response_error = NECP_ERROR_INTERNAL;
2329 uuid_t service_uuid;
2330 uuid_clear(service_uuid);
2331
2332 if (session == NULL) {
2333 NECPLOG0(LOG_ERR, "Failed to find session");
2334 response_error = NECP_ERROR_INTERNAL;
2335 goto fail;
2336 }
2337
2338 // Enforce entitlements
2339 errno_t cred_result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES, 0);
2340 if (cred_result != 0) {
2341 NECPLOG0(LOG_ERR, "Session does not hold necessary entitlement to register service");
2342 goto fail;
2343 }
2344
2345 // Read service uuid
2346 error = necp_get_tlv(packet, NULL, 0, offset, NECP_TLV_SERVICE_UUID, sizeof(uuid_t), service_uuid, NULL);
2347 if (error) {
2348 NECPLOG(LOG_ERR, "Failed to get service UUID: %d", error);
2349 response_error = NECP_ERROR_INVALID_TLV;
2350 goto fail;
2351 }
2352
2353 MALLOC(new_service, struct necp_service_registration *, sizeof(*new_service), M_NECP, M_WAITOK);
2354 if (new_service == NULL) {
2355 NECPLOG0(LOG_ERR, "Failed to allocate service registration");
2356 response_error = NECP_ERROR_INTERNAL;
2357 goto fail;
2358 }
2359
2360 lck_rw_lock_exclusive(&necp_kernel_policy_lock);
2361 memset(new_service, 0, sizeof(*new_service));
2362 new_service->service_id = necp_create_uuid_service_id_mapping(service_uuid);
2363 LIST_INSERT_HEAD(&session->services, new_service, session_chain);
2364 LIST_INSERT_HEAD(&necp_registered_service_list, new_service, kernel_chain);
2365 lck_rw_done(&necp_kernel_policy_lock);
2366
2367 necp_send_success_response(session, NECP_PACKET_TYPE_REGISTER_SERVICE, message_id);
2368 return;
2369 fail:
2370 necp_send_error_response(session, NECP_PACKET_TYPE_REGISTER_SERVICE, message_id, response_error);
2371 }
2372
2373 static void
2374 necp_handle_unregister_service(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
2375 {
2376 int error;
2377 struct necp_service_registration *service = NULL;
2378 struct necp_service_registration *temp_service = NULL;
2379 u_int32_t response_error = NECP_ERROR_INTERNAL;
2380 struct necp_uuid_id_mapping *mapping = NULL;
2381 uuid_t service_uuid;
2382 uuid_clear(service_uuid);
2383
2384 if (session == NULL) {
2385 NECPLOG0(LOG_ERR, "Failed to find session");
2386 response_error = NECP_ERROR_INTERNAL;
2387 goto fail;
2388 }
2389
2390 // Read service uuid
2391 error = necp_get_tlv(packet, NULL, 0, offset, NECP_TLV_SERVICE_UUID, sizeof(uuid_t), service_uuid, NULL);
2392 if (error) {
2393 NECPLOG(LOG_ERR, "Failed to get service UUID: %d", error);
2394 response_error = NECP_ERROR_INVALID_TLV;
2395 goto fail;
2396 }
2397
2398 // Remove all matching services registered by this session
2399 lck_rw_lock_exclusive(&necp_kernel_policy_lock);
2400 mapping = necp_uuid_lookup_service_id_locked(service_uuid);
2401 if (mapping != NULL) {
2402 LIST_FOREACH_SAFE(service, &session->services, session_chain, temp_service) {
2403 if (service->service_id == mapping->id) {
2404 LIST_REMOVE(service, session_chain);
2405 LIST_REMOVE(service, kernel_chain);
2406 FREE(service, M_NECP);
2407 }
2408 }
2409 necp_remove_uuid_service_id_mapping(service_uuid);
2410 }
2411 lck_rw_done(&necp_kernel_policy_lock);
2412
2413 necp_send_success_response(session, NECP_PACKET_TYPE_UNREGISTER_SERVICE, message_id);
2414 return;
2415 fail:
2416 necp_send_error_response(session, NECP_PACKET_TYPE_UNREGISTER_SERVICE, message_id, response_error);
2417 }
2418
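/*
 * A policy add message is expected to carry one NECP_TLV_POLICY_ORDER, one
 * NECP_TLV_POLICY_RESULT, one or more NECP_TLV_POLICY_CONDITION TLVs, and,
 * when the result requires them, one or more NECP_TLV_ROUTE_RULE TLVs. A
 * rough sketch of the TLV stream this handler parses (illustrative only):
 *
 *	NECP_TLV_POLICY_ORDER      (value: necp_policy_order)
 *	NECP_TLV_POLICY_RESULT     (value: result type byte + parameter)
 *	NECP_TLV_POLICY_CONDITION  (value: type + flags + value), repeated
 *	NECP_TLV_ROUTE_RULE        (value: route rule), repeated, route-rule results only
 */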
2419 static necp_policy_id
2420 necp_handle_policy_add(struct necp_session *session, u_int32_t message_id, mbuf_t packet,
2421 u_int8_t *tlv_buffer, size_t tlv_buffer_length, int offset, int *return_error)
2422 {
2423 bool has_default_condition = FALSE;
2424 bool has_non_default_condition = FALSE;
2425 bool has_application_condition = FALSE;
2426 bool has_real_application_condition = FALSE;
2427 bool requires_application_condition = FALSE;
2428 bool requires_real_application_condition = FALSE;
2429 u_int8_t *conditions_array = NULL;
2430 u_int32_t conditions_array_size = 0;
2431 int conditions_array_cursor;
2432
2433 bool has_default_route_rule = FALSE;
2434 u_int8_t *route_rules_array = NULL;
2435 u_int32_t route_rules_array_size = 0;
2436 int route_rules_array_cursor;
2437
2438 int cursor;
2439 int error = 0;
2440 u_int32_t response_error = NECP_ERROR_INTERNAL;
2441
2442 necp_policy_order order = 0;
2443 struct necp_session_policy *policy = NULL;
2444 u_int8_t *policy_result = NULL;
2445 u_int32_t policy_result_size = 0;
2446
2447 // Read policy order
2448 error = necp_get_tlv(packet, tlv_buffer, tlv_buffer_length, offset, NECP_TLV_POLICY_ORDER, sizeof(order), &order, NULL);
2449 if (error) {
2450 NECPLOG(LOG_ERR, "Failed to get policy order: %d", error);
2451 response_error = NECP_ERROR_INVALID_TLV;
2452 goto fail;
2453 }
2454
2455 // Read policy result
2456 cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, offset, NECP_TLV_POLICY_RESULT, &error, 0);
2457 error = necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &policy_result_size);
2458 if (error || policy_result_size == 0) {
2459 NECPLOG(LOG_ERR, "Failed to get policy result length: %d", error);
2460 response_error = NECP_ERROR_INVALID_TLV;
2461 goto fail;
2462 }
2463 if (policy_result_size > NECP_MAX_POLICY_RESULT_SIZE) {
2464 NECPLOG(LOG_ERR, "Policy result length too large: %u", policy_result_size);
2465 response_error = NECP_ERROR_INVALID_TLV;
2466 goto fail;
2467 }
2468 MALLOC(policy_result, u_int8_t *, policy_result_size, M_NECP, M_WAITOK);
2469 if (policy_result == NULL) {
2470 NECPLOG(LOG_ERR, "Failed to allocate a policy result buffer (size %d)", policy_result_size);
2471 response_error = NECP_ERROR_INTERNAL;
2472 goto fail;
2473 }
2474 error = necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, policy_result_size, policy_result, NULL);
2475 if (error) {
2476 NECPLOG(LOG_ERR, "Failed to get policy result: %d", error);
2477 response_error = NECP_ERROR_POLICY_RESULT_INVALID;
2478 goto fail;
2479 }
2480 if (!necp_policy_result_is_valid(policy_result, policy_result_size)) {
2481 NECPLOG0(LOG_ERR, "Failed to validate policy result");
2482 response_error = NECP_ERROR_POLICY_RESULT_INVALID;
2483 goto fail;
2484 }
2485
2486 if (necp_policy_result_requires_route_rules(policy_result, policy_result_size)) {
2487 // Read route rules
2488 for (cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, offset, NECP_TLV_ROUTE_RULE, &error, 0);
2489 cursor >= 0;
2490 cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, cursor, NECP_TLV_ROUTE_RULE, &error, 1)) {
2491 u_int32_t route_rule_size = 0;
2492 necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &route_rule_size);
2493 if (route_rule_size > 0) {
2494 route_rules_array_size += (sizeof(u_int8_t) + sizeof(u_int32_t) + route_rule_size);
2495 }
2496 }
2497
2498 if (route_rules_array_size == 0) {
2499 NECPLOG0(LOG_ERR, "Failed to get policy route rules");
2500 response_error = NECP_ERROR_INVALID_TLV;
2501 goto fail;
2502 }
2503 if (route_rules_array_size > NECP_MAX_ROUTE_RULES_ARRAY_SIZE) {
2504 NECPLOG(LOG_ERR, "Route rules length too large: %u", route_rules_array_size);
2505 response_error = NECP_ERROR_INVALID_TLV;
2506 goto fail;
2507 }
2508 MALLOC(route_rules_array, u_int8_t *, route_rules_array_size, M_NECP, M_WAITOK);
2509 if (route_rules_array == NULL) {
2510 NECPLOG(LOG_ERR, "Failed to allocate a policy route rules array (size %d)", route_rules_array_size);
2511 response_error = NECP_ERROR_INTERNAL;
2512 goto fail;
2513 }
2514
2515 route_rules_array_cursor = 0;
2516 for (cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, offset, NECP_TLV_ROUTE_RULE, &error, 0);
2517 cursor >= 0;
2518 cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, cursor, NECP_TLV_ROUTE_RULE, &error, 1)) {
2519 u_int8_t route_rule_type = NECP_TLV_ROUTE_RULE;
2520 u_int32_t route_rule_size = 0;
2521 necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &route_rule_size);
2522 if (route_rule_size > 0 && route_rule_size <= (route_rules_array_size - route_rules_array_cursor)) {
2523 // Add type
2524 memcpy((route_rules_array + route_rules_array_cursor), &route_rule_type, sizeof(route_rule_type));
2525 route_rules_array_cursor += sizeof(route_rule_type);
2526
2527 // Add length
2528 memcpy((route_rules_array + route_rules_array_cursor), &route_rule_size, sizeof(route_rule_size));
2529 route_rules_array_cursor += sizeof(route_rule_size);
2530
2531 // Add value
2532 necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, route_rule_size, (route_rules_array + route_rules_array_cursor), NULL);
2533
2534 if (!necp_policy_route_rule_is_valid((route_rules_array + route_rules_array_cursor), route_rule_size)) {
2535 NECPLOG0(LOG_ERR, "Failed to validate policy route rule");
2536 response_error = NECP_ERROR_ROUTE_RULES_INVALID;
2537 goto fail;
2538 }
2539
2540 if (necp_policy_route_rule_is_default((route_rules_array + route_rules_array_cursor), route_rule_size)) {
2541 if (has_default_route_rule) {
2542 NECPLOG0(LOG_ERR, "Failed to validate route rule; contained multiple default route rules");
2543 response_error = NECP_ERROR_ROUTE_RULES_INVALID;
2544 goto fail;
2545 }
2546 has_default_route_rule = TRUE;
2547 }
2548
2549 route_rules_array_cursor += route_rule_size;
2550 }
2551 }
2552 }
2553
2554 // Read policy conditions
2555 for (cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, offset, NECP_TLV_POLICY_CONDITION, &error, 0);
2556 cursor >= 0;
2557 cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, cursor, NECP_TLV_POLICY_CONDITION, &error, 1)) {
2558 u_int32_t condition_size = 0;
2559 necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &condition_size);
2560
2561 if (condition_size > 0) {
2562 conditions_array_size += (sizeof(u_int8_t) + sizeof(u_int32_t) + condition_size);
2563 }
2564 }
2565
2566 if (conditions_array_size == 0) {
2567 NECPLOG0(LOG_ERR, "Failed to get policy conditions");
2568 response_error = NECP_ERROR_INVALID_TLV;
2569 goto fail;
2570 }
2571 if (conditions_array_size > NECP_MAX_CONDITIONS_ARRAY_SIZE) {
2572 NECPLOG(LOG_ERR, "Conditions length too large: %u", conditions_array_size);
2573 response_error = NECP_ERROR_INVALID_TLV;
2574 goto fail;
2575 }
2576 MALLOC(conditions_array, u_int8_t *, conditions_array_size, M_NECP, M_WAITOK);
2577 if (conditions_array == NULL) {
2578 NECPLOG(LOG_ERR, "Failed to allocate a policy conditions array (size %d)", conditions_array_size);
2579 response_error = NECP_ERROR_INTERNAL;
2580 goto fail;
2581 }
2582
2583 conditions_array_cursor = 0;
2584 for (cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, offset, NECP_TLV_POLICY_CONDITION, &error, 0);
2585 cursor >= 0;
2586 cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, cursor, NECP_TLV_POLICY_CONDITION, &error, 1)) {
2587 u_int8_t condition_type = NECP_TLV_POLICY_CONDITION;
2588 u_int32_t condition_size = 0;
2589 necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &condition_size);
2590 if (condition_size > 0 && condition_size <= (conditions_array_size - conditions_array_cursor)) {
2591 // Add type
2592 memcpy((conditions_array + conditions_array_cursor), &condition_type, sizeof(condition_type));
2593 conditions_array_cursor += sizeof(condition_type);
2594
2595 // Add length
2596 memcpy((conditions_array + conditions_array_cursor), &condition_size, sizeof(condition_size));
2597 conditions_array_cursor += sizeof(condition_size);
2598
2599 // Add value
2600 necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, condition_size, (conditions_array + conditions_array_cursor), NULL);
2601 if (!necp_policy_condition_is_valid((conditions_array + conditions_array_cursor), condition_size, necp_policy_result_get_type_from_buffer(policy_result, policy_result_size))) {
2602 NECPLOG0(LOG_ERR, "Failed to validate policy condition");
2603 response_error = NECP_ERROR_POLICY_CONDITIONS_INVALID;
2604 goto fail;
2605 }
2606
2607 if (necp_policy_condition_is_default((conditions_array + conditions_array_cursor), condition_size)) {
2608 has_default_condition = TRUE;
2609 } else {
2610 has_non_default_condition = TRUE;
2611 }
2612 if (has_default_condition && has_non_default_condition) {
2613 NECPLOG0(LOG_ERR, "Failed to validate conditions; contained default and non-default conditions");
2614 response_error = NECP_ERROR_POLICY_CONDITIONS_INVALID;
2615 goto fail;
2616 }
2617
2618 if (necp_policy_condition_is_application((conditions_array + conditions_array_cursor), condition_size)) {
2619 has_application_condition = TRUE;
2620 }
2621
2622 if (necp_policy_condition_is_real_application((conditions_array + conditions_array_cursor), condition_size)) {
2623 has_real_application_condition = TRUE;
2624 }
2625
2626 if (necp_policy_condition_requires_application((conditions_array + conditions_array_cursor), condition_size)) {
2627 requires_application_condition = TRUE;
2628 }
2629
2630 if (necp_policy_condition_requires_real_application((conditions_array + conditions_array_cursor), condition_size)) {
2631 requires_real_application_condition = TRUE;
2632 }
2633
2634 conditions_array_cursor += condition_size;
2635 }
2636 }
2637
2638 if (requires_application_condition && !has_application_condition) {
2639 NECPLOG0(LOG_ERR, "Failed to validate conditions; did not contain application condition");
2640 response_error = NECP_ERROR_POLICY_CONDITIONS_INVALID;
2641 goto fail;
2642 }
2643
2644 if (requires_real_application_condition && !has_real_application_condition) {
2645 NECPLOG0(LOG_ERR, "Failed to validate conditions; did not contain real application condition");
2646 response_error = NECP_ERROR_POLICY_CONDITIONS_INVALID;
2647 goto fail;
2648 }
2649
2650 if ((policy = necp_policy_create(session, order, conditions_array, conditions_array_size, route_rules_array, route_rules_array_size, policy_result, policy_result_size)) == NULL) {
2651 response_error = NECP_ERROR_INTERNAL;
2652 goto fail;
2653 }
2654
2655 if (packet != NULL) {
2656 necp_send_policy_id_response(session, NECP_PACKET_TYPE_POLICY_ADD, message_id, policy->id);
2657 }
2658 return (policy->id);
2659
2660 fail:
2661 if (policy_result != NULL) {
2662 FREE(policy_result, M_NECP);
2663 }
2664 if (conditions_array != NULL) {
2665 FREE(conditions_array, M_NECP);
2666 }
2667 if (route_rules_array != NULL) {
2668 FREE(route_rules_array, M_NECP);
2669 }
2670
2671 if (packet != NULL) {
2672 necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_ADD, message_id, response_error);
2673 }
2674 if (return_error != NULL) {
2675 *return_error = necp_get_posix_error_for_necp_error(response_error);
2676 }
2677 return (0);
2678 }
2679
2680 static void
2681 necp_handle_policy_get(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
2682 {
2683
2684 int error;
2685 u_int8_t *response = NULL;
2686 u_int8_t *cursor = NULL;
2687 u_int32_t response_error = NECP_ERROR_INTERNAL;
2688 necp_policy_id policy_id = 0;
2689 u_int32_t order_tlv_size = 0;
2690 u_int32_t result_tlv_size = 0;
2691 u_int32_t response_size = 0;
2692
2693 struct necp_session_policy *policy = NULL;
2694
2695 // Read policy id
2696 error = necp_get_tlv(packet, NULL, 0, offset, NECP_TLV_POLICY_ID, sizeof(policy_id), &policy_id, NULL);
2697 if (error) {
2698 NECPLOG(LOG_ERR, "Failed to get policy id: %d", error);
2699 response_error = NECP_ERROR_INVALID_TLV;
2700 goto fail;
2701 }
2702
2703 policy = necp_policy_find(session, policy_id);
2704 if (policy == NULL || policy->pending_deletion) {
2705 NECPLOG(LOG_ERR, "Failed to find policy with id %d", policy_id);
2706 response_error = NECP_ERROR_POLICY_ID_NOT_FOUND;
2707 goto fail;
2708 }
2709
2710 order_tlv_size = sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(necp_policy_order);
2711 result_tlv_size = (policy->result_size ? (sizeof(u_int8_t) + sizeof(u_int32_t) + policy->result_size) : 0);
2712 response_size = sizeof(struct necp_packet_header) + order_tlv_size + result_tlv_size + policy->conditions_size;
2713 MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK);
2714 if (response == NULL) {
2715 necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_GET, message_id, NECP_ERROR_INTERNAL);
2716 return;
2717 }
2718
2719 cursor = response;
2720 cursor = necp_buffer_write_packet_header(cursor, NECP_PACKET_TYPE_POLICY_GET, NECP_PACKET_FLAGS_RESPONSE, message_id);
2721 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ORDER, sizeof(necp_policy_order), &policy->order, response, response_size);
2722
2723 if (result_tlv_size) {
2724 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_RESULT, policy->result_size, &policy->result, response, response_size);
2725 }
2726 if (policy->conditions_size) {
2727 memcpy(((u_int8_t *)(void *)(cursor)), policy->conditions, policy->conditions_size);
2728 }
2729
2730 if (!necp_send_ctl_data(session, (u_int8_t *)response, response_size)) {
2731 NECPLOG0(LOG_ERR, "Failed to send response");
2732 }
2733
2734 FREE(response, M_NECP);
2735 return;
2736
2737 fail:
2738 necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_GET, message_id, response_error);
2739 }
2740
2741 static void
2742 necp_handle_policy_delete(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
2743 {
2744 int error;
2745 u_int32_t response_error = NECP_ERROR_INTERNAL;
2746 necp_policy_id policy_id = 0;
2747
2748 struct necp_session_policy *policy = NULL;
2749
2750 // Read policy id
2751 error = necp_get_tlv(packet, NULL, 0, offset, NECP_TLV_POLICY_ID, sizeof(policy_id), &policy_id, NULL);
2752 if (error) {
2753 NECPLOG(LOG_ERR, "Failed to get policy id: %d", error);
2754 response_error = NECP_ERROR_INVALID_TLV;
2755 goto fail;
2756 }
2757
2758 policy = necp_policy_find(session, policy_id);
2759 if (policy == NULL || policy->pending_deletion) {
2760 NECPLOG(LOG_ERR, "Failed to find policy with id %d", policy_id);
2761 response_error = NECP_ERROR_POLICY_ID_NOT_FOUND;
2762 goto fail;
2763 }
2764
2765 necp_policy_mark_for_deletion(session, policy);
2766
2767 necp_send_success_response(session, NECP_PACKET_TYPE_POLICY_DELETE, message_id);
2768 return;
2769
2770 fail:
2771 necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_DELETE, message_id, response_error);
2772 }
2773
2774 static void
2775 necp_handle_policy_apply_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
2776 {
2777 #pragma unused(packet, offset)
2778 necp_policy_apply_all(session);
2779 necp_send_success_response(session, NECP_PACKET_TYPE_POLICY_APPLY_ALL, message_id);
2780 }
2781
2782 static void
2783 necp_handle_policy_list_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
2784 {
2785 #pragma unused(packet, offset)
2786 u_int32_t tlv_size = (sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(u_int32_t));
2787 u_int32_t response_size = 0;
2788 u_int8_t *response = NULL;
2789 u_int8_t *cursor = NULL;
2790 int num_policies = 0;
2791 int cur_policy_index = 0;
2792 struct necp_session_policy *policy;
2793
2794 LIST_FOREACH(policy, &session->policies, chain) {
2795 if (!policy->pending_deletion) {
2796 num_policies++;
2797 }
2798 }
2799
2800 // Create a response with one Policy ID TLV for each policy
2801 response_size = sizeof(struct necp_packet_header) + num_policies * tlv_size;
2802 MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK);
2803 if (response == NULL) {
2804 necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_LIST_ALL, message_id, NECP_ERROR_INTERNAL);
2805 return;
2806 }
2807
2808 cursor = response;
2809 cursor = necp_buffer_write_packet_header(cursor, NECP_PACKET_TYPE_POLICY_LIST_ALL, NECP_PACKET_FLAGS_RESPONSE, message_id);
2810
2811 LIST_FOREACH(policy, &session->policies, chain) {
2812 if (!policy->pending_deletion && cur_policy_index < num_policies) {
2813 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ID, sizeof(u_int32_t), &policy->id, response, response_size);
2814 cur_policy_index++;
2815 }
2816 }
2817
2818 if (!necp_send_ctl_data(session, (u_int8_t *)response, response_size)) {
2819 NECPLOG0(LOG_ERR, "Failed to send response");
2820 }
2821
2822 FREE(response, M_NECP);
2823 }
2824
2825 static void
2826 necp_handle_policy_delete_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
2827 {
2828 #pragma unused(packet, offset)
2829 necp_policy_mark_all_for_deletion(session);
2830 necp_send_success_response(session, NECP_PACKET_TYPE_POLICY_DELETE_ALL, message_id);
2831 }
2832
2833 static necp_policy_id
2834 necp_policy_get_new_id(void)
2835 {
2836 necp_policy_id newid = 0;
2837
2838 lck_rw_lock_exclusive(&necp_kernel_policy_lock);
2839
2840 necp_last_policy_id++;
2841 if (necp_last_policy_id < 1) {
2842 necp_last_policy_id = 1;
2843 }
2844
2845 newid = necp_last_policy_id;
2846 lck_rw_done(&necp_kernel_policy_lock);
2847
2848 if (newid == 0) {
2849 NECPLOG0(LOG_DEBUG, "Failed to allocate policy id");
2850 return (0);
2851 }
2852
2853 return (newid);
2854 }
2855
2856 /*
2857 * The policy dump response has the following structure:
2858 *
2859 * <NECP_PACKET_HEADER>
2860 * {
2861 * type : NECP_TLV_POLICY_DUMP
2862 * length : ...
2863 * value :
2864 * {
2865 * {
2866 * type : NECP_TLV_POLICY_ID
2867 * len : ...
2868 * value : ...
2869 * }
2870 * {
2871 * type : NECP_TLV_POLICY_ORDER
2872 * len : ...
2873 * value : ...
2874 * }
2875 * {
2876 * type : NECP_TLV_POLICY_RESULT_STRING
2877 * len : ...
2878 * value : ...
2879 * }
2880 * {
2881 * type : NECP_TLV_POLICY_OWNER
2882 * len : ...
2883 * value : ...
2884 * }
2885 * {
2886 * type : NECP_TLV_POLICY_CONDITION
2887 * len : ...
2888 * value :
2889 * {
2890 * {
2891 * type : NECP_POLICY_CONDITION_ALL_INTERFACES
2892 * len : ...
2893 * value : ...
2894 * }
2895 * {
2896 * type : NECP_POLICY_CONDITION_BOUND_INTERFACES
2897 * len : ...
2898 * value : ...
2899 * }
2900 * ...
2901 * }
2902 * }
2903 * }
2904 * }
2905 * {
2906 * type : NECP_TLV_POLICY_DUMP
2907 * length : ...
2908 * value :
2909 * {
2910 * {
2911 * type : NECP_TLV_POLICY_ID
2912 * len : ...
2913 * value : ...
2914 * }
2915 * {
2916 * type : NECP_TLV_POLICY_ORDER
2917 * len : ...
2918 * value : ...
2919 * }
2920 * {
2921 * type : NECP_TLV_POLICY_RESULT_STRING
2922 * len : ...
2923 * value : ...
2924 * }
2925 * {
2926 * type : NECP_TLV_POLICY_OWNER
2927 * len : ...
2928 * value : ...
2929 * }
2930 * {
2931 * type : NECP_TLV_POLICY_CONDITION
2932 * len : ...
2933 * value :
2934 * {
2935 * {
2936 * type : NECP_POLICY_CONDITION_ALL_INTERFACES
2937 * len : ...
2938 * value : ...
2939 * }
2940 * {
2941 * type : NECP_POLICY_CONDITION_BOUND_INTERFACES
2942 * len : ...
2943 * value : ...
2944 * }
2945 * ...
2946 * }
2947 * }
2948 * }
2949 * }
2950 * ...
2951 */
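/*
 * A client can walk such a response by skipping the packet header and then
 * iterating the outer NECP_TLV_POLICY_DUMP TLVs; each value is itself a
 * nested TLV stream. A rough user-space sketch, where 'response' and
 * 'response_length' are the client's copy of the message (illustrative
 * only; bounds checks and error handling elided):
 *
 *	size_t cursor = sizeof(struct necp_packet_header);
 *	while (cursor + sizeof(u_int8_t) + sizeof(u_int32_t) <= response_length) {
 *		u_int8_t type = response[cursor];
 *		u_int32_t length;
 *		memcpy(&length, response + cursor + sizeof(u_int8_t), sizeof(length));
 *		if (type == NECP_TLV_POLICY_DUMP) {
 *			// parse the nested policy id/order/result/owner/condition TLVs
 *		}
 *		cursor += sizeof(u_int8_t) + sizeof(u_int32_t) + length;
 *	}
 */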
2952 static int
2953 necp_handle_policy_dump_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet,
2954 user_addr_t out_buffer, size_t out_buffer_length, int offset)
2955 {
2956 #pragma unused(offset)
2957 struct necp_kernel_socket_policy *policy = NULL;
2958 int policy_i;
2959 int policy_count = 0;
2960 u_int8_t **tlv_buffer_pointers = NULL;
2961 u_int32_t *tlv_buffer_lengths = NULL;
2962 u_int32_t total_tlv_len = 0;
2963 u_int8_t *result_buf = NULL;
2964 u_int8_t *result_buf_cursor = result_buf;
2965 char result_string[MAX_RESULT_STRING_LEN];
2966 char proc_name_string[MAXCOMLEN + 1];
2967
2968 int error_code = 0;
2969 bool error_occured = false;
2970 u_int32_t response_error = NECP_ERROR_INTERNAL;
2971
2972 #define REPORT_ERROR(error) do { error_occured = true; \
2973 response_error = error; \
2974 goto done; } while (0)
2975
2976 #define UNLOCK_AND_REPORT_ERROR(lock, error) do { lck_rw_done(lock); \
2977 REPORT_ERROR(error); } while (0)
2978
2979 errno_t cred_result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES, 0);
2980 if (cred_result != 0) {
2981 NECPLOG0(LOG_ERR, "Session does not hold the necessary entitlement to get Network Extension Policy information");
2982 REPORT_ERROR(NECP_ERROR_INTERNAL);
2983 }
2984
2985 // LOCK
2986 lck_rw_lock_shared(&necp_kernel_policy_lock);
2987
2988 if (necp_debug) {
2989 NECPLOG0(LOG_DEBUG, "Gathering policies");
2990 }
2991
2992 policy_count = necp_kernel_application_policies_count;
2993
2994 MALLOC(tlv_buffer_pointers, u_int8_t **, sizeof(u_int8_t *) * policy_count, M_NECP, M_NOWAIT | M_ZERO);
2995 if (tlv_buffer_pointers == NULL) {
2996 NECPLOG(LOG_DEBUG, "Failed to allocate tlv_buffer_pointers (%u bytes)", sizeof(u_int8_t *) * policy_count);
2997 UNLOCK_AND_REPORT_ERROR(&necp_kernel_policy_lock, NECP_ERROR_INTERNAL);
2998 }
2999
3000 MALLOC(tlv_buffer_lengths, u_int32_t *, sizeof(u_int32_t) * policy_count, M_NECP, M_NOWAIT | M_ZERO);
3001 if (tlv_buffer_lengths == NULL) {
3002 NECPLOG(LOG_DEBUG, "Failed to allocate tlv_buffer_lengths (%u bytes)", sizeof(u_int32_t) * policy_count);
3003 UNLOCK_AND_REPORT_ERROR(&necp_kernel_policy_lock, NECP_ERROR_INTERNAL);
3004 }
3005
3006 for (policy_i = 0; necp_kernel_socket_policies_app_layer_map != NULL && necp_kernel_socket_policies_app_layer_map[policy_i] != NULL; policy_i++) {
3007 policy = necp_kernel_socket_policies_app_layer_map[policy_i];
3008
3009 memset(result_string, 0, MAX_RESULT_STRING_LEN);
3010 memset(proc_name_string, 0, MAXCOMLEN + 1);
3011
3012 necp_get_result_description(result_string, policy->result, policy->result_parameter);
3013 proc_name(policy->session_pid, proc_name_string, MAXCOMLEN);
3014
3015 u_int16_t proc_name_len = strlen(proc_name_string) + 1;
3016 u_int16_t result_string_len = strlen(result_string) + 1;
3017
3018 if (necp_debug) {
3019 NECPLOG(LOG_DEBUG, "Policy: process: %s, result: %s", proc_name_string, result_string);
3020 }
3021
3022 u_int32_t total_allocated_bytes = sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(policy->id) + // NECP_TLV_POLICY_ID
3023 sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(policy->order) + // NECP_TLV_POLICY_ORDER
3024 sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(policy->session_order) + // NECP_TLV_POLICY_SESSION_ORDER
3025 sizeof(u_int8_t) + sizeof(u_int32_t) + result_string_len + // NECP_TLV_POLICY_RESULT_STRING
3026 sizeof(u_int8_t) + sizeof(u_int32_t) + proc_name_len + // NECP_TLV_POLICY_OWNER
3027 sizeof(u_int8_t) + sizeof(u_int32_t); // NECP_TLV_POLICY_CONDITION
3028
3029 // We now traverse the condition_mask to see how much space we need to allocate
3030 u_int32_t condition_mask = policy->condition_mask;
3031 u_int8_t num_conditions = 0;
3032 struct necp_string_id_mapping *account_id_entry = NULL;
3033 char if_name[IFXNAMSIZ];
3034 u_int32_t condition_tlv_length = 0;
3035 memset(if_name, 0, sizeof(if_name));
3036
3037 if (condition_mask == NECP_POLICY_CONDITION_DEFAULT) {
3038 num_conditions++;
3039 } else {
3040 if (condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES) {
3041 num_conditions++;
3042 }
3043 if (condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
3044 snprintf(if_name, IFXNAMSIZ, "%s%d", ifnet_name(policy->cond_bound_interface), ifnet_unit(policy->cond_bound_interface));
3045 condition_tlv_length += strlen(if_name) + 1;
3046 num_conditions++;
3047 }
3048 if (condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
3049 condition_tlv_length += sizeof(policy->cond_protocol);
3050 num_conditions++;
3051 }
3052 if (condition_mask & NECP_KERNEL_CONDITION_APP_ID) {
3053 condition_tlv_length += sizeof(uuid_t);
3054 num_conditions++;
3055 }
3056 if (condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
3057 condition_tlv_length += sizeof(uuid_t);
3058 num_conditions++;
3059 }
3060 if (condition_mask & NECP_KERNEL_CONDITION_DOMAIN) {
3061 u_int32_t domain_len = strlen(policy->cond_domain) + 1;
3062 condition_tlv_length += domain_len;
3063 num_conditions++;
3064 }
3065 if (condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID) {
3066 account_id_entry = necp_lookup_string_with_id_locked(&necp_account_id_list, policy->cond_account_id);
3067 u_int32_t account_id_len = 0;
3068 if (account_id_entry) {
3069 account_id_len = account_id_entry->string ? strlen(account_id_entry->string) + 1 : 0;
3070 }
3071 condition_tlv_length += account_id_len;
3072 num_conditions++;
3073 }
3074 if (condition_mask & NECP_KERNEL_CONDITION_PID) {
3075 condition_tlv_length += sizeof(pid_t);
3076 num_conditions++;
3077 }
3078 if (condition_mask & NECP_KERNEL_CONDITION_UID) {
3079 condition_tlv_length += sizeof(uid_t);
3080 num_conditions++;
3081 }
3082 if (condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
3083 condition_tlv_length += sizeof(struct necp_policy_condition_tc_range);
3084 num_conditions++;
3085 }
3086 if (condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT) {
3087 num_conditions++;
3088 }
3089 if (condition_mask & NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT) {
3090 u_int32_t entitlement_len = strlen(policy->cond_custom_entitlement) + 1;
3091 condition_tlv_length += entitlement_len;
3092 num_conditions++;
3093 }
3094 if (condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
3095 if (condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
3096 condition_tlv_length += sizeof(struct necp_policy_condition_addr_range);
3097 } else {
3098 condition_tlv_length += sizeof(struct necp_policy_condition_addr);
3099 }
3100 num_conditions++;
3101 }
3102 if (condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
3103 if (condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
3104 condition_tlv_length += sizeof(struct necp_policy_condition_addr_range);
3105 } else {
3106 condition_tlv_length += sizeof(struct necp_policy_condition_addr);
3107 }
3108 num_conditions++;
3109 }
3110 }
3111
3112 condition_tlv_length += num_conditions * (sizeof(u_int8_t) + sizeof(u_int32_t)); // These are for the condition TLVs. The space for "value" is already accounted for above.
3113 total_allocated_bytes += condition_tlv_length;
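// For example (illustrative arithmetic only): a policy whose sole condition is
// NECP_POLICY_CONDITION_ALL_INTERFACES adds one empty condition TLV, i.e.
// 1 byte of type + 4 bytes of length + 0 bytes of value = 5 bytes on top of
// the fixed-size TLVs counted above.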
3114
3115 u_int8_t *tlv_buffer;
3116 MALLOC(tlv_buffer, u_int8_t *, total_allocated_bytes, M_NECP, M_NOWAIT | M_ZERO);
3117 if (tlv_buffer == NULL) {
3118 NECPLOG(LOG_DEBUG, "Failed to allocate tlv_buffer (%u bytes)", total_allocated_bytes);
3119 continue;
3120 }
3121
3122 u_int8_t *cursor = tlv_buffer;
3123 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ID, sizeof(policy->id), &policy->id, tlv_buffer, total_allocated_bytes);
3124 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ORDER, sizeof(necp_policy_order), &policy->order, tlv_buffer, total_allocated_bytes);
3125 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_SESSION_ORDER, sizeof(policy->session_order), &policy->session_order, tlv_buffer, total_allocated_bytes);
3126 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_RESULT_STRING, result_string_len, result_string, tlv_buffer, total_allocated_bytes);
3127 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_OWNER, proc_name_len, proc_name_string, tlv_buffer, total_allocated_bytes);
3128
3129 #define N_QUICK 256
3130 u_int8_t q_cond_buf[N_QUICK]; // Small on-stack buffer to avoid a heap allocation for short condition lists
3131
3132 u_int8_t *cond_buf; // To be used for condition TLVs
3133 if (condition_tlv_length <= N_QUICK) {
3134 cond_buf = q_cond_buf;
3135 } else {
3136 MALLOC(cond_buf, u_int8_t *, condition_tlv_length, M_NECP, M_NOWAIT);
3137 if (cond_buf == NULL) {
3138 NECPLOG(LOG_DEBUG, "Failed to allocate cond_buffer (%u bytes)", condition_tlv_length);
3139 FREE(tlv_buffer, M_NECP);
3140 continue;
3141 }
3142 }
3143
3144 memset(cond_buf, 0, condition_tlv_length);
3145 u_int8_t *cond_buf_cursor = cond_buf;
3146 if (condition_mask == NECP_POLICY_CONDITION_DEFAULT) {
3147 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_DEFAULT, 0, "", cond_buf, condition_tlv_length);
3148 } else {
3149 if (condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES) {
3150 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_ALL_INTERFACES, 0, "", cond_buf, condition_tlv_length);
3151 }
3152 if (condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
3153 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_BOUND_INTERFACE, strlen(if_name) + 1,
3154 if_name, cond_buf, condition_tlv_length);
3155 }
3156 if (condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
3157 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_IP_PROTOCOL, sizeof(policy->cond_protocol), &policy->cond_protocol,
3158 cond_buf, condition_tlv_length);
3159 }
3160 if (condition_mask & NECP_KERNEL_CONDITION_APP_ID) {
3161 struct necp_uuid_id_mapping *entry = necp_uuid_lookup_uuid_with_app_id_locked(policy->cond_app_id);
3162 if (entry != NULL) {
3163 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_APPLICATION, sizeof(entry->uuid), entry->uuid,
3164 cond_buf, condition_tlv_length);
3165 }
3166 }
3167 if (condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
3168 struct necp_uuid_id_mapping *entry = necp_uuid_lookup_uuid_with_app_id_locked(policy->cond_real_app_id);
3169 if (entry != NULL) {
3170 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_REAL_APPLICATION, sizeof(entry->uuid), entry->uuid,
3171 cond_buf, condition_tlv_length);
3172 }
3173 }
3174 if (condition_mask & NECP_KERNEL_CONDITION_DOMAIN) {
3175 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_DOMAIN, strlen(policy->cond_domain) + 1, policy->cond_domain,
3176 cond_buf, condition_tlv_length);
3177 }
3178 if (condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID) {
3179 if (account_id_entry != NULL) {
3180 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_ACCOUNT, strlen(account_id_entry->string) + 1, account_id_entry->string,
3181 cond_buf, condition_tlv_length);
3182 }
3183 }
3184 if (condition_mask & NECP_KERNEL_CONDITION_PID) {
3185 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_PID, sizeof(policy->cond_pid), &policy->cond_pid,
3186 cond_buf, condition_tlv_length);
3187 }
3188 if (condition_mask & NECP_KERNEL_CONDITION_UID) {
3189 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_UID, sizeof(policy->cond_uid), &policy->cond_uid,
3190 cond_buf, condition_tlv_length);
3191 }
3192 if (condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
3193 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_TRAFFIC_CLASS, sizeof(policy->cond_traffic_class), &policy->cond_traffic_class,
3194 cond_buf, condition_tlv_length);
3195 }
3196 if (condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT) {
3197 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_ENTITLEMENT, 0, "",
3198 cond_buf, condition_tlv_length);
3199 }
3200 if (condition_mask & NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT) {
3201 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_ENTITLEMENT, strlen(policy->cond_custom_entitlement) + 1, policy->cond_custom_entitlement,
3202 cond_buf, condition_tlv_length);
3203 }
3204 if (condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
3205 if (condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
3206 struct necp_policy_condition_addr_range range;
3207 memcpy(&range.start_address, &policy->cond_local_start, sizeof(policy->cond_local_start));
3208 memcpy(&range.end_address, &policy->cond_local_end, sizeof(policy->cond_local_end));
3209 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_LOCAL_ADDR_RANGE, sizeof(range), &range,
3210 cond_buf, condition_tlv_length);
3211 } else {
3212 struct necp_policy_condition_addr addr;
3213 addr.prefix = policy->cond_local_prefix;
3214 memcpy(&addr.address, &policy->cond_local_start, sizeof(policy->cond_local_start));
3215 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_LOCAL_ADDR, sizeof(addr), &addr,
3216 cond_buf, condition_tlv_length);
3217 }
3218 }
3219 if (condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
3220 if (condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
3221 struct necp_policy_condition_addr_range range;
3222 memcpy(&range.start_address, &policy->cond_remote_start, sizeof(policy->cond_remote_start));
3223 memcpy(&range.end_address, &policy->cond_remote_end, sizeof(policy->cond_remote_end));
3224 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_REMOTE_ADDR_RANGE, sizeof(range), &range,
3225 cond_buf, condition_tlv_length);
3226 } else {
3227 struct necp_policy_condition_addr addr;
3228 addr.prefix = policy->cond_remote_prefix;
3229 memcpy(&addr.address, &policy->cond_remote_start, sizeof(policy->cond_remote_start));
3230 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_REMOTE_ADDR, sizeof(addr), &addr,
3231 cond_buf, condition_tlv_length);
3232 }
3233 }
3234 }
3235
3236 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_CONDITION, cond_buf_cursor - cond_buf, cond_buf, tlv_buffer, total_allocated_bytes);
3237 if (cond_buf != q_cond_buf) {
3238 FREE(cond_buf, M_NECP);
3239 }
3240
3241 tlv_buffer_pointers[policy_i] = tlv_buffer;
3242 tlv_buffer_lengths[policy_i] = (cursor - tlv_buffer);
3243
3244 // This is the length of the TLV for NECP_TLV_POLICY_DUMP
3245 total_tlv_len += sizeof(u_int8_t) + sizeof(u_int32_t) + (cursor - tlv_buffer);
3246 }
3247
3248 // UNLOCK
3249 lck_rw_done(&necp_kernel_policy_lock);
3250
3251 // Send packet
3252 if (packet != NULL) {
3253 u_int32_t total_result_length = sizeof(struct necp_packet_header) + total_tlv_len;
3254
3255 // Allow malloc to wait, since the total buffer may be large and we are not holding any locks
3256 MALLOC(result_buf, u_int8_t *, total_result_length, M_NECP, M_WAITOK | M_ZERO);
3257 if (result_buf == NULL) {
3258 NECPLOG(LOG_DEBUG, "Failed to allocate result_buffer (%u bytes)", total_result_length);
3259 REPORT_ERROR(NECP_ERROR_INTERNAL);
3260 }
3261
3262 result_buf_cursor = result_buf;
3263 result_buf_cursor = necp_buffer_write_packet_header(result_buf_cursor, NECP_PACKET_TYPE_POLICY_DUMP_ALL, NECP_PACKET_FLAGS_RESPONSE, message_id);
3264
3265 for (int i = 0; i < policy_count; i++) {
3266 if (tlv_buffer_pointers[i] != NULL) {
3267 result_buf_cursor = necp_buffer_write_tlv(result_buf_cursor, NECP_TLV_POLICY_DUMP, tlv_buffer_lengths[i], tlv_buffer_pointers[i], result_buf, total_result_length);
3268 }
3269 }
3270
3271 if (!necp_send_ctl_data(session, result_buf, result_buf_cursor - result_buf)) {
3272 NECPLOG(LOG_ERR, "Failed to send response (%u bytes)", result_buf_cursor - result_buf);
3273 } else {
3274 NECPLOG(LOG_ERR, "Sent data worth %u bytes. Total result buffer length was %u bytes", result_buf_cursor - result_buf, total_result_length);
3275 }
3276 }
3277
3278 // Copy out
3279 if (out_buffer != 0) {
3280 if (out_buffer_length < total_tlv_len + sizeof(u_int32_t)) {
3281 NECPLOG(LOG_DEBUG, "out_buffer_length too small (%u < %u)", out_buffer_length, total_tlv_len + sizeof(u_int32_t));
3282 REPORT_ERROR(NECP_ERROR_INVALID_TLV);
3283 }
3284
3285 // Allow malloc to wait, since the total buffer may be large and we are not holding any locks
3286 MALLOC(result_buf, u_int8_t *, total_tlv_len + sizeof(u_int32_t), M_NECP, M_WAITOK | M_ZERO);
3287 if (result_buf == NULL) {
3288 NECPLOG(LOG_DEBUG, "Failed to allocate result_buffer (%u bytes)", total_tlv_len + sizeof(u_int32_t));
3289 REPORT_ERROR(NECP_ERROR_INTERNAL);
3290 }
3291
3292 // Add four bytes for total length at the start
3293 memcpy(result_buf, &total_tlv_len, sizeof(u_int32_t));
3294
3295 // Copy the TLVs
3296 result_buf_cursor = result_buf + sizeof(u_int32_t);
3297 for (int i = 0; i < policy_count; i++) {
3298 if (tlv_buffer_pointers[i] != NULL) {
3299 result_buf_cursor = necp_buffer_write_tlv(result_buf_cursor, NECP_TLV_POLICY_DUMP, tlv_buffer_lengths[i], tlv_buffer_pointers[i],
3300 result_buf, total_tlv_len + sizeof(u_int32_t));
3301 }
3302 }
3303
3304 int copy_error = copyout(result_buf, out_buffer, total_tlv_len + sizeof(u_int32_t));
3305 if (copy_error) {
3306 NECPLOG(LOG_DEBUG, "Failed to copy out result_buffer (%u bytes)", total_tlv_len + sizeof(u_int32_t));
3307 REPORT_ERROR(NECP_ERROR_INTERNAL);
3308 }
3309 }
3310
3311 done:
3312
3313 if (error_occurred) {
3314 if (packet != NULL) {
3315 if (!necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_DUMP_ALL, message_id, response_error)) {
3316 NECPLOG0(LOG_ERR, "Failed to send error response");
3317 } else {
3318 NECPLOG0(LOG_ERR, "Sent error response");
3319 }
3320 }
3321 error_code = necp_get_posix_error_for_necp_error(response_error);
3322 }
3323
3324 if (result_buf != NULL) {
3325 FREE(result_buf, M_NECP);
3326 }
3327
3328 if (tlv_buffer_pointers != NULL) {
3329 for (int i = 0; i < policy_count; i++) {
3330 if (tlv_buffer_pointers[i] != NULL) {
3331 FREE(tlv_buffer_pointers[i], M_NECP);
3332 tlv_buffer_pointers[i] = NULL;
3333 }
3334 }
3335 FREE(tlv_buffer_pointers, M_NECP);
3336 }
3337
3338 if (tlv_buffer_lengths != NULL) {
3339 FREE(tlv_buffer_lengths, M_NECP);
3340 }
3341 #undef N_QUICK
3343 #undef REPORT_ERROR
3344 #undef UNLOCK_AND_REPORT_ERROR
3345
3346 return (error_code);
3347 }
3348
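// Creates a session policy from caller-provided TLV buffers. On success the
// conditions, route-rules, and result buffers are adopted by the new policy
// and are later released in necp_policy_delete(); callers must not free them
// separately once the policy has been created.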
3349 static struct necp_session_policy *
3350 necp_policy_create(struct necp_session *session, necp_policy_order order, u_int8_t *conditions_array, u_int32_t conditions_array_size, u_int8_t *route_rules_array, u_int32_t route_rules_array_size, u_int8_t *result, u_int32_t result_size)
3351 {
3352 struct necp_session_policy *new_policy = NULL;
3353 struct necp_session_policy *tmp_policy = NULL;
3354
3355 if (session == NULL || conditions_array == NULL || result == NULL || result_size == 0) {
3356 goto done;
3357 }
3358
3359 MALLOC_ZONE(new_policy, struct necp_session_policy *, sizeof(*new_policy), M_NECP_SESSION_POLICY, M_WAITOK);
3360 if (new_policy == NULL) {
3361 goto done;
3362 }
3363
3364 memset(new_policy, 0, sizeof(*new_policy)); // M_ZERO is not supported for MALLOC_ZONE
3365 new_policy->applied = FALSE;
3366 new_policy->pending_deletion = FALSE;
3367 new_policy->pending_update = FALSE;
3368 new_policy->order = order;
3369 new_policy->conditions = conditions_array;
3370 new_policy->conditions_size = conditions_array_size;
3371 new_policy->route_rules = route_rules_array;
3372 new_policy->route_rules_size = route_rules_array_size;
3373 new_policy->result = result;
3374 new_policy->result_size = result_size;
3375 new_policy->id = necp_policy_get_new_id();
3376
3377 LIST_INSERT_SORTED_ASCENDING(&session->policies, new_policy, chain, order, tmp_policy);
3378
3379 session->dirty = TRUE;
3380
3381 if (necp_debug) {
3382 NECPLOG(LOG_DEBUG, "Created NECP policy, order %d", order);
3383 }
3384 done:
3385 return (new_policy);
3386 }
3387
3388 static struct necp_session_policy *
3389 necp_policy_find(struct necp_session *session, necp_policy_id policy_id)
3390 {
3391 struct necp_session_policy *policy = NULL;
3392 if (policy_id == 0) {
3393 return (NULL);
3394 }
3395
3396 LIST_FOREACH(policy, &session->policies, chain) {
3397 if (policy->id == policy_id) {
3398 return (policy);
3399 }
3400 }
3401
3402 return (NULL);
3403 }
3404
3405 static inline u_int8_t
3406 necp_policy_get_result_type(struct necp_session_policy *policy)
3407 {
3408 return (policy ? necp_policy_result_get_type_from_buffer(policy->result, policy->result_size) : 0);
3409 }
3410
3411 static inline u_int32_t
3412 necp_policy_get_result_parameter_length(struct necp_session_policy *policy)
3413 {
3414 return (policy ? necp_policy_result_get_parameter_length_from_buffer(policy->result, policy->result_size) : 0);
3415 }
3416
3417 static bool
3418 necp_policy_get_result_parameter(struct necp_session_policy *policy, u_int8_t *parameter_buffer, u_int32_t parameter_buffer_length)
3419 {
3420 if (policy) {
3421 u_int32_t parameter_length = necp_policy_result_get_parameter_length_from_buffer(policy->result, policy->result_size);
3422 if (parameter_buffer_length >= parameter_length) {
3423 u_int8_t *parameter = necp_policy_result_get_parameter_pointer_from_buffer(policy->result, policy->result_size);
3424 if (parameter && parameter_buffer) {
3425 memcpy(parameter_buffer, parameter, parameter_length);
3426 return (TRUE);
3427 }
3428 }
3429 }
3430
3431 return (FALSE);
3432 }
3433
3434 static bool
3435 necp_policy_mark_for_deletion(struct necp_session *session, struct necp_session_policy *policy)
3436 {
3437 if (session == NULL || policy == NULL) {
3438 return (FALSE);
3439 }
3440
3441 policy->pending_deletion = TRUE;
3442 session->dirty = TRUE;
3443
3444 if (necp_debug) {
3445 NECPLOG0(LOG_DEBUG, "Marked NECP policy for removal");
3446 }
3447 return (TRUE);
3448 }
3449
3450 static bool
3451 necp_policy_mark_all_for_deletion(struct necp_session *session)
3452 {
3453 struct necp_session_policy *policy = NULL;
3454 struct necp_session_policy *temp_policy = NULL;
3455
3456 LIST_FOREACH_SAFE(policy, &session->policies, chain, temp_policy) {
3457 necp_policy_mark_for_deletion(session, policy);
3458 }
3459
3460 return (TRUE);
3461 }
3462
3463 static bool
3464 necp_policy_delete(struct necp_session *session, struct necp_session_policy *policy)
3465 {
3466 if (session == NULL || policy == NULL) {
3467 return (FALSE);
3468 }
3469
3470 LIST_REMOVE(policy, chain);
3471
3472 if (policy->result) {
3473 FREE(policy->result, M_NECP);
3474 policy->result = NULL;
3475 }
3476
3477 if (policy->conditions) {
3478 FREE(policy->conditions, M_NECP);
3479 policy->conditions = NULL;
3480 }
3481
3482 if (policy->route_rules) {
3483 FREE(policy->route_rules, M_NECP);
3484 policy->route_rules = NULL;
3485 }
3486
3487 FREE_ZONE(policy, sizeof(*policy), M_NECP_SESSION_POLICY);
3488
3489 if (necp_debug) {
3490 NECPLOG0(LOG_DEBUG, "Removed NECP policy");
3491 }
3492 return (TRUE);
3493 }
3494
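// Tears down everything that necp_policy_apply() installed for this session
// policy -- UUID and string mappings, any route rule, and the derived kernel
// socket and IP output policies -- while leaving the session policy itself in
// place so it can be re-applied later.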
3495 static bool
3496 necp_policy_unapply(struct necp_session_policy *policy)
3497 {
3498 int i = 0;
3499 if (policy == NULL) {
3500 return (FALSE);
3501 }
3502
3503 LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
3504
3505 // Release local uuid mappings
3506 if (!uuid_is_null(policy->applied_app_uuid)) {
3507 bool removed_mapping = FALSE;
3508 if (necp_remove_uuid_app_id_mapping(policy->applied_app_uuid, &removed_mapping, TRUE) && removed_mapping) {
3509 necp_uuid_app_id_mappings_dirty = TRUE;
3510 necp_num_uuid_app_id_mappings--;
3511 }
3512 uuid_clear(policy->applied_app_uuid);
3513 }
3514 if (!uuid_is_null(policy->applied_real_app_uuid)) {
3515 necp_remove_uuid_app_id_mapping(policy->applied_real_app_uuid, NULL, FALSE);
3516 uuid_clear(policy->applied_real_app_uuid);
3517 }
3518 if (!uuid_is_null(policy->applied_result_uuid)) {
3519 necp_remove_uuid_service_id_mapping(policy->applied_result_uuid);
3520 uuid_clear(policy->applied_result_uuid);
3521 }
3522
3523 // Release string mappings
3524 if (policy->applied_account != NULL) {
3525 necp_remove_string_to_id_mapping(&necp_account_id_list, policy->applied_account);
3526 FREE(policy->applied_account, M_NECP);
3527 policy->applied_account = NULL;
3528 }
3529
3530 // Release route rule
3531 if (policy->applied_route_rules_id != 0) {
3532 necp_remove_route_rule(&necp_route_rules, policy->applied_route_rules_id);
3533 policy->applied_route_rules_id = 0;
3534 }
3535
3536 // Remove socket policies
3537 for (i = 0; i < MAX_KERNEL_SOCKET_POLICIES; i++) {
3538 if (policy->kernel_socket_policies[i] != 0) {
3539 necp_kernel_socket_policy_delete(policy->kernel_socket_policies[i]);
3540 policy->kernel_socket_policies[i] = 0;
3541 }
3542 }
3543
3544 // Remove IP output policies
3545 for (i = 0; i < MAX_KERNEL_IP_OUTPUT_POLICIES; i++) {
3546 if (policy->kernel_ip_output_policies[i] != 0) {
3547 necp_kernel_ip_output_policy_delete(policy->kernel_ip_output_policies[i]);
3548 policy->kernel_ip_output_policies[i] = 0;
3549 }
3550 }
3551
3552 policy->applied = FALSE;
3553
3554 return (TRUE);
3555 }
3556
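// Sub-orders for the kernel IP output policies derived from one session
// policy. Based on how they are used below: each sub-order doubles as the
// slot index into policy->kernel_ip_output_policies[], and the tunnel
// loop-back entries (0 and 1) are kept distinct from the plain ID/non-ID
// entries (2 and 3).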
3557 #define NECP_KERNEL_POLICY_SUBORDER_ID_TUNNEL_CONDITION 0
3558 #define NECP_KERNEL_POLICY_SUBORDER_NON_ID_TUNNEL_CONDITION 1
3559 #define NECP_KERNEL_POLICY_SUBORDER_ID_CONDITION 2
3560 #define NECP_KERNEL_POLICY_SUBORDER_NON_ID_CONDITIONS 3
3561 struct necp_policy_result_ip_tunnel {
3562 u_int32_t secondary_result;
3563 char interface_name[IFXNAMSIZ];
3564 } __attribute__((__packed__));
3565
3566 struct necp_policy_result_service {
3567 uuid_t identifier;
3568 u_int32_t data;
3569 } __attribute__((__packed__));
3570
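/*
 * necp_policy_apply() ingests one session policy into kernel policies. It
 * walks the policy's condition TLVs to build a condition mask (noting whether
 * the conditions require socket-level state or can also be checked at the IP
 * layer), interprets the result TLV, and then installs the corresponding
 * kernel socket and/or IP output policies. Must be called with
 * necp_kernel_policy_lock held exclusively.
 */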
3571 static bool
3572 necp_policy_apply(struct necp_session *session, struct necp_session_policy *policy)
3573 {
3574 bool socket_only_conditions = FALSE;
3575 bool socket_ip_conditions = FALSE;
3576
3577 bool socket_layer_non_id_conditions = FALSE;
3578 bool ip_output_layer_non_id_conditions = FALSE;
3579 bool ip_output_layer_non_id_only = FALSE;
3580 bool ip_output_layer_id_condition = FALSE;
3581 bool ip_output_layer_tunnel_condition_from_id = FALSE;
3582 bool ip_output_layer_tunnel_condition_from_non_id = FALSE;
3583 necp_kernel_policy_id cond_ip_output_layer_id = NECP_KERNEL_POLICY_ID_NONE;
3584
3585 u_int32_t master_condition_mask = 0;
3586 u_int32_t master_condition_negated_mask = 0;
3587 ifnet_t cond_bound_interface = NULL;
3588 u_int32_t cond_account_id = 0;
3589 char *cond_domain = NULL;
3590 char *cond_custom_entitlement = NULL;
3591 pid_t cond_pid = 0;
3592 uid_t cond_uid = 0;
3593 necp_app_id cond_app_id = 0;
3594 necp_app_id cond_real_app_id = 0;
3595 struct necp_policy_condition_tc_range cond_traffic_class;
3596 cond_traffic_class.start_tc = 0;
3597 cond_traffic_class.end_tc = 0;
3598 u_int16_t cond_protocol = 0;
3599 union necp_sockaddr_union cond_local_start;
3600 union necp_sockaddr_union cond_local_end;
3601 u_int8_t cond_local_prefix = 0;
3602 union necp_sockaddr_union cond_remote_start;
3603 union necp_sockaddr_union cond_remote_end;
3604 u_int8_t cond_remote_prefix = 0;
3605 u_int32_t offset = 0;
3606 u_int8_t ultimate_result = 0;
3607 u_int32_t secondary_result = 0;
3608 necp_kernel_policy_result_parameter secondary_result_parameter;
3609 memset(&secondary_result_parameter, 0, sizeof(secondary_result_parameter));
3610 u_int32_t cond_last_interface_index = 0;
3611 necp_kernel_policy_result_parameter ultimate_result_parameter;
3612 memset(&ultimate_result_parameter, 0, sizeof(ultimate_result_parameter));
3613
3614 if (policy == NULL) {
3615 return (FALSE);
3616 }
3617
3618 LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
3619
3620 // Process conditions
3621 while (offset < policy->conditions_size) {
3622 u_int32_t length = 0;
3623 u_int8_t *value = necp_buffer_get_tlv_value(policy->conditions, offset, &length);
3624
3625 u_int8_t condition_type = necp_policy_condition_get_type_from_buffer(value, length);
3626 u_int8_t condition_flags = necp_policy_condition_get_flags_from_buffer(value, length);
3627 bool condition_is_negative = condition_flags & NECP_POLICY_CONDITION_FLAGS_NEGATIVE;
3628 u_int32_t condition_length = necp_policy_condition_get_value_length_from_buffer(value, length);
3629 u_int8_t *condition_value = necp_policy_condition_get_value_pointer_from_buffer(value, length);
3630 switch (condition_type) {
3631 case NECP_POLICY_CONDITION_DEFAULT: {
3632 socket_ip_conditions = TRUE;
3633 break;
3634 }
3635 case NECP_POLICY_CONDITION_ALL_INTERFACES: {
3636 master_condition_mask |= NECP_KERNEL_CONDITION_ALL_INTERFACES;
3637 socket_ip_conditions = TRUE;
3638 break;
3639 }
3640 case NECP_POLICY_CONDITION_ENTITLEMENT: {
3641 if (condition_length > 0) {
3642 if (cond_custom_entitlement == NULL) {
3643 cond_custom_entitlement = necp_copy_string((char *)condition_value, condition_length);
3644 if (cond_custom_entitlement != NULL) {
3645 master_condition_mask |= NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT;
3646 socket_only_conditions = TRUE;
3647 }
3648 }
3649 } else {
3650 master_condition_mask |= NECP_KERNEL_CONDITION_ENTITLEMENT;
3651 socket_only_conditions = TRUE;
3652 }
3653 break;
3654 }
3655 case NECP_POLICY_CONDITION_DOMAIN: {
3656 // Make sure there is only one such rule
3657 if (condition_length > 0 && cond_domain == NULL) {
3658 cond_domain = necp_create_trimmed_domain((char *)condition_value, condition_length);
3659 if (cond_domain != NULL) {
3660 master_condition_mask |= NECP_KERNEL_CONDITION_DOMAIN;
3661 if (condition_is_negative) {
3662 master_condition_negated_mask |= NECP_KERNEL_CONDITION_DOMAIN;
3663 }
3664 socket_only_conditions = TRUE;
3665 }
3666 }
3667 break;
3668 }
3669 case NECP_POLICY_CONDITION_ACCOUNT: {
3670 // Make sure there is only one such rule
3671 if (condition_length > 0 && cond_account_id == 0 && policy->applied_account == NULL) {
3672 char *string = NULL;
3673 MALLOC(string, char *, condition_length + 1, M_NECP, M_WAITOK);
3674 if (string != NULL) {
3675 memcpy(string, condition_value, condition_length);
3676 string[condition_length] = 0;
3677 cond_account_id = necp_create_string_to_id_mapping(&necp_account_id_list, string);
3678 if (cond_account_id != 0) {
3679 policy->applied_account = string; // Save the string in parent policy
3680 master_condition_mask |= NECP_KERNEL_CONDITION_ACCOUNT_ID;
3681 if (condition_is_negative) {
3682 master_condition_negated_mask |= NECP_KERNEL_CONDITION_ACCOUNT_ID;
3683 }
3684 socket_only_conditions = TRUE;
3685 } else {
3686 FREE(string, M_NECP);
3687 }
3688 }
3689 }
3690 break;
3691 }
3692 case NECP_POLICY_CONDITION_APPLICATION: {
3693 // Make sure there is only one such rule, because we save the uuid in the policy
3694 if (condition_length >= sizeof(uuid_t) && cond_app_id == 0) {
3695 bool allocated_mapping = FALSE;
3696 uuid_t application_uuid;
3697 memcpy(application_uuid, condition_value, sizeof(uuid_t));
3698 cond_app_id = necp_create_uuid_app_id_mapping(application_uuid, &allocated_mapping, TRUE);
3699 if (cond_app_id != 0) {
3700 if (allocated_mapping) {
3701 necp_uuid_app_id_mappings_dirty = TRUE;
3702 necp_num_uuid_app_id_mappings++;
3703 }
3704 uuid_copy(policy->applied_app_uuid, application_uuid);
3705 master_condition_mask |= NECP_KERNEL_CONDITION_APP_ID;
3706 if (condition_is_negative) {
3707 master_condition_negated_mask |= NECP_KERNEL_CONDITION_APP_ID;
3708 }
3709 socket_only_conditions = TRUE;
3710 }
3711 }
3712 break;
3713 }
3714 case NECP_POLICY_CONDITION_REAL_APPLICATION: {
3715 // Make sure there is only one such rule, because we save the uuid in the policy
3716 if (condition_length >= sizeof(uuid_t) && cond_real_app_id == 0) {
3717 uuid_t real_application_uuid;
3718 memcpy(real_application_uuid, condition_value, sizeof(uuid_t));
3719 cond_real_app_id = necp_create_uuid_app_id_mapping(real_application_uuid, NULL, FALSE);
3720 if (cond_real_app_id != 0) {
3721 uuid_copy(policy->applied_real_app_uuid, real_application_uuid);
3722 master_condition_mask |= NECP_KERNEL_CONDITION_REAL_APP_ID;
3723 if (condition_is_negative) {
3724 master_condition_negated_mask |= NECP_KERNEL_CONDITION_REAL_APP_ID;
3725 }
3726 socket_only_conditions = TRUE;
3727 }
3728 }
3729 break;
3730 }
3731 case NECP_POLICY_CONDITION_PID: {
3732 if (condition_length >= sizeof(pid_t)) {
3733 master_condition_mask |= NECP_KERNEL_CONDITION_PID;
3734 if (condition_is_negative) {
3735 master_condition_negated_mask |= NECP_KERNEL_CONDITION_PID;
3736 }
3737 memcpy(&cond_pid, condition_value, sizeof(cond_pid));
3738 socket_only_conditions = TRUE;
3739 }
3740 break;
3741 }
3742 case NECP_POLICY_CONDITION_UID: {
3743 if (condition_length >= sizeof(uid_t)) {
3744 master_condition_mask |= NECP_KERNEL_CONDITION_UID;
3745 if (condition_is_negative) {
3746 master_condition_negated_mask |= NECP_KERNEL_CONDITION_UID;
3747 }
3748 memcpy(&cond_uid, condition_value, sizeof(cond_uid));
3749 socket_only_conditions = TRUE;
3750 }
3751 break;
3752 }
3753 case NECP_POLICY_CONDITION_TRAFFIC_CLASS: {
3754 if (condition_length >= sizeof(struct necp_policy_condition_tc_range)) {
3755 master_condition_mask |= NECP_KERNEL_CONDITION_TRAFFIC_CLASS;
3756 if (condition_is_negative) {
3757 master_condition_negated_mask |= NECP_KERNEL_CONDITION_TRAFFIC_CLASS;
3758 }
3759 memcpy(&cond_traffic_class, condition_value, sizeof(cond_traffic_class));
3760 socket_only_conditions = TRUE;
3761 }
3762 break;
3763 }
3764 case NECP_POLICY_CONDITION_BOUND_INTERFACE: {
3765 if (condition_length <= IFXNAMSIZ && condition_length > 0) {
3766 char interface_name[IFXNAMSIZ];
3767 memcpy(interface_name, condition_value, condition_length);
3768 interface_name[condition_length - 1] = 0; // Make sure the string is NULL terminated
3769 if (ifnet_find_by_name(interface_name, &cond_bound_interface) == 0) {
3770 master_condition_mask |= NECP_KERNEL_CONDITION_BOUND_INTERFACE;
3771 if (condition_is_negative) {
3772 master_condition_negated_mask |= NECP_KERNEL_CONDITION_BOUND_INTERFACE;
3773 }
3774 }
3775 socket_ip_conditions = TRUE;
3776 }
3777 break;
3778 }
3779 case NECP_POLICY_CONDITION_IP_PROTOCOL: {
3780 if (condition_length >= sizeof(u_int16_t)) {
3781 master_condition_mask |= NECP_KERNEL_CONDITION_PROTOCOL;
3782 if (condition_is_negative) {
3783 master_condition_negated_mask |= NECP_KERNEL_CONDITION_PROTOCOL;
3784 }
3785 memcpy(&cond_protocol, condition_value, sizeof(cond_protocol));
3786 socket_ip_conditions = TRUE;
3787 }
3788 break;
3789 }
3790 case NECP_POLICY_CONDITION_LOCAL_ADDR: {
3791 struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)condition_value;
3792 if (!necp_address_is_valid(&address_struct->address.sa)) {
3793 break;
3794 }
3795
3796 cond_local_prefix = address_struct->prefix;
3797 memcpy(&cond_local_start, &address_struct->address, sizeof(address_struct->address));
3798 master_condition_mask |= NECP_KERNEL_CONDITION_LOCAL_START;
3799 master_condition_mask |= NECP_KERNEL_CONDITION_LOCAL_PREFIX;
3800 if (condition_is_negative) {
3801 master_condition_negated_mask |= NECP_KERNEL_CONDITION_LOCAL_START;
3802 master_condition_negated_mask |= NECP_KERNEL_CONDITION_LOCAL_PREFIX;
3803 }
3804 socket_ip_conditions = TRUE;
3805 break;
3806 }
3807 case NECP_POLICY_CONDITION_REMOTE_ADDR: {
3808 struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)condition_value;
3809 if (!necp_address_is_valid(&address_struct->address.sa)) {
3810 break;
3811 }
3812
3813 cond_remote_prefix = address_struct->prefix;
3814 memcpy(&cond_remote_start, &address_struct->address, sizeof(address_struct->address));
3815 master_condition_mask |= NECP_KERNEL_CONDITION_REMOTE_START;
3816 master_condition_mask |= NECP_KERNEL_CONDITION_REMOTE_PREFIX;
3817 if (condition_is_negative) {
3818 master_condition_negated_mask |= NECP_KERNEL_CONDITION_REMOTE_START;
3819 master_condition_negated_mask |= NECP_KERNEL_CONDITION_REMOTE_PREFIX;
3820 }
3821 socket_ip_conditions = TRUE;
3822 break;
3823 }
3824 case NECP_POLICY_CONDITION_LOCAL_ADDR_RANGE: {
3825 struct necp_policy_condition_addr_range *address_struct = (struct necp_policy_condition_addr_range *)(void *)condition_value;
3826 if (!necp_address_is_valid(&address_struct->start_address.sa) ||
3827 !necp_address_is_valid(&address_struct->end_address.sa)) {
3828 break;
3829 }
3830
3831 memcpy(&cond_local_start, &address_struct->start_address, sizeof(address_struct->start_address));
3832 memcpy(&cond_local_end, &address_struct->end_address, sizeof(address_struct->end_address));
3833 master_condition_mask |= NECP_KERNEL_CONDITION_LOCAL_START;
3834 master_condition_mask |= NECP_KERNEL_CONDITION_LOCAL_END;
3835 if (condition_is_negative) {
3836 master_condition_negated_mask |= NECP_KERNEL_CONDITION_LOCAL_START;
3837 master_condition_negated_mask |= NECP_KERNEL_CONDITION_LOCAL_END;
3838 }
3839 socket_ip_conditions = TRUE;
3840 break;
3841 }
3842 case NECP_POLICY_CONDITION_REMOTE_ADDR_RANGE: {
3843 struct necp_policy_condition_addr_range *address_struct = (struct necp_policy_condition_addr_range *)(void *)condition_value;
3844 if (!necp_address_is_valid(&address_struct->start_address.sa) ||
3845 !necp_address_is_valid(&address_struct->end_address.sa)) {
3846 break;
3847 }
3848
3849 memcpy(&cond_remote_start, &address_struct->start_address, sizeof(address_struct->start_address));
3850 memcpy(&cond_remote_end, &address_struct->end_address, sizeof(address_struct->end_address));
3851 master_condition_mask |= NECP_KERNEL_CONDITION_REMOTE_START;
3852 master_condition_mask |= NECP_KERNEL_CONDITION_REMOTE_END;
3853 if (condition_is_negative) {
3854 master_condition_negated_mask |= NECP_KERNEL_CONDITION_REMOTE_START;
3855 master_condition_negated_mask |= NECP_KERNEL_CONDITION_REMOTE_END;
3856 }
3857 socket_ip_conditions = TRUE;
3858 break;
3859 }
3860 default: {
3861 break;
3862 }
3863 }
3864
3865 offset += sizeof(u_int8_t) + sizeof(u_int32_t) + length;
3866 }
3867
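// At this point socket_only_conditions/socket_ip_conditions describe where the
// conditions can be evaluated: app, entitlement, domain, account, pid, uid,
// and traffic-class conditions need socket state, while interface, protocol,
// and address conditions can also be matched on raw IP output. The result
// type below decides which kernel layers actually receive policies.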
3868 // Process result
3869 ultimate_result = necp_policy_get_result_type(policy);
3870 switch (ultimate_result) {
3871 case NECP_POLICY_RESULT_PASS: {
3872 if (socket_only_conditions) { // socket_ip_conditions can be TRUE or FALSE
3873 socket_layer_non_id_conditions = TRUE;
3874 ip_output_layer_id_condition = TRUE;
3875 } else if (socket_ip_conditions) {
3876 socket_layer_non_id_conditions = TRUE;
3877 ip_output_layer_id_condition = TRUE;
3878 ip_output_layer_non_id_conditions = TRUE;
3879 }
3880 break;
3881 }
3882 case NECP_POLICY_RESULT_DROP: {
3883 if (socket_only_conditions) { // socket_ip_conditions can be TRUE or FALSE
3884 socket_layer_non_id_conditions = TRUE;
3885 } else if (socket_ip_conditions) {
3886 socket_layer_non_id_conditions = TRUE;
3887 ip_output_layer_non_id_conditions = TRUE;
3888 ip_output_layer_non_id_only = TRUE; // Only apply drop to packets that didn't go through socket layer
3889 }
3890 break;
3891 }
3892 case NECP_POLICY_RESULT_SKIP: {
3893 u_int32_t skip_policy_order = 0;
3894 if (necp_policy_get_result_parameter(policy, (u_int8_t *)&skip_policy_order, sizeof(skip_policy_order))) {
3895 ultimate_result_parameter.skip_policy_order = skip_policy_order;
3896 }
3897
3898 if (socket_only_conditions) { // socket_ip_conditions can be TRUE or FALSE
3899 socket_layer_non_id_conditions = TRUE;
3900 ip_output_layer_id_condition = TRUE;
3901 } else if (socket_ip_conditions) {
3902 socket_layer_non_id_conditions = TRUE;
3903 ip_output_layer_non_id_conditions = TRUE;
3904 }
3905 break;
3906 }
3907 case NECP_POLICY_RESULT_SOCKET_DIVERT:
3908 case NECP_POLICY_RESULT_SOCKET_FILTER: {
3909 u_int32_t control_unit = 0;
3910 if (necp_policy_get_result_parameter(policy, (u_int8_t *)&control_unit, sizeof(control_unit))) {
3911 ultimate_result_parameter.flow_divert_control_unit = control_unit;
3912 }
3913 socket_layer_non_id_conditions = TRUE;
3914 break;
3915 }
3916 case NECP_POLICY_RESULT_IP_TUNNEL: {
3917 struct necp_policy_result_ip_tunnel tunnel_parameters;
3918 u_int32_t tunnel_parameters_length = necp_policy_get_result_parameter_length(policy);
3919 if (tunnel_parameters_length > sizeof(u_int32_t) &&
3920 tunnel_parameters_length <= sizeof(struct necp_policy_result_ip_tunnel) &&
3921 necp_policy_get_result_parameter(policy, (u_int8_t *)&tunnel_parameters, sizeof(tunnel_parameters))) {
3922 ifnet_t tunnel_interface = NULL;
3923 tunnel_parameters.interface_name[tunnel_parameters_length - sizeof(u_int32_t) - 1] = 0; // Make sure the string is NULL terminated
3924 if (ifnet_find_by_name(tunnel_parameters.interface_name, &tunnel_interface) == 0) {
3925 ultimate_result_parameter.tunnel_interface_index = tunnel_interface->if_index;
3926 ifnet_release(tunnel_interface);
3927 }
3928
3929 secondary_result = tunnel_parameters.secondary_result;
3930 if (secondary_result) {
3931 cond_last_interface_index = ultimate_result_parameter.tunnel_interface_index;
3932 }
3933 }
3934
3935 if (socket_only_conditions) { // socket_ip_conditions can be TRUE or FALSE
3936 socket_layer_non_id_conditions = TRUE;
3937 ip_output_layer_id_condition = TRUE;
3938 if (secondary_result) {
3939 ip_output_layer_tunnel_condition_from_id = TRUE;
3940 }
3941 } else if (socket_ip_conditions) {
3942 socket_layer_non_id_conditions = TRUE;
3943 ip_output_layer_id_condition = TRUE;
3944 ip_output_layer_non_id_conditions = TRUE;
3945 if (secondary_result) {
3946 ip_output_layer_tunnel_condition_from_id = TRUE;
3947 ip_output_layer_tunnel_condition_from_non_id = TRUE;
3948 }
3949 }
3950 break;
3951 }
3952 case NECP_POLICY_RESULT_TRIGGER:
3953 case NECP_POLICY_RESULT_TRIGGER_IF_NEEDED:
3954 case NECP_POLICY_RESULT_TRIGGER_SCOPED:
3955 case NECP_POLICY_RESULT_NO_TRIGGER_SCOPED: {
3956 struct necp_policy_result_service service_parameters;
3957 u_int32_t service_result_length = necp_policy_get_result_parameter_length(policy);
3958 bool has_extra_service_data = FALSE;
3959 if (service_result_length >= (sizeof(service_parameters))) {
3960 has_extra_service_data = TRUE;
3961 }
3962 if (necp_policy_get_result_parameter(policy, (u_int8_t *)&service_parameters, sizeof(service_parameters))) {
3963 ultimate_result_parameter.service.identifier = necp_create_uuid_service_id_mapping(service_parameters.identifier);
3964 if (ultimate_result_parameter.service.identifier != 0) {
3965 uuid_copy(policy->applied_result_uuid, service_parameters.identifier);
3966 socket_layer_non_id_conditions = TRUE;
3967 if (has_extra_service_data) {
3968 ultimate_result_parameter.service.data = service_parameters.data;
3969 } else {
3970 ultimate_result_parameter.service.data = 0;
3971 }
3972 }
3973 }
3974 break;
3975 }
3976 case NECP_POLICY_RESULT_USE_NETAGENT: {
3977 uuid_t netagent_uuid;
3978 if (necp_policy_get_result_parameter(policy, (u_int8_t *)&netagent_uuid, sizeof(netagent_uuid))) {
3979 ultimate_result_parameter.netagent_id = necp_create_uuid_service_id_mapping(netagent_uuid);
3980 if (ultimate_result_parameter.netagent_id != 0) {
3981 uuid_copy(policy->applied_result_uuid, netagent_uuid);
3982 socket_layer_non_id_conditions = TRUE;
3983 }
3984 }
3985 break;
3986 }
3987 case NECP_POLICY_RESULT_SOCKET_SCOPED: {
3988 u_int32_t interface_name_length = necp_policy_get_result_parameter_length(policy);
3989 if (interface_name_length <= IFXNAMSIZ && interface_name_length > 0) {
3990 char interface_name[IFXNAMSIZ];
3991 ifnet_t scope_interface = NULL;
3992 necp_policy_get_result_parameter(policy, (u_int8_t *)interface_name, interface_name_length);
3993 interface_name[interface_name_length - 1] = 0; // Make sure the string is NULL terminated
3994 if (ifnet_find_by_name(interface_name, &scope_interface) == 0) {
3995 ultimate_result_parameter.scoped_interface_index = scope_interface->if_index;
3996 socket_layer_non_id_conditions = TRUE;
3997 ifnet_release(scope_interface);
3998 }
3999 }
4000 break;
4001 }
4002 case NECP_POLICY_RESULT_ROUTE_RULES: {
4003 if (policy->route_rules != NULL && policy->route_rules_size > 0) {
4004 u_int32_t route_rule_id = necp_create_route_rule(&necp_route_rules, policy->route_rules, policy->route_rules_size);
4005 if (route_rule_id > 0) {
4006 policy->applied_route_rules_id = route_rule_id;
4007 ultimate_result_parameter.route_rule_id = route_rule_id;
4008 socket_layer_non_id_conditions = TRUE;
4009 }
4010 }
4011 break;
4012 }
4013 default: {
4014 break;
4015 }
4016 }
4017
4018 if (socket_layer_non_id_conditions) {
4019 necp_kernel_policy_id policy_id = necp_kernel_socket_policy_add(policy->id, policy->order, session->session_order, session->proc_pid, master_condition_mask, master_condition_negated_mask, cond_app_id, cond_real_app_id, cond_custom_entitlement, cond_account_id, cond_domain, cond_pid, cond_uid, cond_bound_interface, cond_traffic_class, cond_protocol, &cond_local_start, &cond_local_end, cond_local_prefix, &cond_remote_start, &cond_remote_end, cond_remote_prefix, ultimate_result, ultimate_result_parameter);
4020
4021 if (policy_id == 0) {
4022 NECPLOG0(LOG_DEBUG, "Error applying socket kernel policy");
4023 goto fail;
4024 }
4025
4026 cond_ip_output_layer_id = policy_id;
4027 policy->kernel_socket_policies[0] = policy_id;
4028 }
4029
4030 if (ip_output_layer_non_id_conditions) {
4031 u_int32_t condition_mask = master_condition_mask;
4032 if (ip_output_layer_non_id_only) {
4033 condition_mask |= NECP_KERNEL_CONDITION_POLICY_ID;
4034 }
4035 necp_kernel_policy_id policy_id = necp_kernel_ip_output_policy_add(policy->id, policy->order, NECP_KERNEL_POLICY_SUBORDER_NON_ID_CONDITIONS, session->session_order, session->proc_pid, condition_mask, master_condition_negated_mask, NECP_KERNEL_POLICY_ID_NONE, cond_bound_interface, 0, cond_protocol, &cond_local_start, &cond_local_end, cond_local_prefix, &cond_remote_start, &cond_remote_end, cond_remote_prefix, ultimate_result, ultimate_result_parameter);
4036
4037 if (policy_id == 0) {
4038 NECPLOG0(LOG_DEBUG, "Error applying IP output kernel policy");
4039 goto fail;
4040 }
4041
4042 policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_NON_ID_CONDITIONS] = policy_id;
4043 }
4044
4045 if (ip_output_layer_id_condition) {
4046 necp_kernel_policy_id policy_id = necp_kernel_ip_output_policy_add(policy->id, policy->order, NECP_KERNEL_POLICY_SUBORDER_ID_CONDITION, session->session_order, session->proc_pid, NECP_KERNEL_CONDITION_POLICY_ID | NECP_KERNEL_CONDITION_ALL_INTERFACES, 0, cond_ip_output_layer_id, NULL, 0, 0, NULL, NULL, 0, NULL, NULL, 0, ultimate_result, ultimate_result_parameter);
4047
4048 if (policy_id == 0) {
4049 NECPLOG0(LOG_DEBUG, "Error applying IP output kernel policy");
4050 goto fail;
4051 }
4052
4053 policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_ID_CONDITION] = policy_id;
4054 }
4055
4056 // Extra IP output policies to handle tunneled packets that loop back through the IP layer
4057 if (ip_output_layer_tunnel_condition_from_non_id) {
4058 necp_kernel_policy_id policy_id = necp_kernel_ip_output_policy_add(policy->id, policy->order, NECP_KERNEL_POLICY_SUBORDER_NON_ID_TUNNEL_CONDITION, session->session_order, session->proc_pid, NECP_KERNEL_CONDITION_POLICY_ID | NECP_KERNEL_CONDITION_LAST_INTERFACE | NECP_KERNEL_CONDITION_ALL_INTERFACES, 0, policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_NON_ID_CONDITIONS], NULL, cond_last_interface_index, 0, NULL, NULL, 0, NULL, NULL, 0, secondary_result, secondary_result_parameter);
4059
4060 if (policy_id == 0) {
4061 NECPLOG0(LOG_DEBUG, "Error applying IP output kernel policy");
4062 goto fail;
4063 }
4064
4065 policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_NON_ID_TUNNEL_CONDITION] = policy_id;
4066 }
4067
4068 if (ip_output_layer_tunnel_condition_from_id) {
4069 necp_kernel_policy_id policy_id = necp_kernel_ip_output_policy_add(policy->id, policy->order, NECP_KERNEL_POLICY_SUBORDER_ID_TUNNEL_CONDITION, session->session_order, session->proc_pid, NECP_KERNEL_CONDITION_POLICY_ID | NECP_KERNEL_CONDITION_LAST_INTERFACE | NECP_KERNEL_CONDITION_ALL_INTERFACES, 0, policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_ID_CONDITION], NULL, cond_last_interface_index, 0, NULL, NULL, 0, NULL, NULL, 0, secondary_result, secondary_result_parameter);
4070
4071 if (policy_id == 0) {
4072 NECPLOG0(LOG_DEBUG, "Error applying IP output kernel policy");
4073 goto fail;
4074 }
4075
4076 policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_ID_TUNNEL_CONDITION] = policy_id;
4077 }
4078
4079 policy->applied = TRUE;
4080 policy->pending_update = FALSE;
4081 return (TRUE);
4082
4083 fail:
4084 return (FALSE);
4085 }
4086
4087 static void
4088 necp_policy_apply_all(struct necp_session *session)
4089 {
4090 struct necp_session_policy *policy = NULL;
4091 struct necp_session_policy *temp_policy = NULL;
4092 struct kev_necp_policies_changed_data kev_data;
4093 kev_data.changed_count = 0;
4094
4095 lck_rw_lock_exclusive(&necp_kernel_policy_lock);
4096
4097 // Remove existing policies marked for deletion, and apply new or updated policies
4098 if (session->dirty) {
4099 LIST_FOREACH_SAFE(policy, &session->policies, chain, temp_policy) {
4100 if (policy->pending_deletion) {
4101 if (policy->applied) {
4102 necp_policy_unapply(policy);
4103 }
4104 // Delete the policy
4105 necp_policy_delete(session, policy);
4106 } else if (!policy->applied) {
4107 necp_policy_apply(session, policy);
4108 } else if (policy->pending_update) {
4109 // Must have been applied, but needs an update. Remove and re-add.
4110 necp_policy_unapply(policy);
4111 necp_policy_apply(session, policy);
4112 }
4113 }
4114
4115 necp_kernel_socket_policies_update_uuid_table();
4116 necp_kernel_socket_policies_reprocess();
4117 necp_kernel_ip_output_policies_reprocess();
4118
4119 // Clear dirty bit flags
4120 session->dirty = FALSE;
4121 }
4122
4123 lck_rw_done(&necp_kernel_policy_lock);
4124
4125 necp_update_all_clients();
4126 necp_post_change_event(&kev_data);
4127
4128 if (necp_debug) {
4129 NECPLOG0(LOG_DEBUG, "Applied NECP policies");
4130 }
4131 }
4132
4133 // Kernel Policy Management
4134 // ---------------------
4135 // Kernel policies are derived from session policies
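// IDs are drawn from two separate ranges so a policy ID encodes its layer:
// socket-level IDs stay within [NECP_KERNEL_POLICY_ID_FIRST_VALID_SOCKET,
// NECP_KERNEL_POLICY_ID_FIRST_VALID_IP), and IP-level IDs start at
// NECP_KERNEL_POLICY_ID_FIRST_VALID_IP. Each counter wraps back to the start
// of its range when it overflows it.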
4136 static necp_kernel_policy_id
4137 necp_kernel_policy_get_new_id(bool socket_level)
4138 {
4139 static necp_kernel_policy_id necp_last_kernel_socket_policy_id = 0;
4140 static necp_kernel_policy_id necp_last_kernel_ip_policy_id = 0;
4141
4142 necp_kernel_policy_id newid = NECP_KERNEL_POLICY_ID_NONE;
4143
4144 LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
4145
4146 if (socket_level) {
4147 necp_last_kernel_socket_policy_id++;
4148 if (necp_last_kernel_socket_policy_id < NECP_KERNEL_POLICY_ID_FIRST_VALID_SOCKET ||
4149 necp_last_kernel_socket_policy_id >= NECP_KERNEL_POLICY_ID_FIRST_VALID_IP) {
4150 necp_last_kernel_socket_policy_id = NECP_KERNEL_POLICY_ID_FIRST_VALID_SOCKET;
4151 }
4152 newid = necp_last_kernel_socket_policy_id;
4153 } else {
4154 necp_last_kernel_ip_policy_id++;
4155 if (necp_last_kernel_ip_policy_id < NECP_KERNEL_POLICY_ID_FIRST_VALID_IP) {
4156 necp_last_kernel_ip_policy_id = NECP_KERNEL_POLICY_ID_FIRST_VALID_IP;
4157 }
4158 newid = necp_last_kernel_ip_policy_id;
4159 }
4160
4161 if (newid == NECP_KERNEL_POLICY_ID_NONE) {
4162 NECPLOG0(LOG_DEBUG, "Failed to allocate kernel policy id");
4163 return (0);
4164 }
4165
4166 return (newid);
4167 }
4168
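// Conditions that are meaningful for socket-level kernel policies; anything
// outside this mask is stripped by necp_kernel_socket_policy_add() below.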
4169 #define NECP_KERNEL_VALID_SOCKET_CONDITIONS (NECP_KERNEL_CONDITION_APP_ID | NECP_KERNEL_CONDITION_REAL_APP_ID | NECP_KERNEL_CONDITION_DOMAIN | NECP_KERNEL_CONDITION_ACCOUNT_ID | NECP_KERNEL_CONDITION_PID | NECP_KERNEL_CONDITION_UID | NECP_KERNEL_CONDITION_ALL_INTERFACES | NECP_KERNEL_CONDITION_BOUND_INTERFACE | NECP_KERNEL_CONDITION_TRAFFIC_CLASS | NECP_KERNEL_CONDITION_PROTOCOL | NECP_KERNEL_CONDITION_LOCAL_START | NECP_KERNEL_CONDITION_LOCAL_END | NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_REMOTE_START | NECP_KERNEL_CONDITION_REMOTE_END | NECP_KERNEL_CONDITION_REMOTE_PREFIX | NECP_KERNEL_CONDITION_ENTITLEMENT | NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT)
4170 static necp_kernel_policy_id
4171 necp_kernel_socket_policy_add(necp_policy_id parent_policy_id, necp_policy_order order, u_int32_t session_order, int session_pid, u_int32_t condition_mask, u_int32_t condition_negated_mask, necp_app_id cond_app_id, necp_app_id cond_real_app_id, char *cond_custom_entitlement, u_int32_t cond_account_id, char *cond_domain, pid_t cond_pid, uid_t cond_uid, ifnet_t cond_bound_interface, struct necp_policy_condition_tc_range cond_traffic_class, u_int16_t cond_protocol, union necp_sockaddr_union *cond_local_start, union necp_sockaddr_union *cond_local_end, u_int8_t cond_local_prefix, union necp_sockaddr_union *cond_remote_start, union necp_sockaddr_union *cond_remote_end, u_int8_t cond_remote_prefix, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter)
4172 {
4173 struct necp_kernel_socket_policy *new_kernel_policy = NULL;
4174 struct necp_kernel_socket_policy *tmp_kernel_policy = NULL;
4175
4176 MALLOC_ZONE(new_kernel_policy, struct necp_kernel_socket_policy *, sizeof(*new_kernel_policy), M_NECP_SOCKET_POLICY, M_WAITOK);
4177 if (new_kernel_policy == NULL) {
4178 goto done;
4179 }
4180
4181 memset(new_kernel_policy, 0, sizeof(*new_kernel_policy)); // M_ZERO is not supported for MALLOC_ZONE
4182 new_kernel_policy->parent_policy_id = parent_policy_id;
4183 new_kernel_policy->id = necp_kernel_policy_get_new_id(true);
4184 new_kernel_policy->order = order;
4185 new_kernel_policy->session_order = session_order;
4186 new_kernel_policy->session_pid = session_pid;
4187
4188 // Sanitize condition mask: drop conditions that are meaningless in combination (BOUND_INTERFACE with ALL_INTERFACES, REAL_APP_ID or ENTITLEMENT without APP_ID, and PREFIX alongside an explicit END address)
4189 new_kernel_policy->condition_mask = (condition_mask & NECP_KERNEL_VALID_SOCKET_CONDITIONS);
4190 if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES) && (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE)) {
4191 new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_BOUND_INTERFACE;
4192 }
4193 if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) && !(new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID)) {
4194 new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_REAL_APP_ID;
4195 }
4196 if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT) && !(new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID)) {
4197 new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_ENTITLEMENT;
4198 }
4199 if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) && (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX)) {
4200 new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_LOCAL_PREFIX;
4201 }
4202 if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) && (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX)) {
4203 new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_REMOTE_PREFIX;
4204 }
4205 new_kernel_policy->condition_negated_mask = condition_negated_mask & new_kernel_policy->condition_mask;
4206
4207 // Set condition values
4208 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID) {
4209 new_kernel_policy->cond_app_id = cond_app_id;
4210 }
4211 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
4212 new_kernel_policy->cond_real_app_id = cond_real_app_id;
4213 }
4214 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT) {
4215 new_kernel_policy->cond_custom_entitlement = cond_custom_entitlement;
4216 new_kernel_policy->cond_custom_entitlement_matched = necp_boolean_state_unknown;
4217 }
4218 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID) {
4219 new_kernel_policy->cond_account_id = cond_account_id;
4220 }
4221 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_DOMAIN) {
4222 new_kernel_policy->cond_domain = cond_domain;
4223 new_kernel_policy->cond_domain_dot_count = necp_count_dots(cond_domain, strlen(cond_domain));
4224 }
4225 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PID) {
4226 new_kernel_policy->cond_pid = cond_pid;
4227 }
4228 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_UID) {
4229 new_kernel_policy->cond_uid = cond_uid;
4230 }
4231 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
4232 if (cond_bound_interface) {
4233 ifnet_reference(cond_bound_interface);
4234 }
4235 new_kernel_policy->cond_bound_interface = cond_bound_interface;
4236 }
4237 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
4238 new_kernel_policy->cond_traffic_class = cond_traffic_class;
4239 }
4240 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
4241 new_kernel_policy->cond_protocol = cond_protocol;
4242 }
4243 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
4244 memcpy(&new_kernel_policy->cond_local_start, cond_local_start, cond_local_start->sa.sa_len);
4245 }
4246 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
4247 memcpy(&new_kernel_policy->cond_local_end, cond_local_end, cond_local_end->sa.sa_len);
4248 }
4249 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
4250 new_kernel_policy->cond_local_prefix = cond_local_prefix;
4251 }
4252 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
4253 memcpy(&new_kernel_policy->cond_remote_start, cond_remote_start, cond_remote_start->sa.sa_len);
4254 }
4255 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
4256 memcpy(&new_kernel_policy->cond_remote_end, cond_remote_end, cond_remote_end->sa.sa_len);
4257 }
4258 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
4259 new_kernel_policy->cond_remote_prefix = cond_remote_prefix;
4260 }
4261
4262 new_kernel_policy->result = result;
4263 memcpy(&new_kernel_policy->result_parameter, &result_parameter, sizeof(result_parameter));
4264
4265 if (necp_debug) {
4266 NECPLOG(LOG_DEBUG, "Added kernel policy: socket, id=%d, mask=%x\n", new_kernel_policy->id, new_kernel_policy->condition_mask);
4267 }
4268 LIST_INSERT_SORTED_TWICE_ASCENDING(&necp_kernel_socket_policies, new_kernel_policy, chain, session_order, order, tmp_kernel_policy);
4269 done:
4270 return (new_kernel_policy ? new_kernel_policy->id : 0);
4271 }
4272
4273 static struct necp_kernel_socket_policy *
4274 necp_kernel_socket_policy_find(necp_kernel_policy_id policy_id)
4275 {
4276 struct necp_kernel_socket_policy *kernel_policy = NULL;
4277 struct necp_kernel_socket_policy *tmp_kernel_policy = NULL;
4278
4279 if (policy_id == 0) {
4280 return (NULL);
4281 }
4282
4283 LIST_FOREACH_SAFE(kernel_policy, &necp_kernel_socket_policies, chain, tmp_kernel_policy) {
4284 if (kernel_policy->id == policy_id) {
4285 return (kernel_policy);
4286 }
4287 }
4288
4289 return (NULL);
4290 }
4291
4292 static bool
4293 necp_kernel_socket_policy_delete(necp_kernel_policy_id policy_id)
4294 {
4295 struct necp_kernel_socket_policy *policy = NULL;
4296
4297 LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
4298
4299 policy = necp_kernel_socket_policy_find(policy_id);
4300 if (policy) {
4301 LIST_REMOVE(policy, chain);
4302
4303 if (policy->cond_bound_interface) {
4304 ifnet_release(policy->cond_bound_interface);
4305 policy->cond_bound_interface = NULL;
4306 }
4307
4308 if (policy->cond_domain) {
4309 FREE(policy->cond_domain, M_NECP);
4310 policy->cond_domain = NULL;
4311 }
4312
4313 if (policy->cond_custom_entitlement) {
4314 FREE(policy->cond_custom_entitlement, M_NECP);
4315 policy->cond_custom_entitlement = NULL;
4316 }
4317
4318 FREE_ZONE(policy, sizeof(*policy), M_NECP_SOCKET_POLICY);
4319 return (TRUE);
4320 }
4321
4322 return (FALSE);
4323 }
4324
4325 static inline const char *
4326 necp_get_result_description(char *result_string, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter)
4327 {
4328 uuid_string_t uuid_string;
4329 switch (result) {
4330 case NECP_KERNEL_POLICY_RESULT_NONE: {
4331 snprintf(result_string, MAX_RESULT_STRING_LEN, "None");
4332 break;
4333 }
4334 case NECP_KERNEL_POLICY_RESULT_PASS: {
4335 snprintf(result_string, MAX_RESULT_STRING_LEN, "Pass");
4336 break;
4337 }
4338 case NECP_KERNEL_POLICY_RESULT_SKIP: {
4339 snprintf(result_string, MAX_RESULT_STRING_LEN, "Skip (%u)", result_parameter.skip_policy_order);
4340 break;
4341 }
4342 case NECP_KERNEL_POLICY_RESULT_DROP: {
4343 snprintf(result_string, MAX_RESULT_STRING_LEN, "Drop");
4344 break;
4345 }
4346 case NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT: {
4347 snprintf(result_string, MAX_RESULT_STRING_LEN, "SocketDivert (%d)", result_parameter.flow_divert_control_unit);
4348 break;
4349 }
4350 case NECP_KERNEL_POLICY_RESULT_SOCKET_FILTER: {
4351 snprintf(result_string, MAX_RESULT_STRING_LEN, "SocketFilter (%d)", result_parameter.filter_control_unit);
4352 break;
4353 }
4354 case NECP_KERNEL_POLICY_RESULT_IP_TUNNEL: {
4355 ifnet_t interface = ifindex2ifnet[result_parameter.tunnel_interface_index];
4356 snprintf(result_string, MAX_RESULT_STRING_LEN, "IPTunnel (%s%d)", ifnet_name(interface), ifnet_unit(interface));
4357 break;
4358 }
4359 case NECP_KERNEL_POLICY_RESULT_IP_FILTER: {
4360 snprintf(result_string, MAX_RESULT_STRING_LEN, "IPFilter");
4361 break;
4362 }
4363 case NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED: {
4364 ifnet_t interface = ifindex2ifnet[result_parameter.scoped_interface_index];
4365 snprintf(result_string, MAX_RESULT_STRING_LEN, "SocketScoped (%s%d)", ifnet_name(interface), ifnet_unit(interface));
4366 break;
4367 }
4368 case NECP_KERNEL_POLICY_RESULT_ROUTE_RULES: {
4369 int index = 0;
4370 char interface_names[MAX_ROUTE_RULE_INTERFACES][IFXNAMSIZ]; // One IFXNAMSIZ-sized name per exception interface
4371 struct necp_route_rule *route_rule = necp_lookup_route_rule_locked(&necp_route_rules, result_parameter.route_rule_id);
4372 if (route_rule != NULL) {
4373 for (index = 0; index < MAX_ROUTE_RULE_INTERFACES; index++) {
4374 if (route_rule->exception_if_indices[index] != 0) {
4375 ifnet_t interface = ifindex2ifnet[route_rule->exception_if_indices[index]];
4376 snprintf(interface_names[index], IFXNAMSIZ, "%s%d", ifnet_name(interface), ifnet_unit(interface));
4377 } else {
4378 memset(interface_names[index], 0, IFXNAMSIZ);
4379 }
4380 }
4381 switch (route_rule->default_action) {
4382 case NECP_ROUTE_RULE_DENY_INTERFACE:
4383 snprintf(result_string, MAX_RESULT_STRING_LEN, "RouteRules (Only %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)",
4384 (route_rule->cellular_action == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? "Cell " : "",
4385 (route_rule->wifi_action == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? "WiFi " : "",
4386 (route_rule->wired_action == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? "Wired " : "",
4387 (route_rule->expensive_action == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? "Exp " : "",
4388 (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[0] : "",
4389 (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
4390 (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[1] : "",
4391 (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
4392 (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[2] : "",
4393 (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
4394 (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[3] : "",
4395 (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
4396 (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[4] : "",
4397 (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
4398 (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[5] : "",
4399 (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
4400 (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[6] : "",
4401 (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
4402 (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[7] : "",
4403 (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
4404 (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[8] : "",
4405 (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
4406 (route_rule->exception_if_actions[9] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[9] : "");
4407 break;
4408 case NECP_ROUTE_RULE_ALLOW_INTERFACE:
4409 snprintf(result_string, MAX_RESULT_STRING_LEN, "RouteRules (%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)",
4410 (route_rule->cellular_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!Cell " : "",
4411 (route_rule->wifi_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!WiFi " : "",
4412 (route_rule->wired_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!Wired " : "",
4413 (route_rule->expensive_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!Exp " : "",
4414 (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
4415 (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[0] : "",
4416 (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
4417 (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[1] : "",
4418 (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
4419 (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[2] : "",
4420 (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
4421 (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[3] : "",
4422 (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
4423 (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[4] : "",
4424 (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
4425 (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[5] : "",
4426 (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
4427 (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[6] : "",
4428 (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
4429 (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[7] : "",
4430 (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
4431 (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[8] : "",
4432 (route_rule->exception_if_actions[9] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
4433 (route_rule->exception_if_actions[9] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[9] : "");
4434 break;
4435 case NECP_ROUTE_RULE_QOS_MARKING:
4436 snprintf(result_string, MAX_RESULT_STRING_LEN, "RouteRules (QoSMarking %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)",
4437 (route_rule->cellular_action == NECP_ROUTE_RULE_QOS_MARKING) ? "Cell " : "",
4438 (route_rule->wifi_action == NECP_ROUTE_RULE_QOS_MARKING) ? "WiFi " : "",
4439 (route_rule->wired_action == NECP_ROUTE_RULE_QOS_MARKING) ? "Wired " : "",
4440 (route_rule->expensive_action == NECP_ROUTE_RULE_QOS_MARKING) ? "Exp " : "",
4441 (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[0] : "",
4442 (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
4443 (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[1] : "",
4444 (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
4445 (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[2] : "",
4446 (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
4447 (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[3] : "",
4448 (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
4449 (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[4] : "",
4450 (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
4451 (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[5] : "",
4452 (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
4453 (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[6] : "",
4454 (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
4455 (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[7] : "",
4456 (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
4457 (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[8] : "",
4458 (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
4459 (route_rule->exception_if_actions[9] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[9] : "");
4460 break;
4461 default:
4462 snprintf(result_string, MAX_RESULT_STRING_LEN, "RouteRules (Unknown)");
4463 break;
4464 }
4465 }
4466 break;
4467 }
4468 case NECP_KERNEL_POLICY_RESULT_USE_NETAGENT: {
4469 bool found_mapping = FALSE;
4470 struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.netagent_id);
4471 if (mapping != NULL) {
4472 uuid_unparse(mapping->uuid, uuid_string);
4473 found_mapping = TRUE;
4474 }
4475 snprintf(result_string, MAX_RESULT_STRING_LEN, "UseNetAgent (%s)", found_mapping ? uuid_string : "Unknown");
4476 break;
4477 }
4478 case NECP_KERNEL_POLICY_RESULT_TRIGGER: {
4479 bool found_mapping = FALSE;
4480 struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.service.identifier);
4481 if (mapping != NULL) {
4482 uuid_unparse(mapping->uuid, uuid_string);
4483 found_mapping = TRUE;
4484 }
4485 snprintf(result_string, MAX_RESULT_STRING_LEN, "Trigger (%s.%d)", found_mapping ? uuid_string : "Unknown", result_parameter.service.data);
4486 break;
4487 }
4488 case NECP_KERNEL_POLICY_RESULT_TRIGGER_IF_NEEDED: {
4489 bool found_mapping = FALSE;
4490 struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.service.identifier);
4491 if (mapping != NULL) {
4492 uuid_unparse(mapping->uuid, uuid_string);
4493 found_mapping = TRUE;
4494 }
4495 snprintf(result_string, MAX_RESULT_STRING_LEN, "TriggerIfNeeded (%s.%d)", found_mapping ? uuid_string : "Unknown", result_parameter.service.data);
4496 break;
4497 }
4498 case NECP_KERNEL_POLICY_RESULT_TRIGGER_SCOPED: {
4499 bool found_mapping = FALSE;
4500 struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.service.identifier);
4501 if (mapping != NULL) {
4502 uuid_unparse(mapping->uuid, uuid_string);
4503 found_mapping = TRUE;
4504 }
4505 snprintf(result_string, MAX_RESULT_STRING_LEN, "TriggerScoped (%s.%d)", found_mapping ? uuid_string : "Unknown", result_parameter.service.data);
4506 break;
4507 }
4508 case NECP_KERNEL_POLICY_RESULT_NO_TRIGGER_SCOPED: {
4509 bool found_mapping = FALSE;
4510 struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.service.identifier);
4511 if (mapping != NULL) {
4512 uuid_unparse(mapping->uuid, uuid_string);
4513 found_mapping = TRUE;
4514 }
4515 snprintf(result_string, MAX_RESULT_STRING_LEN, "NoTriggerScoped (%s.%d)", found_mapping ? uuid_string : "Unknown", result_parameter.service.data);
4516 break;
4517 }
4518 default: {
4519 snprintf(result_string, MAX_RESULT_STRING_LEN, "Unknown %d (%d)", result, result_parameter.tunnel_interface_index);
4520 break;
4521 }
4522 }
4523 return (result_string);
4524 }
4525
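// When necp_debug is set, log the application-layer policy map and every
// per-app-ID socket policy bucket, including each policy's order, condition
// mask, and result.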
4526 static void
4527 necp_kernel_socket_policies_dump_all(void)
4528 {
4529 if (necp_debug) {
4530 struct necp_kernel_socket_policy *policy = NULL;
4531 int policy_i;
4532 int app_i;
4533 char result_string[MAX_RESULT_STRING_LEN];
4534 char proc_name_string[MAXCOMLEN + 1];
4535 memset(result_string, 0, MAX_RESULT_STRING_LEN);
4536 memset(proc_name_string, 0, MAXCOMLEN + 1);
4537
4538 NECPLOG0(LOG_DEBUG, "NECP Application Policies:\n");
4539 NECPLOG0(LOG_DEBUG, "-----------\n");
4540 for (policy_i = 0; necp_kernel_socket_policies_app_layer_map != NULL && necp_kernel_socket_policies_app_layer_map[policy_i] != NULL; policy_i++) {
4541 policy = necp_kernel_socket_policies_app_layer_map[policy_i];
4542 proc_name(policy->session_pid, proc_name_string, MAXCOMLEN);
4543 NECPLOG(LOG_DEBUG, "\t%3d. Policy ID: %5d\tProcess: %10.10s\tOrder: %04d.%04d\tMask: %5x\tResult: %s\n", policy_i, policy->id, proc_name_string, policy->session_order, policy->order, policy->condition_mask, necp_get_result_description(result_string, policy->result, policy->result_parameter));
4544 }
4545 if (necp_kernel_socket_policies_app_layer_map != NULL && necp_kernel_socket_policies_app_layer_map[0] != NULL) {
4546 NECPLOG0(LOG_DEBUG, "-----------\n");
4547 }
4548
4549 NECPLOG0(LOG_DEBUG, "NECP Socket Policies:\n");
4550 NECPLOG0(LOG_DEBUG, "-----------\n");
4551 for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) {
4552 NECPLOG(LOG_DEBUG, "\tApp Bucket: %d\n", app_i);
4553 for (policy_i = 0; necp_kernel_socket_policies_map[app_i] != NULL && (necp_kernel_socket_policies_map[app_i])[policy_i] != NULL; policy_i++) {
4554 policy = (necp_kernel_socket_policies_map[app_i])[policy_i];
4555 proc_name(policy->session_pid, proc_name_string, MAXCOMLEN);
4556 NECPLOG(LOG_DEBUG, "\t%3d. Policy ID: %5d\tProcess: %10.10s\tOrder: %04d.%04d\tMask: %5x\tResult: %s\n", policy_i, policy->id, proc_name_string, policy->session_order, policy->order, policy->condition_mask, necp_get_result_description(result_string, policy->result, policy->result_parameter));
4557 }
4558 NECPLOG0(LOG_DEBUG, "-----------\n");
4559 }
4560 }
4561 }
4562
4563 static inline bool
4564 necp_kernel_socket_result_is_trigger_service_type(struct necp_kernel_socket_policy *kernel_policy)
4565 {
4566 return (kernel_policy->result >= NECP_KERNEL_POLICY_RESULT_TRIGGER && kernel_policy->result <= NECP_KERNEL_POLICY_RESULT_NO_TRIGGER_SCOPED);
4567 }
4568
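// Returns TRUE if a match on upper_policy would make lower_policy's result
// irrelevant: Drop always wins, filters/route rules/netagents never do,
// trigger-type results only overlap other trigger-type results, and Skip
// only covers lower policies of the same session inside its skip window.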
4569 static inline bool
4570 necp_kernel_socket_policy_results_overlap(struct necp_kernel_socket_policy *upper_policy, struct necp_kernel_socket_policy *lower_policy)
4571 {
4572 if (upper_policy->result == NECP_KERNEL_POLICY_RESULT_DROP) {
4573 // Drop always cancels out lower policies
4574 return (TRUE);
4575 } else if (upper_policy->result == NECP_KERNEL_POLICY_RESULT_SOCKET_FILTER ||
4576 upper_policy->result == NECP_KERNEL_POLICY_RESULT_ROUTE_RULES ||
4577 upper_policy->result == NECP_KERNEL_POLICY_RESULT_USE_NETAGENT) {
4578 // Filters and route rules never cancel out lower policies
4579 return (FALSE);
4580 } else if (necp_kernel_socket_result_is_trigger_service_type(upper_policy)) {
4581 // Trigger/Scoping policies can overlap one another, but not other results
4582 return (necp_kernel_socket_result_is_trigger_service_type(lower_policy));
4583 } else if (upper_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
4584 if (upper_policy->session_order != lower_policy->session_order) {
4585 // A skip cannot override a policy of a different session
4586 return (FALSE);
4587 } else {
4588 if (upper_policy->result_parameter.skip_policy_order == 0 ||
4589 lower_policy->order >= upper_policy->result_parameter.skip_policy_order) {
4590 // This policy is beyond the skip
4591 return (FALSE);
4592 } else {
4593 // This policy is inside the skip
4594 return (TRUE);
4595 }
4596 }
4597 }
4598
4599 // A hard pass, flow divert, tunnel, or scope will currently block out lower policies
4600 return (TRUE);
4601 }
4602
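// Returns TRUE if 'policy' can never take effect because an earlier entry in
// policy_array (outside any skip window) has an overlapping result and an
// equally or more general set of matching conditions, so it always matches
// first and masks this policy.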
4603 static bool
4604 necp_kernel_socket_policy_is_unnecessary(struct necp_kernel_socket_policy *policy, struct necp_kernel_socket_policy **policy_array, int valid_indices)
4605 {
4606 bool can_skip = FALSE;
4607 u_int32_t highest_skip_session_order = 0;
4608 u_int32_t highest_skip_order = 0;
4609 int i;
4610 for (i = 0; i < valid_indices; i++) {
4611 struct necp_kernel_socket_policy *compared_policy = policy_array[i];
4612
4613 // For policies in a skip window, we can't mark conflicting policies as unnecessary
4614 if (can_skip) {
4615 if (highest_skip_session_order != compared_policy->session_order ||
4616 (highest_skip_order != 0 && compared_policy->order >= highest_skip_order)) {
4617 // If we've moved on to the next session, or passed the skip window
4618 highest_skip_session_order = 0;
4619 highest_skip_order = 0;
4620 can_skip = FALSE;
4621 } else {
4622 // If this policy is also a skip, it can increase the skip window
4623 if (compared_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
4624 if (compared_policy->result_parameter.skip_policy_order > highest_skip_order) {
4625 highest_skip_order = compared_policy->result_parameter.skip_policy_order;
4626 }
4627 }
4628 continue;
4629 }
4630 }
4631
4632 if (compared_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
4633 // This policy is a skip. Set the skip window accordingly
4634 can_skip = TRUE;
4635 highest_skip_session_order = compared_policy->session_order;
4636 highest_skip_order = compared_policy->result_parameter.skip_policy_order;
4637 }
4638
4639 // The result of the compared policy must be able to block out this policy result
4640 if (!necp_kernel_socket_policy_results_overlap(compared_policy, policy)) {
4641 continue;
4642 }
4643
4644 // If new policy matches All Interfaces, compared policy must also
4645 if ((policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES) && !(compared_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES)) {
4646 continue;
4647 }
4648
4649 // A default policy (no conditions) always makes lower policies unnecessary
4650 if (compared_policy->condition_mask == 0) {
4651 return (TRUE);
4652 }
4653
4654 // Compared must be more general than policy, and include only conditions within policy
4655 if ((policy->condition_mask & compared_policy->condition_mask) != compared_policy->condition_mask) {
4656 continue;
4657 }
4658
4659 // Negative conditions must match for the overlapping conditions
4660 if ((policy->condition_negated_mask & compared_policy->condition_mask) != (compared_policy->condition_negated_mask & compared_policy->condition_mask)) {
4661 continue;
4662 }
4663
4664 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_DOMAIN &&
4665 strcmp(compared_policy->cond_domain, policy->cond_domain) != 0) {
4666 continue;
4667 }
4668
4669 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT &&
4670 strcmp(compared_policy->cond_custom_entitlement, policy->cond_custom_entitlement) != 0) {
4671 continue;
4672 }
4673
4674 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID &&
4675 compared_policy->cond_account_id != policy->cond_account_id) {
4676 continue;
4677 }
4678
4679 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID &&
4680 compared_policy->cond_policy_id != policy->cond_policy_id) {
4681 continue;
4682 }
4683
4684 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID &&
4685 compared_policy->cond_app_id != policy->cond_app_id) {
4686 continue;
4687 }
4688
4689 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID &&
4690 compared_policy->cond_real_app_id != policy->cond_real_app_id) {
4691 continue;
4692 }
4693
4694 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_PID &&
4695 compared_policy->cond_pid != policy->cond_pid) {
4696 continue;
4697 }
4698
4699 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_UID &&
4700 compared_policy->cond_uid != policy->cond_uid) {
4701 continue;
4702 }
4703
4704 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE &&
4705 compared_policy->cond_bound_interface != policy->cond_bound_interface) {
4706 continue;
4707 }
4708
4709 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL &&
4710 compared_policy->cond_protocol != policy->cond_protocol) {
4711 continue;
4712 }
4713
4714 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS &&
4715 !(compared_policy->cond_traffic_class.start_tc <= policy->cond_traffic_class.start_tc &&
4716 compared_policy->cond_traffic_class.end_tc >= policy->cond_traffic_class.end_tc)) {
4717 continue;
4718 }
4719
4720 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
4721 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
4722 if (!necp_is_range_in_range((struct sockaddr *)&policy->cond_local_start, (struct sockaddr *)&policy->cond_local_end, (struct sockaddr *)&compared_policy->cond_local_start, (struct sockaddr *)&compared_policy->cond_local_end)) {
4723 continue;
4724 }
4725 } else if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
4726 if (compared_policy->cond_local_prefix > policy->cond_local_prefix ||
4727 !necp_is_addr_in_subnet((struct sockaddr *)&policy->cond_local_start, (struct sockaddr *)&compared_policy->cond_local_start, compared_policy->cond_local_prefix)) {
4728 continue;
4729 }
4730 }
4731 }
4732
4733 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
4734 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
4735 if (!necp_is_range_in_range((struct sockaddr *)&policy->cond_remote_start, (struct sockaddr *)&policy->cond_remote_end, (struct sockaddr *)&compared_policy->cond_remote_start, (struct sockaddr *)&compared_policy->cond_remote_end)) {
4736 continue;
4737 }
4738 } else if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
4739 if (compared_policy->cond_remote_prefix > policy->cond_remote_prefix ||
4740 !necp_is_addr_in_subnet((struct sockaddr *)&policy->cond_remote_start, (struct sockaddr *)&compared_policy->cond_remote_start, compared_policy->cond_remote_prefix)) {
4741 continue;
4742 }
4743 }
4744 }
4745
4746 return (TRUE);
4747 }
4748
4749 return (FALSE);
4750 }
4751
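// Rebuild the derived lookup structures from the master socket policy list:
// recompute the global condition masks and counts, then regenerate the
// app-layer map and the per-app-ID bucket maps, omitting policies that
// necp_kernel_socket_policy_is_unnecessary() shows can never apply.
// Requires the kernel policy lock held exclusively; on allocation failure,
// frees the maps, resets all counts, and returns FALSE.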
4752 static bool
4753 necp_kernel_socket_policies_reprocess(void)
4754 {
4755 int app_i;
4756 int bucket_allocation_counts[NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS];
4757 int bucket_current_free_index[NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS];
4758 int app_layer_allocation_count = 0;
4759 int app_layer_current_free_index = 0;
4760 struct necp_kernel_socket_policy *kernel_policy = NULL;
4761
4762 LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
4763
4764 // Reset mask to 0
4765 necp_kernel_application_policies_condition_mask = 0;
4766 necp_kernel_socket_policies_condition_mask = 0;
4767 necp_kernel_application_policies_count = 0;
4768 necp_kernel_socket_policies_count = 0;
4769 necp_kernel_socket_policies_non_app_count = 0;
4770
4771 // Reset all maps to NULL
4772 for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) {
4773 if (necp_kernel_socket_policies_map[app_i] != NULL) {
4774 FREE(necp_kernel_socket_policies_map[app_i], M_NECP);
4775 necp_kernel_socket_policies_map[app_i] = NULL;
4776 }
4777
4778 // Init counts
4779 bucket_allocation_counts[app_i] = 0;
4780 }
4781 if (necp_kernel_socket_policies_app_layer_map != NULL) {
4782 FREE(necp_kernel_socket_policies_app_layer_map, M_NECP);
4783 necp_kernel_socket_policies_app_layer_map = NULL;
4784 }
4785
4786 // Create masks and counts
4787 LIST_FOREACH(kernel_policy, &necp_kernel_socket_policies, chain) {
4788 // App layer mask/count
4789 necp_kernel_application_policies_condition_mask |= kernel_policy->condition_mask;
4790 necp_kernel_application_policies_count++;
4791 app_layer_allocation_count++;
4792
4793 // Update socket layer bucket mask/counts
4794 necp_kernel_socket_policies_condition_mask |= kernel_policy->condition_mask;
4795 necp_kernel_socket_policies_count++;
4796
4797 if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID) ||
4798 kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_APP_ID) {
4799 necp_kernel_socket_policies_non_app_count++;
4800 for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) {
4801 bucket_allocation_counts[app_i]++;
4802 }
4803 } else {
4804 bucket_allocation_counts[NECP_SOCKET_MAP_APP_ID_TO_BUCKET(kernel_policy->cond_app_id)]++;
4805 }
4806 }
4807
4808 // Allocate maps
4809 for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) {
4810 if (bucket_allocation_counts[app_i] > 0) {
4811 // Allocate a NULL-terminated array of policy pointers for each bucket
4812 MALLOC(necp_kernel_socket_policies_map[app_i], struct necp_kernel_socket_policy **, sizeof(struct necp_kernel_socket_policy *) * (bucket_allocation_counts[app_i] + 1), M_NECP, M_WAITOK);
4813 if (necp_kernel_socket_policies_map[app_i] == NULL) {
4814 goto fail;
4815 }
4816
4817 // Initialize the first entry to NULL
4818 (necp_kernel_socket_policies_map[app_i])[0] = NULL;
4819 }
4820 bucket_current_free_index[app_i] = 0;
4821 }
4822 MALLOC(necp_kernel_socket_policies_app_layer_map, struct necp_kernel_socket_policy **, sizeof(struct necp_kernel_socket_policy *) * (app_layer_allocation_count + 1), M_NECP, M_WAITOK);
4823 if (necp_kernel_socket_policies_app_layer_map == NULL) {
4824 goto fail;
4825 }
4826 necp_kernel_socket_policies_app_layer_map[0] = NULL;
4827
4828 // Fill out maps
4829 LIST_FOREACH(kernel_policy, &necp_kernel_socket_policies, chain) {
4830 // Insert pointers into map
4831 if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID) ||
4832 kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_APP_ID) {
4833 for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) {
4834 if (!necp_kernel_socket_policy_is_unnecessary(kernel_policy, necp_kernel_socket_policies_map[app_i], bucket_current_free_index[app_i])) {
4835 (necp_kernel_socket_policies_map[app_i])[(bucket_current_free_index[app_i])] = kernel_policy;
4836 bucket_current_free_index[app_i]++;
4837 (necp_kernel_socket_policies_map[app_i])[(bucket_current_free_index[app_i])] = NULL;
4838 }
4839 }
4840 } else {
4841 app_i = NECP_SOCKET_MAP_APP_ID_TO_BUCKET(kernel_policy->cond_app_id);
4842 if (!necp_kernel_socket_policy_is_unnecessary(kernel_policy, necp_kernel_socket_policies_map[app_i], bucket_current_free_index[app_i])) {
4843 (necp_kernel_socket_policies_map[app_i])[(bucket_current_free_index[app_i])] = kernel_policy;
4844 bucket_current_free_index[app_i]++;
4845 (necp_kernel_socket_policies_map[app_i])[(bucket_current_free_index[app_i])] = NULL;
4846 }
4847 }
4848
4849 if (!necp_kernel_socket_policy_is_unnecessary(kernel_policy, necp_kernel_socket_policies_app_layer_map, app_layer_current_free_index)) {
4850 necp_kernel_socket_policies_app_layer_map[app_layer_current_free_index] = kernel_policy;
4851 app_layer_current_free_index++;
4852 necp_kernel_socket_policies_app_layer_map[app_layer_current_free_index] = NULL;
4853 }
4854 }
4855 necp_kernel_socket_policies_dump_all();
4856 BUMP_KERNEL_SOCKET_POLICIES_GENERATION_COUNT();
4857 return (TRUE);
4858
4859 fail:
4860 // Free memory, reset masks to 0
4861 necp_kernel_application_policies_condition_mask = 0;
4862 necp_kernel_socket_policies_condition_mask = 0;
4863 necp_kernel_application_policies_count = 0;
4864 necp_kernel_socket_policies_count = 0;
4865 necp_kernel_socket_policies_non_app_count = 0;
4866 for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) {
4867 if (necp_kernel_socket_policies_map[app_i] != NULL) {
4868 FREE(necp_kernel_socket_policies_map[app_i], M_NECP);
4869 necp_kernel_socket_policies_map[app_i] = NULL;
4870 }
4871 }
4872 if (necp_kernel_socket_policies_app_layer_map != NULL) {
4873 FREE(necp_kernel_socket_policies_app_layer_map, M_NECP);
4874 necp_kernel_socket_policies_app_layer_map = NULL;
4875 }
4876 return (FALSE);
4877 }
4878
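// Allocate the next string mapping identifier; IDs start at 1 and wrap back
// to 1 on overflow. Requires the kernel policy lock held exclusively.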
4879 static u_int32_t
4880 necp_get_new_string_id(void)
4881 {
4882 u_int32_t newid = 0;
4883
4884 LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
4885
4886 necp_last_string_id++;
4887 if (necp_last_string_id < 1) {
4888 necp_last_string_id = 1;
4889 }
4890
4891 newid = necp_last_string_id;
4892 if (newid == 0) {
4893 NECPLOG0(LOG_DEBUG, "Allocate string id failed.\n");
4894 return (0);
4895 }
4896
4897 return (newid);
4898 }
4899
4900 static struct necp_string_id_mapping *
4901 necp_lookup_string_to_id_locked(struct necp_string_id_mapping_list *list, char *string)
4902 {
4903 struct necp_string_id_mapping *searchentry = NULL;
4904 struct necp_string_id_mapping *foundentry = NULL;
4905
4906 LIST_FOREACH(searchentry, list, chain) {
4907 if (strcmp(searchentry->string, string) == 0) {
4908 foundentry = searchentry;
4909 break;
4910 }
4911 }
4912
4913 return (foundentry);
4914 }
4915
4916 static struct necp_string_id_mapping *
4917 necp_lookup_string_with_id_locked(struct necp_string_id_mapping_list *list, u_int32_t local_id)
4918 {
4919 struct necp_string_id_mapping *searchentry = NULL;
4920 struct necp_string_id_mapping *foundentry = NULL;
4921
4922 LIST_FOREACH(searchentry, list, chain) {
4923 if (searchentry->id == local_id) {
4924 foundentry = searchentry;
4925 break;
4926 }
4927 }
4928
4929 return (foundentry);
4930 }
4931
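// Return the ID associated with 'string', bumping the refcount of an
// existing mapping or allocating a new one (with a private copy of the
// string). Returns 0 if allocation fails.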
4932 static u_int32_t
4933 necp_create_string_to_id_mapping(struct necp_string_id_mapping_list *list, char *string)
4934 {
4935 u_int32_t string_id = 0;
4936 struct necp_string_id_mapping *existing_mapping = NULL;
4937
4938 LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
4939
4940 existing_mapping = necp_lookup_string_to_id_locked(list, string);
4941 if (existing_mapping != NULL) {
4942 string_id = existing_mapping->id;
4943 existing_mapping->refcount++;
4944 } else {
4945 struct necp_string_id_mapping *new_mapping = NULL;
4946 MALLOC(new_mapping, struct necp_string_id_mapping *, sizeof(struct necp_string_id_mapping), M_NECP, M_WAITOK);
4947 if (new_mapping != NULL) {
4948 memset(new_mapping, 0, sizeof(struct necp_string_id_mapping));
4949
4950 size_t length = strlen(string) + 1;
4951 MALLOC(new_mapping->string, char *, length, M_NECP, M_WAITOK);
4952 if (new_mapping->string != NULL) {
4953 memcpy(new_mapping->string, string, length);
4954 new_mapping->id = necp_get_new_string_id();
4955 new_mapping->refcount = 1;
4956 LIST_INSERT_HEAD(list, new_mapping, chain);
4957 string_id = new_mapping->id;
4958 } else {
4959 FREE(new_mapping, M_NECP);
4960 new_mapping = NULL;
4961 }
4962 }
4963 }
4964 return (string_id);
4965 }
4966
4967 static bool
4968 necp_remove_string_to_id_mapping(struct necp_string_id_mapping_list *list, char *string)
4969 {
4970 struct necp_string_id_mapping *existing_mapping = NULL;
4971
4972 LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
4973
4974 existing_mapping = necp_lookup_string_to_id_locked(list, string);
4975 if (existing_mapping != NULL) {
4976 if (--existing_mapping->refcount == 0) {
4977 LIST_REMOVE(existing_mapping, chain);
4978 FREE(existing_mapping->string, M_NECP);
4979 FREE(existing_mapping, M_NECP);
4980 }
4981 return (TRUE);
4982 }
4983
4984 return (FALSE);
4985 }
4986
4987 static u_int32_t
4988 necp_get_new_route_rule_id(void)
4989 {
4990 u_int32_t newid = 0;
4991
4992 LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
4993
4994 necp_last_route_rule_id++;
4995 if (necp_last_route_rule_id < 1 || necp_last_route_rule_id > UINT16_MAX) {
4996 necp_last_route_rule_id = 1;
4997 }
4998
4999 newid = necp_last_route_rule_id;
5000 if (newid == 0) {
5001 NECPLOG0(LOG_DEBUG, "Allocate route rule id failed.\n");
5002 return (0);
5003 }
5004
5005 return (newid);
5006 }
5007
5008 static u_int32_t
5009 necp_get_new_aggregate_route_rule_id(void)
5010 {
5011 u_int32_t newid = 0;
5012
5013 LCK_RW_ASSERT(&necp_route_rule_lock, LCK_RW_ASSERT_EXCLUSIVE);
5014
5015 necp_last_aggregate_route_rule_id++;
5016 if (necp_last_aggregate_route_rule_id <= UINT16_MAX) {
5017 necp_last_aggregate_route_rule_id = UINT16_MAX + 1;
5018 }
5019
5020 newid = necp_last_aggregate_route_rule_id;
5021 if (newid == 0) {
5022 NECPLOG0(LOG_DEBUG, "Allocate aggregate route rule id failed.\n");
5023 return (0);
5024 }
5025
5026 return (newid);
5027 }
5028
5029 static struct necp_route_rule *
5030 necp_lookup_route_rule_locked(struct necp_route_rule_list *list, u_int32_t route_rule_id)
5031 {
5032 struct necp_route_rule *searchentry = NULL;
5033 struct necp_route_rule *foundentry = NULL;
5034
5035 LIST_FOREACH(searchentry, list, chain) {
5036 if (searchentry->id == route_rule_id) {
5037 foundentry = searchentry;
5038 break;
5039 }
5040 }
5041
5042 return (foundentry);
5043 }
5044
5045 static struct necp_route_rule *
5046 necp_lookup_route_rule_by_contents_locked(struct necp_route_rule_list *list, u_int32_t default_action, u_int8_t cellular_action, u_int8_t wifi_action, u_int8_t wired_action, u_int8_t expensive_action, u_int32_t *if_indices, u_int8_t *if_actions)
5047 {
5048 struct necp_route_rule *searchentry = NULL;
5049 struct necp_route_rule *foundentry = NULL;
5050
5051 LIST_FOREACH(searchentry, list, chain) {
5052 if (searchentry->default_action == default_action &&
5053 searchentry->cellular_action == cellular_action &&
5054 searchentry->wifi_action == wifi_action &&
5055 searchentry->wired_action == wired_action &&
5056 searchentry->expensive_action == expensive_action) {
5057 bool match_failed = FALSE;
5058 size_t index_a = 0;
5059 size_t index_b = 0;
5060 size_t count_a = 0;
5061 size_t count_b = 0;
5062 for (index_a = 0; index_a < MAX_ROUTE_RULE_INTERFACES; index_a++) {
5063 bool found_index = FALSE;
5064 if (searchentry->exception_if_indices[index_a] == 0) {
5065 break;
5066 }
5067 count_a++;
5068 for (index_b = 0; index_b < MAX_ROUTE_RULE_INTERFACES; index_b++) {
5069 if (if_indices[index_b] == 0) {
5070 break;
5071 }
5072 if (index_b >= count_b) {
5073 count_b = index_b + 1;
5074 }
5075 if (searchentry->exception_if_indices[index_a] == if_indices[index_b] &&
5076 searchentry->exception_if_actions[index_a] == if_actions[index_b]) {
5077 found_index = TRUE;
5078 break;
5079 }
5080 }
5081 if (!found_index) {
5082 match_failed = TRUE;
5083 break;
5084 }
5085 }
5086 if (!match_failed && count_a == count_b) {
5087 foundentry = searchentry;
5088 break;
5089 }
5090 }
5091 }
5092
5093 return (foundentry);
5094 }
5095
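// Parse a TLV array of route rules into a default action, per-interface-type
// actions (cellular, Wi-Fi, wired, expensive), and a list of per-interface
// exception actions. If an identical rule already exists, its refcount is
// bumped and its ID reused; otherwise a new rule is allocated. Returns the
// route rule ID, or 0 on failure or empty input.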
5096 static u_int32_t
5097 necp_create_route_rule(struct necp_route_rule_list *list, u_int8_t *route_rules_array, u_int32_t route_rules_array_size)
5098 {
5099 size_t offset = 0;
5100 u_int32_t route_rule_id = 0;
5101 struct necp_route_rule *existing_rule = NULL;
5102 u_int32_t default_action = NECP_ROUTE_RULE_ALLOW_INTERFACE;
5103 u_int8_t cellular_action = NECP_ROUTE_RULE_NONE;
5104 u_int8_t wifi_action = NECP_ROUTE_RULE_NONE;
5105 u_int8_t wired_action = NECP_ROUTE_RULE_NONE;
5106 u_int8_t expensive_action = NECP_ROUTE_RULE_NONE;
5107 u_int32_t if_indices[MAX_ROUTE_RULE_INTERFACES];
5108 size_t num_valid_indices = 0;
5109 memset(&if_indices, 0, sizeof(if_indices));
5110 u_int8_t if_actions[MAX_ROUTE_RULE_INTERFACES];
5111 memset(&if_actions, 0, sizeof(if_actions));
5112
5113 LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
5114
5115 if (route_rules_array == NULL || route_rules_array_size == 0) {
5116 return (0);
5117 }
5118
5119 // Process rules
5120 while (offset < route_rules_array_size) {
5121 ifnet_t rule_interface = NULL;
5122 char interface_name[IFXNAMSIZ];
5123 u_int32_t length = 0;
5124 u_int8_t *value = necp_buffer_get_tlv_value(route_rules_array, offset, &length);
5125
5126 u_int8_t rule_type = necp_policy_condition_get_type_from_buffer(value, length);
5127 u_int8_t rule_flags = necp_policy_condition_get_flags_from_buffer(value, length);
5128 u_int32_t rule_length = necp_policy_condition_get_value_length_from_buffer(value, length);
5129 u_int8_t *rule_value = necp_policy_condition_get_value_pointer_from_buffer(value, length);
5130
5131 if (rule_type == NECP_ROUTE_RULE_NONE) {
5132 // Don't allow an explicit rule to use the None action; still advance
5133 // past this TLV so the parsing loop makes progress
5134 offset += sizeof(u_int8_t) + sizeof(u_int32_t) + length;
5135 continue;
5136 }
5135
5136 if (rule_length == 0) {
5137 if (rule_flags & NECP_ROUTE_RULE_FLAG_CELLULAR) {
5138 cellular_action = rule_type;
5139 }
5140 if (rule_flags & NECP_ROUTE_RULE_FLAG_WIFI) {
5141 wifi_action = rule_type;
5142 }
5143 if (rule_flags & NECP_ROUTE_RULE_FLAG_WIRED) {
5144 wired_action = rule_type;
5145 }
5146 if (rule_flags & NECP_ROUTE_RULE_FLAG_EXPENSIVE) {
5147 expensive_action = rule_type;
5148 }
5149 if (rule_flags == 0) {
5150 default_action = rule_type;
5151 }
5152 offset += sizeof(u_int8_t) + sizeof(u_int32_t) + length;
5153 continue;
5154 }
5155
5156 if (num_valid_indices >= MAX_ROUTE_RULE_INTERFACES) {
5157 offset += sizeof(u_int8_t) + sizeof(u_int32_t) + length;
5158 continue;
5159 }
5160
5161 if (rule_length <= IFXNAMSIZ) {
5162 memcpy(interface_name, rule_value, rule_length);
5163 interface_name[rule_length - 1] = 0; // Make sure the string is NULL terminated
5164 if (ifnet_find_by_name(interface_name, &rule_interface) == 0) {
5165 if_actions[num_valid_indices] = rule_type;
5166 if_indices[num_valid_indices++] = rule_interface->if_index;
5167 ifnet_release(rule_interface);
5168 }
5169 }
5170 offset += sizeof(u_int8_t) + sizeof(u_int32_t) + length;
5171 }
5172
5173 existing_rule = necp_lookup_route_rule_by_contents_locked(list, default_action, cellular_action, wifi_action, wired_action, expensive_action, if_indices, if_actions);
5174 if (existing_rule != NULL) {
5175 route_rule_id = existing_rule->id;
5176 existing_rule->refcount++;
5177 } else {
5178 struct necp_route_rule *new_rule = NULL;
5179 MALLOC(new_rule, struct necp_route_rule *, sizeof(struct necp_route_rule), M_NECP, M_WAITOK);
5180 if (new_rule != NULL) {
5181 memset(new_rule, 0, sizeof(struct necp_route_rule));
5182 route_rule_id = new_rule->id = necp_get_new_route_rule_id();
5183 new_rule->default_action = default_action;
5184 new_rule->cellular_action = cellular_action;
5185 new_rule->wifi_action = wifi_action;
5186 new_rule->wired_action = wired_action;
5187 new_rule->expensive_action = expensive_action;
5188 memcpy(&new_rule->exception_if_indices, &if_indices, sizeof(if_indices));
5189 memcpy(&new_rule->exception_if_actions, &if_actions, sizeof(if_actions));
5190 new_rule->refcount = 1;
5191 LIST_INSERT_HEAD(list, new_rule, chain);
5192 }
5193 }
5194 return (route_rule_id);
5195 }
5196
5197 static void
5198 necp_remove_aggregate_route_rule_for_id(u_int32_t rule_id)
5199 {
5200 if (rule_id) {
5201 lck_rw_lock_exclusive(&necp_route_rule_lock);
5202
5203 struct necp_aggregate_route_rule *existing_rule = NULL;
5204 struct necp_aggregate_route_rule *tmp_rule = NULL;
5205
5206 LIST_FOREACH_SAFE(existing_rule, &necp_aggregate_route_rules, chain, tmp_rule) {
5207 int index = 0;
5208 for (index = 0; index < MAX_AGGREGATE_ROUTE_RULES; index++) {
5209 u_int32_t route_rule_id = existing_rule->rule_ids[index];
5210 if (route_rule_id == rule_id) {
5211 LIST_REMOVE(existing_rule, chain);
5212 FREE(existing_rule, M_NECP);
5213 break;
5214 }
5215 }
5216 }
5217
5218 lck_rw_done(&necp_route_rule_lock);
5219 }
5220 }
5221
5222 static bool
5223 necp_remove_route_rule(struct necp_route_rule_list *list, u_int32_t route_rule_id)
5224 {
5225 struct necp_route_rule *existing_rule = NULL;
5226
5227 LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
5228
5229 existing_rule = necp_lookup_route_rule_locked(list, route_rule_id);
5230 if (existing_rule != NULL) {
5231 if (--existing_rule->refcount == 0) {
5232 necp_remove_aggregate_route_rule_for_id(existing_rule->id);
5233 LIST_REMOVE(existing_rule, chain);
5234 FREE(existing_rule, M_NECP);
5235 }
5236 return (TRUE);
5237 }
5238
5239 return (FALSE);
5240 }
5241
5242 static struct necp_aggregate_route_rule *
5243 necp_lookup_aggregate_route_rule_locked(u_int32_t route_rule_id)
5244 {
5245 struct necp_aggregate_route_rule *searchentry = NULL;
5246 struct necp_aggregate_route_rule *foundentry = NULL;
5247
5248 lck_rw_lock_shared(&necp_route_rule_lock);
5249
5250 LIST_FOREACH(searchentry, &necp_aggregate_route_rules, chain) {
5251 if (searchentry->id == route_rule_id) {
5252 foundentry = searchentry;
5253 break;
5254 }
5255 }
5256
5257 lck_rw_done(&necp_route_rule_lock);
5258
5259 return (foundentry);
5260 }
5261
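// Find or create an aggregate route rule bundling up to
// MAX_AGGREGATE_ROUTE_RULES individual rule IDs. Aggregate IDs are allocated
// above UINT16_MAX so they never collide with individual route rule IDs,
// which stay within the 16-bit range.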
5262 static u_int32_t
5263 necp_create_aggregate_route_rule(u_int32_t *rule_ids)
5264 {
5265 u_int32_t aggregate_route_rule_id = 0;
5266 struct necp_aggregate_route_rule *new_rule = NULL;
5267 struct necp_aggregate_route_rule *existing_rule = NULL;
5268
5269 LIST_FOREACH(existing_rule, &necp_aggregate_route_rules, chain) {
5270 if (memcmp(existing_rule->rule_ids, rule_ids, (sizeof(u_int32_t) * MAX_AGGREGATE_ROUTE_RULES)) == 0) {
5271 return (existing_rule->id);
5272 }
5273 }
5274
5275 lck_rw_lock_exclusive(&necp_route_rule_lock);
5276
5277 LIST_FOREACH(existing_rule, &necp_aggregate_route_rules, chain) {
5278 // Re-check, in case something else created the rule while we are waiting to lock
5279 if (memcmp(existing_rule->rule_ids, rule_ids, (sizeof(u_int32_t) * MAX_AGGREGATE_ROUTE_RULES)) == 0) {
5280 lck_rw_done(&necp_route_rule_lock);
5281 return (existing_rule->id);
5282 }
5283 }
5284
5285 MALLOC(new_rule, struct necp_aggregate_route_rule *, sizeof(struct necp_aggregate_route_rule), M_NECP, M_WAITOK);
5286 if (new_rule != NULL) {
5287 memset(new_rule, 0, sizeof(struct necp_aggregate_route_rule));
5288 aggregate_route_rule_id = new_rule->id = necp_get_new_aggregate_route_rule_id();
5290 memcpy(new_rule->rule_ids, rule_ids, (sizeof(u_int32_t) * MAX_AGGREGATE_ROUTE_RULES));
5291 LIST_INSERT_HEAD(&necp_aggregate_route_rules, new_rule, chain);
5292 }
5293 lck_rw_done(&necp_route_rule_lock);
5294
5295 return (aggregate_route_rule_id);
5296 }
5297
5298 #define NECP_NULL_SERVICE_ID 1
5299 static u_int32_t
5300 necp_get_new_uuid_id(void)
5301 {
5302 u_int32_t newid = 0;
5303
5304 LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
5305
5306 necp_last_uuid_id++;
5307 if (necp_last_uuid_id < (NECP_NULL_SERVICE_ID + 1)) {
5308 necp_last_uuid_id = (NECP_NULL_SERVICE_ID + 1);
5309 }
5310
5311 newid = necp_last_uuid_id;
5312 if (newid == 0) {
5313 NECPLOG0(LOG_DEBUG, "Allocate uuid id failed.\n");
5314 return (0);
5315 }
5316
5317 return (newid);
5318 }
5319
5320 static struct necp_uuid_id_mapping *
5321 necp_uuid_lookup_app_id_locked(uuid_t uuid)
5322 {
5323 struct necp_uuid_id_mapping *searchentry = NULL;
5324 struct necp_uuid_id_mapping *foundentry = NULL;
5325
5326 LIST_FOREACH(searchentry, APPUUIDHASH(uuid), chain) {
5327 if (uuid_compare(searchentry->uuid, uuid) == 0) {
5328 foundentry = searchentry;
5329 break;
5330 }
5331 }
5332
5333 return (foundentry);
5334 }
5335
5336 static struct necp_uuid_id_mapping *
5337 necp_uuid_lookup_uuid_with_app_id_locked(u_int32_t local_id)
5338 {
5339 struct necp_uuid_id_mapping *searchentry = NULL;
5340 struct necp_uuid_id_mapping *foundentry = NULL;
5341
5342 struct necp_uuid_id_mapping_head *uuid_list_head = NULL;
5343 for (uuid_list_head = &necp_uuid_app_id_hashtbl[necp_uuid_app_id_hash_num_buckets - 1]; uuid_list_head >= necp_uuid_app_id_hashtbl; uuid_list_head--) {
5344 LIST_FOREACH(searchentry, uuid_list_head, chain) {
5345 if (searchentry->id == local_id) {
5346 foundentry = searchentry;
5347 break;
5348 }
5349 }
5350 }
5351
5352 return (foundentry);
5353 }
5354
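// Return the local ID for an application UUID, bumping the refcount of an
// existing hash table entry or inserting a new one. *allocated_mapping is
// set to TRUE only when a new entry was created; uuid_policy_table marks the
// entry as referenced by the proc UUID policy table (table_refcount).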
5355 static u_int32_t
5356 necp_create_uuid_app_id_mapping(uuid_t uuid, bool *allocated_mapping, bool uuid_policy_table)
5357 {
5358 u_int32_t local_id = 0;
5359 struct necp_uuid_id_mapping *existing_mapping = NULL;
5360
5361 LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
5362
5363 if (allocated_mapping) {
5364 *allocated_mapping = FALSE;
5365 }
5366
5367 existing_mapping = necp_uuid_lookup_app_id_locked(uuid);
5368 if (existing_mapping != NULL) {
5369 local_id = existing_mapping->id;
5370 existing_mapping->refcount++;
5371 if (uuid_policy_table) {
5372 existing_mapping->table_refcount++;
5373 }
5374 } else {
5375 struct necp_uuid_id_mapping *new_mapping = NULL;
5376 MALLOC(new_mapping, struct necp_uuid_id_mapping *, sizeof(*new_mapping), M_NECP, M_WAITOK);
5377 if (new_mapping != NULL) {
5378 uuid_copy(new_mapping->uuid, uuid);
5379 new_mapping->id = necp_get_new_uuid_id();
5380 new_mapping->refcount = 1;
5381 if (uuid_policy_table) {
5382 new_mapping->table_refcount = 1;
5383 } else {
5384 new_mapping->table_refcount = 0;
5385 }
5386
5387 LIST_INSERT_HEAD(APPUUIDHASH(uuid), new_mapping, chain);
5388
5389 if (allocated_mapping) {
5390 *allocated_mapping = TRUE;
5391 }
5392
5393 local_id = new_mapping->id;
5394 }
5395 }
5396
5397 return (local_id);
5398 }
5399
5400 static bool
5401 necp_remove_uuid_app_id_mapping(uuid_t uuid, bool *removed_mapping, bool uuid_policy_table)
5402 {
5403 struct necp_uuid_id_mapping *existing_mapping = NULL;
5404
5405 LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
5406
5407 if (removed_mapping) {
5408 *removed_mapping = FALSE;
5409 }
5410
5411 existing_mapping = necp_uuid_lookup_app_id_locked(uuid);
5412 if (existing_mapping != NULL) {
5413 if (uuid_policy_table) {
5414 existing_mapping->table_refcount--;
5415 }
5416 if (--existing_mapping->refcount == 0) {
5417 LIST_REMOVE(existing_mapping, chain);
5418 FREE(existing_mapping, M_NECP);
5419 if (removed_mapping) {
5420 *removed_mapping = TRUE;
5421 }
5422 }
5423 return (TRUE);
5424 }
5425
5426 return (FALSE);
5427 }
5428
5429 static struct necp_uuid_id_mapping *
5430 necp_uuid_get_null_service_id_mapping(void)
5431 {
5432 static struct necp_uuid_id_mapping null_mapping;
5433 uuid_clear(null_mapping.uuid);
5434 null_mapping.id = NECP_NULL_SERVICE_ID;
5435
5436 return (&null_mapping);
5437 }
5438
5439 static struct necp_uuid_id_mapping *
5440 necp_uuid_lookup_service_id_locked(uuid_t uuid)
5441 {
5442 struct necp_uuid_id_mapping *searchentry = NULL;
5443 struct necp_uuid_id_mapping *foundentry = NULL;
5444
5445 if (uuid_is_null(uuid)) {
5446 return necp_uuid_get_null_service_id_mapping();
5447 }
5448
5449 LIST_FOREACH(searchentry, &necp_uuid_service_id_list, chain) {
5450 if (uuid_compare(searchentry->uuid, uuid) == 0) {
5451 foundentry = searchentry;
5452 break;
5453 }
5454 }
5455
5456 return (foundentry);
5457 }
5458
5459 static struct necp_uuid_id_mapping *
5460 necp_uuid_lookup_uuid_with_service_id_locked(u_int32_t local_id)
5461 {
5462 struct necp_uuid_id_mapping *searchentry = NULL;
5463 struct necp_uuid_id_mapping *foundentry = NULL;
5464
5465 if (local_id == NECP_NULL_SERVICE_ID) {
5466 return necp_uuid_get_null_service_id_mapping();
5467 }
5468
5469 LIST_FOREACH(searchentry, &necp_uuid_service_id_list, chain) {
5470 if (searchentry->id == local_id) {
5471 foundentry = searchentry;
5472 break;
5473 }
5474 }
5475
5476 return (foundentry);
5477 }
5478
5479 static u_int32_t
5480 necp_create_uuid_service_id_mapping(uuid_t uuid)
5481 {
5482 u_int32_t local_id = 0;
5483 struct necp_uuid_id_mapping *existing_mapping = NULL;
5484
5485 if (uuid_is_null(uuid)) {
5486 return (NECP_NULL_SERVICE_ID);
5487 }
5488
5489 LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
5490
5491 existing_mapping = necp_uuid_lookup_service_id_locked(uuid);
5492 if (existing_mapping != NULL) {
5493 local_id = existing_mapping->id;
5494 existing_mapping->refcount++;
5495 } else {
5496 struct necp_uuid_id_mapping *new_mapping = NULL;
5497 MALLOC(new_mapping, struct necp_uuid_id_mapping *, sizeof(*new_mapping), M_NECP, M_WAITOK);
5498 if (new_mapping != NULL) {
5499 uuid_copy(new_mapping->uuid, uuid);
5500 new_mapping->id = necp_get_new_uuid_id();
5501 new_mapping->refcount = 1;
5502
5503 LIST_INSERT_HEAD(&necp_uuid_service_id_list, new_mapping, chain);
5504
5505 local_id = new_mapping->id;
5506 }
5507 }
5508
5509 return (local_id);
5510 }
5511
5512 static bool
5513 necp_remove_uuid_service_id_mapping(uuid_t uuid)
5514 {
5515 struct necp_uuid_id_mapping *existing_mapping = NULL;
5516
5517 if (uuid_is_null(uuid)) {
5518 return (TRUE);
5519 }
5520
5521 LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
5522
5523 existing_mapping = necp_uuid_lookup_service_id_locked(uuid); // Search the service ID list, not the app ID hash table
5524 if (existing_mapping != NULL) {
5525 if (--existing_mapping->refcount == 0) {
5526 LIST_REMOVE(existing_mapping, chain);
5527 FREE(existing_mapping, M_NECP);
5528 }
5529 return (TRUE);
5530 }
5531
5532 return (FALSE);
5533 }
5534
5535
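// If the app UUID mappings have been marked dirty, resynchronize the kernel
// proc UUID policy table: clear it, then re-add every UUID whose mapping
// still has table_refcount > 0.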
5536 static bool
5537 necp_kernel_socket_policies_update_uuid_table(void)
5538 {
5539 LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
5540
5541 if (necp_uuid_app_id_mappings_dirty) {
5542 if (proc_uuid_policy_kernel(PROC_UUID_POLICY_OPERATION_CLEAR, NULL, PROC_UUID_NECP_APP_POLICY) < 0) {
5543 NECPLOG0(LOG_DEBUG, "Error clearing uuids from policy table\n");
5544 return (FALSE);
5545 }
5546
5547 if (necp_num_uuid_app_id_mappings > 0) {
5548 struct necp_uuid_id_mapping_head *uuid_list_head = NULL;
5549 for (uuid_list_head = &necp_uuid_app_id_hashtbl[necp_uuid_app_id_hash_num_buckets - 1]; uuid_list_head >= necp_uuid_app_id_hashtbl; uuid_list_head--) {
5550 struct necp_uuid_id_mapping *mapping = NULL;
5551 LIST_FOREACH(mapping, uuid_list_head, chain) {
5552 if (mapping->table_refcount > 0 &&
5553 proc_uuid_policy_kernel(PROC_UUID_POLICY_OPERATION_ADD, mapping->uuid, PROC_UUID_NECP_APP_POLICY) < 0) {
5554 NECPLOG0(LOG_DEBUG, "Error adding uuid to policy table\n");
5555 }
5556 }
5557 }
5558 }
5559
5560 necp_uuid_app_id_mappings_dirty = FALSE;
5561 }
5562
5563 return (TRUE);
5564 }
5565
5566 #define NECP_KERNEL_VALID_IP_OUTPUT_CONDITIONS (NECP_KERNEL_CONDITION_ALL_INTERFACES | NECP_KERNEL_CONDITION_BOUND_INTERFACE | NECP_KERNEL_CONDITION_PROTOCOL | NECP_KERNEL_CONDITION_LOCAL_START | NECP_KERNEL_CONDITION_LOCAL_END | NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_REMOTE_START | NECP_KERNEL_CONDITION_REMOTE_END | NECP_KERNEL_CONDITION_REMOTE_PREFIX | NECP_KERNEL_CONDITION_POLICY_ID | NECP_KERNEL_CONDITION_LAST_INTERFACE)
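// Create an IP-output-layer kernel policy, keeping only the conditions that
// are valid at this layer (sanitized against
// NECP_KERNEL_VALID_IP_OUTPUT_CONDITIONS), and insert it into the global
// list sorted by session order, order, and suborder. Returns the new policy
// ID, or 0 on allocation failure.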
5567 static necp_kernel_policy_id
5568 necp_kernel_ip_output_policy_add(necp_policy_id parent_policy_id, necp_policy_order order, necp_policy_order suborder, u_int32_t session_order, int session_pid, u_int32_t condition_mask, u_int32_t condition_negated_mask, necp_kernel_policy_id cond_policy_id, ifnet_t cond_bound_interface, u_int32_t cond_last_interface_index, u_int16_t cond_protocol, union necp_sockaddr_union *cond_local_start, union necp_sockaddr_union *cond_local_end, u_int8_t cond_local_prefix, union necp_sockaddr_union *cond_remote_start, union necp_sockaddr_union *cond_remote_end, u_int8_t cond_remote_prefix, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter)
5569 {
5570 struct necp_kernel_ip_output_policy *new_kernel_policy = NULL;
5571 struct necp_kernel_ip_output_policy *tmp_kernel_policy = NULL;
5572
5573 MALLOC_ZONE(new_kernel_policy, struct necp_kernel_ip_output_policy *, sizeof(*new_kernel_policy), M_NECP_IP_POLICY, M_WAITOK);
5574 if (new_kernel_policy == NULL) {
5575 goto done;
5576 }
5577
5578 memset(new_kernel_policy, 0, sizeof(*new_kernel_policy)); // M_ZERO is not supported for MALLOC_ZONE
5579 new_kernel_policy->parent_policy_id = parent_policy_id;
5580 new_kernel_policy->id = necp_kernel_policy_get_new_id(false);
5581 new_kernel_policy->suborder = suborder;
5582 new_kernel_policy->order = order;
5583 new_kernel_policy->session_order = session_order;
5584 new_kernel_policy->session_pid = session_pid;
5585
5586 // Sanitize condition mask
5587 new_kernel_policy->condition_mask = (condition_mask & NECP_KERNEL_VALID_IP_OUTPUT_CONDITIONS);
5588 if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES) && (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE)) {
5589 new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_BOUND_INTERFACE;
5590 }
5591 if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) && (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX)) {
5592 new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_LOCAL_PREFIX;
5593 }
5594 if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) && (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX)) {
5595 new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_REMOTE_PREFIX;
5596 }
5597 new_kernel_policy->condition_negated_mask = condition_negated_mask & new_kernel_policy->condition_mask;
5598
5599 // Set condition values
5600 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID) {
5601 new_kernel_policy->cond_policy_id = cond_policy_id;
5602 }
5603 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
5604 if (cond_bound_interface) {
5605 ifnet_reference(cond_bound_interface);
5606 }
5607 new_kernel_policy->cond_bound_interface = cond_bound_interface;
5608 }
5609 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LAST_INTERFACE) {
5610 new_kernel_policy->cond_last_interface_index = cond_last_interface_index;
5611 }
5612 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
5613 new_kernel_policy->cond_protocol = cond_protocol;
5614 }
5615 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
5616 memcpy(&new_kernel_policy->cond_local_start, cond_local_start, cond_local_start->sa.sa_len);
5617 }
5618 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
5619 memcpy(&new_kernel_policy->cond_local_end, cond_local_end, cond_local_end->sa.sa_len);
5620 }
5621 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
5622 new_kernel_policy->cond_local_prefix = cond_local_prefix;
5623 }
5624 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
5625 memcpy(&new_kernel_policy->cond_remote_start, cond_remote_start, cond_remote_start->sa.sa_len);
5626 }
5627 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
5628 memcpy(&new_kernel_policy->cond_remote_end, cond_remote_end, cond_remote_end->sa.sa_len);
5629 }
5630 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
5631 new_kernel_policy->cond_remote_prefix = cond_remote_prefix;
5632 }
5633
5634 new_kernel_policy->result = result;
5635 memcpy(&new_kernel_policy->result_parameter, &result_parameter, sizeof(result_parameter));
5636
5637 if (necp_debug) {
5638 NECPLOG(LOG_DEBUG, "Added kernel policy: ip output, id=%d, mask=%x\n", new_kernel_policy->id, new_kernel_policy->condition_mask);
5639 }
5640 LIST_INSERT_SORTED_THRICE_ASCENDING(&necp_kernel_ip_output_policies, new_kernel_policy, chain, session_order, order, suborder, tmp_kernel_policy);
5641 done:
5642 return (new_kernel_policy ? new_kernel_policy->id : 0);
5643 }
5644
5645 static struct necp_kernel_ip_output_policy *
5646 necp_kernel_ip_output_policy_find(necp_kernel_policy_id policy_id)
5647 {
5648 struct necp_kernel_ip_output_policy *kernel_policy = NULL;
5649 struct necp_kernel_ip_output_policy *tmp_kernel_policy = NULL;
5650
5651 if (policy_id == 0) {
5652 return (NULL);
5653 }
5654
5655 LIST_FOREACH_SAFE(kernel_policy, &necp_kernel_ip_output_policies, chain, tmp_kernel_policy) {
5656 if (kernel_policy->id == policy_id) {
5657 return (kernel_policy);
5658 }
5659 }
5660
5661 return (NULL);
5662 }
5663
5664 static bool
5665 necp_kernel_ip_output_policy_delete(necp_kernel_policy_id policy_id)
5666 {
5667 struct necp_kernel_ip_output_policy *policy = NULL;
5668
5669 LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
5670
5671 policy = necp_kernel_ip_output_policy_find(policy_id);
5672 if (policy) {
5673 LIST_REMOVE(policy, chain);
5674
5675 if (policy->cond_bound_interface) {
5676 ifnet_release(policy->cond_bound_interface);
5677 policy->cond_bound_interface = NULL;
5678 }
5679
5680 FREE_ZONE(policy, sizeof(*policy), M_NECP_IP_POLICY);
5681 return (TRUE);
5682 }
5683
5684 return (FALSE);
5685 }
5686
5687 static void
5688 necp_kernel_ip_output_policies_dump_all(void)
5689 {
5690 if (necp_debug) {
5691 struct necp_kernel_ip_output_policy *policy = NULL;
5692 int policy_i;
5693 int id_i;
5694 char result_string[MAX_RESULT_STRING_LEN];
5695 char proc_name_string[MAXCOMLEN + 1];
5696 memset(result_string, 0, MAX_RESULT_STRING_LEN);
5697 memset(proc_name_string, 0, MAXCOMLEN + 1);
5698
5699 NECPLOG0(LOG_DEBUG, "NECP IP Output Policies:\n");
5700 NECPLOG0(LOG_DEBUG, "-----------\n");
5701 for (id_i = 0; id_i < NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS; id_i++) {
5702 NECPLOG(LOG_DEBUG, " ID Bucket: %d\n", id_i);
5703 for (policy_i = 0; necp_kernel_ip_output_policies_map[id_i] != NULL && (necp_kernel_ip_output_policies_map[id_i])[policy_i] != NULL; policy_i++) {
5704 policy = (necp_kernel_ip_output_policies_map[id_i])[policy_i];
5705 proc_name(policy->session_pid, proc_name_string, MAXCOMLEN);
5706 NECPLOG(LOG_DEBUG, "\t%3d. Policy ID: %5d\tProcess: %10.10s\tOrder: %04d.%04d.%d\tMask: %5x\tResult: %s\n", policy_i, policy->id, proc_name_string, policy->session_order, policy->order, policy->suborder, policy->condition_mask, necp_get_result_description(result_string, policy->result, policy->result_parameter));
5707 }
5708 NECPLOG0(LOG_DEBUG, "-----------\n");
5709 }
5710 }
5711 }
5712
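// IP-output analogue of the socket-layer overlap check: only Skip results
// get special handling; every other IP output result is treated as masking
// lower policies with overlapping conditions.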
5713 static inline bool
5714 necp_kernel_ip_output_policy_results_overlap(struct necp_kernel_ip_output_policy *upper_policy, struct necp_kernel_ip_output_policy *lower_policy)
5715 {
5716 if (upper_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
5717 if (upper_policy->session_order != lower_policy->session_order) {
5718 // A skip cannot override a policy of a different session
5719 return (FALSE);
5720 } else {
5721 if (upper_policy->result_parameter.skip_policy_order == 0 ||
5722 lower_policy->order >= upper_policy->result_parameter.skip_policy_order) {
5723 // This policy is beyond the skip
5724 return (FALSE);
5725 } else {
5726 // This policy is inside the skip
5727 return (TRUE);
5728 }
5729 }
5730 }
5731
5732 // All other IP Output policy results (drop, tunnel, hard pass) currently overlap
5733 return (TRUE);
5734 }
5735
5736 static bool
5737 necp_kernel_ip_output_policy_is_unnecessary(struct necp_kernel_ip_output_policy *policy, struct necp_kernel_ip_output_policy **policy_array, int valid_indices)
5738 {
5739 bool can_skip = FALSE;
5740 u_int32_t highest_skip_session_order = 0;
5741 u_int32_t highest_skip_order = 0;
5742 int i;
5743 for (i = 0; i < valid_indices; i++) {
5744 struct necp_kernel_ip_output_policy *compared_policy = policy_array[i];
5745
5746 // For policies in a skip window, we can't mark conflicting policies as unnecessary
5747 if (can_skip) {
5748 if (highest_skip_session_order != compared_policy->session_order ||
5749 (highest_skip_order != 0 && compared_policy->order >= highest_skip_order)) {
5750 // If we've moved on to the next session, or passed the skip window
5751 highest_skip_session_order = 0;
5752 highest_skip_order = 0;
5753 can_skip = FALSE;
5754 } else {
5755 // If this policy is also a skip, it can increase the skip window
5756 if (compared_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
5757 if (compared_policy->result_parameter.skip_policy_order > highest_skip_order) {
5758 highest_skip_order = compared_policy->result_parameter.skip_policy_order;
5759 }
5760 }
5761 continue;
5762 }
5763 }
5764
5765 if (compared_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
5766 // This policy is a skip. Set the skip window accordingly
5767 can_skip = TRUE;
5768 highest_skip_session_order = compared_policy->session_order;
5769 highest_skip_order = compared_policy->result_parameter.skip_policy_order;
5770 }
5771
5772 // The result of the compared policy must be able to block out this policy result
5773 if (!necp_kernel_ip_output_policy_results_overlap(compared_policy, policy)) {
5774 continue;
5775 }
5776
5777 // If the new policy matches All Interfaces, the compared policy must match All Interfaces as well
5778 if ((policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES) && !(compared_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES)) {
5779 continue;
5780 }
5781
5782 // A default policy (empty condition mask) always makes lower policies unnecessary
5783 if (compared_policy->condition_mask == 0) {
5784 return (TRUE);
5785 }
5786
5787 // Compared must be more general than policy, and include only conditions within policy
5788 if ((policy->condition_mask & compared_policy->condition_mask) != compared_policy->condition_mask) {
5789 continue;
5790 }
5791
5792 // Negative conditions must match for the overlapping conditions
5793 if ((policy->condition_negated_mask & compared_policy->condition_mask) != (compared_policy->condition_negated_mask & compared_policy->condition_mask)) {
5794 continue;
5795 }
5796
5797 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID &&
5798 compared_policy->cond_policy_id != policy->cond_policy_id) {
5799 continue;
5800 }
5801
5802 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE &&
5803 compared_policy->cond_bound_interface != policy->cond_bound_interface) {
5804 continue;
5805 }
5806
5807 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL &&
5808 compared_policy->cond_protocol != policy->cond_protocol) {
5809 continue;
5810 }
5811
5812 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
5813 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
5814 if (!necp_is_range_in_range((struct sockaddr *)&policy->cond_local_start, (struct sockaddr *)&policy->cond_local_end, (struct sockaddr *)&compared_policy->cond_local_start, (struct sockaddr *)&compared_policy->cond_local_end)) {
5815 continue;
5816 }
5817 } else if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
5818 if (compared_policy->cond_local_prefix > policy->cond_local_prefix ||
5819 !necp_is_addr_in_subnet((struct sockaddr *)&policy->cond_local_start, (struct sockaddr *)&compared_policy->cond_local_start, compared_policy->cond_local_prefix)) {
5820 continue;
5821 }
5822 }
5823 }
5824
5825 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
5826 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
5827 if (!necp_is_range_in_range((struct sockaddr *)&policy->cond_remote_start, (struct sockaddr *)&policy->cond_remote_end, (struct sockaddr *)&compared_policy->cond_remote_start, (struct sockaddr *)&compared_policy->cond_remote_end)) {
5828 continue;
5829 }
5830 } else if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
5831 if (compared_policy->cond_remote_prefix > policy->cond_remote_prefix ||
5832 !necp_is_addr_in_subnet((struct sockaddr *)&policy->cond_remote_start, (struct sockaddr *)&compared_policy->cond_remote_start, compared_policy->cond_remote_prefix)) {
5833 continue;
5834 }
5835 }
5836 }
5837
5838 return (TRUE);
5839 }
5840
5841 return (FALSE);
5842 }
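/*
 * Summary: a new policy is "unnecessary" when an already-placed, higher-priority
 * policy in the same bucket (i) has an overlapping result, (ii) is at least as
 * general (its condition mask is a subset of the new policy's mask, with the
 * negated bits agreeing on those conditions), and (iii) covers the new policy's
 * address conditions (range contains range, prefix contains address). Policies
 * sitting inside a skip window never shadow later policies, since the skip may
 * bypass them at evaluation time. For example, a default policy
 * (condition_mask == 0) placed ahead of the new policy makes it unnecessary
 * outright.
 */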
5843
5844 static bool
5845 necp_kernel_ip_output_policies_reprocess(void)
5846 {
5847 int i;
5848 int bucket_allocation_counts[NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS];
5849 int bucket_current_free_index[NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS];
5850 struct necp_kernel_ip_output_policy *kernel_policy = NULL;
5851
5852 LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
5853
5854 // Reset condition mask and policy counts
5855 necp_kernel_ip_output_policies_condition_mask = 0;
5856 necp_kernel_ip_output_policies_count = 0;
5857 necp_kernel_ip_output_policies_non_id_count = 0;
5858
5859 for (i = 0; i < NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS; i++) {
5860 if (necp_kernel_ip_output_policies_map[i] != NULL) {
5861 FREE(necp_kernel_ip_output_policies_map[i], M_NECP);
5862 necp_kernel_ip_output_policies_map[i] = NULL;
5863 }
5864
5865 // Init counts
5866 bucket_allocation_counts[i] = 0;
5867 }
5868
5869 LIST_FOREACH(kernel_policy, &necp_kernel_ip_output_policies, chain) {
5870 // Update mask
5871 necp_kernel_ip_output_policies_condition_mask |= kernel_policy->condition_mask;
5872 necp_kernel_ip_output_policies_count++;
5873
5874 // Update bucket counts
5875 if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID)) {
5876 necp_kernel_ip_output_policies_non_id_count++;
5877 for (i = 0; i < NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS; i++) {
5878 bucket_allocation_counts[i]++;
5879 }
5880 } else {
5881 bucket_allocation_counts[NECP_IP_OUTPUT_MAP_ID_TO_BUCKET(kernel_policy->cond_policy_id)]++;
5882 }
5883 }
5884
5885 for (i = 0; i < NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS; i++) {
5886 if (bucket_allocation_counts[i] > 0) {
5887 // Allocate a NULL-terminated array of policy pointers for each bucket
5888 MALLOC(necp_kernel_ip_output_policies_map[i], struct necp_kernel_ip_output_policy **, sizeof(struct necp_kernel_ip_output_policy *) * (bucket_allocation_counts[i] + 1), M_NECP, M_WAITOK);
5889 if (necp_kernel_ip_output_policies_map[i] == NULL) {
5890 goto fail;
5891 }
5892
5893 // Initialize the first entry to NULL
5894 (necp_kernel_ip_output_policies_map[i])[0] = NULL;
5895 }
5896 bucket_current_free_index[i] = 0;
5897 }
5898
5899 LIST_FOREACH(kernel_policy, &necp_kernel_ip_output_policies, chain) {
5900 // Insert pointers into map
5901 if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID)) {
5902 for (i = 0; i < NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS; i++) {
5903 if (!necp_kernel_ip_output_policy_is_unnecessary(kernel_policy, necp_kernel_ip_output_policies_map[i], bucket_current_free_index[i])) {
5904 (necp_kernel_ip_output_policies_map[i])[(bucket_current_free_index[i])] = kernel_policy;
5905 bucket_current_free_index[i]++;
5906 (necp_kernel_ip_output_policies_map[i])[(bucket_current_free_index[i])] = NULL;
5907 }
5908 }
5909 } else {
5910 i = NECP_IP_OUTPUT_MAP_ID_TO_BUCKET(kernel_policy->cond_policy_id);
5911 if (!necp_kernel_ip_output_policy_is_unnecessary(kernel_policy, necp_kernel_ip_output_policies_map[i], bucket_current_free_index[i])) {
5912 (necp_kernel_ip_output_policies_map[i])[(bucket_current_free_index[i])] = kernel_policy;
5913 bucket_current_free_index[i]++;
5914 (necp_kernel_ip_output_policies_map[i])[(bucket_current_free_index[i])] = NULL;
5915 }
5916 }
5917 }
5918 necp_kernel_ip_output_policies_dump_all();
5919 return (TRUE);
5920
5921 fail:
5922 // Free memory, reset mask to 0
5923 necp_kernel_ip_output_policies_condition_mask = 0;
5924 necp_kernel_ip_output_policies_count = 0;
5925 necp_kernel_ip_output_policies_non_id_count = 0;
5926 for (i = 0; i < NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS; i++) {
5927 if (necp_kernel_ip_output_policies_map[i] != NULL) {
5928 FREE(necp_kernel_ip_output_policies_map[i], M_NECP);
5929 necp_kernel_ip_output_policies_map[i] = NULL;
5930 }
5931 }
5932 return (FALSE);
5933 }
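/*
 * Layout of necp_kernel_ip_output_policies_map: one NULL-terminated array of
 * policy pointers per ID bucket. Policies that do not condition on a policy ID
 * are copied into every bucket; policies conditioned on a specific ID land only
 * in the bucket given by NECP_IP_OUTPUT_MAP_ID_TO_BUCKET(id). Policies shadowed
 * per necp_kernel_ip_output_policy_is_unnecessary are dropped during the
 * rebuild, so lookups only ever walk policies that can still match.
 */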
5934
5935 // Outbound Policy Matching
5936 // ---------------------
5937 struct substring {
5938 char *string;
5939 size_t length;
5940 };
5941
5942 static struct substring
5943 necp_trim_dots_and_stars(char *string, size_t length)
5944 {
5945 struct substring sub;
5946 sub.string = string;
5947 sub.length = string ? length : 0;
5948
5949 while (sub.length && (sub.string[0] == '.' || sub.string[0] == '*')) {
5950 sub.string++;
5951 sub.length--;
5952 }
5953
5954 while (sub.length && (sub.string[sub.length - 1] == '.' || sub.string[sub.length - 1] == '*')) {
5955 sub.length--;
5956 }
5957
5958 return (sub);
5959 }
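/*
 * Example: necp_trim_dots_and_stars("*.example.com.", 14) yields the substring
 * "example.com" (length 11). No copy is made; the returned substring points
 * into the caller's buffer.
 */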
5960
5961 static char *
5962 necp_create_trimmed_domain(char *string, size_t length)
5963 {
5964 char *trimmed_domain = NULL;
5965 struct substring sub = necp_trim_dots_and_stars(string, length);
5966
5967 MALLOC(trimmed_domain, char *, sub.length + 1, M_NECP, M_WAITOK);
5968 if (trimmed_domain == NULL) {
5969 return (NULL);
5970 }
5971
5972 memcpy(trimmed_domain, sub.string, sub.length);
5973 trimmed_domain[sub.length] = 0;
5974
5975 return (trimmed_domain);
5976 }
5977
5978 static inline int
5979 necp_count_dots(char *string, size_t length)
5980 {
5981 int dot_count = 0;
5982 size_t i = 0;
5983
5984 for (i = 0; i < length; i++) {
5985 if (string[i] == '.') {
5986 dot_count++;
5987 }
5988 }
5989
5990 return (dot_count);
5991 }
5992
5993 static bool
5994 necp_check_suffix(struct substring parent, struct substring suffix, bool require_dot_before_suffix)
5995 {
5996 if (parent.length <= suffix.length) {
5997 return (FALSE);
5998 }
5999
6000 size_t length_difference = (parent.length - suffix.length);
6001
6002 if (require_dot_before_suffix) {
6003 if (((char *)(parent.string + length_difference - 1))[0] != '.') {
6004 return (FALSE);
6005 }
6006 }
6007
6008 // strncasecmp gives an ASCII case-insensitive comparison; non-ASCII (UTF-8) bytes are compared verbatim
6009 return (strncasecmp(parent.string + length_difference, suffix.string, suffix.length) == 0);
6010 }
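/*
 * Example: with require_dot_before_suffix set, "mail.example.com" matches the
 * suffix "example.com" (the byte before the suffix is '.'), while
 * "badexample.com" does not. The comparison is ASCII case-insensitive and the
 * parent must be strictly longer than the suffix.
 */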
6011
6012 static bool
6013 necp_hostname_matches_domain(struct substring hostname_substring, u_int8_t hostname_dot_count, char *domain, u_int8_t domain_dot_count)
6014 {
6015 if (hostname_substring.string == NULL || domain == NULL) {
6016 return (hostname_substring.string == domain);
6017 }
6018
6019 struct substring domain_substring;
6020 domain_substring.string = domain;
6021 domain_substring.length = strlen(domain);
6022
6023 if (hostname_dot_count == domain_dot_count) {
6024 // strncasecmp gives an ASCII case-insensitive comparison; non-ASCII (UTF-8) bytes are compared verbatim
6025 if (hostname_substring.length == domain_substring.length &&
6026 strncasecmp(hostname_substring.string, domain_substring.string, hostname_substring.length) == 0) {
6027 return (TRUE);
6028 }
6029 } else if (domain_dot_count < hostname_dot_count) {
6030 if (necp_check_suffix(hostname_substring, domain_substring, TRUE)) {
6031 return (TRUE);
6032 }
6033 }
6034
6035 return (FALSE);
6036 }
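/*
 * Examples: hostname "www.example.com" matches domain "example.com" (the
 * domain has fewer dots, so a dot-bounded suffix match is used), and
 * "example.com" matches "example.com" exactly, but "example.com" does not
 * match "www.example.com" (a domain with more dots than the hostname never
 * matches).
 */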
6037
6038 static char *
6039 necp_copy_string(char *string, size_t length)
6040 {
6041 char *copied_string = NULL;
6042
6043 MALLOC(copied_string, char *, length + 1, M_NECP, M_WAITOK);
6044 if (copied_string == NULL) {
6045 return (NULL);
6046 }
6047
6048 memcpy(copied_string, string, length);
6049 copied_string[length] = 0;
6050
6051 return (copied_string);
6052 }
6053
6054 #define NECP_KERNEL_ADDRESS_TYPE_CONDITIONS (NECP_KERNEL_CONDITION_LOCAL_START | NECP_KERNEL_CONDITION_LOCAL_END | NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_REMOTE_START | NECP_KERNEL_CONDITION_REMOTE_END | NECP_KERNEL_CONDITION_REMOTE_PREFIX)
6055 static void
6056 necp_application_fillout_info_locked(uuid_t application_uuid, uuid_t real_application_uuid, char *account, char *domain, pid_t pid, uid_t uid, u_int16_t protocol, u_int32_t bound_interface_index, u_int32_t traffic_class, union necp_sockaddr_union *local_addr, union necp_sockaddr_union *remote_addr, proc_t proc, struct necp_socket_info *info)
6057 {
6058 memset(info, 0, sizeof(struct necp_socket_info));
6059
6060 info->pid = pid;
6061 info->uid = uid;
6062 info->protocol = protocol;
6063 info->bound_interface_index = bound_interface_index;
6064 info->traffic_class = traffic_class;
6065
6066 if (necp_kernel_application_policies_condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT && proc != NULL) {
6067 info->cred_result = priv_check_cred(proc_ucred(proc), PRIV_NET_PRIVILEGED_NECP_MATCH, 0);
6068 }
6069
6070 if (necp_kernel_application_policies_condition_mask & NECP_KERNEL_CONDITION_APP_ID && !uuid_is_null(application_uuid)) {
6071 struct necp_uuid_id_mapping *existing_mapping = necp_uuid_lookup_app_id_locked(application_uuid);
6072 if (existing_mapping) {
6073 info->application_id = existing_mapping->id;
6074 }
6075 }
6076
6077 if (necp_kernel_application_policies_condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID && !uuid_is_null(real_application_uuid)) {
6078 if (uuid_compare(application_uuid, real_application_uuid) == 0) {
6079 info->real_application_id = info->application_id;
6080 } else {
6081 struct necp_uuid_id_mapping *existing_mapping = necp_uuid_lookup_app_id_locked(real_application_uuid);
6082 if (existing_mapping) {
6083 info->real_application_id = existing_mapping->id;
6084 }
6085 }
6086 }
6087
6088 if (necp_kernel_application_policies_condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID && account != NULL) {
6089 struct necp_string_id_mapping *existing_mapping = necp_lookup_string_to_id_locked(&necp_account_id_list, account);
6090 if (existing_mapping) {
6091 info->account_id = existing_mapping->id;
6092 }
6093 }
6094
6095 if (necp_kernel_application_policies_condition_mask & NECP_KERNEL_CONDITION_DOMAIN) {
6096 info->domain = domain;
6097 }
6098
6099 if (necp_kernel_application_policies_condition_mask & NECP_KERNEL_ADDRESS_TYPE_CONDITIONS) {
6100 if (local_addr && local_addr->sa.sa_len > 0) {
6101 memcpy(&info->local_addr, local_addr, local_addr->sa.sa_len);
6102 }
6103 if (remote_addr && remote_addr->sa.sa_len > 0) {
6104 memcpy(&info->remote_addr, remote_addr, remote_addr->sa.sa_len);
6105 }
6106 }
6107 }
6108
6109 static void
6110 necp_send_application_interface_denied_event(pid_t pid, uuid_t proc_uuid, u_int32_t if_functional_type)
6111 {
6112 struct kev_netpolicy_ifdenied ev_ifdenied;
6113
6114 bzero(&ev_ifdenied, sizeof(ev_ifdenied));
6115
6116 ev_ifdenied.ev_data.epid = pid;
6117 uuid_copy(ev_ifdenied.ev_data.euuid, proc_uuid);
6118 ev_ifdenied.ev_if_functional_type = if_functional_type;
6119
6120 netpolicy_post_msg(KEV_NETPOLICY_IFDENIED, &ev_ifdenied.ev_data, sizeof(ev_ifdenied));
6121 }
6122
6123 extern char *proc_name_address(void *p);
6124
6125 #define NECP_VERIFY_DELEGATION_ENTITLEMENT(_p, _d) \
6126 if (!has_checked_delegation_entitlement) { \
6127 has_delegation_entitlement = (priv_check_cred(proc_ucred(_p), PRIV_NET_PRIVILEGED_SOCKET_DELEGATE, 0) == 0); \
6128 has_checked_delegation_entitlement = TRUE; \
6129 } \
6130 if (!has_delegation_entitlement) { \
6131 NECPLOG(LOG_ERR, "%s(%d) does not hold the necessary entitlement to delegate network traffic for other processes by %s", \
6132 proc_name_address(_p), proc_pid(_p), _d); \
6133 break; \
6134 }
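/*
 * NECP_VERIFY_DELEGATION_ENTITLEMENT is only meaningful inside the TLV switch
 * below: the bare 'break' exits the current case, so a delegated
 * euuid/uuid/pid/uid parameter is silently ignored when the calling process
 * lacks PRIV_NET_PRIVILEGED_SOCKET_DELEGATE. The entitlement check itself is
 * cached in has_checked_delegation_entitlement, so it runs at most once per
 * call.
 */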
6135
6136 int
6137 necp_application_find_policy_match_internal(proc_t proc,
6138 u_int8_t *parameters,
6139 u_int32_t parameters_size,
6140 struct necp_aggregate_result *returned_result,
6141 u_int32_t *flags,
6142 u_int required_interface_index,
6143 const union necp_sockaddr_union *override_local_addr,
6144 const union necp_sockaddr_union *override_remote_addr,
6145 struct rtentry **returned_route, bool ignore_address)
6146 {
6147 int error = 0;
6148 size_t offset = 0;
6149
6150 struct necp_kernel_socket_policy *matched_policy = NULL;
6151 struct necp_socket_info info;
6152 necp_kernel_policy_filter filter_control_unit = 0;
6153 u_int32_t route_rule_id = 0;
6154 necp_kernel_policy_result service_action = 0;
6155 necp_kernel_policy_service service = { 0, 0 };
6156
6157 u_int16_t protocol = 0;
6158 u_int32_t bound_interface_index = required_interface_index;
6159 u_int32_t traffic_class = 0;
6160 u_int32_t client_flags = 0;
6161 union necp_sockaddr_union local_addr;
6162 union necp_sockaddr_union remote_addr;
6163 bool no_remote_addr = FALSE;
6164 u_int8_t remote_family = 0;
6165 bool no_local_addr = FALSE;
6166
6167 if (override_local_addr) {
6168 memcpy(&local_addr, override_local_addr, sizeof(local_addr));
6169 } else {
6170 memset(&local_addr, 0, sizeof(local_addr));
6171 }
6172 if (override_remote_addr) {
6173 memcpy(&remote_addr, override_remote_addr, sizeof(remote_addr));
6174 } else {
6175 memset(&remote_addr, 0, sizeof(remote_addr));
6176 }
6177
6178 // Initialize UID, PID, and UUIDs to the current process
6179 uid_t uid = kauth_cred_getuid(proc_ucred(proc));
6180 pid_t pid = proc_pid(proc);
6181 uuid_t application_uuid;
6182 uuid_clear(application_uuid);
6183 uuid_t real_application_uuid;
6184 uuid_clear(real_application_uuid);
6185 proc_getexecutableuuid(proc, real_application_uuid, sizeof(real_application_uuid));
6186 uuid_copy(application_uuid, real_application_uuid);
6187
6188 char *domain = NULL;
6189 char *account = NULL;
6190
6191 u_int32_t netagent_ids[NECP_MAX_NETAGENTS];
6192 memset(&netagent_ids, 0, sizeof(netagent_ids));
6193 int netagent_cursor;
6194
6195 bool has_checked_delegation_entitlement = FALSE;
6196 bool has_delegation_entitlement = FALSE;
6197
6198 if (returned_result == NULL) {
6199 return (EINVAL);
6200 }
6201
6202 memset(returned_result, 0, sizeof(struct necp_aggregate_result));
6203
6204 lck_rw_lock_shared(&necp_kernel_policy_lock);
6205 if (necp_kernel_application_policies_count == 0) {
6206 if (necp_drop_all_order > 0) {
6207 returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_DROP;
6208 lck_rw_done(&necp_kernel_policy_lock);
6209 return (0);
6210 }
6211 }
6212 lck_rw_done(&necp_kernel_policy_lock);
6213
6214 while ((offset + sizeof(u_int8_t) + sizeof(u_int32_t)) <= parameters_size) {
6215 u_int8_t type = necp_buffer_get_tlv_type(parameters, offset);
6216 u_int32_t length = necp_buffer_get_tlv_length(parameters, offset);
6217
6218 if (length > (parameters_size - (offset + sizeof(u_int8_t) + sizeof(u_int32_t)))) {
6219 // If the length is larger than what can fit in the remaining parameters size, bail
6220 NECPLOG(LOG_ERR, "Invalid TLV length (%u)", length);
6221 break;
6222 }
6223
6224 if (length > 0) {
6225 u_int8_t *value = necp_buffer_get_tlv_value(parameters, offset, NULL);
6226 if (value != NULL) {
6227 switch (type) {
6228 case NECP_CLIENT_PARAMETER_APPLICATION: {
6229 if (length >= sizeof(uuid_t)) {
6230 if (uuid_compare(application_uuid, value) == 0) {
6231 // No delegation
6232 break;
6233 }
6234
6235 NECP_VERIFY_DELEGATION_ENTITLEMENT(proc, "euuid");
6236
6237 uuid_copy(application_uuid, value);
6238 }
6239 break;
6240 }
6241 case NECP_CLIENT_PARAMETER_REAL_APPLICATION: {
6242 if (length >= sizeof(uuid_t)) {
6243 if (uuid_compare(real_application_uuid, value) == 0) {
6244 // No delegation
6245 break;
6246 }
6247
6248 NECP_VERIFY_DELEGATION_ENTITLEMENT(proc, "uuid");
6249
6250 uuid_copy(real_application_uuid, value);
6251 }
6252 break;
6253 }
6254 case NECP_CLIENT_PARAMETER_PID: {
6255 if (length >= sizeof(pid_t)) {
6256 if (memcmp(&pid, value, sizeof(pid_t)) == 0) {
6257 // No delegation
6258 break;
6259 }
6260
6261 NECP_VERIFY_DELEGATION_ENTITLEMENT(proc, "pid");
6262
6263 memcpy(&pid, value, sizeof(pid_t));
6264 }
6265 break;
6266 }
6267 case NECP_CLIENT_PARAMETER_UID: {
6268 if (length >= sizeof(uid_t)) {
6269 if (memcmp(&uid, value, sizeof(uid_t)) == 0) {
6270 // No delegation
6271 break;
6272 }
6273
6274 NECP_VERIFY_DELEGATION_ENTITLEMENT(proc, "uid");
6275
6276 memcpy(&uid, value, sizeof(uid_t));
6277 }
6278 break;
6279 }
6280 case NECP_CLIENT_PARAMETER_DOMAIN: {
6281 domain = (char *)value;
6282 domain[length - 1] = 0;
6283 break;
6284 }
6285 case NECP_CLIENT_PARAMETER_ACCOUNT: {
6286 account = (char *)value;
6287 account[length - 1] = 0;
6288 break;
6289 }
6290 case NECP_CLIENT_PARAMETER_TRAFFIC_CLASS: {
6291 if (length >= sizeof(u_int32_t)) {
6292 memcpy(&traffic_class, value, sizeof(u_int32_t));
6293 }
6294 break;
6295 }
6296 case NECP_CLIENT_PARAMETER_IP_PROTOCOL: {
6297 if (length >= sizeof(u_int16_t)) {
6298 memcpy(&protocol, value, sizeof(u_int16_t));
6299 }
6300 break;
6301 }
6302 case NECP_CLIENT_PARAMETER_BOUND_INTERFACE: {
6303 if (length <= IFXNAMSIZ && length > 0) {
6304 ifnet_t bound_interface = NULL;
6305 char interface_name[IFXNAMSIZ];
6306 memcpy(interface_name, value, length);
6307 interface_name[length - 1] = 0; // Make sure the string is NULL terminated
6308 if (ifnet_find_by_name(interface_name, &bound_interface) == 0) {
6309 bound_interface_index = bound_interface->if_index;
6310 ifnet_release(bound_interface);
6311 }
6312 }
6313 break;
6314 }
6315 case NECP_CLIENT_PARAMETER_LOCAL_ADDRESS: {
6316 if (ignore_address) {
6317 break;
6318 }
6319
6320 if (length >= sizeof(struct necp_policy_condition_addr)) {
6321 struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value;
6322 if (necp_address_is_valid(&address_struct->address.sa)) {
6323 memcpy(&local_addr, &address_struct->address, sizeof(address_struct->address));
6324 }
6325 }
6326 break;
6327 }
6328 case NECP_CLIENT_PARAMETER_REMOTE_ADDRESS: {
6329 if (ignore_address) {
6330 break;
6331 }
6332
6333 if (length >= sizeof(struct necp_policy_condition_addr)) {
6334 struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value;
6335 if (necp_address_is_valid(&address_struct->address.sa)) {
6336 memcpy(&remote_addr, &address_struct->address, sizeof(address_struct->address));
6337 }
6338 }
6339 break;
6340 }
6341 case NECP_CLIENT_PARAMETER_FLAGS: {
6342 if (length >= sizeof(client_flags)) {
6343 memcpy(&client_flags, value, sizeof(client_flags));
6344 }
break;
6345 }
6346 default: {
6347 break;
6348 }
6349 }
6350 }
6351 }
6352
6353 offset += sizeof(u_int8_t) + sizeof(u_int32_t) + length;
6354 }
6355
6356 // Lock
6357 lck_rw_lock_shared(&necp_kernel_policy_lock);
6358
6359 necp_application_fillout_info_locked(application_uuid, real_application_uuid, account, domain, pid, uid, protocol, bound_interface_index, traffic_class, &local_addr, &remote_addr, proc, &info);
6360 matched_policy = necp_socket_find_policy_match_with_info_locked(necp_kernel_socket_policies_app_layer_map, &info, &filter_control_unit, &route_rule_id, &service_action, &service, netagent_ids, NECP_MAX_NETAGENTS, proc);
6361 if (matched_policy) {
6362 returned_result->policy_id = matched_policy->id;
6363 returned_result->routing_result = matched_policy->result;
6364 memcpy(&returned_result->routing_result_parameter, &matched_policy->result_parameter, sizeof(returned_result->routing_result_parameter));
6365 } else if (necp_drop_all_order > 0) {
6366 // Mark socket as a drop if drop_all is set
6367 returned_result->policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
6368 returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_DROP;
6369 } else {
6370 returned_result->policy_id = 0;
6371 returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_NONE;
6372 }
6373 returned_result->filter_control_unit = filter_control_unit;
6374 returned_result->service_action = service_action;
6375
6376 // Handle trigger service
6377 if (service.identifier != 0) {
6378 struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(service.identifier);
6379 if (mapping != NULL) {
6380 struct necp_service_registration *service_registration = NULL;
6381 uuid_copy(returned_result->service_uuid, mapping->uuid);
6382 returned_result->service_data = service.data;
6383 if (service.identifier == NECP_NULL_SERVICE_ID) {
6384 // NULL service is always 'registered'
6385 returned_result->service_flags |= NECP_SERVICE_FLAGS_REGISTERED;
6386 } else {
6387 LIST_FOREACH(service_registration, &necp_registered_service_list, kernel_chain) {
6388 if (service.identifier == service_registration->service_id) {
6389 returned_result->service_flags |= NECP_SERVICE_FLAGS_REGISTERED;
6390 break;
6391 }
6392 }
6393 }
6394 }
6395 }
6396
6397 // Handle netagents
6398 for (netagent_cursor = 0; netagent_cursor < NECP_MAX_NETAGENTS; netagent_cursor++) {
6399 struct necp_uuid_id_mapping *mapping = NULL;
6400 u_int32_t netagent_id = netagent_ids[netagent_cursor];
6401 if (netagent_id == 0) {
6402 break;
6403 }
6404 mapping = necp_uuid_lookup_uuid_with_service_id_locked(netagent_id);
6405 if (mapping != NULL) {
6406 uuid_copy(returned_result->netagents[netagent_cursor], mapping->uuid);
6407 returned_result->netagent_flags[netagent_cursor] = netagent_get_flags(mapping->uuid);
6408 }
6409 }
6410
6411 // Do routing evaluation
6412 u_int output_bound_interface = bound_interface_index;
6413 if (returned_result->routing_result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED) {
6414 output_bound_interface = returned_result->routing_result_parameter.scoped_interface_index;
6415 } else if (returned_result->routing_result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL) {
6416 output_bound_interface = returned_result->routing_result_parameter.tunnel_interface_index;
6417 }
6418
6419 if (local_addr.sa.sa_len == 0 ||
6420 (local_addr.sa.sa_family == AF_INET && local_addr.sin.sin_addr.s_addr == 0) ||
6421 (local_addr.sa.sa_family == AF_INET6 && IN6_IS_ADDR_UNSPECIFIED(&local_addr.sin6.sin6_addr))) {
6422 no_local_addr = TRUE;
6423 }
6424
6425 if (remote_addr.sa.sa_len == 0 ||
6426 (remote_addr.sa.sa_family == AF_INET && remote_addr.sin.sin_addr.s_addr == 0) ||
6427 (remote_addr.sa.sa_family == AF_INET6 && IN6_IS_ADDR_UNSPECIFIED(&remote_addr.sin6.sin6_addr))) {
6428 no_remote_addr = TRUE;
6429 remote_family = remote_addr.sa.sa_family;
6430 }
6431
6432 returned_result->routed_interface_index = 0;
6433 struct rtentry *rt = NULL;
6434 if (!no_local_addr && (client_flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER) != 0) {
6435 // Treat the output bound interface as the routed interface for local address
6436 // validation later.
6437 returned_result->routed_interface_index = output_bound_interface;
6438 } else {
6439 if (no_remote_addr) {
6440 memset(&remote_addr, 0, sizeof(remote_addr));
6441 if (remote_family == AF_INET6) {
6442 // Reset address to ::
6443 remote_addr.sa.sa_family = AF_INET6;
6444 remote_addr.sa.sa_len = sizeof(struct sockaddr_in6);
6445 } else {
6446 // Reset address to 0.0.0.0
6447 remote_addr.sa.sa_family = AF_INET;
6448 remote_addr.sa.sa_len = sizeof(struct sockaddr_in);
6449 }
6450 }
6451
6452 rt = rtalloc1_scoped((struct sockaddr *)&remote_addr, 0, 0,
6453 output_bound_interface);
6454
6455 if (no_remote_addr && remote_family == 0 &&
6456 (rt == NULL || rt->rt_ifp == NULL)) {
6457 // Route lookup for default IPv4 failed, try IPv6
6458
6459 // Cleanup old route if necessary
6460 if (rt != NULL) {
6461 rtfree(rt);
6462 rt = NULL;
6463 }
6464
6465 // Reset address to ::
6466 memset(&remote_addr, 0, sizeof(remote_addr));
6467 remote_addr.sa.sa_family = AF_INET6;
6468 remote_addr.sa.sa_len = sizeof(struct sockaddr_in6);
6469
6470 // Get route
6471 rt = rtalloc1_scoped((struct sockaddr *)&remote_addr, 0, 0,
6472 output_bound_interface);
6473 }
6474
6475 if (rt != NULL &&
6476 rt->rt_ifp != NULL) {
6477 returned_result->routed_interface_index = rt->rt_ifp->if_index;
6478 /*
6479 * For local addresses, we allow the interface scope to be
6480 * either the loopback interface or the interface hosting the
6481 * local address.
6482 */
6483 if (bound_interface_index != IFSCOPE_NONE &&
6484 rt->rt_ifa != NULL && rt->rt_ifa->ifa_ifp &&
6485 (output_bound_interface == lo_ifp->if_index ||
6486 rt->rt_ifp->if_index == lo_ifp->if_index ||
6487 rt->rt_ifa->ifa_ifp->if_index == bound_interface_index)) {
6488 struct sockaddr_storage dst;
6489 unsigned int ifscope = bound_interface_index;
6490
6491 /*
6492 * Transform dst into the internal routing table form
6493 */
6494 (void) sa_copy((struct sockaddr *)&remote_addr,
6495 &dst, &ifscope);
6496
6497 if ((rt->rt_ifp->if_index == lo_ifp->if_index) ||
6498 rt_ifa_is_dst((struct sockaddr *)&dst, rt->rt_ifa))
6499 returned_result->routed_interface_index =
6500 bound_interface_index;
6501 }
6502 }
6503 }
6504
6505 if (returned_result->routed_interface_index != 0 &&
6506 returned_result->routed_interface_index != lo_ifp->if_index && // Loopback can accept any local address
6507 !no_local_addr) {
6508
6509 // Transform local_addr into the ifaddr form
6510 // IPv6 Scope IDs are always embedded in the ifaddr list
6511 struct sockaddr_storage local_address_sanitized;
6512 u_int ifscope = IFSCOPE_NONE;
6513 (void)sa_copy(&local_addr.sa, &local_address_sanitized, &ifscope);
6514 SIN(&local_address_sanitized)->sin_port = 0;
6515 if (local_address_sanitized.ss_family == AF_INET6) {
6516 SIN6(&local_address_sanitized)->sin6_scope_id = 0;
6517 }
6518
6519 // Validate local address on routed interface
6520 struct ifaddr *ifa = ifa_ifwithaddr_scoped((struct sockaddr *)&local_address_sanitized, returned_result->routed_interface_index);
6521 if (ifa == NULL) {
6522 // Interface address not found, reject route
6523 returned_result->routed_interface_index = 0;
6524 if (rt != NULL) {
6525 rtfree(rt);
6526 rt = NULL;
6527 }
6528 } else {
6529 ifaddr_release(ifa);
6530 ifa = NULL;
6531 }
6532 }
6533
6534 if (flags != NULL) {
6535 if ((client_flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER) == 0) {
6536 // Check for local/direct
6537 bool is_local = FALSE;
6538 if (rt != NULL && (rt->rt_flags & RTF_LOCAL)) {
6539 is_local = TRUE;
6540 } else if (returned_result->routed_interface_index != 0 &&
6541 !no_remote_addr) {
6542 // Clean up the address before comparison with interface addresses
6543
6544 // Transform remote_addr into the ifaddr form
6545 // IPv6 Scope IDs are always embedded in the ifaddr list
6546 struct sockaddr_storage remote_address_sanitized;
6547 u_int ifscope = IFSCOPE_NONE;
6548 (void)sa_copy(&remote_addr.sa, &remote_address_sanitized, &ifscope);
6549 SIN(&remote_address_sanitized)->sin_port = 0;
6550 if (remote_address_sanitized.ss_family == AF_INET6) {
6551 SIN6(&remote_address_sanitized)->sin6_scope_id = 0;
6552 }
6553
6554 // Check if remote address is an interface address
6555 struct ifaddr *ifa = ifa_ifwithaddr((struct sockaddr *)&remote_address_sanitized);
6556 if (ifa != NULL && ifa->ifa_ifp != NULL) {
6557 u_int if_index_for_remote_addr = ifa->ifa_ifp->if_index;
6558 if (if_index_for_remote_addr == returned_result->routed_interface_index ||
6559 if_index_for_remote_addr == lo_ifp->if_index) {
6560 is_local = TRUE;
6561 }
6562 }
6563 if (ifa != NULL) {
6564 ifaddr_release(ifa);
6565 ifa = NULL;
6566 }
6567 }
6568
6569 if (is_local) {
6570 *flags |= (NECP_CLIENT_RESULT_FLAG_IS_LOCAL | NECP_CLIENT_RESULT_FLAG_IS_DIRECT);
6571 } else {
6572 if (rt != NULL &&
6573 !(rt->rt_flags & RTF_GATEWAY) &&
6574 (rt->rt_ifa && rt->rt_ifa->ifa_ifp && !(rt->rt_ifa->ifa_ifp->if_flags & IFF_POINTOPOINT))) {
6575 // Route is directly accessible
6576 *flags |= NECP_CLIENT_RESULT_FLAG_IS_DIRECT;
6577 }
6578 }
6579
6580 if (rt != NULL &&
6581 rt->rt_ifp != NULL) {
6582 // Check probe status
6583 if (rt->rt_ifp->if_eflags & IFEF_PROBE_CONNECTIVITY) {
6584 *flags |= NECP_CLIENT_RESULT_FLAG_PROBE_CONNECTIVITY;
6585 }
6586
6587 if (rt->rt_ifp->if_type == IFT_CELLULAR) {
6588 struct if_cellular_status_v1 *ifsr;
6589
6590 ifnet_lock_shared(rt->rt_ifp);
6591 lck_rw_lock_exclusive(&rt->rt_ifp->if_link_status_lock);
6592
6593 if (rt->rt_ifp->if_link_status != NULL) {
6594 ifsr = &rt->rt_ifp->if_link_status->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
6595
6596 if (ifsr->valid_bitmask & IF_CELL_UL_MSS_RECOMMENDED_VALID) {
6597 if (ifsr->mss_recommended == IF_CELL_UL_MSS_RECOMMENDED_NONE) {
6598 returned_result->mss_recommended = NECP_CLIENT_RESULT_RECOMMENDED_MSS_NONE;
6599 } else if (ifsr->mss_recommended == IF_CELL_UL_MSS_RECOMMENDED_MEDIUM) {
6600 returned_result->mss_recommended = NECP_CLIENT_RESULT_RECOMMENDED_MSS_MEDIUM;
6601 } else if (ifsr->mss_recommended == IF_CELL_UL_MSS_RECOMMENDED_LOW) {
6602 returned_result->mss_recommended = NECP_CLIENT_RESULT_RECOMMENDED_MSS_LOW;
6603 }
6604 }
6605 }
6606 lck_rw_done(&rt->rt_ifp->if_link_status_lock);
6607 ifnet_lock_done(rt->rt_ifp);
6608 }
6609
6610 // Check link quality
6611 if ((client_flags & NECP_CLIENT_PARAMETER_FLAG_DISCRETIONARY) &&
6612 (rt->rt_ifp->if_interface_state.valid_bitmask & IF_INTERFACE_STATE_LQM_STATE_VALID) &&
6613 rt->rt_ifp->if_interface_state.lqm_state == IFNET_LQM_THRESH_ABORT) {
6614 *flags |= NECP_CLIENT_RESULT_FLAG_LINK_QUALITY_ABORT;
6615 }
6616
6617 // Check QoS marking (fastlane)
6618 if (necp_update_qos_marking(rt->rt_ifp, route_rule_id)) {
6619 *flags |= NECP_CLIENT_RESULT_FLAG_ALLOW_QOS_MARKING;
6620 }
6621 }
6622 }
6623
6624 if (returned_result->routed_interface_index != 0) {
6625 union necp_sockaddr_union default_address;
6626 struct rtentry *v4Route = NULL;
6627 struct rtentry *v6Route = NULL;
6628
6629 memset(&default_address, 0, sizeof(default_address));
6630
6631 // Reset address to 0.0.0.0
6632 default_address.sa.sa_family = AF_INET;
6633 default_address.sa.sa_len = sizeof(struct sockaddr_in);
6634 v4Route = rtalloc1_scoped((struct sockaddr *)&default_address, 0, 0,
6635 returned_result->routed_interface_index);
6636
6637 // Reset address to ::
6638 default_address.sa.sa_family = AF_INET6;
6639 default_address.sa.sa_len = sizeof(struct sockaddr_in6);
6640 v6Route = rtalloc1_scoped((struct sockaddr *)&default_address, 0, 0,
6641 returned_result->routed_interface_index);
6642
6643 if (v4Route != NULL) {
6644 if (v4Route->rt_ifp != NULL) {
6645 *flags |= NECP_CLIENT_RESULT_FLAG_HAS_IPV4;
6646 }
6647 rtfree(v4Route);
6648 v4Route = NULL;
6649 }
6650
6651 if (v6Route != NULL) {
6652 if (v6Route->rt_ifp != NULL) {
6653 *flags |= NECP_CLIENT_RESULT_FLAG_HAS_IPV6;
6654 }
6655 rtfree(v6Route);
6656 v6Route = NULL;
6657 }
6658 }
6659 }
6660
6661 u_int32_t interface_type_denied = IFRTYPE_FUNCTIONAL_UNKNOWN;
6662 bool route_is_allowed = necp_route_is_allowed(rt, NULL, route_rule_id, &interface_type_denied);
6663 if (!route_is_allowed) {
6664 // If the route is blocked, treat the lookup as a drop
6665 returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_DROP;
6666 memset(&returned_result->routing_result_parameter, 0, sizeof(returned_result->routing_result_parameter));
6667
6668 if (interface_type_denied != IFRTYPE_FUNCTIONAL_UNKNOWN) {
6669 necp_send_application_interface_denied_event(pid, application_uuid, interface_type_denied);
6670 }
6671 }
6672
6673 if (rt != NULL) {
6674 if (returned_route != NULL) {
6675 *returned_route = rt;
6676 } else {
6677 rtfree(rt);
6678 }
6679 rt = NULL;
6680 }
6681 // Unlock
6682 lck_rw_done(&necp_kernel_policy_lock);
6683
6684 return (error);
6685 }
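/*
 * Rough flow of necp_application_find_policy_match_internal:
 *   1. Parse the client parameter TLVs (application/real application/pid/uid
 *      require the delegation entitlement; addresses are honored unless
 *      ignore_address is set).
 *   2. Fill out a necp_socket_info and match it against the app-layer policy
 *      map, recording filter, route rule, service, and netagent results.
 *   3. Do a scoped route lookup toward the remote address (falling back from
 *      the IPv4 default to the IPv6 default when no remote address was given),
 *      validate any local address against the routed interface, and derive the
 *      result flags (local/direct, IPv4/IPv6 availability, link quality, QoS
 *      marking, recommended MSS on cellular).
 *   4. If route rules deny the routed interface, convert the result to a drop
 *      and post an interface-denied event.
 */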
6686
6687 static bool
6688 necp_socket_check_policy(struct necp_kernel_socket_policy *kernel_policy, necp_app_id app_id, necp_app_id real_app_id, errno_t cred_result, u_int32_t account_id, struct substring domain, u_int8_t domain_dot_count, pid_t pid, uid_t uid, u_int32_t bound_interface_index, u_int32_t traffic_class, u_int16_t protocol, union necp_sockaddr_union *local, union necp_sockaddr_union *remote, proc_t proc)
6689 {
6690 if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES)) {
6691 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
6692 u_int32_t cond_bound_interface_index = kernel_policy->cond_bound_interface ? kernel_policy->cond_bound_interface->if_index : 0;
6693 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
6694 if (bound_interface_index == cond_bound_interface_index) {
6695 // No match, matches forbidden interface
6696 return (FALSE);
6697 }
6698 } else {
6699 if (bound_interface_index != cond_bound_interface_index) {
6700 // No match, does not match required interface
6701 return (FALSE);
6702 }
6703 }
6704 } else {
6705 if (bound_interface_index != 0) {
6706 // No match, requires a non-bound packet
6707 return (FALSE);
6708 }
6709 }
6710 }
6711
6712 if (kernel_policy->condition_mask == 0) {
6713 return (TRUE);
6714 }
6715
6716 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID) {
6717 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_APP_ID) {
6718 if (app_id == kernel_policy->cond_app_id) {
6719 // No match, matches forbidden application
6720 return (FALSE);
6721 }
6722 } else {
6723 if (app_id != kernel_policy->cond_app_id) {
6724 // No match, does not match required application
6725 return (FALSE);
6726 }
6727 }
6728 }
6729
6730 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
6731 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
6732 if (real_app_id == kernel_policy->cond_real_app_id) {
6733 // No match, matches forbidden application
6734 return (FALSE);
6735 }
6736 } else {
6737 if (real_app_id != kernel_policy->cond_real_app_id) {
6738 // No match, does not match required application
6739 return (FALSE);
6740 }
6741 }
6742 }
6743
6744 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT) {
6745 if (cred_result != 0) {
6746 // Process is missing entitlement
6747 return (FALSE);
6748 }
6749 }
6750
6751 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT) {
6752 if (kernel_policy->cond_custom_entitlement_matched == necp_boolean_state_false) {
6753 // Process is missing entitlement based on previous check
6754 return (FALSE);
6755 } else if (kernel_policy->cond_custom_entitlement_matched == necp_boolean_state_unknown) {
6756 if (kernel_policy->cond_custom_entitlement != NULL) {
6757 if (proc == NULL) {
6758 // No process found, cannot check entitlement
6759 return (FALSE);
6760 }
6761 task_t task = proc_task(proc);
6762 if (task == NULL ||
6763 !IOTaskHasEntitlement(task, kernel_policy->cond_custom_entitlement)) {
6764 // Process is missing custom entitlement
6765 kernel_policy->cond_custom_entitlement_matched = necp_boolean_state_false;
6766 return (FALSE);
6767 } else {
6768 kernel_policy->cond_custom_entitlement_matched = necp_boolean_state_true;
6769 }
6770 }
6771 }
6772 }
6773
6774 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_DOMAIN) {
6775 bool domain_matches = necp_hostname_matches_domain(domain, domain_dot_count, kernel_policy->cond_domain, kernel_policy->cond_domain_dot_count);
6776 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_DOMAIN) {
6777 if (domain_matches) {
6778 // No match, matches forbidden domain
6779 return (FALSE);
6780 }
6781 } else {
6782 if (!domain_matches) {
6783 // No match, does not match required domain
6784 return (FALSE);
6785 }
6786 }
6787 }
6788
6789 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID) {
6790 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID) {
6791 if (account_id == kernel_policy->cond_account_id) {
6792 // No match, matches forbidden account
6793 return (FALSE);
6794 }
6795 } else {
6796 if (account_id != kernel_policy->cond_account_id) {
6797 // No match, does not match required account
6798 return (FALSE);
6799 }
6800 }
6801 }
6802
6803 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PID) {
6804 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_PID) {
6805 if (pid == kernel_policy->cond_pid) {
6806 // No match, matches forbidden pid
6807 return (FALSE);
6808 }
6809 } else {
6810 if (pid != kernel_policy->cond_pid) {
6811 // No match, does not match required pid
6812 return (FALSE);
6813 }
6814 }
6815 }
6816
6817 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_UID) {
6818 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_UID) {
6819 if (uid == kernel_policy->cond_uid) {
6820 // No match, matches forbidden uid
6821 return (FALSE);
6822 }
6823 } else {
6824 if (uid != kernel_policy->cond_uid) {
6825 // No match, does not match required uid
6826 return (FALSE);
6827 }
6828 }
6829 }
6830
6831 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
6832 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
6833 if (traffic_class >= kernel_policy->cond_traffic_class.start_tc &&
6834 traffic_class <= kernel_policy->cond_traffic_class.end_tc) {
6835 // No match, matches forbidden traffic class
6836 return (FALSE);
6837 }
6838 } else {
6839 if (traffic_class < kernel_policy->cond_traffic_class.start_tc ||
6840 traffic_class > kernel_policy->cond_traffic_class.end_tc) {
6841 // No match, does not match required traffic class
6842 return (FALSE);
6843 }
6844 }
6845 }
6846
6847 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
6848 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
6849 if (protocol == kernel_policy->cond_protocol) {
6850 // No match, matches forbidden protocol
6851 return (FALSE);
6852 }
6853 } else {
6854 if (protocol != kernel_policy->cond_protocol) {
6855 // No match, does not match required protocol
6856 return (FALSE);
6857 }
6858 }
6859 }
6860
6861 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
6862 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
6863 bool inRange = necp_is_addr_in_range((struct sockaddr *)local, (struct sockaddr *)&kernel_policy->cond_local_start, (struct sockaddr *)&kernel_policy->cond_local_end);
6864 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
6865 if (inRange) {
6866 return (FALSE);
6867 }
6868 } else {
6869 if (!inRange) {
6870 return (FALSE);
6871 }
6872 }
6873 } else if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
6874 bool inSubnet = necp_is_addr_in_subnet((struct sockaddr *)local, (struct sockaddr *)&kernel_policy->cond_local_start, kernel_policy->cond_local_prefix);
6875 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
6876 if (inSubnet) {
6877 return (FALSE);
6878 }
6879 } else {
6880 if (!inSubnet) {
6881 return (FALSE);
6882 }
6883 }
6884 }
6885 }
6886
6887 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
6888 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
6889 bool inRange = necp_is_addr_in_range((struct sockaddr *)remote, (struct sockaddr *)&kernel_policy->cond_remote_start, (struct sockaddr *)&kernel_policy->cond_remote_end);
6890 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
6891 if (inRange) {
6892 return (FALSE);
6893 }
6894 } else {
6895 if (!inRange) {
6896 return (FALSE);
6897 }
6898 }
6899 } else if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
6900 bool inSubnet = necp_is_addr_in_subnet((struct sockaddr *)remote, (struct sockaddr *)&kernel_policy->cond_remote_start, kernel_policy->cond_remote_prefix);
6901 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
6902 if (inSubnet) {
6903 return (FALSE);
6904 }
6905 } else {
6906 if (!inSubnet) {
6907 return (FALSE);
6908 }
6909 }
6910 }
6911 }
6912
6913 return (TRUE);
6914 }
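/*
 * Each condition is only evaluated if its bit is set in condition_mask; the
 * matching bit in condition_negated_mask flips the sense of the test (e.g. a
 * negated PROTOCOL condition means "any protocol except cond_protocol"). The
 * first failing condition returns FALSE, so a policy matches only when every
 * one of its conditions is satisfied.
 */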
6915
6916 static inline u_int32_t
6917 necp_socket_calc_flowhash_locked(struct necp_socket_info *info)
6918 {
6919 return (net_flowhash(info, sizeof(*info), necp_kernel_socket_policies_gencount));
6920 }
6921
6922 static void
6923 necp_socket_fillout_info_locked(struct inpcb *inp, struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr, u_int32_t override_bound_interface, struct necp_socket_info *info)
6924 {
6925 struct socket *so = NULL;
6926
6927 memset(info, 0, sizeof(struct necp_socket_info));
6928
6929 so = inp->inp_socket;
6930
6931 if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_PID) {
6932 info->pid = ((so->so_flags & SOF_DELEGATED) ? so->e_pid : so->last_pid);
6933 }
6934
6935 if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_UID) {
6936 info->uid = kauth_cred_getuid(so->so_cred);
6937 }
6938
6939 if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
6940 info->traffic_class = so->so_traffic_class;
6941 }
6942
6943 if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
6944 if (inp->inp_ip_p) {
6945 info->protocol = inp->inp_ip_p;
6946 } else {
6947 info->protocol = SOCK_PROTO(so);
6948 }
6949 }
6950
6951 if (inp->inp_flags2 & INP2_WANT_APP_POLICY && necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_APP_ID) {
6952 struct necp_uuid_id_mapping *existing_mapping = necp_uuid_lookup_app_id_locked(((so->so_flags & SOF_DELEGATED) ? so->e_uuid : so->last_uuid));
6953 if (existing_mapping) {
6954 info->application_id = existing_mapping->id;
6955 }
6956
6957 if (!(so->so_flags & SOF_DELEGATED)) {
6958 info->real_application_id = info->application_id;
6959 } else if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
6960 struct necp_uuid_id_mapping *real_existing_mapping = necp_uuid_lookup_app_id_locked(so->last_uuid);
6961 if (real_existing_mapping) {
6962 info->real_application_id = real_existing_mapping->id;
6963 }
6964 }
6965
6966 if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT) {
6967 info->cred_result = priv_check_cred(so->so_cred, PRIV_NET_PRIVILEGED_NECP_MATCH, 0);
6968 }
6969 }
6970
6971 if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID && inp->inp_necp_attributes.inp_account != NULL) {
6972 struct necp_string_id_mapping *existing_mapping = necp_lookup_string_to_id_locked(&necp_account_id_list, inp->inp_necp_attributes.inp_account);
6973 if (existing_mapping) {
6974 info->account_id = existing_mapping->id;
6975 }
6976 }
6977
6978 if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_DOMAIN) {
6979 info->domain = inp->inp_necp_attributes.inp_domain;
6980 }
6981
6982 if (override_bound_interface) {
6983 info->bound_interface_index = override_bound_interface;
6984 } else {
6985 if ((inp->inp_flags & INP_BOUND_IF) && inp->inp_boundifp) {
6986 info->bound_interface_index = inp->inp_boundifp->if_index;
6987 }
6988 }
6989
6990 if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_ADDRESS_TYPE_CONDITIONS) {
6991 if (inp->inp_vflag & INP_IPV4) {
6992 if (override_local_addr) {
6993 if (override_local_addr->sa_len <= sizeof(struct sockaddr_in)) {
6994 memcpy(&info->local_addr, override_local_addr, override_local_addr->sa_len);
6995 }
6996 } else {
6997 ((struct sockaddr_in *)&info->local_addr)->sin_family = AF_INET;
6998 ((struct sockaddr_in *)&info->local_addr)->sin_len = sizeof(struct sockaddr_in);
6999 ((struct sockaddr_in *)&info->local_addr)->sin_port = inp->inp_lport;
7000 memcpy(&((struct sockaddr_in *)&info->local_addr)->sin_addr, &inp->inp_laddr, sizeof(struct in_addr));
7001 }
7002
7003 if (override_remote_addr) {
7004 if (override_remote_addr->sa_len <= sizeof(struct sockaddr_in)) {
7005 memcpy(&info->remote_addr, override_remote_addr, override_remote_addr->sa_len);
7006 }
7007 } else {
7008 ((struct sockaddr_in *)&info->remote_addr)->sin_family = AF_INET;
7009 ((struct sockaddr_in *)&info->remote_addr)->sin_len = sizeof(struct sockaddr_in);
7010 ((struct sockaddr_in *)&info->remote_addr)->sin_port = inp->inp_fport;
7011 memcpy(&((struct sockaddr_in *)&info->remote_addr)->sin_addr, &inp->inp_faddr, sizeof(struct in_addr));
7012 }
7013 } else if (inp->inp_vflag & INP_IPV6) {
7014 if (override_local_addr) {
7015 if (override_local_addr->sa_len <= sizeof(struct sockaddr_in6)) {
7016 memcpy(&info->local_addr, override_local_addr, override_local_addr->sa_len);
7017 }
7018 } else {
7019 ((struct sockaddr_in6 *)&info->local_addr)->sin6_family = AF_INET6;
7020 ((struct sockaddr_in6 *)&info->local_addr)->sin6_len = sizeof(struct sockaddr_in6);
7021 ((struct sockaddr_in6 *)&info->local_addr)->sin6_port = inp->inp_lport;
7022 memcpy(&((struct sockaddr_in6 *)&info->local_addr)->sin6_addr, &inp->in6p_laddr, sizeof(struct in6_addr));
7023 }
7024
7025 if (override_remote_addr) {
7026 if (override_remote_addr->sa_len <= sizeof(struct sockaddr_in6)) {
7027 memcpy(&info->remote_addr, override_remote_addr, override_remote_addr->sa_len);
7028 }
7029 } else {
7030 ((struct sockaddr_in6 *)&info->remote_addr)->sin6_family = AF_INET6;
7031 ((struct sockaddr_in6 *)&info->remote_addr)->sin6_len = sizeof(struct sockaddr_in6);
7032 ((struct sockaddr_in6 *)&info->remote_addr)->sin6_port = inp->inp_fport;
7033 memcpy(&((struct sockaddr_in6 *)&info->remote_addr)->sin6_addr, &inp->in6p_faddr, sizeof(struct in6_addr));
7034 }
7035 }
7036 }
7037 }
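/*
 * Most fields in necp_socket_info are only populated when the aggregate
 * necp_kernel_socket_policies_condition_mask says some loaded policy actually
 * conditions on them, keeping the per-socket cost proportional to the
 * conditions in use. Addresses come from the inpcb unless override addresses
 * are supplied.
 */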
7038
7039 static inline struct necp_kernel_socket_policy *
7040 necp_socket_find_policy_match_with_info_locked(struct necp_kernel_socket_policy **policy_search_array, struct necp_socket_info *info, necp_kernel_policy_filter *return_filter, u_int32_t *return_route_rule_id, necp_kernel_policy_result *return_service_action, necp_kernel_policy_service *return_service, u_int32_t *return_netagent_array, size_t netagent_array_count, proc_t proc)
7041 {
7042 struct necp_kernel_socket_policy *matched_policy = NULL;
7043 u_int32_t skip_order = 0;
7044 u_int32_t skip_session_order = 0;
7045 u_int32_t route_rule_id_array[MAX_AGGREGATE_ROUTE_RULES];
7046 size_t route_rule_id_count = 0;
7047 int i;
7048 size_t netagent_cursor = 0;
7049
7050 // Pre-process domain for quick matching
7051 struct substring domain_substring = necp_trim_dots_and_stars(info->domain, info->domain ? strlen(info->domain) : 0);
7052 u_int8_t domain_dot_count = necp_count_dots(domain_substring.string, domain_substring.length);
7053
7054 if (return_filter) {
7055 *return_filter = 0;
7056 }
7057
7058 if (return_route_rule_id) {
7059 *return_route_rule_id = 0;
7060 }
7061
7062 if (return_service_action) {
7063 *return_service_action = 0;
7064 }
7065
7066 if (return_service) {
7067 return_service->identifier = 0;
7068 return_service->data = 0;
7069 }
7070
7071 if (policy_search_array != NULL) {
7072 for (i = 0; policy_search_array[i] != NULL; i++) {
7073 if (necp_drop_all_order != 0 && policy_search_array[i]->session_order >= necp_drop_all_order) {
7074 // We've hit a drop all rule
7075 break;
7076 }
7077 if (skip_session_order && policy_search_array[i]->session_order >= skip_session_order) {
7078 // Done skipping
7079 skip_order = 0;
7080 skip_session_order = 0;
7081 }
7082 if (skip_order) {
7083 if (policy_search_array[i]->order < skip_order) {
7084 // Skip this policy
7085 continue;
7086 } else {
7087 // Done skipping
7088 skip_order = 0;
7089 skip_session_order = 0;
7090 }
7091 } else if (skip_session_order) {
7092 // Skip this policy
7093 continue;
7094 }
7095 if (necp_socket_check_policy(policy_search_array[i], info->application_id, info->real_application_id, info->cred_result, info->account_id, domain_substring, domain_dot_count, info->pid, info->uid, info->bound_interface_index, info->traffic_class, info->protocol, &info->local_addr, &info->remote_addr, proc)) {
7096 if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_SOCKET_FILTER) {
7097 if (return_filter && *return_filter == 0) {
7098 *return_filter = policy_search_array[i]->result_parameter.filter_control_unit;
7099 if (necp_debug > 1) {
7100 NECPLOG(LOG_DEBUG, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) Filter %d", info->application_id, info->real_application_id, info->bound_interface_index, info->protocol, policy_search_array[i]->result_parameter.filter_control_unit);
7101 }
7102 }
7103 continue;
7104 } else if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_ROUTE_RULES) {
7105 if (return_route_rule_id && route_rule_id_count < MAX_AGGREGATE_ROUTE_RULES) {
7106 route_rule_id_array[route_rule_id_count++] = policy_search_array[i]->result_parameter.route_rule_id;
7107 if (necp_debug > 1) {
7108 NECPLOG(LOG_DEBUG, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) Route Rule %d", info->application_id, info->real_application_id, info->bound_interface_index, info->protocol, policy_search_array[i]->result_parameter.route_rule_id);
7109 }
7110 }
7111 continue;
7112 } else if (necp_kernel_socket_result_is_trigger_service_type(policy_search_array[i])) {
7113 if (return_service_action && *return_service_action == 0) {
7114 *return_service_action = policy_search_array[i]->result;
7115 if (necp_debug > 1) {
7116 NECPLOG(LOG_DEBUG, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) Service Action %d", info->application_id, info->real_application_id, info->bound_interface_index, info->protocol, policy_search_array[i]->result);
7117 }
7118 }
7119 if (return_service && return_service->identifier == 0) {
7120 return_service->identifier = policy_search_array[i]->result_parameter.service.identifier;
7121 return_service->data = policy_search_array[i]->result_parameter.service.data;
7122 if (necp_debug > 1) {
7123 NECPLOG(LOG_DEBUG, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) Service ID %d Data %d", info->application_id, info->real_application_id, info->bound_interface_index, info->protocol, policy_search_array[i]->result_parameter.service.identifier, policy_search_array[i]->result_parameter.service.data);
7124 }
7125 }
7126 continue;
7127 } else if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_USE_NETAGENT) {
7128 if (return_netagent_array != NULL &&
7129 netagent_cursor < netagent_array_count) {
7130 return_netagent_array[netagent_cursor] = policy_search_array[i]->result_parameter.netagent_id;
7131 netagent_cursor++;
7132 if (necp_debug > 1) {
7133 NECPLOG(LOG_DEBUG, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) Use Netagent %d", info->application_id, info->real_application_id, info->bound_interface_index, info->protocol, policy_search_array[i]->result_parameter.netagent_id);
7134 }
7135 }
7136 continue;
7137 }
7138
7139 // Matched policy is a skip. Do skip and continue.
7140 if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
7141 skip_order = policy_search_array[i]->result_parameter.skip_policy_order;
7142 skip_session_order = policy_search_array[i]->session_order + 1;
7143 continue;
7144 }
7145
7146 // Passed all tests, found a match
7147 matched_policy = policy_search_array[i];
7148 break;
7149 }
7150 }
7151 }
7152
7153 if (route_rule_id_count == 1) {
7154 *return_route_rule_id = route_rule_id_array[0];
7155 } else if (route_rule_id_count > 1) {
7156 *return_route_rule_id = necp_create_aggregate_route_rule(route_rule_id_array);
7157 }
7158 return (matched_policy);
7159 }
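/*
 * The search walks the NULL-terminated, priority-ordered array once.
 * Non-terminal results are collected along the way (the first socket filter
 * and the first trigger service win; route rules and netagents accumulate up
 * to their array limits) and the walk continues. A SKIP result skips
 * subsequent policies of the same session whose order is below
 * skip_policy_order. The first remaining terminal result is returned as the
 * match, and reaching a session at or above necp_drop_all_order ends the
 * search. Multiple collected route rules are merged into a single aggregate
 * rule ID before returning.
 */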
7160
7161 static bool
7162 necp_socket_uses_interface(struct inpcb *inp, u_int32_t interface_index)
7163 {
7164 bool found_match = FALSE;
7165 errno_t result = 0;
7166 ifaddr_t *addresses = NULL;
7167 union necp_sockaddr_union address_storage;
7168 int i;
7169 int family = AF_INET;
7170 ifnet_t interface = ifindex2ifnet[interface_index];
7171
7172 if (inp == NULL || interface == NULL) {
7173 return (FALSE);
7174 }
7175
7176 if (inp->inp_vflag & INP_IPV4) {
7177 family = AF_INET;
7178 } else if (inp->inp_vflag & INP_IPV6) {
7179 family = AF_INET6;
7180 }
7181
7182 result = ifnet_get_address_list_family(interface, &addresses, family);
7183 if (result != 0) {
7184 NECPLOG(LOG_ERR, "Failed to get address list for %s%d", ifnet_name(interface), ifnet_unit(interface));
7185 return (FALSE);
7186 }
7187
7188 for (i = 0; addresses[i] != NULL; i++) {
7189 if (ifaddr_address(addresses[i], &address_storage.sa, sizeof(address_storage)) == 0) {
7190 if (family == AF_INET) {
7191 if (memcmp(&address_storage.sin.sin_addr, &inp->inp_laddr, sizeof(inp->inp_laddr)) == 0) {
7192 found_match = TRUE;
7193 goto done;
7194 }
7195 } else if (family == AF_INET6) {
7196 if (memcmp(&address_storage.sin6.sin6_addr, &inp->in6p_laddr, sizeof(inp->in6p_laddr)) == 0) {
7197 found_match = TRUE;
7198 goto done;
7199 }
7200 }
7201 }
7202 }
7203
7204 done:
7205 ifnet_free_address_list(addresses);
7206 addresses = NULL;
7207 return (found_match);
7208 }
7209
7210 static inline bool
7211 necp_socket_is_connected(struct inpcb *inp)
7212 {
7213 return (inp->inp_socket->so_state & (SS_ISCONNECTING | SS_ISCONNECTED | SS_ISDISCONNECTING));
7214 }
7215
7216 static inline bool
7217 necp_socket_bypass(struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr, struct inpcb *inp)
7218 {
7219
7220 if (necp_pass_loopback > 0 && necp_is_loopback(override_local_addr, override_remote_addr, inp, NULL)) {
7221 return (true);
7222 } else if (necp_is_intcoproc(inp, NULL)) {
7223 return (true);
7224 }
7225
7226 return (false);
7227 }
7228
7229 necp_kernel_policy_id
7230 necp_socket_find_policy_match(struct inpcb *inp, struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr, u_int32_t override_bound_interface)
7231 {
7232 struct socket *so = NULL;
7233 necp_kernel_policy_filter filter_control_unit = 0;
7234 u_int32_t route_rule_id = 0;
7235 struct necp_kernel_socket_policy *matched_policy = NULL;
7236 necp_kernel_policy_id matched_policy_id = NECP_KERNEL_POLICY_ID_NONE;
7237 necp_kernel_policy_result service_action = 0;
7238 necp_kernel_policy_service service = { 0, 0 };
7239
7240 u_int32_t netagent_ids[NECP_MAX_NETAGENTS];
7241 memset(&netagent_ids, 0, sizeof(netagent_ids));
7242 int netagent_cursor;
7243
7244 struct necp_socket_info info;
7245
7246 if (inp == NULL) {
7247 return (NECP_KERNEL_POLICY_ID_NONE);
7248 }
7249
7250 // Ignore invalid addresses
7251 if (override_local_addr != NULL &&
7252 !necp_address_is_valid(override_local_addr)) {
7253 override_local_addr = NULL;
7254 }
7255 if (override_remote_addr != NULL &&
7256 !necp_address_is_valid(override_remote_addr)) {
7257 override_remote_addr = NULL;
7258 }
7259
7260 so = inp->inp_socket;
7261
7262 // Don't lock. Possible race condition, but we don't want the performance hit.
7263 if (necp_kernel_socket_policies_count == 0 ||
7264 (!(inp->inp_flags2 & INP2_WANT_APP_POLICY) && necp_kernel_socket_policies_non_app_count == 0)) {
7265 if (necp_drop_all_order > 0) {
7266 inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
7267 inp->inp_policyresult.policy_gencount = 0;
7268 inp->inp_policyresult.app_id = 0;
7269 inp->inp_policyresult.flowhash = 0;
7270 inp->inp_policyresult.results.filter_control_unit = 0;
7271 inp->inp_policyresult.results.route_rule_id = 0;
7272 if (necp_socket_bypass(override_local_addr, override_remote_addr, inp)) {
7273 inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_PASS;
7274 } else {
7275 inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_DROP;
7276 }
7277 }
7278 return (NECP_KERNEL_POLICY_ID_NONE);
7279 }
7280
7281 // Check for loopback exception
7282 if (necp_socket_bypass(override_local_addr, override_remote_addr, inp)) {
7283 // Mark socket as a pass
7284 inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
7285 inp->inp_policyresult.policy_gencount = 0;
7286 inp->inp_policyresult.app_id = 0;
7287 inp->inp_policyresult.flowhash = 0;
7288 inp->inp_policyresult.results.filter_control_unit = 0;
7289 inp->inp_policyresult.results.route_rule_id = 0;
7290 inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_PASS;
7291 return (NECP_KERNEL_POLICY_ID_NONE);
7292 }
7293
7294 // Lock
7295 lck_rw_lock_shared(&necp_kernel_policy_lock);
7296
7297 necp_socket_fillout_info_locked(inp, override_local_addr, override_remote_addr, override_bound_interface, &info);
7298 inp->inp_policyresult.app_id = info.application_id;
7299
7300 // Check info
7301 u_int32_t flowhash = necp_socket_calc_flowhash_locked(&info);
7302 if (inp->inp_policyresult.policy_id != NECP_KERNEL_POLICY_ID_NONE &&
7303 inp->inp_policyresult.policy_gencount == necp_kernel_socket_policies_gencount &&
7304 inp->inp_policyresult.flowhash == flowhash) {
7305 // If already matched this socket on this generation of table, skip
7306
7307 // Unlock
7308 lck_rw_done(&necp_kernel_policy_lock);
7309
7310 return (inp->inp_policyresult.policy_id);
7311 }
7312
7313 // Match socket to policy
7314 matched_policy = necp_socket_find_policy_match_with_info_locked(necp_kernel_socket_policies_map[NECP_SOCKET_MAP_APP_ID_TO_BUCKET(info.application_id)], &info, &filter_control_unit, &route_rule_id, &service_action, &service, netagent_ids, NECP_MAX_NETAGENTS, current_proc());
7315 // If the socket matched a scoped service policy, mark as Drop if not registered.
7316 // This covers the cases in which a service is required (on demand) but hasn't started yet.
7317 if ((service_action == NECP_KERNEL_POLICY_RESULT_TRIGGER_SCOPED ||
7318 service_action == NECP_KERNEL_POLICY_RESULT_NO_TRIGGER_SCOPED) &&
7319 service.identifier != 0 &&
7320 service.identifier != NECP_NULL_SERVICE_ID) {
7321 bool service_is_registered = FALSE;
7322 struct necp_service_registration *service_registration = NULL;
7323 LIST_FOREACH(service_registration, &necp_registered_service_list, kernel_chain) {
7324 if (service.identifier == service_registration->service_id) {
7325 service_is_registered = TRUE;
7326 break;
7327 }
7328 }
7329 if (!service_is_registered) {
7330 // Mark socket as a drop if service is not registered
7331 inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
7332 inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount;
7333 inp->inp_policyresult.flowhash = flowhash;
7334 inp->inp_policyresult.results.filter_control_unit = 0;
7335 inp->inp_policyresult.results.route_rule_id = 0;
7336 inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_DROP;
7337
7338 if (necp_debug > 1) {
7339 NECPLOG(LOG_DEBUG, "Socket Policy: (BoundInterface %d Proto %d) Dropping packet because service is not registered", info.bound_interface_index, info.protocol);
7340 }
7341
7342 // Unlock
7343 lck_rw_done(&necp_kernel_policy_lock);
7344 return (NECP_KERNEL_POLICY_ID_NONE);
7345 }
7346 }
7347 // Verify netagents
7348 for (netagent_cursor = 0; netagent_cursor < NECP_MAX_NETAGENTS; netagent_cursor++) {
7349 struct necp_uuid_id_mapping *mapping = NULL;
7350 u_int32_t netagent_id = netagent_ids[netagent_cursor];
7351 if (netagent_id == 0) {
7352 break;
7353 }
7354 mapping = necp_uuid_lookup_uuid_with_service_id_locked(netagent_id);
7355 if (mapping != NULL) {
7356 u_int32_t agent_flags = 0;
7357 agent_flags = netagent_get_flags(mapping->uuid);
7358 if (agent_flags & NETAGENT_FLAG_REGISTERED) {
7359 if (agent_flags & NETAGENT_FLAG_ACTIVE) {
7360 continue;
7361 } else if ((agent_flags & NETAGENT_FLAG_VOLUNTARY) == 0) {
7362 if (agent_flags & NETAGENT_FLAG_KERNEL_ACTIVATED) {
7363 int trigger_error = 0;
7364 trigger_error = netagent_kernel_trigger(mapping->uuid);
7365 if (necp_debug > 1) {
7366 NECPLOG(LOG_DEBUG, "Socket Policy: Triggering inactive agent, error %d", trigger_error);
7367 }
7368 }
7369
7370 // Mark socket as a drop if required agent is not active
7371 inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
7372 inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount;
7373 inp->inp_policyresult.flowhash = flowhash;
7374 inp->inp_policyresult.results.filter_control_unit = 0;
7375 inp->inp_policyresult.results.route_rule_id = 0;
7376 inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_DROP;
7377
7378 if (necp_debug > 1) {
7379 NECPLOG(LOG_DEBUG, "Socket Policy: (BoundInterface %d Proto %d) Dropping packet because agent is not active", info.bound_interface_index, info.protocol);
7380 }
7381
7382 // Unlock
7383 lck_rw_done(&necp_kernel_policy_lock);
7384 return (NECP_KERNEL_POLICY_ID_NONE);
7385 }
7386 }
7387 }
7388 }
7389 if (matched_policy) {
7390 matched_policy_id = matched_policy->id;
7391 inp->inp_policyresult.policy_id = matched_policy->id;
7392 inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount;
7393 inp->inp_policyresult.flowhash = flowhash;
7394 inp->inp_policyresult.results.filter_control_unit = filter_control_unit;
7395 inp->inp_policyresult.results.route_rule_id = route_rule_id;
7396 inp->inp_policyresult.results.result = matched_policy->result;
7397 memcpy(&inp->inp_policyresult.results.result_parameter, &matched_policy->result_parameter, sizeof(matched_policy->result_parameter));
7398
7399 if (necp_socket_is_connected(inp) &&
7400 (matched_policy->result == NECP_KERNEL_POLICY_RESULT_DROP ||
7401 (matched_policy->result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL && !necp_socket_uses_interface(inp, matched_policy->result_parameter.tunnel_interface_index)))) {
7402 if (necp_debug) {
7403 NECPLOG(LOG_DEBUG, "Marking socket in state %d as defunct", so->so_state);
7404 }
7405 sosetdefunct(current_proc(), so, SHUTDOWN_SOCKET_LEVEL_NECP | SHUTDOWN_SOCKET_LEVEL_DISCONNECT_ALL, TRUE);
7406 } else if (necp_socket_is_connected(inp) &&
7407 matched_policy->result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL &&
7408 info.protocol == IPPROTO_TCP) {
7409 // Reset MSS on TCP socket if tunnel policy changes
7410 tcp_mtudisc(inp, 0);
7411 }
7412
7413 if (necp_debug > 1) {
7414 NECPLOG(LOG_DEBUG, "Socket Policy: %p (BoundInterface %d Proto %d) Policy %d Result %d Parameter %d", inp->inp_socket, info.bound_interface_index, info.protocol, matched_policy->id, matched_policy->result, matched_policy->result_parameter.tunnel_interface_index);
7415 }
7416 } else if (necp_drop_all_order > 0) {
7417 // Mark socket as a drop if drop-all is set
7418 inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
7419 inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount;
7420 inp->inp_policyresult.flowhash = flowhash;
7421 inp->inp_policyresult.results.filter_control_unit = 0;
7422 inp->inp_policyresult.results.route_rule_id = 0;
7423 inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_DROP;
7424 } else {
7425 // Mark non-matching socket so we don't re-check it
7426 inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
7427 inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount;
7428 inp->inp_policyresult.flowhash = flowhash;
7429 inp->inp_policyresult.results.filter_control_unit = filter_control_unit; // We may have matched a filter, so mark it!
7430 inp->inp_policyresult.results.route_rule_id = route_rule_id; // We may have matched a route rule, so mark it!
7431 inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_NONE;
7432 }
7433
7434 // Unlock
7435 lck_rw_done(&necp_kernel_policy_lock);
7436
7437 return (matched_policy_id);
7438 }
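
/*
 * The fast path above reuses a previously computed result instead of walking
 * the policy table again. A sketch of the reuse test, mirroring the checks in
 * this function:
 *
 *	reuse_cached_result =
 *	    inp->inp_policyresult.policy_id != NECP_KERNEL_POLICY_ID_NONE &&
 *	    inp->inp_policyresult.policy_gencount == necp_kernel_socket_policies_gencount &&
 *	    inp->inp_policyresult.flowhash == necp_socket_calc_flowhash_locked(&info);
 *
 * A policy table update is expected to change the generation count, and a
 * change in the socket's relevant addresses or interface changes the flow
 * hash, so either event forces a fresh match.
 */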
7439
7440 static bool
7441 necp_ip_output_check_policy(struct necp_kernel_ip_output_policy *kernel_policy, necp_kernel_policy_id socket_policy_id, u_int32_t bound_interface_index, u_int32_t last_interface_index, u_int16_t protocol, union necp_sockaddr_union *local, union necp_sockaddr_union *remote)
7442 {
7443 if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES)) {
7444 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
7445 u_int32_t cond_bound_interface_index = kernel_policy->cond_bound_interface ? kernel_policy->cond_bound_interface->if_index : 0;
7446 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
7447 if (bound_interface_index == cond_bound_interface_index) {
7448 // No match, matches forbidden interface
7449 return (FALSE);
7450 }
7451 } else {
7452 if (bound_interface_index != cond_bound_interface_index) {
7453 // No match, does not match required interface
7454 return (FALSE);
7455 }
7456 }
7457 } else {
7458 if (bound_interface_index != 0) {
7459 // No match, requires a non-bound packet
7460 return (FALSE);
7461 }
7462 }
7463 }
7464
7465 if (kernel_policy->condition_mask == 0) {
7466 return (TRUE);
7467 }
7468
7469 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID) {
7470 if (socket_policy_id != kernel_policy->cond_policy_id) {
7471 // No match, does not match required id
7472 return (FALSE);
7473 }
7474 }
7475
7476 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LAST_INTERFACE) {
7477 if (last_interface_index != kernel_policy->cond_last_interface_index) {
7478 return (FALSE);
7479 }
7480 }
7481
7482 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
7483 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
7484 if (protocol == kernel_policy->cond_protocol) {
7485 // No match, matches forbidden protocol
7486 return (FALSE);
7487 }
7488 } else {
7489 if (protocol != kernel_policy->cond_protocol) {
7490 // No match, does not match required protocol
7491 return (FALSE);
7492 }
7493 }
7494 }
7495
7496 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
7497 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
7498 bool inRange = necp_is_addr_in_range((struct sockaddr *)local, (struct sockaddr *)&kernel_policy->cond_local_start, (struct sockaddr *)&kernel_policy->cond_local_end);
7499 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
7500 if (inRange) {
7501 return (FALSE);
7502 }
7503 } else {
7504 if (!inRange) {
7505 return (FALSE);
7506 }
7507 }
7508 } else if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
7509 bool inSubnet = necp_is_addr_in_subnet((struct sockaddr *)local, (struct sockaddr *)&kernel_policy->cond_local_start, kernel_policy->cond_local_prefix);
7510 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
7511 if (inSubnet) {
7512 return (FALSE);
7513 }
7514 } else {
7515 if (!inSubnet) {
7516 return (FALSE);
7517 }
7518 }
7519 }
7520 }
7521
7522 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
7523 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
7524 bool inRange = necp_is_addr_in_range((struct sockaddr *)remote, (struct sockaddr *)&kernel_policy->cond_remote_start, (struct sockaddr *)&kernel_policy->cond_remote_end);
7525 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
7526 if (inRange) {
7527 return (FALSE);
7528 }
7529 } else {
7530 if (!inRange) {
7531 return (FALSE);
7532 }
7533 }
7534 } else if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
7535 bool inSubnet = necp_is_addr_in_subnet((struct sockaddr *)remote, (struct sockaddr *)&kernel_policy->cond_remote_start, kernel_policy->cond_remote_prefix);
7536 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
7537 if (inSubnet) {
7538 return (FALSE);
7539 }
7540 } else {
7541 if (!inSubnet) {
7542 return (FALSE);
7543 }
7544 }
7545 }
7546 }
7547
7548 return (TRUE);
7549 }
7550
7551 static inline struct necp_kernel_ip_output_policy *
7552 necp_ip_output_find_policy_match_locked(necp_kernel_policy_id socket_policy_id, u_int32_t bound_interface_index, u_int32_t last_interface_index, u_int16_t protocol, union necp_sockaddr_union *local_addr, union necp_sockaddr_union *remote_addr)
7553 {
7554 u_int32_t skip_order = 0;
7555 u_int32_t skip_session_order = 0;
7556 int i;
7557 struct necp_kernel_ip_output_policy *matched_policy = NULL;
7558 struct necp_kernel_ip_output_policy **policy_search_array = necp_kernel_ip_output_policies_map[NECP_IP_OUTPUT_MAP_ID_TO_BUCKET(socket_policy_id)];
7559 if (policy_search_array != NULL) {
7560 for (i = 0; policy_search_array[i] != NULL; i++) {
7561 if (necp_drop_all_order != 0 && policy_search_array[i]->session_order >= necp_drop_all_order) {
7562 // We've hit a drop all rule
7563 break;
7564 }
7565 if (skip_session_order && policy_search_array[i]->session_order >= skip_session_order) {
7566 // Done skipping
7567 skip_order = 0;
7568 skip_session_order = 0;
7569 }
7570 if (skip_order) {
7571 if (policy_search_array[i]->order < skip_order) {
7572 // Skip this policy
7573 continue;
7574 } else {
7575 // Done skipping
7576 skip_order = 0;
7577 skip_session_order = 0;
7578 }
7579 } else if (skip_session_order) {
7580 // Skip this policy
7581 continue;
7582 }
7583 if (necp_ip_output_check_policy(policy_search_array[i], socket_policy_id, bound_interface_index, last_interface_index, protocol, local_addr, remote_addr)) {
7584 // Passed all tests, found a match
7585 matched_policy = policy_search_array[i];
7586
7587 if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
7588 skip_order = policy_search_array[i]->result_parameter.skip_policy_order;
7589 skip_session_order = policy_search_array[i]->session_order + 1;
7590 continue;
7591 }
7592
7593 break;
7594 }
7595 }
7596 }
7597
7598 return (matched_policy);
7599 }
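
/*
 * How a SKIP result drives the loop above, as a small worked example
 * (session_order/order pairs with illustrative values):
 *
 *	session 10, order 100: SKIP to order 300
 *	session 10, order 200: skipped (order 200 < skip_order 300)
 *	session 10, order 300: evaluated (order reaches the skip target)
 *	session 20, order 100: evaluated (session_order >= skip_session_order)
 *
 * In other words, a SKIP match bypasses lower-ordered policies until either
 * the skip target order is reached or a later session begins, whichever comes
 * first. The socket policy loop earlier in this file uses the same mechanism.
 */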
7600
7601 static inline bool
7602 necp_output_bypass(struct mbuf *packet)
7603 {
7604 if (necp_pass_loopback > 0 && necp_is_loopback(NULL, NULL, NULL, packet)) {
7605 return (true);
7606 }
7607 if (necp_pass_keepalives > 0 && necp_get_is_keepalive_from_packet(packet)) {
7608 return (true);
7609 }
7610 if (necp_is_intcoproc(NULL, packet)) {
7611 return (true);
7612 }
7613 return (false);
7614 }
7615
7616 necp_kernel_policy_id
7617 necp_ip_output_find_policy_match(struct mbuf *packet, int flags, struct ip_out_args *ipoa, necp_kernel_policy_result *result, necp_kernel_policy_result_parameter *result_parameter)
7618 {
7619 struct ip *ip = NULL;
7620 int hlen = sizeof(struct ip);
7621 necp_kernel_policy_id socket_policy_id = NECP_KERNEL_POLICY_ID_NONE;
7622 necp_kernel_policy_id matched_policy_id = NECP_KERNEL_POLICY_ID_NONE;
7623 struct necp_kernel_ip_output_policy *matched_policy = NULL;
7624 u_int16_t protocol = 0;
7625 u_int32_t bound_interface_index = 0;
7626 u_int32_t last_interface_index = 0;
7627 union necp_sockaddr_union local_addr;
7628 union necp_sockaddr_union remote_addr;
7629
7630 if (result) {
7631 *result = 0;
7632 }
7633
7634 if (result_parameter) {
7635 memset(result_parameter, 0, sizeof(*result_parameter));
7636 }
7637
7638 if (packet == NULL) {
7639 return (NECP_KERNEL_POLICY_ID_NONE);
7640 }
7641
7642 socket_policy_id = necp_get_policy_id_from_packet(packet);
7643
7644 // Exit early for an empty list
7645 // Don't lock. Possible race condition, but we don't want the performance hit.
7646 if (necp_kernel_ip_output_policies_count == 0 ||
7647 ((socket_policy_id == NECP_KERNEL_POLICY_ID_NONE) && necp_kernel_ip_output_policies_non_id_count == 0)) {
7648 if (necp_drop_all_order > 0) {
7649 matched_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
7650 if (result) {
7651 if (necp_output_bypass(packet)) {
7652 *result = NECP_KERNEL_POLICY_RESULT_PASS;
7653 } else {
7654 *result = NECP_KERNEL_POLICY_RESULT_DROP;
7655 }
7656 }
7657 }
7658
7659 return (matched_policy_id);
7660 }
7661
7662 // Check for loopback exception
7663 if (necp_output_bypass(packet)) {
7664 matched_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
7665 if (result) {
7666 *result = NECP_KERNEL_POLICY_RESULT_PASS;
7667 }
7668 return (matched_policy_id);
7669 }
7670
7671 last_interface_index = necp_get_last_interface_index_from_packet(packet);
7672
7673 // Process packet to get relevant fields
7674 ip = mtod(packet, struct ip *);
7675 #ifdef _IP_VHL
7676 hlen = _IP_VHL_HL(ip->ip_vhl) << 2;
7677 #else
7678 hlen = ip->ip_hl << 2;
7679 #endif
7680
7681 protocol = ip->ip_p;
7682
7683 if ((flags & IP_OUTARGS) && (ipoa != NULL) &&
7684 (ipoa->ipoa_flags & IPOAF_BOUND_IF) &&
7685 ipoa->ipoa_boundif != IFSCOPE_NONE) {
7686 bound_interface_index = ipoa->ipoa_boundif;
7687 }
7688
7689 local_addr.sin.sin_family = AF_INET;
7690 local_addr.sin.sin_len = sizeof(struct sockaddr_in);
7691 memcpy(&local_addr.sin.sin_addr, &ip->ip_src, sizeof(ip->ip_src));
7692
7693 remote_addr.sin.sin_family = AF_INET;
7694 remote_addr.sin.sin_len = sizeof(struct sockaddr_in);
7695 memcpy(&((struct sockaddr_in *)&remote_addr)->sin_addr, &ip->ip_dst, sizeof(ip->ip_dst));
7696
7697 switch (protocol) {
7698 case IPPROTO_TCP: {
7699 struct tcphdr th;
7700 if ((int)(hlen + sizeof(th)) <= packet->m_pkthdr.len) {
7701 m_copydata(packet, hlen, sizeof(th), (u_int8_t *)&th);
7702 ((struct sockaddr_in *)&local_addr)->sin_port = th.th_sport;
7703 ((struct sockaddr_in *)&remote_addr)->sin_port = th.th_dport;
7704 }
7705 break;
7706 }
7707 case IPPROTO_UDP: {
7708 struct udphdr uh;
7709 if ((int)(hlen + sizeof(uh)) <= packet->m_pkthdr.len) {
7710 m_copydata(packet, hlen, sizeof(uh), (u_int8_t *)&uh);
7711 ((struct sockaddr_in *)&local_addr)->sin_port = uh.uh_sport;
7712 ((struct sockaddr_in *)&remote_addr)->sin_port = uh.uh_dport;
7713 }
7714 break;
7715 }
7716 default: {
7717 ((struct sockaddr_in *)&local_addr)->sin_port = 0;
7718 ((struct sockaddr_in *)&remote_addr)->sin_port = 0;
7719 break;
7720 }
7721 }
7722
7723 // Match packet to policy
7724 lck_rw_lock_shared(&necp_kernel_policy_lock);
7725 matched_policy = necp_ip_output_find_policy_match_locked(socket_policy_id, bound_interface_index, last_interface_index, protocol, &local_addr, &remote_addr);
7726 if (matched_policy) {
7727 matched_policy_id = matched_policy->id;
7728 if (result) {
7729 *result = matched_policy->result;
7730 }
7731
7732 if (result_parameter) {
7733 memcpy(result_parameter, &matched_policy->result_parameter, sizeof(matched_policy->result_parameter));
7734 }
7735
7736 if (necp_debug > 1) {
7737 NECPLOG(LOG_DEBUG, "IP Output: (ID %d BoundInterface %d LastInterface %d Proto %d) Policy %d Result %d Parameter %d", socket_policy_id, bound_interface_index, last_interface_index, protocol, matched_policy->id, matched_policy->result, matched_policy->result_parameter.tunnel_interface_index);
7738 }
7739 } else if (necp_drop_all_order > 0) {
7740 matched_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
7741 if (result) {
7742 *result = NECP_KERNEL_POLICY_RESULT_DROP;
7743 }
7744 }
7745
7746 lck_rw_done(&necp_kernel_policy_lock);
7747
7748 return (matched_policy_id);
7749 }
7750
7751 necp_kernel_policy_id
7752 necp_ip6_output_find_policy_match(struct mbuf *packet, int flags, struct ip6_out_args *ip6oa, necp_kernel_policy_result *result, necp_kernel_policy_result_parameter *result_parameter)
7753 {
7754 struct ip6_hdr *ip6 = NULL;
7755 int next = -1;
7756 int offset = 0;
7757 necp_kernel_policy_id socket_policy_id = NECP_KERNEL_POLICY_ID_NONE;
7758 necp_kernel_policy_id matched_policy_id = NECP_KERNEL_POLICY_ID_NONE;
7759 struct necp_kernel_ip_output_policy *matched_policy = NULL;
7760 u_int16_t protocol = 0;
7761 u_int32_t bound_interface_index = 0;
7762 u_int32_t last_interface_index = 0;
7763 union necp_sockaddr_union local_addr;
7764 union necp_sockaddr_union remote_addr;
7765
7766 if (result) {
7767 *result = 0;
7768 }
7769
7770 if (result_parameter) {
7771 memset(result_parameter, 0, sizeof(*result_parameter));
7772 }
7773
7774 if (packet == NULL) {
7775 return (NECP_KERNEL_POLICY_ID_NONE);
7776 }
7777
7778 socket_policy_id = necp_get_policy_id_from_packet(packet);
7779
7780 // Exit early for an empty list
7781 // Don't lock. Possible race condition, but we don't want the performance hit.
7782 if (necp_kernel_ip_output_policies_count == 0 ||
7783 ((socket_policy_id == NECP_KERNEL_POLICY_ID_NONE) && necp_kernel_ip_output_policies_non_id_count == 0)) {
7784 if (necp_drop_all_order > 0) {
7785 matched_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
7786 if (result) {
7787 if (necp_output_bypass(packet)) {
7788 *result = NECP_KERNEL_POLICY_RESULT_PASS;
7789 } else {
7790 *result = NECP_KERNEL_POLICY_RESULT_DROP;
7791 }
7792 }
7793 }
7794
7795 return (matched_policy_id);
7796 }
7797
7798 // Check for loopback exception
7799 if (necp_output_bypass(packet)) {
7800 matched_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
7801 if (result) {
7802 *result = NECP_KERNEL_POLICY_RESULT_PASS;
7803 }
7804 return (matched_policy_id);
7805 }
7806
7807 last_interface_index = necp_get_last_interface_index_from_packet(packet);
7808
7809 // Process packet to get relevant fields
7810 ip6 = mtod(packet, struct ip6_hdr *);
7811
7812 if ((flags & IPV6_OUTARGS) && (ip6oa != NULL) &&
7813 (ip6oa->ip6oa_flags & IP6OAF_BOUND_IF) &&
7814 ip6oa->ip6oa_boundif != IFSCOPE_NONE) {
7815 bound_interface_index = ip6oa->ip6oa_boundif;
7816 }
7817
7818 ((struct sockaddr_in6 *)&local_addr)->sin6_family = AF_INET6;
7819 ((struct sockaddr_in6 *)&local_addr)->sin6_len = sizeof(struct sockaddr_in6);
7820 memcpy(&((struct sockaddr_in6 *)&local_addr)->sin6_addr, &ip6->ip6_src, sizeof(ip6->ip6_src));
7821
7822 ((struct sockaddr_in6 *)&remote_addr)->sin6_family = AF_INET6;
7823 ((struct sockaddr_in6 *)&remote_addr)->sin6_len = sizeof(struct sockaddr_in6);
7824 memcpy(&((struct sockaddr_in6 *)&remote_addr)->sin6_addr, &ip6->ip6_dst, sizeof(ip6->ip6_dst));
7825
7826 offset = ip6_lasthdr(packet, 0, IPPROTO_IPV6, &next);
7827 if (offset >= 0 && packet->m_pkthdr.len >= offset) {
7828 protocol = next;
7829 switch (protocol) {
7830 case IPPROTO_TCP: {
7831 struct tcphdr th;
7832 if ((int)(offset + sizeof(th)) <= packet->m_pkthdr.len) {
7833 m_copydata(packet, offset, sizeof(th), (u_int8_t *)&th);
7834 ((struct sockaddr_in6 *)&local_addr)->sin6_port = th.th_sport;
7835 ((struct sockaddr_in6 *)&remote_addr)->sin6_port = th.th_dport;
7836 }
7837 break;
7838 }
7839 case IPPROTO_UDP: {
7840 struct udphdr uh;
7841 if ((int)(offset + sizeof(uh)) <= packet->m_pkthdr.len) {
7842 m_copydata(packet, offset, sizeof(uh), (u_int8_t *)&uh);
7843 ((struct sockaddr_in6 *)&local_addr)->sin6_port = uh.uh_sport;
7844 ((struct sockaddr_in6 *)&remote_addr)->sin6_port = uh.uh_dport;
7845 }
7846 break;
7847 }
7848 default: {
7849 ((struct sockaddr_in6 *)&local_addr)->sin6_port = 0;
7850 ((struct sockaddr_in6 *)&remote_addr)->sin6_port = 0;
7851 break;
7852 }
7853 }
7854 }
7855
7856 // Match packet to policy
7857 lck_rw_lock_shared(&necp_kernel_policy_lock);
7858 matched_policy = necp_ip_output_find_policy_match_locked(socket_policy_id, bound_interface_index, last_interface_index, protocol, &local_addr, &remote_addr);
7859 if (matched_policy) {
7860 matched_policy_id = matched_policy->id;
7861 if (result) {
7862 *result = matched_policy->result;
7863 }
7864
7865 if (result_parameter) {
7866 memcpy(result_parameter, &matched_policy->result_parameter, sizeof(matched_policy->result_parameter));
7867 }
7868
7869 if (necp_debug > 1) {
7870 NECPLOG(LOG_DEBUG, "IP6 Output: (ID %d BoundInterface %d LastInterface %d Proto %d) Policy %d Result %d Parameter %d", socket_policy_id, bound_interface_index, last_interface_index, protocol, matched_policy->id, matched_policy->result, matched_policy->result_parameter.tunnel_interface_index);
7871 }
7872 } else if (necp_drop_all_order > 0) {
7873 matched_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
7874 if (result) {
7875 *result = NECP_KERNEL_POLICY_RESULT_DROP;
7876 }
7877 }
7878
7879 lck_rw_done(&necp_kernel_policy_lock);
7880
7881 return (matched_policy_id);
7882 }
7883
7884 // Utilities
7885 static bool
7886 necp_is_addr_in_range(struct sockaddr *addr, struct sockaddr *range_start, struct sockaddr *range_end)
7887 {
7888 int cmp = 0;
7889
7890 if (addr == NULL || range_start == NULL || range_end == NULL) {
7891 return (FALSE);
7892 }
7893
7894 /* Must be greater than or equal to start */
7895 cmp = necp_addr_compare(addr, range_start, 1);
7896 if (cmp != 0 && cmp != 1) {
7897 return (FALSE);
7898 }
7899
7900 /* Must be less than or equal to end */
7901 cmp = necp_addr_compare(addr, range_end, 1);
7902 if (cmp != 0 && cmp != -1) {
7903 return (FALSE);
7904 }
7905
7906 return (TRUE);
7907 }
7908
7909 static bool
7910 necp_is_range_in_range(struct sockaddr *inner_range_start, struct sockaddr *inner_range_end, struct sockaddr *range_start, struct sockaddr *range_end)
7911 {
7912 int cmp = 0;
7913
7914 if (inner_range_start == NULL || inner_range_end == NULL || range_start == NULL || range_end == NULL) {
7915 return (FALSE);
7916 }
7917
7918 /* Must be greater than or equal to start */
7919 cmp = necp_addr_compare(inner_range_start, range_start, 1);
7920 if (cmp != 0 && cmp != 1) {
7921 return (FALSE);
7922 }
7923
7924 /* Must be less than or equal to end */
7925 cmp = necp_addr_compare(inner_range_end, range_end, 1);
7926 if (cmp != 0 && cmp != -1) {
7927 return (FALSE);
7928 }
7929
7930 return (TRUE);
7931 }
7932
7933 static bool
7934 necp_is_addr_in_subnet(struct sockaddr *addr, struct sockaddr *subnet_addr, u_int8_t subnet_prefix)
7935 {
7936 if (addr == NULL || subnet_addr == NULL) {
7937 return (FALSE);
7938 }
7939
7940 if (addr->sa_family != subnet_addr->sa_family || addr->sa_len != subnet_addr->sa_len) {
7941 return (FALSE);
7942 }
7943
7944 switch (addr->sa_family) {
7945 case AF_INET: {
7946 if (satosin(subnet_addr)->sin_port != 0 &&
7947 satosin(addr)->sin_port != satosin(subnet_addr)->sin_port) {
7948 return (FALSE);
7949 }
7950 return (necp_buffer_compare_with_bit_prefix((u_int8_t *)&satosin(addr)->sin_addr, (u_int8_t *)&satosin(subnet_addr)->sin_addr, subnet_prefix));
7951 }
7952 case AF_INET6: {
7953 if (satosin6(subnet_addr)->sin6_port != 0 &&
7954 satosin6(addr)->sin6_port != satosin6(subnet_addr)->sin6_port) {
7955 return (FALSE);
7956 }
7957 if (satosin6(addr)->sin6_scope_id &&
7958 satosin6(subnet_addr)->sin6_scope_id &&
7959 satosin6(addr)->sin6_scope_id != satosin6(subnet_addr)->sin6_scope_id) {
7960 return (FALSE);
7961 }
7962 return (necp_buffer_compare_with_bit_prefix((u_int8_t *)&satosin6(addr)->sin6_addr, (u_int8_t *)&satosin6(subnet_addr)->sin6_addr, subnet_prefix));
7963 }
7964 default: {
7965 return (FALSE);
7966 }
7967 }
7968
7969 return (FALSE);
7970 }
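
/*
 * Subnet matching example (illustrative addresses): with a subnet address of
 * 10.1.16.0, a 20-bit prefix, and a zero port, the address 10.1.31.5 matches
 * (the first 20 bits agree and a zero port imposes no constraint), while
 * 10.1.32.5 does not. A non-zero port in the subnet address must be matched
 * exactly, and for AF_INET6 two differing non-zero scope IDs also prevent a
 * match.
 */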
7971
7972 /*
7973 * Return values:
7974 * -1: sa1 < sa2
7975 * 0: sa1 == sa2
7976 * 1: sa1 > sa2
7977 * 2: Not comparable or error
7978 */
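/*
 * Examples (illustrative): with check_port set, comparing 10.0.0.1 port 80
 * against 10.0.0.1 port 443 yields -1; comparing 10.0.0.2 port 80 against
 * 10.0.0.1 port 443 yields 2, because the addresses order one way and the
 * ports the other, so the pair cannot bound a range. Mismatched families or
 * lengths always yield 2.
 */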
7979 static int
7980 necp_addr_compare(struct sockaddr *sa1, struct sockaddr *sa2, int check_port)
7981 {
7982 int result = 0;
7983 int port_result = 0;
7984
7985 if (sa1->sa_family != sa2->sa_family || sa1->sa_len != sa2->sa_len) {
7986 return (2);
7987 }
7988
7989 if (sa1->sa_len == 0) {
7990 return (0);
7991 }
7992
7993 switch (sa1->sa_family) {
7994 case AF_INET: {
7995 if (sa1->sa_len != sizeof(struct sockaddr_in)) {
7996 return (2);
7997 }
7998
7999 result = memcmp(&satosin(sa1)->sin_addr.s_addr, &satosin(sa2)->sin_addr.s_addr, sizeof(satosin(sa1)->sin_addr.s_addr));
8000
8001 if (check_port) {
8002 if (satosin(sa1)->sin_port < satosin(sa2)->sin_port) {
8003 port_result = -1;
8004 } else if (satosin(sa1)->sin_port > satosin(sa2)->sin_port) {
8005 port_result = 1;
8006 }
8007
8008 if (result == 0) {
8009 result = port_result;
8010 } else if ((result > 0 && port_result < 0) || (result < 0 && port_result > 0)) {
8011 return (2);
8012 }
8013 }
8014
8015 break;
8016 }
8017 case AF_INET6: {
8018 if (sa1->sa_len != sizeof(struct sockaddr_in6)) {
8019 return (2);
8020 }
8021
8022 if (satosin6(sa1)->sin6_scope_id != satosin6(sa2)->sin6_scope_id) {
8023 return (2);
8024 }
8025
8026 result = memcmp(&satosin6(sa1)->sin6_addr.s6_addr[0], &satosin6(sa2)->sin6_addr.s6_addr[0], sizeof(struct in6_addr));
8027
8028 if (check_port) {
8029 if (satosin6(sa1)->sin6_port < satosin6(sa2)->sin6_port) {
8030 port_result = -1;
8031 } else if (satosin6(sa1)->sin6_port > satosin6(sa2)->sin6_port) {
8032 port_result = 1;
8033 }
8034
8035 if (result == 0) {
8036 result = port_result;
8037 } else if ((result > 0 && port_result < 0) || (result < 0 && port_result > 0)) {
8038 return (2);
8039 }
8040 }
8041
8042 break;
8043 }
8044 default: {
8045 result = memcmp(sa1, sa2, sa1->sa_len);
8046 break;
8047 }
8048 }
8049
8050 if (result < 0) {
8051 result = (-1);
8052 } else if (result > 0) {
8053 result = (1);
8054 }
8055
8056 return (result);
8057 }
8058
8059 static bool
8060 necp_buffer_compare_with_bit_prefix(u_int8_t *p1, u_int8_t *p2, u_int32_t bits)
8061 {
8062 u_int8_t mask;
8063
8064 /* Handle null pointers */
8065 if (p1 == NULL || p2 == NULL) {
8066 return (p1 == p2);
8067 }
8068
8069 while (bits >= 8) {
8070 if (*p1++ != *p2++) {
8071 return (FALSE);
8072 }
8073 bits -= 8;
8074 }
8075
8076 if (bits > 0) {
8077 mask = ~((1<<(8-bits))-1);
8078 if ((*p1 & mask) != (*p2 & mask)) {
8079 return (FALSE);
8080 }
8081 }
8082 return (TRUE);
8083 }
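
/*
 * Prefix comparison example: for bits = 20, the loop compares the first two
 * whole bytes, then masks the third byte with
 *
 *	mask = ~((1 << (8 - 4)) - 1) = 0xf0
 *
 * so only its top four bits must agree. For instance, 10.1.16.1 and
 * 10.1.31.254 compare equal under a 20-bit prefix (0x10 & 0xf0 == 0x1f & 0xf0),
 * while 10.1.32.1 does not (0x20 & 0xf0 differs).
 */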
8084
8085 static bool
8086 necp_update_qos_marking(struct ifnet *ifp, u_int32_t route_rule_id)
8087 {
8088 bool qos_marking = FALSE;
8089 int exception_index = 0;
8090 struct necp_route_rule *route_rule = NULL;
8091
8092 route_rule = necp_lookup_route_rule_locked(&necp_route_rules, route_rule_id);
8093 if (route_rule == NULL) {
8094 qos_marking = FALSE;
8095 goto done;
8096 }
8097
8098 qos_marking = (route_rule->default_action == NECP_ROUTE_RULE_QOS_MARKING) ? TRUE : FALSE;
8099
8100 if (ifp == NULL) {
8101 goto done;
8102 }
8103
8104 for (exception_index = 0; exception_index < MAX_ROUTE_RULE_INTERFACES; exception_index++) {
8105 if (route_rule->exception_if_indices[exception_index] == 0) {
8106 break;
8107 }
8108 if (route_rule->exception_if_actions[exception_index] != NECP_ROUTE_RULE_QOS_MARKING) {
8109 continue;
8110 }
8111 if (route_rule->exception_if_indices[exception_index] == ifp->if_index) {
8112 qos_marking = TRUE;
8113 if (necp_debug > 2) {
8114 NECPLOG(LOG_DEBUG, "QoS Marking: Interface match %d for Rule %d Allowed %d",
8115 route_rule->exception_if_indices[exception_index], route_rule_id, qos_marking);
8116 }
8117 goto done;
8118 }
8119 }
8120
8121 if ((route_rule->cellular_action == NECP_ROUTE_RULE_QOS_MARKING && IFNET_IS_CELLULAR(ifp)) ||
8122 (route_rule->wifi_action == NECP_ROUTE_RULE_QOS_MARKING && IFNET_IS_WIFI(ifp)) ||
8123 (route_rule->wired_action == NECP_ROUTE_RULE_QOS_MARKING && IFNET_IS_WIRED(ifp)) ||
8124 (route_rule->expensive_action == NECP_ROUTE_RULE_QOS_MARKING && IFNET_IS_EXPENSIVE(ifp))) {
8125 qos_marking = TRUE;
8126 if (necp_debug > 2) {
8127 NECPLOG(LOG_DEBUG, "QoS Marking: C:%d WF:%d W:%d E:%d for Rule %d Allowed %d",
8128 route_rule->cellular_action, route_rule->wifi_action, route_rule->wired_action,
8129 route_rule->expensive_action, route_rule_id, qos_marking);
8130 }
8131 goto done;
8132 }
8133 done:
8134 if (necp_debug > 1) {
8135 NECPLOG(LOG_DEBUG, "QoS Marking: Rule %d ifp %s Allowed %d",
8136 route_rule_id, ifp ? ifp->if_xname : "", qos_marking);
8137 }
8138 return (qos_marking);
8139 }
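
/*
 * QoS marking precedence in the function above: a per-interface exception
 * entry with a QOS_MARKING action enables marking immediately; otherwise the
 * interface-class actions (cellular/wifi/wired/expensive) can enable it, with
 * the rule's default_action as the starting value. For example (illustrative
 * rule), a route rule whose default_action is not QOS_MARKING, whose
 * wifi_action is NECP_ROUTE_RULE_QOS_MARKING, and whose other actions are
 * unset yields qos_marking TRUE on a Wi-Fi interface and FALSE on a wired one.
 */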
8140
8141 void
8142 necp_socket_update_qos_marking(struct inpcb *inp, struct rtentry *route, struct ifnet *interface, u_int32_t route_rule_id)
8143 {
8144 bool qos_marking = FALSE;
8145 struct ifnet *ifp = interface;
8146
8147 if (net_qos_policy_restricted == 0) {
8148 return;
8149 }
8150 if (inp->inp_socket == NULL) {
8151 return;
8152 }
8153 if ((inp->inp_socket->so_flags1 & SOF1_QOSMARKING_POLICY_OVERRIDE)) {
8154 return;
8155 }
8156 /*
8157 * This is racy but we do not want the performance hit of taking necp_kernel_policy_lock
8158 */
8159 if (inp->inp_policyresult.results.qos_marking_gencount == necp_kernel_socket_policies_gencount) {
8160 return;
8161 }
8162
8163 lck_rw_lock_shared(&necp_kernel_policy_lock);
8164
8165 if (ifp == NULL && route != NULL) {
8166 ifp = route->rt_ifp;
8167 }
8168 /*
8169 * By default, until we have an interface, do not mark, and re-evaluate the QoS marking policy later
8170 */
8171 if (ifp == NULL || route_rule_id == 0) {
8172 qos_marking = FALSE;
8173 goto done;
8174 }
8175
8176 if (ROUTE_RULE_IS_AGGREGATE(route_rule_id)) {
8177 struct necp_aggregate_route_rule *aggregate_route_rule = necp_lookup_aggregate_route_rule_locked(route_rule_id);
8178 if (aggregate_route_rule != NULL) {
8179 int index = 0;
8180 for (index = 0; index < MAX_AGGREGATE_ROUTE_RULES; index++) {
8181 u_int32_t sub_route_rule_id = aggregate_route_rule->rule_ids[index];
8182 if (sub_route_rule_id == 0) {
8183 break;
8184 }
8185 qos_marking = necp_update_qos_marking(ifp, sub_route_rule_id);
8186 if (qos_marking == TRUE) {
8187 break;
8188 }
8189 }
8190 }
8191 } else {
8192 qos_marking = necp_update_qos_marking(ifp, route_rule_id);
8193 }
8194 /*
8195 * Now that we have an interface, remember the gencount
8196 */
8197 inp->inp_policyresult.results.qos_marking_gencount = necp_kernel_socket_policies_gencount;
8198
8199 done:
8200 lck_rw_done(&necp_kernel_policy_lock);
8201
8202 if (qos_marking == TRUE) {
8203 inp->inp_socket->so_flags1 |= SOF1_QOSMARKING_ALLOWED;
8204 } else {
8205 inp->inp_socket->so_flags1 &= ~SOF1_QOSMARKING_ALLOWED;
8206 }
8207 }
8208
8209 static bool
8210 necp_route_is_allowed_inner(struct rtentry *route, struct ifnet *ifp, u_int32_t route_rule_id, u_int32_t *interface_type_denied)
8211 {
8212 bool default_is_allowed = TRUE;
8213 u_int8_t type_aggregate_action = NECP_ROUTE_RULE_NONE;
8214 int exception_index = 0;
8215 struct ifnet *delegated_ifp = NULL;
8216 struct necp_route_rule *route_rule = NULL;
8217
8218 route_rule = necp_lookup_route_rule_locked(&necp_route_rules, route_rule_id);
8219 if (route_rule == NULL) {
8220 return (TRUE);
8221 }
8222
8223 default_is_allowed = (route_rule->default_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? FALSE : TRUE;
8224 if (ifp == NULL) {
8225 ifp = route->rt_ifp;
8226 }
8227 if (ifp == NULL) {
8228 if (necp_debug > 1 && !default_is_allowed) {
8229 NECPLOG(LOG_DEBUG, "Route Allowed: No interface for route, using default for Rule %d Allowed %d", route_rule_id, default_is_allowed);
8230 }
8231 return (default_is_allowed);
8232 }
8233
8234 delegated_ifp = ifp->if_delegated.ifp;
8235 for (exception_index = 0; exception_index < MAX_ROUTE_RULE_INTERFACES; exception_index++) {
8236 if (route_rule->exception_if_indices[exception_index] == 0) {
8237 break;
8238 }
8239 if (IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(route_rule->exception_if_actions[exception_index]) == FALSE) {
8240 continue;
8241 }
8242 if (route_rule->exception_if_indices[exception_index] == ifp->if_index ||
8243 (delegated_ifp != NULL && route_rule->exception_if_indices[exception_index] == delegated_ifp->if_index)) {
8244 if (necp_debug > 1) {
8245 NECPLOG(LOG_DEBUG, "Route Allowed: Interface match %d for Rule %d Allowed %d", route_rule->exception_if_indices[exception_index], route_rule_id, ((route_rule->exception_if_actions[exception_index] == NECP_ROUTE_RULE_DENY_INTERFACE) ? FALSE : TRUE));
8246 }
8247 return ((route_rule->exception_if_actions[exception_index] == NECP_ROUTE_RULE_DENY_INTERFACE) ? FALSE : TRUE);
8248 }
8249 }
8250
8251 if (IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(route_rule->cellular_action) &&
8252 IFNET_IS_CELLULAR(ifp)) {
8253 if (interface_type_denied != NULL) {
8254 *interface_type_denied = IFRTYPE_FUNCTIONAL_CELLULAR;
8255 }
8256 if (type_aggregate_action == NECP_ROUTE_RULE_NONE ||
8257 (type_aggregate_action == NECP_ROUTE_RULE_ALLOW_INTERFACE &&
8258 route_rule->cellular_action == NECP_ROUTE_RULE_DENY_INTERFACE)) {
8259 // Deny wins if there is a conflict
8260 type_aggregate_action = route_rule->cellular_action;
8261 }
8262 }
8263
8264 if (IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(route_rule->wifi_action) &&
8265 IFNET_IS_WIFI(ifp)) {
8266 if (interface_type_denied != NULL) {
8267 *interface_type_denied = IFRTYPE_FUNCTIONAL_WIFI_INFRA;
8268 }
8269 if (type_aggregate_action == NECP_ROUTE_RULE_NONE ||
8270 (type_aggregate_action == NECP_ROUTE_RULE_ALLOW_INTERFACE &&
8271 route_rule->wifi_action == NECP_ROUTE_RULE_DENY_INTERFACE)) {
8272 // Deny wins if there is a conflict
8273 type_aggregate_action = route_rule->wifi_action;
8274 }
8275 }
8276
8277 if (IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(route_rule->wired_action) &&
8278 IFNET_IS_WIRED(ifp)) {
8279 if (interface_type_denied != NULL) {
8280 *interface_type_denied = IFRTYPE_FUNCTIONAL_WIRED;
8281 }
8282 if (type_aggregate_action == NECP_ROUTE_RULE_NONE ||
8283 (type_aggregate_action == NECP_ROUTE_RULE_ALLOW_INTERFACE &&
8284 route_rule->wired_action == NECP_ROUTE_RULE_DENY_INTERFACE)) {
8285 // Deny wins if there is a conflict
8286 type_aggregate_action = route_rule->wired_action;
8287 }
8288 }
8289
8290 if (IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(route_rule->expensive_action) &&
8291 IFNET_IS_EXPENSIVE(ifp)) {
8292 if (type_aggregate_action == NECP_ROUTE_RULE_NONE ||
8293 (type_aggregate_action == NECP_ROUTE_RULE_ALLOW_INTERFACE &&
8294 route_rule->expensive_action == NECP_ROUTE_RULE_DENY_INTERFACE)) {
8295 // Deny wins if there is a conflict
8296 type_aggregate_action = route_rule->expensive_action;
8297 }
8298 }
8299
8300 if (type_aggregate_action != NECP_ROUTE_RULE_NONE) {
8301 if (necp_debug > 1) {
8302 NECPLOG(LOG_DEBUG, "Route Allowed: C:%d WF:%d W:%d E:%d for Rule %d Allowed %d", route_rule->cellular_action, route_rule->wifi_action, route_rule->wired_action, route_rule->expensive_action, route_rule_id, ((type_aggregate_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? FALSE : TRUE));
8303 }
8304 return ((type_aggregate_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? FALSE : TRUE);
8305 }
8306
8307 if (necp_debug > 1 && !default_is_allowed) {
8308 NECPLOG(LOG_DEBUG, "Route Allowed: Using default for Rule %d Allowed %d", route_rule_id, default_is_allowed);
8309 }
8310 return (default_is_allowed);
8311 }
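
/*
 * The interface-class aggregation above is "deny wins": if an interface is,
 * say, both Wi-Fi and expensive, and one class action allows while the other
 * denies, type_aggregate_action ends up as DENY_INTERFACE. Explicit exception
 * entries for the interface (or its delegate) are consulted first and
 * short-circuit the class checks; default_action applies only when neither an
 * exception nor a class action matches.
 */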
8312
8313 static bool
8314 necp_route_is_allowed(struct rtentry *route, struct ifnet *interface, u_int32_t route_rule_id, u_int32_t *interface_type_denied)
8315 {
8316 if ((route == NULL && interface == NULL) || route_rule_id == 0) {
8317 if (necp_debug > 1) {
8318 NECPLOG(LOG_DEBUG, "Route Allowed: no route or interface, Rule %d Allowed %d", route_rule_id, TRUE);
8319 }
8320 return (TRUE);
8321 }
8322
8323 if (ROUTE_RULE_IS_AGGREGATE(route_rule_id)) {
8324 struct necp_aggregate_route_rule *aggregate_route_rule = necp_lookup_aggregate_route_rule_locked(route_rule_id);
8325 if (aggregate_route_rule != NULL) {
8326 int index = 0;
8327 for (index = 0; index < MAX_AGGREGATE_ROUTE_RULES; index++) {
8328 u_int32_t sub_route_rule_id = aggregate_route_rule->rule_ids[index];
8329 if (sub_route_rule_id == 0) {
8330 break;
8331 }
8332 if (!necp_route_is_allowed_inner(route, interface, sub_route_rule_id, interface_type_denied)) {
8333 return (FALSE);
8334 }
8335 }
8336 }
8337 } else {
8338 return (necp_route_is_allowed_inner(route, interface, route_rule_id, interface_type_denied));
8339 }
8340
8341 return (TRUE);
8342 }
8343
8344 bool
8345 necp_packet_is_allowed_over_interface(struct mbuf *packet, struct ifnet *interface)
8346 {
8347 bool is_allowed = TRUE;
8348 u_int32_t route_rule_id = necp_get_route_rule_id_from_packet(packet);
8349 if (route_rule_id != 0 &&
8350 interface != NULL) {
8351 lck_rw_lock_shared(&necp_kernel_policy_lock);
8352 is_allowed = necp_route_is_allowed(NULL, interface, route_rule_id, NULL);
8353 lck_rw_done(&necp_kernel_policy_lock);
8354 }
8355 return (is_allowed);
8356 }
8357
8358 static bool
8359 necp_netagents_allow_traffic(u_int32_t *netagent_ids, size_t netagent_id_count)
8360 {
8361 size_t netagent_cursor;
8362 for (netagent_cursor = 0; netagent_cursor < netagent_id_count; netagent_cursor++) {
8363 struct necp_uuid_id_mapping *mapping = NULL;
8364 u_int32_t netagent_id = netagent_ids[netagent_cursor];
8365 if (netagent_id == 0) {
8366 break;
8367 }
8368 mapping = necp_uuid_lookup_uuid_with_service_id_locked(netagent_id);
8369 if (mapping != NULL) {
8370 u_int32_t agent_flags = 0;
8371 agent_flags = netagent_get_flags(mapping->uuid);
8372 if (agent_flags & NETAGENT_FLAG_REGISTERED) {
8373 if (agent_flags & NETAGENT_FLAG_ACTIVE) {
8374 continue;
8375 } else if ((agent_flags & NETAGENT_FLAG_VOLUNTARY) == 0) {
8376 return (FALSE);
8377 }
8378 }
8379 }
8380 }
8381 return (TRUE);
8382 }
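
/*
 * Agent gating summary for the loop above: an agent that is registered and
 * active never blocks traffic; one that is registered, inactive, and not
 * voluntary blocks it; and an agent that is unregistered, voluntary, or has
 * no UUID mapping is ignored by this check.
 */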
8383
8384 static bool
8385 necp_socket_is_allowed_to_send_recv_internal(struct inpcb *inp, struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr, ifnet_t interface, necp_kernel_policy_id *return_policy_id, u_int32_t *return_route_rule_id)
8386 {
8387 u_int32_t verifyifindex = interface ? interface->if_index : 0;
8388 bool allowed_to_receive = TRUE;
8389 struct necp_socket_info info;
8390 u_int32_t flowhash = 0;
8391 necp_kernel_policy_result service_action = 0;
8392 necp_kernel_policy_service service = { 0, 0 };
8393 u_int32_t route_rule_id = 0;
8394 struct rtentry *route = NULL;
8395 u_int32_t interface_type_denied = IFRTYPE_FUNCTIONAL_UNKNOWN;
8396
8397 u_int32_t netagent_ids[NECP_MAX_NETAGENTS];
8398 memset(&netagent_ids, 0, sizeof(netagent_ids));
8399
8400 if (return_policy_id) {
8401 *return_policy_id = NECP_KERNEL_POLICY_ID_NONE;
8402 }
8403 if (return_route_rule_id) {
8404 *return_route_rule_id = 0;
8405 }
8406
8407 if (inp == NULL) {
8408 goto done;
8409 }
8410
8411 route = inp->inp_route.ro_rt;
8412
8413 // Don't lock. Possible race condition, but we don't want the performance hit.
8414 if (necp_kernel_socket_policies_count == 0 ||
8415 (!(inp->inp_flags2 & INP2_WANT_APP_POLICY) && necp_kernel_socket_policies_non_app_count == 0)) {
8416 if (necp_drop_all_order > 0) {
8417 if (necp_socket_bypass(override_local_addr, override_remote_addr, inp)) {
8418 allowed_to_receive = TRUE;
8419 } else {
8420 allowed_to_receive = FALSE;
8421 }
8422 }
8423 goto done;
8424 }
8425
8426 // If this socket is connected, or we are not taking addresses into account, try to reuse last result
8427 if ((necp_socket_is_connected(inp) || (override_local_addr == NULL && override_remote_addr == NULL)) && inp->inp_policyresult.policy_id != NECP_KERNEL_POLICY_ID_NONE) {
8428 bool policies_have_changed = FALSE;
8429 bool route_allowed = TRUE;
8430
8431 if (inp->inp_policyresult.policy_gencount != necp_kernel_socket_policies_gencount) {
8432 policies_have_changed = TRUE;
8433 } else {
8434 if (inp->inp_policyresult.results.route_rule_id != 0) {
8435 lck_rw_lock_shared(&necp_kernel_policy_lock);
8436 if (!necp_route_is_allowed(route, interface, inp->inp_policyresult.results.route_rule_id, &interface_type_denied)) {
8437 route_allowed = FALSE;
8438 }
8439 lck_rw_done(&necp_kernel_policy_lock);
8440 }
8441 }
8442
8443 if (!policies_have_changed) {
8444 if (!route_allowed ||
8445 inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_DROP ||
8446 inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT ||
8447 (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL && interface &&
8448 inp->inp_policyresult.results.result_parameter.tunnel_interface_index != verifyifindex)) {
8449 allowed_to_receive = FALSE;
8450 } else {
8451 if (return_policy_id) {
8452 *return_policy_id = inp->inp_policyresult.policy_id;
8453 }
8454 if (return_route_rule_id) {
8455 *return_route_rule_id = inp->inp_policyresult.results.route_rule_id;
8456 }
8457 }
8458 goto done;
8459 }
8460 }
8461
8462 // Check for loopback exception
8463 if (necp_socket_bypass(override_local_addr, override_remote_addr, inp)) {
8464 allowed_to_receive = TRUE;
8465 goto done;
8466 }
8467
8468 // Actually calculate policy result
8469 lck_rw_lock_shared(&necp_kernel_policy_lock);
8470 necp_socket_fillout_info_locked(inp, override_local_addr, override_remote_addr, 0, &info);
8471
8472 flowhash = necp_socket_calc_flowhash_locked(&info);
8473 if (inp->inp_policyresult.policy_id != NECP_KERNEL_POLICY_ID_NONE &&
8474 inp->inp_policyresult.policy_gencount == necp_kernel_socket_policies_gencount &&
8475 inp->inp_policyresult.flowhash == flowhash) {
8476 if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_DROP ||
8477 inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT ||
8478 (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL && interface &&
8479 inp->inp_policyresult.results.result_parameter.tunnel_interface_index != verifyifindex) ||
8480 (inp->inp_policyresult.results.route_rule_id != 0 &&
8481 !necp_route_is_allowed(route, interface, inp->inp_policyresult.results.route_rule_id, &interface_type_denied))) {
8482 allowed_to_receive = FALSE;
8483 } else {
8484 if (return_policy_id) {
8485 *return_policy_id = inp->inp_policyresult.policy_id;
8486 }
8487 if (return_route_rule_id) {
8488 *return_route_rule_id = inp->inp_policyresult.results.route_rule_id;
8489 }
8490 }
8491 lck_rw_done(&necp_kernel_policy_lock);
8492 goto done;
8493 }
8494
8495 struct necp_kernel_socket_policy *matched_policy = necp_socket_find_policy_match_with_info_locked(necp_kernel_socket_policies_map[NECP_SOCKET_MAP_APP_ID_TO_BUCKET(info.application_id)], &info, NULL, &route_rule_id, &service_action, &service, netagent_ids, NECP_MAX_NETAGENTS, current_proc());
8496 if (matched_policy != NULL) {
8497 if (matched_policy->result == NECP_KERNEL_POLICY_RESULT_DROP ||
8498 matched_policy->result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT ||
8499 (matched_policy->result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL && interface &&
8500 matched_policy->result_parameter.tunnel_interface_index != verifyifindex) ||
8501 ((service_action == NECP_KERNEL_POLICY_RESULT_TRIGGER_SCOPED ||
8502 service_action == NECP_KERNEL_POLICY_RESULT_NO_TRIGGER_SCOPED) &&
8503 service.identifier != 0 && service.identifier != NECP_NULL_SERVICE_ID) ||
8504 (route_rule_id != 0 &&
8505 !necp_route_is_allowed(route, interface, route_rule_id, &interface_type_denied)) ||
8506 !necp_netagents_allow_traffic(netagent_ids, NECP_MAX_NETAGENTS)) {
8507 allowed_to_receive = FALSE;
8508 } else {
8509 if (return_policy_id) {
8510 *return_policy_id = matched_policy->id;
8511 }
8512 if (return_route_rule_id) {
8513 *return_route_rule_id = route_rule_id;
8514 }
8515 }
8516 lck_rw_done(&necp_kernel_policy_lock);
8517
8518 if (necp_debug > 1 && matched_policy->id != inp->inp_policyresult.policy_id) {
8519 NECPLOG(LOG_DEBUG, "Socket Send/Recv Policy: Policy %d Allowed %d", return_policy_id ? *return_policy_id : 0, allowed_to_receive);
8520 }
8521 goto done;
8522 } else if (necp_drop_all_order > 0) {
8523 allowed_to_receive = FALSE;
8524 } else {
8525 if (return_policy_id) {
8526 *return_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
8527 }
8528 if (return_route_rule_id) {
8529 *return_route_rule_id = route_rule_id;
8530 }
8531 }
8532
8533 lck_rw_done(&necp_kernel_policy_lock);
8534
8535 done:
8536 if (!allowed_to_receive && interface_type_denied != IFRTYPE_FUNCTIONAL_UNKNOWN) {
8537 soevent(inp->inp_socket, (SO_FILT_HINT_LOCKED | SO_FILT_HINT_IFDENIED));
8538 }
8539
8540 return (allowed_to_receive);
8541 }
8542
8543 bool
8544 necp_socket_is_allowed_to_send_recv_v4(struct inpcb *inp, u_int16_t local_port, u_int16_t remote_port, struct in_addr *local_addr, struct in_addr *remote_addr, ifnet_t interface, necp_kernel_policy_id *return_policy_id, u_int32_t *return_route_rule_id)
8545 {
8546 struct sockaddr_in local;
8547 struct sockaddr_in remote;
8548 local.sin_family = remote.sin_family = AF_INET;
8549 local.sin_len = remote.sin_len = sizeof(struct sockaddr_in);
8550 local.sin_port = local_port;
8551 remote.sin_port = remote_port;
8552 memcpy(&local.sin_addr, local_addr, sizeof(local.sin_addr));
8553 memcpy(&remote.sin_addr, remote_addr, sizeof(remote.sin_addr));
8554
8555 return (necp_socket_is_allowed_to_send_recv_internal(inp, (struct sockaddr *)&local, (struct sockaddr *)&remote, interface, return_policy_id, return_route_rule_id));
8556 }
8557
8558 bool
8559 necp_socket_is_allowed_to_send_recv_v6(struct inpcb *inp, u_int16_t local_port, u_int16_t remote_port, struct in6_addr *local_addr, struct in6_addr *remote_addr, ifnet_t interface, necp_kernel_policy_id *return_policy_id, u_int32_t *return_route_rule_id)
8560 {
8561 struct sockaddr_in6 local;
8562 struct sockaddr_in6 remote;
8563 local.sin6_family = remote.sin6_family = AF_INET6;
8564 local.sin6_len = remote.sin6_len = sizeof(struct sockaddr_in6);
8565 local.sin6_port = local_port;
8566 remote.sin6_port = remote_port;
8567 memcpy(&local.sin6_addr, local_addr, sizeof(local.sin6_addr));
8568 memcpy(&remote.sin6_addr, remote_addr, sizeof(remote.sin6_addr));
8569
8570 return (necp_socket_is_allowed_to_send_recv_internal(inp, (struct sockaddr *)&local, (struct sockaddr *)&remote, interface, return_policy_id, return_route_rule_id));
8571 }
8572
8573 bool
8574 necp_socket_is_allowed_to_send_recv(struct inpcb *inp, necp_kernel_policy_id *return_policy_id, u_int32_t *return_route_rule_id)
8575 {
8576 return (necp_socket_is_allowed_to_send_recv_internal(inp, NULL, NULL, NULL, return_policy_id, return_route_rule_id));
8577 }
8578
8579 int
8580 necp_mark_packet_from_socket(struct mbuf *packet, struct inpcb *inp, necp_kernel_policy_id policy_id, u_int32_t route_rule_id)
8581 {
8582 if (packet == NULL || inp == NULL || !(packet->m_flags & M_PKTHDR)) {
8583 return (EINVAL);
8584 }
8585
8586 // Mark ID for Pass and IP Tunnel
8587 if (policy_id != NECP_KERNEL_POLICY_ID_NONE) {
8588 packet->m_pkthdr.necp_mtag.necp_policy_id = policy_id;
8589 } else if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_PASS ||
8590 inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL) {
8591 packet->m_pkthdr.necp_mtag.necp_policy_id = inp->inp_policyresult.policy_id;
8592 } else {
8593 packet->m_pkthdr.necp_mtag.necp_policy_id = NECP_KERNEL_POLICY_ID_NONE;
8594 }
8595 packet->m_pkthdr.necp_mtag.necp_last_interface_index = 0;
8596 if (route_rule_id != 0) {
8597 packet->m_pkthdr.necp_mtag.necp_route_rule_id = route_rule_id;
8598 } else {
8599 packet->m_pkthdr.necp_mtag.necp_route_rule_id = inp->inp_policyresult.results.route_rule_id;
8600 }
8601 packet->m_pkthdr.necp_mtag.necp_app_id = inp->inp_policyresult.app_id;
8602
8603 return (0);
8604 }
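
/*
 * Marking precedence: an explicit policy_id argument always wins; otherwise
 * the socket's cached policy_id is copied onto the packet only when the
 * cached result is PASS or IP_TUNNEL, allowing the IP-output policies to
 * match those packets later via NECP_KERNEL_CONDITION_POLICY_ID. The route
 * rule ID falls back to the cached result in the same way.
 */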
8605
8606 int
8607 necp_mark_packet_from_ip(struct mbuf *packet, necp_kernel_policy_id policy_id)
8608 {
8609 if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
8610 return (EINVAL);
8611 }
8612
8613 // Mark ID for Pass and IP Tunnel
8614 if (policy_id != NECP_KERNEL_POLICY_ID_NONE) {
8615 packet->m_pkthdr.necp_mtag.necp_policy_id = policy_id;
8616 } else {
8617 packet->m_pkthdr.necp_mtag.necp_policy_id = NECP_KERNEL_POLICY_ID_NONE;
8618 }
8619
8620 return (0);
8621 }
8622
8623 int
8624 necp_mark_packet_from_interface(struct mbuf *packet, ifnet_t interface)
8625 {
8626 if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
8627 return (EINVAL);
8628 }
8629
8630 // Mark the last interface index on the packet
8631 if (interface != NULL) {
8632 packet->m_pkthdr.necp_mtag.necp_last_interface_index = interface->if_index;
8633 }
8634
8635 return (0);
8636 }
8637
8638 int
8639 necp_mark_packet_as_keepalive(struct mbuf *packet, bool is_keepalive)
8640 {
8641 if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
8642 return (EINVAL);
8643 }
8644
8645 if (is_keepalive) {
8646 packet->m_pkthdr.pkt_flags |= PKTF_KEEPALIVE;
8647 } else {
8648 packet->m_pkthdr.pkt_flags &= ~PKTF_KEEPALIVE;
8649 }
8650
8651 return (0);
8652 }
8653
8654 necp_kernel_policy_id
8655 necp_get_policy_id_from_packet(struct mbuf *packet)
8656 {
8657 if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
8658 return (NECP_KERNEL_POLICY_ID_NONE);
8659 }
8660
8661 return (packet->m_pkthdr.necp_mtag.necp_policy_id);
8662 }
8663
8664 u_int32_t
8665 necp_get_last_interface_index_from_packet(struct mbuf *packet)
8666 {
8667 if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
8668 return (0);
8669 }
8670
8671 return (packet->m_pkthdr.necp_mtag.necp_last_interface_index);
8672 }
8673
8674 u_int32_t
8675 necp_get_route_rule_id_from_packet(struct mbuf *packet)
8676 {
8677 if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
8678 return (0);
8679 }
8680
8681 return (packet->m_pkthdr.necp_mtag.necp_route_rule_id);
8682 }
8683
8684 int
8685 necp_get_app_uuid_from_packet(struct mbuf *packet,
8686 uuid_t app_uuid)
8687 {
8688 if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
8689 return (EINVAL);
8690 }
8691
8692 bool found_mapping = FALSE;
8693 if (packet->m_pkthdr.necp_mtag.necp_app_id != 0) {
8694 lck_rw_lock_shared(&necp_kernel_policy_lock);
8695 struct necp_uuid_id_mapping *entry = necp_uuid_lookup_uuid_with_app_id_locked(packet->m_pkthdr.necp_mtag.necp_app_id);
8696 if (entry != NULL) {
8697 uuid_copy(app_uuid, entry->uuid);
8698 found_mapping = true;
8699 }
8700 lck_rw_done(&necp_kernel_policy_lock);
8701 }
8702 if (!found_mapping) {
8703 uuid_clear(app_uuid);
8704 }
8705 return (0);
8706 }
8707
8708 bool
8709 necp_get_is_keepalive_from_packet(struct mbuf *packet)
8710 {
8711 if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
8712 return (FALSE);
8713 }
8714
8715 return (packet->m_pkthdr.pkt_flags & PKTF_KEEPALIVE);
8716 }
8717
8718 u_int32_t
8719 necp_socket_get_content_filter_control_unit(struct socket *so)
8720 {
8721 struct inpcb *inp = sotoinpcb(so);
8722
8723 if (inp == NULL) {
8724 return (0);
8725 }
8726 return (inp->inp_policyresult.results.filter_control_unit);
8727 }
8728
8729 bool
8730 necp_socket_should_use_flow_divert(struct inpcb *inp)
8731 {
8732 if (inp == NULL) {
8733 return (FALSE);
8734 }
8735
8736 return (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT);
8737 }
8738
8739 u_int32_t
8740 necp_socket_get_flow_divert_control_unit(struct inpcb *inp)
8741 {
8742 if (inp == NULL) {
8743 return (0);
8744 }
8745
8746 if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT) {
8747 return (inp->inp_policyresult.results.result_parameter.flow_divert_control_unit);
8748 }
8749
8750 return (0);
8751 }
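
/*
 * Illustrative sketch (not part of the original xnu source): the usual
 * flow-divert pattern is to first ask whether the matched policy result is
 * SOCKET_DIVERT and only then read the control unit identifying the
 * flow_divert group to attach to. example_get_divert_unit is hypothetical.
 */
static u_int32_t
example_get_divert_unit(struct socket *so)
{
	struct inpcb *inp = sotoinpcb(so);

	if (!necp_socket_should_use_flow_divert(inp)) {
		return (0);
	}
	// Non-zero only while the policy result is SOCKET_DIVERT.
	return (necp_socket_get_flow_divert_control_unit(inp));
}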
8752
8753 bool
8754 necp_socket_should_rescope(struct inpcb *inp)
8755 {
8756 if (inp == NULL) {
8757 return (FALSE);
8758 }
8759
8760 return (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED);
8761 }
8762
8763 u_int
8764 necp_socket_get_rescope_if_index(struct inpcb *inp)
8765 {
8766 if (inp == NULL) {
8767 return (0);
8768 }
8769
8770 if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED) {
8771 return (inp->inp_policyresult.results.result_parameter.scoped_interface_index);
8772 }
8773
8774 return (0);
8775 }
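
/*
 * Illustrative sketch (not part of the original xnu source): when the policy
 * result is SOCKET_SCOPED, connect-time code can rebind ("rescope") the
 * socket to the interface the policy selected. This only shows the lookup;
 * actually scoping the inpcb is left to the real callers. The helper name
 * is hypothetical.
 */
static u_int
example_lookup_rescope_ifindex(struct inpcb *inp)
{
	if (!necp_socket_should_rescope(inp)) {
		return (0);
	}
	// The scoped interface index chosen by the matched policy (0 if none).
	return (necp_socket_get_rescope_if_index(inp));
}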
8776
8777 u_int32_t
8778 necp_socket_get_effective_mtu(struct inpcb *inp, u_int32_t current_mtu)
8779 {
8780 if (inp == NULL) {
8781 return (current_mtu);
8782 }
8783
8784 if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL &&
8785 (inp->inp_flags & INP_BOUND_IF) &&
8786 inp->inp_boundifp) {
8787
8788 u_int bound_interface_index = inp->inp_boundifp->if_index;
8789 u_int tunnel_interface_index = inp->inp_policyresult.results.result_parameter.tunnel_interface_index;
8790
8791 // The result is IP Tunnel, and the flow is being rescoped from one interface to another; recalculate the MTU.
8792 if (bound_interface_index != tunnel_interface_index) {
8793 ifnet_t tunnel_interface = NULL;
8794
8795 ifnet_head_lock_shared();
8796 tunnel_interface = ifindex2ifnet[tunnel_interface_index];
8797 ifnet_head_done();
8798
8799 if (tunnel_interface != NULL) {
8800 u_int32_t direct_tunnel_mtu = tunnel_interface->if_mtu;
8801 u_int32_t delegate_tunnel_mtu = (tunnel_interface->if_delegated.ifp != NULL) ? tunnel_interface->if_delegated.ifp->if_mtu : 0;
8802 if (delegate_tunnel_mtu != 0 &&
8803 strncmp(tunnel_interface->if_name, "ipsec", strlen("ipsec")) == 0) {
8804 // For ipsec interfaces, calculate the overhead from the delegate interface
8805 u_int32_t tunnel_overhead = (u_int32_t)(esp_hdrsiz(NULL) + sizeof(struct ip6_hdr));
8806 if (delegate_tunnel_mtu > tunnel_overhead) {
8807 delegate_tunnel_mtu -= tunnel_overhead;
8808 }
8809
8810 if (delegate_tunnel_mtu < direct_tunnel_mtu) {
8811 // If the (delegate - overhead) < direct, return (delegate - overhead)
8812 return (delegate_tunnel_mtu);
8813 } else {
8814 // Otherwise return direct
8815 return (direct_tunnel_mtu);
8816 }
8817 } else {
8818 // For non-ipsec interfaces, just return the tunnel MTU
8819 return (direct_tunnel_mtu);
8820 }
8821 }
8822 }
8823 }
8824
8825 // By default, just return the MTU passed in
8826 return (current_mtu);
8827 }
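
/*
 * Illustrative sketch (not part of the original xnu source): how a transport
 * might clamp its path MTU with the NECP-effective MTU. As a worked example
 * with assumed numbers: for an IP Tunnel result onto an ipsec interface with
 * an MTU of 1500 whose delegate also has an MTU of 1500, if the ESP plus
 * outer IPv6 overhead computed above came to roughly 100 bytes, the
 * delegate-derived value of about 1400 is the smaller of the two and is what
 * the function returns. example_clamp_mtu is hypothetical.
 */
static u_int32_t
example_clamp_mtu(struct inpcb *inp, u_int32_t path_mtu)
{
	u_int32_t effective_mtu = necp_socket_get_effective_mtu(inp, path_mtu);

	// The effective MTU can be larger or smaller than path_mtu; this
	// caller conservatively keeps the smaller of the two.
	return ((effective_mtu < path_mtu) ? effective_mtu : path_mtu);
}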
8828
8829 ifnet_t
8830 necp_get_ifnet_from_result_parameter(necp_kernel_policy_result_parameter *result_parameter)
8831 {
8832 if (result_parameter == NULL) {
8833 return (NULL);
8834 }
8835
8836 return (ifindex2ifnet[result_parameter->tunnel_interface_index]);
8837 }
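
/*
 * Illustrative sketch (not part of the original xnu source): translating an
 * IP Tunnel result parameter into an ifnet. Note that the lookup above is a
 * bare ifindex2ifnet[] read, so real callers should only use it where the
 * index is known to be valid and the interface cannot detach underneath
 * them. The helper name is hypothetical.
 */
static bool
example_result_has_tunnel_ifnet(necp_kernel_policy_result_parameter *result_parameter)
{
	return (necp_get_ifnet_from_result_parameter(result_parameter) != NULL);
}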
8838
8839 bool
8840 necp_packet_can_rebind_to_ifnet(struct mbuf *packet, struct ifnet *interface, struct route *new_route, int family)
8841 {
8842 bool found_match = FALSE;
8843 errno_t result = 0;
8844 ifaddr_t *addresses = NULL;
8845 union necp_sockaddr_union address_storage;
8846 int i;
8847
8848 if (packet == NULL || interface == NULL || new_route == NULL || (family != AF_INET && family != AF_INET6)) {
8849 return (FALSE);
8850 }
8851
8852 result = ifnet_get_address_list_family(interface, &addresses, family);
8853 if (result != 0) {
8854 NECPLOG(LOG_ERR, "Failed to get address list for %s%d", ifnet_name(interface), ifnet_unit(interface));
8855 return (FALSE);
8856 }
8857
8858 for (i = 0; addresses[i] != NULL; i++) {
8859 ROUTE_RELEASE(new_route);
8860 if (ifaddr_address(addresses[i], &address_storage.sa, sizeof(address_storage)) == 0) {
8861 if (family == AF_INET) {
8862 struct ip *ip = mtod(packet, struct ip *);
8863 if (memcmp(&address_storage.sin.sin_addr, &ip->ip_src, sizeof(ip->ip_src)) == 0) {
8864 struct sockaddr_in *dst4 = (struct sockaddr_in *)(void *)&new_route->ro_dst;
8865 dst4->sin_family = AF_INET;
8866 dst4->sin_len = sizeof(struct sockaddr_in);
8867 dst4->sin_addr = ip->ip_dst;
8868 rtalloc_scoped(new_route, interface->if_index);
8869 if (!ROUTE_UNUSABLE(new_route)) {
8870 found_match = TRUE;
8871 goto done;
8872 }
8873 }
8874 } else if (family == AF_INET6) {
8875 struct ip6_hdr *ip6 = mtod(packet, struct ip6_hdr *);
8876 if (memcmp(&address_storage.sin6.sin6_addr, &ip6->ip6_src, sizeof(ip6->ip6_src)) == 0) {
8877 struct sockaddr_in6 *dst6 = (struct sockaddr_in6 *)(void *)&new_route->ro_dst;
8878 dst6->sin6_family = AF_INET6;
8879 dst6->sin6_len = sizeof(struct sockaddr_in6);
8880 dst6->sin6_addr = ip6->ip6_dst;
8881 rtalloc_scoped(new_route, interface->if_index);
8882 if (!ROUTE_UNUSABLE(new_route)) {
8883 found_match = TRUE;
8884 goto done;
8885 }
8886 }
8887 }
8888 }
8889 }
8890
8891 done:
8892 ifnet_free_address_list(addresses);
8893 addresses = NULL;
8894 return (found_match);
8895 }
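
/*
 * Illustrative sketch (not part of the original xnu source): probing whether
 * an outbound packet could be re-sent over a different interface. The
 * candidate route is caller-owned scratch space; this sketch only probes and
 * drops whatever reference the route ends up holding. example_try_rebind is
 * hypothetical.
 */
static bool
example_try_rebind(struct mbuf *packet, struct ifnet *candidate_ifp, int family)
{
	struct route candidate_route;
	bool can_rebind;

	bzero(&candidate_route, sizeof(candidate_route));
	// TRUE when the packet's source address belongs to candidate_ifp and a
	// usable scoped route to the packet's destination exists through it.
	can_rebind = necp_packet_can_rebind_to_ifnet(packet, candidate_ifp,
	    &candidate_route, family);
	ROUTE_RELEASE(&candidate_route);
	return (can_rebind);
}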
8896
8897 static bool
8898 necp_addr_is_loopback(struct sockaddr *address)
8899 {
8900 if (address == NULL) {
8901 return (FALSE);
8902 }
8903
8904 if (address->sa_family == AF_INET) {
8905 return (ntohl(((struct sockaddr_in *)(void *)address)->sin_addr.s_addr) == INADDR_LOOPBACK);
8906 } else if (address->sa_family == AF_INET6) {
8907 return IN6_IS_ADDR_LOOPBACK(&((struct sockaddr_in6 *)(void *)address)->sin6_addr);
8908 }
8909
8910 return (FALSE);
8911 }
8912
8913 static bool
8914 necp_is_loopback(struct sockaddr *local_addr, struct sockaddr *remote_addr, struct inpcb *inp, struct mbuf *packet)
8915 {
8916 // Note: This function only checks for the loopback addresses.
8917 // In the future, we may want to expand to also allow any traffic
8918 // going through the loopback interface, but until then, this
8919 // address-only check is the cheaper option.
8920
8921 if (local_addr != NULL && necp_addr_is_loopback(local_addr)) {
8922 return (TRUE);
8923 }
8924
8925 if (remote_addr != NULL && necp_addr_is_loopback(remote_addr)) {
8926 return (TRUE);
8927 }
8928
8929 if (inp != NULL) {
8930 if ((inp->inp_flags & INP_BOUND_IF) && inp->inp_boundifp && (inp->inp_boundifp->if_flags & IFF_LOOPBACK)) {
8931 return (TRUE);
8932 }
8933 if (inp->inp_vflag & INP_IPV4) {
8934 if (ntohl(inp->inp_laddr.s_addr) == INADDR_LOOPBACK ||
8935 ntohl(inp->inp_faddr.s_addr) == INADDR_LOOPBACK) {
8936 return (TRUE);
8937 }
8938 } else if (inp->inp_vflag & INP_IPV6) {
8939 if (IN6_IS_ADDR_LOOPBACK(&inp->in6p_laddr) ||
8940 IN6_IS_ADDR_LOOPBACK(&inp->in6p_faddr)) {
8941 return (TRUE);
8942 }
8943 }
8944 }
8945
8946 if (packet != NULL) {
8947 struct ip *ip = mtod(packet, struct ip *);
8948 if (ip->ip_v == 4) {
8949 if (ntohl(ip->ip_src.s_addr) == INADDR_LOOPBACK) {
8950 return (TRUE);
8951 }
8952 if (ntohl(ip->ip_dst.s_addr) == INADDR_LOOPBACK) {
8953 return (TRUE);
8954 }
8955 } else if (ip->ip_v == 6) {
8956 struct ip6_hdr *ip6 = mtod(packet, struct ip6_hdr *);
8957 if (IN6_IS_ADDR_LOOPBACK(&ip6->ip6_src)) {
8958 return (TRUE);
8959 }
8960 if (IN6_IS_ADDR_LOOPBACK(&ip6->ip6_dst)) {
8961 return (TRUE);
8962 }
8963 }
8964 }
8965
8966 return (FALSE);
8967 }
8968
8969 static bool
8970 necp_is_intcoproc(struct inpcb *inp, struct mbuf *packet)
8971 {
8972
8973 if (inp != NULL) {
8974 return (sflt_permission_check(inp) ? true : false);
8975 }
8976 if (packet != NULL) {
8977 struct ip6_hdr *ip6 = mtod(packet, struct ip6_hdr *);
8978 if ((ip6->ip6_vfc & IPV6_VERSION_MASK) == IPV6_VERSION &&
8979 IN6_IS_ADDR_LINKLOCAL(&ip6->ip6_dst) &&
8980 ip6->ip6_dst.s6_addr32[2] == ntohl(0xaede48ff) &&
8981 ip6->ip6_dst.s6_addr32[3] == ntohl(0xfe334455)) {
8982 return (true);
8983 }
8984 }
8985
8986 return (false);
8987 }