apple/xnu.git (xnu-3789.1.32): bsd/net/necp.c
1 /*
2 * Copyright (c) 2013-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <string.h>
30 #include <sys/systm.h>
31 #include <sys/types.h>
32 #include <sys/queue.h>
33 #include <sys/malloc.h>
34 #include <libkern/OSMalloc.h>
35 #include <sys/kernel.h>
36 #include <sys/kern_control.h>
37 #include <sys/mbuf.h>
38 #include <sys/kpi_mbuf.h>
39 #include <sys/proc_uuid_policy.h>
40 #include <net/if.h>
41 #include <sys/domain.h>
42 #include <sys/protosw.h>
43 #include <sys/socket.h>
44 #include <sys/socketvar.h>
45 #include <netinet/ip.h>
46 #include <netinet/ip6.h>
47 #include <netinet/tcp.h>
48 #include <netinet/tcp_var.h>
49 #include <netinet/udp.h>
50 #include <netinet/in_pcb.h>
51 #include <netinet/in_tclass.h>
52 #include <netinet6/esp.h>
53 #include <net/flowhash.h>
54 #include <net/if_var.h>
55 #include <sys/kauth.h>
56 #include <sys/sysctl.h>
57 #include <sys/sysproto.h>
58 #include <sys/priv.h>
59 #include <sys/kern_event.h>
60 #include <IOKit/IOBSD.h>
61 #include <net/network_agent.h>
62 #include <net/necp.h>
63
64 /*
65 * NECP - Network Extension Control Policy database
66 * ------------------------------------------------
67 * The goal of this module is to allow clients connecting via a
68 * kernel control socket to create high-level policy sessions, whose
69 * policies are ingested into low-level kernel policies that control
70 * and tag traffic at the application, socket, and IP layers.
71 *
72 * ------------------------------------------------
73 * Sessions
74 * ------------------------------------------------
75 * Each session owns a list of session policies, each of which can
76 * specify any combination of conditions and a single result. Each
77 * session also has a priority level (such as High, Default, or Low)
78 * which is requested by the client. Based on the requested level,
79 * a session order value is assigned to the session, which will be used
80 * to sort kernel policies generated by the session. The session client
81 * can specify the sub-order for each policy it creates which will be
82 * used to further sort the kernel policies.
83 *
84 * Kernel Control Socket --> 1 necp_session --> list of necp_session_policy structs
85 *
86 * ------------------------------------------------
87 * Kernel Policies
88 * ------------------------------------------------
89 * Whenever a session sends the Apply command, its policies are ingested
90 * to generate kernel policies. There are two phases of kernel policy
91 * ingestion.
92 *
93 * 1. The session policy is parsed to create kernel policies at the socket
94 * and IP layers, when applicable. For example, a policy that requires
95 * all traffic from App1 to Pass will generate a socket kernel policy to
96 * match App1 and mark packets with ID1, and also an IP policy to match
97 * ID1 and let the packet pass. This is handled in necp_apply_policy. The
98 * resulting kernel policies are added to the global socket and IP layer
99 * policy lists.
100 * necp_session_policy --> necp_kernel_socket_policy and necp_kernel_ip_output_policy
101 * || ||
102 * \/ \/
103 * necp_kernel_socket_policies necp_kernel_ip_output_policies
104 *
105 * 2. Once the global lists of kernel policies have been filled out, each
106 * list is traversed to create optimized sub-lists ("Maps") which are used during
107 * data-path evaluation. IP policies are sent into necp_kernel_ip_output_policies_map,
108 * which hashes incoming packets based on marked socket-layer policies, and removes
109 * duplicate or overlapping policies. Socket policies are sent into two maps,
110 * necp_kernel_socket_policies_map and necp_kernel_socket_policies_app_layer_map.
111 * The app layer map is used for policy checks coming in from user space, and is one
112 * list with duplicate and overlapping policies removed. The socket map hashes based
113 * on app UUID, and removes duplicate and overlapping policies.
114 * necp_kernel_socket_policy --> necp_kernel_socket_policies_app_layer_map
115 * |-> necp_kernel_socket_policies_map
116 *
117 * necp_kernel_ip_output_policies --> necp_kernel_ip_output_policies_map
118 *
119 * ------------------------------------------------
120 * Drop All Level
121 * ------------------------------------------------
122 * The Drop All Level is a sysctl that controls the level at which policies are allowed
123 * to override a global drop rule. If the value is 0, no drop rule is applied. If the value
124 * is 1, all traffic is dropped. If the value is greater than 1, all kernel policies created
125 * by a session with a priority level better than (numerically less than) the
126 * Drop All Level will allow matching traffic to not be dropped. The Drop All Level is
127 * dynamically interpreted into necp_drop_all_order, which specifies the equivalent assigned
128 * session orders to be dropped.
129 */
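
/*
 * ------------------------------------------------
 * Illustrative example (editorial sketch, not part of the original file):
 * a minimal user-space client that opens the NECP kernel control socket,
 * which is what creates a necp_session via necp_ctl_connect() below. The
 * headers and calls are the standard kern_control client API; the literal
 * control name is assumed to match NECP_CONTROL_NAME from necp.h. Treat
 * this as a sketch of typical usage, not documented client code.
 * ------------------------------------------------
 */
#if 0 /* user-space sketch, disabled for kernel builds */
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/sys_domain.h>
#include <sys/kern_control.h>
#include <string.h>
#include <unistd.h>

static int
necp_open_session_example(void)
{
	struct ctl_info info;
	struct sockaddr_ctl addr;
	int fd = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);
	if (fd < 0) {
		return (-1);
	}

	// Resolve the control name to a control ID
	memset(&info, 0, sizeof(info));
	strlcpy(info.ctl_name, "com.apple.net.necp_control", sizeof(info.ctl_name));
	if (ioctl(fd, CTLIOCGINFO, &info) == -1) {
		close(fd);
		return (-1);
	}

	memset(&addr, 0, sizeof(addr));
	addr.sc_len = sizeof(addr);
	addr.sc_family = AF_SYSTEM;
	addr.ss_sysaddr = AF_SYS_CONTROL;
	addr.sc_id = info.ctl_id;
	addr.sc_unit = 0; // let the kernel assign a unit; it becomes the session's control unit

	// The control is registered with CTL_FLAG_PRIVILEGED, so this requires root
	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) == -1) {
		close(fd);
		return (-1);
	}

	// Policy messages (add, apply, etc.) are then sent over fd as TLV packets
	return (fd);
}
#endif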
130
131 u_int32_t necp_drop_all_order = 0;
132 u_int32_t necp_drop_all_level = 0;
133
134 u_int32_t necp_pass_loopback = 1; // 0=Off, 1=On
135 u_int32_t necp_pass_keepalives = 1; // 0=Off, 1=On
136
137 u_int32_t necp_debug = 0; // 0=None, 1=Basic, 2=EveryMatch
138
139 u_int32_t necp_session_count = 0;
140
141 #define LIST_INSERT_SORTED_ASCENDING(head, elm, field, sortfield, tmpelm) do { \
142 if (LIST_EMPTY((head)) || (LIST_FIRST(head)->sortfield >= (elm)->sortfield)) { \
143 LIST_INSERT_HEAD((head), elm, field); \
144 } else { \
145 LIST_FOREACH(tmpelm, head, field) { \
146 if (LIST_NEXT(tmpelm, field) == NULL || LIST_NEXT(tmpelm, field)->sortfield >= (elm)->sortfield) { \
147 LIST_INSERT_AFTER(tmpelm, elm, field); \
148 break; \
149 } \
150 } \
151 } \
152 } while (0)
153
154 #define LIST_INSERT_SORTED_TWICE_ASCENDING(head, elm, field, firstsortfield, secondsortfield, tmpelm) do { \
155 if (LIST_EMPTY((head)) || (LIST_FIRST(head)->firstsortfield > (elm)->firstsortfield) || ((LIST_FIRST(head)->firstsortfield == (elm)->firstsortfield) && (LIST_FIRST(head)->secondsortfield >= (elm)->secondsortfield))) { \
156 LIST_INSERT_HEAD((head), elm, field); \
157 } else { \
158 LIST_FOREACH(tmpelm, head, field) { \
159 if (LIST_NEXT(tmpelm, field) == NULL || (LIST_NEXT(tmpelm, field)->firstsortfield > (elm)->firstsortfield) || ((LIST_NEXT(tmpelm, field)->firstsortfield == (elm)->firstsortfield) && (LIST_NEXT(tmpelm, field)->secondsortfield >= (elm)->secondsortfield))) { \
160 LIST_INSERT_AFTER(tmpelm, elm, field); \
161 break; \
162 } \
163 } \
164 } \
165 } while (0)
166
167 #define LIST_INSERT_SORTED_THRICE_ASCENDING(head, elm, field, firstsortfield, secondsortfield, thirdsortfield, tmpelm) do { \
168 if (LIST_EMPTY((head)) || (LIST_FIRST(head)->firstsortfield > (elm)->firstsortfield) || ((LIST_FIRST(head)->firstsortfield == (elm)->firstsortfield) && (LIST_FIRST(head)->secondsortfield >= (elm)->secondsortfield)) || ((LIST_FIRST(head)->firstsortfield == (elm)->firstsortfield) && (LIST_FIRST(head)->secondsortfield == (elm)->secondsortfield) && (LIST_FIRST(head)->thirdsortfield >= (elm)->thirdsortfield))) { \
169 LIST_INSERT_HEAD((head), elm, field); \
170 } else { \
171 LIST_FOREACH(tmpelm, head, field) { \
172 if (LIST_NEXT(tmpelm, field) == NULL || (LIST_NEXT(tmpelm, field)->firstsortfield > (elm)->firstsortfield) || ((LIST_NEXT(tmpelm, field)->firstsortfield == (elm)->firstsortfield) && (LIST_NEXT(tmpelm, field)->secondsortfield >= (elm)->secondsortfield)) || ((LIST_NEXT(tmpelm, field)->firstsortfield == (elm)->firstsortfield) && (LIST_NEXT(tmpelm, field)->secondsortfield == (elm)->secondsortfield) && (LIST_NEXT(tmpelm, field)->thirdsortfield >= (elm)->thirdsortfield))) { \
173 LIST_INSERT_AFTER(tmpelm, elm, field); \
174 break; \
175 } \
176 } \
177 } \
178 } while (0)
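
/*
 * Usage sketch for the sorted-insert macros above (editorial illustration;
 * the element type and list here are hypothetical, not from this file).
 * The macros keep a BSD queue(3) LIST ordered ascending by one or more sort
 * fields, inserting the new element before the first entry whose key
 * compares greater than or equal to its own.
 */
#if 0
struct example_elm {
	LIST_ENTRY(example_elm) chain;
	u_int32_t order;
};
static LIST_HEAD(_example_elm_list, example_elm) example_elm_list; // assumed LIST_INIT'd elsewhere

static void
example_insert_sorted(struct example_elm *elm)
{
	struct example_elm *tmp_elm = NULL;
	// After this call, walking example_elm_list yields elements in ascending 'order'
	LIST_INSERT_SORTED_ASCENDING(&example_elm_list, elm, chain, order, tmp_elm);
}
#endif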
179
180 #define IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(x) ((x) == NECP_ROUTE_RULE_DENY_INTERFACE || (x) == NECP_ROUTE_RULE_ALLOW_INTERFACE)
181
182 #define NECP_KERNEL_CONDITION_ALL_INTERFACES 0x00001
183 #define NECP_KERNEL_CONDITION_BOUND_INTERFACE 0x00002
184 #define NECP_KERNEL_CONDITION_PROTOCOL 0x00004
185 #define NECP_KERNEL_CONDITION_LOCAL_START 0x00008
186 #define NECP_KERNEL_CONDITION_LOCAL_END 0x00010
187 #define NECP_KERNEL_CONDITION_LOCAL_PREFIX 0x00020
188 #define NECP_KERNEL_CONDITION_REMOTE_START 0x00040
189 #define NECP_KERNEL_CONDITION_REMOTE_END 0x00080
190 #define NECP_KERNEL_CONDITION_REMOTE_PREFIX 0x00100
191 #define NECP_KERNEL_CONDITION_APP_ID 0x00200
192 #define NECP_KERNEL_CONDITION_REAL_APP_ID 0x00400
193 #define NECP_KERNEL_CONDITION_DOMAIN 0x00800
194 #define NECP_KERNEL_CONDITION_ACCOUNT_ID 0x01000
195 #define NECP_KERNEL_CONDITION_POLICY_ID 0x02000
196 #define NECP_KERNEL_CONDITION_PID 0x04000
197 #define NECP_KERNEL_CONDITION_UID 0x08000
198 #define NECP_KERNEL_CONDITION_LAST_INTERFACE 0x10000 // Only set from packets looping between interfaces
199 #define NECP_KERNEL_CONDITION_TRAFFIC_CLASS 0x20000
200 #define NECP_KERNEL_CONDITION_ENTITLEMENT 0x40000
201 #define NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT 0x80000
202
203 #define NECP_MAX_POLICY_RESULT_SIZE 512
204 #define NECP_MAX_ROUTE_RULES_ARRAY_SIZE 1024
205 #define NECP_MAX_CONDITIONS_ARRAY_SIZE 4096
206
207 struct necp_service_registration {
208 LIST_ENTRY(necp_service_registration) session_chain;
209 LIST_ENTRY(necp_service_registration) kernel_chain;
210 u_int32_t service_id;
211 };
212
213 struct necp_session {
214 u_int32_t control_unit;
215 u_int32_t session_priority; // Descriptive priority rating
216 u_int32_t session_order;
217
218 bool proc_locked; // Messages must come from proc_uuid
219 uuid_t proc_uuid;
220 int proc_pid;
221
222 bool dirty;
223 LIST_HEAD(_policies, necp_session_policy) policies;
224
225 LIST_HEAD(_services, necp_service_registration) services;
226 };
227
228 struct necp_socket_info {
229 pid_t pid;
230 uid_t uid;
231 union necp_sockaddr_union local_addr;
232 union necp_sockaddr_union remote_addr;
233 u_int32_t bound_interface_index;
234 u_int32_t traffic_class;
235 u_int16_t protocol;
236 u_int32_t application_id;
237 u_int32_t real_application_id;
238 u_int32_t account_id;
239 char *domain;
240 errno_t cred_result;
241 };
242
243 static kern_ctl_ref necp_kctlref;
244 static u_int32_t necp_family;
245 static OSMallocTag necp_malloc_tag;
246 static lck_grp_attr_t *necp_kernel_policy_grp_attr = NULL;
247 static lck_attr_t *necp_kernel_policy_mtx_attr = NULL;
248 static lck_grp_t *necp_kernel_policy_mtx_grp = NULL;
249 decl_lck_rw_data(static, necp_kernel_policy_lock);
250
251 static lck_grp_attr_t *necp_route_rule_grp_attr = NULL;
252 static lck_attr_t *necp_route_rule_mtx_attr = NULL;
253 static lck_grp_t *necp_route_rule_mtx_grp = NULL;
254 decl_lck_rw_data(static, necp_route_rule_lock);
255
256 static necp_policy_id necp_last_policy_id = 0;
257 static necp_kernel_policy_id necp_last_kernel_policy_id = 0;
258 static u_int32_t necp_last_uuid_id = 0;
259 static u_int32_t necp_last_string_id = 0;
260 static u_int32_t necp_last_route_rule_id = 0;
261 static u_int32_t necp_last_aggregate_route_rule_id = 0;
262
263 /*
264 * On modification, invalidate cached lookups by bumping the generation count.
265 * Other calls will need to take the slowpath of taking
266 * the subsystem lock.
267 */
268 static volatile int32_t necp_kernel_socket_policies_gencount;
269 #define BUMP_KERNEL_SOCKET_POLICIES_GENERATION_COUNT() do { \
270 if (OSIncrementAtomic(&necp_kernel_socket_policies_gencount) == (INT32_MAX - 1)) { \
271 necp_kernel_socket_policies_gencount = 1; \
272 } \
273 } while (0)
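
/*
 * Illustrative reader-side pattern for the generation count (editorial
 * sketch; the cached_gencount parameter is hypothetical): a lookup records
 * the gencount alongside its cached result, and a later mismatch means the
 * policies changed, so the caller must redo the lookup on the slow path
 * under the subsystem lock.
 */
#if 0
static bool
example_cached_result_still_valid(int32_t cached_gencount)
{
	return (cached_gencount == necp_kernel_socket_policies_gencount);
}
#endif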
274
275 static u_int32_t necp_kernel_application_policies_condition_mask;
276 static size_t necp_kernel_application_policies_count;
277 static u_int32_t necp_kernel_socket_policies_condition_mask;
278 static size_t necp_kernel_socket_policies_count;
279 static size_t necp_kernel_socket_policies_non_app_count;
280 static LIST_HEAD(_necpkernelsocketconnectpolicies, necp_kernel_socket_policy) necp_kernel_socket_policies;
281 #define NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS 5
282 #define NECP_SOCKET_MAP_APP_ID_TO_BUCKET(appid) (appid ? (appid%(NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS - 1) + 1) : 0)
283 static struct necp_kernel_socket_policy **necp_kernel_socket_policies_map[NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS];
284 static struct necp_kernel_socket_policy **necp_kernel_socket_policies_app_layer_map;
285 /*
286 * A note on policy 'maps': these are used for boosting efficiency when matching policies. For each dimension of the map,
287 * such as an ID, the 0 bucket is reserved for sockets/packets that do not have this parameter, while the other
288 * buckets lead to an array of policy pointers that form the list applicable when the (parameter%(NUM_BUCKETS - 1) + 1) == bucket_index.
289 *
290 * For example, a packet with policy ID of 7, when there are 4 ID buckets, will map to bucket (7%3 + 1) = 2.
291 */
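
/*
 * Bucket-selection sketch for the socket policy map (editorial illustration):
 * with NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS == 5, app ID 0 maps
 * to the reserved bucket 0 and any other app ID maps to (appid % 4) + 1.
 */
#if 0
static struct necp_kernel_socket_policy **
example_policies_for_app_id(u_int32_t app_id)
{
	// Returns the policy array to evaluate for this app ID
	return (necp_kernel_socket_policies_map[NECP_SOCKET_MAP_APP_ID_TO_BUCKET(app_id)]);
}
#endif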
292
293 static u_int32_t necp_kernel_ip_output_policies_condition_mask;
294 static size_t necp_kernel_ip_output_policies_count;
295 static size_t necp_kernel_ip_output_policies_non_id_count;
296 static LIST_HEAD(_necpkernelipoutputpolicies, necp_kernel_ip_output_policy) necp_kernel_ip_output_policies;
297 #define NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS 5
298 #define NECP_IP_OUTPUT_MAP_ID_TO_BUCKET(id) (id ? (id%(NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS - 1) + 1) : 0)
299 static struct necp_kernel_ip_output_policy **necp_kernel_ip_output_policies_map[NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS];
300
301 static struct necp_session *necp_create_session(u_int32_t control_unit);
302 static void necp_delete_session(struct necp_session *session);
303
304 static void necp_handle_policy_add(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
305 static void necp_handle_policy_get(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
306 static void necp_handle_policy_delete(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
307 static void necp_handle_policy_apply_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
308 static void necp_handle_policy_list_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
309 static void necp_handle_policy_delete_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
310 static void necp_handle_policy_dump_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
311 static void necp_handle_set_session_priority(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
312 static void necp_handle_lock_session_to_proc(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
313 static void necp_handle_register_service(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
314 static void necp_handle_unregister_service(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset);
315
316 #define MAX_RESULT_STRING_LEN 64
317 static inline const char * necp_get_result_description(char *result_string, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter);
318
319 static struct necp_session_policy *necp_policy_create(struct necp_session *session, necp_policy_order order, u_int8_t *conditions_array, u_int32_t conditions_array_size, u_int8_t *route_rules_array, u_int32_t route_rules_array_size, u_int8_t *result, u_int32_t result_size);
320 static struct necp_session_policy *necp_policy_find(struct necp_session *session, necp_policy_id policy_id);
321 static bool necp_policy_mark_for_deletion(struct necp_session *session, struct necp_session_policy *policy);
322 static bool necp_policy_mark_all_for_deletion(struct necp_session *session);
323 static bool necp_policy_delete(struct necp_session *session, struct necp_session_policy *policy);
324 static void necp_policy_apply_all(struct necp_session *session);
325
326 static necp_kernel_policy_id necp_kernel_socket_policy_add(necp_policy_id parent_policy_id, necp_policy_order order, u_int32_t session_order, int session_pid, u_int32_t condition_mask, u_int32_t condition_negated_mask, necp_app_id cond_app_id, necp_app_id cond_real_app_id, char *cond_custom_entitlement, u_int32_t cond_account_id, char *domain, pid_t cond_pid, uid_t cond_uid, ifnet_t cond_bound_interface, struct necp_policy_condition_tc_range cond_traffic_class, u_int16_t cond_protocol, union necp_sockaddr_union *cond_local_start, union necp_sockaddr_union *cond_local_end, u_int8_t cond_local_prefix, union necp_sockaddr_union *cond_remote_start, union necp_sockaddr_union *cond_remote_end, u_int8_t cond_remote_prefix, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter);
327 static bool necp_kernel_socket_policy_delete(necp_kernel_policy_id policy_id);
328 static bool necp_kernel_socket_policies_reprocess(void);
329 static bool necp_kernel_socket_policies_update_uuid_table(void);
330 static inline struct necp_kernel_socket_policy *necp_socket_find_policy_match_with_info_locked(struct necp_kernel_socket_policy **policy_search_array, struct necp_socket_info *info, necp_kernel_policy_filter *return_filter, u_int32_t *return_route_rule_id, necp_kernel_policy_result *return_service_action, necp_kernel_policy_service *return_service, u_int32_t *return_netagent_array, size_t netagent_array_count, proc_t proc);
331
332 static necp_kernel_policy_id necp_kernel_ip_output_policy_add(necp_policy_id parent_policy_id, necp_policy_order order, necp_policy_order suborder, u_int32_t session_order, int session_pid, u_int32_t condition_mask, u_int32_t condition_negated_mask, necp_kernel_policy_id cond_policy_id, ifnet_t cond_bound_interface, u_int32_t cond_last_interface_index, u_int16_t cond_protocol, union necp_sockaddr_union *cond_local_start, union necp_sockaddr_union *cond_local_end, u_int8_t cond_local_prefix, union necp_sockaddr_union *cond_remote_start, union necp_sockaddr_union *cond_remote_end, u_int8_t cond_remote_prefix, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter);
333 static bool necp_kernel_ip_output_policy_delete(necp_kernel_policy_id policy_id);
334 static bool necp_kernel_ip_output_policies_reprocess(void);
335
336 static bool necp_is_addr_in_range(struct sockaddr *addr, struct sockaddr *range_start, struct sockaddr *range_end);
337 static bool necp_is_range_in_range(struct sockaddr *inner_range_start, struct sockaddr *inner_range_end, struct sockaddr *range_start, struct sockaddr *range_end);
338 static bool necp_is_addr_in_subnet(struct sockaddr *addr, struct sockaddr *subnet_addr, u_int8_t subnet_prefix);
339 static int necp_addr_compare(struct sockaddr *sa1, struct sockaddr *sa2, int check_port);
340 static bool necp_buffer_compare_with_bit_prefix(u_int8_t *p1, u_int8_t *p2, u_int32_t bits);
341 static bool necp_is_loopback(struct sockaddr *local_addr, struct sockaddr *remote_addr, struct inpcb *inp, struct mbuf *packet);
342
343 struct necp_uuid_id_mapping {
344 LIST_ENTRY(necp_uuid_id_mapping) chain;
345 uuid_t uuid;
346 u_int32_t id;
347 u_int32_t refcount;
348 u_int32_t table_refcount; // Add to UUID policy table count
349 };
350 static size_t necp_num_uuid_app_id_mappings;
351 static bool necp_uuid_app_id_mappings_dirty;
352 #define NECP_UUID_APP_ID_HASH_SIZE 64
353 static u_long necp_uuid_app_id_hash_mask;
354 static u_long necp_uuid_app_id_hash_num_buckets;
355 static LIST_HEAD(necp_uuid_id_mapping_head, necp_uuid_id_mapping) *necp_uuid_app_id_hashtbl, necp_uuid_service_id_list; // App map is real hash table, service map is just mapping
356 #define APPUUIDHASH(uuid) (&necp_uuid_app_id_hashtbl[uuid[0] & necp_uuid_app_id_hash_mask]) // Assume the first byte of UUIDs is evenly distributed
357 static u_int32_t necp_create_uuid_app_id_mapping(uuid_t uuid, bool *allocated_mapping, bool uuid_policy_table);
358 static bool necp_remove_uuid_app_id_mapping(uuid_t uuid, bool *removed_mapping, bool uuid_policy_table);
359 static struct necp_uuid_id_mapping *necp_uuid_lookup_uuid_with_app_id_locked(u_int32_t local_id);
360
361 static struct necp_uuid_id_mapping *necp_uuid_lookup_service_id_locked(uuid_t uuid);
362 static struct necp_uuid_id_mapping *necp_uuid_lookup_uuid_with_service_id_locked(u_int32_t local_id);
363 static u_int32_t necp_create_uuid_service_id_mapping(uuid_t uuid);
364 static bool necp_remove_uuid_service_id_mapping(uuid_t uuid);
365
366 struct necp_string_id_mapping {
367 LIST_ENTRY(necp_string_id_mapping) chain;
368 char *string;
369 necp_app_id id;
370 u_int32_t refcount;
371 };
372 static LIST_HEAD(necp_string_id_mapping_list, necp_string_id_mapping) necp_account_id_list;
373 static u_int32_t necp_create_string_to_id_mapping(struct necp_string_id_mapping_list *list, char *domain);
374 static bool necp_remove_string_to_id_mapping(struct necp_string_id_mapping_list *list, char *domain);
375 static struct necp_string_id_mapping *necp_lookup_string_with_id_locked(struct necp_string_id_mapping_list *list, u_int32_t local_id);
376
377 static LIST_HEAD(_necp_kernel_service_list, necp_service_registration) necp_registered_service_list;
378
379 static char *necp_create_trimmed_domain(char *string, size_t length);
380 static inline int necp_count_dots(char *string, size_t length);
381
382 static char *necp_copy_string(char *string, size_t length);
383
384 #define ROUTE_RULE_IS_AGGREGATE(ruleid) (ruleid > UINT16_MAX)
385
386 #define MAX_ROUTE_RULE_INTERFACES 10
387 struct necp_route_rule {
388 LIST_ENTRY(necp_route_rule) chain;
389 u_int32_t id;
390 u_int32_t default_action;
391 u_int8_t cellular_action;
392 u_int8_t wifi_action;
393 u_int8_t wired_action;
394 u_int8_t expensive_action;
395 u_int exception_if_indices[MAX_ROUTE_RULE_INTERFACES];
396 u_int8_t exception_if_actions[MAX_ROUTE_RULE_INTERFACES];
397 u_int32_t refcount;
398 };
399 static LIST_HEAD(necp_route_rule_list, necp_route_rule) necp_route_rules;
400 static u_int32_t necp_create_route_rule(struct necp_route_rule_list *list, u_int8_t *route_rules_array, u_int32_t route_rules_array_size);
401 static bool necp_remove_route_rule(struct necp_route_rule_list *list, u_int32_t route_rule_id);
402 static bool necp_route_is_allowed(struct rtentry *route, ifnet_t interface, u_int32_t route_rule_id, u_int32_t *interface_type_denied);
403 static struct necp_route_rule *necp_lookup_route_rule_locked(struct necp_route_rule_list *list, u_int32_t route_rule_id);
404
405 #define MAX_AGGREGATE_ROUTE_RULES 16
406 struct necp_aggregate_route_rule {
407 LIST_ENTRY(necp_aggregate_route_rule) chain;
408 u_int32_t id;
409 u_int32_t rule_ids[MAX_AGGREGATE_ROUTE_RULES];
410 };
411 static LIST_HEAD(necp_aggregate_route_rule_list, necp_aggregate_route_rule) necp_aggregate_route_rules;
412 static u_int32_t necp_create_aggregate_route_rule(u_int32_t *rule_ids);
413
414 // Sysctl definitions
415 static int sysctl_handle_necp_level SYSCTL_HANDLER_ARGS;
416
417 SYSCTL_NODE(_net, OID_AUTO, necp, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "NECP");
418 SYSCTL_INT(_net_necp, NECPCTL_PASS_LOOPBACK, pass_loopback, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_pass_loopback, 0, "");
419 SYSCTL_INT(_net_necp, NECPCTL_PASS_KEEPALIVES, pass_keepalives, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_pass_keepalives, 0, "");
420 SYSCTL_INT(_net_necp, NECPCTL_DEBUG, debug, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_debug, 0, "");
421 SYSCTL_PROC(_net_necp, NECPCTL_DROP_ALL_LEVEL, drop_all_level, CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW, &necp_drop_all_level, 0, &sysctl_handle_necp_level, "IU", "");
422 SYSCTL_LONG(_net_necp, NECPCTL_SOCKET_POLICY_COUNT, socket_policy_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_kernel_socket_policies_count, "");
423 SYSCTL_LONG(_net_necp, NECPCTL_SOCKET_NON_APP_POLICY_COUNT, socket_non_app_policy_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_kernel_socket_policies_non_app_count, "");
424 SYSCTL_LONG(_net_necp, NECPCTL_IP_POLICY_COUNT, ip_policy_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_kernel_ip_output_policies_count, "");
425 SYSCTL_INT(_net_necp, NECPCTL_SESSION_COUNT, session_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_session_count, 0, "");
426
427 // Session order allocation
428 static u_int32_t
429 necp_allocate_new_session_order(u_int32_t priority, u_int32_t control_unit)
430 {
431 u_int32_t new_order = 0;
432
433 // For now, just allocate 1000 orders for each priority
434 if (priority == NECP_SESSION_PRIORITY_UNKNOWN || priority > NECP_SESSION_NUM_PRIORITIES) {
435 priority = NECP_SESSION_PRIORITY_DEFAULT;
436 }
437
438 // Use the control unit to decide the offset into the priority list
439 new_order = (control_unit) + ((priority - 1) * 1000);
440
441 return (new_order);
442 }
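
// Worked example (illustrative numbers): a session at priority 3 with control
// unit 7 gets order (7 + (3 - 1) * 1000) = 2007, so each priority occupies its
// own 1000-wide band and numerically lower orders sort ahead of higher ones.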
443
444 static inline u_int32_t
445 necp_get_first_order_for_priority(u_int32_t priority)
446 {
447 return (((priority - 1) * 1000) + 1);
448 }
449
450 // Sysctl handler
451 static int
452 sysctl_handle_necp_level SYSCTL_HANDLER_ARGS
453 {
454 #pragma unused(arg1, arg2)
455 int error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
456 if (necp_drop_all_level == 0) {
457 necp_drop_all_order = 0;
458 } else {
459 necp_drop_all_order = necp_get_first_order_for_priority(necp_drop_all_level);
460 }
461 return (error);
462 }
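
/*
 * Illustrative user-space sketch (not part of this file): the handler above
 * is reached through the "net.necp.drop_all_level" sysctl declared by the
 * SYSCTL_NODE/SYSCTL_PROC lines above, so raising the drop-all level from a
 * privileged tool looks roughly like this. Treat the snippet as an
 * assumption about typical usage rather than a documented interface.
 */
#if 0 /* user-space example */
#include <sys/sysctl.h>

static int
example_set_drop_all_level(u_int32_t level)
{
	// Writing the value triggers sysctl_handle_necp_level(), which refreshes
	// necp_drop_all_order from the new level.
	return (sysctlbyname("net.necp.drop_all_level", NULL, NULL, &level, sizeof(level)));
}
#endif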
463
464 // Kernel Control functions
465 static errno_t necp_register_control(void);
466 static errno_t necp_ctl_connect(kern_ctl_ref kctlref, struct sockaddr_ctl *sac, void **unitinfo);
467 static errno_t necp_ctl_disconnect(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo);
468 static errno_t necp_ctl_send(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, mbuf_t m, int flags);
469 static void necp_ctl_rcvd(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int flags);
470 static errno_t necp_ctl_getopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int opt, void *data, size_t *len);
471 static errno_t necp_ctl_setopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int opt, void *data, size_t len);
472
473 static bool necp_send_ctl_data(struct necp_session *session, u_int8_t *buffer, size_t buffer_size);
474
475 errno_t
476 necp_init(void)
477 {
478 errno_t result = 0;
479
480 result = necp_register_control();
481 if (result != 0) {
482 goto done;
483 }
484
485 necp_kernel_policy_grp_attr = lck_grp_attr_alloc_init();
486 if (necp_kernel_policy_grp_attr == NULL) {
487 NECPLOG0(LOG_ERR, "lck_grp_attr_alloc_init failed");
488 result = ENOMEM;
489 goto done;
490 }
491
492 necp_kernel_policy_mtx_grp = lck_grp_alloc_init(NECP_CONTROL_NAME, necp_kernel_policy_grp_attr);
493 if (necp_kernel_policy_mtx_grp == NULL) {
494 NECPLOG0(LOG_ERR, "lck_grp_alloc_init failed");
495 result = ENOMEM;
496 goto done;
497 }
498
499 necp_kernel_policy_mtx_attr = lck_attr_alloc_init();
500 if (necp_kernel_policy_mtx_attr == NULL) {
501 NECPLOG0(LOG_ERR, "lck_attr_alloc_init failed");
502 result = ENOMEM;
503 goto done;
504 }
505
506 lck_rw_init(&necp_kernel_policy_lock, necp_kernel_policy_mtx_grp, necp_kernel_policy_mtx_attr);
507
508 necp_route_rule_grp_attr = lck_grp_attr_alloc_init();
509 if (necp_route_rule_grp_attr == NULL) {
510 NECPLOG0(LOG_ERR, "lck_grp_attr_alloc_init failed");
511 result = ENOMEM;
512 goto done;
513 }
514
515 necp_route_rule_mtx_grp = lck_grp_alloc_init("necp_route_rule", necp_route_rule_grp_attr);
516 if (necp_route_rule_mtx_grp == NULL) {
517 NECPLOG0(LOG_ERR, "lck_grp_alloc_init failed");
518 result = ENOMEM;
519 goto done;
520 }
521
522 necp_route_rule_mtx_attr = lck_attr_alloc_init();
523 if (necp_route_rule_mtx_attr == NULL) {
524 NECPLOG0(LOG_ERR, "lck_attr_alloc_init failed");
525 result = ENOMEM;
526 goto done;
527 }
528
529 lck_rw_init(&necp_route_rule_lock, necp_route_rule_mtx_grp, necp_route_rule_mtx_attr);
530
531 necp_client_init();
532
533 LIST_INIT(&necp_kernel_socket_policies);
534 LIST_INIT(&necp_kernel_ip_output_policies);
535
536 LIST_INIT(&necp_account_id_list);
537
538 LIST_INIT(&necp_uuid_service_id_list);
539
540 LIST_INIT(&necp_registered_service_list);
541
542 LIST_INIT(&necp_route_rules);
543 LIST_INIT(&necp_aggregate_route_rules);
544
545 necp_uuid_app_id_hashtbl = hashinit(NECP_UUID_APP_ID_HASH_SIZE, M_NECP, &necp_uuid_app_id_hash_mask);
546 necp_uuid_app_id_hash_num_buckets = necp_uuid_app_id_hash_mask + 1;
547 necp_num_uuid_app_id_mappings = 0;
548 necp_uuid_app_id_mappings_dirty = FALSE;
549
550 necp_kernel_application_policies_condition_mask = 0;
551 necp_kernel_socket_policies_condition_mask = 0;
552 necp_kernel_ip_output_policies_condition_mask = 0;
553
554 necp_kernel_application_policies_count = 0;
555 necp_kernel_socket_policies_count = 0;
556 necp_kernel_socket_policies_non_app_count = 0;
557 necp_kernel_ip_output_policies_count = 0;
558 necp_kernel_ip_output_policies_non_id_count = 0;
559
560 necp_last_policy_id = 0;
561 necp_last_kernel_policy_id = 0;
562 necp_last_uuid_id = 0;
563 necp_last_string_id = 0;
564 necp_last_route_rule_id = 0;
565 necp_last_aggregate_route_rule_id = 0;
566
567 necp_kernel_socket_policies_gencount = 1;
568
569 memset(&necp_kernel_socket_policies_map, 0, sizeof(necp_kernel_socket_policies_map));
570 memset(&necp_kernel_ip_output_policies_map, 0, sizeof(necp_kernel_ip_output_policies_map));
571 necp_kernel_socket_policies_app_layer_map = NULL;
572
573 done:
574 if (result != 0) {
575 if (necp_kernel_policy_mtx_attr != NULL) {
576 lck_attr_free(necp_kernel_policy_mtx_attr);
577 necp_kernel_policy_mtx_attr = NULL;
578 }
579 if (necp_kernel_policy_mtx_grp != NULL) {
580 lck_grp_free(necp_kernel_policy_mtx_grp);
581 necp_kernel_policy_mtx_grp = NULL;
582 }
583 if (necp_kernel_policy_grp_attr != NULL) {
584 lck_grp_attr_free(necp_kernel_policy_grp_attr);
585 necp_kernel_policy_grp_attr = NULL;
586 }
587 if (necp_route_rule_mtx_attr != NULL) {
588 lck_attr_free(necp_route_rule_mtx_attr);
589 necp_route_rule_mtx_attr = NULL;
590 }
591 if (necp_route_rule_mtx_grp != NULL) {
592 lck_grp_free(necp_route_rule_mtx_grp);
593 necp_route_rule_mtx_grp = NULL;
594 }
595 if (necp_route_rule_grp_attr != NULL) {
596 lck_grp_attr_free(necp_route_rule_grp_attr);
597 necp_route_rule_grp_attr = NULL;
598 }
599 if (necp_kctlref != NULL) {
600 ctl_deregister(necp_kctlref);
601 necp_kctlref = NULL;
602 }
603 }
604 return (result);
605 }
606
607 static errno_t
608 necp_register_control(void)
609 {
610 struct kern_ctl_reg kern_ctl;
611 errno_t result = 0;
612
613 // Create a tag to allocate memory
614 necp_malloc_tag = OSMalloc_Tagalloc(NECP_CONTROL_NAME, OSMT_DEFAULT);
615
616 // Find a unique value for our interface family
617 result = mbuf_tag_id_find(NECP_CONTROL_NAME, &necp_family);
618 if (result != 0) {
619 NECPLOG(LOG_ERR, "mbuf_tag_id_find failed: %d", result);
620 return (result);
621 }
622
623 bzero(&kern_ctl, sizeof(kern_ctl));
624 strlcpy(kern_ctl.ctl_name, NECP_CONTROL_NAME, sizeof(kern_ctl.ctl_name));
625 kern_ctl.ctl_name[sizeof(kern_ctl.ctl_name) - 1] = 0;
626 kern_ctl.ctl_flags = CTL_FLAG_PRIVILEGED; // Require root
627 kern_ctl.ctl_sendsize = 64 * 1024;
628 kern_ctl.ctl_recvsize = 64 * 1024;
629 kern_ctl.ctl_connect = necp_ctl_connect;
630 kern_ctl.ctl_disconnect = necp_ctl_disconnect;
631 kern_ctl.ctl_send = necp_ctl_send;
632 kern_ctl.ctl_rcvd = necp_ctl_rcvd;
633 kern_ctl.ctl_setopt = necp_ctl_setopt;
634 kern_ctl.ctl_getopt = necp_ctl_getopt;
635
636 result = ctl_register(&kern_ctl, &necp_kctlref);
637 if (result != 0) {
638 NECPLOG(LOG_ERR, "ctl_register failed: %d", result);
639 return (result);
640 }
641
642 return (0);
643 }
644
645 static void
646 necp_post_change_event(struct kev_necp_policies_changed_data *necp_event_data)
647 {
648 struct kev_msg ev_msg;
649 memset(&ev_msg, 0, sizeof(ev_msg));
650
651 ev_msg.vendor_code = KEV_VENDOR_APPLE;
652 ev_msg.kev_class = KEV_NETWORK_CLASS;
653 ev_msg.kev_subclass = KEV_NECP_SUBCLASS;
654 ev_msg.event_code = KEV_NECP_POLICIES_CHANGED;
655
656 ev_msg.dv[0].data_ptr = necp_event_data;
657 ev_msg.dv[0].data_length = sizeof(necp_event_data->changed_count);
658 ev_msg.dv[1].data_length = 0;
659
660 kev_post_msg(&ev_msg);
661 }
662
663 static errno_t
664 necp_ctl_connect(kern_ctl_ref kctlref, struct sockaddr_ctl *sac, void **unitinfo)
665 {
666 #pragma unused(kctlref)
667 *unitinfo = necp_create_session(sac->sc_unit);
668 if (*unitinfo == NULL) {
669 // Could not allocate session
670 return (ENOBUFS);
671 }
672
673 return (0);
674 }
675
676 static errno_t
677 necp_ctl_disconnect(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo)
678 {
679 #pragma unused(kctlref, unit)
680 struct necp_session *session = (struct necp_session *)unitinfo;
681 if (session != NULL) {
682 necp_policy_mark_all_for_deletion(session);
683 necp_policy_apply_all(session);
684 necp_delete_session((struct necp_session *)unitinfo);
685 }
686
687 return (0);
688 }
689
690
691 // Message handling
692 static int
693 necp_packet_find_tlv(mbuf_t packet, int offset, u_int8_t type, int *err, int next)
694 {
695 size_t cursor = offset;
696 int error = 0;
697 u_int32_t curr_length;
698 u_int8_t curr_type;
699
700 *err = 0;
701
702 do {
703 if (!next) {
704 error = mbuf_copydata(packet, cursor, sizeof(curr_type), &curr_type);
705 if (error) {
706 *err = ENOENT;
707 return (-1);
708 }
709 } else {
710 next = 0;
711 curr_type = NECP_TLV_NIL;
712 }
713
714 if (curr_type != type) {
715 cursor += sizeof(curr_type);
716 error = mbuf_copydata(packet, cursor, sizeof(curr_length), &curr_length);
717 if (error) {
718 *err = error;
719 return (-1);
720 }
721 cursor += (sizeof(curr_length) + curr_length);
722 }
723 } while (curr_type != type);
724
725 return (cursor);
726 }
727
728 static int
729 necp_packet_get_tlv_at_offset(mbuf_t packet, int tlv_offset, u_int32_t buff_len, void *buff, u_int32_t *value_size)
730 {
731 int error = 0;
732 u_int32_t length;
733
734 if (tlv_offset < 0) {
735 return (EINVAL);
736 }
737
738 error = mbuf_copydata(packet, tlv_offset + sizeof(u_int8_t), sizeof(length), &length);
739 if (error) {
740 return (error);
741 }
742
743 u_int32_t total_len = m_length2(packet, NULL);
744 if (total_len < (tlv_offset + sizeof(u_int8_t) + sizeof(length) + length)) {
745 NECPLOG(LOG_ERR, "Got a bad TLV, length (%u) + offset (%d) > total length (%u)",
746 length, (tlv_offset + sizeof(u_int8_t) + sizeof(length)), total_len);
747 return (EINVAL);
748 }
749
750 if (value_size != NULL) {
751 *value_size = length;
752 }
753
754 if (buff != NULL && buff_len > 0) {
755 u_int32_t to_copy = (length < buff_len) ? length : buff_len;
756 error = mbuf_copydata(packet, tlv_offset + sizeof(u_int8_t) + sizeof(length), to_copy, buff);
757 if (error) {
758 return (error);
759 }
760 }
761
762 return (0);
763 }
764
765 static int
766 necp_packet_get_tlv(mbuf_t packet, int offset, u_int8_t type, u_int32_t buff_len, void *buff, u_int32_t *value_size)
767 {
768 int error = 0;
769 int tlv_offset;
770
771 tlv_offset = necp_packet_find_tlv(packet, offset, type, &error, 0);
772 if (tlv_offset < 0) {
773 return (error);
774 }
775
776 return (necp_packet_get_tlv_at_offset(packet, tlv_offset, buff_len, buff, value_size));
777 }
778
779 static u_int8_t *
780 necp_buffer_write_packet_header(u_int8_t *buffer, u_int8_t packet_type, u_int8_t flags, u_int32_t message_id)
781 {
782 ((struct necp_packet_header *)(void *)buffer)->packet_type = packet_type;
783 ((struct necp_packet_header *)(void *)buffer)->flags = flags;
784 ((struct necp_packet_header *)(void *)buffer)->message_id = message_id;
785 return (buffer + sizeof(struct necp_packet_header));
786 }
787
788
789 u_int8_t *
790 necp_buffer_write_tlv_if_different(u_int8_t *buffer, const u_int8_t *max, u_int8_t type,
791 u_int32_t length, const void *value, bool *updated)
792 {
793 u_int8_t *next_tlv = (u_int8_t *)(buffer + sizeof(type) + sizeof(length) + length);
794 if (next_tlv <= max) {
795 if (*updated || *(u_int8_t *)(buffer) != type) {
796 *(u_int8_t *)(buffer) = type;
797 *updated = TRUE;
798 }
799 if (*updated || *(u_int32_t *)(void *)(buffer + sizeof(type)) != length) {
800 *(u_int32_t *)(void *)(buffer + sizeof(type)) = length;
801 *updated = TRUE;
802 }
803 if (length > 0) {
804 if (*updated || memcmp((u_int8_t *)(buffer + sizeof(type) + sizeof(length)), value, length) != 0) {
805 memcpy((u_int8_t *)(buffer + sizeof(type) + sizeof(length)), value, length);
806 *updated = TRUE;
807 }
808 }
809 }
810 return (next_tlv);
811 }
812
813 u_int8_t *
814 necp_buffer_write_tlv(u_int8_t *buffer, u_int8_t type, u_int32_t length, const void *value)
815 {
816 *(u_int8_t *)(buffer) = type;
817 *(u_int32_t *)(void *)(buffer + sizeof(type)) = length;
818 if (length > 0) {
819 memcpy((u_int8_t *)(buffer + sizeof(type) + sizeof(length)), value, length);
820 }
821
822 return ((u_int8_t *)(buffer + sizeof(type) + sizeof(length) + length));
823 }
824
825 u_int8_t
826 necp_buffer_get_tlv_type(u_int8_t *buffer, int tlv_offset)
827 {
828 u_int8_t *type = NULL;
829
830 if (buffer == NULL) {
831 return (0);
832 }
833
834 type = (u_int8_t *)((u_int8_t *)buffer + tlv_offset);
835 return (type ? *type : 0);
836 }
837
838 u_int32_t
839 necp_buffer_get_tlv_length(u_int8_t *buffer, int tlv_offset)
840 {
841 u_int32_t *length = NULL;
842
843 if (buffer == NULL) {
844 return (0);
845 }
846
847 length = (u_int32_t *)(void *)((u_int8_t *)buffer + tlv_offset + sizeof(u_int8_t));
848 return (length ? *length : 0);
849 }
850
851 u_int8_t *
852 necp_buffer_get_tlv_value(u_int8_t *buffer, int tlv_offset, u_int32_t *value_size)
853 {
854 u_int8_t *value = NULL;
855 u_int32_t length = necp_buffer_get_tlv_length(buffer, tlv_offset);
856 if (length == 0) {
857 return (value);
858 }
859
860 if (value_size) {
861 *value_size = length;
862 }
863
864 value = (u_int8_t *)((u_int8_t *)buffer + tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t));
865 return (value);
866 }
867
868 int
869 necp_buffer_find_tlv(u_int8_t *buffer, u_int32_t buffer_length, int offset, u_int8_t type, int next)
870 {
871 if (offset < 0) {
872 return (-1);
873 }
874 int cursor = offset;
875 int next_cursor;
876 u_int32_t curr_length;
877 u_int8_t curr_type;
878
879 while (TRUE) {
880 if ((((u_int32_t)cursor) + sizeof(curr_type) + sizeof(curr_length)) > buffer_length) {
881 return (-1);
882 }
883 if (!next) {
884 curr_type = necp_buffer_get_tlv_type(buffer, cursor);
885 } else {
886 next = 0;
887 curr_type = NECP_TLV_NIL;
888 }
889 curr_length = necp_buffer_get_tlv_length(buffer, cursor);
890 next_cursor = (cursor + sizeof(curr_type) + sizeof(curr_length) + curr_length);
891 if (curr_type == type) {
892 // check if entire TLV fits inside buffer
893 if (((u_int32_t)next_cursor) <= buffer_length) {
894 return (cursor);
895 } else {
896 return (-1);
897 }
898 }
899 cursor = next_cursor;
900 }
901 }
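
/*
 * TLV round-trip sketch (editorial illustration): a TLV is a one-byte type,
 * a four-byte length in host byte order, then 'length' bytes of value. The
 * scratch-buffer size and the use of NECP_TLV_SESSION_PRIORITY here are
 * arbitrary choices for the example.
 */
#if 0
static void
example_tlv_round_trip(void)
{
	u_int8_t scratch[64];
	u_int32_t priority = NECP_SESSION_PRIORITY_DEFAULT;

	u_int8_t *end = necp_buffer_write_tlv(scratch, NECP_TLV_SESSION_PRIORITY, sizeof(priority), &priority);
	int offset = necp_buffer_find_tlv(scratch, (u_int32_t)(end - scratch), 0, NECP_TLV_SESSION_PRIORITY, 0);
	if (offset >= 0) {
		u_int32_t value_size = 0;
		u_int8_t *value = necp_buffer_get_tlv_value(scratch, offset, &value_size);
		// value points at the four payload bytes; value_size == sizeof(priority)
		(void)value;
	}
}
#endif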
902
903 static bool
904 necp_send_ctl_data(struct necp_session *session, u_int8_t *buffer, size_t buffer_size)
905 {
906 int error;
907
908 if (necp_kctlref == NULL || session == NULL || buffer == NULL || buffer_size == 0) {
909 return (FALSE);
910 }
911
912 error = ctl_enqueuedata(necp_kctlref, session->control_unit, buffer, buffer_size, CTL_DATA_EOR);
913
914 return (error == 0);
915 }
916
917 static bool
918 necp_send_success_response(struct necp_session *session, u_int8_t packet_type, u_int32_t message_id)
919 {
920 bool success = TRUE;
921 u_int8_t *response = NULL;
922 u_int8_t *cursor = NULL;
923 size_t response_size = sizeof(struct necp_packet_header) + sizeof(u_int8_t) + sizeof(u_int32_t);
924 MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK);
925 if (response == NULL) {
926 return (FALSE);
927 }
928 cursor = response;
929 cursor = necp_buffer_write_packet_header(cursor, packet_type, NECP_PACKET_FLAGS_RESPONSE, message_id);
930 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_NIL, 0, NULL);
931
932 if (!(success = necp_send_ctl_data(session, (u_int8_t *)response, response_size))) {
933 NECPLOG0(LOG_ERR, "Failed to send response");
934 }
935
936 FREE(response, M_NECP);
937 return (success);
938 }
939
940 static bool
941 necp_send_error_response(struct necp_session *session, u_int8_t packet_type, u_int32_t message_id, u_int32_t error)
942 {
943 bool success = TRUE;
944 u_int8_t *response = NULL;
945 u_int8_t *cursor = NULL;
946 size_t response_size = sizeof(struct necp_packet_header) + sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(u_int32_t);
947 MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK);
948 if (response == NULL) {
949 return (FALSE);
950 }
951 cursor = response;
952 cursor = necp_buffer_write_packet_header(cursor, packet_type, NECP_PACKET_FLAGS_RESPONSE, message_id);
953 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_ERROR, sizeof(error), &error);
954
955 if (!(success = necp_send_ctl_data(session, (u_int8_t *)response, response_size))) {
956 NECPLOG0(LOG_ERR, "Failed to send response");
957 }
958
959 FREE(response, M_NECP);
960 return (success);
961 }
962
963 static bool
964 necp_send_policy_id_response(struct necp_session *session, u_int8_t packet_type, u_int32_t message_id, necp_policy_id policy_id)
965 {
966 bool success = TRUE;
967 u_int8_t *response = NULL;
968 u_int8_t *cursor = NULL;
969 size_t response_size = sizeof(struct necp_packet_header) + sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(u_int32_t);
970 MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK);
971 if (response == NULL) {
972 return (FALSE);
973 }
974 cursor = response;
975 cursor = necp_buffer_write_packet_header(cursor, packet_type, NECP_PACKET_FLAGS_RESPONSE, message_id);
976 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ID, sizeof(policy_id), &policy_id);
977
978 if (!(success = necp_send_ctl_data(session, (u_int8_t *)response, response_size))) {
979 NECPLOG0(LOG_ERR, "Failed to send response");
980 }
981
982 FREE(response, M_NECP);
983 return (success);
984 }
985
986 static errno_t
987 necp_ctl_send(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, mbuf_t packet, int flags)
988 {
989 #pragma unused(kctlref, unit, flags)
990 struct necp_session *session = (struct necp_session *)unitinfo;
991 struct necp_packet_header header;
992 int error = 0;
993
994 if (session == NULL) {
995 NECPLOG0(LOG_ERR, "Got a NULL session");
996 error = EINVAL;
997 goto done;
998 }
999
1000 if (mbuf_pkthdr_len(packet) < sizeof(header)) {
1001 NECPLOG(LOG_ERR, "Got a bad packet, length (%lu) < sizeof header (%lu)", mbuf_pkthdr_len(packet), sizeof(header));
1002 error = EINVAL;
1003 goto done;
1004 }
1005
1006 error = mbuf_copydata(packet, 0, sizeof(header), &header);
1007 if (error) {
1008 NECPLOG(LOG_ERR, "mbuf_copydata failed for the header: %d", error);
1009 error = ENOBUFS;
1010 goto done;
1011 }
1012
1013 if (session->proc_locked) {
1014 // Verify that the calling process is allowed to send messages
1015 uuid_t proc_uuid;
1016 proc_getexecutableuuid(current_proc(), proc_uuid, sizeof(proc_uuid));
1017 if (uuid_compare(proc_uuid, session->proc_uuid) != 0) {
1018 necp_send_error_response(session, header.packet_type, header.message_id, NECP_ERROR_INVALID_PROCESS);
1019 goto done;
1020 }
1021 } else {
1022 // If not locked, update the proc_uuid and proc_pid of the session
1023 proc_getexecutableuuid(current_proc(), session->proc_uuid, sizeof(session->proc_uuid));
1024 session->proc_pid = proc_pid(current_proc());
1025 }
1026
1027 switch (header.packet_type) {
1028 case NECP_PACKET_TYPE_POLICY_ADD: {
1029 necp_handle_policy_add(session, header.message_id, packet, sizeof(header));
1030 break;
1031 }
1032 case NECP_PACKET_TYPE_POLICY_GET: {
1033 necp_handle_policy_get(session, header.message_id, packet, sizeof(header));
1034 break;
1035 }
1036 case NECP_PACKET_TYPE_POLICY_DELETE: {
1037 necp_handle_policy_delete(session, header.message_id, packet, sizeof(header));
1038 break;
1039 }
1040 case NECP_PACKET_TYPE_POLICY_APPLY_ALL: {
1041 necp_handle_policy_apply_all(session, header.message_id, packet, sizeof(header));
1042 break;
1043 }
1044 case NECP_PACKET_TYPE_POLICY_LIST_ALL: {
1045 necp_handle_policy_list_all(session, header.message_id, packet, sizeof(header));
1046 break;
1047 }
1048 case NECP_PACKET_TYPE_POLICY_DELETE_ALL: {
1049 necp_handle_policy_delete_all(session, header.message_id, packet, sizeof(header));
1050 break;
1051 }
1052 case NECP_PACKET_TYPE_POLICY_DUMP_ALL: {
1053 necp_handle_policy_dump_all(session, header.message_id, packet, sizeof(header));
1054 break;
1055 }
1056 case NECP_PACKET_TYPE_SET_SESSION_PRIORITY: {
1057 necp_handle_set_session_priority(session, header.message_id, packet, sizeof(header));
1058 break;
1059 }
1060 case NECP_PACKET_TYPE_LOCK_SESSION_TO_PROC: {
1061 necp_handle_lock_session_to_proc(session, header.message_id, packet, sizeof(header));
1062 break;
1063 }
1064 case NECP_PACKET_TYPE_REGISTER_SERVICE: {
1065 necp_handle_register_service(session, header.message_id, packet, sizeof(header));
1066 break;
1067 }
1068 case NECP_PACKET_TYPE_UNREGISTER_SERVICE: {
1069 necp_handle_unregister_service(session, header.message_id, packet, sizeof(header));
1070 break;
1071 }
1072 default: {
1073 NECPLOG(LOG_ERR, "Received unknown message type %d", header.packet_type);
1074 necp_send_error_response(session, header.packet_type, header.message_id, NECP_ERROR_UNKNOWN_PACKET_TYPE);
1075 break;
1076 }
1077 }
1078
1079 done:
1080 mbuf_freem(packet);
1081 return (error);
1082 }
1083
1084 static void
1085 necp_ctl_rcvd(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int flags)
1086 {
1087 #pragma unused(kctlref, unit, unitinfo, flags)
1088 return;
1089 }
1090
1091 static errno_t
1092 necp_ctl_getopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int opt, void *data, size_t *len)
1093 {
1094 #pragma unused(kctlref, unit, unitinfo, opt, data, len)
1095 return (0);
1096 }
1097
1098 static errno_t
1099 necp_ctl_setopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int opt, void *data, size_t len)
1100 {
1101 #pragma unused(kctlref, unit, unitinfo, opt, data, len)
1102 return (0);
1103 }
1104
1105 // Session Management
1106 static struct necp_session *
1107 necp_create_session(u_int32_t control_unit)
1108 {
1109 struct necp_session *new_session = NULL;
1110
1111 MALLOC(new_session, struct necp_session *, sizeof(*new_session), M_NECP, M_WAITOK);
1112 if (new_session == NULL) {
1113 goto done;
1114 }
1115 if (necp_debug) {
1116 NECPLOG(LOG_DEBUG, "Create NECP session, control unit %d", control_unit);
1117 }
1118 memset(new_session, 0, sizeof(*new_session));
1119 new_session->session_priority = NECP_SESSION_PRIORITY_UNKNOWN;
1120 new_session->session_order = necp_allocate_new_session_order(new_session->session_priority, control_unit);
1121 new_session->control_unit = control_unit;
1122 new_session->dirty = FALSE;
1123 LIST_INIT(&new_session->policies);
1124
1125 lck_rw_lock_exclusive(&necp_kernel_policy_lock);
1126 necp_session_count++;
1127 lck_rw_done(&necp_kernel_policy_lock);
1128
1129 done:
1130 return (new_session);
1131 }
1132
1133 static void
1134 necp_delete_session(struct necp_session *session)
1135 {
1136 if (session != NULL) {
1137 struct necp_service_registration *service = NULL;
1138 struct necp_service_registration *temp_service = NULL;
1139 LIST_FOREACH_SAFE(service, &session->services, session_chain, temp_service) {
1140 LIST_REMOVE(service, session_chain);
1141 lck_rw_lock_exclusive(&necp_kernel_policy_lock);
1142 LIST_REMOVE(service, kernel_chain);
1143 lck_rw_done(&necp_kernel_policy_lock);
1144 FREE(service, M_NECP);
1145 }
1146 if (necp_debug) {
1147 NECPLOG0(LOG_DEBUG, "Deleted NECP session");
1148 }
1149 FREE(session, M_NECP);
1150
1151 lck_rw_lock_exclusive(&necp_kernel_policy_lock);
1152 necp_session_count--;
1153 lck_rw_done(&necp_kernel_policy_lock);
1154 }
1155 }
1156
1157 // Session Policy Management
1158
1159 static inline u_int8_t
1160 necp_policy_result_get_type_from_buffer(u_int8_t *buffer, u_int32_t length)
1161 {
1162 return ((buffer && length >= sizeof(u_int8_t)) ? buffer[0] : 0);
1163 }
1164
1165 static inline u_int32_t
1166 necp_policy_result_get_parameter_length_from_buffer(u_int8_t *buffer, u_int32_t length)
1167 {
1168 return ((buffer && length > sizeof(u_int8_t)) ? (length - sizeof(u_int8_t)) : 0);
1169 }
1170
1171 static inline u_int8_t *
1172 necp_policy_result_get_parameter_pointer_from_buffer(u_int8_t *buffer, u_int32_t length)
1173 {
1174 return ((buffer && length > sizeof(u_int8_t)) ? (buffer + sizeof(u_int8_t)) : NULL);
1175 }
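
/*
 * Sketch of the result-buffer layout these helpers parse (editorial
 * illustration; the skip_order value is arbitrary): a policy result value is
 * a one-byte result type followed immediately by its optional parameter.
 */
#if 0
static void
example_build_skip_result(void)
{
	u_int8_t result_buf[sizeof(u_int8_t) + sizeof(u_int32_t)];
	u_int32_t skip_order = 10; // hypothetical policy order to skip to

	result_buf[0] = NECP_POLICY_RESULT_SKIP;
	memcpy(result_buf + sizeof(u_int8_t), &skip_order, sizeof(skip_order));

	// The helpers above recover the pieces:
	//   necp_policy_result_get_type_from_buffer(...)              == NECP_POLICY_RESULT_SKIP
	//   necp_policy_result_get_parameter_length_from_buffer(...)  == sizeof(u_int32_t)
	//   necp_policy_result_get_parameter_pointer_from_buffer(...) points at skip_order
}
#endif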
1176
1177 static bool
1178 necp_policy_result_requires_route_rules(u_int8_t *buffer, u_int32_t length)
1179 {
1180 u_int8_t type = necp_policy_result_get_type_from_buffer(buffer, length);
1181 if (type == NECP_POLICY_RESULT_ROUTE_RULES) {
1182 return (TRUE);
1183 }
1184 return (FALSE);
1185 }
1186
1187 static inline bool
1188 necp_address_is_valid(struct sockaddr *address)
1189 {
1190 if (address->sa_family == AF_INET) {
1191 return (address->sa_len == sizeof(struct sockaddr_in));
1192 } else if (address->sa_family == AF_INET6) {
1193 return (address->sa_len == sizeof(struct sockaddr_in6));
1194 } else {
1195 return (FALSE);
1196 }
1197 }
1198
1199 static bool
1200 necp_policy_result_is_valid(u_int8_t *buffer, u_int32_t length)
1201 {
1202 bool validated = FALSE;
1203 u_int8_t type = necp_policy_result_get_type_from_buffer(buffer, length);
1204 u_int32_t parameter_length = necp_policy_result_get_parameter_length_from_buffer(buffer, length);
1205 switch (type) {
1206 case NECP_POLICY_RESULT_PASS: {
1207 validated = TRUE;
1208 break;
1209 }
1210 case NECP_POLICY_RESULT_SKIP: {
1211 if (parameter_length >= sizeof(u_int32_t)) {
1212 validated = TRUE;
1213 }
1214 break;
1215 }
1216 case NECP_POLICY_RESULT_DROP: {
1217 validated = TRUE;
1218 break;
1219 }
1220 case NECP_POLICY_RESULT_SOCKET_DIVERT: {
1221 if (parameter_length >= sizeof(u_int32_t)) {
1222 validated = TRUE;
1223 }
1224 break;
1225 }
1226 case NECP_POLICY_RESULT_SOCKET_SCOPED: {
1227 if (parameter_length > 0) {
1228 validated = TRUE;
1229 }
1230 break;
1231 }
1232 case NECP_POLICY_RESULT_IP_TUNNEL: {
1233 if (parameter_length > sizeof(u_int32_t)) {
1234 validated = TRUE;
1235 }
1236 break;
1237 }
1238 case NECP_POLICY_RESULT_SOCKET_FILTER: {
1239 if (parameter_length >= sizeof(u_int32_t)) {
1240 validated = TRUE;
1241 }
1242 break;
1243 }
1244 case NECP_POLICY_RESULT_ROUTE_RULES: {
1245 validated = TRUE;
1246 break;
1247 }
1248 case NECP_POLICY_RESULT_TRIGGER:
1249 case NECP_POLICY_RESULT_TRIGGER_IF_NEEDED:
1250 case NECP_POLICY_RESULT_TRIGGER_SCOPED:
1251 case NECP_POLICY_RESULT_NO_TRIGGER_SCOPED:
1252 case NECP_POLICY_RESULT_USE_NETAGENT: {
1253 if (parameter_length >= sizeof(uuid_t)) {
1254 validated = TRUE;
1255 }
1256 break;
1257 }
1258 default: {
1259 validated = FALSE;
1260 break;
1261 }
1262 }
1263
1264 if (necp_debug) {
1265 NECPLOG(LOG_DEBUG, "Policy result type %d, valid %d", type, validated);
1266 }
1267
1268 return (validated);
1269 }
1270
1271 static inline u_int8_t
1272 necp_policy_condition_get_type_from_buffer(u_int8_t *buffer, u_int32_t length)
1273 {
1274 return ((buffer && length >= sizeof(u_int8_t)) ? buffer[0] : 0);
1275 }
1276
1277 static inline u_int8_t
1278 necp_policy_condition_get_flags_from_buffer(u_int8_t *buffer, u_int32_t length)
1279 {
1280 return ((buffer && length >= (2 * sizeof(u_int8_t))) ? buffer[1] : 0);
1281 }
1282
1283 static inline u_int32_t
1284 necp_policy_condition_get_value_length_from_buffer(u_int8_t *buffer, u_int32_t length)
1285 {
1286 return ((buffer && length >= (2 * sizeof(u_int8_t))) ? (length - (2 * sizeof(u_int8_t))) : 0);
1287 }
1288
1289 static inline u_int8_t *
1290 necp_policy_condition_get_value_pointer_from_buffer(u_int8_t *buffer, u_int32_t length)
1291 {
1292 return ((buffer && length > (2 * sizeof(u_int8_t))) ? (buffer + (2 * sizeof(u_int8_t))) : NULL);
1293 }
1294
1295 static inline bool
1296 necp_policy_condition_is_default(u_int8_t *buffer, u_int32_t length)
1297 {
1298 return (necp_policy_condition_get_type_from_buffer(buffer, length) == NECP_POLICY_CONDITION_DEFAULT);
1299 }
1300
1301 static inline bool
1302 necp_policy_condition_is_application(u_int8_t *buffer, u_int32_t length)
1303 {
1304 return (necp_policy_condition_get_type_from_buffer(buffer, length) == NECP_POLICY_CONDITION_APPLICATION);
1305 }
1306
1307 static inline bool
1308 necp_policy_condition_requires_application(u_int8_t *buffer, u_int32_t length)
1309 {
1310 u_int8_t type = necp_policy_condition_get_type_from_buffer(buffer, length);
1311 return (type == NECP_POLICY_CONDITION_REAL_APPLICATION);
1312 }
1313
1314 static bool
1315 necp_policy_condition_is_valid(u_int8_t *buffer, u_int32_t length, u_int8_t policy_result_type)
1316 {
1317 bool validated = FALSE;
1318 bool result_cannot_have_ip_layer = (policy_result_type == NECP_POLICY_RESULT_SOCKET_DIVERT ||
1319 policy_result_type == NECP_POLICY_RESULT_SOCKET_FILTER ||
1320 policy_result_type == NECP_POLICY_RESULT_TRIGGER ||
1321 policy_result_type == NECP_POLICY_RESULT_TRIGGER_IF_NEEDED ||
1322 policy_result_type == NECP_POLICY_RESULT_TRIGGER_SCOPED ||
1323 policy_result_type == NECP_POLICY_RESULT_NO_TRIGGER_SCOPED ||
1324 policy_result_type == NECP_POLICY_RESULT_SOCKET_SCOPED ||
1325 policy_result_type == NECP_POLICY_RESULT_ROUTE_RULES ||
1326 policy_result_type == NECP_POLICY_RESULT_USE_NETAGENT) ? TRUE : FALSE;
1327 u_int32_t condition_length = necp_policy_condition_get_value_length_from_buffer(buffer, length);
1328 u_int8_t *condition_value = necp_policy_condition_get_value_pointer_from_buffer(buffer, length);
1329 u_int8_t type = necp_policy_condition_get_type_from_buffer(buffer, length);
1330 u_int8_t flags = necp_policy_condition_get_flags_from_buffer(buffer, length);
1331 switch (type) {
1332 case NECP_POLICY_CONDITION_APPLICATION:
1333 case NECP_POLICY_CONDITION_REAL_APPLICATION: {
1334 if (!(flags & NECP_POLICY_CONDITION_FLAGS_NEGATIVE) &&
1335 condition_length >= sizeof(uuid_t) &&
1336 condition_value != NULL &&
1337 !uuid_is_null(condition_value)) {
1338 validated = TRUE;
1339 }
1340 break;
1341 }
1342 case NECP_POLICY_CONDITION_DOMAIN:
1343 case NECP_POLICY_CONDITION_ACCOUNT:
1344 case NECP_POLICY_CONDITION_BOUND_INTERFACE: {
1345 if (condition_length > 0) {
1346 validated = TRUE;
1347 }
1348 break;
1349 }
1350 case NECP_POLICY_CONDITION_TRAFFIC_CLASS: {
1351 if (condition_length >= sizeof(struct necp_policy_condition_tc_range)) {
1352 validated = TRUE;
1353 }
1354 break;
1355 }
1356 case NECP_POLICY_CONDITION_DEFAULT:
1357 case NECP_POLICY_CONDITION_ALL_INTERFACES:
1358 case NECP_POLICY_CONDITION_ENTITLEMENT: {
1359 if (!(flags & NECP_POLICY_CONDITION_FLAGS_NEGATIVE)) {
1360 validated = TRUE;
1361 }
1362 break;
1363 }
1364 case NECP_POLICY_CONDITION_IP_PROTOCOL: {
1365 if (condition_length >= sizeof(u_int16_t)) {
1366 validated = TRUE;
1367 }
1368 break;
1369 }
1370 case NECP_POLICY_CONDITION_PID: {
1371 if (condition_length >= sizeof(pid_t) &&
1372 condition_value != NULL &&
1373 *((pid_t *)(void *)condition_value) != 0) {
1374 validated = TRUE;
1375 }
1376 break;
1377 }
1378 case NECP_POLICY_CONDITION_UID: {
1379 if (condition_length >= sizeof(uid_t)) {
1380 validated = TRUE;
1381 }
1382 break;
1383 }
1384 case NECP_POLICY_CONDITION_LOCAL_ADDR:
1385 case NECP_POLICY_CONDITION_REMOTE_ADDR: {
1386 if (!result_cannot_have_ip_layer && condition_length >= sizeof(struct necp_policy_condition_addr) &&
1387 necp_address_is_valid(&((struct necp_policy_condition_addr *)(void *)condition_value)->address.sa)) {
1388 validated = TRUE;
1389 }
1390 break;
1391 }
1392 case NECP_POLICY_CONDITION_LOCAL_ADDR_RANGE:
1393 case NECP_POLICY_CONDITION_REMOTE_ADDR_RANGE: {
1394 if (!result_cannot_have_ip_layer && condition_length >= sizeof(struct necp_policy_condition_addr_range) &&
1395 necp_address_is_valid(&((struct necp_policy_condition_addr_range *)(void *)condition_value)->start_address.sa) &&
1396 necp_address_is_valid(&((struct necp_policy_condition_addr_range *)(void *)condition_value)->end_address.sa)) {
1397 validated = TRUE;
1398 }
1399 break;
1400 }
1401 default: {
1402 validated = FALSE;
1403 break;
1404 }
1405 }
1406
1407 if (necp_debug) {
1408 NECPLOG(LOG_DEBUG, "Policy condition type %d, valid %d", type, validated);
1409 }
1410
1411 return (validated);
1412 }
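/*
 * For example, a policy whose result is NECP_POLICY_RESULT_SOCKET_FILTER
 * cannot use NECP_POLICY_CONDITION_LOCAL_ADDR: result_cannot_have_ip_layer
 * is TRUE for that result type above, so the address condition fails
 * validation.
 */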
1413
1414 static bool
1415 necp_policy_route_rule_is_default(u_int8_t *buffer, u_int32_t length)
1416 {
1417 return (necp_policy_condition_get_value_length_from_buffer(buffer, length) == 0 &&
1418 necp_policy_condition_get_flags_from_buffer(buffer, length) == 0);
1419 }
1420
1421 static bool
1422 necp_policy_route_rule_is_valid(u_int8_t *buffer, u_int32_t length)
1423 {
1424 bool validated = FALSE;
1425 u_int8_t type = necp_policy_condition_get_type_from_buffer(buffer, length);
1426 switch (type) {
1427 case NECP_ROUTE_RULE_ALLOW_INTERFACE: {
1428 validated = TRUE;
1429 break;
1430 }
1431 case NECP_ROUTE_RULE_DENY_INTERFACE: {
1432 validated = TRUE;
1433 break;
1434 }
1435 case NECP_ROUTE_RULE_QOS_MARKING: {
1436 validated = TRUE;
1437 break;
1438 }
1439 default: {
1440 validated = FALSE;
1441 break;
1442 }
1443 }
1444
1445 if (necp_debug) {
1446 NECPLOG(LOG_DEBUG, "Policy route rule type %d, valid %d", type, validated);
1447 }
1448
1449 return (validated);
1450 }
1451
1452 static void
1453 necp_handle_set_session_priority(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
1454 {
1455 int error;
1456 struct necp_session_policy *policy = NULL;
1457 struct necp_session_policy *temp_policy = NULL;
1458 u_int32_t response_error = NECP_ERROR_INTERNAL;
1459 u_int32_t requested_session_priority = NECP_SESSION_PRIORITY_UNKNOWN;
1460
1461 // Read requested session priority
1462 error = necp_packet_get_tlv(packet, offset, NECP_TLV_SESSION_PRIORITY, sizeof(requested_session_priority), &requested_session_priority, NULL);
1463 if (error) {
1464 NECPLOG(LOG_ERR, "Failed to get session priority: %d", error);
1465 response_error = NECP_ERROR_INVALID_TLV;
1466 goto fail;
1467 }
1468
1469 if (session == NULL) {
1470 NECPLOG0(LOG_ERR, "Failed to find session");
1471 response_error = NECP_ERROR_INTERNAL;
1472 goto fail;
1473 }
1474
1475 // Enforce special session priorities with entitlements
1476 if (requested_session_priority == NECP_SESSION_PRIORITY_CONTROL ||
1477 requested_session_priority == NECP_SESSION_PRIORITY_PRIVILEGED_TUNNEL) {
1478 errno_t cred_result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES, 0);
1479 if (cred_result != 0) {
1480 NECPLOG(LOG_ERR, "Session does not hold necessary entitlement to claim priority level %d", requested_session_priority);
1481 goto fail;
1482 }
1483 }
1484
1485 if (session->session_priority != requested_session_priority) {
1486 session->session_priority = requested_session_priority;
1487 session->session_order = necp_allocate_new_session_order(session->session_priority, session->control_unit);
1488 session->dirty = TRUE;
1489
1490 // Mark all policies as needing updates
1491 LIST_FOREACH_SAFE(policy, &session->policies, chain, temp_policy) {
1492 policy->pending_update = TRUE;
1493 }
1494 }
1495
1496 necp_send_success_response(session, NECP_PACKET_TYPE_SET_SESSION_PRIORITY, message_id);
1497 return;
1498
1499 fail:
1500 necp_send_error_response(session, NECP_PACKET_TYPE_SET_SESSION_PRIORITY, message_id, response_error);
1501 }
1502
1503 static void
1504 necp_handle_lock_session_to_proc(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
1505 {
1506 #pragma unused(packet, offset)
1507 // proc_uuid already filled out
1508 session->proc_locked = TRUE;
1509 necp_send_success_response(session, NECP_PACKET_TYPE_LOCK_SESSION_TO_PROC, message_id);
1510 }
1511
1512 static void
1513 necp_handle_register_service(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
1514 {
1515 int error;
1516 struct necp_service_registration *new_service = NULL;
1517 u_int32_t response_error = NECP_ERROR_INTERNAL;
1518 uuid_t service_uuid;
1519 uuid_clear(service_uuid);
1520
1521 if (session == NULL) {
1522 NECPLOG0(LOG_ERR, "Failed to find session");
1523 response_error = NECP_ERROR_INTERNAL;
1524 goto fail;
1525 }
1526
1527 // Enforce entitlements
1528 errno_t cred_result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES, 0);
1529 if (cred_result != 0) {
1530 NECPLOG0(LOG_ERR, "Session does not hold necessary entitlement to register service");
1531 goto fail;
1532 }
1533
1534 // Read service uuid
1535 error = necp_packet_get_tlv(packet, offset, NECP_TLV_SERVICE_UUID, sizeof(uuid_t), service_uuid, NULL);
1536 if (error) {
1537 NECPLOG(LOG_ERR, "Failed to get service UUID: %d", error);
1538 response_error = NECP_ERROR_INVALID_TLV;
1539 goto fail;
1540 }
1541
1542 MALLOC(new_service, struct necp_service_registration *, sizeof(*new_service), M_NECP, M_WAITOK);
1543 if (new_service == NULL) {
1544 NECPLOG0(LOG_ERR, "Failed to allocate service registration");
1545 response_error = NECP_ERROR_INTERNAL;
1546 goto fail;
1547 }
1548
1549 lck_rw_lock_exclusive(&necp_kernel_policy_lock);
1550 memset(new_service, 0, sizeof(*new_service));
1551 new_service->service_id = necp_create_uuid_service_id_mapping(service_uuid);
1552 LIST_INSERT_HEAD(&session->services, new_service, session_chain);
1553 LIST_INSERT_HEAD(&necp_registered_service_list, new_service, kernel_chain);
1554 lck_rw_done(&necp_kernel_policy_lock);
1555
1556 necp_send_success_response(session, NECP_PACKET_TYPE_REGISTER_SERVICE, message_id);
1557 return;
1558 fail:
1559 necp_send_error_response(session, NECP_PACKET_TYPE_REGISTER_SERVICE, message_id, response_error);
1560 }
1561
1562 static void
1563 necp_handle_unregister_service(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
1564 {
1565 int error;
1566 struct necp_service_registration *service = NULL;
1567 struct necp_service_registration *temp_service = NULL;
1568 u_int32_t response_error = NECP_ERROR_INTERNAL;
1569 struct necp_uuid_id_mapping *mapping = NULL;
1570 uuid_t service_uuid;
1571 uuid_clear(service_uuid);
1572
1573 if (session == NULL) {
1574 NECPLOG0(LOG_ERR, "Failed to find session");
1575 response_error = NECP_ERROR_INTERNAL;
1576 goto fail;
1577 }
1578
1579 // Read service uuid
1580 error = necp_packet_get_tlv(packet, offset, NECP_TLV_SERVICE_UUID, sizeof(uuid_t), service_uuid, NULL);
1581 if (error) {
1582 NECPLOG(LOG_ERR, "Failed to get service UUID: %d", error);
1583 response_error = NECP_ERROR_INVALID_TLV;
1584 goto fail;
1585 }
1586
1587 // Remove all services registered by this session that match the UUID
1588 lck_rw_lock_exclusive(&necp_kernel_policy_lock);
1589 mapping = necp_uuid_lookup_service_id_locked(service_uuid);
1590 if (mapping != NULL) {
1591 LIST_FOREACH_SAFE(service, &session->services, session_chain, temp_service) {
1592 if (service->service_id == mapping->id) {
1593 LIST_REMOVE(service, session_chain);
1594 LIST_REMOVE(service, kernel_chain);
1595 FREE(service, M_NECP);
1596 }
1597 }
1598 necp_remove_uuid_service_id_mapping(service_uuid);
1599 }
1600 lck_rw_done(&necp_kernel_policy_lock);
1601
1602 necp_send_success_response(session, NECP_PACKET_TYPE_UNREGISTER_SERVICE, message_id);
1603 return;
1604 fail:
1605 necp_send_error_response(session, NECP_PACKET_TYPE_UNREGISTER_SERVICE, message_id, response_error);
1606 }
1607
1608 static void
1609 necp_handle_policy_add(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
1610 {
1611 bool has_default_condition = FALSE;
1612 bool has_non_default_condition = FALSE;
1613 bool has_application_condition = FALSE;
1614 bool requires_application_condition = FALSE;
1615 u_int8_t *conditions_array = NULL;
1616 u_int32_t conditions_array_size = 0;
1617 int conditions_array_cursor;
1618
1619 bool has_default_route_rule = FALSE;
1620 u_int8_t *route_rules_array = NULL;
1621 u_int32_t route_rules_array_size = 0;
1622 int route_rules_array_cursor;
1623
1624 int cursor;
1625 int error = 0;
1626 u_int32_t response_error = NECP_ERROR_INTERNAL;
1627
1628 necp_policy_order order = 0;
1629 struct necp_session_policy *policy = NULL;
1630 u_int8_t *policy_result = NULL;
1631 u_int32_t policy_result_size = 0;
1632
1633 // Read policy order
1634 error = necp_packet_get_tlv(packet, offset, NECP_TLV_POLICY_ORDER, sizeof(order), &order, NULL);
1635 if (error) {
1636 NECPLOG(LOG_ERR, "Failed to get policy order: %d", error);
1637 response_error = NECP_ERROR_INVALID_TLV;
1638 goto fail;
1639 }
1640
1641 // Read policy result
1642 cursor = necp_packet_find_tlv(packet, offset, NECP_TLV_POLICY_RESULT, &error, 0);
1643 error = necp_packet_get_tlv_at_offset(packet, cursor, 0, NULL, &policy_result_size);
1644 if (error || policy_result_size == 0) {
1645 NECPLOG(LOG_ERR, "Failed to get policy result length: %d", error);
1646 response_error = NECP_ERROR_INVALID_TLV;
1647 goto fail;
1648 }
1649 if (policy_result_size > NECP_MAX_POLICY_RESULT_SIZE) {
1650 NECPLOG(LOG_ERR, "Policy result length too large: %u", policy_result_size);
1651 response_error = NECP_ERROR_INVALID_TLV;
1652 goto fail;
1653 }
1654 MALLOC(policy_result, u_int8_t *, policy_result_size, M_NECP, M_WAITOK);
1655 if (policy_result == NULL) {
1656 NECPLOG(LOG_ERR, "Failed to allocate a policy result buffer (size %d)", policy_result_size);
1657 response_error = NECP_ERROR_INTERNAL;
1658 goto fail;
1659 }
1660 error = necp_packet_get_tlv_at_offset(packet, cursor, policy_result_size, policy_result, NULL);
1661 if (error) {
1662 NECPLOG(LOG_ERR, "Failed to get policy result: %d", error);
1663 response_error = NECP_ERROR_POLICY_RESULT_INVALID;
1664 goto fail;
1665 }
1666 if (!necp_policy_result_is_valid(policy_result, policy_result_size)) {
1667 NECPLOG0(LOG_ERR, "Failed to validate policy result");
1668 response_error = NECP_ERROR_POLICY_RESULT_INVALID;
1669 goto fail;
1670 }
1671
1672 if (necp_policy_result_requires_route_rules(policy_result, policy_result_size)) {
1673 // Read route rules
1674 for (cursor = necp_packet_find_tlv(packet, offset, NECP_TLV_ROUTE_RULE, &error, 0);
1675 cursor >= 0;
1676 cursor = necp_packet_find_tlv(packet, cursor, NECP_TLV_ROUTE_RULE, &error, 1)) {
1677 u_int32_t route_rule_size = 0;
1678 necp_packet_get_tlv_at_offset(packet, cursor, 0, NULL, &route_rule_size);
1679 if (route_rule_size > 0) {
1680 route_rules_array_size += (sizeof(u_int8_t) + sizeof(u_int32_t) + route_rule_size);
1681 }
1682 }
1683
1684 if (route_rules_array_size == 0) {
1685 NECPLOG0(LOG_ERR, "Failed to get policy route rules");
1686 response_error = NECP_ERROR_INVALID_TLV;
1687 goto fail;
1688 }
1689 if (route_rules_array_size > NECP_MAX_ROUTE_RULES_ARRAY_SIZE) {
1690 NECPLOG(LOG_ERR, "Route rules length too large: %u", route_rules_array_size);
1691 response_error = NECP_ERROR_INVALID_TLV;
1692 goto fail;
1693 }
1694 MALLOC(route_rules_array, u_int8_t *, route_rules_array_size, M_NECP, M_WAITOK);
1695 if (route_rules_array == NULL) {
1696 NECPLOG(LOG_ERR, "Failed to allocate a policy route rules array (size %d)", route_rules_array_size);
1697 response_error = NECP_ERROR_INTERNAL;
1698 goto fail;
1699 }
1700
1701 route_rules_array_cursor = 0;
1702 for (cursor = necp_packet_find_tlv(packet, offset, NECP_TLV_ROUTE_RULE, &error, 0);
1703 cursor >= 0;
1704 cursor = necp_packet_find_tlv(packet, cursor, NECP_TLV_ROUTE_RULE, &error, 1)) {
1705 u_int8_t route_rule_type = NECP_TLV_ROUTE_RULE;
1706 u_int32_t route_rule_size = 0;
1707 necp_packet_get_tlv_at_offset(packet, cursor, 0, NULL, &route_rule_size);
1708 if (route_rule_size > 0 && route_rule_size <= (route_rules_array_size - route_rules_array_cursor)) {
1709 // Add type
1710 memcpy((route_rules_array + route_rules_array_cursor), &route_rule_type, sizeof(route_rule_type));
1711 route_rules_array_cursor += sizeof(route_rule_type);
1712
1713 // Add length
1714 memcpy((route_rules_array + route_rules_array_cursor), &route_rule_size, sizeof(route_rule_size));
1715 route_rules_array_cursor += sizeof(route_rule_size);
1716
1717 // Add value
1718 necp_packet_get_tlv_at_offset(packet, cursor, route_rule_size, (route_rules_array + route_rules_array_cursor), NULL);
1719
1720 if (!necp_policy_route_rule_is_valid((route_rules_array + route_rules_array_cursor), route_rule_size)) {
1721 NECPLOG0(LOG_ERR, "Failed to validate policy route rule");
1722 response_error = NECP_ERROR_ROUTE_RULES_INVALID;
1723 goto fail;
1724 }
1725
1726 if (necp_policy_route_rule_is_default((route_rules_array + route_rules_array_cursor), route_rule_size)) {
1727 if (has_default_route_rule) {
1728 NECPLOG0(LOG_ERR, "Failed to validate route rule; contained multiple default route rules");
1729 response_error = NECP_ERROR_ROUTE_RULES_INVALID;
1730 goto fail;
1731 }
1732 has_default_route_rule = TRUE;
1733 }
1734
1735 route_rules_array_cursor += route_rule_size;
1736 }
1737 }
1738 }
1739
1740 // Read policy conditions
1741 for (cursor = necp_packet_find_tlv(packet, offset, NECP_TLV_POLICY_CONDITION, &error, 0);
1742 cursor >= 0;
1743 cursor = necp_packet_find_tlv(packet, cursor, NECP_TLV_POLICY_CONDITION, &error, 1)) {
1744 u_int32_t condition_size = 0;
1745 necp_packet_get_tlv_at_offset(packet, cursor, 0, NULL, &condition_size);
1746
1747 if (condition_size > 0) {
1748 conditions_array_size += (sizeof(u_int8_t) + sizeof(u_int32_t) + condition_size);
1749 }
1750 }
1751
1752 if (conditions_array_size == 0) {
1753 NECPLOG0(LOG_ERR, "Failed to get policy conditions");
1754 response_error = NECP_ERROR_INVALID_TLV;
1755 goto fail;
1756 }
1757 if (conditions_array_size > NECP_MAX_CONDITIONS_ARRAY_SIZE) {
1758 NECPLOG(LOG_ERR, "Conditions length too large: %u", conditions_array_size);
1759 response_error = NECP_ERROR_INVALID_TLV;
1760 goto fail;
1761 }
1762 MALLOC(conditions_array, u_int8_t *, conditions_array_size, M_NECP, M_WAITOK);
1763 if (conditions_array == NULL) {
1764 NECPLOG(LOG_ERR, "Failed to allocate a policy conditions array (size %d)", conditions_array_size);
1765 response_error = NECP_ERROR_INTERNAL;
1766 goto fail;
1767 }
1768
1769 conditions_array_cursor = 0;
1770 for (cursor = necp_packet_find_tlv(packet, offset, NECP_TLV_POLICY_CONDITION, &error, 0);
1771 cursor >= 0;
1772 cursor = necp_packet_find_tlv(packet, cursor, NECP_TLV_POLICY_CONDITION, &error, 1)) {
1773 u_int8_t condition_type = NECP_TLV_POLICY_CONDITION;
1774 u_int32_t condition_size = 0;
1775 necp_packet_get_tlv_at_offset(packet, cursor, 0, NULL, &condition_size);
1776 if (condition_size > 0 && condition_size <= (conditions_array_size - conditions_array_cursor)) {
1777 // Add type
1778 memcpy((conditions_array + conditions_array_cursor), &condition_type, sizeof(condition_type));
1779 conditions_array_cursor += sizeof(condition_type);
1780
1781 // Add length
1782 memcpy((conditions_array + conditions_array_cursor), &condition_size, sizeof(condition_size));
1783 conditions_array_cursor += sizeof(condition_size);
1784
1785 // Add value
1786 necp_packet_get_tlv_at_offset(packet, cursor, condition_size, (conditions_array + conditions_array_cursor), NULL);
1787 if (!necp_policy_condition_is_valid((conditions_array + conditions_array_cursor), condition_size, necp_policy_result_get_type_from_buffer(policy_result, policy_result_size))) {
1788 NECPLOG0(LOG_ERR, "Failed to validate policy condition");
1789 response_error = NECP_ERROR_POLICY_CONDITIONS_INVALID;
1790 goto fail;
1791 }
1792
1793 if (necp_policy_condition_is_default((conditions_array + conditions_array_cursor), condition_size)) {
1794 has_default_condition = TRUE;
1795 } else {
1796 has_non_default_condition = TRUE;
1797 }
1798 if (has_default_condition && has_non_default_condition) {
1799 NECPLOG0(LOG_ERR, "Failed to validate conditions; contained default and non-default conditions");
1800 response_error = NECP_ERROR_POLICY_CONDITIONS_INVALID;
1801 goto fail;
1802 }
1803
1804 if (necp_policy_condition_is_application((conditions_array + conditions_array_cursor), condition_size)) {
1805 has_application_condition = TRUE;
1806 }
1807
1808 if (necp_policy_condition_requires_application((conditions_array + conditions_array_cursor), condition_size)) {
1809 requires_application_condition = TRUE;
1810 }
1811
1812 conditions_array_cursor += condition_size;
1813 }
1814 }
1815
1816 if (requires_application_condition && !has_application_condition) {
1817 NECPLOG0(LOG_ERR, "Failed to validate conditions; did not contain application condition");
1818 response_error = NECP_ERROR_POLICY_CONDITIONS_INVALID;
1819 goto fail;
1820 }
1821
1822 if ((policy = necp_policy_create(session, order, conditions_array, conditions_array_size, route_rules_array, route_rules_array_size, policy_result, policy_result_size)) == NULL) {
1823 response_error = NECP_ERROR_INTERNAL;
1824 goto fail;
1825 }
1826
1827 necp_send_policy_id_response(session, NECP_PACKET_TYPE_POLICY_ADD, message_id, policy->id);
1828 return;
1829
1830 fail:
1831 if (policy_result != NULL) {
1832 FREE(policy_result, M_NECP);
1833 }
1834 if (conditions_array != NULL) {
1835 FREE(conditions_array, M_NECP);
1836 }
1837 if (route_rules_array != NULL) {
1838 FREE(route_rules_array, M_NECP);
1839 }
1840
1841 necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_ADD, message_id, response_error);
1842 }
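/*
 * Note on layout: the conditions_array and route_rules_array assembled above
 * (and handed to necp_policy_create) are packed as repeated
 * [u_int8_t type][u_int32_t length][length bytes of value] records, matching
 * the memcpy sequence in the loops above.
 */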
1843
1844 static void
1845 necp_handle_policy_get(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
1846 {
1847
1848 int error;
1849 u_int8_t *response = NULL;
1850 u_int8_t *cursor = NULL;
1851 u_int32_t response_error = NECP_ERROR_INTERNAL;
1852 necp_policy_id policy_id = 0;
1853 u_int32_t order_tlv_size = 0;
1854 u_int32_t result_tlv_size = 0;
1855 u_int32_t response_size = 0;
1856
1857 struct necp_session_policy *policy = NULL;
1858
1859 // Read policy id
1860 error = necp_packet_get_tlv(packet, offset, NECP_TLV_POLICY_ID, sizeof(policy_id), &policy_id, NULL);
1861 if (error) {
1862 NECPLOG(LOG_ERR, "Failed to get policy id: %d", error);
1863 response_error = NECP_ERROR_INVALID_TLV;
1864 goto fail;
1865 }
1866
1867 policy = necp_policy_find(session, policy_id);
1868 if (policy == NULL || policy->pending_deletion) {
1869 NECPLOG(LOG_ERR, "Failed to find policy with id %d", policy_id);
1870 response_error = NECP_ERROR_POLICY_ID_NOT_FOUND;
1871 goto fail;
1872 }
1873
1874 order_tlv_size = sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(necp_policy_order);
1875 result_tlv_size = (policy->result_size ? (sizeof(u_int8_t) + sizeof(u_int32_t) + policy->result_size) : 0);
1876 response_size = sizeof(struct necp_packet_header) + order_tlv_size + result_tlv_size + policy->conditions_size;
1877 MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK);
1878 if (response == NULL) {
1879 necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_GET, message_id, NECP_ERROR_INTERNAL);
1880 return;
1881 }
1882
1883 cursor = response;
1884 cursor = necp_buffer_write_packet_header(cursor, NECP_PACKET_TYPE_POLICY_GET, NECP_PACKET_FLAGS_RESPONSE, message_id);
1885 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ORDER, sizeof(necp_policy_order), &policy->order);
1886
1887 if (result_tlv_size) {
1888 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_RESULT, policy->result_size, policy->result);
1889 }
1890 if (policy->conditions_size) {
1891 memcpy(((u_int8_t *)(void *)(cursor)), policy->conditions, policy->conditions_size);
1892 }
1893
1894 if (!necp_send_ctl_data(session, (u_int8_t *)response, response_size)) {
1895 NECPLOG0(LOG_ERR, "Failed to send response");
1896 }
1897
1898 FREE(response, M_NECP);
1899 return;
1900
1901 fail:
1902 necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_GET, message_id, response_error);
1903 }
1904
1905 static void
1906 necp_handle_policy_delete(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
1907 {
1908 int error;
1909 u_int32_t response_error = NECP_ERROR_INTERNAL;
1910 necp_policy_id policy_id = 0;
1911
1912 struct necp_session_policy *policy = NULL;
1913
1914 // Read policy id
1915 error = necp_packet_get_tlv(packet, offset, NECP_TLV_POLICY_ID, sizeof(policy_id), &policy_id, NULL);
1916 if (error) {
1917 NECPLOG(LOG_ERR, "Failed to get policy id: %d", error);
1918 response_error = NECP_ERROR_INVALID_TLV;
1919 goto fail;
1920 }
1921
1922 policy = necp_policy_find(session, policy_id);
1923 if (policy == NULL || policy->pending_deletion) {
1924 NECPLOG(LOG_ERR, "Failed to find policy with id %d", policy_id);
1925 response_error = NECP_ERROR_POLICY_ID_NOT_FOUND;
1926 goto fail;
1927 }
1928
1929 necp_policy_mark_for_deletion(session, policy);
1930
1931 necp_send_success_response(session, NECP_PACKET_TYPE_POLICY_DELETE, message_id);
1932 return;
1933
1934 fail:
1935 necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_DELETE, message_id, response_error);
1936 }
1937
1938 static void
1939 necp_handle_policy_apply_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
1940 {
1941 #pragma unused(packet, offset)
1942 necp_policy_apply_all(session);
1943 necp_send_success_response(session, NECP_PACKET_TYPE_POLICY_APPLY_ALL, message_id);
1944 }
1945
1946 static void
1947 necp_handle_policy_list_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
1948 {
1949 #pragma unused(packet, offset)
1950 u_int32_t tlv_size = (sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(u_int32_t));
1951 u_int32_t response_size = 0;
1952 u_int8_t *response = NULL;
1953 u_int8_t *cursor = NULL;
1954 int num_policies = 0;
1955 int cur_policy_index = 0;
1956 struct necp_session_policy *policy;
1957
1958 LIST_FOREACH(policy, &session->policies, chain) {
1959 if (!policy->pending_deletion) {
1960 num_policies++;
1961 }
1962 }
1963
1964 // Create a response with one Policy ID TLV for each policy
1965 response_size = sizeof(struct necp_packet_header) + num_policies * tlv_size;
1966 MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK);
1967 if (response == NULL) {
1968 necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_LIST_ALL, message_id, NECP_ERROR_INTERNAL);
1969 return;
1970 }
1971
1972 cursor = response;
1973 cursor = necp_buffer_write_packet_header(cursor, NECP_PACKET_TYPE_POLICY_LIST_ALL, NECP_PACKET_FLAGS_RESPONSE, message_id);
1974
1975 LIST_FOREACH(policy, &session->policies, chain) {
1976 if (!policy->pending_deletion && cur_policy_index < num_policies) {
1977 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ID, sizeof(u_int32_t), &policy->id);
1978 cur_policy_index++;
1979 }
1980 }
1981
1982 if (!necp_send_ctl_data(session, (u_int8_t *)response, response_size)) {
1983 NECPLOG0(LOG_ERR, "Failed to send response");
1984 }
1985
1986 FREE(response, M_NECP);
1987 }
1988
1989 static void
1990 necp_handle_policy_delete_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
1991 {
1992 #pragma unused(packet, offset)
1993 necp_policy_mark_all_for_deletion(session);
1994 necp_send_success_response(session, NECP_PACKET_TYPE_POLICY_DELETE_ALL, message_id);
1995 }
1996
1997 static necp_policy_id
1998 necp_policy_get_new_id(void)
1999 {
2000 necp_policy_id newid = 0;
2001
2002 lck_rw_lock_exclusive(&necp_kernel_policy_lock);
2003
2004 necp_last_policy_id++;
2005 if (necp_last_policy_id < 1) {
2006 necp_last_policy_id = 1;
2007 }
2008
2009 newid = necp_last_policy_id;
2010 lck_rw_done(&necp_kernel_policy_lock);
2011
2012 if (newid == 0) {
2013 NECPLOG0(LOG_DEBUG, "Failed to allocate policy id");
2014 return (0);
2015 }
2016
2017 return (newid);
2018 }
2019
2020 /*
2021 * For the policy dump response this is the structure:
2022 *
2023 * <NECP_PACKET_HEADER>
2024 * {
2025 * type : NECP_TLV_POLICY_DUMP
2026 * length : ...
2027 * value :
2028 * {
2029 * {
2030 * type : NECP_TLV_POLICY_ID
2031 * len : ...
2032 * value : ...
2033 * }
2034 * {
2035 * type : NECP_TLV_POLICY_ORDER
2036 * len : ...
2037 * value : ...
2038 * }
2039 * {
2040 * type : NECP_TLV_POLICY_RESULT_STRING
2041 * len : ...
2042 * value : ...
2043 * }
2044 * {
2045 * type : NECP_TLV_POLICY_OWNER
2046 * len : ...
2047 * value : ...
2048 * }
2049 * {
2050 * type : NECP_TLV_POLICY_CONDITION
2051 * len : ...
2052 * value :
2053 * {
2054 * {
2055 * type : NECP_POLICY_CONDITION_ALL_INTERFACES
2056 * len : ...
2057 * value : ...
2058 * }
2059 * {
2060 * type : NECP_POLICY_CONDITION_BOUND_INTERFACES
2061 * len : ...
2062 * value : ...
2063 * }
2064 * ...
2065 * }
2066 * }
2067 * }
2068 * }
2069 * {
2070 * type : NECP_TLV_POLICY_DUMP
2071 * length : ...
2072 * value :
2073 * {
2074 * {
2075 * type : NECP_TLV_POLICY_ID
2076 * len : ...
2077 * value : ...
2078 * }
2079 * {
2080 * type : NECP_TLV_POLICY_ORDER
2081 * len : ...
2082 * value : ...
2083 * }
2084 * {
2085 * type : NECP_TLV_POLICY_RESULT_STRING
2086 * len : ...
2087 * value : ...
2088 * }
2089 * {
2090 * type : NECP_TLV_POLICY_OWNER
2091 * len : ...
2092 * value : ...
2093 * }
2094 * {
2095 * type : NECP_TLV_POLICY_CONDITION
2096 * len : ...
2097 * value :
2098 * {
2099 * {
2100 * type : NECP_POLICY_CONDITION_ALL_INTERFACES
2101 * len : ...
2102 * value : ...
2103 * }
2104 * {
2105 * type : NECP_POLICY_CONDITION_BOUND_INTERFACES
2106 * len : ...
2107 * value : ...
2108 * }
2109 * ...
2110 * }
2111 * }
2112 * }
2113 * }
2114 * ...
2115 */
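/*
 * Each TLV above is written by necp_buffer_write_tlv() as a 1-byte type
 * followed by a 4-byte length and then the value bytes; the nested policy
 * condition TLVs are simply concatenated inside the parent value.
 */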
2116 static void
2117 necp_handle_policy_dump_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset)
2118 {
2119 #pragma unused(packet, offset)
2120 struct necp_kernel_socket_policy *policy = NULL;
2121 int policy_i;
2122 int policy_count = 0;
2123 u_int8_t **tlv_buffer_pointers = NULL;
2124 u_int32_t *tlv_buffer_lengths = NULL;
2125 int total_tlv_len = 0;
2126 u_int8_t *result_buf = NULL;
2127 u_int8_t *result_buf_cursor = result_buf;
2128 char result_string[MAX_RESULT_STRING_LEN];
2129 char proc_name_string[MAXCOMLEN + 1];
2130
2131 bool error_occurred = false;
2132 u_int32_t response_error = NECP_ERROR_INTERNAL;
2133
2134 #define REPORT_ERROR(error) error_occurred = true; \
2135 response_error = error; \
2136 goto done
2137
2138 #define UNLOCK_AND_REPORT_ERROR(lock, error) lck_rw_done(lock); \
2139 REPORT_ERROR(error)
2140
2141 errno_t cred_result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES, 0);
2142 if (cred_result != 0) {
2143 NECPLOG0(LOG_ERR, "Session does not hold the necessary entitlement to get Network Extension Policy information");
2144 REPORT_ERROR(NECP_ERROR_INTERNAL);
2145 }
2146
2147 // LOCK
2148 lck_rw_lock_shared(&necp_kernel_policy_lock);
2149
2150 NECPLOG0(LOG_DEBUG, "Gathering policies");
2151
2152 policy_count = necp_kernel_application_policies_count;
2153
2154 MALLOC(tlv_buffer_pointers, u_int8_t **, sizeof(u_int8_t *) * policy_count, M_NECP, M_NOWAIT | M_ZERO);
2155 if (tlv_buffer_pointers == NULL) {
2156 NECPLOG(LOG_DEBUG, "Failed to allocate tlv_buffer_pointers (%u bytes)", sizeof(u_int8_t *) * policy_count);
2157 UNLOCK_AND_REPORT_ERROR(&necp_kernel_policy_lock, NECP_ERROR_INTERNAL);
2158 }
2159
2160 MALLOC(tlv_buffer_lengths, u_int32_t *, sizeof(u_int32_t) * policy_count, M_NECP, M_NOWAIT | M_ZERO);
2161 if (tlv_buffer_lengths == NULL) {
2162 NECPLOG(LOG_DEBUG, "Failed to allocate tlv_buffer_lengths (%u bytes)", sizeof(u_int32_t) * policy_count);
2163 UNLOCK_AND_REPORT_ERROR(&necp_kernel_policy_lock, NECP_ERROR_INTERNAL);
2164 }
2165
2166 for (policy_i = 0; policy_i < policy_count && necp_kernel_socket_policies_app_layer_map != NULL && necp_kernel_socket_policies_app_layer_map[policy_i] != NULL; policy_i++) {
2167 policy = necp_kernel_socket_policies_app_layer_map[policy_i];
2168
2169 memset(result_string, 0, MAX_RESULT_STRING_LEN);
2170 memset(proc_name_string, 0, MAXCOMLEN + 1);
2171
2172 necp_get_result_description(result_string, policy->result, policy->result_parameter);
2173 proc_name(policy->session_pid, proc_name_string, MAXCOMLEN);
2174
2175 u_int16_t proc_name_len = strlen(proc_name_string) + 1;
2176 u_int16_t result_string_len = strlen(result_string) + 1;
2177
2178 NECPLOG(LOG_DEBUG, "Policy: process: %s, result: %s", proc_name_string, result_string);
2179
2180 u_int32_t total_allocated_bytes = sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(policy->id) + // NECP_TLV_POLICY_ID
2181 sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(policy->order) + // NECP_TLV_POLICY_ORDER
2182 sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(policy->session_order) + // NECP_TLV_POLICY_SESSION_ORDER
2183 sizeof(u_int8_t) + sizeof(u_int32_t) + result_string_len + // NECP_TLV_POLICY_RESULT_STRING
2184 sizeof(u_int8_t) + sizeof(u_int32_t) + proc_name_len + // NECP_TLV_POLICY_OWNER
2185 sizeof(u_int8_t) + sizeof(u_int32_t); // NECP_TLV_POLICY_CONDITION
2186
2187 // We now traverse the condition_mask to see how much space we need to allocate
2188 u_int32_t condition_mask = policy->condition_mask;
2189 u_int8_t num_conditions = 0;
2190 struct necp_string_id_mapping *account_id_entry = NULL;
2191 char if_name[IFXNAMSIZ];
2192 u_int32_t condition_tlv_length = 0;
2193 memset(if_name, 0, sizeof(if_name));
2194
2195 if (condition_mask == NECP_POLICY_CONDITION_DEFAULT) {
2196 num_conditions++;
2197 } else {
2198 if (condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES) {
2199 num_conditions++;
2200 }
2201 if (condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
2202 snprintf(if_name, IFXNAMSIZ, "%s%d", ifnet_name(policy->cond_bound_interface), ifnet_unit(policy->cond_bound_interface));
2203 condition_tlv_length += strlen(if_name) + 1;
2204 num_conditions++;
2205 }
2206 if (condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
2207 condition_tlv_length += sizeof(policy->cond_protocol);
2208 num_conditions++;
2209 }
2210 if (condition_mask & NECP_KERNEL_CONDITION_APP_ID) {
2211 condition_tlv_length += sizeof(uuid_t);
2212 num_conditions++;
2213 }
2214 if (condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
2215 condition_tlv_length += sizeof(uuid_t);
2216 num_conditions++;
2217 }
2218 if (condition_mask & NECP_KERNEL_CONDITION_DOMAIN) {
2219 u_int32_t domain_len = strlen(policy->cond_domain) + 1;
2220 condition_tlv_length += domain_len;
2221 num_conditions++;
2222 }
2223 if (condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID) {
2224 account_id_entry = necp_lookup_string_with_id_locked(&necp_account_id_list, policy->cond_account_id);
2225 u_int32_t account_id_len = 0;
2226 if (account_id_entry) {
2227 account_id_len = account_id_entry->string ? strlen(account_id_entry->string) + 1 : 0;
2228 }
2229 condition_tlv_length += account_id_len;
2230 num_conditions++;
2231 }
2232 if (condition_mask & NECP_KERNEL_CONDITION_PID) {
2233 condition_tlv_length += sizeof(pid_t);
2234 num_conditions++;
2235 }
2236 if (condition_mask & NECP_KERNEL_CONDITION_UID) {
2237 condition_tlv_length += sizeof(uid_t);
2238 num_conditions++;
2239 }
2240 if (condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
2241 condition_tlv_length += sizeof(struct necp_policy_condition_tc_range);
2242 num_conditions++;
2243 }
2244 if (condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT) {
2245 num_conditions++;
2246 }
2247 if (condition_mask & NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT) {
2248 u_int32_t entitlement_len = strlen(policy->cond_custom_entitlement) + 1;
2249 condition_tlv_length += entitlement_len;
2250 num_conditions++;
2251 }
2252 if (condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
2253 if (condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
2254 condition_tlv_length += sizeof(struct necp_policy_condition_addr_range);
2255 } else {
2256 condition_tlv_length += sizeof(struct necp_policy_condition_addr);
2257 }
2258 num_conditions++;
2259 }
2260 if (condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
2261 if (condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
2262 condition_tlv_length += sizeof(struct necp_policy_condition_addr_range);
2263 } else {
2264 condition_tlv_length += sizeof(struct necp_policy_condition_addr);
2265 }
2266 num_conditions++;
2267 }
2268 }
2269
2270 condition_tlv_length += num_conditions * (sizeof(u_int8_t) + sizeof(u_int32_t)); // These are for the condition TLVs. The space for "value" is already accounted for above.
2271 total_allocated_bytes += condition_tlv_length;
2272
2273 u_int8_t *tlv_buffer;
2274 MALLOC(tlv_buffer, u_int8_t *, total_allocated_bytes, M_NECP, M_NOWAIT | M_ZERO);
2275 if (tlv_buffer == NULL) {
2276 NECPLOG(LOG_DEBUG, "Failed to allocate tlv_buffer (%u bytes)", total_allocated_bytes);
2277 continue;
2278 }
2279
2280 u_int8_t *cursor = tlv_buffer;
2281 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ID, sizeof(policy->id), &policy->id);
2282 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_ORDER, sizeof(necp_policy_order), &policy->order);
2283 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_SESSION_ORDER, sizeof(policy->session_order), &policy->session_order);
2284 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_RESULT_STRING, result_string_len, result_string);
2285 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_OWNER, proc_name_len, proc_name_string);
2286
2287 #define N_QUICK 256
2288 u_int8_t q_cond_buf[N_QUICK]; // Minor optimization
2289
2290 u_int8_t *cond_buf; // To be used for condition TLVs
2291 if (condition_tlv_length <= N_QUICK) {
2292 cond_buf = q_cond_buf;
2293 } else {
2294 MALLOC(cond_buf, u_int8_t *, condition_tlv_length, M_NECP, M_NOWAIT);
2295 if (cond_buf == NULL) {
2296 NECPLOG(LOG_DEBUG, "Failed to allocate cond_buffer (%u bytes)", condition_tlv_length);
2297 FREE(tlv_buffer, M_NECP);
2298 continue;
2299 }
2300 }
2301
2302 memset(cond_buf, 0, condition_tlv_length);
2303 u_int8_t *cond_buf_cursor = cond_buf;
2304 if (condition_mask == NECP_POLICY_CONDITION_DEFAULT) {
2305 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_DEFAULT, 0, "");
2306 } else {
2307 if (condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES) {
2308 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_ALL_INTERFACES, 0, "");
2309 }
2310 if (condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
2311 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_BOUND_INTERFACE, strlen(if_name) + 1, if_name);
2312 }
2313 if (condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
2314 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_IP_PROTOCOL, sizeof(policy->cond_protocol), &policy->cond_protocol);
2315 }
2316 if (condition_mask & NECP_KERNEL_CONDITION_APP_ID) {
2317 struct necp_uuid_id_mapping *entry = necp_uuid_lookup_uuid_with_app_id_locked(policy->cond_app_id);
2318 if (entry != NULL) {
2319 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_APPLICATION, sizeof(entry->uuid), entry->uuid);
2320 }
2321 }
2322 if (condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
2323 struct necp_uuid_id_mapping *entry = necp_uuid_lookup_uuid_with_app_id_locked(policy->cond_real_app_id);
2324 if (entry != NULL) {
2325 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_REAL_APPLICATION, sizeof(entry->uuid), entry->uuid);
2326 }
2327 }
2328 if (condition_mask & NECP_KERNEL_CONDITION_DOMAIN) {
2329 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_DOMAIN, strlen(policy->cond_domain) + 1, policy->cond_domain);
2330 }
2331 if (condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID) {
2332 if (account_id_entry != NULL) {
2333 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_ACCOUNT, strlen(account_id_entry->string) + 1, account_id_entry->string);
2334 }
2335 }
2336 if (condition_mask & NECP_KERNEL_CONDITION_PID) {
2337 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_PID, sizeof(policy->cond_pid), &policy->cond_pid);
2338 }
2339 if (condition_mask & NECP_KERNEL_CONDITION_UID) {
2340 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_UID, sizeof(policy->cond_uid), &policy->cond_uid);
2341 }
2342 if (condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
2343 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_TRAFFIC_CLASS, sizeof(policy->cond_traffic_class), &policy->cond_traffic_class);
2344 }
2345 if (condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT) {
2346 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_ENTITLEMENT, 0, "");
2347 }
2348 if (condition_mask & NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT) {
2349 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_ENTITLEMENT, strlen(policy->cond_custom_entitlement) + 1, policy->cond_custom_entitlement);
2350 }
2351 if (condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
2352 if (condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
2353 struct necp_policy_condition_addr_range range;
2354 memcpy(&range.start_address, &policy->cond_local_start, sizeof(policy->cond_local_start));
2355 memcpy(&range.end_address, &policy->cond_local_end, sizeof(policy->cond_local_end));
2356 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_LOCAL_ADDR_RANGE, sizeof(range), &range);
2357 } else {
2358 struct necp_policy_condition_addr addr;
2359 addr.prefix = policy->cond_local_prefix;
2360 memcpy(&addr.address, &policy->cond_local_start, sizeof(policy->cond_local_start));
2361 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_LOCAL_ADDR, sizeof(addr), &addr);
2362 }
2363 }
2364 if (condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
2365 if (condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
2366 struct necp_policy_condition_addr_range range;
2367 memcpy(&range.start_address, &policy->cond_remote_start, sizeof(policy->cond_remote_start));
2368 memcpy(&range.end_address, &policy->cond_remote_end, sizeof(policy->cond_remote_end));
2369 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_REMOTE_ADDR_RANGE, sizeof(range), &range);
2370 } else {
2371 struct necp_policy_condition_addr addr;
2372 addr.prefix = policy->cond_remote_prefix;
2373 memcpy(&addr.address, &policy->cond_remote_start, sizeof(policy->cond_remote_start));
2374 cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_REMOTE_ADDR, sizeof(addr), &addr);
2375 }
2376 }
2377 }
2378
2379 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_CONDITION, cond_buf_cursor - cond_buf, cond_buf);
2380 if (cond_buf != q_cond_buf) {
2381 FREE(cond_buf, M_NECP);
2382 }
2383
2384 tlv_buffer_pointers[policy_i] = tlv_buffer;
2385 tlv_buffer_lengths[policy_i] = (cursor - tlv_buffer);
2386
2387 // This is the length of the TLV for NECP_TLV_POLICY_DUMP
2388 total_tlv_len += sizeof(u_int8_t) + sizeof(u_int32_t) + (cursor - tlv_buffer);
2389 }
2390
2391 // UNLOCK
2392 lck_rw_done(&necp_kernel_policy_lock);
2393
2394 u_int32_t total_result_length = sizeof(struct necp_packet_header) + total_tlv_len;
2395 MALLOC(result_buf, u_int8_t *, total_result_length, M_NECP, M_NOWAIT | M_ZERO);
2396 if (result_buf == NULL) {
2397 NECPLOG(LOG_DEBUG, "Failed to allocate result_buffer (%u bytes)", total_result_length);
2398 REPORT_ERROR(NECP_ERROR_INTERNAL);
2399 }
2400
2401 result_buf_cursor = result_buf;
2402 result_buf_cursor = necp_buffer_write_packet_header(result_buf_cursor, NECP_PACKET_TYPE_POLICY_DUMP_ALL, NECP_PACKET_FLAGS_RESPONSE, message_id);
2403
2404 for (int i = 0; i < policy_count; i++) {
2405 if (tlv_buffer_pointers[i] != NULL) {
2406 result_buf_cursor = necp_buffer_write_tlv(result_buf_cursor, NECP_TLV_POLICY_DUMP, tlv_buffer_lengths[i], tlv_buffer_pointers[i]);
2407 }
2408 }
2409
2410 if (!necp_send_ctl_data(session, result_buf, result_buf_cursor - result_buf)) {
2411 NECPLOG(LOG_ERR, "Failed to send response (%u bytes)", result_buf_cursor - result_buf);
2412 } else {
2413 NECPLOG(LOG_ERR, "Sent data worth %u bytes. Total result buffer length was %u bytes", result_buf_cursor - result_buf, total_result_length);
2414 }
2415
2416 done:
2417
2418 if (error_occurred) {
2419 if (!necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_DUMP_ALL, message_id, response_error)) {
2420 NECPLOG0(LOG_ERR, "Failed to send error response");
2421 } else {
2422 NECPLOG0(LOG_ERR, "Sent error response");
2423 }
2424 }
2425
2426 if (result_buf != NULL) {
2427 FREE(result_buf, M_NECP);
2428 }
2429
2430 if (tlv_buffer_pointers != NULL) {
2431 for (int i = 0; i < policy_count; i++) {
2432 if (tlv_buffer_pointers[i] != NULL) {
2433 FREE(tlv_buffer_pointers[i], M_NECP);
2434 tlv_buffer_pointers[i] = NULL;
2435 }
2436 }
2437 FREE(tlv_buffer_pointers, M_NECP);
2438 }
2439
2440 if (tlv_buffer_lengths != NULL) {
2441 FREE(tlv_buffer_lengths, M_NECP);
2442 }
2443 #undef N_QUICK
2444 #undef RESET_COND_BUF
2445 #undef REPORT_ERROR
2446 #undef UNLOCK_AND_REPORT_ERROR
2447 }
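#if 0
/*
 * Illustrative sketch only (not compiled, and not part of NECP): one way a
 * reader of the dump-all response built above could walk the outer TLVs.
 * The function name is hypothetical; the offsets assume the
 * [1-byte type][4-byte length][value] encoding used by necp_buffer_write_tlv().
 */
static void
necp_example_walk_dump_response(u_int8_t *buffer, u_int32_t buffer_length)
{
	u_int32_t cursor = sizeof(struct necp_packet_header);
	while (cursor + sizeof(u_int8_t) + sizeof(u_int32_t) <= buffer_length) {
		u_int8_t type = buffer[cursor];
		u_int32_t length = 0;
		memcpy(&length, buffer + cursor + sizeof(u_int8_t), sizeof(length));
		u_int8_t *value = buffer + cursor + sizeof(u_int8_t) + sizeof(u_int32_t);
		if (length > buffer_length - (cursor + sizeof(u_int8_t) + sizeof(u_int32_t))) {
			break;	// Truncated or malformed TLV; stop walking
		}
		if (type == NECP_TLV_POLICY_DUMP) {
			// value/length cover the nested per-policy TLVs shown in the
			// structure comment above necp_handle_policy_dump_all()
		}
		(void)value;
		cursor += sizeof(u_int8_t) + sizeof(u_int32_t) + length;
	}
}
#endif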
2448
2449 static struct necp_session_policy *
2450 necp_policy_create(struct necp_session *session, necp_policy_order order, u_int8_t *conditions_array, u_int32_t conditions_array_size, u_int8_t *route_rules_array, u_int32_t route_rules_array_size, u_int8_t *result, u_int32_t result_size)
2451 {
2452 struct necp_session_policy *new_policy = NULL;
2453 struct necp_session_policy *tmp_policy = NULL;
2454
2455 if (session == NULL || conditions_array == NULL || result == NULL || result_size == 0) {
2456 goto done;
2457 }
2458
2459 MALLOC_ZONE(new_policy, struct necp_session_policy *, sizeof(*new_policy), M_NECP_SESSION_POLICY, M_WAITOK);
2460 if (new_policy == NULL) {
2461 goto done;
2462 }
2463
2464 memset(new_policy, 0, sizeof(*new_policy));
2465 new_policy->applied = FALSE;
2466 new_policy->pending_deletion = FALSE;
2467 new_policy->pending_update = FALSE;
2468 new_policy->order = order;
2469 new_policy->conditions = conditions_array;
2470 new_policy->conditions_size = conditions_array_size;
2471 new_policy->route_rules = route_rules_array;
2472 new_policy->route_rules_size = route_rules_array_size;
2473 new_policy->result = result;
2474 new_policy->result_size = result_size;
2475 new_policy->id = necp_policy_get_new_id();
2476
2477 LIST_INSERT_SORTED_ASCENDING(&session->policies, new_policy, chain, order, tmp_policy);
2478
2479 session->dirty = TRUE;
2480
2481 if (necp_debug) {
2482 NECPLOG(LOG_DEBUG, "Created NECP policy, order %d", order);
2483 }
2484 done:
2485 return (new_policy);
2486 }
2487
2488 static struct necp_session_policy *
2489 necp_policy_find(struct necp_session *session, necp_policy_id policy_id)
2490 {
2491 struct necp_session_policy *policy = NULL;
2492 if (policy_id == 0) {
2493 return (NULL);
2494 }
2495
2496 LIST_FOREACH(policy, &session->policies, chain) {
2497 if (policy->id == policy_id) {
2498 return (policy);
2499 }
2500 }
2501
2502 return (NULL);
2503 }
2504
2505 static inline u_int8_t
2506 necp_policy_get_result_type(struct necp_session_policy *policy)
2507 {
2508 return (policy ? necp_policy_result_get_type_from_buffer(policy->result, policy->result_size) : 0);
2509 }
2510
2511 static inline u_int32_t
2512 necp_policy_get_result_parameter_length(struct necp_session_policy *policy)
2513 {
2514 return (policy ? necp_policy_result_get_parameter_length_from_buffer(policy->result, policy->result_size) : 0);
2515 }
2516
2517 static bool
2518 necp_policy_get_result_parameter(struct necp_session_policy *policy, u_int8_t *parameter_buffer, u_int32_t parameter_buffer_length)
2519 {
2520 if (policy) {
2521 u_int32_t parameter_length = necp_policy_result_get_parameter_length_from_buffer(policy->result, policy->result_size);
2522 if (parameter_buffer_length >= parameter_length) {
2523 u_int8_t *parameter = necp_policy_result_get_parameter_pointer_from_buffer(policy->result, policy->result_size);
2524 if (parameter && parameter_buffer) {
2525 memcpy(parameter_buffer, parameter, parameter_length);
2526 return (TRUE);
2527 }
2528 }
2529 }
2530
2531 return (FALSE);
2532 }
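#if 0
/*
 * Illustrative usage sketch only (not compiled, and not part of NECP): a
 * caller sizes its buffer with necp_policy_get_result_parameter_length()
 * before copying the parameter out. The function name is hypothetical.
 */
static bool
necp_example_copy_result_parameter(struct necp_session_policy *policy)
{
	bool copied = FALSE;
	u_int32_t parameter_length = necp_policy_get_result_parameter_length(policy);
	if (parameter_length > 0) {
		u_int8_t *parameter_buffer = NULL;
		MALLOC(parameter_buffer, u_int8_t *, parameter_length, M_NECP, M_WAITOK);
		if (parameter_buffer != NULL) {
			copied = necp_policy_get_result_parameter(policy, parameter_buffer, parameter_length);
			// ... use parameter_buffer here while it is valid ...
			FREE(parameter_buffer, M_NECP);
		}
	}
	return (copied);
}
#endif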
2533
2534 static bool
2535 necp_policy_mark_for_deletion(struct necp_session *session, struct necp_session_policy *policy)
2536 {
2537 if (session == NULL || policy == NULL) {
2538 return (FALSE);
2539 }
2540
2541 policy->pending_deletion = TRUE;
2542 session->dirty = TRUE;
2543
2544 if (necp_debug) {
2545 NECPLOG0(LOG_DEBUG, "Marked NECP policy for removal");
2546 }
2547 return (TRUE);
2548 }
2549
2550 static bool
2551 necp_policy_mark_all_for_deletion(struct necp_session *session)
2552 {
2553 struct necp_session_policy *policy = NULL;
2554 struct necp_session_policy *temp_policy = NULL;
2555
2556 LIST_FOREACH_SAFE(policy, &session->policies, chain, temp_policy) {
2557 necp_policy_mark_for_deletion(session, policy);
2558 }
2559
2560 return (TRUE);
2561 }
2562
2563 static bool
2564 necp_policy_delete(struct necp_session *session, struct necp_session_policy *policy)
2565 {
2566 if (session == NULL || policy == NULL) {
2567 return (FALSE);
2568 }
2569
2570 LIST_REMOVE(policy, chain);
2571
2572 if (policy->result) {
2573 FREE(policy->result, M_NECP);
2574 policy->result = NULL;
2575 }
2576
2577 if (policy->conditions) {
2578 FREE(policy->conditions, M_NECP);
2579 policy->conditions = NULL;
2580 }
2581
2582 if (policy->route_rules) {
2583 FREE(policy->route_rules, M_NECP);
2584 policy->route_rules = NULL;
2585 }
2586
2587 FREE_ZONE(policy, sizeof(*policy), M_NECP_SESSION_POLICY);
2588
2589 if (necp_debug) {
2590 NECPLOG0(LOG_DEBUG, "Removed NECP policy");
2591 }
2592 return (TRUE);
2593 }
2594
2595 static bool
2596 necp_policy_unapply(struct necp_session_policy *policy)
2597 {
2598 int i = 0;
2599 if (policy == NULL) {
2600 return (FALSE);
2601 }
2602
2603 lck_rw_assert(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
2604
2605 // Release local uuid mappings
2606 if (!uuid_is_null(policy->applied_app_uuid)) {
2607 bool removed_mapping = FALSE;
2608 if (necp_remove_uuid_app_id_mapping(policy->applied_app_uuid, &removed_mapping, TRUE) && removed_mapping) {
2609 necp_uuid_app_id_mappings_dirty = TRUE;
2610 necp_num_uuid_app_id_mappings--;
2611 }
2612 uuid_clear(policy->applied_app_uuid);
2613 }
2614 if (!uuid_is_null(policy->applied_real_app_uuid)) {
2615 necp_remove_uuid_app_id_mapping(policy->applied_real_app_uuid, NULL, FALSE);
2616 uuid_clear(policy->applied_real_app_uuid);
2617 }
2618 if (!uuid_is_null(policy->applied_result_uuid)) {
2619 necp_remove_uuid_service_id_mapping(policy->applied_result_uuid);
2620 uuid_clear(policy->applied_result_uuid);
2621 }
2622
2623 // Release string mappings
2624 if (policy->applied_account != NULL) {
2625 necp_remove_string_to_id_mapping(&necp_account_id_list, policy->applied_account);
2626 FREE(policy->applied_account, M_NECP);
2627 policy->applied_account = NULL;
2628 }
2629
2630 // Release route rule
2631 if (policy->applied_route_rules_id != 0) {
2632 necp_remove_route_rule(&necp_route_rules, policy->applied_route_rules_id);
2633 policy->applied_route_rules_id = 0;
2634 }
2635
2636 // Remove socket policies
2637 for (i = 0; i < MAX_KERNEL_SOCKET_POLICIES; i++) {
2638 if (policy->kernel_socket_policies[i] != 0) {
2639 necp_kernel_socket_policy_delete(policy->kernel_socket_policies[i]);
2640 policy->kernel_socket_policies[i] = 0;
2641 }
2642 }
2643
2644 // Remove IP output policies
2645 for (i = 0; i < MAX_KERNEL_IP_OUTPUT_POLICIES; i++) {
2646 if (policy->kernel_ip_output_policies[i] != 0) {
2647 necp_kernel_ip_output_policy_delete(policy->kernel_ip_output_policies[i]);
2648 policy->kernel_ip_output_policies[i] = 0;
2649 }
2650 }
2651
2652 policy->applied = FALSE;
2653
2654 return (TRUE);
2655 }
2656
2657 #define NECP_KERNEL_POLICY_SUBORDER_ID_TUNNEL_CONDITION 0
2658 #define NECP_KERNEL_POLICY_SUBORDER_NON_ID_TUNNEL_CONDITION 1
2659 #define NECP_KERNEL_POLICY_SUBORDER_ID_CONDITION 2
2660 #define NECP_KERNEL_POLICY_SUBORDER_NON_ID_CONDITIONS 3
2661 struct necp_policy_result_ip_tunnel {
2662 u_int32_t secondary_result;
2663 char interface_name[IFXNAMSIZ];
2664 } __attribute__((__packed__));
2665
2666 struct necp_policy_result_service {
2667 uuid_t identifier;
2668 u_int32_t data;
2669 } __attribute__((__packed__));
2670
2671 static bool
2672 necp_policy_apply(struct necp_session *session, struct necp_session_policy *policy)
2673 {
2674 bool socket_only_conditions = FALSE;
2675 bool socket_ip_conditions = FALSE;
2676
2677 bool socket_layer_non_id_conditions = FALSE;
2678 bool ip_output_layer_non_id_conditions = FALSE;
2679 bool ip_output_layer_non_id_only = FALSE;
2680 bool ip_output_layer_id_condition = FALSE;
2681 bool ip_output_layer_tunnel_condition_from_id = FALSE;
2682 bool ip_output_layer_tunnel_condition_from_non_id = FALSE;
2683 necp_kernel_policy_id cond_ip_output_layer_id = NECP_KERNEL_POLICY_ID_NONE;
2684
2685 u_int32_t master_condition_mask = 0;
2686 u_int32_t master_condition_negated_mask = 0;
2687 ifnet_t cond_bound_interface = NULL;
2688 u_int32_t cond_account_id = 0;
2689 char *cond_domain = NULL;
2690 char *cond_custom_entitlement = NULL;
2691 pid_t cond_pid = 0;
2692 uid_t cond_uid = 0;
2693 necp_app_id cond_app_id = 0;
2694 necp_app_id cond_real_app_id = 0;
2695 struct necp_policy_condition_tc_range cond_traffic_class;
2696 cond_traffic_class.start_tc = 0;
2697 cond_traffic_class.end_tc = 0;
2698 u_int16_t cond_protocol = 0;
2699 union necp_sockaddr_union cond_local_start;
2700 union necp_sockaddr_union cond_local_end;
2701 u_int8_t cond_local_prefix = 0;
2702 union necp_sockaddr_union cond_remote_start;
2703 union necp_sockaddr_union cond_remote_end;
2704 u_int8_t cond_remote_prefix = 0;
2705 u_int32_t offset = 0;
2706 u_int8_t ultimate_result = 0;
2707 u_int32_t secondary_result = 0;
2708 necp_kernel_policy_result_parameter secondary_result_parameter;
2709 memset(&secondary_result_parameter, 0, sizeof(secondary_result_parameter));
2710 u_int32_t cond_last_interface_index = 0;
2711 necp_kernel_policy_result_parameter ultimate_result_parameter;
2712 memset(&ultimate_result_parameter, 0, sizeof(ultimate_result_parameter));
2713
2714 if (policy == NULL) {
2715 return (FALSE);
2716 }
2717
2718 lck_rw_assert(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
2719
2720 // Process conditions
2721 while (offset < policy->conditions_size) {
2722 u_int32_t length = 0;
2723 u_int8_t *value = necp_buffer_get_tlv_value(policy->conditions, offset, &length);
2724
2725 u_int8_t condition_type = necp_policy_condition_get_type_from_buffer(value, length);
2726 u_int8_t condition_flags = necp_policy_condition_get_flags_from_buffer(value, length);
2727 bool condition_is_negative = condition_flags & NECP_POLICY_CONDITION_FLAGS_NEGATIVE;
2728 u_int32_t condition_length = necp_policy_condition_get_value_length_from_buffer(value, length);
2729 u_int8_t *condition_value = necp_policy_condition_get_value_pointer_from_buffer(value, length);
2730 switch (condition_type) {
2731 case NECP_POLICY_CONDITION_DEFAULT: {
2732 socket_ip_conditions = TRUE;
2733 break;
2734 }
2735 case NECP_POLICY_CONDITION_ALL_INTERFACES: {
2736 master_condition_mask |= NECP_KERNEL_CONDITION_ALL_INTERFACES;
2737 socket_ip_conditions = TRUE;
2738 break;
2739 }
2740 case NECP_POLICY_CONDITION_ENTITLEMENT: {
2741 if (condition_length > 0) {
2742 if (cond_custom_entitlement == NULL) {
2743 cond_custom_entitlement = necp_copy_string((char *)condition_value, condition_length);
2744 if (cond_custom_entitlement != NULL) {
2745 master_condition_mask |= NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT;
2746 socket_only_conditions = TRUE;
2747 }
2748 }
2749 } else {
2750 master_condition_mask |= NECP_KERNEL_CONDITION_ENTITLEMENT;
2751 socket_only_conditions = TRUE;
2752 }
2753 break;
2754 }
2755 case NECP_POLICY_CONDITION_DOMAIN: {
2756 // Make sure there is only one such rule
2757 if (condition_length > 0 && cond_domain == NULL) {
2758 cond_domain = necp_create_trimmed_domain((char *)condition_value, condition_length);
2759 if (cond_domain != NULL) {
2760 master_condition_mask |= NECP_KERNEL_CONDITION_DOMAIN;
2761 if (condition_is_negative) {
2762 master_condition_negated_mask |= NECP_KERNEL_CONDITION_DOMAIN;
2763 }
2764 socket_only_conditions = TRUE;
2765 }
2766 }
2767 break;
2768 }
2769 case NECP_POLICY_CONDITION_ACCOUNT: {
2770 // Make sure there is only one such rule
2771 if (condition_length > 0 && cond_account_id == 0 && policy->applied_account == NULL) {
2772 char *string = NULL;
2773 MALLOC(string, char *, condition_length + 1, M_NECP, M_WAITOK);
2774 if (string != NULL) {
2775 memcpy(string, condition_value, condition_length);
2776 string[condition_length] = 0;
2777 cond_account_id = necp_create_string_to_id_mapping(&necp_account_id_list, string);
2778 if (cond_account_id != 0) {
2779 policy->applied_account = string; // Save the string in parent policy
2780 master_condition_mask |= NECP_KERNEL_CONDITION_ACCOUNT_ID;
2781 if (condition_is_negative) {
2782 master_condition_negated_mask |= NECP_KERNEL_CONDITION_ACCOUNT_ID;
2783 }
2784 socket_only_conditions = TRUE;
2785 } else {
2786 FREE(string, M_NECP);
2787 }
2788 }
2789 }
2790 break;
2791 }
2792 case NECP_POLICY_CONDITION_APPLICATION: {
2793 // Make sure there is only one such rule, because we save the uuid in the policy
2794 if (condition_length >= sizeof(uuid_t) && cond_app_id == 0) {
2795 bool allocated_mapping = FALSE;
2796 uuid_t application_uuid;
2797 memcpy(application_uuid, condition_value, sizeof(uuid_t));
2798 cond_app_id = necp_create_uuid_app_id_mapping(application_uuid, &allocated_mapping, TRUE);
2799 if (cond_app_id != 0) {
2800 if (allocated_mapping) {
2801 necp_uuid_app_id_mappings_dirty = TRUE;
2802 necp_num_uuid_app_id_mappings++;
2803 }
2804 uuid_copy(policy->applied_app_uuid, application_uuid);
2805 master_condition_mask |= NECP_KERNEL_CONDITION_APP_ID;
2806 if (condition_is_negative) {
2807 master_condition_negated_mask |= NECP_KERNEL_CONDITION_APP_ID;
2808 }
2809 socket_only_conditions = TRUE;
2810 }
2811 }
2812 break;
2813 }
2814 case NECP_POLICY_CONDITION_REAL_APPLICATION: {
2815 // Make sure there is only one such rule, because we save the uuid in the policy
2816 if (condition_length >= sizeof(uuid_t) && cond_real_app_id == 0) {
2817 uuid_t real_application_uuid;
2818 memcpy(real_application_uuid, condition_value, sizeof(uuid_t));
2819 cond_real_app_id = necp_create_uuid_app_id_mapping(real_application_uuid, NULL, FALSE);
2820 if (cond_real_app_id != 0) {
2821 uuid_copy(policy->applied_real_app_uuid, real_application_uuid);
2822 master_condition_mask |= NECP_KERNEL_CONDITION_REAL_APP_ID;
2823 if (condition_is_negative) {
2824 master_condition_negated_mask |= NECP_KERNEL_CONDITION_REAL_APP_ID;
2825 }
2826 socket_only_conditions = TRUE;
2827 }
2828 }
2829 break;
2830 }
2831 case NECP_POLICY_CONDITION_PID: {
2832 if (condition_length >= sizeof(pid_t)) {
2833 master_condition_mask |= NECP_KERNEL_CONDITION_PID;
2834 if (condition_is_negative) {
2835 master_condition_negated_mask |= NECP_KERNEL_CONDITION_PID;
2836 }
2837 memcpy(&cond_pid, condition_value, sizeof(cond_pid));
2838 socket_only_conditions = TRUE;
2839 }
2840 break;
2841 }
2842 case NECP_POLICY_CONDITION_UID: {
2843 if (condition_length >= sizeof(uid_t)) {
2844 master_condition_mask |= NECP_KERNEL_CONDITION_UID;
2845 if (condition_is_negative) {
2846 master_condition_negated_mask |= NECP_KERNEL_CONDITION_UID;
2847 }
2848 memcpy(&cond_uid, condition_value, sizeof(cond_uid));
2849 socket_only_conditions = TRUE;
2850 }
2851 break;
2852 }
2853 case NECP_POLICY_CONDITION_TRAFFIC_CLASS: {
2854 if (condition_length >= sizeof(struct necp_policy_condition_tc_range)) {
2855 master_condition_mask |= NECP_KERNEL_CONDITION_TRAFFIC_CLASS;
2856 if (condition_is_negative) {
2857 master_condition_negated_mask |= NECP_KERNEL_CONDITION_TRAFFIC_CLASS;
2858 }
2859 memcpy(&cond_traffic_class, condition_value, sizeof(cond_traffic_class));
2860 socket_only_conditions = TRUE;
2861 }
2862 break;
2863 }
2864 case NECP_POLICY_CONDITION_BOUND_INTERFACE: {
2865 if (condition_length <= IFXNAMSIZ && condition_length > 0) {
2866 char interface_name[IFXNAMSIZ];
2867 memcpy(interface_name, condition_value, condition_length);
2868 interface_name[condition_length - 1] = 0; // Make sure the string is NULL terminated
2869 if (ifnet_find_by_name(interface_name, &cond_bound_interface) == 0) {
2870 master_condition_mask |= NECP_KERNEL_CONDITION_BOUND_INTERFACE;
2871 if (condition_is_negative) {
2872 master_condition_negated_mask |= NECP_KERNEL_CONDITION_BOUND_INTERFACE;
2873 }
2874 }
2875 socket_ip_conditions = TRUE;
2876 }
2877 break;
2878 }
2879 case NECP_POLICY_CONDITION_IP_PROTOCOL: {
2880 if (condition_length >= sizeof(u_int16_t)) {
2881 master_condition_mask |= NECP_KERNEL_CONDITION_PROTOCOL;
2882 if (condition_is_negative) {
2883 master_condition_negated_mask |= NECP_KERNEL_CONDITION_PROTOCOL;
2884 }
2885 memcpy(&cond_protocol, condition_value, sizeof(cond_protocol));
2886 socket_ip_conditions = TRUE;
2887 }
2888 break;
2889 }
2890 case NECP_POLICY_CONDITION_LOCAL_ADDR: {
2891 struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)condition_value;
2892 if (!necp_address_is_valid(&address_struct->address.sa)) {
2893 break;
2894 }
2895
2896 cond_local_prefix = address_struct->prefix;
2897 memcpy(&cond_local_start, &address_struct->address, sizeof(address_struct->address));
2898 master_condition_mask |= NECP_KERNEL_CONDITION_LOCAL_START;
2899 master_condition_mask |= NECP_KERNEL_CONDITION_LOCAL_PREFIX;
2900 if (condition_is_negative) {
2901 master_condition_negated_mask |= NECP_KERNEL_CONDITION_LOCAL_START;
2902 master_condition_negated_mask |= NECP_KERNEL_CONDITION_LOCAL_PREFIX;
2903 }
2904 socket_ip_conditions = TRUE;
2905 break;
2906 }
2907 case NECP_POLICY_CONDITION_REMOTE_ADDR: {
2908 struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)condition_value;
2909 if (!necp_address_is_valid(&address_struct->address.sa)) {
2910 break;
2911 }
2912
2913 cond_remote_prefix = address_struct->prefix;
2914 memcpy(&cond_remote_start, &address_struct->address, sizeof(address_struct->address));
2915 master_condition_mask |= NECP_KERNEL_CONDITION_REMOTE_START;
2916 master_condition_mask |= NECP_KERNEL_CONDITION_REMOTE_PREFIX;
2917 if (condition_is_negative) {
2918 master_condition_negated_mask |= NECP_KERNEL_CONDITION_REMOTE_START;
2919 master_condition_negated_mask |= NECP_KERNEL_CONDITION_REMOTE_PREFIX;
2920 }
2921 socket_ip_conditions = TRUE;
2922 break;
2923 }
2924 case NECP_POLICY_CONDITION_LOCAL_ADDR_RANGE: {
2925 struct necp_policy_condition_addr_range *address_struct = (struct necp_policy_condition_addr_range *)(void *)condition_value;
2926 if (!necp_address_is_valid(&address_struct->start_address.sa) ||
2927 !necp_address_is_valid(&address_struct->end_address.sa)) {
2928 break;
2929 }
2930
2931 memcpy(&cond_local_start, &address_struct->start_address, sizeof(address_struct->start_address));
2932 memcpy(&cond_local_end, &address_struct->end_address, sizeof(address_struct->end_address));
2933 master_condition_mask |= NECP_KERNEL_CONDITION_LOCAL_START;
2934 master_condition_mask |= NECP_KERNEL_CONDITION_LOCAL_END;
2935 if (condition_is_negative) {
2936 master_condition_negated_mask |= NECP_KERNEL_CONDITION_LOCAL_START;
2937 master_condition_negated_mask |= NECP_KERNEL_CONDITION_LOCAL_END;
2938 }
2939 socket_ip_conditions = TRUE;
2940 break;
2941 }
2942 case NECP_POLICY_CONDITION_REMOTE_ADDR_RANGE: {
2943 struct necp_policy_condition_addr_range *address_struct = (struct necp_policy_condition_addr_range *)(void *)condition_value;
2944 if (!necp_address_is_valid(&address_struct->start_address.sa) ||
2945 !necp_address_is_valid(&address_struct->end_address.sa)) {
2946 break;
2947 }
2948
2949 memcpy(&cond_remote_start, &address_struct->start_address, sizeof(address_struct->start_address));
2950 memcpy(&cond_remote_end, &address_struct->end_address, sizeof(address_struct->end_address));
2951 master_condition_mask |= NECP_KERNEL_CONDITION_REMOTE_START;
2952 master_condition_mask |= NECP_KERNEL_CONDITION_REMOTE_END;
2953 if (condition_is_negative) {
2954 master_condition_negated_mask |= NECP_KERNEL_CONDITION_REMOTE_START;
2955 master_condition_negated_mask |= NECP_KERNEL_CONDITION_REMOTE_END;
2956 }
2957 socket_ip_conditions = TRUE;
2958 break;
2959 }
2960 default: {
2961 break;
2962 }
2963 }
2964
2965 offset += sizeof(u_int8_t) + sizeof(u_int32_t) + length;
2966 }
2967
2968 // Process result
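// The result type chosen below determines which kernel policy layers receive a
// derived policy: socket-only conditions are evaluated at the socket layer and
// carried down to the IP output layer via a policy-ID tag, while conditions
// that can also be evaluated on raw IP traffic additionally get a non-ID
// IP output policy.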
2969 ultimate_result = necp_policy_get_result_type(policy);
2970 switch (ultimate_result) {
2971 case NECP_POLICY_RESULT_PASS: {
2972 if (socket_only_conditions) { // socket_ip_conditions can be TRUE or FALSE
2973 socket_layer_non_id_conditions = TRUE;
2974 ip_output_layer_id_condition = TRUE;
2975 } else if (socket_ip_conditions) {
2976 socket_layer_non_id_conditions = TRUE;
2977 ip_output_layer_id_condition = TRUE;
2978 ip_output_layer_non_id_conditions = TRUE;
2979 }
2980 break;
2981 }
2982 case NECP_POLICY_RESULT_DROP: {
2983 if (socket_only_conditions) { // socket_ip_conditions can be TRUE or FALSE
2984 socket_layer_non_id_conditions = TRUE;
2985 } else if (socket_ip_conditions) {
2986 socket_layer_non_id_conditions = TRUE;
2987 ip_output_layer_non_id_conditions = TRUE;
2988 ip_output_layer_non_id_only = TRUE; // Only apply drop to packets that didn't go through socket layer
2989 }
2990 break;
2991 }
2992 case NECP_POLICY_RESULT_SKIP: {
2993 u_int32_t skip_policy_order = 0;
2994 if (necp_policy_get_result_parameter(policy, (u_int8_t *)&skip_policy_order, sizeof(skip_policy_order))) {
2995 ultimate_result_parameter.skip_policy_order = skip_policy_order;
2996 }
2997
2998 if (socket_only_conditions) { // socket_ip_conditions can be TRUE or FALSE
2999 socket_layer_non_id_conditions = TRUE;
3000 ip_output_layer_id_condition = TRUE;
3001 } else if (socket_ip_conditions) {
3002 socket_layer_non_id_conditions = TRUE;
3003 ip_output_layer_non_id_conditions = TRUE;
3004 }
3005 break;
3006 }
3007 case NECP_POLICY_RESULT_SOCKET_DIVERT:
3008 case NECP_POLICY_RESULT_SOCKET_FILTER: {
3009 u_int32_t control_unit = 0;
3010 if (necp_policy_get_result_parameter(policy, (u_int8_t *)&control_unit, sizeof(control_unit))) {
3011 ultimate_result_parameter.flow_divert_control_unit = control_unit;
3012 }
3013 socket_layer_non_id_conditions = TRUE;
3014 break;
3015 }
3016 case NECP_POLICY_RESULT_IP_TUNNEL: {
3017 struct necp_policy_result_ip_tunnel tunnel_parameters;
3018 u_int32_t tunnel_parameters_length = necp_policy_get_result_parameter_length(policy);
3019 if (tunnel_parameters_length > sizeof(u_int32_t) &&
3020 tunnel_parameters_length <= sizeof(struct necp_policy_result_ip_tunnel) &&
3021 necp_policy_get_result_parameter(policy, (u_int8_t *)&tunnel_parameters, sizeof(tunnel_parameters))) {
3022 ifnet_t tunnel_interface = NULL;
3023 tunnel_parameters.interface_name[tunnel_parameters_length - sizeof(u_int32_t) - 1] = 0; // Make sure the string is NULL terminated
3024 if (ifnet_find_by_name(tunnel_parameters.interface_name, &tunnel_interface) == 0) {
3025 ultimate_result_parameter.tunnel_interface_index = tunnel_interface->if_index;
3026 ifnet_release(tunnel_interface);
3027 }
3028
3029 secondary_result = tunnel_parameters.secondary_result;
3030 if (secondary_result) {
3031 cond_last_interface_index = ultimate_result_parameter.tunnel_interface_index;
3032 }
3033 }
3034
3035 if (socket_only_conditions) { // socket_ip_conditions can be TRUE or FALSE
3036 socket_layer_non_id_conditions = TRUE;
3037 ip_output_layer_id_condition = TRUE;
3038 if (secondary_result) {
3039 ip_output_layer_tunnel_condition_from_id = TRUE;
3040 }
3041 } else if (socket_ip_conditions) {
3042 socket_layer_non_id_conditions = TRUE;
3043 ip_output_layer_id_condition = TRUE;
3044 ip_output_layer_non_id_conditions = TRUE;
3045 if (secondary_result) {
3046 ip_output_layer_tunnel_condition_from_id = TRUE;
3047 ip_output_layer_tunnel_condition_from_non_id = TRUE;
3048 }
3049 }
3050 break;
3051 }
3052 case NECP_POLICY_RESULT_TRIGGER:
3053 case NECP_POLICY_RESULT_TRIGGER_IF_NEEDED:
3054 case NECP_POLICY_RESULT_TRIGGER_SCOPED:
3055 case NECP_POLICY_RESULT_NO_TRIGGER_SCOPED: {
3056 struct necp_policy_result_service service_parameters;
3057 u_int32_t service_result_length = necp_policy_get_result_parameter_length(policy);
3058 bool has_extra_service_data = FALSE;
3059 if (service_result_length >= (sizeof(service_parameters))) {
3060 has_extra_service_data = TRUE;
3061 }
3062 if (necp_policy_get_result_parameter(policy, (u_int8_t *)&service_parameters, sizeof(service_parameters))) {
3063 ultimate_result_parameter.service.identifier = necp_create_uuid_service_id_mapping(service_parameters.identifier);
3064 if (ultimate_result_parameter.service.identifier != 0) {
3065 uuid_copy(policy->applied_result_uuid, service_parameters.identifier);
3066 socket_layer_non_id_conditions = TRUE;
3067 if (has_extra_service_data) {
3068 ultimate_result_parameter.service.data = service_parameters.data;
3069 } else {
3070 ultimate_result_parameter.service.data = 0;
3071 }
3072 }
3073 }
3074 break;
3075 }
3076 case NECP_POLICY_RESULT_USE_NETAGENT: {
3077 uuid_t netagent_uuid;
3078 if (necp_policy_get_result_parameter(policy, (u_int8_t *)&netagent_uuid, sizeof(netagent_uuid))) {
3079 ultimate_result_parameter.netagent_id = necp_create_uuid_service_id_mapping(netagent_uuid);
3080 if (ultimate_result_parameter.netagent_id != 0) {
3081 uuid_copy(policy->applied_result_uuid, netagent_uuid);
3082 socket_layer_non_id_conditions = TRUE;
3083 }
3084 }
3085 break;
3086 }
3087 case NECP_POLICY_RESULT_SOCKET_SCOPED: {
3088 u_int32_t interface_name_length = necp_policy_get_result_parameter_length(policy);
3089 if (interface_name_length <= IFXNAMSIZ && interface_name_length > 0) {
3090 char interface_name[IFXNAMSIZ];
3091 ifnet_t scope_interface = NULL;
3092 necp_policy_get_result_parameter(policy, (u_int8_t *)interface_name, interface_name_length);
3093 interface_name[interface_name_length - 1] = 0; // Make sure the string is NULL terminated
3094 if (ifnet_find_by_name(interface_name, &scope_interface) == 0) {
3095 ultimate_result_parameter.scoped_interface_index = scope_interface->if_index;
3096 socket_layer_non_id_conditions = TRUE;
3097 ifnet_release(scope_interface);
3098 }
3099 }
3100 break;
3101 }
3102 case NECP_POLICY_RESULT_ROUTE_RULES: {
3103 if (policy->route_rules != NULL && policy->route_rules_size > 0) {
3104 u_int32_t route_rule_id = necp_create_route_rule(&necp_route_rules, policy->route_rules, policy->route_rules_size);
3105 if (route_rule_id > 0) {
3106 policy->applied_route_rules_id = route_rule_id;
3107 ultimate_result_parameter.route_rule_id = route_rule_id;
3108 socket_layer_non_id_conditions = TRUE;
3109 }
3110 }
3111 break;
3112 }
3113 default: {
3114 break;
3115 }
3116 }
3117
3118 if (socket_layer_non_id_conditions) {
3119 necp_kernel_policy_id policy_id = necp_kernel_socket_policy_add(policy->id, policy->order, session->session_order, session->proc_pid, master_condition_mask, master_condition_negated_mask, cond_app_id, cond_real_app_id, cond_custom_entitlement, cond_account_id, cond_domain, cond_pid, cond_uid, cond_bound_interface, cond_traffic_class, cond_protocol, &cond_local_start, &cond_local_end, cond_local_prefix, &cond_remote_start, &cond_remote_end, cond_remote_prefix, ultimate_result, ultimate_result_parameter);
3120
3121 if (policy_id == 0) {
3122 NECPLOG0(LOG_DEBUG, "Error applying socket kernel policy");
3123 goto fail;
3124 }
3125
3126 cond_ip_output_layer_id = policy_id;
3127 policy->kernel_socket_policies[0] = policy_id;
3128 }
3129
3130 if (ip_output_layer_non_id_conditions) {
3131 u_int32_t condition_mask = master_condition_mask;
3132 if (ip_output_layer_non_id_only) {
3133 condition_mask |= NECP_KERNEL_CONDITION_POLICY_ID;
3134 }
3135 necp_kernel_policy_id policy_id = necp_kernel_ip_output_policy_add(policy->id, policy->order, NECP_KERNEL_POLICY_SUBORDER_NON_ID_CONDITIONS, session->session_order, session->proc_pid, condition_mask, master_condition_negated_mask, NECP_KERNEL_POLICY_ID_NONE, cond_bound_interface, 0, cond_protocol, &cond_local_start, &cond_local_end, cond_local_prefix, &cond_remote_start, &cond_remote_end, cond_remote_prefix, ultimate_result, ultimate_result_parameter);
3136
3137 if (policy_id == 0) {
3138 NECPLOG0(LOG_DEBUG, "Error applying IP output kernel policy");
3139 goto fail;
3140 }
3141
3142 policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_NON_ID_CONDITIONS] = policy_id;
3143 }
3144
3145 if (ip_output_layer_id_condition) {
3146 necp_kernel_policy_id policy_id = necp_kernel_ip_output_policy_add(policy->id, policy->order, NECP_KERNEL_POLICY_SUBORDER_ID_CONDITION, session->session_order, session->proc_pid, NECP_KERNEL_CONDITION_POLICY_ID | NECP_KERNEL_CONDITION_ALL_INTERFACES, 0, cond_ip_output_layer_id, NULL, 0, 0, NULL, NULL, 0, NULL, NULL, 0, ultimate_result, ultimate_result_parameter);
3147
3148 if (policy_id == 0) {
3149 NECPLOG0(LOG_DEBUG, "Error applying IP output kernel policy");
3150 goto fail;
3151 }
3152
3153 policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_ID_CONDITION] = policy_id;
3154 }
3155
3156 // Extra policies for IP Output tunnels for when packets loop back
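// When a tunneled packet loops back through ip_output, it is matched by the
// policy ID it was tagged with plus the last interface, and the secondary
// result is applied. One loopback policy is keyed off the non-ID-conditions
// IP output policy, the other off the ID-condition policy.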
3157 if (ip_output_layer_tunnel_condition_from_non_id) {
3158 necp_kernel_policy_id policy_id = necp_kernel_ip_output_policy_add(policy->id, policy->order, NECP_KERNEL_POLICY_SUBORDER_NON_ID_TUNNEL_CONDITION, session->session_order, session->proc_pid, NECP_KERNEL_CONDITION_POLICY_ID | NECP_KERNEL_CONDITION_LAST_INTERFACE | NECP_KERNEL_CONDITION_ALL_INTERFACES, 0, policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_NON_ID_CONDITIONS], NULL, cond_last_interface_index, 0, NULL, NULL, 0, NULL, NULL, 0, secondary_result, secondary_result_parameter);
3159
3160 if (policy_id == 0) {
3161 NECPLOG0(LOG_DEBUG, "Error applying IP output kernel policy");
3162 goto fail;
3163 }
3164
3165 policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_NON_ID_TUNNEL_CONDITION] = policy_id;
3166 }
3167
3168 if (ip_output_layer_tunnel_condition_from_id) {
3169 necp_kernel_policy_id policy_id = necp_kernel_ip_output_policy_add(policy->id, policy->order, NECP_KERNEL_POLICY_SUBORDER_ID_TUNNEL_CONDITION, session->session_order, session->proc_pid, NECP_KERNEL_CONDITION_POLICY_ID | NECP_KERNEL_CONDITION_LAST_INTERFACE | NECP_KERNEL_CONDITION_ALL_INTERFACES, 0, policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_ID_CONDITION], NULL, cond_last_interface_index, 0, NULL, NULL, 0, NULL, NULL, 0, secondary_result, secondary_result_parameter);
3170
3171 if (policy_id == 0) {
3172 NECPLOG0(LOG_DEBUG, "Error applying IP output kernel policy");
3173 goto fail;
3174 }
3175
3176 policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_ID_TUNNEL_CONDITION] = policy_id;
3177 }
3178
3179 policy->applied = TRUE;
3180 policy->pending_update = FALSE;
3181 return (TRUE);
3182
3183 fail:
3184 return (FALSE);
3185 }
3186
3187 static void
3188 necp_policy_apply_all(struct necp_session *session)
3189 {
3190 struct necp_session_policy *policy = NULL;
3191 struct necp_session_policy *temp_policy = NULL;
3192 struct kev_necp_policies_changed_data kev_data;
3193 kev_data.changed_count = 0;
3194
3195 lck_rw_lock_exclusive(&necp_kernel_policy_lock);
3196
3197 // Remove policies pending deletion, apply new policies, and re-apply updated ones
3198 if (session->dirty) {
3199 LIST_FOREACH_SAFE(policy, &session->policies, chain, temp_policy) {
3200 if (policy->pending_deletion) {
3201 if (policy->applied) {
3202 necp_policy_unapply(policy);
3203 }
3204 // Delete the policy
3205 necp_policy_delete(session, policy);
3206 } else if (!policy->applied) {
3207 necp_policy_apply(session, policy);
3208 } else if (policy->pending_update) {
3209 // Must have been applied, but needs an update. Remove and re-add.
3210 necp_policy_unapply(policy);
3211 necp_policy_apply(session, policy);
3212 }
3213 }
3214
3215 necp_kernel_socket_policies_update_uuid_table();
3216 necp_kernel_socket_policies_reprocess();
3217 necp_kernel_ip_output_policies_reprocess();
3218
3219 // Clear dirty bit flags
3220 session->dirty = FALSE;
3221 }
3222
3223 lck_rw_done(&necp_kernel_policy_lock);
3224
3225 necp_update_all_clients();
3226 necp_post_change_event(&kev_data);
3227
3228 if (necp_debug) {
3229 NECPLOG0(LOG_DEBUG, "Applied NECP policies");
3230 }
3231 }
3232
3233 // Kernel Policy Management
3234 // ---------------------
3235 // Kernel policies are derived from session policies
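// Kernel policy IDs are allocated monotonically while holding the kernel
// policy lock exclusively; when the counter wraps, it is reset to
// NECP_KERNEL_POLICY_ID_FIRST_VALID so reserved low values are never reused.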
3236 static necp_kernel_policy_id
3237 necp_kernel_policy_get_new_id(void)
3238 {
3239 necp_kernel_policy_id newid = NECP_KERNEL_POLICY_ID_NONE;
3240
3241 lck_rw_assert(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
3242
3243 necp_last_kernel_policy_id++;
3244 if (necp_last_kernel_policy_id < NECP_KERNEL_POLICY_ID_FIRST_VALID) {
3245 necp_last_kernel_policy_id = NECP_KERNEL_POLICY_ID_FIRST_VALID;
3246 }
3247
3248 newid = necp_last_kernel_policy_id;
3249 if (newid == NECP_KERNEL_POLICY_ID_NONE) {
3250 NECPLOG0(LOG_DEBUG, "Allocate kernel policy id failed.\n");
3251 return (0);
3252 }
3253
3254 return (newid);
3255 }
3256
3257 #define NECP_KERNEL_VALID_SOCKET_CONDITIONS (NECP_KERNEL_CONDITION_APP_ID | NECP_KERNEL_CONDITION_REAL_APP_ID | NECP_KERNEL_CONDITION_DOMAIN | NECP_KERNEL_CONDITION_ACCOUNT_ID | NECP_KERNEL_CONDITION_PID | NECP_KERNEL_CONDITION_UID | NECP_KERNEL_CONDITION_ALL_INTERFACES | NECP_KERNEL_CONDITION_BOUND_INTERFACE | NECP_KERNEL_CONDITION_TRAFFIC_CLASS | NECP_KERNEL_CONDITION_PROTOCOL | NECP_KERNEL_CONDITION_LOCAL_START | NECP_KERNEL_CONDITION_LOCAL_END | NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_REMOTE_START | NECP_KERNEL_CONDITION_REMOTE_END | NECP_KERNEL_CONDITION_REMOTE_PREFIX | NECP_KERNEL_CONDITION_ENTITLEMENT | NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT)
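// Adds a socket-layer kernel policy derived from a session policy. Note the
// ownership of condition values: the domain and custom-entitlement strings are
// stored by pointer (not copied) and freed in necp_kernel_socket_policy_delete,
// and cond_bound_interface gains an ifnet reference that is released there.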
3258 static necp_kernel_policy_id
3259 necp_kernel_socket_policy_add(necp_policy_id parent_policy_id, necp_policy_order order, u_int32_t session_order, int session_pid, u_int32_t condition_mask, u_int32_t condition_negated_mask, necp_app_id cond_app_id, necp_app_id cond_real_app_id, char *cond_custom_entitlement, u_int32_t cond_account_id, char *cond_domain, pid_t cond_pid, uid_t cond_uid, ifnet_t cond_bound_interface, struct necp_policy_condition_tc_range cond_traffic_class, u_int16_t cond_protocol, union necp_sockaddr_union *cond_local_start, union necp_sockaddr_union *cond_local_end, u_int8_t cond_local_prefix, union necp_sockaddr_union *cond_remote_start, union necp_sockaddr_union *cond_remote_end, u_int8_t cond_remote_prefix, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter)
3260 {
3261 struct necp_kernel_socket_policy *new_kernel_policy = NULL;
3262 struct necp_kernel_socket_policy *tmp_kernel_policy = NULL;
3263
3264 MALLOC_ZONE(new_kernel_policy, struct necp_kernel_socket_policy *, sizeof(*new_kernel_policy), M_NECP_SOCKET_POLICY, M_WAITOK);
3265 if (new_kernel_policy == NULL) {
3266 goto done;
3267 }
3268
3269 memset(new_kernel_policy, 0, sizeof(*new_kernel_policy));
3270 new_kernel_policy->parent_policy_id = parent_policy_id;
3271 new_kernel_policy->id = necp_kernel_policy_get_new_id();
3272 new_kernel_policy->order = order;
3273 new_kernel_policy->session_order = session_order;
3274 new_kernel_policy->session_pid = session_pid;
3275
3276 // Sanitize condition mask
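// Conditions that only make sense in combination are dropped here: a bound
// interface is ignored when all interfaces are matched, the real-app and
// entitlement checks require an app ID, and an explicit end address
// supersedes a prefix length for each address range.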
3277 new_kernel_policy->condition_mask = (condition_mask & NECP_KERNEL_VALID_SOCKET_CONDITIONS);
3278 if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES) && (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE)) {
3279 new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_BOUND_INTERFACE;
3280 }
3281 if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) && !(new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID)) {
3282 new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_REAL_APP_ID;
3283 }
3284 if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT) && !(new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID)) {
3285 new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_ENTITLEMENT;
3286 }
3287 if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) && (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX)) {
3288 new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_LOCAL_PREFIX;
3289 }
3290 if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) && (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX)) {
3291 new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_REMOTE_PREFIX;
3292 }
3293 new_kernel_policy->condition_negated_mask = condition_negated_mask & new_kernel_policy->condition_mask;
3294
3295 // Set condition values
3296 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID) {
3297 new_kernel_policy->cond_app_id = cond_app_id;
3298 }
3299 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
3300 new_kernel_policy->cond_real_app_id = cond_real_app_id;
3301 }
3302 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT) {
3303 new_kernel_policy->cond_custom_entitlement = cond_custom_entitlement;
3304 }
3305 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID) {
3306 new_kernel_policy->cond_account_id = cond_account_id;
3307 }
3308 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_DOMAIN) {
3309 new_kernel_policy->cond_domain = cond_domain;
3310 new_kernel_policy->cond_domain_dot_count = necp_count_dots(cond_domain, strlen(cond_domain));
3311 }
3312 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PID) {
3313 new_kernel_policy->cond_pid = cond_pid;
3314 }
3315 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_UID) {
3316 new_kernel_policy->cond_uid = cond_uid;
3317 }
3318 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
3319 if (cond_bound_interface) {
3320 ifnet_reference(cond_bound_interface);
3321 }
3322 new_kernel_policy->cond_bound_interface = cond_bound_interface;
3323 }
3324 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
3325 new_kernel_policy->cond_traffic_class = cond_traffic_class;
3326 }
3327 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
3328 new_kernel_policy->cond_protocol = cond_protocol;
3329 }
3330 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
3331 memcpy(&new_kernel_policy->cond_local_start, cond_local_start, cond_local_start->sa.sa_len);
3332 }
3333 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
3334 memcpy(&new_kernel_policy->cond_local_end, cond_local_end, cond_local_end->sa.sa_len);
3335 }
3336 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
3337 new_kernel_policy->cond_local_prefix = cond_local_prefix;
3338 }
3339 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
3340 memcpy(&new_kernel_policy->cond_remote_start, cond_remote_start, cond_remote_start->sa.sa_len);
3341 }
3342 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
3343 memcpy(&new_kernel_policy->cond_remote_end, cond_remote_end, cond_remote_end->sa.sa_len);
3344 }
3345 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
3346 new_kernel_policy->cond_remote_prefix = cond_remote_prefix;
3347 }
3348
3349 new_kernel_policy->result = result;
3350 memcpy(&new_kernel_policy->result_parameter, &result_parameter, sizeof(result_parameter));
3351
3352 if (necp_debug) {
3353 NECPLOG(LOG_DEBUG, "Added kernel policy: socket, id=%d, mask=%x\n", new_kernel_policy->id, new_kernel_policy->condition_mask);
3354 }
3355 LIST_INSERT_SORTED_TWICE_ASCENDING(&necp_kernel_socket_policies, new_kernel_policy, chain, session_order, order, tmp_kernel_policy);
3356 done:
3357 return (new_kernel_policy ? new_kernel_policy->id : 0);
3358 }
3359
3360 static struct necp_kernel_socket_policy *
3361 necp_kernel_socket_policy_find(necp_kernel_policy_id policy_id)
3362 {
3363 struct necp_kernel_socket_policy *kernel_policy = NULL;
3364 struct necp_kernel_socket_policy *tmp_kernel_policy = NULL;
3365
3366 if (policy_id == 0) {
3367 return (NULL);
3368 }
3369
3370 LIST_FOREACH_SAFE(kernel_policy, &necp_kernel_socket_policies, chain, tmp_kernel_policy) {
3371 if (kernel_policy->id == policy_id) {
3372 return (kernel_policy);
3373 }
3374 }
3375
3376 return (NULL);
3377 }
3378
3379 static bool
3380 necp_kernel_socket_policy_delete(necp_kernel_policy_id policy_id)
3381 {
3382 struct necp_kernel_socket_policy *policy = NULL;
3383
3384 lck_rw_assert(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
3385
3386 policy = necp_kernel_socket_policy_find(policy_id);
3387 if (policy) {
3388 LIST_REMOVE(policy, chain);
3389
3390 if (policy->cond_bound_interface) {
3391 ifnet_release(policy->cond_bound_interface);
3392 policy->cond_bound_interface = NULL;
3393 }
3394
3395 if (policy->cond_domain) {
3396 FREE(policy->cond_domain, M_NECP);
3397 policy->cond_domain = NULL;
3398 }
3399
3400 if (policy->cond_custom_entitlement) {
3401 FREE(policy->cond_custom_entitlement, M_NECP);
3402 policy->cond_custom_entitlement = NULL;
3403 }
3404
3405 FREE_ZONE(policy, sizeof(*policy), M_NECP_SOCKET_POLICY);
3406 return (TRUE);
3407 }
3408
3409 return (FALSE);
3410 }
3411
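// Formats a human-readable description of a kernel policy result for the
// debug logging below; result_string must point at a buffer of at least
// MAX_RESULT_STRING_LEN bytes.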
3412 static inline const char *
3413 necp_get_result_description(char *result_string, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter)
3414 {
3415 uuid_string_t uuid_string;
3416 switch (result) {
3417 case NECP_KERNEL_POLICY_RESULT_NONE: {
3418 snprintf(result_string, MAX_RESULT_STRING_LEN, "None");
3419 break;
3420 }
3421 case NECP_KERNEL_POLICY_RESULT_PASS: {
3422 snprintf(result_string, MAX_RESULT_STRING_LEN, "Pass");
3423 break;
3424 }
3425 case NECP_KERNEL_POLICY_RESULT_SKIP: {
3426 snprintf(result_string, MAX_RESULT_STRING_LEN, "Skip (%u)", result_parameter.skip_policy_order);
3427 break;
3428 }
3429 case NECP_KERNEL_POLICY_RESULT_DROP: {
3430 snprintf(result_string, MAX_RESULT_STRING_LEN, "Drop");
3431 break;
3432 }
3433 case NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT: {
3434 snprintf(result_string, MAX_RESULT_STRING_LEN, "SocketDivert (%d)", result_parameter.flow_divert_control_unit);
3435 break;
3436 }
3437 case NECP_KERNEL_POLICY_RESULT_SOCKET_FILTER: {
3438 snprintf(result_string, MAX_RESULT_STRING_LEN, "SocketFilter (%d)", result_parameter.filter_control_unit);
3439 break;
3440 }
3441 case NECP_KERNEL_POLICY_RESULT_IP_TUNNEL: {
3442 ifnet_t interface = ifindex2ifnet[result_parameter.tunnel_interface_index];
3443 snprintf(result_string, MAX_RESULT_STRING_LEN, "IPTunnel (%s%d)", ifnet_name(interface), ifnet_unit(interface));
3444 break;
3445 }
3446 case NECP_KERNEL_POLICY_RESULT_IP_FILTER: {
3447 snprintf(result_string, MAX_RESULT_STRING_LEN, "IPFilter");
3448 break;
3449 }
3450 case NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED: {
3451 ifnet_t interface = ifindex2ifnet[result_parameter.scoped_interface_index];
3452 snprintf(result_string, MAX_RESULT_STRING_LEN, "SocketScoped (%s%d)", ifnet_name(interface), ifnet_unit(interface));
3453 break;
3454 }
3455 case NECP_KERNEL_POLICY_RESULT_ROUTE_RULES: {
3456 int index = 0;
3457 char interface_names[MAX_ROUTE_RULE_INTERFACES][IFXNAMSIZ]; // One name buffer per exception interface slot
3458 struct necp_route_rule *route_rule = necp_lookup_route_rule_locked(&necp_route_rules, result_parameter.route_rule_id);
3459 if (route_rule != NULL) {
3460 for (index = 0; index < MAX_ROUTE_RULE_INTERFACES; index++) {
3461 if (route_rule->exception_if_indices[index] != 0) {
3462 ifnet_t interface = ifindex2ifnet[route_rule->exception_if_indices[index]];
3463 snprintf(interface_names[index], IFXNAMSIZ, "%s%d", ifnet_name(interface), ifnet_unit(interface));
3464 } else {
3465 memset(interface_names[index], 0, IFXNAMSIZ);
3466 }
3467 }
3468 switch (route_rule->default_action) {
3469 case NECP_ROUTE_RULE_DENY_INTERFACE:
3470 snprintf(result_string, MAX_RESULT_STRING_LEN, "RouteRules (Only %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)",
3471 (route_rule->cellular_action == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? "Cell " : "",
3472 (route_rule->wifi_action == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? "WiFi " : "",
3473 (route_rule->wired_action == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? "Wired " : "",
3474 (route_rule->expensive_action == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? "Exp " : "",
3475 (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[0] : "",
3476 (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
3477 (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[1] : "",
3478 (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
3479 (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[2] : "",
3480 (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
3481 (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[3] : "",
3482 (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
3483 (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[4] : "",
3484 (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
3485 (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[5] : "",
3486 (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
3487 (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[6] : "",
3488 (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
3489 (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[7] : "",
3490 (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
3491 (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[8] : "",
3492 (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "",
3493 (route_rule->exception_if_actions[9] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[9] : "");
3494 break;
3495 case NECP_ROUTE_RULE_ALLOW_INTERFACE:
3496 snprintf(result_string, MAX_RESULT_STRING_LEN, "RouteRules (%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)",
3497 (route_rule->cellular_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!Cell " : "",
3498 (route_rule->wifi_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!WiFi " : "",
3499 (route_rule->wired_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!Wired " : "",
3500 (route_rule->expensive_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!Exp " : "",
3501 (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
3502 (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[0] : "",
3503 (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
3504 (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[1] : "",
3505 (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
3506 (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[2] : "",
3507 (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
3508 (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[3] : "",
3509 (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
3510 (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[4] : "",
3511 (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
3512 (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[5] : "",
3513 (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
3514 (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[6] : "",
3515 (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
3516 (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[7] : "",
3517 (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
3518 (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[8] : "",
3519 (route_rule->exception_if_actions[9] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "",
3520 (route_rule->exception_if_actions[9] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[9] : "");
3521 break;
3522 case NECP_ROUTE_RULE_QOS_MARKING:
3523 snprintf(result_string, MAX_RESULT_STRING_LEN, "RouteRules (QoSMarking %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)",
3524 (route_rule->cellular_action == NECP_ROUTE_RULE_QOS_MARKING) ? "Cell " : "",
3525 (route_rule->wifi_action == NECP_ROUTE_RULE_QOS_MARKING) ? "WiFi " : "",
3526 (route_rule->wired_action == NECP_ROUTE_RULE_QOS_MARKING) ? "Wired " : "",
3527 (route_rule->expensive_action == NECP_ROUTE_RULE_QOS_MARKING) ? "Exp " : "",
3528 (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[0] : "",
3529 (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
3530 (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[1] : "",
3531 (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
3532 (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[2] : "",
3533 (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
3534 (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[3] : "",
3535 (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
3536 (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[4] : "",
3537 (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
3538 (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[5] : "",
3539 (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
3540 (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[6] : "",
3541 (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
3542 (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[7] : "",
3543 (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
3544 (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[8] : "",
3545 (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "",
3546 (route_rule->exception_if_actions[9] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[9] : "");
3547 break;
3548 default:
3549 snprintf(result_string, MAX_RESULT_STRING_LEN, "RouteRules (Unknown)");
3550 break;
3551 }
3552 }
3553 break;
3554 }
3555 case NECP_KERNEL_POLICY_RESULT_USE_NETAGENT: {
3556 bool found_mapping = FALSE;
3557 struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.netagent_id);
3558 if (mapping != NULL) {
3559 uuid_unparse(mapping->uuid, uuid_string);
3560 found_mapping = TRUE;
3561 }
3562 snprintf(result_string, MAX_RESULT_STRING_LEN, "UseNetAgent (%s)", found_mapping ? uuid_string : "Unknown");
3563 break;
3564 }
3565 case NECP_POLICY_RESULT_TRIGGER: {
3566 bool found_mapping = FALSE;
3567 struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.service.identifier);
3568 if (mapping != NULL) {
3569 uuid_unparse(mapping->uuid, uuid_string);
3570 found_mapping = TRUE;
3571 }
3572 snprintf(result_string, MAX_RESULT_STRING_LEN, "Trigger (%s.%d)", found_mapping ? uuid_string : "Unknown", result_parameter.service.data);
3573 break;
3574 }
3575 case NECP_POLICY_RESULT_TRIGGER_IF_NEEDED: {
3576 bool found_mapping = FALSE;
3577 struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.service.identifier);
3578 if (mapping != NULL) {
3579 uuid_unparse(mapping->uuid, uuid_string);
3580 found_mapping = TRUE;
3581 }
3582 snprintf(result_string, MAX_RESULT_STRING_LEN, "TriggerIfNeeded (%s.%d)", found_mapping ? uuid_string : "Unknown", result_parameter.service.data);
3583 break;
3584 }
3585 case NECP_POLICY_RESULT_TRIGGER_SCOPED: {
3586 bool found_mapping = FALSE;
3587 struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.service.identifier);
3588 if (mapping != NULL) {
3589 uuid_unparse(mapping->uuid, uuid_string);
3590 found_mapping = TRUE;
3591 }
3592 snprintf(result_string, MAX_RESULT_STRING_LEN, "TriggerScoped (%s.%d)", found_mapping ? uuid_string : "Unknown", result_parameter.service.data);
3593 break;
3594 }
3595 case NECP_POLICY_RESULT_NO_TRIGGER_SCOPED: {
3596 bool found_mapping = FALSE;
3597 struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.service.identifier);
3598 if (mapping != NULL) {
3599 uuid_unparse(mapping->uuid, uuid_string);
3600 found_mapping = TRUE;
3601 }
3602 snprintf(result_string, MAX_RESULT_STRING_LEN, "NoTriggerScoped (%s.%d)", found_mapping ? uuid_string : "Unknown", result_parameter.service.data);
3603 break;
3604 }
3605 default: {
3606 snprintf(result_string, MAX_RESULT_STRING_LEN, "Unknown %d (%d)", result, result_parameter.tunnel_interface_index);
3607 break;
3608 }
3609 }
3610 return (result_string);
3611 }
3612
3613 static void
3614 necp_kernel_socket_policies_dump_all(void)
3615 {
3616 if (necp_debug) {
3617 struct necp_kernel_socket_policy *policy = NULL;
3618 int policy_i;
3619 int app_i;
3620 char result_string[MAX_RESULT_STRING_LEN];
3621 char proc_name_string[MAXCOMLEN + 1];
3622 memset(result_string, 0, MAX_RESULT_STRING_LEN);
3623 memset(proc_name_string, 0, MAXCOMLEN + 1);
3624
3625 NECPLOG0(LOG_DEBUG, "NECP Application Policies:\n");
3626 NECPLOG0(LOG_DEBUG, "-----------\n");
3627 for (policy_i = 0; necp_kernel_socket_policies_app_layer_map != NULL && necp_kernel_socket_policies_app_layer_map[policy_i] != NULL; policy_i++) {
3628 policy = necp_kernel_socket_policies_app_layer_map[policy_i];
3629 proc_name(policy->session_pid, proc_name_string, MAXCOMLEN);
3630 NECPLOG(LOG_DEBUG, "\t%3d. Policy ID: %5d\tProcess: %10.10s\tOrder: %04d.%04d\tMask: %5x\tResult: %s\n", policy_i, policy->id, proc_name_string, policy->session_order, policy->order, policy->condition_mask, necp_get_result_description(result_string, policy->result, policy->result_parameter));
3631 }
3632 if (necp_kernel_socket_policies_app_layer_map != NULL && necp_kernel_socket_policies_app_layer_map[0] != NULL) {
3633 NECPLOG0(LOG_DEBUG, "-----------\n");
3634 }
3635
3636 NECPLOG0(LOG_DEBUG, "NECP Socket Policies:\n");
3637 NECPLOG0(LOG_DEBUG, "-----------\n");
3638 for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) {
3639 NECPLOG(LOG_DEBUG, "\tApp Bucket: %d\n", app_i);
3640 for (policy_i = 0; necp_kernel_socket_policies_map[app_i] != NULL && (necp_kernel_socket_policies_map[app_i])[policy_i] != NULL; policy_i++) {
3641 policy = (necp_kernel_socket_policies_map[app_i])[policy_i];
3642 proc_name(policy->session_pid, proc_name_string, MAXCOMLEN);
3643 NECPLOG(LOG_DEBUG, "\t%3d. Policy ID: %5d\tProcess: %10.10s\tOrder: %04d.%04d\tMask: %5x\tResult: %s\n", policy_i, policy->id, proc_name_string, policy->session_order, policy->order, policy->condition_mask, necp_get_result_description(result_string, policy->result, policy->result_parameter));
3644 }
3645 NECPLOG0(LOG_DEBUG, "-----------\n");
3646 }
3647 }
3648 }
3649
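// Relies on the trigger-style results (Trigger, TriggerIfNeeded,
// TriggerScoped, NoTriggerScoped) occupying a contiguous range of result
// values.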
3650 static inline bool
3651 necp_kernel_socket_result_is_trigger_service_type(struct necp_kernel_socket_policy *kernel_policy)
3652 {
3653 return (kernel_policy->result >= NECP_KERNEL_POLICY_RESULT_TRIGGER && kernel_policy->result <= NECP_KERNEL_POLICY_RESULT_NO_TRIGGER_SCOPED);
3654 }
3655
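// Returns TRUE if a higher-priority policy's result would shadow the result of
// a lower-priority policy with overlapping conditions. Used while building the
// lookup maps to decide whether the lower policy can ever take effect.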
3656 static inline bool
3657 necp_kernel_socket_policy_results_overlap(struct necp_kernel_socket_policy *upper_policy, struct necp_kernel_socket_policy *lower_policy)
3658 {
3659 if (upper_policy->result == NECP_KERNEL_POLICY_RESULT_DROP) {
3660 // Drop always cancels out lower policies
3661 return (TRUE);
3662 } else if (upper_policy->result == NECP_KERNEL_POLICY_RESULT_SOCKET_FILTER ||
3663 upper_policy->result == NECP_KERNEL_POLICY_RESULT_ROUTE_RULES ||
3664 upper_policy->result == NECP_KERNEL_POLICY_RESULT_USE_NETAGENT) {
3665 // Filters and route rules never cancel out lower policies
3666 return (FALSE);
3667 } else if (necp_kernel_socket_result_is_trigger_service_type(upper_policy)) {
3668 // Trigger/Scoping policies can overlap one another, but not other results
3669 return (necp_kernel_socket_result_is_trigger_service_type(lower_policy));
3670 } else if (upper_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
3671 if (upper_policy->session_order != lower_policy->session_order) {
3672 // A skip cannot override a policy of a different session
3673 return (FALSE);
3674 } else {
3675 if (upper_policy->result_parameter.skip_policy_order == 0 ||
3676 lower_policy->order >= upper_policy->result_parameter.skip_policy_order) {
3677 // This policy is beyond the skip
3678 return (FALSE);
3679 } else {
3680 // This policy is inside the skip
3681 return (TRUE);
3682 }
3683 }
3684 }
3685
3686 // A hard pass, flow divert, tunnel, or scope will currently block out lower policies
3687 return (TRUE);
3688 }
3689
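// A policy is unnecessary if some earlier (higher-priority) policy already in
// the bucket is at least as general -- its conditions are a subset of this
// policy's conditions with matching values -- and its result would always win.
// Skip results open a window in which otherwise-conflicting policies must
// still be kept.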
3690 static bool
3691 necp_kernel_socket_policy_is_unnecessary(struct necp_kernel_socket_policy *policy, struct necp_kernel_socket_policy **policy_array, int valid_indices)
3692 {
3693 bool can_skip = FALSE;
3694 u_int32_t highest_skip_session_order = 0;
3695 u_int32_t highest_skip_order = 0;
3696 int i;
3697 for (i = 0; i < valid_indices; i++) {
3698 struct necp_kernel_socket_policy *compared_policy = policy_array[i];
3699
3700 // For policies in a skip window, we can't mark conflicting policies as unnecessary
3701 if (can_skip) {
3702 if (highest_skip_session_order != compared_policy->session_order ||
3703 (highest_skip_order != 0 && compared_policy->order >= highest_skip_order)) {
3704 // If we've moved on to the next session, or passed the skip window
3705 highest_skip_session_order = 0;
3706 highest_skip_order = 0;
3707 can_skip = FALSE;
3708 } else {
3709 // If this policy is also a skip, it can extend the skip window
3710 if (compared_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
3711 if (compared_policy->result_parameter.skip_policy_order > highest_skip_order) {
3712 highest_skip_order = compared_policy->result_parameter.skip_policy_order;
3713 }
3714 }
3715 continue;
3716 }
3717 }
3718
3719 if (compared_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
3720 // This policy is a skip. Set the skip window accordingly
3721 can_skip = TRUE;
3722 highest_skip_session_order = compared_policy->session_order;
3723 highest_skip_order = compared_policy->result_parameter.skip_policy_order;
3724 }
3725
3726 // The result of the compared policy must be able to block out this policy result
3727 if (!necp_kernel_socket_policy_results_overlap(compared_policy, policy)) {
3728 continue;
3729 }
3730
3731 // If new policy matches All Interfaces, compared policy must also
3732 if ((policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES) && !(compared_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES)) {
3733 continue;
3734 }
3735
3736 // A default policy (no conditions) always makes lower policies unnecessary
3737 if (compared_policy->condition_mask == 0) {
3738 return (TRUE);
3739 }
3740
3741 // Compared must be more general than policy, and include only conditions within policy
3742 if ((policy->condition_mask & compared_policy->condition_mask) != compared_policy->condition_mask) {
3743 continue;
3744 }
3745
3746 // Negative conditions must match for the overlapping conditions
3747 if ((policy->condition_negated_mask & compared_policy->condition_mask) != (compared_policy->condition_negated_mask & compared_policy->condition_mask)) {
3748 continue;
3749 }
3750
3751 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_DOMAIN &&
3752 strcmp(compared_policy->cond_domain, policy->cond_domain) != 0) {
3753 continue;
3754 }
3755
3756 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT &&
3757 strcmp(compared_policy->cond_custom_entitlement, policy->cond_custom_entitlement) != 0) {
3758 continue;
3759 }
3760
3761 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID &&
3762 compared_policy->cond_account_id != policy->cond_account_id) {
3763 continue;
3764 }
3765
3766 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID &&
3767 compared_policy->cond_policy_id != policy->cond_policy_id) {
3768 continue;
3769 }
3770
3771 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID &&
3772 compared_policy->cond_app_id != policy->cond_app_id) {
3773 continue;
3774 }
3775
3776 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID &&
3777 compared_policy->cond_real_app_id != policy->cond_real_app_id) {
3778 continue;
3779 }
3780
3781 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_PID &&
3782 compared_policy->cond_pid != policy->cond_pid) {
3783 continue;
3784 }
3785
3786 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_UID &&
3787 compared_policy->cond_uid != policy->cond_uid) {
3788 continue;
3789 }
3790
3791 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE &&
3792 compared_policy->cond_bound_interface != policy->cond_bound_interface) {
3793 continue;
3794 }
3795
3796 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL &&
3797 compared_policy->cond_protocol != policy->cond_protocol) {
3798 continue;
3799 }
3800
3801 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS &&
3802 !(compared_policy->cond_traffic_class.start_tc <= policy->cond_traffic_class.start_tc &&
3803 compared_policy->cond_traffic_class.end_tc >= policy->cond_traffic_class.end_tc)) {
3804 continue;
3805 }
3806
3807 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
3808 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
3809 if (!necp_is_range_in_range((struct sockaddr *)&policy->cond_local_start, (struct sockaddr *)&policy->cond_local_end, (struct sockaddr *)&compared_policy->cond_local_start, (struct sockaddr *)&compared_policy->cond_local_end)) {
3810 continue;
3811 }
3812 } else if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
3813 if (compared_policy->cond_local_prefix > policy->cond_local_prefix ||
3814 !necp_is_addr_in_subnet((struct sockaddr *)&policy->cond_local_start, (struct sockaddr *)&compared_policy->cond_local_start, compared_policy->cond_local_prefix)) {
3815 continue;
3816 }
3817 }
3818 }
3819
3820 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
3821 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
3822 if (!necp_is_range_in_range((struct sockaddr *)&policy->cond_remote_start, (struct sockaddr *)&policy->cond_remote_end, (struct sockaddr *)&compared_policy->cond_remote_start, (struct sockaddr *)&compared_policy->cond_remote_end)) {
3823 continue;
3824 }
3825 } else if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
3826 if (compared_policy->cond_remote_prefix > policy->cond_remote_prefix ||
3827 !necp_is_addr_in_subnet((struct sockaddr *)&policy->cond_remote_start, (struct sockaddr *)&compared_policy->cond_remote_start, compared_policy->cond_remote_prefix)) {
3828 continue;
3829 }
3830 }
3831 }
3832
3833 return (TRUE);
3834 }
3835
3836 return (FALSE);
3837 }
3838
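// Rebuilds the socket-layer lookup maps from the ordered kernel policy list in
// two passes: first count how many policies land in each app-ID bucket (and in
// the app-layer map), then allocate NULL-terminated arrays and fill them,
// leaving out policies that necp_kernel_socket_policy_is_unnecessary deems
// redundant.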
3839 static bool
3840 necp_kernel_socket_policies_reprocess(void)
3841 {
3842 int app_i;
3843 int bucket_allocation_counts[NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS];
3844 int bucket_current_free_index[NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS];
3845 int app_layer_allocation_count = 0;
3846 int app_layer_current_free_index = 0;
3847 struct necp_kernel_socket_policy *kernel_policy = NULL;
3848
3849 lck_rw_assert(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
3850
3851 // Reset masks and counts to 0
3852 necp_kernel_application_policies_condition_mask = 0;
3853 necp_kernel_socket_policies_condition_mask = 0;
3854 necp_kernel_application_policies_count = 0;
3855 necp_kernel_socket_policies_count = 0;
3856 necp_kernel_socket_policies_non_app_count = 0;
3857
3858 // Reset all maps to NULL
3859 for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) {
3860 if (necp_kernel_socket_policies_map[app_i] != NULL) {
3861 FREE(necp_kernel_socket_policies_map[app_i], M_NECP);
3862 necp_kernel_socket_policies_map[app_i] = NULL;
3863 }
3864
3865 // Init counts
3866 bucket_allocation_counts[app_i] = 0;
3867 }
3868 if (necp_kernel_socket_policies_app_layer_map != NULL) {
3869 FREE(necp_kernel_socket_policies_app_layer_map, M_NECP);
3870 necp_kernel_socket_policies_app_layer_map = NULL;
3871 }
3872
3873 // Create masks and counts
3874 LIST_FOREACH(kernel_policy, &necp_kernel_socket_policies, chain) {
3875 // App layer mask/count
3876 necp_kernel_application_policies_condition_mask |= kernel_policy->condition_mask;
3877 necp_kernel_application_policies_count++;
3878 app_layer_allocation_count++;
3879
3880 // Update socket layer bucket mask/counts
3881 necp_kernel_socket_policies_condition_mask |= kernel_policy->condition_mask;
3882 necp_kernel_socket_policies_count++;
3883
3884 if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID) ||
3885 kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_APP_ID) {
3886 necp_kernel_socket_policies_non_app_count++;
3887 for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) {
3888 bucket_allocation_counts[app_i]++;
3889 }
3890 } else {
3891 bucket_allocation_counts[NECP_SOCKET_MAP_APP_ID_TO_BUCKET(kernel_policy->cond_app_id)]++;
3892 }
3893 }
3894
3895 // Allocate maps
3896 for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) {
3897 if (bucket_allocation_counts[app_i] > 0) {
3898 // Allocate a NULL-terminated array of policy pointers for each bucket
3899 MALLOC(necp_kernel_socket_policies_map[app_i], struct necp_kernel_socket_policy **, sizeof(struct necp_kernel_socket_policy *) * (bucket_allocation_counts[app_i] + 1), M_NECP, M_WAITOK);
3900 if (necp_kernel_socket_policies_map[app_i] == NULL) {
3901 goto fail;
3902 }
3903
3904 // Initialize the first entry to NULL
3905 (necp_kernel_socket_policies_map[app_i])[0] = NULL;
3906 }
3907 bucket_current_free_index[app_i] = 0;
3908 }
3909 MALLOC(necp_kernel_socket_policies_app_layer_map, struct necp_kernel_socket_policy **, sizeof(struct necp_kernel_socket_policy *) * (app_layer_allocation_count + 1), M_NECP, M_WAITOK);
3910 if (necp_kernel_socket_policies_app_layer_map == NULL) {
3911 goto fail;
3912 }
3913 necp_kernel_socket_policies_app_layer_map[0] = NULL;
3914
3915 // Fill out maps
3916 LIST_FOREACH(kernel_policy, &necp_kernel_socket_policies, chain) {
3917 // Insert pointers into map
3918 if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID) ||
3919 kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_APP_ID) {
3920 for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) {
3921 if (!necp_kernel_socket_policy_is_unnecessary(kernel_policy, necp_kernel_socket_policies_map[app_i], bucket_current_free_index[app_i])) {
3922 (necp_kernel_socket_policies_map[app_i])[(bucket_current_free_index[app_i])] = kernel_policy;
3923 bucket_current_free_index[app_i]++;
3924 (necp_kernel_socket_policies_map[app_i])[(bucket_current_free_index[app_i])] = NULL;
3925 }
3926 }
3927 } else {
3928 app_i = NECP_SOCKET_MAP_APP_ID_TO_BUCKET(kernel_policy->cond_app_id);
3929 if (!necp_kernel_socket_policy_is_unnecessary(kernel_policy, necp_kernel_socket_policies_map[app_i], bucket_current_free_index[app_i])) {
3930 (necp_kernel_socket_policies_map[app_i])[(bucket_current_free_index[app_i])] = kernel_policy;
3931 bucket_current_free_index[app_i]++;
3932 (necp_kernel_socket_policies_map[app_i])[(bucket_current_free_index[app_i])] = NULL;
3933 }
3934 }
3935
3936 if (!necp_kernel_socket_policy_is_unnecessary(kernel_policy, necp_kernel_socket_policies_app_layer_map, app_layer_current_free_index)) {
3937 necp_kernel_socket_policies_app_layer_map[app_layer_current_free_index] = kernel_policy;
3938 app_layer_current_free_index++;
3939 necp_kernel_socket_policies_app_layer_map[app_layer_current_free_index] = NULL;
3940 }
3941 }
3942 necp_kernel_socket_policies_dump_all();
3943 BUMP_KERNEL_SOCKET_POLICIES_GENERATION_COUNT();
3944 return (TRUE);
3945
3946 fail:
3947 // Free maps, and reset masks and counts to 0
3948 necp_kernel_application_policies_condition_mask = 0;
3949 necp_kernel_socket_policies_condition_mask = 0;
3950 necp_kernel_application_policies_count = 0;
3951 necp_kernel_socket_policies_count = 0;
3952 necp_kernel_socket_policies_non_app_count = 0;
3953 for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) {
3954 if (necp_kernel_socket_policies_map[app_i] != NULL) {
3955 FREE(necp_kernel_socket_policies_map[app_i], M_NECP);
3956 necp_kernel_socket_policies_map[app_i] = NULL;
3957 }
3958 }
3959 if (necp_kernel_socket_policies_app_layer_map != NULL) {
3960 FREE(necp_kernel_socket_policies_app_layer_map, M_NECP);
3961 necp_kernel_socket_policies_app_layer_map = NULL;
3962 }
3963 return (FALSE);
3964 }
3965
3966 static u_int32_t
3967 necp_get_new_string_id(void)
3968 {
3969 u_int32_t newid = 0;
3970
3971 lck_rw_assert(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
3972
3973 necp_last_string_id++;
3974 if (necp_last_string_id < 1) {
3975 necp_last_string_id = 1;
3976 }
3977
3978 newid = necp_last_string_id;
3979 if (newid == 0) {
3980 NECPLOG0(LOG_DEBUG, "Allocate string id failed.\n");
3981 return (0);
3982 }
3983
3984 return (newid);
3985 }
3986
3987 static struct necp_string_id_mapping *
3988 necp_lookup_string_to_id_locked(struct necp_string_id_mapping_list *list, char *string)
3989 {
3990 struct necp_string_id_mapping *searchentry = NULL;
3991 struct necp_string_id_mapping *foundentry = NULL;
3992
3993 LIST_FOREACH(searchentry, list, chain) {
3994 if (strcmp(searchentry->string, string) == 0) {
3995 foundentry = searchentry;
3996 break;
3997 }
3998 }
3999
4000 return (foundentry);
4001 }
4002
4003 static struct necp_string_id_mapping *
4004 necp_lookup_string_with_id_locked(struct necp_string_id_mapping_list *list, u_int32_t local_id)
4005 {
4006 struct necp_string_id_mapping *searchentry = NULL;
4007 struct necp_string_id_mapping *foundentry = NULL;
4008
4009 LIST_FOREACH(searchentry, list, chain) {
4010 if (searchentry->id == local_id) {
4011 foundentry = searchentry;
4012 break;
4013 }
4014 }
4015
4016 return (foundentry);
4017 }
4018
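// String-to-ID mappings are reference counted: creating a mapping for an
// existing string bumps its refcount, and necp_remove_string_to_id_mapping
// frees the entry once the count drops to zero.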
4019 static u_int32_t
4020 necp_create_string_to_id_mapping(struct necp_string_id_mapping_list *list, char *string)
4021 {
4022 u_int32_t string_id = 0;
4023 struct necp_string_id_mapping *existing_mapping = NULL;
4024
4025 lck_rw_assert(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
4026
4027 existing_mapping = necp_lookup_string_to_id_locked(list, string);
4028 if (existing_mapping != NULL) {
4029 string_id = existing_mapping->id;
4030 existing_mapping->refcount++;
4031 } else {
4032 struct necp_string_id_mapping *new_mapping = NULL;
4033 MALLOC(new_mapping, struct necp_string_id_mapping *, sizeof(struct necp_string_id_mapping), M_NECP, M_WAITOK);
4034 if (new_mapping != NULL) {
4035 memset(new_mapping, 0, sizeof(struct necp_string_id_mapping));
4036
4037 size_t length = strlen(string) + 1;
4038 MALLOC(new_mapping->string, char *, length, M_NECP, M_WAITOK);
4039 if (new_mapping->string != NULL) {
4040 memcpy(new_mapping->string, string, length);
4041 new_mapping->id = necp_get_new_string_id();
4042 new_mapping->refcount = 1;
4043 LIST_INSERT_HEAD(list, new_mapping, chain);
4044 string_id = new_mapping->id;
4045 } else {
4046 FREE(new_mapping, M_NECP);
4047 new_mapping = NULL;
4048 }
4049 }
4050 }
4051 return (string_id);
4052 }
4053
4054 static bool
4055 necp_remove_string_to_id_mapping(struct necp_string_id_mapping_list *list, char *string)
4056 {
4057 struct necp_string_id_mapping *existing_mapping = NULL;
4058
4059 lck_rw_assert(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
4060
4061 existing_mapping = necp_lookup_string_to_id_locked(list, string);
4062 if (existing_mapping != NULL) {
4063 if (--existing_mapping->refcount == 0) {
4064 LIST_REMOVE(existing_mapping, chain);
4065 FREE(existing_mapping->string, M_NECP);
4066 FREE(existing_mapping, M_NECP);
4067 }
4068 return (TRUE);
4069 }
4070
4071 return (FALSE);
4072 }
4073
4074 static u_int32_t
4075 necp_get_new_route_rule_id(void)
4076 {
4077 u_int32_t newid = 0;
4078
4079 lck_rw_assert(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
4080
4081 necp_last_route_rule_id++;
4082 if (necp_last_route_rule_id < 1 || necp_last_route_rule_id > UINT16_MAX) {
4083 necp_last_route_rule_id = 1;
4084 }
4085
4086 newid = necp_last_route_rule_id;
4087 if (newid == 0) {
4088 NECPLOG0(LOG_DEBUG, "Allocate route rule id failed.\n");
4089 return (0);
4090 }
4091
4092 return (newid);
4093 }
4094
4095 static u_int32_t
4096 necp_get_new_aggregate_route_rule_id(void)
4097 {
4098 u_int32_t newid = 0;
4099
4100 lck_rw_assert(&necp_route_rule_lock, LCK_RW_ASSERT_EXCLUSIVE);
4101
4102 necp_last_aggregate_route_rule_id++;
4103 if (necp_last_aggregate_route_rule_id <= UINT16_MAX) {
4104 necp_last_aggregate_route_rule_id = UINT16_MAX + 1;
4105 }
4106
4107 newid = necp_last_aggregate_route_rule_id;
4108 if (newid == 0) {
4109 NECPLOG0(LOG_DEBUG, "Allocate aggregate route rule id failed.\n");
4110 return (0);
4111 }
4112
4113 return (newid);
4114 }
4115
4116 static struct necp_route_rule *
4117 necp_lookup_route_rule_locked(struct necp_route_rule_list *list, u_int32_t route_rule_id)
4118 {
4119 struct necp_route_rule *searchentry = NULL;
4120 struct necp_route_rule *foundentry = NULL;
4121
4122 LIST_FOREACH(searchentry, list, chain) {
4123 if (searchentry->id == route_rule_id) {
4124 foundentry = searchentry;
4125 break;
4126 }
4127 }
4128
4129 return (foundentry);
4130 }
4131
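// Find an existing route rule whose actions and exception interface entries match
// the given contents. The exception lists are compared as sets (order does not
// matter), and both lists must contain the same number of valid entries.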
4132 static struct necp_route_rule *
4133 necp_lookup_route_rule_by_contents_locked(struct necp_route_rule_list *list, u_int32_t default_action, u_int8_t cellular_action, u_int8_t wifi_action, u_int8_t wired_action, u_int8_t expensive_action, u_int32_t *if_indices, u_int8_t *if_actions)
4134 {
4135 struct necp_route_rule *searchentry = NULL;
4136 struct necp_route_rule *foundentry = NULL;
4137
4138 LIST_FOREACH(searchentry, list, chain) {
4139 if (searchentry->default_action == default_action &&
4140 searchentry->cellular_action == cellular_action &&
4141 searchentry->wifi_action == wifi_action &&
4142 searchentry->wired_action == wired_action &&
4143 searchentry->expensive_action == expensive_action) {
4144 bool match_failed = FALSE;
4145 size_t index_a = 0;
4146 size_t index_b = 0;
4147 size_t count_a = 0;
4148 size_t count_b = 0;
4149 for (index_a = 0; index_a < MAX_ROUTE_RULE_INTERFACES; index_a++) {
4150 bool found_index = FALSE;
4151 if (searchentry->exception_if_indices[index_a] == 0) {
4152 break;
4153 }
4154 count_a++;
4155 for (index_b = 0; index_b < MAX_ROUTE_RULE_INTERFACES; index_b++) {
4156 if (if_indices[index_b] == 0) {
4157 break;
4158 }
4159 if (index_b >= count_b) {
4160 count_b = index_b + 1;
4161 }
4162 if (searchentry->exception_if_indices[index_a] == if_indices[index_b] &&
4163 searchentry->exception_if_actions[index_a] == if_actions[index_b]) {
4164 found_index = TRUE;
4165 break;
4166 }
4167 }
4168 if (!found_index) {
4169 match_failed = TRUE;
4170 break;
4171 }
4172 }
4173 if (!match_failed && count_a == count_b) {
4174 foundentry = searchentry;
4175 break;
4176 }
4177 }
4178 }
4179
4180 return (foundentry);
4181 }
4182
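// Parse a packed TLV array of route rules into a single necp_route_rule entry.
// Each TLV value is decoded with the necp_policy_condition_get_* helpers into a
// rule type, flags, and an optional interface name. Flag-only rules set the
// default/cellular/wifi/wired/expensive actions; rules that name an interface
// become exception entries (up to MAX_ROUTE_RULE_INTERFACES). Rules with
// identical contents are deduplicated and share a refcount.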
4183 static u_int32_t
4184 necp_create_route_rule(struct necp_route_rule_list *list, u_int8_t *route_rules_array, u_int32_t route_rules_array_size)
4185 {
4186 size_t offset = 0;
4187 u_int32_t route_rule_id = 0;
4188 struct necp_route_rule *existing_rule = NULL;
4189 u_int32_t default_action = NECP_ROUTE_RULE_ALLOW_INTERFACE;
4190 u_int8_t cellular_action = NECP_ROUTE_RULE_NONE;
4191 u_int8_t wifi_action = NECP_ROUTE_RULE_NONE;
4192 u_int8_t wired_action = NECP_ROUTE_RULE_NONE;
4193 u_int8_t expensive_action = NECP_ROUTE_RULE_NONE;
4194 u_int32_t if_indices[MAX_ROUTE_RULE_INTERFACES];
4195 size_t num_valid_indices = 0;
4196 memset(&if_indices, 0, sizeof(if_indices));
4197 u_int8_t if_actions[MAX_ROUTE_RULE_INTERFACES];
4198 memset(&if_actions, 0, sizeof(if_actions));
4199
4200 lck_rw_assert(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
4201
4202 if (route_rules_array == NULL || route_rules_array_size == 0) {
4203 return (0);
4204 }
4205
4206 // Process rules
4207 while (offset < route_rules_array_size) {
4208 ifnet_t rule_interface = NULL;
4209 char interface_name[IFXNAMSIZ];
4210 u_int32_t length = 0;
4211 u_int8_t *value = necp_buffer_get_tlv_value(route_rules_array, offset, &length);
4212
4213 u_int8_t rule_type = necp_policy_condition_get_type_from_buffer(value, length);
4214 u_int8_t rule_flags = necp_policy_condition_get_flags_from_buffer(value, length);
4215 u_int32_t rule_length = necp_policy_condition_get_value_length_from_buffer(value, length);
4216 u_int8_t *rule_value = necp_policy_condition_get_value_pointer_from_buffer(value, length);
4217
4218 if (rule_type == NECP_ROUTE_RULE_NONE) {
4219 // Don't allow an explicit rule to use the None action
offset += sizeof(u_int8_t) + sizeof(u_int32_t) + length; // advance past this TLV so the loop cannot stall
4220 continue;
4221 }
4222
4223 if (rule_length == 0) {
4224 if (rule_flags & NECP_ROUTE_RULE_FLAG_CELLULAR) {
4225 cellular_action = rule_type;
4226 }
4227 if (rule_flags & NECP_ROUTE_RULE_FLAG_WIFI) {
4228 wifi_action = rule_type;
4229 }
4230 if (rule_flags & NECP_ROUTE_RULE_FLAG_WIRED) {
4231 wired_action = rule_type;
4232 }
4233 if (rule_flags & NECP_ROUTE_RULE_FLAG_EXPENSIVE) {
4234 expensive_action = rule_type;
4235 }
4236 if (rule_flags == 0) {
4237 default_action = rule_type;
4238 }
4239 offset += sizeof(u_int8_t) + sizeof(u_int32_t) + length;
4240 continue;
4241 }
4242
4243 if (num_valid_indices >= MAX_ROUTE_RULE_INTERFACES) {
4244 offset += sizeof(u_int8_t) + sizeof(u_int32_t) + length;
4245 continue;
4246 }
4247
4248 if (rule_length <= IFXNAMSIZ) {
4249 memcpy(interface_name, rule_value, rule_length);
4250 interface_name[rule_length - 1] = 0; // Make sure the string is NULL terminated
4251 if (ifnet_find_by_name(interface_name, &rule_interface) == 0) {
4252 if_actions[num_valid_indices] = rule_type;
4253 if_indices[num_valid_indices++] = rule_interface->if_index;
4254 ifnet_release(rule_interface);
4255 }
4256 }
4257 offset += sizeof(u_int8_t) + sizeof(u_int32_t) + length;
4258 }
4259
4260 existing_rule = necp_lookup_route_rule_by_contents_locked(list, default_action, cellular_action, wifi_action, wired_action, expensive_action, if_indices, if_actions);
4261 if (existing_rule != NULL) {
4262 route_rule_id = existing_rule->id;
4263 existing_rule->refcount++;
4264 } else {
4265 struct necp_route_rule *new_rule = NULL;
4266 MALLOC(new_rule, struct necp_route_rule *, sizeof(struct necp_route_rule), M_NECP, M_WAITOK);
4267 if (new_rule != NULL) {
4268 memset(new_rule, 0, sizeof(struct necp_route_rule));
4269 route_rule_id = new_rule->id = necp_get_new_route_rule_id();
4270 new_rule->default_action = default_action;
4271 new_rule->cellular_action = cellular_action;
4272 new_rule->wifi_action = wifi_action;
4273 new_rule->wired_action = wired_action;
4274 new_rule->expensive_action = expensive_action;
4275 memcpy(&new_rule->exception_if_indices, &if_indices, sizeof(if_indices));
4276 memcpy(&new_rule->exception_if_actions, &if_actions, sizeof(if_actions));
4277 new_rule->refcount = 1;
4278 LIST_INSERT_HEAD(list, new_rule, chain);
4279 }
4280 }
4281 return (route_rule_id);
4282 }
4283
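// When an individual route rule is removed, also remove any aggregate route
// rules whose id list references it.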
4284 static void
4285 necp_remove_aggregate_route_rule_for_id(u_int32_t rule_id)
4286 {
4287 if (rule_id) {
4288 lck_rw_lock_exclusive(&necp_route_rule_lock);
4289
4290 struct necp_aggregate_route_rule *existing_rule = NULL;
4291 struct necp_aggregate_route_rule *tmp_rule = NULL;
4292
4293 LIST_FOREACH_SAFE(existing_rule, &necp_aggregate_route_rules, chain, tmp_rule) {
4294 int index = 0;
4295 for (index = 0; index < MAX_AGGREGATE_ROUTE_RULES; index++) {
4296 u_int32_t route_rule_id = existing_rule->rule_ids[index];
4297 if (route_rule_id == rule_id) {
4298 LIST_REMOVE(existing_rule, chain);
4299 FREE(existing_rule, M_NECP);
4300 break;
4301 }
4302 }
4303 }
4304
4305 lck_rw_done(&necp_route_rule_lock);
4306 }
4307 }
4308
4309 static bool
4310 necp_remove_route_rule(struct necp_route_rule_list *list, u_int32_t route_rule_id)
4311 {
4312 struct necp_route_rule *existing_rule = NULL;
4313
4314 lck_rw_assert(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
4315
4316 existing_rule = necp_lookup_route_rule_locked(list, route_rule_id);
4317 if (existing_rule != NULL) {
4318 if (--existing_rule->refcount == 0) {
4319 necp_remove_aggregate_route_rule_for_id(existing_rule->id);
4320 LIST_REMOVE(existing_rule, chain);
4321 FREE(existing_rule, M_NECP);
4322 }
4323 return (TRUE);
4324 }
4325
4326 return (FALSE);
4327 }
4328
4329 static struct necp_aggregate_route_rule *
4330 necp_lookup_aggregate_route_rule_locked(u_int32_t route_rule_id)
4331 {
4332 struct necp_aggregate_route_rule *searchentry = NULL;
4333 struct necp_aggregate_route_rule *foundentry = NULL;
4334
4335 lck_rw_lock_shared(&necp_route_rule_lock);
4336
4337 LIST_FOREACH(searchentry, &necp_aggregate_route_rules, chain) {
4338 if (searchentry->id == route_rule_id) {
4339 foundentry = searchentry;
4340 break;
4341 }
4342 }
4343
4344 lck_rw_done(&necp_route_rule_lock);
4345
4346 return (foundentry);
4347 }
4348
4349 static u_int32_t
4350 necp_create_aggregate_route_rule(u_int32_t *rule_ids)
4351 {
4352 u_int32_t aggregate_route_rule_id = 0;
4353 struct necp_aggregate_route_rule *new_rule = NULL;
4354 struct necp_aggregate_route_rule *existing_rule = NULL;
4355
4356 LIST_FOREACH(existing_rule, &necp_aggregate_route_rules, chain) {
4357 if (memcmp(existing_rule->rule_ids, rule_ids, (sizeof(u_int32_t) * MAX_AGGREGATE_ROUTE_RULES)) == 0) {
4358 return (existing_rule->id);
4359 }
4360 }
4361
4362 lck_rw_lock_exclusive(&necp_route_rule_lock);
4363
4364 LIST_FOREACH(existing_rule, &necp_aggregate_route_rules, chain) {
4365 // Re-check, in case another thread created the rule while we were waiting for the lock
4366 if (memcmp(existing_rule->rule_ids, rule_ids, (sizeof(u_int32_t) * MAX_AGGREGATE_ROUTE_RULES)) == 0) {
4367 lck_rw_done(&necp_route_rule_lock);
4368 return (existing_rule->id);
4369 }
4370 }
4371
4372 MALLOC(new_rule, struct necp_aggregate_route_rule *, sizeof(struct necp_aggregate_route_rule), M_NECP, M_WAITOK);
4373 if (new_rule != NULL) {
4374 memset(new_rule, 0, sizeof(struct necp_aggregate_route_rule));
4375 aggregate_route_rule_id = new_rule->id = necp_get_new_aggregate_route_rule_id();
4377 memcpy(new_rule->rule_ids, rule_ids, (sizeof(u_int32_t) * MAX_AGGREGATE_ROUTE_RULES));
4378 LIST_INSERT_HEAD(&necp_aggregate_route_rules, new_rule, chain);
4379 }
4380 lck_rw_done(&necp_route_rule_lock);
4381
4382 return (aggregate_route_rule_id);
4383 }
4384
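// UUID mapping ids: NECP_NULL_SERVICE_ID is reserved for the null service UUID,
// so dynamically allocated uuid ids start at NECP_NULL_SERVICE_ID + 1.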
4385 #define NECP_NULL_SERVICE_ID 1
4386 static u_int32_t
4387 necp_get_new_uuid_id(void)
4388 {
4389 u_int32_t newid = 0;
4390
4391 lck_rw_assert(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
4392
4393 necp_last_uuid_id++;
4394 if (necp_last_uuid_id < (NECP_NULL_SERVICE_ID + 1)) {
4395 necp_last_uuid_id = (NECP_NULL_SERVICE_ID + 1);
4396 }
4397
4398 newid = necp_last_uuid_id;
4399 if (newid == 0) {
4400 NECPLOG0(LOG_DEBUG, "Failed to allocate a uuid id.\n");
4401 return (0);
4402 }
4403
4404 return (newid);
4405 }
4406
4407 static struct necp_uuid_id_mapping *
4408 necp_uuid_lookup_app_id_locked(uuid_t uuid)
4409 {
4410 struct necp_uuid_id_mapping *searchentry = NULL;
4411 struct necp_uuid_id_mapping *foundentry = NULL;
4412
4413 LIST_FOREACH(searchentry, APPUUIDHASH(uuid), chain) {
4414 if (uuid_compare(searchentry->uuid, uuid) == 0) {
4415 foundentry = searchentry;
4416 break;
4417 }
4418 }
4419
4420 return (foundentry);
4421 }
4422
4423 static struct necp_uuid_id_mapping *
4424 necp_uuid_lookup_uuid_with_app_id_locked(u_int32_t local_id)
4425 {
4426 struct necp_uuid_id_mapping *searchentry = NULL;
4427 struct necp_uuid_id_mapping *foundentry = NULL;
4428
4429 struct necp_uuid_id_mapping_head *uuid_list_head = NULL;
4430 for (uuid_list_head = &necp_uuid_app_id_hashtbl[necp_uuid_app_id_hash_num_buckets - 1]; uuid_list_head >= necp_uuid_app_id_hashtbl; uuid_list_head--) {
4431 LIST_FOREACH(searchentry, uuid_list_head, chain) {
4432 if (searchentry->id == local_id) {
4433 foundentry = searchentry;
4434 break;
4435 }
4436 }
4437 }
4438
4439 return (foundentry);
4440 }
4441
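// Look up or allocate a refcounted app UUID-to-id mapping. refcount counts policy
// references, while table_refcount tracks whether the UUID should also appear in
// the proc_uuid_policy kernel table. *allocated_mapping is set to TRUE when a new
// entry is inserted.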
4442 static u_int32_t
4443 necp_create_uuid_app_id_mapping(uuid_t uuid, bool *allocated_mapping, bool uuid_policy_table)
4444 {
4445 u_int32_t local_id = 0;
4446 struct necp_uuid_id_mapping *existing_mapping = NULL;
4447
4448 lck_rw_assert(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
4449
4450 if (allocated_mapping) {
4451 *allocated_mapping = FALSE;
4452 }
4453
4454 existing_mapping = necp_uuid_lookup_app_id_locked(uuid);
4455 if (existing_mapping != NULL) {
4456 local_id = existing_mapping->id;
4457 existing_mapping->refcount++;
4458 if (uuid_policy_table) {
4459 existing_mapping->table_refcount++;
4460 }
4461 } else {
4462 struct necp_uuid_id_mapping *new_mapping = NULL;
4463 MALLOC(new_mapping, struct necp_uuid_id_mapping *, sizeof(*new_mapping), M_NECP, M_WAITOK);
4464 if (new_mapping != NULL) {
4465 uuid_copy(new_mapping->uuid, uuid);
4466 new_mapping->id = necp_get_new_uuid_id();
4467 new_mapping->refcount = 1;
4468 if (uuid_policy_table) {
4469 new_mapping->table_refcount = 1;
4470 } else {
4471 new_mapping->table_refcount = 0;
4472 }
4473
4474 LIST_INSERT_HEAD(APPUUIDHASH(uuid), new_mapping, chain);
4475
4476 if (allocated_mapping) {
4477 *allocated_mapping = TRUE;
4478 }
4479
4480 local_id = new_mapping->id;
4481 }
4482 }
4483
4484 return (local_id);
4485 }
4486
4487 static bool
4488 necp_remove_uuid_app_id_mapping(uuid_t uuid, bool *removed_mapping, bool uuid_policy_table)
4489 {
4490 struct necp_uuid_id_mapping *existing_mapping = NULL;
4491
4492 lck_rw_assert(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
4493
4494 if (removed_mapping) {
4495 *removed_mapping = FALSE;
4496 }
4497
4498 existing_mapping = necp_uuid_lookup_app_id_locked(uuid);
4499 if (existing_mapping != NULL) {
4500 if (uuid_policy_table) {
4501 existing_mapping->table_refcount--;
4502 }
4503 if (--existing_mapping->refcount == 0) {
4504 LIST_REMOVE(existing_mapping, chain);
4505 FREE(existing_mapping, M_NECP);
4506 if (removed_mapping) {
4507 *removed_mapping = TRUE;
4508 }
4509 }
4510 return (TRUE);
4511 }
4512
4513 return (FALSE);
4514 }
4515
4516 static struct necp_uuid_id_mapping *
4517 necp_uuid_get_null_service_id_mapping(void)
4518 {
4519 static struct necp_uuid_id_mapping null_mapping;
4520 uuid_clear(null_mapping.uuid);
4521 null_mapping.id = NECP_NULL_SERVICE_ID;
4522
4523 return (&null_mapping);
4524 }
4525
4526 static struct necp_uuid_id_mapping *
4527 necp_uuid_lookup_service_id_locked(uuid_t uuid)
4528 {
4529 struct necp_uuid_id_mapping *searchentry = NULL;
4530 struct necp_uuid_id_mapping *foundentry = NULL;
4531
4532 if (uuid_is_null(uuid)) {
4533 return necp_uuid_get_null_service_id_mapping();
4534 }
4535
4536 LIST_FOREACH(searchentry, &necp_uuid_service_id_list, chain) {
4537 if (uuid_compare(searchentry->uuid, uuid) == 0) {
4538 foundentry = searchentry;
4539 break;
4540 }
4541 }
4542
4543 return (foundentry);
4544 }
4545
4546 static struct necp_uuid_id_mapping *
4547 necp_uuid_lookup_uuid_with_service_id_locked(u_int32_t local_id)
4548 {
4549 struct necp_uuid_id_mapping *searchentry = NULL;
4550 struct necp_uuid_id_mapping *foundentry = NULL;
4551
4552 if (local_id == NECP_NULL_SERVICE_ID) {
4553 return necp_uuid_get_null_service_id_mapping();
4554 }
4555
4556 LIST_FOREACH(searchentry, &necp_uuid_service_id_list, chain) {
4557 if (searchentry->id == local_id) {
4558 foundentry = searchentry;
4559 break;
4560 }
4561 }
4562
4563 return (foundentry);
4564 }
4565
4566 static u_int32_t
4567 necp_create_uuid_service_id_mapping(uuid_t uuid)
4568 {
4569 u_int32_t local_id = 0;
4570 struct necp_uuid_id_mapping *existing_mapping = NULL;
4571
4572 if (uuid_is_null(uuid)) {
4573 return (NECP_NULL_SERVICE_ID);
4574 }
4575
4576 lck_rw_assert(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
4577
4578 existing_mapping = necp_uuid_lookup_service_id_locked(uuid);
4579 if (existing_mapping != NULL) {
4580 local_id = existing_mapping->id;
4581 existing_mapping->refcount++;
4582 } else {
4583 struct necp_uuid_id_mapping *new_mapping = NULL;
4584 MALLOC(new_mapping, struct necp_uuid_id_mapping *, sizeof(*new_mapping), M_NECP, M_WAITOK);
4585 if (new_mapping != NULL) {
4586 uuid_copy(new_mapping->uuid, uuid);
4587 new_mapping->id = necp_get_new_uuid_id();
4588 new_mapping->refcount = 1;
4589
4590 LIST_INSERT_HEAD(&necp_uuid_service_id_list, new_mapping, chain);
4591
4592 local_id = new_mapping->id;
4593 }
4594 }
4595
4596 return (local_id);
4597 }
4598
4599 static bool
4600 necp_remove_uuid_service_id_mapping(uuid_t uuid)
4601 {
4602 struct necp_uuid_id_mapping *existing_mapping = NULL;
4603
4604 if (uuid_is_null(uuid)) {
4605 return (TRUE);
4606 }
4607
4608 lck_rw_assert(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
4609
4610 existing_mapping = necp_uuid_lookup_service_id_locked(uuid); // service mappings live on necp_uuid_service_id_list, not in the app hash table
4611 if (existing_mapping != NULL) {
4612 if (--existing_mapping->refcount == 0) {
4613 LIST_REMOVE(existing_mapping, chain);
4614 FREE(existing_mapping, M_NECP);
4615 }
4616 return (TRUE);
4617 }
4618
4619 return (FALSE);
4620 }
4621
4622
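// Rebuild the proc_uuid_policy kernel table when the app UUID mappings are dirty:
// clear the NECP app policy entries, then re-add every mapping that still has a
// positive table_refcount. Returns FALSE if the table could not be cleared.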
4623 static bool
4624 necp_kernel_socket_policies_update_uuid_table(void)
4625 {
4626 lck_rw_assert(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
4627
4628 if (necp_uuid_app_id_mappings_dirty) {
4629 if (proc_uuid_policy_kernel(PROC_UUID_POLICY_OPERATION_CLEAR, NULL, PROC_UUID_NECP_APP_POLICY) < 0) {
4630 NECPLOG0(LOG_DEBUG, "Error clearing uuids from policy table\n");
4631 return (FALSE);
4632 }
4633
4634 if (necp_num_uuid_app_id_mappings > 0) {
4635 struct necp_uuid_id_mapping_head *uuid_list_head = NULL;
4636 for (uuid_list_head = &necp_uuid_app_id_hashtbl[necp_uuid_app_id_hash_num_buckets - 1]; uuid_list_head >= necp_uuid_app_id_hashtbl; uuid_list_head--) {
4637 struct necp_uuid_id_mapping *mapping = NULL;
4638 LIST_FOREACH(mapping, uuid_list_head, chain) {
4639 if (mapping->table_refcount > 0 &&
4640 proc_uuid_policy_kernel(PROC_UUID_POLICY_OPERATION_ADD, mapping->uuid, PROC_UUID_NECP_APP_POLICY) < 0) {
4641 NECPLOG0(LOG_DEBUG, "Error adding uuid to policy table\n");
4642 }
4643 }
4644 }
4645 }
4646
4647 necp_uuid_app_id_mappings_dirty = FALSE;
4648 }
4649
4650 return (TRUE);
4651 }
4652
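// Add a kernel IP output policy. The requested condition mask is limited to
// NECP_KERNEL_VALID_IP_OUTPUT_CONDITIONS and mutually exclusive bits are resolved
// (ALL_INTERFACES drops BOUND_INTERFACE; an address range END drops the matching
// PREFIX). The policy is inserted sorted by session order, order, and suborder.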
4653 #define NECP_KERNEL_VALID_IP_OUTPUT_CONDITIONS (NECP_KERNEL_CONDITION_ALL_INTERFACES | NECP_KERNEL_CONDITION_BOUND_INTERFACE | NECP_KERNEL_CONDITION_PROTOCOL | NECP_KERNEL_CONDITION_LOCAL_START | NECP_KERNEL_CONDITION_LOCAL_END | NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_REMOTE_START | NECP_KERNEL_CONDITION_REMOTE_END | NECP_KERNEL_CONDITION_REMOTE_PREFIX | NECP_KERNEL_CONDITION_POLICY_ID | NECP_KERNEL_CONDITION_LAST_INTERFACE)
4654 static necp_kernel_policy_id
4655 necp_kernel_ip_output_policy_add(necp_policy_id parent_policy_id, necp_policy_order order, necp_policy_order suborder, u_int32_t session_order, int session_pid, u_int32_t condition_mask, u_int32_t condition_negated_mask, necp_kernel_policy_id cond_policy_id, ifnet_t cond_bound_interface, u_int32_t cond_last_interface_index, u_int16_t cond_protocol, union necp_sockaddr_union *cond_local_start, union necp_sockaddr_union *cond_local_end, u_int8_t cond_local_prefix, union necp_sockaddr_union *cond_remote_start, union necp_sockaddr_union *cond_remote_end, u_int8_t cond_remote_prefix, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter)
4656 {
4657 struct necp_kernel_ip_output_policy *new_kernel_policy = NULL;
4658 struct necp_kernel_ip_output_policy *tmp_kernel_policy = NULL;
4659
4660 MALLOC_ZONE(new_kernel_policy, struct necp_kernel_ip_output_policy *, sizeof(*new_kernel_policy), M_NECP_IP_POLICY, M_WAITOK);
4661 if (new_kernel_policy == NULL) {
4662 goto done;
4663 }
4664
4665 memset(new_kernel_policy, 0, sizeof(*new_kernel_policy));
4666 new_kernel_policy->parent_policy_id = parent_policy_id;
4667 new_kernel_policy->id = necp_kernel_policy_get_new_id();
4668 new_kernel_policy->suborder = suborder;
4669 new_kernel_policy->order = order;
4670 new_kernel_policy->session_order = session_order;
4671 new_kernel_policy->session_pid = session_pid;
4672
4673 // Sanitize condition mask
4674 new_kernel_policy->condition_mask = (condition_mask & NECP_KERNEL_VALID_IP_OUTPUT_CONDITIONS);
4675 if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES) && (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE)) {
4676 new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_BOUND_INTERFACE;
4677 }
4678 if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) && (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX)) {
4679 new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_LOCAL_PREFIX;
4680 }
4681 if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) && (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX)) {
4682 new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_REMOTE_PREFIX;
4683 }
4684 new_kernel_policy->condition_negated_mask = condition_negated_mask & new_kernel_policy->condition_mask;
4685
4686 // Set condition values
4687 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID) {
4688 new_kernel_policy->cond_policy_id = cond_policy_id;
4689 }
4690 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
4691 if (cond_bound_interface) {
4692 ifnet_reference(cond_bound_interface);
4693 }
4694 new_kernel_policy->cond_bound_interface = cond_bound_interface;
4695 }
4696 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LAST_INTERFACE) {
4697 new_kernel_policy->cond_last_interface_index = cond_last_interface_index;
4698 }
4699 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
4700 new_kernel_policy->cond_protocol = cond_protocol;
4701 }
4702 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
4703 memcpy(&new_kernel_policy->cond_local_start, cond_local_start, cond_local_start->sa.sa_len);
4704 }
4705 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
4706 memcpy(&new_kernel_policy->cond_local_end, cond_local_end, cond_local_end->sa.sa_len);
4707 }
4708 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
4709 new_kernel_policy->cond_local_prefix = cond_local_prefix;
4710 }
4711 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
4712 memcpy(&new_kernel_policy->cond_remote_start, cond_remote_start, cond_remote_start->sa.sa_len);
4713 }
4714 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
4715 memcpy(&new_kernel_policy->cond_remote_end, cond_remote_end, cond_remote_end->sa.sa_len);
4716 }
4717 if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
4718 new_kernel_policy->cond_remote_prefix = cond_remote_prefix;
4719 }
4720
4721 new_kernel_policy->result = result;
4722 memcpy(&new_kernel_policy->result_parameter, &result_parameter, sizeof(result_parameter));
4723
4724 if (necp_debug) {
4725 NECPLOG(LOG_DEBUG, "Added kernel policy: ip output, id=%d, mask=%x\n", new_kernel_policy->id, new_kernel_policy->condition_mask);
4726 }
4727 LIST_INSERT_SORTED_THRICE_ASCENDING(&necp_kernel_ip_output_policies, new_kernel_policy, chain, session_order, order, suborder, tmp_kernel_policy);
4728 done:
4729 return (new_kernel_policy ? new_kernel_policy->id : 0);
4730 }
4731
4732 static struct necp_kernel_ip_output_policy *
4733 necp_kernel_ip_output_policy_find(necp_kernel_policy_id policy_id)
4734 {
4735 struct necp_kernel_ip_output_policy *kernel_policy = NULL;
4736 struct necp_kernel_ip_output_policy *tmp_kernel_policy = NULL;
4737
4738 if (policy_id == 0) {
4739 return (NULL);
4740 }
4741
4742 LIST_FOREACH_SAFE(kernel_policy, &necp_kernel_ip_output_policies, chain, tmp_kernel_policy) {
4743 if (kernel_policy->id == policy_id) {
4744 return (kernel_policy);
4745 }
4746 }
4747
4748 return (NULL);
4749 }
4750
4751 static bool
4752 necp_kernel_ip_output_policy_delete(necp_kernel_policy_id policy_id)
4753 {
4754 struct necp_kernel_ip_output_policy *policy = NULL;
4755
4756 lck_rw_assert(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
4757
4758 policy = necp_kernel_ip_output_policy_find(policy_id);
4759 if (policy) {
4760 LIST_REMOVE(policy, chain);
4761
4762 if (policy->cond_bound_interface) {
4763 ifnet_release(policy->cond_bound_interface);
4764 policy->cond_bound_interface = NULL;
4765 }
4766
4767 FREE_ZONE(policy, sizeof(*policy), M_NECP_IP_POLICY);
4768 return (TRUE);
4769 }
4770
4771 return (FALSE);
4772 }
4773
4774 static void
4775 necp_kernel_ip_output_policies_dump_all(void)
4776 {
4777 if (necp_debug) {
4778 struct necp_kernel_ip_output_policy *policy = NULL;
4779 int policy_i;
4780 int id_i;
4781 char result_string[MAX_RESULT_STRING_LEN];
4782 char proc_name_string[MAXCOMLEN + 1];
4783 memset(result_string, 0, MAX_RESULT_STRING_LEN);
4784 memset(proc_name_string, 0, MAXCOMLEN + 1);
4785
4786 NECPLOG0(LOG_DEBUG, "NECP IP Output Policies:\n");
4787 NECPLOG0(LOG_DEBUG, "-----------\n");
4788 for (id_i = 0; id_i < NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS; id_i++) {
4789 NECPLOG(LOG_DEBUG, " ID Bucket: %d\n", id_i);
4790 for (policy_i = 0; necp_kernel_ip_output_policies_map[id_i] != NULL && (necp_kernel_ip_output_policies_map[id_i])[policy_i] != NULL; policy_i++) {
4791 policy = (necp_kernel_ip_output_policies_map[id_i])[policy_i];
4792 proc_name(policy->session_pid, proc_name_string, MAXCOMLEN);
4793 NECPLOG(LOG_DEBUG, "\t%3d. Policy ID: %5d\tProcess: %10.10s\tOrder: %04d.%04d.%d\tMask: %5x\tResult: %s\n", policy_i, policy->id, proc_name_string, policy->session_order, policy->order, policy->suborder, policy->condition_mask, necp_get_result_description(result_string, policy->result, policy->result_parameter));
4794 }
4795 NECPLOG0(LOG_DEBUG, "-----------\n");
4796 }
4797 }
4798 }
4799
4800 static inline bool
4801 necp_kernel_ip_output_policy_results_overlap(struct necp_kernel_ip_output_policy *upper_policy, struct necp_kernel_ip_output_policy *lower_policy)
4802 {
4803 if (upper_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
4804 if (upper_policy->session_order != lower_policy->session_order) {
4805 // A skip cannot override a policy of a different session
4806 return (FALSE);
4807 } else {
4808 if (upper_policy->result_parameter.skip_policy_order == 0 ||
4809 lower_policy->order >= upper_policy->result_parameter.skip_policy_order) {
4810 // This policy is beyond the skip
4811 return (FALSE);
4812 } else {
4813 // This policy is inside the skip
4814 return (TRUE);
4815 }
4816 }
4817 }
4818
4819 // All other IP Output policy results (drop, tunnel, hard pass) currently overlap
4820 return (TRUE);
4821 }
4822
4823 static bool
4824 necp_kernel_ip_output_policy_is_unnecessary(struct necp_kernel_ip_output_policy *policy, struct necp_kernel_ip_output_policy **policy_array, int valid_indices)
4825 {
4826 bool can_skip = FALSE;
4827 u_int32_t highest_skip_session_order = 0;
4828 u_int32_t highest_skip_order = 0;
4829 int i;
4830 for (i = 0; i < valid_indices; i++) {
4831 struct necp_kernel_ip_output_policy *compared_policy = policy_array[i];
4832
4833 // For policies in a skip window, we can't mark conflicting policies as unnecessary
4834 if (can_skip) {
4835 if (highest_skip_session_order != compared_policy->session_order ||
4836 (highest_skip_order != 0 && compared_policy->order >= highest_skip_order)) {
4837 // We've moved on to the next session or passed the skip window; reset the skip state
4838 highest_skip_session_order = 0;
4839 highest_skip_order = 0;
4840 can_skip = FALSE;
4841 } else {
4842 // If this policy is also a skip, it can increase the skip window
4843 if (compared_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
4844 if (compared_policy->result_parameter.skip_policy_order > highest_skip_order) {
4845 highest_skip_order = compared_policy->result_parameter.skip_policy_order;
4846 }
4847 }
4848 continue;
4849 }
4850 }
4851
4852 if (compared_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
4853 // This policy is a skip. Set the skip window accordingly
4854 can_skip = TRUE;
4855 highest_skip_session_order = compared_policy->session_order;
4856 highest_skip_order = compared_policy->result_parameter.skip_policy_order;
4857 }
4858
4859 // The result of the compared policy must be able to block out this policy result
4860 if (!necp_kernel_ip_output_policy_results_overlap(compared_policy, policy)) {
4861 continue;
4862 }
4863
4864 // If the new policy matches All Interfaces, the compared policy must as well
4865 if ((policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES) && !(compared_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES)) {
4866 continue;
4867 }
4868
4869 // A default policy (no conditions) always makes lower policies unnecessary
4870 if (compared_policy->condition_mask == 0) {
4871 return (TRUE);
4872 }
4873
4874 // The compared policy must be more general than this policy, using only conditions that this policy also has
4875 if ((policy->condition_mask & compared_policy->condition_mask) != compared_policy->condition_mask) {
4876 continue;
4877 }
4878
4879 // Negative conditions must match for the overlapping conditions
4880 if ((policy->condition_negated_mask & compared_policy->condition_mask) != (compared_policy->condition_negated_mask & compared_policy->condition_mask)) {
4881 continue;
4882 }
4883
4884 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID &&
4885 compared_policy->cond_policy_id != policy->cond_policy_id) {
4886 continue;
4887 }
4888
4889 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE &&
4890 compared_policy->cond_bound_interface != policy->cond_bound_interface) {
4891 continue;
4892 }
4893
4894 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL &&
4895 compared_policy->cond_protocol != policy->cond_protocol) {
4896 continue;
4897 }
4898
4899 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
4900 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
4901 if (!necp_is_range_in_range((struct sockaddr *)&policy->cond_local_start, (struct sockaddr *)&policy->cond_local_end, (struct sockaddr *)&compared_policy->cond_local_start, (struct sockaddr *)&compared_policy->cond_local_end)) {
4902 continue;
4903 }
4904 } else if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
4905 if (compared_policy->cond_local_prefix > policy->cond_local_prefix ||
4906 !necp_is_addr_in_subnet((struct sockaddr *)&policy->cond_local_start, (struct sockaddr *)&compared_policy->cond_local_start, compared_policy->cond_local_prefix)) {
4907 continue;
4908 }
4909 }
4910 }
4911
4912 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
4913 if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
4914 if (!necp_is_range_in_range((struct sockaddr *)&policy->cond_remote_start, (struct sockaddr *)&policy->cond_remote_end, (struct sockaddr *)&compared_policy->cond_remote_start, (struct sockaddr *)&compared_policy->cond_remote_end)) {
4915 continue;
4916 }
4917 } else if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
4918 if (compared_policy->cond_remote_prefix > policy->cond_remote_prefix ||
4919 !necp_is_addr_in_subnet((struct sockaddr *)&policy->cond_remote_start, (struct sockaddr *)&compared_policy->cond_remote_start, compared_policy->cond_remote_prefix)) {
4920 continue;
4921 }
4922 }
4923 }
4924
4925 return (TRUE);
4926 }
4927
4928 return (FALSE);
4929 }
4930
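// Rebuild the bucketed IP output policy map. Policies conditioned on a specific
// policy id go only into that id's bucket; all other policies are copied into
// every bucket. Entries that necp_kernel_ip_output_policy_is_unnecessary() deems
// shadowed by earlier policies are omitted. Each bucket is a NULL-terminated
// array sized by a first counting pass.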
4931 static bool
4932 necp_kernel_ip_output_policies_reprocess(void)
4933 {
4934 int i;
4935 int bucket_allocation_counts[NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS];
4936 int bucket_current_free_index[NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS];
4937 struct necp_kernel_ip_output_policy *kernel_policy = NULL;
4938
4939 lck_rw_assert(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
4940
4941 // Reset mask to 0
4942 necp_kernel_ip_output_policies_condition_mask = 0;
4943 necp_kernel_ip_output_policies_count = 0;
4944 necp_kernel_ip_output_policies_non_id_count = 0;
4945
4946 for (i = 0; i < NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS; i++) {
4947 if (necp_kernel_ip_output_policies_map[i] != NULL) {
4948 FREE(necp_kernel_ip_output_policies_map[i], M_NECP);
4949 necp_kernel_ip_output_policies_map[i] = NULL;
4950 }
4951
4952 // Init counts
4953 bucket_allocation_counts[i] = 0;
4954 }
4955
4956 LIST_FOREACH(kernel_policy, &necp_kernel_ip_output_policies, chain) {
4957 // Update mask
4958 necp_kernel_ip_output_policies_condition_mask |= kernel_policy->condition_mask;
4959 necp_kernel_ip_output_policies_count++;
4960
4961 // Update bucket counts
4962 if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID)) {
4963 necp_kernel_ip_output_policies_non_id_count++;
4964 for (i = 0; i < NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS; i++) {
4965 bucket_allocation_counts[i]++;
4966 }
4967 } else {
4968 bucket_allocation_counts[NECP_IP_OUTPUT_MAP_ID_TO_BUCKET(kernel_policy->cond_policy_id)]++;
4969 }
4970 }
4971
4972 for (i = 0; i < NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS; i++) {
4973 if (bucket_allocation_counts[i] > 0) {
4974 // Allocate a NULL-terminated array of policy pointers for each bucket
4975 MALLOC(necp_kernel_ip_output_policies_map[i], struct necp_kernel_ip_output_policy **, sizeof(struct necp_kernel_ip_output_policy *) * (bucket_allocation_counts[i] + 1), M_NECP, M_WAITOK);
4976 if (necp_kernel_ip_output_policies_map[i] == NULL) {
4977 goto fail;
4978 }
4979
4980 // Initialize the first entry to NULL
4981 (necp_kernel_ip_output_policies_map[i])[0] = NULL;
4982 }
4983 bucket_current_free_index[i] = 0;
4984 }
4985
4986 LIST_FOREACH(kernel_policy, &necp_kernel_ip_output_policies, chain) {
4987 // Insert pointers into map
4988 if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID)) {
4989 for (i = 0; i < NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS; i++) {
4990 if (!necp_kernel_ip_output_policy_is_unnecessary(kernel_policy, necp_kernel_ip_output_policies_map[i], bucket_current_free_index[i])) {
4991 (necp_kernel_ip_output_policies_map[i])[(bucket_current_free_index[i])] = kernel_policy;
4992 bucket_current_free_index[i]++;
4993 (necp_kernel_ip_output_policies_map[i])[(bucket_current_free_index[i])] = NULL;
4994 }
4995 }
4996 } else {
4997 i = NECP_IP_OUTPUT_MAP_ID_TO_BUCKET(kernel_policy->cond_policy_id);
4998 if (!necp_kernel_ip_output_policy_is_unnecessary(kernel_policy, necp_kernel_ip_output_policies_map[i], bucket_current_free_index[i])) {
4999 (necp_kernel_ip_output_policies_map[i])[(bucket_current_free_index[i])] = kernel_policy;
5000 bucket_current_free_index[i]++;
5001 (necp_kernel_ip_output_policies_map[i])[(bucket_current_free_index[i])] = NULL;
5002 }
5003 }
5004 }
5005 necp_kernel_ip_output_policies_dump_all();
5006 return (TRUE);
5007
5008 fail:
5009 // Free memory, reset mask to 0
5010 necp_kernel_ip_output_policies_condition_mask = 0;
5011 necp_kernel_ip_output_policies_count = 0;
5012 necp_kernel_ip_output_policies_non_id_count = 0;
5013 for (i = 0; i < NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS; i++) {
5014 if (necp_kernel_ip_output_policies_map[i] != NULL) {
5015 FREE(necp_kernel_ip_output_policies_map[i], M_NECP);
5016 necp_kernel_ip_output_policies_map[i] = NULL;
5017 }
5018 }
5019 return (FALSE);
5020 }
5021
5022 // Outbound Policy Matching
5023 // ---------------------
5024 struct substring {
5025 char *string;
5026 size_t length;
5027 };
5028
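// Trim leading and trailing '.' and '*' characters from a domain string,
// e.g. "*.example.com." yields the substring "example.com".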
5029 static struct substring
5030 necp_trim_dots_and_stars(char *string, size_t length)
5031 {
5032 struct substring sub;
5033 sub.string = string;
5034 sub.length = string ? length : 0;
5035
5036 while (sub.length && (sub.string[0] == '.' || sub.string[0] == '*')) {
5037 sub.string++;
5038 sub.length--;
5039 }
5040
5041 while (sub.length && (sub.string[sub.length - 1] == '.' || sub.string[sub.length - 1] == '*')) {
5042 sub.length--;
5043 }
5044
5045 return (sub);
5046 }
5047
5048 static char *
5049 necp_create_trimmed_domain(char *string, size_t length)
5050 {
5051 char *trimmed_domain = NULL;
5052 struct substring sub = necp_trim_dots_and_stars(string, length);
5053
5054 MALLOC(trimmed_domain, char *, sub.length + 1, M_NECP, M_WAITOK);
5055 if (trimmed_domain == NULL) {
5056 return (NULL);
5057 }
5058
5059 memcpy(trimmed_domain, sub.string, sub.length);
5060 trimmed_domain[sub.length] = 0;
5061
5062 return (trimmed_domain);
5063 }
5064
5065 static inline int
5066 necp_count_dots(char *string, size_t length)
5067 {
5068 int dot_count = 0;
5069 size_t i = 0;
5070
5071 for (i = 0; i < length; i++) {
5072 if (string[i] == '.') {
5073 dot_count++;
5074 }
5075 }
5076
5077 return (dot_count);
5078 }
5079
5080 static bool
5081 necp_check_suffix(struct substring parent, struct substring suffix, bool require_dot_before_suffix)
5082 {
5083 if (parent.length <= suffix.length) {
5084 return (FALSE);
5085 }
5086
5087 size_t length_difference = (parent.length - suffix.length);
5088
5089 if (require_dot_before_suffix) {
5090 if (((char *)(parent.string + length_difference - 1))[0] != '.') {
5091 return (FALSE);
5092 }
5093 }
5094
5095 // strncasecmp is case-insensitive for ASCII characters only; non-ASCII (UTF-8) bytes are compared verbatim
5096 return (strncasecmp(parent.string + length_difference, suffix.string, suffix.length) == 0);
5097 }
5098
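// Match a hostname against a policy domain. With equal dot counts the strings
// must match exactly (case-insensitively); when the domain has fewer dots, it
// must be a suffix of the hostname preceded by a '.'. For example,
// "mail.example.com" matches the domain "example.com" but not "ample.com".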
5099 static bool
5100 necp_hostname_matches_domain(struct substring hostname_substring, u_int8_t hostname_dot_count, char *domain, u_int8_t domain_dot_count)
5101 {
5102 if (hostname_substring.string == NULL || domain == NULL) {
5103 return (hostname_substring.string == domain);
5104 }
5105
5106 struct substring domain_substring;
5107 domain_substring.string = domain;
5108 domain_substring.length = strlen(domain);
5109
5110 if (hostname_dot_count == domain_dot_count) {
5111 // strncasecmp is case-insensitive for ASCII characters only; non-ASCII (UTF-8) bytes are compared verbatim
5112 if (hostname_substring.length == domain_substring.length &&
5113 strncasecmp(hostname_substring.string, domain_substring.string, hostname_substring.length) == 0) {
5114 return (TRUE);
5115 }
5116 } else if (domain_dot_count < hostname_dot_count) {
5117 if (necp_check_suffix(hostname_substring, domain_substring, TRUE)) {
5118 return (TRUE);
5119 }
5120 }
5121
5122 return (FALSE);
5123 }
5124
5125 static char *
5126 necp_copy_string(char *string, size_t length)
5127 {
5128 char *copied_string = NULL;
5129
5130 MALLOC(copied_string, char *, length + 1, M_NECP, M_WAITOK);
5131 if (copied_string == NULL) {
5132 return (NULL);
5133 }
5134
5135 memcpy(copied_string, string, length);
5136 copied_string[length] = 0;
5137
5138 return (copied_string);
5139 }
5140
5141 #define NECP_KERNEL_ADDRESS_TYPE_CONDITIONS (NECP_KERNEL_CONDITION_LOCAL_START | NECP_KERNEL_CONDITION_LOCAL_END | NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_REMOTE_START | NECP_KERNEL_CONDITION_REMOTE_END | NECP_KERNEL_CONDITION_REMOTE_PREFIX)
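// Fill out a necp_socket_info for application-layer policy matching. The pid, uid,
// protocol, bound interface, and traffic class are always copied; the entitlement
// check, app/account ids, domain, and addresses are resolved only when the
// corresponding bits are set in the application policy condition mask.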
5142 static void
5143 necp_application_fillout_info_locked(uuid_t application_uuid, uuid_t real_application_uuid, char *account, char *domain, pid_t pid, uid_t uid, u_int16_t protocol, u_int32_t bound_interface_index, u_int32_t traffic_class, union necp_sockaddr_union *local_addr, union necp_sockaddr_union *remote_addr, proc_t proc, struct necp_socket_info *info)
5144 {
5145 memset(info, 0, sizeof(struct necp_socket_info));
5146
5147 info->pid = pid;
5148 info->uid = uid;
5149 info->protocol = protocol;
5150 info->bound_interface_index = bound_interface_index;
5151 info->traffic_class = traffic_class;
5152
5153 if (necp_kernel_application_policies_condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT && proc != NULL) {
5154 info->cred_result = priv_check_cred(proc_ucred(proc), PRIV_NET_PRIVILEGED_NECP_MATCH, 0);
5155 }
5156
5157 if (necp_kernel_application_policies_condition_mask & NECP_KERNEL_CONDITION_APP_ID && !uuid_is_null(application_uuid)) {
5158 struct necp_uuid_id_mapping *existing_mapping = necp_uuid_lookup_app_id_locked(application_uuid);
5159 if (existing_mapping) {
5160 info->application_id = existing_mapping->id;
5161 }
5162 }
5163
5164 if (necp_kernel_application_policies_condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID && !uuid_is_null(real_application_uuid)) {
5165 if (uuid_compare(application_uuid, real_application_uuid) == 0) {
5166 info->real_application_id = info->application_id;
5167 } else {
5168 struct necp_uuid_id_mapping *existing_mapping = necp_uuid_lookup_app_id_locked(real_application_uuid);
5169 if (existing_mapping) {
5170 info->real_application_id = existing_mapping->id;
5171 }
5172 }
5173 }
5174
5175 if (necp_kernel_application_policies_condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID && account != NULL) {
5176 struct necp_string_id_mapping *existing_mapping = necp_lookup_string_to_id_locked(&necp_account_id_list, account);
5177 if (existing_mapping) {
5178 info->account_id = existing_mapping->id;
5179 }
5180 }
5181
5182 if (necp_kernel_application_policies_condition_mask & NECP_KERNEL_CONDITION_DOMAIN) {
5183 info->domain = domain;
5184 }
5185
5186 if (necp_kernel_application_policies_condition_mask & NECP_KERNEL_ADDRESS_TYPE_CONDITIONS) {
5187 if (local_addr && local_addr->sa.sa_len > 0) {
5188 memcpy(&info->local_addr, local_addr, local_addr->sa.sa_len);
5189 }
5190 if (remote_addr && remote_addr->sa.sa_len > 0) {
5191 memcpy(&info->remote_addr, remote_addr, remote_addr->sa.sa_len);
5192 }
5193 }
5194 }
5195
5196 static void
5197 necp_send_application_interface_denied_event(pid_t pid, uuid_t proc_uuid, u_int32_t if_functional_type)
5198 {
5199 struct kev_netpolicy_ifdenied ev_ifdenied;
5200
5201 bzero(&ev_ifdenied, sizeof(ev_ifdenied));
5202
5203 ev_ifdenied.ev_data.epid = pid;
5204 uuid_copy(ev_ifdenied.ev_data.euuid, proc_uuid);
5205 ev_ifdenied.ev_if_functional_type = if_functional_type;
5206
5207 netpolicy_post_msg(KEV_NETPOLICY_IFDENIED, &ev_ifdenied.ev_data, sizeof(ev_ifdenied));
5208 }
5209
5210 extern char *proc_name_address(void *p);
5211
5212 #define NECP_VERIFY_DELEGATION_ENTITLEMENT(_p, _d) \
5213 if (!has_checked_delegation_entitlement) { \
5214 has_delegation_entitlement = (priv_check_cred(proc_ucred(_p), PRIV_NET_PRIVILEGED_SOCKET_DELEGATE, 0) == 0); \
5215 has_checked_delegation_entitlement = TRUE; \
5216 } \
5217 if (!has_delegation_entitlement) { \
5218 NECPLOG(LOG_ERR, "%s(%d) does not hold the necessary entitlement to delegate network traffic for other processes (attempted to override %s)", \
5219 proc_name_address(_p), proc_pid(_p), _d); \
5220 break; \
5221 }
5222
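// Match an application-level policy request: parse the client parameter TLVs
// (overriding the application, real application, pid, or uid requires the socket
// delegation entitlement), fill out the socket info, find the best app-layer
// policy match, then evaluate routing for the resulting interface, apply route
// rules (a disallowed route becomes a DROP and posts an interface-denied event),
// and report local/direct and IPv4/IPv6 availability flags to the caller.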
5223 int
5224 necp_application_find_policy_match_internal(proc_t proc,
5225 u_int8_t *parameters,
5226 u_int32_t parameters_size,
5227 struct necp_aggregate_result *returned_result,
5228 u_int32_t *flags,
5229 u_int required_interface_index)
5230 {
5231 int error = 0;
5232 size_t offset = 0;
5233
5234 struct necp_kernel_socket_policy *matched_policy = NULL;
5235 struct necp_socket_info info;
5236 necp_kernel_policy_filter filter_control_unit = 0;
5237 u_int32_t route_rule_id = 0;
5238 necp_kernel_policy_result service_action = 0;
5239 necp_kernel_policy_service service = { 0, 0 };
5240
5241 u_int16_t protocol = 0;
5242 u_int32_t bound_interface_index = required_interface_index;
5243 u_int32_t traffic_class = 0;
5244 union necp_sockaddr_union local_addr;
5245 union necp_sockaddr_union remote_addr;
5246 bool no_remote_addr = FALSE;
5247 u_int8_t remote_family = 0;
5248 bool no_local_addr = FALSE;
5249
5250 memset(&local_addr, 0, sizeof(local_addr));
5251 memset(&remote_addr, 0, sizeof(remote_addr));
5252
5253 // Initialize UID, PID, and UUIDs to the current process
5254 uid_t uid = kauth_cred_getuid(proc_ucred(proc));
5255 pid_t pid = proc_pid(proc);
5256 uuid_t application_uuid;
5257 uuid_clear(application_uuid);
5258 uuid_t real_application_uuid;
5259 uuid_clear(real_application_uuid);
5260 proc_getexecutableuuid(proc, real_application_uuid, sizeof(real_application_uuid));
5261 uuid_copy(application_uuid, real_application_uuid);
5262
5263 char *domain = NULL;
5264 char *account = NULL;
5265
5266 u_int32_t netagent_ids[NECP_MAX_NETAGENTS];
5267 memset(&netagent_ids, 0, sizeof(netagent_ids));
5268 int netagent_cursor;
5269
5270 bool has_checked_delegation_entitlement = FALSE;
5271 bool has_delegation_entitlement = FALSE;
5272
5273 if (returned_result == NULL) {
5274 return (EINVAL);
5275 }
5276
5277 memset(returned_result, 0, sizeof(struct necp_aggregate_result));
5278
5279 lck_rw_lock_shared(&necp_kernel_policy_lock);
5280 if (necp_kernel_application_policies_count == 0) {
5281 if (necp_drop_all_order > 0) {
5282 returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_DROP;
5283 lck_rw_done(&necp_kernel_policy_lock);
5284 return (0);
5285 }
5286 }
5287 lck_rw_done(&necp_kernel_policy_lock);
5288
5289 while ((offset + sizeof(u_int8_t) + sizeof(u_int32_t)) <= parameters_size) {
5290 u_int8_t type = necp_buffer_get_tlv_type(parameters, offset);
5291 u_int32_t length = necp_buffer_get_tlv_length(parameters, offset);
5292
5293 if (length > 0 && (offset + sizeof(u_int8_t) + sizeof(u_int32_t) + length) <= parameters_size) {
5294 u_int8_t *value = necp_buffer_get_tlv_value(parameters, offset, NULL);
5295 if (value != NULL) {
5296 switch (type) {
5297 case NECP_CLIENT_PARAMETER_APPLICATION: {
5298 if (length >= sizeof(uuid_t)) {
5299 if (uuid_compare(application_uuid, value) == 0) {
5300 // No delegation
5301 break;
5302 }
5303
5304 NECP_VERIFY_DELEGATION_ENTITLEMENT(proc, "euuid");
5305
5306 uuid_copy(application_uuid, value);
5307 }
5308 break;
5309 }
5310 case NECP_CLIENT_PARAMETER_REAL_APPLICATION: {
5311 if (length >= sizeof(uuid_t)) {
5312 if (uuid_compare(real_application_uuid, value) == 0) {
5313 // No delegation
5314 break;
5315 }
5316
5317 NECP_VERIFY_DELEGATION_ENTITLEMENT(proc, "uuid");
5318
5319 uuid_copy(real_application_uuid, value);
5320 }
5321 break;
5322 }
5323 case NECP_CLIENT_PARAMETER_PID: {
5324 if (length >= sizeof(pid_t)) {
5325 if (memcmp(&pid, value, sizeof(pid_t)) == 0) {
5326 // No delegation
5327 break;
5328 }
5329
5330 NECP_VERIFY_DELEGATION_ENTITLEMENT(proc, "pid");
5331
5332 memcpy(&pid, value, sizeof(pid_t));
5333 }
5334 break;
5335 }
5336 case NECP_CLIENT_PARAMETER_UID: {
5337 if (length >= sizeof(uid_t)) {
5338 if (memcmp(&uid, value, sizeof(uid_t)) == 0) {
5339 // No delegation
5340 break;
5341 }
5342
5343 NECP_VERIFY_DELEGATION_ENTITLEMENT(proc, "uid");
5344
5345 memcpy(&uid, value, sizeof(uid_t));
5346 }
5347 break;
5348 }
5349 case NECP_CLIENT_PARAMETER_DOMAIN: {
5350 domain = (char *)value;
5351 domain[length - 1] = 0;
5352 break;
5353 }
5354 case NECP_CLIENT_PARAMETER_ACCOUNT: {
5355 account = (char *)value;
5356 account[length - 1] = 0;
5357 break;
5358 }
5359 case NECP_CLIENT_PARAMETER_TRAFFIC_CLASS: {
5360 if (length >= sizeof(u_int32_t)) {
5361 memcpy(&traffic_class, value, sizeof(u_int32_t));
5362 }
5363 break;
5364 }
5365 case NECP_CLIENT_PARAMETER_IP_PROTOCOL: {
5366 if (length >= sizeof(u_int16_t)) {
5367 memcpy(&protocol, value, sizeof(u_int16_t));
5368 }
5369 break;
5370 }
5371 case NECP_CLIENT_PARAMETER_BOUND_INTERFACE: {
5372 if (length <= IFXNAMSIZ && length > 0) {
5373 ifnet_t bound_interface = NULL;
5374 char interface_name[IFXNAMSIZ];
5375 memcpy(interface_name, value, length);
5376 interface_name[length - 1] = 0; // Make sure the string is NULL terminated
5377 if (ifnet_find_by_name(interface_name, &bound_interface) == 0) {
5378 bound_interface_index = bound_interface->if_index;
5379 ifnet_release(bound_interface);
5380 }
5381 }
5382 break;
5383 }
5384 case NECP_CLIENT_PARAMETER_LOCAL_ADDRESS: {
5385 if (length >= sizeof(struct necp_policy_condition_addr)) {
5386 struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value;
5387 if (necp_address_is_valid(&address_struct->address.sa)) {
5388 memcpy(&local_addr, &address_struct->address, sizeof(address_struct->address));
5389 }
5390 }
5391 break;
5392 }
5393 case NECP_CLIENT_PARAMETER_REMOTE_ADDRESS: {
5394 if (length >= sizeof(struct necp_policy_condition_addr)) {
5395 struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value;
5396 if (necp_address_is_valid(&address_struct->address.sa)) {
5397 memcpy(&remote_addr, &address_struct->address, sizeof(address_struct->address));
5398 }
5399 }
5400 break;
5401 }
5402 default: {
5403 break;
5404 }
5405 }
5406 }
5407 }
5408
5409 offset += sizeof(u_int8_t) + sizeof(u_int32_t) + length;
5410 }
5411
5412 // Lock
5413 lck_rw_lock_shared(&necp_kernel_policy_lock);
5414
5415 necp_application_fillout_info_locked(application_uuid, real_application_uuid, account, domain, pid, uid, protocol, bound_interface_index, traffic_class, &local_addr, &remote_addr, proc, &info);
5416 matched_policy = necp_socket_find_policy_match_with_info_locked(necp_kernel_socket_policies_app_layer_map, &info, &filter_control_unit, &route_rule_id, &service_action, &service, netagent_ids, NECP_MAX_NETAGENTS, proc);
5417 if (matched_policy) {
5418 returned_result->policy_id = matched_policy->id;
5419 returned_result->routing_result = matched_policy->result;
5420 memcpy(&returned_result->routing_result_parameter, &matched_policy->result_parameter, sizeof(returned_result->routing_result_parameter));
5421 } else {
5422 returned_result->policy_id = 0;
5423 returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_NONE;
5424 }
5425 returned_result->filter_control_unit = filter_control_unit;
5426 returned_result->service_action = service_action;
5427
5428 // Handle trigger service
5429 if (service.identifier != 0) {
5430 struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(service.identifier);
5431 if (mapping != NULL) {
5432 struct necp_service_registration *service_registration = NULL;
5433 uuid_copy(returned_result->service_uuid, mapping->uuid);
5434 returned_result->service_data = service.data;
5435 if (service.identifier == NECP_NULL_SERVICE_ID) {
5436 // NULL service is always 'registered'
5437 returned_result->service_flags |= NECP_SERVICE_FLAGS_REGISTERED;
5438 } else {
5439 LIST_FOREACH(service_registration, &necp_registered_service_list, kernel_chain) {
5440 if (service.identifier == service_registration->service_id) {
5441 returned_result->service_flags |= NECP_SERVICE_FLAGS_REGISTERED;
5442 break;
5443 }
5444 }
5445 }
5446 }
5447 }
5448
5449 // Handle netagents
5450 for (netagent_cursor = 0; netagent_cursor < NECP_MAX_NETAGENTS; netagent_cursor++) {
5451 struct necp_uuid_id_mapping *mapping = NULL;
5452 u_int32_t netagent_id = netagent_ids[netagent_cursor];
5453 if (netagent_id == 0) {
5454 break;
5455 }
5456 mapping = necp_uuid_lookup_uuid_with_service_id_locked(netagent_id);
5457 if (mapping != NULL) {
5458 uuid_copy(returned_result->netagents[netagent_cursor], mapping->uuid);
5459 returned_result->netagent_flags[netagent_cursor] = netagent_get_flags(mapping->uuid);
5460 }
5461 }
5462
5463 // Do routing evaluation
5464 u_int output_bound_interface = bound_interface_index;
5465 if (returned_result->routing_result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED) {
5466 output_bound_interface = returned_result->routing_result_parameter.scoped_interface_index;
5467 } else if (returned_result->routing_result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL) {
5468 output_bound_interface = returned_result->routing_result_parameter.tunnel_interface_index;
5469 }
5470
5471 if (local_addr.sa.sa_len == 0 ||
5472 (local_addr.sa.sa_family == AF_INET && local_addr.sin.sin_addr.s_addr == 0) ||
5473 (local_addr.sa.sa_family == AF_INET6 && IN6_IS_ADDR_UNSPECIFIED(&local_addr.sin6.sin6_addr))) {
5474 no_local_addr = TRUE;
5475 }
5476
5477 if (remote_addr.sa.sa_len == 0 ||
5478 (remote_addr.sa.sa_family == AF_INET && remote_addr.sin.sin_addr.s_addr == 0) ||
5479 (remote_addr.sa.sa_family == AF_INET6 && IN6_IS_ADDR_UNSPECIFIED(&remote_addr.sin6.sin6_addr))) {
5480 no_remote_addr = TRUE;
5481 remote_family = remote_addr.sa.sa_family;
5482 }
5483
5484 if (no_remote_addr) {
5485 memset(&remote_addr, 0, sizeof(remote_addr));
5486 if (remote_family == AF_INET6) {
5487 // Reset address to ::
5488 remote_addr.sa.sa_family = AF_INET6;
5489 remote_addr.sa.sa_len = sizeof(struct sockaddr_in6);
5490 } else {
5491 // Reset address to 0.0.0.0
5492 remote_addr.sa.sa_family = AF_INET;
5493 remote_addr.sa.sa_len = sizeof(struct sockaddr_in);
5494 }
5495 }
5496
5497 struct rtentry *rt = NULL;
5498 rt = rtalloc1_scoped((struct sockaddr *)&remote_addr, 0, 0,
5499 output_bound_interface);
5500
5501 if (no_remote_addr && remote_family == 0 &&
5502 (rt == NULL || rt->rt_ifp == NULL)) {
5503 // Route lookup for default IPv4 failed, try IPv6
5504
5505 // Cleanup old route if necessary
5506 if (rt != NULL) {
5507 rtfree(rt);
5508 rt = NULL;
5509 }
5510
5511 // Reset address to ::
5512 memset(&remote_addr, 0, sizeof(remote_addr));
5513 remote_addr.sa.sa_family = AF_INET6;
5514 remote_addr.sa.sa_len = sizeof(struct sockaddr_in6);
5515
5516 // Get route
5517 rt = rtalloc1_scoped((struct sockaddr *)&remote_addr, 0, 0,
5518 output_bound_interface);
5519 }
5520
5521 returned_result->routed_interface_index = 0;
5522 if (rt != NULL &&
5523 rt->rt_ifp != NULL) {
5524 returned_result->routed_interface_index = rt->rt_ifp->if_index;
5525 /*
5526 * For local addresses, we allow the interface scope to be
5527 * either the loopback interface or the interface hosting the
5528 * local address.
5529 */
5530 if (bound_interface_index != IFSCOPE_NONE &&
5531 rt->rt_ifa != NULL && rt->rt_ifa->ifa_ifp &&
5532 (output_bound_interface == lo_ifp->if_index ||
5533 rt->rt_ifp->if_index == lo_ifp->if_index ||
5534 rt->rt_ifa->ifa_ifp->if_index == bound_interface_index)) {
5535 struct sockaddr_storage dst;
5536 unsigned int ifscope = bound_interface_index;
5537
5538 /*
5539 * Transform dst into the internal routing table form
5540 */
5541 (void) sa_copy((struct sockaddr *)&remote_addr,
5542 &dst, &ifscope);
5543
5544 if ((rt->rt_ifp->if_index == lo_ifp->if_index) ||
5545 rt_ifa_is_dst((struct sockaddr *)&dst, rt->rt_ifa))
5546 returned_result->routed_interface_index =
5547 bound_interface_index;
5548 }
5549 }
5550
5551 if (returned_result->routed_interface_index != 0 &&
5552 returned_result->routed_interface_index != lo_ifp->if_index && // Loopback can accept any local address
5553 !no_local_addr) {
5554
5555 // Transform local_addr into the ifaddr form
5556 // IPv6 Scope IDs are always embedded in the ifaddr list
5557 struct sockaddr_storage local_address_sanitized;
5558 u_int ifscope = IFSCOPE_NONE;
5559 (void)sa_copy(&local_addr.sa, &local_address_sanitized, &ifscope);
5560 SIN(&local_address_sanitized)->sin_port = 0;
5561 if (local_address_sanitized.ss_family == AF_INET6) {
5562 SIN6(&local_address_sanitized)->sin6_scope_id = 0;
5563 }
5564
5565 // Validate local address on routed interface
5566 struct ifaddr *ifa = ifa_ifwithaddr_scoped((struct sockaddr *)&local_address_sanitized, returned_result->routed_interface_index);
5567 if (ifa == NULL) {
5568 // Interface address not found, reject route
5569 returned_result->routed_interface_index = 0;
5570 if (rt != NULL) {
5571 rtfree(rt);
5572 rt = NULL;
5573 }
5574 } else {
5575 ifaddr_release(ifa);
5576 ifa = NULL;
5577 }
5578 }
5579
5580 if (flags != NULL) {
5581 // Check for local/direct
5582 bool is_local = FALSE;
5583 if (rt != NULL && (rt->rt_flags & RTF_LOCAL)) {
5584 is_local = TRUE;
5585 } else if (returned_result->routed_interface_index != 0 &&
5586 !no_remote_addr) {
5587 // Check if remote address is an interface address
5588 struct ifaddr *ifa = ifa_ifwithaddr(&remote_addr.sa);
5589 if (ifa != NULL && ifa->ifa_ifp != NULL) {
5590 u_int if_index_for_remote_addr = ifa->ifa_ifp->if_index;
5591 if (if_index_for_remote_addr == returned_result->routed_interface_index ||
5592 if_index_for_remote_addr == lo_ifp->if_index) {
5593 is_local = TRUE;
5594 }
5595 }
5596 if (ifa != NULL) {
5597 ifaddr_release(ifa);
5598 ifa = NULL;
5599 }
5600 }
5601
5602 if (is_local) {
5603 *flags |= (NECP_CLIENT_RESULT_FLAG_IS_LOCAL | NECP_CLIENT_RESULT_FLAG_IS_DIRECT);
5604 } else {
5605 if (rt != NULL &&
5606 !(rt->rt_flags & RTF_GATEWAY) &&
5607 (rt->rt_ifa && rt->rt_ifa->ifa_ifp && !(rt->rt_ifa->ifa_ifp->if_flags & IFF_POINTOPOINT))) {
5608 // Route is directly accessible
5609 *flags |= NECP_CLIENT_RESULT_FLAG_IS_DIRECT;
5610 }
5611 }
5612
5613 if (returned_result->routed_interface_index != 0) {
5614 union necp_sockaddr_union default_address;
5615 struct rtentry *v4Route = NULL;
5616 struct rtentry *v6Route = NULL;
5617
5618 memset(&default_address, 0, sizeof(default_address));
5619
5620 // Reset address to 0.0.0.0
5621 default_address.sa.sa_family = AF_INET;
5622 default_address.sa.sa_len = sizeof(struct sockaddr_in);
5623 v4Route = rtalloc1_scoped((struct sockaddr *)&default_address, 0, 0,
5624 returned_result->routed_interface_index);
5625
5626 // Reset address to ::
5627 default_address.sa.sa_family = AF_INET6;
5628 default_address.sa.sa_len = sizeof(struct sockaddr_in6);
5629 v6Route = rtalloc1_scoped((struct sockaddr *)&default_address, 0, 0,
5630 returned_result->routed_interface_index);
5631
5632 if (v4Route != NULL) {
5633 if (v4Route->rt_ifp != NULL) {
5634 *flags |= NECP_CLIENT_RESULT_FLAG_HAS_IPV4;
5635 }
5636 rtfree(v4Route);
5637 v4Route = NULL;
5638 }
5639
5640 if (v6Route != NULL) {
5641 if (v6Route->rt_ifp != NULL) {
5642 *flags |= NECP_CLIENT_RESULT_FLAG_HAS_IPV6;
5643 }
5644 rtfree(v6Route);
5645 v6Route = NULL;
5646 }
5647 }
5648 }
5649
5650 u_int32_t interface_type_denied = IFRTYPE_FUNCTIONAL_UNKNOWN;
5651 bool route_is_allowed = necp_route_is_allowed(rt, NULL, route_rule_id, &interface_type_denied);
5652 if (!route_is_allowed) {
5653 // If the route is blocked, treat the lookup as a drop
5654 returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_DROP;
5655 memset(&returned_result->routing_result_parameter, 0, sizeof(returned_result->routing_result_parameter));
5656
5657 if (interface_type_denied != IFRTYPE_FUNCTIONAL_UNKNOWN) {
5658 necp_send_application_interface_denied_event(pid, application_uuid, interface_type_denied);
5659 }
5660 }
5661
5662 if (rt != NULL) {
5663 rtfree(rt);
5664 rt = NULL;
5665 }
5666 // Unlock
5667 lck_rw_done(&necp_kernel_policy_lock);
5668
5669 return (error);
5670 }
5671
5672 static bool
5673 necp_socket_check_policy(struct necp_kernel_socket_policy *kernel_policy, necp_app_id app_id, necp_app_id real_app_id, errno_t cred_result, u_int32_t account_id, struct substring domain, u_int8_t domain_dot_count, pid_t pid, uid_t uid, u_int32_t bound_interface_index, u_int32_t traffic_class, u_int16_t protocol, union necp_sockaddr_union *local, union necp_sockaddr_union *remote, proc_t proc)
5674 {
5675 if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES)) {
5676 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
5677 u_int32_t cond_bound_interface_index = kernel_policy->cond_bound_interface ? kernel_policy->cond_bound_interface->if_index : 0;
5678 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
5679 if (bound_interface_index == cond_bound_interface_index) {
5680 // No match, matches forbidden interface
5681 return (FALSE);
5682 }
5683 } else {
5684 if (bound_interface_index != cond_bound_interface_index) {
5685 // No match, does not match required interface
5686 return (FALSE);
5687 }
5688 }
5689 } else {
5690 if (bound_interface_index != 0) {
5691 // No match, requires a non-bound packet
5692 return (FALSE);
5693 }
5694 }
5695 }
5696
5697 if (kernel_policy->condition_mask == 0) {
5698 return (TRUE);
5699 }
5700
5701 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID) {
5702 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_APP_ID) {
5703 if (app_id == kernel_policy->cond_app_id) {
5704 // No match, matches forbidden application
5705 return (FALSE);
5706 }
5707 } else {
5708 if (app_id != kernel_policy->cond_app_id) {
5709 // No match, does not match required application
5710 return (FALSE);
5711 }
5712 }
5713 }
5714
5715 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
5716 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
5717 if (real_app_id == kernel_policy->cond_real_app_id) {
5718 // No match, matches forbidden application
5719 return (FALSE);
5720 }
5721 } else {
5722 if (real_app_id != kernel_policy->cond_real_app_id) {
5723 // No match, does not match required application
5724 return (FALSE);
5725 }
5726 }
5727 }
5728
5729 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT) {
5730 if (cred_result != 0) {
5731 // Process is missing entitlement
5732 return (FALSE);
5733 }
5734 }
5735
5736 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT) {
5737 if (kernel_policy->cond_custom_entitlement != NULL) {
5738 if (proc == NULL) {
5739 // No process found, cannot check entitlement
5740 return (FALSE);
5741 }
5742 task_t task = proc_task(proc);
5743 if (task == NULL ||
5744 !IOTaskHasEntitlement(task, kernel_policy->cond_custom_entitlement)) {
5745 // Process is missing custom entitlement
5746 return (FALSE);
5747 }
5748 }
5749 }
5750
5751 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_DOMAIN) {
5752 bool domain_matches = necp_hostname_matches_domain(domain, domain_dot_count, kernel_policy->cond_domain, kernel_policy->cond_domain_dot_count);
5753 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_DOMAIN) {
5754 if (domain_matches) {
5755 // No match, matches forbidden domain
5756 return (FALSE);
5757 }
5758 } else {
5759 if (!domain_matches) {
5760 // No match, does not match required domain
5761 return (FALSE);
5762 }
5763 }
5764 }
5765
5766 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID) {
5767 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID) {
5768 if (account_id == kernel_policy->cond_account_id) {
5769 // No match, matches forbidden account
5770 return (FALSE);
5771 }
5772 } else {
5773 if (account_id != kernel_policy->cond_account_id) {
5774 // No match, does not match required account
5775 return (FALSE);
5776 }
5777 }
5778 }
5779
5780 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PID) {
5781 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_PID) {
5782 if (pid == kernel_policy->cond_pid) {
5783 // No match, matches forbidden pid
5784 return (FALSE);
5785 }
5786 } else {
5787 if (pid != kernel_policy->cond_pid) {
5788 // No match, does not match required pid
5789 return (FALSE);
5790 }
5791 }
5792 }
5793
5794 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_UID) {
5795 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_UID) {
5796 if (uid == kernel_policy->cond_uid) {
5797 // No match, matches forbidden uid
5798 return (FALSE);
5799 }
5800 } else {
5801 if (uid != kernel_policy->cond_uid) {
5802 // No match, does not match required uid
5803 return (FALSE);
5804 }
5805 }
5806 }
5807
5808 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
5809 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
5810 if (traffic_class >= kernel_policy->cond_traffic_class.start_tc &&
5811 traffic_class <= kernel_policy->cond_traffic_class.end_tc) {
5812 // No match, matches forbidden traffic class
5813 return (FALSE);
5814 }
5815 } else {
5816 if (traffic_class < kernel_policy->cond_traffic_class.start_tc ||
5817 traffic_class > kernel_policy->cond_traffic_class.end_tc) {
5818 // No match, does not match required traffic class
5819 return (FALSE);
5820 }
5821 }
5822 }
5823
5824 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
5825 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
5826 if (protocol == kernel_policy->cond_protocol) {
5827 // No match, matches forbidden protocol
5828 return (FALSE);
5829 }
5830 } else {
5831 if (protocol != kernel_policy->cond_protocol) {
5832 // No match, does not match required protocol
5833 return (FALSE);
5834 }
5835 }
5836 }
5837
5838 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
5839 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
5840 bool inRange = necp_is_addr_in_range((struct sockaddr *)local, (struct sockaddr *)&kernel_policy->cond_local_start, (struct sockaddr *)&kernel_policy->cond_local_end);
5841 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
5842 if (inRange) {
5843 return (FALSE);
5844 }
5845 } else {
5846 if (!inRange) {
5847 return (FALSE);
5848 }
5849 }
5850 } else if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
5851 bool inSubnet = necp_is_addr_in_subnet((struct sockaddr *)local, (struct sockaddr *)&kernel_policy->cond_local_start, kernel_policy->cond_local_prefix);
5852 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
5853 if (inSubnet) {
5854 return (FALSE);
5855 }
5856 } else {
5857 if (!inSubnet) {
5858 return (FALSE);
5859 }
5860 }
5861 }
5862 }
5863
5864 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
5865 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
5866 bool inRange = necp_is_addr_in_range((struct sockaddr *)remote, (struct sockaddr *)&kernel_policy->cond_remote_start, (struct sockaddr *)&kernel_policy->cond_remote_end);
5867 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
5868 if (inRange) {
5869 return (FALSE);
5870 }
5871 } else {
5872 if (!inRange) {
5873 return (FALSE);
5874 }
5875 }
5876 } else if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
5877 bool inSubnet = necp_is_addr_in_subnet((struct sockaddr *)remote, (struct sockaddr *)&kernel_policy->cond_remote_start, kernel_policy->cond_remote_prefix);
5878 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
5879 if (inSubnet) {
5880 return (FALSE);
5881 }
5882 } else {
5883 if (!inSubnet) {
5884 return (FALSE);
5885 }
5886 }
5887 }
5888 }
5889
5890 return (TRUE);
5891 }
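
/*
 * Illustrative sketch (not part of necp.c): every condition check above
 * follows one pattern -- a bit in condition_mask enables the check, and the
 * same bit in condition_negated_mask turns "must match" into "must not
 * match". Below is a minimal userland model of that pattern for a single,
 * hypothetical PID condition (EX_CONDITION_PID, struct example_policy and
 * example_check_pid are invented names, not NECP definitions).
 */
#if 0 /* example only */
#include <stdbool.h>
#include <stdio.h>

#define EX_CONDITION_PID 0x1 /* hypothetical condition bit */

struct example_policy {
	unsigned int condition_mask;
	unsigned int condition_negated_mask;
	int cond_pid;
};

static bool
example_check_pid(const struct example_policy *p, int pid)
{
	if (!(p->condition_mask & EX_CONDITION_PID)) {
		return true; /* condition absent: it cannot reject the socket */
	}
	if (p->condition_negated_mask & EX_CONDITION_PID) {
		return pid != p->cond_pid; /* forbidden pid must not match */
	}
	return pid == p->cond_pid; /* required pid must match */
}

int
main(void)
{
	struct example_policy require_pid = { EX_CONDITION_PID, 0, 100 };
	struct example_policy forbid_pid = { EX_CONDITION_PID, EX_CONDITION_PID, 100 };
	printf("require 100, got 100 -> %d\n", example_check_pid(&require_pid, 100)); /* 1 */
	printf("forbid 100, got 100 -> %d\n", example_check_pid(&forbid_pid, 100));   /* 0 */
	return 0;
}
#endif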
5892
5893 static inline u_int32_t
5894 necp_socket_calc_flowhash_locked(struct necp_socket_info *info)
5895 {
5896 return (net_flowhash(info, sizeof(*info), necp_kernel_socket_policies_gencount));
5897 }
5898
5899 static void
5900 necp_socket_fillout_info_locked(struct inpcb *inp, struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr, u_int32_t override_bound_interface, struct necp_socket_info *info)
5901 {
5902 struct socket *so = NULL;
5903
5904 memset(info, 0, sizeof(struct necp_socket_info));
5905
5906 so = inp->inp_socket;
5907
5908 if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_PID) {
5909 info->pid = ((so->so_flags & SOF_DELEGATED) ? so->e_pid : so->last_pid);
5910 }
5911
5912 if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_UID) {
5913 info->uid = kauth_cred_getuid(so->so_cred);
5914 }
5915
5916 if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) {
5917 info->traffic_class = so->so_traffic_class;
5918 }
5919
5920 if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
5921 if (inp->inp_ip_p) {
5922 info->protocol = inp->inp_ip_p;
5923 } else {
5924 info->protocol = SOCK_PROTO(so);
5925 }
5926 }
5927
5928 if (inp->inp_flags2 & INP2_WANT_APP_POLICY && necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_APP_ID) {
5929 struct necp_uuid_id_mapping *existing_mapping = necp_uuid_lookup_app_id_locked(((so->so_flags & SOF_DELEGATED) ? so->e_uuid : so->last_uuid));
5930 if (existing_mapping) {
5931 info->application_id = existing_mapping->id;
5932 }
5933
5934 if (!(so->so_flags & SOF_DELEGATED)) {
5935 info->real_application_id = info->application_id;
5936 } else if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) {
5937 struct necp_uuid_id_mapping *real_existing_mapping = necp_uuid_lookup_app_id_locked(so->last_uuid);
5938 if (real_existing_mapping) {
5939 info->real_application_id = real_existing_mapping->id;
5940 }
5941 }
5942
5943 if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT) {
5944 info->cred_result = priv_check_cred(so->so_cred, PRIV_NET_PRIVILEGED_NECP_MATCH, 0);
5945 }
5946 }
5947
5948 if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID && inp->inp_necp_attributes.inp_account != NULL) {
5949 struct necp_string_id_mapping *existing_mapping = necp_lookup_string_to_id_locked(&necp_account_id_list, inp->inp_necp_attributes.inp_account);
5950 if (existing_mapping) {
5951 info->account_id = existing_mapping->id;
5952 }
5953 }
5954
5955 if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_DOMAIN) {
5956 info->domain = inp->inp_necp_attributes.inp_domain;
5957 }
5958
5959 if (override_bound_interface) {
5960 info->bound_interface_index = override_bound_interface;
5961 } else {
5962 if ((inp->inp_flags & INP_BOUND_IF) && inp->inp_boundifp) {
5963 info->bound_interface_index = inp->inp_boundifp->if_index;
5964 }
5965 }
5966
5967 if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_ADDRESS_TYPE_CONDITIONS) {
5968 if (inp->inp_vflag & INP_IPV4) {
5969 if (override_local_addr) {
5970 if (override_local_addr->sa_len <= sizeof(struct sockaddr_in)) {
5971 memcpy(&info->local_addr, override_local_addr, override_local_addr->sa_len);
5972 }
5973 } else {
5974 ((struct sockaddr_in *)&info->local_addr)->sin_family = AF_INET;
5975 ((struct sockaddr_in *)&info->local_addr)->sin_len = sizeof(struct sockaddr_in);
5976 ((struct sockaddr_in *)&info->local_addr)->sin_port = inp->inp_lport;
5977 memcpy(&((struct sockaddr_in *)&info->local_addr)->sin_addr, &inp->inp_laddr, sizeof(struct in_addr));
5978 }
5979
5980 if (override_remote_addr) {
5981 if (override_remote_addr->sa_len <= sizeof(struct sockaddr_in)) {
5982 memcpy(&info->remote_addr, override_remote_addr, override_remote_addr->sa_len);
5983 }
5984 } else {
5985 ((struct sockaddr_in *)&info->remote_addr)->sin_family = AF_INET;
5986 ((struct sockaddr_in *)&info->remote_addr)->sin_len = sizeof(struct sockaddr_in);
5987 ((struct sockaddr_in *)&info->remote_addr)->sin_port = inp->inp_fport;
5988 memcpy(&((struct sockaddr_in *)&info->remote_addr)->sin_addr, &inp->inp_faddr, sizeof(struct in_addr));
5989 }
5990 } else if (inp->inp_vflag & INP_IPV6) {
5991 if (override_local_addr) {
5992 if (override_local_addr->sa_len <= sizeof(struct sockaddr_in6)) {
5993 memcpy(&info->local_addr, override_local_addr, override_local_addr->sa_len);
5994 }
5995 } else {
5996 ((struct sockaddr_in6 *)&info->local_addr)->sin6_family = AF_INET6;
5997 ((struct sockaddr_in6 *)&info->local_addr)->sin6_len = sizeof(struct sockaddr_in6);
5998 ((struct sockaddr_in6 *)&info->local_addr)->sin6_port = inp->inp_lport;
5999 memcpy(&((struct sockaddr_in6 *)&info->local_addr)->sin6_addr, &inp->in6p_laddr, sizeof(struct in6_addr));
6000 }
6001
6002 if (override_remote_addr) {
6003 if (override_remote_addr->sa_len <= sizeof(struct sockaddr_in6)) {
6004 memcpy(&info->remote_addr, override_remote_addr, override_remote_addr->sa_len);
6005 }
6006 } else {
6007 ((struct sockaddr_in6 *)&info->remote_addr)->sin6_family = AF_INET6;
6008 ((struct sockaddr_in6 *)&info->remote_addr)->sin6_len = sizeof(struct sockaddr_in6);
6009 ((struct sockaddr_in6 *)&info->remote_addr)->sin6_port = inp->inp_fport;
6010 memcpy(&((struct sockaddr_in6 *)&info->remote_addr)->sin6_addr, &inp->in6p_faddr, sizeof(struct in6_addr));
6011 }
6012 }
6013 }
6014 }
6015
6016 static inline struct necp_kernel_socket_policy *
6017 necp_socket_find_policy_match_with_info_locked(struct necp_kernel_socket_policy **policy_search_array, struct necp_socket_info *info, necp_kernel_policy_filter *return_filter, u_int32_t *return_route_rule_id, necp_kernel_policy_result *return_service_action, necp_kernel_policy_service *return_service, u_int32_t *return_netagent_array, size_t netagent_array_count, proc_t proc)
6018 {
6019 struct necp_kernel_socket_policy *matched_policy = NULL;
6020 u_int32_t skip_order = 0;
6021 u_int32_t skip_session_order = 0;
6022 u_int32_t route_rule_id_array[MAX_AGGREGATE_ROUTE_RULES];
6023 size_t route_rule_id_count = 0;
6024 int i;
6025 size_t netagent_cursor = 0;
6026
6027 // Pre-process domain for quick matching
6028 struct substring domain_substring = necp_trim_dots_and_stars(info->domain, info->domain ? strlen(info->domain) : 0);
6029 u_int8_t domain_dot_count = necp_count_dots(domain_substring.string, domain_substring.length);
6030
6031 if (return_filter) {
6032 *return_filter = 0;
6033 }
6034
6035 if (return_route_rule_id) {
6036 *return_route_rule_id = 0;
6037 }
6038
6039 if (return_service_action) {
6040 *return_service_action = 0;
6041 }
6042
6043 if (return_service) {
6044 return_service->identifier = 0;
6045 return_service->data = 0;
6046 }
6047
6048 if (policy_search_array != NULL) {
6049 for (i = 0; policy_search_array[i] != NULL; i++) {
6050 if (necp_drop_all_order != 0 && policy_search_array[i]->session_order >= necp_drop_all_order) {
6051 // We've hit a drop all rule
6052 break;
6053 }
6054 if (skip_session_order && policy_search_array[i]->session_order >= skip_session_order) {
6055 // Done skipping
6056 skip_order = 0;
6057 skip_session_order = 0;
6058 }
6059 if (skip_order) {
6060 if (policy_search_array[i]->order < skip_order) {
6061 // Skip this policy
6062 continue;
6063 } else {
6064 // Done skipping
6065 skip_order = 0;
6066 skip_session_order = 0;
6067 }
6068 } else if (skip_session_order) {
6069 // Skip this policy
6070 continue;
6071 }
6072 if (necp_socket_check_policy(policy_search_array[i], info->application_id, info->real_application_id, info->cred_result, info->account_id, domain_substring, domain_dot_count, info->pid, info->uid, info->bound_interface_index, info->traffic_class, info->protocol, &info->local_addr, &info->remote_addr, proc)) {
6073 if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_SOCKET_FILTER) {
6074 if (return_filter && *return_filter == 0) {
6075 *return_filter = policy_search_array[i]->result_parameter.filter_control_unit;
6076 if (necp_debug > 1) {
6077 NECPLOG(LOG_DEBUG, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) Filter %d", info->application_id, info->real_application_id, info->bound_interface_index, info->protocol, policy_search_array[i]->result_parameter.filter_control_unit);
6078 }
6079 }
6080 continue;
6081 } else if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_ROUTE_RULES) {
6082 if (return_route_rule_id && route_rule_id_count < MAX_AGGREGATE_ROUTE_RULES) {
6083 route_rule_id_array[route_rule_id_count++] = policy_search_array[i]->result_parameter.route_rule_id;
6084 if (necp_debug > 1) {
6085 NECPLOG(LOG_DEBUG, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) Route Rule %d", info->application_id, info->real_application_id, info->bound_interface_index, info->protocol, policy_search_array[i]->result_parameter.route_rule_id);
6086 }
6087 }
6088 continue;
6089 } else if (necp_kernel_socket_result_is_trigger_service_type(policy_search_array[i])) {
6090 if (return_service_action && *return_service_action == 0) {
6091 *return_service_action = policy_search_array[i]->result;
6092 if (necp_debug > 1) {
6093 NECPLOG(LOG_DEBUG, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) Service Action %d", info->application_id, info->real_application_id, info->bound_interface_index, info->protocol, policy_search_array[i]->result);
6094 }
6095 }
6096 if (return_service && return_service->identifier == 0) {
6097 return_service->identifier = policy_search_array[i]->result_parameter.service.identifier;
6098 return_service->data = policy_search_array[i]->result_parameter.service.data;
6099 if (necp_debug > 1) {
6100 NECPLOG(LOG_DEBUG, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) Service ID %d Data %d", info->application_id, info->real_application_id, info->bound_interface_index, info->protocol, policy_search_array[i]->result_parameter.service.identifier, policy_search_array[i]->result_parameter.service.data);
6101 }
6102 }
6103 continue;
6104 } else if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_USE_NETAGENT) {
6105 if (return_netagent_array != NULL &&
6106 netagent_cursor < netagent_array_count) {
6107 return_netagent_array[netagent_cursor] = policy_search_array[i]->result_parameter.netagent_id;
6108 netagent_cursor++;
6109 if (necp_debug > 1) {
6110 NECPLOG(LOG_DEBUG, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) Use Netagent %d", info->application_id, info->real_application_id, info->bound_interface_index, info->protocol, policy_search_array[i]->result_parameter.netagent_id);
6111 }
6112 }
6113 continue;
6114 }
6115
6116 // Matched policy is a skip. Apply the skip and continue.
6117 if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
6118 skip_order = policy_search_array[i]->result_parameter.skip_policy_order;
6119 skip_session_order = policy_search_array[i]->session_order + 1;
6120 continue;
6121 }
6122
6123 // Passed all tests, found a match
6124 matched_policy = policy_search_array[i];
6125 break;
6126 }
6127 }
6128 }
6129
6130 if (route_rule_id_count == 1) {
6131 *return_route_rule_id = route_rule_id_array[0];
6132 } else if (route_rule_id_count > 1) {
6133 *return_route_rule_id = necp_create_aggregate_route_rule(route_rule_id_array);
6134 }
6135 return (matched_policy);
6136 }
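
/*
 * Illustrative sketch (not part of necp.c): the skip bookkeeping above means
 * a SKIP result jumps over lower-order policies within the same session and
 * automatically ends once the next session's policies begin. A standalone
 * model using a hypothetical, already-sorted entries[] array (sorted by
 * session_order, then order, like the kernel policy map).
 */
#if 0 /* example only */
#include <stdio.h>

struct example_entry {
	unsigned int session_order;
	unsigned int order;
	int is_skip;                /* 1 if the entry's result is SKIP */
	unsigned int skip_to_order; /* target order when is_skip is set */
};

int
main(void)
{
	struct example_entry entries[] = {
		{ 10, 1, 1, 5 },  /* SKIP to order 5 within session 10 */
		{ 10, 2, 0, 0 },  /* order < 5: skipped */
		{ 10, 6, 0, 0 },  /* order >= 5: evaluated */
		{ 20, 1, 0, 0 },  /* next session: always evaluated */
	};
	unsigned int skip_order = 0;
	unsigned int skip_session_order = 0;
	for (int i = 0; i < 4; i++) {
		if (skip_session_order && entries[i].session_order >= skip_session_order) {
			skip_order = 0;
			skip_session_order = 0;
		}
		if (skip_order) {
			if (entries[i].order < skip_order) {
				printf("entry %d skipped\n", i);
				continue;
			}
			skip_order = 0;
			skip_session_order = 0;
		} else if (skip_session_order) {
			printf("entry %d skipped\n", i);
			continue;
		}
		if (entries[i].is_skip) {
			skip_order = entries[i].skip_to_order;
			skip_session_order = entries[i].session_order + 1;
			printf("entry %d is a SKIP to order %u\n", i, skip_order);
			continue;
		}
		printf("entry %d evaluated\n", i);
	}
	return 0;
}
#endif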
6137
6138 static bool
6139 necp_socket_uses_interface(struct inpcb *inp, u_int32_t interface_index)
6140 {
6141 bool found_match = FALSE;
6142 errno_t result = 0;
6143 ifaddr_t *addresses = NULL;
6144 union necp_sockaddr_union address_storage;
6145 int i;
6146 int family = AF_INET;
6147 ifnet_t interface = ifindex2ifnet[interface_index];
6148
6149 if (inp == NULL || interface == NULL) {
6150 return (FALSE);
6151 }
6152
6153 if (inp->inp_vflag & INP_IPV4) {
6154 family = AF_INET;
6155 } else if (inp->inp_vflag & INP_IPV6) {
6156 family = AF_INET6;
6157 }
6158
6159 result = ifnet_get_address_list_family(interface, &addresses, family);
6160 if (result != 0) {
6161 NECPLOG(LOG_ERR, "Failed to get address list for %s%d", ifnet_name(interface), ifnet_unit(interface));
6162 return (FALSE);
6163 }
6164
6165 for (i = 0; addresses[i] != NULL; i++) {
6166 if (ifaddr_address(addresses[i], &address_storage.sa, sizeof(address_storage)) == 0) {
6167 if (family == AF_INET) {
6168 if (memcmp(&address_storage.sin.sin_addr, &inp->inp_laddr, sizeof(inp->inp_laddr)) == 0) {
6169 found_match = TRUE;
6170 goto done;
6171 }
6172 } else if (family == AF_INET6) {
6173 if (memcmp(&address_storage.sin6.sin6_addr, &inp->in6p_laddr, sizeof(inp->in6p_laddr)) == 0) {
6174 found_match = TRUE;
6175 goto done;
6176 }
6177 }
6178 }
6179 }
6180
6181 done:
6182 ifnet_free_address_list(addresses);
6183 addresses = NULL;
6184 return (found_match);
6185 }
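
/*
 * Illustrative sketch (not part of necp.c): necp_socket_uses_interface()
 * walks the interface's address list and compares each entry against the
 * socket's local address. A userland analog of the same idea, assuming
 * getifaddrs(); "lo0" is the Darwin loopback name, and
 * example_address_on_interface is an invented helper.
 */
#if 0 /* example only */
#include <sys/socket.h>
#include <arpa/inet.h>
#include <ifaddrs.h>
#include <netinet/in.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool
example_address_on_interface(const char *ifname, const struct in_addr *addr)
{
	struct ifaddrs *ifap = NULL;
	bool found = false;
	if (getifaddrs(&ifap) != 0) {
		return false;
	}
	for (struct ifaddrs *ifa = ifap; ifa != NULL; ifa = ifa->ifa_next) {
		if (ifa->ifa_addr == NULL ||
		    ifa->ifa_addr->sa_family != AF_INET ||
		    strcmp(ifa->ifa_name, ifname) != 0) {
			continue;
		}
		const struct sockaddr_in *sin = (const struct sockaddr_in *)ifa->ifa_addr;
		if (memcmp(&sin->sin_addr, addr, sizeof(*addr)) == 0) {
			found = true;
			break;
		}
	}
	freeifaddrs(ifap);
	return found;
}

int
main(void)
{
	struct in_addr loopback;
	loopback.s_addr = htonl(INADDR_LOOPBACK);
	printf("127.0.0.1 on lo0: %d\n", example_address_on_interface("lo0", &loopback));
	return 0;
}
#endif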
6186
6187 static inline bool
6188 necp_socket_is_connected(struct inpcb *inp)
6189 {
6190 return (inp->inp_socket->so_state & (SS_ISCONNECTING | SS_ISCONNECTED | SS_ISDISCONNECTING));
6191 }
6192
6193 necp_kernel_policy_id
6194 necp_socket_find_policy_match(struct inpcb *inp, struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr, u_int32_t override_bound_interface)
6195 {
6196 struct socket *so = NULL;
6197 necp_kernel_policy_filter filter_control_unit = 0;
6198 u_int32_t route_rule_id = 0;
6199 struct necp_kernel_socket_policy *matched_policy = NULL;
6200 necp_kernel_policy_id matched_policy_id = NECP_KERNEL_POLICY_ID_NONE;
6201 necp_kernel_policy_result service_action = 0;
6202 necp_kernel_policy_service service = { 0, 0 };
6203
6204 u_int32_t netagent_ids[NECP_MAX_NETAGENTS];
6205 memset(&netagent_ids, 0, sizeof(netagent_ids));
6206 int netagent_cursor;
6207
6208 struct necp_socket_info info;
6209
6210 if (inp == NULL) {
6211 return (NECP_KERNEL_POLICY_ID_NONE);
6212 }
6213
6214 // Ignore invalid addresses
6215 if (override_local_addr != NULL &&
6216 !necp_address_is_valid(override_local_addr)) {
6217 override_local_addr = NULL;
6218 }
6219 if (override_remote_addr != NULL &&
6220 !necp_address_is_valid(override_remote_addr)) {
6221 override_remote_addr = NULL;
6222 }
6223
6224 so = inp->inp_socket;
6225
6226 // Don't lock. Possible race condition, but we don't want the performance hit.
6227 if (necp_kernel_socket_policies_count == 0 ||
6228 (!(inp->inp_flags2 & INP2_WANT_APP_POLICY) && necp_kernel_socket_policies_non_app_count == 0)) {
6229 if (necp_drop_all_order > 0) {
6230 inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
6231 inp->inp_policyresult.policy_gencount = 0;
6232 inp->inp_policyresult.app_id = 0;
6233 inp->inp_policyresult.flowhash = 0;
6234 inp->inp_policyresult.results.filter_control_unit = 0;
6235 inp->inp_policyresult.results.route_rule_id = 0;
6236 if (necp_pass_loopback > 0 &&
6237 necp_is_loopback(override_local_addr, override_remote_addr, inp, NULL)) {
6238 inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_PASS;
6239 } else {
6240 inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_DROP;
6241 }
6242 }
6243 return (NECP_KERNEL_POLICY_ID_NONE);
6244 }
6245
6246 // Check for loopback exception
6247 if (necp_pass_loopback > 0 &&
6248 necp_is_loopback(override_local_addr, override_remote_addr, inp, NULL)) {
6249 // Mark socket as a pass
6250 inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
6251 inp->inp_policyresult.policy_gencount = 0;
6252 inp->inp_policyresult.app_id = 0;
6253 inp->inp_policyresult.flowhash = 0;
6254 inp->inp_policyresult.results.filter_control_unit = 0;
6255 inp->inp_policyresult.results.route_rule_id = 0;
6256 inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_PASS;
6257 return (NECP_KERNEL_POLICY_ID_NONE);
6258 }
6259
6260 // Lock
6261 lck_rw_lock_shared(&necp_kernel_policy_lock);
6262
6263 necp_socket_fillout_info_locked(inp, override_local_addr, override_remote_addr, override_bound_interface, &info);
6264 inp->inp_policyresult.app_id = info.application_id;
6265
6266 // Check whether a cached result for this socket and policy generation can be reused
6267 u_int32_t flowhash = necp_socket_calc_flowhash_locked(&info);
6268 if (inp->inp_policyresult.policy_id != NECP_KERNEL_POLICY_ID_NONE &&
6269 inp->inp_policyresult.policy_gencount == necp_kernel_socket_policies_gencount &&
6270 inp->inp_policyresult.flowhash == flowhash) {
6271 // If already matched this socket on this generation of table, skip
6272
6273 // Unlock
6274 lck_rw_done(&necp_kernel_policy_lock);
6275
6276 return (inp->inp_policyresult.policy_id);
6277 }
6278
6279 // Match socket to policy
6280 matched_policy = necp_socket_find_policy_match_with_info_locked(necp_kernel_socket_policies_map[NECP_SOCKET_MAP_APP_ID_TO_BUCKET(info.application_id)], &info, &filter_control_unit, &route_rule_id, &service_action, &service, netagent_ids, NECP_MAX_NETAGENTS, current_proc());
6281 // If the socket matched a scoped service policy, mark as Drop if not registered.
6282 // This covers the cases in which a service is required (on demand) but hasn't started yet.
6283 if ((service_action == NECP_KERNEL_POLICY_RESULT_TRIGGER_SCOPED ||
6284 service_action == NECP_KERNEL_POLICY_RESULT_NO_TRIGGER_SCOPED) &&
6285 service.identifier != 0 &&
6286 service.identifier != NECP_NULL_SERVICE_ID) {
6287 bool service_is_registered = FALSE;
6288 struct necp_service_registration *service_registration = NULL;
6289 LIST_FOREACH(service_registration, &necp_registered_service_list, kernel_chain) {
6290 if (service.identifier == service_registration->service_id) {
6291 service_is_registered = TRUE;
6292 break;
6293 }
6294 }
6295 if (!service_is_registered) {
6296 // Mark socket as a drop if service is not registered
6297 inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
6298 inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount;
6299 inp->inp_policyresult.flowhash = flowhash;
6300 inp->inp_policyresult.results.filter_control_unit = 0;
6301 inp->inp_policyresult.results.route_rule_id = 0;
6302 inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_DROP;
6303
6304 if (necp_debug > 1) {
6305 NECPLOG(LOG_DEBUG, "Socket Policy: (BoundInterface %d Proto %d) Dropping packet because service is not registered", info.bound_interface_index, info.protocol);
6306 }
6307
6308 // Unlock
6309 lck_rw_done(&necp_kernel_policy_lock);
6310 return (NECP_KERNEL_POLICY_ID_NONE);
6311 }
6312 }
6313 // Verify netagents
6314 for (netagent_cursor = 0; netagent_cursor < NECP_MAX_NETAGENTS; netagent_cursor++) {
6315 struct necp_uuid_id_mapping *mapping = NULL;
6316 u_int32_t netagent_id = netagent_ids[netagent_cursor];
6317 if (netagent_id == 0) {
6318 break;
6319 }
6320 mapping = necp_uuid_lookup_uuid_with_service_id_locked(netagent_id);
6321 if (mapping != NULL) {
6322 u_int32_t agent_flags = 0;
6323 agent_flags = netagent_get_flags(mapping->uuid);
6324 if (agent_flags & NETAGENT_FLAG_REGISTERED) {
6325 if (agent_flags & NETAGENT_FLAG_ACTIVE) {
6326 continue;
6327 } else if ((agent_flags & NETAGENT_FLAG_VOLUNTARY) == 0) {
6328 if (agent_flags & NETAGENT_FLAG_KERNEL_ACTIVATED) {
6329 int trigger_error = 0;
6330 trigger_error = netagent_kernel_trigger(mapping->uuid);
6331 if (necp_debug > 1) {
6332 NECPLOG(LOG_DEBUG, "Socket Policy: Triggering inactive agent, error %d", trigger_error);
6333 }
6334 }
6335
6336 // Mark socket as a drop if required agent is not active
6337 inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
6338 inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount;
6339 inp->inp_policyresult.flowhash = flowhash;
6340 inp->inp_policyresult.results.filter_control_unit = 0;
6341 inp->inp_policyresult.results.route_rule_id = 0;
6342 inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_DROP;
6343
6344 if (necp_debug > 1) {
6345 NECPLOG(LOG_DEBUG, "Socket Policy: (BoundInterface %d Proto %d) Dropping packet because agent is not active", info.bound_interface_index, info.protocol);
6346 }
6347
6348 // Unlock
6349 lck_rw_done(&necp_kernel_policy_lock);
6350 return (NECP_KERNEL_POLICY_ID_NONE);
6351 }
6352 }
6353 }
6354 }
6355 if (matched_policy) {
6356 matched_policy_id = matched_policy->id;
6357 inp->inp_policyresult.policy_id = matched_policy->id;
6358 inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount;
6359 inp->inp_policyresult.flowhash = flowhash;
6360 inp->inp_policyresult.results.filter_control_unit = filter_control_unit;
6361 inp->inp_policyresult.results.route_rule_id = route_rule_id;
6362 inp->inp_policyresult.results.result = matched_policy->result;
6363 memcpy(&inp->inp_policyresult.results.result_parameter, &matched_policy->result_parameter, sizeof(matched_policy->result_parameter));
6364
6365 if (necp_socket_is_connected(inp) &&
6366 (matched_policy->result == NECP_KERNEL_POLICY_RESULT_DROP ||
6367 (matched_policy->result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL && !necp_socket_uses_interface(inp, matched_policy->result_parameter.tunnel_interface_index)))) {
6368 if (necp_debug) {
6369 NECPLOG(LOG_DEBUG, "Marking socket in state %d as defunct", so->so_state);
6370 }
6371 sosetdefunct(current_proc(), so, SHUTDOWN_SOCKET_LEVEL_NECP | SHUTDOWN_SOCKET_LEVEL_DISCONNECT_ALL, TRUE);
6372 } else if (necp_socket_is_connected(inp) &&
6373 matched_policy->result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL &&
6374 info.protocol == IPPROTO_TCP) {
6375 // Reset MSS on TCP socket if tunnel policy changes
6376 tcp_mtudisc(inp, 0);
6377 }
6378
6379 if (necp_debug > 1) {
6380 NECPLOG(LOG_DEBUG, "Socket Policy: %p (BoundInterface %d Proto %d) Policy %d Result %d Parameter %d", inp->inp_socket, info.bound_interface_index, info.protocol, matched_policy->id, matched_policy->result, matched_policy->result_parameter.tunnel_interface_index);
6381 }
6382 } else if (necp_drop_all_order > 0) {
6383 // Mark socket as a drop if set
6384 inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
6385 inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount;
6386 inp->inp_policyresult.flowhash = flowhash;
6387 inp->inp_policyresult.results.filter_control_unit = 0;
6388 inp->inp_policyresult.results.route_rule_id = 0;
6389 inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_DROP;
6390 } else {
6391 // Mark non-matching socket so we don't re-check it
6392 inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
6393 inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount;
6394 inp->inp_policyresult.flowhash = flowhash;
6395 inp->inp_policyresult.results.filter_control_unit = filter_control_unit; // We may have matched a filter, so mark it!
6396 inp->inp_policyresult.results.route_rule_id = route_rule_id; // We may have matched a route rule, so mark it!
6397 inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_NONE;
6398 }
6399
6400 // Unlock
6401 lck_rw_done(&necp_kernel_policy_lock);
6402
6403 return (matched_policy_id);
6404 }
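
/*
 * Illustrative sketch (not part of necp.c): the early return above reuses a
 * cached verdict when the policy generation count and the flow hash of the
 * matching inputs are unchanged. Because the gencount is also the hash seed
 * (see necp_socket_calc_flowhash_locked), any policy change produces a new
 * hash as well. A userland model with an invented example_info struct and a
 * simple FNV-1a hash standing in for net_flowhash().
 */
#if 0 /* example only */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct example_info {
	int pid;
	int protocol;
	uint32_t bound_interface_index;
};

static uint32_t
example_hash(const void *buf, size_t len, uint32_t seed)
{
	/* Seeded FNV-1a; purely illustrative, not the kernel's hash. */
	const uint8_t *p = buf;
	uint32_t h = 2166136261u ^ seed;
	for (size_t i = 0; i < len; i++) {
		h = (h ^ p[i]) * 16777619u;
	}
	return h;
}

int
main(void)
{
	struct example_info info;
	memset(&info, 0, sizeof(info)); /* zero padding so the hash is deterministic */
	info.pid = 100;
	info.protocol = 6;
	info.bound_interface_index = 4;

	uint32_t gencount = 7; /* bumped whenever the policy table changes */
	uint32_t cached = example_hash(&info, sizeof(info), gencount);

	/* Same inputs, same generation: the cached result can be reused. */
	printf("reuse: %d\n", example_hash(&info, sizeof(info), gencount) == cached);

	/* A policy change bumps the generation, invalidating the cached hash. */
	gencount++;
	printf("reuse after gencount bump: %d\n",
	    example_hash(&info, sizeof(info), gencount) == cached);
	return 0;
}
#endif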
6405
6406 static bool
6407 necp_ip_output_check_policy(struct necp_kernel_ip_output_policy *kernel_policy, necp_kernel_policy_id socket_policy_id, u_int32_t bound_interface_index, u_int32_t last_interface_index, u_int16_t protocol, union necp_sockaddr_union *local, union necp_sockaddr_union *remote)
6408 {
6409 if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES)) {
6410 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
6411 u_int32_t cond_bound_interface_index = kernel_policy->cond_bound_interface ? kernel_policy->cond_bound_interface->if_index : 0;
6412 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
6413 if (bound_interface_index == cond_bound_interface_index) {
6414 // No match, matches forbidden interface
6415 return (FALSE);
6416 }
6417 } else {
6418 if (bound_interface_index != cond_bound_interface_index) {
6419 // No match, does not match required interface
6420 return (FALSE);
6421 }
6422 }
6423 } else {
6424 if (bound_interface_index != 0) {
6425 // No match, requires a non-bound packet
6426 return (FALSE);
6427 }
6428 }
6429 }
6430
6431 if (kernel_policy->condition_mask == 0) {
6432 return (TRUE);
6433 }
6434
6435 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID) {
6436 if (socket_policy_id != kernel_policy->cond_policy_id) {
6437 // No match, does not match required id
6438 return (FALSE);
6439 }
6440 }
6441
6442 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LAST_INTERFACE) {
6443 if (last_interface_index != kernel_policy->cond_last_interface_index) {
6444 return (FALSE);
6445 }
6446 }
6447
6448 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
6449 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_PROTOCOL) {
6450 if (protocol == kernel_policy->cond_protocol) {
6451 // No match, matches forbidden protocol
6452 return (FALSE);
6453 }
6454 } else {
6455 if (protocol != kernel_policy->cond_protocol) {
6456 // No match, does not match required protocol
6457 return (FALSE);
6458 }
6459 }
6460 }
6461
6462 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) {
6463 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
6464 bool inRange = necp_is_addr_in_range((struct sockaddr *)local, (struct sockaddr *)&kernel_policy->cond_local_start, (struct sockaddr *)&kernel_policy->cond_local_end);
6465 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_LOCAL_END) {
6466 if (inRange) {
6467 return (FALSE);
6468 }
6469 } else {
6470 if (!inRange) {
6471 return (FALSE);
6472 }
6473 }
6474 } else if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
6475 bool inSubnet = necp_is_addr_in_subnet((struct sockaddr *)local, (struct sockaddr *)&kernel_policy->cond_local_start, kernel_policy->cond_local_prefix);
6476 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) {
6477 if (inSubnet) {
6478 return (FALSE);
6479 }
6480 } else {
6481 if (!inSubnet) {
6482 return (FALSE);
6483 }
6484 }
6485 }
6486 }
6487
6488 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) {
6489 if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
6490 bool inRange = necp_is_addr_in_range((struct sockaddr *)remote, (struct sockaddr *)&kernel_policy->cond_remote_start, (struct sockaddr *)&kernel_policy->cond_remote_end);
6491 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REMOTE_END) {
6492 if (inRange) {
6493 return (FALSE);
6494 }
6495 } else {
6496 if (!inRange) {
6497 return (FALSE);
6498 }
6499 }
6500 } else if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
6501 bool inSubnet = necp_is_addr_in_subnet((struct sockaddr *)remote, (struct sockaddr *)&kernel_policy->cond_remote_start, kernel_policy->cond_remote_prefix);
6502 if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) {
6503 if (inSubnet) {
6504 return (FALSE);
6505 }
6506 } else {
6507 if (!inSubnet) {
6508 return (FALSE);
6509 }
6510 }
6511 }
6512 }
6513
6514 return (TRUE);
6515 }
6516
6517 static inline struct necp_kernel_ip_output_policy *
6518 necp_ip_output_find_policy_match_locked(necp_kernel_policy_id socket_policy_id, u_int32_t bound_interface_index, u_int32_t last_interface_index, u_int16_t protocol, union necp_sockaddr_union *local_addr, union necp_sockaddr_union *remote_addr)
6519 {
6520 u_int32_t skip_order = 0;
6521 u_int32_t skip_session_order = 0;
6522 int i;
6523 struct necp_kernel_ip_output_policy *matched_policy = NULL;
6524 struct necp_kernel_ip_output_policy **policy_search_array = necp_kernel_ip_output_policies_map[NECP_IP_OUTPUT_MAP_ID_TO_BUCKET(socket_policy_id)];
6525 if (policy_search_array != NULL) {
6526 for (i = 0; policy_search_array[i] != NULL; i++) {
6527 if (necp_drop_all_order != 0 && policy_search_array[i]->session_order >= necp_drop_all_order) {
6528 // We've hit a drop all rule
6529 break;
6530 }
6531 if (skip_session_order && policy_search_array[i]->session_order >= skip_session_order) {
6532 // Done skipping
6533 skip_order = 0;
6534 skip_session_order = 0;
6535 }
6536 if (skip_order) {
6537 if (policy_search_array[i]->order < skip_order) {
6538 // Skip this policy
6539 continue;
6540 } else {
6541 // Done skipping
6542 skip_order = 0;
6543 skip_session_order = 0;
6544 }
6545 } else if (skip_session_order) {
6546 // Skip this policy
6547 continue;
6548 }
6549 if (necp_ip_output_check_policy(policy_search_array[i], socket_policy_id, bound_interface_index, last_interface_index, protocol, local_addr, remote_addr)) {
6550 // Passed all tests, found a match
6551 matched_policy = policy_search_array[i];
6552
6553 if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_SKIP) {
6554 skip_order = policy_search_array[i]->result_parameter.skip_policy_order;
6555 skip_session_order = policy_search_array[i]->session_order + 1;
6556 continue;
6557 }
6558
6559 break;
6560 }
6561 }
6562 }
6563
6564 return (matched_policy);
6565 }
6566
6567 necp_kernel_policy_id
6568 necp_ip_output_find_policy_match(struct mbuf *packet, int flags, struct ip_out_args *ipoa, necp_kernel_policy_result *result, necp_kernel_policy_result_parameter *result_parameter)
6569 {
6570 struct ip *ip = NULL;
6571 int hlen = sizeof(struct ip);
6572 necp_kernel_policy_id socket_policy_id = NECP_KERNEL_POLICY_ID_NONE;
6573 necp_kernel_policy_id matched_policy_id = NECP_KERNEL_POLICY_ID_NONE;
6574 struct necp_kernel_ip_output_policy *matched_policy = NULL;
6575 u_int16_t protocol = 0;
6576 u_int32_t bound_interface_index = 0;
6577 u_int32_t last_interface_index = 0;
6578 union necp_sockaddr_union local_addr;
6579 union necp_sockaddr_union remote_addr;
6580
6581 if (result) {
6582 *result = 0;
6583 }
6584
6585 if (result_parameter) {
6586 memset(result_parameter, 0, sizeof(*result_parameter));
6587 }
6588
6589 if (packet == NULL) {
6590 return (NECP_KERNEL_POLICY_ID_NONE);
6591 }
6592
6593 socket_policy_id = necp_get_policy_id_from_packet(packet);
6594
6595 // Exit early for an empty list
6596 // Don't lock. Possible race condition, but we don't want the performance hit.
6597 if (necp_kernel_ip_output_policies_count == 0 ||
6598 ((socket_policy_id == NECP_KERNEL_POLICY_ID_NONE) && necp_kernel_ip_output_policies_non_id_count == 0)) {
6599 if (necp_drop_all_order > 0) {
6600 matched_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
6601 if (result) {
6602 if ((necp_pass_loopback > 0 &&
6603 necp_is_loopback(NULL, NULL, NULL, packet)) ||
6604 (necp_pass_keepalives > 0 &&
6605 necp_get_is_keepalive_from_packet(packet))) {
6606 *result = NECP_KERNEL_POLICY_RESULT_PASS;
6607 } else {
6608 *result = NECP_KERNEL_POLICY_RESULT_DROP;
6609 }
6610 }
6611 }
6612
6613 return (matched_policy_id);
6614 }
6615
6616 // Check for loopback exception
6617 if ((necp_pass_loopback > 0 &&
6618 necp_is_loopback(NULL, NULL, NULL, packet)) ||
6619 (necp_pass_keepalives > 0 &&
6620 necp_get_is_keepalive_from_packet(packet))) {
6621 matched_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
6622 if (result) {
6623 *result = NECP_KERNEL_POLICY_RESULT_PASS;
6624 }
6625 return (matched_policy_id);
6626 }
6627
6628 last_interface_index = necp_get_last_interface_index_from_packet(packet);
6629
6630 // Process packet to get relevant fields
6631 ip = mtod(packet, struct ip *);
6632 #ifdef _IP_VHL
6633 hlen = _IP_VHL_HL(ip->ip_vhl) << 2;
6634 #else
6635 hlen = ip->ip_hl << 2;
6636 #endif
6637
6638 protocol = ip->ip_p;
6639
6640 if ((flags & IP_OUTARGS) && (ipoa != NULL) &&
6641 (ipoa->ipoa_flags & IPOAF_BOUND_IF) &&
6642 ipoa->ipoa_boundif != IFSCOPE_NONE) {
6643 bound_interface_index = ipoa->ipoa_boundif;
6644 }
6645
6646 local_addr.sin.sin_family = AF_INET;
6647 local_addr.sin.sin_len = sizeof(struct sockaddr_in);
6648 memcpy(&local_addr.sin.sin_addr, &ip->ip_src, sizeof(ip->ip_src));
6649
6650 remote_addr.sin.sin_family = AF_INET;
6651 remote_addr.sin.sin_len = sizeof(struct sockaddr_in);
6652 memcpy(&((struct sockaddr_in *)&remote_addr)->sin_addr, &ip->ip_dst, sizeof(ip->ip_dst));
6653
6654 switch (protocol) {
6655 case IPPROTO_TCP: {
6656 struct tcphdr th;
6657 if ((int)(hlen + sizeof(th)) <= packet->m_pkthdr.len) {
6658 m_copydata(packet, hlen, sizeof(th), (u_int8_t *)&th);
6659 ((struct sockaddr_in *)&local_addr)->sin_port = th.th_sport;
6660 ((struct sockaddr_in *)&remote_addr)->sin_port = th.th_dport;
6661 }
6662 break;
6663 }
6664 case IPPROTO_UDP: {
6665 struct udphdr uh;
6666 if ((int)(hlen + sizeof(uh)) <= packet->m_pkthdr.len) {
6667 m_copydata(packet, hlen, sizeof(uh), (u_int8_t *)&uh);
6668 ((struct sockaddr_in *)&local_addr)->sin_port = uh.uh_sport;
6669 ((struct sockaddr_in *)&remote_addr)->sin_port = uh.uh_dport;
6670 }
6671 break;
6672 }
6673 default: {
6674 ((struct sockaddr_in *)&local_addr)->sin_port = 0;
6675 ((struct sockaddr_in *)&remote_addr)->sin_port = 0;
6676 break;
6677 }
6678 }
6679
6680 // Match packet to policy
6681 lck_rw_lock_shared(&necp_kernel_policy_lock);
6682 matched_policy = necp_ip_output_find_policy_match_locked(socket_policy_id, bound_interface_index, last_interface_index, protocol, &local_addr, &remote_addr);
6683 if (matched_policy) {
6684 matched_policy_id = matched_policy->id;
6685 if (result) {
6686 *result = matched_policy->result;
6687 }
6688
6689 if (result_parameter) {
6690 memcpy(result_parameter, &matched_policy->result_parameter, sizeof(matched_policy->result_parameter));
6691 }
6692
6693 if (necp_debug > 1) {
6694 NECPLOG(LOG_DEBUG, "IP Output: (ID %d BoundInterface %d LastInterface %d Proto %d) Policy %d Result %d Parameter %d", socket_policy_id, bound_interface_index, last_interface_index, protocol, matched_policy->id, matched_policy->result, matched_policy->result_parameter.tunnel_interface_index);
6695 }
6696 } else if (necp_drop_all_order > 0) {
6697 matched_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
6698 if (result) {
6699 *result = NECP_KERNEL_POLICY_RESULT_DROP;
6700 }
6701 }
6702
6703 lck_rw_done(&necp_kernel_policy_lock);
6704
6705 return (matched_policy_id);
6706 }
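
/*
 * Illustrative sketch (not part of necp.c): the IPv4 path above derives the
 * header length from ip_hl (a count of 32-bit words) and copies the
 * transport ports from just past the IP header. A userland walk over a
 * hand-built IPv4 + UDP buffer using the BSD-style netinet headers; the
 * packet contents here are made up.
 */
#if 0 /* example only */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/udp.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	unsigned char packet[sizeof(struct ip) + sizeof(struct udphdr)];
	memset(packet, 0, sizeof(packet));

	/* Build a minimal IPv4 header followed by a UDP header. */
	struct ip iph;
	memset(&iph, 0, sizeof(iph));
	iph.ip_v = 4;
	iph.ip_hl = sizeof(struct ip) >> 2; /* header length in 32-bit words */
	iph.ip_p = IPPROTO_UDP;
	memcpy(packet, &iph, sizeof(iph));

	struct udphdr udp;
	memset(&udp, 0, sizeof(udp));
	udp.uh_sport = htons(12345);
	udp.uh_dport = htons(53);
	memcpy(packet + sizeof(struct ip), &udp, sizeof(udp));

	/* Same walk as the function above: hlen from ip_hl, ports after it. */
	struct ip parsed_ip;
	struct udphdr parsed_udp;
	memcpy(&parsed_ip, packet, sizeof(parsed_ip));
	int hlen = parsed_ip.ip_hl << 2;
	memcpy(&parsed_udp, packet + hlen, sizeof(parsed_udp));
	printf("proto %d sport %d dport %d\n", parsed_ip.ip_p,
	    ntohs(parsed_udp.uh_sport), ntohs(parsed_udp.uh_dport));
	return 0;
}
#endif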
6707
6708 necp_kernel_policy_id
6709 necp_ip6_output_find_policy_match(struct mbuf *packet, int flags, struct ip6_out_args *ip6oa, necp_kernel_policy_result *result, necp_kernel_policy_result_parameter *result_parameter)
6710 {
6711 struct ip6_hdr *ip6 = NULL;
6712 int next = -1;
6713 int offset = 0;
6714 necp_kernel_policy_id socket_policy_id = NECP_KERNEL_POLICY_ID_NONE;
6715 necp_kernel_policy_id matched_policy_id = NECP_KERNEL_POLICY_ID_NONE;
6716 struct necp_kernel_ip_output_policy *matched_policy = NULL;
6717 u_int16_t protocol = 0;
6718 u_int32_t bound_interface_index = 0;
6719 u_int32_t last_interface_index = 0;
6720 union necp_sockaddr_union local_addr;
6721 union necp_sockaddr_union remote_addr;
6722
6723 if (result) {
6724 *result = 0;
6725 }
6726
6727 if (result_parameter) {
6728 memset(result_parameter, 0, sizeof(*result_parameter));
6729 }
6730
6731 if (packet == NULL) {
6732 return (NECP_KERNEL_POLICY_ID_NONE);
6733 }
6734
6735 socket_policy_id = necp_get_policy_id_from_packet(packet);
6736
6737 // Exit early for an empty list
6738 // Don't lock. Possible race condition, but we don't want the performance hit.
6739 if (necp_kernel_ip_output_policies_count == 0 ||
6740 ((socket_policy_id == NECP_KERNEL_POLICY_ID_NONE) && necp_kernel_ip_output_policies_non_id_count == 0)) {
6741 if (necp_drop_all_order > 0) {
6742 matched_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
6743 if (result) {
6744 if ((necp_pass_loopback > 0 &&
6745 necp_is_loopback(NULL, NULL, NULL, packet)) ||
6746 (necp_pass_keepalives > 0 &&
6747 necp_get_is_keepalive_from_packet(packet))) {
6748 *result = NECP_KERNEL_POLICY_RESULT_PASS;
6749 } else {
6750 *result = NECP_KERNEL_POLICY_RESULT_DROP;
6751 }
6752 }
6753 }
6754
6755 return (matched_policy_id);
6756 }
6757
6758 // Check for loopback exception
6759 if ((necp_pass_loopback > 0 &&
6760 necp_is_loopback(NULL, NULL, NULL, packet)) ||
6761 (necp_pass_keepalives > 0 &&
6762 necp_get_is_keepalive_from_packet(packet))) {
6763 matched_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
6764 if (result) {
6765 *result = NECP_KERNEL_POLICY_RESULT_PASS;
6766 }
6767 return (matched_policy_id);
6768 }
6769
6770 last_interface_index = necp_get_last_interface_index_from_packet(packet);
6771
6772 // Process packet to get relevant fields
6773 ip6 = mtod(packet, struct ip6_hdr *);
6774
6775 if ((flags & IPV6_OUTARGS) && (ip6oa != NULL) &&
6776 (ip6oa->ip6oa_flags & IP6OAF_BOUND_IF) &&
6777 ip6oa->ip6oa_boundif != IFSCOPE_NONE) {
6778 bound_interface_index = ip6oa->ip6oa_boundif;
6779 }
6780
6781 ((struct sockaddr_in6 *)&local_addr)->sin6_family = AF_INET6;
6782 ((struct sockaddr_in6 *)&local_addr)->sin6_len = sizeof(struct sockaddr_in6);
6783 memcpy(&((struct sockaddr_in6 *)&local_addr)->sin6_addr, &ip6->ip6_src, sizeof(ip6->ip6_src));
6784
6785 ((struct sockaddr_in6 *)&remote_addr)->sin6_family = AF_INET6;
6786 ((struct sockaddr_in6 *)&remote_addr)->sin6_len = sizeof(struct sockaddr_in6);
6787 memcpy(&((struct sockaddr_in6 *)&remote_addr)->sin6_addr, &ip6->ip6_dst, sizeof(ip6->ip6_dst));
6788
6789 offset = ip6_lasthdr(packet, 0, IPPROTO_IPV6, &next);
6790 if (offset >= 0 && packet->m_pkthdr.len >= offset) {
6791 protocol = next;
6792 switch (protocol) {
6793 case IPPROTO_TCP: {
6794 struct tcphdr th;
6795 if ((int)(offset + sizeof(th)) <= packet->m_pkthdr.len) {
6796 m_copydata(packet, offset, sizeof(th), (u_int8_t *)&th);
6797 ((struct sockaddr_in6 *)&local_addr)->sin6_port = th.th_sport;
6798 ((struct sockaddr_in6 *)&remote_addr)->sin6_port = th.th_dport;
6799 }
6800 break;
6801 }
6802 case IPPROTO_UDP: {
6803 struct udphdr uh;
6804 if ((int)(offset + sizeof(uh)) <= packet->m_pkthdr.len) {
6805 m_copydata(packet, offset, sizeof(uh), (u_int8_t *)&uh);
6806 ((struct sockaddr_in6 *)&local_addr)->sin6_port = uh.uh_sport;
6807 ((struct sockaddr_in6 *)&remote_addr)->sin6_port = uh.uh_dport;
6808 }
6809 break;
6810 }
6811 default: {
6812 ((struct sockaddr_in6 *)&local_addr)->sin6_port = 0;
6813 ((struct sockaddr_in6 *)&remote_addr)->sin6_port = 0;
6814 break;
6815 }
6816 }
6817 }
6818
6819 // Match packet to policy
6820 lck_rw_lock_shared(&necp_kernel_policy_lock);
6821 matched_policy = necp_ip_output_find_policy_match_locked(socket_policy_id, bound_interface_index, last_interface_index, protocol, &local_addr, &remote_addr);
6822 if (matched_policy) {
6823 matched_policy_id = matched_policy->id;
6824 if (result) {
6825 *result = matched_policy->result;
6826 }
6827
6828 if (result_parameter) {
6829 memcpy(result_parameter, &matched_policy->result_parameter, sizeof(matched_policy->result_parameter));
6830 }
6831
6832 if (necp_debug > 1) {
6833 NECPLOG(LOG_DEBUG, "IP6 Output: (ID %d BoundInterface %d LastInterface %d Proto %d) Policy %d Result %d Parameter %d", socket_policy_id, bound_interface_index, last_interface_index, protocol, matched_policy->id, matched_policy->result, matched_policy->result_parameter.tunnel_interface_index);
6834 }
6835 } else if (necp_drop_all_order > 0) {
6836 matched_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
6837 if (result) {
6838 *result = NECP_KERNEL_POLICY_RESULT_DROP;
6839 }
6840 }
6841
6842 lck_rw_done(&necp_kernel_policy_lock);
6843
6844 return (matched_policy_id);
6845 }
6846
6847 // Utilities
6848 static bool
6849 necp_is_addr_in_range(struct sockaddr *addr, struct sockaddr *range_start, struct sockaddr *range_end)
6850 {
6851 int cmp = 0;
6852
6853 if (addr == NULL || range_start == NULL || range_end == NULL) {
6854 return (FALSE);
6855 }
6856
6857 /* Must be greater than or equal to start */
6858 cmp = necp_addr_compare(addr, range_start, 1);
6859 if (cmp != 0 && cmp != 1) {
6860 return (FALSE);
6861 }
6862
6863 /* Must be less than or equal to end */
6864 cmp = necp_addr_compare(addr, range_end, 1);
6865 if (cmp != 0 && cmp != -1) {
6866 return (FALSE);
6867 }
6868
6869 return (TRUE);
6870 }
6871
6872 static bool
6873 necp_is_range_in_range(struct sockaddr *inner_range_start, struct sockaddr *inner_range_end, struct sockaddr *range_start, struct sockaddr *range_end)
6874 {
6875 int cmp = 0;
6876
6877 if (inner_range_start == NULL || inner_range_end == NULL || range_start == NULL || range_end == NULL) {
6878 return (FALSE);
6879 }
6880
6881 /* Must be greater than or equal to start */
6882 cmp = necp_addr_compare(inner_range_start, range_start, 1);
6883 if (cmp != 0 && cmp != 1) {
6884 return (FALSE);
6885 }
6886
6887 /* Must be less than or equal to end */
6888 cmp = necp_addr_compare(inner_range_end, range_end, 1);
6889 if (cmp != 0 && cmp != -1) {
6890 return (FALSE);
6891 }
6892
6893 return (TRUE);
6894 }
6895
6896 static bool
6897 necp_is_addr_in_subnet(struct sockaddr *addr, struct sockaddr *subnet_addr, u_int8_t subnet_prefix)
6898 {
6899 if (addr == NULL || subnet_addr == NULL) {
6900 return (FALSE);
6901 }
6902
6903 if (addr->sa_family != subnet_addr->sa_family || addr->sa_len != subnet_addr->sa_len) {
6904 return (FALSE);
6905 }
6906
6907 switch (addr->sa_family) {
6908 case AF_INET: {
6909 if (satosin(subnet_addr)->sin_port != 0 &&
6910 satosin(addr)->sin_port != satosin(subnet_addr)->sin_port) {
6911 return (FALSE);
6912 }
6913 return (necp_buffer_compare_with_bit_prefix((u_int8_t *)&satosin(addr)->sin_addr, (u_int8_t *)&satosin(subnet_addr)->sin_addr, subnet_prefix));
6914 }
6915 case AF_INET6: {
6916 if (satosin6(subnet_addr)->sin6_port != 0 &&
6917 satosin6(addr)->sin6_port != satosin6(subnet_addr)->sin6_port) {
6918 return (FALSE);
6919 }
6920 if (satosin6(addr)->sin6_scope_id &&
6921 satosin6(subnet_addr)->sin6_scope_id &&
6922 satosin6(addr)->sin6_scope_id != satosin6(subnet_addr)->sin6_scope_id) {
6923 return (FALSE);
6924 }
6925 return (necp_buffer_compare_with_bit_prefix((u_int8_t *)&satosin6(addr)->sin6_addr, (u_int8_t *)&satosin6(subnet_addr)->sin6_addr, subnet_prefix));
6926 }
6927 default: {
6928 return (FALSE);
6929 }
6930 }
6931
6932 return (FALSE);
6933 }
6934
6935 /*
6936 * Return values:
6937 * -1: sa1 < sa2
6938 * 0: sa1 == sa2
6939 * 1: sa1 > sa2
6940 * 2: Not comparable or error
6941 */
6942 static int
6943 necp_addr_compare(struct sockaddr *sa1, struct sockaddr *sa2, int check_port)
6944 {
6945 int result = 0;
6946 int port_result = 0;
6947
6948 if (sa1->sa_family != sa2->sa_family || sa1->sa_len != sa2->sa_len) {
6949 return (2);
6950 }
6951
6952 if (sa1->sa_len == 0) {
6953 return (0);
6954 }
6955
6956 switch (sa1->sa_family) {
6957 case AF_INET: {
6958 if (sa1->sa_len != sizeof(struct sockaddr_in)) {
6959 return (2);
6960 }
6961
6962 result = memcmp(&satosin(sa1)->sin_addr.s_addr, &satosin(sa2)->sin_addr.s_addr, sizeof(satosin(sa1)->sin_addr.s_addr));
6963
6964 if (check_port) {
6965 if (satosin(sa1)->sin_port < satosin(sa2)->sin_port) {
6966 port_result = -1;
6967 } else if (satosin(sa1)->sin_port > satosin(sa2)->sin_port) {
6968 port_result = 1;
6969 }
6970
6971 if (result == 0) {
6972 result = port_result;
6973 } else if ((result > 0 && port_result < 0) || (result < 0 && port_result > 0)) {
6974 return (2);
6975 }
6976 }
6977
6978 break;
6979 }
6980 case AF_INET6: {
6981 if (sa1->sa_len != sizeof(struct sockaddr_in6)) {
6982 return (2);
6983 }
6984
6985 if (satosin6(sa1)->sin6_scope_id != satosin6(sa2)->sin6_scope_id) {
6986 return (2);
6987 }
6988
6989 result = memcmp(&satosin6(sa1)->sin6_addr.s6_addr[0], &satosin6(sa2)->sin6_addr.s6_addr[0], sizeof(struct in6_addr));
6990
6991 if (check_port) {
6992 if (satosin6(sa1)->sin6_port < satosin6(sa2)->sin6_port) {
6993 port_result = -1;
6994 } else if (satosin6(sa1)->sin6_port > satosin6(sa2)->sin6_port) {
6995 port_result = 1;
6996 }
6997
6998 if (result == 0) {
6999 result = port_result;
7000 } else if ((result > 0 && port_result < 0) || (result < 0 && port_result > 0)) {
7001 return (2);
7002 }
7003 }
7004
7005 break;
7006 }
7007 default: {
7008 result = memcmp(sa1, sa2, sa1->sa_len);
7009 break;
7010 }
7011 }
7012
7013 if (result < 0) {
7014 result = (-1);
7015 } else if (result > 0) {
7016 result = (1);
7017 }
7018
7019 return (result);
7020 }
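
/*
 * Illustrative sketch (not part of necp.c): when ports are checked,
 * necp_addr_compare() reports 2 (not comparable) if the address order and
 * the port order disagree, which is what keeps the range checks above
 * well-defined. A miniature integer-only model of that rule
 * (example_compare is an invented name).
 */
#if 0 /* example only */
#include <stdio.h>

static int
example_compare(unsigned int addr1, unsigned int port1,
    unsigned int addr2, unsigned int port2)
{
	int result = (addr1 < addr2) ? -1 : (addr1 > addr2) ? 1 : 0;
	int port_result = (port1 < port2) ? -1 : (port1 > port2) ? 1 : 0;
	if (result == 0) {
		return port_result;
	}
	if ((result > 0 && port_result < 0) || (result < 0 && port_result > 0)) {
		return 2; /* address order and port order disagree */
	}
	return result;
}

int
main(void)
{
	printf("%d\n", example_compare(10, 80, 10, 80));  /*  0: equal */
	printf("%d\n", example_compare(10, 80, 20, 80));  /* -1: smaller address */
	printf("%d\n", example_compare(20, 80, 10, 443)); /*  2: not comparable */
	return 0;
}
#endif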
7021
7022 static bool
7023 necp_buffer_compare_with_bit_prefix(u_int8_t *p1, u_int8_t *p2, u_int32_t bits)
7024 {
7025 u_int8_t mask;
7026
7027 /* Handle null pointers */
7028 if (p1 == NULL || p2 == NULL) {
7029 return (p1 == p2);
7030 }
7031
7032 while (bits >= 8) {
7033 if (*p1++ != *p2++) {
7034 return (FALSE);
7035 }
7036 bits -= 8;
7037 }
7038
7039 if (bits > 0) {
7040 mask = ~((1<<(8-bits))-1);
7041 if ((*p1 & mask) != (*p2 & mask)) {
7042 return (FALSE);
7043 }
7044 }
7045 return (TRUE);
7046 }
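
/*
 * Illustrative sketch (not part of necp.c): the prefix comparison above
 * walks whole bytes first and then masks the remaining high-order bits,
 * which is what backs the LOCAL/REMOTE prefix conditions via
 * necp_is_addr_in_subnet(). A standalone re-run of the same walk over an
 * IPv4 address (example_prefix_match mirrors the function above).
 */
#if 0 /* example only */
#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool
example_prefix_match(const uint8_t *p1, const uint8_t *p2, uint32_t bits)
{
	while (bits >= 8) {
		if (*p1++ != *p2++) {
			return false;
		}
		bits -= 8;
	}
	if (bits > 0) {
		uint8_t mask = ~((1 << (8 - bits)) - 1);
		if ((*p1 & mask) != (*p2 & mask)) {
			return false;
		}
	}
	return true;
}

int
main(void)
{
	struct in_addr addr, subnet;
	inet_pton(AF_INET, "10.1.2.3", &addr);
	inet_pton(AF_INET, "10.1.0.0", &subnet);
	printf("10.1.2.3 in 10.1.0.0/16: %d\n",
	    example_prefix_match((uint8_t *)&addr, (uint8_t *)&subnet, 16)); /* 1 */
	printf("10.1.2.3 in 10.1.0.0/24: %d\n",
	    example_prefix_match((uint8_t *)&addr, (uint8_t *)&subnet, 24)); /* 0 */
	return 0;
}
#endif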
7047
7048 static bool
7049 necp_socket_update_qos_marking_inner(struct ifnet *ifp, u_int32_t route_rule_id)
7050 {
7051 bool qos_marking = FALSE;
7052 int exception_index = 0;
7053 struct necp_route_rule *route_rule = NULL;
7054
7055 route_rule = necp_lookup_route_rule_locked(&necp_route_rules, route_rule_id);
7056 if (route_rule == NULL) {
7057 qos_marking = FALSE;
7058 goto done;
7059 }
7060
7061 qos_marking = (route_rule->default_action == NECP_ROUTE_RULE_QOS_MARKING) ? TRUE : FALSE;
7062
7063 if (ifp == NULL) {
7064 goto done;
7065 }
7066
7067 for (exception_index = 0; exception_index < MAX_ROUTE_RULE_INTERFACES; exception_index++) {
7068 if (route_rule->exception_if_indices[exception_index] == 0) {
7069 break;
7070 }
7071 if (route_rule->exception_if_actions[exception_index] != NECP_ROUTE_RULE_QOS_MARKING) {
7072 continue;
7073 }
7074 if (route_rule->exception_if_indices[exception_index] == ifp->if_index) {
7075 qos_marking = TRUE;
7076 if (necp_debug > 2) {
7077 NECPLOG(LOG_DEBUG, "QoS Marking: Interface match %d for Rule %d Allowed %d",
7078 route_rule->exception_if_indices[exception_index], route_rule_id, qos_marking);
7079 }
7080 goto done;
7081 }
7082 }
7083
7084 if ((route_rule->cellular_action == NECP_ROUTE_RULE_QOS_MARKING && IFNET_IS_CELLULAR(ifp)) ||
7085 (route_rule->wifi_action == NECP_ROUTE_RULE_QOS_MARKING && IFNET_IS_WIFI(ifp)) ||
7086 (route_rule->wired_action == NECP_ROUTE_RULE_QOS_MARKING && IFNET_IS_WIRED(ifp)) ||
7087 (route_rule->expensive_action == NECP_ROUTE_RULE_QOS_MARKING && IFNET_IS_EXPENSIVE(ifp))) {
7088 qos_marking = TRUE;
7089 if (necp_debug > 2) {
7090 NECPLOG(LOG_DEBUG, "QoS Marking: C:%d WF:%d W:%d E:%d for Rule %d Allowed %d",
7091 route_rule->cellular_action, route_rule->wifi_action, route_rule->wired_action,
7092 route_rule->expensive_action, route_rule_id, qos_marking);
7093 }
7094 goto done;
7095 }
7096 done:
7097 if (necp_debug > 1) {
7098 NECPLOG(LOG_DEBUG, "QoS Marking: Rule %d ifp %s Allowed %d",
7099 route_rule_id, ifp ? ifp->if_xname : "", qos_marking);
7100 }
7101 return (qos_marking);
7102 }
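
/*
 * Illustrative sketch (not part of necp.c): the helper above only ever
 * widens the answer -- the default action sets the baseline, and a matching
 * interface exception or interface-type action can grant QoS marking but
 * never revoke it. A flattened userland model with one hypothetical
 * exception slot and only the cellular type action (struct
 * example_route_rule and example_qos_marking are invented names).
 */
#if 0 /* example only */
#include <stdbool.h>
#include <stdio.h>

struct example_route_rule {
	bool default_marks;
	bool cellular_marks;
	int exception_if_index; /* 0 means no exception entry */
	bool exception_marks;
};

static bool
example_qos_marking(const struct example_route_rule *rule,
    int if_index, bool is_cellular)
{
	if (rule->default_marks) {
		return true;
	}
	if (rule->exception_if_index != 0 &&
	    rule->exception_if_index == if_index &&
	    rule->exception_marks) {
		return true;
	}
	return is_cellular && rule->cellular_marks;
}

int
main(void)
{
	struct example_route_rule rule = {
		.default_marks = false,
		.cellular_marks = false,
		.exception_if_index = 7,
		.exception_marks = true,
	};
	printf("cellular, if 3: %d\n", example_qos_marking(&rule, 3, true)); /* 0: nothing grants marking */
	printf("cellular, if 7: %d\n", example_qos_marking(&rule, 7, true)); /* 1: listed exception grants it */

	rule.cellular_marks = true;
	printf("cellular, if 3: %d\n", example_qos_marking(&rule, 3, true)); /* 1: cellular action grants it */
	return 0;
}
#endif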
7103
7104 void
7105 necp_socket_update_qos_marking(struct inpcb *inp, struct rtentry *route, struct ifnet *interface, u_int32_t route_rule_id)
7106 {
7107 bool qos_marking = FALSE;
7108 struct ifnet *ifp = interface; // Prefer the caller-provided interface; fall back to the route's interface below
7109
7110 ASSERT(net_qos_policy_restricted != 0);
7111
7112 if (inp->inp_socket == NULL) {
7113 return;
7114 }
7115 if ((inp->inp_socket->so_flags1 & SOF1_QOSMARKING_POLICY_OVERRIDE)) {
7116 return;
7117 }
7118 /*
7119 * This check is racy, but we do not want the performance hit of taking necp_kernel_policy_lock
7120 */
7121 if (inp->inp_policyresult.results.qos_marking_gencount == necp_kernel_socket_policies_gencount) {
7122 return;
7123 }
7124
7125 lck_rw_lock_shared(&necp_kernel_policy_lock);
7126
7127 if (ifp == NULL && route != NULL) {
7128 ifp = route->rt_ifp;
7129 }
7130 /*
7131 * By default, until we have an interface, do not mark, and leave the gencount unset so the QoS marking policy is reevaluated later
7132 */
7133 if (ifp == NULL || route_rule_id == 0) {
7134 qos_marking = FALSE;
7135 goto done;
7136 }
7137
7138 if (ROUTE_RULE_IS_AGGREGATE(route_rule_id)) {
7139 struct necp_aggregate_route_rule *aggregate_route_rule = necp_lookup_aggregate_route_rule_locked(route_rule_id);
7140 if (aggregate_route_rule != NULL) {
7141 int index = 0;
7142 for (index = 0; index < MAX_AGGREGATE_ROUTE_RULES; index++) {
7143 u_int32_t sub_route_rule_id = aggregate_route_rule->rule_ids[index];
7144 if (sub_route_rule_id == 0) {
7145 break;
7146 }
7147 qos_marking = necp_socket_update_qos_marking_inner(ifp, sub_route_rule_id);
7148 if (qos_marking == TRUE) {
7149 break;
7150 }
7151 }
7152 }
7153 } else {
7154 qos_marking = necp_socket_update_qos_marking_inner(ifp, route_rule_id);
7155 }
7156 /*
7157 * Now that we have an interface, remember the gencount
7158 */
7159 inp->inp_policyresult.results.qos_marking_gencount = necp_kernel_socket_policies_gencount;
7160
7161 done:
7162 lck_rw_done(&necp_kernel_policy_lock);
7163
7164 if (qos_marking == TRUE) {
7165 inp->inp_socket->so_flags1 |= SOF1_QOSMARKING_ALLOWED;
7166 } else {
7167 inp->inp_socket->so_flags1 &= ~SOF1_QOSMARKING_ALLOWED;
7168 }
7169 }
7170
7171 static bool
7172 necp_route_is_allowed_inner(struct rtentry *route, struct ifnet *ifp, u_int32_t route_rule_id, u_int32_t *interface_type_denied)
7173 {
7174 bool default_is_allowed = TRUE;
7175 u_int8_t type_aggregate_action = NECP_ROUTE_RULE_NONE;
7176 int exception_index = 0;
7177 struct ifnet *delegated_ifp = NULL;
7178 struct necp_route_rule *route_rule = NULL;
7179
7180 route_rule = necp_lookup_route_rule_locked(&necp_route_rules, route_rule_id);
7181 if (route_rule == NULL) {
7182 return (TRUE);
7183 }
7184
7185 default_is_allowed = (route_rule->default_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? FALSE : TRUE;
7186 if (ifp == NULL) {
7187 ifp = route->rt_ifp;
7188 }
7189 if (ifp == NULL) {
7190 if (necp_debug > 1 && !default_is_allowed) {
7191 NECPLOG(LOG_DEBUG, "Route Allowed: No interface for route, using default for Rule %d Allowed %d", route_rule_id, default_is_allowed);
7192 }
7193 return (default_is_allowed);
7194 }
7195
7196 delegated_ifp = ifp->if_delegated.ifp;
7197 for (exception_index = 0; exception_index < MAX_ROUTE_RULE_INTERFACES; exception_index++) {
7198 if (route_rule->exception_if_indices[exception_index] == 0) {
7199 break;
7200 }
7201 if (IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(route_rule->exception_if_actions[exception_index]) == FALSE) {
7202 continue;
7203 }
7204 if (route_rule->exception_if_indices[exception_index] == ifp->if_index ||
7205 (delegated_ifp != NULL && route_rule->exception_if_indices[exception_index] == delegated_ifp->if_index)) {
7206 if (necp_debug > 1) {
7207 NECPLOG(LOG_DEBUG, "Route Allowed: Interface match %d for Rule %d Allowed %d", route_rule->exception_if_indices[exception_index], route_rule_id, ((route_rule->exception_if_actions[exception_index] == NECP_ROUTE_RULE_DENY_INTERFACE) ? FALSE : TRUE));
7208 }
7209 return ((route_rule->exception_if_actions[exception_index] == NECP_ROUTE_RULE_DENY_INTERFACE) ? FALSE : TRUE);
7210 }
7211 }
7212
7213 if (IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(route_rule->cellular_action) &&
7214 IFNET_IS_CELLULAR(ifp)) {
7215 if (interface_type_denied != NULL) {
7216 *interface_type_denied = IFRTYPE_FUNCTIONAL_CELLULAR;
7217 }
7218 if (type_aggregate_action == NECP_ROUTE_RULE_NONE ||
7219 (type_aggregate_action == NECP_ROUTE_RULE_ALLOW_INTERFACE &&
7220 route_rule->cellular_action == NECP_ROUTE_RULE_DENY_INTERFACE)) {
7221 // Deny wins if there is a conflict
7222 type_aggregate_action = route_rule->cellular_action;
7223 }
7224 }
7225
7226 if (IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(route_rule->wifi_action) &&
7227 IFNET_IS_WIFI(ifp)) {
7228 if (interface_type_denied != NULL) {
7229 *interface_type_denied = IFRTYPE_FUNCTIONAL_WIFI_INFRA;
7230 }
7231 if (type_aggregate_action == NECP_ROUTE_RULE_NONE ||
7232 (type_aggregate_action == NECP_ROUTE_RULE_ALLOW_INTERFACE &&
7233 route_rule->wifi_action == NECP_ROUTE_RULE_DENY_INTERFACE)) {
7234 // Deny wins if there is a conflict
7235 type_aggregate_action = route_rule->wifi_action;
7236 }
7237 }
7238
7239 if (IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(route_rule->wired_action) &&
7240 IFNET_IS_WIRED(ifp)) {
7241 if (interface_type_denied != NULL) {
7242 *interface_type_denied = IFRTYPE_FUNCTIONAL_WIRED;
7243 }
7244 if (type_aggregate_action == NECP_ROUTE_RULE_NONE ||
7245 (type_aggregate_action == NECP_ROUTE_RULE_ALLOW_INTERFACE &&
7246 route_rule->wired_action == NECP_ROUTE_RULE_DENY_INTERFACE)) {
7247 // Deny wins if there is a conflict
7248 type_aggregate_action = route_rule->wired_action;
7249 }
7250 }
7251
7252 if (IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(route_rule->expensive_action) &&
7253 IFNET_IS_EXPENSIVE(ifp)) {
7254 if (type_aggregate_action == NECP_ROUTE_RULE_NONE ||
7255 (type_aggregate_action == NECP_ROUTE_RULE_ALLOW_INTERFACE &&
7256 route_rule->expensive_action == NECP_ROUTE_RULE_DENY_INTERFACE)) {
7257 // Deny wins if there is a conflict
7258 type_aggregate_action = route_rule->expensive_action;
7259 }
7260 }
7261
7262 if (type_aggregate_action != NECP_ROUTE_RULE_NONE) {
7263 if (necp_debug > 1) {
7264 NECPLOG(LOG_DEBUG, "Route Allowed: C:%d WF:%d W:%d E:%d for Rule %d Allowed %d", route_rule->cellular_action, route_rule->wifi_action, route_rule->wired_action, route_rule->expensive_action, route_rule_id, ((type_aggregate_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? FALSE : TRUE));
7265 }
7266 return ((type_aggregate_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? FALSE : TRUE);
7267 }
7268
7269 if (necp_debug > 1 && !default_is_allowed) {
7270 NECPLOG(LOG_DEBUG, "Route Allowed: Using default for Rule %d Allowed %d", route_rule_id, default_is_allowed);
7271 }
7272 return (default_is_allowed);
7273 }
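
/*
 * Evaluation order in necp_route_is_allowed_inner(): an explicit
 * exception-interface entry (matching either the interface or its delegate)
 * decides immediately; otherwise the interface-type actions (cellular,
 * Wi-Fi, wired, expensive) are aggregated with deny taking precedence over
 * allow; if none apply, the rule's default action is used.
 */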
7274
7275 static bool
7276 necp_route_is_allowed(struct rtentry *route, struct ifnet *interface, u_int32_t route_rule_id, u_int32_t *interface_type_denied)
7277 {
7278 if ((route == NULL && interface == NULL) || route_rule_id == 0) {
7279 if (necp_debug > 1) {
7280 NECPLOG(LOG_DEBUG, "Route Allowed: no route or interface, Rule %d Allowed %d", route_rule_id, TRUE);
7281 }
7282 return (TRUE);
7283 }
7284
7285 if (ROUTE_RULE_IS_AGGREGATE(route_rule_id)) {
7286 struct necp_aggregate_route_rule *aggregate_route_rule = necp_lookup_aggregate_route_rule_locked(route_rule_id);
7287 if (aggregate_route_rule != NULL) {
7288 int index = 0;
7289 for (index = 0; index < MAX_AGGREGATE_ROUTE_RULES; index++) {
7290 u_int32_t sub_route_rule_id = aggregate_route_rule->rule_ids[index];
7291 if (sub_route_rule_id == 0) {
7292 break;
7293 }
7294 if (!necp_route_is_allowed_inner(route, interface, sub_route_rule_id, interface_type_denied)) {
7295 return (FALSE);
7296 }
7297 }
7298 }
7299 } else {
7300 return (necp_route_is_allowed_inner(route, interface, route_rule_id, interface_type_denied));
7301 }
7302
7303 return (TRUE);
7304 }
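
/*
 * For an aggregate route rule ID, every sub-rule must allow the route; the
 * first sub-rule that denies makes the whole aggregate deny. A rule ID of 0,
 * or a missing route and interface, is treated as allowed.
 */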
7305
7306 bool
7307 necp_packet_is_allowed_over_interface(struct mbuf *packet, struct ifnet *interface)
7308 {
7309 bool is_allowed = TRUE;
7310 u_int32_t route_rule_id = necp_get_route_rule_id_from_packet(packet);
7311 if (route_rule_id != 0 &&
7312 interface != NULL) {
7313 lck_rw_lock_shared(&necp_kernel_policy_lock);
7314 is_allowed = necp_route_is_allowed(NULL, interface, route_rule_id, NULL);
7315 lck_rw_done(&necp_kernel_policy_lock);
7316 }
7317 return (is_allowed);
7318 }
7319
7320 static bool
7321 necp_netagents_allow_traffic(u_int32_t *netagent_ids, size_t netagent_id_count)
7322 {
7323 size_t netagent_cursor;
7324 for (netagent_cursor = 0; netagent_cursor < netagent_id_count; netagent_cursor++) {
7325 struct necp_uuid_id_mapping *mapping = NULL;
7326 u_int32_t netagent_id = netagent_ids[netagent_cursor];
7327 if (netagent_id == 0) {
7328 break;
7329 }
7330 mapping = necp_uuid_lookup_uuid_with_service_id_locked(netagent_id);
7331 if (mapping != NULL) {
7332 u_int32_t agent_flags = 0;
7333 agent_flags = netagent_get_flags(mapping->uuid);
7334 if (agent_flags & NETAGENT_FLAG_REGISTERED) {
7335 if (agent_flags & NETAGENT_FLAG_ACTIVE) {
7336 continue;
7337 } else if ((agent_flags & NETAGENT_FLAG_VOLUNTARY) == 0) {
7338 return (FALSE);
7339 }
7340 }
7341 }
7342 }
7343 return (TRUE);
7344 }
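
/*
 * Netagent gating above: a registered and active agent lets traffic proceed;
 * a registered agent that is inactive and not voluntary blocks it;
 * unregistered or unmapped agent IDs are ignored.
 */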
7345
7346 static bool
7347 necp_socket_is_allowed_to_send_recv_internal(struct inpcb *inp, struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr, ifnet_t interface, necp_kernel_policy_id *return_policy_id, u_int32_t *return_route_rule_id)
7348 {
7349 u_int32_t verifyifindex = interface ? interface->if_index : 0;
7350 bool allowed_to_receive = TRUE;
7351 struct necp_socket_info info;
7352 u_int32_t flowhash = 0;
7353 necp_kernel_policy_result service_action = 0;
7354 necp_kernel_policy_service service = { 0, 0 };
7355 u_int32_t route_rule_id = 0;
7356 struct rtentry *route = NULL;
7357 u_int32_t interface_type_denied = IFRTYPE_FUNCTIONAL_UNKNOWN;
7358
7359 u_int32_t netagent_ids[NECP_MAX_NETAGENTS];
7360 memset(&netagent_ids, 0, sizeof(netagent_ids));
7361
7362 if (return_policy_id) {
7363 *return_policy_id = NECP_KERNEL_POLICY_ID_NONE;
7364 }
7365 if (return_route_rule_id) {
7366 *return_route_rule_id = 0;
7367 }
7368
7369 if (inp == NULL) {
7370 goto done;
7371 }
7372
7373 route = inp->inp_route.ro_rt;
7374
7375 // Don't lock. Possible race condition, but we don't want the performance hit.
7376 if (necp_kernel_socket_policies_count == 0 ||
7377 (!(inp->inp_flags2 & INP2_WANT_APP_POLICY) && necp_kernel_socket_policies_non_app_count == 0)) {
7378 if (necp_drop_all_order > 0) {
7379 if (necp_pass_loopback > 0 &&
7380 necp_is_loopback(override_local_addr, override_remote_addr, inp, NULL)) {
7381 allowed_to_receive = TRUE;
7382 } else {
7383 allowed_to_receive = FALSE;
7384 }
7385 }
7386 goto done;
7387 }
7388
7389 // If this socket is connected, or we are not taking addresses into account, try to reuse last result
7390 if ((necp_socket_is_connected(inp) || (override_local_addr == NULL && override_remote_addr == NULL)) && inp->inp_policyresult.policy_id != NECP_KERNEL_POLICY_ID_NONE) {
7391 bool policies_have_changed = FALSE;
7392 bool route_allowed = TRUE;
7393 lck_rw_lock_shared(&necp_kernel_policy_lock);
7394 if (inp->inp_policyresult.policy_gencount != necp_kernel_socket_policies_gencount) {
7395 policies_have_changed = TRUE;
7396 } else {
7397 if (inp->inp_policyresult.results.route_rule_id != 0 &&
7398 !necp_route_is_allowed(route, interface, inp->inp_policyresult.results.route_rule_id, &interface_type_denied)) {
7399 route_allowed = FALSE;
7400 }
7401 }
7402 lck_rw_done(&necp_kernel_policy_lock);
7403
7404 if (!policies_have_changed) {
7405 if (!route_allowed ||
7406 inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_DROP ||
7407 inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT ||
7408 (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL && interface &&
7409 inp->inp_policyresult.results.result_parameter.tunnel_interface_index != verifyifindex)) {
7410 allowed_to_receive = FALSE;
7411 } else {
7412 if (return_policy_id) {
7413 *return_policy_id = inp->inp_policyresult.policy_id;
7414 }
7415 if (return_route_rule_id) {
7416 *return_route_rule_id = inp->inp_policyresult.results.route_rule_id;
7417 }
7418 }
7419 goto done;
7420 }
7421 }
7422
7423 // Check for loopback exception
7424 if (necp_pass_loopback > 0 &&
7425 necp_is_loopback(override_local_addr, override_remote_addr, inp, NULL)) {
7426 allowed_to_receive = TRUE;
7427 goto done;
7428 }
7429
7430 // Actually calculate policy result
7431 lck_rw_lock_shared(&necp_kernel_policy_lock);
7432 necp_socket_fillout_info_locked(inp, override_local_addr, override_remote_addr, 0, &info);
7433
7434 flowhash = necp_socket_calc_flowhash_locked(&info);
7435 if (inp->inp_policyresult.policy_id != NECP_KERNEL_POLICY_ID_NONE &&
7436 inp->inp_policyresult.policy_gencount == necp_kernel_socket_policies_gencount &&
7437 inp->inp_policyresult.flowhash == flowhash) {
7438 if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_DROP ||
7439 inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT ||
7440 (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL && interface &&
7441 inp->inp_policyresult.results.result_parameter.tunnel_interface_index != verifyifindex) ||
7442 (inp->inp_policyresult.results.route_rule_id != 0 &&
7443 !necp_route_is_allowed(route, interface, inp->inp_policyresult.results.route_rule_id, &interface_type_denied))) {
7444 allowed_to_receive = FALSE;
7445 } else {
7446 if (return_policy_id) {
7447 *return_policy_id = inp->inp_policyresult.policy_id;
7448 }
7449 if (return_route_rule_id) {
7450 *return_route_rule_id = inp->inp_policyresult.results.route_rule_id;
7451 }
7452 }
7453 lck_rw_done(&necp_kernel_policy_lock);
7454 goto done;
7455 }
7456
7457 struct necp_kernel_socket_policy *matched_policy = necp_socket_find_policy_match_with_info_locked(necp_kernel_socket_policies_map[NECP_SOCKET_MAP_APP_ID_TO_BUCKET(info.application_id)], &info, NULL, &route_rule_id, &service_action, &service, netagent_ids, NECP_MAX_NETAGENTS, current_proc());
7458 if (matched_policy != NULL) {
7459 if (matched_policy->result == NECP_KERNEL_POLICY_RESULT_DROP ||
7460 matched_policy->result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT ||
7461 (matched_policy->result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL && interface &&
7462 matched_policy->result_parameter.tunnel_interface_index != verifyifindex) ||
7463 ((service_action == NECP_KERNEL_POLICY_RESULT_TRIGGER_SCOPED ||
7464 service_action == NECP_KERNEL_POLICY_RESULT_NO_TRIGGER_SCOPED) &&
7465 service.identifier != 0 && service.identifier != NECP_NULL_SERVICE_ID) ||
7466 (route_rule_id != 0 &&
7467 !necp_route_is_allowed(route, interface, route_rule_id, &interface_type_denied)) ||
7468 !necp_netagents_allow_traffic(netagent_ids, NECP_MAX_NETAGENTS)) {
7469 allowed_to_receive = FALSE;
7470 } else {
7471 if (return_policy_id) {
7472 *return_policy_id = matched_policy->id;
7473 }
7474 if (return_route_rule_id) {
7475 *return_route_rule_id = route_rule_id;
7476 }
7477 }
7478 lck_rw_done(&necp_kernel_policy_lock);
7479
7480 if (necp_debug > 1 && matched_policy->id != inp->inp_policyresult.policy_id) {
7481 NECPLOG(LOG_DEBUG, "Socket Send/Recv Policy: Policy %d Allowed %d", return_policy_id ? *return_policy_id : 0, allowed_to_receive);
7482 }
7483 goto done;
7484 } else if (necp_drop_all_order > 0) {
7485 allowed_to_receive = FALSE;
7486 } else {
7487 if (return_policy_id) {
7488 *return_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
7489 }
7490 if (return_route_rule_id) {
7491 *return_route_rule_id = route_rule_id;
7492 }
7493 }
7494
7495 lck_rw_done(&necp_kernel_policy_lock);
7496
7497 done:
7498 if (!allowed_to_receive && interface_type_denied != IFRTYPE_FUNCTIONAL_UNKNOWN) {
7499 soevent(inp->inp_socket, (SO_FILT_HINT_LOCKED | SO_FILT_HINT_IFDENIED));
7500 }
7501
7502 return (allowed_to_receive);
7503 }
7504
7505 bool
7506 necp_socket_is_allowed_to_send_recv_v4(struct inpcb *inp, u_int16_t local_port, u_int16_t remote_port, struct in_addr *local_addr, struct in_addr *remote_addr, ifnet_t interface, necp_kernel_policy_id *return_policy_id, u_int32_t *return_route_rule_id)
7507 {
7508 struct sockaddr_in local;
7509 struct sockaddr_in remote;
7510 local.sin_family = remote.sin_family = AF_INET;
7511 local.sin_len = remote.sin_len = sizeof(struct sockaddr_in);
7512 local.sin_port = local_port;
7513 remote.sin_port = remote_port;
7514 memcpy(&local.sin_addr, local_addr, sizeof(local.sin_addr));
7515 memcpy(&remote.sin_addr, remote_addr, sizeof(remote.sin_addr));
7516
7517 return (necp_socket_is_allowed_to_send_recv_internal(inp, (struct sockaddr *)&local, (struct sockaddr *)&remote, interface, return_policy_id, return_route_rule_id));
7518 }
7519
7520 bool
7521 necp_socket_is_allowed_to_send_recv_v6(struct inpcb *inp, u_int16_t local_port, u_int16_t remote_port, struct in6_addr *local_addr, struct in6_addr *remote_addr, ifnet_t interface, necp_kernel_policy_id *return_policy_id, u_int32_t *return_route_rule_id)
7522 {
7523 struct sockaddr_in6 local;
7524 struct sockaddr_in6 remote;
7525 local.sin6_family = remote.sin6_family = AF_INET6;
7526 local.sin6_len = remote.sin6_len = sizeof(struct sockaddr_in6);
7527 local.sin6_port = local_port;
7528 remote.sin6_port = remote_port;
7529 memcpy(&local.sin6_addr, local_addr, sizeof(local.sin6_addr));
7530 memcpy(&remote.sin6_addr, remote_addr, sizeof(remote.sin6_addr));
7531
7532 return (necp_socket_is_allowed_to_send_recv_internal(inp, (struct sockaddr *)&local, (struct sockaddr *)&remote, interface, return_policy_id, return_route_rule_id));
7533 }
7534
7535 bool
7536 necp_socket_is_allowed_to_send_recv(struct inpcb *inp, necp_kernel_policy_id *return_policy_id, u_int32_t *return_route_rule_id)
7537 {
7538 return (necp_socket_is_allowed_to_send_recv_internal(inp, NULL, NULL, NULL, return_policy_id, return_route_rule_id));
7539 }
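
/*
 * Illustrative sketch (not part of the original file): how an input or
 * output path might combine the per-address check with packet marking.
 * The inp/packet/addresses and the error value are assumptions; the ports
 * are expected in network byte order, since they are stored directly into
 * sin_port by the v4/v6 wrappers above.
 */
#if 0
static int
example_check_and_mark(struct inpcb *inp, struct mbuf *packet,
    struct in_addr *laddr, struct in_addr *faddr,
    u_int16_t lport, u_int16_t fport, ifnet_t ifp)
{
	necp_kernel_policy_id policy_id = NECP_KERNEL_POLICY_ID_NONE;
	u_int32_t route_rule_id = 0;

	if (!necp_socket_is_allowed_to_send_recv_v4(inp, lport, fport,
	    laddr, faddr, ifp, &policy_id, &route_rule_id)) {
		return (EHOSTUNREACH);	// hypothetical error choice
	}

	// Tag the packet so later layers (IP output, interfaces) can see
	// which policy and route rule applied.
	return (necp_mark_packet_from_socket(packet, inp, policy_id,
	    route_rule_id));
}
#endif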
7540
7541 int
7542 necp_mark_packet_from_socket(struct mbuf *packet, struct inpcb *inp, necp_kernel_policy_id policy_id, u_int32_t route_rule_id)
7543 {
7544 if (packet == NULL || inp == NULL || !(packet->m_flags & M_PKTHDR)) {
7545 return (EINVAL);
7546 }
7547
7548 // Mark ID for Pass and IP Tunnel
7549 if (policy_id != NECP_KERNEL_POLICY_ID_NONE) {
7550 packet->m_pkthdr.necp_mtag.necp_policy_id = policy_id;
7551 } else if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_PASS ||
7552 inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL) {
7553 packet->m_pkthdr.necp_mtag.necp_policy_id = inp->inp_policyresult.policy_id;
7554 } else {
7555 packet->m_pkthdr.necp_mtag.necp_policy_id = NECP_KERNEL_POLICY_ID_NONE;
7556 }
7557 packet->m_pkthdr.necp_mtag.necp_last_interface_index = 0;
7558 if (route_rule_id != 0) {
7559 packet->m_pkthdr.necp_mtag.necp_route_rule_id = route_rule_id;
7560 } else {
7561 packet->m_pkthdr.necp_mtag.necp_route_rule_id = inp->inp_policyresult.results.route_rule_id;
7562 }
7563 packet->m_pkthdr.necp_mtag.necp_app_id = inp->inp_policyresult.app_id;
7564
7565 return (0);
7566 }
7567
7568 int
7569 necp_mark_packet_from_ip(struct mbuf *packet, necp_kernel_policy_id policy_id)
7570 {
7571 if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
7572 return (EINVAL);
7573 }
7574
7575 // Mark ID for Pass and IP Tunnel
7576 if (policy_id != NECP_KERNEL_POLICY_ID_NONE) {
7577 packet->m_pkthdr.necp_mtag.necp_policy_id = policy_id;
7578 } else {
7579 packet->m_pkthdr.necp_mtag.necp_policy_id = NECP_KERNEL_POLICY_ID_NONE;
7580 }
7581
7582 return (0);
7583 }
7584
7585 int
7586 necp_mark_packet_from_interface(struct mbuf *packet, ifnet_t interface)
7587 {
7588 if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
7589 return (EINVAL);
7590 }
7591
7592 // Mark the last interface that handled the packet
7593 if (interface != NULL) {
7594 packet->m_pkthdr.necp_mtag.necp_last_interface_index = interface->if_index;
7595 }
7596
7597 return (0);
7598 }
7599
7600 int
7601 necp_mark_packet_as_keepalive(struct mbuf *packet, bool is_keepalive)
7602 {
7603 if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
7604 return (EINVAL);
7605 }
7606
7607 if (is_keepalive) {
7608 packet->m_pkthdr.pkt_flags |= PKTF_KEEPALIVE;
7609 } else {
7610 packet->m_pkthdr.pkt_flags &= ~PKTF_KEEPALIVE;
7611 }
7612
7613 return (0);
7614 }
7615
7616 necp_kernel_policy_id
7617 necp_get_policy_id_from_packet(struct mbuf *packet)
7618 {
7619 if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
7620 return (NECP_KERNEL_POLICY_ID_NONE);
7621 }
7622
7623 return (packet->m_pkthdr.necp_mtag.necp_policy_id);
7624 }
7625
7626 u_int32_t
7627 necp_get_last_interface_index_from_packet(struct mbuf *packet)
7628 {
7629 if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
7630 return (0);
7631 }
7632
7633 return (packet->m_pkthdr.necp_mtag.necp_last_interface_index);
7634 }
7635
7636 u_int32_t
7637 necp_get_route_rule_id_from_packet(struct mbuf *packet)
7638 {
7639 if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
7640 return (0);
7641 }
7642
7643 return (packet->m_pkthdr.necp_mtag.necp_route_rule_id);
7644 }
7645
7646 int
7647 necp_get_app_uuid_from_packet(struct mbuf *packet,
7648 uuid_t app_uuid)
7649 {
7650 if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
7651 return (EINVAL);
7652 }
7653
7654 bool found_mapping = FALSE;
7655 if (packet->m_pkthdr.necp_mtag.necp_app_id != 0) {
7656 lck_rw_lock_shared(&necp_kernel_policy_lock);
7657 struct necp_uuid_id_mapping *entry = necp_uuid_lookup_uuid_with_app_id_locked(packet->m_pkthdr.necp_mtag.necp_app_id);
7658 if (entry != NULL) {
7659 uuid_copy(app_uuid, entry->uuid);
7660 found_mapping = TRUE;
7661 }
7662 lck_rw_done(&necp_kernel_policy_lock);
7663 }
7664 if (!found_mapping) {
7665 uuid_clear(app_uuid);
7666 }
7667 return (0);
7668 }
7669
7670 bool
7671 necp_get_is_keepalive_from_packet(struct mbuf *packet)
7672 {
7673 if (packet == NULL || !(packet->m_flags & M_PKTHDR)) {
7674 return (FALSE);
7675 }
7676
7677 return (packet->m_pkthdr.pkt_flags & PKTF_KEEPALIVE);
7678 }
7679
7680 u_int32_t
7681 necp_socket_get_content_filter_control_unit(struct socket *so)
7682 {
7683 struct inpcb *inp = sotoinpcb(so);
7684
7685 if (inp == NULL) {
7686 return (0);
7687 }
7688 return (inp->inp_policyresult.results.filter_control_unit);
7689 }
7690
7691 bool
7692 necp_socket_should_use_flow_divert(struct inpcb *inp)
7693 {
7694 if (inp == NULL) {
7695 return (FALSE);
7696 }
7697
7698 return (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT);
7699 }
7700
7701 u_int32_t
7702 necp_socket_get_flow_divert_control_unit(struct inpcb *inp)
7703 {
7704 if (inp == NULL) {
7705 return (0);
7706 }
7707
7708 if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT) {
7709 return (inp->inp_policyresult.results.result_parameter.flow_divert_control_unit);
7710 }
7711
7712 return (0);
7713 }
7714
7715 bool
7716 necp_socket_should_rescope(struct inpcb *inp)
7717 {
7718 if (inp == NULL) {
7719 return (FALSE);
7720 }
7721
7722 return (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED);
7723 }
7724
7725 u_int
7726 necp_socket_get_rescope_if_index(struct inpcb *inp)
7727 {
7728 if (inp == NULL) {
7729 return (0);
7730 }
7731
7732 if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED) {
7733 return (inp->inp_policyresult.results.result_parameter.scoped_interface_index);
7734 }
7735
7736 return (0);
7737 }
7738
7739 u_int32_t
7740 necp_socket_get_effective_mtu(struct inpcb *inp, u_int32_t current_mtu)
7741 {
7742 if (inp == NULL) {
7743 return (current_mtu);
7744 }
7745
7746 if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL &&
7747 (inp->inp_flags & INP_BOUND_IF) &&
7748 inp->inp_boundifp) {
7749
7750 u_int bound_interface_index = inp->inp_boundifp->if_index;
7751 u_int tunnel_interface_index = inp->inp_policyresult.results.result_parameter.tunnel_interface_index;
7752
7753 // The result is IP Tunnel, and is rescoping from one interface to another. Recalculate MTU.
7754 if (bound_interface_index != tunnel_interface_index) {
7755 ifnet_t tunnel_interface = NULL;
7756
7757 ifnet_head_lock_shared();
7758 tunnel_interface = ifindex2ifnet[tunnel_interface_index];
7759 ifnet_head_done();
7760
7761 if (tunnel_interface != NULL) {
7762 u_int32_t direct_tunnel_mtu = tunnel_interface->if_mtu;
7763 u_int32_t delegate_tunnel_mtu = (tunnel_interface->if_delegated.ifp != NULL) ? tunnel_interface->if_delegated.ifp->if_mtu : 0;
7764 if (delegate_tunnel_mtu != 0 &&
7765 strncmp(tunnel_interface->if_name, "ipsec", strlen("ipsec")) == 0) {
7766 // For ipsec interfaces, calculate the overhead from the delegate interface
7767 u_int32_t tunnel_overhead = (u_int32_t)(esp_hdrsiz(NULL) + sizeof(struct ip6_hdr));
7768 if (delegate_tunnel_mtu > tunnel_overhead) {
7769 delegate_tunnel_mtu -= tunnel_overhead;
7770 }
7771
7772 if (delegate_tunnel_mtu < direct_tunnel_mtu) {
7773 // If the (delegate - overhead) < direct, return (delegate - overhead)
7774 return (delegate_tunnel_mtu);
7775 } else {
7776 // Otherwise return direct
7777 return (direct_tunnel_mtu);
7778 }
7779 } else {
7780 // For non-ipsec interfaces, just return the tunnel MTU
7781 return (direct_tunnel_mtu);
7782 }
7783 }
7784 }
7785 }
7786
7787 // By default, just return the MTU passed in
7788 return (current_mtu);
7789 }
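
/*
 * Example of the ipsec MTU adjustment above (values are illustrative): with
 * a delegate MTU of 1500 and a tunnel overhead of
 * esp_hdrsiz(NULL) + sizeof(struct ip6_hdr), the candidate MTU is
 * 1500 - overhead; that value is returned only if it is smaller than the
 * tunnel interface's own MTU, otherwise the direct tunnel MTU is used.
 */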
7790
7791 ifnet_t
7792 necp_get_ifnet_from_result_parameter(necp_kernel_policy_result_parameter *result_parameter)
7793 {
7794 if (result_parameter == NULL) {
7795 return (NULL);
7796 }
7797
7798 return (ifindex2ifnet[result_parameter->tunnel_interface_index]);
7799 }
7800
7801 bool
7802 necp_packet_can_rebind_to_ifnet(struct mbuf *packet, struct ifnet *interface, struct route *new_route, int family)
7803 {
7804 bool found_match = FALSE;
7805 errno_t result = 0;
7806 ifaddr_t *addresses = NULL;
7807 union necp_sockaddr_union address_storage;
7808 int i;
7809
7810 if (packet == NULL || interface == NULL || new_route == NULL || (family != AF_INET && family != AF_INET6)) {
7811 return (FALSE);
7812 }
7813
7814 result = ifnet_get_address_list_family(interface, &addresses, family);
7815 if (result != 0) {
7816 NECPLOG(LOG_ERR, "Failed to get address list for %s%d", ifnet_name(interface), ifnet_unit(interface));
7817 return (FALSE);
7818 }
7819
7820 for (i = 0; addresses[i] != NULL; i++) {
7821 ROUTE_RELEASE(new_route);
7822 if (ifaddr_address(addresses[i], &address_storage.sa, sizeof(address_storage)) == 0) {
7823 if (family == AF_INET) {
7824 struct ip *ip = mtod(packet, struct ip *);
7825 if (memcmp(&address_storage.sin.sin_addr, &ip->ip_src, sizeof(ip->ip_src)) == 0) {
7826 struct sockaddr_in *dst4 = (struct sockaddr_in *)(void *)&new_route->ro_dst;
7827 dst4->sin_family = AF_INET;
7828 dst4->sin_len = sizeof(struct sockaddr_in);
7829 dst4->sin_addr = ip->ip_dst;
7830 rtalloc_scoped(new_route, interface->if_index);
7831 if (!ROUTE_UNUSABLE(new_route)) {
7832 found_match = TRUE;
7833 goto done;
7834 }
7835 }
7836 } else if (family == AF_INET6) {
7837 struct ip6_hdr *ip6 = mtod(packet, struct ip6_hdr *);
7838 if (memcmp(&address_storage.sin6.sin6_addr, &ip6->ip6_src, sizeof(ip6->ip6_src)) == 0) {
7839 struct sockaddr_in6 *dst6 = (struct sockaddr_in6 *)(void *)&new_route->ro_dst;
7840 dst6->sin6_family = AF_INET6;
7841 dst6->sin6_len = sizeof(struct sockaddr_in6);
7842 dst6->sin6_addr = ip6->ip6_dst;
7843 rtalloc_scoped(new_route, interface->if_index);
7844 if (!ROUTE_UNUSABLE(new_route)) {
7845 found_match = TRUE;
7846 goto done;
7847 }
7848 }
7849 }
7850 }
7851 }
7852
7853 done:
7854 ifnet_free_address_list(addresses);
7855 addresses = NULL;
7856 return (found_match);
7857 }
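
/*
 * Rebind check above: the packet can move to the candidate interface only if
 * its source address matches one of that interface's addresses for the given
 * family and a usable scoped route to the packet's destination exists on that
 * interface; new_route is released and reallocated as a side effect.
 */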
7858
7859 static bool
7860 necp_addr_is_loopback(struct sockaddr *address)
7861 {
7862 if (address == NULL) {
7863 return (FALSE);
7864 }
7865
7866 if (address->sa_family == AF_INET) {
7867 return (ntohl(((struct sockaddr_in *)(void *)address)->sin_addr.s_addr) == INADDR_LOOPBACK);
7868 } else if (address->sa_family == AF_INET6) {
7869 return IN6_IS_ADDR_LOOPBACK(&((struct sockaddr_in6 *)(void *)address)->sin6_addr);
7870 }
7871
7872 return (FALSE);
7873 }
7874
7875 static bool
7876 necp_is_loopback(struct sockaddr *local_addr, struct sockaddr *remote_addr, struct inpcb *inp, struct mbuf *packet)
7877 {
7878 // Note: This function only checks for loopback addresses.
7879 // In the future, we may want to expand to also allow any traffic
7880 // going through the loopback interface, but until then, this
7881 // check is cheaper.
7882
7883 if (local_addr != NULL && necp_addr_is_loopback(local_addr)) {
7884 return (TRUE);
7885 }
7886
7887 if (remote_addr != NULL && necp_addr_is_loopback(remote_addr)) {
7888 return (TRUE);
7889 }
7890
7891 if (inp != NULL) {
7892 if ((inp->inp_flags & INP_BOUND_IF) && inp->inp_boundifp && (inp->inp_boundifp->if_flags & IFF_LOOPBACK)) {
7893 return (TRUE);
7894 }
7895 if (inp->inp_vflag & INP_IPV4) {
7896 if (ntohl(inp->inp_laddr.s_addr) == INADDR_LOOPBACK ||
7897 ntohl(inp->inp_faddr.s_addr) == INADDR_LOOPBACK) {
7898 return (TRUE);
7899 }
7900 } else if (inp->inp_vflag & INP_IPV6) {
7901 if (IN6_IS_ADDR_LOOPBACK(&inp->in6p_laddr) ||
7902 IN6_IS_ADDR_LOOPBACK(&inp->in6p_faddr)) {
7903 return (TRUE);
7904 }
7905 }
7906 }
7907
7908 if (packet != NULL) {
7909 struct ip *ip = mtod(packet, struct ip *);
7910 if (ip->ip_v == 4) {
7911 if (ntohl(ip->ip_src.s_addr) == INADDR_LOOPBACK) {
7912 return (TRUE);
7913 }
7914 if (ntohl(ip->ip_dst.s_addr) == INADDR_LOOPBACK) {
7915 return (TRUE);
7916 }
7917 } else if (ip->ip_v == 6) {
7918 struct ip6_hdr *ip6 = mtod(packet, struct ip6_hdr *);
7919 if (IN6_IS_ADDR_LOOPBACK(&ip6->ip6_src)) {
7920 return (TRUE);
7921 }
7922 if (IN6_IS_ADDR_LOOPBACK(&ip6->ip6_dst)) {
7923 return (TRUE);
7924 }
7925 }
7926 }
7927
7928 return (FALSE);
7929 }
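
/*
 * Illustrative sketch (not part of the original file): exercising the
 * loopback helpers with a hand-built IPv4 sockaddr. The address value is an
 * assumption chosen for the example.
 */
#if 0
static bool
example_is_local_loopback(void)
{
	struct sockaddr_in addr;

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_len = sizeof(addr);
	addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);	// 127.0.0.1

	// necp_is_loopback() treats a loopback local or remote address, a
	// loopback-bound inpcb, or a loopback packet as loopback traffic.
	return (necp_is_loopback((struct sockaddr *)&addr, NULL, NULL, NULL));
}
#endif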