apple/xnu (xnu-7195.60.75): bsd/netinet/flow_divert.c
1 /*
2 * Copyright (c) 2012-2017, 2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <string.h>
30 #include <sys/types.h>
31 #include <sys/syslog.h>
32 #include <sys/queue.h>
33 #include <sys/malloc.h>
34 #include <sys/socket.h>
35 #include <sys/kpi_mbuf.h>
36 #include <sys/mbuf.h>
37 #include <sys/domain.h>
38 #include <sys/protosw.h>
39 #include <sys/socketvar.h>
40 #include <sys/kernel.h>
41 #include <sys/systm.h>
42 #include <sys/kern_control.h>
43 #include <sys/ubc.h>
44 #include <sys/codesign.h>
45 #include <libkern/tree.h>
46 #include <kern/locks.h>
47 #include <kern/debug.h>
48 #include <kern/task.h>
49 #include <mach/task_info.h>
50 #include <net/if_var.h>
51 #include <net/route.h>
52 #include <net/flowhash.h>
53 #include <net/ntstat.h>
54 #include <net/content_filter.h>
55 #include <net/necp.h>
56 #include <netinet/in.h>
57 #include <netinet/in_var.h>
58 #include <netinet/tcp.h>
59 #include <netinet/tcp_var.h>
60 #include <netinet/tcp_fsm.h>
61 #include <netinet/flow_divert.h>
62 #include <netinet/flow_divert_proto.h>
63 #include <netinet6/in6_pcb.h>
64 #include <netinet6/ip6protosw.h>
65 #include <dev/random/randomdev.h>
66 #include <libkern/crypto/sha1.h>
67 #include <libkern/crypto/crypto_internal.h>
68 #include <os/log.h>
69 #include <corecrypto/cc.h>
70 #if CONTENT_FILTER
71 #include <net/content_filter.h>
72 #endif /* CONTENT_FILTER */
73
74 #define FLOW_DIVERT_CONNECT_STARTED 0x00000001
75 #define FLOW_DIVERT_READ_CLOSED 0x00000002
76 #define FLOW_DIVERT_WRITE_CLOSED 0x00000004
77 #define FLOW_DIVERT_TUNNEL_RD_CLOSED 0x00000008
78 #define FLOW_DIVERT_TUNNEL_WR_CLOSED 0x00000010
79 #define FLOW_DIVERT_HAS_HMAC 0x00000040
80 #define FLOW_DIVERT_NOTIFY_ON_RECEIVED 0x00000080
81 #define FLOW_DIVERT_IMPLICIT_CONNECT 0x00000100
82 #define FLOW_DIVERT_DID_SET_LOCAL_ADDR 0x00000200
83
84 #define FDLOG(level, pcb, format, ...) \
85 os_log_with_type(OS_LOG_DEFAULT, flow_divert_syslog_type_to_oslog_type(level), "(%u): " format "\n", (pcb)->hash, __VA_ARGS__)
86
87 #define FDLOG0(level, pcb, msg) \
88 os_log_with_type(OS_LOG_DEFAULT, flow_divert_syslog_type_to_oslog_type(level), "(%u): " msg "\n", (pcb)->hash)
89
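/*
 * flow_divert_pcb reference counting: FDRETAIN atomically bumps ref_count and
 * FDRELEASE drops it, destroying the PCB when the last reference goes away.
 * References are held by the owning socket, by the group's PCB tree, and
 * transiently by lookups.
 */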
90 #define FDRETAIN(pcb) if ((pcb) != NULL) OSIncrementAtomic(&(pcb)->ref_count)
91 #define FDRELEASE(pcb) \
92 do { \
93 if ((pcb) != NULL && 1 == OSDecrementAtomic(&(pcb)->ref_count)) { \
94 flow_divert_pcb_destroy(pcb); \
95 } \
96 } while (0)
97
98 #define FDLOCK(pcb) lck_mtx_lock(&(pcb)->mtx)
99 #define FDUNLOCK(pcb) lck_mtx_unlock(&(pcb)->mtx)
100
101 #define FD_CTL_SENDBUFF_SIZE (128 * 1024)
102 #define FD_CTL_RCVBUFF_SIZE (128 * 1024)
103
104 #define GROUP_BIT_CTL_ENQUEUE_BLOCKED 0
105
106 #define GROUP_COUNT_MAX 31
107 #define FLOW_DIVERT_MAX_NAME_SIZE 4096
108 #define FLOW_DIVERT_MAX_KEY_SIZE 1024
109 #define FLOW_DIVERT_MAX_TRIE_MEMORY (1024 * 1024)
110
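/*
 * Signing identifiers are stored in a compact radix trie. Nodes, 256-entry
 * child maps, and string bytes live in flat arrays and are referenced by
 * uint16_t indexes; NULL_TRIE_IDX marks an empty slot.
 */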
111 struct flow_divert_trie_node {
112 uint16_t start;
113 uint16_t length;
114 uint16_t child_map;
115 };
116
117 #define CHILD_MAP_SIZE 256
118 #define NULL_TRIE_IDX 0xffff
119 #define TRIE_NODE(t, i) ((t)->nodes[(i)])
120 #define TRIE_CHILD(t, i, b) (((t)->child_maps + (CHILD_MAP_SIZE * TRIE_NODE(t, i).child_map))[(b)])
121 #define TRIE_BYTE(t, i) ((t)->bytes[(i)])
122
123 static struct flow_divert_pcb nil_pcb;
124
125 decl_lck_rw_data(static, g_flow_divert_group_lck);
126 static struct flow_divert_group **g_flow_divert_groups = NULL;
127 static uint32_t g_active_group_count = 0;
128
129 static lck_grp_attr_t *flow_divert_grp_attr = NULL;
130 static lck_attr_t *flow_divert_mtx_attr = NULL;
131 static lck_grp_t *flow_divert_mtx_grp = NULL;
132 static errno_t g_init_result = 0;
133
134 static kern_ctl_ref g_flow_divert_kctl_ref = NULL;
135
136 static struct protosw g_flow_divert_in_protosw;
137 static struct pr_usrreqs g_flow_divert_in_usrreqs;
138 static struct protosw g_flow_divert_in_udp_protosw;
139 static struct pr_usrreqs g_flow_divert_in_udp_usrreqs;
140 static struct ip6protosw g_flow_divert_in6_protosw;
141 static struct pr_usrreqs g_flow_divert_in6_usrreqs;
142 static struct ip6protosw g_flow_divert_in6_udp_protosw;
143 static struct pr_usrreqs g_flow_divert_in6_udp_usrreqs;
144
145 static struct protosw *g_tcp_protosw = NULL;
146 static struct ip6protosw *g_tcp6_protosw = NULL;
147 static struct protosw *g_udp_protosw = NULL;
148 static struct ip6protosw *g_udp6_protosw = NULL;
149
150 ZONE_DECLARE(flow_divert_group_zone, "flow_divert_group",
151 sizeof(struct flow_divert_group), ZC_ZFREE_CLEARMEM | ZC_NOENCRYPT);
152 ZONE_DECLARE(flow_divert_pcb_zone, "flow_divert_pcb",
153 sizeof(struct flow_divert_pcb), ZC_ZFREE_CLEARMEM | ZC_NOENCRYPT);
154
155 static errno_t
156 flow_divert_dup_addr(sa_family_t family, struct sockaddr *addr, struct sockaddr **dup);
157
158 static boolean_t
159 flow_divert_is_sockaddr_valid(struct sockaddr *addr);
160
161 static int
162 flow_divert_append_target_endpoint_tlv(mbuf_t connect_packet, struct sockaddr *toaddr);
163
164 struct sockaddr *
165 flow_divert_get_buffered_target_address(mbuf_t buffer);
166
167 static void
168 flow_divert_disconnect_socket(struct socket *so);
169
170 static inline uint8_t
171 flow_divert_syslog_type_to_oslog_type(int syslog_type)
172 {
173 switch (syslog_type) {
174 case LOG_ERR: return OS_LOG_TYPE_ERROR;
175 case LOG_INFO: return OS_LOG_TYPE_INFO;
176 case LOG_DEBUG: return OS_LOG_TYPE_DEBUG;
177 default: return OS_LOG_TYPE_DEFAULT;
178 }
179 }
180
181 static inline int
182 flow_divert_pcb_cmp(const struct flow_divert_pcb *pcb_a, const struct flow_divert_pcb *pcb_b)
183 {
184 return memcmp(&pcb_a->hash, &pcb_b->hash, sizeof(pcb_a->hash));
185 }
186
187 RB_PROTOTYPE(fd_pcb_tree, flow_divert_pcb, rb_link, flow_divert_pcb_cmp);
188 RB_GENERATE(fd_pcb_tree, flow_divert_pcb, rb_link, flow_divert_pcb_cmp);
189
190 static const char *
191 flow_divert_packet_type2str(uint8_t packet_type)
192 {
193 switch (packet_type) {
194 case FLOW_DIVERT_PKT_CONNECT:
195 return "connect";
196 case FLOW_DIVERT_PKT_CONNECT_RESULT:
197 return "connect result";
198 case FLOW_DIVERT_PKT_DATA:
199 return "data";
200 case FLOW_DIVERT_PKT_CLOSE:
201 return "close";
202 case FLOW_DIVERT_PKT_READ_NOTIFY:
203 return "read notification";
204 case FLOW_DIVERT_PKT_PROPERTIES_UPDATE:
205 return "properties update";
206 case FLOW_DIVERT_PKT_APP_MAP_CREATE:
207 return "app map create";
208 default:
209 return "unknown";
210 }
211 }
212
213 static struct flow_divert_pcb *
214 flow_divert_pcb_lookup(uint32_t hash, struct flow_divert_group *group)
215 {
216 struct flow_divert_pcb key_item;
217 struct flow_divert_pcb *fd_cb = NULL;
218
219 key_item.hash = hash;
220
221 lck_rw_lock_shared(&group->lck);
222 fd_cb = RB_FIND(fd_pcb_tree, &group->pcb_tree, &key_item);
223 FDRETAIN(fd_cb);
224 lck_rw_done(&group->lck);
225
226 return fd_cb;
227 }
228
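/*
 * Generate a flow hash that is unique across all active groups and insert the
 * PCB into the tree of the group identified by ctl_unit. Retries a few times
 * on hash collisions and returns EEXIST if no unique hash could be found.
 */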
229 static errno_t
230 flow_divert_pcb_insert(struct flow_divert_pcb *fd_cb, uint32_t ctl_unit)
231 {
232 errno_t error = 0;
233 struct flow_divert_pcb *exist = NULL;
234 struct flow_divert_group *group;
235 static uint32_t g_nextkey = 1;
236 static uint32_t g_hash_seed = 0;
237 int try_count = 0;
238
239 if (ctl_unit == 0 || ctl_unit >= GROUP_COUNT_MAX) {
240 return EINVAL;
241 }
242
243 socket_unlock(fd_cb->so, 0);
244 lck_rw_lock_shared(&g_flow_divert_group_lck);
245
246 if (g_flow_divert_groups == NULL || g_active_group_count == 0) {
247 FDLOG0(LOG_ERR, &nil_pcb, "No active groups, flow divert cannot be used for this socket");
248 error = ENETUNREACH;
249 goto done;
250 }
251
252 group = g_flow_divert_groups[ctl_unit];
253 if (group == NULL) {
254 FDLOG(LOG_ERR, &nil_pcb, "Group for control unit %u is NULL, flow divert cannot be used for this socket", ctl_unit);
255 error = ENETUNREACH;
256 goto done;
257 }
258
259 socket_lock(fd_cb->so, 0);
260
261 do {
262 uint32_t key[2];
263 uint32_t idx;
264
265 key[0] = g_nextkey++;
266 key[1] = RandomULong();
267
268 if (g_hash_seed == 0) {
269 g_hash_seed = RandomULong();
270 }
271
272 fd_cb->hash = net_flowhash(key, sizeof(key), g_hash_seed);
273
274 for (idx = 1; idx < GROUP_COUNT_MAX; idx++) {
275 struct flow_divert_group *curr_group = g_flow_divert_groups[idx];
276 if (curr_group != NULL && curr_group != group) {
277 lck_rw_lock_shared(&curr_group->lck);
278 exist = RB_FIND(fd_pcb_tree, &curr_group->pcb_tree, fd_cb);
279 lck_rw_done(&curr_group->lck);
280 if (exist != NULL) {
281 break;
282 }
283 }
284 }
285
286 if (exist == NULL) {
287 lck_rw_lock_exclusive(&group->lck);
288 exist = RB_INSERT(fd_pcb_tree, &group->pcb_tree, fd_cb);
289 lck_rw_done(&group->lck);
290 }
291 } while (exist != NULL && try_count++ < 3);
292
293 if (exist == NULL) {
294 fd_cb->group = group;
295 FDRETAIN(fd_cb); /* The group now has a reference */
296 } else {
297 fd_cb->hash = 0;
298 error = EEXIST;
299 }
300
301 socket_unlock(fd_cb->so, 0);
302
303 done:
304 lck_rw_done(&g_flow_divert_group_lck);
305 socket_lock(fd_cb->so, 0);
306
307 return error;
308 }
309
310 static struct flow_divert_pcb *
311 flow_divert_pcb_create(socket_t so)
312 {
313 struct flow_divert_pcb *new_pcb = NULL;
314
315 new_pcb = zalloc_flags(flow_divert_pcb_zone, Z_WAITOK | Z_ZERO);
316 lck_mtx_init(&new_pcb->mtx, flow_divert_mtx_grp, flow_divert_mtx_attr);
317 new_pcb->so = so;
318 new_pcb->log_level = nil_pcb.log_level;
319
320 FDRETAIN(new_pcb); /* Represents the socket's reference */
321
322 return new_pcb;
323 }
324
325 static void
326 flow_divert_pcb_destroy(struct flow_divert_pcb *fd_cb)
327 {
328 FDLOG(LOG_INFO, fd_cb, "Destroying, app tx %u, tunnel tx %u, tunnel rx %u",
329 fd_cb->bytes_written_by_app, fd_cb->bytes_sent, fd_cb->bytes_received);
330
331 if (fd_cb->connect_token != NULL) {
332 mbuf_freem(fd_cb->connect_token);
333 }
334 if (fd_cb->connect_packet != NULL) {
335 mbuf_freem(fd_cb->connect_packet);
336 }
337 if (fd_cb->app_data != NULL) {
338 FREE(fd_cb->app_data, M_TEMP);
339 }
340 if (fd_cb->original_remote_endpoint != NULL) {
341 FREE(fd_cb->original_remote_endpoint, M_SONAME);
342 }
343 zfree(flow_divert_pcb_zone, fd_cb);
344 }
345
346 static void
347 flow_divert_pcb_remove(struct flow_divert_pcb *fd_cb)
348 {
349 if (fd_cb->group != NULL) {
350 struct flow_divert_group *group = fd_cb->group;
351 lck_rw_lock_exclusive(&group->lck);
352 FDLOG(LOG_INFO, fd_cb, "Removing from group %d, ref count = %d", group->ctl_unit, fd_cb->ref_count);
353 RB_REMOVE(fd_pcb_tree, &group->pcb_tree, fd_cb);
354 fd_cb->group = NULL;
355 FDRELEASE(fd_cb); /* Release the group's reference */
356 lck_rw_done(&group->lck);
357 }
358 }
359
360 static int
361 flow_divert_packet_init(struct flow_divert_pcb *fd_cb, uint8_t packet_type, mbuf_t *packet)
362 {
363 struct flow_divert_packet_header hdr;
364 int error = 0;
365
366 error = mbuf_gethdr(MBUF_DONTWAIT, MBUF_TYPE_HEADER, packet);
367 if (error) {
368 FDLOG(LOG_ERR, fd_cb, "failed to allocate the header mbuf: %d", error);
369 return error;
370 }
371
372 hdr.packet_type = packet_type;
373 hdr.conn_id = htonl(fd_cb->hash);
374
375 /* Lay down the header */
376 error = mbuf_copyback(*packet, 0, sizeof(hdr), &hdr, MBUF_DONTWAIT);
377 if (error) {
378 FDLOG(LOG_ERR, fd_cb, "mbuf_copyback(hdr) failed: %d", error);
379 mbuf_freem(*packet);
380 *packet = NULL;
381 return error;
382 }
383
384 return 0;
385 }
386
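/*
 * Control packets consist of a flow_divert_packet_header followed by a series
 * of TLVs: a one-byte type, a four-byte length in network byte order, and the
 * value itself. The helpers below append, locate, and extract those TLVs from
 * an mbuf chain.
 */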
387 static int
388 flow_divert_packet_append_tlv(mbuf_t packet, uint8_t type, uint32_t length, const void *value)
389 {
390 uint32_t net_length = htonl(length);
391 int error = 0;
392
393 error = mbuf_copyback(packet, mbuf_pkthdr_len(packet), sizeof(type), &type, MBUF_DONTWAIT);
394 if (error) {
395 FDLOG(LOG_ERR, &nil_pcb, "failed to append the type (%d)", type);
396 return error;
397 }
398
399 error = mbuf_copyback(packet, mbuf_pkthdr_len(packet), sizeof(net_length), &net_length, MBUF_DONTWAIT);
400 if (error) {
401 FDLOG(LOG_ERR, &nil_pcb, "failed to append the length (%u)", length);
402 return error;
403 }
404
405 error = mbuf_copyback(packet, mbuf_pkthdr_len(packet), length, value, MBUF_DONTWAIT);
406 if (error) {
407 FDLOG0(LOG_ERR, &nil_pcb, "failed to append the value");
408 return error;
409 }
410
411 return error;
412 }
413
414 static int
415 flow_divert_packet_find_tlv(mbuf_t packet, int offset, uint8_t type, int *err, int next)
416 {
417 size_t cursor = offset;
418 int error = 0;
419 uint32_t curr_length;
420 uint8_t curr_type;
421
422 *err = 0;
423
424 do {
425 if (!next) {
426 error = mbuf_copydata(packet, cursor, sizeof(curr_type), &curr_type);
427 if (error) {
428 *err = ENOENT;
429 return -1;
430 }
431 } else {
432 next = 0;
433 curr_type = FLOW_DIVERT_TLV_NIL;
434 }
435
436 if (curr_type != type) {
437 cursor += sizeof(curr_type);
438 error = mbuf_copydata(packet, cursor, sizeof(curr_length), &curr_length);
439 if (error) {
440 *err = error;
441 return -1;
442 }
443
444 cursor += (sizeof(curr_length) + ntohl(curr_length));
445 }
446 } while (curr_type != type);
447
448 return (int)cursor;
449 }
450
451 static int
452 flow_divert_packet_get_tlv(mbuf_t packet, int offset, uint8_t type, size_t buff_len, void *buff, uint32_t *val_size)
453 {
454 int error = 0;
455 uint32_t length;
456 int tlv_offset;
457
458 tlv_offset = flow_divert_packet_find_tlv(packet, offset, type, &error, 0);
459 if (tlv_offset < 0) {
460 return error;
461 }
462
463 error = mbuf_copydata(packet, tlv_offset + sizeof(type), sizeof(length), &length);
464 if (error) {
465 return error;
466 }
467
468 length = ntohl(length);
469
470 uint32_t data_offset = tlv_offset + sizeof(type) + sizeof(length);
471
472 if (length > (mbuf_pkthdr_len(packet) - data_offset)) {
473 FDLOG(LOG_ERR, &nil_pcb, "Length of %u TLV (%u) is larger than remaining packet data (%lu)", type, length, (mbuf_pkthdr_len(packet) - data_offset));
474 return EINVAL;
475 }
476
477 if (val_size != NULL) {
478 *val_size = length;
479 }
480
481 if (buff != NULL && buff_len > 0) {
482 memset(buff, 0, buff_len);
483 size_t to_copy = (length < buff_len) ? length : buff_len;
484 error = mbuf_copydata(packet, data_offset, to_copy, buff);
485 if (error) {
486 return error;
487 }
488 }
489
490 return 0;
491 }
492
493 static int
494 flow_divert_packet_compute_hmac(mbuf_t packet, struct flow_divert_group *group, uint8_t *hmac)
495 {
496 mbuf_t curr_mbuf = packet;
497
498 if (g_crypto_funcs == NULL || group->token_key == NULL) {
499 return ENOPROTOOPT;
500 }
501
502 cchmac_di_decl(g_crypto_funcs->ccsha1_di, hmac_ctx);
503 g_crypto_funcs->cchmac_init_fn(g_crypto_funcs->ccsha1_di, hmac_ctx, group->token_key_size, group->token_key);
504
505 while (curr_mbuf != NULL) {
506 g_crypto_funcs->cchmac_update_fn(g_crypto_funcs->ccsha1_di, hmac_ctx, mbuf_len(curr_mbuf), mbuf_data(curr_mbuf));
507 curr_mbuf = mbuf_next(curr_mbuf);
508 }
509
510 g_crypto_funcs->cchmac_final_fn(g_crypto_funcs->ccsha1_di, hmac_ctx, hmac);
511
512 return 0;
513 }
514
515 static int
516 flow_divert_packet_verify_hmac(mbuf_t packet, uint32_t ctl_unit)
517 {
518 int error = 0;
519 struct flow_divert_group *group = NULL;
520 int hmac_offset;
521 uint8_t packet_hmac[SHA_DIGEST_LENGTH];
522 uint8_t computed_hmac[SHA_DIGEST_LENGTH];
523 mbuf_t tail;
524
525 lck_rw_lock_shared(&g_flow_divert_group_lck);
526
527 if (g_flow_divert_groups != NULL && g_active_group_count > 0) {
528 group = g_flow_divert_groups[ctl_unit];
529 }
530
531 if (group == NULL) {
532 lck_rw_done(&g_flow_divert_group_lck);
533 return ENOPROTOOPT;
534 }
535
536 lck_rw_lock_shared(&group->lck);
537
538 if (group->token_key == NULL) {
539 error = ENOPROTOOPT;
540 goto done;
541 }
542
543 hmac_offset = flow_divert_packet_find_tlv(packet, 0, FLOW_DIVERT_TLV_HMAC, &error, 0);
544 if (hmac_offset < 0) {
545 goto done;
546 }
547
548 error = flow_divert_packet_get_tlv(packet, hmac_offset, FLOW_DIVERT_TLV_HMAC, sizeof(packet_hmac), packet_hmac, NULL);
549 if (error) {
550 goto done;
551 }
552
553 /* Chop off the HMAC TLV */
554 error = mbuf_split(packet, hmac_offset, MBUF_WAITOK, &tail);
555 if (error) {
556 goto done;
557 }
558
559 mbuf_free(tail);
560
561 error = flow_divert_packet_compute_hmac(packet, group, computed_hmac);
562 if (error) {
563 goto done;
564 }
565
566 if (cc_cmp_safe(sizeof(packet_hmac), packet_hmac, computed_hmac)) {
567 FDLOG0(LOG_WARNING, &nil_pcb, "HMAC in token does not match computed HMAC");
568 error = EINVAL;
569 goto done;
570 }
571
572 done:
573 lck_rw_done(&group->lck);
574 lck_rw_done(&g_flow_divert_group_lck);
575 return error;
576 }
577
578 static void
579 flow_divert_add_data_statistics(struct flow_divert_pcb *fd_cb, size_t data_len, Boolean send)
580 {
581 struct inpcb *inp = NULL;
582 struct ifnet *ifp = NULL;
583 Boolean cell = FALSE;
584 Boolean wifi = FALSE;
585 Boolean wired = FALSE;
586
587 inp = sotoinpcb(fd_cb->so);
588 if (inp == NULL) {
589 return;
590 }
591
592 if (inp->inp_vflag & INP_IPV4) {
593 ifp = inp->inp_last_outifp;
594 } else if (inp->inp_vflag & INP_IPV6) {
595 ifp = inp->in6p_last_outifp;
596 }
597 if (ifp != NULL) {
598 cell = IFNET_IS_CELLULAR(ifp);
599 wifi = (!cell && IFNET_IS_WIFI(ifp));
600 wired = (!wifi && IFNET_IS_WIRED(ifp));
601 }
602
603 if (send) {
604 INP_ADD_STAT(inp, cell, wifi, wired, txpackets, 1);
605 INP_ADD_STAT(inp, cell, wifi, wired, txbytes, data_len);
606 } else {
607 INP_ADD_STAT(inp, cell, wifi, wired, rxpackets, 1);
608 INP_ADD_STAT(inp, cell, wifi, wired, rxbytes, data_len);
609 }
610 inp_set_activity_bitmap(inp);
611 }
612
613 static errno_t
614 flow_divert_check_no_cellular(struct flow_divert_pcb *fd_cb)
615 {
616 struct inpcb *inp = sotoinpcb(fd_cb->so);
617 if (INP_NO_CELLULAR(inp)) {
618 struct ifnet *ifp = NULL;
619 if (inp->inp_vflag & INP_IPV4) {
620 ifp = inp->inp_last_outifp;
621 } else if (inp->inp_vflag & INP_IPV6) {
622 ifp = inp->in6p_last_outifp;
623 }
624 if (ifp != NULL && IFNET_IS_CELLULAR(ifp)) {
625 FDLOG0(LOG_ERR, fd_cb, "Cellular is denied");
626 return EHOSTUNREACH;
627 }
628 }
629 return 0;
630 }
631
632 static errno_t
633 flow_divert_check_no_expensive(struct flow_divert_pcb *fd_cb)
634 {
635 struct inpcb *inp = sotoinpcb(fd_cb->so);
636 if (INP_NO_EXPENSIVE(inp)) {
637 struct ifnet *ifp = NULL;
638 if (inp->inp_vflag & INP_IPV4) {
639 ifp = inp->inp_last_outifp;
640 } else if (inp->inp_vflag & INP_IPV6) {
641 ifp = inp->in6p_last_outifp;
642 }
643 if (ifp != NULL && IFNET_IS_EXPENSIVE(ifp)) {
644 FDLOG0(LOG_ERR, fd_cb, "Expensive is denied");
645 return EHOSTUNREACH;
646 }
647 }
648 return 0;
649 }
650
651 static errno_t
652 flow_divert_check_no_constrained(struct flow_divert_pcb *fd_cb)
653 {
654 struct inpcb *inp = sotoinpcb(fd_cb->so);
655 if (INP_NO_CONSTRAINED(inp)) {
656 struct ifnet *ifp = NULL;
657 if (inp->inp_vflag & INP_IPV4) {
658 ifp = inp->inp_last_outifp;
659 } else if (inp->inp_vflag & INP_IPV6) {
660 ifp = inp->in6p_last_outifp;
661 }
662 if (ifp != NULL && IFNET_IS_CONSTRAINED(ifp)) {
663 FDLOG0(LOG_ERR, fd_cb, "Constrained is denied");
664 return EHOSTUNREACH;
665 }
666 }
667 return 0;
668 }
669
670 static void
671 flow_divert_update_closed_state(struct flow_divert_pcb *fd_cb, int how, Boolean tunnel)
672 {
673 if (how != SHUT_RD) {
674 fd_cb->flags |= FLOW_DIVERT_WRITE_CLOSED;
675 if (tunnel || !(fd_cb->flags & FLOW_DIVERT_CONNECT_STARTED)) {
676 fd_cb->flags |= FLOW_DIVERT_TUNNEL_WR_CLOSED;
677 /* If the tunnel is not accepting writes any more, then flush the send buffer */
678 sbflush(&fd_cb->so->so_snd);
679 }
680 }
681 if (how != SHUT_WR) {
682 fd_cb->flags |= FLOW_DIVERT_READ_CLOSED;
683 if (tunnel || !(fd_cb->flags & FLOW_DIVERT_CONNECT_STARTED)) {
684 fd_cb->flags |= FLOW_DIVERT_TUNNEL_RD_CLOSED;
685 }
686 }
687 }
688
689 static uint16_t
690 trie_node_alloc(struct flow_divert_trie *trie)
691 {
692 if (trie->nodes_free_next < trie->nodes_count) {
693 uint16_t node_idx = trie->nodes_free_next++;
694 TRIE_NODE(trie, node_idx).child_map = NULL_TRIE_IDX;
695 return node_idx;
696 } else {
697 return NULL_TRIE_IDX;
698 }
699 }
700
701 static uint16_t
702 trie_child_map_alloc(struct flow_divert_trie *trie)
703 {
704 if (trie->child_maps_free_next < trie->child_maps_count) {
705 return trie->child_maps_free_next++;
706 } else {
707 return NULL_TRIE_IDX;
708 }
709 }
710
711 static uint16_t
712 trie_bytes_move(struct flow_divert_trie *trie, uint16_t bytes_idx, size_t bytes_size)
713 {
714 uint16_t start = trie->bytes_free_next;
715 if (start + bytes_size <= trie->bytes_count) {
716 if (start != bytes_idx) {
717 memmove(&TRIE_BYTE(trie, start), &TRIE_BYTE(trie, bytes_idx), bytes_size);
718 }
719 trie->bytes_free_next += bytes_size;
720 return start;
721 } else {
722 return NULL_TRIE_IDX;
723 }
724 }
725
726 static uint16_t
727 flow_divert_trie_insert(struct flow_divert_trie *trie, uint16_t string_start, size_t string_len)
728 {
729 uint16_t current = trie->root;
730 uint16_t child = trie->root;
731 uint16_t string_end = string_start + (uint16_t)string_len;
732 uint16_t string_idx = string_start;
733 uint16_t string_remainder = (uint16_t)string_len;
734
735 while (child != NULL_TRIE_IDX) {
736 uint16_t parent = current;
737 uint16_t node_idx;
738 uint16_t current_end;
739
740 current = child;
741 child = NULL_TRIE_IDX;
742
743 current_end = TRIE_NODE(trie, current).start + TRIE_NODE(trie, current).length;
744
745 for (node_idx = TRIE_NODE(trie, current).start;
746 node_idx < current_end &&
747 string_idx < string_end &&
748 TRIE_BYTE(trie, node_idx) == TRIE_BYTE(trie, string_idx);
749 node_idx++, string_idx++) {
750 ;
751 }
752
753 string_remainder = string_end - string_idx;
754
755 if (node_idx < (TRIE_NODE(trie, current).start + TRIE_NODE(trie, current).length)) {
756 /*
757 * We did not reach the end of the current node's string.
758 * We need to split the current node into two:
759 * 1. A new node that contains the prefix of the node that matches
760 * the prefix of the string being inserted.
761 * 2. The current node modified to point to the remainder
762 * of the current node's string.
763 */
764 uint16_t prefix = trie_node_alloc(trie);
765 if (prefix == NULL_TRIE_IDX) {
766 FDLOG0(LOG_ERR, &nil_pcb, "Ran out of trie nodes while splitting an existing node");
767 return NULL_TRIE_IDX;
768 }
769
770 /*
771 * Prefix points to the portion of the current node's string that has matched
772 * the input string thus far.
773 */
774 TRIE_NODE(trie, prefix).start = TRIE_NODE(trie, current).start;
775 TRIE_NODE(trie, prefix).length = (node_idx - TRIE_NODE(trie, current).start);
776
777 /*
778 * Prefix has the current node as the child corresponding to the first byte
779 * after the split.
780 */
781 TRIE_NODE(trie, prefix).child_map = trie_child_map_alloc(trie);
782 if (TRIE_NODE(trie, prefix).child_map == NULL_TRIE_IDX) {
783 FDLOG0(LOG_ERR, &nil_pcb, "Ran out of child maps while splitting an existing node");
784 return NULL_TRIE_IDX;
785 }
786 TRIE_CHILD(trie, prefix, TRIE_BYTE(trie, node_idx)) = current;
787
788 /* Parent has the prefix as the child corresponding to the first byte in the prefix */
789 TRIE_CHILD(trie, parent, TRIE_BYTE(trie, TRIE_NODE(trie, prefix).start)) = prefix;
790
791 /* Current node is adjusted to point to the remainder */
792 TRIE_NODE(trie, current).start = node_idx;
793 TRIE_NODE(trie, current).length -= TRIE_NODE(trie, prefix).length;
794
795 /* We want to insert the new leaf (if any) as a child of the prefix */
796 current = prefix;
797 }
798
799 if (string_remainder > 0) {
800 /*
801 * We still have bytes in the string that have not been matched yet.
802 * If the current node has children, iterate to the child corresponding
803 * to the next byte in the string.
804 */
805 if (TRIE_NODE(trie, current).child_map != NULL_TRIE_IDX) {
806 child = TRIE_CHILD(trie, current, TRIE_BYTE(trie, string_idx));
807 }
808 }
809 } /* while (child != NULL_TRIE_IDX) */
810
811 if (string_remainder > 0) {
812 /* Add a new leaf containing the remainder of the string */
813 uint16_t leaf = trie_node_alloc(trie);
814 if (leaf == NULL_TRIE_IDX) {
815 FDLOG0(LOG_ERR, &nil_pcb, "Ran out of trie nodes while inserting a new leaf");
816 return NULL_TRIE_IDX;
817 }
818
819 TRIE_NODE(trie, leaf).start = trie_bytes_move(trie, string_idx, string_remainder);
820 if (TRIE_NODE(trie, leaf).start == NULL_TRIE_IDX) {
821 FDLOG0(LOG_ERR, &nil_pcb, "Ran out of bytes while inserting a new leaf");
822 return NULL_TRIE_IDX;
823 }
824 TRIE_NODE(trie, leaf).length = string_remainder;
825
826 /* Set the new leaf as the child of the current node */
827 if (TRIE_NODE(trie, current).child_map == NULL_TRIE_IDX) {
828 TRIE_NODE(trie, current).child_map = trie_child_map_alloc(trie);
829 if (TRIE_NODE(trie, current).child_map == NULL_TRIE_IDX) {
830 FDLOG0(LOG_ERR, &nil_pcb, "Ran out of child maps while inserting a new leaf");
831 return NULL_TRIE_IDX;
832 }
833 }
834 TRIE_CHILD(trie, current, TRIE_BYTE(trie, TRIE_NODE(trie, leaf).start)) = leaf;
835 current = leaf;
836 } /* else duplicate or this string is a prefix of one of the existing strings */
837
838 return current;
839 }
840
841 #define APPLE_WEBCLIP_ID_PREFIX "com.apple.webapp"
842 static uint16_t
843 flow_divert_trie_search(struct flow_divert_trie *trie, const uint8_t *string_bytes)
844 {
845 uint16_t current = trie->root;
846 uint16_t string_idx = 0;
847
848 while (current != NULL_TRIE_IDX) {
849 uint16_t next = NULL_TRIE_IDX;
850 uint16_t node_end = TRIE_NODE(trie, current).start + TRIE_NODE(trie, current).length;
851 uint16_t node_idx;
852
853 for (node_idx = TRIE_NODE(trie, current).start;
854 node_idx < node_end && string_bytes[string_idx] != '\0' && string_bytes[string_idx] == TRIE_BYTE(trie, node_idx);
855 node_idx++, string_idx++) {
856 ;
857 }
858
859 if (node_idx == node_end) {
860 if (string_bytes[string_idx] == '\0') {
861 return current; /* Got an exact match */
862 } else if (string_idx == strlen(APPLE_WEBCLIP_ID_PREFIX) &&
863 0 == strncmp((const char *)string_bytes, APPLE_WEBCLIP_ID_PREFIX, string_idx)) {
864 return current; /* Got an apple webclip id prefix match */
865 } else if (TRIE_NODE(trie, current).child_map != NULL_TRIE_IDX) {
866 next = TRIE_CHILD(trie, current, string_bytes[string_idx]);
867 }
868 }
869 current = next;
870 }
871
872 return NULL_TRIE_IDX;
873 }
874
875 struct uuid_search_info {
876 uuid_t target_uuid;
877 char *found_signing_id;
878 boolean_t found_multiple_signing_ids;
879 proc_t found_proc;
880 };
881
882 static int
883 flow_divert_find_proc_by_uuid_callout(proc_t p, void *arg)
884 {
885 struct uuid_search_info *info = (struct uuid_search_info *)arg;
886 int result = PROC_RETURNED_DONE; /* By default, we didn't find the process */
887
888 if (info->found_signing_id != NULL) {
889 if (!info->found_multiple_signing_ids) {
890 /* All processes that were found had the same signing identifier, so just claim this first one and be done. */
891 info->found_proc = p;
892 result = PROC_CLAIMED_DONE;
893 } else {
894 uuid_string_t uuid_str;
895 uuid_unparse(info->target_uuid, uuid_str);
896 FDLOG(LOG_WARNING, &nil_pcb, "Found multiple processes with UUID %s with different signing identifiers", uuid_str);
897 }
898 FREE(info->found_signing_id, M_TEMP);
899 info->found_signing_id = NULL;
900 }
901
902 if (result == PROC_RETURNED_DONE) {
903 uuid_string_t uuid_str;
904 uuid_unparse(info->target_uuid, uuid_str);
905 FDLOG(LOG_WARNING, &nil_pcb, "Failed to find a process with UUID %s", uuid_str);
906 }
907
908 return result;
909 }
910
911 static int
912 flow_divert_find_proc_by_uuid_filter(proc_t p, void *arg)
913 {
914 struct uuid_search_info *info = (struct uuid_search_info *)arg;
915 int include = 0;
916
917 if (info->found_multiple_signing_ids) {
918 return include;
919 }
920
921 include = (uuid_compare(p->p_uuid, info->target_uuid) == 0);
922 if (include) {
923 const char *signing_id = cs_identity_get(p);
924 if (signing_id != NULL) {
925 FDLOG(LOG_INFO, &nil_pcb, "Found process %d with signing identifier %s", p->p_pid, signing_id);
926 size_t signing_id_size = strlen(signing_id) + 1;
927 if (info->found_signing_id == NULL) {
928 MALLOC(info->found_signing_id, char *, signing_id_size, M_TEMP, M_WAITOK);
929 memcpy(info->found_signing_id, signing_id, signing_id_size);
930 } else if (memcmp(signing_id, info->found_signing_id, signing_id_size)) {
931 info->found_multiple_signing_ids = TRUE;
932 }
933 } else {
934 info->found_multiple_signing_ids = TRUE;
935 }
936 include = !info->found_multiple_signing_ids;
937 }
938
939 return include;
940 }
941
942 static proc_t
943 flow_divert_find_proc_by_uuid(uuid_t uuid)
944 {
945 struct uuid_search_info info;
946
947 if (LOG_INFO <= nil_pcb.log_level) {
948 uuid_string_t uuid_str;
949 uuid_unparse(uuid, uuid_str);
950 FDLOG(LOG_INFO, &nil_pcb, "Looking for process with UUID %s", uuid_str);
951 }
952
953 memset(&info, 0, sizeof(info));
954 info.found_proc = PROC_NULL;
955 uuid_copy(info.target_uuid, uuid);
956
957 proc_iterate(PROC_ALLPROCLIST, flow_divert_find_proc_by_uuid_callout, &info, flow_divert_find_proc_by_uuid_filter, &info);
958
959 return info.found_proc;
960 }
961
962 static int
963 flow_divert_add_proc_info(struct flow_divert_pcb *fd_cb, proc_t proc, const char *signing_id, mbuf_t connect_packet, bool is_effective)
964 {
965 int error = 0;
966 uint8_t *cdhash = NULL;
967 audit_token_t audit_token = {};
968 const char *proc_cs_id = signing_id;
969
970 proc_lock(proc);
971
972 if (proc_cs_id == NULL) {
973 if (proc->p_csflags & (CS_VALID | CS_DEBUGGED)) {
974 proc_cs_id = cs_identity_get(proc);
975 } else {
976 FDLOG0(LOG_ERR, fd_cb, "Signature of proc is invalid");
977 }
978 }
979
980 if (is_effective) {
981 lck_rw_lock_shared(&fd_cb->group->lck);
982 if (!(fd_cb->group->flags & FLOW_DIVERT_GROUP_FLAG_NO_APP_MAP)) {
983 if (proc_cs_id != NULL) {
984 uint16_t result = flow_divert_trie_search(&fd_cb->group->signing_id_trie, (const uint8_t *)proc_cs_id);
985 if (result == NULL_TRIE_IDX) {
986 FDLOG(LOG_WARNING, fd_cb, "%s did not match", proc_cs_id);
987 error = EPERM;
988 } else {
989 FDLOG(LOG_INFO, fd_cb, "%s matched", proc_cs_id);
990 }
991 } else {
992 error = EPERM;
993 }
994 }
995 lck_rw_done(&fd_cb->group->lck);
996 }
997
998 if (error != 0) {
999 goto done;
1000 }
1001
1002 /*
1003 * If signing_id is not NULL then it came from the flow divert token and will be added
1004 * as part of the token, so there is no need to add it here.
1005 */
1006 if (signing_id == NULL && proc_cs_id != NULL) {
1007 error = flow_divert_packet_append_tlv(connect_packet,
1008 (is_effective ? FLOW_DIVERT_TLV_SIGNING_ID : FLOW_DIVERT_TLV_APP_REAL_SIGNING_ID),
1009 (uint32_t)strlen(proc_cs_id),
1010 proc_cs_id);
1011 if (error != 0) {
1012 FDLOG(LOG_ERR, fd_cb, "failed to append the signing ID: %d", error);
1013 goto done;
1014 }
1015 }
1016
1017 cdhash = cs_get_cdhash(proc);
1018 if (cdhash != NULL) {
1019 error = flow_divert_packet_append_tlv(connect_packet,
1020 (is_effective ? FLOW_DIVERT_TLV_CDHASH : FLOW_DIVERT_TLV_APP_REAL_CDHASH),
1021 SHA1_RESULTLEN,
1022 cdhash);
1023 if (error) {
1024 FDLOG(LOG_ERR, fd_cb, "failed to append the cdhash: %d", error);
1025 goto done;
1026 }
1027 } else {
1028 FDLOG0(LOG_ERR, fd_cb, "failed to get the cdhash");
1029 }
1030
1031 task_t task = proc_task(proc);
1032 if (task != TASK_NULL) {
1033 mach_msg_type_number_t count = TASK_AUDIT_TOKEN_COUNT;
1034 kern_return_t rc = task_info(task, TASK_AUDIT_TOKEN, (task_info_t)&audit_token, &count);
1035 if (rc == KERN_SUCCESS) {
1036 int append_error = flow_divert_packet_append_tlv(connect_packet,
1037 (is_effective ? FLOW_DIVERT_TLV_APP_AUDIT_TOKEN : FLOW_DIVERT_TLV_APP_REAL_AUDIT_TOKEN),
1038 sizeof(audit_token_t),
1039 &audit_token);
1040 if (append_error) {
1041 FDLOG(LOG_ERR, fd_cb, "failed to append app audit token: %d", append_error);
1042 }
1043 }
1044 }
1045
1046 done:
1047 proc_unlock(proc);
1048
1049 return error;
1050 }
1051
1052 static int
1053 flow_divert_add_all_proc_info(struct flow_divert_pcb *fd_cb, struct socket *so, proc_t proc, const char *signing_id, mbuf_t connect_packet)
1054 {
1055 int error = 0;
1056 proc_t effective_proc = PROC_NULL;
1057 proc_t responsible_proc = PROC_NULL;
1058 proc_t real_proc = proc_find(so->last_pid);
1059 bool release_real_proc = true;
1060
1061 proc_t src_proc = PROC_NULL;
1062 proc_t real_src_proc = PROC_NULL;
1063
1064 if (real_proc == PROC_NULL) {
1065 FDLOG(LOG_ERR, fd_cb, "failed to find the real proc record for %d", so->last_pid);
1066 release_real_proc = false;
1067 real_proc = proc;
1068 if (real_proc == PROC_NULL) {
1069 real_proc = current_proc();
1070 }
1071 }
1072
1073 if (so->so_flags & SOF_DELEGATED) {
1074 if (real_proc->p_pid != so->e_pid) {
1075 effective_proc = proc_find(so->e_pid);
1076 } else if (uuid_compare(real_proc->p_uuid, so->e_uuid)) {
1077 effective_proc = flow_divert_find_proc_by_uuid(so->e_uuid);
1078 }
1079 }
1080
1081 #if defined(XNU_TARGET_OS_OSX)
1082 lck_rw_lock_shared(&fd_cb->group->lck);
1083 if (!(fd_cb->group->flags & FLOW_DIVERT_GROUP_FLAG_NO_APP_MAP)) {
1084 if (so->so_rpid > 0) {
1085 responsible_proc = proc_find(so->so_rpid);
1086 }
1087 }
1088 lck_rw_done(&fd_cb->group->lck);
1089 #endif
1090
1091 real_src_proc = real_proc;
1092
1093 if (responsible_proc != PROC_NULL) {
1094 src_proc = responsible_proc;
1095 if (effective_proc != NULL) {
1096 real_src_proc = effective_proc;
1097 }
1098 } else if (effective_proc != PROC_NULL) {
1099 src_proc = effective_proc;
1100 } else {
1101 src_proc = real_proc;
1102 }
1103
1104 error = flow_divert_add_proc_info(fd_cb, src_proc, signing_id, connect_packet, true);
1105 if (error != 0) {
1106 goto done;
1107 }
1108
1109 if (real_src_proc != NULL && real_src_proc != src_proc) {
1110 error = flow_divert_add_proc_info(fd_cb, real_src_proc, NULL, connect_packet, false);
1111 if (error != 0) {
1112 goto done;
1113 }
1114 }
1115
1116 done:
1117 if (responsible_proc != PROC_NULL) {
1118 proc_rele(responsible_proc);
1119 }
1120
1121 if (effective_proc != PROC_NULL) {
1122 proc_rele(effective_proc);
1123 }
1124
1125 if (real_proc != PROC_NULL && release_real_proc) {
1126 proc_rele(real_proc);
1127 }
1128
1129 return error;
1130 }
1131
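/*
 * Hand a packet to the control agent over the kernel control socket. If the
 * control socket is backed up, the packet is optionally queued on the group's
 * send_queue and GROUP_BIT_CTL_ENQUEUE_BLOCKED is set to mark the group as
 * blocked.
 */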
1132 static int
1133 flow_divert_send_packet(struct flow_divert_pcb *fd_cb, mbuf_t packet, Boolean enqueue)
1134 {
1135 int error;
1136
1137 if (fd_cb->group == NULL) {
1138 fd_cb->so->so_error = ECONNABORTED;
1139 flow_divert_disconnect_socket(fd_cb->so);
1140 return ECONNABORTED;
1141 }
1142
1143 lck_rw_lock_shared(&fd_cb->group->lck);
1144
1145 if (MBUFQ_EMPTY(&fd_cb->group->send_queue)) {
1146 error = ctl_enqueuembuf(g_flow_divert_kctl_ref, fd_cb->group->ctl_unit, packet, CTL_DATA_EOR);
1147 } else {
1148 error = ENOBUFS;
1149 }
1150
1151 if (error == ENOBUFS) {
1152 if (enqueue) {
1153 if (!lck_rw_lock_shared_to_exclusive(&fd_cb->group->lck)) {
1154 lck_rw_lock_exclusive(&fd_cb->group->lck);
1155 }
1156 MBUFQ_ENQUEUE(&fd_cb->group->send_queue, packet);
1157 error = 0;
1158 }
1159 OSTestAndSet(GROUP_BIT_CTL_ENQUEUE_BLOCKED, &fd_cb->group->atomic_bits);
1160 }
1161
1162 lck_rw_done(&fd_cb->group->lck);
1163
1164 return error;
1165 }
1166
1167 static int
1168 flow_divert_create_connect_packet(struct flow_divert_pcb *fd_cb, struct sockaddr *to, struct socket *so, proc_t p, mbuf_t *out_connect_packet)
1169 {
1170 int error = 0;
1171 int flow_type = 0;
1172 char *signing_id = NULL;
1173 mbuf_t connect_packet = NULL;
1174 cfil_sock_id_t cfil_sock_id = CFIL_SOCK_ID_NONE;
1175 const void *cfil_id = NULL;
1176 size_t cfil_id_size = 0;
1177 struct inpcb *inp = sotoinpcb(so);
1178 struct ifnet *ifp = NULL;
1179 uint32_t flags = 0;
1180
1181 error = flow_divert_packet_init(fd_cb, FLOW_DIVERT_PKT_CONNECT, &connect_packet);
1182 if (error) {
1183 goto done;
1184 }
1185
1186 if (fd_cb->connect_token != NULL && (fd_cb->flags & FLOW_DIVERT_HAS_HMAC)) {
1187 uint32_t sid_size = 0;
1188 int find_error = flow_divert_packet_get_tlv(fd_cb->connect_token, 0, FLOW_DIVERT_TLV_SIGNING_ID, 0, NULL, &sid_size);
1189 if (find_error == 0 && sid_size > 0) {
1190 MALLOC(signing_id, char *, sid_size + 1, M_TEMP, M_WAITOK | M_ZERO);
1191 if (signing_id != NULL) {
1192 flow_divert_packet_get_tlv(fd_cb->connect_token, 0, FLOW_DIVERT_TLV_SIGNING_ID, sid_size, signing_id, NULL);
1193 FDLOG(LOG_INFO, fd_cb, "Got %s from token", signing_id);
1194 }
1195 }
1196 }
1197
1198 socket_unlock(so, 0);
1199
1200 error = flow_divert_add_all_proc_info(fd_cb, so, p, signing_id, connect_packet);
1201
1202 socket_lock(so, 0);
1203
1204 if (signing_id != NULL) {
1205 FREE(signing_id, M_TEMP);
1206 }
1207
1208 if (error) {
1209 FDLOG(LOG_ERR, fd_cb, "Failed to add source proc info: %d", error);
1210 goto done;
1211 }
1212
1213 error = flow_divert_packet_append_tlv(connect_packet,
1214 FLOW_DIVERT_TLV_TRAFFIC_CLASS,
1215 sizeof(fd_cb->so->so_traffic_class),
1216 &fd_cb->so->so_traffic_class);
1217 if (error) {
1218 goto done;
1219 }
1220
1221 if (SOCK_TYPE(fd_cb->so) == SOCK_STREAM) {
1222 flow_type = FLOW_DIVERT_FLOW_TYPE_TCP;
1223 } else if (SOCK_TYPE(fd_cb->so) == SOCK_DGRAM) {
1224 flow_type = FLOW_DIVERT_FLOW_TYPE_UDP;
1225 } else {
1226 error = EINVAL;
1227 goto done;
1228 }
1229 error = flow_divert_packet_append_tlv(connect_packet,
1230 FLOW_DIVERT_TLV_FLOW_TYPE,
1231 sizeof(flow_type),
1232 &flow_type);
1233
1234 if (error) {
1235 goto done;
1236 }
1237
1238 if (fd_cb->connect_token != NULL) {
1239 unsigned int token_len = m_length(fd_cb->connect_token);
1240 mbuf_concatenate(connect_packet, fd_cb->connect_token);
1241 mbuf_pkthdr_adjustlen(connect_packet, token_len);
1242 fd_cb->connect_token = NULL;
1243 } else {
1244 error = flow_divert_append_target_endpoint_tlv(connect_packet, to);
1245 if (error) {
1246 goto done;
1247 }
1248 }
1249
1250 if (fd_cb->local_endpoint.sa.sa_family == AF_INET || fd_cb->local_endpoint.sa.sa_family == AF_INET6) {
1251 error = flow_divert_packet_append_tlv(connect_packet, FLOW_DIVERT_TLV_LOCAL_ADDR, fd_cb->local_endpoint.sa.sa_len, &(fd_cb->local_endpoint.sa));
1252 if (error) {
1253 goto done;
1254 }
1255 }
1256
1257 if (inp->inp_vflag & INP_IPV4) {
1258 ifp = inp->inp_last_outifp;
1259 } else if (inp->inp_vflag & INP_IPV6) {
1260 ifp = inp->in6p_last_outifp;
1261 }
1262 if (ifp != NULL) {
1263 uint32_t flow_if_index = ifp->if_index;
1264 error = flow_divert_packet_append_tlv(connect_packet, FLOW_DIVERT_TLV_OUT_IF_INDEX,
1265 sizeof(flow_if_index), &flow_if_index);
1266 if (error) {
1267 goto done;
1268 }
1269 }
1270
1271 if (so->so_flags1 & SOF1_DATA_IDEMPOTENT) {
1272 flags |= FLOW_DIVERT_TOKEN_FLAG_TFO;
1273 }
1274
1275 if ((inp->inp_flags & INP_BOUND_IF) ||
1276 ((inp->inp_vflag & INP_IPV6) && !IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)) ||
1277 ((inp->inp_vflag & INP_IPV4) && inp->inp_laddr.s_addr != INADDR_ANY)) {
1278 flags |= FLOW_DIVERT_TOKEN_FLAG_BOUND;
1279 }
1280
1281 if (flags != 0) {
1282 error = flow_divert_packet_append_tlv(connect_packet, FLOW_DIVERT_TLV_FLAGS, sizeof(flags), &flags);
1283 if (error) {
1284 goto done;
1285 }
1286 }
1287
1288 if (SOCK_TYPE(so) == SOCK_DGRAM) {
1289 cfil_sock_id = cfil_sock_id_from_datagram_socket(so, NULL, to);
1290 } else {
1291 cfil_sock_id = cfil_sock_id_from_socket(so);
1292 }
1293
1294 if (cfil_sock_id != CFIL_SOCK_ID_NONE) {
1295 cfil_id = &cfil_sock_id;
1296 cfil_id_size = sizeof(cfil_sock_id);
1297 } else if (so->so_flags1 & SOF1_CONTENT_FILTER_SKIP) {
1298 cfil_id = &inp->necp_client_uuid;
1299 cfil_id_size = sizeof(inp->necp_client_uuid);
1300 }
1301
1302 if (cfil_id != NULL && cfil_id_size > 0 && cfil_id_size <= sizeof(uuid_t)) {
1303 error = flow_divert_packet_append_tlv(connect_packet, FLOW_DIVERT_TLV_CFIL_ID, (uint32_t)cfil_id_size, cfil_id);
1304 if (error) {
1305 goto done;
1306 }
1307 }
1308
1309 done:
1310 if (!error) {
1311 *out_connect_packet = connect_packet;
1312 } else if (connect_packet != NULL) {
1313 mbuf_freem(connect_packet);
1314 }
1315
1316 return error;
1317 }
1318
1319 static int
1320 flow_divert_send_connect_packet(struct flow_divert_pcb *fd_cb)
1321 {
1322 int error = 0;
1323 mbuf_t connect_packet = fd_cb->connect_packet;
1324 mbuf_t saved_connect_packet = NULL;
1325
1326 if (connect_packet != NULL) {
1327 error = mbuf_copym(connect_packet, 0, mbuf_pkthdr_len(connect_packet), MBUF_DONTWAIT, &saved_connect_packet);
1328 if (error) {
1329 FDLOG0(LOG_ERR, fd_cb, "Failed to copy the connect packet");
1330 goto done;
1331 }
1332
1333 error = flow_divert_send_packet(fd_cb, connect_packet, TRUE);
1334 if (error) {
1335 goto done;
1336 }
1337
1338 fd_cb->connect_packet = saved_connect_packet;
1339 saved_connect_packet = NULL;
1340 } else {
1341 error = ENOENT;
1342 }
1343 done:
1344 if (saved_connect_packet != NULL) {
1345 mbuf_freem(saved_connect_packet);
1346 }
1347
1348 return error;
1349 }
1350
1351 static int
1352 flow_divert_send_connect_result(struct flow_divert_pcb *fd_cb)
1353 {
1354 int error = 0;
1355 mbuf_t packet = NULL;
1356 int rbuff_space = 0;
1357
1358 error = flow_divert_packet_init(fd_cb, FLOW_DIVERT_PKT_CONNECT_RESULT, &packet);
1359 if (error) {
1360 FDLOG(LOG_ERR, fd_cb, "failed to create a connect result packet: %d", error);
1361 goto done;
1362 }
1363
1364 rbuff_space = fd_cb->so->so_rcv.sb_hiwat;
1365 if (rbuff_space < 0) {
1366 rbuff_space = 0;
1367 }
1368 rbuff_space = htonl(rbuff_space);
1369 error = flow_divert_packet_append_tlv(packet,
1370 FLOW_DIVERT_TLV_SPACE_AVAILABLE,
1371 sizeof(rbuff_space),
1372 &rbuff_space);
1373 if (error) {
1374 goto done;
1375 }
1376
1377 error = flow_divert_send_packet(fd_cb, packet, TRUE);
1378 if (error) {
1379 goto done;
1380 }
1381
1382 done:
1383 if (error && packet != NULL) {
1384 mbuf_freem(packet);
1385 }
1386
1387 return error;
1388 }
1389
1390 static int
1391 flow_divert_send_close(struct flow_divert_pcb *fd_cb, int how)
1392 {
1393 int error = 0;
1394 mbuf_t packet = NULL;
1395 uint32_t zero = 0;
1396
1397 error = flow_divert_packet_init(fd_cb, FLOW_DIVERT_PKT_CLOSE, &packet);
1398 if (error) {
1399 FDLOG(LOG_ERR, fd_cb, "failed to create a close packet: %d", error);
1400 goto done;
1401 }
1402
1403 error = flow_divert_packet_append_tlv(packet, FLOW_DIVERT_TLV_ERROR_CODE, sizeof(zero), &zero);
1404 if (error) {
1405 FDLOG(LOG_ERR, fd_cb, "failed to add the error code TLV: %d", error);
1406 goto done;
1407 }
1408
1409 how = htonl(how);
1410 error = flow_divert_packet_append_tlv(packet, FLOW_DIVERT_TLV_HOW, sizeof(how), &how);
1411 if (error) {
1412 FDLOG(LOG_ERR, fd_cb, "failed to add the how flag: %d", error);
1413 goto done;
1414 }
1415
1416 error = flow_divert_send_packet(fd_cb, packet, TRUE);
1417 if (error) {
1418 goto done;
1419 }
1420
1421 done:
1422 if (error && packet != NULL) {
1423 mbuf_free(packet);
1424 }
1425
1426 return error;
1427 }
1428
1429 static int
1430 flow_divert_tunnel_how_closed(struct flow_divert_pcb *fd_cb)
1431 {
1432 if ((fd_cb->flags & (FLOW_DIVERT_TUNNEL_RD_CLOSED | FLOW_DIVERT_TUNNEL_WR_CLOSED)) ==
1433 (FLOW_DIVERT_TUNNEL_RD_CLOSED | FLOW_DIVERT_TUNNEL_WR_CLOSED)) {
1434 return SHUT_RDWR;
1435 } else if (fd_cb->flags & FLOW_DIVERT_TUNNEL_RD_CLOSED) {
1436 return SHUT_RD;
1437 } else if (fd_cb->flags & FLOW_DIVERT_TUNNEL_WR_CLOSED) {
1438 return SHUT_WR;
1439 }
1440
1441 return -1;
1442 }
1443
1444 /*
1445  * Determine which close messages, if any, need to be sent to the tunnel and send them.
1446  * If the tunnel is now closed for both reads and writes, the socket is disconnected.
1447  */
1448 static void
1449 flow_divert_send_close_if_needed(struct flow_divert_pcb *fd_cb)
1450 {
1451 int how = -1;
1452
1453 /* Do not send any close messages if there is still data in the send buffer */
1454 if (fd_cb->so->so_snd.sb_cc == 0) {
1455 if ((fd_cb->flags & (FLOW_DIVERT_READ_CLOSED | FLOW_DIVERT_TUNNEL_RD_CLOSED)) == FLOW_DIVERT_READ_CLOSED) {
1456 /* Socket closed reads, but tunnel did not. Tell tunnel to close reads */
1457 how = SHUT_RD;
1458 }
1459 if ((fd_cb->flags & (FLOW_DIVERT_WRITE_CLOSED | FLOW_DIVERT_TUNNEL_WR_CLOSED)) == FLOW_DIVERT_WRITE_CLOSED) {
1460 /* Socket closed writes, but tunnel did not. Tell tunnel to close writes */
1461 if (how == SHUT_RD) {
1462 how = SHUT_RDWR;
1463 } else {
1464 how = SHUT_WR;
1465 }
1466 }
1467 }
1468
1469 if (how != -1) {
1470 FDLOG(LOG_INFO, fd_cb, "sending close, how = %d", how);
1471 if (flow_divert_send_close(fd_cb, how) != ENOBUFS) {
1472 /* Successfully sent the close packet. Record the ways in which the tunnel has been closed */
1473 if (how != SHUT_RD) {
1474 fd_cb->flags |= FLOW_DIVERT_TUNNEL_WR_CLOSED;
1475 }
1476 if (how != SHUT_WR) {
1477 fd_cb->flags |= FLOW_DIVERT_TUNNEL_RD_CLOSED;
1478 }
1479 }
1480 }
1481
1482 if (flow_divert_tunnel_how_closed(fd_cb) == SHUT_RDWR) {
1483 flow_divert_disconnect_socket(fd_cb->so);
1484 }
1485 }
1486
1487 static errno_t
1488 flow_divert_send_data_packet(struct flow_divert_pcb *fd_cb, mbuf_t data, size_t data_len, struct sockaddr *toaddr, Boolean force)
1489 {
1490 mbuf_t packet = NULL;
1491 mbuf_t last = NULL;
1492 int error = 0;
1493
1494 error = flow_divert_packet_init(fd_cb, FLOW_DIVERT_PKT_DATA, &packet);
1495 if (error || packet == NULL) {
1496 FDLOG(LOG_ERR, fd_cb, "flow_divert_packet_init failed: %d", error);
1497 goto done;
1498 }
1499
1500 if (toaddr != NULL) {
1501 error = flow_divert_append_target_endpoint_tlv(packet, toaddr);
1502 if (error) {
1503 FDLOG(LOG_ERR, fd_cb, "flow_divert_append_target_endpoint_tlv() failed: %d", error);
1504 goto done;
1505 }
1506 }
1507
1508 if (data_len > 0 && data_len <= INT_MAX && data != NULL) {
1509 last = m_last(packet);
1510 mbuf_setnext(last, data);
1511 mbuf_pkthdr_adjustlen(packet, (int)data_len);
1512 } else {
1513 data_len = 0;
1514 }
1515 error = flow_divert_send_packet(fd_cb, packet, force);
1516 if (error == 0 && data_len > 0) {
1517 fd_cb->bytes_sent += data_len;
1518 flow_divert_add_data_statistics(fd_cb, data_len, TRUE);
1519 }
1520
1521 done:
1522 if (error) {
1523 if (last != NULL) {
1524 mbuf_setnext(last, NULL);
1525 }
1526 if (packet != NULL) {
1527 mbuf_freem(packet);
1528 }
1529 }
1530
1531 return error;
1532 }
1533
1534 static void
1535 flow_divert_send_buffered_data(struct flow_divert_pcb *fd_cb, Boolean force)
1536 {
1537 size_t to_send;
1538 size_t sent = 0;
1539 int error = 0;
1540 mbuf_t buffer;
1541
1542 to_send = fd_cb->so->so_snd.sb_cc;
1543 buffer = fd_cb->so->so_snd.sb_mb;
1544
1545 if (buffer == NULL && to_send > 0) {
1546 FDLOG(LOG_ERR, fd_cb, "Send buffer is NULL, but size is supposed to be %lu", to_send);
1547 return;
1548 }
1549
1550 /* Ignore the send window if force is enabled */
1551 if (!force && (to_send > fd_cb->send_window)) {
1552 to_send = fd_cb->send_window;
1553 }
1554
1555 if (SOCK_TYPE(fd_cb->so) == SOCK_STREAM) {
1556 while (sent < to_send) {
1557 mbuf_t data;
1558 size_t data_len;
1559
1560 data_len = to_send - sent;
1561 if (data_len > FLOW_DIVERT_CHUNK_SIZE) {
1562 data_len = FLOW_DIVERT_CHUNK_SIZE;
1563 }
1564
1565 error = mbuf_copym(buffer, sent, data_len, MBUF_DONTWAIT, &data);
1566 if (error) {
1567 FDLOG(LOG_ERR, fd_cb, "mbuf_copym failed: %d", error);
1568 break;
1569 }
1570
1571 error = flow_divert_send_data_packet(fd_cb, data, data_len, NULL, force);
1572 if (error) {
1573 if (data != NULL) {
1574 mbuf_freem(data);
1575 }
1576 break;
1577 }
1578
1579 sent += data_len;
1580 }
1581 sbdrop(&fd_cb->so->so_snd, (int)sent);
1582 sowwakeup(fd_cb->so);
1583 } else if (SOCK_TYPE(fd_cb->so) == SOCK_DGRAM) {
1584 mbuf_t data;
1585 mbuf_t m;
1586 size_t data_len;
1587
1588 while (buffer) {
1589 struct sockaddr *toaddr = flow_divert_get_buffered_target_address(buffer);
1590
1591 m = buffer;
1592 if (toaddr != NULL) {
1593 /* look for data in the chain */
1594 do {
1595 m = m->m_next;
1596 if (m != NULL && m->m_type == MT_DATA) {
1597 break;
1598 }
1599 } while (m);
1600 if (m == NULL) {
1601 /* unexpected */
1602 FDLOG0(LOG_ERR, fd_cb, "failed to find type MT_DATA in the mbuf chain.");
1603 goto move_on;
1604 }
1605 }
1606 data_len = mbuf_pkthdr_len(m);
1607 if (data_len > 0) {
1608 FDLOG(LOG_DEBUG, fd_cb, "mbuf_copym() data_len = %lu", data_len);
1609 error = mbuf_copym(m, 0, data_len, MBUF_DONTWAIT, &data);
1610 if (error) {
1611 FDLOG(LOG_ERR, fd_cb, "mbuf_copym failed: %d", error);
1612 break;
1613 }
1614 } else {
1615 data = NULL;
1616 }
1617 error = flow_divert_send_data_packet(fd_cb, data, data_len, toaddr, force);
1618 if (error) {
1619 if (data != NULL) {
1620 mbuf_freem(data);
1621 }
1622 break;
1623 }
1624 sent += data_len;
1625 move_on:
1626 buffer = buffer->m_nextpkt;
1627 (void) sbdroprecord(&(fd_cb->so->so_snd));
1628 }
1629 }
1630
1631 if (sent > 0) {
1632 FDLOG(LOG_DEBUG, fd_cb, "sent %lu bytes of buffered data", sent);
1633 if (fd_cb->send_window >= sent) {
1634 fd_cb->send_window -= sent;
1635 } else {
1636 fd_cb->send_window = 0;
1637 }
1638 }
1639 }
1640
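/*
 * Send application data to the tunnel. Anything that does not fit within the
 * current send window is held in the socket's send buffer and sent later by
 * flow_divert_send_buffered_data() as the send window allows.
 */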
1641 static int
1642 flow_divert_send_app_data(struct flow_divert_pcb *fd_cb, mbuf_t data, struct sockaddr *toaddr)
1643 {
1644 size_t to_send = mbuf_pkthdr_len(data);
1645 int error = 0;
1646
1647 if (to_send > fd_cb->send_window) {
1648 to_send = fd_cb->send_window;
1649 }
1650
1651 if (fd_cb->so->so_snd.sb_cc > 0) {
1652 to_send = 0; /* If the send buffer is non-empty, then we can't send anything */
1653 }
1654
1655 if (SOCK_TYPE(fd_cb->so) == SOCK_STREAM) {
1656 size_t sent = 0;
1657 mbuf_t remaining_data = data;
1658 mbuf_t pkt_data = NULL;
1659 while (sent < to_send && remaining_data != NULL) {
1660 size_t pkt_data_len;
1661
1662 pkt_data = remaining_data;
1663
1664 if ((to_send - sent) > FLOW_DIVERT_CHUNK_SIZE) {
1665 pkt_data_len = FLOW_DIVERT_CHUNK_SIZE;
1666 } else {
1667 pkt_data_len = to_send - sent;
1668 }
1669
1670 if (pkt_data_len < mbuf_pkthdr_len(pkt_data)) {
1671 error = mbuf_split(pkt_data, pkt_data_len, MBUF_DONTWAIT, &remaining_data);
1672 if (error) {
1673 FDLOG(LOG_ERR, fd_cb, "mbuf_split failed: %d", error);
1674 pkt_data = NULL;
1675 break;
1676 }
1677 } else {
1678 remaining_data = NULL;
1679 }
1680
1681 error = flow_divert_send_data_packet(fd_cb, pkt_data, pkt_data_len, NULL, FALSE);
1682
1683 if (error) {
1684 break;
1685 }
1686
1687 pkt_data = NULL;
1688 sent += pkt_data_len;
1689 }
1690
1691 fd_cb->send_window -= sent;
1692
1693 error = 0;
1694
1695 if (pkt_data != NULL) {
1696 if (sbspace(&fd_cb->so->so_snd) > 0) {
1697 if (!sbappendstream(&fd_cb->so->so_snd, pkt_data)) {
1698 FDLOG(LOG_ERR, fd_cb, "sbappendstream failed with pkt_data, send buffer size = %u, send_window = %u\n",
1699 fd_cb->so->so_snd.sb_cc, fd_cb->send_window);
1700 }
1701 } else {
1702 mbuf_freem(pkt_data);
1703 error = ENOBUFS;
1704 }
1705 }
1706
1707 if (remaining_data != NULL) {
1708 if (sbspace(&fd_cb->so->so_snd) > 0) {
1709 if (!sbappendstream(&fd_cb->so->so_snd, remaining_data)) {
1710 FDLOG(LOG_ERR, fd_cb, "sbappendstream failed with remaining_data, send buffer size = %u, send_window = %u\n",
1711 fd_cb->so->so_snd.sb_cc, fd_cb->send_window);
1712 }
1713 } else {
1714 mbuf_freem(remaining_data);
1715 error = ENOBUFS;
1716 }
1717 }
1718 } else if (SOCK_TYPE(fd_cb->so) == SOCK_DGRAM) {
1719 if (to_send || mbuf_pkthdr_len(data) == 0) {
1720 error = flow_divert_send_data_packet(fd_cb, data, to_send, toaddr, FALSE);
1721 if (error) {
1722 FDLOG(LOG_ERR, fd_cb, "flow_divert_send_data_packet failed. send data size = %lu", to_send);
1723 if (data != NULL) {
1724 mbuf_freem(data);
1725 }
1726 } else {
1727 fd_cb->send_window -= to_send;
1728 }
1729 } else {
1730 /* buffer it */
1731 if (sbspace(&fd_cb->so->so_snd) >= (int)mbuf_pkthdr_len(data)) {
1732 if (toaddr != NULL) {
1733 if (!sbappendaddr(&fd_cb->so->so_snd, toaddr, data, NULL, &error)) {
1734 FDLOG(LOG_ERR, fd_cb,
1735 "sbappendaddr failed. send buffer size = %u, send_window = %u, error = %d\n",
1736 fd_cb->so->so_snd.sb_cc, fd_cb->send_window, error);
1737 }
1738 error = 0;
1739 } else {
1740 if (!sbappendrecord(&fd_cb->so->so_snd, data)) {
1741 FDLOG(LOG_ERR, fd_cb,
1742 "sbappendrecord failed. send buffer size = %u, send_window = %u, error = %d\n",
1743 fd_cb->so->so_snd.sb_cc, fd_cb->send_window, error);
1744 }
1745 }
1746 } else {
1747 if (data != NULL) {
1748 mbuf_freem(data);
1749 }
1750 error = ENOBUFS;
1751 }
1752 }
1753 }
1754
1755 return error;
1756 }
1757
1758 static int
1759 flow_divert_send_read_notification(struct flow_divert_pcb *fd_cb)
1760 {
1761 int error = 0;
1762 mbuf_t packet = NULL;
1763
1764 error = flow_divert_packet_init(fd_cb, FLOW_DIVERT_PKT_READ_NOTIFY, &packet);
1765 if (error) {
1766 FDLOG(LOG_ERR, fd_cb, "failed to create a read notification packet: %d", error);
1767 goto done;
1768 }
1769
1770 error = flow_divert_send_packet(fd_cb, packet, TRUE);
1771 if (error) {
1772 goto done;
1773 }
1774
1775 done:
1776 if (error && packet != NULL) {
1777 mbuf_free(packet);
1778 }
1779
1780 return error;
1781 }
1782
1783 static int
1784 flow_divert_send_traffic_class_update(struct flow_divert_pcb *fd_cb, int traffic_class)
1785 {
1786 int error = 0;
1787 mbuf_t packet = NULL;
1788
1789 error = flow_divert_packet_init(fd_cb, FLOW_DIVERT_PKT_PROPERTIES_UPDATE, &packet);
1790 if (error) {
1791 FDLOG(LOG_ERR, fd_cb, "failed to create a properties update packet: %d", error);
1792 goto done;
1793 }
1794
1795 error = flow_divert_packet_append_tlv(packet, FLOW_DIVERT_TLV_TRAFFIC_CLASS, sizeof(traffic_class), &traffic_class);
1796 if (error) {
1797 FDLOG(LOG_ERR, fd_cb, "failed to add the traffic class: %d", error);
1798 goto done;
1799 }
1800
1801 error = flow_divert_send_packet(fd_cb, packet, TRUE);
1802 if (error) {
1803 goto done;
1804 }
1805
1806 done:
1807 if (error && packet != NULL) {
1808 mbuf_free(packet);
1809 }
1810
1811 return error;
1812 }
1813
1814 static void
1815 flow_divert_set_local_endpoint(struct flow_divert_pcb *fd_cb, struct sockaddr *local_endpoint, bool port_only)
1816 {
1817 struct inpcb *inp = sotoinpcb(fd_cb->so);
1818
1819 if (local_endpoint->sa_family == AF_INET6) {
1820 if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr) && !port_only) {
1821 fd_cb->flags |= FLOW_DIVERT_DID_SET_LOCAL_ADDR;
1822 inp->in6p_laddr = (satosin6(local_endpoint))->sin6_addr;
1823 }
1824 if (inp->inp_lport == 0) {
1825 inp->inp_lport = (satosin6(local_endpoint))->sin6_port;
1826 }
1827 } else if (local_endpoint->sa_family == AF_INET) {
1828 if (inp->inp_laddr.s_addr == INADDR_ANY && !port_only) {
1829 fd_cb->flags |= FLOW_DIVERT_DID_SET_LOCAL_ADDR;
1830 inp->inp_laddr = (satosin(local_endpoint))->sin_addr;
1831 }
1832 if (inp->inp_lport == 0) {
1833 inp->inp_lport = (satosin(local_endpoint))->sin_port;
1834 }
1835 }
1836 }
1837
1838 static void
1839 flow_divert_set_remote_endpoint(struct flow_divert_pcb *fd_cb, struct sockaddr *remote_endpoint)
1840 {
1841 struct inpcb *inp = sotoinpcb(fd_cb->so);
1842
1843 if (remote_endpoint->sa_family == AF_INET6) {
1844 if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) {
1845 inp->in6p_faddr = (satosin6(remote_endpoint))->sin6_addr;
1846 }
1847 if (inp->inp_fport == 0) {
1848 inp->inp_fport = (satosin6(remote_endpoint))->sin6_port;
1849 }
1850 } else if (remote_endpoint->sa_family == AF_INET) {
1851 if (inp->inp_faddr.s_addr == INADDR_ANY) {
1852 inp->inp_faddr = (satosin(remote_endpoint))->sin_addr;
1853 }
1854 if (inp->inp_fport == 0) {
1855 inp->inp_fport = (satosin(remote_endpoint))->sin_port;
1856 }
1857 }
1858 }
1859
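/*
 * Pick the next control unit to try. aggregate_unit is a bitmask in which bit
 * n represents control unit n + 1; the lowest set bit is consumed and that
 * unit is returned. If the mask is empty or exhausted, fall back to ctl_unit.
 */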
1860 static uint32_t
1861 flow_divert_derive_kernel_control_unit(uint32_t ctl_unit, uint32_t *aggregate_unit)
1862 {
1863 if (aggregate_unit != NULL && *aggregate_unit != 0) {
1864 uint32_t counter;
1865 for (counter = 0; counter < (GROUP_COUNT_MAX - 1); counter++) {
1866 if ((*aggregate_unit) & (1 << counter)) {
1867 break;
1868 }
1869 }
1870 if (counter < (GROUP_COUNT_MAX - 1)) {
1871 *aggregate_unit &= ~(1 << counter);
1872 return counter + 1;
1873 } else {
1874 return ctl_unit;
1875 }
1876 } else {
1877 return ctl_unit;
1878 }
1879 }
1880
1881 static int
1882 flow_divert_try_next(struct flow_divert_pcb *fd_cb)
1883 {
1884 uint32_t current_ctl_unit = 0;
1885 uint32_t next_ctl_unit = 0;
1886 struct flow_divert_group *current_group = NULL;
1887 struct flow_divert_group *next_group = NULL;
1888 int error = 0;
1889
1890 next_ctl_unit = flow_divert_derive_kernel_control_unit(fd_cb->policy_control_unit, &(fd_cb->aggregate_unit));
1891 current_ctl_unit = fd_cb->control_group_unit;
1892
1893 if (current_ctl_unit == next_ctl_unit) {
1894 FDLOG0(LOG_NOTICE, fd_cb, "Next control unit is the same as the current control unit, disabling flow divert");
1895 error = EALREADY;
1896 goto done;
1897 }
1898
1899 if (next_ctl_unit == 0 || next_ctl_unit >= GROUP_COUNT_MAX) {
1900 FDLOG0(LOG_NOTICE, fd_cb, "No more valid control units, disabling flow divert");
1901 error = ENOENT;
1902 goto done;
1903 }
1904
1905 if (g_flow_divert_groups == NULL || g_active_group_count == 0) {
1906 FDLOG0(LOG_NOTICE, fd_cb, "No active groups, disabling flow divert");
1907 error = ENOENT;
1908 goto done;
1909 }
1910
1911 next_group = g_flow_divert_groups[next_ctl_unit];
1912 if (next_group == NULL) {
1913 FDLOG(LOG_NOTICE, fd_cb, "Group for control unit %u does not exist", next_ctl_unit);
1914 error = ENOENT;
1915 goto done;
1916 }
1917
1918 current_group = fd_cb->group;
1919
1920 lck_rw_lock_exclusive(&(current_group->lck));
1921 lck_rw_lock_exclusive(&(next_group->lck));
1922
1923 FDLOG(LOG_NOTICE, fd_cb, "Moving from %u to %u", current_ctl_unit, next_ctl_unit);
1924
1925 RB_REMOVE(fd_pcb_tree, &(current_group->pcb_tree), fd_cb);
1926 if (RB_INSERT(fd_pcb_tree, &(next_group->pcb_tree), fd_cb) != NULL) {
1927 panic("group with unit %u already contains a connection with hash %u", next_ctl_unit, fd_cb->hash);
1928 }
1929
1930 fd_cb->group = next_group;
1931 fd_cb->control_group_unit = next_ctl_unit;
1932
1933 lck_rw_done(&(next_group->lck));
1934 lck_rw_done(&(current_group->lck));
1935
1936 error = flow_divert_send_connect_packet(fd_cb);
1937 if (error) {
1938 FDLOG(LOG_NOTICE, fd_cb, "Failed to send the connect packet to %u, disabling flow divert", next_ctl_unit);
1939 error = ENOENT;
1940 goto done;
1941 }
1942
1943 done:
1944 return error;
1945 }
1946
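/*
 * Give up on flow divert for this socket and fall back to the original
 * protocol. The saved inpcb state (vflag, last outgoing interfaces) is
 * restored, the foreign address/port and any flow-divert-assigned local
 * address are cleared, the socket is detached from its flow divert PCB
 * and group, and so_proto is reverted via pffindproto(). If the connect
 * was explicit, the original pru_connect is reissued, and any data still
 * sitting in the send buffer is replayed through the original pru_send:
 * as one copied chain for stream sockets, or record by record (with each
 * record's MT_SONAME/MT_CONTROL/MT_DATA mbufs split back out) for
 * datagram sockets.
 */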
1947 static void
1948 flow_divert_disable(struct flow_divert_pcb *fd_cb)
1949 {
1950 struct socket *so = NULL;
1951 mbuf_t buffer;
1952 int error = 0;
1953 proc_t last_proc = NULL;
1954 struct sockaddr *remote_endpoint = fd_cb->original_remote_endpoint;
1955 bool do_connect = !(fd_cb->flags & FLOW_DIVERT_IMPLICIT_CONNECT);
1956 struct inpcb *inp = NULL;
1957
1958 so = fd_cb->so;
1959 if (so == NULL) {
1960 goto done;
1961 }
1962
1963 FDLOG0(LOG_NOTICE, fd_cb, "Skipped all flow divert services, disabling flow divert");
1964
1965 /* Restore the IP state */
1966 inp = sotoinpcb(so);
1967 inp->inp_vflag = fd_cb->original_vflag;
1968 inp->inp_faddr.s_addr = INADDR_ANY;
1969 inp->inp_fport = 0;
1970 memset(&(inp->in6p_faddr), 0, sizeof(inp->in6p_faddr));
1971 inp->in6p_fport = 0;
1972 /* If flow divert set the local address, clear it out */
1973 if (fd_cb->flags & FLOW_DIVERT_DID_SET_LOCAL_ADDR) {
1974 inp->inp_laddr.s_addr = INADDR_ANY;
1975 memset(&(inp->in6p_laddr), 0, sizeof(inp->in6p_laddr));
1976 }
1977 inp->inp_last_outifp = fd_cb->original_last_outifp;
1978 inp->in6p_last_outifp = fd_cb->original_last_outifp6;
1979
1980 /* Dis-associate the socket */
1981 so->so_flags &= ~SOF_FLOW_DIVERT;
1982 so->so_flags1 |= SOF1_FLOW_DIVERT_SKIP;
1983 so->so_fd_pcb = NULL;
1984 fd_cb->so = NULL;
1985
1986 /* Remove from the group */
1987 flow_divert_pcb_remove(fd_cb);
1988
1989 FDRELEASE(fd_cb); /* Release the socket's reference */
1990
1991 /* Revert back to the original protocol */
1992 so->so_proto = pffindproto(SOCK_DOM(so), SOCK_PROTO(so), SOCK_TYPE(so));
1993
1994 last_proc = proc_find(so->last_pid);
1995
1996 if (do_connect) {
1997 /* Connect using the original protocol */
1998 error = (*so->so_proto->pr_usrreqs->pru_connect)(so, remote_endpoint, (last_proc != NULL ? last_proc : current_proc()));
1999 if (error) {
2000 FDLOG(LOG_ERR, fd_cb, "Failed to connect using the socket's original protocol: %d", error);
2001 goto done;
2002 }
2003 }
2004
2005 buffer = so->so_snd.sb_mb;
2006 if (buffer == NULL) {
2007 /* No buffered data, done */
2008 goto done;
2009 }
2010
2011 /* Send any buffered data using the original protocol */
2012 if (SOCK_TYPE(so) == SOCK_STREAM) {
2013 mbuf_t data_to_send = NULL;
2014 size_t data_len = so->so_snd.sb_cc;
2015
2016 error = mbuf_copym(buffer, 0, data_len, MBUF_DONTWAIT, &data_to_send);
2017 if (error) {
2018 FDLOG0(LOG_ERR, fd_cb, "Failed to copy the mbuf chain in the socket's send buffer");
2019 goto done;
2020 }
2021
2022 sbflush(&so->so_snd);
2023
2024 if (data_to_send->m_flags & M_PKTHDR) {
2025 mbuf_pkthdr_setlen(data_to_send, data_len);
2026 }
2027
2028 error = (*so->so_proto->pr_usrreqs->pru_send)(so,
2029 0,
2030 data_to_send,
2031 NULL,
2032 NULL,
2033 (last_proc != NULL ? last_proc : current_proc()));
2034
2035 if (error) {
2036 FDLOG(LOG_ERR, fd_cb, "Failed to send queued data using the socket's original protocol: %d", error);
2037 }
2038 } else if (SOCK_TYPE(so) == SOCK_DGRAM) {
2039 struct sockbuf *sb = &so->so_snd;
2040 MBUFQ_HEAD(send_queue_head) send_queue;
2041 MBUFQ_INIT(&send_queue);
2042
2043 /* Flush the send buffer, moving all records to a temporary queue */
2044 while (sb->sb_mb != NULL) {
2045 mbuf_t record = sb->sb_mb;
2046 mbuf_t m = record;
2047 sb->sb_mb = sb->sb_mb->m_nextpkt;
2048 while (m != NULL) {
2049 sbfree(sb, m);
2050 m = m->m_next;
2051 }
2052 record->m_nextpkt = NULL;
2053 MBUFQ_ENQUEUE(&send_queue, record);
2054 }
2055 SB_EMPTY_FIXUP(sb);
2056
2057 while (!MBUFQ_EMPTY(&send_queue)) {
2058 mbuf_t next_record = MBUFQ_FIRST(&send_queue);
2059 mbuf_t addr = NULL;
2060 mbuf_t control = NULL;
2061 mbuf_t last_control = NULL;
2062 mbuf_t data = NULL;
2063 mbuf_t m = next_record;
2064 struct sockaddr *to_endpoint = NULL;
2065
2066 MBUFQ_DEQUEUE(&send_queue, next_record);
2067
2068 while (m != NULL) {
2069 if (m->m_type == MT_SONAME) {
2070 addr = m;
2071 } else if (m->m_type == MT_CONTROL) {
2072 if (control == NULL) {
2073 control = m;
2074 }
2075 last_control = m;
2076 } else if (m->m_type == MT_DATA) {
2077 data = m;
2078 break;
2079 }
2080 m = m->m_next;
2081 }
2082
2083 if (addr != NULL) {
2084 to_endpoint = flow_divert_get_buffered_target_address(addr);
2085 if (to_endpoint == NULL) {
2086 FDLOG0(LOG_NOTICE, fd_cb, "Failed to get the remote address from the buffer");
2087 }
2088 }
2089
2090 if (data == NULL) {
2091 FDLOG0(LOG_ERR, fd_cb, "Buffered record does not contain any data");
2092 mbuf_freem(next_record);
2093 continue;
2094 }
2095
2096 if (!(data->m_flags & M_PKTHDR)) {
2097 FDLOG0(LOG_ERR, fd_cb, "Buffered data does not have a packet header");
2098 mbuf_freem(next_record);
2099 continue;
2100 }
2101
2102 if (addr != NULL) {
2103 addr->m_next = NULL;
2104 }
2105
2106 if (last_control != NULL) {
2107 last_control->m_next = NULL;
2108 }
2109
2110 error = (*so->so_proto->pr_usrreqs->pru_send)(so,
2111 0,
2112 data,
2113 to_endpoint,
2114 control,
2115 (last_proc != NULL ? last_proc : current_proc()));
2116
2117 if (addr != NULL) {
2118 mbuf_freem(addr);
2119 }
2120
2121 if (error) {
2122 FDLOG(LOG_ERR, fd_cb, "Failed to send queued data using the socket's original protocol: %d", error);
2123 }
2124 }
2125 }
2126 done:
2127 if (last_proc != NULL) {
2128 proc_rele(last_proc);
2129 }
2130
2131 if (error) {
2132 so->so_error = (uint16_t)error;
2133 flow_divert_disconnect_socket(so);
2134 }
2135 }
2136
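/*
 * Process a connect result from the flow divert provider. The packet
 * carries an error code and send window, plus optional control unit,
 * local/remote address, output interface index and application data
 * TLVs. On success the local endpoint (and, for stream sockets, the
 * remote endpoint) is applied to the inpcb, the flow may be re-homed to
 * the group named by the control unit TLV, a connect result packet is
 * sent back, buffered data is flushed, and the socket is marked
 * connected. On failure the code either tries the next control unit in
 * the aggregate set, disables flow divert entirely, or marks the flow
 * closed and disconnects the socket.
 */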
2137 static void
2138 flow_divert_handle_connect_result(struct flow_divert_pcb *fd_cb, mbuf_t packet, int offset)
2139 {
2140 uint32_t connect_error = 0;
2141 uint32_t ctl_unit = 0;
2142 int error = 0;
2143 struct flow_divert_group *grp = NULL;
2144 union sockaddr_in_4_6 local_endpoint = {};
2145 union sockaddr_in_4_6 remote_endpoint = {};
2146 int out_if_index = 0;
2147 uint32_t send_window;
2148 uint32_t app_data_length = 0;
2149
2150 memset(&local_endpoint, 0, sizeof(local_endpoint));
2151 memset(&remote_endpoint, 0, sizeof(remote_endpoint));
2152
2153 error = flow_divert_packet_get_tlv(packet, offset, FLOW_DIVERT_TLV_ERROR_CODE, sizeof(connect_error), &connect_error, NULL);
2154 if (error) {
2155 FDLOG(LOG_ERR, fd_cb, "failed to get the connect result: %d", error);
2156 return;
2157 }
2158
2159 connect_error = ntohl(connect_error);
2160 FDLOG(LOG_INFO, fd_cb, "received connect result %u", connect_error);
2161
2162 error = flow_divert_packet_get_tlv(packet, offset, FLOW_DIVERT_TLV_SPACE_AVAILABLE, sizeof(send_window), &send_window, NULL);
2163 if (error) {
2164 FDLOG(LOG_ERR, fd_cb, "failed to get the send window: %d", error);
2165 return;
2166 }
2167
2168 error = flow_divert_packet_get_tlv(packet, offset, FLOW_DIVERT_TLV_CTL_UNIT, sizeof(ctl_unit), &ctl_unit, NULL);
2169 if (error) {
2170 FDLOG0(LOG_INFO, fd_cb, "No control unit provided in the connect result");
2171 }
2172
2173 error = flow_divert_packet_get_tlv(packet, offset, FLOW_DIVERT_TLV_LOCAL_ADDR, sizeof(local_endpoint), &(local_endpoint.sa), NULL);
2174 if (error) {
2175 FDLOG0(LOG_INFO, fd_cb, "No local address provided");
2176 }
2177
2178 error = flow_divert_packet_get_tlv(packet, offset, FLOW_DIVERT_TLV_REMOTE_ADDR, sizeof(remote_endpoint), &(remote_endpoint.sa), NULL);
2179 if (error) {
2180 FDLOG0(LOG_INFO, fd_cb, "No remote address provided");
2181 }
2182
2183 error = flow_divert_packet_get_tlv(packet, offset, FLOW_DIVERT_TLV_OUT_IF_INDEX, sizeof(out_if_index), &out_if_index, NULL);
2184 if (error) {
2185 FDLOG0(LOG_INFO, fd_cb, "No output if index provided");
2186 }
2187
2188 error = flow_divert_packet_get_tlv(packet, offset, FLOW_DIVERT_TLV_APP_DATA, 0, NULL, &app_data_length);
2189 if (error) {
2190 FDLOG0(LOG_INFO, fd_cb, "No application data provided in connect result");
2191 }
2192
2193 error = 0;
2194 ctl_unit = ntohl(ctl_unit);
2195
2196 lck_rw_lock_shared(&g_flow_divert_group_lck);
2197
2198 if (connect_error == 0 && ctl_unit > 0) {
2199 if (ctl_unit >= GROUP_COUNT_MAX) {
2200 FDLOG(LOG_ERR, fd_cb, "Connect result contains an invalid control unit: %u", ctl_unit);
2201 error = EINVAL;
2202 } else if (g_flow_divert_groups == NULL || g_active_group_count == 0) {
2203 FDLOG0(LOG_ERR, fd_cb, "No active groups, dropping connection");
2204 error = EINVAL;
2205 } else {
2206 grp = g_flow_divert_groups[ctl_unit];
2207 if (grp == NULL) {
2208 error = ECONNRESET;
2209 }
2210 }
2211 }
2212
2213 FDLOCK(fd_cb);
2214 if (fd_cb->so != NULL) {
2215 struct inpcb *inp = NULL;
2216 struct ifnet *ifp = NULL;
2217 struct flow_divert_group *old_group;
2218 struct socket *so = fd_cb->so;
2219
2220 socket_lock(so, 0);
2221
2222 if (SOCK_TYPE(so) == SOCK_STREAM && !(so->so_state & SS_ISCONNECTING)) {
2223 FDLOG0(LOG_ERR, fd_cb, "TCP socket is not in the connecting state, ignoring connect result");
2224 goto done;
2225 }
2226
2227 inp = sotoinpcb(so);
2228
2229 if (connect_error || error) {
2230 goto set_socket_state;
2231 }
2232
2233 if (flow_divert_is_sockaddr_valid(&(local_endpoint.sa))) {
2234 if (local_endpoint.sa.sa_family == AF_INET) {
2235 local_endpoint.sa.sa_len = sizeof(struct sockaddr_in);
2236 } else if (local_endpoint.sa.sa_family == AF_INET6) {
2237 local_endpoint.sa.sa_len = sizeof(struct sockaddr_in6);
2238 }
2239 fd_cb->local_endpoint = local_endpoint;
2240 flow_divert_set_local_endpoint(fd_cb, &(local_endpoint.sa), (SOCK_TYPE(so) == SOCK_DGRAM));
2241 }
2242
2243 if (flow_divert_is_sockaddr_valid(&(remote_endpoint.sa)) && SOCK_TYPE(so) == SOCK_STREAM) {
2244 if (remote_endpoint.sa.sa_family == AF_INET) {
2245 remote_endpoint.sa.sa_len = sizeof(struct sockaddr_in);
2246 } else if (remote_endpoint.sa.sa_family == AF_INET6) {
2247 remote_endpoint.sa.sa_len = sizeof(struct sockaddr_in6);
2248 }
2249 flow_divert_set_remote_endpoint(fd_cb, &(remote_endpoint.sa));
2250 }
2251
2252 if (app_data_length > 0) {
2253 uint8_t *app_data = NULL;
2254 MALLOC(app_data, uint8_t *, app_data_length, M_TEMP, M_WAITOK);
2255 if (app_data != NULL) {
2256 error = flow_divert_packet_get_tlv(packet, offset, FLOW_DIVERT_TLV_APP_DATA, app_data_length, app_data, NULL);
2257 if (error == 0) {
2258 FDLOG(LOG_INFO, fd_cb, "Got %u bytes of app data from the connect result", app_data_length);
2259 if (fd_cb->app_data != NULL) {
2260 FREE(fd_cb->app_data, M_TEMP);
2261 }
2262 fd_cb->app_data = app_data;
2263 fd_cb->app_data_length = app_data_length;
2264 } else {
2265 FDLOG(LOG_ERR, fd_cb, "Failed to copy %u bytes of application data from the connect result packet", app_data_length);
2266 FREE(app_data, M_TEMP);
2267 }
2268 } else {
2269 FDLOG(LOG_ERR, fd_cb, "Failed to allocate a buffer of size %u to hold the application data from the connect result", app_data_length);
2270 }
2271 }
2272
2273 ifnet_head_lock_shared();
2274 if (out_if_index > 0 && out_if_index <= if_index) {
2275 ifp = ifindex2ifnet[out_if_index];
2276 }
2277
2278 if (ifp != NULL) {
2279 if (inp->inp_vflag & INP_IPV4) {
2280 inp->inp_last_outifp = ifp;
2281 } else if (inp->inp_vflag & INP_IPV6) {
2282 inp->in6p_last_outifp = ifp;
2283 }
2284 } else {
2285 error = EINVAL;
2286 }
2287 ifnet_head_done();
2288
2289 if (error) {
2290 goto set_socket_state;
2291 }
2292
2293 if (fd_cb->group == NULL) {
2294 error = EINVAL;
2295 goto set_socket_state;
2296 }
2297
2298 if (grp != NULL) {
2299 old_group = fd_cb->group;
2300
2301 lck_rw_lock_exclusive(&old_group->lck);
2302 lck_rw_lock_exclusive(&grp->lck);
2303
2304 RB_REMOVE(fd_pcb_tree, &old_group->pcb_tree, fd_cb);
2305 if (RB_INSERT(fd_pcb_tree, &grp->pcb_tree, fd_cb) != NULL) {
2306 panic("group with unit %u already contains a connection with hash %u", grp->ctl_unit, fd_cb->hash);
2307 }
2308
2309 fd_cb->group = grp;
2310
2311 lck_rw_done(&grp->lck);
2312 lck_rw_done(&old_group->lck);
2313 }
2314
2315 fd_cb->send_window = ntohl(send_window);
2316
2317 set_socket_state:
2318 if (!connect_error && !error) {
2319 FDLOG0(LOG_INFO, fd_cb, "sending connect result");
2320 error = flow_divert_send_connect_result(fd_cb);
2321 }
2322
2323 if (connect_error || error) {
2324 if (connect_error && fd_cb->control_group_unit != fd_cb->policy_control_unit) {
2325 error = flow_divert_try_next(fd_cb);
2326 if (error) {
2327 flow_divert_disable(fd_cb);
2328 }
2329 goto done;
2330 }
2331
2332 if (!connect_error) {
2333 flow_divert_update_closed_state(fd_cb, SHUT_RDWR, FALSE);
2334 so->so_error = (uint16_t)error;
2335 flow_divert_send_close_if_needed(fd_cb);
2336 } else {
2337 flow_divert_update_closed_state(fd_cb, SHUT_RDWR, TRUE);
2338 so->so_error = (uint16_t)connect_error;
2339 }
2340 flow_divert_disconnect_socket(so);
2341 } else {
2342 #if NECP
2343 /* Update NECP client with connected five-tuple */
2344 if (!uuid_is_null(inp->necp_client_uuid)) {
2345 socket_unlock(so, 0);
2346 necp_client_assign_from_socket(so->last_pid, inp->necp_client_uuid, inp);
2347 socket_lock(so, 0);
2348 }
2349 #endif /* NECP */
2350
2351 flow_divert_send_buffered_data(fd_cb, FALSE);
2352 soisconnected(so);
2353 }
2354
2355 /* We don't need the connect packet any more */
2356 if (fd_cb->connect_packet != NULL) {
2357 mbuf_freem(fd_cb->connect_packet);
2358 fd_cb->connect_packet = NULL;
2359 }
2360
2361 /* We don't need the original remote endpoint any more */
2362 if (fd_cb->original_remote_endpoint != NULL) {
2363 FREE(fd_cb->original_remote_endpoint, M_SONAME);
2364 fd_cb->original_remote_endpoint = NULL;
2365 }
2366 done:
2367 socket_unlock(so, 0);
2368 }
2369 FDUNLOCK(fd_cb);
2370
2371 lck_rw_done(&g_flow_divert_group_lck);
2372 }
2373
2374 static void
2375 flow_divert_handle_close(struct flow_divert_pcb *fd_cb, mbuf_t packet, int offset)
2376 {
2377 uint32_t close_error;
2378 int error = 0;
2379 int how;
2380
2381 error = flow_divert_packet_get_tlv(packet, offset, FLOW_DIVERT_TLV_ERROR_CODE, sizeof(close_error), &close_error, NULL);
2382 if (error) {
2383 FDLOG(LOG_ERR, fd_cb, "failed to get the close error: %d", error);
2384 return;
2385 }
2386
2387 error = flow_divert_packet_get_tlv(packet, offset, FLOW_DIVERT_TLV_HOW, sizeof(how), &how, NULL);
2388 if (error) {
2389 FDLOG(LOG_ERR, fd_cb, "failed to get the close how flag: %d", error);
2390 return;
2391 }
2392
2393 how = ntohl(how);
2394
2395 FDLOG(LOG_INFO, fd_cb, "close received, how = %d", how);
2396
2397 FDLOCK(fd_cb);
2398 if (fd_cb->so != NULL) {
2399 socket_lock(fd_cb->so, 0);
2400
2401 fd_cb->so->so_error = (uint16_t)ntohl(close_error);
2402
2403 flow_divert_update_closed_state(fd_cb, how, TRUE);
2404
2405 how = flow_divert_tunnel_how_closed(fd_cb);
2406 if (how == SHUT_RDWR) {
2407 flow_divert_disconnect_socket(fd_cb->so);
2408 } else if (how == SHUT_RD) {
2409 socantrcvmore(fd_cb->so);
2410 } else if (how == SHUT_WR) {
2411 socantsendmore(fd_cb->so);
2412 }
2413
2414 socket_unlock(fd_cb->so, 0);
2415 }
2416 FDUNLOCK(fd_cb);
2417 }
2418
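/*
 * Build a control mbuf carrying the flow's local address for delivery
 * alongside inbound datagrams: IP_RECVDSTADDR for IPv4 or IPV6_PKTINFO
 * for IPv6, but only when the application asked for it (INP_RECVDSTADDR
 * / IN6P_PKTINFO) or a content filter is attached to the socket.
 * Returns NULL when no control information is needed.
 */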
2419 static mbuf_t
2420 flow_divert_create_control_mbuf(struct flow_divert_pcb *fd_cb)
2421 {
2422 struct inpcb *inp = sotoinpcb(fd_cb->so);
2423 bool is_cfil_enabled = false;
2424 #if CONTENT_FILTER
2425 /* Content Filter needs to see the local address */
2426 is_cfil_enabled = (inp->inp_socket && inp->inp_socket->so_cfil_db != NULL);
2427 #endif
2428 if ((inp->inp_vflag & INP_IPV4) &&
2429 fd_cb->local_endpoint.sa.sa_family == AF_INET &&
2430 ((inp->inp_flags & INP_RECVDSTADDR) || is_cfil_enabled)) {
2431 return sbcreatecontrol((caddr_t)&(fd_cb->local_endpoint.sin.sin_addr), sizeof(struct in_addr), IP_RECVDSTADDR, IPPROTO_IP);
2432 } else if ((inp->inp_vflag & INP_IPV6) &&
2433 fd_cb->local_endpoint.sa.sa_family == AF_INET6 &&
2434 ((inp->inp_flags & IN6P_PKTINFO) || is_cfil_enabled)) {
2435 struct in6_pktinfo pi6;
2436 memset(&pi6, 0, sizeof(pi6));
2437 pi6.ipi6_addr = fd_cb->local_endpoint.sin6.sin6_addr;
2438
2439 return sbcreatecontrol((caddr_t)&pi6, sizeof(pi6), IPV6_PKTINFO, IPPROTO_IPV6);
2440 }
2441 return NULL;
2442 }
2443
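/*
 * Deliver payload from a data packet to the socket's receive buffer.
 * If the receive buffer is full, ENOBUFS is returned and
 * FLOW_DIVERT_NOTIFY_ON_RECEIVED is set so a read notification goes out
 * once the application drains some data. For datagram sockets an
 * optional per-packet remote address TLV is parsed first and the data
 * is appended with sbappendaddr() (plus any control mbuf from
 * flow_divert_create_control_mbuf()); stream data is appended with
 * sbappendstream(). Byte counts and statistics are updated on success
 * and the reader is woken up when data was actually appended.
 */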
2444 static int
2445 flow_divert_handle_data(struct flow_divert_pcb *fd_cb, mbuf_t packet, size_t offset)
2446 {
2447 int error = 0;
2448
2449 FDLOCK(fd_cb);
2450 if (fd_cb->so != NULL) {
2451 mbuf_t data = NULL;
2452 size_t data_size;
2453 struct sockaddr_storage remote_address;
2454 boolean_t got_remote_sa = FALSE;
2455 boolean_t appended = FALSE;
2456 boolean_t append_success = FALSE;
2457
2458 socket_lock(fd_cb->so, 0);
2459
2460 if (sbspace(&fd_cb->so->so_rcv) == 0) {
2461 error = ENOBUFS;
2462 fd_cb->flags |= FLOW_DIVERT_NOTIFY_ON_RECEIVED;
2463 FDLOG0(LOG_INFO, fd_cb, "Receive buffer is full, will send read notification when app reads some data");
2464 goto done;
2465 }
2466
2467 if (SOCK_TYPE(fd_cb->so) == SOCK_DGRAM) {
2468 uint32_t val_size = 0;
2469
2470 /* check if we got a remote address with the data */
2471 memset(&remote_address, 0, sizeof(remote_address));
2472 error = flow_divert_packet_get_tlv(packet, (int)offset, FLOW_DIVERT_TLV_REMOTE_ADDR, sizeof(remote_address), &remote_address, &val_size);
2473 if (error || val_size > sizeof(remote_address)) {
2474 FDLOG0(LOG_INFO, fd_cb, "No remote address provided");
2475 error = 0;
2476 } else {
2477 if (remote_address.ss_len > sizeof(remote_address)) {
2478 remote_address.ss_len = sizeof(remote_address);
2479 }
2480 /* validate the address */
2481 if (flow_divert_is_sockaddr_valid((struct sockaddr *)&remote_address)) {
2482 got_remote_sa = TRUE;
2483 } else {
2484 FDLOG0(LOG_INFO, fd_cb, "Remote address is invalid");
2485 }
2486 offset += (sizeof(uint8_t) + sizeof(uint32_t) + val_size);
2487 }
2488 }
2489
2490 data_size = (mbuf_pkthdr_len(packet) - offset);
2491
2492 if (fd_cb->so->so_state & SS_CANTRCVMORE) {
2493 FDLOG(LOG_NOTICE, fd_cb, "app cannot receive any more data, dropping %lu bytes of data", data_size);
2494 goto done;
2495 }
2496
2497 if (SOCK_TYPE(fd_cb->so) != SOCK_STREAM && SOCK_TYPE(fd_cb->so) != SOCK_DGRAM) {
2498 FDLOG(LOG_ERR, fd_cb, "socket has an unsupported type: %d", SOCK_TYPE(fd_cb->so));
2499 goto done;
2500 }
2501
2502 FDLOG(LOG_DEBUG, fd_cb, "received %lu bytes of data", data_size);
2503
2504 error = mbuf_split(packet, offset, MBUF_DONTWAIT, &data);
2505 if (error || data == NULL) {
2506 FDLOG(LOG_ERR, fd_cb, "mbuf_split failed: %d", error);
2507 goto done;
2508 }
2509
2510 if (SOCK_TYPE(fd_cb->so) == SOCK_STREAM) {
2511 appended = (sbappendstream(&fd_cb->so->so_rcv, data) != 0);
2512 append_success = TRUE;
2513 } else {
2514 struct sockaddr *append_sa = NULL;
2515 mbuf_t mctl;
2516
2517 if (got_remote_sa == TRUE) {
2518 error = flow_divert_dup_addr(remote_address.ss_family, (struct sockaddr *)&remote_address, &append_sa);
2519 } else {
2520 if (fd_cb->so->so_proto->pr_domain->dom_family == AF_INET6) {
2521 error = in6_mapped_peeraddr(fd_cb->so, &append_sa);
2522 } else {
2523 error = in_getpeeraddr(fd_cb->so, &append_sa);
2524 }
2525 }
2526 if (error) {
2527 FDLOG0(LOG_ERR, fd_cb, "failed to dup the socket address.");
2528 }
2529
2530 mctl = flow_divert_create_control_mbuf(fd_cb);
2531 int append_error = 0;
2532 if (sbappendaddr(&fd_cb->so->so_rcv, append_sa, data, mctl, &append_error) || append_error == EJUSTRETURN) {
2533 append_success = TRUE;
2534 appended = (append_error == 0);
2535 } else {
2536 FDLOG(LOG_ERR, fd_cb, "failed to append %lu bytes of data: %d", data_size, append_error);
2537 }
2538
2539 if (append_sa != NULL) {
2540 FREE(append_sa, M_SONAME);
2541 }
2542 }
2543
2544 if (append_success) {
2545 fd_cb->bytes_received += data_size;
2546 flow_divert_add_data_statistics(fd_cb, data_size, FALSE);
2547 }
2548
2549 if (appended) {
2550 sorwakeup(fd_cb->so);
2551 }
2552 done:
2553 socket_unlock(fd_cb->so, 0);
2554 }
2555 FDUNLOCK(fd_cb);
2556
2557 return error;
2558 }
2559
2560 static void
2561 flow_divert_handle_read_notification(struct flow_divert_pcb *fd_cb, mbuf_t packet, int offset)
2562 {
2563 uint32_t read_count;
2564 int error = 0;
2565
2566 error = flow_divert_packet_get_tlv(packet, offset, FLOW_DIVERT_TLV_READ_COUNT, sizeof(read_count), &read_count, NULL);
2567 if (error) {
2568 FDLOG(LOG_ERR, fd_cb, "failed to get the read count: %d", error);
2569 return;
2570 }
2571
2572 FDLOG(LOG_DEBUG, fd_cb, "received a read notification for %u bytes", ntohl(read_count));
2573
2574 FDLOCK(fd_cb);
2575 if (fd_cb->so != NULL) {
2576 socket_lock(fd_cb->so, 0);
2577 fd_cb->send_window += ntohl(read_count);
2578 flow_divert_send_buffered_data(fd_cb, FALSE);
2579 socket_unlock(fd_cb->so, 0);
2580 }
2581 FDUNLOCK(fd_cb);
2582 }
2583
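/*
 * Handle a group initialization message from the control socket: store
 * the token key (bounded by FLOW_DIVERT_MAX_KEY_SIZE) that is used to
 * verify connect-token HMACs, pick up an optional log level for the
 * global nil_pcb, and record any group flags.
 */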
2584 static void
2585 flow_divert_handle_group_init(struct flow_divert_group *group, mbuf_t packet, int offset)
2586 {
2587 int error = 0;
2588 uint32_t key_size = 0;
2589 int log_level;
2590 uint32_t flags = 0;
2591
2592 error = flow_divert_packet_get_tlv(packet, offset, FLOW_DIVERT_TLV_TOKEN_KEY, 0, NULL, &key_size);
2593 if (error) {
2594 FDLOG(LOG_ERR, &nil_pcb, "failed to get the key size: %d", error);
2595 return;
2596 }
2597
2598 if (key_size == 0 || key_size > FLOW_DIVERT_MAX_KEY_SIZE) {
2599 FDLOG(LOG_ERR, &nil_pcb, "Invalid key size: %u", key_size);
2600 return;
2601 }
2602
2603 error = flow_divert_packet_get_tlv(packet, offset, FLOW_DIVERT_TLV_LOG_LEVEL, sizeof(log_level), &log_level, NULL);
2604 if (!error) {
2605 nil_pcb.log_level = (uint8_t)log_level;
2606 }
2607
2608 lck_rw_lock_exclusive(&group->lck);
2609
2610 if (group->token_key != NULL) {
2611 FREE(group->token_key, M_TEMP);
2612 group->token_key = NULL;
2613 }
2614
2615 MALLOC(group->token_key, uint8_t *, key_size, M_TEMP, M_WAITOK);
2616 error = flow_divert_packet_get_tlv(packet, offset, FLOW_DIVERT_TLV_TOKEN_KEY, key_size, group->token_key, NULL);
2617 if (error) {
2618 FDLOG(LOG_ERR, &nil_pcb, "failed to get the token key: %d", error);
2619 FREE(group->token_key, M_TEMP);
2620 group->token_key = NULL;
2621 lck_rw_done(&group->lck);
2622 return;
2623 }
2624
2625 group->token_key_size = key_size;
2626
2627 error = flow_divert_packet_get_tlv(packet, offset, FLOW_DIVERT_TLV_FLAGS, sizeof(flags), &flags, NULL);
2628 if (!error) {
2629 group->flags = flags;
2630 }
2631
2632 lck_rw_done(&group->lck);
2633 }
2634
2635 static void
2636 flow_divert_handle_properties_update(struct flow_divert_pcb *fd_cb, mbuf_t packet, int offset)
2637 {
2638 int error = 0;
2639 int out_if_index = 0;
2640 uint32_t app_data_length = 0;
2641
2642 FDLOG0(LOG_INFO, fd_cb, "received a properties update");
2643
2644 error = flow_divert_packet_get_tlv(packet, offset, FLOW_DIVERT_TLV_OUT_IF_INDEX, sizeof(out_if_index), &out_if_index, NULL);
2645 if (error) {
2646 FDLOG0(LOG_INFO, fd_cb, "No output if index provided in properties update");
2647 }
2648
2649 error = flow_divert_packet_get_tlv(packet, offset, FLOW_DIVERT_TLV_APP_DATA, 0, NULL, &app_data_length);
2650 if (error) {
2651 FDLOG0(LOG_INFO, fd_cb, "No application data provided in properties update");
2652 }
2653
2654 FDLOCK(fd_cb);
2655 if (fd_cb->so != NULL) {
2656 socket_lock(fd_cb->so, 0);
2657
2658 if (out_if_index > 0) {
2659 struct inpcb *inp = NULL;
2660 struct ifnet *ifp = NULL;
2661
2662 inp = sotoinpcb(fd_cb->so);
2663
2664 ifnet_head_lock_shared();
2665 if (out_if_index <= if_index) {
2666 ifp = ifindex2ifnet[out_if_index];
2667 }
2668
2669 if (ifp != NULL) {
2670 if (inp->inp_vflag & INP_IPV4) {
2671 inp->inp_last_outifp = ifp;
2672 } else if (inp->inp_vflag & INP_IPV6) {
2673 inp->in6p_last_outifp = ifp;
2674 }
2675 }
2676 ifnet_head_done();
2677 }
2678
2679 if (app_data_length > 0) {
2680 uint8_t *app_data = NULL;
2681 MALLOC(app_data, uint8_t *, app_data_length, M_TEMP, M_WAITOK);
2682 if (app_data != NULL) {
2683 error = flow_divert_packet_get_tlv(packet, offset, FLOW_DIVERT_TLV_APP_DATA, app_data_length, app_data, NULL);
2684 if (error == 0) {
2685 if (fd_cb->app_data != NULL) {
2686 FREE(fd_cb->app_data, M_TEMP);
2687 }
2688 fd_cb->app_data = app_data;
2689 fd_cb->app_data_length = app_data_length;
2690 } else {
2691 FDLOG(LOG_ERR, fd_cb, "Failed to copy %u bytes of application data from the properties update packet", app_data_length);
2692 FREE(app_data, M_TEMP);
2693 }
2694 } else {
2695 FDLOG(LOG_ERR, fd_cb, "Failed to allocate a buffer of size %u to hold the application data from the properties update", app_data_length);
2696 }
2697 }
2698
2699 socket_unlock(fd_cb->so, 0);
2700 }
2701 FDUNLOCK(fd_cb);
2702 }
2703
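/*
 * Rebuild the group's signing-identifier prefix trie from an app map
 * message. The signing ID TLVs are walked twice: a first pass counts
 * the identifiers and their total byte length (with overflow checks),
 * which together with the advertised prefix count sizes the node,
 * child-map and byte arrays; the backing store is a single allocation,
 * capped at FLOW_DIVERT_MAX_TRIE_MEMORY, that is carved into those
 * three regions. A second pass copies each identifier into the byte
 * pool and inserts it into the trie. Any failure discards the new trie
 * and leaves the group with an empty one.
 */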
2704 static void
2705 flow_divert_handle_app_map_create(struct flow_divert_group *group, mbuf_t packet, int offset)
2706 {
2707 size_t bytes_mem_size;
2708 size_t child_maps_mem_size;
2709 size_t nodes_mem_size;
2710 size_t trie_memory_size = 0;
2711 int cursor;
2712 int error = 0;
2713 struct flow_divert_trie new_trie;
2714 int insert_error = 0;
2715 int prefix_count = -1;
2716 int signing_id_count = 0;
2717 size_t bytes_count = 0;
2718 size_t nodes_count = 0;
2719 size_t maps_count = 0;
2720
2721 lck_rw_lock_exclusive(&group->lck);
2722
2723 /* Re-set the current trie */
2724 if (group->signing_id_trie.memory != NULL) {
2725 FREE(group->signing_id_trie.memory, M_TEMP);
2726 }
2727 memset(&group->signing_id_trie, 0, sizeof(group->signing_id_trie));
2728 group->signing_id_trie.root = NULL_TRIE_IDX;
2729
2730 memset(&new_trie, 0, sizeof(new_trie));
2731
2732 /* Get the number of shared prefixes in the new set of signing ID strings */
2733 error = flow_divert_packet_get_tlv(packet, offset, FLOW_DIVERT_TLV_PREFIX_COUNT, sizeof(prefix_count), &prefix_count, NULL);
2734
2735 if (prefix_count < 0 || error) {
2736 FDLOG(LOG_ERR, &nil_pcb, "Invalid prefix count (%d) or an error occurred while reading the prefix count: %d", prefix_count, error);
2737 lck_rw_done(&group->lck);
2738 return;
2739 }
2740
2741 /* Compute the number of signing IDs and the total amount of bytes needed to store them */
2742 for (cursor = flow_divert_packet_find_tlv(packet, offset, FLOW_DIVERT_TLV_SIGNING_ID, &error, 0);
2743 cursor >= 0;
2744 cursor = flow_divert_packet_find_tlv(packet, cursor, FLOW_DIVERT_TLV_SIGNING_ID, &error, 1)) {
2745 uint32_t sid_size = 0;
2746 error = flow_divert_packet_get_tlv(packet, cursor, FLOW_DIVERT_TLV_SIGNING_ID, 0, NULL, &sid_size);
2747 if (error || sid_size == 0) {
2748 FDLOG(LOG_ERR, &nil_pcb, "Failed to get the length of the signing identifier at offset %d: %d", cursor, error);
2749 signing_id_count = 0;
2750 break;
2751 }
2752 if (os_add_overflow(bytes_count, sid_size, &bytes_count)) {
2753 FDLOG0(LOG_ERR, &nil_pcb, "Overflow while incrementing number of bytes");
2754 signing_id_count = 0;
2755 break;
2756 }
2757 signing_id_count++;
2758 }
2759
2760 if (signing_id_count == 0) {
2761 lck_rw_done(&group->lck);
2762 FDLOG0(LOG_NOTICE, &nil_pcb, "No signing identifiers");
2763 return;
2764 }
2765
2766 if (os_add3_overflow(prefix_count, signing_id_count, 1, &nodes_count)) { /* + 1 for the root node */
2767 lck_rw_done(&group->lck);
2768 FDLOG0(LOG_ERR, &nil_pcb, "Overflow while computing the number of nodes");
2769 return;
2770 }
2771
2772 if (os_add_overflow(prefix_count, 1, &maps_count)) { /* + 1 for the root node's child map */
2773 lck_rw_done(&group->lck);
2774 FDLOG0(LOG_ERR, &nil_pcb, "Overflow while computing the number of maps");
2775 return;
2776 }
2777
2778 if (bytes_count > UINT16_MAX || nodes_count > UINT16_MAX || maps_count > UINT16_MAX) {
2779 lck_rw_done(&group->lck);
2780 FDLOG(LOG_NOTICE, &nil_pcb, "Invalid bytes count (%lu), nodes count (%lu) or maps count (%lu)", bytes_count, nodes_count, maps_count);
2781 return;
2782 }
2783
2784 FDLOG(LOG_INFO, &nil_pcb, "Nodes count = %lu, child maps count = %lu, bytes_count = %lu",
2785 nodes_count, maps_count, bytes_count);
2786
2787 if (os_mul_overflow(sizeof(*new_trie.nodes), (size_t)nodes_count, &nodes_mem_size) ||
2788 os_mul3_overflow(sizeof(*new_trie.child_maps), CHILD_MAP_SIZE, (size_t)maps_count, &child_maps_mem_size) ||
2789 os_mul_overflow(sizeof(*new_trie.bytes), (size_t)bytes_count, &bytes_mem_size) ||
2790 os_add3_overflow(nodes_mem_size, child_maps_mem_size, bytes_mem_size, &trie_memory_size)) {
2791 FDLOG0(LOG_ERR, &nil_pcb, "Overflow while computing trie memory sizes");
2792 lck_rw_done(&group->lck);
2793 return;
2794 }
2795
2796 if (trie_memory_size > FLOW_DIVERT_MAX_TRIE_MEMORY) {
2797 FDLOG(LOG_ERR, &nil_pcb, "Trie memory size (%lu) is too big (maximum is %u)", trie_memory_size, FLOW_DIVERT_MAX_TRIE_MEMORY);
2798 lck_rw_done(&group->lck);
2799 return;
2800 }
2801
2802 MALLOC(new_trie.memory, void *, trie_memory_size, M_TEMP, M_WAITOK);
2803 if (new_trie.memory == NULL) {
2804 FDLOG(LOG_ERR, &nil_pcb, "Failed to allocate %lu bytes of memory for the signing ID trie",
2805 nodes_mem_size + child_maps_mem_size + bytes_mem_size);
2806 lck_rw_done(&group->lck);
2807 return;
2808 }
2809
2810 new_trie.bytes_count = (uint16_t)bytes_count;
2811 new_trie.nodes_count = (uint16_t)nodes_count;
2812 new_trie.child_maps_count = (uint16_t)maps_count;
2813
2814 /* Initialize the free lists */
2815 new_trie.nodes = (struct flow_divert_trie_node *)new_trie.memory;
2816 new_trie.nodes_free_next = 0;
2817 memset(new_trie.nodes, 0, nodes_mem_size);
2818
2819 new_trie.child_maps = (uint16_t *)(void *)((uint8_t *)new_trie.memory + nodes_mem_size);
2820 new_trie.child_maps_free_next = 0;
2821 memset(new_trie.child_maps, 0xff, child_maps_mem_size);
2822
2823 new_trie.bytes = (uint8_t *)(void *)((uint8_t *)new_trie.memory + nodes_mem_size + child_maps_mem_size);
2824 new_trie.bytes_free_next = 0;
2825 memset(new_trie.bytes, 0, bytes_mem_size);
2826
2827 /* The root is an empty node */
2828 new_trie.root = trie_node_alloc(&new_trie);
2829
2830 /* Add each signing ID to the trie */
2831 for (cursor = flow_divert_packet_find_tlv(packet, offset, FLOW_DIVERT_TLV_SIGNING_ID, &error, 0);
2832 cursor >= 0;
2833 cursor = flow_divert_packet_find_tlv(packet, cursor, FLOW_DIVERT_TLV_SIGNING_ID, &error, 1)) {
2834 uint32_t sid_size = 0;
2835 error = flow_divert_packet_get_tlv(packet, cursor, FLOW_DIVERT_TLV_SIGNING_ID, 0, NULL, &sid_size);
2836 if (error || sid_size == 0) {
2837 FDLOG(LOG_ERR, &nil_pcb, "Failed to get the length of the signing identifier at offset %d while building: %d", cursor, error);
2838 insert_error = EINVAL;
2839 break;
2840 }
2841 if (sid_size <= UINT16_MAX && new_trie.bytes_free_next + (uint16_t)sid_size <= new_trie.bytes_count) {
2842 uint16_t new_node_idx;
2843 error = flow_divert_packet_get_tlv(packet, cursor, FLOW_DIVERT_TLV_SIGNING_ID, sid_size, &TRIE_BYTE(&new_trie, new_trie.bytes_free_next), NULL);
2844 if (error) {
2845 FDLOG(LOG_ERR, &nil_pcb, "Failed to read the signing identifier at offset %d: %d", cursor, error);
2846 insert_error = EINVAL;
2847 break;
2848 }
2849 new_node_idx = flow_divert_trie_insert(&new_trie, new_trie.bytes_free_next, sid_size);
2850 if (new_node_idx == NULL_TRIE_IDX) {
2851 insert_error = EINVAL;
2852 break;
2853 }
2854 } else {
2855 FDLOG0(LOG_ERR, &nil_pcb, "No place to put signing ID for insertion");
2856 insert_error = ENOBUFS;
2857 break;
2858 }
2859 }
2860
2861 if (!insert_error) {
2862 group->signing_id_trie = new_trie;
2863 } else {
2864 FREE(new_trie.memory, M_TEMP);
2865 }
2866
2867 lck_rw_done(&group->lck);
2868 }
2869
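/*
 * Entry point for packets arriving on a flow divert kernel control
 * socket. After size sanity checks the packet header is copied out and
 * the connection ID demultiplexes the message: conn_id 0 carries
 * group-level messages (group init, app map create); anything else is
 * looked up in the group's PCB tree (taking a reference) and dispatched
 * by packet type (connect result, close, data, read notification,
 * properties update). The packet mbuf is always freed before returning.
 */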
2870 static int
2871 flow_divert_input(mbuf_t packet, struct flow_divert_group *group)
2872 {
2873 struct flow_divert_packet_header hdr;
2874 int error = 0;
2875 struct flow_divert_pcb *fd_cb;
2876
2877 if (mbuf_pkthdr_len(packet) < sizeof(hdr)) {
2878 FDLOG(LOG_ERR, &nil_pcb, "got a bad packet, length (%lu) < sizeof hdr (%lu)", mbuf_pkthdr_len(packet), sizeof(hdr));
2879 error = EINVAL;
2880 goto done;
2881 }
2882
2883 if (mbuf_pkthdr_len(packet) > FD_CTL_RCVBUFF_SIZE) {
2884 FDLOG(LOG_ERR, &nil_pcb, "got a bad packet, length (%lu) > %d", mbuf_pkthdr_len(packet), FD_CTL_RCVBUFF_SIZE);
2885 error = EINVAL;
2886 goto done;
2887 }
2888
2889 error = mbuf_copydata(packet, 0, sizeof(hdr), &hdr);
2890 if (error) {
2891 FDLOG(LOG_ERR, &nil_pcb, "mbuf_copydata failed for the header: %d", error);
2892 error = ENOBUFS;
2893 goto done;
2894 }
2895
2896 hdr.conn_id = ntohl(hdr.conn_id);
2897
2898 if (hdr.conn_id == 0) {
2899 switch (hdr.packet_type) {
2900 case FLOW_DIVERT_PKT_GROUP_INIT:
2901 flow_divert_handle_group_init(group, packet, sizeof(hdr));
2902 break;
2903 case FLOW_DIVERT_PKT_APP_MAP_CREATE:
2904 flow_divert_handle_app_map_create(group, packet, sizeof(hdr));
2905 break;
2906 default:
2907 FDLOG(LOG_WARNING, &nil_pcb, "got an unknown message type: %d", hdr.packet_type);
2908 break;
2909 }
2910 goto done;
2911 }
2912
2913 fd_cb = flow_divert_pcb_lookup(hdr.conn_id, group); /* This retains the PCB */
2914 if (fd_cb == NULL) {
2915 if (hdr.packet_type != FLOW_DIVERT_PKT_CLOSE && hdr.packet_type != FLOW_DIVERT_PKT_READ_NOTIFY) {
2916 FDLOG(LOG_NOTICE, &nil_pcb, "got a %s message from group %d for an unknown pcb: %u", flow_divert_packet_type2str(hdr.packet_type), group->ctl_unit, hdr.conn_id);
2917 }
2918 goto done;
2919 }
2920
2921 switch (hdr.packet_type) {
2922 case FLOW_DIVERT_PKT_CONNECT_RESULT:
2923 flow_divert_handle_connect_result(fd_cb, packet, sizeof(hdr));
2924 break;
2925 case FLOW_DIVERT_PKT_CLOSE:
2926 flow_divert_handle_close(fd_cb, packet, sizeof(hdr));
2927 break;
2928 case FLOW_DIVERT_PKT_DATA:
2929 error = flow_divert_handle_data(fd_cb, packet, sizeof(hdr));
2930 break;
2931 case FLOW_DIVERT_PKT_READ_NOTIFY:
2932 flow_divert_handle_read_notification(fd_cb, packet, sizeof(hdr));
2933 break;
2934 case FLOW_DIVERT_PKT_PROPERTIES_UPDATE:
2935 flow_divert_handle_properties_update(fd_cb, packet, sizeof(hdr));
2936 break;
2937 default:
2938 FDLOG(LOG_WARNING, fd_cb, "got an unknown message type: %d", hdr.packet_type);
2939 break;
2940 }
2941
2942 FDRELEASE(fd_cb);
2943
2944 done:
2945 mbuf_freem(packet);
2946 return error;
2947 }
2948
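/*
 * Abort every flow attached to a group: drain the group's pending send
 * queue, gather all PCBs from the group's tree onto a temporary list
 * (retaining each) while holding the group lock, then drop the group
 * lock and, for each PCB, remove it from the group, mark it fully
 * closed, set ECONNABORTED on the socket and disconnect it.
 */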
2949 static void
2950 flow_divert_close_all(struct flow_divert_group *group)
2951 {
2952 struct flow_divert_pcb *fd_cb;
2953 SLIST_HEAD(, flow_divert_pcb) tmp_list;
2954
2955 SLIST_INIT(&tmp_list);
2956
2957 lck_rw_lock_exclusive(&group->lck);
2958
2959 MBUFQ_DRAIN(&group->send_queue);
2960
2961 RB_FOREACH(fd_cb, fd_pcb_tree, &group->pcb_tree) {
2962 FDRETAIN(fd_cb);
2963 SLIST_INSERT_HEAD(&tmp_list, fd_cb, tmp_list_entry);
2964 }
2965
2966 lck_rw_done(&group->lck);
2967
2968 while (!SLIST_EMPTY(&tmp_list)) {
2969 fd_cb = SLIST_FIRST(&tmp_list);
2970 FDLOCK(fd_cb);
2971 SLIST_REMOVE_HEAD(&tmp_list, tmp_list_entry);
2972 if (fd_cb->so != NULL) {
2973 socket_lock(fd_cb->so, 0);
2974 flow_divert_pcb_remove(fd_cb);
2975 flow_divert_update_closed_state(fd_cb, SHUT_RDWR, TRUE);
2976 fd_cb->so->so_error = ECONNABORTED;
2977 flow_divert_disconnect_socket(fd_cb->so);
2978 socket_unlock(fd_cb->so, 0);
2979 }
2980 FDUNLOCK(fd_cb);
2981 FDRELEASE(fd_cb);
2982 }
2983 }
2984
2985 void
2986 flow_divert_detach(struct socket *so)
2987 {
2988 struct flow_divert_pcb *fd_cb = so->so_fd_pcb;
2989
2990 VERIFY((so->so_flags & SOF_FLOW_DIVERT) && so->so_fd_pcb != NULL);
2991
2992 so->so_flags &= ~SOF_FLOW_DIVERT;
2993 so->so_fd_pcb = NULL;
2994
2995 FDLOG(LOG_INFO, fd_cb, "Detaching, ref count = %d", fd_cb->ref_count);
2996
2997 if (fd_cb->group != NULL) {
2998 /* Last-ditch effort to send any buffered data */
2999 flow_divert_send_buffered_data(fd_cb, TRUE);
3000
3001 flow_divert_update_closed_state(fd_cb, SHUT_RDWR, FALSE);
3002 flow_divert_send_close_if_needed(fd_cb);
3003 /* Remove from the group */
3004 flow_divert_pcb_remove(fd_cb);
3005 }
3006
3007 socket_unlock(so, 0);
3008 FDLOCK(fd_cb);
3009 fd_cb->so = NULL;
3010 FDUNLOCK(fd_cb);
3011 socket_lock(so, 0);
3012
3013 FDRELEASE(fd_cb); /* Release the socket's reference */
3014 }
3015
3016 static int
3017 flow_divert_close(struct socket *so)
3018 {
3019 struct flow_divert_pcb *fd_cb = so->so_fd_pcb;
3020
3021 VERIFY((so->so_flags & SOF_FLOW_DIVERT) && so->so_fd_pcb != NULL);
3022
3023 FDLOG0(LOG_INFO, fd_cb, "Closing");
3024
3025 if (SOCK_TYPE(so) == SOCK_STREAM) {
3026 soisdisconnecting(so);
3027 sbflush(&so->so_rcv);
3028 }
3029
3030 flow_divert_send_buffered_data(fd_cb, TRUE);
3031 flow_divert_update_closed_state(fd_cb, SHUT_RDWR, FALSE);
3032 flow_divert_send_close_if_needed(fd_cb);
3033
3034 /* Remove from the group */
3035 flow_divert_pcb_remove(fd_cb);
3036
3037 return 0;
3038 }
3039
3040 static int
3041 flow_divert_disconnectx(struct socket *so, sae_associd_t aid,
3042 sae_connid_t cid __unused)
3043 {
3044 if (aid != SAE_ASSOCID_ANY && aid != SAE_ASSOCID_ALL) {
3045 return EINVAL;
3046 }
3047
3048 return flow_divert_close(so);
3049 }
3050
3051 static int
3052 flow_divert_shutdown(struct socket *so)
3053 {
3054 struct flow_divert_pcb *fd_cb = so->so_fd_pcb;
3055
3056 VERIFY((so->so_flags & SOF_FLOW_DIVERT) && so->so_fd_pcb != NULL);
3057
3058 FDLOG0(LOG_INFO, fd_cb, "Can't send more");
3059
3060 socantsendmore(so);
3061
3062 flow_divert_update_closed_state(fd_cb, SHUT_WR, FALSE);
3063 flow_divert_send_close_if_needed(fd_cb);
3064
3065 return 0;
3066 }
3067
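/*
 * pru_rcvd hook: when the application reads data and receive space
 * becomes available again, send the read notification that was deferred
 * in flow_divert_handle_data() (because the receive buffer was full)
 * and clear FLOW_DIVERT_NOTIFY_ON_RECEIVED.
 */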
3068 static int
3069 flow_divert_rcvd(struct socket *so, int flags __unused)
3070 {
3071 struct flow_divert_pcb *fd_cb = so->so_fd_pcb;
3072 int space = sbspace(&so->so_rcv);
3073
3074 VERIFY((so->so_flags & SOF_FLOW_DIVERT) && so->so_fd_pcb != NULL);
3075
3076 FDLOG(LOG_DEBUG, fd_cb, "app read bytes, space = %d", space);
3077 if ((fd_cb->flags & FLOW_DIVERT_NOTIFY_ON_RECEIVED) &&
3078 (space > 0) &&
3079 flow_divert_send_read_notification(fd_cb) == 0) {
3080 FDLOG0(LOG_INFO, fd_cb, "Sent a read notification");
3081 fd_cb->flags &= ~FLOW_DIVERT_NOTIFY_ON_RECEIVED;
3082 }
3083
3084 return 0;
3085 }
3086
3087 static int
3088 flow_divert_append_target_endpoint_tlv(mbuf_t connect_packet, struct sockaddr *toaddr)
3089 {
3090 int error = 0;
3091 int port = 0;
3092
3093 if (!flow_divert_is_sockaddr_valid(toaddr)) {
3094 FDLOG(LOG_ERR, &nil_pcb, "Invalid target address, family = %u, length = %u", toaddr->sa_family, toaddr->sa_len);
3095 error = EINVAL;
3096 goto done;
3097 }
3098
3099 error = flow_divert_packet_append_tlv(connect_packet, FLOW_DIVERT_TLV_TARGET_ADDRESS, toaddr->sa_len, toaddr);
3100 if (error) {
3101 goto done;
3102 }
3103
3104 if (toaddr->sa_family == AF_INET) {
3105 port = ntohs((satosin(toaddr))->sin_port);
3106 } else {
3107 port = ntohs((satosin6(toaddr))->sin6_port);
3108 }
3109
3110 error = flow_divert_packet_append_tlv(connect_packet, FLOW_DIVERT_TLV_TARGET_PORT, sizeof(port), &port);
3111 if (error) {
3112 goto done;
3113 }
3114
3115 done:
3116 return error;
3117 }
3118
3119 struct sockaddr *
3120 flow_divert_get_buffered_target_address(mbuf_t buffer)
3121 {
3122 if (buffer != NULL && buffer->m_type == MT_SONAME) {
3123 struct sockaddr *toaddr = mtod(buffer, struct sockaddr *);
3124 if (toaddr != NULL && flow_divert_is_sockaddr_valid(toaddr)) {
3125 return toaddr;
3126 }
3127 }
3128 return NULL;
3129 }
3130
3131 static boolean_t
3132 flow_divert_is_sockaddr_valid(struct sockaddr *addr)
3133 {
3134 switch (addr->sa_family) {
3135 case AF_INET:
3136 if (addr->sa_len < sizeof(struct sockaddr_in)) {
3137 return FALSE;
3138 }
3139 break;
3140 case AF_INET6:
3141 if (addr->sa_len < sizeof(struct sockaddr_in6)) {
3142 return FALSE;
3143 }
3144 break;
3145 default:
3146 return FALSE;
3147 }
3148 return TRUE;
3149 }
3150
3151 static errno_t
3152 flow_divert_dup_addr(sa_family_t family, struct sockaddr *addr,
3153 struct sockaddr **dup)
3154 {
3155 int error = 0;
3156 struct sockaddr *result;
3157 struct sockaddr_storage ss;
3158
3159 if (addr != NULL) {
3160 result = addr;
3161 } else {
3162 memset(&ss, 0, sizeof(ss));
3163 ss.ss_family = family;
3164 if (ss.ss_family == AF_INET) {
3165 ss.ss_len = sizeof(struct sockaddr_in);
3166 } else if (ss.ss_family == AF_INET6) {
3167 ss.ss_len = sizeof(struct sockaddr_in6);
3168 } else {
3169 error = EINVAL;
3170 }
3171 result = (struct sockaddr *)&ss;
3172 }
3173
3174 if (!error) {
3175 *dup = dup_sockaddr(result, 1);
3176 if (*dup == NULL) {
3177 error = ENOBUFS;
3178 }
3179 }
3180
3181 return error;
3182 }
3183
3184 static void
3185 flow_divert_disconnect_socket(struct socket *so)
3186 {
3187 soisdisconnected(so);
3188 if (SOCK_TYPE(so) == SOCK_DGRAM) {
3189 struct inpcb *inp = NULL;
3190
3191 inp = sotoinpcb(so);
3192 if (inp != NULL) {
3193 if (SOCK_CHECK_DOM(so, PF_INET6)) {
3194 in6_pcbdetach(inp);
3195 } else {
3196 in_pcbdetach(inp);
3197 }
3198 }
3199 }
3200 }
3201
3202 static errno_t
3203 flow_divert_ctloutput(struct socket *so, struct sockopt *sopt)
3204 {
3205 struct flow_divert_pcb *fd_cb = so->so_fd_pcb;
3206
3207 VERIFY((so->so_flags & SOF_FLOW_DIVERT) && so->so_fd_pcb != NULL);
3208
3209 if (sopt->sopt_name == SO_TRAFFIC_CLASS) {
3210 if (sopt->sopt_dir == SOPT_SET && fd_cb->flags & FLOW_DIVERT_CONNECT_STARTED) {
3211 flow_divert_send_traffic_class_update(fd_cb, so->so_traffic_class);
3212 }
3213 }
3214
3215 if (SOCK_DOM(so) == PF_INET) {
3216 return g_tcp_protosw->pr_ctloutput(so, sopt);
3217 } else if (SOCK_DOM(so) == PF_INET6) {
3218 return g_tcp6_protosw->pr_ctloutput(so, sopt);
3219 }
3220 return 0;
3221 }
3222
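/*
 * Common connect path for explicit and implicit (send-triggered)
 * connects. The first call saves enough inpcb state to undo flow divert
 * later, rejects IPv4 multicast destinations, rewrites v4-mapped IPv6
 * destinations to plain IPv4 when the socket is not v6-only, derives a
 * local endpoint with in_pcbladdr()/in6_pcbladdr() (failures are logged
 * but not fatal), enforces the no-cellular/expensive/constrained
 * checks, and builds the connect packet. Sending is deferred when
 * preconnect data is expected (SOF1_PRECONNECT_DATA); otherwise the
 * packet goes out immediately and FLOW_DIVERT_CONNECT_STARTED is set.
 * Datagram sockets are marked connected right away, stream sockets only
 * connecting.
 */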
3223 static errno_t
3224 flow_divert_connect_out_internal(struct socket *so, struct sockaddr *to, proc_t p, bool implicit)
3225 {
3226 struct flow_divert_pcb *fd_cb = so->so_fd_pcb;
3227 int error = 0;
3228 struct inpcb *inp = sotoinpcb(so);
3229 struct sockaddr_in *sinp;
3230 mbuf_t connect_packet = NULL;
3231 int do_send = 1;
3232
3233 VERIFY((so->so_flags & SOF_FLOW_DIVERT) && so->so_fd_pcb != NULL);
3234
3235 if (fd_cb->group == NULL) {
3236 error = ENETUNREACH;
3237 goto done;
3238 }
3239
3240 if (inp == NULL) {
3241 error = EINVAL;
3242 goto done;
3243 } else if (inp->inp_state == INPCB_STATE_DEAD) {
3244 if (so->so_error) {
3245 error = so->so_error;
3246 so->so_error = 0;
3247 } else {
3248 error = EINVAL;
3249 }
3250 goto done;
3251 }
3252
3253 if (fd_cb->flags & FLOW_DIVERT_CONNECT_STARTED) {
3254 error = EALREADY;
3255 goto done;
3256 }
3257
3258 FDLOG0(LOG_INFO, fd_cb, "Connecting");
3259
3260 if (fd_cb->connect_packet == NULL) {
3261 struct sockaddr_in sin = {};
3262 struct ifnet *ifp = NULL;
3263
3264 if (to == NULL) {
3265 FDLOG0(LOG_ERR, fd_cb, "No destination address available when creating connect packet");
3266 error = EINVAL;
3267 goto done;
3268 }
3269
3270 fd_cb->original_remote_endpoint = dup_sockaddr(to, 0);
3271 if (fd_cb->original_remote_endpoint == NULL) {
3272 FDLOG0(LOG_ERR, fd_cb, "Failed to dup the remote endpoint");
3273 error = ENOMEM;
3274 goto done;
3275 }
3276 fd_cb->original_vflag = inp->inp_vflag;
3277 fd_cb->original_last_outifp = inp->inp_last_outifp;
3278 fd_cb->original_last_outifp6 = inp->in6p_last_outifp;
3279
3280 sinp = (struct sockaddr_in *)(void *)to;
3281 if (sinp->sin_family == AF_INET && IN_MULTICAST(ntohl(sinp->sin_addr.s_addr))) {
3282 error = EAFNOSUPPORT;
3283 goto done;
3284 }
3285
3286 if (to->sa_family == AF_INET6 && !(inp->inp_flags & IN6P_IPV6_V6ONLY)) {
3287 struct sockaddr_in6 sin6 = {};
3288 sin6.sin6_family = AF_INET6;
3289 sin6.sin6_len = sizeof(struct sockaddr_in6);
3290 sin6.sin6_port = satosin6(to)->sin6_port;
3291 sin6.sin6_addr = satosin6(to)->sin6_addr;
3292 if (IN6_IS_ADDR_V4MAPPED(&(sin6.sin6_addr))) {
3293 in6_sin6_2_sin(&sin, &sin6);
3294 to = (struct sockaddr *)&sin;
3295 }
3296 }
3297
3298 if (to->sa_family == AF_INET6) {
3299 inp->inp_vflag &= ~INP_IPV4;
3300 inp->inp_vflag |= INP_IPV6;
3301 fd_cb->local_endpoint.sin6.sin6_len = sizeof(struct sockaddr_in6);
3302 fd_cb->local_endpoint.sin6.sin6_family = AF_INET6;
3303 fd_cb->local_endpoint.sin6.sin6_port = inp->inp_lport;
3304 error = in6_pcbladdr(inp, to, &(fd_cb->local_endpoint.sin6.sin6_addr), &ifp);
3305 if (error) {
3306 FDLOG(LOG_WARNING, fd_cb, "failed to get a local IPv6 address: %d", error);
3307 error = 0;
3308 }
3309 if (ifp != NULL) {
3310 inp->in6p_last_outifp = ifp;
3311 ifnet_release(ifp);
3312 }
3313 } else if (to->sa_family == AF_INET) {
3314 inp->inp_vflag |= INP_IPV4;
3315 inp->inp_vflag &= ~INP_IPV6;
3316 fd_cb->local_endpoint.sin.sin_len = sizeof(struct sockaddr_in);
3317 fd_cb->local_endpoint.sin.sin_family = AF_INET;
3318 fd_cb->local_endpoint.sin.sin_port = inp->inp_lport;
3319 error = in_pcbladdr(inp, to, &(fd_cb->local_endpoint.sin.sin_addr), IFSCOPE_NONE, &ifp, 0);
3320 if (error) {
3321 FDLOG(LOG_WARNING, fd_cb, "failed to get a local IPv4 address: %d", error);
3322 error = 0;
3323 }
3324 if (ifp != NULL) {
3325 inp->inp_last_outifp = ifp;
3326 ifnet_release(ifp);
3327 }
3328 } else {
3329 FDLOG(LOG_WARNING, fd_cb, "target address has an unsupported family: %d", to->sa_family);
3330 }
3331
3332 error = flow_divert_check_no_cellular(fd_cb) ||
3333 flow_divert_check_no_expensive(fd_cb) ||
3334 flow_divert_check_no_constrained(fd_cb);
3335 if (error) {
3336 goto done;
3337 }
3338
3339 error = flow_divert_create_connect_packet(fd_cb, to, so, p, &connect_packet);
3340 if (error) {
3341 goto done;
3342 }
3343
3344 if (!implicit || SOCK_TYPE(so) == SOCK_STREAM) {
3345 flow_divert_set_remote_endpoint(fd_cb, to);
3346 flow_divert_set_local_endpoint(fd_cb, &(fd_cb->local_endpoint.sa), false);
3347 }
3348
3349 if (implicit) {
3350 fd_cb->flags |= FLOW_DIVERT_IMPLICIT_CONNECT;
3351 }
3352
3353 if (so->so_flags1 & SOF1_PRECONNECT_DATA) {
3354 FDLOG0(LOG_INFO, fd_cb, "Delaying sending the connect packet until send or receive");
3355 do_send = 0;
3356 }
3357
3358 fd_cb->connect_packet = connect_packet;
3359 connect_packet = NULL;
3360 } else {
3361 FDLOG0(LOG_INFO, fd_cb, "Sending saved connect packet");
3362 }
3363
3364 if (do_send) {
3365 error = flow_divert_send_connect_packet(fd_cb);
3366 if (error) {
3367 goto done;
3368 }
3369
3370 fd_cb->flags |= FLOW_DIVERT_CONNECT_STARTED;
3371 }
3372
3373 if (SOCK_TYPE(so) == SOCK_DGRAM) {
3374 soisconnected(so);
3375 } else {
3376 soisconnecting(so);
3377 }
3378
3379 done:
3380 return error;
3381 }
3382
3383 errno_t
3384 flow_divert_connect_out(struct socket *so, struct sockaddr *to, proc_t p)
3385 {
3386 return flow_divert_connect_out_internal(so, to, p, false);
3387 }
3388
3389 static int
3390 flow_divert_connectx_out_common(struct socket *so, struct sockaddr *dst,
3391 struct proc *p, sae_connid_t *pcid, struct uio *auio, user_ssize_t *bytes_written)
3392 {
3393 struct inpcb *inp = sotoinpcb(so);
3394 int error;
3395
3396 if (inp == NULL) {
3397 return EINVAL;
3398 }
3399
3400 VERIFY(dst != NULL);
3401
3402 error = flow_divert_connect_out(so, dst, p);
3403
3404 if (error != 0) {
3405 return error;
3406 }
3407
3408 /* if there is data, send it */
3409 if (auio != NULL) {
3410 user_ssize_t datalen = 0;
3411
3412 socket_unlock(so, 0);
3413
3414 VERIFY(bytes_written != NULL);
3415
3416 datalen = uio_resid(auio);
3417 error = so->so_proto->pr_usrreqs->pru_sosend(so, NULL, (uio_t)auio, NULL, NULL, 0);
3418 socket_lock(so, 0);
3419
3420 if (error == 0 || error == EWOULDBLOCK) {
3421 *bytes_written = datalen - uio_resid(auio);
3422 }
3423
3424 /*
3425 * sosend returns EWOULDBLOCK if it's a non-blocking
3426 * socket or a timeout occurred (this allows sendit() to
3427 * report the amount of queued data).
3428 *
3429 * However, connectx() is expected to return EINPROGRESS for a
3430 * blocking socket, so remap the return value here.
3431 */
3432 if (error == EWOULDBLOCK) {
3433 error = EINPROGRESS;
3434 }
3435 }
3436
3437 if (error == 0 && pcid != NULL) {
3438 *pcid = 1; /* there is only 1 connection for a TCP */
3439 }
3440
3441 return error;
3442 }
3443
3444 static int
3445 flow_divert_connectx_out(struct socket *so, struct sockaddr *src __unused,
3446 struct sockaddr *dst, struct proc *p, uint32_t ifscope __unused,
3447 sae_associd_t aid __unused, sae_connid_t *pcid, uint32_t flags __unused, void *arg __unused,
3448 uint32_t arglen __unused, struct uio *uio, user_ssize_t *bytes_written)
3449 {
3450 return flow_divert_connectx_out_common(so, dst, p, pcid, uio, bytes_written);
3451 }
3452
3453 static int
3454 flow_divert_connectx6_out(struct socket *so, struct sockaddr *src __unused,
3455 struct sockaddr *dst, struct proc *p, uint32_t ifscope __unused,
3456 sae_associd_t aid __unused, sae_connid_t *pcid, uint32_t flags __unused, void *arg __unused,
3457 uint32_t arglen __unused, struct uio *uio, user_ssize_t *bytes_written)
3458 {
3459 return flow_divert_connectx_out_common(so, dst, p, pcid, uio, bytes_written);
3460 }
3461
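/*
 * Send path for flow-diverted sockets. Non-empty control mbufs and
 * MSG_OOB are rejected. For UDP flows under a content filter with no
 * explicit destination, the remote address saved in the mbuf's CFIL tag
 * is used instead. If no connect has been started yet an implicit
 * connect is performed first (and, for idempotent data, the send window
 * is opened to the size of this write so it can go out immediately);
 * otherwise the interface policy checks are re-run. The data is then
 * handed to flow_divert_send_app_data(), and PRUS_EOF additionally
 * shuts down the write side.
 */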
3462 static errno_t
3463 flow_divert_data_out(struct socket *so, int flags, mbuf_t data, struct sockaddr *to, mbuf_t control, struct proc *p)
3464 {
3465 struct flow_divert_pcb *fd_cb = so->so_fd_pcb;
3466 int error = 0;
3467 struct inpcb *inp;
3468 #if CONTENT_FILTER
3469 struct m_tag *cfil_tag = NULL;
3470 #endif
3471
3472 VERIFY((so->so_flags & SOF_FLOW_DIVERT) && so->so_fd_pcb != NULL);
3473
3474 inp = sotoinpcb(so);
3475 if (inp == NULL || inp->inp_state == INPCB_STATE_DEAD) {
3476 error = ECONNRESET;
3477 goto done;
3478 }
3479
3480 if (control && mbuf_len(control) > 0) {
3481 error = EINVAL;
3482 goto done;
3483 }
3484
3485 if (flags & MSG_OOB) {
3486 error = EINVAL;
3487 goto done; /* We don't support OOB data */
3488 }
3489
3490 #if CONTENT_FILTER
3491 /*
3492 * If the socket is subject to a UDP Content Filter and no remote address is passed in,
3493 * retrieve the CFIL saved remote address from the mbuf and use it.
3494 */
3495 if (to == NULL && so->so_cfil_db) {
3496 struct sockaddr *cfil_faddr = NULL;
3497 cfil_tag = cfil_dgram_get_socket_state(data, NULL, NULL, &cfil_faddr, NULL);
3498 if (cfil_tag) {
3499 to = (struct sockaddr *)(void *)cfil_faddr;
3500 }
3501 FDLOG(LOG_INFO, fd_cb, "Using remote address from CFIL saved state: %p", to);
3502 }
3503 #endif
3504
3505 /* Implicit connect */
3506 if (!(fd_cb->flags & FLOW_DIVERT_CONNECT_STARTED)) {
3507 FDLOG0(LOG_INFO, fd_cb, "implicit connect");
3508
3509 error = flow_divert_connect_out_internal(so, to, p, true);
3510 if (error) {
3511 goto done;
3512 }
3513
3514 if (so->so_flags1 & SOF1_DATA_IDEMPOTENT) {
3515 /* Open up the send window so that the data will get sent right away */
3516 fd_cb->send_window = (uint32_t)mbuf_pkthdr_len(data);
3517 }
3518 } else {
3519 error = flow_divert_check_no_cellular(fd_cb) ||
3520 flow_divert_check_no_expensive(fd_cb) ||
3521 flow_divert_check_no_constrained(fd_cb);
3522 if (error) {
3523 goto done;
3524 }
3525 }
3526
3527 FDLOG(LOG_DEBUG, fd_cb, "app wrote %lu bytes", mbuf_pkthdr_len(data));
3528
3529 fd_cb->bytes_written_by_app += mbuf_pkthdr_len(data);
3530 error = flow_divert_send_app_data(fd_cb, data, to);
3531
3532 data = NULL;
3533
3534 if (error) {
3535 goto done;
3536 }
3537
3538 if (flags & PRUS_EOF) {
3539 flow_divert_shutdown(so);
3540 }
3541
3542 done:
3543 if (data) {
3544 mbuf_freem(data);
3545 }
3546 if (control) {
3547 mbuf_free(control);
3548 }
3549 #if CONTENT_FILTER
3550 if (cfil_tag) {
3551 m_tag_free(cfil_tag);
3552 }
3553 #endif
3554
3555 return error;
3556 }
3557
3558 static int
3559 flow_divert_preconnect(struct socket *so)
3560 {
3561 int error = 0;
3562 struct flow_divert_pcb *fd_cb = so->so_fd_pcb;
3563
3564 VERIFY((so->so_flags & SOF_FLOW_DIVERT) && so->so_fd_pcb != NULL);
3565
3566 if (!(fd_cb->flags & FLOW_DIVERT_CONNECT_STARTED)) {
3567 FDLOG0(LOG_INFO, fd_cb, "Pre-connect read: sending saved connect packet");
3568 error = flow_divert_send_connect_packet(so->so_fd_pcb);
3569 if (error) {
3570 return error;
3571 }
3572
3573 fd_cb->flags |= FLOW_DIVERT_CONNECT_STARTED;
3574 }
3575
3576 soclearfastopen(so);
3577
3578 return error;
3579 }
3580
3581 static void
3582 flow_divert_set_protosw(struct socket *so)
3583 {
3584 if (SOCK_DOM(so) == PF_INET) {
3585 so->so_proto = &g_flow_divert_in_protosw;
3586 } else {
3587 so->so_proto = (struct protosw *)&g_flow_divert_in6_protosw;
3588 }
3589 }
3590
3591 static void
3592 flow_divert_set_udp_protosw(struct socket *so)
3593 {
3594 if (SOCK_DOM(so) == PF_INET) {
3595 so->so_proto = &g_flow_divert_in_udp_protosw;
3596 } else {
3597 so->so_proto = (struct protosw *)&g_flow_divert_in6_udp_protosw;
3598 }
3599 }
3600
3601 errno_t
3602 flow_divert_implicit_data_out(struct socket *so, int flags, mbuf_t data, struct sockaddr *to, mbuf_t control, struct proc *p)
3603 {
3604 struct flow_divert_pcb *fd_cb = so->so_fd_pcb;
3605 struct inpcb *inp;
3606 int error = 0;
3607
3608 inp = sotoinpcb(so);
3609 if (inp == NULL) {
3610 return EINVAL;
3611 }
3612
3613 if (fd_cb == NULL) {
3614 error = flow_divert_pcb_init(so);
3615 fd_cb = so->so_fd_pcb;
3616 if (error != 0 || fd_cb == NULL) {
3617 goto done;
3618 }
3619 }
3620 return flow_divert_data_out(so, flags, data, to, control, p);
3621
3622 done:
3623 if (data) {
3624 mbuf_freem(data);
3625 }
3626 if (control) {
3627 mbuf_free(control);
3628 }
3629
3630 return error;
3631 }
3632
3633 static errno_t
3634 flow_divert_pcb_init_internal(struct socket *so, uint32_t ctl_unit, uint32_t aggregate_unit)
3635 {
3636 errno_t error = 0;
3637 struct flow_divert_pcb *fd_cb;
3638 uint32_t agg_unit = aggregate_unit;
3639 uint32_t group_unit = flow_divert_derive_kernel_control_unit(ctl_unit, &agg_unit);
3640
3641 if (group_unit == 0) {
3642 return EINVAL;
3643 }
3644
3645 if (so->so_flags & SOF_FLOW_DIVERT) {
3646 return EALREADY;
3647 }
3648
3649 fd_cb = flow_divert_pcb_create(so);
3650 if (fd_cb != NULL) {
3651 so->so_fd_pcb = fd_cb;
3652 so->so_flags |= SOF_FLOW_DIVERT;
3653 fd_cb->control_group_unit = group_unit;
3654 fd_cb->policy_control_unit = ctl_unit;
3655 fd_cb->aggregate_unit = agg_unit;
3656
3657 error = flow_divert_pcb_insert(fd_cb, group_unit);
3658 if (error) {
3659 FDLOG(LOG_ERR, fd_cb, "pcb insert failed: %d", error);
3660 so->so_fd_pcb = NULL;
3661 so->so_flags &= ~SOF_FLOW_DIVERT;
3662 FDRELEASE(fd_cb);
3663 } else {
3664 if (SOCK_TYPE(so) == SOCK_STREAM) {
3665 flow_divert_set_protosw(so);
3666 } else if (SOCK_TYPE(so) == SOCK_DGRAM) {
3667 flow_divert_set_udp_protosw(so);
3668 }
3669
3670 FDLOG0(LOG_INFO, fd_cb, "Created");
3671 }
3672 } else {
3673 error = ENOMEM;
3674 }
3675
3676 return error;
3677 }
3678
3679 errno_t
3680 flow_divert_pcb_init(struct socket *so)
3681 {
3682 struct inpcb *inp = sotoinpcb(so);
3683 uint32_t aggregate_units = 0;
3684 uint32_t ctl_unit = necp_socket_get_flow_divert_control_unit(inp, &aggregate_units);
3685 return flow_divert_pcb_init_internal(so, ctl_unit, aggregate_units);
3686 }
3687
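/*
 * Handle the setsockopt() that installs a flow divert connect token.
 * The socket must not already be diverted and must be a TCP or UDP
 * socket over INET/INET6; a TCP socket must still be in the CLOSED
 * state. The token is copied into an mbuf and its key unit, control
 * unit and aggregate unit TLVs are extracted; when the control unit is
 * valid, the token's HMAC is verified against the corresponding group
 * key (with the socket unlocked during the check). On success the flow
 * divert PCB is created, the per-flow log level is applied if present,
 * FLOW_DIVERT_HAS_HMAC is recorded when the HMAC checked out, and the
 * token itself is stashed on the PCB as fd_cb->connect_token.
 */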
3688 errno_t
3689 flow_divert_token_set(struct socket *so, struct sockopt *sopt)
3690 {
3691 uint32_t ctl_unit = 0;
3692 uint32_t key_unit = 0;
3693 uint32_t aggregate_unit = 0;
3694 int error = 0;
3695 int hmac_error = 0;
3696 mbuf_t token = NULL;
3697
3698 if (so->so_flags & SOF_FLOW_DIVERT) {
3699 error = EALREADY;
3700 goto done;
3701 }
3702
3703 if (g_init_result) {
3704 FDLOG(LOG_ERR, &nil_pcb, "flow_divert_init failed (%d), cannot use flow divert", g_init_result);
3705 error = ENOPROTOOPT;
3706 goto done;
3707 }
3708
3709 if ((SOCK_TYPE(so) != SOCK_STREAM && SOCK_TYPE(so) != SOCK_DGRAM) ||
3710 (SOCK_PROTO(so) != IPPROTO_TCP && SOCK_PROTO(so) != IPPROTO_UDP) ||
3711 (SOCK_DOM(so) != PF_INET && SOCK_DOM(so) != PF_INET6)) {
3712 error = EINVAL;
3713 goto done;
3714 } else {
3715 if (SOCK_TYPE(so) == SOCK_STREAM && SOCK_PROTO(so) == IPPROTO_TCP) {
3716 struct tcpcb *tp = sototcpcb(so);
3717 if (tp == NULL || tp->t_state != TCPS_CLOSED) {
3718 error = EINVAL;
3719 goto done;
3720 }
3721 }
3722 }
3723
3724 error = soopt_getm(sopt, &token);
3725 if (error) {
3726 token = NULL;
3727 goto done;
3728 }
3729
3730 error = soopt_mcopyin(sopt, token);
3731 if (error) {
3732 token = NULL;
3733 goto done;
3734 }
3735
3736 error = flow_divert_packet_get_tlv(token, 0, FLOW_DIVERT_TLV_KEY_UNIT, sizeof(key_unit), (void *)&key_unit, NULL);
3737 if (!error) {
3738 key_unit = ntohl(key_unit);
3739 if (key_unit >= GROUP_COUNT_MAX) {
3740 key_unit = 0;
3741 }
3742 } else if (error != ENOENT) {
3743 FDLOG(LOG_ERR, &nil_pcb, "Failed to get the key unit from the token: %d", error);
3744 goto done;
3745 } else {
3746 key_unit = 0;
3747 }
3748
3749 error = flow_divert_packet_get_tlv(token, 0, FLOW_DIVERT_TLV_CTL_UNIT, sizeof(ctl_unit), (void *)&ctl_unit, NULL);
3750 if (error) {
3751 FDLOG(LOG_ERR, &nil_pcb, "Failed to get the control socket unit from the token: %d", error);
3752 goto done;
3753 }
3754
3755 error = flow_divert_packet_get_tlv(token, 0, FLOW_DIVERT_TLV_AGGREGATE_UNIT, sizeof(aggregate_unit), (void *)&aggregate_unit, NULL);
3756 if (error && error != ENOENT) {
3757 FDLOG(LOG_ERR, &nil_pcb, "Failed to get the aggregate unit from the token: %d", error);
3758 goto done;
3759 }
3760
3761 /* A valid kernel control unit is required */
3762 ctl_unit = ntohl(ctl_unit);
3763 aggregate_unit = ntohl(aggregate_unit);
3764
3765 if (ctl_unit > 0 && ctl_unit < GROUP_COUNT_MAX) {
3766 socket_unlock(so, 0);
3767 hmac_error = flow_divert_packet_verify_hmac(token, (key_unit != 0 ? key_unit : ctl_unit));
3768 socket_lock(so, 0);
3769
3770 if (hmac_error && hmac_error != ENOENT) {
3771 FDLOG(LOG_ERR, &nil_pcb, "HMAC verification failed: %d", hmac_error);
3772 error = hmac_error;
3773 goto done;
3774 }
3775 }
3776
3777 error = flow_divert_pcb_init_internal(so, ctl_unit, aggregate_unit);
3778 if (error == 0) {
3779 struct flow_divert_pcb *fd_cb = so->so_fd_pcb;
3780 int log_level = LOG_NOTICE;
3781
3782 error = flow_divert_packet_get_tlv(token, 0, FLOW_DIVERT_TLV_LOG_LEVEL, sizeof(log_level), &log_level, NULL);
3783 if (error == 0) {
3784 fd_cb->log_level = (uint8_t)log_level;
3785 }
3786 error = 0;
3787
3788 fd_cb->connect_token = token;
3789 token = NULL;
3790 }
3791
3792 if (hmac_error == 0) {
3793 struct flow_divert_pcb *fd_cb = so->so_fd_pcb;
3794 if (fd_cb != NULL) {
3795 fd_cb->flags |= FLOW_DIVERT_HAS_HMAC;
3796 }
3797 }
3798
3799 done:
3800 if (token != NULL) {
3801 mbuf_freem(token);
3802 }
3803
3804 return error;
3805 }
3806
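/*
 * flow_divert_token_get() builds a token describing an already-diverted
 * socket (control unit, flow ID, optional app data, key unit, and an HMAC
 * computed with the control group's key) so the flow can be referenced by
 * another process. If the caller supplies no buffer, only the required size
 * is reported. A sketch of the two-step query from userspace, assuming the
 * same private SO_FLOW_DIVERT_TOKEN option is used on the get side:
 *
 *	#include <sys/socket.h>
 *	#include <stdlib.h>
 *
 *	socklen_t len = 0;
 *	getsockopt(fd, SOL_SOCKET, SO_FLOW_DIVERT_TOKEN, NULL, &len);
 *	uint8_t *buf = malloc(len);
 *	if (buf != NULL &&
 *	    getsockopt(fd, SOL_SOCKET, SO_FLOW_DIVERT_TOKEN, buf, &len) == 0) {
 *		// buf[0..len) now holds the TLV-encoded token
 *	}
 */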
3807 errno_t
3808 flow_divert_token_get(struct socket *so, struct sockopt *sopt)
3809 {
3810 uint32_t ctl_unit;
3811 int error = 0;
3812 uint8_t hmac[SHA_DIGEST_LENGTH];
3813 struct flow_divert_pcb *fd_cb = so->so_fd_pcb;
3814 mbuf_t token = NULL;
3815 struct flow_divert_group *control_group = NULL;
3816
3817 if (!(so->so_flags & SOF_FLOW_DIVERT)) {
3818 error = EINVAL;
3819 goto done;
3820 }
3821
3822 VERIFY((so->so_flags & SOF_FLOW_DIVERT) && so->so_fd_pcb != NULL);
3823
3824 if (fd_cb->group == NULL) {
3825 error = EINVAL;
3826 goto done;
3827 }
3828
3829 error = mbuf_gethdr(MBUF_DONTWAIT, MBUF_TYPE_HEADER, &token);
3830 if (error) {
3831 FDLOG(LOG_ERR, fd_cb, "failed to allocate the header mbuf: %d", error);
3832 goto done;
3833 }
3834
3835 ctl_unit = htonl(fd_cb->group->ctl_unit);
3836
3837 error = flow_divert_packet_append_tlv(token, FLOW_DIVERT_TLV_CTL_UNIT, sizeof(ctl_unit), &ctl_unit);
3838 if (error) {
3839 goto done;
3840 }
3841
3842 error = flow_divert_packet_append_tlv(token, FLOW_DIVERT_TLV_FLOW_ID, sizeof(fd_cb->hash), &fd_cb->hash);
3843 if (error) {
3844 goto done;
3845 }
3846
3847 if (fd_cb->app_data != NULL) {
3848 error = flow_divert_packet_append_tlv(token, FLOW_DIVERT_TLV_APP_DATA, (uint32_t)fd_cb->app_data_length, fd_cb->app_data);
3849 if (error) {
3850 goto done;
3851 }
3852 }
3853
3854 socket_unlock(so, 0);
3855 lck_rw_lock_shared(&g_flow_divert_group_lck);
3856
3857 if (g_flow_divert_groups != NULL && g_active_group_count > 0 &&
3858 fd_cb->control_group_unit > 0 && fd_cb->control_group_unit < GROUP_COUNT_MAX) {
3859 control_group = g_flow_divert_groups[fd_cb->control_group_unit];
3860 }
3861
3862 if (control_group != NULL) {
3863 lck_rw_lock_shared(&control_group->lck);
3864 ctl_unit = htonl(control_group->ctl_unit);
3865 error = flow_divert_packet_append_tlv(token, FLOW_DIVERT_TLV_KEY_UNIT, sizeof(ctl_unit), &ctl_unit);
3866 if (!error) {
3867 error = flow_divert_packet_compute_hmac(token, control_group, hmac);
3868 }
3869 lck_rw_done(&control_group->lck);
3870 } else {
3871 error = ENOPROTOOPT;
3872 }
3873
3874 lck_rw_done(&g_flow_divert_group_lck);
3875 socket_lock(so, 0);
3876
3877 if (error) {
3878 goto done;
3879 }
3880
3881 error = flow_divert_packet_append_tlv(token, FLOW_DIVERT_TLV_HMAC, sizeof(hmac), hmac);
3882 if (error) {
3883 goto done;
3884 }
3885
3886 if (sopt->sopt_val == USER_ADDR_NULL) {
3887 /* If the caller passed NULL to getsockopt, just set the size of the token and return */
3888 sopt->sopt_valsize = mbuf_pkthdr_len(token);
3889 goto done;
3890 }
3891
3892 error = soopt_mcopyout(sopt, token);
3893 if (error) {
3894 token = NULL; /* soopt_mcopyout() frees the mbuf on failure, so it must not be freed again here */
3895 goto done;
3896 }
3897
3898 done:
3899 if (token != NULL) {
3900 mbuf_freem(token);
3901 }
3902
3903 return error;
3904 }
3905
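/*
 * flow_divert_kctl_connect() runs when an agent connects to the flow divert
 * kernel control; the requested unit becomes the group's slot in
 * g_flow_divert_groups. A minimal userspace sketch of reaching this path
 * (the control is registered CTL_FLAG_PRIVILEGED, so root is required;
 * DESIRED_UNIT is a hypothetical unit in the range [1, GROUP_COUNT_MAX)):
 *
 *	#include <sys/socket.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/kern_control.h>
 *	#include <sys/sys_domain.h>
 *	#include <string.h>
 *
 *	int fd = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);
 *	struct ctl_info info;
 *	memset(&info, 0, sizeof(info));
 *	strlcpy(info.ctl_name, FLOW_DIVERT_CONTROL_NAME, sizeof(info.ctl_name));
 *	ioctl(fd, CTLIOCGINFO, &info);		// resolve the name to a ctl_id
 *	struct sockaddr_ctl sc = {
 *		.sc_len = sizeof(sc),
 *		.sc_family = AF_SYSTEM,
 *		.ss_sysaddr = AF_SYS_CONTROL,
 *		.sc_id = info.ctl_id,
 *		.sc_unit = DESIRED_UNIT,
 *	};
 *	connect(fd, (struct sockaddr *)&sc, sizeof(sc));
 */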
3906 static errno_t
3907 flow_divert_kctl_connect(kern_ctl_ref kctlref __unused, struct sockaddr_ctl *sac, void **unitinfo)
3908 {
3909 struct flow_divert_group *new_group = NULL;
3910 int error = 0;
3911
3912 if (sac->sc_unit >= GROUP_COUNT_MAX) {
3913 error = EINVAL;
3914 goto done;
3915 }
3916
3917 *unitinfo = NULL;
3918
3919 new_group = zalloc_flags(flow_divert_group_zone, Z_WAITOK | Z_ZERO);
3920 lck_rw_init(&new_group->lck, flow_divert_mtx_grp, flow_divert_mtx_attr);
3921 RB_INIT(&new_group->pcb_tree);
3922 new_group->ctl_unit = sac->sc_unit;
3923 MBUFQ_INIT(&new_group->send_queue);
3924 new_group->signing_id_trie.root = NULL_TRIE_IDX;
3925
3926 lck_rw_lock_exclusive(&g_flow_divert_group_lck);
3927
3928 if (g_flow_divert_groups == NULL) {
3929 MALLOC(g_flow_divert_groups,
3930 struct flow_divert_group **,
3931 GROUP_COUNT_MAX * sizeof(struct flow_divert_group *),
3932 M_TEMP,
3933 M_WAITOK | M_ZERO);
3934 }
3935
3936 if (g_flow_divert_groups == NULL) {
3937 error = ENOBUFS;
3938 } else if (g_flow_divert_groups[sac->sc_unit] != NULL) {
3939 error = EALREADY;
3940 } else {
3941 g_flow_divert_groups[sac->sc_unit] = new_group;
3942 g_active_group_count++;
3943 }
3944
3945 lck_rw_done(&g_flow_divert_group_lck);
3946
3947 done:
3948 if (error == 0) {
3949 *unitinfo = new_group;
3950 } else if (new_group != NULL) {
3951 zfree(flow_divert_group_zone, new_group);
3952 }
3953 return error;
3954 }
3955
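/*
 * Tear down a control group when its agent disconnects: unhook the group
 * from g_flow_divert_groups, close every flow in the group via
 * flow_divert_close_all(), scrub and free the token key, release the signing
 * ID trie, and free the group itself.
 */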
3956 static errno_t
3957 flow_divert_kctl_disconnect(kern_ctl_ref kctlref __unused, uint32_t unit, void *unitinfo)
3958 {
3959 struct flow_divert_group *group = NULL;
3960 errno_t error = 0;
3961
3962 if (unit >= GROUP_COUNT_MAX) {
3963 return EINVAL;
3964 }
3965
3966 if (unitinfo == NULL) {
3967 return 0;
3968 }
3969
3970 FDLOG(LOG_INFO, &nil_pcb, "disconnecting group %u", unit);
3971
3972 lck_rw_lock_exclusive(&g_flow_divert_group_lck);
3973
3974 if (g_flow_divert_groups == NULL || g_active_group_count == 0) {
3975 panic("flow divert group %u is disconnecting, but no groups are active (groups = %p, active count = %u)", unit,
3976 g_flow_divert_groups, g_active_group_count);
3977 }
3978
3979 group = g_flow_divert_groups[unit];
3980
3981 if (group != (struct flow_divert_group *)unitinfo) {
3982 panic("group with unit %u (%p) != unit info (%p)", unit, group, unitinfo);
3983 }
3984
3985 g_flow_divert_groups[unit] = NULL;
3986 g_active_group_count--;
3987
3988 if (g_active_group_count == 0) {
3989 FREE(g_flow_divert_groups, M_TEMP);
3990 g_flow_divert_groups = NULL;
3991 }
3992
3993 lck_rw_done(&g_flow_divert_group_lck);
3994
3995 if (group != NULL) {
3996 flow_divert_close_all(group);
3997
3998 lck_rw_lock_exclusive(&group->lck);
3999
4000 if (group->token_key != NULL) {
4001 memset(group->token_key, 0, group->token_key_size);
4002 FREE(group->token_key, M_TEMP);
4003 group->token_key = NULL;
4004 group->token_key_size = 0;
4005 }
4006
4007 /* Reset the signing ID trie */
4008 if (group->signing_id_trie.memory != NULL) {
4009 FREE(group->signing_id_trie.memory, M_TEMP);
4010 }
4011 memset(&group->signing_id_trie, 0, sizeof(group->signing_id_trie));
4012 group->signing_id_trie.root = NULL_TRIE_IDX;
4013
4014 lck_rw_done(&group->lck);
4015
4016 zfree(flow_divert_group_zone, group);
4017 } else {
4018 error = EINVAL;
4019 }
4020
4021 return error;
4022 }
4023
4024 static errno_t
4025 flow_divert_kctl_send(kern_ctl_ref kctlref __unused, uint32_t unit __unused, void *unitinfo, mbuf_t m, int flags __unused)
4026 {
4027 return flow_divert_input(m, (struct flow_divert_group *)unitinfo);
4028 }
4029
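/*
 * Called by the kernel control subsystem when the agent has drained its
 * receive buffer. If a previous ctl_enqueuembuf() failed and set
 * GROUP_BIT_CTL_ENQUEUE_BLOCKED, clear the bit, retry the packets queued on
 * the group's send_queue, and then give every PCB in the group a chance to
 * push its buffered socket data toward the tunnel.
 */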
4030 static void
4031 flow_divert_kctl_rcvd(kern_ctl_ref kctlref __unused, uint32_t unit __unused, void *unitinfo, int flags __unused)
4032 {
4033 struct flow_divert_group *group = (struct flow_divert_group *)unitinfo;
4034
4035 if (!OSTestAndClear(GROUP_BIT_CTL_ENQUEUE_BLOCKED, &group->atomic_bits)) {
4036 struct flow_divert_pcb *fd_cb;
4037 SLIST_HEAD(, flow_divert_pcb) tmp_list;
4038
4039 lck_rw_lock_shared(&g_flow_divert_group_lck);
4040 lck_rw_lock_exclusive(&group->lck);
4041
4042 while (!MBUFQ_EMPTY(&group->send_queue)) {
4043 mbuf_t next_packet;
4044 FDLOG0(LOG_DEBUG, &nil_pcb, "trying ctl_enqueuembuf again");
4045 next_packet = MBUFQ_FIRST(&group->send_queue);
4046 int error = ctl_enqueuembuf(g_flow_divert_kctl_ref, group->ctl_unit, next_packet, CTL_DATA_EOR);
4047 if (error) {
4048 FDLOG(LOG_DEBUG, &nil_pcb, "ctl_enqueuembuf returned an error: %d", error);
4049 OSTestAndSet(GROUP_BIT_CTL_ENQUEUE_BLOCKED, &group->atomic_bits);
4050 lck_rw_done(&group->lck);
4051 lck_rw_done(&g_flow_divert_group_lck);
4052 return;
4053 }
4054 MBUFQ_DEQUEUE(&group->send_queue, next_packet);
4055 }
4056
4057 SLIST_INIT(&tmp_list);
4058
4059 RB_FOREACH(fd_cb, fd_pcb_tree, &group->pcb_tree) {
4060 FDRETAIN(fd_cb);
4061 SLIST_INSERT_HEAD(&tmp_list, fd_cb, tmp_list_entry);
4062 }
4063
4064 lck_rw_done(&group->lck);
4065
4066 SLIST_FOREACH(fd_cb, &tmp_list, tmp_list_entry) {
4067 FDLOCK(fd_cb);
4068 if (fd_cb->so != NULL) {
4069 socket_lock(fd_cb->so, 0);
4070 if (fd_cb->group != NULL) {
4071 flow_divert_send_buffered_data(fd_cb, FALSE);
4072 }
4073 socket_unlock(fd_cb->so, 0);
4074 }
4075 FDUNLOCK(fd_cb);
4076 FDRELEASE(fd_cb);
4077 }
4078
4079 lck_rw_done(&g_flow_divert_group_lck);
4080 }
4081 }
4082
4083 static int
4084 flow_divert_kctl_init(void)
4085 {
4086 struct kern_ctl_reg ctl_reg;
4087 int result;
4088
4089 memset(&ctl_reg, 0, sizeof(ctl_reg));
4090
4091 strlcpy(ctl_reg.ctl_name, FLOW_DIVERT_CONTROL_NAME, sizeof(ctl_reg.ctl_name));
4092 ctl_reg.ctl_name[sizeof(ctl_reg.ctl_name) - 1] = '\0';
4093 ctl_reg.ctl_flags = CTL_FLAG_PRIVILEGED | CTL_FLAG_REG_EXTENDED;
4094 ctl_reg.ctl_sendsize = FD_CTL_SENDBUFF_SIZE;
4095 ctl_reg.ctl_recvsize = FD_CTL_RCVBUFF_SIZE;
4096
4097 ctl_reg.ctl_connect = flow_divert_kctl_connect;
4098 ctl_reg.ctl_disconnect = flow_divert_kctl_disconnect;
4099 ctl_reg.ctl_send = flow_divert_kctl_send;
4100 ctl_reg.ctl_rcvd = flow_divert_kctl_rcvd;
4101
4102 result = ctl_register(&ctl_reg, &g_flow_divert_kctl_ref);
4103
4104 if (result) {
4105 FDLOG(LOG_ERR, &nil_pcb, "flow_divert_kctl_init - ctl_register failed: %d", result);
4106 return result;
4107 }
4108
4109 return 0;
4110 }
4111
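/*
 * One-time module initialization: clone the IPv4/IPv6 TCP and UDP protosw
 * and usrreqs tables, point their connection and data entry points at the
 * flow divert handlers, register the kernel control, and initialize the
 * group lock. Any failure is recorded in g_init_result, which
 * flow_divert_token_set() checks before allowing a socket to be diverted.
 */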
4112 void
4113 flow_divert_init(void)
4114 {
4115 memset(&nil_pcb, 0, sizeof(nil_pcb));
4116 nil_pcb.log_level = LOG_NOTICE;
4117
4118 g_tcp_protosw = pffindproto(AF_INET, IPPROTO_TCP, SOCK_STREAM);
4119
4120 VERIFY(g_tcp_protosw != NULL);
4121
4122 memcpy(&g_flow_divert_in_protosw, g_tcp_protosw, sizeof(g_flow_divert_in_protosw));
4123 memcpy(&g_flow_divert_in_usrreqs, g_tcp_protosw->pr_usrreqs, sizeof(g_flow_divert_in_usrreqs));
4124
4125 g_flow_divert_in_usrreqs.pru_connect = flow_divert_connect_out;
4126 g_flow_divert_in_usrreqs.pru_connectx = flow_divert_connectx_out;
4127 g_flow_divert_in_usrreqs.pru_disconnect = flow_divert_close;
4128 g_flow_divert_in_usrreqs.pru_disconnectx = flow_divert_disconnectx;
4129 g_flow_divert_in_usrreqs.pru_rcvd = flow_divert_rcvd;
4130 g_flow_divert_in_usrreqs.pru_send = flow_divert_data_out;
4131 g_flow_divert_in_usrreqs.pru_shutdown = flow_divert_shutdown;
4132 g_flow_divert_in_usrreqs.pru_preconnect = flow_divert_preconnect;
4133
4134 g_flow_divert_in_protosw.pr_usrreqs = &g_flow_divert_in_usrreqs;
4135 g_flow_divert_in_protosw.pr_ctloutput = flow_divert_ctloutput;
4136
4137 /*
4138 * Socket filters shouldn't attach/detach to/from this protosw
4139 * since pr_protosw is to be used instead, which points to the
4140 * real protocol; if they do, it is a bug and we should panic.
4141 */
4142 g_flow_divert_in_protosw.pr_filter_head.tqh_first =
4143 (struct socket_filter *)(uintptr_t)0xdeadbeefdeadbeef;
4144 g_flow_divert_in_protosw.pr_filter_head.tqh_last =
4145 (struct socket_filter **)(uintptr_t)0xdeadbeefdeadbeef;
4146
4147 /* UDP */
4148 g_udp_protosw = pffindproto(AF_INET, IPPROTO_UDP, SOCK_DGRAM);
4149 VERIFY(g_udp_protosw != NULL);
4150
4151 memcpy(&g_flow_divert_in_udp_protosw, g_udp_protosw, sizeof(g_flow_divert_in_udp_protosw));
4152 memcpy(&g_flow_divert_in_udp_usrreqs, g_udp_protosw->pr_usrreqs, sizeof(g_flow_divert_in_udp_usrreqs));
4153
4154 g_flow_divert_in_udp_usrreqs.pru_connect = flow_divert_connect_out;
4155 g_flow_divert_in_udp_usrreqs.pru_connectx = flow_divert_connectx_out;
4156 g_flow_divert_in_udp_usrreqs.pru_disconnect = flow_divert_close;
4157 g_flow_divert_in_udp_usrreqs.pru_disconnectx = flow_divert_disconnectx;
4158 g_flow_divert_in_udp_usrreqs.pru_rcvd = flow_divert_rcvd;
4159 g_flow_divert_in_udp_usrreqs.pru_send = flow_divert_data_out;
4160 g_flow_divert_in_udp_usrreqs.pru_shutdown = flow_divert_shutdown;
4161 g_flow_divert_in_udp_usrreqs.pru_sosend_list = pru_sosend_list_notsupp;
4162 g_flow_divert_in_udp_usrreqs.pru_soreceive_list = pru_soreceive_list_notsupp;
4163 g_flow_divert_in_udp_usrreqs.pru_preconnect = flow_divert_preconnect;
4164
4165 g_flow_divert_in_udp_protosw.pr_usrreqs = &g_flow_divert_in_udp_usrreqs;
4166 g_flow_divert_in_udp_protosw.pr_ctloutput = flow_divert_ctloutput;
4167
4168 /*
4169 * Socket filters shouldn't attach/detach to/from this protosw
4170 * since pr_protosw is to be used instead, which points to the
4171 * real protocol; if they do, it is a bug and we should panic.
4172 */
4173 g_flow_divert_in_udp_protosw.pr_filter_head.tqh_first =
4174 (struct socket_filter *)(uintptr_t)0xdeadbeefdeadbeef;
4175 g_flow_divert_in_udp_protosw.pr_filter_head.tqh_last =
4176 (struct socket_filter **)(uintptr_t)0xdeadbeefdeadbeef;
4177
4178 g_tcp6_protosw = (struct ip6protosw *)pffindproto(AF_INET6, IPPROTO_TCP, SOCK_STREAM);
4179
4180 VERIFY(g_tcp6_protosw != NULL);
4181
4182 memcpy(&g_flow_divert_in6_protosw, g_tcp6_protosw, sizeof(g_flow_divert_in6_protosw));
4183 memcpy(&g_flow_divert_in6_usrreqs, g_tcp6_protosw->pr_usrreqs, sizeof(g_flow_divert_in6_usrreqs));
4184
4185 g_flow_divert_in6_usrreqs.pru_connect = flow_divert_connect_out;
4186 g_flow_divert_in6_usrreqs.pru_connectx = flow_divert_connectx6_out;
4187 g_flow_divert_in6_usrreqs.pru_disconnect = flow_divert_close;
4188 g_flow_divert_in6_usrreqs.pru_disconnectx = flow_divert_disconnectx;
4189 g_flow_divert_in6_usrreqs.pru_rcvd = flow_divert_rcvd;
4190 g_flow_divert_in6_usrreqs.pru_send = flow_divert_data_out;
4191 g_flow_divert_in6_usrreqs.pru_shutdown = flow_divert_shutdown;
4192 g_flow_divert_in6_usrreqs.pru_preconnect = flow_divert_preconnect;
4193
4194 g_flow_divert_in6_protosw.pr_usrreqs = &g_flow_divert_in6_usrreqs;
4195 g_flow_divert_in6_protosw.pr_ctloutput = flow_divert_ctloutput;
4196 /*
4197 * Socket filters shouldn't attach/detach to/from this protosw
4198 * since pr_protosw is to be used instead, which points to the
4199 * real protocol; if they do, it is a bug and we should panic.
4200 */
4201 g_flow_divert_in6_protosw.pr_filter_head.tqh_first =
4202 (struct socket_filter *)(uintptr_t)0xdeadbeefdeadbeef;
4203 g_flow_divert_in6_protosw.pr_filter_head.tqh_last =
4204 (struct socket_filter **)(uintptr_t)0xdeadbeefdeadbeef;
4205
4206 /* UDP6 */
4207 g_udp6_protosw = (struct ip6protosw *)pffindproto(AF_INET6, IPPROTO_UDP, SOCK_DGRAM);
4208
4209 VERIFY(g_udp6_protosw != NULL);
4210
4211 memcpy(&g_flow_divert_in6_udp_protosw, g_udp6_protosw, sizeof(g_flow_divert_in6_udp_protosw));
4212 memcpy(&g_flow_divert_in6_udp_usrreqs, g_udp6_protosw->pr_usrreqs, sizeof(g_flow_divert_in6_udp_usrreqs));
4213
4214 g_flow_divert_in6_udp_usrreqs.pru_connect = flow_divert_connect_out;
4215 g_flow_divert_in6_udp_usrreqs.pru_connectx = flow_divert_connectx6_out;
4216 g_flow_divert_in6_udp_usrreqs.pru_disconnect = flow_divert_close;
4217 g_flow_divert_in6_udp_usrreqs.pru_disconnectx = flow_divert_disconnectx;
4218 g_flow_divert_in6_udp_usrreqs.pru_rcvd = flow_divert_rcvd;
4219 g_flow_divert_in6_udp_usrreqs.pru_send = flow_divert_data_out;
4220 g_flow_divert_in6_udp_usrreqs.pru_shutdown = flow_divert_shutdown;
4221 g_flow_divert_in6_udp_usrreqs.pru_sosend_list = pru_sosend_list_notsupp;
4222 g_flow_divert_in6_udp_usrreqs.pru_soreceive_list = pru_soreceive_list_notsupp;
4223 g_flow_divert_in6_udp_usrreqs.pru_preconnect = flow_divert_preconnect;
4224
4225 g_flow_divert_in6_udp_protosw.pr_usrreqs = &g_flow_divert_in6_udp_usrreqs;
4226 g_flow_divert_in6_udp_protosw.pr_ctloutput = flow_divert_ctloutput;
4227 /*
4228 * Socket filters shouldn't attach/detach to/from this protosw
4229 * since pr_protosw is to be used instead, which points to the
4230 * real protocol; if they do, it is a bug and we should panic.
4231 */
4232 g_flow_divert_in6_udp_protosw.pr_filter_head.tqh_first =
4233 (struct socket_filter *)(uintptr_t)0xdeadbeefdeadbeef;
4234 g_flow_divert_in6_udp_protosw.pr_filter_head.tqh_last =
4235 (struct socket_filter **)(uintptr_t)0xdeadbeefdeadbeef;
4236
4237 flow_divert_grp_attr = lck_grp_attr_alloc_init();
4238 if (flow_divert_grp_attr == NULL) {
4239 FDLOG0(LOG_ERR, &nil_pcb, "lck_grp_attr_alloc_init failed");
4240 g_init_result = ENOMEM;
4241 goto done;
4242 }
4243
4244 flow_divert_mtx_grp = lck_grp_alloc_init(FLOW_DIVERT_CONTROL_NAME, flow_divert_grp_attr);
4245 if (flow_divert_mtx_grp == NULL) {
4246 FDLOG0(LOG_ERR, &nil_pcb, "lck_grp_alloc_init failed");
4247 g_init_result = ENOMEM;
4248 goto done;
4249 }
4250
4251 flow_divert_mtx_attr = lck_attr_alloc_init();
4252 if (flow_divert_mtx_attr == NULL) {
4253 FDLOG0(LOG_ERR, &nil_pcb, "lck_attr_alloc_init failed");
4254 g_init_result = ENOMEM;
4255 goto done;
4256 }
4257
4258 g_init_result = flow_divert_kctl_init();
4259 if (g_init_result) {
4260 goto done;
4261 }
4262
4263 lck_rw_init(&g_flow_divert_group_lck, flow_divert_mtx_grp, flow_divert_mtx_attr);
4264
4265 done:
4266 if (g_init_result != 0) {
4267 if (flow_divert_mtx_attr != NULL) {
4268 lck_attr_free(flow_divert_mtx_attr);
4269 flow_divert_mtx_attr = NULL;
4270 }
4271 if (flow_divert_mtx_grp != NULL) {
4272 lck_grp_free(flow_divert_mtx_grp);
4273 flow_divert_mtx_grp = NULL;
4274 }
4275 if (flow_divert_grp_attr != NULL) {
4276 lck_grp_attr_free(flow_divert_grp_attr);
4277 flow_divert_grp_attr = NULL;
4278 }
4279
4280 if (g_flow_divert_kctl_ref != NULL) {
4281 ctl_deregister(g_flow_divert_kctl_ref);
4282 g_flow_divert_kctl_ref = NULL;
4283 }
4284 }
4285 }